blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e17285b254162dcbc6ef22c3ee57ec790c39cd6 | 436b90b49fac7a3f8312f4716336552ce7452d30 | /run_novosort.py | c999ad96e9e5ba36ee7010f3f1911e3c9f7a578e | [] | no_license | djf604/PsychENCODE_RNAseq | 1d2160e5d327b390c420d1c5c71d44c05ccb41c7 | 773c3a945d45f8b611fd96cf49fa8b9443f210f1 | refs/heads/master | 2021-01-13T00:16:04.843683 | 2015-10-06T15:25:19 | 2015-10-06T15:25:19 | 43,759,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | import subprocess
import sys
# Usage (Python 2): python run_novosort.py <manifest.tsv>
# The manifest is tab-separated with columns: bid_type, bid, bam_path.
input_file = sys.argv[1]
bids = []  # sample identifiers, parallel to bams
bams = []  # BAM file paths, parallel to bids
with open(input_file) as _:
    for line in _:
        # Skip comment lines.
        # NOTE(review): line.strip()[0] raises IndexError on a blank line --
        # confirm the manifest never contains empty lines.
        if line.strip()[0] == '#':
            continue
        bid_type, bid, bam = line.rstrip('\n').split('\t')
        bids.append(bid)
        bams.append(bam)
# Previously hard-coded sample lists, kept for reference:
#bids = ['2015-17', '2014-1456', '2015-18', '2014-1457', '2015-19', '2014-1458', '2015-20', '2014-1459', '2015-21', '2015-19']
#bams = ['/mnt/cinder/SCRATCH/SCRATCH/RAW/2015-17/2015-17_150109_SN484_0326_AC5FU8ACXX_8Aligned.out.bam',
#'/mnt/cinder/SCRATCH/SCRATCH/RAW/2014-1456/2014-1456_140820_SN1070_0247_AHA97GADXX_2Aligned.out.bam',
#'/mnt/cinder/SCRATCH/SCRATCH/RAW/2015-18/2015-18_150109_SN484_0326_AC5FU8ACXX_8Aligned.out.bam',
#'/mnt/cinder/SCRATCH/SCRATCH/RAW/2014-1457/2014-1457_140806_SN1070_0243_AH9FTFADXX_1Aligned.out.bam',
#'/mnt/cinder/SCRATCH/SCRATCH/RAW/2015-19/2015-19_150109_SN484_0326_AC5FU8ACXX_5Aligned.out.bam',
#'/mnt/cinder/SCRATCH/SCRATCH/RAW/2014-1458/2014-1458_140806_SN1070_0243_AH9FTFADXX_1Aligned.out.bam',
#'/mnt/cinder/SCRATCH/SCRATCH/RAW/2015-20/2015-20_150109_SN484_0326_AC5FU8ACXX_5Aligned.out.bam',
#'/mnt/cinder/SCRATCH/SCRATCH/RAW/2014-1459/2014-1459_140806_SN1070_0243_AH9FTFADXX_2Aligned.out.bam',
#'/mnt/cinder/SCRATCH/SCRATCH/RAW/2015-21/2015-21_150109_SN484_0326_AC5FU8ACXX_3Aligned.out.bam',
#'/mnt/cinder/SCRATCH/SCRATCH/RAW/2015-19/2015-19_150616_SN484_0358_AC73V5ACXX_8Aligned.out.bam']
novosort_path = '~/TOOLS/nova/novosort'
prefix = '/mnt/cinder/SCRATCH/SCRATCH/RAW/'
run_cmd = ''
# Build one long ';'-separated shell command that sorts+indexes every BAM
# with novosort, redirecting stderr to a per-sample log file.
for i,bam in enumerate(bams):
    # assumes the 8th path component (index 7) is the BAM file name -- TODO confirm
    file_name = bam.split('/')[7]
    file_name = file_name.split('.')[0]
    output_path = prefix + bids[i] + '/' + file_name + '.sorted.Aligned.out.bam'
    run_cmd += novosort_path + ' --output ' + output_path + ' --index ' + bam + ' 2>' + prefix + bids[i] + '/' + 'novosort.run.log;'
# The commands are only printed (Python 2 print statement), not executed.
print run_cmd
| [
"ubuntu@rnaseq2015aug12.novalocal"
] | ubuntu@rnaseq2015aug12.novalocal |
91e2f09a920b96ad5419030191939df0f1d3bf8c | e1fb4f1e599983facdc1510ad8c57de4ba6460b3 | /Holdy_Blog/Holdy_Blog/settings.py | 6d0a1196e50edaa138afff04e4a4202e259c2435 | [] | no_license | sandeepcse2004/Django_Bloging_App | eaa8d570e54cda916873068b092b19da75ce4782 | 7cbd18a79095f73494845d4ab49dd30ef70bea96 | refs/heads/master | 2023-03-18T04:26:21.510435 | 2021-03-08T15:16:45 | 2021-03-08T15:16:45 | 262,552,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,403 | py | """
Django settings for Holdy_Blog project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed in source; move it to an environment
# variable before any deployment.
SECRET_KEY = '434-a769gkb-j(3*mf)37fu0c2nxbkgk8vd=ckk)*250%=$38c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'articles',
    'accounts',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Holdy_Blog.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # 'DIRS': [os.path.join(BASE_DIR, 'articles/templates', 'articles')],
        # 'DIRS': [os.path.join(BASE_DIR, 'templates')],
        # Relative path: resolved against each app + the working directory.
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'Holdy_Blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): the trailing comma makes this a 1-tuple, i.e. (path,).
# Django accepts it, but a list literal would read more clearly.
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'assets')),
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| [
"58394871+sandeepcse2004@users.noreply.github.com"
] | 58394871+sandeepcse2004@users.noreply.github.com |
fe5af079ed018584ffb250f1a2f08f5b1a5f1071 | 5cddb0aa131ec9f3d31ffab45932006be209e080 | /blog/views.py | a43c8a9aebd0f1b3a477cf974ffa3606f9036099 | [] | no_license | infinitejest/my-first-blog | 2fed568b4f7afa7d60dd11fd9bc5fdf2bda0f484 | 30ef5ca7419060405e1c156be7fcbd2a6cba4dfc | refs/heads/master | 2021-01-24T03:12:39.067490 | 2018-02-28T02:28:02 | 2018-02-28T02:28:02 | 122,882,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | from django.shortcuts import render, get_object_or_404
from .models import Post
from django.utils import timezone
# Create your views here.
def post_list(request):
    """Render every post published up to now, oldest first."""
    cutoff = timezone.now()
    published = Post.objects.filter(published_date__lte=cutoff).order_by('published_date')
    return render(request, 'blog/post_list.html', {'posts': published})
def post_detail(request, pk):
    """Render a single post; return 404 when no post has primary key *pk*."""
    requested_post = get_object_or_404(Post, pk=pk)
    context = {'post': requested_post}
    return render(request, 'blog/post_detail.html', context)
| [
"rodriguezandr@gmail.com"
] | rodriguezandr@gmail.com |
8036266ccbef0327a21e8d4fcd949ac354764b82 | a9b4c4298599310123245ea90730a5bcfd6108a8 | /NewOne/NewOne/wsgi.py | 14409221b2094ae61f439e709affaef84ed098bb | [] | no_license | Foxtrot983/DjangoNew | 338edfb0d0c4b9cc97b90b9e1920e90e1512aa3e | 4b4eef7b7cdd8866451e3eeb6cd576849c7f043a | refs/heads/master | 2023-08-17T18:15:18.850371 | 2021-10-09T06:41:32 | 2021-10-09T06:41:32 | 417,080,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for NewOne project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'NewOne.settings')
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) import.
application = get_wsgi_application()
| [
"wladiksan@gmail.com"
] | wladiksan@gmail.com |
b9909582d05dccb31f82877fae7a30a999ad3b4d | 893126714c188c906a2d43301294e5126f466de7 | /app/notifications.py | 97baa25b8bcb7c3cf1f2109b775e0d2252487b47 | [] | no_license | patnaa2/waterbud | d5339cd90b0fd7f48bb941dc70f8c2530e16be1a | dbf2d506d622d2f8f0462fbeb1e8bb5e36828662 | refs/heads/master | 2020-12-25T13:45:04.154588 | 2018-03-26T18:58:17 | 2018-03-26T18:58:17 | 62,075,051 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,614 | py | from __future__ import division
import calendar
import datetime
import pymongo
import sys
class Notifications(object):
MONGO_LOCATION = "127.0.0.1:27017"
DB = "waterbud"
def __init__(self, db=None):
self._db = db
self.coll = 'notifications'
# Try connecting to db, if failure (break)
if not self._db:
self.connect_to_db()
def alert_leak(self, location):
msg = "CRITICAL: Leak detected at %s sensor." %(location),
self.general_alert(msg)
def alert_usage_level(self):
'''
Hack function here, we will do some of the logic here,
since I am not really down to create a job/cron
to take care of this, just do the analysis in the alert function..
We can have an overhead process to take care of when to alert
so we don't get spammed consistently
'''
# $$$ ---> data analysis ---> bling bling
month = datetime.datetime.now().replace(day=1,
hour=0,
minute=0,
second=0,
microsecond=0)
res = self._db['monthly_summary'].find_one({"month":month})
limit = res["limit"]
current = res["current_spending"]
# Basic linear interpolation
days_in_month = calendar.monthrange(month.year, month.month)[1]
current_day = datetime.datetime.now().day
days_left = days_in_month - current_day
expected_spending = (current / current_day) * days_in_month
diff = expected_spending - limit
percent_of_limit = int( diff / limit * 100)
if expected_spending > limit:
msg = "Warning: You are expected to overspend by $%.2f."\
" Expected monthly expense is $%.2f(%s%% of your set monthly limit)."\
%(diff, expected_spending, percent_of_limit)
# let's do some positive reinforcement too
elif expected_spending < limit:
msg = "Great Job. You are expected to save $%.2f off your limit."\
" Expected monthly expense is $%.2f(%s%% of your set monthly limit)."\
%(diff, expected_spending, percent_of_limit)
# in case somehow we make it equal.. idk how but sure
else:
msg = "Good Job. You are expected to meet your monthly limit of $%.2f."\
%(limit)
# Hack Anshuman --> I have a gut feeling we are going to need to
# need an easy way to differentiate these alerts from everything else
# so I am just going to create a type field specifically for these
# alerts
data = {"msg" : msg,
"timestamp" : datetime.datetime.now(),
"read" : False,
"type" : "Usage Level"}
self._db[self.coll].insert_one(data)
def general_alert(self, msg):
data = {"msg" : msg,
"timestamp" : datetime.datetime.now(),
"read" : False}
self._db[self.coll].insert_one(data)
def connect_to_db(self):
try:
self._db = pymongo.MongoClient(self.MONGO_LOCATION)[self.DB]
except:
# treat any exception as a failure
msg = "Unable to connect to database"
self.epic_failure(msg)
def epic_failure(self, text):
print "\033[1;41m" + text + "\033[1;m"
sys.exit(1)
# Guard: this module only defines the Notifications helper class and is not
# meant to be executed directly (Python 2 print statement below).
if __name__ == '__main__':
    print "This is just a helper class library. Don't call with main"
    sys.exit(1)
| [
"anshuman.patnaik2008@gmail.com"
] | anshuman.patnaik2008@gmail.com |
aa8a70b270c71c4b86bc4e741363c6707ccad445 | 9fa4dc0461ccbe8365ec2ec686e28896cec90f85 | /src/eval.py | 7f99f50bd393214b5581b62ba96823223a8ec855 | [] | no_license | ahwang16/senior-thesis-2019 | 537be007e32bf072c66d5b76db378c94aad02fa7 | 4db3825e5726775050eccb03f763dcd037f073e2 | refs/heads/master | 2021-07-15T03:01:45.247186 | 2020-10-20T16:08:06 | 2020-10-20T16:08:06 | 215,397,637 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,073 | py | # eval.py
from gensim.models import Word2Vec
import itertools
import json
from mittens import GloVe
from nltk import bigrams
from nltk.cluster import KMeansClusterer
from nltk.cluster.util import cosine_distance
from nltk.corpus import wordnet
import numpy as np
import pandas as pd
import pickle as pkl
import re
from sklearn.cluster import AgglomerativeClustering
import spacy
import sys
# datasets
_aae_vocab = set()
_gv_vocab = set()
# embeddings
_glove_50 = {}
_cn = {}
GLOVE_50_DIR = "../glove.twitter.27B/glove.twitter.27B.50d.txt"
# load datasets (GV and AAE)
def load_aae():
    """Load the pickled AAE vocabulary into the module-level set."""
    global _aae_vocab
    aae_file = open("../data/aae_vocab.pkl", "rb")
    try:
        _aae_vocab = pkl.load(aae_file)
    finally:
        aae_file.close()
def load_gv():
    """Load the pickled GV vocabulary into the module-level set."""
    global _gv_vocab
    gv_file = open("../gv_vocab.pkl", "rb")
    try:
        _gv_vocab = pkl.load(gv_file)
    finally:
        gv_file.close()
def load_cn(data, path="../data/"):
    """Populate the module-level ConceptNet gold map from a TSV gold file.

    Bug fix: the original ignored both arguments and always opened
    ``../data/gv_cn_gold.txt``, so the AAE branch in ``__main__`` silently
    evaluated against the GV gold data. For the callers in this file
    (``load_cn("gv_cn_gold.txt")``) behaviour is unchanged.

    Params:
        data (str): gold file name, e.g. "gv_cn_gold.txt"
        path (str): directory prefix the file lives in
    """
    with open(path + data, "r") as infile:
        next(infile)  # skip the header row
        for line in infile:
            l = line.split("\t")
            # Column 0 is the word, column 1 a JSON list of gold neighbours.
            _cn[l[0]] = json.loads(l[1])
# Load 50-dim pretrained GloVe embeddings from text file
def load_glove(dir=GLOVE_50_DIR):
    """Read whitespace-separated GloVe vectors into the module-level dict."""
    with open(dir, "r") as glove_file:
        for line in glove_file:
            parts = line.split()
            # First token is the word; the rest are the vector components.
            _glove_50[parts[0]] = np.asarray(parts[1:], dtype="float32")
# https://codereview.stackexchange.com/questions/235633/generating-a-co-occurrence-matrix
def by_indexes(iterable):
output = {}
for index, key in enumerate(iterable):
output.setdefault(key, []).append(index)
return output
# https://codereview.stackexchange.com/questions/235633/generating-a-co-occurrence-matrix
def co_occurrence_matrix(corpus, vocabulary, window_size=2):
def split_tokens(tokens):
for token in tokens:
indexs = vocabulary_indexes.get(token)
if indexs is not None:
yield token, indexs[0]
matrix = np.zeros((len(vocabulary), len(vocabulary)), np.float64)
vocabulary_indexes = by_indexes(vocabulary)
for sent in corpus:
tokens = by_indexes(split_tokens(sent.split())).items()
for ((word_1, x), indexes_1), ((word_2, y), indexes_2) in itertools.permutations(tokens, 2):
for k in indexes_1:
for l in indexes_2:
if abs(l - k) <= window_size:
matrix[x, y] += 1
return matrix
# finetune GloVe
# https://github.com/ashutoshsingh0223/mittens
def finetune_glove(corpus, vocab):
    """Fit GloVe embeddings on the corpus co-occurrence matrix and return them.

    Bug fix: the original computed ``embeddings`` and then fell off the end of
    the function, discarding the result and returning None.
    """
    cooc = co_occurrence_matrix(corpus, vocab)
    glove = GloVe(n=2, max_iter=100)
    embeddings = glove.fit(cooc)
    return embeddings
# https://www.shanelynn.ie/word-embeddings-in-python-with-spacy-and-gensim/
def load_w2v(corpus):
nlp = spacy.load('en_core_web_sm')
sents = []
for c in corpus:
doc = nlp(c)
sents += [d.text for d in doc if not d.is_punct]
model = Word2Vec(sents, min_count=1)
return model
# get embeddings
def get_embeddings(vocab, embed_type):
    """Return (embeddings, words, missing) for the requested embedding type.

    For "w2v", trains word2vec on the AAE corpus file and returns all of its
    vectors (missing is None). Otherwise looks each vocab word up in the
    pretrained 50-dim GloVe table, counting words without a vector.
    """
    if embed_type == "w2v":
        with open("../data/twitteraae_aa.txt", "r") as infile:
            w2v = load_w2v(infile.read().splitlines())
        return w2v.wv[w2v.wv.vocab], list(w2v.wv.vocab), None
    print("loading glove")
    load_glove()
    # get GloVe word embeddings and number of missing words
    print("gloving vocab")
    embeds, words = [], []
    missing = 0
    for v in vocab :
        # Bare except: any lookup failure counts the word as missing.
        try:
            embeds.append(_glove_50[v])
            words.append(v)
        except:
            missing += 1
    return embeds, words, missing
# cluster (kmeans)
def kmeans(vocab, data, embed_type, k=900, r=25, file_num=0):
    """
    Cluster word embeddings with the kmeans algorithm (cosine distance) and
    pickle the resulting {cluster_index: set(words)} mapping.

    Params:
        vocab (set): set of all words in dataset
        data (string): dataset name for output file names
        embed_type (string): "w2v" for word2vec, anything else for GloVe
        k (int): number of clusters
        r (int): number of repeats
        file_num (int): number for output file names

    Returns:
        None; writes ../data/kmeans_clusters_<data>_<file_num>.pkl
    """
    ### CLUSTERING #############################################################
    print("clustering")
    embeds, words, missing = get_embeddings(vocab, embed_type)
    print("missing from glove:", missing)
    clusterer = KMeansClusterer(k, distance=cosine_distance, repeats=r)
    clusters = clusterer.cluster(embeds, assign_clusters=True)
    print("enumerating")
    cluster_dict = { i : [] for i in range(k) }
    # NOTE(review): word_to_cluster is built but never used or saved.
    word_to_cluster = {}
    for i, v in enumerate(words):
        cluster_dict[clusters[i]].append(v)
        word_to_cluster[v] = clusters[i]
    # Deduplicate each cluster's member list.
    for c in cluster_dict :
        cluster_dict[c] = set(cluster_dict[c])
    print("pickling")
    with open("../data/kmeans_clusters_{}_{}.pkl".format(data, file_num), "wb") as p :
        pkl.dump(cluster_dict, p)
    ############################################################################
def eval(path_to_cluster, data, embed_type, file_num):
    """Score pickled clusters against WordNet and ConceptNet gold synonym sets.

    Writes two CSVs: per-word precision/recall and per-cluster averages.

    Params:
        path_to_cluster (str): pickle produced by kmeans()
        data (str): dataset name for the output file names
        embed_type (str): embedding name for the output file names
        file_num: suffix for the output file names
    """
    words = []      # per-word metric dicts
    words_idx = []  # row index (the words) for the per-word DataFrame
    clusters = []   # per-cluster averaged metric dicts
    with open(path_to_cluster, "rb") as infile:
        kmeans_clusters_cn = pkl.load(infile)
    for cluster_idx in kmeans_clusters_cn :
        precision_wn, recall_wn, precision_cn, recall_cn = [], [], [], []
        cluster = kmeans_clusters_cn[cluster_idx]
        for word in cluster :
            # NOTE(review): these counters are re-initialised per word, so they
            # are only ever 0 or 1 in the per-word output.
            missing_from_wn, missing_from_cn = 0, 0
            gold_wn = get_gold_wn(word)
            try:
                gold_cn = _cn[word]
            except:
                gold_cn = set()
            # NOTE(review): assumes _cn values support .add() (i.e. sets);
            # load_cn stores json.loads() results -- confirm they are not lists.
            # This also mutates the shared _cn entry in place.
            gold_cn.add(word)
            # A gold set of size 1 means only the word itself: not in the resource.
            missing_from_wn += len(gold_wn) == 1
            missing_from_cn += len(gold_cn) == 1
            true_positive_wn = len(cluster.intersection(gold_wn))
            false_positive_wn = len(cluster - gold_wn)
            false_negative_wn = len(gold_wn - cluster)
            p_wn = true_positive_wn / (true_positive_wn + false_positive_wn)
            r_wn = true_positive_wn / (true_positive_wn + false_negative_wn)
            precision_wn.append(p_wn)
            recall_wn.append(r_wn)
            true_positive_cn = len(cluster.intersection(gold_cn))
            false_positive_cn = len(cluster - gold_cn)
            false_negative_cn = len(gold_cn - cluster)
            p_cn = true_positive_cn / (true_positive_cn + false_positive_cn)
            r_cn = true_positive_cn / (true_positive_cn + false_negative_cn)
            precision_cn.append(p_cn)
            recall_cn.append(r_cn)
            words_idx.append(word)
            words.append({"precision_wn" : p_wn, "recall_wn" : r_wn,
                          "precision_cn" : p_cn, "recall_cn" : r_cn,
                          "missing_from_cn" : missing_from_cn,
                          "missing_from_wn" : missing_from_wn})
        clusters.append({"precision_wn" : np.mean(precision_wn),
                        "recall_wn" : np.mean(recall_wn),
                        "precision_cn" : np.mean(precision_cn),
                        "recall_cn" : np.mean(recall_cn)})
    pd.DataFrame(words, index=words_idx).to_csv("{}_words_{}_{}.csv".format(data, embed_type, file_num))
    pd.DataFrame(clusters).to_csv("{}_clusters_{}_{}.csv".format(data, embed_type, file_num))
def get_gold_wn(word):
    """Return the WordNet gold synonym set for *word*, always including *word*."""
    synonyms = {word}
    for synset in wordnet.synsets(word):
        synonyms.update(lemma.name() for lemma in synset.lemmas())
    return synonyms
'''
### UNECESSARY FUNCTIONS ######################################################
# eval with CN
def eval_cn(cluster):
"""
Given a cluster, compute precision and recall for each word and average for
entire cluster. Return number of words not in concept net.
Params:
cluster (set): set of words
Returns:
scores (dict): ...
"""
pass
# eval with WN
def eval_wn(cluster):
words = [] # dictionary of precision and recall values
words_idx
precision, recall = [], []
for word in cluster:
gold = get_gold_wn(word)
tp = len(cluster.intersection(gold))
fp = len(cluster - gold)
fn = len(gold - cluster)
precision.append(tp / (tp + fp))
recall.append(tp / (tp + fn))
def get_gold_wn(word):
gold = set()
for syn in wordnet.synsets(word):
for l in syn.lemmas():
gold.add(l.name())
gold.add(word)
return gold
'''
if __name__ == "__main__":
data, file_num, embed_type = sys.argv[1], sys.argv[2], sys.argv[3]
if data == "gv":
print("loading gv")
load_gv()
k = int(len(_gv_vocab) / 10)
print(k)
print("clustering")
kmeans(_gv_vocab, data, embed_type, k=k, file_num=file_num)
print("evaluating")
load_cn("gv_cn_gold.txt")
elif data == "aae":
print("loading aae")
load_aae()
k = int(len(_aae_vocab) / 10)
print(k)
print("clustering")
kmeans(_aae_vocab, data, embed_type, k=k, file_num=file_num)
print("evaluating")
load_cn("aae_cn_gold.txt")
eval("../data/kmeans_clusters_{}_{}_{}.pkl".format(data, embed_type, file_num), data, embed_type, file_num)
| [
"ahh2143@columbia.edu"
] | ahh2143@columbia.edu |
75f976ce151aefde1fa3d404c7209b7ddf72d743 | 6c82017287ada42e7705542e6dc43ed04dbebaab | /rep/01-第一个爬虫.py | ab8e4c660099f2afe4202a0df82b918c98447a13 | [] | no_license | lubensh/rep | e211c13a8ae9ac558776fed457c6b6f5c8013afe | 84309c739fa24f15b3ae55720bb39e17cba24453 | refs/heads/master | 2020-04-07T05:10:50.250659 | 2018-11-21T13:00:57 | 2018-11-21T13:00:57 | 158,085,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from urllib.request import urlopen
url = "http://www.baidu.com/"
#发送请求
response = urlopen(url)
#读取内容
info = response.read()
#打印内容
print(info.decode())
#状态码
# code = response.getcode()
# print(code)
# #真实url
# urls = response.geturl()
# print(urls)
# #响应头
# info = response.info()
# print(info) | [
"13521811669@163.com"
] | 13521811669@163.com |
e7f079a24da8675233f657282768daa52802bbed | e919655b0bf47085ea70cbe0826c870e782a630c | /docs/doxygen/doxyxml/doxyindex.py | 2de577e1ad81ee97444528467336b84df05430a7 | [] | no_license | zhoushiqi88/phylayer | 7dc5ea21d2658b494e07847259d8db8b490dd011 | 95ba2b59eb65f8873eca36ae6912a0cc6bb35d88 | refs/heads/master | 2020-11-26T21:42:05.114099 | 2019-12-27T07:56:02 | 2019-12-27T07:56:02 | 229,210,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,898 | py | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file was generated by gr_modtool, a tool from the GNU Radio framework
# This file is a part of gr-phylayer
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Classes providing more user-friendly interfaces to the doxygen xml
docs than the generated classes provide.
"""
import os
from generated import index
from base import Base
from text import description
class DoxyIndex(Base):
    """
    Parses a doxygen xml directory.
    """
    __module__ = "gnuradio.utils.doxyxml"
    def _parse(self):
        # Lazily parse index.xml once; repeat calls are no-ops.
        if self._parsed:
            return
        super(DoxyIndex, self)._parse()
        self._root = index.parse(os.path.join(self._xml_path, 'index.xml'))
        for mem in self._root.compound:
            converted = self.convert_mem(mem)
            # For files and namespaces we want the contents to be
            # accessible directly from the parent rather than having
            # to go through the file object.
            if self.get_cls(mem) == DoxyFile:
                # Only header-file members are hoisted to the top level.
                if mem.name.endswith('.h'):
                    self._members += converted.members()
                self._members.append(converted)
            elif self.get_cls(mem) == DoxyNamespace:
                self._members += converted.members()
                self._members.append(converted)
            else:
                self._members.append(converted)
    def generate_swig_doc_i(self):
        """
        %feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
        Wraps the C++: gr_align_on_samplenumbers_ss::align_state";
        """
        # Not implemented; the docstring shows the intended SWIG output format.
        pass
class DoxyCompMem(Base):
    """Shared base for doxygen compound and member wrappers."""
    # Subclasses set this to the doxygen 'kind' attribute they handle.
    kind = None
    def __init__(self, *args, **kwargs):
        super(DoxyCompMem, self).__init__(*args, **kwargs)
    @classmethod
    def can_parse(cls, obj):
        # An XML node is handled by the subclass whose kind matches it.
        return obj.kind == cls.kind
    def set_descriptions(self, parse_data):
        # Extract brief/detailed description text from the parsed XML node.
        bd = description(getattr(parse_data, 'briefdescription', None))
        dd = description(getattr(parse_data, 'detaileddescription', None))
        self._data['brief_description'] = bd
        self._data['detailed_description'] = dd
    def set_parameters(self, data):
        # Collect documented parameters out of the detailed-description blocks.
        vs = [ddc.value for ddc in data.detaileddescription.content_]
        pls = []
        for v in vs:
            if hasattr(v, 'parameterlist'):
                pls += v.parameterlist
        pis = []
        for pl in pls:
            pis += pl.parameteritem
        dpis = []
        for pi in pis:
            dpi = DoxyParameterItem(pi)
            dpi._parse()
            dpis.append(dpi)
        self._data['params'] = dpis
class DoxyCompound(DoxyCompMem):
    # Marker base for doxygen compounds (classes, files, groups, ...).
    pass
class DoxyMember(DoxyCompMem):
    # Marker base for doxygen members (functions, friends, ...).
    pass
class DoxyFunction(DoxyMember):
    """Wraps a doxygen <memberdef kind="function"> element."""
    __module__ = "gnuradio.utils.doxyxml"
    kind = 'function'
    def _parse(self):
        if self._parsed:
            return
        super(DoxyFunction, self)._parse()
        self.set_descriptions(self._parse_data)
        self.set_parameters(self._parse_data)
        if not self._data['params']:
            # If the params weren't set by a comment then just grab the names.
            self._data['params'] = []
            prms = self._parse_data.param
            for prm in prms:
                self._data['params'].append(DoxyParam(prm))
    brief_description = property(lambda self: self.data()['brief_description'])
    detailed_description = property(lambda self: self.data()['detailed_description'])
    params = property(lambda self: self.data()['params'])
# Register so Base can dispatch function nodes to this class.
Base.mem_classes.append(DoxyFunction)
class DoxyParam(DoxyMember):
    """Wraps a doxygen function-parameter element."""
    __module__ = "gnuradio.utils.doxyxml"
    def _parse(self):
        if self._parsed:
            return
        super(DoxyParam, self)._parse()
        self.set_descriptions(self._parse_data)
        self._data['declname'] = self._parse_data.declname
    @property
    def description(self):
        # Join brief + detailed descriptions, skipping whichever is empty.
        descriptions = []
        if self.brief_description:
            descriptions.append(self.brief_description)
        if self.detailed_description:
            descriptions.append(self.detailed_description)
        return '\n\n'.join(descriptions)
    brief_description = property(lambda self: self.data()['brief_description'])
    detailed_description = property(lambda self: self.data()['detailed_description'])
    name = property(lambda self: self.data()['declname'])
class DoxyParameterItem(DoxyMember):
    """A different representation of a parameter in Doxygen."""
    def _parse(self):
        if self._parsed:
            return
        super(DoxyParameterItem, self)._parse()
        # A parameteritem may list several names (e.g. grouped docs).
        names = []
        for nl in self._parse_data.parameternamelist:
            for pn in nl.parametername:
                names.append(description(pn))
        # Just take first name
        self._data['name'] = names[0]
        # Get description
        pd = description(self._parse_data.get_parameterdescription())
        self._data['description'] = pd
    description = property(lambda self: self.data()['description'])
    name = property(lambda self: self.data()['name'])
class DoxyClass(DoxyCompound):
    """Wraps a doxygen <compounddef kind="class"> element."""
    __module__ = "gnuradio.utils.doxyxml"
    kind = 'class'
    def _parse(self):
        if self._parsed:
            return
        super(DoxyClass, self)._parse()
        self.retrieve_data()
        # Stop early if the per-class XML file could not be retrieved.
        if self._error:
            return
        self.set_descriptions(self._retrieved_data.compounddef)
        self.set_parameters(self._retrieved_data.compounddef)
        # Sectiondef.kind tells about whether private or public.
        # We just ignore this for now.
        self.process_memberdefs()
    brief_description = property(lambda self: self.data()['brief_description'])
    detailed_description = property(lambda self: self.data()['detailed_description'])
    params = property(lambda self: self.data()['params'])
# Register so Base can dispatch class nodes to this class.
Base.mem_classes.append(DoxyClass)
class DoxyFile(DoxyCompound):
    """Wraps a doxygen <compounddef kind="file"> element."""
    __module__ = "gnuradio.utils.doxyxml"
    kind = 'file'
    def _parse(self):
        if self._parsed:
            return
        super(DoxyFile, self)._parse()
        self.retrieve_data()
        self.set_descriptions(self._retrieved_data.compounddef)
        # Skip member processing if the per-file XML could not be retrieved.
        if self._error:
            return
        self.process_memberdefs()
    brief_description = property(lambda self: self.data()['brief_description'])
    detailed_description = property(lambda self: self.data()['detailed_description'])
# Register so Base can dispatch file nodes to this class.
Base.mem_classes.append(DoxyFile)
class DoxyNamespace(DoxyCompound):
    """Wraps a doxygen <compounddef kind="namespace"> element."""
    __module__ = "gnuradio.utils.doxyxml"
    kind = 'namespace'
    def _parse(self):
        if self._parsed:
            return
        super(DoxyNamespace, self)._parse()
        self.retrieve_data()
        self.set_descriptions(self._retrieved_data.compounddef)
        # Skip member processing if the namespace XML could not be retrieved.
        if self._error:
            return
        self.process_memberdefs()
# Register so Base can dispatch namespace nodes to this class.
Base.mem_classes.append(DoxyNamespace)
class DoxyGroup(DoxyCompound):
    """Wraps a doxygen <compounddef kind="group"> element."""
    __module__ = "gnuradio.utils.doxyxml"
    kind = 'group'
    def _parse(self):
        if self._parsed:
            return
        super(DoxyGroup, self)._parse()
        self.retrieve_data()
        if self._error:
            return
        cdef = self._retrieved_data.compounddef
        self._data['title'] = description(cdef.title)
        # Process inner groups
        grps = cdef.innergroup
        for grp in grps:
            converted = DoxyGroup.from_refid(grp.refid, top=self.top)
            self._members.append(converted)
        # Process inner classes
        klasses = cdef.innerclass
        for kls in klasses:
            converted = DoxyClass.from_refid(kls.refid, top=self.top)
            self._members.append(converted)
        # Process normal members
        self.process_memberdefs()
    title = property(lambda self: self.data()['title'])
# Register so Base can dispatch group nodes to this class.
Base.mem_classes.append(DoxyGroup)
class DoxyFriend(DoxyMember):
    """Wraps a doxygen friend declaration."""
    __module__ = "gnuradio.utils.doxyxml"
    kind = 'friend'
Base.mem_classes.append(DoxyFriend)
class DoxyOther(Base):
    """Catch-all wrapper for doxygen kinds with no dedicated class."""
    __module__ = "gnuradio.utils.doxyxml"
    kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum',
                 'dir', 'page', 'signal', 'slot', 'property'])
    @classmethod
    def can_parse(cls, obj):
        return obj.kind in cls.kinds
# Register last so it acts as the fallback handler.
Base.mem_classes.append(DoxyOther)
"zhoushiqi_uestc@163.com"
] | zhoushiqi_uestc@163.com |
746189a6a31b7800c0ae04e188a38f2aef70a6b3 | 3783123732851d3306265fd2299474372dccc8a1 | /main_folder/models/sample/sample.py | 8b1f17c721c9abff0a285020250fa869130d841d | [] | no_license | davevs/pyTM---addendum | 94308b01e51da95e28cd394d8eece2c6abe7af12 | f86a3c2881da328a55f0748a1dbc54485e6c545b | refs/heads/master | 2020-05-04T17:05:15.580547 | 2019-04-03T13:50:25 | 2019-04-03T13:50:25 | 179,297,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,433 | py | # !/usr/bin/env python3
from pytm.pytm import TM, Server, Datastore, Dataflow, Boundary, Actor, Lambda
tm = TM("my test tm")
tm.description = "another test tm"
User_Web = Boundary("User/Web")
Web_DB = Boundary("Web/DB")
user = Actor("User")
user.inBoundary = User_Web
web = Server("Web Server")
web.OS = "CloudOS"
web.isHardened = True
db = Datastore("SQL Database (*)")
db.OS = "CentOS"
db.isHardened = False
db.inBoundary = Web_DB
db.isSql = True
db.inScope = False
web.
my_lambda = Lambda("cleanDBevery6hours")
my_lambda.hasAccessControl = True
my_lambda.inBoundary = Web_DB
my_lambda_to_db = Dataflow(my_lambda, db, "(λ)Periodically cleans DB")
my_lambda_to_db.protocol = "SQL"
my_lambda_to_db.dstPort = 3306
user_to_web = Dataflow(user, web, "User enters comments (*)")
user_to_web.protocol = "HTTP"
user_to_web.dstPort = 80
user_to_web.data = 'Comments in HTML or Markdown'
user_to_web.order = 1
web_to_user = Dataflow(web, user, "Comments saved (*)")
web_to_user.protocol = "HTTP"
web_to_user.data = 'Ack of saving or error message, in JSON'
web_to_user.order = 2
web_to_db = Dataflow(web, db, "Insert query with comments")
web_to_db.protocol = "MySQL"
web_to_db.dstPort = 3306
web_to_db.data = 'MySQL insert statement, all literals'
web_to_db.order = 3
db_to_web = Dataflow(db, web, "Comments contents")
db_to_web.protocol = "MySQL"
db_to_web.data = 'Results of insert op'
db_to_web.order = 4
tm.process() | [
"dvanstein@xebia.com"
] | dvanstein@xebia.com |
f80eb72afaa6b99fa30979777eea4dcb73b0b439 | fb2c19e1677c18e74898ac69aa05e66688723b70 | /DenseFisher/models/densegan_complete.py | 450ed6e4ecc0956a4fe6f28a7e3fb21214dbebbd | [] | no_license | Columbia-Creative-Machines-Lab/Dense-Generators | 337abe164e90649b1462eab90e1e7a1b63835eb4 | 3cc3b8b6501b03a63e52cfdd7098a721f0f6e4a9 | refs/heads/master | 2021-09-01T05:42:30.300764 | 2017-12-25T05:53:12 | 2017-12-25T05:53:12 | 103,167,449 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,106 | py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import torchvision.models as models
import sys
import math
class GenBottleneck(nn.Module):
    """DenseNet-style generator bottleneck: BN-ReLU-1x1 then BN-ReLU-3x3
    transposed convolutions, with the input concatenated onto the output."""
    def __init__(self, nChannels, growthRate):
        super(GenBottleneck, self).__init__()
        interChannels = 4 * growthRate
        # Layers are created in this exact order (matters for seeded init).
        self.bn1 = nn.BatchNorm2d(nChannels)
        self.conv1 = nn.ConvTranspose2d(nChannels, interChannels,
                                        kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(interChannels)
        self.conv2 = nn.ConvTranspose2d(interChannels, growthRate,
                                        kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        # 1x1 expansion followed by 3x3 growth; spatial size is preserved.
        hidden = self.conv1(F.relu(self.bn1(x)))
        grown = self.conv2(F.relu(self.bn2(hidden)))
        # Dense connectivity: stack the new feature maps onto the input.
        return torch.cat((x, grown), 1)
class GenTransition(nn.Module):
    """Generator transition block.

    BN -> ReLU -> 1x1 transposed conv reduces channels from nChannels to
    nOutChannels, then nearest-neighbour upsampling doubles the spatial
    resolution (the inverse of the discriminator's avg-pool transition).
    """

    def __init__(self, nChannels, nOutChannels):
        super(GenTransition, self).__init__()
        self.bn1 = nn.BatchNorm2d(nChannels)
        self.conv1 = nn.ConvTranspose2d(nChannels, nOutChannels, kernel_size=1,
                               bias=False)
        # Bug fix: nn.Upsample's first positional argument is `size`; the old
        # call passed nOutChannels there together with scale_factor=2, which
        # made forward() raise "only one of size or scale_factor should be
        # defined". Only the scale factor is wanted here.
        self.up1 = nn.Upsample(scale_factor=2, mode='nearest')

    def forward(self, x):
        out = self.conv1(F.relu(self.bn1(x)))
        out = self.up1(out)  # double H and W
        return out
class GenDenseNet(nn.Module):
    """Generator network built from DenseNet-style blocks.

    Maps a latent tensor with `nz` channels through five dense-block /
    transition stages (each transition trims 2*growthRate channels and
    doubles the spatial size) to a 3-channel image squashed to [-1, 1]
    by Tanh.

    NOTE(review): the `increase` argument is currently unused — confirm
    whether it was meant to drive the per-stage channel change.
    """
    def __init__(self, growthRate, depth, increase, nz, bottleneck=1, verbose=1):
        super(GenDenseNet, self).__init__()
        self.verbose = verbose
        # Initial 3x3 transposed conv lifts the latent code to growthRate*7 channels.
        self.conv1 = nn.ConvTranspose2d(nz, growthRate*7 , kernel_size=3, padding=1,
                               bias=False)
        #self.bn_1 = nn.BatchNorm2d(growthRate*7)
        # Units per dense block; halved when bottleneck blocks are used
        # (each bottleneck unit contains two conv layers).
        nDenseBlocks = (depth-4) // 3
        if bottleneck:
            nDenseBlocks //= 2
        nChannels = growthRate*7
        # Stage 1: the dense block grows channels by nDenseBlocks*growthRate,
        # then the transition trims 2*growthRate and doubles H/W.
        # Stages 2-5 repeat the same pattern.
        self.dense1 = self._make_dense( nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks*growthRate
        nOutChannels = nChannels-(growthRate*2)
        self.trans1 = GenTransition(nChannels, nOutChannels)
        nChannels = nOutChannels
        self.dense2 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks*growthRate
        nOutChannels = nChannels-(growthRate*2)
        self.trans2 = GenTransition( nChannels, nOutChannels)
        nChannels = nOutChannels
        self.dense3 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks*growthRate
        nOutChannels = nChannels-(growthRate*2)
        self.trans3 = GenTransition( nChannels, nOutChannels)
        nChannels = nOutChannels
        self.dense4 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks*growthRate
        nOutChannels = nChannels-(growthRate*2)
        self.trans4 = GenTransition( nChannels, nOutChannels)
        nChannels = nOutChannels
        self.dense5 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks*growthRate
        nOutChannels = nChannels-(growthRate*2)
        self.trans5 = GenTransition( nChannels, nOutChannels)
        # Final 3x3 transposed conv down to 3 (RGB) channels, then Tanh.
        self.conv_f = nn.ConvTranspose2d(nOutChannels, 3, kernel_size=3, padding=1,
                               bias=False)
        #self.bn_f = nn.BatchNorm2d(nOutChannels)
        #self.bn1 = nn.BatchNorm2d(nChannels)
        self.ufinal = nn.Tanh()
        # He-style init for convs; BatchNorm to identity.
        # NOTE(review): the isinstance check targets nn.Conv2d, so the
        # ConvTranspose2d layers above keep PyTorch's default init —
        # confirm this is intended.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def _make_dense(self, nChannels, growthRate, nDenseBlocks, bottleneck=1):
        # Stack nDenseBlocks bottleneck units; channel count grows by
        # growthRate per unit. (Only the bottleneck path appends a layer.)
        layers = []
        for i in range(int(nDenseBlocks)):
            if bottleneck:
                layers.append(GenBottleneck(nChannels, growthRate))
            nChannels += growthRate
        return nn.Sequential(*layers)
    def forward(self, x):
        # Optional shape tracing for debugging, gated on self.verbose.
        if self.verbose:
            print("######################G#####################")
            print("Input shape " + str(x.size()))
        out = self.conv1(x)
        if self.verbose:
            print("conv1 shape " + str(out.size()))
        #out = F.relu(self.bn_1(out))
        out = self.trans1(self.dense1(out))
        if self.verbose:
            print("dense + trans 1 finished shape " + str(out.size()))
        out = self.trans2(self.dense2(out))
        if self.verbose:
            print("dense + trans 2 finished shape " + str(out.size()))
        out = self.trans3(self.dense3(out))
        if self.verbose:
            print("dense + trans 3 finished shape " + str(out.size()))
        out = self.trans4(self.dense4(out))
        if self.verbose:
            print("dense + trans 4 finished shape " + str(out.size()))
        out = self.trans5(self.dense5(out))
        if self.verbose:
            print("dense + trans 5 finished shape " + str(out.size()))
        out = self.conv_f(out)
        #out = self.bn_f(out)
        out = self.ufinal(out)
        #out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
        #out = F.log_softmax(self.fc(out))
        if self.verbose:
            print("######################G#####################")
        return out
class DisBottleneck(nn.Module):
    """DenseNet bottleneck unit for the discriminator.

    BN -> ReLU -> 1x1 conv (to 4*growthRate channels), then
    BN -> ReLU -> 3x3 conv (to growthRate channels); the result is
    concatenated onto the input along the channel dimension.
    """

    def __init__(self, nChannels, growthRate):
        super(DisBottleneck, self).__init__()
        hidden = 4 * growthRate  # intermediate width of the 1x1 projection
        self.bn1 = nn.BatchNorm2d(nChannels)
        self.conv1 = nn.Conv2d(nChannels, hidden, kernel_size=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(hidden)
        self.conv2 = nn.Conv2d(hidden, growthRate, kernel_size=3,
                               padding=1, bias=False)

    def forward(self, x):
        squeezed = self.conv1(F.relu(self.bn1(x)))
        grown = self.conv2(F.relu(self.bn2(squeezed)))
        return torch.cat((x, grown), 1)
class DisTransition(nn.Module):
    """Discriminator transition block: BN -> ReLU -> 1x1 conv to shrink
    channels from nChannels to nOutChannels, then 2x2 average pooling to
    halve the spatial resolution."""

    def __init__(self, nChannels, nOutChannels):
        super(DisTransition, self).__init__()
        self.bn1 = nn.BatchNorm2d(nChannels)
        self.conv1 = nn.Conv2d(nChannels, nOutChannels, kernel_size=1,
                               bias=False)

    def forward(self, x):
        reduced = self.conv1(F.relu(self.bn1(x)))
        return F.avg_pool2d(reduced, 2)
#net = densenet.DenseNet(growthRate=12, depth=100, reduction=0.5,
# bottleneck=True, nClasses=10)
class DisDenseNet(nn.Module):
    """Discriminator network built from DenseNet-style blocks.

    Takes a 3-channel image and passes it through five dense-block /
    transition stages (each transition compresses channels by `reduction`
    and halves the spatial size), then a final BN -> ReLU -> 3x3 conv to a
    single channel. The output is flattened to a 1-D tensor of scores
    (no sigmoid — raw critic values, as used by Fisher/WGAN-style losses).
    """
    def __init__(self, growthRate, depth, reduction, verbose=1, bottleneck=1):
        super(DisDenseNet, self).__init__()
        self.verbose=verbose
        # Units per dense block; halved when bottleneck blocks are used
        # (each bottleneck unit contains two conv layers).
        nDenseBlocks = (depth-4) // 3
        if bottleneck:
            nDenseBlocks //= 2
        nChannels = 2*growthRate
        self.conv1 = nn.Conv2d(3, nChannels, kernel_size=3, padding=1,
                               bias=False)
        # Stages 1-5: each dense block grows channels by
        # nDenseBlocks*growthRate, then the transition compresses to
        # floor(nChannels*reduction) channels and halves H/W.
        self.dense1 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks*growthRate
        nOutChannels = int(math.floor(nChannels*reduction))
        self.trans1 = DisTransition(nChannels, nOutChannels)
        nChannels = nOutChannels
        self.dense2 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks*growthRate
        nOutChannels = int(math.floor(nChannels*reduction))
        self.trans2 = DisTransition(nChannels, nOutChannels)
        nChannels = nOutChannels
        self.dense3 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks*growthRate
        nOutChannels = int(math.floor(nChannels*reduction))
        self.trans3 = DisTransition(nChannels, nOutChannels)
        nChannels = nOutChannels
        self.dense4 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks*growthRate
        nOutChannels = int(math.floor(nChannels*reduction))
        self.trans4 = DisTransition(nChannels, nOutChannels)
        nChannels = nOutChannels
        self.dense5 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)
        nChannels += nDenseBlocks*growthRate
        nOutChannels = int(math.floor(nChannels*reduction))
        self.trans5 = DisTransition(nChannels, nOutChannels)
        # Final head: BN -> ReLU -> 3x3 conv down to one channel.
        self.bnf = nn.BatchNorm2d(nOutChannels)
        self.convf = nn.Conv2d(nOutChannels, 1, kernel_size=3, padding=1,
                               bias=False)
        # He-style init for convs; BatchNorm to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def _make_dense(self, nChannels, growthRate, nDenseBlocks, bottleneck=1):
        # Stack nDenseBlocks bottleneck units; channel count grows by
        # growthRate per unit. (Only the bottleneck path appends a layer.)
        layers = []
        for i in range(int(nDenseBlocks)):
            if bottleneck:
                layers.append(DisBottleneck(nChannels, growthRate))
            nChannels += growthRate
        return nn.Sequential(*layers)
    def forward(self, x):
        # Optional shape tracing for debugging, gated on self.verbose.
        if self.verbose:
            print("######################D#####################")
            print("Input shape " + str(x.size()))
        out = F.relu(self.conv1(x))
        if self.verbose:
            print("con1 shape " + str(out.size()))
        out = self.trans1(self.dense1(out))
        if self.verbose:
            print("dense + trans 1 finished shape " + str(out.size()))
        out = self.trans2(self.dense2(out))
        if self.verbose:
            print("dense + trans 2 finished shape " + str(out.size()))
        out = self.trans3(self.dense3(out))
        if self.verbose:
            print("dense + trans 3 finished shape " + str(out.size()))
        out = self.trans4(self.dense4(out))
        if self.verbose:
            print("dense + trans 4 finished shape " + str(out.size()))
        out = self.trans5(self.dense5(out))
        if self.verbose:
            print("dense + trans 5 finished shape " + str(out.size()))
        out = self.convf(F.relu(self.bnf(out)))
        #out = self.bnf(out)
        if self.verbose:
            print("Final shape " + str(out.size()))
        ##print("dense f finished shape " + str(out.size()))
        ##out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
        ##out = F.log_softmax(self.fc(out))
        if self.verbose:
            print("######################D#####################")
        # Flatten the 1-channel map into a vector of per-sample scores.
        return out.view(-1)
        #return out
"noreply@github.com"
] | Columbia-Creative-Machines-Lab.noreply@github.com |
ad76218731310f5bab8bac3e309be9460465a613 | 1d480ec0807eee561405a94b0a77ffc11974728d | /app/services/decorator.py | 4bfe42513c0a673029675d4d00c33f6c0d8ed491 | [] | no_license | dot190997/youtube_video_fetcher | 40a04266533e966801d18719cdc9f6c41dc0a5f6 | 27e5aae7b1280ffc5932468f0e4c4e83c89c6b9e | refs/heads/main | 2023-05-15T00:16:19.821285 | 2021-05-29T17:14:15 | 2021-05-29T17:14:15 | 371,778,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,673 | py | import functools
import inspect
import logging
import time
log = logging.getLogger(__name__)


def func_time(func):
    """Log and print the runtime of the decorated function.

    The wrapped call's bound arguments are rendered into the message so a
    slow call can be correlated with its inputs. The wrapped function's
    return value is passed through unchanged.
    """
    @functools.wraps(func)
    def wrapper_timer(*args, **kwargs):
        # Render the actual call arguments, e.g. "x = 1, y = 'a'".
        func_args = inspect.signature(func).bind(*args, **kwargs).arguments
        func_args_str = ", ".join("{} = {!r}".format(*item) for item in func_args.items())
        start_time = time.perf_counter()
        value = func(*args, **kwargs)
        run_time = time.perf_counter() - start_time
        # Bug fix: func_args_str used to be computed but never used; it is
        # now included in the single message emitted to both sinks.
        message = f"Finished {func.__name__!r}({func_args_str}) in {run_time:.4f} secs"
        log.warning(message)
        print(message)
        return value
    return wrapper_timer
def retry(exception_to_check, tries=3, delay=4, backoff=2, logger=None, fallback_func=None):
    """Decorator factory: retry the wrapped callable with exponential backoff.

    The callable is attempted up to ``tries`` times in total. After each
    failed attempt (an exception matching ``exception_to_check``, which may
    be a single type or a tuple) a message is logged — or printed when no
    ``logger`` is given — ``fallback_func`` is invoked if provided, and the
    decorator sleeps for a delay that starts at ``delay`` seconds and is
    multiplied by ``backoff`` after every failure. The final attempt is made
    without a surrounding handler, so its exception propagates to the caller.
    """
    def deco_retry(func):
        @functools.wraps(func)
        def f_retry(*args, **kwargs):
            wait = delay
            # tries - 1 guarded attempts; the last attempt may raise freely.
            for _ in range(tries - 1):
                try:
                    return func(*args, **kwargs)
                except exception_to_check as e:
                    msg = "%s, Retrying in %d seconds..." % (str(e), wait)
                    if logger:
                        logger.warning(msg)
                    else:
                        print(msg)
                    if fallback_func is not None:
                        fallback_func()
                    time.sleep(wait)
                    wait *= backoff
            return func(*args, **kwargs)
        return f_retry  # true decorator
    return deco_retry
def withlock(func):
    """Serialise calls to *func*: one shared lock is held for the duration
    of every invocation, so concurrent callers run one at a time."""
    import threading
    guard = threading.Lock()

    @functools.wraps(func)
    def serialised(*args, **kwargs):
        guard.acquire()
        try:
            return func(*args, **kwargs)
        finally:
            guard.release()
    return serialised
| [
"abhishek@nference.net"
] | abhishek@nference.net |
2cc201cf266ff314089da1d203b0b3d0cc31fdfd | 1b862f34c125ce200244dd79e4fda4b5b605ce2e | /.history/images_20210218000603.py | 0a459524a8dabc3b90e7a0501798a67b7e4b69cb | [] | no_license | edwino26/CoreImages | 26085a49cf1cb79442ae563a88354b2fdceace87 | 6bf6e68cac8ab36c87b1e6ea702bfe6882b0f40e | refs/heads/master | 2023-06-22T12:53:37.344895 | 2021-07-21T04:31:44 | 2021-07-21T04:31:44 | 309,553,247 | 0 | 4 | null | 2021-04-29T23:23:15 | 2020-11-03T02:45:07 | Lasso | UTF-8 | Python | false | false | 2,563 | py | import os
clear = lambda : os.system('cls')
#
# %%
import glob
import cv2
import os.path
import numpy as np
import matplotlib.pyplot as plt
# %%
cores_per_image = 6
uvFiles = glob.glob('./Photos/*.jpg')
print(uvFiles)
# Picture path.
# Bug fix: the path previously had './Photos/' stripped before cv2.imread,
# so the image was looked up in the current directory and imread returned
# None (crashing the cropping code below). Read the globbed path as-is.
img = cv2.imread(uvFiles[0])
print(img)
a = []  # x-coordinates of calibration mouse clicks
b = []  # y-coordinates of calibration mouse clicks
# %%
def oneventlbuttondown(event, x, y, flags, param):
    """OpenCV mouse callback: on a left click, record the point in the
    module-level lists `a` (x-coords) and `b` (y-coords), draw a red dot
    at the click position on `img`, and refresh the window."""
    if event == cv2.EVENT_LBUTTONDOWN:
        xy = "%d,%d" % (x, y)  # only used by the commented-out putText below
        a.append(x)
        b.append(y)
        cv2.circle(img, (x, y), 10, (0, 0, 255), thickness=-1)
        # cv2.putText(img, xy, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness=1)
        cv2.imshow("image", img)
core_length = 3  # depth (length units) covered by each cropped core strip
vc = []  # vertical concatenation of all cropped core strips
# Depth interval parsed from the filename.
# NOTE(review): the [2:6]/[7:11] slices assume a fixed filename layout like
# './DDDD_DDDD...' — confirm against the actual photo names.
do = int(uvFiles[0][2:6])
dn = int(uvFiles[0][7:11])
for i in range(cores_per_image):
    if i == 0:
        # First pass: show the image and collect three calibration clicks
        # (upper-left corner, lower-right corner, next core's upper-left).
        cv2.namedWindow("image", cv2.WINDOW_NORMAL)
        # cv2.resizeWindow("output", 400, 300)
        cv2.setMouseCallback("image", oneventlbuttondown)
        cv2.imshow("image", img)
        print(
            'Click 1) left upper corner 2) right lower corner in leftmost core and 3) leftupper corner in second core')
        cv2.waitKey(0)
        y = b[0];
        x = a[0];
        dy = b[1] - b[0];
        dx = a[1] - a[0]
        gap = a[2] - a[1]  # horizontal spacing between adjacent cores
    if i == 3:
        midgap = gap * 4  # extra spacing after the third core
    else:
        midgap = 0
    if i > 0: x = x + (dx + gap) + midgap
    # Crop the i-th core strip and stack it under the previous ones.
    crop_img = img[y:y + dy, x:x + dx]
    if i == 0:
        vc = crop_img
    else:
        vc = cv2.vconcat([vc, crop_img])
    crop_name = str(int(uvFiles[0][2:6]) + (core_length * i)) + ".jpg"
    path = os.path.join(os.path.relpath('Cropped', start=os.curdir), crop_name)
    cv2.imwrite(path, crop_img)
concat_name = uvFiles[0][2:6] + "-" + uvFiles[0][7:11] + ".jpg"
path = os.path.join(os.path.relpath('Cropped', start=os.curdir), concat_name)
cv2.imwrite(path, vc)
p = vc.shape
vc_gray = cv2.cvtColor(vc, cv2.COLOR_BGR2GRAY)
print(vc.shape)  # Dimensions of Image
print(vc_gray.shape)  # It is already a numpy array
print(type(vc_gray))
# print(p[:10, :10, 1 ])
# Grey-level "log": column-averaged intensity over a central strip.
img_log = np.average(vc_gray[:, 80:120], axis=1)
depths = np.arange(do, dn, (dn - do) / len(img_log))
plt.figure()
# plt.subplot(1, 2, 1)
plt.subplot2grid((1, 10), (0, 0), colspan=3)
plt.plot(img_log, depths, 'green');
plt.axis([0, 120, do, dn]);
plt.gca().invert_yaxis();
plt.gca().invert_xaxis()
# plt.subplot(1, 2 ,2)
plt.subplot2grid((1, 10), (0, 3), colspan=7)
plt.imshow(vc_gray[:, 40:120], aspect='auto', origin='upper');
plt.colorbar()
p_50 = np.percentile(img_log, 50)  # median intensity (currently unused)
plt.show()
# %%
| [
"ortega.edwin.y@gmail.com"
] | ortega.edwin.y@gmail.com |
7aaf30d580238668767fc362313bb0f9006f72eb | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/azurestackhci/azure-mgmt-azurestackhci/azure/mgmt/azurestackhci/_configuration.py | b419fc134f8d50033f2cbf5f5c9a6e3c66cd4704 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 3,264 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class AzureStackHCIClientConfiguration(Configuration):
    """Configuration for AzureStackHCIClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    """

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        subscription_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        super(AzureStackHCIClientConfiguration, self).__init__(**kwargs)

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = "2020-10-01"
        # Default to the ARM public-cloud scope unless the caller overrides it.
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-azurestackhci/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Build the HTTP pipeline policies; caller-supplied policies in
        ``kwargs`` take precedence over the defaults constructed here."""
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        # Only synthesise a bearer-token policy when a credential exists and
        # the caller did not inject their own authentication policy.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
"noreply@github.com"
] | scbedd.noreply@github.com |
55b225247ae7cc5e0604abd8fa51ce0e8b166907 | feb61d29901a5dfd9cf301097757b36ce4d727e9 | /mysite/plants/models.py | d4934b8350ac18c5bf258c14d70776cd3b3f9a3f | [] | no_license | wwq345556/spa | c52253188e0b420368bc804e173c8fdba0a95931 | 6b785abe0874ae25dee21351e849671f24bab93d | refs/heads/master | 2022-12-14T17:59:12.764977 | 2020-05-26T02:42:06 | 2020-05-26T02:42:06 | 248,178,626 | 1 | 0 | null | 2022-12-12T05:54:06 | 2020-03-18T08:40:10 | JavaScript | UTF-8 | Python | false | false | 874 | py | from django.db import models
import time
# Create your models here.
def _current_timestamp():
    """Return the current Unix time as an int (per-row field default)."""
    return int(time.time())


class PlantsInfo(models.Model):
    """Plant record stored in the `plants` table."""
    plant_name = models.CharField(u'名称', max_length=256)
    flora_id = models.IntegerField(u'种类ID', default=0)
    introduction = models.TextField(u'简介', blank=True, null=True)
    photo = models.ImageField(upload_to='img', null=True)
    # Bug fix: `default=int(time.time())` was evaluated once at import time,
    # stamping every new row with the server start time. A callable default
    # (a named function, so migrations can serialise it) is evaluated
    # per-row instead.
    shooting_time = models.IntegerField(u'添加时间', default=_current_timestamp, editable=False)
    is_del = models.IntegerField(u'是否删除', default=0, editable=False)
    create_time = models.IntegerField(u'添加时间', default=_current_timestamp, editable=False)
    update_time = models.IntegerField(u'修改时间', default=_current_timestamp, editable=False)
    delete_time = models.IntegerField(u'删除时间', default=0, editable=False)

    class Meta:
        db_table = 'plants'

    # Default manager exposed under the project's chosen name.
    plants = models.Manager()
"1175134755@qq.com"
] | 1175134755@qq.com |
f5c150f8f7a29c8e620c52c635db78aa0c5f0b1f | 6799458993640f8e9345b245af87f1ef8057149f | /Super.py | 1714991b098fcf758b3adedcce676d5b8e7339a5 | [] | no_license | bmkearne/DRoP | 0626a24524ea82952fceb9d762cdd9f775a2de78 | 303ecc56381951f838cca3105992cb7840f87c30 | refs/heads/master | 2021-01-01T19:19:38.869750 | 2014-09-18T09:55:10 | 2014-09-18T09:55:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | #!/usr/bin/python
#*********************************************************************************
# Super.py
# Feb. 26, 2012
# Bradley Kearney
# Superimposes protein structures that have been preprocessed by DRoP.
#*********************************************************************************
import sys
import time
import webbrowser
import os
import urllib
import shutil
def run():
argv = sys.argv[1:]
try:
id=argv[0]
except:
return
root = 'Renumbered/'
if(root[-1:] != '/'):
root += '/'
url = '''REDACTED''''php?job=%d&status=300'%int(id)
print url
raw_return=urllib.urlopen(url).read()
pdb_filenames = []
filenames = os.listdir(os.getcwd())
for f in filenames:
if(f[-4:] == '.pdb'):
os.rename(f, f.replace(" ", "-"))
filenames = os.listdir(os.getcwd())
for f in filenames:
if(f[-4:] == '.pdb'):
pdb_filenames.append(f)
basis=pdb_filenames[1]
shutil.copyfile(basis,'../Final/'+basis)
if len(pdb_filenames)<2:
url=''''REDACTED''''.php?job=%d&staus=333'%int(id)
return
for f in pdb_filenames:
if (f!=basis):
os.system("python cealign.py %s %s"%(basis,f))
url = ''''REDACTED''''.php?job=%d&status=399'%int(id)
raw_return=urllib.urlopen(url).read()
os.chdir('../Final')
os.system("python DRoP.py "+id)
return
if(__name__ == '__main__'):
    # Entry point: run the superposition pipeline when invoked directly.
    run()
| [
"bmkearne@gmail.com"
] | bmkearne@gmail.com |
e0a1f5a316d34042d099a185a757946ab5667625 | 3f99756553008745dcac63da942c8afe189a0bbb | /src/common/debug.py | 2c2a20b77c88f20d2287098df133eea0833a9ebc | [] | no_license | hekaistorm/DQN_wechat_jump_AI | b34e1d15486e4f6884221e68cb110f4b5f8fcaa6 | 2dc71f22e234bc17dd280d309103e84596754588 | refs/heads/master | 2020-09-06T09:05:59.478004 | 2018-02-04T05:28:06 | 2018-02-04T05:28:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,185 | py | # -*- coding: utf-8 -*-
"""
这儿是debug的代码,当DEBUG_SWITCH开关开启的时候,会将各种信息存在本地,方便检查故障
"""
import os
import sys
import shutil
from PIL import ImageDraw
# 用来保存每一次的图片
screenshot_backup_dir = '../data/backups/'
def make_debug_dir(screenshot_backup_dir):
    """
    Create the backup folder (and any missing parents) if it does not exist.

    Idempotent and race-free: os.makedirs(exist_ok=True) replaces the old
    isdir-then-mkdir pair, which could raise if the directory appeared
    between the check and the mkdir (and could not create nested paths).
    """
    os.makedirs(screenshot_backup_dir, exist_ok=True)
def backup_screenshot(ts):
    """
    Copy the current `autojump.png` screenshot into the backup folder,
    named after the timestamp *ts*, to make later debugging easier.
    """
    make_debug_dir(screenshot_backup_dir)
    shutil.copy('autojump.png', '{}{}.png'.format(screenshot_backup_dir, ts))
def save_debug_screenshot(ts, im, piece_x, piece_y, board_x, board_y):
    """
    Annotate the screenshot *im* with the detected piece position (red) and
    target-board position (blue), then save it to the backup folder.
    """
    make_debug_dir(screenshot_backup_dir)
    draw = ImageDraw.Draw(im)
    # Line from the piece to the target landing point.
    draw.line((piece_x, piece_y) + (board_x, board_y), fill=2, width=3)
    # Red cross-hairs through the piece position.
    draw.line((piece_x, 0, piece_x, im.size[1]), fill=(255, 0, 0))
    draw.line((0, piece_y, im.size[0], piece_y), fill=(255, 0, 0))
    # Blue cross-hairs through the board position.
    draw.line((board_x, 0, board_x, im.size[1]), fill=(0, 0, 255))
    draw.line((0, board_y, im.size[0], board_y), fill=(0, 0, 255))
    draw.ellipse((piece_x - 10, piece_y - 10, piece_x + 10, piece_y + 10), fill=(255, 0, 0))
    draw.ellipse((board_x - 10, board_y - 10, board_x + 10, board_y + 10), fill=(0, 0, 255))
    del draw
    im.save('{}{}{}_d.png'.format(screenshot_backup_dir, ts, str(piece_x) + '_' + str(piece_y)))
def dump_device_info():
    """
    Print Android device info (screen size/density, model, OS version)
    gathered via adb, plus the host OS and Python version.
    """
    size_str = os.popen('adb shell wm size').read()
    device_str = os.popen('adb shell getprop ro.product.device').read()
    phone_os_str = os.popen('adb shell getprop ro.build.version.release').read()
    density_str = os.popen('adb shell wm density').read()
    print("""**********
Screen: {size}
Density: {dpi}
Device: {device}
Phone OS: {phone_os}
Host OS: {host_os}
Python: {python}
**********""".format(
        size=size_str.strip(),
        dpi=density_str.strip(),
        device=device_str.strip(),
        phone_os=phone_os_str.strip(),
        host_os=sys.platform,
        python=sys.version
    ))
| [
"50673223@qq.com"
] | 50673223@qq.com |
ee4e5ba67072d17cb87c3d167e85dfec37495d32 | bac37a96ead59a3c4caaac63745d5748f5060195 | /第9章 异常/异常4.py | 7086b6c235bbdfbd54433b2b3796687261036263 | [] | no_license | pod1019/python_learning | 1e7d3a9c10fc8c1b4e8ff31554d495df518fb385 | a15213d33a253c3a77ab0d5de9a4f937c27693ca | refs/heads/master | 2020-09-14T11:11:53.100591 | 2020-04-11T04:00:27 | 2020-04-11T04:00:27 | 223,112,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | # 多个except结构
try:
a = input("请输入被除数:")
b = input("请输入除数:")
c = float(a)/float(b)
print("两数相除的结果是:",c)
except ZeroDivisionError:
print("异常:除数不能为0")
except TypeError:
print("异常:除数和被除数都应该为数值类型")
except NameError:
print("异常:变量不存在")
except BaseException as e:
print(e)
print(type(e))
finally: # 无论如果,此语句必然执行
print("kkkkkkkkk") | [
"pod1019@163.com"
] | pod1019@163.com |
bbaf94fa20ef4ef4ced2a6bb7d0acbf80bb074dc | 42d5c2735887f05434c7eb9be25e476dc1f7f53c | /board.py | 2f4a4f5e2f900cf8b266cb6709c989aa81ccec3d | [] | no_license | JosephReps/Python3Chess | e28b31a5d05f28c4e7000a30fff0016127275d44 | a7dd5f2257974cb04e2c4a6c6cd519dd1915f754 | refs/heads/master | 2020-06-26T10:49:32.656518 | 2019-08-13T09:14:40 | 2019-08-13T09:14:40 | 199,611,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | import pygame
class Tile(object):
    '''
    Tile object which makes up the board.
    '''
    def __init__(self, tile_x, tile_y, tile_size, tile_colour,
                 tile_number, occupant=None):
        '''
        Parameters:
            tile_x <int>: X-position of tile.
            tile_y <int>: Y-position of tile.
            tile_size <int>: Size of tile.
            tile_colour <tuple><int>: RGB.
            tile_number <int>: 1-based index of the tile on the board.
            occupant <Piece object>: Piece occupying this tile (None if empty).
        '''
        self.tile_x = tile_x
        self.tile_y = tile_y
        self.tile_number = tile_number
        self.tile_size = tile_size
        self.tile_colour = tile_colour
        self.occupant = occupant

    def draw_tile(self, screen):
        '''
        Creates tile and draws to screen.

        Args:
            screen <Pygame object>: Pygame screen object set in Main.

        Returns:
            Draws a pygame rectangle on screen when called.
            Sets self.tile_object to the pygame rectangle object.
        '''
        self.tile_object = pygame.draw.rect(screen, self.tile_colour,
                                            [self.tile_x,
                                             self.tile_y,
                                             self.tile_size,
                                             self.tile_size])
def draw_board(screen):
    """
    Creates and draws the 8x8 checkerboard, returning the list of Tile objects.

    Note: the returned list carries a dummy '' entry at index 0 so that
    tiles are addressed 1..64 by their tile_number.
    """
    tile_number = 1
    tiles = ['']
    tile_size = 60 # Height and width of checkerboard squares.
    for i in range(8): # Note that i ranges from 0 through 7, inclusive.
        for y in range(8): # So does y.
            if (i + y) % 2 == 0: # The top left square is white.
                tile_colour = (255,255,255)
            else:
                tile_colour = (40,40,40)
            tile_x = tile_size*y
            tile_y = tile_size*i
            tiles.append(Tile(tile_x,tile_y,tile_size,tile_colour,tile_number))
            tile_number += 1
    # Render every real tile (skip the index-0 placeholder).
    for i in tiles[1:]:
        i.draw_tile(screen)
    return tiles
return tiles
| [
"josephreps@users.noreply.github.com"
] | josephreps@users.noreply.github.com |
8336f14eb60ba8b70f687a50cfcfb4356b0cb70a | 9360aeefb3605a3fe0c5e512e52ec3bc0942903f | /app.py | 7527e6647e891d5f6706d20edee3162f0ce7496d | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | eliaswalyba/facebound | 1ff7dc32cc4bf50d14f2e6434af2adfb14300245 | 92500e61b1bc50702ea339563ee8b38b55a31169 | refs/heads/master | 2022-07-01T17:42:02.360416 | 2020-05-08T15:23:03 | 2020-05-08T15:23:03 | 262,851,606 | 0 | 0 | MIT | 2020-05-10T18:37:03 | 2020-05-10T18:37:02 | null | UTF-8 | Python | false | false | 3,484 | py |
import cv2, os
import numpy as np
import streamlit as st
import matplotlib.pyplot as plt
from PIL import Image, ImageEnhance
@st.cache
def load_image(img):
    """Open *img* (path or uploaded file-like) as a PIL Image; result is
    cached by Streamlit so repeated reruns skip the decode."""
    im = Image.open(img)
    return im
# Haar cascade files live in the project's local `algos/` directory.
# Bug fix: the path was absolute ('/algos/...'), so the classifier only
# loaded if that folder sat at the filesystem root; the commented-out
# sibling cascades below already use the relative form.
FACE_CASCADE_PATH = 'algos/haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(FACE_CASCADE_PATH)
# eye_cascade = cv2.CascadeClassifier('algos/haarcascade_eye.xml')
# smile_cascade = cv2.CascadeClassifier('algos/haarcascade_smile.xml')
def detect_faces(uploaded_image):
    """Run the Haar cascade over *uploaded_image* (a PIL image) and return
    (annotated BGR numpy image with face rectangles, array of
    (x, y, w, h) face boxes)."""
    new_img = np.array(uploaded_image.convert('RGB'))
    temp_img = cv2.cvtColor(new_img, 1)
    gray = cv2.cvtColor(temp_img, cv2.COLOR_BGR2GRAY)

    # Detect Face
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)

    # Draw Rectangle
    for (x,y,w,h) in faces:
        cv2.rectangle(temp_img, (x,y), (x+w, y+h), (255,0,0), 2)
    return temp_img, faces
def main():
    '''
    Face Detection App

    Streamlit entry point: renders either the face-detection page (upload,
    enhancement filters, Haar-cascade detection) or the About page,
    depending on the sidebar selection.
    '''
    st.title('Facebound')
    st.text('by Fodé Diop')

    options = ['Detection', 'About']
    choice = st.sidebar.selectbox('Select Option', options)

    if choice == 'Detection':
        st.subheader('Face Detection')
        image_file = st.file_uploader('Upload Image', type=['jpg', 'png', 'jpeg'])

        if image_file is not None:
            uploaded = Image.open(image_file)
            st.text('Original Image')
            st.image(uploaded)

            # Image-enhancement sidebar: each option previews a transform
            # of the uploaded image.
            enhance_type = st.sidebar.radio('Enhance Type', ['Original', 'Grayscale', 'Contrast', 'Brightness', 'Blur'])
            if enhance_type == 'Grayscale':
                new_img = np.array(uploaded.convert('RGB'))
                temp_img = cv2.cvtColor(new_img, 1)
                gray = cv2.cvtColor(temp_img, cv2.COLOR_BGR2GRAY)
                st.image(gray)
                # Print on screen
                st.write(gray)
                st.write(new_img)
            if enhance_type == 'Contrast':
                # Typo fix: label read 'Contrtast'.
                contrast_rate = st.sidebar.slider('Contrast', 0.5, 3.5)
                enhancer = ImageEnhance.Contrast(uploaded)
                img_output = enhancer.enhance(contrast_rate)
                st.image(img_output)
            if enhance_type == 'Brightness':
                # Typo fix: label read 'Brigthness'.
                contrast_rate = st.sidebar.slider('Brightness', 0.5, 3.5)
                enhancer = ImageEnhance.Brightness(uploaded)
                img_output = enhancer.enhance(contrast_rate)
                st.image(img_output)
            if enhance_type == 'Blur':
                blur_rate = st.sidebar.slider('Blur', 0.5, 3.5)
                new_img = np.array(uploaded.convert('RGB'))
                temp_img = cv2.cvtColor(new_img, 1)
                blurred = cv2.GaussianBlur(temp_img, (11,11), blur_rate)
                st.image(blurred)

            # Face Detection
            target = ['Face', 'Smiles', 'Eyes']
            feature_choice = st.sidebar.selectbox('Find Features', target)
            if st.button('Detect Faces'):
                # Bug fix: this compared against 'Faces', which is not in
                # `target` ('Face' is), so detection never ran. A leftover
                # debug st.write was also removed.
                if feature_choice == 'Face':
                    result_img, result_faces = detect_faces(uploaded)
                    st.image(result_img)
                    st.success(f'Found {len(result_faces)} faces.')
                # NOTE(review): 'Smiles' and 'Eyes' are offered in the
                # selectbox but not yet handled.

    elif choice == 'About':
        st.subheader('About Facebound')
        st.markdown("Built with Streamlit and OpenCV by [Fodé Diop](https://www.github.com/diop)")
        st.text("© Copyright 2020 Fodé Diop - MIT")
        st.success("Dakar Institute of Technology")
main() | [
"fodesdiop@gmail.com"
] | fodesdiop@gmail.com |
92abd0930e4243edf86846e7a20659a052c6f246 | 8dab94de494d1c9e3b3dd2c00b7afaa8fdbeefe2 | /python_bootcamp/Lambdas/filter.py | 9183dec07e9a9929333ce2ae49376023d2be152e | [] | no_license | lis5662/Python | 58cf5d05834930eece413b92c7562a273b4d8e56 | ff208b8ca3d3ec12b9ef8326cef3f21cc5cc00e8 | refs/heads/master | 2020-06-14T02:26:24.553686 | 2019-08-05T12:00:44 | 2019-08-05T12:00:44 | 194,855,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | users = [
{"username": "samuel", "tweets": ["I love cake", "I love pie", "hello world!"]},
{"username": "katie", "tweets": ["I love my cat"]},
{"username": "jeff", "tweets": []},
{"username": "bob123", "tweets": []},
{"username": "doggo_luvr", "tweets": ["dogs are the best", "I'm hungry"]},
{"username": "guitar_gal", "tweets": []}
]
#extract inactive users using filter:
inactive_users = list(filter(lambda u: not u['tweets'], users))
#extract inactive users using list comprehension:
inactive_users2= [user for user in users if not user["tweets"]]
# extract usernames of inactive users w/ map and filter:
usernames = list(map(lambda user: user["username"].upper(),
filter(lambda u: not u['tweets'], users)))
# extract usernames of inactive users w/ list comprehension
usernames2 = [user["username"].upper() for user in users if not user["tweets"]]
| [
"lis5662@gmail.com"
] | lis5662@gmail.com |
cc9ec9446a5d1a83664b1a695845250e6048e2a9 | a8aa060d2369133b0677c640de3d8da179678316 | /9.Numbers.py | 5b469db52006d4cf132fc0a5c38b7db04f6acc37 | [] | no_license | YashikaNavin/BasicsOfPython | 5061df7f3d6962e45e84b421d1dc1efb0532b35c | 9db6d134302df76087ba703acde0408ee0b80e23 | refs/heads/master | 2020-06-19T02:16:22.071856 | 2019-08-10T17:43:58 | 2019-08-10T17:43:58 | 196,529,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | # Numbers
a=56
b=32562709545676454456373412084736272839
c=75.9
d=complex(3,7) # complex(re, im) builds a complex number from two reals
e=9-6j
print("a=", a, "of type", type(a))
print("b=", b, "of type", type(b))
print("c=", c, "of type", type(c))
print("d=", d, "of type", type(d))
print("e=", e, "of type", type(e))

# Type Conversion
x=12324
y=6389.837409
z=57+49j # Complex numbers cannot be converted to int or float
print("x=", x, "of type", type(x))
print("y=", y, "of type", type(y))
print("z=", z, "of type", type(z))
m=int(y)  # float -> int truncates toward zero
n=float(x)  # int -> float
o=complex(y)  # float -> complex (imaginary part 0)
print("m=", m, "of type", type(m))
print("n=", n, "of type", type(n))
print("o=", o, "of type", type(o))

# Random Number: randrange(1, 10) yields an int in [1, 9]
import random
print(random.randrange(1,10))
| [
"noreply@github.com"
] | YashikaNavin.noreply@github.com |
ec33133b88d9860ea6fbe67ba5f1f112d915e2bd | 72816e34e5c3cf0b8ab781529465a4f3a526d4d4 | /leetcode/maximum_69_number/main.py | ed627ad6430f8c724ae14e2def9679ad2cc58bc4 | [] | no_license | ervitis/challenges | db2ce944f3d7c3fc38271d8194bc3938bb42f3a2 | a3fcdb0d5c373c4704387acf73b8a5120e344783 | refs/heads/master | 2021-10-26T18:38:07.583788 | 2021-10-15T07:52:36 | 2021-10-15T07:52:36 | 205,415,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | """
Given a positive integer num consisting only of digits 6 and 9.
Return the maximum number you can get by changing at most one digit (6 becomes 9, and 9 becomes 6).
"""
def maximum_69_number(num: int) -> int:
ns = list(str(num))
if '6' not in ns:
return num
for i in range(len(ns)):
if ns[i] == '6':
ns[i] = '9'
break
return int(''.join(ns))
if __name__ == '__main__':
print(maximum_69_number(9969))
print(maximum_69_number(9996))
print(maximum_69_number(9999))
print(maximum_69_number(99969969))
| [
"vitomarti@gmail.com"
] | vitomarti@gmail.com |
078ae056149920ea0a5f14a81bc7ea95b97d1715 | b2c13561bd33bd5b924f99409e3871e9bf4af267 | /python/ray/tune/tests/test_tuner.py | f3973bee175d0b87d6d61451a9951cc435d73ce9 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | shiyi23/ray | 9218370fe804f7d6cc3d1e51673bae654aa840d5 | c847433c1ecd429833f552dc968a3fa0ef6b732f | refs/heads/master | 2022-11-14T19:19:43.172264 | 2022-11-04T23:34:16 | 2022-11-04T23:34:16 | 256,953,524 | 0 | 0 | null | 2020-04-19T08:47:36 | 2020-04-19T08:47:36 | null | UTF-8 | Python | false | false | 14,790 | py | import os
from pathlib import Path
from unittest.mock import patch
import pytest
import shutil
import unittest
from typing import Optional
import ray.air
from sklearn.datasets import load_breast_cancer
from sklearn.utils import shuffle
from ray import tune
from ray.air import session
from ray.air.config import RunConfig, ScalingConfig
from ray.train.examples.pytorch.torch_linear_example import (
train_func as linear_train_func,
)
from ray.data import Dataset, Datasource, ReadTask, from_pandas, read_datasource
from ray.data.block import BlockMetadata
from ray.train.torch import TorchTrainer
from ray.train.trainer import BaseTrainer
from ray.train.xgboost import XGBoostTrainer
from ray.tune import Callback, TuneError, CLIReporter
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.tune.tune_config import TuneConfig
from ray.tune.tuner import Tuner
class DummyTrainer(BaseTrainer):
_scaling_config_allowed_keys = BaseTrainer._scaling_config_allowed_keys + [
"num_workers",
"use_gpu",
"resources_per_worker",
"placement_strategy",
]
def training_loop(self) -> None:
for i in range(5):
with tune.checkpoint_dir(step=i) as checkpoint_dir:
path = os.path.join(checkpoint_dir, "checkpoint")
with open(path, "w") as f:
f.write(str(i))
tune.report(step=i)
class FailingTrainer(DummyTrainer):
def training_loop(self) -> None:
raise RuntimeError("There is an error in trainer!")
class TestDatasource(Datasource):
def __init__(self, do_shuffle: bool):
self._shuffle = do_shuffle
def prepare_read(self, parallelism: int, **read_args):
import pyarrow as pa
def load_data():
data_raw = load_breast_cancer(as_frame=True)
dataset_df = data_raw["data"]
dataset_df["target"] = data_raw["target"]
if self._shuffle:
dataset_df = shuffle(dataset_df)
return [pa.Table.from_pandas(dataset_df)]
meta = BlockMetadata(
num_rows=None,
size_bytes=None,
schema=None,
input_files=None,
exec_stats=None,
)
return [ReadTask(load_data, meta)]
def gen_dataset_func(do_shuffle: Optional[bool] = False) -> Dataset:
test_datasource = TestDatasource(do_shuffle)
return read_datasource(test_datasource)
def gen_dataset_func_eager():
data_raw = load_breast_cancer(as_frame=True)
dataset_df = data_raw["data"]
dataset_df["target"] = data_raw["target"]
dataset = from_pandas(dataset_df)
return dataset
class TunerTest(unittest.TestCase):
"""The e2e test for hparam tuning using Tuner API."""
def test_tuner_with_xgboost_trainer(self):
"""Test a successful run."""
shutil.rmtree(
os.path.join(DEFAULT_RESULTS_DIR, "test_tuner"), ignore_errors=True
)
trainer = XGBoostTrainer(
label_column="target",
params={},
datasets={"train": gen_dataset_func_eager()},
)
# prep_v1 = StandardScaler(["worst radius", "worst area"])
# prep_v2 = StandardScaler(["worst concavity", "worst smoothness"])
param_space = {
"scaling_config": ScalingConfig(num_workers=tune.grid_search([1, 2])),
# "preprocessor": tune.grid_search([prep_v1, prep_v2]),
"datasets": {
"train": tune.grid_search(
[gen_dataset_func(), gen_dataset_func(do_shuffle=True)]
),
},
"params": {
"objective": "binary:logistic",
"tree_method": "approx",
"eval_metric": ["logloss", "error"],
"eta": tune.loguniform(1e-4, 1e-1),
"subsample": tune.uniform(0.5, 1.0),
"max_depth": tune.randint(1, 9),
},
}
tuner = Tuner(
trainable=trainer,
run_config=RunConfig(name="test_tuner"),
param_space=param_space,
tune_config=TuneConfig(mode="min", metric="train-error"),
# limiting the number of trials running at one time.
# As the unit test only has access to 4 CPUs on Buildkite.
_tuner_kwargs={"max_concurrent_trials": 1},
)
results = tuner.fit()
assert len(results) == 4
def test_tuner_with_xgboost_trainer_driver_fail_and_resume(self):
# So that we have some global checkpointing happening.
os.environ["TUNE_GLOBAL_CHECKPOINT_S"] = "1"
shutil.rmtree(
os.path.join(DEFAULT_RESULTS_DIR, "test_tuner_driver_fail"),
ignore_errors=True,
)
trainer = XGBoostTrainer(
label_column="target",
params={},
datasets={"train": gen_dataset_func_eager()},
)
# prep_v1 = StandardScaler(["worst radius", "worst area"])
# prep_v2 = StandardScaler(["worst concavity", "worst smoothness"])
param_space = {
"scaling_config": ScalingConfig(num_workers=tune.grid_search([1, 2])),
# "preprocessor": tune.grid_search([prep_v1, prep_v2]),
"datasets": {
"train": tune.grid_search(
[gen_dataset_func(), gen_dataset_func(do_shuffle=True)]
),
},
"params": {
"objective": "binary:logistic",
"tree_method": "approx",
"eval_metric": ["logloss", "error"],
"eta": tune.loguniform(1e-4, 1e-1),
"subsample": tune.uniform(0.5, 1.0),
"max_depth": tune.randint(1, 9),
},
}
class FailureInjectionCallback(Callback):
"""Inject failure at the configured iteration number."""
def __init__(self, num_iters=10):
self.num_iters = num_iters
def on_step_end(self, iteration, trials, **kwargs):
if iteration == self.num_iters:
print(f"Failing after {self.num_iters} iters.")
raise RuntimeError
tuner = Tuner(
trainable=trainer,
run_config=RunConfig(
name="test_tuner_driver_fail", callbacks=[FailureInjectionCallback()]
),
param_space=param_space,
tune_config=TuneConfig(mode="min", metric="train-error"),
# limiting the number of trials running at one time.
# As the unit test only has access to 4 CPUs on Buildkite.
_tuner_kwargs={"max_concurrent_trials": 1},
)
with self.assertRaises(TuneError):
tuner.fit()
# Test resume
restore_path = os.path.join(DEFAULT_RESULTS_DIR, "test_tuner_driver_fail")
tuner = Tuner.restore(restore_path)
# A hack before we figure out RunConfig semantics across resumes.
tuner._local_tuner._run_config.callbacks = None
results = tuner.fit()
assert len(results) == 4
def test_tuner_trainer_fail(self):
trainer = FailingTrainer()
param_space = {
"scaling_config": ScalingConfig(num_workers=tune.grid_search([1, 2]))
}
tuner = Tuner(
trainable=trainer,
run_config=RunConfig(name="test_tuner_trainer_fail"),
param_space=param_space,
tune_config=TuneConfig(mode="max", metric="iteration"),
)
results = tuner.fit()
assert len(results) == 2
for i in range(2):
assert results[i].error
def test_tuner_with_torch_trainer(self):
"""Test a successful run using torch trainer."""
shutil.rmtree(
os.path.join(DEFAULT_RESULTS_DIR, "test_tuner_torch"), ignore_errors=True
)
# The following two should be tunable.
config = {"lr": 1e-2, "hidden_size": 1, "batch_size": 4, "epochs": 10}
scaling_config = ScalingConfig(num_workers=1, use_gpu=False)
trainer = TorchTrainer(
train_loop_per_worker=linear_train_func,
train_loop_config=config,
scaling_config=scaling_config,
)
param_space = {
"scaling_config": ScalingConfig(num_workers=tune.grid_search([1, 2])),
"train_loop_config": {
"batch_size": tune.grid_search([4, 8]),
"epochs": tune.grid_search([5, 10]),
},
}
tuner = Tuner(
trainable=trainer,
run_config=RunConfig(name="test_tuner"),
param_space=param_space,
tune_config=TuneConfig(mode="min", metric="loss"),
)
results = tuner.fit()
assert len(results) == 8
def test_tuner_run_config_override(self):
trainer = DummyTrainer(run_config=RunConfig(stop={"metric": 4}))
tuner = Tuner(trainer)
assert tuner._local_tuner._run_config.stop == {"metric": 4}
@pytest.mark.parametrize(
"params_expected",
[
(
{"run_config": RunConfig(progress_reporter=CLIReporter())},
lambda kw: isinstance(kw["progress_reporter"], CLIReporter),
),
(
{"tune_config": TuneConfig(reuse_actors=True)},
lambda kw: kw["reuse_actors"] is True,
),
(
{"run_config": RunConfig(log_to_file="some_file")},
lambda kw: kw["log_to_file"] == "some_file",
),
(
{"tune_config": TuneConfig(max_concurrent_trials=3)},
lambda kw: kw["max_concurrent_trials"] == 3,
),
(
{"tune_config": TuneConfig(time_budget_s=60)},
lambda kw: kw["time_budget_s"] == 60,
),
],
)
def test_tuner_api_kwargs(params_expected):
tuner_params, assertion = params_expected
tuner = Tuner(lambda config: 1, **tuner_params)
caught_kwargs = {}
def catch_kwargs(**kwargs):
caught_kwargs.update(kwargs)
with patch("ray.tune.impl.tuner_internal.run", catch_kwargs):
tuner.fit()
assert assertion(caught_kwargs)
def test_tuner_fn_trainable_checkpoint_at_end_true():
tuner = Tuner(
lambda config, checkpoint_dir: 1,
run_config=ray.air.RunConfig(
checkpoint_config=ray.air.CheckpointConfig(checkpoint_at_end=True)
),
)
with pytest.raises(TuneError):
tuner.fit()
def test_tuner_fn_trainable_checkpoint_at_end_false():
tuner = Tuner(
lambda config, checkpoint_dir: 1,
run_config=ray.air.RunConfig(
checkpoint_config=ray.air.CheckpointConfig(checkpoint_at_end=False)
),
)
tuner.fit()
def test_tuner_fn_trainable_checkpoint_at_end_none():
tuner = Tuner(
lambda config, checkpoint_dir: 1,
run_config=ray.air.RunConfig(
checkpoint_config=ray.air.CheckpointConfig(checkpoint_at_end=None)
),
)
tuner.fit()
@pytest.mark.parametrize("runtime_env", [{}, {"working_dir": "."}])
def test_tuner_no_chdir_to_trial_dir(runtime_env):
"""Tests that setting `chdir_to_trial_dir=False` in `TuneConfig` allows for
reading relatives paths to the original working directory.
Also tests that `session.get_trial_dir()` env variable can be used as the directory
to write data to within the Trainable.
"""
if ray.is_initialized():
ray.shutdown()
ray.init(num_cpus=1, runtime_env=runtime_env)
# Write a data file that we want to read in our training loop
with open("./read.txt", "w") as f:
f.write("data")
def train_func(config):
orig_working_dir = Path(os.environ["TUNE_ORIG_WORKING_DIR"])
assert orig_working_dir == os.getcwd(), (
"Working directory should not have changed from "
f"{orig_working_dir} to {os.getcwd()}"
)
# Make sure we can access the data from the original working dir
assert os.path.exists("./read.txt") and open("./read.txt", "r").read() == "data"
# Write operations should happen in each trial's independent logdir to
# prevent write conflicts
trial_dir = Path(session.get_trial_dir())
with open(trial_dir / "write.txt", "w") as f:
f.write(f"{config['id']}")
# Make sure we didn't write to the working dir
assert not os.path.exists(orig_working_dir / "write.txt")
# Make sure that the file we wrote to isn't overwritten
assert open(trial_dir / "write.txt", "r").read() == f"{config['id']}"
tuner = Tuner(
train_func,
tune_config=TuneConfig(
chdir_to_trial_dir=False,
),
param_space={"id": tune.grid_search(list(range(4)))},
)
tuner.fit()
ray.shutdown()
@pytest.mark.parametrize("runtime_env", [{}, {"working_dir": "."}])
def test_tuner_relative_pathing_with_env_vars(runtime_env):
"""Tests that `TUNE_ORIG_WORKING_DIR` environment variable can be used to access
relative paths to the original working directory.
"""
# Even if we set our runtime_env `{"working_dir": "."}` to the current directory,
# Tune should still chdir to the trial directory, since we didn't disable the
# `chdir_to_trial_dir` flag.
if ray.is_initialized():
ray.shutdown()
ray.init(num_cpus=1, runtime_env=runtime_env)
# Write a data file that we want to read in our training loop
with open("./read.txt", "w") as f:
f.write("data")
def train_func(config):
orig_working_dir = Path(os.environ["TUNE_ORIG_WORKING_DIR"])
assert (
str(orig_working_dir) != os.getcwd()
), f"Working directory should have changed from {orig_working_dir}"
# Make sure we can access the data from the original working dir
# Different from above: create an absolute path using the env variable
data_path = orig_working_dir / "read.txt"
assert os.path.exists(data_path) and open(data_path, "r").read() == "data"
trial_dir = Path(session.get_trial_dir())
# Tune should have changed the working directory to the trial directory
assert str(trial_dir) == os.getcwd()
with open(trial_dir / "write.txt", "w") as f:
f.write(f"{config['id']}")
assert not os.path.exists(orig_working_dir / "write.txt")
assert open(trial_dir / "write.txt", "r").read() == f"{config['id']}"
tuner = Tuner(
train_func,
tune_config=TuneConfig(
chdir_to_trial_dir=True,
),
param_space={"id": tune.grid_search(list(range(4)))},
)
tuner.fit()
ray.shutdown()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__] + sys.argv[1:]))
| [
"noreply@github.com"
] | shiyi23.noreply@github.com |
fd07de3d5d3a2288f381e55246f4331593b092d8 | f4b8c90c1349c8740c1805f7b6b0e15eb5db7f41 | /test/test_room_space_inventory_status_item.py | 832bf42dbe4715ab3e1fb79003804c3340495d2d | [] | no_license | CalPolyResDev/StarRezAPI | 012fb8351159f96a81352d6c7bfa36cd2d7df13c | b184e1863c37ff4fcf7a05509ad8ea8ba825b367 | refs/heads/master | 2021-01-25T10:29:37.966602 | 2018-03-15T01:01:35 | 2018-03-15T01:01:35 | 123,355,501 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | # coding: utf-8
"""
StarRez API
This is a way to connect with the StarRez API. We are not the developers of the StarRez API, we are just an organization that uses it and wanted a better way to connect to it. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: resdev@calpoly.edu
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import starrez_client
from starrez_client.models.room_space_inventory_status_item import RoomSpaceInventoryStatusItem # noqa: E501
from starrez_client.rest import ApiException
class TestRoomSpaceInventoryStatusItem(unittest.TestCase):
"""RoomSpaceInventoryStatusItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRoomSpaceInventoryStatusItem(self):
"""Test RoomSpaceInventoryStatusItem"""
# FIXME: construct object with mandatory attributes with example values
# model = starrez_client.models.room_space_inventory_status_item.RoomSpaceInventoryStatusItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"fedorareis@gmail.com"
] | fedorareis@gmail.com |
12bc7081611f0cf4e76ac1ca7877c8802cf8993e | 61673ab9a42f7151de7337608c442fa6247f13bb | /pyqtgraph/TextItem/main.py | cee6f112509d051cd992e54fb2b7de2352ab1089 | [
"MIT"
] | permissive | furas/python-examples | 22d101670ecd667a29376d7c7d7d86f8ec71f6cf | 95cb53b664f312e0830f010c0c96be94d4a4db90 | refs/heads/master | 2022-08-23T23:55:08.313936 | 2022-08-01T14:48:33 | 2022-08-01T14:48:33 | 45,575,296 | 176 | 91 | MIT | 2021-02-17T23:33:37 | 2015-11-04T23:54:32 | Python | UTF-8 | Python | false | false | 268 | py | from PyQt5 import QtGui
import pyqtgraph as pg
app = QtGui.QApplication([])
x = [1,2,3,4,5]
y = [0,3,1,2,0]
plotWidget = pg.plot()
plotWidget.plot(x, y)
text = pg.TextItem("Hello World", color='f00')
plotWidget.addItem(text)
text.setPos(3, 2)
app.exec_()
| [
"furas@tlen.pl"
] | furas@tlen.pl |
564dcce09583b1d203fcc397678e7a5c480c8f17 | dc1a0ebe87b13e79fc0bb47c53ae61822e1184ca | /app/core/management/commands/wait_for_db.py | e8b210a911a83d70efffe949e2c3976ae2f56ce1 | [
"MIT"
] | permissive | wewe15/recipe-app-api | 4b5a7dc4705aada28cdb62aa09d1a77b5e299f0e | 0b491d4e33a5113703b2856c8e5845e3ec621636 | refs/heads/main | 2023-08-29T09:03:39.505835 | 2021-10-13T10:28:14 | 2021-10-13T10:28:14 | 409,936,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until database is available"""
def handle(self, *args, **options):
self.stdout.write('waiting for database ....')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('database unavailable wait a sec ..')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!'))
| [
"ahm.sami.271@gmail.com"
] | ahm.sami.271@gmail.com |
86c01d24c8a65bb62895d8f7fd5b4121e6227f36 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_032/ch15_2020_03_09_16_43_39_394880.py | d97f1bf48a978c5d3d9d2f3b83f3fff684be1147 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | NOME=input('Digite o seu nome:')
if NOME == 'Chris':
print('Todo mundo odeia o Chris')
else:
print('Olá, {0}'.format(NOME)) | [
"you@example.com"
] | you@example.com |
4c72dff9feb8f888dc02a1444ab2161d5d33c6d4 | e00d41c9f4045b6c6f36c0494f92cad2bec771e2 | /multimedia/graphics/geh/actions.py | d0075a3227e29e19e28050c575c1f7c6ff2bd63b | [] | no_license | pisilinux/main | c40093a5ec9275c771eb5fb47a323e308440efef | bfe45a2e84ea43608e77fb9ffad1bf9850048f02 | refs/heads/master | 2023-08-19T00:17:14.685830 | 2023-08-18T20:06:02 | 2023-08-18T20:06:02 | 37,426,721 | 94 | 295 | null | 2023-09-14T08:22:22 | 2015-06-14T19:38:36 | Python | UTF-8 | Python | false | false | 484 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file https://www.gnu.org/licenses/gpl-3.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
autotools.configure("--disable-gtk2")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("AUTHORS", "LICENSE", "NEWS", "README")
| [
"icu7c1@gmail.com"
] | icu7c1@gmail.com |
a96abf1bde0e271c2e15dd0cad6e0e9dd88e12a0 | 7ccbf3eb810bdbba4ffb65c20e7c0eaac6ddb66e | /rtt_analyzer.py | 943ec3b4e735273702442c0cc947b4252fba7ab1 | [] | no_license | ntc-netmesh/netmesh-rfc6349-server | 5264de18d02f9a244080ae0ead26e8e9c578903a | dbd6e2006cd8dcbe18767bd52f3f9d82dc94c119 | refs/heads/master | 2021-09-26T19:32:10.719831 | 2020-08-28T06:24:30 | 2020-08-28T06:24:30 | 231,306,093 | 0 | 4 | null | 2021-09-18T04:26:36 | 2020-01-02T04:13:30 | Python | UTF-8 | Python | false | false | 3,523 | py | from scapy.all import *
import traceback
import sys
import time
'''
Gets the average rtt of a network trace file
provided with the client and server IPv4 add.
The average RTT is computed from the perspective
of the client host.
@PARAMS:
filename : filename of the pcap/pcapng file
client_ip : IPv4 address of the client host
server_ip : IPv4 address of the server host
service_port : port number of the local connection
@RETURNS:
min_rtt : minimum RTT value measured
max_rtt : maximum RTT value measured
average_rtt : average round trip time of packets matched
on a SEQ-ACK number condition
'''
def get_average_rtt(filename, client_ip, server_ip, dataoffset, service_port):
packets = rdpcap(filename)
packettotal = len(packets)
rtt_min = 100000
rtt_max = 0
ACK = 0x10
SYN = 0x02
client_packets = {}
server_packets = {}
rtt = []
counter = 0
divider_flag = False
divider = 1.0
for packet in packets:
try:
if IP in packet:
if TCP in packet:
if ( service_port == str(packet[TCP].dport) ) or ( service_port == str(packet[TCP].sport) ):
if not divider_flag:
# means packet.time is in ms
if packet.time - time.time() > 10000:
divider = 1000.0
divider_flag = True
if (not packet[TCP].flags & SYN):
if (packet[IP].src == client_ip) and (not len(packet[TCP].payload) < int(dataoffset) - 300):
print(len(packet[TCP].payload))
expected_seqnum = packet[TCP].seq + int(dataoffset)
client_packets[expected_seqnum] = packet.time
expected_seqnum = packet[TCP].seq + int(len(packet[TCP].payload))
client_packets[expected_seqnum] = packet.time
if (packet[IP].src in server_ip) and (packet[TCP].flags & ACK):
if (packet[TCP].ack in client_packets):
rtt_lol = (packet.time - client_packets[packet[TCP].ack])/divider
if rtt_lol < rtt_min:
rtt_min = rtt_lol
if rtt_lol > rtt_max:
rtt_max = rtt_lol
rtt.append(rtt_lol)
del client_packets[packet[TCP].ack]
counter += 1
#if (packet[TCP].ack not in server_packets):
# server_packets[packet[TCP].ack] = packet.time
except:
traceback.print_exc()
pass
try:
average_rtt = sum(rtt)/len(rtt)#*1.0
return rtt_min*divider, rtt_max*divider, round(average_rtt*1000,5)
except:
raise
return
if __name__ == "__main__":
mini, maxi, avg = get_average_rtt(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[6])
mini = "min : "+str(mini)
maxi = "max : "+str(maxi)
avg = "avg : "+str(avg)
open(sys.argv[5],"w+").write(mini+"\n"+maxi+"\n"+avg)
| [
"jcvaldez1@up.edu.ph"
] | jcvaldez1@up.edu.ph |
a0ed7cbed9ef438cd2a8f009ffb45f28b0f19996 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-1515.py | fd70e3a1a249fdf5f31490f6386c64d3a7cd4f0b | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,755 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers (two-slot generated variant: every member
# has a numbered twin; the twins' extra arguments and the *2 fields are
# never used by any method below).
class Vector2(object):
    items: [int] = None
    items2: [int] = None
    size: int = 0
    size2: int = 0
    def __init__(self:"Vector2"):
        self.items = [0]
    # Current number of slots in the backing list.
    def capacity(self:"Vector2") -> int:
        return len(self.items)
    # Duplicate of capacity().
    def capacity2(self:"Vector2") -> int:
        return len(self.items)
    # Grow the backing list by one zero slot; report the new capacity.
    def increase_capacity(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity().
    def increase_capacity2(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Store one value after the last element, growing first when full.
    def append(self:"Vector2", item: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = 1 + self.size
    # Duplicate of append(); item2 is ignored.
    def append2(self:"Vector2", item: int, item2: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = 1 + self.size
    # Append every value of new_items, front to back.
    def append_all(self:"Vector2", new_items: [int]) -> object:
        value:int = 0
        for value in new_items:
            self.append(value)
    # Duplicate of append_all(); new_items2 is ignored.
    def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
        value:int = 0
        value2:int = 0
        for value in new_items:
            self.append(value)
    # Shift elements left over position idx and shrink by one.
    # Negative idx is a no-op; an idx past the end still shrinks the
    # vector by one (preserved historical behavior).
    def remove_at(self:"Vector2", idx: int) -> object:
        pos:int = 0
        if idx < 0:
            return
        pos = idx
        while pos < self.size - 1:
            self.items[pos] = self.items[pos + 1]
            pos = pos + 1
        self.size = self.size - 1
    # Duplicate of remove_at(); idx2 is ignored.
    def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
        pos:int = 0
        if idx < 0:
            return
        pos = idx
        while pos < self.size - 1:
            self.items[pos] = self.items[pos + 1]
            pos = pos + 1
        self.size = self.size - 1
    # Element stored at position idx.
    def get(self:"Vector2", idx: int) -> int:
        return self.items[idx]
    # Duplicate of get(); idx2 is ignored.
    def get2(self:"Vector2", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Count of elements currently stored.
    def length(self:"Vector2") -> int:
        return self.size
    # Duplicate of length().
    def length2(self:"Vector2") -> int:
        return self.size
# A resizable list of integers (three-slot generated variant: the numbered
# method twins ignore their extra arguments and the *2/*3 fields are never
# used by any method below).
class Vector3(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    def __init__(self:"Vector3"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector3") -> int:
        return len(self.items)
    # Returns current capacity (duplicate of capacity)
    def capacity2(self:"Vector3") -> int:
        return len(self.items)
    # Returns current capacity (duplicate of capacity)
    def capacity3(self:"Vector3") -> int:
        return len(self.items)
    # Increases capacity of vector by one element and returns the new capacity
    def increase_capacity(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity by one element (duplicate of increase_capacity)
    def increase_capacity2(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity by one element (duplicate of increase_capacity).
    # Fix: the self annotation was the unexpanded template hole "$IDSTRING",
    # which is a syntax error; restored to "Vector3" to match its siblings.
    def increase_capacity3(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector, growing the backing list when full
    def append(self:"Vector3", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector (item2 is ignored)
    def append2(self:"Vector3", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector (item2/item3 are ignored)
    def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector3", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector (new_items2 is ignored)
    def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector (new_items2/new_items3 are ignored)
    def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Removes the item at index idx, shifting later items left.
    # Out-of-range indices are ignored (fix: previously size was
    # decremented even for idx >= size, silently dropping the last element).
    def remove_at(self:"Vector3", idx: int) -> object:
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes the item at index idx (idx2 is ignored)
    def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes the item at index idx (idx2/idx3 are ignored)
    def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector3", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index (idx2 is ignored)
    def get2(self:"Vector3", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index (idx2/idx3 are ignored)
    def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector3") -> int:
        return self.size
    # Retrieves the current size of the vector (duplicate of length)
    def length2(self:"Vector3") -> int:
        return self.size
    # Retrieves the current size of the vector (duplicate of length)
    def length3(self:"Vector3") -> int:
        return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
    # Generated variant of remove_at: idx2 is intentionally unused.
    if idx < 0:
        return
    while idx < self.size - 1:
        self.items[idx] = self.items[idx + 1]
        idx = idx + 1
    self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
    # Generated variant of remove_at: idx2/idx3 are intentionally unused.
    if idx < 0:
        return
    while idx < self.size - 1:
        self.items[idx] = self.items[idx + 1]
        idx = idx + 1
    self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
    # Generated variant of remove_at: idx2..idx4 are intentionally unused.
    if idx < 0:
        return
    while idx < self.size - 1:
        self.items[idx] = self.items[idx + 1]
        idx = idx + 1
    self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
    # Return the element stored at idx (no bounds check beyond the list's own).
    return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
    # Generated variant of get: idx2 is intentionally unused.
    return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
    # Generated variant of get: idx2/idx3 are intentionally unused.
    return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
    # Generated variant of get: idx2..idx4 are intentionally unused.
    return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
    # Logical element count (<= capacity).
    return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
    # Generated duplicate of length.
    return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
    # Generated duplicate of length.
    return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
    # Generated duplicate of length.
    return self.size
# A resizable list of integers
class Vector5(object):
    # NOTE(review): generated stress-test input for a ChocoPy-style restricted
    # Python subset (numbered duplicate fields/methods, `[int]` annotations).
    # Code is kept verbatim; only comments are added.  Numbered *2..*5
    # duplicates mirror the unsuffixed member and their extras are unused.
    # Backing storage; capacity == len(items), logical length == size.
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    items5: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    size5: int = 0
    def __init__(self:"Vector5"):
        # Start with capacity 1; size stays 0 until the first append.
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity2(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity3(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity4(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity5(self:"Vector5") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity4(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity5(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector5", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector (item2 unused)
    def append2(self:"Vector5", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector (item2/item3 unused)
    def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector (item2..item4 unused)
    def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector (item2..item5 unused)
    def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector5", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector (only new_items is appended)
    def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector (only new_items is appended)
    def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector (only new_items is appended)
    def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector (only new_items is appended)
    def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        item5:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    # NOTE(review): idx >= size still decrements size; quirk preserved.
    def remove_at(self:"Vector5", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector (idx2 unused)
    def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector (idx2/idx3 unused)
    def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector (idx2..idx4 unused)
    def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector (idx2..idx5 unused)
    def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector5", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index (idx2 unused)
    def get2(self:"Vector5", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index (idx2/idx3 unused)
    def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index (idx2..idx4 unused)
    def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index (idx2..idx5 unused)
    def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length3(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length4(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length5(self:"Vector5") -> int:
        return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    # Doubles capacity until doubling_limit, then grows one slot at a time.
    doubling_limit:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            # Doubling copies the existing elements into the spare slots;
            # harmless, since `size` tracks the logical length.
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
    # Generated duplicate of DoublingVector; *2 members are unused copies.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
    # Generated duplicate of DoublingVector; *2/*3 members are unused copies.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
    # Generated duplicate of DoublingVector; *2..*4 members are unused copies.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
    # Generated duplicate of DoublingVector; *2..*5 members are unused copies.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    doubling_limit5:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity5(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
    # Build a DoublingVector containing i, i+1, ..., j-1 (empty if i >= j).
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
    # Generated variant of vrange: i2/j2 and v2 are intentionally unused.
    v:Vector = None
    v2:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
    # Generated variant of vrange: the extra parameters/locals are unused.
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
    # Generated variant of vrange: the extra parameters/locals are unused.
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
    # Generated variant of vrange: the extra parameters/locals are unused.
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v5:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
    # For each retained element k = v[i], delete every later element that is
    # divisible by k.  Applied to vrange(2, n) this leaves exactly the primes.
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                # remove_at shifts the tail left, so j stays put on removal.
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
    # Generated variant of sieve: v2 and the *2 locals are unused.
    i:int = 0
    i2:int = 0
    j:int = 0
    j2:int = 0
    k:int = 0
    k2:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
    # Generated variant of sieve: extra vectors and numbered locals are unused.
    i:int = 0
    i2:int = 0
    i3:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
    # Generated variant of sieve: extra vectors and numbered locals are unused.
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
    # Generated variant of sieve: extra vectors and numbered locals are unused.
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    i5:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    j5:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    k5:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Benchmark driver: sieve the range [2, 50) and print the surviving primes.
# Only n, v and i are actually used; the *2..*5 copies are built/declared but
# never sieved or printed (dead weight for the compiler under test).
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
    print(v.get(i))
    i = i + 1
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
226292d87f9041ea7a86f8d84b6f3280dad920ad | 08c8d45937e3b9db5ebb714089ac688876c14c0d | /Day4/Day4.py | 86e70f332b155cbca055adf61e02ec3fd85ac5cc | [] | no_license | alexanderfast/AdventOfCode | 3745d03ea4b4f6970e098bcacf30fd2a7ff4c206 | 89e4934312348c694964ad870f5f4df0932c7f3d | refs/heads/master | 2021-05-30T04:28:53.332128 | 2015-12-21T19:20:55 | 2015-12-21T19:20:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | import md5
numberOfZeros = 6  # puzzle difficulty: required count of leading hex zeros
def test(s, i):
m = md5.md5()
m.update(s)
m.update(str(i))
d = m.hexdigest()
return d, len(d) - len(d.lstrip('0'))
#print test("abcdef", 609043)
#print test("pqrstuv", 1048970)
# Brute-force the smallest i whose MD5("iwrupvqb" + i) digest has at least
# numberOfZeros leading hex zeros (Advent of Code 2015, day 4 part 2).
# NOTE(review): `print d, i` is Python 2 statement syntax; this script
# predates Python 3 and is left byte-identical.
i = 0
while True:
    d, c = test("iwrupvqb", i)
    if c >= numberOfZeros:
        print d, i
        break
    i += 1
| [
"mizipzor@gmail.com"
] | mizipzor@gmail.com |
c9d2286497bb35b93516dcc297c320d2f96cfc57 | 53885f6a7d91d725a902619355d31a717954a885 | /Paralelno programiranje/src/Connect4.py | c0a0db26ca8e674eec37c2c45b1b117b578c397e | [] | no_license | miabarzic/FER-labosi | 68c7e93972996d6e1bf2e6cf945cb7142ffc6939 | f3422f335c1fd704e6fca4438bddd9a5c73456ff | refs/heads/master | 2023-03-03T21:33:20.115307 | 2021-02-19T14:22:21 | 2021-02-19T14:22:21 | 340,382,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,394 | py | # ako nije postavljena zastavica NASTAVI_IGRU, program izračunava samo prvi potez računala i prekida s radom
# measurements were carried out for depth 7
import sys
from statistics import mean
from mpi4py import MPI
from Board import *
import time
from queue import Queue
DUBINA = 6
NASTAVI_IGRU = True
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
def posalji_kraj():
    """Broadcast the "end" control message to every worker rank (1..size-1)."""
    worker = 1
    while worker < size:
        comm.send(["end"], dest=worker)
        worker += 1
def posalji_novi_potez():
    """Tell every worker rank that a new move round is starting."""
    for dest_rank in range(1, size):
        comm.send(['novi_potez'], dest=dest_rank)
def posalji_racunaj():
    """Tell every worker rank to start requesting computation tasks."""
    for dest_rank in range(1, size):
        comm.send(["racunaj"], dest=dest_rank)
def stvori_zadatke(p):
    """Build the first two plies of the game tree for the computer (player 1).

    Returns a pair:
      * a dict mapping each column index to its score list -- pre-filled
        with 1 for columns where the computer wins immediately,
      * a queue of (computer_column, opponent_column) tasks for the workers.

    The board `p` is restored to its original state before returning.
    """
    scores = {col: [] for col in range(p.broj_stupaca)}
    tasks = Queue()
    for col in range(p.broj_stupaca):
        if not p.legalan_potez(col):
            continue
        p.napravi_potez(col, 1)
        if p.provjeri_pobjedu(col):
            # Immediate win: record it, no deeper tasks needed for this column.
            scores[col].append(1)
        else:
            for reply_col in range(p.broj_stupaca):
                if p.legalan_potez(reply_col):
                    tasks.put((col, reply_col))
        p.ponisti_potez(col)
    return scores, tasks
def evaluate(p, zadnji_igrac, zadnji_potez, dubina):
    """Recursively score a position from the computer's point of view.

    Args:
        p: board position; copied, so the caller's board is never mutated.
        zadnji_igrac: player (1 = computer, 2 = human) who played zadnji_potez.
        zadnji_potez: column index of the move just played.
        dubina: remaining search depth.

    Returns:
        1 if the computer is guaranteed to win, -1 if it is guaranteed to
        lose, 0 at the depth cutoff, otherwise the mean of the child scores.
    """
    alllose = True
    allwin = True
    # Work on a copy so the make/undo pairs below cannot corrupt the caller.
    ploca = p.kopiraj_plocu()
    if ploca.provjeri_pobjedu(zadnji_potez):
        # The move just played already won the game for whoever played it.
        if zadnji_igrac == 1:
            return 1
        else:
            return -1
    if dubina == 0:
        return 0
    dubina -= 1
    novi_igrac = 1 if zadnji_igrac == 2 else 2
    ukupno = 0
    broj_poteza = 0
    for i in range(ploca.broj_stupaca):
        if ploca.legalan_potez(i):
            broj_poteza += 1
            ploca.napravi_potez(i, novi_igrac)
            rezultat = evaluate(ploca, novi_igrac, i, dubina)
            ploca.ponisti_potez(i)
            if rezultat > -1:
                alllose = False
            if rezultat != 1:
                allwin = False
            # Early exit: the player to move simply picks a winning child.
            if rezultat == 1 and novi_igrac == 1:
                return 1
            if rezultat == -1 and novi_igrac == 2:
                return -1
            ukupno += rezultat
    # NOTE(review): with no legal moves (full board, no winner) broj_poteza
    # is 0 and allwin is vacuously true, so a draw scores 1 -- verify intended.
    if allwin:
        return 1
    if alllose:
        return -1
    # Mixed outcomes: return the mean child score as a heuristic.
    ukupno /= broj_poteza
    return ukupno
# Rank 0 is the coordinator: it reads the human's moves, farms out the
# two-ply game-tree tasks to the workers, aggregates their scores and plays
# the computer's best reply.  All other ranks are workers in a
# request/compute/reply loop.  Both sides run a small state machine (stanje).
if rank == 0:
    ploca = Board()
    status = MPI.Status()
    stanje = 0
    potezi = Queue()
    vrijednosti_cvorova = {}
    broj_poslanih_poruka_kraj = 0
    while True:
        # State 0: read and apply the human's move, then fan out tasks.
        if stanje == 0:
            broj_poslanih_poruka_kraj = 0
            legalan = False
            while not legalan:
                igracev_potez = int(input())
                if ploca.legalan_potez(igracev_potez):
                    ploca.napravi_potez(igracev_potez, 2)
                    legalan = True
                """else:
                print("Vas potez nije dopusten, odaberite drugi stupac")
                sys.stdout.flush()"""
            start_time = time.time()
            if ploca.provjeri_pobjedu(igracev_potez):
                #print("Igrac je pobijedio")
                posalji_kraj()
                break
            vrijednosti_cvorova, potezi = stvori_zadatke(ploca)
            if ploca.ploca_puna():
                #print("Ploca popunjena, igra zavrsava nerijeseno")
                posalji_kraj()
                break
            else:
                posalji_racunaj()
                stanje = 1
        # State 1: hand out tasks on request and collect answers.
        elif stanje == 1:
            if potezi.qsize() == 0:
                stanje = 2
            else:
                poruka = comm.recv(source=MPI.ANY_SOURCE, status=status)
                if poruka[0] == 'z':
                    # 'z' = task request from a worker.
                    broj_procesa = status.Get_source()
                    p = potezi.get()
                    zadatak = ['o', ploca.kopiraj_plocu(), p]
                    comm.send(zadatak, dest=broj_procesa)
                elif poruka[0] == 'o':
                    # 'o' = answer: (task index, score).
                    indeks_zadatka = poruka[1]
                    vrijednost = poruka[2]
                    vrijednosti_cvorova[indeks_zadatka[0]].append(vrijednost)
        # In this state we still receive answers and task requests, but reply
        # to requests that no tasks are left.
        elif stanje == 2:
            poruka = comm.recv(source=MPI.ANY_SOURCE, status=status)
            if poruka[0] == 'z':
                broj_procesa = status.Get_source()
                comm.send(['end'], dest=broj_procesa)
                broj_poslanih_poruka_kraj += 1
                if broj_poslanih_poruka_kraj == size - 1:
                    stanje = 3
            elif poruka[0] == 'o':
                indeks_zadatka = poruka[1]
                vrijednost = poruka[2]
                vrijednosti_cvorova[indeks_zadatka[0]].append(vrijednost)
        # In this state pick the best move and play it.
        elif stanje == 3:
            lista_vrijednosti = []
            for i in range(ploca.broj_stupaca):
                if not vrijednosti_cvorova[i]:
                    if ploca.legalan_potez(i):
                        # Legal column with no scores: neutral-ish sentinel.
                        lista_vrijednosti.append(-2)
                    else:
                        # Illegal column: sentinel that can never be chosen.
                        lista_vrijednosti.append(-1000)
                else:
                    if -1 in vrijednosti_cvorova[i]:
                        # Any losing subtree makes the whole column a loss.
                        lista_vrijednosti.append(-1)
                    else:
                        lista_vrijednosti.append(mean(vrijednosti_cvorova[i]))
            novi_potez = lista_vrijednosti.index(max(lista_vrijednosti))
            ploca.napravi_potez(novi_potez, 1)
            for v in lista_vrijednosti:
                if v >= -1:
                    print("%.3f" % v, end=" ")
            print("")
            ploca.ispisi_plocu()
            #print(" %s s" % (time.time() - start_time))
            if ploca.ploca_puna():
                #print("Ploca popunjena, igra zavrsava nerijeseno")
                posalji_kraj()
                break
            if ploca.provjeri_pobjedu(novi_potez):
                #print("Racunalo je pobijedilo")
                posalji_kraj()
                break
            if NASTAVI_IGRU:
                posalji_novi_potez()
                stanje = 0
            else:
                break
else:
    stanje = 0
    poruka = []
    # Sends a request, then the answer, and moves to state 1 where it
    # computes and returns a value.
    while True:
        # State 0: wait for the coordinator's round-control message.
        if stanje == 0:
            poruka = comm.recv(source = 0)
            if poruka[0] == 'end':
                break
            elif poruka[0] == 'racunaj':
                stanje = 1
        # State 1: request a task ('z'); 'o' carries one, 'end' means done.
        elif stanje == 1:
            comm.send(['z'], dest=0)
            poruka = comm.recv(source=0)
            if poruka[0] == 'o':
                stanje = 2
            elif poruka[0] == 'end':
                stanje = 3
        # Solve the task and return the result.
        elif stanje == 2:
            ploca = poruka[1]
            potezi = poruka[2]
            # Replay the two-ply prefix, then evaluate the remaining depth.
            ploca.napravi_potez(potezi[0], 1)
            ploca.napravi_potez(potezi[1], 2)
            vrijednost = evaluate(ploca, 2, potezi[1], DUBINA - 2)
            comm.send(['o', potezi, vrijednost], dest=0)
            stanje = 1
        # State 3: round finished; either await the next round or exit.
        elif stanje == 3:
            sys.stdout.flush()
            if NASTAVI_IGRU:
                poruka = comm.recv(source=0)
                if poruka[0] == 'novi_potez':
                    stanje = 0
                elif poruka[0] == 'end':
                    break
            else:
                break
| [
"mia.barzic10@gmail.com"
] | mia.barzic10@gmail.com |
95b99eeeb62fe5d5845a1d7211ce8f29cf1115e8 | 64d1211404c89da4e09d77d859f2cdf6609a057e | /models/official/nlp/bert/model_training_utils_test.py | 1940a764e46a11fba48b6faab7706797c15bace1 | [
"Apache-2.0"
] | permissive | Nerfertili/Deep_learning_learning_udemy | f375209e0675ab8f4da9551d8a5bdee4f2948ed8 | 0fe6c1f36019b29151acb17a1f248b34d6089aeb | refs/heads/master | 2023-02-17T10:10:52.536426 | 2021-01-19T02:48:23 | 2021-01-19T02:48:23 | 330,823,730 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,085 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for official.modeling.training.model_training_utils."""
import os
from absl import logging
from absl.testing import flagsaver
from absl.testing import parameterized
from absl.testing.absltest import mock
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.nlp.bert import common_flags
from official.nlp.bert import model_training_utils
common_flags.define_common_bert_flags()
def eager_strategy_combinations():
  """Returns eager-mode test combinations over all supported strategies."""
  eager_strategies = [
      strategy_combinations.default_strategy,
      strategy_combinations.cloud_tpu_strategy,
      strategy_combinations.one_device_strategy_gpu,
      strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
      strategy_combinations.mirrored_strategy_with_two_gpus,
  ]
  return combinations.combine(distribution=eager_strategies)
def eager_gpu_strategy_combinations():
  """Returns eager-mode test combinations over CPU/GPU (no TPU) strategies."""
  gpu_strategies = [
      strategy_combinations.default_strategy,
      strategy_combinations.one_device_strategy_gpu,
      strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
      strategy_combinations.mirrored_strategy_with_two_gpus,
  ]
  return combinations.combine(distribution=gpu_strategies)
def create_fake_data_input_fn(batch_size, features_shape, num_classes):
  """Creates a dummy input function with the given feature and label shapes.

  Args:
    batch_size: integer.
    features_shape: list[int]. Feature shape for an individual example.
    num_classes: integer. Number of labels.

  Returns:
    An input function that is usable in the executor.
  """

  def _dataset_fn(input_context=None):
    """An input function for generating fake data."""
    # NOTE(review): input_context is accessed unconditionally, so the None
    # default would raise -- presumably this is always invoked through the
    # distribution strategy's dataset-from-function path; confirm.
    local_batch_size = input_context.get_per_replica_batch_size(batch_size)
    # 64 fixed fake examples; labels are independent 0/1 draws per class
    # (multi-hot, not one-hot).
    features = np.random.rand(64, *features_shape)
    labels = np.random.randint(2, size=[64, num_classes])
    # Convert the inputs to a Dataset.
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    # Each input pipeline gets its own disjoint shard of the 64 examples.
    dataset = dataset.shard(input_context.num_input_pipelines,
                            input_context.input_pipeline_id)

    def _assign_dtype(features, labels):
      features = tf.cast(features, tf.float32)
      labels = tf.cast(labels, tf.float32)
      return features, labels

    # Shuffle, repeat, and batch the examples.
    dataset = dataset.map(_assign_dtype)
    dataset = dataset.shuffle(64).repeat()
    dataset = dataset.batch(local_batch_size, drop_remainder=True)
    dataset = dataset.prefetch(buffer_size=64)
    return dataset

  return _dataset_fn
def create_model_fn(input_shape, num_classes, use_float16=False):
  """Returns a model_fn building a tiny two-Dense-layer test model.

  Args:
    input_shape: list[int]. Shape of a single input example.
    num_classes: integer. Output (and hidden) width.
    use_float16: whether to wrap the optimizer in a LossScaleOptimizer for
      mixed-precision training.

  Returns:
    A zero-argument callable producing (model, sub_model); sub_model shares
    the model's layers up to (and including) the hidden Dense layer.
  """

  def _model_fn():
    """A one-layer softmax model suitable for testing."""
    input_layer = tf.keras.layers.Input(shape=input_shape)
    x = tf.keras.layers.Dense(num_classes, activation='relu')(input_layer)
    output_layer = tf.keras.layers.Dense(num_classes, activation='softmax')(x)
    sub_model = tf.keras.models.Model(input_layer, x, name='sub_model')
    model = tf.keras.models.Model(input_layer, output_layer, name='model')
    # Extra metric so the training loop's summary plumbing can be asserted on.
    model.add_metric(
        tf.reduce_mean(input_layer), name='mean_input', aggregation='mean')
    model.optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)
    if use_float16:
      model.optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
          model.optimizer)
    return model, sub_model

  return _model_fn
def metric_fn():
  """Gets a tf.keras metric object."""
  accuracy_metric = tf.keras.metrics.CategoricalAccuracy(
      name='accuracy', dtype=tf.float32)
  return accuracy_metric
def summaries_with_matching_keyword(keyword, summary_dir):
  """Yields summary protos matching given keyword from event file.

  Args:
    keyword: substring looked for in each summary value's tag.
    summary_dir: directory containing TF event files.

  Yields:
    The `Summary` proto once per value whose tag contains `keyword`.
  """
  # NOTE(review): only the newest event file is inspected; raises IndexError
  # if the glob matches nothing.
  event_paths = tf.io.gfile.glob(os.path.join(summary_dir, 'events*'))
  for event in tf.compat.v1.train.summary_iterator(event_paths[-1]):
    if event.summary is not None:
      for value in event.summary.value:
        if keyword in value.tag:
          # Fix: a matching summary is the expected case, not a failure --
          # log it at INFO instead of ERROR so test logs are not polluted.
          logging.info(event)
          yield event.summary
def check_eventfile_for_keyword(keyword, summary_dir):
  """Checks event files for the keyword."""
  # True as soon as one matching summary exists; False if the generator
  # is exhausted without a match.
  for _ in summaries_with_matching_keyword(keyword, summary_dir):
    return True
  return False
class RecordingCallback(tf.keras.callbacks.Callback):
  """Test callback recording every hook invocation as (index, logs) tuples."""

  def __init__(self):
    # One bucket per Keras hook, filled in call order.
    self.batch_begin = []  # (batch, logs)
    self.batch_end = []  # (batch, logs)
    self.epoch_begin = []  # (epoch, logs)
    self.epoch_end = []  # (epoch, logs)

  def _record(self, bucket, index, logs):
    # Shared recording helper for all four hooks.
    bucket.append((index, logs))

  def on_batch_begin(self, batch, logs=None):
    self._record(self.batch_begin, batch, logs)

  def on_batch_end(self, batch, logs=None):
    self._record(self.batch_end, batch, logs)

  def on_epoch_begin(self, epoch, logs=None):
    self._record(self.epoch_begin, epoch, logs)

  def on_epoch_end(self, epoch, logs=None):
    self._record(self.epoch_end, epoch, logs)
class ModelTrainingUtilsTest(tf.test.TestCase, parameterized.TestCase):
  """End-to-end tests for run_customized_training_loop across strategies."""

  def setUp(self):
    super(ModelTrainingUtilsTest, self).setUp()
    # Default float32 test model; individual tests may override this.
    self._model_fn = create_model_fn(input_shape=[128], num_classes=3)

  # flagsaver restores the BERT common flags after each invocation.
  @flagsaver.flagsaver
  def run_training(self, strategy, model_dir, steps_per_loop, run_eagerly):
    """Runs a 2-epoch, 20-steps-per-epoch training loop on fake data."""
    input_fn = create_fake_data_input_fn(
        batch_size=8, features_shape=[128], num_classes=3)
    model_training_utils.run_customized_training_loop(
        strategy=strategy,
        model_fn=self._model_fn,
        loss_fn=tf.keras.losses.categorical_crossentropy,
        model_dir=model_dir,
        steps_per_epoch=20,
        steps_per_loop=steps_per_loop,
        epochs=2,
        train_input_fn=input_fn,
        eval_input_fn=input_fn,
        eval_steps=10,
        init_checkpoint=None,
        sub_model_export_name='my_submodel_name',
        metric_fn=metric_fn,
        custom_callbacks=None,
        run_eagerly=run_eagerly)

  @combinations.generate(eager_strategy_combinations())
  def test_train_eager_single_step(self, distribution):
    """steps_per_loop=1 + eager: valid everywhere except on TPU strategies."""
    model_dir = self.create_tempdir().full_path
    if isinstance(
        distribution,
        (tf.distribute.TPUStrategy, tf.distribute.experimental.TPUStrategy)):
      with self.assertRaises(ValueError):
        self.run_training(
            distribution, model_dir, steps_per_loop=1, run_eagerly=True)
    else:
      self.run_training(
          distribution, model_dir, steps_per_loop=1, run_eagerly=True)

  @combinations.generate(eager_gpu_strategy_combinations())
  def test_train_eager_mixed_precision(self, distribution):
    """Mixed-precision training must complete under the float16 policy."""
    model_dir = self.create_tempdir().full_path
    tf.keras.mixed_precision.set_global_policy('mixed_float16')
    self._model_fn = create_model_fn(
        input_shape=[128], num_classes=3, use_float16=True)
    self.run_training(
        distribution, model_dir, steps_per_loop=1, run_eagerly=True)

  @combinations.generate(eager_strategy_combinations())
  def test_train_check_artifacts(self, distribution):
    """Checkpoints and summaries must be written with the expected names."""
    model_dir = self.create_tempdir().full_path
    self.run_training(
        distribution, model_dir, steps_per_loop=10, run_eagerly=False)

    # Two checkpoints should be saved after two epochs.
    files = map(os.path.basename,
                tf.io.gfile.glob(os.path.join(model_dir, 'ctl_step_*index')))
    self.assertCountEqual(
        ['ctl_step_20.ckpt-1.index', 'ctl_step_40.ckpt-2.index'], files)

    # Three submodel checkpoints should be saved after two epochs (one after
    # each epoch plus one final).
    files = map(
        os.path.basename,
        tf.io.gfile.glob(os.path.join(model_dir, 'my_submodel_name*index')))
    self.assertCountEqual([
        'my_submodel_name.ckpt-3.index',
        'my_submodel_name_step_20.ckpt-1.index',
        'my_submodel_name_step_40.ckpt-2.index'
    ], files)

    self.assertNotEmpty(
        tf.io.gfile.glob(
            os.path.join(model_dir, 'summaries/training_summary*')))

    # Loss and accuracy values should be written into summaries.
    self.assertTrue(
        check_eventfile_for_keyword('loss',
                                    os.path.join(model_dir, 'summaries/train')))
    self.assertTrue(
        check_eventfile_for_keyword('accuracy',
                                    os.path.join(model_dir, 'summaries/train')))
    self.assertTrue(
        check_eventfile_for_keyword('mean_input',
                                    os.path.join(model_dir, 'summaries/train')))
    self.assertTrue(
        check_eventfile_for_keyword('accuracy',
                                    os.path.join(model_dir, 'summaries/eval')))
    self.assertTrue(
        check_eventfile_for_keyword('mean_input',
                                    os.path.join(model_dir, 'summaries/eval')))

  @combinations.generate(eager_strategy_combinations())
  def test_train_check_callbacks(self, distribution):
    """Custom callbacks must see every batch/epoch hook with correct indices."""
    model_dir = self.create_tempdir().full_path
    callback = RecordingCallback()
    callbacks = [callback]
    input_fn = create_fake_data_input_fn(
        batch_size=8, features_shape=[128], num_classes=3)
    model_training_utils.run_customized_training_loop(
        strategy=distribution,
        model_fn=self._model_fn,
        loss_fn=tf.keras.losses.categorical_crossentropy,
        model_dir=model_dir,
        steps_per_epoch=20,
        num_eval_per_epoch=4,
        steps_per_loop=10,
        epochs=2,
        train_input_fn=input_fn,
        eval_input_fn=input_fn,
        eval_steps=10,
        init_checkpoint=None,
        metric_fn=metric_fn,
        custom_callbacks=callbacks,
        run_eagerly=False)
    self.assertEqual(callback.epoch_begin, [(1, {}), (2, {})])
    epoch_ends, epoch_end_infos = zip(*callback.epoch_end)
    self.assertEqual(list(epoch_ends), [1, 2, 2])
    for info in epoch_end_infos:
      self.assertIn('accuracy', info)
    # 4 evals/epoch over 20 steps -> batch hooks every 5 steps.
    self.assertEqual(callback.batch_begin, [(0, {}), (5, {}), (10, {}),
                                            (15, {}), (20, {}), (25, {}),
                                            (30, {}), (35, {})])
    batch_ends, batch_end_infos = zip(*callback.batch_end)
    self.assertEqual(list(batch_ends), [4, 9, 14, 19, 24, 29, 34, 39])
    for info in batch_end_infos:
      self.assertIn('loss', info)

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.one_device_strategy_gpu,
          ],))
  def test_train_check_artifacts_non_chief(self, distribution):
    """Non-chief workers must not export checkpoints or summaries."""
    # We shouldn't export artifacts on non-chief workers. Since there's no easy
    # way to test with real MultiWorkerMirroredStrategy, we patch the strategy
    # to make it as if it's MultiWorkerMirroredStrategy on non-chief workers.
    extended = distribution.extended
    with mock.patch.object(extended.__class__, 'should_checkpoint',
                           new_callable=mock.PropertyMock, return_value=False), \
        mock.patch.object(extended.__class__, 'should_save_summary',
                          new_callable=mock.PropertyMock, return_value=False):
      model_dir = self.create_tempdir().full_path
      self.run_training(
          distribution, model_dir, steps_per_loop=10, run_eagerly=False)
      self.assertEmpty(tf.io.gfile.listdir(model_dir))
if __name__ == '__main__':
  # Run this module's tests under the TensorFlow test runner.
  tf.test.main()
| [
"leal.afonso@outlook.com"
] | leal.afonso@outlook.com |
7158389b398eb38e4631bb45f0697e767a5deb83 | cab065ad6c2e754584c2c32c8ac27b283538045f | /question_voting/temp.py | a0de2013baefcfc6567534bd2a9dedfac84f525c | [] | no_license | Nasdin/Streamlit_Public_Question_Voting | 548d3e496ae403dd65133f4f423d50cdeac2fb66 | 2261e87511567c47b702ba744cb424802a61152f | refs/heads/main | 2023-07-17T23:04:54.378569 | 2021-08-24T07:37:33 | 2021-08-24T07:37:33 | 378,585,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,935 | py | # import hashlib
# import time
# import datetime
# from dataclasses import dataclass, field
# import uuid
#
# import streamlit as st
# from enum import Enum
# from collections import deque
# from operator import attrgetter
#
# from typing import List
#
# st.set_page_config("Anonymous questions voting")
#
#
# @st.cache(allow_output_mutation=True, persist=True)
# def questions_list():
# return []
#
#
# @st.cache(allow_output_mutation=True, persist=True)
# def questions_hash_list():
# return []
#
#
# @st.cache(allow_output_mutation=True, persist=True)
# def questions_votes():
# return []
#
#
# @st.cache(allow_output_mutation=True, persist=True)
# def question_comments():
# return {}
#
#
#
#
#
#
#
#
#
# def add_comments_to_question(question_text: str, comment: str):
# question_hashed = hash_question(question_text)
# comments_map = question_comments()
# comments_map[question_hashed].append([comment, datetime.datetime.now(), 0])
# st.info("Thanks for your comment")
#
#
# def add_question(question_text: str):
# if question_text.strip() == "":
# st.warning("Please add some texts")
# return
#
# question_hashed = hash_question(question_text)
#
# if question_hashed in questions_hash_list():
# st.warning("Question already added")
# return
# else:
# q_l = questions_list()
# q_v = questions_votes()
# q_c = question_comments()
# q_l.append(question_text)
# q_v.append(0)
# questions_hash_list().append(question_hashed)
# q_c[question_hashed] = []
# st.success("Your question has been added!")
# return
#
#
# def vote_question(index_to_vote, header_container):
# q_v = questions_votes()
# q_v[index_to_vote] = q_v[index_to_vote] + 1
# header_container.success("Thanks for voting")
#
#
# def hash_question(question_text):
# return hashlib.md5(question_text.encode('utf-8')).hexdigest()
#
#
# def display_timedelta_from_now(timestamp):
# delta = datetime.datetime.now() - timestamp
# has_minutes = delta.seconds > 60
# has_hour = delta.seconds > 3600
# has_day = delta.seconds > 86400
#
#
# def display_comment(container, comment):
# date_processed = datetime.datetime.now() - comment[1]
# container.write(f"#### Anonymous commenter: {comment[1]}")
# comment_box, to_upvote = st.beta_columns(2)
# with container.beta_container():
# comment_box.write(comment[0])
# to_upvote.write(f"Upvotes: {comment[2]}")
# return to_upvote.button("Upvote")
#
#
# def upvote_comment(comment):
# comment[2] += 1
# st.success("Thanks for upvoting the comment")
#
#
# def display_question_comments(container, question_text):
# question_hashed = hash_question(question_text)
# comments = question_comments()[question_hashed]
#
# with container.beta_expander(f"See comments: {len(comments)} comments"):
#
# container.subheader("Comments")
# if len(comments) == 0:
# container.write("There are no comments at the moment")
# for comment in comments:
# to_up_vote = display_comment(container, comment)
# if to_up_vote:
# upvote_comment(comment)
#
# add_comment_form(container, question_text)
#
#
# def add_comment_form(container, question_text):
# with container.form(f"add_comment_{question_text}"):
# comment = container.text_area(label="Add a public comment anonymously...",
# key=f"add_comment_{question_text}_text")
# _, _, right = container.beta_columns(3)
# to_comment = right.form_submit_button("Comment")
#
# if to_comment:
# add_comments_to_question(question_text, comment)
#
#
# def main():
#
#
#
#
# if to_add_question:
# add_question(new_question)
#
# question_board_header = st.beta_container()
# st.header("Question board")
# hash_table = set() # Instead of hash table, we should have done a queue as per a consumer producer pattern
# upvote_charts = {}
# while True:
#
# new_questions = []
# new_initial_upvotes = []
# new_indexes = []
# new_data = False
#
# # Get new questions added
# print("Searching new questions")
# for question_index, question_hash in enumerate(questions_hash_list()):
# if question_hash not in hash_table:
# new_data = True
# new_questions.append(questions_list()[question_index])
# new_initial_upvotes.append(questions_votes()[question_index])
# hash_table.add(question_hash)
# new_indexes.append(question_index)
#
# # Add elements for each new question added
# if new_data:
# print("Adding latest questions")
# for q_i, question, upvotes in zip(new_indexes, new_questions, new_initial_upvotes):
# question_container, question_vote = st.beta_columns(2)
# # comment_container = st.beta_container()
# question_container.subheader(f"Question No: {q_i + 1}")
# upvote_charts[q_i] = question_vote.empty()
# upvote_charts[q_i].subheader(f"Upvotes: {upvotes}")
#
# question_container.write(question)
# to_vote = question_vote.button("Upvote", key=f"{q_i}vote", )
# display_question_comments(st, question)
# # add_comment_form(st, question)
#
# if to_vote:
# vote_question(q_i, question_board_header)
# # Updates votes
# print("Updating vote counts")
# for q_i, upvote_chart in upvote_charts.items():
# latest_votes = questions_votes()
# upvote_chart.subheader(f"Upvotes: {latest_votes[q_i]}")
#
# time.sleep(1)
#
#
# if __name__ == '__main__':
# main()
| [
"Nasrudin.Salim.Suden@gmail.com"
] | Nasrudin.Salim.Suden@gmail.com |
098ece09a8cf8faf5fc39bbc98ef1bc12039261d | 41b8f4284e4caef9f06539eb0926d41c37c2457a | /ss_folder_plot.py | 75cc4cc4684c197011333bb6639bb6d9c934e587 | [] | no_license | spel-uchile/SeismicScripts | 019ee34b9cced2401f17549c1a662d3bb75faf10 | cad533721a4dd3b7e934c2bda6a6749d37fb6efb | refs/heads/master | 2021-01-18T22:51:24.562043 | 2016-07-28T15:05:21 | 2016-07-28T15:05:21 | 65,047,952 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,902 | py | #!/usr/bin/env python
__author__ = 'toopazo'
import os
# import argparse
# from obspy.core import read
# from os import listdir
# from os.path import isfile, join
import ss_plot
import ss_utilities
import subprocess
class ApplyToFolder():
def __init__(self):
pass
@staticmethod
def apply_to_folder(infolder, str1, str2, outdayplot, outfilter):
print "[apply_to_folder] infolder %s " % infolder
print "**********************************************************"
# 1) uncompress ".xxx.gz" files
xxxgz_files = ss_utilities.ParserUtilities.get_xxx_files(folderpath=infolder, extension=".MSEED.gz")
for path_i in xxxgz_files:
gz_path_i = os.path.abspath(path_i)
print "[apply_to_folder] Uncompressing gz_path_i %s .." % gz_path_i
cmdline = "gzip -d %s" % gz_path_i
subprocess.call(cmdline, shell=True) # resp = str(subprocess.call(cmdline, shell=True))
# arg = "[convert_slist2mseed] cmdline %s, resp %s" % (cmdline, resp)
# print arg
print "**********************************************************"
# 2) get ".xxx" files and apply "apply_to_file"
xxx_files = ss_utilities.ParserUtilities.get_xxx_files(folderpath=infolder, extension=".MSEED")
for path_i in xxx_files:
infile_i = os.path.abspath(path_i)
print "[apply_to_folder] Processing infile_i %s .." % infile_i
ApplyToFolder.apply_to_file(infile_i, str1, str2, outdayplot, outfilter)
print "Next file >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n" # separate infile_i
print "**********************************************************"
# 3) Done
print "Done"
@staticmethod
def apply_to_file(infile, str1, str2, u_dayplot, u_filter):
if (str1 in str(infile)) and (str2 in str(infile)):
outfile = infile.replace(".MSEED", ".png")
ss_plot.plot_file(infile=infile, outfile=outfile, outdayplot=u_dayplot, outfilter=u_filter)
if __name__ == '__main__':
    import argparse

    # Command-line entry point: normalize the given directory path and run
    # ApplyToFolder.apply_to_folder over it.
    parser = argparse.ArgumentParser(description='Apply function xxx to corresponding files .yyy in the given folder')
    parser.add_argument('directory', help='directory to use', action='store')
    # --str1/--str2 default to 'MSEED' so that, unfiltered, every MSEED file matches.
    parser.add_argument('--str1', action='store', help='str1 to filter', default='MSEED') # , required=True)
    parser.add_argument('--str2', action='store', help='str2 to filter', default='MSEED') # , required=True)
    parser.add_argument('--dayplot', action='store_true', help='dayplot of files')
    parser.add_argument('--filter', action='store', help='filter signal before ploting')

    args = parser.parse_args()

    # Canonicalize the folder path: normalize case and separators, then
    # resolve symlinks to an absolute path.
    uinfolder = args.directory
    uinfolder = os.path.normcase(uinfolder)
    uinfolder = os.path.normpath(uinfolder)
    uinfolder = os.path.realpath(uinfolder)

    ApplyToFolder.apply_to_folder(infolder=uinfolder,
                                  str1=args.str1, str2=args.str2,
                                  outdayplot=args.dayplot, outfilter=args.filter)
# parser = argparse.ArgumentParser(description='Obspy wrapper: Apply \"plot\" operation for infolder')
# parser.add_argument('--infolder', action='store', help='files to process', required=True)
# parser.add_argument('--str1', action='store', help='str2 to filter', required=True)
# parser.add_argument('--str2', action='store', help='str2 to filter', required=True)
# parser.add_argument('--showplot', action='store_true', help='show plot instead of saving it')
# parser.add_argument('--dayplot', action='store_true', help='dayplot of files')
# parser.add_argument('--filter', action='store', help='filter signal before ploting')
#
# args = parser.parse_args()
#
# # 1) Make sure user inputs are correct
# filter_plot = args.filter
# dayplot = args.dayplot
# showplot = args.showplot
# str1 = args.str1
# print(str1)
# str2 = args.str2
# print(str2)
# # Convert to real (no symlink) and full path
# infolder_path = args.infolder
# infolder_path = os.path.normcase(infolder_path)
# infolder_path = os.path.normpath(infolder_path)
# infolder_path = os.path.realpath(infolder_path)
# print(infolder_path)
# # Get all files in folder that contain "str1" and "str2"
# onlyfiles = [f for f in listdir(infolder_path) if isfile(join(infolder_path, f))]
# onlyfiles.sort()
# sel_files = []
# for file_i in onlyfiles:
# if (str1 in str(file_i)) and (str2 in str(file_i)):
# sel_files.append(file_i)
# sel_files.sort()
# infolder_files = sel_files
# print(infolder_files)
#
#
# # 3) Plot
# outfile_extension = '.png'
# if dayplot:
# # Construct Stream object, appending every trace in the folder
# st = read(infolder_files[0])
# for i in range(1, len(infolder_files)):
# st += read(infolder_files[i])
# st = st.sort()
# print(st[0].stats)
# print(st[0].data)
#
# if filter_plot is not None:
# st.filter("lowpass", freq=int(filter_plot), corners=10) # , zerophase=True
#
# plot_option_type = 'dayplot'
# if showplot is not None:
# outfile_name = 'dayplot'
# outfile_name += outfile_extension
# st.plot(type=plot_option_type, outfile=outfile_name, size=(800, 600))
# else:
# st.plot(type=plot_option_type, method='full')
#
# else:
# for file_i in infolder_files:
# # Construct Stream object, dayplotidually
# st = read(file_i)
# print(st[0].stats)
# print(st[0].data)
#
# if filter_plot is not None:
# st.filter("lowpass", freq=int(filter_plot), corners=10) # , zerophase=True
#
# filename, file_extension = os.path.splitext(file_i)
# plot_option_type = 'normal'
# outfile_name = str(filename)
# outfile_name += outfile_extension
# st.plot(type=plot_option_type, outfile=outfile_name, size=(800, 600))
| [
"tomas.opazo.t@gmail.com"
] | tomas.opazo.t@gmail.com |
f05afaefedd21c8a8362d23218c7eb4c9d7ffa0f | 1ffc17893d9e15fd939628bbc41c3d2633713ebd | /docs/tests/test_documentation_tutorial.py | d607bf9a8a7c3372aaf0f4fa1cdc37a04f40be05 | [
"Apache-2.0"
] | permissive | xadupre/sklearn-onnx | 646e8a158cdded725064964494f0f8a760630aa8 | b05e4864cedbf4f2a9e6c003781d1db8b53264ac | refs/heads/master | 2023-09-01T15:58:38.112315 | 2022-12-21T01:59:45 | 2022-12-21T01:59:45 | 382,323,831 | 0 | 2 | Apache-2.0 | 2023-01-04T13:41:33 | 2021-07-02T11:22:00 | Python | UTF-8 | Python | false | false | 3,935 | py | # SPDX-License-Identifier: Apache-2.0
"""
Tests examples from the documentation.
"""
import unittest
import os
import sys
import importlib
import subprocess
def import_source(module_file_path, module_name):
    """Load and execute a Python module from an explicit file path.

    Arguments:
        module_file_path: path of the ``.py`` file to load.
        module_name: name the module is registered under.

    Returns:
        The executed module object.

    Raises:
        FileNotFoundError: if the path does not exist or no import spec
            can be built for it.
    """
    # `importlib.util` is a submodule; import it explicitly rather than
    # relying on `import importlib` having loaded it as a side effect.
    import importlib.util
    if not os.path.exists(module_file_path):
        raise FileNotFoundError(module_file_path)
    module_spec = importlib.util.spec_from_file_location(
        module_name, module_file_path)
    if module_spec is None:
        raise FileNotFoundError(
            "Unable to find '{}' in '{}'.".format(
                module_name, module_file_path))
    module = importlib.util.module_from_spec(module_spec)
    # Bug fix: exec_module() always returns None, so the original
    # `return module_spec.loader.exec_module(module)` made this function
    # return None on success. Execute the module, then return it.
    module_spec.loader.exec_module(module)
    return module
class TestDocumentationTutorial(unittest.TestCase):
    """Smoke-tests every ``docs/tutorial/plot_*.py`` example."""

    # stderr fragments that correspond to known environment limitations
    # (missing tools, version skew) rather than genuine example failures.
    _BENIGN_ERRORS = (
        # dot not installed, this part is tested in onnx framework
        "No such file or directory: 'dot'",
        '"dot" not found in path.',
        # onnxmltools not recent enough
        ("cannot import name 'LightGbmModelContainer' "
         "from 'onnxmltools.convert.common._container'"),
        # onnxruntime datasets changed in master branch,
        # still the same in released version on pypi
        'Please fix either the inputs or the model.',
        # one example is using opset 13 but onnxruntime
        # only support up to opset 12.
        'Current official support for domain ai.onnx is till opset 12.',
        # unstable bug in scikit-learn<0.24
        "'str' object has no attribute 'decode'",
        # raised by old version of packages used in the documentation
        'This method should be overwritten for operator',
    )

    def test_documentation_tutorial(self):
        """Executes each tutorial example and fails on unexpected tracebacks."""
        this = os.path.abspath(os.path.dirname(__file__))
        tutorial_dir = os.path.normpath(os.path.join(this, '..', 'tutorial'))
        tested = 0
        for name in os.listdir(tutorial_dir):
            if not (name.startswith("plot_") and name.endswith(".py")):
                continue
            print("run %r" % name)
            try:
                mod = import_source(tutorial_dir, os.path.splitext(name)[0])
                assert mod is not None
            except FileNotFoundError:
                # try another way: run the example as a subprocess and
                # inspect its stderr for tracebacks.
                cmds = [sys.executable, "-u", os.path.join(tutorial_dir, name)]
                proc = subprocess.Popen(
                    cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                _, err = proc.communicate()
                stderr_text = err.decode('ascii', errors='ignore')
                if len(stderr_text) > 0 and 'Traceback' in stderr_text:
                    benign = any(marker in stderr_text
                                 for marker in self._BENIGN_ERRORS)
                    if not benign:
                        raise RuntimeError(
                            "Example '{}' (cmd: {} - exec_prefix='{}') "
                            "failed due to\n{}"
                            "".format(name, cmds, sys.exec_prefix, stderr_text))
            tested += 1
        if tested == 0:
            raise RuntimeError("No example was tested.")
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | xadupre.noreply@github.com |
49c6cbd957b72bbe58f4d43230aeb3420ad6a399 | fad392b7b1533103a0ddcc18e059fcd2e85c0fda | /install/px4_msgs/lib/python3.6/site-packages/px4_msgs/msg/_ulog_stream_ack.py | 70bef6edd37055221ffb8d9a6fa0096b1b9e0d97 | [] | no_license | adamdai/px4_ros_com_ros2 | bee6ef27559a3a157d10c250a45818a5c75f2eff | bcd7a1bd13c318d69994a64215f256b9ec7ae2bb | refs/heads/master | 2023-07-24T18:09:24.817561 | 2021-08-23T21:47:18 | 2021-08-23T21:47:18 | 399,255,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | /home/navlab-tx2-4/px4_ros_com_ros2/build/px4_msgs/rosidl_generator_py/px4_msgs/msg/_ulog_stream_ack.py | [
"adamdai97@gmail.com"
] | adamdai97@gmail.com |
d9918150ac573b71d76f30a0003d5f85cfd22438 | b76f7cd15857016c9b7d7a444488f0e122d90109 | /dongyeop/2020.07.29.py | cf15c73be6e0f913cef857e2272ff3985ff1f911 | [] | no_license | eunjin917/Summer-Algorithm-Merge | 705e5a8837898a27911d8270799a37e6360262f5 | d2e11873dac7745e8d662c3d7b4ff34570e1286d | refs/heads/master | 2023-03-23T22:02:18.957706 | 2020-08-29T14:00:50 | 2020-08-29T14:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | #개인 퀴즈 서울에서 김서방 찾기
def solution(seoul):
    """Return the announcement string giving the position of 'Kim' in `seoul`.

    `seoul` is a list of names; 'Kim' is assumed to be present exactly once.
    """
    kim_position = seoul.index('Kim')
    return "김서방은 {}에 있다".format(kim_position)
| [
"ckehdduq95@gmail.com"
] | ckehdduq95@gmail.com |
5fdf1941c40bcd68ef7d3c47bee5241f87b9daa3 | 191525bc567cc024674409235452a9b25fa3eda3 | /website_test/utils/Incorporation.py | 57d90ba9e3b6e8f66186f5cac8b6d403ad3ddfb7 | [] | no_license | wangzilinwzl/largedata | 80636d312e3f8872bcd7470d36bbe80981bae707 | 90e11ae96971f3df94ca715e24c5d1479949f6aa | refs/heads/master | 2020-03-11T10:43:09.888339 | 2018-04-25T04:00:22 | 2018-04-25T04:00:22 | 129,949,712 | 0 | 0 | null | 2018-04-25T04:00:23 | 2018-04-17T18:40:26 | Python | UTF-8 | Python | false | false | 1,109 | py | # -*- coding: UTF-8 -*-
from collections import OrderedDict
import re
import ast
class Incorporation:
    """Merged view of one transit trip: trip_update fields, vehicle fields,
    and the trip's predicted future stops."""

    def __init__(self):
        self.tripId = None  # unique trip identifier, both trip_update and vehicle have
        self.routeId = None  # mainly for trip_update
        self.startDate = None  # trip_update
        self.direction = None  # from trip_id
        self.currentStopId = None  # from vehicle
        self.currentStopStatus = None  # from vehicle
        self.vehicleTimeStamp = None  # timestamp from vehicle info
        # stop_time_updated, Format {stopId : [arrivalTime, departureTime]}
        self.futureStops = OrderedDict()
        self.timeStamp = None

    def constructFromDyDict(self, d):  # construct dynamically
        """Populate this object's fields from the dict `d` (unicode keys).

        The simple fields are copied one-to-one; `futureStops` arrives
        serialized as the string "OrderedDict([...])" and is rebuilt via
        ast.literal_eval on the inner pair list.
        """
        simple_fields = (u'tripId', u'routeId', u'startDate', u'direction',
                         u'currentStopId', u'currentStopStatus',
                         u'vehicleTimeStamp')
        for field in simple_fields:
            # attribute name and dict key are identical for these fields
            setattr(self, field, d[field])
        matched = re.match(r'^OrderedDict\((.+)\)$', d[u'futureStops'])
        if matched:
            self.futureStops = OrderedDict(ast.literal_eval(matched.group(1)))
        self.timeStamp = d[u'timestamp']
"noreply@github.com"
] | wangzilinwzl.noreply@github.com |
aacd4d9f8e9e86ba2ae0a5c0d5aed7be182347ba | 2fd2aeda6e15fd8b4f9550aec2bed9699b5c8482 | /4 Convolutional Neural Networks/Week 3/Car Detection for Autonomous Driving/Autonomous_driving_application_Car_detection_v3a.py | fe14b74dc8237576ac04c4ce62e8ae70831885a2 | [
"CC-BY-4.0",
"MIT"
] | permissive | saadashraf/Deep-Learning-Specialization | 92ddf01a83431f5c02878e88b729d2b716c9d729 | 46f9a4762ad7c99b736cf683bf0c299a4dc413fc | refs/heads/master | 2021-02-07T03:00:09.663607 | 2020-06-20T16:56:54 | 2020-06-20T16:56:54 | 243,975,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44,948 | py |
# coding: utf-8
# # Autonomous driving - Car detection
#
# Welcome to your week 3 programming assignment. You will learn about object detection using the very powerful YOLO model. Many of the ideas in this notebook are described in the two YOLO papers: [Redmon et al., 2016](https://arxiv.org/abs/1506.02640) and [Redmon and Farhadi, 2016](https://arxiv.org/abs/1612.08242).
#
# **You will learn to**:
# - Use object detection on a car detection dataset
# - Deal with bounding boxes
#
#
# ## <font color='darkblue'>Updates</font>
#
# #### If you were working on the notebook before this update...
# * The current notebook is version "3a".
# * You can find your original work saved in the notebook with the previous version name ("v3")
# * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory.
#
# #### List of updates
# * Clarified "YOLO" instructions preceding the code.
# * Added details about anchor boxes.
# * Added explanation of how score is calculated.
# * `yolo_filter_boxes`: added additional hints. Clarify syntax for argmax and max.
# * `iou`: clarify instructions for finding the intersection.
# * `iou`: give variable names for all 8 box vertices, for clarity. Adds `width` and `height` variables for clarity.
# * `iou`: add test cases to check handling of non-intersecting boxes, intersection at vertices, or intersection at edges.
# * `yolo_non_max_suppression`: clarify syntax for tf.image.non_max_suppression and keras.gather.
# * "convert output of the model to usable bounding box tensors": Provides a link to the definition of `yolo_head`.
# * `predict`: hint on calling sess.run.
# * Spelling, grammar, wording and formatting updates to improve clarity.
# ## Import libraries
# Run the following cell to load the packages and dependencies that you will find useful as you build the object detector!
# In[1]:
import argparse
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import scipy.io
import scipy.misc
import numpy as np
import pandas as pd
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes
from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body
get_ipython().magic('matplotlib inline')
# **Important Note**: As you can see, we import Keras's backend as K. This means that to use a Keras function in this notebook, you will need to write: `K.function(...)`.
# ## 1 - Problem Statement
#
# You are working on a self-driving car. As a critical component of this project, you'd like to first build a car detection system. To collect data, you've mounted a camera to the hood (meaning the front) of the car, which takes pictures of the road ahead every few seconds while you drive around.
#
# <center>
# <video width="400" height="200" src="nb_images/road_video_compressed2.mp4" type="video/mp4" controls>
# </video>
# </center>
#
# <caption><center> Pictures taken from a car-mounted camera while driving around Silicon Valley. <br> We thank [drive.ai](htps://www.drive.ai/) for providing this dataset.
# </center></caption>
#
# You've gathered all these images into a folder and have labelled them by drawing bounding boxes around every car you found. Here's an example of what your bounding boxes look like.
#
# <img src="nb_images/box_label.png" style="width:500px;height:250;">
# <caption><center> <u> **Figure 1** </u>: **Definition of a box**<br> </center></caption>
#
# If you have 80 classes that you want the object detector to recognize, you can represent the class label $c$ either as an integer from 1 to 80, or as an 80-dimensional vector (with 80 numbers) one component of which is 1 and the rest of which are 0. The video lectures had used the latter representation; in this notebook, we will use both representations, depending on which is more convenient for a particular step.
#
# In this exercise, you will learn how "You Only Look Once" (YOLO) performs object detection, and then apply it to car detection. Because the YOLO model is very computationally expensive to train, we will load pre-trained weights for you to use.
# ## 2 - YOLO
# "You Only Look Once" (YOLO) is a popular algorithm because it achieves high accuracy while also being able to run in real-time. This algorithm "only looks once" at the image in the sense that it requires only one forward propagation pass through the network to make predictions. After non-max suppression, it then outputs recognized objects together with the bounding boxes.
#
# ### 2.1 - Model details
#
# #### Inputs and outputs
# - The **input** is a batch of images, and each image has the shape (m, 608, 608, 3)
# - The **output** is a list of bounding boxes along with the recognized classes. Each bounding box is represented by 6 numbers $(p_c, b_x, b_y, b_h, b_w, c)$ as explained above. If you expand $c$ into an 80-dimensional vector, each bounding box is then represented by 85 numbers.
#
# #### Anchor Boxes
# * Anchor boxes are chosen by exploring the training data to choose reasonable height/width ratios that represent the different classes. For this assignment, 5 anchor boxes were chosen for you (to cover the 80 classes), and stored in the file './model_data/yolo_anchors.txt'
# * The dimension for anchor boxes is the second to last dimension in the encoding: $(m, n_H,n_W,anchors,classes)$.
# * The YOLO architecture is: IMAGE (m, 608, 608, 3) -> DEEP CNN -> ENCODING (m, 19, 19, 5, 85).
#
#
# #### Encoding
# Let's look in greater detail at what this encoding represents.
#
# <img src="nb_images/architecture.png" style="width:700px;height:400;">
# <caption><center> <u> **Figure 2** </u>: **Encoding architecture for YOLO**<br> </center></caption>
#
# If the center/midpoint of an object falls into a grid cell, that grid cell is responsible for detecting that object.
# Since we are using 5 anchor boxes, each of the 19 x19 cells thus encodes information about 5 boxes. Anchor boxes are defined only by their width and height.
#
# For simplicity, we will flatten the last two last dimensions of the shape (19, 19, 5, 85) encoding. So the output of the Deep CNN is (19, 19, 425).
#
# <img src="nb_images/flatten.png" style="width:700px;height:400;">
# <caption><center> <u> **Figure 3** </u>: **Flattening the last two last dimensions**<br> </center></caption>
# #### Class score
#
# Now, for each box (of each cell) we will compute the following element-wise product and extract a probability that the box contains a certain class.
# The class score is $score_{c,i} = p_{c} \times c_{i}$: the probability that there is an object $p_{c}$ times the probability that the object is a certain class $c_{i}$.
#
# <img src="nb_images/probability_extraction.png" style="width:700px;height:400;">
# <caption><center> <u> **Figure 4** </u>: **Find the class detected by each box**<br> </center></caption>
#
# ##### Example of figure 4
# * In figure 4, let's say for box 1 (cell 1), the probability that an object exists is $p_{1}=0.60$. So there's a 60% chance that an object exists in box 1 (cell 1).
# * The probability that the object is the class "category 3 (a car)" is $c_{3}=0.73$.
# * The score for box 1 and for category "3" is $score_{1,3}=0.60 \times 0.73 = 0.44$.
# * Let's say we calculate the score for all 80 classes in box 1, and find that the score for the car class (class 3) is the maximum. So we'll assign the score 0.44 and class "3" to this box "1".
#
# #### Visualizing classes
# Here's one way to visualize what YOLO is predicting on an image:
# - For each of the 19x19 grid cells, find the maximum of the probability scores (taking a max across the 80 classes, one maximum for each of the 5 anchor boxes).
# - Color that grid cell according to what object that grid cell considers the most likely.
#
# Doing this results in this picture:
#
# <img src="nb_images/proba_map.png" style="width:300px;height:300;">
# <caption><center> <u> **Figure 5** </u>: Each one of the 19x19 grid cells is colored according to which class has the largest predicted probability in that cell.<br> </center></caption>
#
# Note that this visualization isn't a core part of the YOLO algorithm itself for making predictions; it's just a nice way of visualizing an intermediate result of the algorithm.
#
# #### Visualizing bounding boxes
# Another way to visualize YOLO's output is to plot the bounding boxes that it outputs. Doing that results in a visualization like this:
#
# <img src="nb_images/anchor_map.png" style="width:200px;height:200;">
# <caption><center> <u> **Figure 6** </u>: Each cell gives you 5 boxes. In total, the model predicts: 19x19x5 = 1805 boxes just by looking once at the image (one forward pass through the network)! Different colors denote different classes. <br> </center></caption>
#
# #### Non-Max suppression
# In the figure above, we plotted only boxes for which the model had assigned a high probability, but this is still too many boxes. You'd like to reduce the algorithm's output to a much smaller number of detected objects.
#
# To do so, you'll use **non-max suppression**. Specifically, you'll carry out these steps:
# - Get rid of boxes with a low score (meaning, the box is not very confident about detecting a class; either due to the low probability of any object, or low probability of this particular class).
# - Select only one box when several boxes overlap with each other and detect the same object.
#
#
# ### 2.2 - Filtering with a threshold on class scores
#
# You are going to first apply a filter by thresholding. You would like to get rid of any box for which the class "score" is less than a chosen threshold.
#
# The model gives you a total of 19x19x5x85 numbers, with each box described by 85 numbers. It is convenient to rearrange the (19,19,5,85) (or (19,19,425)) dimensional tensor into the following variables:
# - `box_confidence`: tensor of shape $(19 \times 19, 5, 1)$ containing $p_c$ (confidence probability that there's some object) for each of the 5 boxes predicted in each of the 19x19 cells.
# - `boxes`: tensor of shape $(19 \times 19, 5, 4)$ containing the midpoint and dimensions $(b_x, b_y, b_h, b_w)$ for each of the 5 boxes in each cell.
# - `box_class_probs`: tensor of shape $(19 \times 19, 5, 80)$ containing the "class probabilities" $(c_1, c_2, ... c_{80})$ for each of the 80 classes for each of the 5 boxes per cell.
#
# #### **Exercise**: Implement `yolo_filter_boxes()`.
# 1. Compute box scores by doing the elementwise product as described in Figure 4 ($p \times c$).
# The following code may help you choose the right operator:
# ```python
# a = np.random.randn(19*19, 5, 1)
# b = np.random.randn(19*19, 5, 80)
# c = a * b # shape of c will be (19*19, 5, 80)
# ```
# This is an example of **broadcasting** (multiplying vectors of different sizes).
#
# 2. For each box, find:
# - the index of the class with the maximum box score
# - the corresponding box score
#
# **Useful references**
# * [Keras argmax](https://keras.io/backend/#argmax)
# * [Keras max](https://keras.io/backend/#max)
#
# **Additional Hints**
# * For the `axis` parameter of `argmax` and `max`, if you want to select the **last** axis, one way to do so is to set `axis=-1`. This is similar to Python array indexing, where you can select the last position of an array using `arrayname[-1]`.
# * Applying `max` normally collapses the axis for which the maximum is applied. `keepdims=False` is the default option, and allows that dimension to be removed. We don't need to keep the last dimension after applying the maximum here.
# * Even though the documentation shows `keras.backend.argmax`, use `keras.argmax`. Similarly, use `keras.max`.
#
#
# 3. Create a mask by using a threshold. As a reminder: `([0.9, 0.3, 0.4, 0.5, 0.1] < 0.4)` returns: `[False, True, False, False, True]`. The mask should be True for the boxes you want to keep.
#
# 4. Use TensorFlow to apply the mask to `box_class_scores`, `boxes` and `box_classes` to filter out the boxes we don't want. You should be left with just the subset of boxes you want to keep.
#
# **Useful reference**:
# * [boolean mask](https://www.tensorflow.org/api_docs/python/tf/boolean_mask)
#
# **Additional Hints**:
# * For the `tf.boolean_mask`, we can keep the default `axis=None`.
#
# **Reminder**: to call a Keras function, you should use `K.function(...)`.
# In[2]:
# GRADED FUNCTION: yolo_filter_boxes
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6):
    """Filters YOLO boxes by thresholding on object and class confidence.
    Arguments:
    box_confidence -- tensor of shape (19, 19, 5, 1), P(object) for each anchor box in each cell
    boxes -- tensor of shape (19, 19, 5, 4), (b_x, b_y, b_h, b_w) coordinates of each box
    box_class_probs -- tensor of shape (19, 19, 5, 80), per-class probabilities for each box
    threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box
    Returns:
    scores -- tensor of shape (None,), containing the class probability score for selected boxes
    boxes -- tensor of shape (None, 4), containing (b_x, b_y, b_h, b_w) coordinates of selected boxes
    classes -- tensor of shape (None,), containing the index of the class detected by the selected boxes
    Note: "None" is here because you don't know the exact number of selected boxes, as it depends on the threshold.
    For example, the actual output size of scores would be (10,) if there are 10 boxes.
    """
    # Step 1: Compute box scores. Broadcasting multiplies the single confidence
    # value against all 80 class probabilities: (19,19,5,1)*(19,19,5,80) -> (19,19,5,80).
    box_scores = box_confidence * box_class_probs
    # Step 2: For each box, keep the index of the best class and its score.
    # axis=-1 collapses the 80-class dimension.
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1)
    # Step 3: Mask of boxes to keep. Use >= so a box scoring exactly `threshold`
    # is kept, matching the documented contract ("discard if score < threshold");
    # the original `>` incorrectly dropped the equality case.
    filtering_mask = box_class_scores >= threshold
    # Step 4: Apply the mask to scores, boxes and classes to keep only the
    # selected boxes; the leading dimension becomes dynamic (None).
    scores = tf.boolean_mask(box_class_scores, filtering_mask)
    boxes = tf.boolean_mask(boxes, filtering_mask)
    classes = tf.boolean_mask(box_classes, filtering_mask)
    return scores, boxes, classes
# In[3]:
# Smoke test for yolo_filter_boxes on random tensors (TF1 graph mode).
# NOTE(review): inputs are Gaussian noise, not valid probabilities, so this
# only checks shapes and that the graph runs (see the note below the cell).
with tf.Session() as test_a:
    box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1)
    boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed = 1)
    box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = 0.5)
    # Evaluate sample elements and the static shapes of the outputs.
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    # Static shapes are (?,) / (?, 4) -- the kept-box count is dynamic.
    print("scores.shape = " + str(scores.shape))
    print("boxes.shape = " + str(boxes.shape))
    print("classes.shape = " + str(classes.shape))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **scores[2]**
# </td>
# <td>
# 10.7506
# </td>
# </tr>
# <tr>
# <td>
# **boxes[2]**
# </td>
# <td>
# [ 8.42653275 3.27136683 -0.5313437 -4.94137383]
# </td>
# </tr>
#
# <tr>
# <td>
# **classes[2]**
# </td>
# <td>
# 7
# </td>
# </tr>
# <tr>
# <td>
# **scores.shape**
# </td>
# <td>
# (?,)
# </td>
# </tr>
# <tr>
# <td>
# **boxes.shape**
# </td>
# <td>
# (?, 4)
# </td>
# </tr>
#
# <tr>
# <td>
# **classes.shape**
# </td>
# <td>
# (?,)
# </td>
# </tr>
#
# </table>
# **Note** In the test for `yolo_filter_boxes`, we're using random numbers to test the function. In real data, the `box_class_probs` would contain non-zero values between 0 and 1 for the probabilities. The box coordinates in `boxes` would also be chosen so that lengths and heights are non-negative.
# ### 2.3 - Non-max suppression ###
#
# Even after filtering by thresholding over the class scores, you still end up with a lot of overlapping boxes. A second filter for selecting the right boxes is called non-maximum suppression (NMS).
# <img src="nb_images/non-max-suppression.png" style="width:500px;height:400;">
# <caption><center> <u> **Figure 7** </u>: In this example, the model has predicted 3 cars, but it's actually 3 predictions of the same car. Running non-max suppression (NMS) will select only the most accurate (highest probability) of the 3 boxes. <br> </center></caption>
#
# Non-max suppression uses the very important function called **"Intersection over Union"**, or IoU.
# <img src="nb_images/iou.png" style="width:500px;height:400;">
# <caption><center> <u> **Figure 8** </u>: Definition of "Intersection over Union". <br> </center></caption>
#
# #### **Exercise**: Implement iou(). Some hints:
# - In this code, we use the convention that (0,0) is the top-left corner of an image, (1,0) is the upper-right corner, and (1,1) is the lower-right corner. In other words, the (0,0) origin starts at the top left corner of the image. As x increases, we move to the right. As y increases, we move down.
# - For this exercise, we define a box using its two corners: upper left $(x_1, y_1)$ and lower right $(x_2,y_2)$, instead of using the midpoint, height and width. (This makes it a bit easier to calculate the intersection).
# - To calculate the area of a rectangle, multiply its height $(y_2 - y_1)$ by its width $(x_2 - x_1)$. (Since $(x_1,y_1)$ is the top left and $(x_2,y_2)$ is the bottom right, these differences should be non-negative.)
# - To find the **intersection** of the two boxes $(xi_{1}, yi_{1}, xi_{2}, yi_{2})$:
# - Feel free to draw some examples on paper to clarify this conceptually.
# - The top left corner of the intersection $(xi_{1}, yi_{1})$ is found by comparing the top left corners $(x_1, y_1)$ of the two boxes and finding a vertex that has an x-coordinate that is closer to the right, and y-coordinate that is closer to the bottom.
# - The bottom right corner of the intersection $(xi_{2}, yi_{2})$ is found by comparing the bottom right corners $(x_2,y_2)$ of the two boxes and finding a vertex whose x-coordinate is closer to the left, and the y-coordinate that is closer to the top.
# - The two boxes **may have no intersection**. You can detect this if the intersection coordinates you calculate end up being the top right and/or bottom left corners of an intersection box. Another way to think of this is if you calculate the height $(y_2 - y_1)$ or width $(x_2 - x_1)$ and find that at least one of these lengths is negative, then there is no intersection (intersection area is zero).
# - The two boxes may intersect at the **edges or vertices**, in which case the intersection area is still zero. This happens when either the height or width (or both) of the calculated intersection is zero.
#
#
# **Additional Hints**
#
# - `xi1` = **max**imum of the x1 coordinates of the two boxes
# - `yi1` = **max**imum of the y1 coordinates of the two boxes
# - `xi2` = **min**imum of the x2 coordinates of the two boxes
# - `yi2` = **min**imum of the y2 coordinates of the two boxes
# - `inter_area` = You can use `max(height, 0)` and `max(width, 0)`
#
# In[4]:
# GRADED FUNCTION: iou
def iou(box1, box2):
    """Compute the intersection over union (IoU) of two axis-aligned boxes.
    Arguments:
    box1 -- first box, list object with coordinates (box1_x1, box1_y1, box1_x2, box_1_y2)
    box2 -- second box, list object with coordinates (box2_x1, box2_y1, box2_x2, box2_y2)
    """
    # Unpack the (top-left, bottom-right) corners of each box.
    ax1, ay1, ax2, ay2 = box1
    bx1, by1, bx2, by2 = box2
    # Intersection rectangle: rightmost/lowest of the top-left corners and
    # leftmost/highest of the bottom-right corners.
    overlap_w = min(ax2, bx2) - max(ax1, bx1)
    overlap_h = min(ay2, by2) - max(ay1, by1)
    # Non-positive width or height means the boxes only touch (or miss entirely),
    # so the intersection area is zero.
    inter_area = overlap_w * overlap_h if overlap_w > 0 and overlap_h > 0 else 0
    # Union(A, B) = area(A) + area(B) - Inter(A, B)
    area_a = (ax2 - ax1) * (ay2 - ay1)
    area_b = (bx2 - bx1) * (by2 - by1)
    return inter_area / (area_a + area_b - inter_area)
# In[5]:
## Test case 1: boxes intersect (expected iou = 1/7, see expected output below)
box1 = (2, 1, 4, 3)
box2 = (1, 2, 3, 4)
print("iou for intersecting boxes = " + str(iou(box1, box2)))
## Test case 2: boxes do not intersect (expected 0.0)
box1 = (1,2,3,4)
box2 = (5,6,7,8)
print("iou for non-intersecting boxes = " + str(iou(box1,box2)))
## Test case 3: boxes intersect at vertices only (zero-area overlap, expected 0.0)
box1 = (1,1,2,2)
box2 = (2,2,3,3)
print("iou for boxes that only touch at vertices = " + str(iou(box1,box2)))
## Test case 4: boxes intersect at edge only (zero-area overlap, expected 0.0)
box1 = (1,1,3,3)
box2 = (2,3,3,4)
print("iou for boxes that only touch at edges = " + str(iou(box1,box2)))
# **Expected Output**:
#
# ```
# iou for intersecting boxes = 0.14285714285714285
# iou for non-intersecting boxes = 0.0
# iou for boxes that only touch at vertices = 0.0
# iou for boxes that only touch at edges = 0.0
# ```
# #### YOLO non-max suppression
#
# You are now ready to implement non-max suppression. The key steps are:
# 1. Select the box that has the highest score.
# 2. Compute the overlap of this box with all other boxes, and remove boxes that overlap significantly (iou >= `iou_threshold`).
# 3. Go back to step 1 and iterate until there are no more boxes with a lower score than the currently selected box.
#
# This will remove all boxes that have a large overlap with the selected boxes. Only the "best" boxes remain.
#
# **Exercise**: Implement yolo_non_max_suppression() using TensorFlow. TensorFlow has two built-in functions that are used to implement non-max suppression (so you don't actually need to use your `iou()` implementation):
#
# ** Reference documentation **
#
# - [tf.image.non_max_suppression()](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression)
# ```
# tf.image.non_max_suppression(
# boxes,
# scores,
# max_output_size,
# iou_threshold=0.5,
# name=None
# )
# ```
# Note that in the version of tensorflow used here, there is no parameter `score_threshold` (it's shown in the documentation for the latest version) so trying to set this value will result in an error message: *got an unexpected keyword argument 'score_threshold.*
#
# - [K.gather()](https://www.tensorflow.org/api_docs/python/tf/keras/backend/gather)
# Even though the documentation shows `tf.keras.backend.gather()`, you can use `keras.gather()`.
# ```
# keras.gather(
# reference,
# indices
# )
# ```
# In[6]:
# GRADED FUNCTION: yolo_non_max_suppression
def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):
    """
    Applies Non-max suppression (NMS) to set of boxes
    Arguments:
    scores -- tensor of shape (None,), output of yolo_filter_boxes()
    boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later)
    classes -- tensor of shape (None,), output of yolo_filter_boxes()
    max_boxes -- integer, maximum number of predicted boxes you'd like
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering
    Returns:
    scores -- tensor of shape (, None), predicted score for each box
    boxes -- tensor of shape (4, None), predicted box coordinates
    classes -- tensor of shape (, None), predicted class for each box
    Note: The "None" dimension of the output tensors has obviously to be less than max_boxes. Note also that this
    function will transpose the shapes of scores, boxes, classes. This is made for convenience.
    """
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')     # tensor to be used in tf.image.non_max_suppression()
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))  # initialize variable max_boxes_tensor
    # Indices of the boxes kept by NMS. Two fixes over the original:
    #   * pass the caller's `iou_threshold` instead of a hard-coded 0.5 (the
    #     parameter was silently ignored);
    #   * pass the initialized `max_boxes_tensor` (it was created but unused,
    #     with the plain Python int passed instead).
    nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor,
                                               iou_threshold=iou_threshold)
    # Keep only the NMS-selected entries of each output tensor.
    scores = K.gather(scores, nms_indices)
    boxes = K.gather(boxes, nms_indices)
    classes = K.gather(classes, nms_indices)
    return scores, boxes, classes
# In[7]:
# Smoke test for yolo_non_max_suppression: 54 random "boxes" in, at most
# max_boxes (default 10) out. NOTE(review): random values, shape check only.
with tf.Session() as test_b:
    scores = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
    boxes = tf.random_normal([54, 4], mean=1, stddev=4, seed = 1)
    classes = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes)
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    # .eval() here so the printed shapes are the concrete post-NMS sizes.
    print("scores.shape = " + str(scores.eval().shape))
    print("boxes.shape = " + str(boxes.eval().shape))
    print("classes.shape = " + str(classes.eval().shape))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **scores[2]**
# </td>
# <td>
# 6.9384
# </td>
# </tr>
# <tr>
# <td>
# **boxes[2]**
# </td>
# <td>
# [-5.299932 3.13798141 4.45036697 0.95942086]
# </td>
# </tr>
#
# <tr>
# <td>
# **classes[2]**
# </td>
# <td>
# -2.24527
# </td>
# </tr>
# <tr>
# <td>
# **scores.shape**
# </td>
# <td>
# (10,)
# </td>
# </tr>
# <tr>
# <td>
# **boxes.shape**
# </td>
# <td>
# (10, 4)
# </td>
# </tr>
#
# <tr>
# <td>
# **classes.shape**
# </td>
# <td>
# (10,)
# </td>
# </tr>
#
# </table>
# ### 2.4 Wrapping up the filtering
#
# It's time to implement a function taking the output of the deep CNN (the 19x19x5x85 dimensional encoding) and filtering through all the boxes using the functions you've just implemented.
#
# **Exercise**: Implement `yolo_eval()` which takes the output of the YOLO encoding and filters the boxes using score threshold and NMS. There's just one last implementational detail you have to know. There're a few ways of representing boxes, such as via their corners or via their midpoint and height/width. YOLO converts between a few such formats at different times, using the following functions (which we have provided):
#
# ```python
# boxes = yolo_boxes_to_corners(box_xy, box_wh)
# ```
# which converts the yolo box coordinates (x,y,w,h) to box corners' coordinates (x1, y1, x2, y2) to fit the input of `yolo_filter_boxes`
# ```python
# boxes = scale_boxes(boxes, image_shape)
# ```
# YOLO's network was trained to run on 608x608 images. If you are testing this data on a different size image--for example, the car detection dataset had 720x1280 images--this step rescales the boxes so that they can be plotted on top of the original 720x1280 image.
#
# Don't worry about these two functions; we'll show you where they need to be called.
# In[8]:
# GRADED FUNCTION: yolo_eval
def yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):
    """
    Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes
    along with their scores, box coordinates and classes.
    Arguments:
    yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors:
                    box_confidence: tensor of shape (None, 19, 19, 5, 1)
                    box_xy: tensor of shape (None, 19, 19, 5, 2)
                    box_wh: tensor of shape (None, 19, 19, 5, 2)
                    box_class_probs: tensor of shape (None, 19, 19, 5, 80)
    image_shape -- tensor of shape (2,) containing the input shape (float32)
    max_boxes -- integer, maximum number of predicted boxes you'd like
    score_threshold -- real value; boxes whose best class score is below this are discarded
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering
    Returns:
    scores -- tensor of shape (None, ), predicted score for each box
    boxes -- tensor of shape (None, 4), predicted box coordinates
    classes -- tensor of shape (None,), predicted class for each box
    """
    # Unpack the four tensors produced by the YOLO encoder.
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
    # Convert (x, y, w, h) boxes to corner coordinates, the format the
    # filtering functions expect.
    corner_boxes = yolo_boxes_to_corners(box_xy, box_wh)
    # Score-filtering: drop boxes whose best class score is below the threshold.
    scores, kept_boxes, classes = yolo_filter_boxes(box_confidence, corner_boxes, box_class_probs, threshold=score_threshold)
    # Rescale the boxes from the 608x608 network input back to the original image size.
    kept_boxes = scale_boxes(kept_boxes, image_shape)
    # Non-max suppression removes overlapping detections.
    return yolo_non_max_suppression(scores, kept_boxes, classes, max_boxes=max_boxes, iou_threshold=iou_threshold)
# In[9]:
# End-to-end smoke test of yolo_eval on a random 4-tuple shaped like the
# encoder output. NOTE(review): random inputs, so only shapes are meaningful.
with tf.Session() as test_b:
    yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1),
                    tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
                    tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
                    tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1))
    scores, boxes, classes = yolo_eval(yolo_outputs)
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    print("scores.shape = " + str(scores.eval().shape))
    print("boxes.shape = " + str(boxes.eval().shape))
    print("classes.shape = " + str(classes.eval().shape))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **scores[2]**
# </td>
# <td>
# 138.791
# </td>
# </tr>
# <tr>
# <td>
# **boxes[2]**
# </td>
# <td>
# [ 1292.32971191 -278.52166748 3876.98925781 -835.56494141]
# </td>
# </tr>
#
# <tr>
# <td>
# **classes[2]**
# </td>
# <td>
# 54
# </td>
# </tr>
# <tr>
# <td>
# **scores.shape**
# </td>
# <td>
# (10,)
# </td>
# </tr>
# <tr>
# <td>
# **boxes.shape**
# </td>
# <td>
# (10, 4)
# </td>
# </tr>
#
# <tr>
# <td>
# **classes.shape**
# </td>
# <td>
# (10,)
# </td>
# </tr>
#
# </table>
# ## Summary for YOLO:
# - Input image (608, 608, 3)
# - The input image goes through a CNN, resulting in a (19,19,5,85) dimensional output.
# - After flattening the last two dimensions, the output is a volume of shape (19, 19, 425):
# - Each cell in a 19x19 grid over the input image gives 425 numbers.
# - 425 = 5 x 85 because each cell contains predictions for 5 boxes, corresponding to 5 anchor boxes, as seen in lecture.
# - 85 = 5 + 80 where 5 is because $(p_c, b_x, b_y, b_h, b_w)$ has 5 numbers, and 80 is the number of classes we'd like to detect
# - You then select only few boxes based on:
# - Score-thresholding: throw away boxes that have detected a class with a score less than the threshold
# - Non-max suppression: Compute the Intersection over Union and avoid selecting overlapping boxes
# - This gives you YOLO's final output.
# ## 3 - Test YOLO pre-trained model on images
# In this part, you are going to use a pre-trained model and test it on the car detection dataset. We'll need a session to execute the computation graph and evaluate the tensors.
# In[10]:
# Grab the Keras-managed TF session; used later to run the prediction graph.
sess = K.get_session()
# ### 3.1 - Defining classes, anchors and image shape.
#
# * Recall that we are trying to detect 80 classes, and are using 5 anchor boxes.
# * We have gathered the information on the 80 classes and 5 boxes in two files "coco_classes.txt" and "yolo_anchors.txt".
# * We'll read class names and anchors from text files.
# * The car detection dataset has 720x1280 images, which we've pre-processed into 608x608 images.
# In[11]:
# Load the 80 COCO class names and the 5 anchor-box shapes from text files.
class_names = read_classes("model_data/coco_classes.txt")
anchors = read_anchors("model_data/yolo_anchors.txt")
# Original image size as floats -- the car-detection dataset is 720x1280.
image_shape = (720., 1280.)
# ### 3.2 - Loading a pre-trained model
#
# * Training a YOLO model takes a very long time and requires a fairly large dataset of labelled bounding boxes for a large range of target classes.
# * You are going to load an existing pre-trained Keras YOLO model stored in "yolo.h5".
# * These weights come from the official YOLO website, and were converted using a function written by Allan Zelener. References are at the end of this notebook. Technically, these are the parameters from the "YOLOv2" model, but we will simply refer to it as "YOLO" in this notebook.
#
# Run the cell below to load the model from this file.
# In[12]:
# Load the pre-trained YOLOv2 Keras model (weights converted from the official YOLO release).
yolo_model = load_model("model_data/yolo.h5")
# This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains.
# In[13]:
# Print the layer-by-layer architecture of the loaded model.
yolo_model.summary()
# **Note**: On some computers, you may see a warning message from Keras. Don't worry about it if you do--it is fine.
#
# **Reminder**: this model converts a preprocessed batch of input images (shape: (m, 608, 608, 3)) into a tensor of shape (m, 19, 19, 5, 85) as explained in Figure (2).
# ### 3.3 - Convert output of the model to usable bounding box tensors
#
# The output of `yolo_model` is a (m, 19, 19, 5, 85) tensor that needs to pass through non-trivial processing and conversion. The following cell does that for you.
#
# If you are curious about how `yolo_head` is implemented, you can find the function definition in the file ['keras_yolo.py'](https://github.com/allanzelener/YAD2K/blob/master/yad2k/models/keras_yolo.py). The file is located in your workspace in this path 'yad2k/models/keras_yolo.py'.
# In[14]:
# Convert the raw (m, 19, 19, 5, 85) model output into the 4-tensor format yolo_eval expects.
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
# You added `yolo_outputs` to your graph. This set of 4 tensors is ready to be used as input by your `yolo_eval` function.
# ### 3.4 - Filtering boxes
#
# `yolo_outputs` gave you all the predicted boxes of `yolo_model` in the correct format. You're now ready to perform filtering and select only the best boxes. Let's now call `yolo_eval`, which you had previously implemented, to do this.
# In[15]:
# Build the graph nodes producing the final filtered predictions for 720x1280 images.
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
# ### 3.5 - Run the graph on an image
#
# Let the fun begin. You have created a graph that can be summarized as follows:
#
# 1. <font color='purple'> yolo_model.input </font> is given to `yolo_model`. The model is used to compute the output <font color='purple'> yolo_model.output </font>
# 2. <font color='purple'> yolo_model.output </font> is processed by `yolo_head`. It gives you <font color='purple'> yolo_outputs </font>
# 3. <font color='purple'> yolo_outputs </font> goes through a filtering function, `yolo_eval`. It outputs your predictions: <font color='purple'> scores, boxes, classes </font>
#
# **Exercise**: Implement predict() which runs the graph to test YOLO on an image.
# You will need to run a TensorFlow session, to have it compute `scores, boxes, classes`.
#
# The code below also uses the following function:
# ```python
# image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))
# ```
# which outputs:
# - image: a python (PIL) representation of your image used for drawing boxes. You won't need to use it.
# - image_data: a numpy-array representing the image. This will be the input to the CNN.
#
# **Important note**: when a model uses BatchNorm (as is the case in YOLO), you will need to pass an additional placeholder in the feed_dict {K.learning_phase(): 0}.
#
# #### Hint: Using the TensorFlow Session object
# * Recall that above, we called `K.get_session()` and saved the Session object in `sess`.
# * To evaluate a list of tensors, we call `sess.run()` like this:
# ```
# sess.run(fetches=[tensor1,tensor2,tensor3],
# feed_dict={yolo_model.input: the_input_variable,
# K.learning_phase():0
# }
# ```
# * Notice that the variables `scores, boxes, classes` are not passed into the `predict` function, but these are global variables that you will use within the `predict` function.
# In[18]:
def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.
    Arguments:
    sess -- your tensorflow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.
    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes
    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes.
    """
    # Load the image and resize it to the 608x608 input the network expects.
    pil_image, model_input = preprocess_image("images/" + image_file, model_image_size = (608, 608))
    # Evaluate the module-level prediction tensors for this image.
    # K.learning_phase(): 0 is required because the model uses BatchNorm.
    run_fetches = [scores, boxes, classes]
    run_feed = {yolo_model.input: model_input, K.learning_phase(): 0}
    out_scores, out_boxes, out_classes = sess.run(fetches=run_fetches, feed_dict=run_feed)
    # Report how many boxes survived filtering and NMS.
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Draw the detections onto the PIL image and save the annotated copy.
    box_colors = generate_colors(class_names)
    draw_boxes(pil_image, out_scores, out_boxes, out_classes, class_names, box_colors)
    saved_path = os.path.join("out", image_file)
    pil_image.save(saved_path, quality=90)
    # Re-read the saved file and display it in the notebook.
    annotated = scipy.misc.imread(saved_path)
    imshow(annotated)
    return out_scores, out_boxes, out_classes
# Run the following cell on the "test.jpg" image to verify that your function is correct.
# In[19]:
# Run the full pipeline once on images/test.jpg (expected: 7 boxes, see table below).
out_scores, out_boxes, out_classes = predict(sess, "test.jpg")
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **Found 7 boxes for test.jpg**
# </td>
# </tr>
# <tr>
# <td>
# **car**
# </td>
# <td>
# 0.60 (925, 285) (1045, 374)
# </td>
# </tr>
# <tr>
# <td>
# **car**
# </td>
# <td>
# 0.66 (706, 279) (786, 350)
# </td>
# </tr>
# <tr>
# <td>
# **bus**
# </td>
# <td>
# 0.67 (5, 266) (220, 407)
# </td>
# </tr>
# <tr>
# <td>
# **car**
# </td>
# <td>
# 0.70 (947, 324) (1280, 705)
# </td>
# </tr>
# <tr>
# <td>
# **car**
# </td>
# <td>
# 0.74 (159, 303) (346, 440)
# </td>
# </tr>
# <tr>
# <td>
# **car**
# </td>
# <td>
# 0.80 (761, 282) (942, 412)
# </td>
# </tr>
# <tr>
# <td>
# **car**
# </td>
# <td>
# 0.89 (367, 300) (745, 648)
# </td>
# </tr>
# </table>
# The model you've just run is actually able to detect 80 different classes listed in "coco_classes.txt". To test the model on your own images:
# 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
# 2. Add your image to this Jupyter Notebook's directory, in the "images" folder
# 3. Write your image's name in the cell above code
# 4. Run the code and see the output of the algorithm!
#
# If you were to run your session in a for loop over all your images. Here's what you would get:
#
# <center>
# <video width="400" height="200" src="nb_images/pred_video_compressed2.mp4" type="video/mp4" controls>
# </video>
# </center>
#
# <caption><center> Predictions of the YOLO model on pictures taken from a camera while driving around the Silicon Valley <br> Thanks [drive.ai](https://www.drive.ai/) for providing this dataset! </center></caption>
#
# ## <font color='darkblue'>What you should remember:
#
# - YOLO is a state-of-the-art object detection model that is fast and accurate
# - It runs an input image through a CNN which outputs a 19x19x5x85 dimensional volume.
# - The encoding can be seen as a grid where each of the 19x19 cells contains information about 5 boxes.
# - You filter through all the boxes using non-max suppression. Specifically:
# - Score thresholding on the probability of detecting a class to keep only accurate (high probability) boxes
# - Intersection over Union (IoU) thresholding to eliminate overlapping boxes
# - Because training a YOLO model from randomly initialized weights is non-trivial and requires a large dataset as well as lot of computation, we used previously trained model parameters in this exercise. If you wish, you can also try fine-tuning the YOLO model with your own dataset, though this would be a fairly non-trivial exercise.
# **References**: The ideas presented in this notebook came primarily from the two YOLO papers. The implementation here also took significant inspiration and used many components from Allan Zelener's GitHub repository. The pre-trained weights used in this exercise came from the official YOLO website.
# - Joseph Redmon, Santosh Divvala, Ross Girshick, Ali Farhadi - [You Only Look Once: Unified, Real-Time Object Detection](https://arxiv.org/abs/1506.02640) (2015)
# - Joseph Redmon, Ali Farhadi - [YOLO9000: Better, Faster, Stronger](https://arxiv.org/abs/1612.08242) (2016)
# - Allan Zelener - [YAD2K: Yet Another Darknet 2 Keras](https://github.com/allanzelener/YAD2K)
# - The official YOLO website (https://pjreddie.com/darknet/yolo/)
# **Car detection dataset**:
# <a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">The Drive.ai Sample Dataset</span> (provided by drive.ai) is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>. We are grateful to Brody Huval, Chih Hu and Rahul Patel for providing this data.
# In[ ]:
| [
"saadbinashraf14@gmail.com"
] | saadbinashraf14@gmail.com |
dfad5f6ea7eb74e5d4322d0071934b582395383f | 0bf3d0c1c3498ab1b83d97db4952473961c1b7b3 | /model.py | 5ab133e48d6165234a1d80c7d0d66cc2fbdeee50 | [] | no_license | francislata/Qualitative-Bankruptcy | 9c1eeddbe2df6d56763397937ef9faec8c75137d | d67d88a3027d68ec516322ca4209dbfe3cb05a17 | refs/heads/master | 2020-06-06T04:40:21.972153 | 2018-05-01T13:04:52 | 2018-05-01T13:04:52 | 192,640,069 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | ####
####
#### Creates, trains, and evaluates the performance of the model
####
####
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
'''
Creates a logistic regression model
'''
def create_logistic_regression_model(X_train, y_train, C=1.0):
lr = LogisticRegression(C=C, class_weight='balanced')
lr.fit(X_train, y_train.values.ravel())
return lr
'''
Creates a support vector machine model
'''
def create_SVM(X_train, y_train, C=1.0):
svc = SVC(C=C)
svc.fit(X_train, y_train.values.ravel())
return svc
'''
Calculates the classifier's accuracy
'''
def calculate_accuracy(classifier, X_test, y_test):
predictions = classifier.predict(X_test)
return accuracy_score(y_test, predictions)
| [
"francisalbertlata@gmail.com"
] | francisalbertlata@gmail.com |
f2b90c51b49583857902826574b0babfede18352 | 074815153c80531087cc95d20c5e74678a74140b | /voice_with_gtts.py | eae05542cc9da2f9116520e18b73b09b2b71cfd2 | [] | no_license | yosshor/generator_iterator_oop | 2f51c07445c3c08df510716b77e9ce78ff881d11 | c17b67811f07c91ac12a3ea8de9386d36f0367c7 | refs/heads/master | 2021-01-03T19:02:47.463329 | 2020-03-15T12:27:06 | 2020-03-15T12:27:06 | 240,201,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | """
@author: Yossi
"""
import pyaudio
import os
from gtts import gTTS
import time
import playsound
import speech_recognition as sr
def speak(text):
    """Synthesize `text` with Google TTS, save it to an mp3 and play it."""
    speech = gTTS(text=text, lang='en')
    mp3_path = 'vo1e.mp3'
    speech.save(mp3_path)
    playsound.playsound(mp3_path)
def get_audio():
    """Record one utterance from the default microphone and return the
    Google-recognized transcript; returns "" if recognition fails.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        audio = r.listen(source)
        said = ""
        try:
            said = r.recognize_google(audio)
            print(said)
        except Exception as e:
            # Best-effort: print the failure and fall through to return "".
            # Fixed message: the original printed the typo "Extention".
            print("Exception : " + str(e))
    return said
speak("hello yossi ")
# NOTE(review): get_audio() is called twice -- this first transcript is
# discarded, so the user must speak two separate times.
get_audio()
text = get_audio()
if "hello" in text:
    speak("hello, how are you")
if "what is your name" in text:
    speak("MY name is Tim")
# NOTE(review): duplicate imports -- gTTS and os were already imported at the top.
from gtts import gTTS
import os
# Save a fixed welcome message and play it with the external mpg321 player.
tts = gTTS(text="first time i'm using a package in next.py course", lang='en')
tts.save("welcome.mp3")
os.system("mpg321 welcome.mp3")
| [
"noreply@github.com"
] | yosshor.noreply@github.com |
6c581520c0a8ff756de56610a2aa4c7bfb50f4cd | 8622480303d72c305e0674355372fbca2ca77056 | /models.py | f77d12d50a27c6b67ed22d7a0cc22398249f4ce6 | [] | no_license | Deepaknkumar/book-reviews-goodreadsAPI | 000a780be340420249c746f712fc8ba207c280e2 | 9f0aa0aeff213750760f63aa7fea29b3d45556e5 | refs/heads/master | 2020-03-27T19:00:36.457603 | 2018-09-12T09:13:16 | 2018-09-12T09:13:16 | 146,959,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Book(db.Model):
    """A book available for review, keyed by its ISBN."""
    __tablename__ = "books"
    # ISBN uniquely identifies a book, so it doubles as the primary key.
    isbn = db.Column(db.String, primary_key=True)
    title = db.Column(db.String,nullable=False)
    author = db.Column(db.String,nullable=False)
    # Publication year; optional (nullable by default).
    year = db.Column(db.Integer)
class User(db.Model):
    """A registered user of the book-review site."""
    __tablename__ = "users"
    userid = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, nullable=False)
    email = db.Column(db.String, nullable=False)
    # Salt stored alongside the (presumably hashed) password -- TODO confirm
    # the hashing scheme against the registration code.
    passwordsalt = db.Column(db.String, nullable=False)
    password = db.Column(db.String, nullable=False)
    duration = db.Column(db.Integer, nullable=False)
    datecreated = db.Column(db.DATE, nullable=False)
class BookReview(db.Model):
__tablename__ = "bookreviews"
reviewid = db.Column(db.Integer, primary_key=True)
| [
"Deepaknkumar@users.noreply.github.com"
] | Deepaknkumar@users.noreply.github.com |
6894a6b396ff574611bbaab9f07d5bb7b3b63a6c | 063082a1edc9b4b99e1f3d617b8629aad409927b | /manage.py | 23000f6e486c30052ab7e8b67cdafc7083212465 | [] | no_license | vijay-pal/dj-cricket | 13c1f2729b5db6e5667d30f5b24083e6a0024ba2 | 3a5a601cde00d9d2f2ba9b4c8d08e36cdc9b9045 | refs/heads/master | 2022-11-25T14:26:05.478216 | 2020-01-30T10:09:55 | 2020-01-30T10:09:55 | 237,181,358 | 0 | 0 | null | 2022-11-22T05:16:47 | 2020-01-30T09:39:18 | Python | UTF-8 | Python | false | false | 539 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Default to this project's settings module unless the caller overrides it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cricket.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the two most common causes.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
| [
"vijaypal.vishwakarma@careers360.com"
] | vijaypal.vishwakarma@careers360.com |
d724ffcb1a40b3d90ae5e185e3cb7e2963d67e4e | b833e195b0ffa18f7aec56b7ff89995743d70559 | /app/__init__.py | d2a92b649d97e5e2fe19e1d82f0bb00c39bab700 | [] | no_license | y-himanen/english_finnish_coding_dictionary_website | 00c291b76b84bc0fd5ee3107f71736fd1f1de5b4 | c29a4dab4029fc264074e70b9ed2171008f4ac67 | refs/heads/master | 2023-04-01T20:32:49.099766 | 2021-04-16T07:19:11 | 2021-04-16T07:19:11 | 358,505,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# Load settings from the top-level `config` module.
app.config.from_object('config')
db = SQLAlchemy(app)
# Imported at the bottom on purpose: views/models presumably import `app`
# and `db` from this module, so importing them earlier would be circular --
# TODO confirm against app/views.py and app/models.py.
from app import views, models
"y.himanen@gmail.com"
] | y.himanen@gmail.com |
25eb28da4c15af658689383ec67271d21e30711e | 2e145222a18d4509d937951f5cec4df0e26ee86f | /vas/sqlfire/AgentInstances.py | c7014e3358ef52496bfff95762d437cb06c53a4c | [
"Apache-2.0"
] | permissive | vdreamakitex/vas-python-api | 7627b7e3fcf76c16b1ea8b9fb670fdb708eff083 | ce7148a2044863e078e78b47abbaafc426f732ee | refs/heads/master | 2021-01-18T05:13:25.459916 | 2012-11-05T09:58:45 | 2012-11-05T09:58:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,711 | py | # vFabric Administration Server API
# Copyright (c) 2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vas.shared.Instance import Instance
from vas.shared.MutableCollection import MutableCollection
class AgentInstances(MutableCollection):
    """The collection of agent instances: enumerate, create, and delete them.

    :ivar `vas.shared.Security.Security` security: The resource's security
    """

    def __init__(self, client, location):
        super(AgentInstances, self).__init__(client, location, 'agent-group-instances', AgentInstance)

    def create(self, installation, name, jvm_options=None):
        """Create a new agent instance.

        :param `vas.sqlfire.Installations.Installation` installation: The installation the instance will use
        :param str name: The instance's name
        :param list jvm_options: JVM options passed to the agent's JVM when
                                 it is started, or ``None`` for none
        :rtype: :class:`vas.sqlfire.AgentInstances.AgentInstance`
        :return: The new agent instance
        """
        body = {'name': name, 'installation': installation._location}
        if jvm_options is not None:
            body['jvm-options'] = jvm_options
        return self._create(body, 'agent-group-instance')
class AgentInstance(Instance):
    """An agent instance

    :ivar `vas.sqlfire.Groups.Group` group: The group that contains this instance
    :ivar `vas.sqlfire.Installations.Installation` installation: The installation that this instance is using
    :ivar list jvm_options: The JVM options that are passed to the agent's JVM
        when it is started
    :ivar `vas.sqlfire.AgentLiveConfigurations.AgentLiveConfigurations` live_configurations: The instance's live
        configurations
    :ivar str name: The instance's name
    :ivar list node_instances: The instance's individual node instances
    :ivar `vas.sqlfire.AgentPendingConfigurations.AgentPendingConfigurations` pending_configurations: The instance's pending
        configurations
    :ivar `vas.shared.Security.Security` security: The resource's security
    :ivar str state: Retrieves the state of the resource from the server.
        Will be one of:

        * ``STARTING``
        * ``STARTED``
        * ``STOPPING``
        * ``STOPPED``
    """

    @property
    def jvm_options(self):
        # Backed by the name-mangled __jvm_options, which reload() populates
        # from the server payload.
        return self.__jvm_options

    def __init__(self, client, location):
        super(AgentInstance, self).__init__(client, location, Group, Installation, AgentLiveConfigurations,
            AgentPendingConfigurations, AgentNodeInstance, 'agent-node-instance')

    def reload(self):
        """Reloads the agent instance's details from the server"""
        super(AgentInstance, self).reload()
        # Cache the JVM options out of the freshly-fetched details dict.
        self.__jvm_options = self._details['jvm-options']

    def update(self, installation=None, jvm_options=None):
        """Updates the instance

        :param `vas.sqlfire.Installations.Installation` installation: The installation to be used by the instance. If
            omitted or `None`, the configuration will not be changed
        :param list jvm_options: The JVM options that are passed to the agent's
            JVM when it is started. If omitted or `None`,
            the configuration will not be changed
        """
        # Only the fields actually supplied are POSTed; omitted fields keep
        # their server-side values.
        payload = {}
        if installation:
            payload['installation'] = installation._location
        if jvm_options is not None:
            payload['jvm-options'] = jvm_options
        self._client.post(self._location, payload)
        # Refresh local state so the object reflects the server's view.
        self.reload()

    def __str__(self):
        return "<{} name={} jvm_options={}>".format(self.__class__, self.name, self.__jvm_options)
from vas.sqlfire.AgentLiveConfigurations import AgentLiveConfigurations
from vas.sqlfire.AgentNodeInstances import AgentNodeInstance
from vas.sqlfire.AgentPendingConfigurations import AgentPendingConfigurations
from vas.sqlfire.Groups import Group
from vas.sqlfire.Installations import Installation
| [
"bhale@vmware.com"
] | bhale@vmware.com |
5768fb03e448aa751461332c7f06eb6eb3c301ff | 6e9960a37c4aac87e5677eff1ab8b2dcde6a43c8 | /ecommerce/settings/local.py | c81bf4f59835727498139a169ba44363ee6714ed | [
"Apache-2.0"
] | permissive | ferdyrod/basic-ecommerce | 57fe6cc7f0cfc2da41e733d98f5c985b5f5eb9cb | 46f4be17be09cbd7b80486f39ebf5eb745157a48 | refs/heads/master | 2021-01-19T16:24:26.724187 | 2013-10-08T05:09:14 | 2013-10-08T05:09:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,918 | py | # Django settings for ecommerce project.
from os.path import dirname, abspath, join
PROJECT_ROOT = dirname(dirname(dirname(abspath(__file__))))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'ecommerce.sqlite', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = join(PROJECT_ROOT, 'static', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = join(PROJECT_ROOT, 'static', 'static-only')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
join(PROJECT_ROOT, 'static', 'static'),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
TEMPLATE_DIRS = (
join(PROJECT_ROOT, 'static', 'templates')
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'south',
'registration',
'products',
'contact',
'cart',
'profiles',
'orders',
)
# Days a registration activation link stays valid (django-registration).
ACCOUNT_ACTIVATION_DAYS = 7
AUTH_PROFILE_MODULE = 'profiles.profile'

# Gmail SMTP settings. Bug fix: the host previously read 'stmp.gmail.com'
# (transposed letters), so no mail could ever be sent.
# NOTE(review): credentials should come from the environment, not the repo.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'Your_Email_Here'
EMAIL_HOST_PASSWORD = 'Your_Password_Here'
EMAIL_USE_TLS = True
| [
"ferdyrodriguez@gmail.com"
] | ferdyrodriguez@gmail.com |
60cca0554fc391c8910a0b8c969b20ba3d55b93c | 0799209af0ad6f1c675d1128f3acfc7fba6d44e4 | /leetcode_offer/55_balanced_BT/bbt.py | 44449f53aacba0f465eeb3c68dfbb7af18210792 | [] | no_license | qq714412541/leetcode | 27398351b9dd428f5bbb4835e437fabe36aa6251 | cf10aa55a65da250926cc31e9f170625f73ae62d | refs/heads/master | 2020-07-21T19:06:16.661100 | 2020-03-03T15:28:10 | 2020-03-03T15:28:10 | 206,950,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | class Solution:
def isBalanced(self, root: TreeNode) -> bool:
res,deep = self.re(root,1)
return res
def re(self,node,deep):
if not node:
return True,deep-1
else:
#print()
l,deepl = self.re(node.left,deep+1)
r,deepr = self.re(node.right,deep+1)
return abs(deepl-deepr)<=1 and l and r, max(deepl,deepr)
| [
"noreply@github.com"
] | qq714412541.noreply@github.com |
6a025822293d280b363c8ee09b8d2aaa45d207cd | 9c9874dcd52819b5c79cdc063d32d544aa73977e | /MindLink-Eumpy/real_time_detection/GUI/FaceFeatureReader.py | 9bf4fe8a9e2753373d721d85aac82253ac027962 | [
"MIT"
] | permissive | wozu-dichter/MindLink-Explorer | cbfe1e1a55a6cdccb7f040e90b2faa8c3d94880e | 531f646dcb493dce2575af3b9d77403ebc1f4a35 | refs/heads/master | 2023-06-27T10:57:58.411387 | 2021-07-24T11:09:46 | 2021-07-24T11:09:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,662 | py | # encoding: utf-8
'''
Created on Dec 23, 2018
@author: Yongrui Huang
'''
import keras
import keras.layers as L
import cv2
import random
import string
import os
from bokeh.themes import default # In which cases should I use bokeh in Eumpy? From Ruixin Lee
import queue
# class FaceFeatureReader(object)
class FaceFeatureReader(object):
    '''
    Visualizes what the face CNN "sees": runs a 48x48 grayscale face through
    deconvolution heads attached to the model's convolutional layers and
    saves the resulting feature maps as PNG files for the GUI to display.
    '''

    # __init__(self, graph)
    def __init__(self, graph):
        '''
        Constructor.

        Arguments:
            graph: the TensorFlow graph the Keras model lives in; every
                   prediction must run with this graph as default.
        '''
        # now we don't have .h5 model, so we can't use it(self.model).
        # self.model = keras.models.load_model('D:/eclipse/PythonWorkspaces/Eumpy/model/CNN_expression_baseline.h5')
        # self.model = keras.models.load_model("D:/PythonWorkPlaceALL/Eumpy-master/algorithm_implement/train_baseline_model_inFer2013/CNN_expression_baseline.h5")
        # self.model = keras.models.load_model("D:/workSpace/python_workspace/MindLink-Explorer/algorithm_implement/train_baseline_model_inFer2013/CNN_expression_baseline.h5")
        # self.model = keras.models.load_model("D:/workSpace/python_workspace/MindLink-Explorer/model/CNN_expression_baseline.h5")
        # NOTE(review): absolute Windows path -- breaks on any other machine;
        # should be configurable.
        self.model = keras.models.load_model("D:/workSpace/python_workspace/MindLink-Explorer/model/CNN_face_regression.h5")
        print("FaceFeatureReader.py....self.model...")
        self.face = None        # last face set via set_face(); 48x48 expected by format_face()
        self.used_face = False  # True once read_feature_map() has consumed self.face
        self.res = []           # PNG paths of the most recent feature-map batch
        self.graph = graph
        # One deconvolution head per convolutional layer of interest.
        self.first_layer = self.build_layer('conv2d_1')
        self.second_layer = self.build_layer('conv2d_2')
        self.third_layer = self.build_layer('conv2d_3')
        # FIFO of PNG paths scheduled for deletion (cache housekeeping).
        self.delete_queue = queue.Queue()
    # __init__(self, graph)

    # build_layer(self, layer_name)
    def build_layer(self, layer_name):
        '''
        Build a sub-model ending in a deconvolution over the named layer.

        Arguments:
            layer_name: accept 3 parameter: 'conv2d_1', 'conv2d_2', 'conv2d_3',
            represent the first, the second and the last convolutional
            layers respectively.
        '''
        with self.graph.as_default():
            layer = self.model.get_layer(layer_name).output
            # layer = L.Deconv2D(filters=32, kernel_size=(3, 3), padding = 'same')(layer)
            layer = L.Deconvolution2D(filters=32, kernel_size=(3, 3), padding='same')(layer)
            conv_layer_output = keras.models.Model(inputs=self.model.input, outputs=layer)
            return conv_layer_output
    # build_layer(self, layer_name)

    # delete_files(self)
    def delete_files(self):
        '''
        Delete queued cache files until at most 540 remain, releasing disk space.
        '''
        while self.delete_queue.qsize() > 540:
            file = self.delete_queue.get()
            if (os.path.exists(file)):
                os.remove(file)
    # delete_files(self)

    # revert_img(self, img)
    def revert_img(self, img):
        '''
        give more weight to the image pixel since they
        are normalized into -1~1
        '''
        # Scale activations back to the 0-255 pixel range for imwrite.
        # NOTE(review): docstring says -1~1 but the scaling assumes 0~1 --
        # confirm against the model's preprocessing.
        img = (img)*255
        return img
    # revert_img(self, img)

    # set_face(self, face)
    def set_face(self, face):
        '''
        Install a new face and mark the cached feature maps stale.

        Arguments:
            faces: the faces to process.
        '''
        self.face = face
        self.used_face = False
    # set_face(self, face)

    # format_face(self, face)
    def format_face(self, face):
        # Add batch and channel axes: (48, 48) -> (1, 48, 48, 1).
        return face.reshape(1, 48, 48, 1)
    # format_face(self, face)

    # read_layer(self, conv_layer_output, layer_name)
    def read_layer(self, conv_layer_output, layer_name):
        '''
        Write the feature maps of one deconvolution head to PNG files.

        Arguments:
            conv_layer_output: the sub-model built by build_layer().
            layer_name: accept 3 parameter: 'conv2d_1', 'conv2d_2', 'conv2d_3',
            represent the first, the second and the last convolutional
            layers respectively.

        Returns:
            a list of the written PNG paths (empty when no face has been set).
        '''
        if self.face is None:
            return []
        with self.graph.as_default():
            imgs = conv_layer_output.predict(self.format_face(self.face))[0]
        res_list = []
        # Only the first 30 of the 32 filters are exported -- presumably a
        # GUI layout choice; TODO confirm.
        for i in range(30):
            img = imgs[:, :, i]
            img = self.revert_img(img)
            # Random 12-character suffix avoids name clashes between batches.
            path = 'static/cache_image/%s' % layer_name+''.join(random.sample(string.ascii_letters + string.digits, 12)) + '.png'
            cv2.imwrite(path, img)
            res_list.append(path)
        return res_list
    # read_layer(self, conv_layer_output, layer_name

    def read_feature_map(self):
        '''
        read feature map

        Returns:
            a list contains file name saving feature map
        '''
        # Serve the cached batch until a new face arrives via set_face().
        if self.used_face:
            return self.res
        self.used_face = True
        print(self.res)
        # Queue the previous batch's files for eventual deletion.
        for file_name in self.res:
            self.delete_queue.put(file_name)
        if self.delete_queue.qsize() > 540:
            self.delete_files()
        self.res = []
        self.res = self.read_layer(self.first_layer, 'conv2d_1_')\
            + self.read_layer(self.second_layer, 'conv2d_2_') + self.read_layer(self.third_layer, 'conv2d_3_')
        return self.res
# class FaceFeatureReader(object)
# cascPath = "D:/eclipse/PythonWorkspaces/Eumpy/model/haarcascade_frontalface_alt.xml"
# faceCascade = cv2.CascadeClassifier(cascPath)
# def detect_face(gray):
# '''
# find faces from a gray image.
# Arguments:
# gray: a gray image
# Returns:
# (x, y, w, h)
# x, y: the left-up points of the face
# w, h: the width and height of the face
# '''
# faces = faceCascade.detectMultiScale(
# gray,
# scaleFactor = 1.1,
# minNeighbors = 5,
# minSize=(32, 32)
# )
# if len(faces) > 0:
# (x, y, w, h) = faces[0]
# else:
# (x, y, w, h) = (0, 0, 0, 0)
#
# return (x, y, w, h)
#
# if __name__ == '__main__':
# obj = FaceFeatureReader()
#
# cap = cv2.VideoCapture(0)
# while True:
# ret, frame = cap.read()
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# (x, y, w, h) = detect_face(gray)
#
# if (w != 0):
# face = gray[y:y+h, x:x+w]
# face = cv2.resize(face, (48, 48))
# obj.set_face(face)
# frame = cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), thickness = 2)
# cv2.imshow('11', frame)
# cv2.waitKey(10)
# res = obj.read_feature_map()
# print (len(res))
# print ('|'.join(res))
| [
"41141489+Breeze1in1drizzle@users.noreply.github.com"
] | 41141489+Breeze1in1drizzle@users.noreply.github.com |
83e2c440565a02e41fd9bbf9131108e3ed884e3c | 5e1f1364ea41e8e687772b4d6240bfa457324550 | /quantity.py | 7760ab13dbd602862aa27c2be0b8d6ccf09698c0 | [] | no_license | yaseppochi/my-python | 2872ef26dd3c857864126bb581f6624b54b1fac7 | 63fe9b81a737c23cfea60a9dcd13894a1b814f1d | refs/heads/master | 2023-07-01T09:17:25.614078 | 2021-08-12T06:53:16 | 2021-08-12T06:53:16 | 395,220,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,316 | py | # Sure, but for Python programmers that's not how it would be done. In
# a general-purpose language, you would use the type system. It would
# be done like this (only better, I just don't feel like writing out a
# whole suite of math dunders):
# If I remember my 1976 EE course correctly:
# one of several ways to compute power.
class Quantity:
    """Base class for a scalar physical quantity wrapping a numeric value."""

    def __init__(self, value):
        self.value = value

    def __float__(self):
        # Bug fix: float() requires __float__ to return an actual float.
        # The original returned self.value unchanged, so float(Quantity(2))
        # raised TypeError ("__float__ returned non-float") for int values
        # such as Resistance(2) below.
        return float(self.value)


class Potential(Quantity):
    """Electric potential, in volts."""

class Current(Quantity):
    """Electric current, in amperes."""

class Resistance(Quantity):
    """Electrical resistance, in ohms."""

class Power(Quantity):
    """Electrical power, in watts."""


def power2(potential: Potential, resistance: Resistance) -> Power:
    """Compute power as P = V**2 / R.

    Consenting adults: we trust the formula, do the arithmetic unitless,
    and attach the correct unit (type) on the way out.
    """
    return Power(float(potential) ** 2 / float(resistance))
# Demo values -- the two power2 calls below deliberately misuse the API to
# show the kind of mistake a unit-aware type checker would catch.
ohms = Resistance(2)
volts = Potential(10)
amperes = Current(6)
watts = power2(ohms, volts) # arguments reversed
watts = power2(volts, amperes) # wrong type argument 2
# # in module ee
# import quantity
# from quantity import Quantity
#
# quantity.register(type=ee.Current, fix='post', notation='A')
# quantity.register(type=ee.Potential, fix='post', notation='V')
#
# # in module spicy
# from ee import Quantity as Q
#
# amperes = Q("6e-3A")
# volts = Q("6mV")
# It's just that it would force you to stop
# being lazy once in a while when you run into a new parameter with
# wacko units.
# As for "V = I", a sufficiently smart circuit simulator would color
# that equation in red and prompt "Shall I multiply by 1.0 mhos? [y/n]".
# Or autocomplete to "V = I * 1mho" on enter, and flash the autoinserted
# factor a couple times to make sure you notice, so you can correct it
# if it's wrong. I bet you would get used to that quickly enough. (If
# there are multiple such factors, it could make up units like
# Deutschemark-volts/mole.) Of course, for those using Spice since
# 1974, there's a --suppress-trivial-factors-and-their-units option.
# One idea for output formatting would be to keep a list of SI scale
# factors ever used in the computation, and use the closest one. I
# bet this would be an effective heuristic.
| [
"stephen@xemacs.org"
] | stephen@xemacs.org |
205101325b29051add4fec00ed7a7ca59766cd56 | f1e7c31d22ee90e5e3705352170388c09b7470d4 | /day2/decorator4.py | 69c18443a573abae91c47ec9c78ea8ce44331c52 | [] | no_license | taizilinger123/apple | d0b58eb184d4a7c5aec674e914fa67139d2419ca | 285c10eedfa0863ad6d51efb9dea5c7d0ae540a6 | refs/heads/master | 2020-03-28T07:54:50.118668 | 2018-09-09T05:37:12 | 2018-09-09T05:37:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | import time
def timer(func):  # timer(test1) -> func=test1
    """Decorator: print *func*'s wall-clock run time on every call.

    Bug fix: the wrapper now returns whatever *func* returns; the original
    discarded the return value, so any decorated function appeared to
    return None.
    """
    def deco(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)  # run the wrapped function
        stop_time = time.time()
        print("the func run time is %s" % (stop_time - start_time))
        return result
    return deco
@timer #test1=timer(test1)
def test1():
    # Sleep so the printed run time is visibly ~1 second.
    time.sleep(1)
    print('in the test1')

@timer # test2 = timer(test2) #deco test2(name) = deco(name)
def test2(name,age):
    print("test2:",name,age)

# Exercise both decorated functions.
test1()
test2("alex",22)
"837337164@qq.com"
] | 837337164@qq.com |
5a65d9db69cc96ce79400cbb5568df203c4e877b | a7bf2878bb0d17626b9a58e00f2c7308448b4c31 | /task 6.py | 58eb209cc4dbdd719027983995b8ec346678c2dc | [] | no_license | Aaron44201/iteration | be85f7b308fd12985301b2906f3b474022cda7a8 | 5585c3cb48013bbc285b23a6c979cbb26e7c2daa | refs/heads/master | 2021-01-19T11:15:48.894513 | 2014-11-14T14:00:03 | 2014-11-14T14:00:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | #Aaron Bentley
#21/10/1014
#task 6
count = 0
# Print a kilograms-to-pounds conversion table for 1..20 kg (1 kg = 2.2 lb).
# Bug fix: the original loop line read "for count range(1,21)" -- it was
# missing both the "in" keyword and the trailing colon, a SyntaxError.
for count in range(1, 21):
    weight = count * 2.2
    print("{0:>2}KG = {1:>2}Pounds".format(count, weight))
"44201@longroad.ac.uk"
] | 44201@longroad.ac.uk |
9a7fc8e23d82ffb80c491b1e51e26e71ab025f91 | ce18cf6bdb1a85a65a509597b4c0ec046b855186 | /2020年12月/换酒问题.py | 2aae2a3d340b6efc017fbb7a2722d70f2ee89088 | [] | no_license | elssm/leetcode | e12e39faff1da5afb234be08e7d9db85fbee58f8 | a38103d2d93b34bc8bcf09f87c7ea698f99c4e36 | refs/heads/master | 2021-06-11T06:44:44.993905 | 2021-04-28T06:14:23 | 2021-04-28T06:14:23 | 171,072,054 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | class Solution(object):
def numWaterBottles(self, numBottles, numExchange):
"""
:type numBottles: int
:type numExchange: int
:rtype: int
"""
res=numBottles
while numBottles//numExchange:
res+=numBottles//numExchange
numBottles=numBottles//numExchange+numBottles%numExchange
return res | [
"noreply@github.com"
] | elssm.noreply@github.com |
2172eae3b8a6040dca586a55ab06a8232237dacd | 35247b545100a3b550f45b7231f364447993ed4f | /cadastro_orama/models/perfil_usuario.py | ce5a96eaf3dbe6e5b687d3005a9c2933f5fbeecf | [] | no_license | orama-investimentos/orama-cadastro-python | f96ceeac0b0eedfadfa9f7b24acb9c0a91d3cdee | b1625db4ad965b51c478df4d4fa742b641838461 | refs/heads/master | 2022-04-12T03:24:28.918549 | 2020-02-17T14:12:28 | 2020-02-17T14:12:28 | 202,339,973 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,853 | py | # coding: utf-8
"""
Criação de Contas
API de Criação de Contas. # noqa: E501
The version of the OpenAPI document: 2.0
Contact: cadastro_api@orama.com.br
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from cadastro_orama.configuration import Configuration
class PerfilUsuario(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'us_person': 'bool',
'politicamente_exposto': 'bool',
'investidor_qualificado': 'bool',
'nacionalidade': 'str',
'uf_nascimento': 'str',
'cidade_nascimento': 'str',
'pais_nascimento': 'str',
'sexo': 'str',
'estado_civil': 'str',
'nome_conjuge': 'str',
'nome_mae': 'str',
'nome_pai': 'str',
'login': 'LoginObjeto',
'documento': 'list[Documento]',
'profissao': 'DadosProfissionais',
'endereco': 'Endereco',
'patrimonio': 'DadosPatrimonial',
'conta_bancaria': 'list[ContaBancaria]',
'front_end': 'FrontEndStep'
}
attribute_map = {
'us_person': 'usPerson',
'politicamente_exposto': 'politicamenteExposto',
'investidor_qualificado': 'investidorQualificado',
'nacionalidade': 'nacionalidade',
'uf_nascimento': 'ufNascimento',
'cidade_nascimento': 'cidadeNascimento',
'pais_nascimento': 'paisNascimento',
'sexo': 'sexo',
'estado_civil': 'estadoCivil',
'nome_conjuge': 'nomeConjuge',
'nome_mae': 'nomeMae',
'nome_pai': 'nomePai',
'login': 'login',
'documento': 'documento',
'profissao': 'profissao',
'endereco': 'endereco',
'patrimonio': 'patrimonio',
'conta_bancaria': 'contaBancaria',
'front_end': 'frontEnd'
}
def __init__(self, us_person=False, politicamente_exposto=False, investidor_qualificado=False, nacionalidade=None, uf_nascimento=None, cidade_nascimento=None, pais_nascimento=None, sexo=None, estado_civil=None, nome_conjuge=None, nome_mae=None, nome_pai=None, login=None, documento=None, profissao=None, endereco=None, patrimonio=None, conta_bancaria=None, front_end=None, local_vars_configuration=None): # noqa: E501
"""PerfilUsuario - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._us_person = None
self._politicamente_exposto = None
self._investidor_qualificado = None
self._nacionalidade = None
self._uf_nascimento = None
self._cidade_nascimento = None
self._pais_nascimento = None
self._sexo = None
self._estado_civil = None
self._nome_conjuge = None
self._nome_mae = None
self._nome_pai = None
self._login = None
self._documento = None
self._profissao = None
self._endereco = None
self._patrimonio = None
self._conta_bancaria = None
self._front_end = None
self.discriminator = None
if us_person is not None:
self.us_person = us_person
if politicamente_exposto is not None:
self.politicamente_exposto = politicamente_exposto
if investidor_qualificado is not None:
self.investidor_qualificado = investidor_qualificado
if nacionalidade is not None:
self.nacionalidade = nacionalidade
if uf_nascimento is not None:
self.uf_nascimento = uf_nascimento
if cidade_nascimento is not None:
self.cidade_nascimento = cidade_nascimento
if pais_nascimento is not None:
self.pais_nascimento = pais_nascimento
if sexo is not None:
self.sexo = sexo
if estado_civil is not None:
self.estado_civil = estado_civil
if nome_conjuge is not None:
self.nome_conjuge = nome_conjuge
self.nome_mae = nome_mae
if nome_pai is not None:
self.nome_pai = nome_pai
if login is not None:
self.login = login
self.documento = documento
if profissao is not None:
self.profissao = profissao
self.endereco = endereco
if patrimonio is not None:
self.patrimonio = patrimonio
if conta_bancaria is not None:
self.conta_bancaria = conta_bancaria
if front_end is not None:
self.front_end = front_end
@property
def us_person(self):
"""Gets the us_person of this PerfilUsuario. # noqa: E501
define se o usuário pode ou não ser enquadrado como US person de acordo com a definição da CVM # noqa: E501
:return: The us_person of this PerfilUsuario. # noqa: E501
:rtype: bool
"""
return self._us_person
@us_person.setter
def us_person(self, us_person):
"""Sets the us_person of this PerfilUsuario.
define se o usuário pode ou não ser enquadrado como US person de acordo com a definição da CVM # noqa: E501
:param us_person: The us_person of this PerfilUsuario. # noqa: E501
:type: bool
"""
self._us_person = us_person
@property
def politicamente_exposto(self):
"""Gets the politicamente_exposto of this PerfilUsuario. # noqa: E501
define se o usuário pode ou não ser enquadrado como pessoa politicamente exposta de acordo com a definição da Deliberação Coremec nº 2, de 1º de dezembro de 2006 # noqa: E501
:return: The politicamente_exposto of this PerfilUsuario. # noqa: E501
:rtype: bool
"""
return self._politicamente_exposto
@politicamente_exposto.setter
def politicamente_exposto(self, politicamente_exposto):
"""Sets the politicamente_exposto of this PerfilUsuario.
define se o usuário pode ou não ser enquadrado como pessoa politicamente exposta de acordo com a definição da Deliberação Coremec nº 2, de 1º de dezembro de 2006 # noqa: E501
:param politicamente_exposto: The politicamente_exposto of this PerfilUsuario. # noqa: E501
:type: bool
"""
self._politicamente_exposto = politicamente_exposto
@property
def investidor_qualificado(self):
"""Gets the investidor_qualificado of this PerfilUsuario. # noqa: E501
Define se o usuário é investidor qualifiquado. Investidor Qualificado - PF ou PJ que possuam investimentos financeiros em valor superior a 1 Milhão, Investidor aprovado em exame de qualificação técnica, e atestem por escrito sua condição de investidor qualificado. Investidores Profissionais, etc. # noqa: E501
:return: The investidor_qualificado of this PerfilUsuario. # noqa: E501
:rtype: bool
"""
return self._investidor_qualificado
@investidor_qualificado.setter
def investidor_qualificado(self, investidor_qualificado):
"""Sets the investidor_qualificado of this PerfilUsuario.
Define se o usuário é investidor qualifiquado. Investidor Qualificado - PF ou PJ que possuam investimentos financeiros em valor superior a 1 Milhão, Investidor aprovado em exame de qualificação técnica, e atestem por escrito sua condição de investidor qualificado. Investidores Profissionais, etc. # noqa: E501
:param investidor_qualificado: The investidor_qualificado of this PerfilUsuario. # noqa: E501
:type: bool
"""
self._investidor_qualificado = investidor_qualificado
@property
def nacionalidade(self):
"""Gets the nacionalidade of this PerfilUsuario. # noqa: E501
Definição de Nacionalidade de acordo com o Art. 12 da CF # noqa: E501
:return: The nacionalidade of this PerfilUsuario. # noqa: E501
:rtype: str
"""
return self._nacionalidade
@nacionalidade.setter
def nacionalidade(self, nacionalidade):
"""Sets the nacionalidade of this PerfilUsuario.
Definição de Nacionalidade de acordo com o Art. 12 da CF # noqa: E501
:param nacionalidade: The nacionalidade of this PerfilUsuario. # noqa: E501
:type: str
"""
allowed_values = ["Brasileiro Nato", "Estrangeiro", "Brasileiro Naturalizado"] # noqa: E501
if self.local_vars_configuration.client_side_validation and nacionalidade not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `nacionalidade` ({0}), must be one of {1}" # noqa: E501
.format(nacionalidade, allowed_values)
)
self._nacionalidade = nacionalidade
@property
def uf_nascimento(self):
"""Gets the uf_nascimento of this PerfilUsuario. # noqa: E501
Unidade da Federação em que a pessoa nasceu # noqa: E501
:return: The uf_nascimento of this PerfilUsuario. # noqa: E501
:rtype: str
"""
return self._uf_nascimento
@uf_nascimento.setter
def uf_nascimento(self, uf_nascimento):
"""Sets the uf_nascimento of this PerfilUsuario.
Unidade da Federação em que a pessoa nasceu # noqa: E501
:param uf_nascimento: The uf_nascimento of this PerfilUsuario. # noqa: E501
:type: str
"""
allowed_values = ["AC", "AL", "AM", "AP", "BA", "CE", "DF", "ES", "GO", "MA", "MG", "MS", "MT", "PA", "PB", "PE", "PI", "PR", "RJ", "RN", "RO", "RR", "RS", "SC", "SE", "SP", "TO"] # noqa: E501
if self.local_vars_configuration.client_side_validation and uf_nascimento not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `uf_nascimento` ({0}), must be one of {1}" # noqa: E501
.format(uf_nascimento, allowed_values)
)
self._uf_nascimento = uf_nascimento
@property
def cidade_nascimento(self):
"""Gets the cidade_nascimento of this PerfilUsuario. # noqa: E501
Município em que a pessoa nascida no Brasil nasceu. Formato é o nome lexicograficamente igual a descrição do IBGE ou o código de cidade completo do IBGE # noqa: E501
:return: The cidade_nascimento of this PerfilUsuario. # noqa: E501
:rtype: str
"""
return self._cidade_nascimento
@cidade_nascimento.setter
def cidade_nascimento(self, cidade_nascimento):
"""Sets the cidade_nascimento of this PerfilUsuario.
Município em que a pessoa nascida no Brasil nasceu. Formato é o nome lexicograficamente igual a descrição do IBGE ou o código de cidade completo do IBGE # noqa: E501
:param cidade_nascimento: The cidade_nascimento of this PerfilUsuario. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
cidade_nascimento is not None and len(cidade_nascimento) > 200):
raise ValueError("Invalid value for `cidade_nascimento`, length must be less than or equal to `200`") # noqa: E501
self._cidade_nascimento = cidade_nascimento
@property
def pais_nascimento(self):
"""Gets the pais_nascimento of this PerfilUsuario. # noqa: E501
País em que a pessoa nasceu. Código ISO 3166-1 alpha-2 # noqa: E501
:return: The pais_nascimento of this PerfilUsuario. # noqa: E501
:rtype: str
"""
return self._pais_nascimento
@pais_nascimento.setter
def pais_nascimento(self, pais_nascimento):
"""Sets the pais_nascimento of this PerfilUsuario.
País em que a pessoa nasceu. Código ISO 3166-1 alpha-2 # noqa: E501
:param pais_nascimento: The pais_nascimento of this PerfilUsuario. # noqa: E501
:type: str
"""
self._pais_nascimento = pais_nascimento
@property
def sexo(self):
"""Gets the sexo of this PerfilUsuario. # noqa: E501
Sexo do indivíduo # noqa: E501
:return: The sexo of this PerfilUsuario. # noqa: E501
:rtype: str
"""
return self._sexo
@sexo.setter
def sexo(self, sexo):
"""Sets the sexo of this PerfilUsuario.
Sexo do indivíduo # noqa: E501
:param sexo: The sexo of this PerfilUsuario. # noqa: E501
:type: str
"""
allowed_values = ["Feminino", "Masculino"] # noqa: E501
if self.local_vars_configuration.client_side_validation and sexo not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `sexo` ({0}), must be one of {1}" # noqa: E501
.format(sexo, allowed_values)
)
self._sexo = sexo
@property
def estado_civil(self):
"""Gets the estado_civil of this PerfilUsuario. # noqa: E501
Estado civil do usuário # noqa: E501
:return: The estado_civil of this PerfilUsuario. # noqa: E501
:rtype: str
"""
return self._estado_civil
@estado_civil.setter
def estado_civil(self, estado_civil):
"""Sets the estado_civil of this PerfilUsuario.
Estado civil do usuário # noqa: E501
:param estado_civil: The estado_civil of this PerfilUsuario. # noqa: E501
:type: str
"""
allowed_values = ["Casado(a)", "Solteiro(a)", "Divorciado(a)", "União estável", "Separado(a)", "Viúvo(a)"] # noqa: E501
if self.local_vars_configuration.client_side_validation and estado_civil not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `estado_civil` ({0}), must be one of {1}" # noqa: E501
.format(estado_civil, allowed_values)
)
self._estado_civil = estado_civil
@property
def nome_conjuge(self):
"""Gets the nome_conjuge of this PerfilUsuario. # noqa: E501
Nome do conjuge ou companheiro, necessário em casos que o estado civil seja 'Casado(a)' ou 'União estável' # noqa: E501
:return: The nome_conjuge of this PerfilUsuario. # noqa: E501
:rtype: str
"""
return self._nome_conjuge
@nome_conjuge.setter
def nome_conjuge(self, nome_conjuge):
"""Sets the nome_conjuge of this PerfilUsuario.
Nome do conjuge ou companheiro, necessário em casos que o estado civil seja 'Casado(a)' ou 'União estável' # noqa: E501
:param nome_conjuge: The nome_conjuge of this PerfilUsuario. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
nome_conjuge is not None and len(nome_conjuge) > 200):
raise ValueError("Invalid value for `nome_conjuge`, length must be less than or equal to `200`") # noqa: E501
self._nome_conjuge = nome_conjuge
@property
def nome_mae(self):
"""Gets the nome_mae of this PerfilUsuario. # noqa: E501
Nome da mãe do usuário # noqa: E501
:return: The nome_mae of this PerfilUsuario. # noqa: E501
:rtype: str
"""
return self._nome_mae
@nome_mae.setter
def nome_mae(self, nome_mae):
"""Sets the nome_mae of this PerfilUsuario.
Nome da mãe do usuário # noqa: E501
:param nome_mae: The nome_mae of this PerfilUsuario. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and nome_mae is None: # noqa: E501
raise ValueError("Invalid value for `nome_mae`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
nome_mae is not None and len(nome_mae) > 200):
raise ValueError("Invalid value for `nome_mae`, length must be less than or equal to `200`") # noqa: E501
self._nome_mae = nome_mae
@property
def nome_pai(self):
"""Gets the nome_pai of this PerfilUsuario. # noqa: E501
Nome do pai do usuário. O nome deve ser string vazia ou null caso o pai seja desconhecido. # noqa: E501
:return: The nome_pai of this PerfilUsuario. # noqa: E501
:rtype: str
"""
return self._nome_pai
@nome_pai.setter
def nome_pai(self, nome_pai):
"""Sets the nome_pai of this PerfilUsuario.
Nome do pai do usuário. O nome deve ser string vazia ou null caso o pai seja desconhecido. # noqa: E501
:param nome_pai: The nome_pai of this PerfilUsuario. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
nome_pai is not None and len(nome_pai) > 200):
raise ValueError("Invalid value for `nome_pai`, length must be less than or equal to `200`") # noqa: E501
self._nome_pai = nome_pai
@property
def login(self):
"""Gets the login of this PerfilUsuario. # noqa: E501
:return: The login of this PerfilUsuario. # noqa: E501
:rtype: LoginObjeto
"""
return self._login
@login.setter
def login(self, login):
"""Sets the login of this PerfilUsuario.
:param login: The login of this PerfilUsuario. # noqa: E501
:type: LoginObjeto
"""
self._login = login
@property
def documento(self):
"""Gets the documento of this PerfilUsuario. # noqa: E501
:return: The documento of this PerfilUsuario. # noqa: E501
:rtype: list[Documento]
"""
return self._documento
@documento.setter
def documento(self, documento):
"""Sets the documento of this PerfilUsuario.
:param documento: The documento of this PerfilUsuario. # noqa: E501
:type: list[Documento]
"""
if self.local_vars_configuration.client_side_validation and documento is None: # noqa: E501
raise ValueError("Invalid value for `documento`, must not be `None`") # noqa: E501
self._documento = documento
@property
def profissao(self):
"""Gets the profissao of this PerfilUsuario. # noqa: E501
:return: The profissao of this PerfilUsuario. # noqa: E501
:rtype: DadosProfissionais
"""
return self._profissao
@profissao.setter
def profissao(self, profissao):
"""Sets the profissao of this PerfilUsuario.
:param profissao: The profissao of this PerfilUsuario. # noqa: E501
:type: DadosProfissionais
"""
self._profissao = profissao
@property
def endereco(self):
"""Gets the endereco of this PerfilUsuario. # noqa: E501
:return: The endereco of this PerfilUsuario. # noqa: E501
:rtype: Endereco
"""
return self._endereco
@endereco.setter
def endereco(self, endereco):
"""Sets the endereco of this PerfilUsuario.
:param endereco: The endereco of this PerfilUsuario. # noqa: E501
:type: Endereco
"""
if self.local_vars_configuration.client_side_validation and endereco is None: # noqa: E501
raise ValueError("Invalid value for `endereco`, must not be `None`") # noqa: E501
self._endereco = endereco
@property
def patrimonio(self):
"""Gets the patrimonio of this PerfilUsuario. # noqa: E501
:return: The patrimonio of this PerfilUsuario. # noqa: E501
:rtype: DadosPatrimonial
"""
return self._patrimonio
@patrimonio.setter
def patrimonio(self, patrimonio):
"""Sets the patrimonio of this PerfilUsuario.
:param patrimonio: The patrimonio of this PerfilUsuario. # noqa: E501
:type: DadosPatrimonial
"""
self._patrimonio = patrimonio
@property
def conta_bancaria(self):
"""Gets the conta_bancaria of this PerfilUsuario. # noqa: E501
:return: The conta_bancaria of this PerfilUsuario. # noqa: E501
:rtype: list[ContaBancaria]
"""
return self._conta_bancaria
@conta_bancaria.setter
def conta_bancaria(self, conta_bancaria):
"""Sets the conta_bancaria of this PerfilUsuario.
:param conta_bancaria: The conta_bancaria of this PerfilUsuario. # noqa: E501
:type: list[ContaBancaria]
"""
self._conta_bancaria = conta_bancaria
@property
def front_end(self):
"""Gets the front_end of this PerfilUsuario. # noqa: E501
:return: The front_end of this PerfilUsuario. # noqa: E501
:rtype: FrontEndStep
"""
return self._front_end
@front_end.setter
def front_end(self, front_end):
"""Sets the front_end of this PerfilUsuario.
:param front_end: The front_end of this PerfilUsuario. # noqa: E501
:type: FrontEndStep
"""
self._front_end = front_end
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PerfilUsuario):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PerfilUsuario):
return True
return self.to_dict() != other.to_dict()
| [
"marcelo.lino@orama.com.br"
] | marcelo.lino@orama.com.br |
21a1cc4eb1bd179ff89d75cea8a6a470f7fd27a8 | 8686ab49db2fa1d13711820bc241bf618b59b8f8 | /Transformer_originTorch/transformer/Modules.py | 5aeb16d095f1210ff80af2a3066622c7a98101e4 | [] | no_license | helloworld729/40_torch-self-learning | b3b781f39b2da7d5a5bd8be5c9767b9e3dbe56a6 | d04a02a5392f33a74d2421a1f04c36dba691b70d | refs/heads/master | 2023-06-01T00:55:38.673425 | 2021-06-06T08:24:47 | 2021-06-06T08:24:47 | 322,810,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | import torch
import torch.nn as nn
import numpy as np
__author__ = "Yu-Hsiang Huang"
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature # 分母
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v, mask=None):
# q shape: (heads*batch) x len_q x dk
# k shape: (heads*batch) x len_k x dk
# v shape: (heads*batch) x len_v x dv
# mask shape: heads*batch_size, len_q, len_k
# q 乘 k 转置, attn shape: heads*batch, len_q, len_q
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask, -np.inf) # 第一维度索引,隔batch_size后数据的mask位置相同, mask为true的位置变成负inf
attn = self.softmax(attn)
attn = self.dropout(attn) # 某些位置随即为0
# attn shape: batch*heads, len_q, len_q
# v shape: heads*batch, len_v, dv
# 实际上就是 ss * s dv
output = torch.bmm(attn, v)
return output, attn
| [
"1952933329@qq.com"
] | 1952933329@qq.com |
d39a4d7e9dc42b930eb7632dc42c3279613626e7 | b10945c8462fd0388dd5f68e93e0c142ad8ad571 | /testlist/consumers/__init__.py | ac2c328345edbc88cacf5a35aa43d713584190dc | [] | no_license | ju1900/netmgr | 88cd89794a1c84d9c6e0c394ebf04981e4fdc469 | 0a2b5f00590517df15b3f49cca7a4ed652bf0f7b | refs/heads/master | 2020-04-28T04:52:21.114836 | 2019-04-03T16:28:30 | 2019-04-03T16:28:30 | 174,997,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | from .testlist import TestlistConsumer
from .product import ProductConsumer
from .testcase import TestcaseConsumer
from .chapter import ChapterConsumer
from .section import SectionConsumer
from .testcase import TestcaseConsumer | [
"1044161599@qq.com"
] | 1044161599@qq.com |
289bec8fb1a7d43b36a2e31a1fe72a52fe207a0b | f2407926946476f6e09ac82547ff394e3d24e40c | /app/app.py | ea0ee1f9986ec5ccb2b685dc9584975501d871d0 | [] | no_license | abramic/python_email_test_code | 31609d2c6cb35880eb58de7bba7f454ece40224b | cbd0bc52235e6f62b389e465ae2e0ba948da77dd | refs/heads/master | 2020-09-23T06:47:35.427964 | 2019-12-02T17:47:46 | 2019-12-02T17:47:46 | 225,430,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | from flask import Flask, request, make_response, jsonify
app = Flask("python_email_test_server")
@app.route('/ping', methods = ['GET', 'PATCH', 'POST', 'PUT', 'DELETE'])
def handler():
return make_response('Hello World From Python Test Email Server!', 200)
app.run(port=int('5001'), debug=True)
| [
"57453141+abramic@users.noreply.github.com"
] | 57453141+abramic@users.noreply.github.com |
766cc242f93877211cb77bca7ec7854882082424 | c5b690228639a0dd8f8e7c371e58994f04d19474 | /levelupapi/models/event.py | a72159c47a1b8f8b217abc52811944d9b472387b | [] | no_license | KyleSimmonsC44/levelup-backend | 9c299f4d94f9698ea7128992c2deda9990e61f9a | 926901fb67e0c337975400a22d28f66c3953a948 | refs/heads/main | 2023-03-13T06:28:39.672932 | 2021-03-01T21:16:21 | 2021-03-01T21:16:21 | 337,529,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | from django.db import models
class Event(models.Model):
event_time = models.DateTimeField(auto_now=False, auto_now_add=False)
game = models.ForeignKey("Game", on_delete=models.CASCADE)
location = models.CharField(max_length=50)
scheduler = models.ForeignKey("Gamer", on_delete=models.CASCADE)
@property
def joined(self):
return self.__joined
@joined.setter
def joined(self, value):
self.__joined = value | [
"darrensimmons92@gmail.com"
] | darrensimmons92@gmail.com |
19051aed542c9f4efa751cfbf4908783c1d3215e | dd0d2a4da64200a7bea42d23122384189b900850 | /common_digit.py | 64c95fda4c01ff6bdc0db9231dae66fbd66e46a4 | [] | no_license | gakkistyle/comp9021 | 06ad00b47b7b0135013b014464b5f13530cad49d | 4d0d4a2d719745528bf84ed0dfb88a43f858be7e | refs/heads/master | 2022-09-24T13:10:29.609277 | 2020-06-06T16:54:42 | 2020-06-06T16:54:42 | 270,043,710 | 14 | 7 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | def average_of_digits(digit=None):
if digit == None:
return -1
if len(digit) == 1:
digit_set = set(str(digit[0]))
sum = 0
for e in digit_set:
sum += int(e)
return sum/len(digit_set)
common = []
word_set1 = set(str(digit[0]))
word_set2 = set(str(digit[1]))
for e in word_set1:
if e in word_set2:
common.append(e)
for i in range(2,len(digit)):
word_setn = set(str(digit[i]))
for e in common:
if e not in word_setn:
common.remove(e)
if common == []:
return -1
sum = 0
for e in common:
sum += int(e)
return sum/len(common)
print(average_of_digits([3136823,665537857,8363265,35652385]))
| [
"1824150996@qq.com"
] | 1824150996@qq.com |
cc3dc9d0cd4cc83d55b95cacc6d634ffd4784dba | 147c0e0ff8dc7db0dbdf19e5d4a5cea65e23ecbe | /gridlabd_functions_ToU.py | 01834a9c38be842f4f61cf5286a7522c7b994c24 | [] | no_license | mlamlamla/powernet_pyGridlabD_market | 54684f922a73cb0ba5ad06769fc2a1e6e866fbd5 | 0eec1ebb4377e90c5a193e4db62139511b921c55 | refs/heads/master | 2021-05-21T21:36:35.703471 | 2021-01-19T23:48:48 | 2021-01-19T23:48:48 | 252,811,004 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,269 | py | import gldimport
import os
import random
import pandas
import json
import numpy as np
import datetime
from datetime import timedelta
from dateutil import parser
import HH_functions as HHfct
import battery_functions as Bfct
import EV_functions as EVfct
import PV_functions as PVfct
import market_functions as Mfct
import time
from HH_global import results_folder, flexible_houses, C, p_max, market_data, which_price, city
from HH_global import interval, prec, price_intervals, allocation_rule, unresp_factor
from HH_global import FIXED_TARIFF, include_SO
ToU_min = 15
p_min = 20
ToU_max = 19
p_max = 75
p_vec = ToU_min*[p_min] + (ToU_max - ToU_min)*[p_max] + (24 - ToU_max)*[p_min]
mean_p = sum(p_vec)/len(p_vec)
var_p = np.var(p_vec)
def on_init(t):
global t0;
t0 = time.time()
global step;
step = 0
#Instead of mysql
global df_buy_bids, df_supply_bids, df_awarded_bids;
df_buy_bids = pandas.DataFrame(columns=['timestamp','appliance_name','bid_price','bid_quantity'])
df_supply_bids = pandas.DataFrame(columns=['timestamp','appliance_name','bid_price','bid_quantity'])
df_awarded_bids = pandas.DataFrame(columns=['timestamp','appliance_name','bid_price','bid_quantity','S_D'])
#Find objects
global houses;
if flexible_houses == 0:
houses = []
else:
houses = gldimport.find_objects('class=house')[:flexible_houses]
#global df_house_state;
#df_house_state = HHfct.get_settings_houses(houses,interval)
batteries = gldimport.find_objects('class=battery')
global batterylist, EVlist;
batterylist, EVlist = gldimport.sort_batteries(batteries)
global df_battery_state;
df_battery_state = Bfct.get_settings_batteries(batterylist,interval)
global df_EV_state;
df_EV_state = EVfct.get_settings_EVs(EVlist,interval)
global df_prices, df_WS;
df_prices = pandas.DataFrame(columns=['clearing_price','clearing_quantity','unresponsive_loads','slack_t-1'])
df_WS = pandas.read_csv('glm_generation_'+city+'/'+market_data,parse_dates=[0],index_col=[0])
#df_WS = pandas.DataFrame(index=pandas.to_datetime(df_WS.index.astype(str)),columns=df_WS.columns,data=df_WS.values)
print('Initialize finished after '+str(time.time()-t0))
return True
def init(t):
print('Objective-specific Init')
return True
#Global precommit
#Should be mostly moved to market precommit
def on_precommit(t):
dt_sim_time = parser.parse(gridlabd.get_global('clock')).replace(tzinfo=None)
#Run market only every five minutes
if not ((dt_sim_time.second == 0) and (dt_sim_time.minute % (interval/60) == 0)):
return t
else: #interval in minutes #is not start time
print('Start precommit: '+str(dt_sim_time))
global step;
global df_house_state, df_battery_state, df_EV_state, df_PV_state;
global df_buy_bids, df_supply_bids, df_awarded_bids;
if step == 0:
df_house_state = HHfct.get_settings_houses(houses,interval)
#Save DB files and shorten dfs every 12 hours
saving_interval = 1
if step > 0 and (dt_sim_time.hour%saving_interval == 0) and (dt_sim_time.minute == 0):
i = int(step/(saving_interval*12)) #for 5min interval
df_supply_bids.to_csv(results_folder+'/df_supply_bids.csv')
#df_supply_bids = pandas.DataFrame(columns = df_supply_bids.columns)
df_buy_bids.to_csv(results_folder+'/df_buy_bids.csv')
#df_buy_bids = pandas.DataFrame(columns = df_buy_bids.columns)
df_awarded_bids.to_csv(results_folder+'/df_awarded_bids.csv')
#df_awarded_bids = pandas.DataFrame(columns = df_awarded_bids.columns)
#Get current ToU price
retail = Mfct.Market()
retail.Pmin = 0.0
retail.Pmax = p_max
retail.Pprec = prec
if (dt_sim_time.hour >= ToU_min) & (dt_sim_time.hour < ToU_max):
Pd = p_max
else:
Pd = p_min
df_temp = pandas.DataFrame(index=[dt_sim_time],columns=['clearing_price','clearing_quantity','unresponsive_loads','slack_t-1'],data=[[Pd,0.0,0.0,0.0]])
df_prices = df_prices.append(df_temp)
#Update physical values for new period
#global df_house_state;
df_house_state = HHfct.update_house(dt_sim_time,df_house_state)
if len(batterylist) > 0:
df_battery_state = Bfct.update_battery(df_battery_state)
if len(EVlist) > 0:
df_EV_state = EVfct.update_EV(dt_sim_time,df_EV_state)
#if len(pvlist) > 0:
# df_PV_state = PVfct.update_PV(dt_sim_time,df_PV_state)
#Determine willingness to pay for HVACs
df_house_state = HHfct.calc_bids_HVAC(dt_sim_time,df_house_state,retail,mean_p,var_p)
#Batteries try to sell in peak times, and buy in non-peak times until they are full
#peak hours: sell
if (dt_sim_time.hour >= ToU_min) & (dt_sim_time.hour < ToU_max):
df_battery_state
#Quantity depends on SOC and u
df_battery_state['residual_s'] = round((3600./interval)*(df_battery_state['SOC_t'] - df_battery_state['SOC_min']*df_battery_state['SOC_max']),prec) #Recalculate to kW
df_battery_state['q_sell'] = df_battery_state[['residual_s','u_max']].min(axis=1) #in kW / only if fully dischargeable
df_battery_state['q_sell'].loc[df_battery_state['q_sell'] < 0.1] = 0.0
df_battery_state['p_sell'] = -p_max #sell in any case
df_battery_state['q_buy'] = 0.0
df_battery_state['p_buy'] = -p_max
else:
safety_fac = 0.99
df_battery_state['residual_b'] = round((3600./interval)*(safety_fac*df_battery_state['SOC_max'] - df_battery_state['SOC_t']),prec) #Recalculate to kW
df_battery_state['q_buy'] = df_battery_state[['residual_b','u_max']].min(axis=1) #in kW
df_battery_state['q_buy'].loc[df_battery_state['q_buy'] < 0.1] = 0.0
df_battery_state['p_buy'] = p_max #buy in any case
df_battery_state['q_sell'] = 0.0
df_battery_state['p_sell'] = p_max #never sell
#Determine willingness to pay for EVs
#Quantity
safety_fac = 0.99
df_EV_state['q_buy'] = 0.0 #general
df_EV_state['residual_SOC'] = round((3600./interval)*(safety_fac*df_EV_state['SOC_max'] - df_EV_state['SOC_t']),prec)
df_EV_state['q_buy'].loc[df_EV_state['connected'] == 1] = df_EV_state.loc[df_EV_state['connected'] == 1][['residual_SOC','u_max']].min(axis=1) #in kW
df_EV_state['q_buy'].loc[df_EV_state['q_buy'] < 1.] = 0.0
#Price
df_EV_state['p_buy'] = 0.0 #general
#peak hours: only charge if necessary
if (dt_sim_time.hour >= ToU_min) & (dt_sim_time.hour < ToU_max):
#Home-based charging
df_EV_state['delta'] = df_EV_state['next_event'] - dt_sim_time
df_EV_state['residual_t'] = df_EV_state['delta'].apply(lambda x: x.seconds)/3600. #residual time until departure; in h
df_EV_state['time_needed_charging'] = df_EV_state['residual_SOC']/df_EV_state['u_max'] #in h
df_EV_state['must_charge'] = 0
df_EV_state['must_charge'].loc[df_EV_state['residual_t'] <= df_EV_state['time_needed_charging']] = 1
#import pdb; pdb.set_trace()
#df_EV_state.at[df_EV_state.loc[df_EV_state['must_charge'] == 0].index,'p_buy'] = 0.0
df_EV_state['p_buy'].loc[df_EV_state['must_charge'] == 0] = 0.0
#df_EV_state.at[df_EV_state.loc[df_EV_state['must_charge'] == 1].index,'p_buy'] = p_max
df_EV_state['p_buy'].loc[df_EV_state['must_charge'] == 1] = p_max
else:
df_EV_state['p_buy'] = p_max
#Commercial
df_EV_state.loc[df_EV_state['charging_type'].str.contains('comm') & (df_EV_state['connected'] == 1) & (df_EV_state['q_buy'] > 0.001),'p_buy'] = retail.Pmax #max for commercial cars
#Dispatch
allocation_rule == 'by_price' #open loop!
df_house_state,df_awarded_bids = HHfct.set_HVAC_by_price(dt_sim_time,df_house_state,mean_p,var_p, Pd,df_awarded_bids) #Switches the HVAC system on and off directly (depending on bid >= p)
df_bids_battery, df_awarded_bids = Bfct.set_battery_by_price(dt_sim_time,df_battery_state,mean_p,var_p, Pd, df_awarded_bids) #Controls battery based on bid <-> p
df_EV_state, df_awarded_bids = EVfct.set_EV_by_price(dt_sim_time,df_EV_state,mean_p,var_p, Pd, df_awarded_bids) #Controls EV based on bid <-> p
step += 1
return t
def on_term(t):
print('Simulation ended, saving results')
saving_results()
global t0;
t1 = time.time()
print('Time needed (min):')
print((t1-t0)/60)
return None
def saving_results():
#Save settings of objects
global df_house_state;
df_house_state.to_csv(results_folder+'/df_house_state.csv')
global df_battery_state
df_battery_state.to_csv(results_folder+'/df_battery_state.csv')
global df_EV_state
df_EV_state.to_csv(results_folder+'/df_EV_state.csv')
#global df_PV_state;
#df_PV_state.to_csv(results_folder+'/df_PV_state.csv')
#Saving former mysql
global df_prices;
df_prices.to_csv(results_folder+'/df_prices.csv')
global df_supply_bids;
df_supply_bids.to_csv(results_folder+'/df_supply_bids.csv')
global df_buy_bids;
df_buy_bids.to_csv(results_folder+'/df_buy_bids.csv')
global df_awarded_bids;
df_awarded_bids.to_csv(results_folder+'/df_awarded_bids.csv')
#Saving mysql databases
#import download_databases
#download_databases.save_databases(timestamp)
#mysql_functions.clear_databases(table_list) #empty up database
#Saving globals
file = 'HH_global.py'
new_file = results_folder+'/HH_global.py'
glm = open(file,'r')
new_glm = open(new_file,'w')
j = 0
for line in glm:
new_glm.write(line)
glm.close()
new_glm.close()
#Do evaluations
return
#Object-specific precommit
def precommit(obj,t) :
print(t)
tt = int(300*((t/300)+1))
print('Market precommit')
print(tt)
return gridlabd.NEVER #t #True #tt
| [
"admin@admins-air.attlocal.net"
] | admin@admins-air.attlocal.net |
4449c65735420538e3030db0927495d01c74b926 | 95ac3e99aefdbd5c7e402d3da18c0d036ef550db | /manage.py | fdaf7b004cb414e5a5d305877816debccdd4cce7 | [] | no_license | HugoTorquato/HighFive | 69c0679f0c365675d7a652376d078b32a3a778f3 | f95f47b75aa0c33e628e0b068bab7a6ddd0cdf04 | refs/heads/master | 2020-11-28T22:17:15.558274 | 2016-11-11T19:33:25 | 2016-11-11T19:33:25 | 73,505,823 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "HighFive.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"hugo.1601@hotmail.com"
] | hugo.1601@hotmail.com |
9cca650b3301e4916cc9617d1975ef0d38bf3470 | 73da213ea771f2b9d949899316f359f9e9ad6202 | /write_message.py | 56a4f8f8163c05f02b31630353e4c58d43899d08 | [] | no_license | chewbocky/Python | 99e0d1dedcd8b2abc15bbcd31f70aed02ffdd466 | d7c85b4afaff1c5e9af6b56522a1ae55d79e179e | refs/heads/master | 2022-12-19T23:57:38.099333 | 2020-07-13T15:42:16 | 2020-07-13T15:42:16 | 279,340,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | filename = 'programming.txt'
with open(filename, 'w') as file_object:
file_object.write("I love programming.\n")
file_object.write("I love creating new games.\n")
with open(filename, 'a') as file_object:
file_object.write("I also love finding meaning in a large datasets.\n")
file_object.write("I love creating apps that can run in a browser.\n") | [
"noreply@github.com"
] | chewbocky.noreply@github.com |
d91d149ee7f1f3e7311509b927757d609ff8d8eb | b7e9ab14db325c1463a1272ff8f08598605b8a59 | /manage.py | 8897bf0260b87945dce2932d5dec384136caa125 | [] | no_license | Jaron-Lane/terrace-server | f5074f0b5ef520f5f04e8ca0d356076869f574f3 | 7f4082431116f92b62e6e47a72961d42a390080f | refs/heads/main | 2023-04-18T03:15:18.415139 | 2021-05-05T22:09:31 | 2021-05-05T22:09:31 | 347,155,628 | 0 | 0 | null | 2021-05-05T22:09:32 | 2021-03-12T18:08:39 | Python | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'terrace.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"jaronwohlwend@icloud.com"
] | jaronwohlwend@icloud.com |
25e7327406a17a8417fb65524c93e6f254497534 | 3cd46ae87939ab863d2e2f66ae182678410c39dc | /gott.2.py | e62295103e026140df8c811a527e9fc56bf19fb2 | [] | no_license | Retryzzzz/DDoS | 279595b1e122b4c6cb56552ea0c376a31d9e9298 | 0ea63eacfbde527725cae7d19872665beae7c14b | refs/heads/main | 2023-07-07T08:48:37.372111 | 2021-08-03T23:08:17 | 2021-08-03T23:08:17 | 392,460,970 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | #!/usr/bin/python
import socket,random,sys,time
if len(sys.argv)==1:
sys.exit('Usage: f.py ip port(0=random) length(0=forever)')
def UDPFlood():
port = int(sys.argv[2])
randport=(True,False)[port==0]
ip = sys.argv[1]
dur = int(sys.argv[3])
clock=(lambda:0,time.clock)[dur>0]
duration=(1,(clock()+dur))[dur>0]
print('Atacando GoTTFlood: %s:%s Por %s Segundos'%(ip,port,dur or 'infinite'))
sock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
bytes=random._urandom(15000)
while True:
port=(random.randint(1,15000000),port)[randport]
if clock()<duration:
sock.sendto(bytes,(ip,port))
else:
break
print('DONE')
UDPFlood() | [
"noreply@github.com"
] | Retryzzzz.noreply@github.com |
4e3a31b6e2fd4886d76d75722feba2bfeb82bb89 | 584959d4d8ef3a30620bc7c1c77dea3ff0921385 | /explorer_bringup/explorer_bringup/manager.py | 55ff37aed634bf554f86e50a11fd2fc2b1dbeda0 | [] | no_license | MistyMoonR/ros2_explorer | 29b6fbd44d3ffe8110ec2f85c91efc0b125c7894 | aea13461513b5e47953f998fa96869072e2c1a62 | refs/heads/main | 2023-06-09T00:24:25.966183 | 2021-07-03T17:14:08 | 2021-07-03T17:14:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,372 | py | from action_msgs.msg import GoalStatus
from geometry_msgs.msg import PoseStamped
from explorer_interfaces.action import Wander
from explorer_interfaces.action import Discover
from nav2_msgs.action import NavigateToPose
from std_msgs.msg import Float32
from visualization_msgs.msg import MarkerArray
import rclpy
import math
from rclpy.action import ActionClient
from rclpy.node import Node
from rclpy.node import Node
from rcl_interfaces.srv import GetParameters
#ros2 action send_goal /navigate_to_pose nav2_msgs/action/NavigateToPose "{pose: {header: {stamp: {sec: 0}, frame_id: 'map'}, pose: {position: {x: 0.0, y: 0.0, z: 0.0}, orientation: {w: 1.0}}}}"
#ros2 param get /controller_server goal_checker.xy_goal_tolerance
class Manager(Node):
    """Coordinates autonomous exploration of an unknown map.

    Sends a goal to either the 'wander' or the 'discover' action server,
    periodically logs progress (elapsed time, explored map percentage and
    travelled distance), and sends the robot back home through
    ``NavigationClient`` once exploration succeeds.
    """

    def __init__(self):
        super().__init__('manager')
        # Action clients for the two exploration strategies.
        self._action_client_wanderer = ActionClient(self, Wander, 'wander')
        self._action_client_discover = ActionClient(self, Discover, 'discover')
        # Client used to send the robot back to its starting pose.
        self.navigation_client = NavigationClient()
        # Watchtower publishes the explored fraction of the map (0.0 - 1.0).
        self.watchtower_subscription = self.create_subscription(
            Float32, 'map_progress', self.watchtower_callback, 10)
        # Trajectory markers — presumably published by the SLAM node; each
        # marker carries a list of 2D points along the robot's path.
        self.trajectory_subscription = self.create_subscription(
            MarkerArray, 'trajectory_node_list', self.trajectory_callback, 10)
        timer_period = 5  # seconds between progress reports
        self.timer = self.create_timer(timer_period, self.timer_callback)
        self.map_explored = 0.01        # explored map area, in percent
        self.map_finished = False       # True once the exploration goal succeeded
        self.trajectory_distance = 0.0  # travelled distance, in meters
        # Kept as a plain list so it matches what trajectory_callback stores
        # (msg.markers is a list, not a MarkerArray).
        self.trajectory_markers = []
        self.start_time = self.get_clock().now()

    def print_feedback(self):
        """Log elapsed time, explored map percentage and travelled distance.

        Formatting is done on local copies so the numeric state attributes
        (`map_explored`, `trajectory_distance`) are never clobbered with
        strings between timer ticks.
        """
        self.trajectory_distance = self.compute_distance_from_markers(self.trajectory_markers)
        elapsed_ns = self.get_clock().now().nanoseconds - self.start_time.nanoseconds
        duration = str(int(elapsed_ns / (10 ** 9)))
        map_explored = "{:.2f}".format(self.map_explored)        # crop to 2 decimals
        distance = "{:.2f}".format(self.trajectory_distance)     # crop to 2 decimals
        self.get_logger().info("Duration: %s s - Map: %s - Distance: %s m " % (duration, map_explored, distance))

    def timer_callback(self):
        """Periodically report progress while exploration is still running."""
        if not self.map_finished:
            self.print_feedback()

    def watchtower_callback(self, msg):
        """Store the latest map progress, converted from fraction to percent."""
        self.map_explored = msg.data * 100  # Convert to %

    def trajectory_callback(self, msg):
        """Store the latest list of trajectory markers."""
        self.trajectory_markers = msg.markers

    def compute_distance_from_markers(self, markers):
        """Return the length in meters of the polyline described by ``markers``.

        The path is measured from the map origin (0, 0) through every point of
        every marker, in the order received.  Returns 0.0 (after a warning) if
        the markers are missing or malformed, so callers always get a number.
        """
        trajectory_distance = 0.0
        last_point = [0, 0]
        try:
            for marker in markers:
                for point in marker.points:
                    current_point = [point.x, point.y]
                    trajectory_distance += math.dist(last_point, current_point)
                    last_point = current_point
        except (TypeError, AttributeError):
            # Not iterable / wrong shape: no trajectory has arrived yet.
            self.get_logger().warn("Trajectory not received yet")
        return trajectory_distance

    def goal_response_callback_wanderer(self, future):
        """Chain the result callback once the wanderer accepts the goal."""
        goal_handle = future.result()
        if not goal_handle.accepted:
            self.get_logger().info('Exploration goal rejected')
            return
        self.get_logger().info('Exploration goal accepted')
        self._get_result_future = goal_handle.get_result_async()
        self._get_result_future.add_done_callback(self.get_result_callback_wanderer)

    def feedback_callback_wanderer(self, feedback):
        """Log feedback published by the wanderer action server."""
        self.get_logger().info('Received feedback: {0}'.format(feedback.feedback.sequence))

    def get_result_callback_wanderer(self, future):
        """On success, log final statistics and send the robot back home."""
        status = future.result().status
        if status == GoalStatus.STATUS_SUCCEEDED:
            self.map_finished = True
            self.get_logger().info('MAP SUCCESSFULLY EXPLORED')
            self.print_feedback()
            # Return to home
            self.navigation_client.send_goal()
        else:
            self.get_logger().info('Goal failed with status: {0}'.format(status))

    def send_goal_wanderer(self):
        """Request random-walk exploration until 90% of the map is covered."""
        self.get_logger().info('Waiting for action server...')
        self._action_client_wanderer.wait_for_server()
        goal_msg = Wander.Goal()
        goal_msg.map_completed_thres = 0.9
        self.get_logger().info('Sending wanderer goal request...')
        self.get_logger().info('Wandering until 90% map completed')
        self._send_goal_future = self._action_client_wanderer.send_goal_async(
            goal_msg,
            feedback_callback=self.feedback_callback_wanderer)
        self._send_goal_future.add_done_callback(self.goal_response_callback_wanderer)

    def goal_response_callback_discoverer(self, future):
        """Chain the result callback once the discoverer accepts the goal."""
        goal_handle = future.result()
        if not goal_handle.accepted:
            self.get_logger().info('Exploration goal rejected')
            return
        self.get_logger().info('Exploration goal accepted')
        self._get_result_future = goal_handle.get_result_async()
        self._get_result_future.add_done_callback(self.get_result_callback_discoverer)

    def feedback_callback_discoverer(self, feedback):
        """Log feedback published by the discoverer action server."""
        self.get_logger().info('Received feedback: {0}'.format(feedback.feedback.sequence))

    def get_result_callback_discoverer(self, future):
        """On success, log final statistics and send the robot back home."""
        status = future.result().status
        if status == GoalStatus.STATUS_SUCCEEDED:
            self.map_finished = True
            self.get_logger().info('MAP SUCCESSFULLY EXPLORED')
            self.print_feedback()
            # Return to home
            self.navigation_client.send_goal()
        else:
            self.get_logger().info('Goal failed with status: {0}'.format(status))

    def send_goal_discoverer(self):
        """Request frontier-based exploration until 97% of the map is covered."""
        self.get_logger().info('Waiting for action server...')
        self._action_client_discover.wait_for_server()
        goal_msg = Discover.Goal()
        goal_msg.strategy = 1
        goal_msg.map_completed_thres = 0.97
        self.get_logger().info('Sending discoverer goal request...')
        self.get_logger().info('Discovering until 97% map completed')
        self._send_goal_future = self._action_client_discover.send_goal_async(
            goal_msg,
            feedback_callback=self.feedback_callback_discoverer)
        self._send_goal_future.add_done_callback(self.goal_response_callback_discoverer)
class NavigationClient(Node):
    """Action client that sends the robot back to its home pose."""
    def __init__(self):
        super().__init__('navigation_client')
        self._action_client = ActionClient(self, NavigateToPose, 'navigate_to_pose')
    def goal_response_callback(self, future):
        """Handle the server's accept/reject answer for the navigation goal."""
        goal_handle = future.result()
        if not goal_handle.accepted:
            # Fixed: this previously logged 'Exploration goal rejected'
            # (copy-pasted from the exploration client); this client sends
            # navigation goals.
            self.get_logger().info('Navigation goal rejected')
            return
        self.get_logger().info('Navigation goal accepted')
        self._get_result_future = goal_handle.get_result_async()
        self._get_result_future.add_done_callback(self.get_result_callback)
    def get_result_callback(self, future):
        """Report whether the robot reached the home position."""
        result = future.result().result
        status = future.result().status
        if status == GoalStatus.STATUS_SUCCEEDED:
            self.get_logger().info('Arrived to home position')
        else:
            self.get_logger().info('Goal failed with status: {0}'.format(status))
    def send_goal(self):
        """Send a goal at the default (home) pose to the navigation server."""
        self.get_logger().info('Waiting for action server...')
        self._action_client.wait_for_server()
        goal_msg = NavigateToPose.Goal()
        goal_msg.pose.pose.orientation.w = 1.0  # identity orientation at the home position
        self.get_logger().info('Returning to base...')
        self._send_goal_future = self._action_client.send_goal_async(goal_msg)
        self._send_goal_future.add_done_callback(self.goal_response_callback)
def main(args=None):
    """Entry point: let the user pick an exploration strategy and run it.

    Raises ValueError when the selection is neither '1' nor '2'.
    """
    rclpy.init(args=args)
    manager = Manager()
    # (removed a dead ``select = 0`` store that was immediately overwritten)
    select = input('Select exploring algorithm:\n 1)Wanderer\n 2)Discoverer\n')
    if select == '1':
        manager.send_goal_wanderer()
        rclpy.spin(manager)
    elif select == '2':
        manager.send_goal_discoverer()
        rclpy.spin(manager)
    else:
        raise ValueError("Exploring algorithm not selected correctly")
# Run the exploration manager when executed as a script.
if __name__ == '__main__':
    main()
"d.garcialopez@hotmail.com"
] | d.garcialopez@hotmail.com |
b5ce86e5c7206e0947b0bcb912983f891ecd0ce1 | 6df76f8a6fcdf444c3863e3788a2f4b2c539c22c | /django code/p109/p109/asgi.py | dbabed799f89d9fe7ba5076c4cdafffb94c9e6d1 | [] | no_license | basantbhandari/DjangoProjectsAsDocs | 068e4a704fade4a97e6c40353edb0a4299bd9678 | 594dbb560391eaf94bb6db6dc07702d127010b88 | refs/heads/master | 2022-12-18T22:33:23.902228 | 2020-09-22T13:11:01 | 2020-09-22T13:11:01 | 297,651,728 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
ASGI config for p109 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings before the application is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'p109.settings')
# Module-level ASGI callable (named ``application``) that ASGI servers import.
application = get_asgi_application()
| [
"36443209+basantbhandari@users.noreply.github.com"
] | 36443209+basantbhandari@users.noreply.github.com |
4fdbf3ec62b7689a08b2f098eb3daa2d0e7c2129 | 6a81e186b9ece0d7e35d3836018ae65810d91caa | /pwngef/memoize.py | bd0ffbc4005262dd461a6c9895567225a6d018e6 | [
"MIT"
] | permissive | AEROBATlCS/pwngef | e3231b8df6ff3695ae213f3bf21cb7f02f170118 | 77619d7266f58523fda2c40a54255093389027e6 | refs/heads/master | 2020-12-13T01:12:21.276350 | 2020-01-10T13:26:33 | 2020-01-10T13:26:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,847 | py | #!/usr/bin/python
"""
Caches return values until some event in the inferior happens,
e.g. execution stops because of a SIGINT or breakpoint, or a
new library/objfile are loaded, etc.
"""
import collections
import functools
import sys
import pwngef.events
debug = False
class memoize(object):
    """
    Base memoization class. Do not use directly. Instead use one of classes defined below.

    Subclasses must provide a class-level ``caches`` list (instances register
    themselves there so they can be invalidated in bulk) and a ``kind`` label
    used by ``__repr__``.

    NOTE(review): keyword arguments are not part of the cache key, so calls
    differing only in kwargs share one cached value (pre-existing behavior).
    """
    caching = True

    def __init__(self, func):
        self.func = func
        self.cache = {}
        self.caches.append(self)  # ``caches`` must be provided by the subclass
        functools.update_wrapper(self, func)

    def __call__(self, *args, **kwargs):
        # Fixed: the old check ``isinstance(args, collections.Hashable)`` was
        # always true (args is a tuple, even when it holds unhashable items)
        # and ``collections.Hashable`` is removed in Python 3.10+. Probe
        # hashability directly instead, and bail out early so unhashable
        # argument tuples are neither double-computed nor looked up (which
        # raised TypeError before).
        try:
            hash(args)
        except TypeError:
            # Fixed: the message previously lost its %r argument, and kwargs
            # were dropped when calling through.
            print("Cannot memoize %r!" % (args,), file=sys.stderr)
            how = "Not memoizeable!"
            value = self.func(*args, **kwargs)
            if debug:
                print("%s: %s(%r)" % (how, self, args))
                print(".... %r" % (value,))
            return value
        if self.caching and args in self.cache:
            how = "Cached"
            value = self.cache[args]
        else:
            how = "Executed"
            value = self.func(*args, **kwargs)
            self.cache[args] = value
            if isinstance(value, list):
                print("Shouldnt cache mutable types! %r" % self.func.__name__)
        if debug:
            print("%s: %s(%r)" % (how, self, args))
            print(".... %r" % (value,))
        return value

    def __repr__(self):
        funcname = self.func.__module__ + '.' + self.func.__name__
        return "<%s-memoized function %s>" % (self.kind, funcname)

    def __get__(self, obj, objtype):
        # Descriptor protocol: bind ``obj`` so decorated methods keep ``self``.
        return functools.partial(self.__call__, obj)

    def clear(self):
        """Drop every cached value for this memoized function."""
        if debug:
            print("Clearing %s %r" % (self, self.cache))
        self.cache.clear()
class forever(memoize):
    """
    Memoizes forever - for a pwngef session or until `_reset` is called explicitly.
    """
    caches = []
    # Fixed: ``kind`` was missing, so memoize.__repr__ raised AttributeError
    # for instances of this class.
    kind = 'forever'

    @staticmethod
    def _reset():
        for obj in forever.caches:
            # Use clear() for consistency with the other policies (same
            # effect as obj.cache.clear(), plus the debug trace).
            obj.clear()
class reset_on_stop(memoize):
    # Caches are cleared whenever the inferior stops or its memory/registers
    # change.
    caches = []
    kind = 'stop'
    @staticmethod
    @pwngef.events.stop
    @pwngef.events.mem_changed
    @pwngef.events.reg_changed
    def __reset_on_stop(event):
        # Class-private name (mangled); exposed below as ``_reset``.
        for obj in reset_on_stop.caches:
            obj.cache.clear()
    _reset = __reset_on_stop
class reset_on_exit(memoize):
    # Caches are cleared when the inferior exits.
    caches = []
    kind = 'exit'
    @staticmethod
    @pwngef.events.exit
    def __reset_on_exit(event):
        for obj in reset_on_exit.caches:
            obj.clear()
    _reset = __reset_on_exit
class reset_on_objfile(memoize):
    # Caches are cleared when a new objfile (library/executable) is loaded.
    caches = []
    kind = 'objfile'
    @staticmethod
    @pwngef.events.new_objfile
    def __reset_on_objfile(event):
        for obj in reset_on_objfile.caches:
            obj.clear()
    _reset = __reset_on_objfile
class reset_on_start(memoize):
    # Intended to clear caches when the inferior (re)starts.
    caches = []
    kind = 'start'
    @staticmethod
    @pwngef.events.stop
    def __reset_on_start(event):
        # NOTE(review): registered on the *stop* event although the class is
        # named reset_on_start - confirm whether a start-type event decorator
        # exists and was intended here.
        for obj in reset_on_start.caches:
            obj.clear()
    _reset = __reset_on_start
class reset_on_cont(memoize):
    # Caches are cleared when execution continues.
    caches = []
    kind = 'cont'
    @staticmethod
    @pwngef.events.cont
    def __reset_on_cont(event):
        for obj in reset_on_cont.caches:
            obj.clear()
    _reset = __reset_on_cont
class while_running(memoize):
    # Caches only while the inferior is running; cleared (and caching
    # disabled again) when it exits.
    caches = []
    kind = 'running'
    caching = False
    @staticmethod
    def __start_caching(event):
        # NOTE(review): this enabler is not registered with any pwngef.events
        # decorator, so ``caching`` is never switched on through events -
        # confirm whether a decorator is missing here.
        while_running.caching = True
    @staticmethod
    @pwngef.events.exit
    def __reset_while_running(event):
        for obj in while_running.caches:
            obj.clear()
        while_running.caching = False
    _reset = __reset_while_running
def reset():
    """Clear every registered cache across all invalidation policies."""
    for policy in (forever, reset_on_stop, reset_on_exit, reset_on_objfile,
                   reset_on_start, reset_on_cont, while_running):
        policy._reset()
| [
"GH0st3rs@users.noreply.github.com"
] | GH0st3rs@users.noreply.github.com |
c357997cbb60325855930257b942fbd28f13b1d8 | 6130f811f3acfcb9f60939d8752bb035cadaf928 | /examples/adspygoogle/dfp/v201311/order_service/update_orders.py | 24750560181ff34e7c6f4beb06d0f96e0cb10b50 | [
"Apache-2.0"
] | permissive | gsembi/googleads-python-legacy-lib | f2e3197413c23c1192b11e54bf78c087f04a2baa | 9de235ffb65d014dd6ba22be50659c910eca5ae2 | refs/heads/master | 2021-01-23T23:38:28.076465 | 2014-10-14T20:38:20 | 2014-10-14T20:38:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,707 | py | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates the notes of a single order specified by ID.
To determine which orders exist, run get_all_orders.py."""
__author__ = 'Nicholas Chen'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
from adspygoogle.dfp import DfpUtils
# Placeholder: replace with the id of the order to update before running.
ORDER_ID = 'INSERT_ORDER_ID_HERE'
def main(client, order_id):
  """Updates the notes of the (non-archived) order with the given id.

  Args:
    client: an initialized DfpClient.
    order_id: id of the order whose notes should be updated.
  """
  # Initialize appropriate service.
  order_service = client.GetService('OrderService', version='v201311')
  # Create statement object to select a single order by an ID.
  values = [{
      'key': 'orderId',
      'value': {
          'xsi_type': 'NumberValue',
          'value': order_id
      }
  }]
  query = 'WHERE id = :orderId'
  statement = DfpUtils.FilterStatement(query, values)
  # Get orders by statement.
  response = order_service.GetOrdersByStatement(statement.ToStatement())[0]
  orders = response.get('results')
  if orders:
    # Update each local order object by changing its notes.
    updated_orders = []
    for order in orders:
      # Archived orders cannot be updated.
      if not Utils.BoolTypeConvert(order['isArchived']):
        order['notes'] = 'Spoke to advertiser. All is well.'
        updated_orders.append(order)
    # Update orders remotely.
    orders = order_service.UpdateOrders(updated_orders)
    # Display results.
    if orders:
      for order in orders:
        print ('Order with id \'%s\', name \'%s\', advertiser id \'%s\', and '
               'notes \'%s\' was updated.'
               % (order['id'], order['name'], order['advertiserId'],
                  order['notes']))
    else:
      print 'No orders were updated.'
  else:
    print 'No orders found to update.'
# Script entry point: build a client from the library config and run the update.
if __name__ == '__main__':
  # Initialize client object.
  dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
  main(dfp_client, ORDER_ID)
| [
"emeralddragon88@gmail.com"
] | emeralddragon88@gmail.com |
ac8f9f2c4057ab0711a7c50124f020d7bd010361 | 5479cdac56abc115d3b52fbd31814dfd27262da7 | /TaobaoSdk/Request/MarketingPromotionsGetRequest.py | 5cd18fc86d83e7ffabe7feb005d71b73b4f75e98 | [] | no_license | xuyaoqiang-maimiao/TaobaoOpenPythonSDK | d9d2be6a7aa27c02bea699ed5667a9a30bf483ab | c82cde732e443ecb03cfeac07843e884e5b2167c | refs/heads/master | 2021-01-18T07:49:57.984245 | 2012-08-21T07:31:10 | 2012-08-21T07:31:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,100 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief 根据商品ID查询卖家使用该第三方工具对商品设置的所有优惠策略
# @author wuliang@maimiaotech.com
# @date 2012-08-09 12:36:54
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
    """Return the absolute, normalized directory containing this file."""
    current_file = os.path.realpath(__file__)
    parent_dir = os.path.join(current_file, os.path.pardir)
    return os.path.normpath(parent_dir)
# Make the SDK package importable by putting its parent directory on sys.path
# (only when it is not already there).
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
if __modulePath not in sys.path:
    sys.path.insert(0, __modulePath)
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">根据商品ID查询卖家使用该第三方工具对商品设置的所有优惠策略</SPAN>
# <UL>
# </UL>
class MarketingPromotionsGetRequest(object):
    """Request object for the taobao.marketing.promotions.get API.

    Queries all promotion rules that the seller configured (through this
    third-party tool) for a given item id.
    """

    def __init__(self):
        # Fixed: the original called ``super(self.__class__, self)``, which
        # recurses infinitely as soon as this class is subclassed; name the
        # class explicitly instead.
        super(MarketingPromotionsGetRequest, self).__init__()
        # API method name.
        self.method = "taobao.marketing.promotions.get"
        # Timestamp; defaults to the request-creation time when not overridden.
        self.timestamp = int(time.time())
        # Required. List of Promotion fields to return, e.g. promotion_id,
        # promotion_title, item_id, status, tag_id.
        self.fields = None
        # Required. Numeric item id whose third-party promotion rules are queried.
        self.num_iid = None
        # Optional. Promotion status filter: ACTIVE or UNACTIVE; any other
        # (or absent) value queries all promotions.
        self.status = None
        # Optional. Tag id.
        self.tag_id = None
| [
"liyangmin@maimiaotech.com"
] | liyangmin@maimiaotech.com |
5589707f2d4cd3872f1c6f79dbc9afe2ad876a17 | 926444e11e546a3b27f0631ce919ce3cb21e5d91 | /SheCodeAfricaC2M/myproj/resullt.py | 7d216cc2efc3d12ef7f16a2ab5f9dbe502b3f366 | [] | no_license | VictoryWekwa/SCAMP-C2-Assignment-and-Projects | 3e1f4a2dc39e7e559d9bf8425555dd24af9165c9 | 3ec55c1db937732443d976fa73e3b4a79e4d40bf | refs/heads/master | 2022-11-27T07:40:25.246238 | 2020-07-28T21:46:29 | 2020-07-28T21:46:29 | 276,183,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | """ Print the list by importing the list name from the file in the module
by doing from module_name.filename import list_name(what you used to define your list) """
from animals.breeds import animal_type
print(animal_type) | [
"victorywekwa@gmail.com"
] | victorywekwa@gmail.com |
30cf5e66638583d9b43a956441ad69f207d90997 | 2807c9de1807502c84bd835d24d198029c59a951 | /init.py | 01b5bb7512dcbb3bbb63057d492bf69ef4b454c1 | [] | no_license | tokamaster/shield_opt | bcf856cce981ad0f08b8e7b9194c347796e717c8 | f5048cd7d1da55c186ab66e9e679adcb05f4e5ff | refs/heads/master | 2020-03-26T14:44:23.908449 | 2018-08-30T12:35:43 | 2018-08-30T12:35:43 | 145,002,489 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,581 | py | from halton_seq import *
from gp_tools import *
from tokamak import *
from tqdm import tqdm
from pygmo import *
import numpy as np
import itertools
import pickle
import json
import multiprocessing
import glob
import os
def init(outer, major, coil, halton, m_batches, m_error, neutrons):
number_of_datapoints=halton
lower_x=coil
upper_x=coil+outer-0.01
lower_y=coil+0.00001
upper_y=coil+outer-0.001
points_to_search=[]
leak=[]
leak_err=[]
points=[]
points_to_search_double_list = halton_sequence(number_of_datapoints, 2)
for x,y in zip(points_to_search_double_list[0],points_to_search_double_list[1]):
new_x = rescale(x, 0.0, 1.0, lower_x, upper_x)
new_y = rescale(y, 0.0, 1.0, lower_y, upper_y)
points_to_search.append([new_x,new_y])
j=0
for i in range(len(points_to_search)):
if points_to_search[i][1]>points_to_search[i][0]:
points.append(points_to_search[i])
j+=1
print('Sampling...', multiprocessing.cpu_count(),'cores.')
pbar = tqdm(total=j)
for i in range(len(points)):
leakage, leakage_error = shield(points[i], major, coil, False, outer, m_batches, m_error, neutrons)
leak.append(leakage)
leak_err.append(leakage_error)
pbar.update(1)
points = np.array(points)
pbar.close()
print('Sampling finished.')
coords = list(zip(points[:,0],points[:,1]))
print('GP model working...', multiprocessing.cpu_count(),'cores.')
GP = GpRegressor(coords, leak, leak_err)
return GP, points, leak, leak_err
| [
"emd542@york.ac.uk"
] | emd542@york.ac.uk |
5f8dc7f6ee3faba3e195cb8bd5c54cebab04e678 | 6eb282bbe4d43273b9e9cc8c0fa79400888cba9e | /tests/test_routes_image.py | e98c7912a9f48cdf2e6d390350ecba8cd232dc51 | [
"MIT"
] | permissive | jdelgad/memegen | d4300c707c5fee59aa2f5c5e0e8e606d699255ef | 0de8e5d6bfae75843bbe0d149c7796cb044e24a7 | refs/heads/master | 2020-12-25T08:51:20.523408 | 2016-06-10T13:50:46 | 2016-06-10T13:50:46 | 60,996,119 | 1 | 0 | null | 2016-06-13T00:30:19 | 2016-06-13T00:30:19 | null | UTF-8 | Python | false | false | 7,191 | py | # pylint: disable=unused-variable,misplaced-comparison-constant,expression-not-assigned
import os
import pytest
from expecter import expect
from .conftest import load
# Directory containing this test module.
TESTS = os.path.dirname(__file__)
# Repository root (parent of the tests directory).
ROOT = os.path.dirname(TESTS)
# Directory where the app writes generated meme images.
IMAGES = os.path.join(ROOT, "data", "images")
# Path of the "latest generated image" file served at /latest.jpg.
LATEST = os.path.join(IMAGES, "latest.jpg")
def describe_get():
    """Integration specs (pytest-describe style) for GET image routes."""
    # Nominal rendering: the route generates a JPEG and caches it on disk.
    def describe_visible():
        def with_nominal_text(client):
            path = os.path.join(IMAGES, 'iw', 'hello', 'world.jpg')
            if os.path.exists(path):
                os.remove(path)
            response = client.get("/iw/hello/world.jpg")
            assert 200 == response.status_code
            assert 'image/jpeg' == response.mimetype
            assert os.path.isfile(path)
        def with_only_1_line(client):
            response = client.get("/iw/hello.jpg")
            assert 200 == response.status_code
            assert 'image/jpeg' == response.mimetype
        @pytest.mark.xfail(os.name == 'nt', reason="Windows has a path limit")
        def with_lots_of_text(client):
            top = "-".join(["hello"] * 20)
            bottom = "-".join(["world"] * 20)
            response = client.get("/iw/" + top + "/" + bottom + ".jpg")
            assert 200 == response.status_code
            assert 'image/jpeg' == response.mimetype
    # Encoded ("hidden") text routes.
    def describe_hidden():
        def when_jpg(client):
            response = client.get("/_aXcJaGVsbG8vd29ybGQJ.jpg")
            assert 200 == response.status_code
            assert 'image/jpeg' == response.mimetype
    # The ?alt= query parameter selects an alternate template style or image URL.
    def describe_custom_style():
        def when_provided(client):
            response = client.get("/sad-biden/hello.jpg?alt=scowl")
            assert 200 == response.status_code
            assert 'image/jpeg' == response.mimetype
        def it_redirects_to_lose_alt_when_default_style(client):
            response = client.get("/sad-biden/hello.jpg?alt=default")
            assert 302 == response.status_code
            assert '<a href="/sad-biden/hello.jpg">' in \
                load(response, as_json=False)
        def it_redirects_to_lose_alt_when_unknown_style(client):
            response = client.get("/sad-biden/hello.jpg?alt=__unknown__")
            assert 302 == response.status_code
            assert '<a href="/sad-biden/hello.jpg">' in \
                load(response, as_json=False)
        def it_keeps_alt_after_template_redirect(client):
            response = client.get("/sad-joe/hello.jpg?alt=scowl")
            assert 302 == response.status_code
            assert '<a href="/sad-biden/hello.jpg?alt=scowl">' in \
                load(response, as_json=False)
        def it_keeps_alt_after_text_redirect(client):
            response = client.get("/sad-biden.jpg?alt=scowl")
            assert 302 == response.status_code
            assert '-vote.jpg?alt=scowl">' in \
                load(response, as_json=False)
        def when_url(client):
            url = "http://www.gstatic.com/webp/gallery/1.jpg"
            response = client.get("/sad-biden/hello.jpg?alt=" + url)
            expect(response.status_code) == 200
            expect(response.mimetype) == 'image/jpeg'
        def it_returns_an_error_with_non_image_urls(client):
            url = "http://example.com"
            response = client.get("/sad-biden/hello.jpg?alt=" + url)
            expect(response.status_code) == 415
        def it_redirects_to_lose_alt_when_unknown_url(client):
            url = "http://example.com/not/a/real/image.jpg"
            response = client.get("/sad-biden/hello.jpg?alt=" + url)
            expect(response.status_code) == 302
            expect(load(response, as_json=False)).contains(
                '<a href="/sad-biden/hello.jpg">')
        def it_redirects_to_lose_alt_when_bad_url(client):
            url = "http:invalid"
            response = client.get("/sad-biden/hello.jpg?alt=" + url)
            expect(response.status_code) == 302
            expect(load(response, as_json=False)).contains(
                '<a href="/sad-biden/hello.jpg">')
    # The ?font= query parameter selects the rendering font.
    def describe_custom_font():
        def when_provided(client):
            response = client.get("/iw/hello.jpg?font=impact")
            expect(response.status_code) == 200
            expect(response.mimetype) == 'image/jpeg'
        def it_redirects_on_unknown_fonts(client):
            response = client.get("/iw/hello.jpg?font=__unknown__")
            expect(response.status_code) == 302
            expect(load(response, as_json=False)).contains(
                '<a href="/iw/hello.jpg">')
    # /latest.jpg serves the most recently generated image, or a PNG placeholder.
    def describe_latest():
        def when_existing(client):
            open(LATEST, 'w').close()  # force the file to exist
            response = client.get("/latest.jpg")
            assert 200 == response.status_code
            assert 'image/jpeg' == response.mimetype
        def when_missing(client):
            try:
                os.remove(LATEST)
            except FileNotFoundError:
                pass
            response = client.get("/latest.jpg")
            assert 200 == response.status_code
            assert 'image/png' == response.mimetype
    # Non-canonical URLs redirect to their normalized form.
    def describe_redirects():
        def when_missing_dashes(client):
            response = client.get("/iw/HelloThere_World/How-areYOU.jpg")
            assert 302 == response.status_code
            assert '<a href="/iw/hello-there-world/how-are-you.jpg">' in \
                load(response, as_json=False)
        def when_no_text(client):
            response = client.get("/live.jpg")
            assert 302 == response.status_code
            assert '<a href="/live/_/do-it-live!.jpg">' in \
                load(response, as_json=False)
        def when_aliased_template(client):
            response = client.get("/insanity-wolf/hello/world.jpg")
            assert 302 == response.status_code
            assert '<a href="/iw/hello/world.jpg">' in \
                load(response, as_json=False)
        def when_jpeg_extension_without_text(client):
            response = client.get("/iw.jpeg")
            assert 302 == response.status_code
            assert '<a href="/iw.jpg">' in \
                load(response, as_json=False)
        def when_jpeg_extension_with_text(client):
            response = client.get("/iw/hello/world.jpeg")
            assert 302 == response.status_code
            assert '<a href="/iw/hello/world.jpg">' in \
                load(response, as_json=False)
    # Error-handling behavior.
    def describe_errors():
        def when_unknown_template(client):
            response = client.get("/make/sudo/give.me.jpg")
            assert 200 == response.status_code
            assert 'image/jpeg' == response.mimetype
            # unit tests ensure this is a placeholder image
        @pytest.mark.xfail(os.name == 'nt', reason="Windows has a path limit")
        def when_too_much_text_for_a_filename(client):
            top = "hello"
            bottom = "-".join(["world"] * 50)
            response = client.get("/iw/" + top + "/" + bottom + ".jpg")
            assert 414 == response.status_code
            assert {
                'message': "Filename too long."
            } == load(response)
| [
"jacebrowning@gmail.com"
] | jacebrowning@gmail.com |
8e3e89e77d9924d1ddabb24682fde6dd16748c3b | 8f034869364a9a4d1a87caa829f3fa054f5e4d0c | /C_IMS_FieldCalc_MonthNum.py | ca271ca11880f59aac730d63c872b795f7734942 | [] | no_license | MichaelIseli/IMS_DataMining | fc894c6bac7bd915b2035934270a751f0d36e112 | 2071dde1c25be8f4359f4ce86eaa03210136b63b | refs/heads/master | 2021-05-04T18:26:01.543232 | 2018-02-04T10:05:25 | 2018-02-04T10:05:25 | 120,173,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# C_IMS_FieldCalc_MonthNum.py
# Created on: 2017-11-06 14:53:54.00000
# (generated by ArcGIS/ModelBuilder)
# written for UNIFIL JGIS by Michael Iseli
# Description:
# ---------------------------------------------------------------------------
# Import arcpy module
import arcpy
# code to calculate the field month number for IMS incident analysis
# Local variables:
# Feature class holding the SOIR incident records (network path into a file GDB).
SOIR_Analysis = "H:\\12e_Analysis\\Analysis.gdb\\SOIR_View\\SOIR_Analysis"
# ModelBuilder-generated alias for the tool output below.
SOIR_Analysis__3_ = SOIR_Analysis
# Process: Calculate Field
# VB expression: running month number = (yearcount - 1) * 12 + month of incident.
arcpy.CalculateField_management(SOIR_Analysis, "MonthNum", "(([yearcount]-1)*12)+ [MonthInc]", "VB", "")
# return message that the code completed
print ("CALCULATION FIELD MONTH NUMBER COMPLETED")
| [
"michael.iseli3069@gmail.com"
] | michael.iseli3069@gmail.com |
25882e57a82a78e6c00def51293bb0314b77606e | e6bbdb2f3cfac2b32230ef309b9d85cf3cbc2487 | /DavinciCode/DavinciCode/davinciCode/game.py | 5ddc1a98d80aad27b58327e25e9e33d1f43b67f9 | [] | no_license | neuer712/CardGame_DavinciCode | d87642579b231ecdb736f30ab79aead7393ac85e | d6a6ce56e8f72e1a52af36152394d3f2ac7515bf | refs/heads/master | 2021-08-23T20:50:42.457195 | 2017-12-06T13:43:28 | 2017-12-06T13:43:28 | 113,063,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | '''
Created on 2017年12月4日
@author: 魏来
'''
from davinciCode import card
import functools
def printAllCardsStep(cardList, n):
    """Render text row *n* of every card in *cardList* on one terminal line."""
    for current_card in cardList:
        current_card.paintLine(n)
        print(' ', end='')  # gap between adjacent cards
    print('')  # terminate the row
def printAllCards(cardList):
    """Draw the full five-row face of every card in *cardList*."""
    for row in range(5):  # card faces are five text rows tall (rows 0..4)
        printAllCardsStep(cardList, row)
def prepareCards(preparedCardList, myCardList, hisOrHerCardList):
    """Deal four cards to each player from the top of the prepared deck.

    Cards are popped off *preparedCardList*, tagged with their owner, and the
    resulting hands are sorted in place.
    """
    # First four cards go to "my" hand...
    for _ in range(4):
        dealt = preparedCardList.pop()
        dealt.setBelong('my')
        myCardList.append(dealt)
    # ...and the next four to the opponent's hand.
    for _ in range(4):
        dealt = preparedCardList.pop()
        dealt.setBelong('hisOrHer')
        hisOrHerCardList.append(dealt)
    sortCard(myCardList)
    sortCard(hisOrHerCardList)
def sortCard(listToSort):
    """Order the cards in place by regards, then color, then number."""
    listToSort.sort(key=lambda c: (c.regards, c.color, c.number))
"neuer712@163.com"
] | neuer712@163.com |
b5e23c5c655c526644f144779516ce18dd7a353e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/97/usersdata/194/54823/submittedfiles/lecker.py | f56acb6233287f3cbe81bfd2b3aa0164580158d3 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | # -*- coding: utf-8 -*-
from __future__ import division
def lecker(lista):
    """Return True when *lista* contains exactly one local maximum ("peak").

    A peak is an element strictly greater than its neighbour(s); the first
    and last elements only have one neighbour each.

    Bug fixed: the original loop ran over ``range(len(lista) - 1)``, so the
    last element was never visited and its ``i == len - 1`` branch was
    unreachable - a peak at the end of the list was silently missed.
    """
    n = len(lista)
    if n < 2:
        # Degenerate lists have no comparable neighbours (matches the
        # original behaviour of returning False).
        return False
    cont = 0
    for i in range(n):
        if i == 0:
            if lista[i] > lista[i + 1]:
                cont = cont + 1
        elif i == n - 1:
            if lista[i] > lista[i - 1]:
                cont = cont + 1
        elif lista[i] > lista[i + 1] and lista[i] > lista[i - 1]:
            cont = cont + 1
    return cont == 1
# Read two lists of n floats each and report, for each list, whether it has
# exactly one local maximum ('S' = yes, 'N' = no).
a=[]
b=[]
n=int(input('quantidade de elementos:'))
for i in range(1,n+1,1):
    valor=float(input('elementos da lista 1:'))
    a.append(valor)
for i in range(1,n+1,1):
    valor=float(input('elementos da lista 2:'))
    b.append(valor)
if lecker(a):
    print('S')
else:
    print('N')
if lecker(b):
    print('S')
else:
    print('N')
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e3d12e4210c69e1c172dac13bea2b51e14587321 | a0447b03ad89a41a5c2e2073e32aeaf4d6279340 | /ironic/drivers/modules/noop_mgmt.py | 0efc089e9932c6c25902c35209c4712a7a348bac | [
"Apache-2.0"
] | permissive | openstack/ironic | 2ae87e36d7a62d44b7ed62cad4e2e294d48e061b | ab76ff12e1c3c2208455e917f1a40d4000b4e990 | refs/heads/master | 2023-08-31T11:08:34.486456 | 2023-08-31T04:45:05 | 2023-08-31T04:45:05 | 10,066,301 | 411 | 365 | Apache-2.0 | 2023-07-25T02:05:53 | 2013-05-14T22:28:24 | Python | UTF-8 | Python | false | false | 2,090 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""No-op management interface implementation."""
from oslo_log import log
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.drivers import base
LOG = log.getLogger(__name__)
class NoopManagement(base.ManagementInterface):
    """No-op management interface implementation.
    Using this implementation requires the boot order to be preconfigured
    to first try PXE booting, then fall back to hard drives.
    """
    def get_properties(self):
        # No driver-specific properties are required by this interface.
        return {}
    def validate(self, task):
        # Nothing to validate; any node is acceptable.
        pass
    def get_supported_boot_devices(self, task):
        # PXE first, then local disk - matches the assumed preconfigured order.
        return [boot_devices.PXE, boot_devices.DISK]
    def set_boot_device(self, task, device, persistent=False):
        # Reject unsupported devices; otherwise only log, since the actual
        # boot order is assumed to be configured out of band.
        supported = self.get_supported_boot_devices(task)
        if device not in supported:
            raise exception.InvalidParameterValue(
                _("Invalid boot device %(dev)s specified, supported are "
                  "%(supported)s.") % {'dev': device,
                                       'supported': ', '.join(supported)})
        LOG.debug('Setting boot device to %(target)s requested for node '
                  '%(node)s with noop management. Assuming the correct '
                  'boot order is already configured',
                  {'target': device, 'node': task.node.uuid})
    def get_boot_device(self, task):
        # Report the assumed configuration: persistent PXE-first boot.
        return {'boot_device': boot_devices.PXE, 'persistent': True}
    def get_sensors_data(self, task):
        # Sensors are not available without a management controller.
        raise NotImplementedError()
| [
"juliaashleykreger@gmail.com"
] | juliaashleykreger@gmail.com |
47074efbcc3c04a478c11b66f1027b0c4c55a16c | f68cdb00a704effd4250ab7a749b81f5ef7eee1e | /threshold.py | 1c4fa8f176d5f6996c5329ed32577e8cdb1ba9d4 | [] | no_license | adityajangir/openCV | 86669373b1ccc3efc685d0c2aa9bfac29cc8d94c | 25d642877a39def9b281586bcd25e912506c3974 | refs/heads/main | 2023-08-11T14:18:10.420249 | 2021-10-07T17:06:24 | 2021-10-07T17:06:24 | 414,689,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | import numpy as np
import cv2 as cv
# Demo of OpenCV global thresholding modes on a gradient image.
# NOTE(review): imread returns None if gradient.png is missing - no guard here.
grad = cv.imread('gradient.png')
# BINARY: pixels above 127 become 255, others 0.
_, th1 = cv.threshold(grad, 127, 255, cv.THRESH_BINARY)
# TRUNC: pixels above 127 are clamped to 127, others unchanged.
_, th2 = cv.threshold(grad, 127, 255, cv.THRESH_TRUNC)
# TOZERO: pixels at or below 127 become 0, others unchanged.
_, th3 = cv.threshold(grad, 127, 255, cv.THRESH_TOZERO)
cv.imshow('image', grad)
cv.imshow('th1', th1)
cv.imshow('th2', th2)
cv.imshow('th3', th3)
# Block until a key is pressed so the windows stay visible.
cv.waitKey(0)
cv.destroyAllWindows() | [
"noreply@github.com"
] | adityajangir.noreply@github.com |
8aac474ed41ab941cc830699ba847bd56a96843a | 7698a74a06e10dd5e1f27e6bd9f9b2a5cda1c5fb | /zzz.masterscriptsTEB_GIST/for005md.py | 5c2e1af3abcf60dbbdff817943ffd3a973318e9a | [] | no_license | kingbo2008/teb_scripts_programs | ef20b24fe8982046397d3659b68f0ad70e9b6b8b | 5fd9d60c28ceb5c7827f1bd94b1b8fdecf74944e | refs/heads/master | 2023-02-11T00:57:59.347144 | 2021-01-07T17:42:11 | 2021-01-07T17:42:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,330 | py | import sys
import copy
import math
import matplotlib
import scipy
import numpy
import pylab
def read_MD_outfile(filename, totE, kE, pE, time, temp, pres):
    """Parse one MD .out file and append per-step results to the given lists.

    Only lines between a "4. RESULTS" marker and the following
    "A V E R A G E S O V E R" marker are parsed.  "NSTEP" lines supply
    time (converted ps -> ns), temperature and pressure; "Etot" lines
    supply total, kinetic and potential energy.

    The six list arguments are mutated in place and also returned, so the
    function can be chained across several files (as main() does).
    """
    result_flag = False
    with open(filename, 'r') as fileh:
        for line in fileh:
            line = line.strip('\n')
            splitline = line.split()
            if "4. RESULTS" in line:
                result_flag = True
            elif "A V E R A G E S O V E R" in line:
                result_flag = False
            if result_flag:
                if "NSTEP" in line:
                    # Expect: NSTEP = n TIME(PS) = t TEMP(K) = T PRESS = p
                    # i.e. 12 tokens; the old guard (< 11) let 11-token
                    # lines through and crashed on splitline[11].
                    if len(splitline) < 12:
                        continue
                    time.append(float(splitline[5]) / 1000.0)  # ps -> ns
                    temp.append(float(splitline[8]))
                    pres.append(float(splitline[11]))
                if "Etot" in line:
                    # Expect: Etot = a EKtot = b EPtot = c (9 tokens); the
                    # old guard (< 8) let 8-token lines crash on splitline[8].
                    if len(splitline) < 9:
                        continue
                    totE.append(float(splitline[2]))
                    kE.append(float(splitline[5]))
                    pE.append(float(splitline[8]))
    return totE, kE, pE, time, temp, pres
def main():
    """Plot energies, temperature and pressure from a list of MD .out files.

    Usage: script <filelist> <output.png>, where <filelist> names one MD
    output file per line.  Produces a 5-panel figure (totE, kE, pE, temp,
    pres vs. time) saved as <output.png>.
    """
    if len(sys.argv) != 3:
        # Converted from Python-2 print statements to the print() function.
        print("error: this program takes 2 inputs:")
        print(" (1) filename that contains a list of md output files. If it doesn't exist do sth like this: ")
        print(" ls 5609039/*.out > tmpout.txt")
        print(" (2) filename for png plot")
        print(" This should be done automatically as part of 005md.checkMDrun.csh")
        exit()
    filelist = sys.argv[1]
    filenamepng = sys.argv[2]
    # read in file with a list of mdout files.
    print("filelist containing MD.out files: " + filelist)
    print("Plot will be saved as: " + filenamepng)
    filenamelist = []
    with open(filelist, 'r') as fileh:
        for line in fileh:
            tfile = line.strip("\n")
            # Only .out files are accepted; anything else aborts the run.
            if tfile.split(".")[-1] != "out":
                print("Error. %s is not a .out file" % tfile)
                exit()
            filenamelist.append(tfile)
    totE, kE, pE, time, temp, pres = [], [], [], [], [], []
    for filename in filenamelist:
        print("reading info from file: " + filename)
        # The lists are threaded through so data accumulates across files.
        totE, kE, pE, time, temp, pres = read_MD_outfile(
            filename, totE, kE, pE, time, temp, pres)
    # Plot with 5 panels; tabs [x_left, y_left, x_up, y_up].
    subpanel = [[0.2, 0.1, 0.3, 0.2], [0.6, 0.1, 0.3, 0.2],
                [0.2, 0.4, 0.3, 0.2], [0.6, 0.4, 0.3, 0.2],
                [0.2, 0.7, 0.3, 0.2], [0.6, 0.7, 0.3, 0.2]]
    descname = ["totE", "kE", "pE", "temp", "pres"]
    fig = pylab.figure(figsize=(8, 8))
    for i, desc in enumerate([totE, kE, pE, temp, pres]):
        axis = fig.add_axes(subpanel[i])
        axis.plot(time, desc, 'k-')
        axis.set_xlabel("time (ns)")
        axis.set_ylabel(descname[i])
    fig.savefig(filenamepng, dpi=600)


# Guarded so importing this module no longer triggers a full plotting run.
if __name__ == '__main__':
    main()
| [
"tbalius@gimel.cluster.ucsf.bkslab.org"
] | tbalius@gimel.cluster.ucsf.bkslab.org |
ac1861ccf453f5869239f42d8cd7408592ea1555 | 185b9fbdddd1163802683acd76f4e6afec82505a | /day07/11-字典的练习.py | 7704d2246212e6300f3187cb937b48493523a706 | [] | no_license | xiaofeng12138/python0622 | 63404efbe6faf95c95f99ea3473dcb5ff373c32b | e0900e73839d76e321a410707e10f296a59f44f6 | refs/heads/master | 2022-11-14T14:54:41.634488 | 2020-07-10T08:49:16 | 2020-07-10T08:49:16 | 274,008,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py |
# Find the letter(s) that occur most often in the list below.
from collections import Counter

chars = ['a', 'c', 'c', 'c', 'a', 'f', 'e', 'a', 'r', 'a']
# Counter replaces the hand-rolled "if key in dict" counting loop.
chars_count = Counter(chars)
# Highest occurrence count seen.
vs = max(chars_count.values())
# Print every letter that reaches the maximum (handles ties).
for k, v in chars_count.items():
    if v == vs:
        print(k)
| [
"760811650@qq.com"
] | 760811650@qq.com |
7bfaaf0db70cf0354f13f8bb62ab277d818e5da2 | 972dff80b81c78082e9022084ef75e954b204471 | /gui/system/alertmods/volume_status.py | 44a265cdb00c201d6b3499a3c0ac6c890b8daed5 | [] | no_license | TomHoenderdos/freenas | 34bbf9957ed5904f1296af5a57eedc95e04f1074 | 83ae0c1805ea7e57b70f436810eca3b9cc0c9825 | refs/heads/master | 2021-01-17T09:29:19.668079 | 2014-01-28T01:58:23 | 2014-01-28T01:58:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,124 | py | import re
import subprocess
from django.utils.translation import ugettext_lazy as _
from freenasUI.storage.models import Volume
from freenasUI.system.alert import alertPlugins, Alert, BaseAlert
# Alert plugin that reports the health of ZFS/UFS volumes (FreeNAS).
class VolumeStatusAlert(BaseAlert):
# Build a WARN alert for a volume that is neither HEALTHY nor DEGRADED,
# including the zpool status/action text when available.
def on_volume_status_not_healthy(self, vol, status, message):
if message:
return Alert(
Alert.WARN,
_('The volume %(volume)s status is %(status)s:'
' %(message)s') % {
'volume': vol,
'status': status,
'message': message,
}
)
else:
return Alert(
Alert.WARN,
_('The volume %(volume)s status is %(status)s') % {
'volume': vol,
'status': status,
}
)
# Hook for subclasses to disable the check; always on here.
def volumes_status_enabled(self):
return True
# NOTE(review): this logs but returns None, so run() below ends up
# returning [None] for DEGRADED volumes -- confirm whether a real Alert
# should be returned instead.
def on_volume_status_degraded(self, vol, status, message):
self.log(self.LOG_CRIT, _('The volume %s status is DEGRADED') % vol)
# Poll each decrypted ZFS/UFS volume and translate its state into alerts.
def run(self):
if not self.volumes_status_enabled():
return
for vol in Volume.objects.filter(vol_fstype__in=['ZFS', 'UFS']):
if not vol.is_decrypted():
continue
status = vol.status
message = ""
if vol.vol_fstype == 'ZFS':
# Ask zpool for the pool's health; "-x" prints only unhealthy pools.
p1 = subprocess.Popen(
["zpool", "status", "-x", vol.vol_name],
stdout=subprocess.PIPE
)
stdout = p1.communicate()[0]
if stdout.find("pool '%s' is healthy" % vol.vol_name) != -1:
status = 'HEALTHY'
else:
reg1 = re.search('^\s*state: (\w+)', stdout, re.M)
if reg1:
status = reg1.group(1)
else:
# The default case doesn't print out anything helpful,
# but instead coredumps ;).
status = 'UNKNOWN'
# Extract the human-readable status/action paragraphs from the
# zpool output.  NOTE(review): the pattern 'action+:' matches
# 'actio' plus one-or-more 'n' -- presumably 'action:' was meant.
reg1 = re.search(r'^\s*status: (.+)\n\s*action+:',
stdout, re.S | re.M)
reg2 = re.search(r'^\s*action: ([^:]+)\n\s*\w+:',
stdout, re.S | re.M)
if reg1:
msg = reg1.group(1)
msg = re.sub(r'\s+', ' ', msg)
message += msg
if reg2:
msg = reg2.group(1)
msg = re.sub(r'\s+', ' ', msg)
message += msg
# NOTE(review): these returns sit inside the volume loop, so only the
# first decrypted ZFS/UFS volume ever produces an alert -- confirm
# whether alerts should instead be collected for every volume.
if status == 'HEALTHY':
return [Alert(
Alert.OK, _('The volume %s status is HEALTHY') % (vol, )
)]
elif status == 'DEGRADED':
return [self.on_volume_status_degraded(vol, status, message)]
else:
return [
self.on_volume_status_not_healthy(vol, status, message)
]
# Register the plugin with the global alert registry at import time.
alertPlugins.register(VolumeStatusAlert)
| [
"wg@FreeBSD.org"
] | wg@FreeBSD.org |
c5a3cbfc2e0f6b6aa10fc2c33aa96d88cb8488e6 | 4e59088217a26b6da53ba51ec94183ca504ef7f4 | /letter_combo_phone.py | a677b5f6c54a3a5c2d017a0174e7c3ef5d77ff78 | [] | no_license | RaymondZW/lc600 | 5770a046a3e2a65d096c668052cc00f742e3487e | c0b927e1206b468e345b707f024259770852c8b8 | refs/heads/master | 2020-06-29T03:33:09.175337 | 2020-01-31T05:52:42 | 2020-01-31T05:52:42 | 200,427,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | from Type
class Solution:
    def letterCombinations(self, digits: str) -> "List[str]":
        """Return every letter string the digit sequence could spell on a
        phone keypad (LeetCode 17).

        The return annotation is a string literal because the module's
        import line is truncated ("from Type"), so the name ``List`` is
        undefined at class-definition time; a plain annotation would
        raise NameError before the class could even be created.
        """
        # Keypad mapping: digit -> candidate letters.
        phone = {
            '2': ['a', 'b', 'c'],
            '3': ['d', 'e', 'f'],
            '4': ['g', 'h', 'i'],
            '5': ['j', 'k', 'l'],
            '6': ['m', 'n', 'o'],
            '7': ['p', 'q', 'r', 's'],
            '8': ['t', 'u', 'v'],
            '9': ['w', 'x', 'y', 'z'],
        }

        def backtrack(combination, next_digits):
            # No digits left to consume: the combination is complete.
            if len(next_digits) == 0:
                output.append(combination)
            else:
                # Extend the partial combination with every letter mapped
                # to the next digit, then recurse on the remaining digits.
                for letter in phone[next_digits[0]]:
                    backtrack(combination + letter, next_digits[1:])

        output = []
        if digits:
            backtrack("", digits)
        return output
| [
"ali@truecar.com"
] | ali@truecar.com |
e7336ad6624b3e4ec4ea7407e1bcdfe869106bdb | c258ce2e179c362c75628c07a049854b6062d5f5 | /accountmanager/wsgi.py | c0bb7dfa1ad43f515c9dccb4abffe00c18c1d1e9 | [] | no_license | gajendrarahul/AccountM | 288594ef2a0dc5666061cfd44261ee82074dfba6 | 7429f32fbe0825e926dfb7e9d1854eff9cf03513 | refs/heads/master | 2023-04-21T02:49:23.206053 | 2021-04-18T16:11:03 | 2021-04-18T16:11:03 | 359,184,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for accountmanager project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application;
# setdefault() keeps any value already present in the environment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'accountmanager.settings')
# Module-level WSGI callable imported by application servers.
application = get_wsgi_application()
| [
"mahatogajen123@gmail.com"
] | mahatogajen123@gmail.com |
bf9b4ed3d132b7192cf4ec80e888a9ffdbc3a442 | 97497f7b0b52306c5115bfa92b240c787caeb221 | /python/algorithm1.py | aa0e8aee19d8d43a5eb6d3ab28db6ac0552b33d5 | [] | no_license | matheusportela/pagerank | 528fd9c0fe094742a3aff396a24905db5b749c83 | 5ded93155351346e69ef93cf0caf606191a93f0f | refs/heads/master | 2021-06-27T20:33:22.022666 | 2021-04-02T17:08:45 | 2021-04-02T17:08:45 | 224,736,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,610 | py | # Algorithm 1 implementation multithreaded
import queue
import threading
import time
import networkx as nx
NUM_ITERATIONS = 1000
# Wrapper around a networkx DiGraph that drives the threaded,
# message-passing PageRank implementation ("Algorithm 1").
class Graph:
def __init__(self):
self.graph = nx.DiGraph()
def __repr__(self):
return str(self.graph.edges)
def load(self, filename):
# Replace the current graph with one read from an edge-list file.
self.graph = nx.read_edgelist(filename, create_using=nx.DiGraph)
def calculate_pagerank(self, m=0.15):
# m: teleport/damping parameter.  One Queue per node for each role:
# "start a step", "step finished", and inbound rank contributions.
start_channels = {n: queue.Queue() for n in self.graph.nodes}
end_channels = {n: queue.Queue() for n in self.graph.nodes}
data_channels = {n: queue.Queue() for n in self.graph.nodes}
nodes = []
# Spawn one daemon worker thread per graph node.
for node in self.graph.nodes:
node = Node(
node_id=node,
neighbors=list(self.graph.neighbors(node)),
num_nodes=len(self.graph.nodes),
m=m,
start_channels=start_channels,
end_channels=end_channels,
data_channels=data_channels,
)
node.start()
nodes.append(node)
# NOTE(review): each node is signalled and then immediately awaited,
# so the rounds run sequentially node-by-node -- confirm intended.
for _ in range(NUM_ITERATIONS):
for node in self.graph.nodes:
start_channels[node].put(None)
end_channels[node].get()
# Final rank estimate per node id.
return {node.id: node.x for node in nodes}
class Node(threading.Thread):
    """Worker thread carrying the PageRank state of one graph node.

    The thread idles until its start channel yields a token, performs one
    send/accumulate step, then reports completion on its end channel.
    """

    def __init__(self, node_id, neighbors, num_nodes, m, start_channels,
                 end_channels, data_channels):
        super().__init__(daemon=True)
        self.id = node_id
        self.neighbors = neighbors
        self.m = m
        self.n = len(self.neighbors)
        # Initial rank mass and residual are both m / |V|.
        self.x = self.m / num_nodes
        self.z = self.m / num_nodes
        self.start_channels = start_channels
        self.end_channels = end_channels
        self.data_channels = data_channels

    def run(self):
        # Lockstep protocol: block for a "go" token, do one PageRank step,
        # then post a "done" token for the coordinator.
        while True:
            self.start_channels[self.id].get()
            self.run_pagerank_step()
            self.end_channels[self.id].put(None)

    def run_pagerank_step(self):
        self.send_data()
        self.update_pagerank()

    def send_data(self):
        # Broadcast (id, out-degree, residual) to every outgoing neighbor.
        for neighbor_id in self.neighbors:
            self.data_channels[neighbor_id].put((self.id, self.n, self.z))

    def update_pagerank(self):
        # Drain the inbox, folding every incoming residual into both the
        # accumulated rank and the fresh residual for the next round.
        new_x = self.x
        new_z = 0
        inbox = self.data_channels[self.id]
        while not inbox.empty():
            _sender, out_degree, residual = inbox.get()
            share = ((1 - self.m) / out_degree) * residual
            new_x += share
            new_z += share
        self.x = new_x
        self.z = new_z
def main():
# Demo entry point: load a fixed edge list and print the computed ranks.
graph = Graph()
graph.load('graphs/graph.txt')
print(graph.calculate_pagerank())
if __name__ == '__main__':
main()
"matheus.v.portela@gmail.com"
] | matheus.v.portela@gmail.com |
e8b3dc7fadbc4d65619f5fff6fe14f663eedb944 | ba78a499accc6011ff61488a189ab3c0e34db193 | /students/migrations/0001_initial.py | 680c6924e3c543307f0feed808458a61d0c20dc2 | [] | no_license | mikailyusuf/SDMS | c16e2aad7b778341bced4f0a784c5fdca4185a1a | 0ea60ae9396af8d8e3aa4de85ea05a8745510497 | refs/heads/master | 2023-01-08T19:46:26.349753 | 2020-11-08T06:11:55 | 2020-11-08T06:11:55 | 310,503,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,141 | py | # Generated by Django 3.0 on 2020-11-06 18:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Students',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=200)),
('last_name', models.CharField(max_length=200)),
('phone', models.CharField(max_length=200, null=True)),
('email', models.CharField(max_length=200, null=True)),
('date_created', models.DateTimeField(auto_now_add=True, null=True)),
('profile_pic', models.ImageField(blank=True, null=True, upload_to='')),
],
),
migrations.CreateModel(
name='Teachers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=100, null=True)),
('first_name', models.CharField(max_length=200, null=True)),
('last_name', models.CharField(max_length=200)),
('phone', models.CharField(max_length=200, null=True)),
('email', models.CharField(max_length=200, null=True)),
('date_created', models.DateTimeField(auto_now_add=True, null=True)),
('profile_pic', models.ImageField(blank=True, null=True, upload_to='')),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Result',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('math_exan', models.FloatField(null=True)),
('math_test', models.FloatField(null=True)),
('math_total', models.FloatField(null=True)),
('math_grade', models.CharField(max_length=10)),
('eng_exan', models.FloatField(null=True)),
('eng_test', models.FloatField(null=True)),
('eng_total', models.FloatField(null=True)),
('eng_grade', models.CharField(max_length=10)),
('physics_exan', models.FloatField(null=True)),
('physics_test', models.FloatField(null=True)),
('physics_total', models.FloatField(null=True)),
('physics_grade', models.CharField(max_length=10)),
('bio_exan', models.FloatField(null=True)),
('bio_test', models.FloatField(null=True)),
('bio_total', models.FloatField(null=True)),
('bio_grade', models.CharField(max_length=10)),
('chem_exan', models.FloatField(null=True)),
('chem_test', models.FloatField(null=True)),
('chem_total', models.FloatField(null=True)),
('chem_grade', models.CharField(max_length=10)),
('agric_exan', models.FloatField(null=True)),
('agric_test', models.FloatField(null=True)),
('agric_total', models.FloatField(null=True)),
('agric_grade', models.CharField(max_length=10)),
('civic_exan', models.FloatField(null=True)),
('civic_test', models.FloatField(null=True)),
('civic_total', models.FloatField(null=True)),
('civic_grade', models.CharField(max_length=10)),
('comment', models.TextField()),
('date_created', models.DateTimeField(auto_now_add=True, null=True)),
('student', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='students.Students')),
],
),
]
| [
"mikailkyusuf@gmail.com"
] | mikailkyusuf@gmail.com |
5eaaf6de6d1b6eaeb701046c4c0e10b4a1558aad | 4749b64b52965942f785b4e592392d3ab4fa3cda | /components/domain_reliability/bake_in_configs.py | 56f7aae0215b5a4b805ae758d7c78705be64cae9 | [
"BSD-3-Clause"
] | permissive | crosswalk-project/chromium-crosswalk-efl | 763f6062679727802adeef009f2fe72905ad5622 | ff1451d8c66df23cdce579e4c6f0065c6cae2729 | refs/heads/efl/crosswalk-10/39.0.2171.19 | 2023-03-23T12:34:43.905665 | 2014-12-23T13:44:34 | 2014-12-23T13:44:34 | 27,142,234 | 2 | 8 | null | 2014-12-23T06:02:24 | 2014-11-25T19:27:37 | C++ | UTF-8 | Python | false | false | 3,444 | py | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Takes the JSON files in components/domain_reliability/baked_in_configs and
encodes their contents as an array of C strings that gets compiled in to Chrome
and loaded at runtime."""
import json
import os
import sys
# A whitelist of domains that the script will accept when baking configs in to
# Chrome, to ensure incorrect ones are not added accidentally. Subdomains of
# whitelist entries are also allowed (e.g. maps.google.com, ssl.gstatic.com).
DOMAIN_WHITELIST = ('2mdn.net', 'admob.com', 'doubleclick.net', 'ggpht.com',
'google.cn', 'google.co.uk', 'google.com', 'google.com.au',
'google.de', 'google.fr', 'google.it', 'google.jp',
'google.org', 'google.ru', 'googleadservices.com',
'googleapis.com', 'googlesyndication.com',
'googleusercontent.com', 'googlevideo.com', 'gstatic.com',
'gvt1.com', 'youtube.com', 'ytimg.com')
CC_HEADER = """// Copyright (C) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// AUTOGENERATED FILE. DO NOT EDIT.
//
// (Update configs in components/domain_reliability/baked_in_configs and list
// configs in components/domain_reliability.gypi instead.)
#include "components/domain_reliability/baked_in_configs.h"
#include <stdlib.h>
namespace domain_reliability {
const char* const kBakedInJsonConfigs[] = {
"""
CC_FOOTER = """ NULL
};
} // namespace domain_reliability
"""
def domain_is_whitelisted(domain):
    """Return True if *domain* equals, or is a subdomain of, any entry in
    DOMAIN_WHITELIST."""
    for entry in DOMAIN_WHITELIST:
        if domain == entry or domain.endswith('.' + entry):
            return True
    return False
def quote_and_wrap_text(text, width=79, prefix=' "', suffix='"'):
    """Escape *text* as a C string literal, wrapped to *width* columns.

    Backslash, double quote and newline are escaped; the escaped text is
    broken into chunks so every emitted line, including its prefix and
    suffix decoration, fits within *width* columns.  An escape sequence
    is never split across lines.
    """
    max_length = width - len(prefix) - len(suffix)
    # Map of characters that need escaping inside a C string literal.
    escapes = {'"': '\\"', '\n': '\\n', '\\': '\\\\'}
    # Accumulate pieces and join once: the original repeated string
    # concatenation was quadratic in the length of the text.
    pieces = [prefix]
    line_length = 0
    for ch in text:
        ch = escapes.get(ch, ch)
        # Break the line before this (possibly multi-char) escape overflows.
        if line_length + len(ch) > max_length:
            pieces.append(suffix + "\n" + prefix)
            line_length = 0
        pieces.append(ch)
        line_length += len(ch)
    pieces.append(suffix)
    return "".join(pieces)
def main():
    """Validate the JSON configs on the command line and bake them into a
    generated C++ source file.

    argv: one or more JSON config paths followed by the output .cc path.
    Returns a process exit code: 0 on success, 1 on bad usage or any
    invalid/non-whitelisted config.
    """
    if len(sys.argv) < 3:
        # Converted from Python-2 "print >>" statements to print(file=...).
        print('Usage: %s <JSON files...> <output C++ file>' % sys.argv[0],
              file=sys.stderr)
        print(sys.modules[__name__].__doc__, file=sys.stderr)
        return 1
    cpp_code = CC_HEADER
    found_invalid_config = False
    for json_file in sys.argv[1:-1]:
        with open(json_file, 'r') as f:
            json_text = f.read()
        config = json.loads(json_text)
        if 'monitored_domain' not in config:
            print('%s: no monitored_domain found' % json_file,
                  file=sys.stderr)
            found_invalid_config = True
            continue
        domain = config['monitored_domain']
        if not domain_is_whitelisted(domain):
            print('%s: monitored_domain "%s" not in whitelist'
                  % (json_file, domain), file=sys.stderr)
            found_invalid_config = True
            continue
        cpp_code += " // " + json_file + ":\n"
        cpp_code += quote_and_wrap_text(json_text) + ",\n"
        cpp_code += "\n"
    cpp_code += CC_FOOTER
    if found_invalid_config:
        return 1
    # Text mode: cpp_code is a str; the original 'wb' would raise
    # TypeError on Python 3.
    with open(sys.argv[-1], 'w') as f:
        f.write(cpp_code)
    return 0


if __name__ == '__main__':
    sys.exit(main())
| [
"ttuttle@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98"
] | ttuttle@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98 |
a6fa412a4318bdd44745d738c2d2aa91cac8f9d2 | 277b9292d494db49836c93693257ecab87ebeb18 | /ynlu/sdk/evaluation/tests/test_entity_overlapping_ratio.py | 541945abbd95063fa16157907ee1d43443903ffe | [
"MIT"
] | permissive | hsiaoyi0504/yoctol-nlu-py | 90c2df421994006a49a4db7fe6f104d247201fbd | 4cec8d52ba3dd7827bddde152c95e814e533c0f2 | refs/heads/master | 2020-03-23T23:41:40.055683 | 2018-04-19T08:57:42 | 2018-04-19T08:57:42 | 142,249,617 | 0 | 0 | MIT | 2018-07-25T05:00:05 | 2018-07-25T05:00:05 | null | UTF-8 | Python | false | false | 6,265 | py | from unittest import TestCase
from ..entity_overlapping_score import (
single__entity_overlapping_score,
entity_overlapping_score,
)
class OverlappingScoreTestCase(TestCase):
def test_single__entity_overlapping_score_different_length(self):
with self.assertRaises(ValueError):
single__entity_overlapping_score(
utterance="12",
entity_prediction=[
{"value": "1", "entity": "a"},
{"value": "2", "entity": "b"},
],
y_true=["a", "b", "c"],
)
def test_single__entity_overlapping_score(self):
test_cases = [
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
"utterance": "123",
"y_true": ["4", "5", "6"],
"wrong_penalty_rate": 2.0,
},
-1.0,
),
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
"utterance": "123",
"y_true": ["4", "DONT_CARE", "6"],
"wrong_penalty_rate": 2.0,
},
-0.666666666667,
),
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
"utterance": "123",
"y_true": ["4", "2", "6"],
"wrong_penalty_rate": 2.0,
},
-0.33333333333333,
),
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
"utterance": "123",
"y_true": ["DONT_CARE", "DONT_CARE", "DONT_CARE"],
"wrong_penalty_rate": 2.0,
},
0.0,
),
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "DONT_CARE", "value": "2"},
{"entity": "DONT_CARE", "value": "3"},
],
"utterance": "123",
"y_true": ["DONT_CARE", "2", "3"],
"wrong_penalty_rate": 2.0,
},
0.0,
),
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
"utterance": "123",
"y_true": ["DONT_CARE", "2", "3"],
"wrong_penalty_rate": 2.0,
},
0.6666666666666667,
),
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
"utterance": "123",
"y_true": ["5", "2", "3"],
"wrong_penalty_rate": 2.0,
},
0.3333333333333333,
),
(
{
"entity_prediction": [
{"entity": "DONT_CARE", "value": "1"},
{"entity": "DONT_CARE", "value": "2"},
{"entity": "DONT_CARE", "value": "3"},
],
"utterance": "123",
"y_true": ["DONT_CARE", "DONT_CARE", "DONT_CARE"],
"wrong_penalty_rate": 2.0,
},
1.0,
),
(
{
"entity_prediction": [
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
"utterance": "123",
"y_true": ["1", "2", "3"],
"wrong_penalty_rate": 2.0,
},
1.0,
),
]
for i, test_case in enumerate(test_cases):
with self.subTest(i=i):
result = single__entity_overlapping_score(**test_case[0])
self.assertAlmostEqual(test_case[1], result)
def test_entity_overlapping_score_different_amount(self):
with self.assertRaises(ValueError):
entity_overlapping_score(
utterances=["123", "345"],
entity_predictions=[[{"a": 1}], [{"b": 2}]],
y_trues=[["a"], ["b"], ["c"]],
)
def test_entity_overlapping_score(self):
result = entity_overlapping_score(
utterances=["123", "123"],
entity_predictions=[
[
{"entity": "1", "value": "1"},
{"entity": "2", "value": "2"},
{"entity": "3", "value": "3"},
],
[
{"entity": "DONT_CARE", "value": "1"},
{"entity": "DONT_CARE", "value": "2"},
{"entity": "DONT_CARE", "value": "3"},
],
],
y_trues=[
["5", "2", "3"],
["DONT_CARE", "DONT_CARE", "DONT_CARE"],
],
)
self.assertAlmostEqual(
(0.33333333333 + 1.0) / 2,
result,
)
| [
"s916526000@gmail.com"
] | s916526000@gmail.com |
29c6e052fc913a1935ac72e2e9e70dd59a7c7e0d | 96741d21821e230588c9850cc232743a7521aae5 | /base/my_deque.py | 82ea217cbc541f78c8679d9fa798769dda69c622 | [] | no_license | luhu888/SeleniumProject | 687dce50ec138fcf0af104621e51790f078134f3 | 42546c52a039c91fdac0c1d4f9b0c6de96edaeb0 | refs/heads/master | 2020-04-07T04:35:52.296132 | 2018-11-18T08:24:45 | 2018-11-18T08:24:45 | 114,006,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,578 | py | # -*- coding: utf-8 -*-
# __author__=luhu
from collections import deque
def queue_list():  # deque demo: append adds at the right end, popleft takes from the left
queue = deque(["Eric", "John", "Michael"])  # double-ended queue
new_queue = queue.copy()  # shallow copy
queue.append("Terry")  # add one element on the right
queue.appendleft("Graham")  # add one element on the left
queue.popleft()  # take the leftmost element, removing it from the queue
queue.pop()  # take the rightmost element, removing it from the queue
# queue.clear()  # empty the queue
queue.extend([4, ])  # extend the right end with the elements of a list
print(type(queue))
print(queue.count('Michael'))  # number of occurrences of the given element
print("new_queue:", new_queue, "queue:", queue)
try:
print('Michael的索引位置为:', queue.index("Michael", 3, 5))  # find an element's index within a slice
except ValueError:
print("Michael不在索引区间内")
queue.insert(-5, '哈哈')  # an out-of-range index inserts at the end (or the start, depending on sign)
print('插入后的queue为:', queue)
try:
queue.remove('head')
print(queue)
except ValueError:
print("要删除的元素不存在")
queue.reverse()  # reverse the queue in place
print("队列反转后为:", queue)
queue.rotate(3)  # rotate right-end elements to the left the given number of times (default 1)
print("右边元素放到左边3次后:", queue)
if __name__ == '__main__':
queue_list()
| [
"luhu0105@gmail.com"
] | luhu0105@gmail.com |
da93d260c8ed7beb18b44b918ab7e3cf84ba3b3e | 0e5abee2b9224acab825bc71bb92a675d8a98209 | /exp_carla_static.py | bb8cb652e1a4f84e4770868c530252f3524bb4e1 | [] | no_license | kanglicheng/neural_3d_mapping | f0a1ba226a72613c5d0b9f7e7ec70da972b3b9ab | 5b3851569eae3f35fec2189d0a30e01ff8e913d2 | refs/heads/master | 2022-07-12T21:30:57.708257 | 2020-05-19T03:04:55 | 2020-05-19T03:04:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,851 | py | from exp_base import *
############## choose an experiment ##############
current = 'builder'
current = 'trainer'
# current = 'tester_basic'
mod = '"sta00"' # nothing; builder
mod = '"sta01"' # just prep and return
mod = '"sta02"' # again, fewer prints
mod = '"sta03"' # run feat3d forward; drop the sparse stuff
mod = '"sta04"' # really run it
mod = '"sta05"' # again
mod = '"sta06"' # warp; show altfeat
mod = '"sta07"' # ensure either ==1 or a==b
mod = '"sta08"' # try emb
mod = '"sta09"' # train a while
mod = '"sta10"' #
mod = '"sta11"' # show altfeat input
mod = '"sta12"' #
mod = '"sta13"' # train occ
mod = '"sta14"' # move things to R
mod = '"sta14"' # do view
mod = '"sta15"' # encode in X0
mod = '"sta16"' #
mod = '"sta17"' # show rgb_camX1, so i can understand the inbound idea better
mod = '"sta18"' # show inbound separately
mod = '"sta19"' # allow 0 to 32m
mod = '"sta20"' # builder
mod = '"sta21"' # show occ_memXs
mod = '"sta22"' # wider bounds please
mod = '"sta23"' # properly combine bounds with centorid
mod = '"sta24"' # train a hwile
mod = '"sta25"' # same but encode in Xs and warp to R then X0
mod = '"sta26"' # use resnet3d
mod = '"sta27"' # skipnet; randomize the centroid a bit
mod = '"sta28"' # wider rand, and inbound check
mod = '"sta29"' # handle the false return
mod = '"sta30"' # add emb2d
mod = '"sta31"' # freeze the slow model
mod = '"sta32"' # 2d parts
mod = '"sta33"' # fewer prints
mod = '"sta34"' # nice suffixes; JUST 2d learning
mod = '"sta35"' # fix bug
mod = '"sta36"' # better summ suffix
mod = '"sta37"' # tell me about neg pool size
mod = '"sta38"' # fix small bug in the hyp lettering
mod = '"sta39"' # cleaned up hyps
mod = '"sta40"' # weak smooth coeff on feats
mod = '"sta41"' # run occnet on altfeat instead
mod = '"sta42"' # redo
mod = '"sta43"' # replication padding
mod = '"sta44"' # pret 170k 02_s2_m128x32x128_p64x192_1e-3_F2_d32_F3_d32_s.01_O_c1_s.01_V_d32_e1_E2_e.1_n4_d32_c1_E3_n2_c1_mags7i3t_sta41
mod = '"sta45"' # inspect and maybe fix the loading; log10
mod = '"sta46"' # init slow in model base after saverloader
mod = '"sta47"' # zero padding; log500
mod = '"sta48"' # replication padding; log500
mod = '"sta49"' # repeat after deleting some code
############## exps ##############
exps['builder'] = [
'carla_static', # mode
'carla_multiview_10_data', # dataset
'carla_bounds',
'3_iters',
'lr0',
'B1',
'no_shuf',
'train_feat3d',
# 'train_occ',
# 'train_view',
# 'train_emb2d',
# 'train_emb3d',
'log1',
]
exps['trainer'] = [
'carla_static', # mode
'carla_multiview_train_data', # dataset
'carla_bounds',
'300k_iters',
'lr3',
'B2',
'pretrained_feat3d',
'pretrained_occ',
'train_feat3d',
'train_emb3d',
'train_occ',
# 'train_view',
# 'train_feat2d',
# 'train_emb2d',
'log500',
]
############## groups ##############
groups['carla_static'] = ['do_carla_static = True']
groups['train_feat2d'] = [
'do_feat2d = True',
'feat2d_dim = 32',
# 'feat2d_smooth_coeff = 0.1',
]
groups['train_feat3d'] = [
'do_feat3d = True',
'feat3d_dim = 32',
'feat3d_smooth_coeff = 0.01',
]
groups['train_occ'] = [
'do_occ = True',
'occ_coeff = 2.0',
'occ_smooth_coeff = 0.1',
]
groups['train_view'] = [
'do_view = True',
'view_depth = 32',
'view_l1_coeff = 1.0',
]
groups['train_emb2d'] = [
'do_emb2d = True',
# 'emb2d_smooth_coeff = 0.01',
'emb2d_ce_coeff = 1.0',
'emb2d_l2_coeff = 0.1',
'emb2d_mindist = 32.0',
'emb2d_num_samples = 4',
# 'do_view = True',
# 'view_depth = 32',
# 'view_l1_coeff = 1.0',
]
groups['train_emb3d'] = [
'do_emb3d = True',
'emb3d_ce_coeff = 0.1',
# 'emb3d_mindist = 8.0',
# 'emb3d_l2_coeff = 0.1',
'emb3d_num_samples = 2',
]
############## datasets ##############
# dims for mem
SIZE = 32
Z = int(SIZE*4)
Y = int(SIZE*1)
X = int(SIZE*4)
K = 2 # how many objects to consider
N = 8 # how many objects per npz
S = 2
H = 128
W = 384
# H and W for proj stuff
PH = int(H/2.0)
PW = int(W/2.0)
dataset_location = "/projects/katefgroup/datasets/carla/processed/npzs"
groups['carla_multiview_10_data'] = [
'dataset_name = "carla"',
'H = %d' % H,
'W = %d' % W,
'trainset = "mags7i3ten"',
'trainset_format = "multiview"',
'trainset_seqlen = %d' % S,
'dataset_location = "%s"' % dataset_location,
'dataset_filetype = "npz"'
]
groups['carla_multiview_train_data'] = [
'dataset_name = "carla"',
'H = %d' % H,
'W = %d' % W,
'trainset = "mags7i3t"',
'trainset_format = "multiview"',
'trainset_seqlen = %d' % S,
'dataset_location = "%s"' % dataset_location,
'dataset_filetype = "npz"'
]
groups['carla_multiview_test_data'] = [
'dataset_name = "carla"',
'H = %d' % H,
'W = %d' % W,
'testset = "mags7i3v"',
'testset_format = "multiview"',
'testset_seqlen = %d' % S,
'dataset_location = "%s"' % dataset_location,
'dataset_filetype = "npz"'
]
groups['carla_multiview_train_val_data'] = [
'dataset_name = "carla"',
'H = %d' % H,
'W = %d' % W,
'trainset = "mags7i3t"',
'trainset_format = "multiview"',
'trainset_seqlen = %d' % S,
'valset = "mags7i3v"',
'valset_format = "multiview"',
'valset_seqlen = %d' % S,
'dataset_location = "%s"' % dataset_location,
'dataset_filetype = "npz"'
]
############## verify and execute ##############
def _verify_(s):
varname, eq, val = s.split(' ')
assert varname in globals()
assert eq == '='
assert type(s) is type('')
# Resolve the selected experiment: echo its name, then exec every
# "name = value" hyperparameter string it references.
print(current)
assert current in exps
for group in exps[current]:
print(" " + group)
assert group in groups
for s in groups[group]:
print(" " + s)
# Validate each assignment string before executing it at module scope.
_verify_(s)
exec(s)
# Finally apply the run-specific "mod" tag the same way (reuses s).
s = "mod = " + mod
_verify_(s)
exec(s)
| [
"aharley@cmu.edu"
] | aharley@cmu.edu |
6294fa2e9e61aaff0b4702c9dbdad0046dc5fdc9 | 5222a4b4c14ed71b8520984f944d0ebee0b0a694 | /eliav_one/eliav_one/settings.py | 9fb4d388d976b0b46d9f398c5f2e416bffeeb67d | [] | no_license | eliavco/eliav_one | afa2e5237cf294d4fe782f44ba6e0babb9ad2466 | 259d1f8d6138b646769f34a1c0b724f00e0d3313 | refs/heads/master | 2020-04-06T10:49:29.797554 | 2018-11-13T15:26:39 | 2018-11-13T15:26:39 | 157,393,198 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,795 | py | """
Django settings for eliav_one project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR = os.path.join(BASE_DIR,'templates')
STATICFILES = os.path.join(BASE_DIR,'static')
MEDIAFILES = os.path.join(BASE_DIR,'media')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '!h=f-h08-qjdv5-vq2pck-(v7jdp%*6^_yyolk5h%76=hm^q=2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# When DEBUG is False, Django refuses requests unless the serving
# host/domain names are listed here.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home',
'user_data',
'form',
'ordinary_form',
'sign_up',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'eliav_one.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR,MEDIAFILES],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'eliav_one.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
STATICFILES,
]
# media
MEDIA_URL = '/media/'
MEDIA_ROOT = MEDIAFILES
LOGIN_URL = 'sign_up/user_login'
| [
"eliav.s.cohen@gmail.com"
] | eliav.s.cohen@gmail.com |
0ee058a8c35365b396ec2e5e7d2f861fe6a0868b | 598b6a91fbefc70fd81474ab9a1775d8701b8187 | /main.py | de067f4cf702c1e32521138ae049afdf4365b75e | [] | no_license | JeffBohn/PluralsightPythonCourse | 144a5c022dd0b6d0977f8aeb5dd68df367b99832 | 38cbe00b588886c0ced1fc4c4e4a583d91ce8e80 | refs/heads/main | 2023-01-03T01:36:57.804491 | 2020-11-02T13:11:15 | 2020-11-02T13:11:15 | 308,983,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from hs_student import *
# mark = Student("Mark")
# print(mark)
# print(students)
print(Student.school_name)
james = HighSchoolStudent("james")
print(james.get_name_capitalize())
| [
"53544162+JeffBohn@users.noreply.github.com"
] | 53544162+JeffBohn@users.noreply.github.com |
7ffcb76ec73333e2ac89d9c1b17839de77716f5e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/420/usersdata/329/87976/submittedfiles/exe11.py | 715adcb70c57813e5b1796b83f844bcbc85024f3 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | # -*- coding: utf-8 -*-
n = int(input("digite um numero com 8 algarismos: "))
soma = 0
# An 8-digit number lies in [10000000, 99999999].  The original guard
# (n < 10000000 and n > 9999999) could never hold for any integer, and the
# stray "else:" after the print was a syntax error.
if 10000000 <= n <= 99999999:
    # Sum the digits by repeatedly stripping the last one.
    while n > 0:
        resto = n % 10
        n = (n - resto) // 10
        soma = soma + resto
    print('%d' % soma)
else:
    print("NAO SEI")
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
0dafc6f8717413f81c4f8c71164ad3eddb78bc1a | 7919b620d26e135e3a508bb8dd3346edc70e5cbf | /setup.py | c14c6733304fbf4274be69373aec0bc77efa2777 | [] | no_license | lhuett/flask-multiauth | 7218ace9d83e6d7feb6ea3e3b8961bc9811aa54a | 51de71ce9b9c34412b561d9b9ecb89b19f075ca6 | refs/heads/master | 2021-09-13T07:59:53.236265 | 2018-04-26T23:10:32 | 2018-04-26T23:10:32 | 114,010,536 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | import os
from setuptools import setup, find_packages
__here__ = os.path.dirname(os.path.abspath(__file__))
runtime = {
'requests',
'flask_session',
'flask',
'Jinja2',
'flask-ldap',
'kerberos',
'pycrypto',
}
develop = {
'flake8',
'coverage',
'pytest',
'pytest-cov',
'Sphinx',
'sphinx_rtd_theme',
}
if __name__ == "__main__":
    # The package name lives in a variable so build tooling can patch it
    # (e.g. an rpm rename) before setup() runs.
    name = "flask-multiauth"
    setup(
        name=name,
        version="0.0.1",
        description="Insights RuleAnalysis Services",
        packages=find_packages(),
        include_package_data=True,
        py_modules=['flask_multiauth'],
        install_requires=list(runtime),
        extras_require={
            'develop': list(runtime | develop),
            'optional': ['python-cjson', 'python-logstash', 'python-statsd', 'watchdog'],
        },
    )
| [
"lhuett@redhat.com"
] | lhuett@redhat.com |
924b4c29c9de01e2ec92111017dfa404c8e2db3b | bc2b5b777a4bbcc7b52f868ab93085e9ab9d1908 | /tools/processing/mesh_annotate/mesh_annotate/Expression.py | 54b6a6382e0fc7746a1134b7b74f0792cb66fc29 | [] | no_license | hewhocannotbetamed/edge | d732ee072720fdfce666f945a4970d8755a4dc20 | 85c9ed210f8dadcd4dab44454f7b5b9f2636d4b0 | refs/heads/master | 2022-04-24T10:15:08.379129 | 2019-07-31T13:13:36 | 2019-07-31T13:13:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,363 | py | ##
# @file This file is part of EDGE.
#
# @author Alexander Breuer (anbreuer AT ucsd.edu)
#
# @section LICENSE
# Copyright (c) 2018, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# @section DESCRIPTION
# Annotation of entities through expressions.
##
import numpy
##
# Evaluates the expression for the given points
#
# @param i_nVars number of variables.
# @param i_expr expression which gets evaluated.
# @param i_ptCrds coordinates of the points.
##
def evalPt( i_nVars,
            i_expr,
            i_ptCrds ):
  """Evaluates the expression at every point.

     The expression sees the point's coordinates as x, y, z and must
     assign the i_nVars result values to 'q'.  Returns an
     (i_nVars x #points) float64 array."""
  l_code = compile( i_expr, '<string>', 'exec' )
  l_res = numpy.zeros( (i_nVars, len(i_ptCrds)), dtype='float64' )
  # one evaluation per point: expose x/y/z, run the code, read back 'q'
  for l_id, l_crds in enumerate( i_ptCrds ):
    l_scope = { 'x': l_crds[0],
                'y': l_crds[1],
                'z': l_crds[2] }
    exec( l_code, l_scope )
    l_res[:, l_id] = l_scope['q']
  return l_res
##
# Evaluates the expression for the given entities.
# The average values of the vertices will be used.
#
# @param i_nVars number of variables.
# @param i_expr expression which gets evaluated.
# @param i_enVe vertices adjacent to the entities.
# @param i_veCrds coordinates of the vertices.
##
def evalVe( i_nVars,
            i_expr,
            i_enVe,
            i_veCrds ):
  """Evaluates the expression for every entity.

     The expression is evaluated at each vertex adjacent to the entity
     (coordinates exposed as x, y, z; results assigned to 'q') and the
     per-vertex results are averaged.  Returns an
     (i_nVars x #entities) float64 array."""
  l_code = compile( i_expr, '<string>', 'exec' )
  l_res = numpy.zeros( (i_nVars, len(i_enVe)), dtype='float64' )
  for l_en, l_ves in enumerate( i_enVe ):
    # accumulate the vertex evaluations of this entity
    l_acc = numpy.zeros( i_nVars )
    for l_ve in l_ves:
      l_scope = { 'x': i_veCrds[l_ve, 0],
                  'y': i_veCrds[l_ve, 1],
                  'z': i_veCrds[l_ve, 2] }
      exec( l_code, l_scope )
      l_acc += l_scope['q']
    # arithmetic mean over the entity's vertices
    l_res[:, l_en] = l_acc / len( l_ves )
  return l_res
"anbreuer@ucsd.edu"
] | anbreuer@ucsd.edu |
85872ca81454d863e57c47043a303a247a75e42d | 2a8abd5d6acdc260aff3639bce35ca1e688869e9 | /telestream_cloud_qc_sdk/telestream_cloud_qc/models/frame_aspect_ratio_test.py | e350d1d1f34c6e4931d4824fe21895777c5735ce | [
"MIT"
] | permissive | Telestream/telestream-cloud-python-sdk | 57dd2f0422c83531e213f48d87bc0c71f58b5872 | ce0ad503299661a0f622661359367173c06889fc | refs/heads/master | 2021-01-18T02:17:44.258254 | 2020-04-09T11:36:07 | 2020-04-09T11:36:07 | 49,494,916 | 0 | 0 | MIT | 2018-01-22T10:07:49 | 2016-01-12T11:10:56 | Python | UTF-8 | Python | false | false | 6,377 | py | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class FrameAspectRatioTest(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
          and the value is attribute type.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
    """
    # Maps each model attribute to its OpenAPI type string.
    openapi_types = {
        'frame_aspect_ratio_numerator': 'int',
        'frame_aspect_ratio_denominator': 'int',
        'reject_on_error': 'bool',
        'checked': 'bool'
    }
    # Maps each model attribute to its JSON key in the API payload.
    attribute_map = {
        'frame_aspect_ratio_numerator': 'frame_aspect_ratio_numerator',
        'frame_aspect_ratio_denominator': 'frame_aspect_ratio_denominator',
        'reject_on_error': 'reject_on_error',
        'checked': 'checked'
    }
    def __init__(self, frame_aspect_ratio_numerator=None, frame_aspect_ratio_denominator=None, reject_on_error=None, checked=None, local_vars_configuration=None):  # noqa: E501
        """FrameAspectRatioTest - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to a default client configuration when none is supplied.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._frame_aspect_ratio_numerator = None
        self._frame_aspect_ratio_denominator = None
        self._reject_on_error = None
        self._checked = None
        self.discriminator = None
        # Only assign attributes that were explicitly provided.
        if frame_aspect_ratio_numerator is not None:
            self.frame_aspect_ratio_numerator = frame_aspect_ratio_numerator
        if frame_aspect_ratio_denominator is not None:
            self.frame_aspect_ratio_denominator = frame_aspect_ratio_denominator
        if reject_on_error is not None:
            self.reject_on_error = reject_on_error
        if checked is not None:
            self.checked = checked
    @property
    def frame_aspect_ratio_numerator(self):
        """Gets the frame_aspect_ratio_numerator of this FrameAspectRatioTest.  # noqa: E501
        :return: The frame_aspect_ratio_numerator of this FrameAspectRatioTest.  # noqa: E501
        :rtype: int
        """
        return self._frame_aspect_ratio_numerator
    @frame_aspect_ratio_numerator.setter
    def frame_aspect_ratio_numerator(self, frame_aspect_ratio_numerator):
        """Sets the frame_aspect_ratio_numerator of this FrameAspectRatioTest.
        :param frame_aspect_ratio_numerator: The frame_aspect_ratio_numerator of this FrameAspectRatioTest.  # noqa: E501
        :type: int
        """
        self._frame_aspect_ratio_numerator = frame_aspect_ratio_numerator
    @property
    def frame_aspect_ratio_denominator(self):
        """Gets the frame_aspect_ratio_denominator of this FrameAspectRatioTest.  # noqa: E501
        :return: The frame_aspect_ratio_denominator of this FrameAspectRatioTest.  # noqa: E501
        :rtype: int
        """
        return self._frame_aspect_ratio_denominator
    @frame_aspect_ratio_denominator.setter
    def frame_aspect_ratio_denominator(self, frame_aspect_ratio_denominator):
        """Sets the frame_aspect_ratio_denominator of this FrameAspectRatioTest.
        :param frame_aspect_ratio_denominator: The frame_aspect_ratio_denominator of this FrameAspectRatioTest.  # noqa: E501
        :type: int
        """
        self._frame_aspect_ratio_denominator = frame_aspect_ratio_denominator
    @property
    def reject_on_error(self):
        """Gets the reject_on_error of this FrameAspectRatioTest.  # noqa: E501
        :return: The reject_on_error of this FrameAspectRatioTest.  # noqa: E501
        :rtype: bool
        """
        return self._reject_on_error
    @reject_on_error.setter
    def reject_on_error(self, reject_on_error):
        """Sets the reject_on_error of this FrameAspectRatioTest.
        :param reject_on_error: The reject_on_error of this FrameAspectRatioTest.  # noqa: E501
        :type: bool
        """
        self._reject_on_error = reject_on_error
    @property
    def checked(self):
        """Gets the checked of this FrameAspectRatioTest.  # noqa: E501
        :return: The checked of this FrameAspectRatioTest.  # noqa: E501
        :rtype: bool
        """
        return self._checked
    @checked.setter
    def checked(self, checked):
        """Sets the checked of this FrameAspectRatioTest.
        :param checked: The checked of this FrameAspectRatioTest.  # noqa: E501
        :type: bool
        """
        self._checked = checked
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, FrameAspectRatioTest):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, FrameAspectRatioTest):
            return True
        return self.to_dict() != other.to_dict()
| [
"cloudsupport@telestream.net"
] | cloudsupport@telestream.net |
852a33623690d60e6ac33700f127812c5c529587 | 6cbcff9d87a60f78e0cfce32df3148c6b4533801 | /myapp/migrations/0003_auto_20210504_2255.py | a99201c0f2b7175b3a540aa6b1d945d098418c7e | [] | no_license | michaelhindle/Reto1 | dd3d630501b1e682a1deedf7b377f14a233fc29b | 045c8b8259ca4b17ff8272d2e16a99580ec019d4 | refs/heads/main | 2023-05-02T01:33:42.388444 | 2021-05-30T12:31:32 | 2021-05-30T12:31:32 | 360,569,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,885 | py | # Generated by Django 3.1.7 on 2021-05-04 20:55
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces the Empleado/Ticket
    # ticketing models, reshapes Equipo, and drops the old
    # Empresa/Red/Trabajador models.
    dependencies = [
        ('myapp', '0002_empresa_trabajador'),
    ]
    operations = [
        migrations.CreateModel(
            name='Empleado',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=25)),
                ('apellidos', models.CharField(max_length=50)),
                ('email', models.CharField(max_length=50)),
                ('dni', models.CharField(max_length=9)),
            ],
        ),
        migrations.CreateModel(
            name='Ticket',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titulo', models.CharField(max_length=50)),
                ('descripcion', models.TextField(max_length=1000)),
                ('fecha_apertura', models.DateField()),
                ('fecha_resolucion', models.DateField()),
                ('nivel_urgencia', models.CharField(max_length=50)),
                ('tipo_ticket', models.CharField(max_length=50)),
                ('estado_ticket', models.CharField(max_length=50)),
                ('comentario', models.TextField(max_length=1000)),
                ('FK_Empleado_ID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp.empleado')),
            ],
        ),
        migrations.RemoveField(
            model_name='trabajador',
            name='empresa',
        ),
        migrations.RemoveField(
            model_name='equipo',
            name='red',
        ),
        migrations.RemoveField(
            model_name='equipo',
            name='tipo',
        ),
        migrations.RemoveField(
            model_name='equipo',
            name='votes',
        ),
        # preserve_default=False: the defaults below only back-fill existing
        # rows during this migration; they are not kept on the model.
        migrations.AddField(
            model_name='equipo',
            name='fecha_adquisicion',
            field=models.DateField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='equipo',
            name='fecha_puestaenmarcha',
            field=models.DateField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='equipo',
            name='marca',
            field=models.CharField(default=1, max_length=50),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='equipo',
            name='modelo',
            field=models.CharField(default=1, max_length=50),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='equipo',
            name='planta',
            field=models.CharField(default=1, max_length=50),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='equipo',
            name='proveedor_nombre',
            field=models.CharField(default=1, max_length=50),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='equipo',
            name='tipoequipo',
            field=models.CharField(default=1, max_length=50),
            preserve_default=False,
        ),
        migrations.DeleteModel(
            name='Empresa',
        ),
        migrations.DeleteModel(
            name='Red',
        ),
        migrations.DeleteModel(
            name='Trabajador',
        ),
        migrations.AddField(
            model_name='ticket',
            name='FK_Equipo_ID',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp.equipo'),
        ),
    ]
| [
"ivandilarios@opendeusto.es"
] | ivandilarios@opendeusto.es |
ed7b5fcf55324e383b99dd8f860e850435b47ada | 0faf534ebb6db6f32279e5bee25b968bd425ce3a | /tests/core/_while/_while.py | b6d827a12289764a394e2ef4beffb7579457bc29 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | PyHDI/veriloggen | e8647cb2d40737d84e31d6b89c5799bab9cbd583 | f2b1b9567150af097eed1b5e79ba2b412854ef43 | refs/heads/develop | 2023-08-09T10:02:35.626403 | 2023-08-09T00:50:14 | 2023-08-09T00:50:14 | 37,813,184 | 282 | 60 | Apache-2.0 | 2023-07-20T03:03:29 | 2015-06-21T15:05:30 | Python | UTF-8 | Python | false | false | 1,032 | py | from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from veriloggen import *
def mkTest():
    """Builds the Verilog 'test' module: a free-running clock, a reset
    pulse, and a 32-bit counter incremented on CLK posedges by a
    while-loop that runs up to 1024."""
    m = Module('test')
    clk = m.Reg('CLK')
    rst = m.Reg('RST')
    count = m.Reg('count', width=32)
    # VCD waveform dump of the three signals.
    m.Initial(
        Systask('dumpfile', '_while.vcd'),
        Systask('dumpvars', 0, clk, rst, count),
    )
    # Clock generator: toggle CLK every 5 time units.
    m.Initial(
        clk(0),
        Forever(clk(Not(clk), ldelay=5)) # forever #5 CLK = ~CLK;
    )
    # Reset pulse, then count 0..1024 (one increment per CLK posedge)
    # before finishing the simulation.
    m.Initial(
        rst(0),
        Delay(100),
        rst(1),
        Delay(100),
        rst(0),
        Delay(1000),
        count(0),
        While(count < 1024)(
            count(count + 1),
            Event(Posedge(clk))
        ),
        Systask('finish'),
    )
    return m
if __name__ == '__main__':
    # Generate the module and print its Verilog source to stdout.
    test = mkTest()
    verilog = test.to_verilog('')
    print(verilog)
| [
"shta.ky1018@gmail.com"
] | shta.ky1018@gmail.com |
c4b3327abff5497c6177759bd9c47d889468636c | f82cbca5c332d1e10a2f0910cfa81e6f7d2ad804 | /examples/ann/ANN_matrix.py | 5460cdbe65c1d87bbb7765abc4f17434ed514c10 | [] | no_license | raulmogos/AI-course-work | 0460596a9aae73df7453bd3644bd9e859a52d0c1 | bc8600987bce36f421a7662b14f9a6af8bc22653 | refs/heads/master | 2022-07-04T13:40:30.064733 | 2020-05-09T17:31:24 | 2020-05-09T17:31:24 | 255,887,725 | 0 | 0 | null | 2020-04-21T20:37:23 | 2020-04-15T10:52:02 | Python | UTF-8 | Python | false | false | 2,430 | py | """
An example of a simple ANN with 1+2 layers
The implementation uses 2 matrixes in order to memorise the weights.
For a full description:
https://towardsdatascience.com/how-to-build-your-own-neural-network-from-scratch-in-python-68998a08e4f6
"""
import numpy as np
import matplotlib as mpl
np.random.seed(1)
# the activation function:
def sigmoid(x):
    """Logistic activation 1 / (1 + e^-x); maps input elementwise into (0, 1)."""
    exp_neg = np.exp(-x)
    return 1.0 / (1 + exp_neg)
# the derivate of te activation function
def sigmoid_derivative(x):
    """Derivative of the sigmoid, expressed in terms of its output value x."""
    return (1.0 - x) * x
class NeuralNetwork:
    """Minimal 2-layer (hidden + output) fully-connected network trained by
    full-batch gradient descent on the squared error.  Relies on the
    module-level sigmoid()/sigmoid_derivative() helpers."""
    # constructor for this VERY particular network with 2 layers (plus one for input)
    def __init__(self, x, y, hidden):
        # x: training inputs (samples x features); y: targets; hidden: units
        # in the single hidden layer.  Weights start uniform in [0, 1).
        self.input = x
        self.weights1 = np.random.rand(self.input.shape[1], hidden)
        self.weights2 = np.random.rand(hidden, 1)
        self.y = y
        self.output = np.zeros(self.y.shape)
        self.loss = []
    # the function that computes the output of the network for some input
    # (also caches self.layer1, which backprop() reads)
    def feedforward(self):
        self.layer1 = sigmoid(np.dot(self.input, self.weights1))
        self.output = sigmoid(np.dot(self.layer1, self.weights2))
    # the backpropagation algorithm
    # NOTE: feedforward() must be called first; this uses self.layer1 and
    # self.output computed there.
    def backprop(self, l_rate):
        # application of the chain rule to find derivative of the
        # loss function with respect to weights2 and weights1
        d_weights2 = np.dot(self.layer1.T, (2 * (self.y - self.output) * sigmoid_derivative(self.output)))
        d_weights1 = np.dot(self.input.T, (np.dot(2 * (self.y - self.output) * sigmoid_derivative(self.output), self.weights2.T) * sigmoid_derivative(self.layer1)))
        # update the weights with the derivative (slope) of the loss function
        self.weights1 += l_rate * d_weights1
        self.weights2 += l_rate * d_weights2
        # record the squared-error loss for later plotting
        self.loss.append(sum((self.y - self.output) ** 2))
if __name__ == "__main__":
    # BUGFIX: "import matplotlib as mpl" alone does not load the pyplot
    # submodule, so the original "mpl.pyplot.plot(...)" raised
    # AttributeError.  Import pyplot explicitly here instead.
    from matplotlib import pyplot as plt

    # XOR training set: X holds the 4 input pairs, y the expected outputs.
    X = np.array([[0, 0],
                  [0, 1],
                  [1, 0],
                  [1, 1]])
    y = np.array([[0], [1], [1], [0]])
    nn = NeuralNetwork(X, y, 2)
    nn.loss = []
    iterations = []
    # Train: one full-batch forward/backward pass per iteration (lr = 1).
    for i in range(4000):
        nn.feedforward()
        nn.backprop(1)
        iterations.append(i)
    print(nn.output)
    # Plot the loss curve recorded by backprop().
    plt.plot(iterations, nn.loss, label='loss value vs iteration')
    plt.xlabel('Iterations')
    plt.ylabel('loss function')
    plt.legend()
    plt.show()
| [
"raulmogos109@yahoo.com"
] | raulmogos109@yahoo.com |
62ac4d2e13d183193292f8beb03b79b48f40d672 | 6b787ef4ad9eaafdb7d13462a52636d10bf5503c | /ascii.py | 64133881fe555905c4b22471bb039d6706c30dc9 | [] | no_license | lifeicq/anagram_from_unscrambled | da04252b62e97bb526c721f5c223b30040f33e29 | 2c59c33a863675058312bceacda4708310a3325e | refs/heads/master | 2020-05-31T16:27:11.502679 | 2015-07-04T06:17:12 | 2015-07-04T06:17:12 | 38,474,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | #!/usr/bin/env python
#This is my ASCII character calculation program that provides a sum of all characters
#Coded by LifeIcq
# NOTE: Python 2 syntax (print statements, raw_input); will not run under Python 3.
print "Welcome to ASCII word converter and its sum calculator!"
phrase = raw_input("Please enter your scrambled phrase here for ASCII calculation:")
temp = 0
final_Value = 0
# Print each character's code point and accumulate the total.
for letter in phrase:
    temp = ord(letter)
    print temp
    final_Value += temp
print final_Value
print "Thank you for using this program!"
| [
"lifeicq@gmail.com"
] | lifeicq@gmail.com |
f8154a2f9cbcacf7d09e31bbf7ece38b4b1b34d6 | 2707c002fb215e44fc7567f6fa6a9d651243b06d | /Python/first.py | 0b249285bfad26753f398ac646b55f70b9df97f2 | [] | no_license | andickinson/Practical-Ethical-Hacking | 46ac50519822a973f87e396e2caf590a78eccef0 | 822658fda8052395efb8e7846355ffd9dd0bb827 | refs/heads/main | 2023-03-12T15:56:12.567986 | 2021-03-02T08:18:28 | 2021-03-02T08:18:28 | 341,519,905 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | #!/bin/python3
#Print string
# Demonstrates print() with the different string-literal quoting styles
# and "+" concatenation.
print("Hello, world!") #double quotes
print('\n') #new line
print('Hello, world!') #single quotes
print("""This string runs
multiple lines!""") #triple quote for multi-line
print("This string is " + "awesome!") #we can also concatenate
"andickinson@gmail.com"
] | andickinson@gmail.com |
4541f9dcb4fab88b6bbf5c77db6b8d07c29b9cc9 | 16ccfb5d13029afde7fb5d54371c97d1866de905 | /corkscrew/version.py | 3f686add9e1a94e02216d00bd7ebc2291ef4da42 | [] | no_license | mattvonrocketstein/corkscrew | b69c32ea78f0bfe948b83a85bb4f60351e560116 | 8c992599e865aee8cfc93900a945ff5248ed1ab2 | refs/heads/master | 2021-01-01T18:42:10.205684 | 2015-11-08T09:55:45 | 2015-11-08T09:55:45 | 2,240,780 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | """ corkscrew.version
"""
# Package version marker; note it is a float, not the conventional string.
__version__=0.18
"matthewvonrocketstein@gmail-dot-com"
] | matthewvonrocketstein@gmail-dot-com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.