blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
683d36009adb3aca93e808e943434e2bc04fe516 | 92993cff825da80a8ff601572a0c52b0b7d3cbde | /algorithms/Svm/ADMM/L1/ADMM_L1_m34.py | 9d9fc819c9303a58aef992c0309a13f479a8c4cf | [] | no_license | yingzhuoy/MRs-of-linear-models | 06e8b1f84b08c6aa77553813824cf35c1806c5a7 | c3df8299e039a12613f2022b370b8c3e9c2dd822 | refs/heads/master | 2023-04-07T23:09:37.736952 | 2021-04-04T05:33:37 | 2021-04-04T05:33:37 | 265,124,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,148 | py | import numpy as np
from numpy import linalg
#import cvxopt
#from cvxopt import matrix,solvers
#import scipy.sparse.linalg
from algorithms.clf import Clf
"""
Preconditioned Conjugate Gradient Method
"""
def precond(M, r):
    """Apply the preconditioner M to the residual r (plain matrix product).

    Here M is the identity, so this is effectively a no-op hook kept for
    swapping in a real preconditioner later.
    """
    q = M * r
    return q
def inner_prod(A, B):
    """Frobenius-style inner product of A and B.

    Both operands are flattened to column vectors; the result is the sum
    of elementwise products, returned as a 1x1 numpy matrix.
    """
    col_a = np.matrix(A).reshape(-1, 1)
    col_b = np.matrix(B).reshape(-1, 1)
    return col_a.T * col_b
def cg(A, b, x=None, tol=1.0e-6, max_iter=128):
    """Solve A X = b with a preconditioned conjugate-gradient style iteration.

    A and b are converted to numpy matrices; b may be an m x m block
    right-hand side (the caller passes the identity to obtain an
    approximate inverse of A).

    NOTE(review): the `x` argument is ignored — the iteration always
    starts from zero. Kept only for interface compatibility.

    Returns the m x m solution matrix.
    """
    # precondition
    A = np.matrix(A)
    b = np.matrix(b)
    normb = np.linalg.norm(b, 'fro')
    m = b.shape[0]
    M = np.eye(m)  # identity preconditioner
    x = np.zeros((m, m))
    Aq = (A*x)
    r = b - Aq  # residual, m x m
    q = precond(M, r)  # preconditioned residual, m x m
    tau_old = np.linalg.norm(q, 'fro')
    rho_old = inner_prod(r, q)
    theta_old = 0
    Ad = np.zeros((m, m))
    d = np.zeros((m, m))
    res = r.reshape(m, m)
    tiny = 1e-30  # guard against division by (near-)zero curvature
    for i in range(max_iter):
        Aq = A * q
        sigma = inner_prod(q, Aq)
        if abs(sigma.item()) < tiny:
            break
        else:
            alpha = rho_old / sigma
            alpha = alpha.item()
            r = r - alpha * Aq
            r = r.reshape(m, m)
            # bug fix: precondition with M itself. The previous code called
            # precond(M - 0.5506771960356653, r), shifting the preconditioner
            # by an arbitrary constant and breaking convergence.
            u = precond(M, r)
            theta = np.linalg.norm(u, 'fro')/tau_old
            c = 1 / np.sqrt(1+theta*theta)
            tau = tau_old * theta * c
            gam = c*c*theta_old*theta_old
            eta = c*c*alpha
            d = gam * d + eta * q
            x = x + d
            # stopping criterion on the smoothed residual
            Ad = gam*Ad+eta*Aq
            res = res - Ad
            if np.linalg.norm(res, 'fro') < tol*normb:
                break
            else:
                rho = inner_prod(r, u)
                beta = rho / rho_old
                beta = beta.item()
                q = u + beta * q
                rho_old = rho
                tau_old = tau
                theta_old = theta
    return x
def admm(X, y, max_iter=3000):
    """Train a linear L1-loss SVM by solving the dual QP with ADMM.

    X: (m, n) data matrix; a bias column of ones is appended internally.
    y: labels in {-1, +1} (cast to float64).
    Returns (w, b): weight vector of length n and scalar bias.
    """
    # solve by inner point method
    m, n = X.shape
    X = np.column_stack((X, np.ones((m, 1))))  # absorb the bias into X
    y = y.astype(np.float64)
    data_num = len(y)
    C = 1.0  # box constraint on the dual variables
    kernel = np.dot(X, np.transpose(X))
    p = np.matrix(np.multiply(kernel,np.outer(y, y)))  # Q matrix of the dual QP
    e = np.matrix(np.ones([data_num, 1], np.float64))
    bounds = (0, C)
    low, up = bounds
    x = np.ones((m,1))
    tau = 1.618  # over-relaxation step for the multiplier update
    sigma = 1    # ADMM penalty parameter
    # initial
    u = np.ones((m, 1))
    t = x
    A = p + sigma * np.eye(m)
    I = np.eye(m)
    # approximate inverse of A via the block CG solver; reused every iteration
    invA = cg(A, I)
    for it in range(max_iter):
        # update x (unconstrained quadratic step)
        b = e + u + sigma * t
        x = invA * b
        # update y (projection onto the box [low, up])
        t = x - (1/sigma)*u
        t[t < low] = low
        t[t > up] = up
        # update u (dual ascent on the multipliers)
        u = u - tau*sigma*(x-t)
        dual = -(0.5*x.T*(p*x) - e.T*x)
        dual = dual.item()
        y1 = np.reshape(y, (-1, 1))
        lambda1 = np.multiply(x, y1)
        w = np.dot(X.T, lambda1)
        w = np.matrix(w).reshape(-1, 1)
        tmp = np.maximum(1-np.multiply(y1, X*w),0)  # hinge losses
        primal = 0.5*np.linalg.norm(w)**2 + 1 * np.sum(tmp)
        primal = primal.item()
        # stop criteria: relative primal-dual gap
        if np.abs(dual-primal)/(1+np.abs(dual)+np.abs(primal)) < 1e-12:
            break
        # print(t, np.linalg.norm(gradient))
        # print(np.min(x), np.max(x))
        # print(np.sum(x < -1e-4), np.sum(x>1+1e-4))
        # print(np.abs(dual-primal)/(1+np.abs(dual)+np.abs(primal)))
    # recover the primal weights from the dual solution
    y1 = np.reshape(y, (-1, 1))
    alpha1 = x
    lambda1 = np.multiply(y1,alpha1)
    w = np.dot(X.T, lambda1)
    w = np.array(w).reshape(-1)
    b = w[n]       # last coordinate is the bias (from the appended column)
    w = w[0:n]
    return w, b
#L1-svm
class ADMM_L1_m34():
    """L1-loss linear SVM classifier trained with ADMM (variant m34)."""
    def fit(self, X, y):
        """Fit on data X with labels y in {0, 1}; zeros are remapped to -1.

        Returns a Clf object wrapping the learned weights and bias.
        NOTE: mutates the caller's `y` array in place.
        """
        y[y == 0] = -1
        # add logitR to verify the correctness
        #from sklearn.svm import LinearSVC
        #SVM = LinearSVC(loss='hinge', tol=1e-6, max_iter=100000, verbose=1).fit(X, np.array(y).ravel())
        #w1 = SVM.coef_; b1 = SVM.intercept_
        #w1 = w1.reshape(-1); b1 = b1[0]
        #import time
        #t1 = time.time()
        w, b = admm(X, y)
        #t2 = time.time()
        #print('time:', t2-t1)
        #print('diff', np.linalg.norm(w1-w), b, b1)
        clf = Clf(w, b)
        return clf
"yingzhuoy@qq.com"
] | yingzhuoy@qq.com |
4fecf8c61e2dfa099ead53088dfe2d445da69530 | 1013a96db28be6e93ef304edf873401922407ff6 | /py_neo.py | b41b9cc83902f07c8d0ccc2c121a6e2933fce2e7 | [] | no_license | kv244/python_graph | 9a49a5ba43c03f0e8904b6fabb144e906677fe56 | 566b26dcebe6f5b59341cf5daf75c78493e3e10e | refs/heads/master | 2022-07-07T02:20:25.207106 | 2020-05-16T12:22:44 | 2020-05-16T12:22:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,049 | py | """This is a web crawler
Which stores data in Neo4J"""
# TODO read python book, data structures
# TODO improve code
# TODO then publish
import urllib.error
import urllib.request
from urllib.parse import urljoin
from bs4 import *
from neo4j import GraphDatabase
# TODO: 2) remove spurious stuff
# TODO: 1) edit creation queries to use tx
class Scanner:
    """Performs the actual web crawl"""
    eliminate: [str] = ['#', '=', '?', '(', '@', 'facebook', 'twitter', 'jpg', 'tag', 'pdf', 'png', 'youtu', 'feed',
                        'tel', 'microsoft', 'mozilla', 'google', 'pinterest', 'instagram', 'wikipedia', 'gravatar',
                        'imgur']
    """List of strings to be skipped from scanning"""
    @staticmethod
    def make_exception(ex: Exception):
        """Helper method to return a string from the exception"""
        template = "An exception of type {0} occurred. Arguments:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        return message
    def __init__(self, origin: str):
        """origin is the starting URL"""
        self.origin = origin
        self.output = []  # URLs harvested from the origin page
    def scan(self, crawler, max_links=30):
        """scans the origin page and populates the links in the output list"""
        limit = 0
        try:
            html_page = urllib.request.urlopen(self.origin)
            page_in = BeautifulSoup(html_page.read(), 'html.parser')
            links_in = page_in('a')
            for link in links_in:
                # resolve relative hrefs against the origin URL
                if 'href' in dict(link.attrs):
                    url = urljoin(self.origin, link['href'])
                else:
                    continue
                if 'title' in dict(link.attrs):
                    title = link['title']
                else:
                    title = ''
                # drop URLs containing any blacklisted substring
                skip = [unwanted_link for unwanted_link in list(map(url.find, Scanner.eliminate))
                        if unwanted_link != -1]
                if skip:
                    continue
                # deduplicate globally across the whole crawl via Crawler.scanned
                if hash(url) in Crawler.scanned.keys():
                    continue
                else:
                    Crawler.scanned[hash(url)] = url  # TODO 3) add title here
                    limit = limit + 1
                    self.output.append(url)
                if limit > max_links:
                    break
        except Exception as ex:
            # best-effort crawl: log the error and keep whatever was collected
            print(Scanner.make_exception(ex))
    def __get__(self, instance, owner):
        return self.output
class Crawler:
    """Drives the scanner"""
    scanned = {}  # the dictionary holding the hash of scanned urls
    def __init__(self, origin: str, generations: int, db_url: str = "bolt://localhost:7687",
                 db_login: str = "gigifecali", db_pwd: str = "fecali"):
        """origin = starting URL
        generations = how many jumps from origin"""
        self._current_bucket = []  # scanned by the current generation scan
        self._swap_bucket = []     # accumulates the next generation's URLs
        self._current_bucket.append(origin)
        self._generations = generations
        self.storage = GraphStorage(db_url, db_login, db_pwd)
    @staticmethod
    def make_node(url_create: str, title: str = "") -> str:
        """Helper method to build a node creation command in Cypher"""
        p_var = "n"
        p_tag = "URL"
        p_prop = {"URL": url_create, "title": title}
        qry_make_node = GraphStorage.make_obj(p_var, p_tag, p_prop)
        return qry_make_node
    @staticmethod
    def make_rel(url_to: str, url_from: str) -> str:
        """Build a Cypher command linking url_from -> url_to with LINKS_TO."""
        qry_make_rel = GraphStorage.make_rel("URL", "URL", url_from, "URL", "URL", url_to, "LINKS_TO")
        return qry_make_rel
    def _build_response(self, items_scanned, scanned_from: str, generation: int):
        """builds the response data structure for the crawl
        items_scanned is the collection of URLs scanned starting with scanned_from (string)
        generation is the number away from the origin"""
        for item in items_scanned:
            # persist each discovered URL and its link edge into the graph DB
            query_node = (Crawler.make_node(item, "NOTITLE_YET"))
            query_rel = (Crawler.make_rel(item, scanned_from))
            self.storage.run_command(query_node)
            self.storage.run_command(query_rel)
    def _scan(self, generation: int):
        """Crawls the URLs in the current bucket
        which results in a new list of links for each existing URL
        all of which are consolidated. It also builds the response
        for the source URL and the links generated for it in the current
        generation."""
        for item in self._current_bucket:
            scanner = Scanner(item)
            scanner.scan(self, 50)  # max links
            self._swap_bucket.extend(scanner.output)
            self._build_response(scanner.output, item, generation)
    def crawl(self):
        """Algorithm:
        For items in current bucket while gen < max gen
        if item not in scanned already, scan item --> swap bucket list; add item to scanned
        for items2 in swap bucket
        add scan item, items2 to response
        inc gen
        current bucket = scan bucket
        scan bucket = empty
        """
        g = 0
        print(Crawler.make_node(self._current_bucket[0]))
        while g <= self._generations:
            print("\nGeneration ", g)
            self._scan(g)
            # promote next generation's URLs and reset the staging bucket
            self._current_bucket = self._swap_bucket
            self._swap_bucket = []
            g += 1
class GraphStorage(object):
    """The actual graph database storage"""
    def __init__(self, uri, user, password):
        # unencrypted local bolt connection to Neo4j
        self._driver = GraphDatabase.driver(uri, auth=(user, password), encrypted=False)
    def close(self):
        """Release the underlying Neo4j driver connection."""
        self._driver.close()
    @staticmethod
    def make_obj(k_var, k_type, dict_prop) -> str:
        """Build a Cypher CREATE statement for a node of label k_type with
        the given string properties.

        NOTE(review): properties are concatenated directly into the query
        string rather than passed as $parameters — unsafe for untrusted
        input (Cypher injection).
        """
        # does not use the $parameter format to create a custom node
        qry = "CREATE (" + k_var + ":" + k_type + "{"
        for k in dict_prop:
            qry += k + ":'" + dict_prop[k] + "',"
        # drop the trailing comma before closing the property map
        qry = qry[:len(qry) - 1] + "}) return " + k_var + ";"
        return qry
    @staticmethod
    def make_rel(k_type1: str, k_prop1: str, k_val1: str,
                 k_type2: str, k_prop2: str, k_val2: str,
                 k_typer: str) -> str:
        """Build a Cypher MATCH+CREATE statement relating two existing nodes."""
        # Beware: only matches string properties
        qry = "MATCH (n:" + k_type1 + "{" + k_prop1 + ": '" + k_val1 + "'}), (m:" + k_type2 + "{" + \
              k_prop2 + ": '" + k_val2 + "'}) CREATE (n)-[r:" + k_typer + "]->(m) return n, m, r;"
        return qry
    def run_command(self, query):
        """Execute `query` inside a write transaction."""
        with self._driver.session() as session:
            result = session.write_transaction(self._run_command, query)
    @staticmethod
    # TODO 4) fix here - what is returned? nothing for relationships?
    def _run_command(tx, query):
        result = tx.run(query)
        return result
class Demo:
    """Convenience entry point wiring a Crawler to a Neo4j instance."""
    @classmethod
    def run(cls, url: str, gen: int, db: str, login: str, pwd: str):
        """Crawl `url` for `gen` generations, storing results in db at `db`."""
        c = Crawler(url, gen, db, login, pwd)
        c.crawl()
Demo.run("https://www.zoso.ro/", 2, "bolt://localhost:7687", "gigifecali", "fecali") | [
"julian.petrescu@live.com.sg"
] | julian.petrescu@live.com.sg |
def binary_search(nums, item):
    """Return the index of `item` in the sorted list `nums`, or -1 if absent."""
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        # overflow-safe midpoint (idiomatic, though Python ints cannot overflow)
        mid = lo + (hi - lo) // 2
        if nums[mid] == item:
            return mid
        if item < nums[mid]:
            # target lies in the left half
            hi = mid - 1
        else:
            # target lies in the right half
            lo = mid + 1
    return -1
if __name__ == '__main__':
assert binary_search ([-1,0,3,5,9,12,13],9) == 4
assert binary_search ([-1,0,3,5,9,12,41], 2) == -1 | [
"f.sharapov@yahoo.com"
] | f.sharapov@yahoo.com |
3a7987be8be7fe2b2beef5fcdd555806e3503def | 02d8f219d9555c66b6156b691fdbda9f02b379e9 | /backtracking.py | d548389c7034cd6b5c3c4dc69bf9e39c51c021cb | [] | no_license | aman-parikh/DAA-PROJECT | ab46697e695ca0de7a850c3f8d35553c6909fdb0 | d1486aac0e80c3e3ab23d3e88e0fa4d43d276b78 | refs/heads/main | 2023-04-02T02:21:19.306555 | 2021-04-09T06:08:14 | 2021-04-09T06:08:14 | 355,852,595 | 0 | 0 | null | 2021-04-08T09:59:21 | 2021-04-08T09:59:20 | null | UTF-8 | Python | false | false | 2,085 | py | import math
import pygame
from tkinter import messagebox
from tkinter import *
import tkinter
root = tkinter.Tk()
root.withdraw()
class BACKTRACK:
    """Depth-first backtracking maze solver operating on a 2D grid.

    Grid cell codes observed in this code: 0 = unvisited, 2 = goal,
    3 = on the current path, 4 = dead end.
    """
    @staticmethod
    def draw_path(end, start, draw, draw_fin_path):
        """Walk parent links from `end` back to `start`, marking and drawing
        the final path, then report the accumulated weight in a dialog."""
        node = end
        total_wt = 0
        while node:
            total_wt += node.weight
            if node != start and node != end:
                node.make_path()
                draw()
            node = node.parent
        draw_fin_path()
        messagebox.showinfo('Total weight', total_wt)
    @staticmethod
    def backtracking(draw_bin, draw_fin_path, grid, start, end, row, col, Cellgrid):
        """Recursively explore from (row, col); returns True when the goal
        cell (value 2) is reached. Sets parent links on Cellgrid so the
        path can be reconstructed afterwards. Mutates `grid` in place."""
        if grid[row][col] == 2:#return condition
            return True
        elif grid[row][col] == 0:
            grid[row][col] = 3
            draw_bin()
            #print(row, len(grid))
            if row < len(grid) - 1:
                # Explore path below
                if BACKTRACK.backtracking(draw_bin, draw_fin_path, grid, start, end, row + 1, col, Cellgrid):
                    Cellgrid[row + 1][col].parent = Cellgrid[row][col]
                    return True
            if row > 0:
                # Explore path above
                if BACKTRACK.backtracking(draw_bin, draw_fin_path, grid, start, end, row - 1, col, Cellgrid):
                    Cellgrid[row - 1][col].parent = Cellgrid[row][col]
                    return True
            if col < len(grid[row]) - 1:
                # Explore path to the right
                if BACKTRACK.backtracking(draw_bin, draw_fin_path, grid, start, end, row, col+1, Cellgrid):
                    Cellgrid[row][col + 1].parent = Cellgrid[row][col]
                    return True
            if col > 0:
                # Explore path to the left
                if BACKTRACK.backtracking(draw_bin, draw_fin_path, grid, start, end, row, col - 1, Cellgrid):
                    Cellgrid[row][col - 1].parent = Cellgrid[row][col]
                    return True
            # all four directions failed: mark as dead end
            grid[row][col] = 4
            draw_bin()
| [
"noreply@github.com"
] | noreply@github.com |
a70b34ec60feceb1a77c2fd378dbb3a87121abd9 | fc210e56f3d20947f84039a8ef07107bb11e6b5a | /main_prepare_tfrecords.py | def142e0fa4506356f5c7542938f2e953fb58cde | [] | no_license | ZhouYzzz/RecurrentTracking | 344b5fcb73f04a749f9822ae0b18f8de83ee6308 | 9dfaf2b383b2a0f67272ec090b2a40bb5d1adee4 | refs/heads/master | 2021-09-11T20:47:31.566421 | 2018-04-12T06:50:05 | 2018-04-12T06:50:05 | 112,092,396 | 0 | 1 | null | 2018-03-22T11:39:37 | 2017-11-26T15:07:42 | Python | UTF-8 | Python | false | false | 2,126 | py | """Create TFRecords files from ILSVRC2015"""
import tensorflow as tf
import tempfile, os, argparse
from multiprocessing import Pool
from tqdm import tqdm
from ilsvrc2015 import ILSVRC2015, PHASE
from annotations import parse_annotation_folder
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset_dir', default='/home/zhouyz/ILSVRC2015/', type=str, help='ILSVRC2015 root directory')
parser.add_argument('--output_dir', default=tempfile.mkdtemp(), type=str)
parser.add_argument('--records_prefix', default='ilsvrc2015.', type=str)
FLAGS, _ = parser.parse_known_args()
def create_tfrecords(annotation_folder):
    """Serialize all streams from one annotation folder into a fresh
    .tfrecords file in FLAGS.output_dir; returns the number of streams."""
    writer = tf.python_io.TFRecordWriter(
        path=tempfile.mktemp(suffix='.tfrecords', prefix=FLAGS.records_prefix, dir=FLAGS.output_dir))
    streams = parse_annotation_folder(annotation_folder)
    for s in streams:
        writer.write(s.serializeToTFSequenceExample().SerializeToString())
    writer.close()
    return len(streams)
def create_fixed_lengthed_tfrecords(annotation_folder, length=32):
    """Like create_tfrecords, but first splits each stream into chunks of
    `length` frames; returns the number of chunks written."""
    writer = tf.python_io.TFRecordWriter(
        path=tempfile.mktemp(suffix='.tfrecords', prefix=FLAGS.records_prefix, dir=FLAGS.output_dir))
    streams = parse_annotation_folder(annotation_folder)
    splitted_streams = []
    for s in streams:
        # one extra chunk covers the remainder when length does not divide s.length
        splitted_streams += s.splitIntoStreams(n=s.length//length + 1, l=length)
    for s in splitted_streams:
        writer.write(s.serializeToTFSequenceExample().SerializeToString())
    writer.close()
    return len(splitted_streams)
def main():
    """Convert every training snippet of ILSVRC2015 into fixed-length
    TFRecords, showing a running total in the progress bar."""
    print('FLAGS:', FLAGS)
    dataset = ILSVRC2015(FLAGS.dataset_dir)
    snippet_ids = dataset.GetSnippetIDs(phase=PHASE.TRAIN)
    ## Using multiprocessing
    # with Pool(8) as p:
    #   r = list(tqdm(
    #     p.imap(create_tfrecords, map(lambda i: os.path.join(dataset.annotations_dir, i), snippet_ids)),
    #     total=len(snippet_ids)
    #   ))
    count = 0
    t = tqdm(snippet_ids)
    for id in t:
        count += create_fixed_lengthed_tfrecords(os.path.join(dataset.annotations_dir, id))
        t.set_description(desc='Total records {}'.format(count))
if __name__ == '__main__':
main()
| [
"zhouyz9608@gmail.com"
] | zhouyz9608@gmail.com |
304f5b58c3d48bcabde5d01bcb1635415e7c3590 | 9bdeffc12343cd5c5e7bf1f4cb8969c72d81c56b | /mpesa_api/urls.py | 70e4b9988c53b2601dbe91606de11fb2948a7016 | [] | no_license | johngaitho05/Mpesa-API-Python | 5fe90d60261e9913d6adfa6bc9fc3028fe6c79e5 | 49314ac3d37be297783a7c6da7a1875ece24e1d0 | refs/heads/master | 2022-02-08T07:44:46.910257 | 2022-01-31T11:05:30 | 2022-01-31T11:05:30 | 222,941,616 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | from django.urls import path, include
from . import views
urlpatterns = [
path('access/token', views.getAccessToken, name='get_mpesa_access_token'),
path('online/lipa', views.lipa_na_mpesa_online, name='lipa_na_mpesa'),
# register, confirmation, validation and callback urls
path('c2b/register', views.register_urls, name="register_mpesa_validation"),
path('c2b/confirmation', views.confirmation, name="confirmation"),
path('c2b/validation', views.validation, name="validation"),
path('c2b/callback', views.call_back, name="call_back"),
] | [
"johngaitho05@gmail.com"
] | johngaitho05@gmail.com |
4f12e4bfc52ce3776f4accdf6925314eb1fa2ec4 | 36745ed5c7d84f83ac4e2cc417ef7b00fcd7b132 | /course_1/exercises/3/main.py | 115a7b4d110f89eada5ca3f19561f421650b4f07 | [] | no_license | UglukFearless/python-learning | 40a2220f5e0fd3b4d9317f0e09c37d8f0ee49ce6 | 1d577dd8989f2c51699f93adb9ae065a1da28b66 | refs/heads/main | 2023-03-21T12:30:31.396417 | 2021-03-15T18:47:31 | 2021-03-15T18:47:31 | 348,085,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | n = int(input())
# Read n allowed words (lowercased) into the whitelist.
resolves = []
count = 0
while count < n:
    count += 1
    resolves.append(input().lower())
# Read l lines of text; collect every word not in the whitelist, once each.
l = int(input())
count = 0
errors = []
while count < l:
    count += 1
    line = input().lower().split()
    for word in line:
        if word not in resolves and word not in errors:
            errors.append(word)
# Print the unknown words in first-seen order.
for word in errors:
    print(word)
"UglukFearless@mail.ru"
] | UglukFearless@mail.ru |
d5d9ed35adc9350ff4b125f70a2e9e14460d1024 | 35b734213c80e27fb54c0dae37ef975788251c42 | /portfolio/views.py | 85347c2959c675422338779601eb33233878c981 | [] | no_license | AKAWOLF13/BLOG | 136f593d6b6c4802fa63d0de8b0cd764faee1b15 | 403187554dcc9551fdadab4bd9c492f19dbebd7a | refs/heads/master | 2023-05-09T23:41:54.620237 | 2019-07-28T14:05:53 | 2019-07-28T14:05:53 | 192,912,918 | 0 | 0 | null | 2023-04-21T20:33:41 | 2019-06-20T12:05:51 | Python | UTF-8 | Python | false | false | 226 | py | from django.shortcuts import render
from .models import Portfolio
def portfolio(request):
    """Render the portfolio page with the Portfolio queryset manager.

    NOTE(review): passes the bare `objects` manager to the template;
    presumably the template iterates it with `.all` — verify.
    """
    portfolios = Portfolio.objects
    return render(request, 'portfolio.html', {'portfolios': portfolios})
# Create your views here.
| [
"akawolf13@syuin.ac.kr"
] | akawolf13@syuin.ac.kr |
e375657984ca4c1db8762c48d302ebec2f49cf4e | 151c84658cb860e46493bc176d88c94c52a68910 | /flaskblog/routes.py | 53a83bd00441b33b7f82f93b37191a5c4fd53af4 | [] | no_license | Lam-Git/Flaskblog-master | a70e1f11f0d5a107750607a874e5b605dd5fa3d8 | d5d27998a1d90bcfc2c400be295c380bea72d5ac | refs/heads/master | 2023-03-09T02:58:32.105897 | 2021-02-27T01:57:15 | 2021-02-27T01:57:15 | 342,745,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,643 | py | import os
import secrets
from PIL import Image
from flask import render_template, url_for, flash, redirect, request, abort
from flaskblog import app, db, bcrypt
from flaskblog.form import RegistrationForm, LoginForm, UpdateAccountForm, PostForm
from flaskblog.models import User, Post
from flask_login import login_user, current_user, logout_user, login_required
@app.route("/")
@app.route("/home")
def home():
# this will allow only 5 post per-page
page = request.args.get("page", 1, type=int)
# this line help order the newest post on the top.
posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=5)
return render_template("home.html", posts=posts)
@app.route("/about")
def about():
return render_template("about.html", title="About")
@app.route("/register", methods=["GET", "POST"])
def register():
if current_user.is_authenticated:
return redirect(url_for("home"))
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode(
"utf-8"
)
user = User(
username=form.username.data, email=form.email.data, password=hashed_password
)
db.session.add(user)
db.session.commit()
flash("Your account has been created! You are now able to log in", "success")
return redirect(url_for("login"))
return render_template("register.html", title="Register", form=form)
@app.route("/login", methods=["GET", "POST"])
def login():
if current_user.is_authenticated:
return redirect(url_for("home"))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
next_page = request.args.get("next")
return redirect(next_page) if next_page else redirect(url_for("home"))
else:
flash("Login Unsuccessful. Please check email and password", "danger")
return render_template("login.html", title="Login", form=form)
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for("home"))
def save_picture(form_picture):
    """Save an uploaded profile picture under static/profile_pics.

    The file is renamed to a random hex token (keeping its original
    extension) to avoid collisions, and resized in place to a 125x125
    thumbnail before saving. Returns the generated filename.
    """
    random_hex = secrets.token_hex(8)
    _, f_ext = os.path.splitext(form_picture.filename)
    picture_fn = random_hex + f_ext
    picture_path = os.path.join(app.root_path, "static/profile_pics", picture_fn)

    output_size = (125, 125)
    # bug fix: use a context manager so the image file handle is always
    # released (the original left the PIL Image open)
    with Image.open(form_picture) as i:
        i.thumbnail(output_size)
        i.save(picture_path)
    return picture_fn
@app.route("/account", methods=["GET", "POST"])
@login_required
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
if form.picture.data:
picture_file = save_picture(form.picture.data)
current_user.image_file = picture_file
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash("Your account has been updated!", "success")
return redirect(url_for("account"))
elif request.method == "GET":
form.username.data = current_user.username
form.email.data = current_user.email
image_file = url_for("static", filename="profile_pics/" + current_user.image_file)
return render_template(
"account.html", title="Account", image_file=image_file, form=form
)
@app.route("/post/new", methods=["GET", "POST"])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
post = Post(
title=form.title.data, content=form.content.data, author=current_user
)
db.session.add(post)
db.session.commit()
flash("Your post has been created!", "success")
return redirect(url_for("home"))
return render_template(
"create_post.html", title="New Post", form=form, legend="New Post"
)
@app.route("/post/<int:post_id>")
def post(post_id):
post = Post.query.get_or_404(post_id)
return render_template("post.html", title=post.title, post=post)
@app.route("/post/<int:post_id>/update", methods=["GET", "POST"])
@login_required
def update_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
db.session.commit()
flash("Your post has been updated!", "success")
return redirect(url_for("post", post_id=post.id))
elif request.method == "GET":
form.title.data = post.title
form.content.data = post.content
return render_template(
"create_post.html", title="Update Post", form=form, legend="Update Post"
)
@app.route("/post/<int:post_id>/delete", methods=["POST"])
@login_required
def delete_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash("Your post has been deleted!", "success")
return redirect(url_for("home"))
@app.route("/user/<string:username>")
def user_posts(username):
page = request.args.get("page", 1, type=int)
user = User.query.filter_by(username=username).first_or_404()
posts = (
Post.query.filter_by(author=user)
.order_by(Post.date_posted.desc())
.paginate(page=page, per_page=5)
)
return render_template("user_post.html", posts=posts, user=user)
| [
"lsnguyen0@gmail.com"
] | lsnguyen0@gmail.com |
580632b234aa4d793999e6c0fbdd8fc9d61542ea | 08c8807093f643cb8d5541852d4401016d2d32df | /app.py | 69759911398f930d7a46375540a6fd1ca8a3aebb | [] | no_license | JSOO17/Week1 | f4faa11636b343ba317fdb50edf0ac13f78655d3 | 89aaf6bbe099afd84fb8c28efa6a49ef41a754ec | refs/heads/master | 2022-12-25T21:24:00.185818 | 2020-09-29T13:20:01 | 2020-09-29T13:20:01 | 299,149,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,859 | py | import csv
def ignore_first(reader) -> list:
    """Materialise `reader` into a list and drop its first row (the CSV header)."""
    rows = list(reader)
    # raises IndexError on empty input, matching the original pop(0) behavior
    del rows[0]
    return rows
def initialize_reader() -> list:
    """Read movie_metadata.csv and return its data rows (header removed).

    Bug fix: on a decode failure the original left `reader` unbound and
    the final `return reader` raised NameError; now an empty list is
    returned instead.
    """
    reader = []
    try:
        with open('movie_metadata.csv', encoding="utf8") as f:
            reader = ignore_first(csv.reader(f))
    except UnicodeDecodeError:
        print("could not fetch the file")
    return reader
def field_count(field: int, filter: str):
    """Print how many rows have `filter` as the exact value of column `field`.

    field: zero-based CSV column index to inspect.
    filter: value to match (exact string comparison).
    """
    reader = initialize_reader()
    count = 0
    for row in reader:
        if(row[field] == filter):
            count = count + 1
    print(f"There are {count} {filter}")
def less_criticized():
    """Print the top 10 movies with the fewest critic reviews.

    Rows with an empty critic-count column (index 2) are skipped.
    """
    reader = initialize_reader()
    movies_less = []
    for row in reader:
        if(row[2]):
            movies_less.append({"name": row[11], "num_critic_for_users": int(row[2])})
    # ascending sort: fewest reviews first
    new_list = sorted(movies_less, key=lambda i: i['num_critic_for_users'])
    topTenList = new_list[:10]
    top = 0
    print("Top 10 Movies less criticized \n")
    for movie in topTenList:
        top = top + 1
        print(f"Top {top} is {movie.get('name')} with {movie.get('num_critic_for_users')}")
def longest_duration():
    """Print the top 20 longest-running movies by the duration column (index 3)."""
    reader = initialize_reader()
    movies_longest = []
    for row in reader:
        if(row[3]):
            movies_longest.append({"name": row[11], "duration": int(row[3])})
    new_list = sorted(movies_longest, key=lambda i: i['duration'], reverse=True)
    topTenList = new_list[:20]
    top = 0
    print("\nTop 20 Movies longest-running duration \n")
    for movie in topTenList:
        top = top + 1
        print(f"Top {top} is {movie.get('name')} with {movie.get('duration')}")
def raised_more_money():
    """Print the top 5 movies by gross revenue (column 8).

    Bug fix: the heading claimed "Top 20" while the code sliced only five
    entries; the heading now matches the actual count.
    """
    reader = initialize_reader()
    movies_raised = []
    for row in reader:
        if row[8]:
            movies_raised.append({"name": row[11], "gross": int(row[8])})
    new_list = sorted(movies_raised, key=lambda i: i['gross'], reverse=True)
    top_five = new_list[:5]
    top = 0
    print("\nTop 5 Movies raised more money \n")
    for movie in top_five:
        top = top + 1
        print(f"Top {top} is {movie.get('name')} with {movie.get('gross')}")
def least_money():
    """Print the 5 movies that grossed the least (column 8, ascending)."""
    reader = initialize_reader()
    movies_least = []
    for row in reader:
        if row[8]:
            movies_least.append({"name": row[11], "gross": int(row[8])})
    new_list = sorted(movies_least, key=lambda i: i['gross'])
    topTenList = new_list[:5]
    top = 0
    print("\nThe top 5 movies that made the least money \n")
    for movie in topTenList:
        top = top + 1
        print(f"Top {top} is {movie.get('name')} with {movie.get('gross')}")
def expend_more_money():
    """Print the top 3 movies by production budget (column 22).

    Consistency fix: uses initialize_reader() like every sibling report
    function instead of duplicating the file-opening logic, and uses
    f-strings like the rest of the module (output is unchanged).
    """
    reader = initialize_reader()
    movies_expend = []
    for row in reader:
        if(row[22]):
            movies_expend.append({"name": row[11], "budget": int(row[22])})
    new_list = sorted(movies_expend, key=lambda i: i['budget'], reverse=True)
    top_three = new_list[:3]
    top = 0
    print("\nTop 3 movies that expend more money to be produced \n")
    for movie in top_three:
        top = top + 1
        print(f"Top {top} is {movie.get('name')} with {movie.get('budget')}")
def expend_less_money():
    """Print the 3 movies with the smallest production budget (column 22)."""
    reader = initialize_reader()
    movies_expend = []
    for row in reader:
        if(row[22]):
            movies_expend.append({"name": row[11], "budget": int(row[22])})
    new_list = sorted(movies_expend, key=lambda i: i['budget'])
    topTenList = new_list[:3]
    top = 0
    print("\nTop 3 movies that expend less money to be produced \n")
    for movie in topTenList:
        top = top + 1
        print(f"Top {top} is {movie.get('name')} with {movie.get('budget')}")
def years_movies_released():
    """Print the years with the fewest and the most movies released.

    Performance/correctness fix: counts with collections.Counter instead
    of calling list.count once per row (O(n^2)) and storing one duplicate
    dict per row of the same year.
    """
    from collections import Counter
    reader = initialize_reader()
    counts = Counter(row[23] for row in reader)
    years_dicts = [{"year": year, "movies_released": released}
                   for year, released in counts.items()]
    new_list = sorted(years_dicts, key=lambda i: i['movies_released'])
    year_less_movies = new_list[:1]
    print(f"The year {year_less_movies[0].get('year')} had less movies released with {year_less_movies[0].get('movies_released')}")
    new_list = sorted(years_dicts, key=lambda i: i['movies_released'], reverse=True)
    year_more_movies = new_list[:1]
    print(f"The year {year_more_movies[0].get('year')} had more movies released with {year_more_movies[0].get('movies_released')}")
def ranking_actors_performed():
    """Print a top-10 ranking of actors by number of movies performed
    (column 10, the lead-actor name).

    Bug fix: the original removed items from `names_for` while iterating
    it, which skips entries and computes counts against a shrinking list;
    collections.Counter counts every row exactly once.
    """
    from collections import Counter
    reader = initialize_reader()
    counts = Counter(row[10] for row in reader)
    names = [{"name_actor": actor, "movies_performed": performed}
             for actor, performed in counts.items()]
    new_list = sorted(names, key=lambda i: i['movies_performed'], reverse=True)
    ranking_ten_list = new_list[:10]
    rank = 0
    print("\nRanking actors Number of movies where the actor performed \n")
    for actor in ranking_ten_list:
        rank = rank + 1
        print(f"Rank {rank} is {actor.get('name_actor')} with {actor.get('movies_performed')}")
def ranking_actors_influence():
    """Print a top-10 ranking of actors by follower count (column 7),
    keeping the first figure seen per actor.

    Bug fix: the original removed items from the list being iterated,
    which skips rows; de-duplication now uses a seen-set in one pass.
    """
    reader = initialize_reader()
    actors = []
    seen = set()
    for row in reader:
        name = row[10]
        if name not in seen:
            seen.add(name)
            actors.append({"name_actor": name, "number_influence": int(row[7])})
    new_list = sorted(actors, key=lambda i: i['number_influence'], reverse=True)
    ranking_ten_list = new_list[:10]
    rank = 0
    print("\nRanking actors social Media influence \n")
    for actor in ranking_ten_list:
        rank = rank + 1
        print(f"Rank {rank} is {actor.get('name_actor')} with {actor.get('number_influence')} followers")
def ranking_best_movie():
    """Print the 10 best movies by IMDb-style score (column 25)."""
    reader = initialize_reader()
    movie_list = [{"name_movie": row[11], "scored": float(row[25])} for row in reader]
    new_list = sorted(movie_list, key=lambda i: i["scored"], reverse=True)
    ranking_ten_list = new_list[:10]
    rank = 0
    print("\nRanking best movies \n")
    for movie in ranking_ten_list:
        rank = rank + 1
        print(f"Rank {rank} is {movie.get('name_movie')} with {movie.get('scored')}")
def search_by_tags(tags: list):
    """Print up to 10 movie titles whose keyword column matches any of `tags`.

    tags: list of keyword strings; a movie matches when any tag equals
    one of the '|'-separated entries in its keyword column (index 16).

    Bug fix: the movie title lives in column 11 — column 10 is the lead
    actor's name, as used by the actor-ranking functions in this module.
    """
    reader = initialize_reader()
    key_words = [{"movie": row[11], "key_words": row[16]} for row in reader]
    words = []
    for key_word in key_words:
        for tag in tags:
            key_words_iterable = key_word.get("key_words").split("|")
            if tag in key_words_iterable:
                if key_word not in words:
                    words.append(key_word)
    ten_list = words[:10]
    if ten_list:
        rank = 0
        text_tags = ", ".join(tags)
        print(f"\n Results search by tags {text_tags} \n")
        for movie in ten_list:
            rank = rank + 1
            print(movie.get("movie") + "\n")
    else:
        print("there aren´t results")
def genre_money(year: int, less: bool=True):
"""
What movie genre raised more money per year?
"""
reader = initialize_reader()
genres_dicts = []
for row in reader:
if(row[23]):
if(int(row[23]) == year):
if(row[8]):
genres = row[9].split("|")
for genre in genres:
if genre not in list(x.get('genre') for x in genres_dicts):
genres_dicts.append({"genre": genre, "gross": int(row[8])})
else:
for genre_dict in genres_dicts:
if genre_dict.get("genre") == genre:
genre_dict["gross"] = genre_dict.get("gross") + int(row[8])
if genres_dicts:
if less:
new_list = sorted(genres_dicts, key=lambda i: i["gross"])
print(f"\nThe genre raised less money in {year} is {new_list[0].get('genre')} with $ {new_list[0].get('gross')}\n")
else:
new_list = sorted(genres_dicts, key=lambda i: i["gross"], reverse=True)
print(f"\nThe genre raised more money in {year} is {new_list[0].get('genre')} with $ {new_list[0].get('gross')}\n")
def top_actors():
"""
Top five ranking of actors by performance and popularity
"""
reader = initialize_reader()
actor_list = [{"actor": row[10], "scored": (float(row[4]) + float(row[25])) / 2 } for row in reader if row[4] and row[25]]
actors = []
for actor in actor_list:
if actor.get('actor') not in list(x.get('actor') for x in actors):
actors.append({"actor": actor.get('actor'), "scored": actor.get('scored')})
else:
actor_list.remove(actor)
new_list = sorted(actors, key=lambda i: i['scored'], reverse=True)
top_five = new_list[:5]
if actors:
print(" \n Top 5 the best actors \n")
top = 0
for actor in top_five:
top = top + 1
print(f"Top {top} is {actor.get('actor')} with {actor.get('scored')} scored")
def genre_like_most():
"""
What movie genre does the public like most?
"""
reader = initialize_reader()
genres_dicts = []
for row in reader:
if(row[23]):
genres = row[9].split("|")
for genre in genres:
if genre not in list(x.get('genre') for x in genres_dicts):
genres_dicts.append({"genre": genre, "scored": float(row[25])})
else:
for genre_dict in genres_dicts:
if genre_dict.get("genre") == genre:
genre_dict["scored"] = genre_dict.get("scored") + float(row[25])
if genres_dicts:
new_list = sorted(genres_dicts, key=lambda i: i["scored"], reverse=True)
print(f"\n The movie genre that people like the most is {new_list[0].get('genre')} \n")
def top_reputation_directors():
"""
Which are the top five best reputation directors?
"""
reader = initialize_reader()
director_list = [{
"director": row[1],
"scored": (float(row[4]) + float(row[25])) / 2
} for row in reader if row[4] and row[25]]
directors = []
for director in director_list:
iterable = (list(x.get('director') for x in directors))
if director.get('director') not in iterable:
directors.append({
"director": director.get('director'),
"scored": director.get('scored')
})
else:
director_list.remove(director)
new_list = sorted(
directors,
key=lambda i: i['scored'],
reverse=True
)
top_five = new_list[:5]
if directors:
print(" \n Top 5 the best directors \n")
top = 0
for director in top_five:
top = top + 1
top_director = director.get("director")
top_scored = director.get("scored")
print(f"Top {top} is {top_director} with {top_scored} scored")
# field_count(field=0, filter=" Black and White")
# field_count(field=1, filter="Director")
# less_criticized()
# longest_duration()
# raised_more_money()
# least_money()
# expend_more_money()
# expend_less_money()
# years_movies_released()
# ranking_actors_performed()
# ranking_actors_influence()
# ranking_best_movie()
# search_by_tags(["future", "epic"])
# genre_money(2014)
# genre_money(2013, less=False)
# genre_like_most()
# top_reputation_directors()
# top_actors() | [
"jaider.osorio@imedicalservices.co"
] | jaider.osorio@imedicalservices.co |
ba1fdaa4fe519e9ca6cbd0e90ace37e7af0347cf | c6df642325e33901eecb3e315d774a1a8900d696 | /cms/migrations/0004_auto_20191025_2125.py | 32187f591a7bff555842feeff2fc2a9543b8e32b | [] | no_license | mousavihasans/humangene | 2dfcc1a7509d127540faca2aa21bdd3f66dba345 | f3977e66d4f452778eaa603eeb986a0560d0c4d0 | refs/heads/master | 2020-06-01T01:43:34.079361 | 2019-11-06T09:38:21 | 2019-11-06T09:38:21 | 190,582,334 | 0 | 0 | null | 2019-10-30T10:46:31 | 2019-06-06T13:02:11 | Python | UTF-8 | Python | false | false | 1,858 | py | # Generated by Django 2.2.1 on 2019-10-25 17:55
import ckeditor.fields
import cms.models
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import utils.intenum
class Migration(migrations.Migration):
dependencies = [
('cms', '0003_auto_20191020_2317'),
]
operations = [
migrations.CreateModel(
name='Content',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', utils.intenum.IntEnumField(choices=[(0, 'page'), (1, 'news')], default=0, validators=[utils.intenum.IntEnumValidator(cms.models.ContentTypeChoices)])),
('title_fa', models.CharField(max_length=500)),
('title_en', models.CharField(default='', max_length=500)),
('text_fa', ckeditor.fields.RichTextField()),
('text_en', ckeditor.fields.RichTextField(default='')),
('image', models.ImageField(blank=True, null=True, upload_to='images/posts')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(default=django.utils.timezone.now)),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='cms.Category')),
('tags', models.ManyToManyField(blank=True, to='cms.Tag')),
],
),
migrations.RemoveField(
model_name='page',
name='category',
),
migrations.RemoveField(
model_name='page',
name='tags',
),
migrations.DeleteModel(
name='News',
),
migrations.DeleteModel(
name='Page',
),
]
| [
"mousavi.sc11@gmail.com"
] | mousavi.sc11@gmail.com |
f75d028f814e2de0126aa8f1b6bc1c8684ada3e8 | 27015b0933608d256b1e3c66acad40707305a0aa | /dns task/constants.py | 0d8bf29defa8b3d39bb7bb3732722afcb0ac9546 | [] | no_license | olyakotelok/Protocols | ca354661413d67eca5b781408118358e19e25492 | 4b67ff7f3458d256af78d5ed6e163afaf816f26e | refs/heads/master | 2022-11-12T07:15:23.306451 | 2020-06-29T13:59:03 | 2020-06-29T13:59:03 | 273,216,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | HOST = 'localhost'
PORT = 53
FORWARDER = 'ns1.e1.ru'
TYPES_DICT = {1: 'A', 2: 'NS', 28: 'AAAA', 12: 'PTR'}
REV_TYPES_DICT = {'A': 1, 'NS': 2, 'AAAA': 28, 'PTR': 12}
| [
"noreply@github.com"
] | noreply@github.com |
0af1b00428e976ba359b1a7ffb194f8eae839390 | be50b4dd0b5b8c3813b8c3158332b1154fe8fe62 | /StacksAndQueues/Python/NearestSmallerElements.py | 3d77893e6f926f45de256ee34a8b88f67e31f45a | [] | no_license | Zimmermann25/InterviewBit | a8d89e090068d9644e28085625963c8ce75d3dff | 6d2138e740bd5ba8eab992d9bf090977e077bfc5 | refs/heads/main | 2023-03-24T18:12:48.244950 | 2021-03-24T14:36:48 | 2021-03-24T14:36:48 | 350,835,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | class Solution:
# @param A : list of integers
# @return a list of integers
def prevSmaller(self, A):
G = [-1] * len(A) # -1, bo to ułatwi trochę
curMin = A[0]
stack = []
for i in range(len(A)-1):
#print(stack)
if stack:
# dodawaj na stos tylko te elementy, które mogą powodować zmianę
if A[i] < A[i+1]:
'''for k in range(len(stack)):
if len(stack) and stack[-k-1] > A[i]:
stack.pop()'''
stack.append(A[i])
# znajdz w stosie pierwszy element spełniający ten warunek(mniejszy niz A[i])
for j in range(len(stack)):
if stack[-j-1] < A[i]:
G[i] = stack[-j-1]
break
else: stack.append(A[i])
#print("stack: ", stack)
# dla ostatniego elementu edge case
for j in range(len(stack)):
if stack[-j-1] < A[-1]:
G[-1] = stack[-j-1]
break
return G | [
"noreply@github.com"
] | noreply@github.com |
22ee2e1bbc4a99b2d9a06e52db94b5e02d44b2b0 | 1bae950b23b703affeda87792f198a1c94caff7d | /ptvs/basic/ch18pack/hello.py | 5aa89e6f636459ba6630c76906b540f7a402bc76 | [] | no_license | xwen586/python | 1f7b486bd7eb5c35f176f4ba0fb9213d1ffce523 | 3b2b77aad5494cd4f2a4853def566fa4413346f6 | refs/heads/master | 2020-04-12T09:19:11.533910 | 2019-02-14T13:18:02 | 2019-02-14T13:18:02 | 162,398,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | class hello:
"""description of class"""
def say(self):
print("Hello Setup!")
if __name__ == '__main__':
print("Hello World!")
h = hello()
h.say()
| [
"xwen586@sohu.com"
] | xwen586@sohu.com |
9b4a4205e03cccfbdc33ac81bc959da4c660fb3b | 7e4ca815fa4776d41b2b46cdcada077149d72899 | /course4/week4/graph.py | bf67b3634a527b2d80808c968688486839d57ed2 | [] | no_license | kcollett1/Stanford_Algorithms | 1a95e0ec12737f50926c23aede08fb246f719935 | cdab3757ebb6c6a85ee4f9c630c00ad0b3fa24aa | refs/heads/master | 2022-04-21T05:55:55.988759 | 2020-04-20T14:57:53 | 2020-04-20T14:57:53 | 257,314,127 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,161 | py |
'''
this is my implementation of a DIRECTED graph as an adjacency list. vertices are added
to the graph from input containing the vertex num and a list of vertices connected to it.
also implemented is Kosaraju's 2 pass algorithm to compute the strongly connected
components (SCC) of a directed graph, using a depth-first-search strategy (iteratively
rather than recursively) twice on the reverse of the graph first, and then on the graph
itself, keeping track of key variables (namely, finishing time and leader) as we pass through.
'''
from stack import Stack
from queue import Queue
class Graph:
def __init__(self):
# dict of vertices, mapped to a list of sets of its outgoing/incoming edges
self.vertices = {}
# dict of edges, mapped to a list of the two endpoints of edge, in order of direction
self.edges = {} # edge #: [v1,v2]; i.e. {3:[3,2]} edge# 3 points FROM vert 3 TO vert 2
self.num_edges = 0
self.num_verts = 0
self.max_vert = 0 # track verts that exist on graph without incident edges
def __update_vert__(self, vert, ind):
'''Helper function to add_edge to add current edge number to vertex dict'''
if vert not in self.vertices:
self.num_verts += 1
if vert > self.max_vert:
self.max_vert = vert
self.vertices[vert] = [set(), set()]
self.vertices[vert][ind].add(self.num_edges)
def add_edge(self, vert1: int, vert2: int):
'''Add a new edge to the graph pointing from vert1 to vert2'''
# increment number of edges and add vertex pointers to this edge
self.num_edges += 1
self.edges[self.num_edges] = [vert1, vert2]
# add both vertices/edge# to vertex dict (and increment number of vertices if needed)
self.__update_vert__(vert1, 0)
self.__update_vert__(vert2, 1)
def add_vert(self, vert):
''' Add a vertex to the graph not connected to any edges '''
if vert not in self.vertices:
self.num_verts += 1
if vert > self.max_vert:
self.max_vert = vert
self.vertices[vert] = [set(), set()]
def BFS(self, start: int, forwards=True):
''' Breadth first search from start vertex. Can search reverse graph with forwards=False '''
# initialize all vertices as unexplored except for start vertex
explored = set()
explored.add(start)
# initialize queue to track next vertices to explore, enqueue start vertex
verts = Queue()
verts.enqueue(start)
# while queue is not empty, keep exploring vertices
while not verts.is_empty():
# dequeue next vertex and try to explore any incident edges it has
vert = verts.dequeue()
# go through all edges outgoing from this vertex
for edge in self.vertices[vert][0]:
# get vertex corresponding to this edge
# if going through G, current vert will be 1st; next_vert is in pos 1 (True)
# if going through G_rev, current vert will be 2nd; next_vert is in pos 0 (False)
next_vert = self.edges[edge][forwards]
# only interested in unexplored vertices
if next_vert in explored:
continue
# this is a vertex of interest, mark as explored and add to queue
explored.add(next_vert)
verts.enqeue(next_vert)
def DFS(self, start, forwards=True):
'''
Depth first search from start vertex, helper method for compute_scc. Can search reverse graph
with forwards=False. This DFS method uses an iterative search rather than a recursive search
as this is more memory efficient for large graphs, though tracking the finishing time bcomes
slightly more tricky. Instead of tracking just if a node is explored or not, we also need to
track a third status, "explored but not finished". This is particularly important in cases
where we take a vertex from the top of the stack, and see that all of it's neighbors have
already been explored - are all of it's neighbors actually finished being explored or are
they possibly still in the stack waiting to be assigned a finish time?
'''
global leaders, leader, finish_times, finish_time, explored
verts = Stack()
verts.push(start)
if forwards: # we only care about tracking leaders in forwards pass through graph
leaders[leader] = {start}
while not verts.is_empty():
vert = verts.top() # which vertex is currently first in the stack
if vert not in explored:
# haven't "explored" yet - add all neighbors to stack if they haven't been explored yet
# note here we may be double adding vertices to the stack, but when we get to it again
# we will check if it's already been explored and if so we mark it's finish time if needed
explored.add(vert)
for edge in self.vertices[vert][(int(forwards)+1)%2]:
next_vert = self.edges[edge][int(forwards)]
if next_vert not in explored:
if forwards: # we only care about tracking leaders in forwards pass
leaders[leader].add(next_vert)
verts.push(next_vert)
else:
# completely finished exploring this node, remove from stack, set finishing time if needed
# on first pass through, we set every nodes finish time, so on forward pass through graph
# we will never set any finishing times
verts.pop()
if vert not in finish_times:
finish_time += 1
finish_times[vert] = finish_time
def compute_scc(self):
'''
This function computes the strongly connected components of this graph using Kosarju's 2-pass
algorithm. Return the dict of each components vertices (each with an arbitrary leader as key).
'''
global leaders, leader, finish_times, finish_time, explored
leaders = {}
leader = 0
finish_times = {}
finish_time = 0
explored = set()
# DFS on reverse of graph first from all nodes until all have been explored
for vert in self.vertices:
if vert not in explored:
fin = self.DFS(start=vert, forwards=False)
# reset explored verts to all being unexplored initially
explored = set()
# DFS on original graph checking all verts from largest finish time to smallest
for vert in sorted([[t,v] for v,t in finish_times.items()], reverse=True):
if vert[1] not in explored:
leader = vert[1]
self.DFS(start=vert[1]) # passing through graph forwards, we will track leaders
# the SCC's are now contained in the leaders dict
return leaders
| [
"collettikatrina@gmail.com"
] | collettikatrina@gmail.com |
00f656ed86d27c9407dcdbf26a2b2ff4ab79cf4e | dec4e79d9ba5a8169915524be6977843129a8f00 | /super/migrations/0006_auto_20200617_0936.py | 0c53f8db79748f1beead560d872ad83ab218eac6 | [] | no_license | kabo-g/Olympic | a4e836e8ceb40ba31419954214282d7885ddc973 | 00aef8ac14ba1f6fb075a99e2412ef9928be2810 | refs/heads/master | 2022-11-11T05:18:50.774897 | 2020-07-04T10:40:42 | 2020-07-04T10:40:42 | 277,087,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # Generated by Django 3.0.6 on 2020-06-17 09:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('super', '0005_news'),
]
operations = [
migrations.RenameModel(
old_name='News',
new_name='Article',
),
migrations.AlterModelTable(
name='article',
table='Article',
),
]
| [
"kabogalefete@gmail.com"
] | kabogalefete@gmail.com |
a5af234b3d731319deefda3f9ed9ab9841d03b77 | f0cf255909a354f502758dc66caa325e91b674ec | /user/apps.py | 15183b14f75c608f267fdd435de61e7d7f812c1f | [] | no_license | lit-fatfish/Bsite | 77bffc074706f74bc4c339e5d40149e3252ff4aa | 6c1d360fc96015a8bb927ebfe082ebf7e0bd45c3 | refs/heads/master | 2021-04-22T15:46:23.497912 | 2020-06-20T09:10:43 | 2020-06-20T09:10:43 | 249,859,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | from django.apps import AppConfig
class UserConfig(AppConfig):
name = 'user'
verbose_name = '用户'
def ready(self):
super(UserConfig, self).ready()
from . import signal
| [
"416983839@qq.com"
] | 416983839@qq.com |
9ac8bef25fad412e65b90347792d00761570953c | e6ca4ea74d194bb7cfe1d026dcf033cbfa8300a8 | /62. 不同路径.py | 52c9d42dd43899db244f0fe33aa4c7b197455363 | [] | no_license | Lebhoryi/Leetcode123 | cb3a2dce4d1784859e1973b8486a2c055beaa9bc | b68616ad10a1f839f4f760d15b14b864d5d059e2 | refs/heads/master | 2021-08-27T20:43:02.638060 | 2021-08-21T01:22:01 | 2021-08-21T01:22:01 | 172,526,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | # coding=utf-8
'''
@ Summary: dp, dp[i][j] = dp[i-1][j] + dp[i][j-1]
@ Update:
@ file: 62. 不同路径.py
@ version: 1.0.0
@ Author: Lebhoryi@gmail.com
@ Date: 2/21/20 10:03 PM
'''
def uniquePaths(m: int, n: int) -> int:
# # 空间复杂度 n^2
# dp = [[0] * n for _ in range(m)]
# for i in range(m):
# for j in range(n):
# dp[i][j] = 1 if i == 0 or j == 0 else dp[i-1][j] + dp[i][j-1]
# return dp[-1][-1]
# 优化 空间复杂度
dp = [1] * n
for i in range(1, m):
for j in range(1, n):
dp[j] = dp[j-1] + dp[j]
return dp[-1]
m, n = 3, 3
print(uniquePaths(m, n)) | [
"Lebhoryi@gmail.com"
] | Lebhoryi@gmail.com |
335d06fb4639a2da5946b9c9ca879fe53921aca5 | 0ae534591bdc621ff7f506de4b4b30de9103fb9d | /src/archive/stationonecodes.py | 2621815c58c1c75273ddf83f4deca259672fdd6f | [] | no_license | HoratioLi/foslATE | a649f0be6dd41c93cdfab9d7d3a00bb95ea76723 | f3ab674ec63bbde5bf98f81a68e6bf8ae7429df3 | refs/heads/master | 2020-07-01T19:19:10.271412 | 2016-11-21T08:42:50 | 2016-11-21T08:42:50 | 74,263,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | '''
Created on 13-08-2012
@author: Michael Akilian
'''
#=========================
# STATION 1 FAIL CODES
#=========================
UNLOCK_EFM_FAIL = 0
PROGRAM_CC_FAIL = 1
PROGRAM_EFM_FAIL = 2
MCU_CURRENT_FAIL = 3
LED_SINGLE_FAIL = 4
LED_SIX_FAIL = 5
SELF_ACCEL_FAIL = 6
LOW_POWER_FAIL = 7
FINAL_FLASH_FAIL = 8
RSSI_FAIL = 9
BT_TIMEOUT_FAIL = 10
RSSI_INVALID_FAIL = 11
| [
"horatioli@fossil.com"
] | horatioli@fossil.com |
d6eca7216853fee034424f9f3e1ab9aa40824df7 | 016d5aad5434feb5ecbf601d7464e7bf85ca2bbf | /civicconnect/migrations/0001_initial.py | 0c80f1654114159aa40399487906decb01297363 | [] | no_license | neilmenon/civicconnect | 3dc306c7955baa8ad7b054163ebb513c0b4659e1 | 754047ea64ede3741acd6aaefa69df0da6d36fe2 | refs/heads/master | 2023-01-20T22:08:21.041473 | 2020-11-22T05:03:45 | 2020-11-22T05:03:45 | 317,316,454 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | # Generated by Django 3.1.1 on 2020-10-18 22:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
('created_by', models.TextField()),
],
),
migrations.CreateModel(
name='Template',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField()),
('body', models.CharField(max_length=200)),
('affiliation', models.IntegerField(choices=[(1, 'Bipartisan'), (2, 'Left-Wing'), (3, 'Right-Wing'), (4, 'Moderate')], default=1)),
('created_by', models.TextField()),
('pub_date', models.DateTimeField(verbose_name='date published')),
('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='civicconnect.topic')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('body', models.TextField(max_length=10000)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='civicconnect.template')),
],
),
]
| [
"nkm5gz@virginia.edu"
] | nkm5gz@virginia.edu |
e590827103ae0c0f08215f39a83b5736da9f1605 | 421a8178ed92dfbdfdc7ea6d997524a273e8fba5 | /Week1Day2/Text Alignment.py | f912828feb4f2d1631a7518f3c92d24d92430eeb | [] | no_license | farhan1ahmed/Training | 40505b4fd0b402a4bb0618f8c52ed2c08f2aee72 | f7810e667a9625a692662b73dfba132513b2bc54 | refs/heads/master | 2022-12-11T14:39:31.706109 | 2020-06-07T19:09:27 | 2020-06-07T19:09:27 | 232,877,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | #Replace all ______ with rjust, ljust or center.
thickness = int(input()) #This must be an odd number
c = 'H'
#Top Cone
for i in range(thickness):
print((c*i).rjust(thickness-1)+c+(c*i).ljust(thickness-1))
#Top Pillars
for i in range(thickness+1):
print((c*thickness).rjust(thickness*2-(thickness+1)//2)+(c*thickness).center(thickness*6))
#Middle Belt
for i in range((thickness+1)//2):
print ((c*thickness*5).center(thickness*6))
#Bottom Pillars
for i in range(thickness+1):
print ((c*thickness).rjust(thickness*2-(thickness+1)//2)+(c*thickness).center(thickness*6))
#Bottom Cone
for i in range(thickness):
print(((c * (thickness - i - 1)).rjust(thickness) + c + (c * (thickness - i - 1)).ljust(thickness)).rjust(
thickness * 6-(thickness+1)//2))
| [
"farhan1ahmed@hotmail.com"
] | farhan1ahmed@hotmail.com |
ff31f02e03ea22e1c5f6a1b9ee0b90c7c5af7b98 | 16ff1b0128506cc5cd5a5f85d5539ec486824f77 | /scripts/alembic_freeze.py | 8e8b9e80ed645d3b3b831874b01d11f8dc194622 | [
"MIT"
] | permissive | code-watch/meltano | ed7a2fc1789ba1b7eacd567d881f9fc6ba0559db | 2afff73ed43669b5134dacfce61814f7f4e77a13 | refs/heads/master | 2022-07-29T13:17:44.019800 | 2020-06-30T23:44:12 | 2020-06-30T23:44:12 | 276,368,442 | 0 | 1 | MIT | 2020-07-01T12:48:23 | 2020-07-01T12:12:24 | Python | UTF-8 | Python | false | false | 335 | py | #!/usr/bin/env python3
from pathlib import Path
from alembic.script import ScriptDirectory
from meltano.migrations import MIGRATION_DIR, LOCK_PATH
scripts = ScriptDirectory(str(MIGRATION_DIR))
with LOCK_PATH.open("w") as lock:
HEAD = scripts.get_current_head()
lock.write(HEAD)
print(f"Meltano database frozen at {HEAD}.")
| [
"mbergeron@gitlab.com"
] | mbergeron@gitlab.com |
4bc55c6b685bc80a4aae415b71b7fb0645f10a5a | 380a47268c5975473a2e7c38c747bc3bdbd981b1 | /benchmark/third_party/transformers/examples/research_projects/adversarial/utils_hans.py | e54792ad2f82b91a560f56cdc19020ad25b1b2c2 | [
"Apache-2.0"
] | permissive | FMInference/FlexGen | 07aa9b1918c19b02077e13ad07e76840843810dd | d34f7b4b43ed87a374f394b0535ed685af66197b | refs/heads/main | 2023-07-24T02:29:51.179817 | 2023-07-21T22:38:31 | 2023-07-21T22:38:31 | 602,270,517 | 6,821 | 411 | Apache-2.0 | 2023-07-07T22:59:24 | 2023-02-15T21:18:53 | Python | UTF-8 | Python | false | false | 11,761 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
"""
A single training/test example for simple sequence classification.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
pairID: (Optional) string. Unique identifier for the pair of sentences.
"""
guid: str
text_a: str
text_b: Optional[str] = None
label: Optional[str] = None
pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
"""
A single set of features of data.
Property names are the same names as the corresponding inputs to a model.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
token_type_ids: (Optional) Segment token indices to indicate first and second
portions of the inputs. Only some models use them.
label: (Optional) Label corresponding to the input. Int for classification problems,
float for regression problems.
pairID: (Optional) Unique identifier for the pair of sentences.
"""
input_ids: List[int]
attention_mask: Optional[List[int]] = None
token_type_ids: Optional[List[int]] = None
label: Optional[Union[int, float]] = None
pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
features: List[InputFeatures]
def __init__(
self,
data_dir: str,
tokenizer: PreTrainedTokenizer,
task: str,
max_seq_length: Optional[int] = None,
overwrite_cache=False,
evaluate: bool = False,
):
processor = hans_processors[task]()
cached_features_file = os.path.join(
data_dir,
"cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train",
tokenizer.__class__.__name__,
str(max_seq_length),
task,
),
)
label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}")
self.features = torch.load(cached_features_file)
else:
logger.info(f"Creating features from dataset file at {data_dir}")
examples = (
processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
)
logger.info("Training examples: %s", len(examples))
self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(self.features, cached_features_file)
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def get_labels(self):
return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
"""
This will be superseded by a framework-agnostic approach
soon.
"""
features: List[InputFeatures]
def __init__(
self,
data_dir: str,
tokenizer: PreTrainedTokenizer,
task: str,
max_seq_length: Optional[int] = 128,
overwrite_cache=False,
evaluate: bool = False,
):
processor = hans_processors[task]()
label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
self.dataset = tf.data.Dataset.from_generator(
gen,
(
{
"example_id": tf.int32,
"input_ids": tf.int32,
"attention_mask": tf.int32,
"token_type_ids": tf.int32,
},
tf.int64,
),
(
{
"example_id": tf.TensorShape([]),
"input_ids": tf.TensorShape([None, None]),
"attention_mask": tf.TensorShape([None, None]),
"token_type_ids": tf.TensorShape([None, None]),
},
tf.TensorShape([]),
),
)
def get_dataset(self):
return self.dataset
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def get_labels(self):
return self.label_list
class HansProcessor(DataProcessor):
"""Processor for the HANS data set."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")
def get_labels(self):
"""See base class.
Note that we follow the standard three labels for MNLI
(see :class:`~transformers.data.processors.utils.MnliProcessor`)
but the HANS evaluation groups `contradiction` and `neutral` into `non-entailment` (label 0) while
`entailment` is label 1."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[5]
text_b = line[6]
pairID = line[7][2:] if line[7].startswith("ex") else line[7]
label = line[0]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
return examples
def hans_convert_examples_to_features(
examples: List[InputExample],
label_list: List[str],
max_length: int,
tokenizer: PreTrainedTokenizer,
):
"""
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` containing the examples.
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method.
max_length: Maximum example length.
tokenizer: Instance of a tokenizer that will tokenize the examples.
Returns:
A list of task-specific ``InputFeatures`` which can be fed to the model.
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
if ex_index % 10000 == 0:
logger.info("Writing example %d" % (ex_index))
inputs = tokenizer(
example.text_a,
example.text_b,
add_special_tokens=True,
max_length=max_length,
padding="max_length",
truncation=True,
return_overflowing_tokens=True,
)
label = label_map[example.label] if example.label in label_map else 0
pairID = int(example.pairID)
features.append(InputFeatures(**inputs, label=label, pairID=pairID))
for i, example in enumerate(examples[:5]):
logger.info("*** Example ***")
logger.info(f"guid: {example}")
logger.info(f"features: {features[i]}")
return features
hans_tasks_num_labels = {
"hans": 3,
}
hans_processors = {
"hans": HansProcessor,
}
| [
"sqy1415@gmail.com"
] | sqy1415@gmail.com |
bcdd85a3ed8af68cb0db7988caed6866557c1a53 | aaec91eb381f7a565457644ae3decf394f01c5ed | /pubwork/demo/learner/demo_webdriver.py | 0d3a8b4a14febaff89051cba92b80bb3a6acfba7 | [] | no_license | plutoese/pubwork | c5dd6f908599dcb05ae5b50435a34d269391ff61 | 0649e0ab098c1f7099d98b7fd981425b24fddf3e | refs/heads/master | 2021-01-12T01:07:03.096016 | 2017-12-24T05:05:19 | 2017-12-24T05:05:19 | 78,340,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | import webbrowser
webbrowser.open('http://docs.python.org/lib/module-webbrowser.html') | [
"glen.zhang7@gmail.com"
] | glen.zhang7@gmail.com |
413da12fae267071b1dbfa9ca3c2fc0495b14b9e | 65d844f57c93b97459ba58a7f8d31fcddaef2c57 | /example/train_multi_task.py | 8ca016de299fe5216290169295f8f39fb5af5857 | [
"Apache-2.0"
] | permissive | tonywenuon/keras_dialogue_generation_toolkit | 797411838e8213422cce4f5ac94f4e98e56cc912 | 75d82e7a281cd17a70bd9905fcebf2b906a6deec | refs/heads/master | 2023-04-04T14:39:10.117825 | 2021-03-29T11:25:23 | 2021-03-29T11:25:23 | 215,433,666 | 24 | 2 | null | 2023-03-24T22:45:14 | 2019-10-16T01:55:53 | Python | UTF-8 | Python | false | false | 16,211 | py | import os, sys, time, math
# Make the project root (two directories above this script) importable so
# sibling packages like `models` and `commonly_used_code` resolve no matter
# which working directory the script is launched from.
project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if project_path not in sys.path:
    sys.path.append(project_path)
import tensorflow as tf
import keras
import argparse
import numpy as np
from copy import deepcopy
from keras.callbacks import ModelCheckpoint, EarlyStopping, LearningRateScheduler, ReduceLROnPlateau
from keras.utils import plot_model
from keras.models import load_model
from keras.utils import get_custom_objects
from models.multi_task import MultiTaskModel
from commonly_used_code.helper_fn import Hypothesis
from commonly_used_code import helper_fn, config
from run_script.args_parser import multi_task_add_arguments
from vspgt_data_reader import DataSet
import keras.backend.tensorflow_backend as KTF
#KTF.set_session(tf.Session(config=tf.ConfigProto(device_count={'cpu':0})))
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
class MultiTask:
def __init__(self, args):
# real Transformer model architecture
self.multi_task_model= MultiTaskModel(args=args)
self.args = args
exp_name = args.data_set + '_' + args.exp_name
# create experiment dir
self.exp_dir= os.path.join(args.checkpoints_dir, exp_name)
helper_fn.makedirs(self.exp_dir)
hist_name = exp_name + '.hist'
model_name = exp_name + '_final_model.h5'
self.history_path = os.path.join(self.exp_dir, hist_name)
self.model_path = os.path.join(self.exp_dir, model_name)
outputs_dir = args.outputs_dir
helper_fn.makedirs(outputs_dir)
self.src_out_name = exp_name + '.src'
self.src_out_path = os.path.join(outputs_dir, self.src_out_name)
self.pred_out_name = exp_name + '.pred'
self.pred_out_path = os.path.join(outputs_dir, self.pred_out_name)
self.tar_out_name = exp_name + '.tgt'
self.tar_out_path = os.path.join(outputs_dir, self.tar_out_name)
    def train(self):
        """Train the multi-task model, then persist weights and history.

        Refuses to run when a final weights file already exists, because a
        weights-only file cannot be resumed through load_model.
        """
        ds = DataSet(self.args)
        print('*' * 100)
        print('train sample number: ', ds.train_sample_num)
        print('valid sample number: ', ds.valid_sample_num)
        print('test sample number: ', ds.test_sample_num)
        print('*' * 100)
        train_generator = ds.data_generator('train', 'multi_task')
        valid_generator = ds.data_generator('valid', 'multi_task')
        # Build and compile a fresh model: one sparse-categorical-crossentropy
        # loss per output head (od1/od2/od3), all equally weighted.
        def compile_new_model():
            _model = self.multi_task_model.get_model()
            _model.compile(
                optimizer=keras.optimizers.Adam(lr=self.args.lr),
                loss = {
                    'od1': 'sparse_categorical_crossentropy',
                    'od2': 'sparse_categorical_crossentropy',
                    'od3': 'sparse_categorical_crossentropy',
                },
                loss_weights={
                    'od1': 1.,
                    'od2': 1.,
                    'od3': 1.,
                }
            )
            return _model
        if os.path.exists(self.model_path):
            raise ValueError('Current model just saves weights. Please re-train the model.')
            #print('Loading model from: %s' % self.model_path)
            #custom_dict = get_custom_objects()
            #model = load_model(self.model_path, custom_objects=custom_dict)
        else:
            print('Compile new model...')
            model = compile_new_model()
        model.summary()
        #plot_model(model, to_file='model_structure.png',show_shapes=True)
        verbose = 1
        # Callbacks: early stopping, best-checkpoint saving, LR halving on plateau.
        earlystopper = EarlyStopping(monitor='val_loss', patience=self.args.early_stop_patience, verbose=verbose)
        ckpt_name = 'model-ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5'
        ckpt_path = os.path.join(self.exp_dir, ckpt_name)
        #checkpoint = ModelCheckpoint(ckpt_path, monitor='val_loss', verbose=verbose, save_weights_only=True, save_best_only=True, mode='min')
        checkpoint = ModelCheckpoint(ckpt_path, monitor='val_loss', verbose=verbose, save_best_only=True, mode='min')
        lrate = keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=self.args.lr_decay_patience,
            verbose=verbose,
            mode='auto',
            min_delta=0.0001,
            cooldown=0,
            min_lr=self.args.lr_min,
        )
        callback_list = [earlystopper, checkpoint, lrate]
        #callback_list = [earlystopper, lrate]
        # Generator-based training; steps derived from sample counts.
        hist = model.fit_generator(
            generator=train_generator,
            steps_per_epoch=(ds.train_sample_num//self.args.batch_size),
            epochs=self.args.epochs,
            callbacks=callback_list,
            validation_data=valid_generator,
            validation_steps=(ds.valid_sample_num//self.args.batch_size),
        )
        # Persist the loss/metric history as a plain string dump.
        with open(self.history_path,'w') as f:
            f.write(str(hist.history))
        #model.save(self.model_path)
        model.save_weights(self.model_path)
        #plot_model(model, to_file='model_structure.png',show_shapes=True)
    def test(self):
        """Greedy-decode the test set and write sources, predictions and
        targets to their respective output files (one line per sample).
        """
        # NOTE(review): uses the module-level `args` (set under __main__)
        # rather than self.args -- only works when run as a script; confirm.
        ds = DataSet(args)
        test_generator = ds.data_generator('test', 'multi_task')
        # Same compile settings as train(): one loss per output head.
        def compile_new_model():
            _model = self.multi_task_model.get_model()
            _model.compile(
                optimizer=keras.optimizers.Adam(lr=self.args.lr),
                loss = {
                    'od1': 'sparse_categorical_crossentropy',
                    'od2': 'sparse_categorical_crossentropy',
                    'od3': 'sparse_categorical_crossentropy',
                },
                loss_weights={
                    'od1': 1.,
                    'od2': 1.,
                    'od3': 1.,
                }
            )
            return _model
        # load_model
        print('Loading model from: %s' % self.model_path)
        #custom_dict = get_custom_objects()
        #model = load_model(self.model_path, custom_objects=custom_dict)
        model = compile_new_model()
        model.load_weights(self.model_path)
        src_outobj = open(self.src_out_path, 'w')
        pred_outobj = open(self.pred_out_path, 'w')
        tar_outobj = open(self.tar_out_path, 'w')
        for batch_index, ([src_input, tar_input, fact_tar_input, facts_input], \
            [_, _, _]) in enumerate(test_generator):
            if batch_index > (ds.test_sample_num // self.args.batch_size):
                # finish all of the prediction
                break
            print('Current batch: {}/{}. '.format(batch_index, ds.test_sample_num // self.args.batch_size))
            cur_batch_size = tar_input.shape[0]
            tar_length = tar_input.shape[1]
            # Decoder input buffer: <start> followed by padding.
            results = np.zeros_like(tar_input)
            results[:, 0] = ds.start_id
            for i in range(1, tar_length):
                results[:, i] = ds.pad_id
            # Greedy decoding: at each step feed the tokens decoded so far and
            # take the argmax token for position t.
            for t in range(1, tar_length):
                preds, _, _ = model.predict([src_input, np.asarray(results), fact_tar_input, facts_input])
                pred_id = np.argmax(preds, axis=-1)
                results[:, t] = np.asarray(pred_id[:, t-1])
            # Detokenise id sequences and write one line per sample; stop at
            # <end>, drop <pad>/<start>. `tag` is unused.
            def output_results(tag, outputs, outobj):
                for out_index, result in enumerate(outputs):
                    seq = []
                    for _id in result:
                        _id = int(_id)
                        if _id == ds.end_id:
                            break
                        if _id != ds.pad_id and _id != ds.start_id:
                            token = ds.tar_id_tokens.get(_id, config.UNK_TOKEN)
                            seq.append(token)
                    write_line = ' '.join(seq)
                    write_line = write_line + '\n'
                    outobj.write(write_line)
                    outobj.flush()
            output_results('result', results, pred_outobj)
            output_results('src', src_input, src_outobj)
            output_results('tar', tar_input, tar_outobj)
        src_outobj.close()
        pred_outobj.close()
        tar_outobj.close()
        print(self.pred_out_path)
def beam_search_test(self):
beam_size = self.args.beam_size
ds = DataSet(args)
test_generator = ds.data_generator('test', 'multi_task')
def sort_for_each_hyp(hyps, rank_index):
"""Return a list of Hypothesis objects, sorted by descending average log probability"""
return sorted(hyps, key=lambda h: h.avg_prob[rank_index], reverse=True)
def get_new_hyps(all_hyps):
hyp = all_hyps[0]
batch_size = hyp.batch_size
tar_len = hyp.tar_len
new_hyps = []
for i in range(beam_size):
hyp = Hypothesis(batch_size, tar_length, ds.start_id, ds.end_id)
new_hyps.append(hyp)
for i in range(batch_size):
# rank based on each sample's probs
sorted_hyps = sort_for_each_hyp(all_hyps, i)
for j in range(beam_size):
hyp = sorted_hyps[j]
new_hyps[j].res_ids[i] = hyp.res_ids[i]
new_hyps[j].pred_ids[i] = hyp.pred_ids[i]
new_hyps[j].probs[i] = hyp.probs[i]
return new_hyps
def update_hyps(all_hyps):
# all_hyps: beam_size * beam_size current step hyps.
new_hyps = get_new_hyps(all_hyps)
return new_hyps
def get_final_results(hyps):
hyp = hyps[0]
batch_size = hyp.batch_size
tar_len = hyp.tar_len
final_hyp = Hypothesis(batch_size, tar_length, ds.start_id, ds.end_id)
for i in range(batch_size):
# rank based on each sample's probs
sorted_hyps = sort_for_each_hyp(hyps, i)
hyp = sorted_hyps[0]
final_hyp.res_ids[i] = hyp.res_ids[i]
final_hyp.pred_ids[i] = hyp.pred_ids[i]
final_hyp.probs[i] = hyp.probs[i]
res = np.asarray(final_hyp.res_ids)
return res
# load_model
def compile_new_model():
_model = self.multi_task_model.get_model()
_model.compile(
optimizer=keras.optimizers.Adam(lr=self.args.lr),
loss = {
'od1': 'sparse_categorical_crossentropy',
'od2': 'sparse_categorical_crossentropy',
'od3': 'sparse_categorical_crossentropy',
},
loss_weights={
'od1': 1.,
'od2': 1.,
'od3': 1.,
}
)
return _model
# load_model
print('Loading model from: %s' % self.model_path)
#custom_dict = get_custom_objects()
#model = load_model(self.model_path, custom_objects=custom_dict)
model = compile_new_model()
model.load_weights(self.model_path)
src_outobj = open(self.src_out_path, 'w')
pred_outobj = open(self.pred_out_path, 'w')
tar_outobj = open(self.tar_out_path, 'w')
for batch_index, ([src_input, tar_input, fact_tar_input, facts_input], \
[_, _, _]) in enumerate(test_generator):
if batch_index > (ds.test_sample_num // self.args.batch_size):
# finish all of the prediction
break
print('Current batch: {}/{}. '.format(batch_index, ds.test_sample_num // self.args.batch_size))
cur_batch_size = tar_input.shape[0]
tar_length = tar_input.shape[1]
hyps = []
for i in range(beam_size):
hyp = Hypothesis(cur_batch_size, tar_length, ds.start_id, ds.end_id)
hyps.append(hyp)
for t in range(1, tar_length):
# iterate each sample
# collect all hyps, basically, it's beam_size * beam_size
all_hyps = []
for i in range(beam_size):
cur_hyp = hyps[i]
results = cur_hyp.get_predictable_vars(ds.pad_id)
# bs, tar_len, 60000
preds, _, _ = model.predict([src_input, np.asarray(results), fact_tar_input, facts_input])
# get the current step prediction
cur_preds = preds[:, t - 1]
top_indices = np.argsort(cur_preds)
top_indices = top_indices[:, -beam_size:] # the largest one is at the end
top_logits = []
for sample_index, sample_logits in enumerate(cur_preds):
logits = []
for beam_index in range(beam_size):
logit = sample_logits[top_indices[sample_index][beam_index]]
logits.append(logit)
top_logits.append(logits)
top_logits = np.asarray(top_logits)
#print('top_logits: ', top_logits[0])
# iterate each new prediction
for j in range(beam_size-1, -1, -1):
next_hyp = deepcopy(cur_hyp)
# bs, 1
top_index = top_indices[:, j]
top_logit = top_logits[:, j]
for bs_idx, _id in enumerate(top_index):
next_hyp.res_ids[bs_idx].append(_id)
prob = top_logit[bs_idx]
next_hyp.probs[bs_idx].append(prob)
# get OOV id
token = ds.tar_id_tokens.get(int(_id), config.UNK_TOKEN)
if token == config.UNK_TOKEN:
cur_pred_id = ds.unk_id
else:
cur_pred_id = _id
next_hyp.pred_ids[bs_idx].append(cur_pred_id)
all_hyps.append(next_hyp)
# if it is the first step, only predict once
if t == 1:
break
hyps = update_hyps(all_hyps)
final_results = get_final_results(hyps)
def output_results(outputs, outobj):
for result in outputs:
seq = []
for _id in result:
_id = int(_id)
if _id == ds.end_id:
break
if _id != ds.pad_id and _id != ds.start_id:
#if _id != ds.pad_id:
seq.append(ds.tar_id_tokens.get(_id, config.UNK_TOKEN))
write_line = ' '.join(seq)
write_line = write_line + '\n'
outobj.write(write_line)
outobj.flush()
output_results(results, pred_outobj)
output_results(src_input, src_outobj)
output_results(tar_input, tar_outobj)
src_outobj.close()
pred_outobj.close()
tar_outobj.close()
print(self.pred_out_path)
if __name__ == '__main__':
    # Entry point: parse CLI arguments and run evaluation.
    parser = argparse.ArgumentParser()
    multi_task_add_arguments(parser)
    args = parser.parse_args()
    print(args)
    trans = MultiTask(args)
    # Uncomment to (re-)train before decoding.
    #trans.train()
    trans.test()
    # trans.beam_search_test()
| [
"you@example.com"
] | you@example.com |
09e6bc03c3a701ff1bfbec140086deac1a4a854e | 535651edc45f41dd4c57539ed6f76f26856e12f3 | /Total Code/MC Simulation Code/MonteCarloControl.py | 5d74fef480b2cfab6fddd72de67be14a3d2c2631 | [] | no_license | Dedwards841/PPTSimulationAndViewer | e154c914092dc33a5987bf2de76dbc5d603324fd | 4e6242c41a52e22633cb5c0b46fd957afc64ca9a | refs/heads/master | 2020-05-14T06:50:29.177224 | 2019-04-16T18:55:21 | 2019-04-16T18:55:21 | 181,708,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,439 | py | import numpy as np
import subprocess
import scipy
import matplotlib.pyplot as plt
import scipy.misc as mpimg
import matplotlib.colors as colours
import matplotlib.patches as mpatches
import os
def getDat(file, wavelength, start):
    """Look up the value paired with *wavelength* in a whitespace-separated
    table file.

    The file is read from line index *start* onwards; each line is a flat
    sequence of ``wavelength value`` pairs.  Returns the value following the
    last matching wavelength token, or 0.0 when nothing matches.
    """
    result = 0.0
    # 'with' guarantees the handle is closed (replaces manual try/finally).
    with open(file) as readin:
        for line in readin.readlines()[start:]:
            tokens = line.split()
            # Stop one short: a match needs a following value token
            # (the original could raise IndexError on a trailing match).
            for i in range(len(tokens) - 1):
                try:
                    if float(tokens[i]) == wavelength:
                        result = float(tokens[i + 1])
                except ValueError:
                    # Non-numeric token (e.g. a column header) -- skip it.
                    continue
    return result
#Required to get the most usable value for Water absorptionCoEff as above 800 it is given every 5-10 nm
def roundWater(waveIn):
    """Round *waveIn* (nm) to the nearest wavelength present in the water
    absorption table.

    Below 800 nm the table is sampled every other nanometre, so the input is
    bumped up by one; from 800 nm the spacing is irregular (5-10 nm), hence
    the explicit bucket boundaries below.
    """
    if(waveIn < 800):
        waveOut=waveIn+1
    elif(waveIn>=800.0 and waveIn<805.0):
        waveOut=800
    elif(waveIn>=805.0 and waveIn<815.0):
        waveOut=810
    elif(waveIn>=815.0 and waveIn<822.5):
        waveOut=820
    elif(waveIn>=822.5 and waveIn<827.5):
        waveOut=825
    # FIX: was 'waveIn>827.5', which left waveIn == 827.5 unhandled and made
    # the function raise UnboundLocalError at the return below.
    elif(waveIn>=827.5 and waveIn<835.0):
        waveOut=830
    elif(waveIn>=835.0 and waveIn<845.0):
        waveOut=840
    elif(waveIn>=845.0 and waveIn<855.0):
        waveOut=850
    elif(waveIn>=855.0 and waveIn<865.0):
        waveOut=860
    elif(waveIn>=865.0 and waveIn<872.5):
        waveOut=870
    # Reached only for 872.5 <= waveIn < 877.5 (earlier branches catch the rest).
    elif(waveIn>=855.0 and waveIn<877.5):
        waveOut=875
    elif(waveIn>=877.5 and waveIn<885.0):
        waveOut=880
    elif(waveIn>=885.0 and waveIn<895.0):
        waveOut=890
    elif(waveIn>=895.0 and waveIn<905.0):
        waveOut=900
    elif(waveIn>=905.0 and waveIn<915.0):
        waveOut=910
    elif(waveIn>=915.0 and waveIn<922.5):
        waveOut=920
    elif(waveIn>=922.5 and waveIn<927.5):
        waveOut=925
    elif(waveIn>=927.5 and waveIn<935.0):
        waveOut=930
    elif(waveIn>=935.0 and waveIn<945.0):
        waveOut=940
    elif(waveIn>=945.0 and waveIn<955.0):
        waveOut=950
    elif(waveIn>=955.0 and waveIn<965.0):
        waveOut=960
    elif(waveIn>=965.0 and waveIn<972.5):
        waveOut=970
    elif(waveIn>=972.5 and waveIn<977.5):
        waveOut=975
    elif(waveIn>=977.5 and waveIn<985.0):
        waveOut=980
    elif(waveIn>=985.0 and waveIn<995.0):
        waveOut=990
    elif(waveIn>=995.0):
        waveOut=1000
    return waveOut
def setDensityGrid(cf,looper): # writes per-tissue absorption coefficients for the current run
    """Compute absorption coefficients for skin, blood, gold nanoparticles
    and cancer at the wavelength stored in res/input.params, and write them
    (one per line) to data/coeffStruct3D.dat.

    cf: concentration factor applied to the GNP coefficient.
    looper: selects which GNP absorption table (rod 150/160/170) to use.
    """
    f = open('data/coeffStruct3D.dat', 'w')
    # NOTE(review): `reads` is never closed.
    reads = open('res/input.params')
    wavelength = reads.read().split()[30]
    # Blood tables are sampled every 2 nm, so odd wavelengths are bumped up;
    # the water table needs its own irregular rounding (roundWater).
    if(float(wavelength)%2 != 0):
        wavelengthBlood = float(wavelength)+1
        wavelengthWater = roundWater(float(wavelength))
    else:
        wavelengthBlood = float(wavelength)
        wavelengthWater = roundWater(float(wavelength))
    absorbBloodOx = getDat('data/absorptionCoEff/datahemo.txt', wavelengthBlood, 0)
    absorbBloodDeox = getDat('data/absorptionCoEff/datahemodeox.txt', wavelengthBlood, 0)
    # Oxygenated/deoxygenated mix for healthy vs cancerous tissue.
    absorbBloodTotal = (62.6*absorbBloodOx + 37.4*absorbBloodDeox)/100
    absorbBloodTotalCancer = (61.1*absorbBloodOx + 38.9*absorbBloodDeox)/100
    absorbWater = getDat('data/absorptionCoEff/datawatar.txt', wavelengthWater, 4)
    absorbFat = getDat('data/absorptionCoEff/datafat.txt', float(wavelength), 3)
    #Values from Jacques 2013
    absorbSkin = 0.0069*absorbBloodTotal + 0.065*absorbWater + 0.74*absorbFat
    absorbCancer = 0.0176*absorbBloodTotalCancer + 0.4*absorbWater + 0.39*absorbFat
    # NOTE(review): looper values 4 and 7 match none of the branches below,
    # so absorbGNP stays unbound and `cf*absorbGNP` raises NameError; looper
    # 2 and 3 each match two branches (the later assignment wins).
    # Presumably the intended groups were (1,4,7)/(2,5,8)/(3,6,9) -- confirm.
    if(looper==1 or looper==2 or looper==3):
        absorbGNP = getDat('data/absorptionCoEff/gnprod150.txt', float(wavelength), 26)
    if(looper==2 or looper==5 or looper==6):
        absorbGNP = getDat('data/absorptionCoEff/gnprod160.txt', float(wavelength), 26)
    if(looper==3 or looper==8 or looper==9):
        absorbGNP = getDat('data/absorptionCoEff/gnprod170.txt', float(wavelength), 26)
    absorbGNP = cf*absorbGNP
    try:
        f.write(str(absorbSkin)+'\n')
        f.write(str(absorbBloodTotal)+'\n')
        f.write(str(absorbGNP)+'\n')
        f.write(str(absorbCancer)+'\n')
    finally:
        f.close()
# Master switch: True re-runs the full Monte Carlo parameter sweep below.
run_MC = True #set true to run new Monte Carlo, false will not
if (run_MC):
    # Sweep: 9 GNP configurations x 2 laser powers x 2 concentration factors
    # x 4 wavelengths; each combination launches one simulation run.
    for looper in [1,2,3,4,5,6,7,8,9]:
        for power in [5,10]:
            #Loop 1 {Laser Power = 5W, 10W}
            for cf in [10.0,20.0]:
                #Loop 2 {Concentration Factor = 10.0,20.0}
                for wavel in [775, 800, 825, 850]:
                    #Loop 3 {Wavelengths = 775, 800, 825, 850}
                    # Append this run's parameters to the run log.
                    output = open("ParametersIn.txt","a+")
                    output.write("%d %.1f %d \n" % (power, cf, wavel))
                    output.close()
                    # Patch power (line 7) and wavelength (line 12) in place.
                    with open('res/input.params', 'r') as file:
                        paramet = file.readlines()
                    paramet[6] = str(power) + "\t\tPower\n"
                    paramet[11] = str(wavel) + "\t\tWavelength\n"
                    with open('res/input.params', 'w') as file:
                        file.writelines(paramet)
                    # Regenerate absorption coefficients, then run the sim.
                    setDensityGrid(cf,looper)
                    os.system("bash install.sh")
| [
"noreply@github.com"
] | noreply@github.com |
a508e75bc7b12d906fb95cd239f5fb1f0de0807c | 1dc086115cca625bc7fbf527e33c903fe6144e37 | /chris_ulanowicz/assignments/django/semi_restful_routes/apps/semi_restful/migrations/0001_initial.py | 70a76986ed2e874c6d6c3d03c8a1ac6c19770629 | [] | no_license | CodingDojoDallas/python_march_2017 | cdf70587dc6f85963e176c3b43057c7f7a196a97 | 31f9e01c011d049999eec4d231ff1a4520ecff76 | refs/heads/master | 2021-01-17T14:34:33.601830 | 2017-05-15T20:05:02 | 2017-05-15T20:05:02 | 84,091,494 | 4 | 14 | null | 2017-05-15T20:05:03 | 2017-03-06T15:49:18 | Python | UTF-8 | Python | false | false | 840 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-21 18:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the `Product` table.
    # First migration for this app, so no prior migration state exists.
    initial = True
    # No migrations from other apps need to run first.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                # Implicit auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45)),
                ('description', models.CharField(max_length=255)),
                # Up to 999,999.99 with two decimal places.
                ('price', models.DecimalField(decimal_places=2, max_digits=8)),
                # Set once on insert / refreshed on every save, respectively.
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| [
"src3collector@gmail.com"
] | src3collector@gmail.com |
43b3cbd48a03294aea79ae3c9ce101d34b108e76 | a8e3c2e8f6c8ec07d37a199fef4821828852bbdc | /lesson2_netmiko/ex6a.py | 8d78e1a87005b598a99ac0e4bea71b6e8f9401e7 | [
"Apache-2.0"
] | permissive | anejolazaro70/python_july19 | 9a392fabeb4965b161ff6e238d11fdfcbeac3647 | d4fd2f0d00d22f2f103ef9fe9c289c8b0651b454 | refs/heads/master | 2020-06-20T07:53:42.498899 | 2019-11-17T08:41:41 | 2019-11-17T08:41:41 | 197,049,427 | 0 | 0 | Apache-2.0 | 2019-07-15T19:09:21 | 2019-07-15T18:11:49 | Python | UTF-8 | Python | false | false | 575 | py | #!/usr/bin/python
from datetime import datetime
from netmiko import ConnectHandler
from pprint import pprint
from getpass import getpass
# Connect to one IOS device, print its CLI prompt, and time the session.
password=getpass()  # prompt interactively so the password is never stored in the script
# Netmiko connection parameters; session_log captures the raw CLI transcript.
device={"host": "cisco4",
        "username": "user",
        "password": password,
        'secret': password,
        "device_type": "cisco_ios",
        "session_log": "cisco4_6a.txt"}
t1=datetime.now()  # start timestamp
ssh_con=ConnectHandler(**device)
prompt=ssh_con.find_prompt()
print(prompt)
ssh_con.disconnect()
t2=datetime.now()  # end timestamp
t3=t2-t1  # elapsed wall-clock time for connect + prompt + disconnect
print("\nINICIO: ", t1)
print('\nFIN: ', t2)
print('\nDuracion ejecucion comando: ', t3)
| [
"anejo.lazaro70@gmail.com"
] | anejo.lazaro70@gmail.com |
b82292526d12840745f7a813829afbfd6e146306 | a8fff8e93f31eaeeeca32138d0d7a0280ecbc5a1 | /com/example/testCNN/00SimpleCNN.py | acfb88c412f2c5ecdb05deddd72e5583ccdc5612 | [] | no_license | angel1288/tensorflow0921 | 9d7fa51a28efc410149fa42ce8d580dd94fed5c4 | c9ae989d4f266b3d023492ec3109c3be36e1fb8c | refs/heads/master | 2021-07-02T01:48:36.507182 | 2017-09-22T08:13:52 | 2017-09-22T08:13:52 | 104,326,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,974 | py | # coding=utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# 获取数据集
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
sess = tf.InteractiveSession()
# Weight initialiser helper.
def weight_varible(shape):
    """Return a trainable weight tensor initialised with truncated-normal
    noise (stddev 0.1); the random noise breaks symmetry between units."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
# Bias initialiser helper.
def bias_varible(shape):
    """Return a trainable bias tensor filled with 0.1; the small positive
    start value suits the ReLU activations used below."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
# Convolution helper.
def conv2d(x, w):
    # Stride 1 in every dimension; 'SAME' zero-padding keeps the spatial size.
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
# Pooling helper.
def max_pool_2x2(x):
    # 2x2 max pooling with stride 2 halves both spatial dimensions.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Placeholders for the input batch: flattened 28x28 images and one-hot labels.
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
x_image = tf.reshape(x, [-1, 28, 28, 1])
# All network parameters.
weights = {
    'wc1': weight_varible([5, 5, 1, 32]),
    'wc2': weight_varible([5, 5, 32, 64]),
    'wfc1': weight_varible([7*7*64, 1024]),
    'out': weight_varible([1024, 10]),
}
biases = {
    'bc1': bias_varible([32]),
    'bc2': bias_varible([64]),
    'bfc1': bias_varible([1024]),
    'out': bias_varible([10]),
}
# First convolution + pooling layer: 28x28x1 -> 14x14x32.
conv1 = tf.nn.relu(conv2d(x_image, weights['wc1']) + biases['bc1'])
pool1 = max_pool_2x2(conv1)
# Second convolution + pooling layer: 14x14x32 -> 7x7x64.
conv2 = tf.nn.relu(conv2d(pool1, weights['wc2']) + biases['bc2'])
pool2 = max_pool_2x2(conv2)
# Fully connected layer on the flattened feature map.
pool_fc1 = tf.reshape(pool2, [-1, 7*7*64])
fc1 = tf.nn.relu(tf.matmul(pool_fc1, weights['wfc1']) + biases['bfc1'])
# Dropout to reduce overfitting; keep_prob is fed at run time
# (0.5 while training, 1.0 for evaluation).
keep_prob = tf.placeholder(tf.float32)
fc1_drop = tf.nn.dropout(fc1, keep_prob)
# Output layer: softmax over the 10 digit classes.
# FIX: feed the dropout output into the final layer; the original used `fc1`,
# which silently disabled dropout even though keep_prob was being fed.
y_conv = tf.nn.softmax(tf.matmul(fc1_drop, weights['out']) + biases['out'])
# Cross-entropy loss and Adam optimizer.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
# Accuracy: fraction of argmax predictions matching the labels.
correct_pred = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Training loop: report training accuracy every 100 steps.
tf.global_variables_initializer().run()
for i in range(20000):
    batch = mnist.train.next_batch(100)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print("test accuracy %g" % accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
# 92%
| [
"1531915260@qq.com"
] | 1531915260@qq.com |
81cdcfe92fc5bf55a123313bcb72f6397eed45b6 | 28e128e814ebd94c89fa1778019af6104e706237 | /convert.py | 2d41357e63f28c161039c7de61b2e3648918a909 | [] | no_license | hxsylzpf/neo-meguro-line | 7fd9497f76c16837209b19ca841d5cc09cfb401b | 6195533ed60a04dd808c7170e385cd9db6d57e25 | refs/heads/master | 2021-06-10T20:55:19.096317 | 2016-11-25T15:41:37 | 2016-11-25T15:41:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,324 | py | import cv2
import numpy as np
import argparse
import base64
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
def get_vision_service():
    """Build a Google Cloud Vision v1 client using Application Default Credentials."""
    credentials = GoogleCredentials.get_application_default()
    return discovery.build('vision', 'v1', credentials=credentials)
def detect_face(image, max_results=4):
    """Run Google Cloud Vision face detection on an OpenCV image.

    image: image array as produced by cv2.imread.
    max_results: maximum number of faces to request.
    Returns the list of faceAnnotations dicts ([] when no face is found).
    Raises RuntimeError when the Vision API reports an error.
    """
    image_content = image_to_bytes(image)
    batch_request = [{
        'image': {
            # The REST API expects base64-encoded image bytes.
            'content': base64.b64encode(image_content).decode('utf-8')
        },
        'features': [{
            'type': 'FACE_DETECTION',
            'maxResults': max_results,
        }]
    }]
    service = get_vision_service()
    request = service.images().annotate(body={
        'requests': batch_request,
    })
    response = request.execute()
    first_response = response['responses'][0]
    if 'error' in first_response:
        print(first_response['error'])
        # FIX: was a bare `raise` with no active exception, which itself
        # raised an uninformative RuntimeError; raise one carrying the error.
        raise RuntimeError(first_response['error'])
    if 'faceAnnotations' not in first_response:
        return []
    return first_response['faceAnnotations']
def image_to_bytes(image):
    """PNG-encode an OpenCV image and return the encoded bytes.

    Raises ValueError when encoding fails; the previous version ignored the
    success flag returned by cv2.imencode and could return garbage silently.
    """
    ok, buf = cv2.imencode('.png', image)
    if not ok:
        raise ValueError('cv2.imencode failed to PNG-encode the image')
    return buf.tobytes()
def point_to_vector(p):
    """Return the (x, y) fields of a landmark-position dict as a numpy array."""
    return np.array((p['x'], p['y']))
def draw_black_line(image, positions):
    """Draw a filled black quadrilateral (censor bar) across both eyes.

    image: OpenCV image, modified in place.
    positions: Vision API face `landmarks` list of {'type', 'position'} dicts.
    """
    # Bar extends 1.25 eye-heights above/below the eye axis and 0.4 of the
    # eye-to-eye distance beyond each eye.
    PADDING_VERTICAL_RATIO = 1.25
    PADDING_HORIZONTAL_RATIO = 0.4
    type_to_position = {}
    for position in positions:
        p = position['position']
        # NOTE(review): truncates the landmark coordinates in place, mutating
        # the caller's annotation dicts as a side effect.
        for k, v in p.items():
            p[k] = int(v)
        type_to_position[position['type']] = p
    left = point_to_vector(type_to_position['LEFT_EYE'])
    right = point_to_vector(type_to_position['RIGHT_EYE'])
    left_top = np.array(left)
    left_bottom = np.array(left)
    right_top = np.array(right)
    right_bottom = np.array(right)
    horizontal_direction = right - left
    # Unit normal perpendicular to the eye-to-eye axis.
    normal = np.array([horizontal_direction[1], -horizontal_direction[0]], int)
    normal = normal / np.linalg.norm(normal)
    # vertical: take the larger of the two eye heights as the bar half-height base
    left_height = np.linalg.norm(point_to_vector(type_to_position['LEFT_EYE_BOTTOM_BOUNDARY']) - point_to_vector(type_to_position['LEFT_EYE_TOP_BOUNDARY']))
    right_height = np.linalg.norm(point_to_vector(type_to_position['RIGHT_EYE_BOTTOM_BOUNDARY']) - point_to_vector(type_to_position['RIGHT_EYE_TOP_BOUNDARY']))
    height = max(left_height, right_height)
    left_top += np.array(height * PADDING_VERTICAL_RATIO * normal, int)
    left_bottom -= np.array(height * PADDING_VERTICAL_RATIO * normal, int)
    right_top += np.array(height * PADDING_VERTICAL_RATIO * normal, int)
    right_bottom -= np.array(height * PADDING_VERTICAL_RATIO * normal, int)
    horizontal_pad = np.array(PADDING_HORIZONTAL_RATIO * (right - left), int)
    left_top -= horizontal_pad
    left_bottom -= horizontal_pad
    right_top += horizontal_pad
    right_bottom += horizontal_pad
    # NOTE(review): cv2.CV_AA belongs to the OpenCV 2.x API; newer versions
    # name the anti-aliased line flag cv2.LINE_AA -- confirm target version.
    cv2.fillPoly(image, [np.array([
        left_top,
        left_bottom,
        right_bottom,
        right_top,
    ])], color=(0, 0, 0), lineType=cv2.CV_AA)
if __name__ == "__main__":
    # CLI: detect faces in the given image, draw a black bar across each
    # pair of eyes, then dump the resulting PNG bytes.
    parser = argparse.ArgumentParser()
    parser.add_argument('image', help='a path to image')
    args = parser.parse_args()
    image = cv2.imread(args.image)
    # Ask the Vision API for up to 15 faces.
    data = detect_face(image, 15)
    for annotation in data:
        draw_black_line(image, annotation['landmarks'])
    # NOTE(review): this prints the repr of the PNG bytes rather than writing
    # binary data to a file -- confirm the intended output channel.
    print(image_to_bytes(image))
| [
"uiureo@gmail.com"
] | uiureo@gmail.com |
2f4ad34593d619afe4392bde5ef7782179948d56 | fd69d76dcfe60b97ca02eb853e3f2cd2b68d990e | /tree/serialize_deserialize.py | 34c503596f178063464a9402d8208b4a6238f7eb | [] | no_license | Levalife/DSA | f3204946c9225f0472ec8470c0fbe29357559f35 | 4e5a94ba94fa5be01f4760a2651001426b3ef973 | refs/heads/master | 2023-01-23T03:51:48.864888 | 2020-11-27T13:58:04 | 2020-11-27T13:58:04 | 298,612,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,766 | py | # -*- coding: utf-8 -*-
class Tree:
    # Minimal binary-tree holder: just a reference to the root Node.
    def __init__(self, root=None):
        self.root = root
class Node:
    # Binary-tree node; `parent` is stored but never read by the
    # serialize/deserialize code below.
    def __init__(self, value, parent=None, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right
        self.parent = parent
'''
10
7 11
6 8 20
1 9 14 22
'''
# Build the sample tree pictured in the string diagram above (root value 10).
tree = Tree()
tree.root = Node(10)
tree.root.left = Node(7, tree.root)
tree.root.right = Node(11, tree.root)
tree.root.left.left = Node(6, tree.root.left)
tree.root.left.right = Node(8, tree.root.left)
tree.root.right.right = Node(20, tree.root.right)
tree.root.left.left.left = Node(1, tree.root.left.left)
tree.root.left.right.right = Node(9, tree.root.left.right)
tree.root.right.right.left = Node(14, tree.root.right.right)
tree.root.right.right.right = Node(22, tree.root.right.right)
def serialize(node):
    """Pre-order serialise a binary tree; 'X,' marks an absent child."""
    if node is None:
        return "X,"
    left_part = serialize(node.left)
    right_part = serialize(node.right)
    return "%s,%s%s" % (node.value, left_part, right_part)
serialized_tree = serialize(tree.root)
print(serialized_tree)
def deserialize(tree_str):
    """Rebuild a tree from a serialize() string; values come back as strings."""
    return deserialize_helper(tree_str.split(','))


def deserialize_helper(tree_list):
    """Consume tokens from the front of tree_list in pre-order."""
    if not tree_list:
        return None
    token = tree_list.pop(0)
    if token == 'X':
        return None
    node = Node(value=token)
    node.left = deserialize_helper(tree_list)
    node.right = deserialize_helper(tree_list)
    return node
deserialized_tree = deserialize(serialized_tree)
def preorder(node):
    """Print node values in pre-order (root, left subtree, right subtree)."""
    stack = [node]
    while stack:
        current = stack.pop()
        print(current.value)
        # Push right first so the left child is printed next (LIFO order).
        if current.right:
            stack.append(current.right)
        if current.left:
            stack.append(current.left)
preorder(deserialized_tree) | [
"levushka14@gmail.com"
] | levushka14@gmail.com |
fdefb509489057fa295e11720c34278da23aee3e | a1b3797565edcae1364916da8c0d8bb1de8d2d00 | /sdk/python/pulumi_kubernetes/certificates/v1beta1/_inputs.py | de0f35bd2535c0e101d9a52b50db5222427086ba | [
"Apache-2.0"
] | permissive | Teshel/pulumi-kubernetes | 2316ab3ddb374717322423d24191aa272a963049 | 8d007166d0e8968fcabaeecd0cee13f9c08d97f1 | refs/heads/master | 2021-06-27T02:04:28.125052 | 2021-05-28T13:51:14 | 2021-05-28T13:51:14 | 231,344,409 | 0 | 0 | Apache-2.0 | 2020-01-02T09:01:22 | 2020-01-02T09:01:21 | null | UTF-8 | Python | false | false | 18,361 | py | # coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import meta as _meta
__all__ = [
'CertificateSigningRequestArgs',
'CertificateSigningRequestConditionArgs',
'CertificateSigningRequestSpecArgs',
'CertificateSigningRequestStatusArgs',
]
@pulumi.input_type
class CertificateSigningRequestArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,
spec: Optional[pulumi.Input['CertificateSigningRequestSpecArgs']] = None,
status: Optional[pulumi.Input['CertificateSigningRequestStatusArgs']] = None):
"""
Describes a certificate signing request
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input['CertificateSigningRequestSpecArgs'] spec: The certificate request itself and any additional information.
:param pulumi.Input['CertificateSigningRequestStatusArgs'] status: Derived information about the request.
"""
if api_version is not None:
pulumi.set(__self__, "api_version", 'certificates.k8s.io/v1beta1')
if kind is not None:
pulumi.set(__self__, "kind", 'CertificateSigningRequest')
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if spec is not None:
pulumi.set(__self__, "spec", spec)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]:
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def spec(self) -> Optional[pulumi.Input['CertificateSigningRequestSpecArgs']]:
"""
The certificate request itself and any additional information.
"""
return pulumi.get(self, "spec")
@spec.setter
def spec(self, value: Optional[pulumi.Input['CertificateSigningRequestSpecArgs']]):
pulumi.set(self, "spec", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input['CertificateSigningRequestStatusArgs']]:
"""
Derived information about the request.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input['CertificateSigningRequestStatusArgs']]):
pulumi.set(self, "status", value)
@pulumi.input_type
class CertificateSigningRequestConditionArgs:
    """A single status condition recorded on a CertificateSigningRequest.

    Auto-generated pulumi input type: values live in the pulumi backing
    store (via pulumi.set/pulumi.get), not in instance attributes.
    """

    def __init__(__self__, *,
                 type: pulumi.Input[str],
                 last_transition_time: Optional[pulumi.Input[str]] = None,
                 last_update_time: Optional[pulumi.Input[str]] = None,
                 message: Optional[pulumi.Input[str]] = None,
                 reason: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] type: type of the condition. Known conditions include "Approved", "Denied", and "Failed".
        :param pulumi.Input[str] last_transition_time: lastTransitionTime is the time the condition last transitioned from one status to another. If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time.
        :param pulumi.Input[str] last_update_time: timestamp for the last update to this condition
        :param pulumi.Input[str] message: human readable message with details about the request state
        :param pulumi.Input[str] reason: brief reason for the request state
        :param pulumi.Input[str] status: Status of the condition, one of True, False, Unknown. Approved, Denied, and Failed conditions may not be "False" or "Unknown". Defaults to "True". If unset, should be treated as "True".
        """
        pulumi.set(__self__, "type", type)
        # Optional fields are only stored when supplied so unset values stay
        # absent from the rendered resource.
        if last_transition_time is not None:
            pulumi.set(__self__, "last_transition_time", last_transition_time)
        if last_update_time is not None:
            pulumi.set(__self__, "last_update_time", last_update_time)
        if message is not None:
            pulumi.set(__self__, "message", message)
        if reason is not None:
            pulumi.set(__self__, "reason", reason)
        if status is not None:
            pulumi.set(__self__, "status", status)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        type of the condition. Known conditions include "Approved", "Denied", and "Failed".
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="lastTransitionTime")
    def last_transition_time(self) -> Optional[pulumi.Input[str]]:
        """
        lastTransitionTime is the time the condition last transitioned from one status to another. If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time.
        """
        return pulumi.get(self, "last_transition_time")

    @last_transition_time.setter
    def last_transition_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_transition_time", value)

    @property
    @pulumi.getter(name="lastUpdateTime")
    def last_update_time(self) -> Optional[pulumi.Input[str]]:
        """
        timestamp for the last update to this condition
        """
        return pulumi.get(self, "last_update_time")

    @last_update_time.setter
    def last_update_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_update_time", value)

    @property
    @pulumi.getter
    def message(self) -> Optional[pulumi.Input[str]]:
        """
        human readable message with details about the request state
        """
        return pulumi.get(self, "message")

    @message.setter
    def message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "message", value)

    @property
    @pulumi.getter
    def reason(self) -> Optional[pulumi.Input[str]]:
        """
        brief reason for the request state
        """
        return pulumi.get(self, "reason")

    @reason.setter
    def reason(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "reason", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        Status of the condition, one of True, False, Unknown. Approved, Denied, and Failed conditions may not be "False" or "Unknown". Defaults to "True". If unset, should be treated as "True".
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
@pulumi.input_type
class CertificateSigningRequestSpecArgs:
    """The spec of a CertificateSigningRequest (the CSR data and usages).

    Auto-generated pulumi input type: values live in the pulumi backing
    store (via pulumi.set/pulumi.get), not in instance attributes.
    """

    def __init__(__self__, *,
                 request: pulumi.Input[str],
                 extra: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[str]]]]]] = None,
                 groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 signer_name: Optional[pulumi.Input[str]] = None,
                 uid: Optional[pulumi.Input[str]] = None,
                 usages: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 username: Optional[pulumi.Input[str]] = None):
        """
        This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.
        :param pulumi.Input[str] request: Base64-encoded PKCS#10 CSR data
        :param pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[str]]]]] extra: Extra information about the requesting user. See user.Info interface for details.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] groups: Group information about the requesting user. See user.Info interface for details.
        :param pulumi.Input[str] signer_name: Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:
                1. If it's a kubelet client certificate, it is assigned
                   "kubernetes.io/kube-apiserver-client-kubelet".
                2. If it's a kubelet serving certificate, it is assigned
                   "kubernetes.io/kubelet-serving".
                3. Otherwise, it is assigned "kubernetes.io/legacy-unknown".
               Distribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.
        :param pulumi.Input[str] uid: UID information about the requesting user. See user.Info interface for details.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] usages: allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
               https://tools.ietf.org/html/rfc5280#section-4.2.1.12
               Valid values are:
                "signing",
                "digital signature",
                "content commitment",
                "key encipherment",
                "key agreement",
                "data encipherment",
                "cert sign",
                "crl sign",
                "encipher only",
                "decipher only",
                "any",
                "server auth",
                "client auth",
                "code signing",
                "email protection",
                "s/mime",
                "ipsec end system",
                "ipsec tunnel",
                "ipsec user",
                "timestamping",
                "ocsp signing",
                "microsoft sgc",
                "netscape sgc"
        :param pulumi.Input[str] username: Information about the requesting user. See user.Info interface for details.
        """
        pulumi.set(__self__, "request", request)
        # Optional fields are only stored when supplied so unset values stay
        # absent from the rendered resource.
        if extra is not None:
            pulumi.set(__self__, "extra", extra)
        if groups is not None:
            pulumi.set(__self__, "groups", groups)
        if signer_name is not None:
            pulumi.set(__self__, "signer_name", signer_name)
        if uid is not None:
            pulumi.set(__self__, "uid", uid)
        if usages is not None:
            pulumi.set(__self__, "usages", usages)
        if username is not None:
            pulumi.set(__self__, "username", username)

    @property
    @pulumi.getter
    def request(self) -> pulumi.Input[str]:
        """
        Base64-encoded PKCS#10 CSR data
        """
        return pulumi.get(self, "request")

    @request.setter
    def request(self, value: pulumi.Input[str]):
        pulumi.set(self, "request", value)

    @property
    @pulumi.getter
    def extra(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[str]]]]]]:
        """
        Extra information about the requesting user. See user.Info interface for details.
        """
        return pulumi.get(self, "extra")

    @extra.setter
    def extra(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[str]]]]]]):
        pulumi.set(self, "extra", value)

    @property
    @pulumi.getter
    def groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Group information about the requesting user. See user.Info interface for details.
        """
        return pulumi.get(self, "groups")

    @groups.setter
    def groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "groups", value)

    @property
    @pulumi.getter(name="signerName")
    def signer_name(self) -> Optional[pulumi.Input[str]]:
        """
        Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:
         1. If it's a kubelet client certificate, it is assigned
            "kubernetes.io/kube-apiserver-client-kubelet".
         2. If it's a kubelet serving certificate, it is assigned
            "kubernetes.io/kubelet-serving".
         3. Otherwise, it is assigned "kubernetes.io/legacy-unknown".
        Distribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.
        """
        return pulumi.get(self, "signer_name")

    @signer_name.setter
    def signer_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "signer_name", value)

    @property
    @pulumi.getter
    def uid(self) -> Optional[pulumi.Input[str]]:
        """
        UID information about the requesting user. See user.Info interface for details.
        """
        return pulumi.get(self, "uid")

    @uid.setter
    def uid(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "uid", value)

    @property
    @pulumi.getter
    def usages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
        https://tools.ietf.org/html/rfc5280#section-4.2.1.12
        Valid values are:
         "signing",
         "digital signature",
         "content commitment",
         "key encipherment",
         "key agreement",
         "data encipherment",
         "cert sign",
         "crl sign",
         "encipher only",
         "decipher only",
         "any",
         "server auth",
         "client auth",
         "code signing",
         "email protection",
         "s/mime",
         "ipsec end system",
         "ipsec tunnel",
         "ipsec user",
         "timestamping",
         "ocsp signing",
         "microsoft sgc",
         "netscape sgc"
        """
        return pulumi.get(self, "usages")

    @usages.setter
    def usages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "usages", value)

    @property
    @pulumi.getter
    def username(self) -> Optional[pulumi.Input[str]]:
        """
        Information about the requesting user. See user.Info interface for details.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class CertificateSigningRequestStatusArgs:
    """The status of a CertificateSigningRequest (issued cert + conditions).

    Auto-generated pulumi input type: values live in the pulumi backing
    store (via pulumi.set/pulumi.get), not in instance attributes.
    """

    def __init__(__self__, *,
                 certificate: Optional[pulumi.Input[str]] = None,
                 conditions: Optional[pulumi.Input[Sequence[pulumi.Input['CertificateSigningRequestConditionArgs']]]] = None):
        """
        :param pulumi.Input[str] certificate: If request was approved, the controller will place the issued certificate here.
        :param pulumi.Input[Sequence[pulumi.Input['CertificateSigningRequestConditionArgs']]] conditions: Conditions applied to the request, such as approval or denial.
        """
        # Both fields are optional; store them only when supplied.
        if certificate is not None:
            pulumi.set(__self__, "certificate", certificate)
        if conditions is not None:
            pulumi.set(__self__, "conditions", conditions)

    @property
    @pulumi.getter
    def certificate(self) -> Optional[pulumi.Input[str]]:
        """
        If request was approved, the controller will place the issued certificate here.
        """
        return pulumi.get(self, "certificate")

    @certificate.setter
    def certificate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "certificate", value)

    @property
    @pulumi.getter
    def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CertificateSigningRequestConditionArgs']]]]:
        """
        Conditions applied to the request, such as approval or denial.
        """
        return pulumi.get(self, "conditions")

    @conditions.setter
    def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CertificateSigningRequestConditionArgs']]]]):
        pulumi.set(self, "conditions", value)
| [
"noreply@github.com"
] | noreply@github.com |
5c064fb27f4e1f5959994430e93a3a4ee5d91147 | 72fcc9b617014484a1c021fa90af57b457aba5ba | /06.BinarySearchTree/01.ConstructionConversion/3_construct_bst_from_preorder.py | a4d96db9559efa2c78f1ee3b4a74ff5d091c6804 | [] | no_license | shindesharad71/Data-Structures | 249cb89fc3b54a3d8a67e4e9db832e256d072ee6 | a7cd247228a723e880bccd3aa24c072722785f6d | refs/heads/main | 2023-07-24T21:01:08.070082 | 2021-09-03T04:02:05 | 2021-09-03T04:02:05 | 370,706,713 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,468 | py | # Construct BST from given preorder traversal
# https://www.geeksforgeeks.org/construct-bst-from-given-preorder-traversa/
# A O(n^2) Python3 program for
# construction of BST from preorder traversal
# A binary tree node
class Node:
    """A single node of a binary search tree."""

    def __init__(self, data):
        # Payload value; the child links start empty and are attached
        # later while the tree is being built.
        self.data = data
        self.left = self.right = None
# constructTreeUtil.preIndex is a static variable of
# function constructTreeUtil
# Function to get the value of static variable
# constructTreeUtil.preIndex
def getPreIndex():
    # Read the cursor into pre[] that is kept as a function attribute on
    # constructTreeUtil (a poor man's static variable).
    return constructTreeUtil.preIndex
# Function to increment the value of static variable
# constructTreeUtil.preIndex
def incrementPreIndex():
    # Advance the shared preorder cursor past one consumed element.
    constructTreeUtil.preIndex += 1
# A recurseive function to construct Full from pre[].
# preIndex is used to keep track of index in pre[[].
def constructTreeUtil(pre, low, high):
    """Recursively build a BST from pre[low..high] (a preorder listing).

    The current position in pre[] is kept in the function attribute
    constructTreeUtil.preIndex, initialised by construct_tree() and
    advanced each time a value is consumed. O(n^2) overall because of
    the linear scan for the right-subtree boundary.
    """
    # Base case: empty sub-range.
    if low > high:
        return None
    # The element at preIndex is the root of this subtree; consume it.
    root = Node(pre[getPreIndex()])
    incrementPreIndex()
    # A single-element range needs no further recursion.
    if low == high:
        return root
    r_root = -1
    # In a BST preorder, the right subtree starts at the first element
    # larger than the root; scan for it.
    for i in range(low, high + 1):
        if pre[i] > root.data:
            r_root = i
            break
    # If nothing is greater, every remaining element belongs to the left
    # subtree, so place the right boundary just past this range.
    if r_root == -1:
        r_root = getPreIndex() + (high - low)
    # Split pre[] at r_root: elements before it form the left subtree,
    # the rest form the right subtree.
    root.left = constructTreeUtil(pre, getPreIndex(), r_root - 1)
    root.right = constructTreeUtil(pre, r_root, high)
    return root
# The main function to construct BST from given preorder
# traversal. This function mailny uses constructTreeUtil()
def construct_tree(pre):
    """Build a BST from the preorder sequence `pre` and return its root."""
    # Reset the shared cursor before kicking off the recursion.
    constructTreeUtil.preIndex = 0
    return constructTreeUtil(pre, 0, len(pre) - 1)
def inorder(root):
    """Print the tree's values in in-order (sorted for a BST) sequence."""
    if root is None:
        return
    inorder(root.left)
    print(root.data, end=" ")
    inorder(root.right)
# Driver Code
if __name__ == "__main__":
    # Demo: build a BST from a sample preorder listing and verify by
    # printing its in-order traversal (which should come out sorted).
    pre = [10, 5, 1, 7, 40, 50]
    root = construct_tree(pre)
    print("Inorder traversal of constructed tree")
    inorder(root)
| [
"shindesharad71@gmail.com"
] | shindesharad71@gmail.com |
9e4ebb7bb3ece18681a32f13e3172dd0796e4e5a | 5a255ee5fd8e72bb5e19bb2f305286c6cd0f573d | /route/qiniu/fop.py | 350b28577497422d780528b1bea37f4326847af7 | [] | no_license | HNUST-CS/angle-street | 646628fb1566de2c1244e2a468de768f8a62488e | 9e8381bec70bd8a503b7125d943c4c7c4ea77106 | refs/heads/master | 2020-04-09T10:36:06.040474 | 2015-03-03T04:33:41 | 2015-03-03T04:33:41 | 31,420,384 | 12 | 8 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | # -*- coding:utf-8 -*-
class Exif(object):
    """Qiniu image op: request the EXIF metadata of a stored image."""

    def make_request(self, url):
        # Append the exif query command to the resource URL.
        return url + '?exif'
class ImageView(object):
    """Qiniu imageView fop: build a thumbnail/preview request URL."""

    mode = 1        # crop mode, 1 or 2
    width = None    # target width; None means unconstrained
    height = None   # target height; None means unconstrained
    quality = None  # image quality, 1-100
    format = None   # output format: jpg, gif, png, tif, ...

    def make_request(self, url):
        # The mode always leads; every other segment is emitted only when
        # the corresponding attribute has been set.
        segments = ['%s' % self.mode]
        for tag, value in (('w', self.width),
                           ('h', self.height),
                           ('q', self.quality),
                           ('format', self.format)):
            if value is not None:
                segments.append('%s/%s' % (tag, value))
        return '%s?imageView/%s' % (url, '/'.join(segments))
class ImageInfo(object):
    """Qiniu image op: request basic image information."""

    def make_request(self, url):
        return url + '?imageInfo'
| [
"hi@hi-hi.cn"
] | hi@hi-hi.cn |
5ad6b50d507fa3f6a5f4c0d582deefc664338deb | 3e4b890366d58b6bede0443b281892ae773cd9a1 | /Flask-User-starter-app/app/settings.py | da4f36f82549257415b25876a461c0c16ff20e3f | [
"BSD-2-Clause"
] | permissive | manu3333/Proyecto | 42dd841d317116c652fe180dec1006d1803bdec3 | f7629c1e2bbe6c07f322d30237933cd03a1efcd5 | refs/heads/master | 2020-05-20T18:24:47.625772 | 2017-04-25T21:21:23 | 2017-04-25T21:21:23 | 84,499,879 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | # Settings common to all environments (development|staging|production)
# Place environment-specific settings in env_settings.py
# (env_settings_example.py can be used as a starting point).
import os

# -- Application settings -----------------------------------------------
APP_NAME = "BigDBee"
APP_SYSTEM_ERROR_SUBJECT_LINE = APP_NAME + " system error"  # subject line for system-error messages

# -- Flask settings -----------------------------------------------------
CSRF_ENABLED = True  # enable cross-site request forgery protection

# -- Flask-SQLAlchemy settings ------------------------------------------
SQLALCHEMY_TRACK_MODIFICATIONS = False  # avoid the modification-tracking overhead

# -- Flask-User settings ------------------------------------------------
USER_APP_NAME = APP_NAME
USER_ENABLE_CHANGE_PASSWORD = True    # Allow users to change their password
USER_ENABLE_CHANGE_USERNAME = False   # Allow users to change their username
USER_ENABLE_CONFIRM_EMAIL = True      # Force users to confirm their email
USER_ENABLE_FORGOT_PASSWORD = True    # Allow users to reset their passwords
USER_ENABLE_EMAIL = True              # Register with Email
USER_ENABLE_REGISTRATION = True      # Allow new users to register
USER_ENABLE_RETYPE_PASSWORD = True    # Prompt for `retype password` in forms
USER_ENABLE_USERNAME = False          # Register and Login with username
USER_AFTER_LOGIN_ENDPOINT = 'user_page'   # view redirected to after login
USER_AFTER_LOGOUT_ENDPOINT = 'home_page'  # view redirected to after logout
| [
"emmanuel.nieves3@upr.edu"
] | emmanuel.nieves3@upr.edu |
d80ded76b732c6905d63ab10812102879d84c783 | 05f58b8bdef5e19c295421468fad2d2cec64ac2f | /my-recipessg/main.py | e5bd67882b98520826964615a13ae39eec18d648 | [] | no_license | thordur03/VEF--Verk4 | a40521029e90c9cc2859d2b93164dc62e7b47d7b | 4de2275b473c37a998c8226c2705d7cf20da1b8d | refs/heads/master | 2021-07-14T16:44:00.443774 | 2020-03-31T11:19:13 | 2020-03-31T11:19:13 | 249,011,515 | 0 | 0 | null | 2021-06-02T21:54:39 | 2020-03-21T16:05:59 | HTML | UTF-8 | Python | false | false | 1,698 | py | import os
import codecs
from datetime import datetime
from jinja2 import Environment, PackageLoader
from markdown2 import markdown
POSTS = {}
# Render every markdown file in content/ to HTML, keeping its front-matter
# via the markdown2 "metadata" extra (exposed as the .metadata attribute).
for markdown_post in os.listdir('content'):
    file_path = os.path.join('content', markdown_post)
    # Fix: read/write everything as UTF-8 explicitly. Only the index write
    # used an encoding before; the content reads and the other writes fell
    # back to the platform default (e.g. cp1252 on Windows), which corrupts
    # the non-ASCII (Icelandic) text.
    with open(file_path, 'r', encoding='utf-8') as file:
        POSTS[markdown_post] = markdown(file.read(), extras=['metadata'])

# Re-order the posts newest-first by their 'date' metadata (YYYY-MM-DD).
POSTS = {
    post: POSTS[post]
    for post in sorted(
        POSTS,
        key=lambda post: datetime.strptime(POSTS[post].metadata['date'], '%Y-%m-%d'),
        reverse=True,
    )
}

env = Environment(loader=PackageLoader('main', 'templates'))
index_template = env.get_template('index.html')
bread_template = env.get_template('bread.html')
post_template = env.get_template('post.html')

# The front page is static (no markdown post rendering).
index_html = index_template.render()

# Bread recipes: one listing page fed with every post's metadata and tags.
posts_metadata = [POSTS[post].metadata for post in POSTS]
tags = [post['tags'] for post in posts_metadata]
bread_html = bread_template.render(posts=posts_metadata, tags=tags)

with open('../my-recipe/index.html', 'w', encoding='utf-8') as file:
    file.write(index_html)
with open('../my-recipe/bread.html', 'w', encoding='utf-8') as file:
    file.write(bread_html)

# One output page per post, written to ../my-recipe/posts/<slug>.html.
for post in POSTS:
    post_metadata = POSTS[post].metadata
    post_data = {
        'content': POSTS[post],
        'title': post_metadata['title'],
        'date': post_metadata['date'],
        'thumbnail': post_metadata['thumbnail'],
    }
    post_html = post_template.render(post=post_data)
    post_file_path = '../my-recipe/posts/{slug}.html'.format(slug=post_metadata['slug'])
    os.makedirs(os.path.dirname(post_file_path), exist_ok=True)
    with open(post_file_path, 'w', encoding='utf-8') as file:
        file.write(post_html)
| [
"noreply@github.com"
] | noreply@github.com |
4a2f8d6c9ed2d00e8ed94eef8b4bce6ebb50a686 | 4518ce1ee32ffbd4004df6865f557c5a3909c135 | /awards/migrations/0004_reviews.py | 6c70e3944ef1fceffaca5ddef335e41ee17a2d17 | [
"MIT"
] | permissive | petermirithu/Grant_py | d9a04dee7fc0ae80e55a15b073e6b24108b23555 | 0e2e8d2a01c361583853e4d06fc4ede45e3741f8 | refs/heads/master | 2022-12-14T19:04:42.503002 | 2020-01-09T17:45:00 | 2020-01-09T17:45:00 | 231,231,593 | 1 | 0 | MIT | 2022-12-08T03:22:31 | 2020-01-01T15:20:26 | Python | UTF-8 | Python | false | false | 962 | py | # Generated by Django 2.2.8 on 2020-01-03 15:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the `reviews` model — review
    # text attached to an awards.projo_post, written by a user. Generated
    # operations should not be edited by hand once the migration is applied.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('awards', '0003_auto_20200102_1411'),
    ]

    operations = [
        migrations.CreateModel(
            name='reviews',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.CharField(max_length=1000)),
                ('posted_on', models.DateTimeField(auto_now_add=True)),
                ('posted_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('projo_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='awards.projo_post')),
            ],
        ),
    ]
| [
"pyra_m.k@yahoo.com"
] | pyra_m.k@yahoo.com |
f2bb9616ac45278832dfc75c62148d1217f0bf70 | 13102a790732ddd3cedb9d6cf0cb813d6f2a895c | /k번째수.py | 9ce4b8c35dc7c7359c98ba4f00bdad5c4a43b5c6 | [] | no_license | MsSeonge/infrun-C-CodingTest | 72d3769189b8124de13c11af678a459e348b52d2 | 2cb5e6b96bb1a4aa527c15ed58eef4340a5e8976 | refs/heads/main | 2023-01-23T22:35:26.726890 | 2020-12-10T23:33:31 | 2020-12-10T23:33:31 | 319,673,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py |
import sys
# Redirect stdin so input() reads the judge data from input.txt.
sys.stdin = open("input.txt", "rt")
TestCase = int(input())
for i in range(1,TestCase+1):
    # N numbers follow; answer is the k-th smallest of the 1-based
    # slice [s..e] of the list.
    N, s, e, k = map(int, input().split())
    List = list(map(int,input().split()))
    sortedList = List[s-1:e]
    sortedList.sort()
    #print("#"+str(i)+" "+str(sortedList[k-1]))
    #
    # The required output format: "#<case number> <answer>"
    print("#%d %d" %(i, sortedList[k-1]))
| [
"srkim0371@gmail.com"
] | srkim0371@gmail.com |
059e8d97f0b62ea4ab980bb45f12a01bacc68228 | 6dd08ec6b4f6351de8450a3d7e592fd6b4994119 | /cbase/server/cbase-1.8.1/testrunner/pytests/spatialcompaction.py | 190ff0ff033f2e8af6e3946146558e06a12e1206 | [] | no_license | zhgwenming/appstack | d015e96b911fe318f9fba1bdeeea9d888d57dfba | 8fe6c1dfc2f5ed4a36c335e86ae28c17b3769276 | refs/heads/master | 2021-01-23T13:30:19.507537 | 2015-11-09T06:48:35 | 2015-11-09T06:48:35 | 7,576,644 | 1 | 2 | null | 2016-01-05T09:16:22 | 2013-01-12T15:13:21 | C | UTF-8 | Python | false | false | 1,569 | py | import unittest
import uuid
import logger
from membase.helper.spatial_helper import SpatialHelper
class SpatialCompactionTests(unittest.TestCase):
    """Manual-compaction test for spatial view indexes (SpatialHelper-based)."""

    def setUp(self):
        # Fresh helper and cluster around every test, on the `default` bucket.
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_spatial_compaction(self):
        self.log.info(
            "description : test manual compaction for spatial indexes")
        prefix = str(uuid.uuid4())[:7]
        design_name = "dev_test_spatial_compaction"
        self.helper.create_index_fun(design_name, prefix)
        # Insert (resp. update, as they have the same prefix) and query
        # the spatial index several times so the compaction makes sense.
        for i in range(0, 8):
            self.helper.insert_docs(2000, prefix)
            self.helper.get_results(design_name)
        # Record the on-disk index size prior to compaction.
        status, info = self.helper.info(design_name)
        disk_size = info["spatial_index"]["disk_size"]
        # Compact, then verify the on-disk index actually shrank.
        self.helper.compact(design_name)
        status, info = self.helper.info(design_name)
        self.assertTrue(info["spatial_index"]["disk_size"] < disk_size,
                        "The file size ({0}) isn't smaller than the "
                        "pre compaction size ({1})."
                        .format(info["spatial_index"]["disk_size"],
                                disk_size))
| [
"zhgwenming@gmail.com"
] | zhgwenming@gmail.com |
f22dc825cf7dbb473a0783088cb661e8971a18f1 | e7569ef74265f999fbeac9c2ffcf07e0b3a40e31 | /backend/manage.py | 1075829ba9e9ee42d33260f3b64a0950aae35882 | [] | no_license | crowdbotics-apps/now-prediction-25146 | 2d1999c58cbba61e719475f7c0c61d3222f484f4 | 562f8e2892c11e982a24231e6831d1b7df2ce43c | refs/heads/master | 2023-03-27T00:05:11.957207 | 2021-03-20T15:04:59 | 2021-03-20T15:04:59 | 349,755,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    # Point Django at this project's settings before anything imports them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'now_prediction_25146.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint: the usual cause is a missing or
        # un-activated virtual environment.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
1cfc2dab392b886beb2af8648c295931c76df682 | cc12cce1a4d99bc59fddcf51095fd64307c8a108 | /math_with_ai.py | 18bc2a038cc4565bcc2d9a241ed85f4411d37856 | [] | no_license | 28ananthaprakash/math_with_dl | 47c7eeca8aee13ff03c8b5c56da2faf9faedfdf8 | 4a49a1a2582c682c7423f46fd6cd03ce78291945 | refs/heads/master | 2020-11-30T03:51:51.261408 | 2019-12-26T16:22:12 | 2019-12-26T16:22:12 | 230,293,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,343 | py | # -*- coding: utf-8 -*-
'''
Author : Ananthaprakash T
'''
# Commented out IPython magic to ensure Python compatibility.
from __future__ import absolute_import, division, print_function, unicode_literals
#import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from sklearn.model_selection import train_test_split
import unicodedata
import re
import numpy as np
import os
import io
import time
import pandas as pd
tf.__version__
messages = ['1+1','2+2','3+3','4+4','5+5','6+6','7+7','8+8','9+9','10+10']
responses = ['2','4','6','8','10','12','14','16','18','20']
# Converts the unicode file to ascii
def unicode_to_ascii(s):
    """Drop combining marks from `s` after NFD normalisation
    (e.g. 'café' -> 'cafe')."""
    kept = []
    for ch in unicodedata.normalize('NFD', s):
        if unicodedata.category(ch) != 'Mn':
            kept.append(ch)
    return ''.join(kept)
def preprocess_sentence(w):
    # Placeholder hook: the arithmetic strings used here (e.g. '1+1') need
    # no cleaning yet, so the sentence is returned unchanged.
    return w
# Creating the dataset
def create_dataset(messages, responses, num_examples=None):
    """Preprocess each (message, response) pair and return the pairs
    unzipped as two parallel tuples, truncated to num_examples."""
    pairs = [
        [preprocess_sentence(msg), preprocess_sentence(resp)]
        for msg, resp in zip(messages, responses)
    ]
    return zip(*pairs[:num_examples])
def tokenize(lang1, lang2):
    """Fit one shared Tokenizer over both corpora and return their padded
    id sequences (first for lang1, then lang2) plus the tokenizer."""
    n_first = len(lang1)
    # A single vocabulary is fitted over questions and answers combined so
    # both sides share token ids.
    combined = tuple(list(lang1) + list(lang2))
    lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')
    lang_tokenizer.fit_on_texts(combined)
    tensor = lang_tokenizer.texts_to_sequences(combined)
    # Post-padding: zeros are appended after the tokens.
    tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor, padding='post')
    return tensor[:n_first], tensor[n_first:], lang_tokenizer
questions_1000, answers_1000 = create_dataset(messages, responses, num_examples=None)
qseq, aseq, words = tokenize(questions_1000, answers_1000)

# Largest token id appearing on either side; the embedding matrix below must
# be at least this big. (Replaces the original hand-rolled nested max-scan
# loops, which also leaked their temporaries into module scope.)
qvocab = max(max(row) for row in qseq)
avocab = max(max(row) for row in aseq)
vocab_size_calc = max(avocab, qvocab)
print(vocab_size_calc)
# Helper Function
"""
Helper Fulctions
"""
import numpy as np
def batch1(inputs, max_sequence_length=None):
    """Pad integer sequences with zeros and return them time-major.

    Args:
        inputs: list of sentences (integer lists).
        max_sequence_length: width to pad to; when None the longest
            sentence in the batch is used.

    Returns:
        (matrix of shape [max_time, batch_size] padded with 0s,
         list of each sequence's true length)
    """
    sequence_lengths = [len(seq) for seq in inputs]
    if max_sequence_length is None:
        max_sequence_length = max(sequence_lengths)
    # Fill a batch-major matrix first (0 == PAD), then flip to time-major.
    batch_major = np.zeros(shape=[len(inputs), max_sequence_length], dtype=np.int32)
    for row, seq in enumerate(inputs):
        for col, token in enumerate(seq):
            batch_major[row, col] = token
    return batch_major.swapaxes(0, 1), sequence_lengths
def random_sequences(length_from, length_to,
                     vocab_lower, vocab_upper,
                     batch_size):
    """Endlessly yield batches of random integer sequences.

    Each batch holds `batch_size` lists; lengths fall in
    [length_from, length_to) — or are exactly length_from when the two
    bounds are equal — with values drawn from [vocab_lower, vocab_upper).
    """
    while True:
        batch = []
        for _ in range(batch_size):
            if length_from == length_to:
                seq_len = length_from
            else:
                seq_len = np.random.randint(length_from, length_to)
            batch.append(np.random.randint(low=vocab_lower,
                                           high=vocab_upper,
                                           size=seq_len).tolist())
        yield batch
def make_batch(data, batch_size):
    """Group consecutive items of `data` into full batches of `batch_size`.

    Each item is copied into a fresh list; a trailing partial batch is
    dropped. Returns an iterator over the batches.
    """
    batches = []
    current = []
    for count, item in enumerate(data, start=1):
        current.append(list(item))
        if count % batch_size == 0:
            batches.append(current)
            current = []
    return iter(batches)
def batch2(input_tensor_train):
    """Return (time-major matrix, per-row count of non-zero entries).

    The counts treat 0 as padding, giving each already-padded row's
    effective sequence length.
    """
    seq_len = [sum(1 for token in row if token != 0)
               for row in input_tensor_train]
    time_major = np.array(input_tensor_train).swapaxes(0, 1)
    return time_major, seq_len
'''a1=[[1,2],[3,4],[5,6],[7,8]]
a1=np.array(a1)
z1 = np.zeros(a1.shape)
z1
b1=np.append(a1,z1,axis=1)
b1'''
'''a0 = make_batch(qseq,10)
batch2(next(a0))'''
# SEQ2SEQ model def
tf.__version__
sess = tf.InteractiveSession()
#First critical thing to decide: vocabulary size.
#Dynamic RNN models can be adapted to different batch sizes
#and sequence lengths without retraining
#(e.g. by serializing model parameters and Graph definitions via tf.train.Saver),
#but changing vocabulary size requires retraining the model.
PAD = 0
EOS = 1
vocab_size = vocab_size_calc
input_embedding_size = 28 #max([max([len(k) for k in qseq]),max([len(k) for k in aseq])]) #character length
encoder_hidden_units = 1000 #num neurons
decoder_hidden_units = encoder_hidden_units * 2 #in original paper, they used same number of neurons for both encoder
#and decoder, but we use twice as many so decoded output is different, the target value is the original input
#in this example
encoder_inputs = tf.placeholder(shape=(None, None), dtype=tf.int32, name='encoder_inputs')
#contains the lengths for each of the sequence in the batch, we will pad so all the same
#if you don't want to pad, check out dynamic memory networks to input variable length sequences
encoder_inputs_length = tf.placeholder(shape=(None,), dtype=tf.int32, name='encoder_inputs_length')
# Target token ids for training, shape [decoder_max_time, batch] (time-major).
decoder_targets = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_targets')
# Randomly initialised embedding matrix shared by encoder and decoder; maps
# token ids -> dense vectors of size input_embedding_size.
embeddings = tf.Variable(tf.random_uniform([vocab_size, input_embedding_size], -1.0, 1.0), dtype=tf.float32)
# Look up the embedding vector for every encoder input token.
encoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, encoder_inputs)
from tensorflow.python.ops.rnn_cell import LSTMCell, LSTMStateTuple
encoder_cell = LSTMCell(encoder_hidden_units)
# Bidirectional RNN: the same LSTM cell is run forward ("left to right") and
# backward ("right to left"), so each position is encoded using both past and
# future context.  Separate outputs and final states are returned per
# direction.
((encoder_fw_outputs,
  encoder_bw_outputs),
 (encoder_fw_final_state,
  encoder_bw_final_state)) = (
    tf.nn.bidirectional_dynamic_rnn(cell_fw=encoder_cell,
                                    cell_bw=encoder_cell,
                                    inputs=encoder_inputs_embedded,
                                    sequence_length=encoder_inputs_length,
                                    dtype=tf.float32, time_major=True)
    )
# Concatenate forward and backward outputs along the feature dimension.
encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)
# c is the internal cell state and h the output value
# (http://colah.github.io/posts/2015-08-Understanding-LSTMs/); both halves are
# concatenated and passed together as the combined LSTM state.
encoder_final_state_c = tf.concat(
    (encoder_fw_final_state.c, encoder_bw_final_state.c), 1)
encoder_final_state_h = tf.concat(
    (encoder_fw_final_state.h, encoder_bw_final_state.h), 1)
# LSTMStateTuple is the (c, h) pair format LSTM cells expect for state.
encoder_final_state = LSTMStateTuple(
    c=encoder_final_state_c,
    h=encoder_final_state_h
)
# NOTE(review): the concatenation doubles the state width, so
# decoder_hidden_units presumably equals 2 * encoder_hidden_units -- confirm
# where those constants are defined.
decoder_cell = LSTMCell(decoder_hidden_units)
encoder_max_time, batch_size = tf.unstack(tf.shape(encoder_inputs))
batch_size  # no-op: leftover notebook-cell echo
# Decode 3 steps past the input length: +2 additional steps plus the leading
# <EOS> token fed to the decoder.
decoder_lengths = encoder_inputs_length + 3
# Output projection mapping decoder hidden state -> vocabulary logits.
W = tf.Variable(tf.random_uniform([decoder_hidden_units, vocab_size], -1, 1), dtype=tf.float32)
# bias
b = tf.Variable(tf.zeros([vocab_size]), dtype=tf.float32)
# The special token ids are assumed fixed; trigger an error if not.
assert EOS == 1 and PAD == 0
eos_time_slice = tf.ones([batch_size], dtype=tf.int32, name='EOS')
pad_time_slice = tf.zeros([batch_size], dtype=tf.int32, name='PAD')
# Embedded <EOS>/<PAD> single steps, used to seed and pad decoder inputs.
eos_step_embedded = tf.nn.embedding_lookup(embeddings, eos_time_slice)
pad_step_embedded = tf.nn.embedding_lookup(embeddings, pad_time_slice)
# raw_rnn (used below instead of dynamic_rnn) requires explicit loop
# functions that supply the initial cell state and each step's input.
def loop_fn_initial():
    """raw_rnn loop function for step 0: seed the decoder.

    Returns the initial (finished, input, cell_state, output, loop_state)
    tuple: nothing is finished yet, the first input is the embedded <EOS>
    token, and the cell starts from the encoder's final state.
    """
    initial_elements_finished = (0 >= decoder_lengths)  # all False at the initial step
    # first decoder input: the embedded end-of-sentence token
    initial_input = eos_step_embedded
    # start decoding from the encoder's final (c, h) state
    initial_cell_state = encoder_final_state
    initial_cell_output = None
    initial_loop_state = None  # we don't need to pass any additional information
    return (initial_elements_finished,
            initial_input,
            initial_cell_state,
            initial_cell_output,
            initial_loop_state)
# Greedy decoding: feed the decoder's own previous prediction in as the input
# for the next timestep.
def loop_fn_transition(time, previous_output, previous_state, previous_loop_state):
    """raw_rnn loop function for steps > 0.

    Projects the previous cell output to vocabulary logits, takes the argmax
    token, embeds it, and feeds that embedding back as the next input.  Once
    every sequence in the batch has finished, the embedded <PAD> step is fed
    instead.
    """
    def get_next_input():
        # unnormalised vocabulary logits: previous_output @ W + b
        output_logits = tf.add(tf.matmul(previous_output, W), b)
        # greedy choice: index with the largest logit per batch element
        prediction = tf.argmax(output_logits, axis=1)
        # embed the predicted token so it can be the next decoder input
        next_input = tf.nn.embedding_lookup(embeddings, prediction)
        return next_input
    elements_finished = (time >= decoder_lengths) # this operation produces boolean tensor of [batch_size]
    # True only when ALL sequences in the batch have ended
    finished = tf.reduce_all(elements_finished) # -> boolean scalar
    # pad once everything is finished, otherwise decode the next token
    input = tf.cond(finished, lambda: pad_step_embedded, get_next_input)
    # state/output pass through unchanged; raw_rnn itself advances the cell
    state = previous_state
    output = previous_output
    loop_state = None
    return (elements_finished,
            input,
            state,
            output,
            loop_state)
def loop_fn(time, previous_output, previous_state, previous_loop_state):
    """Dispatcher for tf.nn.raw_rnn: first invocation vs. later timesteps."""
    if previous_state is not None:
        return loop_fn_transition(time, previous_output, previous_state, previous_loop_state)
    # First invocation (time == 0): no cell state or output exists yet.
    assert previous_output is None and previous_state is None
    return loop_fn_initial()
# raw_rnn is a more primitive dynamic_rnn: the loop functions above give it
# direct control over each iteration's input, over when sequences start and
# finish, and over what is emitted as output.
# ta = tensor array
decoder_outputs_ta, decoder_final_state, _ = tf.nn.raw_rnn(decoder_cell, loop_fn)
decoder_outputs = decoder_outputs_ta.stack()  # TensorArray -> dense tensor
# Flatten [steps, batch, dim] so a single matmul projects every timestep to
# vocabulary logits, then restore the time-major shape.
decoder_max_steps, decoder_batch_size, decoder_dim = tf.unstack(tf.shape(decoder_outputs))
decoder_outputs_flat = tf.reshape(decoder_outputs, (-1, decoder_dim))
decoder_logits_flat = tf.add(tf.matmul(decoder_outputs_flat, W), b)
decoder_logits = tf.reshape(decoder_logits_flat, (decoder_max_steps, decoder_batch_size, vocab_size))
# Final prediction: highest-scoring vocabulary id at every step.
decoder_prediction = tf.argmax(decoder_logits, 2)
# Cross-entropy against one-hot targets, averaged over all steps and batches.
stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
    labels=tf.one_hot(decoder_targets, depth=vocab_size, dtype=tf.float32),
    logits=decoder_logits,
)
loss = tf.reduce_mean(stepwise_cross_entropy)
train_op = tf.train.AdamOptimizer().minimize(loss)
sess.run(tf.global_variables_initializer())
# Defining Batch
batch_size = 2
'''batches = random_sequences(length_from=3, length_to=8,
                            vocab_lower=2, vocab_upper=10,
                            batch_size=batch_size)
'''
batches = make_batch(qseq,batch_size)
print('head of the batch:')
for seq in next(batches)[:10]:
    print(seq)
# recreate the generator: the peek above consumed its first batch
batches = make_batch(qseq,batch_size)
#batches = make_batch(input_tensor_train,100)
def next_feed():
    """Build a feed_dict from the next question batch (autoencoder style).

    The decoder target is the same sequence followed by <EOS> and two <PAD>
    steps, matching decoder_lengths = encoder_inputs_length + 3.  Raises
    StopIteration once `batches` is exhausted (the commented-out try/except
    used to swallow this).
    """
    batch = next(batches)
    encoder_inputs_, encoder_input_lengths_ = batch1(batch)
    decoder_targets_, _ = batch1(
        [(sequence) + [EOS] + [PAD] * 2 for sequence in batch]
    )
    return {
        encoder_inputs: encoder_inputs_,
        encoder_inputs_length: encoder_input_lengths_,
        decoder_targets: decoder_targets_,
    }
# Paired question/answer batch streams for chatbot-style training.
qbatches = make_batch(qseq,batch_size)
abatches = make_batch(aseq,batch_size)
def next_feed_chat():
    """Build a feed_dict pairing a question batch (encoder input) with the
    matching answer batch plus <EOS> and two <PAD>s (decoder target).

    NOTE: relies on qbatches/abatches advancing in lockstep; raises
    StopIteration when either stream is exhausted.
    """
    qbatch = next(qbatches)
    abatch = next(abatches)
    encoder_inputs_, encoder_input_lengths_ = batch1(qbatch)
    decoder_targets_, _ = batch1(
        [(sequence) + [EOS] + [PAD] * 2 for sequence in abatch]
    )
    return {
        encoder_inputs: encoder_inputs_,
        encoder_inputs_length: encoder_input_lengths_,
        decoder_targets: decoder_targets_,
    }
'''b=next_feed_chat()
b[decoder_targets]'''
#len(b[decoder_targets])
'''c=next_feed()
c[decoder_targets]'''
loss_track = []
max_batches = 5 #3001
batches_in_epoch = 2
# Training: 10 epochs, recreating the batch generators each epoch so the
# data can be iterated from the start again.
for i in range(10):
    qbatches = make_batch(qseq,batch_size)
    abatches = make_batch(aseq,batch_size)
    try:
        for batch in range(max_batches):
            fd = next_feed_chat()
            _, l = sess.run([train_op, loss], fd)
            loss_track.append(l)
            # periodically report loss and show a few decoded samples
            if batch == 0 or batch % batches_in_epoch == 0:
                print('batch {}'.format(batch))
                print('  minibatch loss: {}'.format(sess.run(loss, fd)))
                predict_ = sess.run(decoder_prediction, fd)
                # NOTE(review): this loop shadows the outer epoch variable `i`
                for i, (inp, pred) in enumerate(zip(fd[encoder_inputs].T, predict_.T)):
                    print('  sample {}:'.format(i + 1))
                    print('    input     > {}'.format(inp))
                    print('    predicted > {}'.format(pred))
                    zz=[]
                    # map non-<PAD> (id 0) input ids back to words
                    for z in inp:
                        if z != 0:
                            zz.append(words.index_word[z])
                    print(zz)
                    zz=[]
                    for z in pred:
                        if z != 0:
                            zz.append(words.index_word[z])
                    print(zz)
                    if i >= 2:  # show at most 3 samples
                        break
                print()
    except KeyboardInterrupt:
        print('training interrupted')
import matplotlib.pyplot as plt
# %matplotlib inline
plt.plot(loss_track)
| [
"noreply@github.com"
] | noreply@github.com |
4b1b47754908d1408d23be833869d656231ec113 | 06035a291e5f258bc876f48d68675e2a74904810 | /Practical one/q1_fahrenheit_to_celsius.py | 84540365c959349c7ea1aa13ea49a0f266b5c14c | [] | no_license | casanova98/Project-201501 | 16aec68c4d7155af5fd859e0dcf5c104316e8913 | b4444e3f5f598442f33bea61ab428df0680382b3 | refs/heads/master | 2016-09-06T14:03:32.788647 | 2015-02-09T12:25:43 | 2015-02-09T12:25:43 | 29,722,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py |
# q1_fahrenheit_to_celsius.py
def fahrenheit_to_celsius(fahrenheit):
    """Convert a Fahrenheit temperature to Celsius, rounded to 1 decimal place."""
    return round((5 / 9) * (fahrenheit - 32), 1)

def main():
    """Prompt for a Fahrenheit temperature and print the Celsius value."""
    answer = input("Enter the temperature you want in Celsius!")
    x = float(answer)  # ValueError here means the input was not a number
    celsius = fahrenheit_to_celsius(x)
    print("The temperature from Fahrenheit to Celsius to 1 decimal place is", celsius)

if __name__ == "__main__":  # guard so importing this module does not prompt
    main()
| [
"ngin.cheongjun.dennis@dhs.sg"
] | ngin.cheongjun.dennis@dhs.sg |
160c5656998950b55f1360a6571e4b94d5292381 | ae02333b17aa88d0fcb5de6a8d2d7147e96ae8af | /ex058.py | ec76451878c474cb0caa5280f4dcaadccbcc49fc | [] | no_license | jefersonmz78/cursoemvideo | 474a0a215e24b73f40039bbe2198c7071efc553d | 02ce7ff19f7ed4d851dfe429052e50ce751b84f0 | refs/heads/master | 2020-04-25T03:05:08.159564 | 2019-02-25T08:20:30 | 2019-02-25T08:20:30 | 172,463,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | from random import randint
comptador = randint(1 , 10)
print('Sou seu computador... Acabei de pensar em um número entre 0 e 10.')
print('Será que você consegue adivinhar qual foi? ')
acertou = False
palpites = 0
while not acertou:
jogador = int(input('Qual é seu palpite?'))
palpites += 1
if jogador == comptador:
acertou = True
else:
if jogador < comptador:
print('Mais...Tente uma vez.')
elif jogador >comptador:
print('Menos... Tente mais umz vez.')
print('Acertou com {} tentativas. Parabéns!'.format(palpites)) | [
"noreply@github.com"
] | noreply@github.com |
a5b26ad6de430de847e567b26e6d1de1326c9bf7 | 26a7bbd02ba6d5ae63d832b24ecb663e57adc630 | /Script.py | d3ddfb3e15cf149f68c752e6c1770eec035c5a9d | [] | no_license | Jiayin-Gu/PNsimulator | 5a834ae9c2405088defd9347fbcd592d74f80275 | 29898722a2aa9cab86918eb1e1b4288bcea09dd7 | refs/heads/master | 2020-06-17T08:44:29.898982 | 2019-07-11T17:40:04 | 2019-07-11T17:40:04 | 195,866,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import math
def shockley_curve(v, I_s):
    """Ideal-diode (Shockley) equation: I = I_s * (exp(V) - 1)."""
    exponential_term = np.exp(v) - 1
    return I_s * exponential_term
def I_V():
    """Fit the measured I-V data in 'I_V.out' to the Shockley curve and save
    a plot (data points plus fitted dashed line) to 'I_V.eps'."""
    # two columns: voltage, current
    data=np.loadtxt("I_V.out")
    V=data[:, 0]
    I=data[:, 1]
    # initial guess for I_s from the first (reverse-bias) current sample
    popt, pcov=curve_fit(shockley_curve, V, I, p0=(0-I[0]))
    I_s=popt[0]
    # dense voltage grid for drawing the fitted curve
    v=np.linspace(V[0], V[-1], 100)
    i=shockley_curve(v, I_s)
    plt.close()
    fig=plt.figure(figsize=(10, 7), dpi=80)
    ax=fig.add_subplot(1, 1, 1)
    ax.set_xlabel("$V$", fontsize=25)
    ax.set_xlim(V[0], V[-1])
    ax.set_xticks(np.linspace(V[0], V[-1], 10))
    # tick labels are hard-coded for data spanning V in [-5, 4]
    ax.set_xticklabels(["-5", "-4", "-3", "-2", "-1", "0", "1", "2", "3", "4"], fontsize=20)
    ax.set_ylabel(r"$I$ $(\times 10^5)$", fontsize=25)
    ax.set_ylim(-0.8e5, 4.0e5)
    ax.set_yticks(np.linspace(-0.8e5, 4.0e5, 7).tolist())
    ax.set_yticklabels(["-0.8", "0.0", "0.8", "1.6", "2.4", "3.2", "4.0"], fontsize=20)
    plt.scatter(V, I, color="black", marker="*")
    ax.plot(v, i, color="black", linestyle="--", label="Shockley curve")
    ax.grid(linestyle=":", linewidth=0.1, color="gray")
    ax.legend(loc="upper left", fontsize=25)
    # annotate the fitted saturation current inside the Shockley formula
    ax.text(-4.5, 2.0e5, r"$I=%0.0f\times\left[\exp(V)-1\right]$" % (I_s), fontsize=25)
    plt.tight_layout()
    fig.savefig("I_V.eps")
    plt.close()
# Run the fit-and-plot routine when the module is executed.
I_V()
| [
"gujiayin1234@163.com"
] | gujiayin1234@163.com |
50ba2bc284a62190ba066af555bbcb93d4bf0af2 | 428c34158ae663c01bc3e621dd240a6e63a0447a | /build/rosserial/rosserial_embeddedlinux/catkin_generated/pkg.develspace.context.pc.py | 412aaa4e9f310c62a9cbcc7e433dc8c2add7d629 | [] | no_license | GTRIInternship2016/WaterGun2016 | dd05b1c0fc94184655c3d2fbdcc4f796df6b8b13 | acc43c42bb79cc197ed4495cfb50a888992052b4 | refs/heads/master | 2021-01-20T20:18:35.312890 | 2016-07-05T14:53:54 | 2016-07-05T14:53:54 | 60,801,323 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context: values were substituted by CMake at
# configure time -- regenerate the workspace rather than editing by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []  # empty -> []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosserial_embeddedlinux"
PROJECT_SPACE_DIR = "/home/student/watergun_2016/devel"
PROJECT_VERSION = "0.7.1"
| [
"andrewgmorris10@gmail.com"
] | andrewgmorris10@gmail.com |
6fde9b8be97e5708a6527dfe8dbc75db6bc87ca1 | 53c797550d72ee589db19d309714d466068390c7 | /python/python36/redis-client/set_redis_key.py | e9b15d49f38fc32785f7f0d26efce48c307336bd | [] | no_license | kaitezhan/Demos | d3f777f4c07c56ab66370822ec89856ea1893e11 | 4760a3c7a82d75a0092be92713e8e47e42cd5eb9 | refs/heads/master | 2020-03-28T05:39:53.191944 | 2018-09-06T07:11:01 | 2018-09-06T07:11:01 | 147,789,679 | 1 | 0 | null | 2018-09-07T07:47:53 | 2018-09-07T07:47:53 | null | UTF-8 | Python | false | false | 1,196 | py | import redis
from util.DateUtil import *
def get_timestamp(dateStr):
    """Parse a local-time 'YYYY-MM-DD HH:MM:SS' string (e.g.
    "1988-05-08 10:11:22") and return its POSIX timestamp as a float.

    Raises ValueError if the string does not match the format.
    """
    date = time.strptime(dateStr, "%Y-%m-%d %H:%M:%S")
    # mktime interprets the struct_time in the machine's local timezone
    return time.mktime(date)
def set_register_verify_code(mobile, code):
    """Store a registration verify code for `mobile` in redis db 30.

    NOTE(review): host and password are hard-coded dev credentials and
    should come from configuration, not source control.  Passing a dict as
    the value may be rejected or stringified depending on the redis-py
    version -- verify against the deployed client.
    """
    r = redis.Redis(host='118.31.42.204', port=6379, db=30, password="dev@Mo9.com")
    code = {'validateCode': code, 'createTime': get_timestamp("2017-09-13 14:31:22")}
    r.set('sheep_validate_code_mobile_1.0_' + str(mobile), code)
# Manual smoke test: writes a fixed code for this phone number on import.
set_register_verify_code(18066078829, 321123)
# dateStr = "1988-05-08 10:11:22"
# date = time.strptime(dateStr, "%Y-%m-%d %H:%M:%S")
# # print(type(time.localtime()))
# # print(type(date) is time.struct_time)
#
# # print(DateParser.format_date(DateParser.parse_date_time(dateStr)))
#
# # print(DateParser.parse_stamp(dateStr))
#
# # print(DateParser.format_date_time(datetime.datetime.now()))
# # print(DateParser.format_date_time(time.localtime()))
# dateStr2 = "1998-05-08"
# # print(DateOperator.days_range(DateParser.parse_date_time(dateStr), DateParser.parse_date(dateStr2)))
# days = 10
# days1 = -10
# print(type(days), type(days1))
| [
"rzhang@mo9.com"
] | rzhang@mo9.com |
3fcd7c9b969639b7019ef3cb8fba77564639c55f | dc36d91239a5e8dd811d73ed97d124f6c34111ca | /s1d2/pp2.py | 4f652e3f57e3b953f3c608cc6f2ff6615f99808f | [] | no_license | wuzijie/AliMusicTrendPredict | e5ad53810d0e93b19ef66978de4caf8a1e51565e | f2b5fc078960631fbe888e1150ab4bbbc2204e96 | refs/heads/master | 2021-05-03T09:24:48.349553 | 2016-06-19T14:09:37 | 2016-06-19T14:09:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,083 | py | #-*- coding:utf8 -*-#
"""
---------------------------------------
*功能:
*保存:
---------------------------------------
"""
import os
import csv
import time
from collections import defaultdict
####################### date ####################################################
# Build a dense mapping between calendar dates (20150301 .. 20151030) and
# consecutive integer ranks, one rank per day.
print ""
print "===start generate date rank=================================="
date_to_rank = {}
rank_to_date = {}
import datetime
dt = datetime.datetime(2015, 03, 01, 00, 00, 01)
end = datetime.datetime(2015, 10, 30, 23, 59, 59)
step = datetime.timedelta(days=1)
day_rank = 0
while dt < end:
    day_date = dt.strftime('%Y%m%d')
    rank_to_date[day_rank] = day_date
    date_to_rank[day_date] = day_rank
    dt += step
    day_rank += 1
print "date num ", len(rank_to_date)
print "rank to date :", rank_to_date
print "===end generate date rank=================================="
####################### date ####################################################
####################### songs ####################################################
# Load song metadata and index it:
#   songs_id_to_songinfo: song_id -> (artist_id, publish_time, init_play_num,
#                                     language_type, artist_gender)
#   songs are ranked by publish date; artists by set iteration order.
song_id_set = set()
songs_id_to_songinfo = defaultdict(tuple)
songs_rank_to_iddate = [] #song rank to song_id and publish_date
songs_id_to_rank = {}
artist_id_set = set()
artists_id_to_artistinfo = defaultdict(tuple)
artists_rank_to_id = []
artists_id_to_rank = {}
artists_id_to_songs_id = defaultdict(list) #artist_id to list of song_id
artists_rank_to_songs_num = {}
artist_gender_set = set()
language_type_set = set()
print ""
print "===start load songs=================================="
t0 = time.time()
song_file_path = "./data/p2_mars_tianchi_songs.csv"
f = open(song_file_path, 'r')  # NOTE(review): file handle is never closed
rows = csv.reader(f)
# CSV columns: song_id, artist_id, publish_time, init_play_num,
# language_type, artist_gender
for row in rows:
    song_id = row[0]
    song_id_set.add(song_id)
    artist_id = row[1]
    artist_id_set.add(artist_id)
    publish_time = int(row[2])
    init_play_num = int(row[3])
    language_type = int(row[4])
    language_type_set.add(language_type)
    artist_gender = int(row[5])
    artist_gender_set.add(artist_gender)
    artists_id_to_songs_id[artist_id].append(song_id)
    artists_id_to_artistinfo[artist_id] = (artist_gender)  # NOTE: plain int, not a tuple
    songs_rank_to_iddate.append((song_id, publish_time))
    songs_id_to_songinfo[song_id] = (artist_id, publish_time, init_play_num, language_type, artist_gender)
# rank songs by publish date
songs_rank_to_iddate.sort(key = lambda item : item[1])
for rank, item in enumerate(songs_rank_to_iddate):
    songs_id_to_rank[item[0]] = rank
# artist ranks follow the (arbitrary) set iteration order
artists_rank_to_id = list(artist_id_set)
for rank, item in enumerate(artists_rank_to_id):
    artists_id_to_rank[item] = rank
artists_rank_to_id = list(artist_id_set)  # NOTE: rebuilt a second time (redundant)
for k, v in artists_id_to_songs_id.items():
    artists_rank_to_songs_num[artists_id_to_rank[k]] = len(v)
print "songs num ", len(song_id_set)
print "songs_id_to_songinfo num ", len(songs_id_to_songinfo)
print "artist num ", len(artist_id_set)
print "language type num ", len(language_type_set)
print "artist gender num ", len(artist_gender_set)
print "k th artist songs num ", artists_rank_to_songs_num
t1 = time.time()
print "It takes %f s to load songs" %(t1-t0)
print "===end load songs==================================="
####################### songs ####################################################
####################### actions ####################################################
# First pass over the user-action log: collect user ids, the set of songs
# with at least one action, and the set of action types seen.
user_id_set = set()
users_rank_to_id = []
users_id_to_rank = {}
song_hasact_id_set = set()
action_type_set = set()
print ""
print "===start user statistics=================================="
tu0 = time.time()
ua_file_path1 = "./data/p2_mars_tianchi_user_actions.csv"
f1 = open(ua_file_path1, 'r')  # NOTE(review): file handle is never closed
rows1 = csv.reader(f1)
# columns used here: user_id (0), song_id (1), action_type (3)
for idx, row in enumerate(rows1):
    user_id = row[0]
    user_id_set.add(user_id)
    song_id = row[1]
    song_hasact_id_set.add(song_id)
    action_type = int(row[3])
    action_type_set.add(action_type)
users_rank_to_id = list(user_id_set)
for rank, item in enumerate(users_rank_to_id):
    users_id_to_rank[item] = rank
print "user num", len(user_id_set)
print "song num that has action", len(song_hasact_id_set)
print "action type num", len(action_type_set)
tu1 = time.time()
print "It takes %f s to do user statistics" %(tu1-tu0)
print "===end user statistics==================================="
####################### actions ####################################################
####################### actions statistics####################################################
# Second pass: collect play events (action_type == 1) per artist, then
# run-length encode them into per-day (count, date_rank) pairs.
artists_play = defaultdict(list)
artists_play_inday = defaultdict(list)
print ""
print "===start action statistics=================================="
ta0 = time.time()
ua_file_path = "./data/p2_mars_tianchi_user_actions.csv"
f = open(ua_file_path, 'r')  # NOTE(review): file handle is never closed
rows = csv.reader(f)
for idx, row in enumerate(rows):
    user_id = row[0]
    user_rank = users_id_to_rank[user_id]
    song_id = row[1]
    song_rank = songs_id_to_rank[song_id]
    artist_rank = artists_id_to_rank[songs_id_to_songinfo[song_id][0]]
    action_time_hour = int(row[2])
    action_type = int(row[3])
    action_time_date = date_to_rank[row[4]]
    if(action_type == 1):  # 1 = play event
        artists_play[artist_rank].append((action_time_hour, action_time_date))
# sort each artist's events chronologically by date rank
for k, v in artists_play.items():
    v.sort(key = lambda item : item[1])
    artists_play[k] = v
# collapse consecutive events on the same date into (count, date_rank) pairs
for k, v in artists_play.items():
    vd = []
    c = 1
    dateTemp = -1
    itemTemp = (0, 0)
    for item in v:
        if(item[1] == dateTemp):
            c += 1
        else:
            vd.append((c, itemTemp[1]))
            dateTemp = item[1]
            itemTemp = item
            c = 1
    vd.append((c, itemTemp[1]))  # flush the final run
    vd.pop(0)  # drop the bogus first pair produced by the (0, 0) sentinel
    artists_play_inday[k] = vd
artists_play.clear()  # free the raw event lists; only per-day counts remain
ta1 = time.time()
print "It takes %f s to do action statistics" %(ta1-ta0)
print "===end actions statistics==================================="
######################### actions statistics##################################################
| [
"xiaoyulink@gmail.com"
] | xiaoyulink@gmail.com |
d00d3f541c8395b11d28df9673b9cc4eb0aeb4f1 | ba92fb06223819fde44f65228b9f8de077bb39ca | /api.py | f43d75a2962ff7e85eba018bc0b3f6d3461684c0 | [] | no_license | nagapoornima22/flask_database | cb40b58703a8e6deb182ea476d06be252a1eeb85 | 221b10b7071e38118876cb1f97bae388b732aae7 | refs/heads/master | 2021-03-15T11:02:24.023241 | 2020-03-12T13:50:06 | 2020-03-12T13:50:06 | 246,846,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,847 | py | from flask import *
import sqlite3
# Flask application object; all route handlers below register on it.
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html");
@app.route("/add")
def add():
return render_template("add.html")
@app.route("/savedetails", methods=["POST", "GET"])
def saveDetails():
msg = "msg"
if request.method == "POST":
try:
name = request.form["name"]
email = request.form["email"]
address = request.form["address"]
with sqlite3.connect("employee.db") as con:
cur = con.cursor()
cur.execute("INSERT into Employees (name, email, address) values (?,?,?)", (name, email, address))
con.commit()
msg = "Employee successfully Added"
except:
con.rollback()
msg = "We can not add the employee to the list"
finally:
return render_template("success.html", msg=msg)
con.close()
@app.route("/view")
def view():
con = sqlite3.connect("employee.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("select * from Employees")
rows = cur.fetchall()
return render_template("view.html", rows=rows)
@app.route("/delete")
def delete():
return render_template("delete.html")
@app.route("/deleterecord", methods=["POST"])
def deleterecord():
id = request.form["id"]
with sqlite3.connect("employee.db") as con:
try:
cur = con.cursor()
cur.execute("delete from Employees where id = ?", id)
msg = "record successfully deleted"
except:
msg = "can't be deleted"
finally:
return render_template("delete_record.html", msg=msg)
# Start the development server (debug mode) only when run directly.
if __name__ == "__main__":
    app.run(debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
38e55d3c7addb06f9d23b8a798f8bf6d50a16b9b | 7cfacd2dfe0840028705459956562f63f039f0a5 | /imageviewer.py | 08c23d4c019cb46bc37422073dc951b830e6e960 | [
"MIT"
] | permissive | PhantomJoker07/BastardSword | 83a00b03ccdb9d3f8b3af82a03227399d80720d7 | 62b9832136c05516c14c7acd65c2edcba3cfb0d0 | refs/heads/main | 2023-05-20T01:42:44.032668 | 2021-06-13T01:50:19 | 2021-06-13T01:50:19 | 375,856,717 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | import matplotlib
import matplotlib.pyplot as plt
import sys
import numpy as np
import struct
def draw_image(fileName):
with open(fileName, 'rb') as file:
width, height = struct.unpack('ii', file.read(4*2))
image_data_bytes = file.read((width*height*4) * 4)
image_data_float = struct.unpack('f'*(width*height*4), image_data_bytes)
npimage = np.array(image_data_float).reshape((height, width, 4))[:,:,0:3]
plt.imshow(npimage)
plt.show()
if __name__ == "__main__":
fileName = sys.argv[1]
draw_image(fileName) | [
"noreply@github.com"
] | noreply@github.com |
708f82e61a7c65cd898662f411ee77cbf31cccc9 | b098afc69ec619b3d7944de81a7dd43bc1934f71 | /python list assignment.txt | 00dbc6e3c5f0684be1ee537ef7a3829244aaf433 | [] | no_license | TracyBaraza/python | 5f344f3f5c7a6dfee8241709446171aa5d563d40 | f1fc4884d7d25f39dcc939fdb4e29cb9671e01e9 | refs/heads/master | 2020-04-28T13:04:30.124463 | 2019-03-25T17:11:48 | 2019-03-25T17:11:48 | 175,296,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | txt | Python 3.7.2 (tags/v3.7.2:9a3ffc0492, Dec 23 2018, 22:20:52) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> x=[0,1,2,3,4,5,6,7,8,9]
>>> x
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>>
>>>
>>> x.append(10)
>>> x
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>>
>>>
>>> x.extend([11])
>>> x
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
>>>
>>>
>>> x.pop()
11
>>>
>>>
>>> x.copy()
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>>
>>>
>>> x.reverse()
>>> x
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>>
>>>
>>> x.sort()
>>> x
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>>
>>>
>>> x.remove(10)
>>> x
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>>
>>>
>>> x.index(1)
1
>>>
>>>
>>> x.insert(1,11)
>>> x
[0, 11, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>>
>>>
>>> x.remove(11)
>>> x
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>>
>>>
>>> y=[n*10 for n in x]
>>> y
[0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
>>>
>>>
>>> for n in x:
print(n*10)
0
10
20
30
40
50
60
70
80
90
>>>
>>>
>>> k=x[:5]
>>> k
[0, 1, 2, 3, 4]
>>>
>>>
>>> v=x[5:10]
>>> v
[5, 6, 7, 8, 9]
>>>
>>>
>>> m=[]
>>> n=[[1,2,3],[4,5,6,],[7,8,9]]
>>>
>>>
>>> for sublist in n:
for x in sublist:
m.append(x)
>>> m
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>>
| [
"barazatracy16@gmal.com"
] | barazatracy16@gmal.com |
c5c35f369f7ff3081e674a64bf478670c5236b72 | 1872b9b47b6b6271f38960d650a3103bfa9de7a5 | /download_json.py | 13762d01e1800e0d2455e4f864f5c891a5de3e50 | [] | no_license | M4ttoF/AccessEarth-Tool | 161d28c943171189692f0985ab445bff4068bf79 | bcef025eeaea8604e50ca6b97c402765b14a00ba | refs/heads/master | 2020-03-22T11:51:20.231655 | 2018-07-20T19:42:15 | 2018-07-20T19:42:15 | 140,000,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,933 | py | # Access Earth Scraper
# Matthew Farias
'''
payload={'key1': 'value1', 'key2': 'value2'}
#r = requests.post("https://httpbin.org/post")
r = requests.post("https://httpbin.org/post", data = payload)
print(r.text)
'''
import time
import requests
import urllib
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# enable browser console logging so performance/network entries can be read
d = DesiredCapabilities.CHROME
d['loggingPrefs'] = { 'browser':'ALL' }
driver = webdriver.Chrome(desired_capabilities=d)
action=ActionChains(driver)
CITY = "Windsor"  # NOTE(review): unused below; searches come from CanadaCities.txt
found ={}  # URLs already downloaded, to avoid duplicates across searches
# Logs into the app
def login(driver,action):
    """Open access.earth and log in by clicking/typing at fixed pixel
    offsets from the username field (brittle: breaks if the layout moves).

    NOTE(review): credentials are hard-coded in source.
    """
    driver.get("https://access.earth/app/")
    elem = driver.find_element_by_name("username")
    action.move_to_element(elem)
    action.move_by_offset(xoffset=110,yoffset=325)
    action.click()
    action.send_keys("spellyy")
    # move right to the password field
    action.move_by_offset(xoffset=500,yoffset=0)
    action.click()
    action.send_keys("qpwoeiruty")
    # move down to the login button
    action.move_by_offset(xoffset=0,yoffset=50)
    action.click()
    action.perform()
    action.reset_actions()
    time.sleep(3)  # wait for the app to finish logging in
#Searches for the city on the app
def searchFor(driver, action, location):
    """Type the city name into the search bar, click the 5th suggestion,
    then harvest that city's network requests.

    location: list of words, e.g. ['Windsor', 'ON'].
    """
    elem=driver.find_element_by_name("search")
    action.move_to_element(elem)
    action.click()
    action.perform()
    action.reset_actions()
    time.sleep(3)
    elem = driver.find_element_by_class_name("searchbar-input")
    name=""
    name=str(name)
    # join the words with spaces; `i != location[-1]` compares an int to a
    # string, so it is always True and a trailing space is always appended
    for i in range(len(location)):
        name+= location[i]
        if i!= location[-1]:
            name+= ' '
    print(name)
    elem.send_keys(name)
    elem=None
    time.sleep(1)
    # poll until at least 5 suggestion labels exist, then take the 5th
    while elem == None:
        arr = driver.find_elements_by_class_name("label-md")
        try:
            elem = arr[4]
        except:
            time.sleep(1)
    elem.click()
    time.sleep(2)
    getNetworkRequests(driver,action, location)
#Searches through the network requests and finds the JSON data with locations
def getNetworkRequests(driver, action, location):
    """Read the browser's performance entries and download every
    not-yet-seen URL containing 'factual_data' (the app's place JSON)."""
    script = "var performance = window.performance || window.mozPerformance || window.msPerformance || window.webkitPerformance || {}; var network = performance.getEntries() || {}; return network;"
    data = driver.execute_script(script)
    for i in data:
        if 'factual_data' in i['name'] and i['name'] not in found:
            downloadJsonLink(i['name'], location)
            found[i['name']] = True  # remember the URL so it is fetched once
#Goes to URL link and downloads the JSON data to a file named after the city
def downloadJsonLink(url, location):
data=urllib.request.urlopen(url)
data=data.read()
print("Adding in data for",location)
city=""
for i in range(len(location)-1):
city+= location[i]
if i!= location[-2]:
city+= ' '
city=city[:-1]
file = open("Canada\\"+location[-1]+"\\"+city+'.JSON', 'w')
file.write(str(data))
file.close()
# Log in once, then search every city listed in CanadaCities.txt.
# NOTE(review): searchFile is never closed and line.split() is computed twice.
login(driver, action)
searchFile = open("CanadaCities.txt", 'r')
for line in searchFile:
    print(line.split())
    searchFor(driver, action, line.split())
driver.close()
"ezioblade64@gmail.com"
] | ezioblade64@gmail.com |
971d10a6fcb13e8b7c795ff0cf4df44c33d76326 | 94c65dadc70f7864cab7cc5fa365cc772ca44127 | /gym/test.py | 75abd17ec04e85973ce2491193b3df8c436ffba1 | [
"MIT"
] | permissive | shihaocao/swarm | 0227a7909b6285ca8916683b5db5c0dd5ba47641 | c5e5162aba43060eb01963671aa458d3cd173975 | refs/heads/main | 2023-02-25T14:42:39.713455 | 2021-01-26T02:59:38 | 2021-01-26T02:59:38 | 325,425,395 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | import numpy as np
# Scratch exploration of numpy bitwise/clip/rot90 behaviour.
grid_a = np.arange(16).reshape((4, 4))
grid_b = np.arange(16).reshape((4, 4))
anded = np.bitwise_and(grid_a, grid_b)
as_bool = np.array(anded, dtype=bool)
# clip to {0, 1}, then flip so zeros become ones and vice versa
flipped = np.clip(anded, 0, 1)
flipped = (flipped - 1) * -1
print(grid_a)
print(grid_b)
print(anded)
print(as_bool)
print(flipped)
# print(flipped.flatten().append(1))
cube = np.arange(24).reshape((2, 3, 4))
print('firts')
print(cube)
print('r90')
cube = np.rot90(cube, 1, axes=(1, 2))
print(cube)
"shehowcow@gmail.com"
] | shehowcow@gmail.com |
7ee7f2e7f0034ad78299103059e5d41c7e5251e8 | 47ff744da519c525cccfad1d8cead74f7e2cd209 | /uge4/.history/exercise_20200220124148.py | f126b64625bf836dfaac34c1d4c008fc555bbe88 | [] | no_license | Leafmight/Python | f6098395a7a13dd6afe6eb312a3eb1f3dbe78b84 | d987f22477c77f3f21305eb922ae6855be483255 | refs/heads/master | 2020-12-21T14:21:06.802341 | 2020-05-22T10:21:37 | 2020-05-22T10:21:37 | 236,457,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import numpy as np
filename = './befkbhalderstatkode.csv'
# Copenhagen population table loaded as unsigned ints, header row skipped.
# Columns are assumed to follow the standard befkbhalderstatkode layout:
# year, neighbourhood code, age, citizenship code, person count -- verify.
dd = np.genfromtxt(filename, delimiter=',', dtype=np.uint, skip_header=1)
# Neighbourhood code -> name lookup used by getSumPerHood().
neighb = {1: 'Indre By', 2: 'Østerbro', 3: 'Nørrebro', 4: 'Vesterbro/Kgs. Enghave',
       5: 'Valby', 6: 'Vanløse', 7: 'Brønshøj-Husum', 8: 'Bispebjerg', 9: 'Amager Øst',
       10: 'Amager Vest', 99: 'Udenfor'}
def pop(hood):
    """Return the total 2015 population of neighbourhood code `hood`.

    Fix: sum column 4 (the person counts) of the matching rows; the previous
    `[:4]` sliced the first four rows and summed every column of them.
    """
    hood_mask = (dd[:,0] == 2015) & (dd[:,1] == hood)
    return np.sum(dd[hood_mask][:, 4])
def getSumPerHood():
    """Map each neighbourhood name to the value pop() reports for its code."""
    return {name: pop(code) for code, name in neighb.items()}
| [
"jacobfolke@hotmail.com"
] | jacobfolke@hotmail.com |
dc9289d234825789dfd30143764b5bf441e87b50 | a7cca49626a3d7100e9ac5c2f343c351ecb76ac7 | /playbooks/tests/led_toggle.py | f8079be0655d96fcf02c841fe646899d740a03c0 | [
"MIT"
] | permissive | Carglglz/upydev | 104455d77d64300074bda54d86bd791f19184975 | 529aa29f3e1acf8160383fe410b5659110dc96de | refs/heads/master | 2023-05-24T18:38:56.242500 | 2022-10-21T14:03:17 | 2022-10-21T14:03:17 | 199,335,165 | 49 | 9 | MIT | 2022-10-21T14:03:18 | 2019-07-28T20:42:00 | Python | UTF-8 | Python | false | false | 142 | py | import time
# Blink five times at 1 Hz (0.5 s on, 0.5 s off), printing progress.
# NOTE(review): `led` is not defined here -- presumably injected into the
# globals by the MicroPython board/loader; confirm before reuse.
for i in range(5):
    print(f"This is a loaded script: {i}")
    led.on()
    time.sleep(0.5)
    led.off()
    time.sleep(0.5)
| [
"carlosgilglez@gmail.com"
] | carlosgilglez@gmail.com |
dcff227305bc074d0d32949ae48b052c1608a805 | dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5 | /eggs/Products.CMFPlone-4.1-py2.7.egg/Products/CMFPlone/skins/plone_scripts/getNotAddableTypes.py | d9e66131d0c1fb846122cf94e88d8368a72a9d1e | [] | no_license | nacho22martin/tesis | ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5 | e137eb6225cc5e724bee74a892567796166134ac | refs/heads/master | 2020-12-24T13:20:58.334839 | 2013-11-09T12:42:41 | 2013-11-09T12:42:41 | 14,261,570 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | ## Script (Python) "getNotAddableTypes"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=
##
# Zope/Plone Script (Python): returns the portal type names that should NOT
# be addable in this context.  Customize to filter addable portal types based
# on context, the current user or other criteria; the empty tuple excludes
# nothing.  (The ##bind lines above are parsed by Zope -- do not edit.)
return ()
| [
"ignacio@plone.(none)"
] | ignacio@plone.(none) |
65fe23396445e2b18d34f02b0fd23adca93486a9 | 028b11cb029d20d46e683f718dff18c39fc2c89b | /app/core/sentiment_analysis.py | 3434b40d6031954bfaf1ead528802f0e3b88478c | [] | no_license | demid5111/smart-detector | 390d392beb460044db563092be01e062eac8ef41 | 7673b7d3ba3fc545bf6a8b11c5f91538e947a8a7 | refs/heads/master | 2022-12-13T16:39:29.913612 | 2021-01-07T13:22:38 | 2021-01-07T13:22:38 | 211,321,632 | 0 | 1 | null | 2022-12-10T03:53:54 | 2019-09-27T13:07:51 | JavaScript | UTF-8 | Python | false | false | 593 | py | import json
from requests import post
from app.constants import SENTIMENT_TOKEN
def analyze_sentiments(text):
    """Send `text` to the tt-api sentiment service and return its result dict.

    Returns the service's 'result' payload (polarity, confidence, positive,
    neutral, negative). On any failure — network error, non-JSON body, or a
    missing 'result' key — it degrades to an all-zero neutral result instead
    of raising (deliberate best-effort behavior kept from the original).
    """
    data = {
        'txt': text,
        'lang': 'rus'  # service analyzes Russian text
    }

    url = 'https://tt-api.tech/1.0/sentiment'
    headers = {
        'Authorization': 'Token {}'.format(SENTIMENT_TOKEN),
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }
    try:
        r = post(url, data=json.dumps(data), headers=headers)
        return json.loads(r.text)['result']
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Still best-effort: any real failure yields a
        # neutral zero result.
        return dict(polarity=0, confidence=0, positive=0, neutral=0, negative=0)
"vlzemskovs@gmail.com"
] | vlzemskovs@gmail.com |
859189dfd335cbf552d601b7f074a5040f3b71b9 | d1f8aef0e3da67555b6b7d57ac9bec0b94e12cc5 | /dragex/interfaces/__init__.py | d85a2f6ea8e655ceea1d1c1ab049f645c0717c72 | [] | no_license | victorhook/dragex | d3593f0c12fc2cbdbccc14a085f70e493f3b8f05 | 6c06740230f7513318abe79c78cb6d4369ba3e68 | refs/heads/master | 2023-06-02T03:58:54.061938 | 2021-06-17T19:06:24 | 2021-06-17T19:06:24 | 370,010,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | from .drawable import Drawable # noqa
from .game_object import GameObject # noqa
from .screen import Screen # noqa
from .sprite_image import SpriteImage # noqa
| [
"victorkrook96@gmail.com"
] | victorkrook96@gmail.com |
7bffb66e5f552e2e744965e1073430a1c8eaf3b7 | 1b60858c303bd7d88dae82b8db56273c326ddb44 | /tests/swagger_client_tests/test_processor_status_snapshot_entity.py | 5f4fb8dda20bf1e9f698019dba23303937af0daf | [
"Apache-2.0"
] | permissive | tspannhw/nipyapi | 1ba076ef669493bad20681579891eea1d43f4fc8 | 30cdd028cf68cc4316b54a23bfa1f0397de3ae23 | refs/heads/master | 2021-07-19T14:37:22.993682 | 2017-10-29T18:52:31 | 2017-10-29T18:52:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,475 | py | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.2.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import nipyapi
from nipyapi.swagger_client.rest import ApiException
from nipyapi.swagger_client.models.processor_status_snapshot_entity import ProcessorStatusSnapshotEntity
class TestProcessorStatusSnapshotEntity(unittest.TestCase):
    """ ProcessorStatusSnapshotEntity unit test stubs

    Auto-generated by swagger-codegen; the test body is a placeholder until a
    model instance with mandatory attributes is constructed (see FIXME below).
    """

    def setUp(self):
        # No fixtures needed for the stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testProcessorStatusSnapshotEntity(self):
        """
        Test ProcessorStatusSnapshotEntity
        """
        # FIXME: construct object with mandatory attributes with example values
        #model =nipyapi.swagger_client.models.processor_status_snapshot_entity.ProcessorStatusSnapshotEntity()
        pass
if __name__ == '__main__':
unittest.main()
| [
"dchaffey@hortonworks.com"
] | dchaffey@hortonworks.com |
6376173684ca6c0c3673e550b76f8e8902209e9e | f00c4e3772815fbb570d85b4f91ea0d01606f22c | /Ultrasonic_sensor(hc-sr04).py | 9268171d7053810c8388bbee641a9b7df2a79d5d | [] | no_license | ElangoCS/Python-GPIO | 79e2d8564b2bee6202e55231718e2d57ebdbcdf9 | 0738a881493953fe837c7d13b75bd1880dceb58e | refs/heads/master | 2021-06-21T07:48:36.118318 | 2017-07-06T11:03:23 | 2017-07-06T11:03:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO_TRIGGER = 18
GPIO_ECHO = 24
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
def distance():
    """Measure distance with an HC-SR04 ultrasonic sensor, in centimetres.

    Sends a 10 µs trigger pulse, times the echo pulse, and converts the
    elapsed time using the speed of sound (34300 cm/s), halved for the
    round trip.

    NOTE(review): the two busy-wait loops have no timeout — if the sensor is
    disconnected or the echo is lost this function blocks forever.
    """
    # 10 µs high pulse on the trigger pin starts a measurement.
    GPIO.output(GPIO_TRIGGER, True)
    time.sleep(0.00001)
    GPIO.output(GPIO_TRIGGER, False)

    StartTime = time.time()
    StopTime = time.time()

    # Wait for the echo line to go high (start of the echo pulse)...
    while GPIO.input(GPIO_ECHO) == 0:
        StartTime = time.time()

    # ...and low again (end of the echo pulse).
    while GPIO.input(GPIO_ECHO) == 1:
        StopTime = time.time()

    TimeElapsed = StopTime - StartTime
    # distance = elapsed_time * speed_of_sound / 2 (sound travels there and back).
    distance = (TimeElapsed * 34300) / 2

    return distance
if __name__ == '__main__':
    try:
        # Print one distance reading per second until interrupted (Ctrl-C).
        while True:
            dist = distance()
            print ("Measured Distance = %.1f cm" % dist)
            time.sleep(1)
    except KeyboardInterrupt:
        # Ctrl-C: stop cleanly and release the GPIO pins.
        print("Measurement stopped")
        GPIO.cleanup()
| [
"noreply@github.com"
] | noreply@github.com |
dbc90486c2ba02f3081ea0d35e68d3fc519f2ff6 | fbb678e76f9d9bc96b86c068c724824143da9440 | /integration-tests/monkeyrunnerTestSuite.py | b9374572e102990fb5735a85b1c956380dcf5865 | [
"Apache-2.0",
"MIT"
] | permissive | d53dave/DSLFY-Android | eed20220aeb162b1c7fe70a7899e2568f2de4247 | 596d994bc322f47a3a6e7df60ef9d8a24254e996 | refs/heads/master | 2021-01-24T15:06:43.900932 | 2015-11-19T05:58:53 | 2015-11-19T05:58:53 | 44,204,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | # Imports the monkeyrunner modules used by this program
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice
# Connects to the current device, returning a MonkeyDevice object
device = MonkeyRunner.waitForConnection()
# Installs the Android package. Notice that this method returns a boolean, so you can test
# to see if the installation worked.
device.installPackage('../app/target/net-d53dev-dslfy-android-1.0.apk')
# sets a variable with the package's internal name
package = 'net.d53dev.dslfy.android'
# sets a variable with the name of an Activity in the package
activity = 'net.d53dev.dslfy.android.ui.CarouselActivity'
# sets the name of the component to start
runComponent = package + '/' + activity
# Runs the component
device.startActivity(component=runComponent)
MonkeyRunner.sleep(5)
device.type('example@example.com')
# Takes a screenshot
result = device.takeSnapshot()
# Writes the screenshot to a file
result.writeToFile('screenshot.png','png')
| [
"dave@d53dev.net"
] | dave@d53dev.net |
23e5821fb9cdd44921d93fab7017629fb41feacd | 7ced125aede70be9ba86c2b9677fa6d832f2d0cc | /fc_django/product/models.py | 702e711e55356a255617b839a1270ad940722d5e | [] | no_license | paulracooni/tutorial.django | f98ff72d3f635c615778fbed4e31444ed4e414f5 | 027b2f5a5393d6051c4d70c68405bb8580b4fdb1 | refs/heads/master | 2022-10-17T01:34:55.153096 | 2020-06-15T04:41:24 | 2020-06-15T04:41:24 | 270,183,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | from django.db import models
# Create your models here.
class Product(models.Model):
    """A store product (Django model; admin labels are in Korean)."""
    name = models.CharField(max_length=256, verbose_name="상품명")  # product name
    price = models.IntegerField(verbose_name="상품가격")  # price
    description = models.TextField(verbose_name="상품설명")  # description
    # NOTE(review): "stuck" looks like a typo for "stock" (the verbose_name
    # means "inventory"); renaming would require a DB migration, so it is
    # only flagged here.
    stuck = models.IntegerField(verbose_name="재고")
    register_date = models.DateTimeField(auto_now_add=True, verbose_name="등록날짜")  # registration timestamp

    def __str__(self):
        # Human-readable representation used by the admin and shells.
        return self.name

    class Meta:
        db_table = "fastcompus_product"
        verbose_name = "상품"
        verbose_name_plural = "상품"
| [
"paulracooni@gmail.com"
] | paulracooni@gmail.com |
56b8f12831c451169abfb43ee9bf0bf5db45e2ec | 5231eb9d9ca8019f22b6895a4af78d9cbb98b6b7 | /Search&Train/MyDataset.py | 401c178af56f97600ae86da60655396655ec31a3 | [] | no_license | LuPaoPao/NAS-HR | b2c5b259cb7eae35cd30568df278f9bae94944f2 | 8baf8aa5c1a476bfedb6f2bead172d701ac371af | refs/heads/main | 2023-07-10T03:18:23.489754 | 2021-08-21T13:45:54 | 2021-08-21T13:45:54 | 314,416,217 | 27 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,757 | py | # -*- coding: UTF-8 -*-
import numpy as np
import os
from torch.utils.data import Dataset
import cv2
import csv
import scipy.io as scio
import torchvision.transforms.functional as transF
import torchvision.transforms as transforms
from PIL import Image
def transform(image):
    """Resize a PIL image to 300x600, convert it to a tensor, and apply
    ImageNet mean/std normalization."""
    resized = transF.resize(image, size=(300, 600))
    tensor = transF.to_tensor(resized)
    normalized = transF.normalize(tensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return normalized
class Data_STMap(Dataset):
    """PyTorch dataset of spatio-temporal maps (STMaps) with heart-rate labels.

    root_dir is expected to contain one .mat index file per sample; each index
    stores the path to the recording ('Path') and a column offset
    ('Step_Index') into the STMap image.
    """
    def __init__(self, root_dir, frames_num, transform = None):
        self.root_dir = root_dir              # directory of per-sample .mat index files
        self.frames_num = int(frames_num)     # number of STMap columns (frames) per sample
        self.datalist = os.listdir(root_dir)  # NOTE: order is filesystem-dependent
        self.num = len(self.datalist)
        self.transform = transform            # optional PIL-image transform
        if not self.check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You need to download it from official website.')

    def __len__(self):
        return self.num

    def __getitem__(self, idx):
        idx = idx
        img_name = 'STMap'
        STMap_name = 'STMap_YUV_Align_CSI_POS.png'
        # The index .mat file points at the actual recording and a step offset.
        nowPath = os.path.join(self.root_dir, self.datalist[idx])
        temp = scio.loadmat(nowPath)
        nowPath = str(temp['Path'][0])
        Step_Index = int(temp['Step_Index'])
        STMap_Path = os.path.join(nowPath, img_name)
        # Ground truth: mean heart rate over the selected frame window
        # (nanmean tolerates missing HR readings).
        gt_name = 'Label_CSI/HR.mat'
        gt_path = os.path.join(nowPath, gt_name)
        gt = scio.loadmat(gt_path)['HR']
        gt = np.array(gt.astype('float32')).reshape(-1)
        gt = np.nanmean(gt[Step_Index:Step_Index + self.frames_num])
        gt = gt.astype('float32')
        # Read the image sequence (STMap) and crop the frame window.
        feature_map = cv2.imread(os.path.join(STMap_Path, STMap_name))
        feature_map = feature_map[:, Step_Index:Step_Index + self.frames_num, :]
        # Min-max scale each (row, channel) signal to [0, 255]; the small
        # epsilon avoids division by zero for constant rows.
        for c in range(feature_map.shape[2]):
            for r in range(feature_map.shape[0]):
                feature_map[r, :, c] = 255 * ((feature_map[r, :, c] - np.min(feature_map[r, :, c])) / (0.00001 +
                                              np.max(feature_map[r, :, c]) - np.min(feature_map[r, :, c])))
        feature_map = Image.fromarray(feature_map)
        if self.transform:
            feature_map = self.transform(feature_map)
        # Return (normalized STMap, mean heart rate).
        return (feature_map, gt)

    def check_integrity(self):
        # Minimal check: the index directory must exist.
        if not os.path.exists(self.root_dir):
            return False
        else:
            return True
def CrossValidation(root_dir, fold_num=5, fold_index=0):
    """Split the sample entries under `root_dir` into one test fold and the
    remaining training set for k-fold cross-validation.

    The fold size `round(num/fold_num - 2)` is kept from the original code.
    BUGFIX: the test slice previously ended at `test_num - 1`, so one sample
    per fold ended up in neither the test nor the training split.

    Returns (test_index, train_index) as lists of directory-entry names.
    NOTE: os.listdir order is filesystem-dependent; sort externally if a
    reproducible split is required.
    """
    datalist = os.listdir(root_dir)
    num = len(datalist)
    test_num = round(((num / fold_num) - 2))
    start = fold_index * test_num
    test_index = datalist[start:start + test_num]
    train_index = datalist[0:start] + datalist[start + test_num:]
    return test_index, train_index
def getIndex(root_path, filesList, save_path, Pic_path, Step, frames_num):
    """Build per-window index .mat files for every recording in `filesList`.

    For each recording, slides a window of `frames_num` columns over its STMap
    image with stride `Step`, and writes one .mat file per window position
    containing the recording path ('Path') and the window offset
    ('Step_Index'). Returns the list of written index filenames.
    """
    Index_path = []
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    for sub_file in filesList:
        now = os.path.join(root_path, sub_file)
        img_path = os.path.join(now, os.path.join('STMap', Pic_path))
        temp = cv2.imread(img_path)
        # Number of usable window starts; width minus window length.
        Num = temp.shape[1]
        Res = Num - frames_num - 1  # extra -1: presumably to allow for Diff (frame-difference) data — TODO confirm
        Step_num = int(Res/Step)
        for i in range(Step_num):
            Step_Index = i*Step
            # 1000 + i keeps filenames fixed-width and lexicographically ordered.
            temp_path = sub_file + '_' + str(1000 + i) + '_.mat'
            scio.savemat(os.path.join(save_path, temp_path), {'Path': now, 'Step_Index': Step_Index})
            Index_path.append(temp_path)
    return Index_path
| [
"noreply@github.com"
] | noreply@github.com |
da5f875d6ad92fb09e0281b4cd2eaf5ec54ecfc4 | 1974b3e9c5f2f677833e1608a41281f377fd331c | /dltesthttp_xuyalin2/www/testcase/webservice/ts_ws_orders/getOrderLog.py | f1dc05383ce69ec78eba00680612de440006ef31 | [] | no_license | xyl00755/pythonLearning | ed0f540b61247c3560f347853da5886b2e2ba25d | c6aecff86ff34dcd7358d98201627ff84e9bf2cf | refs/heads/master | 2021-01-13T08:19:25.171016 | 2016-12-16T05:43:10 | 2016-12-16T05:43:10 | 71,764,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,736 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
0255.获取订单跟踪信息
http://127.0.0.1:8280/mallws/orders/getOrderLog.json
{
"token": "57469529686440a88fedb0bed51ba5d0", // 必须 token
"orderNo":"123123123" // 必须 订单号
}
{
"code": 200,
"description": "执行成功!",
"model": {
"success": "0", // 成功 0-成功 1-失败
"orderLogList": [
{
"beforeStatus": "xx", // 订单之前的状态
"dealDescrip": "xx", // 订单操作说明
"nowStatus": "xx", // 订单当前状态
"dealDate": "xx" // 操作时间
}
]
},
"metadata": {
"type": 0,
"clazz": "cn.com.hd.mall.web.webservices.entity.response.order.OrderLogResponse"
}
}
参数校验:
只做必须验证
code说明:
100-token失效 200-成功 300-错误的角色(无权限) 400-非法的参数 500-服务器异常 600-重新登陆
"""
import unittest
from www.api.webservice import *
from www.common.excel import wsData
from www.operation.order import createOrder
class getOrderLog(unittest.TestCase):
UserShop = wsData('TmlShop')
UserShopMin = wsData('TmlShopMin')
DealMgr = wsData('DealMager')
DealMgr2 = wsData('DealMager2')
DealSaler = wsData('DealSaler')
DealBuyer = wsData('DealBuyer')
Merch1 = wsData('Merch1')
wsUserShop = webservice()
wsUserShop.login(UserShop.username, UserShop.password)
wsDealMgr = webservice()
wsDealMgr.login(DealMgr.username, DealMgr.password)
wsDealMgr2 = webservice()
wsDealMgr2.login(DealMgr2.username, DealMgr2.password)
wsDealSaler = webservice()
wsDealSaler.login(DealSaler.username, DealSaler.password)
wsDealBuyer = webservice()
wsDealBuyer.login(DealBuyer.username, DealBuyer.password)
# S1.货到付款提交订单获取订单跟踪消息
def test_getOrderLog_createOrder(self):
orderLog = self.wsUserShop.getOrderLog(self.UserShop.orderCodWaitDeliver.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
self.assertEqual(orderLog['model']['orderLogList'][0]['beforeStatus'], '')
self.assertIsNotNone(orderLog['model']['orderLogList'][0]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][0]['dealDescrip'], u'提交订单')
self.assertEqual(orderLog['model']['orderLogList'][0]['nowStatus'], 'C020')
# S2.货到付款取消订单获取订单跟踪消息
def test_getOrderLog_cancelOrder(self):
orderLog = self.wsUserShop.getOrderLog(self.UserShop.orderCodCancel.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C020':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'交易已取消')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C012')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S3.货到付款订单发货获取订单跟踪消息
def test_getOrderLog_deliverOrder(self):
orderLog = self.wsUserShop.getOrderLog(self.UserShop.orderCodWaitReceive.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C020':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'卖家发货')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S4.货到付款订单交易完成订单跟踪消息
def test_getOrderLog_codComplete(self):
orderLog = self.wsUserShop.getOrderLog(self.UserShop.orderCodComplete.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C017':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'交易完成')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C019')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S5.订单改价获取订单跟踪消息——暂时不会记录订单跟踪
def test_getOrderLog_changPrice(self):
order = createOrder(self.UserShop, self.Merch1)
ws = webservice()
ws.login(self.DealMgr.username, self.DealMgr.password)
ws.changeOrderPrice(orderNo=order.orderNo, orderDiscountAmount='100', orderChangeAmount='11900', orderStatus='C020')
ws.deliver(orderNo=order.orderNo)
orderLog = order.ws.getOrderLog(order.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C020':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'卖家发货')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S6.待收货订单取消后拒绝取消、同意取消订单跟踪
def test_getOrderLog_cancelAudit(self):
order = createOrder(self.UserShop, self.Merch1)
ws = webservice()
ws.login(self.DealMgr.username, self.DealMgr.password)
ws.deliver(orderNo=order.orderNo)
order.ws.cancel(paymentNo=order.paymentNo, cancelType='3')
ws.auditCancel(paymentNo=order.paymentNo, orderNo=order.orderNo, auditStatus='1')
order.ws.cancel(paymentNo=order.paymentNo, cancelType='3')
ws.auditCancel(paymentNo=order.paymentNo, orderNo=order.orderNo, auditStatus='0')
orderLog = order.ws.getOrderLog(order.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flagCancel = 0
flagReject = 0
flagAgree = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['dealDescrip'] == u'交易取消中':
self.assertEqual(orderLog['model']['orderLogList'][i]['beforeStatus'], 'C017')
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flagCancel += 1
continue
if orderLog['model']['orderLogList'][i]['dealDescrip'] == u'卖家拒绝取消':
self.assertEqual(orderLog['model']['orderLogList'][i]['beforeStatus'], 'C017')
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flagReject += 1
continue
if orderLog['model']['orderLogList'][i]['dealDescrip'] == u'交易已取消':
self.assertEqual(orderLog['model']['orderLogList'][i]['beforeStatus'], 'C017')
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C012')
flagAgree += 1
continue
self.assertEqual(flagCancel, 2, order.orderNo + 'cancel time is wrong!')
self.assertEqual(flagReject, 1, order.orderNo + 'cancel reject time is wrong!')
self.assertEqual(flagAgree, 1, order.orderNo + 'cancel agree time is wrong!')
# S7.在线支付提交订单获取订单跟踪
def test_getOrderLog_createOrderOnline(self):
orderLog = self.wsUserShop.getOrderLog(self.UserShop.orderOnlineWaitPay.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
self.assertEqual(orderLog['model']['orderLogList'][0]['beforeStatus'], '')
self.assertIsNotNone(orderLog['model']['orderLogList'][0]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][0]['dealDescrip'], u'提交订单')
self.assertEqual(orderLog['model']['orderLogList'][0]['nowStatus'], 'C011')
# S8.在线支付取消订单订单获取订单跟踪
def test_getOrderLog_cancelOrderOnline(self):
orderLog = self.wsUserShop.getOrderLog(self.UserShop.orderOnlienCancel.orderNo)
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C011':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
#self.assertLess(orderLog['model']['orderLogList'][i]['dealDate'], datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'交易已取消')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C012')
flag += 1
self.assertEqual(flag, 1, self.UserShop.orderOnlienCancel.orderNo + 'cancel order log is not found or is found twice')
# S9.在线支付付款获取订单跟踪
# S10.在线支付发货获取订单跟踪
# S11.在线支付确认收货获取订单跟踪
# S12.经销商管理员获取订单跟踪
def test_getOrderLog_dealMager(self):
orderLog = self.wsDealMgr.getOrderLog(self.UserShop.orderCodWaitReceive.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C020':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'卖家发货')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S13.经销商销售员获取订单跟踪
def test_getOrderLog_dealSaler(self):
orderLog = self.wsDealSaler.getOrderLog(self.UserShop.orderCodWaitReceive.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C020':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'卖家发货')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S14.经销商采购员员获取订单跟踪——未校验权限
def test_getOrderLog_dealBuyer(self):
orderLog = self.wsDealBuyer.getOrderLog(self.UserShop.orderCodWaitReceive.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C020':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'卖家发货')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S15.获取其他用户订单日志——未校验,当前暂不修改~
def test_getOrderLog_dealOther(self):
orderLog = self.wsDealMgr2.getOrderLog(self.UserShop.orderCodWaitReceive.orderNo)
self.assertEqual(orderLog['model']['success'], '0')
flag = 0
for i in range(0,len(orderLog['model']['orderLogList'])):
if orderLog['model']['orderLogList'][i]['beforeStatus'] == 'C020':
self.assertIsNotNone(orderLog['model']['orderLogList'][i]['dealDate'])
self.assertEqual(orderLog['model']['orderLogList'][i]['dealDescrip'], u'卖家发货')
self.assertEqual(orderLog['model']['orderLogList'][i]['nowStatus'], 'C017')
flag += 1
self.assertEqual(flag, 1, 'cancel order log is not found or is found twice')
# S16.订单号为空获取订单日志
def test_getOrderLog_orderNoNull(self):
orderLog = self.wsUserShop.getOrderLog('')
self.assertIsNone(orderLog['model']['success'])
self.assertIsNone(orderLog['model']['orderLogList'])
# S17.token为空获取订单日志
def test_getOrderLog_tokenNull(self):
ws = webservice()
orderLog = ws.getOrderLog(self.UserShop.orderCodWaitReceive.orderNo)
self.assertEqual(orderLog['code'], 600)
def suite():
    """Assemble the order-log regression suite (cases run in listed order).

    Cases known to be broken upstream (changPrice, dealOther) remain
    excluded, matching the original hand-built suite.
    """
    case_names = (
        "test_getOrderLog_createOrder",
        "test_getOrderLog_cancelOrder",
        "test_getOrderLog_deliverOrder",
        "test_getOrderLog_codComplete",
        "test_getOrderLog_cancelAudit",
        "test_getOrderLog_createOrderOnline",
        "test_getOrderLog_cancelOrderOnline",
        "test_getOrderLog_dealMager",
        "test_getOrderLog_dealSaler",
        "test_getOrderLog_dealBuyer",
        "test_getOrderLog_orderNoNull",
        "test_getOrderLog_tokenNull",
    )
    result = unittest.TestSuite()
    for name in case_names:
        result.addTest(getOrderLog(name))
    return result
"xuyalin@danlu.com"
] | xuyalin@danlu.com |
98a3c195c87915e43e11852d4486b660239d5853 | 95a8145495409d3a82237476367e618221056ad0 | /17_avg.py | 509eed1fe2a2a1bdd8892a7ea0c08f0f2e79d544 | [] | no_license | mal1kofff/python | 6b31a9d6c37a4e4e51aa825d893994ac8e88a23b | 703f2917563e9b29574ace6d6a6cdc94fcec7d3c | refs/heads/main | 2023-06-07T00:26:06.080508 | 2021-07-01T13:10:40 | 2021-07-01T13:10:40 | 380,209,906 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | """Напишите программу, которая считывает с клавиатуры два числа a и b, считает и
выводит на консоль среднее арифметическое всех чисел из отрезка [a; b][a;b], которые кратны числу 3."""
def average_of_multiples_of_three(a, b):
    """Return the arithmetic mean of all multiples of 3 in [a, b], or None if
    the interval contains none.

    BUGFIX: the original divided by the count unconditionally and crashed with
    ZeroDivisionError when [a, b] held no multiple of 3.
    """
    multiples = [i for i in range(a, b + 1) if i % 3 == 0]
    if not multiples:
        return None
    return sum(multiples) / len(multiples)


if __name__ == '__main__':
    # Read the interval bounds from stdin, one per line.
    a = int(input())
    b = int(input())
    avg = average_of_multiples_of_three(a, b)
    if avg is None:
        print('no multiples of 3 in the given range')
    else:
        print(avg)
| [
"maratmalikov506@gmail.com"
] | maratmalikov506@gmail.com |
cdc0cefbdd696f4dc3322a0f5eef9cf4ed2e1010 | cb61d79d96d99849e9f1d4c46f32111c6ec96212 | /backend/app/app/api/api_v1/endpoints/queue.py | 310a4bf6c743d6bd4f0db15d986ce962c95ca9b0 | [] | no_license | yeongjonglim/QueueDown | b45983864872313d96bf203dd03c861abd2ae4a3 | 5120c49d36bb4b25aa756cf56d723e139e6a9d4e | refs/heads/master | 2023-01-30T05:29:55.211702 | 2020-12-19T14:12:00 | 2020-12-19T14:12:00 | 322,863,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,784 | py | from typing import Any, List, Union
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from app import crud, models, schemas
from app.api import deps
router = APIRouter()
@router.get("/", response_model=List[schemas.Queue])
def read_today_queue(
    *,
    db: Session = Depends(deps.get_db),
    skip: int = 0,
    limit: int = 100,
) -> Any:
    """
    Retrieve today's queue entries (paginated via skip/limit).

    No authentication dependency — this endpoint is public.
    """
    qs = crud.queue.get_by_date(db=db, skip=skip, limit=limit)
    return qs
@router.get("/all", response_model=List[schemas.QueueInDB])
def read_queue(
    *,
    db: Session = Depends(deps.get_db),
    skip: int = 0,
    limit: int = 100,
    current_user: models.User = Depends(deps.get_current_active_user),
) -> Any:
    """
    Retrieve all queue entries (paginated via skip/limit).

    Requires an active authenticated user.
    """
    qs = crud.queue.get_multi(db=db, skip=skip, limit=limit)
    return qs
@router.post("/", response_model=schemas.Queue)
def create_queue(
    *,
    db: Session = Depends(deps.get_db),
    queue_in: schemas.QueueCreate,
) -> Any:
    """
    Create a new queue entry.

    NOTE(review): no authentication dependency — anyone can create entries;
    confirm this is intended (customers joining a queue).
    """
    item = crud.queue.create(db=db, obj_in=queue_in)
    return item
@router.put("/{id}", response_model=schemas.Queue)
def update_queue(
    *,
    db: Session = Depends(deps.get_db),
    id: int,
    obj_in: Union[schemas.QueueEntry, schemas.QueueExit, schemas.QueueCreate],
    current_user: models.User = Depends(deps.get_current_active_user),
) -> Any:
    """
    Update a queue entry (entry, exit, or full update payload).

    Raises 404 if the entry does not exist and 400 if the caller is not a
    superuser.
    """
    item = crud.queue.get(db=db, id=id)
    if not item:
        raise HTTPException(status_code=404, detail="Item not found")
    if not crud.user.is_superuser(current_user):
        raise HTTPException(status_code=400, detail="Not enough permissions")
    item = crud.queue.update(db=db, db_obj=item, obj_in=obj_in)
    return item
| [
"yeongjong.lim@gmail.com"
] | yeongjong.lim@gmail.com |
bdc99781f9f9f4ea21043b1516f2b060769b59d5 | 972c8a10d076b72f4322d374152e2b7d9fe80866 | /ucf_gans/cgan_ucf.py | a19e916bd8882349c961f3e177be75b4b8d6e85e | [
"MIT"
] | permissive | tanlinc/opticalFlowGAN | 635e9ccadfd9810902a9fdfa329c78cc52be2f68 | f568e531265029f2f25f223ee92e1f53c0bb52f6 | refs/heads/master | 2020-03-21T07:22:16.724179 | 2018-12-12T15:23:02 | 2018-12-12T15:23:02 | 138,276,406 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,783 | py | import os, sys
sys.path.append(os.getcwd())
import time
import numpy as np
import tensorflow as tf
import tflib as lib
import tflib.ops.linear
import tflib.ops.conv2d
import tflib.ops.batchnorm
import tflib.ops.deconv2d
import tflib.save_images
import tflib.plot
import tflib.UCFdataDesktop as UCFdata
# --- Run configuration / hyper-parameters ---------------------------------
MODE = 'wgan-gp' # Valid options are dcgan, wgan, or wgan-gp
DIM = 64 # Base channel count; this overfits substantially, you're probably better off with 64 # or 128?
IM_DIM = 32 # Image side length: frames are 32x32 RGB (OUTPUT_DIM = 3*32*32).
            # BUGFIX: IM_DIM was used in generate_image() but never defined -> NameError.
SQUARE_IM_DIM = IM_DIM * IM_DIM # 1024; per-sample noise size expected by Generator().
                                # BUGFIX: used for fixed_noise below but never defined.
LAMBDA = 10 # Gradient penalty lambda hyperparameter
CRITIC_ITERS = 5 # How many critic iterations per generator iteration
BATCH_SIZE = 64 # Batch size
ITERS = 50000 # How many generator iterations to train for # 200000 takes too long
OUTPUT_DIM = 3072 # Number of pixels in UCF101 (3*32*32)
CONTINUE = True # Default False, set True if restoring from checkpoint
START_ITER = 600 # Default 0, set accordingly if restoring from checkpoint (100, 200, ...)
CURRENT_PATH = "ucf/...."
restore_path = "/home/linkermann/opticalFlow/opticalFlowGAN/results/" + CURRENT_PATH + "/model.ckpt"

lib.print_model_settings(locals().copy())

if(CONTINUE):
    # Restoring from a checkpoint rebuilds the graph, so start from a clean slate.
    tf.reset_default_graph()
def LeakyReLU(x, alpha=0.2):
    """Leaky rectifier: identity for positive inputs, slope `alpha` otherwise."""
    leaked = alpha * x
    return tf.maximum(leaked, x)
def ReLULayer(name, n_in, n_out, inputs):
    """Fully-connected layer (registered as `<name>.Linear`) followed by a ReLU."""
    pre_activation = lib.ops.linear.Linear(name + '.Linear', n_in, n_out, inputs)
    return tf.nn.relu(pre_activation)
def LeakyReLULayer(name, n_in, n_out, inputs):
    """Fully-connected layer (registered as `<name>.Linear`) followed by a LeakyReLU."""
    pre_activation = lib.ops.linear.Linear(name + '.Linear', n_in, n_out, inputs)
    return LeakyReLU(pre_activation)
def Generator(n_samples, conditions, noise=None): # conditional generator: conds are fed in addition to noise
    """Generate `n_samples` fake 32x32 RGB frames conditioned on `conditions`.

    conditions: flat batch of 3*32*32 pixel values (the previous frame),
    expected to be normalized to [-1, 1] by the caller.
    noise: optional [n_samples, 1024] latent; sampled from N(0, 1) if omitted.
    Returns a [n_samples, OUTPUT_DIM] tensor with values in [-1, 1] (tanh).
    """
    if noise is None:
        noise = tf.random_normal([n_samples, 1024]) # 32*32 = 1024
    # Noise is laid out as one extra 32x32 "channel" of the conditioning image.
    noise = tf.reshape(noise, [n_samples, 1, 32, 32])
    # Conditional input: the previous frame, reshaped to NCHW.
    conds = tf.reshape(conditions, [n_samples, 3, 32, 32]) # conditions: (64,3072) TO conds: (64,3,32,32)
    # Conditioning strategy: simply concatenate noise as a 4th channel.
    output = tf.concat([noise, conds], 1) # to: (BATCH_SIZE,4,32,32)
    output = tf.reshape(output, [n_samples, 4096]) # 32x32x4 = 4096; to: (BATCH_SIZE, 4096)
    # Project to a 4x4 spatial map with 4*DIM channels, then upsample 3x (4->8->16->32).
    output = lib.ops.linear.Linear('Generator.Input', 4096, 4*4*4*DIM, output) # 4*4*4*DIM = 64*64 = 4096
    output = lib.ops.batchnorm.Batchnorm('Generator.BN1', [0], output)
    output = tf.nn.relu(output)
    output = tf.reshape(output, [-1, 4*DIM, 4, 4])
    output = lib.ops.deconv2d.Deconv2D('Generator.2', 4*DIM, 2*DIM, 5, output)
    output = lib.ops.batchnorm.Batchnorm('Generator.BN2', [0,2,3], output)
    output = tf.nn.relu(output)
    output = lib.ops.deconv2d.Deconv2D('Generator.3', 2*DIM, DIM, 5, output)
    output = lib.ops.batchnorm.Batchnorm('Generator.BN3', [0,2,3], output)
    output = tf.nn.relu(output)
    # Final deconv to 3 output channels; tanh squashes into [-1, 1].
    output = lib.ops.deconv2d.Deconv2D('Generator.5', DIM, 3, 5, output)
    output = tf.tanh(output)
    return tf.reshape(output, [-1, OUTPUT_DIM])
def Discriminator(inputs, conditions):
    """Conditional critic: score each image in `inputs` given its conditioning
    frame in `conditions`.

    Both arguments are flat batches of 3*32*32 values in [-1, 1]. They are
    concatenated channel-wise into a 6-channel NCHW image and passed through
    three stride-2 convolutions (32x32 -> 16x16 -> 8x8 -> 4x4).

    Returns a rank-1 tensor with one unnormalized score per sample.
    """
    inputs = tf.reshape(inputs, [-1, 3, 32, 32])
    conds = tf.reshape(conditions, [-1, 3, 32, 32])  # conditioning frame
    # Conditioning strategy: concatenate along the channel axis -> (batch, 6, 32, 32).
    ins = tf.concat([inputs, conds], 1)

    output = lib.ops.conv2d.Conv2D('Discriminator.1', 6, DIM, 5, ins, stride=2)
    output = LeakyReLU(output)

    output = lib.ops.conv2d.Conv2D('Discriminator.2', DIM, 2*DIM, 5, output, stride=2)
    if MODE != 'wgan-gp':
        # WGAN-GP's per-sample gradient penalty is incompatible with batchnorm
        # in the critic, so batchnorm is only used in the other modes.
        output = lib.ops.batchnorm.Batchnorm('Discriminator.BN2', [0,2,3], output)
    output = LeakyReLU(output)

    output = lib.ops.conv2d.Conv2D('Discriminator.3', 2*DIM, 4*DIM, 5, output, stride=2)
    if MODE != 'wgan-gp':
        output = lib.ops.batchnorm.Batchnorm('Discriminator.BN3', [0,2,3], output)
    output = LeakyReLU(output)

    # BUGFIX: after three stride-2 convs the tensor is (batch, 4*DIM, 4, 4),
    # i.e. 4*4*4*DIM elements per sample. The previous reshape to
    # [-1, 4*4*8*DIM] silently merged pairs of samples into one row, halving
    # the effective batch. NOTE: this also changes the shape of the
    # 'Discriminator.Output' weight matrix, so checkpoints saved with the old
    # code cannot be restored.
    output = tf.reshape(output, [-1, 4*4*4*DIM])
    output = lib.ops.linear.Linear('Discriminator.Output', 4*4*4*DIM, 1, output)

    return tf.reshape(output, [-1])
# Placeholders take raw uint8-style pixel values [0, 255] as int32 and are
# rescaled to [-1, 1] (NOT [0, 1]) to match the generator's tanh output range.
cond_data_int = tf.placeholder(tf.int32, shape=[BATCH_SIZE, OUTPUT_DIM]) # conditional input for both G and D
cond_data = 2*((tf.cast(cond_data_int, tf.float32)/255.)-.5) # normalized to [-1, 1]
real_data_int = tf.placeholder(tf.int32, shape=[BATCH_SIZE, OUTPUT_DIM])
real_data = 2*((tf.cast(real_data_int, tf.float32)/255.)-.5) # normalized to [-1, 1]

# Build the GAN graph: G samples fakes from the condition; D scores both.
fake_data = Generator(BATCH_SIZE, cond_data)
disc_real = Discriminator(real_data, cond_data)
disc_fake = Discriminator(fake_data, cond_data)

# Parameter lists for the two optimizers (selected by variable-name prefix).
gen_params = lib.params_with_name('Generator')
disc_params = lib.params_with_name('Discriminator')
if MODE == 'wgan':
    # Original WGAN (Arjovsky et al.): the critic estimates the Wasserstein
    # distance; the Lipschitz constraint is enforced by weight clipping.
    gen_cost = -tf.reduce_mean(disc_fake)
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)

    gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(gen_cost, var_list=gen_params)
    disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(disc_cost, var_list=disc_params)

    # Op that clips every critic weight into [-0.01, 0.01] after each update.
    clip_ops = []
    for var in disc_params:
        clip_bounds = [-.01, .01]
        clip_ops.append(
            tf.assign(
                var,
                tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])
            )
        )
    clip_disc_weights = tf.group(*clip_ops)

elif MODE == 'wgan-gp':
    # WGAN with gradient penalty (Gulrajani et al.): no weight clipping;
    # instead, penalize the critic's gradient norm on random interpolates
    # between real and generated samples.
    gen_cost = -tf.reduce_mean(disc_fake)
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)

    # Per-sample interpolation coefficient, broadcast over the pixel axis.
    alpha = tf.random_uniform(
        shape=[BATCH_SIZE,1],
        minval=0.,
        maxval=1.
    )
    differences = fake_data - real_data
    interpolates = real_data + (alpha*differences)
    # The critic is conditioned here too, matching disc_real/disc_fake.
    gradients = tf.gradients(Discriminator(interpolates, cond_data), [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
    gradient_penalty = tf.reduce_mean((slopes-1.)**2)
    disc_cost += LAMBDA*gradient_penalty

    gen_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(gen_cost, var_list=gen_params)
    disc_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(disc_cost, var_list=disc_params)

elif MODE == 'dcgan':
    # Standard (non-saturating) GAN losses with sigmoid cross-entropy.
    # BUGFIX: tf.nn.sigmoid_cross_entropy_with_logits requires keyword
    # arguments in TF >= 1.0; the previous positional calls raise a ValueError.
    gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=tf.ones_like(disc_fake)))
    disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=tf.zeros_like(disc_fake)))
    disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real, labels=tf.ones_like(disc_real)))
    disc_cost /= 2.

    gen_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(gen_cost,
                                          var_list=lib.params_with_name('Generator'))
    disc_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(disc_cost,
                                           var_list=lib.params_with_name('Discriminator.'))
# Dataset iterators over UCF101 frame pairs.
# Arguments: batch size, sequence length, number of classes, image size.
# Each batch yields flat arrays holding two consecutive frames per sample.
gen = UCFdata.load_train_gen(BATCH_SIZE, 2, 2, (32,32,3)) # batch size, seq len, #classes, im size
dev_gen = UCFdata.load_test_gen(BATCH_SIZE, 2, 2, (32,32,3))
# For generating samples: fix one noise batch and one conditioning batch so the
# periodic sample grids are comparable across training iterations.
fixed_cond_samples, _ = next(gen) # NOTE(review): slicing below implies shape (batchsize, 6144), i.e. two concatenated 3072-pixel frames — confirm against UCFdata
fixed_cond_data_int = fixed_cond_samples[:,0:3072] # earlier frame as condition # shape (64,3072)
fixed_real_data_int = fixed_cond_samples[:,3072:] # next frame as comparison to result of generator # shape (64,3072)
fixed_cond_data_normalized = 2*((tf.cast(fixed_cond_data_int, tf.float32)/255.)-.5) # normalized to [-1, 1]
# NOTE(review): SQUARE_IM_DIM is not defined anywhere in this chunk; Generator
# expects a 1024-dim (32*32) noise vector, so SQUARE_IM_DIM should be 1024 — confirm.
if(CONTINUE):
    # Reuse the "noise" variable stored in the checkpoint so samples stay comparable.
    fixed_noise = tf.get_variable("noise", shape=[BATCH_SIZE, SQUARE_IM_DIM]) # take same noise like saved model
else:
    # Fresh run: sample new noise once and persist it as a variable (saved in checkpoints).
    fixed_noise = tf.Variable(tf.random_normal(shape=[BATCH_SIZE, SQUARE_IM_DIM], dtype=tf.float32), name='noise') #variable: saved
# fixed_noise = tf.constant(np.random.normal(size=(BATCH_SIZE, 1024)).astype('float32')) # for additional channel: 32*32 = 1024
fixed_noise_samples = Generator(BATCH_SIZE, fixed_cond_data_normalized, noise=fixed_noise) # Generator(n_samples,conds, noise):
def generate_image(frame, true_dist): # generates 64 (batch-size) samples next to each other in one image!
samples = session.run(fixed_noise_samples, feed_dict={real_data_int: fixed_real_data_int, cond_data_int: fixed_cond_data_int})
samples_255 = ((samples+1.)*(255./2)).astype('int32') #back to [0,255]
for i in range(0, BATCH_SIZE):
samples_255= np.insert(samples_255, i*2, fixed_cond_data_int[i],axis=0) # show last frame next to generated sample
lib.save_images.save_images(samples_255.reshape((2*BATCH_SIZE, 3, IM_DIM, IM_DIM)), 'samples_{}.jpg'.format(frame))
init_op = tf.global_variables_initializer() # op to initialize the variables.
saver = tf.train.Saver() # ops to save and restore all the variables.
# Train loop
with tf.Session() as session:
if(CONTINUE):
# Restore variables from disk.
saver.restore(session, restore_path)
print("Model restored.")
lib.plot.restore(START_ITER) # does not fully work, but makes plots start from newly started iteration
else:
session.run(init_op)
for iteration in range(START_ITER, ITERS): # START_ITER: 0 or from last checkpoint
start_time = time.time()
# Train generator
if iteration > 0:
_data, _ = next(gen) # shape: (batchsize, 6144) ##not 3072 anymore
# extract real and cond data
_cond_data = _data[:,0:3072] # earlier frame as conditional data,
_ = session.run(gen_train_op, feed_dict={cond_data_int: fixed_cond_data_int})
# Train critic
if MODE == 'dcgan':
disc_iters = 1
else:
disc_iters = CRITIC_ITERS
for i in range(disc_iters):
_data, _ = next(gen) # shape: (batchsize, 6144) ##not 3072 anymore
# extract real and cond data
_cond_data = _data[:,0:3072] # earlier frame as conditional data,
_real_data = _data[:,3072:] # last frame as real data for discriminator
_disc_cost, _ = session.run([disc_cost, disc_train_op], feed_dict={real_data_int: _real_data, cond_data_int: _cond_data})
if MODE == 'wgan':
_ = session.run(clip_disc_weights)
lib.plot.plot('train disc cost', _disc_cost)
lib.plot.plot('time', time.time() - start_time)
# Calculate dev loss and generate samples every 100 iters
if iteration % 100 == 99:
dev_disc_costs = []
_data, _ = next(gen) # shape: (batchsize, 6144) ##not 3072 anymore
# extract real and cond data
_cond_data = _data[:,0:3072] # earlier frame as conditional data,
_real_data = _data[:,3072:] # last frame as real data for discriminator
_dev_disc_cost = session.run(disc_cost, feed_dict={real_data_int: _real_data, cond_data_int: _cond_data}) # earlier frame as condition
dev_disc_costs.append(_dev_disc_cost)
lib.plot.plot('dev disc cost', np.mean(dev_disc_costs))
generate_image(iteration, _data)
# Save the variables to disk.
save_path = saver.save(session, restore_path)
print("Model saved in path: %s" % save_path)
# chkp.print_tensors_in_checkpoint_file("model.ckpt", tensor_name='', all_tensors=True)
# Save logs every 100 iters
if (iteration < 5) or (iteration % 100 == 99):
lib.plot.flush()
lib.plot.tick()
| [
"tanja.linkermann@gmx.de"
] | tanja.linkermann@gmx.de |
233ec5dea016aaed9a4e3806be808fb6146f17d6 | f878c9cb88878e6c38b9724b8a159131b66a2d15 | /Python/022.py | a31a3a4d6a1d8e9078e852c9b91efc5a68b2abd4 | [] | no_license | ronek22/ProjectEuler | bcfd2b7392585653303f1183b945f2fd95798f9d | 9f9c5a723eae7c6c44a067eb3600932d9c3241e6 | refs/heads/master | 2020-12-02T21:22:39.044656 | 2018-10-08T22:11:05 | 2018-10-08T22:11:05 | 96,303,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | '''Names scores'''
def ch2int(c):
#only works for big letters
return ord(c)-64
f = open('p022_names.txt', 'r')
txt = f.read()
content = []
for name in txt.split(','):
content.append(name[1:len(name)-1]) # delete quotation marks
f.close()
i=1
total = 0
content.sort()
for name in content:
suma = 0
for c in name:
suma+=ch2int(c)
suma*=i
total+=suma
i+=1
print total
| [
"jronek3010@gmail.com"
] | jronek3010@gmail.com |
892240b89878f59a82d0472a0bc8a32e40a03383 | 8d1a638f7ea0d694558cfe238bb7bed260d2d50c | /Pypoll_Challenge.py | ecca1bc100c1e5c140a01b7124158eb4c4491f1c | [
"MIT"
] | permissive | Bettinadavis11/Election_Analysis | 1fc79d5b57c31c409dfc2bee0558f5cac76cd0a8 | 2ff3dad50ba12e3f9c718be17da14d8e319094b9 | refs/heads/main | 2023-06-23T15:59:57.946596 | 2021-07-13T21:31:47 | 2021-07-13T21:31:47 | 383,615,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,271 | py | # -*- coding: UTF-8 -*-
"""PyPoll Homework Challenge Solution."""
# Add our dependencies.
import csv
import os
# Add a variable to load a file from a path.
file_to_load = os.path.join("Resources", "election_results.csv")
# Add a variable to save the file to a path.
file_to_save = os.path.join("analysis", "election_analysis.txt")
# Initialize a total vote counter.
total_votes = 0
# Candidate Options and candidate votes.
candidate_options = []
candidate_votes = {}
# 1: Create a county list and county votes dictionary.
county_list = []
county_votes = {}
# Track the winning candidate, vote count and percentage
winning_candidate = ""
winning_count = 0
winning_percentage = 0
# 2: Track the largest county and county voter turnout.
largest_county_turnout_name =""
largest_county_turnout = 0
# Read the csv and convert it into a list of dictionaries
with open(file_to_load) as election_data:
reader = csv.reader(election_data)
# Read the header
header = next(reader)
# For each row in the CSV file.
for row in reader:
# Add to the total vote count
total_votes = total_votes + 1
# Get the candidate name from each row.
candidate_name = row[2]
# 3: Extract the county name from each row.
county_name = row[1]
# If the candidate does not match any existing candidate add it to
# the candidate list
if candidate_name not in candidate_options:
# Add the candidate name to the candidate list.
candidate_options.append(candidate_name)
# And begin tracking that candidate's voter count.
candidate_votes[candidate_name] = 0
# Add a vote to that candidate's count
candidate_votes[candidate_name] += 1
# 4a: Write an if statement that checks that the
# county does not match any existing county in the county list.
if county_name not in county_list:
# 4b: Add the existing county to the list of counties.
county_list.append(county_name)
# 4c: Begin tracking the county's vote count.
county_votes[county_name] = 0
# 5: Add a vote to that county's vote count.
county_votes[county_name] += 1
# Save the results to our text file.
with open(file_to_save, "w") as txt_file:
# Print the final vote count (to terminal)
election_results = (
f"\nElection Results\n"
f"-------------------------\n"
f"Total Votes: {total_votes:,}\n"
f"-------------------------\n\n"
f"County Votes:\n")
print(election_results, end="")
txt_file.write(election_results)
# 6a: Write a for loop to get the county from the county dictionary.
for county_name in county_votes:
# 6b: Retrieve the county vote count.
votes = county_votes.get(county_name)
# 6c: Calculate the percentage of votes for the county.
vote_percentage = float(votes) / float(total_votes) * 100
county_results = (
f"{county_name}: {vote_percentage:.1f}% ({votes:,})")
# 6d: Print the county results to the terminal.
print(county_results)
# 6e: Save the county votes to a text file.
txt_file.write(county_results+"\n")
# 6f: Write an if statement to determine the winning county and get its vote count.
if (votes > largest_county_turnout):
largest_county_turnout = votes
largest_county_turnout_name = county_name
# 7: Print the county with the largest turnout to the terminal.
largest_county_turnout_summary = (
f"\n"
f"-------------------------\n"
f"Largest County Turnout: {largest_county_turnout_name}\n"
f"-------------------------\n")
print(largest_county_turnout_summary)
# 8: Save the county with the largest turnout to a text file.
txt_file.write(largest_county_turnout_summary)
# Save the final candidate vote count to the text file.
for candidate_name in candidate_votes:
# Retrieve vote count and percentage
votes = candidate_votes.get(candidate_name)
vote_percentage = float(votes) / float(total_votes) * 100
candidate_results = (
f"{candidate_name}: {vote_percentage:.1f}% ({votes:,})\n")
# Print each candidate's voter count and percentage to the
# terminal.
print(candidate_results)
# Save the candidate results to our text file.
txt_file.write(candidate_results)
# Determine winning vote count, winning percentage, and candidate.
if (votes > winning_count) and (vote_percentage > winning_percentage):
winning_count = votes
winning_candidate = candidate_name
winning_percentage = vote_percentage
# Print the winning candidate (to terminal)
winning_candidate_summary = (
f"-------------------------\n"
f"Winner: {winning_candidate}\n"
f"Winning Vote Count: {winning_count:,}\n"
f"Winning Percentage: {winning_percentage:.1f}%\n"
f"-------------------------\n")
print(winning_candidate_summary)
# Save the winning candidate's name to the text file
txt_file.write(winning_candidate_summary) | [
"bettinadavis1978@gmail.com"
] | bettinadavis1978@gmail.com |
fc9a98ef9d50ff78217956a8266a6c2d94e05061 | f67aa51d4afcdb2f31b78032dc910094ec310295 | /2. Python Code/node_tag_audit.py | c7c180574df2c7cce567eab1f29a792c8d6595be | [] | no_license | johncgr/data-wrangle-OpenStreetMap-data | a7e7bc1b0979d897eda55db35678f56b545f8f64 | 90e3aaf0113c312d7caa4a5c0b5978c09a464340 | refs/heads/master | 2016-09-16T11:58:28.772831 | 2015-06-10T20:53:38 | 2015-06-10T20:53:38 | 36,739,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,943 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 25 09:41:27 2015
@author: john
"""
import xml.etree.ElementTree as ET
import time
import re
#time of program start
start = time.time()
#error logging function
def add_error(log, key, error_msg):
if key in log:
log[key].append(error_msg)
else:
log[key] = [error_msg]
#tag audit
def tiger_audit(child, parent_element):
e_att = parent_element.attrib
counties = {'Tarrant, TX', 'Wise, TX', 'Denton, TX', 'Dallas, TX', 'Johnson, TX', 'Parker, TX'}
#produce list of name_type add as entry to summary log
if child.get('k') == "tiger:name_type":
add_error(tiger_name_type_log, e_att['id'], child.get('v'))
#could run into problems with this throwing errors when zips have the suffix
if ( child.get('k') == "tiger:zip_left"
or child.get('k') == "tiger:zip_right" ):
if len(child.get('v')) != 5:
add_error(error_log, e_att['id'], 'tiger:zip is not of correct length')
#if zip code not in list of possible zip codes
if child.get('k') not in zips:
add_error(error_log, e_att['id'], 'tiger:zip is not in list of possible zips')
#check tiger:county for possible county
#if you see errors may need to regex parse this out to get at counties
if child.get('k') == 'tiger:county':
if child.get('v') not in counties:
add_error(error_log, e_att['id'], 'tiger:county not one of possible counties')
#check that tiger:cfcc is in correct format
if child.get('k') == 'tiger:cfcc':
cfcc_pattern = re.compile(r'^[a-zA-Z]\d\d$')
if re.search(cfcc_pattern, child.get('v')) == None:
add_error(error_log, e_att['id'], 'cfcc not in correct format')
def tiger_name_crosscheck(child, tag_name):
#change this in second version to actually crosscheck the fields instead
#of creating a log
#tiger:name_base
if child.get('k') == 'tiger:name_base':
add_error(summary_log, 'tiger:name_base', child.get('v'))
#tiger name_type
if child.get('k') == 'tiger:name_type':
add_error(summary_log, 'tiger:name_type', child.get('v'))
#tiger name_direction_prefix
if child.get('k') == 'tiger:name_direction_prefix':
add_error(summary_log, 'tiger:name_direction_preix', child.get('v'))
#tiger name_direction_suffix
if child.get('k') == 'tiger:name_direction_suffix':
add_error(summary_log, 'tiger:name_direction_suffix', child.get('v'))
def tag_audit(child, parent_element):
e_att = parent_element.attrib
#scan for extraneous or missing attributes
if child.attrib.keys() != ['k', 'v']:
#show missing tags
c_set = set(child.attrib.keys())
t_set = set(['k', 'v'])
missing = t_set - c_set
if len(missing) != 0:
missing_msg = 'child <tag> is missing attribute ' + str(missing)
add_error(error_log, e_att['id'], missing_msg)
#show extraneous tags
extraneous = c_set - t_set
if len(extraneous) != 0:
extraneous_msg = 'child <tag> has extra attribute(s) ' + str(extraneous)
add_error(error_log, e_att['id'], extraneous_msg)
#addr:postcode audit
if child.get('k') == 'addr:postcode':
if child.get('v') not in zips:
add_error(error_log, e_att['id'], str(child.get('v')))
#tiger audit
if child.get('k'):
if child.get('k').startswith('tiger') == True:
tiger_audit(child, parent_element)
#extract tag k:name value, if present
if child.get('k') == 'name':
tag_name = child.get('v')
tiger_name_crosscheck(child, tag_name)
#bounds check maxspeed (should only be in <ways>)
#also check for unit of mph
try:
if child.get('k') == 'maxspeed':
speed_pattern = re.compile(r'(\A\d\d)')
mph_pattern = re.compile(r'mph')
speed = re.match(speed_pattern, child.get('v'))
if speed:
speed = float(speed.group())
if speed > 85:
add_error(error_log, e_att['id'], 'listed maxspeed is greater than 85 m.p.h')
if re.search(mph_pattern, child.get('v')) == None:
print(child.get('v'))
add_error(error_log, e_att['id'],
'maxspeed not in mph or is missing unit designation ')
except KeyError:
pass
return None
############Main Program###########
error_log = {}
node_ids = []
summary_log = {}
tiger_name_type_log = {}
minlat = 32.548
maxlat = 32.996
minlon = -97.5497
maxlon = -97.0319
zips = ['75052','75051', '76034', '76103','76248', '76262', '76001', '76002', '76003', '76004', '76005', '76006', '76007', '76010', '76011', '76012', '76013', '76014', '76015', '76016', '76017', '76018', '76019', '76094', '76096', '76020', '76197', '76198', '76021', '76022', '76095', '76109', '76116', '76126', '76132', '76131', '76191', '76166', '76177', '76034', '76195', '76036', '76016', '76039', '76040', '76140', '76193', '76119', '76140', '76101', '76102', '76103', '76104', '76105', '76106', '76107', '76108', '76109', '76110', '76111', '76112', '76113', '76114', '76115', '76116', '76117', '76118', '76119', '76120', '76121', '76122', '76123', '76124', '76126', '76127', '76129', '76130', '76131', '76132', '76133', '76134', '76135', '76136', '76137', '76140', '76147', '76148', '76150', '76155', '76161', '76162', '76163', '76164', '76166', '76177', '76179', '76180', '76181', '76182', '76185', '76191', '76192', '76193', '76195', '76196', '76197', '76198', '76199', '76244', '76051', '76092', '76099', '76111', '76117', '76137', '76148', '76180', '76052', '76053', '76054', '76244', '76248', '76060', '76192', '76135', '76136', '76108', '76135', '76063', '76127', '76127', '76118', '76180', '76182', '76118', '76180', '76182', '76180', '76114', '76013', '76015', '76020', '76118', '76180', '76118', '76180', '76114', '76131', '76179', '76114', '76092', '76115', '76122', '76196', '76129', '76130', '76019', '76019', '76137', '76148', '76107', '76114', '76108']
#path of file to be parsed
filein = r'/home/john/project/tarrant_county.osm'
for event, el in ET.iterparse(filein):
if el.tag == 'node':
for child in el.findall('./*'):
tag_audit(child, el)
print(time.time() - start)
print(error_log)
#print(error_log)
with open(r'/home/john/project/logs/node_tag_audit_error_log.txt', 'w') as fileout:
fileout.write(str(error_log))
with open(r'/home/john/project/logs/node_tag_audit_tiger_name_type_log.txt', 'w') as fileout:
fileout.write(str(tiger_name_type_log))
with open(r'/home/john/project/logs/node_tag_audit_summary_log.txt', 'w') as fileout:
fileout.write(str(error_log)) | [
"you@example.com"
] | you@example.com |
2ece6df10ac7d93cf7b3f26b7e9a110fe96f2098 | ad02044b8e3923b8138d5b35c5f168693b018844 | /pygl/action.py | 0b74718862f531ebf3960c0ef0eb4fb899b1a8c4 | [] | no_license | darkodraskovic/pygl | 3d804690873b77d742f49ad98359843cc041637f | f3ad0cee90d48f7157dc854e84c1abba628e3239 | refs/heads/master | 2022-12-26T14:54:41.150930 | 2020-05-03T08:35:19 | 2020-10-06T19:35:00 | 258,969,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | import pygame
key_event_types = [pygame.KEYDOWN, pygame.KEYUP]
mbutton_event_types = [pygame.MOUSEBUTTONUP, pygame.MOUSEBUTTONDOWN]
ACTIONDOWN = pygame.USEREVENT
ACTIONUP = pygame.USEREVENT + 1
key_actions = {}
mbutton_actions = {}
pressed = {}
def bind_key(key, action):
key_actions[key] = action
pressed[action] = False
def bind_mbutton(button, action):
mbutton_actions[button] = action
pressed[action] = False
def is_pressed(action):
return pressed[action]
def is_pressed_any():
return True in pressed.values()
def __handle_action(action, is_pressed):
pressed[action] = is_pressed
event = pygame.event.Event(ACTIONDOWN if is_pressed else ACTIONUP,
{'action': action})
pygame.event.post(event)
def handle_keys():
for event in pygame.event.get(key_event_types):
if event.key in key_actions:
action = key_actions[event.key]
if event.type == pygame.KEYDOWN:
__handle_action(action, True)
elif event.type == pygame.KEYUP:
__handle_action(action, False)
for event in pygame.event.get(mbutton_event_types):
if event.button in mbutton_actions:
action = mbutton_actions[event.button]
if event.type == pygame.MOUSEBUTTONDOWN:
__handle_action(action, True)
elif event.type == pygame.MOUSEBUTTONUP:
__handle_action(action, False)
| [
"darko.draskovic@gmail.com"
] | darko.draskovic@gmail.com |
90f6b044e0738dd4144dea41df919f7fe76752a2 | 167c6226bc77c5daaedab007dfdad4377f588ef4 | /python/ql/test/2/library-tests/PointsTo/import_time/module.py | 0e14ce6e5d765b8d724c6890d6495ef311dde746 | [
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | github/codeql | 1eebb449a34f774db9e881b52cb8f7a1b1a53612 | d109637e2d7ab3b819812eb960c05cb31d9d2168 | refs/heads/main | 2023-08-20T11:32:39.162059 | 2023-08-18T14:33:32 | 2023-08-18T14:33:32 | 143,040,428 | 5,987 | 1,363 | MIT | 2023-09-14T19:36:50 | 2018-07-31T16:35:51 | CodeQL | UTF-8 | Python | false | false | 152 | py |
import sys
os_test = sys.platform == "linux2"
version_test = sys.version_info < (3,)
if version_test:
version_2 = True
else:
version_3 = False | [
"mark@hotpy.org"
] | mark@hotpy.org |
e8813cd668f7ed59984bd897bab0933c4ba2a92a | 8a36ddf6a9f2f6c00ff7d3db72fe7a6f88ead7a2 | /weather/weather.py | f53c89e3bc9040f4b89115a55e4788b9c56e3dde | [] | no_license | pccode21/PyQt5 | 5d5b79f55d6165d03d58768bf30f25382ac7812b | f0af930b1338d0472aacbd3cab65be009bddd96e | refs/heads/master | 2020-12-03T11:07:44.226390 | 2020-02-19T05:29:09 | 2020-02-19T05:29:09 | 231,293,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,490 | py | from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from MainWindow import Ui_MainWindow
from datetime import datetime
import json
import os
import sys
import requests
from urllib.parse import urlencode
# OPENWEATHERMAP_API_KEY = os.environ.get('b020112734ca76c7df0ccad361a58fa3')
"""
从https://openweathermap.org/获取API密钥以与此结合使用
应用.
"""
def from_ts_to_time_of_day(ts):
dt = datetime.fromtimestamp(ts)
return dt.strftime("%I%p").lstrip("0")
class WorkerSignals(QObject):
'''
定义正在运行的工作线程可用的信号.
'''
finished = pyqtSignal()
error = pyqtSignal(str)
result = pyqtSignal(dict, dict)
class WeatherWorker(QRunnable):
'''
工作线程天气更新.
'''
signals = WorkerSignals()
is_interrupted = False
def __init__(self, location):
super(WeatherWorker, self).__init__()
self.location = location
@pyqtSlot()
def run(self):
try:
params = dict(
q=self.location,
appid='b020112734ca76c7df0ccad361a58fa3'
)
url = 'http://api.openweathermap.org/data/2.5/weather?%s&units=metric' % urlencode(params)
r = requests.get(url)
weather = json.loads(r.text)
# 检查我们是否失败(预测将以同样的方式失败).
if weather['cod'] != 200:
raise Exception(weather['message'])
url = 'http://api.openweathermap.org/data/2.5/forecast?%s&units=metric' % urlencode(params)
r = requests.get(url)
forecast = json.loads(r.text)
self.signals.result.emit(weather, forecast)
except Exception as e:
self.signals.error.emit(str(e))
self.signals.finished.emit()
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setupUi(self)
self.pushButton.pressed.connect(self.update_weather)
self.threadpool = QThreadPool() # 创建线程池类,以处理运行工作程序
self.show()
def alert(self, message):
alert = QMessageBox.warning(self, "Warning", message)
def update_weather(self):
worker = WeatherWorker(self.lineEdit.text())
worker.signals.result.connect(self.weather_result)
worker.signals.error.connect(self.alert)
self.threadpool.start(worker)
def weather_result(self, weather, forecasts):
self.latitudeLabel.setText("%.2f °" % weather['coord']['lat'])
self.longitudeLabel.setText("%.2f °" % weather['coord']['lon'])
self.windLabel.setText("%.2f m/s" % weather['wind']['speed'])
self.temperatureLabel.setText("%.1f °C" % weather['main']['temp'])
self.pressureLabel.setText("%d" % weather['main']['pressure'])
self.humidityLabel.setText("%d" % weather['main']['humidity'])
self.sunriseLabel.setText(from_ts_to_time_of_day(weather['sys']['sunrise']))
# 使用自定义from_ts_to_time_of_day函数处理时间戳,以am / pm格式返回用户友好的一天中的时间,且不带前导零。
self.weatherLabel.setText("%s (%s)" % (
weather['weather'][0]['main'],
weather['weather'][0]['description']
)
)
self.set_weather_icon(self.weatherIcon, weather['weather'])
for n, forecast in enumerate(forecasts['list'][:5], 1):
getattr(self, 'forecastTime%d' % n).setText(from_ts_to_time_of_day(forecast['dt']))
self.set_weather_icon(getattr(self, 'forecastIcon%d' % n), forecast['weather'])
getattr(self, 'forecastTemp%d' % n).setText("%.1f °C" % forecast['main']['temp'])
# 从weatherdict 设置当前的天气图标,然后遍历所提供的前5个天气预报。预报图标,时间和温度标签在Qt Designer中使用forecastIcon<n>,forecastTime<n>和定义 forecastTemp<n>,可以轻松地依次迭代它们并使用getattr当前迭代索引检索它们。
def set_weather_icon(self, label, weather):
label.setPixmap(
QPixmap(os.path.join('./PyQt5/weather/images', "%s.png" %
weather[0]['icon']
)
)
)
if __name__ == '__main__':
app = QApplication([])
window = MainWindow()
app.exec_()
| [
"16007005@qq.com"
] | 16007005@qq.com |
3ce222a6e33b8b2660952cbd65794fc78e3e8ad2 | 2be3bb6c9357fac21fa55aa5dd5a8495c73230c2 | /URI/1175.py | bf0eebeec4b598ff8d392401b090aaba32d8c35e | [] | no_license | GabrielRCAmaral/Brincando-de-Python | c1c24dc681eda608d7d4d8e88d7cefc12e323d6c | 32d05b639644bf19e98e51d551b081b7ee7853bc | refs/heads/master | 2020-07-09T20:54:06.424440 | 2020-01-30T01:18:47 | 2020-01-30T01:18:47 | 204,081,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | # -*- coding: utf-8 -*-
lista=[]
for i in range(20):
lista.append(float(input()))
lista.reverse()
for i,item in enumerate(lista):
print 'N[%i] = %i'%(i,item)
| [
"gabriel.r.c.amaral@gmail.com"
] | gabriel.r.c.amaral@gmail.com |
7c76835603d90ac7c8e51e9c8be02a23b28636b1 | a5dd6bcb59130979624c0274a91bb1566421dbc4 | /thor/config.py | f0faee12c5842bbacca47f5949d4fa2242d68ec3 | [
"BSD-3-Clause"
] | permissive | mjuric/thor | 62563455526eaec09c96341ac239a5985824f24b | 4e2403bf9c08e998ccd7a277583b0e550b9d3a67 | refs/heads/main | 2023-04-21T02:22:17.359744 | 2021-05-19T20:12:56 | 2021-05-19T20:12:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,027 | py | import numpy as np
__all__ = ["Config"]
class Config:
"""
Config: Holds configuration settings.
Of interest to the user are two main attributes:
columnMapping : This dictionary should define the data
column names of the user's data relative to the
internally used names.
oorbDirectory : Oorb install location should be defined
here.
Parameters
----------
None
Returns
-------
None
"""
MIN_OBS = 5
MIN_ARC_LENGTH = 1.0
CONTAMINATION_PERCENTAGE = 20
BACKEND = "PYOORB"
BACKEND_KWARGS = {}
NUM_THREADS = 60
USE_RAY = False
USE_GPU = False
RANGE_SHIFT_CONFIG = {
"cell_area" : 1000,
"threads" : NUM_THREADS,
"backend" : BACKEND,
"backend_kwargs" : BACKEND_KWARGS,
}
CLUSTER_LINK_CONFIG = {
"vx_range" : [-0.1, 0.1],
"vy_range" : [-0.1, 0.1],
"vx_bins" : 300,
"vy_bins" : 300,
"vx_values" : None,
"vy_values" : None,
"eps" : 5/3600,
"min_samples" : MIN_OBS,
"min_arc_length" : MIN_ARC_LENGTH,
"threads" : NUM_THREADS,
}
IOD_CONFIG = {
"min_obs" : MIN_OBS,
"min_arc_length" : MIN_ARC_LENGTH,
"contamination_percentage" : CONTAMINATION_PERCENTAGE,
"rchi2_threshold" : 1000,
"observation_selection_method" : "combinations",
"iterate" : False,
"light_time" : True,
"linkage_id_col" : "cluster_id",
"identify_subsets" : True,
"threads" : NUM_THREADS,
"backend" : BACKEND,
"backend_kwargs" : BACKEND_KWARGS,
}
OD_CONFIG = {
"min_obs" : MIN_OBS,
"min_arc_length" : MIN_ARC_LENGTH,
"contamination_percentage" : CONTAMINATION_PERCENTAGE,
"rchi2_threshold" : 10,
"delta" : 1e-6,
"max_iter" : 5,
"method" : "central",
"fit_epoch" : False,
"test_orbit" : None,
"threads" : NUM_THREADS,
"backend" : BACKEND,
"backend_kwargs" : BACKEND_KWARGS,
}
ODP_CONFIG = {
"min_obs" : MIN_OBS,
"min_arc_length" : MIN_ARC_LENGTH,
"contamination_percentage" : 0.0,
"rchi2_threshold" : 5,
"eps" : 1/3600,
"delta" : 1e-8,
"max_iter" : 5,
"method" : "central",
"fit_epoch" : False,
"orbits_chunk_size" : 1,
"observations_chunk_size" : 100000,
"threads" : NUM_THREADS,
"backend" : BACKEND,
"backend_kwargs" : BACKEND_KWARGS,
}
ADES_METADATA = {
"observatory_code" : "I11",
"observatory_name" : "Vera C. Rubin Observatory",
"telescope_aperture" : "8.4",
"telescope_design" : "Reflector",
"telescope_detector" : "CCD",
"submitter" : "D. iRAC",
"observers" : ["D. iRAC"],
"measurers" : ["D. iRAC"],
}
COLUMN_MAPPING = {
### Observation Parameters
# Observation ID
"obs_id" : "obsId",
# Exposure time
"exp_mjd" : "exp_mjd",
# Visit ID
"visit_id" : "visitId",
# Field ID
"field_id" : "fieldId",
# Field RA in degrees
"field_RA_deg" : "fieldRA_deg",
# Field Dec in degrees
"field_Dec_deg" : "fieldDec_deg",
# Night number
"night": "night",
# RA in degrees
"RA_deg" : "RA_deg",
# Dec in degrees
"Dec_deg" : "Dec_deg",
# Observatory code
"observatory_code" : "code",
# Observer's x coordinate in AU
"obs_x_au" : "HEclObsy_X_au",
# Observer's y coordinate in AU
"obs_y_au" : "HEclObsy_Y_au",
# Observer's z coordinate in AU
"obs_z_au" : "HEclObsy_Z_au",
# Magnitude (UNUSED)
"mag" : "VMag",
### Truth Parameters
# Object name
"name" : "designation",
# Observer-object distance in AU
"Delta_au" : "Delta_au",
# Sun-object distance in AU (heliocentric distance)
"r_au" : "r_au",
# Object's x coordinate in AU
"obj_x_au" : "HEclObj_X_au",
# Object's y coordinate in AU
"obj_y_au" : "HEclObj_Y_au",
# Object's z coordinate in AU
"obj_z_au" : "HEclObj_Z_au",
# Object's x velocity in AU per day
"obj_dx/dt_au_p_day" : "HEclObj_dX/dt_au_p_day",
# Object's y velocity in AU per day
"obj_dy/dt_au_p_day" : "HEclObj_dY/dt_au_p_day",
# Object's z velocity in AU per day
"obj_dz/dt_au_p_day" : "HEclObj_dZ/dt_au_p_day",
# Semi-major axis
"a_au" : "a_au",
# Inclination
"i_deg" : "i_deg",
# Eccentricity
"e" : "e",
} | [
"moeyensj@gmail.com"
] | moeyensj@gmail.com |
a791fa74bde3613ce81e2a52d148ac6000fe7861 | eaf4bd214daf6b97acaba867fd25508a3e733e1e | /TopCoder/09.NumberMagicEasy.py | 2a14b5578f3fb898a3c5ea4c449f427d607b9458 | [
"MIT"
] | permissive | DevStarSJ/algorithmExercise | 5303240aef0747fc04be08306cbed5e1ce1b5615 | 66b42c54cdd594ff3f229613fd83446f8c1f9153 | refs/heads/master | 2021-08-07T14:14:49.440613 | 2021-02-02T22:39:21 | 2021-02-02T22:39:21 | 89,350,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | class NumberMagicEasy:
taros_card = [
[1,2,3,4,5,6,7,8],
[1,2,3,4,9,10,11,12],
[1,2,5,6,9,10,13,14],
[1,3,5,7,9,11,13,15]
]
def the_number(self, answer):
all_answer = [x for x in range(1, 17)]
for i, a in enumerate(answer):
go = self.yes if a == 'Y' else self.no
go(all_answer, self.taros_card[i])
return all_answer[0]
def yes(self, all_answer, card):
numbers = [x for x in range(1,17) if not x in card]
for x in numbers:
if x in all_answer:
all_answer.remove(x)
def no(self, all_answer, card):
numbers = [x for x in range(1,17) if x in card]
for x in numbers:
if x in all_answer:
all_answer.remove(x)
taro = NumberMagicEasy()
print(taro.the_number('YNYY'))
print(taro.the_number('YNNN'))
print(taro.the_number('NNNN'))
print(taro.the_number('YYYY'))
print(taro.the_number('NYNY')) | [
"sj@zigbang.com"
] | sj@zigbang.com |
f15bf56cb4b49a2fab0cc07880d2db8d7ba04ce5 | 83d570e832e3c4fcf5f553e3d59eec55b7fb6592 | /HW5/submit/layers.py | 2ec8bf2b5c9235bfe67852d8912f37f44cfa974c | [
"MIT"
] | permissive | JavisDaDa/COMP540ML | 7091058b54814da82398c28b32b361cb58a3e51d | 9c50a7d0fcca02050e0269bf4337fe6caa3c65db | refs/heads/master | 2022-06-10T06:29:56.227219 | 2020-05-07T18:10:53 | 2020-05-07T18:10:53 | 262,089,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,851 | py | import numpy as np
def affine_forward(x, theta, theta0):
"""
Computes the forward pass for an affine (fully-connected) layer.
The input x has shape (m, d_1, ..., d_k) and contains a minibatch of m
examples, where each example x[i] has shape (d_1, ..., d_k). We will
reshape each input into a vector of dimension d = d_1 * ... * d_k, and
then transform it to an output vector of dimension h.
Inputs:
- x: A numpy array containing input data, of shape (m, d_1, ..., d_k)
- theta: A numpy array of weights, of shape (d, h)
- theta0: A numpy array of biases, of shape (h,)
Returns a tuple of:
- out: output, of shape (m, h)
- cache: (x, theta, theta0)
"""
out = None
#############################################################################
# TODO: Implement the affine forward pass. Store the result in out. You #
# will need to reshape the input into rows. #
#############################################################################
# 2 lines of code expected
xmd = x.reshape((x.shape[0], theta.shape[0]))
out = xmd @ theta + theta0
#############################################################################
# END OF YOUR CODE #
#############################################################################
cache = (x, theta, theta0)
return out, cache
def affine_backward(dout, cache):
"""
Computes the backward pass for an affine layer.
Inputs:
- dout: Upstream derivative, of shape (m, h)
- cache: Tuple of:
- x: Input data, of shape (m, d_1, ... d_k)
- theta: Weights, of shape (d,h)
- theta0: biases, of shape (h,)
Returns a tuple of:
- dx: Gradient with respect to x, of shape (m, d1, ..., d_k)
- dtheta: Gradient with respect to theta, of shape (d, h)
- dtheta0: Gradient with respect to theta0, of shape (h,)
"""
x, theta, theta0 = cache
dx, dtheta, dtheta0 = None, None, None
#############################################################################
# TODO: Implement the affine backward pass. #
#############################################################################
# Hint: do not forget to reshape x into (m,d) form
# 4-5 lines of code expected
xmd = x.reshape((x.shape[0], theta.shape[0]))
dx = (dout @ theta.T).reshape(x.shape)
dtheta = xmd.T @ dout
dtheta0 = np.sum(dout, axis=0)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return dx, dtheta, dtheta0
def relu_forward(x):
"""
Computes the forward pass for a layer of rectified linear units (ReLUs).
Input:
- x: Inputs, of any shape
Returns a tuple of:
- out: Output, of the same shape as x
- cache: x
"""
out = None
#############################################################################
# TODO: Implement the ReLU forward pass. #
#############################################################################
# 1 line of code expected
out = np.where(x > 0, x, 0)
#############################################################################
# END OF YOUR CODE #
#############################################################################
cache = x
return out, cache
def relu_backward(dout, cache):
"""
Computes the backward pass for a layer of rectified linear units (ReLUs).
Input:
- dout: Upstream derivatives, of any shape
- cache: Input x, of same shape as dout
Returns:
- dx: Gradient with respect to x
"""
dx, x = None, cache
#############################################################################
# TODO: Implement the ReLU backward pass. #
#############################################################################
# 1 line of code expected. Hint: use np.where
dx = np.where(x > 0, dout, 0)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return dx
def dropout_forward(x, dropout_param):
  """
  Forward pass for (inverted) dropout.

  Inputs:
  - x: Input data, of any shape
  - dropout_param: A dictionary with the following keys:
    - p: Dropout parameter. Each neuron output is dropped with probability p.
    - mode: 'test' or 'train'. In 'train' mode dropout is applied; in 'test'
      mode the input is returned unchanged.
    - seed: Optional seed for the random number generator (used to make the
      function deterministic for gradient checking).
  Outputs:
  - out: Array of the same shape as x.
  - cache: Tuple (dropout_param, mask). In train mode, mask is the (already
    rescaled) dropout mask multiplied into the input; in test mode it is None.
  """
  p, mode = dropout_param['p'], dropout_param['mode']
  if 'seed' in dropout_param:
    np.random.seed(dropout_param['seed'])

  mask, out = None, None
  if mode == 'train':
    # Inverted dropout: scale the surviving activations by 1/keep_prob at
    # train time so that no rescaling is needed at test time.
    keep_prob = 1 - p
    mask = (np.random.rand(*x.shape) < keep_prob) / keep_prob
    out = x * mask
  elif mode == 'test':
    out = x

  cache = (dropout_param, mask)
  out = out.astype(x.dtype, copy=False)
  return out, cache
def dropout_backward(dout, cache):
  """
  Backward pass for (inverted) dropout.

  Inputs:
  - dout: Upstream derivatives, of any shape
  - cache: (dropout_param, mask) from dropout_forward.
  Returns:
  - dx: Gradient with respect to x (masked in train mode, identity in test
    mode, None for any other mode).
  """
  dropout_param, mask = cache
  mode = dropout_param['mode']
  if mode == 'train':
    # The same rescaled mask applied on the way forward gates the gradient.
    return dout * mask
  if mode == 'test':
    return dout
  return None
def conv_forward_naive(x, theta, theta0, conv_param):
  """
  A naive implementation of the forward pass for a convolutional layer.

  The input consists of m data points, each with C channels, height H and
  width W. Each input is convolved with F filters; every filter spans all C
  channels and has height HH and width WW.

  Input:
  - x: Input data of shape (m, C, H, W)
  - theta: Filter weights of shape (F, C, HH, WW)
  - theta0: Biases, of shape (F,)
  - conv_param: A dictionary with the following keys:
    - 'stride': Pixels between adjacent receptive fields, both directions.
    - 'pad': Number of zero-padding pixels applied to the input.
  Returns a tuple of:
  - out: Output data, of shape (m, F, H', W') where
      H' = 1 + (H + 2 * pad - HH) / stride
      W' = 1 + (W + 2 * pad - WW) / stride
  - cache: (x, theta, theta0, conv_param)
  """
  stride, pad = conv_param['stride'], conv_param['pad']
  m, C, H, W = x.shape
  F, _, HH, WW = theta.shape
  out_h = int(1 + (H + 2 * pad - HH) / stride)
  out_w = int(1 + (W + 2 * pad - WW) / stride)

  # Zero-pad only the two spatial dimensions.
  padded = np.pad(x, [(0, 0), (0, 0), (pad, pad), (pad, pad)], mode='constant')

  out = np.zeros((m, F, out_h, out_w))
  for n in range(m):
    for f in range(F):
      for r in range(out_h):
        r0 = r * stride
        for c in range(out_w):
          c0 = c * stride
          # Correlate the receptive field with filter f and add its bias.
          window = padded[n, :, r0:r0 + HH, c0:c0 + WW]
          out[n, f, r, c] = np.sum(window * theta[f]) + theta0[f]

  cache = (x, theta, theta0, conv_param)
  return out, cache
def conv_backward_naive(dout, cache):
  """
  A naive implementation of the backward pass for a convolutional layer.
  Inputs:
  - dout: Upstream derivatives.
  - cache: A tuple of (x, theta, theta0, conv_param) as in conv_forward_naive
  Returns a tuple of:
  - dx: Gradient with respect to x
  - dtheta: Gradient with respect to theta
  - dtheta0: Gradient with respect to theta0
  """
  dx, dtheta, dtheta0 = None, None, None
  #############################################################################
  # Backward pass: replay the forward loops and scatter each scalar upstream  #
  # gradient dout[i, j, k, l] back into the receptive field it came from.     #
  #############################################################################
  x, theta, theta0, conv_param = cache
  m, C, H, W = x.shape
  F, C, HH, WW = theta.shape
  stride, pad = conv_param['stride'], conv_param['pad']
  # Output spatial extent, identical to the forward computation.
  H1 = int(1 + (H + 2 * pad - HH) / stride)
  W1 = int(1 + (W + 2 * pad - WW) / stride)
  dx = np.zeros_like(x)
  dtheta = np.zeros_like(theta)
  # xp is the padded input; dxp accumulates dx in the same padded coordinate
  # space so window indices line up with the forward pass.
  xp = np.pad(x, [(0, 0), (0, 0), (pad, pad), (pad, pad)], mode='constant')
  dxp = np.pad(dx, [(0, 0), (0, 0), (pad, pad), (pad, pad)], mode='constant')
  for i in range(m):
    for j in range(F):
      for k in range(H1):
        for l in range(W1):
          # d(out)/d(x window) is the filter; d(out)/d(filter) is the window.
          dxp[i, :, (k * stride):(k * stride + HH), (l * stride):(l * stride + WW)] += dout[i, j, k, l] * theta[j, :, :, :]
          dtheta[j, :, :, :] += dout[i, j, k, l] * xp[i, :, (k * stride):(k * stride + HH),(l * stride):(l * stride + WW)]
  # Bias gradient: sum over every position where each filter was applied.
  dtheta0 = np.sum(dout, axis=(0, 2, 3))
  # Crop the padding back off to recover gradients w.r.t. the original input.
  dx = dxp[:, :, pad:(pad + H), pad:(pad + W)]
  return dx, dtheta, dtheta0
def max_pool_forward_naive(x, pool_param):
  """
  A naive implementation of the forward pass for a max pooling layer.

  Inputs:
  - x: Input data, of shape (m, C, H, W)
  - pool_param: dictionary with the following keys:
    - 'pool_height': The height of each pooling region
    - 'pool_width': The width of each pooling region
    - 'stride': The distance between adjacent pooling regions
  Returns a tuple of:
  - out: Output data, of shape (m, C, H', W') with each entry the max of its
    pooling window
  - cache: (x, pool_param)
  """
  ph = pool_param['pool_height']
  pw = pool_param['pool_width']
  stride = pool_param['stride']
  m, C, H, W = x.shape
  out_h = int(1 + (H - ph) / stride)
  out_w = int(1 + (W - pw) / stride)

  out = np.zeros((m, C, out_h, out_w))
  for n in range(m):
    for ch in range(C):
      for r in range(out_h):
        r0 = r * stride
        for c in range(out_w):
          c0 = c * stride
          out[n, ch, r, c] = x[n, ch, r0:r0 + ph, c0:c0 + pw].max()

  cache = (x, pool_param)
  return out, cache
def max_pool_backward_naive(dout, cache):
  """
  A naive implementation of the backward pass for a max pooling layer.
  Inputs:
  - dout: Upstream derivatives
  - cache: A tuple of (x, pool_param) as in the forward pass.
  Returns:
  - dx: Gradient with respect to x
  """
  dx = None
  #############################################################################
  # Route each upstream gradient back to the argmax of its pooling window.    #
  #############################################################################
  x, pool_param = cache
  m, C, H, W = x.shape
  pool_height, pool_width, stride = pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']
  # Output spatial extent, identical to the forward computation.
  H2 = int(1 + (H - pool_height) / stride)
  W2 = int(1 + (W - pool_width) / stride)
  dx = np.zeros_like(x)
  for i in range(m):
    for j in range(C):
      for k in range(H2):
        for l in range(W2):
          pool = x[i, j, (k * stride):(k * stride + pool_height), (l * stride):(l * stride + pool_width)]
          # Boolean mask of the maximal entries. NOTE(review): when a window
          # has tied maxima, the full upstream gradient is sent to every tied
          # position (not split between them) -- confirm this is intended.
          maxPool = (np.max(pool) == pool)
          dx[i, j, (k * stride):(k * stride + pool_height), (l * stride):(l * stride + pool_width)] += dout[i, j, k, l] * maxPool
  return dx
def svm_loss(x, y):
  """
  Computes the loss and gradient using for multiclass SVM classification.

  Inputs:
  - x: Input data, of shape (m, C) where x[i, j] is the score for the jth
    class for the ith input.
  - y: Vector of labels, of shape (m,) where y[i] is the label for x[i] and
    0 <= y[i] < C
  Returns a tuple of:
  - loss: Scalar giving the loss
  - dx: Gradient of the loss with respect to x
  """
  m = x.shape[0]
  rows = np.arange(m)
  # Hinge margins (delta = 1) relative to each sample's correct-class score;
  # the correct class itself contributes no margin.
  margins = np.maximum(0, x - x[rows, y][:, np.newaxis] + 1.0)
  margins[rows, y] = 0
  loss = margins.sum() / m
  # Gradient: +1 for every violated margin, and the correct class loses one
  # unit per violation; everything is averaged over the batch.
  violated = margins > 0
  dx = violated.astype(x.dtype)
  dx[rows, y] -= violated.sum(axis=1)
  dx /= m
  return loss, dx
def softmax_loss(x, y):
  """
  Computes the loss and gradient for softmax classification.

  Inputs:
  - x: Input data, of shape (m, C) where x[i, j] is the score for the jth
    class for the ith input.
  - y: Vector of labels, of shape (m,) where y[i] is the label for x[i] and
    0 <= y[i] < C
  Returns a tuple of:
  - loss: Scalar giving the loss
  - dx: Gradient of the loss with respect to x
  """
  # Shift by the row max before exponentiating for numerical stability.
  shifted = x - np.max(x, axis=1, keepdims=True)
  exp_scores = np.exp(shifted)
  probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
  m = x.shape[0]
  rows = np.arange(m)
  # Mean negative log-likelihood of the correct classes.
  loss = -np.sum(np.log(probs[rows, y])) / m
  # d(loss)/d(scores) = probs - one_hot(y), averaged over the batch.
  dx = probs.copy()
  dx[rows, y] -= 1
  dx /= m
  return loss, dx
| [
"35616267+JavisDaDa@users.noreply.github.com"
] | 35616267+JavisDaDa@users.noreply.github.com |
85eb6f1af285279eae63abc914777b8fbc4c45eb | d76b8deea36c183e95421f2f2ad2525adaaaec4f | /templatetags/myfilters.py | bf08da9db1a96d72956481977ede138c7a127ecd | [] | no_license | ycjungSubhuman/PulseCodeSite | 05a22e187d8c678d556734ffd081d48330b4e095 | 53d0948732d7601af6e3cdc2d03db7f5aa1839cc | refs/heads/master | 2021-01-10T01:12:14.027870 | 2016-02-22T13:16:37 | 2016-02-22T13:16:37 | 50,154,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from django import template
register = template.Library()
@register.filter(name='addclass')
def addclass(value, arg):
    """Template filter: render a bound form field with `arg` added as its CSS class.

    Usage in a template: {{ form.field|addclass:"my-css-class" }}.
    `value` is expected to be a BoundField (it must expose `as_widget`).
    """
    return value.as_widget(attrs={'class': arg})
| [
"jung3519@gmail.com"
] | jung3519@gmail.com |
2a888daeb38afc4c8af2361b163cba41b855ad4a | 26290096884d3f0c65b733cd8c5b783578296be1 | /bert/bert.py | 2acc78c472fa612cf8dc5e3c4d4c7b3f2269a9f9 | [] | no_license | ymiwm/BERT_Sentimental_Analysis | 31fb07ab5a9b979a491177f2b98f715fc70abc01 | dbe0d2df55ebd5071758dda68fc02988892c44ce | refs/heads/master | 2020-12-05T02:46:49.786749 | 2020-01-05T23:38:45 | 2020-01-05T23:38:45 | 231,985,324 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | from torch import nn
from .modeling_bert import BertModel
import torch
class Model(nn.Module):
	"""Binary classifier: multilingual BERT encoder followed by a 1-unit sigmoid head."""
	def __init__(self):
		super(Model, self).__init__()
		# Pretrained multilingual BERT; 768 is the BERT-base hidden size,
		# matching the linear layer's input dimension.
		self.bert = BertModel.from_pretrained("bert-base-multilingual-cased")
		self.linear = nn.Linear(768, 1)
	def forward(self, bert_ids):
		# bert_outputs: per-token hidden states from the encoder.
		bert_outputs, _ = self.bert(bert_ids)
		# Mean over dim 1 -- presumably the token/sequence dimension, i.e.
		# mean-pooling to a single sentence vector (TODO confirm shape).
		bert_outputs = bert_outputs.mean(1)
		# Sigmoid squashes the single logit into a probability in (0, 1).
		output = torch.sigmoid(self.linear(bert_outputs))
		return output
"ymiwm0322@gmail.com"
] | ymiwm0322@gmail.com |
968e1fb40f87ecf9c1f2826e6d0e9fb1a407b3bb | d7944b542887eca9b2d88aabd20ef77bd5f29440 | /proyecto/settings/base.py | 9ba1371270889a1f043a12447e3c8dae52b587b2 | [] | no_license | leoliam/Proyecto-Auditoria | 0f7fff23bbf7285e1955f71f95c955489fb9083f | 42f81ab6990de8275d0b7a02616a44463931b4a1 | refs/heads/master | 2021-01-22T12:02:51.809715 | 2014-12-16T06:29:58 | 2014-12-16T06:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | from unipath import Path
# Project root: three directory levels above this settings module.
BASE_DIR = Path(__file__).ancestor(3)

# NOTE(review): secret key hard-coded in source control -- should be loaded
# from the environment for any non-development deployment.
SECRET_KEY = '%3$0qcuk&fbp4dgc*)na5yuexbmb@in%63+jnup%e0v12xukl9'

# Built-in Django applications.
DJANGO_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)

# Third-party applications (all currently disabled).
THIRD_PARTY_APPS= (
    #'south',
    #'django_extensions',
    #'social.apps.django_app.default',
)

# Applications local to this project.
LOCAL_APPS = (
    'apps.inicio',
    'apps.logistica',
    'apps.rr_hh',
    'apps.plantillas',
    'apps.solicitudes',
    #'apps',
)

INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS

# Lazy URL reversal so these settings can reference named URLs before the
# URLconf is loaded.
from django.core.urlresolvers import reverse_lazy
LOGIN_URL = reverse_lazy('login')
LOGIN_REDIRECT_URL = reverse_lazy('inicio')
LOGOUT_URL = reverse_lazy('logout')

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'proyecto.urls'
WSGI_APPLICATION = 'proyecto.wsgi.application'

# Locale / timezone configuration (Peruvian Spanish, Lima time).
LANGUAGE_CODE = 'es-pe'
TIME_ZONE = 'America/Lima'
USE_I18N = True
USE_L10N = True
USE_TZ = True

TEMPLATE_DIRS=[BASE_DIR.child('templates')]
MEDIA_ROOT = BASE_DIR.child('media')
# Legacy user-profile hook pointing at the Empleado model.
AUTH_PROFILE_MODULE = 'rr_hh.Empleado'
"liamcaleb.asr@gmail.com"
] | liamcaleb.asr@gmail.com |
ff56a783aef1342c75d6f0497b505cbb1604bb8e | adc5a8c08790bc3c577c9288f2c9ff1bcf4aaf49 | /face_recognition_facenet/main.py | 138e38907db06f82b0933eacdec39ef49c01f5aa | [] | no_license | ml19siea/MSc-project | d99d293cf6b9c3076e7bedd53b9ee9537202c68e | b116c25336ad73718dee078addf99213c61eeaee | refs/heads/main | 2023-07-17T08:02:40.240181 | 2021-08-20T00:56:57 | 2021-08-20T00:56:57 | 398,104,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,863 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
import facenet
import detect_face
import os
import time
import argparse
import pickle
from PIL import Image
import tensorflow.compat.v1 as tf
import imutils
# Construct the argument parser and parse the arguments for command line operations
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", type=str, default="",
help="path to (optional) input video file")
ap.add_argument("-o", "--output", type=str, default="",
help="path to (optional) output video file")
ap.add_argument("-d", "--display", type=int, default=1,
help="whether or not output frame should be displayed")
args = vars(ap.parse_args())
# Paths: frozen FaceNet graph, trained SVM classifier pickle, MTCNN weights
# directory, and the training-image folder whose subfolder names double as
# the class labels.
modeldir = './model/20180402-114759.pb'
classifier_filename = './class/classifier.pkl'
npy='./npy'
train_img="./train_img"
# Single still image to recognize. `writer` is never used below -- presumably
# left over from a video-output variant (TODO confirm).
frame = cv2.imread('./testing/p1.jpg')
writer = None
# Main pipeline: detect faces with MTCNN, embed each face with FaceNet, then
# classify the embedding with the pre-trained SVM and annotate the image.
with tf.Graph().as_default():
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
    with sess.as_default():
        # MTCNN detector networks (proposal / refine / output stages).
        pnet, rnet, onet = detect_face.create_mtcnn(sess, npy)
        minsize = 30  # minimum size of face
        threshold = [0.7,0.8,0.8]  # per-stage MTCNN score thresholds
        factor = 0.709 # image-pyramid scale factor
        margin = 44
        batch_size =100 #1000
        image_size = 182
        input_image_size = 160
        # Class labels are the training subfolder names, sorted to match the
        # classifier's label ordering.
        HumanNames = os.listdir(train_img)
        HumanNames.sort()
        print('Loading Model')
        facenet.load_model(modeldir)
        images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
        embedding_size = embeddings.get_shape()[1]
        classifier_filename_exp = os.path.expanduser(classifier_filename)
        with open(classifier_filename_exp, 'rb') as infile:
            (model, class_names) = pickle.load(infile,encoding='latin1')
        frame=imutils.resize(frame,width=700)
        print('Start Recognition')
        if frame.ndim == 2:
            # Grayscale input: convert to 3-channel RGB before detection.
            frame = facenet.to_rgb(frame)
        bounding_boxes, _ = detect_face.detect_face(frame, minsize, pnet, rnet, onet, threshold, factor)
        faceNum = bounding_boxes.shape[0]
        print(faceNum)
        if faceNum > 0:
            det = bounding_boxes[:, 0:4]
            img_size = np.asarray(frame.shape)[0:2]
            cropped = []
            scaled = []
            scaled_reshape = []
            for i in range(faceNum):
                emb_array = np.zeros((1, embedding_size))
                xmin = int(det[i][0])
                ymin = int(det[i][1])
                xmax = int(det[i][2])
                ymax = int(det[i][3])
                try:
                    # Skip boxes that touch or exceed the image border.
                    if xmin <= 0 or ymin <= 0 or xmax >= len(frame[0]) or ymax >= len(frame):
                        print('Face is very close!')
                        continue
                    # Crop, resize to FaceNet's input size, and whiten.
                    cropped.append(frame[ymin:ymax, xmin:xmax,:])
                    cropped[i] = facenet.flip(cropped[i], False)
                    scaled.append(np.array(Image.fromarray(cropped[i]).resize((image_size, image_size))))
                    scaled[i] = cv2.resize(scaled[i], (input_image_size,input_image_size),
                                           interpolation=cv2.INTER_CUBIC)
                    scaled[i] = facenet.prewhiten(scaled[i])
                    scaled_reshape.append(scaled[i].reshape(-1,input_image_size,input_image_size,3))
                    feed_dict = {images_placeholder: scaled_reshape[i], phase_train_placeholder: False}
                    emb_array[0, :] = sess.run(embeddings, feed_dict=feed_dict)
                    print(model.predict_proba(emb_array))
                    predictions = model.predict_proba(emb_array)
                    best_class_indices = np.argmax(predictions, axis=1)
                    best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]
                    print("Predictions : [accuracy: {:.3f} ]".format(best_class_probabilities[0]))
                    # Accept the prediction only above a 0.6 confidence floor;
                    # otherwise label the face "?".
                    if best_class_probabilities>0.6:
                        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)    #boxing face
                        for H_i in HumanNames:
                            if HumanNames[best_class_indices[0]] == H_i:
                                result_names = HumanNames[best_class_indices[0]]
                                print("Predictions : [ name: {} , accuracy: {:.3f} ]".format(HumanNames[best_class_indices[0]],best_class_probabilities[0]))
                                cv2.rectangle(frame, (xmin, ymin-20), (xmax, ymin-2), (0, 255,255), -1)
                                cv2.putText(frame, result_names, (xmin,ymin-5), cv2.FONT_HERSHEY_COMPLEX_SMALL,.75, (0, 0, 0), thickness=1, lineType=1)
                    else :
                        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
                        cv2.rectangle(frame, (xmin, ymin-20), (xmax, ymin-2), (0, 255,255), -1)
                        cv2.putText(frame, "?", (xmin,ymin-5), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                                    1, (0, 0, 0), thickness=1, lineType=1)
                        print("Predictions : [ name: {?} , accuracy: {:.3f} ]".format(best_class_probabilities[0]))
                # NOTE(review): bare `except:` swallows every error (including
                # KeyboardInterrupt) and prints only "error" -- consider
                # narrowing to `except Exception` and logging the traceback.
                except:
                    print("error")
        #cv2.imshow('Face Recognition', frame)
        cv2.imwrite('output.jpg',frame)
        cv2.waitKey()
| [
"ml19siea@leeds.ac.uk"
] | ml19siea@leeds.ac.uk |
11663c0f28cb942a4a9a90c69f77584703d14b96 | 5633afdce5fb2209f130bb0cd2c478a35bd75957 | /168-理解function.py | 62a304b055863e54e0b2122b6167d3374a9902b5 | [] | no_license | weiyinfu/learnKeras | 36a68e7f9966bf2ac53bb4767b3754864fe6087d | c011005bf760053e9085a0171702e54d19cafebc | refs/heads/master | 2023-03-06T18:06:32.811186 | 2021-02-22T06:05:57 | 2021-02-22T06:05:57 | 147,919,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | import keras.backend as K
import keras
import tensorflow as tf
"""
keras的function可以方便的求某几个数字的值
"""
input = keras.layers.Input((None,))
output = tf.multiply(input, input)
output2 = keras.layers.multiply([input, input])
called_count = K.variable(0.0)
f = K.function([input], [output, output2, called_count], [K.update_add(called_count, 1)])
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(f([[3, 4, 5]]))
print(f([[3, 4, 5]]))
o, oo, c = sess.run([output, output2, called_count], feed_dict={
input: [[3, 4, 5]]
})
print(o, oo, c)
| [
"weiyinfu.weiyinfu@bytedance.com"
] | weiyinfu.weiyinfu@bytedance.com |
6734fc2c2acc7eca9fe0f2156adafb6e9cfc7ce9 | 5093a9f3cd76ce41b8327e3eb02243154df66b3f | /princeton_env/bin/wheel | 8a4e95ebc7c5ac0509271a2cf2d5d3e11bd351e2 | [] | no_license | aarusso/U19-pipeline_python | fb58eed341fb538cf26f6bcfb796fcfdf0299332 | 6c529d8c7b18c698729a481f6db4db73c459a03d | refs/heads/master | 2022-04-16T15:46:26.957756 | 2020-04-14T17:32:39 | 2020-04-14T17:32:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | #!/Users/shanshen/Dropbox/Vathes/princeton/pipelines/U19_pipeline_python/princeton_env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix setuptools appends to argv[0]
    # before delegating to wheel's CLI, and propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"shenshanpku@gmail.com"
] | shenshanpku@gmail.com | |
348970a0f4e5c0d7929ac752e3078f95f5443c3a | 6e5ab77fee1fb4a0310213dd8c6dd8601828b1b9 | /Algorithm/Swea/D1_6230.py | 11bc95295c19530488c6fba37d18d628e6562027 | [] | no_license | hongyong3/TIL | 36d031c0da9e3e6db3eebb977bd3e12df00a849f | 7f1492128e957a78fc95b255f4f7f2978161e471 | refs/heads/master | 2023-08-19T09:16:03.231757 | 2023-08-18T09:38:47 | 2023-08-18T09:38:47 | 162,100,258 | 1 | 0 | null | 2023-02-11T00:52:32 | 2018-12-17T08:42:42 | Jupyter Notebook | UTF-8 | Python | false | false | 263 | py | data = [88, 30, 61, 55, 95]
for i in range(5):
if data[i] >= 60:
print("{}번 학생은 {}점으로 {}입니다.".format(i + 1, data[i], "합격"))
else:
print("{}번 학생은 {}점으로 {}입니다.".format(i + 1, data[i], "불합격")) | [
"chy66822495@gmail.com"
] | chy66822495@gmail.com |
dd2ea4300de0af50d1e600dafe93b0d42b1d093a | 6ae3b4c229e09007bd594db825a21112c373c3e9 | /src/plugins/generate_text/generate_chains_db.py | e6c90f51a0ea718165e1b279e87af43ccdc17428 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | wlstrickland/botty-bot-bot-bot | ae5fd6df23732df97eb440cc87e62ddec26f99fb | 4d582adf6ca564b2dd384415a9ee98a0a4b92f70 | refs/heads/master | 2021-01-16T22:20:29.252957 | 2016-04-25T04:32:57 | 2016-04-25T04:32:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,101 | py | #!/usr/bin/env python3
import os, re, json
import sqlite3
from markov import Markov
SQLITE_DATABASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "chains.db")
CHAT_HISTORY_DIRECTORY = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "..", "@history")
def server_text_to_sendable_text(server_text):
    """Returns `server_text`, a string in Slack server message format, converted into a string in Slack sendable message format.

    Channel references (`<#C...>`), user references (`<@U...>`), and special
    commands (`<!...>`) are passed through unchanged; link escapes are replaced
    by their bare URL so Slack clients re-linkify them.
    """
    assert isinstance(server_text, str), "`server_text` must be a string rather than \"{}\"".format(server_text)

    # After stripping every <...> escape there must be no bare angle brackets
    # left; otherwise the input was not properly escaped.
    text_without_special_sequences = re.sub(r"<[^<>]*>", "", server_text)
    # Fix: the "{}" placeholder in this message was previously never formatted.
    assert "<" not in text_without_special_sequences and ">" not in text_without_special_sequences, "Invalid special sequence in server text \"{}\", perhaps some text needs to be escaped".format(server_text)

    # process link references
    def process_special_sequence(match):
        original, body = match.group(0), match.group(1).split("|")[0]
        if body.startswith("#C"): return original # channel reference, should send unchanged
        if body.startswith("@U"): return original # user reference, should send unchanged
        if body.startswith("!"): return original # special command, should send unchanged
        return body # link, should remove angle brackets and label in order to allow it to linkify
    return re.sub(r"<(.*?)>", process_special_sequence, server_text)
def sendable_text_to_text(sendable_text):
    """Returns `sendable_text`, a string in Slack sendable message format, converted into a plain text string. The transformation can lose some information for escape sequences, such as link labels.

    Channel/user references are reduced to their ID, `<!channel>`-style
    commands become their `@`-form, links keep only their URL, and the HTML
    entities `&lt;`/`&gt;`/`&amp;` are unescaped.
    """
    assert isinstance(sendable_text, str), "`sendable_text` must be a string rather than \"{}\"".format(sendable_text)

    # After stripping every <...> escape there must be no bare angle brackets
    # left; otherwise the input was not properly escaped.
    text_without_special_sequences = re.sub(r"<[^<>]*>", "", sendable_text)
    # Fix: the "{}" placeholder in this message was previously never formatted.
    assert "<" not in text_without_special_sequences and ">" not in text_without_special_sequences, "Invalid special sequence in sendable text \"{}\", perhaps some text needs to be escaped".format(sendable_text)

    # process link references
    def process_special_sequence(match):
        original, body = match.group(0), match.group(1).split("|")[0]
        if body.startswith("#C"): # channel reference
            return body
        if body.startswith("@U"): # user reference
            return body
        if body.startswith("!"): # special command
            if body == "!channel": return "@channel"
            if body == "!group": return "@group"
            if body == "!everyone": return "@everyone"
            return original
        # Fix: the link case previously fell through and returned None, which
        # made re.sub raise TypeError on any plain link such as <http://...>.
        # Drop the angle brackets and label, keeping only the URL.
        return body
    raw_text = re.sub(r"<(.*?)>", process_special_sequence, sendable_text)
    return raw_text.replace("&lt;", "<").replace("&gt;", ">").replace("&amp;", "&")
def get_history_files():
    """Returns a mapping from channel names to absolute file paths of their history entries"""
    # os.walk yields the top-level directory first; we only want that level,
    # so we return from inside the first iteration.
    for dirpath, _, filenames in os.walk(CHAT_HISTORY_DIRECTORY):
        mapping = {}
        for fname in filenames:
            base, extension = os.path.splitext(os.path.basename(fname))
            if extension == ".json":
                mapping["#" + base] = os.path.join(dirpath, fname)
        return mapping
    # Directory missing or empty walk: no history available.
    return {}
def get_message_text(message):
    """Returns the text value of `message` if it is a valid text message, or `None` otherwise"""
    # Only entries of type "message" with a string timestamp are candidates.
    if message.get("type") != "message" or not isinstance(message.get("ts"), str):
        return None
    # Normal message: top-level text and user fields.
    if isinstance(message.get("text"), str) and isinstance(message.get("user"), str):
        return server_text_to_sendable_text(message["text"])
    # Edited message: the payload lives under the nested "message" entry.
    if message.get("subtype") == "message_changed":
        edited = message.get("message")
        if isinstance(edited, dict) and isinstance(edited.get("user"), str) and isinstance(edited.get("text"), str):
            return server_text_to_sendable_text(edited["text"])
    return None
# Rebuild the SQLite-backed Markov chain database from scratch.
connection = sqlite3.connect(SQLITE_DATABASE)
connection.execute("DROP TABLE IF EXISTS counts")
connection.execute("DROP TABLE IF EXISTS chain")
connection.execute("CREATE TABLE counts (key TEXT PRIMARY KEY, count INTEGER)")
connection.execute("CREATE TABLE chain (key TEXT, next_word TEXT, occurrences INTEGER)")
connection.execute("CREATE INDEX chain_key_index ON chain (key)")

markov = Markov(2) # Markov model with 2 word look-behind
# Train the model on every valid text message in every channel's exported
# history (one JSON entry per line).
for channel_name, history_file in get_history_files().items():
    with open(history_file, "r") as f:
        for entry in f:
            text = get_message_text(json.loads(entry))
            if text is not None:
                markov.train(Markov.tokenize_text(sendable_text_to_text(text)))

# Persist the model. Look-behind keys are tuples serialized as
# newline-joined strings so they fit in a single TEXT column.
connection.executemany(
    "INSERT INTO counts VALUES (?, ?)",
    (("\n".join(key), occurrences) for key, occurrences in markov.counts.items())
)
connection.executemany(
    "INSERT INTO chain VALUES (?, ?, ?)",
    (("\n".join(key), next_word, occurrences) for key, next_mapping in markov.chain.items()
                                              for next_word, occurrences in next_mapping.items())
)
connection.commit()
connection.close()
"azhang9@gmail.com"
] | azhang9@gmail.com |
7f2b66954f9309fbedabb6c9335ea38682a883a7 | 5a9c7f5af16698df95204850b499a219ab0e79b0 | /python/ray/tests/test_output.py | 2eba788b87d2dd36bbc9de15025762852c136067 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | neigh80/ray | 89e25a0fdc031cb0e150899ea02602f693dc6648 | 5a7c5ab79cc5e963e37ae8226bc8f3e636a0cdb3 | refs/heads/master | 2023-03-10T21:33:16.195709 | 2022-05-05T23:37:39 | 2022-05-05T23:37:39 | 227,317,505 | 0 | 1 | Apache-2.0 | 2023-03-04T08:59:16 | 2019-12-11T08:46:07 | Python | UTF-8 | Python | false | false | 14,754 | py | import subprocess
import sys
import pytest
import re
import signal
import time
import os
import ray
from ray._private.test_utils import (
run_string_as_driver_nonblocking,
run_string_as_driver,
)
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_spill_logs():
    """Driver output mentions object spilling iff RAY_verbose_spill_logs=1."""
    script = """
import ray
import numpy as np
ray.init(object_store_memory=200e6)
x = []
for _ in range(10):
    x.append(ray.put(np.ones(100 * 1024 * 1024, dtype=np.uint8)))
"""
    # Verbose spill logs enabled: the "Spilled ..." progress line must appear.
    proc = run_string_as_driver_nonblocking(script, env={"RAY_verbose_spill_logs": "1"})
    out_str = proc.stdout.read().decode("ascii") + proc.stderr.read().decode("ascii")
    print(out_str)
    assert "Spilled " in out_str
    # Disabled: the same workload must stay silent about spilling.
    proc = run_string_as_driver_nonblocking(script, env={"RAY_verbose_spill_logs": "0"})
    out_str = proc.stdout.read().decode("ascii") + proc.stderr.read().decode("ascii")
    print(out_str)
    assert "Spilled " not in out_str
def _hook(env):
return {"env_vars": {"HOOK_KEY": "HOOK_VALUE"}}
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_runtime_env_hook():
    """RAY_RUNTIME_ENV_HOOK pointing at `_hook` injects HOOK_KEY into tasks."""
    script = """
import ray
import os
@ray.remote
def f():
    return os.environ.get("HOOK_KEY")
print(ray.get(f.remote()))
"""
    proc = run_string_as_driver_nonblocking(
        script, env={"RAY_RUNTIME_ENV_HOOK": "ray.tests.test_output._hook"}
    )
    out_str = proc.stdout.read().decode("ascii") + proc.stderr.read().decode("ascii")
    print(out_str)
    # The task printed the env var set by the hook.
    assert "HOOK_VALUE" in out_str
def test_autoscaler_infeasible():
    """An unsatisfiable resource request (GPU on a CPU-only node) surfaces an
    autoscaler error on the driver."""
    script = """
import ray
import time

ray.init(num_cpus=1)

@ray.remote(num_gpus=1)
def foo():
    pass

x = foo.remote()
time.sleep(15)
"""
    proc = run_string_as_driver_nonblocking(script)
    out_str = proc.stdout.read().decode("ascii")
    err_str = proc.stderr.read().decode("ascii")
    print(out_str, err_str)
    assert "Tip:" in out_str
    assert "Error: No available node types can fulfill" in out_str
def test_autoscaler_warn_deadlock():
    """Two actors needing 1 CPU each on a 1-CPU cluster produce the
    pending-resource-request warning."""
    script = """
import ray
import time

ray.init(num_cpus=1)

@ray.remote(num_cpus=1)
class A:
    pass

a = A.remote()
b = A.remote()
time.sleep(25)
"""
    proc = run_string_as_driver_nonblocking(script)
    out_str = proc.stdout.read().decode("ascii")
    err_str = proc.stderr.read().decode("ascii")
    print(out_str, err_str)
    assert "Tip:" in out_str
    assert "Warning: The following resource request cannot" in out_str
def test_autoscaler_no_spam():
    """Feasible tasks using custom resources must not trigger false-positive
    autoscaler tips."""
    script = """
import ray
import time

# Check that there are no false positives with custom resources.
ray.init(num_cpus=1, resources={"node:x": 1})

@ray.remote(num_cpus=1, resources={"node:x": 1})
def f():
    time.sleep(1)
    print("task done")

ray.get([f.remote() for _ in range(15)])
"""
    proc = run_string_as_driver_nonblocking(script)
    out_str = proc.stdout.read().decode("ascii")
    err_str = proc.stderr.read().decode("ascii")
    print(out_str, err_str)
    assert "Tip:" not in out_str
    assert "Tip:" not in err_str
def test_fail_importing_actor(ray_start_regular, error_pubsub):
    """An actor closing over a driver-only temporary module fails to import on
    the worker, and the import error is reported on the driver's stderr."""
    script = """
import os
import sys
import tempfile
import ray

ray.init()
temporary_python_file = '''
def temporary_helper_function():
    return 1
'''

f = tempfile.NamedTemporaryFile("w+", suffix=".py", prefix="_", delete=True)
f_name = f.name
f.close()
f = open(f_name, "w+")
f.write(temporary_python_file)
f.flush()
directory = os.path.dirname(f_name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f_name)[:-3]
sys.path.append(directory)
module = __import__(module_name)

# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
class Foo:
    def __init__(self):
        self.x = module.temporary_python_file()

a = Foo.remote()
import time
time.sleep(3)  # Wait for actor start.
"""
    proc = run_string_as_driver_nonblocking(script)
    out_str = proc.stdout.read().decode("ascii")
    err_str = proc.stderr.read().decode("ascii")
    print(out_str)
    print(err_str)
    assert "ModuleNotFoundError: No module named" in err_str
    assert "RuntimeError: The actor with name Foo failed to import" in err_str
def test_fail_importing_task(ray_start_regular, error_pubsub):
    """A task closing over a driver-only temporary module fails to import on
    the worker, and the import error is reported on the driver's stderr."""
    script = """
import os
import sys
import tempfile
import ray

ray.init()
temporary_python_file = '''
def temporary_helper_function():
    return 1
'''

f = tempfile.NamedTemporaryFile("w+", suffix=".py", prefix="_", delete=True)
f_name = f.name
f.close()
f = open(f_name, "w+")
f.write(temporary_python_file)
f.flush()
directory = os.path.dirname(f_name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f_name)[:-3]
sys.path.append(directory)
module = __import__(module_name)

# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
def foo():
    return module.temporary_python_file()

ray.get(foo.remote())
"""
    proc = run_string_as_driver_nonblocking(script)
    out_str = proc.stdout.read().decode("ascii")
    err_str = proc.stderr.read().decode("ascii")
    print(out_str)
    print(err_str)
    assert "ModuleNotFoundError: No module named" in err_str
    assert "RuntimeError: The remote function failed to import" in err_str
def test_worker_stdout():
script = """
import ray
import sys
ray.init(num_cpus=2)
@ray.remote
def foo(out_str, err_str):
print(out_str)
print(err_str, file=sys.stderr)
ray.get(foo.remote("abc", "def"))
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
err_str = proc.stderr.read().decode("ascii")
out_str = "".join(out_str.splitlines())
assert out_str.endswith("abc"), out_str
assert "(foo pid=" in out_str, out_str
err_str_sec_last = "".join(err_str.split("\n")[-2].splitlines())
assert err_str_sec_last.endswith("def")
def test_core_worker_error_message():
script = """
import ray
import sys
ray.init(local_mode=True)
# In local mode this generates an ERROR level log.
ray._private.utils.push_error_to_driver(
ray.worker.global_worker, "type", "Hello there")
"""
proc = run_string_as_driver_nonblocking(script)
err_str = proc.stderr.read().decode("ascii")
assert "Hello there" in err_str, err_str
def test_disable_driver_logs_breakpoint():
script = """
import time
import os
import ray
import sys
import threading
ray.init(num_cpus=2)
@ray.remote
def f():
while True:
time.sleep(1)
print("hello there")
sys.stdout.flush()
def kill():
time.sleep(5)
sys.stdout.flush()
time.sleep(1)
os._exit(0)
t = threading.Thread(target=kill)
t.start()
x = f.remote()
time.sleep(2) # Enough time to print one hello.
ray.util.rpdb._driver_set_trace() # This should disable worker logs.
# breakpoint() # Only works in Py3.7+
"""
proc = run_string_as_driver_nonblocking(script)
out_str = proc.stdout.read().decode("ascii")
num_hello = out_str.count("hello")
assert num_hello >= 1, out_str
assert num_hello < 3, out_str
assert "Temporarily disabling Ray worker logs" in out_str, out_str
# TODO(ekl) nice to test resuming logs too, but it's quite complicated
@pytest.mark.parametrize("file", ["stdout", "stderr"])
def test_multi_stdout_err(file):
if file == "stdout":
file_handle = "sys.stdout"
else: # sys.stderr
file_handle = "sys.stderr"
script = f"""
import ray
import sys
ray.init(num_cpus=1)
@ray.remote
def foo():
print(file={file_handle})
@ray.remote
def bar():
print(file={file_handle})
@ray.remote
def baz():
print(file={file_handle})
ray.get(foo.remote())
ray.get(bar.remote())
ray.get(baz.remote())
"""
proc = run_string_as_driver_nonblocking(script)
if file == "stdout":
out_str = proc.stdout.read().decode("ascii")
else:
out_str = proc.stderr.read().decode("ascii")
out_str = "".join(out_str.splitlines())
assert "(foo pid=" in out_str, out_str
assert "(bar pid=" in out_str, out_str
assert "(baz pid=" in out_str, out_str
@pytest.mark.parametrize("file", ["stdout", "stderr"])
def test_actor_stdout(file):
if file == "stdout":
file_handle = "sys.stdout"
else: # sys.stderr
file_handle = "sys.stderr"
script = f"""
import ray
import sys
ray.init(num_cpus=2)
@ray.remote
class Actor1:
def f(self):
print("hi", file={file_handle})
@ray.remote
class Actor2:
def __init__(self):
print("init", file={file_handle})
self.name = "ActorX"
def f(self):
print("bye", file={file_handle})
def __repr__(self):
return self.name
a = Actor1.remote()
ray.get(a.f.remote())
b = Actor2.remote()
ray.get(b.f.remote())
"""
proc = run_string_as_driver_nonblocking(script)
if file == "stdout":
out_str = proc.stdout.read().decode("ascii")
else:
out_str = proc.stderr.read().decode("ascii")
out_str = "".join(out_str.splitlines())
assert "hi" in out_str, out_str
assert "(Actor1 pid=" in out_str, out_str
assert "bye" in out_str, out_str
assert re.search("Actor2 pid=.*init", out_str), out_str
assert not re.search("ActorX pid=.*init", out_str), out_str
assert re.search("ActorX pid=.*bye", out_str), out_str
assert re.search("Actor2 pid=.*bye", out_str), out_str
def test_output():
# Use subprocess to execute the __main__ below.
outputs = subprocess.check_output(
[sys.executable, __file__, "_ray_instance"], stderr=subprocess.STDOUT
).decode()
lines = outputs.split("\n")
for line in lines:
print(line)
if os.environ.get("RAY_MINIMAL") == "1":
# Without "View the Ray dashboard"
assert len(lines) == 1, lines
else:
# With "View the Ray dashboard"
assert len(lines) == 2, lines
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
# TODO: fix this test to support minimal installation
@pytest.mark.skipif(
os.environ.get("RAY_MINIMAL") == "1",
reason="This test currently fails with minimal install.",
)
def test_output_on_driver_shutdown(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=16)
# many_ppo.py script.
script = """
import ray
from ray.tune import run_experiments
from ray.tune.utils.release_test_util import ProgressCallback
num_redis_shards = 5
redis_max_memory = 10**8
object_store_memory = 10**9
num_nodes = 3
message = ("Make sure there is enough memory on this machine to run this "
"workload. We divide the system memory by 2 to provide a buffer.")
assert (num_nodes * object_store_memory + num_redis_shards * redis_max_memory <
ray._private.utils.get_system_memory() / 2), message
# Simulate a cluster on one machine.
ray.init(address="auto")
# Run the workload.
run_experiments(
{
"ppo": {
"run": "PPO",
"env": "CartPole-v0",
"num_samples": 10,
"config": {
"framework": "torch",
"num_workers": 1,
"num_gpus": 0,
"num_sgd_iter": 1,
},
"stop": {
"timesteps_total": 1,
},
}
},
callbacks=[ProgressCallback()])
"""
proc = run_string_as_driver_nonblocking(script)
# Make sure the script is running before sending a sigterm.
with pytest.raises(subprocess.TimeoutExpired):
print(proc.wait(timeout=10))
print(f"Script is running... pid: {proc.pid}")
# Send multiple signals to terminate it like real world scenario.
for _ in range(10):
time.sleep(0.1)
os.kill(proc.pid, signal.SIGINT)
try:
proc.wait(timeout=10)
except subprocess.TimeoutExpired:
print("Script wasn't terminated by SIGINT. Try SIGTERM.")
os.kill(proc.pid, signal.SIGTERM)
print(proc.wait(timeout=10))
err_str = proc.stderr.read().decode("ascii")
assert len(err_str) > 0
assert "StackTrace Information" not in err_str
print(err_str)
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
@pytest.mark.skipif(
os.environ.get("RAY_MINIMAL") == "1",
reason="This test currently fails with minimal install.",
)
@pytest.mark.parametrize("execution_number", range(3))
def test_empty_line_thread_safety_bug(execution_number, ray_start_cluster):
"""Make sure when new threads are used within __init__,
the empty line is not printed.
Related: https://github.com/ray-project/ray/pull/20987
"""
cluster = ray_start_cluster
cluster.add_node(num_cpus=24)
actor_repr = "TESTER"
script = f"""
import time
import os
import threading
import torch
from filelock import FileLock
import ray
class Repro:
pass
def do_lock():
path = f"/tmp/lock"
lock = FileLock(path, timeout=4)
lock.acquire()
@ray.remote
class Train:
def __init__(self, config: Repro):
# print("b")
def warmup():
do_lock()
torch.empty(0, device="cpu")
for _ in range(300000000):
pass
threading.Thread(target=warmup, daemon=True).start()
def ready(self):
pass
def __repr__(self):
return "{actor_repr}"
ray.init("auto")
actors = [Train.remote(config=None) for i in range(24)]
for a in actors:
ray.get(a.ready.remote())
time.sleep(5)
"""
out = run_string_as_driver(script)
assert actor_repr not in out
def test_node_name_in_raylet_death():
NODE_NAME = "RAY_TEST_RAYLET_DEATH_NODE_NAME"
script = f"""
import ray
import time
import os
NUM_HEARTBEATS=10
HEARTBEAT_PERIOD=500
WAIT_BUFFER_SECONDS=5
os.environ["RAY_num_heartbeats_timeout"]=str(NUM_HEARTBEATS)
os.environ["RAY_raylet_heartbeat_period_milliseconds"]=str(HEARTBEAT_PERIOD)
ray.init(_node_name=\"{NODE_NAME}\")
# This will kill raylet without letting it exit gracefully.
ray.worker._global_node.kill_raylet()
time.sleep(NUM_HEARTBEATS * HEARTBEAT_PERIOD / 1000 + WAIT_BUFFER_SECONDS)
ray.shutdown()
"""
out = run_string_as_driver(script)
assert out.count(f"node name: {NODE_NAME} has been marked dead") == 1
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "_ray_instance":
# Set object store memory very low so that it won't complain
# about low shm memory in Linux environment.
# The test failures currently complain it only has 2 GB memory,
# so let's set it much lower than that.
MB = 1000 ** 2
ray.init(num_cpus=1, object_store_memory=(100 * MB))
ray.shutdown()
else:
sys.exit(pytest.main(["-v", __file__]))
| [
"noreply@github.com"
] | noreply@github.com |
a810fbc6fe8c290cc29df424fcd6670a5f136340 | a68785e2ea1f057252dd1947eaf81cc6eba12fe6 | /manage.py | c38af92c557805c37b63c99bee26e7b4364acb97 | [] | no_license | uzumakikirin/vyashgaming | 5454d4da428221fc73692c03965188f1f51296ef | 756bf963bb15f791061543fed1df77463dd73746 | refs/heads/master | 2022-12-12T00:27:50.395199 | 2019-04-06T17:28:02 | 2019-04-06T17:28:02 | 179,779,566 | 0 | 0 | null | 2022-12-08T01:21:50 | 2019-04-06T02:24:23 | JavaScript | UTF-8 | Python | false | false | 543 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vyashgaming.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"uzumakikirin@gmail.com"
] | uzumakikirin@gmail.com |
73330b8c4ae30abe3eb91652a4e959fafd3b10ec | 8f9fd335a67a780175a50e09e2f67b37dde5ef4b | /django_practice/urls.py | 19cfcaa6e5a0a25ec4911cf0d74f9ffea34321ad | [] | no_license | MyronOuyang/DjangoPractice | 109b1fdc8321a504dfc10018feda723b56c370b8 | a261cca724d4fecdb579a3a92592d93a5225129b | refs/heads/master | 2022-09-16T06:29:01.533657 | 2020-06-01T18:37:54 | 2020-06-01T18:37:54 | 267,379,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,845 | py | """django_practice URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from users import views as user_views
urlpatterns = [
path('admin/', admin.site.urls),
path('register/', user_views.register, name="register"),
path('profile/', user_views.profile, name="profile"),
path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name="login"),
path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name="logout"),
path('password-reset/', auth_views.PasswordResetView.as_view(template_name='users/password_reset.html'), name="password_reset"),
path('password-reset/done/', auth_views.PasswordResetDoneView.as_view(template_name='users/password_reset_done.html'), name="password_reset_done"),
path('password-reset-confirm/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(template_name='users/password_reset_confirm.html'), name="password_reset_confirm"),
path('password-reset_complete/', auth_views.PasswordResetCompleteView.as_view(template_name='users/password_reset_complete.html'), name="password_reset_complete"),
path('', include('blog.urls')),
]
| [
"myronouyang@gmail.com"
] | myronouyang@gmail.com |
3d611b2e3932f660deb170ecddfe59c95901930e | a27c12df228407937301878c081450555623d3ba | /motion dec.py | 6912c2ac8756db4883ae38d33df4ee4c070d83df | [] | no_license | Andy-jg/raspberry-3b-opencv3.2.0-motion | f037725c9fa6cd34c54294efc2a1d69e0cccac83 | 10ad23484af22528c3b4ded7815ed51230163556 | refs/heads/master | 2020-05-26T15:13:26.163984 | 2017-03-15T01:58:19 | 2017-03-15T01:58:19 | 85,009,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,814 | py | # import the necessary packages
import argparse
import datetime
import imutils
import time
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
camera = cv2.VideoCapture(0)
time.sleep(0.25)
# otherwise, we are reading from a video file
else:
camera = cv2.VideoCapture(args["video"])
# initialize the first frame in the video stream
firstFrame = None
# loop over the frames of the video
while True:
# grab the current frame and initialize the occupied/unoccupied
# text
(grabbed, frame) = camera.read()
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end
# of the video
if not grabbed:
break
# resize the frame, convert it to grayscale, and blur it
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
# if the first frame is None, initialize it
if firstFrame is None:
firstFrame = gray
continue
# compute the absolute difference between the current frame and
# first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
(_,cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# loop over the contours
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) < args["min_area"]:
continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
# draw the text and timestamp on the frame
cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# show the frame and record if the user presses a key
cv2.imshow("Security Feed", frame)
cv2.imshow("Thresh", thresh)
cv2.imshow("Frame Delta", frameDelta)
key = cv2.waitKey(1) & 0xFF
# if the `q` key is pressed, break from the lop
if key == ord("q"):
break
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows() | [
"noreply@github.com"
] | noreply@github.com |
5a3ff1c8ef0a9cb4c95ba96ba406cfb4f3cc10e6 | 6f2f521f407b76e9efc40d9b55f1ff9e090f0c72 | /extract/admin.py | d3a0838ea908aa075a8d7a76d71107daecc69574 | [] | no_license | yeahdef/bloomextract | 3ea04333c47d351c5cd231241dfae330a4ae1f68 | 9517680f6b79e7c0ace734901462b58d7817d3f0 | refs/heads/master | 2021-01-18T22:35:34.473371 | 2016-05-03T22:25:49 | 2016-05-03T22:25:49 | 38,942,500 | 0 | 1 | null | 2015-07-12T22:20:51 | 2015-07-11T22:26:56 | Python | UTF-8 | Python | false | false | 167 | py | from django.contrib import admin
from extract.models import Product, Category
# Register your models here.
admin.site.register(Product)
admin.site.register(Category)
| [
"joeyliechty@gmail.com"
] | joeyliechty@gmail.com |
e634d25b606b0e29a87126e3be0f570d8eea761a | b32df2ffae14c3ca8083f36f93165c220aef5e44 | /blueoil/__init__.py | a5ce8e74f89c2ccaf0d33d978fd32d2513272f60 | [
"Apache-2.0"
] | permissive | blue-oil/blueoil | 213659909b6eac26dd249f878a03ed732b639539 | 0c9160b524b17482d59ae48a0c11384f1d26dccc | refs/heads/master | 2023-01-24T05:10:54.825811 | 2021-04-22T08:46:56 | 2021-04-22T08:46:56 | 153,597,157 | 252 | 111 | Apache-2.0 | 2021-05-10T05:02:45 | 2018-10-18T09:19:36 | Python | UTF-8 | Python | false | false | 709 | py | # -*- coding: utf-8 -*-
# Copyright 2020 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
| [
"noreply@github.com"
] | noreply@github.com |
9a5868d1d8b3a08f300c8b9926680fcdd8806c42 | 1d78e7a8de21b281e9a27098392aea63896fd1fd | /es2file.py | b70d653bebe68eec17b667cf1ba907a73014fe00 | [] | no_license | haojiliang/elasticsearch2file | e257a629209395583beea1732c5a97a6f8675a5f | 346389b0886ef2eb3d3999e978b4bbab2de53278 | refs/heads/master | 2021-09-04T17:21:46.511003 | 2018-01-20T09:26:02 | 2018-01-20T09:26:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,252 | py | import json
import os
import sys
import urllib2
reload(sys)
sys.setdefaultencoding('utf-8')
class exportEsData():
def __init__(self, url, siteid, startdate, enddate, scroll):
self.url = '%s/_search' % (url)
self.siteid = siteid
self.startdate = startdate
self.enddate = enddate
self.scroll = scroll
self.result = ""
def exportData(self, scrollID):
#esdata = urllib2.urlopen("http://www.baidu.com/").read()
opener = urllib2.build_opener()
headers = {'User-Agent':'Mozilla /5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6' }
if scrollID == "":
print("Exporting site%s..." % self.siteid)
queryJson = { \
"size": 1000, \
"query": { "filtered": {"filter": {"bool": {"must": {"bool": {"must": [ \
{"query": {"match": {"b": {"query": self.siteid,"type": "phrase"}}}}, \
{"range":{"c":{"gte": self.startdate + " 00:00:00","lte":self.enddate + " 23:59:59"}}} \
]}}}}} \
} \
}
url2 = '%s?scroll=%s' % (self.url, self.scroll)
else:
queryJson = { "scroll" : self.scroll, "scroll_id" : scrollID }
url2 = self.url + "/scroll"
req = urllib2.Request(url2, data=json.dumps(queryJson), headers=headers)
response = opener.open(req)
esdata = response.read()
self.processData(esdata)
def processData(self, data):
#msg = json.dumps(data, ensure_ascii=False)
msg = json.loads(data)
#print(type(data))
#print(msg['hits']['hits'][2]['_source']['f8'])
scrollID = msg["_scroll_id"]
attacks = msg['hits']['hits']
for attack in attacks:
self.result = '%s%s\n' % (self.result, attack['_source'])
if len(attacks) > 0:
self.exportData(scrollID)
else:
self.writeFile(self.result)
def writeFile(self, data):
try:
filename = 'AttackData_%s.txt' % (self.siteid)
f = open(filename, "w+")
f.write(data)
print("site%s successfully exported" % self.siteid)
finally:
f.flush()
f.close()
if __name__ == '__main__':
siteids = [1912, 1918]
for siteid in siteids:
exportEsData("http://127.0.0.1:9201", siteid, "2017-07-03", "2017-12-01", "5m").exportData("")
os.system("pause")
| [
"noreply@github.com"
] | noreply@github.com |
59c3fd7c83f8eb2ab7e4119df939087c196bd6e2 | 83ce49f99ac592fd282b9fc22331404478574af0 | /tests/testWriteOutStatusListener.py | d270b756a6b5842054961987c863ed5e15dab168 | [
"MIT"
] | permissive | groceryheist/UWBotThings | 86f028336bd45f2b0586330fcada6e91f94a265f | 1729e10f8689f1964db311084642a89ba5005d42 | refs/heads/master | 2021-01-23T02:35:08.351571 | 2015-01-17T00:03:22 | 2015-01-17T00:03:22 | 26,033,768 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | import unittest
import sys
sys.path.append('../data') | [
"groceryheist@gmail.com"
] | groceryheist@gmail.com |
0f1207a68c78b37550df565c7f56a91f48dc945b | 6c1dbe0c1179d6f908ba9e110e0ead633ab1d52d | /naive_bayes/naive_bayes.py | 8b17918b4a80b76d7572f8f55421fec3ed6ce5f6 | [] | no_license | leohentschker/Machine-Learning | 9694964294a4a0e353e077563fa2cc82dbe043bb | 7311d5cad07227a1279d97822257f122fe3b542d | refs/heads/master | 2021-05-04T11:35:24.061230 | 2016-12-25T17:15:59 | 2016-12-25T17:15:59 | 48,345,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,020 | py | import csv
import copy
import random
import numpy as np
import math
def load_test_data(test_file_path):
"""
Read in the data in the correct format
"""
lines = csv.reader(open(test_file_path, "rb"))
unformatted_data_set = list(lines)
# map the data to floats for calculation purposes
formatted_data = [map(float, data_line) for data_line in unformatted_data_set]
return formatted_data
def split_data(test_data, split_ratio):
"""
Splits a dataset into two pieces, one to
be used for training and the other for
testing
"""
split_index = int(split_ratio * len(test_data))
# randomly permute the values in place
random.shuffle(test_data)
# take slices of the determined size
training_set = copy.copy(test_data[:split_index])
test_data = copy.copy(test_data[split_index:])
return training_set, test_data
def separate_by_class(dataset, class_index):
"""
Returns a dictionary mapping the class
values to their data values. By default this function
assumes that the class value is stored at the last index
"""
class_dictionary = {}
for data_row in dataset:
# determine what to use as a key
# for the dictionary
dict_key = data_row[class_index]
# remove the class attribute from the
# data so it doesn't screw up stats
del data_row[class_index]
if dict_key not in class_dictionary:
class_dictionary[dict_key] = [data_row]
else:
class_dictionary[dict_key].append(data_row)
return class_dictionary
def summarize(dataset):
"""
Takes in a dataset in the format [(a, b, c), (d, e, f)]
where each tuple represents a class value that we are considering
"""
summaries = [(np.mean(attribute), np.std(attribute)) for attribute in zip(*dataset)]
return summaries
def summarize_by_class(dataset, class_index):
separated_dict = separate_by_class(dataset, class_index)
summarized_data_dict = {}
for class_key, data_rows in separated_dict.iteritems():
summary = summarize(data_rows)
summarized_data_dict[class_key] = summary
return summarized_data_dict
def calculate_probability(value, mean, stdev):
"""
Takes in a value, the mean for that distribution
and the standard devation and returns the probability
of that value occurring. Rests on the idea that
the distribution is normal
"""
exponent = math.exp(- pow(value - mean, 2) / (2 * pow(stdev, 2)))
return exponent / (stdev * pow(math.pi * 2, .5))
def calculate_class_probabilities(summaries, input_vector):
"""
Stores a dictionary with class keys mapping to the probability
that the input vector maps to that class.
"""
probabilities = {}
for class_key, class_summary in summaries.iteritems():
# initialize the probability for the class to 1 to
# prevent keyerrors
probabilities[class_key] = float(1)
for (mean, stdev), input_val in zip(class_summary, input_vector):
attribute_probability = calculate_probability(input_val, mean, stdev)
probabilities[class_key] *= attribute_probability
return probabilities
def predict(summaries, input_vector):
"""
Given the mean and stdev summaries as well as
an input vector, this function determines which
class the input vector is most likely to
fall into
"""
class_probabilities = calculate_class_probabilities(summaries, input_vector)
probability_tuples = [(probability, key) for key, probability in class_probabilities.items()]
max_probability, matched_class = max(probability_tuples)
return matched_class
def get_predictions(summaries, test_sets):
"""
Takes in a set of summaries and a list
of datasets to test on and generates predictions
based upon the training data
"""
predictions = []
for test_data in test_sets:
result = predict(summaries, test_data)
predictions.append(result)
return predictions
def get_accuracy(test_sets, predictions, class_index):
"""
Determines the percentage of the test cases
that we calculated accurately
"""
actual_classes = [test_set[class_index] for test_set in test_sets]
num_correct = sum(int(actual == prediction) for actual, prediction in zip(actual_classes, predictions))
return float(num_correct) / len(test_sets)
def run_bayes(data_file_path, class_index = -1):
input_data = load_test_data(data_file_path)
split_ratio = .5
training_data, test_data = split_data(input_data, split_ratio)
class_summarized_data = summarize_by_class(training_data, class_index)
predictions = get_predictions(class_summarized_data, test_data)
accuracy = get_accuracy(test_data, predictions, class_index)
print "ACCURACY", accuracy
if __name__ == "__main__":
test_file_path = "pima-indians-diabetes.data"
run_bayes(test_file_path)
| [
"leohentschker@college.harvard.edu"
] | leohentschker@college.harvard.edu |
61fff39fec3f955634e1aee39a5f7ebf58424b5e | d52d3ab219e3d7b44a2f117482da1b649365c79a | /scripts/combine_caffe_model/combine_distill_faceRecognition_param.py | 188a089b3207975c860386fd975d0270c2caf9d2 | [
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla"
] | permissive | nbadalls/caffe-server | 949949295f98aa51fff93d98ddb4e2338d76b0b8 | 11c5dbea5134d7c4b234645e4fb597ec2eeb5527 | refs/heads/master | 2020-03-18T11:26:16.475558 | 2018-09-26T08:31:11 | 2018-09-26T08:31:11 | 134,671,222 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,151 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 11 10:52:53 2018
@author: minivision
"""
from __future__ import print_function
import sys
sys.path.append('/home/minivision/SoftWare/caffe-server/python')
import caffe_pb2
from combine_model_param import *
from layer_lib import *
import time
import os
from getPatchInfoFunc import *
class DistillfaceRecognition(combineModelParam):
def __init__(self, single_root_path, dst_combine_path):
combineModelParam.__init__(self,single_root_path,dst_combine_path )
def create_combine_deploy(self):
net_proto , record_layer_index= combine_utility.combine_single_deploy(self.nets, 1)
#adjust teacher net's learning rate to 0
for elem_layer in net_proto.layer:
if elem_layer.name.find("teacher") >=0:
for elem_param in elem_layer.param:
elem_param.lr_mult = 0
elem_param.decay_mult = 0
f = open(self.dst_model_path['dst_deploy'], 'w')
print(net_proto, file = f)
f.close()
if __name__ == '__main__':
date = time.strftime('%Y-%m-%d %H-%M-%S',time.localtime(time.time()))
# root_path = "/media/minivision/OliverSSD/LiveBody/select_best_result/HistoryBestModel"
root_path = "/media/minivision/OliverSSD/FaceRecognition/verification_select_best_models/combine_disdill/2018-08-28-1"
dst_path = "/media/minivision/OliverSSD/FaceRecognition/verification_select_best_models/combine_disdill/2018-08-28-1/combine_model"
if not os.path.exists(dst_path):
os.makedirs(dst_path)
patch_folder = ["2018-05-07_AdditMarginCdata-b0.35s30_fc_0.35_112x96_b+asian+cap10+pos+beid-MS_faceNet-20-light2s4-bn_zkx_iter_190000",
"2018-08-03_AMImageMtcnn-b0.3s30_fc_0.35_112x96_clean-b+add1+2-1-delAsia-b3-P0.0_MobileFaceNet-bn_zkx_iter_165000" ]
prefix_names = ["student", "teacher"]
f = open('{}/net_info.txt'.format(dst_path), 'w')
print(patch_folder, file = f)
print(prefix_names, file = f)
f.close()
combine_model = DistillfaceRecognition(root_path,dst_path )
combine_model.model_combination(patch_folder, prefix_names)
| [
"1277046020@qq.com"
] | 1277046020@qq.com |
f018fcf4181c5fd67b0cda79b34881187b20e51f | 9e5978c120d84c2af7ef04f9119a94d2e3edccc2 | /TGCN/load_data1.py | 607c234574531bf0dc8c71d7a929b98c7c62ec49 | [] | no_license | fengmaochairman/CorrelationNet_DP_taix-demand | 5482760e54edb19d215f7ff0e3ef79fa693e6644 | db75ebde9fbb25029366c6ee71e6b89cead507d9 | refs/heads/master | 2020-09-25T12:22:07.122632 | 2020-05-22T08:07:04 | 2020-05-22T08:07:04 | 225,878,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,156 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 10 15:15:50 2018
@author: Administrator
"""
import numpy as np
import pandas as pd
from pandas import to_datetime
import scipy.sparse as sp
import os
# read data of one day and one direction
def read_file(path, filename):
calfile = os.path.join(path, filename)
original = pd.read_csv(calfile, header=None)
data = pd.DataFrame(columns=["time", "cross", "direction", "number"])
data["time"] = original[0]
data["cross"] = original[1]
data["direction"] = original[2]
data["number"] = original[3] + original[4]
# 记录4:00-21:00的流量数据
data = data.iloc[48:252, :]
return data
# read data of one day
def read_data_day(path, date):
day_data = pd.DataFrame(columns=["time", "cross", "direction", "number"])
caldir = os.path.join(path, date)
dirs = os.listdir(caldir)
dirs.sort() # 顺序:east-north-south-west
# read data of one day
for f in dirs:
# if re.match(r'wuhe_zhangheng.*\.csv', f):
day_data = day_data.append(read_file(caldir, f), ignore_index=True)
# print('day_data:\n%s'%(day_data))
return day_data
# 选择实验日期
def date_select(path):
dirs = os.listdir(path)
# 去除春节几天数据(2月4日--2月9日)
for i in range(2, 9):
str1 = '02-0' + str(i)
dirs.remove(str1)
# 缺失数据
for i in range(12, 16):
str1 = '01-' + str(i)
dirs.remove(str1)
# 周末
for i in range(19, 21):
str1 = '01-' + str(i)
dirs.remove(str1)
# 周末
for i in range(26, 28):
str1 = '01-' + str(i)
dirs.remove(str1)
dirs.sort() # 路径排序
return dirs
# build adjacen matrix of test areas
def build_adjacent_matrix(path, date):
caldir = os.path.join(path, date)
dirs1 = os.listdir(caldir)
dirs1.sort() # 顺序:east-north-south-west
cross_set = []
for file in dirs1:
cross = file.split('-')[0]
if cross not in cross_set:
cross_set.append(cross)
print('cross set:\n%s' % (cross_set))
# edges_map 中每一项为id: number,即节点id对应的编号为number
road_map = {j: i for i, j in enumerate(cross_set)}
print('road_map:\n%s' % (road_map))
adj = np.array([[0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 0],
[0, 1, 1, 0, 1, 1],
[0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0]])
print('adj:\n%s'%(adj))
return adj
# 特征处理
def preprocess_data(train_data, test_data, lags, pred_len):
N = 17*12
trainX, trainY, testX, testY = [], [], [], []
for i in range(int(train_data.shape[0] / N)):
for j in range(lags, N):
trainX.append(train_data[N * i + (j - lags): N * i + j])
trainY.append(train_data[N * i + j: N * i + (j + pred_len)])
for i in range(int(test_data.shape[0] / N)):
for j in range(lags, N):
testX.append(test_data[N * i + (j - lags): N * i + j])
testY.append(test_data[N * i + j: N * i + (j + pred_len)])
trainX1 = np.array(trainX)
trainY1 = np.array(trainY)
testX1 = np.array(testX)
testY1 = np.array(testY)
return trainX1, trainY1, testX1, testY1
# get and preprocess data
def get_data(path):
raw_data = pd.DataFrame(columns=["time", "cross", "direction", "number"])
# 选择实验时间
dirs = date_select(path)
ndays = len(dirs)
print('ndays:%d\ndirs:\n%s'%(ndays, dirs))
# 获取adjacent matrix
adj = build_adjacent_matrix(path, dirs[0])
print(adj.shape[0])
for day in dirs:
raw_data = raw_data.append(read_data_day(path, day))
print('raw_data:\n%s'%(raw_data))
# encode time in raw data to weekday and timeindex(the n minutes of the day)
df_dt = to_datetime(raw_data.loc[:, "time"], format="%Y/%m/%d %H:%M:%S")
all_data = pd.DataFrame({
"time": df_dt,
"day":df_dt.dt.day,
"cross": raw_data["cross"],
"direction": raw_data["direction"],
"number": (raw_data["number"]).astype(int)},
columns=["time", "day", "cross", "direction", "number"]) #固定dataframe顺序
print('all_data:\n%s'%(all_data))
all_data = all_data.groupby(["time", "day", "cross"]).sum().reset_index(level=["time", "day", "cross"])
print('all_data:\n%s' % (all_data))
train_data = all_data[~all_data['day'].isin([21, 17])]
print('train_data:\n%s' % (train_data))
test_data = all_data.loc[all_data['day'].isin([21])]
test_data = test_data.append(all_data.loc[all_data['day'].isin([17])])
# test_data = test_data.sort_values(by = ["day"], ascending=False)
print('test_dat:\n%s'%(test_data))
train_data = np.array(train_data.iloc[:,3])
test_data = np.array(test_data.iloc[:, 3])
train_data = train_data.reshape((train_data.shape[0]//adj.shape[0], adj.shape[0]))
test_data = test_data.reshape((test_data.shape[0]//adj.shape[0], adj.shape[0]))
print(train_data.shape)
print(test_data.shape)
return train_data, test_data, adj
| [
"58518044+fengmaochairman@users.noreply.github.com"
] | 58518044+fengmaochairman@users.noreply.github.com |
132233e2f673ca46ed09870bc39f3069ada4e184 | d79c4fa73bd26550cfaa5d1a3259b20bda1fba46 | /Tests/Services/test_distance_service.py | 79975e946cb9b7d65f9ff492746e0f981a60d6c6 | [] | no_license | dev-11/coding-test | 37e8372b4eff1b6d5c9b0bd2c0c13f88d0940736 | 7bd56b00d48a0419206b99170075fe34183830ee | refs/heads/master | 2021-07-11T02:49:44.832998 | 2021-03-28T12:08:47 | 2021-03-28T12:08:47 | 233,877,609 | 0 | 0 | null | 2020-01-14T15:52:20 | 2020-01-14T15:52:19 | null | UTF-8 | Python | false | false | 2,074 | py | import unittest
from Services import DistanceService
from Tests.TestEnvironment import get_test_stores
class DistanceServiceTests(unittest.TestCase):
def test_get_stores_within_range_returns_every_store_in_one_mile_range(self):
a = [51.460903, -0.301702]
stores = get_test_stores()
service = DistanceService()
result = service.get_stores_within_range(a, stores, 1)
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['geolocation']['latitude'], 51.463437)
self.assertEqual(result[0]['geolocation']['longitude'], -0.288602)
self.assertEqual(result[0]['name'], 'Richmond')
self.assertEqual(result[0]['postcode'], 'TW9 1YB')
def test_get_stores_within_range_returns_every_store_in_five_miles_range(self):
a = [51.460903, -0.301702]
stores = get_test_stores()
service = DistanceService()
result = service.get_stores_within_range(a, stores, 5)
self.assertEqual(len(result), 4)
self.assertEqual(result[0]['geolocation']['latitude'], 51.405065)
self.assertEqual(result[0]['geolocation']['longitude'], -0.238117)
self.assertEqual(result[0]['name'], 'New_Malden')
self.assertEqual(result[0]['postcode'], 'SW20 0JQ')
self.assertEqual(result[1]['geolocation']['latitude'], 51.442892)
self.assertEqual(result[1]['geolocation']['longitude'], -0.412804)
self.assertEqual(result[1]['name'], 'Feltham')
self.assertEqual(result[1]['postcode'], 'TW13 4EX')
self.assertEqual(result[2]['geolocation']['latitude'], 51.482172)
self.assertEqual(result[2]['geolocation']['longitude'], -0.314343)
self.assertEqual(result[2]['name'], 'Brentford')
self.assertEqual(result[2]['postcode'], 'TW8 8JW')
self.assertEqual(result[3]['geolocation']['latitude'], 51.463437)
self.assertEqual(result[3]['geolocation']['longitude'], -0.288602)
self.assertEqual(result[3]['name'], 'Richmond')
self.assertEqual(result[3]['postcode'], 'TW9 1YB')
| [
"otto@masterbranch.io"
] | otto@masterbranch.io |
4aa2a44af09dce4919240097d2cf50df5c2286cc | 56f155db28b5703786a08fef0ecf821aefb6ffe5 | /lib/testmill/test/test_images.py | f43dc574d9685d3d89f1196cbad690c754365c2e | [
"Apache-2.0"
] | permissive | h4ckl4bm3/testmill | 595c30facec943b3593febe080b1e6602e82dee2 | 607d5622f14785e1b2f785e162ae862c5e638c5f | refs/heads/master | 2021-05-27T08:58:17.899271 | 2013-04-10T15:40:12 | 2013-04-10T15:41:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | # Copyright 2012-2013 Ravello Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
import os
from testmill.main import main
from testmill.test import *
@systemtest
class TestImages(TestSuite):
"""Run some basic test on the standard images."""
def test_images(self):
args = get_common_args()
args += ['run', '-m', 'platformtest.yml',
'platformtest', 'sh check_image.sh']
retval = main(args)
assert retval == 0
| [
"geertj@gmail.com"
] | geertj@gmail.com |
af062882db668d2127cd9f91c3691c449ef42328 | 12c41119156dd3783c3801e07f5f973289f26bb0 | /aliyun-python-sdk-green/aliyunsdkgreen/request/v20170823/DescribeWebsiteScanResultRequest.py | f09d346c2c80b7eb9219b58dbf61434df7b191ec | [
"Apache-2.0"
] | permissive | toywei/aliyun-openapi-python-sdk | bfe0893da38af9b222ce072fd7587d5b6cdce204 | ce8f683e3201fca8c473512267f50a34f71e31d3 | refs/heads/master | 2020-08-07T23:42:00.053692 | 2019-10-08T08:50:21 | 2019-10-08T08:50:21 | 213,626,962 | 1 | 0 | NOASSERTION | 2019-10-08T11:43:15 | 2019-10-08T11:43:15 | null | UTF-8 | Python | false | false | 2,640 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeWebsiteScanResultRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Green', '2017-08-23', 'DescribeWebsiteScanResult','green')
def get_TotalCount(self):
return self.get_query_params().get('TotalCount')
def set_TotalCount(self,TotalCount):
self.add_query_param('TotalCount',TotalCount)
def get_SubServiceModule(self):
return self.get_query_params().get('SubServiceModule')
def set_SubServiceModule(self,SubServiceModule):
self.add_query_param('SubServiceModule',SubServiceModule)
def get_SiteUrl(self):
return self.get_query_params().get('SiteUrl')
def set_SiteUrl(self,SiteUrl):
self.add_query_param('SiteUrl',SiteUrl)
def get_SourceIp(self):
return self.get_query_params().get('SourceIp')
def set_SourceIp(self,SourceIp):
self.add_query_param('SourceIp',SourceIp)
def get_HandleStatus(self):
return self.get_query_params().get('HandleStatus')
def set_HandleStatus(self,HandleStatus):
self.add_query_param('HandleStatus',HandleStatus)
def get_Domain(self):
return self.get_query_params().get('Domain')
def set_Domain(self,Domain):
self.add_query_param('Domain',Domain)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_CurrentPage(self):
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self,CurrentPage):
self.add_query_param('CurrentPage',CurrentPage)
def get_Label(self):
return self.get_query_params().get('Label')
def set_Label(self,Label):
self.add_query_param('Label',Label)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
94a57d37ee01ad48525f12206f52a6d3317127e3 | 04164e028417ff8472b9f2bfec0ec45b0888f743 | /development/pysrc/extract.py | 1b6bc09351d99ac31b3285f0ed8f27a28be337e3 | [] | no_license | Huaguiyuan/quantum-honeycomp | c2b810ff5f5e25d41b1f0c1c1ff7ae500b04dc31 | 50deb0e59fffe4031f05094572552ca5be59e741 | refs/heads/master | 2020-03-22T19:09:58.148862 | 2018-07-08T19:51:58 | 2018-07-08T19:51:58 | 140,510,217 | 1 | 2 | null | 2018-07-11T02:20:32 | 2018-07-11T02:20:32 | null | UTF-8 | Python | false | false | 2,779 | py | # routines to extract channels from a matrix
from __future__ import division
import numpy as np
def spin_channel(m,spin_column=None,spin_row=None,has_spin=True):
"""Extract a channel from a matrix"""
if not has_spin: return m # return initial
if (spin_row is None) or (spin_column is None): return m # return initial
n = m.shape[0] # shape of the matrix
n2 = n//2 # number of orbitals
out = np.zeros((n,n),dtype=np.complex)
if spin_column=="up": ii = 0
else: ii = 1
if spin_row=="up": jj = 0
else: jj = 1
for i in range(n2):
for j in range(n2): out[i,j] = m[2*i+ii,2*j+jj]
return np.matrix(out)
def swave(m):
"""Extract the swave pairing from a matrix, assuming
the Nambu spinor basis"""
n = m.shape[0]//4 # number of sites
ds = np.zeros(n,dtype=np.complex) # pairing
for i in range(n):
ds[i] = m[4*i,4*i+2] # get the pairing
return ds
def mz(m):
"""Extract the z component of the magnetism, assume spin degree of freedom"""
n = m.shape[0]//2 # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = (m[2*i+1,2*i+1] - m[2*i,2*i]).real/2. # get the pairing
return ds
def mx(m):
"""Extract the z component of the magnetism, assume spin degree of freedom"""
n = m.shape[0]//2 # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = m[2*i,2*i+1].real
return ds
def my(m):
"""Extract the z component of the magnetism, assume spin degree of freedom"""
n = m.shape[0]//2 # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = -m[2*i,2*i+1].imag
return ds
def onsite(m,has_spin=True):
"""Extract the z component of the magnetism, assume spin degree of freedom"""
if has_spin: # has spin degree of freedom
n = m.shape[0]//2 # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = (m[2*i,2*i].real + m[2*i+1,2*i+1].real)/2.
return ds
else:
n = m.shape[0] # number of sites
ds = np.zeros(n).real # pairing
for i in range(n):
ds[i] = m[i,i].real
return ds
def hopping_spinful(m,cutoff=0.001):
"""Extract hopping"""
n = m.shape[0]//2 # number sites
ii = []
jj = []
ts = []
for i in range(n):
for j in range(i,n):
t = np.abs(m[2*i,2*j]) + np.abs(m[2*i+1,2*j+1])
if t>cutoff:
ii.append(i)
jj.append(j)
ts.append(t)
return ii,jj,np.array(ts) # return pairs
def hopping_spinless(m,cutoff=0.001):
"""Extract hopping"""
n = m.shape[0] # number of sites
ii = []
jj = []
ts = []
for i in range(n):
for j in range(i,n):
t = np.abs(m[i,j])
if t>cutoff:
ii.append(i)
jj.append(j)
ts.append(t)
return ii,jj,np.array(ts) # return pairs
| [
"jose.luis.lado@gmail.com"
] | jose.luis.lado@gmail.com |
431bf55da258a58f0591e70b0994f0d2e973bae0 | fc6f4aecaa611c1eb8fb9f7b7eb681673fed8dbe | /is13/data/sougou/dataset2/is13/examples/my-elman-forward.py | 48faffad5b5ef0c83b7b01c3dadbc0685f351632 | [] | no_license | WUT-IDEA/domain-ner | 3d27a25887b48b9074b08bd717932a380d5f01e5 | 685f4ffd387f0fa6d83734af3c7d3ac4d7c142fc | refs/heads/master | 2020-03-09T17:56:09.797419 | 2018-04-10T14:47:06 | 2018-04-10T14:47:06 | 128,920,023 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,042 | py | import numpy
import time
import sys
import subprocess
import os
import random
import time
from theano import tensor as T
from prepare_data_for_rnn import label2idx, dictionary2, get_datalist,my_evaluate
from elman import model
from tools import shuffle, minibatch, contextwin,writelist,listmax,saveIntoFile,get_word_posTagging
def conlleval(p, g, w, filename):
'''
INPUT:
p :: predictions
g :: groundtruth
w :: corresponding words
OUTPUT:
filename :: name of the file where the predictions
are written. it will be the input of conlleval.pl script
for computing the performance in terms of precision
recall and f1 score
'''
out = ''
for sl, sp, sw in zip(g, p, w):
out += 'BOS O O\n'
for wl, wp, w in zip(sl, sp, sw):
out += w + ' ' + wl + ' ' + wp + '\n'
out += 'EOS O O\n\n'
f = open(filename,'w')
f.writelines(out)
f.close()
if __name__ == '__main__':
s = {'fold':3, # 5 folds 0,1,2,3,4
'lr':0.0627142536696559,
'verbose':1,
'decay':False, # decay on the learning rate if improvement stops
'win':7, # number of words in the context window
'bs':9, # number of backprop through time steps
'nhidden':100, # number of hidden units
'seed':345,
'emb_dimension':100, # dimension of word embedding
'nepochs':50}
folder = os.path.basename(__file__).split('.')[0]
print 'folder=', folder
if not os.path.exists(folder): os.mkdir(folder)
# load the dataset
print 'load the dataset...'
# train_set, valid_set, test_set, dic = load.atisfold(s['fold'])
# idx2label = dict((k, v) for v, k in dic['labels2idx'].iteritems())
# idx2word = dict((k, v) for v, k in dic['words2idx'].iteritems())
#
# train_lex, train_ne, train_y = train_set
# valid_lex, valid_ne, valid_y = valid_set
# test_lex, test_ne, test_y = test_set
#
# vocsize = len(dic['words2idx'])
# print 'vosize=', vocsize # 572
# nclasses = len(dic['labels2idx'])
# print nclasses # 127
# nsentences = len(train_lex)
# print 'train data length:', nsentences # 3983 to train; test_lex:893
idx2label = dict((k, v) for v, k in label2idx.iteritems())
idx2word = dict((k, v) for v, k in dictionary2.iteritems())
# initial running, obtain zhengzhi trainset
train_lex = get_datalist('dataset2/trainx.txt')
train_y = get_datalist('dataset2/trainy.txt')
test_lex = get_datalist('dataset2/testx.txt')
test_y = get_datalist('dataset2/testy.txt')
# valid_lex=get_datalist('../dataset2/valix.txt')
# valid_y = get_datalist('../dataset2/valiy.txt')
vocsize = len(dictionary2)
print 'vosize=', vocsize # 572
nclasses = len(label2idx)
print 'classes:', nclasses # 127
nsentences = len(train_lex)
print 'train data length:', nsentences # 3983 to train; test_lex:893
print 'test data length:', len(test_lex)
print 'instanciate the model'
numpy.random.seed(s['seed'])
random.seed(s['seed'])
rnn = model(nh = s['nhidden'],nc = nclasses,ne = vocsize, de = s['emb_dimension'], cs = s['win'] )
# train with early stopping on validation set
print 'train with set...'
best_f1 = -numpy.inf
s['clr'] = s['lr']
print time.localtime(time.time())
for e in xrange(s['nepochs']):
# shuffle
shuffle([train_lex, train_y], s['seed'])
s['ce'] = e
tic = time.time()
for i in xrange(nsentences):
#print 'i=', i
cwords = contextwin(train_lex[i], s['win'])
words = map(lambda x: numpy.asarray(x).astype('int32'), minibatch(cwords, s['bs']))
labels = train_y[i]
#print 'label=', labels
for word_batch , label_last_word in zip(words, labels):
t=rnn.train(word_batch, label_last_word, s['clr'])
rnn.normalize()
if (i+1)%270==0 & s['verbose']:
print '[learning] epoch %i >> %2.2f%%'%(e,(i+1)*100./nsentences),'completed in %.2f (sec) <<\r'%(time.time()-tic)
# sys.stdout.flush()
# evaluation // back into the real world : idx -> words
print 'evaluation step1: back into the real world : idx -> words'
predictions_test = [ map(lambda x: idx2label[x], \
rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
for x in test_lex ]
groundtruth_test = [ map(lambda x: idx2label[x], y) for y in test_y]
words_test = [ map(lambda x: idx2word[x], w) for w in test_lex]
# predictions_valid = [ map(lambda x: idx2label[x], \
# rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
# for x in valid_lex ]
# gro undtruth_valid = [ map(lambda x: idx2label[x], y) for y in valid_y ]
# words_valid = [ map(lambda x: idx2word[x], w) for w in valid_lex]
# evaluation // compute the accuracy using conlleval.pl
print 'evaluation step2...compute the accuracy using conlleval.pl'
conlleval(predictions_test, groundtruth_test, words_test, folder + '/current.test.txt')
res_test = my_evaluate(folder + '/current.test.txt')
# res_valid = conlleval(predictions_valid, groundtruth_valid, words_valid, folder + '/current.valid.txt')
if res_test['f1'] > best_f1:
rnn.save(folder)
best_f1 = res_test['f1']
print 'now,best_f1=', best_f1
if s['verbose']:
tempstr= 'NEW BEST: epoch '+str(e)+', best test P ,R, F1 '+str(res_test['p'])+' '+str(res_test['r'])+' '+str(res_test['f1'])
f = open('dataset2_result.txt', 'a')
f.write(tempstr + '\n')
f.close()
print tempstr #'NEW BEST: epoch', e, 'valid F1', res_valid['f1'], 'best test F1', res_test['f1'], ' '*20
# s['vf1'], s['vp'], s['vr'] = res_valid['f1'], res_valid['p'], res_valid['r']
s['tf1'], s['tp'], s['tr'] = res_test['f1'], res_test['p'], res_test['r']
s['be'] = e
# subprocess.call(['rename', folder + '/current.test.txt', folder + '/best.test.txt']) #mv->rename
#subprocess.call(['rename', folder + '/current.valid.txt', folder + '/best.valid.txt'])
if os.path.isfile(folder+'/best.test.txt'):
os.remove(folder+'/best.test.txt')
os.rename(folder + '/current.test.txt', folder + '/best.test.txt')
# if os.path.isfile(folder + '/best.valid.txt'):
# os.remove(folder + '/best.valid.txt')
# os.rename(folder + '/current.valid.txt', folder + '/best.valid.txt')
#print 'test.... test.... test.... test....'
else:
print ''
# learning rate decay if no improvement in 10 epochs
if s['decay'] and abs(s['be']-s['ce']) >= 10: s['clr'] *= 0.5
if s['clr'] < 1e-5: break
print 'BEST RESULT: epoch', e, 'best test F1', s['tf1']
print 'epoch finished.\n',time.localtime(time.time())
#co-train: produce k number new high believable new train data for CRF
# pre_train_x=get_datalist('co-train/pre_train_x.txt')
# print 'before: length of pre_train:',len(pre_train_x)
# count=0
# sentences_and_scores={}
# pre_train=[]
# for x in pre_train_x:
# scores = rnn.myclassify(
# numpy.asarray(contextwin(x, s['win'])).astype('int32')) # each word's 127 lebels score in each line[[label1_score,label2_score,..][label1_score,label2_score,..]]
# maxscores=map((lambda x:listmax(x)),scores)
# sentence_score=sum(maxscores)
# sentence_label =map(lambda x: idx2label[x],rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))
# x=x.tolist()
# count += 1
# pre_train.append(x)
# sentences_and_scores[count]=[x,sentence_label,sentence_score]#{1:[sentence,pred_label,score],2:[sentence,pred_label,score]}
#
#
# #sort the sentence_and_scores by score and save first k sentence into newHMM_train_data
# crf = open('crf/rnn-produced300-train', 'w')
# sorted_dic = sorted(sentences_and_scores.items(), key=lambda d: d[1][2], reverse=True) # sort by value.[('china', 9), ('io', 4), ('ret', 2), ('me', 2)]
# i=0
# for sentence in sorted_dic:
# if i>=300:break #k=300
# i=i+1
# pre_train.remove(sentence[1][0])
# assert len(sentence[1][0])==len(sentence[1][1]) #sentence vs label
# for index in xrange(0,len(sentence[1][0])):
# word = idx2word[sentence[1][0][index]]
# label= sentence[1][1][index]
# crf.write(str(word) + ' ' + str(label) + '\n')
# crf.write('\n')
#
# crf.close()
# print 'after: length of pre_train:', len(pre_train)
# writelist(pre_train,'co-train/pre_train_x.txt')
# print '----done!!!-----'
| [
"ziwuyoulin@foxmail.com"
] | ziwuyoulin@foxmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.