blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c3a6b20e85b3de2e36ba3fd12147a8aab0010593 | 6770e129cba79abad641e68376d4afa3bd40d847 | /fifth.py | 6b30fe23deca7146d1f9c2cbe5c186c15119ec75 | [] | no_license | EPesnya/PythonOOP | ed6ff65f32d33246da8d40d7967dc3124846feea | 860792343ea82628d03a132af75f652a2516adfd | refs/heads/master | 2021-05-19T09:31:49.727432 | 2020-04-27T13:28:59 | 2020-04-27T13:28:59 | 251,631,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,518 | py | # Песня Евгений
import numpy as np
import matplotlib.pyplot as plt
from abc import ABC
import second as sec
class AbstractExplicitRKmethod(ABC):
    """Base class for explicit Runge-Kutta solvers of the scalar ODE u' = f(u).

    Concrete subclasses must fill in the Butcher tableau: ``a`` (stage
    coefficient matrix) and ``b`` (quadrature weights) in their ``__init__``.
    """
    def __init__(self, f, u_0, num_blocks, t_start, t_end):
        # Butcher tableau entries; set by subclasses. ``c`` is declared for
        # completeness but never read by solve()/k().
        self.a = None
        self.b = None
        self.c = None
        self.f = f      # right-hand side f(u) of the ODE
        self.u_0 = u_0  # initial condition u(t_start)
        self.num_blocks, self.num_points = num_blocks, num_blocks + 1
        # Uniform step size over [t_start, t_end].
        self.dt = (float(t_end) - float(t_start))/self.num_blocks
        self.solution_array = np.zeros(self.num_points)
        self.time_array = np.linspace(t_start, t_end, self.num_points)
        self.t_start, self.t_end = float(t_start), float(t_end)
    def solve(self):
        """March the scheme from t_start to t_end, filling solution_array."""
        self.solution_array[0] = self.u_0
        for i in range(self.num_blocks):
            u_old = self.solution_array[i]
            # u_{n+1} = u_n + dt * sum_j b_j * k_j
            self.solution_array[i + 1] = u_old + self.dt * np.dot(self.b, self.k(u_old))
    def k(self, u_i):
        """Evaluate the stage derivatives k_j at state u_i.

        Valid for explicit tableaus only: stage j+1 uses the previously
        computed stages via row j+1 of ``a`` (later entries of k are still
        zero, so an upper-triangular part of ``a`` would be silently ignored).
        """
        k = np.zeros(len(self.b))
        k[0] = self.f(u_i)
        for i in range(len(k) - 1):
            k[i + 1] = self.f(u_i + self.dt * np.dot(self.a[i + 1, :], k))
        return k
    def plot_solution(self):
        """Add this solver's trajectory to the current matplotlib figure."""
        plt.plot(self.time_array, self.solution_array, '-', linewidth=2, label=self.__class__.__name__)
class ExplicitEuler(AbstractExplicitRKmethod):
    """Forward Euler: the one-stage, first-order explicit RK scheme."""

    def __init__(self, f, u_0, num_blocks, t_start, t_end):
        super().__init__(f, u_0, num_blocks, t_start, t_end)
        # Butcher tableau of forward Euler (single stage, weight 1).
        self.a, self.b = np.array([0]), np.array([1])
class Heun(AbstractExplicitRKmethod):
    """Heun's method (explicit trapezoidal rule): two stages, second order."""

    def __init__(self, f, u_0, num_blocks, t_start, t_end):
        super().__init__(f, u_0, num_blocks, t_start, t_end)
        # Butcher tableau: predictor uses a full Euler step, the update
        # averages the slopes at both ends.
        self.a = np.array([[0, 0],
                           [1, 0]])
        self.b = np.array([1/2, 1/2])
class RK4(AbstractExplicitRKmethod):
    """The classic fourth-order Runge-Kutta scheme."""

    def __init__(self, f, u_0, num_blocks, t_start, t_end):
        super().__init__(f, u_0, num_blocks, t_start, t_end)
        # Classic RK4 Butcher tableau: two half-step stages, one full step,
        # Simpson-like weights.
        self.a = np.array([[0,   0,   0, 0],
                           [1/2, 0,   0, 0],
                           [0,   1/2, 0, 0],
                           [0,   0,   1, 0]])
        self.b = np.array([1/6, 1/3, 1/3, 1/6])
class ImplicitTrapezoidal(AbstractExplicitRKmethod):
    """Implicit trapezoidal rule; each step is solved with Newton iteration."""
    def solve(self):
        """Fill solution_array; each step finds the root of the residual F."""
        self.solution_array[0] = self.u_0
        epsilon = 1e-3  # absolute tolerance for terminating the Newton loop
        for i in range(self.num_blocks):
            u_old = self.solution_array[i]
            # Residual of the trapezoidal update; zero at the new value u_{n+1}.
            F = lambda u_n: u_n - u_old - self.dt / 2 * (self.f(u_n) + self.f(u_old))
            # Numerical derivative of F -- the [-1/2, 0, 1/2] weights suggest a
            # central difference with step dt; NOTE(review): confirm against
            # second.DerivativeNum's actual contract.
            d_num_F = sec.DerivativeNum(F, self.dt, [-1/2, 0, 1/2])
            # Newton start: previous value, with an explicit-Euler predictor.
            u_k_0 = u_old
            u_k_1 = u_old + self.dt * self.f(u_old)
            while abs(u_k_1 - u_k_0) > epsilon:
                u_k_0 = u_k_1
                u_k_1 = u_k_1 - F(u_k_1) / d_num_F(u_k_1)
            self.solution_array[i + 1] = u_k_1
class LogisticRightHandSide:
    """Callable right-hand side f(u) = alpha * u * (1 - u / R) of the
    logistic growth ODE, with growth rate alpha and carrying capacity R."""

    def __init__(self, alpha, R):
        # Normalise both parameters to float once, at construction time.
        self._alpha, self._R = float(alpha), float(R)

    def __call__(self, u):
        # Logistic growth term (kept in the original evaluation order).
        return self._alpha * u * (1. - u/self._R)
if __name__ == "__main__":
methods_class = [ExplicitEuler, Heun, RK4, ImplicitTrapezoidal]
rhs_1 = LogisticRightHandSide(alpha=0.2, R=100.)
for method_class in methods_class:
method = method_class(f=rhs_1, u_0=2., num_blocks=30, t_start=0., t_end=80.)
method.solve()
method.plot_solution()
plt.xlabel('Время')
plt.ylabel('Популяция')
plt.grid('off')
plt.legend()
plt.show() | [
"pesnyaevgeniy@gmail.com"
] | pesnyaevgeniy@gmail.com |
c831d4fab5a0bc51b2f66fa9942e85f52d7bdf2c | 5835626bf4f7b63c833fc03e1022f0c3f281eab1 | /degrees/degrees.py | 82ec53b3b54f95c36c21553f3e4b4a26e73a96ce | [
"MIT"
] | permissive | Amirerfan/AI | 2b50cb4c4199a74d5d7c8c7821f0354208d96613 | fbba8ef9cbbb40969b73efebbd5774b5d006ab28 | refs/heads/master | 2021-05-21T08:53:06.443030 | 2020-04-24T13:49:49 | 2020-04-24T13:49:49 | 252,625,575 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,613 | py | import csv
import sys
from util import Node, StackFrontier, QueueFrontier
# Maps names to a set of corresponding person_ids
names = {}
# Maps person_ids to a dictionary of: name, birth, movies (a set of movie_ids)
people = {}
# Maps movie_ids to a dictionary of: title, year, stars (a set of person_ids)
movies = {}
def load_data(directory):
    """
    Load people.csv, movies.csv and stars.csv from `directory` into the
    module-level dicts `people` and `movies`, and build the lowercase
    name -> set-of-person-ids index `names`.
    """
    # Load people
    with open(f"{directory}/people.csv", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            people[row["id"]] = {
                "name": row["name"],
                "birth": row["birth"],
                "movies": set()
            }
            # Names are not unique: map each lowercase name to every id.
            if row["name"].lower() not in names:
                names[row["name"].lower()] = {row["id"]}
            else:
                names[row["name"].lower()].add(row["id"])
    # Load movies
    with open(f"{directory}/movies.csv", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            movies[row["id"]] = {
                "title": row["title"],
                "year": row["year"],
                "stars": set()
            }
    # Load stars
    with open(f"{directory}/stars.csv", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            try:
                people[row["person_id"]]["movies"].add(row["movie_id"])
                movies[row["movie_id"]]["stars"].add(row["person_id"])
            except KeyError:
                # Deliberately skip star rows that reference an unknown
                # person or movie id.
                pass
def main():
    """CLI entry point: load the dataset, prompt for two names, and print
    the shortest chain of co-starring movies between them."""
    if len(sys.argv) > 2:
        sys.exit("Usage: python degrees.py [directory]")
    directory = sys.argv[1] if len(sys.argv) == 2 else "large"
    # Load data from files into memory
    print("Loading data...")
    load_data(directory)
    print("Data loaded.")
    source = person_id_for_name(input("Name: "))
    if source is None:
        sys.exit("Person not found.")
    target = person_id_for_name(input("Name: "))
    if target is None:
        sys.exit("Person not found.")
    path = shortest_path(source, target)
    if path is None:
        print("Not connected.")
    else:
        degrees = len(path)
        print(f"{degrees} degrees of separation.")
        # Prepend the source so path[i] / path[i + 1] pairs line up below.
        path = [(None, source)] + path
        for i in range(degrees):
            person1 = people[path[i][1]]["name"]
            person2 = people[path[i + 1][1]]["name"]
            movie = movies[path[i + 1][0]]["title"]
            print(f"{i + 1}: {person1} and {person2} starred in {movie}")
def shortest_path(source, target):
    """
    Returns the shortest list of (movie_id, person_id) pairs
    that connect the source to the target (BFS over co-star links).

    If no possible path, returns None.
    """
    frontier = QueueFrontier()
    frontier.add(Node(source, None, None))
    # Track explored person ids (states), not Node objects. The original
    # appended Node objects and then tested `person_id not in visited`,
    # which never matched, so people could be re-explored; a set of state
    # ids fixes that and makes membership O(1).
    visited_states = set()
    while not frontier.empty():
        actor = frontier.remove()
        visited_states.add(actor.state)
        if actor.state == target:
            # Walk parent links back to the source, building the path
            # front-to-back (returns [] when source == target).
            path = []
            while actor.parent is not None:
                path.insert(0, (actor.action, actor.state))
                actor = actor.parent
            return path
        for movie_id, person_id in neighbors_for_person(actor.state):
            if person_id not in visited_states and not frontier.contains_state(person_id):
                frontier.add(Node(person_id, actor, movie_id))
    return None
def person_id_for_name(name):
    """
    Resolve a person's name to an IMDB id, prompting the user to choose
    when several people share the name. Returns None when unresolved.
    """
    matches = list(names.get(name.lower(), set()))
    if not matches:
        return None
    if len(matches) == 1:
        # Unambiguous name: return its only id directly.
        return matches[0]
    # Ambiguous name: list every candidate and let the user pick an id.
    print(f"Which '{name}'?")
    for person_id in matches:
        record = people[person_id]
        name = record["name"]
        birth = record["birth"]
        print(f"ID: {person_id}, Name: {name}, Birth: {birth}")
    try:
        chosen = input("Intended Person ID: ")
        if chosen in matches:
            return chosen
    except ValueError:
        pass
    return None
def neighbors_for_person(person_id):
    """
    Returns the set of (movie_id, person_id) pairs for everyone who
    starred in a movie with the given person (the person themselves is
    included, since they star in their own movies).
    """
    return {
        (movie_id, costar_id)
        for movie_id in people[person_id]["movies"]
        for costar_id in movies[movie_id]["stars"]
    }
if __name__ == "__main__":
main()
| [
"amirerfan.siamaky@gmail.com"
] | amirerfan.siamaky@gmail.com |
1c8aa6607f76847dc408c496fcd797637c0c99ca | 21c5d986f0b3c366e9d7cc237e91e199fd62bfdf | /vienna/wsgi.py | 41acc5e1b7c6d3b3f7899925baa9b53ad8ad633c | [] | no_license | sunar555/djangoproject | badbbe6ae95b14e244d8a3f056dfa34fe77b3c7d | 8835882c8a6be35b2a52dc40804d628e4b6d7dbe | refs/heads/master | 2022-12-22T22:53:46.638723 | 2020-09-01T01:43:59 | 2020-09-01T01:43:59 | 291,866,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for vienna project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the
# WSGI callable that servers import as `application`.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vienna.settings')
application = get_wsgi_application()
| [
"vishalsen@vishals-MacBook-Pro.local"
] | vishalsen@vishals-MacBook-Pro.local |
8ba93f85aac3bcb84d02b456896f51f274fd68cf | a3bf2a4bb4e99efc76544721092c99e8661ec274 | /CS_1210_Intro_to_CS/HW4Q2&3.py | 6560dc3a589060276efc9d5b6c13547921b59625 | [] | no_license | anthony-cunningham/CS_1210_Intro_to_CS | 75ddcf245dead3dd7b017bad66d314fccf391b49 | 018c0fee39b1c897bf76da24ba03d49090a95acc | refs/heads/master | 2022-01-09T05:34:10.037283 | 2019-07-21T13:41:48 | 2019-07-21T13:41:48 | 198,061,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | # Author: Anthony Cunningham Section A01
# Homeword 4
# Question 2
def q2List(n):
    """Recursively build the list [1, (n+1)^2, 1, n^2, ..., 1].

    Base case: q2List(0) == [1]. For n > 0 the result is
    [1, (n + 1) ** 2] followed by q2List(n - 1).
    """
    if n == 0:
        return [1]
    return [1, (n + 1) ** 2] + q2List(n - 1)
# Question 3
def listsAreSimilar(list1, list2):
    """Print (and return) whether list1 and list2 are structurally similar.

    Two lists are similar when they have the same length and every pair of
    corresponding elements has the same type, with nested lists compared
    recursively.

    Fixes over the original: (1) after recursing into a pair of nested
    sublists the original dropped the remaining tails entirely, so e.g.
    [[1], 2] vs [[1], 'x'] was wrongly reported similar; (2) the result is
    now also returned (the original always returned None), keeping the
    printed output while making the function usable programmatically.
    """
    result = _lists_are_similar(list1, list2)
    print(result)
    return result

def _lists_are_similar(list1, list2):
    # Helper: compute similarity without printing.
    if not list1 and not list2:
        # Both exhausted together -> same length, all pairs matched.
        return True
    if not list1 or not list2:
        # One exhausted before the other -> lengths differ.
        return False
    if type(list1[0]) != type(list2[0]):
        return False
    if isinstance(list1[0], list):
        # Nested lists must match internally AND the tails must still match.
        if not _lists_are_similar(list1[0], list2[0]):
            return False
    return _lists_are_similar(list1[1:], list2[1:])
| [
"noreply@github.com"
] | anthony-cunningham.noreply@github.com |
ac99db37453b2787efc364fe201a853f661ebf3f | babd977104571fc1e6fd11f6ac34b2808f41a92f | /generator_ex.py | 50516745ce5df28ccf3abca34d8e057df71a0fd3 | [] | no_license | kyomind/Colt-Python-3-Course | 2511d58cdea79438a0723f2c6c3588a58641abb6 | 6274b3b87fdf93512e9a14519032ca42a30a3758 | refs/heads/master | 2020-04-02T20:45:02.898168 | 2019-04-20T07:38:59 | 2019-04-20T07:38:59 | 154,778,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 23 13:45:51 2018
@author: kyo
"""
def week():
    """Generator over the seven English weekday names, Monday first."""
    yield from (
        'Monday', 'Tuesday', 'Wednesday', 'Thursday',
        'Friday', 'Saturday', 'Sunday',
    )
"odinxp@gmail.com"
] | odinxp@gmail.com |
f9c85f04d2afa86aa01f7b102ccb92c88e5d0082 | 1fef88fbe9e8b90a321e93162a28e267628a5dc3 | /main.py | 0edd5640a82df42e6c7cec6fd5e5b32c30c132d4 | [] | no_license | Marcos001/OrdemAssintotica | 5cc6d7892f787f32ee686fa2fb02c9262a15eb8e | a999d94a30b8a7c38e7cb1ed2962f1dbd9973e73 | refs/heads/master | 2020-03-16T02:58:58.793695 | 2018-05-07T12:13:52 | 2018-05-07T12:13:52 | 132,477,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py |
import matplotlib.pyplot as plt
def distancia(ordem,f,g):
    """Plot the two single values f(n) and g(n) as points, titling the
    figure with the asymptotic relation ('Omega' or 'Big O')."""
    plt.title(ordem)
    plt.plot([float(g)], 'go',label='g(n)')
    plt.plot([float(f)], 'bo', label='f(n)')
    plt.legend()
    plt.show()
def omega(f, g):
    """Return True (and report it) when f >= g, i.e. f is Omega of g."""
    if f < g:
        return False
    print('f(',f,') >= g(',g,') [Omega]')
    return True
def big_o(f, g):
    """Return True (and report it) when f <= g, i.e. f is Big O of g."""
    if f > g:
        return False
    print('f(',f,') <= g(',g,') [Big O]')
    return True
def analise_assintotica(f, g):
    """Classify f relative to g: 'Omega' when f >= g, otherwise 'Big O'."""
    return 'Omega' if omega(f, g) else 'Big O'
if __name__ == '__main__':
    print()
    # Compare f(n) = 10 * n^55 against g(n) = 2^n at n = 1000; with these
    # constants 2^1000 dominates, so the classification is 'Big O'.
    n = 1000
    f = 10*(n**55)
    g = (2**n)
    distancia(analise_assintotica(f,g),f,g)
"santosMsantos01@gmail.com"
] | santosMsantos01@gmail.com |
92205477ed83ce0074dfdc697d43b9171aceb553 | 7c6a9fc5daf773aa0d9b1bd5d07b0a45690ecd5e | /day7/part1.py | 95569a6284528ecc213f952ad51c1a0b17385359 | [] | no_license | Segmev/AdventOfCode2020 | 91533b841c295590b6ee022a2bd56da2234da75e | f6095827aedd88b47d32f74542ab51871fe577fd | refs/heads/master | 2023-02-12T08:30:40.706328 | 2021-01-01T15:29:15 | 2021-01-01T15:29:15 | 318,764,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py |
class Bag:
def __init__(self, line):
self.id = line.split(' contain ')[0].split(' bags')[0]
self.contains = {}
if line.split(' contain ')[1] != "no other bags.\n":
for bagTxt in line.split(' contain ')[1].split(', '):
id = bagTxt[bagTxt.find(' ') + 1:bagTxt.rfind(' ')]
self.contains[id] = int(bagTxt[:(bagTxt.find(' '))])
def getId(self):
return self.id
def getContains(self):
return self.contains
def get(self):
return self
def canContains(bagsDict, bag, bagName):
count = 0
for bId in bag.getContains():
if bId == bagName:
count += 1
elif bId in bagsDict.keys():
count += canContains(bagsDict, bagsDict[bId], bagName)
return (count)
def main():
f = open("./inputs.txt")
bagsDict = {}
for l in f:
b = Bag(l)
bagsDict[b.getId()] = b
total = 0
for bag in bagsDict:
total += 1 if canContains(bagsDict,
bagsDict[bag], "shiny gold") > 0 else 0
print(total)
if __name__ == "__main__":
main()
| [
"stephane.karraz@gmail.com"
] | stephane.karraz@gmail.com |
47ee6a7c8dae9537d451038d895d236345c3d7f8 | 622d4160497cf67665183b3fdbcdf871c72f43cf | /load_generator.py | 8f0903af056986531afd4256177e83bf935043fa | [] | no_license | RakibulHoque/Acute_Lymphoblastic_Leukemia-Binary_Classification- | 7b5934dd1b8e2dc82264539ccd275d6378e9f7e8 | fee4bb1ce040c1f5bc92805b010101b72267287b | refs/heads/master | 2020-08-17T21:17:38.653020 | 2019-10-17T05:55:27 | 2019-10-17T05:55:27 | 215,712,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py | from keras.utils import to_categorical
from imageio import imread
import numpy as np
import random
#random.seed(7)
from observe_data import all_imgs_dict, hem_imgs_dict
from augmentation import transform
#making two classes equal in training data
# Subsample the ALL images down to the HEM count so the two classes are
# balanced (presumably ALL = leukemic, HEM = healthy -- named after the
# source dicts; TODO confirm against observe_data).
keys = list(all_imgs_dict.keys())
random.shuffle(keys)
all_img_dict_trimmed = {x:all_imgs_dict[x] for x in keys[0:len(hem_imgs_dict)]}
#all_img_dict_trimmed = all_imgs_dict
#modify ALL data: random 80/20 train/validation split of the ALL class
keys = list(all_img_dict_trimmed.keys())
random.shuffle(keys)
all_train_data = {x:all_img_dict_trimmed[x] for x in keys[0:4*(len(keys)//5)]}
all_valid_data = {x:all_img_dict_trimmed[x] for x in keys[4*(len(keys)//5):]}
#modify healthy data: same 80/20 split for the HEM class
keys = list(hem_imgs_dict.keys())
random.shuffle(keys)
hem_train_data = {x:hem_imgs_dict[x] for x in keys[0:4*(len(keys)//5)]}
hem_valid_data = {x:hem_imgs_dict[x] for x in keys[4*(len(keys)//5):]}
#training data final: merge the per-class splits into the exported dicts
train_data = {**all_train_data,**hem_train_data}
valid_data = {**all_valid_data,**hem_valid_data}
'''extra data where train and valid is not equally distributed.'''
#data = {**all_img_dict_trimmed,**hem_imgs_dict}
#generator for training
def generator_for_dict(data, img_size=(450,450,3), num_class=2, batchsize = 32, load_augmentor = False):
    """Infinite Keras-style batch generator over `data`.

    `data` maps keys to packets carrying 'PATH' (image file path) and
    'CONDITION' (integer class label). Yields (batch_x, batch_y) where
    batch_x has shape (batchsize,) + img_size and batch_y is one-hot.
    Keys are reshuffled every epoch and the trailing len(keys) % batchsize
    items are dropped. NOTE: the same two arrays are yielded every batch
    (overwritten in place), so consumers must not keep references to them.
    """
    keys = list(data.keys())
    img_rows, img_cols, channel = img_size
    batch_x = np.zeros((batchsize, img_rows, img_cols, channel))
    batch_y = np.zeros((batchsize, num_class))
    while 1:
        random.shuffle(keys)
        for i_key in range(0,len(keys) - len(keys)%batchsize, batchsize):
            for i_batch in range(batchsize):
                packet = data[keys[i_key+i_batch]]
                x = imread(packet['PATH'])
                if load_augmentor:
                    # Apply random augmentation (augmentation.transform).
                    x = transform(x)
                y = to_categorical(packet['CONDITION'], num_class)
                batch_x[i_batch] = x
                batch_y[i_batch] = y
            yield batch_x, batch_y
"""loading from RAM"""
#def generator_for_dict(data, img_size=(450,450,3), num_class=2, batchsize = 32, load_augmentor = False):
# keys = list(data.keys())
# img_rows, img_cols, channel = img_size
# batch_x = np.zeros((batchsize, img_rows, img_cols, channel))
# batch_y = np.zeros((batchsize, num_class))
# while 1:
# random.shuffle(keys)
# data_in_ram = list(map(imread,[packet['PATH'] for packet in
# [data[p] for p in keys]]))
# label_in_ram = [packet['CONDITION'] for packet in
# [data[p] for p in keys]]
# for i_key in range(0,len(data_in_ram) - len(data_in_ram)%batchsize, batchsize):
# for i_batch in range(batchsize):
# x = data_in_ram[i_key + i_batch]
# if load_augmentor:
# x = transform(x)
# y = to_categorical(label_in_ram[i_key + i_batch], num_class)
# batch_x[i_batch] = x
# batch_y[i_batch] = y
# yield batch_x, batch_y
| [
"rakibul_hoque@yahoo.com"
] | rakibul_hoque@yahoo.com |
df255cd7317c5433d2dc5b40125c3fa5425c0b16 | 805a4e478ed508a9bafab1741b6f642f45826bfb | /CycloPeptide/CyclopeptideSequencing.py | 0a58ef24afb3b78f4fe80e9d6f58b655d6242181 | [] | no_license | ellemcfarlane/bioinformatics | 7076f1d61eb080b06e73af92d94d3bdea3442c18 | e9c81f4201973a151b7dde40c6e38f7a63719d91 | refs/heads/master | 2021-01-08T10:28:36.525511 | 2020-02-20T22:13:23 | 2020-02-20T22:13:23 | 242,003,030 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,864 | py | # Elle McFarlane
def cyclopeptide_sequencing(spectrum):
    """
    Given an ideal experimental spectrum, finds the cyclic peptide(s) whose
    theoretical spectrum matches the experimental spectrum.
    :param spectrum: a collection of (possibly repeated) integers corresponding to an ideal experimental spectrum
    :return: every amino acid string peptide such that cyclospectrum(peptide) = spectrum (if such a string exists).
    Ex:
    cyclopeptide_sequencing([0, 113, 128, 186, 241, 299, 314, 427]) ->
    [[186,128,113], [186,113,128], [128,186,113], [128,113,186], [113,186,128] [113,128,186]]
    """
    # Branch-and-bound search: grow candidates one residue at a time and
    # prune any whose linear spectrum is not contained in the target.
    # list containing only empty peptide
    peptides = [[]]
    # peptides that match spectrum
    winners = []
    # while peptides is nonempty
    while peptides:
        # expand peptides by one amino acid
        peptides = expand(peptides)
        # remove any peptides not consistent with spectrum
        peptides = [pep for pep in peptides if is_consistent(pep, spectrum)]
        for peptide in peptides:
            # A candidate is complete once it reaches the parent mass; keep
            # it only if its full cyclic spectrum matches exactly.
            if mass(peptide) == parent_mass(spectrum):
                if cyclospectrum(peptide) == spectrum:
                    winners.append(peptide)
    # Loop terminates because candidates heavier than the parent mass carry
    # masses absent from `spectrum` and all get pruned (assumes `spectrum`
    # is sorted so its last entry is the parent mass).
    winners.sort(reverse=True)
    return winners
def expand(peptides):
    """
    :param peptides: list of list of peptides where a peptide is a list of integers representing amino acid masses in Da
    :return: new list containing all possible extensions of peptides by a single amino acid mass
    Ex:
    expand([[]]) -> [[57], [71], [87], [97], [99], [101], [103], [113], [114], [115], [128], [129],\
    [131], [137], [147], [156], [163], [186]]
    """
    if not peptides:
        return peptides
    # The 18 distinct integer masses of the 20 proteinogenic amino acids
    # (I/L and K/Q share masses).
    amino_acid_masses = [57, 71, 87, 97, 99, 101, 103, 113, 114, 115,
                         128, 129, 131, 137, 147, 156, 163, 186]
    # Each peptide is extended by every possible mass, preserving the
    # original (peptide-major, mass-minor) ordering.
    return [peptide + [aa] for peptide in peptides for aa in amino_acid_masses]
def mass(peptide):
    """
    :param peptide: list of integers representing mass in Da of each amino acid in peptide
    :return: sum of amino acid masses in Da
    Ex:
    Peptide VKLFPWFNQY = [99, 128, 113, 147, 97, 186, 147, 114, 128, 163]
    mass(VKLFPWFNQY) = 1322
    """
    # Total mass is just the sum of the residue masses (0 for the empty
    # peptide).
    return sum(peptide)
def parent_mass(spectrum):
    """
    :param spectrum: collection of integers corresponding to ideal experimental spectrum
    :return: last entry, which should be the biggest value representing entire peptide
    Ex:
    parent_mass([0, 113, 128, 186, 241, 299, 314, 427]) -> 427
    """
    # The spectrum is assumed sorted, so the final entry is the full
    # peptide's mass; an empty spectrum has parent mass 0.
    return spectrum[-1] if spectrum else 0
def cyclospectrum(peptide):
    """
    :param peptide: list of integers representing mass in Da of each amino acid in cyclic peptide in natural order
    :return: sorted list representing cyclic spectrum
    Ex:
    NQEL = [114, 128, 129, 113]
    cyclospectrum(NQEL) -> [0, 113, 114, 128, 129, 227, 242, 242, 257, 355, 356, 370, 371, 484]
    """
    n = len(peptide)
    # prefix[i] holds the mass of the first i residues.
    prefix = [0]
    for aa in peptide:
        prefix.append(prefix[-1] + aa)
    total = prefix[n]  # mass of the whole peptide
    spec = [0]
    for i in range(n):
        for j in range(i + 1, n + 1):
            sub = prefix[j] - prefix[i]
            spec.append(sub)
            # Each interior linear subpeptide has a wrap-around complement
            # in the cycle with mass total - sub.
            if i > 0 and j < n:
                spec.append(total - sub)
    spec.sort()
    return spec
def linear_spectrum(peptide):
    """
    :param peptide: list of integers representing mass in Da of each amino acid in linear peptide in natural order
    :return: sorted list representing linear spectrum
    Ex:
    NQEL = [114, 128, 129, 113]
    linear_spectrum(NQEL) -> [0, 113, 114, 128, 129, 242, 242, 257, 370, 371, 484]
    """
    # prefix[i] holds the mass of the first i residues; every contiguous
    # subpeptide mass is a difference of two prefix masses.
    prefix = [0]
    for aa in peptide:
        prefix.append(prefix[-1] + aa)
    n = len(peptide)
    spec = [0]
    for start in range(n):
        for end in range(start + 1, n + 1):
            spec.append(prefix[end] - prefix[start])
    spec.sort()
    return spec
def is_consistent(peptide, spectrum):
    """
    :param peptide: list of integers representing mass in Da of each amino acid in peptide in natural order
    :param spectrum: a collection of (possibly repeated) integers corresponding to an ideal experimental spectrum
    :return: boolean, whether peptide's linear spectrum is a sub-MULTISET of spectrum

    The original only checked membership (`x in spectrum`), ignoring
    multiplicities, so a peptide containing e.g. three copies of a mass
    that appears once in the spectrum wrongly survived pruning. Counting
    occurrences restores the standard branch-and-bound bound (the final
    winners are unchanged, since they must still match the full
    cyclospectrum exactly), and also avoids repeated O(n) list scans.
    """
    from collections import Counter  # local import: keeps module-level deps unchanged
    available = Counter(spectrum)
    needed = Counter(linear_spectrum(peptide))
    # Every required mass must occur in the spectrum at least as often.
    return all(available[m] >= count for m, count in needed.items())
def driver(path):
    """Read a space-separated spectrum from the first line of `path`, run
    cyclopeptide_sequencing, and print each winning peptide as
    dash-joined masses (e.g. '186-128-113'), space-separated."""
    with open(path, 'r') as f:
        line = next(f)
        spectrum = [int(x) for x in line.split(" ")]
        winners = cyclopeptide_sequencing(spectrum)
        for peptide in winners:
            formatted_peptide = "-".join(str(x) for x in peptide)
            print(formatted_peptide, end=" ")
# driver('rosalind_ba4e.txt')
| [
"ellesummer212@gmail.com"
] | ellesummer212@gmail.com |
0bf77bfa0fc5d84c7e932a08829788f560eae77b | 9e7a944c16be631f6c200c7d2e44fcd8c818afae | /recognition/partial_fc/symbol/resnet.py | d3a27caf320c800de64d6ccaab978fce9bb0fd0b | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | hatsunexym/insightface | 193e5d6385ad9b3bc40223c574c2d22f6b308eb3 | f4d517bcd3f09b822f642cfc25161f3b427f5208 | refs/heads/master | 2022-12-29T06:00:28.197461 | 2020-10-16T01:31:53 | 2020-10-16T01:31:53 | 254,526,690 | 0 | 0 | MIT | 2020-10-16T01:31:54 | 2020-04-10T02:34:23 | null | UTF-8 | Python | false | false | 32,522 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
Original author Wei Wu
Implemented the following paper:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Identity Mappings in Deep Residual Networks"
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import mxnet as mx
import numpy as np
from symbol import symbol_utils
# import memonger
# import sklearn
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from default import config
def Conv(**kwargs):
    """Thin wrapper around mx.sym.Convolution; kwargs are forwarded as-is.

    Kept as a single indirection point so custom weight/bias handling can
    be reintroduced in one place if needed.
    """
    return mx.sym.Convolution(**kwargs)
def Act(data, act_type, name):
    """Build an activation symbol: PReLU goes through LeakyReLU, every
    other act_type through the generic Activation operator."""
    if act_type == 'prelu':
        return mx.sym.LeakyReLU(data=data, act_type='prelu', name=name)
    return mx.symbol.Activation(data=data, act_type=act_type, name=name)
def residual_unit_v1(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs):
    """Return ResNet Unit symbol for building ResNet
    Parameters
    ----------
    data : str
        Input data
    num_filter : int
        Number of output channels
    bnf : int
        Bottle neck channels factor with regard to num_filter
    stride : tuple
        Stride used in convolution (applied in the FIRST conv of the unit)
    dim_match : Boolean
        True means channel number between input and output is the same, otherwise means differ
    name : str
        Base name of the operators
    workspace : int
        Workspace used in convolution operator

    Recognised kwargs: version_se (enable squeeze-excitation, default 1),
    bn_mom (BatchNorm momentum, 0.9), workspace (MB, 256), memonger
    (mark shortcut for memory-optimised training, False), version_act
    (activation type, 'prelu').
    """
    use_se = kwargs.get('version_se', 1)
    bn_mom = kwargs.get('bn_mom', 0.9)
    workspace = kwargs.get('workspace', 256)
    memonger = kwargs.get('memonger', False)
    act_type = kwargs.get('version_act', 'prelu')
    # print('in unit1')
    if bottle_neck:
        # 1x1 (reduce, carries the stride) -> 3x3 -> 1x1 (restore) pipeline.
        conv1 = Conv(data=data, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=stride, pad=(0, 0),
                     no_bias=True, workspace=workspace, name=name + '_conv1')
        bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
        act1 = Act(data=bn1, act_type=act_type, name=name + '_relu1')
        conv2 = Conv(data=act1, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=(1, 1), pad=(1, 1),
                     no_bias=True, workspace=workspace, name=name + '_conv2')
        bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
        act2 = Act(data=bn2, act_type=act_type, name=name + '_relu2')
        conv3 = Conv(data=act2, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True,
                     workspace=workspace, name=name + '_conv3')
        bn3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
        if use_se:
            # se begin: squeeze (global pool) -> excite (two 1x1 convs) ->
            # sigmoid gate multiplied back onto the residual branch.
            body = mx.sym.Pooling(data=bn3, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1')
            body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                        name=name + "_se_conv1", workspace=workspace)
            body = Act(data=body, act_type=act_type, name=name + '_se_relu1')
            body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                        name=name + "_se_conv2", workspace=workspace)
            body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid")
            bn3 = mx.symbol.broadcast_mul(bn3, body)
            # se end
        if dim_match:
            shortcut = data
        else:
            # Projection shortcut when shape changes (1x1 conv + BN).
            conv1sc = Conv(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
                           workspace=workspace, name=name + '_conv1sc')
            shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        # ResNet v1: activation applied AFTER the addition.
        return Act(data=bn3 + shortcut, act_type=act_type, name=name + '_relu3')
    else:
        # Plain two-conv unit (3x3 -> 3x3), stride on the first conv.
        conv1 = Conv(data=data, num_filter=num_filter, kernel=(3, 3), stride=stride, pad=(1, 1),
                     no_bias=True, workspace=workspace, name=name + '_conv1')
        bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
        act1 = Act(data=bn1, act_type=act_type, name=name + '_relu1')
        conv2 = Conv(data=act1, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=(1, 1),
                     no_bias=True, workspace=workspace, name=name + '_conv2')
        bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
        if use_se:
            # se begin
            body = mx.sym.Pooling(data=bn2, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1')
            body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                        name=name + "_se_conv1", workspace=workspace)
            body = Act(data=body, act_type=act_type, name=name + '_se_relu1')
            body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                        name=name + "_se_conv2", workspace=workspace)
            body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid")
            bn2 = mx.symbol.broadcast_mul(bn2, body)
            # se end
        if dim_match:
            shortcut = data
        else:
            conv1sc = Conv(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
                           workspace=workspace, name=name + '_conv1sc')
            shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_sc')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        return Act(data=bn2 + shortcut, act_type=act_type, name=name + '_relu3')
def residual_unit_v1_L(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs):
    """Return ResNet Unit symbol for building ResNet
    Parameters
    ----------
    data : str
        Input data
    num_filter : int
        Number of output channels
    bnf : int
        Bottle neck channels factor with regard to num_filter
    stride : tuple
        Stride used in convolution (applied in the LAST conv of the
        residual branch, unlike residual_unit_v1 which strides the first)
    dim_match : Boolean
        True means channel number between input and output is the same, otherwise means differ
    name : str
        Base name of the operators
    workspace : int
        Workspace used in convolution operator

    Recognised kwargs: version_se (enable squeeze-excitation, default 1),
    bn_mom (BatchNorm momentum, 0.9), workspace (MB, 256), memonger
    (mark shortcut for memory-optimised training, False), version_act
    (activation type, 'prelu').
    """
    use_se = kwargs.get('version_se', 1)
    bn_mom = kwargs.get('bn_mom', 0.9)
    workspace = kwargs.get('workspace', 256)
    memonger = kwargs.get('memonger', False)
    act_type = kwargs.get('version_act', 'prelu')
    # print('in unit1')
    if bottle_neck:
        # 1x1 (reduce) -> 3x3 -> 1x1 (restore, carries the stride).
        conv1 = Conv(data=data, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                     no_bias=True, workspace=workspace, name=name + '_conv1')
        bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
        act1 = Act(data=bn1, act_type=act_type, name=name + '_relu1')
        conv2 = Conv(data=act1, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=(1, 1), pad=(1, 1),
                     no_bias=True, workspace=workspace, name=name + '_conv2')
        bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
        act2 = Act(data=bn2, act_type=act_type, name=name + '_relu2')
        conv3 = Conv(data=act2, num_filter=num_filter, kernel=(1, 1), stride=stride, pad=(0, 0), no_bias=True,
                     workspace=workspace, name=name + '_conv3')
        bn3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
        if use_se:
            # se begin: squeeze (global pool) -> excite (two 1x1 convs) ->
            # sigmoid gate multiplied back onto the residual branch.
            body = mx.sym.Pooling(data=bn3, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1')
            body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                        name=name + "_se_conv1", workspace=workspace)
            body = Act(data=body, act_type=act_type, name=name + '_se_relu1')
            body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                        name=name + "_se_conv2", workspace=workspace)
            body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid")
            bn3 = mx.symbol.broadcast_mul(bn3, body)
            # se end
        if dim_match:
            shortcut = data
        else:
            # Projection shortcut when shape changes (1x1 conv + BN).
            conv1sc = Conv(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
                           workspace=workspace, name=name + '_conv1sc')
            shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        # v1: activation applied AFTER the addition.
        return Act(data=bn3 + shortcut, act_type=act_type, name=name + '_relu3')
    else:
        # Plain two-conv unit (3x3 -> 3x3), stride on the second conv.
        conv1 = Conv(data=data, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=(1, 1),
                     no_bias=True, workspace=workspace, name=name + '_conv1')
        bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
        act1 = Act(data=bn1, act_type=act_type, name=name + '_relu1')
        conv2 = Conv(data=act1, num_filter=num_filter, kernel=(3, 3), stride=stride, pad=(1, 1),
                     no_bias=True, workspace=workspace, name=name + '_conv2')
        bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
        if use_se:
            # se begin
            body = mx.sym.Pooling(data=bn2, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1')
            body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                        name=name + "_se_conv1", workspace=workspace)
            body = Act(data=body, act_type=act_type, name=name + '_se_relu1')
            body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                        name=name + "_se_conv2", workspace=workspace)
            body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid")
            bn2 = mx.symbol.broadcast_mul(bn2, body)
            # se end
        if dim_match:
            shortcut = data
        else:
            conv1sc = Conv(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
                           workspace=workspace, name=name + '_conv1sc')
            shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_sc')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        return Act(data=bn2 + shortcut, act_type=act_type, name=name + '_relu3')
def residual_unit_v2(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
num_filter : int
Number of output channels
bnf : int
Bottle neck channels factor with regard to num_filter
stride : tuple
Stride used in convolution
dim_match : Boolean
True means channel number between input and output is the same, otherwise means differ
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
"""
use_se = kwargs.get('version_se', 1)
bn_mom = kwargs.get('bn_mom', 0.9)
workspace = kwargs.get('workspace', 256)
memonger = kwargs.get('memonger', False)
act_type = kwargs.get('version_act', 'prelu')
# print('in unit2')
if bottle_neck:
# the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
act1 = Act(data=bn1, act_type=act_type, name=name + '_relu1')
conv1 = Conv(data=act1, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1), pad=(0, 0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
act2 = Act(data=bn2, act_type=act_type, name=name + '_relu2')
conv2 = Conv(data=act2, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=stride, pad=(1, 1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
act3 = Act(data=bn3, act_type=act_type, name=name + '_relu3')
conv3 = Conv(data=act3, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True,
workspace=workspace, name=name + '_conv3')
if use_se:
# se begin
body = mx.sym.Pooling(data=conv3, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1')
body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
name=name + "_se_conv1", workspace=workspace)
body = Act(data=body, act_type=act_type, name=name + '_se_relu1')
body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
name=name + "_se_conv2", workspace=workspace)
body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid")
conv3 = mx.symbol.broadcast_mul(conv3, body)
if dim_match:
shortcut = data
else:
shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
workspace=workspace, name=name + '_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv3 + shortcut
else:
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
act1 = Act(data=bn1, act_type=act_type, name=name + '_relu1')
conv1 = Conv(data=act1, num_filter=num_filter, kernel=(3, 3), stride=stride, pad=(1, 1),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
act2 = Act(data=bn2, act_type=act_type, name=name + '_relu2')
conv2 = Conv(data=act2, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=(1, 1),
no_bias=True, workspace=workspace, name=name + '_conv2')
if use_se:
# se begin
body = mx.sym.Pooling(data=conv2, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1')
body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
name=name + "_se_conv1", workspace=workspace)
body = Act(data=body, act_type=act_type, name=name + '_se_relu1')
body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
name=name + "_se_conv2", workspace=workspace)
body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid")
conv2 = mx.symbol.broadcast_mul(conv2, body)
if dim_match:
shortcut = data
else:
shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
workspace=workspace, name=name + '_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv2 + shortcut
def residual_unit_v3(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
num_filter : int
Number of output channels
bnf : int
Bottle neck channels factor with regard to num_filter
stride : tuple
Stride used in convolution
dim_match : Boolean
True means channel number between input and output is the same, otherwise means differ
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
"""
use_se = kwargs.get('version_se', 1)
bn_mom = kwargs.get('bn_mom', 0.9)
workspace = kwargs.get('workspace', 256)
memonger = kwargs.get('memonger', False)
act_type = kwargs.get('version_act', 'prelu')
# print('in unit3')
if bottle_neck:
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
conv1 = Conv(data=bn1, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1), pad=(0, 0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
act1 = Act(data=bn2, act_type=act_type, name=name + '_relu1')
conv2 = Conv(data=act1, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=(1, 1), pad=(1, 1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
act2 = Act(data=bn3, act_type=act_type, name=name + '_relu2')
conv3 = Conv(data=act2, num_filter=num_filter, kernel=(1, 1), stride=stride, pad=(0, 0), no_bias=True,
workspace=workspace, name=name + '_conv3')
bn4 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
if use_se:
# se begin
body = mx.sym.Pooling(data=bn4, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1')
body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
name=name + "_se_conv1", workspace=workspace)
body = Act(data=body, act_type=act_type, name=name + '_se_relu1')
body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
name=name + "_se_conv2", workspace=workspace)
body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid")
bn4 = mx.symbol.broadcast_mul(bn4, body)
# se end
if dim_match:
shortcut = data
else:
conv1sc = Conv(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
workspace=workspace, name=name + '_conv1sc')
shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return bn4 + shortcut
else:
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
conv1 = Conv(data=bn1, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=(1, 1),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
act1 = Act(data=bn2, act_type=act_type, name=name + '_relu1')
conv2 = Conv(data=act1, num_filter=num_filter, kernel=(3, 3), stride=stride, pad=(1, 1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if use_se:
# se begin
body = mx.sym.Pooling(data=bn3, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1')
body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
name=name + "_se_conv1", workspace=workspace)
body = Act(data=body, act_type=act_type, name=name + '_se_relu1')
body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
name=name + "_se_conv2", workspace=workspace)
body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid")
bn3 = mx.symbol.broadcast_mul(bn3, body)
# se end
if dim_match:
shortcut = data
else:
conv1sc = Conv(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
workspace=workspace, name=name + '_conv1sc')
shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return bn3 + shortcut
def residual_unit_v3_x(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs):
"""Return ResNeXt Unit symbol for building ResNeXt
Parameters
----------
data : str
Input data
num_filter : int
Number of output channels
bnf : int
Bottle neck channels factor with regard to num_filter
stride : tuple
Stride used in convolution
dim_match : Boolean
True means channel number between input and output is the same, otherwise means differ
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
"""
assert (bottle_neck)
use_se = kwargs.get('version_se', 1)
bn_mom = kwargs.get('bn_mom', 0.9)
workspace = kwargs.get('workspace', 256)
memonger = kwargs.get('memonger', False)
act_type = kwargs.get('version_act', 'prelu')
num_group = 32
# print('in unit3')
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
conv1 = Conv(data=bn1, num_group=num_group, num_filter=int(num_filter * 0.5), kernel=(1, 1), stride=(1, 1),
pad=(0, 0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
act1 = Act(data=bn2, act_type=act_type, name=name + '_relu1')
conv2 = Conv(data=act1, num_group=num_group, num_filter=int(num_filter * 0.5), kernel=(3, 3), stride=(1, 1),
pad=(1, 1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
act2 = Act(data=bn3, act_type=act_type, name=name + '_relu2')
conv3 = Conv(data=act2, num_filter=num_filter, kernel=(1, 1), stride=stride, pad=(0, 0), no_bias=True,
workspace=workspace, name=name + '_conv3')
bn4 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
if use_se:
# se begin
body = mx.sym.Pooling(data=bn4, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1')
body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
name=name + "_se_conv1", workspace=workspace)
body = Act(data=body, act_type=act_type, name=name + '_se_relu1')
body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
name=name + "_se_conv2", workspace=workspace)
body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid")
bn4 = mx.symbol.broadcast_mul(bn4, body)
# se end
if dim_match:
shortcut = data
else:
conv1sc = Conv(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
workspace=workspace, name=name + '_conv1sc')
shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return bn4 + shortcut
def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs):
uv = kwargs.get('version_unit', 3)
version_input = kwargs.get('version_input', 1)
if uv == 1:
if version_input == 0:
return residual_unit_v1(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs)
else:
return residual_unit_v1_L(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs)
elif uv == 2:
return residual_unit_v2(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs)
elif uv == 4:
return residual_unit_v4(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs)
else:
return residual_unit_v3(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs)
def resnet(units, num_stages, filter_list, num_classes, bottle_neck):
bn_mom = config.bn_mom
workspace = config.workspace
kwargs = {'version_se': config.net_se,
'version_input': config.net_input,
'version_output': config.net_output,
'version_unit': config.net_unit,
'version_act': config.net_act,
'bn_mom': bn_mom,
'workspace': workspace,
'memonger': config.memonger,
}
"""Return ResNet symbol of
Parameters
----------
units : list
Number of units in each stage
num_stages : int
Number of stage
filter_list : list
Channel size of each stage
num_classes : int
Ouput size of symbol
dataset : str
Dataset type, only cifar10 and imagenet supports
workspace : int
Workspace used in convolution operator
"""
version_se = kwargs.get('version_se', 1)
version_input = kwargs.get('version_input', 1)
assert version_input >= 0
version_output = kwargs.get('version_output', 'E')
fc_type = version_output
version_unit = kwargs.get('version_unit', 3)
act_type = kwargs.get('version_act', 'prelu')
memonger = kwargs.get('memonger', False)
print(version_se, version_input, version_output, version_unit, act_type, memonger)
num_unit = len(units)
assert (num_unit == num_stages)
data = mx.sym.Variable(name='data')
if config.fp16:
data = mx.sym.Cast(data=data, dtype=np.float16)
if version_input == 0:
# data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
data = mx.sym.identity(data=data, name='id')
data = data - 127.5
data = data * 0.0078125
body = Conv(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2, 2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
body = Act(data=body, act_type=act_type, name='relu0')
# body = mx.sym.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max')
elif version_input == 2:
data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
body = Conv(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1, 1), pad=(1, 1),
no_bias=True, name="conv0", workspace=workspace)
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
body = Act(data=body, act_type=act_type, name='relu0')
else:
data = mx.sym.identity(data=data, name='id')
data = data - 127.5
data = data * 0.0078125
body = data
body = Conv(data=body, num_filter=filter_list[0], kernel=(3, 3), stride=(1, 1), pad=(1, 1),
no_bias=True, name="conv0", workspace=workspace)
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
body = Act(data=body, act_type=act_type, name='relu0')
for i in range(num_stages):
# if version_input==0:
# body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False,
# name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, **kwargs)
# else:
# body = residual_unit(body, filter_list[i+1], (2, 2), False,
# name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, **kwargs)
body = residual_unit(body, filter_list[i + 1], (2, 2), False,
name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, **kwargs)
for j in range(units[i] - 1):
body = residual_unit(body, filter_list[i + 1], (1, 1), True, name='stage%d_unit%d' % (i + 1, j + 2),
bottle_neck=bottle_neck, **kwargs)
if config.fp16:
body = mx.sym.Cast(data=body, dtype=np.float32)
if bottle_neck:
body = Conv(data=body, num_filter=512, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
no_bias=True, name="convd", workspace=workspace)
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bnd')
body = Act(data=body, act_type=act_type, name='relud')
fc1 = symbol_utils.get_fc1(body, num_classes, fc_type)
return fc1
def get_symbol():
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
Original author Wei Wu
"""
num_classes = config.embedding_size
num_layers = config.num_layers
if num_layers >= 500:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
num_stages = 4
if num_layers == 18:
units = [2, 2, 2, 2]
elif num_layers == 34:
units = [3, 4, 6, 3]
elif num_layers == 49:
units = [3, 4, 14, 3]
elif num_layers == 50:
units = [3, 4, 14, 3]
elif num_layers == 74:
units = [3, 6, 24, 3]
elif num_layers == 90:
units = [3, 8, 30, 3]
elif num_layers == 98:
units = [3, 4, 38, 3]
elif num_layers == 99:
units = [3, 8, 35, 3]
elif num_layers == 100:
units = [3, 13, 30, 3]
elif num_layers == 134:
units = [3, 10, 50, 3]
elif num_layers == 136:
units = [3, 13, 48, 3]
elif num_layers == 140:
units = [3, 15, 48, 3]
elif num_layers == 124:
units = [3, 13, 40, 5]
elif num_layers == 160:
units = [3, 24, 49, 3]
elif num_layers == 101:
units = [3, 4, 23, 3]
elif num_layers == 152:
units = [3, 8, 36, 3]
elif num_layers == 200:
units = [3, 24, 36, 3]
elif num_layers == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
net = resnet(units=units,
num_stages=num_stages,
filter_list=filter_list,
num_classes=num_classes,
bottle_neck=bottle_neck)
return net
| [
"anxiangsir@outlook.com"
] | anxiangsir@outlook.com |
ff3457af2885ad007f99f5934781e303b81c3a85 | 7b20e2f86c2bb2145ae9ca5bcd4b9ad1566e79b0 | /ABC/ABC072/B.py | c1e5f4aca531a38a39572c87cb4b7882f3a478d9 | [] | no_license | pto8913/KyoPro | 5f5e769960dfec73af5b0f338f32659ff067094b | 29ebc30a3d45fea273cb9034fba8311673a406dd | refs/heads/master | 2021-06-13T16:43:40.275854 | 2021-03-23T00:02:25 | 2021-03-23T00:02:25 | 174,684,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | # URL: https://atcoder.jp/contests/abc072/tasks/abc072_b
print(input()[::2])
| [
"noreply@github.com"
] | pto8913.noreply@github.com |
537f94dabe6666bfaf56f01be6590e9d0da47613 | 3cefa2266f64ee6be33393a175fddf2e405101ab | /VAD/evaluate_mmse.py | 839311127b1ef43c7e928ed8f2504f13db1a855b | [] | no_license | suzuhira0208/MyProgram | 9cd94ef4d30dc87ca7b0a9463957b837330c0591 | 2d8c2f386def4a61810d3e1ce773dac856995fd8 | refs/heads/main | 2023-03-20T07:34:27.207535 | 2021-03-08T09:09:55 | 2021-03-08T09:09:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,592 | py | """mmseによる評価ラベルと元々の正解ラベルを比較して正答率を算出するプログラム"""
import numpy as np
import os
import argparse
import csv
import pandas as pd
import math
import glob
from utils import vad_utils
def get_args():
parser = argparse.ArgumentParser(description = "CSVファイルを読み込み入力波形と一緒に出力するプログラム")
parser.add_argument('--mmse', type = str, default = 'Output_MMSE/mmse_labels_test_snr0.csv', help = 'mmseラベル')
parser.add_argument('--correct', type = str, default = 'Label_Correct/label_clean_test.csv', help = '正解ラベル')
return parser.parse_args()
def get_csv(path_csv):
df = pd.read_csv(path_csv, engine = 'python')
#print("number of Clean feature ==> : \n", df.keys())
#print("number of Clean index and columns ==> : \n", df.shape)
df.info()
return df
if __name__ == "__main__":
"""---引数を取得---"""
args = get_args()
path_mmse = vad_utils.get_path(args.mmse)
path_correct = vad_utils.get_path(args.correct)
"""---csvファイルを読み込み---"""
df_mmse = get_csv(path_mmse)
df_correct = get_csv(path_correct)
"""---データフレームを配列に変換+0列目を抽出---"""
mmse_labels = df_mmse.values[:,0]
correct_labels = df_correct.values[:,0]
index = len(mmse_labels)
print(index)
cnt = 0
for i in range(index):
if mmse_labels[i] == correct_labels[i]:
cnt = cnt + 1
acc = cnt / index * 100
print("accuracies: " + acc)
| [
"szhrwork@gmail.com"
] | szhrwork@gmail.com |
f81fa202ac030ed1854e85d1d468bada85820ad5 | 4d9e7425ea6902a45eeda81c5cd5ede7d44fd087 | /examples/starwars/tests/test_connections.py | d266df33d782e73fb8019b1abe3fbbc0aa505e77 | [
"MIT"
] | permissive | Salalem/graphene-neo4j | 2258b29093337fd8981b880065b144dc4e6c145b | f2b99fa18b7367cf3a581d1f4a71fda16a1320fc | refs/heads/master | 2023-08-08T06:57:41.749557 | 2019-06-14T16:22:14 | 2019-06-14T16:22:14 | 222,071,239 | 1 | 1 | MIT | 2023-07-22T21:45:41 | 2019-11-16T08:45:32 | null | UTF-8 | Python | false | false | 1,441 | py | import pytest
from ..data import initialize
from ..schema import schema
pytestmark = pytest.mark.django_db
def test_correct_fetch_first_ship_rebels():
initialize()
query = '''
query RebelsShipsQuery {
rebels {
name,
hero {
name
}
ships(first: 1) {
edges {
node {
name
}
}
}
}
}
'''
expected = {
'rebels': {
'name': 'Alliance to Restore the Republic',
'hero': {
'name': 'Human'
},
'ships': {
'edges': [
{
'node': {
'name': 'X-Wing'
}
}
]
}
}
}
result = schema.execute(query)
assert not result.errors
assert result.data == expected
def test_correct_list_characters():
initialize()
query = '''
query RebelsShipsQuery {
node(id: "U2hpcDox") {
... on Ship {
name
characters {
name
}
}
}
}
'''
expected = {
'node': {
'name': 'X-Wing',
'characters': [{
'name': 'Human'
}],
}
}
result = schema.execute(query)
assert not result.errors
assert result.data == expected
| [
"me@syrusakbary.com"
] | me@syrusakbary.com |
2de022b2a2e08966a16a6e3619ad7edf64145aa4 | 5ba139e6e882ec6e4ebebf580316566ffe8b96de | /algorithms/search/linear_search.py | 4857982ee7bfd9023a544707913007f08bea6a5b | [] | no_license | egor-mahlaev/python-algorithms | 52e0b212d257dc7acca742f0b0435a1e2212f07f | ee89709a9324106a1edb642e7a6a199583313d88 | refs/heads/master | 2020-06-11T11:59:46.656432 | 2019-07-07T11:43:39 | 2019-07-07T11:54:26 | 193,956,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | # Works on any array.
# Best case: O(1)
# Worst case: O(n)
def linear_search(array, x):
for i in range(len(array)):
if array[i] == x:
return i
return -1
| [
"mahlasza@gmail.com"
] | mahlasza@gmail.com |
50aa838108041994970b4a245b95fa893a34737f | 66dd570bf5945dcbd183ed3c0cf897c0359cbccd | /python/python语法/pyexercise/Exercise05_09.py | e942db9fe0e15a945d4dac56ce26d7e5c0745b7a | [] | no_license | SamJ2018/LeetCode | 302cc97626220521c8847d30b99858e63fa509f3 | 784bd0b1491050bbd80f5a0e2420467b63152d8f | refs/heads/master | 2021-06-19T10:30:37.381542 | 2021-02-06T16:15:01 | 2021-02-06T16:15:01 | 178,962,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | tuition = 10000
count = 1
while count <= 10:
tuition = tuition * 1.05;
count += 1
print("Tuition in ten years is", tuition)
sum = tuition
for i in range(2, 5):
tuition = tuition * 1.05
sum += tuition
print("The four-year tuition in ten years is", sum)
| [
"juksam@centos7.localdomain"
] | juksam@centos7.localdomain |
4eda5203a6c1504367ace56c9fa003a10b79015f | 861cd75334a2dc90e66acc04259082a43cfbab1e | /070-climbing-stairs/climbing-stairs.py | 1ac64cfa52489b1b0e02825cb083da16ed4f83f5 | [] | no_license | mengban/leetcode | 783b1ffa7c10b658b7e02450156f3b8e0624a14f | 5abf16527fe0bc5689af2592c1fdedaf00aa43ca | refs/heads/master | 2022-12-12T07:57:51.132071 | 2019-03-22T09:21:55 | 2019-03-22T09:21:55 | 147,085,305 | 0 | 0 | null | 2018-09-02T12:59:58 | 2018-09-02T12:59:58 | null | UTF-8 | Python | false | false | 848 | py | # You are climbing a stair case. It takes n steps to reach to the top.
#
# Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?
#
# Note: Given n will be a positive integer.
#
# Example 1:
#
#
# Input: 2
# Output: 2
# Explanation: There are two ways to climb to the top.
# 1. 1 step + 1 step
# 2. 2 steps
#
#
# Example 2:
#
#
# Input: 3
# Output: 3
# Explanation: There are three ways to climb to the top.
# 1. 1 step + 1 step + 1 step
# 2. 1 step + 2 steps
# 3. 2 steps + 1 step
#
#
class Solution:
def climbStairs(self, n):
"""
:type n: int
:rtype: int
"""
n1 = 1
n2 = 2
ret = 0
if n<3:
return n
for i in range(2,n):
ret = n1 + n2
n1 = n2
n2 = ret
return ret
| [
"2904500451@qq.com"
] | 2904500451@qq.com |
8d9644e5e40a741f1d96890def3bb5c9b29ccf07 | 9f09a1f06d1e15e3d055515570ab8e0a09e3ee8c | /venv/bin/pip | 36f76d3146446e27cfbb4b727992c364b6ac4b11 | [] | no_license | lightman21/StudyPython | f5fd88e3ef665a763d85696bcbad9c344686a09f | 31b8d7ea6eae1fcd5cbf755e973429cf3138ef7d | refs/heads/dev | 2021-09-06T22:08:26.766814 | 2020-03-31T02:41:49 | 2020-03-31T02:41:49 | 75,256,602 | 0 | 0 | null | 2020-03-31T02:28:34 | 2016-12-01T04:56:57 | Python | UTF-8 | Python | false | false | 428 | #!/Users/toutouhiroshidaiou/keruyun/INTELLIJ_IDEA/PycharmProjects/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"tanghao@keruyun.com"
] | tanghao@keruyun.com | |
3fee7cbc5e3b9e29bbcbda870765094f56bca97e | 4d8aa86a3aa2b1076d9729ed59ace5d8614fc0f3 | /manage.py | 2e9aa7e0ee3f8d3e82dc3487c4153a26d3b0d199 | [] | no_license | hoangddt/demo-Activity-Stream | dcfb3843ed912ac5a8be6ce45739c2b5475fa4ed | 8ed2635099d14a7db851d80d23ae241f2328a910 | refs/heads/master | 2020-06-07T03:20:22.333485 | 2015-03-27T04:57:38 | 2015-03-27T04:57:38 | 32,942,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demoActivityStream.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"quochoangddt@gmail.com"
] | quochoangddt@gmail.com |
017d0a8ffb2b9b577b8ef976168c48995c63e689 | 6df76f8a6fcdf444c3863e3788a2f4b2c539c22c | /django code/p105/p105/settings.py | a76b29db32f2d0ab4cdf3fc1733f20e72b6b2894 | [] | no_license | basantbhandari/DjangoProjectsAsDocs | 068e4a704fade4a97e6c40353edb0a4299bd9678 | 594dbb560391eaf94bb6db6dc07702d127010b88 | refs/heads/master | 2022-12-18T22:33:23.902228 | 2020-09-22T13:11:01 | 2020-09-22T13:11:01 | 297,651,728 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,134 | py | """
Django settings for p105 project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*o3o*nsr8m66()euw(-%s1%0(y@(a$-bypjgao_uqbn1q=elc!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#userapp
'myapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'p105.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'p105.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"36443209+basantbhandari@users.noreply.github.com"
] | 36443209+basantbhandari@users.noreply.github.com |
181a61ee46d6d67b9d9b14ffcb61bdea932af50b | 28d08dfb4f76d39461fcff94215aa9af44438856 | /Lucas_Herbert/calibration_methods/2Dcalibration.py | 50a774a3d81fc39cbe70a207ec2aa0f91d0241aa | [] | no_license | NeoNarval/NeoNarval | 952a0db8b1d2895667e0edd2f445a742bb8532da | b4045f078ff3f46d1e7935f8ba4fbf9c9c7160b9 | refs/heads/master | 2021-01-25T13:34:20.133276 | 2018-10-23T13:26:22 | 2018-10-23T13:26:22 | 123,580,091 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,682 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python's modules imports
import pyfits
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
import pickle
import lmfit
from scipy.optimize import leastsq
from scipy.optimize import curve_fit
# Other module's imports
import calibration_methods.interpolated_conversion as cnv
import validation_methods.matching as mtch
import validation_methods.compute_order as cporder
import calibration_methods.itered_calibration as itrd
# Definition of global values
global order_len # length of an order
order_len = 4612
global order_total # number of orders
order_total = 36
"""
We will use the poly2D_fit and the itered_calibration algorithms to compute a new calibration, using our fitted coefficients for the conversion polynom. Those coefficients come from the iterated conversions and have been fitted and computed again to try to improve their precision.
The idea is to replace the actual conversion method, which uses the interpolated_conversion algorithms by a new one using th fitted coefficients, and use it in the itered_calibration alogrithms, so we will use previously made code and modify it a little.
"""
"""
At this point, we have computed the best possible precision order per order (the best we currently can do with our current ideas obvously). Now we will try to see if we can improve this precision of calibration by using 2D fits of the coefficients of the polynom which is used to convert wavelengths from Arturos to Angstroms. For now, we consider that each polynom used for the conversion only depends on the order, but maybe we can find a tendance, a law which gives an idea of the evolution of the polynomial coefficents according to the order. In other words, the parameter will now be the order number and the data to fit the coefficients. We will find a new law for each coefficient (each degree), giving the coefficients. We will use those to create the corresponding polynom and try to convert and match the original wavelengths with that. Then we will be able to tell if it is a way to improve the results or not.
The structure of the lists of coefficients used will be the following : a list of lists which will have for each order : [a0, a1,a2,etc] where ai is the coeff of degree P-i where P is the degree of the polynom. (higher degree first then the other, and degree 0 at the end of each list).
Inputs :
None.
Outputs :
- lists_of_fitted_coeffs : list of the fitted coeffs, order per order
"""
def polyfit_dispersion2D():
    """Fit each of the 6 conversion-polynom coefficients as a function of order.

    Loads the per-order interpolation coefficients from their pickle files,
    fits a degree-``p`` polynomial (in the order number) to each coefficient
    index, plots raw vs fitted values, records the fitted coefficients with
    pickle and returns them.

    Outputs :
    - lists_of_fitted_coeffs : one list per order holding its 6 fitted
      coefficients, highest degree first.
    """
    N = 34  # How many orders do you want to fit (the N first orders will be fitted)
    # Order of the polynomial fit :
    p = 20
    n_coeffs = 6  # each conversion polynom has 6 coefficients (degree 5)
    # lists_of_coeffs[d][order] : coefficient number d of the given order
    lists_of_coeffs = [[] for _ in range(n_coeffs)]
    # Importing the coefficients from the previously recorded pickles
    for i in range(N):
        coeffs_file = open("results/Interpolation coefficients/Interpolation_coefficients_order_"+str(i)+"_"+str(0.1)+"_"+str(0.1),'r')
        old_coeffs = pickle.load(coeffs_file)
        coeffs_file.close()
        for d in range(n_coeffs):
            lists_of_coeffs[d].insert(i, old_coeffs[d])
    indices = [i for i in range(N)]
    lists_of_fitted_coeffs = [ [] for order in range(N) ]
    for i in range(n_coeffs):
        try :
            coeffs = np.polyfit(indices,lists_of_coeffs[i],p)
        except Exception:
            # BUG FIX: the original bare "except" only printed a message and
            # then fell through to use an undefined (or stale) `coeffs`;
            # re-raise so the failure is not silently propagated.
            print("Polynomial fitting of the coefficients failed!")
            raise
        # Computation of the fit
        pol = np.poly1d(coeffs)
        fitted_coeffs = pol(indices)
        plt.figure(60+i)
        plt.title("Interpolation results")
        plt.plot(indices,lists_of_coeffs[i],'o',color='black')
        plt.plot(indices,fitted_coeffs,'o',color='purple')
        plt.show()
        for order in range(N):
            lists_of_fitted_coeffs[order].insert(i,fitted_coeffs[order])
    record_file = open("fitted_coeffs_degree"+str(p),'w')
    pickle.dump(lists_of_fitted_coeffs,record_file)
    record_file.close()
    return(lists_of_fitted_coeffs)
"""
This function will convert one order from Arturos to Angstroms, using the fitted coeffs computed thanks to poly2D_fit.
Inputs :
- order : number of the order to convert
- lists_of_fitted_coeffs : list of the whole coeffs after fitting by the polyfit_dispersion2D algorithm
Outputs :
- order_lambdas_Angstroms : list of converted wavelengths in Angstroms, using the 2D fit.
"""
def convert_arturo_angstroms2D(order,lists_of_fitted_coeffs):
    """Convert one order from Arturos to Angstroms using the 2D-fitted coeffs.

    Inputs :
    - order : number of the order to convert
    - lists_of_fitted_coeffs : per-order fitted coefficients produced by
      polyfit_dispersion2D (highest degree first)
    Outputs :
    - order_lambdas_Angstroms : converted wavelengths in Angstroms
    """
    print(" ")
    print(" _________________________ Converting _________________________ ")
    print(" ")
    # Selecting the right coeffs in the input list
    order_fitted_coeffs = lists_of_fitted_coeffs[order]
    # PERF FIX: generate only this order's Arturo-scale abscissas instead of
    # materialising the full order_total*order_len list and slicing it.
    order_lambdas_arturos = list(range(order_len*order, order_len*(order+1)))
    # Conversion using the 2D fit's results (the coeffs)
    pol = np.poly1d(order_fitted_coeffs)
    order_lambdas_Angstroms = pol(order_lambdas_arturos)
    return(order_lambdas_Angstroms)
"""
Now we have written the conversion function so we can basically use it the way we used the interpolated_conversion function. We are going to write the algorithm which will use this function, convert all orders' wavelengths, use those wavelengths and compute a matching to see if there is an improvement or not.
"""
"""
The following function will compute a conversion using the function above, and then compute the matching of a given order. It will plot the results so that we can compare the efficiency of the 2D fit versus the itered conversion.
Inputs :
- order : the number of the order to compute
Outputs :
- None
"""
def order_conversion2D(order):
    """Convert one order with the 2D fit and run the matching on it."""
    # Compute the fitted polynomial coefficients for every order.
    fitted_coeffs = polyfit_dispersion2D()
    # Convert the wavelengths for the requested order.
    order_lambdas = convert_arturo_angstroms2D(order,fitted_coeffs)
    print(order_lambdas)
    # Hand the converted wavelengths to the matching step through a pickle file.
    with open("temporary_file_for_2Dconversion",'w') as lambdas_file:
        pickle.dump(order_lambdas,lambdas_file)
    mtch.order_matching("temporary_file_for_2Dconversion",0.1,order,0.1)
    return(None)
def all_order_conversion2D(n):
    """Run the 2D conversion and matching for the n first orders."""
    for order_number in range(n):
        order_conversion2D(order_number)
"""
The following function computes the polynomial coefficients of the conversion polynom for the given order.
Inputs :
- order : the order
Outputs :
None.
"""
def coeffs(order):
    """Print the theoretical conversion-polynom coefficients for `order`.

    Computes the Taylor coefficients of the echelle grating dispersion
    relation for diffraction order m = 21 + order, then prints them next to
    the previously recorded interpolated coefficients for comparison.

    Inputs :
    - order : the order
    Outputs :
    None.
    """
    alpha=63.495*np.pi/180.   # blaze/incidence angle, radians
    gamma=0.6*np.pi/180.      # out-of-plane angle, radians
    G=79. # grooves/mm
    F=388. #mm focal length
    p=13.5e-3 # pixel size in mm (NOTE(review): 13.5e-3 mm = 13.5 micron, the old "12 micron" comment disagreed with the value — confirm)
    m = 21 + order
    # Multiply by 1e7 to convert from mm to Angstroms
    a0 = 1e7*(1.0/m)*2*np.cos(gamma)*np.sin(alpha)/G
    a1 = 1e7*(1.0/m)*np.cos(gamma)*np.cos(alpha)*p/G/F
    a2 = 1e7*-(1.0/m)*np.cos(gamma)*np.sin(alpha)*p**2/2/G/(F**2)
    a3 = 1e7*-(1.0/m)*np.cos(gamma)*np.cos(alpha)*p**3/6/G/(F**3)
    a4 = 1e7*(1.0/m)*np.cos(gamma)*np.sin(alpha)*p**4/24/G/(F**4)
    a5 = 1e7*(1.0/m)*np.cos(gamma)*np.cos(alpha)*p**5/120/G/(F**5)
    # Highest degree first, matching the np.poly1d coefficient ordering.
    print(a5,a4,a3,a2,a1,a0)
    # Load the recorded interpolated coefficients for the same order.
    coeffs_file = open("results/Interpolation coefficients/Interpolation_coefficients_order_"+str(order)+"_"+str(0.1)+"_"+str(0.1),'r')
    old_coeffs = pickle.load(coeffs_file)
    coeffs_file.close()
    print(old_coeffs)
"""
Function which cleans the screen by closing all the plots.
"""
def clean_plt():
    """Clean the screen by closing up to 100 open matplotlib figures."""
    for _ in range(100):
        plt.close()
"""
Another try of a different fit :
2D fit of the f(m,indice) : ex : f(1,10) = 0.5[f(0,10)+f(2,10)]
"""
def grid2D():
    """Cross 2D fit: evaluate each order's conversion polynom on a grid, then
    re-fit "vertically" across orders at every pixel index, and finally run
    the matching on each order of the re-fitted grid.

    Inputs :
    None.
    Outputs :
    None (results are printed and passed to mtch.order_matching).
    """
    # Creating the grid: one row per order, one column per pixel index.
    param = (34,order_len)
    grid = np.zeros(param)
    # Filling the grid
    for i in range(34):
        # Loading the better polynomial fit for each order
        coeffs_file = open("results/Interpolation coefficients/Interpolation_coefficients_order_"+str(i)+"_"+str(0.1)+"_"+str(0.1),'r')
        old_coeffs = pickle.load(coeffs_file)
        coeffs_file.close()
        #print(old_coeffs)
        order_polynom = np.poly1d(old_coeffs)
        # Global (Arturo-scale) pixel indices covered by this order.
        indices = [k for k in range(order_len*i , order_len*(i+1))]
        for j in range(order_len):
            grid[i,j] = order_polynom(indices[j])
    print(grid)
    # Now we have that big matrix containing all the informations about the conversions of all orders. We can easily use it to interpolate between the orders and find a new "vertical" fit. The result will be a 2D cross fit between the 1D horizontal fit for each order and the 1D vertical fit between the orders.
    orders = [o for o in range(34)]
    new_grid = np.zeros(param)
    for ind in range(order_len):
        vertical_values = [ grid[order,ind] for order in orders ] # Creating the list of values to fit for each ind
        # NOTE(review): if np.polyfit raises, the bare except below only
        # prints and execution continues with a stale/undefined `coeffs`.
        try :
            coeffs = np.polyfit(orders,vertical_values,10)
        except :
            print("Polynomial fitting failed!")
        # Computation of the fit
        pol = np.poly1d(coeffs)
        for o in orders :
            new_grid[o,ind] = pol(o)
    print(new_grid)
    # Now the new grid has been computed, we can compute a new matching for each order.
    for o in orders :
        order_lambdas = [ new_grid[o,ind] for ind in range(order_len) ]
        order_lambdas_file = open("temporary_file_for_vertical_fit",'w')
        pickle.dump(order_lambdas,order_lambdas_file)
        order_lambdas_file.close()
        order_matching_results = mtch.order_matching("temporary_file_for_vertical_fit",0.1,o,0.1)
    return(None)
| [
"lherbert@irap.omp.eu"
] | lherbert@irap.omp.eu |
07940eedfcae82a2bd43c8f195d612fcd579994d | b77dff442c658b4e99c69eb75bc3924adbbad231 | /scrap_api/urls.py | 75b00fbc5b6d85984fd7fdb994f371c8316286a1 | [] | no_license | JorgeQuetgo/scrap_api | 5a4378d8b7000e463d2bf285162bbdad7a9870c3 | acae2ea6f6674849778b1336d3db4906c63b2987 | refs/heads/master | 2023-02-14T03:06:11.318449 | 2021-01-08T05:33:24 | 2021-01-08T05:33:24 | 327,804,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,108 | py | """scrap_api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.urls import include
from rest_framework import routers
from django.contrib import admin
from django.urls import path
from api.views import ProductViewSet, PageScrappingViewSet
# DRF router for API v1; trailing_slash=False so endpoints are /v1/product,
# not /v1/product/.
api_v1 = routers.DefaultRouter(trailing_slash=False)
# Register the viewsets; basename is required because the viewsets define
# their own querysets/routing names.
api_v1.register(r'product', ProductViewSet, basename='product')
api_v1.register(r'page-scraping', PageScrappingViewSet, basename='pagescrap')
urlpatterns = [
    url(r'v1/', include(api_v1.urls))
]
| [
"jorge.giron@atentousa.com"
] | jorge.giron@atentousa.com |
e1c03e6fadbc3ee9a7f9236dd57623db274c8173 | ca3b853e212a8735f590ecc6a7dc5028754cdbd7 | /hangman.py | 2527ec9308bdbb4f5e63051986b1c27f48620e2e | [] | no_license | PranavAnand587/Hangman-Game | 498de872acfa326fd9034bfca3e1182a0c71a14c | f89dcb46bb82637cb316710f4a208cb8a1b492ee | refs/heads/master | 2022-06-01T14:20:24.902043 | 2020-04-28T14:19:38 | 2020-04-28T14:19:38 | 257,596,593 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,246 | py | """
The Hangman game uses premade ASCII art for the hangman figures, the logo and the GameOver banner
Proceed to https://ascii.co.uk/text to create ASCII art from text
The story of the game and the words used can be found in story.py and words.txt respectively
"""
import random
import sys, os, time # For typewriter animation
# colorama module lets you add colors in shell window
from colorama import init, Fore, Style
init()
# Imports story of the game
import story
# List of Hangman ASCII characters
hangmans = [Fore.GREEN + '''
+---+
|
|
|
===''' + Style.RESET_ALL, Fore.RED + '''
+---+
O |
|
|
===''' + Style.RESET_ALL, Fore.RED + '''
+---+
O |
| |
|
===''' + Style.RESET_ALL, Fore.RED + '''
+---+
O |
/| |
|
===''' + Style.RESET_ALL, Fore.RED + '''
+---+
O |
/|\ |
|
===''' + Style.RESET_ALL, Fore.RED + '''
+---+
O |
/|\ |
/ |
===''' + Style.RESET_ALL, Fore.RED + '''
+---+
O |
/|\ |
/ \ |
===''' + Style.RESET_ALL]
logo= Fore.GREEN + '''
==========================================================================
_
| |
| |__ __ _ _ __ __ _ _ __ ___ __ _ _ __
| '_ \ / _` | '_ \ / _` | '_ ` _ \ / _` | '_ \
| | | | (_| | | | | (_| | | | | | | (_| | | | |
|_| |_|\__,_|_| |_|\__, |_| |_| |_|\__,_|_| |_|
__/ |
|___/
==========================================================================
''' + Style.RESET_ALL
game_over = Fore.RED + '''
============================================================================================
______ _ ____ ____ ________ ___ ____ ____ ________ _______
.' ___ | / \ |_ \ / _||_ __ | .' `.|_ _| |_ _||_ __ ||_ __ \
/ .' \_| / _ \ | \/ | | |_ \_| / .-. \ \ \ / / | |_ \_| | |__) |
| | ____ / ___ \ | |\ /| | | _| _ | | | | \ \ / / | _| _ | __ /
\ `.___] |_/ / \ \_ _| |_\/_| |_ _| |__/ | \ `-' / \ ' / _| |__/ | _| | \ \_
`._____.'|____| |____||_____||_____||________| `.___.' \_/ |________||____| |___|
=============================================================================================
''' + Style.RESET_ALL
border="======================================================="
replayTxt=f'''
{border}
Would you like to play the game once more
Press:
1 -> to play again
Anything else -> to Exit
{border}> '''
# Get words from words.txt file
f = open('words.txt', 'r+')
data = f.readlines()
f.close()
# Convert the words into a list
def words_list():
    """Parse the lines read from words.txt into a flat list of words.

    BUG FIX: the original returned from inside the loop, so only the first
    line of words.txt was ever used (and an empty file returned None).
    """
    words = []
    for line in data:
        words.extend(line.split())
    return words
words= words_list()
# Get a random word
def get_random_word():
    """Pick a random word from the word list, lower-cased.

    BUG FIXES: `random.randint(0, len(words))` could produce an index one
    past the end (randint is inclusive on both bounds), and `word.lower()`
    discarded its result since strings are immutable.
    """
    word = random.choice(words)
    return word.lower()
# For typewriter animation
def typewriter(message):
    """Print `message` one character at a time for a typewriter effect."""
    for symbol in message:
        sys.stdout.write(symbol)
        sys.stdout.flush()
        # Short pause between characters drives the animation.
        time.sleep(0.05)
# Main Menu of the game
def main_menu():
    """Show the main menu, read a choice, and dispatch: 0 exits, 1 starts the
    game, anything else re-prompts recursively."""
    # Menu Text
    menu = f'''
    {border}
    Please select one of the following :-
        0 -> Exit
        1 -> Play The Game
    {border}
    '''
    print(menu)
    n = input("Enter your choice : ")
    if(n=='0'):
        typewriter(story.exitText)
        sys.exit() # Exits the game
    elif(n=='1'):
        _ = os.system("cls") if os.name=="nt" else os.system("clear") # Clears the screen according to os type
        typewriter(story.playText)
        game() # Plays the Game
    else:
        typewriter("Select something valid before the masked man pulls his trigger") # Handles Invalid Input
        main_menu()
# Updating status of Game
def game_status(blanks,guessed_words,lives):
    """Print the current round: masked word, remaining lives, guessed
    letters and the hangman picture for this life count."""
    masked = " ".join(blanks)        # the word with unguessed letters as '_'
    tried = " ".join(guessed_words)  # letters already attempted
    gallows = hangmans[6-lives]      # picture advances as lives drop
    print(f'''
    Word to Guess : {masked}
    No of Wrong Guesses left: {lives}
    Words Guessed already: {tried}
    The Hangman right now --> {gallows}''')
# Replaying the game
def replay_game():
    """Restart the game: clear the screen, show the logo, optionally replay
    the story, then start a new round."""
    # Clear the terminal with the platform-appropriate command.
    if os.name=="nt":
        os.system("cls")
    else:
        os.system("clear")
    print(logo)
    # NOTE(review): int(input(...)) raises ValueError on non-numeric input,
    # unlike main_menu which compares strings — confirm intended.
    choice = int(input('Press 1 to skip the story or anything else to continue : '))
    if choice != 1:
        typewriter(story.storyText)
    game()
# Main loop of the Game
# 1. Controls game logic
# 2. Shows Game status
# 3. Handles Input of Game
def game():
    """Main game loop: pick a word, then repeatedly read one-letter guesses,
    reveal matches, track wrong guesses, and handle win / game-over / replay."""
    # Total lives of player
    lives = 6
    # Register a word and an equivalent list of blanks
    word = list(get_random_word())
    blanks = list(len(word) * "_")
    guessed_words = [] #list of already guessed words
    while(lives > 0):
        # Shows current status of player
        game_status(blanks,guessed_words,lives)
        # Gettting Letter from User
        n = str(input("\n Guess a Letter for the Word : "))
        n = n.lower()
        # Handling correct guesses: reveal every position matching the letter
        for i in range(len(word)):
            if(n==word[i]):
                _ = os.system("cls") if os.name=="nt" else os.system("clear") # Clears the screen according to os type
                print(f"{border}\n Seems like your a bit lucky, '{n}' is in the word \n{border}")
                blanks[i]=n # Replacing the blank with word
        # Handling Invalid Input
        if(not n.isalpha() or len(n)>1):
            _ = os.system("cls") if os.name=="nt" else os.system("clear") # Clears the screen according to os type
            typewriter("\n Hitting your head with the gun he said: 'I want a letter'")
        #Handling already Guessed words
        elif(n in guessed_words):
            _ = os.system("cls") if os.name=="nt" else os.system("clear") # Clears the screen according to os type
            typewriter('''\nThe Masked man said: "You have already guessed this, guess something else or I'll pull the trigger right now"\n''')
        # Wrong Guess
        elif(n not in word):
            guessed_words.append(n)
            # GameOver logic: this was the last life
            if(lives==1):
                _ = os.system("cls") if os.name=="nt" else os.system("clear") # Clears the screen according to os type
                print(hangmans[6])
                typewriter(story.exitText)
                print(game_over)
                typewriter("\nThe word was : {}".format("".join(word))) # Displaying actual word
                _n = input(f'{replayTxt}') #Player Replay option
                # Handling Player replay input
                if(_n=='1'):
                    replay_game()
                else:
                    sys.exit()
            # Handling Wrong Guesses
            lives = lives - 1
            _ = os.system("cls") if os.name=="nt" else os.system("clear") # Clears the screen according to os type
            print(f"{border}\n '{n}' is not in the word, your death is nearing \n{border}")
        # Registering guessed words
        else:
            guessed_words.append(n)
        # When Player wins the game
        if(blanks==word):
            print(f'''{border} \nLook's like you WIN!!! Impossible, the word is : {"".join(word)} \n{border}''')
            n = input(f'{replayTxt}')# Player Replay Text
            # Handling Player replay input
            if(n=='1'):
                replay_game()
            else:
                sys.exit()
# Playing the Game
if __name__ == '__main__':
    # Entry point: show the logo, optionally play the intro story, then menu.
    print(logo)
    skip_story = input("Press 1 to skip story or anything else to continue : ")
    if(skip_story == '1'):
        main_menu()
    else:
        typewriter(story.storyText)
        main_menu()
| [
"pranav20172017@gmail.com"
] | pranav20172017@gmail.com |
ccd9a13878867c64b046d0c2430669e314344e6b | 259cc507d97bfeff84d21de3a0ab56640676a9eb | /venv1/Lib/site-packages/tensorflow/contrib/sparsemax/python/ops/sparsemax.py | ab6bdcb499055455ea400a70cb2c8dbe89ad712d | [
"MIT",
"Apache-2.0"
] | permissive | Soum-Soum/Tensorflow_Face_Finder | c3ef71b6f718f6720b80f8760d28b6ca6e11e6d2 | fec6c15d2df7012608511ad87f4b55731bf99478 | refs/heads/master | 2020-03-22T20:31:39.606644 | 2018-07-12T13:47:56 | 2018-07-12T13:47:56 | 140,607,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,667 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparsemax op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
__all__ = ["sparsemax"]
def sparsemax(logits, name=None):
  """Computes sparsemax activations [1].

  For each batch `i` and class `j` we have
    sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)

  [1]: https://arxiv.org/abs/1602.02068

  Args:
    logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
      `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`.
  """

  with ops.name_scope(name, "sparsemax", [logits]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    obs = array_ops.shape(logits)[0]
    dims = array_ops.shape(logits)[1]

    # Subtract the row mean; presumably for numerical stability — the
    # sparsemax projection is shift-invariant per row, so this should not
    # change the output (NOTE(review): confirm against the reference impl).
    z = logits - math_ops.reduce_mean(logits, axis=1)[:, array_ops.newaxis]

    # sort z in decreasing order along each row
    z_sorted, _ = nn.top_k(z, k=dims)

    # calculate k(z): the size of the support of the projection
    z_cumsum = math_ops.cumsum(z_sorted, axis=1)
    k = math_ops.range(
        1, math_ops.cast(dims, logits.dtype) + 1, dtype=logits.dtype)
    z_check = 1 + k * z_sorted > z_cumsum
    # because the z_check vector is always [1,1,...1,0,0,...0] finding the
    # (index + 1) of the last `1` is the same as just summing the number of 1.
    k_z = math_ops.reduce_sum(math_ops.cast(z_check, dtypes.int32), axis=1)

    # calculate tau(z): the threshold subtracted from every coordinate
    indices = array_ops.stack([math_ops.range(0, obs), k_z - 1], axis=1)
    tau_sum = array_ops.gather_nd(z_cumsum, indices)
    tau_z = (tau_sum - 1) / math_ops.cast(k_z, logits.dtype)

    # calculate p: clip the shifted coordinates at zero
    return math_ops.maximum(
        math_ops.cast(0, logits.dtype), z - tau_z[:, array_ops.newaxis])
| [
"pes.carceller@gmail.com"
] | pes.carceller@gmail.com |
2db88cda5432a11904ee7c347a5964e8bddae410 | 83d29ff0bdd29d3beff8b7dbf5ce7eb63708db15 | /entropy_estimators/main.py | 3d30957b37377cc1f51f84a2d50d713d1dc8a95b | [
"MIT"
] | permissive | Palpatineli/npeepy | 8fc9394afd704bee4eb7df105dfc9e013f32f424 | 1860d9a7eb27845089f58f3e00fc5a1195220f95 | refs/heads/master | 2021-07-13T03:51:33.897058 | 2019-12-11T22:55:26 | 2020-12-16T00:53:05 | 227,475,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,856 | py | """Non-parametric Entropy Estimation Toolbox
This package contains Python code implementing several entropy estimation
functions for both discrete and continuous variables.
Written by Greg Ver Steeg
See readme.pdf for documentation
Or go to http://www.isi.edu/~gregv/npeet.html
"""
from typing import Optional, Tuple
from scipy.spatial import cKDTree
from scipy.special import digamma as ψ
from math import log
import numpy as np
import warnings
__all__ = ["entropy", "mutual_info", "mutual_info_mixed", "kl_divergence", "shuffle_test"]
# CONTINUOUS ESTIMATORS
def _format_sample(x, jitter=True):
    # type: (np.ndarray, bool) -> np.ndarray
    """Coerce a sample to a 2D array, optionally jittered.

    1D input is promoted to a column vector; more than 2D is rejected.
    """
    sample = np.asarray(x)
    if jitter:
        sample = _jitter(sample)
    assert sample.ndim < 3, "x can only be 1D or 2D"
    return sample.reshape(-1, 1) if sample.ndim == 1 else sample
def _entropy(x, k=3, base=2):
    # type: (np.ndarray, int, float) -> float
    """Kozachenko-Leonenko k-nearest-neighbour differential entropy estimate.

    Estimates H(x) from samples; differential entropy can be negative when
    neighbours are very close.

    Args:
        x: list of vectors, e.g. [[1.3], [3.7], [5.1], [2.4]] for four
           samples of a scalar variable
        k: use the k-th neighbour
        base: unit of the returned entropy (2 -> bits, e -> nats)
    Returns:
        float: the entropy estimate
    """
    assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
    sample = _format_sample(x)
    n, dim = sample.shape
    eps = _neighbor(sample, k)
    # Constant term of the K-L estimator (Chebyshev-norm ball volume).
    bias = ψ(n) - ψ(k) + dim * log(2)
    return (bias + dim * np.log(eps).mean()) / log(base)
def entropy(x, y=None, k=3, base=2):
    # type: (np.ndarray, Optional[np.ndarray], int, float) -> float
    """k-NN differential entropy of x; if y is given, the conditional
    entropy H(x | y) = H(x, y) - H(y).

    Args:
        x, y: lists of vectors, e.g. [[1.3], [3.7], [5.1], [2.4]] for four
              samples of a scalar variable
        k: use the k-th neighbour
        base: unit of the returned entropy (2 -> bits, e -> nats)
    Returns:
        float: the (conditional) entropy estimate
    """
    if y is None:
        return _entropy(x, k=k, base=base)
    joint = _entropy(np.c_[x, y], k=k, base=base)
    return joint - _entropy(y, k=k, base=base)
def mutual_info(x, y, z=None, k=3, base=2):
    # type: (np.ndarray, np.ndarray, Optional[np.ndarray], int, float) -> float
    """KSG k-nearest-neighbour estimate of I(x; y), or I(x; y | z) when a
    conditioning sample z is supplied. The result is clipped at zero.

    Args:
        x, y: lists of vectors of equal length
        z: optional conditioning samples, same length as x and y
        k: use the k-th neighbour
        base: unit of the result
    Returns:
        float: the mutual information estimate (non-negative)
    """
    assert len(x) == len(y), f"Arrays must have same length: len(x) = {len(x)}, len(y) = {len(y)}"
    assert k <= len(x) - 1, f"Set k smaller than num. samples - 1, k = {k}, len(x) = {len(x)}"
    n = len(x)
    x = _format_sample(x)
    y = _format_sample(y)
    if z is None:
        joint = np.c_[x, y]
        eps = _neighbor(joint, k)
        estimate = ψ(k) + ψ(n) - _ψ_avg(x, eps) - _ψ_avg(y, eps)
    else:
        z = _format_sample(z, jitter=False)
        joint = np.c_[x, y, z]
        eps = _neighbor(joint, k)
        estimate = _ψ_avg(z, eps) + ψ(k) - _ψ_avg(np.c_[x, z], eps) - _ψ_avg(np.c_[y, z], eps)
    return (estimate / log(base)).clip(0, None)
def kl_divergence(x, x_prime, k=3, base=2):
    # type: (np.ndarray, np.ndarray, int, float) -> float
    """k-NN estimate of the KL divergence D(p || q) from samples x ~ p(x)
    and x_prime ~ q(x). Sample counts may differ; KL is not symmetric.

    Args:
        x, x_prime: lists of vectors with the same dimensionality
        k: use the k-th neighbour
        base: unit of the result
    Returns:
        float: the divergence estimate
    """
    assert k < min(len(x), len(x_prime)), "Set k smaller than num. samples - 1"
    assert len(x[0]) == len(x_prime[0]), "Two distributions must have same dim."
    n = len(x)
    d = len(x[0])
    m = len(x_prime)
    const = log(m) - log(n - 1)
    # NOTE(review): distances are computed within each sample set separately
    # (x_prime vs itself at order k-1) — confirm against the intended
    # cross-sample k-NN KL estimator.
    nn = _neighbor(x, k)
    nn_prime = _neighbor(x_prime, k - 1)
    return (const + d * (np.log(nn_prime).mean() - np.log(nn).mean())) / log(base)
def _entropy_discrete(x, base=2):
    # type: (np.ndarray, float) -> float
    """Plug-in entropy estimate for samples of a discrete variable, using
    the empirical frequencies count / total as probabilities.

    Args:
        x: a list of samples (rows are treated as values via axis=0)
        base: unit of the result
    Returns:
        float: the entropy estimate
    """
    _, counts = np.unique(x, return_counts=True, axis=0)
    prob = counts / len(x)
    return np.sum(prob * np.log(1. / prob)) / log(base)
def entropy_discrete(x, y=None, base=2):
    # type: (np.ndarray, Optional[np.ndarray], float) -> float
    """Discrete entropy of samples x; if y is given, the conditional entropy
    H(x | y) = H(x, y) - H(y).

    Args:
        x, y: lists of hashable samples; y (if given) conditions x
        base: unit of the result
    Returns:
        float: the (conditional) entropy estimate
    """
    if y is None:
        return _entropy_discrete(x, base=base)
    return _entropy_discrete(np.c_[x, y], base) - _entropy_discrete(y, base)
def mutual_info_mixed(x, y, k=3, base=2, warning=True):
    # type: (np.ndarray, np.ndarray, int, float, bool) -> float
    """Estimate the mutual information between continuous x and discrete y.

    Mutual information is symmetric, but the continuous variable must be
    passed first. Uses I(x; y) = H(x) - sum_y p(y) H(x | y = value).

    Args:
        x: list of samples from the continuous variable X
        y: list of samples from the discrete variable Y, same length as x
        k: use the k-th neighbour for the continuous entropies
        base: unit of the result
        warning: warn when some y value has too few samples
    Returns:
        float: the mutual information estimate
    """
    assert len(x) == len(y), "Arrays should have same length"
    x = np.asarray(x)  # boolean-mask indexing below requires an ndarray
    entropy_x = _entropy(x, k, base=base)
    y_unique, y_count, y_index = np.unique(y, return_counts=True, return_inverse=True, axis=0)
    if warning:
        insufficient = np.flatnonzero(y_count < k + 2)
        if len(insufficient) > 0:
            # BUG FIX: the original passed the joined string positionally to
            # str.format, which raises KeyError for the named field {yval}.
            warnings.warn("Warning: y=[{yval}] has insufficient data, "
                          "where we assume maximal entropy.".format(
                              yval=", ".join([str(a) for a in y_unique[insufficient]])))
    # H(x | y = value); fall back to H(x) when a value has too few samples
    # for the k-NN estimate (the "maximal entropy" assumption above).
    H_x_y = np.array([(_entropy(x[y_index == idx], k=k, base=base) if count > k else entropy_x)
                      for idx, count in enumerate(y_count)])
    # BUG FIX: the weighted conditional entropies must be summed over the y
    # values; the original returned an ndarray instead of the scalar
    # H(x) - sum_y p(y) H(x | y) promised by the docstring.
    return abs(entropy_x - np.sum(H_x_y * y_count) / len(y))
def _jitter(x, intensity=1e-10):
    # type: (np.ndarray, float) -> np.ndarray
    """Add tiny uniform noise to break degeneracy: identical coordinates
    confuse nearest-neighbour queries, and the noise distribution is
    irrelevant at this magnitude."""
    noise = np.random.random_sample(x.shape) * intensity
    return x + noise
def _neighbor(x, k):
    # type: (np.ndarray, int) -> np.ndarray
    """Distance from every sample to its k-th nearest neighbour (Chebyshev
    metric, p=inf).

    Args:
        x: a 2d array [n x m] with n samples, each m-dimensional
        k: which neighbour; the query asks for k+1 because each point is its
           own nearest neighbour at distance 0
    Returns:
        ndarray: 1D array of k-th neighbour distances, one per sample
    """
    # BUG FIX: cKDTree.query's `n_jobs` keyword was removed in SciPy 1.9;
    # `workers` (available since SciPy 1.6) is the supported spelling.
    # -1 uses all processors, as before.
    return cKDTree(x).query(x, k=k + 1, p=np.inf, workers=-1)[0][:, k]
def _ψ_avg(x, distances):
    # type: (np.ndarray, np.ndarray) -> float
    """Average digamma of marginal neighbour counts: E[ψ(n_x)].

    For each sample, counts the points of x within its given radius
    (Chebyshev metric) and averages ψ over those counts. The radius is
    shrunk by 1e-15 to exclude boundary points, which is equivalent to the
    usual n_x + 1 since the centre point itself is counted.

    Args:
        x: a 2d array [n x m] with n samples, each m-dimensional
        distances: 1D array [n] of k-th neighbour distances per sample
    Returns:
        float: the averaged digamma value
    """
    tree = cKDTree(x)
    counts = [len(tree.query_ball_point(point, radius, p=np.inf))
              for point, radius in zip(x, distances - 1E-15)]
    return np.mean([ψ(c) for c in counts])
# TESTS
def shuffle_test(measure,  # Callable[[np.ndarray, np.ndarray, Optional[np.ndarray]], float]
                 x,  # np.ndarray
                 y,  # np.ndarray
                 z=None,  # Optional[np.ndarray]
                 ns=200,  # int
                 ci=0.95,  # float
                 **kwargs):
    # type: (...) -> Tuple[float, Tuple[float, float]]
    """Permutation (shuffle) test for an information measure.

    Shuffles x so it is uncorrelated with y, re-estimates `measure` ns times
    and returns the mean together with the two-sided `ci` confidence
    interval. Real (unshuffled) values above that interval are unlikely to
    arise by numerical chance alone.

    Args:
        measure: estimator taking (x, y) or (x, y, z)
        x, y: samples passed to the estimator
        z: optional conditioning samples forwarded to `measure`
        ns: number of shuffles
        ci: two-sided confidence level
        kwargs: extra keyword arguments for `measure`
    Returns:
        (float, (float, float)): mean value, (lower, upper) CI bounds
    """
    x_clone = np.copy(x)  # a copy that we can shuffle freely
    outputs = []
    for _ in range(ns):
        np.random.shuffle(x_clone)
        # BUG FIX: the original tested `if z`, which raises "truth value is
        # ambiguous" for ndarray z and wrongly skips z for empty/zero values;
        # the correct presence test is `z is not None`.
        if z is not None:
            outputs.append(measure(x_clone, y, z, **kwargs))
        else:
            outputs.append(measure(x_clone, y, **kwargs))
    outputs.sort()
    return np.mean(outputs), (outputs[int((1. - ci) / 2 * ns)], outputs[int((1. + ci) / 2 * ns)])
| [
"mail@keji.li"
] | mail@keji.li |
2b31065c5316468ba5ddc5b3a99de4a2cee3ce09 | 850e8064c6d911c11970f07fdfe02e7e5fe26910 | /dist_sq_2.py | 1b61351c31fed66dca22ace6c228f05ccac8b2ef | [] | no_license | denisshustov/map_test | b1070c7bf487b4425fd4c5c5afb057ccac160858 | 5ce69ffe28b658b6023a7e5e2732f6809aa7b11b | refs/heads/main | 2023-04-05T06:33:21.906214 | 2021-04-12T16:30:21 | 2021-04-12T16:30:21 | 347,666,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,399 | py | import cv2
import numpy as np
import os
import random
from numpy.lib.stride_tricks import as_strided
import math
from collections import deque
obsticle = 0
class Node:
def __init__(self,x,y,left,right,up,down,id):
self.children = []
self.parent = None
self.x = x
self.y = y
self.left = left
self.right = right
self.up = up
self.down = down
self.id = id
def show_list(self, showChildren=True):
for n in self.children:
n.show(True, False)
def get_coord(self):
return (self.x-self.left, self.y-self.up, self.x+self.right, self.y+self.down)
def show(self, showChildren=True, ignore_childern=False):
if len(self.children)>0 or not ignore_childern: # or True:
x1 = self.x-self.left
y1 = self.y-self.up
x2 = self.x+self.right
y2 = self.y+self.down
cv2.rectangle(img,(x1,y1),(x2,y2),(31,255,0),1)
c1x = int(x1 + (abs(x1-x2)/2))
c1y = int(y1 + (abs(y2-y1)/2))
cv2.rectangle(img,(c1x,c1y),(c1x,c1y),(255,0,0),1)
font = cv2.FONT_HERSHEY_SIMPLEX
#cv2.putText(img, str(self.id), (c1x,c1y), font, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
# cv2.putText(img, str(self.x) + ':'+str(self.y), (c1x,c1y), font, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
if showChildren and len(self.children)>0:
self.show_list()
def get_central_point(self):
x = self.x-self.left
y = self.y-self.up
x2 = self.x+self.right
y2 = self.y+self.down
c_x = int(x + (abs(x-x2)/2))
c_y = int(y + (abs(y-y2)/2))
return (c_x,c_y)
@property
def width(self):
return self.left+self.right+1
@property
def height(self):
return self.up+self.down+1
def area(self):
return (self.width)*(self.height)
def dist(self, other):
c_point = self.get_central_point()
c_x = c_point[0]
c_y = c_point[1]
c_point_2 = other.get_central_point()
o_c_x = c_point_2[0]
o_c_y = c_point_2[1]
if abs(c_x - o_c_x) <= (self.width + other.width):
dx = 0
else:
dx = abs(c_x - o_c_x) - (self.width + other.width)
if abs(c_y - o_c_y) <= (self.height + other.height):
dy = 0
else:
dy = abs(c_y - o_c_y) - (self.height + other.height)
return dx + dy
def __eq__(self,other):
return other!=None and self.x==other.x and self.y==other.y and self.left==other.left and self.right==other.right \
and self.up==other.up and self.down==other.down
def getWindow(arr, x, y, border=0, left=0, right=0, up=0, down=0):
    """Return the sub-array of `arr` around centre (x, y).

    The window spans left/right columns and up/down rows around the centre
    (all inclusive); `border` enlarges every side by the same amount. Edges
    are clamped to the array bounds.
    """
    if border != 0:
        left += border
        right += border
        up += border
        down += border
    row_start = max(y - up, 0)
    col_start = max(x - left, 0)
    return arr[row_start:y + down + 1, col_start:x + right + 1]
def getNeibors(node, used):
    """Return the rectangles from `used` lying within distance 1 of `node`.

    Each entry of `used` is an (x, y, left, right, up, down) tuple; matches
    are returned with the measured distance appended.
    """
    neighbors = []
    for cand in used:
        cx, cy, cleft, cright, cup, cdown = cand
        # NOTE(review): this skips candidates sharing either coordinate with
        # `node` — presumably meant to exclude the node itself; confirm.
        if node.x == cx or node.y == cy:
            continue
        gap = node.dist(Node(cx, cy, cleft, cright, cup, cdown, -1))
        if 0 <= gap <= 1:
            neighbors.append((cx, cy, cleft, cright, cup, cdown, gap))
    return neighbors
def getChain(used, node, id):
    """Recursively attach neighbouring boxes from `used` as children of `node`.

    Consumes entries from `used` (deletes each box once adopted) and
    returns the last id assigned.  NOTE(review): `id` shadows the builtin;
    kept because callers pass it positionally.
    """
    id +=1
    neibors = getNeibors(node,used)
    # Remove this node's own box from the pool so it is not re-adopted.
    if (node.x,node.y,node.left,node.right,node.up,node.down) in used:
        idx = used.index((node.x,node.y,node.left,node.right,node.up,node.down))
        del used[idx]
    if len(neibors)>0:
        for x_,y_,left_,right_,up_,down_,_ in neibors:
            child_node = Node(x_,y_,left_,right_,up_,down_,id)
            child_node.parent = node
            node.children.append(child_node)
            # Depth-first: the child adopts its own neighbours first.
            id = getChain(used, child_node, id)
            # The child's box may still be in the pool; drop it now.
            if (x_,y_,left_,right_,up_,down_) in used:
                idx = used.index((x_,y_,left_,right_,up_,down_))
                del used[idx]
    return id
def get_rect(arr,x,y,left,right,up,down,order):
    """Greedily grow a rectangle around (x, y) until blocked on all sides.

    `order` is a permutation of [0..3] mapping growth phases to sides
    (left, right, up, down); growth stops on a side when the array edge
    is reached or the window would include an `obsticle` value (module
    global).  Returns the final (x, y, left, right, up, down).
    """
    cond_counter = 0
    while True:
        # Grow the side selected by the current phase, or advance the
        # phase when the array edge blocks further growth.
        if cond_counter==order[0]:
            if (x - left-1) > 0:
                left+=1
            else:
                cond_counter+=1
        elif cond_counter==order[1]:
            if (x + right + 1) < arr.shape[1]:
                right+=1
            else:
                cond_counter+=1
        elif cond_counter==order[2]:
            if (y - up-1) > 0:
                up+=1
            else:
                cond_counter+=1
        elif cond_counter==order[3]:
            if (y + down + 1) < arr.shape[0]:
                down+=1
            else:
                cond_counter+=1
        w_out = getWindow(arr,x,y,0,left,right,up,down)
        # An obstacle entered the window: undo the last growth step for
        # the active side and move to the next phase.
        if obsticle in w_out.flatten():
            if cond_counter==order[0]:
                left-=1
            elif cond_counter==order[1]:
                right-=1
            elif cond_counter==order[2]:
                up-=1
            elif cond_counter==order[3]:
                down-=1
            cond_counter+=1
        if cond_counter==4:
            break
    return x,y,left,right,up,down
def hz(arr):
    """Extract rectangles from a (normalised) distance-transform image.

    Repeatedly seeds at the current argmax of `arr`, grows a window until
    it hits an `obsticle` value, refines it with get_rect in two side
    orders (keeping the larger), stamps the region with `obsticle`, and
    records it when it exceeds `more_than` on some side.  Stops once the
    maximum remaining value drops below 0.005.  Returns a deque of
    (x, y, left, right, up, down) boxes.  Mutates `arr` in place.
    """
    result = deque()
    k=0
    left=0
    right=0
    up=0
    down=0
    more_than = 1  # minimum side extent for a box to be kept
    y,x = np.unravel_index(arr.argmax(), arr.shape)
    while True:
        if np.amax(arr)< 0.005:
            break
        # Probe with a 1-pixel border: stop growing once an obstacle is
        # adjacent to the current window.
        w_out = getWindow(arr,x,y,1,left,right,up,down)
        if obsticle in w_out.flatten():
            w_out = getWindow(arr,x,y,0,left,right,up,down)
            if not obsticle in w_out.flatten():
                # Refine in two opposite side orders and keep the larger box.
                x1,y1,left1,right1,up1,down1 = get_rect(arr, x,y,left,right,up,down,[0,1,2,3])
                x2,y2,left2,right2,up2,down2 = get_rect(arr, x,y,left,right,up,down,[3,2,1,0])
                if (left1+right1+up1+down1)>(left2+right2+up2+down2):
                    x,y,left,right,up,down=x1,y1,left1,right1,up1,down1
                else:
                    x,y,left,right,up,down=x2,y2,left2,right2,up2,down2
                w_out = getWindow(arr,x,y,0,left,right,up,down)
            if left>more_than or right>more_than or up>more_than or down>more_than:
                result.append((x, y, left, right, up, down))
                w_out[:,:]=obsticle
            else:
                # Too small to keep, but still stamp it so the argmax moves on.
                # used.append((x, y, left, right, up, down))
                w_out[:,:]=obsticle
            # Re-seed at the next-strongest remaining pixel.
            y,x = np.unravel_index(arr.argmax(), arr.shape)
            left=0
            right=0
            up=0
            down=0
        else:
            # No obstacle nearby: expand every side that stays in bounds.
            if (x - left-1) > 0:
                left+=1
            if (x + right + 1) < arr.shape[1]:
                right+=1
            if (y - up) > 0:
                up+=1
            if (y + down + 1) < arr.shape[0]:
                down+=1
        # if k>181:
        #     break
        k+=1
    return result
# Load the map image, threshold it, take a distance transform, carve the
# free space into rectangles (hz), chain adjacent rectangles into Node
# trees, and display the results.
# NOTE(review): the Windows paths below contain unescaped backslashes
# ('\P', '\m'); they happen not to form escape sequences here, but raw
# strings (r'f:\Project\...') would be safer.  They are runtime strings,
# so they are left untouched.
img = cv2.imread('f:\Project\map\mymap_22.jpg')#
#img = cv2.imread('f:\Project\map\mymap_222.jpg')#
#img = cv2.imread('f:\Project\map\mymap_223.jpg')#
#img = cv2.imread('f:\Project\map.jpg')
#img = cv2.imread('f:\Project\map_m.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,threshed = cv2.threshold(gray, 205, 255, cv2.THRESH_BINARY)
kernel = np.ones((2,2),np.uint8)
# L1 distance to the nearest zero (obstacle) pixel, normalised to [0, 1].
dist_transform = cv2.distanceTransform(threshed, cv2.DIST_L1 ,3)
cv2.normalize(dist_transform, dist_transform, 0, 1.0, cv2.NORM_MINMAX)
# threshed_2 = threshed.copy()
# Keep an untouched copy for display; hz() mutates its argument.
dist_transform_2 = dist_transform.copy()
used=hz(dist_transform)
nodes=[]
# NOTE(review): `id` shadows the builtin; getChain's signature expects it.
id = 0
while len(used)>0:
    node=Node(*used.pop(),id)
    getChain(used, node, id)
    if len(node.children)>0:
        nodes.append(node)
# i=0
for n in nodes:
    n.show(True,True)#
#     if i>=1110:
#         break
#     i+=1
# cv2.imshow("drawCntsImg.jpg", dist_transform)
cv2.imshow("drawCntsImg552.jpg", dist_transform_2)
cv2.imshow("drawCntsImg4.jpg", img)
# cv2.imshow("drawCntsImg1.jpg", threshed_2)
cv2.imshow("drawCntsImg2.jpg", threshed)
cv2.waitKey(0)
"noreply@github.com"
] | denisshustov.noreply@github.com |
9f8803860d4a2ef74ee0010d0cf27154fd22972a | 456b54f862cc2f4fdab05916ad780ae41e2be595 | /quest.py | a32af833f45092788c73ac16e6ffff61fb892ec5 | [] | no_license | lumorinus/pytest | f6030645e5b99ba0e4d08248fde55bbfbe603ecd | 36d723869c29c156c26227fbed488fa0f6a21f86 | refs/heads/master | 2022-11-22T09:40:08.714525 | 2020-07-25T10:12:48 | 2020-07-25T10:12:48 | 282,391,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | import random
import time
def min(param1_list):
    """Return the smallest element of a non-empty list.

    NOTE: intentionally shadows the builtin `min` — callers in this
    script rely on the name.
    """
    smallest = param1_list[0]
    for candidate in param1_list[1:]:
        if candidate < smallest:
            smallest = candidate
    return smallest
return asd
# Run ten rounds: build a list of ten random ints in [0, 10] and print
# its minimum using the local `min` above.
# NOTE(review): "MIN NUMBE" is a typo in a runtime string; left as-is
# here since this edit only adds comments.
for _ in range(0,10):
    numlist = []
    for x in range(0,10):
        numGen = random.randint(0,10)
        numlist.append(numGen)
    print(numlist)
    ans = min(numlist)
    print("MIN NUMBE :" , ans)
| [
"noxbillied@gmail.com"
] | noxbillied@gmail.com |
64a486d1e198ce36a749452f7e58310571fb052c | 33e265aadb77dd97aecab408ac7bfb0ddf6f2ba9 | /CodeGoesHere/Budgeter/helpers.py | 40c51b5961ced8315677f64dd0de5391d0d6825d | [] | no_license | CarlBlacklock/JASCHD | 8e711566c1aac68a795fd5e8d4f15455e4127d40 | 7f8c4094ba8ea16377727ab0773236641baf314a | refs/heads/master | 2021-01-10T18:13:16.894948 | 2016-02-07T18:58:55 | 2016-02-07T18:58:55 | 51,183,673 | 0 | 2 | null | 2016-02-06T23:49:17 | 2016-02-06T01:00:39 | CSS | UTF-8 | Python | false | false | 1,657 | py | import Budgeter.models as model
from django.db import models
import csv
from decimal import *
#helper functions to handle various data processing functions
def getTransactionsFromFile(path_to_file):
    """Load a tab-delimited transactions export as a list of row dicts.

    Each row is keyed by the file's header line.  The file handle is now
    closed deterministically via a context manager — the original passed
    a bare ``open()`` into DictReader and leaked the handle.  ``newline=''``
    follows the csv-module recommendation for correct newline handling.
    """
    with open(path_to_file, newline='') as csv_file:
        return list(csv.DictReader(csv_file, delimiter='\t'))
def transactionFormatAndSave(rawTransactions, user_id):
    """Persist debit rows as Transaction records for `user_id`.

    Each row's 'DATE' field ("M/D/YYYY") is reformatted to ISO
    "YYYY-MM-DD" with zero-padded month/day; rows with an empty 'DEBIT'
    column are skipped.  NOTE(review): assumes the Transaction model has
    a `user_name` field taking `user_id` — confirm against the model.
    """
    for row in rawTransactions:
        dateString = row['DATE']
        tokenizedString = dateString.split('/')
        # Zero-pad the day component.
        if len(tokenizedString[1]) == 1:
            formatedDay = '0'+tokenizedString[1]
        else:
            formatedDay = tokenizedString[1]
        # Zero-pad the month component.
        if len(tokenizedString[0]) == 1:
            formatedMonth = '0'+tokenizedString[0]
        else:
            formatedMonth = tokenizedString[0]
        formatedDate = tokenizedString[2]+'-'+formatedMonth+'-'+formatedDay
        # Only debit rows are stored.
        if row['DEBIT'] != '':
            newTransaction = model.Transaction(transaction_date = formatedDate, merchant_name = row['TRANSACTION DETAILS'], transaction_amount = Decimal(row['DEBIT']), user_name = user_id)
            newTransaction.save(force_insert = True)
def transactionCatagorization(user_id, monthToCatagorize):
    """Intended to total a user's spending per category.

    NOTE(review): this function as written cannot run —
    - `merchant_name` and `transaction_amount` are used as bare names
      (NameError); they were presumably meant to be string keys or
      attribute accesses on `entry`.
    - the dict is keyed by category *objects* in the first loop but
      indexed by `merchant_name` in the second — inconsistent keys.
    - `objects.filter(user_id)` passes a positional arg where a keyword
      lookup (e.g. `user_name=user_id`) is presumably intended.
    - `monthToCatagorize` is never used.
    Left byte-identical pending a decision on intended behavior.
    """
    all_catagories = model.TransactionCatagories.objects.all()
    catagorized_spending = {}
    for entry in all_catagories:
        catagorized_spending[entry] = Decimal(0.00)
    user_transactions = model.Transaction.objects.filter(user_id)
    for entry in user_transactions:
        catagory = model.TransactionCatagories.objects.filter(entry[merchant_name])
        catagorized_spending[merchant_name] = Decimal(catagorized_spending[merchant_name]) + Decimal(user_transactions[transaction_amount])
    return catagorized_spending
"carl.alex.blacklock@gmail.com"
] | carl.alex.blacklock@gmail.com |
4be52c0b307abcd487ffcee5502157e10511c5eb | 13f7f33db44d3a916ad971b0f40eac07b475c6e1 | /dsalgo/cs/new_ds/minimum_spanning_tree.py | 1999fb23d1aa7c8f4392a10ef3d2e4bbaa9c2b2e | [] | no_license | arcarchit/mit-ds-algo | 66753c1cfe699792c0f3245dbedae2e4f4c58624 | 4bf56d39d15900f9c6f10b0ea4b4250658860645 | refs/heads/master | 2021-06-05T06:06:30.795094 | 2021-04-18T09:09:09 | 2021-04-18T09:09:09 | 139,175,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,246 | py | """
What is minimum spanning Tree?
== It can be computed for connected, undirected and weighted graph
== Tree which connects all the nodes and sum of edges are minimum
Application
Network design : connecting your offices in different cities
Kruskal's algorithm is greedy and yet optimal.
COMPLEXITY ?
Sorting of Edge take O(E log E)
Union find takes O(log V) at most, we do it atmost E times in a loop, hence O(E log V)
total = O(E log E + E log V)
Values of E can be almost V^2
O(E log E) = O( E log V^2) = O ( 2 * E * log V) = O(E log V)
Hence time complexity is either O(E log E) or O(E log V)
"""
from collections import defaultdict
class Graph:
def __init__(self):
self.graph = defaultdict(list)
def addEdge(self, u, v, w):
self.graph[u].append((v, w))
self.graph[v].append((u, w))
def has_cycle(self, edges_so_far, new_edge):
"""
We will use union find
Iterate through all edges
make union of vertices
For a new edge if we find both vertice are in same set, it is cyclic
:param edges:
:return:
"""
edges = []
edges.extend(edges_so_far)
edges.append(new_edge)
dicc = {}
def union(x, y):
if x not in dicc:
dicc[x] = x
if y not in dicc:
dicc[y] = y
parent_x = find_parent(x)
parent_y = find_parent(y)
dicc[parent_x] = parent_y
def find_parent(x_in):
x = x_in
if x not in dicc:
dicc[x] = x
parent_x = dicc[x]
while parent_x != x:
x = parent_x
parent_x = dicc[x]
dicc[x_in] = parent_x
return parent_x
for e in edges:
x, y, _ = e
parent_x = find_parent(x)
parent_y = find_parent(y)
if parent_x == parent_y:
return True
union(x, y)
return False
def get_mst(self):
"""
Return List of (u, v, w)
:return:
"""
all_edges = []
edges_added = set()
for x in self.graph:
neighbour_list = self.graph[x]
for y, w in neighbour_list:
edge1 = (x, y)
edge2 = (y, x)
if edge1 in edges_added or edge2 in edges_added:
continue
edges_added.add((x, y))
all_edges.append((x, y, w))
all_edges = sorted(all_edges, key=lambda x:x[2])
no_of_vertice = len(self.graph)
mst_edges = []
edge_count = no_of_vertice # MST has edges = no_of_vertice - 1
for e in all_edges:
if self.has_cycle(mst_edges, e):
pass
else:
mst_edges.append(e)
if len(mst_edges) == edge_count:
break
return mst_edges
def main():
g = Graph()
g.addEdge(0, 1, 10)
g.addEdge(0, 2, 6)
g.addEdge(0, 3, 5)
g.addEdge(1, 3, 15)
g.addEdge(2, 3, 4)
mst_edges = g.get_mst()
print "\nEdges in MST are : \n"
print mst_edges
if __name__=="__main__":
main() | [
"vora@adobe.com"
] | vora@adobe.com |
66bd04773fbf4efbf89a682cc63b8fc89b4de05e | 5daf1e78b8f596fbf9913372d7c4dc2a47c7881b | /question1.py | 197311566b8a88fa52ac7c0451d13c6b251654e7 | [] | no_license | sai-gopi/pythonpractice_2021 | f182f60bc08bbd8869f5d24cdd63d9002e41d967 | a1277299a8268634171c058bba2cff23fd39ab6f | refs/heads/main | 2023-06-30T18:15:06.721200 | 2021-08-02T18:05:46 | 2021-08-02T18:05:46 | 390,918,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | a = int(input("enter a: "))
b = int(input("enter b: "))
print(a + b)
print(a - b) | [
"saigopi.2608@gmail.com"
] | saigopi.2608@gmail.com |
3970c98c713bdccc733ef52ea22407c6e55af3d1 | 2e72e843b74ed385aff12710e854703f006830ef | /python-code/36.二叉搜索树与双向链表.py | da6807b04bdf1735c81dce2bcb42ddf2484abb3e | [] | no_license | t-dawei/offer-code | 4d38a8bef207278c35ebc4a4e5430ea18d101b82 | ae8a4b84f68843e031a4f946d645ee7b31f1c935 | refs/heads/master | 2020-05-01T15:11:52.916048 | 2019-09-03T01:30:25 | 2019-09-03T01:30:25 | 177,540,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# @author: T
'''
题目:
输入一棵二叉搜索树,将该二叉搜索树转换成一个排序的双向链表。
要求不能创建任何新的结点,只能调整树中结点指针的指向。
解题思路一:
由于输入的一个二叉搜索树,其左子树不大于右子树的值,这位后面的排序做了准备,
因为只需要中序遍历即可,将所有的节点保存到一个列表,。
对这个list[:-1]进行遍历,每个节点的right设为下一个节点,下一个节点的left设为上一个节点。
借助了一个O(n)的辅助空间
'''
class TreeNode:
    """A binary-tree node: a value plus left/right child links."""

    def __init__(self, val):
        self.val = val      # node payload
        self.left = None    # left child, set by the builder
        self.right = None   # right child, set by the builder
class Solution:
    """Convert a binary search tree into a sorted doubly-linked list.

    In-order traversal of a BST visits nodes in ascending order; the
    collected nodes are then rewired pairwise (right = next, left =
    previous) without allocating new nodes, as the problem requires.
    """

    def __init__(self):
        # Nodes collected in in-order (ascending) sequence.
        # Fix: the original assigned `self.attr = []` at class scope,
        # which raises NameError the moment the class is defined.
        self.attr = []

    def conver(self, root):
        """Return the head of the doubly-linked list built from `root`.

        Returns None for an empty tree.
        """
        if not root:
            return
        self.inorder(root)
        for i, node in enumerate(self.attr[:-1]):
            node.right = self.attr[i + 1]   # forward link to next greater
            self.attr[i + 1].left = node    # back link to previous
        return self.attr[0]

    def inorder(self, root):
        """Append nodes of the subtree to self.attr in in-order sequence."""
        if not root:
            return
        self.inorder(root.left)
        self.attr.append(root)  # fix: original called self.append(root)
        self.inorder(root.right)
| [
"t_dawei@163.com"
] | t_dawei@163.com |
936e9d105f4efefcae2256e3b9539e416e9f4db6 | f268f40624d0b828775e5e11ffd04ee5713e6367 | /1374A.py | 27dc7ee91d0fcf171386759d732dc3bfcaaf5836 | [] | no_license | Janvi-Sharma/CF-solutions | 63bc261350fed2d3428c1b65722c4d07f98e51c7 | 05835df2d3d84621608a5d4bd473399f08edd5ed | refs/heads/main | 2023-08-28T01:18:21.115344 | 2021-10-31T15:52:04 | 2021-10-31T15:52:04 | 423,191,129 | 0 | 0 | null | 2021-10-31T15:52:05 | 2021-10-31T15:50:51 | null | UTF-8 | Python | false | false | 108 | py | for _ in range(int(input())):
a,b,c=map(int,input().split())
p=(c-b)//a
q=p*a+b
print(q) | [
"noreply@github.com"
] | Janvi-Sharma.noreply@github.com |
af95fbdf2269bdc08e1f0ab6cf01be96dd9c0e6e | cd924a75c9dc5d5845c8e3e6a15488c32016ed7a | /relations/views.py | d5feffd5808093e007317d87223c6aa987923e26 | [] | no_license | kdagley/relations | 38c89711539a2257f19ea685b9ad47148b52ba72 | 884d745f5cddf562c39389d8e87f6c2b9e13ce70 | refs/heads/master | 2021-01-19T11:03:10.191232 | 2015-09-21T20:08:15 | 2015-09-21T20:08:15 | 42,819,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | # -*- coding: utf-8 -*-
from django.shortcuts import render
def home(request):
    """Render the relations landing page with an empty context."""
    context = {}
    return render(request, "relations/index.html", context)
| [
"git@dagley.net"
] | git@dagley.net |
1a0931cd3eb62af8ed60a9e7d4a98bdede1849e1 | 11fd71011702af86941f1fae298e02d5a5c01a65 | /venv/bin/chardetect | 1965cf164909f72945fa75ab73be35ac9e72ee97 | [] | no_license | 15851826258/UNSW_courses_XinchenWang | ba335726b24b222692b794d2832d0dbfb072da97 | 98b4841e7425a22cb6ba66bee67dbb2b8a3ef97e | refs/heads/master | 2022-11-30T05:28:24.886161 | 2020-08-11T10:37:49 | 2020-08-11T10:37:49 | 286,715,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | #!/Users/wangxinchen/PycharmProjects/untitled/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"wangxinchen123@qq.com"
] | wangxinchen123@qq.com | |
09e5f56501334e4bab2390e4d8924fae6ae80302 | 375dcf69d0dc2679a562a2a7c18525cd2159e195 | /helium/chapter-7/main.py | b58057b664bca7a194c06fe2346d1bdc1f27bec5 | [] | no_license | horrendous-git/helium | 9b9b7ba4fe977996b6a2b59c505a3786c563e6d4 | 46250504af22d86f8f6b5a831137e4fcf0f94808 | refs/heads/master | 2021-01-20T21:53:01.221499 | 2015-06-15T21:36:40 | 2015-06-15T21:36:40 | 37,223,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py |
from grid import *

# Build a 4x4 Wumpus-World style grid, place the wumpus, three pits and
# the gold, then print the resulting board.
grid = Grid(4)
grid.set_wumpus((0,2))
grid.set_pit((2,0))
grid.set_pit((2,2))
grid.set_pit((3,3))
grid.set_gold((1,2))
grid.print_grid()
| [
"horrendous.git@gmail.com"
] | horrendous.git@gmail.com |
9bf49d5b304ee774660035f99af8bd00248a54cd | ea71668b77b147d85551aace47ef55bd3bb2a962 | /rotational_gradient.py | 48fac8ce5f35a7975fe94fe3ce564ecf1ca14b0f | [] | no_license | sohils/Catadioptric-Object-Detection | bac946cc68c617051c344cd324ecb4e08f976133 | c5aefccfcc463782bccf3f896ddcbed9085e94e0 | refs/heads/master | 2020-05-01T02:24:39.042071 | 2019-03-22T22:40:53 | 2019-03-22T22:40:53 | 177,218,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,508 | py | import numpy as np
import cv2
def main():
    """Compute and display radial/tangential gradient images.

    Reads a grayscale image, zero-pads it by one pixel, and for every
    pixel rotates a Sobel pair by the angle from the padded image's
    centre to that pixel, producing a radial and a tangential gradient
    response.  NOTE(review): the per-pixel 3x3 filter construction is
    O(H*W) numpy allocations and is very slow for large images; the
    sign convention (-cos for radial vs +cos for tangential) should be
    confirmed against the intended definition of "radial".
    """
    img = cv2.imread('IMG_8693.JPG',0)
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    padded_img = np.pad(img, 1, 'constant', constant_values=0)
    img_shape = img.shape
    # Centre of the padded image; float division keeps sub-pixel centre.
    centre = [i/2 for i in padded_img.shape]
    # Standard Sobel kernels; filter_y is the transpose of filter_x.
    filter_x = np.array([[-1, 0, 1],
                        [-2, 0, 2],
                        [-1, 0, 1]])
    filter_y = np.transpose(filter_x)
    gradient_radial_image = np.zeros(img_shape)
    gradient_tangential_image = np.zeros(img_shape)
    for i in range(1,padded_img.shape[0]-1):
        print(i)
        for j in range(1,padded_img.shape[1]-1):
            # Angle from the image centre to pixel (i, j).
            theta = np.arctan2(i-centre[0], j-centre[1])
            radial_filter = -np.cos(theta)*filter_x + np.sin(theta)*filter_y
            tangential_filter = np.cos(theta)*filter_x + np.sin(theta)*filter_y
            gradient_radial_image[i-1,j-1] = np.sum(radial_filter*(padded_img[i-1:i+2, j-1:j+2]))
            gradient_tangential_image[i-1,j-1] = np.sum(tangential_filter*(padded_img[i-1:i+2, j-1:j+2]))
    print("Done calculations")
    # Display (resized); note cv2.resize takes (width, height).
    gradient_radial_image = cv2.resize(gradient_radial_image, (800,1200))
    cv2.imshow("Radial Gradient",gradient_radial_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    gradient_tangential_image = cv2.resize(gradient_tangential_image, (800,1200))
    cv2.imshow("Tangential Gradient",gradient_tangential_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
"savla.sohil@gmail.com"
] | savla.sohil@gmail.com |
2624e54163be6bb922154c3afa0a64edd67cc00a | 2f83f7df9511dead559ba8b6f49f7dfa4438951e | /sum of large.py | 54862ad623baec492812e9333c83313c77bc6b85 | [] | no_license | Shristi19/GeeksforGeeks-Solved-Question | 6f740c4620e1af4b6219738e0d6f75321d88a17a | cb0652a2e4d9609523c88afc3886946b73e19569 | refs/heads/master | 2022-02-26T10:07:05.252829 | 2019-10-16T17:52:29 | 2019-10-16T17:52:29 | 212,523,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | def logic(x,y):
if len(str(x+y))==len(str(x)):
print(x+y)
else:
print(x)
# Read the number of test cases, then a pair of ints per line; finally
# run `logic` on each pair (printing the conditional sums).
num=int(input())
xs=[]
ys=[]
for i in range(num):
    x,y=list(map(int,input().strip().split()))
    xs.append(x)
    ys.append(y)
for i in range(num):
    logic(xs[i],ys[i])
| [
"noreply@github.com"
] | Shristi19.noreply@github.com |
f18ec3f5f6a4ab05b10177709f243475c026b3b0 | bf1588509df8cc40e99f3e362eff18f5bd754ae3 | /Python/python_stack/django/login_registration_project/apps/login_registration_app/migrations/0001_initial.py | 60501c193954486e570b5c9319db088152131b1c | [] | no_license | nick0000100/DojoAssignments | dec7b45a18d986acea3373839a9dcc5c314781a2 | dd698fc69df17041a284fd99cf2522e0731c6477 | refs/heads/master | 2021-01-01T17:39:44.065520 | 2017-08-18T22:28:45 | 2017-08-18T22:28:45 | 98,124,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-14 22:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('email', models.CharField(max_length=255)),
('password', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"nick.sor@live.com"
] | nick.sor@live.com |
4348b2bc0826b0f9b945bbd18435abeeadd69352 | 3daa525569066cb69db5c6d574f79dbb828d431f | /compilador/urls.py | a4010beff72fa08269fc6173100ba2607ce018e9 | [] | no_license | thiagomartendal/TrabalhoFormaisCompiladores | 5b905786e9515a41ee40f903e8bec134ed24c373 | ebafea8dfbfc4d18e0e7cd84af13e51f5f007151 | refs/heads/main | 2023-03-07T16:36:44.303883 | 2021-02-23T22:11:27 | 2021-02-23T22:11:27 | 338,878,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | from django.urls import path
from . import views

# URL routes for the compilador app: home page plus the automaton and
# grammar editors.
urlpatterns = [
    path('', views.index, name='inicio'),
    path('editar_automato/', views.editarAutomato, name='editar_automato'),
    path('editar_gramatica/', views.editarGramatica, name='editar_gramatica'),
]
| [
"noreply@github.com"
] | thiagomartendal.noreply@github.com |
c883e1a0a408db687bff3e281fefff765a1d8a66 | c6ec292a52ea54499a35a7ec7bc042a9fd56b1aa | /Python/1102.py | 2cae0a34d41306787e668057e921d884cf86347d | [] | no_license | arnabs542/Leetcode-38 | ad585353d569d863613e90edb82ea80097e9ca6c | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | refs/heads/master | 2023-02-01T01:18:45.851097 | 2020-12-19T03:46:26 | 2020-12-19T03:46:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | class Solution:
def maximumMinimumPath(self, A: List[List[int]]) -> int:
if not A or not A[0]:
return 0
m, n = len(A), len(A[0])
visited = [[False] * n for _ in range(m)]
mi = A[0][0]
heap = [(-mi, 0, 0)]
dx = [1, -1, 0, 0]
dy = [0, 0, -1, 1]
while heap:
curMin, x, y = heapq.heappop(heap)
if x == m - 1 and y == n - 1:
return -curMin
for i in range(4):
nx, ny = dx[i] + x, dy[i] + y
if 0 <= nx < m and 0 <= ny < n and not visited[nx][ny]:
visited[nx][ny] = True
newMin = min(-curMin, A[nx][ny])
heapq.heappush(heap, (-newMin, nx, ny))
return -1
| [
"lo_vegood@126.com"
] | lo_vegood@126.com |
1dba213f105e21f426680fadcb405f5a810e83b7 | 2ebfe362e35af1669b98a313b7718f74e1438fb4 | /1_basis/lotto.py | f9c826072aad61a9bd623ec77c90697eac6cffcc | [] | no_license | PCzarny/python101 | 9f85c0aa7e9ce64a7e85774a38a6c4bdef8932d7 | 100913e0c249c7db38f17693626afd7b8483f823 | refs/heads/master | 2020-04-17T21:51:08.219816 | 2019-02-24T15:48:13 | 2019-02-24T15:48:13 | 166,969,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | import random
# Number-guessing game: the player has `try_number` attempts to guess a
# random int in [1, 10] read from stdin.
solution = random.randint(1, 10)
try_number = 3

for i in range(try_number):
    print(f'Try number {i + 1}')
    answer = input('What number was chosen? ')
    if int(answer) == solution:
        print('Great! You won!')
        break
    elif i == try_number - 1:
        # Last attempt used up: reveal the answer.
        print(f'You\'ve lost. It was number {solution}')
    else:
        print(f'You\'ve missed. Try again\n')
| [
"piotr.a.czarny@gmail.com"
] | piotr.a.czarny@gmail.com |
b2385dc3272c957e8e027af6117d2102403e8702 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03243/s474613072.py | 7218ea9f3043f0cb31d16a785cad563de5b7ff3f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | n = int(input())
# AtCoder ABC111 A-ish: find the smallest repdigit (111*d) that is >= n
# (n is read from stdin above).  mul(d) builds the 3-digit repdigit ddd.
mul = lambda x: x * 100 + x * 10 + x
if mul(n//100)>=n:ans = mul(n//100)
else :ans = mul(n//100+1)
print(ans)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5e614b28b6ad370832c42ab72ed18cf54c35f665 | 7d080f7cd0265e4d2bd5b27367b58f55011bc48b | /session10_1/SQLite3Sample.py | 2df2de3733eba0bdbe7be85331ec0f0f96903a10 | [] | no_license | quangntran/cs162 | a76dcf61a0b3441fa49f4bf11677f487159e9013 | 513f541c33056e754cd490ce8005306425bb223c | refs/heads/master | 2023-02-04T16:23:46.562325 | 2020-03-18T11:59:37 | 2020-03-18T11:59:37 | 236,327,971 | 0 | 1 | null | 2023-02-02T05:12:46 | 2020-01-26T15:01:55 | Python | UTF-8 | Python | false | false | 1,169 | py | """
This is a basic SQLite3 Python implementation, to initialise two tables.
The goal is to provide a comparison against an SQLAlchemy implementation,
to highlight the differences between using an ORM and a native SQL database in Python.
"""
import sqlite3

# Create a connection() object representing the database.
conn = sqlite3.connect('databasesqlite.db')
"""After connecting and running this file,
you should see the databasesqlite.db file in the same directory.
"""
# The cursor is created after the connection, so execute() functions can be called with it with raw SQL.
c = conn.cursor()

c.execute("""
CREATE TABLE if not exists Users
(id INTEGER PRIMARY KEY ASC,
name TEXT(20),
insurance_id INTEGER)
""")

c.execute("""
CREATE TABLE if not exists Insurance
(insurance_id INTEGER PRIMARY KEY,
claim_id INTEGER,
FOREIGN KEY(insurance_id) REFERENCES Users(insurance_id))
""")

# NOTE(review): these INSERTs use fixed primary keys, so running this
# script a second time raises sqlite3.IntegrityError (UNIQUE constraint);
# the tables themselves survive via "if not exists".
c.execute("""
INSERT INTO Users VALUES(4, 'minerva', 3)
""")

c.execute("""
INSERT INTO Insurance VALUES(3,12345)
""")

# Save/commit the changes.
conn.commit()
# Make sure changes are committed or they will be lost.
conn.close()
| [
"quangtran0698@gmail.com"
] | quangtran0698@gmail.com |
6780582353c10eef2b785d13ec40e1f68746a39c | 1177fa5972939b32d709916efc7eab25d6088973 | /Sugar_map_folding.py | 5c91aad94b902f120be95cdd0535da6f5d3bcf0f | [
"MIT"
] | permissive | YutoToguchi/map_folding | 8f3566c4e2f561cf18687c3a4360e37c436c946d | 1390e13ac7c0183a1fd8d8be00468404c67fcbc9 | refs/heads/master | 2020-04-24T14:41:02.214997 | 2019-07-25T05:29:40 | 2019-07-25T05:29:40 | 134,123,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,338 | py | # coding: utf-8
import itertools
import subprocess # コマンド実行用
import random
# 制約条件 2 newsによる局所的な重なり順
# 重なり順生成
def stacking_order(news_list):
    """Derive local stacking orders for every 2x2 quad of the folded map.

    news_list[i][j] in {"n","e","w","s"} is the fold direction between
    neighbouring cells.  Returns a list of quads [a, b, c, d] meaning
    cell a lies above b above c above d in the folded stack; cells are
    numbered via M*j + i + 1 with M = rows, N = cols of the cell grid.
    flag12/flag24 track the relative order of the quad's corner cells as
    the scan progresses (see the inline notes).
    """
    M = len(news_list) + 1
    N = len(news_list[0]) + 1
    layer = [] # layer[[a,b,c,d]] means a -> b -> c -> d top to bottom
    flag12 = 0 # (top->bottom) 0: cell2 above cell1, 1: cell1 above cell2
    flag24 = 2 # (top->bottom) 0: cell2 above cell4, 1: cell4 above cell2; 2 = uninitialised
    for i in range(M-1):
        for j in range(N-1):
            if j == 0: # first column of this row
                if i != 0: # not the first row: derive flag12 from the previous flag24
                    if news_list[i][j] == "n":
                        flag12 = flag24
                    elif news_list[i][j] == "s":
                        flag12 = 1 - flag24
                    elif news_list[i][j] == "w":
                        flag12 = flag24
                    elif news_list[i][j] == "e":
                        flag12 = 1 - flag24
            # Derive flag24 from flag12 for the current fold direction.
            if news_list[i][j] == "n":
                flag24 = flag12
            elif news_list[i][j] == "s":
                flag24 = 1 - flag12
            elif news_list[i][j] == "w":
                flag24 = 1 - flag12
            elif news_list[i][j] == "e":
                flag24 = flag12
            # Decide the quad's stacking order from flag12.
            if flag12 == 0:
                if news_list[i][j] == "n":
                    layer.append([M*j+i+2,M*j+i+1,M*(j+1)+i+1,M*(j+1)+i+2])
                    flag12 = 1 - flag12
                elif news_list[i][j] == "s":
                    layer.append([M*(j+1)+i+1,M*(j+1)+i+2,M*j+i+2,M*j+i+1])
                    flag12 = 1 - flag12
                elif news_list[i][j] == "w":
                    layer.append([M*(j+1)+i+2,M*j+i+2,M*j+i+1,M*(j+1)+i+1])
                elif news_list[i][j] == "e":
                    layer.append([M*j+i+2,M*(j+1)+i+2,M*(j+1)+i+1,M*j+i+1])
            else:
                if news_list[i][j] == "n":
                    layer.append([M*(j+1)+i+2,M*(j+1)+i+1,M*j+i+1,M*j+i+2])
                    flag12 = 1 - flag12
                elif news_list[i][j] == "s":
                    layer.append([M*j+i+1,M*j+i+2,M*(j+1)+i+2,M*(j+1)+i+1])
                    flag12 = 1 - flag12
                elif news_list[i][j] == "w":
                    layer.append([M*(j+1)+i+1,M*j+i+1,M*j+i+2,M*(j+1)+i+2])
                elif news_list[i][j] == "e":
                    layer.append([M*j+i+1,M*(j+1)+i+1,M*(j+1)+i+2,M*j+i+2])
    return layer
# 引数 1 : 重なり順リスト stack_list
# 引数 2 : ファイルオブジェクト f_obj
# 出力 : CNF条件
# 戻り値 : なし
def stacking_order_cnf(stack_list, f_obj):
    """Emit layer-ordering constraints for each 2x2 stacking quad.

    For every quad [a, b, c, d] write three `<` constraints meaning
    a is above b, b above c, c above d (smaller layer value = higher).
    `f_obj` is the already-opened (append-mode) CSP file object.
    """
    for k, quad in enumerate(stack_list):
        a, b, c, d = quad
        print(f"; 制約条件 2.{k + 1}  {quad} の重なり順", file=f_obj)
        # a above b
        print(f"(< m_{a} m_{b})", file=f_obj)
        # b above c
        print(f"(< m_{b} m_{c})", file=f_obj)
        # c above d
        print(f"(< m_{c} m_{d})", file=f_obj)
# 制約条件 3,4,5,6 領域での交差
# 引数 1 : 領域に格納されている折り線リスト c_list
# 引数 2 : 制約条件の数字
# 引数 3 : ファイルオブジェクト f_obj
# 出力 : 交差しない条件
# 戻り値 : なし
def intersction(crease_list, const_num, f_obj):
    """Emit non-crossing constraints for every pair of creases.

    Two creases (a,b) and (c,d) in the same fold region must not
    interleave: c lies strictly between a and b iff d does too.
    `const_num` labels the constraint group in the CSP comments.
    """
    for pair_a, pair_b in itertools.combinations(crease_list, 2):
        a, b = pair_a
        c, d = pair_b
        print(f"; 制約条件 {const_num} ({a},{b})と({c},{d})が交差しない条件", file=f_obj)
        # [min(a,b) < c < max(a,b)] <=> [min(a,b) < d < max(a,b)]
        in_range_c = f"(and (< (min m_{a} m_{b}) m_{c} ) (< m_{c} (max m_{a} m_{b})))"
        in_range_d = f"(and (< (min m_{a} m_{b}) m_{d} ) (< m_{d} (max m_{a} m_{b})))"
        print(f"(iff {in_range_c} {in_range_d})", file=f_obj)
# 地図折り問題からCSPファイルを作成
def map_to_csp(news_list):
    """Write the full map-folding instance to "map_folding.csp".

    Emits: one integer layer variable per cell (1..M*N), an
    alldifferent over them (constraint 1), the 2x2 stacking orders
    (constraint 2), and non-crossing constraints for crease regions
    A-D (constraints 3-6).  M = rows, N = cols of the cell grid.
    """
    M = len(news_list) + 1
    N = len(news_list[0]) + 1
    cell_num = M * N
    numbers = range( 1, cell_num+1 )
    # Variable declarations (file is truncated, then reopened in append mode).
    f_obj = open("map_folding.csp", 'w')# initialise in write mode
    print("; %d × %d 地図折り問題 " %(M,N), news_list, file=f_obj)
    f_obj.close()
    f_obj = open("map_folding.csp", 'a') # reopen in append mode
    print("; 変数の宣言", file=f_obj)
    for i in numbers:
        print("(int m_",i," 1 ", cell_num, ")", sep="", file=f_obj)
    # Constraint 1: each cell is assigned a distinct layer,
    # e.g. (alldifferent m_1 m_2 m_3 m_4 m_5 m_6 )
    print("; 制約条件 1", file=f_obj)
    print("(alldifferent ", sep="", end="", file=f_obj)
    for i in numbers:
        print("m_",i," ", sep="", end="", file=f_obj)
    print(")", file=f_obj)
    # Constraint 2: stacking orders implied by the news fold directions.
    stack_list = stacking_order(news_list) # generate the stacking quads
    stacking_order_cnf(stack_list, f_obj) # emit the CNF
    # Constraint 3: crossings inside region A.
    # Build the list of creases that fold within region A.
    domainA_list = []
    if M % 2 == 0: # M even
        i = 1
        while i < cell_num:
            domainA_list.append([i,i+1])
            i = i + 2
    else: # M odd
        i = 1
        while i < cell_num:
            if i % M != 0: # cell i is not at a column boundary
                domainA_list.append([i,i+1])
                i = i + 2
            else: # cell i is at a boundary
                i = i + 1
    # (file is still open in append mode)
    intersction(domainA_list, 3, f_obj) # emit the CNF
    # Constraint 4: crossings inside region B.
    domainB_list = []
    i = 1
    while i <= cell_num-M:
        domainB_list.append([i,i+M])
        if i % M != 0: # not at a boundary
            i = i + 1
        else: # at a boundary: skip to the next column pair
            i = i + M + 1
    intersction(domainB_list, 4, f_obj) # emit the CNF
    # Constraint 5: crossings inside region C.
    domainC_list = []
    if M % 2 == 0: # M even
        i = 2
        while i < cell_num:
            if i % M != 0: # not at a boundary
                domainC_list.append([i,i+1])
                i = i + 2
            else: # at a boundary
                i = i + 2
    else: # M odd
        i = 2
        while i < cell_num:
            domainC_list.append([i,i+1])
            if (i+1) % M != 0: # cell i+1 not at a boundary
                i = i + 2
            else: # cell i+1 at a boundary
                i = i + 3
    intersction(domainC_list, 5, f_obj) # emit the CNF
    # Constraint 6: crossings inside region D.
    domainD_list = []
    i = M + 1
    while i <= cell_num-M:
        domainD_list.append([i,i+M])
        if i % M != 0: # not at a boundary
            i = i + 1
        else: # at a boundary
            i = i + M + 1
    intersction(domainD_list, 6, f_obj) # emit the CNF
    # # Test constraint (disabled): forbid specific known solutions, e.g.
    # (not (and (= m_1 6) (= m_2 1) (= m_3 3) (= m_4 2) (= m_5 4) (= m_6 5)))
    # Solutions to forbid are appended to prohibit_where.
    print("; テスト制約", file=f_obj)
    prohibit_where = []
    prohibit_where.append([6, 1, 3, 2, 4, 5])
    for i in range(len(prohibit_where)):
        # print("(not ", end="", file=f_obj)
        # print("(and", end="", file=f_obj)
        for j in range(len(prohibit_where[i])):
            pass
            # print(" (= m_", j+1," ", prohibit_where[i][j],")", sep="", end="", file=f_obj)
        # print(")", end="", file=f_obj)
        # print(")", file=f_obj)
    print("; END", file=f_obj)
    f_obj.close()
    print("%d × %d 地図折り問題 " %(M,N), news_list)
# バイトコードを文字列に変換
def conv_hbase_str(bytecode):
    """Decode subprocess output bytes to str (UTF-8 default).

    Replaces the original ``eval("{}".format(bytecode)).decode()``
    round-trip, which re-parsed the bytes' repr with eval — fragile for
    arbitrary byte content and an unnecessary eval of data.  For any
    bytes input the result is identical.
    """
    return bytecode.decode()
# Sugarの実行
# 引数 : news_list
# 戻り値 : [error], [unfoldable], foldable 解のリスト
def fold_check(news_list):
    """Run the Sugar CSP solver on the instance generated from news_list.

    Writes "map_folding.csp" via map_to_csp, then shells out to `sugar`.
    Returns ['Error'] when the solver cannot be run, ['unfoldable'] when
    the instance is unsatisfiable, otherwise the assigned values parsed
    from the solver's output (every third whitespace token from index 4).
    """
    map_to_csp(news_list)  # write the CSP file for this instance
    # Run the external solver; check_output raises on non-zero exit.
    try:
        byteOut = subprocess.check_output('sugar map_folding.csp', shell=True)
        output = conv_hbase_str(byteOut)
    except Exception:  # fix: was a bare `except:` that also swallowed KeyboardInterrupt/SystemExit
        print("Error.")
        return ['Error']
    if output.split()[1] == 'SATISFIABLE':
        print(output)
        return output.split()[4::3]
    else:
        print(output)
        return ['unfoldable']
def random_news_list(M, N):
    """Generate a random (M-1) x (N-1) grid of fold directions.

    The first row is fully random over {"n","e","w","s"}; each later
    entry is constrained by its upper/left neighbours via the parity
    flag so the resulting fold pattern stays locally consistent
    (flag True -> choose from {"e","s"}, False -> {"n","w"}).
    Returns a list of rows (copies, safe to mutate).
    """
    news_element = ["n", "e", "w", "s"]
    es_element = ["e","s"]
    nw_element = ["n","w"]
    news_list = []
    # First row: unconstrained random choices.
    news_list.append(random.choices(news_element, k=N-1))
    # Subsequent rows.
    for i in range(1, M-1):
        # Initialise the row with the placeholder "0".
        add_list = ["0" for k in range(N-1)]
        # First column: unconstrained random choice.
        add_list[0] = random.choices(news_element, k=1)[0]
        # Remaining columns.
        for j in range(1,N-1):
            # Compute the parity flag from the three settled neighbours:
            # True: same parity -> pick from {"e","s"}
            # False: different parity -> pick from {"n","w"}
            if news_list[i-1][j-1] == "n" or news_list[i-1][j-1] == "w":
                flag = True
            else:
                flag = False
            if add_list[j-1] == "n" or add_list[j-1] == "e":
                flag = not(flag)
            if news_list[i-1][j] == "w" or news_list[i-1][j] == "s":
                flag = not(flag)
            # Pick the direction according to the flag.
            if flag:
                add_list[j] = random.choice(es_element)[0]
            else:
                add_list[j] = random.choices(nw_element)[0]
        # Append a copy of the completed row.
        news_list.append(add_list[:])
    return(news_list[:])
def main():
    """Generate a random 2x5 map-folding instance and check foldability."""
    M = 2
    N = 5
    news_list = random_news_list(M, N)
    # news_list = [['n', 'e', 'w', 'n']]
    fold_check(news_list)

if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | YutoToguchi.noreply@github.com |
9fd32d09e3dee1b1d467de5785167d31fbc3ffa7 | edcd74f8f65119bdbe737360c2ca33b4a6da160a | /python/problem-tree/longest_univalue_path.py | 1278c1e329a7ad0dd71f2ab4433e53e2e009049e | [] | no_license | hyunjun/practice | 72e83de6a1d5e04ddcd16526f16110ea2dd00373 | 5376dd48b1cefb4faba9d2ef6a8a497b6b1d6c67 | refs/heads/master | 2023-08-31T07:00:37.320351 | 2023-08-17T07:29:24 | 2023-08-17T07:29:24 | 2,704,126 | 3 | 2 | null | 2022-12-14T20:25:07 | 2011-11-03T18:28:44 | Python | UTF-8 | Python | false | false | 8,459 | py | # https://leetcode.com/problems/longest-univalue-path
# https://leetcode.com/problems/longest-univalue-path/solution
from TreeNode import TreeNode
class Solution:
# Wrong Answer
def longestUnivaluePath0(self, root):
if root is None:
return 0
cur, stack, res = root, [], []
while cur or stack:
if cur:
stack.append(cur)
cur = cur.left
else:
cur = stack.pop()
res.append(cur.val)
cur = cur.right
print(res)
s, e, maxLen = 0, 0, 0
for i, r in enumerate(res):
if 0 == i:
continue
if res[i - 1] != r:
e = i
print('{}[{}]~{}[{}]'.format(res[s], s, res[e], e))
maxLen = max(maxLen, e - 1 - s)
s = i
maxLen = max(maxLen, len(res) - 1 - s)
return maxLen
# Wrong Answer
def longestUnivaluePath1(self, root):
if root is None:
return 0
queue, maxVal = [(root, [])], 0
while queue:
cur, prevVals = queue.pop(0)
prevVals.append(cur.val)
if cur.left is None and cur.right is None:
print(prevVals)
cnt = 0
for i, val in enumerate(prevVals):
if 0 == i:
continue
if prevVals[i - 1] == val:
cnt += 1
maxVal = max(maxVal, cnt)
else:
cnt = 0
else:
if cur.left:
queue.append((cur.left, prevVals[:]))
if cur.right:
queue.append((cur.right, prevVals[:]))
return maxVal
# Wrong Answer
def longestUnivaluePath2(self, root):
if root is None:
return 0
def getCount(node):
if node is None:
return 0
lCount, rCount = 0, 0
if node.left:
if node.left.val == node.val:
lCount = 1 + getCount(node.left)
else:
lCount = getCount(node.left)
if node.right:
if node.right.val == node.val:
rCount = 1 + getCount(node.right)
else:
rCount = getCount(node.right)
if node.left and node.right and node.val == node.left.val == node.right.val:
return lCount + rCount
return max(lCount, rCount)
return getCount(root)
# Wrong Answer
def longestUnivaluePath3(self, root):
def getConnectedCount(node, val):
if node is None:
return 0
lCount, rCount = 0, 0
if node.left:
if node.left.val == node.val == val:
lCount = 1 + getConnectedCount(node.left, val)
else:
lCount = getConnectedCount(node.left, val)
if node.right:
if node.right.val == node.val == val:
rCount = 1 + getConnectedCount(node.right, val)
else:
rCount = getConnectedCount(node.right, val)
if node.left and node.right and val == node.val == node.left.val == node.right.val:
return lCount + rCount
return max(lCount, rCount)
if root is None:
return 0
queue, candidates = [root], set()
while queue:
cur = queue.pop(0)
if cur.left:
if cur.val == cur.left.val:
candidates.add(cur.val)
queue.append(cur.left)
if cur.right:
if cur.val == cur.right.val:
candidates.add(cur.val)
queue.append(cur.right)
print(candidates)
maxLen = 0
for cand in candidates:
maxLen = max(maxLen, getConnectedCount(root, cand))
return maxLen
# Wrong Answer
def longestUnivaluePath(self, root):
if root is None:
return 0
def combine(node):
if node is None:
return []
res = []
if node.left and node.right and node.left.val == node.right.val == node.val:
lRes = combine(node.left)
if 0 == len(lRes):
res.append(node.left.val)
else:
res.extend(lRes)
res.append(node.val)
rRes = combine(node.right)
if 0 == len(rRes):
res.append(node.right.val)
else:
res.extend(rRes)
elif node.left and node.left.val == node.val:
lRes = combine(node.left)
if 0 == len(lRes):
res.append(node.left.val)
else:
res.extend(lRes)
res.append(node.val)
elif node.right and node.right.val == node.val:
res.append(node.val)
rRes = combine(node.right)
if 0 == len(rRes):
res.append(node.right.val)
else:
res.extend(rRes)
return res
queue, maxVal = [root], 0
while queue:
cur = queue.pop(0)
maxVal = max(maxVal, len(combine(cur)) - 1)
if cur.left:
queue.append(cur.left)
if cur.right:
queue.append(cur.right)
return maxVal
# 57.52% solution
def longestUnivaluePath(self, root):
self.ans = 0
def getLength(node):
if node is None:
return 0
lLength, rLength = getLength(node.left), getLength(node.right)
lChild, rChild = 0, 0
if node.left and node.left.val == node.val:
lChild = lLength + 1
if node.right and node.right.val == node.val:
rChild = rLength + 1
self.ans = max(self.ans, lChild + rChild)
return max(lChild, rChild)
getLength(root)
return self.ans
# Ad-hoc driver: build several small trees by hand and print the longest
# univalue path length for each.
s = Solution()


def _tree(val, left=None, right=None):
    # Small helper so each test tree reads as one nested expression.
    node = TreeNode(val)
    node.left = left
    node.right = right
    return node


# Tree 1:      5        -> 5-5-5 chain on the right
#            /   \
#           4     5
#          / \     \
#         1   1     5
root = _tree(5, _tree(4, _tree(1), _tree(1)), _tree(5, None, _tree(5)))
print(s.longestUnivaluePath(root))

# Tree 2:      1        -> 4-4-4 bend on the left
#            /   \
#           4     5
#          / \     \
#         4   4     5
root = _tree(1, _tree(4, _tree(4), _tree(4)), _tree(5, None, _tree(5)))
print(s.longestUnivaluePath(root))

# Tree 3: left-leaning chain 1-4-4-1
root = _tree(1, _tree(4, _tree(4, _tree(1))))
print(s.longestUnivaluePath(root))

# Tree 4: right-leaning chain 1-4-4-1 (mirror of tree 3)
root = _tree(1, None, _tree(4, None, _tree(4, None, _tree(1))))
print(s.longestUnivaluePath(root))

# Tree 5:      1
#            /   \
#           2     2
#          / \   /
#         2   2 2
root = _tree(1, _tree(2, _tree(2), _tree(2)), _tree(2, _tree(2)))
print(s.longestUnivaluePath(root))

# Tree 6:      1
#            /   \
#           2     2
#          / \
#         2   2
root = _tree(1, _tree(2, _tree(2), _tree(2)), _tree(2))
print(s.longestUnivaluePath(root))

# Tree 7: no two adjacent equal values
root = _tree(1, _tree(2), _tree(3))
print(s.longestUnivaluePath(root))

# Tree 8:      4
#            /   \
#          -7    -3
#               /   \
#             -9    -3
#                   /
#                 -4
root = _tree(4, _tree(-7), _tree(-3, _tree(-9), _tree(-3, _tree(-4))))
print(s.longestUnivaluePath(root))
| [
"agapelover4u@yahoo.co.kr"
] | agapelover4u@yahoo.co.kr |
41531fd748eb622788ea5b678fe01e71c93a0358 | eb14a596eb9857413c2ffff55aa3bc824c42fc6a | /test_LR.py | 361eb4710fcf08f3c46ec71be12fb705125b7572 | [] | no_license | GaelTouquet/particle_flow_calibration | d242abc970e07968f566205de3a8058e6c29905e | d74d44fb292c35be6cfc9317f318551d0143b2f0 | refs/heads/master | 2021-07-10T03:52:18.186563 | 2017-07-16T15:18:37 | 2017-07-16T15:18:37 | 96,535,146 | 0 | 0 | null | 2017-07-07T12:11:12 | 2017-07-07T12:11:12 | null | UTF-8 | Python | false | false | 2,895 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Developed by Samuel Niang
For IPNL (Nuclear Physics Institute of Lyon)
Script to understand how does LinearRegression works.
"""
import matplotlib.pyplot as plt
import pfcalibration.usualplots as usplt
from pfcalibration.tools import savefig
from pfcalibration.tools import importData,importCalib
# --- Data preparation --------------------------------------------------
# importation of simulated particles
filename = 'charged_hadrons_100k.energydata'
data1 = importData(filename)
filename = 'prod2_200_400k.energydata'
data2 = importData(filename)
# we merge the 2 sets of data
data1 = data1.mergeWith(data2)
# we split the data in 2 sets
data1,data2 = data1.splitInTwo()
# data1 -> training data
# data2 -> data to predict

# --- Parameters of the calibration --------------------------------------
# NOTE(review): these three values are repeated as literals in the
# LinearRegression(...) call below -- keep them in sync.
# (presumably energy bounds used by the calibration -- confirm units)
lim_min = 20
lim_max=80
lim=150
# directory in which the pictures are saved
directory = "pictures/testLinearRegression/"

# --- Load (or build and save) the calibration ---------------------------
try:
    # We import the calibration
    filename = "calibrations/LinearRegression_162Kpart_lim_150_lim_max_80_lim_min_20.calibration"
    LinearRegression = importCalib(filename)
except FileNotFoundError:
    # We create the calibration
    LinearRegression = data1.LinearRegression(lim_min = 20, lim_max=80, lim=150)
    # We save the calibration
    LinearRegression.saveCalib()
classname = LinearRegression.classname

# --- Plots --------------------------------------------------------------
# plot 3D training points
fig = plt.figure(1,figsize=(6, 4))
usplt.plot3D_training(data1)
plt.show()
savefig(fig,directory,classname+"_plot3D_training.png")
plt.close()
# plot 3D calibration surface
fig = plt.figure(1,figsize=(6, 4))
usplt.plot3D_surf(LinearRegression)
plt.show()
savefig(fig,directory,classname+"_plot3D_surf.png")
savefig(fig,directory,classname+"_plot3D_surf.eps")
plt.close()
# calibration curve for ecal = 0
fig = plt.figure(figsize=(12,4))
usplt.plotCalibrationCurve(LinearRegression)
plt.show()
savefig(fig,directory,classname+"_calibration.png")
plt.close()
# ecalib/etrue as a function of etrue
fig = plt.figure(figsize=(12,4))
usplt.plot_ecalib_over_etrue_functionof_etrue(LinearRegression,data2)
plt.show()
savefig(fig,directory,classname+"_ecalib_over_etrue.png")
plt.close()
# histograms of ecalib and etrue
fig = plt.figure(figsize=(12,5))
usplt.hist_ecalib(LinearRegression,data2)
plt.show()
savefig(fig,directory,classname+"_histograms_ecalib_etrue.png")
savefig(fig,directory,classname+"_histograms_ecalib_etrue.eps")
plt.close()
# ecalib/etrue as a function of (ecal, hcal)
fig = plt.figure(figsize=(12,4))
usplt.plot_ecalib_over_etrue_functionof_ecal_hcal(LinearRegression,data2)
plt.show()
savefig(fig,directory,classname+"_ecalib_over_etrue_functionof_ecal_hcal.png")
plt.close()
# ecalib/etrue gaussian fit curve
fig = plt.figure(figsize=(12,10))
usplt.plot_gaussianfitcurve_ecalib_over_etrue_functionof_ecal_hcal(LinearRegression,data2)
plt.show()
savefig(fig,directory,classname+"_ecalib_over_etrue_curve.png")
savefig(fig,directory,classname+"_ecalib_over_etrue_curve.eps")
plt.close()
| [
"samuel.niang@gmail.com"
] | samuel.niang@gmail.com |
6f366e86084606a25e88cd2c1e7f23d2080a6bfe | 29f1fd66c2a56b6b46641b9139e180364d1c4356 | /construct_dt_and_classify_one_sample_case1.py | b2813dc9137f332ee66336b0fe0e56b317a976aa | [] | no_license | ashhale/Answer-Me-This | 1a6f00cab94a91663d10fbc71da4aac330fa28cf | 1e62f96a02d9ea7f8f3b7a60b130e7c625e57f21 | refs/heads/master | 2020-06-11T14:03:17.063103 | 2016-12-08T15:03:01 | 2016-12-08T15:03:01 | 75,649,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,464 | py | #!/usr/bin/env python
## construct_dt_and_classify_one_sample_case1.py
## This script shows DecisionTree module for the case of
## purely symbolic data. By the way, this training data
## was produced by the script
## generate_training_data_symbolic.py on the basis of the
## parameters declared in the file `param_symbolic.txt'.
import LearningDecisionTree
import sys
# Input CSV with purely symbolic training data (see module header comment).
training_datafile = "training_symbolic.csv"
#training_datafile = "training_symbolic2.csv"

# Build the decision-tree engine: column 1 holds the class label and
# columns 2-5 hold the four symbolic features.
dt = LearningDecisionTree.LearningDecisionTree( training_datafile = training_datafile,
                                                csv_class_column_index = 1,
                                                csv_columns_for_features = [2,3,4,5],
                                                entropy_threshold = 0.01,
                                                max_depth_desired = 5,
                                                csv_cleanup_needed = 1,
                                              )
dt.get_training_data()
dt.calculate_first_order_probabilities()
dt.calculate_class_priors()
# UNCOMMENT THE FOLLOWING LINE if you would like to see the training
# data that was read from the disk file:
#dt.show_training_data()
root_node = dt.construct_decision_tree_classifier()
# UNCOMMENT THE FOLLOWING TWO LINES if you would like to see the decision
# tree displayed in your terminal window:
print("\n\nThe Decision Tree:\n")
root_node.display_decision_tree("   ")
test_sample1 = [ 'exercising=never',
                 'smoking=heavy',
                 'fatIntake=heavy',
                 'videoAddiction=heavy']
# NOTE(review): test_sample2 is defined but never classified below.
test_sample2 = ['exercising=none',
                'smoking=heavy',
                'fatIntake=heavy',
                'videoAddiction=none']
# The rest of the script is for displaying the classification results:
classification = dt.classify(root_node, test_sample1)
solution_path = classification['solution_path']
del classification['solution_path']
which_classes = list( classification.keys() )
# Most probable class first.
which_classes = sorted(which_classes, key=lambda x: classification[x], reverse=True)
print("\nClassification:\n")
print("     " + str.ljust("class name", 30) + "probability")
print("     ----------                    -----------")
for which_class in which_classes:
    # Fixed: the original used `is not 'solution_path'` -- identity
    # comparison with a string literal is unreliable (and a SyntaxWarning
    # on modern Pythons).  The key was already deleted above, so this is
    # only a defensive guard.
    if which_class != 'solution_path':
        print("     " + str.ljust(which_class, 30) + str(classification[which_class]))
print("\nSolution path in the decision tree: " + str(solution_path))
print("\nNumber of nodes created: " + str(root_node.how_many_nodes()))
| [
"noreply@github.com"
] | ashhale.noreply@github.com |
77acc0d3cf53b10d4d349208c468bc9079016a6e | 045cb1a5638c3575296f83471758dc09a8065725 | /addons/sale_coupon/wizard/__init__.py | 635af11d6b33d4b83895e11bf4abe859856175f1 | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | # -*- coding: utf-8 -*-
# Part of Harpiya. See LICENSE file for full copyright and licensing details.
from . import sale_coupon_apply_code
from . import sale_coupon_generate
| [
"yasir@harpiya.com"
] | yasir@harpiya.com |
290d56dcec1a58dca055cb026dc4d25f4b012abe | 010aa27c9b532a98acba678ac61cd603b3d9fd2e | /cltk/lemmatize/latin/backoff.py | 46497c4758d48485457d7a8704249d1724d404ec | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ashwanidv100/cltk | 2e83e394139173a7024e6bb5d21e826986f9ab13 | ddfdb530aa322ac73f7ad57f725c9a80ed1b56ff | refs/heads/master | 2021-04-09T15:25:31.172534 | 2018-03-21T17:16:04 | 2018-03-21T17:16:04 | 125,635,475 | 1 | 0 | MIT | 2018-03-17T14:26:14 | 2018-03-17T14:08:35 | Python | UTF-8 | Python | false | false | 23,168 | py | """Module for lemmatizing Latin—includes several classes for different
lemmatizing approaches--based on training data, regex pattern matching,
etc. These can be chained together using the backoff parameter. Also,
includes a pre-built chain that uses models in latin_models_cltk repo
called BackoffLatinLemmatizer.
The logic behind the backoff lemmatizer is based on backoff POS-tagging in
NLTK and repurposes several of the tagging classes for lemmatization
tasks. See here for more info on sequential backoff tagging in NLTK:
http://www.nltk.org/_modules/nltk/tag/sequential.html
"""
__author__ = ['Patrick J. Burns <patrick@diyclassics.org>']
__license__ = 'MIT License. See LICENSE.'
import os
import re
from nltk.probability import ConditionalFreqDist
from nltk.tag.api import TaggerI
from nltk.tag.sequential import SequentialBackoffTagger, ContextTagger, DefaultTagger, NgramTagger, UnigramTagger, RegexpTagger
from cltk.utils.file_operations import open_pickle
from cltk.lemmatize.latin.latin import latin_sub_patterns, latin_verb_patterns, latin_pps, rn_patterns
# Unused for now
#def backoff_lemmatizer(train_sents, lemmatizer_classes, backoff=None):
# """From Python Text Processing with NLTK Cookbook."""
# for cls in lemmatizer_classes:
# backoff = cls(train_sents, backoff=backoff)
# return backoff
class LemmatizerI(TaggerI):
    """Inherit base tagging class for Latin lemmatizer."""
    # Marker interface: reuses NLTK's TaggerI API unchanged, with "tags"
    # reinterpreted as lemmata by the subclasses below.
    # def __init__(self):
    #     TaggerI.__init__(self)
    pass
class SequentialBackoffLemmatizer(LemmatizerI, SequentialBackoffTagger):
    """Abstract base for lemmatizers that hand unknown tokens to the next
    lemmatizer in a backoff chain, mirroring NLTK's sequential taggers."""
    def __init__(self, backoff=None):
        """Setup for SequentialBackoffLemmatizer()

        :param backoff: Next lemmatizer in backoff chain.
        """
        LemmatizerI.__init__(self)
        SequentialBackoffTagger.__init__(self, backoff)

    def lemmatize(self, tokens):
        """Transform tag method into custom method for lemmatizing tasks. Can
        be overwritten by specific instances where list of tokens should
        be handled in a different manner. (Cf. IdentityLemmatizer)

        :param tokens: List of tokens to be lemmatized
        :return: Tuple of the form (TOKEN, LEMMA)
        """
        # Explicit class call (not self.tag), so a subclass override of
        # tag() does not alter lemmatize().
        return SequentialBackoffLemmatizer.tag(self, tokens)

    def choose_tag(self, tokens, index, history):
        """Override choose_tag with lemmatizer-specific method for various
        methods that expect a method with this name.

        :param tokens: List of tokens to be lemmatized
        :param index: Int with current token
        :param history: List with tokens that have already been lemmatized
        :return: String with lemma, if found; otherwise NONE
        """
        return self.choose_lemma(tokens, index, history)
class DefaultLemmatizer(SequentialBackoffLemmatizer, DefaultTagger):
    """Lemmatizer that assigns the same (default) lemma to every token;
    usable as the terminal link of a backoff chain."""
    def __init__(self, lemma=None):
        """Setup for DefaultLemmatizer().

        :param lemma: String with default lemma to be assigned for all tokens;
        set to None if no parameter is assigned.
        """
        self._lemma = lemma
        # No backoff: this lemmatizer always produces an answer itself.
        SequentialBackoffLemmatizer.__init__(self, None)
        DefaultTagger.__init__(self, self._lemma)

    def choose_lemma(self, tokens, index, history):
        # Delegate to NLTK's DefaultTagger.choose_tag (the stored default).
        return DefaultTagger.choose_tag(self, tokens, index, history)
class IdentityLemmatizer(SequentialBackoffLemmatizer):
    """Lemmatizer that returns each token unchanged as its own lemma;
    typically the terminal link of a backoff chain."""
    def __init__(self, backoff=None):
        """Setup for IdentityLemmatizer().

        :param backoff: Next lemmatizer in backoff chain (unused in
            practice, since this lemmatizer always answers).
        """
        SequentialBackoffLemmatizer.__init__(self, backoff)

    def lemmatize(self, tokens):
        """Custom lemmatize method for working with identity. No need to
        call the tagger because each token is returned as its own lemma.

        :param tokens: List of tokens to be lemmatized
        :return: List of tuples of the form (TOKEN, TOKEN)
        """
        # Pair each token with itself; replaces the original
        # enumerate-and-append loop with the equivalent zip idiom.
        return list(zip(tokens, tokens))

    def choose_lemma(self, tokens, index, history):
        """Returns the given token as the lemma.

        :param tokens: List of tokens to be lemmatized
        :param index: Int with current token
        :param history: List with tokens that have already been lemmatized
        :return: String, spec. the token found at the current index.
        """
        return tokens[index]
class TrainLemmatizer(SequentialBackoffLemmatizer):
    """Standalone version of 'model' function found in UnigramTagger; by
    defining as its own class, it is clearer that this lemmatizer is
    based on dictionary lookup and does not use training data."""
    def __init__(self, model, backoff=None):
        """Setup for TrainLemmatizer().

        :param model: Dictionary with form {TOKEN: LEMMA}
        :param backoff: Next lemmatizer in backoff chain.
        """
        SequentialBackoffLemmatizer.__init__(self, backoff)
        self.model = model

    def choose_lemma(self, tokens, index, history):
        """Look the token up in the model dictionary.

        :param tokens: List of tokens to be lemmatized
        :param index: Int with current token
        :param history: List with tokens that have already been lemmatized; NOT USED
        :return: String with the lemma, or None on a miss (which triggers
            the backoff chain).
        """
        # Single dict.get lookup; returns None on a miss exactly like the
        # original membership test followed by a second indexing lookup.
        return self.model.get(tokens[index])
class ContextLemmatizer(SequentialBackoffLemmatizer, ContextTagger):
    """Abstract base for lemmatizers that pick a lemma from the token's
    surrounding context, backed by NLTK's ContextTagger."""
    def __init__(self, context_to_lemmatize, backoff=None):
        """Setup for ContextLemmatizer().

        :param context_to_lemmatize: List of tuples of the form (TOKEN, LEMMA);
        this should be 'gold standard' data that can be used to train on a
        given context, e.g. unigrams, bigrams, etc.
        :param backoff: Next lemmatizer in backoff chain.
        """
        SequentialBackoffLemmatizer.__init__(self, backoff)
        self._context_to_lemmatize = (context_to_lemmatize if context_to_lemmatize else {})
        ContextTagger.__init__(self, self._context_to_lemmatize, backoff)

    def choose_lemma(self, tokens, index, history):
        # The context lookup itself is inherited from NLTK's ContextTagger.
        return ContextTagger.choose_tag(self, tokens, index, history)
class NgramLemmatizer(ContextLemmatizer, NgramTagger):
    """N-gram-context lemmatizer built on NLTK's NgramTagger."""
    def __init__(self, n, train=None, model=None, backoff=None, cutoff=0):
        """Setup for NgramLemmatizer()

        :param n: Int with length of 'n'-gram
        :param train: List of tuples of the form (TOKEN, LEMMA)
        :param model: Dict; DEPRECATED, use TrainLemmatizer
        :param backoff: Next lemmatizer in backoff chain.
        :param cutoff: Int with minimum number of matches to choose lemma
        """
        self._n = n
        self._check_params(train, model)
        ContextLemmatizer.__init__(self, model, backoff)
        NgramTagger.__init__(self, self._n, train, model, backoff, cutoff)
        if train:
            # Refactor to remove model? Always train?
            self._train(train, cutoff)

    def context(self, tokens, index, history):
        """Return the context tuple used as the lookup key (delegates to
        NLTK's NgramTagger)."""
        return NgramTagger.context(self, tokens, index, history)
class UnigramLemmatizer(NgramLemmatizer, UnigramTagger):
    """Setup for UnigramLemmatizer(): the n=1 special case of
    NgramLemmatizer, choosing each token's lemma from training data."""
    def __init__(self, train=None, model=None, backoff=None, cutoff=0):
        """
        :param train: Training sentences of (TOKEN, LEMMA) tuples
        :param model: Dict; DEPRECATED, use TrainLemmatizer
        :param backoff: Next lemmatizer in backoff chain.
        :param cutoff: Int with minimum number of matches to choose lemma
        """
        NgramLemmatizer.__init__(self, 1, train, model, backoff, cutoff)  # Note 1 for unigram
        UnigramTagger.__init__(self, train, model, backoff, cutoff)
class RegexpLemmatizer(SequentialBackoffLemmatizer, RegexpTagger):
    """Lemmatizer that derives a lemma by rewriting a token's ending with
    regex substitution rules."""
    def __init__(self, regexps=None, backoff=None):
        """Setup for RegexpLemmatizer()

        :param regexps: List of tuples of form (PATTERN, REPLACEMENT)
        :param backoff: Next lemmatizer in backoff chain.
        """
        SequentialBackoffLemmatizer.__init__(self, backoff)
        RegexpTagger.__init__(self, regexps, backoff)
        self._regexs = regexps

    def choose_lemma(self, tokens, index, history):
        """Use regular expressions for rules-based lemmatizing based on word endings;
        tokens are matched for patterns with the base kept as a group; a word-ending
        replacement is added to the (base) group.

        :param tokens: List of tokens to be lemmatized
        :param index: Int with current token
        :param history: List with tokens that have already been lemmatized
        :return: Str with concatenated lemma, or None if no pattern matches
        """
        # First matching pattern wins; fall through to None (-> backoff)
        # when nothing matches.  (An unreachable `break` that followed the
        # return in the original has been removed.)
        for pattern, replace in self._regexs:
            if re.search(pattern, tokens[index]):
                return re.sub(pattern, replace, tokens[index])
class PPLemmatizer(RegexpLemmatizer):
    """Customization of the RegexpLemmatizer for Latin. The RegexpLemmatizer is
    used as a stemmer; the stem is then applied to a dictionary lookup of
    principal parts."""
    def __init__(self, regexps=None, pps=None, backoff=None):
        """Setup PPLemmatizer().

        :param regexps: List of tuples of form (PATTERN, INT) where INT is
        the principal part number needed to lookup the correct stem.
        :param pps: Principal-parts dict; NOTE(review): currently ignored in
        favor of the module-level ``latin_pps`` (kept for interface
        compatibility).
        :param backoff: Next lemmatizer in backoff chain.
        """
        RegexpLemmatizer.__init__(self, regexps, backoff)
        # Note different compile to make use of principal parts dictionary structure; also, note
        # that the PP dictionary has been set up so that principal parts match their traditional
        # numbering, i.e. present stem is indexed as 1. The 0 index is used for the lemma.
        self._regexs = latin_verb_patterns
        self.pps = latin_pps

    def choose_lemma(self, tokens, index, history):
        """Use regular expressions for rules-based lemmatizing based on
        principal parts stems. Tokens are matched for patterns with
        the ending kept as a group; the stem is looked up in a dictionary
        by PP number (see above) and ending is discarded.

        :param tokens: List of tokens to be lemmatized
        :param index: Int with current token
        :param history: List with tokens that have already been lemmatized
        :return: Str with index[0] from the dictionary value, see above about '0 index'
        """
        for regexp in self._regexs:
            m = re.match(regexp[0], tokens[index])
            if m:
                root = m.group(1)
                match = [lemma for (lemma, pp) in self.pps.items() if root == pp[regexp[1]]]
                # Simplified from the original `if not match: pass / else:`;
                # an empty match falls through to the next pattern.
                if match:
                    return match[0]  # Lemma is indexed at zero in PP dictionary
class RomanNumeralLemmatizer(RegexpLemmatizer):
    """Lemmatizer for Roman numerals: returns the pattern's replacement (or
    a caller-supplied default) without any stemming/concatenation."""
    def __init__(self, regexps=rn_patterns, default=None, backoff=None):
        """RomanNumeralLemmatizer

        :param regexps: List of (PATTERN, REPLACEMENT) tuples
        :param default: Str returned for every match when given
        :param backoff: Next lemmatizer in backoff chain.
        """
        RegexpLemmatizer.__init__(self, regexps, backoff)
        # Pre-compile the patterns once at construction time.
        self._regexs = [(re.compile(regexp), pattern,) for regexp, pattern in regexps]
        self.default = default

    def choose_lemma(self, tokens, index, history):
        """Test case for customized rules-based improvements to lemmatizer using regex; differs
        from base RegexpLemmatizer in that it returns the given pattern without stemming,
        concatenating, etc.

        :param tokens: List of tokens to be lemmatized
        :param index: Int with current token
        :param history: List with tokens that have already been lemmatized
        :return: Str with replacement from pattern, or None if no match
        """
        # First match wins.  (An unreachable `break` that followed the
        # return in the original has been removed.)
        for pattern, replace in self._regexs:
            if re.search(pattern, tokens[index]):
                return self.default if self.default else replace
class ContextPOSLemmatizer(ContextLemmatizer):
    """Lemmatizer that combines context with POS-tagging based on
    training data. Subclasses define context.

    The code for _train closely follows ContextTagger in
    https://github.com/nltk/nltk/blob/develop/nltk/tag/sequential.py

    This lemmatizer is included here as proof of concept that
    lemma disambiguation can be made based on the pattern:
    LEMMA & POS of following word.

    Should be rewritten to give more flexibility to the kinds
    of context that a free word order language demand. I.e. to
    study patterns such as:
    POS of preceding word & LEMMA
    LEMMA & POS of following two words
    LEMMA & POS of n-skipgrams
    etc.
    """
    def __init__(self, context_to_lemmatize, include=None, backoff=None):
        """Setup ContextPOSLemmatizer().

        :param context_to_lemmatize: List of tuples of the form (TOKEN, LEMMA);
        this should be 'gold standard' data that can be used to train on a
        given context, e.g. unigrams, bigrams, etc.
        :param include: List of tokens to include, all other tokens return None
        from choose_lemma--runs VERY SLOW if no list is given as a parameter
        since every token gets POS-tagged. Only tested so far on 'cum'
        --also, test data only distinguishes 'cum1'/'cum2'. Further
        testing should be done with ambiguous lemmas using Morpheus numbers.
        :param backoff: Next lemmatizer in backoff chain.
        """
        # SequentialBackoffLemmatizer.__init__(self, backoff)
        ContextLemmatizer.__init__(self, context_to_lemmatize, backoff)
        self.include = include
        self._context_to_tag = (context_to_lemmatize if context_to_lemmatize else {})

    def _get_pos_tags(self, tokens):
        """Iterate through list of tokens and use POS tagger to build
        a corresponding list of tags.

        :param tokens: List of tokens to be POS-tagged
        :return: List with POS-tag for each token
        """
        # Import (and define tagger) with other imports?
        from cltk.tag.pos import POSTag
        tagger = POSTag('latin')
        tokens = " ".join(tokens)
        tags = tagger.tag_ngram_123_backoff(tokens)
        # Keep only the first letter of each tag, lowercased (None stays None).
        tags = [tag[1][0].lower() if tag[1] else tag[1] for tag in tags]
        return tags

    def choose_lemma(self, tokens, index, history):
        """Choose lemma based on POS-tag defined by context.

        :param tokens: List of tokens to be lemmatized
        :param index: Int with current token
        :param history: List with POS-tags of tokens that have already
        been lemmatized.
        :return: String with suggested lemma
        """
        if self.include:
            # Skip tokens outside the include list (avoids POS-tagging cost).
            if tokens[index] not in self.include:
                return None
        # NOTE(review): the incoming `history` is discarded and recomputed
        # by POS-tagging the whole token list on every call.
        history = self._get_pos_tags(tokens)
        context = self.context(tokens, index, history)
        suggested_lemma = self._context_to_tag.get(context)
        return suggested_lemma

    def _train(self, lemma_pos_corpus, cutoff=0):
        """Override method for _train from ContextTagger in
        nltk.tag.sequential. Original _train method expects
        tagged corpus of form (TOKEN, LEMMA); this expects in
        addition POS-tagging information.

        :param lemma_pos_corpus: List of tuples of form (TOKEN, LEMMA, POSTAG)
        :param cutoff: Int with minimum number of matches to choose lemma
        """
        token_count = hit_count = 0
        # A context is considered 'useful' if it's not already lemmatized
        # perfectly by the backoff lemmatizer.
        useful_contexts = set()
        # Count how many times each tag occurs in each context.
        fd = ConditionalFreqDist()
        for sentence in lemma_pos_corpus:
            tokens, lemmas, poss = zip(*sentence)
            for index, (token, lemma, pos) in enumerate(sentence):
                # Record the event.
                token_count += 1
                context = self.context(tokens, index, poss)
                if context is None: continue
                fd[context][lemma] += 1
                # If the backoff got it wrong, this context is useful:
                if (self.backoff is None or lemma != self.backoff.tag_one(tokens, index, lemmas[:index])): # pylint: disable=line-too-long
                    useful_contexts.add(context)
        # Build the context_to_lemmatize table -- for each context, figure
        # out what the most likely lemma is.  Only include contexts that
        # we've seen at least `cutoff` times.
        for context in useful_contexts:
            best_lemma = fd[context].max()
            hits = fd[context][best_lemma]
            if hits > cutoff:
                self._context_to_tag[context] = best_lemma
                hit_count += hits
class NgramPOSLemmatizer(ContextPOSLemmatizer):
    """ContextPOSLemmatizer whose context is the POS tags of the FOLLOWING
    n-1 words (look-ahead; see ``context``)."""
    def __init__(self, n, train=None, model=None, include=None,
                 backoff=None, cutoff=0):
        """Setup for NgramPOSLemmatizer

        :param n: Int with length of 'n'-gram
        :param train: List of tuples of the form (TOKEN, LEMMA, POS)
        :param model: Dict; DEPRECATED
        :param include: List of tokens to consider
        :param backoff: Next lemmatizer in backoff chain.
        :param cutoff: Int with minimum number of matches to choose lemma
        """
        self._n = n
        self._check_params(train, model)
        ContextPOSLemmatizer.__init__(self, model, include, backoff)
        if train:
            self._train(train, cutoff)

    def context(self, tokens, index, history):
        """Redefines context with look-ahead of length n (not look behind
        as in original method).

        :param tokens: List of tokens to be lemmatized
        :param index: Int with current token
        :param history: List with tokens that have already been
        tagged/lemmatized
        :return: Tuple of the form (TOKEN, (CONTEXT)); CONTEXT will
        depend on ngram value, e.g. for bigram ('cum', ('n',)) but
        for trigram ('cum', ('n', 'n', ))
        """
        # Slice of the NEXT n-1 history entries (POS tags), not the past.
        lemma_context = tuple(history[index + 1: index + self._n])
        return tokens[index], lemma_context
class BigramPOSLemmatizer(NgramPOSLemmatizer):
    """Bigram (n=2) specialization of NgramPOSLemmatizer: disambiguates a
    token by the POS tag of the following word."""
    def __init__(self, train=None, model=None, include=None,
                 backoff=None, cutoff=0):
        """Setup for BigramPOSLemmatizer()"""
        NgramPOSLemmatizer.__init__(self, 2, train, model,
                                    include, backoff, cutoff)
#class TrigramPOSLemmatizer(NgramPOSLemmatizer):
# """"""
# def __init__(self, train=None, model=None, include=None,
# backoff=None, cutoff=0):
# """Setup for TrigramPOSLemmatizer()"""
# NgramPOSLemmatizer.__init__(self, 3, train, model, include,
# backoff, cutoff)
class BackoffLatinLemmatizer(object):
    """Suggested backoff chain; includes at least one of each
    type of major sequential backoff class from backoff.py

    ### Putting it all together
    ### BETA Version of the Backoff Lemmatizer AKA BackoffLatinLemmatizer
    ### For comparison, there is also a TrainLemmatizer that replicates the
    ### original Latin lemmatizer from cltk.stem
    """
    def __init__(self, train, seed=3):
        """
        :param train: Sentences of (TOKEN, LEMMA, POS)-like tuples
            (presumably; items are unpacked as item[0]/item[1] below --
            confirm against the latin_pos_lemmatized_sents pickle)
        :param seed: Int making the shuffle/split reproducible
        """
        self.train = train
        self.seed = seed
        # Models are expected in the latin_models_cltk corpus checkout.
        rel_path = os.path.join('~/cltk_data/latin/model/latin_models_cltk/lemmata/backoff')
        path = os.path.expanduser(rel_path)
        # Check for presence of LATIN_OLD_MODEL
        file = 'latin_lemmata_cltk.pickle'
        old_model_path = os.path.join(path, file)
        if os.path.isfile(old_model_path):
            self.LATIN_OLD_MODEL = open_pickle(old_model_path)
        else:
            # Missing model degrades gracefully to an empty dict.
            self.LATIN_OLD_MODEL = {}
            print('The file %s is not available in cltk_data' % file)
        # Check for presence of LATIN_MODEL
        file = 'latin_model.pickle'
        model_path = os.path.join(path, file)
        if os.path.isfile(model_path):
            self.LATIN_MODEL = open_pickle(model_path)
        else:
            self.LATIN_MODEL = {}
            print('The file %s is not available in cltk_data' % file)
        # Check for presence of misc_patterns
        self.latin_sub_patterns = latin_sub_patterns
        # Check for presence of verb_patterns
        self.latin_verb_patterns = latin_verb_patterns
        # Check for presence of latin_pps
        self.latin_pps = latin_pps

        # NOTE: deliberately a nested helper (no self), not a method.
        def _randomize_data(train, seed):
            import random
            random.seed(seed)
            # NOTE(review): shuffles the caller's list IN PLACE, and the
            # fixed 4000/5000 split assumes >= 5000 sentences.
            random.shuffle(train)
            pos_train_sents = train[:4000]
            lem_train_sents = [[(item[0], item[1]) for item in sent] for sent in train]
            train_sents = lem_train_sents[:4000]
            test_sents = lem_train_sents[4000:5000]
            return pos_train_sents, train_sents, test_sents

        self.pos_train_sents, self.train_sents, self.test_sents = _randomize_data(self.train, self.seed)

    def _define_lemmatizer(self):
        # Suggested backoff chain--should be tested for optimal order
        backoff0 = None
        backoff1 = IdentityLemmatizer()
        backoff2 = TrainLemmatizer(model=self.LATIN_OLD_MODEL, backoff=backoff1)
        backoff3 = PPLemmatizer(regexps=self.latin_verb_patterns, pps=self.latin_pps, backoff=backoff2)
        backoff4 = RegexpLemmatizer(self.latin_sub_patterns, backoff=backoff3)
        backoff5 = UnigramLemmatizer(self.train_sents, backoff=backoff4)
        backoff6 = TrainLemmatizer(model=self.LATIN_MODEL, backoff=backoff5)
        #backoff7 = BigramPOSLemmatizer(self.pos_train_sents, include=['cum'], backoff=backoff6)
        #lemmatizer = backoff7
        lemmatizer = backoff6
        return lemmatizer

    def lemmatize(self, tokens):
        """Lemmatize tokens with the full backoff chain.
        NOTE(review): the chain (incl. UnigramLemmatizer training) is
        rebuilt on every call; consider caching if this becomes hot."""
        lemmatizer = self._define_lemmatizer()
        lemmas = lemmatizer.lemmatize(tokens)
        return lemmas

    def evaluate(self):
        """Score the chain against the held-out test sentences."""
        lemmatizer = self._define_lemmatizer()
        return lemmatizer.evaluate(self.test_sents)
# Accuracty test available below——keep? delete?
#if __name__ == "__main__":
#
# # Set up training sentences
# rel_path = os.path.join('~/cltk_data/latin/model/latin_models_cltk/lemmata/backoff')
# path = os.path.expanduser(rel_path)
#
# # Check for presence of latin_pos_lemmatized_sents
# file = 'latin_pos_lemmatized_sents.pickle'
#
# latin_pos_lemmatized_sents_path = os.path.join(path, file)
# if os.path.isfile(latin_pos_lemmatized_sents_path):
# latin_pos_lemmatized_sents = open_pickle(latin_pos_lemmatized_sents_path)
# else:
# latin_pos_lemmatized_sents = []
# print('The file %s is not available in cltk_data' % file)
#
#
# RUN = 10
# ACCURACIES = []
#
# for I in range(RUN):
# LEMMATIZER = BackoffLatinLemmatizer(latin_pos_lemmatized_sents)
# ACC = LEMMATIZER.evaluate()
# ACCURACIES.append(ACC)
# print('{:.2%}'.format(ACC))
#
# print('\nTOTAL (Run %d) times' % RUN)
# print('{:.2%}'.format(sum(ACCURACIES) / RUN))
| [
"kyle@kyle-p-johnson.com"
] | kyle@kyle-p-johnson.com |
0b90436944b72f7d75e12de6a61fcac97b097b15 | e40c481a94e54b658c4c1cc3a11a8f8922ce6e58 | /question_1/main_pedrinho.py | 1a07cc42cd1d24966b3a436c1d1d153e24df3546 | [] | no_license | DouglasEBauler/TrabalhoBuscasEmGrafos | 8f9e84a1eff02791b6c46cccf2bfc2aa9071a169 | 8e41ef05c8f660f9b395393e0c29866c6a6fb529 | refs/heads/master | 2020-05-15T12:41:47.279557 | 2019-06-10T01:48:44 | 2019-06-10T01:48:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | # coding: utf-8
# author: Douglas Eduardo Bauler, Jefferson do Nascimento Júnior.
from graph.classes import Graph, Vertex
def fill_graph(file_name: str) -> Graph:
    """Build a Graph from an edge-list file.

    Each line with three space-separated fields is "source target weight";
    a line with fewer fields carries the destination vertex number, which
    is stored (as a string, incremented by one) in g.destiny_vertex.

    Raises FileExistsError if the file is empty.  (The original checked
    f.__sizeof__() == 0, but __sizeof__ returns the in-memory size of the
    file-handle object — never 0 — so the guard could not fire; the handle
    was also never closed.)
    """
    g = Graph()
    is_empty = True
    with open(file_name, "r") as f:  # ensures the handle is closed
        for line in f:
            is_empty = False
            values = line.replace("\n", "").split(" ", 2)
            if len(values) < 3:
                # Short line marks the destination vertex
                g.destiny_vertex = str(int(values[0]) + 1)
                continue
            # Add vertices in graph (duplicates assumed handled by Graph)
            g.add_vertex(Vertex(values[0]))
            g.add_vertex(Vertex(values[1]))
            # Add weighted edge
            g.add_edge(values[0], values[1], int(values[2]))
    if is_empty:
        raise FileExistsError("Arquivo do grafo não está preenchido")
    return g
def info_graph(g: Graph, file_name: str):
    """Run Dijkstra from vertex "0" and write the destination's distance to file_name.

    Uses a 'with' block so the handle is closed even if the write raises.
    The original's FileNotFoundError fallback to mode 'x' was dead code:
    mode 'w' already creates a missing file, and the only way 'w' raises
    FileNotFoundError (missing directory) makes 'x' fail identically.
    """
    g.dijsktra("0")
    with open(file_name, "w") as f:
        f.write(str(g.vertex_list[g.destiny_vertex].distance))
if __name__ == '__main__':
    # Build the graph from the edge-list input file.
    graph = fill_graph("../question_1/input.txt")
    # Run Dijkstra and write the destination vertex's distance to disk.
    # NOTE(review): hard-coded Windows path — presumably a local debug target.
    info_graph(graph, "C:/Temp/entrada.in.txt")
"ceco96.edu@gmail.com"
] | ceco96.edu@gmail.com |
c1371bf0e25040027bd51884f78fe60acd0c2bb5 | 8970b2ad01d9f447a5c6b582c84c4ba60ac36808 | /nterm_annot/extract_clusters.py | b173213a34531265b6dc52bd04b8d0325319dd38 | [] | no_license | pauldrinn/nterm_annot_project | d86d5fd3b430b599a415536195d47ac2655e59a1 | 94bb82b97d8e88c6ba72d15d35eff1d50ea7ae7f | refs/heads/master | 2023-05-26T16:31:11.083174 | 2023-05-21T14:09:58 | 2023-05-21T14:09:58 | 181,643,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | import os
import sys
# Split an MMseqs2-style "all sequences" FASTA into one file per cluster.
clu_all_seq = sys.argv[1]
clus_dir = os.path.dirname(clu_all_seq)+ '/separated_clusters'
if not os.path.exists(clus_dir):
    os.makedirs(clus_dir)
previous = ''
with open(clu_all_seq, 'r') as f:
    for line in f:
        if line.startswith('>'): # FASTA header line
            identifier = line
            # Assumes the input format repeats the cluster representative's
            # header twice in a row to mark a new cluster — TODO confirm
            # against the createseqfiledb output being consumed here.
            if identifier == previous:
                try:
                    # Close the previous cluster's file, if any is open.
                    w.close()
                except:
                    pass
                w = open(clus_dir + '/' + previous[1:].strip() + '.fa', 'w')
            previous = identifier
        else:
            # Sequence line: write the member's header plus its sequence.
            # NOTE(review): the last opened file is never closed explicitly;
            # it is released only at interpreter exit.
            w.write(identifier + line)
"""
# Found out createseqfiledb does this with --min-sequences 20
# This part removes clusters with less than 20 members
filelist = os.listdir('separated_clusters')
for clufile in filelist:
    num_lines = sum(1 for line in open('separated_clusters/' + clufile))
    if num_lines < 42:
        os.remove('separated_clusters/' + clufile)
"""
"tahsinemirhan58771@gmail.com"
] | tahsinemirhan58771@gmail.com |
a91a049898f84af1efc4a7b5519fe90926e59eff | 61b245e06b9c16f7d6bffba12b2f8c3ce81c42f3 | /misago/readtracker/apps.py | e977c3f70205a69ba80df472219b8c922b61d0a4 | [] | no_license | quangsen/misago_dev | cc8737e0b63bcdd9cb2a6ec4bd4b2293d12e93f5 | 4a57f14b5a9fdf4fe94f68d3334c36b5046ff20a | refs/heads/master | 2022-12-07T15:15:45.809005 | 2019-08-22T10:21:43 | 2019-08-26T00:24:00 | 196,503,519 | 0 | 0 | null | 2022-11-22T04:11:48 | 2019-07-12T03:37:40 | Python | UTF-8 | Python | false | false | 97 | py | from django.apps import AppConfig
class ReadtrackerConfig(AppConfig):
    """Django application configuration for the readtracker app."""
    name = 'readtracker'
| [
"vkluong@VKLuongs.local"
] | vkluong@VKLuongs.local |
0b44362a2b6c549049995c5352b90512f7f1a82f | d50312b74e4cb91ff250490936db92bbebcfbc3b | /bleRSSI_mqtt/main.py | cc4db0e62f19920cde438d4f39aaf5aed95751f8 | [] | no_license | blijf-weg/subscirber_ble_RSSI_grenswaarde | 2c1151588976b47626b81107484b142b7481b2df | 017dac590c687fcfa9f3d8cc3b833d5865331ab7 | refs/heads/main | 2023-03-27T00:46:54.512349 | 2021-03-18T12:30:38 | 2021-03-18T12:30:38 | 349,085,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | import time
import paho.mqtt.client as mqtt
from pysinewave import SineWave
sinewave = SineWave(pitch = 12, pitch_per_second = 10)
def process_message(client, userdata, message):
    """MQTT on_message callback: play a one-second tone when the RSSI
    field (first '_'-separated token of the payload) is above -30."""
    payload_text = str(message.payload.decode("utf-8"))
    rssi_field = payload_text.split("_")[0]
    print(payload_text)
    if float(rssi_field) > -30:
        sinewave.play()
        time.sleep(1)
        sinewave.stop()
# Create the MQTT client with a fixed client id.
client = mqtt.Client(client_id="subscriber-1")
# Assign the message callback.
client.on_message = process_message
# Connect to the broker (host, port 1883, 60 s keepalive).
client.connect("192.168.43.101",1883,60)
# Subscribe to the RSSI topic published by the ESP32.
client.subscribe("esp32/afstand/rssi")
# Block forever, dispatching incoming messages to process_message.
client.loop_forever()
"nathan.slembrouck@student.kuleuven.be"
] | nathan.slembrouck@student.kuleuven.be |
efefb440146c23d804a17792e39d091c1a94ae26 | 06476bc4cb7fc3ce378beb357fac7d5aacb87b3b | /Prototype/env/lib/python3.8/site-packages/Xlib/xobject/icccm.py | a328925ed9e58bccae33040d004ca1fabba4d98d | [
"MIT"
] | permissive | marc-ortuno/VOPEC | 44d3a74d3e0686474dd57fcb21e845fd5fd48897 | e7ed1f13cc1868a824f4036dd08ec6bed4266c08 | refs/heads/main | 2023-06-12T19:15:18.060897 | 2021-07-01T17:15:03 | 2021-07-01T17:15:03 | 344,433,646 | 0 | 0 | MIT | 2021-06-14T19:15:47 | 2021-03-04T10:22:05 | Python | UTF-8 | Python | false | false | 3,441 | py | # Xlib.xobject.icccm -- ICCCM structures
#
# Copyright (C) 2000 Peter Liljenberg <petli@ctrl-c.liu.se>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
from Xlib import X, Xutil
from Xlib.protocol import rq
# Aspect ratio as a num/denum fraction (used by WM_NORMAL_HINTS).
Aspect = rq.Struct( rq.Int32('num'), rq.Int32('denum') )
# WM_NORMAL_HINTS property layout (size hints a client gives the WM).
WMNormalHints = rq.Struct( rq.Card32('flags'),
                           rq.Pad(16),
                           rq.Int32('min_width', default = 0),
                           rq.Int32('min_height', default = 0),
                           rq.Int32('max_width', default = 0),
                           rq.Int32('max_height', default = 0),
                           rq.Int32('width_inc', default = 0),
                           rq.Int32('height_inc', default = 0),
                           rq.Object('min_aspect', Aspect, default = (0, 0)),
                           rq.Object('max_aspect', Aspect, default = (0, 0)),
                           rq.Int32('base_width', default = 0),
                           rq.Int32('base_height', default = 0),
                           rq.Int32('win_gravity', default = 0),
                           )
# WM_HINTS property layout (input model, initial state, icon resources).
WMHints = rq.Struct( rq.Card32('flags'),
                     rq.Card32('input', default = 0),
                     rq.Set('initial_state', 4,
                            # withdrawn is totally bogus according to
                            # ICCCM, but some window managers seem to
                            # use this value to identify dockapps.
                            # Oh well.
                            ( Xutil.WithdrawnState,
                              Xutil.NormalState,
                              Xutil.IconicState ),
                            default = Xutil.NormalState),
                     rq.Pixmap('icon_pixmap', default = 0),
                     rq.Window('icon_window', default = 0),
                     rq.Int32('icon_x', default = 0),
                     rq.Int32('icon_y', default = 0),
                     rq.Pixmap('icon_mask', default = 0),
                     rq.Window('window_group', default = 0),
                     )
# WM_STATE property layout (current state plus optional icon window).
WMState = rq.Struct( rq.Set('state', 4,
                            ( Xutil.WithdrawnState,
                              Xutil.NormalState,
                              Xutil.IconicState )),
                     rq.Window('icon', ( X.NONE, )),
                     )
# WM_ICON_SIZE property layout (icon size constraints set by the WM).
WMIconSize = rq.Struct( rq.Card32('min_width'),
                        rq.Card32('min_height'),
                        rq.Card32('max_width'),
                        rq.Card32('max_height'),
                        rq.Card32('width_inc'),
                        rq.Card32('height_inc'),
                        )
| [
"you@example.com"
] | you@example.com |
07fd377900fcd41df288495bb834238f50298d2d | d278b8fa96c30d33fe2ef3a829bb72b9ee74ce20 | /qa/rpc-tests/test_framework/util.py | 30c4b5f9eef6f92ab29b3b0d4423a1ac84496cc1 | [
"MIT"
] | permissive | trionecoin/trione | a6811153759c696b7bc233301f6233126808fbf2 | a82e76a4e95fec07d1d9c615a3fe8f93d17da906 | refs/heads/master | 2020-03-18T05:36:18.568988 | 2018-05-23T14:19:03 | 2018-05-23T14:19:03 | 134,351,375 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,616 | py | # Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2014-2017 The Trione Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
# Directory for RPC-call coverage logs; None disables coverage tracking.
COVERAGE_DIR = None
# Set Mocktime default to OFF.
# MOCKTIME is only needed for scripts that use the
# cached version of the blockchain. If the cached
# version of the blockchain is used without MOCKTIME
# then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
    """Set MOCKTIME to the timestamp the cached regtest chain was built with."""
    # For backward compatibility of the python scripts
    # with previous versions of the cache, set MOCKTIME
    # to regtest genesis time + (201 * 156)
    global MOCKTIME
    MOCKTIME = 1417713337 + (201 * 156)
def disable_mocktime():
    """Reset MOCKTIME to 0 (mocktime off)."""
    global MOCKTIME
    MOCKTIME = 0
def get_mocktime():
    """Return the current MOCKTIME value (0 when disabled)."""
    return MOCKTIME
def enable_coverage(dirname):
    """Maintain a log of which RPC calls are made during testing."""
    global COVERAGE_DIR
    COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
    """
    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to

    Kwargs:
        timeout (int): HTTP timeout in seconds

    Returns:
        AuthServiceProxy. convenience object for making RPC calls.

    """
    proxy_kwargs = {}
    if timeout is not None:
        proxy_kwargs['timeout'] = timeout
    proxy = AuthServiceProxy(url, **proxy_kwargs)
    proxy.url = url  # store URL on proxy for info
    # Wrap the proxy so each RPC call is recorded when coverage is enabled.
    coverage_logfile = coverage.get_filename(
        COVERAGE_DIR, node_number) if COVERAGE_DIR else None
    return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def get_mnsync_status(node):
    """Return True once the node reports its masternode sync as complete."""
    result = node.mnsync("status")
    return result['IsSynced']
def wait_to_sync(node):
    """Busy-wait (0.5 s poll) until the node's masternode sync finishes."""
    synced = False
    while not synced:
        synced = get_mnsync_status(node)
        time.sleep(0.5)
def p2p_port(n):
    """Deterministic P2P listen port for node *n*, offset by PID so parallel runs don't collide."""
    pid_offset = os.getpid() % 999
    return 11000 + n + pid_offset

def rpc_port(n):
    """Deterministic RPC port for node *n*, offset by PID so parallel runs don't collide."""
    pid_offset = os.getpid() % 999
    return 12000 + n + pid_offset
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    amount = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(amount)))
    satoshis = int(round_tripped * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Number of raw bytes encoded by *hex_string*."""
    raw = bytearray.fromhex(hex_string)
    return len(raw)

def bytes_to_hex_str(byte_str):
    """Hex-encode *byte_str*, returning an ASCII text string."""
    encoded = hexlify(byte_str)
    return encoded.decode('ascii')

def hex_str_to_bytes(hex_str):
    """Decode a hex text string into the raw bytes it represents."""
    ascii_bytes = hex_str.encode('ascii')
    return unhexlify(ascii_bytes)

def str_to_b64str(string):
    """Base64-encode a UTF-8 text string, returning an ASCII text string."""
    encoded = b64encode(string.encode('utf-8'))
    return encoded.decode('ascii')
def sync_blocks(rpc_connections, wait=1):
    """
    Wait until everybody has the same block count

    Polls every *wait* seconds; loops forever if the nodes never converge.
    """
    while True:
        counts = [ x.getblockcount() for x in rpc_connections ]
        if counts == [ counts[0] ]*len(counts):
            break
        time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
    """
    Wait until everybody has the same transactions in their memory
    pools

    Compares every node's mempool (as a set of txids) against node 0's,
    polling every *wait* seconds.
    """
    while True:
        pool = set(rpc_connections[0].getrawmempool())
        num_match = 1
        for i in range(1, len(rpc_connections)):
            if set(rpc_connections[i].getrawmempool()) == pool:
                num_match = num_match+1
        if num_match == len(rpc_connections):
            break
        time.sleep(wait)
def sync_masternodes(rpc_connections):
    """Block until every node reports its masternode sync as complete."""
    for node in rpc_connections:
        wait_to_sync(node)
# Maps node index -> subprocess.Popen handle of the running daemon.
bitcoind_processes = {}
def initialize_datadir(dirname, n):
    """Create node *n*'s datadir under *dirname* and write its trione.conf.

    The config enables regtest, sets fixed RPC credentials, and assigns
    the PID-offset p2p/rpc ports. Returns the datadir path.
    """
    datadir = os.path.join(dirname, "node"+str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    with open(os.path.join(datadir, "trione.conf"), 'w') as f:
        f.write("regtest=1\n")
        f.write("rpcuser=rt\n")
        f.write("rpcpassword=rt\n")
        f.write("port="+str(p2p_port(n))+"\n")
        f.write("rpcport="+str(rpc_port(n))+"\n")
        f.write("listenonion=0\n")
    return datadir
def rpc_url(i, rpchost=None):
    """RPC URL (with the fixed rt:rt credentials) for node *i*; host defaults to localhost."""
    return "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
def wait_for_bitcoind_start(process, url, i):
    '''
    Wait for trioned to start. This means that RPC is accessible and fully initialized.
    Raise an exception if trioned exits during initialization.
    '''
    while True:
        if process.poll() is not None:
            raise Exception('trioned exited with status %i during initialization' % process.returncode)
        try:
            rpc = get_rpc_proxy(url, i)
            blocks = rpc.getblockcount()  # any RPC call proves the server is up
            break # break out of loop on success
        except IOError as e:
            if e.errno != errno.ECONNREFUSED: # Port not yet open?
                raise # unknown IO error
        except JSONRPCException as e: # Initialization phase
            if e.error['code'] != -28: # RPC in warmup?
                raise # unkown JSON RPC exception
        time.sleep(0.25)
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
"""
if (not os.path.isdir(os.path.join("cache","node0"))
or not os.path.isdir(os.path.join("cache","node1"))
or not os.path.isdir(os.path.join("cache","node2"))
or not os.path.isdir(os.path.join("cache","node3"))):
#find and delete old cache directories if any exist
for i in range(4):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
# Create cache directories, run trioneds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("TRIONED", "trioned"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: trioned started, waiting for RPC to come up"
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: RPC succesfully started"
rpcs = []
for i in range(4):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 156 seconds apart
# starting from 31356 seconds in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 156)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 156
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
disable_mocktime()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in trione.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for i in range(num_nodes):
        # Return value (the datadir path) is not needed here; the original
        # bound it to an unused local.
        initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a trioned and return RPC connection to it

    The daemon binary defaults to $TRIONED (or "trioned"); *extra_args*
    are appended to the command line, *timewait* is the RPC HTTP timeout.
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("TRIONED", "trioned")
    # RPC tests still depend on free transactions
    args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-blockprioritysize=50000", "-mocktime="+str(get_mocktime()) ]
    if extra_args is not None: args.extend(extra_args)
    bitcoind_processes[i] = subprocess.Popen(args)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: trioned started, waiting for RPC to come up"
    url = rpc_url(i, rpchost)
    wait_for_bitcoind_start(bitcoind_processes[i], url, i)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: RPC succesfully started"
    proxy = get_rpc_proxy(url, i, timeout=timewait)
    # Record RPC calls when coverage tracking is enabled.
    if COVERAGE_DIR:
        coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
    """
    Start multiple trioneds, return RPC connections to them

    *extra_args* and *binary* are per-node lists; None means defaults.
    """
    if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
    if binary is None: binary = [ None for i in range(num_nodes) ]
    rpcs = []
    try:
        for i in range(num_nodes):
            rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]))
    except: # If one node failed to start, stop the others
        stop_nodes(rpcs)
        raise
    return rpcs
def log_filename(dirname, n_node, logname):
    """Path to *logname* inside node *n_node*'s regtest datadir under *dirname*."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Cleanly stop node *i* via RPC and wait for its process to exit."""
    node.stop()
    bitcoind_processes[i].wait()
    del bitcoind_processes[i]
def stop_nodes(nodes):
    """Send the stop RPC to every node (does not wait for process exit)."""
    for node in nodes:
        node.stop()
    del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
    """Set the mocked clock to *t* on every node."""
    for node in nodes:
        node.setmocktime(t)
def wait_bitcoinds():
    # Wait for all bitcoinds to cleanly exit
    for bitcoind in bitcoind_processes.values():
        bitcoind.wait()
    bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
    """One-way connect *from_connection* to local node *node_num* and wait for the handshake."""
    ip_port = "127.0.0.1:"+str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes *a* and *b* in both directions."""
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for n, txout in enumerate(txdata["vout"]):
        if txout["value"] == amount:
            return n
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed

    Returns (total_in, inputs) where inputs is a list of txid/vout/address
    dicts; raises RuntimeError on insufficient funds.
    """
    assert(confirmations_required >=0)
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and len(utxo) > 0:
        t = utxo.pop()
        total_in += t["amount"]
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them

    Returns a dict mapping fresh addresses from *from_node* to change
    amounts; change larger than twice the spend is split in two.
    """
    outputs = {}
    amount = amount_out+fee
    change = amount_in - amount
    if change > amount*2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - amount - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)
    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    # Fee is min_fee plus a random multiple of fee_increment.
    fee = min_fee + fee_increment*random.randint(0,fee_variants)
    (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)
    (total_in, inputs) = gather_inputs(from_node, amount+fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
    """Raise AssertionError unless thing1 == thing2."""
    if thing1 != thing2:
        message = "%s != %s" % (str(thing1), str(thing2))
        raise AssertionError(message)

def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 > thing2."""
    if thing1 <= thing2:
        message = "%s <= %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_raises(exc, fun, *args, **kwds):
    """Check that fun(*args, **kwds) raises *exc*; AssertionError otherwise."""
    try:
        fun(*args, **kwds)
    except exc:
        return
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    # fun() returned without raising anything.
    raise AssertionError("No exception raised")
def assert_is_hex_string(string):
    """Raise AssertionError unless *string* parses as a hexadecimal number."""
    try:
        int(string, 16)
    except Exception as err:
        message = "Couldn't interpret %r as hexadecimal; raised: %s" % (string, err)
        raise AssertionError(message)
def assert_is_hash_string(string, length=64):
    """Raise AssertionError unless *string* is a lowercase hex hash of the given length.

    NOTE(review): uses the Python-2-only `basestring`; under Python 3 this
    line raises NameError.
    """
    if not isinstance(string, basestring):
        raise AssertionError("Expected a string, got type %r" % type(string))
    elif length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    elif not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found
    in object_array
    """
    if should_not_find == True:
        assert_equal(expected, { })
    num_matched = 0
    for item in object_array:
        # Evaluate every to_match key (so a missing key raises KeyError,
        # matching the original's behavior of checking all keys).
        mismatched = [key for key, value in to_match.items() if item[key] != value]
        if mismatched:
            continue
        if should_not_find == True:
            num_matched = num_matched + 1
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
            num_matched = num_matched + 1
    if num_matched == 0 and should_not_find != True:
        raise AssertionError("No objects matched %s"%(str(to_match)))
    if num_matched > 0 and should_not_find == True:
        raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
    """Round *amount* down to 8 decimal places (one-satoshi granularity)."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """Mine/split until *node* owns at least *count* confirmed utxos; returns them."""
    node.generate(int(0.5*count)+101)
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    # Split one utxo into two per iteration until we have enough.
    for i in xrange(iterations):
        t = utxos.pop()
        inputs = []
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = satoshi_round(send_value/2)
        outputs[addr2] = satoshi_round(send_value/2)
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        txid = node.sendrawtransaction(signed_tx)
    # Confirm everything so the new utxos are spendable.
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Return a hex string of 128 OP_RETURN txouts (512 bytes of payload each)."""
    # Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
    # So we have big transactions (and therefore can't fit very many into each block)
    # create one script_pubkey
    script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
    for i in xrange (512):
        script_pubkey = script_pubkey + "01"
    # concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
    txouts = "81"
    for k in xrange(128):
        # add txout value
        txouts = txouts + "0000000000000000"
        # add length of script_pubkey
        txouts = txouts + "fd0402"
        # add script_pubkey
        txouts = txouts + script_pubkey
    return txouts
def create_tx(node, coinbase, to_address, amount):
    """Spend output 0 of *coinbase* to *to_address*; returns the signed tx hex."""
    inputs = [{ "txid" : coinbase, "vout" : 0}]
    outputs = { to_address : amount }
    rawtx = node.createrawtransaction(inputs, outputs)
    signresult = node.signrawtransaction(rawtx)
    assert_equal(signresult["complete"], True)
    return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
    """Broadcast one oversized tx per utxo; returns the list of txids."""
    addr = node.getnewaddress()
    txids = []
    for i in xrange(len(utxos)):
        t = utxos.pop()
        inputs = []
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr] = satoshi_round(send_value)
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the OP_RETURN txouts into the raw tx hex at a fixed offset.
        # NOTE(review): offsets 92/94 assume a fixed serialization layout for
        # a 1-in/1-out tx — verify against the node's tx format.
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        signresult = node.signrawtransaction(newtx, None, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def get_bip9_status(node, key):
    """Return the bip9_softforks entry whose 'id' equals *key*; IndexError if absent."""
    chain_info = node.getblockchaininfo()
    for fork in chain_info['bip9_softforks']:
        if fork['id'] == key:
            return fork
    raise IndexError ('key:"%s" not found' % key)
| [
"trioncoin@gmail.com"
] | trioncoin@gmail.com |
0876cd12aecdbcf3b9fc4ffd3190d80b4c751f14 | d87badbf69a2a32fc5a2719b48e2ff002796b394 | /src/nnet/train_cnn_nnet.py | e82184b0fb76e59bbe856710ebb276966eae8175 | [
"Apache-2.0"
] | permissive | sadhusamik/pyspeech | cbd396773122f35f9e590365b8383bed545c120d | dcb45894f55fe9a2ed793a680d3379887e7d8abb | refs/heads/master | 2020-03-20T16:12:06.693552 | 2018-07-18T15:57:37 | 2018-07-18T15:57:37 | 137,532,684 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,169 | py | #!/export/b18/ssadhu/tools/python/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 12 23:01:39 2018
@author: samiksadhu
"""
'Train CNN nnet with pytorch'
import sys
sys.path.append('/export/b15/ssadhu/pyspeech/src/featgen/')
sys.path.append('/export/b15/ssadhu/pyspeech/src/utils/')
sys.path.append('/export/b15/ssadhu/pyspeech/src/nnet/')
from gen_utils import get_dim
from nnet import get_device_id, print_log, model_err
import argparse
import pickle
import numpy as np
# Pytorch stuff
import torch
import torch.utils.data
from torch import nn
from torch.autograd import Variable
from os.path import join, dirname
class change_shape(nn.Module):
    """Flatten all but the batch dimension: (N, *) -> (N, features)."""
    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
def cnn_model(nlayers,ndepth,ksize,ntargets,insize,device_id):
    """Build an nlayers-deep CNN (Conv2d/ReLU/MaxPool stacks) ending in a
    Linear layer with ntargets outputs.

    insize is the (square) spatial input size; device_id of -1 keeps the
    model on CPU, otherwise it is moved to that CUDA device.
    """
    # First block: 1 input channel -> ndepth feature maps, 2x2 max-pool.
    structure=[nn.Conv2d(1,ndepth,kernel_size=ksize), nn.MaxPool2d(2), nn.ReLU()]
    ori_size=insize
    insize=insize-ksize+1
    insize=insize/2
    # Padding chosen so later conv layers roughly preserve spatial size.
    # NOTE(review): insize may be fractional here (true division) before the
    # int() casts below — verify the size arithmetic for odd insize/ksize.
    pad_size=int((ori_size-insize)/2)
    for k in range(nlayers-1):
        structure += [nn.Conv2d(ndepth,ndepth,kernel_size=ksize,padding=pad_size), nn.ReLU(), nn.MaxPool2d(2) ]
        insize=insize-ksize+1+2*pad_size
        insize=int(np.floor(insize/2))
    # Flatten and map to the output targets.
    structure +=[change_shape(), nn.Linear(insize*insize*ndepth,ntargets)]
    model = nn.Sequential(*structure)
    if device_id!=-1:
        with torch.cuda.device(device_id):
            model.cuda(device_id)
    return model
def get_args():
    """Parse and return the command-line arguments for CNN training."""
    parser = argparse.ArgumentParser('Train CNN nnet with pytorch backend')
    parser.add_argument('egs_dir', help='Example data directory')
    parser.add_argument('outmodel', help='output file')
    parser.add_argument('--ntargets', type=int, default=48, help='number of targets(48)')
    parser.add_argument('--nlayers', type=int, default=4, help='number of hidden layers(4)')
    parser.add_argument('--ndepth', type=int, default=20, help='Depth of each CNN layer(20)')
    parser.add_argument('--ksize', type=int, default=5, help='Kernel size(5)')
    parser.add_argument('--bsize', type=int, default=1000,
                        help='batch size')
    parser.add_argument('--split_num', type=int, help='number of splits of the data(5)', default=5)
    parser.add_argument('--epochs', type=int, default=1000,
                        help='number of epochs')
    parser.add_argument('--lrate', type=float, default=1e-3,
                        help='learning rate')
    parser.add_argument('--weight_decay', type=float, default=0.0,
                        help='L2 regularization')
    parser.add_argument('--cv_stop', type=int,
                        help='Stop after this many increases of CV error')
    return parser.parse_args()
def error_rate(model, features, labels, loss_fn):
    """Return (loss, error-rate) of *model* on a labelled batch.

    NOTE(review): `.data[0]` is the legacy (pre-0.4) PyTorch scalar access;
    on modern PyTorch this raises — `.item()` is the current equivalent.
    """
    outputs = model(features)
    loss_test = loss_fn(outputs, labels)
    _, predicted = torch.max(outputs, dim=1)
    hits = (labels == predicted).float().sum()
    return loss_test.data[0], (1 - hits / labels.size(0)).data[0]
def train(model,egs_dir,split_num,epochs,gpu_id,cv_stop,lrate,weight_decay,bsize,outmodel):
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lrate,
weight_decay=weight_decay)
if gpu_id!=-1:
with torch.cuda.device(gpu_id):
model.cuda(gpu_id)
cv_er_old=0
warn_time=0
for epoch in range(epochs):
t_loss = 0.0
t_er = 0.0
batch_count=0
for batch in range(1,split_num+1):
train_data=pickle.load(open(join(egs_dir,'train','data.'+str(batch)+'.egs'),'rb'))
train_labels=pickle.load(open(join(egs_dir,'train','labels.'+str(batch)+'.egs'),'rb'))
train_data, train_labels = torch.from_numpy(train_data).float(), \
torch.from_numpy(train_labels.flatten()-1).long()
dataset = torch.utils.data.TensorDataset(train_data, train_labels)
trainloader = torch.utils.data.DataLoader(dataset, batch_size=bsize,
shuffle=True)
for i, data in enumerate(trainloader):
inputs, labels = Variable(data[0]).cuda(), Variable(data[1]).cuda()
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_fn(outputs, labels)
# Compute the error rate on the training set.
_, predicted = torch.max(outputs, dim=1)
hits = (labels == predicted).float().sum()
t_er += (1 - hits / labels.size(0)).data[0]
t_loss += loss.data[0]
batch_count+=1
loss.backward()
optimizer.step()
# print the loss after every epoch
t_loss /= batch_count
t_er /= batch_count
cv_loss, cv_er=model_err(model, egs_dir, loss_fn, bsize, gpu_id)
logmsg = '# epoch: {epoch} loss (train): {t_loss:.3f} ' \
'error rate (train): {t_er:.3%} loss (cv): {cv_loss:.3f} ' \
'error rate (cv): {cv_er:.3%}'.format(epoch=epoch+1, t_loss=t_loss, t_er=t_er, cv_loss=cv_loss, cv_er=cv_er)
t_er = 0.0
t_loss = 0.0
print(logmsg)
sys.stdout.flush()
if cv_er>cv_er_old:
warn_time+=1
cv_er_old=cv_er
if warn_time>=cv_stop:
print('%s: Cross Validation Error found to increase continuously.. exiting with present model!' % sys.argv[0])
re_loss, re_er = model_err(model, egs_dir, loss_fn, bsize, gpu_id)
print('%s: The final test performance is: %.2f %%' % (sys.argv[0],re_er*100))
break
print('%s: Maximum number of epochs exceeded!' % sys.argv[0])
re_loss, re_er = model_err(model, egs_dir, loss_fn, bsize, gpu_id)
print('%s: The final test performance is: %.2f %%' % (sys.argv[0],re_er*100))
# Save performance
res_file=join(dirname(outmodel),'result')
with open(res_file,'w') as f:
f.write('Test set Frame Error Rate: %.2f %%' % (re_er*100))
# Save model
model=model.cpu()
with open(outmodel, 'wb') as fid:
pickle.dump(model, fid)
else:
cv_er_old=0
warn_time=0
for epoch in range(epochs):
t_loss = 0.0
t_er = 0.0
batch_count=0
for batch in range(1,split_num+1):
train_data=pickle.load(open(join(egs_dir,'train','data.'+str(batch)+'.egs'),'rb'))
train_labels=pickle.load(open(join(egs_dir,'train','labels.'+str(batch)+'.egs'),'rb'))
train_data, train_labels = torch.from_numpy(train_data).float(), \
torch.from_numpy(train_labels.flatten()-1).long()
dataset = torch.utils.data.TensorDataset(train_data, train_labels)
trainloader = torch.utils.data.DataLoader(dataset, batch_size=bsize,
shuffle=True)
for i, data in enumerate(trainloader):
inputs, labels = Variable(data[0]), Variable(data[1])
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_fn(outputs, labels)
# Compute the error rate on the training set.
_, predicted = torch.max(outputs, dim=1)
hits = (labels == predicted).float().sum()
t_er += (1 - hits / labels.size(0)).data[0]
t_loss += loss.data[0]
batch_count+=1
loss.backward()
optimizer.step()
# print the loss after every epoch
t_loss /= batch_count
t_er /= batch_count
cv_loss, cv_er=model_err(model, egs_dir, loss_fn, bsize, gpu_id)
logmsg = '# epoch: {epoch} loss (train): {t_loss:.3f} ' \
'error rate (train): {t_er:.3%} loss (cv): {cv_loss:.3f} ' \
'error rate (cv): {cv_er:.3%}'.format(epoch=epoch+1, t_loss=t_loss, t_er=t_er, cv_loss=cv_loss, cv_er=cv_er)
t_er = 0.0
t_loss = 0.0
print(logmsg)
sys.stdout.flush()
if cv_er>cv_er_old:
warn_time+=1
cv_er_old=cv_er
if warn_time>=cv_stop:
print('%s: Cross Validation Error found to increase in 2 epochs.. exiting with present model!' % sys.argv[0])
cv_loss, cv_er=model_err(model, egs_dir, loss_fn, bsize, gpu_id)
print('%s: The final test performance is: %.2f %%' % (sys.argv[0],re_er*100))
break
print('%s: Maximum number of epochs exceeded!' % sys.argv[0])
re_loss, re_er =cv_loss, cv_er=model_err(model, egs_dir, loss_fn, bsize, gpu_id)
print('%s: The final test performance is: %.2f %%' % (sys.argv[0],re_er*100))
# Save result
res_file=join(dirname(outmodel),'result')
with open(res_file,'w') as f:
f.write('Test set Frame Error Rate: %.2f %%' % (re_er*100))
# Save model
with open(outmodel, 'wb') as fid:
pickle.dump(model, fid)
if __name__=='__main__':
print_log('# BEGIN CNN TRAINING')
args=get_args()
gpu_id=get_device_id()
if gpu_id!=-1:
print('%s: Using GPU device %d for nnet' % (sys.argv[0],gpu_id))
else:
print_log('Training nnet on single CPU, this will take some time!')
print_log('Defining nnet model')
with open(join(args.egs_dir,'dim'),'r') as fid:
insize=int(np.sqrt(int(fid.readline())))
model=cnn_model(args.nlayers,args.ndepth,args.ksize,args.ntargets,insize,gpu_id)
print_log('Training nnet model')
# Main Training function
train(model,args.egs_dir,args.split_num,args.epochs,gpu_id,args.cv_stop,args.lrate,args.weight_decay,args.bsize,args.outmodel)
print_log('# FINISHED CNN TRAINING') | [
"sadhusamik@gmail.com"
] | sadhusamik@gmail.com |
44fa4a6405695e5e96d524e406f9fbbf2501bb88 | 1d956e459088cbc17641da44ef827ee3de5912c8 | /setup.py | c095e35b87d525c2a55d6bf6c98902d5a638cb06 | [] | no_license | Adilla/oudjat | a49dc2c8b83f6c6f0c4b62fba264bfd4c934dfe9 | 00c0f98a916402ae7157db7dd48f41427e837b86 | refs/heads/master | 2021-01-23T18:10:48.996950 | 2013-06-28T09:36:28 | 2013-06-28T09:36:28 | 8,778,460 | 1 | 0 | null | 2013-04-17T09:19:19 | 2013-03-14T15:28:57 | Python | UTF-8 | Python | false | false | 1,013 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from setuptools import find_packages
with open('README.rst') as readme:
long_description = readme.read()
with open('requirements.txt') as requirements:
lines = requirements.readlines()
libraries = [lib for lib in lines if not lib.startswith('-')]
dependency_links = [link.split()[1] for link in lines if
link.startswith('-f')]
setup(
name='oudjat',
version='0.5',
author='Adilla Susungi',
author_email='adilla.susungi@etu.unistra.fr',
maintainer='Arnaud Grausem',
maintainer_email='arnaud.grausem@unistra.fr',
url='http://repodipory.u-strasbg.fr/docs/oudjat',
license='PSF',
description='',
long_description=long_description,
packages=find_packages('src'),
package_dir={'': 'src'},
download_url='http://repodipory.u-strasbg.fr/lib/python/',
install_requires=libraries,
dependency_links=dependency_links,
keywords=['security', 'keywords'],
)
| [
"arnaud.grausem@unistra.fr"
] | arnaud.grausem@unistra.fr |
a1ad11fe81cbafd2634f7e88da34d940617525ed | 0728a2e165808cfe5651693a6e7f47804bfb085f | /get/2013/site/getmyad/tests/functional/test_private.py | bb8b9931e924edaa1d8c9b4539e9f53d599872dc | [] | no_license | testTemtProj/OLD_PROJECT | 5b026e072017f5135159b0940370fda860241d39 | 9e5b165f4e8acf9003536e05dcefd33a5ae46890 | refs/heads/master | 2020-05-18T15:30:24.543319 | 2013-07-23T15:17:32 | 2013-07-23T15:17:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from getmyad.tests import *
class TestPrivateController(TestController):
def test_index(self):
response = self.app.get(url(controller='private', action='index'))
# Test response...
| [
"Kyzmenko_Pavel@mail.ru"
] | Kyzmenko_Pavel@mail.ru |
861315586b4868acf179d67652c9c795ebdc8b94 | c116855af9e8a203155b8807efd2714e82b16c0d | /acsql/ingest/make_file_dict.py | 4eda3d5e3f1761375bd8ee9b6cb875190896d647 | [
"BSD-3-Clause"
] | permissive | spacetelescope/acsql | 40ea3f1086e8157a20deefba3b9811281238a365 | 984449d8ad00ba606b122478c010ac88e12bbe82 | refs/heads/master | 2021-10-16T19:40:13.904807 | 2019-02-12T18:29:23 | 2019-02-12T18:29:23 | 81,865,402 | 6 | 0 | null | 2017-10-18T14:08:35 | 2017-02-13T19:58:30 | TeX | UTF-8 | Python | false | false | 5,965 | py | """Create a dictionary containing useful information for the ingestion
process.
The ``file_dict`` contains various information that can be used by
``ingest.py`` (e.g. filesystem paths, observational metadata) and can
be used as a data container that can be easily passed around to various
functions.
Authors
-------
Matthew Bourque
Use
---
This module and its functionars are intended to be imported and
used by ``acsql.ingest.ingest.py`` as such:
::
from ascql.ingest.make_file_dict import get_detector
from ascql.ingest.make_file_dict import get_metadata_from_test_files
from ascql.ingest.make_file_dict import get_proposid
from acsql.ingest.make_file_dict import make_file_dict
make_file_dict(filename)
get_detector(filename)
get_metadata_from_test_files(rootname_path, keyword)
get_proposid(filename)
Dependencies
------------
External library dependencies include:
- ``astropy``
"""
import glob
import logging
import os
from astropy.io import fits
from acsql.utils import utils
from acsql.utils.utils import SETTINGS
def get_detector(filename):
"""Return the ``detector`` associated with the given ``filename``,
if possible.
Parameters
----------
filename : str
The path to the file to attempt to get the ``detector`` header
keyword from.
Returns
-------
detector : str
The detector (e.g. ``WFC``)
"""
if 'jit' in filename:
detector = fits.getval(filename, 'config', 0)
if detector == 'S/C': # FGS observation
detector = None
else:
detector = detector.lower().split('/')[1]
else:
detector = fits.getval(filename, 'detector', 0).lower()
return detector
def get_metadata_from_test_files(rootname_path, keyword):
"""Return the value of the given ``keyword`` and ``rootname_path``.
The given ``rootname_path`` is checked for various filetypes that
are beleived to have the ``keyword`` that is sought, in order
of most likeliness: ``raw``, ``flt``, ``spt``, ``drz``, and
``jit``. If a candidate file is found, it is used to determine
the value of the ``keyword`` in the primary header. If no
candidate file exists, or the ``keyword`` value cannot be
determined from the primary header, a ``value`` of ``None`` is
returned, essentially ending the ingestion process for the given
rootname.
Parameters
----------
rootname_path : str
The path to the rootname in the MAST cache.
keyword : str
The header keyword to determine the value of (e.g.
``detector``)
Returns
-------
value : str or None
The header keyword value.
"""
raw = glob.glob(os.path.join(rootname_path, '*raw.fits'))
flt = glob.glob(os.path.join(rootname_path, '*flt.fits'))
spt = glob.glob(os.path.join(rootname_path, '*spt.fits'))
drz = glob.glob(os.path.join(rootname_path, '*drz.fits'))
jit = glob.glob(os.path.join(rootname_path, '*jit.fits'))
for test_files in [raw, flt, spt, drz, jit]:
try:
test_file = test_files[0]
if keyword == 'detector':
value = get_detector(test_file)
elif keyword == 'proposid':
value = get_proposid(test_file)
break
except (IndexError, KeyError):
value = None
if not value:
logging.warning('Cannot determine {} for {}'\
.format(keyword, rootname_path))
return value
def get_proposid(filename):
"""Return the proposal ID from the primary header of the given
``filename``.
Parameters
----------
filename : str
The path to the file to get the ``proposid`` form.
Returns
-------
proposid : int
The proposal ID (e.g. ``12345``).
"""
proposid = str(fits.getval(filename, 'proposid', 0))
return proposid
def make_file_dict(filename):
"""Create a dictionary that holds information that is useful for
the ingestion process. This dictionary can then be passed around
the various functions of the module.
Parameters
----------
filename : str
The path to the file.
Returns
-------
file_dict : dict
A dictionary containing various data useful for the ingestion
process.
"""
file_dict = {}
# Filename related keywords
file_dict['filename'] = os.path.abspath(filename)
file_dict['dirname'] = os.path.dirname(filename)
file_dict['basename'] = os.path.basename(filename)
file_dict['rootname'] = file_dict['basename'].split('_')[0][:-1]
file_dict['full_rootname'] = file_dict['basename'].split('_')[0]
file_dict['filetype'] = file_dict['basename'].split('.fits')[0].split('_')[-1]
file_dict['proposid'] = file_dict['basename'][0:4]
file_dict['proposid_int'] = get_metadata_from_test_files(file_dict['dirname'], 'proposid')
# Metadata kewords
file_dict['detector'] = get_metadata_from_test_files(file_dict['dirname'], 'detector')
if file_dict['detector']:
file_dict['file_exts'] = getattr(utils, '{}_FILE_EXTS'.format(file_dict['detector'].upper()))[file_dict['filetype']]
# JPEG related kewords
if file_dict['filetype'] in ['raw', 'flt', 'flc']:
file_dict['jpg_filename'] = file_dict['basename'].replace('.fits', '.jpg')
file_dict['jpg_dst'] = os.path.join(SETTINGS['jpeg_dir'], file_dict['proposid_int'], file_dict['jpg_filename'])
file_dict['thumbnail_filename'] = file_dict['basename'].replace('.fits', '.thumb')
file_dict['thumbnail_dst'] = os.path.join(SETTINGS['thumbnail_dir'], file_dict['proposid_int'], file_dict['thumbnail_filename'])
else:
file_dict['jpg_filename'] = None
file_dict['jpg_dst'] = None
file_dict['thumbnail_filename'] = None
file_dict['thumbnail_dst'] = None
return file_dict
| [
"bourque@stsci.edu"
] | bourque@stsci.edu |
0b842e78d6f798606bbc551d9632e708194fe68e | 837acd65cf23ba80782e5f3c79d0fd2ffa2aae66 | /TensorDyve/custom_exceptions.py | a4b21640f7451bf8865ba1eb6b5254f105e9278f | [] | no_license | catalinHRG/orange_juice | f62a3ddc369d809bfa737c3b17d7dfbfafe4ea06 | cec2154f914db4ee871b87ef53bbd53ea673cf6e | refs/heads/master | 2023-01-02T05:09:24.280657 | 2020-10-01T00:25:45 | 2020-10-01T00:25:45 | 300,095,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py |
class MemThresholdReached(Exception):
pass
class MissingModelFile(Exception):
pass
class MissingConfigFile(Exception):
pass
class MissingTrainTFR(Exception):
pass
class MissingMetaDataFile(Exception):
pass
class MissingEvalTFR(Exception):
pass
class ModelDatasetMissmatch(Exception):
pass
class ToBeImplemented(Exception):
pass
class NoProgress(Exception):
pass
class TrainEvalDatasetFormatMismatch(Exception):
pass
| [
"cherghelegiu11@gmail.com"
] | cherghelegiu11@gmail.com |
993289cd6a28286e26ca18a3513587c36e54ee23 | caee7af0774d73f714fa0c0ccb1066f4dbd6ece2 | /ceasar.py | 94860b20645bc73ef99594b5c4cc420476fb23bd | [] | no_license | giangcse/ATBMTT | cf3fedba7952379c5abb755e767d1f5498480216 | ea604cfe75231433532e315f8954ea7fb12e786e | refs/heads/master | 2020-08-27T19:43:23.653303 | 2019-11-28T08:14:08 | 2019-11-28T08:14:08 | 217,473,909 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | # -*- coding: utf-8 -*-
from Tkinter import *
import ttk
window = Tk()
window.title("Welcome to Demo An Toan Bao Mat Thong Tin")
lb0 = Label(window, text=" ",font=("Arial Bold", 10))
lb0.grid(column=0, row=0)
lbl = Label(window, text="CHƯƠNG TRÌNH DEMO",font=("Arial Bold", 20))
lbl.grid(column=1, row=1)
lb2 = Label(window, text="MẬT MÃ AFINE",font=("Arial Bold", 15))
lb2.grid(column=0, row=2)
plainlb3 = Label(window, text="PLANT TEXT",font=("Arial", 14))
plainlb3.grid(column=0, row=3)
plaintxt = Entry(window,width=20)
plaintxt.grid(column=1, row=3)
KEYlb4 = Label(window, text="KEY PAIR",font=("Arial", 14))
KEYlb4.grid(column=2, row=3)
KEYA1 = Entry(window,width=3)
KEYA1.grid(column=3, row=3)
KEYB1 = Entry(window,width=5)
KEYB1.grid(column=4, row=3)
lb5 = Label(window, text="CIPHER TEXT",font=("Arial", 14))
lb5.grid(column=0, row=4)
ciphertxt3 = Entry(window,width=20)
ciphertxt3.grid(column=1, row=4)
denctxt3 = Entry(window,width=20)
denctxt3.grid(column=3, row=4)
def Char2Num(c):
return ord(c.upper())-65
def Num2Char(n):
return chr(n+65)
def xgcd(b,a):
tmp=a
x0, x1, y0, y1 = 1, 0, 0, 1
while a!=0:
q, b, a = b // a, a, b % a
x0, x1 = x1, x0 - q * x1
y0, y1 = y1, y0 - q * y1
if x0<0:x0=tmp+x0
return x0
def encryptAF(txt,a,b,m):
r=""
for c in txt:
e=(a*Char2Num(c)+b )%m
r=r+Num2Char(e)
return r
def decryptAF(txt,a,b,m):
r=""
a1=xgcd(a,m)
for c in txt:
e=(a1*(Char2Num(c)-b ))%m
r=r+Num2Char(e)
return r
def clicked():
a,b,m=int(KEYA1.get()),int(KEYB1.get()),26
entxt=encryptAF(plaintxt.get(),a,b,m)
ciphertxt3.delete(0,END)
#a=int(KEYA1.get())
ciphertxt3.insert(INSERT,entxt)
def giaima():
a,b,m=int(KEYA1.get()),int(KEYB1.get()),26
detxt=decryptAF(ciphertxt3.get(),a,b,m)
denctxt3.delete(0,END)
#a=int(KEYA1.get())
denctxt3.insert(INSERT,detxt)
AFbtn = Button(window, text="Encrypt", command=clicked)
AFbtn.grid(column=5, row=3)
DEAFbtn = Button(window, text="Decrypt", command=giaima)
DEAFbtn.grid(column=2, row=4)
window.geometry('800x200')
window.mainloop()
| [
"noreply@github.com"
] | giangcse.noreply@github.com |
b0349f7066486474551ff341f4c360e1e7ce83ff | c1d202d589c2daf052fd29ba0de341bc04bc3f4f | /hello/hello/pipelines.py | 5d223a352f0c2133800554f8e9ff738439f333ae | [] | no_license | baichuan-hailong/crawl_Projects | 302e6eee27f944a2c7117bf5439642f2f8293709 | 1714c6d19229d2e67fdacd2b5a95275fd9de9ed7 | refs/heads/master | 2021-01-19T13:39:29.683169 | 2017-09-13T10:27:07 | 2017-09-13T10:27:07 | 100,853,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class HelloPipeline(object):
def process_item(self, item, spider):
print(spider.name)
# print(item)
return item
| [
"1719812714@qq.com"
] | 1719812714@qq.com |
fb11583b75cc60e7d5bb99db1618fe567ba35868 | 815130cc83e31a35ff118c301b4d3d9400f15a03 | /rango/urls.py | e5eb41964b83de748bd6bd1915b959f9cdf02895 | [] | no_license | zainsra7/tango_with_django_project | 69642f9fa9469aa0f9e663f5b0a6308a749846d6 | 43e70db5f384254ec34f52863ab6d249ff020996 | refs/heads/master | 2020-04-15T16:59:51.704945 | 2019-02-07T12:36:37 | 2019-02-07T12:36:37 | 164,856,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | from django.conf.urls import url
from rango import views
# app_name = 'rango' # Adding namespace if there are multiple apps
urlpatterns =[
url(r'^$', views.index, name='index'),
# Adding the rango/about url mapping
url(r'about/$', views.about, name='about'),
url(r'^add_category/$', views.add_category, name='add_category'),
url(r'^category/(?P<category_name_url>[\w\-]+)/$',
views.show_category, name='show_category'),
url(r'^category/(?P<category_name_url>[\w\-]+)/add_page/$',
views.add_page, name='add_page'),
url(r'^register/$', views.register, name='register'),
url(r'^login/$', views.user_login, name='login'),
url(r'^restricted/', views.restricted, name='restricted'),
url(r'^logout/$', views.user_logout, name='logout'),
] | [
"m.zain.ul.islam@gmail.com"
] | m.zain.ul.islam@gmail.com |
67ee1a8a045a611f0dc932c3c9d6dcb8c762d069 | b62e76a022e50d4f1b6b2567598127f87f5ad06a | /anjukespider/run.py | eac47084a03a16966dd98fb7e2f124c3cbbe77a2 | [] | no_license | siuchunpang/Scrapy_anjuke | d28db6ccb87a71498a30c99ec7ca8a363926f190 | ab9af951be4ca2378fe006461fd294bd97820962 | refs/heads/master | 2021-01-02T00:22:42.464369 | 2020-03-03T15:53:00 | 2020-03-03T15:53:00 | 239,409,622 | 0 | 0 | null | 2020-02-12T11:17:22 | 2020-02-10T02:16:04 | Python | UTF-8 | Python | false | false | 82 | py | from scrapy import cmdline
cmdline.execute("scrapy crawl anjukespider".split())
| [
"623968625@qq.com"
] | 623968625@qq.com |
6980e2751a96b6c52463e04797439404eb790d99 | 53d3fc35999ce80bb70110c7e29f04245512e7c1 | /venv/Scripts/pip3-script.py | 864f6560e3eddc37df9e2174d98770fb79fca96a | [] | no_license | Adaptive-Application/hawkEye-Server-Side | 8fb1b3a3bcfb40162db41885e7debbe5da025a53 | 65738afbadec98b0cf203f61a859b15eb7e42884 | refs/heads/master | 2021-04-06T03:46:22.347913 | 2018-04-01T14:50:10 | 2018-04-01T14:50:10 | 124,409,256 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | #!"C:\Users\agrah\OneDrive\Desktop\Drive Content\PyCharm Workspace\Adaptive Application\hawkEye-Server-Side\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
)
| [
"agraharisrm@gmail.com"
] | agraharisrm@gmail.com |
f2902f7714eff32f3ce55bc6b7583757ac12f665 | ee0d7ab506521d65f4dc6463c3fb6e1655ae00bd | /mailhandler.py | fd6e3bade89cea26f56f70c8edd3ab69e84e1cd2 | [] | no_license | eupendra/amazon-price-watcher-basic | b6b30ea6ad4dc8d31d8feb7c0cc6666d33c6dc21 | 1c93937f62ded21b24321c06fa286ed82bdd4a04 | refs/heads/master | 2021-06-25T06:48:35.851616 | 2021-02-10T11:28:34 | 2021-02-10T11:28:34 | 194,848,624 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | import smtplib
import config
def sendmail(subject, body):
smtp = smtplib.SMTP("smtp.gmail.com",587)
smtp.ehlo()
smtp.starttls()
smtp.login(config.USER_NAME, config.USER_PASS)
message_body = f"Subject:{subject}\n\n{body}"
smtp.sendmail(config.USER_NAME, config.USER_NAME,message_body)
smtp.quit()
| [
"noreply@github.com"
] | eupendra.noreply@github.com |
180543166f7dbf62eabfd04dfe888243cfff65a3 | 1a6bfa1491d1d7a72ec4b10218f4cb6d620351f3 | /src/api.py | b8c98464e9d59880fb4ae41b56d160166450e749 | [] | no_license | ptlu79/dashboard_currency | f0082f45a8854abaeee28ef798f36271e301ff1b | 516af46edccca9a001c5b2ae9e3ecc6b9d947f3a | refs/heads/master | 2023-01-04T23:58:21.509343 | 2020-11-04T16:04:47 | 2020-11-04T16:04:47 | 297,571,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | from datetime import date, timedelta
from pprint import pprint
import requests
def get_rate(currencies=["USD"], days=30):
end_date = date.today()
start_date = end_date - timedelta(days=days)
symbols = ','.join(currencies)
requete = f"https://api.exchangeratesapi.io/history?start_at={start_date}&end_at={end_date}&symbols={symbols}"
r = requests.get(requete)
if not r and not r.json(): # pas besoin de verif 200 car si non ben false, AND verif si bien json
return False, False #false devise et false jour
api_rates = r.json().get("rates") # donc je recupere json et tout ce qui en clef rate
all_rates = {currency: [] for currency in currencies} # comprehension de liste, non de tableau
# on prend uniquement les jours et on verif l'ordre
all_days = sorted(api_rates.keys())
for each_day in all_days:
[all_rates[currency].append(rate) for currency, rate in api_rates[each_day].items()]
return all_days, all_rates
if __name__ == "__main__":
days, rates = get_rate(currencies=["USD", "CAD"])
| [
"grbroyer@gmail.com"
] | grbroyer@gmail.com |
89ec17bffeb4e5106403f0613f886a9573d21375 | 682b355573f8f2f4af325dd20e9f316340659818 | /PebbleApp/build/c4che/basalt_cache.py | 3f9ab1289bb70b0fffc7bf64b50a9ddb0e668dd9 | [] | no_license | JenanMannette/VibeSight-Pebble | 8f4bb55e52def0c4a9f1fbf03896afddc37fe349 | 4e8135df3511322bd38615e4e405d9e9b5b1d897 | refs/heads/master | 2021-01-19T08:14:22.777167 | 2015-09-06T21:07:38 | 2015-09-06T21:07:38 | 42,018,594 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,012 | py | AR = 'arm-none-eabi-ar'
ARFLAGS = 'rcs'
AS = 'arm-none-eabi-gcc'
BINDIR = '/usr/local/bin'
BUILD_DIR = 'basalt'
CC = ['arm-none-eabi-gcc']
CCLNK_SRC_F = []
CCLNK_TGT_F = ['-o']
CC_NAME = 'gcc'
CC_SRC_F = []
CC_TGT_F = ['-c', '-o']
CC_VERSION = ('4', '7', '2')
CFLAGS = ['-std=c99', '-mcpu=cortex-m3', '-mthumb', '-ffunction-sections', '-fdata-sections', '-g', '-Os', '-D_TIME_H_', '-Wall', '-Wextra', '-Werror', '-Wno-unused-parameter', '-Wno-error=unused-function', '-Wno-error=unused-variable']
CFLAGS_MACBUNDLE = ['-fPIC']
CFLAGS_cshlib = ['-fPIC']
CPPPATH_ST = '-I%s'
DEFINES = ['RELEASE', 'PBL_PLATFORM_BASALT', 'PBL_COLOR', 'PBL_SDK_3']
DEFINES_ST = '-D%s'
DEST_BINFMT = 'elf'
DEST_CPU = 'arm'
DEST_OS = 'darwin'
INCLUDES = ['basalt']
LD = 'arm-none-eabi-ld'
LIBDIR = '/usr/local/lib'
LIBPATH_ST = '-L%s'
LIB_ST = '-l%s'
LINKFLAGS = ['-mcpu=cortex-m3', '-mthumb', '-Wl,--gc-sections', '-Wl,--warn-common', '-Os']
LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']
LINKFLAGS_cshlib = ['-shared']
LINKFLAGS_cstlib = ['-Wl,-Bstatic']
LINK_CC = ['arm-none-eabi-gcc']
PBW_BIN_DIR = 'basalt'
PEBBLE_SDK = '/Users/jenanm/pebble-dev/PebbleSDK-3.4-rc8/Pebble/basalt'
PEBBLE_SDK_COMMON = '/Users/jenanm/pebble-dev/PebbleSDK-3.4-rc8/Pebble/common'
PLATFORM = {'PBW_BIN_DIR': 'basalt', 'TAGS': ['basalt', 'color'], 'ADDITIONAL_TEXT_LINES_FOR_PEBBLE_H': [], 'MAX_APP_BINARY_SIZE': 65536, 'MAX_RESOURCES_SIZE': 1048576, 'MAX_APP_MEMORY_SIZE': 65536, 'MAX_WORKER_MEMORY_SIZE': 10240, 'NAME': 'basalt', 'BUILD_DIR': 'basalt', 'MAX_RESOURCES_SIZE_APPSTORE': 262144, 'DEFINES': ['PBL_PLATFORM_BASALT', 'PBL_COLOR']}
PLATFORM_NAME = 'basalt'
PREFIX = '/usr/local'
RPATH_ST = '-Wl,-rpath,%s'
SDK_VERSION_MAJOR = 5
SDK_VERSION_MINOR = 60
SHLIB_MARKER = None
SIZE = 'arm-none-eabi-size'
SONAME_ST = '-Wl,-h,%s'
STLIBPATH_ST = '-L%s'
STLIB_MARKER = None
STLIB_ST = '-l%s'
TARGET_PLATFORMS = [u'basalt']
cprogram_PATTERN = '%s'
cshlib_PATTERN = 'lib%s.so'
cstlib_PATTERN = 'lib%s.a'
macbundle_PATTERN = '%s.bundle'
| [
"jenanmannette@gmail.com"
] | jenanmannette@gmail.com |
e34b264b7b56e2a33af6c1f0832aab38871d7b95 | 38700904e69da8b8a3801456a6b78c8d74499eb7 | /scripts/run-clang-format.py | c283fa25d6d5f4a68b25cf28bbaa4338705592ca | [
"MIT"
] | permissive | IvanVnucec/c_matrix_library | 77f033067fe5ec27a8f76e7e3234288b01d5178f | 572df1840c46d3dcca6ceb955350d8d46abcf298 | refs/heads/master | 2023-04-08T12:13:04.127859 | 2021-04-21T07:00:15 | 2021-04-21T07:00:15 | 341,643,494 | 0 | 0 | WTFPL | 2021-03-21T19:51:43 | 2021-02-23T18:01:04 | C | UTF-8 | Python | false | false | 12,760 | py | #!/usr/bin/env python
"""A wrapper script around clang-format, suitable for linting multiple files
and to use for continuous integration.
This is an alternative API for the clang-format command line.
It runs over multiple files and directories in parallel.
A diff output is produced and a sensible exit code is returned.
"""
from __future__ import print_function, unicode_literals
import argparse
import codecs
import difflib
import fnmatch
import io
import errno
import multiprocessing
import os
import signal
import subprocess
import sys
import traceback
from functools import partial
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, "wb")
# File extensions treated as formattable C/C++ sources by default
# (overridable via the --extensions command-line option).
DEFAULT_EXTENSIONS = 'c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx'
# Default name of the ignore file holding glob-like exclusion patterns
# (read by excludes_from_file; presumably resolved relative to the
# working directory — confirm against main()).
DEFAULT_CLANG_FORMAT_IGNORE = '.clang-format-ignore'
class ExitStatus:
    """Process exit codes reported by this script."""

    # Keep these values stable: shell callers and CI scripts key off them.
    SUCCESS, DIFF, TROUBLE = 0, 1, 2
def excludes_from_file(ignore_file):
    """Read exclusion patterns from *ignore_file*.

    Lines starting with ``#`` are comments and blank lines are
    skipped.  A missing file simply yields an empty pattern list;
    any other I/O error is propagated to the caller.
    """
    patterns = []
    try:
        with io.open(ignore_file, 'r', encoding='utf-8') as handle:
            for raw_line in handle:
                if raw_line.startswith('#'):
                    continue  # comment line
                stripped = raw_line.rstrip()
                if stripped:
                    patterns.append(stripped)
    except EnvironmentError as exc:
        # A nonexistent ignore file is fine; everything else is not.
        if exc.errno != errno.ENOENT:
            raise
    return patterns
def list_files(files, recursive=False, extensions=None, exclude=None):
    """Expand *files* into a flat list of paths to format.

    Entries that are not directories (or when *recursive* is false)
    are passed through untouched.  Directories are walked and only
    files whose extension appears in *extensions* are kept; any path
    matching one of the glob-like *exclude* patterns is skipped, and
    matching directories are pruned from the walk entirely.
    """
    if extensions is None:
        extensions = []
    if exclude is None:
        exclude = []

    selected = []
    for entry in files:
        if not (recursive and os.path.isdir(entry)):
            selected.append(entry)
            continue
        for dirpath, dirnames, filenames in os.walk(entry):
            candidates = [os.path.join(dirpath, name) for name in filenames]
            for pattern in exclude:
                # Trim excluded directories in place so os.walk() does
                # not descend into them (and does not list them at all).
                dirnames[:] = [
                    d for d in dirnames
                    if not fnmatch.fnmatch(os.path.join(dirpath, d), pattern)
                ]
                candidates = [
                    c for c in candidates if not fnmatch.fnmatch(c, pattern)
                ]
            for candidate in candidates:
                if os.path.splitext(candidate)[1][1:] in extensions:
                    selected.append(candidate)
    return selected
def make_diff(file, original, reformatted):
    """Return a unified diff between *original* and *reformatted* lines.

    The two sides are labelled "(original)" and "(reformatted)".  An
    empty list means the file is already formatted.
    """
    diff_lines = difflib.unified_diff(
        original,
        reformatted,
        fromfile='{}\t(original)'.format(file),
        tofile='{}\t(reformatted)'.format(file),
        n=3)
    return list(diff_lines)
class DiffError(Exception):
    """Raised when running clang-format over a file fails.

    ``errs`` holds the stderr lines (if any) of the failed run.
    """

    def __init__(self, message, errs=None):
        super(DiffError, self).__init__(message)
        if errs is None:
            errs = []
        self.errs = errs
class UnexpectedError(Exception):
    """Raised for unforeseen failures while processing a file.

    Captures the formatted traceback at construction time, together
    with the originating exception (if any) in ``exc``.
    """

    def __init__(self, message, exc=None):
        super(UnexpectedError, self).__init__(message)
        self.exc = exc
        # Grab the traceback now, while the triggering exception is
        # still the "current" one.
        self.formatted_traceback = traceback.format_exc()
def run_clang_format_diff_wrapper(args, file):
    """Run clang-format on *file*, wrapping unforeseen failures.

    ``DiffError`` propagates untouched; any other exception is
    converted into an ``UnexpectedError`` that records the original
    exception and its traceback.
    """
    try:
        return run_clang_format_diff(args, file)
    except DiffError:
        raise
    except Exception as e:
        raise UnexpectedError(
            '{}: {}: {}'.format(file, e.__class__.__name__, e), e)
def run_clang_format_diff(args, file):
    """Run clang-format on *file* and return ``(diff_lines, stderr_lines)``.

    With ``args.in_place`` clang-format rewrites the file itself and the
    diff is empty; with ``args.dry_run`` the would-be command line is
    printed and nothing is executed.  Raises ``DiffError`` if the file
    cannot be read, or if clang-format fails to start or exits non-zero.
    """
    # Read the original contents up front so a diff can be computed
    # against clang-format's output later.
    try:
        with io.open(file, 'r', encoding='utf-8') as f:
            original = f.readlines()
    except IOError as exc:
        raise DiffError(str(exc))
    if args.in_place:
        invocation = [args.clang_format_executable, '-i', file]
    else:
        invocation = [args.clang_format_executable, file]
    if args.style:
        invocation.extend(['--style', args.style])
    if args.dry_run:
        print(" ".join(invocation))
        return [], []
    # Use of utf-8 to decode the process output.
    #
    # Hopefully, this is the correct thing to do.
    #
    # It's done due to the following assumptions (which may be incorrect):
    # - clang-format will returns the bytes read from the files as-is,
    #   without conversion, and it is already assumed that the files use utf-8.
    # - if the diagnostics were internationalized, they would use utf-8:
    #   > Adding Translations to Clang
    #   >
    #   > Not possible yet!
    #   > Diagnostic strings should be written in UTF-8,
    #   > the client can translate to the relevant code page if needed.
    #   > Each translation completely replaces the format string
    #   > for the diagnostic.
    #   > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation
    #
    # It's not pretty, due to Python 2 & 3 compatibility.
    encoding_py3 = {}
    if sys.version_info[0] >= 3:
        encoding_py3['encoding'] = 'utf-8'
    try:
        proc = subprocess.Popen(
            invocation,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
            **encoding_py3)
    except OSError as exc:
        raise DiffError(
            "Command '{}' failed to start: {}".format(
                subprocess.list2cmdline(invocation), exc
            )
        )
    proc_stdout = proc.stdout
    proc_stderr = proc.stderr
    if sys.version_info[0] < 3:
        # make the pipes compatible with Python 3,
        # reading lines should output unicode
        encoding = 'utf-8'
        proc_stdout = codecs.getreader(encoding)(proc_stdout)
        proc_stderr = codecs.getreader(encoding)(proc_stderr)
    # hopefully the stderr pipe won't get full and block the process
    # NOTE(review): stdout is drained fully before stderr, so a very
    # chatty clang-format could in theory fill the stderr pipe and
    # deadlock — switching to proc.communicate() would avoid this.
    outs = list(proc_stdout.readlines())
    errs = list(proc_stderr.readlines())
    proc.wait()
    if proc.returncode:
        raise DiffError(
            "Command '{}' returned non-zero exit status {}".format(
                subprocess.list2cmdline(invocation), proc.returncode
            ),
            errs,
        )
    if args.in_place:
        # File was rewritten in place; nothing to diff.
        return [], errs
    return make_diff(file, original, outs), errs
def bold_red(s):
    """Wrap *s* in ANSI escape codes for bold red terminal text."""
    return '\x1b[1m' + '\x1b[31m' + s + '\x1b[0m'
def colorize(diff_lines):
    """Yield *diff_lines* with ANSI colors applied by diff-line kind."""

    def painted(code, text):
        # <escape code> + line (newline included) + reset.
        return code + text + '\x1b[0m'

    for line in diff_lines:
        if line[:4] in ('--- ', '+++ '):
            yield painted('\x1b[1m', line)   # file headers: bold
        elif line.startswith('@@ '):
            yield painted('\x1b[36m', line)  # hunk headers: cyan
        elif line.startswith('+'):
            yield painted('\x1b[32m', line)  # additions: green
        elif line.startswith('-'):
            yield painted('\x1b[31m', line)  # removals: red
        else:
            yield line
def print_diff(diff_lines, use_color):
    """Write *diff_lines* to stdout, ANSI-colorized when *use_color*."""
    lines = colorize(diff_lines) if use_color else diff_lines
    if sys.version_info[0] < 3:
        # Python 2: stdout expects bytes, so encode each line.
        sys.stdout.writelines(l.encode('utf-8') for l in lines)
    else:
        sys.stdout.writelines(lines)
def print_trouble(prog, message, use_colors):
    """Report an error *message* on stderr, prefixed with *prog*."""
    label = 'error:'
    if use_colors:
        label = bold_red(label)
    print("{}: {} {}".format(prog, label, message), file=sys.stderr)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--clang-format-executable',
metavar='EXECUTABLE',
help='path to the clang-format executable',
default='clang-format-11')
parser.add_argument(
'--extensions',
help='comma separated list of file extensions (default: {})'.format(
DEFAULT_EXTENSIONS),
default=DEFAULT_EXTENSIONS)
parser.add_argument(
'-r',
'--recursive',
action='store_true',
help='run recursively over directories')
parser.add_argument(
'-d',
'--dry-run',
action='store_true',
help='just print the list of files')
parser.add_argument(
'-i',
'--in-place',
action='store_true',
help='format file instead of printing differences')
parser.add_argument('files', metavar='file', nargs='+')
parser.add_argument(
'-q',
'--quiet',
action='store_true',
help="disable output, useful for the exit code")
parser.add_argument(
'-j',
metavar='N',
type=int,
default=0,
help='run N clang-format jobs in parallel'
' (default number of cpus + 1)')
parser.add_argument(
'--color',
default='auto',
choices=['auto', 'always', 'never'],
help='show colored diff (default: auto)')
parser.add_argument(
'-e',
'--exclude',
metavar='PATTERN',
action='append',
default=[],
help='exclude paths matching the given glob-like pattern(s)'
' from recursive search')
parser.add_argument(
'--style',
help='formatting style to apply (LLVM, Google, Chromium, Mozilla, WebKit)')
args = parser.parse_args()
# use default signal handling, like diff return SIGINT value on ^C
# https://bugs.python.org/issue14229#msg156446
signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
signal.SIGPIPE
except AttributeError:
# compatibility, SIGPIPE does not exist on Windows
pass
else:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
colored_stdout = False
colored_stderr = False
if args.color == 'always':
colored_stdout = True
colored_stderr = True
elif args.color == 'auto':
colored_stdout = sys.stdout.isatty()
colored_stderr = sys.stderr.isatty()
version_invocation = [args.clang_format_executable, str("--version")]
try:
subprocess.check_call(version_invocation, stdout=DEVNULL)
except subprocess.CalledProcessError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
return ExitStatus.TROUBLE
except OSError as e:
print_trouble(
parser.prog,
"Command '{}' failed to start: {}".format(
subprocess.list2cmdline(version_invocation), e
),
use_colors=colored_stderr,
)
return ExitStatus.TROUBLE
retcode = ExitStatus.SUCCESS
excludes = excludes_from_file(DEFAULT_CLANG_FORMAT_IGNORE)
excludes.extend(args.exclude)
files = list_files(
args.files,
recursive=args.recursive,
exclude=excludes,
extensions=args.extensions.split(','))
if not files:
return
njobs = args.j
if njobs == 0:
njobs = multiprocessing.cpu_count() + 1
njobs = min(len(files), njobs)
if njobs == 1:
# execute directly instead of in a pool,
# less overhead, simpler stacktraces
it = (run_clang_format_diff_wrapper(args, file) for file in files)
pool = None
else:
pool = multiprocessing.Pool(njobs)
it = pool.imap_unordered(
partial(run_clang_format_diff_wrapper, args), files)
pool.close()
while True:
try:
outs, errs = next(it)
except StopIteration:
break
except DiffError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
retcode = ExitStatus.TROUBLE
sys.stderr.writelines(e.errs)
except UnexpectedError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
sys.stderr.write(e.formatted_traceback)
retcode = ExitStatus.TROUBLE
# stop at the first unexpected error,
# something could be very wrong,
# don't process all files unnecessarily
if pool:
pool.terminate()
break
else:
sys.stderr.writelines(errs)
if outs == []:
continue
if not args.quiet:
print_diff(outs, use_color=colored_stdout)
if retcode == ExitStatus.SUCCESS:
retcode = ExitStatus.DIFF
if pool:
pool.join()
return retcode
if __name__ == '__main__':
sys.exit(main())
| [
"noreply@github.com"
] | IvanVnucec.noreply@github.com |
cdc5ad2731caf764566fbcf463d063a573c55424 | 4e759fe0b592322cb4c28ca2d56c6d82620c3c36 | /country_information.py | 9d2b77c186f4aea506f3defaeb8429e23093bc46 | [] | no_license | Uservasyl/web_scraping | c886aecdfb028111c1ce157fdc01715ed6eec0a0 | 68e320460272b9bae752318a2557fe5bf193215c | refs/heads/master | 2020-04-07T00:17:16.154217 | 2018-11-17T17:05:18 | 2018-11-17T17:05:18 | 157,897,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,188 | py | #!/usr/bin/python3
import re, time
from urllib.request import urlopen, Request
url = 'http://example.webscraping.com/places/default/index/0'
def url_page(url, start, end):
''' Шукаємо сторінки на сайті webscraping.com'''
#start - індекс першої сторінки сайту
#end - індекс останньої сторінки
lst = url.split('/')
s = int(lst[-1])
start = s + start
start = str(start)
lst[-1] = start
url = '/'.join(lst)
return url
def country(url, start, end):
'''Програма виводить інформацію про країну із сайту web scraping'''
country_find = input("Введіть назву країни: \n")
country_list = []
while start < end:
country_request = Request(url_page(url, start, end))
country_page = urlopen(country_request).read()
country_page = str(country_page)
# шукаємо індекси країн на кожній окремій сторінці
COUNTRY_TAG = [m.start() for m in re.finditer('.png" />', country_page)]
for tag_index in COUNTRY_TAG: #створюємо список країн
country_tag_size = len(COUNTRY_TAG)
country_value_start = tag_index + country_tag_size -1
country = ''
for char in country_page[country_value_start:]:
if char != '<':
country += char
else:
break
country_list.append(country)
start += 1
time.sleep(0.7)
if country_find in country_list: #перевіряємо чи країна є у списку
url_country = 'http://example.webscraping.com/places/default/view/' + country_find + '-' + str(country_list.index(country_find) + 1)
country_request_url = Request(url_country)
country_page = urlopen(country_request_url).read()
country_page = str(country_page)
print(country_page)
else:
print(f"Такої країни як {country_find} немає в списку")
country(url, 0, 25)
| [
"vasul1983rost@gmail.com"
] | vasul1983rost@gmail.com |
8f7c658c1bb9c9e58b12ae1ae8285e65503a9ce0 | a8deda1bbde9870e9a8263155c8b81db0d5d9a2e | /GeneticAlgo/Main.py | e8221d2b48f87cc474f4562c2220df3b2d43aa23 | [] | no_license | Mahedi250/Artificial-Intelligence | bae1139657823ca5dc68e7cec2949d6fcdac6512 | 393a217eef8e9ec05449f71033550f1408cc6674 | refs/heads/master | 2022-03-25T17:18:59.831823 | 2018-01-05T09:45:37 | 2018-01-05T09:45:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,935 | py | import random
class Individual:
geneLength = 0
fitness = 0
def __init__(self,geneLength):
self.geneLength = geneLength
self.gene = []
for i in range(self.geneLength):
self.gene.append(random.randint(0,10)%2)
def calculateFitness(self):
self.fitness = 0
for i in self.gene:
if i is 1:
self.fitness += 1
return self.fitness
def showGene(self):
print(self.gene)
class Population:
populationSize = 0
indivituals = []
copyIndivitual = []
fitness = []
fittest = None
fittestIndivitualIndex = None
totalFItness = 0
def __init__(self,populationSize):
self.populationSize = populationSize
self.initializePopulation()
def initializePopulation(self):
for i in range(self.populationSize):
self.indivituals.append(Individual(5))
def calculateFitnessOfIndiviuals(self):
self.totalFItness = 0
for i in range(self.populationSize):
self.totalFItness += self.indivituals[i].calculateFitness()
def sortIndiviual(self):
for i in range(len(self.indivituals)):
for j in range(0,len(self.indivituals)-i-1):
if self.indivituals[j].fitness < self.indivituals[j+1].fitness:
self.indivituals[j], self.indivituals[j+1] = self.indivituals[j+1], self.indivituals[j]
def copyIndivitual(self,ind):
n = Individual(5)
n.gene.clear()
for i in ind.gene:
n.gene.append(i)
return n
def getFittestIndivitual(self):
self.sortIndiviual()
self.fittestIndivitualIndex = 0
self.fittest = self.indivituals[0].fitness
return self.copyIndivitual(self.indivituals[0])
def getSecondFittestIndivitual(self):
return self.copyIndivitual(self.indivituals[1])
def getLeastIndivitual(self):
return self.populationSize - 1
def printPopulation(self):
self.sortIndiviual()
for i in self.indivituals:
i.showGene()
print('------------------------')
print('Total Fitness:', self.totalFItness)
def isMaximum(self,value):
for i in self.indivituals:
if i.fitness is value:
return True
return False
def crossOver(fittest,secondFittest):
randomCrossOverPoint = random.randint(0,4)
for i in range(randomCrossOverPoint,5):
fittest.gene[i],secondFittest.gene[i] = secondFittest.gene[i], fittest.gene[i]
def mutation(fittest, secondFittest):
randomCrossOverPoint = random.randint(0, 4)
if fittest.gene[randomCrossOverPoint] is 0:
fittest.gene[randomCrossOverPoint] = 1
if secondFittest.gene[randomCrossOverPoint] is 0:
secondFittest.gene[randomCrossOverPoint] = 1
def offSpring(population,fittest,secondFittest):
population.indivituals[population.getLeastIndivitual()].gene.clear()
if fittest.calculateFitness() >= secondFittest.calculateFitness():
for i in fittest.gene:
population.indivituals[population.getLeastIndivitual()].gene.append(i)
else:
for i in secondFittest.gene:
population.indivituals[population.getLeastIndivitual()].gene.append(i)
# -- Main --
population = Population(10)
population.calculateFitnessOfIndiviuals()
generationCount = 1
population.printPopulation()
print('Generation:',generationCount)
print('-----------------------')
while population.isMaximum(5) is not True:
fittest = population.getFittestIndivitual()
secondFittest = population.getSecondFittestIndivitual()
crossOver(fittest, secondFittest)
mutation(fittest,secondFittest)
offSpring(population,fittest,secondFittest)
generationCount += 1
population.calculateFitnessOfIndiviuals()
population.printPopulation()
print('Generation:',generationCount)
print('-----------------------')
| [
"kakanghosh69@gmail.com"
] | kakanghosh69@gmail.com |
381bd670714af67a22af0521c5e9191c43766142 | 5673f3b568667319a73d2df2c40337bbead0a615 | /动态规划/756-最小花费爬楼梯.py | e945bd7f8a13f2bef3a82de68d5dd0c81a360fce | [] | no_license | zhengsizuo/leetcode-zhs | 8f65c779406fedd605aa696db45ccc6a17f8c83f | 7ffdb772ad7252f3d4b9aa2689a92cb1f10c8f37 | refs/heads/master | 2023-01-08T07:41:28.486238 | 2022-12-30T07:34:50 | 2022-12-30T07:34:50 | 243,281,854 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | class Solution:
def minCostClimbingStairs(self, cost) -> int:
dp = [0] * len(cost)
dp[0], dp[1] = cost[0], cost[1]
for i in range(2, len(cost)):
dp[i] = min(dp[i - 1], dp[i - 2]) + cost[i]
return min(dp[-1], dp[-2])
cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
cost = [10, 15, 20]
sl = Solution()
print(sl.minCostClimbingStairs(cost)) | [
"42198964+zhengsizuo@users.noreply.github.com"
] | 42198964+zhengsizuo@users.noreply.github.com |
f61f481fab7195ba42886e11b44ce13e7890af5b | 746f710d3fe281ba547be3bdae44d226a877d775 | /venv/Scripts/easy_install-3.8-script.py | 035aefb8b84f616911cafb4a6705772afa33f6a9 | [] | no_license | Haeon/hw3_framework | aa51f9ca846136b017d8f34dc0132b8c2b05ad28 | 8e8e3ebea85442ce47c54cab163a741a62c89823 | refs/heads/master | 2021-03-28T11:53:31.102530 | 2020-03-17T02:36:38 | 2020-03-17T02:36:38 | 247,861,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | #!C:\Users\hyewon\PycharmProjects\env2\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
| [
"hyeww.choi@kaist.ac.kr"
] | hyeww.choi@kaist.ac.kr |
86b19948161d26a6f45aa96e368fb070262ba432 | d5248ecaa5fa42b864656e6e5c685255a0e6dcf4 | /files/mk_datedir.py | e10e5df1e15b102daad0ad1933f2f365367a6df5 | [
"MIT"
] | permissive | thejoltjoker/python | 6963db60171a91a28d95fa6a499efdcee797c102 | b77134e5fb4e8e0ac20b34acdb33440f2252dce2 | refs/heads/master | 2023-08-30T20:56:53.642902 | 2023-08-28T16:33:46 | 2023-08-28T16:33:46 | 156,038,774 | 0 | 0 | MIT | 2023-08-28T16:33:48 | 2018-11-04T01:25:46 | Python | UTF-8 | Python | false | false | 685 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
mk_datedir.py
Create a folder with a date prefix
"""
import sys
import os
from datetime import datetime
from pathlib import Path
def mkdatedir(dirname):
"""Creates a folder with a date prefix in the working dir"""
# Set variables
prefix = datetime.today().strftime('%y%m%d')
path = Path(os.getcwd()) / "_".join([prefix, dirname])
# Create dir
path.mkdir(parents=True)
print(f"Created new folder {path.resolve()}")
return path
if __name__ == '__main__':
if len(sys.argv) == 2:
mkdatedir(sys.argv[1])
else:
dirname = input("Enter a folder name: ")
mkdatedir(dirname)
| [
"hello@thejoltjoker.com"
] | hello@thejoltjoker.com |
22f8e5b375f315809f3bcf57d7a7127d9ef54152 | 02c6c643712278e7015d790fdc2bc7cb2673d27d | /groups/templatetags/tags.py | d5aae461dfe0fb377e908e63bf37b987da823c02 | [] | no_license | anya-k/student_group | ae7efe57c7e38a54841e263014ee8394c382c746 | 1e14414ff64e6d74247f0e53bf52cd5c3f09baf6 | HEAD | 2016-08-12T04:35:42.414852 | 2016-02-08T01:25:10 | 2016-02-08T01:25:10 | 51,274,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | import datetime
from django import template
from django.core.urlresolvers import reverse
register = template.Library()
@register.simple_tag
def edit_list(object):
url = reverse('admin:{0}_{1}_change'.format(object._meta.app_label.lower(), object._meta.object_name.lower()),
args=[object.id])
return url | [
"blackqueennn@gmail.com"
] | blackqueennn@gmail.com |
8bc492eab134aff8bef21639b028c0946cfa114e | f91c0e245e9027ffad00181fc5a121328f9ff41b | /appconfig/__init__.py | b4961f77a658e5475b0b9fd10db20572c39679f9 | [] | no_license | mylesonine/arisa2 | 5c77e46c6c832d71c4e71b3d92e017e9f8164924 | 70e5dc9c7637c23c7bffebefd66a9ee43f5aee5a | refs/heads/master | 2020-06-20T07:40:37.825637 | 2019-07-21T12:55:14 | 2019-07-21T12:55:14 | 197,046,478 | 0 | 0 | null | 2019-07-15T17:51:39 | 2019-07-15T17:51:38 | null | UTF-8 | Python | false | false | 1,051 | py | from configparser import ConfigParser
import os
import os.path
cwd = os.path.abspath(os.path.dirname(__file__))
cfg = ConfigParser()
cfg.read(os.path.join(cwd, 'config.conf.DEFAULT'))
cfg.read(os.path.join(cwd, 'config.conf'))
def fetch(section, option=None, check_env=True, cast_to=None):
def toupper(s):
if s is None:
return None
try:
s = s.upper()
except AttributeError:
msg = f'option or section name must be type str, not {type(s)}'
raise TypeError(msg)
return s
# Convert args to uppercase
section, option = [*map(toupper, [section, option])]
sec = cfg[section]
if option == None:
return sec
env = None
if check_env:
varname = f'{section}_{option}'.upper()
env = os.environ.get(varname)
value = sec.get(option) or env
if value.isnumeric():
value = float(value)
if cast_to != None:
env = cast_to(env)
return value
DEBUGGING = fetch('BOT', 'DEBUGGING', cast_to=bool)
| [
"fhejehfif@gmail.com"
] | fhejehfif@gmail.com |
b85a7a339e7b176e74b53b55f931c0a90092d453 | d35976ebd5f35536166b4f7ae1c3591fd2575731 | /FFT3_test.py | 92aadaa786a4ac1ee3a2cc702e679397ee2d5a03 | [] | no_license | AmaneKobayashi/Python_Programs | 29548b53f4a6d35bfee3319e6289d4ae8785ba58 | 89ecc72828a3e18c23fefd4d52edffe6fc4c8185 | refs/heads/master | 2022-10-12T01:07:50.711891 | 2020-06-10T08:12:38 | 2020-06-10T08:12:38 | 271,444,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,829 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import numpy as np
import cupy as cp
import time
import tifffile
import os
import mrcfile
from PIL import Image
from skimage import io
if ((len(sys.argv)==1)):
print("command:python3 FFT3_test.py [-finame]")
exit()
n_parameter=1
parameter_name_list=[""]*n_parameter
flag_list=[0]*n_parameter
parameter_name_list[0]="-finame"
for i in range(len(sys.argv)):
if(sys.argv[i]=="-finame"):
finame=sys.argv[i+1]
flag_list[0]=1
if(sys.argv[i]=="--help"):
print("command:python3 FFT3_test.py [-finame]")
exit()
input_parameter=0
for i in range(n_parameter):
if(flag_list[i]==0):
print("please input parameter : [" + parameter_name_list[i] + "]")
input_parameter=1
if(input_parameter==1):
exit()
print("finame = " + finame)
t1=time.time()
with mrcfile.open(finame, permissive=True) as mrc:
# mrc.header.map = mrcfile.constants.MAP_ID
np_finame=np.asarray(mrc.data,dtype="float32")
mrc.close
t2=time.time()
print("np_finame size = " + str(np_finame.size))
print("np_finame shape = " + str(np_finame.shape))
print("np_finame size = " + str(type(np_finame.size)))
print("np_finame dtype = " + str(np_finame.dtype))
cp_finame=cp.asarray(np_finame,dtype="float32")
cp_finame = cp.fft.fftn(cp_finame, axes=(0,1,2), norm="ortho")
#cp_finame = cp.fft.fftshift(cp_finame)
cp_amp = cp.absolute(cp_finame)
t3=time.time()
np_finame = cp.asnumpy(cp_amp)
foname=finame[finame.rfind("/")+1:len(finame)-4] + "_FFT.mrc"
with mrcfile.new(foname, overwrite=True) as mrc2:
mrc2.set_data(np_finame)
mrc2.close
t4=time.time()
print("open time : " + str(t2-t1))
print("fft time : " + str(t3-t2))
print("output : " + str(t4-t3))
print("total time: " + str(t4-t1))
| [
"amane.kobayashi@riken.jp"
] | amane.kobayashi@riken.jp |
ee15ea407bbfee751fc72b553b795b56d4788ca9 | 44486d6b94c7067d16e032a0a909c97bddceb662 | /resumeapp/migrations/0003_auto_20190108_1558.py | f7af52cde81898f859bfa6773b59326030e2e297 | [] | no_license | cake404/portfoliowebsite | 37b6faccfa8a168ad5ab57ddd47290f8d4cb2a9c | 3e78f25fa736df116fd29205e87f6b16fd7b8160 | refs/heads/master | 2020-04-15T01:14:59.948685 | 2019-02-26T16:18:59 | 2019-02-26T16:18:59 | 164,268,560 | 0 | 0 | null | 2019-02-26T16:19:00 | 2019-01-06T02:27:31 | HTML | UTF-8 | Python | false | false | 391 | py | # Generated by Django 2.1.4 on 2019-01-08 15:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resumeapp', '0002_auto_20190108_1518'),
]
operations = [
migrations.AlterField(
model_name='author',
name='last_name',
field=models.CharField(max_length=50),
),
]
| [
"jake@localhost.localdomain"
] | jake@localhost.localdomain |
30aff920794c3c048d343284a45434178cb960ae | d1747e903d4d21b55b4aa5b4db149edd6fc95fcf | /figures/figure_5/figure_5.py | 1916be4135b55980250e0d277f872d2589a9589f | [] | no_license | jon-myers/Harmonic_Theory | 5bcb5fada7413786b9e4faca6e3d2a5744cb1450 | fbb20e46840d0bbcc8ea00a6148dd42bc2603081 | refs/heads/main | 2023-03-23T01:59:01.088089 | 2021-03-18T19:10:18 | 2021-03-18T19:10:18 | 305,810,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | from matplotlib import pyplot as plt
import numpy as np
import matplotlib
hs1 = np.arange(1, 37)
hs2 = hs1*3
hs3 = hs1 * 5
# plt.bar(np.zeros(len(hs1)), 0.25, 1, hs1)
fig, ax = plt.subplots()
fig.set_size_inches(6.5, 3)
ax.vlines(hs1, 1, 2, color='black')
ax.vlines(hs2, 0, 1, color='red')
ax.vlines(hs3, 2, 3, color='green')
# plt.scatter([1 for i in range(len(hs2))], hs2, s = 300, marker='_')
ax.set_xscale('log')
ax.set_xlim(0.9, 37)
ax.set_xticks([1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 20, 24, 36])
ax.set_yticks([])
ax.set_xticklabels([10, 20, 30, 40, 50, 60, 80, 100, 120, 160, 200, 240, 360])
# ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.tight_layout()
plt.savefig('figure_5/figure_5.png', dpi=300)
| [
"jon@Jons-MacBook-Pro.local"
] | jon@Jons-MacBook-Pro.local |
bf874bcf5e3f2c43cb3ac730fd7ee9531fc848cb | ec0f53b6b6314b8f7e6d834249b1a243d4f4e730 | /SimpleCare/simplecare/simplecare_home/experimental.py | 051e8b44afa510d695954c3effd9c626d011f548 | [] | no_license | rafatogo/SimpleCare | 027cfcd9b24833da5fa8a008821d4a6089f71cd2 | 75f3e6297fe4f2458b7fb82daca1e8e8aafdce3b | refs/heads/new_branch | 2020-07-30T01:29:42.195992 | 2019-09-22T16:58:30 | 2019-09-22T16:58:30 | 210,037,876 | 2 | 1 | null | 2019-09-22T15:40:05 | 2019-09-21T18:51:52 | Python | UTF-8 | Python | false | false | 32 | py | def blahblah():
return True
| [
"noreply@github.com"
] | rafatogo.noreply@github.com |
d48c88fc5ded71718856de110e9d40ebe857176c | d1571fa81214c900176037d9ff6e4e489f4e14ac | /scripts/normaliseddiffplots.py | dee8fd691fe13d68768363c5d06159f7ad601372 | [
"MIT"
] | permissive | NormanRH/UCLCHEM | e219556f5fbe42438664787af4529c89982bce7d | 6c4cb60f54b86c339541616cb833ce61cfd1993f | refs/heads/master | 2023-02-04T04:27:35.535861 | 2020-12-21T20:55:35 | 2020-12-21T20:55:35 | 277,169,427 | 0 | 0 | MIT | 2020-07-04T19:01:19 | 2020-07-04T19:01:18 | null | UTF-8 | Python | false | false | 18,941 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 8 20:08:10 2020
@author: norm
"""
#Plot differences due to abundance variations normalised for both phase 1 and static cloud scenarios
#it reads full UCLCHEM output and saves a plot of the abudances of select species
import os
import multiprocessing as mp
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import plotfunctions as pf
#Element ids and initial values
# This part can be substituted with any choice of grid
elements=[["C","fc",2.6e-04], ["N","fn",6.1e-05], ["O","fo",4.6e-04], ["S","fs",1.318e-05],
["Mg","fmg",3.981e-05], ["Si","fsi",1.0e-07],["Cl","fcl",3.162e-07]]#,["F","ff",3.6e-08]]
imgsize = {"A7":[(3.5,2),3.0,4.0,4.0,0.5,4],
"A6":[(5.2,3.5),'xx-small','x-small','small',1.0,7],
"A5":[(6.8,3.2),'xx-small','x-small','small',1.0,6],
"A4":[(10,6.8),'small','medium','large',2.0,9]}
#list the species we want graphing from the current data output of the model
#Teh output has the data labeled with the lefthand colum of names and we display the righthand versions
#speciesNames = ("SO SO2 S2 N NH NH2 NH3 HNC NO NO2 OCN HNCO HCS O2 H2O").split()
speciesNameLists = []
speciesNameLists.append(["S1",("SO SO2 S2 #SO2 #SO").split(),("SO SO2 S2 SO2ice SOice").split()])
speciesNameLists.append(["S2",("HS H2S #HS #H2S").split(),("HS H2S HSice H2Sice").split()])
speciesNameLists.append(["S3",("HCS OCS H2S2 #OCS #H2S2").split(),("HCS OCS H2S2 OCSice H2S2ice").split()])
speciesNameLists.append(["N1",("NH NH2 #NH #NH2").split(),("NH NH2 NHice NH2ice").split()])
speciesNameLists.append(["N2",("NH3 HNC #NH3 #HNC").split(),("NH3 HNC NH3ice HNCice").split()])
speciesNameLists.append(["N3",("NO NO2 #NO #NO2").split(),("NO NO2 NOice NO2ice").split()])
speciesNameLists.append(["N4",("N HNCO #N #HNCO").split(),("N HNCO Nice HNCOice").split()])
speciesNameLists.append(["N5",("HCN H2CN #HCN #H2CN").split(),("HCN H2CN HCNice H2CNice").split()])
speciesNameLists.append(["O1",("H2O HNO #H2O #HNO").split(),("H2O HNO H2Oice HNOice").split()])
speciesNameLists.append(["O2",("O O2 OH #O #O2 #OH").split(),("O O2 OH Oice O2ice OHice").split()])
speciesNameLists.append(["C1",("C CH CH2 #C #CH #CH2").split(),("C CH CH2 Cice CHice CH2ice").split()])# #C #CH #CH2
speciesNameLists.append(["C2",("CH3 CH4 #CH3 #CH4").split(),("CH3 CH4 CH3ice CH4ice").split()])
speciesNameLists.append(["C3",("C3H2 CH3CCH #C3H2").split(),("C3H2 CH3CCH C3H2ice").split()])
speciesNameLists.append(["C4",("CO CO2 #CO #CO2").split(),("CO CO2 COice CO2ice").split()])
speciesNameLists.append(["Si1",("SIC SIH2 #SIC #SIH4").split(),("SIC SIH2 SICice SIH4ice").split()])
speciesNameLists.append(["Mg",("CL HCL MG #HCL").split(),("Cl HCl Mg HClice").split()])
speciesNameLists.append(["Si2",("SIS SIC4 H2SIO SIO").split(),("SiS SiC4 H2SiO SiO").split()])
speciesNameLists.append(["e1",("MG MG+ E-").split(),("Mg Mg+ e-").split()])
speciesNameLists.append(["e2",("C C+ E-").split(),("C C+ e-").split()])
speciesNameLists.append(["e3",("S S+ E-").split(),("S S+ e-").split()])
speciesNameLists.append(["e4",("O O+ E- O2").split(),("O O+ e- O2").split()])
speciesNameLists.append(["e5",("C C+ E- MG MG+ S S+").split(),("C C+ e- Mg Mg+ S S+").split()])
speciesNameLists.append(["e6",("C C+ E- MG MG+ O O2 S S+").split(),("C C+ e- Mg Mg+ O O2 S S+").split()])
speciesNoiceNameLists = []
speciesNoiceNameLists.append(["S1",("SO SO2 S2").split(),("SO SO2 S2").split()])
speciesNoiceNameLists.append(["S2",("HS H2S").split(),("HS H2S").split()])
speciesNoiceNameLists.append(["S3",("HCS OCS H2S2").split(),("HCS OCS H2S2").split()])
speciesNoiceNameLists.append(["N1",("NH NH2").split(),("NH NH2").split()])
speciesNoiceNameLists.append(["N2",("NH3 HNC").split(),("NH3 HNC").split()])
speciesNoiceNameLists.append(["N3",("NO NO2").split(),("NO NO2").split()])
speciesNoiceNameLists.append(["N4",("N HNCO").split(),("N HNCO").split()])
speciesNoiceNameLists.append(["N5",("HCN H2CN").split(),("HCN H2CN").split()])
speciesNoiceNameLists.append(["O1",("H2O HNO").split(),("H2O HNO").split()])
speciesNoiceNameLists.append(["O2",("O O2 OH").split(),("O O2 OH").split()])
speciesNoiceNameLists.append(["C1",("C CH CH2").split(),("C CH CH2").split()])# #C #CH #CH2
speciesNoiceNameLists.append(["C2",("CH3 CH4").split(),("CH3 CH4").split()])
speciesNoiceNameLists.append(["C3",("C3H2 CH3CCH").split(),("C3H2 CH3CCH").split()])
speciesNoiceNameLists.append(["C4",("CO CO2").split(),("CO CO2").split()])
speciesNoiceNameLists.append(["Si1",("SIC SIH2").split(),("SIC SIH2").split()])
speciesNoiceNameLists.append(["Mg",("CL HCL MG ").split(),("Cl HCl Mg").split()])
speciesNoiceNameLists.append(["Si2",("SIS SIC4 H2SIO SIO").split(),("SiS SiC4 H2SiO SiO").split()])
speciesNoiceNameLists.append(["e1",("MG MG+ E-").split(),("Mg Mg+ e-").split()])
speciesNoiceNameLists.append(["e2",("C C+ E-").split(),("C C+ e-").split()])
speciesNoiceNameLists.append(["e3",("S S+ E-").split(),("S S+ e-").split()])
speciesNoiceNameLists.append(["e4",("O O+ E- O2").split(),("O O+ e- O2").split()])
speciesNoiceNameLists.append(["e5",("C C+ E- MG MG+ S S+").split(),("C C+ e- Mg Mg+ S S+").split()])
speciesNoiceNameLists.append(["e6",("C C+ E- MG MG+ O O2 S S+").split(),("C C+ e- Mg Mg+ O O2 S S+").split()])
varyfactor = [0.25, 0.5, 1, 2, 4]
#Linestles = [(0,(3,10,1,10)),(0,(3,5,1,5,1,5)),(0,()),(0,(3,5,1,5)),(0,(3,1,1,1))]
#Linestles = [(0,(4,2,1,2,1,2,1,2,1,2)),(0,(4,2,1,2,1,2)),(0,()),(0,(1,2,4,2,4,2)),(0,(1,2,4,2,4,2,4,2,4,2))]
Linestles = [(0,(1,2)),(0,(1,1)),(0,()),(0,(2,1,1,1)),(0,(2,1))]
Linestyles = [(1,2),(1,1),(),(2,1,1,1),(2,1)]
bulk=True #set true to run the speciesNoiceNameLists lists through the mass plot production process False runs a single plot
nplot = 1 #list to plot
switch=1
ice = False
papersize = "A5"
xaslog='linear'
sns.set()
sns.set_context("paper")
#sns.axes_style(xscale=xaslog,yscale='log')
#sns.set_palette("bright")
colours=["red orange","baby blue","greyish","amber","pink","greyish"]
#sns.set_palette("bright")
#use default pallet? sns.set_palette(sns.xkcd_palette(colours))
sns.set_style('whitegrid')
imgparams=imgsize[papersize]
columnpath = "../VaryFromSolar/outputfiles"+str(switch)+"/"
if ice:
plotspath = "../VaryFromSolar/"+papersize+xaslog+"VarPllPlots"+str(switch)
else:
plotspath = "../VaryFromSolar/"+papersize+xaslog+"VarNoIcePllPlots"+str(switch)
if os.path.isdir(plotspath) is not True:
os.mkdir(plotspath)
plt.rcParams['xtick.labelsize']=imgparams[5]
plt.rcParams['ytick.labelsize']=imgparams[5]
#plt.subplots(figsize=imgparams[0])
#fig,axes=pf.plt.subplots(len(elements),len(varyfactor),figsize=(16,9))
#fig,axes=pf.plt.subplots(figsize=(16,9))#len(elements),len(varyfactor),figsize=(16,9))
#axes=axes.flatten()
#i=0
#for m , speciesNames in enumerate(speciesNameLists):
def plotchem(speciesGroup):
#speciesGroup=speciesNameLists[0]#use this to just do one element for a test
speciesNames = speciesGroup[1]
speciesDisplay = speciesGroup[2]
groupname = speciesGroup[0]
if bulk is False:
print(speciesNames)
p=0
for k, e in enumerate(elements):
i=0
iprev = i
specfactor = []
time = []
abundances = []
species = []
abundscale = []
varying=[]#willl be a different plot "varying" may be able to put in a grid of plots
model = []
timelim = 5.4e6
title = "Varying " + e[0]
#aiming to have 3 panes stacked vertically
fig,axes=plt.subplots(figsize=imgparams[0], num=p,clear=True)
p=p+1
#figcombo,axescombo=pf.plt.subplots(figsize=(16,9), sharey=True,num=p,clear=True)
#p=p+1
#Separate out the phase 2 graph
#figp2,axesp2=pf.plt.subplots(figsize=(16,9), sharey=True,num=p,clear=True)
timeadded = False
# phase2startt=0
# phase2endt=0
collfilepf1 = columnpath + "phase1-fullCtrl.dat"
tpf1,denspf1,temppf1,abundpf1=pf.read_uclchem(collfilepf1,speciesNames)
collfilesf1 = columnpath + "static-fullCtrl.dat"
tsf1,denssf1,tempsf1,abundsf1=pf.read_uclchem(collfilesf1,speciesNames)
#iterate over all the multipliers held in varyfactor
for j , factor in enumerate(varyfactor):
#pick species, any number is fine
#title = ""
lb=False
#phase 1 plot
if factor == 1 : #we have the abundances for factor 1 the base level
continue
lb=True
collfile = columnpath + "phase1-full"+e[0]+ "-" + str(factor).replace('.','_')+".dat"
#title = "Varying " + e[0]#+str(factor)
lb=False
#call read_uclchem. Fetches a list of abundances for each species requested.
t,dens,temp,abund=pf.read_uclchem(collfile,speciesNames)
if t[-1] > timelim:
timelim = t[-1]
for l, s in enumerate(speciesDisplay):
if (len(abund[l]) != len(t)) | (len(abundpf1[l]) != len(tpf1)):
print("Collapse Species "+s+"no values a=" +str(len(abund[l]))+" t="+str(len(t)) + "a1=" +str(len(abundpf1[l]))+" t1="+str(len(tpf1)))
else:
#extract the abundance and subtract the base factor 1 level first get the lists the same length as for factor 1
if len(t) > len(tpf1):
aabundf = np.asarray(abund[l][:len(tpf1)])
aabpf = np.asarray(abundpf1[l][:len(tpf1)])
aab = aabundf - aabpf
abundances.extend(aab.tolist())
time.extend(tpf1)
species.extend([s.replace('#','ice')]*len(tpf1))
abundscale.extend([str(factor)]*len(tpf1))
varying.extend([e[0]]*len(tpf1))#varying element means we could construct a page of plots
model.extend(["collapse"]*len(tpf1))
#calculate the parallel track for the current factor
#This should be a function to insert a track of abundance and time
# npx = aabundf * (factor - 1.0)#rebase so factor 1 is teh new zero
# abundances.extend(npx.tolist())
# time.extend(tpf1)
# species.extend([s.replace('#','ice')+"pll"]*len(tpf1))
# abundscale.extend([str(factor)]*len(tpf1))
# varying.extend([e[0]]*len(tpf1))#varying element means we could construct a page of plots
# model.extend(["collapse"]*len(tpf1))
else:
aabundf = np.asarray(abund[l][:len(t)])
aabpf = np.asarray(abundpf1[l][:len(t)])
aab = aabundf - aabpf
abundances.extend(aab.tolist())
time.extend(t)
species.extend([s.replace('#','ice')]*len(t))
abundscale.extend([str(factor)]*len(t))
varying.extend([e[0]]*len(t))#varying element means we could construct a page of plots
model.extend(["collapse"]*len(t))
#calculate the parallel track for the current factor
# npx = aabundf * (factor - 1.0)
# abundances.extend(npx.tolist())
# time.extend(t)
# species.extend([s.replace('#','ice')+"pll"]*len(t))
# abundscale.extend([str(factor)]*len(t))
# varying.extend([e[0]]*len(t))#varying element means we could construct a page of plots
# model.extend(["collapse"]*len(t))
#print("time "+str(len(time))+" abundances ",str(len(abundances))+" species "+str(len(species))+" factor "+str(len(abundscale))+" varying "+str(len(varying))+" model "+str(len(model)))
#static model plot
title = "Ctrl"
lb = True
collfile = columnpath + "static-full"+e[0]+ "-" + str(factor).replace('.','_')+".dat"
title = "Varying " + e[0]#+str(factor)
lb = False
#call read_uclchem.
t,dens,temp,abund=pf.read_uclchem(collfile,speciesNames)
#note this time pf1 replaced by sf1
for l, s in enumerate(speciesDisplay):
if (len(abund[l]) != len(t)) | (len(abundsf1[l]) != len(tsf1)):
print("Static Species "+s+" no values a=" +str(len(abund[l]))+" t="+str(len(t)) + " a1=" +str(len(abundsf1[l]))+" t1="+str(len(tsf1)))
else:
#extract the abundance and subtract the base factor 1 level first get the lists the same length as for factor 1
if len(t) > len(tsf1):
aabundf = np.asarray(abund[l][:len(tsf1)])
aabpf = np.asarray(abundsf1[l][:len(tsf1)])
aab = aabundf - aabpf
abundances.extend(aab.tolist())
time.extend(tsf1)
species.extend([s.replace('#','ice')]*len(tsf1))
abundscale.extend([str(factor)]*len(tsf1))
varying.extend([e[0]]*len(tsf1))#varying element means we could construct a page of plots
model.extend(["static"]*len(tsf1))
#calculate the parallel track for the current factor
# npx = aabundf * (factor - 1.0)#rebase so factor 1 is teh new zero
# abundances.extend(npx.tolist())
# time.extend(tsf1)
# species.extend([s.replace('#','ice')+"pll"]*len(tsf1))
# abundscale.extend([str(factor)]*len(tsf1))
# varying.extend([e[0]]*len(tsf1))#varying element means we could construct a page of plots
# model.extend(["static"]*len(tsf1))
else:
aabundf = np.asarray(abund[l][:len(t)])
aabpf = np.asarray(abundsf1[l][:len(t)])
aab = aabundf - aabpf
abundances.extend(aab.tolist())
time.extend(t)
species.extend([s.replace('#','ice')]*len(t))
abundscale.extend([str(factor)]*len(t))
varying.extend([e[0]]*len(t))#varying element means we could construct a page of plots
model.extend(["static"]*len(t))
#calculate the parallel track for the current factor
# npx = aabundf * (factor - 1.0)
# abundances.extend(npx.tolist())
# time.extend(t)
# species.extend([s.replace('#','ice')+"pll"]*len(t))
# abundscale.extend([str(factor)]*len(t))
# varying.extend([e[0]]*len(t))#varying element means we could construct a page of plots
# model.extend(["static"]*len(t))
#print("time "+str(len(time))+" abundances ",str(len(abundances))+" species "+str(len(species))+" factor "+str(len(abundscale))+" varying "+str(len(varying))+" model "+str(len(model)))
# timenp = np.asarray(time)
# if not timeadded:
# p1df["time"] = time
# timeadded = True
# for l, s in enumerate(speciesNames):
# colname=s+str(factor)
# colname = s.replace('#','ice')
# specfactor.append(colname)
# p1df = pd.concat([p1df,pd.DataFrame({colname:abundances[l]})],axis=1)#column for each species with
#
#plot species and save to test.png, alternatively send dens instead of time.
#axis,rtist0=pf.plot_species(speciesNames,time,abundances,axes,ls=Linestles[j],lab=lb)#ax=axes[i])
#axis,rtist1=pf.plot_species(speciesNames,time,abundances,axes[1],ls=Linestles[j],lab=False)#ax=axes[i])
#p1 = p1df.transpose()
if bulk is False:
print("time "+str(len(time))+" abundances ",str(len(abundances))+" species "+str(len(species))+" factor "+str(len(abundscale))+" varying "+str(len(varying))+" model "+str(len(model)))
p1df = pd.DataFrame({"time":time,"abundances":abundances,"species":species,"factor":abundscale,"varying":varying,"model":model})
g = sns.FacetGrid(p1df,col="model",row="varying")
g.map_dataframe(sns.lineplot, x="time",y="abundances",hue="species",style="factor",dashes=Linestyles,linewidth=imgparams[4],ci=None) #data=p1df,legend="brief",ax=axes,
g.set(xscale=xaslog,yscale='linear',xlim=(1e0,timelim)) #,ylim=(-1e-6,1e-6)
g.set_axis_labels('Time / Years','X/H')
g.add_legend()
#axes.set(xscale=xaslog,yscale='log',ylim=(1e-18,1e-3),xlim=(1e0,t[-1]))
#axes.set_xlabel('Time / Years',fontsize=imgparams[2])
#if xaslog == 'linear':
# axes.ticklabel_format(axis='x',useMathText=True)
# axes.set_ylabel('X/H',fontsize=imgparams[2])
#axes.set_title(title+" static cloud",fontsize=imgparams[3])
#axes.legend(loc='best',fontsize=imgparams[1])
plt.savefig(plotspath+"/facetplot"+e[0]+"_"+groupname+".png",dpi=300)
#axes[0].text(.02,0.98,e[0],horizontalalignment="left",verticalalignment="top",transform=axes[0].transAxes)
#the single plot per page version
# axes.text(.02,0.98,e[0],horizontalalignment="left",verticalalignment="top",transform=axes.transAxes)
#axes[3].text(.02,0.98,"Your Row",horizontalalignment="left",verticalalignment="top",transform=axes[3].transAxes)
# fig.savefig(plotspath+"/staticplot"+e[0]+"_"+speciesNames[0]+".png",dpi=300)
#pf.plt.clf()
# Pick the species groupings to render: ice species included or excluded.
species_lists = speciesNameLists if ice else speciesNoiceNameLists
if bulk:
    # Bulk mode: fan the plotting jobs out over a pool of worker processes.
    pool = mp.Pool(12)
    pool.map(plotchem, species_lists)
    pool.close()
    pool.join()
else:
    # Interactive mode: render each species grouping sequentially.
    for species_group in species_lists:
        plotchem(species_group)
| [
"nrhansen@blueyoder.co.uk"
] | nrhansen@blueyoder.co.uk |
1213124adf75033f2ad6ca1f2f3e3bee5626ff5f | f31378472eab1dfbe9c960ded9843c3638ef43a2 | /solution1010.py | 94bc45a30138668526c7c2c490bd2c94e4192839 | [] | no_license | akljohny/madlabs-teachcode-solutions | 02ed7befd87d66eb51e5774f87ad5976872ac51b | 4882a61a0d40a85bb294841aa7977a047e609aae | refs/heads/master | 2021-01-19T15:10:59.580868 | 2017-11-13T14:15:15 | 2017-11-13T14:15:15 | 100,948,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | x = int(1.23)
# Demonstrates the builtin type-conversion functions (x = int(1.23) is
# defined on the previous line and evaluates to 1).
y = float(5)  # int -> float: 5.0
z = str(8)  # int -> str: '8'
q = ord('a')  # character -> Unicode code point: 97
r = chr(97)  # code point -> character: 'a'
s = hex(10)  # int -> hexadecimal string: '0xa'
print (x, y, z, r, q, s)
| [
"johnyakhil123@gmail.com"
] | johnyakhil123@gmail.com |
bb0ce9645fde1dd12f1cdcbc2c425aca062c074a | 981fcfe446a0289752790fd0c5be24020cbaee07 | /python2_Grammer/src/basic/zhengze/rool/字符集和数量/字符集/5_单个字符.py | a5c6ad067a51536c1c61d9cec1f9965226efcb1d | [] | no_license | lijianbo0130/My_Python | 7ba45a631049f6defec3977e680cd9bd75d138d1 | 8bd7548c97d2e6d2982070e949f1433232db9e07 | refs/heads/master | 2020-12-24T18:42:19.103529 | 2016-05-30T03:03:34 | 2016-05-30T03:03:34 | 58,097,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | #coding=utf-8
'''
Created on 2015年8月4日
@author: Administrator
'''
from __future__ import division
import sys
reload(sys)
sys.setdefaultencoding('utf-8') # @UndefinedVariable
import re
# Single word character: \w matches [A-Za-z0-9_] (note that it includes '_')
# Non-word character class: \W
lis=re.findall("\w", "_ppa")# \w includes '_', so all four characters match
print lis # ['_', 'p', 'p', 'a'] | [
"lijianbo0130@qq.com"
] | lijianbo0130@qq.com |
14615b181c2a08ffb6ac05466dc7b074d5563764 | 40a41f6cd08e6271c1d30bd06099ffdb5a637ac6 | /src/events/management/commands/recount_likes.py | e08e15ffce8ff17a83f6e89b6205d2adcfb429e4 | [] | no_license | PotapovaSofia/stackoverflow | 06add113f9861b2d807c120b3a7565c2a12eb876 | b436b952e0668ff28dd1a74810b1731c2fdb9b13 | refs/heads/master | 2021-01-20T15:51:06.307560 | 2016-06-03T10:52:39 | 2016-06-03T10:52:39 | 60,340,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | # -*- coding: utf-8 -*-
from django.core.management import BaseCommand
#from django.contrib.auth.models import User
#from .models import Event
import random
class Command(BaseCommand):
    """Django management command stub (run via `manage.py recount_likes`)."""

    def handle(self, *args, **kwargs):
        # NOTE(review): the triple-quoted block below is disabled seeding code
        # (it created 100 random published Events); the command currently only
        # prints a placeholder. `print "ololol"` is Python 2 syntax -- confirm
        # the target interpreter before reusing this module.
        """
        users = list(User.objects.all())
        for i in range(100):
            q = Event()
            q.author = random.choice(users)
            q.title = u'title {}'.format(i)
            q.text = u'text {}'.format(i)
            q.is_published = True
            q.save()
        """
        print "ololol"
| [
"potapova@phystech.edu"
] | potapova@phystech.edu |
3ea4b43b961f4ecf92bb65116360a8a18440ae1d | f53c4ae74da85302c9eef5e3df300bd44497a537 | /src/main/g8/$name$/wsgi.py | 2c6c16cbb60115dfbf35de424492cc0e0987b420 | [] | no_license | east301/django-template.g8 | 1bcd90577fb63c031951fd318c69c32c43dc7d8c | dc94292bad8e5b2e21086ba4a14307b222af6c23 | refs/heads/master | 2021-01-10T19:59:09.664448 | 2015-09-30T12:43:53 | 2015-09-30T12:49:11 | 42,756,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | #
# WSGI config for $name$ project.
#
# It exposes the WSGI callable as a module-level variable named ``application``.
#
# For more information on this file, see
# https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
#
import os

# Point Django at this project's settings module before building the app
# ($name$ is a giter8 template placeholder filled in at project generation).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', '$name$.settings')

from django.core.wsgi import get_wsgi_application

# Module-level WSGI callable that servers discover by convention.
application = get_wsgi_application()
| [
"me@east301.net"
] | me@east301.net |
f42bc817dcd318885005c9843c46c7c2fbb6a3a8 | 83934c40b2bd835464732345fa516b2c657a6259 | /Pyrado/scripts/training/qq-su_bayrn_power_sim2sim.py | bb7f330fbe57985f6ca1ae10001237a0591dbaea | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | 1abner1/SimuRLacra | e0427bf4f2459dcb992206d3b2f347beab68a5b4 | d7e9cd191ccb318d5f1e580babc2fc38b5b3675a | refs/heads/master | 2023-05-25T04:52:17.917649 | 2021-06-07T07:26:44 | 2021-06-07T07:26:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,840 | py | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Train an agent to solve the Qube swing-up task using Bayesian Domain Randomization.
"""
import numpy as np
import pyrado
from pyrado.algorithms.episodic.power import PoWER
from pyrado.algorithms.meta.bayrn import BayRn
from pyrado.domain_randomization.default_randomizers import (
create_default_domain_param_map_qq,
create_zero_var_randomizer,
)
from pyrado.domain_randomization.utils import wrap_like_other_env
from pyrado.environment_wrappers.domain_randomization import DomainRandWrapperLive, MetaDomainRandWrapper
from pyrado.environments.pysim.quanser_qube import QQubeSwingUpSim
from pyrado.logger.experiment import save_dicts_to_yaml, setup_experiment
from pyrado.policies.special.environment_specific import QQubeSwingUpAndBalanceCtrl
from pyrado.spaces import BoxSpace
from pyrado.utils.argparser import get_argparser
if __name__ == "__main__":
    # Parse command line arguments
    args = get_argparser().parse_args()

    # Experiment (set seed before creating the modules)
    ex_dir = setup_experiment(
        QQubeSwingUpSim.name,
        f"{BayRn.name}-{PoWER.name}_{QQubeSwingUpAndBalanceCtrl.name}",
        f"sim2sim_rand-Mp-Mr_seed-{args.seed}",
    )

    # Set seed if desired
    pyrado.set_seed(args.seed, verbose=True)

    # Environments: the source simulator gets a zero-variance randomizer whose
    # Mp/Mr parameters BayRn adapts through dp_map (mass parameters per
    # pyrado's Qube model -- confirm against create_default_domain_param_map_qq).
    env_sim_hparams = dict(dt=1 / 100.0, max_steps=600)
    env_sim = QQubeSwingUpSim(**env_sim_hparams)
    env_sim = DomainRandWrapperLive(env_sim, create_zero_var_randomizer(env_sim))
    dp_map = create_default_domain_param_map_qq()
    env_sim = MetaDomainRandWrapper(env_sim, dp_map)

    # sim2sim target: a second simulator with Mp and Mr increased by 10 %.
    env_real = QQubeSwingUpSim(**env_sim_hparams)
    env_real.domain_param = dict(
        Mp=0.024 * 1.1,
        Mr=0.095 * 1.1,
    )
    env_real_hparams = env_sim_hparams
    env_real = wrap_like_other_env(env_real, env_sim)

    # PoWER and energy-based controller setup
    policy_hparam = dict(energy_gain=0.587, ref_energy=0.827, acc_max=10.0)
    policy = QQubeSwingUpAndBalanceCtrl(env_sim.spec, **policy_hparam)
    subrtn_hparam = dict(
        max_iter=5,
        pop_size=50,
        num_init_states_per_domain=4,
        num_domains=10,
        num_is_samples=5,
        expl_std_init=2.0,
        expl_std_min=0.02,
        symm_sampling=False,
        num_workers=12,
    )
    subrtn = PoWER(ex_dir, env_sim, policy, **subrtn_hparam)

    # PoWER and linear policy setup
    # policy_hparam = dict(
    #     feats=FeatureStack(identity_feat, sign_feat, abs_feat, squared_feat,
    #                        MultFeat((2, 5)), MultFeat((3, 5)), MultFeat((4, 5)))
    # )
    # policy = LinearPolicy(spec=env_sim.spec, **policy_hparam)
    # subrtn_hparam = dict(
    #     max_iter=20,
    #     pop_size=200,
    #     num_init_states_per_domain=6,
    #     num_is_samples=10,
    #     expl_std_init=2.0,
    #     expl_std_min=0.02,
    #     symm_sampling=False,
    #     num_workers=32,
    # )
    # subrtn = PoWER(ex_dir, env_sim, policy, **subrtn_hparam)

    # Set the boundaries for the GP
    # (bounds span +/- 20 % around the nominal Mp and Mr; the 1e-8..1e-7
    # entries are presumably the associated variances per dp_map -- confirm)
    dp_nom = QQubeSwingUpSim.get_nominal_domain_param()
    ddp_space = BoxSpace(
        bound_lo=np.array([0.8 * dp_nom["Mp"], 1e-8, 0.8 * dp_nom["Mr"], 1e-8]),
        bound_up=np.array([1.2 * dp_nom["Mp"], 1e-7, 1.2 * dp_nom["Mr"], 1e-7]),
    )

    # Algorithm
    bayrn_hparam = dict(
        max_iter=15,
        acq_fc="UCB",
        acq_param=dict(beta=0.25),
        acq_restarts=500,
        acq_samples=1000,
        num_init_cand=4,
        warmstart=False,
        num_eval_rollouts_real=100,
        thold_succ_subrtn=300,
    )

    # Save the environments and the hyper-parameters (do it before the init routine of BayRn)
    save_dicts_to_yaml(
        dict(env_sim=env_sim_hparams, env_real=env_real_hparams, seed=args.seed),
        dict(policy=policy_hparam),
        dict(subrtn=subrtn_hparam, subrtn_name=PoWER.name),
        dict(algo=bayrn_hparam, algo_name=BayRn.name, dp_map=dp_map),
        save_dir=ex_dir,
    )
    algo = BayRn(ex_dir, env_sim, env_real, subrtn, ddp_space, **bayrn_hparam)

    # Jeeeha
    algo.train(snapshot_mode="latest", seed=args.seed)
| [
"fabio.muratore@famura.net"
] | fabio.muratore@famura.net |
04f3348dcba79ceb132538619203da84de297413 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/byte/Schema+Instance/NISTXML-SV-IV-atomic-byte-maxExclusive-3-1.py | 2e778fde1dbab8cd334f90bfc4b9a342ede77976 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 260 | py | from output.models.nist_data.atomic.byte.schema_instance.nistschema_sv_iv_atomic_byte_max_exclusive_3_xsd.nistschema_sv_iv_atomic_byte_max_exclusive_3 import NistschemaSvIvAtomicByteMaxExclusive3
# Generated instance for the NIST byte/maxExclusive test case:
# -128 is the smallest legal xs:byte value.
obj = NistschemaSvIvAtomicByteMaxExclusive3(
    value=-128
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
5f7fad412dbfbf0b6efab78bdc1c3472dc5bfdd5 | d1036f459db9414acf24c69f742b81a1c5fd240b | /lab1/task1.py | 2a0004faf954795c9b2d7a8431695f754d4a54cc | [] | no_license | KuzyaCat/computational-geometry | 6035472e51a9ea581122b4447d8492e5155af338 | 409948d48531af48a2ce8867143e9d2106e09e64 | refs/heads/master | 2022-08-24T12:22:28.076008 | 2020-05-28T17:16:18 | 2020-05-28T17:16:18 | 267,631,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | import matplotlib.pyplot as plt
import numpy
import random
from Point import Point
point0 = Point(random.randint(0, 10), random.randint(0, 10))
point1 = Point(random.randint(0, 10), random.randint(0, 10))
point2 = Point(random.randint(0, 10), random.randint(0, 10))
def getMatrix(p0: Point, p1: Point, p2: Point):
    """Build the 2x2 matrix whose determinant tells on which side of the
    line through p1 and p2 the point p0 lies."""
    segment_vector = [p2.x - p1.x, p2.y - p1.y]  # direction p1 -> p2
    point_vector = [p0.x - p1.x, p0.y - p1.y]    # offset p1 -> p0
    return [segment_vector, point_vector]
def getPointPosition(matrix: list) -> str:
    """Classify a point's position relative to a line from its 2x2 orientation matrix.

    Returns 'left', 'right' or 'on the line' according to the sign of the
    determinant. The determinant is computed with the exact 2x2 formula
    instead of numpy.linalg.det: the LU-based numpy routine returns a float
    and can yield tiny non-zero values (e.g. ~1e-16) for singular integer
    matrices, which made collinear points come out as 'left'/'right'.
    """
    d = matrix[0][0] * matrix[1][1] - matrix[0][1] * matrix[1][0]
    if d > 0:
        return 'left'
    elif d < 0:
        return 'right'
    else:
        return 'on the line'
def drawLine(p1: Point, p2: Point):
    # Draw a polyline through the origin (0, 0), p1 and p2.
    plt.plot([0, p1.x, p2.x], [0, p1.y, p2.y])
def drawPoint(p0: Point):
    # Mark the query point p0 with a scatter dot.
    plt.scatter(p0.x, p0.y)
def drawText(text: str):
    # Show the classification result as the figure's super-title.
    plt.suptitle(text, fontsize=14)
def draw(p0: Point, p1: Point, p2: Point):
    """Plot the line through p1/p2, the point p0, and the side classification.

    Bug fix: the title previously classified the module-level globals
    point0/point1/point2 instead of the function arguments, so calling
    draw() with any other points would display a stale classification.
    The arguments are now used consistently.
    """
    plt.grid(True)  # auxiliary grid lines
    drawLine(p1, p2)
    drawPoint(p0)
    drawText(getPointPosition(getMatrix(p0, p1, p2)))
    plt.show()
draw(point0, point1, point2)
| [
"alex@MacBook-Pro-Alexander.local"
] | alex@MacBook-Pro-Alexander.local |
b1c2cca63528b2e3c2555181467246a10c567da9 | 257611fe3cac7af9776741e361ec9ae5ad5568c8 | /lesson2-4_step5.py | 0630e4ce1f369f5ca1069adb555921d79dba3d6e | [] | no_license | krutik228/stepik_auto-test-selenium-course | bd1a35a4523e676c9a1d434ca7ee8fc1bd7f96cf | 14e9ad4a7f884616c6d6af1f02919a96ecaa388d | refs/heads/master | 2023-03-01T16:09:22.750410 | 2021-02-10T18:36:41 | 2021-02-10T18:36:41 | 337,802,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | from selenium import webdriver
import time
link="http://suninjuly.github.io/wait1.html"
# Create the driver BEFORE the try block: if webdriver.Chrome() itself fails,
# the original code reached the finally clause with `browser` unbound and
# raised a NameError that masked the real startup error.
browser = webdriver.Chrome()
try:
    browser.implicitly_wait(5)  # implicit wait: poll up to 5 s for elements
    browser.get(link)
    browser.find_element_by_id("verify").click()
    # After the click the page is expected to show a success message.
    assert "successful" in browser.find_element_by_id("verify_message").text
finally:
    # Short pause so a human can see the result, then always close the browser.
    time.sleep(5)
    browser.quit()
| [
"nikkrutik@mail.ru"
] | nikkrutik@mail.ru |
849dc7bec027beb9388173ab0f4d7875af17de51 | 09c87fe780df6d1f9eb33799ed516a0bbd7ab1e3 | /src/tests/python-in/testmodule_pynsource.py | 735e0b98628d7ea19d2e62d5bcea3176a8213c7a | [] | no_license | abulka/pynsource | 8ad412b85dc1acaeb83d7d34af8cc033c6baba91 | 979436525c57fdaeaa832e960985e0406e123587 | refs/heads/master | 2023-04-13T12:58:02.911318 | 2023-04-11T09:56:32 | 2023-04-11T09:56:32 | 32,249,425 | 271 | 46 | null | 2022-10-10T04:36:57 | 2015-03-15T07:21:43 | Python | UTF-8 | Python | false | false | 3,827 | py | # pynsource command line tool
import os
# from core_parser import *
from generate_code.gen_asciiart import CmdLinePythonToAsciiArt
from generate_code.gen_yuml import CmdLinePythonToYuml
from generate_code.gen_delphi import CmdLinePythonToDelphi
from generate_code.gen_java import CmdLinePythonToJava
import messages
def test():
    """Ad-hoc smoke test: parse one sample module and print its yUML rendering."""
    # FILE = "..\\tests\\python-in\\testmodule01.py"
    FILE = "..\\tests\\python-in\\testmodule66.py"
    # p = PySourceAsText()
    # NOTE(review): PySourceAsYuml is not imported anywhere in this module
    # (the `from core_parser import *` at the top is commented out), so
    # calling test() most likely raises NameError -- confirm before use.
    p = PySourceAsYuml()
    # p.optionModuleAsClass = True
    p.Parse(FILE)
    # print '*'*20, 'parsing', FILE, '*'*20
    print(p)
    # print 'Done.'
def ParseArgsAndRun():
    """Parse the command-line options and dispatch to the chosen exporter.

    Options: -a ascii art (the default), -m treat modules as classes,
    -v verbose, -j <dir> Java export, -d <dir> Delphi export,
    -y <png> yUML export. Remaining arguments are glob patterns of the
    Python files to parse.
    """
    import sys, glob
    import getopt  # good doco http://www.doughellmann.com/PyMOTW/getopt/
    # should possibly upgrade to using http://docs.python.org/library/argparse.html#module-argparse
    SIMPLE = 0
    globbed = []
    optionVerbose = 0
    optionModuleAsClass = 0
    optionExportToJava = 0
    optionExportToDelphi = 0
    optionExportToYuml = False
    optionExportTo_outdir = ""
    if SIMPLE:
        # Debug path: treat the single first argument as one glob pattern.
        params = sys.argv[1]
        globbed = glob.glob(params)
    else:
        listofoptionvaluepairs, params = getopt.getopt(sys.argv[1:], "amvy:j:d:")
        # print listofoptionvaluepairs, params
        # print dict(listofoptionvaluepairs) # turn e.g. [('-v', ''), ('-y', 'fred.png')] into nicer? dict e.g. {'-v': '', '-y': 'fred.png'}

        def EnsurePathExists(outdir, outlanguagemsg):
            # Fail fast when the requested output directory is empty or missing.
            assert outdir, "Need to specify output folder for %s output - got %s." % (
                outlanguagemsg,
                outdir,
            )
            if not os.path.exists(outdir):
                raise RuntimeError(
                    "Output directory %s for %s file output does not exist."
                    % (outdir, outlanguagemsg)
                )

        for optionvaluepair in listofoptionvaluepairs:
            if "-a" == optionvaluepair[0]:
                pass  # default is asciart, so don't need to specify
            if "-m" == optionvaluepair[0]:
                optionModuleAsClass = 1
            if "-v" == optionvaluepair[0]:
                optionVerbose = 1
            if optionvaluepair[0] in ("-j", "-d"):
                if optionvaluepair[0] == "-j":
                    optionExportToJava = 1
                    language = "Java"
                else:
                    optionExportToDelphi = 1
                    language = "Delphi"
                optionExportTo_outdir = optionvaluepair[1]
                EnsurePathExists(optionExportTo_outdir, language)
            # NOTE(review): ("-y") is a plain string, not a 1-tuple, so this is
            # a substring test; it happens to work for the exact "-y" option
            # but ("-y",) was probably intended.
            if optionvaluepair[0] in ("-y"):
                optionExportToYuml = True
                optionExportTo_outpng = optionvaluepair[1]
        # Expand the leftover positional arguments (glob patterns) into files.
        for param in params:
            files = glob.glob(param)
            globbed += files
    if globbed:
        if optionExportToJava or optionExportToDelphi:
            if optionExportToJava:
                u = CmdLinePythonToJava(
                    globbed, treatmoduleasclass=optionModuleAsClass, verbose=optionVerbose
                )
            else:
                u = CmdLinePythonToDelphi(
                    globbed, treatmoduleasclass=optionModuleAsClass, verbose=optionVerbose
                )
            u.ExportTo(optionExportTo_outdir)
        elif optionExportToYuml:
            u = CmdLinePythonToYuml(
                globbed, treatmoduleasclass=optionModuleAsClass, verbose=optionVerbose
            )
            u.ExportTo(optionExportTo_outpng)
        else:
            u = CmdLinePythonToAsciiArt(
                globbed, treatmoduleasclass=optionModuleAsClass, verbose=optionVerbose
            )
            u.ExportTo(None)
    else:
        print(messages.HELP_COMMAND_LINE_USAGE)
if __name__ == "__main__":
    # test()
    # exit(0)
    # Entry point: parse command-line options and run the chosen exporter.
    ParseArgsAndRun()
| [
"abulka@gmail.com"
] | abulka@gmail.com |
e20c8318a5d3f11ad8b9e57c4c2e1d2243ad557f | 5858534fed46ddc44224e39ccc0449b7aea7b418 | /terminal.py | 107891bd56f3e10f441e1b1f630ec985c0776e40 | [] | no_license | eduardomarossi/z01.1-ula | 2811c1c505e9f56c378992b92d4b04e50aa2a3df | ff2aaf5e68c8e9c01e3fc5983630702b11284d5a | refs/heads/master | 2022-12-28T17:31:54.431158 | 2020-09-18T11:38:15 | 2020-09-18T11:38:15 | 295,547,067 | 1 | 1 | null | 2023-09-14T18:12:52 | 2020-09-14T21:52:12 | Python | UTF-8 | Python | false | false | 1,852 | py | import sys
from random import randrange
from ula import compute_ula, convert_output
VERSION = '1.1.0'
class UlaTerminal:
    """Interactive terminal around the ALU (ULA) simulator from the `ula` module.

    Keeps one value per ALU field (control bits plus 16-bit x/y operands),
    prints the computed output, and lets the user edit fields interactively.
    Prompts are in Portuguese; 'S' quits the program.
    """

    # Field names accepted by compute_ula: the x/y operands plus control bits.
    ula_fields = ['x', 'y', 'zx', 'nx', 'zy', 'ny', 'f', 'no']

    def __init__(self):
        self.data = {}
        # Start every field with a random bit ...
        for k in UlaTerminal.ula_fields:
            self.data[k] = randrange(0, 2)
        # ... then widen the two operands to random 16-bit values.
        self.data['x'] = randrange(0, 2**16)
        self.data['y'] = randrange(0, 2**16)

    def print_ula_vals(self):
        # Show the current inputs in binary, then the computed ALU output.
        print('======-- ULA --======')
        for k in UlaTerminal.ula_fields:
            print('{0:s}: {1:b}'.format(k, self.data[k]))
        print('\n')
        result = compute_ula(**self.data)
        print('======-- Output --======')
        # result unpacks as (zr, ng, out); convert_output formats the out word
        # -- see the ula module for the exact representation.
        print('zr: {} ng: {} out: {}'.format(result[0], result[1], convert_output(result[2])))
        print('')

    def ask(self):
        # Prompt for a field name until a valid one is given; 'S' exits.
        campo = ''
        valor = None
        while campo not in UlaTerminal.ula_fields:
            campo = input('Digite nome do campo para alterar valor ou S para sair: ')
            if campo.strip() == 'S':
                sys.exit(0)
        # Prompt for a value until it is valid: control bits must be '0'/'1',
        # while x and y accept any binary string.
        while valor is None:
            try:
                valor = input('Digite um valor para o campo: ')
                if campo not in ['x', 'y'] and (valor not in ['0', '1']):
                    valor = None
                    raise Exception('Valor deve ser 0 ou 1')
                elif campo in ['x', 'y']:
                    try:
                        int(valor, 2)
                    except:
                        valor = None
                        raise Exception('Valor de X e Y deve ser binário')
            except Exception as e:
                print(e)
        # Store the accepted value, parsed as base-2.
        self.data[campo] = int(valor, 2)
if __name__ == '__main__':
    # Print a banner, then loop forever: show the ALU state, let the user
    # edit one field. The loop only exits via sys.exit(0) inside ask().
    print('z01.1-ula terminal - v' + VERSION)
    ula = UlaTerminal()
    while True:
        ula.print_ula_vals()
        ula.ask()
| [
"eduardom44@gmail.com"
] | eduardom44@gmail.com |
89f47ddd24567b0144cfa0d3141664aa7eeddcef | 666008c9cea62f793fa1c62fb07cd730473476eb | /89.py | 87214d59d0450cf952ab69f5d816df6f2dbcd903 | [] | no_license | dhivya2nandha/guvi | dcb120313521967687da3efa3a51857c5f6bcd53 | 3ec3da8a837e55d05c0703934c21f14771405bbb | refs/heads/master | 2020-05-24T02:46:57.706182 | 2019-07-03T05:06:02 | 2019-07-03T05:06:02 | 187,060,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py |
"""89.py"""
ss=str(input())
s1=list(ss)
s1.sort()
print(sep='',*s1)
| [
"noreply@github.com"
] | dhivya2nandha.noreply@github.com |
736177be6e62fa382ac47be5d33fbdc6148042ad | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_1/brkluk001/question2.py | 3b83362c9673632335a5e01fda0b228f12c0c017 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | validity = 'invalid.'
# SECURITY NOTE(review): eval() on raw user input executes arbitrary
# expressions; int(input(...)) would be safer. eval is kept here only to
# preserve the exercise's original input behaviour.
hours = eval(input('Enter the hours:\n'))
minutes = eval(input('Enter the minutes:\n'))
seconds = eval(input('Enter the seconds:\n'))
# A clock time is valid for hours 0-23, minutes 0-59 and seconds 0-59.
# (Bug fix: the original accepted 60 for minutes and seconds, so inputs
# such as 12:60:60 were wrongly reported as valid.)
if 0 <= hours <= 23 and 0 <= minutes <= 59 and 0 <= seconds <= 59:
    validity = 'valid.'
print('Your time is',validity)
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
7cb9f5ba5f68f71c7f11994b8f313ec3db24991f | 0f2f7a35fdd98757cd261aed4b446241d97280f6 | /time_series/acquire.py | 4d44bab1ffc34b647b0a67ce69c25062d80b08d9 | [] | no_license | aleclhartman/ds-methodologies-exercises | e34b888150a6addb9f1c4ba25bf159992e02db00 | 2cf0fea4f734210f17422cc30abe81080392d510 | refs/heads/master | 2021-05-18T21:16:54.961691 | 2020-05-27T23:32:48 | 2020-05-27T23:32:48 | 251,424,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,515 | py | import numpy as np
import pandas as pd
import requests
from os import path
def get_df(name):
    """
    Return the requested table ("items", "stores" or "sales") as a DataFrame.

    1. If a cached "<name>.csv" exists, read and return it directly.
       (Bug fix: the original always issued an HTTP GET and built the first
       API page even when the cache was present; the cache check now comes
       first, so no network traffic happens on the cached path.)
    2. Otherwise fetch every page of the paginated zach.lol API, concatenate
       them, write the result to "<name>.csv", and return the DataFrame.

    Parameters
    ----------
    name : str
        API endpoint name; also the key under data["payload"] and the csv stem.
    """
    csv_file = name + ".csv"
    # Fast path: use the local cache and skip the HTTP round-trips entirely.
    if path.exists(csv_file):
        return pd.read_csv(csv_file, index_col=0)

    base_url = "https://python.zach.lol"
    api_url = base_url + "/api/v1/"

    # First page seeds the DataFrame.
    response = requests.get(api_url + name)
    data = response.json()
    df = pd.DataFrame(data["payload"][name])

    # Follow the "next_page" links until the API reports no further pages.
    while data["payload"]["next_page"] is not None:
        response = requests.get(base_url + data["payload"]["next_page"])
        data = response.json()
        df = pd.concat([df, pd.DataFrame(data["payload"][name])]).reset_index().drop(columns="index")

    # Cache for subsequent calls, then return.
    df.to_csv(csv_file)
    return df
def get_sales():
    """
    Return the merged sales/items/stores "master" DataFrame.

    1. If a cached master.csv exists, read and return it immediately.
       (Bug fix: the original still loaded all three component tables first,
       even though only the cached master was returned.)
    2. Otherwise load items, stores and sales (each from its own csv cache
       when present, else via get_df), left-join items onto sales and then
       stores onto the result, cache the merge as master.csv, and return it.
    """
    # Fast path: the fully merged table is already cached.
    if path.exists("master.csv"):
        return pd.read_csv("master.csv", index_col=0)

    def load_table(name):
        # Read the per-table csv cache when present, else fetch via get_df.
        csv_file = name + ".csv"
        if path.exists(csv_file):
            return pd.read_csv(csv_file, index_col=0)
        return get_df(name)

    items = load_table("items")
    stores = load_table("stores")
    sales = load_table("sales")

    # sales <- items on item/item_id, then <- stores on store/store_id.
    sales_and_items = pd.merge(sales, items, left_on="item", right_on="item_id", how="left")
    df = pd.merge(sales_and_items, stores, left_on="store", right_on="store_id", how="left")
    # Drop the now-redundant join keys, cache the merge, and return it.
    df.drop(columns=['item', 'store'], inplace=True)
    df.to_csv("master.csv")
    return df
def get_germany():
    """
    This function does the following:
    1. Looks for an existing germany.csv file, reads the csv, and returns a DataFrame
    2. If the file does not exist, the function uses the link variable to get the Open Power Systems Data for Germany, writes the DataFrame to a csv file,
    and returns a DataFrame
    """
    url = "https://raw.githubusercontent.com/jenfly/opsd/master/opsd_germany_daily.csv"
    # Prefer the local cache; otherwise download from GitHub and cache it.
    if path.exists("germany.csv"):
        df = pd.read_csv("germany.csv", index_col=0)
    else:
        df = pd.read_csv(url)
        df.to_csv("germany.csv")
return df | [
"aleclhartman08@gmail.com"
] | aleclhartman08@gmail.com |
6e7debb71663a192c1e344ae9cf559854650a16d | d5fa2f5b6b5933ef6c629430c74f61ee7f1248b5 | /transformer3.py | a85a41b5efb09b5b59de3978f0d60bee2018543d | [] | no_license | ytyz1307zzh/Multi-Modal_Translation | 09c5c7847572095f0fc361b92ab258613c85fe7f | 0e6bededa3a695796b355ffc09c3324cfad95d38 | refs/heads/master | 2020-07-01T07:25:50.544664 | 2019-10-27T14:48:18 | 2019-10-27T14:48:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,994 | py | '''
@Date : 8/22/2018
@Author: Shuming Ma
@mail : shumingma@pku.edu.cn
@homepage: shumingma.com
'''
import torch
import torch.nn as nn
import torch.utils.data
import torch.optim as Optim
from nltk.translate.bleu_score import sentence_bleu
import json
import argparse
import time
import os
import random
import pickle
from PIL import Image
import numpy as np
from modules import *
parser = argparse.ArgumentParser(description='train.py')
parser.add_argument('-n_emb', type=int, default=512, help="Embedding size")
parser.add_argument('-n_hidden', type=int, default=512, help="Hidden size")
parser.add_argument('-d_ff', type=int, default=2048, help="Hidden size of Feedforward")
parser.add_argument('-n_head', type=int, default=8, help="Number of head")
parser.add_argument('-n_block', type=int, default=6, help="Number of block")
parser.add_argument('-batch_size', type=int, default=64, help="Batch size")
parser.add_argument('-epoch', type=int, default=50, help="Number of epoch")
parser.add_argument('-impatience', type=int, default=10, help='number of evaluation rounds for early stopping')
parser.add_argument('-report', type=int, default=1000, help="Number of report interval")
parser.add_argument('-lr', type=float, default=3e-4, help="Learning rate")
parser.add_argument('-dropout', type=float, default=0.1, help="Dropout rate")
parser.add_argument('-restore', type=str, default='', help="Restoring model path")
parser.add_argument('-mode', type=str, default='train', help="Train or test")
parser.add_argument('-dir', type=str, default='ckpt', help="Checkpoint directory")
parser.add_argument('-max_len', type=int, default=30, help="Limited length for text")
parser.add_argument('-n_img', type=int, default=5, help="Number of input images")
parser.add_argument('-n_com', type=int, default=5, help="Number of input comments")
parser.add_argument('-output', default='prediction.json', help='Output json file for generation')
parser.add_argument('-src_lang', type=str, required=True, choices=['en', 'fr', 'de'], help='Source language')
parser.add_argument('-tgt_lang', type=str, required=True, choices=['en', 'fr', 'de'], help='Target language')
parser.add_argument('-img_dim', type=int, default=49, help='(length x width) dimension of CNN features')
opt = parser.parse_args()
assert opt.src_lang != opt.tgt_lang
assert opt.src_lang == 'en' or opt.tgt_lang == 'en'
data_path = 'data/'
train_path = data_path + 'train_{}2{}.json'.format(opt.src_lang, opt.tgt_lang)
dev_path = data_path + 'val_{}2{}.json'.format(opt.src_lang, opt.tgt_lang)
src_vocab_path = data_path + '{}_dict.json'.format(opt.src_lang)
tgt_vocab_path = data_path + '{}_dict.json'.format(opt.tgt_lang)
train_img_path, dev_img_path = data_path + 'train_res34.pkl', data_path + 'val_res34.pkl'
src_vocabs = json.load(open(src_vocab_path, 'r', encoding='utf8'))['word2id']
tgt_vocabs = json.load(open(tgt_vocab_path, 'r', encoding='utf8'))['word2id']
src_rev_vocabs = json.load(open(src_vocab_path, 'r', encoding='utf8'))['id2word']
tgt_rev_vocabs = json.load(open(tgt_vocab_path, 'r', encoding='utf8'))['id2word']
opt.src_vocab_size = len(src_vocabs)
opt.tgt_vocab_size = len(tgt_vocabs)
torch.manual_seed(1234)
torch.cuda.manual_seed(1234)
if not os.path.exists(opt.dir):
os.mkdir(opt.dir)
class Model(nn.Module):
    """Multi-modal (image/video + source text) transformer for target-text generation.

    The building blocks (Embeddings, PositionalEncoding, VideoEncoder,
    TextEncoder, CommentDecoder, CoAttention, subsequent_mask, Variable)
    come from the star-import of the project's `modules`.
    """

    def __init__(self, n_emb, n_hidden, src_vocab_size, tgt_vocab_size, dropout, d_ff, n_head, n_block, text_len, img_len):
        super(Model, self).__init__()
        self.n_emb = n_emb
        self.n_hidden = n_hidden
        self.src_vocab_size = src_vocab_size
        self.tgt_vocab_size = tgt_vocab_size
        self.dropout = dropout
        # Token embedding + positional encoding, one stack per language side.
        self.src_embedding = nn.Sequential(Embeddings(n_hidden, src_vocab_size), PositionalEncoding(n_hidden, dropout))
        self.tgt_embedding = nn.Sequential(Embeddings(n_hidden, tgt_vocab_size), PositionalEncoding(n_hidden, dropout))
        self.video_encoder = VideoEncoder(n_hidden, d_ff, n_head, dropout, n_block)
        self.text_encoder = TextEncoder(n_hidden, d_ff, n_head, dropout, n_block)
        self.comment_decoder = CommentDecoder(n_hidden, d_ff, n_head, dropout, n_block)
        self.output_layer = nn.Linear(self.n_hidden, self.tgt_vocab_size)
        # Per-token loss; index 0 (padding) is ignored. reduce/size_average is
        # the deprecated pre-1.0 torch spelling of reduction='none'.
        self.criterion = nn.CrossEntropyLoss(reduce=False, size_average=False, ignore_index=0)
        self.co_attn = CoAttention(n_hidden, text_len, img_len)
        # Fuses the co-attention context with each target embedding (2h -> h).
        self.input_combine = nn.Linear(n_hidden*2, n_hidden)

    def encode_img(self, X):
        # X: image/video features -- presumably (batch, n_img, feature_dim);
        # confirm against the caller/VideoEncoder.
        out = self.video_encoder(X)
        return out

    def encode_text(self, X, m):
        # X: source token ids; m: encoded image memory the text encoder attends to.
        embs = self.src_embedding(X)
        out = self.text_encoder(embs, m)
        return out

    def decode(self, x, m1, m2, mask):
        # x: target ids so far; m1/m2: image and text memories; mask: causal mask.
        embs = self.tgt_embedding(x)
        # Broadcast one co-attention context vector over every target position.
        context = self.co_attn(m1, m2).unsqueeze(dim=1).repeat(1, embs.size(1), 1)
        inputs = self.input_combine(torch.cat((embs, context), dim=-1))
        out = self.comment_decoder(inputs, m1, m2, mask)
        out = self.output_layer(out)  # project to target-vocabulary logits
        return out

    def forward(self, X, Y, T):
        """Teacher-forced training step; returns the mean cross-entropy loss.

        X: image features, Y: target ids (first column is the start token),
        T: source token ids.
        """
        out_img = self.encode_img(X)
        out_text = self.encode_text(T, out_img)
        # Causal mask sized for the shifted target (Y without its last token).
        mask = Variable(subsequent_mask(Y.size(0), Y.size(1)-1), requires_grad=False).cuda()
        outs = self.decode(Y[:,:-1], out_img, out_text, mask)
        # Compare predictions against Y shifted left by one (drop the start token).
        Y = Y.t()
        outs = outs.transpose(0, 1)
        loss = self.criterion(outs.contiguous().view(-1, self.tgt_vocab_size),
                              Y[1:].contiguous().view(-1))
        return torch.mean(loss)

    def generate(self, X, T):
        """Greedy decoding for up to opt.max_len steps; returns ids without the start column."""
        out_img = self.encode_img(X)
        out_text = self.encode_text(T, out_img)
        ys = torch.ones(X.size(0), 1).long()  # start token id 1 (presumably BOS -- confirm vocab)
        with torch.no_grad():
            ys = Variable(ys).cuda()
            for i in range(opt.max_len):
                out = self.decode(ys, out_img, out_text,
                                  Variable(subsequent_mask(ys.size(0), ys.size(1))).cuda())
                prob = out[:, -1]  # logits at the newest position
                _, next_word = torch.max(prob, dim=-1, keepdim=True)
                next_word = next_word.data
                ys = torch.cat([ys, next_word], dim=-1)
        return ys[:, 1:]
class DataSet(torch.utils.data.Dataset):
    """Paired (image features, target sentence, source sentence) dataset.

    Text data is a JSON list of dicts {"src": ..., "tgt": ...}; image
    features are a pickled structure aligned by index with the text records.
    """

    def __init__(self, data_path, src_vocabs, tgt_vocabs, img_path, is_train=True):
        print("starting load...")
        start_time = time.time()
        print('load data from file: ', data_path)
        print('load images from file: ', img_path)
        self.datas = json.load(open(data_path, 'r', encoding='utf8'))  # each piece is a dict like {"src": xxx, "tgt": xxx}
        self.imgs = torch.load(open(img_path, 'rb'))
        print("loading time:", time.time() - start_time)
        self.src_vocabs = src_vocabs
        self.tgt_vocabs = tgt_vocabs
        self.src_vocab_size = len(self.src_vocabs)
        self.tgt_vocab_size = len(self.tgt_vocabs)
        self.is_train = is_train

    def __len__(self):
        # Number of parallel sentence pairs.
        return len(self.datas)

    def __getitem__(self, index):
        data = self.datas[index]  # a dict like {"src": xxx, "tgt": xxx}
        I = torch.cuda.FloatTensor(self.imgs[index]['features'])  # input image
        I = I.view(I.size(0), -1)
        # Stacks the columns of I as rows, i.e. equivalent to I.t().
        X = torch.stack([I[:, i] for i in range(I.size(1))])
        T = DataSet.padding(data['src'], opt.max_len, 'src')  # source sequence
        Y = DataSet.padding(data['tgt'], opt.max_len, 'tgt')  # target sequence
        return X, Y, T

    @staticmethod
    # cut sentences that exceed the limit, turn words into numbers, pad sentences to max_len
    def padding(data, max_len, language):
        # NOTE(review): reads the module-level vocab dicts, not the copies
        # stored on the instance -- confirm they are always the same objects.
        if language == 'src':  # source language
            vocabs = src_vocabs
        elif language == 'tgt':  # target language
            vocabs = tgt_vocabs
        data = data.split()
        # Reserve two positions for the start (id 1) and end (id 2) markers.
        if len(data) > max_len-2:
            data = data[:max_len-2]
        # Unknown words fall back to id 3; id 0 pads to max_len.
        Y = list(map(lambda t: vocabs.get(t, 3), data))
        Y = [1] + Y + [2]
        length = len(Y)
        Y = torch.cat([torch.LongTensor(Y), torch.zeros(max_len - length).long()])
        return Y

    @staticmethod
    def transform_to_words(ids, language):
        # Map an id tensor back to a space-joined string, stopping at the
        # end marker (id 2).
        if language == 'src':  # source language
            rev_vocabs = src_rev_vocabs
        elif language == 'tgt':  # target language
            rev_vocabs = tgt_rev_vocabs
        words = []
        for id in ids:
            if id == 2:
                break
            words.append(rev_vocabs[str(id.item())])
        return " ".join(words)
def get_dataloader(dataset, batch_size, is_train=True):
    """Wrap *dataset* in a DataLoader; training data is shuffled, eval data is not."""
    loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=is_train,
    )
    return loader
def save_model(path, model):
    """Serialize the wrapped module's parameters to *path*.

    *model* is expected to be a DataParallel-style wrapper exposing the real
    network as ``model.module``.
    """
    torch.save(model.module.state_dict(), path)
def train():
    """Train the model with early stopping on the validation score.

    After every epoch the model is evaluated on the dev set; an improved
    score writes 'best_checkpoint_*.pt' and resets the impatience counter,
    otherwise 'impatience_*.pt' is written.  Training stops (via quit())
    once the score has failed to improve for more than opt.impatience
    consecutive epochs.
    """
    train_set = DataSet(train_path, src_vocabs, tgt_vocabs, train_img_path, is_train=True)
    dev_set = DataSet(dev_path, src_vocabs, tgt_vocabs, dev_img_path, is_train=False)
    train_batch = get_dataloader(train_set, opt.batch_size, is_train=True)
    model = Model(n_emb=opt.n_emb, n_hidden=opt.n_hidden, src_vocab_size=opt.src_vocab_size, tgt_vocab_size=opt.tgt_vocab_size,
                  dropout=opt.dropout, d_ff=opt.d_ff, n_head=opt.n_head, n_block=opt.n_block, text_len=opt.max_len, img_len=opt.img_dim)
    # Optionally warm-start from a previously saved state dict.
    if opt.restore != '':
        model_dict = torch.load(opt.restore)
        model.load_state_dict(model_dict)
    model.cuda()
    model = nn.DataParallel(model)
    optim = Optim.Adam(filter(lambda p: p.requires_grad,model.parameters()), lr=opt.lr)
    # score is the negated validation loss (see eval()), so larger is better.
    best_score = -1000000
    impatience = 0
    for i in range(opt.epoch):
        model.train()
        report_loss, start_time, n_samples = 0, time.time(), 0
        count, total = 0, len(train_set) // opt.batch_size + 1
        for batch in train_batch:
            model.zero_grad()
            X, Y, T = batch
            X = Variable(X).cuda()
            Y = Variable(Y).cuda()
            T = Variable(T).cuda()
            loss = model(X, Y, T)
            # .sum() folds the per-replica losses returned under DataParallel.
            loss.sum().backward()
            optim.step()
            report_loss += loss.sum().item()
            n_samples += len(X.data)
            count += 1
            if count % opt.report == 0 or count == total:
                print('%d/%d, epoch: %d, report_loss: %.3f, time: %.2f'
                      % (count, total, i+1, report_loss / n_samples, time.time() - start_time))
        # End-of-epoch validation.
        model.eval()
        score = eval(dev_set, model)
        model.train()
        if score > best_score:
            best_score = score
            impatience = 0
            print('New best score!')
            save_model(os.path.join(opt.dir, 'best_checkpoint_{:.3f}.pt'.format(-score)), model)
        else:
            impatience += 1
            print('Impatience: ', impatience, 'best score: ', best_score)
            save_model(os.path.join(opt.dir, 'impatience_{:.3f}.pt'.format(-score)), model)
        # quit() terminates the whole interpreter, not just this function.
        if impatience > opt.impatience:
            print('Early stopping!')
            quit()
        report_loss, start_time, n_samples = 0, time.time(), 0
        #save_model(os.path.join(opt.dir, 'checkpoint_{}.pt'.format(i+1)), model)
    return model
def eval(dev_set, model):
    """Return the negated validation loss (higher is better).

    NOTE: intentionally shadows the built-in eval() within this module;
    train() calls this function, not the builtin.
    """
    print("starting evaluating...")
    start_time = time.time()
    model.eval()
    dev_batch = get_dataloader(dev_set, opt.batch_size, is_train=False)
    loss = 0
    for batch in dev_batch:
        X, Y, T = batch
        with torch.no_grad():
            X = Variable(X).cuda()
            Y = Variable(Y).cuda()
            T = Variable(T).cuda()
            loss += model(X, Y, T).sum().item()
    # NOTE(review): normalizes against a hard-coded reference batch size of
    # 64 -- confirm this is intended when opt.batch_size != 64.
    loss = (loss * opt.batch_size) / 64
    print(loss)
    print("evaluating time:", time.time() - start_time)
    return -loss
def test(test_set, model):
    """Greedy-decode *test_set* and dump source/target/prediction triples.

    Results are written as a JSON list of dicts to opt.output.
    """
    model.eval()
    test_batch = get_dataloader(test_set, opt.batch_size, is_train=False)
    assert opt.output.endswith('.json'), 'Output file should be a json file'
    outputs = []
    cnt = 0  # counter for testing process
    for batch in test_batch:
        X, Y, T = batch
        cnt += X.size()[0]
        with torch.no_grad():
            X = Variable(X).cuda()
            Y = Variable(Y).cuda()
            T = Variable(T).cuda()
            predictions = model.generate(X, T).data
            assert X.size()[0] == predictions.size()[0] and X.size()[0] == T.size()[0]
            for i in range(X.size()[0]):
                out_dict = {'source': DataSet.transform_to_words(T[i].cpu(), 'src'),
                            'target': DataSet.transform_to_words(Y[i].cpu(), 'tgt'),
                            'prediction': DataSet.transform_to_words(predictions[i].cpu(), 'tgt')}
                outputs.append(out_dict)
        # Progress: number of examples processed so far.
        print(cnt)
    json.dump(outputs, open(opt.output, 'w', encoding='utf-8'), indent=4, ensure_ascii=False)
    print('All data finished.')
if __name__ == '__main__':
    print(opt)
    if opt.mode == 'train':
        train()
    elif opt.mode == 'test':
        # Inference: restore a checkpoint and decode the test split.
        test_path = data_path + 'test_{}2{}.json'.format(opt.src_lang, opt.tgt_lang)
        test_img_path = data_path + 'test_res34.pkl'
        test_set = DataSet(test_path, src_vocabs, tgt_vocabs, test_img_path, is_train=False)
        model = Model(n_emb=opt.n_emb, n_hidden=opt.n_hidden, src_vocab_size=opt.src_vocab_size, tgt_vocab_size=opt.tgt_vocab_size,
                      dropout=opt.dropout, d_ff=opt.d_ff, n_head=opt.n_head, n_block=opt.n_block, text_len=opt.max_len, img_len=opt.img_dim)
        # Checkpoints were saved from model.module (see save_model), so they
        # load directly into a bare, unwrapped Model.
        model_dict = torch.load(opt.restore)
        model.load_state_dict(model_dict)
        model.cuda()
        test(test_set, model)
| [
"noreply@github.com"
] | ytyz1307zzh.noreply@github.com |
b9b1ffa176d87b8402b83be490345771a651bf8b | 0da7e21298d4d9e6afb83927ef88d0cca88df603 | /cde-root/usr/lib64/python2.4/site-packages/Bio/SCOP/__init__.py | 074667923c17222cf2fb5717a2f95c80abd92f87 | [] | no_license | NirBenTalLab/find_motif | 391e908d28f4fe329c6273675aca078e1e94f13f | a11ea74a16e7487808227c71a711e0fa8a4ef8ff | refs/heads/master | 2021-01-01T05:10:02.586026 | 2016-04-12T13:02:38 | 2016-04-12T13:02:38 | 56,062,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,297 | py | # Copyright 2001 by Gavin E. Crooks. All rights reserved.
# Modifications Copyright 2004/2005 James Casbon. All rights Reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Changes made by James Casbon:
# - New Astral class
# - SQL functionality for both Scop and Astral classes
# - All sunids are int not strings
#
# Code written by Jeffrey Chang to access SCOP over the internet, which
# was previously in Bio.WWW.SCOP, has now been merged into this module.
""" SCOP: Structural Classification of Proteins.
The SCOP database aims to provide a manually constructed classification of
all known protein structures into a hierarchy, the main levels of which
are family, superfamily and fold.
* "SCOP":http://scop.mrc-lmb.cam.ac.uk/scop/
* "Introduction":http://scop.mrc-lmb.cam.ac.uk/scop/intro.html
* "SCOP parsable files":http://scop.mrc-lmb.cam.ac.uk/scop/parse/
The Scop object in this module represents the entire SCOP classification. It
can be built from the three SCOP parsable files, modified if so desired, and
converted back to the same file formats. A single SCOP domain (represented
by the Domain class) can be obtained from Scop using the domain's SCOP
identifier (sid).
nodeCodeDict -- A mapping between known 2 letter node codes and a longer
description. The known node types are 'cl' (class), 'cf'
(fold), 'sf' (superfamily), 'fa' (family), 'dm' (domain),
'sp' (species), 'px' (domain). Additional node types may
be added in the future.
This module also provides code to access SCOP over the WWW.
Functions:
search -- Access the main CGI script.
_open -- Internally used function.
"""
from types import *
import os
import Des
import Cla
import Hie
from Residues import *
from Bio import SeqIO
from Bio.Seq import Seq
# Two-letter node-type code -> human readable name.
nodeCodeDict = { 'cl':'class', 'cf':'fold', 'sf':'superfamily',
                 'fa':'family', 'dm':'protein', 'sp':'species', 'px':'domain'}

# Reverse of nodeCodeDict: long node-type name -> two-letter code.
_nodetype_to_code= { 'class': 'cl', 'fold': 'cf', 'superfamily': 'sf',
                     'family': 'fa', 'protein': 'dm', 'species': 'sp', 'domain': 'px'}

# Node types ordered from the root of the hierarchy down to domains.
nodeCodeOrder = [ 'ro', 'cl', 'cf', 'sf', 'fa', 'dm', 'sp', 'px' ]

# Percent-identity levels for which ASTRAL ships clustered subsets.
astralBibIds = [10,20,25,30,35,40,50,70,90,95,100]

# E-value thresholds for which ASTRAL ships clustered subsets.
astralEvs = [10, 5, 1, 0.5, 0.1, 0.05, 0.01, 0.005, 0.001, 1e-4, 1e-5, 1e-10, 1e-15,
             1e-20, 1e-25, 1e-50]

# E-value -> fragment of the corresponding ASTRAL file name.
astralEv_to_file = { 10: 'e+1', 5: 'e+0,7', 1: 'e+0', 0.5: 'e-0,3', 0.1: 'e-1',
                     0.05: 'e-1,3', 0.01: 'e-2', 0.005: 'e-2,3', 0.001: 'e-3',
                     1e-4: 'e-4', 1e-5: 'e-5', 1e-10: 'e-10', 1e-15: 'e-15',
                     1e-20: 'e-20', 1e-25: 'e-25', 1e-50: 'e-50' }

# E-value -> legal SQL column name (used by Astral.writeToSQL and lookups).
astralEv_to_sql = { 10: 'e1', 5: 'e0_7', 1: 'e0', 0.5: 'e_0_3', 0.1: 'e_1',
                    0.05: 'e_1_3', 0.01: 'e_2', 0.005: 'e_2_3', 0.001: 'e_3',
                    1e-4: 'e_4', 1e-5: 'e_5', 1e-10: 'e_10', 1e-15: 'e_15',
                    1e-20: 'e_20', 1e-25: 'e_25', 1e-50: 'e_50' }
def cmp_sccs(sccs1, sccs2):
    """Order SCOP concise classification strings (sccs).

    a.4.5.1 < a.4.5.11 < b.1.1.1

    A sccs (e.g. a.4.5.11) compactly represents a domain's classification.
    The letter represents the class, and the numbers are the fold,
    superfamily, and family, respectively.
    """
    # NOTE: relies on the Python 2 builtin cmp(); this module is Python 2 code.
    s1 = sccs1.split(".")
    s2 = sccs2.split(".")

    # Compare the class letters lexicographically first.
    if s1[0] != s2[0]: return cmp(s1[0], s2[0])

    # Then compare fold/superfamily/family numerically, not as strings,
    # so that 5 < 11 (rather than "11" < "5").
    s1 = map(int, s1[1:])
    s2 = map(int, s2[1:])

    return cmp(s1,s2)
# Matches an ASTRAL header such as
#   ">d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}"
# groups: 1=sid, 2=sccs, 3=residue range, 4=free-text description.
_domain_re = re.compile(r">?([\w_\.]*)\s+([\w\.]*)\s+\(([^)]*)\) (.*)")

def parse_domain(str):
    """Convert an ASTRAL header string into a Scop domain.

    An ASTRAL (http://astral.stanford.edu/) header contains a concise
    description of a SCOP domain.  A very similar format is used when a
    Domain object is converted into a string.  The Domain returned by this
    method contains most of the SCOP information, but it will not be located
    within the SCOP hierarchy (i.e. The parent node will be None).  The
    description is composed of the SCOP protein and species descriptions.

    A typical ASTRAL header looks like --
    >d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}
    """
    # NOTE: the parameter shadows the builtin str() within this function.
    m = _domain_re.match(str)
    if (not m) : raise ValueError("Domain: "+ str)

    dom = Domain()
    dom.sid = m.group(1)
    dom.sccs = m.group(2)
    dom.residues = Residues(m.group(3))
    # Recover the PDB id from the sid when absent (e.g. d1tpt_1 -> 1tpt).
    if not dom.residues.pdbid:
        dom.residues.pdbid= dom.sid[1:5]
    dom.description = m.group(4).strip()

    return dom
def _open_scop_file(scop_dir_path, version, filetype):
filename = "dir.%s.scop.txt_%s" % (filetype,version)
handle = open(os.path.join( scop_dir_path, filename))
return handle
class Scop:
    """The entire SCOP hierarchy.

    root -- The root node of the hierarchy

    Nodes are cached in two dictionaries: _sunidDict keyed by SCOP unique
    id, and _sidDict keyed by domain identifier.  When constructed with a
    database handle, both caches are filled lazily from SQL.
    """
    def __init__(self, cla_handle=None, des_handle=None, hie_handle=None,
                 dir_path=None, db_handle=None, version=None):
        """Build the SCOP hierarchy from the SCOP parsable files, or a sql backend.

        If no file handles are given, then a Scop object with a single
        empty root node is returned.

        If a directory and version are given (with dir_path=.., version=...) or
        file handles for each file, the whole scop tree will be built in memory.

        If a MySQLdb database handle is given, the tree will be built as needed,
        minimising construction times.  To build the SQL database use the methods
        write_xxx_sql to create the tables.
        """
        self._sidDict = {}
        self._sunidDict = {}

        if cla_handle==des_handle==hie_handle==dir_path==db_handle==None: return

        if dir_path is None and db_handle is None:
            if cla_handle == None or des_handle==None or hie_handle==None:
                raise RuntimeError("Need CLA, DES and HIE files to build SCOP")

        sunidDict = {}

        self.db_handle = db_handle
        try:
            if db_handle:
                # do nothing if we have a db handle, we'll do it all on the fly
                pass
            else:
                # open SCOP parseable files
                if dir_path:
                    if not version:
                        raise RuntimeError("Need SCOP version to find parsable files in directory")
                    if cla_handle or des_handle or hie_handle:
                        raise RuntimeError("Cannot specify SCOP directory and specific files")

                    cla_handle = _open_scop_file( dir_path, version, 'cla')
                    des_handle = _open_scop_file( dir_path, version, 'des')
                    hie_handle = _open_scop_file( dir_path, version, 'hie')

                root = Node()
                domains = []
                root.sunid=0
                root.type='ro'
                sunidDict[root.sunid] = root
                self.root = root
                root.description = 'SCOP Root'

                # Build the rest of the nodes using the DES file
                records = Des.parse(des_handle)
                for record in records:
                    if record.nodetype =='px':
                        n = Domain()
                        n.sid = record.name
                        domains.append(n)
                    else :
                        n = Node()
                    n.sunid = record.sunid
                    n.type = record.nodetype
                    n.sccs = record.sccs
                    n.description = record.description

                    sunidDict[n.sunid] = n

                # Glue all of the Nodes together using the HIE file
                records = Hie.parse(hie_handle)
                for record in records:
                    if record.sunid not in sunidDict:
                        # Python 2 print statement: flags sunids seen in HIE
                        # but missing from DES before the KeyError below.
                        print record.sunid

                    n = sunidDict[record.sunid]

                    if record.parent != '' : # Not root node
                        if record.parent not in sunidDict:
                            raise ValueError("Incomplete data?")
                        n.parent = sunidDict[record.parent]

                    for c in record.children:
                        if c not in sunidDict:
                            raise ValueError("Incomplete data?")
                        n.children.append(sunidDict[c])

                # Fill in the gaps with information from the CLA file
                sidDict = {}
                records = Cla.parse(cla_handle)
                for record in records:
                    n = sunidDict[record.sunid]
                    assert n.sccs == record.sccs
                    assert n.sid == record.sid
                    n.residues = record.residues
                    sidDict[n.sid] = n

                # Clean up
                self._sunidDict = sunidDict
                self._sidDict = sidDict
                self._domains = tuple(domains)

        finally:
            if dir_path:
                # If we opened the files, we close the files
                if cla_handle : cla_handle.close()
                if des_handle : des_handle.close()
                if hie_handle : hie_handle.close()

    def getRoot(self):
        # Sunid 0 is reserved for the synthetic root node.
        return self.getNodeBySunid(0)

    def getDomainBySid(self, sid):
        """Return a domain from its sid"""
        if sid in self._sidDict:
            return self._sidDict[sid]
        if self.db_handle:
            # Lazily pull the domain from SQL, then re-check the cache.
            self.getDomainFromSQL(sid=sid)
            if sid in self._sidDict:
                return self._sidDict[sid]
        else:
            return None

    def getNodeBySunid(self, sunid):
        """Return a node from its sunid"""
        if sunid in self._sunidDict:
            return self._sunidDict[sunid]
        if self.db_handle:
            # Lazily pull the node from SQL, then re-check the cache.
            self.getDomainFromSQL(sunid=sunid)
            if sunid in self._sunidDict:
                return self._sunidDict[sunid]
        else:
            return None

    def getDomains(self):
        """Returns an ordered tuple of all SCOP Domains"""
        if self.db_handle:
            return self.getRoot().getDescendents('px')
        else:
            return self._domains

    def write_hie(self, handle):
        """Build an HIE SCOP parsable file from this object"""
        nodes = self._sunidDict.values()
        # We order nodes to ease comparison with original file
        nodes.sort(lambda n1,n2: cmp(n1.sunid, n2.sunid))

        for n in nodes:
            handle.write(str(n.toHieRecord()))

    def write_des(self, handle):
        """Build a DES SCOP parsable file from this object"""
        nodes = self._sunidDict.values()
        # Original SCOP file is not ordered?
        nodes.sort(lambda n1,n2: cmp(n1.sunid, n2.sunid))

        for n in nodes:
            # The synthetic root is not part of the DES file.
            if n != self.root:
                handle.write(str(n.toDesRecord()))

    def write_cla(self, handle):
        """Build a CLA SCOP parsable file from this object"""
        nodes = self._sidDict.values()
        # We order nodes to ease comparison with original file
        nodes.sort(lambda n1,n2: cmp(n1.sunid, n2.sunid))

        for n in nodes:
            handle.write(str(n.toClaRecord()))

    def getDomainFromSQL(self, sunid=None, sid=None):
        """Load a node from the SQL backend using sunid or sid"""
        if sunid==sid==None: return None

        cur = self.db_handle.cursor()

        if sid:
            # Translate a sid into its sunid before the generic lookup.
            cur.execute("SELECT sunid FROM cla WHERE sid=%s", sid)
            res = cur.fetchone()
            if res is None:
                return None
            sunid = res[0]

        cur.execute("SELECT * FROM des WHERE sunid=%s", sunid)
        data = cur.fetchone()

        if data is not None:
            n = None

            #determine if Node or Domain
            if data[1] != "px":
                n = Node(scop=self)
                # Children are stored as sunids and resolved lazily later.
                cur.execute("SELECT child FROM hie WHERE parent=%s", sunid)
                children = []
                for c in cur.fetchall():
                    children.append(c[0])
                n.children = children
            else:
                n = Domain(scop=self)
                cur.execute("select sid, residues, pdbid from cla where sunid=%s",
                            sunid)
                [n.sid,n.residues,pdbid] = cur.fetchone()
                n.residues = Residues(n.residues)
                n.residues.pdbid=pdbid
                self._sidDict[n.sid] = n

            [n.sunid,n.type,n.sccs,n.description] = data

            if data[1] != 'ro':
                cur.execute("SELECT parent FROM hie WHERE child=%s", sunid)
                n.parent = cur.fetchone()[0]

            n.sunid = int(n.sunid)

            self._sunidDict[n.sunid] = n

    def getAscendentFromSQL(self, node, type):
        """Get ascendents using SQL backend"""
        # An ascendent must be strictly higher in the hierarchy than node.
        if nodeCodeOrder.index(type) >= nodeCodeOrder.index(node.type): return None

        cur = self.db_handle.cursor()
        cur.execute("SELECT "+type+" from cla WHERE "+node.type+"=%s", (node.sunid))
        result = cur.fetchone()
        if result is not None:
            return self.getNodeBySunid(result[0])
        else:
            return None

    def getDescendentsFromSQL(self, node, type):
        """Get descendents of a node using the database backend.  This avoids
        repeated iteration of SQL calls and is therefore much quicker than
        repeatedly calling node.getChildren().
        """
        # A descendent must be strictly lower in the hierarchy than node.
        if nodeCodeOrder.index(type) <= nodeCodeOrder.index(node.type): return []

        des_list = []

        # SQL cla table knows nothing about 'ro'
        if node.type == 'ro':
            # Recurse one level down and merge the per-child results.
            for c in node.getChildren():
                for d in self.getDescendentsFromSQL(c,type):
                    des_list.append(d)
            return des_list

        cur = self.db_handle.cursor()

        if type != 'px':
            cur.execute("SELECT DISTINCT des.sunid,des.type,des.sccs,description FROM \
            cla,des WHERE cla."+node.type+"=%s AND cla."+type+"=des.sunid", (node.sunid))
            data = cur.fetchall()
            for d in data:
                # Only build a Node the first time this sunid is seen.
                if int(d[0]) not in self._sunidDict:
                    n = Node(scop=self)
                    [n.sunid,n.type,n.sccs,n.description] = d
                    n.sunid=int(n.sunid)
                    self._sunidDict[n.sunid] = n

                    cur.execute("SELECT parent FROM hie WHERE child=%s", n.sunid)
                    n.parent = cur.fetchone()[0]

                    cur.execute("SELECT child FROM hie WHERE parent=%s", n.sunid)
                    children = []
                    for c in cur.fetchall():
                        children.append(c[0])
                    n.children = children
                des_list.append( self._sunidDict[int(d[0])] )
        else:
            cur.execute("SELECT cla.sunid,sid,pdbid,residues,cla.sccs,type,description,sp\
            FROM cla,des where cla.sunid=des.sunid and cla."+node.type+"=%s",
                        node.sunid)
            data = cur.fetchall()
            for d in data:
                if int(d[0]) not in self._sunidDict:
                    n = Domain(scop=self)
                    #[n.sunid, n.sid, n.pdbid, n.residues, n.sccs, n.type,
                    #n.description,n.parent] = data
                    [n.sunid,n.sid, pdbid,n.residues,n.sccs,n.type,n.description,
                     n.parent] = d[0:8]
                    n.residues = Residues(n.residues)
                    n.residues.pdbid = pdbid
                    n.sunid = int(n.sunid)
                    self._sunidDict[n.sunid] = n
                    self._sidDict[n.sid] = n
                des_list.append( self._sunidDict[int(d[0])] )

        return des_list

    def write_hie_sql(self, handle):
        """Write HIE data to SQL database"""
        cur = handle.cursor()

        cur.execute("DROP TABLE IF EXISTS hie")
        cur.execute("CREATE TABLE hie (parent INT, child INT, PRIMARY KEY (child),\
        INDEX (parent) )")

        # One row per parent->child edge of the hierarchy.
        for p in self._sunidDict.values():
            for c in p.children:
                cur.execute("INSERT INTO hie VALUES (%s,%s)" % (p.sunid, c.sunid))

    def write_cla_sql(self, handle):
        """Write CLA data to SQL database"""
        cur = handle.cursor()

        cur.execute("DROP TABLE IF EXISTS cla")
        cur.execute("CREATE TABLE cla (sunid INT, sid CHAR(8), pdbid CHAR(4),\
        residues VARCHAR(50), sccs CHAR(10), cl INT, cf INT, sf INT, fa INT,\
        dm INT, sp INT, px INT, PRIMARY KEY (sunid), INDEX (SID) )")

        # One row per domain, with its full chain of ascendent sunids.
        for n in self._sidDict.values():
            c = n.toClaRecord()
            cur.execute( "INSERT INTO cla VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
                         (n.sunid, n.sid, c.residues.pdbid, c.residues, n.sccs,
                          n.getAscendent('cl').sunid, n.getAscendent('cf').sunid,
                          n.getAscendent('sf').sunid, n.getAscendent('fa').sunid,
                          n.getAscendent('dm').sunid, n.getAscendent('sp').sunid,
                          n.sunid ))

    def write_des_sql(self, handle):
        """Write DES data to SQL database"""
        cur = handle.cursor()

        cur.execute("DROP TABLE IF EXISTS des")
        cur.execute("CREATE TABLE des (sunid INT, type CHAR(2), sccs CHAR(10),\
        description VARCHAR(255),\
        PRIMARY KEY (sunid) )")

        for n in self._sunidDict.values():
            cur.execute( "INSERT INTO des VALUES (%s,%s,%s,%s)",
                         ( n.sunid, n.type, n.sccs, n.description ) )
class Node:
    """ A node in the Scop hierarchy

    sunid  -- SCOP unique identifiers. e.g. '14986'

    parent -- The parent node (or, when SQL-backed, its sunid)

    children -- A list of child nodes (or, when SQL-backed, their sunids)

    sccs     -- SCOP concise classification string. e.g. 'a.1.1.2'

    type     -- A 2 letter node type code. e.g. 'px' for domains

    description -- Human readable description of this node
    """
    def __init__(self, scop=None):
        """Create a Node in the scop hierarchy.  If a Scop instance is provided to the
        constructor, this will be used to lookup related references using the SQL
        methods.  If no instance is provided, it is assumed the whole tree exists
        and is connected."""
        self.sunid=''
        self.parent = None
        self.children=[]
        self.sccs = ''
        self.type =''
        self.description =''
        self.scop=scop

    def __str__(self):
        # "<sunid> <sccs> <type> <description>"
        s = []
        s.append(str(self.sunid))
        s.append(self.sccs)
        s.append(self.type)
        s.append(self.description)

        return " ".join(s)

    def toHieRecord(self):
        """Return an Hie.Record"""
        rec = Hie.Record()
        rec.sunid = str(self.sunid)
        if self.getParent() : #Not root node
            rec.parent = str(self.getParent().sunid)
        else:
            rec.parent = '-'
        for c in self.getChildren():
            rec.children.append(str(c.sunid))
        return rec

    def toDesRecord(self):
        """Return a Des.Record"""
        rec = Des.Record()
        rec.sunid = str(self.sunid)
        rec.nodetype = self.type
        rec.sccs = self.sccs
        rec.description = self.description

        return rec

    def getChildren(self):
        """Return a list of children of this Node"""
        if self.scop is None:
            return self.children
        else:
            # SQL-backed: children hold sunids; resolve them lazily.
            return map ( self.scop.getNodeBySunid, self.children )

    def getParent(self):
        """Return the parent of this Node"""
        if self.scop is None:
            return self.parent
        else:
            # SQL-backed: parent holds a sunid; resolve it lazily.
            return self.scop.getNodeBySunid( self.parent )

    def getDescendents( self, node_type):
        """ Return a list of all descendent nodes of the given type.  Node type can be a
        two letter code or longer description.  e.g. 'fa' or 'family'
        """
        if node_type in _nodetype_to_code:
            node_type = _nodetype_to_code[node_type]

        nodes = [self]
        if self.scop:
            return self.scop.getDescendentsFromSQL(self,node_type)
        # Breadth-first walk down the tree, one level per iteration, until
        # the current level has the requested type.
        while nodes[0].type != node_type:
            if nodes[0].type == 'px' : return [] # Fell off the bottom of the hierarchy

            child_list = []
            for n in nodes:
                for child in n.getChildren():
                    child_list.append( child )
            nodes = child_list

        return nodes

    def getAscendent( self, node_type):
        """ Return the ancestor node of the given type, or None.  Node type can be a
        two letter code or longer description.  e.g. 'fa' or 'family'"""
        if node_type in _nodetype_to_code:
            node_type = _nodetype_to_code[node_type]

        if self.scop:
            return self.scop.getAscendentFromSQL(self,node_type)
        else:
            # Walk up via parent links until the requested type is found.
            n = self
            if n.type == node_type: return None
            while n.type != node_type:
                if n.type == 'ro': return None # Fell off the top of the hierarchy
                n = n.getParent()

            return n
class Domain(Node):
    """ A SCOP domain. A leaf node in the Scop hierarchy.

    sid      -- The SCOP domain identifier. e.g. 'd5hbib_'

    residues -- A Residues object. It defines the collection
                of PDB atoms that make up this domain.
    """
    def __init__(self,scop=None):
        Node.__init__(self,scop=scop)
        self.sid = ''
        self.residues = None

    def __str__(self):
        # ASTRAL-header-like representation; see parse_domain for the inverse.
        s = []
        s.append(self.sid)
        s.append(self.sccs)
        s.append("("+str(self.residues)+")")

        if not self.getParent():
            s.append(self.description)
        else:
            # Compose the description from the protein (dm) node and the
            # species (sp) node one and two levels up, respectively.
            sp = self.getParent()
            dm = sp.getParent()
            s.append(dm.description)
            s.append("{"+sp.description+"}")

        return " ".join(s)

    def toDesRecord(self):
        """Return a Des.Record"""
        rec = Node.toDesRecord(self)
        rec.name = self.sid
        return rec

    def toClaRecord(self):
        """Return a Cla.Record"""
        rec = Cla.Record()
        rec.sid = self.sid
        rec.residues = self.residues
        rec.sccs = self.sccs
        rec.sunid = self.sunid

        # Walk up to the root collecting (type, sunid) pairs, then reverse
        # so the hierarchy reads top-down.
        n = self
        while n.sunid != 0: #Not root node
            rec.hierarchy.append( (n.type, str(n.sunid)) )
            n = n.getParent()

        rec.hierarchy.reverse()

        return rec
class Astral:
    """Abstraction of the ASTRAL database, which has sequences for all the SCOP domains,
    as well as clusterings by percent id or evalue.

    Data may come from a scopseq-x.xx directory of ASTRAL release files,
    from a single FASTA file (loaded into memory), or from a MySQL database
    previously populated with writeToSQL().  Clustered domain sets are
    cached per threshold in the *Datasets/*Datahash dictionaries.
    """

    def __init__( self, dir_path=None, version=None, scop=None,
                  astral_file=None, db_handle=None):
        """
        Initialise the astral database.

        You must provide either a directory of SCOP files:

        dir_path - string, the path to location of the scopseq-x.xx directory
                   (not the directory itself), and
        version   -a version number.

        or, a FASTA file:

        astral_file - string, a path to a fasta file (which will be loaded in memory)

        or, a MYSQL database:

        db_handle - a database handle for a MYSQL database containing a table
                    'astral' with the astral data in it.  This can be created
                    using writeToSQL.
        """
        if astral_file==dir_path==db_handle==None:
            raise RuntimeError("Need either file handle, or (dir_path + "\
                               + "version) or database handle to construct Astral")
        if not scop:
            raise RuntimeError("Must provide a Scop instance to construct")

        self.scop = scop
        self.db_handle = db_handle
        # FIX: always define self.path so the "No scopseq directory" checks in
        # domainsClusteredByEv/Id raise the intended RuntimeError (instead of
        # AttributeError) when constructed from a FASTA file or database.
        self.path = None

        if not astral_file and not db_handle:
            if dir_path == None or version == None:
                raise RuntimeError("must provide dir_path and version")
            self.version = version
            self.path = os.path.join( dir_path, "scopseq-%s" % version)
            astral_file = "astral-scopdom-seqres-all-%s.fa" % self.version
            astral_file = os.path.join (self.path, astral_file)

        if astral_file:
            #Build a dictionary of SeqRecord objects in the FASTA file, IN MEMORY
            self.fasta_dict = SeqIO.to_dict(SeqIO.parse(open(astral_file), "fasta"))

        self.astral_file = astral_file
        self.EvDatasets = {}
        self.EvDatahash = {}
        self.IdDatasets = {}
        self.IdDatahash = {}

    def domainsClusteredByEv(self,id):
        """get domains clustered by evalue"""
        if id not in self.EvDatasets:
            if self.db_handle:
                self.EvDatasets[id] = self.getAstralDomainsFromSQL(astralEv_to_sql[id])
            else:
                if not self.path:
                    raise RuntimeError("No scopseq directory specified")

                file_prefix = "astral-scopdom-seqres-sel-gs"
                filename = "%s-e100m-%s-%s.id" % (file_prefix, astralEv_to_file[id] ,
                                                  self.version)
                filename = os.path.join(self.path,filename)
                self.EvDatasets[id] = self.getAstralDomainsFromFile(filename)
        return self.EvDatasets[id]

    def domainsClusteredById(self,id):
        """get domains clustered by percent id"""
        if id not in self.IdDatasets:
            if self.db_handle:
                self.IdDatasets[id] = self.getAstralDomainsFromSQL("id"+str(id))
            else:
                if not self.path:
                    raise RuntimeError("No scopseq directory specified")

                file_prefix = "astral-scopdom-seqres-sel-gs"
                filename = "%s-bib-%s-%s.id" % (file_prefix, id, self.version)
                filename = os.path.join(self.path,filename)
                self.IdDatasets[id] = self.getAstralDomainsFromFile(filename)
        return self.IdDatasets[id]

    def getAstralDomainsFromFile(self,filename=None,file_handle=None):
        """Get the scop domains from a file containing a list of sids"""
        # FIX: the original compared against lowercase 'none', which raised a
        # NameError instead of the intended argument-validation error.
        if file_handle is None and filename is None:
            raise RuntimeError("You must provide a filename or handle")
        if not file_handle:
            file_handle = open(filename)
        doms = []
        while 1:
            line = file_handle.readline()
            if not line:
                break
            line = line.rstrip()
            doms.append(line)
        # Only close the handle if we opened it ourselves.
        if filename:
            file_handle.close()
        # Keep only domain sids (they start with 'd') and resolve each one.
        doms = filter( lambda a: a[0]=='d', doms )
        doms = map( self.scop.getDomainBySid, doms )
        return doms

    def getAstralDomainsFromSQL(self, column):
        """Load a set of astral domains from a column in the astral table of a MYSQL
        database (which can be created with writeToSQL(...))"""
        cur = self.db_handle.cursor()
        cur.execute("SELECT sid FROM astral WHERE "+column+"=1")
        data = cur.fetchall()
        data = map( lambda x: self.scop.getDomainBySid(x[0]), data)
        return data

    def getSeqBySid(self,domain):
        """get the seq record of a given domain from its sid"""
        if self.db_handle is None:
            return self.fasta_dict[domain].seq
        else:
            cur = self.db_handle.cursor()
            cur.execute("SELECT seq FROM astral WHERE sid=%s", domain)
            return Seq(cur.fetchone()[0])

    def getSeq(self,domain):
        """Return seq associated with domain"""
        return self.getSeqBySid(domain.sid)

    def hashedDomainsById(self,id):
        """Get domains clustered by sequence identity in a dict"""
        if id not in self.IdDatahash:
            self.IdDatahash[id] = {}
            for d in self.domainsClusteredById(id):
                self.IdDatahash[id][d] = 1
        return self.IdDatahash[id]

    def hashedDomainsByEv(self,id):
        """Get domains clustered by evalue in a dict"""
        if id not in self.EvDatahash:
            self.EvDatahash[id] = {}
            for d in self.domainsClusteredByEv(id):
                self.EvDatahash[id][d] = 1
        return self.EvDatahash[id]

    def isDomainInId(self,dom,id):
        """Returns true if the domain is in the astral clusters for percent ID"""
        return dom in self.hashedDomainsById(id)

    def isDomainInEv(self,dom,id):
        """Returns true if the domain is in the ASTRAL clusters for evalues"""
        return dom in self.hashedDomainsByEv(id)

    def writeToSQL(self, db_handle):
        """Write the ASTRAL database to a MYSQL database"""
        cur = db_handle.cursor()
        cur.execute("DROP TABLE IF EXISTS astral")
        cur.execute("CREATE TABLE astral (sid CHAR(8), seq TEXT, PRIMARY KEY (sid))")
        for dom in self.fasta_dict.keys():
            cur.execute( "INSERT INTO astral (sid,seq) values (%s,%s)",
                         (dom, self.fasta_dict[dom].seq.data))
        # One boolean membership column per percent-id clustering level.
        for i in astralBibIds:
            cur.execute("ALTER TABLE astral ADD (id"+str(i)+" TINYINT)")
            for d in self.domainsClusteredById(i):
                cur.execute("UPDATE astral SET id"+str(i)+"=1 WHERE sid=%s",
                            d.sid)
        # One boolean membership column per e-value clustering level.
        for ev in astralEvs:
            cur.execute("ALTER TABLE astral ADD ("+astralEv_to_sql[ev]+" TINYINT)")
            for d in self.domainsClusteredByEv(ev):
                cur.execute("UPDATE astral SET "+astralEv_to_sql[ev]+"=1 WHERE sid=%s",
                            d.sid)
def search(pdb=None, key=None, sid=None, disp=None, dir=None, loc=None,
           cgi='http://scop.mrc-lmb.cam.ac.uk/scop/search.cgi', **keywds):
    """Access SCOP's search.cgi and return a handle to the results.

    The named parameters are explained in the online help file:
    http://scop.mrc-lmb.cam.ac.uk/scop/help.html

    Any extra keyword arguments are passed through to the CGI script
    unchanged.  Raises an IOError on a network error.
    """
    variables = {}
    # Forward only the options the caller actually supplied.
    for name, value in (('pdb', pdb), ('key', key), ('sid', sid),
                        ('disp', disp), ('dir', dir), ('loc', loc)):
        if value is not None:
            variables[name] = value
    variables.update(keywds)
    return _open(cgi, variables)
def _open(cgi, params={}, get=1):
    """_open(cgi, params={}, get=1) -> UndoHandle

    Open a handle to SCOP.  cgi is the URL for the cgi script to access.
    params is a dictionary with the options to pass to it.  get is a boolean
    that describes whether a GET should be used.  Does some
    simple error checking, and will raise an IOError if it encounters one.
    """
    # NOTE: Python 2 urllib API (urlencode/urlopen live on the top-level module).
    import urllib
    from Bio import File
    # The mutable default 'params={}' is safe here only because the dict is
    # never mutated inside this function.
    # Open a handle to SCOP.
    options = urllib.urlencode(params)
    if get:  # do a GET
        fullcgi = cgi
        if options:
            fullcgi = "%s?%s" % (cgi, options)
        handle = urllib.urlopen(fullcgi)
    else:    # do a POST
        handle = urllib.urlopen(cgi, options)

    # Wrap the handle inside an UndoHandle.
    uhandle = File.UndoHandle(handle)

    # Should I check for 404?  timeout? etc?
    return uhandle
| [
"hochshi@gmail.com"
] | hochshi@gmail.com |
528771585ec232b3bbad088c1ceb97469132ec70 | baaeb8c1d335e258fd49b5ef024ac39790fd660f | /backend/test/test_invite.py | 8701475410a01fb8aadf206d8a29c2720cb644a6 | [] | no_license | ReactARDev/React_Redux_Python | f0b80a9d2a603b38f8e144966bc899c5aa3690e6 | afdb4a55f82fdff86686ad955448a4168d05c739 | refs/heads/master | 2021-10-10T19:28:05.142652 | 2019-01-15T21:24:06 | 2019-01-15T21:24:06 | 159,198,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,757 | py | import json
import test_app
import factories
import pprint
from app import db_session_users
from schemas.base_users import User, UserAgency, UserFollowedEntity, MarketingCampaign, UserTopic, Subscription, UserFolder, AggregatedAnnotations
num_of_default_agencies_at_signup = 5
class RegisterTest(test_app.AppTest):
def test_invite(self):
emails = ['foo@example.com', 'foobarwat@example.com']
for i, email in enumerate(emails):
num_users = test_app.db_session_users.query(test_app.base_users.User)\
.filter_by(email=email).count()
self.assertEqual(0, num_users)
# N.B. upper case the second example email in the initial invite request to simulate a scenario
# where the user first sent it to us upper cased. the value remains otherwise lower case, so validation
# below should all still work
req_body = json.dumps({'email': email.upper() if i == 1 else email})
resp = self.client.post(
"/invite",
headers={'Authorization': self.admin_user_token},
data=req_body
)
self.assert200(resp)
new_user = db_session_users.query(User).filter_by(email=email).first()
self.assertFalse(new_user.enabled)
reset_token = new_user.reset_token
self.assertIsNotNone(reset_token)
# don't allow a second submission
resp = self.client.post(
"/invite",
headers={'Authorization': self.admin_user_token},
data=req_body
)
self.assert400(resp)
# fails for non-admin user
resp = self.client.post(
"/invite",
headers={'Authorization': self.token},
data=req_body
)
self.assert403(resp)
# ...unless resend is true
req_body = json.dumps({'email': email, 'resend': True})
resp = self.client.post(
"/invite",
headers={'Authorization': self.admin_user_token},
data=req_body
)
self.assert200(resp)
self.assertIn('resent_invite_time', new_user.properties)
self.assertNotEqual(reset_token, new_user.reset_token)
req_body = json.dumps({
"first_name": "First",
"last_name": "Last",
"email": email,
"token": new_user.reset_token,
"new_password": "somethingnew",
"agencies": [80, 188],
"other_agencies": "Something you didn't think of",
"topics": [1, 2, 3],
"other_topics": "Something else",
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
db_session_users.refresh(new_user)
self.assertIsInstance(new_user.properties['activation_time'], unicode)
self.assertTrue(new_user.enabled)
def test_activation(self):
user = factories.UserFactory.build(
first_name=None,
last_name=None,
)
user.reset_token = 'foo'
orig_props = { 'property': 'exists', 'arrayprop': [1,2,3,4]}
user.properties = orig_props
user.enabled = False
db_session_users.add(user)
db_session_users.flush()
db_session_users.refresh(user)
initial_hash = user.password_hash
req_body = json.dumps({
"first_name": "First",
"last_name": "Last",
"email": user.email,
"token": "foo",
"new_password": "somethingnew",
"agencies": [80, 188, 78987958795], # one invalid id
# XXX these aren't really state agencies because they're not in the fixture:
"state_agencies": ["US-CA", "US-NY"],
"other_agencies": "Something you didn't think of",
"other_state_agencies": "California dreams",
"other_topics": "Something else",
'is_contributor': True
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
new_user = db_session_users.query(User).filter_by(email=user.email).first()
self.assertIsNone(new_user.reset_token)
self.assertNotEqual(initial_hash, new_user.password_hash)
self.assertEqual('First', new_user.first_name)
self.assertEqual('Last', new_user.last_name)
self.assertTrue(new_user.enabled)
self.assertDictContainsSubset(orig_props, new_user.properties)
self.assertTrue('contributor' in new_user.roles)
subscription = db_session_users.query(Subscription).filter_by(user_id=user.id).first()
self.assertEqual('free_trial', subscription.stripe_id)
self.assertEqual(True, subscription.latest)
self.assertEqual('active', subscription.status)
folders = db_session_users.query(UserFolder).filter_by(user_id=user.id).all()
bookmarked = filter(lambda folder : folder.name == 'Bookmarked', folders)
read = filter(lambda folder : folder.name == 'Read', folders)
self.assertIsInstance(folders, list)
self.assertEqual(len(folders), 2)
self.assertEqual(len(bookmarked), 1)
self.assertEqual(len(read), 1)
for p in ['other_topics', 'other_agencies', 'other_state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), unicode)
for p in ['agencies', 'state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), list)
num_user_agencies = db_session_users.query(UserAgency).filter_by(user_id=user.id).count()
self.assertEqual(num_of_default_agencies_at_signup, num_user_agencies) # should not include invalid selection
num_user_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=user.id).count()
self.assertEqual(4, num_user_entities)
num_news_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=user.id, entity_type='news_sources').count()
self.assertEqual(2, num_news_entities)
num_user_topics = db_session_users.query(UserTopic).filter_by(user_id=user.id).count()
self.assertEqual(len(AggregatedAnnotations.topic_id_name_mapping.keys()), num_user_topics)
# dry run should now fail
req_body = json.dumps({
'email': user.email,
'token': 'does not matter',
'dry_run': True,
})
resp = self.client.post('/activate', data=req_body)
self.assert400(resp)
self.assertRegexpMatches(resp.json['error'], r'enabled')
def test_activation_with_edu_email(self):
user = factories.UserFactory.build(
first_name=None,
last_name=None,
)
user.email = 'foo@hogwarts.edu'
user.reset_token = 'foo'
orig_props = { 'property': 'exists', 'arrayprop': [1,2,3,4]}
user.properties = orig_props
user.enabled = False
db_session_users.add(user)
db_session_users.flush()
db_session_users.refresh(user)
initial_hash = user.password_hash
req_body = json.dumps({
"first_name": "First",
"last_name": "Last",
"email": user.email,
"token": "foo",
"new_password": "somethingnew",
"agencies": [80, 188, 78987958795], # one invalid id
# XXX these aren't really state agencies because they're not in the fixture:
"state_agencies": ["US-CA", "US-NY"],
"other_agencies": "Something you didn't think of",
"other_state_agencies": "California dreams",
"other_topics": "Something else",
'is_contributor': True
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
subscription = db_session_users.query(Subscription).filter_by(user_id=user.id).first()
self.assertEqual('free_trial_120months', subscription.stripe_id)
self.assertEqual(True, subscription.latest)
self.assertEqual('active', subscription.status)
def test_activation_dry_run(self):
user = factories.UserFactory.build(
first_name=None,
last_name=None,
)
user.reset_token = 'bar'
user.enabled = False
db_session_users.add(user)
db_session_users.flush()
db_session_users.refresh(user)
# try with a valid email/token first
req_body = json.dumps({
'email': user.email,
'token': 'bar',
'dry_run': True,
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
self.assertFalse(resp.json['marketing_campaign'])
# invalid token
req_body = json.dumps({
'email': user.email,
'token': 'baz',
'dry_run': True,
})
resp = self.client.post('/activate', data=req_body)
self.assert400(resp)
# invalid email
req_body = json.dumps({
'email': 'invalid@example.com',
'token': 'bar',
'dry_run': True,
})
resp = self.client.post('/activate', data=req_body)
self.assert400(resp)
# missing email
req_body = json.dumps({
'email': None,
'token': 'bar',
'dry_run': True,
})
resp = self.client.post('/activate', data=req_body)
self.assert400(resp)
def test_activation_marketing_campaign(self):
marketing_campaign = MarketingCampaign(name='foo', start_date="01/01/2017", end_date="01/05/2017", notes='bar', created_by_user_id=self.user.id)
marketing_campaign.gen_token()
db_session_users.add(marketing_campaign)
db_session_users.commit()
token = marketing_campaign.token
# try with a valid email/token first
req_body = json.dumps({
'token': token,
'dry_run': True,
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
self.assertTrue(resp.json['marketing_campaign'])
signup_email = "email@marketing.campaign.com"
req_body = json.dumps({
"first_name": "First",
"last_name": "Last",
"email": signup_email,
"token": token,
"new_password": "somethingnew",
"agencies": [80, 188, 78987958795], # one invalid id
# XXX these aren't really state agencies because they're not in the fixture:
"state_agencies": ["US-CA", "US-NY"],
"other_agencies": "Something you didn't think of",
"other_state_agencies": "California dreams",
"topics": [1, 2, 3],
"other_topics": "Something else",
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
self.assertIsInstance(resp.json['jwt_token'], unicode)
new_user = db_session_users.query(User).filter_by(email=signup_email).first()
self.assertIsInstance(new_user.reset_token, unicode)
self.assertEqual('First', new_user.first_name)
self.assertEqual('Last', new_user.last_name)
self.assertFalse(new_user.enabled)
self.assertIsInstance(new_user.password_hash, unicode)
self.assertEqual(len(new_user.marketing_campaigns), 1)
for p in ['other_topics', 'other_agencies', 'other_state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), unicode)
for p in ['agencies', 'state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), list)
num_user_agencies = db_session_users.query(UserAgency).filter_by(user_id=new_user.id).count()
self.assertEqual(num_of_default_agencies_at_signup, num_user_agencies) # should not include invalid selection
num_user_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=new_user.id).count()
self.assertEqual(4, num_user_entities)
num_news_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=new_user.id, entity_type='news_sources').count()
self.assertEqual(2, num_news_entities)
num_user_topics = db_session_users.query(UserTopic).filter_by(user_id=new_user.id).count()
self.assertEqual(3, num_user_topics)
# validate access works with temporary token
access_resp = self.client.get("/current_user", headers={'Authorization': resp.json['jwt_token']})
self.assert200(access_resp)
# run an extra api call that should fail on /activate with this email to confirm the token is not overwritten
req_body = json.dumps({
"email": signup_email,
"new_password": "foo"
})
resp = self.client.post('/activate', data=req_body)
self.assert400(resp)
# finally, use the confirm route to enable the user
req_body = json.dumps({
"email": signup_email,
"token": new_user.reset_token
})
resp = self.client.post('/confirm', data=req_body)
new_user = db_session_users.query(User).filter_by(email=signup_email).first()
self.assertTrue(new_user.enabled)
self.assertIn('confirmed_date', new_user.properties)
def test_activation_no_token(self):
# try with a valid email/token first
req_body = json.dumps({
'dry_run': True,
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
self.assertFalse(resp.json['marketing_campaign'])
signup_email = "email@no.token.com"
req_body = json.dumps({
"first_name": "First",
"last_name": "Last",
"email": signup_email,
"new_password": "somethingnew",
"agencies": [80, 188, 78987958795], # one invalid id
# XXX these aren't really state agencies because they're not in the fixture:
"state_agencies": ["US-CA", "US-NY"],
"other_agencies": "Something you didn't think of",
"other_state_agencies": "California dreams",
"topics": [1, 2, 3],
"other_topics": "Something else",
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
self.assertIsInstance(resp.json['jwt_token'], unicode)
new_user = db_session_users.query(User).filter_by(email=signup_email).first()
self.assertIsInstance(new_user.reset_token, unicode)
self.assertEqual('First', new_user.first_name)
self.assertEqual('Last', new_user.last_name)
self.assertFalse(new_user.enabled)
self.assertIsInstance(new_user.password_hash, unicode)
self.assertEqual(len(new_user.marketing_campaigns), 0)
for p in ['other_topics', 'other_agencies', 'other_state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), unicode)
for p in ['agencies', 'state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), list)
num_user_agencies = db_session_users.query(UserAgency).filter_by(user_id=new_user.id).count()
self.assertEqual(num_of_default_agencies_at_signup, num_user_agencies) # should not include invalid selection
num_user_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=new_user.id).count()
self.assertEqual(4, num_user_entities)
num_news_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=new_user.id, entity_type='news_sources').count()
self.assertEqual(2, num_news_entities)
num_user_topics = db_session_users.query(UserTopic).filter_by(user_id=new_user.id).count()
self.assertEqual(3, num_user_topics)
# validate access works with temporary token
access_resp = self.client.get("/current_user", headers={'Authorization': resp.json['jwt_token']})
self.assert200(access_resp)
# run an extra api call that should fail on /activate with this email to confirm the token is not overwritten
req_body = json.dumps({
"email": signup_email,
"new_password": "foo"
})
resp = self.client.post('/activate', data=req_body)
self.assert400(resp)
# finally, use the confirm route to enable the user
req_body = json.dumps({
"email": signup_email,
"token": new_user.reset_token
})
resp = self.client.post('/confirm', data=req_body)
self.assert200(resp)
new_user = db_session_users.query(User).filter_by(email=signup_email).first()
self.assertTrue(new_user.enabled)
self.assertIn('confirmed_date', new_user.properties)
resp = self.client.post('/confirm', data=req_body)
self.assert400(resp)
def test_invite_mixed(self):
emails = ['foobar1@example.com', 'foobarwat1@example.com']
for i, email in enumerate(emails):
num_users = test_app.db_session_users.query(test_app.base_users.User)\
.filter_by(email=email).count()
self.assertEqual(0, num_users)
# N.B. upper case the second example email in the initial invite request to simulate a scenario
# where the user first sent it to us upper cased. the value remains otherwise lower case, so validation
# below should all still work
req_body = json.dumps({'email': email.upper() if i == 1 else email})
resp = self.client.post(
"/invite",
headers={'Authorization': self.admin_user_token},
data=req_body
)
self.assert200(resp)
new_user = db_session_users.query(User).filter_by(email=email).first()
self.assertFalse(new_user.enabled)
reset_token = new_user.reset_token
self.assertIsNotNone(reset_token)
# don't allow a second submission
resp = self.client.post(
"/invite",
headers={'Authorization': self.admin_user_token},
data=req_body
)
self.assert400(resp)
# fails for non-admin user
resp = self.client.post(
"/invite",
headers={'Authorization': self.token},
data=req_body
)
self.assert403(resp)
# ...unless resend is true
req_body = json.dumps({'email': email, 'resend': True})
resp = self.client.post(
"/invite",
headers={'Authorization': self.admin_user_token},
data=req_body
)
self.assert200(resp)
self.assertNotEqual(reset_token, new_user.reset_token)
req_body = json.dumps({
"first_name": "First",
"last_name": "Last",
"email": email,
"new_password": "somethingnew",
"agencies": [80, 188],
"other_agencies": "Something you didn't think of",
"other_state_agencies": "California dreams",
"topics": [1, 2, 3],
"other_topics": "Something else",
"state_agencies": ["US-CA", "US-NY"],
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
db_session_users.refresh(new_user)
self.assertIsInstance(new_user.properties['activation_time'], unicode)
self.assertFalse(new_user.enabled)
self.assertIsInstance(resp.json['jwt_token'], unicode)
self.assertIsInstance(new_user.reset_token, unicode)
self.assertEqual('First', new_user.first_name)
self.assertEqual('Last', new_user.last_name)
self.assertFalse(new_user.enabled)
self.assertIsInstance(new_user.password_hash, unicode)
self.assertEqual(len(new_user.marketing_campaigns), 0)
for p in ['other_topics', 'other_agencies', 'other_state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), unicode)
for p in ['agencies', 'state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), list)
num_user_agencies = db_session_users.query(UserAgency).filter_by(user_id=new_user.id).count()
self.assertEqual(num_of_default_agencies_at_signup, num_user_agencies) # should not include invalid selection
num_user_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=new_user.id).count()
self.assertEqual(4, num_user_entities)
num_news_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=new_user.id, entity_type='news_sources').count()
self.assertEqual(2, num_news_entities)
num_user_topics = db_session_users.query(UserTopic).filter_by(user_id=new_user.id).count()
self.assertEqual(3, num_user_topics)
# validate access works with temporary token
access_resp = self.client.get("/current_user", headers={'Authorization': resp.json['jwt_token']})
self.assert200(access_resp)
# run an extra api call that should fail on /activate with this email to confirm the token is not overwritten
req_body = json.dumps({
"email": email,
"new_password": "foo"
})
resp = self.client.post('/activate', data=req_body)
self.assert400(resp)
# finally, use the confirm route to enable the user
req_body = json.dumps({
"email": email,
"token": new_user.reset_token
})
resp = self.client.post('/confirm', data=req_body)
self.assert200(resp)
new_user = db_session_users.query(User).filter_by(email=email).first()
self.assertTrue(new_user.enabled)
self.assertIn('confirmed_date', new_user.properties)
resp = self.client.post('/confirm', data=req_body)
self.assert400(resp)
def test_check_email(self):
resp = self.client.get("/check_email?email=demo@jurispect.com")
self.assert200(resp)
self.assertIn('email_in_use', resp.json)
self.assertIsInstance(resp.json['email_in_use'], bool)
def test_resend_confirmation_email(self):
# first create a user that has signed up (not invited) and requires a confirmation
req_body = json.dumps({
"first_name": "First",
"last_name": "Last",
"email": 'a@example.com',
"token": None,
"new_password": "somethingnew",
"agencies": [80, 188],
"other_agencies": "Something you didn't think of",
"topics": [1, 2, 3],
"other_topics": "Something else",
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
user = db_session_users.query(User).filter_by(email='a@example.com').first()
db_session_users.refresh(user)
# Now that the user is created lets resend them a confirmation email
req_body = json.dumps({'email': user.email })
resp = self.client.post(
"/send_confirm_email",
headers={'Authorization': self.token},
data=req_body
)
self.assert200(resp)
self.assertIn('confirmation_resent_time', user.properties)
# Now lets test if we get the error we expect
req_body = json.dumps({})
resp = self.client.post(
"/send_confirm_email",
headers={'Authorization': self.token},
data=req_body
)
self.assert400(resp)
#now lets send a false email
req_body = json.dumps({'email': 'blah@blah.com'})
resp = self.client.post(
"/send_confirm_email",
headers={'Authorization': self.token},
data=req_body
)
self.assert400(resp)
| [
"talentmobile9999@gmail.com"
] | talentmobile9999@gmail.com |
7be475a7e62b3831ab5c17005a6cd0f0731b9dd5 | 22d3c25de80b0b1cf2302ebf953afc6370a0974f | /device/barcode-reader/keyboard.py | 969e29271877d2aa32542c37de3e9d5450f2e647 | [] | no_license | earn-earnrising/poc | 4ce150ecce9a14e01a2f2ef36e40c6b35c00b088 | 4198b181834f92e1c0e3ce193a8c278d97210aba | refs/heads/master | 2022-05-24T01:25:49.945414 | 2020-04-27T13:03:18 | 2020-04-27T13:03:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | #! /usr/bin/env python3
from Xlib.display import Display
import Xlib
from pprint import pprint
import sys
key_codes = range(0, 255)
def handle_event(event):
if event.type == Xlib.X.KeyRelease:
key_code = event.detail
if key_code in key_codes:
print("KeyRelease: %d" % key_code)
display = Display()
root = display.screen().root
root.change_attributes(event_mask=Xlib.X.KeyReleaseMask)
for keycode in key_codes:
root.grab_key(keycode, Xlib.X.AnyModifier, 1, Xlib.X.GrabModeAsync, Xlib.X.GrabModeAsync)
while 1:
event = root.display.next_event()
handle_event(event)
| [
"andras.tim@gmail.com"
] | andras.tim@gmail.com |
1484c55af6358e41228214378c276a467a0cf6f7 | b39d72ba5de9d4683041e6b4413f8483c817f821 | /GeneVisualization/ass1/Lib/site-packages/itk/itkLiThresholdCalculatorPython.py | 556f811f79e55e27f6ef3e8cafcd931ef76386cb | [] | no_license | ssalmaan/DataVisualization | d93a0afe1290e4ea46c3be5718d503c71a6f99a7 | eff072f11337f124681ce08742e1a092033680cc | refs/heads/master | 2021-03-13T05:40:23.679095 | 2020-03-11T21:37:45 | 2020-03-11T21:37:45 | 246,642,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,840 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3, 0, 0):
new_instancemethod = lambda func, inst, cls: _itkLiThresholdCalculatorPython.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_itkLiThresholdCalculatorPython', [dirname(__file__)])
except ImportError:
import _itkLiThresholdCalculatorPython
return _itkLiThresholdCalculatorPython
if fp is not None:
try:
_mod = imp.load_module('_itkLiThresholdCalculatorPython', fp, pathname, description)
finally:
fp.close()
return _mod
_itkLiThresholdCalculatorPython = swig_import_helper()
del swig_import_helper
else:
import _itkLiThresholdCalculatorPython
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import itkHistogramThresholdCalculatorPython
import itkHistogramPython
import itkArrayPython
import vnl_vectorPython
import vnl_matrixPython
import stdcomplexPython
import pyBasePython
import itkSamplePython
import itkVectorPython
import vnl_vector_refPython
import itkFixedArrayPython
import ITKCommonBasePython
import itkSimpleDataObjectDecoratorPython
import itkRGBAPixelPython
import itkCovariantVectorPython
import itkRGBPixelPython
def itkLiThresholdCalculatorHFF_New():
return itkLiThresholdCalculatorHFF.New()
def itkLiThresholdCalculatorHDF_New():
return itkLiThresholdCalculatorHDF.New()
def itkLiThresholdCalculatorHFUS_New():
return itkLiThresholdCalculatorHFUS.New()
def itkLiThresholdCalculatorHDUS_New():
return itkLiThresholdCalculatorHDUS.New()
def itkLiThresholdCalculatorHFUC_New():
return itkLiThresholdCalculatorHFUC.New()
def itkLiThresholdCalculatorHDUC_New():
return itkLiThresholdCalculatorHDUC.New()
def itkLiThresholdCalculatorHFSS_New():
return itkLiThresholdCalculatorHFSS.New()
def itkLiThresholdCalculatorHDSS_New():
return itkLiThresholdCalculatorHDSS.New()
class itkLiThresholdCalculatorHDF(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDF):
"""Proxy of C++ itkLiThresholdCalculatorHDF class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkLiThresholdCalculatorHDF_Pointer":
"""__New_orig__() -> itkLiThresholdCalculatorHDF_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDF___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkLiThresholdCalculatorHDF_Pointer":
"""Clone(itkLiThresholdCalculatorHDF self) -> itkLiThresholdCalculatorHDF_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDF_Clone(self)
__swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHDF
def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDF *":
"""cast(itkLightObject obj) -> itkLiThresholdCalculatorHDF"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDF_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkLiThresholdCalculatorHDF
Create a new object of the class itkLiThresholdCalculatorHDF and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkLiThresholdCalculatorHDF.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkLiThresholdCalculatorHDF.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkLiThresholdCalculatorHDF.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkLiThresholdCalculatorHDF.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDF_Clone, None, itkLiThresholdCalculatorHDF)
itkLiThresholdCalculatorHDF_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDF_swigregister
itkLiThresholdCalculatorHDF_swigregister(itkLiThresholdCalculatorHDF)
def itkLiThresholdCalculatorHDF___New_orig__() -> "itkLiThresholdCalculatorHDF_Pointer":
"""itkLiThresholdCalculatorHDF___New_orig__() -> itkLiThresholdCalculatorHDF_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDF___New_orig__()
def itkLiThresholdCalculatorHDF_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDF *":
"""itkLiThresholdCalculatorHDF_cast(itkLightObject obj) -> itkLiThresholdCalculatorHDF"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDF_cast(obj)
class itkLiThresholdCalculatorHDSS(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDSS):
"""Proxy of C++ itkLiThresholdCalculatorHDSS class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkLiThresholdCalculatorHDSS_Pointer":
"""__New_orig__() -> itkLiThresholdCalculatorHDSS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDSS___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkLiThresholdCalculatorHDSS_Pointer":
"""Clone(itkLiThresholdCalculatorHDSS self) -> itkLiThresholdCalculatorHDSS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDSS_Clone(self)
__swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHDSS
def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDSS *":
"""cast(itkLightObject obj) -> itkLiThresholdCalculatorHDSS"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDSS_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkLiThresholdCalculatorHDSS
Create a new object of the class itkLiThresholdCalculatorHDSS and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkLiThresholdCalculatorHDSS.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkLiThresholdCalculatorHDSS.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkLiThresholdCalculatorHDSS.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkLiThresholdCalculatorHDSS.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDSS_Clone, None, itkLiThresholdCalculatorHDSS)
itkLiThresholdCalculatorHDSS_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDSS_swigregister
itkLiThresholdCalculatorHDSS_swigregister(itkLiThresholdCalculatorHDSS)
def itkLiThresholdCalculatorHDSS___New_orig__() -> "itkLiThresholdCalculatorHDSS_Pointer":
"""itkLiThresholdCalculatorHDSS___New_orig__() -> itkLiThresholdCalculatorHDSS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDSS___New_orig__()
def itkLiThresholdCalculatorHDSS_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDSS *":
"""itkLiThresholdCalculatorHDSS_cast(itkLightObject obj) -> itkLiThresholdCalculatorHDSS"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDSS_cast(obj)
# --- SWIG-generated wrapper (do not hand-edit; regenerate with the ITK wrapping tools) ---
# Python proxy for a C++ itkLiThresholdCalculator instantiation; the "HDUC"
# suffix encodes the template arguments — TODO confirm the exact histogram /
# output types against the wrapping configuration.
class itkLiThresholdCalculatorHDUC(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDUC):
    """Proxy of C++ itkLiThresholdCalculatorHDUC class."""
    # thisown flags whether Python owns (and may delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # ITK objects are reference-counted and must be created via New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkLiThresholdCalculatorHDUC_Pointer":
        """__New_orig__() -> itkLiThresholdCalculatorHDUC_Pointer"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUC___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)  # pre-decorator spelling kept by the generator
    def Clone(self) -> "itkLiThresholdCalculatorHDUC_Pointer":
        """Clone(itkLiThresholdCalculatorHDUC self) -> itkLiThresholdCalculatorHDUC_Pointer"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUC_Clone(self)
    __swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHDUC
    def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDUC *":
        """cast(itkLightObject obj) -> itkLiThresholdCalculatorHDUC"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUC_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkLiThresholdCalculatorHDUC
        Create a new object of the class itkLiThresholdCalculatorHDUC and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkLiThresholdCalculatorHDUC.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkLiThresholdCalculatorHDUC.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkLiThresholdCalculatorHDUC.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# Rebind Clone to the C implementation and register the proxy with the SWIG runtime.
itkLiThresholdCalculatorHDUC.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUC_Clone, None, itkLiThresholdCalculatorHDUC)
itkLiThresholdCalculatorHDUC_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUC_swigregister
itkLiThresholdCalculatorHDUC_swigregister(itkLiThresholdCalculatorHDUC)
# Module-level aliases of the static methods (legacy calling convention).
def itkLiThresholdCalculatorHDUC___New_orig__() -> "itkLiThresholdCalculatorHDUC_Pointer":
    """itkLiThresholdCalculatorHDUC___New_orig__() -> itkLiThresholdCalculatorHDUC_Pointer"""
    return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUC___New_orig__()
def itkLiThresholdCalculatorHDUC_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDUC *":
    """itkLiThresholdCalculatorHDUC_cast(itkLightObject obj) -> itkLiThresholdCalculatorHDUC"""
    return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUC_cast(obj)
# --- SWIG-generated wrapper (do not hand-edit; regenerate with the ITK wrapping tools) ---
# Python proxy for a C++ itkLiThresholdCalculator instantiation; the "HDUS"
# suffix encodes the template arguments — TODO confirm against the wrapping config.
class itkLiThresholdCalculatorHDUS(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDUS):
    """Proxy of C++ itkLiThresholdCalculatorHDUS class."""
    # thisown flags whether Python owns (and may delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # ITK objects are reference-counted and must be created via New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkLiThresholdCalculatorHDUS_Pointer":
        """__New_orig__() -> itkLiThresholdCalculatorHDUS_Pointer"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUS___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)  # pre-decorator spelling kept by the generator
    def Clone(self) -> "itkLiThresholdCalculatorHDUS_Pointer":
        """Clone(itkLiThresholdCalculatorHDUS self) -> itkLiThresholdCalculatorHDUS_Pointer"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUS_Clone(self)
    __swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHDUS
    def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDUS *":
        """cast(itkLightObject obj) -> itkLiThresholdCalculatorHDUS"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUS_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkLiThresholdCalculatorHDUS
        Create a new object of the class itkLiThresholdCalculatorHDUS and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkLiThresholdCalculatorHDUS.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkLiThresholdCalculatorHDUS.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkLiThresholdCalculatorHDUS.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# Rebind Clone to the C implementation and register the proxy with the SWIG runtime.
itkLiThresholdCalculatorHDUS.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUS_Clone, None, itkLiThresholdCalculatorHDUS)
itkLiThresholdCalculatorHDUS_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUS_swigregister
itkLiThresholdCalculatorHDUS_swigregister(itkLiThresholdCalculatorHDUS)
# Module-level aliases of the static methods (legacy calling convention).
def itkLiThresholdCalculatorHDUS___New_orig__() -> "itkLiThresholdCalculatorHDUS_Pointer":
    """itkLiThresholdCalculatorHDUS___New_orig__() -> itkLiThresholdCalculatorHDUS_Pointer"""
    return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUS___New_orig__()
def itkLiThresholdCalculatorHDUS_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDUS *":
    """itkLiThresholdCalculatorHDUS_cast(itkLightObject obj) -> itkLiThresholdCalculatorHDUS"""
    return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUS_cast(obj)
# --- SWIG-generated wrapper (do not hand-edit; regenerate with the ITK wrapping tools) ---
# Python proxy for a C++ itkLiThresholdCalculator instantiation; the "HFF"
# suffix encodes the template arguments — TODO confirm against the wrapping config.
class itkLiThresholdCalculatorHFF(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFF):
    """Proxy of C++ itkLiThresholdCalculatorHFF class."""
    # thisown flags whether Python owns (and may delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # ITK objects are reference-counted and must be created via New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkLiThresholdCalculatorHFF_Pointer":
        """__New_orig__() -> itkLiThresholdCalculatorHFF_Pointer"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFF___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)  # pre-decorator spelling kept by the generator
    def Clone(self) -> "itkLiThresholdCalculatorHFF_Pointer":
        """Clone(itkLiThresholdCalculatorHFF self) -> itkLiThresholdCalculatorHFF_Pointer"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFF_Clone(self)
    __swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHFF
    def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFF *":
        """cast(itkLightObject obj) -> itkLiThresholdCalculatorHFF"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFF_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkLiThresholdCalculatorHFF
        Create a new object of the class itkLiThresholdCalculatorHFF and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkLiThresholdCalculatorHFF.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkLiThresholdCalculatorHFF.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkLiThresholdCalculatorHFF.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# Rebind Clone to the C implementation and register the proxy with the SWIG runtime.
itkLiThresholdCalculatorHFF.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFF_Clone, None, itkLiThresholdCalculatorHFF)
itkLiThresholdCalculatorHFF_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFF_swigregister
itkLiThresholdCalculatorHFF_swigregister(itkLiThresholdCalculatorHFF)
# Module-level aliases of the static methods (legacy calling convention).
def itkLiThresholdCalculatorHFF___New_orig__() -> "itkLiThresholdCalculatorHFF_Pointer":
    """itkLiThresholdCalculatorHFF___New_orig__() -> itkLiThresholdCalculatorHFF_Pointer"""
    return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFF___New_orig__()
def itkLiThresholdCalculatorHFF_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFF *":
    """itkLiThresholdCalculatorHFF_cast(itkLightObject obj) -> itkLiThresholdCalculatorHFF"""
    return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFF_cast(obj)
# --- SWIG-generated wrapper (do not hand-edit; regenerate with the ITK wrapping tools) ---
# Python proxy for a C++ itkLiThresholdCalculator instantiation; the "HFSS"
# suffix encodes the template arguments — TODO confirm against the wrapping config.
class itkLiThresholdCalculatorHFSS(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFSS):
    """Proxy of C++ itkLiThresholdCalculatorHFSS class."""
    # thisown flags whether Python owns (and may delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # ITK objects are reference-counted and must be created via New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkLiThresholdCalculatorHFSS_Pointer":
        """__New_orig__() -> itkLiThresholdCalculatorHFSS_Pointer"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFSS___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)  # pre-decorator spelling kept by the generator
    def Clone(self) -> "itkLiThresholdCalculatorHFSS_Pointer":
        """Clone(itkLiThresholdCalculatorHFSS self) -> itkLiThresholdCalculatorHFSS_Pointer"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFSS_Clone(self)
    __swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHFSS
    def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFSS *":
        """cast(itkLightObject obj) -> itkLiThresholdCalculatorHFSS"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFSS_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkLiThresholdCalculatorHFSS
        Create a new object of the class itkLiThresholdCalculatorHFSS and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkLiThresholdCalculatorHFSS.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkLiThresholdCalculatorHFSS.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkLiThresholdCalculatorHFSS.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# Rebind Clone to the C implementation and register the proxy with the SWIG runtime.
itkLiThresholdCalculatorHFSS.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFSS_Clone, None, itkLiThresholdCalculatorHFSS)
itkLiThresholdCalculatorHFSS_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFSS_swigregister
itkLiThresholdCalculatorHFSS_swigregister(itkLiThresholdCalculatorHFSS)
# Module-level aliases of the static methods (legacy calling convention).
def itkLiThresholdCalculatorHFSS___New_orig__() -> "itkLiThresholdCalculatorHFSS_Pointer":
    """itkLiThresholdCalculatorHFSS___New_orig__() -> itkLiThresholdCalculatorHFSS_Pointer"""
    return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFSS___New_orig__()
def itkLiThresholdCalculatorHFSS_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFSS *":
    """itkLiThresholdCalculatorHFSS_cast(itkLightObject obj) -> itkLiThresholdCalculatorHFSS"""
    return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFSS_cast(obj)
# --- SWIG-generated wrapper (do not hand-edit; regenerate with the ITK wrapping tools) ---
# Python proxy for a C++ itkLiThresholdCalculator instantiation; the "HFUC"
# suffix encodes the template arguments — TODO confirm against the wrapping config.
class itkLiThresholdCalculatorHFUC(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFUC):
    """Proxy of C++ itkLiThresholdCalculatorHFUC class."""
    # thisown flags whether Python owns (and may delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # ITK objects are reference-counted and must be created via New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkLiThresholdCalculatorHFUC_Pointer":
        """__New_orig__() -> itkLiThresholdCalculatorHFUC_Pointer"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUC___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)  # pre-decorator spelling kept by the generator
    def Clone(self) -> "itkLiThresholdCalculatorHFUC_Pointer":
        """Clone(itkLiThresholdCalculatorHFUC self) -> itkLiThresholdCalculatorHFUC_Pointer"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUC_Clone(self)
    __swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHFUC
    def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFUC *":
        """cast(itkLightObject obj) -> itkLiThresholdCalculatorHFUC"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUC_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkLiThresholdCalculatorHFUC
        Create a new object of the class itkLiThresholdCalculatorHFUC and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkLiThresholdCalculatorHFUC.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkLiThresholdCalculatorHFUC.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkLiThresholdCalculatorHFUC.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# Rebind Clone to the C implementation and register the proxy with the SWIG runtime.
itkLiThresholdCalculatorHFUC.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUC_Clone, None, itkLiThresholdCalculatorHFUC)
itkLiThresholdCalculatorHFUC_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUC_swigregister
itkLiThresholdCalculatorHFUC_swigregister(itkLiThresholdCalculatorHFUC)
# Module-level aliases of the static methods (legacy calling convention).
def itkLiThresholdCalculatorHFUC___New_orig__() -> "itkLiThresholdCalculatorHFUC_Pointer":
    """itkLiThresholdCalculatorHFUC___New_orig__() -> itkLiThresholdCalculatorHFUC_Pointer"""
    return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUC___New_orig__()
def itkLiThresholdCalculatorHFUC_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFUC *":
    """itkLiThresholdCalculatorHFUC_cast(itkLightObject obj) -> itkLiThresholdCalculatorHFUC"""
    return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUC_cast(obj)
# --- SWIG-generated wrapper (do not hand-edit; regenerate with the ITK wrapping tools) ---
# Python proxy for a C++ itkLiThresholdCalculator instantiation; the "HFUS"
# suffix encodes the template arguments — TODO confirm against the wrapping config.
class itkLiThresholdCalculatorHFUS(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFUS):
    """Proxy of C++ itkLiThresholdCalculatorHFUS class."""
    # thisown flags whether Python owns (and may delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # ITK objects are reference-counted and must be created via New().
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def __New_orig__() -> "itkLiThresholdCalculatorHFUS_Pointer":
        """__New_orig__() -> itkLiThresholdCalculatorHFUS_Pointer"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUS___New_orig__()
    __New_orig__ = staticmethod(__New_orig__)  # pre-decorator spelling kept by the generator
    def Clone(self) -> "itkLiThresholdCalculatorHFUS_Pointer":
        """Clone(itkLiThresholdCalculatorHFUS self) -> itkLiThresholdCalculatorHFUS_Pointer"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUS_Clone(self)
    __swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHFUS
    def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFUS *":
        """cast(itkLightObject obj) -> itkLiThresholdCalculatorHFUS"""
        return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUS_cast(obj)
    cast = staticmethod(cast)
    def New(*args, **kargs):
        """New() -> itkLiThresholdCalculatorHFUS
        Create a new object of the class itkLiThresholdCalculatorHFUS and set the input and the parameters if some
        named or non-named arguments are passed to that method.
        New() tries to assign all the non named parameters to the input of the new objects - the
        first non named parameter in the first input, etc.
        The named parameters are used by calling the method with the same name prefixed by 'Set'.
        Ex:
          itkLiThresholdCalculatorHFUS.New( reader, Threshold=10 )
        is (most of the time) equivalent to:
          obj = itkLiThresholdCalculatorHFUS.New()
          obj.SetInput( 0, reader.GetOutput() )
          obj.SetThreshold( 10 )
        """
        obj = itkLiThresholdCalculatorHFUS.__New_orig__()
        import itkTemplate
        itkTemplate.New(obj, *args, **kargs)
        return obj
    New = staticmethod(New)
# Rebind Clone to the C implementation and register the proxy with the SWIG runtime.
itkLiThresholdCalculatorHFUS.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUS_Clone, None, itkLiThresholdCalculatorHFUS)
itkLiThresholdCalculatorHFUS_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUS_swigregister
itkLiThresholdCalculatorHFUS_swigregister(itkLiThresholdCalculatorHFUS)
# Module-level aliases of the static methods (legacy calling convention).
def itkLiThresholdCalculatorHFUS___New_orig__() -> "itkLiThresholdCalculatorHFUS_Pointer":
    """itkLiThresholdCalculatorHFUS___New_orig__() -> itkLiThresholdCalculatorHFUS_Pointer"""
    return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUS___New_orig__()
def itkLiThresholdCalculatorHFUS_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFUS *":
    """itkLiThresholdCalculatorHFUS_cast(itkLightObject obj) -> itkLiThresholdCalculatorHFUS"""
    return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUS_cast(obj)
def li_threshold_calculator(*args, **kwargs):
    """Procedural interface for LiThresholdCalculator.

    Forwards every argument to ``itk.LiThresholdCalculator.New`` and runs the
    resulting filter immediately, returning its output.
    """
    import itk  # imported lazily so the module loads without a full itk install
    return itk.LiThresholdCalculator.New(*args, **kwargs).__internal_call__()
def li_threshold_calculator_init_docstring():
    # Copy the wrapped filter's class docstring onto the procedural helper above
    # so help(li_threshold_calculator) shows the filter documentation.
    import itk
    import itkTemplate
    if isinstance(itk.LiThresholdCalculator, itkTemplate.itkTemplate):
        # Templated wrapper: borrow the docstring of the first instantiation.
        # NOTE(review): assumes itkTemplate.values() returns an indexable
        # sequence rather than a plain dict view — confirm against itkTemplate.
        li_threshold_calculator.__doc__ = itk.LiThresholdCalculator.values()[0].__doc__
    else:
        li_threshold_calculator.__doc__ = itk.LiThresholdCalculator.__doc__
| [
"44883043+ssalmaan@users.noreply.github.com"
] | 44883043+ssalmaan@users.noreply.github.com |
8cfcdcb3009f5a7c14665ce0abffbbc31b6f11c2 | d9b5577ed3802fe746c7bd1f8234fe2485c069e7 | /Root.py | eeac8e9271becbd474ad400899edd2c66fcc9070 | [] | no_license | AndyHwh/homework7 | b658e9589320ba841c86033aa2655c8fec431577 | 2af41eca083f9178246768877abc0c2dd9670640 | refs/heads/master | 2020-03-16T08:10:23.801033 | 2018-05-08T10:12:03 | 2018-05-08T10:12:03 | 132,591,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,362 | py | # -*-coding:utf-8-*-
#请写出一个二分法找方程根的通用函数,加到大家的软件包里去。能够处理单调递增和递减的情况。
#注:本程序仅适用于某区间内函数 单调递增 或 单调递减 或 二次函数 或 半个周期的 三角函数
#name: Huang Weihao
#date: 2018/5/8
import math
import numpy as np
def func(x):
    """Target function f(x) = cos(x) - 1 whose roots the driver searches for.

    This is the half-period trigonometric case; the commented-out monotone
    variants further down can be swapped in to exercise the other branches.
    """
    return math.cos(x) - 1
# def func(x): #单调递增函数
# return math.exp(x)-3.4
# def func(x): #单调递减函数
# return -x**2+3
def sym_(a, b, s, f=None):
    """Locate a root of ``f`` on [a, b] by bisection.

    Parameters
    ----------
    a, b : float
        Bracket endpoints; ``f`` must change sign (or vanish) on [a, b].
    s : float
        Absolute tolerance on the width of the final bracket.
    f : callable, optional
        Function whose root is sought.  Defaults to the module-level ``func``
        (backward compatible with the original three-argument signature).

    Returns
    -------
    float
        The midpoint of the final bracket, or the exact point where ``f``
        evaluates to 0.
    """
    if f is None:
        f = func  # original behavior: bisect the module-level target function
    fa, fb = f(a), f(b)
    # Exact zeros at the endpoints are roots themselves.  (The original code
    # tested fa * fb == 0 each iteration, which could also fire on floating
    # underflow of the product and return a non-root endpoint; it also kept a
    # dead iteration counter ``k`` that was never read.)
    if fa == 0:
        return a
    if fb == 0:
        return b
    # Halve the bracket, keeping the half that still spans the sign change.
    while abs(a - b) >= s:
        m = (a + b) / 2
        fm = f(m)
        if fm == 0:
            return m
        if fm * fb < 0:
            a = m  # sign change lies in [m, b]
        else:
            b = m  # sign change lies in [a, m]
            fb = fm
    return (a + b) / 2
# Interactive driver: read a bracket [a, b] and tolerance, then dispatch to the
# appropriate root-finding strategy.  Runs once (every path ends in a break).
while(True):
    print("Please enter the range of argument ant accurancy: ")
    a=float(input("a="))
    b=float(input("b="))
    s=float(input("ℇ="))
    if func(a)*func(b)>0:  # endpoint values share a sign: any root must be a zero-touching extremum
        if (func(a)-func(a+s))*(func(b)-func(b+s))<0:  # endpoint slopes differ: quadratic / half-period trig case
            while(True):
                m = (a + b) / 2
                if func(a)*func(m) > 0:
                    if (func(m - s) - func(m)) * (func(m) - func(m + s)) < 0:  # m is an extremum at this accuracy
                        if abs(func(m)) < s:  # extremum value within tolerance: treat m as the (double) root
                            print("x0 =",m)
                            break
                        else:
                            break  # extremum does not reach zero: no root in [a, b]
                    elif (func(m - s) - func(m)) * (func(m) - func(m + s)) == 0:  # flat near m: take the midpoint of the flat step
                        if func(m) == func(m - s):
                            print("x0 =",m - s / 2)
                            break
                        else:
                            print("x0 =",m + s / 2)
                            break
                    else:
                        # Keep the half whose slopes still differ at the ends
                        # (the extremum stays bracketed), then bisect again.
                        if (func(a) - func(a + s)) * (func(m) - func(m + s)) < 0:
                            b = m
                        else:
                            a = m
                        continue
                elif func(m)==0:
                    print("x0 =",m)
                    break
                else:
                    # Sign change on both halves: two roots, bisect each side.
                    print("x1 =",sym_(a,m,s))
                    print("x2 =",sym_(m,b,s))
                    break
    else:
        # Endpoint values differ in sign (or one is zero): plain bisection applies.
        print("x0 =",sym_(a,b,s))
        break
    break
| [
"wangfeng@cnlab.net"
] | wangfeng@cnlab.net |
a4d99f5efcd74f85da14c55895c1e8281da0ba04 | 1a5f7fb99f74d1bf9d64761c0cdb4a5e2e72bb36 | /bestoon/settings.py | f5058d022b0b048bc34dab15f9d7cb4a25f5bb23 | [] | no_license | ahsz110/bestoon | 0055f8c604edbcc7c7c23665c9df7dcc878954cc | b3786f9428e1ea9b31001d1cf9517bf8753e38fd | refs/heads/master | 2022-12-12T15:03:21.474707 | 2018-09-11T07:20:23 | 2018-09-11T07:20:23 | 141,896,116 | 0 | 0 | null | 2021-06-10T20:40:26 | 2018-07-22T13:01:58 | Python | UTF-8 | Python | false | false | 3,111 | py | """
Django settings for bestoon project.
Generated by 'django-admin startproject' using Django 1.11.14.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'cw%x9-1$izk+1mko-11%m2pdxt-o2ioyk&1@dr0u^!df^gg!&_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'web',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bestoon.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bestoon.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"ahsz110@gmail.com"
] | ahsz110@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.