blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
21c72f0be4c269c282c32d3ded7cac8c22f2a902 | 4c3768f5ee953cbc876c7e12d8aa3949eeba4662 | /main.py | 4dd5dfce80c2898fd680cbc8ae546540308bb182 | [] | no_license | danielsrenwick/lunarGNSS | fad79925d347ee29c9ad35440b36ec3a97ad842a | 6cbed9343ee3114569aadf6430f27d2c6504a05e | refs/heads/main | 2023-08-10T08:43:47.077413 | 2021-09-10T11:27:18 | 2021-09-10T11:27:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,089 | py | from pymoo.algorithms.ctaea import CTAEA
from pymoo.model.problem import Problem
from pymoo.factory import get_termination, get_reference_directions, get_sampling, get_mutation, get_crossover
from pymoo.operators.mixed_variable_operator import MixedVariableCrossover, MixedVariableMutation, MixedVariableSampling
from pymoo.util.termination.f_tol import MultiObjectiveSpaceToleranceTermination
from numpy import array, save, load
import random
import copy
import numpy as np
import pandas as pd
import analysis
import time
from tqdm import tqdm
from multiprocessing import Pool, get_context
from itertools import product
import orekit
from orekit.pyhelpers import setup_orekit_curdir
# initialises Orekit
vm = orekit.initVM()
setup_orekit_curdir()
# define a class to handle the optimisation within Pymoo
class iterationProblem(Problem):
    """Pymoo problem definition for the lunar constellation design search.

    Decision vector (7 vars): a, e, i, aop, s, p, f.
    Objectives (4): gdop, hdop, delta-v, satellite count.
    Constraints (4, feasible when <= 0): s*p <= 60, gdop <= 10,
    hdop <= 10, delta-v <= 1.
    """

    def __init__(self):
        # Variable bounds, in decision-vector order: a, e, i, aop, s, p, f.
        xl = np.array([5000.0, 0.0, 0.0, 0.0, 3, 3, 0])
        xu = np.array([50000.0, 0.5, 90.0, 360.0, 10, 10, 1])
        super().__init__(
            n_var=7,     # a, e, i, aop, s, p, f
            n_obj=4,     # gdop, hdop, deltav, t
            n_constr=4,
            xl=xl,
            xu=xu)

    def _evaluate(self, x, out, *args, **kwargs):
        # Simulate every candidate configuration of this generation.
        metrics = poolHandler(x)
        # Unpack the per-configuration metric tuples into one list per metric.
        gdop, hdop, deltav, t = (
            [m[idx] for m in metrics] for idx in range(4)
        )
        # Constraint values; designs violating any of these are discarded.
        g1 = (x[:, 4] * x[:, 5]) - 60.0        # total satellites s*p <= 60
        g2 = [m[0] - 10.0 for m in metrics]    # gdop <= 10
        g3 = [m[1] - 10.0 for m in metrics]    # hdop <= 10
        g4 = [m[2] - 1.0 for m in metrics]     # delta-v <= 1
        # Hand objectives and constraints back to CTAEA.
        out["F"] = np.column_stack([gdop, hdop, deltav, t])
        out["G"] = np.column_stack([g1, g2, g3, g4])
# setup up and runs the simulation for the given constellation configuration
def analyses(config):
    """Run one constellation simulation and return its performance metrics."""
    # Simulation quality parameters.
    step_size = 900.0     # seconds between orbital-propagator steps
    surface_points = 500  # sample points used to model the lunar surface
    # Delegate the heavy lifting to the analysis module and pass the
    # resulting metric tuple straight back to the optimiser.
    return analysis.constellationAnalysis(config, step_size, surface_points)
# function to handle the multiprocessing of the simulation by using a pool
def poolHandler(configs):
    """Evaluate every configuration in *configs* in a multiprocessing pool.

    Returns the list of metric tuples produced by ``analyses``, in the
    same order as *configs*.
    """
    # set to the number of cores (threads) in your CPU
    # BE CAREFUL WITH RAM USAGE!!!
    # each process uses approximately 2.5 GB of RAM at maximum load
    cpus = 24
    # creates the multiprocessing pool with the given number of CPUs
    # 'forkserver' and maxtaskperchild = 1 are used to minimise RAM usage
    p = get_context("forkserver").Pool(cpus, maxtasksperchild=1)
    # configList = pd.DataFrame(
    #     data=configs, columns=["a", "e", "i", "aop", "s", "p", "f"])
    # configList.to_csv('configList.csv', index_label=False, index=False)
    # maps the current list of configuations onto the multiprocessing pool
    # tqdm is used to display the current progress through the current
    # iteration; imap preserves input order, chunksize=1 keeps tasks small
    results = list(
        tqdm(
            p.imap(
                analyses, configs, chunksize=1), total=len(configs)))
    p.close()
    p.join()
    return results
if __name__ == '__main__':
    # determines whether to load a save or not.
    # if you want to start a fresh analysis, set x to 0
    # if you want to load a previous analysis, set x to 1
    x = 0
    if x == 0:
        # creates a fresh optimisation with the given variables,
        # objectives, and constraints
        problem = iterationProblem()
        # defines the number of constellations to be generated for each
        # iteration
        iterationSize = 200
        # defines the directions within the objective space through which
        # the CTAEA algorithm will search
        ref_dirs = get_reference_directions("energy", 4, iterationSize)
        # defines the type used for each of the seven input variables
        mask = ["real", "real", "real", "real", "int", "int", "real"]
        # defines the random sampling used for the selection of configurations
        sampling = MixedVariableSampling(mask, {
            "real": get_sampling("real_random"),
            "int": get_sampling("int_random")
        })
        # defines the crossover model used (simulated binary crossover)
        crossover = MixedVariableCrossover(
            mask, {
                "real": get_crossover(
                    "real_sbx", prob=0.75, eta=5.0),
                "int": get_crossover(
                    "int_sbx", prob=0.75, eta=5.0)
            })
        # defines the mutation model used (polynomial mutation)
        mutation = MixedVariableMutation(
            mask, {
                "real": get_mutation(
                    "real_pm", eta=10.0),
                "int": get_mutation(
                    "int_pm", eta=10.0)
            })
        # creates a object containing the CTAEA algorithm with the
        # given directions, sampling, crossover, and mutation defined
        # above
        algorithm = CTAEA(
            ref_dirs=ref_dirs,
            sampling=sampling,
            crossover=crossover,
            mutation=mutation,
            eliminate_duplicates=True)
        # sets the tolerance at which the algorithm will terminate;
        # the current value of 1e-3 effectively lets it run indefinitely
        termination = MultiObjectiveSpaceToleranceTermination(
            tol=1e-3, n_last=30, nth_gen=5)
        run = copy.deepcopy(algorithm)
        run.setup(problem, termination=termination, seed=1)
        # runs the optimisation process until the termination is met
        while run.has_next():
            run.next()
            # saves the data for each iteration into a new folder
            # this allows one to restart from any generation, or
            # load each generation for analysis at the end
            save("generations/evolution_%d" % run.n_gen, run)
            print("\n\n", run.n_gen)
            # when you want to finish the current simulation simply
            # close the cmd prompt running the script
    else:
        # if you want to load from a previous run
        # enter the number of the generation you wish to start from
        run, = load(
            "generations/evolution_1864.npy", allow_pickle=True).flatten()
        while run.has_next():
            run.next()
            save("generations/evolution_%d" % run.n_gen, run)
            print("\n\n", run.n_gen)
| [
"daniel@renwick.xyz"
] | daniel@renwick.xyz |
698bf48545910120be18a382c66a8d0396334c26 | 5e2cc156399e1265b5f05201be71618693fc4b50 | /utils.py | 0770dfc50038c9efc1dd46b779cdc66f5d5520e1 | [] | no_license | yuanyehome/Text-Classification-Project | 111fdebf6a6b4e974d57449ba5bb33b68c0632f5 | 5446544e8c04a60757b43a4c6c090cbc103eb31d | refs/heads/main | 2023-02-11T18:28:47.924564 | 2021-01-09T17:56:50 | 2021-01-09T17:56:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,840 | py | from prettytable import PrettyTable
import time
import torch
from torch.utils.data.dataset import Dataset
def layer_wise_parameters(model):
    """Build a PrettyTable listing each trainable parameter of *model*.

    Columns: layer name, tensor shape, and element count.
    """
    table = PrettyTable()
    table.field_names = ["Layer Name", "Output Shape", "Param #"]
    # Left-align the names; right-align shapes and counts.
    table.align["Layer Name"] = "l"
    table.align["Output Shape"] = "r"
    table.align["Param #"] = "r"
    trainable = (
        (name, param)
        for name, param in model.named_parameters()
        if param.requires_grad
    )
    for name, param in trainable:
        table.add_row([name, str(list(param.shape)), param.numel()])
    return table
def human_format(num):
    """Format *num* with a metric suffix, e.g. 1234 -> '1.23K'.

    The value is first rounded to three significant digits, then scaled
    down by thousands until its magnitude drops below 1000.
    """
    value = float('{:.3g}'.format(num))
    suffixes = ['', 'K', 'M', 'B', 'T']
    tier = 0
    while abs(value) >= 1000:
        value /= 1000.0
        tier += 1
    # Render without trailing zeros or a dangling decimal point.
    digits = '{:f}'.format(value).rstrip('0').rstrip('.')
    return '{}{}'.format(digits, suffixes[tier])
def count_parameters(model):
    """Return the total number of trainable parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
class AverageMeter(object):
    """Tracks the most recent value and a running sum/count/average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
class Timer(object):
    """Stopwatch that accumulates elapsed wall-clock seconds.

    Starts running on construction; supports stop/resume/reset, each of
    which returns ``self`` for chaining.
    """

    def __init__(self):
        self.running = True
        self.total = 0
        self.start = time.time()

    def reset(self):
        """Zero the accumulated time and restart the clock."""
        self.running, self.total, self.start = True, 0, time.time()
        return self

    def resume(self):
        """Restart the clock if it is currently stopped."""
        if self.running:
            return self
        self.running = True
        self.start = time.time()
        return self

    def stop(self):
        """Pause the clock, folding the current segment into ``total``."""
        if not self.running:
            return self
        self.running = False
        self.total += time.time() - self.start
        return self

    def time(self):
        """Elapsed seconds, including the in-progress segment if running."""
        extra = time.time() - self.start if self.running else 0
        return self.total + extra
class TextDataset(Dataset):
    """Token-id dataset for text classification.

    Each item is ``(label, features, length)``, or ``(features, length)``
    when *is_test* is true (test data carries no labels).
    """

    def __init__(self, datas, vocab, device, lengths, is_test=False):
        self.is_test = is_test
        if not is_test:
            # Labels are 1-based in the raw data; shift them to 0-based.
            label_ids = [int(label) - 1 for label in datas.label_id]
            self.labels = torch.tensor(label_ids).to(device)
        # Map every token of every sentence to its vocabulary index.
        encoded = [[vocab.stoi[token] for token in sentence]
                   for sentence in datas.text]
        self.features = torch.tensor(encoded).to(device)
        self.lengths = torch.tensor(lengths)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, index):
        sample = (self.features[index], self.lengths[index])
        if self.is_test:
            return sample
        return (self.labels[index],) + sample
| [
"2252706531@qq.com"
] | 2252706531@qq.com |
3baf8e9914792b2d398347aa85e55b038c491263 | 89fea7d230e282b3bd3cf2d7db1b003e572e6fa8 | /genconf/views.py | 34042b1fcf0916e223d81ec77069c612f784a390 | [] | no_license | madron/genconf | 45abaf66010944e2df9ca1bdaa32328267c62584 | 99c7d82d55b5075299940adfeaff903b0c70bc8b | refs/heads/master | 2020-12-04T11:51:16.029577 | 2016-01-15T09:35:01 | 2016-01-15T09:35:01 | 231,754,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from django.views.generic import DetailView
from . import models
class ConfigurationView(DetailView):
    """Render a single Router's generated configuration as plain text.

    GET-only detail view; the object is resolved by DetailView's usual
    pk/slug URL conventions.
    """
    http_method_names = ['get']
    model = models.Router
    template_name = 'genconf/configuration/configuration.txt'
| [
"massimiliano.ravelli@gmail.com"
] | massimiliano.ravelli@gmail.com |
f0774a6e287ec0c510b4f0bc8e30866b528f3230 | 28493b48298133bc8885ace8a3be5d0990814a4f | /gui.py | 7729d641c2e47e8f10eebbe6e2cd5f989f57a3b6 | [] | no_license | VadimKutovoi/DiffEq | 0dd62323b706199a0fe4ad8a341280ab11063c5e | 26506768bfda024d2b7c2da7956e61dd2ebd5b01 | refs/heads/master | 2020-05-25T16:49:16.365771 | 2019-05-26T12:29:14 | 2019-05-26T12:29:14 | 187,894,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,934 | py | import matplotlib as mp
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.ticker as ticker
import math
# Cosine-series coefficients of the initial condition (see FuncFi).
Fi_a = 3.5
Fi_b = 1
Fi_c = 1
# Fourier coefficients of the source/kernel term (see FuncB).
B_a = 0
B_b = 0.25
B_c = -0.25
B_d = -0.5
B_e = -0.5
# Problem/grid parameters; overwritten by main() from its argument list.
l = 0
h = 0
tau = 0
a = 0
T = 0
def FuncFi(x, l):
    # Initial condition phi(x) on [0, l]: truncated cosine series with
    # the module-level coefficients Fi_a, Fi_b, Fi_c.
    return Fi_a * 1 + Fi_b * math.cos(math.pi * x / l) + Fi_c * math.cos(2 * math.pi * x / l)
def FuncB(x, l):
    # Source/kernel term b(x) on [0, l]: two-harmonic Fourier series with
    # the module-level coefficients B_a..B_e.
    return B_a * 1 + B_b * math.cos(math.pi * x / l) + B_c * math.sin(math.pi * x / l) + \
           B_d * math.cos(2 * math.pi * x / l) + B_e * math.sin(2 * math.pi * x / l)
def RightConst(y, size, tau):
    """Assemble the right-hand side vector for one implicit time step.

    Approximates the integral of B(x)*y(x) over the grid with a composite
    Simpson rule, then forms x[i] = y[i] * (1/tau + B(x_i) - integral) for
    the interior nodes.
    """
    global l
    x = np.zeros(len(y))
    simpson = 0
    # Simpson weights: 1/3 at the two ends, then 2/3 on even nodes and
    # 4/3 on odd nodes.
    # NOTE(review): the grid spacing h is not multiplied into the sum here
    # - confirm it is accounted for elsewhere. Also, when size is odd the
    # even-node loop below includes i = size-1, which was already counted
    # in the end term - the last node may be double-weighted.
    simpson += (1 / 3) * ( FuncB(0, l) * y[0] + FuncB((size - 1)*h, l) * y[size - 1] )
    for i in range(2, size, 2):
        simpson += (2 / 3) * (FuncB(i * h, l) * y[i])
    for i in range(1, size, 2):
        simpson += (4 / 3) * (FuncB(i * h, l) * y[i])
    # Boundary entries x[0] and x[-1] stay zero; the boundary rows are
    # handled by the matrix built in main().
    for i in range(1, len(x) - 1):
        x[i] = y[i] * ((1 / tau) + FuncB(i * h, l) - simpson)
    return x
def main(args):
    """Solve the integro-differential problem and plot the result.

    *args* packs, in order: T, l, a, h, tau, Fi_a, Fi_b, Fi_c,
    B_a, B_b, B_c, B_d, B_e.  The scheme builds a tridiagonal system,
    advances T implicit time steps, and plots b(x), phi(x) and the
    final solution with matplotlib.
    """
    print(args)
    # Publish the run parameters into the module globals used by the
    # helper functions above.
    global T ; global a ; global l ; global tau ; global h ; global Fi_a
    global Fi_b ; global Fi_c ; global B_a ; global B_b ; global B_d ; global B_c ; global B_e ;
    T = args[0] ; l = args[1] ; a = args[2] ; h = args[3] ; tau = args[4]
    Fi_a = args[5] ; Fi_b = args[6] ; Fi_c = args[7]
    B_a = args[8] ; B_b = args[9] ; B_c = args[10] ; B_d = args[11] ; B_e = args[12]
    # Number of grid nodes on [0, l].
    size = int(l / h) + 1
    y = np.zeros(size)
    b = np.zeros(size)
    y0 = np.zeros(size)
    A = np.zeros((size, size))
    B = np.zeros((size, size))
    yb = np.zeros(size)
    # Sample the initial condition and the source term on the grid.
    for i in range(len(y)):
        y[i] = FuncFi(i * h, l)
    for i in range(len(y0)):
        y0[i] = FuncFi(i * h, l)
        b[i] = FuncB(i * h, l)
    # Tridiagonal coefficients of the implicit scheme.
    coef1 = (-1) * (a * a) / (h * h)
    coef2 = ((2 * a * a) / (h * h)) + (1 / tau)
    # NOTE(review): matrix A is assembled below but never used - the time
    # stepping solves against B only. Confirm whether A is dead code.
    for i in range(1, size-1):
        A[i][i - 1] = coef1
        A[i][i] = coef2
        A[i][i + 1] = coef1
    A[0][0] = coef2
    A[0][1] = coef1
    A[size-1][size-2] = coef1
    A[size-1][size-1] = coef2
    # Matrix B: interior rows are the tridiagonal scheme; first and last
    # rows encode zero-gradient (Neumann-like) boundary conditions.
    for i in range(1, size-1):
        B[i][i - 1] = coef1
        B[i][i] = coef2
        B[i][i + 1] = coef1
    B[0][0] = 1
    B[0][1] = -1
    B[1][0] = 0
    B[size - 2][size-1] = 0
    B[size-1][size - 2] = -1
    B[size-1][size-1] = 1
    # Initial iterate: shifted copy of the sampled initial condition with
    # zeroed boundary entries.
    # NOTE(review): the shift drops y[size-1] and duplicates nothing into
    # yb[0] - confirm this offset is intentional.
    for i in range(size-1):
        yb[i + 1] = y[i]
    yb[0] = 0
    yb[size-1] = 0
    # Advance T implicit time steps: B * yb_new = RightConst(yb_old).
    for i in range(1, T + 1):
        tmp_y = RightConst(yb, size, tau)
        yb = np.linalg.solve(B, tmp_y)
    x = [i * h for i in range(0, size)]
    print(b)
    print(y0)
    print(yb)
    # Plot: solution (black), initial condition (blue), source term (red).
    plt.plot(x, yb, color='black')
    plt.plot(x, y0, color='blue')
    plt.plot(x, b, color='red')
    plt.style.use('fivethirtyeight')
    #plt.rcParams['figure.figsize']=(16,9)
    mng = plt.get_current_fig_manager()
    mng.resize(width=1600, height=900)
    plt.minorticks_on()
    plt.grid(which='both', color='gray')
    plt.show()
| [
"vadim.kutovoi@intel.com"
] | vadim.kutovoi@intel.com |
0b66f6ab3a183691b0308c29dabfc36e889109a5 | 997c82f5d9684945fb2f5d5481dc4d251a93755f | /famapy/core/operations/products.py | 70f4b1107b28b43b64401baecdbb3814a6c47f11 | [] | no_license | jmhorcas/famapy-aafms | a6e45b5fff2c820037daf95151df5bc6895b1611 | bcc80f7061bed4d6bfd536f9d53cf195bffa01e6 | refs/heads/main | 2023-08-24T05:51:47.337325 | 2021-10-15T10:18:20 | 2021-10-15T10:18:20 | 389,559,981 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | from abc import abstractmethod
from typing import Any
from famapy.core.operations import Operation
class Products(Operation):
    """Abstract analysis operation that enumerates a model's products."""
    @abstractmethod
    def __init__(self) -> None:
        # Abstract so every concrete operation must supply its own setup.
        pass
    @abstractmethod
    def get_products(self) -> list[Any]:
        """Return the list of products computed by the operation."""
        pass
| [
"jhorcas@us.es"
] | jhorcas@us.es |
67a36b3377a85c29f958a450f8025f856482a329 | 9c168aba9bb1386c929416b62ede6203c4c99685 | /multicast.py | ac2e94717925d3a8e635b5d89d302e1634faadc0 | [
"MIT"
] | permissive | mcomtzg/Legion | 8712e8a4de44e54f1c40678dab17f77ef4a1051f | b773efa362216d6efc14ce2750104b62e7ea476e | refs/heads/master | 2021-01-24T17:41:50.903440 | 2017-07-27T01:48:40 | 2017-07-27T01:48:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,088 | py | #!/usr/bin/env python3
"""
MultiCast Library
Send/receive UDP multicast packets.
Requires that your OS kernel supports IP multicast.
Usage:
multicast -s (sender, IPv4)
multicast (receivers, IPv4)
Author: Adam Compton
@tatanus
"""
import struct
import socket
import sys
from threading import Timer
class continuousTimer(object):
    """Invoke *function(*args, **kwargs)* every *interval* seconds.

    A fresh threading.Timer is armed for the next tick *before* the
    callback runs, so a slow callback does not delay scheduling.  The
    first tick is armed immediately on construction; stop() cancels the
    pending tick.
    """

    def __init__(self, interval, function, *args, **kwargs):
        self._timer = None
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.is_running = False
        self.start()  # arm the first tick right away

    def _run(self):
        # Re-arm the next tick first, then fire the user callback.
        self.is_running = False
        self.start()
        self.function(*self.args, **self.kwargs)

    def start(self):
        # No-op while a tick is already pending.
        if self.is_running:
            return
        self._timer = Timer(self.interval, self._run)
        self._timer.start()
        self.is_running = True

    def stop(self):
        # Cancel the pending tick and mark the timer idle.
        self._timer.cancel()
        self.is_running = False
class MultiCast():
    """Send and receive UDP multicast datagrams on a group/port pair."""

    def __init__(self, port, group, ttl=1):
        # port: UDP port; group: multicast group address (IPv4);
        # ttl: multicast time-to-live hop limit.
        self.port = port
        self.group = group
        self.ttl = ttl
        self.stopListener = False

    def send(self, uid, msg=""):
        """Send one NUL-terminated "uid:msg" datagram to the group."""
        addrinfo = socket.getaddrinfo(self.group, None)[0]
        s = socket.socket(addrinfo[0], socket.SOCK_DGRAM)
        # Set Time-to-live (optional)
        ttl_bin = struct.pack('@i', self.ttl)
        s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl_bin)
        data = uid + ":" + msg
        s.sendto((data + '\0').encode(), (addrinfo[4][0], self.port))

    def recv(self, callback):
        """Join the group and deliver each datagram to *callback*.

        Blocks forever.  NOTE(review): *callback* must expose a queue-like
        ``put`` method - plain callables (such as the ``aaa`` function
        below) will raise AttributeError; confirm the intended contract.
        """
        # Look up multicast group address in name server and find out IP version
        addrinfo = socket.getaddrinfo(self.group, None)[0]
        # Create a socket
        s = socket.socket(addrinfo[0], socket.SOCK_DGRAM)
        # Allow multiple copies of this program on one machine
        # (not strictly needed)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bind it to the port
        s.bind(('', self.port))
        group_bin = socket.inet_pton(addrinfo[0], addrinfo[4][0])
        # Join group
        if addrinfo[0] == socket.AF_INET: # IPv4
            mreq = group_bin + struct.pack('=I', socket.INADDR_ANY)
            s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
        # Loop, printing any data we receive
        while True:
            if callback == None:
                return
            data, sender = s.recvfrom(1500)
            data = data.decode()
            while data[-1:] == '\0': data = data[:-1] # Strip trailing \0's
            #callback(sender, data.split(':'))
            callback.put([sender, data])
def aaa(sender, data):
    """Debug receive-callback: print the sender address and the payload.

    *sender* is the ``(host, port)`` tuple produced by ``recvfrom``; the
    original concatenated it into a string with ``+``, which raises
    TypeError for tuples.  Letting print() format the arguments fixes
    that while keeping the same diagnostic output.
    """
    print("sender:", sender, "DATA:")
    print(data)
def main():
    """Run as a periodic sender with -s, otherwise as a blocking receiver."""
    MYPORT = 8193
    MYGROUP = '234.233.232.231'
    m = MultiCast(MYPORT,MYGROUP,1)
    if "-s" in sys.argv[1:]:
        # Sender: broadcast a test message every 5 seconds.
        # (continuousTimer already arms itself in __init__; the explicit
        # start() is a guarded no-op.)
        t = continuousTimer(5, m.send, "12345", "Hello there")
        t.start()
    else:
        # Receiver: block forever handing datagrams to aaa.
        # NOTE(review): recv() calls callback.put(...), but aaa is a plain
        # function - the first datagram would raise AttributeError.
        m.recv(aaa)
if __name__ == '__main__':
    main()
| [
"adam_compton@gmail.com"
] | adam_compton@gmail.com |
917d288dcb4753271813f615e9efb145d87fbe8e | fe9753ee7961e727c8152f2fefc070d142118441 | /JSONPARCER.py | 16542f78982479c01ce725d9138358503de44e1c | [] | no_license | sviridchik/ISP2_4SEM | 443e059f0cb31ff63e84a32eb10b169cd5592b74 | ab9e98fb18e31ebe3c9b2c281c70b52f16b1a733 | refs/heads/main | 2023-05-14T18:48:41.688368 | 2021-05-12T19:44:34 | 2021-05-12T19:44:34 | 364,898,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py |
# Sample JSON configuration document used as the regex-parsing target.
a ="""{
  "AllData": {"ArchiveAndDearchieveConfiguration": {
    "Archieve": true,
    "DeArchieve": true
  },
  "FinderInfo": {
    "Coding": "utf8",
    "SourceDirectory": "/Users/victoriasviridchik/Desktop/lab2/SourceDir",
    "TargetDirectory": "/Users/victoriasviridchik/Desktop/lab2/TargetDir/",
    "LogPath": "/Users/victoriasviridchik/Desktop/zoo/templog.txt",
    "NeedToLog": true
  },
  "CompressingOptions": {
    "Compressing": false
  },
  "EncryptingAndDecriptingOptions": {
    "RandomKey": true,
    "Encrypt": false,
    "DEncrypt": false
  },
  "DataOptions": {
    "Server": "localhost\\SQLEXPRESS",
    "Database": "master",
    "Trusted_Connection": true
  }
}
}"""
import re as r
# NOTE(review): the pattern expects doubled quotes ("") around keys, but the
# document above uses single double-quotes, so no match is possible here.
pattern = '""(\w*[^""{}])"": {([^}]*)} ?'
# NOTE(review): re.match returns a single Match object (or None when there
# is no match), not an iterable of matches - the loop below raises
# TypeError either way. re.finditer(pattern, a) is likely what was meant;
# for real JSON, json.loads would be more robust than a regex.
res = r.match(pattern,a)
for r in res:
    print(r) | [
"noreply@github.com"
] | noreply@github.com |
16ccd075198cce6a93498c64c9593d76411a3ab4 | 3d4496ab41b343d8d0f59cb07cbca42c53a9c63e | /collecte_json.py | 4fb28acbcc19bdbc5122f3794d5350fff7975059 | [] | no_license | RayenGs/BEAR | fbad1b932f75dbe471e37883f6c8c1e1ed891317 | fc05afeacc4dde79d8f6e5f4def8bceaa3bc7104 | refs/heads/master | 2020-04-28T05:27:48.516287 | 2019-04-11T09:08:46 | 2019-04-11T09:08:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,945 | py | #!/usr/bin/python3
import os, json, time
from http.server import BaseHTTPRequestHandler, HTTPServer
hostName = ""    # empty host = bind on all interfaces
hostPort = 8000  # TCP port the collector listens on
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
    """Collects JSON status reports POSTed by clients into /tmp/datas.json.

    Each POST body is a JSON object keyed by client MAC address; reports
    are merged into the on-disk file, updating known MACs and adding new
    ones.  GET returns a simple liveness response.
    """

    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        # wfile expects bytes in Python 3; writing a str raised TypeError.
        self.wfile.write(b"Hello, world!")

    def do_POST(self):
        # Read exactly Content-Length bytes of body and decode the JSON
        # payload (a dict keyed by MAC address).
        content_length = int(self.headers['Content-Length'])
        body = self.rfile.read(content_length)
        datas_rcv = json.loads(body.decode())
        if os.path.isfile('/tmp/datas.json'):  # the file already exists
            with open('/tmp/datas.json', 'r+') as f:
                datas_file = json.load(f)
            # Merge the received report: update entries for MACs already
            # on disk and insert entries for new ones.  (The original
            # nested-loop merge only added the *last* received MAC when
            # it was absent; dict.update handles any number correctly.)
            datas_file.update(datas_rcv)
            with open('/tmp/datas.json', 'w+') as f:
                f.write(json.dumps(datas_file, indent=4))
        else:
            # First report ever: create the file from the received data.
            with open('/tmp/datas.json', 'w') as f:
                f.write(json.dumps(datas_rcv, indent=4))
httpd = HTTPServer((hostName, hostPort), SimpleHTTPRequestHandler) # create the server
print(time.asctime(), "Server Starts - %s:%s" % (hostName, hostPort))
httpd.serve_forever() # serve requests until the process is killed
| [
"rayen.gouasmi@yahoo.com"
] | rayen.gouasmi@yahoo.com |
78f02da277f4298af340f876baf26f6f0f8ce38a | 9b2255e0a474555d8a4d90f586e280d40224a181 | /apps/common/urls.py | b60c575f1aeced024158b7e6f3fbde0719d8e8eb | [] | no_license | rogeriofalcone/redirector | 85f496f7c3a3c755b2d9f86f90d25ace783842e4 | 8255be80ce4e3245317864dcc580a1ef68a7c244 | refs/heads/master | 2020-04-08T07:03:19.053680 | 2012-08-12T19:13:35 | 2012-08-12T19:13:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,259 | py | from django.conf.urls.defaults import patterns, url
from django.views.generic.simple import direct_to_template
from django.conf import settings
# Routes served by the local common.views module.
urlpatterns = patterns('common.views',
    url(r'^about/$', direct_to_template, {'template': 'about.html'}, 'about_view'),
    url(r'^changelog/$', 'changelog_view', (), 'changelog_view'),
    url(r'^license/$', 'license_view', (), 'license_view'),
    url(r'^password/change/done/$', 'password_change_done', (), name='password_change_done'),
    url(r'^object/multiple/action/$', 'multi_object_action_view', (), name='multi_object_action_view'),
    url(r'^user/$', 'current_user_details', (), 'current_user_details'),
    url(r'^user/edit/$', 'current_user_edit', (), 'current_user_edit'),
    url(r'^login/$', 'login_view', (), name='login_view'),
)

# Authentication routes delegated to django.contrib.auth, with local
# templates and post-action redirects.
urlpatterns += patterns('',
    url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/top_redirect/'}, name='logout_view'),
    url(r'^password/change/$', 'django.contrib.auth.views.password_change', {'template_name': 'password_change_form.html', 'post_change_redirect': '/password/change/done/'}, name='password_change_view'),
    url(r'^password/reset/$', 'django.contrib.auth.views.password_reset', {'email_template_name': 'password_reset_email.html', 'template_name': 'password_reset_form.html', 'post_reset_redirect': '/password/reset/done'}, name='password_reset_view'),
    url(r'^password/reset/confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', 'django.contrib.auth.views.password_reset_confirm', {'template_name': 'password_reset_confirm.html', 'post_reset_redirect': '/password/reset/complete/'}, name='password_reset_confirm_view'),
    url(r'^password/reset/complete/$', 'django.contrib.auth.views.password_reset_complete', {'template_name': 'password_reset_complete.html'}, name='password_reset_complete_view'),
    url(r'^password/reset/done/$', 'django.contrib.auth.views.password_reset_done', {'template_name': 'password_reset_done.html'}, name='password_reset_done_view'),
    # (r'^favicon\.ico$', 'django.views.generic.simple.redirect_to', {'url': '%s%s' % (settings.STATIC_URL, 'images/favicon.ico')}),
)

# Built-in i18n language switcher.
urlpatterns += patterns('',
    url(r'^set_language/$', 'django.views.i18n.set_language', name='set_language'),
)
| [
"Roberto.Rosario.Gonzalez@gmail.com"
] | Roberto.Rosario.Gonzalez@gmail.com |
8fd4193624ed4b3ec5193cdc4ac863af4ddabfdf | 50b77b527b95659c6ac8484a1091a70b4ad25d73 | /2019/19/aoc19.py | fdfbb0fd8a49953dc81279bd988da641306ef860 | [] | no_license | cjuub/advent-of-code | d3a4569dd0b7bf7e10dc6a76a1ffe569df4e93a2 | bb92d8ae96cde8c3e57abed26019e692fa6e168f | refs/heads/master | 2023-01-10T00:32:56.847184 | 2023-01-02T20:46:57 | 2023-01-02T20:46:57 | 160,243,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,337 | py | #!/usr/bin/env python3
from typing import List
class IntCodeComputer:
    """Advent of Code 2019 Intcode virtual machine.

    ``execute`` runs until an output instruction produces a value (which
    is returned) or until HALT, which is signalled by raising
    ``HaltException`` carrying the last output.  Parameter modes:
    0 = position, 1 = immediate, 2 = relative (to ``_rel_base``).
    """
    OP_ADD = 1
    OP_MUL = 2
    OP_LOAD = 3
    OP_STORE = 4
    OP_JUMP_IF_TRUE = 5
    OP_JUMP_IF_FALSE = 6
    OP_LESS_THAN = 7
    OP_EQUALS = 8
    OP_REL_BASE = 9
    OP_HALT = 99

    class HaltException(Exception):
        # Raised on opcode 99; carries the most recent output value.
        pass

    def __init__(self, memory: List[int]):
        # Copy the program and pad with zeroed extra memory.
        self._memory = memory[:] + [0] * 100000
        self._pc = 0              # program counter
        self._input = []          # pending input values (see set_input)
        self._inputs_read = 0     # how many inputs have been consumed
        self._output = 0          # last value produced by OP_STORE
        self._rel_base = 0        # base for relative-mode addressing
        # Dispatch table: opcode -> handler(op1, op2, res).
        self._instructions = {IntCodeComputer.OP_ADD: self._add,
                              IntCodeComputer.OP_MUL: self._mul,
                              IntCodeComputer.OP_LOAD: self._load,
                              IntCodeComputer.OP_STORE: self._store,
                              IntCodeComputer.OP_JUMP_IF_TRUE: self._jump_if_true,
                              IntCodeComputer.OP_JUMP_IF_FALSE: self._jump_if_false,
                              IntCodeComputer.OP_LESS_THAN: self._less_than,
                              IntCodeComputer.OP_EQUALS: self._equals,
                              IntCodeComputer.OP_REL_BASE: self._change_rel_base}

    def _add(self, op1, op2, res):
        # res <- op1 + op2
        self._memory[res] = op1 + op2
        self._pc += 4

    def _mul(self, op1, op2, res):
        # res <- op1 * op2
        self._memory[res] = op1 * op2
        self._pc += 4

    def _load(self, op1, op2, res):
        # mem[op1] <- next pending input value
        self._memory[op1] = self._input[self._inputs_read]
        self._inputs_read += 1
        self._pc += 2

    def _store(self, op1, op2, res):
        # Emit op1; returning non-None makes execute() yield it.
        self._output = op1
        self._pc += 2
        return self._output

    def _jump_if_true(self, op1, op2, res):
        # pc <- op2 when op1 is non-zero
        if op1 != 0:
            self._pc = op2
        else:
            self._pc += 3

    def _jump_if_false(self, op1, op2, res):
        # pc <- op2 when op1 is zero
        if op1 == 0:
            self._pc = op2
        else:
            self._pc += 3

    def _less_than(self, op1, op2, res):
        # res <- 1 if op1 < op2 else 0
        if op1 < op2:
            self._memory[res] = 1
        else:
            self._memory[res] = 0
        self._pc += 4

    def _equals(self, op1, op2, res):
        # res <- 1 if op1 == op2 else 0
        if op1 == op2:
            self._memory[res] = 1
        else:
            self._memory[res] = 0
        self._pc += 4

    def _change_rel_base(self, op1, op2, res):
        # Shift the relative addressing base by op1.
        self._rel_base += op1
        self._pc += 2

    def execute(self) -> int:
        """Run until the next output value; raise HaltException on 99."""
        while True:
            # Decode: last two digits are the opcode, the remaining digits
            # (right to left) are the per-operand parameter modes.
            op_code_str = str(self._memory[self._pc]).rjust(5, '0')
            op_code = int(op_code_str[-2:])
            op1_mode = int(op_code_str[2])
            op2_mode = int(op_code_str[1])
            op3_mode = int(op_code_str[0])
            if op_code == IntCodeComputer.OP_HALT:
                raise IntCodeComputer.HaltException(self._output)
            if op1_mode == 0:
                # Only instruction with write on op1
                if op_code == IntCodeComputer.OP_LOAD:
                    op1 = self._memory[self._pc + 1]
                else:
                    op1 = self._memory[self._memory[self._pc + 1]]
            elif op1_mode == 1:
                op1 = self._memory[self._pc + 1]
            else:
                # Relative mode: LOAD needs the target address itself,
                # every other opcode needs the value stored there.
                if op_code == IntCodeComputer.OP_LOAD:
                    op1 = self._rel_base + self._memory[self._pc + 1]
                else:
                    op1 = self._memory[self._rel_base + self._memory[self._pc + 1]]
            if op2_mode == 0:
                op2 = self._memory[self._memory[self._pc + 2]]
            elif op2_mode == 1:
                op2 = self._memory[self._pc + 2]
            else:
                op2 = self._memory[self._rel_base + self._memory[self._pc + 2]]
            # op3 is always a write target, so resolve it to an address.
            if op3_mode == 0:
                res = self._memory[self._pc + 3]
            elif op3_mode == 1:
                res = self._pc + 3
            else:
                res = self._rel_base + self._memory[self._pc + 3]
            ret = self._instructions[op_code](op1, op2, res)
            if ret is not None:
                return int(ret)

    def set_input(self, value):
        """Replace the pending input list consumed by OP_LOAD."""
        self._input = value
# Day 19: probe the tractor beam. The Intcode program answers one (x, y)
# query per run with 0 (no beam) or 1 (beam).
with open('input.txt') as fp:
    code = list(map(int, fp.readline().strip().split(",")))

# Part 1: count beam cells in the 50x50 area closest to the emitter.
grid = [['' for y3 in range(100)] for x3 in range(100)]
cnt = 0
for y2 in range(50):
    for x2 in range(50):
        x = 0
        y = 0
        comp = IntCodeComputer(code)
        comp.set_input([x2, y2])
        try:
            while True:
                out = comp.execute()
                grid[y2][x2] = out
                if out == 1:
                    cnt += 1
        except IntCodeComputer.HaltException:
            pass
for y2 in range(50):
    for x2 in range(50):
        print(grid[y2][x2], end='')
    print()
print('Part 1: ' + str(cnt))

# Part 2: render a 200x100 window around the manually-located spot where
# a 100x100 square first fits inside the beam, to verify it visually.
grid = [['.' for y3 in range(200)] for x3 in range(100)]
for y2 in range(100):
    for x2 in range(200):
        comp = IntCodeComputer(code)
        # lol, found it by trial and error and manual binary search
        comp.set_input([x2 + 650, y2 + 1097])
        try:
            while True:
                out = comp.execute()
                if out == 1:
                    grid[y2][x2] = '#'
        except IntCodeComputer.HaltException:
            pass
for y2 in range(100):
    for x2 in range(200):
        print(grid[y2][x2], end='')
    print()
# Answer format: x * 10000 + y of the square's top-left corner.
print('Part 2: ' + str((650 + 17) * 10000 + 1097)) | [
"cjuuub@gmail.com"
] | cjuuub@gmail.com |
29386c53267fcde8ec74fd73da0e10408d9972d5 | 7cca97849a01d940298c308a751e3b0877fa351d | /BI_6.0.7_WebUI_AUTOTOOLS_003/BI_6.0.7_WebUI_AUTOTOOLS_03/BI_6.0.7_WebUI_AUTOTOOLS_03/test_case/bi/订购分析/case周期订购C3.py | bc8ac9667feabeb4b7f5d0cb95737f42fe99fd98 | [] | no_license | demi52/mandy | bdbdd2469ae66fe3e72998352e7b6cb24b7486e7 | 8c3a2447e53f1fcf7d418e171a01c8e94fc4c8ae | refs/heads/master | 2020-06-24T21:26:30.243072 | 2019-08-06T16:05:13 | 2019-08-06T16:05:13 | 199,095,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,204 | py | from config.conf import Suite
__author__ = "luxu"
from lib.funcslib import Ioput
Ioput.input("file", r"ordercycle.ini")
from flow.biportal.orderbase import Order周期订购详情C3,Order周期订购C3
from lib.testsam import *
file=Ioput.output("file")
class CaseSample首页(FunctionSample):
    """Exec-generates one homepage test method per case read from the ini.

    For each case three optional tests (table data, export data, keyword
    search) are defined at class-creation time, gated by Suite switches.
    NOTE(review): generated method names embed the raw casename - confirm
    casenames are valid identifier fragments.
    """
    for f in Get.out(Ioput.output("file"), "time"):
        if Suite.table_data:
            exec("""
def test_{0}__表格数据(self):
    Ioput.function_name(self.__class__.__name__)
    self.首页_table({1})
""".format(f.get("casename"), f))
        if Suite.xlsx_data:
            exec("""
def test_{0}_导出数据(self):
    Ioput.function_name(self.__class__.__name__)
    self.首页_xlsx({1})
""".format(f.get("casename"), f))
        if Suite.keyword:
            exec("""
def test_{0}_关键字搜索(self):
    Ioput.function_name(self.__class__.__name__)
    self.h_keyword({1})
""".format(f.get("casename"), f))
class CaseSample详情(FunctionSample):
    """Exec-generates one detail-page test method per case from the ini.

    Same pattern as CaseSample首页; the keyword-search test is only
    generated for the 区域分布 (region distribution) case.
    """
    for f in Get.out(Ioput.output("file"), "detail"):
        if Suite.table_data:
            exec("""
def test_{0}__表格数据(self):
    Ioput.function_name(self.__class__.__name__)
    self.详情_table({1})
""".format(f.get("casename"), f))
        if Suite.xlsx_data:
            exec("""
def test_{0}_导出数据(self):
    Ioput.function_name(self.__class__.__name__)
    self.详情_xlsx({1})
""".format(f.get("casename"), f))
        if Suite.keyword and f.get("casename") in ("区域分布",):
            exec("""
def test_{0}_关键字搜索(self):
    Ioput.function_name(self.__class__.__name__)
    self.d_keyword({1})
""".format(f.get("casename"), f))
# Concrete homepage test class bound to the Order周期订购C3 page object.
Test1=type("Test周期订购C3_首页", (CaseSample首页,), {"obj":Order周期订购C3})
# Test2 = type("Test周期订购详情C3_日",(CaseSample详情,), {"obj": Order周期订购详情C3,"time_type":"day"})
# One detail test class is exec-defined per table type read from the ini.
for c in Get.out(file, "tabletype"):
    exec("""
class Test周期订购详情C3_{0}(CaseSample详情):
    obj=Order周期订购详情C3
    timetype="{1}"
""".format(c.get("casename"), c.get("tt"))
         )
if __name__ == "__main__":
    print(dir())
| [
"daimeiwangcard@163.com"
] | daimeiwangcard@163.com |
def multiplicativeCipherEncrypt(pt, numberOfShits):
    """Encrypt *pt* with a multiplicative cipher: c = (p * key) mod 26.

    Spaces are stripped and letters lower-cased before mapping; the
    ciphertext is returned upper-cased.  Characters with no mapping
    (digits, punctuation) are passed through unchanged instead of
    raising KeyError as the original did.
    """
    pt = pt.replace(" ", "").lower()
    alphabets = [chr(i) for i in range(97, 123)]
    # Map each letter to the letter at (index * key) mod 26.
    mapping = {c: alphabets[(alphabets.index(c) * numberOfShits) % 26]
               for c in alphabets}
    ct = ''.join(mapping.get(ch, ch) for ch in pt)
    return ct.upper()
def multiplicativeCipherDecrypt(ct, n):
    """Decrypt a multiplicative cipher: p = (c * n^-1) mod 26.

    Raises ValueError when *n* has no modular inverse mod 26 (n shares a
    factor with 26); the original silently left the inverse at 0, which
    mapped every letter to 'a'.  Unmapped characters (digits,
    punctuation) pass through unchanged.
    """
    ct = ct.replace(" ", "").lower()
    alphabets = [chr(i) for i in range(97, 123)]
    # Find the multiplicative inverse of n modulo 26, if one exists.
    n_inverse = next((i for i in range(26) if (n * i) % 26 == 1), None)
    if n_inverse is None:
        raise ValueError(
            "key %r has no inverse modulo 26; choose n coprime with 26" % (n,))
    mapping = {c: alphabets[(alphabets.index(c) * n_inverse) % 26]
               for c in alphabets}
    return ''.join(mapping.get(ch, ch) for ch in ct)
# Interactive loop: prompt for encrypt/decrypt, the text, and the key n,
# until the user answers N/n to the continue prompt.
while(True):
    ch = input("\nFor Encryption: Enter 1\nFor Decryption: Enter 2\n")
    if ch == '1':
        pt = input("Enter Plain Text: ")
        n = input("Enter value of n: ")
        ct = multiplicativeCipherEncrypt(pt, int(n))
        print("\nPlain Text : '{0}'\nCipher Text: '{1}'".format(pt, ct))
    elif ch == '2':
        ct = input("Enter Cipher Text: ")
        n = input("Enter value of n: ")
        pt = multiplicativeCipherDecrypt(ct,int(n))
        print("\nCipher Text : '{0}'\nPlain Text: '{1}'".format(ct, pt))
    else:
        print("Invalid choice!!")
    ch = input("\nPress 'Y' to continue, else press 'N'\n")
    if ch == 'N' or ch == 'n':
        break | [
"mandardeo1997@gmail.com"
] | mandardeo1997@gmail.com |
6679f8edd473df05f89e950e19b4b6a8ea6cb8d8 | e5b65726d20ba10bd505b174d2e52e0dc3dd906c | /_python/django/DjangoRelationships/apps/firstapp/urls.py | b5cb4644b268a173a95ef63b3f9fcca8819cb78a | [] | no_license | codingdojotulsaaugust2019/instructor_phil | f4356b2b830df59315b817653592d9516f8ae6d2 | d84ac8c472d6ebad6ffadba31306ce2515d01086 | refs/heads/master | 2023-01-28T08:33:01.171107 | 2019-11-12T22:52:48 | 2019-11-12T22:52:48 | 203,250,688 | 0 | 1 | null | 2023-01-07T11:33:10 | 2019-08-19T21:10:24 | Python | UTF-8 | Python | false | false | 184 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'^cars$', views.add_car),
url(r'^passengers$', views.add_passengers),
]
| [
"pkrull@codingdojo.com"
] | pkrull@codingdojo.com |
10307f97047866c36565b596a4eda346c81bc545 | d9e35a05bad8b2718f48c5fd10c554c03b26d8de | /Cron/event_cal/sensitivity.py | a4d1eaf991cc62622fe967a782dd28573babae61 | [] | no_license | lwenj2012/PoliticalInfiltration-1 | 498c65705bdc56678d966a63a6e65f6e8f7e0435 | 06f137b001f204aad8e5c997402548695cf4a89c | refs/heads/master | 2021-03-03T10:23:43.507672 | 2020-04-02T11:49:05 | 2020-04-02T11:49:05 | 245,954,003 | 0 | 2 | null | 2020-03-09T05:48:17 | 2020-03-09T05:48:16 | null | UTF-8 | Python | false | false | 6,453 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Hu
@time: 2020/2/18 10:10
@file: sensitivity.py
"""
import re
import ngtpy
from bert_serving.client import BertClient
import numpy as np
import os
import pandas as pd
from elasticsearch import helpers
import sys
import math
sys.path.append('../../')
from Config.db_utils import ees, pi_cur
from Config.base import BERT_HOST, BERT_PORT, BERT_PORT_OUT
def jinghua(text1):
text = re.search('(.*?)//@', text1)
if text is not None:
text1 = text.group(1)
re_rp = re.compile('回覆@.+?:')
text1 = re_rp.sub('', text1)
re_rp2 = re.compile('回复@.+?:')
text1 = re_rp2.sub('', text1)
re_at = re.compile('@.+?:')
text1 = re_at.sub('', text1)
re_at2 = re.compile('@.+?:')
text1 = re_at2.sub('', text1)
re_at3 = re.compile('@.+? ')
text1 = re_at3.sub('', text1)
re_link = re.compile('http://[a-zA-Z0-9.?/&=:]*')
re_links = re.compile('https://[a-zA-Z0-9.?/&=:]*')
text1 = re_link.sub("", text1)
text1 = re_links.sub("", text1)
if text1 in {'转发微博', '轉發微博', 'Repost', 'repost'}:
text1 = ''
if text1.startswith('@'):
text1 = ''
re_link = re.compile('t.cn/[a-zA-Z0-9.?/&=:]*')
text1 = re_link.sub("", text1)
re_jh = re.compile('[\u4E00-\u9FA5]|[\\w]|[,.,。!:!、??: ]')
text1 = re_jh.findall(text1)
text1 = ''.join(text1)
text1 = re.sub(' +', ' ', text1) # 多个空格转为单个空格
return text1
# 数据处理
def data_process(data):
# 输入dict{mid:text}
texts = []
for mid in list(data.keys()):
text = jinghua(data[mid]['text']).strip()
if text != '':
texts.append(text)
else:
del data[mid]
return data, texts
# 转换bert向量
def bert_vec(texts):
with BertClient(ip=BERT_HOST, port=BERT_PORT, port_out=BERT_PORT_OUT) as bc:
vec = bc.encode(texts)
vec = list(vec)
return vec
def ANN_cal(index, vec, y):
# index = ngtpy.Index(str(e_id) + '.anng')
label = []
for i in vec:
results = index.search(i, size=8)
sum = 0
for j in results:
sum += j[1]
if sum == 0:
pos = 0
neg = 1
else:
pos = 0
neg = 0
for j in results:
if y[j[0]] == 1:
pos += 1 - j[1] / sum
else:
neg += 1 - j[1] / sum
if pos > neg:
label.append(1)
else:
label.append(0)
return label
def create_ANN(e_id, pos_data, neg_data):
ngtpy.create(path=str(e_id) + '.anng', dimension=768, distance_type="L2")
index = ngtpy.Index(str(e_id) + '.anng')
nX1 = np.array(list(pos_data['vec']))
nX2 = np.array(list(neg_data['vec']))
objects = np.concatenate((nX1, nX2))
index.batch_insert(objects)
index.build_index()
y = np.concatenate((np.ones(len(nX1), dtype=int), np.zeros(len(nX2), dtype=int)))
return index, y
def get_pos(POS_NUM):
cursor = pi_cur()
sql = 'select i_id, text from Event_information, Information where information_id = i_id group by information_id order by Count(*) DESC, hazard_index DESC'
cursor.execute(sql)
try:
result = cursor.fetchall()[:POS_NUM]
except:
result = cursor.fetchall()
mid = [i['i_id'] for i in result]
texts = [i['text'] for i in result]
return mid, texts
def get_pos_data(e_id, POS_NUM):
if os.path.exists(e_id + '.pkl'):
pos_data = pd.read_pickle(e_id + '.pkl')
else:
pos_data = pd.DataFrame(columns=('mid', 'vec'))
mid, texts = get_pos(int(POS_NUM))
pos_data['mid'] = mid
pos_data['vec'] = bert_vec(texts)
pos_data.to_pickle(e_id + '.pkl')
return pos_data
def get_neg_data(e_index, NEG_NUM):
NEG_NUM = int(NEG_NUM)
query_body = {
'query': {
'match_all': {}
}
}
es_result = helpers.scan(
client=ees,
query=query_body,
scroll='1m',
index=e_index,
timeout='1m'
)
neg_data = pd.DataFrame(columns=('mid', 'vec'))
mid = []
vec = []
es_result = list(es_result)
if len(es_result) > 100000:
index_list = set(np.random.choice(range(len(es_result)), size=NEG_NUM, replace=False))
for index, item in enumerate(es_result):
if index not in index_list:
continue
mid.append(item['_source']['mid'])
vec.append(item['_source']['text'])
neg_data['mid'] = mid
neg_data['vec'] = bert_vec(vec)
else:
index_list = set(np.random.choice(range(len(es_result)), size=int(len(es_result) / 10), replace=False))
for index, item in enumerate(es_result):
if index not in index_list:
continue
mid.append(item['_source']['mid'])
vec.append(item['_source']['text'])
neg_data['mid'] = mid
neg_data['vec'] = bert_vec(vec)
return neg_data
def sensitivity(e_id, data, e_index, POS_NUM, NEG_NUM):
# data = dict_slice(data, 0, 25) # 测试代码,采样一小部分数据
data, texts = data_process(data)
pos_data = get_pos_data(e_id, POS_NUM)
neg_data = get_neg_data(e_index, NEG_NUM)
index, y = create_ANN(e_id, pos_data, neg_data) #返回index 取消保存
batch_num = 12800
batch_all = math.ceil(len(texts) / batch_num)
label = []
for batch_epoch in range(batch_all):
texts_batch = texts[batch_epoch * batch_num: (batch_epoch + 1) * batch_num]
print("文本{}至{}, 共{}".format(batch_epoch * batch_num, (batch_epoch + 1) * batch_num, len(texts)))
vec = bert_vec(texts)
label_batch = ANN_cal(index, vec, y) # eid改成index
label.extend(label_batch)
for i, j in zip(list(data.keys()), label):
if j == 0:
del data[i]
return data
def dict_slice(ori_dict, start, end):
"""
字典类切片
:param ori_dict: 字典
:param start: 起始
:param end: 终点
:return:
"""
slice_dict = {k: ori_dict[k] for k in list(ori_dict.keys())[start:end]}
return slice_dict
| [
"huzf1218@gmail.com"
] | huzf1218@gmail.com |
0ac533c32c7ad7b44e56bbcaef655495db6d3b65 | a440b7f3427230ace64bb7abf1c57d83f981ea2c | /P05 - K-means Clustering/P05.py | 6385e8834edd0e9395d04dd44e014120da164b03 | [] | no_license | HyperionNKJ/Machine-Learning | 0786a0c5a895fb02b267807a725d199ab4f6564a | c5c2485fce2621ac1d2fe05ad586f54cc9afddcc | refs/heads/master | 2020-12-13T23:10:49.200577 | 2020-01-17T13:59:59 | 2020-01-17T14:53:51 | 234,558,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,389 | py | import numpy as np
from matplotlib import pyplot as plt
import matplotlib.cm as cm
class kmeans:
def __init__(self, x1, x2, k):
self.x1 = x1
self.x2 = x2
self.k = k
self.X = np.array(list(zip(x1, x2)))
# return X, cluster labels, coordinates of cluster centers(shape = (15,2))
def clustering(self):
# initial cluster centers
np.random.seed(0)
# x coordinates of random cluster center
C_x = np.random.randint(0, np.max(self.x1)-np.mean(self.x1), size=self.k)
# y coordinates of random cluster center
C_y = np.random.randint(0, np.max(self.x2)-np.mean(self.x2), size=self.k)
self.C = np.array(list(zip(C_x, C_y)), dtype=np.float32)
self.cluster_labels = np.zeros(self.X.shape[0], dtype=int) # initialize labels to all 0
while(True):
old_C = self.C.copy()
self.assign_labels()
self.revise_centroids()
if (np.array_equal(old_C, self.C)): break
assert self.cluster_labels.shape == (self.X.shape[0],)
assert self.C.shape == (self.k,2)
return self.X, self.cluster_labels, self.C
# Euclidean distance
def EuclideanDistance(self, a, b, ax = 1):
distance = np.linalg.norm(a-b)
return distance
def assign_labels(self):
for i in range(self.X.shape[0]):
dists = np.array([self.EuclideanDistance(self.X[i], cc) for cc in self.C])
self.cluster_labels[i] = np.argmin(dists) # use index of centroids as labels
def revise_centroids(self):
for j in range(self.k):
self.C[j] = self.X[self.cluster_labels == j].mean(axis=0)
def cluster_heterogeneity(self):
heterogeneity = 0
for j in range(self.k):
members = self.X[self.cluster_labels == j]
for member in members:
heterogeneity += self.EuclideanDistance(member, self.C[j])**2
return heterogeneity
def plot_data(X, cluster_labels, C, k):
fig = plt.figure(figsize=(10,5))
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
label_color = [colors[label] for label in cluster_labels]
plt.scatter(X[:,0], X[:,1], s=1, c=label_color)
plt.scatter(C[:,0], C[:,1], marker = '*', s = 700, c='k')
return plt | [
"kjneo1996@gmail.com"
] | kjneo1996@gmail.com |
e15b73b908b5a921a72dca61a06a943c370f99cd | f0d925b64af90d903971aeb23225d9a4e98ee77d | /.ENV/bin/pip3.8 | a98feb7c8a598d868ad62356c4a9adff236c6bde | [] | no_license | joseduno/django-playground-web | 8d0fd7c8746eaf4ffcd83970f95340dd23234f2b | a2121ac5e0e1ac06490e08b07f9f305988969778 | refs/heads/master | 2022-12-22T07:36:58.654226 | 2020-10-04T20:00:05 | 2020-10-04T20:00:05 | 291,525,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | 8 | #!/home/duno/CursoDjango/.ENV/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"jose.duno@spymovil.com"
] | jose.duno@spymovil.com |
990e2217fe711cd73fd1d06d8903d92f9d4bb47a | 7d2f933ed3c54e128ecaec3a771817c4260a8458 | /venv/Lib/site-packages/sklearn/tests/test_random_projection.py | 4a928196200e0a3c341825db400be0947b1e67b0 | [] | no_license | danielmoreira12/BAProject | c61dfb1d0521eb5a28eef9531a00e744bfb0e26a | 859f588305d826a35cc8f7d64c432f54a0a2e031 | refs/heads/master | 2021-01-02T07:17:39.267278 | 2020-02-25T22:27:43 | 2020-02-25T22:27:43 | 239,541,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,126 | py |
import functools
import numpy as np
import pytest
import scipy.sparse as sp
from sklearn.exceptions import DataDimensionalityWarning
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import GaussianRandomProjection
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import _gaussian_random_matrix
from sklearn.random_projection import _sparse_random_matrix
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_warns
all_sparse_random_matrix = [_sparse_random_matrix]
all_dense_random_matrix = [_gaussian_random_matrix]
all_random_matrix = all_sparse_random_matrix + all_dense_random_matrix
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
np.full((10, 10), 0.5))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert random_matrix(1, 5).shape == (1, 5)
assert random_matrix(5, 1).shape == (5, 1)
assert random_matrix(5, 5).shape == (5, 5)
assert random_matrix(1, 1).shape == (1, 1)
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrix should produce a transformation matrix
# with zero mean and unit norm for each columns
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
@pytest.mark.parametrize("random_matrix", all_random_matrix)
def test_basic_property_of_random_matrix(random_matrix):
# Check basic properties of random matrix generation
check_input_size_random_matrix(random_matrix)
check_size_generated(random_matrix)
check_zero_mean_and_unit_norm(random_matrix)
@pytest.mark.parametrize("random_matrix", all_sparse_random_matrix)
def test_basic_property_of_sparse_random_matrix(random_matrix):
check_input_with_sparse_random_matrix(random_matrix)
random_matrix_dense = functools.partial(random_matrix, density=1.0)
check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
# Check some statical properties of Gaussian random matrix
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = _gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
# Check some statical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = _sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert np.sqrt(s) / np.sqrt(n_components) in values
assert - np.sqrt(s) / np.sqrt(n_components) in values
if density == 1.0:
assert np.size(values) == 2
else:
assert 0. in values
assert np.size(values) == 3
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert distances_ratio.max() < 1 + eps
assert 1 - eps < distances_ratio.min()
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert rp.n_components == 'auto'
assert rp.n_components_ == 110
if RandomProjection in all_SparseRandomProjection:
assert rp.density == 'auto'
assert_almost_equal(rp.density_, 0.03, 2)
assert rp.components_.shape == (110, n_features)
projected_1 = rp.transform(data)
assert projected_1.shape == (n_samples, 110)
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert projected.shape == (n_samples, 100)
assert rp.components_.shape == (100, n_features)
assert rp.components_.nnz < 115 # close to 1% density
assert 85 < rp.components_.nnz # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
# TODO remove in 0.24
def test_deprecations():
with pytest.warns(FutureWarning, match="deprecated in 0.22"):
gaussian_random_matrix(10, 100)
with pytest.warns(FutureWarning, match="deprecated in 0.22"):
sparse_random_matrix(10, 100)
| [
"danielmoreira12@github.com"
] | danielmoreira12@github.com |
a21ddf111b795ac1aaf0b8014d7ce5ef4f7a9966 | 501a91abf128b9d7b8ce0ade09e719c9ec201a7d | /jaut/finished-lab12/compiled_jti/pbj_pb2.py | c4d14acc6920b8c89fb7c0767a79e2b8a7516a71 | [] | no_license | ysaied/jncis-devops | d9cc1de43c70b68c4f292eb8cca6ce9cf5332ca6 | 0a2b26cba2589a2ed99945390a1783ed91d45f4a | refs/heads/master | 2021-08-07T10:28:17.138902 | 2021-03-07T08:32:19 | 2021-03-07T08:32:19 | 245,346,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 8,805 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pbj.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pbj.proto',
package='',
syntax='proto2',
serialized_pb=_b('\n\tpbj.proto\x1a google/protobuf/descriptor.proto\"\xc6\x01\n\nPBJOptions\x12\x10\n\x08max_size\x18\x01 \x01(\x05\x12\x11\n\tmax_count\x18\x02 \x01(\x05\x12$\n\x04type\x18\x03 \x01(\x0e\x32\n.FieldType:\nFT_DEFAULT\x12\x18\n\nlong_names\x18\x04 \x01(\x08:\x04true\x12\x1c\n\rpacked_struct\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0cskip_message\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x18\n\ncache_size\x18\x07 \x01(\x08:\x04true*Z\n\tFieldType\x12\x0e\n\nFT_DEFAULT\x10\x00\x12\x0f\n\x0b\x46T_CALLBACK\x10\x01\x12\x0e\n\nFT_POINTER\x10\x04\x12\r\n\tFT_STATIC\x10\x02\x12\r\n\tFT_IGNORE\x10\x03:C\n\x0fpbj_file_option\x12\x1c.google.protobuf.FileOptions\x18\xfc\x07 \x01(\x0b\x32\x0b.PBJOptions:I\n\x12pbj_message_option\x12\x1f.google.protobuf.MessageOptions\x18\xfc\x07 \x01(\x0b\x32\x0b.PBJOptions:C\n\x0fpbj_enum_option\x12\x1c.google.protobuf.EnumOptions\x18\xfc\x07 \x01(\x0b\x32\x0b.PBJOptions:E\n\x10pbj_field_option\x12\x1d.google.protobuf.FieldOptions\x18\xfc\x07 \x01(\x0b\x32\x0b.PBJOptionsB\x11\n\x0fnet.juniper.pbj')
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_FIELDTYPE = _descriptor.EnumDescriptor(
name='FieldType',
full_name='FieldType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FT_DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FT_CALLBACK', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FT_POINTER', index=2, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FT_STATIC', index=3, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FT_IGNORE', index=4, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=248,
serialized_end=338,
)
_sym_db.RegisterEnumDescriptor(_FIELDTYPE)
FieldType = enum_type_wrapper.EnumTypeWrapper(_FIELDTYPE)
FT_DEFAULT = 0
FT_CALLBACK = 1
FT_POINTER = 4
FT_STATIC = 2
FT_IGNORE = 3
PBJ_FILE_OPTION_FIELD_NUMBER = 1020
pbj_file_option = _descriptor.FieldDescriptor(
name='pbj_file_option', full_name='pbj_file_option', index=0,
number=1020, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
PBJ_MESSAGE_OPTION_FIELD_NUMBER = 1020
pbj_message_option = _descriptor.FieldDescriptor(
name='pbj_message_option', full_name='pbj_message_option', index=1,
number=1020, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
PBJ_ENUM_OPTION_FIELD_NUMBER = 1020
pbj_enum_option = _descriptor.FieldDescriptor(
name='pbj_enum_option', full_name='pbj_enum_option', index=2,
number=1020, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
PBJ_FIELD_OPTION_FIELD_NUMBER = 1020
pbj_field_option = _descriptor.FieldDescriptor(
name='pbj_field_option', full_name='pbj_field_option', index=3,
number=1020, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
_PBJOPTIONS = _descriptor.Descriptor(
name='PBJOptions',
full_name='PBJOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='max_size', full_name='PBJOptions.max_size', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_count', full_name='PBJOptions.max_count', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='PBJOptions.type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='long_names', full_name='PBJOptions.long_names', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='packed_struct', full_name='PBJOptions.packed_struct', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='skip_message', full_name='PBJOptions.skip_message', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cache_size', full_name='PBJOptions.cache_size', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=48,
serialized_end=246,
)
_PBJOPTIONS.fields_by_name['type'].enum_type = _FIELDTYPE
DESCRIPTOR.message_types_by_name['PBJOptions'] = _PBJOPTIONS
DESCRIPTOR.enum_types_by_name['FieldType'] = _FIELDTYPE
DESCRIPTOR.extensions_by_name['pbj_file_option'] = pbj_file_option
DESCRIPTOR.extensions_by_name['pbj_message_option'] = pbj_message_option
DESCRIPTOR.extensions_by_name['pbj_enum_option'] = pbj_enum_option
DESCRIPTOR.extensions_by_name['pbj_field_option'] = pbj_field_option
PBJOptions = _reflection.GeneratedProtocolMessageType('PBJOptions', (_message.Message,), dict(
DESCRIPTOR = _PBJOPTIONS,
__module__ = 'pbj_pb2'
# @@protoc_insertion_point(class_scope:PBJOptions)
))
_sym_db.RegisterMessage(PBJOptions)
pbj_file_option.message_type = _PBJOPTIONS
google_dot_protobuf_dot_descriptor__pb2.FileOptions.RegisterExtension(pbj_file_option)
pbj_message_option.message_type = _PBJOPTIONS
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(pbj_message_option)
pbj_enum_option.message_type = _PBJOPTIONS
google_dot_protobuf_dot_descriptor__pb2.EnumOptions.RegisterExtension(pbj_enum_option)
pbj_field_option.message_type = _PBJOPTIONS
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(pbj_field_option)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\017net.juniper.pbj'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| [
"ysaied80@yahoo.com"
] | ysaied80@yahoo.com |
b6e2564b8a64a638b17c87155a6dc7dea9ea4494 | 427d191b5c7275a840e54dc6db79be45afd4363f | /LSTM_5_1/6_10.py | c6f0c6757410c5401c25a31b578606f5c5730868 | [] | no_license | wangxiong101/MyItem | b4b23525456deb7330c781cf0720178445596667 | 3a048038e6b33e11fa0f92c7583353d012c49ae4 | refs/heads/master | 2022-12-26T08:11:07.429069 | 2020-10-10T10:55:14 | 2020-10-10T10:55:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | import numpy as np
Loc = [93]
Sgroup = [i for i in range(13)]
Pgroup = [i for i in range(13,93)]
groups = {
'S' : Sgroup,
'P' : Pgroup,
'SL' : Sgroup + Loc,
'PL' : Pgroup + Loc,
'SP' : Sgroup + Pgroup,
'SPL' : Sgroup + Pgroup + Loc,
}
assert group in groups
useGroup = groups['SPL']
if forSOD == True:
useGroup = [i for i in range(49)] #0-48
X = X[:,:,useGroup] | [
"31409729+TreeNewWind@users.noreply.github.com"
] | 31409729+TreeNewWind@users.noreply.github.com |
57d9e7e85e4457da61a3353cc4c87eae270b7087 | f10721c8e65e928ab2dc26366d6cf701e13f6c6a | /software_challenge/manage.py | 12bf6df7342dac8dafe7859b28816f7111bfa23b | [] | no_license | dogbeide/SoftwareChallenge | 0bbbb98544b5411f572a9ad993b7bf0c94ad2e84 | 01c062f74eca9b440ef6b1a1c41723165b99f673 | refs/heads/master | 2021-08-21T21:04:35.537922 | 2017-11-29T02:53:43 | 2017-11-29T02:53:43 | 111,731,505 | 0 | 0 | null | 2017-11-29T02:53:43 | 2017-11-22T20:55:10 | JavaScript | UTF-8 | Python | false | false | 816 | py | #!/usr/bin/env python
import os
import sys
def main():
    """Run Django administrative tasks for the software_challenge project."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "software_challenge.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # Distinguish "Django is missing" from an ImportError raised while
        # importing Django itself; only the former gets the helpful message.
        try:
            import django  # noqa: F401
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
| [
"boyowao@gmail.com"
] | boyowao@gmail.com |
7a05fadb04f7095039de7c80f514ced4dad3eeb8 | 8033688716c7b120d8105fb98152467c515d7d03 | /makeScalingFunctionPlot.py | edde28e4600681d9c21512f107d64dfddf7b3b24 | [] | no_license | jonathon-langford/EFT-Fitter | 68214a8f46e9817dc7add99d16e3260ae5d1617d | 1cebdef80497bb66ac2d262e2347c4d8100f94b8 | refs/heads/master | 2023-05-20T06:51:21.341971 | 2021-02-12T19:35:12 | 2021-02-12T19:35:12 | 338,414,103 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,914 | py | import os, sys
import json
import re
from optparse import OptionParser
from collections import OrderedDict as od
from importlib import import_module
import pickle
import ROOT
import numpy as np
def get_options():
    """Build and parse the command-line options for this plotting script."""
    parser = OptionParser()
    # String-valued options registered from a small table; dest is the flag
    # name without the leading dashes.
    for flag, default, help_text in [
        ('--pois', 'params.HEL', "Name of json file storing pois"),
        ('--functions', 'functions.HEL_STXS', "Name of json file storing functions"),
        ('--inputs', '', "Comma separated list of input files"),
    ]:
        parser.add_option(flag, dest=flag.lstrip('-'), default=default, help=help_text)
    parser.add_option("--translateBins", dest="translateBins", default=None, help="Translate STXS bins")
    return parser.parse_args()
# Parse the command line once, at import time; `opt` is used throughout.
(opt,args) = get_options()
# Functions for translations
def Translate(name, ndict):
    """Return the pretty label for *name* from *ndict*, or *name* itself if absent."""
    return ndict.get(name, name)
def LoadTranslations(jsonfilename):
    """Load an {original: pretty-name} mapping from a JSON file."""
    with open(jsonfilename) as handle:
        raw = handle.read()
    return json.loads(raw)
# ---- Inputs: bin-name translations, POIs, scaling functions, measurements ----
# NOTE: this is a Python 2 script (dict.iteritems below) driving PyROOT.
translateBins = {} if opt.translateBins is None else LoadTranslations(opt.translateBins)
# Load parameters of interest
pois = import_module(opt.pois).pois
# Load functions
functions = import_module(opt.functions).functions
# Load input measurements
inputs = []
for i in opt.inputs.split(","):
    _cfg = import_module(i)
    _input = od()
    _input['name'] = _cfg.name
    _input['X'] = _cfg.X
    _input['rho'] = _cfg.rho
    inputs.append(_input)
from tools.fitter import *
fit = fitter(pois,functions,inputs,False)
#stxs_bins = ['ttH']
stxs_bins = ['ZH_lep_PTV_0_75','ZH_lep_PTV_75_150','ZH_lep_PTV_150_250_0J','ZH_lep_PTV_150_250_GE1J','ZH_lep_PTV_GT250','ZH_lep']
# ---- Evaluate mu(c) scaling curves, quadratic and linear-only, per bin/POI ----
scaling = od()
for stxs_bin in stxs_bins:
    scaling[stxs_bin] = od()
    for poi in pois.keys(): scaling[stxs_bin][poi] = od()
    # Quadratic
    fit.setLinearOnly(False)
    for poi in pois.keys():
        scaling[stxs_bin][poi]['quad'] = od()
        c, mu = fit.scaling1D(poi,stxs_bin,npoints=1000)
        scaling[stxs_bin][poi]['quad']['c'] = c
        scaling[stxs_bin][poi]['quad']['mu'] = mu
    # Linear
    fit.setLinearOnly()
    for poi in pois.keys():
        scaling[stxs_bin][poi]['lin'] = od()
        c,mu = fit.scaling1D(poi,stxs_bin,npoints=1000)
        scaling[stxs_bin][poi]['lin']['c'] = c
        scaling[stxs_bin][poi]['lin']['mu'] = mu
# Make graphs: one TGraph per (bin, POI, quad/lin) curve.
grs = od()
for stxs_bin in stxs_bins:
    for poi in pois.keys():
        grs['%s_vs_%s_quad'%(stxs_bin,poi)] = ROOT.TGraph()
        grs['%s_vs_%s_lin'%(stxs_bin,poi)] = ROOT.TGraph()
        for i in range(len(scaling[stxs_bin][poi]['quad']['c'])): grs['%s_vs_%s_quad'%(stxs_bin,poi)].SetPoint( grs['%s_vs_%s_quad'%(stxs_bin,poi)].GetN(),scaling[stxs_bin][poi]['quad']['c'][i], scaling[stxs_bin][poi]['quad']['mu'][i] )
        for i in range(len(scaling[stxs_bin][poi]['lin']['c'])): grs['%s_vs_%s_lin'%(stxs_bin,poi)].SetPoint( grs['%s_vs_%s_lin'%(stxs_bin,poi)].GetN(),scaling[stxs_bin][poi]['lin']['c'][i], scaling[stxs_bin][poi]['lin']['mu'][i] )
# Make plot
# Line/marker attributes applied to each graph via getattr("Set<attr>") below.
styleMap = od()
styleMap['quad'] = {'LineWidth':3,'LineStyle':1,'MarkerSize':0}
styleMap['quad_dummy'] = {'LineWidth':3,'LineStyle':1,'MarkerSize':0}
styleMap['lin'] = {'LineWidth':2, 'LineStyle':2,'MarkerSize':0}
styleMap['lin_dummy'] = {'LineColor':12, 'LineWidth':2, 'LineStyle':2,'MarkerSize':0}
#styleMap['lin_dummy'] = {'LineColor':ROOT.kMagenta-7, 'LineWidth':2, 'LineStyle':2,'MarkerSize':0}
colorMap = od()
colorMap['ZH_lep'] = {'LineColor':ROOT.kRed-4,'MarkerColor':ROOT.kRed-4}
colorMap['ZH_lep_PTV_0_75'] = {'LineColor':ROOT.kGreen-8,'MarkerColor':ROOT.kGreen-8}
colorMap['ZH_lep_PTV_75_150'] = {'LineColor':ROOT.kGreen-7,'MarkerColor':ROOT.kGreen-7}
colorMap['ZH_lep_PTV_150_250_0J'] = {'LineColor':ROOT.kGreen+1,'MarkerColor':ROOT.kGreen+1}
colorMap['ZH_lep_PTV_150_250_GE1J'] = {'LineColor':ROOT.kGreen+3,'MarkerColor':ROOT.kGreen+3}
colorMap['ZH_lep_PTV_GT250'] = {'LineColor':ROOT.kBlack,'MarkerColor':ROOT.kBlack}
colorMap['ttH'] = {'LineColor':ROOT.kMagenta-7,'MarkerColor':ROOT.kMagenta-7}
# POI str
# Single POI plotted per run; axis labels are built from its multiplier.
poi = "cWWMinuscB"
hmax = 2.5
import math
m = "%g"%math.log(1/pois[poi]['multiplier'],10)
if m == '1': m = ''
if poi == "cWWMinuscB":
    pstr_stripped = "c_{WW} #minus c_{B}"
    pstr = "(c_{WW} #minus c_{B}) x 10^{%s}"%m
else:
    pstr_stripped = "c_{%s}"%poi.split("c")[-1]
    pstr = "c_{%s} x 10^{%s}"%(poi.split("c")[-1],m)
# ---- Canvas and axis frame ----
ROOT.gROOT.SetBatch(True)
ROOT.gStyle.SetOptStat(0)
canv = ROOT.TCanvas("canv_%s"%poi,"canv_%s"%poi,700,500)
#canv = ROOT.TCanvas("canv_%s"%poi,"canv_%s"%poi,900,500)
canv.SetBottomMargin(0.15)
canv.SetTickx()
canv.SetTicky()
prange = pois[poi]['range'][1]-pois[poi]['range'][0]
h_axes = ROOT.TH1F("haxes","",100, pois[poi]['range'][0]-0.1*prange, pois[poi]['range'][1]+0.1*prange )
h_axes.SetMaximum(hmax)
h_axes.SetMinimum(-0.2)
h_axes.SetTitle("")
h_axes.GetXaxis().SetTitle(pstr)
h_axes.GetXaxis().SetTitleSize(0.05)
h_axes.GetXaxis().SetLabelSize(0.035)
h_axes.GetYaxis().SetTitle("#mu^{i}_{prod}(%s)"%pstr_stripped)
h_axes.GetYaxis().SetTitleSize(0.05)
h_axes.GetYaxis().SetTitleOffset(0.8)
h_axes.GetYaxis().SetLabelSize(0.035)
h_axes.GetYaxis().SetLabelOffset(0.007)
h_axes.GetYaxis().CenterTitle()
h_axes.SetLineWidth(0)
h_axes.Draw()
# ---- Draw the scaling curves, styled per bin and per quad/lin ----
for stxs_bin in stxs_bins:
    for k, v in colorMap[stxs_bin].iteritems():
        getattr(grs["%s_vs_%s_quad"%(stxs_bin,poi)],"Set%s"%k)(v)
        getattr(grs["%s_vs_%s_lin"%(stxs_bin,poi)],"Set%s"%k)(v)
    for k, v in styleMap['quad'].iteritems(): getattr(grs["%s_vs_%s_quad"%(stxs_bin,poi)],"Set%s"%k)(v)
    for k, v in styleMap['lin'].iteritems(): getattr(grs["%s_vs_%s_lin"%(stxs_bin,poi)],"Set%s"%k)(v)
    grs["%s_vs_%s_quad"%(stxs_bin,poi)].Draw("Same C")
    grs["%s_vs_%s_lin"%(stxs_bin,poi)].Draw("Same C")
# Lines
# Horizontal guides at mu=0 and mu=1, vertical guides at c_min, 0, c_max.
hlines = {}
yvals = [0,1]
for i in range(len(yvals)):
    yval = yvals[i]
    hlines['hline_%g'%i] = ROOT.TLine(pois[poi]['range'][0]-0.1*prange,yval,pois[poi]['range'][1]+0.1*prange,yval)
    hlines['hline_%g'%i].SetLineColorAlpha(15,0.5)
    hlines['hline_%g'%i].SetLineStyle(2)
    hlines['hline_%g'%i].SetLineWidth(1)
    hlines['hline_%g'%i].Draw("SAME")
vlines = {}
xvals = [pois[poi]['range'][0],0,pois[poi]['range'][1]]
for i in range(len(xvals)):
    xval = xvals[i]
    vlines['vline_%g'%i] = ROOT.TLine(xval,-0.2,xval,hmax)
    vlines['vline_%g'%i].SetLineColorAlpha(15,0.5)
    vlines['vline_%g'%i].SetLineStyle(2)
    vlines['vline_%g'%i].SetLineWidth(1)
    vlines['vline_%g'%i].Draw("SAME")
# Text
lat0 = ROOT.TLatex()
lat0.SetTextFont(42)
lat0.SetTextAlign(11)
lat0.SetNDC()
lat0.SetTextSize(0.045)
lat0.DrawLatex(0.1,0.92,"HEL UFO")
lat1 = ROOT.TLatex()
lat1.SetTextFont(42)
lat1.SetTextAlign(23)
lat1.SetTextSize(0.03)
xpos = pois[poi]['range'][0]-0.05*prange
# NOTE(review): stray leading apostrophe in the sigma=sigma_SM label below --
# probably unintended; confirm before removing (it is rendered as-is).
lat1.DrawLatex(xpos,1.,"'#color[15]{#sigma = #sigma_{SM}}")
lat1.DrawLatex(xpos,0.,"#color[15]{#sigma = 0}")
lat2 = ROOT.TLatex()
lat2.SetTextFont(42)
lat2.SetTextAlign(23)
lat2.SetTextAngle(90)
lat2.SetTextSize(0.045)
lat2.SetTextAlign(21)
lat2.DrawLatex(pois[poi]['range'][0]-0.02*prange,0.9*hmax,"#color[15]{c_{min}}")
lat2.SetTextAlign(23)
lat2.DrawLatex(pois[poi]['range'][1]+0.01*prange,0.9*hmax,"#color[15]{c_{max}}")
# Legend
# Create dummy graph for linear
gr_lin_dummy = ROOT.TGraph()
for k,v in styleMap['lin_dummy'].iteritems(): getattr(gr_lin_dummy,"Set%s"%k)(v)
leg = ROOT.TLegend(0.55,0.22,0.8,0.48)
#leg = ROOT.TLegend(0.63,0.28,0.8,0.38)
leg.SetFillStyle(0)
leg.SetLineColor(0)
leg.SetTextSize(0.0275)
#leg.SetTextSize(0.035)
for stxs_bin in stxs_bins: leg.AddEntry( grs["%s_vs_%s_quad"%(stxs_bin,poi)], Translate(stxs_bin,translateBins), "L")
leg.AddEntry(gr_lin_dummy,"(Lin. terms only)","L")
leg.Draw("Same")
canv.Update()
# Output paths are hard-coded to the author's EOS web area.
canv.SaveAs("/eos/home-j/jlangfor/www/CMS/thesis/chapter7/scaling_functions/ZH_lep_vs_%s.png"%poi)
canv.SaveAs("/eos/home-j/jlangfor/www/CMS/thesis/chapter7/scaling_functions/ZH_lep_vs_%s.pdf"%poi)
#canv.SaveAs("/eos/home-j/jlangfor/www/CMS/thesis/chapter7/scaling_functions/ttH_vs_%s.png"%poi)
#canv.SaveAs("/eos/home-j/jlangfor/www/CMS/thesis/chapter7/scaling_functions/ttH_vs_%s.pdf"%poi)
| [
"jl2117@ic.ac.uk"
] | jl2117@ic.ac.uk |
992a5b4b59051118d2a401970b9ab392aa4db485 | 7fbd5c1229bc0fb40c6ec4e0cd21d6701a293588 | /model/mask_rcnn/model/simple/rpn.py | 4a9327f470cbfd00963d67ae9e3f076feab902ba | [] | no_license | liruilong940607/carnava-image-masking-challenge | 5f318a2481a1da9f50d26470cd4687d2d3e435d5 | 56800803bb3172cc6b47fc66004ea31250bf3f4b | refs/heads/master | 2021-07-16T20:07:08.322438 | 2017-10-15T13:37:54 | 2017-10-15T13:37:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,220 | py | from common import*
from dataset.box import *
from net.mask_rcnn.model.simple.configuration import *
def rpn_bases(cfg):
    """Build the anchor base boxes from the RPN config (ratios x scales)."""
    ratios = np.array(cfg.rpn.ratios)
    scales = np.array(cfg.rpn.scales)
    return make_bases(ratios=ratios, scales=scales)
def rpn_windows(x, f, bases, cfg):
    """Slide the anchor bases over feature map f of image x.

    Returns (windows, inside_inds): all anchor windows in image coordinates
    and the indices of those lying within the allowed border.
    """
    image_shape = (x.size(2), x.size(3))      # original image height/width
    feature_shape = (f.size(2), f.size(3))    # feature-map height/width
    windows, inside_inds = make_windows(
        bases, cfg.rpn.stride, image_shape, feature_shape, cfg.rpn.allowed_border)
    return windows, inside_inds
class RpnNet(nn.Module):
    """Region-proposal head: a shared 3x3 conv followed by two 1x1 convs
    predicting, per anchor, a 2-way objectness score and a 4-dim box delta."""
    def __init__(self, cfg, in_channels):
        super(RpnNet, self).__init__()
        self.cfg = cfg
        # One anchor per (ratio, scale) combination.
        num_bases = len(cfg.rpn.ratios)*len(cfg.rpn.scales)
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, 256, kernel_size=3, stride=1, padding=1, groups=1, bias=True),
            nn.ReLU(inplace=True),
        )
        self.predict_score = nn.Conv2d(256, num_bases*2, kernel_size=1, stride=1, padding=0, groups=1, bias=True)
        self.predict_dbox  = nn.Conv2d(256, num_bases*4, kernel_size=1, stride=1, padding=0, groups=1, bias=True)
    def forward(self, x):
        shared = self.conv(x)
        raw_score = self.predict_score(shared)
        raw_delta = self.predict_dbox(shared)
        # Reshape (N, A*k, H, W) -> (N*H*W*A, k): one row per anchor window.
        score_flat = raw_score.permute(0, 2, 3, 1).contiguous().view(-1, 2)
        delta_flat = raw_delta.permute(0, 2, 3, 1).contiguous().view(-1, 4)
        # loss takes in logit!
        return score_flat, delta_flat
# main #################################################################
if __name__ == '__main__':
    print( '%s: calling main function ... ' % os.path.basename(__file__))
    # Build a config and tweak a few RPN defaults for this smoke test.
    cfg = Configuration()
    cfg.rpn.train_fg_thresh_low = 0.7
    cfg.rpn.scales = [64, 128, 256]
    cfg.rpn.ratios = [1, 0.5]
    # Run a random batch through the head on the GPU and dump the outputs.
    batch_size, in_channels, H, W = 1, 32, 256, 256
    dummy = torch.randn(batch_size, in_channels, H, W)
    rpn_net = RpnNet(cfg, in_channels).cuda().train()
    x = Variable(dummy).cuda()
    s, b = rpn_net(x)
    print(type(rpn_net))
    print(rpn_net)
    print('score_flat\n',s)
    print('delta_flat\n',b)
"hchaolee@gmail.com"
] | hchaolee@gmail.com |
af61f8bb6da1586678deb860f9c61b6e97ed9b2e | 3ccbd6e4d37e9a61b4301bc118400933df69862b | /AItutor/backEnd/dance_learning/etc code/graph.py | 0e4b4b98373961b8287cc445e5df2453afe531a7 | [] | no_license | jimschenchen/2019-projects | 5c6d550fca0d5fb305293f2a6d07a0e0b1c326f5 | df7bf412d85e55ea1bfb97998b141711b68f5a75 | refs/heads/master | 2020-06-21T23:31:40.062867 | 2019-07-18T12:35:24 | 2019-07-18T12:35:24 | 197,578,899 | 1 | 0 | null | 2019-07-18T12:03:07 | 2019-07-18T12:03:06 | null | UTF-8 | Python | false | false | 762 | py | # -*- Encoding:UTF-8 -*- #
import pickle
import sys
import json
import os
from glob import glob
# Convert every "<name><digit>.pkl" pose pickle in the cwd to a JSON-ish .txt
# file (once), then collect the first pose value of each into `num`.
# BUG FIX: the file imports the *function* glob ("from glob import glob"), so
# the original call "glob.glob(...)" raised AttributeError.  Bind the function
# under an unambiguous local name and call it directly.
from glob import glob as _glob
num = []
txt_paths = sorted(_glob('*[0-9].pkl'))
for txt_path in txt_paths:
    out_path = txt_path[:-4] + '.txt'
    # Convert the pickle only if the .txt file does not exist yet.
    if not os.path.isfile(out_path):
        with open(txt_path, 'rb') as f:
            data = pickle.load(f)
        # Extract the numpy-array text of the 'pose' entry from the dict repr
        # and wrap it as a JSON object: {"pose": [...]}.
        line = str(data)
        pose = line.partition("'pose':")[2]
        pose2 = pose.partition("])")[0]
        pose3 = '{"pose": ' + pose2.partition("array(")[2] + "]}"
        # Use a context manager so the handle is closed even on write errors.
        with open(out_path, 'w') as f2:
            f2.write(pose3)
    # Parse the JSON back and keep the first pose coordinate as an int.
    with open(out_path) as json_file:
        json_data = json.load(json_file)
        num.append(int(json_data["pose"][0]))
print(num)
| [
"jimschenchen@163.com"
] | jimschenchen@163.com |
7f40a80d620a8d6c6aa4dd7a7b3b2beacf0ab887 | f437229856bbce2476c70a72713c5373a3500f90 | /players/migrations/0025_auto_20201112_1801.py | 49f2774a851af25976cbeba3794ca049263e6a23 | [] | no_license | tehnomanik/scumanager | b856adf4fef2004645ef9e4373c3f03b904c8c9a | cc9ab3df07c33c7fbf724715b7eeaba1d882df4a | refs/heads/master | 2023-01-14T05:25:58.856744 | 2020-11-17T20:48:58 | 2020-11-17T20:48:58 | 310,887,801 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | # Generated by Django 3.1.3 on 2020-11-12 17:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration: adds Case.created_by and relaxes
# Case.modified_by (both nullable FKs to the configured user model,
# SET_NULL on user deletion so cases survive their authors).
class Migration(migrations.Migration):
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('players', '0024_auto_20201108_2325'),
    ]
    operations = [
        migrations.AddField(
            model_name='case',
            name='created_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='created_by', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='case',
            name='modified_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='modified_by', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"tehnomanik@gmail.com"
] | tehnomanik@gmail.com |
d46160d4b3fcc0f0935469cce665f4bc5d1f2654 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /ACL_PyTorch/built-in/cv/HRNet_mmlab_for_pytorch/map_postprocess.py | 9515b6bba2a62d6ba9e100ca207748cfdeb8e959 | [
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,010 | py | # Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
def ais_process(s_list):
    """Benchmark the model at every (height, width) shape in s_list.

    Runs ais_bench once per shape via a subprocess, extracts the throughput
    with process(), prints the per-shape value and finally the average.
    """
    data_sum = 0
    for s in s_list:
        command = 'python3 -m ais_bench --model ./mmpose/hrnet.om --dymDims input:1,3,{},{} --output ./ \
        --outfmt BIN --loop 1000'.format(s[0], s[1])
        ret = subprocess.getoutput(command)
        data_time = process(ret)
        # BUG FIX: process() returns the throughput as a *string*, or None
        # when the output has no "throughput" line; the original code added
        # it to an int directly, which raised a TypeError on every run.
        if data_time is not None:
            data_sum += float(data_time)
        # BUG FIX: the per-shape line said "of FPS:" but printed no value.
        print('shape:{}*{} of FPS:'.format(s[0], s[1]), data_time)
    # NOTE(review): 36 is hard-coded, but shape_list in __main__ holds 37
    # entries (with [960, 512] duplicated) -- confirm the intended divisor.
    print('average of FPS:', data_sum / 36)
def process(file):
    """Extract the throughput value from ais_bench console output.

    Returns the text after the first ':' on the first line containing
    'throughput' (leading whitespace preserved), or None when no such
    line exists.  Note: the parameter name `file` shadows a builtin but
    is kept for interface compatibility.
    """
    # BUG FIX: the original hand-rolled character loop dropped the final
    # line whenever the output did not end with '\n'; splitlines() keeps it.
    for line in file.splitlines():
        if 'throughput' in line:
            return line.split(':')[1]
    return None
if __name__ == '__main__':
    # All dynamic (height, width) input shapes to benchmark.
    # NOTE(review): [960, 512] appears twice, making 37 entries while
    # ais_process averages over 36 -- confirm whether the duplicate and the
    # divisor are intentional.
    shape_list = [[512, 768], [512, 832], [512, 704], [512, 896], [512, 640], [512, 960], [512, 512], [512, 1152],
                  [512, 576], [512, 1024], [512, 1088], [512, 1280], [512, 1984], [512, 1792], [512, 1536],
                  [512, 2048], [512, 2112], [512, 1216], [512, 1856], [512, 1344], [512, 1728], [512, 1920],
                  [512, 1472], [512, 1600], [512, 1408], [768, 512], [704, 512], [576, 512], [832, 512], [640, 512],
                  [960, 512], [960, 512], [896, 512], [1152, 512], [1088, 512], [1024, 512], [1344, 512]]
    ais_process(shape_list)
| [
"gaokai33@huawei.com"
] | gaokai33@huawei.com |
0b50117110d294e44696a9a02d9e66259d4d6d2c | bc6b9dbb32a1eabbc976d2304a04f0f56428323d | /blaseball_mike/models/election.py | ddc0f1d9fdde8bd703da2e3ffc47835139cddbab | [
"MIT"
] | permissive | rgallo/blaseball-mike | d49145c36144e2b25f51013eac24e0c88472b1d1 | 6a578f5cb1c83073185fa88a19f3e517446b79e1 | refs/heads/master | 2023-06-27T01:43:45.425963 | 2021-08-02T20:41:16 | 2021-08-02T20:41:16 | 370,364,855 | 0 | 0 | MIT | 2021-08-02T20:41:39 | 2021-05-24T13:37:58 | Python | UTF-8 | Python | false | false | 6,331 | py | from .base import Base
from .team import Team
from .. import database
class Election(Base):
    """Represents the current election"""
    @classmethod
    def _get_fields(cls):
        # Introspection helper: enumerate the raw API fields of a live election.
        p = cls.load()
        return [cls._from_api_conversion(x) for x in p.fields]
    @classmethod
    def load(cls):
        """Load the current election"""
        offseason = database.get_offseason_election_details()
        return cls(offseason)
    # Each lazy_load property below caches the raw API payload on the named
    # attribute (e.g. self._blessings) and wraps each entry in its model class
    # on access; default is an empty list when the API omits the key.
    @Base.lazy_load("_blessings", default_value=list())
    def blessings(self):
        return [Blessing(b) for b in self._blessings]
    @Base.lazy_load("_decrees", default_value=list())
    def decrees(self):
        return [Decree(b) for b in self._decrees]
    @Base.lazy_load("_wills", default_value=list())
    def wills(self):
        return [Will(b) for b in self._wills]
    @Base.lazy_load("_gifts", default_value=list())
    def gifts(self):
        return [Gift(b) for b in self._gifts]
# Backward-compatible alias for Election -- presumably the older API name;
# kept so existing callers keep working (TODO confirm).
class OffseasonSetup(Election):
    pass
class Decree(Base):
    """Represents a decree currently up for vote"""
    @classmethod
    def _get_fields(cls):
        # Pull the decrees off a freshly loaded election; empty when absent.
        decrees = getattr(Election.load(), "decrees", [])
        if not decrees:
            return []
        return [cls._from_api_conversion(field) for field in decrees.fields]
class Blessing(Base):
    """Represents a blessing currently up for vote"""
    @classmethod
    def _get_fields(cls):
        # Pull the blessings off a freshly loaded election; empty when absent.
        blessings = getattr(Election.load(), "blessings", [])
        if not blessings:
            return []
        return [cls._from_api_conversion(field) for field in blessings.fields]
class Will(Base):
    """Represents a will currently up for vote"""
    @classmethod
    def _get_fields(cls):
        # Pull the wills off a freshly loaded election; empty when absent.
        wills = getattr(Election.load(), "wills", [])
        if not wills:
            return []
        return [cls._from_api_conversion(field) for field in wills.fields]
class Gift(Base):
    """Represents a gift currently available"""
    @classmethod
    def _get_fields(cls):
        # Pull the gifts off a freshly loaded election; empty when absent.
        gifts = getattr(Election.load(), "gifts", [])
        if not gifts:
            return []
        return [cls._from_api_conversion(field) for field in gifts.fields]
class ElectionResult(Base):
    """Represents the results of an election"""
    @classmethod
    def _get_fields(cls):
        # Introspect the API fields using a known season (Season 20, 0-indexed 19).
        p = cls.load_by_season(19)
        return [cls._from_api_conversion(x) for x in p.fields]
    @classmethod
    def load_by_season(cls, season):
        """
        Load results by season. Season is 1-indexed.
        """
        return cls(database.get_offseason_recap(season))
    # The lazy_load properties cache the raw ID lists on the first-named
    # attribute and the resolved model objects under cache_name; results are
    # returned in the same order as the stored IDs.
    @Base.lazy_load("_bonus_results_ids", cache_name="_bonus_results", default_value=list())
    def bonus_results(self):
        blessings = BlessingResult.load(*self._bonus_results_ids)
        return [blessings.get(id_) for id_ in self._bonus_results_ids]
    # blessing_results is an alias to bonus_results
    @property
    def blessing_results(self):
        return self.bonus_results
    @Base.lazy_load("_decree_results_ids", cache_name="_decree_results", default_value=list())
    def decree_results(self):
        decrees = DecreeResult.load(*self._decree_results_ids)
        return[decrees.get(id_) for id_ in self._decree_results_ids]
    @Base.lazy_load("_event_results_ids", cache_name="_event_results", default_value=list())
    def event_results(self):
        events = TidingResult.load(*self._event_results_ids)
        return [events.get(id_) for id_ in self._event_results_ids]
    # tiding_results is an alias to event_results
    @property
    def tiding_results(self):
        return self.event_results
    @Base.lazy_load("_season", use_default=False)
    def season(self):
        # API seasons are 0-indexed; expose them 1-indexed.
        return self._season + 1
# Backward-compatible alias for ElectionResult -- presumably the older API
# name; kept so existing callers keep working (TODO confirm).
class OffseasonResult(ElectionResult):
    pass
class DecreeResult(Base):
    """Represents the results of a single decree."""
    @classmethod
    def _get_fields(cls):
        # Introspect the API fields using a known decree ID.
        sample = cls.load_one("643280fc-b7c6-4b6d-a164-9b53e1a3e47a")
        return [cls._from_api_conversion(field) for field in sample.fields]
    @classmethod
    def load(cls, *ids):
        """Load one or more decree results by decree ID, keyed by ID."""
        raw = database.get_offseason_decree_results(list(ids))
        return {key: cls(value) for key, value in raw.items()}
    @classmethod
    def load_one(cls, id_):
        """Load a single decree result by decree ID."""
        return cls.load(id_).get(id_)
class BlessingResult(Base):
    """Represents the results of a single blessing"""
    @classmethod
    def _get_fields(cls):
        # Introspect the API fields using a known blessing ID.
        p = cls.load_one("cbb567c0-d770-4d22-92f6-ff16ebb94758")
        return [cls._from_api_conversion(x) for x in p.fields]
    @classmethod
    def load(cls, *ids):
        """
        Load one or more blessing results by blessing ID
        """
        blessings = database.get_offseason_bonus_results(list(ids))
        return {
            id_: cls(blessing) for (id_, blessing) in blessings.items()
        }
    @classmethod
    def load_one(cls, id_):
        """
        Load a single blessing result by blessing ID
        """
        return cls.load(id_).get(id_)
    # Lazily resolve the winning team's ID into a full Team object,
    # caching it on self._team.
    @Base.lazy_load("_team_id", cache_name="_team")
    def team_id(self):
        return Team.load(self._team_id)
    # team is an alias to team_id
    @property
    def team(self):
        return self.team_id
    # Note: highest_team not present for Season 1
    @Base.lazy_load("_highest_team_id", cache_name="_highest_team")
    def highest_team(self):
        return Team.load(self._highest_team_id)
    # blessing_title is an alias to bonus_title
    @property
    def blessing_title(self):
        return self.bonus_title
    # blessing_id is an alias to bonus_id
    @property
    def blessing_id(self):
        return self.bonus_id
# Backward-compatible alias for BlessingResult ("bonus" is the API's term);
# kept so existing callers keep working.
class BonusResult(BlessingResult):
    pass
class TidingResult(Base):
    """Represents the results of a single election tiding"""
    @classmethod
    def _get_fields(cls):
        # Introspect the API fields using a known tiding ID.
        sample = cls.load_one("future_written")
        return [cls._from_api_conversion(field) for field in sample.fields]
    @classmethod
    def load(cls, *ids):
        """Load one or more tiding results by event ID, keyed by ID."""
        raw = database.get_offseason_event_results(list(ids))
        return {key: cls(value) for key, value in raw.items()}
    @classmethod
    def load_one(cls, id_):
        """Load a single tiding result by event ID."""
        return cls.load(id_).get(id_)
# Backward-compatible alias for TidingResult ("event" is the API's term);
# kept so existing callers keep working.
class EventResult(TidingResult):
    pass
| [
"ryan.littleton0@gmail.com"
] | ryan.littleton0@gmail.com |
4a1cbb5d6dc42d369ef90f048def96805105cba5 | bb0a28f1c7140fc75241085af4d772bf690c92c5 | /books/deep-learning-from-scratch/common/multi_layer_net_extend.py | 18e40c7f861dbcf3e7d09935f4a3369c850c2337 | [
"MIT"
] | permissive | oonisim/python-programs | dcddad2df3d3451169e79d053624072706091741 | b592c9bf004d9f2ca6b014eae0e9623e5567bcff | refs/heads/master | 2023-08-23T19:09:19.425013 | 2023-06-23T03:38:29 | 2023-06-23T03:38:29 | 224,111,443 | 2 | 2 | null | 2023-02-16T07:37:54 | 2019-11-26T05:40:37 | Jupyter Notebook | UTF-8 | Python | false | false | 6,905 | py | # coding: utf-8
import sys, os
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
import numpy as np
from collections import OrderedDict
from src.common import numerical_gradient
class MultiLayerNetExtend:
    """Extended fully connected multi-layer neural network.

    Adds Weight Decay, Dropout, and Batch Normalization support.

    NOTE(review): Sigmoid, Relu, Affine, BatchNormalization, Dropout and
    SoftmaxWithLoss must be importable at module level (e.g. from
    common.layers) -- the visible imports do not provide them; confirm.

    Parameters
    ----------
    input_size : input dimension (784 for MNIST)
    hidden_size_list : number of neurons per hidden layer (e.g. [100, 100, 100])
    output_size : output dimension (10 for MNIST)
    activation : 'relu' or 'sigmoid'
    weight_init_std : standard deviation of the initial weights (e.g. 0.01)
        'relu' or 'he' selects the "He initialization";
        'sigmoid' or 'xavier' selects the "Xavier initialization"
    weight_decay_lambda : strength of Weight Decay (L2 norm)
    use_dropout: whether to use Dropout
    dropout_ration : dropout ratio
    use_batchnorm: whether to use Batch Normalization
    """
    def __init__(self, input_size, hidden_size_list, output_size,
                 activation='relu', weight_init_std='relu', weight_decay_lambda=0,
                 use_dropout = False, dropout_ration = 0.5, use_batchnorm=False):
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size_list = hidden_size_list
        self.hidden_layer_num = len(hidden_size_list)
        self.use_dropout = use_dropout
        self.weight_decay_lambda = weight_decay_lambda
        self.use_batchnorm = use_batchnorm
        self.params = {}
        # Initialize the weights
        self.__init_weight(weight_init_std)
        # Build the layers: Affine -> [BatchNorm] -> activation -> [Dropout]
        # per hidden layer, then a final Affine into SoftmaxWithLoss.
        activation_layer = {'sigmoid': Sigmoid, 'relu': Relu}
        self.layers = OrderedDict()
        for idx in range(1, self.hidden_layer_num+1):
            self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)],
                                                      self.params['b' + str(idx)])
            if self.use_batchnorm:
                self.params['gamma' + str(idx)] = np.ones(hidden_size_list[idx-1])
                self.params['beta' + str(idx)] = np.zeros(hidden_size_list[idx-1])
                self.layers['BatchNorm' + str(idx)] = BatchNormalization(self.params['gamma' + str(idx)], self.params['beta' + str(idx)])
            self.layers['Activation_function' + str(idx)] = activation_layer[activation]()
            if self.use_dropout:
                self.layers['Dropout' + str(idx)] = Dropout(dropout_ration)
        idx = self.hidden_layer_num + 1
        self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)], self.params['b' + str(idx)])
        self.last_layer = SoftmaxWithLoss()
    def __init_weight(self, weight_init_std):
        """Initialize the weights.

        Parameters
        ----------
        weight_init_std : standard deviation of the initial weights (e.g. 0.01)
            'relu' or 'he' selects the "He initialization";
            'sigmoid' or 'xavier' selects the "Xavier initialization"
        """
        all_size_list = [self.input_size] + self.hidden_size_list + [self.output_size]
        for idx in range(1, len(all_size_list)):
            scale = weight_init_std
            if str(weight_init_std).lower() in ('relu', 'he'):
                scale = np.sqrt(2.0 / all_size_list[idx - 1])  # recommended initial value when using ReLU
            elif str(weight_init_std).lower() in ('sigmoid', 'xavier'):
                scale = np.sqrt(1.0 / all_size_list[idx - 1])  # recommended initial value when using sigmoid
            self.params['W' + str(idx)] = scale * np.random.randn(all_size_list[idx-1], all_size_list[idx])
            self.params['b' + str(idx)] = np.zeros(all_size_list[idx])
    def predict(self, x, train_flg=False):
        # Dropout and BatchNorm behave differently in training vs. inference,
        # so they receive the train flag; all other layers do not.
        for key, layer in self.layers.items():
            if "Dropout" in key or "BatchNorm" in key:
                x = layer.forward(x, train_flg)
            else:
                x = layer.forward(x)
        return x
    def loss(self, x, t, train_flg=False):
        """Compute the loss function.

        x is the input data, t the teacher (target) labels.
        """
        y = self.predict(x, train_flg)
        # Add the L2 weight-decay penalty over every Affine layer's weights.
        weight_decay = 0
        for idx in range(1, self.hidden_layer_num + 2):
            W = self.params['W' + str(idx)]
            weight_decay += 0.5 * self.weight_decay_lambda * np.sum(W**2)
        return self.last_layer.forward(y, t) + weight_decay
    def accuracy(self, x, t):
        # Fraction of samples whose argmax prediction matches the label.
        y = self.predict(x, train_flg=False)
        y = np.argmax(y, axis=1)
        if t.ndim != 1 : t = np.argmax(t, axis=1)
        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy
    def numerical_gradient(self, x, t):
        """Compute the gradients via numerical differentiation.

        Parameters
        ----------
        x : input data
        t : teacher labels

        Returns
        -------
        Dictionary holding the gradient of each parameter:
        grads['W1'], grads['W2'], ... are the layer weights,
        grads['b1'], grads['b2'], ... are the layer biases.
        """
        loss_W = lambda W: self.loss(x, t, train_flg=True)
        grads = {}
        for idx in range(1, self.hidden_layer_num+2):
            grads['W' + str(idx)] = numerical_gradient(loss_W, self.params['W' + str(idx)])
            grads['b' + str(idx)] = numerical_gradient(loss_W, self.params['b' + str(idx)])
            if self.use_batchnorm and idx != self.hidden_layer_num+1:
                grads['gamma' + str(idx)] = numerical_gradient(loss_W, self.params['gamma' + str(idx)])
                grads['beta' + str(idx)] = numerical_gradient(loss_W, self.params['beta' + str(idx)])
        return grads
    def gradient(self, x, t):
        # forward
        self.loss(x, t, train_flg=True)
        # backward
        dout = 1
        dout = self.last_layer.backward(dout)
        layers = list(self.layers.values())
        layers.reverse()
        for layer in layers:
            dout = layer.backward(dout)
        # Collect the gradients (adding the weight-decay term to each W).
        grads = {}
        for idx in range(1, self.hidden_layer_num+2):
            grads['W' + str(idx)] = self.layers['Affine' + str(idx)].dW + self.weight_decay_lambda * self.params['W' + str(idx)]
            grads['b' + str(idx)] = self.layers['Affine' + str(idx)].db
            if self.use_batchnorm and idx != self.hidden_layer_num+1:
                grads['gamma' + str(idx)] = self.layers['BatchNorm' + str(idx)].dgamma
                grads['beta' + str(idx)] = self.layers['BatchNorm' + str(idx)].dbeta
        return grads
"oonisim@gmail.com"
] | oonisim@gmail.com |
951b77e361f2118df09ebf9cbe9d960fbb3e7e40 | 3b59e49bc78d81d004893bc37af7d5318d62915a | /internalblue/fw/fw_0x422a.py | b37de30e112f8535e0791cc981a57052c9948a76 | [
"MIT"
] | permissive | superf0sh/internalblue | ea7283f8b69c93dfd5668369a230ea1360e6a203 | bc0aa11ef39cfa42030dacd6284768f1771d9bc7 | refs/heads/master | 2020-05-19T12:05:24.374797 | 2019-03-29T14:33:21 | 2019-03-29T14:33:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,587 | py | #!/usr/bin/env python2
# MacBook 15" early 2011 tested with Ubuntu
#
# Generic firmware file in case we do not know something...
#
# Copyright (c) 2019 Jiska Classen. (MIT License)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# - The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# - The Software is provided "as is", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising from,
# out of or in connection with the Software or the use or other dealings in the
# Software.
from fw import MemorySection
# Firmware Infos
# Human-readable name shown to the user for this firmware image.
FW_NAME = "BCM2070B0 (MacBook Pro 2011)"
# Build date: Jul 9 2008
# Memory Sections
# Address map of the chip: 0x0-0x58000 is ROM (read-only), 0x80000-0x9b000 is RAM.
#                          start,    end,           is_rom? is_ram?
SECTIONS = [ MemorySection(0x0,      0x58000,       True , False),
             MemorySection(0x80000,  0x9b000,       False, True ),
            ]
| [
"github@jiska.de"
] | github@jiska.de |
5d6499d3cdbb2859a3ae0a165a2ecc9127f8e572 | a5965ccd1c53e88dfec2e9e885da368ea8626b5d | /matcho/views.py | f1f4cdb1bd474ea10fc3147217ea53ec6a216301 | [] | no_license | deftydev/Matchmaker | 59320bdd890b4a3577972296d171fe49ff6af677 | 2202279e185611ea2897bd5b9848d3d5ca7548a3 | refs/heads/master | 2022-08-02T08:41:55.527520 | 2020-06-01T09:58:17 | 2020-06-01T09:58:17 | 268,476,247 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | from django.contrib.auth import get_user_model
from django.contrib.auth.signals import user_logged_in
from django.dispatch import receiver
from django.http import Http404
from django.shortcuts import render, get_object_or_404
from jobs.models import Job, Employer, Location
from profiles.models import Profile
from .models import PositionMatch, LocationMatch, EmployerMatch, Match
User = get_user_model()
@receiver(user_logged_in)
def get_user_matches_receiver(sender, request, user, *args, **kwargs):
for u in User.objects.exclude(email=user.email).order_by("-id")[:200]:
profile = Profile.objects.get_or_create(user=u)
matched, created = Match.objects.get_or_create_match(user_a=u,user_b=user)
def position_match_view(request, slug):
try:
instance = Job.objects.get(slug=slug)
except Job.MultipleObjectsReturned:
queryset = Job.objects.filter(slug=slug).order_by('-id')
instance = queryset[0]
except Job.DoesNotExist:
raise Http404
matches = PositionMatch.objects.filter(job__text__iexact=instance.text)
template = "matcho/position_match_view.html"
context = {
"instance": instance,
"matches": matches
}
return render(request, template, context)
def employer_match_view(request, slug):
try:
instance = Employer.objects.get(slug=slug)
except Employer.MultipleObjectsReturned:
queryset = Employer.objects.filter(slug=slug).order_by('-id')
instance = queryset[0]
except Employer.DoesNotExist:
raise Http404
template = "matcho/employer_match_view.html"
context = {
"instance": instance,
}
return render(request, template, context)
def location_match_view(request, slug):
try:
instance = Location.objects.get(slug=slug)
except Location.MultipleObjectsReturned:
queryset = Location.objects.filter(slug=slug).order_by('-id')
instance = queryset[0]
except Location.DoesNotExist:
raise Http404
template = "matcho/location_match_view.html"
context = {
"instance": instance,
}
return render(request, template, context)
| [
"devanshgupta79212346@gmail.com"
] | devanshgupta79212346@gmail.com |
36ce4dab876b4e0563d62a00bacf57b77742f175 | 53ddbcbb72d50bdf9a2157b4aedb5ded73216aa1 | /setup.py | 5ee8bd27e8a22904cceafe508a68756c097a1b12 | [] | no_license | glahr/threading_gustavo | cd9abe5ed7cb755db1c940fe1abe49b9ef80ca5e | bccb8ace31ab15e566a51de52b119af6bd4072fa | refs/heads/master | 2020-03-28T20:57:08.208026 | 2018-09-19T07:53:13 | 2018-09-19T07:53:13 | 149,116,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['threading_gustavo'],
package_dir={'': 'src'},
)
setup(**setup_args)
| [
"gjgl.lahr@gmail.com"
] | gjgl.lahr@gmail.com |
7af69734b3d0402997a391b6d9ea7aa021398234 | 8758b941fe5e9a92492c013c7cbead9e5584339b | /build.py | e0b41e31462976066645ff6bbc44704632f1f05d | [] | no_license | kevinmore/fltConverterGUI | e1f55bd6dcdb2c3ed146aabe2a4f1d6b8142f8be | df6f90e000aecfad58c037692138a1b24ee2a19e | refs/heads/master | 2021-01-19T10:20:56.390570 | 2014-03-25T17:01:36 | 2014-03-25T17:01:36 | 18,108,540 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | from distutils.core import setup
import py2exe, sys, glob
sys.argv.append('py2exe')
setup(
data_files = [("Resources", glob.glob('Resources/*'))],
options = {
'py2exe': {
"optimize": 2,
"dist_dir": "Havok OpenFlight Converter",
"dll_excludes": ["MSVCP90.dll"]
}
},
windows=[{'script': "HavokOpenFlightConverter.py", "icon_resources": [(1, "Resources/havok.ico")]}],
zipfile=None,
)
| [
"dingfengyu@gmail.com"
] | dingfengyu@gmail.com |
856c6ebfb20257e14b4afbe82626ed609abfb324 | ff44d610ebb1f6c67e884762284eedc8ac844e44 | /ship.py | 8cb76c1122fcc8775beefd93e090e9157b24a0d2 | [] | no_license | suiyidajiangyou/alien | a07d45cc72f3982a30ac6fc18471b595829807bb | 32f7d5f3848e86df2533598c4a4c44d65e01be4d | refs/heads/master | 2021-09-09T19:27:14.357613 | 2018-03-19T07:51:29 | 2018-03-19T07:51:29 | 125,820,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | import pygame
class Ship():
def __init__(self,ai_settings, screen):
self.screen = screen
self.ai_settings = ai_settings
self.image = pygame.image.load('images/ship.bmp')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
self.center = float(self.rect.centerx)
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
self.moving_right = False
self.moving_left = False
def update(self):
if self.moving_right and self.rect.right < self.screen_rect.right:
#self.rect.centerx += 1
self.center += self.ai_settings.ship_speed_factor
if self.moving_left and self.rect.left>0:
#self.rect.centerx -=1
self.center -= self.ai_settings.ship_speed_factor
self.rect.centerx = self.center
def blitme(self):
"指定位置绘制"
self.screen.blit(self.image,self.rect) | [
"32216173@qq.com"
] | 32216173@qq.com |
90bf0c478a54786446ee065fd3e8ab565c3ce656 | 3511e1edc150d55e9b86e03851d7d88641bd7895 | /panda/lib/python3.7/reprlib.py | 0d7df777ea4ce1ac00c0c5a2dc59212a346c799b | [] | no_license | abdjahiduddin/Project_CCA | 2f723a068d684b4409bb22dfb08b312b64022dd2 | 67229b1b1dfed07391973284a7db653bac14b54e | refs/heads/master | 2021-12-31T19:58:25.149238 | 2021-12-09T13:33:42 | 2021-12-09T13:33:42 | 203,208,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | /home/jay/miniconda3/lib/python3.7/reprlib.py | [
"abdjahiduddin"
] | abdjahiduddin |
6579ed7eda70af3fb0f70a442bdb4d7c70ed8293 | ada9be340c3774543364915a040a110bde17e383 | /python3/tencentyun/__init__.py | 385be21266e733302455ae58252133323069f124 | [
"MIT"
] | permissive | EdisonLiang/python-sdk | bed997d86510dba37206b968c6154040866f1291 | 46140734214bb2ed222fb45d106990ac4fcd183d | refs/heads/master | 2020-12-11T05:43:23.409502 | 2015-09-25T07:39:19 | 2015-09-25T07:39:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .image import Image
from .imagev2 import ImageV2
from .video import Video
from .auth import Auth
| [
"hoojamis@gmail.com"
] | hoojamis@gmail.com |
e524217ab7f7ebe8f9152498eee5b08ba0d1e677 | 4ee00dce848b72924161f646da8c2766001617e5 | /5th_year/Computer_Vision/lab5/lab5-logistic-regression-own-digit.py | e737ec285cb913f332fafcc076858459b098aee9 | [] | no_license | Lenkelheit/University_Applications | 12432ffdc0b79ae0964fe2ab23c0ece85e2f06a1 | 10e8a95415846f6a9eba58711dda535eb7307dfc | refs/heads/master | 2021-06-10T12:02:22.049028 | 2021-06-05T21:15:44 | 2021-06-05T21:16:09 | 154,336,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | from sklearn import datasets, metrics
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from skimage import io, color, feature, transform
from skimage.filters import sobel
mnist = datasets.load_digits()
images = mnist.images
data_size = len(images)
# Preprocessing images
images = images.reshape(len(images), -1)
labels = mnist.target
# Initialize Logistic Regression
LR_classifier = LogisticRegression(
C=0.01, penalty='l1', solver='liblinear', tol=0.01)
# Training the data on only 75% of the dataset. Rest of the 25% will be used in testing the Logistic Regression
LR_classifier.fit(images[:int((data_size / 4) * 3)],
labels[:int((data_size / 4) * 3)])
# Load a custom image
digit_img = io.imread('digit-4.png')
# Convert image to grayscale
digit_img = color.rgb2gray(digit_img)
# Resize the image to 8x8
digit_img = transform.resize(digit_img, (8, 8), mode="wrap")
# Run edge detection on the image
digit_img = sobel(digit_img)
io.imshow(digit_img)
io.show()
digit_img = digit_img.reshape(1, -1)
# Testing the data
prediction = LR_classifier.predict(digit_img)
print(prediction)
| [
"tymtsiv2710@gmail.com"
] | tymtsiv2710@gmail.com |
59f3a61a411096f49113230d614abf5415246dfb | 2235e7dd35bc4d212a5f4fe5a66bd0efc1518701 | /foodometer/urls.py | e439484919ffa4f72dffb82c60e6fb7994c16837 | [] | no_license | gogiakush/foodometer_final | aad2f71f4cada38fc455fec194312712de78e1ef | a2d2e07b5cee693c6f0889c38bb81ec8c6ed9c3c | refs/heads/master | 2023-09-03T15:16:01.189369 | 2021-09-24T18:49:55 | 2021-09-24T18:49:55 | 285,451,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | """foodometer URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path("admin/", admin.site.urls),
path("", include("foodom.urls"))
]
| [
"gogiakush@gmail.com"
] | gogiakush@gmail.com |
f615d48061218732ce9a42d3e37ebe9c895c6fcb | 29d06ec5a56f7d58ec78e762a7ab74fec959dd11 | /september/settings.py | cce1fd66bf08ad42e3d9fa679bcefec4e7b0fdde | [
"MIT"
] | permissive | kikeh/contacts | abf7bf421b0ff2018e1917c95777cdd08210a1ab | 3aef9462347cdeaab393ea93866b91c20acba870 | refs/heads/master | 2021-01-17T17:43:28.506713 | 2016-09-29T18:02:48 | 2016-09-29T18:02:48 | 61,233,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,302 | py | """
Django settings for september project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6-#)p1!8(bm5sfbprm6wa9h$%2)7+n7g!^0!fbr60&_&m!75z8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'contacts.apps.ContactsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_static_jquery',
'bootstrap3',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'september.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'september/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'september.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'september/static')
]
| [
"heisba@gmail.com"
] | heisba@gmail.com |
63e0c4f8c12b61c7d64937f8f6d8e49ee3bab0a4 | 1ebac62ee6d69e9d6e616f3aa774b8ee0c9d0063 | /imageAnalyzer/helpers.py | 9cb2578e7a4fe51fc1f181422c5fffd5faf3a60f | [] | no_license | yaniv120892/ImageProcessing | 9d7cbb995af9aa32a6575e63c62cc23b0facdbd1 | dd332712e7a034bc4662cd212c05a0173ef99a72 | refs/heads/main | 2023-07-03T05:56:51.216441 | 2021-08-07T13:53:00 | 2021-08-07T13:53:00 | 393,681,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | import json
def write_to_json_file(obj, file_to_write):
json_string = json.dumps(obj)
json_file = open(file_to_write, "w")
json_file.write(json_string)
json_file.close()
| [
"yaniv120892@gmail.com"
] | yaniv120892@gmail.com |
7f6bd6db1950bb212336a9d800d41cf1c6515222 | 27556d221db5669fd74dd57344ded4cf2942c0ae | /contact/views.py | b49b69a35b79e8d1d331c3b66fb8e75ee39ac4b8 | [] | no_license | edzen12/sendEmail | 60a9dce424ec80c41b68b1092a55259154c4e080 | eb2a8feb609d9034695674a94308ed70ea600bcd | refs/heads/master | 2023-08-25T03:36:38.538297 | 2021-10-12T08:45:11 | 2021-10-12T08:45:11 | 416,253,922 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | from rest_framework import viewsets, mixins
from contact.models import Person
from contact.serializers import PersonSerializer
class PersonViewSet(mixins.CreateModelMixin, viewsets.GenericViewSet):
queryset = Person.objects.all()
serializer_class = PersonSerializer
| [
"oichiev.edzen@gmail.com"
] | oichiev.edzen@gmail.com |
074ab027cb4315eb20983fe8da8f25ebce12fef8 | 6a9ffdcdb5a66fb91f93452b9f1a6cbe8d90f8ce | /masking.py | d4394a26aa2ed3804fe530d4139006ab35f9f748 | [] | no_license | sajid3900/Regression-in-Solid-Mechanics | d49b33028c417caa1d4941867867f157c483cc8a | 3b5d912cd062fc6349e4e267a025a22a9a15f19e | refs/heads/master | 2022-11-10T09:08:26.736611 | 2020-06-21T13:23:06 | 2020-06-21T13:23:06 | 273,908,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,155 | py | import tensorflow as tf
class SequenceMasking(tf.keras.layers.Layer):
"""Masks a sequence by using a mask value to skip timesteps.
For each timestep in the input tensor (dimension #1 in the tensor),
if all values in the input tensor at that timestep
are equal to `mask_value`, then the timestep will be masked (skipped)
in all downstream layers (as long as they support masking).
If any downstream layer does not support masking yet receives such
an input mask, an exception will be raised.
Example:
Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
to be fed to an LSTM layer.
You want to mask timestep #3 and #5 because you lack data for
these timesteps. You can:
- set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
- insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
```python
model = Sequential()
model.add(Masking(mask_value=0., input_shape=(timesteps, features)))
model.add(LSTM(32))
```
"""
def __init__(self, mask_value=0., data_format='NSXYF', **kwargs):
super(SequenceMasking, self).__init__(**kwargs)
self.data_format = data_format
self.supports_masking = True
self.mask_value = mask_value
def compute_mask(self, inputs, mask=None):
mask = tf.not_equal(inputs, self.mask_value)
for i, d in enumerate(self.data_format):
if d not in 'NS':
mask = tf.keras.backend.any(mask, axis=i, keepdims=True)
return mask
def call(self, inputs, **kwargs):
mask = self.compute_mask(inputs)
return inputs * tf.cast(mask, inputs.dtype)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'mask_value': self.mask_value,
'data_format': self.data_format}
base_config = super(SequenceMasking, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class MaskedTimeDistributed(tf.keras.layers.TimeDistributed):
def compute_mask(self, inputs, mask=None):
return mask
| [
"noreply@github.com"
] | noreply@github.com |
d31c6c6d2c15286fb322e5cf99004ad713e4dbd5 | a18b6b49902543cf01b031b02268e1af41260006 | /.metadata/.plugins/org.eclipse.core.resources/.history/1e/5049598e225200151c1b974e2966b381 | 56e6dd81ae7a3355f96209fd27b1aab51393f2b9 | [] | no_license | jinyalin/Myproject | 70392321f7f9c7fb05d8ccf71acaeec426b1c62b | dea4433bae9fb912db69f29905937af6fb86568e | refs/heads/master | 2020-05-17T20:12:46.906116 | 2015-10-19T14:32:33 | 2015-10-19T14:32:33 | 42,420,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,776 | #!/usr/bin/python
# encoding: utf-8
from urllib.parse import urlencode
from httplib2 import Http
import pymysql
import datetime
import time
import logging
import socket
def connDB():
#conn = pymysql.Connect(host='210.14.134.77',port=13306,user='notice_plate',passwd='hskj&U*I(O1207',db='monitor',charset='utf8')
conn = pymysql.Connect(host='210.14.134.77',port=13306,user='notice_plate',passwd='hskj&U*I(O1207',db='monitor',charset='utf8')
cur = conn.cursor()
return conn,cur
def exeQuery(cur,sql):
cur.execute(sql)
return(cur)
def exeUpdate(cur,sql):
sta=cur.execute(sql)
return (sta)
def connClose(conn,cur):
cur.close()
conn.close()
def exeInsert(cur,sql):
cur.execute(sql)
return(cur)
if socket.gethostname() == 'vm-ywcs03':
filename_info='./log/'+datetime.datetime.now().strftime("%Y-%m-%d")+'.info.log'
filename_error='./log/'+datetime.datetime.now().strftime("%Y-%m-%d")+'.error.log'
else:
filename_info="E:\workspace\sms-demo\info.log"
filename_error="E:\workspace\sms-demo\error.log"
def InfoLog(message):
#format='%(asctime)s - %(levelname)s - %(message)s'
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'
logging.basicConfig(filename=filename_info, level=logging.INFO , format=format)
logging.info(message)
def ErrorLog(message):
#format='%(asctime)s - %(pathname)s - %(filename)s - [line:%(lineno)d] - %(levelname)s - %(message)s'
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'
logging.basicConfig(filename=filename_error, level=logging.ERROR , format=format)
logging.error(message)
if __name__=="__main__":
http = Http(timeout=5)
headers={'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}
while True:
conn1,cur1 = connDB()
sql1 = "select * from protocol_monitor where status=0"
exeQuery(cur1,sql1)
for row in cur1:
method = row[2]
url = str(row[3])+"?"
body = eval(row[4])
response = row[5]
frequence = int(row[6])
last_time = row[8]
date1 = datetime.datetime.now()
date11 = date1.strftime("%Y-%m-%d %H:%M:%S")
endtime = datetime.datetime.strptime(date11, "%Y-%m-%d %H:%M:%S")
starttime = last_time
seconds = (endtime-starttime).seconds
content = ""
if seconds >= frequence:
try:
resptime_start = datetime.datetime.now()
resp, content = http.request(url,method,urlencode(body), headers=headers)
print(resp,content)
resptime_end = datetime.datetime.now()
respseconds = (resptime_end-resptime_start).microseconds / 1000
print("响应时间:",respseconds,"ms")
conn2,cur2 = connDB()
sql2 = "update protocol_monitor set last_time = '" + str(endtime) + "'"
exeUpdate(cur2,sql2)
connClose(conn2,cur2)
if content:
response_status = content.decode()
if response_status == response :
InfoLog(url+"访问正常,响应时间:"+str(respseconds)+"ms")
else:
ErrorLog(url+"访问异常,访问返回错误码:"+str(response_status)+"和期望的错误码"+str(response)+"不符")
NowTime = time.strftime('%Y-%m-%d %H:%M:%S')
conn3,cur3 = connDB()
content=url+"访问返回值与期望不符,请查看日志/hskj/web/Monitor/log~"
sql3 = "insert into notice_info(content,alarm_type,alarm_value,status,insert_time) values('" + content +"','sms','13261289750',0,'"+NowTime+"')"
print(sql3)
exeInsert(cur3,sql3)
connClose(conn3,cur3)
except Exception as e:
print(e)
conn3,cur3 = connDB()
NowTime = time.strftime('%Y-%m-%d %H:%M:%S')
ErrorLog(url+"接口访问超时,"+str(e))
content = url+"接口访问超时,请检查!"
sql3 = "insert into notice_info(content,alarm_type,alarm_value,status,insert_time) values('" + content +"','sms','13261289750',0,'"+NowTime+"')"
print(sql3)
exeInsert(cur3,sql3)
connClose(conn3,cur3)
time.sleep(10)
connClose(conn1,cur1)
# time.sleep(30) | [
"582559708@qq.com"
] | 582559708@qq.com | |
3191a0e4372d35548cac3407b28961fcd2892328 | e2a96b89ab6bf33344b9486a36439d5a4607f8c8 | /28_threading/lock_demo.py | ae65c0b82d413b5f387dc51162139fdbbb7ab5e2 | [] | no_license | canwe/python3-course-advanced | 49339ae485738ef1dd8b2bd9fa956c39fd7ca9f3 | bcc94b5977c377daccbba3b27c64f1eec298647a | refs/heads/master | 2020-05-26T09:36:05.769006 | 2017-11-06T16:46:04 | 2017-11-06T16:46:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | import threading
"""
We may have a use case where more than one thread will need to access the same resource at the same time.
A lock is provided by Python’s threading module and can be held by either a single thread or no thread at all.
Should a thread try to acquire a lock on a resource that is already locked,
that thread will basically pause until the lock is released.
"""
total = 0
lock = threading.Lock() # acquire the lock before anything else
def update_total(amount):
"""
Updates the total by the given amount
"""
global total
# Old way of doing things:
# lock.acquire()
# try:
# total += amount
# finally:
# lock.release()
# New way of doing things, using a context manager:
with lock:
total += amount
print (total)
if __name__ == '__main__':
for i in range(10):
my_thread = threading.Thread(target=update_total, args=(5,))
my_thread.start()
# 5
# 10
# 15
# 20
# 25
# 30
# 35
# 40
# 45
# 50
| [
"joao.guerreiro@dengun.net"
] | joao.guerreiro@dengun.net |
5c479fef2ba2a813946fd55f85dddcb13b08b531 | 73cd89c29b15382557eecdebf41339e2053aa4fb | /venvn/bin/pilfont.py | 7ad3126accd5fb9ba5e54a1046180b9ee742871c | [] | no_license | halfbloodprince107/schedularapp | 099d599a41ccb3ba769eec814c8c1f8c9d4998a8 | 1bf970775d60c91213b04fc13207af0f18296e2f | refs/heads/master | 2021-01-12T16:24:19.393655 | 2016-11-08T04:02:33 | 2016-11-08T04:02:33 | 71,989,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | #!/home/consultadd/pranay/project/loginregister/venvn/bin/python
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
import glob
import sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
VERSION = "0.4"
if len(sys.argv) <= 1:
print("PILFONT", VERSION, "-- PIL font compiler.")
print()
print("Usage: pilfont fontfiles...")
print()
print("Convert given font files to the PIL raster font format.")
print("This version of pilfont supports X BDF and PCF fonts.")
sys.exit(1)
files = []
for f in sys.argv[1:]:
files = files + glob.glob(f)
for f in files:
print(f + "...", end=' ')
try:
fp = open(f, "rb")
try:
p = PcfFontFile.PcfFontFile(fp)
except SyntaxError:
fp.seek(0)
p = BdfFontFile.BdfFontFile(fp)
p.save(f)
except (SyntaxError, IOError):
print("failed")
else:
print("OK")
| [
"nileema.g@consultadd.com"
] | nileema.g@consultadd.com |
2ab8cff3b129ff45ca9388ca0f2911afc6008d9c | 72d119b3cb832ec88725b521e354e481a85f87b6 | /FERRAMENTA/MOGD-Ferramenta-SEMINTERACAO.py | 10d93922e878c9b90aba095b914490f38e1b690f | [] | no_license | thiagorfrf1/Many-Objective-GERADOR-DE-DATASETS | 3f8897eeaf5d6438189750315aac42b35caf7eec | 32f92fd2f44fc2180363aca29890d5e00c73257c | refs/heads/master | 2023-01-23T09:10:25.048668 | 2021-06-18T06:26:00 | 2021-06-18T06:26:00 | 236,584,553 | 1 | 1 | null | 2023-01-06T09:56:44 | 2020-01-27T20:14:14 | Jupyter Notebook | UTF-8 | Python | false | false | 55,207 | py | # -*- coding: UTF-8 -*-
import numpy as np
import random
import multiprocessing
import pickle
from sklearn.datasets import make_blobs
from matplotlib import pyplot
from pandas import DataFrame
from deap import base
from deap import creator
from deap import tools
from deap import algorithms
import rpy2.robjects as robjects
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import SignatureTranslatedAnonymousPackage as STAP
from rpy2.robjects import IntVector, Formula
pandas2ri.activate()
# --- Evolutionary-search configuration (NSGA-III via DEAP) ---
cont = 0          # global evaluation counter, shared with code further down the file
bobj = 0.4        # baseline objective value; exact role unclear from this chunk -- TODO confirm
P = [12]          # divisions per reference-point layer for NSGA-III
SCALES = [1]      # scaling factor per reference-point layer (zipped with P below)
ok = "0"          # status flag kept as a string -- NOTE(review): confirm later code compares against "0"
NGEN = 300        # number of generations to evolve
CXPB = 0.7        # crossover probability
MUTPB = 0.2       # mutation probability
INDPB = 0.05      # per-gene (independent) mutation probability
POP = 50          # population size
# Choose which kind of synthetic dataset to generate:
#   1 - Blobs of points with a Gaussian distribution.
#   2 - A swirl pattern ("two moons").
#   3 - A classification problem with concentric-circle datasets.
dataset = 1
# How many instances (examples) to use?
n_instancias = 1000
# How many attributes (features) to use?
n_features = 2
# How many blobs (centers) to use?
centers = 1
# Generate the base dataset.
# NOTE(review): only the blob generator is invoked here; the `dataset`
# selector above is not consulted in this visible chunk -- confirm whether
# options 2/3 are handled elsewhere in the file.
X, y = make_blobs(n_samples=int(n_instancias), centers=int(centers), n_features=int(n_features))
# Build a DataFrame view of the samples; keep a third coordinate column
# only when more than two features exist (columns beyond the third are dropped).
if n_features == 2:
    df = DataFrame(dict(x=X[:, 0], y=X[:, 1], label=y))
else:
    df = DataFrame(dict(x=X[:, 0], y=X[:, 1], z=X[:, 2], label=y))
colors = {0: 'red', 1: 'blue', 2: 'orange'}  # per-class scatter colors (supports up to 3 classes)
# Scatter-plot each class in its own color; the figure is built but never
# shown (pyplot.show() is disabled in this non-interactive version).
fig, ax = pyplot.subplots()
grouped = df.groupby('label')
for key, group in grouped:
    group.plot(ax=ax, kind='scatter', x='x', y='y', label=key, color=colors[key])
#pyplot.show()
dataFrame = df            # module-level handle, presumably read by the fitness code below -- TODO confirm
filename = "Ferramentaa"  # base name for output artifacts
# Complexity measures that can be optimised (index -> measure):
#   1 - Class imbalance C2
#   2 - Linearity L2
#   3 - Neighborhood N2
#   4 - Network ClsCoef
#   5 - Dimensionality T2
#   6 - Feature-based F1
# (In the interactive version these were chosen via input(); here they are fixed.)
metricas = ("1 2")
metricasList = metricas.split()
N_ATTRIBUTES = int(n_instancias)  # genome length: one gene per dataset instance
NOBJ = len(metricasList)          # number of objectives = number of selected measures
# Interactive target-value prompt, disabled in this version:
#objetivos = input("Choose the target value for each measure")
#objetivosList = objetivos.split()
# Fixed target value for every supported measure:
globalBalance = 0.07
globalLinear = 0.07
globalN2 = 0.07
globalClsCoef = 0.07
globalt2 = 0.07
globalf1 = 0.07
dic = {}  # shared dict, presumably a cache for fitness evaluations -- TODO confirm against later code
# Build the NSGA-III reference-point set (one layer per (p, s) pair),
# then deduplicate the concatenated points.
ref_points = [tools.uniform_reference_points(NOBJ, p, s) for p, s in zip(P, SCALES)]
ref_points = np.concatenate(ref_points)
_, uniques = np.unique(ref_points, axis=0, return_index=True)
ref_points = ref_points[uniques]
string = """
#' Measures of linearity
#'
#' The linearity measures try to quantify if it is possible to separate the
#' labels by a hyperplane or linear function. The underlying assumption is that
#' a linearly separable problem can be considered simpler than a problem
#' requiring a non-linear decision boundary.
#'
#' @family complexity-measures
#' @param x A data.frame contained only the input attributes.
#' @param y A response vector with one value for each row/component of x.
#' @param measures A list of measures names or \code{"all"} to include all them.
#' @param formula A formula to define the output column.
#' @param data A data.frame dataset contained the input attributes and class.
#' @param summary A list of summarization functions or empty for all values. See
#' \link{summarization} method to more information. (Default:
#' \code{c("mean", "sd")})
#' @param ... Not used.
#' @details
#' The following classification measures are allowed for this method:
#' \describe{
#' \item{"L1"}{Sum of the error distance by linear programming (L1) computes
#' the sum of the distances of incorrectly classified examples to a linear
#' boundary used in their classification.}
#' \item{"L2"}{Error rate of linear classifier (L2) computes the error rate
#' of the linear SVM classifier induced from dataset.}
#' \item{"L3"}{Non-linearity of a linear classifier (L3) creates a new
#' dataset randomly interpolating pairs of training examples of the same
#' class and then induce a linear SVM on the original data and measure
#' the error rate in the new data points.}
#' }
#' The following regression measures are allowed for this method:
#' \describe{
#' \item{"L1"}{Mean absolute error (L1) averages the absolute values of the
#' residues of a multiple linear regressor.}
#' \item{"L2"}{Residuals variance (L2) averages the square of the residuals
#' from a multiple linear regression.}
#' \item{"L3"}{Non-linearity of a linear regressor (L3) measures how
#' sensitive the regressor is to the new randomly interpolated points.}
#' }
#' @return A list named by the requested linearity measure.
#'
#' @references
#' Albert Orriols-Puig, Nuria Macia and Tin K Ho. (2010). Documentation for the
#' data complexity library in C++. Technical Report. La Salle - Universitat
#' Ramon Llull.
#'
#' @examples
#' ## Extract all linearity measures for classification task
#' data(iris)
#' linearity(Species ~ ., iris)
#'
#' ## Extract all linearity measures for regression task
#' data(cars)
#' linearity(speed ~ ., cars)
#' @export
linearity <- function(...) {
UseMethod("linearity")
}
#' @rdname linearity
#' @export
linearity.default <- function(x, y, measures="all",
summary=c("mean", "sd"), ...) {
if(!is.data.frame(x)) {
stop("data argument must be a data.frame")
}
if(is.data.frame(y)) {
y <- y[, 1]
}
foo <- "regression"
if(is.factor(y)) {
foo <- "classification"
if(min(table(y)) < 2) {
stop("number of examples in the minority class should be >= 2")
}
}
if(nrow(x) != length(y)) {
stop("x and y must have same number of rows")
}
if(measures[1] == "all") {
measures <- ls.linearity()
}
measures <- match.arg(measures, ls.linearity(), TRUE)
if (length(summary) == 0) {
summary <- "return"
}
colnames(x) <- make.names(colnames(x), unique=TRUE)
eval(call(foo, x=x, y=y, measures=measures, summary=summary))
}
#' @rdname linearity
#' @export
linearity.formula <- function(formula, data, measures="all",
summary=c("mean", "sd"), ...) {
if(!inherits(formula, "formula")) {
stop("method is only for formula datas")
}
if(!is.data.frame(data)) {
stop("data argument must be a data.frame")
}
modFrame <- stats::model.frame(formula, data)
attr(modFrame, "terms") <- NULL
linearity.default(modFrame[, -1, drop=FALSE], modFrame[, 1, drop=FALSE],
measures, summary, ...)
}
classification <- function(x, y, measures, summary, ...) {
data <- data.frame(x, class=y)
data <- ovo(data)
model <- lapply(data, smo)
sapply(measures, function(f) {
measure = eval(call(paste("c", f, sep="."), model=model, data=data))
summarization(measure, summary, f %in% ls.linearity.multiples(), ...)
}, simplify=FALSE)
}
regression <- function(x, y, measures, summary, ...) {
x <- normalize(x)
y <- normalize(y)[,1]
x <- x[order(y), ,drop=FALSE]
y <- y[order(y)]
model <- stats::lm(y ~ ., cbind(y=y, x))
sapply(measures, function(f) {
measure = eval(call(paste("r", f, sep="."), m=model, x=x, y=y))
summarization(measure, summary, f %in% ls.linearity.multiples(), ...)
}, simplify=FALSE)
}
ls.linearity <- function() {
c("L1", "L2", "L3")
}
ls.linearity.multiples <- function() {
ls.linearity()
}
smo <- function(data) {
e1071::svm(class ~ ., data, scale=TRUE, kernel="linear")
}
c.L1 <- function(model, data) {
aux <- mapply(function(m, d) {
prd <- stats::predict(m, d, decision.values=TRUE)
err <- rownames(d[prd != d$class,])
dst <- attr(prd, "decision.values")[err,]
sum(abs(dst))/nrow(d)
}, m=model, d=data)
#aux <- 1/(mean(aux) + 1)
#aux <- 1 - aux
aux <- 1 - 1/(aux + 1)
return(aux)
}
error <- function(pred, class) {
1 - sum(diag(table(class, pred)))/sum(table(class, pred))
}
c.L2 <- function(model, data) {
aux <- mapply(function(m, d) {
prd <- stats::predict(m, d)
error(prd, d$class)
}, m=model, d=data)
#return(mean(aux))
return(aux)
}
c.L3 <- function(model, data) {
aux <- mapply(function(m, d) {
tmp <- c.generate(d, nrow(d))
prd <- stats::predict(m, tmp)
error(prd, tmp$class)
}, m=model, d=data)
#return(mean(aux))
return(aux)
}
r.L1 <- function(m, ...) {
#mean(abs(m$residuals))
abs(m$residuals)
}
r.L2 <- function(m, ...) {
#mean(m$residuals^2)
mean(m$residuals^2)
}
# L3 (regression): squared prediction error of the linear model on synthetic
# points interpolated between neighboring examples (x is assumed sorted by y,
# which regression() guarantees).
r.L3 <- function(m, x, y) {
  test <- r.generate(x, y, nrow(x))
  pred <- stats::predict.lm(m, test[, -ncol(test), drop=FALSE])
  #mean((pred - test[, ncol(test)])^2)
  (pred - test[, ncol(test)])^2
}
#' Measures of class balance
#'
#' Classification task. These measures capture the differences in the number of
#' examples per class in the dataset. When these differences are severe,
#' problems related to generalization of the ML classification techniques could
#' happen because of the imbalance ratio.
#'
#' @family complexity-measures
#' @param x A data.frame contained only the input attributes.
#' @param y A factor response vector with one label for each row/component of x.
#' @param measures A list of measures names or \code{"all"} to include all them.
#' @param formula A formula to define the class column.
#' @param data A data.frame dataset contained the input attributes and class.
#' @param ... Not used.
#' @details
#' The following measures are allowed for this method:
#' \describe{
#' \item{"C1"}{The entropy of class proportions (C1) capture the imbalance in
#' a dataset based on the proportions of examples per class.}
#' \item{"C2"}{The imbalance ratio (C2) is an index computed for measuring
#' class balance. This is a version of the measure that is also suited for
#' multiclass classification problems.}
#' }
#' @return A list named by the requested class balance measure.
#'
#' @references
#' Ana C Lorena, Ivan G Costa, Newton Spolaor and Marcilio C P Souto. (2012).
#' Analysis of complexity indices for classification problems: Cancer gene
#' expression data. Neurocomputing 75, 1, 33--42.
#'
#' Ajay K Tanwani and Muddassar Farooq. (2010). Classification potential vs.
#' classification accuracy: a comprehensive study of evolutionary algorithms
#' with biomedical datasets. Learning Classifier Systems 6471, 127--144.
#'
#' @examples
#' ## Extract all balance measures for classification task
#' data(iris)
#' balance(Species ~ ., iris)
#' @export
balance <- function(...) {
  # S3 generic: dispatches to balance.default or balance.formula
  UseMethod("balance")
}
#' @rdname balance
#' @export
balance.default <- function(x, y, measures="all", ...) {
  # validate inputs: x must be a data.frame matching y in length
  if(!is.data.frame(x)) {
    stop("data argument must be a data.frame")
  }
  if(is.data.frame(y)) {
    y <- y[, 1]
  }
  y <- as.factor(y)
  if(nrow(x) != length(y)) {
    stop("x and y must have same number of rows")
  }
  if(measures[1] == "all") {
    measures <- ls.balance()
  }
  measures <- match.arg(measures, ls.balance(), TRUE)
  # the balance measures depend only on the class distribution, so x is not
  # used beyond validation
  sapply(measures, function(f) {
    eval(call(paste("c", f, sep="."), y=y))
  }, simplify=FALSE)
}
#' @rdname balance
#' @export
balance.formula <- function(formula, data, measures="all", ...) {
  if(!inherits(formula, "formula")) {
    stop("method is only for formula datas")
  }
  if(!is.data.frame(data)) {
    stop("data argument must be a data.frame")
  }
  # model.frame places the response in column 1 and the predictors after it
  modFrame <- stats::model.frame(formula, data)
  attr(modFrame, "terms") <- NULL
  balance.default(modFrame[, -1, drop=FALSE], modFrame[, 1, drop=FALSE],
    measures, ...)
}
# Names of the class-balance measures.
ls.balance <- function() {
  paste0("C", 1:2)
}
# C1: normalized entropy of the class proportions. Equals 1 for perfectly
# balanced classes and decreases toward 0 as imbalance grows.
c.C1 <- function(y) {
  prop <- table(y)/length(y)
  entropy <- -sum(prop * log(prop))
  entropy/log(nlevels(y))
}
# C2: multiclass imbalance ratio, rescaled so that 0 indicates perfectly
# balanced classes and values near 1 indicate strong imbalance.
# NOTE(review): summary() on a factor truncates to 100 levels (maxsum);
# table(y) would be safer for very many classes — confirm before changing.
c.C2 <- function(y) {
  counts <- summary(y)
  k <- length(counts)
  ir <- ((k - 1)/k) * sum(counts/(length(y) - counts))
  1 - 1/ir
}
# Column-wise minima of a matrix or data.frame.
colMin <- function(x) {
  apply(x, 2, function(column) min(column))
}
# Column-wise maxima of a matrix or data.frame.
colMax <- function(x) {
  apply(x, 2, function(column) max(column))
}
# Full pairwise distance matrix using the Gower metric, which handles mixed
# numeric/categorical attributes. Intentionally masks base dist() within this
# sourced environment.
dist <- function(x) {
  as.matrix(cluster::daisy(x, metric="gower", stand=TRUE, warnBin=FALSE))
}
# Builds the no-intercept formula "~ 0 + a + b + ..." over all columns of x,
# used by binarize() to expand categorical attributes into dummy columns.
form <- function(x) {
  rhs <- paste(colnames(x), collapse="+")
  stats::formula(paste("~ 0 +", rhs))
}
# One-hot encodes the categorical columns of x via model.matrix; numeric
# columns pass through unchanged.
binarize <- function(x) {
  mm <- stats::model.matrix(form(x), x)
  data.frame(mm)
}
# Decomposes a multiclass dataset into all pairwise (one-vs-one) binary
# subproblems: a list with one data.frame per pair of class levels, with the
# class factor re-leveled to the two remaining labels.
ovo <- function(data) {
  pairs <- utils::combn(levels(data$class), 2)
  apply(pairs, 2, function(pair) {
    sub <- base::subset(data, data$class %in% pair)
    sub$class <- factor(sub$class)
    sub
  })
}
# Creates one synthetic example by interpolating two random examples of one
# randomly chosen class; categorical attributes are sampled from the values
# observed in that class. The class column (last) is kept from the parents.
c.interpolation <- function(data) {
  # all examples of a randomly picked class
  aux <- data[data$class == sample(data$class, 1),]
  # two random parents to interpolate between
  tmp <- aux[sample(nrow(aux), 2),]
  rnd <- stats::runif(1)
  for(i in 1:(ncol(data)-1)) {
    if(is.numeric(data[,i])) {
      tmp[1,i] <- tmp[1,i] + (tmp[2,i] - tmp[1,i]) * rnd
    } else {
      tmp[1,i] <- sample(aux[,i], 1)
    }
  }
  return(tmp[1,])
}
# Generates n synthetic classification examples by repeated same-class
# interpolation (see c.interpolation).
c.generate <- function(data, n) {
  tmp <- do.call("rbind",
    lapply(1:n, function(i) {
      c.interpolation(data)
    })
  )
  return(tmp)
}
# Linearly rescales a numeric vector to the [0, 1] interval.
maxmin <- function(x) {
  rng <- range(x)
  (x - rng[1])/(rng[2] - rng[1])
}
# Min-max normalizes every numeric, non-constant column of x to [0, 1].
# Categorical and constant columns are left untouched (constant columns would
# produce division by zero).
normalize <- function(x) {
  x <- as.data.frame(x)
  for(col in seq_len(ncol(x))) {
    values <- x[, col]
    if(is.numeric(values) && length(unique(values)) != 1) {
      x[, col] <- maxmin(values)
    }
  }
  return(x)
}
# Spearman correlation coefficient computed from a vector x of rank
# differences.
spearman <- function(x) {
  n <- length(x)
  1 - 6*sum(x^2)/(n^3 - n)
}
# Creates one synthetic regression example by interpolating examples i-1 and
# i (neighbors in the output-sorted data). Numeric attributes are linearly
# interpolated; categorical attributes are sampled from the pair.
r.interpolation <- function(x, y, i) {
  aux <- x[(i-1):i,,drop=FALSE]
  rnd <- stats::runif(1)
  for(j in 1:ncol(x)) {
    if(is.numeric(x[,j])) {
      aux[1,j] <- aux[1,j] + (aux[2,j] - aux[1,j]) * rnd
    } else {
      aux[1,j] <- sample(aux[,j], 1)
    }
  }
  tmp <- y[(i-1):i]
  # NOTE: a fresh random weight is drawn here, so the target is interpolated
  # with a different coefficient than the attributes — presumably intentional
  rnd <- stats::runif(1)
  tmp[1] <- tmp[1]*rnd + tmp[2]*(1-rnd)
  return(cbind(aux[1,], tmp[1]))
}
# Generates n-1 synthetic regression examples, one per consecutive pair of
# (output-sorted) training examples; the target column is named "y".
r.generate <- function(x, y, n) {
  tmp <- do.call("rbind",
    lapply(2:n, function(i) {
      r.interpolation(x, y, i)
    })
  )
  tmp <- data.frame(tmp)
  colnames(tmp) <- c(colnames(x), "y")
  return(tmp)
}
#' Post processing complexity measures
#'
#' Post-processing alternatives to deal with multiples values. This method is
#' used by the complexity measures to summarize the obtained values.
#'
#' @param measure A list with the complexity measures values.
#' @param summary The functions to post processing the data. See the details
#' to more information. Default: \code{c("mean", "sd")}
#' @param multiple A logical value defining if the measure should return
#' multiple values. (Default: \code{TRUE})
#' @param ... Extra values used to the functions of summarization.
#' @details
#' The post processing functions are used to summarize the complexity measures.
#' They are organized into three groups: return, descriptive statistic and
#' distribution. Currently, the hypothesis testing post processing are not
#' supported.
#'
#' In practice, there are no difference among the types, so that more than one
#' type and functions can be combined. Usually, these function are used to
#' summarize a set of values for each complexity measures. For instance, a
#' measure computed for each attribute can be summarized using the
#' \code{"mean"} and/or \code{"sd"}.
#'
#' In addition to the native functions available in R, the following functions
#' can be used:
#' \describe{
#' \item{"histogram"}{Computes a histogram of the given data value. The extra
#' parameters '\code{bins}' can be used to define the number of values to
#' be returned. The parameters '\code{max}' and '\code{min}' are used to
#' define the range of the data. The default value for these parameters
#' are respectively \code{10, min(x)} and \code{max(x)}.}
#' \item{"kurtosis"}{See \code{\link[e1071]{kurtosis}}}
#' \item{"max"}{See \code{\link{max}}}
#' \item{"mean"}{See \code{\link{mean}}}
#' \item{"median"}{See \code{\link{median}}}
#' \item{"min"}{See \code{\link{min}}}
#' \item{"quantiles"}{See \code{\link{quantile}}}
#' \item{"sd"}{See \code{\link{sd}}}
#' \item{"skewness"}{See \code{\link[e1071]{skewness}}}
#' \item{"var"}{See \code{\link{var}}}
#' \item{"return"}{Returns the original value(s) of the complexity measure.}
#' }
#' These functions are not restrictive, thus another functions can be applied
#' as post-processing summarization function.
#'
#' @return A list with the post-processed complexity measures.
#'
#' @references
#' Albert Orriols-Puig, Nuria Macia and Tin K Ho. (2010). Documentation for the
#' data complexity library in C++. Technical Report. La Salle - Universitat
#' Ramon Llull.
#'
#' @examples
#' summarization(runif(15))
#' summarization(runif(15), c("min", "max"))
#' summarization(runif(15), c("quantiles", "skewness"))
#' @export
summarization <- function(measure, summary=c("mean", "sd"), multiple=TRUE,
  ...) {
  # nothing to summarize (the measure could not be computed)
  if(length(measure) == 0) {
    return(NA)
  }
  # single-valued measures are returned as a plain numeric scalar
  if(!multiple) {
    if(length(measure) > 1) {
      stop("More than one value was obtained for a single measure")
    }
    measure = as.numeric(measure[1])
    return(measure)
  }
  # drop NA/NaN/Inf values before applying the summary functions
  measure = measure[is.finite(measure)]
  res = sapply(summary, function(s) {
    # each element of 'summary' is a function name, looked up and called
    # with the measure values and any extra arguments
    do.call(s, list(measure, ...))
  }, simplify=FALSE)
  unlist(res)
}
# Thin wrappers around e1071 so skewness/kurtosis can be referenced by name
# as summarization functions; extra arguments are accepted and ignored.
skewness <- function(x, na.rm=FALSE, type=3, ...) {
  e1071::skewness(x, na.rm=na.rm, type=type)
}
kurtosis <- function(x, na.rm=FALSE, type=3, ...) {
  e1071::kurtosis(x, na.rm=na.rm, type=type)
}
# Quantile summarization function; falls back to all-NA quantiles when the
# input cannot be processed (e.g. empty after the finite filter).
quantiles <- function(x, type=1, ...) {
  result <- try(stats::quantile(x, type=type, ...), silent=TRUE)
  if(inherits(result, "try-error")) {
    result <- stats::quantile(NA, na.rm=TRUE, ...)
  }
  result
}
# Interquartile range that yields NA (instead of raising an error) when x
# contains missing values and na.rm is FALSE.
iqr <- function(x, na.rm=FALSE, ...) {
  if(na.rm || !anyNA(x)) {
    stats::IQR(x, na.rm=na.rm)
  } else {
    NA
  }
}
# Summarization function: relative frequencies of x over 'bins' equal-width
# intervals between 'min' and 'max'. Non-finite bounds (e.g. empty input)
# fall back to the [0, bins] range.
histogram <- function(x, bins=10, min=base::min(x, na.rm=TRUE),
  max=base::max(x, na.rm=TRUE), ...) {
  lo <- ifelse(is.finite(min), min, 0)
  hi <- ifelse(is.finite(max), max, bins)
  breaks <- seq(lo, hi, length.out=bins + 1)
  counts <- graphics::hist(as.numeric(x), breaks=breaks, plot=FALSE)$counts
  counts / length(x)
}
#' Measures of neighborhood
#'
#' Classification task. The Neighborhood measures analyze the neighborhoods of
#' the data items and try to capture class overlapping and the shape of the
#' decision boundary. They work over a distance matrix storing the distances
#' between all pairs of data points in the dataset.
#'
#' @family complexity-measures
#' @param x A data.frame contained only the input attributes.
#' @param y A factor response vector with one label for each row/component of x.
#' @param measures A list of measures names or \code{"all"} to include all them.
#' @param formula A formula to define the class column.
#' @param data A data.frame dataset contained the input attributes and class.
#' @param summary A list of summarization functions or empty for all values. See
#' \link{summarization} method to more information. (Default:
#' \code{c("mean", "sd")})
#' @param ... Not used.
#' @details
#' The following measures are allowed for this method:
#' \describe{
#' \item{"N1"}{Fraction of borderline points (N1) computes the percentage of
#' vertexes incident to edges connecting examples of opposite classes in
#' a Minimum Spanning Tree (MST).}
#' \item{"N2"}{Ratio of intra/extra class nearest neighbor distance (N2)
#' computes the ratio of two sums: intra-class and inter-class. The former
#' corresponds to the sum of the distances between each example and its
#' closest neighbor from the same class. The later is the sum of the
#' distances between each example and its closest neighbor from another
#' class (nearest enemy).}
#' \item{"N3"}{Error rate of the nearest neighbor (N3) classifier corresponds
#' to the error rate of a one Nearest Neighbor (1NN) classifier, estimated
#' using a leave-one-out procedure in dataset.}
#' \item{"N4"}{Non-linearity of the nearest neighbor classifier (N4) creates
#' a new dataset randomly interpolating pairs of training examples of the
#' same class and then induce a the 1NN classifier on the original data and
#' measure the error rate in the new data points.}
#' \item{"T1"}{Fraction of hyperspheres covering data (T1) builds
#' hyperspheres centered at each one of the training examples, which have
#'   their radii grown until the hypersphere reaches an example of another
#' class. Afterwards, smaller hyperspheres contained in larger hyperspheres
#' are eliminated. T1 is finally defined as the ratio between the number of
#' the remaining hyperspheres and the total number of examples in the
#' dataset.}
#' \item{"LSC"}{Local Set Average Cardinality (LSC) is based on Local Set
#'   (LS), defined as the set of points from the dataset whose distance to
#'   a given example is smaller than the distance from that example to the
#'   examples of a different class. LSC is the average cardinality of the LS.}
#' }
#' @return A list named by the requested neighborhood measure.
#'
#' @references
#' Albert Orriols-Puig, Nuria Macia and Tin K Ho. (2010). Documentation for the
#' data complexity library in C++. Technical Report. La Salle - Universitat
#' Ramon Llull.
#'
#' Enrique Leyva, Antonio Gonzalez and Raul Perez. (2014). A Set of Complexity
#' Measures Designed for Applying Meta-Learning to Instance Selection. IEEE
#' Transactions on Knowledge and Data Engineering 27, 2, 354--367.
#'
#' @examples
#' ## Extract all neighborhood measures for classification task
#' data(iris)
#' neighborhood(Species ~ ., iris)
#' @export
neighborhood <- function(...) {
  # S3 generic: dispatches to neighborhood.default or neighborhood.formula
  UseMethod("neighborhood")
}
#' @rdname neighborhood
#' @export
neighborhood.default <- function(x, y, measures="all", summary=c("mean", "sd"),
  ...) {
  if(!is.data.frame(x)) {
    stop("data argument must be a data.frame")
  }
  if(is.data.frame(y)) {
    y <- y[, 1]
  }
  y <- as.factor(y)
  # the measures need at least two examples of every class (interpolation,
  # same-class neighbors)
  if(min(table(y)) < 2) {
    stop("number of examples in the minority class should be >= 2")
  }
  if(nrow(x) != length(y)) {
    stop("x and y must have same number of rows")
  }
  if(measures[1] == "all") {
    measures <- ls.neighborhood()
  }
  measures <- match.arg(measures, ls.neighborhood(), TRUE)
  # an empty summary list means "return the raw values"
  if (length(summary) == 0) {
    summary <- "return"
  }
  colnames(x) <- make.names(colnames(x), unique=TRUE)
  data <- data.frame(x, class=y)
  # pairwise Gower distance matrix shared by all neighborhood measures
  dst <- dist(x)
  sapply(measures, function(f) {
    measure = eval(call(paste("c", f, sep="."), dst=dst, data=data))
    summarization(measure, summary, f %in% ls.neighborhood.multiples(), ...)
  }, simplify=FALSE)
}
#' @rdname neighborhood
#' @export
neighborhood.formula <- function(formula, data, measures="all",
  summary=c("mean", "sd"), ...) {
  if(!inherits(formula, "formula")) {
    stop("method is only for formula datas")
  }
  if(!is.data.frame(data)) {
    stop("data argument must be a data.frame")
  }
  # model.frame places the response in column 1 and the predictors after it
  modFrame <- stats::model.frame(formula, data)
  attr(modFrame, "terms") <- NULL
  neighborhood.default(modFrame[, -1, drop=FALSE], modFrame[, 1, drop=FALSE],
    measures, summary, ...)
}
# Names of the neighborhood measures.
ls.neighborhood <- function() {
  c("N1", "N2", "N3", "N4", "T1", "LSC")
}
# Measures that produce one value per example (summarized later); N1 and LSC
# are single-valued.
ls.neighborhood.multiples <- function() {
  setdiff(ls.neighborhood(), c("N1", "LSC"))
}
# N1: fraction of borderline points. Builds the minimum spanning tree over
# the distance matrix and counts the vertexes incident to an edge whose
# endpoints belong to different classes.
c.N1 <- function(dst, data) {
  g <- igraph::graph.adjacency(dst, mode="undirected", weighted=TRUE)
  tree <- as.matrix(igraph::as_adj(igraph::mst(g)))
  # (row, col) indexes of the MST edges
  tmp <- which(tree != 0, arr.ind=TRUE)
  # edges connecting examples of different classes
  aux <- which(data[tmp[,1],]$class != data[tmp[,2],]$class)
  aux <- length(unique(tmp[aux,1]))
  return(aux/nrow(data))
}
# Distance from example i (a rowname) to its nearest neighbor of the same
# class, excluding i itself.
intra <- function(dst, data, i) {
  tmp <- rownames(data[data$class == data[i,]$class,])
  aux <- min(dst[i, setdiff(tmp, i)])
  return(aux)
}
# Distance from example i to its nearest enemy (closest example of another
# class). sort() keeps the neighbor's rowname as the name of the returned
# value — radios() depends on that name.
inter <- function(dst, data, i) {
  tmp <- rownames(data[data$class != data[i,]$class,])
  aux <- sort(dst[i, tmp])[1]
  return(aux)
}
# N2: per-example ratio of the intra-class to the inter-class nearest
# neighbor distance, rescaled to [0, 1).
c.N2 <- function(dst, data) {
  aux <- sapply(rownames(data), function(i) {
    c(intra(dst, data, i), inter(dst, data, i))
  })
  #aux <- sum(aux[1,])/sum(aux[2,])
  # row 1 holds intra distances, row 2 the inter (nearest enemy) distances
  aux <- 1 - (1/((aux[1,]/aux[2,]) + 1))
  return(aux)
}
# Class label of the k-th nearest neighbor of every example (by row of dst).
# With k=2 the nearest entry — the example itself at distance 0 — is skipped.
knn <- function(data, dst, k) {
  apply(dst, 1, function(i) {
    tmp <- names(sort(i)[k])
    data[tmp,]$class
  })
}
# N3: leave-one-out error of the 1-nearest-neighbor classifier, returned as
# one logical (misclassified?) value per example.
c.N3 <- function(dst, data) {
  predicted <- knn(data, dst, 2)
  return(predicted != data$class)
}
# N4: non-linearity of the 1NN classifier. Synthetic points are interpolated
# from same-class pairs; the 1NN classifier induced on the original data is
# evaluated on them.
c.N4 <- function(dst, data) {
  tran <- rbind(data, c.generate(data, nrow(data)))
  test <- utils::tail(tran, nrow(data))
  # distances computed jointly so train and test share the Gower scaling
  dst <- dist(tran[,-ncol(tran), drop=FALSE])
  dst <- dst[rownames(test), rownames(data)]
  # k=1 is correct here: the test rows are not among the reference columns
  aux <- knn(data, dst, 1) != test$class
  #return(mean(aux))
  return(aux)
}
# Hypersphere radius of example i for the T1 measure. Recursive definition:
# if i and its nearest enemy j are mutual nearest enemies, each takes half
# the distance between them; otherwise i's radius is its enemy distance
# minus j's radius.
radios <- function(dst, data, i) {
  di <- inter(dst, data, i)
  # inter() returns a named value; the name is the nearest enemy's rowname
  j <- names(di)
  dj <- inter(dst, data, j)
  k <- names(dj)
  if(i == k) {
    return(di/2)
  } else {
    tmp <- radios(dst, data, j)
    return(di - tmp)
  }
}
# Computes the hypersphere radius of every example (see radios()); the
# result is named by the example rownames.
hyperspher <- function(dst, data) {
  sapply(rownames(data), function(id) {
    as.numeric(radios(dst, data, id))
  })
}
# Boolean adherence matrix: entry [i, j] is TRUE when example j lies inside
# the hypersphere of radius r[i] centered on example i.
translate <- function(dst, r) {
  inside <- sapply(rownames(dst), function(i) {
    dst[i,] < r[i]
  })
  return(t(inside))
}
# Greedy hypersphere elimination for the T1 measure: repeatedly keeps the
# hypersphere covering the most remaining examples, removes it together with
# everything it covers, and records how many examples each kept sphere
# absorbed (named by the sphere's center). The 'data' argument is kept for
# interface compatibility but is not used.
adherence <- function(adh, data) {
  h <- n <- c()
  repeat{
    # center whose hypersphere covers the largest number of examples
    aux <- which.max(rowSums(adh))
    tmp <- names(which(adh[aux,]))
    # drop the chosen center and every example it covers
    dif <- setdiff(rownames(adh), c(tmp, names(aux)))
    adh <- adh[dif, dif, drop=FALSE]
    if(all(dim(adh) != 0)) {
      h <- c(h, length(tmp))
    } else {
      h <- c(h, 1)
    }
    n <- c(n, names(aux))
    # fixed: the original tested all(dim(adh)) == 0, which coerces the dims
    # to logical first and only behaved correctly because adh stays square;
    # test emptiness of the matrix explicitly
    if(all(dim(adh) == 0))
      break
  }
  names(h) <- n
  return(h)
}
# T1: fraction of hyperspheres needed to cover the data; returns the
# per-sphere absorption counts scaled by the number of examples.
c.T1 <- function(dst, data) {
  r <- hyperspher(dst, data)
  aux <- adherence(translate(dst, r), data)
  #aux <- length(aux)/nrow(data)
  return(aux/nrow(data))
}
# LSC: local set average cardinality. The local set of an example contains
# the examples closer to it than its nearest enemy.
c.LSC <- function(dst, data) {
  # per-example nearest-enemy distance (local set radius)
  r <- sapply(rownames(data), function(i) {
    as.numeric(inter(dst, data, i))
  })
  aux <- 1 - sum(translate(dst, r))/(nrow(dst)^2)
  return(aux)
}
#' Measures of overlapping
#'
#' Classification task. The overlapping measures evaluate how informative the
#' available features are to separate the classes. If there is at least one very
#' discriminative feature in the dataset, the problem can be considered simpler
#' than if there is no such an attribute.
#'
#' @family complexity-measures
#' @param x A data.frame contained only the input attributes.
#' @param y A factor response vector with one label for each row/component of x.
#' @param measures A list of measures names or \code{"all"} to include all them.
#' @param formula A formula to define the class column.
#' @param data A data.frame dataset contained the input attributes and class.
#' @param summary A list of summarization functions or empty for all values. See
#' \link{summarization} method to more information. (Default:
#' \code{c("mean", "sd")})
#' @param ... Not used.
#' @details
#' The following measures are allowed for this method:
#' \describe{
#' \item{"F1"}{Maximum Fisher's Discriminant Ratio (F1) measures the overlap
#' between the values of the features and takes the value of the largest
#' discriminant ratio among all the available features.}
#' \item{"F1v"}{Directional-vector maximum Fisher's discriminant ratio (F1v)
#' complements F1 by searching for a vector able to separate two classes
#' after the training examples have been projected into it.}
#' \item{"F2"}{Volume of the overlapping region (F2) computes the overlap of
#' the distributions of the features values within the classes. F2 can be
#' determined by finding, for each feature its minimum and maximum values
#' in the classes.}
#' \item{"F3"}{The maximum individual feature efficiency (F3) of each
#' feature is given by the ratio between the number of examples that are
#' not in the overlapping region of two classes and the total number of
#' examples. This measure returns the maximum of the values found among
#' the input features.}
#' \item{"F4"}{Collective feature efficiency (F4) get an overview on how
#' various features may work together in data separation. First the most
#' discriminative feature according to F3 is selected and all examples that
#' can be separated by this feature are removed from the dataset. The
#' previous step is repeated on the remaining dataset until all the
#' features have been considered or no example remains. F4 returns the
#' ratio of examples that have been discriminated.}
#' }
#' @return A list named by the requested overlapping measure.
#'
#' @references
#' Albert Orriols-Puig, Nuria Macia and Tin K Ho. (2010). Documentation for the
#' data complexity library in C++. Technical Report. La Salle - Universitat
#' Ramon Llull.
#'
#' @examples
#' ## Extract all overlapping measures for classification task
#' data(iris)
#' overlapping(Species ~ ., iris)
#' @export
overlapping <- function(...) {
  # S3 generic: dispatches to overlapping.default or overlapping.formula
  UseMethod("overlapping")
}
#' @rdname overlapping
#' @export
overlapping.default <- function(x, y, measures="all", summary=c("mean", "sd"),
  ...) {
  if(!is.data.frame(x)) {
    stop("data argument must be a data.frame")
  }
  if(is.data.frame(y)) {
    y <- y[, 1]
  }
  y <- as.factor(y)
  # class-wise statistics (means, covariances) need at least two examples
  if(min(table(y)) < 2) {
    stop("number of examples in the minority class should be >= 2")
  }
  if(nrow(x) != length(y)) {
    stop("x and y must have same number of rows")
  }
  if(measures[1] == "all") {
    measures <- ls.overlapping()
  }
  measures <- match.arg(measures, ls.overlapping(), TRUE)
  # an empty summary list means "return the raw values"
  if (length(summary) == 0) {
    summary <- "return"
  }
  colnames(x) <- make.names(colnames(x), unique=TRUE)
  # categorical attributes are one-hot encoded so the per-attribute
  # statistics are well-defined
  x <- binarize(x)
  data <- data.frame(x, class=y)
  sapply(measures, function(f) {
    measure = eval(call(paste("c", f, sep="."), data=data))
    summarization(measure, summary, f %in% ls.overlapping.multiples(), ...)
  }, simplify=FALSE)
}
#' @rdname overlapping
#' @export
overlapping.formula <- function(formula, data, measures="all",
  summary=c("mean", "sd"), ...) {
  if(!inherits(formula, "formula")) {
    stop("method is only for formula datas")
  }
  if(!is.data.frame(data)) {
    stop("data argument must be a data.frame")
  }
  # model.frame places the response in column 1 and the predictors after it
  modFrame <- stats::model.frame(formula, data)
  attr(modFrame, "terms") <- NULL
  overlapping.default(modFrame[, -1, drop=FALSE], modFrame[, 1, drop=FALSE],
    measures, summary, ...)
}
# Names of the feature-overlapping measures.
ls.overlapping <- function() {
  measures <- c("F1", "F1v", "F2", "F3", "F4")
  measures
}
# Every overlapping measure yields one value per attribute or per class
# pair, so all of them are summarized as "multiples".
ls.overlapping.multiples <- function() {
  ls.overlapping()
}
# Attribute columns of the examples belonging to class j; assumes the class
# factor is the last column and drops it.
branch <- function(data, j) {
  rows <- data$class == j
  data[rows, -ncol(data), drop=FALSE]
}
# Between-class scatter term of Fisher's discriminant ratio for class j:
# class size times the squared deviation of the class mean from the overall
# mean, per attribute.
numerator <- function(j, data) {
  cls <- branch(data, j)
  grand <- colMeans(data[,-ncol(data), drop=FALSE])
  nrow(cls) * (colMeans(cls) - grand)^2
}
# Within-class scatter term for class j: sum of squared deviations of every
# example from its class mean, per attribute.
denominator <- function(j, data) {
  cls <- branch(data, j)
  rowSums((t(cls) - colMeans(cls))^2)
}
# F1: maximum Fisher's discriminant ratio, computed per attribute and
# rescaled so that low discriminative power maps near 1 (more complex).
c.F1 <- function(data) {
  num <- lapply(levels(data$class), numerator, data)
  den <- lapply(levels(data$class), denominator, data)
  aux <- rowSums(do.call("cbind", num)) /
    rowSums(do.call("cbind", den))
  #aux <- max(aux, na.rm=TRUE)
  aux <- 1/(aux + 1)
  return(aux)
}
# Directional Fisher discriminant ratio for a binary subproblem: finds the
# direction d maximizing class separation and returns the ratio of between-
# to within-class scatter along it.
dvector <- function(data) {
  l <- levels(data$class)
  a <- branch(data, l[1])
  b <- branch(data, l[2])
  c1 <- colMeans(a)
  c2 <- colMeans(b)
  # pooled (size-weighted) within-class covariance matrix
  W <- (nrow(a)/nrow(data)) * stats::cov(a) +
    (nrow(b)/nrow(data)) * stats::cov(b)
  # between-class scatter matrix
  B <- (c1 - c2) %*% t(c1 - c2)
  # pseudo-inverse tolerates a singular W (e.g. constant attributes)
  d <- MASS::ginv(W) %*% (c1 - c2)
  aux <- (t(d) %*% B %*% d)/(t(d) %*% W %*% d)
  return(aux)
}
# F1v: directional-vector Fisher's discriminant ratio, one value per
# one-vs-one pair, rescaled into (0, 1].
c.F1v <- function(data) {
  data <- ovo(data)
  #aux <- mean(sapply(data, dvector))
  aux <- sapply(data, dvector)
  aux <- 1/(aux + 1)
  return(aux)
}
# Normalized volume (product over attributes) of the region where the
# bounding boxes of the two classes overlap.
regionOver <- function(data) {
  l <- levels(data$class)
  a <- branch(data, l[1])
  b <- branch(data, l[2])
  maxmax <- rbind(colMax(a), colMax(b))
  minmin <- rbind(colMin(a), colMin(b))
  # per-attribute overlap length, floored at zero when the boxes are disjoint
  over <- colMax(rbind(colMin(maxmax) - colMax(minmin), 0))
  rang <- colMax(maxmax) - colMin(minmin)
  aux <- prod(over/rang, na.rm=TRUE)
  return(aux)
}
# F2: volume of the overlapping region, one value per one-vs-one pair.
c.F2 <- function(data) {
  pairs <- ovo(data)
  sapply(pairs, regionOver)
}
# Boolean data.frame marking, per attribute, the examples lying outside the
# region where the bounding boxes of the two classes overlap.
nonOverlap <- function(data) {
  l <- levels(data$class)
  a <- branch(data, l[1])
  b <- branch(data, l[2])
  # overlap region boundaries per attribute
  minmax <- colMin(rbind(colMax(a), colMax(b)))
  maxmin <- colMax(rbind(colMin(a), colMin(b)))
  aux <- do.call("cbind",
    lapply(1:(ncol(data)-1), function(i) {
      data[,i] < maxmin[i] | data[,i] > minmax[i]
    })
  )
  aux <- data.frame(aux)
  rownames(aux) <- rownames(data)
  return(aux)
}
# F3: maximum individual feature efficiency per one-vs-one pair, inverted so
# that higher values mean more overlap (fewer examples separable by any
# single attribute).
c.F3 <- function(data) {
  data <- ovo(data)
  aux <- mapply(function(d) {
    colSums(nonOverlap(d))/nrow(d)
  }, d=data)
  #aux <- 1 - mean(colMax(aux))
  aux <- 1 - colMax(aux)
  return(aux)
}
# Iteratively removes the examples separated by the most discriminative
# attribute (largest non-overlap count), dropping that attribute each round,
# until no examples, a single class, or only the class column remains.
removing <- function(data) {
  repeat {
    tmp <- nonOverlap(data)
    col <- which.max(colSums(tmp))
    # keep only the examples still inside the overlapping region
    aux <- rownames(tmp[tmp[,col] != TRUE, , drop=FALSE])
    data <- data[aux,- col, drop=FALSE]
    if(nrow(data) == 0 | ncol(data) == 1 |
      length(unique(data$class)) == 1)
      break
  }
  return(data)
}
# F4: collective feature efficiency — the fraction of examples remaining
# after removing(), one value per one-vs-one pair.
c.F4 <- function(data) {
  pairs <- ovo(data)
  sapply(pairs, function(d) {
    nrow(removing(d))/nrow(d)
  })
}
#' Measures of network
#'
#' Classification task. The network measures represent the dataset as a graph
#' and extract structural information from it. The transformation between raw
#' data and the graph representation is based on the epsilon-NN algorithm. Next,
#' a post-processing step is applied to the graph, pruning edges between
#' examples of opposite classes.
#'
#' @family complexity-measures
#' @param x A data.frame contained only the input attributes.
#' @param y A factor response vector with one label for each row/component of x.
#' @param measures A list of measures names or \code{"all"} to include all them.
#' @param formula A formula to define the class column.
#' @param data A data.frame dataset contained the input attributes and class.
#' @param eps The percentage of nodes in the graph to be connected.
#' @param summary A list of summarization functions or empty for all values. See
#' \link{summarization} method to more information. (Default:
#' \code{c("mean", "sd")})
#' @param ... Not used.
#' @details
#' The following measures are allowed for this method:
#' \describe{
#' \item{"Density"}{Average Density of the network (Density) represents the
#' number of edges in the graph, divided by the maximum number of edges
#' between pairs of data points.}
#' \item{"ClsCoef"}{Clustering coefficient (ClsCoef) averages the clustering
#' tendency of the vertexes by the ratio of existent edges between its
#' neighbors and the total number of edges that could possibly exist
#' between them.}
#' \item{"Hubs"}{Hubs score (Hubs) is given by the number of connections it
#' has to other nodes, weighted by the number of connections these
#' neighbors have.}
#' }
#' @return A list named by the requested network measure.
#'
#' @references
#' Gleison Morais and Ronaldo C Prati. (2013). Complex Network Measures for
#' Data Set Characterization. In 2nd Brazilian Conference on Intelligent
#' Systems (BRACIS). 12--18.
#'
#' Luis P F Garcia, Andre C P L F de Carvalho and Ana C Lorena. (2015). Effect
#' of label noise in the complexity of classification problems.
#' Neurocomputing 160, 108--119.
#'
#' @examples
#' ## Extract all network measures for classification task
#' data(iris)
#' network(Species ~ ., iris)
#' @export
network <- function(...) {
  # S3 generic: dispatches to network.default or network.formula
  UseMethod("network")
}
#' @rdname network
#' @export
network.default <- function(x, y, measures="all", eps=0.15,
  summary=c("mean", "sd"), ...) {
  if(!is.data.frame(x)) {
    stop("data argument must be a data.frame")
  }
  if(is.data.frame(y)) {
    y <- y[, 1]
  }
  y <- as.factor(y)
  if(min(table(y)) < 2) {
    stop("number of examples in the minority class should be >= 2")
  }
  if(nrow(x) != length(y)) {
    stop("x and y must have same number of rows")
  }
  if(measures[1] == "all") {
    measures <- ls.network()
  }
  measures <- match.arg(measures, ls.network(), TRUE)
  # an empty summary list means "return the raw values"
  if (length(summary) == 0) {
    summary <- "return"
  }
  colnames(x) <- make.names(colnames(x), unique=TRUE)
  # epsilon-NN graph: each example keeps edges only to its eps*n nearest
  # same-class neighbors (between-class edges are pruned by enn)
  dst <- enn(x, y, eps*nrow(x))
  graph <- igraph::graph.adjacency(dst, mode="undirected", weighted=TRUE)
  sapply(measures, function(f) {
    measure = eval(call(paste("c", f, sep="."), graph))
    summarization(measure, summary, f %in% ls.network.multiples(), ...)
  }, simplify=FALSE)
}
#' @rdname network
#' @export
network.formula <- function(formula, data, measures="all", eps=0.15,
  summary=c("mean", "sd"), ...) {
  if(!inherits(formula, "formula")) {
    stop("method is only for formula datas")
  }
  if(!is.data.frame(data)) {
    stop("data argument must be a data.frame")
  }
  # model.frame places the response in column 1 and the predictors after it
  modFrame <- stats::model.frame(formula, data)
  attr(modFrame, "terms") <- NULL
  network.default(modFrame[, -1, drop=FALSE], modFrame[, 1, drop=FALSE],
    measures, eps, summary, ...)
}
# Names of the network-based measures.
ls.network <- function() {
  c("Density", "ClsCoef", "Hubs")
}
# Only the hub score yields one value per vertex; Density and ClsCoef are
# single-valued.
ls.network.multiples <- function() {
  "Hubs"
}
# Builds the epsilon-NN weighted adjacency matrix: example i keeps an edge to
# j only when j is among its e nearest neighbors AND shares i's class
# (between-class edges are pruned); all other distances are zeroed.
enn <- function(x, y, e) {
  dst <- dist(x)
  for(i in 1:nrow(x)) {
    # 1:e+1 parses as (1:e)+1, i.e. positions 2..e+1, which skips position 1
    # (the example itself at distance 0); a fractional e (eps*nrow) is
    # truncated by the colon operator
    a <- names(sort(dst[i,])[1:e+1])
    # rownames of the examples sharing i's class
    b <- rownames(x[y == y[i],])
    dst[i, setdiff(rownames(x), intersect(a, b))] <- 0
  }
  return(dst)
}
# Density: complement of the graph density (sparser graph = more complex).
c.Density <- function(graph) {
  1 - igraph::graph.density(graph)
}
# ClsCoef: complement of the global clustering coefficient; isolated
# vertexes contribute zero transitivity.
c.ClsCoef <- function(graph) {
  1 - igraph::transitivity(graph, type="global", isolates="zero")
}
# Hubs: complement of the per-vertex hub scores (one value per example).
c.Hubs <- function(graph) {
  #1 - mean(igraph::hub.score(graph)$vector)
  1 - igraph::hub.score(graph)$vector
}
#' Measures of dimensionality
#'
#' These measures give an indication of data sparsity. They capture how sparse
#' a dataset is, i.e. how much it tends to have regions of low density. Such
#' regions are known to make it more difficult to extract good classification
#' and regression models.
#'
#' @family complexity-measures
#' @param x A data.frame contained only the input attributes.
#' @param y A response vector with one value for each row/component of x.
#' @param measures A list of measures names or \code{"all"} to include all them.
#' @param formula A formula to define the output column.
#' @param data A data.frame dataset contained the input and output attributes.
#' @param ... Not used.
#' @details
#' The following measures are allowed for this method:
#' \describe{
#' \item{"T2"}{Average number of points per dimension (T2) is given by the
#' ratio between the number of examples and dimensionality of the dataset.}
#' \item{"T3"}{Average number of points per PCA (T3) is similar to T2, but
#' uses the number of PCA components needed to represent 95% of data
#' variability as the base of data sparsity assessment.}
#' \item{"T4"}{Ratio of the PCA Dimension to the Original (T4) estimates the
#' proportion of relevant and the original dimensions for a dataset.}
#' }
#' @return A list named by the requested dimensionality measure.
#'
#' @references
#' Ana C Lorena, Ivan G Costa, Newton Spolaor and Marcilio C P Souto. (2012).
#' Analysis of complexity indices for classification problems: Cancer gene
#' expression data. Neurocomputing 75, 1, 33--42.
#'
#' @examples
#' ## Extract all dimensionality measures for classification task
#' data(iris)
#' dimensionality(Species ~ ., iris)
#'
#' ## Extract all dimensionality measures for regression task
#' data(cars)
#' dimensionality(speed ~ ., cars)
#' @export
dimensionality <- function(...) {
  # S3 generic: dispatches to dimensionality.default or dimensionality.formula
  UseMethod("dimensionality")
}
#' @rdname dimensionality
#' @export
dimensionality.default <- function(x, y, measures="all", ...) {
  if(!is.data.frame(x)) {
    stop("data argument must be a data.frame")
  }
  if(is.data.frame(y)) {
    y <- y[, 1]
  }
  if(nrow(x) != length(y)) {
    stop("x and y must have same number of rows")
  }
  if(measures[1] == "all") {
    measures <- ls.dimensionality()
  }
  measures <- match.arg(measures, ls.dimensionality(), TRUE)
  colnames(x) <- make.names(colnames(x), unique=TRUE)
  # categorical attributes are one-hot encoded before counting dimensions
  # and applying PCA; y is validated but otherwise unused by T2-T4
  x <- binarize(x)
  sapply(measures, function(f) {
    eval(call(paste("c", f, sep="."), x=x))
  })
}
#' @rdname dimensionality
#' @export
dimensionality.formula <- function(formula, data, measures="all", ...) {
  if(!inherits(formula, "formula")) {
    stop("method is only for formula datas")
  }
  if(!is.data.frame(data)) {
    stop("data argument must be a data.frame")
  }
  # model.frame places the response in column 1 and the predictors after it
  modFrame <- stats::model.frame(formula, data)
  attr(modFrame, "terms") <- NULL
  dimensionality.default(modFrame[, -1, drop=FALSE], modFrame[, 1, drop=FALSE],
    measures, ...)
}
# Names of the dimensionality measures.
ls.dimensionality <- function() {
  paste0("T", 2:4)
}
# Number of principal components needed to explain 95% of the variance.
# Row 3 of summary(prcomp)$importance is the cumulative proportion.
pca <- function(x) {
  fit <- stats::prcomp(x)
  cumulative <- summary(fit)$importance[3,]
  length(which(cumulative < 0.95)) + 1
}
# T2: average number of attributes per example.
c.T2 <- function(x) {
  dims <- dim(x)
  dims[2]/dims[1]
}
# T3: number of >=95%-variance PCA components per example.
c.T3 <- function(x) {
  relevant <- pca(x)
  relevant/nrow(x)
}
# T4: ratio of the PCA dimension to the original dimension.
c.T4 <- function(x) {
  relevant <- pca(x)
  relevant/ncol(x)
}
#' Measures of overlapping
#'
#' Classification task. The overlapping measures evaluate how informative the
#' available features are to separate the classes. If there is at least one very
#' discriminative feature in the dataset, the problem can be considered simpler
#' than if there is no such an attribute.
#'
#' @family complexity-measures
#' @param x A data.frame contained only the input attributes.
#' @param y A factor response vector with one label for each row/component of x.
#' @param measures A list of measures names or \code{"all"} to include all them.
#' @param formula A formula to define the class column.
#' @param data A data.frame dataset contained the input attributes and class.
#' @param summary A list of summarization functions or empty for all values. See
#' \link{summarization} method to more information. (Default:
#' \code{c("mean", "sd")})
#' @param ... Not used.
#' @details
#' The following measures are allowed for this method:
#' \describe{
#' \item{"F1"}{Maximum Fisher's Discriminant Ratio (F1) measures the overlap
#' between the values of the features and takes the value of the largest
#' discriminant ratio among all the available features.}
#' \item{"F1v"}{Directional-vector maximum Fisher's discriminant ratio (F1v)
#' complements F1 by searching for a vector able to separate two classes
#' after the training examples have been projected into it.}
#' \item{"F2"}{Volume of the overlapping region (F2) computes the overlap of
#' the distributions of the features values within the classes. F2 can be
#' determined by finding, for each feature its minimum and maximum values
#' in the classes.}
#' \item{"F3"}{The maximum individual feature efficiency (F3) of each
#' feature is given by the ratio between the number of examples that are
#' not in the overlapping region of two classes and the total number of
#' examples. This measure returns the maximum of the values found among
#' the input features.}
#' \item{"F4"}{Collective feature efficiency (F4) get an overview on how
#' various features may work together in data separation. First the most
#' discriminative feature according to F3 is selected and all examples that
#' can be separated by this feature are removed from the dataset. The
#' previous step is repeated on the remaining dataset until all the
#' features have been considered or no example remains. F4 returns the
#' ratio of examples that have been discriminated.}
#' }
#' @return A list named by the requested overlapping measure.
#'
#' @references
#' Albert Orriols-Puig, Nuria Macia and Tin K Ho. (2010). Documentation for the
#' data complexity library in C++. Technical Report. La Salle - Universitat
#' Ramon Llull.
#'
#' @examples
#' ## Extract all overlapping measures for classification task
#' data(iris)
#' overlapping(Species ~ ., iris)
#' @export
# NOTE(review): everything from here through c.F4 below byte-duplicates the
# overlapping section defined earlier in this string; when sourced, these
# second copies silently overwrite the first. Consider removing one copy.
overlapping <- function(...) {
  UseMethod("overlapping")
}
#' @rdname overlapping
#' @export
# NOTE(review): duplicate of overlapping.default defined earlier in this
# string; it overwrites the first copy when sourced.
overlapping.default <- function(x, y, measures="all", summary=c("mean", "sd"),
  ...) {
  if(!is.data.frame(x)) {
    stop("data argument must be a data.frame")
  }
  if(is.data.frame(y)) {
    y <- y[, 1]
  }
  y <- as.factor(y)
  if(min(table(y)) < 2) {
    stop("number of examples in the minority class should be >= 2")
  }
  if(nrow(x) != length(y)) {
    stop("x and y must have same number of rows")
  }
  if(measures[1] == "all") {
    measures <- ls.overlapping()
  }
  measures <- match.arg(measures, ls.overlapping(), TRUE)
  if (length(summary) == 0) {
    summary <- "return"
  }
  colnames(x) <- make.names(colnames(x), unique=TRUE)
  x <- binarize(x)
  data <- data.frame(x, class=y)
  sapply(measures, function(f) {
    measure = eval(call(paste("c", f, sep="."), data=data))
    summarization(measure, summary, f %in% ls.overlapping.multiples(), ...)
  }, simplify=FALSE)
}
#' @rdname overlapping
#' @export
# NOTE(review): duplicate of overlapping.formula defined earlier in this
# string; it overwrites the first copy when sourced.
overlapping.formula <- function(formula, data, measures="all",
  summary=c("mean", "sd"), ...) {
  if(!inherits(formula, "formula")) {
    stop("method is only for formula datas")
  }
  if(!is.data.frame(data)) {
    stop("data argument must be a data.frame")
  }
  modFrame <- stats::model.frame(formula, data)
  attr(modFrame, "terms") <- NULL
  overlapping.default(modFrame[, -1, drop=FALSE], modFrame[, 1, drop=FALSE],
    measures, summary, ...)
}
# NOTE(review): duplicates of ls.overlapping, ls.overlapping.multiples,
# branch, numerator and denominator defined earlier in this string.
ls.overlapping <- function() {
  c("F1", "F1v", "F2", "F3", "F4")
}
ls.overlapping.multiples <- function() {
  ls.overlapping()
}
branch <- function(data, j) {
  data[data$class == j, -ncol(data), drop=FALSE]
}
numerator <- function(j, data) {
  tmp <- branch(data, j)
  aux <- nrow(tmp) * (colMeans(tmp) -
    colMeans(data[,-ncol(data), drop=FALSE]))^2
  return(aux)
}
denominator <- function(j, data) {
  tmp <- branch(data, j)
  aux <- rowSums((t(tmp) - colMeans(tmp))^2)
  return(aux)
}
# NOTE(review): duplicates of c.F1, dvector and c.F1v defined earlier in
# this string.
c.F1 <- function(data) {
  num <- lapply(levels(data$class), numerator, data)
  den <- lapply(levels(data$class), denominator, data)
  aux <- rowSums(do.call("cbind", num)) /
    rowSums(do.call("cbind", den))
  #aux <- max(aux, na.rm=TRUE)
  aux <- 1/(aux + 1)
  return(aux)
}
dvector <- function(data) {
  l <- levels(data$class)
  a <- branch(data, l[1])
  b <- branch(data, l[2])
  c1 <- colMeans(a)
  c2 <- colMeans(b)
  W <- (nrow(a)/nrow(data)) * stats::cov(a) +
    (nrow(b)/nrow(data)) * stats::cov(b)
  B <- (c1 - c2) %*% t(c1 - c2)
  d <- MASS::ginv(W) %*% (c1 - c2)
  aux <- (t(d) %*% B %*% d)/(t(d) %*% W %*% d)
  return(aux)
}
c.F1v <- function(data) {
  data <- ovo(data)
  #aux <- mean(sapply(data, dvector))
  aux <- sapply(data, dvector)
  aux <- 1/(aux + 1)
  return(aux)
}
regionOver <- function(data) {
l <- levels(data$class)
a <- branch(data, l[1])
b <- branch(data, l[2])
maxmax <- rbind(colMax(a), colMax(b))
minmin <- rbind(colMin(a), colMin(b))
over <- colMax(rbind(colMin(maxmax) - colMax(minmin), 0))
rang <- colMax(maxmax) - colMin(minmin)
aux <- prod(over/rang, na.rm=TRUE)
return(aux)
}
c.F2 <- function(data) {
data <- ovo(data)
#aux <- mean(sapply(data, regionOver))
aux <- sapply(data, regionOver)
return(aux)
}
nonOverlap <- function(data) {
l <- levels(data$class)
a <- branch(data, l[1])
b <- branch(data, l[2])
minmax <- colMin(rbind(colMax(a), colMax(b)))
maxmin <- colMax(rbind(colMin(a), colMin(b)))
aux <- do.call("cbind",
lapply(1:(ncol(data)-1), function(i) {
data[,i] < maxmin[i] | data[,i] > minmax[i]
})
)
aux <- data.frame(aux)
rownames(aux) <- rownames(data)
return(aux)
}
c.F3 <- function(data) {
data <- ovo(data)
aux <- mapply(function(d) {
colSums(nonOverlap(d))/nrow(d)
}, d=data)
#aux <- 1 - mean(colMax(aux))
aux <- 1 - colMax(aux)
return(aux)
}
removing <- function(data) {
repeat {
tmp <- nonOverlap(data)
col <- which.max(colSums(tmp))
aux <- rownames(tmp[tmp[,col] != TRUE, , drop=FALSE])
data <- data[aux,- col, drop=FALSE]
if(nrow(data) == 0 | ncol(data) == 1 |
length(unique(data$class)) == 1)
break
}
return(data)
}
c.F4 <- function(data) {
data <- ovo(data)
aux <- mapply(function(d) {
nrow(removing(d))/nrow(d)
}, d=data)
#aux <- mean(aux)
return(aux)
}
"""
# Compile the embedded R source (the triple-quoted `string` built above) with
# rpy2's STAP so the ECoL-style complexity measures become callable from
# Python.  NOTE(review): `STAP`, `string`, `robjects` and `Formula` are
# defined in an earlier part of the file -- confirm they are in scope here.
stringr_c = STAP(string, "stringr_c")
def my_evaluate(individual):
    """DEAP fitness function: deviation of the candidate labelling from the
    global complexity targets.

    The candidate `individual` becomes the class-label column of the
    module-global `dataFrame`; the selected R complexity measures are then
    evaluated through rpy2 and the absolute deviation from each global
    target value is collected.

    NOTE(review): relies on module globals defined elsewhere in the file
    (dataFrame, metricasList, stringr_c, robjects, Formula and the
    global* target values) -- confirm they are initialised before the GA
    starts.

    :arg individual: candidate label vector (sequence of 0/1)
    :returns: tuple of 1..4 deviations, or None when no measure (or more
        than four) is selected -- mirroring the original fall-through.
    """
    dataFrame['label'] = individual
    robjects.globalenv['dataFrame'] = dataFrame
    model_formula = Formula('label ~ .')

    deviations = []
    if "1" in metricasList:
        # Class balance (C2)
        measured = stringr_c.balance_formula(model_formula, dataFrame,
                                             measures="C2", summary="return")
        deviations.append(abs(globalBalance - measured.rx(1)[0][0]))
    if "2" in metricasList:
        # Linearity (L2)
        measured = stringr_c.linearity_formula(model_formula, dataFrame,
                                               measures="L2", summary="return")
        deviations.append(abs(globalLinear - measured.rx(1)[0][0]))
    if "3" in metricasList:
        # Neighborhood (N2)
        measured = stringr_c.neighborhood_formula(model_formula, dataFrame,
                                                  measures="N2", summary="return")
        deviations.append(abs(globalN2 - measured.rx(1)[0][0]))
    if "4" in metricasList:
        # Network clustering coefficient (ClsCoef)
        measured = stringr_c.network_formula(model_formula, dataFrame,
                                             measures="ClsCoef", summary="return")
        deviations.append(abs(globalClsCoef - measured.rx(1)[0][0]))
    if "5" in metricasList:
        # Dimensionality (T2); note the single indexing level, exactly as in
        # the original code -- this measure comes back one level flatter.
        measured = stringr_c.dimensionality_formula(model_formula, dataFrame,
                                                    measures="T2", summary="return")
        deviations.append(abs(globalt2 - measured.rx(1)[0]))
    if "6" in metricasList:
        # Feature-based overlap (F1)
        measured = stringr_c.overlapping_formula(model_formula, dataFrame,
                                                 measures="F1", summary="return")
        deviations.append(abs(globalf1 - measured.rx(1)[0][0]))

    # The original returned hand-built tuples for 1..4 entries and fell
    # through (returning None) otherwise; tuple() reproduces that exactly.
    if 1 <= len(deviations) <= 4:
        return tuple(deviations)
def print_evaluate(individual):
    """Report helper: raw values of the selected complexity measures.

    Identical plumbing to :func:`my_evaluate`, but the measures themselves
    are returned instead of their deviation from the global targets (used
    after the run to key the result dictionary by achieved metric values).

    NOTE(review): depends on the same module globals as my_evaluate
    (dataFrame, metricasList, stringr_c, robjects, Formula) -- confirm.

    :arg individual: candidate label vector (sequence of 0/1)
    :returns: tuple of 1..4 measure values, or None otherwise (mirrors the
        original fall-through behaviour).
    """
    dataFrame['label'] = individual
    robjects.globalenv['dataFrame'] = dataFrame
    model_formula = Formula('label ~ .')

    values = []
    if "1" in metricasList:
        # Class balance (C2)
        measured = stringr_c.balance_formula(model_formula, dataFrame,
                                             measures="C2", summary="return")
        values.append(measured.rx(1)[0][0])
    if "2" in metricasList:
        # Linearity (L2)
        measured = stringr_c.linearity_formula(model_formula, dataFrame,
                                               measures="L2", summary="return")
        values.append(measured.rx(1)[0][0])
    if "3" in metricasList:
        # Neighborhood (N2)
        measured = stringr_c.neighborhood_formula(model_formula, dataFrame,
                                                  measures="N2", summary="return")
        values.append(measured.rx(1)[0][0])
    if "4" in metricasList:
        # Network clustering coefficient (ClsCoef)
        measured = stringr_c.network_formula(model_formula, dataFrame,
                                             measures="ClsCoef", summary="return")
        values.append(measured.rx(1)[0][0])
    if "5" in metricasList:
        # Dimensionality (T2); single indexing level as in the original.
        measured = stringr_c.dimensionality_formula(model_formula, dataFrame,
                                                    measures="T2", summary="return")
        values.append(measured.rx(1)[0])
    if "6" in metricasList:
        # Feature-based overlap (F1)
        measured = stringr_c.overlapping_formula(model_formula, dataFrame,
                                                 measures="F1", summary="return")
        values.append(measured.rx(1)[0][0])

    # Tuples for 1..4 entries, None otherwise -- same as the original.
    if 1 <= len(values) <= 4:
        return tuple(values)
# --- DEAP set-up (module level) -------------------------------------------
# NOTE(review): `creator`, `base`, `tools` and the constants NOBJ,
# N_ATTRIBUTES, INDPB and ref_points are defined in an earlier part of the
# file -- confirm they are in scope before this point.
creator.create("FitnessMin", base.Fitness, weights=(-1.0,)*NOBJ)  # minimise every objective
creator.create("Individual", list, fitness=creator.FitnessMin)
# Genes are binary class labels.
RANDINT_LOW = 0
RANDINT_UP = 1
toolbox = base.Toolbox()
toolbox.register("attr_int", random.randint, RANDINT_LOW, RANDINT_UP)
# An individual is a random 0/1 label vector of length N_ATTRIBUTES.
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_int, N_ATTRIBUTES)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", my_evaluate)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutShuffleIndexes, indpb=INDPB)
# NSGA-III selection on the precomputed reference directions.
toolbox.register("select", tools.selNSGA3, ref_points=ref_points)
def main(seed=None):
    """Run the NSGA-III generational loop and return ``(pop, logbook)``.

    :arg seed: RNG seed; when None the historical fixed seed 64 is used so
        existing parameter-less calls stay reproducible.  (Bug fix: the
        original accepted `seed` but always seeded with 64.)
    :returns: tuple of the final population and the statistics logbook.
    """
    # Honour the caller-supplied seed, keeping the old deterministic default.
    random.seed(64 if seed is None else seed)

    pool = multiprocessing.Pool(processes=12)
    toolbox.register("map", pool.map)
    try:
        # Initialize statistics object
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean, axis=0)
        stats.register("std", np.std, axis=0)
        stats.register("min", np.min, axis=0)
        stats.register("max", np.max, axis=0)

        logbook = tools.Logbook()
        logbook.header = "gen", "evals", "std", "min", "avg", "max"

        pop = toolbox.population(POP)

        # Evaluate the individuals with an invalid fitness (all of them, gen 0)
        invalid_ind = [ind for ind in pop if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Compile statistics about the initial population
        record = stats.compile(pop)
        logbook.record(gen=0, evals=len(invalid_ind), **record)
        print(logbook.stream)

        # Begin the generational process (NGEN-1 generations after gen 0)
        for gen in range(1, NGEN):
            offspring = algorithms.varAnd(pop, toolbox, CXPB, MUTPB)

            # Evaluate only the individuals whose fitness was invalidated
            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit

            # Select the next generation population from parents and offspring
            pop = toolbox.select(pop + offspring, POP)

            # Compile statistics about the new population
            record = stats.compile(pop)
            logbook.record(gen=gen, evals=len(invalid_ind), **record)
            print(logbook.stream)

        return pop, logbook
    finally:
        # Bug fix: the worker pool used to be leaked; shut it down cleanly.
        pool.close()
        pool.join()
if __name__ == '__main__':
    # NOTE(review): these counters appear unused in this block -- confirm.
    cont1 = 0
    cont0 = 0
    #dataFrame = pd.read_csv(str(N_ATTRIBUTES) + '.csv')
    #dataFrame = dataFrame.drop('c0', axis=1)
    results = main()
    print("logbook")
    print(results[0][0])
    # Key each final individual by its raw complexity-metric values and
    # pickle the mapping.  NOTE(review): `dic`, `filename`, `pickle`, `df`,
    # `colors`, `X` and `y` must be defined in an earlier part of the
    # file -- confirm.
    for x in range(len(results[0])):
        dic[print_evaluate(results[0][x])] = results[0][x]
    outfile = open(filename, 'wb')
    pickle.dump(dic, outfile)
    outfile.close()
    # Scatter-plot the dataset coloured by the first individual's labelling.
    df['label'] = results[0][0]
    fig, ax = pyplot.subplots()
    grouped = df.groupby('label')
    for key, group in grouped:
        group.plot(ax=ax, kind='scatter', x='x', y='y', label=key, color=colors[key])
    print(X)
    print(y)
    pyplot.show()
| [
"thiagorfrf@hotmail.com"
] | thiagorfrf@hotmail.com |
b11c4826fc447b05b9d54f499aeaf774c2e0775c | 2783c01da8a212da6ab44e4f39c78f469338a341 | /source/mixedpreconditioners.py | f0996920bc4b332a448f684071bf380de5b73608 | [] | no_license | firedrakeproject/firedrake-helmholtzsolver | 955ac9cd7a7bd06fed51421f374a575877284419 | 81ad7f0966211f6f44a5eefb587e05e5e3f90cbd | refs/heads/master | 2020-04-06T07:01:03.464618 | 2016-08-31T15:57:07 | 2016-08-31T15:57:07 | 19,945,177 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,989 | py | from firedrake import *
from mixedarray import *
from auxilliary.logger import *
from pressuresolver.vertical_normal import *
from pressuresolver.mu_tilde import *
# petsc4py must be initialised with the command-line arguments *before*
# PETSc is imported, otherwise PETSc options passed on the command line
# are ignored.
# NOTE(review): `petsc4py` and `sys` are not imported explicitly above --
# presumably they arrive via one of the star-imports; confirm.
petsc4py.init(sys.argv)
from petsc4py import PETSc
class MixedPreconditioner(object):
    '''Schur complement preconditioner for the mixed gravity wave system.

    Use the following algorithm to precondition the mixed gravity wave system:

    1. Calculate

    .. math::
        M_p\\tilde{\\vec{R}}_p = M_p\\vec{R}_p
            - \\frac{\Delta t}{2}D\\tilde{M}_u^{-1}(M_u\\vec{R}_u)

    2. Solve :math:`H\\vec{P}=(M_p\\tilde{\\vec{R}}_p)` for :math:`\\vec{P}`
       using the specified pressure solver

    3. Calculate

    .. math::
        \\vec{U} = \\tilde{M}_u^{-1}((M_u\\tilde{\\vec{R}}_u)
            + \\frac{\Delta t}{2}D^T \\vec{P})

    Here :math:`\\tilde{M_u} = M_u + \omega_N^2 M_u^{(v)}` is the
    modified velocity mass matrix (see :class:`.Mutilde`) and
    :math:`H = M_{p} + \omega_c^2 D (\\tilde{M}_u)^{-1} D^T` is the
    Helmholtz operator in pressure space. Depending on the value of the
    parameter diagonal_only, only the central, block-diagonal matrix is used
    and in backward/forward substitution (steps 1. and 3. above) the terms
    which are formally of order :math:`\Delta t` are ignored.

    :arg mixed_operator: Mixed operator (:class:`.Mutilde`)
    :arg mutilde: Modified velocity mass matrix (:class:`.Mutilde`)
    :arg Wb: Function space for buoyancy
    :arg pressure_solver: Solver in pressure space
    :arg diagonal_only: Only use diagonal matrix, ignore forward/backward
        substitution with triangular matrices
    :arg tolerance_u: Relative tolerance for the iterative velocity solves
        carried out by :class:`.Mutilde`
    :arg maxiter_u: Maximal iteration count for the velocity solves.
        NOTE(review): currently unused -- confirm whether it should be
        forwarded to the :class:`.Mutilde` solver.
    '''
    def __init__(self,
                 mixed_operator,
                 mutilde,
                 Wb,
                 pressure_solver,
                 diagonal_only=False,
                 tolerance_u=1.E-5, maxiter_u=1000):
        self._pressure_solver = pressure_solver
        self._mixed_operator = mixed_operator
        # Function spaces come from the mixed operator: W2 = velocity,
        # W3 = pressure; the buoyancy space Wb is passed in explicitly.
        self._W2 = mixed_operator._W2
        self._W3 = mixed_operator._W3
        self._Wb = Wb
        # Timestep-dependent coefficients of the mixed operator.
        self._dt_half = mixed_operator._dt_half
        self._dt_half_N2 = mixed_operator._dt_half_N2
        self._dt_half_c2 = mixed_operator._dt_half_c2
        self._diagonal_only = diagonal_only
        # If True, matrix-vector products in steps 1./3. use the
        # preassembled coupling blocks _mat_pu / _mat_up instead of
        # re-assembling UFL forms on every application.
        self._preassemble = mixed_operator._preassemble
        self._mesh = self._W3._mesh
        self._zhat = VerticalNormal(self._mesh)
        self._dx = self._mesh._dx
        # No-normal-flow boundary conditions on bottom and top of the domain.
        self._bcs = [DirichletBC(self._W2, 0.0, "bottom"),
                     DirichletBC(self._W2, 0.0, "top")]
        self._utest = TestFunction(self._W2)
        self._ptest = TestFunction(self._W3)
        self._btest = TestFunction(self._Wb)
        self._mutilde = mutilde
        # Temporary functions
        self._rtilde_u = Function(self._W2)
        self._rtilde_p = Function(self._W3)
        self._u = Function(self._W2)
        self._p = Function(self._W3)
        self._tmp_u = Function(self._W2)
        self._Pu = Function(self._W2)
        self._Pp = Function(self._W3)
        # Flat PETSc-vector layout for the (u, p) pair, used in apply().
        self._mixedarray = MixedArray(self._W2, self._W3)
        self._tolerance_u = tolerance_u

    @timed_function("matrixfree mixed preconditioner")
    def solve(self, r_u, r_p, u, p):
        '''Preconditioner solve.

        Given the RHS r_u and r_p, calculate the fields u and p

        :arg r_u: RHS in velocity space
        :arg r_p: RHS in pressure space
        :arg u: Solution for velocity
        :arg p: Solution for pressure
        '''
        if (self._diagonal_only):
            # Block-diagonal approximation: skip the O(dt) forward/backward
            # substitution entirely.
            # Pressure solve
            p.assign(0.0)
            self._pressure_solver.solve(r_p, p)
            # Velocity solve
            with timed_region('matrixfree pc_hdiv'):
                self._mutilde.divide(r_u, u, tolerance=self._tolerance_u)
        else:
            # Step 1: modified RHS for pressure,
            # rtilde_p = r_p - dt/2 * c^2 * D Mtilde_u^{-1} r_u
            with timed_region('matrixfree pc_hdiv'):
                self._mutilde.divide(r_u, self._tmp_u, tolerance=self._tolerance_u)
            with timed_region('matrixfree schur_reconstruct'):
                if (self._preassemble):
                    # Apply the preassembled pressure-velocity coupling block
                    # (sign flipped to match the UFL form below).
                    with self._rtilde_p.dat.vec as v:
                        with self._tmp_u.dat.vec_ro as x:
                            self._mixed_operator._mat_pu.mult(x, v)
                        v *= -1.0
                else:
                    assemble(- self._dt_half_c2 * self._ptest * div(self._tmp_u) * self._dx,
                             tensor=self._rtilde_p)
                self._rtilde_p += r_p
            # Step 2: pressure solve with the Helmholtz operator
            p.assign(0.0)
            self._pressure_solver.solve(self._rtilde_p, p)
            # Step 3: backsubstitution for velocity,
            # u = Mtilde_u^{-1} (r_u + dt/2 * D^T p)
            with timed_region('matrixfree schur_reconstruct'):
                if (self._preassemble):
                    with self._tmp_u.dat.vec as v:
                        with p.dat.vec_ro as x:
                            self._mixed_operator._mat_up.mult(x, v)
                        v *= -1.0
                else:
                    assemble(self._dt_half * div(self._utest) * p * self._dx,
                             tensor=self._tmp_u)
                self._tmp_u += r_u
            with timed_region('matrixfree pc_hdiv'):
                self._mutilde.divide(self._tmp_u, u, tolerance=self._tolerance_u)

    def apply(self, pc, x, y):
        '''PETSc interface for preconditioner solve.

        PETSc interface wrapper for the :func:`solve` method.

        :arg pc: PETSc preconditioner context (unused)
        :arg x: PETSc vector representing the mixed right hand side
        :arg y: PETSc vector representing the mixed solution vector
        '''
        # Unpack the flat PETSc vector into the (u, p) Functions ...
        with self._u.dat.vec as u, \
             self._p.dat.vec as p:
            self._mixedarray.split(x, u, p)
        # ... solve outside the context managers so the data is synced ...
        self.solve(self._u, self._p, self._Pu, self._Pp)
        # ... and pack the result back into the flat output vector.
        with self._Pu.dat.vec_ro as u, \
             self._Pp.dat.vec_ro as p:
            self._mixedarray.combine(y, u, p)
class MixedPreconditionerOrography(object):
    '''Schur complement preconditioner for the mixed gravity wave system
    (orography variant with explicit buoyancy backsubstitution).

    Use the following algorithm to precondition the mixed gravity wave system:

    1. Calculate

    .. math::
        M_u\\tilde{\\vec{R}}_u = M_u\\vec{R}_u+\\frac{\Delta t}{2}QM_b^{-1}(M_b\\vec{R}_b)

        M_p\\tilde{\\vec{R}}_p = M_p\\vec{R}_p
            - \\frac{\Delta t}{2}D\\tilde{M}_u^{-1}(M_u\\tilde{\\vec{R}}_u)

    2. Solve :math:`H\\vec{P}=(M_p\\tilde{\\vec{R}}_p)` for :math:`\\vec{P}`
       using the specified pressure solver

    3. Calculate

    .. math::
        \\vec{U} = \\tilde{M}_u^{-1}((M_u\\tilde{\\vec{R}}_u)
            + \\frac{\Delta t}{2}D^T \\vec{P})

        \\vec{B} = M_b^{-1}((M_b\\vec{R}_b)-\\frac{\Delta t}{2}N^2 Q^T \\vec{U})

    Here :math:`\\tilde{M_u} = M_u + \omega_N^2 Q M_b^{-1} Q^T` is the
    modified velocity mass matrix (see :class:`.Mutilde`) and
    :math:`H = M_{p} + \omega_c^2 D (\\tilde{M}_u)^{-1} D^T` is the
    Helmholtz operator in pressure space. Depending on the value of the
    parameter diagonal_only, only the central, block-diagonal matrix is used
    and in backward/forward substitution (steps 1. and 3. above) the terms
    which are formally of order :math:`\Delta t` are ignored.

    :arg mixed_operator: Mixed operator (:class:`.Mutilde`)
    :arg mutilde: Modified velocity mass matrix (:class:`.Mutilde`)
    :arg Wb: Function space for buoyancy
    :arg pressure_solver: Solver in pressure space
    :arg diagonal_only: Only use diagonal matrix, ignore forward/backward
        substitution with triangular matrices
    :arg tolerance_b: Relative tolerance of the CG solver for the buoyancy
        mass matrix
    :arg maxiter_b: Maximal iteration count of the buoyancy mass solver
    :arg tolerance_u: Relative tolerance for the iterative velocity solves
        carried out by :class:`.Mutilde`
    :arg maxiter_u: Maximal iteration count for the velocity solves.
        NOTE(review): currently unused -- confirm whether it should be
        forwarded to the :class:`.Mutilde` solver.
    '''
    def __init__(self,
                 mixed_operator,
                 mutilde,
                 Wb,
                 pressure_solver,
                 diagonal_only=False,
                 tolerance_b=1.E-5, maxiter_b=1000,
                 tolerance_u=1.E-5, maxiter_u=1000):
        self._pressure_solver = pressure_solver
        # Function spaces come from the mixed operator: W2 = velocity,
        # W3 = pressure; the buoyancy space Wb is passed in explicitly.
        self._W2 = mixed_operator._W2
        self._W3 = mixed_operator._W3
        self._Wb = Wb
        # Timestep-dependent coefficients of the mixed operator.
        self._dt_half = mixed_operator._dt_half
        # Bug fix: this read "mixed_operatpr._dt_half_N2" (NameError at
        # construction time).
        self._dt_half_N2 = mixed_operator._dt_half_N2
        self._dt_half_c2 = mixed_operator._dt_half_c2
        self._diagonal_only = diagonal_only
        self._mesh = self._W3._mesh
        self._zhat = VerticalNormal(self._mesh)
        self._dx = self._mesh._dx
        # No-normal-flow boundary conditions on bottom and top of the domain.
        self._bcs = [DirichletBC(self._W2, 0.0, "bottom"),
                     DirichletBC(self._W2, 0.0, "top")]
        self._utest = TestFunction(self._W2)
        self._ptest = TestFunction(self._W3)
        self._btest = TestFunction(self._Wb)
        # Modified velocity mass matrix
        self._mutilde = mutilde
        # Temporary functions
        self._rtilde_u = Function(self._W2)
        self._rtilde_p = Function(self._W3)
        self._u = Function(self._W2)
        self._p = Function(self._W3)
        self._b = Function(self._Wb)
        self._tmp_u = Function(self._W2)
        self._tmp_b = Function(self._Wb)
        self._Pu = Function(self._W2)
        self._Pp = Function(self._W3)
        self._Pb = Function(self._Wb)
        self._tolerance_u = tolerance_u
        # Flat PETSc-vector layout for the (u, p, b) triple, used in apply().
        self._mixedarray = MixedArray(self._W2, self._W3, self._Wb)
        # Buoyancy mass matrix and a Jacobi-preconditioned CG solver for it.
        Mb = assemble(self._btest * TrialFunction(self._Wb) * self._dx)
        self._linearsolver_b = LinearSolver(Mb, solver_parameters={'ksp_type': 'cg',
                                                                   'ksp_rtol': tolerance_b,
                                                                   'ksp_max_it': maxiter_b,
                                                                   'ksp_monitor': False,
                                                                   'pc_type': 'jacobi'})

    @timed_function("matrixfree mixed preconditioner")
    def solve(self, r_u, r_p, r_b, u, p, b):
        '''Preconditioner solve.

        Given the RHS r_u, r_p and r_b, calculate the fields
        u, p and b.

        :arg r_u: RHS in velocity space
        :arg r_p: RHS in pressure space
        :arg r_b: RHS in buoyancy space
        :arg u: Solution for velocity
        :arg p: Solution for pressure
        :arg b: Solution for buoyancy
        '''
        if (self._diagonal_only):
            # Block-diagonal approximation: skip the O(dt) forward/backward
            # substitution entirely.
            # (Bug fix: removed `assert (self._matrixfree_prec)` -- that
            # attribute is never set anywhere in this class, so the assert
            # could only raise AttributeError.)
            # Pressure solve
            p.assign(0.0)
            self._pressure_solver.solve(r_p, p)
            # Velocity solve
            with timed_region('matrixfree pc_hdiv'):
                self._mutilde.divide(r_u, u, tolerance=self._tolerance_u)
            # Buoyancy solve
            self._linearsolver_b.solve(b, r_b)
        else:
            # Step 1a: modified RHS for velocity,
            # rtilde_u = r_u + dt/2 * Q M_b^{-1} r_b
            self._linearsolver_b.solve(self._tmp_b, r_b)
            assemble(self._dt_half * dot(self._utest, self._zhat.zhat)
                     * self._tmp_b * self._dx,
                     tensor=self._rtilde_u)
            self._rtilde_u += r_u
            # Step 1b: modified RHS for pressure,
            # rtilde_p = r_p - dt/2 * c^2 * D Mtilde_u^{-1} rtilde_u
            with timed_region('matrixfree pc_hdiv'):
                self._mutilde.divide(self._rtilde_u, self._tmp_u, tolerance=self._tolerance_u)
            assemble(- self._dt_half_c2 * self._ptest * div(self._tmp_u) * self._dx,
                     tensor=self._rtilde_p)
            self._rtilde_p += r_p
            # Step 2: pressure solve with the Helmholtz operator
            p.assign(0.0)
            self._pressure_solver.solve(self._rtilde_p, p)
            # Step 3a: backsubstitution for velocity,
            # u = Mtilde_u^{-1} (rtilde_u + dt/2 * D^T p)
            assemble(self._dt_half * div(self._utest) * p * self._dx,
                     tensor=self._tmp_u)
            self._tmp_u += self._rtilde_u
            with timed_region('matrixfree pc_hdiv'):
                self._mutilde.divide(self._tmp_u, u, tolerance=self._tolerance_u)
            # Step 3b: backsubstitution for buoyancy,
            # b = M_b^{-1} (r_b - dt/2 * N^2 * Q^T u)
            assemble(- self._dt_half_N2 * self._btest * dot(self._zhat.zhat, u) * self._dx,
                     tensor=self._tmp_b)
            self._tmp_b += r_b
            self._linearsolver_b.solve(b, self._tmp_b)

    def apply(self, pc, x, y):
        '''PETSc interface for preconditioner solve.

        PETSc interface wrapper for the :func:`solve` method.

        :arg pc: PETSc preconditioner context (unused)
        :arg x: PETSc vector representing the mixed right hand side
        :arg y: PETSc vector representing the mixed solution vector
        '''
        # Unpack the flat PETSc vector into the (u, p, b) Functions ...
        with self._u.dat.vec as u, \
             self._p.dat.vec as p, \
             self._b.dat.vec as b:
            self._mixedarray.split(x, u, p, b)
        # ... solve outside the context managers so the data is synced ...
        self.solve(self._u, self._p, self._b,
                   self._Pu, self._Pp, self._Pb)
        # ... and pack the result back into the flat output vector.
        with self._Pu.dat.vec_ro as u, \
             self._Pp.dat.vec_ro as p, \
             self._Pb.dat.vec_ro as b:
            self._mixedarray.combine(y, u, p, b)
| [
"eike.h.mueller@gmail.com"
] | eike.h.mueller@gmail.com |
230e1bc2cb14bc2ee4547a7639e5d8be1eb4afb9 | 217f2d8f76f2d397da3fbf598005ddec34b5f968 | /python_koans-master/python2/koans/about_exceptions.py~ | 4b6b410b3f514beba395fa883cc0457518f794f3 | [
"MIT"
] | permissive | mindis/python-2 | 82783dd7b149abe4df0df39f59b9197d985dae0f | 736430c7738400ac49fac8744e2d74f13dc35112 | refs/heads/master | 2022-03-30T13:57:11.190633 | 2017-04-19T08:42:48 | 2017-04-19T08:42:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,842 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutExceptions(Koan):
    # Python 2 koan exercising the exception hierarchy and
    # try/except/else/finally.  The `__` placeholder near the end is an
    # intentional blank for the student to fill in -- it is not a bug.

    class MySpecialError(RuntimeError):
        # Custom error used to demonstrate the Python 2 exception MRO.
        pass

    def test_exceptions_inherit_from_exception(self):
        # Walk the method resolution order of the custom error class
        # (Python 2: RuntimeError -> StandardError -> Exception -> BaseException).
        mro = self.MySpecialError.__mro__
        self.assertEqual("RuntimeError", mro[1].__name__)
        self.assertEqual("StandardError", mro[2].__name__)
        self.assertEqual("Exception", mro[3].__name__)
        self.assertEqual("BaseException", mro[4].__name__)

    def test_try_clause(self):
        result = None
        try:
            self.fail("Oops")
        except StandardError as ex:
            result = 'exception handled'

        self.assertEqual('exception handled', result)
        # In Python 2, AssertionError derives from StandardError but not
        # from RuntimeError.
        self.assertEqual(True, isinstance(ex, StandardError))
        self.assertEqual(False, isinstance(ex, RuntimeError))

        self.assertTrue(issubclass(RuntimeError, StandardError), \
            "RuntimeError is a subclass of StandardError")

        # Python 2 exceptions are indexable: ex[0] is the first argument.
        self.assertEqual("Oops", ex[0])

    def test_raising_a_specific_error(self):
        result = None
        try:
            # Python 2 comma-style raise (syntax error on Python 3).
            raise self.MySpecialError, "My Message"
        except self.MySpecialError as ex:
            result = 'exception handled'

        self.assertEqual('exception handled', result)
        self.assertEqual("My Message", ex[0])

    def test_else_clause(self):
        result = None
        try:
            pass
        except RuntimeError:
            result = 'it broke'
            pass
        else:
            # Runs only when the try body raised nothing.
            result = 'no damage done'

        self.assertEqual('no damage done', result)

    def test_finally_clause(self):
        result = None
        try:
            self.fail("Oops")
        except:
            # no code here
            pass
        finally:
            result = 'always run'

        self.assertEqual(__, result)  # intentional koan blank for the student
| [
"manuel.rombach@blue-yonder.com"
] | manuel.rombach@blue-yonder.com | |
b24883e7dee78e5b77eced916d2902dccf5391a9 | 9d1a547c92decc2860314033812ea1679f0eb7ba | /for/five_times.py | 197d157a5ddee18bf2859403caf1488d01926738 | [
"Unlicense"
] | permissive | pythoninthegrass/automate_boring_stuff | acf6d04c616ee3505956edb0a72ba71686520926 | dee26d1fe0b41ea22a4d6bd5775a4f320f1c5de2 | refs/heads/master | 2023-08-30T06:39:57.216985 | 2023-07-28T22:08:38 | 2023-07-28T22:08:38 | 244,089,699 | 2 | 1 | Unlicense | 2023-09-09T15:41:29 | 2020-03-01T04:59:15 | Python | UTF-8 | Python | false | false | 556 | py | #!/usr/bin/env python3
# 5x for loop
# print('My name is ')
# for i in range(5):
# print("Jimmy Five Times " + str(i))
# Gauss' for loop
# total = 0
# for num in range(101):
# total = total + num
# print(total)
# 5x while loop
# print('My name is ')
# i = 0
# while i < 5:
# print("Jimmy Five Times " + str(i))
# i = i + 1
# 5x for loop w/range(start, stop, step)
print('My name is ')
# for i in range(0, 10, 2): # 2, 4, 6, 8
for i in range(5, -1, -1): # 5, 4, 3, 2, 1, 0
print("Jimmy Five Times " + str(i)) | [
"lance.stephens@okstate.edu"
] | lance.stephens@okstate.edu |
244ea4450132751600755a1fc2a6a64e1b52a2d9 | 423d946229d4e794d88f2a7bf964f62cfeadaab9 | /Perceptron-LR-Clustering/plot_db.py | 1929d346f75c48692f3bc314954c51ca210c3ab3 | [] | no_license | itsmenick212/AI | 78ad1cb8083dec4af50e3013da5fa18931a1018c | de2120c96f94716a5db1fa906fd8925d882d1400 | refs/heads/master | 2022-11-26T01:39:17.389069 | 2020-07-26T01:19:57 | 2020-07-26T01:19:57 | 272,541,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,059 | py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sys
from matplotlib import cm
import matplotlib.lines as mlines
from mpl_toolkits.mplot3d import Axes3D
"""
Author: Kelsey D'Souza
This file contains two functions for visualizing 2-feature labeled datasets.
Its purpose is to give you ideas on how to vizualize your data and use pandas
and matplotlib, feel free to snippets of any code in here or import the file
into your programs.
This file *does not* need to be included in your submission unless imported.
visualize_scatter
Assumes binary labels and creates a line between the data using the given
feature and bias weights.
Note: weights should be input as [w1, w2, bias]
visualize_3d
Plots data points in 3D space using feat1 x feat2 on the x-y base, and
label as the data point height along the z-axis.
It then creates a 3D surface plot of the continuous label model using
the given linear regressor weights.
"""
def visualize_scatter(df, feat1=0, feat2=1, labels=2, weights=[-1, -1, 1],
                      title=''):
    """
    Scatter plot feat1 vs feat2 with the linear decision boundary overlaid.

    Assumes +/- binary labels.
    Plots first and second columns by default.

    Args:
        - df: dataframe with feat1, feat2, and labels
        - feat1: column name of first feature
        - feat2: column name of second feature
        - labels: column name of labels
        - weights: [w1, w2, b] of the boundary w1*x + w2*y + b = 0
          (the mutable default is safe here: it is never modified)
        - title: optional plot title
    """
    # Draw color-coded scatter plot (red = positive, blue = negative)
    colors = pd.Series(['r' if label > 0 else 'b' for label in df[labels]])
    ax = df.plot(x=feat1, y=feat2, kind='scatter', c=colors)

    # Get scatter plot boundaries to define line boundaries
    xmin, xmax = ax.get_xlim()

    a = weights[0]
    b = weights[1]
    c = weights[2]
    if b != 0:
        # ax + by + c = 0  =>  y = -a/b*x - c/b
        def y(x):
            return (-a / b) * x - c / b
        line = mlines.Line2D((xmin, xmax), (y(xmin), y(xmax)), color='red')
    else:
        # Bug fix: w2 == 0 used to divide by zero; in that case the
        # boundary is the vertical line x = -c/a.
        ymin, ymax = ax.get_ylim()
        x0 = -c / a
        line = mlines.Line2D((x0, x0), (ymin, ymax), color='red')
    ax.add_line(line)

    if title == '':
        title = 'Scatter of feature %s vs %s' % (str(feat1), str(feat2))
    ax.set_title(title)
    plt.show()
def visualize_3d(df, lin_reg_weights=[1, 1, 1], feat1=0, feat2=1, labels=2,
                 xlim=(-1, 1), ylim=(-1, 1), zlim=(0, 3),
                 alpha=0., xlabel='age', ylabel='weight', zlabel='height',
                 title=''):
    """
    3D surface plot of a linear model over a scatter of the data.

    Main args:
        - df: dataframe with feat1, feat2, and labels
        - feat1: int/string column name of first feature
        - feat2: int/string column name of second feature
        - labels: int/string column name of labels
        - lin_reg_weights: [b_0, b_1, b_2] list of float weights in order

    Optional args:
        - x,y,zlim: axes boundaries. Default to -1 to 1 normalized feature values.
        - alpha: step size of this model, for title only
        - x,y,z labels: for display only
        - title: title of plot
    """
    # Setup 3D figure.  add_subplot(111, projection='3d') works on both old
    # and current matplotlib; gca(projection=...) and pyplot.hold were
    # removed from modern matplotlib (holding is the default behaviour).
    ax = plt.figure().add_subplot(111, projection='3d')

    # Add scatter plot
    ax.scatter(df[feat1], df[feat2], df[labels])

    # Set axes spacings for age, weight, height
    axes1 = np.arange(xlim[0], xlim[1], step=.05)  # age
    # Bug fix: the weight axis used to start at xlim[0] instead of ylim[0].
    axes2 = np.arange(ylim[0], ylim[1], step=.05)  # weight
    axes1, axes2 = np.meshgrid(axes1, axes2)
    axes3 = np.array([lin_reg_weights[0] +
                      lin_reg_weights[1] * f1 +
                      lin_reg_weights[2] * f2  # height
                      for f1, f2 in zip(axes1, axes2)])
    plane = ax.plot_surface(axes1, axes2, axes3, cmap=cm.Spectral,
                            antialiased=False, rstride=1, cstride=1)

    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_zlabel(zlabel)
    ax.set_xlim3d(xlim)
    ax.set_ylim3d(ylim)
    ax.set_zlim3d(zlim)

    if title == '':
        title = 'LinReg Height with Alpha %f' % alpha
    ax.set_title(title)
    plt.show()
if __name__ == "__main__":
    # ======== INPUT1.CSV =======#
    print("Visualizing input1.csv")
    # Import input1.csv, without headers for easier indexing
    data = pd.read_csv('input1.csv', header=None)
    # Note, these weights just happen to look good.
    visualize_scatter(data, weights=[-4.8, -2.1, 38.0])

    # ======== SAMPLE PLOTS =======#
    print('Generating default sample plots.')

    # Example random data: 50 points in [-1, 1]^2 with random boolean labels
    data = {'feat1': np.random.uniform(-1, 1, 50),
            'feat2': np.random.uniform(-1, 1, 50),
            'labels': np.random.rand(50) > 0.5}
    df = pd.DataFrame(data)

    # Sample scatter plot
    visualize_scatter(df, feat1='feat1', feat2='feat2', labels='labels')

    # Sample meshgrid using arbitrary linreg weights
    col_names = list(df)
    bias = np.random.rand() * 0.1
    w1 = np.random.rand()
    w2 = np.random.rand()
    lin_reg_weights = [bias, w1, w2]
    visualize_3d(df, lin_reg_weights=lin_reg_weights,
                 feat1='feat1', feat2='feat2', labels='labels',
                 xlabel=col_names[0], ylabel=col_names[1], zlabel=col_names[2])
| [
"noreply@github.com"
] | noreply@github.com |
a950c831386ddc3bd2049db23e626695c198bb3a | 6b5572557c4a0785c4b727ee024790ec066ad6f2 | /Baekjoon/동적계획법 1/1, 2, 3 더하기.py | c018911cffa41fdf3bdb10a60894fd54e9209e12 | [] | no_license | easternpillar/AlgorithmTraining | 5be38998dc062d1d02933f61eaca3265e1b73981 | c8f05eda86161a7dbacab99154be1af292e7db8a | refs/heads/master | 2023-04-29T11:13:34.984005 | 2023-04-08T07:12:29 | 2023-04-08T07:12:29 | 231,875,419 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # Problem:
# Reference: https://www.acmicpc.net/problem/9095
# My Solution:
import sys
dp = [0 for _ in range(11)]
dp[0] = 1
for i in range(1, 4, 1):
for j in range(1,11):
tot = 0
for k in range(j - i, j, 1):
if k >= 0:
tot += dp[k]
dp[j] = tot
for _ in range(int(sys.stdin.readline().rstrip())):
num = int(sys.stdin.readline().rstrip())
print(dp[num])
| [
"roh941129@gmail.com"
] | roh941129@gmail.com |
6618204ab3ed59672d5fa15dc237188c064c4842 | b889ffee955441597dbe42fd0a15e043966ce8be | /test/SigmasFit.py | b4afc85dcfa62fe0105c776dcaf897721c40dd48 | [] | no_license | tbluntsc/KFitVHtobbbar | 5b4b6e86d06935c2faa206145d06a50e3e3c43ec | 763ca0b215f540fa7fdb520d2ee7d9f9ec8c223f | refs/heads/master | 2020-12-25T17:00:35.683494 | 2016-08-04T11:11:13 | 2016-08-04T11:11:13 | 53,402,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,650 | py | import numpy as np
import ROOT
import re
# Fit smooth parametrisations to the per-bin Gaussian fit results stored in
# Egen_to_Ereco_fits.root:
#   * the fitted widths sigma(E) are fitted with sqrt(a^2*E + b^2)
#   * the fitted means are fitted with a straight line a*E + b
# The resulting TH1s (with their fits attached) are written to
# Sigmas_Fits.root, per eta region and hadron flavour (0 and 5).
#
# NOTE(review): `pow` and `sqrt` shadow the Python builtins and are never
# used below -- the "sqrt" inside the TF1 formula string is interpreted by
# ROOT, not Python.  Confirm they can be removed.
pow = ROOT.TMath.Power
sqrt = ROOT.TMath.Sqrt

RootFile = ROOT.TFile('Egen_to_Ereco_fits.root')
SigmasFit = ROOT.TFile('Sigmas_Fits.root', 'RECREATE')

#Some shenanigans to extract the number of Eta regions and the number of fits in each region from the RootFile
ListOfKeys = RootFile.GetListOfKeys()
first_key = ListOfKeys[0]
name_of_first_key = first_key.GetName()
first_directory = RootFile.GetDirectory(name_of_first_key)
first_directory.cd()
Keys_in_first_Directory = first_directory.GetListOfKeys()
first_key_in_eta_dir = Keys_in_first_Directory[0]
name_of_first_key_in_eta_dir = first_key_in_eta_dir.GetName()
first_directory_in_eta_dir = first_directory.GetDirectory(name_of_first_key_in_eta_dir)
Keys_in_first_eta_dir = first_directory_in_eta_dir.GetListOfKeys()
# Assumes every eta directory holds the same number of fits as the first one.
no_fits = Keys_in_first_eta_dir.GetEntries()
no_regions = ListOfKeys.GetEntries()
print "no_fits: ", no_fits
print "no_regions: ", no_regions

#Extract the actual Eta region boundaries from the titles of the directories of the RootFile to save them later
# NOTE(review): the boundaries and interval width are parsed from fixed
# character offsets in the directory titles -- this assumes the exact title
# format produced by the upstream fitting script; confirm before reuse.
regions = []
for i in xrange(no_regions):
    current_key = ListOfKeys[i]
    title_of_current_key = current_key.GetTitle()
    beg_1 = title_of_current_key.find("between") + 8
    end_1 = title_of_current_key.find(" and")
    beg_2 = title_of_current_key.find("and ") + 4
    # interval_width keeps the value parsed from the *last* title after the
    # loop; all titles are assumed to share the same width.
    interval_width = title_of_current_key[beg_2+22:beg_2+25]
    interval_width = float(interval_width)
    current_region = []
    current_region.append(np.float32(title_of_current_key[beg_1:end_1]))
    current_region.append(np.float32(title_of_current_key[beg_2:beg_2+3]))
    regions.append(current_region)

#Initialize Dictionary in which the parameters will be saved in the end
result = {}

for i in xrange(no_regions):
    current_eta_dir = RootFile.GetDirectory("eta"+str(i))
    for flavour in [0,5]:
        current_eta_dir.cd("Flavour"+str(flavour))
        # One histogram of fitted sigmas and one of fitted means, binned in
        # energy ([2*w, (no_fits+2)*w] matches the upstream energy slices).
        Test = ROOT.TH1F("Sigmas_"+str(i)+"_"+str(flavour), "Fitted Sigma values for |Eta| in ["+str(regions[i][0])+","+str(regions[i][1])+"]" +" and HadronFlavour == "+str(flavour) , no_fits, 2.0*interval_width , (no_fits+2)*(interval_width))
        Mean = ROOT.TH1F("Mean_"+str(i)+"_"+str(flavour), "Fitted Mean values for |Eta| in "+str(regions[i][0])+","+str(regions[i][1])+"]" +" and HadronFlavour == "+str(flavour), no_fits, 2.0*interval_width, (no_fits+2)*(interval_width))
        Test.SetMarkerSize(3)
        Test.SetMarkerStyle(5)
        Mean.SetMarkerSize(3)
        Mean.SetMarkerStyle(5)
        for fit in xrange(no_fits):
            #Load Histogram h_fit and from it load the Gaussian function fitted to it
            Histo = ROOT.gDirectory.Get("h_"+str(fit))
            myfunc = Histo.GetFunction("gaus")
            #Parameter 1 and 2 correspond to the position and the width of the Gaussian fit
            current_sigma = myfunc.GetParameter(2)
            current_energy = myfunc.GetParameter(1)
            #Find bin with value of position of the Gaussian and write Gaussian width to it
            # (pos is the centre of the fit's energy slice)
            pos = ((fit+2)*interval_width + (fit+3)*interval_width)/2.0
            bin_number = Test.FindBin(pos)
            bin_number_mean = Mean.FindBin(pos)
            Test.SetBinContent(bin_number, current_sigma)
            Mean.SetBinContent(bin_number_mean, current_energy)
        #This Function is not known if defined at the very start of the macro. No idea why.
        print "Sigmas fit :"
        print " "
        # Stochastic + constant term resolution model, fitted for E in [20, 200].
        sigma_func = ROOT.TF1("sigma_func", "sqrt( [0]*[0]*x + [1]*[1] )",20,200)
        params = Test.Fit("sigma_func", "S")
        print " "
        print ""
        print "Mean fit :"
        print " "
        # Linear response model for the fitted means.
        mean_func = ROOT.TF1("mean_func", "[0]*x + [1]", 0, 200)
        params_mean = Mean.Fit("mean_func", "S")
        print " "
        Test.SetOption("P")
        Test.Draw()
        Mean.SetOption("P")
        Mean.Draw()
        # Persist both histograms (with attached fits) to the output file.
        SigmasFit.cd()
        Test.Write()
        Mean.Write()
#        result["eta"+str(i),flavour] = [[params.Value(0), params.Value(1), params.Value(2)],regions[i], flavour]
    #Test.SetMarkerColor(2)
    #Test.SetMarkerSize(1)
    #Test.SetMarkerStyle(5)
    #Test.Draw("P")

#print "Hey man check out the eta0 results :"
#print result["eta0", 0]
#print result["eta0", 5]
###print " "
#print " "
#print "Also eta1 is a special kind of nice :"
#print result["eta1", 0]
#print result["eta1", 5]
#print " "
#print " "
#print "Have you seen eta2 though? Whew :"
#print result["eta2", 0]
#print result["eta2", 5]
##print " "
#print " "
#print "At eta3 we have some real gems as well :"
#print result["eta3", 0]
#print result["eta3", 5]

RootFile.Close()
| [
"tizian.bluntschli@cern.ch"
] | tizian.bluntschli@cern.ch |
5679d6f26c4dedbd94c475dc4fc6c92e85fe8069 | 9f96dffeefb1c36f20218c351aed625e822bda0d | /utils_imports.py | 936d44919d6798dbae29e7ffacee65e798bff3e8 | [
"MIT"
] | permissive | shgidi/Mask_RCNN | 9020e652f6b7c87b390f11a81ebff5b3d2c09ee0 | dddcffe4fa40b92d811cf8bf974ed767e2fac464 | refs/heads/master | 2020-03-26T21:19:14.541782 | 2018-08-20T07:44:05 | 2018-08-20T07:44:05 | 145,380,282 | 0 | 0 | null | 2018-08-20T07:11:37 | 2018-08-20T07:11:37 | null | UTF-8 | Python | false | false | 1,100 | py | from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
import os
import sys
#from utils import * # phenmoics utils
from imgaug import augmenters as iaa
ROOT_DIR = os.path.abspath("/home/gidish/cloned_libs/Mask_RCNN/") # add here mask RCNN path
sys.path.append(ROOT_DIR) # To find local version of the library
from datetime import datetime
import glob
from sklearn.model_selection import train_test_split
import skimage
import itertools
import math
import logging
import json
import re
import random
from collections import OrderedDict
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as lines
from matplotlib.patches import Polygon
import imgaug
import PIL
import pandas as pd
# Root directory of the project
# Import Mask RCNN
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
import mrcnn.model as modellib
from mrcnn.model import log
from samples.coco import coco
from samples.balloon import balloon | [
"gidish@rahan.phenomics.local"
] | gidish@rahan.phenomics.local |
f3880688646bb673bcba7cf3d0fb9ebc68a4b56b | c4339db3470e08d1ff326ac596d3be002ae71b2e | /06_config_hostname.py | 83aaa04704d086517851acd2e2144da8a3d4fbc7 | [] | no_license | kiranrj07/python_netconf_csr1000v16.6 | 1734e10f4d928c04c93ce29c300e83ade5255023 | d0f3a79f420354ae347636344269da16294e7e99 | refs/heads/main | 2023-05-24T03:59:50.943627 | 2021-06-03T17:26:38 | 2021-06-03T17:26:38 | 373,589,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | from ncclient import manager
router = {
'ip': '10.1.1.44',
'port': '830',
'username': 'admin',
'password': 'root@123'
}
m = manager.connect(host=router['ip'], port=router['port'], username=router['username'],
password=router['password'], device_params={'name':'iosxr'}, hostkey_verify=False)
CONFIGURATION = """
<config>
<native
xmlns='http://cisco.com/ns/yang/Cisco-IOS-XE-native'>
<hostname>config_by_python</hostname>
</native>
</config> """
data= m.edit_config(CONFIGURATION, target = 'running')
print(data)
m.close_session() | [
"kiranrj07@gmail.com"
] | kiranrj07@gmail.com |
88c6758835cc8bcd665daa8338aca8911d49bb52 | a5952a3de725e391783c762ec99caab28679ba98 | /django/toolkit/utils.py | 4d3ff1c0f6243637c0aad27de4f467c0a2d9c2d2 | [] | no_license | xiemx/django-vue-admin | 77f19ba9456ef35394a0b84cc2085f11fff7b3f0 | b506c711a9e916d99f0d3d00973d29d4480f747d | refs/heads/master | 2023-03-31T07:41:10.160635 | 2021-04-08T03:08:03 | 2021-04-08T04:34:18 | 355,748,057 | 10 | 2 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | import requests
import re
import time
import os
from django.conf import settings
import gitlab
access_token = settings.GITLAB_ACCESS_KEY
gitlab_server = settings.GITLAB_SERVER
def trigger_pipeline(project_id, trigger_token, ref, **kwargs):
'''
kwargs variables key-valued must strings.
'''
gl = gitlab.Gitlab(gitlab_server, private_token=access_token)
project = gl.projects.get(project_id)
pipeline = project.trigger_pipeline(
ref=ref, token=trigger_token, variables=kwargs)
return pipeline
| [
"mingxu.xie@leetcode"
] | mingxu.xie@leetcode |
3c2c49678f06026dbfaa445afe0e4a8220dcbb8b | 1f65e7723f1b394cab45005dffff878b9fb43e82 | /CP1404/prac_05/word_occurrences.py | d7bc8ec6ea3c8c85e5f3a091a0b1534c381b981e | [] | no_license | BraydanNewman/JCU_Uni | d76938e7984714bd6a0a1c31a8c4307cf3a3eecd | e9543b8fa661edf435745aa80f44e4c84810deb9 | refs/heads/master | 2023-08-29T20:41:16.388214 | 2021-11-03T05:18:46 | 2021-11-03T05:18:46 | 342,503,981 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from operator import itemgetter
text = input("Text: ").split()
words = {}
for word in text:
if word in words:
words[word] += 1
else:
words[word] = 1
max_length = max([len(str(word)) for word in words.keys()])
for i in sorted(words.items()):
print(f"{i[0]:{max_length}}: {i[1]}")
| [
"newmanbraydan@gmail.com"
] | newmanbraydan@gmail.com |
a85106bd1af406133aadf6cc7284e4a385429299 | 48d123a5d0bbed13d02d94cafaa5f0b4555d8f9b | /Hogwarts_10/testcase/test_fixtures.py | b08fe30e098bc9affea51e935c37677460b4b53e | [] | no_license | beitou/api_Hogwarts | 22394b09bf012a2e3fb3f8f74cc90ecb7d50ff4f | eb06ff406ff59b7756cb98e18a41d8dce34581c3 | refs/heads/master | 2023-05-06T15:16:28.793258 | 2021-06-04T10:25:09 | 2021-06-04T10:25:09 | 373,791,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | import logging
def test_1(topics):
logging.info("start")
assert len(topics["topics"]) == 2
logging.info("end")
def test_2(topics):
assert topics["topics"][0]["deleted"] == False | [
"wanwanling@xiaobangtouzi.com"
] | wanwanling@xiaobangtouzi.com |
e12d6762e76b184388535c634d9a135c76ad728f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02400/s312694229.py | 0e3f00ee5086355e387498233f562d240a9c9fa5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | import sys
import math
def main():
r = float(sys.stdin.readline())
print(math.pi * r**2, 2 * math.pi * r)
return
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
378d007413661af3587557a24731e15a33cfe2aa | 334fc2752627476cabe53ffe3b31347b6a6ecac5 | /7-July.py | b15e9bc235a5b064df56cac3815a6be1a91f7607 | [] | no_license | cabudies/Python-Batch-3-July-2018 | 79dc54e894063c608a1e57fffaf3e99f6688d831 | f5911305f99146dc1abf1e5519c726c08b02b0c5 | refs/heads/master | 2021-07-14T01:06:24.815069 | 2020-05-19T11:28:06 | 2020-05-19T11:28:06 | 139,531,349 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py |
class Student:
name = ""
college = ""
fees = 0
def __init__(self, name = "", college = "", fees = 0):
self.name = name
self.college = college
self.fees = fees
list=[]
obj = Student("Gurjas", "DIT", 8000)
obj1 = Student("Kunal", "DIT", 8000)
obj2 = Student("Piyush", "DIT", 8000)
list.append(obj)
list.append(obj1)
list.append(obj2)
for i in list:
print("Student Name is: ", i.name, ", Student College is: ", i.college, ", Student Fees is: ", i.fees)
| [
"noreply@github.com"
] | noreply@github.com |
878cf7f32086ca4bc728eb982d47e874594df573 | 29439d7172868ccc5f39ecf9e56da0aff35e3e65 | /chapter 7/chapter-7.py | 84bf53fc5f1fb5ea5889b7f39e5b7926721f69af | [] | no_license | WeijiaMa123/homework | 012fc59d93e6b6d8882149e0efde1e3cd75ef2d9 | 129b1eea7e6e61a4f87669b26a6ed54a1c720a0b | refs/heads/master | 2021-09-13T20:35:28.173394 | 2018-05-04T02:45:24 | 2018-05-04T02:45:24 | 112,765,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | import sys
import turtle
wn=turtle.Screen()
frank=turtle.Turtle()
drunk=turtle.Turtle()
def test(did_pass):
""" Print the result of a test. """
linenum = sys._getframe(1).f_lineno # Get the caller's line number.
if did_pass:
msg = "Test at line {0} ok.".format(linenum)
else:
msg = ("Test at line {0} FAILED.".format(linenum))
print(msg)
mylist=[1,7,2,4,5,6,7,9,32,54,-88,-7]
mylist2=["hello","babab","sam","frank","newbe","conming","xbnhk"]
listnosam=["frank","wison","jaxsin","aronson"]
def count_odd(lst):
odd=0
for i in lst:
if i%2 == 1:
odd += 1
return odd
def sum_even(llst):
sumeven=0
for i in llst:
if i%2==0:
sumeven += i
return sumeven
def sum_negative(lllst):
negasum=0
for i in lllst:
if i < 0:
negasum += i
return negasum
def countword5(list):
numword5=0
for i in list:
if len(i)==5:
numword5 += 1
return numword5
def sum_to_even(list):
mysum=0
for i in list:
if i%2 == 0:
break
mysum += i
return mysum
def before_sam(lst):
count=0
for i in lst:
if i == "sam":
count += 1
break
count += 1
return count
def sqrt(n):
approx = n/3
while True:
better = (approx + n/approx)/2
print(better)
if abs(approx-better)<0.001:
return better
approx = better
def is_prime(n):
for i in range(2,n):
if n % i == 0:
return False
return True
def test_suite():
test(count_odd(mylist)==6)
test(sum_even(mylist)==10)
test(sum_negative(mylist)==-95)
test(countword5(mylist2)==5)
test(sum_to_even(mylist)==8)
test(before_sam(mylist2)==3)
test(before_sam(listnosam)==4)
test_suite()
path=[(160, 20), (-43, 10), (270, 8), (-43, 12)]
for (angle,dist) in path:
drunk.right(angle)
drunk.forward(dist)
house=[(45,141.4),(90,70.7),(90,70.7),(90,141.4),(135,100),(90,100),(90,100),(90,100)]
for(angle,dist)in house:
frank.left(angle)
frank.forward(dist)
| [
"31965430+WeijiaMa123@users.noreply.github.com"
] | 31965430+WeijiaMa123@users.noreply.github.com |
47446414bf2daf8ca2dd4bc3ed55e61e9b852640 | afb57c99da7b47f124a4347f1387b82f037e7877 | /blog/models.py | a7307779526b57585295b01388d622eb42974d75 | [] | no_license | Gusein021/my-first-blog | f8d04d0be4e527d219445cc511991502ec86fb0a | 37979201b276bf2712d6593b86e757050e9ac121 | refs/heads/master | 2020-08-11T00:12:18.903092 | 2019-10-13T03:50:34 | 2019-10-13T03:50:34 | 214,450,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | from django.conf import settings
from django.db import models
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete = models.CASCADE)
title = models.CharField(max_length = 200)
text = models.TextField()
create_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
seld.save()
def __str__(self):
return self.title | [
"gusein019@.ru"
] | gusein019@.ru |
396734b6a5d74f2e1766822748382f74366d2960 | b7c7f9ffec4bab2e8fe9cb65ffd46ca8189f0e5d | /test_heliport.py | c154ffc395f10597f87f03dfeb185358f47481b6 | [
"MIT"
] | permissive | bcorfman/heliport | cf802677febe654204adbaa2f176473dd307c771 | d725cba4185a37e040731c87a6a5469cbe7d1766 | refs/heads/master | 2022-10-07T13:12:08.708809 | 2020-06-05T17:30:24 | 2020-06-05T17:30:24 | 268,340,628 | 0 | 0 | MIT | 2020-06-05T17:38:06 | 2020-05-31T18:34:36 | Python | UTF-8 | Python | false | false | 3,097 | py | from heliport import RoofSegment, Point, Circle, Rect, Heliport
from heliport import roof_outline, update_location, read_file, point_in_circle, candidate_edges, \
find_largest_radius_inside, find_helipad_radius
class TestHeliport:
"""def test_sample_output(self):
results = ''
for i, item in enumerate([1.0, 10.0]):
if i > 0:
results += '\n'
results += f'Case Number {i+1} radius is: {item:.2f}\n'
assert self.heliport.get_output() == results"""
def test_read_file(self):
lines = ['0',
'10 R 10 U 10 L 10 U 10 R 5 U 30 L 20 D 20 R 5 D',
'10',
'2 R 2 U 2 L 2 D',
'4']
assert read_file('heliport.in') == lines
def test_roof_outline(self):
line = '2 R 2 U 2 L 2 D'
roof = roof_outline(line)
assert roof.upper_right_segments == [RoofSegment(2, 'R'), RoofSegment(2, 'U')]
assert roof.upper_right_points == [Point(2, 0), Point(2, 2)]
assert roof.bottom_left_segments == [RoofSegment(2, 'L'), RoofSegment(2, 'D')]
assert roof.bottom_left_points == [Point(0, 2), Point(0, 0)]
def test_update_location(self):
assert Point(2, 0) == update_location(Point(0, 0), RoofSegment(2, 'R'))
assert Point(2, 2) == update_location(Point(2, 0), RoofSegment(2, 'U'))
assert Point(0, 2) == update_location(Point(2, 2), RoofSegment(2, 'L'))
assert Point(0, 0) == update_location(Point(0, 2), RoofSegment(2, 'D'))
def test_outside_point_in_circle(self):
point = Point(-8, -7)
circle = Circle(Point(-4, -4), 5)
assert point_in_circle(point, circle) is False
def test_inside_point_in_circle(self):
point = Point(7, -6)
circle = Circle(Point(4, -4), 5)
assert point_in_circle(point, circle) is True
def test_candidate_edges_square_roof(self):
line = '2 R 2 U 2 L 2 D'
edges = candidate_edges(roof_outline(line))
assert edges.top == [2]
assert edges.bottom == [0]
assert edges.left == [0]
assert edges.right == [2]
def test_candidate_edges_poly_roof(self):
line = '10 R 10 U 10 L 10 U 10 R 5 U 30 L 20 D 20 R 5 D'
edges = candidate_edges(roof_outline(line))
assert edges.top == [10, 20, 25]
assert edges.bottom == [0, 5]
assert edges.left == [-20, 0]
assert edges.right == [0, 10]
def test_find_largest_radius_inside(self):
rect = Rect(1, 10, 5, 2)
assert 2.0 == find_largest_radius_inside(rect)
rect = Rect(-10, 4, -5, 2)
assert 1.0 == find_largest_radius_inside(rect)
def test_find_helipad_radius(self):
line = '10 R 10 U 10 L 10 U 10 R 5 U 30 L 20 D 20 R 5 D'
roof = roof_outline(line)
edges = candidate_edges(roof)
edges.top = [25]
edges.left = [-20]
edges.bottom = [5]
assert find_helipad_radius(edges, roof) == 10.0
def test_run_cases(self):
heliport = Heliport()
heliport.run_cases()
| [
"bcorfman@fastmail.fm"
] | bcorfman@fastmail.fm |
f435c4a9a714a7eaf9852e2b4ef35884832466f3 | ba9639736518830fe2c00a9aad7844a9efbf7d24 | /1.4.2_Deep Neural Network Application - Image Classification/imageClassification.py | c5a80ca63d9895f70fce752b4b97207b73532c86 | [] | no_license | ArronHZG/DeepLearningAI | a87340af5a9cbe957bc317818c66c0a72a892108 | a20e81c73cc8396fd5e024cd48483dbb1c3224df | refs/heads/master | 2020-03-27T03:37:40.933321 | 2019-01-17T04:25:38 | 2019-01-17T04:25:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,582 | py | import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils_v3 import *
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
np.random.seed(1)
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
# Example of a picture
# index = 10
# plt.imshow(train_x_orig[index])
# print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
# Explore your dataset
m_train = train_x_orig.shape[0]
num_px = train_x_orig.shape[1]
m_test = test_x_orig.shape[0]
print ("Number of training examples: " + str(m_train))
print ("Number of testing examples: " + str(m_test))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_x_orig shape: " + str(train_x_orig.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x_orig shape: " + str(test_x_orig.shape))
print ("test_y shape: " + str(test_y.shape))
# Reshape the training and test examples
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten/255.
test_x = test_x_flatten/255.
print ("train_x's shape: " + str(train_x.shape))
print ("test_x's shape: " + str(test_x.shape))
### CONSTANTS DEFINING THE MODEL ####
n_x = 12288 # num_px * num_px * 3
n_h = 7
n_y = 1
layers_dims = (n_x, n_h, n_y)
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
parameters -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(1)
### START CODE HERE ### (≈ 4 lines of code)
W1 = np.random.randn(n_h, n_x) * 0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h) * 0.01
b2 = np.zeros((n_y, 1))
### END CODE HERE ###
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
# GRADED FUNCTION: initialize_parameters_deep
def initialize_parameters_deep(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
bl -- bias vector of shape (layer_dims[l], 1)
"""
np.random.seed(3)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters["W" + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01
parameters["b" + str(l)] = np.zeros((layer_dims[l], 1))
### END CODE HERE ###
assert (parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l - 1]))
assert (parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters
# GRADED FUNCTION: linear_forward
def linear_forward(A, W, b):
"""
Implement the linear part of a layer's forward propagation.
Arguments:
A -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
Returns:
Z -- the input of the activation function, also called pre-activation parameter
cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently
"""
### START CODE HERE ### (≈ 1 line of code)
Z = np.dot(W, A) + b
### END CODE HERE ###
assert (Z.shape == (W.shape[0], A.shape[1]))
cache = (A, W, b)
return Z, cache
# GRADED FUNCTION: linear_activation_forward
def linear_activation_forward(A_prev, W, b, activation):
"""
Implement the forward propagation for the LINEAR->ACTIVATION layer
Arguments:
A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
A -- the output of the activation function, also called the post-activation value
cache -- a python dictionary containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
"""
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
### START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = sigmoid(Z)
### END CODE HERE ###
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
### START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = relu(Z)
### END CODE HERE ###
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache)
return A, cache
# GRADED FUNCTION: L_model_forward
def L_model_forward(X, parameters):
"""
Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation
Arguments:
X -- data, numpy array of shape (input size, number of examples)
parameters -- output of initialize_parameters_deep()
Returns:
AL -- last post-activation value
caches -- list of caches containing:
every cache of linear_activation_forward() (there are L-1 of them, indexed from 0 to L-1)
"""
caches = []
A = X
L = len(parameters) // 2 # number of layers in the neural network
# Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
for l in range(1, L):
A_prev = A
### START CODE HERE ### (≈ 2 lines of code)
A, cache = linear_activation_forward(A, parameters["W" + str(l)], parameters["b" + str(l)], "relu")
caches.append(cache)
### END CODE HERE ###
# Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
### START CODE HERE ### (≈ 2 lines of code)
AL, cache = linear_activation_forward(A, parameters["W" + str(L)], parameters["b" + str(L)], "sigmoid")
caches.append(cache)
### END CODE HERE ###
assert (AL.shape == (1, X.shape[1]))
return AL, caches
# GRADED FUNCTION: compute_cost
def compute_cost(AL, Y):
"""
Implement the cost function defined by equation (7).
Arguments:
AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)
Returns:
cost -- cross-entropy cost
"""
m = Y.shape[1]
# Compute loss from aL and y.
### START CODE HERE ### (≈ 1 lines of code)
cost = -1 / m * np.sum(np.multiply(Y, np.log(AL) + np.multiply((1 - Y), np.log(1 - AL))))
### END CODE HERE ###
cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).
assert (cost.shape == ())
return cost
# GRADED FUNCTION: linear_backward
def linear_backward(dZ, cache):
"""
Implement the linear portion of backward propagation for a single layer (layer l)
Arguments:
dZ -- Gradient of the cost with respect to the linear output (of current layer l)
cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
A_prev, W, b = cache
m = A_prev.shape[1]
### START CODE HERE ### (≈ 3 lines of code)
dW = 1 / m * np.dot(dZ, A_prev.T)
db = 1 / m * np.sum(dZ, axis=1, keepdims=True)
dA_prev = np.dot(W.T, dZ)
### END CODE HERE ###
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
assert (db.shape == b.shape)
return dA_prev, dW, db
# GRADED FUNCTION: linear_activation_backward
def linear_activation_backward(dA, cache, activation):
"""
Implement the backward propagation for the LINEAR->ACTIVATION layer.
Arguments:
dA -- post-activation gradient for current layer l
cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
linear_cache, activation_cache = cache
# linear_cache = (A, W, b) activation_cache = Z = WA+b
if activation == "relu":
### START CODE HERE ### (≈ 2 lines of code)
dZ = relu_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
### END CODE HERE ###
elif activation == "sigmoid":
### START CODE HERE ### (≈ 2 lines of code)
dZ = sigmoid_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
### END CODE HERE ###
return dA_prev, dW, db
# GRADED FUNCTION: L_model_backward
def L_model_backward(AL, Y, caches):
"""
Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group
Arguments:
AL -- probability vector, output of the forward propagation (L_model_forward())
Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
caches -- list of caches containing:
every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)
the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1])
Returns:
grads -- A dictionary with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
"""
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
# Initializing the backpropagation
### START CODE HERE ### (1 line of code)
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL
### END CODE HERE ###
# Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "dAL, current_cache". Outputs: "grads["dAL-1"], grads["dWL"], grads["dbL"]
### START CODE HERE ### (approx. 2 lines)
current_cache = None
grads["dA" + str(L - 1)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, caches[-1], "sigmoid")
### END CODE HERE ###
# Loop from l=L-2 to l=0
for l in reversed(range(L - 1)):
# lth layer: (RELU -> LINEAR) gradients.
# Inputs: "grads["dA" + str(l + 1)], current_cache". Outputs: "grads["dA" + str(l)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)]
### START CODE HERE ### (approx. 5 lines)
current_cache = caches[l]
dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 1)], current_cache, "relu")
grads["dA" + str(l)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
### END CODE HERE ###
return grads
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate):
"""
Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients, output of L_model_backward
Returns:
parameters -- python dictionary containing your updated parameters
parameters["W" + str(l)] = ...
parameters["b" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural network
# Update rule for each parameter. Use a for loop.
### START CODE HERE ### (≈ 3 lines of code)
for l in range(L):
parameters["W" + str(l + 1)] -= learning_rate*grads["dW" + str(l + 1)]
parameters["b" + str(l + 1)] -= learning_rate*grads["db" + str(l + 1)]
### END CODE HERE ###
return parameters
# GRADED FUNCTION: two_layer_model
def two_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False):
"""
Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (n_x, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- dimensions of the layers (n_x, n_h, n_y)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- If set to True, this will print the cost every 100 iterations
Returns:
parameters -- a dictionary containing W1, W2, b1, and b2
"""
np.random.seed(1)
grads = {}
costs = [] # to keep track of the cost
m = X.shape[1] # number of examples
(n_x, n_h, n_y) = layers_dims
# Initialize parameters dictionary, by calling one of the functions you'd previously implemented
### START CODE HERE ### (≈ 1 line of code)
parameters = initialize_parameters(n_x, n_h, n_y)
### END CODE HERE ###
# Get W1, b1, W2 and b2 from the dictionary parameters.
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID. Inputs: "X, W1, b1, W2, b2". Output: "A1, cache1, A2, cache2".
### START CODE HERE ### (≈ 2 lines of code)
A1, cache1 = linear_activation_forward(X, W1, b1, "sigmoid")
A2, cache2 = linear_activation_forward(A1, W2, b2, "sigmoid")
### END CODE HERE ###
# Compute cost
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(A2,Y)
### END CODE HERE ###
# Initializing backward propagation
dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
# Backward propagation. Inputs: "dA2, cache2, cache1". Outputs: "dA1, dW2, db2; also dA0 (not used), dW1, db1".
### START CODE HERE ### (≈ 2 lines of code)
dA1, dW2, db2 = linear_activation_backward(dA2,cache2,"sigmoid")
dA0, dW1, db1 = linear_activation_backward(dA1,cache1,"sigmoid")
### END CODE HERE ###
# Set grads['dWl'] to dW1, grads['db1'] to db1, grads['dW2'] to dW2, grads['db2'] to db2
grads['dW1'] = dW1
grads['db1'] = db1
grads['dW2'] = dW2
grads['db2'] = db2
# Update parameters.
### START CODE HERE ### (approx. 1 line of code)
parameters = update_parameters(parameters,grads,0.1)
### END CODE HERE ###
# Retrieve W1, b1, W2, b2 from parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
plt.savefig("two_layer_model.jpg")
return parameters
# parameters = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)
#
# predictions_train = predict(train_x, train_y, parameters)
#
# predictions_test = predict(test_x, test_y, parameters)
### CONSTANTS ###
layers_dims = [12288, 20, 7, 5, 1] # 4-layer model
# GRADED FUNCTION: L_layer_model
def L_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False): # lr was 0.009
"""
Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
Arguments:
X -- data, numpy array of shape (number of examples, num_px * num_px * 3)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
print_cost -- if True, it prints the cost every 100 steps
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
np.random.seed(1)
costs = [] # keep track of cost
# Parameters initialization. (≈ 1 line of code)
### START CODE HERE ###
parameters = initialize_parameters_deep(layers_dims)
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
### START CODE HERE ### (≈ 1 line of code)
AL, caches = L_model_forward(X, parameters)
### END CODE HERE ###
# Compute cost.
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(AL,Y)
### END CODE HERE ###
# Backward propagation.
### START CODE HERE ### (≈ 1 line of code)
grads =L_model_backward(AL,Y,caches)
### END CODE HERE ###
# Update parameters.
### START CODE HERE ### (≈ 1 line of code)
parameters = update_parameters(parameters,grads,learning_rate)
### END CODE HERE ###
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print("Cost after iteration %i: %f" % (i, cost))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
plt.savefig("L_layer_model.jpg")
return parameters
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)
pred_train = predict(train_x, train_y, parameters)
pred_test = predict(test_x, test_y, parameters)
print_mislabeled_images(classes, test_x, test_y, pred_test)
## START CODE HERE ##
my_image = "my_image.jpg" # change this to the name of your image file
my_label_y = [1] # the true class of your image (1 -> cat, 0 -> non-cat)
## END CODE HERE ##
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((num_px*num_px*3,1))
my_image = my_image/255.
my_predicted_image = predict(my_image, my_label_y, parameters)
plt.imshow(image)
print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.") | [
"hou.zg@foxmail,com"
] | hou.zg@foxmail,com |
589ce7906a414367232a91ba522b84143c9a6b57 | 075055508e15a8eed9fad14a06227a2374b9c623 | /02 Kernel-based Learning/Tutorial 04 - Support Vector Machine/generate.py | 2f2c557bf301cb4b2fc94c07afe3bbc8590201b9 | [
"MIT"
] | permissive | KateYeon/Business-Anlaytics | ba150c8440069573fe0a6a2b44cf7c3e74755995 | 454c1cb1b88499e94eeb5e8a7a32309afb7165e5 | refs/heads/master | 2020-03-28T12:23:17.769947 | 2018-11-24T08:14:34 | 2018-11-24T08:14:34 | 148,292,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,521 | py |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 13:58:01 2017
@author: Yooyeon
"""
import numpy as np
from sklearn.preprocessing import StandardScaler
# create 2-dimension data (class: y=1,-1)
# 100 samples from normal distribution
# linearly-separable, non-linearly-separable, linearly-separable but overlapping data
def gen_lin_separable_data():
    """Draw two linearly separable 2-D Gaussian clusters.

    Returns (X1, y1, X2, y2): 100 samples per class with labels +1 / -1.
    """
    shared_cov = np.array([[0.8, 0.6], [0.6, 0.8]])
    pos_samples = np.random.multivariate_normal(np.array([0, 2]), shared_cov, 100)
    pos_labels = np.ones(len(pos_samples))
    neg_samples = np.random.multivariate_normal(np.array([2, 0]), shared_cov, 100)
    neg_labels = np.ones(len(neg_samples)) * -1
    return pos_samples, pos_labels, neg_samples, neg_labels
def gen_non_lin_separable_data():
    """Draw two classes that are NOT linearly separable.

    Each class is a mixture of two Gaussian blobs (50 samples per blob),
    placed so that the classes interleave. Returns (X1, y1, X2, y2)
    with labels +1 / -1.
    """
    shared_cov = [[1.0, 0.8], [0.8, 1.0]]
    pos_samples = np.vstack((
        np.random.multivariate_normal([-1, 2], shared_cov, 50),
        np.random.multivariate_normal([4, -4], shared_cov, 50),
    ))
    pos_labels = np.ones(len(pos_samples))
    neg_samples = np.vstack((
        np.random.multivariate_normal([1, -1], shared_cov, 50),
        np.random.multivariate_normal([-4, 4], shared_cov, 50),
    ))
    neg_labels = np.ones(len(neg_samples)) * -1
    return pos_samples, pos_labels, neg_samples, neg_labels
def gen_lin_separable_overlap_data():
    """Draw two linearly separable but overlapping Gaussian clusters.

    Same layout as gen_lin_separable_data but with a larger covariance,
    so the classes overlap. Returns (X1, y1, X2, y2) with labels +1 / -1.
    """
    wide_cov = np.array([[1.5, 1.0], [1.0, 1.5]])
    pos_samples = np.random.multivariate_normal(np.array([0, 2]), wide_cov, 100)
    pos_labels = np.ones(len(pos_samples))
    neg_samples = np.random.multivariate_normal(np.array([2, 0]), wide_cov, 100)
    neg_labels = np.ones(len(neg_samples)) * -1
    return pos_samples, pos_labels, neg_samples, neg_labels
def split_train(X1, y1, X2, y2):
    """Split each class 90/10 into train/test and standardise features.

    The StandardScaler is fitted on the training portion only, then applied
    to both splits. Returns (X_train, y_train, X_test, y_test).
    """
    X_train = np.vstack((X1[:90], X2[:90]))
    y_train = np.hstack((y1[:90], y2[:90]))
    X_test = np.vstack((X1[90:], X2[90:]))
    y_test = np.hstack((y1[90:], y2[90:]))

    scaler = StandardScaler()
    scaler.fit(X_train)
    return scaler.transform(X_train), y_train, scaler.transform(X_test), y_test
"noreply@github.com"
] | noreply@github.com |
7f0be96085b8e1560c713760f00c4e4769ee0cd2 | 8ec30955ea43c03e162b7c15867b4f1d470fe086 | /test/conftest.py | 6bc925b6f818ca57ed48268d69c11dc55feb9bb4 | [] | no_license | arina-pro/flask | 217d32300747e9b1639c076dc162efca110888b0 | 254ebe46868f1dc10e498403349e4279273b68c2 | refs/heads/master | 2020-07-10T13:25:59.042562 | 2019-08-26T13:06:44 | 2019-08-26T13:06:44 | 204,272,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | import os
import tempfile
import pytest
from flasky import create_app
from flasky.db import get_db, init_db
# Load the SQL fixture once at import time; every `app` fixture instance
# seeds its temporary database from this script.
with open(os.path.join(os.path.dirname(__file__), 'data.sql'), 'rb') as f:
    _data_sql = f.read().decode('utf8')
@pytest.fixture
def app():
    """Yield a Flask app configured against a throwaway SQLite database.

    The temp database file is created before the app, seeded with the
    fixture SQL, and removed once the test is done.
    """
    handle, database_file = tempfile.mkstemp()

    application = create_app({
        'TESTING': True,
        'DATABASE': database_file,
    })

    with application.app_context():
        init_db()
        get_db().executescript(_data_sql)

    yield application

    # Teardown: release the OS handle and delete the temp database.
    os.close(handle)
    os.unlink(database_file)
@pytest.fixture
def client(app):
    """A Flask test client wired to the fixture app."""
    test_client = app.test_client()
    return test_client
@pytest.fixture
def runner(app):
    """A CLI runner for invoking the app's click commands in tests."""
    cli_runner = app.test_cli_runner()
    return cli_runner
class AuthActions(object):
    """Convenience wrapper for performing login/logout requests in tests."""

    def __init__(self, client):
        self._client = client

    def login(self, username='test', password='test'):
        """POST the given credentials to the login endpoint."""
        payload = {'username': username, 'password': password}
        return self._client.post('/auth/login', data=payload)

    def logout(self):
        """Hit the logout endpoint."""
        return self._client.get('/auth/logout')
@pytest.fixture
def auth(client):
    """Expose an AuthActions helper bound to the test client."""
    helper = AuthActions(client)
    return helper
"arina-pro@yandex.ru"
] | arina-pro@yandex.ru |
86cee55ac662fda48b77b0862002ac44e18b51ec | 538f5469d1bd817c73efe6cfc8d11d7800ccdced | /model/transformer.py | 341cd3cb881dd13e9f1d393e6464422772e1d33a | [
"MIT"
] | permissive | ruiyiw/VT-summ | b63cb29645d0d4a9a4982aa9b1c85b40913be218 | 897795e307604e724a32ffae175b717ba55b6fa6 | refs/heads/main | 2023-09-06T05:23:22.682840 | 2021-11-16T06:39:08 | 2021-11-16T06:39:08 | 419,081,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,981 | py | ### TAKEN FROM https://github.com/kolloldas/torchnlp
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from model.common_layer import EncoderLayer, DecoderLayer, MultiHeadAttention, Conv, PositionwiseFeedForward, LayerNorm , _gen_bias_mask ,_gen_timing_signal, share_embedding, LabelSmoothing, NoamOpt, _get_attn_subsequent_mask, get_input_from_batch, get_output_from_batch, top_k_top_p_filtering
from utils import config
import random
# from numpy import random
import os
import pprint
from tqdm import tqdm
pp = pprint.PrettyPrinter(indent=1)
import os
import time
from copy import deepcopy
from sklearn.metrics import accuracy_score
class Encoder(nn.Module):
    """
    A Transformer Encoder module.
    Inputs should be in the shape [batch_size, length, hidden_size]
    Outputs will have the shape [batch_size, length, hidden_size]
    Refer Fig.1 in https://arxiv.org/pdf/1706.03762.pdf

    With ``universal=True`` a single shared layer is applied ``num_layers``
    times (Universal Transformer); optionally with ACT halting when
    ``config.act`` is set.
    """
    def __init__(self, embedding_size, hidden_size, num_layers, num_heads,
                 total_key_depth, total_value_depth, filter_size,
                 max_length=1000, input_dropout=0, layer_dropout=0,
                 attention_dropout=0.1, relu_dropout=0.1, use_mask=False,
                 universal=False):
        """
        Parameters:
            embedding_size: Size of embeddings
            hidden_size: Hidden size
            num_layers: Total layers in the Encoder
            num_heads: Number of attention heads
            total_key_depth: Size of last dimension of keys. Must be divisible by num_head
            total_value_depth: Size of last dimension of values. Must be divisible by num_head
            output_depth: Size last dimension of the final output
            filter_size: Hidden size of the middle layer in FFN
            max_length: Max sequence length (required for timing signal)
            input_dropout: Dropout just after embedding
            layer_dropout: Dropout for each layer
            attention_dropout: Dropout probability after attention (Should be non-zero only during training)
            relu_dropout: Dropout probability after relu in FFN (Should be non-zero only during training)
            use_mask: Set to True to turn on future value masking
        """
        super(Encoder, self).__init__()
        self.universal = universal
        self.num_layers = num_layers
        # Sinusoidal positional encoding over sequence positions.
        self.timing_signal = _gen_timing_signal(max_length, hidden_size)
        if(self.universal):
            ## Per-layer ("time step") signal for the Universal Transformer.
            self.position_signal = _gen_timing_signal(num_layers, hidden_size)
        # Arguments shared by every EncoderLayer instance.
        params = (hidden_size,
                  total_key_depth or hidden_size,
                  total_value_depth or hidden_size,
                  filter_size,
                  num_heads,
                  _gen_bias_mask(max_length) if use_mask else None,
                  layer_dropout,
                  attention_dropout,
                  relu_dropout)
        self.embedding_proj = nn.Linear(embedding_size, hidden_size, bias=False)
        if(self.universal):
            # One shared layer, applied repeatedly in forward().
            self.enc = EncoderLayer(*params)
        else:
            self.enc = nn.ModuleList([EncoderLayer(*params) for _ in range(num_layers)])
        self.layer_norm = LayerNorm(hidden_size)
        self.input_dropout = nn.Dropout(input_dropout)
        if(config.act):
            # Adaptive Computation Time halting unit (universal mode only).
            self.act_fn = ACT_basic(hidden_size)
            self.remainders = None
            self.n_updates = None

    def forward(self, inputs, mask):
        # Add input dropout
        x = self.input_dropout(inputs)
        # Project embedding size to hidden size
        x = self.embedding_proj(x)
        if(self.universal):
            if(config.act):
                # ACT decides per-position how many times the shared layer runs.
                x, (self.remainders, self.n_updates) = self.act_fn(x, inputs, self.enc, self.timing_signal, self.position_signal, self.num_layers)
                y = self.layer_norm(x)
            else:
                # Fixed number of passes through the single shared layer,
                # re-adding timing + layer-position signals each pass.
                for l in range(self.num_layers):
                    x += self.timing_signal[:, :inputs.shape[1], :].type_as(inputs.data)
                    x += self.position_signal[:, l, :].unsqueeze(1).repeat(1,inputs.shape[1],1).type_as(inputs.data)
                    x = self.enc(x, mask=mask)
                y = self.layer_norm(x)
        else:
            # Add timing signal once, then run the layer stack.
            x += self.timing_signal[:, :inputs.shape[1], :].type_as(inputs.data)
            for i in range(self.num_layers):
                x = self.enc[i](x, mask)
            y = self.layer_norm(x)
        return y
class Decoder(nn.Module):
    """
    A Transformer Decoder module.
    Inputs should be in the shape [batch_size, length, hidden_size]
    Outputs will have the shape [batch_size, length, hidden_size]
    Refer Fig.1 in https://arxiv.org/pdf/1706.03762.pdf

    Note the default ``max_length=200`` here vs 1000 in the Encoder.
    """
    def __init__(self, embedding_size, hidden_size, num_layers, num_heads,
                 total_key_depth, total_value_depth, filter_size,
                 max_length=200, input_dropout=0, layer_dropout=0,
                 attention_dropout=0.1, relu_dropout=0.1, universal=False):
        """
        Parameters:
            embedding_size: Size of embeddings
            hidden_size: Hidden size
            num_layers: Total layers in the Encoder
            num_heads: Number of attention heads
            total_key_depth: Size of last dimension of keys. Must be divisible by num_head
            total_value_depth: Size of last dimension of values. Must be divisible by num_head
            output_depth: Size last dimension of the final output
            filter_size: Hidden size of the middle layer in FFN
            max_length: Max sequence length (required for timing signal)
            input_dropout: Dropout just after embedding
            layer_dropout: Dropout for each layer
            attention_dropout: Dropout probability after attention (Should be non-zero only during training)
            relu_dropout: Dropout probability after relu in FFN (Should be non-zero only during training)
        """
        super(Decoder, self).__init__()
        self.universal = universal
        self.num_layers = num_layers
        self.timing_signal = _gen_timing_signal(max_length, hidden_size)
        if(self.universal):
            ## Per-layer ("time step") signal for the Universal Transformer.
            self.position_signal = _gen_timing_signal(num_layers, hidden_size)
        # Lower-triangular mask preventing attention to future positions.
        self.mask = _get_attn_subsequent_mask(max_length)
        params = (hidden_size,
                  total_key_depth or hidden_size,
                  total_value_depth or hidden_size,
                  filter_size,
                  num_heads,
                  _gen_bias_mask(max_length), # mandatory
                  None,
                  layer_dropout,
                  attention_dropout,
                  relu_dropout)
        if(self.universal):
            self.dec = DecoderLayer(*params)
        else:
            self.dec = nn.Sequential(*[DecoderLayer(*params) for l in range(num_layers)])
        self.embedding_proj = nn.Linear(embedding_size, hidden_size, bias=False)
        self.layer_norm = LayerNorm(hidden_size)
        self.input_dropout = nn.Dropout(input_dropout)

    def forward(self, inputs, encoder_output, mask):
        mask_src, mask_trg = mask
        # Combine padding mask with the no-peek-ahead subsequent mask.
        dec_mask = torch.gt(mask_trg + self.mask[:, :mask_trg.size(-1), :mask_trg.size(-1)], 0)
        # Add input dropout, then project embeddings to hidden size.
        x = self.input_dropout(inputs)
        x = self.embedding_proj(x)
        if(self.universal):
            if(config.act):
                # NOTE(review): `self.act_fn` is never assigned in this class
                # (only Encoder creates one when config.act is set) — this
                # branch would raise AttributeError; confirm before enabling
                # config.act with a universal Decoder.
                x, attn_dist, (self.remainders,self.n_updates) = self.act_fn(x, inputs, self.dec, self.timing_signal, self.position_signal, self.num_layers, encoder_output, decoding=True)
                y = self.layer_norm(x)
            else:
                x += self.timing_signal[:, :inputs.shape[1], :].type_as(inputs.data)
                for l in range(self.num_layers):
                    x += self.position_signal[:, l, :].unsqueeze(1).repeat(1,inputs.shape[1],1).type_as(inputs.data)
                    x, _, attn_dist, _ = self.dec((x, encoder_output, [], (mask_src,dec_mask)))
                y = self.layer_norm(x)
        else:
            # Add timing signal
            x += self.timing_signal[:, :inputs.shape[1], :].type_as(inputs.data)
            # Run decoder stack (nn.Sequential threads the tuple through layers).
            y, _, attn_dist, _ = self.dec((x, encoder_output, [], (mask_src,dec_mask)))
            # Final layer normalization
            y = self.layer_norm(y)
        return y, attn_dist
class Generator(nn.Module):
    """Standard linear + softmax generation step.

    When ``config.pointer_gen`` is on, mixes the vocabulary distribution
    with the copy (attention) distribution as in See et al.'s
    pointer-generator networks, returning log-probabilities either way.
    """
    def __init__(self, d_model, vocab):
        super(Generator, self).__init__()
        self.proj = nn.Linear(d_model, vocab)
        # Scalar gate deciding generate-vs-copy per position.
        self.p_gen_linear = nn.Linear(config.hidden_dim, 1)

    def forward(self, x, attn_dist=None, enc_batch_extend_vocab=None, extra_zeros=None, temp=1, beam_search=False, attn_dist_db=None):
        if config.pointer_gen:
            p_gen = self.p_gen_linear(x)
            alpha = torch.sigmoid(p_gen)  # generation probability in [0, 1]

        logit = self.proj(x)

        if(config.pointer_gen):
            # Blend: alpha * P_vocab + (1 - alpha) * P_copy, both temperature-scaled.
            vocab_dist = F.softmax(logit/temp, dim=2)
            vocab_dist_ = alpha * vocab_dist

            attn_dist = F.softmax(attn_dist/temp, dim=-1)
            attn_dist_ = (1 - alpha) * attn_dist
            # Repeat the (extended-vocab) source ids for every decoder step.
            enc_batch_extend_vocab_ = torch.cat([enc_batch_extend_vocab.unsqueeze(1)]*x.size(1),1) ## extend for all seq
            if(beam_search):
                enc_batch_extend_vocab_ = torch.cat([enc_batch_extend_vocab_[0].unsqueeze(0)]*x.size(0),0) ## extend for all seq
            # Scatter copy mass onto source-token ids, then take log.
            logit = torch.log(vocab_dist_.scatter_add(2, enc_batch_extend_vocab_, attn_dist_))
            return logit
        else:
            return F.log_softmax(logit,dim=-1)
class Transformer(nn.Module):
    """End-to-end seq2seq Transformer: shared embedding, Encoder, Decoder
    and (optionally pointer-generator) output head, plus training and
    greedy-decoding helpers.

    NOTE(review): `emo_number` and `is_eval` are accepted but never used in
    this class — confirm whether they are vestigial.
    """
    def __init__(self, vocab, emo_number, model_file_path=None, is_eval=False, load_optim=False):
        super(Transformer, self).__init__()
        self.vocab = vocab
        self.vocab_size = vocab.n_words

        self.embedding = share_embedding(self.vocab,config.pretrain_emb)
        self.encoder = Encoder(config.emb_dim, config.hidden_dim, num_layers=config.hop, num_heads=config.heads,
                               total_key_depth=config.depth, total_value_depth=config.depth,
                               filter_size=config.filter,universal=config.universal)
        self.decoder = Decoder(config.emb_dim, hidden_size = config.hidden_dim, num_layers=config.hop, num_heads=config.heads,
                               total_key_depth=config.depth,total_value_depth=config.depth,
                               filter_size=config.filter)
        self.generator = Generator(config.hidden_dim, self.vocab_size)

        if config.weight_sharing:
            # Share the weight matrix between target word embedding & the final logit dense layer
            self.generator.proj.weight = self.embedding.lut.weight

        self.criterion = nn.NLLLoss(ignore_index=config.PAD_idx)
        self.optimizer = torch.optim.Adam(self.parameters(), lr=config.lr)
        if(config.noam):
            # Noam schedule wraps Adam; step() must then go through the wrapper.
            self.optimizer = NoamOpt(config.hidden_dim, 1, 8000, torch.optim.Adam(self.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))

        if model_file_path is not None:
            # Resume from a checkpoint saved by save_model().
            print("loading weights")
            state = torch.load(model_file_path, map_location= lambda storage, location: storage)
            self.encoder.load_state_dict(state['encoder_state_dict'])
            self.decoder.load_state_dict(state['decoder_state_dict'])
            self.generator.load_state_dict(state['generator_dict'])
            self.embedding.load_state_dict(state['embedding_dict'])
            if (load_optim):
                self.optimizer.load_state_dict(state['optimizer'])
            self.eval()

        self.model_dir = config.save_path
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
        self.best_path = ""

    def save_model(self, running_avg_ppl, iter, f1_g,f1_b,ent_g,ent_b):
        """Checkpoint all sub-module weights plus optimizer state; the
        metrics are only encoded into the checkpoint filename."""
        state = {
            'iter': iter,
            'encoder_state_dict': self.encoder.state_dict(),
            'decoder_state_dict': self.decoder.state_dict(),
            'generator_dict': self.generator.state_dict(),
            'embedding_dict': self.embedding.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_ppl
        }
        model_save_path = os.path.join(self.model_dir, 'model_{}_{:.4f}_{:.4f}_{:.4f}_{:.4f}_{:.4f}'.format(iter,running_avg_ppl,f1_g,f1_b,ent_g,ent_b) )
        self.best_path = model_save_path
        torch.save(state, model_save_path)

    def train_one_batch(self, batch, iter, train=True):
        """Run one teacher-forced step; returns (loss, perplexity, 0).

        With ``train=True`` also backpropagates and applies an optimizer step.
        """
        enc_batch, _, _, enc_batch_extend_vocab, extra_zeros, _, _ = get_input_from_batch(batch)
        dec_batch, _, _, _, _ = get_output_from_batch(batch)

        if(config.noam):
            # NoamOpt wraps the real optimizer; zero grads on the inner one.
            self.optimizer.optimizer.zero_grad()
        else:
            self.optimizer.zero_grad()

        ## Encode
        mask_src = enc_batch.data.eq(config.PAD_idx).unsqueeze(1)
        meta = self.embedding(batch["program_label"])
        # NOTE(review): `emb_mask` is computed but never used below — confirm
        # whether the encoder was meant to receive it.
        emb_mask = self.embedding(batch["input_mask"])
        encoder_outputs = self.encoder(self.embedding(enc_batch),mask_src)

        # Decode: shift targets right, prepending SOS (teacher forcing).
        sos_token = torch.LongTensor([config.SOS_idx] * enc_batch.size(0)).unsqueeze(1)
        if config.USE_CUDA: sos_token = sos_token.cuda()
        dec_batch_shift = torch.cat((sos_token,dec_batch[:, :-1]),1)

        mask_trg = dec_batch_shift.data.eq(config.PAD_idx).unsqueeze(1)
        # The program-label embedding is added to every decoder input position.
        pre_logit, attn_dist = self.decoder(self.embedding(dec_batch_shift)+meta.unsqueeze(1),encoder_outputs, (mask_src,mask_trg))
        #+meta.unsqueeze(1)
        ## compute output dist
        logit = self.generator(pre_logit,attn_dist,enc_batch_extend_vocab if config.pointer_gen else None, extra_zeros, attn_dist_db=None)
        #logit = F.log_softmax(logit,dim=-1) #fix the name later
        ## loss: NNL if ptr else Cross entropy
        loss = self.criterion(logit.contiguous().view(-1, logit.size(-1)), dec_batch.contiguous().view(-1))

        if(train):
            loss.backward()
            self.optimizer.step()
        # Perplexity is exp(loss), capped at exp(100) to avoid overflow.
        return loss.item(), math.exp(min(loss.item(), 100)), 0

    def compute_act_loss(self,module):
        """Ponder-cost regulariser for ACT: average of remainders + updates."""
        R_t = module.remainders
        N_t = module.n_updates
        p_t = R_t + N_t
        avg_p_t = torch.sum(torch.sum(p_t,dim=1)/p_t.size(1))/p_t.size(0)
        loss = config.act_loss_weight * avg_p_t.item()
        return loss

    def decoder_greedy(self, batch, max_dec_step=50):
        """Greedy decoding: pick the argmax token at each step, up to
        ``max_dec_step`` (+1) tokens; returns one space-joined string per
        batch element, truncated at the first '<EOS>'."""
        enc_batch, _, _, enc_batch_extend_vocab, extra_zeros, _, _ = get_input_from_batch(batch)
        mask_src = enc_batch.data.eq(config.PAD_idx).unsqueeze(1)
        emb_mask = self.embedding(batch["input_mask"])
        meta = self.embedding(batch["program_label"])
        encoder_outputs = self.encoder(self.embedding(enc_batch) ,mask_src)

        # Seed decoder input with a single SOS per batch element.
        ys = torch.ones(enc_batch.shape[0], 1).fill_(config.SOS_idx).long()
        if config.USE_CUDA:
            ys = ys.cuda()
        # print('=====================ys========================')
        # print(ys)
        mask_trg = ys.data.eq(config.PAD_idx).unsqueeze(1)
        decoded_words = []
        for i in range(max_dec_step+1):
            out, attn_dist = self.decoder(self.embedding(ys)+meta.unsqueeze(1),encoder_outputs, (mask_src,mask_trg))
            prob = self.generator(out,attn_dist,enc_batch_extend_vocab, extra_zeros, attn_dist_db=None)
            # Greedy: take the most likely token at the last position.
            _, next_word = torch.max(prob[:, -1], dim = 1)
            # print('=====================next_word1========================')
            # print(next_word)
            decoded_words.append(['<EOS>' if ni.item() == config.EOS_idx else self.vocab.index2word[ni.item()] for ni in next_word.view(-1)])
            #next_word = next_word.data[0]
            # print('=====================next_word2========================')
            # print(next_word)
            if config.USE_CUDA:
                # print('=====================shape========================')
                # print(ys.shape, next_word.shape)
                ys = torch.cat([ys, next_word.unsqueeze(1)], dim=1)
                ys = ys.cuda()
            else:
                ys = torch.cat([ys, next_word.unsqueeze(1)], dim=1)
            mask_trg = ys.data.eq(config.PAD_idx).unsqueeze(1)
            # print('=====================new_ys========================')
            # print(ys)

        # Transpose step-major token lists into per-example sentences.
        sent = []
        for _, row in enumerate(np.transpose(decoded_words)):
            st = ''
            for e in row:
                if e == '<EOS>': break
                else: st+= e + ' '
            sent.append(st)
        return sent

    def decoder_greedy_po(self, batch, max_dec_step=50):
        """NOTE(review): byte-for-byte duplicate of decoder_greedy (apart
        from whitespace) — consider delegating to it or removing."""
        enc_batch, _, _, enc_batch_extend_vocab, extra_zeros, _, _ = get_input_from_batch(batch)
        mask_src = enc_batch.data.eq(config.PAD_idx).unsqueeze(1)
        emb_mask = self.embedding(batch["input_mask"])
        meta = self.embedding(batch["program_label"])
        encoder_outputs = self.encoder(self.embedding(enc_batch),mask_src)

        ys = torch.ones(enc_batch.shape[0], 1).fill_(config.SOS_idx).long()
        if config.USE_CUDA:
            ys = ys.cuda()
        # print('=====================ys========================')
        # print(ys)
        mask_trg = ys.data.eq(config.PAD_idx).unsqueeze(1)
        decoded_words = []
        for i in range(max_dec_step+1):
            out, attn_dist = self.decoder(self.embedding(ys)+meta.unsqueeze(1),encoder_outputs, (mask_src,mask_trg))
            prob = self.generator(out,attn_dist,enc_batch_extend_vocab, extra_zeros, attn_dist_db=None)
            _, next_word = torch.max(prob[:, -1], dim = 1)
            # print('=====================next_word1========================')
            # print(next_word)
            decoded_words.append(['<EOS>' if ni.item() == config.EOS_idx else self.vocab.index2word[ni.item()] for ni in next_word.view(-1)])
            #next_word = next_word.data[0]
            # print('=====================next_word2========================')
            # print(next_word)
            if config.USE_CUDA:
                # print('=====================shape========================')
                # print(ys.shape, next_word.shape)
                ys = torch.cat([ys, next_word.unsqueeze(1)], dim=1)
                ys = ys.cuda()
            else:
                ys = torch.cat([ys, next_word.unsqueeze(1)], dim=1)
            mask_trg = ys.data.eq(config.PAD_idx).unsqueeze(1)
            # print('=====================new_ys========================')
            # print(ys)

        sent = []
        for _, row in enumerate(np.transpose(decoded_words)):
            st = ''
            for e in row:
                if e == '<EOS>': break
                else: st+= e + ' '
            sent.append(st)
        return sent
### CONVERTED FROM https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/research/universal_transformer_util.py#L1062
class ACT_basic(nn.Module):
    """Adaptive Computation Time (ACT) halting unit for the Universal
    Transformer: repeatedly applies `fn` to the state and learns, per
    position, how many steps to take before halting."""
    def __init__(self,hidden_size):
        super(ACT_basic, self).__init__()
        self.sigma = nn.Sigmoid()
        # Linear "ponder" head producing the per-position halting probability.
        self.p = nn.Linear(hidden_size,1)
        self.p.bias.data.fill_(1)
        # Halt once cumulative probability exceeds 1 - epsilon (epsilon = 0.1).
        self.threshold = 1 - 0.1

    def forward(self, state, inputs, fn, time_enc, pos_enc, max_hop, encoder_output=None, decoding=False):
        # init_hdd — note: these .cuda() calls assume a GPU is available.
        ## [B, S] cumulative halting probability per position
        halting_probability = torch.zeros(inputs.shape[0],inputs.shape[1]).cuda()
        ## [B, S] remainder mass for positions that halt
        remainders = torch.zeros(inputs.shape[0],inputs.shape[1]).cuda()
        ## [B, S] number of updates applied per position
        n_updates = torch.zeros(inputs.shape[0],inputs.shape[1]).cuda()
        ## [B, S, HDD] weighted running state
        previous_state = torch.zeros_like(inputs).cuda()
        step = 0
        # for l in range(self.num_layers):
        # Keep stepping while any position has neither halted nor hit max_hop.
        while( ((halting_probability<self.threshold) & (n_updates < max_hop)).byte().any()):
            # Add timing signal
            state = state + time_enc[:, :inputs.shape[1], :].type_as(inputs.data)
            state = state + pos_enc[:, step, :].unsqueeze(1).repeat(1,inputs.shape[1],1).type_as(inputs.data)
            p = self.sigma(self.p(state)).squeeze(-1)
            # Mask for inputs which have not halted yet
            still_running = (halting_probability < 1.0).float()
            # Mask of inputs which halted at this step
            new_halted = (halting_probability + p * still_running > self.threshold).float() * still_running
            # Mask of inputs which haven't halted, and didn't halt this step
            still_running = (halting_probability + p * still_running <= self.threshold).float() * still_running
            # Add the halting probability for this step to the halting
            # probabilities for those input which haven't halted yet
            halting_probability = halting_probability + p * still_running
            # Compute remainders for the inputs which halted at this step
            remainders = remainders + new_halted * (1 - halting_probability)
            # Add the remainders to those inputs which halted at this step
            halting_probability = halting_probability + new_halted * remainders
            # Increment n_updates for all inputs which are still running
            n_updates = n_updates + still_running + new_halted
            # Compute the weight to be applied to the new state and output
            # 0 when the input has already halted
            # p when the input hasn't halted yet
            # the remainders when it halted this step
            update_weights = p * still_running + new_halted * remainders

            if(decoding):
                # NOTE(review): DecoderLayer is invoked elsewhere with a 4-tuple
                # (x, encoder_output, [], masks); here a 3-tuple is passed —
                # confirm the layer supports this calling convention.
                state, _, attention_weight = fn((state,encoder_output,[]))
            else:
                # apply transformation on the state
                state = fn(state)

            # update running part in the weighted state and keep the rest
            previous_state = ((state * update_weights.unsqueeze(-1)) + (previous_state * (1 - update_weights.unsqueeze(-1))))
            if(decoding):
                if(step==0): previous_att_weight = torch.zeros_like(attention_weight).cuda() ## [B, S, src_size]
                previous_att_weight = ((attention_weight * update_weights.unsqueeze(-1)) + (previous_att_weight * (1 - update_weights.unsqueeze(-1))))
            ## previous_state is actually the new_state at end of the loop;
            ## assigning it here keeps the next iteration correct, and it is
            ## indeed previous_state that gets returned.
            step+=1

        if(decoding):
            return previous_state, previous_att_weight, (remainders,n_updates)
        else:
            return previous_state, (remainders,n_updates)
"berserkhealer@163.com"
] | berserkhealer@163.com |
1b065bced2f96564d8b7517d3b16698fa6a03312 | b04037287a05a433520126d9fee61f4aa91bcc32 | /dsl/analysis_flow_processor.py | 86536123b1ff32269c1d6da96d0b2747aa55da3c | [] | no_license | robinjack/dsl_for_data_analysis | 74a4d50ccc52c65253d2fc5d15118be1775536fb | f144784377d8736be17de35aa27228b6a9ceccf2 | refs/heads/master | 2022-11-27T01:59:30.867283 | 2020-08-06T18:43:39 | 2020-08-06T18:43:39 | 254,438,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | from utils.const import CLASSNAMES
from dsl.function_table import FUNCTION_TABLE
def self_map(self):
    """Identity evaluator: hand the object back unchanged."""
    return self
def obj_processor(textX_object):
    '''
    Attach an ``evaluate`` callable to each textX model object, looked up
    in FUNCTION_TABLE by the object's fully qualified rule name; objects
    without a registered function fall back to the identity (self_map).
    '''
    if hasattr(textX_object, '_tx_fqn'):
        evaluator = FUNCTION_TABLE.get(textX_object._tx_fqn, self_map)
        textX_object.evaluate = evaluator
    return textX_object
# Object processors are registered by defining a map between a rule name
# and the callable that will process the instances of that rule/class.
# Every grammar class shares the same processor, so build the table with
# dict.fromkeys rather than a comprehension.
OBJ_PROCESSORS = dict.fromkeys(CLASSNAMES, obj_processor)
# This map/dict is registered on a meta-model by the "register_obj_processors"
# call.
"robinmajack@gmail.com"
] | robinmajack@gmail.com |
0d3a933cc8970b6430e5055ad9a3b982c1909717 | 49ec08e0f9335e18853a55d8f26e3f4f243ff4be | /dyndns.py | 4746576785e7cc461ccd35074435a5c2406915e4 | [] | no_license | therealgambo/dyndns | 573eecd0b265090c9c4254a11a7af033e0ae2da8 | a1d3b9bcd290174f50c4bede412c6e94dd13fdf4 | refs/heads/master | 2020-12-02T21:12:54.670421 | 2017-07-05T03:43:52 | 2017-07-05T03:43:52 | 96,273,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,242 | py | #!/usr/bin/python3
# Required pip packages
# - pip install requests termcolor pyopenssl ndg-httpsclient pyasn1
import collections
import requests
import smtplib
import socket
import time
from termcolor import colored
# --- User configuration: replace every placeholder before running. ---
# Your SiteHost API Key
api_key = "ENTER API KEY HERE"
# Your SiteHost / MyHost client ID
# NOTE(review): this is a template placeholder, not valid Python — the
# script will not import until it is replaced with an integer literal.
client_id = ENTER CLIENT ID HERE
# A list of domains that your client ID controls
domains = ["mydomain.nz"]
# A whitelist list of records that will be updated
update_records = ["mydomain.nz", "*.mydomain.nz"]
# The SMTP server to use for sending alert emails
smtp_server = "127.0.0.1"
# A list of email addresses to receive notification alert emails
alert_emails = ['myemail@ubuntu']
def msg(message):
    """Print *message* prefixed with a [YYYY-mm-dd HH:MM] timestamp."""
    stamp = time.strftime("%Y-%m-%d %H:%M")
    print('[' + stamp + '] ' + message)
def info(message):
    """Log *message* at INFO level (green tag)."""
    tag = colored('[INFO]: ', 'green')
    msg(tag + message)
def warn(message):
    """Log *message* at WARN level (yellow tag)."""
    tag = colored('[WARN]: ', 'yellow')
    msg(tag + message)
def error(message):
    """Log *message* at ERROR level (red tag)."""
    tag = colored('[ERROR]: ', 'red')
    msg(tag + message)
def get_ip():
    """Return this machine's public IP address, or None if lookup fails."""
    try:
        response = requests.get("http://httpbin.org/ip")
        if response.status_code != 200:
            return None
        return response.json()['origin']
    except requests.ConnectionError:
        return None
def get_dns_ip(domain):
    """Resolve *domain* to its IPv4 address, or None when resolution fails."""
    try:
        resolved = socket.gethostbyname(domain)
    except socket.error:
        return None
    return resolved
def update_domain(domain, ip_address):
    """Point every whitelisted A record of *domain* at *ip_address*.

    Non-A records are ignored; A records outside the whitelist are logged
    as skipped.
    """
    records = get_records(domain)
    if records is None:
        return
    for record in records:
        # Only A records carry an IPv4 address.
        if record['type'] != 'A':
            continue
        if record['name'] in update_records:
            update_record(domain, record['id'], record['type'], record['name'], ip_address)
        else:
            warn(' - Skipping ' + colored(record['type'], 'green') + ' record: ' + colored(record['name'], 'yellow'))
def update_record(domain, record_id, type, name, ip_address, priority=0):
    """Push a single DNS record update through the SiteHost API."""
    try:
        info(' - Updating ' + colored(type, 'green') + ' record: ' + colored(name, 'green'))
        endpoint = (
            'https://mysth.safeserver.net.nz/1.0/dns/update_record.json'
            '?apikey=%s&client_id=%d&domain=%s&record_id=%s&type=%s&name=%s&content=%s&prio=%d'
            % (api_key, client_id, domain, record_id, type, name, ip_address, priority)
        )
        response = requests.get(endpoint)
        if response.status_code == 200 and response.json()['status'] is False:
            error(response.text)
    except requests.ConnectionError:
        error('Cannot communicate with the API: update_record()')
def get_records(domain):
    """Fetch all DNS records for *domain*; None on API or connection failure."""
    endpoint = (
        "https://mysth.safeserver.net.nz/1.0/dns/list_records.json"
        "?apikey=%s&client_id=%d&domain=%s" % (api_key, client_id, domain)
    )
    try:
        response = requests.get(endpoint)
        if response.status_code == 200 and response.json()['status'] is False:
            error(response.text)
            return None
        return response.json()['return']
    except requests.ConnectionError:
        error('Cannot communicate with the API: get_records()')
def get_mon_hosts():
    """List all monitored hosts; None on API or connection failure."""
    endpoint = (
        "https://mysth.safeserver.net.nz/1.0/mon/list_hosts.json"
        "?apikey=%s&client_id=%d" % (api_key, client_id)
    )
    try:
        response = requests.get(endpoint)
        if response.status_code == 200 and response.json()['status'] is False:
            error(response.text)
            return None
        return response.json()['return']
    except requests.ConnectionError:
        error('Cannot communicate with the API: get_mon_hosts()')
def update_monitoring(domain, ip):
    """Re-point any monitored host matching *domain* at the new *ip*."""
    hosts = get_mon_hosts()
    if hosts is None:
        return
    for host in hosts:
        if host['hostname'] == domain and host['ip_addr'] != ip:
            info(' - Updating ' + colored(domain, 'green') + ' monitoring: ' + colored(ip, 'green'))
            update_mon_host(host['id'], ip)
def update_mon_host(host_id, ip):
    """Update one monitored host's IP, then push the monitoring config."""
    payload = collections.OrderedDict()
    payload['client_id'] = client_id
    payload['host_id'] = host_id
    payload['params[ip_addr]'] = ip
    try:
        response = requests.post(
            "https://mysth.safeserver.net.nz/1.0/mon/update_host.json?apikey=%s" % (
                api_key,
            ), data=payload)
        if response.status_code == 200 and response.json()['status'] is False:
            error(response.text)
            return None

        # Second call commits the updated monitoring configuration.
        response = requests.post(
            "https://mysth.safeserver.net.nz/1.0/mon/update_config.json?apikey=%s" % (api_key))
        if response.status_code == 200 and response.json()['status'] is False:
            error(response.text)
            return None
    except requests.ConnectionError:
        error('Cannot communicate with the API: update_mon_host()')
def send_email(to, domain, ip, sender = 'dyndns@ubuntu'):
    """
    Send an email alert about a changed IP address.

    :param to: list of recipient addresses (must be a python list).
    :param domain: the domain whose records were updated.
    :param ip: the new IP address.
    :param sender: envelope/From address.
    """
    if not isinstance(to, list):
        error('Email recipients must be provided as a python list.')
    else:
        try:
            # Header lines must start at column 0, hence the unindented
            # literal. BUG FIX: a blank line now separates the headers from
            # the body — without it, "Domain:" etc. were parsed as (invalid)
            # headers and the message body was empty.
            m = """From: %s
To: %s
Subject: [%s] IP address has changed

Domain: %s
IP Address: %s
Updated Records: %s
""" % (sender, ','.join(to), domain, domain, ip, ','.join(update_records))

            s = smtplib.SMTP(smtp_server, 25)
            s.sendmail(sender, ','.join(to), m)
            s.quit()  # close the SMTP session cleanly instead of leaking it
            info('Notification Email Sent: ' + colored(', '.join(to), 'green'))
        except socket.error:
            error('Failed to send notification email, could not connect to SMTP server: ' + colored(smtp_server + ':25', 'green'))
        except smtplib.SMTPException:
            # BUG FIX: was bare `SMTPException`, which is not a defined name
            # here (only `smtplib` is imported) and raised NameError instead
            # of handling the send failure.
            error('Failed to send notification email to: ' + colored(', '.join(to), 'green'))
if __name__ == "__main__":
    public_ip = get_ip()
    if public_ip is None:
        error('Could not retrieve current IP address from internet.')
        exit(-1)

    for domain in domains:
        dns_ip = get_dns_ip(domain)
        info('Checking domain: ' + colored(domain, 'green') + ' (' + colored(dns_ip, 'green') + ')')

        # Nothing to do when DNS already matches the public address.
        if dns_ip == public_ip:
            info('DNS is up to date!')
            continue

        if dns_ip is None:
            warn(colored('Updating, could not determine IP from DNS', 'yellow'))

        info('Updating domain ' + colored(domain, 'green') + ' with current ip ' + colored(public_ip, 'green'))
        update_domain(domain, public_ip)
        update_monitoring(domain, public_ip)
        send_email(alert_emails, domain, public_ip)
| [
"git@fucking.nz"
] | git@fucking.nz |
674849a1860811c2a1da07971241d91ac3153fa5 | b2731b2be3080973fac03258454f657b4411a4e9 | /python programs/data wise notes/29 april/default dict ex8.py | 1609b826f61b1e8354e833c8d5de0ffbd50421e2 | [] | no_license | Pavan1511/python-program-files | db8817a64942b989253a992393283c54df22af81 | 4701e6c4a60d7b31fd2888c41c7b1f69f37735a6 | refs/heads/master | 2022-12-10T00:03:43.795916 | 2020-09-14T16:17:01 | 2020-09-14T16:17:01 | 256,945,716 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | #Program: 8 using int as a default_factory function
# use of int function in defaultdict
from collections import defaultdict
def int_defaultdict():
    """Demonstrate defaultdict(int): count character frequencies and print
    the resulting mapping ({'a': 3, 'b': 4, 'c': 3})."""
    sample = 'abccbacabb'
    freq = defaultdict(int)
    for symbol in sample:
        # Missing keys start at int() == 0, so no membership check is needed.
        freq[symbol] = freq[symbol] + 1
    print(freq)
def main():
    """Entry point: run the defaultdict counting demo."""
    int_defaultdict()

if __name__ == "__main__":
    main()

# Walkthrough of how each key's count evolves while scanning 'abccbacabb':
'''
ch ---> a = 0, 1,2,3
ch---> b = 0 ,1,2,3,4
ch ---> c = 0,1,2,3
'''
| [
"noreply@github.com"
] | noreply@github.com |
fade9b5c1817b307aadb3d23edf34995d3f5d250 | f09e564ce389fa7df0f1010c414385430f660547 | /modifySchema.py | 9254d3957d7618f4016d03b630ec170ad1e7f72b | [] | no_license | haidixiansheng/coursePortal | bbf89cd22fe2d1f605fdf80a5c2591de79ac0ac8 | 10aad71186452c55c72507e83c7ee0a7e6372fe0 | refs/heads/master | 2021-01-15T20:43:17.376424 | 2013-08-21T02:24:30 | 2013-08-21T02:24:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | #!/usr/bin/env python
import os
import sys
from xml.dom.minidom import parse

# Path of the Solr schema this script patches (read and rewritten in place).
SCHEMA_PATH = 'solr-4.3.0/example/solr/collection1/conf/schema.xml'

# Solr 4.x requires a `_version_` field (update log / SolrCloud support);
# inject it into the <fields> section of the stock example schema.
dom = parse(SCHEMA_PATH)
x = dom.createElement("field")
x.setAttribute("type", "long")
x.setAttribute("name", "_version_")
x.setAttribute("indexed", "true")
x.setAttribute("stored", "true")
y = dom.getElementsByTagName("fields")
y[0].appendChild(x)

# BUG FIX: the output handle was previously opened with plain open() and
# never closed, so buffered output could be lost; `with` guarantees the
# rewritten schema is flushed and the handle released.
with open(SCHEMA_PATH, 'w') as f:
    dom.writexml(f)
| [
"tyan@umich.edu"
] | tyan@umich.edu |
4710cc6ca7e7276480b00d450e48a62ffad8dea1 | 013fcd8a1699cbe5c50493c00b2f87e63d173ff1 | /Lib/site-packages/attr/validators.py | 46dcafc2ecf74360987be79d80cdedcf87f8c1d0 | [] | no_license | Homa333/PriorNet | 8d894571f7c32d983a93c216749b07b3027160b2 | b390f011188d1f23ba8e41eadbe533544105522d | refs/heads/master | 2023-07-27T06:38:57.353953 | 2021-09-08T09:43:23 | 2021-09-08T09:43:23 | 365,434,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,537 | py | """
Commonly useful validators.
"""
from __future__ import absolute_import, division, print_function
import re
from ._make import _AndValidator, and_, attrib, attrs
from .exceptions import NotCallableError
__all__ = [
"and_",
"deep_iterable",
"deep_mapping",
"in_",
"instance_of",
"is_callable",
"matches_re",
"optional",
"provides",
]
@attrs(repr=False, slots=True, hash=True)
class _InstanceOfValidator(object):
    type = attrib()
    def __call__(self, inst, attr, value):
        """
        We use a callable class to be able to change the ``__repr__``.
        """
        if not isinstance(value, self.type):
            raise TypeError(
                "'{name}' must be {type!r} (got {value!r} that is a "
                "{actual!r}).".format(
                    # BUG FIX: ``attr`` is an ``attr.Attribute``, whose public
                    # field is ``name``.  The original referenced a nonexistent
                    # ``chatroom_win`` attribute, which turned every failed
                    # validation into an AttributeError instead of the
                    # documented TypeError.
                    name=attr.name,
                    type=self.type,
                    actual=value.__class__,
                    value=value,
                ),
                attr,
                self.type,
                value,
            )
    def __repr__(self):
        return "<instance_of validator for type {type!r}>".format(
            type=self.type
        )
def instance_of(type):
    """
    A validator that raises a `TypeError` if the initializer is called
    with a wrong type for this particular attribute (checks are performed using
    `isinstance` therefore it's also valid to pass a tuple of types).
    :param type: The type to check for.
    :type type: type or tuple of types
    :raises TypeError: With a human readable error message, the attribute
        (of type `attr.Attribute`), the expected type, and the value it
        got.
    """
    return _InstanceOfValidator(type)
@attrs(repr=False, frozen=True, slots=True)
class _MatchesReValidator(object):
    regex = attrib()
    flags = attrib()
    match_func = attrib()
    def __call__(self, inst, attr, value):
        """
        We use a callable class to be able to change the ``__repr__``.
        """
        if not self.match_func(value):
            raise ValueError(
                "'{name}' must match regex {regex!r}"
                " ({value!r} doesn't)".format(
                    # BUG FIX: use the Attribute's ``name`` field (the original
                    # referenced ``chatroom_win``, which does not exist on
                    # ``attr.Attribute`` and raised AttributeError instead of
                    # the intended ValueError).
                    name=attr.name, regex=self.regex.pattern, value=value
                ),
                attr,
                self.regex,
                value,
            )
    def __repr__(self):
        return "<matches_re validator for pattern {regex!r}>".format(
            regex=self.regex
        )
def matches_re(regex, flags=0, func=None):
    r"""
    A validator that raises `ValueError` if the initializer is called
    with a string that doesn't match *regex*.
    :param str regex: a regex string to match against
    :param int flags: flags that will be passed to the underlying re function
        (default 0)
    :param callable func: which underlying `re` function to call (options
        are `re.fullmatch`, `re.search`, `re.match`, default
        is ``None`` which means either `re.fullmatch` or an emulation of
        it on Python 2). For performance reasons, they won't be used directly
        but on a pre-`re.compile`\ ed pattern.
    .. versionadded:: 19.2.0
    """
    fullmatch = getattr(re, "fullmatch", None)
    valid_funcs = (fullmatch, None, re.search, re.match)
    if func not in valid_funcs:
        raise ValueError(
            "'func' must be one of %s."
            % (
                ", ".join(
                    sorted(
                        e and e.__name__ or "None" for e in set(valid_funcs)
                    )
                ),
            )
        )
    pattern = re.compile(regex, flags)
    if func is re.match:
        match_func = pattern.match
    elif func is re.search:
        match_func = pattern.search
    else:
        # Default: full match; emulate ``re.fullmatch`` with a \Z anchor on
        # Python 2, which lacks it.
        if fullmatch:
            match_func = pattern.fullmatch
        else:
            pattern = re.compile(r"(?:{})\Z".format(regex), flags)
            match_func = pattern.match
    return _MatchesReValidator(pattern, flags, match_func)
@attrs(repr=False, slots=True, hash=True)
class _ProvidesValidator(object):
    interface = attrib()
    def __call__(self, inst, attr, value):
        """
        We use a callable class to be able to change the ``__repr__``.
        """
        if not self.interface.providedBy(value):
            raise TypeError(
                "'{name}' must provide {interface!r} which {value!r} "
                "doesn't.".format(
                    # BUG FIX: use the Attribute's ``name`` field (was the
                    # nonexistent ``chatroom_win``, which raised
                    # AttributeError instead of the documented TypeError).
                    name=attr.name, interface=self.interface, value=value
                ),
                attr,
                self.interface,
                value,
            )
    def __repr__(self):
        return "<provides validator for interface {interface!r}>".format(
            interface=self.interface
        )
def provides(interface):
    """
    A validator that raises a `TypeError` if the initializer is called
    with an object that does not provide the requested *interface* (checks are
    performed using ``interface.providedBy(value)`` (see `zope.interface
    <https://zopeinterface.readthedocs.io/en/latest/>`_).
    :param interface: The interface to check for.
    :type interface: ``zope.interface.Interface``
    :raises TypeError: With a human readable error message, the attribute
        (of type `attr.Attribute`), the expected interface, and the
        value it got.
    """
    return _ProvidesValidator(interface)
@attrs(repr=False, slots=True, hash=True)
class _OptionalValidator(object):
    validator = attrib()
    def __call__(self, inst, attr, value):
        # ``None`` is always accepted; anything else is delegated to the
        # wrapped validator.
        if value is not None:
            self.validator(inst, attr, value)
    def __repr__(self):
        return "<optional validator for {what} or None>".format(
            what=repr(self.validator)
        )
def optional(validator):
    """
    Make an attribute optional: ``None`` is accepted in addition to any
    value that satisfies the wrapped validator(s).
    :param validator: A validator (or a list of validators) applied to
        non-``None`` values.
    :type validator: callable or `list` of callables.
    .. versionadded:: 15.1.0
    .. versionchanged:: 17.1.0 *validator* can be a list of validators.
    """
    wrapped = _AndValidator(validator) if isinstance(validator, list) else validator
    return _OptionalValidator(wrapped)
@attrs(repr=False, slots=True, hash=True)
class _InValidator(object):
    options = attrib()
    def __call__(self, inst, attr, value):
        try:
            in_options = value in self.options
        except TypeError:  # e.g. `1 in "abc"`
            in_options = False
        if not in_options:
            raise ValueError(
                "'{name}' must be in {options!r} (got {value!r})".format(
                    # BUG FIX: use the Attribute's ``name`` field (was the
                    # nonexistent ``chatroom_win``, which raised
                    # AttributeError instead of the documented ValueError).
                    name=attr.name, options=self.options, value=value
                )
            )
    def __repr__(self):
        return "<in_ validator with options {options!r}>".format(
            options=self.options
        )
def in_(options):
    """
    A validator that raises a `ValueError` if the initializer is called
    with a value that does not belong in the options provided. The check is
    performed using ``value in options``.
    :param options: Allowed options.
    :type options: list, tuple, `enum.Enum`, ...
    :raises ValueError: With a human readable error message, the attribute (of
        type `attr.Attribute`), the expected options, and the value it
        got.
    .. versionadded:: 17.1.0
    """
    return _InValidator(options)
@attrs(repr=False, slots=False, hash=True)
class _IsCallableValidator(object):
    def __call__(self, inst, attr, value):
        """
        We use a callable class to be able to change the ``__repr__``.
        """
        if not callable(value):
            message = (
                "'{name}' must be callable "
                "(got {value!r} that is a {actual!r})."
            )
            raise NotCallableError(
                msg=message.format(
                    # BUG FIX: use the Attribute's ``name`` field (was the
                    # nonexistent ``chatroom_win``, which raised
                    # AttributeError instead of NotCallableError).
                    name=attr.name, value=value, actual=value.__class__
                ),
                value=value,
            )
    def __repr__(self):
        return "<is_callable validator>"
def is_callable():
    """
    A validator that raises a `attr.exceptions.NotCallableError` if the
    initializer is called with a value for this particular attribute
    that is not callable.
    .. versionadded:: 19.1.0
    :raises `attr.exceptions.NotCallableError`: With a human readable error
        message containing the attribute (`attr.Attribute`) name,
        and the value it got.
    """
    return _IsCallableValidator()
@attrs(repr=False, slots=True, hash=True)
class _DeepIterable(object):
    member_validator = attrib(validator=is_callable())
    iterable_validator = attrib(
        default=None, validator=optional(is_callable())
    )
    def __call__(self, inst, attr, value):
        """
        We use a callable class to be able to change the ``__repr__``.
        """
        # Validate the container itself first (when configured), then each
        # of its members.
        outer = self.iterable_validator
        if outer is not None:
            outer(inst, attr, value)
        for item in value:
            self.member_validator(inst, attr, item)
    def __repr__(self):
        if self.iterable_validator is None:
            iterable_identifier = ""
        else:
            iterable_identifier = " {iterable!r}".format(
                iterable=self.iterable_validator
            )
        template = (
            "<deep_iterable validator for{iterable_identifier}"
            " iterables of {member!r}>"
        )
        return template.format(
            iterable_identifier=iterable_identifier,
            member=self.member_validator,
        )
def deep_iterable(member_validator, iterable_validator=None):
    """
    A validator that deep-validates an iterable: *member_validator* is
    applied to every member, and *iterable_validator* (optional) to the
    iterable itself.
    .. versionadded:: 19.1.0
    :raises TypeError: if any sub-validators fail
    """
    return _DeepIterable(member_validator, iterable_validator)
@attrs(repr=False, slots=True, hash=True)
class _DeepMapping(object):
    key_validator = attrib(validator=is_callable())
    value_validator = attrib(validator=is_callable())
    mapping_validator = attrib(default=None, validator=optional(is_callable()))
    def __call__(self, inst, attr, value):
        """
        We use a callable class to be able to change the ``__repr__``.
        """
        # Optionally validate the mapping itself, then every key/value pair.
        if self.mapping_validator is not None:
            self.mapping_validator(inst, attr, value)
        for k in value:
            self.key_validator(inst, attr, k)
            self.value_validator(inst, attr, value[k])
    def __repr__(self):
        template = (
            "<deep_mapping validator for objects mapping {key!r} to {value!r}>"
        )
        return template.format(
            key=self.key_validator, value=self.value_validator
        )
def deep_mapping(key_validator, value_validator, mapping_validator=None):
    """
    A validator that deep-validates a dictionary: *key_validator* runs on
    every key, *value_validator* on every value, and *mapping_validator*
    (optional) on the mapping as a whole.
    .. versionadded:: 19.1.0
    :raises TypeError: if any sub-validators fail
    """
    return _DeepMapping(key_validator, value_validator, mapping_validator)
| [
"kshitizbhurtel@gmail.com"
] | kshitizbhurtel@gmail.com |
719b19ce7e60d6b7a55a48aa5ec48d45ea4dd2bc | 8cd5d44b1a03947e05c12deab1866ee8b5028414 | /serverSocket.py | 78e8d86487394d59a45161f024d6f87072c858d5 | [] | no_license | LittleYmada/SocketC-Spy | 1cb59d64ef004989249cdd21649b099910661253 | 3df1a8ce7407bda7d1bd56ff402998ca4f69bfde | refs/heads/master | 2021-01-10T16:56:05.193640 | 2016-03-27T08:32:36 | 2016-03-27T08:32:36 | 54,819,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | from socket import *
serverPort = 12000

# TCP socket listening on all interfaces with a backlog of one connection.
serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.bind(('', serverPort))
serverSocket.listen(1)
print('The server is ready to receive')

# Serve forever: accept one client at a time and echo its message upper-cased.
while True:
    connectionSocket, addr = serverSocket.accept()
    # accept() always returns a peer address; the guard is kept but written
    # with the idiomatic identity test instead of ``!= None``.
    if addr is not None:
        print(str(addr) + ' say hello to you')
    sentence = connectionSocket.recv(1024).decode()
    recvSentence = sentence.upper()
    # BUG FIX: send() may transmit only part of the buffer; sendall()
    # guarantees the whole reply is written before the socket is closed.
    connectionSocket.sendall(recvSentence.encode())
connectionSocket.close() | [
"13122171080@163.com"
] | 13122171080@163.com |
83cbbbb33a3d4d6ef78725161c3f3a4209e07bd3 | fb8a95beb7447902f7f4b50d2fc885be43df56e9 | /plan_dnia.py | 36e46b9296b193cfe22af624ce01772357401656 | [] | no_license | kwasniep/plandnia | e22deb874374c9867bc06274478e22bcd00c1a5c | aface412a77a037fe7e3f4d76f4d350ec10b40c9 | refs/heads/main | 2023-08-27T11:16:53.008149 | 2021-10-28T03:35:43 | 2021-10-28T03:35:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | import speech_recognition as sr
import pyttsx3
import time
import csv
import datetime
# Text-to-speech engine used for all spoken announcements.
engine = pyttsx3.init()
engine.setProperty('volume',1)
engine.setProperty('rate',150)
# datetime.weekday() index (Monday == 0) -> English name used in the plan
# file names ("plan_dnia_<n>_<weekday>.txt", see load_data()).
weekdays={0:'monday',1:'tuesday',2:'wednesday',
3:'thursday',4:'friday',5:'saturday',6:'sunday'}
# Same index -> Polish weekday name, used in the spoken announcements.
weekdays2={0:'poniedzialek',1:'wtorek',2:'środa',
3:'czwartek',4:'piątek',5:'sobota',6:'niedziela'}
# Today's schedule rows loaded by load_data(): ["HH:MM", message, ...].
dane=[]
# Announcement phrases read from settings.txt (expects two non-empty lines).
komunikaty=[]
def recognise(msg="Powiedz: okey"):
    """Record one utterance and return the recognised Polish text.

    Prints *msg* as a prompt, listens on the default microphone and sends
    the audio to the Google speech API (language "pl-PL").  Returns the
    lower-cased transcript, or None when nothing was recognised or the
    request failed.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print(msg)
        audio = r.listen(source)
    try:
        recognized_text = r.recognize_google(audio, language="pl-PL")
        print("Powiedziałeś: " + recognized_text)
        return recognized_text.lower()
    except sr.UnknownValueError:
        # Speech was captured but could not be understood.
        print("Nie rozumiem")
        #engine.say("Nie rozumiem")
        #engine.runAndWait()
    except sr.RequestError as e:
        # Network/API failure while contacting the recognition service.
        print("Error: ", e)
def load_data():
    """Reload today's schedule and the announcement phrases.

    Reads ``plan_dnia_<weekday number>_<weekday name>.txt`` (comma-separated
    CSV) into the global ``dane``, and the lines of ``settings.txt`` into the
    global ``komunikaty``.  Returns True on success, False when either file
    is missing or invalid.
    """
    global weekdays, dane, komunikaty
    try:
        # datetime.weekday(): Monday == 0; the file names are 1-based.
        day_num = datetime.datetime.today().weekday()
        with open('plan_dnia_' + str(day_num+1) + '_' + weekdays[day_num] +'.txt', newline='',encoding='utf-8') as csvfile:
            csv_reader = csv.reader(csvfile, delimiter=',', quotechar=None)
            dane=list(csv_reader)
    except Exception as e:
        print("Error: ", e)
        return False
    with open('settings.txt', encoding='utf-8') as settings:
        komunikaty = settings.read().split('\n')
    #print('settings.txt (komunikaty): ', komunikaty)
    # settings.txt must provide at least two non-empty lines (the phrases).
    if not (len(komunikaty)>1 and komunikaty[0] and komunikaty[1]):
        input('Plik settings.txt powinien zawierać 2 linijki')
        return False
    return True
# Main loop: announce the current day and time, wait for each scheduled
# "HH:MM" row to come due, speak it, then sleep and start over.
while True:
    if not load_data():
        break
    app_start_time = time.strftime("%H:%M", time.localtime())
    print(weekdays2[datetime.datetime.today().weekday()] + ' ' + app_start_time)
    engine.say('jest ' + weekdays2[datetime.datetime.today().weekday()] + ', godzina ' + app_start_time + ', test dźwięku')
    engine.runAndWait()
    # While the whole plan is already in the past (last row's time is before
    # now), poll once a minute, reloading the plan each time.
    while dane[len(dane)-1][0] < time.strftime("%H:%M", time.localtime()):
        time.sleep(60)
        if not load_data():
            break
        app_start_time = time.strftime("%H:%M", time.localtime())
        print(weekdays2[datetime.datetime.today().weekday()] + ' ' + app_start_time)
    for row in dane:
        print(row)
        # Skip entries scheduled before the app (re)started.
        if row[0]<app_start_time:
            continue
        not_yet = True
        while not_yet:
            # Speak the entry once its "HH:MM" time has been reached.
            if row[0]<=time.strftime("%H:%M", time.localtime()):
                #text = ""
                #while text!="okej":
                engine.say(row[0] + ' ' + komunikaty[0] + ', ' + row[1])
                engine.runAndWait()
                # text = recognise()
                # if not text:
                #     continue
                not_yet = False
                #engine.say('spoko')
                #engine.runAndWait()
            else:
                # Not due yet; check again in 30 seconds.
                time.sleep(30)
    engine.say(komunikaty[1])
    engine.runAndWait()
    time.sleep(120)
| [
"noreply@github.com"
] | noreply@github.com |
8e6955653ad162f1fe7a5fd85b6a47836ca902c4 | 92d1cd994e34a63f1c12205a80ae2e8e7e91bac5 | /binarytime.1s.py | a94cfa9c7b41df6b17c1717c0f1db9dc2aec5239 | [
"MIT"
] | permissive | fenhl/bitbar-true-binary-time | 695f301bbc662d9e7b705b5a56f152edb6787a48 | 37e00ce2471ffa58bbd70f2295f0eb1e18e93bf5 | refs/heads/main | 2021-07-23T01:27:24.580207 | 2016-09-22T17:18:52 | 2016-09-22T17:18:52 | 68,884,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | #!/usr/local/bin/python3
import datetime
import more_itertools
def bits_iter(time=None):
    """Yield the (infinite) binary expansion of *time*'s fraction of the day."""
    remainder = fraction_of_day(time)
    while True:
        remainder *= 2
        bit = remainder >= 1
        yield bit
        if bit:
            remainder -= 1

def decode_bits(bits):
    """Convert a binary-fraction bit sequence back into a datetime.time."""
    fract = sum(1 / 2 ** pos for pos, bit in enumerate(bits, start=1) if bit)
    midnight = datetime.datetime(1970, 1, 1)
    return (midnight + datetime.timedelta(days=fract)).time()

def decode_hex(hex_string):
    """Expand a hex string into its bits, most significant bit first."""
    for digit in hex_string:
        value = int(digit, 16)
        for mask in (0b1000, 0b0100, 0b0010, 0b0001):
            yield bool(value & mask)

def fraction_of_day(time=None):
    """Return *time* (default: now) as a fraction of a 24-hour day."""
    if time is None:
        time = datetime.datetime.now().time()
    parts = (
        time.hour / 24,
        time.minute / 1440,
        time.second / 86400,
        time.microsecond / 86400000000,
    )
    return parts[0] + parts[1] + parts[2] + parts[3]

def hex(bits):  # NOTE: shadows the builtin ``hex``; kept for compatibility
    """Pack bits (MSB first) into integer nybbles, zero-padding the tail."""
    for group in more_itertools.chunked(bits, 4):
        while len(group) < 4:
            group.append(False)
        yield 8 * group[0] + 4 * group[1] + 2 * group[2] + 1 * group[3]
if __name__ == '__main__':
    now = datetime.datetime.now()
    # First output line: the current time-of-day encoded as 12 binary-
    # fraction bits, printed as three upper-case hex digits.
    print(''.join('{:X}'.format(nybble) for nybble in hex(more_itertools.take(12, bits_iter(now.time())))))
    print('---')
    # Remaining lines: conventional clock time, then ISO week/weekday
    # (isocalendar()[1] and [2]) together with the date.
    print('{:%H:%M:%S}'.format(now.time()))
    print('w{0[1]}.{0[2]}: {1:%Y-%m-%d}'.format(now.date().isocalendar(), now.date()))
| [
"fenhl@fenhl.net"
] | fenhl@fenhl.net |
4afc257a660ec3c6036e5a3391b855598c441d8b | 75b1e1da6297963dc6da84dcd549ef3f9e46f963 | /Voice-wake-up/dataset.py | c10476bdb399a22a54ed176cf97053d82501c2d5 | [] | no_license | Chase2816/Voice-wake-up | 366797a69fe144060db48fb554a55e16f196b821 | e0f0cd207ad86cdee6a7e3c4ab2285309ceb3f78 | refs/heads/master | 2021-10-09T20:19:23.342325 | 2019-01-03T05:24:17 | 2019-01-03T05:24:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
import os
import numpy as np
import wave
class Mydataset(Dataset):
def __init__(self,path):
self.path=path
self.dataset=[]
for file in os.listdir(os.path.join(self.path,r"positive")):
self.dataset.append(os.path.join(self.path,r"positive",file))
for file in os.listdir(os.path.join(self.path,r"negative")):
self.dataset.append(os.path.join(self.path,r"negative",file))
np.random.shuffle(self.dataset)
def __getitem__(self,item):
data_path=self.dataset[item]
data=wave.open(data_path,"rb")
parm=data.getparams()
#nchannels: 声道数,sampwidth: 量化位数(byte),framerate: 采样频率,nframes: 采样点数
nchannels,sampwidth,framerate,nframes=parm[:4]
strdata=data.readframes(nframes)
wavedata=np.fromstring(strdata,dtype=np.float32)
x=wavedata*1.0/(max(abs(wavedata)))
if os.path.samefile(data_path,os.path.join(self.path,r"positive",data_path.split("\\")[-1])):
y=1
elif os.path.samefile(data_path,os.path.join(self.path,r"negative",data_path.split("\\")[-1])):
y=0
return x,y
def __len__(self):
return len(self.dataset)
| [
"noreply@github.com"
] | noreply@github.com |
3fae578b5162e7f5acb831c405be63172c98b6df | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/300a07a56fa3435d495e1ce8762b25d84931bfae7c2899c2825326bcc799b818/typing/re.py | 2336d186762e5e40248962573d6c349ef6e6ffaa | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | # encoding: utf-8
# module typing.re
# from C:\Users\Doly\Anaconda3\lib\site-packages\statsmodels\tsa\statespace\_representation.cp37-win_amd64.pyd
# by generator 1.147
""" Wrapper namespace for re type aliases. """
# no imports
# functions
def Match(*args, **kwargs): # real signature unknown
"""
The central part of internal API.
This represents a generic version of type 'origin' with type arguments 'params'.
There are two kind of these aliases: user defined and special. The special ones
are wrappers around builtin collections and ABCs in collections.abc. These must
have 'name' always set. If 'inst' is False, then the alias can't be instantiated,
this is used by e.g. typing.List and typing.Dict.
"""
pass
def Pattern(*args, **kwargs): # real signature unknown
"""
The central part of internal API.
This represents a generic version of type 'origin' with type arguments 'params'.
There are two kind of these aliases: user defined and special. The special ones
are wrappers around builtin collections and ABCs in collections.abc. These must
have 'name' always set. If 'inst' is False, then the alias can't be instantiated,
this is used by e.g. typing.List and typing.Dict.
"""
pass
# no classes
# variables with complex values
__all__ = [
'Pattern',
'Match',
]
__weakref__ = None # (!) real value is "<attribute '__weakref__' of 'typing.re' objects>"
| [
"qinkunpeng2015@163.com"
] | qinkunpeng2015@163.com |
e53e17ad881b1216b23d47377ac2c6c683b429e3 | bd079d3a02c2f3ec192bbf580cde41420bc0e7c1 | /app/migrations/versions/5beee08aaf5b_room_table.py | 1311fdbb6b8e58e0663e38c2e775dabc5ae93966 | [] | no_license | freddiejbawden/cloneful | 7a30317d18687581aef8490c5d3093049153e6e0 | 5c049df6c7c2a4f7137e8001f64f3f2bd01ca05f | refs/heads/master | 2020-03-24T00:51:08.613520 | 2018-10-14T14:24:29 | 2018-10-14T14:24:29 | 142,311,150 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | """room table
Revision ID: 5beee08aaf5b
Revises:
Create Date: 2018-07-18 14:29:29.729000
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5beee08aaf5b'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('room',
sa.Column('id', sa.String(length=4), nullable=False),
sa.Column('host', sa.String(length=16), nullable=True),
sa.Column('players', sa.String(length=256), nullable=True),
sa.Column('gameState', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('room')
# ### end Alembic commands ###
| [
"31216525+freddiejbawden@users.noreply.github.com"
] | 31216525+freddiejbawden@users.noreply.github.com |
78fe24d5908a310034dde4b1a3a62b86bef8dd4f | 1eb1c06f8df31a01ad57a76f51a01c05ecb436ac | /set/psvv.py | c7c59fc3d7f79fd175ea84c00a8c04169b259b19 | [] | no_license | yyang19/toolbox | 2448e4f9140c1edef9307b4df40451e72086cd66 | a05f0d12b50321b5eea62d33e791b63d5c4094bf | refs/heads/master | 2018-12-18T16:42:56.912761 | 2018-09-14T17:27:47 | 2018-09-14T17:27:47 | 28,947,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | from itertools import chain, combinations
import numpy as np
from classes.container import Container
from zipfgen import ZipfGenerator
import cProfile
class psvv(object):
    """Exhaustive average of Container.psvv over all candidate sets.

    N     -- problem size (number of items)
    B     -- container/cache size (number of slots)
    arr_p -- per-item probabilities; they are used as set elements below,
             so duplicate values would collapse -- TODO confirm they are
             guaranteed distinct.
    """
    def __init__(self,N,B,arr_p):
        self.c=Container(B)
        self.N=N
        self.B=B
        self.arr_p=arr_p
        return
    def __del__(self):
        # No cleanup required; kept as an explicit no-op.
        return
    def get(self, e, X):
        """Average ``Container.psvv(S, e, X)`` over every size-B set S
        that contains ``e`` plus B-1 of the other probabilities.

        ``e`` must be one of the values in ``arr_p``; ``X`` is forwarded
        to Container.psvv unchanged (its semantics are defined there).
        """
        #arr_p = np.random.random((N,))
        #arr_p /= sum(L_prob)
        # All probabilities except the pinned element e.
        S_prob = set(self.arr_p)
        S_e = set([e])
        S_diff = S_prob-S_e
        p_sum = 0
        for L_X in set(combinations(S_diff, self.B-1)):
            S_X=set(L_X)
            c_psvv = self.c.psvv( S_X|S_e, e, X )
            # Each term is scaled by sum(S_X)/(1-e).
            p_sum += ( c_psvv * sum(S_X)/(1-e) )
        # NOTE(review): the combinations are enumerated a second time just
        # to count them, doubling the enumeration cost.
        result = p_sum/len(set(combinations(S_diff, self.B-1)))
        return result
# Experiment parameters: N items, container of B slots, X forwarded to
# Container.psvv via psvv.get().
N=14
B=8
X=4
# Draw a Zipf(0.6) sample over N items and normalise it into a
# probability vector summing to 1.
zg=ZipfGenerator(N,0.6)
za=zg.randZipf()
za = [float(i)/sum(za) for i in za]
# NOTE(review): Python 2 print statement -- this script targets Python 2.
print za, sum(za)
psvv_obj=psvv(N,B,za)
#p=psvv_obj.get( za[0], X)
# Profile a single evaluation instead of calling it directly.
cProfile.run('psvv_obj.get( za[0], X)')
#print p
| [
"yueyang2010@gmail.com"
] | yueyang2010@gmail.com |
0a5d100e0892ec58c593d7e9ec545e93ce8e0902 | b0e73ff777b74516fe2447eaed057e165e0be49d | /bookapp/catalog/migrations/0041_remove_orderbook_tot_quantity.py | 6e9e02fdaf294e5d380f024a56b6e9423267e4b5 | [] | no_license | mgonz893/bookdjango | 29b3272f308d7eb4038cc6a36e839d8619d2aa96 | 323e681db55e45f08579f508a381705c9578d641 | refs/heads/master | 2020-12-26T23:36:03.780481 | 2020-04-12T20:23:48 | 2020-04-12T20:23:48 | 237,686,759 | 0 | 0 | null | 2020-03-27T18:15:10 | 2020-02-01T22:28:33 | Python | UTF-8 | Python | false | false | 337 | py | # Generated by Django 3.0.2 on 2020-03-22 17:43
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the ``tot_quantity`` field from
    the ``orderbook`` model."""
    # Must be applied after catalog migration 0040.
    dependencies = [
        ('catalog', '0040_auto_20200321_2002'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='orderbook',
            name='tot_quantity',
        ),
    ]
| [
"mguad010@fiu.edu"
] | mguad010@fiu.edu |
ed1149b92c5cbd94797d43c59af168a6185672cc | 81fe9c8ecf0a7af743356b40af3237ecf85d4a3a | /convertWire.py | b28598f8a291b190552b1a25992caf115ba85372 | [] | no_license | d-b-w-gain/Electrical | 8ee6b6441d38a8abe40f49368b0d15d544384df5 | 9a6bb4c9849a027a408016a909a09d0332fb42bf | refs/heads/master | 2022-12-08T06:28:55.660200 | 2020-08-18T06:28:28 | 2020-08-18T06:28:28 | 288,320,080 | 0 | 0 | null | 2020-08-18T01:20:20 | 2020-08-18T00:57:41 | Python | UTF-8 | Python | false | false | 1,219 | py | #!/usr/bin/env python
# coding: utf-8
# # Convert from AWG to CSA mm^2 using the ASTM B258-02 #
# This utility is provided for ROM convenience only. Engineering calculation must be preformed directly from standards.
# In[3]:
import csv
import os
def AWG2CSA(gauge):
    """Look up the nominal cross-sectional area for an AWG gauge.

    Reads the ASTM B258-02 Table 1 CSV ('B258-02-T1.csv') from the current
    working directory and returns the CSA column (mm^2) for *gauge* as a
    string, or None when the gauge is not listed.
    """
    # BUG FIX: build the path with os.path.join instead of concatenating
    # with a literal '/', so the lookup also works on Windows.  The stray
    # trailing semicolons are dropped as well.
    file_path = os.path.join(os.path.abspath('.'), 'B258-02-T1.csv')
    with open(file_path, mode='r') as infile:
        reader = csv.reader(infile)
        # Column 0 is the AWG gauge, column 5 the nominal CSA in mm^2.
        mydict = {rows[0]: rows[5] for rows in reader}
    return mydict.get(str(gauge))
# In[4]:
## For testing
# AWG = int(input("Enter required AWG: "))
# print('For an AWG of: '+str(AWG))
# print('The Nominal CSA is: '+AWGtoCSA(AWG)+' mm\u00b2')
# In[ ]:
def CSA2AWG(CSA):
    """Look up the AWG gauge whose nominal CSA is at least *CSA* (mm^2).

    Reads the ASTM B258-02 Table 1 CSV ('B258-02-T1.csv') from the current
    directory and returns the gauge (as a string) of the first row, in file
    order, whose CSA column is >= *CSA*.  Raises IndexError when no row
    qualifies.
    """
    fileName = 'B258-02-T1.csv'
    with open(fileName, mode='r') as infile:
        reader = csv.reader(infile)
        # CSA string -> AWG gauge; the header row maps 'mm2' -> 'AWG' and
        # is removed below.
        mydict = {rows[5]: rows[0] for rows in reader}
    del mydict["mm2"]
    # BUG FIX / cleanup: the original read the file a second time into a
    # dict (mydict2) that was never used, and selected the result through a
    # convoluted dict/list round trip.  The result is unchanged: the first
    # key in file order whose CSA is >= the requested value.
    matches = [k for k in mydict if float(k) >= CSA]
    return mydict.get(matches[0])
| [
"noreply@github.com"
] | noreply@github.com |
08b90866b78ffc606ed71f9acec6f5ea3f91bbc8 | 08de8487e05e22e54d24c2d8f2bf7b43ea9061f3 | /src/Control/mainControl.py | f86f6f16cb2e5ffd7185f3f3d3ee152804a9a77b | [] | no_license | faitdivers/pyao | ce539630ff393b473b8244e086b84b725058743d | 93fafa6d0cc39d1e312890c6ddeecb1437b9a7ce | refs/heads/master | 2021-01-22T11:59:19.133317 | 2014-07-03T16:24:48 | 2014-07-03T16:24:48 | 19,397,453 | 2 | 0 | null | 2014-07-03T16:24:48 | 2014-05-03T06:50:50 | TeX | UTF-8 | Python | false | false | 112 | py | from numpy import *
def control(wfRec, paramsAct):
    """Return an all-zero actuator command grid of shape (numActx, numActy).

    ``wfRec`` is accepted for interface compatibility but is not used by
    this placeholder controller.
    """
    shape = (paramsAct['numActx'], paramsAct['numActy'])
    return zeros(shape)
| [
"jpedro.e.silva@gmail.com"
] | jpedro.e.silva@gmail.com |
743e4249bd85556ce8656911945887f925769dfc | 37fe2837f68d9cfbcb4610525da1e60ed5935ec7 | /etcdclient/command_desc.py | 12065c00f22f4e68cbf97c81db0735b98562e7db | [] | no_license | jrepp/containers | a49ee3a2138d814faba9f6faba99a633dce92b2d | 23f2e0fb88f1a85d65f205cf4730e4dcf7df1759 | refs/heads/master | 2020-06-17T01:36:19.127261 | 2017-04-27T15:10:48 | 2017-04-27T15:12:53 | 75,051,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | import time
from desc import Desc
# Keyword arguments every command descriptor must supply; enforced in
# validate_init() via Desc.check_required.
REQUIRED = [
    'region',
    'name',
    'args',
    'user' ]
class CommandDesc(Desc):
    """Descriptor for a command stored as a child of an existing base path.

    The acknowledged/pending/duration/completed fields start as None and
    are presumably filled in as the command progresses -- confirm against
    the consumers of this class.
    """
    def __init__(self, base_path, context, **kwargs):
        Desc.__init__(self)
        # Commands descriptors are stored as a child under an existing base path
        self._base_path = base_path
        self.region = kwargs.get('region')
        self.site = kwargs.get('site')
        self.name = kwargs.get('name')
        self.args = kwargs.get('args')
        self.user = kwargs.get('user')
        # Creation timestamp, seconds since the epoch.
        self.created = time.time()
        self.acknowledged = None
        self.pending = None
        self.duration = None
        self.completed = None
        # Validates the REQUIRED kwargs (behaviour defined by Desc).
        self.validate_init(context)
    def base_path(self):
        """Return the base path this descriptor was created under."""
        return self._base_path
    def validate_init(self, context):
        """Extend Desc validation with this class's REQUIRED-kwargs check."""
        Desc.validate_init(self, context)
        self.check_required(REQUIRED)
| [
"jacobrepp@gmail.com"
] | jacobrepp@gmail.com |
48d4b9fe1ff3432fc5ef22a33a9d4014933c5d2c | 53983c1dbd4e27d918237d22287f1838ae42cc92 | /tools/txtIO.py | 03e55b006916f7db35cb202eb15e7466473f3329 | [] | no_license | xshii/MDAOXS | da5060ea6b6ac600b3b85dddbb7460f62ab4a684 | d4c54b79d7c84740bf01d8e8573e54522de2e6d0 | refs/heads/master | 2021-09-24T10:35:31.295574 | 2018-10-08T10:54:44 | 2018-10-08T10:54:44 | 108,884,304 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | import numpy as np
def stlTxtToNpArray(path):
    """Parse a ';'-separated point-cloud text file into an (n, 3) float array.

    Each line of at least 5 characters is split on ';', the fields are
    stripped, and the row is converted to floats.  A mangled exponent in the
    second column (e.g. '-1.2-5') is rewritten into 'e-' notation
    ('-1.2e-5') before conversion -- behaviour preserved from the original
    parser.
    """
    # BUG FIX: the original immediately overwrote *path* with a hard-coded,
    # user-specific file name, silently ignoring the argument.
    rows = []
    with open(path) as fp:
        for line in fp:
            # Skip blank/short lines.
            if len(line) < 5:
                continue
            fields = [field.strip() for field in line.strip().split(';')]
            # Repair a mangled exponent in column 1 ('-1.2-5' -> '-1.2e-5').
            # NOTE(review): the single-'-' branch yields strings like
            # 'e-1.2' for plain negatives; kept exactly as in the original.
            if fields[1].startswith('-'):
                if fields[1].count('-') == 2:
                    fields[1] = fields[1].replace('-', 'e-')[1:]
                else:
                    fields[1] = fields[1].replace('-', 'e-')
            rows.append(np.array(fields, dtype='float'))
    # Stack once at the end instead of np.vstack inside the loop, which was
    # accidentally O(n^2) in the number of lines.
    if not rows:
        return np.empty((0, 3))
    return np.vstack(rows)
# Manual smoke test: parse the sample point cloud and re-export it with
# ';' separators (see np.savetxt call below).
if __name__=='__main__':
    path = r"/Users/gakki/Downloads/SU2_mesh_point_clouds/Optimale_orig_points.txt"
    mesh = stlTxtToNpArray(path=path)
np.savetxt('new_mesh.txt',mesh,delimiter=';') | [
"xshi@kth.se"
] | xshi@kth.se |
39bb988ebc724ca84a128d54b3579fda941215ea | f4261f1a32d6d9b9ada80d0d7e1055aa360060a8 | /fabric/contrib/idp/__init__.py | 4e1eb8096cfdd70e160ab395cf6971f5ac408c01 | [
"BSD-2-Clause"
] | permissive | freyley/fabric | 3f472c27694a7914ce052f12400038e5f8eb3054 | 3e8c8c754f11f5393c90b2f11aef7c8132f34bd0 | refs/heads/master | 2021-01-18T05:59:00.933066 | 2011-10-18T22:58:01 | 2011-10-18T22:58:01 | 2,564,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py |
# Re-export the filesystem and VCS helpers as this package's public API.
from fs import Directory, Symlink
from vcs import GitRepository
__all__=["Directory", "GitRepository", "Symlink"]
| [
"freyley@gmail.com"
] | freyley@gmail.com |
f41abfac30c6595febcd632edd7376180f78b1d5 | 92807062430feeedf9a72d589b57263b95bc180a | /profiles_project/settings.py | d6950447bfb0d9ff2211189fc27ea42db9d5066e | [] | no_license | blackerknight/profiles-rest-api | 40f2a8bf6d3053077a9a793b8fb4b34d920e0477 | b777a0f86e7d9fcf40587e51691435fb4edc7ea6 | refs/heads/main | 2023-07-31T12:21:50.689168 | 2021-09-15T03:16:52 | 2021-09-15T03:16:52 | 389,187,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'htalc+g(5%h2zo9y#@5#lixtb$q8b4r5cn2v5=d5l)4g3(*&^)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Host/domain names this Django site is allowed to serve.
# BUG FIX: the original contained a duplicated
# "ALLOWED_HOSTS = ALLOWED_HOSTS = [...]" double assignment; a single
# assignment is sufficient and less confusing.
ALLOWED_HOSTS = [
    'localhost',
    '0.0.0.0',
    '127.0.0.1',
    '147.182.190.239',
    'appsemm.xyz',
    'www.appsemm.xyz',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| [
"eduardomancillamorales@gmail.com"
] | eduardomancillamorales@gmail.com |
167439e261d55b4a6013812c4b86be943e29dd30 | 16e8129f7a12239ed49aabfa04549d90419bb12e | /old_explore.py | 1b2cf18e07a1f4df23eb6398cb62957b72fd8c45 | [] | no_license | Seanny123/hrl_analysis | 0a5a4ff8b672d05760e39ec5558557220d71459d | 7ccf2beea6090c1493c4dce95630ef251f9c6548 | refs/heads/master | 2020-05-18T20:59:29.098987 | 2015-01-07T23:18:56 | 2015-01-07T23:18:56 | 28,878,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | not_equal = []
# Element-wise comparison of the first two entries of `file_list`,
# building a 0/1 mismatch mask in `not_equal` and a mismatch tally in
# `count`.
# NOTE(review): `file_list` and `not_equal` are defined elsewhere in the
# original exploration script; this file is only a fragment and will not
# run on its own.
count = 0
for i,j in zip(file_list[0], file_list[1]):
    if(i != j ):
        not_equal.append(1)
        count += 1
    else:
not_equal.append(0) | [
"saubin@uwaterloo.ca"
] | saubin@uwaterloo.ca |
90e532be7870a0d9766f889bfa76fb4503a75568 | 4b64e1f051a18759eecdaf8365a78b6c70e0efdb | /wsgi.py | b6b153bc34a231494967898bfb755f9d6ba51465 | [] | no_license | AliHashimi12345/facebook_login | ac9f2b7e4cf2c0c08036c7b93ede7807cc3ea2f1 | a682e36ad6315624be2ae7b5e713694c8f7c56bf | refs/heads/master | 2023-09-01T14:56:05.128411 | 2021-11-02T13:10:56 | 2021-11-02T13:10:56 | 423,847,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for Alihashimi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Alihashimi.settings')
application = get_wsgi_application()
| [
"alihashimivns@gmail.com"
] | alihashimivns@gmail.com |
1a37d2f7a618537fb84f62c141d88105e25238a2 | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v4/services/ad_group_service_client_config.py | 1931ef3dfd64f9494a515ac86c2f1db21526b546 | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | false | 964 | py | config = {
"interfaces": {
"google.ads.googleads.v4.services.AdGroupService": {
"retry_codes": {
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"GetAdGroup": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"MutateAdGroups": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
}
}
}
}
}
| [
"noreply@github.com"
] | noreply@github.com |
0b8f8969df2ebec37197e526f411a892c8f2a200 | 7976802b53fcdc266167aff97f4fff1a0d801836 | /pythonOperators/program_3.py | 9e3adf99770c1d78bdb4e44d23d5e73d2c6ed262 | [] | no_license | UpendraDange/JALATechnologiesAssignment | 938c6ab3384533099b8a8399d5511844cde2cb0a | b81f60ee1f2e836ad51dadc94f007e3a6d81af3e | refs/heads/master | 2023-04-20T08:45:33.251261 | 2021-05-19T16:52:32 | 2021-05-19T16:52:32 | 368,906,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | """
3. Program to equal operator and not equal operators
"""
num1 = 10
num2 = 10
num3 = 20
print(num1==num3)
print(num3!=num2)
| [
"upendradange@gamil.com"
] | upendradange@gamil.com |
59b1db902116a96f05510d33d28160719939cf87 | 1e53bf7cf0c8f02a96c57bdf036acccf0df2385d | /Practice1.py | 16541753397350c441319bd374927dbd8d2349d0 | [] | no_license | FitzpatrickAW2085/Back-Up | 8478e84da5863e0bc3ff8952980ebacfa99fba87 | 8d4a664ed2c6649be97f65b9e4bf7395e0232888 | refs/heads/master | 2020-08-26T20:53:31.352831 | 2019-10-31T20:00:34 | 2019-10-31T20:00:34 | 217,145,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # Alan Fitzpatrick
# 9/23/19 Period 3
# Practice1.py
print("4 * 8 = " + str(4 * 8)) # we need to cast this to string
print("12 / 2 = " + str(12 / 2))
print("-5 - 6 * 3 = " + str(-5 - 6 * 3))
age = input("What is your age?: ")
age = int (age)
print("In ten years you will be " + str (age + 10))
print("Go Vipers!\n" * 20)
myNum = 20
myNum = myNum + 5
myOtherNum = 10
print ("My num is " + str(myNum + myOtherNum))
| [
"noreply@github.com"
] | noreply@github.com |
66c824ae5c6b6b235b3bb178b980a4953d6ba68e | b615aa786c2a57809196713920e784187b1c1cd6 | /53_finetune_crf_loss_new_mask_LOC_max_seq_128_batch_32_lr5e5_lr5e5/CRF2.py | cee3a4b4c1166c0eead2b032679d7c877b88dd3f | [] | no_license | hoon4233/KoElectra-CRF | 5ae14d883271e78fcd344b169dddf49a34789af1 | 0adf89715d1369de097160cb821f931386f2ebb0 | refs/heads/master | 2023-07-16T19:19:25.229933 | 2021-09-04T03:13:48 | 2021-09-04T03:13:48 | 402,653,513 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,310 | py | from typing import List, Optional
import torch
import torch.nn as nn
class CRF(nn.Module):
"""Conditional random field.
This module implements a conditional random field [LMP01]_. The forward computation
of this class computes the log likelihood of the given sequence of tags and
emission score tensor. This class also has `~CRF.decode` method which finds
the best tag sequence given an emission score tensor using `Viterbi algorithm`_.
Args:
num_tags: Number of tags.
batch_first: Whether the first dimension corresponds to the size of a minibatch.
Attributes:
start_transitions (`~torch.nn.Parameter`): Start transition score tensor of size
``(num_tags,)``.
end_transitions (`~torch.nn.Parameter`): End transition score tensor of size
``(num_tags,)``.
transitions (`~torch.nn.Parameter`): Transition score tensor of size
``(num_tags, num_tags)``.
.. [LMP01] Lafferty, J., McCallum, A., Pereira, F. (2001).
"Conditional random fields: Probabilistic models for segmenting and
labeling sequence data". *Proc. 18th International Conf. on Machine
Learning*. Morgan Kaufmann. pp. 282–289.
.. _Viterbi algorithm: https://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def __init__(self, num_tags: int, batch_first: bool = False) -> None:
if num_tags <= 0:
raise ValueError(f'invalid number of tags: {num_tags}')
super().__init__()
self.num_tags = num_tags
self.batch_first = batch_first
self.start_transitions = nn.Parameter(torch.empty(num_tags))
self.end_transitions = nn.Parameter(torch.empty(num_tags))
self.transitions = nn.Parameter(torch.empty(num_tags, num_tags))
self.reset_parameters()
def reset_parameters(self) -> None:
"""Initialize the transition parameters.
The parameters will be initialized randomly from a uniform distribution
between -0.1 and 0.1.
"""
nn.init.uniform_(self.start_transitions, -0.1, 0.1)
nn.init.uniform_(self.end_transitions, -0.1, 0.1)
nn.init.uniform_(self.transitions, -0.1, 0.1)
def __repr__(self) -> str:
return f'{self.__class__.__name__}(num_tags={self.num_tags})'
def forward(
self,
emissions: torch.Tensor,
tags: torch.LongTensor,
mask: Optional[torch.ByteTensor] = None,
reduction: str = 'sum',
) -> torch.Tensor:
"""Compute the conditional log likelihood of a sequence of tags given emission scores.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
tags (`~torch.LongTensor`): Sequence of tags tensor of size
``(seq_length, batch_size)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
reduction: Specifies the reduction to apply to the output:
``none|sum|mean|token_mean``. ``none``: no reduction will be applied.
``sum``: the output will be summed over batches. ``mean``: the output will be
averaged over batches. ``token_mean``: the output will be averaged over tokens.
Returns:
`~torch.Tensor`: The log likelihood. This will have size ``(batch_size,)`` if
reduction is ``none``, ``()`` otherwise.
"""
self._validate(emissions, tags=tags, mask=mask)
if reduction not in ('none', 'sum', 'mean', 'token_mean'):
raise ValueError(f'invalid reduction: {reduction}')
if mask is None:
mask = torch.ones_like(tags, dtype=torch.uint8)
if self.batch_first:
emissions = emissions.transpose(0, 1)
tags = tags.transpose(0, 1)
mask = mask.transpose(0, 1)
# shape: (batch_size,)
numerator = self._compute_score(emissions, tags, mask)
# shape: (batch_size,)
denominator = self._compute_normalizer(emissions, mask)
# shape: (batch_size,)
llh = numerator - denominator
if reduction == 'none':
return llh
if reduction == 'sum':
return llh.sum()
if reduction == 'mean':
return llh.mean()
assert reduction == 'token_mean'
return llh.sum() / mask.float().sum()
def decode(self, emissions: torch.Tensor,
mask: Optional[torch.ByteTensor] = None) -> List[List[int]]:
"""Find the most likely tag sequence using Viterbi algorithm.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
Returns:
List of list containing the best tag sequence for each batch.
"""
self._validate(emissions, mask=mask)
if mask is None:
mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8)
if self.batch_first:
emissions = emissions.transpose(0, 1)
mask = mask.transpose(0, 1)
return self._viterbi_decode(emissions, mask)
def _validate(
self,
emissions: torch.Tensor,
tags: Optional[torch.LongTensor] = None,
mask: Optional[torch.ByteTensor] = None) -> None:
if emissions.dim() != 3:
raise ValueError(f'emissions must have dimension of 3, got {emissions.dim()}')
if emissions.size(2) != self.num_tags:
raise ValueError(
f'expected last dimension of emissions is {self.num_tags}, '
f'got {emissions.size(2)}')
if tags is not None:
if emissions.shape[:2] != tags.shape:
raise ValueError(
'the first two dimensions of emissions and tags must match, '
f'got {tuple(emissions.shape[:2])} and {tuple(tags.shape)}')
if mask is not None:
if emissions.shape[:2] != mask.shape:
raise ValueError(
'the first two dimensions of emissions and mask must match, '
f'got {tuple(emissions.shape[:2])} and {tuple(mask.shape)}')
no_empty_seq = not self.batch_first and mask[0].all()
no_empty_seq_bf = self.batch_first and mask[:, 0].all()
if not no_empty_seq and not no_empty_seq_bf:
raise ValueError('mask of the first timestep must all be on')
def _compute_score(
self, emissions: torch.Tensor, tags: torch.LongTensor,
mask: torch.ByteTensor) -> torch.Tensor:
# emissions: (seq_length, batch_size, num_tags)
# tags: (seq_length, batch_size)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and tags.dim() == 2
assert emissions.shape[:2] == tags.shape
assert emissions.size(2) == self.num_tags
assert mask.shape == tags.shape
assert mask[0].all()
seq_length, batch_size = tags.shape
mask = mask.float()
# Start transition score and first emission
# shape: (batch_size,)
score = self.start_transitions[tags[0]]
score += emissions[0, torch.arange(batch_size), tags[0]]
for i in range(1, seq_length):
# Transition score to next tag, only added if next timestep is valid (mask == 1)
# shape: (batch_size,)
score += self.transitions[tags[i - 1], tags[i]] * mask[i]
# Emission score for next tag, only added if next timestep is valid (mask == 1)
# shape: (batch_size,)
score += emissions[i, torch.arange(batch_size), tags[i]] * mask[i]
# End transition score
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
# shape: (batch_size,)
last_tags = tags[seq_ends, torch.arange(batch_size)]
# shape: (batch_size,)
score += self.end_transitions[last_tags]
return score
def _compute_normalizer(
self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and mask.dim() == 2
assert emissions.shape[:2] == mask.shape
assert emissions.size(2) == self.num_tags
assert mask[0].all()
seq_length = emissions.size(0)
# Start transition score and first emission; score has size of
# (batch_size, num_tags) where for each batch, the j-th column stores
# the score that the first timestep has tag j
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
for i in range(1, seq_length):
# Broadcast score for every possible next tag
# shape: (batch_size, num_tags, 1)
broadcast_score = score.unsqueeze(2)
# Broadcast emission score for every possible current tag
# shape: (batch_size, 1, num_tags)
broadcast_emissions = emissions[i].unsqueeze(1)
# Compute the score tensor of size (batch_size, num_tags, num_tags) where
# for each sample, entry at row i and column j stores the sum of scores of all
# possible tag sequences so far that end with transitioning from tag i to tag j
# and emitting
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emissions
# Sum over all possible current tags, but we're in score space, so a sum
# becomes a log-sum-exp: for each sample, entry i stores the sum of scores of
# all possible tag sequences so far, that end in tag i
# shape: (batch_size, num_tags)
next_score = torch.logsumexp(next_score, dim=1)
# Set score to the next score if this timestep is valid (mask == 1)
# shape: (batch_size, num_tags)
score = torch.where(mask[i].unsqueeze(1), next_score, score)
# End transition score
# shape: (batch_size, num_tags)
score += self.end_transitions
# Sum (log-sum-exp) over all possible tags
# shape: (batch_size,)
return torch.logsumexp(score, dim=1)
def _viterbi_decode(self, emissions: torch.FloatTensor,
mask: torch.ByteTensor) -> List[List[int]]:
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and mask.dim() == 2
assert emissions.shape[:2] == mask.shape
assert emissions.size(2) == self.num_tags
assert mask[0].all()
seq_length, batch_size = mask.shape
# Start transition and first emission
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
history = []
# score is a tensor of size (batch_size, num_tags) where for every batch,
# value at column j stores the score of the best tag sequence so far that ends
# with tag j
# history saves where the best tags candidate transitioned from; this is used
# when we trace back the best tag sequence
# Viterbi algorithm recursive case: we compute the score of the best tag sequence
# for every possible next tag
for i in range(1, seq_length):
# Broadcast viterbi score for every possible next tag
# shape: (batch_size, num_tags, 1)
broadcast_score = score.unsqueeze(2)
# Broadcast emission score for every possible current tag
# shape: (batch_size, 1, num_tags)
broadcast_emission = emissions[i].unsqueeze(1)
# Compute the score tensor of size (batch_size, num_tags, num_tags) where
# for each sample, entry at row i and column j stores the score of the best
# tag sequence so far that ends with transitioning from tag i to tag j and emitting
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emission
# Find the maximum score over all possible current tag
# shape: (batch_size, num_tags)
next_score, indices = next_score.max(dim=1)
# Set score to the next score if this timestep is valid (mask == 1)
# and save the index that produces the next score
# shape: (batch_size, num_tags)
score = torch.where(mask[i].unsqueeze(1), next_score, score)
history.append(indices)
# End transition score
# shape: (batch_size, num_tags)
score += self.end_transitions
# Now, compute the best path for each sample
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
best_tags_list = []
for idx in range(batch_size):
# Find the tag which maximizes the score at the last timestep; this is our best tag
# for the last timestep
_, best_last_tag = score[idx].max(dim=0)
best_tags = [best_last_tag.item()]
# We trace back where the best last tag comes from, append that to our best tag
# sequence, and trace it back again, and so on
for hist in reversed(history[:seq_ends[idx]]):
best_last_tag = hist[idx][best_tags[-1]]
best_tags.append(best_last_tag.item())
# Reverse the order because we start from the last timestep
best_tags.reverse()
best_tags_list.append(best_tags)
return best_tags_list
| [
"wogns3141@gmail.com"
] | wogns3141@gmail.com |
2252131802f40a5592ac2afb001b250bcf728d2b | 1312041dbfbdd30d0d888366c34cd294eb6039f8 | /trial/working_code.py.py | 500be764cc0b225ecaaca1ecc79b82e16b13512b | [] | no_license | BMSCE-Robotics-Club/Flipkart-GRiD-3.0-Round-1 | e8467dcb3836e7300d265336a8fb556270a912fb | a27a609e81791c806a7f0e7cf19546a5f1684f59 | refs/heads/main | 2023-07-13T23:41:24.243110 | 2021-08-31T06:28:54 | 2021-08-31T06:28:54 | 387,833,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,440 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 16 17:54:45 2021
@author: FranticUser
"""
import cv2
import imutils
import sys
from imutils.video import VideoStream
import time
sp1,sp2,sp3,sp4 = (480, 0),(540, 00),(600, 0),(660, 0)
# Initializing turning points coordinates for this configuration
tp1,tp2,tp3,tp4 = (480,480),(540,540),(600,540),(660,480)
# Initiazling final points for the bots to reach
fp1,fp2,fp3,fp4 = (0,480),(0,540),(1140,540),(1140,480)
ARUCO_DICT = {
"DICT_4X4_50": cv2.aruco.DICT_4X4_50,
"DICT_4X4_100": cv2.aruco.DICT_4X4_100,
"DICT_4X4_250": cv2.aruco.DICT_4X4_250,
"DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
"DICT_5X5_50": cv2.aruco.DICT_5X5_50,
"DICT_5X5_100": cv2.aruco.DICT_5X5_100,
"DICT_5X5_250": cv2.aruco.DICT_5X5_250,
"DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
"DICT_6X6_50": cv2.aruco.DICT_6X6_50,
"DICT_6X6_100": cv2.aruco.DICT_6X6_100,
"DICT_6X6_250": cv2.aruco.DICT_6X6_250,
"DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
"DICT_7X7_50": cv2.aruco.DICT_7X7_50,
"DICT_7X7_100": cv2.aruco.DICT_7X7_100,
"DICT_7X7_250": cv2.aruco.DICT_7X7_250,
"DICT_7X7_1000": cv2.aruco.DICT_7X7_1000
}
def moveForward():
print("F")
def rotateRight(x):
print("rotr")
def rotateLeft(x):
print('rotl')
def PlotForwardPath(image,startpoint,turnpoint,endpoint,direction):
#the point moves vertically down first and then chooses left or right
x1,y1= startpoint
x2,y2= endpoint
a1,a2= turnpoint
start = True
#direct = (1,2,3,4) map respectively to (up,down,left, right)
while(start):
if(y1<a2):
#vertically moving pixelwise down
#we can write an opencv function here if we want
y1 = y1+60
moveForward()
if(y1>=a2) and direction:
#vertically moving sidewise right
rotateRight(90)
moveForward()
x1 = x1-30
if(y1>=a2) and not direction:
#vertically moving sidewise left
rotateLeft(90)
moveForward()
x1 = x1+30
if(x1 in range(x2-10,x2+10) and y1 in range (y2-10,y2+10)):
start = False
# stop()
print("Destination Reached!")
return (x1,y1)
def PlotReversePath(image,startpoint,turnpoint,endpoint,direction):
# the point moves left or right first and then finally moves upwards
x1,y1= endpoint # Actually this is the starting point
x2,y2= startpoint # And this is the end point... i have interchanged because, i wanted the syntax to remain same
a1,a2= turnpoint
start1 = True
while(start1):
if(x1 <= a1) and direction:
#move right
rotateRight(180)
moveForward()
x1 = x1-60
if(x1>a1) and not direction:
#move left
rotateLeft(180)
moveForward()
x1 = x1+60
if(x1 in range(a1-10,a1+10)):
if(direction==1):
rotateLeft(90)
else:
rotateRight(90)
# stop()
start2 = True
while(start2):
if(y1<=a2):
#move top
moveForward()
y1 = y1-60
if(x1 in range(x2-10,x2+10) and y1 in range (y2-10,y2+10)):
start2 = False
# stop()
print("Initial Point Reached!")
return (x1,y1)
def detect_video(frame,dict_type):
if ARUCO_DICT.get(dict_type, None) is None:
print("[INFO] ArUCo tag of '{}' is not supported".format(dict_type))
sys.exit(0)
print("[INFO] detecting '{}' tags...".format(dict_type))
arucoDict = cv2.aruco.Dictionary_get(ARUCO_DICT[dict_type])
arucoParams = cv2.aruco.DetectorParameters_create()
# detect ArUco markers in the input frame
(corners, ids, rejected) = cv2.aruco.detectMarkers(frame,
arucoDict, parameters=arucoParams)
print("Working till here!")
if len(corners) > 0:
ids = ids.flatten()
for (markerCorner, markerID) in zip(corners, ids):
# extract the marker corners
corners = markerCorner.reshape((4, 2))
(topLeft, topRight, bottomRight, bottomLeft) = corners
# convert each of the (x, y)-coordinate pairs to integers
topRight = (int(topRight[0]), int(topRight[1]))
bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
topLeft = (int(topLeft[0]), int(topLeft[1]))
# draw the bounding box of the ArUCo detection
cv2.line(frame, topLeft, topRight, (0, 255, 0), 2)
cv2.line(frame, topRight, bottomRight, (0, 255, 0), 2)
cv2.line(frame, bottomRight, bottomLeft, (0, 255, 0), 2)
cv2.line(frame, bottomLeft, topLeft, (0, 255, 0), 2)
# compute and draw the center (x, y)-coordinates of the
# ArUco marker
cX = int((topLeft[0] + bottomRight[0]) / 2.0)
cY = int((topLeft[1] + bottomRight[1]) / 2.0)
cv2.circle(frame, (cX, cY), 4, (0, 0, 255), -1)
# draw the ArUco marker ID on the frame
cv2.putText(frame, str(markerID),
(topLeft[0], topLeft[1] - 15),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, (0, 255, 0), 2)
return frame
def main():
video = cv2.VideoCapture(0,cv2.CAP_DSHOW)
InitialPoints =[sp1,sp2,sp3,sp4]
TurningPoints =[tp1,tp2,tp3,tp4]
FinalPoints =[fp1,fp2,fp3,fp4]
Directions =[1,1,0,0] # 1 significies left and 0 significies right
while(1):
_,frame = video.read()
vid = detect_video(frame,"DICT_5X5_100")
robNo=4
# for i in range(robNo):
# (x1,y1)=PlotForwardPath(vid,InitialPoints[0], TurningPoints[0], FinalPoints[0],Directions[0])
# print("String works",x1,y1)
# (x2,y2)=PlotReversePath(vid, InitialPoints[0], TurningPoints[0],FinalPoints[0],Directions[0])
# print("Second string also works!",x2,y2)
cv2.imshow("name",vid)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video.release()
cv2.destroyAllWindows()
main() | [
"noreply@github.com"
] | noreply@github.com |
b9a8ddce7bf20f2bb9f63a44a9b1c8e49c9a08d2 | 41ced8351a2ccb62d955b437b8728dd795924cb6 | /separate_by_actors.py | 5251a7fe5c3b9d1a4646bf703f9a43e1e815b5b1 | [] | no_license | taubergm/HollywoodGenderData | fdc64233f580cac6a4ab55e293667d85bf5106f9 | 3f92c28f090673c66abb3df77b3e29de858309b9 | refs/heads/master | 2020-04-04T11:57:21.824251 | 2019-06-16T04:58:27 | 2019-06-16T04:58:27 | 155,909,360 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,049 | py | import csv
import sys
reload(sys)
sys.setdefaultencoding('UTF8')
import re
#import networkx as nx
#import matplotlib.pyplot as plt
import io
filename = sys.argv[1]
csvOutFile = "all_actors_movies.csv"
outCsv = open(csvOutFile, 'wb')
fieldnames = ['year','wiki_ref','wiki_query','producer','distributor','name','country','director','cinematography','editing','studio','budget','gross',
'runtime','music','writer','starring','language','released']
csv_writer = csv.DictWriter(outCsv, fieldnames=fieldnames)
csv_writer.writeheader()
i = 0
with io.open(filename, encoding='utf-8', errors='ignore') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
#print type(row[16])
actors = row[16].split(",")
#print actors
for actor in actors:
actor = re.sub("\[", "", actor)
actor = re.sub("\]", "", actor)
actor = re.sub("\'", "", actor)
actor = actor.strip()
#row[16] = actor
new_csv_row = {}
new_csv_row['year'] = row[0]
new_csv_row['wiki_ref'] = row[1]
new_csv_row['wiki_query'] = row[2]
new_csv_row['producer'] = row[3]
new_csv_row['distributor'] = row[4]
new_csv_row['name'] = row[5]
new_csv_row['country'] = row[6]
new_csv_row['director'] = row[7]
new_csv_row['cinematography'] = row[8]
new_csv_row['editing'] = row[9]
new_csv_row['studio'] = row[10]
new_csv_row['budget'] = row[11]
new_csv_row['gross'] = row[12]
new_csv_row['runtime'] = row[13]
new_csv_row['music'] = row[14]
new_csv_row['writer'] = row[15]
new_csv_row['starring'] = actor
new_csv_row['language'] = row[17]
new_csv_row['released'] = row[18]
csv_writer.writerow(new_csv_row)
i = i + 1
#if (i == 3):
# import sys
# sys.exit()
| [
"michaeltauberg@michaels-MacBook-Pro.local"
] | michaeltauberg@michaels-MacBook-Pro.local |
8e95de029333c6c144fe7923a72ce823d922cfcf | c2102a9f17a9e08988f367cf785eb3f5d7925854 | /backend/home/migrations/0002_load_initial_data.py | 03d33413376a79c8d6e8efbc30a313757e5182c9 | [] | no_license | crowdbotics-apps/frego-24047 | 44d1569e12748ac327867ac08cfee416ae6eef45 | b30631f14473f965604b937458aea3c7739fd170 | refs/heads/master | 2023-02-19T10:00:37.381026 | 2021-01-25T11:07:44 | 2021-01-25T11:07:44 | 332,718,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "Frego"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">Frego</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "frego-24047.botics.co"
site_params = {
"name": "Frego",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
b0940cf39d8c271c519f410dc40b7c6d639f7506 | c46a6ec29f955f8dd9d9a8eb6052f9184e5cc1b4 | /47.py | ebb1232190108eb695b30747677079b1a23d02e0 | [] | no_license | Yuvanshankar21/yuvan | c1589a1e395c9f3468f743cbb878909b1e3837be | 3f02fde8b2aa9a00a6ff09fe2d00d2bb3e69829f | refs/heads/master | 2020-06-11T19:43:36.088900 | 2019-07-17T09:32:26 | 2019-07-17T09:32:26 | 194,064,276 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | num=int(input())
a=[int(i)for i in input().split()]
ma=max(a)
mi=min(a)
print(mi,ma)
| [
"noreply@github.com"
] | noreply@github.com |
beb7d057bf587187b4ebbb72c5e9b08c7ce9fc97 | 0becb4b3d27a369c2f272d0da0e55dfe6f757187 | /cglearn-1/cglearn.py | a336df2844a8cdfa14c173dd638e6eefdf5b251d | [] | no_license | jaimersoncorreia/tp4 | 41a5fc528d6f713c480b8d91716cc08bb73cdfd1 | 430294ad29eff054f0a678126ee9ee89ba3ef051 | refs/heads/master | 2021-01-21T20:23:34.007377 | 2017-06-11T17:24:35 | 2017-06-11T17:24:35 | 92,230,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,785 | py | #!/usr/bin/env python3
import sys
import math
import argparse
import importlib
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import numpy
from transformation import *
from geometry import *
from timing import Timing
import config
from context import Context
from drawingutils import *
parser = argparse.ArgumentParser()
parser.add_argument("configuration_filepath",
metavar="CONFIG_FILE", nargs='?',
help="JSON configuration file to load")
options = parser.parse_args()
config = config.load_config_file(options.configuration_filepath)
compor_cena = None
processar_teclado = None
try:
student_module = importlib.import_module(config.module_name)
compor_cena = getattr(student_module, config.callback_name)
processar_teclado = getattr(student_module, "processar_teclado", None)
except ImportError:
print("*** Atencao: Arquivo %s.py nao foi encontrado."
% config.module_name, file=sys.stderr)
except AttributeError:
print("*** Atencao: Arquivo %s.py nao possui funcao '%s'."
% (config.module_name, config.callback_name), file=sys.stderr)
if compor_cena is None:
def compor_cena(context):
for object_name in context.object_names:
context.draw(object_name)
if processar_teclado is None:
def processar_teclado(key):
pass
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH)
class Interface(object):
    """View/interaction state for the window.

    Tracks zoom level, pan (mouse drag) state, and visibility toggles, and
    maps between window pixel coordinates and scene ("viewport") coordinates
    by configuring the OpenGL projection matrix accordingly.
    """

    # Window size in pixels; assigned by the GLUT reshape callback.
    window_width = None
    window_height = None
    # Integer exponent controlling zoom; see `zoom_factor`.
    zoom_exponent = 0
    # Drag (pan) state.
    _is_dragging = False
    drag_start = None                    # drag origin, viewport coords (1x2 array)
    delta_viewport_by_dragging = None    # displacement since drag start (1x2 array)
    _viewport_fixed_center = (0.0, 0.0)  # committed center, ignoring any live drag
    # Viewport rectangle in scene coordinates; recomputed on every
    # call to set_scene_coords_projection().
    viewport_min_x = None
    viewport_max_x = None
    viewport_min_y = None
    viewport_max_y = None
    # Transition durations (ms) for animated show/hide toggles.
    FAST_TRANSITION_TIME = 100
    SLOW_TRANSITION_TIME = 500
    # Visibility toggles and the RGBA colors used for the shown/hidden
    # states of each overlay (hidden = alpha 0 so transitions can fade).
    show_fill = True
    show_wireframe = False
    VISIBLE_WIREFRAME_COLOR = [1.0, 1.0, 0.0, 1.0]
    HIDDEN_WIREFRAME_COLOR = [1.0, 1.0, 0.0, 0.0]
    show_points = False
    VISIBLE_POINT_BORDER_COLOR = [0.0, 0.0, 0.0, 1.0]
    HIDDEN_POINT_BORDER_COLOR = [0.0, 0.0, 0.0, 0.0]
    VISIBLE_POINT_FILL_COLOR = [1.0, 0.0, 0.0, 1.0]
    HIDDEN_POINT_FILL_COLOR = [1.0, 0.0, 0.0, 0.0]
    show_target = True
    VISIBLE_TARGET_COLOR = [0.0, 0.4, 0.8, 1.0]
    HIDDEN_TARGET_COLOR = [0.0, 0.4, 0.8, 0.0]

    @property
    def is_dragging(self):
        """Whether a mouse drag (pan) is currently in progress."""
        return self._is_dragging

    @property
    def viewport_fixed_center(self):
        """Committed viewport center, i.e. the center ignoring a live drag."""
        return self._viewport_fixed_center

    @viewport_fixed_center.setter
    def viewport_fixed_center(self, value):
        self._viewport_fixed_center = value

    @property
    def viewport_center(self):
        """Effective viewport center; during a drag, the committed center
        offset by the current drag displacement (dragging moves the world
        with the cursor, so the center moves in the opposite direction)."""
        if self._is_dragging:
            return (self._viewport_fixed_center[0]
                    - self.delta_viewport_by_dragging[0, 0],
                    self._viewport_fixed_center[1]
                    - self.delta_viewport_by_dragging[0, 1])
        return self._viewport_fixed_center

    def increment_zoom(self):
        """Zoom in one step (smaller zoom_factor -> smaller visible area)."""
        self.zoom_exponent -= 1
        self.set_scene_coords_projection()

    def decrement_zoom(self):
        """Zoom out one step (larger zoom_factor -> larger visible area)."""
        self.zoom_exponent += 1
        self.set_scene_coords_projection()

    @property
    def zoom_factor(self):
        """Half-extent multiplier of the viewport: 1.2 ** zoom_exponent."""
        return 1.2 ** (self.zoom_exponent + 0)

    def scene_to_window_coords(self, point, *args):
        """Project a 3D scene point to window coordinates via gluProject.

        Extra positional args are forwarded to gluProject (e.g. explicit
        modelview/projection/viewport matrices).
        """
        return gluProject(point[0], point[1], point[2], *args)

    def window_to_viewport_coords(self, points):
        """Convert window pixel coordinates to viewport (scene) coordinates.

        `points` may be an (x, y) tuple or an Nx2 float array; returns an
        Nx2 float array.  Window Y grows downward, viewport Y upward, hence
        the orientation flip.
        """
        if isinstance(points, tuple):
            points = numpy.array(points).reshape(-1, 2).astype(float)
        # Normalize to [0, 1]
        points = points / [[self.window_width, self.window_height]]
        # Invert orientation of Y
        points[:, 1] = 1.0 - points[:, 1]
        # Normalize to [0, viewport_max_*-viewport_min_*]
        points *= [[self.viewport_max_x - self.viewport_min_x,
                    self.viewport_max_y - self.viewport_min_y]]
        # Shift to [viewport_min_*, viewport_max_*]
        points += [[self.viewport_min_x, self.viewport_min_y]]
        return points

    def set_scene_coords_projection(self, use_fixed_viewport_center=False):
        """Install an orthographic projection exposing the current viewport.

        The viewport rectangle is centered on the (possibly dragging)
        viewport center and sized by zoom_factor, with the aspect ratio
        matched to the window so scene units stay square on screen.  Also
        updates viewport_min_*/viewport_max_*.

        When `use_fixed_viewport_center` is true, the committed center is
        used even mid-drag (needed while computing drag deltas).
        """
        if self.window_width > self.window_height:
            delta_x = float(self.window_width) / float(self.window_height)
            delta_y = 1.
        else:
            delta_x = 1.
            delta_y = float(self.window_height) / float(self.window_width)
        if use_fixed_viewport_center:
            viewport_center = self._viewport_fixed_center
        else:
            viewport_center = self.viewport_center
        self.viewport_min_x = viewport_center[0] - delta_x * self.zoom_factor
        self.viewport_max_x = viewport_center[0] + delta_x * self.zoom_factor
        self.viewport_min_y = viewport_center[1] - delta_y * self.zoom_factor
        self.viewport_max_y = viewport_center[1] + delta_y * self.zoom_factor
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glOrtho(self.viewport_min_x, self.viewport_max_x,
                self.viewport_min_y, self.viewport_max_y,
                -1, 1)
        glMatrixMode(GL_MODELVIEW)

    def set_window_coords_projection(self):
        """Install an orthographic projection in raw window pixel units."""
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glOrtho(0, self.window_width,
                0, self.window_height,
                -1, 1)
        glMatrixMode(GL_MODELVIEW)

    def start_drag(self, x, y):
        """Begin a pan at window position (x, y)."""
        self._is_dragging = True
        self.drag_start = self.window_to_viewport_coords((x, y))
        self.delta_viewport_by_dragging = numpy.array([[0., 0.]])

    def update_drag(self, x, y):
        """Update the pan displacement for the current cursor position.

        Temporarily switches to the fixed-center projection so the new
        cursor position is measured in the same frame as drag_start.
        """
        if not self._is_dragging:
            return
        self.set_scene_coords_projection(use_fixed_viewport_center=True)
        current_drag = self.window_to_viewport_coords((x, y))
        self.delta_viewport_by_dragging = current_drag - self.drag_start
        self.set_scene_coords_projection()

    def finish_drag(self):
        """Commit the drag: fold the displacement into the fixed center."""
        if not self._is_dragging:
            return
        self._viewport_fixed_center = self.viewport_center
        self._is_dragging = False
        self.set_scene_coords_projection()

    def cancel_drag(self):
        """Abort the drag, discarding any displacement."""
        self._is_dragging = False
        self.set_scene_coords_projection()
# Global application state: the animation timing system (initialized with the
# hidden/visible colors matching the Interface defaults above), the view
# interface, and the drawing context handed to the student callback.
timing = Timing()
timing.set_value('main_opacity', 1.0)
timing.set_value('main_wireframe_color', Interface.HIDDEN_WIREFRAME_COLOR)
timing.set_value('point_border_color', Interface.HIDDEN_POINT_BORDER_COLOR)
timing.set_value('point_fill_color', Interface.HIDDEN_POINT_FILL_COLOR)
timing.set_value('target_wireframe_color', Interface.VISIBLE_TARGET_COLOR)
interface = Interface()
context = Context(config=config, interface=interface, timing=timing)
def display():
    """GLUT display callback: clear, draw the grid, then run each step of
    the configured sequence (student callback, target outline, or fill)."""
    timing.update_time()

    glClearColor(0.1, 0.1, 0.1, 1)
    clear_mask = GL_COLOR_BUFFER_BIT
    if config.enable_depth:
        clear_mask += GL_DEPTH_BUFFER_BIT
    glClear(clear_mask)

    interface.set_scene_coords_projection()
    glLoadIdentity()
    draw_grid_2d(grid_spacing=1)

    # Snapshot the grid-aligned matrices so every sequence step starts
    # from the same baseline state.
    base_modelview = glGetFloatv(GL_MODELVIEW_MATRIX)
    base_projection = glGetFloatv(GL_PROJECTION_MATRIX)

    for step in config.sequence:
        glMatrixMode(GL_PROJECTION)
        glLoadMatrixf(base_projection)
        glMatrixMode(GL_MODELVIEW)
        glLoadMatrixf(base_modelview)

        op = step[0]
        if op == 'UserCallback':
            # Student-composed scene, drawn in plain white.
            glColor(1, 1, 1, 1)
            compor_cena(context)
        elif op == 'Outline':
            # Target wireframe, with its (possibly animated) color.
            glColor(timing.get_value('target_wireframe_color'))
            config.geometry.draw_wireframe(step[1])
        elif op == 'Fill':
            config.geometry.fill(step[1])

    glutSwapBuffers()
def idle():
    # Continuously request redraws so time-driven animations keep advancing.
    glutPostRedisplay()
def reshape(width, height):
    """GLUT reshape callback: track the window size and rebuild projection."""
    interface.window_width, interface.window_height = width, height
    glViewport(0, 0, width, height)
    interface.set_scene_coords_projection()
def mouse(button, state, x, y):
    """GLUT mouse callback: left button drags, wheel buttons zoom."""
    if button == GLUT_LEFT_BUTTON:
        if state == GLUT_DOWN:
            interface.start_drag(x, y)
        else:
            interface.finish_drag()
        return
    if button == 3:    # wheel scrolled up
        interface.increment_zoom()
    elif button == 4:  # wheel scrolled down
        interface.decrement_zoom()
def motion(x, y):
    # GLUT active-motion callback (fires while a button is held): feed the
    # drag handler so the view pans with the cursor.
    interface.update_drag(x, y)
def keyboard(key, x, y):
    """GLUT keyboard callback.

    ESC quits; '+'/'='/'-' zoom; f/w/p toggle fill/wireframe/points
    (uppercase variants use the fast transition); [ ] { } navigate the
    phases; any other key is forwarded to the user handler.
    """
    if key == b'\x1b':
        sys.exit(0)
    elif key in (b'+', b'='):
        interface.increment_zoom()
    elif key == b'-':
        interface.decrement_zoom()
    elif key.lower() == b'f':
        duration = (interface.FAST_TRANSITION_TIME if key == b'F'
                    else interface.SLOW_TRANSITION_TIME)
        # Toggle first, then animate towards the matching opacity.
        interface.show_fill = not interface.show_fill
        opacity = 1.0 if interface.show_fill else 0.1
        timing.set_value('main_opacity', opacity, duration)
    elif key.lower() == b'w':
        duration = (interface.FAST_TRANSITION_TIME if key == b'W'
                    else interface.SLOW_TRANSITION_TIME)
        interface.show_wireframe = not interface.show_wireframe
        color = (Interface.VISIBLE_WIREFRAME_COLOR if interface.show_wireframe
                 else Interface.HIDDEN_WIREFRAME_COLOR)
        timing.set_value('main_wireframe_color', color, duration)
    elif key.lower() == b'p':
        duration = (interface.FAST_TRANSITION_TIME if key == b'P'
                    else interface.SLOW_TRANSITION_TIME)
        interface.show_points = not interface.show_points
        if interface.show_points:
            border = Interface.VISIBLE_POINT_BORDER_COLOR
            fill = Interface.VISIBLE_POINT_FILL_COLOR
        else:
            border = Interface.HIDDEN_POINT_BORDER_COLOR
            fill = Interface.HIDDEN_POINT_FILL_COLOR
        timing.set_value('point_border_color', border, duration)
        timing.set_value('point_fill_color', fill, duration)
    elif key == b'[':
        context.prev_phase()
    elif key == b']':
        context.next_phase()
    elif key == b'{':
        context.first_phase()
    elif key == b'}':
        context.last_phase()
    else:
        processar_teclado(key)
# Establish the initial view: either the configured bounds or the bounding
# box of the objects that should fit on screen.
if config.bounds_min is not None:
    bounds_min, bounds_max = config.bounds_min, config.bounds_max
else:
    bounds_min, bounds_max = config.geometry.get_bounds(config.fit_objects)
delta_x = bounds_max[0] - bounds_min[0]
delta_y = bounds_max[1] - bounds_min[1]
# Choose the zoom level (base-1.2 exponent) so the larger half-extent fits.
interface.zoom_exponent = max(math.log(delta_x / 2) / math.log(1.2),
                              math.log(delta_y / 2) / math.log(1.2)) + 1
if config.center is not None:
    interface.viewport_fixed_center = config.center
else:
    # Default to the middle of the bounding box.
    interface.viewport_fixed_center = (bounds_min[0] + 0.5 * delta_x,
                                       bounds_min[1] + 0.5 * delta_y)
# GLUT window and OpenGL state setup.  Order matters: the window must exist
# before GL state is touched or callbacks are registered.
glutInitWindowPosition(0, 0)
glutInitWindowSize(400, 400)
glutCreateWindow(b"Computer Graphics")
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
if config.enable_depth:
    glEnable(GL_DEPTH_TEST)
glutDisplayFunc(display)
glutIdleFunc(idle)
glutReshapeFunc(reshape)
glutMouseFunc(mouse)
glutMotionFunc(motion)
glutKeyboardFunc(keyboard)
glutMainLoop()  # hands control to the GLUT event loop; never returns
| [
"jaimerson_correia@htmail.com"
] | jaimerson_correia@htmail.com |
a3dd99368c904903d111559a9aacfa6b15d8f868 | 1d2a06e15deb896556f28099118f4b5ef5b2fef5 | /overheal_crit.py | 1832af5fe777b0978f4ab7a955fc43c6b9e90792 | [] | no_license | apexz1/Overheal | e4df83acfba91eb268a1b966bd432368deddf5e3 | 9885b749bea094969f14896349a5a6f8a9d9ca9f | refs/heads/master | 2023-07-14T22:04:32.354780 | 2021-08-13T23:08:55 | 2021-08-13T23:08:55 | 395,818,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,832 | py | """
Script that estimates value of 1% crit.
By: Filip Gokstorp (Saintis), 2020
"""
from src import readers, group_processed_lines
import spell_data as sd
def process_spell(spell_id, spell_lines):
    """Summarise crit healing for a single spell.

    *spell_lines* is a sequence of (heal, overheal, is_crit) tuples.
    Returns (spell_id, n_crits, n_spells, mean_crit_full_heal,
    mean_crit_under_heal, total_heal, total_overheal).
    """
    n_spells = len(spell_lines)
    total_heal = 0
    total_overheal = 0
    full_heals = []   # crit-bonus portion of each crit heal
    under_heals = []  # part of the crit bonus that was not overheal
    for heal, overheal, crit in spell_lines:
        total_heal += heal
        total_overheal += overheal
        if crit:
            # A crit heals 1.5x, so one third of the total is the bonus.
            bonus = heal * (1 / 3)
            full_heals.append(bonus)
            under_heals.append(max(0.0, bonus - overheal))
    n_crits = len(full_heals)
    if n_crits:
        mean_full = sum(full_heals) / n_crits
        mean_under = sum(under_heals) / n_crits
    else:
        mean_full = 0
        mean_under = 0
    return (spell_id, n_crits, n_spells, mean_full, mean_under,
            total_heal, total_overheal)
def print_results(data):
    """Print a per-spell crit summary followed by an overall/average row.

    *data* is a list of tuples as produced by process_spell:
    (spell_id, n_crits, n_spells, crit_fh, crit_uh, hh, ohh).
    """
    print()
    if len(data) == 0:
        print("No data found.")
        return
    # Sort rows alphabetically by spell name for stable output.
    data = sorted(data, key=lambda d: sd.spell_name(d[0]))
    print("Crits:")
    # Accumulators for the overall/average row at the bottom.
    nn_crits = 0
    nn_spells = 0
    s_crit_fh = 0
    s_crit_uh = 0
    s_coef = 0  # coefficient sum, weighted by number of crits
    t_hh = 0
    t_oh = 0
    for spell_id, n_crits, n_spells, crit_fh, crit_uh, hh, ohh in data:
        spell_name = sd.spell_name(spell_id)
        coef = sd.spell_coefficient(spell_id)
        nn_crits += n_crits
        nn_spells += n_spells
        s_crit_fh += crit_fh * n_crits
        s_crit_uh += crit_uh * n_crits
        s_coef += coef * n_crits
        t_hh += hh
        t_oh += ohh
        crit_pc = n_crits / n_spells
        # "100.%" keeps the column width fixed when everything crit.
        crit_pc_str = f"{crit_pc:5.1%}" if crit_pc < 1.0 else "100.%"
        message = f" {spell_name:<30s}:{n_crits:4d} /{n_spells:4d} crits ({crit_pc_str}); ({ohh / hh:5.1%} OH)"
        if n_crits == 0:
            print(message)
            continue
        crit_oh = crit_fh - crit_uh
        oh_pc = crit_oh / crit_fh
        oh_pc_str = f"{oh_pc:5.1%}" if oh_pc < 1.0 else "100.%"
        # Healing added by 1% crit, and the equivalent +heal worth.
        crit_heal = 0.01 * crit_uh
        eq_h_0c = crit_heal / coef
        eq_h = eq_h_0c / (1.0 + 0.5 * crit_pc)
        message += f", Crit H: {crit_fh:4.0f} ({crit_uh:4.0f} + {crit_oh:4.0f} oh) ({oh_pc_str} oh)"
        message += f", 1% crit gives {0.01 * crit_uh:+4.1f} healing eq to {eq_h:+5.1f} h ({eq_h_0c:+5.1f} at 0% crit)."
        print(message)
    print()
    crit_pc = nn_crits / nn_spells
    spell_name = "Overall / Average"
    message = f" {spell_name:<30s}:{nn_crits:4d} /{nn_spells:4d} crits ({crit_pc:5.1%}); ({t_oh / t_hh:5.1%} OH)"
    # Guard BEFORE dividing by nn_crits: the original computed
    # coef = s_coef / nn_crits first, which raised ZeroDivisionError
    # whenever there were no crits at all.
    if nn_crits == 0:
        print(message)
        return
    coef = s_coef / nn_crits
    crit_fh = s_crit_fh / nn_crits
    crit_uh = s_crit_uh / nn_crits
    crit_oh = crit_fh - crit_uh
    oh_pc = crit_oh / crit_fh
    crit_heal = 0.01 * crit_uh
    eq_h_0c = crit_heal / coef
    eq_h = eq_h_0c / (1.0 + 0.5 * crit_pc)
    message += f", Crit H: {crit_fh:4.0f} ({crit_uh:4.0f} + {crit_oh:4.0f} oh) ({oh_pc:5.1%} oh)"
    message += f", 1% crit gives {0.01 * crit_uh:+4.1f} healing eq to {eq_h:+5.1f} h ({eq_h_0c:+5.1f} at 0% crit)."
    print(message)
    print()
def overheal_crit(source, character_name, spell_id=None, encounter=None):
    """Run the crit-value analysis for one character's combat log."""
    processor = readers.get_processor(source, character_name=character_name)
    encounter = processor.select_encounter(encounter=encounter)
    processor.process(encounter=encounter)
    # Only direct heals matter here: periodic (HoT) ticks cannot crit.
    grouped = group_processed_lines(processor.direct_heals, False, spell_id=spell_id)
    if spell_id:
        if spell_id not in grouped:
            print(f"Could not find casts of spell [{spell_id}]")
            exit(1)
        data = [process_spell(spell_id, grouped[spell_id])]
    else:
        data = [process_spell(s_id, lines) for s_id, lines in grouped.items()]
    print_results(data)
def main(argv=None):
    """Command-line entry point: parse arguments and run the crit analysis."""
    import os
    from src.parser import OverhealParser
    # make sure directories exist
    # NOTE(review): nothing in this script writes figures -- presumably other
    # overheal_* scripts share this directory; confirm before removing.
    os.makedirs("figs/crit", exist_ok=True)
    parser = OverhealParser(
        description="""\
Analyses a combat log and calculates the additional healing any crits gave.
Counts up the healing and overhealing done by each found crit.
Prints out extra healing done by each percentage of crit, on average, and the equivalent +heal worth,
for each spell, and for the average spell profile over the whole combat log.
""",
        need_character=True,
        accept_spell_id=True,
        accept_encounter=True,
    )
    args = parser.parse_args(argv)
    # spell_id / encounter are optional filters; None analyses everything.
    overheal_crit(args.source, args.character_name, spell_id=args.spell_id, encounter=args.encounter)


if __name__ == "__main__":
    main()
| [
"filip@gokstorp.se"
] | filip@gokstorp.se |
fc57bf8b87efd57b9255b47b6b72265ecd7cdb8f | 4c126a91fbd9b25c85621e86568b364f0f3125ee | /swf/movie.py | 0831b60fe4804204ad85fe8906039e0d433d37dd | [] | no_license | ctz/flashover | 742974b9e7b469c5337f8868ab8ce68ead6277f9 | ac1248631bb5fb59a8c2490cfc71188f561aa3fa | refs/heads/master | 2023-03-23T15:59:10.207787 | 2012-06-05T09:58:25 | 2012-06-05T09:58:25 | 2,959,096 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,992 | py | """
SWF
"""
from tag import SWFTimelineContainer
from stream import SWFStream
from export import SVGExporter
try:
import cStringIO as StringIO
except ImportError:
import StringIO
class SWFHeaderException(Exception):
    """Raised when a stream does not start with a valid SWF header."""

    def __init__(self, message):
        super(SWFHeaderException, self).__init__(message)
class SWFHeader(object):
    """ SWF header """
    def __init__(self, stream):
        """Parse the fixed-size header fields from *stream*.

        Raises SWFHeaderException when the three signature bytes are not
        'FWS' (uncompressed) or 'CWS' (zlib-compressed).
        """
        a = stream.readUI8()
        b = stream.readUI8()
        c = stream.readUI8()
        # Valid signatures: 'CWS' (0x43 0x57 0x53) or 'FWS' (0x46 0x57 0x53).
        if a not in (0x43, 0x46) or b != 0x57 or c != 0x53:
            raise SWFHeaderException("not a SWF file! (invalid signature)")
        self._compressed = (a == 0x43)
        self._version = stream.readUI8()
        self._file_length = stream.readUI32()
        # For a compressed file the remaining fields live inside the zlib
        # stream; SWF.parse fills them in after decompression.
        if not self._compressed:
            self._frame_size = stream.readRECT()
            self._frame_rate = stream.readFIXED8()
            self._frame_count = stream.readUI16()

    @property
    def frame_size(self):
        """ Return frame size as a SWFRectangle """
        return self._frame_size

    @property
    def frame_rate(self):
        """ Return frame rate """
        return self._frame_rate

    @property
    def frame_count(self):
        """ Return number of frames """
        return self._frame_count

    @property
    def file_length(self):
        """ Return uncompressed file length """
        return self._file_length

    @property
    def version(self):
        """ Return SWF version """
        return self._version

    @property
    def compressed(self):
        """ Whether the SWF is compressed using ZLIB """
        return self._compressed

    def __str__(self):
        return " [SWFHeader]\n" + \
            " Version: %d\n" % self.version + \
            " FileLength: %d\n" % self.file_length + \
            " FrameSize: %s\n" % self.frame_size.__str__() + \
            " FrameRate: %d\n" % self.frame_rate + \
            " FrameCount: %d\n" % self.frame_count
class SWF(SWFTimelineContainer):
    """
    SWF class

    The SWF (pronounced 'swiff') file format delivers vector graphics, text,
    video, and sound over the Internet and is supported by Adobe Flash
    Player software. The SWF file format is designed to be an efficient
    delivery format, not a format for exchanging graphics between graphics
    editors.

    @param file: a file object with read(), seek(), tell() methods.
    """
    def __init__(self, file=None):
        super(SWF, self).__init__()
        self._data = None if file is None else SWFStream(file)
        self._header = None
        if self._data is not None:
            self.parse(self._data)

    @property
    def data(self):
        """
        Return the SWFStream object (READ ONLY)
        """
        return self._data

    @property
    def header(self):
        """ Return the SWFHeader """
        return self._header

    def export(self, exporter=None, force_stroke=False):
        """
        Export this SWF using the specified exporter.
        When no exporter is passed in the default exporter used
        is swf.export.SVGExporter.

        Exporters should extend the swf.export.BaseExporter class.

        @param exporter : the exporter to use
        @param force_stroke : set to true to force strokes on fills,
                              useful for some edge cases.
        """
        exporter = SVGExporter() if exporter is None else exporter
        if self._data is None:
            raise Exception("This SWF was not loaded! (no data)")
        if len(self.tags) == 0:
            raise Exception("This SWF doesn't contain any tags!")
        return exporter.export(self, force_stroke)

    def parse_file(self, filename):
        """ Parses the SWF from a filename """
        self.parse(open(filename, 'rb'))

    def parse(self, data):
        """
        Parses the SWF.

        The @data parameter can be a file object or a SWFStream
        """
        self._data = data = data if isinstance(data, SWFStream) else SWFStream(data)
        self._header = SWFHeader(self._data)
        if self._header.compressed:
            import zlib
            data = data.f.read()
            # Renamed from 'zip', which shadowed the built-in zip().
            decompressor = zlib.decompressobj()
            temp = StringIO.StringIO()
            temp.write(decompressor.decompress(data))
            temp.seek(0)
            data = SWFStream(temp)
            # The rest of the header lives inside the compressed body
            # (SWFHeader only read these fields for uncompressed files).
            self._header._frame_size = data.readRECT()
            self._header._frame_rate = data.readFIXED8()
            self._header._frame_count = data.readUI16()
        self.parse_tags(data)

    def __str__(self):
        s = "[SWF]\n"
        s += self._header.__str__()
        for tag in self.tags:
            s += tag.__str__() + "\n"
        return s
| [
"jpixton@gmail.com"
] | jpixton@gmail.com |
4f6988f2b266088727771bf3ad68042676374e7a | c7349a228d4a43176c3249b8e5fc3110f15be5b6 | /LG_TriHard_Loss.py | 08a4a55057dc37494f03ca2fefaf3b790c856e80 | [] | no_license | OrientTraveller/AlignedReID | 0cd08cf26bc776d0ec7c819acea587ba1ff866cc | 3c4dd5bba8d77b39da46f4870d22369999202bdf | refs/heads/main | 2023-04-02T05:24:34.749008 | 2021-03-31T03:57:46 | 2021-03-31T03:57:46 | 352,834,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,018 | py | import torch
from torch import nn
from local_distance import batch_local_dist
from hard_example_mining import hard_example_mining
"""
本文件用于自定义计算全局特征与局部特征的难样本挖掘三元组损失
需要调用先前编写的难样本挖掘算法与局部对齐最小距离算法
"""
class AlignedTripletLoss(nn.Module):
    """Batch-hard triplet loss over both global and local features.

    The global distance matrix drives hard-example mining; the mined
    hardest-positive / hardest-negative indices then select local
    features, whose distances use the aligned shortest-path metric.
    """

    def __init__(self, margin=0.3):
        super().__init__()
        # margin is the triplet-loss boundary alpha.
        self.margin = margin
        self.ranking_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, inputs, local_features, targets):
        """Compute (global_loss, local_loss).

        inputs: (N, D) global feature tensor.
        local_features: local feature tensor, squeezed below.
        targets: (N,) ground-truth person ids.
        """
        n = inputs.size(0)
        local_features = local_features.squeeze()
        # Pairwise Euclidean distances: sqrt(A^2 + (B^T)^2 - 2*A*B^T).
        distance = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)
        distance = distance + distance.t()
        # BUG FIX: the original called the non-in-place, deprecated
        # `distance.addmm(1, -2, inputs, inputs.t())` and discarded its
        # result, so the -2*A*B^T term was never applied.  Use the
        # in-place variant with explicit beta/alpha instead.
        distance.addmm_(inputs, inputs.t(), beta=1, alpha=-2)
        distance = distance.clamp(min=1e-12).sqrt()  # symmetric matrix
        # Batch-hard mining: hardest positive / negative per anchor.
        dist_ap, dist_an, p_inds, n_inds = hard_example_mining(distance, targets, return_inds=True)
        p_inds, n_inds = p_inds.long(), n_inds.long()
        # Local features of the mined hard examples.
        p_local_features = local_features[p_inds]
        n_local_features = local_features[n_inds]
        # Aligned (shortest-path) local distances for the hard pairs.
        local_dist_ap = batch_local_dist(local_features, p_local_features)
        local_dist_an = batch_local_dist(local_features, n_local_features)
        # y = 1 tells MarginRankingLoss the first input should be larger.
        y = torch.ones_like(dist_an)
        global_loss = self.ranking_loss(dist_an, dist_ap, y)
        local_loss = self.ranking_loss(local_dist_an, local_dist_ap, y)
        return global_loss, local_loss
if __name__ == '__main__':
    # Smoke test: a batch of 32 random samples, 8 identities x 4 images each
    # (the layout batch-hard mining expects).
    target = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8]
    target = torch.Tensor(target)
    features = torch.rand(32, 2048)
    local_features = torch.randn(32, 128, 3)
    a = AlignedTripletLoss()
    g_loss, l_loss = a.forward(features, local_features, target)
    print(g_loss)
    print(l_loss)
| [
"1589301333@qq.com"
] | 1589301333@qq.com |
d4fda28985f484418a5bf8dd6ad5b4bfd8b4eee5 | 543c391dd6889b1533d44e78bc10e2307c8eec64 | /Les 4/PE_4/Practice Exercise 4_2.py | f8876c386aaf07b4b32754d04159bae948f7c417 | [] | no_license | SFenijn/Python2017 | 8393975fcf30b7ccad2563634668aeab93184d8d | 4deeb367f428b85b506f9319e68cf9feb333cc26 | refs/heads/master | 2021-07-04T05:22:01.266682 | 2017-09-25T08:07:07 | 2017-09-25T08:07:07 | 104,900,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | def som(getallenlijst):
'Deze functie telt alle getallen uit een lijst bij elkaar op.'
return sum(getallenlijst)
lst = [5, 5, 4, 6]
print(som(lst)) | [
"stijn.fenijn@student.hu.nl"
] | stijn.fenijn@student.hu.nl |
e7ccb286539047d1d436e032546a3938ddce7bf1 | 0b86600e0288c0fefc081a0f428277a68b14882e | /binaire/binaire_II.py | b861b63f8209d81b2dbc50518fcc23ce46f0cebc | [] | no_license | Byliguel/python1-exo7 | 9ede37a8d2b8f384d1ebe3d612e8c25bbe47a350 | fbf6b08f4c1e94dd9f170875eee871a84849399e | refs/heads/master | 2020-09-22T10:16:34.044141 | 2019-12-01T11:52:51 | 2019-12-01T11:52:51 | 225,152,986 | 1 | 0 | null | 2019-12-01T11:51:37 | 2019-12-01T11:51:36 | null | UTF-8 | Python | false | false | 5,199 | py |
##############################
# Binaire - partie II
##############################
from binaire_I import *
##############################
# Activité 1 - Palindrome en binaire
##############################

## Question 1 ##

def est_palindrome_1(liste):
    """Return True when *liste* reads the same forwards and backwards.

    Naive version: compares every position with its mirror, scanning the
    whole list even after a mismatch is found.
    """
    p = len(liste)
    ok = True
    for i in range(p):
        if liste[i] != liste[p - 1 - i]:
            ok = False
    return ok


# Optimised variant: stop at the first mismatch, scan only half the list.
def est_palindrome_1_bis(liste):
    """Early-exit palindrome test (half-length scan)."""
    p = len(liste)
    for i in range(p // 2):
        if liste[i] != liste[p - 1 - i]:
            return False
    return True


def est_palindrome_2(liste):
    """Palindrome test by comparing the list with its reversal."""
    return liste == list(reversed(liste))


# Quick sanity check of the three implementations.
print("--- Test d'un palindrome ---")
liste = [1, 0, 1, 0, 0, 1, 0, 1]
print(est_palindrome_1(liste))
print(est_palindrome_1_bis(liste))
print(est_palindrome_2(liste))
## Question 2 ##

def cherche_palindrome_binaire(N):
    """Print (with a running count) every n < N whose binary expansion
    is a palindrome."""
    compteur = 0
    for n in range(N):
        bits = entier_vers_binaire(n)
        if est_palindrome_1(bits):
            compteur = compteur + 1
            print(compteur, ":", n, "=", bits)
    return


# Test
print("--- Palindromes binaires ---")
cherche_palindrome_binaire(1000)
# The 1000th binary palindrome is:
# 249903 = [1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1]
## Question 3 ##

def cherche_palindrome_decimale(N):
    """Print (with a running count) every n < N whose list of decimal
    digits is a palindrome."""
    compteur = 0
    for n in range(N):
        chiffres = entier_vers_decimale(n)
        if est_palindrome_1(chiffres):
            compteur = compteur + 1
            print(compteur, ":", n)
    return


# Test
print("--- Palindromes avec décimales ---")
cherche_palindrome_decimale(1000)
# The 1000th decimal palindrome is:
# 90009
## Question 4 ##

def cherche_bi_palindrome(N):
    """Print every n < N that is a palindrome in binary AND in decimal."""
    compteur = 0
    for n in range(N):
        en_binaire = entier_vers_binaire(n)
        en_decimal = entier_vers_decimale(n)
        if est_palindrome_1(en_binaire) and est_palindrome_1(en_decimal):
            compteur = compteur + 1
            print(compteur, ":", n, "=", en_binaire)
    return


# Test
print("--- Bi-palindromes ---")
cherche_bi_palindrome(1000)
# The 20th bi-palindrome is
# 585585 = [1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1]
##############################
# Activité 2 - Opérations logiques
##############################

## Question 1 ##

def OUeg(l1, l2):
    """Element-wise logical OR of two bit lists of the same length."""
    return [1 if l1[i] == 1 or l2[i] == 1 else 0 for i in range(len(l1))]


def ETeg(l1, l2):
    """Element-wise logical AND of two bit lists of the same length."""
    return [1 if l1[i] == 1 and l2[i] == 1 else 0 for i in range(len(l1))]


def NON(l1):
    """Element-wise logical NOT of a bit list."""
    return [0 if b == 1 else 1 for b in l1]


# Quick check on two lists of the same length.
print("--- Opérations logiques (même longueur) ---")
l1 = [1, 0, 1, 0, 1, 0, 1]
l2 = [1, 0, 0, 1, 0, 0, 1]
print(l1)
print(l2)
print(OUeg(l1, l2))
print(ETeg(l1, l2))
print(NON(l1))
## Question 2 ##
# Left-pad a bit list with non-significant zeros when needed.
def ajouter_zeros(liste, p):
    """Return *liste* left-padded with zeros to length p.

    Lists already of length >= p are returned unchanged in value.  The
    padding is built in one step, replacing the original while-loop of
    repeated list concatenations (O(p) instead of O(p**2)).
    """
    return [0] * (p - len(liste)) + liste


# Test
print("--- Zeros non significatifs ---")
print(ajouter_zeros([1, 0, 1, 1], 8))
## Question 3 ##
# Logical operations on lists of possibly different lengths.
def _egaliser_longueurs(l1, l2):
    """Left-pad the shorter of the two bit lists so both have equal length.

    Shared helper factoring out the padding logic that OU and ET
    previously duplicated.
    """
    p, q = len(l1), len(l2)
    if p > q:
        return l1, ajouter_zeros(l2, p)
    return ajouter_zeros(l1, q), l2


def OU(l1, l2):
    """Element-wise OR; the shorter list is zero-padded on the left."""
    a, b = _egaliser_longueurs(l1, l2)
    return OUeg(a, b)


def ET(l1, l2):
    """Element-wise AND; the shorter list is zero-padded on the left."""
    a, b = _egaliser_longueurs(l1, l2)
    return ETeg(a, b)


# Test
print("--- Opérations logiques (cas général) ---")
l1 = [1, 0, 1, 0, 1, 0, 1]
l2 = [1, 0, 0, 1, 0]
print(l1)
print(l2)
print(OU(l1, l2))
print(ET(l1, l2))
##############################
# Activité 3 - Loi de Morgan
##############################

## Question 1 ##

def tous_les_binaires(p):
    """Return the binary expansions of every integer in [0, 2**p)."""
    return [entier_vers_binaire(n) for n in range(2 ** p)]


# Test
print("--- Tous les binaires ---")
print(tous_les_binaires(3))
## Question 2 ##

def toutes_les_listes(p):
    """Return every bit list of length p, recursively, in binary order.

    By convention p == 0 yields [] (not [[]]).
    """
    if p == 0:
        return []
    if p == 1:
        return [[0], [1]]
    suffixes = toutes_les_listes(p - 1)
    # Prefix every shorter list with 0, then with 1.
    return [[0] + s for s in suffixes] + [[1] + s for s in suffixes]


# Test
print("--- Toutes les listes ---")
print(toutes_les_listes(3))
## Question 3 ##

# De Morgan's laws
def test_loi_de_morgan(p):
    """Check NOT(a OR b) == NOT(a) AND NOT(b) on every zero-padded
    binary list of length p, printing one verdict per pair."""
    candidats = [ajouter_zeros(l, p) for l in tous_les_binaires(p)]
    # candidats = toutes_les_listes(p)
    for l1 in candidats:
        for l2 in candidats:
            gauche = NON(OU(l1, l2))
            droite = ET(NON(l1), NON(l2))
            if gauche == droite:
                print("Vrai")
            else:
                print("Faux", l1, l2)
    return


# Test
print("--- Test loi de Morgan ---")
test_loi_de_morgan(2)
| [
"arnaud.bodin@math.univ-lille1.fr"
] | arnaud.bodin@math.univ-lille1.fr |
fe0fe2eaefa2fe1ad30ed11718719a9b3ab8b442 | cf1d29bdc65402693218d929d3b2f7015cacc3c9 | /yin/mini-pysonar-master/tests/func-infinite.py | bb35cd99aa5f580d11660f0394377ee9cee23a84 | [
"BSD-3-Clause"
] | permissive | thyonline/study | 9375cc3f194d1813d07ef3624de7707717d4f244 | 00621e6c9f81a87bfda17c4ef8eb77b2940ccbd6 | refs/heads/master | 2022-11-14T04:15:10.276043 | 2022-10-24T09:59:13 | 2022-10-24T09:59:13 | 156,359,582 | 2 | 0 | null | 2018-11-06T09:34:32 | 2018-11-06T09:34:31 | null | UTF-8 | Python | false | false | 257 | py | # f will not show up in the index because there is no path it can return
def f(x, *y, **kw):
if x < 1:
return 1
elif x < 5:
tt = kw
return f(x, y)
else:
return f(y, x)
z = f(1, True, 'ok', z=1, w=2)
| [
"ming1016@foxmail.com"
] | ming1016@foxmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.