index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
4,200 | bfb52a5ee6d88d63c4ef89dae26bb8cbecb091c6 | # ============================================================================
# Archivo cnn_sisben.py
# autor Johan S. Mendez, Jose D. Mendez
# fecha 27/Agos/2020
# Clasificacion de beneficiarios del nuevo sistema de clasificacion del sisben
# agrupado en 4 grandes grupos de beneficiarios, se utiliza un red neuronal
# simple para el primero modelo de clasificación de salida multiple
# ============================================================================
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv1D
# from keras.callbacks import ModelCheckpoint
# from keras.models import model_from_json
# from keras import backend as K
# from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
# --- Data preparation -------------------------------------------------------
print("\033[91m Lectura de datos \033[0m")
#************ Preparing the data ************#
# Read the master SISBEN IV dataset (absolute path on the author's machine).
path = "/Users/johan/Documents/GitHub/Cafe/bases_datos/base_maestra_sisben_iv.csv"
df = pd.read_csv(path)
# Drop rows with missing values.
df = df.dropna()
# Choose a fixed, reproducible subset of the complete data.
df_sample = df.sample(n=150000, random_state=123)
# Target is the SISBEN group label; one-hot encode it for the softmax output.
X = df_sample.drop("sisben_iv", axis=1)
y = df_sample["sisben_iv"]
y = pd.get_dummies(y, dtype=float)
# One-hot encode the categorical (int/object) columns of X; the remaining
# numeric columns pass through get_dummies unchanged.
X_categorical = X.select_dtypes(include=["int", "object"])
X_categorical = pd.get_dummies(X,
                               columns=X_categorical.columns,
                               dtype=float)
x_train, x_test, y_train, y_test = train_test_split(np.asarray(X_categorical),
                                                    np.asarray(y),
                                                    test_size=0.33,
                                                    shuffle=True)
# Conv1D expects a trailing channel dimension: (samples, features, 1).
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)
# --- Model ------------------------------------------------------------------
# Small 1D-conv classifier ending in a 4-way softmax (the 4 SISBEN groups).
model = Sequential()
model.add(Conv1D(64, (32),
                 input_shape=(x_train.shape[1], 1),
                 activation='elu'))
model.add(Flatten())
model.add(Dense(32, activation='elu'))
model.add(Dense(4, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer="Adam",
              metrics=[tf.keras.metrics.CategoricalAccuracy()],
              )
print(model.summary())
batch_size = 128
epochs = 100
# NOTE(review): this rebinds `model` to the History object returned by fit();
# the plots below rely on History.history so they work, but the trained model
# itself is no longer reachable afterwards -- confirm this is intended.
model = model.fit(x_train, y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  verbose=1,
                  validation_data=(x_test, y_test))
# --- Training curves: loss, then categorical accuracy -----------------------
plt.plot(model.history['loss'])
plt.plot(model.history['val_loss'])
plt.title('model train vs validation loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
plt.plot(model.history['categorical_accuracy'])
plt.plot(model.history['val_categorical_accuracy'])
plt.title('model train vs validation categorical_accuracy')
plt.ylabel('categorical_accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
|
4,201 | 56d4532b633242f34f7a6ed86a35290836861f67 | from binaryninja import *
import yara
def get_yara_rule_path():
    """Prompt the user for a YARA rule file; returns the chosen path
    (decoded by the caller) or None if the dialog was cancelled."""
    return get_open_filename_input("Open YARA rule", "YARA rules (*.yar *.yara)")
def get_markdown_result(matches):
    """Render YARA match dicts as a markdown report table.

    Each match contributes one row: rule name, linked function name(s)
    (when offsets were resolved to functions), and the matched string
    offsets as clickable binaryninja:// links.
    """
    entry_fmt = "| {} | {} | {} |\n"
    md_text = """# YARA - Scan results
| Rule Name | Function | Strings offsets |
|-----------|----------|-----------------|
"""
    for m in matches:
        rule = m['rule']
        func = '-'
        if 'funcs' in m and len(m['funcs']) > 0:
            # BUG FIX: the original used '{:name}', which is a format *spec*
            # applied to a (missing) positional argument and raised
            # IndexError; '{name}' is the intended named replacement field.
            func = " ".join(['[{name}](binaryninja://?expr={name})'.format(name=f.name) for f in m['funcs']])
        # m['strings'] looks like [(81, '$a', b'abc'), (141, '$b', b'def')]
        s = " ".join(['["{}"](binaryninja://?expr=0x{:x})'.format(s[2].decode('utf-8'), s[0]) for s in m['strings']])
        md_text += entry_fmt.format(rule, func, s)
    return md_text
def plugin_search_file(bv):
    """Prompt for a YARA rule file and scan the binary's original file.

    Matching rules are collected (together with any functions containing
    the matched offsets) and shown as a markdown report; compile/match
    errors are logged and surfaced in a message box.
    """
    matches = []

    def yara_callback(data):
        """Per-rule callback invoked by yara-python.

        `data` layout:
        {
            'tags': ['foo', 'bar'],
            'matches': True,
            'namespace': 'default',
            'rule': 'my_rule',
            'meta': {},
            'strings': [(81, '$a', 'abc'), (141, '$b', 'def')]
        }
        """
        if data['matches']:
            funcs = []
            for addr, _, _ in data['strings']:
                funcs += bv.get_functions_containing(addr)
            data['funcs'] = funcs
            matches.append(data)
        # BUG FIX: the original evaluated CALLBACK_CONTINUE as a bare
        # expression (the callback returned None); return it explicitly
        # so scanning always continues as intended.
        return yara.CALLBACK_CONTINUE

    yara_path = get_yara_rule_path()
    # user closed message prompt
    if yara_path is None:
        return
    try:
        rules = yara.compile(filepath=yara_path.decode('utf-8'))
        rules.match(bv.file.original_filename, callback=yara_callback)
    except Exception as e:
        log_error("[YARA] Exception: {}".format(str(e)))
        show_message_box("Error", "Check logs for details", icon=MessageBoxIcon.ErrorIcon)
    if len(matches) > 0:
        bv.show_markdown_report("YARA", get_markdown_result(matches))
    else:
        log_info("[YARA] No matches")
def plugin_search_functions(bv):
    """Placeholder: per-function YARA scanning is not implemented yet."""
    show_message_box("Not implemented", "This feature is not implemented yet")
    # TODO implement Background task maybe?

# Register the file-level scan as a Binary Ninja plugin command; the
# per-function variant stays disabled until implemented.
PluginCommand.register("[YARA] Scan file with yara rule...", "Scan file with yara rule", plugin_search_file)
# PluginCommand.register('[YARA] Scan functions with yara rule...', "Scan all functions with yara rules (might be slower)", plugin_search_functions)
|
4,202 | 73e23b3560294ca24428e7dd4cc995b97767335c | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@project= Life_is_short_you_need_python
@file= judgement
@author= wubingyu
@create_time= 2017/12/21 下午2:58
"""
#a if condition else b
#(falseValue,trueValue)[test]
#(falseValue,trueValue)[test==True]
#(falseValue,trueValue)[bool(<expression>)]
|
4,203 | 995e42312e286d82fa101128795d8aa60c1a6548 | # -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=undefined-loop-variable
"""
Run through RB for different qubit numbers to check that it's working
and that it returns the identity
"""
import unittest
import random
import qiskit
import qiskit.ignis.verification.randomized_benchmarking as rb
class TestRB(unittest.TestCase):
""" The test class """
@staticmethod
def choose_pattern(pattern_type, nq):
'''
Choose a valid field for rb_opts['rb_pattern']
:param pattern_type: a number between 0 and 2.
0 - a list of all qubits, for nq=5 it is
[1, 2, 3, 4, 5]
1 - a list of lists of single qubits, for nq=5
it is [[1], [2], [3], [4], [5]]
2 - randomly choose a pattern which is a list of
two lists, for example for nq=5 it can be
[[4, 1, 2], [5, 3]]
:param nq: number of qubits
:return: the pattern or None
Returns None if the pattern type is not relevant to the
number of qubits, i.e,, one of two cases:
pattern_type = 1 and nq = 1, which implies [[1]]
pattern_type = 2 and nq <= 2: - for nq=1 this is impossible
- for nq=2 this implies
[[1], [2]], which is already
tested when pattern_type = 1
'''
if pattern_type == 0:
res = [list(range(nq))]
elif pattern_type == 1:
if nq == 1:
return None
res = [[x] for x in range(nq)]
else:
if nq <= 2:
return None
shuffled_bits = list(range(nq))
random.shuffle(shuffled_bits)
split_loc = random.randint(1, nq-1)
res = [shuffled_bits[:split_loc], shuffled_bits[split_loc:]]
return res
@staticmethod
def choose_multiplier(mult_opt, len_pattern):
'''
:param multi_opt:
0: fixed length
1: vector of lengths
:param len_pattern: number of patterns
:return: the length multiplier
'''
if mult_opt == 0:
res = 1
else:
res = [i + 1 for i in range(len_pattern)]
return res
    def verify_circuit(self, circ, nq, rb_opts, vec_len, result, shots):
        '''
        For a single sequence, verifies that it meets the requirements:
        - Executing it on the ground state ends up in the ground state
        - It has the correct number of Cliffords
        - It fulfills the pattern, as specified by rb_patterns and
          length_multiplier
        :param circ: the sequence to check
        :param nq: number of qubits
        :param rb_opts: the specification that generated the set of
                        sequences which includes circ
        :param vec_len: the expected length vector of circ (one of
                        rb_opts['length_vector'])
        :param result: the output of the simulator when executing all the
                       sequences on the ground state
        :param shots: the number of shots in the simulator execution
        '''
        # Normalize a scalar multiplier into one entry per pattern so the
        # nested loops below can index it uniformly.
        if not hasattr(rb_opts['length_multiplier'], "__len__"):
            rb_opts['length_multiplier'] = [
                rb_opts['length_multiplier'] for i in range(
                    len(rb_opts['rb_pattern']))]
        ops = circ.data
        op_index = 0
        # for each cycle (the sequence should consist of vec_len cycles)
        for _ in range(vec_len):
            # for each component of the pattern...
            for pat_index in range(len(rb_opts['rb_pattern'])):
                # for each Clifford...
                for _ in range(rb_opts['length_multiplier'][pat_index]):
                    # for each basis gate up to the next barrier...
                    while ops[op_index].name != 'barrier':
                        # Verify that the gate acts on the correct qubits.
                        # This holds iff the sequence is composed of the
                        # correct sub-sequences, as specified by vec_len
                        # and rb_opts.
                        self.assertTrue(
                            all(x[1] in rb_opts['rb_pattern'][pat_index]
                                for x in ops[op_index].qargs),
                            "Error: operation acts on incorrect qubits")
                        op_index += 1
                    # increment because of the barrier gate
                    op_index += 1
        # Check that all shots returned to the all-zeros ground state.
        self.assertEqual(result.
                         get_counts(circ)['{0:b}'.format(0).zfill(nq)], shots,
                         "Error: %d qubit RB does not return the \
ground state back to the ground state" % nq)
    def test_rb(self):
        """ Main function of the test: generate RB sequences for 1 and 2
        qubits over all pattern/multiplier combinations, run them on the
        ideal simulator, and verify names, structure and ground-state
        return. """
        # Load simulator
        backend = qiskit.Aer.get_backend('qasm_simulator')
        # Test up to 2 qubits
        nq_list = [1, 2]
        for nq in nq_list:
            print("Testing %d qubit RB" % nq)
            for pattern_type in range(2):
                for multiplier_type in range(2):
                    # See documentation of choose_pattern for the meaning
                    # of the different pattern types
                    rb_opts = {}
                    rb_opts['nseeds'] = 3
                    rb_opts['length_vector'] = [1, 3, 4, 7]
                    rb_opts['rb_pattern'] = self.choose_pattern(
                        pattern_type, nq)
                    # if the pattern type is not relevant for nq
                    if rb_opts['rb_pattern'] is None:
                        continue
                    rb_opts['length_multiplier'] = self.choose_multiplier(
                        multiplier_type, len(rb_opts['rb_pattern']))
                    # Generate the sequences
                    try:
                        rb_circs, _ = rb.randomized_benchmarking_seq(**rb_opts)
                    except OSError:
                        skip_msg = ('Skipping tests for %s qubits because '
                                    'tables are missing' % str(nq))
                        print(skip_msg)
                        continue
                    # Perform an ideal execution on the generated sequences
                    # basis_gates = ['u1','u2','u3','cx'] # use U, CX for now
                    # Shelly: changed format to fit qiskit current version
                    basis_gates = 'u1, u2, u3, cx'
                    shots = 100
                    result = []
                    for seed in range(rb_opts['nseeds']):
                        result.append(
                            qiskit.execute(rb_circs[seed], backend=backend,
                                           basis_gates=basis_gates,
                                           shots=shots).result())
                    # Verify the generated sequences
                    for seed in range(rb_opts['nseeds']):
                        length_vec = rb_opts['length_vector']
                        for circ_index, vec_len in enumerate(length_vec):
                            self.assertEqual(
                                rb_circs[seed][circ_index].name,
                                'rb_seed_%s_length_%s' % (
                                    str(seed), str(vec_len)),
                                'Error: incorrect circuit name')
                            self.verify_circuit(rb_circs[seed][circ_index],
                                                nq, rb_opts,
                                                vec_len, result[seed], shots)
                        # NOTE(review): this compares the final circ_index
                        # (len(length_vector) - 1 == 3) against
                        # len(rb_circs), which is nseeds (3) -- they match
                        # only by coincidence; the intended check was
                        # probably against len(rb_circs[seed]) - 1. Confirm.
                        self.assertEqual(circ_index, len(rb_circs),
                                         "Error: additional circuits exist")
    def test_rb_utils(self):
        """ Test some of the utility calculations, e.g. coherence limit,
        against pre-computed reference values. """
        # Reference relaxation times and gate durations (units per the
        # rb_utils API -- presumably consistent time units; confirm).
        t1 = 100.
        t2 = 100.
        gate2Q = 0.5
        gate1Q = 0.1
        twoq_coherence_err = rb.rb_utils.coherence_limit(2, [t1, t1],
                                                         [t2, t2], gate2Q)
        oneq_coherence_err = rb.rb_utils.coherence_limit(1, [t1],
                                                         [t2], gate1Q)
        self.assertAlmostEqual(oneq_coherence_err, 0.00049975, 6,
                               "Error: 1Q Coherence Limit")
        self.assertAlmostEqual(twoq_coherence_err, 0.00597, 5,
                               "Error: 2Q Coherence Limit")
        # Two-qubit error-per-Clifford from per-gate errors and counts.
        twoq_epc = rb.rb_utils.twoQ_clifford_error([5.2, 5.2, 1.5],
                                                   [0, 1, -1],
                                                   [0.001, 0.0015, 0.02])
        self.assertAlmostEqual(twoq_epc, 0.0446283, 6,
                               "Error: 2Q EPC Calculation")
if __name__ == '__main__':
unittest.main()
|
4,204 | 2385882f040ef4bd0a3611bebfbb2ae5b3cd1dc6 | print('Boolean Exercise')
print(False or False)
print(False and False)
print(not True or not False) |
4,205 | e398908ba74306c5a746d7643b38f08651cf92ec | from datetime import datetime
from poop import objstore
class Comment(objstore.Item):
    """A reader comment persisted through the poop objstore ORM.

    Each comment points at the item it relates to (relatesToId /
    relatesToVersion) and optionally at a parent comment
    (relatesToCommentId) for threaded replies.
    """
    __typename__ = 'comment'
    __table__ = 'comment'
    relatesToId = objstore.column('relates_to_id')
    relatesToVersion = objstore.column('relates_to_version')
    posted = objstore.column()
    approved = objstore.column()
    # Class-level default; overwritten per instance in __init__.
    relatesToCommentId = None

    def __init__(self, *a, **k):
        # Pop our keyword arguments before delegating the rest to Item.
        relatesTo = k.pop('relatesTo')
        authorName = k.pop('authorName')
        authorEmail = k.pop('authorEmail')
        # NOTE(review): humanCheck is popped but never stored or validated
        # here -- presumably consumed upstream; confirm.
        humanCheck = k.pop('humanCheck')
        comment = k.pop('comment')
        relatesToCommentId = k.pop('relatesToCommentId')
        super(Comment, self).__init__(*a, **k)
        # relatesTo may be another stored item (use its id/version) or a
        # bare id (assume version 1).
        if hasattr(relatesTo, 'id'):
            self.relatesToId, self.relatesToVersion = relatesTo.id, relatesTo.version
        else:
            self.relatesToId, self.relatesToVersion = relatesTo, 1
        self.authorName = authorName
        self.comment = comment
        self.authorEmail = authorEmail
        # New comments are timestamped in UTC and start unapproved.
        self.posted = datetime.utcnow()
        self.approved = False
        self.relatesToCommentId = relatesToCommentId
def registerTypes(store):
    """Register this module's persistent types with the given store."""
    store.registerType(Comment)
|
4,206 | e78504971c51a98eed60ea8032502b6ce1a11f29 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Given an input with the following format
# x y yerr
# on standard input, print a fit of y = ax+b
# to the data.
import sys, string
from math import sqrt
def cP(X, Yerr):
    """Return P = sum_i x_i^2 / yerr_i^2 for the weighted line fit.

    :param X: sequence of x values
    :param Yerr: sequence of y uncertainties, parallel to X
    """
    # zip + generator replaces the index loop and avoids shadowing the
    # builtin `sum` with a local accumulator.
    return sum((x * x) / (e * e) for x, e in zip(X, Yerr))
def cQ(Yerr):
    """Return Q = sum_i 1 / yerr_i^2 (sum of inverse variances)."""
    return sum(1 / (e * e) for e in Yerr)
def cR(X, Yerr):
    """Return R = sum_i x_i / yerr_i^2."""
    return sum(x / (e * e) for x, e in zip(X, Yerr))
def cS(X, Y, Yerr):
    """Return S = sum_i x_i * y_i / yerr_i^2."""
    return sum((x * y) / (e * e) for x, y, e in zip(X, Y, Yerr))
def cT(Y, Yerr):
    """Return T = sum_i y_i / yerr_i^2."""
    return sum(y / (e * e) for y, e in zip(Y, Yerr))
def stdin2lists():
    """Read whitespace-separated "x y yerr" rows from stdin (Python 2).

    Blank lines and lines starting with '#' are skipped; reading stops
    at EOF. Returns [X, Y, Yerr] as three parallel lists of floats.
    """
    X = []
    Y = []
    Yerr = []
    while True:
        try:
            r = raw_input('')
            line = string.split(r)
            # Skip empty lines and comment lines.
            if len(r) == 0 or r[0] == '#':
                continue
            f = map(lambda x: float(x), line)
            X.append(f[0])
            Y.append(f[1])
            Yerr.append(f[2])
        except EOFError:
            break
    return [X, Y, Yerr]
# Read the data, compute the weighted-least-squares sums, and solve the
# 2x2 normal equations for the line y = a1*x + a0.
data = stdin2lists()
P = cP(data[0], data[2])
Q = cQ(data[2])
R = cR(data[0], data[2])
S = cS(data[0], data[1], data[2])
T = cT(data[1], data[2])
# Slope and intercept from the closed-form weighted-fit solution.
a1 = (Q*S - R*T)/(P*Q - R*R)
a0 = (P*T - R*S)/(P*Q - R*R)
# Standard errors of the slope and intercept.
ea1 = sqrt(Q/(P*Q - R*R))
ea0 = sqrt(P/(P*Q - R*R))
print "{:e} ± {:e}".format(a1, ea1)
print "{:e} ± {:e}".format(a0, ea0)
|
4,207 | 1555583cd3d8938cbaeeac2d1f74bb9c3858f26d | import tensorflow as tf
def makeMnistModel():
    """Train a small dense classifier on MNIST and save it to disk.

    Downloads MNIST via Keras, normalizes pixels to [0, 1], trains for
    5 epochs, and writes the model to ./mnist_model.h5.
    """
    mnist = tf.keras.datasets.mnist
    (X_train, y_train), (_,_) = mnist.load_data()
    # Scale the 0-255 pixel values into [0, 1].
    X_train= X_train/255.0
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28,28)),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10, activation='softmax')
    ])
    # Sparse loss: labels are integer class ids, not one-hot vectors.
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(X_train, y_train,epochs=5)
    model.save('./mnist_model.h5')

makeMnistModel()
4,208 | de557c3c1455acc0a3facfca5729a010f3d123dc | # from cis_dna import
import cis_config as conf
import protocol_pb2 as proto
import uuid
import random
import math
import dna_decoding
import numpy as np
def move(cell):
    """Movement behaviour placeholder -- not implemented yet."""
    pass
def is_alive(cell):
    """Return True if the cell's energy is at or above the starvation
    threshold (conf.ENERGY_THRESHOLD), False otherwise."""
    # Direct comparison replaces the redundant if/else over a boolean.
    return cell.energy_level >= conf.ENERGY_THRESHOLD
def builds_connection_after_division(cell):
    """Ask the DNA whether a dividing cell should connect to its child."""
    return dna_decoding.builds_connection_after_division(
        cell.dna, len(cell.connections))
def dna_copy_or_sub_slice(cell):
    """Return the DNA for a child cell: a sub-slice when the DNA encodes
    sub-slicing (given the connection count), else the parent's full DNA."""
    if dna_decoding.dna_should_sub_slice(cell.dna, len(cell.connections)):
        return dna_decoding.dna_sub_slice(cell.dna, len(cell.connections))
    return cell.dna
def divide(cell):
    """Attempt cell division; returns the child proto.Cell, or None
    (implicitly) when the parent lacks the energy to divide.

    Division requires the parent's energy to exceed its DNA-defined
    division threshold plus the division cost (which includes the
    child's initial energy). The cost is deducted from the parent; the
    child's DNA is a possibly-mutated copy or sub-slice of the parent's.
    """
    initial_energy = int(dna_decoding.initial_energy(cell.dna))
    cost = conf.DIVISION_ENERGY_COST + initial_energy
    if cell.energy_level > dna_decoding.division_treshold(cell.dna) + cost:
        cell.energy_level -= cost
        child_id = str(uuid.uuid1())
        child_connections = []
        if builds_connection_after_division(cell):
            # Link both ways: child -> parent and parent -> child.
            child_connections.append(proto.Connection(connected_to=cell.id))
            conn = cell.connections.add()
            conn.connected_to = child_id
        child_dna = dna_decoding.mutate_dna_with_chance(
            dna_copy_or_sub_slice(cell),
            conf.MUTATION_CHANCE
        )
        # The child spawns displaced from the parent, at rest.
        new_cell = proto.Cell(
            id=child_id,
            energy_level=initial_energy,
            pos=randomly_shifted_pos(cell.pos, 10),
            vel=proto.Vector(
                x=0,
                y=0,
                z=0),
            dna=child_dna,
            connections=child_connections)
        return new_cell
def randomly_shifted_pos(pos, shift_dist):
    """Return a new proto.Vector at `pos` plus a random 3D offset drawn
    by random_vector_of_length(shift_dist)."""
    d_x, d_y, d_z = random_vector_of_length(shift_dist)
    return proto.Vector(
        x=pos.x + d_x,
        y=pos.y + d_y,
        z=pos.z + d_z,
    )
def random_vector_of_length(l):
    """Return a random 3-component numpy vector with Euclidean length l.

    A random direction is drawn from a uniform box, then rescaled so the
    result has length exactly l.
    """
    vec = np.random.uniform(1 / 10 * 6, 2, [3]) - 1
    dist = np.sqrt(vec.dot(vec))
    factor = l / dist
    # BUG FIX: the scale factor was computed but never applied, so the
    # returned vector had an arbitrary length instead of l.
    return vec * factor
|
4,209 | d234034f7f232e842d0b4e465ea6ec314af6964d | from scrapera.image.duckduckgo import DuckDuckGoScraper
# Scrape one page of DuckDuckGo image results for the query into the given
# output directory (third-party `scrapera` library; replace the path).
scraper = DuckDuckGoScraper()
scraper.scrape('spongebob squarepants', 1, r'path/to/output/directory')
|
4,210 | 9150eb53d309e75299775cd9524a688e8dc2ff76 | import xml.etree.ElementTree as ET
from collections import OrderedDict
import json
import threading
class MyThread(threading.Thread):
    """Worker thread that converts one dblp XML shard to JSON lines.

    Thread N parses ../../../data/dblp.N.xml and writes one JSON object
    per kept publication to ../../../data/resultN.txt.
    """

    def __init__(self, filenum):
        threading.Thread.__init__(self)
        # Index of the XML shard (and of the output file) this thread owns.
        self.filenum = filenum
        print('Inicio del thread:', str(self.filenum))

    def run(self):
        # dblp XML uses many named HTML entities that ElementTree's parser
        # does not know; register them all so parsing does not fail.
        parser = ET.XMLParser(encoding='ISO-8859-1')
        parser.entity["agrave"] = 'à'
        parser.entity["uuml"] = 'ü'
        parser.entity["Eacute"] = 'É'
        parser.entity["eacute"] = 'é'
        parser.entity["aacute"] = 'á'
        parser.entity["iacute"] = 'í'
        parser.entity["ouml"] = 'ö'
        parser.entity["ccedil"] = 'ç'
        parser.entity["egrave"] = 'è'
        parser.entity["auml"] = 'ä'
        parser.entity["uacute"] = 'ú'
        parser.entity["aring"] = 'å'
        parser.entity["oacute"] = 'ó'
        parser.entity["szlig"] = 'ß'
        parser.entity["oslash"] = 'ø'
        parser.entity["yacute"] = 'ỳ'
        parser.entity["iuml"] = 'ï'
        parser.entity["igrave"] = 'í'
        parser.entity["ocirc"] = 'ô'
        parser.entity["icirc"] = 'î'
        parser.entity["Uuml"] = 'Ü'
        parser.entity["euml"] = 'ë'
        parser.entity["acirc"] = 'â'
        parser.entity["atilde"] = 'ã'
        parser.entity["Uacute"] = 'Ù'
        parser.entity["Aacute"] = 'À'
        parser.entity["ntilde"] = 'ñ'
        parser.entity["Auml"] = 'Ä'
        parser.entity["Oslash"] = 'Ø'
        parser.entity["Ccedil"] = 'Ç'
        parser.entity["otilde"] = 'õ'
        parser.entity["ecirc"] = 'ê'
        parser.entity["times"] = '×'
        parser.entity["Ouml"] = 'Ö'
        parser.entity["reg"] = '®'
        parser.entity["Aring"] = 'Å'
        parser.entity["Oacute"] = 'Ò'
        parser.entity["ograve"] = 'ó'
        parser.entity["yuml"] = 'ÿ'
        parser.entity["eth"] = 'ð'
        parser.entity["aelig"] = 'æ'
        parser.entity["AElig"] = 'Æ'
        parser.entity["Agrave"] = 'Á'
        parser.entity["Iuml"] = 'Ï'
        parser.entity["micro"] = 'µ'
        parser.entity["Acirc"] = 'Â'
        parser.entity["Otilde"] = 'Õ'
        parser.entity["Egrave"] = 'É'
        parser.entity["ETH"] = 'Ð'
        parser.entity["ugrave"] = 'ú'
        parser.entity["ucirc"] = 'û'
        parser.entity["thorn"] = 'þ'
        parser.entity["THORN"] = 'Þ'
        parser.entity["Iacute"] = 'Ì'
        parser.entity["Icirc"] = 'Î'
        parser.entity["Ntilde"] = 'Ñ'
        parser.entity["Ecirc"] = 'Ê'
        parser.entity["Ocirc"] = 'Ô'
        parser.entity["Ograve"] = 'Ó'
        parser.entity["Igrave"] = 'Í'
        parser.entity["Atilde"] = 'Ã'
        parser.entity["Yacute"] = 'Ỳ'
        parser.entity["Ucirc"] = 'Û'
        parser.entity["Euml"] = 'Ë'
        xml_file = '../../../data/dblp.' + str(self.filenum) + '.xml'
        e = ET.parse(xml_file, parser=parser).getroot()
        tot_docs = len(e)
        doc_number = 0
        # Progress flags so each milestone is reported only once.
        mitad = False
        max_mitad = False
        complete = False
        d = OrderedDict()
        # Publication types and fields that are kept in the output.
        docs = ['article', 'inproceedings', 'incollection']
        tags = ['author', 'year', 'title']
        # Truncate the results file, then stream one JSON object per kept
        # publication into it.
        with open('../../../data/result' + str(self.filenum) +'.txt', 'w') as out:
            out.writelines('')
            # Accumulate each record in `d` and dump it as one JSON line.
            for child1 in e:
                if ((doc_number / tot_docs > 0.5) & (not mitad)):
                    print('50% de los documentos procesados en el thread',str(self.filenum))
                    mitad = True
                if ((doc_number / tot_docs > 0.9) & (not max_mitad)):
                    print('90% de los documentos procesados en el thread',str(self.filenum))
                    max_mitad = True
                # NOTE(review): doc_number/tot_docs is checked before the
                # increment below, so it never reaches exactly 1.0 and the
                # 100% message is never printed -- confirm intended.
                if ((doc_number / tot_docs == 1.0) & (not complete)):
                    print('100% de los documentos procesados en el thread',str(self.filenum))
                    complete = True
                if (child1.tag in docs):
                    d['Type'] = child1.tag
                    d['Authors'] = []
                    for child2 in child1:
                        if (child2.tag in tags):
                            if (child2.tag == 'author'):
                                dicc_aut = dict()
                                dicc_aut["Nombre"] = child2.text
                                d['Authors'].append(dicc_aut)
                            elif child2.tag == "title":
                                d["Title"] = child2.text
                            elif child2.tag == "year":
                                d["Year"] = child2.text
                    out.writelines(json.dumps(d) + '\n')
                doc_number += 1
            # Redundant: the with-statement closes the file on exit.
            out.close()
# Launch seven workers, one per dblp shard (dblp.0.xml .. dblp.6.xml).
for i in range(7):
    MyThread(i).start()
4,211 | 0934163fc6461e30a73c06e74b3a5e983ed2fa02 | import csv
import json
import re
import itertools
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from networkx.algorithms import community
import snap
import numpy
# setting up data structures to map actor IDs to objects in order to increase run time.
csv.field_size_limit(100000000)
curr_actor_id = 1
all_actors = dict()
all_actors_id_map = dict()
all_actors_frequencies = dict()
edges = set()
weights = dict()
movies = list()
movies_dict = dict()
edges_last_60_20 = set()
comm = list()
PG = nx.Graph()
class Actor:
    """An actor node: a unique id, a display name, and the set of
    Movie objects the actor appeared in."""

    def __init__(self, name: str, id: int):
        self.filmography = set()
        self.name = name
        self.id = id

    def getName(self):
        """Return the actor's display name."""
        return self.name

    def getId(self):
        """Return the actor's unique integer id."""
        return self.id

    def getFilms(self):
        """Return the (mutable) set of movies this actor appeared in."""
        return self.filmography

    def updateFilms(self, film: int):
        """Record one more movie in this actor's filmography."""
        self.filmography.add(film)
class Movie:
    """A movie node: numeric id, optional title, release year, and the
    set of Actor objects credited in it."""

    def __init__(self, id: int):
        self.actors = set()
        self.name = ""
        self.id = id
        self.year = 0

    def getName(self):
        """Return the movie title (empty string if never set)."""
        return self.name

    def getActors(self):
        """Return the (mutable) set of credited actors."""
        return self.actors

    def getId(self):
        """Return the movie's numeric id."""
        return self.id

    def getDate(self):
        """Return the release year (0 if unknown)."""
        return self.year

    def updateActors(self, actors_to_add):
        """Add actors to this movie's cast.

        BUG FIX: the original defined updateActors twice; the second
        definition silently replaced the first, making the single-actor
        form unreachable. This merged version accepts either an iterable
        of actors or one actor object.
        """
        try:
            for actor in actors_to_add:
                self.actors.add(actor)
        except TypeError:
            # Not iterable: treat the argument as a single actor.
            self.actors.add(actors_to_add)

    def setDate(self, i: int):
        """Set the release year."""
        self.year = i
#parsing data from csv and dropping crew column
reader = pd.read_csv('credits.csv', header = 0)
crewless = reader.drop('crew', axis = 1)
cleanup = re.compile('[^a-zA-Z\s]')
#skip the header row
row = crewless.iterrows()
#loop through each row
for x in range(len(reader.index)):
cur_row = next(row)
data = cur_row[1][0]
id = cur_row[1][1]
actors = set()
#create an instance of a Movie for each row
movie = Movie(int(id))
movies.append(movie)
movies_dict[id] = movie
#split the string around each name
split_around_names = data.split('name')
#parse actors, and create an instance of Actor for each actor in each movie
for y in range(1, len(split_around_names)):
#Cleaning up characters and spaces around the actor's name
actorName = str(split_around_names[y].split('order')[0])
actorName = cleanup.sub(' ', actorName)
actorName = actorName.strip()
#Create the Actor and update his/her filmography
if actorName not in all_actors.keys():
a = Actor(actorName, curr_actor_id)
curr_actor_id += 1
a.updateFilms(movie)
actors.add(a)
all_actors[actorName] = a
all_actors_frequencies[a] = 1
all_actors_id_map[curr_actor_id] = a
else:
all_actors[actorName].updateFilms(movie)
all_actors_frequencies[a] += 1
actors.add(all_actors[actorName])
#Update the set of actors per movie
movie.updateActors(actors)
# ---------------------------------------------------------------------------
# Parse movies_metadata.csv to attach a release year to each known movie.
# ---------------------------------------------------------------------------
reader = pd.read_csv('movies_metadata.csv', header = 0)
reader.drop(reader.columns.difference(['id', 'release_date']), 1, inplace=True)
row = reader.iterrows()
cleaned_actors = set()
cleaned_movies_1 = set()
cleaned_movies = set()
# adding ids to movies from movie files
for x in range(len(reader.index)):
    cur_row = next(row)
    id = cur_row[1][0]
    date = cur_row[1][1]
    id = int(id)
    # The first four characters of the release date are the year.
    # NOTE(review): assumes release_date is always a well-formed string;
    # a missing date (NaN) would raise here -- confirm the input is clean.
    year = date[:4]
    year_int = int(year)
    if id in movies_dict.keys():
        movies_dict[id].setDate(year_int)
        cleaned_movies_1.add(movies_dict[id])
def clean(threshold: int):
    """Keep only actors with more than `threshold` films.

    Survivors are collected into the global `cleaned_actors`; all other
    actors are removed from the cast set of every movie they appeared in.
    """
    for actorName in all_actors.keys():
        if len(all_actors[actorName].getFilms()) > threshold:
            cleaned_actors.add(all_actors[actorName])
        else:
            for movie in all_actors[actorName].getFilms():
                if all_actors[actorName] in movie.getActors():
                    movie.getActors().remove(all_actors[actorName])
def clean_movies(threshold: int):
    """Keep only movies released within `threshold` years of 2017.

    Survivors are collected into the global `cleaned_movies`; older
    movies are removed from the filmography of every credited actor.
    """
    for movie in cleaned_movies_1:
        if 2017 - movie.getDate() <= threshold:
            cleaned_movies.add(movie)
        else:
            for actor in movie.getActors():
                s = actor.getFilms()
                # NOTE(review): set.remove raises KeyError if the movie is
                # already absent from this actor's set -- confirm the two
                # sides always stay in sync.
                s.remove(movie)
def createGraph():
    """Build the actor co-appearance graph from the cleaned data.

    Nodes are cleaned actors (by id); an edge links two actors who share
    a movie, with the global `weights` counting how many movies they
    share. Edges from movies released 20-60 years before 2017 also feed
    the global prediction graph PG. Returns the full graph G.
    """
    counter = 0
    G = nx.Graph()
    PG_actors = set()
    #fill graph with nodes
    for actor in cleaned_actors:
        G.add_node(actor.getId())
    #generate a list of edges and weights based on frequencie of combination appearances
    for movie in cleaned_movies:
        actorIds = set()
        for actor in movie.getActors():
            actorIds.add(actor.getId())
        # every unordered pair of co-stars in this movie
        combinations = itertools.combinations(actorIds, 2)
        for comb in combinations:
            reverse = comb[::-1]
            if (comb not in edges) and (reverse not in edges):
                counter+=1
                if (2017 - movie.getDate() < 60 and 2017 - movie.getDate() > 20):
                    if (comb not in edges_last_60_20) and (reverse not in edges_last_60_20):
                        edges_last_60_20.add(comb)
                edges.add(comb)
                weights[comb] = 1
            else:
                # Known pair: bump whichever orientation is stored.
                if comb in edges:
                    weights[comb] = weights[comb] + 1
                elif reverse in edges:
                    weights[reverse] = weights[reverse] + 1
    G.add_edges_from(edges)
    # Collect the node set for the 20-60-year prediction subgraph.
    for x in edges_last_60_20:
        if x[0] not in PG_actors:
            PG_actors.add(x[0])
        if x[1] not in PG_actors:
            PG_actors.add(x[1])
    PG.add_nodes_from(PG_actors)
    PG.add_edges_from(edges_last_60_20)
    return G
def centrality_analysis():
    """Report the node(s) with the single highest score for three
    centrality measures, writing counts to disk and names to stdout.

    NOTE(review): despite the `top_10` names, only the maximum score
    (sorted_values[0]) is used; all nodes achieving it are reported.
    """
    types = [nx.eigenvector_centrality, nx.harmonic_centrality, nx.degree_centrality]
    for x in types:
        # based upon cleaning values chosen, choose a directory to store results to.
        file = open('./centrality/40_10/centrality_results_'+x.__name__+'.txt', 'w')
        nodes = x(graph)
        top_10 = list()
        top_10_ids = list()
        sorted_values = list(nodes.values())
        sorted_values.sort()
        sorted_values.reverse()
        top_10 = sorted_values[0]
        # print(sorted_values)
        # for y in top_10:
        # NOTE(review): this inner `x` shadows the centrality function
        # bound by the outer loop.
        for x in nodes.keys():
            if nodes[x] == top_10:
                top_10_ids.append(x)
        file.write(str(len(top_10_ids)) + '\n')
        for x in top_10_ids:
            for y in cleaned_actors:
                if x == y.getId():
                    print(y.getName())
                    #file.write(y.getName() + '\n')
        file.close()
def community_analysis():
    """Run Girvan-Newman on the graph until at least 10 communities
    exist, logging each intermediate partition to disk."""
    f = open('./community/communities_outputs.txt', 'w')
    communities_generator = nx.community.girvan_newman(graph)
    communities = next(communities_generator)
    size = len(communities)
    while size < 10:
        print(communities)
        communities = next(communities_generator)
        size = len(communities)
        f.write('community iteration: size = {}, {} \n'.format(size, communities))
def link_pred():
    """Jaccard link prediction on PG for distance-2 actor pairs.

    For each pair of nodes exactly two hops apart in PG, compute the
    Jaccard coefficient, report the top-k pairs for several k, and
    measure precision@k against the full edge set `edges`.
    """
    # All node pairs at shortest-path distance exactly 2 in PG.
    splPG = dict(nx.all_pairs_shortest_path_length(PG, cutoff=2))
    friends_PG = list()
    for x in splPG.keys():
        for y in splPG[x].keys():
            if splPG[x][y] == 2:
                l = list()
                l.append(x)
                l.append(y)
                friends_PG.append(l)
    predictions = nx.jaccard_coefficient(PG, friends_PG)
    results = list()
    for x in predictions:
        results.append(x)
    # Highest coefficient first.
    results.sort(key=lambda x: x[2])
    results.reverse()
    k_vals = [10,20,50,100]
    for k in k_vals:
        f = open('./link_pred/link_prediction_values_jaccard' + str(k) + '.txt', 'w')
        count = 0
        while (count < k):
            print('({}, {}),jaccard: {}'.format(all_actors_id_map[results[count][0]].getName(), all_actors_id_map[results[count][1]].getName(), results[count][2]))
            f.write('({}, {}),jaccard: {}\n'.format(all_actors_id_map[results[count][0]].getName(),all_actors_id_map[results[count][1]].getName(),results[count][2]))
            count+=1
        top_k = list()
        precision_at_k = 0
        for x in range(k):
            top_k.append(results[x])
        count = 0
        for val in top_k:
            tup = (val[0], val[1])
            # A predicted pair counts as a hit if it is an actual edge.
            if tup in edges:
                count += 1
        precision_at_k = count / k
        print('precision @ K{}: {}\n'.format(k, precision_at_k))
        f.write('precision @ K{}: {}'.format(k, precision_at_k))
        f.close()
#Convert community results from IDs to Actor name
def convert_id_actor():
    """Rewrite the saved community output from actor ids to actor names."""
    file = open('./community_/communities_outputs.txt')
    for row in file:
        items = row.split(', ')
        i = 0
        while i < len(items):
            # NOTE(review): strip's result is discarded (strings are
            # immutable); int() tolerates the trailing newline anyway.
            items[i].strip('\n')
            items[i] = int(items[i])
            i+=1
        i = 0
        this_row = list()
        i= 0
        while i < len(items):
            this_row.append(items[i])
            i+=1
        comm.append(this_row)
    file.close()
    file = open('./actorname_communities.txt', 'w')
    # Map ids to names; unknown ids become the string 'None'.
    for x in range(len(comm)):
        for y in range(len(comm[x])):
            try:
                comm[x][y] = all_actors_id_map[comm[x][y]].getName()
            except:
                comm[x][y] = 'None'
    comm.reverse()
    for x in range(len(comm)):
        print("Community #{}: {}".format(x, comm[x]))
        file.write("Community #{}: {}\n".format(x, comm[x]))
        file.flush()
    file.close()
# Build the graphs: keep movies from the last 60 years and actors with more
# than 30 films, then report basic graph stats.
clean_movies(60)
clean(30)
graph = createGraph()
print(nx.info(graph))
print(nx.info(PG))
# To perform the analysis, uncomment the respective function(s); additionally, uncomment #convert_id_actor() for community_analysis.
# centrality_analysis()
# community_analysis()
# convert_id_actor()
# link_pred()
|
4,212 | cb4ca5f91c7cd47197784085258536166055afe9 | # first we have to label the Banana / Apple / Tomato in the images
# we will use labelme
# pip install pyqt5
# pip install labelme
# after labeling the images. lets test it.
#Each image has a json file
import pixellib
from pixellib.custom_train import instance_custom_training
train_maskRcnn = instance_custom_training()
# num_classes=3 since we have 3 classes : Banana , Apple , Tomato
train_maskRcnn.modelConfig(network_backbone="resnet101",num_classes=3, batch_size=1)
# Pretrained COCO weights; download the 2.0 release from:
#https://github.com/matterport/Mask_RCNN/releases
train_maskRcnn.load_pretrained_model("c:/models/mask_rcnn_coco.h5")
train_maskRcnn.load_dataset("Object-Detection/Pixellib/customModel")
# The model directory has several files in this format : mask_rcnn_model.*
# Each checkpoint is saved with its epoch number; we want to evaluate every
# checkpoint and keep the best one.
# To test a single checkpoint:
#train_maskRcnn.evaluate_model("c:/models/mask_rcnn_model.051-0.252276.h5")
# The evaluation for this epoch is : 0.636364
# To evaluate all the models: since the directory is not empty, copy all the
# checkpoints to a fresh directory and evaluate the whole directory.
train_maskRcnn.evaluate_model("c:/models/eval")
# These are the results :
# c:/models/eval\mask_rcnn_model.001-1.361029.h5 evaluation using iou_threshold 0.5 is 0.000000
# c:/models/eval\mask_rcnn_model.002-0.597196.h5 evaluation using iou_threshold 0.5 is 0.000000
# c:/models/eval\mask_rcnn_model.004-0.463875.h5 evaluation using iou_threshold 0.5 is 0.272727
# c:/models/eval\mask_rcnn_model.006-0.376810.h5 evaluation using iou_threshold 0.5 is 0.272727
# c:/models/eval\mask_rcnn_model.008-0.342451.h5 evaluation using iou_threshold 0.5 is 0.363636
# c:/models/eval\mask_rcnn_model.010-0.301472.h5 evaluation using iou_threshold 0.5 is 0.454545
# c:/models/eval\mask_rcnn_model.015-0.267621.h5 evaluation using iou_threshold 0.5 is 0.590909
# # this is the best model - since it has the high evaluate number : 0.636
# c:/models/eval\mask_rcnn_model.051-0.252276.h5 evaluation using iou_threshold 0.5 is 0.636364
# mask_rcnn_model.051-0.252276.h5 #
|
4,213 | a6c07146f1cbc766cd464dab620d1fb075759c12 | n=int(input("Enter any int number:\n"))
# Print the multiplication table for n from 1x through 12x.
for x in range(1, 13):
    print(n ," x ", x ," = ", n*x)
|
4,214 | 52064b518ad067c9906e7de8542d9a399076a0b5 | # 1.- Crear una grafica que muestre la desviacion tipica de los datos cada dia para todos los pacientes
# Exercise 1: plot the per-day standard deviation across all patients.
# Exercise 2: plot the per-day maximum, mean and minimum inflammation
#             together on one figure.
import numpy as np
data = np.loadtxt(fname='inflammation-01.csv', delimiter=',')
import matplotlib.pyplot as plt
plt.plot(data.std(axis=0))  # per-day standard deviation (axis=0: over patients)
plt.show()
plt.plot(data.max(axis=0))  # per-day maximum, mean and minimum inflammation
plt.plot(data.mean(axis=0))
plt.plot(data.min(axis=0))
4,215 | 535ee547475fbc2e1c0ee59e3e300beda1489d47 | import pickle
import time
DECAY = 0.95
DEPTH = 2
def init_cache(g):
    """Reset graph g's simrank memoization cache to an empty mapping."""
    g.cache = dict()
def return_and_cache(g, element, val):
    """Memoize `val` under key `element` in g's cache, then return it."""
    g.cache[element] = val
    return val
def simrank_impl(g, node1, node2, t, is_weighted):
    '''
    Recursive (optionally weighted) simrank with memoization.

    :param g: graph with .authors (id -> node with an .edges dict) and
              a .cache dict (see init_cache)
    :param node1: first node id
    :param node2: second node id
    :param t: remaining recursion depth
    :param is_weighted: weight neighbour pairs by edge-weight products
    :return: similarity score (1 for identical nodes, 0 at depth 0)
    '''
    if node1 == node2:
        return 1
    if t == 0:
        return 0
    # PERF FIX: test membership on the dict itself (O(1)); the original
    # used `in g.cache.keys()`, which on Python 2 builds a list and scans
    # it linearly on every call.
    if (node1, node2, t) in g.cache:
        return g.cache[(node1, node2, t)]
    neighbours1 = g.authors[node1].edges
    neighbours2 = g.authors[node2].edges
    # Cross product of the two neighbourhoods, tagged with pair weights.
    if is_weighted:
        neighbours_mult = [(neighbours1[i]*neighbours2[j], i, j) for i in neighbours1.keys() for j in neighbours2.keys()]
    else:
        neighbours_mult = [(1, i, j) for i in neighbours1.keys() for j in neighbours2.keys()]
    simrank_sum = sum([mult*simrank_impl(g, i, j, t-1, is_weighted) for (mult, i, j) in neighbours_mult])
    # NOTE(review): normalize is 0 when either node has no neighbours,
    # which raises ZeroDivisionError -- confirm isolated nodes cannot occur.
    normalize = sum([mult for (mult, i, j) in neighbours_mult])
    return return_and_cache(g, (node1, node2, t), (DECAY/normalize)*simrank_sum)
def simrank(g, node1, node2, depth=DEPTH):
    '''
    Non-weighted simrank between node1 and node2; resets the cache first
    and logs the wall-clock time taken.
    '''
    init_cache(g)
    start = time.time()
    res = simrank_impl(g, node1, node2, depth, False)
    end = time.time()
    # Fix: Python-2 print statement -> print() call (rest of module is py3).
    print("simrank took %f seconds" % (end - start))
    return res


def wsimrank(g, node1, node2, depth=DEPTH):
    '''
    Weighted simrank between node1 and node2; resets the cache first
    and logs the wall-clock time taken.
    '''
    init_cache(g)
    start = time.time()
    res = simrank_impl(g, node1, node2, depth, True)
    end = time.time()
    # Fix: Python-2 print statement -> print() call.
    print("weighted simrank took %f seconds" % (end - start))
    return res
def read_neighbours(g):
    '''
    Read neighbours of all nodes from disk into memory.
    Neighbours are assumed to be stored under the "neighbours" directory,
    one pickle file per author id.
    '''
    # Fixes: iteritems()/print-statement were Python-2 only, and the opened
    # pickle files were never closed.
    for i, (auth_id, auth) in enumerate(g.authors.items()):
        with open("neighbourhood/%s" % auth_id, 'rb') as fh:
            auth.neighbours = pickle.load(fh)
        # Progress log every 500 authors.
        if (i % 500) == 0:
            print("reading neighbours, iteration %d out of %d" % (i, len(g.authors)))
def experiment_phaze1():
    '''
    Load the preprocessed graph from disk, attach the neighbour sets and
    seed the manually boosted edge between the two experiment authors.
    '''
    print("loading graph")
    # Fix: pickle requires binary mode under Python 3; also close the handle.
    with open("processed_graph.pickle", 'rb') as fh:
        g = pickle.load(fh)
    read_neighbours(g)
    # Manually boost the edge weight between the two experiment authors.
    g.authors[828114].edges[14607] = 4
    g.authors[14607].edges[828114] = 4
    return g
def experiment_phaze2(g, file_name):
    '''
    Compute simrank and weighted simrank between the main author (first id
    in file_name) and every other id listed there, then write the results
    to "<file_name>.csv".
    '''
    import csv  # fix: csv was used below but never imported

    # Fixes: map() is an iterator in Python 3 so it cannot be indexed, and
    # the input/output files were never closed.
    with open(file_name) as fh:
        area = [int(line.strip()) for line in fh.readlines()]
    main_auth = area[0]
    area = area[1:]
    results = [["name", "simrank", "weighted simrank"]]
    for aid in area:
        print("computing author %d %s:" % (aid, g.authors[aid].name))
        ws = str(wsimrank(g, main_auth, aid, 3))
        print("wsimrank: " + ws)
        s = str(simrank(g, main_auth, aid, 3))
        print("simrank: " + s)
        results.append([g.authors[aid].name, s, ws])
    with open(file_name + ".csv", 'w') as out:
        csv.writer(out).writerows(results)
|
# Print each whitespace-separated word of the sentence on its own line.
text = "I love Python Programming"
print("\n".join(text.split()))
4,217 | d79e65b7aa09066230dec1a472f4535dff4123b5 | from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph
from reportlab.lib.styles import getSampleStyleSheet
def paragraph_spacing():
    """Generate paragraph_spacing.pdf: two paragraphs whose vertical gap
    comes from the style, not from explicit <br/> markup."""
    doc = SimpleDocTemplate("paragraph_spacing.pdf", pagesize=letter)
    styles = getSampleStyleSheet()
    # Produces spacing between paragraphs so no <br/> is needed.
    styles["Normal"].spaceBefore = 10
    styles["Normal"].spaceAfter = 10
    flowables = []
    text = """
This <b>text</b> is important,
not <strong>strong</strong>.
"""
    para = Paragraph(text, style=styles["Normal"])
    flowables.append(para)
    # Second, identical paragraph — the gap between the two demonstrates
    # the spaceBefore/spaceAfter settings.
    text = """
This <b>text</b> is important,
not <strong>strong</strong>.
"""
    para = Paragraph(text, style=styles["Normal"])
    flowables.append(para)
    doc.build(flowables)


if __name__ == "__main__":
    paragraph_spacing()
|
4,218 | 7f4a5779564efde7eaf08741d00254dd4aa37569 | # coding=utf-8
import pytest
from twitter_tunes.scripts import redis_data
from mock import patch
# (raw bytes as stored in redis, expected parsed dict) pairs for
# test_parse_redis_data.
REDIS_PARSE = [
    (b"{'trend3': 'url3', 'trend2': 'url2', 'trend1': 'url1'}",
     {'trend1': 'url1', 'trend2': 'url2', 'trend3': 'url3'}),
    (b"{}", {}),
    (b"{'hello':'its me'}", {'hello': 'its me'}),
    (b"{'trends': ['trend1', 'trend2', 'trend3']}",
     {'trends': ['trend1', 'trend2', 'trend3']}),
    (b"{'bob': []}",
     {'bob': []}),
    (b"{'hello': [u'its me']}", {'hello': ['its me']}),
]

# Canonical well-formed redis payload used by the get_redis_data tests.
GOOD_REDIS_RETURN = b"{'trend3': 'url3', 'trend2': 'url2', 'trend1': 'url1'}"

# Sample of raw trend names; several contain apostrophes on purpose.
TWITTER_TRENDS = ["D'Angelo Russell",
                  '#ThenItAllWentHorriblyWrong',
                  '#SELFIEFORSEB',
                  '#ILikeWhatYouHave',
                  '#DolanTwinsNewVideo',
                  '#ManateePickUpLines',
                  'Wendy Bell',
                  'Brannen Greene',
                  'Jon Lester',
                  'Alison Rapp']

# (raw trend list, expected list with apostrophes replaced by spaces) pairs
# for test_parse_redis_twiter_trends.
PARSE_LIST = [
    (["D'Angelo Russell"], ['D Angelo Russell']),
    (["B'O'B"], ['B O B']),
    (["D''Angelo Russell"], ['D Angelo Russell']),
    (["''"], [' ']),
    (["D'Angelo Russ'ell"], ['D Angelo Russ ell']),
]
# The tests below patch redis.from_url, so no live redis server is needed;
# from_url() returns a shared MagicMock client whose methods are stubbed.

@pytest.mark.parametrize('data, parsed', REDIS_PARSE)
def test_parse_redis_data(data, parsed):
    """Test to see if data dict in bytes is parsed."""
    assert redis_data.parse_redis_data(data) == parsed


def test_parse_redis_data_error():
    """Test to see if parse redis raises value error if bad input."""
    with pytest.raises(ValueError):
        redis_data.parse_redis_data(b"this is some data")


@patch('redis.from_url')
def test_get_redis_data_good_redis_key(from_url):
    """Test to see if get redis data returns data dictionary."""
    # Stub the client's .get() to return a well-formed payload.
    mock_method = from_url().get
    mock_method.return_value = GOOD_REDIS_RETURN
    assert redis_data.get_redis_data('trends') == {'trend1': 'url1',
                                                   'trend2': 'url2',
                                                   'trend3': 'url3'}


@patch('redis.from_url')
def test_get_redis_data_bad_redis_key(from_url):
    """Test to see if get redis data returns data dictionary."""
    # A missing key yields None from redis; expect an empty dict back.
    mock_method = from_url().get
    mock_method.return_value = None
    assert redis_data.get_redis_data('bad') == {}


@patch('redis.from_url')
def test_set_redis_data(from_url):
    """Test to see if set redis data is called."""
    mock_method = from_url().set
    redis_data.set_redis_data('trends', 'val')
    assert mock_method.call_count == 1


@patch('redis.from_url')
def test_set_redis_data_empty(from_url):
    """Test to see if set redis data is called with empty data."""
    mock_method = from_url().set
    redis_data.set_redis_data('trends', {})
    assert mock_method.call_count == 1


def test_set_redis_no_val():
    """Test if set data fails with no arguments."""
    with pytest.raises(TypeError):
        redis_data.set_redis_data('key')


@pytest.mark.parametrize('data, result', PARSE_LIST)
def test_parse_redis_twiter_trends(data, result):
    """Test trend parser to remove apostrophes from trends."""
    assert redis_data.redis_parse_twitter_trends(data) == result


@patch('redis.from_url')
def test_redis_set_trends(from_url):
    """Test the redis main function."""
    mock_method = from_url().set
    redis_data.set_redis_trend_list(TWITTER_TRENDS)
    assert mock_method.call_count == 1
|
4,219 | 034d4027ea98bca656178b66c5c6e6e8b13e4b9e | import cv2 as cv
def nothing(x):
    # No-op callback; cv.createTrackbar requires a callable argument.
    pass


# Interactive binary-threshold demo: a trackbar picks the threshold and the
# result is shown live until ESC (key code 27) is pressed.
cv.namedWindow('Binary')
cv.createTrackbar('threshold', 'Binary', 0, 255, nothing)
cv.setTrackbarPos('threshold', 'Binary', 127)
img_color = cv.imread('../sample/ball.png', cv.IMREAD_COLOR)
img_gray = cv.cvtColor(img_color, cv.COLOR_BGR2GRAY)
while(True):
    thre = cv.getTrackbarPos('threshold', 'Binary')
    # THRESH_BINARY_INV: inverts the binarization result
    ret, img_binary = cv.threshold(img_gray, thre, 255, cv.THRESH_BINARY_INV)
    # Keep only the color pixels selected by the binary mask.
    img_result = cv.bitwise_and(img_color, img_color, mask=img_binary)
    cv.imshow('Result', img_result)
    cv.imshow('Binary', img_binary)
    if cv.waitKey(1) == 27:
        break
cv.destroyAllWindows()
# Evaluate cos/sin separately on the real and imaginary parts of z = 1j.
import math

z = 1j
re_part, im_part = z.real, z.imag
cosinus_real = math.cos(re_part)
cosinus_imaginary = math.cos(im_part)
sinus_real = math.sin(re_part)
sinus_imag = math.sin(im_part)
for value in (cosinus_real, cosinus_imaginary, sinus_real, sinus_imag):
    print(value)
|
4,221 | e9bf5a40360d35f32bd2ad5aa404225f49895a14 | # Generated by Django 4.0.5 on 2023-02-14 18:57
from django.db import migrations
class Migration(migrations.Migration):
    # Resets the Meta options of the `user` model to an empty dict
    # (AlterModelOptions with options={}); the database schema is untouched.

    dependencies = [
        ('core', '0020_festival_boxoffice_close_festival_boxoffice_open'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='user',
            options={},
        ),
    ]
|
import PIL
from matplotlib import pyplot as plt
import matplotlib
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import EarlyStopping
from keras import backend as K
import keras
from time import time
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np

# Resume training of a previously saved model (older candidates kept for reference).
#model = load_model("./Modelo_C32K44_C128k44_d075_D256_d05_D5.h5")
#model = load_model("./Modelo_C32k55_C64k55_d025_D128_d05_D5.h5")
model = load_model("./Modelo_C64k33_C128k33_d025_D256_d05_D5.h5")
# Fix: removed a stray no-op `model.fit` attribute access that did nothing.

batch_size = 20
epochs = 100
train_data_dir = 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/train'
validation_data_dir = 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/test'

# Light augmentation for training; validation images are only rescaled.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=15,
    zoom_range=0.1
)
validation_datagen = ImageDataGenerator(
    rescale=1./255
)
# Fix: the validation datagen and generator were each built twice; the
# duplicates were dead (immediately overwritten) and rescanned the directory.

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(250, 150),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(250, 150),
    batch_size=batch_size,
    class_mode='categorical')

history = model.fit_generator(
    train_generator,
    epochs=epochs,
    validation_data = validation_generator,
    #callbacks = [es]
)
model.save("./T_100_Modelo_C64k33_C128k33_d025_D256_d05_D5.h5")

# Plot the training curves recorded by fit_generator.
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.plot(history.history['val_loss'], label='val_loss')
plt.plot(history.history['loss'], label='loss')
plt.title('Accuracy y Loss Clasificando coches por color')
plt.xlabel('Épocas')
plt.legend(loc="lower right")
plt.show()
4,223 | 80f681eb99d1e3f64cacd23ce0a4b10a74a79fe8 | """
给定两个非空链表来代表两个非负整数,位数按照逆序方式存储,它们的每个节点只存储单个数字。将这两数相加会返回一个新的链表。
你可以假设除了数字 0 之外,这两个数字都不会以零开头。
输入:(2 -> 4 -> 3) + (5 -> 6 -> 4)
输出:7 -> 0 -> 8
原因:342 + 465 = 807
"""
"""
解题思路:
先计算两个节点的值和与进位的和
然后将值对10取余存放到新的链表中
循环下去
直到l1 l2 进位都不存在
"""
# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def addTwoNumbers(self, l1, l2):
        """
        Add two non-negative integers stored digit-by-digit in reverse
        order in two linked lists; return the sum as a new list.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        ret = ListNode(0)  # dummy head; the result starts at ret.next
        cur = ret
        add = 0  # carry from the previous digit
        while l1 or l2 or add:
            # Missing digits of the shorter number count as 0.
            val = (l1.val if l1 else 0) + (l2.val if l2 else 0) + add
            add = val // 10
            cur.next = ListNode(val % 10)
            cur = cur.next
            # Bug fix: guard on the node itself, not on node.next —
            # `l1.next if l1.next else None` crashed with AttributeError
            # once the shorter list was exhausted (l1/l2 already None).
            l1 = l1.next if l1 else None
            l2 = l2.next if l2 else None
        return ret.next
|
4,224 | 9847a9cd360649819f51abfe584fb51a81306f68 | subworkflow data:
workdir:
"../../data/SlideSeq/Puck_180819_10"
include: "../Snakefile"
|
4,225 | 8a37299154aded37147e1650cbf52a5cdf7d91da | from adventurelib import *
# Each chapter module registers its rooms/commands on import; start() then
# runs the shared adventurelib game loop.
# NOTE(review): wildcard imports let chapters shadow one another's names —
# confirm the modules export disjoint identifiers.
from horror import *
from dating import *
from popquiz import*
from comedy import*
from island import *
start()
|
4,226 | 818623621b609d67f8f657be4ade6e3bb86a0bc5 | """
Package for django_static_template.
"""
|
4,227 | ba9d7b877eda3f7469db58e2ee194b601e3c3e08 | """Support for Bond covers."""
import asyncio
import logging
from typing import Any, Callable, Dict, List, Optional
from bond import BOND_DEVICE_TYPE_MOTORIZED_SHADES, Bond
from homeassistant.components.cover import DEVICE_CLASS_SHADE, CoverEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
from .utils import BondDevice, get_bond_devices
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: Callable[[List[Entity]], None],
) -> None:
    """Set up Bond cover devices."""
    # The Bond client was stored on hass.data by the integration setup.
    bond: Bond = hass.data[DOMAIN][entry.entry_id]

    async def discover():
        # Query the hub and register only motorized-shade devices as covers.
        devices = await get_bond_devices(hass, bond)
        covers = [
            BondCover(bond, device)
            for device in devices
            if device.type == BOND_DEVICE_TYPE_MOTORIZED_SHADES
        ]
        async_add_entities(covers)

    # Run discovery in the background so entry setup returns immediately.
    asyncio.create_task(discover())
class BondCover(CoverEntity):
    """Representation of a Bond cover."""

    def __init__(self, bond: Bond, device: BondDevice):
        """Create HA entity representing Bond cover."""
        self._bond = bond        # Bond hub client used for the commands below
        self._device = device    # device metadata from the hub

    @property
    def device_class(self) -> Optional[str]:
        """Get device class."""
        # Entities here are only created for motorized shades.
        return DEVICE_CLASS_SHADE

    @property
    def unique_id(self) -> Optional[str]:
        """Get unique ID for the entity."""
        # The hub-assigned device id doubles as the unique id.
        return self._device.device_id

    @property
    def name(self) -> Optional[str]:
        """Get entity name."""
        return self._device.name

    @property
    def device_info(self) -> Optional[Dict[str, Any]]:
        """Get an HA device-registry entry describing this cover."""
        return {ATTR_NAME: self.name, "identifiers": {(DOMAIN, self._device.device_id)}}

    @property
    def is_closed(self):
        """Return if the cover is closed or not."""
        # Position is not tracked here, so the state is reported as unknown.
        return None

    def open_cover(self, **kwargs: Any) -> None:
        """Open the cover."""
        self._bond.open(self._device.device_id)

    def close_cover(self, **kwargs: Any) -> None:
        """Close cover."""
        self._bond.close(self._device.device_id)

    def stop_cover(self, **kwargs):
        """Hold cover."""
        # "Stop" is expressed as holding the shade at its current position.
        self._bond.hold(self._device.device_id)
|
4,228 | 150e0180567b74dfcd92a6cd95cf6c6bf36f6b5d | # [SIG Python Task 1]
"""
Tasks to performs:
a) Print 'Hello, World! From SIG Python - <your name>' to the screen
b) Calculate Volume of a Sphere
c) Create a customised email template for all students,
informing them about a workshop.
PS: This is called a docstring... and it will not be interepreted
So leave these instructions here.. no problems.
The first line having '#' is called a comment. It will be ignored too.
You can write your own comments and docstrings to make your code clear
and documented.
"""
# a) Print 'Hello, World! From SIG Python - <your name>' to the screen
# Write you code here
print("hello, World! From SIG Python - Gaurangi Rawat")
# b) Calculate Volume of a Sphere
# Take the radius as input from the user and use the math module
r=int(input('enter radius of the sphere'))
volume=4/3*3.14*r*r*r
print("volume=",volume,)
#c) Create a customised email template for all students, informing them about a workshop that the student applied for earlier.
# Task the student's name, workshop, Time, Date, writer's name, organization's name as input from the program's user
student_name=input('enter student name:')
workshop=input('workshop:')
Time=input('time:')
Date=input('date:')
writer_name=input("writer's name:")
organization_name=input('organization name:')
# Use the given string as your template [This is a multi-line string]
email_msg = f"""Dear {student_name},
We have received your request to register for the Workshop event and the
workshop you applied for {workshop} has been scheduled at {Time} on {Date }.
We will be seeing you there! Thanks for participating.
Regards,
{writer_name}
{organization_name}"""
# Format using .format or f-strings
print(email_msg)
# NOTE: Make sure to print few empty lines after each task
|
4,229 | 6ecbe119c8a14776373d165dc05e81f91084893c | from SMP.motion_planner.node import PriorityNode
import numpy as np
from heapq import nsmallest
import sys
from SMP.motion_planner.plot_config import DefaultPlotConfig
from SMP.motion_planner.search_algorithms.best_first_search import GreedyBestFirstSearch
# imports for route planner:
class StudentMotionPlanner(GreedyBestFirstSearch):
"""
Motion planner implementation by students.
Note that you may inherit from any given motion planner as you wish, or come up with your own planner.
Here as an example, the planner is inherited from the GreedyBestFirstSearch planner.
"""
def __init__(self, scenario, planningProblem, automata, plot_config=DefaultPlotConfig):
    """Forward construction to GreedyBestFirstSearch.

    Note: the base class takes the keyword `automaton` while this signature
    names the parameter `automata`; the mapping below bridges the two.
    """
    super().__init__(scenario=scenario, planningProblem=planningProblem, automaton=automata,
                     plot_config=plot_config)
def evaluation_function(self, node_current: PriorityNode) -> float:
    ########################################################################
    # todo: Implement your own evaluation function here. #
    ########################################################################
    # Copied from greedy best first search:
    """
    Evaluation function of GBFS is f(n) = h(n)
    """
    # Pure greedy search: the cost-so-far g(n) is ignored entirely; the
    # node's priority is set to the heuristic as a side effect.
    node_current.priority = self.heuristic_function(node_current=node_current)
    return node_current.priority
def heuristic_function(self, node_current: PriorityNode) -> float:
    ########################################################################
    # todo: Implement your own heuristic cost calculation here. #
    # Hint: #
    # Use the State of the current node and the information from the #
    # planning problem, as well as from the scenario. #
    # Some helper functions for your convenience can be found in #
    # ./search_algorithms/base_class.py #
    ########################################################################
    """
    Function that evaluates the heuristic cost h(n) in student class.
    Created by Mohamed A. Abdellaoui 10.01.2021

    Evaluation order:
      1. 0.0 once the goal is reached; +inf when no reference route exists.
      2. Obstacle screening: prune (+inf) or cost states that close in on a
         slower vehicle inside the forward look-ahead cone.
      3. Otherwise delegate to a mode-specific cost function.
    """
    output_logs = False
    if output_logs:
        print("##################")
        print("current time step: ", node_current.list_paths[-1][-1].time_step)
        print("current problem mode", self.planningProblemType)
        print("depth tree: ", node_current.depth_tree)
    # Last state of the most recent path segment = ego state to evaluate.
    currentorient = node_current.list_paths[-1][-1].orientation
    currentpos = node_current.list_paths[-1][-1].position
    currenttimestep = node_current.list_paths[-1][-1].time_step
    currentVel = node_current.list_paths[-1][-1].velocity
    # Test if reached goal:
    if self.reached_goal(node_current.list_paths[-1]):
        return 0.0
    # Test if route planner failed to find a path:
    if self.routeplannerresult is None:
        return np.inf
    # ---- Detect cars in front ----
    # loop through all obstacles at time step x and find if any is close of current pos:
    if not self.disableObstAvoidance:
        for obst in self.list_obstacles:
            obstPos = obst.state_at_time(currenttimestep)
            if currentorient is not None and obstPos is not None:
                disttoobst = self.euclidean_distance(currentpos, obstPos.position)
                # Velocity-dependent look-ahead distance (tuning constants).
                lookaheadVar = 1.375 * currentVel + 2.5
                if disttoobst <= lookaheadVar:
                    # calc orientation diff between car and obstacle:
                    vectorToObst = np.array([currentpos, obstPos.position])
                    vectorToObstOrient = self.calc_angle_of_position(vectorToObst, currentpos)
                    orientdiff = self.calc_orientation_diff(currentorient, vectorToObstOrient)
                    # 0.261799 rad ~= 15 deg: obstacle counts as "in front".
                    if abs(orientdiff) <= 0.261799:
                        if not 'velocity' in obstPos.attributes:
                            continue
                        # Closing in on a slower moving vehicle -> prune.
                        if node_current.list_paths[-1][-1].velocity > obstPos.velocity and obstPos.velocity != 0:
                            return np.inf
        # get index of closest object to the ego vehicle:
        index_smallest_dist = self.get_index_nearest_obst_infront(node_current)
        # use the index to locate vehicle to calc cost:
        if index_smallest_dist != -1:
            # found the index of vehicle with smallest distance to ego car:
            obst = self.list_obstacles[index_smallest_dist]
            obstPos = obst.state_at_time(currenttimestep)
            if obstPos is not None and 'velocity' in obstPos.attributes:
                if obstPos.velocity == 0:
                    # Standing obstacle ahead: cost grows with own speed.
                    cost = node_current.list_paths[-1][-1].velocity
                    return cost
                if node_current.list_paths[-1][-1].velocity > obstPos.velocity:
                    return np.inf
                # Prefer matching the leading vehicle's speed.
                cost = abs(node_current.list_paths[-1][-1].velocity - obstPos.velocity)
                return cost
    # Decide based on planning problem type how to calculate cost
    # NOTE(review): returns an implicit None if planningProblemType matches
    # none of the known modes — confirm that cannot happen upstream.
    if self.planningProblemType == 'ModeA':
        # Call function for planning problem with desired time, position, speed and orientation
        cost = self.cost_for_modeA_problem(node_current, output_logs)
        if output_logs:
            print("Cost from modeA cost func: ", cost)
        if cost < 0:
            return 0
        return cost
    elif self.planningProblemType == 'ModeB':
        # Call function for planning problem with desired time, position and velocity:
        cost = self.cost_for_modeB_problem(node_current, output_logs)
        if output_logs:
            print("Cost from modeB cost func: ", cost)
        if cost < 0:
            return 0
        return cost
    elif self.planningProblemType == 'ModeC':
        # Call function for planning problem with desired time, position and orientation:
        cost = self.cost_for_modeC_problem(node_current, output_logs)
        if output_logs:
            print("Cost from modeB cost func: ", cost)
        if cost < 0:
            return 0
        return cost
    elif self.planningProblemType == 'ModeD':
        # Call function for planning problem with desired time and position:
        cost = self.cost_for_modeD_problem(node_current, output_logs)
        if output_logs:
            print("Cost from modeB cost func: ", cost)
        if cost < 0:
            return 0
        return cost
    elif self.planningProblemType == 'Survival':
        # Call function for planning problem with desired time:
        cost = self.cost_for_Survival_problem(node_current, output_logs)
        if output_logs:
            print("Cost from modeB cost func: ", cost)
        if cost < 0:
            return 0
        return cost
def cost_for_modeA_problem(self, node_current, output_logs):
    """Cost for problems with desired time, position, speed AND orientation.

    Remaining time steps when no goal position exists; +inf for a standing
    ego vehicle or, near the goal, for states outside the desired
    orientation/velocity intervals; otherwise estimated travel time plus
    weighted deviation from the desired velocity and orientation.
    """
    if self.position_desired is None:
        if output_logs:
            print("exit Cost function because position desired is None!")
        # Only a time goal: remaining time steps.
        return self.time_desired.start - node_current.list_paths[-1][-1].time_step
    else:
        velocity = node_current.list_paths[-1][-1].velocity
        path_last = node_current.list_paths[-1]
        if np.isclose(velocity, 0):
            return np.inf
        else:
            # Calc Variables:
            distance = self.calc_euclidean_distance(current_node=node_current)
            angleToGoal = self.calc_angle_to_goal(path_last[-1])
            orientationToGoalDiff = self.calc_orientation_diff(angleToGoal, path_last[-1].orientation)
            # NOTE(review): 3.14 approximates pi in this rad->deg conversion
            # (debug output only).
            orientationToGoalDiffdegree = (abs(orientationToGoalDiff) * 180) / 3.14
            # Desired values are taken as interval midpoints.
            desired_orient = (self.orientation_desired.end + self.orientation_desired.start) / 2
            desired_velocity = (self.velocity_desired.start + self.velocity_desired.end) / 2
            diff_desiredOrient = abs(self.calc_orientation_diff(desired_orient, path_last[-1].orientation))
            diff_deiredVelocity = abs(velocity - desired_velocity)
            angle_intervall = abs(abs(self.orientation_desired.start) - abs(self.orientation_desired.end))
            # Output data for debugging:
            if output_logs:
                print("Distance to goal of current node is: ", distance)
                print("Velocity of current node is: ", velocity)
                print("Orientation of current position: ", node_current.list_paths[-1][-1].orientation)
                print("Angle to goal of current node is: ", angleToGoal)
                print("orientation diff to goal of current node is(deg): ", orientationToGoalDiffdegree)
                print("diff desired orient of current node is(deg): ", diff_desiredOrient)
                print("diff desired velocity of current node is(deg): ", diff_deiredVelocity)
            # Near the goal (<=10), prune states that can no longer satisfy
            # the desired orientation/velocity intervals (added 16.01):
            current_orient = path_last[-1].orientation
            if distance <= 10:
                if current_orient < self.orientation_desired.start or current_orient > self.orientation_desired.end:
                    return np.inf
                if velocity < self.velocity_desired.start or velocity > self.velocity_desired.end:
                    return np.inf
            weight = 10
            # Travel-time estimate plus weighted velocity/orientation deviation.
            cost = (distance / velocity) + weight * diff_deiredVelocity + weight * diff_desiredOrient
            return cost
def cost_for_modeB_problem(self, node_current, output_logs):
    """Cost for problems with desired time, position and velocity.

    Distance is measured along the reference path; once the goal position
    is reached the cost rewards matching the desired velocity and time.
    """
    if self.position_desired is None:
        if output_logs:
            print("exit Cost function because position desired is None!")
        # Only a time goal: remaining time steps.
        return self.time_desired.start - node_current.list_paths[-1][-1].time_step
    else:
        velocity = node_current.list_paths[-1][-1].velocity
        path_last = node_current.list_paths[-1]
        if np.isclose(velocity, 0):
            return np.inf
        else:
            # Calc Variables:
            distance = self.calc_distance_to_goal_from_point(node_current.list_paths[-1][-1])
            angleToGoal = self.calc_angle_to_goal(path_last[-1])
            orientationToGoalDiff = self.calc_orientation_diff(angleToGoal, path_last[-1].orientation)
            # NOTE(review): 3.14 approximates pi (debug output only).
            orientationToGoalDiffdegree = (abs(orientationToGoalDiff)*180)/3.14
            desired_velocity = (self.velocity_desired.start + self.velocity_desired.end)/2
            diff_deiredVelocity = abs(velocity - desired_velocity)
            # NOTE(review): the result of this lanelet check is discarded —
            # confirm whether the call is needed or is dead code.
            self.test_if_in_goal_lanelet(node_current)
            # Output data for debugging:
            if output_logs:
                print("Distance to goal of current node is: ", distance)
                print("Velocity of current node is: ", velocity)
                print("Orientation of current position: ",node_current.list_paths[-1][-1].orientation)
                print("Angle to goal of current node is: ", angleToGoal)
                print("orientation diff to goal of current node is(deg): ", orientationToGoalDiffdegree)
                print("diff desired velocity of current node is(deg): ", diff_deiredVelocity)
            if self.planningProblem.goal.is_reached_only_pos(node_current.list_paths[-1][-1]):
                # Goal position reached: small time penalty plus velocity terms.
                cost = (self.time_desired.start - node_current.list_paths[-1][-1].time_step) * 0.01
                cost = cost + diff_deiredVelocity + velocity *0.01
                return cost
            # Travel-time estimate plus velocity-deviation penalties.
            cost = ( distance / velocity ) + 2 * diff_deiredVelocity + velocity*0.01
            return cost
def cost_for_modeC_problem(self, node_current, output_logs):
    """Cost for problems with desired time, position and orientation.

    Like mode A but without a velocity target; additionally computes the
    lateral offset from the planned reference path (debug output only).
    """
    if self.position_desired is None:
        if output_logs:
            print("exit Cost function because position desired is None!")
        # Only a time goal: remaining time steps.
        return self.time_desired.start - node_current.list_paths[-1][-1].time_step
    else:
        velocity = node_current.list_paths[-1][-1].velocity
        path_last = node_current.list_paths[-1]
        if np.isclose(velocity, 0):
            return np.inf
        else:
            # Calc Variables:
            distance = self.calc_euclidean_distance(current_node=node_current)
            angleToGoal = self.calc_angle_to_goal(path_last[-1])
            orientationToGoalDiff = self.calc_orientation_diff(angleToGoal, path_last[-1].orientation)
            # NOTE(review): 3.14 approximates pi (debug output only).
            orientationToGoalDiffdegree = (abs(orientationToGoalDiff)*180)/3.14
            desired_orient = (self.orientation_desired.end + self.orientation_desired.start) / 2
            diff_desiredOrient = self.calc_orientation_diff(desired_orient, path_last[-1].orientation)
            angle_intervall = abs(abs(self.orientation_desired.start) - abs(self.orientation_desired.end))
            # Distance between current position and the reference path:
            arry = node_current.list_paths[-1][-1].position
            a = np.array([arry[0], arry[1]])
            if self.routeplannerresult is not None:
                distance_to_refrence = self.calc_distance_to_nearest_point(self.routeplannerresult.reference_path,
                                                                           a)
            else:
                distance_to_refrence = 0
            # Output data for debugging:
            if output_logs:
                print("distance to reference path: ", distance_to_refrence)
                print("Distance to goal of current node is: ", distance)
                print("Velocity of current node is: ", velocity)
                print("Orientation of current position: ",node_current.list_paths[-1][-1].orientation)
                print("Angle to goal of current node is: ", angleToGoal)
                print("orientation diff to goal of current node is(deg): ", orientationToGoalDiffdegree)
                print("diff desired orient of current node is(deg): ", diff_desiredOrient)
            # If very close to target but time is still not reached:
            if distance <= 0.1 and node_current.list_paths[-1][-1].time_step < self.time_desired.start:
                return self.time_desired.start - node_current.list_paths[-1][-1].time_step
            if self.planningProblem.goal.is_reached_only_pos(node_current.list_paths[-1][-1]):
                # Goal position reached: small time penalty plus orientation terms.
                cost = (self.time_desired.start - node_current.list_paths[-1][-1].time_step) * 0.01
                cost = cost + diff_desiredOrient + velocity *0.01
                return cost
            # Travel-time estimate plus orientation-deviation penalties.
            cost = ( distance / velocity ) + 2 * diff_desiredOrient + velocity*0.01
            return cost
def cost_for_modeD_problem(self, node_current, output_logs):
    """Cost for problems with only a desired time and position.

    Estimated remaining travel time along the reference path; once the goal
    position is reached, a small penalty on the remaining time steps.
    """
    totaltogoal = self.calc_distance_to_goal_from_point(node_current.list_paths[-1][-1])
    if self.position_desired is None:
        if output_logs:
            print("exit Cost function because position desired is None!")
        # Only a time goal: remaining time steps.
        return self.time_desired.start - node_current.list_paths[-1][-1].time_step
    else:
        if self.planningProblem.goal.is_reached_only_pos(node_current.list_paths[-1][-1]):
            return (self.time_desired.start - node_current.list_paths[-1][-1].time_step) *0.01
        velocity = node_current.list_paths[-1][-1].velocity
        # A standing vehicle can never make progress toward the goal.
        if np.isclose(velocity, 0):
            return np.inf
        cost = totaltogoal / node_current.list_paths[-1][-1].velocity
        return cost
def cost_for_Survival_problem(self, node_current, output_logs):
    """Cost for survival problems (only a desired time).

    Prunes states that close in on a slower vehicle inside the forward
    look-ahead cone; otherwise returns the remaining time steps.
    """
    currentorient = node_current.list_paths[-1][-1].orientation
    currentpos = node_current.list_paths[-1][-1].position
    currenttimestep = node_current.list_paths[-1][-1].time_step
    currentVel = node_current.list_paths[-1][-1].velocity
    for obst in self.list_obstacles:
        obstPos = obst.state_at_time(currenttimestep)
        if currentorient is not None and obstPos is not None:
            disttoobst = self.euclidean_distance(currentpos, obstPos.position)
            # Velocity-dependent look-ahead distance (tuning constants).
            lookaheadVar = 1.375 * currentVel + 2.5
            if disttoobst <= lookaheadVar:
                # calc orientation diff between car and obstacle:
                vectorToObst = np.array([currentpos, obstPos.position])
                vectorToObstOrient = self.calc_angle_of_position(vectorToObst, currentpos)
                orientdiff = self.calc_orientation_diff(currentorient, vectorToObstOrient)
                # 0.261799 rad ~= 15 deg: obstacle counts as "in front".
                if abs(orientdiff) <= 0.261799:
                    if not 'velocity' in obstPos.attributes:
                        continue
                    # Closing in on a slower moving vehicle -> prune.
                    if node_current.list_paths[-1][-1].velocity > obstPos.velocity and obstPos.velocity != 0:
                        return np.inf
    return self.time_desired.start - node_current.list_paths[-1][-1].time_step
def calc_distance_to_ref_from_point(self, state):
    """Perpendicular distance from state.position to the reference path,
    taken as the line through the two nearest parsed reference points."""
    # Distance from the query position to every parsed reference point:
    currentpos = state.position
    distances = []
    for p in self.refPathParsedPnts:
        distances.append(self.euclidean_distance(currentpos, p))
    smallest_points = nsmallest(2, distances)
    index1 = distances.index(smallest_points[0])
    index2 = distances.index(smallest_points[1])
    # NOTE(review): if the two smallest distances are equal, index() finds
    # the same element twice, making p1 == p2 and the norm below zero —
    # confirm the reference path has no duplicate points.
    p1 = self.refPathParsedPnts[index1]
    p2 = self.refPathParsedPnts[index2]
    # Point-to-line distance via the 2D cross product.
    distance_to_refrence = np.abs(np.cross(p2 - p1, currentpos - p1) / np.linalg.norm(p2 - p1))
    return distance_to_refrence
def calc_distance_to_goal_from_point(self, state):
    """Arc length along the parsed reference path from the point nearest to
    state.position up to the final path point."""
    # Distance from the query position to every parsed reference point:
    currentpos = state.position
    distances = []
    for p in self.refPathParsedPnts:
        distances.append(self.euclidean_distance(currentpos, p))
    index_smallest_dist = distances.index(min(distances))
    # Sum segment lengths from the nearest point to the end of the path.
    totaltogoal = 0
    for p in range(index_smallest_dist, len(self.refPathParsedPnts) - 1):
        totaltogoal = totaltogoal + self.euclidean_distance(self.refPathParsedPnts[p],self.refPathParsedPnts[p+1])
    return totaltogoal
def get_index_nearest_obst_infront(self,node_current):
    """Index into self.list_obstacles of the nearest obstacle inside the
    ego vehicle's forward look-ahead cone, or -1 when there is none."""
    # loop through all obstacles at time step x and find if any is close of current pos:
    currentorient = node_current.list_paths[-1][-1].orientation
    currentpos = node_current.list_paths[-1][-1].position
    currenttimestep = node_current.list_paths[-1][-1].time_step
    currentVel = node_current.list_paths[-1][-1].velocity
    # Pre-fill with +inf; entries stay inf for ignored obstacles.
    disttoobst = [np.inf] * len(self.list_obstacles)
    for i in range(len(self.list_obstacles)):
        obst = self.list_obstacles[i]
        obstPos = obst.state_at_time(currenttimestep)
        if currentorient is not None and obstPos is not None:
            dist = self.euclidean_distance(currentpos, obstPos.position)
            # Velocity-dependent look-ahead distance (tuning constants).
            lookaheadVar = 1.375 * currentVel + 2.5
            if dist <= lookaheadVar:
                # calc orientation diff between car and obstacle:
                vectorToObst = np.array([currentpos, obstPos.position])
                vectorToObstOrient = self.calc_angle_of_position(vectorToObst, currentpos)
                orientdiff = self.calc_orientation_diff(currentorient, vectorToObstOrient)
                # 0.261799 rad ~= 15 deg: obstacle counts as "in front".
                if abs(orientdiff) <= 0.261799:
                    disttoobst[i]= dist
                else:
                    disttoobst[i]= np.inf
            else:
                disttoobst[i]= np.inf
    # NOTE(review): min() raises ValueError if list_obstacles is empty —
    # confirm scenarios always contain at least one obstacle.
    index_smallest_dist = disttoobst.index(min(disttoobst))
    if disttoobst[index_smallest_dist] == np.inf:
        index_smallest_dist = -1
    return index_smallest_dist
def test_if_in_goal_lanelet(self, node_current):
    """Return True if the ego vehicle's current lanelet is one of the goal
    lanelets of planning problem 0."""
    pos = [node_current.list_paths[-1][-1].position]
    currentlanelet = self.scenario.lanelet_network.find_lanelet_by_position(pos)
    # First lanelet id found for the single queried position.
    # NOTE(review): raises IndexError when the position lies on no lanelet.
    currentlanelet = currentlanelet[0][0]
    result = False
    if self.planningProblem.goal.lanelets_of_goal_position is not None:
        if currentlanelet in self.planningProblem.goal.lanelets_of_goal_position.get(0):
            result = True
    return result
    def cost_for_modeA_problem_old(self, node_current, output_logs):
        """Legacy heuristic cost for planning problems with desired time,
        position, speed and orientation.  Lower is better; np.inf prunes."""
        # Function for planning problem with desired time, position, speed and orientation
        if self.position_desired is None:
            if output_logs:
                print("exit Cost function because position desired is None!")
            # No goal position: only the remaining time-to-goal matters.
            return self.time_desired.start - node_current.list_paths[-1][-1].time_step
        else:
            velocity = node_current.list_paths[-1][-1].velocity
            path_last = node_current.list_paths[-1]
            if np.isclose(velocity, 0):
                # Standing still cannot make progress; prune this node.
                return np.inf
            else:
                # Calc Variables:
                distance = self.calc_euclidean_distance(current_node=node_current)
                angleToGoal = self.calc_angle_to_goal(path_last[-1])
                orientationToGoalDiff = self.calc_orientation_diff(angleToGoal, path_last[-1].orientation)
                # NOTE(review): uses 3.14 rather than np.pi for rad->deg.
                orientationToGoalDiffdegree = (abs(orientationToGoalDiff)*180)/3.14
                desired_orient = (self.orientation_desired.end + self.orientation_desired.start) / 2
                desired_velocity = (self.velocity_desired.start + self.velocity_desired.end)/2
                diff_desiredOrient = self.calc_orientation_diff(desired_orient, path_last[-1].orientation)
                diff_deiredVelocity = abs(velocity - desired_velocity)
                angle_intervall = abs(abs(self.orientation_desired.start) - abs(self.orientation_desired.end))
                # Output data for debugging:
                if output_logs:
                    print("Distance to goal of current node is: ", distance)
                    print("Velocity of current node is: ", velocity)
                    print("Orientation of current position: ",node_current.list_paths[-1][-1].orientation)
                    print("Angle to goal of current node is: ", angleToGoal)
                    print("orientation diff to goal of current node is(deg): ", orientationToGoalDiffdegree)
                    print("diff desired orient of current node is(deg): ", diff_desiredOrient)
                    print("diff desired velocity of current node is(deg): ", diff_deiredVelocity)
                # if very colse to goal, minimize the diff velocity and diff orient
                if distance <= 1:
                    desired_vel_weight = 1
                    desired_orient_weight = 1
                    cost = desired_vel_weight * diff_deiredVelocity
                    if angle_intervall < 1 and angle_intervall != 0:
                        cost = cost + desired_orient_weight * diff_desiredOrient
                    return cost
                # If very close to target but time is still not reached:
                # NOTE(review): unreachable -- distance <= 0.1 implies
                # distance <= 1, which already returned above.
                if distance <= 0.1 and node_current.list_paths[-1][-1].time_step < self.time_desired.start:
                    return (self.time_desired.start - node_current.list_paths[-1][-1].time_step) *0.001
                # check if goal in in field of view:
                if orientationToGoalDiffdegree > 45:
                    # goal is not in field of view:
                    # give more weight to speed and follow reference path blindly:
                    # block to differentiate between large distance to goal and small distance:
                    if distance >= 10: # too far away from target, just follow the least distance and target lanelet.
                        velocity_weight = 1
                        cost = distance / velocity
                        return cost
                    if distance < 10 and distance >= 5: # almost close, reduce speed.
                        return np.inf
                    if distance < 5: # very close andjust orientation angle..
                        return np.inf
                else:
                    # goal is in field of view:
                    # give more weight to distance and speed and orientation goals:
                    # goal is not in field of view:
                    # give more weight to speed and follow reference path blindly:
                    # block to differentiate between large distance to goal and small distance:
                    if distance >= 10: # too far away from target, just follow the least distance and target lanelet.
                        velocity_weight = 1
                        cost = distance / velocity * velocity_weight
                        return cost
                    if distance < 10 and distance >= 5: # almost close, reduce speed.
                        velocity_weight = 0.5
                        desired_vel_weight = 1
                        desired_orient_weight = 1
                        cost = distance / velocity
                        cost = cost + desired_vel_weight * diff_deiredVelocity
                        if angle_intervall < 1 and angle_intervall != 0:
                            cost = cost + desired_orient_weight * diff_desiredOrient
                        return cost
                    if distance < 5: # very close andjust orientation angle..
                        cost = distance / velocity
                        desired_vel_weight = 3
                        desired_orient_weight = 3
                        cost = cost + desired_vel_weight * diff_deiredVelocity
                        if angle_intervall < 1 and angle_intervall != 0:
                            cost = cost + desired_orient_weight * diff_desiredOrient
                        return cost
|
def intersection(nums1, nums2):
    """Return the distinct elements common to both sequences as a list.

    The result order follows set iteration order, matching the original
    list(set & set) implementation.
    """
    return list(set(nums1) & set(nums2))


if __name__ == "__main__":
    # BUG FIX: the original used the Python 2 print statement
    # (`print intersection(...)`), a SyntaxError under Python 3.
    print(intersection([1, 2, 2, 1], [2, 2]))
4,231 | bbbdb30ceef920e600c9f46fb968732b077be2d8 | from analizer_pl.abstract.instruction import Instruction
from analizer_pl import grammar
from analizer_pl.statement.expressions import code
from analizer_pl.reports.Nodo import Nodo
class If_Statement(Instruction):
    """AST node that emits three-address code (C3D) for an
    if / elseif / else statement, backpatching jump labels via
    ``grammar.back_fill`` and registering them with the optimizer."""
    def __init__(self, row, column,expBool, elseif_list,else_,stmts ) -> None:
        super().__init__(row, column)
        # Boolean condition expression of the `if`.
        self.expBool = expBool
        # List of elseif branch nodes (possibly empty).
        self.elseif_list=elseif_list
        # Optional else branch node (None when absent).
        self.else_ = else_
        # Statements of the `then` body.
        self.stmts = stmts
    def execute(self, environment):
        """Generate and return the C3D code object for this statement."""
        self.p_if_sum()
        boolCode = self.expBool.execute(environment)
        cod3d = boolCode.value
        # Conditional jump to the "true" label; fall through to "false".
        cod3d += "\tif "+str(boolCode.temp)+": goto .etiv"+str(grammar.current_etiq+1)+"\n"
        grammar.optimizer_.addIF(str(boolCode.temp),str("etiv"+str(grammar.current_etiq+1)),self.row)
        cod3d+="\tgoto .etif"+ str(grammar.current_etiq+2)+"\n"
        grammar.optimizer_.addGoto(str("etif"+str(grammar.current_etiq+2)),self.row)
        # Record the two fresh labels for later backpatching.
        grammar.back_fill.insert_true(grammar.current_etiq+1)
        grammar.back_fill.insert_false(grammar.current_etiq+2)
        grammar.current_etiq+=2
        cod3d += self.p_iev()
        codeElseif=""
        for stmt in self.stmts:
            cod3d +=stmt.execute(environment).value
        # Jump past the remaining branches once the `then` body ran.
        self.index = grammar.optimizer_.addGoto(str("etiqS"+str(grammar.next_etiq)),self.row)-1
        if len(self.elseif_list) > 0:
            for elseif in self.elseif_list:
                codeElseif += elseif.execute(environment).value
        cod3d+=self.p_fef()
        cod3d+=codeElseif
        if self.else_ != None:
            cod3d+=self.else_.execute(environment).value
        else:
            cod3d+=self.p_write_next_etiq()
        cod3d+=self.p_fev()
        self.p_if_rest()
        return code.C3D(cod3d,"if",self.row,self.column)
    def p_if_sum(self):
        """Enter a (possibly nested) if: push fresh backpatch lists."""
        if grammar.if_stmt !=0:
            grammar.back_fill.new_lists()
        grammar.if_stmt+=1
    def p_if_rest(self):
        """Leave the current if nesting level."""
        grammar.if_stmt -=1
    def p_iev(self):
        """Emit/backpatch the pending "true" labels."""
        return grammar.back_fill.take_out_true_list(self.row)
    def p_fev(self):
        """Emit/backpatch the pending "false" labels."""
        return grammar.back_fill.take_out_false_list(self.row)
    def p_fef(self):
        """Emit the jump to the statement-exit label plus true labels."""
        val ="\tgoto .etiqS"+ str(grammar.next_etiq)+"\n"
        grammar.optimizer_.addGoto_IF(str("etiqS"+str(grammar.next_etiq)),self.row,self.index)
        val +=grammar.back_fill.take_out_true_list(self.row)
        return val
    def p_write_next_etiq(self):
        """Emit the exit label of the whole if statement."""
        val="\tlabel .etiqS"+str(grammar.next_etiq)+"\n"
        grammar.optimizer_.addLabel(str("etiqS"+str(grammar.next_etiq)),self.row)
        grammar.next_etiq+=1
        return val
    def dot(self):
        """Build the graphviz report node for this statement."""
        new = Nodo("IF")
        new.addNode(self.expBool.dot())
        then = Nodo("THEN")
        new.addNode(then)
        for s in self.stmts:
            then.addNode(s.dot())
        for eif in self.elseif_list:
            new.addNode(eif.dot())
        if self.else_:
            new.addNode(self.else_.dot())
        return new
4,232 | 78e3750a1bbe9f2f6680937729c1a810bd29fd4d | #Q7. Write a program to calculate the sum of digits of a given number.
n=int(input("Enter a number:\n"))
sum=0
while(n>0):
r=n%10
sum=sum+r
n=n//10
print("The total sum of digits is:",sum)
|
4,233 | dd3419f42a3b1aafd1d4f5d88189fb3c6bd0c67e | import logging
from pathlib import Path
import numpy as np
import torch
import re
import json
from helpers import init_helper, data_helper, vsumm_helper, bbox_helper
from modules.model_zoo import get_model
logger = logging.getLogger()
def evaluate(model, val_loader, nms_thresh, device):
    """Evaluate *model* on *val_loader* and collect summary statistics.

    Returns (mean F-score, mean diversity, list of per-video json records).
    """
    model.eval()
    stats = data_helper.AverageMeter('fscore', 'diversity')
    json_file = []
    with torch.no_grad():
        for test_key, seq, gt, cps, n_frames, nfps, picks, user_summary, name in val_loader:
            seq_len = len(seq)
            seq_torch = torch.from_numpy(seq).unsqueeze(0).to(device)
            pred_cls, pred_bboxes = model.predict(seq_torch)
            # Clamp predicted segment boundaries into the valid frame range.
            pred_bboxes = np.clip(pred_bboxes, 0, seq_len).round().astype(np.int32)
            pred_cls, pred_bboxes = bbox_helper.nms(pred_cls, pred_bboxes, nms_thresh)
            pred_summ, score = vsumm_helper.bbox2summary(
                seq_len, pred_cls, pred_bboxes, cps, n_frames, nfps, picks)
            # TVSum convention averages over annotators; SumMe takes the max.
            eval_metric = 'avg' if 'tvsum' in test_key else 'max'
            fscore = vsumm_helper.get_summ_f1score(
                pred_summ, user_summary, eval_metric)
            pred_arr, pred_seg = convert_array(pred_summ, nfps)
            pred_summ = vsumm_helper.downsample_summ(pred_summ)
            # Collect a JSON-serialisable record per video for later export.
            json_file.append({"video":str(name), "gt": convert_array_2(gt),
                              "pred_score": convert_array_2(score),
                              "user_anno":convert_user(user_summary),
                              "fscore": float(fscore),
                              "pred_sum": convert_array_2(pred_summ)})
            diversity = vsumm_helper.get_summ_diversity(pred_summ, seq)
            stats.update(fscore=fscore, diversity=diversity)
    return stats.fscore, stats.diversity, json_file
def convert_user(arr):
    """Convert a 2-D array of numpy scalars into nested lists of plain
    Python values (via ``.item()``), suitable for JSON serialisation."""
    return [[element.item() for element in row] for row in arr]
def convert_array_2(arr):
    """Convert a 1-D iterable of numpy scalars into a list of plain
    Python values (via ``.item()``)."""
    return [value.item() for value in arr]
def convert_array(user, nfps):
    """Convert a boolean frame-selection array and per-shot frame counts.

    Parameters: *user* is an iterable of booleans (selected frames);
    *nfps* is an iterable of numpy scalars holding per-shot frame counts.
    Returns (user_arr, shots_arr): the selection as 0/1 ints and the
    cumulative end index of each shot.

    Fix vs. original: an empty *nfps* now yields an empty shot list
    instead of raising IndexError on the unconditional nfps[0] access.
    """
    user_arr = [1 if b else 0 for b in user]
    shots_arr = []
    total = 0
    for count in nfps:
        # Running cumulative sum of shot lengths -> shot end offsets.
        total += count.item()
        shots_arr.append(total)
    return user_arr, shots_arr
def get_file_name(name):
    """Return the final path component of *name*.

    Splits on both '/' and '\\' so POSIX and Windows paths work.
    Fixes vs. original: removed the leftover debug print of the split
    parts and used a raw string for the regex character class.
    """
    return re.split(r"[\\/]", name)[-1]
def main():
    """Evaluate every split of every split-file given on the command line.

    Loads the per-split checkpoint, runs ``evaluate`` on its test keys and
    logs per-split and aggregate diversity / F-score.
    """
    args = init_helper.get_arguments()
    init_helper.init_logger(args.model_dir, args.log_file)
    init_helper.set_random_seed(args.seed)
    logger.info(vars(args))
    model = get_model(args.model, **vars(args))
    model = model.eval().to(args.device)
    # Accumulates the per-video json records across all splits.
    f = []
    for split_path in args.splits:
        split_path = Path(split_path)
        splits = data_helper.load_yaml(split_path)
        stats = data_helper.AverageMeter('fscore', 'diversity')
        for split_idx, split in enumerate(splits):
            ckpt_path = data_helper.get_ckpt_path(args.model_dir, split_path, split_idx)
            # map_location keeps CPU loading working for GPU-saved weights.
            state_dict = torch.load(str(ckpt_path),
                                    map_location=lambda storage, loc: storage)
            model.load_state_dict(state_dict)
            val_set = data_helper.VideoDataset(split['test_keys'])
            val_loader = data_helper.DataLoader(val_set, shuffle=False)
            fscore, diversity, json_file = evaluate(model, val_loader, args.nms_thresh, args.device)
            f += json_file
            stats.update(fscore=fscore, diversity=diversity)
            logger.info(f'{split_path.stem} split {split_idx}: diversity: '
                        f'{diversity:.4f}, F-score: {fscore:.4f}')
        logger.info(f'{split_path.stem}: diversity: {stats.diversity:.4f}, '
                    f'F-score: {stats.fscore:.4f}')
# with open('aftvsum.json', 'w') as fout:
# json.dump(f, fout)
if __name__ == '__main__':
main()
|
4,234 | c247b218267fc7c2bee93053dd90b2806572eaf2 | # https://www.acmicpc.net/problem/20540
# Dictionary mapping each MBTI indicator letter to its opposite one.
MBTI_reverse_index = {
    'E': 'I',
    'I': 'E',
    'S': 'N',
    'N': 'S',
    'T': 'F',
    'F': 'T',
    'J': 'P',
    'P': 'J'
}
# Read Yeongil's four-letter MBTI type (upper case) from stdin.
yeongil_MBTI = input()
# Print the opposite indicator for each letter, with no separator.
for i in yeongil_MBTI:
    print(MBTI_reverse_index[i], end='')
4,235 | 8dab85622a29bc40f8ad6150f9e6f284853aeaf8 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sys
import time
import netifaces
import requests
# Max polls while waiting for the boot NIC to obtain an IP address.
_GET_ADDR_MAX_ITERATION = 50
# Max attempts to POST the callback to the Ironic API.
_POST_CALLBACK_MAX_ITERATION =50
# Seconds to sleep between retries of either poll loop.
_RETRY_INTERVAL = 5
def _process_error(message):
sys.stderr.write(message)
sys.stderr.write('\n')
sys.exit(1)
def _parse_kernel_cmdline():
    """Parse the Linux kernel command line into a {name: value} dict.

    Tokens without '=' map to an empty string; for duplicate names the
    last occurrence wins.
    """
    with open('/proc/cmdline', 'rt') as f:
        raw = f.read()
    return {name: value
            for name, _, value in (token.partition('=') for token in raw.split())}
def _get_interface_ip(mac_addr):
    """Get the IPv4 address of the interface whose MAC equals *mac_addr*.

    Returns the address string, or None (implicitly) when no interface
    matches or the matching interface has no IPv4 address yet.
    """
    interfaces = netifaces.interfaces()
    for iface in interfaces:
        addresses = netifaces.ifaddresses(iface)
        link_addresses = addresses.get(netifaces.AF_LINK, [])
        for link_addr in link_addresses:
            if link_addr.get('addr') == mac_addr:
                ip_addresses = addresses.get(netifaces.AF_INET)
                if ip_addresses:
                    # NOTE: return first address, ironic API does not
                    # support multiple
                    return ip_addresses[0].get('addr')
                else:
                    # MAC matched but no IPv4 yet; stop scanning this iface.
                    break
def main():
    """Script informs Ironic that bootstrap loading is done.

    There are three mandatory parameters in kernel command line.
    Ironic prepares these two:
    'ironic_api_url' - URL of Ironic API service,
    'deployment_id' - UUID of the node in Ironic.
    Passed from PXE boot loader:
    'BOOTIF' - MAC address of the boot interface.
    """
    kernel_params = _parse_kernel_cmdline()
    api_url = kernel_params.get('ironic_api_url')
    deployment_id = kernel_params.get('deployment_id')
    inspect = kernel_params.get('inspect')
    # TODO(aarefiev): change ssh driver
    ironic_driver = kernel_params.get('callback-driver-name', 'ansible_ssh')
    if inspect and api_url is None:
        _process_error('Ironic ansible callback: Mandatory parameter '
                       '"ironic_api_url" is missing.')
    if api_url is None or deployment_id is None:
        _process_error('Mandatory parameter ("ironic_api_url" or '
                       '"deployment_id") is missing.')

    boot_mac = kernel_params.get('BOOTIF')
    if boot_mac is None:
        _process_error('Cannot define boot interface, "BOOTIF" parameter is '
                       'missing.')

    # There is a difference in syntax in BOOTIF variable between pxe and ipxe
    # boot with Ironic. For pxe boot the leading `01-' denotes the device type
    # (Ethernet) and is not a part of the MAC address.
    if boot_mac.startswith('01-'):
        boot_mac = boot_mac[3:].replace('-', ':')

    # The NIC may take a while to get its address; poll with retries.
    for n in range(_GET_ADDR_MAX_ITERATION):
        boot_ip = _get_interface_ip(boot_mac)
        if boot_ip is not None:
            break
        time.sleep(_RETRY_INTERVAL)
    else:
        _process_error('Cannot find IP address of boot interface.')

    data = {"callback_url": "ssh://" + boot_ip}

    if inspect:
        # BUG FIX: the original was missing the closing parenthesis of this
        # expression, which made the whole module a SyntaxError.
        passthru = ('%(api-url)s/v1/drivers/%(driver)s/vendor_passthru'
                    '/inspect' % {'api-url': api_url,
                                  'driver': ironic_driver})
    else:
        passthru = ('%(api-url)s/v1/nodes/%(deployment_id)s/vendor_passthru'
                    '/heartbeat' % {'api-url': api_url,
                                    'deployment_id': deployment_id})

    # FIX: seed `error` so the final-attempt branch can never hit a
    # NameError if requests.post itself misbehaves without raising.
    error = 'Callback to Ironic was never attempted.'
    for attempt in range(_POST_CALLBACK_MAX_ITERATION):
        try:
            resp = requests.post(passthru, data=json.dumps(data),
                                 headers={'Content-Type': 'application/json',
                                          'Accept': 'application/json'})
        except Exception as e:
            error = str(e)
        else:
            if resp.status_code != 202:
                error = ('Wrong status code %d returned from Ironic API' %
                         resp.status_code)
            else:
                break
        if attempt == (_POST_CALLBACK_MAX_ITERATION - 1):
            _process_error(error)
        time.sleep(_RETRY_INTERVAL)
if __name__ == '__main__':
sys.exit(main())
|
4,236 | 331b5f0a34db4d12d713439db3d2818e8c922310 | # models.py- Team
from django.db import models
class Team(models.Model):
    """A named team; seasons and registrants will reference this model."""
    # BUG FIX: the original class body ended with a bare ``return``
    # statement, which is a SyntaxError outside of a function.
    teamName = models.TextField()
    # Seasons associated
    # Registrants unique
|
4,237 | 4a7d8db2bc3b753ea1a12120e1ad85f31d572dc7 | #!/usr/bin/env python
# encoding: utf-8
"""
@description: 有序字典
(notice: python3.6 以后字典已经有序了)
@author: baoqiang
@time: 2019/11/28 1:34 下午
"""
from collections import OrderedDict
def run206_01():
    """Show that a plain dict and an OrderedDict both iterate in
    insertion order (guaranteed for plain dicts since Python 3.7)."""
    print('Regular dict:')
    plain = {}
    for letter in 'abc':
        plain[letter] = letter.upper()
    for key, value in plain.items():
        print(key, value)
    print('OrderedDict:')
    ordered = OrderedDict()
    for letter in 'abc':
        ordered[letter] = letter.upper()
    for key, value in ordered.items():
        print(key, value)
def run206_02():
    """Equality demo: plain dicts ignore order, OrderedDicts compare it.

    Prints the equality result and the items of both mappings, first for
    plain dicts and then for the corresponding OrderedDicts.
    """
    print('Regular dict:')
    first = {'a': 'A', 'b': 'B', 'c': 'C'}
    second = {'c': 'C', 'b': 'B', 'a': 'A'}
    print(first == second)
    for mapping in (first, second):
        for key, value in mapping.items():
            print(key, value)
    print('OrderedDict:')
    first = OrderedDict(first)
    second = OrderedDict(second)
    print(first == second)
    for mapping in (first, second):
        for key, value in mapping.items():
            print(key, value)
def run206_03():
    """Demonstrate re-ordering entries with OrderedDict.move_to_end."""
    letters = OrderedDict([('a', 'A'), ('b', 'B'), ('c', 'C')])

    def dump(header):
        # Print a section header followed by the current item order.
        print(header)
        for key, value in letters.items():
            print(key, value)

    dump('Before:')
    letters.move_to_end('b')
    dump('\nmove_to_end():')
    letters.move_to_end('b', last=False)
    dump('\nmove_to_end(last=False):')
|
4,238 | 23b6d754adf1616bc6ea1f8c74984fbd8dade6dd | # 나의 풀이
def solution(prices):
    """Given per-second stock prices, return for each second how many
    seconds the price did not drop below it (inclusive of the second in
    which the first drop happens)."""
    answer = []
    for i, current in enumerate(prices):
        held = 0
        for later in prices[i + 1:]:
            held += 1
            # Stop counting at the first second the price dropped.
            if current > later:
                break
        answer.append(held)
    return answer
|
4,239 | 6c10213c2e866ec84f229aa426c7122aa817d167 | from django.contrib import admin
from coupon.models import Coupon, Games
# Expose the coupon app's models in the Django admin site with the
# default ModelAdmin options.
admin.site.register(Coupon)
admin.site.register(Games)
WEEKDAYS = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']


def day_after(start_day, days):
    """Return the weekday name *days* days after *start_day*.

    BUG FIX: the original computed ``start + days - 7`` and relied on
    Python's negative indexing, which raises IndexError once the total
    exceeds 13 days.  Modular arithmetic works for any day count.
    """
    return WEEKDAYS[(WEEKDAYS.index(start_day) + days) % len(WEEKDAYS)]


if __name__ == '__main__':
    tokens = input('Enter a day of the week and number of days: ').split()
    print(day_after(tokens[0], int(tokens[-1])))
4,241 | 355e2799e89dfea4f775480ea7d829a075f92473 | from flask import current_app
def get_logger():
    """Return the logger of the currently active Flask application."""
    return current_app.logger
def debug(msg, *args, **kwargs):
    """Log *msg* at DEBUG level on the active Flask app's logger."""
    get_logger().debug(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
    """Log *msg* at INFO level on the active Flask app's logger."""
    get_logger().info(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
    """Log *msg* at WARNING level (delegates to logger.warning)."""
    get_logger().warning(msg, *args, **kwargs)
def error(msg, *args, **kwargs):
    """Log *msg* at ERROR level on the active Flask app's logger."""
    get_logger().error(msg, *args, **kwargs)
|
4,242 | af523777e32c44112bd37a4b9dcbc0941f7e8236 | # Generated by Django 2.2.6 on 2019-10-10 07:02
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the ``cronjob`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='cronjob',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titel', models.CharField(max_length=255)),
                ('adresse', models.URLField(max_length=255)),
                ('authentifizierung_checked', models.BooleanField(default=False)),
                ('benutzername', models.CharField(max_length=255)),
                ('passwort', models.CharField(max_length=255)),
                # NOTE(review): this default is the *fixed* timestamp at which
                # makemigrations ran, not "now" -- presumably the model used
                # datetime.datetime.now() without parentheses; confirm intent.
                ('ausführen', models.DateTimeField(default=datetime.datetime(2019, 10, 10, 9, 2, 22, 105756))),
                ('benachrichtigung_fehlschlag', models.BooleanField(default=False)),
                ('benachrichtigung_erfolg', models.BooleanField(default=False)),
                ('benachrichtigung_deaktivierung', models.BooleanField(default=False)),
                ('antwort_speichern', models.BooleanField(default=False)),
            ],
        ),
    ]
|
4,243 | 38bd18e9c1d17f25c10321ab561372eed58e8abc | #딕셔너리로 데이터 표현
# sales = {'hong':0,'lee':0,'park':0}
# d = {'z':10, 'b':20,'c':30}
# print(d)
# d.pop('b')
# print(d)
# d['f']=40
# print(d)
# d.pop('z')
# d['z'] = 40
# print(d.keys())
#반복문(while)
#조건이 참일동안 수행
#while True:
# print('python!!!')
# a = 0
# while a < 10:
# a += 1
# print(a)
# a = 0
# while True:
# a +=1
# print(a)
# if a>=10:
# break
#1부터 1씩증가하는 숫자의 합이 1000초과시 숫자 출력
# a = 0 #1씩 증가하는 수
# s = 0 #합계를 저장할 변수
# while True:
# a +=1
# s +=a
# if s>=1000: break
#
# print('누적값:' , s)
# print('마지막숫자:', a)
#실습)사용자에게 숫자를 입력을 받아서 출력
#사용자가 0을 입력하면 프로그램 종료
#1)
# while True:
# num=int(input('숫자는?'))
# if num ==0: break
# print('입력숫자:', num)
#
# #2)
# #num = 1
#
# #사용자가 q를 입력하면 반복문 종료
# s=0
# while True:
# num = input('숫자는(q:종료)?')
# if num=='q': break
# s +=int(num)
#
# print('누적합계', s)
#실습4
#숫자 두 개와 기호를 입력 받아 계산기 프로그램을 만들어 봅시다.
#단, 사용자가 q를 입력하면 계산기 종료
# while True:
# num = input('첫 번째 숫자 입력(q:종료)')
# if num=='num': break
# num1 = input('두 번째 숫자 입력(q:종료)')
# sign = input('기호는?')
# if sign =='+':
# print('더하기',num+num1)
# elif sign =='-':
# print('빼기:',num-num1)
# elif sign == '*':
# print('곱하기:', num * num1)
# elif sign == '/':
# print('나누기:', num / num1)
# else:
# print('잘못된 기호')
# while True:
# a = input('first:')
# b = input('second:')
# sign = input('sign:')
# if sign == '+':
# print('더하기:', a+b)
#
# if sign == '-':
# print('빼기:', a-b)
#2)
# while True:
# cal = input('계산식은?').split()
# #print(cal)
# if cal[0]=='q': break
# a,sign,b = cal #언패킹
# a=int(a); b =int(b)
# if sign == '+':
# print('더하기', a + b)
# elif sign == '-':
# print('빼기:', a - b)
# elif sign == '*':
# print('곱하기:', a * b)
# elif sign == '/':
# print('나누기:', a / b)
# else:
# print('잘못된 기호')
#실습) 가장 큰수 찾기
# data=[5,6,2,8,9,1]
# max = 0
# for x in data:
# if x > max:
# max=x
#
# print(max)
#실습) 가장 작은수 찾기
# Exercise: find the smallest number in the list.
# FIXES vs. original: no longer shadows the builtin ``min`` and no longer
# seeds the search with the magic value 10 (which silently returns 10 for
# any data whose smallest element is >= 10); seed with the first element.
data = [5, 6, 2, 8, 9, 1]
smallest = data[0]
for x in data:
    if x < smallest:
        smallest = x
print(smallest)
|
4,244 | e881fcfce933d8f3bafcbaab039ddcf98827bf5e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: WuTian
# @Date : 2018/5/3
# @Contact : jsj0804wt@126.com
# @Desc :使用广度优先搜索查找芒果商
from collections import deque
# Toy social graph for the BFS demo: each person maps to the list of
# direct neighbours that the search fans out to.
graph = {}
graph["you"] = ["alice", "bob", "claire"]
graph["bob"] = ["anuj", "peggy"]
graph["alice"] = ["peggy"]
graph["claire"] = ["thom", "jonny"]
graph["anuj"] = []
graph["peggy"] = []
graph["thom"] = []
graph["jonny"] = []
def is_mango_seller(name):
    """Toy predicate: a person sells mangoes iff their name ends in 'm'.

    Uses str.endswith so an empty name returns False instead of raising
    IndexError as the original ``name[-1]`` access did.
    """
    return name.endswith("m")
def search_mango_seller(name):
    """Breadth-first search outward from *name* for a mango seller.

    Prints the first seller found and returns True; returns False once the
    whole reachable part of the module-level ``graph`` has been examined.
    """
    search_queue = deque()
    # FIX: a set makes the "already visited?" check O(1); the original
    # list made each check O(n).  Also uses the `not in` idiom.
    searched = set()
    global graph
    search_queue += graph[name]
    while search_queue:
        person = search_queue.popleft()
        if person not in searched:
            if is_mango_seller(person):
                print("%s is a mango seller" % person)
                return True
            else:
                search_queue += graph[person]
                searched.add(person)
    return False
search_mango_seller("you")
|
4,245 | 1a4da621add157fa6d1f578370d64594b102eeb5 | #This is a file from CS50 Finance
from functools import wraps
from flask import redirect, render_template, session
from threading import Thread
from flask_mail import Message
from application import app, mail
ALLOWED_EXTENSIONS = {"png", "PNG", "jpg", "jpeg", "JPG", "JPEG"}
def login_required(f):
    """
    Decorate routes to require login.
    http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # No user id in the session -> redirect the visitor to sign in.
        if session.get("user_id") is None:
            return redirect("/sign_in")
        return f(*args, **kwargs)
    return decorated_function
def allowed_file(filename):
    """Return True when *filename* has an extension listed (with this
    exact casing) in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
# Send message function
def async_send_mail(applic, msg):
    """Send *msg* from a worker thread inside *applic*'s app context."""
    with applic.app_context():
        mail.send(msg)
def send_mail(subject, recipient, template, **kwargs):
    """Render *template* with **kwargs and e-mail it to *recipient*.

    The send happens on a background thread so the request handler does
    not block; the started Thread is returned so callers may join() it.
    """
    msg = Message(subject, recipients=[recipient])
    msg.html = render_template(template, **kwargs)
    thr = Thread(target=async_send_mail, args=[app, msg])
    thr.start()
    return thr
4,246 | 2afc1027c6866e8ab9584a5f7feef4470661f763 | '''CLASS message_unpacker
Message bodies sent through RabbitMQ may take various forms. They were packed
accordingly by the message_packager.
This class reverses the process. Currently, only implemented for message bodies
represented as strings, but could also handle various image formats in a real use
situation
Encapsulating the "unpacking" aspect into this class makes it easier to extend the
functionality of methods needed for unpacking data as a function of the data types
(e.g. lidar, radar, numeric, GPS) that are packaged by message_packager.
'''
import pickle
import json
class MessageUnpacker():
    """Reverses the packing applied by message_packager for bodies sent
    through RabbitMQ: delimited strings, JSON documents, or pickles."""

    def __init__(self):
        print('Generating message unpacker...')

    # Unpacks messages that were packaged as a ';'-delimited string.
    def unpack_string_to_dict(self, incoming_values):
        FIELD_DELIMITER = ';'
        fields = ['message_num', 'time_stamp', 'car_id', 'device_id', 'data_type', 'error_flag', 'data']
        # Pair field names with the split values positionally.
        record_as_dict = dict(zip(fields, incoming_values.split(FIELD_DELIMITER)))
        # Drop the trailing newline artifact of the message body.
        record_as_dict['data'] = record_as_dict['data'].strip('\n')
        return record_as_dict

    # Unpacks messages that were packaged as JSON.
    def unpack_json_to_dict(self, incoming_json):
        return json.loads(incoming_json)

    # Unpacks messages that were pickled.
    def unpickle_to_dict(self, pickled_message):
        return pickle.loads(pickled_message)
|
4,247 | cb28e8bb98cbeed0b703fbfcf7cf30ebca52aa25 | #!C:/Users/Tarang/AppData/Local/Programs/Python/Python37-32/python.exe -u
# CGI response header must precede any body output.
print("Content-Type: text/html")
print()
import cgi,cgitb
cgitb.enable() #for debugging
form = cgi.FieldStorage()
name = form.getvalue('fname')
print("Name of the user is:",name)
import pymysql
# SECURITY: database credentials are hard-coded in source; move them to
# configuration or environment variables.
db = pymysql.connect("localhost","root","Manchesterutd20","sts" )
cursor = db.cursor()
# SECURITY: this executes the raw form field as an SQL statement -- a
# textbook SQL-injection hole.  The value should instead be a bound
# parameter of a fixed query, e.g. cursor.execute(sql, (name,)).
cursor.execute(name)
name = cursor.fetchall()
print (name)
db.close()
4,248 | 8a9ed10bf25f3aa13fde43079303194fc6db26c0 |
import tensorflow as tf
import numpy as np
import OpenAi.Pendulum.ActorCritic.Models as Models
"""
The `Buffer` class implements Experience Replay.
---

---
**Critic loss** - Mean Squared Error of `y - Q(s, a)`
where `y` is the expected return as seen by the Target network,
and `Q(s, a)` is action value predicted by the Critic network. `y` is a moving target
that the critic model tries to achieve; we make this target
stable by updating the Target model slowly.
**Actor loss** - This is computed using the mean of the value given by the Critic network
for the actions taken by the Actor network. We seek to maximize this quantity.
Hence we update the Actor network so that it produces actions that get
the maximum predicted value as seen by the Critic, for a given state.
"""
class Agent:
    """DDPG agent with an experience-replay buffer.

    Stores (s, a, r, s') transitions in fixed-size numpy ring buffers and
    trains the actor/critic pair (plus slowly-updated target copies) held
    in ``Models.Models``.
    """
    def __init__(self, env, buffer_capacity=100000, batch_size=64, gamma = -0.99, tau = 0.005, critic_lr = 0.002, actor_lr = 0.001):
        # NOTE(review): the default discount factor is *negative* (-0.99);
        # DDPG discounts are normally in (0, 1) -- confirm the sign.
        num_states = env.observation_space.shape[0]
        num_actions = env.action_space.shape[0]
        # Number of "experiences" to store at max
        self.buffer_capacity = buffer_capacity
        # Num of tuples to train on.
        self.batch_size = batch_size
        # Its tells us num of times record() was called.
        self.buffer_counter = 0
        self.gamma = gamma
        self.tau = tau
        self.critic_lr = critic_lr
        # NOTE(review): attribute name looks like a typo for "actor_lr";
        # it is unused within this class.
        self.actor_l = actor_lr
        # Instead of list of tuples as the exp.replay concept go
        # We use different np.arrays for each tuple element
        self.state_buffer = np.zeros((self.buffer_capacity, num_states))
        self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
        self.reward_buffer = np.zeros((self.buffer_capacity, 1))
        self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))
        self.models = Models.Models(env=env, critic_lr = critic_lr, actor_lr = actor_lr)
    # Takes (s,a,r,s') obervation tuple as input
    def record(self, obs_tuple):
        # Set index to zero if buffer_capacity is exceeded,
        # replacing old records
        index = self.buffer_counter % self.buffer_capacity
        self.state_buffer[index] = obs_tuple[0]
        self.action_buffer[index] = obs_tuple[1]
        self.reward_buffer[index] = obs_tuple[2]
        self.next_state_buffer[index] = obs_tuple[3]
        self.buffer_counter += 1
    # Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
    # TensorFlow to build a static graph out of the logic and computations in our function.
    # This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
    @tf.function
    def update(self, state_batch, action_batch, reward_batch, next_state_batch,):
        # Training and updating Actor & Critic networks.
        # See Pseudo Code.
        with tf.GradientTape() as tape:
            # Bellman target from the slow-moving target networks.
            target_actions = self.models.target_actor(next_state_batch, training=True)
            y = reward_batch + self.gamma * self.models.target_critic(
                [next_state_batch, target_actions], training=True
            )
            critic_value = self.models.critic_model([state_batch, action_batch], training=True)
            critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
        self.critic_grad = tape.gradient(critic_loss, self.models.critic_model.trainable_variables)
        self.models.critic_optimizer.apply_gradients(
            zip(self.critic_grad, self.models.critic_model.trainable_variables)
        )
        with tf.GradientTape() as tape:
            actions = self.models.actor_model(state_batch, training=True)
            critic_value = self.models.critic_model([state_batch, actions], training=True)
            # Used `-value` as we want to maximize the value given
            # by the critic for our actions
            actor_loss = -tf.math.reduce_mean(critic_value)
        actor_grad = tape.gradient(actor_loss, self.models.actor_model.trainable_variables)
        self.models.actor_optimizer.apply_gradients(
            zip(actor_grad, self.models.actor_model.trainable_variables)
        )
    # We compute the loss and update parameters
    def learn(self):
        # Get sampling range
        record_range = min(self.buffer_counter, self.buffer_capacity)
        # Randomly sample indices
        batch_indices = np.random.choice(record_range, self.batch_size)
        # Convert to tensors
        state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
        action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])
        reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
        reward_batch = tf.cast(reward_batch, dtype=tf.float32)
        next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])
        self.update(state_batch, action_batch, reward_batch, next_state_batch)
        # Soft-update the target networks towards the online networks.
        self.update_target(self.models.target_actor.variables, self.models.actor_model.variables, self.tau)
        self.update_target(self.models.target_critic.variables, self.models.critic_model.variables, self.tau)
    def action(self, state, noise_object):
        # Delegate to the models' exploration policy (actor + noise).
        return self.models.policy(state, noise_object)
    @tf.function
    def update_target(self, target_weights, weights, tau):
        # Polyak averaging: target <- tau * online + (1 - tau) * target.
        for (a, b) in zip(target_weights, weights):
            a.assign(b * tau + a * (1 - tau))
"""
To implement better exploration by the Actor network, we use noisy perturbations,
specifically
an **Ornstein-Uhlenbeck process** for generating noise, as described in the paper.
It samples noise from a correlated normal distribution.
"""
class OUActionNoise:
    """Ornstein-Uhlenbeck process noise for DDPG exploration.

    Each call advances the process by one Euler-Maruyama step of size
    ``dt``, yielding temporally correlated samples.
    """

    def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):
        self.theta = theta
        self.mean = mean
        self.std_dev = std_deviation
        self.dt = dt
        self.x_initial = x_initial
        self.reset()

    def __call__(self):
        # Euler discretisation of dx = theta*(mu - x)*dt + sigma*dW
        # (formula from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process).
        drift = self.theta * (self.mean - self.x_prev) * self.dt
        diffusion = self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)
        sample = self.x_prev + drift + diffusion
        # Remember the sample so the next draw correlates with this one.
        self.x_prev = sample
        return sample

    def reset(self):
        """Restart the process at ``x_initial`` (or zero when not given)."""
        if self.x_initial is not None:
            self.x_prev = self.x_initial
        else:
            self.x_prev = np.zeros_like(self.mean)
|
4,249 | c80b31bc154d5c1c8f9fc0ac226295160f2f9473 | #!/usr/bin/env python
"""
.. module:: convert
:synopsis: used to create info.txt and the <txname>.txt files.
"""
import sys
import os
import argparse
argparser = argparse.ArgumentParser(description =
'create info.txt, txname.txt, twiki.txt and sms.py')
argparser.add_argument ('-utilsPath', '--utilsPath',
help = 'path to the package smodels_utils',\
type = str )
argparser.add_argument ('-smodelsPath', '--smodelsPath',
help = 'path to the package smodels_utils',\
type = str )
args = argparser.parse_args()
if args.utilsPath:
utilsPath = args.utilsPath
else:
databaseRoot = '../../../'
sys.path.append(os.path.abspath(databaseRoot))
from utilsPath import utilsPath
utilsPath = databaseRoot + utilsPath
if args.smodelsPath:
sys.path.append(os.path.abspath(args.smodelsPath))
sys.path.append(os.path.abspath(utilsPath))
from smodels_utils.dataPreparation.inputObjects import MetaInfoInput,DataSetInput
from smodels_utils.dataPreparation.databaseCreation import databaseCreator
from smodels_utils.dataPreparation.massPlaneObjects import x, y, z
#+++++++ global info block ++++++++++++++
info = MetaInfoInput('ATLAS-SUSY-2013-19')
info.comment = 'T2tt UL are from DF channel only, no combined UL map available'
info.sqrts = '8.0'
info.private = False
info.lumi = '20.3'
info.publication = 'http://link.springer.com/article/10.1007/JHEP06(2014)124'
info.url = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/'
info.arxiv = 'http://arxiv.org/abs/1403.4853'
info.prettyName = '2 OS leptons + (b)jets + Etmiss (leptonic/hadronic m_T2)'
info.supersedes = 'ATLAS-CONF-2013-048'
#+++++++ dataset block ++++++++++++++
dataset = DataSetInput('data')
dataset.setInfo(dataType = 'upperLimit', dataId = None)
#+++++++ next txName block ++++++++++++++
T2tt = dataset.addTxName('T2tt')
T2tt.constraint ="[[['t+']],[['t-']]]"
T2tt.conditionDescription ="None"
T2tt.condition ="None"
T2tt.source = "ATLAS"
#+++++++ next mass plane block ++++++++++++++
T2tt = T2tt.addMassPlane(2*[[x, y]])
T2tt.dataUrl = "http://hepdata.cedar.ac.uk/view/ins1286444/d72"
T2tt.histoDataUrl = "https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_10a.png"
T2tt.figure = "fig 10a"
T2tt.figureUrl = "https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_10a.png"
T2tt.setSources(dataLabels= ['obsExclusion', 'upperLimits'],
dataFiles= ['orig/exclusionline_T2tt_DF.txt', 'orig/T2tt.txt'],
dataFormats= ['txt', 'txt'])
#+++++++ next txName block ++++++++++++++
T2bbWW = dataset.addTxName('T2bbWW')
T2bbWW.constraint ="[[['b','W+']],[['b','W-']]]"
T2bbWW.conditionDescription ="None"
T2bbWW.condition ="None"
T2bbWW.source = "ATLAS"
#+++++++ next mass plane block ++++++++++++++
T2bbWW = T2bbWW.addMassPlane(2*[[x, y]])
T2bbWW.figure = 'Fig.(aux) 3e'
T2bbWW.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_03e.png'
T2bbWW.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d42'
T2bbWW.setSources(dataLabels= ['obsExclusion', 'upperLimits'],
dataFiles= ['orig/exclusionline_T2bbWW.txt', 'orig/T2bbWW.txt'],
dataFormats= ['txt', 'txt'])
#+++++++ next txName block ++++++++++++++
T6bbWW = dataset.addTxName('T6bbWW')
T6bbWW.checked ="VM"
T6bbWW.constraint ="[[['b'],['W+']],[['b'],['W-']]]"
T6bbWW.conditionDescription ="None"
T6bbWW.condition ="None"
T6bbWW.source = "ATLAS"
T6bbWW.massConstraint = None
T6bbWWoff = dataset.addTxName('T6bbWWoff')
T6bbWWoff.constraint ="22*([[['b'],['l+','nu']],[['b'],['l-','nu']]])"
T6bbWWoff.conditionDescription="[[['b'],['l+','nu']],[['b'],['l-','nu']]] > 2*[[['b'],['e+','nu']],[['b'],['e-','nu']]]"
T6bbWWoff.condition="Cgtr([[['b'],['l+','nu']],[['b'],['l-','nu']]],2*[[['b'],['e+','nu']],[['b'],['e-','nu']]])"
T6bbWWoff.massConstraint = [['dm >= 0.0', 'dm <= 76.0'], ['dm >= 0.0', 'dm <= 76.0']]
T6bbWWoff.source = "ATLAS"
#+++++++ next mass plane block ++++++++++++++
T6bbWWLSP001 = T6bbWW.addMassPlane(2*[[x, y, 1.0]])
T6bbWWLSP001.figure = 'Fig.(aux) 3a'
T6bbWWLSP001.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_3a.png'
T6bbWWLSP001.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d30'
T6bbWWLSP001.setSources(dataLabels= ['obsExclusion', 'upperLimits'],
dataFiles= ['orig/exclusionline_T6bbWWLSP001.txt', 'orig/T6bbWWLSP001.txt'],
dataFormats= ['txt', 'txt'])
T6bbWWoff.addMassPlane(T6bbWWLSP001)
#+++++++ next mass plane block ++++++++++++++
T6bbWWD010 = T6bbWW.addMassPlane(2*[[x, x-10.0, y]])
T6bbWWD010.figure = "fig(aux) 3b"
T6bbWWD010.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_3b.png'
T6bbWWD010.dataUrl = 'Not defined'
T6bbWWD010.setSources(dataLabels= ['obsExclusion', 'upperLimits'],
dataFiles= ['orig/exclusionline_T6bbWWD10.txt', 'orig/T6bbWWD010.txt'],
dataFormats= ['txt', 'txt'])
T6bbWWoff.addMassPlane(T6bbWWD010)
#+++++++ next mass plane block ++++++++++++++
T6bbWWM1300 = T6bbWW.addMassPlane(2*[[300.0, x, y]])
T6bbWWM1300.figure = 'Fig.(aux) 3c'
T6bbWWM1300.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_16.png'
T6bbWWM1300.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d36'
T6bbWWM1300.setSources(dataLabels= ['obsExclusion', 'upperLimits'],
dataFiles= ['orig/exclusionline_T6bbWWM1300.txt', 'orig/T6bbWWM1300.txt'],
dataFormats= ['txt', 'txt'])
T6bbWWoff.addMassPlane(T6bbWWM1300)
#+++++++ next mass plane block ++++++++++++++
T6bbWWC106 = T6bbWW.addMassPlane(2*[[x, 106.0, y]])
T6bbWWC106.figure = 'Fig.(aux) 3f'
T6bbWWC106.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_20.png'
T6bbWWC106.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d68'
T6bbWWC106.setSources(dataLabels= ['obsExclusion', 'upperLimits'],
dataFiles= ['orig/exclusionline_T6bbWWC106.txt', 'orig/T6bbWWC106.txt'],
dataFormats= ['txt', 'txt'])
T6bbWWoff.addMassPlane(T6bbWWC106)
#+++++++ next mass plane block ++++++++++++++
T6bbWWx200 = T6bbWW.addMassPlane(2*[[x, y*2.0, y]])
T6bbWWx200.figure = 'Fig.(aux) 3d'
T6bbWWx200.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_17.png'
T6bbWWx200.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d39'
T6bbWWx200.setSources(dataLabels= ['obsExclusion', 'upperLimits'],
dataFiles= ['orig/exclusionline_T6bbWWx200.txt', 'orig/T6bbWWx200.txt'],
dataFormats= ['txt', 'txt'])
T6bbWWoff.addMassPlane(T6bbWWx200)
databaseCreator.create()
|
4,250 | 4957e62deec6192aabdf7144f02b28c7ce60ed4b | from django.contrib import admin
from .models import Account
# Register your models here.
class AuthenticationCustom(admin.ModelAdmin):
    """Admin options for Account: show email/id in the changelist and
    allow searching by email or mobile number."""
    list_display = ("email", "id")
    search_fields = ["email", "mobile"]
admin.site.register(Account, AuthenticationCustom) |
4,251 | 731d2891bbc29879fd8900a11077c93550e4e88d | from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views.generic import TemplateView
from pos.service.sumup import API_URL, create_checkout
from pos.models.sumup import SumUpAPIKey, SumUpOnline
from pos.forms import RemotePayForm
from pos.models.user import User
class RemotePayView(TemplateView):
    """Static template view for the remote payment page."""
    template_name = 'remotepay/pay.djhtml'
def pay(request):
    """Render and process the remote payment form.

    On POST, validates the form, looks up the (non-crew) user by phone
    number, records a pending SumUpOnline transaction and starts a SumUp
    checkout.  On GET, or when validation fails, the form is (re)rendered.
    """
    if request.method == 'POST':
        form = RemotePayForm(request.POST)
        if form.is_valid():
            phone = form.cleaned_data['phone']
            amount = form.cleaned_data['amount']
            # Check if the user exists; crew members may not pay remotely.
            try:
                user = User.objects.get(phone=phone, is_crew=False)
            except User.DoesNotExist:
                return render(request, 'remotepay/pay.djhtml', {'form': form, 'error': True})
            # The user exists: record the pending transaction, then contact SumUp.
            t = SumUpOnline.objects.create(user=user, amount=amount)
            try:
                txid = create_checkout(SumUpAPIKey.objects.all().last(), t.id, t.amount, user.phone)
                t.transaction_id = txid
                t.status = 1
                t.save()
                return render(request, 'remotepay/process.djhtml', {'txid': txid, 'phone': phone, 'amount': amount})
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt.
                return render(request, 'remotepay/pay.djhtml', {'form': form, 'systemerror': True})
    else:
        # BUG FIX: was `form = RemotePayForm` (the class itself); the template
        # needs a form *instance* to render the fields.
        form = RemotePayForm()
    return render(request, 'remotepay/pay.djhtml', {'form': form})
def pay_callback(request, checkoutid):
    """Redirect the user according to the current status of a SumUp checkout.

    Status mapping (as used elsewhere in this module): 0/3 -> error,
    4 -> success, 1/2 -> still pending (hold page).
    """
    # Get the status of the transaction for the user
    t = SumUpOnline.objects.get(transaction_id=checkoutid)
    if t.status == 0 or t.status == 3:
        return HttpResponseRedirect('/pay/error/')
    elif t.status == 4:
        return HttpResponseRedirect('/pay/success/')
    elif t.status == 1 or t.status == 2:
        return render(request, 'remotepay/hold.djhtml', {'checkoutid': checkoutid})
    # BUG FIX: previously an unrecognised status fell off the end and the view
    # returned None, which makes Django raise; treat it as an error instead.
    return HttpResponseRedirect('/pay/error/')
def pay_success(request):
    """Render the payment-succeeded page."""
    return render(request, 'remotepay/success.djhtml')
def pay_error(request):
    """Render the payment-failed page."""
    return render(request, 'remotepay/error.djhtml')
def pay_hold(request):
    """Render the payment-pending (hold) page."""
    return render(request, 'remotepay/hold.djhtml')
|
4,252 | 223d96806631e0d249e8738e9bb7cf5b1f48a8c1 | #!/usr/bin/env python
import sys
sys.path.append('./spec')
# FIXME: make the spec file an argument to this script
from dwarf3 import *
def mandatory_fragment(mand):
    """Map a truthy mandatory-flag to the macro name fragment used in the
    generated output ("mandatory" or "optional")."""
    return "mandatory" if mand else "optional"
def super_attrs(tag):
    """Collect the attributes a tag inherits from its (transitive) bases.

    Entries of tag_map / artificial_tag_map are looked up as
    (attr_list, children, bases) triples — index [0] is the attribute list
    and index [2] the base list (matches the unpacking in main()).
    Missing tags default to empty triples.
    """
    #sys.stderr.write("Calculating super attrs for %s\n" % tag)
    # attrs of all bases, plus super_attrs of all bases
    immediate_base_attrs = sum([tag_map.get(base, ([], [], []))[0] \
        for base in tag_map.get(tag, ([], [], []))[2] + artificial_tag_map.get(tag, ([], [], []))[2]], []) \
        + sum([artificial_tag_map.get(base, ([], [], []))[0] \
        for base in tag_map.get(tag, ([], [], []))[2] + artificial_tag_map.get(tag, ([], [], []))[2]], [])
    # Recurse into both the regular and the artificial base lists.
    base_attrs = sum(map(super_attrs, tag_map.get(tag, ([], [], []))[2]), immediate_base_attrs) + \
        sum(map(super_attrs, artificial_tag_map.get(tag, ([], [], []))[2]), [])
    #sys.stderr.write("Calculated super attrs for %s as %s\n" % (tag, str([x for x in set(base_attrs)])))
    # Deduplicate; the resulting order is unspecified (set iteration order).
    return [x for x in set(base_attrs)] #+ tag_map[tag][0]
def main(argv):
    """Emit the class/attribute macro description for every DWARF tag.

    Output is a C-preprocessor-style DSL: forward_decl(...) for all tags
    first, then one begin_class/.../end_class section per tag.
    (Python 2 script — uses print statements.)
    """
    # Forward-declare every tag before any class body references it.
    for (tag, (attr_list, children, bases) ) in tags:
        print "forward_decl(%s)" % tag
    for (tag, (attr_list, children, bases) ) in tags:
        print "begin_class(%s, %s, %s)" % (tag, \
            'base_initializations(' + ', '.join(["initialize_base(" + base + ")" for base in bases]) + ')', \
            ', '.join(["declare_base(%s)" % base for base in bases]))
        # Attributes declared directly on this tag.
        for (attr, mand) in attr_list:
            print "\tattr_%s(%s, %s)" % (mandatory_fragment(mand), attr, attr_type_map[attr])
        # Attributes inherited from (transitive) bases.
        for (attr, mand) in super_attrs(tag):
            print "\tsuper_attr_%s(%s, %s)" % (mandatory_fragment(mand), attr, attr_type_map[attr])
        for child in children:
            print "\tchild_tag(%s)" % child
        # Allow per-tag extra declarations supplied by the including file.
        print "#ifdef extra_decls_%s\n\textra_decls_%s\n#endif" % (tag, tag)
        print "end_class(%s)" % tag
# main script
if __name__ == "__main__":
main(sys.argv[1:])
|
4,253 | 1fb3904d48905ade8f83b6e052057e80302ec5a7 | from cancion import *
class NodoLista:
    """Doubly-linked list node holding a song (cancion)."""
    def __init__(self, cancion, s, a):
        # elemento: payload; siguiente: next node (s); anterior: previous node (a).
        self.elemento = cancion
        self.siguiente = s
        self.anterior = a
|
4,254 | 2294951af6ad7a5e752285194d0586c79c49ef87 | """Run golden output tests.
The golden tests are a convenient way to make sure that a "small" change
does not break anyone else.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import os
import subprocess
import sys
GOLDEN_CASES_DIR = 'src/googleapis/codegen/testdata/golden'
GOLDEN_DISCOVERY_DIR = 'src/googleapis/codegen/testdata/golden_discovery'
VERBOSE = False
Test = namedtuple('Test', [
'language',
'variant',
'input',
'options',
'golden_file'])
def FindTests():
  """Finds golden files and returns Test cases for each.

  Walks GOLDEN_CASES_DIR, which is laid out as golden/<language>/<variant>/,
  and yields a Test for every golden file found two levels down.
  """
  for root, _, files in os.walk(GOLDEN_CASES_DIR):
    path_parts = root.split('/')
    if path_parts[-3] == 'golden':
      language = path_parts[-2]
      variant = path_parts[-1]
      for golden_file in files:
        # BUG FIX: was `input, _ = golden_file.split('.')`, which raised
        # ValueError for names without exactly one dot; split on the first
        # dot only. (Also stops shadowing the builtin `input`.)
        base = golden_file.split('.', 1)[0]
        options = None
        if base.endswith('_monolithic'):
          base = base[:-len('_monolithic')]
          options = ['--monolithic_source_name=sink']  # pure hackery
        yield Test(
            language=language,
            variant=variant,
            input=base,
            options=options,
            golden_file=os.path.join(root, golden_file))
def Generate(language, variant, input, options, out_file):
  """Runs generate_library.py for one test case.

  Args:
    language: target language name.
    variant: language variant (subdirectory of the golden tree).
    input: base name of the discovery document (without .json).
    options: extra command line flags, or None.
    out_file: path the generated output is written to.

  Returns:
    True on success, False if the generator exited with an error.
  """
  cmd = [
      'python',
      'src/googleapis/codegen/generate_library.py',
      '--input=%s/%s.json' % (GOLDEN_DISCOVERY_DIR, input),
      '--language=%s' % language,
      '--language_variant=%s' % variant,
      '--output_format=txt',
      '--output_file=%s' % out_file,
      ]
  if options:
    cmd.extend(options)
  try:
    if VERBOSE:
      print('generate cmd: %s' % ' '.join(cmd))
    subprocess.check_call(cmd, stdout=sys.stdout, stderr=sys.stderr)
  except subprocess.CalledProcessError:
    # FIX: the exception was previously bound to an unused variable; only the
    # fact that the subprocess failed matters here.
    msg = '(%s, %s, %s, %s)' % (language, variant, input, options)
    print('FAIL: generate(%s), cmd=[%s]' % (msg, ' '.join(cmd)))
    return False
  return True
def RunTest(test):
  """Generates output for one Test and diffs it against its golden file."""
  # TODO: write to a proper temporary directory instead of a fixed /tmp path.
  out_file = '/tmp/%s.new' % test.golden_file.split('/')[-1]
  if Generate(test.language, test.variant, test.input, test.options, out_file):
    # `diff --brief` exits non-zero on mismatch, which check_call turns
    # into CalledProcessError — i.e. a test failure.
    cmd = ['diff', '--brief', test.golden_file, out_file]
    try:
      subprocess.check_call(cmd, stderr=sys.stderr)
      print('PASS: %s, %s, %s, %s' % (test.language, test.variant, test.input, test.options))
    except subprocess.CalledProcessError as e:
      print('FAIL: %s' % str(test))
def main(argv):
  """Runs every golden test found under GOLDEN_CASES_DIR.

  Args:
    argv: command line arguments (no positional arguments are accepted).
  """
  if len(argv) > 1:
    # BUG FIX: previously raised app.UsageError, but `app` is never imported,
    # so excess arguments crashed with a NameError instead of a usage message.
    raise SystemExit('Too many command-line arguments.')
  # Make the in-tree `src` package importable by the generator subprocess.
  src_path = os.path.join(os.getcwd(), 'src')
  python_path = os.environ.get('PYTHONPATH')
  if python_path:
    os.environ['PYTHONPATH'] = '%s:%s' % (src_path, python_path)
  else:
    os.environ['PYTHONPATH'] = src_path
  for test in FindTests():
    RunTest(test)
if __name__ == '__main__':
main(sys.argv)
|
4,255 | 3a1b0b9891fec7b3d722f77cd2f3f6efa878a7a0 | from django import forms
from basic_app_new.models import *
class UpdateFood(forms.ModelForm):
    """ModelForm for editing an Old_Food_Diary entry (nutrition facts and
    related links)."""
    class Meta:
        model = Old_Food_Diary
        fields = ['mfg_code', 'food_name', 'description', 'food_type', 'calories', 'fats', 'protein', 'carbohydrates', 'link_of_image', 'link_of_recipie', 'purchasing_link']
class UpdatePurchaseFood(forms.ModelForm):
    """ModelForm for editing a purchase_cards entry."""
    class Meta:
        model = purchase_cards
        fields = ['food_name', 'description', 'ss_code', 'calorie', 'fat', 'protein', 'carbs', 'image_path']
4,256 | fd57e13269ca00ed5eb05e00bd7999c041141187 | import reddit
import tts
import sys
import praw
import os
#TODO: CENSOR CURSE WORDS,tag images that have curse words in them. strip punctuation from comment replies mp3
#TODO: pay for ads :thinking: buy views?
#TODO: sort by top upvotes
#todo: remove the formatting stuff
#todo: redo ducking
#todo quick script to get high upvote replies
#todo: remove hyperlinks
# Usage: <script> <post_id> <num_posts>
POST_ID = sys.argv[1]
NUM_POSTS = int(sys.argv[2])
# SECURITY NOTE(review): the Reddit client id/secret are hardcoded here and
# committed to source control — they should be moved to environment variables
# or a config file and the secret rotated.
reddit_object = praw.Reddit(
    client_id="aAhfCgWHCGOylw",
    client_secret="FLrVvWquolZc4cnKaEhULqzfUYsxQQ",
    user_agent='reddit_to_vid')
print(f"NOW PROCESSING POST ID: {POST_ID}")
# Fetch the post title plus its top comments for text-to-speech conversion.
comments_from_post,post_title = reddit.get_top_comments_from_id(reddit_object,POST_ID,NUM_POSTS)
tts.comment_to_mp3(post_title,'./quota.txt','titles',0,randomize=True)
n = 1
for comment in comments_from_post:
tts.comment_to_mp3(comment,"./quota.txt",POST_ID,n,randomize=True)
n+=1
tts.comment_to_mp3("Oh, you made it to the end? You're a ducking beast! Lets make a deal: Hit like and subscribe and I will provide more humanoid content. Goodbye!","./quota.txt",'duck',1,randomize=True)
|
4,257 | 12ecfd2750f79fd19355665b6e57c2103a3cac3e | #!/usr/bin/env python3
"""Shows how to call C code from python"""
__appname__ = "myccalc.py"
__author__ = "Joseph Palmer <joseph.palmer18@imperial.ac.uk>"
__version__ = "0.0.1"
__license__ = "License for this code/"
__date__ = "Dec-2018"
## imports ##
import os
import ctypes
# Load the C library into python - needs the full path for some reason!
# Load the C library into python - CDLL needs the full path for the lookup.
so_filepath = "{}/libmycalc.so".format(os.getcwd())
# BUG FIX: the library was previously dlopen'ed twice (once through
# ctypes.cdll.LoadLibrary with its result discarded, then again through
# ctypes.CDLL); a single CDLL call is sufficient.
myccalc = ctypes.CDLL(so_filepath)
# make a simpler name for the mycalc.add_floats
add_floats = myccalc.add_floats
# tell python what variables this function takes & returns
add_floats.argtypes = [ctypes.c_float, ctypes.c_float]
add_floats.restype = ctypes.c_float
# the function can now be used
x = 1.2
y = 3.3
a = add_floats(x, y)
print("The sum of %.1f and %.1f is %.1f" % (x, y, a))
# we can do the same for others
sf = myccalc.subtract_floats
sf.argtypes = [ctypes.c_float, ctypes.c_float]
sf.restype = ctypes.c_float
b = sf(y, x)
print("Subtracting %.1f from %.1f is %.1f" % (x, y, b))
|
4,258 | 46f218829e1bf324d4c50ea0ff7003bc48b64e2a | from __future__ import absolute_import
import itertools
from django.contrib import messages
from django.core.context_processors import csrf
from django.db import transaction
from django.http import HttpResponseRedirect
from django.views.decorators.cache import never_cache
from django.utils.decorators import method_decorator
from sudo.decorators import sudo_required
from sentry.models import (Project, ProjectStatus, Organization, OrganizationStatus)
from sentry.plugins import plugins
from sentry.web.forms.accounts import (
ProjectEmailOptionsForm, NotificationSettingsForm, NotificationReportSettingsForm,
NotificationDeploySettingsForm
)
from sentry.web.decorators import login_required
from sentry.web.frontend.base import BaseView
from sentry.web.helpers import render_to_response
from sentry.utils.auth import get_auth_providers
from sentry.utils.safe import safe_execute
class AccountNotificationView(BaseView):
    """Account-level notification settings page.

    Builds one form per settings area (global settings, weekly reports,
    per-organization deploy notifications, per-project email options and
    plugin-provided forms), saves them all together on a valid POST, and
    renders them otherwise.
    """
    # Form class for the global settings; overridable by subclasses.
    notification_settings_form = NotificationSettingsForm

    @method_decorator(never_cache)
    @method_decorator(login_required)
    @method_decorator(sudo_required)
    @method_decorator(transaction.atomic)
    def handle(self, request):
        # Global notification settings for the current user.
        settings_form = self.notification_settings_form(request.user, request.POST or None)
        reports_form = NotificationReportSettingsForm(
            request.user, request.POST or None, prefix='reports'
        )
        # One deploy-notification form per visible organization the user is a
        # member of, sorted by organization name.
        org_list = list(
            Organization.objects.filter(
                status=OrganizationStatus.VISIBLE,
                member_set__user=request.user,
            ).distinct()
        )
        org_forms = [
            (
                org, NotificationDeploySettingsForm(
                    request.user, org, request.POST or None, prefix='deploys-org-%s' % (org.id, )
                )
            ) for org in sorted(org_list, key=lambda o: o.name)
        ]
        # One email-options form per visible project the user has an active
        # team membership in, sorted by (organization name, project name).
        project_list = list(
            Project.objects.filter(
                team__organizationmemberteam__organizationmember__user=request.user,
                team__organizationmemberteam__is_active=True,
                status=ProjectStatus.VISIBLE,
            ).distinct()
        )
        project_forms = [
            (
                project, ProjectEmailOptionsForm(
                    project,
                    request.user,
                    request.POST or None,
                    prefix='project-%s' % (project.id, )
                )
            ) for project in sorted(project_list, key=lambda x: (x.organization.name, x.name))
        ]
        # Plugin-supplied notification forms; safe_execute shields the view
        # from exceptions raised by misbehaving plugins.
        ext_forms = []
        for plugin in plugins.all():
            for form in safe_execute(plugin.get_notification_forms, _with_transaction=False) or ():
                form = safe_execute(
                    form,
                    plugin,
                    request.user,
                    request.POST or None,
                    prefix=plugin.slug,
                    _with_transaction=False
                )
                if not form:
                    continue
                ext_forms.append(form)
        if request.POST:
            # Save only if *every* form validates, so the page behaves as a
            # single atomic settings form.
            all_forms = list(
                itertools.chain(
                    [settings_form, reports_form], ext_forms, (f for _, f in project_forms),
                    (f for _, f in org_forms)
                )
            )
            if all(f.is_valid() for f in all_forms):
                for form in all_forms:
                    form.save()
                messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')
                return HttpResponseRedirect(request.path)
        context = csrf(request)
        context.update(
            {
                'settings_form': settings_form,
                'project_forms': project_forms,
                'org_forms': org_forms,
                'reports_form': reports_form,
                'ext_forms': ext_forms,
                'page': 'notifications',
                'AUTH_PROVIDERS': get_auth_providers(),
            }
        )
        return render_to_response('sentry/account/notifications.html', context, request)
|
4,259 | c30f11e9bac54771df5198971c312624f68d0a33 | from django.db import models
from django.template.defaultfilters import slugify
# Create your models here.
class SlugStampMixin(object):
    '''
    Mixin that stamps a "<pk>-<slugified name>" slug onto the model on save.

    The model is saved once to obtain a primary key; if the slug derived
    from that pk differs from the stored one (e.g. after a clone), the slug
    is updated and the model saved a second time.
    '''
    def save(self, *args, **kwargs):
        super(SlugStampMixin, self).save(*args, **kwargs) # Method may need to be changed to handle giving it a new name.
        new_slug = self.generate_slug()
        if not new_slug == self.slug: # Triggered when its a clone method
            self.slug = new_slug
            super(SlugStampMixin, self).save(*args, **kwargs) # Method may need to be changed to handle giving it a new name.

    def generate_slug(self):
        # Before the first save there is no pk, so only the bare slug is used.
        slug = slugify(self.name)
        if self.pk:
            return "%d-%s" % (self.pk, slug)
        else:
            return slug
|
4,260 | cc7942c406e9bcb5af43f131fdf0a6441f81c16a | from pycat.base.color import Color
from pycat.sprite import Sprite
from pycat.window import Window
from pyglet.gl.glext_arb import GL_FONT_HEIGHT_NV
from random import randint
window=Window()
class Chick(Sprite):
    """Sprite that spawns at a random position with a random rotation."""
    def on_create(self):
        self.image = 'chick-a.png'
        self.goto_random_position()
        # NOTE(review): opacity is normally an 8-bit alpha (0-255); 500 looks
        # out of range — confirm whether pycat clamps it.
        self.opacity = 500
        self.scale = 1
        self.rotation = randint(0, 360)
# c1 = window.create_sprite(Chick)
# c2 = window.create_sprite(Chick)
# Spawn a large flock of red, translucent, double-size chicks, then start
# the pycat event loop (run() blocks until the window closes).
for i in range(1000):
    e = window.create_sprite(Chick)
    e.opacity = 200
    e.scale = 2
    e.color = Color.RED
window.run()
4,261 | a573c6870392024ec2e84571ccb0bad3f5c4033a | import time
import datetime
from pushover import init, Client
from scraper import *
from config import *
# Get the current time
timeNow = time.strftime("%a %b %d, %I:%M %p").lstrip("0").replace(" 0", " ")
# Initialise Pushover for notifications
client = Client(user_key, api_token=api_token)
# Loop for times of ISS passes and compare to current time
def issCheck():
    """Send a Pushover notification if an ISS pass is happening right now.

    Scans the scraped `column` table's 'Date' entries and notifies via the
    module-level Pushover `client` when one matches the current minute.
    """
    # BUG FIX: pass times were compared against module-level `timeNow`, which
    # is computed once at import time and never refreshed, so the check could
    # only ever match during the minute the script was started.  Recompute the
    # current time (in the same display format) on every call.
    now = time.strftime("%a %b %d, %I:%M %p").lstrip("0").replace(" 0", " ")
    for i in column.keys():
        for x in column[i]:
            if i == 'Date':
                issNow = x
                if issNow == now:
                    client.send_message("ISS is over London: " + x, title="ISS")
            else:
                # Non-date columns carry no pass times; skip them entirely.
                break
while True:
issCheck()
time.sleep(10)
|
4,262 | b0174b6f6c33434ff9b5cdb59531502899d8348a | # Generated by Django 2.2.3 on 2019-07-18 06:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: replaces Order's direct product/quantity
    # fields with an OrderProduct through-model so a single order can hold
    # several products, each with its own quantity.

    dependencies = [
        ('juchu', '0003_auto_20190718_1500'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='order',
            name='product',
        ),
        migrations.RemoveField(
            model_name='order',
            name='quantity',
        ),
        migrations.CreateModel(
            name='OrderProduct',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(default=1)),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='juchu.Order')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='juchu.Product')),
            ],
        ),
    ]
|
4,263 | 74bca94cbcba0851e13d855c02fbc13fb0b09e6a | cijferICOR = float(input('Wat is je cijfer voor ICOR?: '))
# Reward calculator: each grade point is worth 30 euros (`x`).
x = 30
beloningICOR = cijferICOR * x
beloning = 'beloning €'
print(beloning, beloningICOR)
cijferPROG = float(input('Wat is je cijfer voor PROG: '))
beloningPROG = cijferPROG * x
print(beloning, beloningPROG)
cijferCSN = float(input('Wat is je cijfer voor CSN?: '))
beloningCSN = cijferCSN * x
print(beloning, beloningCSN)
# NOTE(review): despite its name, `gemiddelde` holds the SUM of the three
# rewards; it is only divided by 3 when printed below.
gemiddelde = beloningICOR + beloningPROG + beloningCSN
print('de gemiddelde beloning is:€ ', gemiddelde / 3)
# Same sum computed again under a second name.
totalevergoeding = beloningICOR + beloningPROG + beloningCSN
print('uw totale vergoeding is:€ ', totalevergoeding)
gemiddeld_cijfer = (cijferICOR + cijferPROG + cijferCSN) / 3
print('mijn cijfers gemiddeld is een', gemiddeld_cijfer, 'en dat levert een beloning op van: €', totalevergoeding)
|
4,264 | 3a05ebee8e70321fe53637b4792f5821ce7044be | # -*- coding: utf-8 -*-
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
# Author(s): Nicolas Brunie (nbrunie@kalray.eu)
# Created: Aug 8th, 2017
# last-modified: Mar 7th, 2018
###############################################################################
from functools import reduce
from metalibm_core.core.ml_formats import ML_Bool
from metalibm_core.core.ml_operations import (
ML_LeafNode, Comparison, BooleanOperation,
is_leaf_node,
LogicalAnd, LogicalOr, Constant,
BitLogicLeftShift, BitLogicRightShift,
BitArithmeticRightShift,
)
from metalibm_core.core.advanced_operations import PlaceHolder
from metalibm_core.core.ml_table import ML_NewTable
from metalibm_core.utility.log_report import Log
def evaluate_comparison_range(node):
    """Numerical range of a Comparison node, if any.

    Range propagation for comparisons is not implemented, so this is a
    stub that always returns None.
    """
    return None
def is_comparison(node):
    """ test if node is a Comparison node or not

        :return: bool
    """
    return isinstance(node, Comparison)
LOG_VERBOSE_EVALUATE_RANGE = Log.LogLevel("EvaluateRangeVerbose")
## Assuming @p optree has no pre-defined range, recursively compute a range
# from the node inputs
def evaluate_range(optree, update_interval=False, memoization_map=None):
    """ evaluate the range of an Operation node

        Args:
            optree (ML_Operation): input Node
            update_interval (bool): if True, store each computed range on the
                node via set_interval
            memoization_map (dict): node -> range cache shared by the
                recursive calls

        Return:
            sollya Interval: evaluated range of optree or None if no range
            could be determined
    """
    if memoization_map is None:
        memoization_map = {}
    # A pre-defined interval on the node short-circuits any computation.
    init_interval = optree.get_interval()
    if not init_interval is None:
        return init_interval
    else:
        if optree in memoization_map:
            return memoization_map[optree]
        elif isinstance(optree, ML_LeafNode):
            # Leaf with no interval: nothing to propagate (stays None).
            op_range = optree.get_interval()
        elif is_comparison(optree):
            op_range = evaluate_comparison_range(optree)
            if update_interval:
                optree.set_interval(op_range)
        elif isinstance(optree, PlaceHolder):
            # A PlaceHolder forwards the range of its first input.
            op_range = evaluate_range(optree.get_input(0),
                                      update_interval=update_interval,
                                      memoization_map=memoization_map)
            if update_interval:
                optree.set_interval(op_range)
        else:
            # Generic case: evaluate every input first, then let the node's
            # own range_function combine them.
            args_interval = tuple(
                evaluate_range(op, update_interval=update_interval,
                               memoization_map=memoization_map
                ) for op in optree.get_inputs())
            args_interval_map = {op: op_interval for op, op_interval in zip(optree.inputs, args_interval)}
            # evaluate_range cannot rely on bare_range_function only as some
            # operations (e.g. CountLeadingZeros) do not base interval computation
            # on their inputs' intervals but on other parameters
            ops_interval_get = lambda op: args_interval_map[op]
            op_range = optree.range_function(optree.inputs,
                                            ops_interval_getter=ops_interval_get)
            if update_interval:
                optree.set_interval(op_range)
        Log.report(LOG_VERBOSE_EVALUATE_RANGE, "range of {} is {}", optree, op_range)
        memoization_map[optree] = op_range
        return op_range
def forward_attributes(src, dst):
    """ forward compatible attributes from src node to dst node

        :param src: source node for attributes values
        :type src: ML_Operation
        :param dst: destination node for attributes copies
        :type dst: ML_Operation
    """
    dst.set_tag(src.get_tag())
    dst.set_debug(src.get_debug())
    dst.set_handle(src.get_handle())
    # Pipeline stage information only exists on nodes that went through
    # stage assignment, hence the hasattr guard.
    if hasattr(src.attributes, "init_stage"):
        forward_stage_attributes(src, dst)
    # The `likely` hint is only meaningful between two boolean nodes.
    if isinstance(src, BooleanOperation) and isinstance(dst, BooleanOperation):
        dst.likely = src.likely
def forward_stage_attributes(src, dst):
    """ copy node's pipeline-stage attribute from src node to dst node """
    dst.attributes.init_stage = src.attributes.init_stage
def depth_node_ordering(start_node, end_nodes):
    """ order the node between root start_node end end_nodes
        by depth (root first, starting with start_node)

        :param start_node: root of the sort (first node)
        :type start_node: ML_Operation
        :param end_nodes: nodes where the depth sort must end
        :type end_nodes: iterator over ML_Operation
        :return: depth ordered list of nodes
        :rtype: list(ML_Operation)
    """
    ordered_list = []
    ordered_set = set()
    # FIFO traversal starting at the root; duplicates are filtered at pop
    # time so the first (shallowest) occurrence wins.
    working_list = [start_node]
    while working_list:  # idiom fix: was `while working_list != []`
        node = working_list.pop(0)
        if node not in ordered_set:
            ordered_set.add(node)
            ordered_list.append(node)
            # Do not descend past leaf nodes or the designated end nodes.
            if not is_leaf_node(node) and node not in end_nodes:
                working_list.extend(node.get_inputs())
    return ordered_list
def logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):
    """ Logical/Boolean operand list reduction

        NOTE: op_list is assumed non-empty (an empty list raises IndexError).
    """
    # Work on a copy so the caller's list is left untouched.
    local_list = [node for node in op_list]
    # Combine the two front nodes and queue the result at the back: this
    # yields a balanced reduction tree rather than a left-leaning chain.
    while len(local_list) > 1:
        op0 = local_list.pop(0)
        op1 = local_list.pop(0)
        local_list.append(
            op_ctor(op0, op1, precision=precision)
        )
    # assigning attributes to the resulting node
    result = local_list[0]
    result.set_attributes(**kw)
    return result
## Specialization of logical reduce to OR operation
logical_or_reduce = lambda op_list, **kw: logical_reduce(op_list, LogicalOr, ML_Bool, **kw)
## Specialization of logical reduce to AND operation
logical_and_reduce = lambda op_list, **kw: logical_reduce(op_list, LogicalAnd, ML_Bool, **kw)
def uniform_list_check(value_list):
    """ Check that value_list is made of only a single value replicated in
        each element.

        An empty list is considered uniform (this matches the previous
        reduce-based implementation, whose accumulator started at True).
    """
    # Idiom fix: all(...) replaces reduce() with a lambda; it also
    # short-circuits on the first mismatching element.
    return all(value == value_list[0] for value in value_list)
def uniform_vector_constant_check(optree):
    """ check whether optree is a uniform vector constant """
    # Must be a Constant with a known, vector precision; its value list is
    # then checked for element uniformity.
    if isinstance(optree, Constant) and not optree.get_precision() is None \
            and optree.get_precision().is_vector_format():
        return uniform_list_check(optree.get_value())
    return False
def uniform_shift_check(optree):
    """ check whether optree is a bit shift by a uniform vector constant """
    # A scalar (non-vector) shift amount also counts as uniform.
    if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift, BitArithmeticRightShift)):
        return uniform_vector_constant_check(optree.get_input(1)) \
            or not optree.get_input(1).get_precision().is_vector_format()
    return False
def is_false(node):
    """ check if node is a Constant node whose value is equal to boolean False
        (scalar constant or vector constant with all lanes False) """
    return is_scalar_cst(node, False) or is_vector_uniform_cst(node, False)
def is_true(node):
    """ check if node is a Constant node whose value is equal to boolean True
        (scalar constant or vector constant with all lanes True) """
    return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)
def is_scalar_cst(node, value):
    """ check if node is a scalar (non-vector) constant node whose value
        equals value """
    return isinstance(node, Constant) and not node.get_precision().is_vector_format() and node.get_value() == value
def is_vector_uniform_cst(node, scalar_value):
    """ check if node is a vector constant node with each value equals to
        scalar_value """
    # Compares against a list of scalar_value replicated over the vector size.
    return isinstance(node, Constant) and node.get_precision().is_vector_format() and node.get_value() == [scalar_value] * node.get_precision().get_vector_size()
def extract_tables(node):
    """Collect every ML_NewTable reachable from the graph rooted at node.

    Performs a breadth-first traversal of the operation graph and returns
    the set of table nodes encountered.
    """
    visited = set([node])
    tables = set()
    pending = [node]
    while pending:
        current = pending.pop(0)
        if isinstance(current, ML_NewTable):
            tables.add(current)
        elif not isinstance(current, ML_LeafNode):
            # Non-leaf, non-table node: enqueue unseen operands.
            for operand in current.inputs:
                if operand not in visited:
                    visited.add(operand)
                    pending.append(operand)
    return tables
|
4,265 | ffb17b370c892696b341f6d37a2cfe106a5670a5 | import numpy as np
raw = np.load("raw_with_freq.npy").item()
for i in list(raw.keys()):
if len(i) > 8:
del(raw[i])
print(raw)
print(len(list(raw.keys())))
np.save("shorten_raw_with_freq.npy", raw)
|
4,266 | 00790b9d2648d19a37d1d1864e7fdeab0f59f764 | # coding=utf-8
"""
author = jamon
""" |
4,267 | 00f8992173321dfa5ac5b125a2e663b159fafb23 | import cv2
import torch
print('haha') |
4,268 | 2ea33fd06be888db5cda86b345f535532d2a05b5 | #!/usr/bin/python
import glob
import pandas as pd
import numpy as np
manifest = pd.read_csv('./manifest.csv', sep=',', names=['projectId','records'], skiprows=[0])
mailTypes = pd.read_csv('./mail_types.csv', sep=',', names=['typeId','typeName'], skiprows=[0])
#----- mailTypes['typeId'] = pd.to_numeric(mailTypes['typeId'], errors='coerce')
#mailTypes['typeId'] = mailTypes['typeId'].astype(str).astype(int)
#print mailTypes.dtypes
mailAll = pd.DataFrame(columns=['projectId', 'correspondenceId', 'sentDate', 'fromOrganizationId',
'fromUserId', 'correspondenceTypeId', 'correspondenceTypeName', 'responseRequiredByDate'])
path = './correspondence/' # use your path
allFiles = glob.glob(path + "*.csv")
# Merge every correspondence CSV with the mail-type lookup and accumulate
# the result into one frame. (Python 2 script — print statements.)
counter = 0
for file_ in allFiles :
    counter+=1
    print 'files remaining: ' + str(len(allFiles) - counter)
    correspond = pd.read_csv(file_, sep=',', header='infer')
    # Attach the human-readable type name, then drop the duplicated join key.
    mail = pd.merge(correspond, mailTypes, how='left', left_on=['correspondenceTypeId'], right_on=['typeId'])
    mail.drop('typeId', axis=1, inplace=True)
    mail.columns = ['projectId', 'correspondenceId', 'sentDate', 'fromOrganizationId', 'fromUserId', 'correspondenceTypeId', 'correspondenceTypeName', 'responseRequiredByDate']
    # NOTE(review): DataFrame.append copies the whole frame on each call (and
    # is removed in pandas 2.x); collecting frames and pd.concat-ing once
    # would be faster — confirm the pandas version in use.
    mailAll = mailAll.append(mail)
mailAll_df = pd.DataFrame.from_dict(mailAll)
mailAll_df = mailAll_df[['projectId', 'correspondenceId', 'sentDate', 'fromOrganizationId', 'fromUserId', 'correspondenceTypeId', 'correspondenceTypeName', 'responseRequiredByDate']]
mailAll_df.to_csv('mailAll.csv', sep=',')
|
4,269 | 1af6bda6eb4e7a46b22379180ea82e78c67ce771 | # -*- coding: utf-8 -*-
from qav5.http.client import BaseClient
from qav5.http.helper import api
from qav5.utils import Bunch, low_case_to_camelcase
class AppusersClient(BaseClient):
    """HTTP client for the /app_users endpoints.

    Methods decorated with @api have empty bodies on purpose: the decorator
    presumably builds and issues the request from the declared rule/method,
    so those bodies carry documentation only.
    """
    def __init__(self, base_url, access_token=None, **kwargs):
        # NOTE(review): kwargs is passed positionally as one dict rather than
        # expanded with **kwargs -- confirm BaseClient.__init__ expects a dict.
        super().__init__(base_url, kwargs)
        self.access_token = access_token
        # Every request carries the token in the Authorization header.
        self.req_kwargs.update({"headers": {"Authorization": self.access_token}})
        # Wrap each JSON response body for attribute-style access.
        self.interceptor = lambda r, j: Bunch(j)
    @api(rule="/app_users/app_order_create_info", method="get", is_json_req=True)
    def app_order_create_info(self,order_id:int=None):
        """
        Information for the personal-account creation page of an order.
        :param order_id: id of the order.
        :return:
        """
    def contract_upload_for_user(self, sub_firm_id, contract_file):
        """
        Upload the contract file for a single account creation.
        :param contract_file: path to the contract file (opened in binary mode).
        :param sub_firm_id: company id.
        :return:
        """
        return self._call_api("/app_users/contract_upload", method='POST',
                              req_kwargs=dict(data={"sub_firm_id": sub_firm_id},
                                              files=dict(contract_file=open(contract_file, 'rb'))),
                              disable_log=True)
    @api(rule="/app_users/setting", is_json_req=True)
    def app_users_setting(self,id):
        """
        Account edit settings.
        :param id: personal account id.
        :return:
        """
    @api(rule="/app_users/set_allot_admin", is_json_req=True, remove_null=True)
    def set_allot_admin(self, app_user_ids, allot_admin):
        """
        Set the allotment-administrator flag.
        :param app_user_ids: array of personal account ids.
        :param allot_admin: set as allotment admin (0: no | 1: yes).
        :return:
        """
        pass
|
4,270 | 4a2796645f1ab585084be47c8cd984c2945aa38b | #!/usr/bin/python
import os, sys
import csv
import glob
if len(sys.argv)==3:
res_dir = sys.argv[1]
info = sys.argv[2]
else:
print "Incorrect arguments: enter outout directory"
sys.exit(0)
seg = dict([('PB2','1'), ('PB1','2'), ('PA','3'), ('HA','4'), ('NP','5'), ('NA','6'), ('MP','7'), ('NS','8')])
# Read the summary info file:
info_list = []
with open(info, 'r') as csvfile:
reader = csv.reader(csvfile)
for xi in reader:
print xi
info_list = xi
print info_list
# If the info file held several samples (short first field), collect the set
# of subtypes from the last character of each record; otherwise there is a
# single sample whose subtype is the last field.
# Fix: the original wrote list[set(...)], indexing the list *type* instead of
# calling list(...), which raises a TypeError at runtime in Python 2.
if len(info_list[0]) < 4:
    subtypes = list(set([c[-1] for c in info_list]))
else:
    subtypes = [info_list[-1], ]
# Merge all Annotation file of the consensus genome
all_annot = []
assembled_cons = [["Sample Id", "Sample Name", "HA", "NA", "MP", "PB2", "PB1", "PA", "NP", "NS"]]
for sub_type in subtypes:
for x in glob.glob(res_dir + "/Consensus_genome/" + sub_type + "/*csv"):
X = x.split("/")
y = X[-1].replace("-annotation.csv", "")
with open(x, 'rb') as csvfile:
r = csv.reader(csvfile)
ha = "-"
na = "-"
mp = "-"
pb2 = "-"
pb1 = "-"
pa = "-"
np = "-"
ns = "-"
for a in r:
if a[0] != "Genome":
print X, a
seg_nam = a[0].split("|")[1]
a.insert(0,y + "." + seg[seg_nam])
all_annot.append(a)
if a[1].split("|")[1] == "HA": ha = a[-1]
if a[1].split("|")[1] == "NA": na = a[-1]
if a[1].split("|")[1] == "MP": mp = a[-1]
if a[1].split("|")[1] == "PB2": pb2 = a[-1]
if a[1].split("|")[1] == "PB1": pb1 = a[-1]
if a[1].split("|")[1] == "PA": pa = a[-1]
if a[1].split("|")[1] == "NP": np = a[-1]
if a[1].split("|")[1] == "NS": ns = a[-1]
else: annot_header = a
assembled_cons.append([y, a[1].split("|")[0], ha, na, mp, pb2, pb1, pa, np, ns])
with open(res_dir + '/' + sub_type + '-ConsensusDetail.csv', 'wb') as f:
writer = csv.writer(f)
annot_header.insert(0,"Sample Id")
all_annot.insert(0,annot_header)
writer.writerows(all_annot)
with open(res_dir + '/' + sub_type + '-ConsensusSummary.csv', 'wb') as f:
writer = csv.writer(f)
writer.writerows(assembled_cons)
# Merge all SNPs called...
merge_snps = []
for sub_type in subtypes:
for x in glob.glob(res_dir + "/Snps/" + sub_type + "/*.vcf"):
X = x.split("/")
y = X[-1].replace("-genome-snps.vcf", "")
with open(x, 'rb') as csvfile:
r = csv.reader(csvfile, delimiter="\t")
for s in r:
if not s[0].startswith("#"):
print s
seg_nam = s[0].split("|")[1]
s.insert(0, y + "." + seg[seg_nam])
merge_snps.append(s)
with open(res_dir + '/' + sub_type + '-SNPs.csv', 'wb') as f:
writer = csv.writer(f)
merge_snps.insert(0, ["Sample Id", "Sample Name", "POS","ID","REF","ALT", "QUAL", "FILTER", "INFO"])
writer.writerows(merge_snps)
|
4,271 | c1a9c220b9100a927076753d6483ad7c069dea8c | # coding:utf-8
'''
对称二叉树
实现一个函数,用来判断一个二叉树是不是对称的
如果一颗二叉树和它的镜像是一样的,就是对称的
'''
class BinaryTreeNode(object):
    """A binary-tree node holding a value and links to two children."""

    def __init__(self, x):
        self.val = x       # payload stored at this node
        self.left = None   # left child (None = absent)
        self.right = None  # right child (None = absent)
class Solution(object):
    # NOTE(review): despite its name, this method does NOT test symmetry.
    # It swaps the children at every node (mirroring the whole tree in
    # place, since the recursive results are discarded) and returns the
    # root object itself, so the identity comparisons in __main__ below
    # always print True.
    def isSymmetryBonaryTree(self, pRoot):
        # Empty tree: returns False rather than a node.
        if pRoot is None:
            return False
        # In-place swap of this node's two subtrees.
        pRoot.right, pRoot.left = pRoot.left, pRoot.right
        # Recurse to mirror the deeper levels; return values are ignored.
        self.isSymmetryBonaryTree(pRoot.left)
        self.isSymmetryBonaryTree(pRoot.right)
        return pRoot
if __name__ == '__main__':
# The first Binary Tree
pRoot1 = BinaryTreeNode(10)
pRoot2 = BinaryTreeNode(12)
pRoot3 = BinaryTreeNode(12)
pRoot1.left = pRoot2
pRoot1.right = pRoot3
# The second Binary Tree
pRoot4 = BinaryTreeNode(13)
pRoot5 = BinaryTreeNode(14)
pRoot6 = BinaryTreeNode(14)
pRoot7 = BinaryTreeNode(16)
pRoot8 = BinaryTreeNode(17)
pRoot9 = BinaryTreeNode(17)
pRoot10 = BinaryTreeNode(16)
pRoot4.left = pRoot5
pRoot4.right = pRoot6
pRoot5.left = pRoot7
pRoot5.right = pRoot8
pRoot6.left = pRoot9
pRoot6.right = pRoot10
# The third Binary Tree
pRootx1 = BinaryTreeNode(100)
pRootx2 = BinaryTreeNode(102)
pRootx3 = BinaryTreeNode(103)
pRootx1.left = pRootx2
pRootx1.right = pRootx3
s = Solution()
pRootCopy1 = s.isSymmetryBonaryTree(pRoot1)
print pRootCopy1 == pRoot1
pRootCopy4 = s.isSymmetryBonaryTree(pRoot4)
print pRootCopy4 == pRoot4
pRootCopyx1 = s.isSymmetryBonaryTree(pRootx1)
print pRootCopyx1 == pRootx1
|
4,272 | 31246a2e022f3c5b0ce68bb06422307439cbd9b6 | import random
import HardMode
import EasyMode
#Intro function, gets user input of game start, instructions, and game mode
def introduction():
    """Greet the player and dispatch to the chosen difficulty mode."""
    # Numeric menu: 1 = yes, anything else ends the session.
    like_to_play = int(input("Welcome to Rock Paper Scissors, would you like to play? (1 = yes, 2 = no) "))
    if like_to_play != 1:
        print("Goodbye!")
        return
    # Difficulty menu: 1 -> easy, 2 -> hard, anything else is rejected.
    easy_or_hard = int(input("Easy (1) or hard (2)? "))
    if easy_or_hard == 1:
        EasyMode.play_game_easy()
    elif easy_or_hard == 2:
        HardMode.play_game_hard()
    else:
        print("Invalid option!")
introduction()
|
4,273 | 6d5257158a7d2eef63faf2fea27f36721d4349ae | #!/usr/bin/python
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
engine = create_engine("sqlite:///banco.db")
Base = declarative_base()
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
class Funcionario(Base):
    """SQLAlchemy model for an employee, owning zero or more dependents."""
    __tablename__ = 'funcionario'
    id = Column(Integer,primary_key=True)   # surrogate primary key
    nome = Column(String)                   # employee name
    # One-to-many link to Dependente via dependente.funcionario_id.
    dependente = relationship("Dependente")
class Dependente(Base):
    """SQLAlchemy model for a dependent belonging to one Funcionario."""
    __tablename__ = "dependente"
    id = Column(Integer,primary_key=True)   # surrogate primary key
    nome = Column(String)                   # dependent name
    # Foreign key back to the owning employee row.
    funcionario_id = Column(Integer,ForeignKey("funcionario.id"))
if __name__ == "__main__":
Base.metadata.create_all(engine)
# Buscando funcionario
result = session.query(Funcionario,Dependente) \
.join(Dependente) \
.filter(Funcionario.id==1).first()
dependente = session.query(Dependente).filter_by(id=1).first()
session.delete(dependente)
session.commit()
print "Funcionario: ",result.Funcionario.nome
for d in result.Funcionario.dependente:
print "Dependente: ",d.nome
|
4,274 | 8c683c109aba69f296b8989915b1f3b3eecd9745 | import re
# Pre-compiled patterns for the two input line formats:
#   "##.#. => #"              (a spreading rule)
#   "initial state: #..##"    (the starting pot row)
rule_regex = re.compile(r'([\.#]{5}) => ([\.#])')
grid_regex = re.compile(r'initial state: ([\.#]+)')


class Rule:
    """One automaton rule: a 5-pot neighborhood template and the resulting
    center-pot state ('#' alive, '.' dead)."""

    def __init__(self, template, alive):
        self.template = template
        self.alive = alive

    @staticmethod
    def parse(string):
        """Parse one input line into a Rule, or return None if it doesn't match.

        Fix: declared as @staticmethod -- the original had no `self` and only
        worked when invoked as Rule.parse(...) on the class, never on an
        instance.
        """
        match = rule_regex.match(string)
        if match:
            return Rule(match.group(1), match.group(2))
        return None
def read_input(path):
    """Read the puzzle input: the initial pot row and the rule list.

    Line 0 holds "initial state: ...", line 1 is skipped (blank), and every
    subsequent line is parsed into a Rule.
    """
    init_grid = ''
    rules = []
    with open(path) as infile:
        cnt = 0
        for line in infile:
            if cnt == 0:
                init_grid = grid_regex.match(line).group(1)
            elif cnt > 1:
                rules.append(Rule.parse(line))
            cnt = cnt + 1
    return init_grid, rules
def apply_rule(segment, rule):
    """Return the rule's output state if *segment* equals its template, else None."""
    return rule.alive if segment == rule.template else None
def advance(grid, rules):
    """Apply one generation of rules to *grid*.

    Returns (new_grid, offset_delta): new_grid is trimmed to the span between
    the first and last live pot, and offset_delta says how far the trimmed
    grid's origin moved relative to the input grid's origin.
    """
    # Pad five dead pots on each side so every pot has a full 5-wide
    # neighborhood and growth just past the edges is captured.
    augmented_grid = "....." + grid + "....."
    grid = ['.' for x in range(0, len(augmented_grid))]
    for pos in range(2, len(augmented_grid)-2):
        for rule in rules:
            result = apply_rule(augmented_grid[pos-2:pos+3], rule)
            if result:
                grid[pos] = result
    # Trim dead pots from both ends; 5 is the left-padding width, so
    # first_hash - 5 is the shift of the new origin.
    first_hash = grid.index('#')
    last_hash = len(grid) - 1 - grid[::-1].index('#')
    offset_delta = first_hash-5
    return ''.join(grid[first_hash:last_hash+1]), offset_delta
def find_sum(grid, offset):
    """Sum the pot numbers (index + offset) of every live pot ('#') in *grid*.

    Fix: the original accumulated into a local named `sum`, shadowing the
    built-in; a generator expression with the built-in sum() is equivalent
    and clearer.
    """
    return sum(i + offset for i, pot in enumerate(grid) if pot == '#')
def main():
grid, rules = read_input('./input/input.dat')
offset = 0
sum = find_sum(grid, offset)
print(grid)
for i in range(1, 1000):
new_grid, offset_delta = advance(grid, rules)
offset = offset + offset_delta
new_sum = find_sum(new_grid, offset)
sum_diff = new_sum - sum
print(i, ": grid length = ", len(new_grid), " offset = ", offset, " sum = ", new_sum)
if new_grid == grid:
print("found repeated grids:")
break
grid = new_grid
sum = new_sum
target_year = 50000000000
print("sum at {} = {}".format(target_year, new_sum + sum_diff*(target_year-i)))
if __name__== "__main__":
main()
|
4,275 | 2b5df70c75f2df174991f6b9af148bdcf8751b61 | import sys
# Redirect stdin/stdout to the contest's input/output files.
sys.stdin = open('4828.txt', 'r')
sys.stdout = open('4828_out.txt', 'w')

# First input line: number of test cases.
T = int(input())
for test_case in range(1, T + 1):
    N = int(input())  # element count (the whole values line is parsed below)
    l = list(map(int, input().split()))
    # Built-in max()/min() replace the original manual scan loop; the
    # answer is the range of the values.
    print(f'#{test_case} {max(l) - min(l)}')
4,276 | a3588a521a87765d215fd2048407e5e54fb87e94 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 14 09:53:10 2021
@author: kaouther
"""
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
#path = '/home/kaouther/Documents/Internship/pre_process/input_files/heart_forKaouther.xlsx'
#path = '/home/kaouther/Documents/Internship/pre_process/input_files/tissues_9m_forKaouther3.xlsx'
path = '/home/kaouther/Documents/Internship/pre_process/input_files/clean/TabulaMuris_Senis_Brain.xlsx'
#path=input('enter the complete path to your input file')
#path = input('Paste the absolute path to the file') #enter the path to the heart_forKaouther.xlsx
#df = pd.read_csv(path, delimiter = "\t")
df = pd.read_excel(path)
#function de extract the last caracterfrom a string
def get_rep_name(string):
    """Return the final character of *string* (the replicate suffix).

    A slice is used instead of string[-1] so an empty input yields ''
    rather than raising IndexError.
    """
    return string[-1:]
#get columns names (samples & biological replicates)
column_names = df.columns
column_names = column_names.delete([0]) #remove gene
#get only biological replicates
biological_rep=[]
mean_replicates= dict()
for name in column_names:
if get_rep_name(name) not in biological_rep:
#print(get_rep_name(name))
biological_rep.append(name[-1:])
#dictionnary to store the sum of values of a type of biological rep and nb of iteration
for i in range (0,len(biological_rep),1):
mean_replicates['mean_replicate_'+biological_rep[i]] = [0]*len(df)
mean_replicates['nb_itteration_'+biological_rep[i]] = [0]*len(df)
for k in range (0,len(df),1):
for i in range (0, len(column_names),1):
for j in biological_rep:
if j in get_rep_name(column_names[i]):
mean_replicates['mean_replicate_'+j][k]+= df.loc[k,column_names[i]]
mean_replicates['nb_itteration_'+j][k]+=1
dico2 = dict() #store tuples sum and iteration on each line
dico3 = dict() #store the mean calculation
for i in range (0,len(biological_rep),1):
dico3['mean_replicate_'+biological_rep[i]] = [0]*len(df)
#get list of mean replicates
list_mean_replicates =[]
for i in range (0,len(biological_rep),1):
list_mean_replicates.append('mean_replicate_'+biological_rep[i])
#dico to store as a tuple the sum and iteration for each mean rep
for key in list_mean_replicates:
for key2 in mean_replicates:
if key != key2 and get_rep_name(key) == get_rep_name(key2):
print( key,key2)
dico2[key]= list(zip((mean_replicates[key]),mean_replicates[key2]))
#dico to calculate the average per gene per mean replicate
for key in dico2:
for i in range(0,len(df),1):
cal = round(dico2[key][i][0]/ dico2[key][i][1])
dico3[key][i]= cal
#store results in new df in new columns
final_df = df.copy()
for mean in list_mean_replicates:
final_df[mean] = 0
for i in range(0,len(final_df),1):
for key in list_mean_replicates:
final_df.loc[i,key] = dico3[key][i]
#export as excel the df
final_df.to_excel ('/home/kaouther/Documents/Internship/pre_process/output_files/brain_matrix.xlsx', index = False, header=True)
#final_df.to_csv('/home/kaouther/Documents/Internship/pre_process/output_files/'+'tissues_mean.csv', index = False, header=True)
#final_df.to_excel('/home/kaouther/Documents/Internship/pre_process/output_files/'+'tissues_matrix.xlsx', index = False, header=True)
#file_name= input('file name')
#final_df.to_excel(file_name+'.xlsx', index = False, header=True)
duplicateRowsDF = final_df[final_df.iloc[:,0].duplicated()]
|
4,277 | 9a6f159d9208ee9e337de7b717e2e25c7e7f9f06 | """Plugin setup."""
import importlib
from qiime2.plugin import (
Plugin,
Str,
Choices,
Int,
Bool,
Range,
Float,
Metadata,
MetadataColumn,
Categorical,
Numeric,
Citations,
)
import q2_micom
from q2_micom._formats_and_types import (
SBML,
JSON,
Pickle,
SBMLFormat,
SBMLDirectory,
JSONFormat,
JSONDirectory,
CommunityModelFormat,
CommunityModelManifest,
CommunityModelDirectory,
GrowthRates,
Fluxes,
MicomResultsDirectory,
MicomMediumFile,
MicomMediumDirectory,
MetabolicModels,
CommunityModels,
MicomResults,
MicomMedium,
Global,
PerSample,
TradeoffResults,
TradeoffResultsDirectory,
REQ_FIELDS,
)
from q2_types.feature_data import FeatureData, Taxonomy
from q2_types.feature_table import FeatureTable, RelativeFrequency, Frequency
citations = Citations.load("citations.bib", package="q2_micom")
plugin = Plugin(
name="micom",
version=q2_micom.__version__,
website="https://github.com/micom-dev/q2-micom",
package="q2_micom",
description=(""),
short_description="Plugin for metabolic modeling of microbial communities.",
citations=[citations["micom"]],
)
plugin.register_formats(
SBMLFormat,
SBMLDirectory,
JSONFormat,
JSONDirectory,
CommunityModelFormat,
CommunityModelManifest,
CommunityModelDirectory,
GrowthRates,
Fluxes,
MicomResultsDirectory,
MicomMediumFile,
MicomMediumDirectory,
TradeoffResultsDirectory,
)
plugin.register_semantic_types(
MetabolicModels, CommunityModels, MicomResults, MicomMedium
)
plugin.register_semantic_type_to_format(MetabolicModels[SBML], SBMLDirectory)
plugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory)
plugin.register_semantic_type_to_format(
CommunityModels[Pickle], CommunityModelDirectory
)
plugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory)
plugin.register_semantic_type_to_format(TradeoffResults, TradeoffResultsDirectory)
plugin.register_semantic_type_to_format(MicomMedium[Global], MicomMediumDirectory)
plugin.register_semantic_type_to_format(MicomMedium[PerSample], MicomMediumDirectory)
plugin.methods.register_function(
function=q2_micom.db,
inputs={},
parameters={
"meta": Metadata,
"rank": Str % Choices(q2_micom._build.RANKS),
"threads": Int % Range(1, None),
},
outputs=[("metabolic_models", MetabolicModels[JSON])],
input_descriptions={},
parameter_descriptions={
"meta": (
"Metadata for the individual metabolic models in `folder`. "
"Must contain the the following columns: %s." % ", ".join(REQ_FIELDS)
),
"rank": "The phylogenetic rank at which to summarize taxa.",
"threads": "The number of threads to use when constructing models.",
},
output_descriptions={"metabolic_models": "The metabolic model DB."},
name="Build a metabolic model database.",
description=(
"Constructs pan-genome models summarized to the specified rank "
"and bundles the models to be used by MICOM. "
"The chosen rank has to be the same you want as when building your "
"community models. "
"So you may not build genus-level community models with a species "
"level database. "
"You will only need to run this function if you want to build a "
"custom DB. For many use cases downloading the prebuilt AGORA DB "
"with the the preferred rank should be sufficient."
),
citations=[
citations["agora"],
citations["agora_reply"],
citations["micom"],
],
)
plugin.methods.register_function(
function=q2_micom.build,
inputs={
"abundance": FeatureTable[Frequency | RelativeFrequency],
"taxonomy": FeatureData[Taxonomy],
"models": MetabolicModels[JSON],
},
parameters={
"threads": Int % Range(1, None),
"cutoff": Float % Range(0.0, 1.0),
"strict": Bool,
"solver": Str % Choices("auto", "cplex", "osqp", "gurobi"),
},
outputs=[("community_models", CommunityModels[Pickle])],
input_descriptions={
"abundance": (
"The feature table containing the samples over which beta "
"diversity should be computed."
),
"taxonomy": "The taxonomy assignments for the ASVs in the table.",
"models": "The single taxon model database to use.",
},
parameter_descriptions={
"threads": "The number of threads to use when constructing models.",
"cutoff": "Taxa with a relative abundance smaller than this will "
"be dropped.",
"strict": (
"If true will collapse and match on all taxa ranks up to the "
"specified rank (so on all higher ranks as well). If false "
"(default) will match only on single taxa rank specified before. "
"If using the strict option make sure ranks are named the same as in "
"the used database."
),
"solver": (
"The quadratic and linear programming solver that will be used "
"in the models. Will pick an appropriate one by default. "
"`cplex` and `gurobi` are commercial solvers with free academic "
"licenses and have to be installed manually. See the docs for more info."
),
},
output_descriptions={"community_models": "The community models."},
name="Build community models.",
description=("Builds the metabolic community models for a set of samples."),
citations=[citations["micom"]],
)
plugin.methods.register_function(
function=q2_micom.minimal_medium,
inputs={"models": CommunityModels[Pickle]},
parameters={
"min_growth": Float % Range(0.0, None, inclusive_start=False),
"threads": Int % Range(1, None),
},
outputs=[("medium", MicomMedium[Global])],
input_descriptions={
"models": (
"A collection of metabolic community models. "
"This should contain on model for each sample."
),
},
parameter_descriptions={
"min_growth": (
"The minimum achievable growth rate for each taxon. "
"The returned growth medium enables all taxa to growth "
"simultaneously with at least this rate."
),
"threads": "The number of threads to use when simulating.",
},
output_descriptions={"medium": "The resulting growth medium."},
name="Obtain a minimal growth medium for models.",
description=(
"Obtains a minimal growth medium for the community models. "
"Please note that this medium does not have any biological "
"feasibility. If you have any knowledge about metabolites present "
"in the environment we recommend you construct the medium by hand."
),
citations=[citations["micom"]],
)
plugin.methods.register_function(
function=q2_micom.grow,
inputs={
"models": CommunityModels[Pickle],
"medium": MicomMedium[Global | PerSample],
},
parameters={
"tradeoff": Float % Range(0.0, 1.0, inclusive_start=False, inclusive_end=True),
"strategy": Str % Choices("pFBA", "minimal uptake", "none"),
"threads": Int % Range(1, None),
},
outputs=[("results", MicomResults)],
input_descriptions={
"models": (
"A collection of metabolic community models. "
"This should contain on model for each sample."
),
"medium": "The growth medium to use.",
},
parameter_descriptions={
"tradeoff": (
"The tradeoff parameter. This describes the balance "
"between maximizing biomass production of the entire "
"community and biomass production of individual taxa "
'(ergo "egoistic" growth). A value of 1.0 would yield '
"the best biomass production across the community but "
"will only allow a few taxa to grow. Smaller values will "
"allow more taxa to grow but will sacrifice overall "
"biomass. A value of 0.5 (the default) has been shown to "
"best reproduce growth rates in the human gut."
),
"strategy": (
"The strategy used when choosing the solution in the "
"optimal flux space. `minimal uptake` uses the fluxes "
"that result in the smallest total uptake from the environment."
"`pFBA` uses parsimonious Flux Balance Analysis and thus will choose "
"the fluxes with the lowest enzyme requirement for each taxon. "
"`none` will return an arbitrary solution from the optimal flux space."
),
"threads": "The number of threads to use when simulating.",
},
output_descriptions={
"results": "The resulting taxa-level growth rates and metabolic "
"exchange fluxes."
},
name="Simulate growth for community models.",
description=(
"Simulates growth for a set of samples. Note that those are "
'sample-specific or "personalized" simulations, so each taxon '
"may have different growth rates and metabolite usage in each sample."
),
citations=[citations["micom"]],
)
plugin.methods.register_function(
function=q2_micom.tradeoff,
inputs={
"models": CommunityModels[Pickle],
"medium": MicomMedium[Global | PerSample],
},
parameters={
"tradeoff_min": Float % Range(0.0, 1.0, inclusive_start=False),
"tradeoff_max": Float % Range(0.0, 1.0, inclusive_end=True),
"step": Float % Range(0.0, 1.0),
"threads": Int,
},
outputs=[("results", TradeoffResults)],
input_descriptions={
"models": (
"A collection of metabolic community models. "
"This should contain on model for each sample."
),
"medium": "The growth medium to use.",
},
parameter_descriptions={
"tradeoff_min": "The minimum tradeoff parameter to test. This should "
"be larger than 0.0 and smaller than 1.0.",
"tradeoff_max": "The maximum tradeoff parameter to test. This should "
"be larger than 0.0 and smaller than 1.0 and also be"
"larger than `tradeoff_min`.",
"step": "The tradeoff value step size to use.",
"threads": "The number of threads to use when simulating.",
},
output_descriptions={
"results": "The resulting taxa-level growth rates for varying "
"tradeoff values."
},
name="Test a variety of tradeoff values.",
description=(
"Simulates growth for a set of samples while varying the tradeoff "
"between community and taxon biomass production. "
"This can be used to characterize a good tradeoff value for a "
"specific set of samples. Our study suggested that a good tradeoff "
"value is the largest value that allows the majority of taxa in the "
"sample to grow."
),
citations=[citations["micom"]],
)
plugin.methods.register_function(
function=q2_micom.filter_models,
inputs={"models": CommunityModels[Pickle]},
parameters={"metadata": Metadata, "query": Str, "exclude": Bool},
outputs=[("filtered_models", CommunityModels[Pickle])],
input_descriptions={
"models": (
"A collection of metabolic community models. "
"This should contain on model for each sample."
)
},
parameter_descriptions={
"metadata": "The metadata for the samples to keep or to query.",
"query": (
"A pandas query expression to select samples from the metadata. "
"This will call `query` on the metadata DataFrame, so you can test "
"your query by loading our metadata into a pandas DataFrame."
),
"exclude": (
"If true will use all samples *except* the ones selected "
"by metadata and query."
),
},
output_descriptions={"filtered_models": "The filtered community models."},
name="Filters models for a chosen set of samples.",
description=(
"Select a subset of samples and their community models using a list "
"of samples or a pandas query expression."
),
citations=[citations["micom"]],
)
plugin.methods.register_function(
function=q2_micom.filter_results,
inputs={"results": MicomResults},
parameters={"metadata": Metadata, "query": Str, "exclude": Bool},
outputs=[("filtered_results", MicomResults)],
input_descriptions={
"results": (
"A set of MICOM analysis results. "
"Contains predicted groath rates and exchange fluxes."
)
},
parameter_descriptions={
"metadata": "The metadata for the samples to keep or to query.",
"query": (
"A pandas query expression to select samples from the metadata. "
"This will call `query` on the metadata DataFrame, so you can test "
"your query by loading our metadata into a pandas DataFrame."
),
"exclude": (
"If true will use all samples *except* the ones selected "
"by metadata and query."
),
},
output_descriptions={"filtered_results": "The filtered simulation models."},
name="Filters results for a chosen set of samples.",
description=(
"Select a subset of samples and their simulation results using a list "
"of samples or a pandas query expression."
),
citations=[citations["micom"]],
)
# Register the growth-rate visualizer.
# Typo fix in the user-facing input description: "groath" -> "growth".
plugin.visualizers.register_function(
    function=q2_micom.plot_growth,
    inputs={"results": MicomResults},
    parameters={},
    input_descriptions={
        "results": (
            "A set of MICOM analysis results. "
            "Contains predicted growth rates and exchange fluxes."
        )
    },
    parameter_descriptions={},
    name="Plot taxa growth rates.",
    description=(
        "Plot predicted growth rates for each taxon in each sample. "
        "Only points with growing taxa are shown (growth rate sufficiently "
        "larger than zero)."
    ),
    citations=[citations["micom"]],
)
# Register the per-sample exchange-flux visualizer.
# Typo fixes in user-facing strings: "groath" -> "growth",
# "clutering" -> "clustering", "gloabl" -> "global".
plugin.visualizers.register_function(
    function=q2_micom.exchanges_per_sample,
    inputs={"results": MicomResults},
    parameters={
        "direction": Str % Choices("import", "export"),
        "cluster": Bool,
    },
    input_descriptions={
        "results": (
            "A set of MICOM analysis results. "
            "Contains predicted growth rates and exchange fluxes."
        )
    },
    parameter_descriptions={
        "direction": "The direction of the flux.",
        "cluster": "Whether to perform clustering on samples and reactions.",
    },
    name="Plot global exchange rates.",
    description=(
        "Plot predicted global exchange fluxes for each sample. "
        "When plotting imports this corresponds to the consumption "
        "fluxes for each metabolite that is available to the community. "
        "When plotting export this corresponds to the production fluxes "
        "for each metabolite."
    ),
    citations=[citations["micom"]],
)
plugin.visualizers.register_function(
function=q2_micom.exchanges_per_taxon,
inputs={"results": MicomResults},
parameters={
"direction": Str % Choices("import", "export"),
"perplexity": Int % Range(2, None),
},
input_descriptions={
"results": (
"A set of MICOM analysis results. "
"Contains predicted growth rates and exchange fluxes."
)
},
parameter_descriptions={
"direction": "The direction of the flux.",
"perplexity": "TSNE parameter. Relates to the number of neighbors used to "
"calculate distances. Smaller values preserve more local "
"structure and larger values preserve more global structure.",
},
name="Plot niche overlap.",
description=(
"Plot growth or production niches. "
"The entire set of import or export fluxes for each taxon in each "
"sample is reduced onto a single point on a 2D plane."
"Taxa that are close to each other either consume similar metabolites "
" (imports) or produce similar metabolites (exports)."
),
citations=[citations["micom"]],
)
plugin.visualizers.register_function(
function=q2_micom.plot_tradeoff,
inputs={"results": TradeoffResults},
parameters={},
input_descriptions={
"results": (
"A set of MICOM tradeoff analysis results. "
"Contains predicted growth rates for each tested tradeoff."
)
},
parameter_descriptions={},
name="Plot tradeoff results.",
description=(
"Plot predicted growth rate distributions for each tradeoff as "
"well as the fraction of growing taxa in each sample and tradeoff "
"value. For a good tradeoff value one usually tries to find the "
"largest tradeoff value that still aloows most taxa to grow."
),
citations=[citations["micom"]],
)
plugin.visualizers.register_function(
function=q2_micom.fit_phenotype,
inputs={"results": MicomResults},
parameters={
"metadata": MetadataColumn[Categorical | Numeric],
"variable_type": Str % Choices("binary", "continuous"),
"flux_type": Str % Choices("import", "production"),
"min_coef": Float % Range(0, None),
},
input_descriptions={
"results": (
"A set of MICOM analysis results. "
"Contains predicted growth rates and exchange fluxes."
),
},
parameter_descriptions={
"metadata": "The metadata variable to use.",
"variable_type": "The type of the phenotype variable.",
"flux_type": "Which fluxes to use.",
"min_coef": (
"Only coefficient with absolute values larger than this " "will be shown."
),
},
name="Test for differential production",
description=(
"Test for overall metabolite production differences " "between two groups."
),
citations=[citations["micom"]],
)
importlib.import_module("q2_micom._transform")
|
4,278 | bfb778a2ecf43a697bc0e3449e9302142b20e1f4 | from django.conf.urls import url
from django.urls import path
from . import views
app_name = 'Accounts'
urlpatterns = [
path('update_info', views.update_info, name='update_info'),
path('create_user', views.create_user, name='create_user'),
path('change_password', views.change_password, name='change_password'),
path('register', views.register, name='register'),
path('login', views.login, name='login'),
path('logout', views.logout, name='logout'),
path('test_auth', views.test, name='test'),
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.activate, name='activate'),
path('change_user_status/<int:user_id>/<int:status>', views.change_user_status, name='change_user_status'),
path('change_user_privilege/<int:user_id>/<int:status>', views.change_user_privilege, name='change_user_privilege'),
]
|
4,279 | 9887e001f13ed491331c79c08450299afcc0d7cd | """
First run in samples:
mogrify -format png -density 150 input.pdf -quality 90 -- *.pdf
"""
import cv2
import os
import numpy as np
from matplotlib import pylab
def peakdetect(v, delta, x=None):
    """Detect local maxima and minima ("peaks") in a vector.

    Converted from the MATLAB script at http://billauer.co.il/peakdet.html
    (Eli Billauer, 3.4.05, public domain).

    A point is considered a maximum peak if it has the maximal value and was
    preceded (to the left) by a value lower by at least *delta*.

    :param v: sequence of values to scan.
    :param delta: positive scalar threshold a value must move by to confirm a peak.
    :param x: optional positions; peak indices are replaced by these values.
    :return: (maxtab, mintab) arrays whose rows are (position, value) pairs.
    :raises ValueError: on mismatched lengths or an invalid delta.
        Fix: the original called sys.exit() without importing sys, so every
        error path died with a NameError instead of reporting the problem.
    """
    maxtab = []
    mintab = []

    if x is None:
        x = np.arange(len(v))

    v = np.asarray(v)

    if len(v) != len(x):
        raise ValueError('Input vectors v and x must have same length')
    if not np.isscalar(delta):
        raise ValueError('Input argument delta must be a scalar')
    if delta <= 0:
        raise ValueError('Input argument delta must be positive')

    # Fix: np.inf / np.nan -- the capitalized aliases (np.Inf, np.NaN) were
    # removed in NumPy 2.0.
    mn, mx = np.inf, -np.inf
    mnpos, mxpos = np.nan, np.nan

    # Alternate between hunting a maximum and a minimum; a candidate extremum
    # is confirmed once the signal has moved past it by delta.
    lookformax = True
    for i in np.arange(len(v)):
        this = v[i]
        if this > mx:
            mx = this
            mxpos = x[i]
        if this < mn:
            mn = this
            mnpos = x[i]

        if lookformax:
            if this < mx - delta:
                maxtab.append((mxpos, mx))
                mn = this
                mnpos = x[i]
                lookformax = False
        else:
            if this > mn + delta:
                mintab.append((mnpos, mn))
                mx = this
                mxpos = x[i]
                lookformax = True

    return np.array(maxtab), np.array(mintab)
datasets_dir = "../samples"
filenames = [f for f in sorted(os.listdir(datasets_dir)) if f.lower().endswith(".png")]
for i, filename in enumerate(filenames):
print i, filename
filename = os.path.join(datasets_dir, filename)
# filename = '../samples/aa_test.png'
image = cv2.imread(filename)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, thresholded = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY_INV)
results = cv2.findContours(thresholded, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
if len(results) == 2:
contours = results[0]
else:
contours = results[1]
draw_image = ~image.copy()
newimage = np.zeros(thresholded.shape, dtype=np.uint8)
boxes = [cv2.boundingRect(contour) for contour in contours]
widths = [box[2] for box in boxes]
typical_width = np.median(widths)
merge_width = int(typical_width)
# merge letters to form word blocks
for box in boxes:
if box[2] > 5 * typical_width or box[3] > 5 * typical_width:
continue
cv2.rectangle(newimage, (box[0] - merge_width, box[1]), (box[0] + box[2] + merge_width, box[1] + box[3]), 255, -1)
# refind contours in merged line image
boximage = newimage.copy()
results = cv2.findContours(newimage, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
if len(results) == 2:
contours = results[0]
else:
contours = results[1]
# make histogram of x coverage of word boxes.
hist_x1 = np.zeros(image.shape[1])
for contour in contours:
box = cv2.boundingRect(contour)
hist_x1[box[0]:box[0]+box[2]] += 1
max_x = np.max(hist_x1)
line_x = np.where(hist_x1 > max_x * 0.6)
maxtab, mintab = peakdetect(hist_x1, np.max(hist_x1) * 0.2)
for i, x in maxtab:
x = int(i)
cv2.line(draw_image, (x, 0), (x, 2000), (0, 0, 255), 2)
draw_image[boximage != 0, 0] = 255
cv2.imshow("process", draw_image)
pylab.clf()
pylab.plot(hist_x1)
pylab.plot(maxtab[:, 0], maxtab[:, 1], 'o')
pylab.ion()
pylab.show()
while cv2.waitKey(0) != 27:
pass
|
4,280 | e069ad88b5173e5859f1b01b9fb45951d1e82593 | Python 3.6.8 (tags/v3.6.8:3c6b436a57, Dec 24 2018, 00:16:47) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> import turtle
turtle.setup(650,350,200,200)
turtle.penup()
turtle.fd(-250)
turtle.pendown()
turtle.pensize(25)
turtle.pencolor("purple")
turtle.seth(-40)
for i in range(4):
turtle.circle(40,80)
turtle.circle(-40,80)
turtle.circle(40,80/2)
turtle.fd(40)
turtle.circle(16,180)
turtle.fd(40 * 2/3)
turtle.done()
|
4,281 | be408b349e2795101b525ad8d948dbf52cab81bf | import time
import os
import psutil
start = time.time()
from queue import Queue
from copy import copy
process = psutil.Process(os.getpid())
class Node:
object_id = 0
weight = 0
value = 0
def __init__(self,object_id,weight,value):
self.object_id=object_id
self.weight=weight
self.value=value
""" First we need to extract values from the file"""
def read_file(file):
f = open(file, "r")
f.seek(0)
queue=Queue(maxsize=0)
list_elements=[]
nodes=[]
for line in f:
id=int(line.split(".", 1)[0])
value= int(line.split(" ", 3)[1])
weight=int(line.split(" ", 3)[2].split('\n', 2)[0])
nodes.append(Node(id,weight,value))
list_elements.append(-1)
list_elements.append(0)
list_elements.append(0)
queue.put(list_elements)
res=go_explore(queue,nodes)
for i in range(0,len(res)-2):
if(res[i]==1):
node=nodes[i]
res[i]={"id":node.object_id,"weight":node.weight,"value":node.value}
res=list(filter(lambda x: x != 0, res))
value=len(res)-1
weight=len(res)-2
res[value]={"total value":res[value]}
res[weight]={"total weight":res[weight]}
return res
def go_explore(queue,nodes):
best_value = 0
res=[]
while not queue.empty():
q=copy(queue.get())
for i in range(len(q)):
if q[i] is -1:
weight = q[len(q)-2]
value = q[len(q)-1]
if weight<=420:
if value > best_value:
res = q
best_value=value
q[i]=0
queue.put(q)
q_positive= copy(q)
q_positive[len(q_positive)-1]=value+nodes[i].value
q_positive[len(q_positive)-2]=weight+nodes[i].weight
q_positive[i]=1
queue.put(q_positive)
break
elif i == len(q)-1:
weight = q[len(q)-2]
value = q[len(q)-1]
if weight<=420:
if value > best_value:
res = q
best_value=value
return res
solution=read_file('Knapsack/data_knapsack')
for data in solution:
print(data)
end = time.time()
print(end - start)
print(process.memory_info().rss)
|
4,282 | f5831b84c1177d8b869db05d332bd364b3f72fff | from ContactBook import ContactBook
import csv
def run():
contact_book = ContactBook()
with open("22_agenda/contactos.csv",'r') as f:
reader = csv.reader(f)
for idx,row in enumerate(reader):
if idx == 0:
continue
else:
contact_book.add(row[0],row[1],row[2])
while True:
comando = input('''
Que desea hacer
a. añadir contacto
b. actualizar contacto
c. buscar contacto
d. eliminar contacto
e. listar contacto
f. salir
: ''')
if comando == 'a':
print("añadir contacto")
nombre = input("Escribe el nombre de la persona: ")
telefono = input("Escribe el telefono de la persona: ")
email = input("ingrese el email de la persona: ")
contact_book.add(nombre,telefono,email)
elif comando == 'b':
print("actualizar contacto")
nombre = input("Escribe el nombre de la persona: ")
contact_book.update_menu(nombre)
elif comando == 'c':
print("buscar contacto")
nombre = input("Escribe el nombre de la persona: ")
contact_book.search(nombre)
elif comando == 'd':
print("eliminar contacto")
nombre = input("Escribe el nombre de la persona: ")
contact_book.delete(nombre)
elif comando == 'e':
print("listar contactos")
contact_book.show_all()
elif comando == 'f':
print("salir ")
break
else:
print("opcion no valida")
if __name__ == "__main__":
run() |
4,283 | b7db0d2f4bbbc2c7763b9d2e6bede74979b65161 | import sys
import os
import random
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
def file_len(file):
initial = file.tell()
file.seek(0, os.SEEK_END)
size = file.tell()
file.seek(initial)
return size
def run():
rand_seed = None
stderr_filename = None
stdout_filename = None
if len(sys.argv) >= 4:
rand_seed = int(sys.argv[3])
if len(sys.argv) >= 3:
stderr_filename = sys.argv[2]
if len(sys.argv) >= 2:
stdout_filename = sys.argv[1]
stdout_file = None
stderr_file = None
if stdout_filename:
stdout_file = open(stdout_filename, 'r')
else:
stdout_file = StringIO()
if stderr_filename:
stderr_file = open(stderr_filename, 'r')
else:
stderr_file = StringIO()
if not rand_seed:
sys.stdout.write(stdout_file.read())
sys.stderr.write(stderr_file.read())
else:
random.seed(rand_seed)
stdout_len = file_len(stdout_file)
stdout_eof = False
stderr_eof = False
while not stdout_eof or not stderr_eof:
if not stdout_eof:
r = random.randrange(stdout_len / 4)
data = stdout_file.read(r)
if len(data) < r:
stdout_eof = True
sys.stdout.write(data)
if not stderr_eof:
r = random.randrange(stdout_len / 4)
data = stderr_file.read(r)
if len(data) < r:
stderr_eof = True
sys.stderr.write(data)
if __name__ == '__main__':
run()
|
4,284 | 1aace7b9385aefdc503ce0e43e0f7f0996fe112a | from gamesim import GameSim
from network import Network
from player import RemotePlayer
from mutator import Mutator
from random import *
import copy
game = GameSim()
game.make_players(10)
base = "networks/"
dir = ""
name = "203964_85377"
gens = 2000
game.players[0].import_player(base + dir + name + ".network")
game.train(gens)
if (gens%500 != 0):
game.players[0].export_player()
|
4,285 | 685fa78b9c3ec141ce1e9ab568e4ad8a0565d596 | with open("input_trees.txt") as file:
map = file.readlines()
map = [ line.strip() for line in map ]
slopes = [(1,1), (3,1), (5,1), (7,1),(1,2)]
total = 1
for slope in slopes:
treeCount = 0
row, column = 0, 0
while row + 1 < len(map):
row += slope[1]
column += slope[0]
space = map[row][column % len(map[row])]
if space == "#":
treeCount += 1
total *= treeCount
print(total)
|
4,286 | 8283bdab023e22bba3d8a05f8bda0014ee19adee | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import pylab as pl
import pymc as mc
import book_graphics
reload(book_graphics)
# <markdowncell>
# Uniform points in an $n$-dimensional ball
# =========================================
#
# This notebook implements and compares samplers in PyMC
# to sample uniformly from an $n$-dimensional ball,
# i.e to sample from the set
# $$
# \mathbf{B}_n = \\{x \in \mathbf{R}^n: \|x\|\leq 1\\}
# $$
# <codecell>
mc.np.random.seed(1234567)
# simple model
n = 2
X = [mc.Uninformative('X_%d'%i, value=0) for i in range(n)]
@mc.potential
def in_ball(X=X):
if X[0]**2 + X[1]**2 <= 1.:
return 0
else:
return -pl.inf
# <codecell>
class UniformBall(mc.Gibbs):
def __init__(self, stochastic, others, verbose=None):
self.others = others
self.conjugate = True # pymc will include a Metropolis rejection step on top of the proposal if this is false
mc.Gibbs.__init__(self, stochastic, verbose)
def propose(self):
x_other = [X_i.value for X_i in self.others]
max_val = pl.sqrt(1. - pl.dot(x_other, x_other))
self.stochastic.value = mc.runiform(-max_val, max_val)
# <codecell>
m = mc.MCMC([X, in_ball])
for i in range(n):
m.use_step_method(UniformBall, X[i], [X[j] for j in range(n) if j != i])
# <codecell>
m.sample(100, progress_bar=False)
# <codecell>
def plot_trace(X, scale=1., angle=0.):
fig = pl.figure(figsize=(12,4.75))
ax1 = fig.add_subplot(1, 2, 1)
# plot boundary
t = pl.arange(0,2*pl.pi,.01)
ax1.plot(pl.cos(angle)*pl.cos(t) - pl.sin(angle)*pl.sin(t)/scale, pl.cos(angle)*pl.sin(t)/scale + pl.sin(angle)*pl.cos(t), 'k:')
# plot samples
if isinstance(X, mc.Stochastic):
tr = [X.trace()[:,0], X.trace()[:,1]]
else:
tr = [X[0].trace(), X[1].trace()]
ax1.plot(tr[0], tr[1], 'ko-')
# decorate plot
book_graphics.set_font()
pl.xlabel('$X_1$')
pl.ylabel('$X_2$', rotation=0)
pl.axis([-1.1,1.1,-1.1,1.1])
pl.text(-1,1,'(a)', fontsize=16, va='top', ha='left')
for i in range(2):
if i == 0:
ax2 = fig.add_subplot(2, 4, 3+4*i)
ax2.plot(tr[i], 'k', drawstyle='steps-mid')
else:
ax2a = fig.add_subplot(2, 4, 3+4*i, sharex=ax2)
ax2a.plot(tr[i], 'k', drawstyle='steps-mid')
pl.xlabel('Sample')
pl.xticks([25,50,75])
pl.yticks([-.5,0,.5])
pl.ylabel('$X_%d$'%(i+1), rotation=0)
pl.axis([-5,105,-1.5,1.5])
pl.text(-1,1.25,'(%s)'%'bc'[i], fontsize=16, va='top', ha='left')
if i == 0:
ax3 = fig.add_subplot(2, 4, 4+4*i)
ax3.acorr(tr[i].reshape(100), color='k')
else:
ax3a = fig.add_subplot(2, 4, 4+4*i, sharex=ax3)
ax3a.acorr(tr[i].reshape(100), color='k')
pl.xlabel('Autocorrelation')
pl.xticks([-5,0,5])
pl.yticks([0., .5, 1])
pl.axis([-12,12,-.1,1.1])
pl.text(-10,1,'(%s)'%'de'[i], fontsize=16, va='top', ha='left')
pl.setp(ax2.get_xticklabels(), visible=False)
pl.setp(ax3.get_xticklabels(), visible=False)
pl.subplots_adjust(wspace=.55, hspace=.1, bottom=.14,left=.13)
# <codecell>
plot_trace(X, 1, 0.)
pl.savefig('book/graphics/gibbs-ball.pdf')
# <markdowncell>
# Now with the Metropolis sampler
# ---------------------------------
# <codecell>
mc.np.random.seed(123456789)
# <codecell>
# simple model
n = 2
X = mc.Uninformative('X', value=[0,0])
@mc.potential
def in_ball(X=X, s=3., t=pl.pi/4.):
if (pl.cos(t)*X[0] + pl.sin(t)*X[1])**2 + s**2*(pl.cos(t)*X[1] -pl.sin(t)*X[0])**2 <= 1.:
return 0
else:
return -pl.inf
m = mc.MCMC([X, in_ball])
m.sample(100, progress_bar=False)
# <codecell>
plot_trace(X, 3, pl.pi/4)
pl.savefig('book/graphics/metropolis-ball.pdf')
# <markdowncell>
# Now with Adaptive Metropolis
# <codecell>
mc.np.random.seed(1234567)
# simple model
n = 2
X = mc.Uninformative('X', value=[0,0])
@mc.potential
def in_ball(X=X, s=3., t=pl.pi/4):
if (pl.cos(t)*X[0] + pl.sin(t)*X[1])**2 + s**2*(pl.cos(t)*X[1] -pl.sin(t)*X[0])**2 <= 1.:
return 0
else:
return -pl.inf
m = mc.MCMC([X, in_ball])
m.use_step_method(mc.AdaptiveMetropolis, X)
# <codecell>
m.sample(100, progress_bar=False)
plot_trace(X, 3, pl.pi/4)
pl.savefig('book/graphics/am-ball-1.pdf')
# <codecell>
m.sample(iter=20100, burn=20000, progress_bar=False)
plot_trace(X, 3, pl.pi/4)
pl.savefig('book/graphics/am-ball-2.pdf')
pl.show()
|
4,287 | b8ebbef7403a71d6165a5462bc08e2634b4cebc5 | CARD_SIZE = (70, 90)
SPACING = 3 |
4,288 | f105ecb8229020554930bb4f0e00ecf88e83f5ae | # -*- coding: iso-8859-15 -*-
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@8:........C@@@
# @@@@@@@@@@@@@@88@@@@@@@@@@@@@@@@@@@@@@88@@@@@@@@@@888@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@O:...........:C@
# @ .@O O@8 C@@O o@@@: cO oc 8o .@@. @c....:O@@:....:@
# @ .:c8 CO O8 :o O8 oO C@. :8. :::. ..::. ::Cc ..:8o o@: @o....:8@@:....:@
# @ c@@@O OO C8 c@ OO o8 c@. :@. :@@C O@@@@. :@@@c 8@@@@@@@@@@@@: @@@@@@@@@O.....:@
# @ ..oO OO C8 .@O o@@@@@@@. :@. :@@C O@@@@. :@@@c :C8@@@o O@@ccC @@@@@@@O.......c@
# @ oO OO C8 C@O o. c8. :@. :@@8OOCo8@@@@. :@@@8@@@@@@O@@@@@@@8C: @@@@@C.......o@@@
# @ c@@@O OO C8 c8 OO oO c@. :@. o@@@@@@@@@@@@@@@@@@@@@o 8@@@o ..o @@@C......:C@@@@@
# @ c@@@O CO C8 c8 OO o@. c@. :@..o8@@@@@@@@@@@@@@@@Oc@@@c 8@@@o oo @C......:O@@@@@@@
# @ c@@@@ .. 88 c8 O@. .: c@c :o@@@@@@@@@@@@@@@@@@@@@@@@Ooc:: Co o@. @c....:O@@@@@@@@@
# @ c@@@@@o o@@8 c@ O@@o cc c@@O. c@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@: Co o@O @c....:O8@@@@@@@@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@:C@:C:..:C.:.:c.:.@o.............:@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@.:o o.oo o ooCc.oC@c.............:@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#
# NCOrifle.py -- Support for squadleaders being able to choose between smg and rifle.
#
# ©2010 Spit for Forgotten Hope
import host, bf2
from game.gameplayPlugin import base
from game.utilities import rconExec, getCurrentRound
from NCOrifleData import NCO_kits
DEBUG = 0
class NCOrifle(base):
def round_start(self, hooker):
self.watched_players = []
self.choices = {}
self.spawned = []
self.spawned_dict = {}
if not hooker.hasHook('RemoteCommand', self.onRemoteCommand):
hooker.register('RemoteCommand', self.onRemoteCommand)
hooker.register('PlayerSpawn', self.onPlayerSpawn)
hooker.register('PickupKit', self.onPickupKit)
if DEBUG: print 'NCOrifle: hooks registered'
else:
if DEBUG: print 'NCOrifle: hooks already registered'
def onRemoteCommand(self, playerid, cmd):
if not (cmd == 'ncosmg' or cmd == 'ncorifle' or cmd.startswith('selectkit')): return
if playerid == -1: playerid = 255
player = bf2.playerManager.getPlayerByIndex(playerid)
if DEBUG: print 'NCOrifle: player %s executed rcon command "%s"' % (player.getName(), cmd)
if cmd.startswith('selectkit'):
if cmd.endswith('6'):
self.addPlayer(player)
else:
self.removePlayer(player)
if cmd == 'ncorifle':
self.choices[player] = 'rifle'
if DEBUG: print 'NCOrifle: player %s has chosen a rifle to spawn with' % player.getName()
elif cmd == 'ncosmg':
self.choices[player] = 'smg'
if DEBUG: print 'NCOrifle: player %s has chosen an smg to spawn with' % player.getName()
def onPickupKit(self, player, kit):
if player not in self.spawned: return
def_kit = self.getData(player)
if def_kit is None: return
if DEBUG: print 'Setting NCO kit back to default for team %d' % player.getTeam()
self.setKit(def_kit, player.getTeam(), self.spawned_dict[player])
self.spawned.remove(player)
self.spawned_dict[player] = None
def onPlayerSpawn(self, player, soldier):
try:
self._onPlayerSpawn(player, soldier)
except Exception, e:
print 'NCOrifle exception', e
def getData(self, player):
map, gamemode, size = getCurrentRound()
if map in NCO_kits.keys():
def_kit1, def_kit2 = NCO_kits[map]
exec('def_kit = def_kit%d' % player.getTeam())
return def_kit
else:
print 'NCOrifle: Can\'t find NCO kit info for map %s. Update NCOrifleData.py or provide custom map info via mapdata.py' % map
return None
def _onPlayerSpawn(self, player, soldier):
if player not in self.watched_players: return
def_kit = None
def_kit = self.getData(player)
if def_kit is None: return
if player not in self.choices.keys():
self.setKit(def_kit, player.getTeam(), soldier.templateName)
elif self.choices[player] == 'smg':
self.setKit(def_kit, player.getTeam(), soldier.templateName)
elif self.choices[player] == 'rifle':
if DEBUG: print 'NCOrifle: player %s wants to spawn with a modified NCO kit...' % player.getName()
kit = def_kit + '_rifle'
self.setKit(kit, player.getTeam(), soldier.templateName)
if player in self.spawned: return
self.spawned.append(player)
self.spawned_dict[player] = soldier.templateName
def setKit(self, kit, team, soldier):
rconExec('gameLogic.setKit %d 6 "%s" "%s"' % (team, kit, soldier))
if DEBUG: print 'NCOrifle: Set NCO kit for team %d to %s, %s' % (team, kit, soldier)
def addPlayer(self, player):
if player not in self.watched_players:
self.watched_players.append(player)
if DEBUG: print 'NCOrifle: added player %s to watched players list' % player.getName()
def removePlayer(self, player):
if player in self.watched_players:
self.watched_players.remove(player)
if DEBUG: print 'NCOrifle: removed player %s from watched players list' % player.getName()
|
4,289 | 01f0ad8746ed9a9941faa699b146625ad3a0b373 | # GeoPy can be used to interface to map box https://pypi.org/project/geopy/
from pygeodesy.ellipsoidalVincenty import LatLon
from geojson import Polygon, Feature, FeatureCollection, dump
import sys
import random
BEARING_SOUTH = 180.0
BEARING_EAST = 90.0
class Cell(object):
def __init__(self, cellId, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell):
self.cellId = cellId
self.top_left_cell = top_left_cell
self.top_right_cell = top_right_cell
self.bottom_right_cell = bottom_right_cell
self.bottom_left_cell = bottom_left_cell
def __repr__(self):
return str(self.__dict__)
def generate_cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell):
c = Cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell)
# Expect other data to be inserted into the cell here
return c
def generate_cell_grid(top_left, east_extent, south_extent, cell_lat_size_meters, cell_long_size_meters):
south_distance = 0
current_cell_id = 0
list_of_cells = []
left_edge = top_left
while south_distance < south_extent:
south_distance = south_distance + cell_lat_size_meters
point_south_of_left_edge = left_edge.destination(cell_lat_size_meters, BEARING_SOUTH)
top_left_cell = left_edge
bottom_left_cell = point_south_of_left_edge
east_distance = 0
while east_distance < east_extent:
top_right_cell = top_left_cell.destination(cell_long_size_meters, BEARING_EAST)
bottom_right_cell = bottom_left_cell.destination(cell_long_size_meters, BEARING_EAST)
cell = generate_cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell)
current_cell_id = current_cell_id + 1
list_of_cells.append(cell)
# Increments
top_left_cell = top_right_cell
bottom_left_cell = bottom_right_cell
east_distance = east_distance + cell_long_size_meters
left_edge = point_south_of_left_edge
return list_of_cells
def grid_to_geojson(grid, lower_elevation, upper_elevation):
features = []
for cell in grid:
rect_points = [
[
(cell.top_left_cell.lon, cell.top_left_cell.lat, lower_elevation),
(cell.top_right_cell.lon, cell.top_right_cell.lat, lower_elevation),
(cell.bottom_right_cell.lon, cell.bottom_right_cell.lat, lower_elevation),
(cell.bottom_left_cell.lon, cell.bottom_left_cell.lat, lower_elevation),
(cell.top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), #Because first and last points have to match
(cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation),
(cell.top_right_cell.lon, cell.top_right_cell.lat, upper_elevation),
(cell.bottom_right_cell.lon, cell.bottom_right_cell.lat, upper_elevation),
(cell.bottom_left_cell.lon, cell.bottom_left_cell.lat, upper_elevation),
(cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation) #Because first and last points have to match
]
]
properties = {
'capacity': random.randint(0, 5)
} # TODO this is just an example
polygon = Polygon(rect_points)
feature = Feature(geometry=polygon, id=cell.cellId, properties=properties)
features.append(feature)
return FeatureCollection(features)
def main():
TOP_LEFT = LatLon(-37.721874, 144.966859)
EAST_EXTENT = 1000.0
SOUT_EXTENT = 1000.0
CELL_LONG_SIZE_METERS = 100.0
CELL_LAT_SIZE_METERS = 100.0
grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT, CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)
geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)
dump(geojson_feature_collection, sys.stdout, indent=4)
json_file = open('grid-3d.geojson', 'w')
dump(geojson_feature_collection, json_file, indent=4)
if __name__ == '__main__':
main()
|
4,290 | 1a126ba7e73eb2e7811ab32146fe5aee6c6b30f9 | """
pokespeare.http.py
~~~~~~~~~~~~~~~~~~
Contains definitions of custom HTTP clients, allowing for more flexibility on
the library choice
"""
import abc
import requests
from typing import Dict, Tuple, Any
from .exceptions import HTTPError, UnexpectedError
import requests_cache
class HTTPClient(abc.ABC):
"""Basic interface class. Allow to define custom HTTP clients giving
stronger contract behaviour
:type cache_name: str
:param cache_name: The name of the cache, corresponds to the name of the
sqlite DB on the filesystem if the `beckend` is sqlite
or the name of the redis namespace in case of `redis`
backend.
:type backend: str
:param backend: The backend to use, can be either `memory` to use a simple
python dict, `sqlite` to use a sqlite DB on the filesystem
or `redis` for a redis cache
:type expire_after: int
:param expire_after: Define after how many seconds each key in the cache
have to be evicted
:type allowable_methods: Tuple[str]
:param allowable_methods: A tuple of strings defining for which HTTP
methods to apply caching
Also supports `connection` in case of a redis connection on kwargs,
for more info `https://requests-cache.readthedocs.io/en/latest/api.html`
"""
def __init__(
self,
cache_name: str = "",
*,
backend: str = "memory",
expire_after: int = 3600,
allowable_methods: Tuple[str] = ("GET",),
**kwargs
):
self.cache_name = cache_name
self.backend = backend
self.expire_after = expire_after
self.allowable_methods = allowable_methods
self.cache_enabled = False
if self.cache_name:
self.enable_cache(**kwargs)
@abc.abstractmethod
def enable_cache(self, **kwargs: Dict[str, Any]) -> None:
"""Enable caching for each request"""
pass
@abc.abstractmethod
def disable_cache(self) -> None:
"""Disable caching"""
pass
@abc.abstractmethod
def get(self, url: str, **kwargs: Dict[str, Any]) -> Any:
"""Perform GET request to a defined URL"""
pass
@abc.abstractmethod
def post(self, url: str, **kwargs: Dict[str, Any]) -> Any:
"""Perform POST request to a defined URL"""
pass
class RequestsHTTPClient(HTTPClient):
"""
Simple wrapper class around requests library, which is used as the
main engine for each call. Allow better unit-testing overall.
"""
def enable_cache(self, **kwargs: Dict[str, Any]) -> None:
requests_cache.install_cache(
self.cache_name,
backend=self.backend,
expire_after=self.expire_after,
allowable_methods=self.allowable_methods,
**kwargs
)
self.cache_enabled = True
def disable_cache(self) -> None:
requests_cache.disable_cache()
requests_cache.uninstall_cache()
self.cache_enabled = False
def get(self, url: str, **kwargs: Dict[str, Any]) -> Any:
try:
response = requests.get(url, **kwargs)
response.raise_for_status()
except (
requests.exceptions.HTTPError,
requests.exceptions.TooManyRedirects,
) as e:
raise HTTPError(e)
except (requests.exceptions.RequestException, Exception) as e:
raise UnexpectedError(e)
return response
def post(self, url: str, **kwargs: Dict[str, Any]) -> Any:
try:
response = requests.post(url, **kwargs)
except (
requests.exceptions.HTTPError,
requests.exceptions.TooManyRedirects,
) as e:
raise HTTPError(e)
except (requests.exceptions.RequestException, Exception) as e:
raise UnexpectedError(e)
return response
|
4,291 | 722739086d2777085fdbfdbddef205aaf025580d | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-07-21 12:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='history',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uname', models.CharField(max_length=50, verbose_name='用户名')),
('uword', models.CharField(max_length=50, verbose_name='单词')),
('time', models.DateTimeField(auto_now=True, verbose_name='查询时间')),
('isban', models.BooleanField(default=False, verbose_name='禁用')),
('isdelete', models.BooleanField(default=False, verbose_name='删除')),
],
),
]
|
4,292 | 6d51a088ba81cfc64c2e2a03f98b0ee354eda654 | from time import sleep
import requests
import json
import pymysql
db = pymysql.connect(host="localhost", user="root", password="root", db="xshop", port=33061)
def getCursor():
cursor = db.cursor()
return cursor
class Classify(object):
def __init__(self, **args):
self.cl_name = args['cl_name']
self.cl_grade = args['cl_grade'] is None if 0 else args['cl_grade']
self.cl_fid = args['cl_fid']
if 'picture' not in args:
args['picture'] = ""
self.picture = args['picture']
def insert(self):
sql = "insert into classify (cl_name,cl_grade,cl_fid,cl_serial,eid,picture) values (%s, %s, %s, %s, %s, %s)"
cursor = getCursor()
cursor.execute(sql, (self.cl_name, self.cl_grade, self.cl_fid, 0, 1, self.picture))
cursor.close()
db.commit()
print("id=" + str(cursor.lastrowid))
return cursor.lastrowid
def get_json(id):
file = open('./res/shop_class' + str(id) + '.json')
return json.loads(file.read())
def _downloadFile(url):
global r
if url is None or len(url) == 0:
return None
ss = url.split("/")
name = ss[len(ss) - 1]
r = _down(url, 0)
if r is None:
return ""
with open('./res/shop_class_img/' + name, 'wb') as f:
f.write(r.content)
sleep(0.1)
return name
def _down(url, time):
try:
return requests.get(url)
except:
sleep(5)
print("重试中[" + str(time + 1) + "]....")
# 超过三次不下载了
if time + 1 == 3:
return None
return _down(url, time + 1)
def _json(id):
json_arr = get_json(id)
# 二级分类
for item in json_arr['moduleList']:
cl_id = Classify(cl_fid=id, cl_grade=1, cl_name=item['title']).insert()
# 插入二级分类
print('****************************' + item['title'] + '****************************')
# 三级分类
for i in item['items']:
print(i['name'] + " " + i['pic'])
if i['name'] is None or i['name'] == "":
continue
if len(i['pic'].split(":")) < 2:
print(i['pic'].split(":"))
i['pic'] = "https:" + i['pic']
print(i['pic'])
img_name = _downloadFile(i['pic'])
Classify(cl_fid=cl_id, cl_grade=2, cl_name=i['name'], picture=img_name).insert()
print('****************************************************************')
print('\n')
_json(1)
_json(2)
_json(3)
# print()
|
4,293 | c64c542b57107c06de2ce0751075a81fcb195b61 | def Merge (left,right,merged):
#Ф-ция объединения и сравнения элементов массивов
left_cursor,right_cursor=0,0
while left_cursor<len(left) and right_cursor<len(right):
if left[left_cursor]<=right[right_cursor]:
merged[left_cursor+right_cursor]=left[left_cursor]
left_cursor+=1
else:
merged[left_cursor+right_cursor]=right[right_cursor]
right_cursor+=1
for left_cursor in range(left_cursor,len(left)):
merged[left_cursor+right_cursor]=left[left_cursor]
for right_cursor in range(right_cursor,len(right)):
merged[left_cursor+right_cursor]=right[right_cursor]
return merged
def MergeSort(array):
#Основная рекурсивная функция
if len(array)<=1:
return array
mid=len(array)//2
left,right=MergeSort(array[:mid]),MergeSort(array[mid:])
return Merge(left,right,array.copy())
"""
a=[2,45,1,4,66,34]
print(MergeSort(a))
print(a)
"""
|
4,294 | b04aef64dc0485d9112a40e00d178042833a9ddd | name=['zhangsan']
def func(n):
name=n
print(name)
def func1():
nonlocal name
name='xiaohong'
print(name)
func1()
print(name)
func('lisi') |
4,295 | daecbf5280c199b31f3b9d9818df245d9cd165a7 | import uuid
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.profile import region_provider
# 注意:不要更改
from celery_tasks.sms.dysms_python.build.lib.aliyunsdkdysmsapi.request.v20170525 import SendSmsRequest
class SendMes(object):
REGION = "cn-hangzhou"
PRODUCT_NAME = "Dysmsapi"
DOMAIN = "dysmsapi.aliyuncs.com"
# 申请的ACCESS_KEY_ID和ACCESS_KEY_SECRET
ACCESS_KEY_ID = "LTAIYEeWFSUAFcYy"
ACCESS_KEY_SECRET = "FeuGEGSeHXHJ7A4uFIO0mMLoGjKiiY"
acs_client = AcsClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET, REGION)
region_provider.add_endpoint(PRODUCT_NAME, REGION, DOMAIN)
def send_2_mes(self, phone_numbers, code):
# 申请的短信签名 和 短信模板
sign_name = 'SpiritBlog'
template_code = 'SMS_137657397'
business_id = uuid.uuid1()
template_param = '{"code":"%s"}' % code
smsRequest = SendSmsRequest.SendSmsRequest()
# 申请的短信模板编码,必填
smsRequest.set_TemplateCode(template_code)
# 短信模板变量参数
if template_param is not None:
smsRequest.set_TemplateParam(template_param)
# 设置业务请求流水号,必填。
smsRequest.set_OutId(business_id)
# 短信签名
smsRequest.set_SignName(sign_name)
# 短信发送的号码列表,必填。
smsRequest.set_PhoneNumbers(phone_numbers)
# 调用短信发送接口,返回json
smsResponse = self.acs_client.do_action_with_exception(smsRequest)
return smsResponse
# sm = SendMes()
# sm.send_2_mes(15071176826, 333333)
|
4,296 | 9d3d7000ed13a2623a53705d55b5dbb42662ce2f | import xml.parsers.expat
import urllib2
import threading
def check_url(checkurl, checkstring, checkname):
try:
opener = urllib2.urlopen(checkurl, timeout = 5)
if checkstring[0] == "!":
if checkstring.encode('utf-8')[1:] not in opener.read():
print "Open",checkname
else:
#print "Closed",checkname
pass
else:
if checkstring.encode('utf-8') in opener.read():
print "Open",checkname
else:
#print "Closed",checkname
pass
except IOError:
#print "Broken",checkname
pass
p = xml.parsers.expat.ParserCreate()
tname = ""
url = ""
check = ""
mode = ""
enabled = ""
def char_data(data):
global tname, url, check, mode, enabled
if mode == "name":
tname += data
elif mode == "check":
check += data
elif mode == "signup":
url += data
elif mode == "type":
enabled += data
def end_element(name):
global tname, url, check, mode, enabled
mode = ""
if name == "tracker" and enabled[0] == "T":
threading.Thread(target=check_url, args=(url, check, tname)).start()
tname = ""
url = ""
enabled = ""
check = ""
def start_element(name, attrs):
global tname, url, check, mode, enabled
if name == "name":
mode = "name"
elif name == "signup":
mode = "signup"
elif name == "check":
mode = "check"
elif name == "type":
mode = "type"
p.StartElementHandler = start_element
p.EndElementHandler = end_element
p.CharacterDataHandler = char_data
f = open("trackers.xml")
p.Parse(f.read(),1)
|
4,297 | b734a4405d1f5b3650d7149ae80e14548e2dbda4 |
# Project Overview
# Implement the k-means algorithm and apply your implementation on the given dataset,
# which contains a set of 2-D points.
# Import Libraries
import scipy.io
import pandas as pd
import matplotlib.pyplot as plt
import random
import numpy as np
import time
print("\nProgram Started :",time.asctime())
# Function to assign data to clusters using minimum euclidean distance to centroids.
# Inout: Data and Centroids
# Output: Assigned Clusters
def assign(data,Centroids):
EuclideanDistance = np.array([]).reshape(m, 0)
for k in range(K):
dist = np.sum((data - Centroids[:, k]) ** 2, axis=1)
EuclideanDistance = np.c_[EuclideanDistance, dist]
Clusters = np.argmin(EuclideanDistance, axis=1) + 1
return(Clusters)
# Function to map clusters and the respective data points
# Input: data and number of clusters
# Output: Map Cluster to Data Points
def map_cluster_data(data, K):
clusterDataMap = {}
for k in range(K):
clusterDataMap[k + 1] = np.array([]).reshape(2, 0)
for i in range(m):
clusterDataMap[clusters[i]] = np.c_[clusterDataMap[clusters[i]], data.iloc[i]]
for k in range(K):
clusterDataMap[k + 1] = clusterDataMap[k + 1].T
return(clusterDataMap)
# Function to calculate centroid
# Input: Map with cluster and Data Points and Centroids
# Output: New centroids which are calculated from the data mapping of clusters
def centroid(clusterDataMap,Centroids):
for k in range(K):
Centroids[:, k] = np.mean(clusterDataMap[k + 1], axis=0)
return(Centroids)
# Strategy 1 - Cluster Initialization
# Function to initialize cluster centroids randomly
# Input: Data and Number of Clusters
# Output: Centroids
def initialize_centroids(data, K):
Centroids = np.array([]).reshape(data.shape[1], 0)
for i in range(K):
randIndex = random.randint(0, data.shape[0] - 1)
Centroids = np.c_[Centroids, data.iloc[randIndex]]
return(Centroids)
# Strategy 2 - Cluster Initialization
# Pick the first center uniformly at random; each subsequent center is the
# remaining sample whose AVERAGE Euclidean distance to all previously chosen
# centers is maximal (as the project statement specifies).
# Fixes two defects in the original: np.argmax(..., axis=1) on a 1-D distance
# vector (raises AxisError on modern numpy/pandas), and measuring distance to
# the MEAN of the chosen centers instead of the average distance to each.
# Input:  data - (m, d) samples (DataFrame); K - number of centroids
# Output: (d, K) centroid array
def initialize_centroids_strategy2(data, K):
    Centroids = np.array([]).reshape(data.shape[1], 0)
    remaining = data
    for i in range(K):
        if i == 0:
            pos = random.randint(0, remaining.shape[0] - 1)
        else:
            # Average distance from every remaining sample to each chosen center.
            total = np.zeros(remaining.shape[0])
            for k in range(Centroids.shape[1]):
                total += np.sqrt(np.sum((remaining.values - Centroids[:, k]) ** 2, axis=1))
            pos = int(np.argmax(total / Centroids.shape[1]))
        Centroids = np.c_[Centroids, remaining.iloc[pos]]
        # A chosen sample cannot be selected twice.
        remaining = remaining.drop(remaining.index[pos])
    return Centroids
# Read Data
Numpyfile = scipy.io.loadmat("C:\\Projects\\MCS\\CSE575\\Project 2 - KMeans\\data.mat")
data = pd.DataFrame(Numpyfile['AllSamples'], columns=['x1', 'x2'])
m, n = data.shape  # m = number of samples, n = number of features
# Initialize parameters
n_iter = 50  # k-means refinement iterations per elbow run
# Plot styling for up to 10 clusters
color = ['red', 'blue', 'green', 'cyan', 'magenta', 'grey', 'yellow', 'orange', 'black', 'purple']
labels = ['cluster1', 'cluster2', 'cluster3', 'cluster4', 'cluster5', 'cluster6', 'cluster7', 'cluster8', 'cluster9', 'cluster10']
#
print("Strategy 1 : First Iteration")
# ********* Strategy 1 ************
# Randomly pick the initial centers from the given samples.
# Elbow run #1: K-Means for K = 2..10, recording WCSS for each K.
wcss_per_k = []
for K in range(2, 11):
    Centroids = initialize_centroids(data, K)
    for _ in range(n_iter):
        clusters = assign(data, Centroids)
        clusterDataMap = map_cluster_data(data, K)
        Centroids = centroid(clusterDataMap, Centroids)
    # Objective: within-cluster sum of squared distances to the centroid.
    wcss = 0
    for k in range(K):
        wcss += np.sum((clusterDataMap[k + 1] - Centroids[:, k]) ** 2)
    wcss_per_k.append(wcss)
WCSS_array = np.array(wcss_per_k)
# Plot the objective function
KMeans_array = np.arange(2, 11, 1)
plt.figure()
plt.plot(KMeans_array, WCSS_array)
plt.xlabel('Number of Clusters')
plt.ylabel('within-cluster sums of squares (WCSS)')
plt.title('Strategy1 - Run 1: Elbow Chart to identify optimum cluster number')
plt.show()
print("Strategy 1 : Second Iteration")
# Second run with a different random cluster initialization
# Run K-Means with clusters in the range of 2 - 10
WCSS_array=np.array([])
for K in range(2,11):
    Centroids = initialize_centroids(data, K)
    for i in range(n_iter):
        clusters = assign(data, Centroids)
        clusterDataMap = map_cluster_data(data, K)
        Centroids = centroid(clusterDataMap, Centroids)
    wcss = 0
    # Compute objective function (within-cluster sum of squares)
    for k in range(K):
        wcss += np.sum((clusterDataMap[k + 1] - Centroids[:, k]) ** 2)
    WCSS_array = np.append(WCSS_array, wcss)
# Plot the objective function
KMeans_array=np.arange(2,11,1)
plt.figure()
plt.plot(KMeans_array,WCSS_array)
plt.xlabel('Number of Clusters')
plt.ylabel('within-cluster sums of squares (WCSS)')
# BUG FIX: this is the second run; the title previously said "Run 1".
plt.title('Strategy1 - Run 2: Elbow Chart to identify optimum cluster number')
plt.show()
print("Strategy 2 : First Iteration")
# ********** Strategy 2 ************
# Strategy 2: pick the first center randomly; for the i-th center (i>1),
# choose a sample (among all possible samples) such that the average distance
# of this chosen one to all previous (i-1) centers is maximal.
# Elbow run #1: K-Means for K = 2..10, recording WCSS for each K.
wcss_per_k = []
for K in range(2, 11):
    Centroids = initialize_centroids_strategy2(data, K)
    for _ in range(n_iter):
        clusters = assign(data, Centroids)
        clusterDataMap = map_cluster_data(data, K)
        Centroids = centroid(clusterDataMap, Centroids)
    # Objective: within-cluster sum of squared distances to the centroid.
    wcss = 0
    for k in range(K):
        wcss += np.sum((clusterDataMap[k + 1] - Centroids[:, k]) ** 2)
    wcss_per_k.append(wcss)
WCSS_array = np.array(wcss_per_k)
# Plot the objective function: Strategy 2 - first initialization
KMeans_array = np.arange(2, 11, 1)
plt.figure()
plt.plot(KMeans_array, WCSS_array)
plt.xlabel('Number of Clusters')
plt.ylabel('within-cluster sums of squares (WCSS)')
plt.title('Strategy2 - Run 1: Elbow Chart to identify optimum cluster number')
plt.show()
print("Strategy 2 : Second Iteration")
# Second run of Strategy 2 with a different random cluster initialization
# Elbow run #2: K-Means for K = 2..10, recording WCSS for each K.
wcss_per_k = []
for K in range(2, 11):
    Centroids = initialize_centroids_strategy2(data, K)
    for _ in range(n_iter):
        clusters = assign(data, Centroids)
        clusterDataMap = map_cluster_data(data, K)
        Centroids = centroid(clusterDataMap, Centroids)
    # Objective: within-cluster sum of squared distances to the centroid.
    wcss = 0
    for k in range(K):
        wcss += np.sum((clusterDataMap[k + 1] - Centroids[:, k]) ** 2)
    wcss_per_k.append(wcss)
WCSS_array = np.array(wcss_per_k)
# Plot the objective function: Strategy 2 - second initialization
KMeans_array = np.arange(2, 11, 1)
plt.figure()
plt.plot(KMeans_array, WCSS_array)
plt.xlabel('Number of Clusters')
plt.ylabel('within-cluster sums of squares (WCSS)')
plt.title('Strategy2 - Run 2: Elbow Chart to identify optimum cluster number')
plt.show()
print("\nProgram Ended :",time.asctime())
4,298 | b3f6d255830bdb2b0afc99aab6e3715616ac4dec | # -*- coding:utf-8 -*-
# Author: 李泽军
# Date: 2020/1/27 3:31 PM
# Project: flask-demo
from flask import abort
from flask_login import current_user
from functools import wraps
from simpledu.modes import User
def role_required(role):
    '''
    Parameterized decorator protecting a route handler so it can only be
    accessed by users whose role is at least `role`; unauthenticated or
    under-privileged users receive a 404 (hiding the page's existence).
    :param role: minimum role level required to access the wrapped view
    :return: decorator that wraps a view function
    '''
    def decorator(func):
        @wraps(func)
        def wrapper(*args,**kwargs):
            allowed = current_user.is_authenticated and current_user.role >= role
            if not allowed:
                abort(404)
            return func(*args,**kwargs)
        return wrapper
    return decorator
# Convenience decorators for specific roles
staff_required = role_required(User.ROLE_STAFF)
admin_required = role_required(User.ROLE_ADMIN)
|
4,299 | 21974274b1e7800b83eb9582ab21714f04230549 | from rllab.envs.base import Env
from rllab.spaces import Discrete
from rllab.spaces import Box
from rllab.envs.base import Step
import numpy as np
import sys, pickle, os
sys.path.append(os.path.dirname(os.getcwd()))
from os.path import dirname
sys.path.append(dirname(dirname(dirname(os.getcwd()))))
from simulation import *
from scorer import *
from shapecloth import *
from tensioner import *
"""
A Rllab Environment for the tensioning policy experiments.
"""
class PinEnvDiscrete(Env):
    """rllab environment for the cloth-tensioning policy experiments.

    Wraps a cloth `simulation` with a pinned point (the "tensioner") and a
    scissor `trajectory`; each step the agent applies a small discrete tension
    adjustment while the scissors advance along the trajectory.
    """
    # Action id -> (dx, dy, dz) tension step applied at the pinned point.
    # Action 0 is a no-op; 1-6 move one unit along +/- x, y, z respectively.
    MAPPING = {
        0 : (0,0,0),
        1 : (1,0,0),
        2 : (0,1,0),
        3 : (0,0,1),
        4 : (-1,0,0),
        5 : (0,-1,0),
        6 : (0,0,-1)
    }
    def __init__(self, simulation, x, y, trajectory, scorer=0, max_displacement=False, predict=False, original=False, sample=False):
        """Create the environment.

        :param simulation: cloth simulation (project type) providing cloth
            state, mouse (scissors) control, pinning, and rendering.
        :param x: x coordinate of the pin position.
        :param y: y coordinate of the pin position.
        :param trajectory: sequence of (x, y) scissor waypoints.
        :param scorer: id forwarded to Scorer (semantics defined in scorer.py).
        :param max_displacement: tension limit forwarded to pin_position.
        :param predict: include look-ahead features in the observation.
        :param original: use the minimal (index + displacement) observation.
        :param sample: stored but not referenced in this file — TODO confirm.
        """
        self.simulation = simulation
        height, width = simulation.cloth.initial_params[0]
        # Flattened grid size; not referenced elsewhere in this file.
        self.os_dim = height * width * 5
        self.simulation.reset()
        self.tensioner = self.simulation.pin_position(x, y, max_displacement)
        self.scorer = Scorer(scorer)
        self.trajectory = trajectory
        self.traj_index = 0            # current waypoint index along the trajectory
        self.pinx, self.piny = x, y    # remembered so reset() can re-pin the same point
        self.predict = predict
        self.original = original
        self.sample = sample
    @property
    def observation_space(self):
        """Box bounds matching the observation layout produced by `_state`.

        original: [traj_index, dx, dy, dz]
        default:  the above + one (x, y, tension) triple per cloth blob
        predict:  the above + three look-ahead feature groups + scissor (x, y)
        """
        if self.original:
            return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement]),
                high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]))
        if not self.predict:
            return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement] + len(self.simulation.cloth.blobs) * [0, 0, -800]),
                high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]
                    + len(self.simulation.cloth.blobs) * [500, 500, 800]))
        return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement] + len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -1000, -1000, -1000, -3.2] + [0, 0]),
            high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]
                + len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800, 800, 800, 800, 3.2] + [600, 600]))
    @property
    def action_space(self):
        """Seven discrete actions; see MAPPING for their meanings."""
        return Discrete(7)
    @property
    def _state(self):
        """Build the current observation vector (layout depends on flags).

        The look-ahead features (predict mode) are, for offsets +1, +5, +10
        along the trajectory: the upcoming scissor position, the closest shape
        point to it, and a relative angle; sentinel values (-1000 / 0) are used
        when the offset runs past the end of the trajectory or the next point
        is too far (>= 100) from the previous one.
        """
        scissors = self.simulation.mouse.x, self.simulation.mouse.y
        centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist()
        if self.original:
            return np.array([self.traj_index] + list(self.tensioner.displacement))
        if not self.predict:
            return np.array([self.traj_index] + list(self.tensioner.displacement) + centroids)
        # Sentinels for the three look-ahead groups (offsets +10, +5, +1).
        next_position3 = [-1000, -1000]
        closest_shape3 = [-1000, -1000]
        angle3 = 0
        next_position2 = [-1000, -1000]
        closest_shape2 = [-1000, -1000]
        angle2 = 0
        next_position = [-1000, -1000]
        closest_shape = [-1000, -1000]
        angle = 0
        if self.traj_index < len(self.trajectory) - 1:
            next_position = [self.trajectory[self.traj_index+1][0], self.trajectory[self.traj_index+1][1]]
            closest_shape = list(self.simulation.cloth.find_closest_shapept(next_position[0], next_position[1]))
            angle = self.simulation.cloth.find_dtheta(scissors[0], scissors[1], next_position[0], next_position[1], closest_shape[0], closest_shape[1])
        if self.traj_index < len(self.trajectory) - 5:
            next_position2 = [self.trajectory[self.traj_index+5][0], self.trajectory[self.traj_index+5][1]]
            if np.linalg.norm(np.array(next_position2) - np.array(next_position)) < 100:
                closest_shape2 = list(self.simulation.cloth.find_closest_shapept(next_position2[0], next_position2[1]))
                angle2 = self.simulation.cloth.find_dtheta(next_position[0], next_position[1], next_position2[0], next_position2[1], closest_shape2[0], closest_shape2[1])
        if self.traj_index < len(self.trajectory) - 10:
            next_position3 = [self.trajectory[self.traj_index+10][0], self.trajectory[self.traj_index+10][1]]
            if np.linalg.norm(np.array(next_position3) - np.array(next_position2)) < 100:
                closest_shape3 = list(self.simulation.cloth.find_closest_shapept(next_position3[0], next_position3[1]))
                angle3 = self.simulation.cloth.find_dtheta(next_position2[0], next_position2[1], next_position3[0], next_position3[1], closest_shape3[0], closest_shape3[1])
        return np.array([self.traj_index] + list(self.tensioner.displacement) + centroids + next_position + closest_shape + [angle] + next_position2 + closest_shape2 + [angle2]
            + next_position3 + closest_shape3 + [angle3] + list(scissors))
    @property
    def _score(self):
        """Scorer value for the current cloth, with a -100 penalty when the
        tensioner is at (or within 2 of) its displacement limit."""
        # NOTE(review): self._state[1] is a single displacement component, so
        # this norm is just its absolute value; the full displacement vector
        # would be self._state[1:4] — confirm intent.
        disp = np.linalg.norm(self._state[1])
        score = self.scorer.score(self.simulation.cloth)
        if disp >= self.tensioner.max_displacement - 2:
            score -= 100
        return score
    def reset(self):
        """Reset the simulation, re-pin the tensioner at the original point,
        and return a copy of the initial observation."""
        self.simulation.reset()
        self.tensioner = self.simulation.pin_position(self.pinx, self.piny, self.tensioner.max_displacement)
        self.traj_index = 0
        observation = np.copy(self._state)
        return observation
    def step(self, action):
        """Apply one tension action, advance the scissors two waypoints, and
        return an rllab Step(observation, reward, done).

        Reward is 0 until the trajectory is nearly finished, at which point
        the cloth evaluation score is returned.
        """
        x, y, z = self.MAPPING[action]
        self.tensioner.tension(x, y, z)
        self.simulation.move_mouse(self.trajectory[self.traj_index][0], self.trajectory[self.traj_index][1])
        # NOTE(review): these incremental rewards are unconditionally
        # overwritten below and never returned — confirm whether intentional.
        reward = self.simulation.update() * self.traj_index/10
        self.traj_index += 1
        self.simulation.move_mouse(self.trajectory[self.traj_index][0], self.trajectory[self.traj_index][1])
        reward += self.simulation.update() * self.traj_index/10
        done = self.traj_index >= len(self.trajectory) - 2
        if done:
            reward = self.simulation.cloth.evaluate()
        else:
            reward = 0
        next_observation = np.copy(self._state)
        self.traj_index += 1
        return Step(observation=next_observation, reward=reward, done=done)
    def render(self):
        """Delegate rendering to the underlying simulation."""
        self.simulation.render_sim()
# def local_angles(self, n=5):
# if self.
# for i in range(n):
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.