blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f5e3c26105be0d37b85658b740c617e799dc16cb | 21961be64d9cd4414f4a6d0b45eb20f727734929 | /K47 User Manual/7_miscellany/37_relay/Code/Python/relay.py | 0e58bdd428c46888823cc7f321177d007c601ec2 | [] | no_license | Alion3064492356/Sensors-for-RaspberryPi | 156f70c2ce81705a9f1372f9d037d0432a7ad54c | f81ca29280d8d27da795f1c9720747e3db9c20d6 | refs/heads/master | 2020-03-17T20:54:52.709446 | 2018-06-14T02:30:46 | 2018-06-14T02:30:46 | 133,934,753 | 0 | 0 | null | 2018-06-12T20:31:51 | 2018-05-18T09:40:42 | Python | UTF-8 | Python | false | false | 676 | py | #!/usr/bin/env python
import RPi.GPIO as GPIO
import time
# Physical (BOARD-numbering) header pin wired to the relay module's control input.
RelayPin = 10
def setup():
    """Configure the GPIO pin that drives the relay (must run before loop())."""
    GPIO.setmode(GPIO.BOARD)    # Numbers GPIOs by physical location
    GPIO.setup(RelayPin, GPIO.OUT)  # relay control pin is an output
def loop():
    """Toggle the relay on and off every half second, forever.

    Runs until interrupted (the caller catches KeyboardInterrupt and
    calls destroy()).
    """
    while True:
        # print(...) is valid on both Python 2 and 3; fixed typo "relayd".
        print('...relay on')
        GPIO.output(RelayPin, GPIO.HIGH)   # energize the relay coil
        time.sleep(0.5)
        print('relay off...')
        GPIO.output(RelayPin, GPIO.LOW)    # de-energize the relay coil
        time.sleep(0.5)
def destroy():
    """Leave the relay de-energized and release the GPIO resources."""
    GPIO.output(RelayPin, GPIO.LOW)
    GPIO.cleanup()                     # Release resource
if __name__ == '__main__':     # Program start from here
    setup()
    try:
        loop()
    except KeyboardInterrupt:  # When 'Ctrl+C' is pressed, the child program destroy() will be executed.
        destroy()
| [
"3064492356@qq.com"
] | 3064492356@qq.com |
c6f9bfe889eb0278f68b7a17049662d5605c5285 | 5af277b5819d74e61374d1d78c303ac93c831cf5 | /axial/logging_utils.py | ef723570c0f02a331ebfc7220811665417690c53 | [
"Apache-2.0"
] | permissive | Ayoob7/google-research | a2d215afb31513bd59bc989e09f54667fe45704e | 727ec399ad17b4dd1f71ce69a26fc3b0371d9fa7 | refs/heads/master | 2022-11-11T03:10:53.216693 | 2020-06-26T17:13:45 | 2020-06-26T17:13:45 | 275,205,856 | 2 | 0 | Apache-2.0 | 2020-06-26T16:58:19 | 2020-06-26T16:58:18 | null | UTF-8 | Python | false | false | 4,459 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import time
from absl import logging
import numpy as np
import PIL.Image
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import gfile
from tensorflow.compat.v1.core.framework.summary_pb2 import Summary
from tensorflow.compat.v1.core.util.event_pb2 import Event
def pack_images(images, rows, cols):
  """Tile a batch of images into one image (adapted from Jaxboard).

  Args:
    images: Image tensor in shape [N, W, H, C].
    rows: Number of images per row in the tiled image.
    cols: Number of images per column in the tiled image.

  Returns:
    A tiled image of shape [W * rows, H * cols, C]. Incomplete trailing
    rows are truncated.
  """
  width, height, depth = np.shape(images)[-3:]
  flat = np.reshape(images, (-1, width, height, depth))
  batch = np.shape(flat)[0]
  n_rows = np.minimum(rows, batch)
  n_cols = np.minimum(batch // n_rows, cols)
  tiles = np.reshape(flat[:n_rows * n_cols],
                     (n_rows, n_cols, width, height, depth))
  # Interleave rows of images with image width, then flatten the grid.
  tiles = np.transpose(tiles, [0, 2, 1, 3, 4])
  return np.reshape(tiles, [n_rows * width, n_cols * height, depth])
class SummaryWriter(object):
  """Tensorflow summary writer in the spirit of Jaxboard.

  Unlike Jaxboard, this version depends on Tensorflow directly, because
  this project already uses Tensorflow.
  """

  def __init__(self, dir, write_graph=True):
    if not gfile.IsDirectory(dir):
      gfile.MakeDirs(dir)
    graph = tf.get_default_graph() if write_graph else None
    self.writer = tf.summary.FileWriter(dir, graph=graph)

  def flush(self):
    self.writer.flush()

  def close(self):
    self.writer.close()

  def _write_event(self, summary_value, step):
    # Wrap a single summary value in an Event stamped with wall-clock time.
    event = Event(
        wall_time=round(time.time()),
        step=step,
        summary=Summary(value=[summary_value]))
    self.writer.add_event(event)

  def scalar(self, tag, value, step):
    self._write_event(Summary.Value(tag=tag, simple_value=float(value)), step)

  def image(self, tag, image, step):
    arr = np.asarray(image)
    # Promote grayscale inputs to 3-channel RGB before PNG encoding.
    if arr.ndim == 2:
      arr = arr[:, :, None]
    if arr.shape[-1] == 1:
      arr = np.repeat(arr, 3, axis=-1)
    buf = io.BytesIO()
    PIL.Image.fromarray(arr).save(buf, 'PNG')
    image_summary = Summary.Image(
        encoded_image_string=buf.getvalue(),
        colorspace=3,
        height=arr.shape[0],
        width=arr.shape[1])
    self._write_event(Summary.Value(tag=tag, image=image_summary), step)

  def images(self, tag, images, step, square=True):
    """Save a (rows, cols) tiling of a batch of images.

    The batch is truncated rather than padded when it does not fill the
    final row.
    """
    batch = np.asarray(images)
    if square:
      rows = cols = int(np.sqrt(len(batch)))
    else:
      rows, cols = 1, len(batch)
    self.image(tag, pack_images(batch, rows, cols), step=step)
class Log(object):
  """Mirror averaged scalar metrics to Tensorboard and the Python logger."""

  def __init__(self, logdir, write_graph=True):
    self.logdir = logdir
    # Tensorboard backend
    self.summary_writer = SummaryWriter(logdir, write_graph=write_graph)

  def write(self, key_prefix, info_dicts, step):
    pieces = []
    # The last dict determines which keys are reported.
    for key in info_dicts[-1]:
      tag = '{}/{}'.format(key_prefix, key)
      # Average each logged value over the accumulated dicts.
      mean_val = np.mean([info[key] for info in info_dicts])
      # absl log
      pieces.append('{}={:.6f}'.format(tag, mean_val))
      # tensorboard
      self.summary_writer.scalar(tag, mean_val, step=step)
    self.summary_writer.flush()
    logging.info('step={:08d} {}'.format(step, ' '.join(pieces)))
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
1eb51bed6fada6957870d90071118e0bb172b20d | dc68820135a54bbf744425fce65da16c1101ad33 | /web/api/post_image.py | 397c4e2dafed832004870a01e5e6218a182b1a46 | [] | no_license | andrefreitas/feup-ldso-foodrific | 425cf9013427d183e0b23c5632115135dc4542f1 | ec2bd0361873d3eadd4f6a2fa60b23eb3b7e6ddf | refs/heads/master | 2021-01-10T14:30:24.011293 | 2015-10-04T11:01:09 | 2015-10-04T11:01:09 | 43,633,158 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | from datastore import *
from pages import BaseHandler
from google.appengine.api import images
class PostImage(BaseHandler):
    """Serve a post's stored photo as a PNG image."""

    def get(self):
        # "id" query parameter selects the post whose photo is returned.
        post_id = int(self.request.get("id"))
        post = getPostByID(post_id)
        self.response.headers['Content-Type'] = 'image/png'
        self.response.out.write(post.photo)
| [
"p.andrefreitas@gmail.com"
] | p.andrefreitas@gmail.com |
92c3a0d5822904b02ee02cf30204b593268f8d36 | ded10c2f2f5f91c44ec950237a59225e8486abd8 | /.history/2/matrix_squaring_20200413235341.py | 1cded98ea6504881b7ef71c0979704ed33286f9f | [] | no_license | jearistiz/Statistical-Physics-Projects | 276a86407b32ded4e06b32efb2fadbd8eff8daed | d9c5b16a50856e148dc8604d92b6de3ea21fc552 | refs/heads/master | 2022-11-05T03:41:23.623050 | 2020-06-28T06:36:05 | 2020-06-28T06:36:05 | 254,909,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,000 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from time import time
import pandas as pd
def rho_free(x,xp,beta):
    """Density-matrix element for a free particle on an infinite torus.

    Gaussian kernel in (x - xp) with variance beta.
    """
    prefactor = (2. * np.pi * beta) ** (-0.5)
    return prefactor * np.exp(-(x - xp) ** 2 / (2 * beta))
def harmonic_potential(x):
    """Harmonic potential V(x) = x**2 / 2 at position x."""
    return x ** 2 / 2.0
def anharmonic_potential(x):
    """Anharmonic potential V(x) = x**2/2 - x**3 + x**4 at position x."""
    # An interesting alternative to experiment with: np.abs(x)*(1 + np.cos(x))
    return x ** 2 / 2.0 - x ** 3 + x ** 4
def QHO_canonical_ensemble(x,beta):
    """Exact quantum probability density of the thermal harmonic oscillator.

    Args:
        x: position.
        beta: inverse temperature in reduced units, beta = 1/T.

    Returns:
        pi(x; beta), the theoretical quantum probability density at x for
        an oscillator in contact with a heat bath at temperature 1/beta.
    """
    t = np.tanh(beta / 2.)
    return np.sqrt(t / np.pi) * np.exp(-t * x ** 2)
def rho_trotter(x_max = 5., nx = 101, beta=1, potential=harmonic_potential):
"""
Uso: devuelve matriz densidad en aproximación de Trotter para altas temperaturas
y bajo el potencial "potential".
Recibe:
xmax: float -> los valores de x estarán en el intervalo (-xmax,xmax).
nx: int -> número de valores de x considerados.
beta: float -> inverso de temperatura en unidades reducidas.
potential: func -> potencial de interacción, debe ser una función de x.
Devuelve:
rho: numpy array, shape=(nx,nx) -> matriz densidad en aproximación de Trotter para
altas temperaturas y potencial dado.
grid_x: numpy array, shape=(nx,) -> valores de x en los que está evaluada rho.
dx: float -> separación entre valores contiguos de grid_x
"""
dx = 2. * x_max / (nx - 1)
grid_x = np.array([i*dx for i in range(-int((nx-1)/2), int(nx/2 + 1))])
rho = np.array([ [ rho_free(x , xp, beta) * np.exp(-0.5*beta*(potential(x)+potential(xp))) for x in grid_x] for xp in grid_x])
return rho, grid_x, dx
def density_matrix_squaring(rho, grid_x, N_iter = 1, beta_ini = 1, print_steps=True):
    """Apply the matrix-squaring algorithm to a density matrix N_iter times.

    Each squaring doubles the inverse temperature, so the returned matrix
    describes the system at beta_fin = beta_ini * 2**N_iter.

    Args:
        rho: (nx, nx) density matrix at inverse temperature beta_ini
            (e.g. a high-temperature Trotter approximation).
        grid_x: (nx,) uniformly spaced positions where rho is evaluated.
        N_iter: number of squaring iterations.
        beta_ini: inverse temperature of the input rho.
        print_steps: if True, print the header and one line per iteration.

    Returns:
        rho: density matrix at inverse temperature beta_fin.
        trace_rho: trace of rho times dx; with the normalization used here
            this equals the partition function at beta_fin.
        beta_fin: final inverse temperature, beta_ini * 2**N_iter.
    """
    dx = grid_x[1] - grid_x[0]
    beta_fin = beta_ini * 2 ** N_iter
    # Respect print_steps for all output (the previous version printed this
    # header unconditionally, making the flag useless for silencing output).
    if print_steps:
        print('\nbeta_ini = %.3f'%beta_ini,
              '\n----------------------------------------------------------------')
    for i in range(N_iter):
        # Discretized rho(beta) @ rho(beta) integral -> rho(2*beta).
        rho = dx * np.dot(rho, rho)
        if print_steps:
            print(u'Iteration %d) 2^%d * beta_ini --> 2^%d * beta_ini'%(i, i, i+1))
    trace_rho = np.trace(rho)*dx
    return rho, trace_rho, beta_fin
def save_pi_x_csv(grid_x, x_weights, file_name, relevant_info, print_data=True):
    """Save the distribution pi(x; beta) to a CSV file.

    Args:
        grid_x: (nx,) positions where pi(x; beta) is evaluated.
        x_weights: (nx,) probability density at each position in grid_x.
        file_name: path of the CSV file to write.
        relevant_info: comment line written before the CSV data (callers
            pass a string starting with '#').
        print_data: if True, print the resulting DataFrame.

    Returns:
        pandas.DataFrame with the data that was written.
    """
    pi_x_data = pd.DataFrame(data={'Position x': grid_x,
                                   'Prob. density': x_weights})
    # Single open in write mode: emit the comment header, then append the CSV
    # body to the same handle (the previous version opened the file twice and
    # called .close() redundantly inside the with-blocks).
    with open(file_name, mode='w') as rho_csv:
        rho_csv.write(relevant_info + '\n')
        pi_x_data.to_csv(rho_csv)
    if print_data:
        print(pi_x_data)
    return pi_x_data
def run_pi_x_squaring(x_max=5., nx=201, N_iter=7, beta_fin=4, potential=harmonic_potential,
                      potential_string = 'harmonic_potential', print_steps=True,
                      save_data=True, plot=True, save_plot=True, show_plot=True):
    """Full pipeline: Trotter rho -> matrix squaring -> pi(x; beta_fin).

    Builds the high-temperature density matrix, squares it N_iter times to
    reach beta_fin, normalizes it, then optionally saves pi(x; beta_fin) to
    CSV and plots it against the exact QHO density. Returns 0.
    """
    # Starting inverse temperature so that N_iter doublings land on beta_fin.
    beta_ini = beta_fin * 2**(-N_iter)
    # Compute rho with the Trotter approximation.
    rho, grid_x, dx = rho_trotter(x_max, nx, beta_ini, potential)
    # Approximate rho via matrix squaring iterated N_iter times.
    rho, trace_rho, beta_fin_2 = density_matrix_squaring(rho, grid_x, N_iter,
                                                         beta_ini, print_steps)
    print('----------------------------------------------------------------\n',
          u'beta_fin = %.3f Z(beta_fin) = Tr(rho(beta_fin)) ≈ %.3E \n'%(beta_fin_2,trace_rho))
    # Normalize rho and read the probability densities on grid_x (diagonal).
    rho_normalized = rho/trace_rho
    x_weights = np.diag(rho_normalized)
    if save_data==True:
        # CSV file name encoding the run parameters.
        file_name = u'pi_x-%s-x_max_%.3f-nx_%d-N_iter_%d-beta_fin_%.3f.csv'\
                    %(potential_string,x_max,nx,N_iter,beta_fin)
        # Run parameters stored as a comment line at the top of the CSV.
        relevant_info = u'# %s x_max = %.3f nx = %d '%(potential_string,x_max,nx) + \
                        u'N_iter = %d beta_ini = %.3f '%(N_iter,beta_ini,) + \
                        u'beta_fin = %.3f'%beta_fin
        # Save pi(x; beta_fin) values to the CSV file.
        save_pi_x_csv(grid_x, x_weights, file_name, relevant_info, print_data=0)
    # Plot and compare against the exact theory.
    if plot == True:
        plt.figure(figsize=(8,5))
        plt.plot(grid_x, x_weights, label = 'Matrix squaring +\nfórmula de Trotter.\n$N=%d$ iteraciones\n$dx=%.3E$'%(N_iter,dx))
        plt.plot(grid_x, QHO_canonical_ensemble(grid_x,beta_fin), label=u'Valor teórico QHO')
        plt.xlabel(u'x')
        plt.ylabel(u'$\pi^{(Q)}(x;\\beta)$')
        plt.legend(loc='best',title=u'$\\beta=%.2f$'%beta_fin)
        plt.tight_layout()
        if save_plot==True:
            plot_name = u'pi_x-plot-%s-x_max_%.3f-nx_%d-N_iter_%d-beta_fin_%.3f.eps'\
                        %(potential_string,x_max,nx,N_iter,beta_fin)
            plt.savefig(plot_name)
        if show_plot==True:
            plt.show()
        plt.close()
    return 0
# Larger default font for all figures produced below.
plt.rcParams.update({'font.size':15})

# Run the full pipeline for the harmonic oscillator (plot shown, not saved).
run_pi_x_squaring(potential = harmonic_potential, potential_string = 'harmonic_potential',
                  save_data=True, save_plot=False, show_plot=True)
| [
"jeaz.git@gmail.com"
] | jeaz.git@gmail.com |
b992a6459c6e930f7216efb9a7c3dd03ebc7e85e | 2f81d225594bbe655bc7d3442a1a41924d3829ff | /src/qrcode/tests.py | 098102082f5e0412f5c8fd51ab6e2fe2a4c8b623 | [] | no_license | znight99/inkscape_ext_barcode | 02c33ec4d1f9d0be329f9b268682c7077ff433e4 | 52079e1f739bd318f28959b6148fa935cd8cd6ac | refs/heads/master | 2021-08-22T11:11:10.351558 | 2017-11-30T02:33:55 | 2017-11-30T02:33:55 | 112,554,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,830 | py | import six
import qrcode
import qrcode.util
import qrcode.image.svg
try:
import qrcode.image.pure
import pymaging_png # ensure that PNG support is installed
except ImportError:
pymaging_png = None
from qrcode.util import (
MODE_NUMBER, MODE_ALPHA_NUM, MODE_8BIT_BYTE)
try:
import unittest2 as unittest
except ImportError:
import unittest
# Non-ASCII payload (Greek alpha, beta, gamma) shared by the tests below.
UNICODE_TEXT = u'\u03b1\u03b2\u03b3'
class QRCodeTests(unittest.TestCase):
    """End-to-end tests for qrcode: capacity, mode selection, rendering,
    and the data-segmentation optimizer."""
    def test_basic(self):
        qr = qrcode.QRCode(version=1)
        qr.add_data('a')
        qr.make(fit=False)
    def test_overflow(self):
        # 15 bytes exceed a version-1 code's capacity when fitting is disabled.
        qr = qrcode.QRCode(version=1)
        qr.add_data('abcdefghijklmno')
        self.assertRaises(OverflowError, qr.make, fit=False)
    def test_fit(self):
        # With fitting enabled the version grows automatically with the data.
        qr = qrcode.QRCode()
        qr.add_data('a')
        qr.make()
        self.assertEqual(qr.version, 1)
        qr.add_data('bcdefghijklmno')
        qr.make()
        self.assertEqual(qr.version, 2)
    def test_mode_number(self):
        # optimize=0 disables segmentation so a single mode is chosen.
        qr = qrcode.QRCode()
        qr.add_data('1234567890123456789012345678901234', optimize=0)
        qr.make()
        self.assertEqual(qr.version, 1)
        self.assertEqual(qr.data_list[0].mode, MODE_NUMBER)
    def test_mode_alpha(self):
        qr = qrcode.QRCode()
        qr.add_data('ABCDEFGHIJ1234567890', optimize=0)
        qr.make()
        self.assertEqual(qr.version, 1)
        self.assertEqual(qr.data_list[0].mode, MODE_ALPHA_NUM)
    def test_regression_mode_comma(self):
        # ',' is not in the alphanumeric charset; must fall back to 8-bit mode.
        qr = qrcode.QRCode()
        qr.add_data(',', optimize=0)
        qr.make()
        self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
    def test_mode_8bit(self):
        qr = qrcode.QRCode()
        qr.add_data(u'abcABC' + UNICODE_TEXT, optimize=0)
        qr.make()
        self.assertEqual(qr.version, 1)
        self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
    def test_mode_8bit_newline(self):
        qr = qrcode.QRCode()
        qr.add_data('ABCDEFGHIJ1234567890\n', optimize=0)
        qr.make()
        self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
    def test_render_svg(self):
        qr = qrcode.QRCode()
        qr.add_data(UNICODE_TEXT)
        img = qr.make_image(image_factory=qrcode.image.svg.SvgImage)
        img.save(six.BytesIO())
    def test_render_svg_path(self):
        qr = qrcode.QRCode()
        qr.add_data(UNICODE_TEXT)
        img = qr.make_image(image_factory=qrcode.image.svg.SvgPathImage)
        img.save(six.BytesIO())
    @unittest.skipIf(not pymaging_png, "Requires pymaging with PNG support")
    def test_render_pymaging_png(self):
        qr = qrcode.QRCode()
        qr.add_data(UNICODE_TEXT)
        img = qr.make_image(image_factory=qrcode.image.pure.PymagingImage)
        img.save(six.BytesIO())
    def test_optimize(self):
        # optimize=4 splits the payload into mode-homogeneous segments.
        qr = qrcode.QRCode()
        text = 'A1abc12345def1HELLOa'
        qr.add_data(text, optimize=4)
        qr.make()
        self.assertEqual(len(qr.data_list), 5)
        self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
        self.assertEqual(qr.data_list[1].mode, MODE_NUMBER)
        self.assertEqual(qr.data_list[2].mode, MODE_8BIT_BYTE)
        self.assertEqual(qr.data_list[3].mode, MODE_ALPHA_NUM)
        self.assertEqual(qr.data_list[4].mode, MODE_8BIT_BYTE)
        self.assertEqual(qr.version, 2)
    def test_optimize_size(self):
        # Segmentation should yield a smaller code than a single 8-bit segment.
        text = 'A1abc12345123451234512345def1HELLOHELLOHELLOHELLOa' * 5
        qr = qrcode.QRCode()
        qr.add_data(text)
        qr.make()
        self.assertEqual(qr.version, 10)
        qr = qrcode.QRCode()
        qr.add_data(text, optimize=0)
        qr.make()
        self.assertEqual(qr.version, 11)
    def test_qrdata_repr(self):
        data = b'hello'
        data_obj = qrcode.util.QRData(data)
        self.assertEqual(repr(data_obj), repr(data))
| [
"znight@yeah.net"
] | znight@yeah.net |
b100acdda26eabc4c53277934e03fac670a2011b | fe7c5854aa07223cfcfcb117ada0dd8111f258fe | /controlLayer_server/Control Layer.py | 7865665e43047daf46a77bca27d98560e268aa13 | [] | no_license | amrmoastafa/CodeYield | ba27dd8aa6d4c47dc43550a64ae0df24c5b30cdb | 7715e651ee13ca2e982f8edb844e0e4f6512ec42 | refs/heads/master | 2022-07-04T13:36:28.033621 | 2020-05-12T11:29:16 | 2020-05-12T11:29:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,884 | py | import cv2 as cv
from image import detectImage
from NavBar import navBar
from cross_circle import detectIcon
from LabelBar import labelBarDetection
from LabelDetection import labelDetection
# Input mock-up image; the whole script runs against this file at import time.
path = "data/test.png"
img = cv.imread(path)
imageHeight, imageWidth, imageChannels = img.shape
print(imageHeight,imageWidth)
# Tuning Parameters
# Shapes whose vertical centers differ by less than this share the same row.
rowMarginBetweenShapes = 0.2*imageHeight
# X coordinate splitting the page into the two layout columns.
colMarginXPoint = int(imageWidth / 2)
noOfColumnsPerRow = 2
class Shape:
    """A detected UI element with its bounding geometry and layout metadata."""

    def __init__(self, name, x, y, width, height, radius):
        # Element type: "TEXT", "LABEL", "IMAGE", "NAV" or "ICON".
        self.name = name
        # Center coordinates of the bounding box (or circle, for icons).
        self.x, self.y = x, y
        self.width, self.height = width, height
        self.radius = radius
        # Size ratios relative to the containing column/row; filled in later.
        self.widthRatio = self.heightRatio = 0
        # Horizontal alignment within its column ("LEFT"/"Center"/"RIGHT").
        self.allignment = ""
class HtmlRow:
    """One horizontal band of the page, with its shapes split into two columns."""

    def __init__(self):
        # All shapes whose vertical centers fall in this row.
        self.shapesPerRow = []
        # Shapes assigned to the left / right layout column.
        self.column1Shapes, self.column2Shapes = [], []
        # Fraction of the page width taken by each column.
        self.column1Ratio = self.column2Ratio = 0
        # Row height = tallest shape in the row (set after grouping).
        self.height = 0
        # Index (within shapesPerRow) of the widest shape.
        self.maxWidthIndex = 0
shapesList = []
listOfRows = []
#cv.imshow('main', img)
# Retrieving labels
text = labelDetection(path)
for iterator in range(len(text)):
    x,y,w,h = cv.boundingRect(text[iterator])
    # Shape stores the center point, so shift by half the box size.
    temporaryShape = Shape("TEXT", x + w / 2, y + h / 2, w, h, 0)
    shapesList.append(temporaryShape)
# Retrieving labelBar
labelBar = labelBarDetection(path)
for iterator in range(len(labelBar)):
    x, y, w, h = cv.boundingRect(labelBar[iterator])
    temporaryShape = Shape("LABEL", x + w / 2, y + h / 2, w, h, 0)
    shapesList.append(temporaryShape)
# Retrieving images
image = detectImage(path)
for iterator in range(len(image)):
    x, y, w, h = cv.boundingRect(image[iterator])
    temporaryShape = Shape("IMAGE", x + w / 2, y + h / 2, w, h, 0)
    shapesList.append(temporaryShape)
# Retrieving navigation bar
nav = navBar(path)
for iterator in range(len(nav)):
    x, y, w, h = cv.boundingRect(nav[iterator])
    temporaryShape = Shape("NAV", x + w / 2, y + h / 2, w, h, 0)
    shapesList.append(temporaryShape)
# Retrieving icons
icon = detectIcon(path)
for iterator in range(len(icon)):
    # Icons are circular: minEnclosingCircle already yields the center.
    (x, y), rad = cv.minEnclosingCircle(icon[iterator])
    temporaryShape = Shape("ICON", int(x), int(y), int(rad)*2, int(rad)*2, int(rad))
    shapesList.append(temporaryShape)
# Sorting by y-point
shapesList = sorted(shapesList, key=lambda x: x.y, reverse=False)
# Calc. Each row height
def getMaxHeightPerRow(ROW):
    """Return the tallest shape height in ROW (0 for an empty row).

    `default=0` reproduces the original manual loop's behavior when
    ROW.shapesPerRow is empty.
    """
    return max((shape.height for shape in ROW.shapesPerRow), default=0)
def handlingRows():
    """Group shapesList (already sorted by y) into HtmlRow objects.

    Consecutive shapes whose vertical centers differ by less than
    rowMarginBetweenShapes share a row. Mutates the module-level
    listOfRows and sets each row's height.
    """
    temporaryRow = HtmlRow()
    # 1st minimum-y shape is inserted into 1st row
    temporaryRow.shapesPerRow.append(shapesList[0])
    for iterator in range(len(shapesList) - 1):
        diff = abs(shapesList[iterator].y - shapesList[iterator + 1].y)
        if diff < rowMarginBetweenShapes:
            temporaryRow.shapesPerRow.append(shapesList[iterator + 1])
        else:
            # Gap too large: close the current row and start a new one.
            listOfRows.append(temporaryRow)
            temporaryRow = HtmlRow()
            temporaryRow.shapesPerRow.append(shapesList[iterator+1])
    # Appending last row elements
    listOfRows.append(temporaryRow)
    # Retrieving max-height per row
    for rows in range(len(listOfRows)):
        listOfRows[rows].height = getMaxHeightPerRow(listOfRows[rows])
        #print('ROW Height',listOfRows[rows].height)
handlingRows()

# Retrieving maximum width of a shape for each row & calc. ratio of columns
for rowsCounter in range(len(listOfRows)):
    for shapes in range(len(listOfRows[rowsCounter].shapesPerRow)-1):
        if listOfRows[rowsCounter].shapesPerRow[shapes+1].width > listOfRows[rowsCounter].shapesPerRow[ listOfRows[rowsCounter].maxWidthIndex ].width:
            listOfRows[rowsCounter].maxWidthIndex = shapes+1
    # Retrieving maximum width for each shape for each row
    maxWidthShape = listOfRows[rowsCounter].shapesPerRow[ listOfRows[rowsCounter].maxWidthIndex ]
    # The widest shape's edge defines the split between the two columns.
    if maxWidthShape.x <= colMarginXPoint:
        # Widest shape sits on the left: column 1 ends at its right edge.
        maxColumnWidth = maxWidthShape.x + (maxWidthShape.width / 2)
        listOfRows[rowsCounter].column1Ratio = maxColumnWidth / imageWidth
        listOfRows[rowsCounter].column2Ratio = 1 - listOfRows[rowsCounter].column1Ratio
    else:
        # Widest shape sits on the right: column 1 ends at its left edge.
        maxColumnWidth = abs( maxWidthShape.x - (maxWidthShape.width / 2) )
        listOfRows[rowsCounter].column1Ratio = maxColumnWidth / imageWidth
        listOfRows[rowsCounter].column2Ratio = 1 - listOfRows[rowsCounter].column1Ratio
# Appending each shape to their belong column
# NOTE(review): alignment labels mix casing ("LEFT"/"Center"/"RIGHT");
# downstream consumers presumably match these exact strings — confirm
# before normalizing.
for rowsCounter in range(len(listOfRows)):
    for shapes in range(len(listOfRows[rowsCounter].shapesPerRow)):
        # Checking if the shape lies either in the left column or right one
        if listOfRows[rowsCounter].shapesPerRow[shapes].x <= (listOfRows[rowsCounter].column1Ratio * imageWidth):
            listOfRows[rowsCounter].column1Shapes.append(listOfRows[rowsCounter].shapesPerRow[shapes])
            # Assigning shape width ratio
            shapeWidthRatio = listOfRows[rowsCounter].shapesPerRow[shapes].width / (listOfRows[rowsCounter].column1Ratio * imageWidth)
            listOfRows[rowsCounter].shapesPerRow[shapes].widthRatio = shapeWidthRatio
            # Assigning shape height ratio
            shapeHeightRatio = listOfRows[rowsCounter].shapesPerRow[shapes].height / listOfRows[rowsCounter].height
            listOfRows[rowsCounter].shapesPerRow[shapes].heightRatio = shapeHeightRatio
            # Assigning shape allignment: column 1 is cut into three equal thirds.
            shapeAllignment = (listOfRows[rowsCounter].column1Ratio * imageWidth) / 3
            if listOfRows[rowsCounter].shapesPerRow[shapes].x <= shapeAllignment:
                listOfRows[rowsCounter].shapesPerRow[shapes].allignment = "LEFT"
            elif listOfRows[rowsCounter].shapesPerRow[shapes].x <= 2*shapeAllignment:
                listOfRows[rowsCounter].shapesPerRow[shapes].allignment = "Center"
            else:
                listOfRows[rowsCounter].shapesPerRow[shapes].allignment = "RIGHT"
        else:
            listOfRows[rowsCounter].column2Shapes.append(listOfRows[rowsCounter].shapesPerRow[shapes])
            # Assigning shape width ratios
            shapeWidthRatio = listOfRows[rowsCounter].shapesPerRow[shapes].width / (listOfRows[rowsCounter].column2Ratio * imageWidth)
            listOfRows[rowsCounter].shapesPerRow[shapes].widthRatio = shapeWidthRatio
            # Assigning shape height ratio
            shapeHeightRatio = listOfRows[rowsCounter].shapesPerRow[shapes].height / listOfRows[rowsCounter].height
            listOfRows[rowsCounter].shapesPerRow[shapes].heightRatio = shapeHeightRatio
            # Assigning shape allignment: thirds measured from column 1's right edge.
            column1XPoint = (listOfRows[rowsCounter].column1Ratio * imageWidth)
            shapeAllignment = (imageWidth - column1XPoint) / 3
            if listOfRows[rowsCounter].shapesPerRow[shapes].x <= (shapeAllignment + column1XPoint):
                listOfRows[rowsCounter].shapesPerRow[shapes].allignment = "LEFT"
            elif listOfRows[rowsCounter].shapesPerRow[shapes].x <= (2*shapeAllignment + column1XPoint):
                listOfRows[rowsCounter].shapesPerRow[shapes].allignment = "Center"
            else:
                listOfRows[rowsCounter].shapesPerRow[shapes].allignment = "RIGHT"
# Debug report: print every row's column contents and its height.
for i in range(len(listOfRows)):
    print('Column 1 Started')
    for j in range(len(listOfRows[i].column1Shapes)):
        print(listOfRows[i].column1Shapes[j].name, ',', listOfRows[i].column1Shapes[j].allignment)
    print('Column 2 Started')
    for k in range(len(listOfRows[i].column2Shapes)):
        print(listOfRows[i].column2Shapes[k].name, ',', listOfRows[i].column2Shapes[k].allignment)
    print('ROW' + str(i+1) + 'Finished')
    print('ROW Height' + str(i+1) ,listOfRows[i].height)

#cv.waitKey(0)
#cv.destroyAllWindows()
"ahmedmoamen138@gmail.com"
] | ahmedmoamen138@gmail.com |
54157e46485cfe84e785669c8a896e72e4eba04c | 22fc34523f4de64a1e1eea707e01da79e425a043 | /srtmprofile/core/urls.py | 3eb617af2639a39dc20d463863e4fff390506028 | [
"MIT"
] | permissive | marcellobenigno/srtmprofile | 04cdcf4a1f127462dd37d94ec5f368b0f304b932 | 52a2550976ce4ecad2921e53a72ac2ec8a8459b5 | refs/heads/master | 2021-04-03T05:25:54.097968 | 2018-03-15T11:05:02 | 2018-03-15T11:05:02 | 124,605,246 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from django.conf.urls import url
from . import views
# Namespace used when reversing these routes (e.g. 'core:home').
app_name = 'core'
# Route table: site root, the GeoJSON roads layer, and a detail page by pk.
urlpatterns = [
    url(r'^$', views.home, name='home'),
    # Escaped the dot: the previous pattern 'roads.geojson' matched any
    # character in place of '.' (e.g. 'roadsXgeojson').
    url(r'^roads\.geojson$', views.roads_geojson, name='roads_geojson'),
    url(r'^(?P<pk>\d+)/$', views.detail, name='detail'),
]
| [
"benigno.marcello@gmail.com"
] | benigno.marcello@gmail.com |
52a5fc44063f2e73239719204651a2f2b4b3e5e5 | 767b5482f3c5b9c2c85575c711e37561f5b8f198 | /engine/engine_lib/encoderlib.py | 27d186e1e4d625fe001279e1c8110f2ff708818f | [] | no_license | zhupite233/scaner | 8e39c903f295d06195be20067043087ec8baac4f | 7c29c02bca2247a82bcbb91cc86955cc27998c95 | refs/heads/master | 2020-05-18T03:23:03.459222 | 2019-04-15T04:29:10 | 2019-04-15T04:29:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,332 | py | #!/usr/bin/env python
"""
This is the encoding / decoding functions collection for DharmaEncoder. It
allows you to encode and decode various data formats.
(c) 2010 Nathan Hamiel
Email: nathan{at}neohaxor{dot}org
Hexsec Labs: http://hexsec.com/labs
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib
import hashlib
import cgi
import StringIO
import zlib
import decimal
from xml.sax.saxutils import unescape
from xml.sax.saxutils import escape
###################
# Encoder section #
###################
def url_encode(encvalue):
    """ URL encode the specified value. Example Format: Hello%20World

    Works on both Python 2 (urllib.quote) and Python 3 (urllib.parse.quote),
    where the function moved modules.
    """
    try:
        from urllib import quote  # Python 2
    except ImportError:
        from urllib.parse import quote  # Python 3
    try:
        encoded_value = quote(encvalue)
    except Exception:
        encoded_value = "There was a problem with the specified value"
    return(encoded_value)
def full_url_encode(encvalue):
    """ Percent-hex encode every character of the value.
    Example Format: %48%65%6c%6c%6f%20%57%6f%72%6c%64 """
    return "".join(hex(ord(ch)).replace("0x", "%") for ch in encvalue)
def base64_encode(encvalue):
    """ Base64 encode the specified value. Example Format: SGVsbG8gV29ybGQ=

    Accepts str (encoded as UTF-8) or bytes and returns MIME-style Base64
    text with a trailing newline, matching the legacy Python 2 "Base64"
    codec output. Returns an error message string on failure.
    """
    import base64
    try:
        data = encvalue.encode("utf-8") if isinstance(encvalue, str) else encvalue
        # encodebytes (py3) / encodestring (py2) both produce MIME line
        # wrapping with a trailing newline, like the old str.encode("Base64").
        encoder = getattr(base64, "encodebytes", None) or base64.encodestring
        basedata = encoder(data).decode("ascii")
    except Exception:
        basedata = "There was an error"
    return(basedata)
# def html_entity_encode(encvalue):
# """ Encode value using HTML entities. Example Format: """
#####
# Follow up on this. It needs to be fixed
#####
# encoded_value = cgi.escape(encvalue)
# return(encoded_value)
def hex_encode(encvalue):
    """ Hex encode the value. Example Format: 48656c6c6f20576f726c64

    Each character becomes exactly two lowercase hex digits. The previous
    implementation used hex(...).strip("0x"), which strips '0' and 'x'
    characters from BOTH ends, corrupting bytes such as 0x20 (space) or
    0x50 ('P') — visible in its own docstring example.
    """
    return "".join("%02x" % ord(ch) for ch in encvalue)
def hex_entity_encode(encvalue):
    """ Encode value as hex character entities. Example Format: &#x48;&#x65;... """
    return "".join("&#x%x;" % ord(ch) for ch in encvalue)
def unicode_encode(encvalue):
    """ Unicode encode the value in the %uXXXX format. Example:
    %u0048%u0065%u006c%u006c%u006f%u0020%u0057%u006f%u0072%u006c%u0064

    Code points are zero-padded to four hex digits. The previous
    hex().replace("0x", "%u00") approach emitted too few digits for code
    points below 0x10 and a malformed prefix for code points above 0xff.
    """
    return "".join("%%u%04x" % ord(ch) for ch in encvalue)
def escape_xml(encvalue):
    """ Escape the specified HTML/XML value, including single and double
    quotes. Example Format: Hello&amp;World

    The extra-entities map was corrupted (HTML-entity-decoded) in this
    copy of the source; reconstructed to the standard &apos;/&quot; pairs.
    """
    escaped = escape(encvalue, {"'": "&apos;", '"': "&quot;"})
    return(escaped)
def md5_hash(encvalue):
    """ md5 hash the specified value.
    Example Format: b10a8db164e0754105b7a99be72e3fe5

    Accepts bytes, or text which is encoded as UTF-8 first — the original
    passed str straight to hashlib, which fails on Python 3.
    """
    if not isinstance(encvalue, bytes):
        encvalue = encvalue.encode("utf-8")
    hashdata = hashlib.md5(encvalue).hexdigest()
    return(hashdata)
def sha1_hash(encvalue):
    """ sha1 hash the specified value.
    Example Format: 0a4d55a8d778e5022fab701977c5d840bbc486d0

    Accepts bytes, or text which is encoded as UTF-8 first — the original
    passed str straight to hashlib, which fails on Python 3.
    """
    if not isinstance(encvalue, bytes):
        encvalue = encvalue.encode("utf-8")
    hashdata = hashlib.sha1(encvalue).hexdigest()
    return(hashdata)
def sqlchar_encode(encvalue):
    """ SQL CHAR() encode the specified value.
    Example Format: CHAR(72)+CHAR(101)+CHAR(108)+CHAR(108)+CHAR(111) """
    return "+".join("CHAR(%d)" % ord(ch) for ch in encvalue)
####
# oraclechr_encode not tested yet, but should work
####
def oraclechr_encode(encvalue):
    """ Oracle chr() encode the value, joined with the || concatenation
    operator. Example Format: chr(72)||chr(105) """
    return "||".join("chr(%d)" % ord(ch) for ch in encvalue)
def decimal_convert(encvalue):
    """ Concatenate the decimal code point of each character.
    Example Format: 721011081081113287111114108100 """
    return "".join(str(ord(ch)) for ch in encvalue)
def decimal_entity_encode(encvalue):
    """ Encode the value as decimal character entities.
    Example Format: &#72;&#101;... """
    return "".join("&#%d;" % ord(ch) for ch in encvalue)
def rot13_encode(encvalue):
    """ ROT13 encode the specified value.
    Example Format: Uryyb Jbeyq

    Uses codecs.encode because str.encode("rot13") is Python 2 only; the
    codecs route works on both Python 2 and 3.
    """
    import codecs
    return codecs.encode(encvalue, "rot13")
###################
# Decoder section #
###################
def url_decode(decvalue):
    """ URL Decode the specified value. Example Format: Hello%20World

    Works on both Python 2 (urllib.unquote) and Python 3
    (urllib.parse.unquote), where the function moved modules.
    """
    try:
        from urllib import unquote  # Python 2
    except ImportError:
        from urllib.parse import unquote  # Python 3
    returnval = unquote(decvalue)
    return(returnval)
def fullurl_decode(decvalue):
    """ Decode a fully percent-hex-encoded value.
    Example Format: %48%65%6c%6c%6f%20%57%6f%72%6c%64 """
    # split("%") yields empty tokens (e.g. the leading one); skip them.
    return "".join(chr(int(token, 16)) for token in decvalue.split("%") if token != "")
def base64_decode(decvalue):
    """ Base64 decode the specified value.
    Example Format: SGVsbG8gV29ybGQ=

    Uses the base64 module instead of the Python 2-only "Base64" codec;
    returns bytes on Python 3. On invalid input the explanatory message
    string is returned, mirroring the original behaviour.
    """
    msg = """ There was an error. Most likely this isn't a valid Base64 value
    and Python choked on it """
    import base64
    try:
        return base64.b64decode(decvalue)
    except Exception:
        return(msg)
def hex_decode(decvalue):
    """ Hex decode the specified value.
    Example Format: 48656c6c6f20576f726c64 """
    msg = """ There was an error, perhaps an invalid length for the hex
    value """
    # str.decode("hex") is Python 2 only; bytearray.fromhex exists on both
    # Python 2.7 and 3.  Map each byte back to a character for str output.
    try:
        return "".join(map(chr, bytearray.fromhex(decvalue)))
    except (ValueError, TypeError):
        # Odd length or non-hex digits.
        return(msg)
def hexentity_decode(decvalue):
    """ Hex entity decode the specified value.
    Example Format: &#48;&#65;&#6c;... (each token is a hex char code) """
    # Tokens between ';' separators look like '&#48'; replacing the '&#'
    # prefix with '0' yields a parseable hex literal such as '048'.
    decoded = []
    for token in decvalue.split(";"):
        # split() produces a trailing empty token that int() would choke on
        if token == "":
            continue
        decoded.append(chr(int(token.replace("&#", "0"), 16)))
    return "".join(decoded)
def unescape_xml(decvalue):
    """ Unescape the specified HTML or XML value, e.g.
    &quot;Hello&quot; &amp; &apos;World&apos; -> "Hello" & 'World' """
    # The extra-entity map was mangled by entity-decoding at some point
    # (it read {"'": "'", """: '"'}, which is not even valid syntax);
    # restore the intended &apos;/&quot; entries, which saxutils.unescape
    # does not handle by default (it only covers &amp;, &lt; and &gt;).
    from xml.sax.saxutils import unescape
    unescaped = unescape(decvalue, {"&apos;": "'", "&quot;": '"'})
    return(unescaped)
def unicode_decode(decvalue):
    """ Unicode decode the specified value %u00 format.
    Example Format: %u0048%u0065%u006c%u006c%u006f%u0020%u0057%u006f%u0072%u006c%u0064 """
    # Splitting on the "%u00" prefix leaves bare two-digit hex codes;
    # (the original also ran a replace("%u00", "0") on each token, but
    # split() already removed the separator, so that was a no-op).
    decoded = []
    for token in decvalue.split("%u00"):
        if token == "":
            continue
        decoded.append(chr(int(token, 16)))
    return "".join(decoded)
def rot13_decode(decvalue):
    """ ROT13 decode the specified value. Example Format: Uryyb Jbeyq
    ROT13 rotates letters by 13 places; it is its own inverse, so a value
    encoded with the same transform decodes back to the original.
    """
    # str.decode("rot13") is Python 2 only; codecs works on both 2 and 3.
    import codecs
    return codecs.decode(decvalue, "rot13")
| [
"lixiang@yundun.com"
] | lixiang@yundun.com |
225aaf3cdf09abdad88c9d81dc062d86ce62c1f7 | 16bb2395e3e1c51450a242c1f13718734686c749 | /src/models/ERGM_CVX.py | c89b1232c8587cc1ae750850d3c32148d94d5507 | [
"MIT"
] | permissive | DanqingZ/CPS_TRC | 8a05edacf2aa2ec1f260aa08fa19515779869a6b | 47f98ffc909e2cc9759a0f273a521b3ba189a02f | refs/heads/master | 2021-05-01T01:56:35.471342 | 2019-08-26T17:15:32 | 2019-08-26T17:15:32 | 82,489,734 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,662 | py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from cvxpy import *
class ERGM_CVX:
    """Fit a logistic ERGM-style edge model with CVXPY (legacy 0.x API).

    E     : DataFrame of observed edges (columns 0, 1 = endpoint names)
    C     : community label per vertex
    V     : vertex names (first element of each row is the name)
    E_all : all vertex pairs; column 1 holds the pairwise distance
    """
    def __init__(self, E, C, V, E_all):
        self.E = E
        self.C = C
        self.V = V
        self.E_all = E_all
    def run_CVX(self):
        """Solve the logistic regression; stores weights in self.W, self.b."""
        n = len(self.C)
        community = self.C.values.tolist()
        # Feature 0: same-community indicator for every ordered vertex pair.
        # ('features' replaces the original local 'input', which shadowed
        # the builtin of the same name.)
        features = np.zeros((n * n, 2))
        c_matrix = np.zeros((n, n))
        for i in range(n):
            for j in range(n):
                if i != j and community[i] == community[j]:
                    c_matrix[i, j] = 1
        features[:, 0] = c_matrix.reshape(n * n)
        # Feature 1: pairwise distance.
        features[:, 1] = self.E_all[1].values.tolist()
        # Map edge endpoint names to integer vertex indices.  The index
        # arrays must be integer dtype: float arrays (np.zeros default)
        # are rejected as indices by modern NumPy.
        start = self.E[0].values.tolist()
        end = self.E[1].values.tolist()
        names = self.V.values.tolist()
        start_int = np.zeros(len(start), dtype=int)
        end_int = np.zeros(len(start), dtype=int)
        for i in range(len(start)):
            for j in range(len(names)):
                if names[j][0] == start[i]:
                    start_int[i] = j
                if names[j][0] == end[i]:
                    end_int[i] = j
        import cvxpy as cvx
        w = cvx.Variable(2)
        b = cvx.Variable(1)
        # Labels: +1 for an observed edge, -1 otherwise.  (The original
        # also built a 0/1 Y first, then immediately overwrote it with
        # this -1/+1 version; the dead computation has been removed.)
        Y_matrix = np.ones((n, n)) * (-1)
        for i in range(len(start_int)):
            Y_matrix[start_int[i], end_int[i]] = 1
        Y = Y_matrix.reshape(n * n)
        # Logistic loss on the signed margins (legacy cvxpy 0.x API names).
        loss = cvx.sum_entries(cvx.logistic(-cvx.mul_elemwise(Y, features * w + np.ones((n * n, 1)) * b)))
        problem = cvx.Problem(cvx.Minimize(loss))
        problem.solve(verbose=True)
        self.W = w.value
        self.b = b.value
| [
"danqing0703@berkeley.edu"
] | danqing0703@berkeley.edu |
a397b67df4bf1082a8f0cd0373bcb051241d51ce | e008a757fc02e6fcc725cc079b373f7c5cfefa28 | /experiments/analysis_gtex_feature_explore.py | b96d4acfe61ff7ff19f4fa1616583928de208ebe | [] | no_license | sc130/AdaFDRpaper | 74e3b67548f0f2811ac1a4d170f379396dd60440 | c564f24a513cf505c4ac7ab07e960d4ef6be1b9b | refs/heads/master | 2022-01-22T16:59:57.285565 | 2019-07-31T18:34:35 | 2019-07-31T18:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,398 | py | ## system settings
import matplotlib
matplotlib.use('Agg')
import logging
import os
import sys
import argparse
import adafdr.data_loader as dl
import adafdr.method as md
import time
import matplotlib.pyplot as plt
import pickle
def main(args):
    """Run the AdaFDR feature-exploration analysis for one GTEx dataset.

    Uses args.data_name (dataset to load) and args.output_folder (suffix of
    the result directory).  Writes plots/results via md.adafdr_explore.
    """
    alpha = 0.01
    # Result directory: ../result_gtex_feature_explore/result_<suffix>.
    # Create it if missing, otherwise empty it out.
    output_folder = os.path.realpath('..') + '/result_gtex_feature_explore/result_'\
        + args.output_folder
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    else:
        # Plain loop instead of a side-effecting list comprehension.
        for f in os.listdir(output_folder):
            os.remove(os.path.join(output_folder, f))
    # Load the data (cis_name is part of the returned tuple but unused here).
    p, x, n_full, cate_name, cis_name = dl.load_GTEx(args.data_name,\
                                                     if_impute=False)
    # Run the feature-exploration step.
    md.adafdr_explore(p, x, alpha=alpha, n_full=n_full, vis_dim=None, cate_name=cate_name,\
                      output_folder=output_folder, h=None)
if __name__ == '__main__':
    # Command-line entry point.
    # NOTE(review): --data_loader is required but never read by main(),
    # while --data_name is optional yet main() depends on it -- confirm
    # the intended contract with the callers of this script.
    parser = argparse.ArgumentParser(description='Side-info assisted multiple hypothesis testing')
    parser.add_argument('-d', '--data_loader', type=str, required=True)
    parser.add_argument('-n', '--data_name', type=str, required=False)
    parser.add_argument('-o', '--output_folder', type=str, required = True)
    args = parser.parse_args()
main(args) | [
"martinjzhang@gmail.com"
] | martinjzhang@gmail.com |
9591bbc70a14e16ba63940263937edaeb6058ed4 | 1de19b4527989a29a15cb9ccf3b40c6cbce6e906 | /http/http_client.py | 87802227635ca277305215dce27147758e79c4ec | [] | no_license | rjcostales/python | 468406184bb984e893c3217e74dabff0be4d24c1 | ab09a421a0b9d806a9c8dbef9dc166274a19b4d2 | refs/heads/master | 2021-01-17T11:48:14.512859 | 2018-11-07T20:46:50 | 2018-11-07T20:46:50 | 38,708,542 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | import http.client
# Minimal http.client demo: GET the python.org front page and dump the
# response status, headers and body.
url = 'www.python.org'
conn = http.client.HTTPConnection(url)
try:
    conn.request("GET", "/")
    response = conn.getresponse()

    print('\nstatus')
    print(response.status)

    print('\nmsg')
    print(response.msg)

    print('\nheaders')
    print(response.getheaders())
    print(response.getheader("date"))
    print(response.getheader('content-type'))
    print(response.getheader('content-length'))

    print('\nresponse')
    # response.length is the remaining body length announced by the server.
    length = response.length
    print(length)
    print(response.read(length))
finally:
    # The original never closed the connection; always release the socket.
    conn.close()
| [
"jesse.costales@sigfig.com"
] | jesse.costales@sigfig.com |
d2534e7f9ed2539c6ec7228c87061771a60c4676 | 1d11288ec1a5d98dcf66c4ca45072ffd29901de0 | /mrp_extend/models/mrp_bom_line.py | 0731280072097855fc742fa848452a84c7f6fb29 | [] | no_license | pyrun13/addons | 14202e273c802cee391a68474a6bdc7cf062b25c | b81650d81e0a227dd4fc460846e53ce5e61a8cc1 | refs/heads/master | 2020-09-07T21:48:18.673226 | 2019-11-12T16:15:06 | 2019-11-12T16:15:06 | 220,921,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | from odoo import models, fields, api, exceptions
class MrpBomLine(models.Model):
    """BOM line extended with an attrition (scrap) rate percentage."""
    _inherit = 'mrp.bom.line'

    # Attrition rate in percent; the label is intentionally localized.
    attrition_rate = fields.Float(string='损耗率(%)')

    def write(self, vals):
        """Reject negative attrition rates, then delegate to the base write.

        The bare vals.get('attrition_rate', 0) would raise a TypeError on
        an explicit None value (None < 0), so any falsy value is coerced
        to 0 before the comparison.
        """
        attrition_rate = vals.get('attrition_rate', 0) or 0
        if attrition_rate < 0:
            raise exceptions.ValidationError('损耗率不能为负数!')
        return super(MrpBomLine, self).write(vals)
| [
"xiongjianhong@gmail.com"
] | xiongjianhong@gmail.com |
16576534e3c49ba3775bb663e766209077577950 | a467c2c1f691095c0e13db08dd3e939f57ec2096 | /myenv/lib/python2.7/warnings.py | a0bd7105fc010dcd2c1b0f38d8a6f32eca1a30aa | [] | no_license | Etheri/my-first-blog | 88e7e5dfd6b31206964cef3213c877e717c88d0c | af14f10029bc9168d5875f5978342279c8bdd416 | refs/heads/master | 2021-01-23T05:09:58.496749 | 2017-05-31T14:44:38 | 2017-05-31T14:44:38 | 92,956,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | /home/bednyakova/anaconda2/lib/python2.7/warnings.py | [
"bednyakova.a@gmail.com"
] | bednyakova.a@gmail.com |
ac338df45f6245caeacf3c665331db8a223ae9b2 | a1af0d3029d0dc2715a3ee55a1767351d18a6f9f | /LC/2115_find_recipes.py | 29bff113ed5592ca5f8200edface44f74795b78a | [] | no_license | zona-li/practice_in_python | b36c12112ec6b27cdb3b9bf83b315ac021d0c4e2 | 2906d12452ec059a7e39fb772391082a576591f0 | refs/heads/master | 2022-09-07T18:02:03.520425 | 2022-09-04T22:40:26 | 2022-09-04T22:40:26 | 100,322,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | # recipes = ["burger", "sandwich", "bread"]
# ingredients = [["sandwich","meat","bread"],["bread","meat"],["yeast","flour"]]
# supplies = ["yeast","flour","meat"]
from collections import Counter, defaultdict, deque
from typing import List
from traitlets import default
class Solution:
    def findAllRecipes(self, recipes: List[str], ingredients: List[List[str]], supplies: List[str]) -> List[str]:
        """Return every recipe that can be cooked (Kahn's topological sort).

        Each recipe's in-degree is its number of ingredients; the supplies
        seed the queue, and a recipe is emitted once all of its
        ingredients have been resolved.
        """
        dependents = defaultdict(set)   # ingredient -> recipes that need it
        indegree = defaultdict(int)     # recipe -> unresolved ingredient count
        for recipe, needed in zip(recipes, ingredients):
            for ingredient in needed:
                dependents[ingredient].add(recipe)
                indegree[recipe] += 1
        recipe_set = set(recipes)       # O(1) membership instead of an O(n) list scan
        cookable = []
        queue = deque(supplies)
        while queue:
            item = queue.popleft()      # renamed from 'next', which shadowed the builtin
            if item in recipe_set:
                cookable.append(item)
            for recipe in dependents[item]:
                indegree[recipe] -= 1
                if indegree[recipe] == 0:
                    queue.append(recipe)
        return cookable
# Quick smoke test; expected output: ['bread', 'sandwich', 'burger'].
solver = Solution()
print(solver.findAllRecipes(["burger", "sandwich", "bread"], [["sandwich","meat","bread"],["bread","meat"],["yeast","flour"]], ["yeast","flour","meat"]))
| [
"haoyang.zona@gmail.com"
] | haoyang.zona@gmail.com |
b11487f305e77ea0ce64db973c3e20b4db2f6b9a | 8cf28f19ec1d4ac902a5c53a3a4f23a21f125af4 | /list_dictionary.py | b5c6314af499e0bf32f6f2500a4bbd104d9d54b8 | [] | no_license | thydev/dojo-pythonbasic | e8ba6e986c8eefd45f15bf4d66802323da46e96e | 1c96ef09b2e7f9b65c5e9a6b78dce5fd7ca675e3 | refs/heads/master | 2021-01-25T14:55:49.532555 | 2018-03-05T16:18:21 | 2018-03-05T16:18:21 | 123,735,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | # Create a function that takes in two lists and creates a single dictionary.
# The first list contains keys and the second list contains the values.
# Assume the lists will be of equal length.
name = ["Anna", "Eli", "Pariece", "Brendan", "Amy", "Shane", "Oscar","Dojo", "coding"]  # 9 keys
favorite_animal = ["horse", "cat", "spider", "giraffe", "ticks", "dolphins", "llamas", "fish"]  # 8 values -- one shorter on purpose, to exercise the padding branch
# print zip(name, favorite_animal) # Create Tuple
def make_dict(list1, list2):
    """Zip two lists into a dict (first list = keys, second = values).

    If the lengths differ, the longer list supplies the keys and the
    missing values are filled with "".  Note: when list2 is the longer
    one, the key/value roles are therefore swapped -- this preserves the
    original behaviour.
    """
    # One loop replaces the three near-identical index loops of the
    # original (equal lengths / list1 longer / list2 longer).
    if len(list1) >= len(list2):
        keys, values = list1, list2
    else:
        keys, values = list2, list1
    new_dict = {}
    for i, key in enumerate(keys):
        new_dict[key] = values[i] if i < len(values) else ""
    return new_dict
# Build the demo mapping; the unmatched key "coding" gets "" as its value.
d = make_dict(name, favorite_animal)
print d | [
"nginchanthy@gmail.com"
] | nginchanthy@gmail.com |
a8b812a267f75e48be0ef972c448526f65c16f4c | d7f8d98b7ce4ca157d44f8124b374887c5a55f79 | /project1/critic/splitgd.py | d1d823dca5d1a5e538420a9032083b1bbb91af7b | [] | no_license | oaowren/IT3105---Artificial-Intelligence-Programming | 1961e4487c4bc73aea527ff3c7f57838e69986a5 | e98c68f231f665e1659e5efcb4e42a785ca64ca6 | refs/heads/master | 2023-05-21T16:48:49.271201 | 2021-05-31T12:26:57 | 2021-05-31T12:26:57 | 328,962,486 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,177 | py | import math
import tensorflow as tf
import numpy as np
# ************** Split Gradient Descent (SplitGD) **********************************
# This "exposes" the gradients during gradient descent by breaking the call to "fit" into two calls: tape.gradient
# and optimizer.apply_gradients. This enables intermediate modification of the gradients. You can find many other
# examples of this concept online and in the (excellent) book "Hands-On Machine Learning with Scikit-Learn, Keras,
# and Tensorflow", 2nd edition, (Geron, 2019).
# This class serves as a wrapper around a keras model. Then, instead of calling keras_model.fit, just call
# SplitGD.fit.
#
# WARNING. In THEORY, you should be able to use this class by just subclassing it and writing your own code
# for the "modify_gradients" method. However, there are many practical issues involving versions of tensorflow, use
# of keras and the tensorflow backend, etc. So the main purpose of this file is to show the basics of how you can
# split gradient descent into two parts using tf.GradientTape. Many similar examples exist online, but, again, there
# is no guarantee that they will work seamlessly with your own code.
class SplitGD():
    """Wrapper around a Keras model that splits gradient descent into two
    steps (tape.gradient + optimizer.apply_gradients) so the gradients can
    be inspected/modified in between.  Call SplitGD.fit instead of
    keras_model.fit."""

    def __init__(self, keras_model, critic):
        self.model = keras_model
        self.critic = critic

    def modify_gradients(self, gradients):
        """Hook for intermediate gradient modification; delegates to the critic.
        Subclass this (or swap the critic) for other behaviour."""
        gradients = self.critic.modify_gradients(gradients)
        return gradients

    def gen_loss(self, features, targets, avg=False):
        """Return the tensor of per-sample losses, or their mean as a plain
        number when avg=True."""
        predictions = self.model(features)  # feed-forward pass
        loss = self.model.loss(targets, predictions)  # model.loss = the loss function
        return tf.reduce_mean(loss).numpy() if avg else loss

    def fit(self, features, targets, epochs=1, mbs=1, vfrac=0.1, verbosity=0, callbacks=None):
        """SGD training loop with exposed gradients.

        mbs = minibatch size, vfrac = validation fraction.  `callbacks`
        now defaults to None instead of a mutable [] (the shared mutable
        default pitfall), as do the other methods below.
        """
        callbacks = [] if callbacks is None else callbacks
        params = self.model.trainable_weights
        train_ins, train_targs, val_ins, val_targs = split_training_data(features, targets, vfrac=vfrac)
        for cb in callbacks:
            cb.on_train_begin()
        for epoch in range(epochs):
            for cb in callbacks:
                cb.on_epoch_begin(epoch)
            for _ in range(math.floor(len(train_ins) / mbs)):
                with tf.GradientTape() as tape:  # records ops for differentiation
                    feaset, tarset = gen_random_minibatch(train_ins, train_targs, mbs=mbs)
                    loss = self.gen_loss(feaset, tarset, avg=False)
                gradients = tape.gradient(loss, params)
                gradients = self.modify_gradients(gradients)
                self.model.optimizer.apply_gradients(zip(gradients, params))
            if verbosity > 0:
                self.end_of_epoch_action(train_ins, train_targs, val_ins, val_targs, epoch,
                                         verbosity=verbosity, callbacks=callbacks)
        for cb in callbacks:
            cb.on_train_end()

    def gen_evaluation(self, features, targets, avg=False, verbosity=0, callbacks=None):
        """Return (metric evaluation, loss) from model.evaluate.
        Verbosity: 0/1 = silent here, 2 = let TF print as well.
        (`avg` is kept for interface compatibility; it is unused.)"""
        callbacks = [] if callbacks is None else callbacks
        loss, evaluation = self.model.evaluate(features, targets, callbacks=callbacks,
                                               batch_size=len(features), verbose=(1 if verbosity == 2 else 0))
        return evaluation, loss

    def status_display(self, val, loss, verbosity=1, mode='Train'):
        """Print one loss/eval status fragment (no trailing newline)."""
        if verbosity > 0:
            print('{0} *** Loss: {1} Eval: {2}'.format(mode, loss, val), end=' ')

    def end_of_epoch_action(self, train_ins, train_targs, valid_ins, valid_targs, epoch, verbosity=1, callbacks=None):
        """Report loss/metric on the full training (and validation) sets."""
        callbacks = [] if callbacks is None else callbacks
        print('\n Epoch: {0}'.format(epoch), end=' ')
        # Loss and evaluation for the entire training set.
        val, loss = self.gen_evaluation(train_ins, train_targs, avg=True, verbosity=verbosity, callbacks=callbacks)
        self.status_display(val, loss, verbosity=verbosity, mode='Train')
        val2, loss2 = 0, 0
        if len(valid_ins) > 0:  # loss and evaluation for the validation set
            val2, loss2 = self.gen_evaluation(valid_ins, valid_targs, avg=True, verbosity=verbosity, callbacks=callbacks)
            self.status_display(val2, loss2, verbosity=verbosity, mode='Validation')
        self.update_callbacks(epoch, (loss, val, loss2, val2), callbacks)

    def update_callbacks(self, epoch, quad, callbacks=None):
        """Forward the (loss, metric, val_loss, val_metric) quad to callbacks."""
        callbacks = [] if callbacks is None else callbacks
        cb_log = {"loss": quad[0], "metric": quad[1], "val_loss": quad[2], "val_metric": quad[3]}
        for cb in callbacks:
            cb.on_epoch_end(epoch, cb_log)
# A few useful auxiliary functions
def gen_random_minibatch(inputs, targets, mbs=1):
    """Draw a random minibatch of size mbs (sampled with replacement)."""
    # One shared index draw keeps inputs and targets aligned.
    chosen = np.random.randint(len(inputs), size=mbs)
    return inputs[chosen], targets[chosen]
# This returns: train_features, train_targets, validation_features, validation_targets
def split_training_data(inputs, targets, vfrac=0.1, mix=True):
    """Split the data into (train_feats, train_targs, val_feats, val_targs).

    vfrac is the fraction of cases reserved for validation; when it is 0
    the inputs are returned unchanged with empty validation lists.  With
    mix=True the cases are shuffled before splitting.
    """
    vc = round(vfrac * len(inputs))  # number of validation cases
    if vfrac > 0:
        pairs = list(zip(inputs, targets))
        if mix:
            np.random.shuffle(pairs)
        vcases = pairs[0:vc]
        tcases = pairs[vc:]
        return (np.array([case[0] for case in tcases]),
                np.array([case[1] for case in tcases]),
                np.array([case[0] for case in vcases]),
                np.array([case[1] for case in vcases]))
    else:
        return inputs, targets, [], []
"noreply@github.com"
] | noreply@github.com |
05af6eb6e60b4748045485fcbf36d751acf72583 | 0c7ff0ec35ba2bb38f99ef6ecb261ec33466dd52 | /Day1/day1Project.py | 2d1e56254a4ef4fd53ab5a15fdd51db183e510ec | [] | no_license | TheKinshu/100-Days-Python | 15cbacc608ee349cc9733a7032e10a359bebb731 | 293ad6b3e5f5208da84efbc5b2d2d395a5a53421 | refs/heads/master | 2023-04-18T08:21:30.361800 | 2021-05-02T18:48:39 | 2021-05-02T18:48:39 | 351,582,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | #1. Create a greeting for your program.
# Interactive band-name generator: combines the user's city and pet name.
#1. Create a greeting for your program.
print("Welcome to the Band Name Generator.")
#2. Ask the user for the city that they grew up in.
# NOTE(review): the prompt typo "gre up" is user-facing runtime text,
# deliberately left unchanged here.
city = input("What's name of the city you gre up in?\n")
#3. Ask the user for the name of a pet.
pet = input("What's your pet's name?\n")
#4. Combine the name of their city and pet and show them their band name.
print("Your band name could be " + city + " " + pet)
#5. Make sure the input cursor shows on a new line, see the example at:
# https://band-name-generator-end.appbrewery.repl.run/ | [
"kc007919@gmail.com"
] | kc007919@gmail.com |
2e3138b7aebe9b0d818303c674da9144988dee2d | 2b0eab74af8d23244ff11699830f9bb10fbd717a | /helpers/mixins/unpack_tags_mixin.py | 5e6e4c11c733fc5368427ac90ddb23bf2e781302 | [] | no_license | alexandrenorman/mixeur | c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b | 95d21cd6036a99c5f399b700a5426e9e2e17e878 | refs/heads/main | 2023-03-13T23:50:11.800627 | 2021-03-07T15:49:15 | 2021-03-07T15:49:15 | 345,384,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | from .unpack_ids_mixin import UnpackIdsMixin
class UnpackTagsMixin(UnpackIdsMixin):
"""
Mixin to apply on a ModelViewSet which transform registered fields from string containing ids to list of objects
"1,2,3" => [<Obj id=1>, <Obj id=2>, <Obj id=3>]
If a string passed, it will create a new instance of given model with given model name field
"1,2,truc" => [<Obj id=1 name=...>, <Obj id=2 name=...>, <new Obj id=3 name="truc">]
Should define unpackable fields like this :
unpackable_fields = {'data_field_name': (ModelName, 'model_field_name')}
"""
def get_item_id(self, word, options):
"""
If given tag contain only digits, use it as id, else create the instance
"""
item_id = None
if word.isdigit():
item_id = int(word)
elif options:
tag_model, tag_model_field = options
existing_tag = tag_model.objects.filter(**{tag_model_field: word}).first()
if existing_tag:
item_id = existing_tag.id
elif word != "":
item_id = tag_model.objects.create(**{tag_model_field: word}).id
else:
return {"id": None}
if item_id is not None:
return {"id": item_id}
| [
"norman@xael.org"
] | norman@xael.org |
ea06dfdc414399d140d3ee55f76920fd6e8f97c9 | b76990d490d87517e01f60e3f010de273e473725 | /naive_bayesian_for_text.py | 3479592ef6ac9058ae8cf48977118cfc0a7bc267 | [] | no_license | manju838/machine_learing_algo_python | 10799bba48e9e850cff397e4ede4ae1ca61c679b | d679f2df11b963bd926842d46db7e6235ff511a8 | refs/heads/master | 2022-03-06T04:30:11.481001 | 2019-10-24T01:26:55 | 2019-10-24T01:26:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,751 | py | import numpy as np
import text_preprocess
class NaiveBayesianForText:
def fit(self, X, y):
'''
Parameters
----------
X : shape (n_corpus, text_length)
Training corpus
y : shape (n_corpus,)
Target values
'''
self.__classes = np.unique(y)
n_classes = len(self.__classes)
self.__p_classes = [np.mean(y == label) for label in self.__classes]
self.__model = text_preprocess.Tfidf()
word_vector = self.__model.fit_transform(X)
word_of_classes = np.ones((n_classes, len(self.__model.word_dictionary)))
word_of_classes_total = np.full(n_classes, n_classes)
for i in range(n_classes):
word_of_classes[i] += np.sum(word_vector[np.flatnonzero(y == self.__classes[i])], axis=0)
word_of_classes_total[i] += np.sum(word_of_classes[i])
self.__p_word_of_classes = word_of_classes / word_of_classes_total.reshape((-1, 1))
def predict(self, X):
'''
Parameters
----------
X : shape (n_corpus, text_length)
Predicting corpus
Returns
-------
y : shape (n_corpus,)
Predicted class label per sample
'''
n_samples = len(X)
word_vector = np.zeros((n_samples, len(self.__model.word_dictionary)))
for i in range(n_samples):
_, indexes, _ = np.intersect1d(self.__model.word_dictionary, X[i], return_indices=True)
word_vector[i, indexes] = 1
p_class_of_doc = word_vector.dot(np.log(self.__p_word_of_classes).T) + np.log(self.__p_classes)
return self.__classes[np.argmax(p_class_of_doc, axis=1)] | [
"zhaoyi3@lenovo.com"
] | zhaoyi3@lenovo.com |
e231384f1e02475631385dcbf48de464cedf8272 | b08b7d8561b78cdf0b245c79b577bfbc4f1805b7 | /autoclicker.py | e1f3999aa77b27481fd1823968b72a10f5f9484e | [] | no_license | AadityaKandel/AutoClicker | a3a679630ebadb048acd4cc027dfb1c0c7629d34 | 0c6559e434e40533fecf4289cd69276f13e118b6 | refs/heads/main | 2023-02-05T11:03:25.744489 | 2020-12-29T11:59:49 | 2020-12-29T11:59:49 | 325,274,803 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,394 | py | try:
from tkinter import *
import pyautogui as pp
import keyboard
# Import Success
root = Tk()
mouse = StringVar()
mouse.set("0")
mousee = StringVar()
mousee.set("0")
def loc():
for i in range(0,999999999):
act.set('Activated [ Shift+Q [ Deactivate ] ]')
root.update()
locc.set('Press Ctrl+Q to Stop')
if keyboard.is_pressed("ctrl+q"):
dd = pp.position()
locc.set("Find Mouse Location")
mouse.set(f"{dd[0]}")
mousee.set(f"{dd[1]}")
break
Label(text = "Welcome To AUTO CLICKER",font = "comicsansms 14 bold",bg = "black",fg = "white").pack()
Label(text = "",font = "arial",bg = "white",fg = "white",borderwidth = 1,width = 45).pack()
def ff():
Label(text = "",bg = "black").pack()
ff()
locc = StringVar()
locc.set("Find Mouse Location")
Button(textvariable = locc,font = "comicsansms 14 bold",bg = "black",fg = "white",command = loc).pack(anchor = W)
f1 = Frame(borderwidth = 10,bg = "black")
f3 = Frame(borderwidth = 10,bg = "black")
f4 = Frame(borderwidth = 10,bg = "black")
Label(f1,text = "Mouse X: ",font = "comicsansms 14 bold",bg = "black",fg = "white").pack(side = LEFT)
Entry(f1,textvariable = mouse,font = "comicsansms 14 bold",bg = "black",fg = "white",width = 7,justify = "right").pack(side = LEFT)
Label(f1,text = "Mouse Y: ",font = "comicsansms 14 bold",bg = "black",fg = "white").pack(side = LEFT)
Entry(f1,textvariable = mousee,font = "comicsansms 14 bold",bg = "black",fg = "white",width = 7,justify = "right").pack(side = LEFT)
f1.pack(anchor = W)
Label(text = "",font = "arial",bg = "white",fg = "white",borderwidth = 1,width = 45).pack()
ff()
interval = DoubleVar()
interval.set(1)
def plusb():
interval.set((interval.get())+0.1)
def subb():
interval.set((interval.get())-0.1)
Label(f3,text = "Wait After 1 Click: ",font = "comicsansms 14 bold",bg = "black",fg = "white").pack(side = LEFT)
Entry(f3,textvariable = interval,font = "comicsansms 14 bold",bg = "black",fg = "white",width = 5,justify = "right").pack(side = LEFT)
Label(f3,text = " ",font = "comicsansms 14 bold",bg = "black",fg = "white").pack(side = LEFT)
Button(f3,text = "+",font = "comicsansms 14 bold",bg = "black",fg = "white",command = plusb).pack(side = LEFT)
Label(f3,text = " ",font = "comicsansms 14 bold",bg = "black",fg = "white").pack(side = LEFT)
Button(f3,text = "-",font = "comicsansms 14 bold",bg = "black",fg = "white",command = subb).pack(side = LEFT)
f3.pack(anchor = W)
Label(text = "",font = "arial",bg = "white",fg = "white",borderwidth = 1,width = 45).pack()
ff()
ff()
act = StringVar()
act.set("[ Shift+A ] Activate")
Button(textvariable = act,font = "comicsansms 14 bold",bg = "black",fg = "white").pack(side = BOTTOM)
root.config(bg = "black")
import time
for i in range(0,999999999):
root.update()
if keyboard.is_pressed('shift+a'):
act.set('Activated [ Shift+Q [ Deactivate ] ]')
for i in range(0,999999999999999999999):
root.update()
if keyboard.is_pressed('shift+q'):
root.update()
act.set("[ Shift+A ] Activate")
break
else:
pp.click(x=eval((mouse.get())),y=eval((mousee.get())))
pp.click(x=eval((mouse.get()))+1,y=eval((mousee.get())))
time.sleep((interval.get()))
act.set("[ Shift+A ] Activate")
root.mainloop()
except:
quit() | [
"noreply@github.com"
] | noreply@github.com |
5e181f655825792a06adea89c63d5a696c7e7028 | 6dbd108198759b98ed044fc740d79d775553636a | /3.py | 5d37ad979bece10dcce4e04d6e362609616466bc | [] | no_license | linh6666/baitaptuan7-xstk | c5ca69e2b292b5c2aab6304f3242585240fff7f1 | 712922ede096a76704378e5ad9a5ed1229c81786 | refs/heads/main | 2023-08-18T19:07:13.313201 | 2021-10-09T06:14:19 | 2021-10-09T06:14:19 | 415,216,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | import numpy as np
a = np.zeros((2, 512 * 512), dtype=np.float32)
a[0, :] = 1.0
a[1, :] = 0.1
print("a.shape: ", a.shape)
print("mean a = ", np.mean(a))
© 2021 GitHub, Inc.
| [
"noreply@github.com"
] | noreply@github.com |
3b0761a6d612bc898fd451258885973dbdba8234 | de3e36fb6ed1a94c3b8b0313f426d4f74c858fad | /industry/hw3/checkers/svm_impl_shtanko.py | 601f22ae73bcdf347265b05fc1e140f934602597 | [] | no_license | leonshting/data_mining_in_action_2017 | 5415c542de56013bc5b2ef21137e2347bf2f1765 | baeb379213e44c6f38d73f845a5c673ce78f2cf3 | refs/heads/master | 2021-05-07T01:20:52.985394 | 2017-11-18T16:02:25 | 2017-11-18T16:02:25 | 110,332,082 | 0 | 0 | null | 2017-11-11T09:02:35 | 2017-11-11T09:02:35 | null | UTF-8 | Python | false | false | 3,678 | py | import numpy as np
from sklearn.base import BaseEstimator
SVM_PARAMS_DICT = {
'C': 100,
'random_state': 42,
'iters': 1000,
'batch_size': 10,
}
import numpy as np
from random import randint
import random
np.random.seed(42)
random.seed(42)
class MySVM(object):
def __init__(self, C=10000, batch_size = 100, iters=10000, **kwargs):
self.C = C # regularization constant
self.batch_size = batch_size
self.iters = iters
# f(x) = <w,x> + w_0
def f(self, x):
return np.dot(self.w, x) + self.w0
# a(x) = [f(x) > 0]
def a(self, x):
return 1 if self.f(x) > 0 else -1
# predicting answers for X_test
def predict(self, X_test):
o_o = np.array([self.a(x) for x in X_test])
o_o[o_o == -1] = 0
return o_o
# l2-regularizator
def reg(self):
return 1.0 * sum(self.w ** 2) / (2.0 * self.C)
# l2-regularizator derivative
def der_reg(self):
return self.w/self.C
# hinge loss vectorized
def loss(self, x, answer):
return np.vectorize(lambda x_v, answer_v: max([0, 1 - answer_v * self.f(x_v)]),
signature='(m),()->()')(x, answer)
# hinge loss derivative
def _dl(self, x_v, answer_v):
return -answer_v if 1 - answer_v * self.f(x_v) > 0 else 0.0
def der_loss(self, x, answer):
return np.vectorize(lambda x_v, answer_v: self._dl(x_v, answer_v), signature=
'(m),()->()')(x, answer)
def der_loss_wrt_w(self, x, answer):
#print(self.der_loss(x, answer))
return np.mean((np.multiply(x.T, self.der_loss(x, answer))), axis=1)
def der_loss_wrt_w0(self, x, answer):
return np.mean(self.der_loss(x, answer))
def trans_to_01(self, y):
y[y == -1] = 1
return y
def trans_to_11(self, y):
y[y == 0] = -1
return y
def get_params(self, *args, **kwargs):
return {
'C': self.C,
'batch_size': self.batch_size,
'iters': self.iters
}
# fitting w and w_0 with SGD
def fit(self, X_train, y_train):
dim = len(X_train[0])
self.w = np.random.rand(dim) # initial value for w
self.w0 = np.random.randn() # initial value for w_0
y_train = self.trans_to_11(y_train)
# 10000 steps is OK for this example
# another variant is to continue iterations while error is still decreasing
loss_a = 1.
delta = 1.
cnt = 0
glob_cnt = 0
#stops if too long
while (cnt<100 or abs(delta/loss_a) > 1e-3) and glob_cnt < self.iters:
# random example choise
# rand_index = randint(0, len(X_train) - 1,) # generating random index
rand_index = np.random.randint(low=0, high=X_train.shape[0], size=self.batch_size)
x = X_train[rand_index]
y = y_train[rand_index]
loss_b = self.loss(x, y).sum()
# simple heuristic for step size
step = 1./(glob_cnt+1)
# w update
#print(self.der_loss_wrt_w(x, y), self.der_reg())
self.w += step * (-self.der_loss_wrt_w(x, y) - self.der_reg())
# w_0 update
self.w0 += -step * self.der_loss_wrt_w0(x, y)
loss_a = self.loss(x, y).sum()
delta = abs(loss_a - loss_b)
if abs(delta/loss_a) > 1e-3:
cnt = 0
else:
cnt+=1
glob_cnt += 1
return self | [
"leonshting@gmail.com"
] | leonshting@gmail.com |
e6d4a5b68241ef8bf821e322cb11bd1f31db75b6 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/rci.py | 1499f78fdcb23fcbcc72afecd718862922797f9e | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'rCI':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
9e70985c1a04109922a692d38d13895082188238 | 8575c4ce854151973bb8f58b8a124f7b1816df45 | /Malu_Python_Scripts/badpix_match.py | eb4ee7f719267ce1e11e56c8d1213791dbaeb636 | [] | no_license | mlldantas/Gal_classification | e0a3ce375d0661ca1933b4d36ff20f6fb4d469cc | 81c392ec828709d30dea351a2fe27ec81bc6e69d | refs/heads/master | 2022-03-30T14:24:18.340900 | 2020-02-21T17:24:25 | 2020-02-21T17:24:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,834 | py | from __future__ import division
import numpy as np
import pandas as pd

# Cross-matches SDSS objects (identified by plate.mjd.fiberid) across four
# catalogs — Dn4000 breaks, emission lines, bad-pixel fractions and
# BPT/WHAN classifications — keeping only objects whose bad-pixel fraction
# is below 25% in all four diagnostic lines, and writes the merged table.
#
# Fix over the original: membership/size tests used "x.size is 0", which
# compares identity against the int literal 0 and only works because
# CPython interns small ints; replaced with the correct "== 0".

dn4000_txt = '/home/mldantas/Dropbox/STARLIGHT/dn4000_MALU.txt'
lines = '/home/mldantas/Dropbox/STARLIGHT/lines.txt'

dn4000_table = np.loadtxt(dn4000_txt, dtype=object)
lines_table = np.loadtxt(lines, dtype=object)
bad_pix_info = np.loadtxt('/home/mldantas/Dropbox/Clustering/Dataset/badpixels_class_WHAN_BPT_predictions.txt', dtype=str)
classification = np.loadtxt('/home/mldantas/Dropbox/Clustering/Dataset/class_WHAN_BPT.csv', delimiter=',', dtype=str)

# Build column-name -> column-values dictionaries (row 0 holds the headers).
dn4000_dictionary = {}
for k in range(len(dn4000_table[0, :])):
    dn4000_dictionary[dn4000_table[0, k]] = np.array(dn4000_table[0 + 1:, k], dtype=str)
print ("Dn4000 Table Dictionary read ok!")

lines_dictionary = {}
for j in range((lines_table[0, :]).size):
    lines_dictionary[lines_table[0, j]] = np.array(lines_table[0 + 1:, j], dtype=str)
print ("Lines' Table Dictionary read ok!")

classification_dictionary = {}
for k in range(len(classification[0, :])):
    classification_dictionary[classification[0, k]] = np.array(classification[0 + 1:, k], dtype=str)
print ("Classification Table Dictionary read ok!")

# Bad-pixel fractions per diagnostic line for each object id.
ids = bad_pix_info[:, 0].astype(str)
bad_pix_hb = bad_pix_info[:, 1].astype(float)
bad_pix_o3 = bad_pix_info[:, 2].astype(float)
bad_pix_ha = bad_pix_info[:, 3].astype(float)
bad_pix_n2 = bad_pix_info[:, 4].astype(float)

# Keep objects with < 25% bad pixels in all of Hbeta, [OIII], Halpha, [NII]
# (the "*" on boolean arrays acts as a logical AND).
index = np.where((bad_pix_hb < 0.25) * (bad_pix_hb >= 0.0) * (bad_pix_o3 < 0.25) * (bad_pix_o3 >= 0.0)
                 * (bad_pix_ha < 0.25) * (bad_pix_ha >= 0.0) * (bad_pix_n2 < 0.25) * (bad_pix_n2 >= 0.0))

dn4000_ids = dn4000_dictionary['SC5-output_file'].astype(str)
dn4000_obs_break = dn4000_dictionary['Dn4000(obs)'].astype(float)
dn4000_syn_break = dn4000_dictionary['Dn4000(syn)'].astype(float)

lines_plate = lines_dictionary['plate'].astype(int)
lines_mjd = lines_dictionary['mjd'].astype(int)
lines_fiberid = lines_dictionary['fiberID'].astype(int)
print("Line's table size is %d" % lines_plate.size)

# Split classification ids of the form "plate.mjd.fiberid" into components.
ids_class = classification_dictionary['id'].astype(str)
plate_class = []
mjd_class = []
fiberid_class = []
for i in range(ids_class.size):
    plate_class.append(int(ids_class[i].split('.')[0]))
    mjd_class.append(int(ids_class[i].split('.')[1]))
    fiberid_class.append(int(ids_class[i].split('.')[2]))
plate_class = np.array(plate_class)
mjd_class = np.array(mjd_class)
fiberid_class = np.array(fiberid_class)

# Split bad-pixel ids the same way, then apply the quality cut.
plate = []
mjd = []
fiberid = []
for i in range(ids.size):
    plate.append(int(ids[i].split('.')[0]))
    mjd.append(int(ids[i].split('.')[1]))
    fiberid.append(int(ids[i].split('.')[2]))
plate = np.array(plate)[index]
mjd = np.array(mjd)[index]
fiberid = np.array(fiberid)[index]

## Dn4000 crossmatch -----------------------------------------------------------------------------------------------
dn4000_plate = []
dn4000_mjd = []
dn4000_fiberid = []
for l in range(dn4000_ids.size):
    dn4000_plate_i = dn4000_ids[l].split('.')[0]
    dn4000_mjd_i = dn4000_ids[l].split('.')[1]
    dn4000_fiberid_i = dn4000_ids[l].split('.')[2]
    dn4000_plate.append(int(dn4000_plate_i))
    dn4000_mjd.append(int(dn4000_mjd_i))
    dn4000_fiberid.append(int(dn4000_fiberid_i))
dn4000_plate = np.array(dn4000_plate)
dn4000_mjd = np.array(dn4000_mjd)
dn4000_fiberid = np.array(dn4000_fiberid)
print ("Dn4000 size is %d" % dn4000_plate.size)

# Keep only Dn4000 rows whose (plate, mjd, fiberid) survived the quality cut.
dn4000_indexes = np.arange(plate.size)
dn4000_data_index = []
for m in range(dn4000_plate.size):
    dn4000_data_index_m = dn4000_indexes[(plate == dn4000_plate[m]) * (mjd == dn4000_mjd[m]) *
                                         (fiberid == dn4000_fiberid[m])]
    if dn4000_data_index_m.size == 0:   # was "is 0": identity test on an int literal
        continue
    dn4000_data_index.append(m)

dn4000_synth = dn4000_syn_break[dn4000_data_index]
dn4000_obs = dn4000_obs_break[dn4000_data_index]
dn4000_plate = dn4000_plate[dn4000_data_index]
dn4000_mjd = dn4000_mjd[dn4000_data_index]
dn4000_fiberid = dn4000_fiberid[dn4000_data_index]

## Lines crossmatch ------------------------------------------------------------------------------------------------
indexes = np.arange(plate.size)
new_index = []
for i in range(lines_plate.size):
    # NOTE: this rebinds "index" (the earlier np.where tuple), which is no
    # longer needed at this point.
    index = indexes[(plate == lines_plate[i]) * (mjd == lines_mjd[i]) * (fiberid == lines_fiberid[i])]
    if index.size == 0:   # was "is 0"
        continue
    new_index.append(i)

h_alpha = lines_dictionary['F_Halpha'].astype(float)[new_index]
ew_h_alpha = lines_dictionary['EW_Halpha'].astype(float)[new_index]
h_beta = lines_dictionary['F_Hbeta'].astype(float)[new_index]
oiii = lines_dictionary['F_oiii'].astype(float)[new_index]
nii = lines_dictionary['F_nii'].astype(float)[new_index]

## Classification crossmatch -------------------------------------------------------------------------------------------
indexes_class = np.arange(plate_class.size)
index_class = []
for i in range(plate_class.size):
    index = indexes_class[(plate == plate_class[i]) * (mjd == mjd_class[i]) * (fiberid == fiberid_class[i])]
    if index.size == 0:   # was "is 0"
        continue
    index_class.append(i)

classification_bpt = classification_dictionary['class_BPT'].astype(int)[index_class]
classification_whan = classification_dictionary['class_WHAN'].astype(int)[index_class]

# Write the merged catalog as CSV.
np.savetxt('/home/mldantas/Dropbox/Clustering/Dataset/results_classification.csv',
           np.column_stack((plate, mjd, fiberid, dn4000_obs, dn4000_synth, h_alpha, ew_h_alpha, h_beta, oiii, nii,
                            classification_bpt, classification_whan)),
           fmt="%d,%d,%d,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%d,%d", delimiter=',', newline='\n',
           header='plate,mjd,fiber_id,dn4000_obs,dn4000_synth,H_alpha,EW_H_alpha,'
                  'H_beta,OIII,NII,class_BPT,class_WHAN')
| [
"noreply@github.com"
] | noreply@github.com |
1e6ab766a2799d3338ba484409f9b162c8797b68 | 1d8b4d67e1f65b785cd11d006d0322af0d27ebcc | /[0725_현수]Naver_DSC2018/TypeAnalysis_Cpu.py | e860a910918d24f084a3f913c62b0b36624edccb | [] | no_license | kumakuma34/Naver-DataScienceCompetition-2018 | 4256ff548d3d8893620581dc49cf8ea37a2a9d0e | a5c447f327ca6d18879cc7ae59b5889a0292fedc | refs/heads/master | 2020-05-04T21:56:23.604022 | 2019-09-28T11:40:22 | 2019-09-28T11:40:22 | 179,494,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 22:06:02 2018
@author: qgqg2
"""
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import collections
from sklearn import datasets, linear_model
# utf-8 encoding error, so I take 'cp1252'
df = pd.read_csv('Processed_Data.csv', encoding= "cp1252")
#df.shape()
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import collections
from sklearn import datasets, linear_model
# utf-8 encoding error, so I take 'cp1252'
df = pd.read_csv('Processed_Data.csv', encoding= "cp1252")
#df.shape()
Cpu_type = df['Cpu_Type'].unique()
Cpu_size = df.groupby('Cpu_Type').size()
print(Cpu_size)
print(Cpu_type)
labels = Cpu_type
ratio = Cpu_size
plt.figure(figsize=(13,13))
plt.pie(ratio, labels=labels, shadow=True, startangle=150, autopct = '%1.1f%%')
plt.title('PieChart of Cpu')
#plt.show()
| [
"qgqg264@naver.com"
] | qgqg264@naver.com |
d9431f1fb2020f8d301376bed93ef53f3204cbf1 | 0c110eb32f2eaea5c65d40bda846ddc05757ced6 | /python_scripts/pimriscripts/mastersort/scripts_dir/p7432_run2M1.py | 39656c11ebf8cd9db049ce6d7b9a74d8b7e3f30a | [] | no_license | nyspisoccog/ks_scripts | 792148a288d1a9d808e397c1d2e93deda2580ff4 | 744b5a9dfa0f958062fc66e0331613faaaee5419 | refs/heads/master | 2021-01-18T14:22:25.291331 | 2018-10-15T13:08:24 | 2018-10-15T13:08:24 | 46,814,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | from __future__ import with_statement
import os, csv, shutil,tarfile, uf, dcm_ops

# Generated per-subject script: stages raw scan data for subject 7432,
# run 2M1, into the working tree, unpacks the DICOM tarball, renames the
# MRDC files with a .dcm extension and submits the conversion job.
# "uf" and "dcm_ops" are project-local helper modules.

dest_root = '/ifs/scratch/pimri/soccog/test_working'
dst_path_lst = ['7432', 'run2M1']
# Create <dest_root>/7432/run2M1 and copy the raw exam directory into it.
uf.buildtree(dest_root, dst_path_lst)
uf.copytree('/ifs/scratch/pimri/soccog/old/SocCog_Raw_Data_By_Exam_Number/2480/e1331017/s1388354_5610_2M1_s30', '/ifs/scratch/pimri/soccog/test_working/7432/run2M1')
t = tarfile.open(os.path.join('/ifs/scratch/pimri/soccog/test_working/7432/run2M1','MRDC_files.tar.gz'), 'r')
t.extractall('/ifs/scratch/pimri/soccog/test_working/7432/run2M1')
# Give every extracted MRDC file a .dcm suffix (skip the tarball itself).
for f in os.listdir('/ifs/scratch/pimri/soccog/test_working/7432/run2M1'):
    if 'MRDC' in f and 'gz' not in f:
        old = os.path.join('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', f)
        new = os.path.join('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', f + '.dcm')
        os.rename(old, new)
# Submit the DICOM conversion job; the cleanup submission is left disabled.
qsub_cnv_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', '7432_run2M1', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cnv')
#qsub_cln_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', '7432_run2M1', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cln')
| [
"katherine@Katherines-MacBook-Pro.local"
] | katherine@Katherines-MacBook-Pro.local |
e1f2f7cda8711e3ec07881200a6ea52f823dd4d3 | 1b98c70426580d6cebf36b6f9ed807fe7a9c0729 | /plots/plot-rx-overhead.py | f34b76b70508ad27093ed0e330314680583b85a3 | [] | no_license | jvimal/eyeq-tests | 54a1bba50d7019c07b09fdd147b831a5a823b5ba | d99d05d001d5a3d9fce53b66c6587f605245b555 | refs/heads/master | 2020-06-06T17:31:13.127695 | 2013-03-21T17:24:46 | 2013-03-21T17:24:46 | 8,303,987 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,946 | py | import plot_defaults
from helper import *
import math

# Command-line options for the RX CPU-overhead bar plot (Python 2 script).
# parse_cpu_usage/parse_rate_usage/transpose and plt come in via the
# wildcard helper import — presumably; verify against helper.py.
parser = argparse.ArgumentParser()
parser.add_argument('--cols',
                    help="Columns to include for CPU usage",
                    action="store",
                    default='user,sys,sirq,hirq',
                    dest="cols")
parser.add_argument('--maxy',
                    help="Max CPU on y-axis",
                    action="store",
                    default=100,
                    dest="maxy",
                    type=int)
parser.add_argument('-o',
                    help="Output file to save",
                    default=None,
                    dest="out")
parser.add_argument('--text',
                    help="Plot rate text on the graph",
                    default=False,
                    action="store_true",
                    dest="text")
parser.add_argument('--dirs',
                    help="Directories to read output from",
                    default=['.'],
                    nargs="+",
                    dest="dirs")

args = parser.parse_args()
# Traffic rates (Mb/s) and numbers of VQs/TCP connections that were measured.
rates = [1000, 3000, 6000, 9000]
nums = [1, 8, 16, 32, 64, 92]
def dir_param(rate, without=False, num=1):
    """Relative results directory for one (rate, num) run.

    Runs with the feature disabled live under rx-without/, the rest
    under rx-with/.
    """
    variant = "rx-without" if without else "rx-with"
    return "%s/r%s-n%d" % (variant, rate, num)
def yvalue(rate, without=False, num=1, cols="sirq", rootdir="."):
    # Average CPU usage (%) for one run, summed over the requested
    # usage columns. Reads <run dir>/cpu.txt; the first 10 samples of
    # each column are discarded as warm-up.
    dir = rootdir + "/" + dir_param(rate, without, num)
    data = parse_cpu_usage(os.path.join(dir, "cpu.txt"))
    data = transpose(data)
    data = map(lambda d: avg(d[10:]), data)
    # user, sys, hirq, sirq — column positions in the parsed cpu.txt
    data = {
        'user': data[0],
        'sys': data[1],
        'hirq': data[4],
        'sirq': data[5]
    }
    ret = 0.0
    for col in cols.split(','):
        ret += data[col]
    return ret
def yvalue2(rate, without=False, num=1, rootdir="."):
    # Average RX throughput of eth2 in MB/s (divider 2^20) for one run,
    # read from <run dir>/net.txt; the first 30 samples are discarded.
    dir = rootdir + "/" + dir_param(rate, without, num)
    data = parse_rate_usage(os.path.join(dir, "net.txt"),
                            ifaces=["eth2"], dir="rx", divider=(1 << 20))
    data = avg(data["eth2"][30:])
    #perf = perf_summary(os.path.join(dir, "perf.txt"))
    print dir, data
    #pprint(perf)
    return data
# Bar colours (one per connection count; blue_colours comes from helper),
# bar geometry and the CPU columns to aggregate.
colours = blue_colours + ['black']
bar_width=1
bar_group=len(nums)+1
cols = args.cols
def avg(l):
    """Arithmetic mean of a sequence, always returned as a float."""
    total = 0.0
    for value in l:
        total += value
    return total / len(l)
def stdev(l):
    """Population standard deviation of a sequence."""
    mean = sum(l) * 1.0 / len(l)
    variance = sum((x - mean) ** 2 for x in l) * 1.0 / len(l)
    return math.sqrt(variance)
def plot_without(without=False):
    # Draw one grouped-bar series: average CPU usage per rate, one bar
    # group per rate, one bar per connection count n. Called twice —
    # once with without=False (hatched bars, "with" runs) and once with
    # without=True (coloured, labelled bars from the "without" runs).
    # Error bars are the stddev across the sample directories in args.dirs.
    alpha = 1
    first = True  # NOTE(review): never used — looks like leftover state
    for i, n in enumerate(nums):
        xs = []
        xlabels = []
        ys = []
        yerrs = []
        xindex = i
        for rate in rates:
            xindex += bar_group
            xs.append(xindex)
            xlabels.append("%sG" % (rate/1000))
            temp_ys = []
            for dir in args.dirs:
                print dir
                temp_ys.append(yvalue(rate, num=n, without=without, cols=cols, rootdir=dir))
            ys.append(avg(temp_ys))
            yerrs.append(stdev(temp_ys))
            #rate = yvalue2(rate, num=n, without=without, rootdir=args.dir)
            if without == False and args.text:
                # Optionally annotate each "with" bar with its rate value.
                plt.text(xindex, ys[-1] + 10,
                         '%.1fM' % rate, rotation='vertical')
        if without == False:
            plt.bar(xs, ys, bar_width, color=colours[0], alpha=alpha, hatch='*', yerr=yerrs, ecolor='purple')
        else:
            plt.bar(xs, ys, bar_width, color=colours[i], label="%d" % n, yerr=yerrs, ecolor="black")#, alpha=alpha)
    plt.xlabel("Rate")
    plt.ylabel("CPU %")
    plt.xticks(xs, xlabels)
    if without == True:
        plt.legend(loc="upper left")
    #plt.title("CPU %s usage @ diff number of VQs/TCP connections.." % cols)
    plt.ylim((0,args.maxy))
    plt.grid(True)
    return
# This negative variable naming is a pain, I know! ;)
# Overlay both series, then save to file if -o was given, else show.
plot_without(False)
plot_without(True)

if args.out:
    plt.savefig(args.out)
else:
    plt.show()
| [
"j.vimal@gmail.com"
] | j.vimal@gmail.com |
c8448d233f737831366635ce1250748f73103822 | 97aa47340e99f7be364f27cba87e499d942eab43 | /dice.py | e625e3d7c22d238043fb453f6c002adc02a49a65 | [] | no_license | eryilmazysf/assignments- | cbe0d0d761a0a3da819c456ea0d9accb86175a35 | c1b3084b39ea72ae14fdc4c564d94c26ca198806 | refs/heads/master | 2022-12-11T00:22:59.427632 | 2020-09-02T15:26:12 | 2020-09-02T15:26:12 | 277,168,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | import random
# Interactive dice-roll simulator: asks for a dice count (1-8) and a number
# of rolls, rolls that many dice per round, and prints per-die values plus
# the per-round and overall totals.
print("""
*************************
     DİCE SİMULATİON
*************************
do not forget dice number between 1 and 8
""")
x=int(input("how many dice you will use:"))
while (x<1 or x>8): # re-prompt until the dice count is within 1..8
    print("not valid value try again")
    x = int(input("how many dice you will use:"))
y=int(input("number of rolls:"))
while (y<0): # re-prompt until the roll count is non-negative
    print("not valid try again:")
    y = int(input("number of rolls:"))
total_list=[]
for m in range(1,y+1):# one iteration per roll of all x dice
    total = 0
    for n in range(1,x+1):
        random_number= random.randint(1,6) # fair six-sided die
        print(n,".diece: ",random_number)
        total+=random_number
    total_list.append(total)
    print(m,".total",total)
print(total_list)
| [
"yusuferyilmaz1819@gmail.com"
] | yusuferyilmaz1819@gmail.com |
8c59ff3068e701a47f55427121fb4d45c93db56c | 649e2af15011b3c6326436e91a9dd9af0c3a6f8f | /vnpy/app/spread_trading/engine.py | 0a6901795c79ebaf15b64c56c62d0f2272d57e13 | [
"MIT"
] | permissive | Loopring/vnpy | 6270662260c2fdbeed846f0370d1b5eecea7c7bf | f7945b23e29dab8bfdf064da6a6cb815bb755b17 | refs/heads/loopring-release | 2023-07-16T23:11:10.174728 | 2021-09-06T04:01:00 | 2021-09-06T04:01:00 | 277,985,227 | 21 | 6 | MIT | 2021-01-23T02:21:08 | 2020-07-08T03:59:17 | Python | UTF-8 | Python | false | false | 31,837 | py | import traceback
import importlib
import os
from typing import List, Dict, Set, Callable, Any, Type
from collections import defaultdict
from copy import copy
from pathlib import Path
from datetime import datetime, timedelta
from vnpy.event import EventEngine, Event
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.event import (
EVENT_TICK, EVENT_POSITION, EVENT_CONTRACT,
EVENT_ORDER, EVENT_TRADE, EVENT_TIMER
)
from vnpy.trader.utility import load_json, save_json
from vnpy.trader.object import (
TickData, ContractData, LogData,
SubscribeRequest, OrderRequest
)
from vnpy.trader.constant import (
Direction, Offset, OrderType, Interval
)
from vnpy.trader.converter import OffsetConverter
from .base import (
LegData, SpreadData,
EVENT_SPREAD_DATA, EVENT_SPREAD_POS,
EVENT_SPREAD_ALGO, EVENT_SPREAD_LOG,
EVENT_SPREAD_STRATEGY,
load_bar_data, load_tick_data
)
from .template import SpreadAlgoTemplate, SpreadStrategyTemplate
from .algo import SpreadTakerAlgo
APP_NAME = "SpreadTrading"
class SpreadEngine(BaseEngine):
    """
    Facade engine of the spread trading app.

    Owns the data, algo and strategy sub-engines and re-exports their
    most frequently used methods as direct attributes.
    """

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Create the sub-engines and wire up the delegation shortcuts."""
        super().__init__(main_engine, event_engine, APP_NAME)

        # Guards against starting the sub-engines more than once.
        self.active = False

        self.data_engine: SpreadDataEngine = SpreadDataEngine(self)
        self.algo_engine: SpreadAlgoEngine = SpreadAlgoEngine(self)
        self.strategy_engine: SpreadStrategyEngine = SpreadStrategyEngine(self)

        # Shortcuts delegating to the data engine.
        self.add_spread = self.data_engine.add_spread
        self.remove_spread = self.data_engine.remove_spread
        self.get_spread = self.data_engine.get_spread
        self.get_all_spreads = self.data_engine.get_all_spreads

        # Shortcuts delegating to the algo engine.
        self.start_algo = self.algo_engine.start_algo
        self.stop_algo = self.algo_engine.stop_algo

    def start(self):
        """Start all sub-engines; repeated calls are no-ops."""
        if self.active:
            return
        self.active = True

        for engine in (self.data_engine, self.algo_engine, self.strategy_engine):
            engine.start()

    def stop(self):
        """Stop all sub-engines."""
        for engine in (self.data_engine, self.algo_engine, self.strategy_engine):
            engine.stop()

    def write_log(self, msg: str):
        """Publish a log message on the spread-trading log event channel."""
        record = LogData(
            msg=msg,
            gateway_name=APP_NAME
        )
        self.event_engine.put(Event(EVENT_SPREAD_LOG, record))
class SpreadDataEngine:
    """
    Maintains spread definitions and keeps each spread's derived price
    and position up to date from tick/trade/position events.

    Definitions are persisted to a JSON settings file so they survive
    restarts.
    """

    # Settings file (resolved by load_json/save_json) holding spread definitions.
    setting_filename = "spread_trading_setting.json"

    def __init__(self, spread_engine: SpreadEngine):
        """Capture parent-engine references and prepare containers."""
        self.spread_engine: SpreadEngine = spread_engine
        self.main_engine: MainEngine = spread_engine.main_engine
        self.event_engine: EventEngine = spread_engine.event_engine

        self.write_log = spread_engine.write_log

        self.legs: Dict[str, LegData] = {}          # vt_symbol: leg
        self.spreads: Dict[str, SpreadData] = {}    # name: spread
        # vt_symbol -> all spreads containing that leg
        self.symbol_spread_map: Dict[str, List[SpreadData]] = defaultdict(list)

    def start(self):
        """Load persisted spreads and begin listening to events."""
        self.load_setting()
        self.register_event()

        self.write_log("价差数据引擎启动成功")

    def stop(self):
        """No shutdown work needed; present for engine-lifecycle symmetry."""
        pass

    def load_setting(self) -> None:
        """Recreate every persisted spread from the settings file."""
        setting = load_json(self.setting_filename)

        for spread_setting in setting:
            self.add_spread(
                spread_setting["name"],
                spread_setting["leg_settings"],
                spread_setting["active_symbol"],
                spread_setting.get("min_volume", 1),
                save=False   # avoid rewriting the file while loading it
            )

    def save_setting(self) -> None:
        """Serialize all current spread definitions back to the settings file."""
        setting = []

        for spread in self.spreads.values():
            leg_settings = []
            for leg in spread.legs.values():
                price_multiplier = spread.price_multipliers[leg.vt_symbol]
                trading_multiplier = spread.trading_multipliers[leg.vt_symbol]
                inverse_contract = spread.inverse_contracts[leg.vt_symbol]

                leg_setting = {
                    "vt_symbol": leg.vt_symbol,
                    "price_multiplier": price_multiplier,
                    "trading_multiplier": trading_multiplier,
                    "inverse_contract": inverse_contract
                }
                leg_settings.append(leg_setting)

            spread_setting = {
                "name": spread.name,
                "leg_settings": leg_settings,
                "active_symbol": spread.active_leg.vt_symbol,
                "min_volume": spread.min_volume
            }
            setting.append(spread_setting)

        save_json(self.setting_filename, setting)

    def register_event(self) -> None:
        """Subscribe to the market/account events this engine consumes."""
        self.event_engine.register(EVENT_TICK, self.process_tick_event)
        self.event_engine.register(EVENT_TRADE, self.process_trade_event)
        self.event_engine.register(EVENT_POSITION, self.process_position_event)
        self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)

    def process_tick_event(self, event: Event) -> None:
        """Route a tick to its leg and recalculate affected spread prices."""
        tick = event.data

        leg = self.legs.get(tick.vt_symbol, None)
        if not leg:
            return
        leg.update_tick(tick)

        for spread in self.symbol_spread_map[tick.vt_symbol]:
            spread.calculate_price()
            self.put_data_event(spread)

    def process_position_event(self, event: Event) -> None:
        """Route a position update to its leg and refresh spread positions."""
        position = event.data

        leg = self.legs.get(position.vt_symbol, None)
        if not leg:
            return
        leg.update_position(position)

        for spread in self.symbol_spread_map[position.vt_symbol]:
            spread.calculate_pos()
            self.put_pos_event(spread)

    def process_trade_event(self, event: Event) -> None:
        """Route a trade to its leg and refresh spread positions."""
        trade = event.data

        leg = self.legs.get(trade.vt_symbol, None)
        if not leg:
            return
        leg.update_trade(trade)

        for spread in self.symbol_spread_map[trade.vt_symbol]:
            spread.calculate_pos()
            self.put_pos_event(spread)

    def process_contract_event(self, event: Event) -> None:
        """Attach contract details to a known leg and subscribe to its ticks."""
        contract = event.data

        leg = self.legs.get(contract.vt_symbol, None)

        if leg:
            # Update contract data
            leg.update_contract(contract)

            req = SubscribeRequest(
                contract.symbol, contract.exchange
            )
            self.main_engine.subscribe(req, contract.gateway_name)

    def put_data_event(self, spread: SpreadData) -> None:
        """Publish a spread price update for the UI/strategies."""
        event = Event(EVENT_SPREAD_DATA, spread)
        self.event_engine.put(event)

    def put_pos_event(self, spread: SpreadData) -> None:
        """Publish a spread position update for the UI/strategies."""
        event = Event(EVENT_SPREAD_POS, spread)
        self.event_engine.put(event)

    def get_leg(self, vt_symbol: str) -> LegData:
        """
        Return the leg for vt_symbol, creating it on first use.

        A newly created leg is seeded with contract data, a market-data
        subscription and any existing long/short positions.
        """
        leg = self.legs.get(vt_symbol, None)

        if not leg:
            leg = LegData(vt_symbol)
            self.legs[vt_symbol] = leg

            # Subscribe market data
            contract = self.main_engine.get_contract(vt_symbol)
            if contract:
                leg.update_contract(contract)

                req = SubscribeRequest(
                    contract.symbol,
                    contract.exchange
                )
                self.main_engine.subscribe(req, contract.gateway_name)

            # Initialize leg position
            for direction in Direction:
                vt_positionid = f"{vt_symbol}.{direction.value}"
                position = self.main_engine.get_position(vt_positionid)

                if position:
                    leg.update_position(position)

        return leg

    def add_spread(
        self,
        name: str,
        leg_settings: List[Dict],
        active_symbol: str,
        min_volume: float,
        save: bool = True
    ) -> None:
        """
        Create a new spread from per-leg settings and register it.

        save=False is used when restoring from the settings file.
        """
        if name in self.spreads:
            self.write_log("价差创建失败,名称重复:{}".format(name))
            return

        legs: List[LegData] = []
        price_multipliers: Dict[str, int] = {}
        trading_multipliers: Dict[str, int] = {}
        inverse_contracts: Dict[str, bool] = {}

        for leg_setting in leg_settings:
            vt_symbol = leg_setting["vt_symbol"]
            leg = self.get_leg(vt_symbol)

            legs.append(leg)
            price_multipliers[vt_symbol] = leg_setting["price_multiplier"]
            trading_multipliers[vt_symbol] = leg_setting["trading_multiplier"]
            inverse_contracts[vt_symbol] = leg_setting.get(
                "inverse_contract", False)

        spread = SpreadData(
            name,
            legs,
            price_multipliers,
            trading_multipliers,
            active_symbol,
            inverse_contracts,
            min_volume
        )
        self.spreads[name] = spread

        for leg in spread.legs.values():
            self.symbol_spread_map[leg.vt_symbol].append(spread)

        if save:
            self.save_setting()

        self.write_log("价差创建成功:{}".format(name))
        self.put_data_event(spread)

    def remove_spread(self, name: str) -> None:
        """Remove a spread definition (takes full effect after restart)."""
        if name not in self.spreads:
            return

        spread = self.spreads.pop(name)

        for leg in spread.legs.values():
            self.symbol_spread_map[leg.vt_symbol].remove(spread)

        self.save_setting()
        self.write_log("价差移除成功:{},重启后生效".format(name))

    def get_spread(self, name: str) -> SpreadData:
        """Return the spread with this name, or None if unknown."""
        spread = self.spreads.get(name, None)
        return spread

    def get_all_spreads(self) -> List[SpreadData]:
        """Return all registered spreads."""
        return list(self.spreads.values())
class SpreadAlgoEngine:
    """
    Runs spread execution algos: routes market/order/trade events to the
    active algos and sends/cancels the underlying leg orders with
    offset conversion.
    """

    # Execution algo implementation used for all started algos.
    algo_class = SpreadTakerAlgo

    def __init__(self, spread_engine: SpreadEngine):
        """Capture parent-engine references and prepare algo bookkeeping."""
        self.spread_engine: SpreadEngine = spread_engine
        self.main_engine: MainEngine = spread_engine.main_engine
        self.event_engine: EventEngine = spread_engine.event_engine

        self.write_log = spread_engine.write_log

        self.spreads: Dict[str, SpreadData] = {}            # spread name: spread
        self.algos: Dict[str, SpreadAlgoTemplate] = {}      # algoid: algo

        self.order_algo_map: Dict[str, SpreadAlgoTemplate] = {}          # vt_orderid: algo
        self.symbol_algo_map: Dict[str, List[SpreadAlgoTemplate]] = defaultdict(list)

        self.algo_count: int = 0
        self.vt_tradeids: Set = set()   # seen trade ids, for de-duplication

        self.offset_converter: OffsetConverter = OffsetConverter(
            self.main_engine
        )

    def start(self):
        """Begin listening to events."""
        self.register_event()
        self.write_log("价差算法引擎启动成功")

    def stop(self):
        """Request every known algo to stop."""
        # NOTE(review): stop_algo expects an algoid string, but this passes
        # the algo object itself — the dict lookup inside stop_algo will
        # fail and only log an error. Confirm intended behavior.
        for algo in self.algos.values():
            self.stop_algo(algo)

    def register_event(self):
        """Subscribe to the events this engine consumes."""
        self.event_engine.register(EVENT_TICK, self.process_tick_event)
        self.event_engine.register(EVENT_ORDER, self.process_order_event)
        self.event_engine.register(EVENT_TRADE, self.process_trade_event)
        self.event_engine.register(EVENT_POSITION, self.process_position_event)
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)
        self.event_engine.register(
            EVENT_SPREAD_DATA, self.process_spread_event
        )

    def process_spread_event(self, event: Event):
        """Cache the latest spread object by name."""
        spread: SpreadData = event.data
        self.spreads[spread.name] = spread

    def process_tick_event(self, event: Event):
        """Forward a tick to active algos on that symbol; prune inactive ones."""
        tick = event.data
        algos = self.symbol_algo_map[tick.vt_symbol]
        if not algos:
            return

        # Iterate over a copy so inactive algos can be removed in place.
        buf = copy(algos)
        for algo in buf:
            if not algo.is_active():
                algos.remove(algo)
            else:
                algo.update_tick(tick)

    def process_order_event(self, event: Event):
        """Update offset conversion state and notify the owning algo."""
        order = event.data
        self.offset_converter.update_order(order)

        algo = self.order_algo_map.get(order.vt_orderid, None)
        if algo and algo.is_active():
            algo.update_order(order)

    def process_trade_event(self, event: Event):
        """De-duplicate trade pushes, then notify the owning algo."""
        trade = event.data

        # Filter duplicate trade push
        if trade.vt_tradeid in self.vt_tradeids:
            return
        self.vt_tradeids.add(trade.vt_tradeid)

        self.offset_converter.update_trade(trade)

        algo = self.order_algo_map.get(trade.vt_orderid, None)
        if algo and algo.is_active():
            algo.update_trade(trade)

    def process_position_event(self, event: Event):
        """Keep offset conversion position state current."""
        position = event.data

        self.offset_converter.update_position(position)

    def process_timer_event(self, event: Event):
        """Tick every algo once per timer event; drop finished algos."""
        buf = list(self.algos.values())

        for algo in buf:
            if not algo.is_active():
                self.algos.pop(algo.algoid)
            else:
                algo.update_timer()

    def start_algo(
        self,
        spread_name: str,
        direction: Direction,
        offset: Offset,
        price: float,
        volume: float,
        payup: int,
        interval: int,
        lock: bool
    ) -> str:
        """
        Start one execution algo on the named spread.

        Returns the new algoid, or "" if the spread is unknown.
        """
        # Find spread object
        spread = self.spreads.get(spread_name, None)
        if not spread:
            self.write_log("创建价差算法失败,找不到价差:{}".format(spread_name))
            return ""

        # Generate algoid str
        self.algo_count += 1
        algo_count_str = str(self.algo_count).rjust(6, "0")
        algoid = f"{self.algo_class.algo_name}_{algo_count_str}"

        # Create algo object
        algo = self.algo_class(
            self,
            algoid,
            spread,
            direction,
            offset,
            price,
            volume,
            payup,
            interval,
            lock
        )
        self.algos[algoid] = algo

        # Generate map between vt_symbol and algo
        for leg in spread.legs.values():
            self.symbol_algo_map[leg.vt_symbol].append(algo)

        # Put event to update GUI
        self.put_algo_event(algo)

        return algoid

    def stop_algo(
        self,
        algoid: str
    ):
        """Stop the algo with this algoid (logs if unknown)."""
        algo = self.algos.get(algoid, None)
        if not algo:
            self.write_log("停止价差算法失败,找不到算法:{}".format(algoid))
            return

        algo.stop()

    def put_algo_event(self, algo: SpreadAlgoTemplate) -> None:
        """Publish an algo status update for the UI."""
        event = Event(EVENT_SPREAD_ALGO, algo)
        self.event_engine.put(event)

    def write_algo_log(self, algo: SpreadAlgoTemplate, msg: str) -> None:
        """Log a message prefixed with the algo's id."""
        msg = f"{algo.algoid}:{msg}"
        self.write_log(msg)

    def send_order(
        self,
        algo: SpreadAlgoTemplate,
        vt_symbol: str,
        price: float,
        volume: float,
        direction: Direction,
        lock: bool
    ) -> List[str]:
        """
        Send a limit order for one leg on behalf of an algo.

        Prefers closing existing opposite-side position before opening
        new; the request is then run through the offset converter
        (which may split it). Returns the resulting vt_orderids.
        """
        holding = self.offset_converter.get_position_holding(vt_symbol)
        contract = self.main_engine.get_contract(vt_symbol)

        # Opposite-side volume that is free to be closed.
        if direction == Direction.LONG:
            available = holding.short_pos - holding.short_pos_frozen
        else:
            available = holding.long_pos - holding.long_pos_frozen

        # If no position to close, just open new
        if not available:
            offset = Offset.OPEN
        # If enougth position to close, just close old
        elif volume < available:
            offset = Offset.CLOSE
        # Otherwise, just close existing position
        else:
            volume = available
            offset = Offset.CLOSE

        original_req = OrderRequest(
            symbol=contract.symbol,
            exchange=contract.exchange,
            direction=direction,
            offset=offset,
            type=OrderType.LIMIT,
            price=price,
            volume=volume
        )

        # Convert with offset converter
        req_list = self.offset_converter.convert_order_request(
            original_req, lock)

        # Send Orders
        vt_orderids = []

        for req in req_list:
            vt_orderid = self.main_engine.send_order(
                req, contract.gateway_name)

            # Check if sending order successful
            if not vt_orderid:
                continue

            vt_orderids.append(vt_orderid)

            self.offset_converter.update_order_request(req, vt_orderid)

            # Save relationship between orderid and algo.
            self.order_algo_map[vt_orderid] = algo

        return vt_orderids

    def cancel_order(self, algo: SpreadAlgoTemplate, vt_orderid: str) -> None:
        """Cancel one of the algo's outstanding orders (logs if unknown)."""
        order = self.main_engine.get_order(vt_orderid)
        if not order:
            self.write_algo_log(algo, "撤单失败,找不到委托{}".format(vt_orderid))
            return

        req = order.create_cancel_request()
        self.main_engine.cancel_order(req, order.gateway_name)

    def get_tick(self, vt_symbol: str) -> TickData:
        """Return the latest tick for vt_symbol from the main engine."""
        return self.main_engine.get_tick(vt_symbol)

    def get_contract(self, vt_symbol: str) -> ContractData:
        """Return the contract data for vt_symbol from the main engine."""
        return self.main_engine.get_contract(vt_symbol)
class SpreadStrategyEngine:
""""""
setting_filename = "spread_trading_strategy.json"
def __init__(self, spread_engine: SpreadEngine):
""""""
self.spread_engine: SpreadEngine = spread_engine
self.main_engine: MainEngine = spread_engine.main_engine
self.event_engine: EventEngine = spread_engine.event_engine
self.write_log = spread_engine.write_log
self.strategy_setting: Dict[str: Dict] = {}
self.classes: Dict[str: Type[SpreadStrategyTemplate]] = {}
self.strategies: Dict[str: SpreadStrategyTemplate] = {}
self.order_strategy_map: dict[str: SpreadStrategyTemplate] = {}
self.algo_strategy_map: dict[str: SpreadStrategyTemplate] = {}
self.spread_strategy_map: dict[str: SpreadStrategyTemplate] = defaultdict(
list)
self.vt_tradeids: Set = set()
self.load_strategy_class()
def start(self):
""""""
self.load_strategy_setting()
self.register_event()
self.write_log("价差策略引擎启动成功")
def close(self):
""""""
self.stop_all_strategies()
def load_strategy_class(self):
"""
Load strategy class from source code.
"""
path1 = Path(__file__).parent.joinpath("strategies")
self.load_strategy_class_from_folder(
path1, "vnpy.app.spread_trading.strategies")
path2 = Path.cwd().joinpath("strategies")
self.load_strategy_class_from_folder(path2, "strategies")
def load_strategy_class_from_folder(self, path: Path, module_name: str = ""):
"""
Load strategy class from certain folder.
"""
for dirpath, dirnames, filenames in os.walk(str(path)):
for filename in filenames:
if filename.split(".")[-1] in ("py", "pyd", "so"):
strategy_module_name = ".".join([module_name, filename.split(".")[0]])
self.load_strategy_class_from_module(strategy_module_name)
def load_strategy_class_from_module(self, module_name: str):
"""
Load strategy class from module file.
"""
try:
module = importlib.import_module(module_name)
for name in dir(module):
value = getattr(module, name)
if (isinstance(value, type) and issubclass(value, SpreadStrategyTemplate) and value is not SpreadStrategyTemplate):
self.classes[value.__name__] = value
except: # noqa
msg = f"策略文件{module_name}加载失败,触发异常:\n{traceback.format_exc()}"
self.write_log(msg)
def get_all_strategy_class_names(self):
""""""
return list(self.classes.keys())
def load_strategy_setting(self):
"""
Load setting file.
"""
self.strategy_setting = load_json(self.setting_filename)
for strategy_name, strategy_config in self.strategy_setting.items():
self.add_strategy(
strategy_config["class_name"],
strategy_name,
strategy_config["spread_name"],
strategy_config["setting"]
)
def update_strategy_setting(self, strategy_name: str, setting: dict):
"""
Update setting file.
"""
strategy = self.strategies[strategy_name]
self.strategy_setting[strategy_name] = {
"class_name": strategy.__class__.__name__,
"spread_name": strategy.spread_name,
"setting": setting,
}
save_json(self.setting_filename, self.strategy_setting)
def remove_strategy_setting(self, strategy_name: str):
"""
Update setting file.
"""
if strategy_name not in self.strategy_setting:
return
self.strategy_setting.pop(strategy_name)
save_json(self.setting_filename, self.strategy_setting)
def register_event(self):
""""""
ee = self.event_engine
ee.register(EVENT_ORDER, self.process_order_event)
ee.register(EVENT_TRADE, self.process_trade_event)
ee.register(EVENT_SPREAD_DATA, self.process_spread_data_event)
ee.register(EVENT_SPREAD_POS, self.process_spread_pos_event)
ee.register(EVENT_SPREAD_ALGO, self.process_spread_algo_event)
def process_spread_data_event(self, event: Event):
""""""
spread = event.data
strategies = self.spread_strategy_map[spread.name]
for strategy in strategies:
if strategy.inited:
self.call_strategy_func(strategy, strategy.on_spread_data)
def process_spread_pos_event(self, event: Event):
""""""
spread = event.data
strategies = self.spread_strategy_map[spread.name]
for strategy in strategies:
if strategy.inited:
self.call_strategy_func(strategy, strategy.on_spread_pos)
def process_spread_algo_event(self, event: Event):
""""""
algo = event.data
strategy = self.algo_strategy_map.get(algo.algoid, None)
if strategy:
self.call_strategy_func(
strategy, strategy.update_spread_algo, algo)
def process_order_event(self, event: Event):
""""""
order = event.data
strategy = self.order_strategy_map.get(order.vt_orderid, None)
if strategy:
self.call_strategy_func(strategy, strategy.update_order, order)
def process_trade_event(self, event: Event):
""""""
trade = event.data
strategy = self.order_strategy_map.get(trade.vt_orderid, None)
if strategy:
self.call_strategy_func(strategy, strategy.on_trade, trade)
def call_strategy_func(
self, strategy: SpreadStrategyTemplate, func: Callable, params: Any = None
):
"""
Call function of a strategy and catch any exception raised.
"""
try:
if params:
func(params)
else:
func()
except Exception:
strategy.trading = False
strategy.inited = False
msg = f"触发异常已停止\n{traceback.format_exc()}"
self.write_strategy_log(strategy, msg)
def add_strategy(
self, class_name: str, strategy_name: str, spread_name: str, setting: dict
):
"""
Add a new strategy.
"""
if strategy_name in self.strategies:
self.write_log(f"创建策略失败,存在重名{strategy_name}")
return
strategy_class = self.classes.get(class_name, None)
if not strategy_class:
self.write_log(f"创建策略失败,找不到策略类{class_name}")
return
spread = self.spread_engine.get_spread(spread_name)
if not spread:
self.write_log(f"创建策略失败,找不到价差{spread_name}")
return
strategy = strategy_class(self, strategy_name, spread, setting)
self.strategies[strategy_name] = strategy
# Add vt_symbol to strategy map.
strategies = self.spread_strategy_map[spread_name]
strategies.append(strategy)
# Update to setting file.
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def edit_strategy(self, strategy_name: str, setting: dict):
"""
Edit parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
strategy.update_setting(setting)
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def remove_strategy(self, strategy_name: str):
"""
Remove a strategy.
"""
strategy = self.strategies[strategy_name]
if strategy.trading:
self.write_log(f"策略{strategy.strategy_name}移除失败,请先停止")
return
# Remove setting
self.remove_strategy_setting(strategy_name)
# Remove from symbol strategy map
strategies = self.spread_strategy_map[strategy.spread_name]
strategies.remove(strategy)
# Remove from strategies
self.strategies.pop(strategy_name)
return True
def init_strategy(self, strategy_name: str):
""""""
strategy = self.strategies[strategy_name]
if strategy.inited:
self.write_log(f"{strategy_name}已经完成初始化,禁止重复操作")
return
self.call_strategy_func(strategy, strategy.on_init)
strategy.inited = True
self.put_strategy_event(strategy)
self.write_log(f"{strategy_name}初始化完成")
def start_strategy(self, strategy_name: str):
""""""
strategy = self.strategies[strategy_name]
if not strategy.inited:
self.write_log(f"策略{strategy.strategy_name}启动失败,请先初始化")
return
if strategy.trading:
self.write_log(f"{strategy_name}已经启动,请勿重复操作")
return
self.call_strategy_func(strategy, strategy.on_start)
strategy.trading = True
self.put_strategy_event(strategy)
def stop_strategy(self, strategy_name: str):
""""""
strategy = self.strategies[strategy_name]
if not strategy.trading:
return
self.call_strategy_func(strategy, strategy.on_stop)
strategy.stop_all_algos()
strategy.cancel_all_orders()
strategy.trading = False
self.put_strategy_event(strategy)
def init_all_strategies(self):
""""""
for strategy in self.strategies.keys():
self.init_strategy(strategy)
def start_all_strategies(self):
""""""
for strategy in self.strategies.keys():
self.start_strategy(strategy)
def stop_all_strategies(self):
""""""
for strategy in self.strategies.keys():
self.stop_strategy(strategy)
def get_strategy_class_parameters(self, class_name: str):
"""
Get default parameters of a strategy class.
"""
strategy_class = self.classes[class_name]
parameters = {}
for name in strategy_class.parameters:
parameters[name] = getattr(strategy_class, name)
return parameters
def get_strategy_parameters(self, strategy_name):
"""
Get parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
return strategy.get_parameters()
def start_algo(
self,
strategy: SpreadStrategyTemplate,
spread_name: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
payup: int,
interval: int,
lock: bool
) -> str:
""""""
algoid = self.spread_engine.start_algo(
spread_name,
direction,
offset,
price,
volume,
payup,
interval,
lock
)
self.algo_strategy_map[algoid] = strategy
return algoid
    def stop_algo(self, strategy: SpreadStrategyTemplate, algoid: str):
        """Ask the spread engine to stop one algo started by *strategy*."""
        self.spread_engine.stop_algo(algoid)
    def stop_all_algos(self, strategy: SpreadStrategyTemplate):
        """Stop every algo of *strategy*. NOTE(review): not implemented yet."""
        pass
def send_order(
self,
strategy: SpreadStrategyTemplate,
vt_symbol: str,
price: float,
volume: float,
direction: Direction,
offset: Offset,
lock: bool
) -> List[str]:
contract = self.main_engine.get_contract(vt_symbol)
original_req = OrderRequest(
symbol=contract.symbol,
exchange=contract.exchange,
direction=direction,
offset=offset,
type=OrderType.LIMIT,
price=price,
volume=volume
)
# Convert with offset converter
req_list = self.offset_converter.convert_order_request(
original_req, lock)
# Send Orders
vt_orderids = []
for req in req_list:
vt_orderid = self.main_engine.send_order(
req, contract.gateway_name)
# Check if sending order successful
if not vt_orderid:
continue
vt_orderids.append(vt_orderid)
self.offset_converter.update_order_request(req, vt_orderid)
# Save relationship between orderid and strategy.
self.order_strategy_map[vt_orderid] = strategy
return vt_orderids
def cancel_order(self, strategy: SpreadStrategyTemplate, vt_orderid: str):
""""""
order = self.main_engine.get_order(vt_orderid)
if not order:
self.write_strategy_log(
strategy, "撤单失败,找不到委托{}".format(vt_orderid))
return
req = order.create_cancel_request()
self.main_engine.cancel_order(req, order.gateway_name)
    def cancel_all_orders(self, strategy: SpreadStrategyTemplate):
        """Cancel every order of *strategy*. NOTE(review): not implemented yet."""
        pass
def put_strategy_event(self, strategy: SpreadStrategyTemplate):
""""""
data = strategy.get_data()
event = Event(EVENT_SPREAD_STRATEGY, data)
self.event_engine.put(event)
def write_strategy_log(self, strategy: SpreadStrategyTemplate, msg: str):
""""""
msg = f"{strategy.strategy_name}:{msg}"
self.write_log(msg)
def send_strategy_email(self, strategy: SpreadStrategyTemplate, msg: str):
""""""
if strategy:
subject = f"{strategy.strategy_name}"
else:
subject = "价差策略引擎"
self.main_engine.send_email(subject, msg)
def load_bar(
self, spread: SpreadData, days: int, interval: Interval, callback: Callable
):
""""""
end = datetime.now()
start = end - timedelta(days)
bars = load_bar_data(spread, interval, start, end)
for bar in bars:
callback(bar)
def load_tick(self, spread: SpreadData, days: int, callback: Callable):
""""""
end = datetime.now()
start = end - timedelta(days)
ticks = load_tick_data(spread, start, end)
for tick in ticks:
callback(tick)
| [
"xiaoyou.chen@foxmail.com"
] | xiaoyou.chen@foxmail.com |
0203f8b7a170b9c90a9503a129644d67e720066b | de121a951947f70f402079d288a78d35c85747b2 | /exercises/exercises_04.py | 79cb7651e375b500210a4054a4ae7430a01afd4a | [] | no_license | tpurnachander/requests-workshop | 56899be6c5520fb947d91676c11864d09b4489d6 | dac134558f141c482e0a52f19fdce37b7e7ba928 | refs/heads/master | 2023-03-10T19:00:31.012280 | 2021-02-19T12:08:54 | 2021-02-19T12:08:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | import requests
import xml.etree.ElementTree as et
# Exercise 4.1
# Create a function create_xml_body_from_string()
# that returns a docstring (with triple double quotes)
# containing the following XML document:
# <payee>
# <name>John Smith</name>
# <address>
# <street>My street</street>
# <city>My city</city>
# <state>My state</state>
# <zipCode>90210</zipCode>
# </address>
# <phoneNumber>0123456789</phoneNumber>
# <accountNumber>12345</accountNumber>
# </payee>
# Exercise 4.2
# Write a test that POSTs the object created in 4.1
# to http://parabank.parasoft.com/parabank/services/bank/billpay?accountId=12345&amount=500
# Set the request header 'Content-Type' to 'application/xml'
# Then check that the response status code is 200
# and that the value of the response header 'Content-Type' is also equal to 'application/xml'
# Exercise 4.3
# Write a method create_xml_body_using_elementtree() that returns
# the same request body as in Exercise 4.1, but now uses the
# ElementTree library (I've imported that for you already, it's available as 'et')
# Make your life a little easier by specifying all element values as strings
# Exercise 4.4
# Repeat Exercise 4.2, but now use the XML document created in Exercise 4.3
# Don't forget to convert the XML document to a string before sending it!
| [
"bas@ontestautomation.com"
] | bas@ontestautomation.com |
6a6ebbb1c7f50e95986df884a5b0e3681842fb9a | c72dae37d94d8f9e80db232a9838244e2bb33fb2 | /src/teachzy/urls.py | 387858aeb2ebd754105a9a08dec06e7ba64f4989 | [] | no_license | Asifrahman96/DjangoTeachzy | 956ae3c2e9b917ec1bb181fb06babe32f7f48083 | 89e3a07b0beb12e328747a0bc369d731d63b10ec | refs/heads/master | 2023-02-24T03:08:17.274863 | 2021-01-28T20:18:41 | 2021-01-28T20:18:41 | 333,869,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
# Root URL configuration: delegate app routes to each app's urls module,
# keep the Django admin, and append media-file routes via static()
# (static() only serves files when settings.DEBUG is True).
urlpatterns = [
    path('', include('pages.urls')),
    path('csvs/', include('csvs.urls')),
    path('teachers/', include('teachers.urls')),
    path('accounts/', include('accounts.urls')),
    path('admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"asifrahmananvar@gmail.com"
] | asifrahmananvar@gmail.com |
0aca2f6b09e65de9d040194775493f12fd174098 | 4d539867a53a3b9909bec1bfb1a49e189a2f1e20 | /EGAT/data_generator_attention_visual.py | ef32d63aab04eba48e9b3958108c0b8d5495fe4c | [
"MIT"
] | permissive | sailfish009/EGAT | 7a1d05c7c77e750903cc2ceb4b671d0f37a4ea60 | a03d6cbeb3e6d8f75edd608370256326d8fcb05b | refs/heads/main | 2023-01-15T10:13:17.142550 | 2020-11-22T14:20:28 | 2020-11-22T14:20:28 | 315,053,817 | 0 | 0 | MIT | 2020-11-22T14:15:56 | 2020-11-22T14:15:55 | null | UTF-8 | Python | false | false | 4,534 | py |
import os
import time
import pickle
import torch as t
import numpy as np
from torch.utils import data
import gzip
from time import time
from config import DefaultConfig
import torch
import dgl
import threading
class dataSet(data.Dataset):
    """Protein dataset pairing per-residue ProtBert embeddings with a KNN
    graph built from residue distance/angle matrices, one graph per protein.
    """

    def __init__(self, root_dir, protein_list_file):
        super(dataSet, self).__init__()

        # Edge-feature standardization constants, calculated from trainset only.
        self.edge_feat_mean = [31.83509173, 1.56021911]
        self.edge_feat_std = [16.79204272, 0.69076342]

        self.all_protBert_feature = pickle.load(gzip.open(root_dir+'/inputs/ProtBert_features.pkl.gz', "rb"))['ProtBert_features']
        self.all_dist_matrix = pickle.load(gzip.open(root_dir+'/inputs/ppisp_dist_matrix_map.pkl.gz', 'rb'))
        self.all_angle_matrix = pickle.load(gzip.open(root_dir+'/inputs/ppisp_angle_matrix_map.pkl.gz', 'rb'))

        print('protein_list_file:', protein_list_file)
        with open(protein_list_file, "r") as f:
            protein_list = f.readlines()
            self.protein_list = [x.strip() for x in protein_list]

        self.config = DefaultConfig()
        self.max_seq_len = self.config.max_sequence_length
        self.neighbourhood_size = 21
        self.protein_list_len = len(self.protein_list)

        # Graphs are static per protein, so build them all once up front.
        self.all_graphs = self.generate_all_graphs()
        print('All graphs generated.')

    def __getitem__(self, index):
        """Return (features, graph, info) for one protein, zero-padding the
        ProtBert feature matrix up to max_seq_len rows."""
        protein_name = self.protein_list[index]
        id_idx = index

        _all_protBert_feature_ = self.all_protBert_feature[id_idx][:self.max_seq_len]
        seq_len = _all_protBert_feature_.shape[0]

        protein_info = {
            'protein_name': protein_name,
            'protein_idx': id_idx,
            'seq_length': seq_len
        }

        if seq_len < self.max_seq_len:
            temp = np.zeros([self.max_seq_len, _all_protBert_feature_.shape[1]])
            temp[:seq_len, :] = _all_protBert_feature_
            _all_protBert_feature_ = temp

        # Add a leading channel dimension expected by the model.
        _all_protBert_feature_ = _all_protBert_feature_[np.newaxis, :, :]

        G = self.all_graphs[id_idx]

        return torch.from_numpy(_all_protBert_feature_).type(torch.FloatTensor), \
            G, \
            protein_info

    def __len__(self):
        """Number of proteins in the list file."""
        return self.protein_list_len

    def generate_all_graphs(self):
        """Build one DGL graph per protein: each residue is connected to its
        neighbourhood_size-1 nearest residues, with standardized
        (distance, angle) edge features."""
        graph_list = {}
        for id_idx in self.all_dist_matrix:
            G = dgl.DGLGraph()
            G.add_nodes(self.max_seq_len)

            # Nearest neighbours by distance; column 0 (self-distance) is skipped.
            neighborhood_indices = self.all_dist_matrix[id_idx]['dist_matrix'][:self.max_seq_len, :self.max_seq_len, 0] \
                .argsort()[:, 1:self.neighbourhood_size]
            if neighborhood_indices.max() > 499 or neighborhood_indices.min() < 0:
                # 499 is presumably max_seq_len - 1 -- TODO confirm.
                # The original used a bare `raise` here with no active
                # exception, which surfaced as an unrelated RuntimeError.
                raise ValueError(
                    'neighborhood index out of range: max=%s, min=%s'
                    % (neighborhood_indices.max(), neighborhood_indices.min()))

            edge_feat = np.array([
                self.all_dist_matrix[id_idx]['dist_matrix'][:self.max_seq_len, :self.max_seq_len, 0],
                self.all_angle_matrix[id_idx]['angle_matrix'][:self.max_seq_len, :self.max_seq_len]
            ])
            edge_feat = np.transpose(edge_feat, (1, 2, 0))
            edge_feat = (edge_feat - self.edge_feat_mean) / self.edge_feat_std  # standardize features

            self.add_edges_custom(G,
                                  neighborhood_indices,
                                  edge_feat
                                  )
            graph_list[id_idx] = G
        return graph_list

    def add_edges_custom(self, G, neighborhood_indices, edge_features):
        """Add KNN edges (neighbour -> centre residue) to G, with the absolute
        value of the (distance, angle) pair as the edge feature."""
        size = neighborhood_indices.shape[0]
        neighborhood_indices = neighborhood_indices.tolist()

        src = []
        dst = []
        temp_edge_features = []
        for center in range(size):
            src += neighborhood_indices[center]
            dst += [center] * (self.neighbourhood_size - 1)
            for nbr in neighborhood_indices[center]:
                temp_edge_features += [np.abs(edge_features[center, nbr])]
        if len(src) != len(dst):
            # Fixed typo: the original called the undefined name `prit` here,
            # which would have raised NameError instead of printing this message.
            print('source and destination array should have been of the same length: src and dst:', len(src), len(dst))
            raise Exception
        G.add_edges(src, dst)
        G.edata['ex'] = np.array(temp_edge_features)
def graph_collate(samples):
    """Collate (features, graph, info) samples into one batch: concatenated
    feature tensor, batched DGL graph, and the list of info dicts."""
    protbert_data, graph_batch, protein_info = map(list, zip(*samples))
    return torch.cat(protbert_data), dgl.batch(graph_batch), protein_info
| [
"sazan97@gmail.com"
] | sazan97@gmail.com |
4b91aba2fe0eca1c717453b7bb0a1adc8c7c999a | 436da49d82df227fc2654c7e3c6acc72d6a6aad6 | /hindi/migrations/0001_initial.py | 4c174394056b998f564cafad188f59419be3a23f | [] | no_license | mc639/Manavmitra | c91a2178f01427284d256ff152a4032f978e48a4 | 567d3b483d52e9285681916bfeda773a7b9ae0ed | refs/heads/master | 2020-07-30T14:41:55.736634 | 2019-09-23T05:02:26 | 2019-09-23T05:02:26 | 210,266,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,342 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-19 18:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
    """Auto-generated initial migration for the hindi app.

    Do not hand-edit applied operations; create a follow-up migration instead.
    NOTE(review): upload paths reference 'gujarati/...' inside the hindi app --
    confirm this is intentional.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Blog: article content edited through TinyMCE, with optional media.
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('caption', models.CharField(max_length=500)),
                ('slug', models.SlugField(max_length=500)),
                ('image_upload', models.ImageField(upload_to='gujarati/media')),
                ('article', tinymce.models.HTMLField()),
                ('video', models.TextField(blank=True, null=True)),
                ('image', models.TextField(blank=True, null=True)),
                ('embed', models.TextField(blank=True, null=True)),
                ('time', models.DateTimeField()),
            ],
        ),
        # Category: simple slugged name used to classify blog posts.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('slug', models.SlugField()),
            ],
        ),
        # Epaper: dated PDF/file upload of the electronic paper.
        migrations.CreateModel(
            name='Epaper',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('e_paper_name', models.CharField(max_length=50)),
                ('e_paper', models.FileField(upload_to='gujarati/Epaper')),
                ('date', models.DateField()),
            ],
        ),
        # Trailer: named external video URL.
        migrations.CreateModel(
            name='Trailer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('trailer_name', models.CharField(max_length=50)),
                ('trailer_url', models.TextField()),
            ],
        ),
        # Link each Blog to a Category; deleting a Category deletes its blogs.
        migrations.AddField(
            model_name='blog',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindi.Category'),
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
3918150f5542d26412b16d6b3636c338034e5b14 | 97aed55858f227a56fd79fec51f093e192db6c01 | /app/core/tests/test_models.py | fd2b496c9844804512d95c53c549b3b3e2ae01ab | [
"MIT"
] | permissive | harrydadson/recipe-app-api | 9fdc7d52fe7c2689808d605e65e241e82235cd05 | 63015fd390877ed6d5f41384c818cb9c7d870c52 | refs/heads/master | 2020-06-26T01:18:38.264636 | 2019-08-09T04:25:53 | 2019-08-09T04:25:53 | 173,390,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelsTests(TestCase):
    """Unit tests for the project's custom user model manager."""

    def test_create_user_with_email_successful(self):
        """Creating a user with an email address succeeds."""
        new_user = get_user_model().objects.create_user(
            email="test@email.com",
            password="testpass123",
        )

        self.assertEqual(new_user.email, "test@email.com")
        self.assertTrue(new_user.check_password("testpass123"))

    def test_new_user_email_normalized(self):
        """The domain part of a new user's email is lower-cased."""
        raw_email = "test@EMAIL.COM"
        new_user = get_user_model().objects.create_user(raw_email, "test123")

        self.assertEqual(new_user.email, raw_email.lower())

    def test_new_user_invalid_email(self):
        """Creating a user without an email raises ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, "test123")

    def test_create_new_superuser(self):
        """Superusers are flagged as both superuser and staff."""
        new_user = get_user_model().objects.create_superuser(
            "test@email.com",
            "test123",
        )

        self.assertTrue(new_user.is_superuser)  # provided via PermissionsMixin
        self.assertTrue(new_user.is_staff)
| [
"harry.dadson@gmail.com"
] | harry.dadson@gmail.com |
ecaeb6fd0293027f32aa44c7a539c89169160328 | 9f2fdb37e1dd7fd926d45fc22ecab5d3c0d6c4ab | /LDA/22.2.netease_news.py | 4d8b231eef12af009b0754091faf983191997db0 | [] | no_license | luoguohao/python2_machine_learning | 40c67028bc46b0d81ee0f28fa692be75eabaff9a | 3f6c893cf977a9ffa8b2cb18a39947c5d59600ef | refs/heads/master | 2021-05-05T21:12:00.384861 | 2017-12-28T07:49:17 | 2017-12-28T07:49:17 | 115,509,874 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | # !/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
from gensim import corpora, models, similarities
from pprint import pprint
import time
# import logging
# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def load_stopword():
    """Load stop words from 22.stopword.txt, one word per line, stripped.

    Uses a context manager so the file handle is closed even if reading
    fails (the original left the handle open on error).
    """
    with open('22.stopword.txt') as f_stop:
        return [line.strip() for line in f_stop]
if __name__ == '__main__':
    # Netease news corpus analysed with gensim: TF-IDF then LDA topic model.
    # Python 2 script (print statements).
    print '初始化停止词列表 --'
    t_start = time.time()
    stop_words = load_stopword()

    print '开始读入语料数据 -- '
    # Corpus file: one pre-tokenised document per line.
    f = open('22.news.dat')  # alternative small corpus: 22.LDA_test.txt
    texts = [[word for word in line.strip().lower().split() if word not in stop_words] for line in f]
    # texts = [line.strip().split() for line in f]
    print '读入语料数据完成,用时%.3f秒' % (time.time() - t_start)
    f.close()
    M = len(texts)
    print '文本数目:%d个' % M
    # pprint(texts)

    print '正在建立词典 --'
    dictionary = corpora.Dictionary(texts)
    V = len(dictionary)
    print '正在计算文本向量 --'
    corpus = [dictionary.doc2bow(text) for text in texts]
    print '正在计算文档TF-IDF --'
    t_start = time.time()
    corpus_tfidf = models.TfidfModel(corpus)[corpus]
    print '建立文档TF-IDF完成,用时%.3f秒' % (time.time() - t_start)
    print 'LDA模型拟合推断 --'
    num_topics = 30
    t_start = time.time()
    lda = models.LdaModel(corpus_tfidf, num_topics=num_topics, id2word=dictionary,
                          alpha=0.01, eta=0.01, minimum_probability=0.001,
                          update_every=1, chunksize=100, passes=1)
    print 'LDA模型完成,训练时间为\t%.3f秒' % (time.time() - t_start)
    # # topic distribution of every document:
    # doc_topic = [a for a in lda[corpus_tfidf]]
    # print 'Document-Topic:\n'
    # pprint(doc_topic)

    # Randomly pick 10 documents and print their topic distributions.
    num_show_topic = 10  # number of top topics to show per document
    print '10个文档的主题分布:'
    doc_topics = lda.get_document_topics(corpus_tfidf)  # topic distribution of all documents
    idx = np.arange(M)
    np.random.shuffle(idx)
    idx = idx[:10]
    for i in idx:
        topic = np.array(doc_topics[i])
        topic_distribute = np.array(topic[:, 1])
        # print topic_distribute
        topic_idx = topic_distribute.argsort()[:-num_show_topic-1:-1]
        print ('第%d个文档的前%d个主题:' % (i, num_show_topic)), topic_idx
        print topic_distribute[topic_idx]

    num_show_term = 7  # number of top words to show per topic
    print '每个主题的词分布:'
    for topic_id in range(num_topics):
        print '主题#%d:\t' % topic_id
        term_distribute_all = lda.get_topic_terms(topicid=topic_id)
        term_distribute = term_distribute_all[:num_show_term]
        term_distribute = np.array(term_distribute)
        term_id = term_distribute[:, 0].astype(np.int)
        print '词:\t',
        for t in term_id:
            print dictionary.id2token[t],
        print
        # print '\n概率:\t', term_distribute[:, 1]
| [
"guohao.luo@tendcloud.com"
] | guohao.luo@tendcloud.com |
94516c3ae940d74c65e1973a2df7f40372e0d9d4 | ecbaab2349087c97f512cd144538369609623b2b | /src/output_terminal.py | caef97d8fdb494d726d544cba5a50894735caba5 | [] | no_license | ErikCalsson/RNA_binding_site_correlation | c1b38a04efaab284c7914aba70d52e04dfa73823 | fe4e64813f90d74200660f622d06df9958c61438 | refs/heads/master | 2023-08-23T15:00:24.329338 | 2021-11-01T07:27:58 | 2021-11-01T07:27:58 | 372,179,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | # imports extern
# imports intern
# import sequence_pre_calculation as calc
# import graph_creation as graph
import data_calculation as dat
# start terminal output
def use_terminal():
    """Print the final correlation statistics (computed in data_calculation)
    to the terminal, alongside a pointer to the generated plots."""
    print("See output barGroupPC1.png or barGroupPC2.png for visualisation of final results")
    # output message with statistic
    print("PC1: ", str(dat.result_PC_1[0]) + ",p-value: ", dat.result_PC_1[1])
    print("PC2: ", dat.result_PC_2[0], ",p-value: ", dat.result_PC_2[1])
    print("remember: t > T and alpha > p-value")
    print("T = [ 0.995 -> 2.576, 0.99 -> 2.326, 0.975 -> 1.96, 0.95 -> 1.645]")
| [
"ecarlsson@techfak.uni-bielefeld.de"
] | ecarlsson@techfak.uni-bielefeld.de |
72596ce81af81043b7245963fcc8b2090e48c45d | 70f27f6215c5261f080cb8d12ceac5c484f2f147 | /app/django_models/person/models.py | 7af80c95b061fd6d88de2200ad80dfc6017a9b6c | [] | no_license | jordangarside/django-async-pytest-example | bf2fa32c7ffc5ebed3f2077483113b47987afd5a | 3e141eb1a048b80bd3f04c49068534f726a8c4c6 | refs/heads/master | 2022-12-02T20:51:33.364679 | 2020-08-14T16:47:22 | 2020-08-14T16:47:26 | 286,944,630 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | from django.db import models
from asgiref.sync import sync_to_async
from typing import TypeVar
TModel = TypeVar("TModel", bound=models.Model)
class BonfireAsyncManager(models.Manager): # type: ignore # excuse: typed in a stub
"""This class is typed via a typestub in async_manager.pyi. Make sure to add new manager commands in the file
to pass the typecheck.
"""
async def async_create(self, **kwargs: object) -> TModel:
obj: TModel = await sync_to_async(super().create, thread_sensitive=True)(
**kwargs
)
return obj
class BonfireBaseModel(models.Model): # type: ignore
"""Abstract base model class to provide commonly used fields uuid, created_at and updated_at."""
objects = BonfireAsyncManager()
class Meta:
abstract = True
class Person(BonfireBaseModel):
name = models.CharField(max_length=100)
| [
"jordan.garside@robinhood.com"
] | jordan.garside@robinhood.com |
001b8e5d7167d9f7ae30d9510713bbc363cc653b | da934e0010380fdc6894063540f61b0ebc2c9ded | /nova/crypto.py | 1f35ffa3915dad74a002a55998c536549c4b8d2d | [
"Apache-2.0"
] | permissive | bopopescu/cc-2 | ed4f1dfe3c98f476ff619058d99855a16272d36b | 37444fb16b36743c439b0d6c3cac2347e0cc0a94 | refs/heads/master | 2022-11-23T03:57:12.255817 | 2014-10-02T06:10:46 | 2014-10-02T06:10:46 | 282,512,589 | 0 | 0 | Apache-2.0 | 2020-07-25T19:36:05 | 2020-07-25T19:36:05 | null | UTF-8 | Python | false | false | 7,863 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrappers around standard crypto, including root and intermediate CAs,
SSH keypairs and x509 certificates.
"""
import hashlib
import logging
import os
import shutil
import tempfile
import time
import utils
from nova import vendor
import M2Crypto
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA')
flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our keys')
flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA')
flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?')
def ca_path(project_id):
    """Return the CA certificate path, per-project when an id is given."""
    if not project_id:
        return "%s/cacert.pem" % (FLAGS.ca_path)
    return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id)
def fetch_ca(project_id=None, chain=True):
    """Return CA certificate text: the project CA first (when enabled),
    with the root CA appended when chaining is requested."""
    if not FLAGS.use_intermediate_ca:
        project_id = None
    pem = ""
    if project_id:
        with open(ca_path(project_id), "r") as cafile:
            pem += cafile.read()
        if not chain:
            return pem
    with open(ca_path(None), "r") as cafile:
        pem += cafile.read()
    return pem
def generate_key_pair(bits=1024):
    """Generate an SSH keypair with ssh-keygen.

    Returns (private_key, public_key, fingerprint). The temporary working
    directory is removed even when key generation fails (the original
    leaked it on any exception).
    """
    tmpdir = tempfile.mkdtemp()
    try:
        keyfile = os.path.join(tmpdir, 'temp')
        utils.execute('ssh-keygen -q -b %d -N "" -f %s' % (bits, keyfile))
        (out, err) = utils.execute('ssh-keygen -q -l -f %s.pub' % (keyfile))
        # ssh-keygen -l output: "<bits> <fingerprint> <comment>"
        fingerprint = out.split(' ')[1]
        private_key = open(keyfile).read()
        public_key = open(keyfile + '.pub').read()
    finally:
        shutil.rmtree(tmpdir)
    return (private_key, public_key, fingerprint)
def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'):
    """Convert a PEM SSL public key to OpenSSH format (requires lsh-utils)."""
    pipeline = "".join([
        "sed -e'1d' -e'$d' | pkcs1-conv --public-key-info --base-64 |",
        " sexp-conv | sed -e'1s/(rsa-pkcs1/(rsa-pkcs1-sha1/' | sexp-conv -s",
        " transport | lsh-export-key --openssh",
    ])
    (out, err) = utils.execute(pipeline, ssl_public_key)
    if err:
        raise exception.Error("Failed to generate key: %s", err)
    return '%s %s@%s\n' % (out.strip(), name, suffix)
def generate_x509_cert(subject="/C=US/ST=California/L=The Mission/O=CloudFed/OU=NOVA/CN=foo", bits=1024):
    """Generate an RSA private key and a CSR for *subject* via openssl.

    Returns (private_key, csr) as PEM text. The temporary directory is
    removed even when openssl fails (the original leaked it on exception).
    """
    tmpdir = tempfile.mkdtemp()
    try:
        keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
        csrfile = os.path.join(tmpdir, 'temp.csr')
        logging.debug("openssl genrsa -out %s %s" % (keyfile, bits))
        utils.runthis("Generating private key: %s", "openssl genrsa -out %s %s" % (keyfile, bits))
        utils.runthis("Generating CSR: %s", "openssl req -new -key %s -out %s -batch -subj %s" % (keyfile, csrfile, subject))
        private_key = open(keyfile).read()
        csr = open(csrfile).read()
    finally:
        shutil.rmtree(tmpdir)
    return (private_key, csr)
def sign_csr(csr_text, intermediate=None):
    """Sign a CSR with the root CA, or with a per-project intermediate CA.

    The intermediate CA is generated on first use. The working directory is
    restored even if the generation script fails (the original left the
    process chdir'd into the CA folder on error).
    """
    if not FLAGS.use_intermediate_ca:
        intermediate = None
    if not intermediate:
        return _sign_csr(csr_text, FLAGS.ca_path)
    user_ca = "%s/INTER/%s" % (FLAGS.ca_path, intermediate)
    if not os.path.exists(user_ca):
        start = os.getcwd()
        os.chdir(FLAGS.ca_path)
        try:
            utils.runthis("Generating intermediate CA: %s",
                          "sh geninter.sh %s" % (intermediate))
        finally:
            os.chdir(start)
    return _sign_csr(csr_text, user_ca)
def _sign_csr(csr_text, ca_folder):
    """Write *csr_text* to a temp folder, sign it from inside *ca_folder*,
    and return the resulting certificate text.

    The working directory is restored and the temp folder removed even on
    failure (the original leaked both when openssl errored out).
    """
    tmpfolder = tempfile.mkdtemp()
    try:
        with open("%s/inbound.csr" % (tmpfolder), "w") as csrfile:
            csrfile.write(csr_text)
        logging.debug("Flags path: %s" % ca_folder)
        start = os.getcwd()
        # openssl ca reads ./openssl.cnf, so it must run from the CA folder.
        os.chdir(ca_folder)
        try:
            utils.runthis("Signing cert: %s", "openssl ca -batch -out %s/outbound.crt -config ./openssl.cnf -infiles %s/inbound.csr" % (tmpfolder, tmpfolder))
        finally:
            os.chdir(start)
        with open("%s/outbound.crt" % (tmpfolder), "r") as crtfile:
            return crtfile.read()
    finally:
        shutil.rmtree(tmpfolder)
def mkreq(bits, subject="foo", ca=0):
    """Generate an RSA key pair and a signed X509 certificate request.

    Returns (request, private_key). NOTE(review): the *ca* parameter is
    unused here, and set_subject() is given a plain string while M2Crypto
    expects an X509_Name -- confirm (see the FIXME in mkcacert).
    """
    pk = M2Crypto.EVP.PKey()
    req = M2Crypto.X509.Request()
    # 65537 is the conventional RSA public exponent (Fermat number F4).
    rsa = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None)
    pk.assign_rsa(rsa)
    rsa = None # should not be freed here
    req.set_pubkey(pk)
    req.set_subject(subject)
    req.sign(pk,'sha512')
    assert req.verify(pk)
    pk2 = req.get_pubkey()
    assert req.verify(pk2)
    return req, pk
def mkcacert(subject='nova', years=1):
    """Create a self-signed CA certificate valid for *years* years.

    Returns (cert, private_key, public_key). Python 2 code: uses `long`
    and print statements; the prints dump the PEMs as a side effect.
    """
    req, pk = mkreq(2048, subject, ca=1)
    pkey = req.get_pubkey()
    sub = req.get_subject()
    cert = M2Crypto.X509.X509()
    cert.set_serial_number(1)
    cert.set_version(2)
    cert.set_subject(sub) # FIXME subject is not set in mkreq yet
    # Validity window anchored at current UTC time.
    t = long(time.time()) + time.timezone
    now = M2Crypto.ASN1.ASN1_UTCTIME()
    now.set_time(t)
    nowPlusYear = M2Crypto.ASN1.ASN1_UTCTIME()
    nowPlusYear.set_time(t + (years * 60 * 60 * 24 * 365))
    cert.set_not_before(now)
    cert.set_not_after(nowPlusYear)
    issuer = M2Crypto.X509.X509_Name()
    issuer.C = "US"
    issuer.CN = subject
    cert.set_issuer(issuer)
    cert.set_pubkey(pkey)
    # Mark the certificate as a CA certificate.
    ext = M2Crypto.X509.new_extension('basicConstraints', 'CA:TRUE')
    cert.add_ext(ext)
    cert.sign(pk, 'sha512')
    # print 'cert', dir(cert)
    print cert.as_pem()
    print pk.get_rsa().as_pem()
    return cert, pk, pkey
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# http://code.google.com/p/boto
def compute_md5(fp):
    """Return the hex MD5 digest of the file object *fp*.

    Reads in 8 KiB chunks; the file pointer is rewound to the start both
    before hashing and before returning.
    """
    digest = hashlib.md5()
    fp.seek(0)
    chunk = fp.read(8192)
    while chunk:
        digest.update(chunk)
        chunk = fp.read(8192)
    fp.seek(0)
    return digest.hexdigest()
| [
"anotherjesse@gmail.com"
] | anotherjesse@gmail.com |
a5e2debc3b4de63242c2bc5f62e4db0ae3a58645 | 44f07b81df56d7ea44775784a9697648fe481478 | /day8/faceapp/facedetect.py | ab3e244e889618a394e6791b7b7b4edf81d25532 | [] | no_license | shaadomanthra/cbpython-advanced | 436510c70deca4e1ef01517f87bba0e392583a88 | 86b613f89ca0b0cd8b243c157af1a2807e6ce605 | refs/heads/master | 2022-11-30T23:33:45.938854 | 2020-08-12T11:20:03 | 2020-08-12T11:20:03 | 276,316,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | ## detect face and draw rectangles
# import packages (pip install opencv-python)
from cv2 import cv2
import sys
# Paths for the input image and the Haar cascade definition file
imagePath = 'images/f1.jpg'
cascPath = "haarcascade_frontalface_default.xml"

# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)

# Read the image & convert to gray scale (the detector works on grayscale)
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Detect faces in the image; returns an array of (x, y, w, h) boxes
faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.1,
    minNeighbors=5,
    minSize=(30, 30)
)
print(faces)

# Draw a green rectangle around each detected face
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)

# Open a window to display the annotated image; wait for a keypress
cv2.imshow("Faces found", image)
cv2.waitKey(0)

# Saving the image
# cv2.imwrite(saveimagePath, image)
| [
"packetcode@gmail.com"
] | packetcode@gmail.com |
ea3871eaa7c9b6755d2963498e08b7d307615ebc | e840fe54e8fc774fce6e81b373c5f532cc35bfd1 | /Api/Flask/Daos/AccesoDatos/DaoEspectros.py | 29c5cebbb14da20aeb31c5fac0b43239ce5184ae | [] | no_license | jcamiloq/geospectre | a059cf594c13aa5e01d2d2696615c5e6c2e0d3bb | 199896571b8ecc38da8374ff35f66f3bc1f3d193 | refs/heads/master | 2022-12-12T04:40:42.159146 | 2020-09-15T14:50:40 | 2020-09-15T14:50:40 | 293,323,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,148 | py | from .Logica.Espectros import Espectros
class DaoEspectros:
def __init__(self, conexion):
self.conexion = conexion
def guardarEspectros(self, espectros):
sql_guardar = "INSERT INTO espectros (white, dark, capturado, resultado, sensores_id) VALUES "
sql_guardar += "(%s, %s, %s, %s, %s) RETURNING *"
try:
cursor = self.conexion.cursor()
cursor.execute(
sql_guardar,
(
espectros.white, espectros.dark, espectros.capturado,
espectros.resultado,espectros.sensores_id
)
)
result = cursor.fetchone()
self.conexion.commit()
cursor.close()
espectros.id = result[0]
print("Espectro guardado con éxito")
return espectros
except(Exception) as e:
print("Error al guardar espectro", e)
return None
def actualizarEspectros(self, espectros):
sql_guardar = "UPDATE espectros SET "
sql_guardar += "white = %s, dark = %s, capturado = %s, resultado = %s, "
sql_guardar += "sensores_id = %s WHERE id = %s"
# print(sql_guardar)
try:
cursor = self.conexion.cursor()
cursor.execute(
sql_guardar,
(
espectros.white,
espectros.dark,
espectros.capturado,
espectros.resultado,
espectros.sensores_id,
espectros.id
)
)
self.conexion.commit()
cursor.close()
return espectros
except(Exception) as e:
print("Error al actualizar el espectro", e)
return None
def borrarEspectros(self, espectros):
sql_borrar = "DELETE FROM espectros WHERE id = " + str(espectros) + ";"
try:
cursor = self.conexion.cursor()
cursor.execute(sql_borrar)
except(Exception) as e:
print("Error al actualizar la espectros", e)
finally:
if(cursor):
cursor.close()
print("Se ha cerrado el cursor")
def getEspectrosSensores(self, id_sensores):
idEspectros = []
sql_select = "SELECT * FROM espectros WHERE sensores_id = " + str(id_sensores)
try:
cursor = self.conexion.cursor()
cursor.execute(sql_select)
record = cursor.fetchall()
for i in range (0, len(record)):
idEspectros.append(record[i][0])
# idEspectros.append("\n")
print(idEspectros)
return idEspectros
# record = cursor.fetchone()
# result = Sensores()
# result.id = record[0]
# result.lugar = record[1]
# result.tipo = record[2]
# result.numero_serie = record[3]
# result.t_int = record[4]
# result.numero_capt = record[5]
# result.mision_id = record[6]
# return result
except(Exception) as e:
print("Error al retornar los espectros", e)
result = None
def getEspectros(self, id_espectros):
sql_select = "SELECT * FROM espectros WHERE id = " + str(id_espectros)
try:
cursor = self.conexion.cursor()
cursor.execute(sql_select)
record = cursor.fetchone()
result = Espectros()
result.id = record[0]
result.white = record[1]
result.dark = record[2]
result.capturado = record[3]
result.resultado = record[4]
result.sensores_id = record[5]
# print(record)
return result
except(Exception) as e:
print("Error al retornar el espectro", e)
result = None
finally:
if(cursor):
cursor.close()
print("Se ha cerrado el cursor")
return result
| [
"j.juankquintero@gmail.com"
] | j.juankquintero@gmail.com |
35520773184de9bc6cbe60fe4ed6a427c4a1cb42 | 1c79e354726a60b939df18aa34ab63408553d078 | /py/examples/counter_broadcast.py | 9e30f02433102c7e848efbfe1fc87dd3a516e969 | [
"Apache-2.0"
] | permissive | feddelegrand7/wave | 9017fc7bbeef9233c1fd9497c3e7a4d6f6911e85 | ba002d47fcea688bf46fa1682e6c4a73cae0f8ee | refs/heads/master | 2023-02-03T13:06:41.894758 | 2020-12-18T00:09:14 | 2020-12-18T00:09:14 | 322,443,421 | 0 | 0 | Apache-2.0 | 2020-12-18T00:31:26 | 2020-12-18T00:10:14 | null | UTF-8 | Python | false | false | 647 | py | # Mode / Broadcast
# Launch the server in broadcast mode to synchronize browser state across users.
# Open `/demo` in multiple browsers and watch them synchronize in realtime.
# ---
from h2o_wave import main, app, Q, ui, pack
@app('/demo', mode='broadcast')
async def serve(q: Q):
count = q.app.count or 0
if 'increment' in q.args:
count += 1
q.app.count = count
items = pack([ui.button(name='increment', label=f'Count={count}')])
if count > 0:
form = q.page['example']
form.items = items
else:
q.page['example'] = ui.form_card(box='1 1 12 10', items=items)
await q.page.save()
| [
"prithvi@h2o.ai"
] | prithvi@h2o.ai |
e84fdec36800bc2eaf6a99f809432ca0be4287f2 | 84c4f9e14040502efddb258c243cb8e326f274c5 | /task_2_version_3/window_func.py | 0dfa95cde19f5daed407baf331d5f76a219b536e | [] | no_license | labkubia/lab | 7b6707eb2e1a1912e64dbda87bff44ca0aa84299 | 7e8ba89aa8638eb0f80855ba76fb4d852cc63a6e | refs/heads/master | 2021-09-07T15:14:05.243702 | 2018-02-24T19:23:04 | 2018-02-24T19:23:04 | 111,433,806 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | import numpy as np
def window_func(train_frame_set):
window = np.hanning(len(train_frame_set[0])) #different in matlab
#window = np.hamming(len(train_frame_set[0]))
train_frame_set=np.float64(train_frame_set)
frame_windowed_set=np.multiply(train_frame_set,window) # apply the window to the frames
#using np.multiply , multipy by elements
return frame_windowed_set | [
"yuxinliu_oliver@outlook.com"
] | yuxinliu_oliver@outlook.com |
a174ca449539006233ff7a4acea1252aef8eb3eb | 0ab90ab559eab46b583b4b1fdd4a5bb3f55b7793 | /python/ray/experimental/workflow/common.py | 3c40c555e0eab6747e2da0c8fe41e1c1b84e7018 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | swag1ong/ray | b22cd5ebab96c30f15b00a7d044fdeb7543a4616 | fdbeef604692aa308973988b32405ec0d70f9f40 | refs/heads/master | 2023-06-25T21:55:44.398516 | 2021-07-26T00:39:24 | 2021-07-26T00:39:24 | 389,518,857 | 2 | 0 | Apache-2.0 | 2021-07-26T05:33:40 | 2021-07-26T05:33:39 | null | UTF-8 | Python | false | false | 7,714 | py | from enum import Enum, unique
from collections import deque
import re
from typing import Dict, List, Optional, Callable, Set, Iterator, Any
import unicodedata
import uuid
from dataclasses import dataclass
import ray
from ray import ObjectRef
# Alias types
StepID = str
WorkflowOutputType = ObjectRef
@unique
class WorkflowStatus(str, Enum):
# There is at least a remote task running in ray cluster
RUNNING = "RUNNING"
# It got canceled and can't be resumed later.
CANCELED = "CANCELED"
# The workflow runs successfully.
SUCCESSFUL = "SUCCESSFUL"
# The workflow failed with an applicaiton error.
# It can be resumed.
FAILED = "FAILED"
# The workflow failed with a system error, i.e., ray shutdown.
# It can be resumed.
RESUMABLE = "RESUMABLE"
@dataclass
class WorkflowInputs:
# The object ref of the input arguments.
args: ObjectRef
# The object refs in the arguments.
object_refs: List[ObjectRef]
# TODO(suquark): maybe later we can replace it with WorkflowData.
# The workflows in the arguments.
workflows: "List[Workflow]"
@dataclass
class WorkflowData:
# The workflow step function body.
func_body: Callable
# The arguments of a workflow.
inputs: WorkflowInputs
# The num of retry for application exception
max_retries: int
# Whether the user want to handle the exception mannually
catch_exceptions: bool
# ray_remote options
ray_options: Dict[str, Any]
def to_metadata(self) -> Dict[str, Any]:
f = self.func_body
return {
"name": f.__module__ + "." + f.__qualname__,
"object_refs": [r.hex() for r in self.inputs.object_refs],
"workflows": [w.id for w in self.inputs.workflows],
"max_retries": self.max_retries,
"catch_exceptions": self.catch_exceptions,
"ray_options": self.ray_options,
}
@dataclass
class WorkflowMetaData:
# The current status of the workflow
status: WorkflowStatus
def slugify(value: str, allow_unicode=False) -> str:
"""Adopted from
https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
dashes to single dashes. Remove characters that aren't alphanumerics,
underscores, dots or hyphens. Also strip leading and
trailing whitespace.
"""
if allow_unicode:
value = unicodedata.normalize("NFKC", value)
else:
value = unicodedata.normalize("NFKD", value).encode(
"ascii", "ignore").decode("ascii")
value = re.sub(r"[^\w.\-]", "", value).strip()
return re.sub(r"[-\s]+", "-", value)
class Workflow:
def __init__(self, workflow_data: WorkflowData):
if workflow_data.ray_options.get("num_returns", 1) > 1:
raise ValueError("Workflow should have one return value.")
self._data = workflow_data
self._executed: bool = False
self._output: Optional[WorkflowOutputType] = None
self._step_id: StepID = slugify(
self._data.func_body.__qualname__) + "." + uuid.uuid4().hex
@property
def executed(self) -> bool:
return self._executed
@property
def output(self) -> WorkflowOutputType:
if not self._executed:
raise Exception("The workflow has not been executed.")
return self._output
@property
def id(self) -> StepID:
return self._step_id
def execute(self,
outer_most_step_id: Optional[StepID] = None,
last_step_of_workflow: bool = False) -> ObjectRef:
"""Trigger workflow execution recursively.
Args:
outer_most_step_id: See
"step_executor.execute_workflow" for explanation.
last_step_of_workflow: The step that generates the output of the
workflow (including nested steps).
"""
if self.executed:
return self._output
from ray.experimental.workflow import step_executor
output = step_executor.execute_workflow_step(self._step_id, self._data,
outer_most_step_id,
last_step_of_workflow)
if not isinstance(output, WorkflowOutputType):
raise TypeError("Unexpected return type of the workflow.")
self._output = output
self._executed = True
return output
def iter_workflows_in_dag(self) -> Iterator["Workflow"]:
"""Collect all workflows in the DAG linked to the workflow
using BFS."""
# deque is used instead of queue.Queue because queue.Queue is aimed
# at multi-threading. We just need a pure data structure here.
visited_workflows: Set[Workflow] = {self}
q = deque([self])
while q: # deque's pythonic way to check emptyness
w: Workflow = q.popleft()
for p in w._data.inputs.workflows:
if p not in visited_workflows:
visited_workflows.add(p)
q.append(p)
yield w
@property
def data(self) -> WorkflowData:
"""Get the workflow data."""
return self._data
def __reduce__(self):
raise ValueError(
"Workflow is not supposed to be serialized by pickle. "
"Maybe you are passing it to a Ray remote function, "
"returning it from a Ray remote function, or using "
"'ray.put()' with it?")
def run(self, workflow_id: Optional[str] = None) -> Any:
"""Run a workflow.
Examples:
>>> @workflow.step
... def book_flight(origin: str, dest: str) -> Flight:
... return Flight(...)
>>> @workflow.step
... def book_hotel(location: str) -> Reservation:
... return Reservation(...)
>>> @workflow.step
... def finalize_trip(bookings: List[Any]) -> Trip:
... return Trip(...)
>>> flight1 = book_flight.step("OAK", "SAN")
>>> flight2 = book_flight.step("SAN", "OAK")
>>> hotel = book_hotel.step("SAN")
>>> trip = finalize_trip.step([flight1, flight2, hotel])
>>> result = trip.run()
Args:
workflow_id: A unique identifier that can be used to resume the
workflow. If not specified, a random id will be generated.
"""
return ray.get(self.run_async(workflow_id))
def run_async(self, workflow_id: Optional[str] = None) -> ObjectRef:
"""Run a workflow asynchronously.
Examples:
>>> @workflow.step
... def book_flight(origin: str, dest: str) -> Flight:
... return Flight(...)
>>> @workflow.step
... def book_hotel(location: str) -> Reservation:
... return Reservation(...)
>>> @workflow.step
... def finalize_trip(bookings: List[Any]) -> Trip:
... return Trip(...)
>>> flight1 = book_flight.step("OAK", "SAN")
>>> flight2 = book_flight.step("SAN", "OAK")
>>> hotel = book_hotel.step("SAN")
>>> trip = finalize_trip.step([flight1, flight2, hotel])
>>> result = ray.get(trip.run_async())
Args:
workflow_id: A unique identifier that can be used to resume the
workflow. If not specified, a random id will be generated.
"""
# TODO(suquark): avoid cyclic importing
from ray.experimental.workflow.execution import run
return run(self, workflow_id)
| [
"noreply@github.com"
] | noreply@github.com |
112b11038313f4ecd80672439c01bf4361a7ebd4 | 158f2afa919a22e51b8c607f7a34b34e72db1b1a | /Astropy_Open_Error.py | 8e9b32e185a495bd99e348ea98ba9b54dc0e07f6 | [] | no_license | chrisfrohmaier/Code_Snippets | 96fb40a8a0ea46fbe1171432af388b9003b7a877 | 59d2ec591cf74805f7d0e299a4e6dcdd23acb6de | refs/heads/master | 2016-09-10T15:50:29.066747 | 2014-03-07T11:54:07 | 2014-03-07T11:54:07 | 14,979,633 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | from astropy.io import fits
try:
hdulist_multi_sci=fits.open('/Users/cf5g09/Documents/PTF_Transform/Icecube/refs/ptf_2924/C01/cd.ptf_2924_01_R_first.weight.fits')
#print '++++ multi_mask assign ', science_image
except IOError or Warning or UnboundLocalError:
print 'Cant open Science' | [
"cf5g09@soton.ac.uk"
] | cf5g09@soton.ac.uk |
5bc96ed5b2ff7057cfe5cf0f85b1852e0b311584 | afa0d5a97925273f7fb0befef697d36020df5787 | /packages/google-cloud-alloydb/google/cloud/alloydb_v1beta/services/alloy_db_admin/pagers.py | 1c442de8074c691d92fdefd2aa87e57390df9038 | [
"Apache-2.0"
] | permissive | scooter4j/google-cloud-python | dc7ae1ba6a33a62a40b617b806ec8ed723046b8b | 36b1cf08092d5c07c5971bb46edda7a9928166b1 | refs/heads/master | 2023-04-14T18:36:48.643436 | 2023-04-06T13:19:26 | 2023-04-06T13:19:26 | 188,338,673 | 0 | 0 | null | 2019-05-24T02:27:15 | 2019-05-24T02:27:14 | null | UTF-8 | Python | false | false | 20,951 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Iterator,
Optional,
Sequence,
Tuple,
)
from google.cloud.alloydb_v1beta.types import resources, service
class ListClustersPager:
"""A pager for iterating through ``list_clusters`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListClustersResponse` object, and
provides an ``__iter__`` method to iterate through its
``clusters`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListClusters`` requests and continue to iterate
through the ``clusters`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListClustersResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., service.ListClustersResponse],
request: service.ListClustersRequest,
response: service.ListClustersResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListClustersRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListClustersResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListClustersRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[service.ListClustersResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[resources.Cluster]:
for page in self.pages:
yield from page.clusters
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListClustersAsyncPager:
"""A pager for iterating through ``list_clusters`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListClustersResponse` object, and
provides an ``__aiter__`` method to iterate through its
``clusters`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListClusters`` requests and continue to iterate
through the ``clusters`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListClustersResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ListClustersResponse]],
request: service.ListClustersRequest,
response: service.ListClustersResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListClustersRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListClustersResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListClustersRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[service.ListClustersResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[resources.Cluster]:
async def async_generator():
async for page in self.pages:
for response in page.clusters:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListInstancesPager:
"""A pager for iterating through ``list_instances`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListInstancesResponse` object, and
provides an ``__iter__`` method to iterate through its
``instances`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListInstances`` requests and continue to iterate
through the ``instances`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListInstancesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., service.ListInstancesResponse],
request: service.ListInstancesRequest,
response: service.ListInstancesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListInstancesRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListInstancesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[service.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[resources.Instance]:
for page in self.pages:
yield from page.instances
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListInstancesAsyncPager:
"""A pager for iterating through ``list_instances`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListInstancesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``instances`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListInstances`` requests and continue to iterate
through the ``instances`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListInstancesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ListInstancesResponse]],
request: service.ListInstancesRequest,
response: service.ListInstancesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListInstancesRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListInstancesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[service.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[resources.Instance]:
async def async_generator():
async for page in self.pages:
for response in page.instances:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListBackupsPager:
"""A pager for iterating through ``list_backups`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListBackupsResponse` object, and
provides an ``__iter__`` method to iterate through its
``backups`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListBackups`` requests and continue to iterate
through the ``backups`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListBackupsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., service.ListBackupsResponse],
request: service.ListBackupsRequest,
response: service.ListBackupsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListBackupsRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListBackupsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListBackupsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[service.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[resources.Backup]:
for page in self.pages:
yield from page.backups
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListBackupsAsyncPager:
"""A pager for iterating through ``list_backups`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListBackupsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``backups`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListBackups`` requests and continue to iterate
through the ``backups`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListBackupsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ListBackupsResponse]],
request: service.ListBackupsRequest,
response: service.ListBackupsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListBackupsRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListBackupsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListBackupsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[service.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[resources.Backup]:
async def async_generator():
async for page in self.pages:
for response in page.backups:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListSupportedDatabaseFlagsPager:
"""A pager for iterating through ``list_supported_database_flags`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse` object, and
provides an ``__iter__`` method to iterate through its
``supported_database_flags`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListSupportedDatabaseFlags`` requests and continue to iterate
through the ``supported_database_flags`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., service.ListSupportedDatabaseFlagsResponse],
request: service.ListSupportedDatabaseFlagsRequest,
response: service.ListSupportedDatabaseFlagsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListSupportedDatabaseFlagsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[service.ListSupportedDatabaseFlagsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[resources.SupportedDatabaseFlag]:
for page in self.pages:
yield from page.supported_database_flags
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListSupportedDatabaseFlagsAsyncPager:
"""A pager for iterating through ``list_supported_database_flags`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``supported_database_flags`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListSupportedDatabaseFlags`` requests and continue to iterate
through the ``supported_database_flags`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ListSupportedDatabaseFlagsResponse]],
request: service.ListSupportedDatabaseFlagsRequest,
response: service.ListSupportedDatabaseFlagsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListSupportedDatabaseFlagsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[service.ListSupportedDatabaseFlagsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[resources.SupportedDatabaseFlag]:
async def async_generator():
async for page in self.pages:
for response in page.supported_database_flags:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| [
"noreply@github.com"
] | noreply@github.com |
ec6528e726e97e6f27a0169c8a07854d0fd9957b | d8c12c88942f5e0d0db76885884653bb94076cac | /src/boi/parser.py | 1275ecf1b21e99ef4817ae2441b9f314e5d3a697 | [] | no_license | jkarns275/ELLie | 58c5648f3e1fbcdfb3198a51af97ebee0e5fc91a | 2ecfed86f7f4bc0a9eec36368e9fd3319ebaac6c | refs/heads/master | 2020-06-23T20:06:47.745014 | 2019-07-28T03:20:51 | 2019-07-28T03:20:51 | 198,739,697 | 0 | 0 | null | 2019-07-25T02:11:27 | 2019-07-25T02:11:26 | null | UTF-8 | Python | false | false | 11,449 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# CAVEAT UTILITOR
#
# This file was automatically generated by TatSu.
#
# https://pypi.python.org/pypi/tatsu/
#
# Any changes you make to it will be overwritten the next time
# the file is generated.
from __future__ import print_function, division, absolute_import, unicode_literals
import sys
from tatsu.buffering import Buffer
from tatsu.parsing import Parser
from tatsu.parsing import tatsumasu, leftrec, nomemo
from tatsu.parsing import leftrec, nomemo # noqa
from tatsu.util import re, generic_main # noqa
# Keyword table for the generated parser; empty because the grammar this
# module was generated from declares no reserved keywords.
KEYWORDS = {} # type: ignore
class BoiBuffer(Buffer):
    """Tokenization buffer for the ``Boi`` grammar.

    Generated by TatSu.  A thin wrapper over :class:`tatsu.buffering.Buffer`
    that simply forwards the buffer configuration (whitespace handling,
    name-guard, comment patterns, case sensitivity, extra name characters)
    to the base class.
    """

    def __init__(
        self,
        text,
        whitespace=None,
        nameguard=None,
        comments_re=None,
        eol_comments_re=None,
        ignorecase=None,
        namechars='',
        **kwargs
    ):
        # All options are passed through unchanged; ``None`` defaults let
        # the base Buffer apply its own defaults.
        super(BoiBuffer, self).__init__(
            text,
            whitespace=whitespace,
            nameguard=nameguard,
            comments_re=comments_re,
            eol_comments_re=eol_comments_re,
            ignorecase=ignorecase,
            namechars=namechars,
            **kwargs
        )
class BoiParser(Parser):
    """TatSu-generated packrat parser for the Boi grammar.

    One ``_rule_`` method per grammar rule, each decorated with
    ``@tatsumasu()`` (memoization).  NOTE: auto-generated code -- do not edit
    by hand; regenerate from the grammar instead.
    """

    def __init__(
        self,
        whitespace=None,
        nameguard=None,
        comments_re=None,
        eol_comments_re=None,
        ignorecase=None,
        left_recursion=True,
        parseinfo=True,
        keywords=None,
        namechars='',
        buffer_class=BoiBuffer,
        **kwargs
    ):
        if keywords is None:
            keywords = KEYWORDS
        super(BoiParser, self).__init__(
            whitespace=whitespace,
            nameguard=nameguard,
            comments_re=comments_re,
            eol_comments_re=eol_comments_re,
            ignorecase=ignorecase,
            left_recursion=left_recursion,
            parseinfo=parseinfo,
            keywords=keywords,
            namechars=namechars,
            buffer_class=buffer_class,
            **kwargs
        )

    @tatsumasu()
    def _id_(self):  # noqa
        self._pattern('[a-zA-Z][a-zA-Z0-9_]*')

    @tatsumasu()
    def _var_(self):  # noqa
        self._id_()

    @tatsumasu()
    def _float_(self):  # noqa
        self._pattern('[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?')

    @tatsumasu()
    def _value_(self):  # noqa
        self._float_()
        self.name_last_node('@')

    @tatsumasu()
    def _expr_(self):  # noqa
        # value | var | '(' base_expr ')'
        with self._choice():
            with self._option():
                self._value_()
                self.name_last_node('@')
            with self._option():
                self._var_()
                self.name_last_node('@')
            with self._option():
                self._token('(')
                self._cut()
                self._base_expr_()
                self.name_last_node('@')
                self._cut()
                self._token(')')
            self._error('no available options')

    @tatsumasu()
    def _multiplicative_expr_(self):  # noqa
        # expr (('*' | '/') expr)*
        self._expr_()
        self.add_last_node_to_name('@')

        def block1():
            with self._group():
                with self._group():
                    with self._choice():
                        with self._option():
                            self._token('*')
                        with self._option():
                            self._token('/')
                        self._error('no available options')
                self._cut()
                self._expr_()
                self.add_last_node_to_name('@')
        self._closure(block1)

    @tatsumasu()
    def _pow_expr_(self):  # noqa
        # multiplicative_expr ('**' multiplicative_expr)*
        self._multiplicative_expr_()
        self.add_last_node_to_name('@')

        def block1():
            with self._group():
                self._token('**')
                self._cut()
                self._multiplicative_expr_()
                self.add_last_node_to_name('@')
        self._closure(block1)

    @tatsumasu()
    def _additive_expr_(self):  # noqa
        # pow_expr (('+' | '-') pow_expr)*
        self._pow_expr_()
        self.add_last_node_to_name('@')

        def block1():
            with self._group():
                with self._group():
                    with self._choice():
                        with self._option():
                            self._token('+')
                        with self._option():
                            self._token('-')
                        self._error('no available options')
                self._cut()
                self._pow_expr_()
                self.add_last_node_to_name('@')
        self._closure(block1)

    @tatsumasu()
    def _base_expr_(self):  # noqa
        # Ordered choice of every expression form.
        with self._choice():
            with self._option():
                self._lambda_expr_()
                self.name_last_node('@')
            with self._option():
                self._let_expr_()
                self.name_last_node('@')
            with self._option():
                self._if_expr_()
                self.name_last_node('@')
            with self._option():
                self._function_call_expr_()
                self.name_last_node('@')
            with self._option():
                self._additive_expr_()
                self.name_last_node('@')
            self._error('no available options')

    @tatsumasu()
    def _bool_expr_(self):  # noqa
        with self._choice():
            with self._option():
                self._comparison_expr_()
                self.name_last_node('@')
            with self._option():
                self._condition_expr_()
                self.name_last_node('@')
            self._error('no available options')

    @tatsumasu()
    def _comparison_expr_(self):  # noqa
        # base_expr ('>=' | '>' | '<=' | '<' | '=' | '<>') base_expr
        self._base_expr_()
        with self._group():
            with self._choice():
                with self._option():
                    self._token('>=')
                with self._option():
                    self._token('>')
                with self._option():
                    self._token('<=')
                with self._option():
                    self._token('<')
                with self._option():
                    self._token('=')
                with self._option():
                    self._token('<>')
                self._error('no available options')
        self._cut()
        self._base_expr_()

    @tatsumasu()
    def _condition_expr_(self):  # noqa
        self._base_expr_()

    @tatsumasu()
    def _function_call_expr_(self):  # noqa
        # id ('(' ')' | expr+)
        self._id_()
        self.add_last_node_to_name('@')
        with self._group():
            with self._choice():
                with self._option():
                    self._token('(')
                    self._token(')')
                with self._option():

                    def block2():
                        self._expr_()
                    self._positive_closure(block2)
                self._error('no available options')
        self.add_last_node_to_name('@')

    @tatsumasu()
    def _let_expr_(self):  # noqa
        # 'let' id '=' base_expr 'in' base_expr
        self._token('let')
        self._id_()
        self.add_last_node_to_name('@')
        self._token('=')
        self._cut()
        self._base_expr_()
        self.add_last_node_to_name('@')
        self._cut()
        self._token('in')
        self._cut()
        self._base_expr_()
        self.add_last_node_to_name('@')

    @tatsumasu()
    def _lambda_expr_(self):  # noqa
        # 'let' id ('(' ')' | id+) '=' base_expr 'in' base_expr
        self._token('let')
        self._id_()
        self.add_last_node_to_name('@')
        with self._group():
            with self._choice():
                with self._option():
                    self._token('(')
                    self._token(')')
                with self._option():

                    def block2():
                        self._id_()
                    self._positive_closure(block2)
                self._error('no available options')
        self.add_last_node_to_name('@')
        self._token('=')
        self._cut()
        self._base_expr_()
        self.add_last_node_to_name('@')
        self._cut()
        self._token('in')
        self._cut()
        self._base_expr_()
        self.add_last_node_to_name('@')

    @tatsumasu()
    def _if_expr_(self):  # noqa
        # 'if' bool_expr 'then' base_expr 'else' base_expr
        self._token('if')
        self._cut()
        self._bool_expr_()
        self.add_last_node_to_name('@')
        self._cut()
        self._token('then')
        self._cut()
        self._base_expr_()
        self.add_last_node_to_name('@')
        self._cut()
        self._token('else')
        self._cut()
        self._base_expr_()
        self.add_last_node_to_name('@')

    @tatsumasu()
    def _function_(self):  # noqa
        # Top-level 'let' definition: 'let' id ('(' ')' | id+) '=' base_expr
        self._token('let')
        self._id_()
        self.add_last_node_to_name('@')
        with self._group():
            with self._choice():
                with self._option():
                    self._token('(')
                    self._token(')')
                with self._option():

                    def block2():
                        self._id_()
                    self._positive_closure(block2)
                self._error('no available options')
        self.add_last_node_to_name('@')
        self._cut()
        self._token('=')
        self._cut()
        self._base_expr_()
        self.add_last_node_to_name('@')

    @tatsumasu()
    def _program_(self):  # noqa
        # (function | base_expr)* until end of input.

        def block1():
            with self._choice():
                with self._option():
                    self._function_()
                with self._option():
                    self._base_expr_()
                self._error('no available options')
        self._closure(block1)
        self.name_last_node('@')
        self._check_eof()

    @tatsumasu()
    def _start_(self):  # noqa
        self._program_()
class BoiSemantics(object):
    """Default no-op semantics: every rule returns its AST unchanged.

    Subclass and override individual rule methods to attach behavior.
    (Auto-generated by TatSu; regenerate rather than hand-edit.)
    """

    def id(self, ast):  # noqa
        return ast

    def var(self, ast):  # noqa
        return ast

    def float(self, ast):  # noqa
        return ast

    def value(self, ast):  # noqa
        return ast

    def expr(self, ast):  # noqa
        return ast

    def multiplicative_expr(self, ast):  # noqa
        return ast

    def pow_expr(self, ast):  # noqa
        return ast

    def additive_expr(self, ast):  # noqa
        return ast

    def base_expr(self, ast):  # noqa
        return ast

    def bool_expr(self, ast):  # noqa
        return ast

    def comparison_expr(self, ast):  # noqa
        return ast

    def condition_expr(self, ast):  # noqa
        return ast

    def function_call_expr(self, ast):  # noqa
        return ast

    def let_expr(self, ast):  # noqa
        return ast

    def lambda_expr(self, ast):  # noqa
        return ast

    def if_expr(self, ast):  # noqa
        return ast

    def function(self, ast):  # noqa
        return ast

    def program(self, ast):  # noqa
        return ast

    def start(self, ast):  # noqa
        return ast
def main(filename, start=None, **kwargs):
    """Parse *filename* (or stdin when empty/'-') and return the AST.

    ``start`` names the start rule; defaults to 'id' (the generator's
    default, not the grammar's 'start' rule -- pass start='start' to parse a
    whole program).  Extra keyword arguments are forwarded to
    ``Parser.parse``.
    """
    if start is None:
        start = 'id'
    if not filename or filename == '-':
        text = sys.stdin.read()
    else:
        with open(filename) as f:
            text = f.read()
    parser = BoiParser()
    return parser.parse(text, rule_name=start, filename=filename, **kwargs)
if __name__ == '__main__':
    import json
    from tatsu.util import asjson

    # generic_main handles CLI argument parsing and calls main() above.
    ast = generic_main(main, BoiParser, name='Boi')
    print('AST:')
    print(ast)
    print()
    print('JSON:')
    # asjson converts the TatSu AST into JSON-serializable structures.
    print(json.dumps(asjson(ast), indent=2))
    print()
| [
"jkarns275@gmail.com"
] | jkarns275@gmail.com |
df43518630c7ca82014fbf33f662d1d4af83bbca | 03a7a46f3cc00486ff46edcf4c4390dd64e214e5 | /lab2stack.py | c23b9e575cae786df29e52c83243cb6579e50154 | [] | no_license | fadybarsoum/NetSec-Reliable-Interaction-Protocol-Implementation | 1f558a8eb400f73df7d193b49571c9f2b196fca3 | a9ad4c4a1d9bbae224f1d61288f4df3686e27c2e | refs/heads/master | 2021-01-19T23:02:54.065939 | 2017-05-03T19:57:33 | 2017-05-03T19:57:33 | 88,915,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | from src.rip import RIPProtocol | [
"fady.m.barsoum@gmail.com"
] | fady.m.barsoum@gmail.com |
9a71057ca86eb6931927a2afbb8ea436b8c68c37 | afd44f9bf1469418ae4709f48f2c3c188b45eb73 | /preprocessing/text_processor.py | 88a513a6b2da416865452ab9af1cab27c4987d68 | [] | no_license | zerebom/pytoolkit | 2ed359ec0ef612461dec24b57e746f99f212d540 | 078a2fa786a755d6fe0ee69dd8caecec833fb2fa | refs/heads/master | 2020-06-29T06:20:11.069967 | 2019-09-18T01:59:14 | 2019-09-18T01:59:14 | 200,461,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,065 | py | import urllib.request, urllib.error
import re
import MeCab
import mojimoji
from sklearn.feature_extraction.text import TfidfVectorizer
def get_stopword()->list:
    """Build a combined Japanese + English stop-word list.

    Downloads the SlothLib Japanese stop-word file over HTTP, drops blank
    lines, and appends a hard-coded English stop-word list.

    Returns:
        list[str]: Japanese stop words followed by English stop words.

    Raises:
        urllib.error.URLError: if the SlothLib download fails.
    """
    slothlib_path = 'http://svn.sourceforge.jp/svnroot/slothlib/CSharp/Version1/SlothLib/NLP/Filter/StopWord/word/Japanese.txt'
    # Fix: close the HTTP response deterministically (it previously leaked).
    with urllib.request.urlopen(url=slothlib_path) as slothlib_file:
        slothlib_stopwords = [line.decode("utf-8").strip() for line in slothlib_file]
    slothlib_stopwords = [ss for ss in slothlib_stopwords if not ss==u'']
    eng_stop=["a's" , "able" , "about" , "above" , "according" , "accordingly" , "across" , "actually" , "after" , "afterwards" , "again" , "against" , "ain't" , "all" , "allow" , "allows" , "almost" , "alone" , "along" , "already" , "also" , "although" , "always" , "am" , "among" , "amongst" , "an" , "and" , "another" , "any" , "anybody" , "anyhow" , "anyone" , "anything" , "anyway" , "anyways" , "anywhere" , "apart" , "appear" , "appreciate" , "appropriate" , "are" , "aren't" , "around" , "as" , "aside" , "ask" , "asking" , "associated" , "at" , "available" , "away" , "awfully" , "be" , "became" , "because" , "become" , "becomes" , "becoming" , "been" , "before" , "beforehand" , "behind" , "being" , "believe" , "below" , "beside" , "besides" , "best" , "better" , "between" , "beyond" , "both" , "brief" , "but" , "by" , "c'mon" , "c's" , "came" , "can" , "can't" , "cannot" , "cant" , "cause" , "causes" , "certain" , "certainly" , "changes" , "clearly" , "co" , "com" , "come" , "comes" , "concerning" , "consequently" , "consider" , "considering" , "contain" , "containing" , "contains" , "corresponding" , "could" , "couldn't" , "course" , "currently" , "definitely" , "described" , "despite" , "did" , "didn't" , "different" , "do" , "does" , "doesn't" , "doing" , "don't" , "done" , "down" , "downwards" , "during" , "each" , "edu" , "eg" , "eight" , "either" , "else" , "elsewhere" , "enough" , "entirely" , "especially" , "et" , "etc" , "even" , "ever" , "every" , "everybody" , "everyone" , "everything" , "everywhere" , "ex" , "exactly" , "example" , "except" , "far" , "few" , "fifth" , "first" , "five" , "followed" , "following" , "follows" , "for" , "former" , "formerly" , "forth" , "four" , "from" , "further" , "furthermore" , "get" , "gets" , "getting" , "given" , "gives" , "go" , "goes" , "going" , "gone" , "got" , "gotten" , "greetings" , "had" , "hadn't" , "happens" , "hardly" , "has" , "hasn't" , "have" , "haven't" , "having" , "he" , "he's" , "hello" , "help" , 
    "hence" , "her" , "here" , "here's" , "hereafter" , "hereby" , "herein" , "hereupon" , "hers" , "herself" , "hi" , "him" , "himself" , "his" , "hither" , "hopefully" , "how" , "howbeit" , "however" , "i'd" , "i'll" , "i'm" , "i've" , "ie" , "if" , "ignored" , "immediate" , "in" , "inasmuch" , "inc" , "indeed" , "indicate" , "indicated" , "indicates" , "inner" , "insofar" , "instead" , "into" , "inward" , "is" , "isn't" , "it" , "it'd" , "it'll" , "it's" , "its" , "itself" , "just" , "keep" , "keeps" , "kept" , "know" , "known" , "knows" , "last" , "lately" , "later" , "latter" , "latterly" , "least" , "less" , "lest" , "let" , "let's" , "like" , "liked" , "likely" , "little" , "look" , "looking" , "looks" , "ltd" , "mainly" , "many" , "may" , "maybe" , "me" , "mean" , "meanwhile" , "merely" , "might" , "more" , "moreover" , "most" , "mostly" , "much" , "must" , "my" , "myself" , "name" , "namely" , "nd" , "near" , "nearly" , "necessary" , "need" , "needs" , "neither" , "never" , "nevertheless" , "new" , "next" , "nine" , "no" , "nobody" , "non" , "none" , "noone" , "nor" , "normally" , "not" , "nothing" , "novel" , "now" , "nowhere" , "obviously" , "of" , "off" , "often" , "oh" , "ok" , "okay" , "old" , "on" , "once" , "one" , "ones" , "only" , "onto" , "or" , "other" , "others" , "otherwise" , "ought" , "our" , "ours" , "ourselves" , "out" , "outside" , "over" , "overall" , "own" , "particular" , "particularly" , "per" , "perhaps" , "placed" , "please" , "plus" , "possible" , "presumably" , "probably" , "provides" , "que" , "quite" , "qv" , "rather" , "rd" , "re" , "really" , "reasonably" , "regarding" , "regardless" , "regards" , "relatively" , "respectively" , "right" , "said" , "same" , "saw" , "say" , "saying" , "says" , "second" , "secondly" , "see" , "seeing" , "seem" , "seemed" , "seeming" , "seems" , "seen" , "self" , "selves" , "sensible" , "sent" , "serious" , "seriously" , "seven" , "several" , "shall" , "she" , "should" , "shouldn't" , "since" , "six" 
    , "so" , "some" , "somebody" , "somehow" , "someone" , "something" , "sometime" , "sometimes" , "somewhat" , "somewhere" , "soon" , "sorry" , "specified" , "specify" , "specifying" , "still" , "sub" , "such" , "sup" , "sure" , "t's" , "take" , "taken" , "tell" , "tends" , "th" , "than" , "thank" , "thanks" , "thanx" , "that" , "that's" , "thats" , "the" , "their" , "theirs" , "them" , "themselves" , "then" , "thence" , "there" , "there's" , "thereafter" , "thereby" , "therefore" , "therein" , "theres" , "thereupon" , "these" , "they" , "they'd" , "they'll" , "they're" , "they've" , "think" , "third" , "this" , "thorough" , "thoroughly" , "those" , "though" , "three" , "through" , "throughout" , "thru" , "thus" , "to" , "together" , "too" , "took" , "toward" , "towards" , "tried" , "tries" , "truly" , "try" , "trying" , "twice" , "two" , "un" , "under" , "unfortunately" , "unless" , "unlikely" , "until" , "unto" , "up" , "upon" , "us" , "use" , "used" , "useful" , "uses" , "using" , "usually" , "value" , "various" , "very" , "via" , "viz" , "vs" , "want" , "wants" , "was" , "wasn't" , "way" , "we" , "we'd" , "we'll" , "we're" , "we've" , "welcome" , "well" , "went" , "were" , "weren't" , "what" , "what's" , "whatever" , "when" , "whence" , "whenever" , "where" , "where's" , "whereafter" , "whereas" , "whereby" , "wherein" , "whereupon" , "wherever" , "whether" , "which" , "while" , "whither" , "who" , "who's" , "whoever" , "whole" , "whom" , "whose" , "why" , "will" , "willing" , "wish" , "with" , "within" , "without" , "won't" , "wonder" , "would" , "wouldn't" , "yes" , "yet" , "you" , "you'd" , "you'll" , "you're" , "you've" , "your" , "yours" , "yourself" , "yourselves" , "zero"]
    sw=slothlib_stopwords+eng_stop
    return sw
tagger = MeCab.Tagger('-d /usr/local/lib/mecab/dic/mecab-ipadic-neologd')
def normalize_number(text):
    """Mask runs of ASCII digits with '0' characters, keeping (capped) length.

    A run of N consecutive digits becomes ``min(N, 4)`` zeros:
    '7' -> '0', '42' -> '00', '123' -> '000', '2019' and longer -> '0000'.

    Bug fix: the original chained ``re.sub`` calls used character classes
    such as ``[10-99]+`` (which simply match any digits, not a numeric
    range), so every digit run collapsed to exactly '0000' regardless of its
    length.  The escalating patterns show the intent was length-dependent
    masking, implemented here directly.
    """
    # 連続した数字を桁数(最大4桁)ぶんの0で置換
    return re.sub(r'[0-9]+', lambda m: '0' * min(len(m.group(0)), 4), text)
def delete_number(text):
    """Remove every run of consecutive ASCII digits from *text*.

    Idiom fix: the original pattern ``[0-9999999]+`` is a character class
    (equivalent to ``[0-9]+`` with redundant 9s), not a numeric range --
    spell it plainly so the intent is clear.  Behavior is unchanged.
    """
    # 連続した数字を削除
    replaced_text = re.sub(r'[0-9]+', '', text)
    return replaced_text
# 入力されたテキストを単語単位に分割して返却する関数
def parse_text(text, min_word_len=1):
words = []
try:
tagger.parse(text).rstrip().split("\n")[:-1]
except:
return ""
for morph in tagger.parse(text).rstrip().split("\n")[:-1]:
#表層系
# word=morph.split("\t")[0]
#標準形
word = morph.split(",")[-3]
word_cls = morph.split("\t")[1].split(",")[0]
word = mojimoji.zen_to_han(word, kana=False).lower()
if not word in sw:
if len(word) > min_word_len:
#品詞によるスクリーニング
# if word_cls in ['名詞']:
words.append(delete_number(word))
return " ".join(words)
def tokenize(s):
    """Split *s* on spaces, ASCII punctuation and common CJK/latin symbols."""
    separators = '[ !"#$%&\'(+)*,-./:;<=>?@\\\[\]^_`{|}~“”¨«»®´·º½¾¿¡§£₤‘’。、]'
    return re.split(separators, s)
def get_len(text):
    """Length of *text* when it is a str, else 0.

    Usage: ``df[col] = df[col].apply(get_len)``
    """
    if type(text) == str:
        return len(text)
    return 0
tfidf_vectorizer = TfidfVectorizer(max_df=0.5, min_df=1,
max_features=10000, norm='l2',
tokenizer=tokenize, ngram_range=(1, 2))
| [
"ice.choco.pudding.kokoro@gmail.com"
] | ice.choco.pudding.kokoro@gmail.com |
a80a155616ff956f9c128c0f892b37b9c3a26c9c | bc7f8b45413692fbf3b74287ed95ce6b2857c83c | /src/test_ocr.py | 18367e4d149ba270d56c26f52d3c540961f356a7 | [
"MIT"
] | permissive | mouradsm/vrpdr | c58b5d699d32f8858af2e7bd3bd203d56c806b97 | 700de74ac322ef2d02be36c070039dcaead918e5 | refs/heads/master | 2022-10-23T14:16:50.189659 | 2020-06-20T16:48:57 | 2020-06-20T16:48:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | import cv2 as cv
import numpy as np
import argparse
import sys
import os.path
import logging
import matplotlib.pyplot as plt
from ocr import OCR
def plot_images(data, rows, cols, cmap='gray'):
    """Show each (title, image) pair of *data* on a rows x cols pyplot grid.

    Does nothing when *data* is empty; axis ticks are hidden on every panel.
    """
    if not data:
        return
    for position, (title, image) in enumerate(data.items(), start=1):
        plt.subplot(rows, cols, position)
        plt.imshow(image, cmap)
        plt.title(title)
        plt.xticks([])
        plt.yticks([])
    plt.show()
def display_images(img_list, row, col):
    """Plot *img_list* on a row x col grid, titling the panels "1", "2", ..."""
    if not img_list:
        return
    images = {str(index): img for index, img in enumerate(img_list, start=1)}
    plot_images(images, row, col, cmap='gray')
if __name__ == '__main__':
    # CLI: --image points at the input image to run OCR on.
    parser = argparse.ArgumentParser(description='Testing OCR.')
    parser.add_argument('--image', help='Path to image file.')
    args = parser.parse_args()

    logging.getLogger().setLevel(logging.DEBUG)

    # Open the image file
    if not os.path.isfile(args.image):
        # Bug fix: logging.error was called with extra positional args but no
        # %-placeholders, which mangles the message (logging treats the extra
        # args as format arguments); use lazy %-formatting instead.
        logging.error("Input image file %s doesn't exist", args.image)
        sys.exit(1)

    cap = cv.VideoCapture(args.image)
    hasFrame, frame = cap.read()
    if hasFrame:
        images = {}
        images['frame'] = frame
        # Run the attention-OCR model on the single frame (CPU only).
        ocr = OCR(model_filename="../config/attention_ocr_model.pth", use_cuda=False, threshold=0.7)
        pred = ocr.predict(frame)
        logging.info(f'Prediction: {pred}')
        plot_images(images, 1, 3, cmap='gray')
    else:
        logging.debug("Frame not found!")
"andreybicalho@gmail.com"
] | andreybicalho@gmail.com |
1a57dcb6dd5bc694a8c241ff875abb2a00b8f021 | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/Prime Services/FPythonCode/PaymentFees.py | d79409eb38743fa11ab65e6b6c2c6f2b1438516b | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,474 | py | """-----------------------------------------------------------------------
MODULE
PaymentFees
DESCRIPTION
Date : 2012-09-19
Purpose : Returns the payment fees of a trade
Department and Desk : Prime Services
Requester : Danilo Mantoan
Developer : Nidheesh Sharma
CR Number : 556348
ENDDESCRIPTION
HISTORY
Date: CR Number: Developer: Description:
2013-03-19 C885651 Nidheesh Sharma Excluded INS, SET, Brokerage fees from OtherFee
2014-03-12 C1819376 Hynek Urban Refactor & minor bug fix of other fees.
2018-11-22 1001164411 Ondrej Bahounek ABITFA-5622: Convert Other Fees to trade currency.
2018-11-28 Jaco Swanepoel Payment migration: convert cash payments to appropriate new additional payment types.
-----------------------------------------------------------------------"""
import acm
FX_COLUMN_ID = 'FX Rate On Display Curr'
CS = acm.Calculations().CreateCalculationSpace(acm.GetDefaultContext(), 'FPortfolioSheet')
ZAR_CUR = acm.FCurrency['ZAR']
PAYMENT_TYPES_TO_EXCLUDE = ('Premium',
'Dividend Suppression',
'INS',
'SET',
'Brokerage Vatable',
'Execution Fee',
'Aggregated Settled',
'Aggregated Accrued',
'Aggregated Funding',
'Aggregated Dividends',
'Aggregated Depreciation',
'Aggregated Future Settle',
'Aggregated Forward Funding PL',
'Aggregated Cash Open Value',
'Aggregated Cash Position',
'Aggregated Forward Premium',
'Aggregated Forward Settled',
'Aggregated Forward Dividends',
'Aggregated Forward Position')
PAYMENT_TEXTS_TO_EXCLUDE = ('Execution', 'ExecutionFee', 'INS', 'SET', 'Brokerage')
def ReturnOtherFee(trade, val_date):
    """
    Return the sum of all fees of a trade up to the specified date, as a
    ZAR-denominated value.

    Payments whose Type is in PAYMENT_TYPES_TO_EXCLUDE or whose Text is in
    PAYMENT_TEXTS_TO_EXCLUDE are skipped, as are payments valid after
    val_date.  Non-ZAR payments are converted to ZAR using the sheet FX rate
    as of val_date.
    """
    CS.SimulateGlobalValue('Valuation Date', val_date)
    CS.SimulateGlobalValue('Portfolio Profit Loss End Date', 'Custom Date')
    CS.SimulateGlobalValue('Portfolio Profit Loss End Date Custom', val_date)
    sumOfOtherFees = 0
    # Bug fix: ``not in ('Void')`` was a substring test against the string
    # 'Void' (the parentheses do not make a tuple), so statuses such as 'V'
    # or 'oid' would also have been excluded.  Use plain inequality.
    if trade.Status() != 'Void':
        payments = trade.Payments()
        for payment in payments:
            if payment.Type() in PAYMENT_TYPES_TO_EXCLUDE or\
               payment.Text() in PAYMENT_TEXTS_TO_EXCLUDE:
                continue
            if payment.ValidFrom() > val_date:
                continue
            amount = payment.Amount()
            if ZAR_CUR.Name() != payment.Currency().Name():
                # Convert all non-ZAR payments to ZAR.  This should ideally
                # be converted to trade currency, but that requires wider
                # changes and testing (see original author's note).
                CS.SimulateValue(ZAR_CUR, "Portfolio Currency", payment.Currency())
                fx_rate = CS.CreateCalculation(ZAR_CUR, FX_COLUMN_ID).Value().Number()
                amount *= fx_rate
            sumOfOtherFees += amount
    return acm.DenominatedValue(sumOfOtherFees, ZAR_CUR.Name(), None, val_date)
#Function to return termination fee of a trade
def ReturnTerminationFee(trade):
    """Sum the termination fees booked on a terminated trade.

    Counts Cash payments whose text mentions Termination/Terminated, plus any
    payment of type 'Termination Fee'.  Returns 0 for non-terminated trades.

    Bug fix: ``in ('Terminated')`` / ``in ('Cash')`` were substring tests
    against a plain string (the parentheses do not make a tuple), so e.g. a
    status of 'Term' would have matched; use equality instead.
    """
    terminationFee = 0
    if trade.Status() == 'Terminated':
        payments = trade.Payments()
        for payment in payments:
            if payment.Type() == 'Cash' and ('Termination' in payment.Text() or 'Terminated' in payment.Text()):
                terminationFee = terminationFee + payment.Amount()
            elif payment.Type() == 'Termination Fee':
                terminationFee = terminationFee + payment.Amount()
    return terminationFee
#Function to return termination fee date of a trade
def ReturnTerminationFeeDate(trade):
    """Return the pay day of the LAST termination-fee payment on a trade.

    Matches Cash payments whose text mentions Termination/Terminated and
    payments of type 'Termination Fee'; the loop deliberately does not break,
    so later payments win.  Returns '' for non-terminated trades.

    Bug fix: ``in ('Terminated')`` / ``in ('Cash')`` were substring tests
    against a plain string (no tuple); use equality instead.
    """
    terminationDate = ''
    if trade.Status() == 'Terminated':
        payments = trade.Payments()
        for payment in payments:
            if payment.Type() == 'Cash' and ('Termination' in payment.Text() or 'Terminated' in payment.Text()):
                terminationDate = payment.PayDay()
            elif payment.Type() == 'Termination Fee':
                terminationDate = payment.PayDay()
    return terminationDate
#Function to return termination fee date of a trade in the correct format from an array of dates
def ReturnSingleTerminationFeeDate(arrayOfDates):
    """Format the first non-empty string entry of *arrayOfDates* as dd/mm/yyyy.

    Returns '' when no usable date is found.
    """
    for candidate in arrayOfDates:
        if not (isinstance(candidate, str) and candidate != ''):
            continue
        formatter = acm.FDateFormatter('dateFormatter')
        formatter.FormatDefinition("%d/%m/%Y")
        return formatter.Format(candidate)
    return ''
| [
"nencho.georogiev@absa.africa"
] | nencho.georogiev@absa.africa |
63d50f46e6763c50b438c35733b409c516416606 | 33cff13b90fdd628560baef8b3f6d68ceaad912c | /tests/test_commands/test_package_downloads.py | e4b7b094ed22878a396f1c1e911369fd769b9165 | [
"MIT"
] | permissive | rosdyana/dephell | 3139140d6f16288177705020a625897f91f2514b | 993a212ce17dda04a878ceac64854d809f3dc47b | refs/heads/master | 2020-08-06T09:38:21.150070 | 2019-09-27T16:58:23 | 2019-09-27T16:58:23 | 212,927,181 | 0 | 0 | MIT | 2019-10-05T01:22:23 | 2019-10-05T01:22:23 | null | UTF-8 | Python | false | false | 708 | py | # built-in
import json
# external
import pytest
# project
from dephell.commands import PackageDownloadsCommand
from dephell.config import Config
@pytest.mark.skipif(True, reason='disable while pypistat is down')
@pytest.mark.allow_hosts()
def test_package_downloads_command(capsys):
config = Config()
config.attach({
'level': 'WARNING',
'silent': True,
})
command = PackageDownloadsCommand(argv=['DJANGO'], config=config)
result = command()
captured = capsys.readouterr()
output = json.loads(captured.out)
assert result is True
assert len(output['pythons']) > 4
assert len(output['systems']) > 2
assert '█' in output['pythons'][0]['chart']
| [
"master_fess@mail.ru"
] | master_fess@mail.ru |
b327266507aba7d35b343d48b9710ab0f36214ad | 0da165e72316bff15e8330a9d1789bd743d30689 | /quadcopter/agents/ddpg_v1/ddpg_actor.py | 83c47f1620e3d085e795d5e60e55546294fba7ba | [] | no_license | padickinson/RL-Quadcopter-2 | 1267ce761780ea5d79f705ca2286ae780b95298c | ebb70ac7f7252b69cd94111bbe0637a17196e87e | refs/heads/master | 2020-03-29T10:19:54.320868 | 2018-09-21T21:31:43 | 2018-09-21T21:31:43 | 149,799,738 | 0 | 0 | null | 2018-09-21T18:02:26 | 2018-09-21T18:02:26 | null | UTF-8 | Python | false | false | 2,945 | py | from keras import layers, models, optimizers
from keras import backend as K
class Actor:
    """Actor (Policy) Model for DDPG: maps states to continuous actions."""

    def __init__(self, state_size, action_size, action_low, action_high, lr, alpha):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            action_low (array): Min value of each action dimension
            action_high (array): Max value of each action dimension
            lr (float): Adam learning rate for the actor optimizer
            alpha (float): negative-slope coefficient for the LeakyReLU layers
        """
        self.state_size = state_size
        self.action_size = action_size
        self.action_low = action_low
        self.action_high = action_high
        # Used to rescale the tanh output into [action_low, action_high].
        self.action_range = self.action_high - self.action_low

        # Initialize any other variables here
        self.learning_rate = lr
        self.hidden_size1 = 32
        self.hidden_size2 = 32
        self.alpha = alpha

        self.build_model()

    def build_model(self):
        """Build an actor (policy) network that maps states -> actions."""
        # Define input layer (states)
        states = layers.Input(shape=(self.state_size,), name='states')

        # Add hidden layers: two BatchNorm + Dense + LeakyReLU stacks.
        net = layers.BatchNormalization()(states)
        net = layers.Dense(units=self.hidden_size1, activation='linear')(net)
        net = layers.advanced_activations.LeakyReLU(self.alpha)(net)
        net = layers.BatchNormalization()(net)
        net = layers.Dense(units=self.hidden_size2, activation='linear')(net)
        net = layers.advanced_activations.LeakyReLU(self.alpha)(net)
        net = layers.BatchNormalization()(net)
        # net = layers.Dense(units=32, activation='relu')(net)
        # net = layers.BatchNormalization()(net)
        # Try different layer sizes, activations, add batch normalization, regularizers, etc.

        # Add final output layer with tanh activation (output in [-1, 1]);
        # note: the original comment said sigmoid, but tanh is what is used.
        raw_actions = layers.Dense(units=self.action_size, activation='tanh',
                                   name='raw_actions')(net)

        # Scale the tanh output into the proper [action_low, action_high]
        # range for each action dimension.
        actions = layers.Lambda(lambda x: (x * self.action_range) + self.action_low,
                                name='actions')(raw_actions)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=actions)

        # Define loss function using action value (Q value) gradients
        # supplied by the critic; minimizing -grad * action ascends Q.
        action_gradients = layers.Input(shape=(self.action_size,))
        loss = K.mean(-action_gradients * actions)

        # Incorporate any additional losses here (e.g. from regularizers)

        # Define optimizer and training function
        optimizer = optimizers.Adam(lr=self.learning_rate)
        updates_op = optimizer.get_updates(params=self.model.trainable_weights, loss=loss)
        # train_fn(inputs=[states, action_gradients, learning_phase]) applies
        # one actor update step.
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients, K.learning_phase()],
            outputs=[],
            updates=updates_op)
| [
"padickinson@gmail.com"
] | padickinson@gmail.com |
4abe47ce110bd58ce4ebf9615f794bbff3b4f553 | ee2f57ffb3c0bec9a196090022a623a342a9ce96 | /PythonApp/FlaskWebProject1/runserver.py | dbe79fd5e33d3bd051c2ce33ad1fd4476e00178c | [] | no_license | cherho0/pythonApp | c313f2b2869530a79b0cba26d68e2c61df5d5ad1 | 9c7c6fb851358bc85956e9c512ba23b80d8cc3b3 | refs/heads/master | 2021-01-20T03:54:37.721785 | 2017-09-07T03:49:49 | 2017-09-07T03:49:49 | 101,373,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | """
This script runs the FlaskWebProject1 application using a development server.
"""
from os import environ
from FlaskWebProject1 import app
import socket
if __name__ == '__main__':
    #HOST = environ.get('SERVER_HOST', 'localhost')
    # Bind to this machine's LAN IP rather than localhost so the dev server
    # is reachable from other hosts on the network.
    # NOTE(review): this exposes the Flask dev server externally on port
    # 5555 -- confirm this is intended for anything beyond local testing.
    hostname = socket.gethostname()
    ip = socket.gethostbyname(hostname)
    print(ip)
    app.run(ip, 5555)
| [
"czy11807@ly.com"
] | czy11807@ly.com |
5aa9f68fd54dbb2103720c1b33fda1491da44482 | 436743a9a77d417350e27736dd20f117bb8d625a | /desidlas/preprocess/preprocess.py | d8fe688a2564fb58b2aa680d07f00882c2a2c179 | [] | no_license | samwang141224/dla_cnn | b0a017030e6007016fa9b889890fc6057fe9c3d2 | 402f675b73d7c449aa67dbe969f3ad3ab3ea3951 | refs/heads/main | 2023-03-26T23:26:32.481646 | 2021-03-31T07:48:39 | 2021-03-31T07:48:39 | 309,691,930 | 0 | 0 | null | 2021-03-31T07:48:39 | 2020-11-03T13:17:42 | Jupyter Notebook | UTF-8 | Python | false | false | 17,675 | py | """ Code for pre-processing DESI data"""
''' Basic Recipe
0. Load the DESI mock spectrum
1. Resample to a constant dlambda/lambda dispersion
2. Renormalize the flux?
3. Generate a Sightline object with DLAs
4. Add labels
5. Write to disk (numpy or TF)
'''
import numpy as np
from desidlas.dla_cnn.spectra_utils import get_lam_data
#from dla_cnn.data_model.DataMarker import Marker
#no Marker in the DESI mock spectra, but maybe the sv data will have this value
from scipy.interpolate import interp1d
from os.path import join, exists
from os import remove
import csv
# Set defined items
from desidlas.dla_cnn import defs
REST_RANGE = defs.REST_RANGE
kernel = defs.kernel
def label_sightline(sightline, kernel=kernel, REST_RANGE=REST_RANGE, pos_sample_kernel_percent=0.3):
    """
    Add classification/offset/column-density labels to a sightline based on
    the DLAs it contains, and attach them to the Sightline object.

    Parameters
    ----------
    sightline: dla_cnn.data_model.Sightline
    pos_sample_kernel_percent: float
        fraction of the kernel window treated as "positive" around each DLA
    kernel: pixel numbers for each spectra window
    REST_RANGE: [900,1346], wavelength range of DLAs in the rest frame

    Returns
    -------
    classification: np.ndarray
        is 1 / 0 / -1 for DLA/nonDLA/border
    offsets_array: np.ndarray
        signed pixel offset to the nearest DLA center (0 far from any DLA)
    column_density: np.ndarray
        log column density of the nearest DLA (0 far from any DLA)
    """
    lam, lam_rest, ix_dla_range = get_lam_data(sightline.loglam, sightline.z_qso, REST_RANGE)
    samplerangepx = int(kernel*pos_sample_kernel_percent/2) #60
    #kernelrangepx = int(kernel/2) #200

    # Keep only DLAs whose center falls in the rest-frame Lya forest window
    # (912-1220 A rest) and on the observable blue end (>= 3700 A observed).
    ix_dlas=[]
    coldensity_dlas=[]
    for dla in sightline.dlas:
        if (912<(dla.central_wavelength/(1+sightline.z_qso))<1220)&(dla.central_wavelength>=3700):
            ix_dlas.append(np.abs(lam[ix_dla_range]-dla.central_wavelength).argmin())
            coldensity_dlas.append(dla.col_density)   # column densities matching ix_dlas

    '''
    # FLUXES - Produce a 1748x400 matrix of flux values
    fluxes_matrix = np.vstack(map(lambda f,r:f[r-kernelrangepx:r+kernelrangepx],
                                  zip(itertools.repeat(sightline.flux), np.nonzero(ix_dla_range)[0])))
    '''

    # CLASSIFICATION (1 = positive sample, 0 = negative sample, -1 = border sample not used
    # Start with all samples zero
    classification = np.zeros((np.sum(ix_dla_range)), dtype=np.float32)
    # overlay samples that are too close to a known DLA, write these for all DLAs before overlaying positive sample 1's
    for ix_dla in ix_dlas:
        classification[ix_dla-samplerangepx*2:ix_dla+samplerangepx*2+1] = -1
        # Mark out Ly-B areas
        lyb_ix = sightline.get_lyb_index(ix_dla)
        classification[lyb_ix-samplerangepx:lyb_ix+samplerangepx+1] = -1
    # mark out bad samples from custom defined markers
    #for marker in sightline.data_markers:
        #assert marker.marker_type == Marker.IGNORE_FEATURE              # we assume there are no other marker types for now
        #ixloc = np.abs(lam_rest - marker.lam_rest_location).argmin()
        #classification[ixloc-samplerangepx:ixloc+samplerangepx+1] = -1
    # overlay samples that are positive (done last so positives win over -1)
    for ix_dla in ix_dlas:
        classification[ix_dla-samplerangepx:ix_dla+samplerangepx+1] = 1

    # OFFSETS & COLUMN DENSITY
    offsets_array = np.full([np.sum(ix_dla_range)], np.nan, dtype=np.float32)     # Start all NaN markers
    column_density = np.full([np.sum(ix_dla_range)], np.nan, dtype=np.float32)
    # Add DLAs, this loop will work from the DLA outward updating the offset values and not update it
    # if it would overwrite something set by another nearby DLA
    for i in range(int(samplerangepx+1)):
        for ix_dla,j in zip(ix_dlas,range(len(ix_dlas))):
            offsets_array[ix_dla+i] = -i if np.isnan(offsets_array[ix_dla+i]) else offsets_array[ix_dla+i]
            offsets_array[ix_dla-i] = i  if np.isnan(offsets_array[ix_dla-i]) else offsets_array[ix_dla-i]
            column_density[ix_dla+i] = coldensity_dlas[j] if np.isnan(column_density[ix_dla+i]) else column_density[ix_dla+i]
            column_density[ix_dla-i] = coldensity_dlas[j] if np.isnan(column_density[ix_dla-i]) else column_density[ix_dla-i]
    # Remaining NaNs (pixels far from any DLA) become 0.
    offsets_array = np.nan_to_num(offsets_array)
    column_density = np.nan_to_num(column_density)

    # Append these to the Sightline
    sightline.classification = classification
    sightline.offsets = offsets_array
    sightline.column_density = column_density

    # classification is 1 / 0 / -1 for DLA/nonDLA/border
    # offsets_array is offset
    return classification, offsets_array, column_density
def rebin(sightline, v):
    """
    Resample and rebin the input Sightline object's data onto a grid with
    constant dlambda/lambda (i.e. constant velocity width per pixel).

    The rebinning is flux-conserving: the flux is integrated (cumulative sum
    over the original pixel widths), interpolated at the new pixel edges, and
    differenced, so total counts are preserved.

    Parameters
    ----------
    sightline: :class:`dla_cnn.data_model.Sightline.Sightline`
        Its ``loglam``, ``flux`` and ``error`` attributes are replaced in place.
    v: float
        Velocity width per pixel in m/s; np.log(1+v/c) is the resulting
        dlambda/lambda, with c the speed of light.

    Returns
    -------
    :class:`dla_cnn.data_model.Sightline.Sightline`:
        The same object, mutated in place and returned for convenience.
    """
    # Speed of light in m/s.
    c = 2.9979246e8
    # Set a constant dispersion (log-wavelength step of the new grid)
    dlnlambda = np.log(1+v/c)
    wavelength = 10**sightline.loglam #the wavelength range
    max_wavelength = wavelength[-1]
    min_wavelength = wavelength[0]
    # Calculate how many pixels are needed for Rebinning in this spectra
    pixels_number = int(np.round(np.log(max_wavelength/min_wavelength)/dlnlambda))+1 #how many pixels in this spectra
    # Rebined wavelength: geometric progression starting at the old first pixel
    new_wavelength = wavelength[0]*np.exp(dlnlambda*np.arange(pixels_number))
    # Endpoints (right edges) of original pixels; the last edge is extrapolated
    npix = len(wavelength)
    wvh = (wavelength + np.roll(wavelength, -1)) / 2.
    wvh[npix - 1] = wavelength[npix - 1] + \
                    (wavelength[npix - 1] - wavelength[npix - 2]) / 2.
    # Widths of the original pixels (first width extrapolated symmetrically)
    dwv = wvh - np.roll(wvh, 1)
    dwv[0] = 2 * (wvh[0] - wavelength[0])
    med_dwv = np.median(dwv)
    # Cumulative Sum of flux*width, so differencing later conserves flux
    cumsum = np.cumsum(sightline.flux * dwv)
    # NOTE(review): the error array is cumulated like a flux (not squared);
    # presumably it holds sigma and this is a crude rebin of the noise -- confirm.
    cumvar = np.cumsum(sightline.error * dwv, dtype=np.float64)
    # Interpolate the cumulative curves at arbitrary wavelengths;
    # bounds_error=False yields NaN outside the original coverage.
    fcum = interp1d(wvh, cumsum,bounds_error=False)
    fvar = interp1d(wvh, cumvar,bounds_error=False)
    # Endpoints of new pixels (same construction as for the old grid)
    nnew = len(new_wavelength)
    nwvh = (new_wavelength + np.roll(new_wavelength, -1)) / 2.
    nwvh[nnew - 1] = new_wavelength[nnew - 1] + \
                     (new_wavelength[nnew - 1] - new_wavelength[nnew - 2]) / 2.
    # Pad starting point so bwv holds all nnew+1 pixel edges
    bwv = np.zeros(nnew + 1)
    bwv[0] = new_wavelength[0] - (new_wavelength[1] - new_wavelength[0]) / 2.
    bwv[1:] = nwvh
    # Evaluate the cumulative curves at the new pixel edges
    newcum = fcum(bwv)
    newvar = fvar(bwv)
    # Rebinned flux, var: difference of the cumulative values across each new pixel
    new_fx = (np.roll(newcum, -1) - newcum)[:-1]
    new_var = (np.roll(newvar, -1) - newvar)[:-1]
    # Normalize (preserve counts and flambda): divide by the new pixel widths
    new_dwv = bwv - np.roll(bwv, 1)
    new_fx = new_fx / new_dwv[1:]
    # Preserve S/N (crudely): rescale the noise for the changed pixel size
    med_newdwv = np.median(new_dwv)
    new_var = new_var / (med_newdwv/med_dwv) / new_dwv[1:]
    # Trim NaN edges produced by out-of-range interpolation at both ends
    left = 0
    while np.isnan(new_fx[left])|np.isnan(new_var[left]):
        left = left+1
    right = len(new_fx)
    while np.isnan(new_fx[right-1])|np.isnan(new_var[right-1]):
        right = right-1
    # No NaN may remain in the interior; otherwise the spectrum has holes
    test = np.sum((np.isnan(new_fx[left:right]))|(np.isnan(new_var[left:right])))
    assert test==0, 'Missing value in this spectra!'
    # Store the rebinned arrays back on the sightline
    sightline.loglam = np.log10(new_wavelength[left:right])
    sightline.flux = new_fx[left:right]
    sightline.error = new_var[left:right]
    return sightline
def normalize(sightline, full_wavelength, full_flux):
    """
    Normalize a sightline in place by its median continuum flux.

    The continuum level is estimated as the median of *full_flux* over the
    rest-frame window 1420-1480 Angstrom; ``sightline.flux`` and
    ``sightline.error`` are both divided by that single factor, so the
    signal-to-noise ratio is unchanged.

    Parameters
    ----------
    sightline : dla_cnn.data_model.Sightline.Sightline
        Object whose ``flux`` and ``error`` arrays are rescaled in place.
    full_wavelength : array-like
        Observed-frame wavelengths of the full spectrum.
    full_flux : array-like
        Flux of the full spectrum, sampled on *full_wavelength*.
    """
    blue_edge = 1420
    red_edge = 1480
    rest_wave = full_wavelength / (sightline.z_qso + 1)
    # Guard: the window must be non-degenerate for a continuum estimate.
    assert blue_edge <= red_edge, "No Lymann-alpha forest, Please check this spectra: %i" % sightline.id
    # One scale factor for both flux and error keeps the s/n intact.
    in_window = (rest_wave >= blue_edge) & (rest_wave <= red_edge)
    continuum = np.median(full_flux[in_window])
    sightline.flux = sightline.flux / continuum
    sightline.error = sightline.error / continuum
def estimate_s2n(sightline):
    """
    Estimate the median signal-to-noise ratio of a sightline.

    Only pixels whose rest-frame wavelength lies strictly between 1420 and
    1480 Angstrom are used; within that window the per-pixel S/N is
    ``flux / error`` and the median is returned.

    Parameters
    ----------
    sightline : dla_cnn.data_model.sightline.Sightline
        Must provide ``loglam`` (log10 wavelength), ``z_qso``, ``flux``
        and ``error``, and must cover the 1420-1480 rest-frame window.

    Returns
    -------
    float
        Median per-pixel signal-to-noise ratio inside the window.
    """
    # NOTE: an earlier revision also masked out +-3000 km/s around each DLA
    # before taking the median; that exclusion is currently disabled.
    lo, hi = 1420, 1480
    rest_wave = (10 ** sightline.loglam) / (sightline.z_qso + 1)
    in_window = (rest_wave > lo) & (rest_wave < hi)
    per_pixel_s2n = sightline.flux / sightline.error
    return np.median(per_pixel_s2n[in_window])
def generate_summary_table(sightlines, output_dir, mode = "w"):
    """
    Write a CSV summary of the given sightlines.

    One row is written per sightline with the columns: id, z_qso, s2n, then
    for each camera (b, r, z) the first/last wavelength and first/last pixel
    index, and finally two string columns ``dlas_col_density`` and
    ``dlas_central_wavelength``.  Because the number of DLAs varies, those
    two columns hold comma-joined values ("v1,v2,v3"); split on "," to
    recover them — entries at the same position describe the same DLA.

    Parameters
    ----------
    sightlines : list of dla_cnn.data_model.Sightline.Sightline
        Each must carry the full b/r/z channel data and must not have been
        rebinned.
    output_dir : str
        Path of the CSV file to write, e.g. "xxxx.csv".
    mode : str
        "w" overwrites the file and writes a header row; "a" appends rows
        after the existing content without a header.
    """
    # Column order of the summary table; see the docstring for meanings.
    columns = ["id", "z_qso", "s2n",
               "wavelength_start_b", "wavelength_end_b", "pixel_start_b", "pixel_end_b",
               "wavelength_start_r", "wavelength_end_r", "pixel_start_r", "pixel_end_r",
               "wavelength_start_z", "wavelength_end_z", "pixel_start_z", "pixel_end_z",
               "dlas_col_density", "dlas_central_wavelength"]
    with open(output_dir, mode=mode, newline="") as handle:
        writer = csv.DictWriter(handle, columns)
        if mode == "w":
            writer.writeheader()
        for sightline in sightlines:
            wave = 10 ** sightline.loglam
            br = sightline.split_point_br
            rz = sightline.split_point_rz
            row = {
                "id": sightline.id,
                "z_qso": sightline.z_qso,
                "s2n": sightline.s2n,
                # b channel: pixels [0, br-1]
                "wavelength_start_b": wave[0],
                "wavelength_end_b": wave[br - 1],
                "pixel_start_b": 0,
                "pixel_end_b": br - 1,
                # r channel: pixels [br, rz-1]
                "wavelength_start_r": wave[br],
                "wavelength_end_r": wave[rz - 1],
                "pixel_start_r": br,
                "pixel_end_r": rz - 1,
                # z channel: pixels [rz, end]
                "wavelength_start_z": wave[rz],
                "wavelength_end_z": wave[-1],
                "pixel_start_z": rz,
                "pixel_end_z": len(sightline.loglam) - 1,
                # Variable-length DLA data packed as comma-joined strings.
                "dlas_col_density": ",".join(str(dla.col_density) for dla in sightline.dlas),
                "dlas_central_wavelength": ",".join(str(dla.central_wavelength) for dla in sightline.dlas),
            }
            writer.writerow(row)
#from dla_cnn.desi.DesiMock import DesiMock
def write_summary_table(nums, version, path, output_path):
    """
    Read DESI mock fits files and add every usable sightline to the summary table.

    A sightline is kept when its quasar redshift exceeds 2.33 and it hosts at
    least one DLA.  The summary CSV at *output_path* is created (with header)
    if missing and appended to otherwise, so pre-existing rows are preserved.

    Parameters
    ----------
    nums : list of int
        Ids of the fits files to process, e.g. [700, 701]. All of them should
        be readable before invoking this function, otherwise their sightlines
        are skipped and reported at the end.
    version : int
        Version of the data set, e.g. for v9.16 pass version = 16.
    path : str
        Directory containing one sub-folder per fits-file id
        (``path/<num>/spectra-<version>-<num>.fits`` etc.).
    output_path : str
        Location of the summary CSV.

    Raises
    ------
    AssertionError
        If any fits file failed to process; the failing ids are listed in the
        message.
    """
    def write_as_summary_table(num):
        """Append the usable sightlines of fits file *num* to the summary table."""
        # Assemble the three per-exposure files that DesiMock reads together.
        file_path = join(path, str(num))
        spectra = join(file_path, "spectra-%i-%i.fits" % (version, num))
        truth = join(file_path, "truth-%i-%i.fits" % (version, num))
        zbest = join(file_path, "zbest-%i-%i.fits" % (version, num))
        spec = DesiMock()
        spec.read_fits_file(spectra, truth, zbest)
        # Keep only sightlines with z_qso > 2.33 that host at least one DLA.
        sightlines = [spec.get_sightline(key) for key in spec.data.keys()
                      if spec.data[key]["z_qso"] > 2.33 and spec.data[key]["DLAS"] != []]
        # Append if the table already exists, otherwise create it with a header.
        if exists(output_path):
            generate_summary_table(sightlines, output_path, "a")
        else:
            generate_summary_table(sightlines, output_path, "w")

    bad_files = []  # ids of fits files that raised while being processed
    for num in nums:
        try:
            write_as_summary_table(num)
        except Exception:
            # Catch Exception rather than a bare ``except:`` so that
            # KeyboardInterrupt/SystemExit still propagate; record the id
            # and keep processing the remaining files.
            bad_files.append(num)
    assert bad_files == [], "these fits files have some problems, check them please, fits files' id :%s" % str(bad_files)
| [
"noreply@github.com"
] | noreply@github.com |
2976ce29416e292cb0e119e7f3705c0bdd786ad7 | bad59b62baf06c5a110dbf96ee9653a69b8ca4df | /soldjango/soldjango/settings.py | 1daa5ebe5b7896e0a20e718dac4263b34ae127d4 | [] | no_license | 45jihoon/Leon | 848db934898ef9fc838154919c254ae5f7fcb684 | 69d024469a518a1b40e450e3408291671b183718 | refs/heads/main | 2023-01-13T10:55:11.184173 | 2020-11-18T01:47:33 | 2020-11-18T01:47:33 | 313,471,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,122 | py | """
Django settings for soldjango project.
Generated by 'django-admin startproject' using Django 1.11.29.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%i8ro%=@i@jttfy1hxsblzafv7&v5uj@9dc%4g5^@v-2xnw=j&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'assembly',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'soldjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'soldjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"45jihoon@naver.com"
] | 45jihoon@naver.com |
ad21dddcaff52dd22e77f283ff4e11ab18a76100 | b8d0b260960e1c43b883049d68c15a7183df200b | /5_py_blog/blog_app/tests.py | ebafc4198267b4929abd66e68f76098e08839139 | [] | no_license | JAreina/python-django | 59ac92d0694522c1d096bed636409d9405c5caba | 66c7c301dec448217df6516198723e1ce987eab7 | refs/heads/master | 2020-03-27T18:34:59.821701 | 2018-09-07T07:49:35 | 2018-09-07T07:49:35 | 146,931,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,739 | py |
# Create your tests here.
from django.contrib.auth import get_user_model
from django.test import Client, TestCase
from django.urls import reverse
from .models import Post
class BlogTests(TestCase):
    """Exercise the Post model and the list/detail views of the blog app."""

    def setUp(self):
        # One user and one post shared by every test method.
        self.user = get_user_model().objects.create_user(
            username='testuser',
            email='ja@gmail.com',
            password='xxxxxx'
        )
        self.post = Post.objects.create(
            titulo='A good titulo',
            texto='Nice texto content',
            autor=self.user,
        )

    def test_string_representation(self):
        """str(post) is the post's title."""
        post = Post(titulo='A sample titulo')
        self.assertEqual(str(post), post.titulo)

    def test_post_content(self):
        """The fields set in setUp are stored unchanged."""
        # The original repeated these three assertions twice; once is enough.
        self.assertEqual(f'{self.post.titulo}', 'A good titulo')
        self.assertEqual(f'{self.post.autor}', 'testuser')
        self.assertEqual(f'{self.post.texto}', 'Nice texto content')

    def test_post_list_view(self):
        """The home page renders home.html and shows the post body."""
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Nice texto content')
        self.assertTemplateUsed(response, 'home.html')

    def test_post_detail_view(self):
        """An existing post renders post_detalle.html; a missing one is a 404."""
        response = self.client.get('/post/1/')
        no_response = self.client.get('/post/100000/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(no_response.status_code, 404)
        self.assertContains(response, 'A good titulo')
        self.assertTemplateUsed(response, 'post_detalle.html')
| [
"jareinafdez@gmail.com"
] | jareinafdez@gmail.com |
90ebb27f00615a63b07c8ff1cd495f77293c88ea | 8f784ca91cd56818dc6e38d5e602756a913e13b4 | /modbus_tcp_server/network/accept_thread.py | a512980848dd5a91ed2ce730cf546634df5968c6 | [
"MIT"
] | permissive | smok-serwis/modbus-tcp-server | 9a02a3c5e9d0875179903bc4171b4d782d6d48b9 | 558eca908b6762280a74b16d78d56dc047a9dace | refs/heads/master | 2023-06-14T01:26:07.299860 | 2021-07-15T13:59:15 | 2021-07-15T13:59:15 | 339,780,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | import socket
import typing as tp
from satella.coding import silence_excs
from satella.coding.concurrent import TerminableThread
from .conn_thread import ConnectionThread
from ..data_source import BaseDataSource, TestingDataSource
from ..datagrams import MODBUSTCPMessage
from ..processor import ModbusProcessor
class ModbusTCPServer(TerminableThread):
    """Accept-loop thread for a MODBUS/TCP server.

    Binds a listening TCP socket and, once started, spawns one
    ConnectionThread per accepted client; each connection is served by a
    shared ModbusProcessor backed by the given data source.
    """

    def __init__(self, bind_ifc: str, bind_port: int,
                 data_source: tp.Optional[BaseDataSource] = None,
                 backlog: int = 128):
        """
        :param bind_ifc: interface address to bind the listening socket to
        :param bind_port: TCP port to listen on
        :param data_source: register backend; defaults to TestingDataSource
            when not given
        :param backlog: maximum number of queued, not-yet-accepted connections
        """
        super().__init__(name='accept')
        if data_source is None:
            data_source = TestingDataSource()
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR lets the server rebind quickly after a restart.
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind((bind_ifc, bind_port))
        self.backlog = backlog
        self.processor = ModbusProcessor(data_source)
    def prepare(self) -> None:
        # Runs once in the thread before the accept loop starts.
        self.socket.listen(self.backlog)
        self.socket.setblocking(True)
        # 5 s accept timeout so the loop can periodically check for termination.
        self.socket.settimeout(5)
    def process_message(self, msg: MODBUSTCPMessage) -> MODBUSTCPMessage:
        # Delegate a single request to the shared processor; called by
        # ConnectionThread instances.
        return self.processor.process(msg)
    def cleanup(self):
        # Runs when the thread terminates: release the listening socket.
        self.socket.close()
    @silence_excs(socket.timeout)
    def loop(self) -> None:
        # One iteration of the accept loop; timeouts are silenced so the
        # thread can notice a termination request between accepts.
        sock, addr = self.socket.accept()
        ConnectionThread(sock, addr, self).start()
| [
"piotr.maslanka@henrietta.com.pl"
] | piotr.maslanka@henrietta.com.pl |
7f3cd58d2ad66672684040b5b5587e8f52617096 | 1ae6034a53d60bee5c61208539cbb39143ec76e3 | /Motion detection game/ui.py | 5982fe204020dab34a2dd66c4d71613ee9d9a191 | [
"MIT"
] | permissive | harshmalik9423/Motion-Detection-Game | 2f60e77983d8dda746ffb4c7de907f6658dcb2fb | 49ad8c25360df34f4e33647dee7406e6397311de | refs/heads/master | 2022-08-23T12:52:48.089138 | 2020-05-25T10:56:48 | 2020-05-25T10:56:48 | 266,755,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,110 | py | from tkinter import *
import cv2
from tkinter import messagebox
import PIL.Image, PIL.ImageTk
window = Tk()
window.title("Welcome")
window.geometry('1500x1500')
########CODING FOR QUIT BUTTON MESSAGEBOX#########
def close_window():
    """Ask the player to confirm quitting; destroy the window on 'yes'."""
    # askquestion returns the string 'yes' or 'no'.
    MsgBox=messagebox.askquestion('Quit the game','Are you sure you want to quit the game?',icon='warning')
    if MsgBox =='yes':
        window.destroy()
    else:
        messagebox.showinfo('Return','You will now return to the game screen')
def close():
    """
    Validate the two player-name entries, save them, and close the window.

    Each non-empty name is written to its own file (text1.txt / text2.txt);
    an empty entry only pops an informational dialog.  The window is
    destroyed at the end either way so the game can proceed.
    """
    # BUG FIX: the original compared the Entry *widget* to '' (``txt1 == ''``),
    # which is always False, so the empty-field branch never ran and an empty
    # name was written to disk anyway.  Compare the entered text instead.
    if txt1.get() == '':
        messagebox.showinfo('Return','You will now return to the game screen')
    else:
        # ``with`` guarantees the file is closed even if write() fails.
        with open("text1.txt", "w") as file:
            file.write(txt1.get())
    if txt2.get() == '':
        messagebox.showinfo('Return','You will now return to the game screen')
    else:
        with open("text2.txt", "w") as file:
            file.write(txt2.get())
    window.destroy()
#################CREATE A WINDOW##################
cv_img=cv2.imread("images.jpeg")
canvas=Canvas(window,width=1500,height=1500)
canvas.pack()
photo=PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img))
canvas.create_image(0,0,image=photo,anchor=NW)
##########LABEL AND TEXTBOX FOR PLAYER 1##########
photo2=PhotoImage(file="player1.gif")
lbl1 = Label(window, compound=TOP, width=284, height=58, image=photo2)
lbl1.place(x=200,y=180)
txt1 = Entry(window,font=("Bold",20),bd=5,bg='light green')
txt1.place(x=600,y=185)
##########LABEL AND TEXTBOX FOR PLAYER 2##########
photo3=PhotoImage(file="player2.gif")
lbl2 = Label(window, compound=TOP, width=292, height=60, image=photo3)
lbl2.place(x=200,y=280)
txt2 = Entry(window,font=("Bold",20),bd=5,bg='light green')
txt2.place(x=600,y=285)
##############READY AND QUIT BUTTONS##############
btn2=Button(window,text="Ready",font=("Bold",20),height=2,width=20,fg='green',bg='black',bd=10,command=close)
btn2.place(x=400,y=500)
btn3=Button(window,text="Quit",font=("Bold",15),height=1,width=8,fg='black',bg='gray',bd=5,command=close_window)
btn3.place(x=1225,y=50)
###################MAIN LOOP######################
window.mainloop()
| [
"harshmalik2013@gmail.com"
] | harshmalik2013@gmail.com |
ac24192eb309aac4b2982be8519bf5c2b06906ea | 1326612e90a772bd6acbf0c9532cbcaefdec9f48 | /BiblioPixelAnimations/strip/Searchlights.py | 5bed4eba37d24cfa5a9789fec768858fa29d0c92 | [
"MIT"
] | permissive | russp81/BiblioPixelAnimations | cc2ad721ed60435950282ce09d95191bf09b1eb3 | 4184ace37200861cc721e5bd7a43014bd0bbcadf | refs/heads/master | 2021-01-17T21:01:22.832174 | 2016-09-16T06:43:06 | 2016-09-16T06:43:06 | 68,341,732 | 1 | 0 | null | 2016-09-16T00:23:29 | 2016-09-16T00:23:29 | null | UTF-8 | Python | false | false | 3,267 | py | from bibliopixel import LEDStrip
import bibliopixel.colors as colors
from bibliopixel.animation import BaseStripAnim
import random
class Searchlights(BaseStripAnim):
    """Three search lights sweeping at different speeds"""
    # NOTE(review): this module uses Python 2 semantics -- ``map`` is assumed
    # to return a list and ``/`` to be integer division; confirm before
    # running under Python 3.
    def __init__(self, led, colors=[colors.MediumSeaGreen,colors.MediumPurple,colors.MediumVioletRed], tail=5, start=0, end=-1):
        # NOTE(review): the ``colors`` default is a mutable list shared across
        # instances; harmless as long as it is never mutated, but fragile.
        super(Searchlights, self).__init__(led, start, end)
        self._color = colors
        # +1 so a "tail" of N gives N faded pixels beyond the head pixel.
        self._tail = tail + 1
        # Clamp the tail so two opposing tails can never overlap mid-strip.
        if self._tail >= self._size / 2:
            self._tail = (self._size / 2) - 1
        # Per-light state: sweep direction (+1/-1), head position, step counter.
        self._direction = [1,1,1]
        self._currentpos = [0,0,0]
        self._steps = [1,1,1]
        # Brightness drop per pixel of tail (integer division under Python 2).
        self._fadeAmt = 256 / self._tail
    def step(self, amt = 1):
        # Rebuild the whole frame from black each step.
        self._ledcolors = [(0,0,0) for i in range(self._size)]
        self._led.all_off()
        for i in range(0,3):
            self._currentpos[i] = self._start + self._steps[i]
            #average the colors together so they blend
            self._ledcolors[self._currentpos[i]] = map(lambda x,y: (x + y)/2, self._color[i], self._ledcolors[self._currentpos[i]])
            # Paint the fading tail on both sides of the head pixel.
            for j in range(1,self._tail):
                if self._currentpos[i] - j >= 0:
                    self._ledcolors[self._currentpos[i] - j] = map(lambda x,y: (x + y)/2, self._ledcolors[self._currentpos[i] - j], colors.color_scale(self._color[i], 255 - (self._fadeAmt * j)))
                if self._currentpos[i] + j < self._size:
                    self._ledcolors[self._currentpos[i] + j] = map(lambda x,y: (x + y)/2, self._ledcolors[self._currentpos[i] + j], colors.color_scale(self._color[i], 255 - (self._fadeAmt * j)))
            # Reverse direction at either end of the configured range.
            if self._start + self._steps[i] >= self._end:
                self._direction[i] = -1
            elif self._start + self._steps[i] <= 0:
                self._direction[i] = 1
            # advance each searchlight at a slightly different speed
            self._steps[i] += self._direction[i] * amt * int(random.random() > (i*0.05))
        # Push the composed frame to the LED strip.
        for i,thiscolor in enumerate(self._ledcolors):
            self._led.set(i, thiscolor)
MANIFEST = [
{
"class": Searchlights,
"controller": "strip",
"desc": "Three search lights sweeping at different speeds",
"display": "Searchlights",
"id": "Searchlights",
"params": [
{
"default": -1,
"help": "Ending pixel (-1 for entire strip)",
"id": "end",
"label": "End",
"type": "int"
},
{
"default": 0,
"help": "Starting pixel",
"id": "start",
"label": "Start",
"type": "int"
},
{
"default": 5,
"help": "Length of the faded pixels at the start and end.",
"id": "tail",
"label": "Tail Length",
"type": "int"
},
{
"default": [colors.MediumSeaGreen,colors.MediumPurple,colors.MediumVioletRed],
"help": "",
"id": "colors",
"label": "Colors",
"type": "colors"
}
],
"type": "animation"
}
]
| [
"bonnie.barrilleaux@gmail.com"
] | bonnie.barrilleaux@gmail.com |
38834c15b947bd3f1e865675bfc8c866c246b9e8 | d4f4293505926285a3449bd7aba29fb4fe07a51c | /wangyi/wangyi/settings.py | c4abd898d64cb8001578cd9120598fc255472092 | [] | no_license | fugui12345/- | 96078509e798e7b55af5632dfdf37969c727894c | ffb9f7d6295cd246d990ea35f28c2fce5035af19 | refs/heads/main | 2023-07-01T06:36:32.951094 | 2021-08-04T18:48:15 | 2021-08-04T18:48:15 | 392,793,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,147 | py | # Scrapy settings for wangyi project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'wangyi'
SPIDER_MODULES = ['wangyi.spiders']
NEWSPIDER_MODULE = 'wangyi.spiders'
LOG_LEVEL = 'ERROR'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'wangyi.middlewares.WangyiSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'wangyi.middlewares.WangyiDownloaderMiddleware': 543,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'wangyi.pipelines.WangyiPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"noreply@github.com"
] | noreply@github.com |
c8d42762f1ba5f26bd5112890d47ecaef5bed717 | d992f4973bb2aa3c43dce5d4b3483c6212108c60 | /hw1/MachineConstants.py | d31a5e92fb49edff71974ce9b0112e2e8b76759d | [] | no_license | wesshih/astr427 | ece74e5e3ac7275ed50ba6613ed6980a2da6cc8a | b6dc28a02b0712f8e40a5d9ca5e371a660c070f2 | refs/heads/master | 2021-01-18T15:45:50.093209 | 2017-06-08T05:30:08 | 2017-06-08T05:30:08 | 86,681,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,939 | py | import numpy as np
'''
Wesley Shih
1237017
Astr 427 Homework 1
4/11/17
Problem 1: Machine Constants
The first problem of the homework asks us to empirically determine several
machine constants related to floating point numbers. These include the smallest number
epsilon that can be successfully added to or subracted from 1.0, and the maximum and minimum
positive numbers that can be represented using the float data type. We will be using 64-bit floats
for all parts of this problem.
This file contains code that calculates the machine constants and prints them to the console.
Becasue these values are fairly easy to calculate, this file will not contain any user-defined
functions or data structures. For each part I will simply calculate the constant, print it to
the console, and comment on how it relates to the IEEE 754 representation.
A quick note on float representation that is relative to the whole problem. For a 64-bit
float, there is 1 bit for sign, 11 bits for the exponent, and 52 bits for the Significand or fraction.
However, there is an implied 1 at the beginning of the significand, so we effectively have 53 bits
available for the fraction.
'''
# Part A
# We are looking for the smallest value that can be successfully subtracted from 1.0
# or in other words, find smallest epsilon such that 1.0 - epsilon != 1.0
epsilon_a = 1.0
while (1.0 - epsilon_a/2.0) != 1.0:
epsilon_a /= 2.0
print 'A) smallest epsilon s.t. 1.0 - epsilon != 1.0'
print '\t\tepsilon_a:\t' + `epsilon_a` + '\n'
# Running this code gives us a value of epsilon_a = 1.1102230246251565e-16
# This value is within an order of 2 of the true value of epsilon, as we know that
# 1.0 - (epsilon_a/2) == 1.0. Given the 53 bits for the significand, we expect
# the true machine epsilon to be 2^-(53 - 1). However, 2^-52 = 2.22e-16 which is essentially
# double the value of epsilon_a.
# Part B
# We are looking for the smallest value that can be successfully added to 1.0
# or formally, find smallest epsilon such that 1.0 + epsilon != 1.0
epsilon_b = 1.0
while (1.0 + epsilon_b/2.0) != 1.0:
epsilon_b /= 2.0
print 'B) smallest epsilon s.t. 1.0 + epsilon != 1.0'
print '\t\tepsilon_b:\t' + `epsilon_b` + '\n'
# Running this code gives us a value of epsilon_b = 2.220446049250313e-16
# This value agrees very nicely with the "expected" epsilon I calculated above.
# 2^-52 = 2.22e-16, which is very close to the calculated value.
# Part C
# We are looking for the maximum number that can be represented with a float
max_num = 1.0
while (max_num * 2.0) != np.inf:
max_num *= 2.0
print 'C) maximum representable number'
print '\t\tmax_num:\t' + `max_num` + '\n'
# Running this code gives us a max_num = 8.98846567431158e+307
# We know that this value is at least within an order of magnitude of the true max_num
# because we know that max_num * 2.0 == infinity representation.
# We have 11 bits total for the exponent, however these bits follow twos-compliment.
# This means we only have 10 bits available for positive exponents. So the maximum
# positive exponent is 1023. We find that 2^1023 = 8.9884e+307, which is exactly what
# we have found here. the true maximum number will be greater than this though, as we
# can increase the significand to push the max_num higher.
# Part D
# We are looking for the minimum representable positive number
min_num = 1.0
while (min_num/2) > 0:
min_num /= 2
print 'D) minimum representable number'
print '\t\tmin_num:\t' + `min_num` + '\n'
# Running this code gives us a min_num = 5e-324
# Like with max_num, to find the minimum number, we will look at the 11 exponent bits.
# However, this time we are able to use the MSB, and so we can achieve an exponent of -1024
# 2^-1024 = 5.56e-309. Using the exponent alone is not enough to get 5e-324. To do this,
# we must denormalize the float, changing the implied 1.f to a 0.f. This will get us the
# rest of the way there to 5e-324.
| [
"wesshih@uw.edu"
] | wesshih@uw.edu |
caa1a4bff741d96aeec1525edabd69b711687a41 | 4b5b168ab84b4d40d7e7cc58ebc806287e0a8d82 | /tricky.py | 714693cb19a608edcafbec9ebb2a41ea15f87da0 | [] | no_license | gould-ann/challenges- | 5de5f14527f2ee0147514d364f193d78cec6a113 | 4784eb6c3caf494ee24e0e8c587ce626fc026fdd | refs/heads/master | 2020-03-18T18:32:35.402677 | 2018-05-28T03:57:15 | 2018-05-28T03:57:15 | 135,099,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | #https://www.hackerrank.com/challenges/maximum-subarray-sum/problem
import math #lol
# arr = [1, 2, 3]
# mod_number = 2
# max([sum( [arr[j] if str(bin(i))[2:].zfill(len(arr))[j] == "1" else 0 for j in range(len(arr))] ) % mod_number for i in range(2**len(arr))])
def max_subarray(arr, mod_number):
    """Return the maximum of sum(subset) % mod_number over all subsets of arr.

    Equivalent to the original brute-force bitmask enumeration (which walked
    all 2**len(arr) subsets), but runs in O(len(arr) * mod_number) by tracking
    the set of achievable residues instead.
    """
    # The empty subset is always available, so residue 0 starts reachable
    # (matching the original, whose best_sum started at 0).
    reachable = {0}
    for value in arr:
        # Adding `value` to every subset seen so far yields new residues;
        # keeping the old set models the subsets that omit `value`.
        reachable |= {(r + value) % mod_number for r in reachable}
        if (mod_number - 1) in reachable:
            # No residue can exceed mod_number - 1; stop early.
            return mod_number - 1
    return max(reachable)
#this line
print max_subarray([3, 3, 9, 9, 5], 7)
| [
"anngould@iastate.edu"
] | anngould@iastate.edu |
4bccb67d59f443ca9b1876575c634bd2741ec643 | 0a3b70128f4de8ba3dc74decea6d349924d31907 | /mysite/bin/easy_install-2.7 | 9f3daab08a7378e652e37d5ba6778ad0a331647e | [] | no_license | ozknight/Django_Tutorials_OJT_Proof | 80c38b278b42f2ca72bd05206f84a359b780daeb | 8006ed69be3bae7e666e49c62f517fbd2f454420 | refs/heads/master | 2016-09-06T16:13:58.746020 | 2015-06-24T23:53:40 | 2015-06-24T23:53:40 | 37,992,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | 7 | #!/home/oz-knightwalker/Desktop/Django_Tutorials_OJT_Proof/mysite/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # setuptools console-script shim: strip any '-script.pyw'/'.exe' suffix
    # that Windows launchers append to argv[0], then hand off to easy_install.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"ozknightwalker@yahoo.com"
] | ozknightwalker@yahoo.com |
b1efe20d5ba4c2a9c279544113a1e2bd6cdf7018 | 2432996ac1615cd36d61f0feeff8a359d2b438d8 | /env/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-eth_hash.py | 1b22c286fe3f7300f269b0ec19044cd2c28cc11a | [
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"Apache-2.0"
] | permissive | Parveshdhull/AutoTyper | dd65d53ece7c13fbc1ead7ce372947483e05e2e3 | 7fabb30e15b770d790b69c2e4eaf9bbf5a4d180c | refs/heads/main | 2023-05-08T14:10:35.404160 | 2023-05-07T20:43:15 | 2023-05-07T20:43:15 | 315,415,751 | 26 | 18 | Apache-2.0 | 2023-05-07T20:43:16 | 2020-11-23T19:13:05 | Python | UTF-8 | Python | false | false | 611 | py | # ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
from PyInstaller.utils.hooks import collect_submodules
# The ``eth_hash.utils.load_backend`` function does a dynamic import.
hiddenimports = collect_submodules('eth_hash.backends')
| [
"parvesh.dhullmonu@gmail.com"
] | parvesh.dhullmonu@gmail.com |
4c8db1dea09dd07cd75539c214a17c0ac7ae5b64 | 1738a24cc31c3e659384d9c3cafaf9d15b1eac52 | /ChatBot.py | e9bde94ee7ef2562b2d6bcedd554077b56e4a139 | [] | no_license | michaelcicero/PythonChatBot | 7e7bb54e6009b80698b898e468a1dc375019b6fc | e0f8d47e42b05e5823880d1c21534d9560050072 | refs/heads/master | 2022-12-09T20:07:04.335082 | 2020-09-09T20:55:24 | 2020-09-09T20:55:24 | 294,226,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,946 | py | # # - * - coding: utf-8 - * -
""" Eliza homework """
_author_="Michael Cicero"
import re
#find bye function
def findBye(a):
    """Exit the chat with a farewell message if the input contains 'bye'.

    Bug fix: the original ignored its parameter and read the global `curInp`;
    every call site passes the current input as the argument, so the function
    now uses the argument it is given.
    """
    if re.search(r'bye', a, re.IGNORECASE):
        print("It was nice speaking with you. Goodbye")
        quit()
#find ed function
def findEd(b):
    # Return the first space-separated word of `b` that ends in "ed",
    # or None when no such word exists.
    for word in b.split(" "):
        if re.findall('ed$', word):
            return word
#get name
print("Greetings. I'm chatbot. Can you remind me of your name?")
name = input()
curInp = name
findBye(curInp)
if(re.findall(r'(i am)|(my name is)', curInp, re.IGNORECASE)):
name = re.sub(r'([iI] am)|([mM]y name is)',"", curInp)
while len(name) == 0:
print("What is your name?")
name = input()
curInp = name
findBye(curInp)
#how are you
print("Hello " + name + "! How are you doing today?")
hay = input()
curInp = hay
if(re.findall(r'([you?]{4})', curInp, re.IGNORECASE)):
print("Don't worry about me. I am more interested in hearing about you. I'll be asking the questions from now on.")
while len(hay) == 0:
print("Sorry, I couldn't hear you. How are you doing today?")
hay = input()
curInp = hay
findBye(curInp)
#while loop containing all further checks
while True:
if(re.findall(r'(ok|alright|fine|not bad|not too bad)',curInp, re.IGNORECASE)):
print("I'm glad you are feeling ok. Why aren't you good?")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
elif(re.findall(r'(sad|depress|sick|unwell|bad|poor|not well|not very well|not good|not very good|not great)',curInp, re.IGNORECASE)):
print("I'm sorry you arent feeling well. Why haven't you been feeling well?")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
elif(re.findall(r'(good|great|excellent|superb|wonderful|extraordinary|well|happy|joy)',curInp, re.IGNORECASE)):
print("Wonderful, I'm glad you're good. Why are you so good?")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
elif(re.findall(r'(mom|mother|mommmy|momma)',curInp, re.IGNORECASE)):
print("Tell me more about your mother.")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
elif(re.findall(r'(friend|bud|pal)',curInp, re.IGNORECASE)):
print("Tell me more about your friend.")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
elif(re.findall(r'(dad|father|daddy)',curInp, re.IGNORECASE)):
print("Tell me more about your father.")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
elif(re.findall(r'(brother|bro)',curInp, re.IGNORECASE)):
print("Tell me more about your brother.")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
elif(re.findall(r'(sister|sis)',curInp, re.IGNORECASE)):
print("Tell me more about your sister.")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
else:
print("Intresting, mind telling me a bit more about this?")
curInp = input()
findBye(curInp)
if(bool(re.search("ed", curInp))):
edMatch = findEd(curInp)
print("why did it " + re.sub("ed", "",edMatch) + "?")
curInp = input()
findBye(curInp)
| [
"noreply@github.com"
] | noreply@github.com |
19566dd1b2d10137c2e3943ea5ac2e7ec000f503 | 55e14ece094383ca13ad22d638d545767a637327 | /ClassificationModel_TrainedW2V/repeat.py | adaa7b183171cce43ebab70c21964e334860e323 | [] | no_license | UnitForDataScience/Neptune-Classification | e16ede89d2e3afc434a787d018d99c2393525811 | 114d78c1fcfd1ec329636fde3401fdedd8d418ef | refs/heads/master | 2022-06-09T23:25:04.386608 | 2020-05-06T06:05:10 | 2020-05-06T06:05:10 | 261,607,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | import os
import main
# Run the main.py entry point four times in a row.
for i in range(4):
    # NOTE(review): subprocess.run(['python', 'main.py'], check=True) would
    # allow error checking; os.system's exit status is silently discarded here.
    os.system('python main.py')
| [
"asudatascience@datascience.dhcp.asu.edu"
] | asudatascience@datascience.dhcp.asu.edu |
607250c2ca29295053d3ba2e5639b4b102ba1fb8 | 5f5f9fb972d6c8236142413876fee737b11b0090 | /Assignment4/comp/code_lstm/max_predictions.py | 9da8ed5ca0836e69d7c306c3c2a0b5fe796f133f | [] | no_license | agarwal-ayushi/Machine-Learning-Assignments | c634d48996dd975874ff7383aac46e626bcb338d | 53262ba02ce440946e0aa92d6385f94bd79b32c5 | refs/heads/master | 2022-12-17T00:15:25.626464 | 2020-09-16T17:19:33 | 2020-09-16T17:19:33 | 235,383,698 | 1 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from models_rasha import Encoder_Rasha, Decoder_Rasha
import numpy as np
def to_device(data, device):
    """Recursively move `data` (a tensor, or a possibly nested list/tuple of
    tensors) onto `device`.  Lists and tuples both come back as lists."""
    if not isinstance(data, (list, tuple)):
        return data.to(device)
    return [to_device(item, device) for item in data]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def max_prediction(encoded_features, decoder_model, vocab_dict, max_length=80):
    '''
    Greedily decode a caption from encoded image features.

    At every step the single highest-probability word is chosen (argmax /
    "max prediction"), its embedding is fed back into the decoder, and
    decoding stops at the '<end>' token or after max_length steps.

    encoded_features : encoder output for one image
        (assumed shape (1, feature_dim) before the unsqueeze below -- TODO confirm)
    decoder_model : decoder exposing get_pred(...) and embed(...)
    vocab_dict : (word_2_ix, ix_2_word) vocabulary mappings
    max_length : maximum number of decoding steps

    Returns a 1-D numpy array of predicted word ids for the single input.

    NOTE(review): inputs are moved with .cuda() unconditionally, so this
    requires a CUDA device despite the module-level `device` fallback logic.
    '''
    word_2_ix, ix_2_word = vocab_dict
    start_token = word_2_ix['<start>']  # looked up but never used below
    end_token = word_2_ix['<end>']
    hidden = None # In the beginning the hidden state is None
    caption_word_id = []
    for i in range(max_length):
        # Add a sequence-length dimension of 1 for the decoder input.
        encoded_features = encoded_features.unsqueeze(1)
        if(hidden == None):
            # First step: let the decoder initialise its own hidden state.
            output, hidden = decoder_model.get_pred(encoded_features.cuda())
        else:
            output, hidden = decoder_model.get_pred(encoded_features.cuda(), to_device(hidden, device))
        _ , predicted_id = output.max(1)  # greedy choice: argmax over the vocabulary
        caption_word_id.append(predicted_id)
        if (predicted_id == end_token):
            break
        # Feed the chosen word's embedding back in as the next input.
        encoded_features = decoder_model.embed(predicted_id)
    caption_word_id = torch.stack(caption_word_id, 1)  # (1, decoded_length)
    return caption_word_id.cpu().numpy()[0]
| [
"ayushi.mnnit@gmail.com"
] | ayushi.mnnit@gmail.com |
36d202508d82d6e61d7e13af2d2f6b042afdbfe4 | 77fb4b9902a79a2bcc42105f1c62744cc869cd15 | /wignerd1.py | 0aae41925ad38d3c8112ac88bc28753c021972ef | [] | no_license | davidsdatascience/Algorithm-Development | 6dd3d35d6eeab1a3a019abca8b591950f7830754 | 57b0cf1a976ce7005fa05a79880a49d6bdd06822 | refs/heads/master | 2020-04-20T09:21:54.105597 | 2019-05-04T20:08:50 | 2019-05-04T20:08:50 | 168,763,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,354 | py | from scipy.special import jv, legendre, sph_harm, jacobi
from scipy.misc import factorial, comb
from numpy import floor, sqrt, sin, cos, exp, power
from math import pi
def wignerd(j,m,n=0,approx_lim=10):
    '''
    Wigner "small d" matrix element in the Euler z-y-z convention.

    Returns a function of the single angle beta, d^j_{mn}(beta).

    example:
        j = 2
        m = 1
        n = 0
        beta = linspace(0,pi,100)
        wd210 = wignerd(j,m,n)(beta)

    Valid parameter ranges:
         j >= 0
        -j <= m <= j
        -j <= n <= j

    approx_lim determines when the Bessel-function approximation is used
    (when j > m + approx_lim and j > n + approx_lim).  Integer j with
    m = n = 0 uses ordinary Legendre polynomials; all remaining cases use
    the exact Jacobi-polynomial formula.
    '''
    if (j < 0) or (abs(m) > j) or (abs(n) > j):
        raise ValueError("wignerd(j = {0}, m = {1}, n = {2}) value error.".format(j,m,n) \
            + " Valid range for parameters: j>=0, -j<=m,n<=j.")
    if (j > (m + approx_lim)) and (j > (n + approx_lim)):
        # Large-j limit: d^j_{mn}(beta) ~ J_{m-n}(j*beta) (Bessel approximation).
        return lambda beta: jv(m-n, j*beta)
    if (floor(j) == j) and (n == 0):
        if m == 0:
            # Integer j with m = n = 0: d^j_00(beta) = P_j(cos beta) (Legendre).
            return lambda beta: legendre(j)(cos(beta))
        elif False:
            # Dead branch (elif False) kept from the original; it also
            # references an undefined name `conjugate`, so it can never run.
            a = sqrt(4.*pi / (2.*j + 1.))
            return lambda beta: a * conjugate(sph_harm(m,j,beta,0.))
    # Pick the smallest of (j+n, j-n, j+m, j-m) as the Jacobi degree k; the
    # associated (a, lambda) pair fixes the exponents and sign used below.
    jmn_terms = {
        j+n : (m-n,m-n),
        j-n : (n-m,0.),
        j+m : (n-m,0.),
        j-m : (m-n,m-n),
        }
    k = min(jmn_terms)
    a, lmb = jmn_terms[k]
    b = 2.*j - 2.*k - a
    if (a < 0) or (b < 0):
        raise ValueError("wignerd(j = {0}, m = {1}, n = {2}) value error.".format(j,m,n) \
            + " Encountered negative values in (a,b) = ({0},{1})".format(a,b))
    # Exact closed form: (-1)^lambda * sqrt(C(2j-k, k+a)) / sqrt(C(k+b, b))
    #                    * sin^a(beta/2) * cos^b(beta/2) * P_k^(a,b)(cos beta)
    coeff = power(-1.,lmb) * sqrt(comb(2.*j-k,k+a)) * (1./sqrt(comb(k+b,b)))
    return lambda beta: coeff \
        * power(sin(0.5*beta),a) \
        * power(cos(0.5*beta),b) \
        * jacobi(k,a,b)(cos(beta))
def wignerD(j,m,n=0,approx_lim=10):
    '''
    Wigner D-function in the Euler z-y-z convention.

    Returns a function of two or three Euler angles (alpha, beta, gamma);
    gamma defaults to zero and may be omitted.  approx_lim is forwarded to
    wignerd() and controls when the Bessel-function approximation is used
    (when j > m + approx_lim and j > n + approx_lim).

    usage:
        from numpy import linspace, meshgrid
        a = linspace(0, 2*pi, 100)
        b = linspace(0, pi, 100)
        aa,bb = meshgrid(a,b)
        j,m,n = 1,1,1
        zz = wignerD(j,m,n)(aa,bb)
    '''
    def D(alpha, beta, gamma=0):
        # Same evaluation order as the original lambda: the small-d factor is
        # rebuilt on every call, so argument validation happens at call time.
        return exp(-1j * m * alpha) * wignerd(j, m, n, approx_lim)(beta) * exp(-1j * n * gamma)
    return D
#if __name__ == '__main__':
'''
just a bunch of plots in (phi,theta) for
integer and half-integer j and where m and
n take values of [-j, -j+1, ..., j-1, j]
Note that all indexes can be any real number
with the conditions:
j >= 0
-j <= m <= j
-j <= n <= j
'''
from matplotlib import pyplot, cm, rc
from numpy import linspace, arange, meshgrid, real, imag, arccos
rc('text', usetex=False)
ext = [0.,2.*pi,0.,pi]
phi = linspace(ext[0],ext[1],200)
theta = linspace(ext[2],ext[3],200)
pphi,ttheta = meshgrid(phi,theta)
# The maximum value of j to plot. Will plot real and imaginary
# distributions for j = 0, 0.5, ... maxj
maxj = 3
for j in arange(0,maxj+.1,step=0.5):
fsize = (j*2+3,j*2+3)
title = 'WignerD(j,m,n)(phi,theta)'
if j == 0:
fsize = (4,4)
else:
title += ', j = '+str(j)
figr = pyplot.figure(figsize=fsize)
figr.suptitle(r'Real Part of '+title)
figi = pyplot.figure(figsize=fsize)
figi.suptitle(r'Imaginary Part of '+title)
for fig in [figr,figi]:
fig.subplots_adjust(left=.1,bottom=.02,right=.98,top=.9,wspace=.02,hspace=.1)
if j == 0:
fig.subplots_adjust(left=.1,bottom=.1,right=.9,top=.9)
if j == 0.5:
fig.subplots_adjust(left=.2,top=.8)
if j == 1:
fig.subplots_adjust(left=.15,top=.85)
if j == 1.5:
fig.subplots_adjust(left=.15,top=.85)
if j == 2:
fig.subplots_adjust(top=.87)
if j != 0:
axtot = fig.add_subplot(1,1,1)
axtot.axesPatch.set_alpha(0.)
axtot.xaxis.set_ticks_position('top')
axtot.xaxis.set_label_position('top')
axtot.yaxis.set_ticks_position('left')
axtot.spines['left'].set_position(('outward',10))
axtot.spines['top'].set_position(('outward',10))
axtot.spines['right'].set_visible(False)
axtot.spines['bottom'].set_visible(False)
axtot.set_xlim(-j-.5,j+.5)
axtot.set_ylim(-j-.5,j+.5)
axtot.xaxis.set_ticks(arange(-j,j+0.1,1))
axtot.yaxis.set_ticks(arange(-j,j+0.1,1))
axtot.set_xlabel('n')
axtot.set_ylabel('m')
nplts = 2*j+1
for m in arange(-j,j+0.1,step=1):
for n in arange(-j,j+0.1,step=1):
print j,m,n
zz = wignerD(j,m,n)(pphi,ttheta)
i = n+j + nplts*(j-m)
for fig,data in zip((figr,figi), (real(zz),imag(zz))):
ax = fig.add_subplot(nplts, nplts, i+1, projection='polar')
plt = ax.pcolormesh(pphi,ttheta,data.copy(),
cmap=cm.jet,
#cmap=cm.RdYlBu_r,
vmin=-1., vmax=1.)
if j == 0:
ax.grid(True, alpha=0.5)
ax.set_title(r'j,m,n = (0,0,0)', position=(0.5,1.1), size=12)
ax.set_xlabel(r'$\phi$')
ax.set_ylabel(r'$\theta$', rotation='horizontal', va='bottom')
ax.xaxis.set_ticks([0,.25*pi,.5*pi,.75*pi,pi,1.25*pi,1.5*pi,1.75*pi])
ax.xaxis.set_ticklabels(['0',r'$\frac{\pi}{4}$',r'$\frac{\pi}{2}$',r'$\frac{3 \pi}{4}$',r'$\pi$',r'$\frac{5 \pi}{4}$',r'$\frac{3 \pi}{2}$',r'$\frac{7 \pi}{4}$'], size=14)
ax.yaxis.set_ticks([0,.25*pi,.5*pi,.75*pi,pi])
ax.yaxis.set_ticklabels(['0',r'$\frac{\pi}{4}$',r'$\frac{\pi}{2}$',r'$\frac{3 \pi}{4}$',r'$\pi$'], size=14)
else:
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_xlim(ext[0],ext[1])
ax.set_ylim(ext[2],ext[3])
if j == 0:
fig.colorbar(plt, pad=0.07)
# uncomment the following if you want to save these to image files
#figr.savefig('wignerD_j'+str(j)+'_real.png', dpi=150)
#figi.savefig('wignerD_j'+str(j)+'_imag.png', dpi=150)
pyplot.show()
| [
"noreply@github.com"
] | noreply@github.com |
123de62f0fc26c6891df29d80121cecdcef6d869 | bdea01d7702bec417772442f54744a5abcab8090 | /contours.py | 133c0683fc36f14bed0b04a8bd385e80397e1ea8 | [] | no_license | arbaza/open-cv | 6e21565e34e9e208d39a8d391137d038cef930f0 | 576d4f210f1ca764f9ae8d442e730bd6ee9a798a | refs/heads/main | 2023-02-10T11:27:05.228090 | 2020-12-30T18:19:09 | 2020-12-30T18:19:09 | 325,618,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | import numpy as np
import cv2 as cv
# Demo: edge detection and contour drawing with OpenCV.
img = cv.imread('Photos\Euro.jpg')  # NOTE(review): backslash path is Windows-only; only works because \E is not an escape
cv.imshow('image', img)
cv.waitKey(0)
#Grayscale (computed but unused once the commented-out experiments below are disabled)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
blank = np.zeros(img.shape[:2], dtype = 'uint8')  # black single-channel canvas to draw contours on
cv.imshow('Blank', blank)
#blur
# blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT)
# cv.imshow("Blur", blur )
# cv.waitKey(0)
#Canny/edge detection with thresholds 127/175 on the original (colour) image
canny = cv.Canny(img, 127, 175)
# ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY)
#Contour detection: all contours (RETR_LIST), every boundary point kept (CHAIN_APPROX_NONE)
contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
print(len(contours))
cv.drawContours(blank, contours, -1, (0, 0, 255 ), 1)  # -1 = draw all contours
cv.imshow('Contours drawn', blank)
cv.waitKey(0)
# cv.imshow("Thresholded", thresh )
cv.waitKey(0) | [
"arbaaz_arbazzz@yahoo.com"
] | arbaaz_arbazzz@yahoo.com |
78d7b71bc57e02874094e8d8369f2ea02d403828 | ab6b73cc1bd2501fca5b406a0bcd69b7b8b7f94b | /hackerrank/warm-up/sales-by-match.py | fb602ce970fa16f37a83476ebb9e068e686fa307 | [] | no_license | youngbin-ro/problem-solving | c2a57a4318dc66647a182418d9c07bf0615ff36b | 7b27e44144bc25fd0ad9928eb979c5522ab772d4 | refs/heads/master | 2023-02-05T07:18:34.926633 | 2020-12-21T13:16:08 | 2020-12-21T13:16:08 | 232,480,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | from collections import defaultdict
def sock_merchant(arr):
    """Count the pairs of equal values in `arr` (HackerRank 'Sales by Match')."""
    tally = {}
    pairs = 0
    for color in arr:
        tally[color] = tally.get(color, 0) + 1
        if tally[color] == 2:
            # A pair is complete; reset the tally so a third sock starts a new pair.
            tally[color] = 0
            pairs += 1
    return pairs
if __name__ == "__main__":
arr_ = [10, 20, 20, 10, 10, 30, 50, 10, 20]
print(sock_merchant(arr_))
| [
"youngbin.ro@gmail.com"
] | youngbin.ro@gmail.com |
ebc97dabe6ba4cd2d87aca268755945115d291e2 | 3447227dd54587eb8c0c7f5346ac158504f7a907 | /compass/ocean/tests/global_ocean/threads_test/__init__.py | 42883b53b746d85a52e069468c8ae411ba7c414e | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] | permissive | MPAS-Dev/compass | 5e2c1525224dd399bcf4f56f661df05e2ec197a6 | 0b7440f0aa77c1ae052922a39e646bd35c267661 | refs/heads/main | 2023-08-30T20:59:52.052430 | 2023-08-29T09:45:14 | 2023-08-29T09:45:14 | 310,409,977 | 10 | 26 | NOASSERTION | 2023-09-13T14:19:16 | 2020-11-05T20:28:25 | Python | UTF-8 | Python | false | false | 2,046 | py | from compass.validate import compare_variables
from compass.ocean.tests.global_ocean.forward import ForwardTestCase, \
ForwardStep
class ThreadsTest(ForwardTestCase):
    """
    A test case for performing two short forward runs to make sure the results
    are identical with 1 and 2 threads per MPI process
    """
    def __init__(self, test_group, mesh, init, time_integrator):
        """
        Create test case

        Parameters
        ----------
        test_group : compass.ocean.tests.global_ocean.GlobalOcean
            The global ocean test group that this test case belongs to

        mesh : compass.ocean.tests.global_ocean.mesh.Mesh
            The test case that produces the mesh for this run

        init : compass.ocean.tests.global_ocean.init.Init
            The test case that produces the initial condition for this run

        time_integrator : {'split_explicit', 'RK4'}
            The time integrator to use for the forward run
        """
        super().__init__(test_group=test_group, mesh=mesh, init=init,
                         time_integrator=time_integrator,
                         name='threads_test')
        # One forward step per thread count; both steps write 'output.nc'
        # (in subdirs '1thread' and '2thread') for the comparison in validate().
        for openmp_threads in [1, 2]:
            name = f'{openmp_threads}thread'
            step = ForwardStep(test_case=self, mesh=mesh, init=init,
                               time_integrator=time_integrator, name=name,
                               subdir=name, ntasks=4,
                               openmp_threads=openmp_threads)
            step.add_output_file(filename='output.nc')
            self.add_step(step)

    # no run() method is needed

    def validate(self):
        """
        Compare the prognostic variables between the 1-thread and 2-thread
        runs; the test passes only if they are identical.
        """
        variables = ['temperature', 'salinity', 'layerThickness',
                     'normalVelocity']
        compare_variables(test_case=self, variables=variables,
                          filename1='1thread/output.nc',
                          filename2='2thread/output.nc')
| [
"xylarstorm@gmail.com"
] | xylarstorm@gmail.com |
6114e7a67a5459c344b648dc4ae2266a17a375b1 | 3b8955841f6982c575331ac78ce91deb327902ee | /utils/ax.py | 858f4f79bc2837700f6fdd88f371a3710a7d33d6 | [] | no_license | kyshel/ich | 6c7b1e66ca28c7c633d800eb7f4d3ee76e05c056 | 1cd4a17a9abf63afa72195fffdc7051fd87eed45 | refs/heads/main | 2023-07-22T17:27:58.461595 | 2021-08-21T13:42:54 | 2021-08-21T13:42:54 | 370,445,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,300 | py | # general functions that often use
import os
import pickle
import json
import csv
from time import localtime, strftime
from pathlib import Path
import time
import glob
import re
import math
from decimal import Decimal
from pathlib import Path
import glob
import re
import torch # no need repro cause only save here
import os
from tqdm import tqdm
import shutil
os.environ['TZ'] = 'Asia/Shanghai'
time.tzset()
try:
import wandb
except ImportError:
wandb = None
def colorstr(*input):
    """Wrap a string in ANSI escape codes, e.g. colorstr('blue', 'hello world').

    All arguments but the last name colors/styles; the last is the text.
    With a single argument, 'blue' + 'bold' is applied by default.
    https://en.wikipedia.org/wiki/ANSI_escape_code
    """
    if len(input) > 1:
        *styles, text = input
    else:
        styles, text = ['blue', 'bold'], input[0]
    codes = {'black': '\033[30m',  # basic colors
             'red': '\033[31m',
             'green': '\033[32m',
             'yellow': '\033[33m',
             'blue': '\033[34m',
             'magenta': '\033[35m',
             'cyan': '\033[36m',
             'white': '\033[37m',
             'bright_black': '\033[90m',  # bright colors
             'bright_red': '\033[91m',
             'bright_green': '\033[92m',
             'bright_yellow': '\033[93m',
             'bright_blue': '\033[94m',
             'bright_magenta': '\033[95m',
             'bright_cyan': '\033[96m',
             'bright_white': '\033[97m',
             'end': '\033[0m',  # misc
             'bold': '\033[1m',
             'underline': '\033[4m'}
    prefix = ''.join(codes[s] for s in styles)
    return prefix + f'{text}' + codes['end']
def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_optimizer()
    # Strip optimizer from 'f' to finalize training, optionally save as 's'.
    # Assumes a YOLOv5-style checkpoint dict with 'model'/'ema' entries -- TODO confirm.
    x = torch.load(f, map_location=torch.device('cpu'))
    if x.get('ema'):
        x['model'] = x['ema']  # replace model with its EMA (exponential moving average) weights
    for k in 'optimizer', 'optimizer_state_dict', 'training_results', 'wandb_id', 'ema', 'updates':  # training-only keys
        x[k] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16, halving file size
    for p in x['model'].parameters():
        p.requires_grad = False  # freeze for inference
    torch.save(x, s or f)  # overwrite f unless an alternative path s was given
    mb = os.path.getsize(s or f) / 1024**2  # filesize in MiB
    print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
def increment_path(path, exist_ok=False, sep='', mkdir=False):
    """Return `path` unchanged if it is free (or exist_ok=True); otherwise the
    next available numbered variant, e.g. runs/exp --> runs/exp2, runs/exp3, ...
    With mkdir=True the resulting directory is created."""
    candidate = Path(path)  # os-agnostic
    if candidate.exists() and not exist_ok:
        ext = candidate.suffix
        candidate = candidate.with_suffix('')
        similar = glob.glob(f"{candidate}{sep}*")  # paths sharing the stem
        numbers = []
        for entry in similar:
            match = re.search(rf"%s{sep}(\d+)" % candidate.stem, entry)
            if match:
                numbers.append(int(match.groups()[0]))
        bump = max(numbers) + 1 if numbers else 2  # next free index
        candidate = Path(f"{candidate}{sep}{bump}{ext}")
    target_dir = candidate if candidate.suffix == '' else candidate.parent
    if mkdir and not target_dir.exists():
        target_dir.mkdir(parents=True, exist_ok=True)
    return candidate
def get_stratified(inputs, ratio):
    """Evenly sample roughly `ratio` of `inputs`, preserving order.

    The ratio is first rounded to as many decimal places as the order of
    magnitude of len(inputs) (10 -> 1 place, 100 -> 2, ...), then expressed
    as an exact fraction p/q; every index i with i % q < p is kept.
    """
    places = math.floor(math.log(len(inputs), 10))
    rounded = str(round(float(ratio), places))
    keep, cycle = Decimal(rounded).as_integer_ratio()
    return [item for idx, item in enumerate(inputs) if idx % cycle < keep]
def nowtime(style = 0):
    """Return the current local time as a string.

    style 0 -> compact 'YYYYMMDD_HHMMSS' (default)
    style 1 -> readable 'YYYY-MM-DD HH:MM:SS'
    """
    if style == 0:
        pattern = "%Y%m%d_%H%M%S"
    elif style == 1:
        pattern = "%Y-%m-%d %H:%M:%S"
    return strftime(pattern, localtime())
def mkdir(fp):
    # Create directory `fp` (and any missing parents); no error if it already exists.
    Path(fp).mkdir(parents=True, exist_ok=True)
def clean_dir(dir_name ):
    """Delete every entry inside `dir_name`: files, dot-files and directories.

    Bug fix: the original called shutil.rmtree() on every entry, which raises
    NotADirectoryError for plain files; files and symlinks are now removed
    with os.remove() and only real directories with rmtree().
    """
    fp_list = glob.glob( os.path.join(dir_name,'*')) + glob.glob(os.path.join(dir_name,'.*'))
    for f in tqdm( fp_list, desc=f"Cleaning {dir_name}" ) :
        if os.path.isdir(f) and not os.path.islink(f):
            shutil.rmtree(f)  # recursive delete for real directories
        else:
            os.remove(f)  # plain files and symlinks
def get_fp_list(dir_name, ext=None):
    """Recursively collect file paths under `dir_name`.

    ext : optional suffix filter (e.g. '.txt'); None (or any falsy value)
        collects every file, matching the original's `if ext:` behavior.
    """
    fp_list = []
    for root, dirs, files in os.walk(dir_name):
        for file in files:
            # Single branch replaces the original's duplicated append logic.
            if not ext or file.endswith(ext):
                fp_list.append(os.path.join(root, file))
    return fp_list
# https://stackoverflow.com/questions/3086973/how-do-i-convert-this-list-of-dictionaries-to-a-csv-file
def dict2csvfile(toCSV,filename = 'tmp.csv',bom = 0,silent=0):
    # Write a list of dicts to CSV; the first dict's keys become the header row
    # (so every dict is expected to share the same keys -- TODO confirm).
    # bom=1 prepends a UTF-8 BOM so spreadsheet apps detect the encoding.
    keys = toCSV[0].keys()
    with open(filename, 'w', encoding='utf-8', newline='') as output_file:
        if bom: output_file.write('\ufeff')
        dict_writer = csv.DictWriter(output_file, keys)
        dict_writer.writeheader()
        dict_writer.writerows(toCSV)
    if not silent: print('dict2csvfile ok! please check ' + filename)
# https://stackoverflow.com/questions/18337407/saving-utf-8-texts-with-json-dumps-as-utf8-not-as-u-escape-sequence
def dict2jsonfile(dict_src, filename='tmp.json', silent=0):
    """Dump `dict_src` to `filename` as pretty-printed UTF-8 JSON
    (4-space indent, insertion key order, non-ASCII kept literal)."""
    with open(filename, 'w', encoding='utf-8') as out:
        json.dump(dict_src, out, indent=4, sort_keys=False, ensure_ascii=False)
    if not silent:
        print('dict2jsonfile ok! please check ' + filename)
def list_files(startpath):
    # Print a tree view of `startpath`, indenting 4 spaces per directory level.
    for root, dirs, files in os.walk(startpath):
        level = root.replace(startpath, '').count(os.sep)  # depth below startpath
        indent = ' ' * 4 * (level)
        print('{}{}/'.format(indent, os.path.basename(root)))
        subindent = ' ' * 4 * (level + 1)
        for f in files:
            print('{}{}'.format(subindent, f))
def ins(v):
    # Debug helper ("inspect"): dump dir(), type() and the printed form of `v`.
    print("ins>>>")
    print('>dir:')
    print(dir(v))
    print('>type:')
    print(type(v))
    print('>print:')
    print(v)
    print("ins<<<")
def save_obj(obj1, fp='tmp_obj.pkl', silent=0):
    """Pickle `obj1` to the file `fp` using the highest pickle protocol."""
    if not silent:
        print('saving obj to ' + fp)
    with open(fp, 'wb') as sink:
        pickle.dump(obj1, sink, protocol=pickle.HIGHEST_PROTOCOL)
    if not silent:
        print('save_obj ok! ' )
def load_obj(filename='tmp_obj.txt', silent=0):
    """Unpickle and return the object stored in `filename`."""
    if not silent:
        print('loading obj ' + filename)
    with open(filename, 'rb') as src:
        payload = pickle.load(src)
    if not silent:
        print('load_obj ok! ' )
    return payload
# alias
save = save_obj
load = load_obj
if __name__ == '__main__':
# do nothing
print('you called main, do nothing')
| [
"11898075+kyshel@users.noreply.github.com"
] | 11898075+kyshel@users.noreply.github.com |
bfcd8e3ab880fe73ecee9e775da43d432b361f6b | 90e131d3f407984c6eb651702c8539986216173b | /transform_content.py | a3ce95a5672e53fece2b5fe04e3f02e34b05a9f6 | [] | no_license | EnVyNm/EnVy | 2333c3f7954b6706527a09aee369bfceff7a1ddb | e12b71a4a6ae716998ecb64e6a4f8b09bb10b6a6 | refs/heads/master | 2020-04-18T22:33:15.011208 | 2014-05-07T03:38:46 | 2014-05-07T03:38:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,618 | py | #!/usr/bin/env python
# Copyright 2008 Brett Slatkin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Brett Slatkin (bslatkin@gmail.com)"
import os
import re
import urlparse
################################################################################
# URLs that have absolute addresses
ABSOLUTE_URL_REGEX = r"(http(s?):)?//(?P<url>[^\"'> \t\)]+)"
# URLs that are relative to the base of the current hostname.
BASE_RELATIVE_URL_REGEX = r"/(?!(/)|(http(s?)://)|(url\())(?P<url>[^\"'> \t\)]*)"
# URLs that have '../' or './' to start off their paths.
TRAVERSAL_URL_REGEX = r"(?P<relative>\.(\.)?)/(?!(/)|(http(s?)://)|(url\())(?P<url>[^\"'> \t\)]*)"
# URLs that are in the same directory as the requested URL.
SAME_DIR_URL_REGEX = r"(?!(/)|(http(s?)://)|(url\())(?P<url>[^\"'> \t\)]+)"
# URL matches the root directory.
ROOT_DIR_URL_REGEX = r"(?!//(?!>))/(?P<url>)(?=[ \t\n]*[\"'\)>/])"
# Start of a tag using 'src' or 'href'
TAG_START = r"(?i)\b(?P<tag>src|href|action|url|background)(?P<equals>[\t ]*=[\t ]*)(?P<quote>[\"']?)"
# Start of a CSS import
CSS_IMPORT_START = r"(?i)@import(?P<spacing>[\t ]+)(?P<quote>[\"']?)"
# CSS url() call
CSS_URL_START = r"(?i)\burl\((?P<quote>[\"']?)"
REPLACEMENT_REGEXES = [
(TAG_START + SAME_DIR_URL_REGEX,
"\g<tag>\g<equals>\g<quote>%(accessed_dir)s\g<url>"),
(TAG_START + TRAVERSAL_URL_REGEX,
"\g<tag>\g<equals>\g<quote>%(accessed_dir)s/\g<relative>/\g<url>"),
(TAG_START + BASE_RELATIVE_URL_REGEX,
"\g<tag>\g<equals>\g<quote>/%(base)s/\g<url>"),
(TAG_START + ROOT_DIR_URL_REGEX,
"\g<tag>\g<equals>\g<quote>/%(base)s/"),
# Need this because HTML tags could end with '/>', which confuses the
# tag-matching regex above, since that's the end-of-match signal.
(TAG_START + ABSOLUTE_URL_REGEX,
"\g<tag>\g<equals>\g<quote>/\g<url>"),
(CSS_IMPORT_START + SAME_DIR_URL_REGEX,
"@import\g<spacing>\g<quote>%(accessed_dir)s\g<url>"),
(CSS_IMPORT_START + TRAVERSAL_URL_REGEX,
"@import\g<spacing>\g<quote>%(accessed_dir)s/\g<relative>/\g<url>"),
(CSS_IMPORT_START + BASE_RELATIVE_URL_REGEX,
"@import\g<spacing>\g<quote>/%(base)s/\g<url>"),
(CSS_IMPORT_START + ABSOLUTE_URL_REGEX,
"@import\g<spacing>\g<quote>/\g<url>"),
(CSS_URL_START + SAME_DIR_URL_REGEX,
"url(\g<quote>%(accessed_dir)s\g<url>"),
(CSS_URL_START + TRAVERSAL_URL_REGEX,
"url(\g<quote>%(accessed_dir)s/\g<relative>/\g<url>"),
(CSS_URL_START + BASE_RELATIVE_URL_REGEX,
"url(\g<quote>/%(base)s/\g<url>"),
(CSS_URL_START + ABSOLUTE_URL_REGEX,
"url(\g<quote>/\g<url>"),
]
################################################################################
def TransformContent(base_url, accessed_url, content):
  """Rewrite URL references in proxied page `content` so they resolve back
  through this proxy.

  Each (pattern, replacement) pair in the module-level REPLACEMENT_REGEXES
  is applied in order; replacement templates are filled with the proxy path
  prefix ('base') and the directory of the fetched URL ('accessed_dir').

  base_url: proxy path prefix identifying the origin host
  accessed_url: the URL `content` was fetched from (anchors relative links)
  content: HTML/CSS text to transform

  NOTE(review): uses the Python 2 `urlparse` module layout; under Python 3
  this would be urllib.parse.
  """
  url_obj = urlparse.urlparse(accessed_url)
  accessed_dir = os.path.dirname(url_obj.path)
  if not accessed_dir.endswith("/"):
    accessed_dir += "/"  # ensure trailing slash so templates can append names
  for pattern, replacement in REPLACEMENT_REGEXES:
    fixed_replacement = replacement % {
        "base": base_url,
        "accessed_dir": accessed_dir,
    }
    content = re.sub(pattern, fixed_replacement, content)
  return content
| [
"envynm@gmail.com"
] | envynm@gmail.com |
b6223ed96517013e0969b1c9d814fc5c4699d324 | 9ee84830f4360c063c1bb9fe0d7312e5fdab47d9 | /例子-0911-02.py | 0e43c38a239c44c7dbc3d1b443786146afc9123d | [] | no_license | hcl621/python | 05974f23548f7afd5d8643d9cf1fd4f5b1937186 | af8972dc2918640451c13fa76447f2088054b38c | refs/heads/master | 2020-07-09T22:07:52.317034 | 2019-10-08T12:40:53 | 2019-10-08T12:40:53 | 204,094,003 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | #coding=utf-8
# Minimal demo of catching a NameError.
try:
    j  # 'j' was never assigned, so this lookup raises NameError
except NameError as e:
    print('catch error!!!')
| [
"noreply@github.com"
] | noreply@github.com |
448adf2f39782bfbffbcd858cbcdfd29fa0d2642 | d4822b0a4bf2279f31edf5eceddac884f77de5b0 | /order/permissions.py | 6f85f40a9a84eafa2238d36dbc52d85319088402 | [] | no_license | xizlt/api_sport | bb05fa36272782bd91796ac8acb72e16cb38d881 | 55bbccfb344528016e4f903bcadad6ffaa02db17 | refs/heads/master | 2023-03-05T02:32:33.215134 | 2021-02-21T21:49:28 | 2021-02-21T21:49:28 | 297,443,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | from rest_framework.permissions import SAFE_METHODS, BasePermission
class OrderPermission(BasePermission):
    # DRF permission for Order objects: reads (safe methods) are open and
    # anyone may POST a new order; other access requires the owner or staff.
    def has_object_permission(self, request, view, obj):
        # Object-level: read-only always allowed; writes need an authenticated
        # user who owns the order, or staff.
        return bool(
            request.method in SAFE_METHODS or
            request.user and
            request.user.is_authenticated and (obj.user == request.user or request.user.is_staff)
        )

    def has_permission(self, request, view):
        # View-level: POST (order creation) is open to everyone, including
        # anonymous users; everything else needs read-only or authentication.
        if request.method == 'POST':
            return True
        return bool(
            request.method in SAFE_METHODS or
            request.user.is_authenticated or request.user.is_staff
        )
class ItemViewPermission(BasePermission):
    # DRF permission for order items: reads are open; writes require an
    # authenticated user who owns the parent order (obj.order.user), or staff.
    def has_object_permission(self, request, view, obj):
        return bool(
            request.method in SAFE_METHODS or
            request.user and
            request.user.is_authenticated and (obj.order.user.id == request.user.id or request.user.is_staff)
        )

    def has_permission(self, request, view):
        # View-level: read-only or any authenticated user.
        return bool(
            request.method in SAFE_METHODS or
            request.user.is_authenticated or request.user.is_staff
        )
| [
"ivantreada@gmail.com"
] | ivantreada@gmail.com |
feaced22bf438e634a1320d815dfdf7b85c5bbe9 | 5f5fb1cec25f432a8cc1c73fb9943b0b45588042 | /Base/BaseElementEnmu.py | c2416f791cdbf7aa1296caec5b556acaa7dc11fd | [] | no_license | qijianxiaobai/Python-Appium | 732f64c8e982f1a660b1c45bdf3a73c692e04417 | caef97e5eaa6f739b08a85194bfb61a2666419b6 | refs/heads/master | 2020-04-11T11:36:43.266070 | 2018-12-17T10:10:02 | 2018-12-17T10:10:02 | 161,753,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py |
class Element(object):
    """Central registry of keyword strings and settings shared by the Appium test framework."""
    # Common action keywords (element lookup strategies and gestures)
    find_element_by_id = "id"
    find_elements_by_id = "ids"
    INDEX = "index"
    find_elements_by_xpath = "xpaths"
    find_element_by_xpath = "xpath"
    find_element_by_css_selector = "css"
    find_element_by_class_name = "class_name"
    CLICK = "click"
    BACK = "back"
    TAP = "tap"
    ACCESSIBILITY = "accessibility"
    ADB_TAP = "adb_tap"
    SWIPE_DOWN = "swipe_down"
    SWIPE_UP = "swipe_up"
    SWIPE_LEFT = "swipe_left"
    SET_VALUE = "set_value"
    GET_VALUE = "get_value"
    WAIT_TIME = 20  # default explicit-wait timeout in seconds — presumably; confirm against callers
    PRESS_KEY_CODE = "press_keycode"
    GET_CONTENT_DESC = "get_content_desc"
    # Error-log keys
    TIME_OUT = "timeout"
    NO_SUCH = "noSuch"
    WEB_DROVER_EXCEPTION = "WebDriverException"
    INDEX_ERROR = "index_error"
    STALE_ELEMENT_REFERENCE_EXCEPTION = "StaleElementReferenceException"
    DEFAULT_ERROR = "default_error"
    # Checkpoint keywords
    CONTRARY = "contrary" # inverse checkpoint: the step FAILS if the element still exists (e.g. it survives a delete)
    CONTRARY_GETVAL = "contrary_getval" # inverse value checkpoint: a successful comparison means failure
    DEFAULT_CHECK = "default_check" # default checkpoint: simply look the element up on the page
    COMPARE = "compare" # compare historical data against actual data
    TOAST = "toast"
    RE_CONNECT = 1 # whether to re-run a test case once after it fails
    INFO_FILE = "info.pickle"
    SUM_FILE = "sum.pickle"
    DEVICES_FILE = "devices.pickle"
    REPORT_FILE = "Report.xlsx"
| [
"ad2156@email.vccs.edu"
] | ad2156@email.vccs.edu |
d0ca2d9cd484b355ced743af6aebbbdb18d8529a | 3cf8d34cc1ea0ef7857742211bed333ee0400e63 | /46.py | 422649ee9f326b4384db1ce35c43258bbd08ace6 | [] | no_license | ElseVladimir/py_tasks | 2dd11130cae83af772f4cb89d04e80da9dbcf070 | bdfa6e4dbb06b67eb79f3a06ba4ab1bf6052d1a6 | refs/heads/master | 2023-01-27T21:54:18.911645 | 2020-12-02T15:51:55 | 2020-12-02T15:51:55 | 303,987,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,203 | py | """
В этой задаче вам необходимо воспользоваться API сайта artsy.net
API проекта Artsy предоставляет информацию о некоторых деятелях искусства, их работах, выставках.
В рамках данной задачи вам понадобятся сведения о деятелях искусства (назовем их, условно, художники).
Вам даны идентификаторы художников в базе Artsy.
Для каждого идентификатора получите информацию о имени художника и годе рождения.
Выведите имена художников в порядке неубывания года рождения. В случае если у художников одинаковый
год рождения, выведите их имена в лексикографическом порядке.
Работа с API Artsy
Полностью открытое и свободное API предоставляют совсем немногие проекты. В большинстве случаев,
для получения доступа к API необходимо зарегистрироваться в проекте, создать свое приложение,
и получить уникальный ключ (или токен), и в дальнейшем все запросы к API осуществляются при помощи этого ключа.
Чтобы начать работу с API проекта Artsy, вам необходимо пройти на стартовую страницу документации к
API https://developers.artsy.net/start и выполнить необходимые шаги, а именно зарегистрироваться,
создать приложение, и получить пару идентификаторов Client Id и Client Secret. Не публикуйте эти идентификаторы.
После этого необходимо получить токен доступа к API. На стартовой странице документации есть примеры
того, как можно выполнить запрос и как выглядит ответ сервера. Мы приведем пример запроса на Python.
import requests
import json
client_id = '...'
client_secret = '...'
# инициируем запрос на получение токена
r = requests.post("https://api.artsy.net/api/tokens/xapp_token",
data={
"client_id": client_id,
"client_secret": client_secret
})
# разбираем ответ сервера
j = json.loads(r.text)
# достаем токен
token = j["token"]
Теперь все готово для получения информации о художниках. На стартовой странице документации есть
пример того, как осуществляется запрос и как выглядит ответ сервера. Пример запроса на Python.
# создаем заголовок, содержащий наш токен
headers = {"X-Xapp-Token" : token}
# инициируем запрос с заголовком
r = requests.get("https://api.artsy.net/api/artists/4d8b92b34eb68a1b2c0003f4", headers=headers)
# разбираем ответ сервера
j = json.loads(r.text)
Примечание:
В качестве имени художника используется параметр sortable_name в кодировке UTF-8.
Пример входных данных:
4d8b92b34eb68a1b2c0003f4
537def3c139b21353f0006a6
4e2ed576477cc70001006f99
Пример выходных данных:
Abbott Mary
Warhol Andy
Abbas Hamra
Примечание для пользователей Windows
При открытии файла для записи на Windows по умолчанию используется кодировка CP1251, в то время как для записи имен на сайте используется кодировка UTF-8, что может привести к ошибке при попытке записать в файл имя с необычными символами. Вы можете использовать print, или аргумент encoding функции open.
У вас есть неограниченное число попыток.
Время одной попытки: 5 mins
"""
import requests
import json

# One artist id per line in the assignment's dataset file.
with open('dataset_24476_4.txt', 'r') as dataset:
    idlist = [line.strip() for line in dataset]

# NOTE: 'token' is a placeholder — obtain a real X-Xapp token from the
# Artsy token endpoint before running.
token = 'token'
headers = {"X-Xapp-Token" : token}

# Map each artist's sortable name to their birth year.
artists = dict()
for ident in idlist:
    response = requests.get("https://api.artsy.net/api/artists/{}".format(ident), headers=headers)
    payload = json.loads(response.text)
    artists[payload['sortable_name']] = payload['birthday']

# Sort by (birth year, name); ties are broken lexicographically by name.
srtd_artists = sorted(artists.items(), key=lambda x: (x[1], x[0]))
for i in srtd_artists:
print(i[0]) | [
"voinvova95@gmail.com"
] | voinvova95@gmail.com |
d7551e573e1989b8a7920c2d5ef568749c8cd130 | 642716a67e15459d713217ed02aa38d56348ff89 | /SF_Movies/urls.py | 2462fc91656ad0ae785117573efd3b5c33275fe6 | [] | no_license | zxzhang/coding-challenge-tools | 42eaa2355ad66e82abe2ac6401d702f505d3ff06 | cfc8c3a446331d2f29dae4fe972cfa7f7fdcb25d | refs/heads/master | 2021-01-19T07:24:06.713307 | 2015-05-20T01:43:52 | 2015-05-20T01:43:52 | 34,290,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import RedirectView

urlpatterns = [
    # Redirect the site root to the movie list. `permanent` is passed to
    # as_view() instead of mutating the RedirectView class attribute, which
    # would have made every RedirectView in the project permanent.
    url(r'^$', RedirectView.as_view(url='/movies', permanent=True)),
    url(r'^movies/', include('movies.urls')),
    url(r'^admin/', include(admin.site.urls)),
]
| [
"zxzhang1991@gmail.com"
] | zxzhang1991@gmail.com |
1638e966bc35966aa652f2387e6f29f7caad377c | 9c4c895d23f8109d6ddd8c26937e33d905665b6e | /testing/readfile.py | 6d4e684df232b8e6e6116a8e879e9fd3030994f6 | [] | no_license | shaybix/Raazi | 401b3b4cd8ef31248f9efb58d8750a25ad319718 | 1fee51d2bdfb0a023c51ae5bc906f03f4e055544 | refs/heads/master | 2021-01-21T21:39:43.959826 | 2016-03-21T00:42:02 | 2016-03-21T00:42:02 | 18,281,430 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | import sqlite3
# Open the sample database and print one column from its first row.
connection = sqlite3.connect('sample.sqlite3')
cursor = connection.cursor()
cursor.execute('Select * from main')

# NOTE(review): this file is opened for writing but nothing is ever written,
# so the open() only truncates any existing sample2.txt. It is kept solely
# because the script's final line closes it — confirm it can be removed.
f = open('sample2.txt', 'w')

fetch = cursor.fetchone()

# Keep the last five columns of the row; print the second of them.
info = fetch[-5:]
print(info[1])

# Release the database handle (the text file is closed on the next line).
connection.close()
f.close() | [
"shayba@shaybix.com"
] | shayba@shaybix.com |
74a2ad6e251baa847e24ec5c7ee551c2e7349fbe | a232988fe8f247fbd56f7a91748ccfbf73326265 | /blog/blog/views.py | 739eb639313cb9e904159fe63b5e39ce4fd5f797 | [] | no_license | Vigs16/hello-world | 32236f6258ce42e6d8f5ef5139ecd84090be37bd | 4212e0669944f22d0d3148516b97bf9fec95b72d | refs/heads/master | 2021-01-20T13:03:51.843003 | 2017-05-30T04:52:13 | 2017-05-30T04:52:13 | 90,442,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | from flask import render_template
from flask import request, redirect, url_for
from . import app
from .database import session, Entry
PAGINATE_BY = 10
@app.route("/")
@app.route("/page/<int:page>")
def entries(page=1):
    """Render one page of blog entries, newest first."""
    page_index = page - 1  # zero-based page number

    count = session.query(Entry).count()
    total_pages = (count - 1) // PAGINATE_BY + 1

    start = page_index * PAGINATE_BY
    end = start + PAGINATE_BY

    # Newest entries first, sliced down to the requested page.
    page_entries = session.query(Entry).order_by(Entry.datetime.desc())[start:end]

    return render_template(
        "entries.html",
        entries=page_entries,
        has_next=page_index < total_pages - 1,
        has_prev=page_index > 0,
        page=page,
        total_pages=total_pages,
    )
@app.route("/entry/add", methods=["GET"])
def add_entry_get():
    """Serve the blank add-entry form."""
    return render_template("add_entry.html")
@app.route("/entry/add", methods=["POST"])
def add_entry_post():
    """Persist the submitted form as a new Entry, then return to the entry list."""
    new_entry = Entry(
        title=request.form["title"],
        content=request.form["content"],
    )
    session.add(new_entry)
    session.commit()
    return redirect(url_for("entries"))
"vigs16@outlook.com"
] | vigs16@outlook.com |
1767ed91cd5ecb9dc4967b259a9c41f4baf56d84 | 093d2e689823e5716c46b09511d8adebb2320573 | /Python Competitive Program/count occurance in tuple.py | c4103e6f394595ad87dceed955728d81a0eb86fe | [] | no_license | gauravk268/Competitive_Coding | 02813a908e4cd023e4a7039997e750d1fdae6d92 | 783c246dbaf36425a5b7cb76b4e79e2b7ba1a10c | refs/heads/master | 2022-10-15T02:25:41.598723 | 2022-10-03T06:09:17 | 2022-10-03T06:09:17 | 235,630,000 | 20 | 22 | null | 2022-10-03T06:09:18 | 2020-01-22T17:45:32 | C++ | UTF-8 | Python | false | false | 138 | py |
# Count occurrences of an element
str=input("Enter the string : ")
word_count={char:str.count(char) for char in str}
print(word_count)
| [
"mishraravi179@gmail.com"
] | mishraravi179@gmail.com |
1755b870d7e1acc791f2b3b36cfe6ebb0e6e8cfe | ec3964d765f2a499fd017a4e2fb89d405c3070c9 | /basicapp/models.py | 9358d6cd70b0ab4d7b5c7d1096ec410afe5aa252 | [] | no_license | aryan1jain2/Trycycle | 77476e8e769185db42fc7bb2ecdac6ad1f7c102c | 00e63728fdcc7e8d4ec37964ed4ac8e5f856ad14 | refs/heads/main | 2023-03-02T03:31:55.354309 | 2021-02-13T10:52:10 | 2021-02-13T10:52:10 | 338,552,016 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,529 | py | from django.db import models
# Create your models here.
class userinfo(models.Model): #corresponds to customer table
    """Customer contact details."""
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    # reg_no = models.CharField(max_length=100)
    # room_no = models.CharField(max_length=100)
    username = models.CharField(max_length=20, default = "No name")
    email = models.CharField(max_length=100)
    phone_no = models.BigIntegerField()
class location(models.Model):
    """A named pick-up/drop-off point, keyed by an externally assigned id."""
    location_id = models.CharField(max_length = 40, primary_key = True)
    name = models.CharField(max_length=40)
class contact_us(models.Model):
    """A message submitted through the contact form."""
    name = models.CharField(max_length=40)
    email =models.CharField(max_length=75)
    subject=models.TextField()
    feed=models.TextField()   # feedback body
class cycle_accessories(models.Model):
    """A rentable accessory (helmet, lock, ...) with stock and per-item price."""
    name = models.CharField(max_length=40, primary_key = True)
    quantity = models.IntegerField()
    costperitem = models.IntegerField()
class cycle_category(models.Model):
    """A class of cycle with its daily rate and late-return fee."""
    name = models.CharField(max_length = 40, primary_key = True)
    costperday = models.IntegerField()
    latefeesperhour = models.IntegerField()
class cycle(models.Model):
    """An individual cycle available for rental."""
    cycle_id = models.CharField(max_length=40, primary_key = True)
    model_year = models.CharField(max_length=5)
    model = models.CharField(max_length=40)
    # Deleting a category removes its cycles as well.
    category = models.ForeignKey(cycle_category, on_delete=models.CASCADE)
    availability = models.CharField(max_length=40)
class discount(models.Model):
    """A percentage discount code with an expiry date."""
    discount_code = models.CharField(max_length = 40, primary_key = True)
    name = models.CharField(max_length=40)
    expiry_date = models.DateField()
    percentage = models.IntegerField()
class insurance(models.Model):
    """An insurance plan that can be added to a booking, priced per day."""
    insurance_code = models.CharField(max_length = 40, primary_key = True)
    name = models.CharField(max_length=40)
    costperday = models.IntegerField()
    coverage_type = models.CharField(max_length = 40)
class Bookings(models.Model):
    """A cycle rental booking placed by a user."""
    user = models.CharField(max_length=40)
    # NOTE(review): max_length is ignored by DateField/TimeField — confirm intent.
    date = models.DateField(max_length=10)
    startpt = models.CharField(max_length=40)
    lastpt = models.CharField(max_length=40)
    start_time = models.TimeField(max_length=40)
    # NOTE(review): a string default on a TimeField looks wrong — verify.
    end_time = models.TimeField(max_length=40, default = '24 hours')
    accessory = models.CharField(max_length=40, null = True)
    discount = models.CharField(max_length=40, null = True)
    insurance = models.CharField(max_length=40, null = True)
    cycle_id = models.CharField(max_length=40)
    tot = models.IntegerField()   # total price of the booking
| [
"aryan1jain2@gmail.com"
] | aryan1jain2@gmail.com |
4670ba9b785563921ebd4e8eb26fa337062abb5b | 1625edfe28b4b0979fd32b4a3c5e55249a993fd5 | /baekjoon14915.py | 7648498a85fccf5a369e7197408b17d1726a754d | [] | no_license | beOk91/baekjoon2 | b8bf504c506c6278899d4107ecfe51974ef13f5e | 39569f8effb8e32405a7d74d98bdabcab783ec56 | refs/heads/master | 2023-05-11T20:11:19.015113 | 2020-09-14T23:58:49 | 2020-09-14T23:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | m,n=map(int,input().strip().split())
def conversion(m, n):
    """Return m written in base n (2 <= n <= 16), using digits 0-9 and A-F."""
    digits = "0123456789ABCDEF"
    if m < n:
        # Single digit left: the recursion bottoms out here.
        return digits[m]
    # Emit the higher-order digits first, then append the current remainder.
    return conversion(m // n, n) + digits[m % n]
print(conversion(m,n)) | [
"be_ok91@naver.com"
] | be_ok91@naver.com |
5e2e9ee1d976ed4b9dae0c19f9e48d49c14d8d4a | d4442db5a7ab9db2b04fef640a9864f3fba54758 | /src/python/WMCore/Services/Dashboard/DashboardAPI.py | 9f90e4842ae59431378744395dc3404a30601661 | [] | no_license | stuartw/WMCore | fa25ff19ab5058a635d35d3c58a0ac56a3e079a1 | 38c39c43f7237fd316930839674ac9be3c0ee8cc | refs/heads/master | 2021-01-18T07:18:18.324604 | 2012-10-18T22:30:34 | 2012-10-18T22:30:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | #!/usr/bin/python
"""
This is the Dashboard API Module for the Worker Node
"""
from WMCore.Services.Dashboard import apmon
import time
from types import DictType, StringType, ListType
#
# Methods for manipulating the apmon instance
#
# Internal attributes
apmonInstance = None
apmonInit = False
# Monalisa configuration
apmonConf = ["cms-wmagent-job.cern.ch"]
#
# Method to create a single apmon instance at a time
#
def getApmonInstance( logr, apmonServer ):
    """Lazily create and return the process-wide ApMon instance (Python 2 module).

    Falls back to the hard-coded `apmonConf` destination when no server list is
    given. Any construction failure is swallowed, so the caller may receive None.
    """
    global apmonInstance
    global apmonInit
    # Only the first caller attempts construction; apmonInit marks the attempt
    # so a failed construction is not retried on every call.
    if apmonInstance is None and not apmonInit :
        apmonInit = True
        if apmonInstance is None :
            try :
                if not apmonServer:
                    apmonInstance = apmon.ApMon(apmonConf, logr) #apmonLoggingLevel)
                else:
                    apmonInstance = apmon.ApMon(apmonServer, logr)
            except Exception, e :
                # Best effort: monitoring must never break the caller.
                pass
    return apmonInstance
#
# Method to free the apmon instance
#
def apmonFree() :
    """Release the shared ApMon instance and allow a fresh one to be created later."""
    global apmonInstance
    global apmonInit
    if apmonInstance is not None :
        try :
            apmonInstance.free()
        except Exception, e :
            # Best effort: ignore failures while tearing down monitoring.
            pass
        apmonInstance = None
    # Reset the init flag so getApmonInstance() may construct again.
    apmonInit = False
#
# Method to send params to Monalisa service
#
def apmonSend(taskid, jobid, params, logr, apmonServer) :
    """Send job parameters to the MonALISA service; return 0 on success, 1 on failure.

    Arguments of unexpected types are coerced to safe placeholders so a bad
    caller cannot crash the monitoring path (Python 2 `types` checks).
    """
    apm = getApmonInstance( logr, apmonServer )
    if apm is not None :
        if not isinstance(params, DictType) and not isinstance(params, ListType) :
            params = {'unknown' : '0'}
        if not isinstance(taskid, StringType) :
            taskid = 'unknown'
        if not isinstance(jobid, StringType) :
            jobid = 'unknown'
        try :
            apm.sendParameters(taskid, jobid, params)
            return 0
        except Exception, e:
            # Fall through to the failure return code.
            pass
    return 1
| [
"sfoulkes@4525493e-7705-40b1-a816-d608a930855b"
] | sfoulkes@4525493e-7705-40b1-a816-d608a930855b |
92296cabb36cdc43ac8a55f79c416c6d3190cc2b | f332244831040530c8d4d3ff42ee4e06078ca22b | /cart/views.py | eed5c0e9c50a9e6889b30cec001caa5258639029 | [] | no_license | worlddeleteRin/cosmetics | f8f1bd8a3d9b6b149ae29126fa6f4bd6bb5e72b1 | f7d593f3206606d24084d6281bd6d5472654da25 | refs/heads/master | 2023-03-04T04:34:59.349269 | 2021-02-13T19:43:37 | 2021-02-13T19:43:37 | 296,117,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,561 | py | from django.shortcuts import render
from django.http import HttpResponse, HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from .models import *
from products.models import *
from collections import defaultdict
import pandas as pd
import urllib.parse
# to serialize to json format
from django.core import serializers
from django.core.mail import send_mail
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.html import strip_tags
# Create your views here.
def index(request):
    """Render the cart page with all brands/categories and the session cart's items."""
    allcategories = Category.objects.all()
    allbrands = Brand.objects.all()
    # Every visitor gets a session; the session key identifies the cart.
    if not request.session.session_key:
        request.session.create()
        current_session_key = request.session.session_key
    else:
        current_session_key = request.session.session_key
    # get_or_create returns a (cart, created) tuple; only cart[0] is used below.
    cart = Cart.objects.get_or_create(
        session_key = current_session_key
    )
    cart_items = cart[0].item_set.all()
    return render(request, 'cart/index.html', {
        'allbrands': allbrands,
        'categories': allcategories,
        'session_key': current_session_key,
        'items': cart_items,
        'current_cart': cart[0],
    })
def add_item(request, product_id):
    """Add one unit of the product to the session cart, then return to the cart page.

    An existing cart line with the same product name is incremented; otherwise a
    new Item row is snapshotted from the product.
    """
    product = Product.objects.get(id=product_id)

    # Ensure the visitor has a session and an associated cart.
    if not request.session.session_key:
        request.session.create()
    current_session_key = request.session.session_key
    cart, _ = Cart.objects.get_or_create(session_key=current_session_key)

    # The original also built an unused `cart_items` queryset here; removed.
    existing = Item.objects.filter(cart=cart, name=product.name)
    if existing.exists():
        current_item = existing.get()
        current_item.quantity += 1
        current_item.save()
    else:
        Item(
            cart=cart,
            name=product.name,
            price=product.price,
            sale_price=product.sale_price,
            imgurl=product.imgurl,
            brand=product.pr_brand.name,
            series=product.pr_series.name,
            obiem=product.obiem,
        ).save()
    return HttpResponseRedirect(reverse('cart:index'))
def add_quantity(request, item_id):
    """Increase a cart item's quantity by one, then return to the cart page."""
    item = Item.objects.get(id=item_id)
    item.quantity += 1
    item.save()
    return HttpResponseRedirect(reverse('cart:index'))
def remove_quantity(request, item_id):
    """Decrement a cart item's quantity, deleting the item when it reaches zero."""
    session_key = request.session.session_key
    # Kept for its side effect: guarantees a cart exists for this session.
    Cart.objects.get_or_create(session_key=session_key)

    item = Item.objects.get(id=item_id)
    if item.quantity == 1:
        item.delete()
    else:
        item.quantity -= 1
        item.save()
    return HttpResponseRedirect(reverse('cart:index'))
def remove_item_ajax(request):
    """AJAX: decrement a cart item's quantity; delete the row when it hits zero.

    Responds with need_delete='yes' when the row was removed (so the client can
    drop it from the DOM), otherwise with the decremented quantity.
    """
    current_session_key = request.session.session_key
    cart = Cart.objects.get(session_key = current_session_key)
    item_id = request.GET['item_id']
    current_item = Item.objects.get(id = item_id, cart = cart)
    if current_item.quantity == 1:
        # Last unit: remove the row entirely and tell the client to delete it.
        current_item.delete()
        return JsonResponse({
            'message': 'everything is ok',
            'need_delete': 'yes',
        }, status = 200)
    # NOTE(review): this point is reached only when quantity > 1, so the cart
    # cannot be empty here and the promo reset below looks unreachable — it was
    # probably meant to run after the delete() above. Confirm intended behaviour.
    check_items = cart.if_items_empty()
    if check_items == True:
        cart.promo = None
        cart.save()
    else:
        current_item.quantity -= 1
        current_item.save()
    quantity = current_item.quantity
    return JsonResponse({
        'message': 'everything is ok',
        'need_delete': 'no',
        'quantity': quantity,
    }, status = 200)
def add_item_ajax(request):
    """AJAX: increment a cart item's quantity and report the new quantity."""
    # Ensure the visitor has a session and an associated cart.
    if not request.session.session_key:
        request.session.create()
    session_key = request.session.session_key
    cart, _ = Cart.objects.get_or_create(session_key=session_key)

    item = Item.objects.get(id=request.GET['item_id'], cart=cart)
    item.quantity += 1
    item.save()

    return JsonResponse({
        'message': 'everything is ok',
        'quantity': item.quantity,
    }, status=200)
def update_item_amount_ajax(request):
    """AJAX: return the line total (quantity x effective price) for one cart item."""
    cart = Cart.objects.get(session_key=request.session.session_key)
    item = Item.objects.get(id=request.GET['item_id'], cart=cart)

    # The sale price, when set, takes precedence over the regular price.
    unit_price = item.sale_price if item.sale_price else item.price
    amount = item.quantity * unit_price

    return JsonResponse({
        'message': 'everything is ok',
        'item_amount': amount,
    }, status=200)
def update_total_amount_ajax(request):
    """AJAX: return the cart total, plus the discounted total when a promo is set."""
    cart = Cart.objects.get(session_key=request.session.session_key)
    total_amount = cart.get_total()

    # Early return for the promo case keeps the two payload shapes explicit.
    if cart.promo is not None:
        return JsonResponse({
            'total_amount_promo': cart.get_total_promo(),
            'message': 'everything is ok',
            'total_amount': total_amount,
            'has_promo': 'true',
        }, status=200)

    return JsonResponse({
        'message': 'everything is ok',
        'total_amount': total_amount,
        'has_promo': 'false',
    }, status=200)
def remove_item_from_cart_ajax(request):
    """AJAX: delete an item from the session cart; drop the promo if the cart empties.

    The original's debug print() calls have been removed.
    """
    cart = Cart.objects.get(session_key=request.session.session_key)
    item = Item.objects.get(cart=cart, id=request.GET['item_id'])
    item.delete()

    # A promo code must not survive an empty cart.
    if cart.if_items_empty():
        cart.promo = None
        cart.save()

    return JsonResponse({
        'message': 'everything is ok',
    }, status=200)
def add_to_cart_ajax(request):
    """AJAX: add one unit of a product to the session cart, creating cart/item as needed.

    Matches an existing cart line by product name AND price; removed the
    original's unused `message` variable and duplicated filter/get lookup.
    """
    # Ensure the visitor has a session and an associated cart.
    if not request.session.session_key:
        request.session.create()
    session_key = request.session.session_key
    cart, _ = Cart.objects.get_or_create(session_key=session_key)

    product = Product.objects.get(id=request.GET['product_id'])

    existing = Item.objects.filter(cart=cart, name=product.name, price=product.price)
    if existing.exists():
        item = existing.get()
        item.quantity += 1
        item.save()
    else:
        Item(
            cart=cart,
            name=product.name,
            price=product.price,
            sale_price=product.sale_price,
            imgurl=product.imgurl,
            brand=product.pr_brand.name,
            series=product.pr_series.name,
            obiem=product.obiem,
        ).save()

    return JsonResponse({
        'message': 'Товар добавлен в корзину!',
    }, status=200)
def create_order_ajax(request):
    """AJAX: turn the session cart into an Orders row, email both parties, empty the cart.

    All customer-facing fields arrive URL-encoded in the query string and are
    unquoted before use. The cart's items are re-parented onto the new order,
    then detached from the cart; the promo code is cleared.
    """
    current_session_key = request.session.session_key
    cart = Cart.objects.get(session_key = current_session_key)
    # Lazy queryset: not evaluated until the re-parenting loop below runs.
    cart_items = cart.item_set.all()
    # Parse and URL-decode the order details sent by the checkout form.
    delivery_method = request.GET['delivery_method']
    delivery_method = urllib.parse.unquote(delivery_method)
    delivery_cost = request.GET['delivery_cost']
    payment_method = request.GET['payment_method']
    payment_method = urllib.parse.unquote(payment_method)
    customer_name = request.GET['customer_name']
    customer_name = urllib.parse.unquote(customer_name)
    customer_phone = request.GET['customer_phone']
    customer_city = request.GET['customer_city']
    customer_city = urllib.parse.unquote(customer_city)
    customer_address = request.GET['customer_address']
    customer_address = urllib.parse.unquote(customer_address)
    order_comment = request.GET['cart_comment']
    order_comment = urllib.parse.unquote(order_comment)
    customer_email = request.GET['customer_email']
    customer_email = urllib.parse.unquote(customer_email)
    # Final price = delivery plus the (promo-discounted) cart total.
    order_price = int(delivery_cost) + cart.get_total_promo()
    new_order = Orders(
        name = customer_name,
        phone = customer_phone,
        email = customer_email,
        delivery = delivery_method + ' ' + delivery_cost,
        payment = payment_method,
        city = customer_city,
        address = customer_address,
        order_price = order_price,
        comment = order_comment,
    )
    new_order.save()
    cart_items_mail = []
    order_price_mail = order_price
    order_comment_mail = order_comment
    customer_address_mail = customer_city + ', ' + customer_address
    delivery_method_mail = delivery_method
    order_id = new_order.id
    # Re-parent every cart item onto the new order and collect mail rows.
    for item in cart_items:
        new_order.item_set.add(item)
        cart_items_mail.append([item.name, item.quantity, item.price])
    # Detach the items from the cart and drop any promo code.
    for item in cart.item_set.all():
        cart.item_set.remove(item)
    cart.promo = None
    cart.save()
    cart_items_all = new_order.item_set.all()
    # Template context shared by the customer and admin e-mails.
    context = {
        'order_id': order_id,
        'order_price_mail': order_price_mail,
        'name': customer_name,
        'phone': customer_phone,
        'email': customer_email,
        'delivery_address': customer_address_mail,
        'delivery_cost': delivery_cost,
        'cart_items_all': cart_items_all,
        'delivery_method_mail': delivery_method_mail,
        'order_comment_mail': order_comment_mail,
    }
    client_html_message = render_to_string('cart/blocks/order_mail_template.html', context)
    client_html_message_plain = strip_tags(client_html_message)
    admin_html_message = render_to_string('cart/blocks/order_mail_template_admin.html', context)
    admin_html_message_plain = strip_tags(admin_html_message)
    # Best-effort e-mail delivery: the order is already saved, so a mail
    # failure must not fail the request.
    try:
        send_mail(
            'Заказ № {}'.format(order_id),
            admin_html_message_plain,
            settings.EMAIL_HOST_USER,
            [
                # 'worlddelete0@yandex.ru',
                'proff-butik@mail.ru'
            ],
            html_message = admin_html_message
        )
        print('mail is sent')
        print('try to send mail')
        send_mail(
            'Заказ № {}'.format(order_id),
            client_html_message_plain,
            settings.EMAIL_HOST_USER,
            [
                customer_email,
                # 'proff-butik@mail.ru'
            ],
            html_message = client_html_message
        )
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # narrowing it to Exception would be safer.
    except:
        print('was an error when send mail')
    return JsonResponse({
        'order_created': 'yes',
    }, status = 200)
def update_nav_total(request):
    """AJAX: return the cart total displayed in the navigation bar."""
    # Ensure the visitor has a session and an associated cart.
    if not request.session.session_key:
        request.session.create()
    session_key = request.session.session_key
    cart, _ = Cart.objects.get_or_create(session_key=session_key)

    return JsonResponse({
        'cart_total': cart.get_total(),
    }, status=200)
def check_promo_ajax(request):
    """AJAX: validate a promo code; attach it to the session cart when it exists.

    Removed the original's debug print() calls and collapsed the
    filter().exists() + get() pair into a single lookup.
    """
    cart = Cart.objects.get(session_key=request.session.session_key)
    code = request.GET['promo']
    try:
        promo = Promocode.objects.get(name=code)
    except Promocode.DoesNotExist:
        return JsonResponse({
            'exist': 'no',
        }, status=200)

    cart.promo = promo
    cart.save()
    return JsonResponse({
        'exist': 'yes',
    }, status=200)
def set_promo(request):
    """AJAX: report whether the session cart has a promo code, and its name if so.

    Removed the original's debug print() call.
    """
    cart = Cart.objects.get(session_key=request.session.session_key)
    if cart.promo is not None:
        return JsonResponse({
            'promo_name': cart.promo.name,
            'exist': 'yes'
        }, status=200)
    return JsonResponse({
        'exist': 'no'
    }, status=200)
| [
"noname@MacBook-Pro-Rin.local"
] | noname@MacBook-Pro-Rin.local |
631a2dcb65f7b01f394a4887810810476c69ec19 | 933376c11498a6567da8d7eb7d2675100895c3ba | /pyzoo/zoo/chronos/forecaster/tcn_forecaster.py | 1d2359d1cc2e54a9820e4f91c65c4ff5cd87761b | [
"Apache-2.0"
] | permissive | intel-analytics/analytics-zoo | 320a461765f86d41dd456b598b1cf1d51d57f4c4 | 7cc3e2849057d6429d03b1af0db13caae57960a5 | refs/heads/master | 2023-08-13T20:47:58.621714 | 2023-07-06T00:49:11 | 2023-07-06T00:49:11 | 90,328,920 | 3,104 | 996 | Apache-2.0 | 2023-09-06T01:51:18 | 2017-05-05T02:27:30 | Jupyter Notebook | UTF-8 | Python | false | false | 5,894 | py | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.chronos.forecaster.base_forecaster import BasePytorchForecaster
from zoo.chronos.forecaster.utils import set_pytorch_seed
from zoo.chronos.model.tcn import TCNPytorch
from zoo.chronos.model.tcn import model_creator, optimizer_creator, loss_creator
class TCNForecaster(BasePytorchForecaster):
    """
    Example:
        >>> #The dataset is split into x_train, x_val, x_test, y_train, y_val, y_test
        >>> forecaster = TCNForecaster(past_seq_len=24,
                                       future_seq_len=5,
                                       input_feature_num=1,
                                       output_feature_num=1,
                                       ...)
        >>> forecaster.fit((x_train, y_train))
        >>> forecaster.to_local()  # if you set distributed=True
        >>> test_pred = forecaster.predict(x_test)
        >>> test_eval = forecaster.evaluate((x_test, y_test))
        >>> forecaster.save({ckpt_name})
        >>> forecaster.restore({ckpt_name})
    """

    def __init__(self,
                 past_seq_len,
                 future_seq_len,
                 input_feature_num,
                 output_feature_num,
                 num_channels=None,
                 kernel_size=3,
                 repo_initialization=True,
                 dropout=0.1,
                 optimizer="Adam",
                 loss="mse",
                 lr=0.001,
                 metrics=None,
                 seed=None,
                 distributed=False,
                 workers_per_node=1,
                 distributed_backend="torch_distributed"):
        """
        Build a TCN Forecast Model.

        TCN Forecast may fall into local optima. Please set repo_initialization
        to False to alleviate the issue. You can also change a random seed to
        work around.

        :param past_seq_len: Specify the history time steps (i.e. lookback).
        :param future_seq_len: Specify the output time steps (i.e. horizon).
        :param input_feature_num: Specify the feature dimension.
        :param output_feature_num: Specify the output dimension.
        :param num_channels: Specify the convolutional layer filter number in
               TCN's encoder. This value defaults to [30]*7.
        :param kernel_size: Specify convolutional layer filter height in TCN's
               encoder. This value defaults to 3.
        :param repo_initialization: if to use framework default initialization,
               True to use paper author's initialization and False to use the
               framework's default initialization. The value defaults to True.
        :param dropout: Specify the dropout close possibility (i.e. the close
               possibility to a neuron). This value defaults to 0.1.
        :param optimizer: Specify the optimizer used for training. This value
               defaults to "Adam".
        :param loss: Specify the loss function used for training. This value
               defaults to "mse". You can choose from "mse", "mae" and
               "huber_loss".
        :param lr: Specify the learning rate. This value defaults to 0.001.
        :param metrics: A list contains metrics for evaluating the quality of
               forecasting. You may only choose from "mse" and "mae" for a
               distributed forecaster. You may choose from "mse", "me", "mae",
               "mse","rmse","msle","r2", "mpe", "mape", "mspe", "smape", "mdape"
               and "smdape" for a non-distributed forecaster. Defaults to ["mse"].
        :param seed: int, random seed for training. This value defaults to None.
        :param distributed: bool, if init the forecaster in a distributed
               fashion. If True, the internal model will use an Orca Estimator.
               If False, the internal model will use a pytorch model. The value
               defaults to False.
        :param workers_per_node: int, the number of worker you want to use.
               The value defaults to 1. The param is only effective when
               distributed is set to True.
        :param distributed_backend: str, select from "torch_distributed" or
               "horovod". The value defaults to "torch_distributed".
        """
        # None sentinels replace the original mutable default arguments
        # ([30]*7 and ["mse"]), which would have been shared across every
        # forecaster instance created with the defaults.
        if num_channels is None:
            num_channels = [30] * 7
        if metrics is None:
            metrics = ["mse"]

        # config setting
        self.data_config = {
            "past_seq_len": past_seq_len,
            "future_seq_len": future_seq_len,
            "input_feature_num": input_feature_num,
            "output_feature_num": output_feature_num
        }
        self.config = {
            "lr": lr,
            "loss": loss,
            "num_channels": num_channels,
            "kernel_size": kernel_size,
            "repo_initialization": repo_initialization,
            "optim": optimizer,
            "dropout": dropout
        }

        # model creator settings
        self.local_model = TCNPytorch
        self.model_creator = model_creator
        self.optimizer_creator = optimizer_creator
        self.loss_creator = loss_creator

        # distributed settings
        self.distributed = distributed
        self.distributed_backend = distributed_backend
        self.workers_per_node = workers_per_node

        # other settings
        self.lr = lr
        self.metrics = metrics
        self.seed = seed

        super().__init__()
| [
"noreply@github.com"
] | noreply@github.com |
e3bffa2e9644e317f97a756bd852d57fe28c1fae | 50d8f7a805546edd768a245a544d3a362b674a73 | /03.image_stitch/image_stitch.py | 41706bca2902d0384ccbd7b7ef406ad1dddb2547 | [
"MIT"
] | permissive | Taishuaibo/pycv-training | 0d3f812a7e57acc8496ced9ada5879ee9f8a33f5 | a9ad8755c82bff62e907099ebac7587422b28ccd | refs/heads/main | 2023-05-05T15:51:19.394705 | 2021-05-30T01:08:17 | 2021-05-30T01:08:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,064 | py | import cv2
import numpy as np
def cv_show(name, img):
    """Display `img` in a window titled `name`; block until a key is pressed, then close."""
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def createFeature(img):
    """Detect SIFT keypoints in `img` and compute their descriptors.

    Returns a tuple (kps, features) where `kps` is a float32 array of (x, y)
    keypoint coordinates and `features` is the descriptor matrix.
    """
    # SIFT lives in the xfeatures2d contrib module in this OpenCV build.
    sift = cv2.xfeatures2d.SIFT_create()
    # detectAndCompute accepts the BGR image directly; the original built an
    # unused grayscale copy here, which has been removed.
    (kps, features) = sift.detectAndCompute(img, None)
    # Convert the cv2.KeyPoint objects into a plain coordinate array.
    kps = np.float32([kp.pt for kp in kps])
    return (kps, features)
# Load the two images to stitch.
imageA = cv2.imread("./oriImgs/left.png")   # left image
imageB = cv2.imread("./oriImgs/right.png")  # right image
cv2.imshow('imageA', imageA)
cv2.imshow('imageB', imageB)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Detect SIFT keypoints and compute descriptors for both images.
(kpsA, featuresA) = createFeature(imageA)
(kpsB, featuresB) = createFeature(imageB)

# Brute-force matcher with 2-nearest-neighbour matching.
matcher = cv2.BFMatcher()
rawMatches = matcher.knnMatch(featuresA, featuresB, 2)

# Lowe's ratio test: keep a match only when the best distance is clearly
# smaller than the second-best one.
goodMatches = []
for m in rawMatches:
    if len(m) == 2 and m[0].distance < m[1].distance * 0.75:
        # Store the indices of the pair in featuresA / featuresB.
        goodMatches.append((m[0].trainIdx, m[0].queryIdx))

# A homography can only be estimated from more than 4 correspondences.
if len(goodMatches) > 4:
    ptsA = np.float32([kpsA[i] for (_, i) in goodMatches])
    ptsB = np.float32([kpsB[i] for (i, _) in goodMatches])

    # Note the argument order: we warp image B into image A's frame.
    (H, status) = cv2.findHomography(ptsB, ptsA, cv2.RANSAC, 4.0)

    # Warp B onto a canvas wide enough for both images.
    res = cv2.warpPerspective(imageB, H, (imageA.shape[1] + imageB.shape[1], imageB.shape[0]))
    cv_show('result', res)

    # Paste image A into the left part of the panorama.
    res[0:imageA.shape[0], 0:imageA.shape[1]] = imageA
    cv_show('result', res)

    # Build a side-by-side visualisation of the two inputs.
    (hA, wA) = imageA.shape[:2]
    (hB, wB) = imageB.shape[:2]
    vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
    vis[0:hA, 0:wA] = imageA
    vis[0:hB, wA:] = imageB

    # Draw a green line for every inlier match (status == 1).
    for ((trainIdx, queryIdx), s) in zip(goodMatches, status):
        if s == 1:
            ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
            ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
            cv2.line(vis, ptA, ptB, (0, 255, 0), 1)

    # Show and save all results.
    cv2.imshow('imageA', imageA)
    cv2.imshow('imageB', imageB)
    cv2.imshow('keypoint matches', vis)
    cv2.imwrite('./resImgs/keypointmatcher.png', vis)
    cv2.imshow('result', res)
    cv2.imwrite('./resImgs/result.png', res)
    cv2.waitKey(0)
else:
    # Robustness fix: the original crashed with a NameError in the display
    # code when too few matches were found; report the condition instead.
    print('not enough keypoint matches to compute a homography')
cv2.destroyAllWindows() | [
"coscyber@gmail.com"
] | coscyber@gmail.com |
1bd71c4ddec832e0d21c4f00197795038c20d8e4 | e5b5e21451f21bab50ed40eba73fbb1146a166b5 | /ecommerceproject/cart/views.py | a97556adf7f9f6ee15a81b6041b207180a3c90e5 | [] | no_license | Ebyprogramz/grocerystore | 64b46f16c98523bca9b5664bb7437ba549cb51e6 | 91afce25766879d3891f8b1e46b42cef09560f0f | refs/heads/master | 2023-08-15T01:45:16.668194 | 2021-09-29T05:37:30 | 2021-09-29T05:37:30 | 410,158,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | from django.shortcuts import render, redirect, get_object_or_404
from store.models import Product
from .models import Cart,Cart_Item
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def cart_id(request):
cart=request.session.session_key
if not cart:
cart=request.session.create()
return cart
def add_cart(request,product_id):
product=Product.objects.get(id=product_id)
try:
cart=Cart.objects.get(cart_id=cart_id(request))
except Cart.DoesNotExist:
cart=Cart.objects.create(cart_id=cart_id(request))
cart.save(),
try:
cart_item=Cart_Item.objects.get(product=product,cart=cart)
if cart_item.quantity < cart_item.product.stock:
cart_item.quantity +=1
cart_item.save()
except Cart_Item.DoesNotExist:
cart_item=Cart_Item.objects.create(
product=product,
quantity=1,
cart=cart
)
cart_item.save()
return redirect('cart:cart_detail')
def cart_detail(request,total=0,counter=0,cart_items=None):
try:
cart=Cart.objects.get(cart_id=cart_id(request))
cart_items=Cart_Item.objects.filter(cart=cart,active=True)
for cart_item in cart_items:
total+=(cart_item.product.price * cart_item.quantity)
counter +=cart_item.quantity
except ObjectDoesNotExist:
pass
return render(request,'cart.html',dict(cart_items=cart_items,total=total,counter=counter))
def cart_remove(request,product_id):
cart=Cart.objects.get(cart_id=cart_id(request))
product=get_object_or_404(Product,id=product_id)
cart_item=Cart_Item.objects.get(product=product,cart=cart)
if cart_item.quantity >1:
cart_item.quantity -=1
cart_item.save()
else:
cart_item.delete()
return redirect('cart:cart_detail')
def full_remove(request,product_id):
cart=Cart.objects.get(cart_id=cart_id(request))
product=get_object_or_404(Product,id=product_id)
cart_item=Cart_Item.objects.get(product=product,cart=cart)
cart_item.delete()
return redirect('cart:cart_detail') | [
"eby.hanson@yahoo.in"
] | eby.hanson@yahoo.in |
65eb25457df6d77b41c68d357cd137870ad8fe15 | f23c47a5495799015c8b803420813a6ee8d6d2a4 | /options/base_options.py | b8f879c556236b9d0b3fbbe242454ad2911e94d6 | [] | no_license | eitan3/continuous_view_synthesis_gluon | d3f5e74553ddfb480ba24861cc4cd39acbb7a1ce | b3d90378f0e49a165ae85a9249ee3e5d208348eb | refs/heads/master | 2022-12-02T16:16:37.440990 | 2020-08-21T17:17:33 | 2020-08-21T17:17:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,605 | py | import argparse
import os
from util import util
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialized = False
self.isTrain = True
self.opt = None
def initialize(self):
# basic info
self.parser.add_argument('--batchSize', type=int, default=16, help='input batch size')
self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# visualization related
self.parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
self.parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
self.parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
# data loading related
self.parser.add_argument('--image_width', default=256, type=int, help='Image width')
self.parser.add_argument('--image_height', default=256, type=int, help='Image height')
# experiment related
self.parser.add_argument('--nz_geo', type=int, default=256, help='number of latent points')
self.parser.add_argument('--category', type=str, default='car', help='model category [car|chair|kitti]')
self.parser.add_argument('--random_elevation', action='store_true', help='introduce elevation changes between training pairs')
self.initialized = True
def parse(self):
if not self.initialized:
self.initialize()
self.opt = self.parser.parse_args()
self.opt.isTrain = self.isTrain # train or test
args = vars(self.opt)
print('------------ Options -------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
# save to the disk
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
| [
"eitanhadar4@gmail.com"
] | eitanhadar4@gmail.com |
1cc38e8e931283f05c93bbaab335422b6e5f0351 | 61191088ee4a257b15fad0b8fe53641c70ca78f1 | /vectdraw/draw/colour.py | b318d51482b6c8587b1dc0087345978adbb1cebc | [] | no_license | ajpen/Vectdraw | 2e0394a6ff646f408a683033ef14692b6d7fd048 | 3b0d941b31f759737bd52210ac9738d1ef7b5aaa | refs/heads/master | 2022-11-14T09:51:58.772146 | 2020-07-08T21:40:53 | 2020-07-08T21:40:53 | 277,761,591 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py |
"""
Class for rgba colour representation
"""
class Colour(object):
red = 0
green = 0
blue = 0
alpha = 255
def __init__(self, rgba=None):
"""
Returns a colour instance. If nothing is passsed, the default
is 0,0, 0, 255 for red, green, blue and alpha respectively
:param rgba: a tuple containing numbers between 0-255 for red, green,
blue and alpha respectively
"""
if rgba:
self.SetColour(rgba)
def SetColour(self, rgba):
if not isinstance(rgba, tuple) and len(rgba) != 4:
raise TypeError(
"Unexpected type given. Expected tuple of size 4 "
"(int, int, int, int), received {}".format(type(rgba)))
for c in rgba:
if c > 255 or c < 0:
raise ValueError(
"Colour values are outside of the domain (0-255)")
self.red, self.green, self.blue, self.alpha = rgba
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.red == other.red and
self.green == other.green and
self.blue == other.blue and
self.alpha == other.alpha)
else:
raise NotImplemented | [
"anferneejervis@gmail.com"
] | anferneejervis@gmail.com |
98166df402980f456d8048e29aa8a450f9257655 | 80d879a552ce00a9bc73a26d0ddb74c278867b1f | /scripts/080_hilo_concrete.py | 4abf39886121d03650f95582dad542dc8c6f5d56 | [] | no_license | whiskyching/WS-EscobedoGroup | 4a25abe62fac91b82d3b1abd74ddc02af107457f | bd36d623ec2f60638fe3f330b9ad92c810804e8d | refs/heads/main | 2023-03-20T07:03:19.594765 | 2021-03-16T13:15:14 | 2021-03-16T13:15:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,119 | py | import os
import compas
from compas.utilities import pairwise
from compas_rhino.artists import MeshArtist
HERE = os.path.dirname(__file__)
DATA = os.path.join(HERE, '../data')
FILE = os.path.join(DATA, 'session.json')
session = compas.json_load(FILE)
mesh = session['mesh']
# ==============================================================================
# Idos
# ==============================================================================
idos = mesh.copy()
for face in mesh.faces_where({'is_loaded': False}):
idos.delete_face(face)
idos.remove_unused_vertices()
offset = 0.02
for vertex, attr in idos.vertices(True):
x, y, z = mesh.vertex_coordinates(vertex)
nx, ny, nz = mesh.vertex_normal(vertex)
if attr['nx'] is not None:
nx = attr['nx']
if attr['ny'] is not None:
ny = attr['ny']
if attr['nz'] is not None:
nz = attr['nz']
attr['x'] = x + offset * nx
attr['y'] = y + offset * ny
attr['z'] = z + offset * nz
# ==============================================================================
# Edos
# ==============================================================================
edos = idos.copy()
offset = 0.06
for vertex, attr in edos.vertices(True):
x, y, z = idos.vertex_coordinates(vertex)
nx, ny, nz = idos.vertex_normal(vertex)
if attr['nx'] is not None:
nx = attr['nx']
if attr['ny'] is not None:
ny = attr['ny']
if attr['nz'] is not None:
nz = attr['nz']
attr['x'] = x + offset * nx
attr['y'] = y + offset * ny
attr['z'] = z + offset * nz
# ==============================================================================
# Volume
# ==============================================================================
volume = idos.copy()
volume.flip_cycles()
max_vertex = volume._max_vertex + 1
max_face = volume._max_face + 1
for vertex, attr in edos.vertices(True):
volume.add_vertex(key=vertex + max_vertex, **attr)
for face in edos.faces():
vertices = edos.face_vertices(face)
vertices = [vertex + max_vertex for vertex in vertices]
volume.add_face(vertices)
boundary = edos.vertices_on_boundary()
boundary.append(boundary[0])
for a, b in pairwise(boundary):
volume.add_face([b, a, a + max_vertex, b + max_vertex])
# ==============================================================================
# Export
# ==============================================================================
session['idos'] = idos
session['edos'] = edos
session['volume'] = volume
compas.json_dump(session, FILE)
# ==============================================================================
# visualize
# ==============================================================================
artist = MeshArtist(idos, layer="HiLo::Concrete1::Idos")
artist.clear_layer()
artist.draw_mesh(disjoint=True, color=(255, 0, 0))
artist = MeshArtist(edos, layer="HiLo::Concrete1::Edos")
artist.clear_layer()
artist.draw_mesh(disjoint=True, color=(0, 0, 255))
artist = MeshArtist(volume, layer="HiLo::Concrete1::Volume")
artist.clear_layer()
artist.draw_mesh(disjoint=True)
| [
"vanmelet@ethz.ch"
] | vanmelet@ethz.ch |
803fe059f40e4b681949b8595a78dbf9f235cec5 | 7fbd07ed9d23381f73d408cb3a845e94c81ca496 | /mysite/settings.py | 6e3598873769ed4c12d9fe5ad4c2b240db9f6a16 | [] | no_license | halildilaver/E-CommercewithDjango | a1be0ce32f07f7b8874571140f845a8bf1ecc205 | b867a87bf4c272c461a75a5bc50af0895bcb85f7 | refs/heads/master | 2020-06-03T14:51:21.131135 | 2019-06-12T17:08:06 | 2019-06-12T17:08:06 | 191,613,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,641 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2kx52jjxr@@xv=@^q+e#hbz%$ujy6x8un*ie$vd!z45j_#v!^d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'product.apps.ProductConfig',
'home.apps.HomeConfig',
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor',
'ckeditor_uploader'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
####################################
## CKEDITOR CONFIGURATION ##
####################################
SITE_ID = 1
CKEDITOR_JQUERY_URL = 'https://ajax.googleapis.com/ajax/libs/jquery/2.2.4/jquery.min.js'
CKEDITOR_UPLOAD_PATH = 'uploads/'
CKEDITOR_IMAGE_BACKEND = "pillow"
CKEDITOR_CONFIGS = {
'default': {
'toolbar': None,
},
} | [
"halildlvr@gmail.com"
] | halildlvr@gmail.com |
25239a74775ef3920fb3ecec660792a627e451bc | e69053a7cc99efc9ed560b32f378920613e77517 | /coins_optimal.py | 04fcbabd8130821f86d267f59ed2c2ff2b0ab462 | [] | no_license | aantillonl/CakeInterviewProblems | 41a2a719364440a11af6841ce3b2e334c207c456 | d671aa6b196a433963f62ef5217582d3f80c6c63 | refs/heads/master | 2021-08-23T12:13:22.811133 | 2017-12-04T21:34:52 | 2017-12-04T21:34:52 | 112,225,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | # Your quirky boss collects rare, old coins...
# They found out you're a programmer and asked you to solve something they've been wondering for a long time.
# Write a function that, given:
# an amount of money
# a list of coin denominations
# computes the number of ways to make the amount of money with coins of the available denominations.
# Example: for amount=4 (4¢) and denominations=[1,2,3][1,2,3] (1¢, 2¢ and 3¢), your program would output 4—the number of ways to make 4¢ with those denominations:
# 1¢, 1¢, 1¢, 1¢
# 1¢, 1¢, 2¢
# 1¢, 3¢
# 2¢, 2¢
coins = [1,2,3]
target = 5
N = len(coins)
output = []
# Consider base cases: No coins given. And, amount = 0
# sol[ x = num of coins. y => target]
solutions = [[0 for x in range(N + 1)] for x in range(0,target + 1)]
# Fill the enteries for 0 value case (n = 0)
for i in range(N + 1):
solutions[0][i] = 1
for y in range(1, target + 1):
for x in range(1, N + 1):
curr_coin = coins[x-1]
sol_without_coin = solutions[y][x-1]
if curr_coin <= y:
sol_with_coin = solutions[y - curr_coin][x]
solutions[y][x] = sol_with_coin + sol_without_coin
else:
solutions[y][x] = sol_without_coin
print(solutions[target][N])
# Daamn, this was tough as hell, i dont know how i can make it for my solution | [
"aantillonl@gmail.com"
] | aantillonl@gmail.com |
69de6c1c8c8510d0f6fd116fd216eb399a3db19a | a3d73905af9102c6388b7501aa5067e4008d4c6a | /Programming Assignment 3/histogram.py | 73be3f9db0101a93413a4962ca936d1b64d4ba93 | [] | no_license | ShumbaBrown/CSCI-100 | b9a5b6c4446a5fdec3044d82eca3f59e502d5372 | 8eec33e39b26e10ddf9b8395fafe861cf1eb53ec | refs/heads/master | 2020-05-23T06:25:34.908759 | 2016-10-07T05:25:15 | 2016-10-07T05:25:15 | 70,217,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | def PrintHistogram(nums):
# Prints the histogram for a list of numbers
# Dictionary to hold each key (number) and value (number of occurences)
numbers = { }
# Iterate through all the values in the list
for i in nums:
# Increment the number of asterics for the corresponding value in the dictionary
if i not in numbers:
numbers[i] = '*'
else:
numbers[i] = numbers[i] + '*'
# Print the histogram values
for i in numbers:
print('%s: %s' % (str(i), numbers[i]))
PrintHistogram([-2, -2, -3, -2])
PrintHistogram([1, 2.5, 3, 4, 4, 3, 6])
| [
"shumbabrownjc@gmail.com"
] | shumbabrownjc@gmail.com |
abe1005bd1d0c5882d3e588d9d3a1e4a7486c579 | 44197b58b52349b0557f4d2327be292d1c01ea50 | /test/test_data_62.py | 2dd9de7ce8a273d4da81d28b4534861d76aaff37 | [] | no_license | jonpurdy/netbox-swagger-python-client | 58b2b7984ea24a690d8910f6a6a496b99e5098f9 | 6bfe8cf3bb753c4d293dd56a541fac026642207f | refs/heads/master | 2021-06-28T03:16:09.670793 | 2017-09-17T18:15:54 | 2017-09-17T18:15:54 | 103,851,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | # coding: utf-8
"""
NetBox API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.data_62 import Data62
class TestData62(unittest.TestCase):
""" Data62 unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testData62(self):
"""
Test Data62
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.data_62.Data62()
pass
if __name__ == '__main__':
unittest.main()
| [
"jon@knowroaming.com"
] | jon@knowroaming.com |
b2b0b0aec75a1ce380bcdf00f88afd9502f5a719 | 5cfea22c5b2c401acae9d429915ed9ba7a73024a | /pausemenu/menues/mainmenu.py | bfd1af3639d97960f8a1cdb1a197442a06296a7d | [] | no_license | LookACastle/Oiram | f48fe3f0c9c6e3cc14737d37e5d91013414a4671 | 2302c929c874eb44fa38e6308d49e4d7a415095d | refs/heads/master | 2022-03-15T06:09:09.106758 | 2019-10-26T23:09:57 | 2019-10-26T23:09:57 | 108,526,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | from pausemenu.menuitem.button import *
from constants import *
class MainMenu:
def __init__(self, screen):
options = ["Resume", "Options", "Save", "Exit"]
actions = [resumeAction, optionAction, saveAction, quitAction]
self.objects = []
y = 0
maxwidth = 0
for i in range(0, len(options)):
button = Button(options[i], 45, y + 30, screen, 2, actions[i], 0)
if (button.width > maxwidth):
maxwidth = button.width
self.objects.append(button)
y += 30
for o in self.objects:
o.center(maxwidth)
def resetHover(self):
for o in self.objects:
if (o.isHoverAble):
o.hover = False
def resetPress(self):
for o in self.objects:
if (o.isPressAble()):
o.pressed = False
def getCollision(self, x, y):
collided = []
for o in self.objects:
if (o.checkCollision(x, y)):
collided.append(o)
return collided
def render(self, screen, x, y):
for o in self.objects:
o.render(screen, x, y)
def quitAction(main):
if (main.player.onMap):
main.stop()
else:
main.saveConfig()
main.levelManager.changeLevel(main.player)
main.pausemenu.active = False
def resumeAction(main):
main.pausemenu.active = False
def optionAction(main):
main.pausemenu.changeMenu("option")
def saveAction(main):
main.pausemenu.changeMenu("save") | [
"martinpinholt@hotmail.com"
] | martinpinholt@hotmail.com |
52a6526d5fb09f675ab48c251acde7cab3c2e1fc | 3329be589ec0376495948d52c234a31f8ed07cf8 | /utils/ckpt_utils.py | 0ba977f956bdac4fa96f168c7e709765e5746e2b | [] | no_license | arthurtibame/tensorflow_api_utils | 908a68a9c7524b91a340c6a7a02d8d12b8a69ae4 | d6b5fd42dd3445989b9db15431bda0f2e8358c02 | refs/heads/main | 2023-01-18T21:46:20.804311 | 2020-11-26T08:21:48 | 2020-11-26T08:21:48 | 316,155,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,365 | py | import json
import os
def chk_model(model_name):
return os.path.isdir(model_name)
def download_ckpt(model_name):
r"""
if model folder not exists then download
the check point from tensorflow model zoo
model names are as following url: https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md
Model name Speed (ms) COCO mAP Outputs
CenterNet HourGlass104 512x512 70 41.9 Boxes
CenterNet HourGlass104 Keypoints 512x512 76 40.0/61.4 Boxes/Keypoints
CenterNet HourGlass104 1024x1024 197 44.5 Boxes
CenterNet HourGlass104 Keypoints 1024x1024 211 42.8/64.5 Boxes/Keypoints
CenterNet Resnet50 V1 FPN 512x512 27 31.2 Boxes
CenterNet Resnet50 V1 FPN Keypoints 512x512 30 29.3/50.7 Boxes/Keypoints
CenterNet Resnet101 V1 FPN 512x512 34 34.2 Boxes
CenterNet Resnet50 V2 512x512 27 29.5 Boxes
CenterNet Resnet50 V2 Keypoints 512x512 30 27.6/48.2 Boxes/Keypoints
EfficientDet D0 512x512 39 33.6 Boxes
EfficientDet D1 640x640 54 38.4 Boxes
EfficientDet D2 768x768 67 41.8 Boxes
EfficientDet D3 896x896 95 45.4 Boxes
EfficientDet D4 1024x1024 133 48.5 Boxes
EfficientDet D5 1280x1280 222 49.7 Boxes
EfficientDet D6 1280x1280 268 50.5 Boxes
EfficientDet D7 1536x1536 325 51.2 Boxes
SSD MobileNet v2 320x320 19 20.2 Boxes
SSD MobileNet V1 FPN 640x640 48 29.1 Boxes
SSD MobileNet V2 FPNLite 320x320 22 22.2 Boxes
SSD MobileNet V2 FPNLite 640x640 39 28.2 Boxes
SSD ResNet50 V1 FPN 640x640 (RetinaNet50) 46 34.3 Boxes
SSD ResNet50 V1 FPN 1024x1024 (RetinaNet50) 87 38.3 Boxes
SSD ResNet101 V1 FPN 640x640 (RetinaNet101) 57 35.6 Boxes
SSD ResNet101 V1 FPN 1024x1024 (RetinaNet101) 104 39.5 Boxes
SSD ResNet152 V1 FPN 640x640 (RetinaNet152) 80 35.4 Boxes
SSD ResNet152 V1 FPN 1024x1024 (RetinaNet152) 111 39.6 Boxes
Faster R-CNN ResNet50 V1 640x640 53 29.3 Boxes
Faster R-CNN ResNet50 V1 1024x1024 65 31.0 Boxes
Faster R-CNN ResNet50 V1 800x1333 65 31.6 Boxes
Faster R-CNN ResNet101 V1 640x640 55 31.8 Boxes
Faster R-CNN ResNet101 V1 1024x1024 72 37.1 Boxes
Faster R-CNN ResNet101 V1 800x1333 77 36.6 Boxes
Faster R-CNN ResNet152 V1 640x640 64 32.4 Boxes
Faster R-CNN ResNet152 V1 1024x1024 85 37.6 Boxes
Faster R-CNN ResNet152 V1 800x1333 101 37.4 Boxes
Faster R-CNN Inception ResNet V2 640x640 206 37.7 Boxes
Faster R-CNN Inception ResNet V2 1024x1024 236 38.7 Boxes
Mask R-CNN Inception ResNet V2 1024x1024 301 39.0/34.6 Boxes/Masks
"""
with open ("./utils/models.json", "r") as f:
url = json.loads(f.read())[model_name]
model_name_tar = url.split("/")[-1]
model_name_dir = url.split("/")[-1].split(".")[0]
if not chk_model(model_name_dir):
try:
print('Downloading %s to %s...' % (url, model_name_tar))
os.system("wget {}".format(str(url)))
os.system("tar xvf {}".format(str(model_name_tar)))
os.remove(model_name_tar)
# assert os.path.exists()# check
return model_name_dir
except Exception as e:
print(e)
return model_name_dir
def customized_ckpt_cofig(
pipeline_config_path,
fine_tune_checkpoint,
train_record_fname,
test_record_fname,
label_map_pbtxt_fname,
batch_size,
num_steps,
num_classes
):
import re
print('writing custom configuration file')
with open(pipeline_config_path) as f:
s = f.read()
with open(pipeline_config_path, 'w') as f:
# fine_tune_checkpoint
s = re.sub('fine_tune_checkpoint: ".*?"',
'fine_tune_checkpoint: "{}"'.format(fine_tune_checkpoint), s)
# tfrecord files train and test.
s = re.sub(
'(input_path: ".*?)(PATH_TO_BE_CONFIGURED)(.*?")', 'input_path: "{}"'.format(train_record_fname), s, count=1)
s = re.sub(
'(input_path: ".*?)(PATH_TO_BE_CONFIGURED)(.*?")', 'input_path: "{}"'.format(test_record_fname), s, count=1)
# label_map_path
s = re.sub(
'label_map_path: ".*?"', 'label_map_path: "{}"'.format(label_map_pbtxt_fname), s)
# Set training batch_size.
s = re.sub('batch_size: [0-9]+',
'batch_size: {}'.format(batch_size), s)
# Set training steps, num_steps
s = re.sub('num_steps: [0-9]+',
'num_steps: {}'.format(num_steps), s)
s = re.sub('total_steps: [0-9]+',
'total_steps: {}'.format(num_steps), s)
# Set number of classes num_classes.
s = re.sub('num_classes: [0-9]+',
'num_classes: {}'.format(num_classes), s)
#fine-tune checkpoint type
s = re.sub(
'fine_tune_checkpoint_type: "classification"', 'fine_tune_checkpoint_type: "{}"'.format('detection'), s)
f.write(s)
| [
"arthurtibame@gmail.com"
] | arthurtibame@gmail.com |
00c251321617e1466a341cd899b00c702a0c9568 | bf5058783d658d958bf84910721e828f1316dbbe | /SisconcBNB/apps/alumnos/forms.py | 3345e54865c0d2970346942ce640ffe139b4b2fa | [] | no_license | djangoadminn/jose | 3acfe5a2861305c94658f3177d61ea8c557b3d01 | 1567894037548b7b452ac202b972cf0b1867a064 | refs/heads/master | 2021-01-22T08:38:31.707532 | 2017-05-27T22:20:12 | 2017-05-27T22:20:12 | 92,627,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,729 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django import forms
from SisconcBNB.apps.alumnos.models import Alumno
from SisconcBNB.apps.alumnos.models import Seccion
from SisconcBNB.apps.alumnos.models import Inscripcion
from SisconcBNB.apps.alumnos.models import Enfermedad
from SisconcBNB.apps.alumnos.models import Enfermedad_alumno
from SisconcBNB.apps.alumnos.models import Profesor
from SisconcBNB.apps.alumnos.models import Seccion_profesor
from SisconcBNB.apps.alumnos.forms_date import DateInput
#formulario del alumno 1
class alumnForm(forms.ModelForm):
class Meta:
model = Alumno
fields = [
'codigo_hab',
'cedula_escolar',
'procedencia_alumn',
'plantel_retiro_alumn',
'lugar_habi_alumn',
'punto_referencia_alumn',
'religion',
]
labels = {
'codigo_hab':'codigo_hab',
'cedula_escolar':'cedula_escolar',
'procedencia_alumn':'procedencia_alumn',
'plantel_retiro_alumn':'plantel_retiro_alumn',
'lugar_habi_alumn':'Lugar_habi_alumn',
'punto_referencia_alumn':'punto_referencia_alumn',
'religion':'religion',
}
widgets = {
'codigo_hab':forms.Select(attrs={'class':'material-control tooltips-general'}),
'cedula_escolar':forms.Select(attrs={'class':'material-control tooltips-general','autocomplete': 'off'}),
'procedencia_alumn':forms.TextInput(attrs={'class':'material-control tooltips-general','placeholder':'procedencia','size':'40','autocomplete': 'off'}),
'plantel_retiro_alumn':forms.TextInput(attrs={'class':'material-control tooltips-general','placeholder':'plantel','size':'40','autocomplete': 'off'}),
'lugar_habi_alumn':forms.TextInput(attrs={'class':'material-control tooltips-general','placeholder':'lugar','size':'40','autocomplete': 'off'}),
'punto_referencia_alumn':forms.TextInput(attrs={'class':'material-control tooltips-general','placeholder':'punto de referencia','size':'40','autocomplete': 'off'}),
'religion':forms.Select(attrs={'class':'material-control tooltips-general'}),
}
#formulario de seccion 2
class seccForm(forms.ModelForm):
class Meta:
model = Seccion
fields = [
'codigo_seccion',
'turno',
'seccion',
'grado',
]
labels = {
'codigo_seccion':'codigo_seccion',
'turno':'turno',
'seccion':'seccion',
'grado':'grado',
}
widgets = {
'codigo_seccion':forms.NumberInput(attrs={'class':'material-control tooltips-general','placeholder':'codigo','size':'40','autocomplete': 'off'}),
'turno':forms.Select(attrs={'class':'material-control tooltips-general'}),
'seccion':forms.Select(attrs={'class':'material-control tooltips-general'}),
'grado':forms.Select(attrs={'class':'material-control tooltips-general'}),
}
#formulario de inscripcion 3
class inscripForm(forms.ModelForm):
beca_estudiantil=forms.BooleanField(label='beca_estudiantil', required=True,initial=False)
class Meta:
model = Inscripcion
fields = [
'cod_inscri',
'cedula_escolar',
'codigo_seccion',
'talla_pantalon',
'talla_franela',
'talla_Zapato',
'beca_estudiantil',
'ano_escolar',
]
labels = {
'cod_inscri':'cod_inscri',
'cedula_escolar':'cedula_escolar',
'codigo_seccion':'codigo_seccion',
'talla_pantalon':'talla_pantalon',
'talla_franela':'talla_franela',
'talla_Zapato':'talla_Zapato',
'beca_estudiantil':'beca_estudiantil',
'ano_escolar':'ano_escolar',
}
widgets = {
'cod_inscri':forms.NumberInput(attrs={'class':'material-control tooltips-general'}),
'cedula_escolar':forms.Select(attrs={'class':'material-control tooltips-general'}),
'codigo_seccion':forms.Select(attrs={'class':'material-control tooltips-general'}),
'talla_pantalon':forms.Select(attrs={'class':'material-control tooltips-general'}),
'talla_franela':forms.Select(attrs={'class':'material-control tooltips-general'}),
'talla_Zapato':forms.Select(attrs={'class':'material-control tooltips-general'}),
'ano_escolar':forms.Select(attrs={'class':'material-control tooltips-general'}),
}
#enfermedad de alumno 4
class enferForm(forms.ModelForm):
class Meta:
model = Enfermedad
fields = [
'codigo_enfermedad',
'nombres_enfermedad',
'descripcion_enfermedad',
]
labels = {
'codigo_enfermedad':'codigo_enfermedad',
'nombres_enfermedad':'nombres_enfermedad',
'descripcion_enfermedad':'descripcion_enfermedad',
}
widgets = {
'codigo_enfermedad':forms.NumberInput(attrs={'class':'material-control tooltips-general','placeholder':'código','size':'40','autocomplete': 'off'}),
'nombres_enfermedad':forms.TextInput(attrs={'class':'material-control tooltips-general','placeholder':'nombre de la enfermedad','size':'40','autocomplete': 'off'}),
'descripcion_enfermedad':forms.Textarea(attrs={'class':'material-control tooltips-general','placeholder':'descripción','size':'40','autocomplete': 'off'}),
}
#formulario del alumnos 5
class enfermalumnForm(forms.ModelForm):
#fecha_enfer=forms.DateField()
class Meta:
model = Enfermedad_alumno
fields = [
'cedula_escolar',
'codigo_enfermedad',
'fecha_enfer',
]
labels = {
'cedula_escolar':'cedula_escolar',
'codigo_enfermedad':'codigo_enfermedad',
'fecha_enfer':'fecha_enfer',
}
widgets = {
'cedula_escolar':forms.Select(attrs={'class':'material-control tooltips-general'}),
'codigo_enfermedad':forms.Select(attrs={'class':'material-control tooltips-general'}),
'fecha_enfer': DateInput(format = '%Y-%m-%d'),
}
#formulario del pofesor 6
class profeForm(forms.ModelForm):
	"""ModelForm for Profesor: room code, shift, speciality and hire date.

	NOTE(review): 'codigo_prof' was commented out of `fields` and `labels`
	but its widget entry was left behind. Django silently ignores widgets
	for fields not on the form, so the stale entry is commented out here to
	keep the three mappings consistent. Behavior is unchanged.
	"""
	class Meta:
		model = Profesor
		fields = [
			#'codigo_prof',
			'codigo_hab',
			'turno',
			'especialidad_prof',
			'fecha_ingreso',
		]
		labels = {
			#'codigo_prof':'codigo_prof',
			'codigo_hab':'codigo_hab',
			'turno':'turno',
			'especialidad_prof':'especialidad_prof',
			'fecha_ingreso':'fecha_ingreso',
		}
		widgets = {
			# kept commented to mirror fields/labels above:
			#'codigo_prof':forms.NumberInput(attrs={'class':'material-control tooltips-general','placeholder':'codigo','size':'40','autocomplete': 'off'}),
			'codigo_hab':forms.Select(attrs={'class':'material-control tooltips-general'}),
			'turno':forms.Select(attrs={'class':'material-control tooltips-general'}),
			'especialidad_prof':forms.TextInput(attrs={'class':'material-control tooltips-general','placeholder':'especialidad','size':'40','autocomplete': 'off'}),
			'fecha_ingreso':DateInput(format = '%Y-%m-%d'),
		}
# 7) teacher-section link form (Seccion_profesor)
class seccproForm(forms.ModelForm):
	"""ModelForm for the Seccion_profesor link table (teacher <-> section)."""
	class Meta:
		model = Seccion_profesor
		fields = [
			'codigo_prof',
			'codigo_seccion',
		]
		labels = {
			'codigo_prof':'codigo_prof',
			'codigo_seccion':'codigo_seccion',
		}
		widgets = {
			'codigo_prof':forms.Select(attrs={'class':'material-control tooltips-general','autocomplete': 'off'}),
			'codigo_seccion':forms.Select(attrs={'class':'material-control tooltips-general','autocomplete': 'off'}),
		}
"josejavamax@hotmail.com"
] | josejavamax@hotmail.com |
6d11a2a08e99746fcf09d5f7a1e8b2a1c35a11e3 | 9716316eb0c5b5a1487866d37b58efc116511d22 | /charmdet/runReconstruction.py | 7e60609548ccbac61bb0a6d7f587dec8d911689f | [] | no_license | klleung/FairShip | 68245fcd042f47a5ed2feeaad1c2e84e5aa21241 | a7e67ac58387f651722068e8325513b5e0d6832a | refs/heads/master | 2020-06-06T23:02:00.991756 | 2019-07-19T12:23:35 | 2019-07-19T12:23:35 | 192,870,986 | 2 | 0 | null | 2019-06-20T07:26:44 | 2019-06-20T07:26:44 | null | UTF-8 | Python | false | false | 15,856 | py |
import os,subprocess,ROOT,time,multiprocessing
from rootpyPickler import Unpickler
from rootpyPickler import Pickler
import pwd
ncpus = int(multiprocessing.cpu_count()*3./4.)  # throttle: use 3/4 of the cores for parallel reco jobs
pathToMacro = '$FAIRSHIP/charmdet/'  # location of drifttubeMonitoring.py, expanded by the shell
def count_python_processes(macroName):
    # Count the current user's running python processes whose command line
    # mentions macroName; used to throttle the number of parallel reco jobs.
    username = pwd.getpwuid(os.getuid()).pw_name
    status = subprocess.check_output("ps -f -u " + username, shell=True)
    # only works if screen is wide enough to print full name!
    total = 0
    for line in status.split('\n'):
        if line.find(macroName) < 0: continue
        if line.find('python') < 0: continue
        total += 1
    return total
# Module-level bookkeeping filled by getFilesFromEOS/getFilesLocal:
fileList = {}  # fname -> number of cbmsim events
badFiles = []  # files that could not be opened/read
run = "RUN_8000_2395" # "RUN_8000_2396"
eospath='/eos/experiment/ship/data/muflux/DATA_Rebuild_8000/rootdata/'+run
def getFilesFromEOS():
# list of files
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospath,shell=True)
for x in temp.split('\n'):
if x.find('.root')<0: continue
if not x.find('START')<0: continue
fname = x[x.find('/eos'):]
nentries = 0
try:
f=ROOT.TFile.Open(os.environ['EOSSHIP']+fname)
nentries=f.cbmsim.GetEntries()
fileList[fname]=nentries
except:
print "problem accessing file",fname
badFiles.append(fname)
Nfiles = len(fileList)
tmp = {}
for fname in fileList:
newName = fname[fname.rfind('/')+1:]
rc = os.system("xrdcp -f $EOSSHIP"+fname+" "+newName)
tmp[newName]=fileList[fname]
fnames = tmp.keys()
fnames.sort()
return tmp,fnames
def getFilesLocal():
# list of files
for fname in os.listdir('.'):
if fname.find('.root')<0: continue
if not fname.find('_RT')<0: continue
test = fname.replace('.root','_RT.root')
if os.path.isfile(test): continue
nentries = 0
try:
f=ROOT.TFile.Open(fname)
nentries=f.cbmsim.GetEntries()
fileList[fname]=nentries
except:
print "problem accessing file",fname
badFiles.append(fname)
Nfiles = len(fileList)
fnames = fileList.keys()
fnames.sort()
return fileList,fnames
def recoStep0(local=False):
 """Run reconstruction step 0 (RT-relation making) over the run's raw files.

 Files are grouped into batches of >~350k events so each drifttubeMonitoring
 job has enough statistics; jobs run in the background, throttled to ncpus
 concurrent python processes. Raw files are removed (cleanUp) once their RT
 files exist.
 local -- take input files from the cwd instead of copying them from EOS.
 """
 if local: tmp,fnames = getFilesLocal()
 else: tmp,fnames = getFilesFromEOS()
 Nfiles = len(fnames)
 print "fileList established ",Nfiles
 Ndone = 0
 while Ndone < Nfiles:
  cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c recoStep0 -f "
  # group files to get better stats
  Ntot = 0
  sample = []
  i = 0
  for k in range(Ndone,Nfiles):
   Ntot += tmp[fnames[k]]
   sample.append(fnames[k])
   i+=1
   if Ntot>350000: break
  Ndone += i
  # check that enough files remain
  Nextsample = []
  Ntot = 0
  for k in range(Ndone,Nfiles):
   Ntot += tmp[fnames[k]]
   Nextsample.append(fnames[k])
   if Ntot>350000: break
  # if the remainder would be a too-small batch, fold it into this one
  if Ntot < 350000:
   for s in Nextsample: sample.append(s)
   Ndone += len(Nextsample)
  if len(sample)==0: break
  for s in sample: cmd+=s+','
  print 'step 0:',cmd[:cmd.rfind(',')],Ndone,Nfiles
  os.system(cmd[:cmd.rfind(',')]+" &")
  # throttle: wait for a free cpu slot before launching the next batch
  while 1>0:
   if count_python_processes('drifttubeMonitoring')<ncpus: break
   time.sleep(200)
  if Ndone%100==0: cleanUp()
 # wait for all background jobs to finish before the final cleanup
 while count_python_processes('drifttubeMonitoring')>0: time.sleep(200)
 print "files created with RT relations "
 cleanUp()
def checkFilesWithRT():
fok = []
fNotok = []
fRaw = []
for fname in os.listdir('.'):
if not fname.find('histo')<0: continue
if not fname.find('_RT')<0:
f=ROOT.TFile(fname)
RT = f.Get('tMinAndTmax')
if RT:
fok.append(fname)
else:
fNotok.append(fname)
elif fname.find('root')>0 and not fname.find('SPILL')<0:
fRaw.append(fname)
print len(fok),len(fNotok),len(fRaw)
return fok,fNotok,fRaw
def checkMinusTwo():
fok,fNotok,fRaw = checkFilesWithRT()
for fname in fRaw:
if fname in fok: continue
N=0
f=ROOT.TFile(fname)
sTree = f.cbmsim
for n in range(sTree.GetEntries()):
rc = sTree.GetEvent(n)
for m in sTree.Digi_MufluxSpectrometerHits:
if m.GetDetectorID()<0: N+=1
print sTree.GetCurrentFile(),N
def recoStep1():
fileList=[]
# all RT files
for x in os.listdir('.'):
if x.find('_RT')>0 and x.find('histos')<0:
test = ROOT.TFile(x)
if test.cbmsim.GetBranch("FitTracks"): continue
fileList.append(x)
fileList.sort()
for fname in fileList:
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c recoStep1 -u 1 -f "+fname+' &'
print 'step 1:', cmd
os.system(cmd)
time.sleep(100)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
print "finished all the tasks."
def checkAlignment(fileList=[]):
# all RT files
if len(fileList)==0:
for x in os.listdir('.'):
if x.find('_RT')>0 and x.find('histos-residuals')<0:
fileList.append(x)
fileList.sort()
for fname in fileList:
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c alignment -f "+fname+' &'
print 'make residual plots:', cmd
os.system(cmd)
time.sleep(10)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
print "finished all the tasks."
def runMC():
 """Produce MC samples: a fast muon-background sample from the Mbias input
 file, and a full Muflux simulation.
 NOTE(review): the second command repeats '--charm=1 --CharmdetSetup=0';
 harmless but presumably unintended -- confirm before cleaning up.
 """
 # fast MC
 inputFile = "/eos/experiment/ship/data/Mbias/background-prod-2018/pythia8_Geant4_10.0_withCharmandBeauty0_mu.root" # entries 13450391L
 os.system("python $FAIRSHIP/macro/run_simScript.py -n 100000 --MuonBack --charm=1 --CharmdetSetup=0 -f "+inputFile)
 # full simulation
 os.system("python $SHIPBUILD/FairShip/macro/run_simScript.py --Muflux -n 1000 --charm=1 --CharmdetSetup=0 --charm=1 --CharmdetSetup=0")
def checkFilesWithTracks(D='.'):
 """Return the sorted list of RT files in D that already contain a FitTracks branch.

 D -- a local directory, or an EOS path (detected by 'eos' in the name and
 then listed via xrdfs). Files without a cbmsim key (or unreadable ones) are
 counted as zombies; files with cbmsim but no FitTracks go to 'rest'. Only
 the list with tracks is returned; the other categories are printed.
 """
 fileList=[]
 rest=[]
 zombie=[]
 # all RT files
 if D.find('eos')<0:
  for x in os.listdir(D):
   if x.find('_RT')>0 and x.find('histos')<0:
    test = ROOT.TFile(D+'/'+x)
    if not test.GetKey('cbmsim'):
     zombie.append(x)
    elif test.cbmsim.GetBranch("FitTracks"): fileList.append(x)
    else: rest.append(x)
 else:
  temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+D,shell=True)
  for x in temp.split('\n'):
   if x.find('.root')<0: continue
   fname = x[x.find('/eos'):]
   try:
    test=ROOT.TFile.Open(os.environ['EOSSHIP']+fname)
    if not test.GetKey('cbmsim'):
     zombie.append(fname)
    elif test.cbmsim.GetBranch("FitTracks"): fileList.append(fname)
    else: rest.append(fname)
   except:zombie.append(fname)
 fileList.sort()
 print "n with tracks",len(fileList),' rest:',len(rest),' zombies:',zombie
 return fileList
def checkFilesWithTracks2(D='.'):
 """Scan RT files in D for a pathological fit pattern: two consecutive
 converged track fits with an *identical* chi2 (within the first 20000
 events), which indicates duplicated/corrupted reconstruction output.
 Returns the list of suspicious files; files whose cbmsim tree is missing
 are flagged with a trailing '?'.
 """
 badFile=[]
 # all RT files
 for x in os.listdir(D):
   if x.find('_RT')>0 and x.find('histos')<0:
     test = ROOT.TFile(D+'/'+x)
     sTree = test.cbmsim
     if not sTree: badFile.append(x+"?")
     elif sTree.GetBranch("FitTracks"):
      prev = 0
      for n in range(min(20000,sTree.GetEntries())):
       rc = sTree.GetEvent(n)
       if sTree.FitTracks.GetEntries()>0:
         st = sTree.FitTracks[0].getFitStatus()
         if not st.isFitConverged(): continue
         # identical chi2 as the previous converged fit -> suspicious
         if prev==st.getChi2():
           badFile.append(x)
           break
         else: prev=st.getChi2()
 return badFile
def checkFilesWithTracks3(D='.'):
 """Return {filename: size_MB} for RT files in D whose FitTracks branch is
 suspiciously small (< 1 MB zipped).

 Files without a cbmsim tree are flagged with key <name>+'?' and value -1.
 Bug fix: the original called badFile.append(...) on a dict, which raises
 AttributeError as soon as a tree-less file is encountered.
 """
 badFile={}
 # all RT files
 for x in os.listdir(D):
   if x.find('_RT')>0 and x.find('histos')<0:
     test = ROOT.TFile(D+'/'+x)
     sTree = test.cbmsim
     if not sTree:
       badFile[x+"?"] = -1  # was badFile.append(x+"?"): dicts have no append
       continue
     b = sTree.GetBranch("FitTracks")
     if b:
       if b.GetZipBytes()/1.E6 < 1.: badFile[x]= b.GetZipBytes()/1.E6
 return badFile
# usage example: copy back suspicious files found by checkFilesWithTracks2/3:
#   for f in bf: os.system('cp ../../ship-ubuntu-1710-64/RUN_8000_2395/'+f+' .')
def cleanUp(D='.'):
    # Remove the raw counterpart of every file that already has RT relations.
    # NOTE(review): the D argument is currently unused; checkFilesWithRT()
    # always scans the working directory.
    withRT = checkFilesWithRT()[0]
    for fname in withRT:
        os.system('rm ' + fname.replace('_RT',''))
def copyMissingFiles(remote="../../ship-ubuntu-1710-64/RUN_8000_2395",exclude=[]):
toCopy=[]
allFilesR = os.listdir(remote)
allFilesL = os.listdir(".")
for fname in allFilesR:
if not fname.find('histos')<0: continue
if fname.find('RT')<0: continue
if fname in exclude: continue
if not fname in allFilesL: toCopy.append(fname)
print "len",len(toCopy)
for fname in toCopy: os.system('cp '+remote+"/"+fname+' .')
def importRTFiles(local='.',remote='/home/truf/ship-ubuntu-1710-32/home/truf/muflux/Jan08'):
    # Copy RT files (excluding histos) from a mounted remote directory when
    # they do not yet have tracks locally. To mount the remote:
    #   mkdir /media/truf/disk2/home/truf/ShipSoft/ship-ubuntu-1710-32
    #   sshfs ship-ubuntu-1710-32.cern.ch:/home/truf/muflux /media/truf/disk2/home/truf/ShipSoft/ship-ubuntu-1710-32
    tracked = checkFilesWithTracks(local)
    for fname in os.listdir(remote):
        if fname.find('_RT') <= 0: continue
        if fname.find('histos') >= 0: continue
        if fname in tracked: continue
        os.system('cp '+remote+'/'+fname+' .')
def importRecoFiles(local='.',remote='/media/truf/disk2/home/truf/ShipSoft/ship-ubuntu-1710-16/home/truf/muflux/Jan08'):
    # Pull every remote file that already contains fitted tracks into the cwd.
    for fname in checkFilesWithTracks(remote):
        os.system('cp '+remote+'/'+fname+' .')
def mergeHistos(local='.',case='residuals'):
 """hadd all histogram files of one kind in `local` into a single file.

 case 'residuals' merges histos-residuals-* into residuals.root; any other
 case merges histos-analysis-* into momDistributions.root. To keep the hadd
 command line from growing unbounded, after every 500 inputs the partial
 merge is executed and fed back in as 'tmp.root' for the next batch; the
 final (possibly partial) batch is merged by the last os.system call.
 """
 allFiles = os.listdir(local)
 if case == 'residuals':
   dest = 'residuals.root'
   tag = 'histos-residuals'
 else:
   dest = 'momDistributions.root'
   tag = 'histos-analysis'
 cmd = "hadd -f "+dest+' '
 N=0
 for x in allFiles:
   if not x.find(tag)<0 :
     cmd += (local+'/'+x+' ')
     N+=1
   if N>500:
     os.system(cmd)
     os.system('cp '+dest+' tmp.root')
     cmd = "hadd -f "+dest+' tmp.root '
     N=0
 os.system(cmd)
def checkRecoRun(eosLocation=eospath,local='.'):
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eosLocation,shell=True)
for x in temp.split('\n'):
if x.find('.root')<0: continue
if not x.find('START')<0: continue
fname = x[x.rfind('/')+1:]
RTname = fname.replace('.root','_RT.root')
histosName = "histos-residuals-"+RTname
if not os.path.isfile(RTname):
print "missing RT file",fname
if not os.path.isfile(histosName):
print "missing histogram file",fname
def exportRunToEos(eosLocation="/eos/experiment/ship/user/truf/muflux-reco",run=run,local="."):
temp = os.system("xrdfs "+os.environ['EOSSHIP']+" mkdir "+eosLocation+"/"+run)
failures = []
for x in os.listdir(local):
if x.find('.root')<0: continue
cmd = "xrdcp -f "+x+" $EOSSHIP/"+eosLocation+"/"+run+"/"+x
rc = os.system(cmd)
if rc != 0: failures.append(x)
if len(failures)!=0: print failures
def makeMomDistributions(run=0):
 """Launch the anaResiduals momentum analysis for every file with tracks.

 run==0: use local files that contain FitTracks; otherwise list the run's
 reconstructed files on EOS. Files with 'sys' in the name are skipped, as
 are files whose histos-analysis output already exists. Jobs run in the
 background, throttled to ncpus concurrent python processes.
 """
 if run==0: fileList = checkFilesWithTracks(D='.')
 else:
  eospathReco = '/eos/experiment/ship/user/odurhan/muflux-recodata/'+run
  fileList = []
  temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco,shell=True)
  for x in temp.split('\n'):
   if x.find('.root')<0: continue
   fileList.append( os.environ['EOSSHIP'] + x[x.find('/eos'):])
 # all RT files with tracks
 for fname in fileList:
   if not fname.find('sys')<0: continue
   if os.path.isfile('histos-analysis-'+fname[fname.rfind('/')+1:]): continue
   cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c anaResiduals -f "+fname+' &'
   print 'momentum analysis:', cmd
   os.system(cmd)
   time.sleep(10)
   # throttle: wait until a cpu slot becomes free
   while 1>0:
    if count_python_processes('drifttubeMonitoring')<ncpus: break
    time.sleep(10)
 print "finished all the tasks."
zeroField = ['2199','2200','2201']  # run numbers skipped by massProduction (name suggests zero-field runs -- confirm)
noRPC = ['2144','2154','2192','2210','2217','2218','2235','2236','2237','2240','2241','2243','2291','2345','2359']  # NOTE(review): not referenced in this file's visible code
def massProduction(keyword = 'RUN_8000_23',fnames=[],merge=False):
 """Momentum analysis over all reconstructed runs matching `keyword`.

 merge=True: hadd the per-file momentum histograms inside each matching
 local run directory. Otherwise: for each run directory on EOS (or given in
 `fnames`), create the local directory if needed, skip runs listed in
 zeroField, and run makeMomDistributions from inside it.
 NOTE(review): the inner `for x in zeroField` reuses the outer loop name x;
 harmless here since x is rebound at the next outer iteration, but fragile.
 """
 pathToMacro = "$FAIRSHIP/charmdet/"
 eospathReco = '/eos/experiment/ship/user/odurhan/muflux-recodata/'
 if merge:
  for run in os.listdir('.'):
   if run.find(keyword)<0: continue
   os.chdir(run)
   mergeHistos(local='.',case='momDistributions')
   os.chdir('../')
 else:
  if len(fnames)==0:
   temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco,shell=True)
   fnames = temp.split('\n')
  for x in fnames:
   if x.find(keyword)<0: continue
   run = x[x.rfind('/')+1:]
   if not run in os.listdir('.'): os.system('mkdir '+run)
   temp2 = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco+run,shell=True)
   if temp2.find('.root')<0: continue
   skip = False
   for x in zeroField:
    if not run.find(x)<0: skip = True
   if skip: continue
   os.chdir(run)
   makeMomDistributions(run)
   os.chdir('../')
def massProductionAlignment(keyword = 'RUN_8000_2395',fnames=[],merge=False):
 """Residual/alignment histograms over all reconstructed runs matching `keyword`.

 merge=True: hadd the residual histograms inside each matching local run
 directory. Otherwise: for each matching run on EOS (or in `fnames`), build
 the list of the run's reco files and call checkAlignment from inside the
 pre-existing local run directory (runs without a local directory are only
 reported, not created).
 NOTE(review): the inner `for x in temp2.split` reuses the outer loop name x;
 harmless here since x is rebound at the next outer iteration, but fragile.
 """
 pathToMacro = "$FAIRSHIP/charmdet/"
 eospathReco = '/eos/experiment/ship/user/odurhan/muflux-recodata/'
 if merge:
  for run in os.listdir('.'):
   if run.find(keyword)<0: continue
   os.chdir(run)
   mergeHistos(local='.')
   os.chdir('../')
 else:
  if len(fnames)==0:
   temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco,shell=True)
   fnames = temp.split('\n')
  for x in fnames:
   if x.find(keyword)<0: continue
   run = x[x.rfind('/')+1:]
   if not run in os.listdir('.'):
     print "directory for this run does not exist",run
     # os.system('mkdir '+run)
     continue
   temp2 = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco+run,shell=True)
   if temp2.find('.root')<0: continue
   os.chdir(run)
   fileList = []
   for x in temp2.split('\n'):
    if x.find('.root')<0: continue
    fileList.append( os.environ['EOSSHIP'] + x[x.find('/eos'):])
   checkAlignment(fileList)
   os.chdir('../')
def redoMuonTracks():
fileList = checkFilesWithTracks(D='.')
for fname in fileList:
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c recoMuonTaggerTracks -u 1 -f "+fname+' &'
print 'redo muonTracks:', cmd
os.system(cmd)
time.sleep(10)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(10)
print "finished all the tasks."
def reRunReco(r,fname):
 """Redo the track reconstruction for one raw file of run `r`.

 Copies the raw file from EOS to <name>_RT2.root, transplants the RT
 calibration (the pickled tMinAndTmax object) and the TDC/hit map
 histograms from the existing _RT.root on EOS into it, then launches a
 background recoStep1 job on the new file.
 """
 fRT = fname.replace('.root','_RT2.root')
 os.system('xrdcp -f $EOSSHIP/eos/experiment/ship/data/muflux/DATA_Rebuild_8000/rootdata/'+r+'/'+fname+' '+fRT)
 f = ROOT.TFile.Open(os.environ['EOSSHIP']+'/eos/experiment/ship/user/odurhan/muflux-recodata/'+r+'/'+fname.replace('.root','_RT.root'))
 ftemp = ROOT.TFile(fRT,'update')
 ftemp.cd('')
 # move the pickled RT calibration from the old reco file into the new one
 upkl = Unpickler(f)
 tMinAndTmax = upkl.load('tMinAndTmax')
 pkl = Pickler(ftemp)
 pkl.dump(tMinAndTmax,'tMinAndTmax')
 # clone the monitoring histograms into a fresh 'histos' directory
 ftemp.mkdir('histos')
 ftemp.histos.cd('')
 for tc in ['TDCMapsX','hitMapsX']:
  tmp = f.histos.Get(tc)
  X = tmp.Clone()
  X.Write()
 ftemp.Write("",ROOT.TFile.kOverwrite)
 ftemp.Close()
 cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c recoStep1 -u 1 -f "+fRT+' &'
 os.system(cmd)
 print 'step 1:', cmd
def pot():
fileList=[]
# all RT files
for x in os.listdir('.'):
if x.find('_RT')>0 and x.find('histos')<0:
fileList.append(x)
fileList.sort()
scalerStat = {}
for fname in fileList:
f=ROOT.TFile(fname)
if not f.FindKey("scalers"):
print "no scalers in this file",fname
continue
scalers = f.scalers
scalers.GetEntry(0)
for x in scalers.GetListOfBranches():
name = x.GetName()
s = eval('scalers.'+name)
if name!='slices':
if not scalerStat.has_key(name):scalerStat[name]=0
scalerStat[name]+=s
keys = scalerStat.keys()
keys.sort()
for k in keys: print k,':',scalerStat[k]
def makeDTEfficiency(merge=False):
 """Drift-tube efficiency study using RPC tracks.

 merge=False: launch a DTeffWithRPCTracks job for every local SPILL* file
 (background jobs, throttled to ncpus). merge=True: hadd all histos-DTEff*
 files into DTEff.root instead.
 """
 cmd = "hadd -f DTEff.root "
 for fname in os.listdir('.'):
  if not merge and fname.find('SPILL')==0:
   cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c DTeffWithRPCTracks -f "+fname+' &'
   os.system(cmd)
   time.sleep(10)
   while 1>0:
    if count_python_processes('drifttubeMonitoring')<ncpus: break
    time.sleep(10)
  elif merge and fname.find('histos-DTEff')==0:
   cmd+=fname+' '
 if merge: os.system(cmd)
 print "finished all the tasks."
def importMomDistr(keyword = 'RUN_8000_2'):
    # Copy momDistributions.root of every run directory matching `keyword`
    # from the mounted remote disk into a same-named local run directory.
    pathHistos = '/media/truf/disk2/home/truf/ShipSoft/ship-ubuntu-1710-64/'
    for runDir in os.listdir(pathHistos):
        if runDir.find(keyword) < 0: continue
        if runDir not in os.listdir('.'):
            os.system('mkdir '+runDir)
        os.system('cp '+pathHistos+runDir+'/momDistributions.root '+runDir)
| [
"Thomas.Ruf@cern.ch"
] | Thomas.Ruf@cern.ch |
b3ffb1f7298e5fec7c80d71946a31e424ff4682d | f3a31d0692f0cc4deb5d23f9da38eb69623ca8d1 | /Dynamics/Finite-sized_SIAM/fig_4.py | 2ada6228f29e1b93cfd3e04bbda484ce20125030 | [] | no_license | soumyodipto/SIAM | f2039d5796412db1e2204b65f1b43189227c521b | 4c7a96684dff759d0da453ed437827dd1b3da0fd | refs/heads/master | 2021-09-09T09:25:10.994397 | 2018-03-14T19:01:36 | 2018-03-14T19:01:36 | 107,188,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,409 | py | from os import path
# Figure: spectral functions A(omega) of the finite-size SIAM, comparing exact
# diagonalization (ED) with 1- and 2-impurity embedding for four values of U,
# in a 2x2 panel layout saved as ref1_reply_fig2.pdf.
# NOTE(review): h5py and matplotlib.patches are imported but never used below;
# AutoMinorLocator is imported twice.
import h5py
import numpy as np
import matplotlib
import matplotlib.patches as mpatches
import pylab as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib.ticker import AutoMinorLocator
from matplotlib.ticker import FormatStrFormatter
majorFormatter = FormatStrFormatter('%g')
# global matplotlib styling: LaTeX text rendering with extra packages
plt.rc('text.latex', preamble = '\usepackage{amsmath},' '\usepackage{yfonts},' '\usepackage[T1]{fontenc},' '\usepackage[latin1]{inputenc},' '\usepackage{txfonts},' '\usepackage{times},' '\usepackage{blindtext},' '\usepackage{braket}' )
plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica'],'size':8})
plt.rc('lines', linewidth=0.7)
plt.rc('xtick', labelsize='small')
plt.rc('ytick', labelsize='small')
plt.rc('legend', fontsize='medium')
plt.rc('text', usetex=True)
# model/plot parameters: lattice size, broadening eta, impurity counts, U values
nsites=8
delta=0.005
nimps=[1,2]
Us = [0.0, 2.5, 5.0, 7.5]
omega_grid = np.arange(-1.,1.,0.01)
# load the DOS curves (column 1 of each data file) for each U
edy=[]
emb1y=[]
emb2y=[]
#emb4y=[]
for ind, U in enumerate(Us):
    eddir = './siam_ED/siam_dos_ed_U'+str(U)+'_nsites'+str(nsites)+'_delta'+str(delta)+'/siam_dos_ed_nsites'+str(nsites)+'_U'+str(U)+'_eta'+str(delta)
    data_ed = np.loadtxt(eddir)
    edy.append(data_ed[:,1])
    # NOTE(review): embedding file names use U/10 while ED uses U -- presumably
    # a units convention of the data files; confirm against the producers.
    emb1dir = './siam_emb/siam_emb_dos_nsites'+str(nsites)+'_U'+str(U/10)+'_nimp'+str(1)+'_eta'+str(delta)
    data_emb1 = np.loadtxt(emb1dir)
    emb1y.append(data_emb1[:,1])
    emb2dir = './siam_emb/siam_emb_dos_nsites'+str(nsites)+'_U'+str(U/10)+'_nimp'+str(2)+'_eta'+str(delta)
    data_emb2 = np.loadtxt(emb2dir)
    emb2y.append(data_emb2[:,1])
    #emb4dir = './siam_emb/siam_emb_dos_nsites'+str(nsites)+'_U'+str(U/10)+'_nimp'+str(4)+'_eta'+str(delta)
    #data_emb4 = np.loadtxt(emb4dir)
    #emb4y.append(data_emb4[:,1])
# 2x2 figure; indices [120:190] select the omega window [0.2, 0.9)
fig_size = (3.375,3.375/1.3)
f1, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=fig_size, dpi=200, sharex='col', sharey='row')
#dashes = [5,2,10,5]
dashes=[2, 1]
dashes1 = [5,2]
# panel 1 (top-left): U/V = 0
#ax1 = plt.subplot(221)
line1, = ax1.plot(omega_grid[120:190], edy[0][120:190],'k', markersize=1.5,markevery=1)
line2, = ax1.plot(omega_grid[120:190], emb1y[0][120:190], 'b--')
line3, = ax1.plot(omega_grid[120:190], emb2y[0][120:190], 'r--')
line2.set_dashes(dashes)
line3.set_dashes(dashes1)
#line4, = ax1.plot(omega_grid, emb4y[0], 'r--')
#ax1.legend(('ED','Emb(2)','Emb(1)'),'upper right',ncol=1,prop={'size':6})
ax1.set_ylim(0,8)
#textstr = '$\eta='+str(delta)+'$'
#plt.text(0.1, 0.7, textstr ,verticalalignment='top', horizontalalignment='left',transform=ax1.transAxes, color='black', fontsize=20)
textstr = '$U/V=0$'
plt.text(0.1, 0.9, textstr ,verticalalignment='top', horizontalalignment='left',transform=ax1.transAxes, color='black')
#ax1.set_xlabel(r'$\omega$')
ax1.set_ylabel(r'$A(\omega)$')
ax1.yaxis.set_ticks(np.arange(0,10,2))
minorLocator = AutoMinorLocator()
ax1.yaxis.set_minor_locator(minorLocator)
ax1.tick_params(which='both', width=0.5)
ax1.tick_params(which='major', length=4)
ax1.tick_params(which='minor', length=1.5)
ax1.yaxis.set_major_formatter(majorFormatter)
# panel 2 (top-right): U/V = 2.5, carries the shared legend
#ax2 = plt.subplot(222, sharex=ax1, sharey=ax1)
line1, = ax2.plot(omega_grid[120:190], edy[1][120:190],'k', markersize=1.5,markevery=1, label='ED')
line2, = ax2.plot(omega_grid[120:190], emb1y[1][120:190], 'b--', label='Emb(1)')
line3, = ax2.plot(omega_grid[120:190], emb2y[1][120:190], 'r--', label='Emb(2)')
line2.set_dashes(dashes)
line3.set_dashes(dashes1)
ax2.legend(loc='upper right',frameon=True,ncol=1,prop={'size':6})
#line4, = ax2.plot(omega_grid, emb4y[1], 'r--')
textstr = '$U/V=2.5$'
plt.text(0.1, 0.9, textstr ,verticalalignment='top', horizontalalignment='left',transform=ax2.transAxes, color='black')
#ax2.tick_params(axis='x',length=0, width=0)
# panel 3 (bottom-left): U/V = 5.0
#ax3 = plt.subplot(223, sharex=ax1, sharey=ax1)
line1, = ax3.plot(omega_grid[120:190], edy[2][120:190],'k', markersize=1.5,markevery=1)
line2, = ax3.plot(omega_grid[120:190], emb1y[2][120:190], 'b--')
line3, = ax3.plot(omega_grid[120:190], emb2y[2][120:190], 'r--')
line2.set_dashes(dashes)
line3.set_dashes(dashes1)
#line4, = ax3.plot(omega_grid, emb4y[2], 'r--')
ax3.set_xlabel(r'$\omega$')
ax3.set_ylabel(r'$A(\omega)$')
ax3.set_ylim(0,8)
textstr = '$U/V=5.0$'
plt.text(0.1, 0.9, textstr ,verticalalignment='top', horizontalalignment='left',transform=ax3.transAxes, color='black')
ax3.yaxis.set_ticks(np.arange(0,10,2))
ax3.xaxis.set_ticks([0.2,0.4,0.6,0.8])
minorLocator = AutoMinorLocator()
ax3.yaxis.set_minor_locator(minorLocator)
ax3.tick_params(which='both', width=0.5)
ax3.tick_params(which='major', length=4)
ax3.tick_params(which='minor', length=1.5)
ax3.xaxis.set_major_formatter(majorFormatter)
ax3.yaxis.set_major_formatter(majorFormatter)
# panel 4 (bottom-right): U/V = 7.5
#ax4 = plt.subplot(224, sharex=ax1, sharey=ax1)
line1, = ax4.plot(omega_grid[120:190], edy[3][120:190],'k', markersize=1.5,markevery=1)
line2, = ax4.plot(omega_grid[120:190], emb1y[3][120:190], 'b--')
line3, = ax4.plot(omega_grid[120:190], emb2y[3][120:190], 'r--')
line2.set_dashes(dashes)
line3.set_dashes(dashes1)
ax4.xaxis.set_ticks([0.2,0.4,0.6,0.8])
#line4, = ax4.plot(omega_grid, emb4y[3], 'r--')
textstr = '$U/V=7.5$'
plt.text(0.1, 0.9, textstr ,verticalalignment='top', horizontalalignment='left',transform=ax4.transAxes, color='black')
ax4.set_xlabel(r'$\omega$')
ax4.xaxis.set_major_formatter(majorFormatter)
# final layout tweaks and output
f1.subplots_adjust(hspace=0)
#f.subplots_adjust(wspace=0.05)
f1.tight_layout(pad=0.15)
f1.subplots_adjust(wspace=0.1)
f1.savefig('ref1_reply_fig2.pdf')
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.