blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e6c68eff9e6a0fdc168b30b5b841532a1cf4b03d | 2daa3894e6d6929fd04145100d8a3be5eedbe21c | /tests/artificial/transf_inv/trend_constant/cycle_7/ar_/test_artificial_32_inv_constant_7__0.py | a69b8bc01e2baf94726fff51468a7e4e6843851d | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Henri-Lo/pyaf | a1f73a0cc807873bd7b79648fe51de9cfd6c126a | 08c968425d85dcace974d90db7f07c845a0fe914 | refs/heads/master | 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 32 , FREQ = 'D', seed = 0, trendtype = "constant", cycle_length = 7, transform = "inv", sigma = 0.0, exog_count = 0, ar_order = 0);
art.process_dataset(dataset); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
ade16edad2cbac40e9bacca1b0aba5e260577e2f | dfe925c32292ba1e054b86ea660546eb9eac921b | /example/gs/__init__.py | c371d57c6a673838f1d0eb1f56482200e99ebb74 | [] | no_license | keul/example.gs | bc64488d5e67492994b5a12a99d0fa64f1af87de | 5435e9f4fde66b810ff184c25e2dc26aa40900df | refs/heads/master | 2020-04-30T09:50:23.781896 | 2013-05-01T18:02:15 | 2013-05-01T18:02:15 | 9,789,567 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | # -*- coding: utf8 -*-
import logging
from zope.i18nmessageid import MessageFactory
from example.gs import config
from example.gs.tool import FooTool
from Products.Archetypes import atapi
from Products.CMFCore import utils
logger = logging.getLogger('example.gs')
gsMessageFactory = MessageFactory('example.gs')
def initialize(context):
"""Initializer called when used as a Zope 2 product."""
content_types, constructors, ftis = atapi.process_types(
atapi.listTypes(config.PROJECTNAME),
config.PROJECTNAME)
for atype, constructor in zip(content_types, constructors):
utils.ContentInit('%s: %s' % (config.PROJECTNAME, atype.portal_type),
content_types=(atype, ),
permission=config.ADD_PERMISSIONS[atype.portal_type],
extra_constructors=(constructor,),
).initialize(context)
# utils.ToolInit("Foo Tool",
# tools=(FooTool,),
# icon="qm.gif",
# ).initialize(context)
| [
"luca@keul.it"
] | luca@keul.it |
c73c4ac3c5fb9fa6999d6009f4f1f891873eff8f | 9b854e7c87852fa10b57d443e6048b3bc7c63047 | /setup.py | 2ed0cb9856356a7ef57278639a2c1acb3fb6ee50 | [
"MIT"
] | permissive | qqccmm/wikipedia-histories | 01979db66a2c505bb1290978af3c3e0f8d3e2629 | 382babd9b4ffbb217cb36da9f6563f3ed17785ea | refs/heads/master | 2022-12-25T22:39:36.042552 | 2020-10-08T15:59:29 | 2020-10-08T15:59:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,105 | py | # -*- coding: utf-8 -*-
# python3 setup.py sdist bdist_wheel
# twine upload --skip-existing dist/*
import codecs
import os
import re
import setuptools
def local_file(file):
return codecs.open(
os.path.join(os.path.dirname(__file__), file), 'r', 'utf-8'
)
with open("README.md", "r") as fh:
long_description = fh.read()
install_reqs = [
line.strip()
for line in local_file('requirements.txt').readlines()
if line.strip() != ''
]
setuptools.setup(
name="wikipedia_histories",
version="0.1.1",
author="Nathan Drezner",
author_email="nathan@drezner.com",
description="A simple package designed to collect the edit histories of Wikipedia pages",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ndrezn/wikipedia-histories",
install_requires = install_reqs,
packages=['wikipedia_histories'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
) | [
"nathan@drezner.com"
] | nathan@drezner.com |
194e1ed608f87e07c5bcab61c6f87c8dffa0bf91 | 11514a9a6288bd9f00f972e2facaaa00d7d771af | /test1.py | b669a63b65c7d2aa7cf21faa763fb2c1bfd110ff | [] | no_license | vsevolodnedora/sideprojects | 42b8bbc4e7fc29c44f50a2ce4e111a45da08cd97 | 111e15fc19556624a931d01399d8013dd954f862 | refs/heads/master | 2023-04-06T17:45:54.469247 | 2020-05-31T19:26:48 | 2020-05-31T19:26:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,161 | py | from mayavi import mlab
import numpy as np
#mlab.options.offscreen = True
import h5py as h5
from tvtk.util.ctf import PiecewiseFunction # countors in rendering
from tvtk.util.ctf import ColorTransferFunction # colormap for rendering
import matplotlib.pyplot as plt
# first test
def test1():
    """Render a 3-D contour plot of a radial cosine field and save a PNG."""
    # 100^3 grid on the cube [-10, 10]^3.
    X= np.linspace(-10, 10, 100)
    x, y, z = np.meshgrid(X, X, X)
    # Scalar field: cos(0.66*pi * x / r), r = distance from the origin.
    f = np.cos(0.66 * np.pi * (x) / np.sqrt(x ** 2 + y ** 2 + z ** 2))
    fig = mlab.figure()
    mlab.contour3d(f, contours=6, transparent=True, figure=fig)
    print("sved: {}".format("./test1.png"))
    # NOTE(review): mlab.show() blocks until the window is closed, so the
    # savefig() below only runs afterwards and may capture an empty scene.
    mlab.show()
    mlab.savefig("./test1.png")
    mlab.clf(fig)
    mlab.close()
#test1()
def reset():
    """Clear and close the current mayavi scene, then return a brand-new figure."""
    mlab.clf()
    mlab.close()
    return mlab.figure()
def test2():
    """Contour the radial cosine field, then redo it via the mlab pipeline
    with per-contour opacities."""
    X = np.linspace(-10, 10, 100)
    x, y, z = np.meshgrid(X, X, X)
    f = np.cos(0.66 * np.pi * (x) / np.sqrt(x ** 2 + y ** 2 + z ** 2))
    fig = mlab.figure()
    mlab.contour3d(f, contours=6, transparent=True, figure=fig)
    print("sved: {}".format("./test1.png"))  # NOTE(review): nothing is actually saved here
    #mlab.show()
    mlab.clf(fig)
    mlab.close()
    fig = reset() # clear the previous fig
    # Create a scalar field object
    sca = mlab.pipeline.scalar_field(f) # add the data to the pipeline
    sca.origin = (-10., -10., -10.) # set the center of the plot
    dx = X[1] - X[0] # separation between slices
    sca.spacing = (dx, dx, dx) # set separation
    sca.scalar_name = 'f' # set the name of the field
    #mlab.pipeline.iso_surface(sca, transparent=True, contours=[0., 0.25, 0.5], figure=fig) # plot
    #mlab.show()
    # manually setting opacity for contours: higher field value -> more transparent
    fig = reset()
    mlab.pipeline.iso_surface(sca, opacity=1., contours=[0.], figure=fig) # Solid
    mlab.pipeline.iso_surface(sca, opacity=0.4, contours=[0.25], figure=fig) # transparent
    mlab.pipeline.iso_surface(sca, opacity=0.2, contours=[0.5], figure=fig) # transparent
    mlab.show()
#test2()
# working with real data
def test4():
    """Load the density snapshot from ../rho3d.h5 and draw iso-surfaces of
    log10(rho) in cgs units.

    Reads dataset 't=3072.', converts code units to g/cm^3, and plots
    three nested iso-surfaces (log10 rho = 13, 10, 8) with decreasing
    opacity.
    """
    dfile = h5.File('../rho3d.h5', 'r')
    print(dfile.attrs.keys())
    print(dfile.attrs['mass ratio'])
    print(dfile['t=3072.'].attrs.keys())
    #
    dset = dfile['t=3072.']
    xyz = dset.attrs['grid']
    dx = dset.attrs['dx']
    #
    fig = reset()
    mlab.contour3d(dset[:].T, contours=6, transparent=True, figure=fig)
    #mlab.show()
    # see the data limits
    print(np.max(dset[:]), np.min(dset[:]))
    rho_cgs = dset[:] * 6.176269145886166e+17 # convert to cgs
    # BUGFIX: the original passed (max, min) so "rho_min" showed the maximum
    # and vice versa; the format arguments are now in the labelled order.
    print("rho_min = %.3e g/cm^3,\nrho_max = %.3e g/cm^3" % (np.min(rho_cgs), np.max(rho_cgs)))
    rho_cgs = np.log10(rho_cgs)
    fig = reset()
    # Create a scalar field object
    scr = mlab.pipeline.scalar_field(rho_cgs.T)
    scr.origin = (-100., -100., 0.)
    dx = dset.attrs['dx']
    scr.spacing = (dx[0], dx[1], dx[2])
    scr.scalar_name = 'rho'
    # Nested density shells: densest core solid, outer layers transparent.
    mlab.pipeline.iso_surface(scr, opacity=1., contours=[13], figure=fig)
    mlab.pipeline.iso_surface(scr, opacity=0.4, contours=[10], figure=fig)
    mlab.pipeline.iso_surface(scr, opacity=0.2, contours=[8], figure=fig)
    mlab.show()
#test4()
# volume rendering
def test5():
    """Volume-render the radial cosine field with mayavi's default transfer
    functions (vmin/vmax clip the rendered value range)."""
    X = np.linspace(-10, 10, 100)
    x, y, z = np.meshgrid(X, X, X)
    f = np.cos(0.66 * np.pi * (x) / np.sqrt(x ** 2 + y ** 2 + z ** 2))
    fig = reset()
    sc = mlab.pipeline.scalar_field(f)
    sc.origin = (-10., -10., -10.)
    dx = X[1] - X[0]
    sc.spacing = (dx, dx, dx)
    sc.scalar_name = 'f_xyz'
    mlab.pipeline.volume(sc, vmin=-0.1, vmax=0.6, figure=fig)
    mlab.show()
#test5()
# volume rend. with values
def test6():
    """Volume rendering with a hand-built opacity transfer function: a sum
    of Gaussian bumps makes only chosen field values visible."""
    X = np.linspace(-10, 10, 100)
    x, y, z = np.meshgrid(X, X, X)
    f = np.cos(0.66 * np.pi * (x) / np.sqrt(x ** 2 + y ** 2 + z ** 2))
    fig = reset()
    # Create an array of samples between the min and max values we want to show
    smpl = np.linspace(0.0, 0.5, 50)
    # Initiate opacities
    opac = np.zeros_like(smpl)
    # Now, add gaussian-shaped functions around the values we are interested in
    centers = [0.0, 0.25, 0.49]
    opacs = [1.0, 0.5, 0.3]
    widths = [0.01, 0.04, 0.01]
    for c, o, w in zip(centers, opacs, widths):
        opac += o * np.exp(-((smpl - c) / w) ** 2)
    # Now define piecewise opacity transfer function
    otf = PiecewiseFunction()
    for v, o in zip(smpl, opac):
        otf.add_point(v, o)
    def return_vrend(f, X, fig, otf):
        # Build the scalar field and attach the custom opacity curve.
        sc = mlab.pipeline.scalar_field(f)
        sc.origin = (-10., -10., -10.)
        dx = X[1] - X[0]
        sc.spacing = (dx, dx, dx)
        sc.scalar_name = 'logf_xyz'
        vol = mlab.pipeline.volume(sc, vmin=0., vmax=0.52, figure=fig)
        vol._otf = otf
        vol._volume_property.set_scalar_opacity(otf)
        return vol
    #
    return_vrend(f, X, fig, otf)
    mlab.show()
#test6()
# volume rend. with val and change of colormap -- see the GREEN
def test7():
    """Volume rendering with both a custom opacity transfer function and a
    custom color transfer function sampled from matplotlib's 'jet_r'."""
    fig = reset()
    X = np.linspace(-10, 10, 100)
    x, y, z = np.meshgrid(X, X, X)
    f = np.cos(0.66 * np.pi * (x) / np.sqrt(x ** 2 + y ** 2 + z ** 2))
    ''' --- create opacities --- '''
    # Create an array of samples between the min and max values we want to show
    smpl = np.linspace(0.0, 0.5, 50)
    # Initiate opacities
    opac = np.zeros_like(smpl)
    # Now, add gaussian-shaped functions around the values we are interested in
    centers = [0.0, 0.25, 0.49]
    opacs = [1.0, 0.5, 0.3]
    widths = [0.01, 0.04, 0.01]
    for c, o, w in zip(centers, opacs, widths):
        opac += o * np.exp(-((smpl - c) / w) ** 2)
    # Now define piecewise opacity transfer function
    otf = PiecewiseFunction()
    for v, o in zip(smpl, opac):
        otf.add_point(v, o)
    ''' --- create colormaps --- '''
    # Initialize the color transfer function and set the range
    ctf = ColorTransferFunction()
    ctf.range = [0., 0.5]
    # Choose a color map and sample it at 10 discrete levels
    cm = plt.get_cmap('jet_r', 10)
    ik = np.arange(0, 10)
    # colors (drop the alpha channel)
    ck = cm(ik)[:, :3] # [:, r, g, b]
    # vertexes, normalized to [0, 1]
    vk = ik / float(ik[-1])
    clrs = [(v, tuple(c)) for v, c in zip(vk, ck)]
    for v, (r, g, b) in clrs:
        ctf.add_rgb_point(0.0 + v * (0.5 - 0.0), r, g, b)
    def return_vrend(f, fig, otf, ctf):
        # Scalar field plus the custom opacity and color transfer functions.
        sc = mlab.pipeline.scalar_field(f)
        sc.origin = (-10., -10., -10.)
        dx = X[1] - X[0]
        sc.spacing = (dx, dx, dx)
        sc.scalar_name = 'logf_xyz'
        vol = mlab.pipeline.volume(sc, vmin=0., vmax=0.52, figure=fig)
        vol._otf = otf
        vol._volume_property.set_scalar_opacity(otf)
        vol._volume_property.set_color(ctf)
        vol._ctf = ctf
        vol.update_ctf = True
        return vol
    return_vrend(f, fig, otf, ctf)
    mlab.show()
    mlab.clf()
    mlab.close()
#test7()
# figure manipulation
def test8():
    """Same custom OTF/CTF volume rendering as test7, but with an explicit
    figure size/colors and an orientation-axes widget."""
    X = np.linspace(-10, 10, 100)
    x, y, z = np.meshgrid(X, X, X)
    f = np.cos(0.66 * np.pi * (x) / np.sqrt(x ** 2 + y ** 2 + z ** 2))
    ''' --- create opacities --- '''
    # Create an array of samples between the min and max values we want to show
    smpl = np.linspace(0.0, 0.5, 50)
    # Initiate opacities
    opac = np.zeros_like(smpl)
    # Now, add gaussian-shaped functions around the values we are interested in
    centers = [0.0, 0.25, 0.49]
    opacs = [1.0, 0.5, 0.3]
    widths = [0.01, 0.04, 0.01]
    for c, o, w in zip(centers, opacs, widths):
        opac += o * np.exp(-((smpl - c) / w) ** 2)
    # Now define piecewise opacity transfer function
    otf = PiecewiseFunction()
    for v, o in zip(smpl, opac):
        otf.add_point(v, o)
    ''' --- create colormaps --- '''
    # Initialize the color transfer function and set the range
    ctf = ColorTransferFunction()
    ctf.range = [0., 0.5]
    # Choose a color map and sample it at 10 discrete levels
    cm = plt.get_cmap('jet_r', 10)
    ik = np.arange(0, 10)
    # colors (drop the alpha channel)
    ck = cm(ik)[:, :3] # [:, r, g, b]
    # vertexes, normalized to [0, 1]
    vk = ik / float(ik[-1])
    clrs = [(v, tuple(c)) for v, c in zip(vk, ck)]
    for v, (r, g, b) in clrs:
        ctf.add_rgb_point(0.0 + v * (0.5 - 0.0), r, g, b)
    def return_vrend(f, fig, otf, ctf):
        # Scalar field plus the custom opacity and color transfer functions.
        sc = mlab.pipeline.scalar_field(f)
        sc.origin = (-10., -10., -10.)
        dx = X[1] - X[0]
        sc.spacing = (dx, dx, dx)
        sc.scalar_name = 'logf_xyz'
        vol = mlab.pipeline.volume(sc, vmin=0., vmax=0.52, figure=fig)
        vol._otf = otf
        vol._volume_property.set_scalar_opacity(otf)
        vol._volume_property.set_color(ctf)
        vol._ctf = ctf
        vol.update_ctf = True
        return vol
    ''' --- plot --- '''
    # 1378x720 window, black background, white foreground.
    fig = mlab.figure(size=(1378, 720), bgcolor=(0., 0., 0.), fgcolor=(1., 1., 1.))
    vol = return_vrend(f, fig, otf, ctf)
    mlab.orientation_axes(figure=fig)
    mlab.show()
#test8()
# real data
def test9():
    """Volume-render the HDF5 density snapshot with a fixed camera view.

    Loads dataset 't=3072.' from ../rho3d.h5, converts to cgs, and renders
    it with mayavi's default transfer functions.
    """
    # --- loading data ---
    dfile = h5.File('../rho3d.h5', 'r')
    print(dfile.attrs.keys())
    print(dfile.attrs['mass ratio'])
    print(dfile['t=3072.'].attrs.keys())
    #
    dset = dfile['t=3072.']
    xyz = dset.attrs['grid']
    dx = dset.attrs['dx']
    rho_cgs = dset[:] * 6.176269145886166e+17
    # NOTE(review): unlike test10, no np.log10 is applied here, yet
    # vmin=6/vmax=13 below look like log10 bounds -- verify intent.
    ''' plotting '''
    fig = reset()
    sc = mlab.pipeline.scalar_field(rho_cgs.T)
    sc.origin = (-100.,-100.,0.)
    dx = dset.attrs['dx']
    sc.spacing = (dx[0],dx[1],dx[2])
    sc.scalar_name='rho_xyz'
    def get_view(sc, fig):
        # Render the volume and set a fixed 45/45 camera at distance 600.
        im = mlab.pipeline.volume(sc, vmin=6., vmax=13., figure=fig)
        mlab.view(azimuth=45.,
                  elevation=45., distance=600.,
                  focalpoint=[0, 0, 0],
                  figure=None)
        mlab.orientation_axes(figure=fig)
        return im
    get_view(sc, fig)
    mlab.show()
#test9()
def get_ctf(cmap='jet_r', smpls=50, crange=[0.,1.]):
    """Build a tvtk ColorTransferFunction by sampling a matplotlib colormap.

    Args:
        cmap (str): name of the matplotlib colormap to sample.
        smpls (int): number of discrete color samples.
        crange (list): [min, max] data range the colors are mapped onto.

    Returns:
        ColorTransferFunction spanning ``crange``.
    """
    # Initialize the color transfer function and set the range
    ctf = ColorTransferFunction()
    ctf.range = crange
    # BUGFIX: the original hard-coded 'jet_r' here, silently ignoring the
    # `cmap` argument; the requested colormap is now honoured.
    cm = plt.get_cmap(cmap, smpls)
    ik = np.arange(0, smpls)
    # colors (drop the alpha channel)
    ck = cm(ik)[:, :3]
    # vertexes, normalized to [0, 1]
    vk = ik / float(ik[-1])
    clrs = [(v, tuple(c)) for v, c in zip(vk, ck)]
    for v, (r, g, b) in clrs:
        # Place each sampled color at its position within crange.
        ctf.add_rgb_point(crange[0] + v * (crange[1] - crange[0]), r, g, b)
    #
    return ctf
def get_otf(centers, opacs, widths, smpls=50,orange=[0.,1.]):
    """Build a tvtk PiecewiseFunction opacity curve as a sum of Gaussians.

    Each (center, opacity, width) triple contributes one Gaussian bump;
    the resulting curve is sampled at `smpls` points across `orange`.
    """
    sample_points = np.linspace(orange[0], orange[1], smpls)
    # Superpose one Gaussian bump per requested (center, opacity, width).
    opacity_curve = np.zeros_like(sample_points)
    for center, opacity, width in zip(centers, opacs, widths):
        opacity_curve += opacity * np.exp(-((sample_points - center) / width) ** 2)
    # Convert the sampled curve into a piecewise opacity transfer function.
    otf = PiecewiseFunction()
    for value, alpha in zip(sample_points, opacity_curve):
        otf.add_point(value, alpha)
    return otf
def vol_rend(data_arr, dx, fig, otf, ctf):
    """Volume-render `data_arr` with the given opacity/color transfer functions.

    Args:
        data_arr: 3-D scalar array (transposed before plotting).
        dx: 3-element grid spacing.
        fig: mayavi figure to draw into.
        otf: PiecewiseFunction controlling opacity.
        ctf: ColorTransferFunction controlling color.

    Returns:
        The mayavi volume object.

    NOTE(review): origin (-100, -100, 0) and vmin=6/vmax=13 are hard-coded
    for the log10-density use case in test10.
    """
    sc = mlab.pipeline.scalar_field(data_arr.T)
    sc.origin = (-100., -100., 0.)
    #dx = dset.attrs['dx']
    sc.spacing = (dx[0], dx[1], dx[2])
    sc.scalar_name = 'logf_xyz'
    vol = mlab.pipeline.volume(sc, vmin=6., vmax=13., figure=fig)
    # OTF
    vol._otf = otf
    vol._volume_property.set_scalar_opacity(otf)
    # CTF
    vol._volume_property.set_color(ctf)
    vol._ctf = ctf
    vol.update_ctf = True
    return vol
def test10():
    """Full pipeline demo: load the HDF5 density snapshot and volume-render
    log10(rho) using the get_ctf/get_otf/vol_rend helpers."""
    # --- loading data ---
    dfile = h5.File('../rho3d.h5', 'r')
    print(dfile.attrs.keys())
    print(dfile.attrs['mass ratio'])
    print(dfile['t=3072.'].attrs.keys())
    #
    dset = dfile['t=3072.']
    xyz = dset.attrs['grid']
    dx = dset.attrs['dx']
    rho_cgs = dset[:] * 6.176269145886166e+17
    rho_cgs = np.log10(rho_cgs)
    # rho_cgs = dset[:]
    print(rho_cgs.min(), rho_cgs.max())
    #
    # Opacity bumps at log10(rho) = 8, 10, 11, 13 -- denser gas more opaque.
    rho_range = [6., 13.]
    centers = [8., 10., 11., 13.]
    opacs = [0.2, 0.4, 0.6, 0.8]
    widths = [0.2, 0.2, 0.2, 0.2]
    ''' -- '''
    fig = reset()
    ctf = get_ctf(crange=rho_range)
    otf = get_otf(centers, opacs, widths, orange=rho_range)
    vol = vol_rend(rho_cgs, dx, fig, otf, ctf)
    mlab.orientation_axes(figure=fig)
    mlab.show()
test10() | [
"fizik.vlk@gmail.com"
] | fizik.vlk@gmail.com |
fab9b9152fd6ae066494c53f96f89fc94015fb05 | 2fb830b5e24ed9e4e1950305f2b06ce838248575 | /bin/django-admin.py | 0e31882af06571ae6cdaffc7aa4a5e3e7ae79962 | [] | no_license | Debashis-Gupta/Mnist_App | 7b0153d0c2d10d5db842f19c5dd6e505e543df93 | f3dcf20654da51fc7df6a88fd19f1488e60d8a8c | refs/heads/master | 2022-05-28T08:15:02.442210 | 2020-05-03T08:19:59 | 2020-05-03T08:19:59 | 260,858,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | #!/media/debashis/Working/Work/Mnist_App/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"debashisgutapapai@gmail.com"
] | debashisgutapapai@gmail.com |
5ca62a56dfac1f5838aef5ae3ba0e91d58c912c3 | f501a826a34a99fe00de05efdb14ed9a30c1147d | /keyboard_controls/keyboard_controls.py | bf083fbced85dbdc510a83fd691bf2e3bff74742 | [] | no_license | kgandhi09/Indoor_Autonomous_Drone_Flight | 7de482fdb4588fad7507d9e1ffe21fe2f2c0d498 | 84180628d9cab77bad43ed623f674354f17f7d88 | refs/heads/master | 2021-06-22T14:31:27.564336 | 2021-01-31T04:52:26 | 2021-01-31T04:52:26 | 189,007,579 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,225 | py | from __future__ import print_function
from dronekit import connect, VehicleMode
import time
from pymavlink import mavutil
import sys, os
from optparse import OptionParser
import Tkinter as tk
import argparse
parser = argparse.ArgumentParser(
description='Example showing how to set and clear vehicle channel-override information.')
parser.add_argument('--connect',
help="vehicle connection target string. If not specified, SITL automatically started and used.")
parser.add_argument('--baudrate',
help="Specify the baudrate of controller's serial port used for companion aircraft.")
parser.add_argument('--aircraft',
help="Specify the location to save the logs.")
args = parser.parse_args()
connection_string = args.connect
sitl = None
# Start SITL if no connection string specified
if not connection_string:
import dronekit_sitl
sitl = dronekit_sitl.start_default()
connection_string = sitl.connection_string()
# Connect to the Vehicle
print('Connecting to vehicle on: %s' % connection_string)
vehicle = connect(connection_string, baud=921600, wait_ready=True)
vehicle.armed = True
time.sleep(0.5)
vehicle.channels.overrides[3] = 1040 # Throttle
vehicle.channels.overrides[2] = 1499 # pitch
vehicle.channels.overrides[1] = 1502 # roll
amt = 100
amt_2 = 30
m = 0
def print_fn_1(num):
    """Print throttle percentage plus the current pitch/roll PWM overrides.

    Args:
        num: throttle percentage to display alongside the raw channel value.
    """
    # Channel map per the startup overrides: 3 = throttle, 2 = pitch,
    # 1 = roll.  BUGFIX: the original printed pitch/roll from the wrong
    # channels (labels were swapped).
    print("\nThrottle = " + str(num) + "% - " + str(vehicle.channels.overrides[3]))
    print("Pitch value - " + str(vehicle.channels.overrides[2]))
    print('Roll value - ' + str(vehicle.channels.overrides[1]))
def print_fn_2():
    """Print the current throttle/pitch/roll PWM overrides."""
    # Channel map per the startup overrides: 3 = throttle, 2 = pitch,
    # 1 = roll.  BUGFIX: the original printed pitch/roll from the wrong
    # channels (labels were swapped).
    print("Throttle - " + str(vehicle.channels.overrides[3]))
    print('Pitch value - ' + str(vehicle.channels.overrides[2]))
    print('Roll value - ' + str(vehicle.channels.overrides[1]))
def key_press(event):
    """Tk <KeyPress> handler mapping keys to RC channel overrides.

    Channel map (matching the startup overrides): 1 = roll, 2 = pitch,
    3 = throttle.  'k' is the kill switch, digit keys select a throttle
    level, and the arrow keys nudge pitch/roll.  The module-level flag
    ``m`` latches to 1 while an arrow key is held; key_down() recenters
    the sticks and clears it on release.
    """
    # BUGFIX: 'global m' must precede any use of m in this function --
    # declaring it after a read is a SyntaxError on Python 3 (and a
    # SyntaxWarning on Python 2).  One declaration here replaces the
    # repeated per-branch declarations.
    global m
    if m == 0:
        if event.char == event.keysym:  # ----------- standard-keys
            if event.keysym == 'k':
                # Kill switch: throttle to minimum, pitch/roll recentered.
                vehicle.channels.overrides[3] = 1000
                vehicle.channels.overrides[2] = 1499  # pitch
                vehicle.channels.overrides[1] = 1502  # roll
                print("kill")
                # BUGFIX: pitch is channel 2 and roll is channel 1 (see the
                # startup overrides); the original printed them swapped.
                print("\nThrottle value - " + str(vehicle.channels.overrides[3]))
                print('Pitch value - ' + str(vehicle.channels.overrides[2]))
                print('Roll value - ' + str(vehicle.channels.overrides[1]))
            elif event.keysym == '1':
                vehicle.channels.overrides[3] = 1040
                print_fn_1(4)
            elif event.keysym == '2':
                vehicle.channels.overrides[3] = 1045
                print_fn_1(4.5)
            elif event.keysym == '3':
                vehicle.channels.overrides[3] = 1050
                print_fn_1(5)
            elif event.keysym == '4':
                vehicle.channels.overrides[3] = 1070
                print_fn_1(7)
            elif event.keysym == '5':
                vehicle.channels.overrides[3] = 1090
                print_fn_1(9)
            elif event.keysym == '6':
                vehicle.channels.overrides[3] = 1100
                print_fn_1(10)
            elif event.keysym == '7':
                vehicle.channels.overrides[3] = 1120
                print_fn_1(12)
            elif event.keysym == '8':
                vehicle.channels.overrides[3] = 1140
                print_fn_1(14)
            elif event.keysym == '9':
                vehicle.channels.overrides[3] = 1160
                print_fn_1(16)
            elif event.keysym == '0':
                vehicle.channels.overrides[3] = 1180
                print_fn_1(18)
        else:
            # Arrow keys nudge pitch (channel 2) / roll (channel 1) by
            # amt_2 and set the latch so key_down() recenters on release.
            if event.keysym == 'Up':
                vehicle.channels.overrides[2] -= amt_2
                print("\nForward")
                print_fn_2()
                m = 1
            elif event.keysym == 'Down':
                vehicle.channels.overrides[2] += amt_2
                print("\nBackward")
                print_fn_2()
                m = 1
            elif event.keysym == 'Left':
                vehicle.channels.overrides[1] -= amt_2
                print("\nLeft")
                print_fn_2()
                m = 1
            elif event.keysym == 'Right':
                vehicle.channels.overrides[1] += amt_2
                print("\nRight")
                print_fn_2()
                m = 1
def key_down(event):
    """Tk <KeyRelease> handler: recenter pitch/roll after an arrow key."""
    # BUGFIX: 'global m' must precede any use of m (SyntaxError on Py3 if
    # declared after a read).
    global m
    if m == 1:
        # Restore the stick centers used at startup: pitch (channel 2) =
        # 1499, roll (channel 1) = 1502.  BUGFIX: the original wrote 1499
        # to channel 1 and 1502 to channel 2, i.e. the channels swapped.
        vehicle.channels.overrides[2] = 1499
        vehicle.channels.overrides[1] = 1502
        print('\nThrottle value - ' + str(vehicle.channels.overrides[3]))
        print('Pitch value - ' + str(vehicle.channels.overrides[2]))
        print('Roll value - ' + str(vehicle.channels.overrides[1]))
        m = 0
def quit():
    """Stop the Tk mainloop of the module-level root window."""
    # Calling a method on the global does not require a `global` statement.
    root.quit()
# - Read the keyboard with tkinter
root = tk.Tk()
print(">> Control the drone with the arrow keys. Press r for RTL mode")
root.bind('<KeyPress>', key_press)
root.bind('<KeyRelease>', key_down)
#root.bind_all('<Key>', key)
tk.Button(root, text="Quit", command=root.destroy).pack()
root.mainloop()
| [
"kushal009gandhi@gmail.com"
] | kushal009gandhi@gmail.com |
61f0e3c7d062d468b12338437a3f16af28a3d8c0 | ed3bee58910af21d0e8593586d6bb144f8c124b9 | /vendor/github.com/containers-ai/api/datahub/resource/pod/assign/v1alpha2/assign_pb2.py | 4dc1f7024f2b1b2e153c403c2d921116da75df56 | [
"Apache-2.0"
] | permissive | ekko771/alameda | f3b4575fdfffd2be87a9441f4ce815902786b157 | 591488ad7d194c14034ccd6c6c7a86744aaf868d | refs/heads/master | 2020-05-20T19:18:56.348733 | 2019-05-07T10:17:39 | 2019-05-07T10:17:39 | 185,723,143 | 0 | 0 | Apache-2.0 | 2019-05-09T03:56:36 | 2019-05-09T03:56:34 | null | UTF-8 | Python | false | true | 5,758 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: datahub/resource/pod/assign/v1alpha2/assign.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='datahub/resource/pod/assign/v1alpha2/assign.proto',
package='containersai.datahub.resource.pod.assign.v1alpha2',
syntax='proto3',
serialized_options=_b('ZAgithub.com/containers-ai/api/datahub/resource/pod/assign/v1alpha2'),
serialized_pb=_b('\n1datahub/resource/pod/assign/v1alpha2/assign.proto\x12\x31\x63ontainersai.datahub.resource.pod.assign.v1alpha2\"\x1d\n\x0cNodePriority\x12\r\n\x05nodes\x18\x01 \x03(\t\"\x98\x01\n\x08Selector\x12[\n\x08selector\x18\x01 \x03(\x0b\x32I.containersai.datahub.resource.pod.assign.v1alpha2.Selector.SelectorEntry\x1a/\n\rSelectorEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x43ZAgithub.com/containers-ai/api/datahub/resource/pod/assign/v1alpha2b\x06proto3')
)
_NODEPRIORITY = _descriptor.Descriptor(
name='NodePriority',
full_name='containersai.datahub.resource.pod.assign.v1alpha2.NodePriority',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='nodes', full_name='containersai.datahub.resource.pod.assign.v1alpha2.NodePriority.nodes', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=104,
serialized_end=133,
)
_SELECTOR_SELECTORENTRY = _descriptor.Descriptor(
name='SelectorEntry',
full_name='containersai.datahub.resource.pod.assign.v1alpha2.Selector.SelectorEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='containersai.datahub.resource.pod.assign.v1alpha2.Selector.SelectorEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='containersai.datahub.resource.pod.assign.v1alpha2.Selector.SelectorEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=241,
serialized_end=288,
)
_SELECTOR = _descriptor.Descriptor(
name='Selector',
full_name='containersai.datahub.resource.pod.assign.v1alpha2.Selector',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='selector', full_name='containersai.datahub.resource.pod.assign.v1alpha2.Selector.selector', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SELECTOR_SELECTORENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=136,
serialized_end=288,
)
_SELECTOR_SELECTORENTRY.containing_type = _SELECTOR
_SELECTOR.fields_by_name['selector'].message_type = _SELECTOR_SELECTORENTRY
DESCRIPTOR.message_types_by_name['NodePriority'] = _NODEPRIORITY
DESCRIPTOR.message_types_by_name['Selector'] = _SELECTOR
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NodePriority = _reflection.GeneratedProtocolMessageType('NodePriority', (_message.Message,), dict(
DESCRIPTOR = _NODEPRIORITY,
__module__ = 'datahub.resource.pod.assign.v1alpha2.assign_pb2'
# @@protoc_insertion_point(class_scope:containersai.datahub.resource.pod.assign.v1alpha2.NodePriority)
))
_sym_db.RegisterMessage(NodePriority)
Selector = _reflection.GeneratedProtocolMessageType('Selector', (_message.Message,), dict(
SelectorEntry = _reflection.GeneratedProtocolMessageType('SelectorEntry', (_message.Message,), dict(
DESCRIPTOR = _SELECTOR_SELECTORENTRY,
__module__ = 'datahub.resource.pod.assign.v1alpha2.assign_pb2'
# @@protoc_insertion_point(class_scope:containersai.datahub.resource.pod.assign.v1alpha2.Selector.SelectorEntry)
))
,
DESCRIPTOR = _SELECTOR,
__module__ = 'datahub.resource.pod.assign.v1alpha2.assign_pb2'
# @@protoc_insertion_point(class_scope:containersai.datahub.resource.pod.assign.v1alpha2.Selector)
))
_sym_db.RegisterMessage(Selector)
_sym_db.RegisterMessage(Selector.SelectorEntry)
DESCRIPTOR._options = None
_SELECTOR_SELECTORENTRY._options = None
# @@protoc_insertion_point(module_scope)
| [
"kuofu.huang@prophetstor.com"
] | kuofu.huang@prophetstor.com |
43443f553df06b95adf368e7f0b43b23b40a19bb | dc829066685d3764208f2a524ee4d58bca0f4d7f | /mysite/settings.py | 5f26fc3c109bfa3badb911d610e773e5746b431c | [] | no_license | deepmala-budhija/my-first-blog | 4247868ebf464a6a47e9dede36e8c6da2be7f60d | 5abea38571b71ab49a40346c3b60b811b43c5d01 | refs/heads/master | 2022-11-20T12:28:45.060294 | 2020-07-24T11:19:34 | 2020-07-24T11:19:34 | 279,516,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,545 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.14.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this secret key is committed to source control -- rotate it
# and load it from an environment variable before any production deployment.
SECRET_KEY = '9(ujb5mpeg2ae_v1idx#ccmos$&a9xywwnqy!tf@sfkn=s!2t6'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True exposes stack traces and settings to visitors.
DEBUG = True
#ALLOWED_HOSTS = []
# Hosts the site may be served from (local dev + any PythonAnywhere domain).
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap_datepicker_plus',
'blog.apps.BlogConfig',
'MFS.apps.MFSConfig',
'widget_tweaks',
'crudapp',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# NOTE(review): database credentials are hard-coded (root user, plain-text
# password) -- move them to environment variables for anything beyond a
# throwaway local setup.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'mfs',
        'HOST': '127.0.0.1',
        'PORT': '3306',
        'USER': 'root',
        'PASSWORD': '123456',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Use BOOTSTRAP3 if you are using Bootstrap 3
BOOTSTRAP3 = {
'include_jquery': True,
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"deepmala_b@yahoo.com"
] | deepmala_b@yahoo.com |
7714909e86d7cb824a84edc6d8ced3422f107600 | 54d17336ca03801bd9c9ef37be8642b332ab71c4 | /osm/SO/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/engine.py | 3bd2645aeb390746fcbf31d24b4a18f0fad50d0f | [] | no_license | dennis-me/Pishahang | 2428379c4f7d3ee85df4b85727ce92e8fe69957a | cdd0abe80a76d533d08a51c7970d8ded06624b7d | refs/heads/master | 2020-09-07T12:35:54.734782 | 2020-01-24T20:11:33 | 2020-01-24T20:11:33 | 220,782,212 | 2 | 0 | null | 2019-11-10T11:46:44 | 2019-11-10T11:46:43 | null | UTF-8 | Python | false | false | 15,508 | py |
#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import asyncio
import time
import numpy
from . import scaling_operation
from . import subscribers as monp_subscriber
from gi.repository import RwDts as rwdts
import rift.mano.dts as subscriber
class TimeSeries:
    """Sliding window of (timestamp, value) samples.

    On every insertion, samples older than ``threshold_time`` seconds
    (relative to the newest timestamp) are evicted.
    """
    def __init__(self, threshold_time):
        """
        Args:
            threshold_time (int): window size in secs
        """
        # Row 0 holds the timestamps, row 1 the sampled values.
        self._series = numpy.array([[], []], dtype='int64')
        self.threshold_time = threshold_time
    def add_value(self, timestamp, value):
        """Append one sample and drop everything outside the window."""
        timestamp = int(timestamp)
        sample = [[timestamp], [value]]
        self._series = numpy.concatenate((self._series, sample), axis=1)
        # Keep only the columns whose timestamp is still inside the window.
        fresh = self._series[0] >= (timestamp - self.threshold_time)
        self._series = self._series[:, fresh]
    def average(self):
        """Mean of the values currently retained in the window."""
        return numpy.average(self._series[1])
    def is_window_full(self):
        """Return True once the retained samples span the whole window."""
        stamps = self._series[0]
        if len(stamps) < 2:
            return False
        return bool(stamps[-1] - stamps[0] >= self.threshold_time)
class ScalingCriteria:
    """Tracks a single scaling criterion of a scaling policy.

    Subscribes to the NSR monitoring parameter identified by ``monp_id``,
    folds the published values into a sliding :class:`TimeSeries` window
    and notifies ``delegate`` once the windowed average crosses the
    criterion's scale-in / scale-out thresholds.
    """
    class Delegate:
        """Delegate: callbacks triggered by ScalingCriteria
        """
        @abc.abstractmethod
        def threshold_out_breached(self, criteria_name, avg_value):
            """Called when the value has crossed the scale-out-threshold
            Args:
                criteria_name (str): Criteria name
                avg_value (float): The average value of the window.
            """
            pass
        @abc.abstractmethod
        def threshold_in_breached(self, criteria_name, avg_value):
            """Called when the value drops below the scale-in-threshold
            Args:
                criteria_name (str): Criteria name
                avg_value (float): The average value of the window.
            """
            pass
    def __init__(
            self,
            log,
            dts,
            loop,
            project,
            nsr_id,
            monp_id,
            scaling_criteria,
            window_size,
            sampling_period=1,
            delegate=None):
        """
        Args:
            log : Log
            dts : DTS handle
            loop : Event Handle
            project : Project instance (forwarded to the subscriber)
            nsr_id (str): NSR ID
            monp_id (str): Monitoring parameter
            scaling_criteria : Yang data model
            window_size (int): Length of the window in secs
            sampling_period (int): Stored, but not otherwise used here.
            delegate : ScalingCriteria.Delegate
        """
        self.log = log
        self.dts = dts
        self.loop = loop
        self.sampling_period = sampling_period
        self.window_size = window_size
        self.delegate = delegate
        self.nsr_id, self.monp_id = nsr_id, monp_id
        self._scaling_criteria = scaling_criteria
        self._timeseries = TimeSeries(self.window_size)
        # Flag when set, triggers scale-in request.
        self._scl_in_limit_enabled = False
        # Monitoring-parameter subscription; add_value is invoked on
        # every published sample.
        self.nsr_monp_sub = monp_subscriber.NsrMonParamSubscriber(
            self.log,
            self.dts,
            self.loop,
            project,
            self.nsr_id,
            self.monp_id,
            callback=self.add_value)
    @property
    def name(self):
        # Criterion name as configured in the Yang model.
        return self._scaling_criteria.name
    @property
    def scale_in(self):
        # Scale-in threshold from the Yang model.
        return self._scaling_criteria.scale_in_threshold
    @property
    def scale_out(self):
        # Scale-out threshold from the Yang model.
        return self._scaling_criteria.scale_out_threshold
    @asyncio.coroutine
    def register(self):
        yield from self.nsr_monp_sub.register()
    def deregister(self):
        self.nsr_monp_sub.deregister()
    def trigger_action(self, timestamp, avg):
        """Triggers the scale out/in
        Args:
            timestamp : time in unix epoch
            avg : Average of all the values in the window size.
        NOTE(review): the comparisons recompute the window average, but the
        ``avg`` value forwarded to the delegate is whatever the caller
        passed in (add_value passes the latest raw sample) -- confirm
        which value the delegate is meant to receive.
        """
        if self._timeseries.average() >= self.scale_out:
            self.log.info("Triggering a scaling-out request for the criteria {}".format(
                self.name))
            self.delegate.threshold_out_breached(self.name, avg)
        elif self._timeseries.average() < self.scale_in :
            self.log.info("Triggering a scaling-in request for the criteria {}".format(
                self.name))
            self.delegate.threshold_in_breached(self.name, avg)
    def add_value(self, monp, action):
        """Callback from NsrMonParamSubscriber
        Args:
            monp : Yang model
            action : rwdts.QueryAction
        """
        if action == rwdts.QueryAction.DELETE:
            return
        value = monp.value_integer
        timestamp = time.time()
        self._timeseries.add_value(timestamp, value)
        # Only act once the sliding window holds a full span of samples.
        if not self._timeseries.is_window_full():
            return
        self.log.debug("Sufficient sampling data obtained for criteria {}."
                       "Checking the scaling condition for the criteria".format(
                           self.name))
        if not self.delegate:
            return
        # Note: forwards the instantaneous `value` as the `avg` argument.
        self.trigger_action(timestamp, value)
class ScalingPolicy(ScalingCriteria.Delegate):
    """Aggregates the criteria of one scaling policy for an NSR.

    Creates a :class:`ScalingCriteria` monitor per matching monitoring
    parameter, combines their breach statuses with the policy's AND/OR
    operation and asks ``delegate`` to scale the group in or out,
    honouring the policy's cooldown period.
    """
    class Delegate:
        @abc.abstractmethod
        def scale_in(self, scaling_group_name, nsr_id, instance_id):
            """Delegate called when all the criteria for scaling-in are met.
            Args:
                scaling_group_name (str): Scaling group ref
                nsr_id (str): NSR id
                instance_id: Scaling instance to remove
            """
            pass
        @abc.abstractmethod
        def scale_out(self, scaling_group_name, nsr_id):
            """Delegate called when all the criteria for scaling-out are met.
            Args:
                scaling_group_name (str): Scaling group ref
                nsr_id (str): NSR id
            """
            pass
    def __init__(
            self,
            log,
            dts,
            loop,
            project,
            nsr_id,
            nsd_id,
            scaling_group_name,
            scaling_policy,
            store,
            delegate=None):
        """
        Args:
            log : Log
            dts : DTS handle
            loop : Event loop
            project : Project instance
            nsr_id (str): NSR id
            nsd_id (str): NSD id
            scaling_group_name (str): Scaling group ref
            scaling_policy : Yang model
            store (SubscriberStore): Subscriber store instance
            delegate (None, optional): ScalingPolicy.Delegate
        """
        self.loop = loop
        self.log = log
        self.dts = dts
        self.project = project
        self.nsd_id = nsd_id
        self.nsr_id = nsr_id
        self.scaling_group_name = scaling_group_name
        self._scaling_policy = scaling_policy
        self.delegate = delegate
        self.store = store
        # Fires handle_nsr_monp for every NSR monitoring parameter update.
        self.monp_sub = monp_subscriber.NsrMonParamSubscriber(
            self.log,
            self.dts,
            self.loop,
            self.project,
            self.nsr_id,
            callback=self.handle_nsr_monp)
        self.nsr_scale_sub = monp_subscriber.NsrScalingGroupRecordSubscriber(
            self.log,
            self.dts,
            self.loop,
            self.project,
            self.nsr_id,
            self.scaling_group_name)
        # monp id -> ScalingCriteria monitor created for it.
        self.criteria_store = {}
        # Timestamp at which the scale-in/scale-out request was generated.
        self._last_triggered_time = None
        # Per-criterion breach flags, keyed by criterion name.
        self.scale_in_status = {cri.name: False for cri in self.scaling_criteria}
        self.scale_out_status = {cri.name: False for cri in self.scaling_criteria}
        # Number of currently scaled-out instances tracked by this policy.
        self.scale_out_count = 0
    def get_nsd_monp_cfg(self, nsr_monp):
        """Get the NSD's mon-param config.
        """
        nsd = self.store.get_nsd(self.nsd_id)
        for monp in nsd.monitoring_param:
            if monp.id == nsr_monp.nsd_mon_param_ref:
                return monp
    def handle_nsr_monp(self, monp, action):
        """Callback for NSR mon-param handler.
        Args:
            monp : Yang Model
            action : rwdts.QueryAction
        """
        def handle_create():
            # Lazily builds a ScalingCriteria monitor per matching
            # criterion when its monitoring parameter first appears.
            if monp.id in self.criteria_store:
                return
            nsd_monp = self.get_nsd_monp_cfg(monp)
            for cri in self.scaling_criteria:
                if cri.ns_monitoring_param_ref != nsd_monp.id:
                    continue
                # Create a criteria object as soon as the first monitoring data
                # is published.
                self.log.debug("Created a ScalingCriteria monitor for {}".format(
                    cri.as_dict()))
                criteria = ScalingCriteria(
                    self.log,
                    self.dts,
                    self.loop,
                    self.project,
                    self.nsr_id,
                    monp.id,
                    cri,
                    self.threshold_time, # window size
                    delegate=self)
                self.criteria_store[monp.id] = criteria
                @asyncio.coroutine
                def task():
                    yield from criteria.register()
                self.loop.create_task(task())
        def handle_delete():
            # Tear down the criteria monitor when its mon-param goes away.
            if monp.id in self.criteria_store:
                self.criteria_store[monp.id].deregister()
                del self.criteria_store[monp.id]
        if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
            handle_create()
        elif action == rwdts.QueryAction.DELETE:
            handle_delete()
    @property
    def scaling_criteria(self):
        return self._scaling_policy.scaling_criteria
    @property
    def scale_in_op(self):
        # AND/OR combinator applied to the per-criterion scale-in flags.
        optype = self._scaling_policy.scale_in_operation_type
        return scaling_operation.get_operation(optype)
    @property
    def scale_out_op(self):
        # AND/OR combinator applied to the per-criterion scale-out flags.
        optype = self._scaling_policy.scale_out_operation_type
        return scaling_operation.get_operation(optype)
    @property
    def name(self):
        return self._scaling_policy.name
    @property
    def threshold_time(self):
        return self._scaling_policy.threshold_time
    @property
    def cooldown_time(self):
        return self._scaling_policy.cooldown_time
    @asyncio.coroutine
    def register(self):
        yield from self.monp_sub.register()
        yield from self.nsr_scale_sub.register()
    def deregister(self):
        self.monp_sub.deregister()
    def _is_in_cooldown(self):
        """Verify if the current policy is in cooldown.
        """
        if not self._last_triggered_time:
            return False
        if (time.time() - self._last_triggered_time) >= self.cooldown_time:
            return False
        return True
    def can_trigger_action(self):
        # A scaling action is allowed only outside the cooldown window.
        if self._is_in_cooldown():
            self.log.debug("In cooldown phase ignoring the scale action ")
            return False
        return True
    def threshold_in_breached(self, criteria_name, value):
        """Delegate callback when scale-in threshold is breached
        Args:
            criteria_name : Criteria name
            value : Average value
        """
        self.log.debug("Avg value {} has fallen below the threshold limit for "
                       "{}".format(value, criteria_name))
        if not self.can_trigger_action():
            return
        # Nothing to scale in if we never scaled out.
        if self.scale_out_count < 1:
            self.log.debug('There is no scaled-out VNFs at this point. Hence ignoring the scale-in')
            return
        self.scale_in_status[criteria_name] = True
        # NOTE(review): this log line reports scale_out_op/scale_out_status,
        # but the decision below uses scale_in_op over scale_in_status --
        # the logged operation/statuses look like a copy-paste slip.
        self.log.info("Applying {} operation to check if all criteria {} for"
                      " scale-in-threshold are met".format(
                          self.scale_out_op,
                          self.scale_out_status))
        statuses = self.scale_in_status.values()
        is_breached = self.scale_in_op(statuses)
        if is_breached and self.delegate:
            self.log.info("Triggering a scale-in action for policy {} as "
                          "all criteria have been met".format(self.name))
            @asyncio.coroutine
            def check_and_scale_in():
                # data = yield from self.nsr_scale_sub.data()
                # if len(data) <= 1:
                #     return
                # # Get an instance ID
                # instance_id = data[-1].instance_id
                instance_id = 0 #assigning a value to follow existing scale_in signature
                self._last_triggered_time = time.time()
                self.scale_out_count -= 1
                # Reset all statuses
                self.scale_in_status = {cri.name: False for cri in self.scaling_criteria}
                self.delegate.scale_in(self.scaling_group_name, self.nsr_id, instance_id)
            self.loop.create_task(check_and_scale_in())
    def threshold_out_breached(self, criteria_name, value):
        """Delegate callback when scale-out threshold is breached.
        Args:
            criteria_name : Criteria name
            value : Average value
        """
        self.log.debug("Avg value {} has gone above the threshold limit for "
                       "{}".format(value, criteria_name))
        if not self.can_trigger_action():
            return
        self.scale_out_status[criteria_name] = True
        self.log.info("Applying {} operation to check if all criteria {} for"
                      " scale-out-threshold are met".format(
                          self.scale_out_op,
                          self.scale_out_status))
        statuses = self.scale_out_status.values()
        is_breached = self.scale_out_op(statuses)
        if is_breached and self.delegate:
            self.log.info("Triggering a scale-out action for policy {} as "
                          "all criteria have been met".format(self.name))
            self._last_triggered_time = time.time()
            self.scale_out_count += 1
            # Reset all statuses
            self.scale_out_status = {cri.name: False for cri in self.scaling_criteria}
            self.delegate.scale_out(self.scaling_group_name, self.nsr_id)
| [
"github@OrangeOnBlack.de"
] | github@OrangeOnBlack.de |
eec3ab7a9816f277ff12c71fc866588e7d9373cc | fffa54dd2284c45b4eb5565599c2a89c2e62076a | /server.py | 2387afb7622579196d484abc3f522ecaf189b942 | [] | no_license | BeeNeal/project_skeleton | b3a7a9b21212ab5ffaa2bf9d0eeb9fb77dc3e9f7 | ea5f02c15a1282308c294df07e187620c043c907 | refs/heads/master | 2020-03-16T17:04:24.069400 | 2018-05-09T21:44:14 | 2018-05-09T21:44:14 | 132,811,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | """ Project Skeleton Server - happy building!"""
import sys
import os
from jinja2 import StrictUndefined
from flask import (Flask, jsonify, render_template, redirect, request,
flash, session)
from flask_debugtoolbar import DebugToolbarExtension
# if want to source API key into environment every time, do this
# API_KEY = os.environ['API_KEY']
app = Flask(__name__)
# Session-signing key; fine for a skeleton.
# NOTE(review): replace with a real secret before production use.
app.secret_key = "skeleton_key"
# Fail loudly on undefined Jinja2 variables instead of rendering blanks.
app.jinja_env.undefined = StrictUndefined
@app.route('/')
def index():
    """Display Homepage."""
    return render_template("homepage.html")
if __name__ == "__main__":
    # Debug mode: auto-reload plus in-browser tracebacks.
    app.debug = True
    # connect_to_db(app)
    # Use the DebugToolbar
    # DebugToolbarExtension(app)
app.run(host="0.0.0.0") | [
"brittanyneal22@gmail.com"
] | brittanyneal22@gmail.com |
0d570057aec6a008be56456cb3be0340ae17d1a7 | 4d66dca638061a7e627e06760e45adb58ccf0325 | /draft.py | 614e04823b6d046d16e1d0c9199c3783ed2538ca | [] | no_license | delayzzz/ansys | 864f2021f560ab646762781dd576f483c9eac4ea | bc1b40bf898afe2b97718d8a7d77963a6c0a7efb | refs/heads/master | 2023-02-15T06:19:12.434857 | 2021-01-10T16:21:50 | 2021-01-10T16:21:50 | 281,868,112 | 0 | 0 | null | 2020-07-23T07:03:50 | 2020-07-23T06:19:50 | Python | UTF-8 | Python | false | false | 52 | py | import numpy as np
import sys
import os
print(test) | [
"delayzyc@163.com"
] | delayzyc@163.com |
51be068605faf7c3f8cf9699305d3ddf730f750b | a01099ad3b57e3ba58dcaa982d42192d0c7bc465 | /lab2.py | 1da5bdee680b0df8e3ad7593d0a16d6a5cc0390d | [] | no_license | pradumna123/Decision-tree-for-authorship-prediction | 54f89c3d52273bb9edc98a9c7f0d15bbe4b5df09 | d3bfc6c356b1c4304bec6bb02f143f11393a8b02 | refs/heads/master | 2020-07-25T02:06:16.826534 | 2019-09-14T15:48:47 | 2019-09-14T15:48:47 | 208,125,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,848 | py | import create_a_tree_from_file
import perceptron_model_gen
import processa250data as pp
import sys
"""
class used to run models on test _data
"""
def read_user_file(filename):
    """Read a text file and return its whitespace-separated tokens.

    Args:
        filename (str): path of the file to read.
    Returns:
        list[str]: every whitespace-delimited token, in file order.
    """
    with open(filename, "r") as handle:
        contents = handle.read()
    # str.split() with no argument never yields empty strings, so the
    # truthiness filter keeps every token.
    return [token for token in contents.split() if token]
def process_file_andFsp_create_a_list(filename):
    """Parse a whitespace-separated data file into numeric rows.

    Lines that are exactly two characters long are taken to be the
    feature-count header (NOTE(review): this assumes the count is always
    two digits -- confirm the file format).  Every other line with more
    than one token becomes a row: tokens without a '.' are parsed as
    ints, the rest as floats rounded to 3 decimals.

    Returns:
        tuple(int, list[list]): (feature count, parsed rows).
    """
    data_list_of_list = []
    no_of_features = 0
    with open(filename, 'r') as f:
        data = f.read()
    list_temp = data.split("\n")
    # print(type(list_temp[1]))
    list_single_attribute = []
    # print("!!!!!!", list_temp[0], len(list_temp[0]))
    for i in list_temp:
        if len(i) == 2:
            # print(i)
            no_of_features = int(i)
        if len(i) > 1:
            templist = i.split(" ")
            # print("!!", templist)
            if len(templist) > 1:
                for att in templist:
                    if att != "":
                        # Tokens containing '.' are floats, others ints.
                        if att.find('.') == -1:
                            list_single_attribute.append((int(att)))
                        else:
                            list_single_attribute.append(round(float(att), 3))
                data_list_of_list.append(list_single_attribute)
                list_single_attribute = []
    return no_of_features, data_list_of_list
def sep_the_result_and_attr(data_list_of_list):
    """Split rows of [attributes..., label] into labels and attributes.

    Args:
        data_list_of_list: list of rows; the last element of each row is
            the label, everything before it the attribute vector.
    Returns:
        tuple(list, list): (labels, attribute rows), in input order.
    """
    labels = [row[-1] for row in data_list_of_list]
    attributes = [row[:-1] for row in data_list_of_list]
    return labels, attributes
def create_tree_obj(filename):
    """Build and return a decision tree loaded from ``filename``.

    Relies on the project module ``create_a_tree_from_file``; prints the
    tree in-order as a side effect.
    """
    a = create_a_tree_from_file.grow_tree(filename) # this is a class
    a.read_file(filename) # this is a function to read and create a dictionary of n nodes.
    # a.print_data()
    # g = a.get_max(a.Serial_no, 0, len(a.Serial_no) - 1)
    # print(g)
    a.root = a.make_tree(a.Serial_no, 0, len(a.Serial_no) - 1) # this function creates a tree.
    a.print_inorder(a.root)
    return a
def test_tree_obj(only_attr, result, tree_obj):
"""
returns accuracy
:param only_attr:
:param result:
:return:
"""
total = len(only_attr)
correct = 0
for i in range(len(only_attr)):
val = tree_obj.test(only_attr[i])
if val == result[i]:
correct += 1
return correct / total
def make_percept_model(file3):
    """Load and return a perceptron whose weights are stored in ``file3``."""
    a = perceptron_model_gen.Perceptron(test=True, w_file_name=file3)
    # test_file_name = "test_data.txt"
    a.worker()
    return a
def test_p_obj(only_attr, result, p_obj):
# print(only_attr)
# return
# print("!!!!!!!!!!", result)
total = len(only_attr)
correct = 0
for i in range(len(only_attr)):
p_obj.predict(only_attr[i])
lista = p_obj.res
temp = lista[0]
index = 0
for j in range(1, len(lista)):
if lista[j] > temp:
temp = lista[j]
index = j
# max_val = int(max_val[0])
index = index + 1
# index=index[0]
# print(lista, index, temp, type(index), type(temp), result[i])
# print(lista, max_val, index, result[i])
if index == result[i]:
# print(index, result[i])
correct += 1
return correct / total
def main():
    """Entry point: classify the document named in ``sys.argv[2]``.

    NOTE(review): raises IndexError when fewer than three CLI arguments
    are supplied, and the ``status == 1`` batch-evaluation branch is dead
    code because ``status`` is hard-coded to 2 below.
    """
    # print(len(sys.argv))
    file1 = sys.argv[2]
    # print("!!!", file1)
    # Class index -> author name used when printing predictions.
    dict_auth = {1: "arthur", 2: "Melville", 3: "Austen"}
    # status = int(input("enter a value \n 1 for using test_data_file \n 2 for using a test data from your side."))
    # status = 1
    status = 2
    if status == 1:
        # "we will test 2 models on a single test_data."
        file1 = 'test_data.txt'
        file2 = 'alpha.txt'
        file3 = 'p_weights2.txt'
        # file1 = input("enter file name for test_data")
        # file2 = input("enter a file for tree creation")
        # file3 = input("enter file name for perceptron creation")
        No_features, data_list = process_file_andFsp_create_a_list(file1)
        # for i in data_list:
        #     print(i)
        # print(data_list)
        result, only_attr = sep_the_result_and_attr(data_list)
        # create a object of tree
        tree_obj = create_tree_obj(file2)
        acc_tree = test_tree_obj(only_attr, result, tree_obj)
        print(acc_tree)
        P_obj = make_percept_model(file3)
        print(test_p_obj(only_attr, result, P_obj))
    if status == 2:
        # file1 = "delta"
        # file1 = input("enter file name for test_data")
        file2 = "alpha.txt"
        file3 = "p_weights2.txt"
        string_list = read_user_file(file1)
        obj_data = pp.info_on250words(string_list, 1, 1)
        vect = obj_data.return_a_vector_test()
        tree_obj = create_tree_obj(file2)
        val = tree_obj.test(vect)
        print("output of tree : ", dict_auth[val])
        #
        # P_obj = make_percept_model(file3)
        #
        # P_obj.predict(vect)
        # res = P_obj.res
        #
        # val1 = res[0]
        # index = 0
        #
        # for j in range(len(res)):
        #     if res[j] > val1:
        #         val1 = res[j]
        #         index = j
        # print("output of perceptron ", dict_auth[index + 1])
    # print(vect)
    # No_features, data_list = process_file_andFsp_create_a_list(file1)
    # result, only_attr = sep_the_result_and_attr(data_list)
    # print(only_attr)
    # val=tree_obj.test(only_attr[0])
    # print(val,result[0])
    # print(type(val), type(result[0]))
    # tree_obj.print_inorder(tree_obj.root)
    # tree_obj.print_data()
    # print(len(tree_obj.Serial_no))
main()
| [
"ps6275@gmail.com"
] | ps6275@gmail.com |
d32a1e73ad34e9246b555c6257064d618f6f94c2 | bdb300d0c07dafcefa9aeeb3e393cbe232e6b580 | /app/main/routes.py | 3355c7e6b8658dfc12d3129bd246ee61b1b18b84 | [] | no_license | nabeelshaikh91/pehgaam | d6fcec1f46f7db5bf9e839b1306fb21a5bc7b0a1 | 46d7827bbca946a0413bd4caaa4ac57251bafa46 | refs/heads/master | 2021-06-16T14:40:44.262089 | 2017-05-28T10:57:55 | 2017-05-28T10:57:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | from flask import session, redirect, url_for, render_template, request
from . import main
from .forms import LoginForm
@main.route('/', methods=['GET', 'POST'])
def index():
    """Login form to enter a group.

    A valid POST stores name/group in the session and redirects to the
    chat page; a GET pre-fills the form from any previous session.
    """
    form = LoginForm()
    if form.validate_on_submit():
        session['name'] = form.name.data
        session['group'] = form.group.data
        return redirect(url_for('.chat'))
    elif request.method == 'GET':
        # Pre-populate with the values from a previous visit, if any.
        form.name.data = session.get('name', '')
        form.group.data = session.get('group', '')
    return render_template('index.html', form=form)
@main.route('/chat')
def chat():
    """Render the chat page.

    The user's name and group must already be stored in the session;
    otherwise the user is sent back to the login form.
    """
    name = session.get('name', '')
    group = session.get('group', '')
    if name != '' and group != '':
        return render_template('chat.html', name=name, group=group)
    # Missing identity -> back to the login form.
    return redirect(url_for('.index'))
| [
"noreply@github.com"
] | nabeelshaikh91.noreply@github.com |
372c095ed0698ba58ec02f461f6ac2f7fd4c1965 | 25270c94477bb0e00cbd5d070ed1e7bbea04f9c2 | /classification/dataset.py | b355958efce6797cb838065083919a2c8b4c91db | [] | no_license | nvvaulin/icevision2019 | 610ff095bb247663b07dd00dfc46c690e3aa9f19 | 5eeb5122b1faab96ee7f3e7ff2ec871d9f3923b4 | refs/heads/master | 2022-10-30T21:41:05.681326 | 2019-07-15T13:52:01 | 2019-07-15T13:55:42 | 207,381,303 | 0 | 1 | null | 2022-10-21T10:50:28 | 2019-09-09T18:50:14 | Jupyter Notebook | UTF-8 | Python | false | false | 1,496 | py | import torch.utils.data
import numpy as np
import cv2
from PIL import Image
import torch
import json
import imageio
# Machine-specific absolute path to the class-name -> index mapping.
# NOTE(review): hard-coded to one workstation -- consider a config/env var.
REMAP = '/media/storage/vyurchenko/projects/ice/rep/classification/remapping.json'
# Size of the multi-label target vector produced per sample.
N_CLASSES=231
# Dataset split sizes; not referenced in this module's visible code.
N_TRAIN = 142838
N_VAL = 4278
class FuckingDataset(torch.utils.data.Dataset):
    """Detection-crop classification dataset.

    Each entry of the JSON file at ``path_to_dataset`` describes one
    image crop: an ``image`` path, a bounding box (``x0``/``y0``/``x1``/
    ``y1``), a dotted ``class`` string and an ``is_temporary`` flag.
    """
    def __init__(self, path_to_dataset, transforms):
        # Per-sample dicts (see class docstring for the keys used).
        with open(path_to_dataset) as f:
            self.data = json.load(f)
        # Maps every dotted class prefix to its index in the label vector.
        with open(REMAP) as f:
            self.remapping = json.load(f)
        self.transforms = transforms
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        # '.pnm' files are read raw and demosaiced; presumably Bayer data
        # from the capture device -- TODO confirm the BG pattern is right.
        if self.data[index]['image'].endswith('pnm'):
            im = imageio.imread(self.data[index]['image'])
            im = cv2.cvtColor(im, cv2.COLOR_BAYER_BG2RGB)
        else:
            # cv2 loads BGR; reversing the channel axis yields RGB.
            im = cv2.imread(self.data[index]['image'])[:,:,::-1]
        x0, y0 = self.data[index]['x0'], self.data[index]['y0']
        x1, y1 = self.data[index]['x1'], self.data[index]['y1']
        patch = im[y0:y1, x0:x1].copy()
        # Multi-label target: the class and all of its dotted prefixes are
        # switched on (e.g. 'a.b.c' marks 'a', 'a.b' and 'a.b.c').
        cl = self.data[index]['class'].split('.')
        cl = ['.'.join(cl[:i]) for i in range(1, len(cl)+1)]
        cl = [self.remapping[elem] for elem in cl]
        gt = np.zeros(N_CLASSES, np.float32)
        gt[cl] = 1.0
        patch = Image.fromarray(patch)
        patch = self.transforms(patch)
        gt = torch.Tensor(gt)
        return {'image': patch, "classes": gt,
                'is_temporary': self.data[index]['is_temporary']}
| [
"simflin@gmail.com"
] | simflin@gmail.com |
f705c228eae226560ee5b50be43a23dc4c224a7b | 7278717e86713f3abbcd74c03612dd54a25d33db | /tor/tor-exits | 516c110d3593c7ee73a29e1fd4d7160b3dd5dde1 | [] | no_license | DinoRatcliffe/dotfiles | 5161611e21158d7f330041fcf1c3b967227a3db0 | 19293c069044991f0e83a04093b45e5b2d8812a1 | refs/heads/master | 2021-06-07T16:07:06.203744 | 2019-11-26T15:11:18 | 2019-11-26T15:11:18 | 33,443,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | #!/usr/bin/python
from stem import Signal
import os
import time
from stem import CircStatus
from stem.control import Controller
import argparse
# Map of exit-relay IP address -> upper-cased country code, collected from
# the currently built circuits.
exits = {}
with Controller.from_port(port = 9051) as controller:
    controller.authenticate()
    for circ in controller.get_circuits():
        # Only fully built circuits have a usable exit relay.
        if circ.status != CircStatus.BUILT:
            continue
        exit_fp, exit_nickname = circ.path[-1]
        exit_desc = controller.get_network_status(exit_fp, None)
        exit_address = exit_desc.address if exit_desc else 'unknown' # get relay IP
        exit_location = controller.get_info('ip-to-country/%s' % exit_address, '').upper()
        exits[exit_address] = exit_location
parser = argparse.ArgumentParser(description='Outputs tor exit nodes')
parser.add_argument('-l', '--locations', action='store_true',
                    help="if set return location of exits")
parser.add_argument('-d', '--dedup', action='store_true',
                    help="if locations should be deduplicated")
parser.add_argument('-n', '--new', action='store_true',
                    help="indicates the script should signal new tor name")
args = parser.parse_args()
if args.new:
    # Ask tor for a fresh identity (new circuits) instead of printing.
    with Controller.from_port(port = 9051) as controller:
        controller.authenticate()
        controller.signal(Signal.NEWNYM)
else:
    values = exits.values() if args.locations else exits.keys()
    if args.dedup:
        values = set(values)
    # sorted() accepts dict views and sets alike; the original called
    # values.sort(), which raises AttributeError on dict views in Python 3
    # whenever --dedup was not given.
    print("\n".join(sorted(values)))
| [
"yo@dino.io"
] | yo@dino.io | |
1173bf1c29ea1a2276e04426c8e1bafc771c2b30 | e1feb73c9821fe2b6cb1de6f7ac7a25ef29639c3 | /train_val_split.py | e7562f6e0e92682e364df93fd3672b294d710d3c | [] | no_license | aadithmoorthy/cs155_miniproject_1 | 3d5d6f8b2dbcc30fe8eb7cad1bb3b88a1ef82bb3 | 2f6e54ae0d27453625d9a2920bc709f90b12d8f3 | refs/heads/master | 2021-03-19T06:02:43.880993 | 2018-02-09T06:57:43 | 2018-02-09T06:57:43 | 119,757,717 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | # repeatable train validation split for data; at initial stage, need a split
# so that we can avoid overfitting and train some hyperparameters
from sklearn.model_selection import train_test_split
import time
import numpy as np
t = time.time()
# First line of training_data.txt is a header; everything else is numeric.
data = np.loadtxt('training_data.txt', skiprows=1)
print(data.shape)
print('took', time.time()-t)
# NOTE(review): X and y are computed but never used below -- the split is
# done on the full rows so each half keeps its label column.
X = data[:,1:]
y = data[:,0]
# Fixed random_state makes the 95/5 split repeatable across runs.
data_tr, data_val = train_test_split(data, test_size=0.05, random_state = 42)
np.savetxt('data_val.txt', data_val, fmt="%d")
np.savetxt('data_tr.txt', data_tr, fmt="%d")
| [
"amoorthy@caltech.edu"
] | amoorthy@caltech.edu |
94485b47b5197c46ba938c2909f22c3494292725 | 27a990249b6c9c01c8f483ca0403c03259427515 | /data structure/NativeDictionary.py | d16bab2157f1e603642cfd621af38e52ac5528dc | [] | no_license | Barzabel/py | 2298bd108b3c157de433641c87363deb6ad942af | b67e34f837f43c1b3aa1c5f7ae35ff8ecf4ca3b0 | refs/heads/master | 2023-05-04T08:18:30.359224 | 2021-05-25T19:02:46 | 2021-05-25T19:02:46 | 277,311,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | class NativeDictionary:
def __init__(self, sz):
self.size = sz
self.slots = [None] * self.size
self.values = [None] * self.size
def __Hash(self,value):
a = str(value)
res = 0
for x in range(len(a)):
res = (res // 5 + ord(a[x])) * 13 + 7
return res % self.size
def hash_fun(self, key):
res = self.__Hash(key)
index = None
for x in range(0, self.size, 1):
if self.slots[(res + x) % self.size] == None or self.slots[(res + x) % self.size] == key:
index = (res + x) % self.size
break
return index
def is_key(self, key):
hash1 = self.__Hash(key)
for x in range(0, self.size, 1):
if self.slots[(hash1 + x) % self.size] == key:
return True
return False
def get(self, key):
hash1 = self.__Hash(key)
for x in range(0, self.size, 1):
if self.slots[(hash1 + x) % self.size] == key:
return self.values[(hash1 + x) % self.size]
return None
def put(self, key, value):
index = self.hash_fun(key)
self.slots[index]=key
self.values[index]=value
| [
"noreply@github.com"
] | Barzabel.noreply@github.com |
7813136641ab491c66df1cc62ebbf197a2fa38e5 | 452b7ca4eea5b19b237e177060797d2207caf26b | /coremltools/converters/mil/mil/passes/test_passes.py | c61e97c304e72a7f875cad6dc92f2b272d29e774 | [
"BSD-3-Clause"
] | permissive | seibert/coremltools | bb891873137fb47b03529b848f728cd6c2bad10e | 609188ebcfee2178293f0d4e93a5af2995c88645 | refs/heads/main | 2023-06-20T02:20:24.935368 | 2021-07-01T18:47:00 | 2021-07-01T18:47:00 | 332,535,892 | 0 | 0 | BSD-3-Clause | 2021-01-24T19:23:18 | 2021-01-24T19:23:18 | null | UTF-8 | Python | false | false | 14,855 | py | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.testing_utils import (
assert_op_count_match,
assert_model_is_valid,
assert_same_output_names,
get_op_types_in_program,
apply_pass_and_basic_check,
)
from coremltools.converters.mil.mil import Symbol, types
from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
import copy
import pytest
import itertools
import numpy as np
# Fixed seed so the randomly generated const tensors are reproducible.
np.random.seed(1984)
# Toggle for the (slower) end-to-end model validation run in every test.
validate_model = True
# TODO: rdar://58993652 (Add recursive block test cases for graph pass tests)
def test_const_elimination():
    """const_elimination should fold the add over consts into a new const."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        a = np.random.rand(2, 4).astype(np.float32)
        double_a = mb.add(x=a, y=a)
        return mb.add(x=x, y=double_a)
    assert_op_count_match(prog, expect=2, op="const")
    prev_prog = copy.deepcopy(prog)
    PASS_REGISTRY["common::const_elimination"](prog)
    assert_same_output_names(prev_prog, prog)
    # The folded add becomes an extra const op.
    assert_op_count_match(prog, expect=3, op="const")
    if validate_model:
        assert_model_is_valid(prog, {"x": (2, 4)})
def test_divide_to_multiply():
    """divide_to_multiply should rewrite each real_div-by-const as a mul."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        div_val = np.random.rand(2, 4).astype(np.float32)
        div_const = mb.const(val=div_val)
        div_val_1 = np.random.rand(2, 4).astype(np.float32)
        div_const_1 = mb.const(val=div_val_1)
        real_div = mb.real_div(x=x, y=div_const)
        return mb.real_div(x=real_div, y=div_const_1)
    assert_op_count_match(prog, expect=2, op="real_div")
    assert_op_count_match(prog, expect=0, op="mul")
    prev_prog = copy.deepcopy(prog)
    PASS_REGISTRY["common::divide_to_multiply"](prog)
    assert_same_output_names(prev_prog, prog)
    # Both divisions are replaced by multiplications.
    assert_op_count_match(prog, expect=0, op="real_div")
    assert_op_count_match(prog, expect=2, op="mul")
    if validate_model:
        assert_model_is_valid(prog, {"x": (2, 4)})
def test_fuse_matmul_weight_bias():
    """matmul by a const weight plus a const bias should fuse into linear."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        weights_val = np.random.rand(2, 4).T.astype(np.float32)
        weights = mb.const(val=weights_val)
        bias_val = np.random.rand(2).astype(np.float32)
        bias = mb.const(val=bias_val)
        matmul = mb.matmul(x=x, y=weights)
        return mb.add(x=matmul, y=bias)
    assert_op_count_match(prog, expect=1, op="matmul")
    assert_op_count_match(prog, expect=0, op="linear")
    prev_prog = copy.deepcopy(prog)
    PASS_REGISTRY["common::fuse_matmul_weight_bias"](prog)
    assert_same_output_names(prev_prog, prog)
    # matmul+add collapse into a single linear op.
    assert_op_count_match(prog, expect=0, op="matmul")
    assert_op_count_match(prog, expect=1, op="linear")
    if validate_model:
        assert_model_is_valid(prog, {"x": (2, 4)})
def test_dead_code_elimination():
    """dead_code_elimination should drop ops whose outputs are unused."""
    @mb.program(
        input_specs=[mb.TensorSpec(shape=(2, 4)), mb.TensorSpec(shape=(2, 4)),]
    )
    def program0(x, y):
        # following three unused op should be eliminated
        a = mb.const(val=np.zeros(shape=(1,)))
        b = mb.const(val=np.zeros(shape=(1,)))
        _ = mb.add(x=a, y=b)
        return mb.add(x=x, y=y)
    assert_op_count_match(program0, expect=4)
    prev_prog = copy.deepcopy(program0)
    PASS_REGISTRY["common::dead_code_elimination"](program0)
    assert_same_output_names(prev_prog, program0)
    # Only the add feeding the output survives.
    assert_op_count_match(program0, expect=1)
    if validate_model:
        assert_model_is_valid(program0, {"x": (2, 4), "y": (2, 4)})
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def program1(x):
        weights_val = np.random.rand(2, 4).T.astype(np.float32)
        weights = mb.const(val=weights_val)
        bias_val = np.random.rand(4).astype(np.float32)
        bias = mb.const(val=bias_val)
        # unused op and its inputs should be eliminated
        mb.matmul(x=x, y=weights)
        return mb.linear(x=x, weight=weights, bias=bias)
    assert_op_count_match(program1, expect=6)
    prev_prog = copy.deepcopy(program1)
    PASS_REGISTRY["common::dead_code_elimination"](program1)
    assert_same_output_names(prev_prog, program1)
    # The dangling matmul is removed; linear and its two consts remain.
    assert_op_count_match(program1, expect=3)
    if validate_model:
        assert_model_is_valid(program1, {"x": (2, 4)})
def test_remove_symbolic_reshape():
    """remove_symbolic_reshape should turn symbolic shape entries into -1."""
    sym_b = Symbol("s0")
    original_shape = (sym_b, Symbol("s1"), 2)
    reshape_name = "reshape"
    @mb.program(input_specs=[mb.TensorSpec(shape=(sym_b, 4))])
    def prog(x):
        # const cannot represent symbolic values. Use _const_symbolic
        shape = mb._const_symbolic(val=original_shape)
        return mb.reshape(x=x, shape=shape, name=reshape_name)
    reshape_op = prog.find_ops(
        prefix=reshape_name, op_type="reshape", exactly_one=True
    )[0]
    shape_var = reshape_op.shape
    reshaped_var = reshape_op.outputs[0]
    assert np.all(shape_var.sym_val == original_shape)
    assert np.all(reshaped_var.shape == (sym_b, 2, 2))
    # Note: we cannot deepcopy prog with symbol.
    prev_outputs = [o.name for o in prog["main"].outputs]
    PASS_REGISTRY["common::remove_symbolic_reshape"](prog)
    curr_outputs = [o.name for o in prog["main"].outputs]
    assert curr_outputs == prev_outputs
    reshape_op = prog.find_ops(
        prefix=reshape_name, op_type="reshape", exactly_one=True
    )[0]
    shape_var = reshape_op.shape
    reshaped_var = reshape_op.outputs[0]
    # shape param cannot be symbolic after the pass
    assert np.all(shape_var.sym_val == (-1, 2, 2))
    # output shape is still symbolic
    assert np.all(reshaped_var.shape == (sym_b, 2, 2))
    if validate_model:
        assert_model_is_valid(prog, {"x": (3, 4)})
def test_loop_invariant_elimination1():
    """
    Invariant pattern: Block input vars are returned as block output vars.
    """
    def body(a, b):
        return mb.add(x=a, y=b), b
    def cond(a, b):
        a_mean = mb.reduce_mean(x=a, axes=[0, 1])
        b_mean = mb.reduce_mean(x=b, axes=[0, 1])
        return mb.less(x=a_mean, y=b_mean)
    @mb.program(
        input_specs=[mb.TensorSpec(shape=(1, 2)), mb.TensorSpec(shape=(1, 2)),]
    )
    def prog(a, b):
        # b is loop invariant
        return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b))
    # Before the pass both vars are loop-carried.
    while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0]
    assert len(while_op.blocks[0].inputs) == 2
    assert len(while_op.outputs) == 2
    assert len(while_op.loop_vars) == 2
    assert while_op.blocks[0].inputs[0].name == "a_x1"
    assert while_op.blocks[0].inputs[1].name == "b_x1"
    prev_prog = copy.deepcopy(prog)
    PASS_REGISTRY["common::loop_invariant_elimination"](prog)
    assert_same_output_names(prev_prog, prog)
    # After the pass only the genuinely varying `a` remains loop-carried.
    while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0]
    assert len(while_op.blocks[0].inputs) == 1
    assert len(while_op.outputs) == 1
    assert len(while_op.loop_vars) == 1
    assert while_op.blocks[0].inputs[0].name == "a_x1"
    if validate_model:
        assert_model_is_valid(prog, {"a": (1, 2), "b": (1, 2)})
def test_loop_invariant_elimination2():
    """
    Invariant pattern: Block outputs var from outside of the block

    Same elimination as test_loop_invariant_elimination1, but here the body
    returns ``b`` captured from the enclosing ``prog`` scope rather than the
    block input ``bx``.
    """
    @mb.program(
        input_specs=[mb.TensorSpec(shape=(1, 2)), mb.TensorSpec(shape=(1, 2)),]
    )
    def prog(a, b):
        def body(a, bx):
            # Returns the outer ``b``, not the block input ``bx``.
            return mb.add(x=a, y=b), b
        def cond(a, bx):
            a_mean = mb.reduce_mean(x=a, axes=[0, 1])
            b_mean = mb.reduce_mean(x=bx, axes=[0, 1])
            return mb.less(x=a_mean, y=b_mean)
        # b is loop invariant
        return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b))
    # Before the pass: two loop variables.
    while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0]
    assert len(while_op.blocks[0].inputs) == 2
    assert len(while_op.outputs) == 2
    assert len(while_op.loop_vars) == 2
    assert while_op.blocks[0].inputs[0].name == "a_x1"
    assert while_op.blocks[0].inputs[1].name == "b_x1"
    prev_prog = copy.deepcopy(prog)
    PASS_REGISTRY["common::loop_invariant_elimination"](prog)
    assert_same_output_names(prev_prog, prog)
    # After the pass: the invariant ``b`` is eliminated.
    while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0]
    assert len(while_op.blocks[0].inputs) == 1
    assert len(while_op.outputs) == 1
    assert len(while_op.loop_vars) == 1
    assert while_op.blocks[0].inputs[0].name == "a_x1"
    if validate_model:
        assert_model_is_valid(prog, {"a": (1, 2), "b": (1, 2)})
def test_gelu_tanh_approximation():
    """
    Detect gelu tanh approx pattern, found in the TF bert model.
    y = ( tanh((.0447)x^3 + x ) * (sqrt(2/pi)) + 1 ) * 0.5 * x

    After the fusion pass, the eight-op subgraph must collapse into a
    single ``gelu`` op with an unchanged output shape.
    """
    @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))])
    def prog(x):
        # Build the tanh-approximation expression op by op.
        x1 = mb.pow(x=x, y=3)
        x1 = mb.mul(x=0.044715, y=x1)
        x1 = mb.add(x=x1, y=x)
        x1 = mb.mul(x=x1, y=np.sqrt(2 / np.pi))
        x1 = mb.tanh(x=x1)
        x1 = mb.add(x=1, y=x1)
        x1 = mb.mul(x=0.5, y=x1)
        x1 = mb.mul(x=x, y=x1)
        return x1
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::fuse_gelu_tanh_approximation"
    )
    # The untouched program keeps the original op sequence.
    assert get_op_types_in_program(prev_prog) == [
        "pow",
        "mul",
        "add",
        "mul",
        "tanh",
        "add",
        "mul",
        "mul",
    ]
    assert get_op_types_in_program(prog) == ["gelu"]
    assert_model_is_valid(
        prog,
        {"x": (3, 5, 6)},
        expected_output_shapes={block.outputs[0].name: (3, 5, 6)},
    )
@pytest.mark.parametrize("rank", [1, 2, 3, 4])
def test_onehot_matmul_to_gather_fusion(rank):
    """
    Input:
        %2 = one_hot(%1, on_value=1, off_value=0, axis=-1)
        %3 = const() # rank 2
        %4 = matmul(%2, %3)
    Output:
        %4 = gather(%3, %2, axis=0)

    Parameterized over input ranks 1-4 by slicing a fixed rank-4 shape.
    """
    rank4_shape = (10, 3, 6, 7)
    input_shape = rank4_shape[-rank:]
    vocab_size = 15
    embedding_size = 12
    @mb.program(input_specs=[mb.TensorSpec(shape=input_shape, dtype=types.int32)])
    def prog(x):
        x = mb.one_hot(
            indices=x, on_value=1, off_value=0, axis=-1, one_hot_vector_size=vocab_size
        )
        x = mb.matmul(x=x, y=np.random.rand(vocab_size, embedding_size))
        return x
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::fuse_onehot_matmul_to_gather"
    )
    # one_hot + matmul must fuse into a single gather (embedding lookup).
    assert get_op_types_in_program(prev_prog) == ["one_hot", "matmul"]
    assert get_op_types_in_program(prog) == ["gather"]
    assert_model_is_valid(
        prog,
        {"x": input_shape},
        expected_output_shapes={block.outputs[0].name: input_shape + (embedding_size,)},
    )
def test_concat_interleave_fusion_pass():
    """
    Given:
    %3 = concat(%1.a, %1.b, axis=-3, interleave=False) #shape = (B, n*C, H, W)
    %4 = reshape(%3) #shape = (B, n, C, H, W)
    %5 = transpose(%4, perm=[0, 2, 1, 3, 4]) # shape = (B, C, n, H, W)
    %6 = reshape(%5) # shape = (B, C*n, H, W)
    Result:
    %6 = concat(%1.a, %1.b, axis=-3, interleave=True)
    """
    B, C, H, W = 1, 10, 20, 20
    @mb.program(input_specs=[mb.TensorSpec(shape=(B,C,H,W)), mb.TensorSpec(shape=(B,C,H,W))])
    def prog(x, y):
        # concat + reshape/transpose/reshape is the manual "interleave" idiom.
        z = mb.concat(values=[x,y], axis=1)
        z = mb.reshape(x=z, shape=(B, 2, C, H, W))
        z = mb.transpose(x=z, perm=[0, 2, 1, 3, 4])
        z = mb.reshape(x=z, shape=(B, -1, H, W))
        return z
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::detect_concat_interleave"
    )
    assert get_op_types_in_program(prev_prog) == ["concat", "reshape", "transpose", "reshape"]
    assert get_op_types_in_program(prog) == ["concat"]
    # The fused concat must carry interleave=True.
    concat_op = prog.find_ops(op_type="concat", exactly_one=True)[0]
    assert concat_op.interleave.val
    assert_model_is_valid(
        prog,
        {"x": (B, C, H, W), "y": (B, C, H, W)},
        expected_output_shapes={block.outputs[0].name: (B, 2*C, H, W)},
    )
def test_add_conv_transpose_output_shape():
    """
    Given:
      %1: (1, 5, 39, fp32) = conv_transpose(...) # no output_shape input.
    Result:
      %2: (3, i32) = const(val=[1,5,39])
      %3: (1, 5, 39, fp32) = conv_transpose(..., output_shape=%2)
    """
    N, C_in, C_out, D1 = 1, 3, 5, 20
    @mb.program(input_specs=[mb.TensorSpec(shape=(N, C_in, D1))])
    def prog(x):
        weight = np.random.rand(C_in, C_out, D1).astype(np.float32)
        return mb.conv_transpose(x=x, weight=weight)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::add_conv_transpose_output_shape"
    )
    # The op type is unchanged; only the output_shape input is materialized.
    assert get_op_types_in_program(prev_prog) == ["conv_transpose"]
    assert get_op_types_in_program(prog) == ["conv_transpose"]
    prev_conv_transpose_op = prev_prog.find_ops(op_type="conv_transpose",
                                                exactly_one=True)[0]
    conv_transpose_op = prog.find_ops(op_type="conv_transpose",
                                      exactly_one=True)[0]
    # The injected output_shape const must equal the inferred output shape.
    assert np.all(conv_transpose_op.output_shape.val ==
                  prev_conv_transpose_op.outputs[0].shape)
@pytest.mark.parametrize(
    "op_type, is_first_op1, is_first_op2, is_first_op3, is_first_op4, const_mul_first",
    itertools.product(
        ["real_div", "mul"],
        [True, False],
        [True, False],
        [True ,False],
        [True, False],
        [True, False],
    )
)
def test_gelu_exact_approximation(op_type, is_first_op1, is_first_op2, is_first_op3, is_first_op4, const_mul_first):
    """
    Detect gelu exact pattern.
    y = 0.5 * x * ( 1 + erf ( x / sqrt(2)))

    Parameterized over every commutative-operand ordering and over
    real_div-by-sqrt(2) vs mul-by-1/sqrt(2) for the scaling step.
    """
    @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))])
    def prog(x):
        if op_type == "real_div":
            x1 = mb.real_div(x=x, y=2**0.5)
        elif op_type == "mul":
            x1 = mb.mul(x=x, y=2**-0.5) if is_first_op1 else mb.mul(x=2**-0.5, y=x)
        x2 = mb.erf(x=x1)
        x3 = mb.add(x=x2, y=1) if is_first_op2 else mb.add(x=1, y=x2)
        # The 0.5 factor and the x factor can be multiplied in either order.
        if const_mul_first:
            y1 = mb.const(val=0.5)
            y2 = x
        else:
            y1 = x
            y2 = mb.const(val=0.5)
        x4 = mb.mul(x=x3, y=y1) if is_first_op3 else mb.mul(x=y1, y=x3)
        x5 = mb.mul(x=x4, y=y2) if is_first_op4 else mb.mul(x=y2, y=x4)
        return x5
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::fuse_gelu_exact"
    )
    assert get_op_types_in_program(prev_prog) == [
        op_type,
        "erf",
        "add",
        "mul",
        "mul",
    ]
    # Every variant must fuse down to a single gelu op.
    assert get_op_types_in_program(prog) == ["gelu"]
    assert_model_is_valid(
        prog,
        {"x": (3, 5, 6)},
        expected_output_shapes={block.outputs[0].name: (3, 5, 6)},
    )
| [
"noreply@github.com"
] | seibert.noreply@github.com |
a75084fa17675a0b1c497281c9625602e38d3fa6 | 5f5631f2f750006ebfe8f351a544d33b8d9d5d4b | /list project 3.py | 497b1e1f4d17b9dcda04289b379e03957ff2dcfc | [] | no_license | jamesianreid/Python-Crash-Course-1 | 05fd9e0e3de98e32825b9d8499bc50ee7d10ef13 | e985db5f29e3d3ceb63c1e1b1a3a8e15ed336110 | refs/heads/master | 2020-05-07T16:15:07.549142 | 2019-04-10T23:45:36 | 2019-04-10T23:45:36 | 180,674,624 | 0 | 0 | null | 2019-04-10T23:45:37 | 2019-04-10T22:41:53 | Python | UTF-8 | Python | false | false | 694 | py | locations = ['usa', 'japan', 'australia', 'hawaii']
print(locations)
# sorted() returns a new sorted list and leaves the original order intact.
print(sorted(locations))
print(locations)
# reverse() flips the list in place; a second call restores the order.
locations.reverse()
print(locations)
locations.reverse()
print(locations)
# sort() orders the list in place (permanently), optionally descending.
locations.sort()
print(locations)
locations.sort(reverse=True)
print(locations)
# Rebuild the original list, then exercise insert/append/remove.
locations = ['usa', 'japan', 'australia', 'hawaii']
locations.insert(0, 'dubai')
print(locations)
locations.append('italy')
print(locations)
locations.remove('italy')
print(locations)
print(sorted(locations))
print(locations)
locations.sort()
print(locations)
locations.sort(reverse=True)
print(locations)
# pop() removes and returns the last element.
pop_locations = locations.pop()
print(locations)
print(pop_locations)
print(len(locations))
| [
"noreply@github.com"
] | jamesianreid.noreply@github.com |
8f6121566f8997cf53defda685b5c0d0eb05a5bd | 649e5484a100ad359667f96d777a6996147562ef | /ecomsite/shop/migrations/0023_auto_20200709_2232.py | de0efa33996b7c0ecddf4b343598818286c830e0 | [] | no_license | goeltushar2210/goeltushar2210.github.io | 35dccba71cf81841b2c319aefa2bbd16b73b8a7f | bcc8bfdc555b643ee9fef2b7337fb2eecb0f76d6 | refs/heads/master | 2022-11-18T04:22:57.170620 | 2020-07-19T10:24:38 | 2020-07-19T10:24:38 | 280,831,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | # Generated by Django 2.2.13 on 2020-07-09 17:02
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: sets max_length=50 on the ``title``
    # field of both the ``About`` and ``Contact`` models.

    # Must be applied after migration 0022 of the ``shop`` app.
    dependencies = [
        ('shop', '0022_auto_20200709_2226'),
    ]
    operations = [
        migrations.AlterField(
            model_name='about',
            name='title',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='contact',
            name='title',
            field=models.CharField(max_length=50),
        ),
    ]
| [
"68200993+Nirpesh87@users.noreply.github.com"
] | 68200993+Nirpesh87@users.noreply.github.com |
37e52b404c039b0a5772c6347c94bc1a3be791c1 | 024c9921572dddfc76d17773799f0458934dd75e | /exp.py | 335d61616e0034d91f72222979686ba357bd2132 | [] | no_license | andychan94/tensorflow | 80253d96235b17e7c0ac3c184bbb1fdd457c8425 | 79bde6681bcaa9f6962c398169511c550bba3d33 | refs/heads/master | 2020-03-22T04:34:06.134489 | 2018-07-03T00:13:10 | 2018-07-03T00:13:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,101 | py | import tensorflow as tf
import numpy as np
import time
import csv
import sys
# Network configuration: N training samples, N_HL hidden layers, and the
# target percentage used as the training stop criterion.
N = 1000
N_HL = 12
accuracy = 98
# ``dim`` maps the layer index (as a string) to that layer's width.
dim = {}
for i in range(1, N_HL + 1 + 1):
    dim[str(i)] = 0
# All hidden layers get width 3 (both branches produce 3-wide layers;
# the else-branch only covers the first five when N_HL <= 5).
if (N_HL > 5):
    for i in range(1, N_HL + 1 + 1):
        dim[str(i)] = 3
else:
    dim[str(1)] = 3
    dim[str(2)] = 3
    dim[str(3)] = 3
    dim[str(4)] = 3
    dim[str(5)] = 3
# Input layer (index 0) and output layer (index N_HL + 1) are 1-wide.
dim[str(0)] = 1
dim[str(N_HL + 1)] = 1
# Abort if any layer was left with width 0.
Error = 0 in dim.values()
if Error:
    sys.exit("Error!")
# Per-layer node counts and [in, out] dimensions, consumed by Write()
# when dumping Wb.csv.
W_nodes = []
b_nodes = []
for iW in range(0, N_HL + 1 + 1):
    W_nodes.append(dim[str(iW)])
for ib in range(1, N_HL + 1 + 1):
    b_nodes.append(dim[str(ib)])
W_dim = {}
b_dim = {}
for i in range(1, N_HL + 1 + 1):
    W_dim[str(i)] = [dim[str(i - 1)], dim[str(i)]]
    b_dim[str(i)] = [dim[str(i)]]
def Write(accuracy):
    # Dump the trained weights and biases to Wb.csv.
    # Relies on module-level globals: sess, W, b, N_HL, W_nodes, b_nodes,
    # W_dim, b_dim.  NOTE(review): the ``accuracy`` parameter is unused here.
    # W = {}
    # b = {}
    W_val = {}
    b_val = {}
    # sess = tf.Session()
    for i in range(1, N_HL + 1 + 1):
        # Fetch the current numpy values of each layer's parameters.
        W_val[str(i)] = sess.run(W[str(i)])
        b_val[str(i)] = sess.run(b[str(i)])
    with open('Wb.csv', 'wt', newline='') as f:
        # Weight section: header, layer count, node counts, then per-layer
        # [in, out] dims followed by the weight matrix rows.
        writer = csv.writer(f)
        writer.writerow(["# Weight"])
        writer.writerow([N_HL])
        writer.writerow(W_nodes)
        for i in range(1, N_HL + 1 + 1):
            writer.writerow(W_dim[str(i)])
            writer.writerows(W_val[str(i)])
        # Bias section, same layout with one row per layer.
        writer = csv.writer(f)
        writer.writerow(["# Bias"])
        writer.writerow([N_HL])
        writer.writerow(b_nodes)
        for i in range(1, N_HL + 1 + 1):
            writer.writerow(b_dim[str(i)])
            writer.writerow(b_val[str(i)])
# Build the network: input -> N_HL tanh hidden layers -> linear output.
x = {}
W = {}
b = {}
layer = {}
x = tf.placeholder(tf.float32, [N, dim[str(0)]])
layer = x
for i in range(1, N_HL + 1 + 1):
    # Uniform init in [-1, 1] for every layer's weights and biases.
    W[str(i)] = tf.Variable(tf.random_uniform([dim[str(i - 1)], dim[str(i)]], minval=-1, maxval=1))
    b[str(i)] = tf.Variable(tf.random_uniform([dim[str(i)]], minval=-1, maxval=1))
for i in range(1, N_HL+1):
    layer = tf.tanh(tf.matmul(layer, W[str(i)]) + b[str(i)])
# layer = tf.matmul(layer, W[str(N_HL + 1)]) + b[str(N_HL + 1)]
y = tf.matmul(layer, W[str(N_HL + 1)]) + b[str(N_HL + 1)]
t = tf.placeholder(tf.float32, [N, dim[str(0)]])
# Sum-of-squares loss is minimized; MAPE only drives the stop condition.
loss = tf.reduce_sum(tf.square(y - t))
mape = tf.reduce_mean(tf.abs(y - t) / tf.abs(t)) * 100
train_step = tf.train.AdamOptimizer().minimize(loss)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Target function: sin(x)^3 + cos(x)^3 sampled at N points on [-pi, pi].
pi = np.pi
train_xi = np.linspace(-pi, pi, N)
train_x = np.zeros([N, dim[str(0)]])
for i in range(1, N + 1):
    train_x[i - 1, 0] = train_xi[i - 1]
train_t = (np.sin(train_x)**3+np.cos(train_x)**3)
start_time = time.time()
str(start_time)
print("--Learning--")
print("number of HL: %d" % N_HL)
n = 0
# Train until the criterion exceeds the target, then dump weights via Write().
# NOTE: despite its name, MAPE holds 100 - mean-absolute-percentage-error.
while True:
    n += 1
    sess.run(train_step, feed_dict={x: train_x, t: train_t})
    MAPE = 100 - sess.run(mape, feed_dict={x: train_x, t: train_t})
    if n % 1000 == 0:
        print("Step: %d, MAPE: %f" % (n, MAPE))
    if MAPE > accuracy:
        Write(accuracy)
        print("Step: %d, MAPE: %f" % (n, MAPE))
        break
end_time = time.time()
str(end_time)
print("--Finished--")
print("Time: " + str(end_time - start_time))
| [
"nrashidov@yahoo.com"
] | nrashidov@yahoo.com |
0fffccdc51fc9326c1afbba7db934a81b41a5fac | 4aff067942325533514e696d8896add9164c255d | /CodeEval/test_challenge_8.py | 536482ea642680716cff5c8abbfb9888d0243af3 | [
"MIT"
] | permissive | andrewzwicky/puzzles | 678bea42ea8ada504a4002f7f1b239be62842166 | 7b264bb1442fa9c855c566ba92ad139a743683fc | refs/heads/master | 2022-12-24T02:10:05.178056 | 2022-12-14T06:43:37 | 2022-12-14T06:43:37 | 71,407,639 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | from unittest import TestCase
from CodeEval.challenge_8 import challenge
class Challenge7Test(TestCase):
    # NOTE(review): the class is named Challenge7Test but it imports and
    # exercises challenge_8 — presumably a copy-paste leftover; confirm
    # before renaming (test discovery only depends on the test_* methods).
    def test_input_1(self):
        # challenge() appears to swap the order of the two words — verify
        # against challenge_8's spec.
        self.assertEqual("World Hello", challenge("Hello World"))
    def test_input_2(self):
        self.assertEqual("CodeEval Hello", challenge("Hello CodeEval"))
"andrew.zwicky@gmail.com"
] | andrew.zwicky@gmail.com |
9dac850c79526f3beabdcec45af6f4746838cae8 | 8890ff61262ff98369464721c165d53aa9febe85 | /oauthlib/oauth2/draft25/parameters.py | 4343dcb259213b64ac4a8ce3151d29588e82f6c1 | [
"Apache-2.0"
] | permissive | Mause/tumblr_conn | 09e91bb86e6310ac3f9b0be292967283990558ea | e0ac78947355e41a8432a2a3e12fb86fb28a4c72 | refs/heads/master | 2022-06-15T05:30:27.737676 | 2013-04-29T20:19:57 | 2013-04-29T20:19:57 | 9,258,639 | 0 | 0 | null | 2022-05-17T03:16:49 | 2013-04-06T10:52:39 | JavaScript | UTF-8 | Python | false | false | 11,616 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
"""
oauthlib.oauth2_draft28.parameters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains methods related to `Section 4`_ of the OAuth 2 draft.
.. _`Section 4`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-4
"""
import json
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from oauthlib.common import add_params_to_uri, add_params_to_qs, unicode_type
from .errors import raise_from_error, MissingTokenError, MissingTokenTypeError
from .errors import MismatchingStateError, MissingCodeError
from .errors import InsecureTransportError
from .utils import list_to_scope, scope_to_list
def prepare_grant_uri(uri, client_id, response_type, redirect_uri=None,
                      scope=None, state=None, **kwargs):
    """Prepare the authorization grant request URI.

    The client constructs the request URI by adding the following
    parameters to the query component of the authorization endpoint URI
    using the "application/x-www-form-urlencoded" format:

    response_type
            REQUIRED.  Value MUST be set to "code".
    client_id
            REQUIRED.  The client identifier as described in `Section 2.2`_.
    redirect_uri
            OPTIONAL.  As described in `Section 3.1.2`_.
    scope
            OPTIONAL.  The scope of the access request as described by
            `Section 3.3`_.
    state
            RECOMMENDED.  An opaque value used by the client to maintain
            state between the request and callback; SHOULD be used for
            preventing cross-site request forgery as described in
            `Section 10.12`_.

    GET /authorize?response_type=code&client_id=s6BhdRkqt3&state=xyz
        &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb HTTP/1.1
    Host: server.example.com

    .. _`Section 2.2`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-2.2
    .. _`Section 3.1.2`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-3.1.2
    .. _`Section 3.3`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-3.3
    .. _`section 10.12`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-10.12
    """
    # Scheme check is case-insensitive (URI schemes are case-insensitive
    # per RFC 3986), consistent with parse_authorization_code_response and
    # parse_implicit_response in this module.
    if not uri.lower().startswith('https://'):
        raise InsecureTransportError()
    params = [(('response_type', response_type)),
              (('client_id', client_id))]
    if redirect_uri:
        params.append(('redirect_uri', redirect_uri))
    if scope:
        params.append(('scope', list_to_scope(scope)))
    if state:
        params.append(('state', state))
    # Forward any additional truthy keyword arguments verbatim.
    for k in kwargs:
        if kwargs[k]:
            params.append((unicode_type(k), kwargs[k]))
    return add_params_to_uri(uri, params)
def prepare_token_request(grant_type, body='', **kwargs):
    """Prepare the access token request body.

    Adds ``grant_type`` plus every truthy keyword argument to *body* using
    the "application/x-www-form-urlencoded" format defined by the OAuth 2
    draft (`Section 4.1.1`_).  A ``scope`` keyword is first normalized with
    ``list_to_scope``.

    Example result::

        grant_type=authorization_code&code=SplxlOBeZQQYbYS6WxSbIA
        &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb

    .. _`Section 4.1.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-4.1.1
    """
    params = [('grant_type', grant_type)]
    if 'scope' in kwargs:
        kwargs['scope'] = list_to_scope(kwargs['scope'])
    # Falsy values (None, '', 0, ...) are dropped, as in the urlencoded form.
    params.extend(
        (unicode_type(key), value) for key, value in kwargs.items() if value
    )
    return add_params_to_qs(body, params)
def parse_authorization_code_response(uri, state=None):
    """Parse an authorization-code grant redirect URI into a dict.

    The authorization server delivers the code by appending ``code`` (and,
    if the request carried one, ``state``) to the query component of the
    redirection URI, e.g.::

        https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA&state=xyz

    Raises ``InsecureTransportError`` for non-HTTPS URIs,
    ``MissingCodeError`` when no ``code`` parameter is present, and
    ``MismatchingStateError`` when *state* is given but does not match the
    value echoed back by the server.
    """
    if not uri.lower().startswith('https://'):
        raise InsecureTransportError()
    response = dict(urlparse.parse_qsl(urlparse.urlparse(uri).query))
    if 'code' not in response:
        raise MissingCodeError("Missing code parameter in response.")
    if state and state != response.get('state', None):
        raise MismatchingStateError()
    return response
def parse_implicit_response(uri, state=None, scope=None):
    """Parse an implicit-grant redirect URI into a dict.

    In the implicit flow the token parameters (``access_token``,
    ``token_type``, and optionally ``expires_in``, ``scope`` and ``state``)
    travel in the *fragment* component of the redirection URI, e.g.::

        https://example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA
        &state=xyz&token_type=example&expires_in=3600

    A ``scope`` value is normalized to a list, *state* (when supplied) is
    compared against the echoed value, and the parameters are validated
    with ``validate_token_parameters`` before being returned.
    """
    if not uri.lower().startswith('https://'):
        raise InsecureTransportError()
    fragment = urlparse.urlparse(uri).fragment
    response = dict(urlparse.parse_qsl(fragment, keep_blank_values=True))
    if 'scope' in response:
        response['scope'] = scope_to_list(response['scope'])
    if state and state != response.get('state', None):
        raise ValueError("Mismatching or missing state in params.")
    validate_token_parameters(response, scope)
    return response
def parse_token_response(body, scope=None):
    """Parse the JSON token response body into a dict.

    Per the OAuth 2 draft (`Section 5.1`_), the token endpoint answers with
    an ``application/json`` entity body carrying ``access_token`` and
    ``token_type`` (required) plus optional ``expires_in``,
    ``refresh_token`` and ``scope`` parameters, e.g.::

        {
            "access_token":"2YotnFZFEjr1zCsicMWpAA",
            "token_type":"example",
            "expires_in":3600,
            "refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA",
            "example_parameter":"example_value"
        }

    A ``scope`` string is normalized to a list, and the result is checked
    with ``validate_token_parameters`` (which also warns when the granted
    scope differs from *scope*) before being returned.

    .. _`Section 5.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-5.1
    """
    token = json.loads(body)
    if 'scope' in token:
        token['scope'] = scope_to_list(token['scope'])
    validate_token_parameters(token, scope)
    return token
def validate_token_parameters(params, scope=None):
    """Ensures token presence, token type, expiration and scope in params."""
    # A server-reported error takes precedence over any missing-field check.
    if 'error' in params:
        raise_from_error(params.get('error'), params)
    if not 'access_token' in params:
        raise MissingTokenError(description="Missing access token parameter.")
    if not 'token_type' in params:
        raise MissingTokenTypeError()
    # If the issued access token scope is different from the one requested by
    # the client, the authorization server MUST include the "scope" response
    # parameter to inform the client of the actual scope granted.
    # http://tools.ietf.org/html/draft-ietf-oauth-v2-25#section-3.3
    new_scope = params.get('scope', None)
    scope = scope_to_list(scope)
    if scope and new_scope and set(scope) != set(new_scope):
        # NOTE(review): this *raises* a Warning exception rather than emitting
        # one via the warnings module, so callers must catch it — confirm
        # this is the intended contract before changing it.
        raise Warning("Scope has changed to %s." % new_scope)
| [
"jack.thatch@gmail.com"
] | jack.thatch@gmail.com |
bfded916f0c00ede56451a540fd87c8e2a4935f2 | 5bf72de60d67761fd14bfc3ccb9223c1bd02424e | /train_custom_compact_cnn_sgd.py | 8a92eb87b54f71ad2a799a7e05020344a22e22d3 | [] | no_license | ttpro1995/crowd_counting_framework | 41d0fb20fa09a51b311462a6f60289e2032aaaa4 | 86e4a0179b7a4b8caed185d64e1824f256fc88bd | refs/heads/master | 2021-06-30T02:47:13.283667 | 2021-01-14T02:04:29 | 2021-01-14T02:04:29 | 204,106,717 | 0 | 2 | null | 2021-01-14T02:04:31 | 2019-08-24T04:09:43 | Jupyter Notebook | UTF-8 | Python | false | false | 5,281 | py | from comet_ml import Experiment
from args_util import my_args_parse
from data_flow import get_train_val_list, get_dataloader, create_training_image_list, create_image_list
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Loss, MeanAbsoluteError, MeanSquaredError
from ignite.engine import Engine
from ignite.handlers import Checkpoint, DiskSaver
from crowd_counting_error_metrics import CrowdCountingMeanAbsoluteError, CrowdCountingMeanSquaredError
from visualize_util import get_readable_time
import torch
from torch import nn
from models import CustomCNNv4
import os
from model_util import get_lr
COMET_ML_API = "S3mM1eMq6NumMxk2QJAXASkUM"
PROJECT_NAME = "crowd-counting-framework"
if __name__ == "__main__":
    # Experiment tracking: all metrics below are logged to comet.ml.
    experiment = Experiment(project_name=PROJECT_NAME, api_key=COMET_ML_API)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    args = my_args_parse()
    print(args)
    experiment.set_name(args.task_id)
    experiment.set_cmd_args()
    experiment.log_parameter("note", args.note)
    # Dataset layout on disk: <input>/train_data and <input>/test_data.
    DATA_PATH = args.input
    TRAIN_PATH = os.path.join(DATA_PATH, "train_data")
    TEST_PATH = os.path.join(DATA_PATH, "test_data")
    dataset_name = args.datasetname
    if dataset_name=="shanghaitech":
        print("will use shanghaitech dataset with crop ")
    elif dataset_name == "shanghaitech_keepfull":
        print("will use shanghaitech_keepfull")
    else:
        print("cannot detect dataset_name")
        print("current dataset_name is ", dataset_name)
    # create list
    train_list = create_image_list(TRAIN_PATH)
    test_list = create_image_list(TEST_PATH)
    # create data loader (no separate validation list is passed in)
    train_loader, val_loader, test_loader = get_dataloader(train_list, None, test_list, dataset_name=dataset_name, batch_size=args.batch_size)
    print("len train_loader ", len(train_loader))
    # model
    model = CustomCNNv4()
    model = model.to(device)
    # loss function: summed MSE over the predicted density map
    loss_fn = nn.MSELoss(reduction='sum').to(device)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum= args.momentum,
                                weight_decay=args.decay)
    trainer = create_supervised_trainer(model, optimizer, loss_fn, device=device)
    evaluator = create_supervised_evaluator(model,
                                            metrics={
                                                'mae': CrowdCountingMeanAbsoluteError(),
                                                'mse': CrowdCountingMeanSquaredError(),
                                                'loss': Loss(loss_fn)
                                            }, device=device)
    print(model)
    print(args)
    # Optionally resume trainer/model/optimizer state from a checkpoint,
    # then override the checkpoint's learning rate with the CLI value.
    if len(args.load_model) > 0:
        load_model_path = args.load_model
        print("load mode " + load_model_path)
        to_load = {'trainer': trainer, 'model': model, 'optimizer': optimizer}
        checkpoint = torch.load(load_model_path)
        Checkpoint.load_objects(to_load=to_load, checkpoint=checkpoint)
        print("load model complete")
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr
            print("change lr to ", args.lr)
    else:
        print("do not load, keep training")
    # Print the running loss every 50 iterations.
    @trainer.on(Events.ITERATION_COMPLETED(every=50))
    def log_training_loss(trainer):
        timestamp = get_readable_time()
        print(timestamp + " Epoch[{}] Loss: {:.2f}".format(trainer.state.epoch, trainer.state.output))
    # Evaluate on the training set after every epoch and log to comet.ml.
    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(trainer):
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics
        timestamp = get_readable_time()
        print(timestamp + " Training set Results - Epoch: {} Avg mae: {:.2f} Avg mse: {:.2f} Avg loss: {:.2f}"
              .format(trainer.state.epoch, metrics['mae'], metrics['mse'], metrics['loss']))
        experiment.log_metric("epoch", trainer.state.epoch)
        experiment.log_metric("train_mae", metrics['mae'])
        experiment.log_metric("train_mse", metrics['mse'])
        experiment.log_metric("train_loss", metrics['loss'])
        experiment.log_metric("lr", get_lr(optimizer))
    # Runs on test_loader but reports/logs under "valid"/validation names.
    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(trainer):
        evaluator.run(test_loader)
        metrics = evaluator.state.metrics
        timestamp = get_readable_time()
        print(timestamp + " Validation set Results - Epoch: {} Avg mae: {:.2f} Avg mse: {:.2f} Avg loss: {:.2f}"
              .format(trainer.state.epoch, metrics['mae'], metrics['mse'], metrics['loss']))
        experiment.log_metric("valid_mae", metrics['mae'])
        experiment.log_metric("valid_mse", metrics['mse'])
        experiment.log_metric("valid_loss", metrics['loss'])
    # docs on save and load
    # Checkpoint trainer/model/optimizer every 5 epochs, keeping 5 files.
    to_save = {'trainer': trainer, 'model': model, 'optimizer': optimizer}
    save_handler = Checkpoint(to_save, DiskSaver('saved_model/' + args.task_id, create_dir=True, atomic=True),
                              filename_prefix=args.task_id,
                              n_saved=5)
    trainer.add_event_handler(Events.EPOCH_COMPLETED(every=5), save_handler)
    trainer.run(train_loader, max_epochs=args.epochs)
| [
"thient@vng.com.vn"
] | thient@vng.com.vn |
58ac577ac562f09074872e4b8a5dc35f67865d5b | 737e3e9329fc9aa1bcacfb14ae3d6b88c5913755 | /01_hello/hello2.py | 391d0de51f126fd8af9f04e3f733087939a1e794 | [
"MIT"
] | permissive | Mesagan/tiny_python_projects | 61e9d389eb2cbbe08413bc6677a30e75ef43eb6f | f00feec1ee15527af75046b3e88d693124e57c7f | refs/heads/master | 2023-03-21T09:23:04.071644 | 2020-12-07T21:27:32 | 2020-12-07T21:27:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,973 | py | #!/usr/bin/env python3
"""
Author : adeptdabbler <adeptdabbler@localhost>
Date : 2020-12-07
Purpose: Rock the Casbah
"""
import argparse
# --------------------------------------------------
def get_args():
    """Build the command-line parser and return the parsed arguments."""
    p = argparse.ArgumentParser(
        description='Rock the Casbah',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # One required positional string argument.
    p.add_argument('positional', metavar='str',
                   help='A positional argument')
    # Optional string flag, defaults to the empty string.
    p.add_argument('-a', '--arg', metavar='str', type=str, default='',
                   help='A named string argument')
    # Optional integer flag, defaults to zero.
    p.add_argument('-i', '--int', metavar='int', type=int, default=0,
                   help='A named integer argument')
    # Optional file flag; argparse opens it for reading when supplied.
    p.add_argument('-f', '--file', metavar='FILE',
                   type=argparse.FileType('rt'), default=None,
                   help='A readable file')
    # Boolean on/off switch.
    p.add_argument('-o', '--on', action='store_true',
                   help='A boolean flag')
    return p.parse_args()
# --------------------------------------------------
def main():
    """Parse the command line and echo every argument back to stdout."""
    args = get_args()
    # The file argument prints its name (or '' when it was not supplied).
    file_name = args.file.name if args.file else ''
    print(f'str_arg = "{args.arg}"')
    print(f'int_arg = "{args.int}"')
    print(f'file_arg = "{file_name}"')
    print(f'flag_arg = "{args.on}"')
    print(f'positional = "{args.positional}"')
# --------------------------------------------------
if __name__ == '__main__':  # entry point when executed as a script
    main()
| [
"62133005+adeptdabbler@users.noreply.github.com"
] | 62133005+adeptdabbler@users.noreply.github.com |
341c021b020af11bbca3772185980adea06e8070 | b32a2ac049c24559de7655c42d82f42ef808e3fa | /evaluation.py | c0cbb032cc43a7b6937f52fc666d844fd6b58316 | [] | no_license | 23AIBox/23AIBox-DeepCFP | 0a925d3544c273901e1183427d6dcc35b32e66d3 | 321e266285457776dd5994fc48285126ec2d365c | refs/heads/main | 2023-03-22T18:42:36.106783 | 2021-03-01T07:45:19 | 2021-03-01T07:45:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,427 | py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import h5py
import argparse
from sklearn.metrics import roc_curve, precision_recall_curve, auc
def parse_args():
    """Parse command-line options for the evaluation script.

    Returns:
        argparse.Namespace with ``path`` (project root), ``cell_line``
        (dataset cell line), ``model_name`` (model under test) and
        ``curve`` ('ROC' or 'P-R') attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--path",
        type=str,
        default="./",
        help="The path of the project."
    )
    parser.add_argument(
        "--cell_line",
        type=str,
        default="GM12878",
        help="The cell line of dataset."
    )
    parser.add_argument(
        "--model_name",
        type=str,
        default="deepcfp",
        help="The name of testing model."
    )
    parser.add_argument(
        "--curve",
        type=str,
        default="ROC",
        # Bug fix: help text was copy-pasted from --model_name; this option
        # actually selects the curve type.
        help="Which curve to evaluate: 'ROC' or 'P-R'."
    )
    return parser.parse_args()
def auroc(labels, data):
    """Return (FPR, TPR, area) for the ROC curve of ``data`` against ``labels``."""
    fpr, tpr, _ = roc_curve(labels, data)
    return fpr, tpr, auc(fpr, tpr)
def aupr(labels, data):
    """Return (precision, recall, area) for the precision-recall curve."""
    prec, rec, _ = precision_recall_curve(labels, data)
    return prec, rec, auc(rec, prec)
def standardization(data):
    """Z-score ``data`` along axis 0: subtract the per-column mean and
    divide by the per-column standard deviation."""
    centered = data - np.mean(data, axis=0)
    return centered / np.std(data, axis=0)
def Curves(path, curve, cell_name, model_name):
    """Plot ROC or P-R curves comparing FRI, GNM and DeepCFP scores.

    Reads ``<path>/compare/<cell>/<cell>_<model>_datacmp.txt`` (tab-separated,
    with 'labels', 'std(fri)', 'GNM' and 'prediction' columns) and draws one
    curve per method, plus a reference diagonal.

    :param path: project root containing the ``compare`` directory
    :param curve: 'ROC' or 'P-R'
    :param cell_name: cell line used to locate the comparison file
    :param model_name: model name used to locate the comparison file
    """
    data = pd.read_table(os.path.join(path, 'compare', cell_name, cell_name+'_'+model_name+'_datacmp.txt'))
    labels = np.array(data['labels'])
    fri = np.array(data['std(fri)'])
    gnm = np.array(data['GNM'])
    prediction = np.array(data['prediction'])
    # Renamed from `model_name` to avoid shadowing the parameter.
    model_names = ['FRI', 'GNM', 'DeepCFP']
    color = ['#1E90FF', '#DAA520', '#FF4500']
    plt.figure(figsize=(7, 6))
    plt.grid(linestyle=':')
    for target in model_names:
        if target == 'FRI':
            c = color[0]
            t = fri
        elif target == 'GNM':
            c = color[1]
            t = gnm
        elif target == 'DeepCFP':
            c = color[2]
            t = prediction
        if curve == 'ROC':
            FPR, TPR, roc_auc = auroc(labels, standardization(t))
            plt.plot(FPR, TPR, c, label='{0:s} (AUROC = {1:.2f})'.format(target, roc_auc), linewidth=1.5)
        elif curve == 'P-R':
            precision, recall, pr_auc = aupr(labels, standardization(t))
            plt.plot(recall, precision, c, label='{0:s} (AUPR = {1:.2f})'.format(target, pr_auc), linewidth=1.5)
    if curve == 'ROC':
        plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Reference')
    elif curve == 'P-R':
        plt.plot([0, 1], [1, 0], '--', color=(0.6, 0.6, 0.6), label='Reference')
    plt.xlim([-0.02, 1.02])
    plt.ylim([-0.02, 1.02])
    # BUG FIX: labels and title were previously hard-coded to
    # Recall/Precision/'P-R curves' even when ROC curves were drawn.
    if curve == 'ROC':
        plt.xlabel('False Positive Rate', fontsize=15)
        plt.ylabel('True Positive Rate', fontsize=15)
        plt.title('ROC curves', fontsize=15)
    elif curve == 'P-R':
        plt.xlabel('Recall', fontsize=15)
        plt.ylabel('Precision', fontsize=15)
        plt.title('P-R curves', fontsize=15)
    plt.legend(loc="lower right", fontsize=11.5, framealpha=1)
    plt.show()
def AUC_on_test_set(path, curve, cell_line, model_name):
    """Bar-plot AUROC or AUPR of FRI, GNM and DeepCFP on the whole test set.

    :param path: project root containing the ``compare`` directory
    :param curve: 'ROC' (plot AUROC) or 'P-R' (plot AUPR)
    :param cell_line: cell line used to locate the comparison file
    :param model_name: model name used to locate the comparison file
    """
    AUROC = []
    AUPR = []
    data = pd.read_table(os.path.join(path, 'compare', cell_line, cell_line+'_'+model_name+'_datacmp.txt'))
    labels = np.array(data['labels'])
    fri = np.array(data['std(fri)'])
    gnm = np.array(data['GNM'])
    AUROC.append(round(auroc(labels, standardization(fri))[2], 4))
    AUPR.append(round(aupr(labels, standardization(fri))[2], 4))
    AUROC.append(round(auroc(labels, standardization(gnm))[2], 4))
    AUPR.append(round(aupr(labels, standardization(gnm))[2], 4))
    prediction = np.array(data['prediction'])
    AUROC.append(round(auroc(labels, standardization(prediction))[2], 4))
    AUPR.append(round(aupr(labels, standardization(prediction))[2], 4))
    model_name = ['FRI', 'GNM', 'DeepCFP']
    plt.figure(figsize=(6, 5))
    bar_width = 0.6
    color = ['#4473C5', '#A5A5A5', '#FEBF00']
    if curve == 'ROC':
        plt.bar(np.arange(3), AUROC, color=color, width=bar_width)
        #for i in range(len(AUROC)):
        #    plt.text(i, AUROC[i] + 0.01, AUROC[i], ha='center')
        plt.title('Compare AUROC on the test set', fontsize=15)
    elif curve == 'P-R':
        plt.bar(np.arange(3), AUPR, color=color, width=bar_width)
        #for i in range(len(AUPR)):
        #    plt.text(i, AUPR[i] + 0.01, AUPR[i], ha='center')
        plt.title('Compare AUPR on the test set', fontsize=15)
    plt.xlabel('Model', fontsize=15)
    # BUG FIX: the y-axis label previously always said "ROC curve" even when
    # AUPR bars were plotted; make it follow the selected curve type.
    plt.ylabel('The area under '+curve+' curve', fontsize=15)
    plt.xticks(np.arange(3), model_name, fontsize=12, rotation=20)
    plt.ylim([0.7, 1.02])
    plt.show()
def AUC_on_each_chromosome(path, curve, cell_line, model_name):
    """Bar-plot per-chromosome AUROC or AUPR for FRI, GNM and DeepCFP.

    Reads the same comparison table as ``Curves``/``AUC_on_test_set``, groups
    rows by the 'chr' column (chr1..chr22 plus chrX), computes one AUC per
    chromosome per method, prints the per-method means, and draws grouped bars.

    :param path: project root containing the ``compare`` directory
    :param curve: 'ROC' (AUROC) or 'P-R' (AUPR)
    :param cell_line: cell line used to locate the comparison file
    :param model_name: model name used to locate the comparison file
    """
    data = pd.read_table(os.path.join(path, 'compare', cell_line, cell_line+'_'+model_name+'_datacmp.txt'))
    chrr = ['chr' + str(i) for i in range(1, 23)]
    chrr.append('chrX')
    chrr1 = []
    fri_auc = []
    gnm_auc = []
    prediction_auc = []
    for i in chrr:
        # Rows belonging to the current chromosome only.
        d = data[data['chr'].isin([i])]
        labels = np.array(d['labels'])
        # NOTE(review): this reads column 'FRI' while the other functions read
        # 'std(fri)' — confirm both columns exist in the datacmp file.
        fri = np.array(d['FRI'])
        gnm = np.array(d['GNM'])
        prediction = np.array(d['prediction'])
        chrr1.append(i)
        if(curve=='ROC'):
            fri_auc.append(auroc(labels, fri)[2])
            gnm_auc.append(auroc(labels, gnm)[2])
            prediction_auc.append(auroc(labels, prediction)[2])
        elif(curve=='P-R'):
            fri_auc.append(aupr(labels, fri)[2])
            gnm_auc.append(aupr(labels, gnm)[2])
            prediction_auc.append(aupr(labels, prediction)[2])
    # Mean AUC across chromosomes, one line per method (FRI, GNM, DeepCFP).
    print(np.mean(fri_auc))
    print(np.mean(gnm_auc))
    print(np.mean(prediction_auc))
    plt.figure(figsize=(15, 8))
    bar_width = 0.2
    plt.bar(np.arange(23), fri_auc, label='FRI', color='#4473C5', alpha=0.8, width=bar_width)
    plt.bar(np.arange(23) + bar_width + 0.05, gnm_auc, label='GNM', color='#A5A5A5', alpha=0.8, width=bar_width)
    plt.bar(np.arange(23) + 2 * bar_width + 0.1, prediction_auc, label='DeepCFP', color='#FEBF00', alpha=0.8,
            width=bar_width)
    plt.xlabel('Chromosome', fontsize=15)
    plt.ylabel('The area under '+curve+' curve', fontsize=15)
    plt.xticks(np.arange(23) + 0.25, chrr, fontsize=12, rotation=20)
    plt.ylim([0.0, 1.19])
    plt.legend()
    plt.show()
if __name__ == '__main__':
    args = parse_args()
    # Each routine accepts the same arguments; curve is 'ROC' or 'P-R'.
    for plot_fn in (Curves, AUC_on_test_set, AUC_on_each_chromosome):
        plot_fn(args.path, args.curve, args.cell_line, args.model_name)
| [
"1571537695@qq.com"
] | 1571537695@qq.com |
5d1ed16f74021d81791fe06902bd4b73932fecc5 | 4f9930e15c02cb9a09af70d66b794480b8c9bd57 | /batch2/batch/driver/instance.py | 364815e0ed0adbb5c8d315f693c218f81632683b | [
"MIT"
] | permissive | gsarma/hail | d76aa16d718618c1915b629077fd80cbc4d3b526 | 6aa2d945bb7d57c463d5ab9afb686f18c2941b25 | refs/heads/master | 2020-06-20T06:09:43.408615 | 2019-10-29T21:40:23 | 2019-10-29T21:40:23 | 196,250,453 | 0 | 0 | MIT | 2019-07-10T17:44:48 | 2019-07-10T17:44:47 | null | UTF-8 | Python | false | false | 10,380 | py | import time
import logging
import googleapiclient.errors
import asyncio
import aiohttp
log = logging.getLogger('instance')
class Instance:
    """A single worker VM tracked by an instance pool.

    Lifecycle: ``pending`` (created, no IP yet) -> ``active`` (has an IP,
    schedulable) -> deactivated and/or ``deleted``.  Every transition keeps
    the pool's aggregate counters (``n_pending_instances``,
    ``n_active_instances``, ``free_cores_mcpu``) and the
    ``instances_by_free_cores`` index in sync, which is why state changes
    happen under ``self.lock`` or on the single event loop.
    """

    @staticmethod
    def from_record(inst_pool, record):
        """Rehydrate an Instance from a DB row and register it with the pool.

        A row with a NULL ip_address is considered pending; otherwise active.
        """
        ip_address = record['ip_address']
        pending = ip_address is None
        active = ip_address is not None
        deleted = False
        inst = Instance(inst_pool, record['name'], record['token'],
                        ip_address=ip_address, pending=pending,
                        active=active, deleted=deleted)
        inst_pool.free_cores_mcpu += inst_pool.worker_capacity_mcpu # FIXME: this should get cores from db in future
        if active:
            inst_pool.n_active_instances += 1
            inst_pool.instances_by_free_cores.add(inst)
        else:
            assert pending
            inst_pool.n_pending_instances += 1
        log.info(f'added instance {inst.name} to the instance pool with ip address {inst.ip_address}')
        return inst

    @staticmethod
    async def create(inst_pool, name, token):
        """Persist a new instance record and return a pending Instance."""
        # FIXME: maybe add machine type, cores, batch_image etc.
        await inst_pool.driver.db.instances.new_record(name=name,
                                                       token=token)
        inst_pool.n_pending_instances += 1
        inst_pool.free_cores_mcpu += inst_pool.worker_capacity_mcpu
        return Instance(inst_pool, name, token, ip_address=None, pending=True,
                        active=False, deleted=False)

    def __init__(self, inst_pool, name, token, ip_address, pending, active, deleted):
        self.inst_pool = inst_pool
        self.name = name
        self.token = token
        self.ip_address = ip_address
        # Serializes activate/deactivate so counter updates stay consistent.
        self.lock = asyncio.Lock()
        # Pods currently scheduled on this instance.
        self.pods = set()
        self.free_cores_mcpu = inst_pool.worker_capacity_mcpu
        # state: pending, active, deactivated (and/or deleted)
        self.pending = pending
        self.active = active
        self.deleted = deleted
        self.healthy = True
        self.last_updated = time.time()
        self.time_created = time.time()
        self.last_ping = time.time()
        log.info(f'{self.inst_pool.n_pending_instances} pending {self.inst_pool.n_active_instances} active workers')

    def unschedule(self, pod):
        """Remove *pod* from this instance and return its cores to the pool.

        Pool-wide free-core accounting is only touched while healthy, because
        mark_as_unhealthy already subtracted this instance's free cores.
        """
        assert not self.pending and self.active
        self.pods.remove(pod)
        if self.healthy:
            # Re-insert under the new free-core key (set is ordered by cores).
            self.inst_pool.instances_by_free_cores.remove(self)
            self.free_cores_mcpu += pod.cores_mcpu
            self.inst_pool.free_cores_mcpu += pod.cores_mcpu
            self.inst_pool.instances_by_free_cores.add(self)
            self.inst_pool.driver.changed.set()
        else:
            self.free_cores_mcpu += pod.cores_mcpu

    def schedule(self, pod):
        """Assign *pod* to this instance, consuming its cores."""
        assert not self.pending and self.active and self.healthy
        self.pods.add(pod)
        self.inst_pool.instances_by_free_cores.remove(self)
        self.free_cores_mcpu -= pod.cores_mcpu
        self.inst_pool.free_cores_mcpu -= pod.cores_mcpu
        assert self.inst_pool.free_cores_mcpu >= 0, (self.inst_pool.free_cores_mcpu, pod.cores_mcpu)
        self.inst_pool.instances_by_free_cores.add(self)
        # can't create more scheduling opportunities, don't set changed

    async def activate(self, ip_address):
        """Transition pending -> active once the worker reports its IP.

        Idempotent: returns immediately if already active or deleted.
        """
        async with self.lock:
            log.info(f'activating instance {self.name} after {time.time() - self.time_created} seconds since creation')
            if self.active:
                return
            if self.deleted:
                return
            if self.pending:
                self.pending = False
                self.inst_pool.n_pending_instances -= 1
                self.inst_pool.free_cores_mcpu -= self.inst_pool.worker_capacity_mcpu
            self.active = True
            self.ip_address = ip_address
            self.inst_pool.n_active_instances += 1
            self.inst_pool.instances_by_free_cores.add(self)
            self.inst_pool.free_cores_mcpu += self.inst_pool.worker_capacity_mcpu
            # Wake the scheduler: new capacity is available.
            self.inst_pool.driver.changed.set()
            await self.inst_pool.driver.db.instances.update_record(
                self.name, ip_address=ip_address)
            log.info(f'{self.inst_pool.n_pending_instances} pending {self.inst_pool.n_active_instances} active workers')

    async def deactivate(self):
        """Take the instance out of service, unscheduling and requeueing its pods."""
        async with self.lock:
            log.info(f'deactivating instance {self.name}')
            start = time.time()
            if self.pending:
                # Never became active; just undo the pending accounting.
                self.pending = False
                self.inst_pool.n_pending_instances -= 1
                self.inst_pool.free_cores_mcpu -= self.inst_pool.worker_capacity_mcpu
                assert not self.active
                log.info(f'{self.inst_pool.n_pending_instances} pending {self.inst_pool.n_active_instances} active workers')
                return
            if not self.active:
                return
            self.mark_as_unhealthy()
            pod_list = list(self.pods)
            await asyncio.gather(*[p.unschedule() for p in pod_list])
            assert not self.pods
            # Requeue the displaced pods for scheduling elsewhere.
            for pod in pod_list:
                asyncio.ensure_future(pod.put_on_ready())
            self.active = False
            log.info(f'took {time.time() - start} seconds to deactivate {self.name}')
            log.info(f'{self.inst_pool.n_pending_instances} pending {self.inst_pool.n_active_instances} active workers')

    def update_timestamp(self):
        """Refresh last_updated, re-keying this instance in the pool's time-ordered set."""
        if self in self.inst_pool.instances:
            self.inst_pool.instances.remove(self)
            self.last_updated = time.time()
            self.inst_pool.instances.add(self)

    def mark_as_unhealthy(self):
        """Remove an active instance from scheduling without deactivating it."""
        if not self.active or not self.healthy:
            return
        self.inst_pool.instances.remove(self)
        self.healthy = False
        self.inst_pool.instances.add(self)
        if self in self.inst_pool.instances_by_free_cores:
            self.inst_pool.instances_by_free_cores.remove(self)
            self.inst_pool.n_active_instances -= 1
            self.inst_pool.free_cores_mcpu -= self.free_cores_mcpu
        self.update_timestamp()

    def mark_as_healthy(self):
        """Record a successful ping and re-add the instance to scheduling if needed."""
        self.last_ping = time.time()
        if not self.active or self.healthy:
            return
        self.inst_pool.instances.remove(self)
        self.healthy = True
        self.inst_pool.instances.add(self)
        if self not in self.inst_pool.instances_by_free_cores:
            self.inst_pool.n_active_instances += 1
            self.inst_pool.instances_by_free_cores.add(self)
            self.inst_pool.free_cores_mcpu += self.free_cores_mcpu
            self.inst_pool.driver.changed.set()

    async def remove(self):
        """Deactivate and forget the instance entirely (pool sets + DB row)."""
        log.info(f'removing instance {self.name}')
        await self.deactivate()
        self.inst_pool.instances.remove(self)
        if self.token in self.inst_pool.token_inst:
            del self.inst_pool.token_inst[self.token]
        await self.inst_pool.driver.db.instances.delete_record(self.name)

    async def handle_call_delete_event(self):
        """React to a cloud 'delete' event: deactivate and mark deleted."""
        log.info(f'handling call delete event for {self.name}')
        await self.deactivate()
        self.deleted = True
        self.update_timestamp()

    async def delete(self):
        """Deactivate and delete the underlying VM; 404 means already gone."""
        log.info(f'deleting instance {self.name}')
        if self.deleted:
            return
        await self.deactivate()
        try:
            await self.inst_pool.driver.gservices.delete_instance(self.name)
        except googleapiclient.errors.HttpError as e:
            if e.resp['status'] == '404':
                log.info(f'instance {self.name} was already deleted')
            else:
                raise e
        self.deleted = True

    async def handle_preempt_event(self):
        """React to a preemption notice by deleting the VM."""
        log.info(f'handling preemption event for {self.name}')
        await self.delete()
        self.update_timestamp()

    async def heal(self):
        """Reconcile local state with reality: ping the worker, else ask GCE."""
        log.info(f'healing instance {self.name}')

        async def _heal_gce():
            # Reconcile against the VM's status as reported by GCE.
            try:
                spec = await self.inst_pool.driver.gservices.get_instance(self.name)
            except googleapiclient.errors.HttpError as e:
                if e.resp['status'] == '404':
                    await self.remove()
                    return
            # NOTE(review): a non-404 HttpError is swallowed above, leaving
            # `spec` unbound here (NameError) — likely should re-raise; confirm.
            status = spec['status']
            log.info(f'heal gce: machine {self.name} status {status}')
            # preempted goes into terminated state
            if status == 'TERMINATED' and self.deleted:
                log.info(f'instance {self.name} is terminated and deleted, removing')
                await self.remove()
                return
            if status in ('TERMINATED', 'STOPPING'):
                log.info(f'instance {self.name} is {status}, deactivating')
                await self.deactivate()
            if status == 'TERMINATED' and not self.deleted:
                log.info(f'instance {self.name} is {status} and not deleted, deleting')
                await self.delete()
            if status == 'RUNNING' and self.active and not self.healthy and time.time() - self.last_ping > 60 * 5:
                log.info(f'instance {self.name} is {status} and not healthy and last ping was greater than 5 minutes, deleting')
                await self.delete()
            if (status in ('STAGING', 'RUNNING')) and not self.active and time.time() - self.time_created > 60 * 5:
                log.info(f'instance {self.name} is {status} and not active and older than 5 minutes, deleting')
                await self.delete()
            self.update_timestamp()

        if self.ip_address and self.active:
            try:
                async with aiohttp.ClientSession(
                        raise_for_status=True, timeout=aiohttp.ClientTimeout(total=5)) as session:
                    await session.get(f'http://{self.ip_address}:5000/healthcheck')
                self.mark_as_healthy()
                self.update_timestamp()
            except asyncio.CancelledError:  # pylint: disable=try-except-raise
                raise
            except Exception as err:  # pylint: disable=broad-except
                log.info(f'healthcheck failed for {self.name} due to err {err}; asking gce instead')
                self.mark_as_unhealthy()
                await _heal_gce()
        else:
            await _heal_gce()

    def __str__(self):
        return self.name
| [
"daniel.zidan.king@gmail.com"
] | daniel.zidan.king@gmail.com |
db2b8203bfcc6e719473a13b065bcf0d51007f50 | b15fd3fa4431c3bc0e9098b8ece4cb1e3bb45d50 | /data_providers/downloader.py | ec29f6d09b6514f00c036b6841ea965efcc7c89b | [] | no_license | SoulDuck/DenseNet | 0cdbb86f0cb4a685585f562374c894c165b3459f | 96581dd8e2df973560cf69ff99da211e91af55bb | refs/heads/master | 2021-07-10T04:22:31.868745 | 2017-10-06T13:23:57 | 2017-10-06T13:23:57 | 105,623,435 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | import sys ,os
from urllib import urlretrieve
import tarfile
import zipfile
def report_download_progress(count, block_size, total_size):
    """urlretrieve reporthook: rewrite the current line with download progress."""
    fraction = float(count * block_size) / total_size
    sys.stdout.write("\r {0:1%} already downloader".format(fraction))
    sys.stdout.flush()
def download_data_url(url, download_dir):
filename = url.split('/')[-1]
file_path = os.path.join(download_dir , filename)
if not os.path.exists(file_path):
try:
os.makedirs(download_dir)
except Exception :
pass
print "Download %s to %s" %(url , file_path)
file_path , _ = urlretrieve(url=url,filename=file_path,reporthook=report_download_progress)
print file_path
print('\nExtracting files')
if file_path.endswith(".zip"):
zipfile.ZipFile(file=file_path , mode="r").extracall(download_dir)
elif file_path.endswith(".tar.gz" , ".tgz"):
tarfile.open(name=file_path , mode='r:gz').extractall(download_dir)
| [
"plznw4me@naver.com"
] | plznw4me@naver.com |
f23cb9acdc6c776111b450eedf217b61c528be40 | 198b4e4339464dff21547f3ad4341f711f688a19 | /src/migrations/0003-populate-is-fields.py | b0d1da577544efaa5bb56a49bff03e0edaf27510 | [] | no_license | serversquared/VirtualMimic | f5a412cf359e279daf40ec2f60524dcfdcedf1e5 | 5370b0c5752b83f6b07db11a6d1e8c6f44564ba4 | refs/heads/master | 2021-01-10T04:18:02.244956 | 2015-11-09T09:15:21 | 2015-11-09T09:15:21 | 45,758,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | from yoyo import step
# Backfill the denormalised is_input flag: a node is an input if any
# nodes_to_nodes edge references it in the `input` column.
step("""
UPDATE nodes
SET is_input=1
WHERE rowid
IN (SELECT input FROM nodes_to_nodes)
""")
# Likewise, a node is a response if any edge references it in `response`.
step("""
UPDATE nodes
SET is_response=1
WHERE rowid
IN (SELECT response FROM nodes_to_nodes)
""")
# Dead code kept for reference: the JOIN-based formulation below (with
# rollback steps) is not valid in SQLite, hence the subquery versions above.
''' #this doesn't work in sqlite
step("""UPDATE nodes n
JOIN nodes_to_nodes n2n
ON n.rowid=n2n.response
SET n.is_response=1""",
"UPDATE nodes SET is_response=0")
step("""UPDATE nodes n
JOIN nodes_to_nodes n2n
ON n.rowid=n2n.input
SET n.is_input=1""",
"UPDATE nodes SET is_input=0")
'''
| [
"shelvacu@gmail.com"
] | shelvacu@gmail.com |
3b8cad69619e1fd9a55f91bf856f0e57c24a48d5 | 9c06b4bfc52aefad5a648548e0c1b88536c2c5cf | /day8_caesar-cipher/main.py | d2e7f05e8cf46950ca7101c02f999b04a7b7a54d | [] | no_license | shivamkchoudhary/100_days_of_python | d46273b25a4d1e263ed4a15022722a813f4da4f0 | 8e07fcb02da266badfa3e977159dc2fa2f9be537 | refs/heads/main | 2023-02-27T20:21:55.287946 | 2021-02-04T14:16:41 | 2021-02-04T14:16:41 | 328,116,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
def caesar(start_text, shift_amount, cipher_direction):
end_text = ""
if cipher_direction == "decode":
shift_amount *= -1
for char in start_text:
if char in alphabet:
position = alphabet.index(char)
new_position = position + shift_amount
end_text += alphabet[new_position]
else:
end_text += char
print(f"Here's the {cipher_direction}d result: {end_text}")
from art import logo
print(logo)
# Interactive loop: keep encoding/decoding messages until the user opts out.
should_end = False
while not should_end:
    direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
    text = input("Type your message:\n").lower()
    shift = int(input("Type the shift number:\n"))
    #What if the user enters a shift that is greater than the number of letters in the alphabet?
    # Reduce the shift modulo 26 so indexing into the doubled alphabet is safe.
    shift = shift % 26
    caesar(start_text=text, shift_amount=shift, cipher_direction=direction)
    restart = input("Type 'yes' if you want to go again. Otherwise type 'no'.\n")
    if restart == "no":
        should_end = True
        print("Goodbye")
| [
"noreply@github.com"
] | shivamkchoudhary.noreply@github.com |
cc24ade3dbe16d3b5a03059c8ce9d9f80a2e3893 | 4903f9eb05dc427fd30afffc91a86d514b5675c5 | /text.py | b3e18078ba449e2a7352a78de60bf5689c4ae614 | [] | no_license | chuiming24/Xiaomi-miui-forum-automatic-reply-script | dc3df7bcf5ba917492c1b7d116effa92a759b98c | 7ffcc8510b9afb3f856bc66b877a809cd179e71d | refs/heads/master | 2020-03-21T22:12:27.943082 | 2018-06-29T07:20:22 | 2018-06-29T07:20:22 | 139,111,423 | 0 | 0 | null | 2018-06-29T06:42:35 | 2018-06-29T06:42:34 | null | UTF-8 | Python | false | false | 785 | py | import requests
import sys
import io
#sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8') # change stdout's default encoding
# Page that requires a logged-in session.
url = 'http://www.miui.com/forum-705-1.html'
# Cookie string copied from the browser after logging in.
cookie_str = ''
# Parse the cookie string into a dict for requests.
cookies = {}
for line in cookie_str.split(';'):
    line = line.strip()
    # BUG FIX: skip blank/malformed segments -- with the default empty
    # cookie_str, split('=', 1) on '' raised ValueError; stripping also
    # removes the space that follows each ';' in real cookie headers.
    if '=' not in line:
        continue
    key, value = line.split('=', 1)
    cookies[key] = value
# Set the request headers.
headers = {'User-agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'}
# Send the GET request with the headers and cookies attached.
resp = requests.get(url, headers = headers, cookies = cookies)
print(resp.content.decode('utf-8'))
| [
"794960040@qq.com"
] | 794960040@qq.com |
e87825d506967f4e8307c20299f2712bab770efa | 0a9acbfe588e908d4a98336a1ae7a9281cb907df | /src/routes/compliance.py | 8f770cd32083bbb524cdab853b9aefa13a0f129d | [] | no_license | JoelAtDeluxe/template-flask-microservice | d246b8596f0cd3a29ebc0d9d1f4b0697b4584570 | 1e942f4f51b7215704502c253862225a0c2f9e6a | refs/heads/master | 2023-02-08T09:08:13.595677 | 2019-07-18T22:36:17 | 2019-07-18T22:36:17 | 193,128,944 | 0 | 0 | null | 2023-02-02T06:32:33 | 2019-06-21T16:23:32 | Python | UTF-8 | Python | false | false | 1,024 | py | from flask import (
Blueprint, request, redirect, url_for, current_app, jsonify, Response
)
from constants import STATE_NAME
bp = Blueprint('compliance', __name__, url_prefix='')
@bp.route("/")
def index() -> Response:
    """Root path: forward to the Swagger UI documentation."""
    return oapi_docs()
@bp.route('/about/docs')
def oapi_docs() -> Response:
    """Redirect to the statically served Swagger UI page."""
    return redirect(url_for('static', filename='swagger/index.html'))
@bp.route('/about/docs/swagger.json')
def swagger_json() -> Response:
    """Serve the OpenAPI specification consumed by the Swagger UI."""
    # Option 1: To return an external file:
    return redirect(url_for('static', filename="swagger.yaml"))
    # Option 2: To read from docstrings (currently doesn't work -- but this is possible)
    # return jsonify(current_app)
@bp.route('/about')
def about() -> Response:
    """Report the running application's version as JSON."""
    return jsonify({
        "version": current_app.config[STATE_NAME].config.app_version
    })
@bp.route("/config")
def echo_config() -> Response:
    """Return a JSON representation of the loaded configuration."""
    return jsonify(current_app.config[STATE_NAME].config.get_config())
| [
"Hikash@gmail.com"
] | Hikash@gmail.com |
ee927a52b3654b7e823f1724aab3e3a4a33619fe | b7dca2e137c16bc2583e11f9a0d231a63642b04c | /poradnia/users/migrations/0027_alter_user_notify_old_cases.py | 1125ee02d0e7fb2b9d4431ded7a0d98385876394 | [
"MIT"
] | permissive | watchdogpolska/poradnia | 4ebc521e8ccfab0113d1a47cdf4469b758e90bdd | d679321a764218002e2c87ac71dd549208949b7e | refs/heads/master | 2023-08-16T20:29:31.720709 | 2023-04-28T18:50:09 | 2023-04-28T18:50:09 | 35,786,536 | 24 | 23 | MIT | 2023-07-13T06:59:38 | 2015-05-17T23:06:17 | JavaScript | UTF-8 | Python | false | false | 551 | py | # Generated by Django 3.2.18 on 2023-04-27 17:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("users", "0026_user_nicename"),
]
operations = [
migrations.AlterField(
model_name="user",
name="notify_old_cases",
field=models.BooleanField(
default=False,
help_text="Whether or not to notify user about old cases",
verbose_name="Notify about old cases",
),
),
]
| [
"piotr.iwanski@gmail.com"
] | piotr.iwanski@gmail.com |
2cd1a1a76fe6766a6854de9064bedf52a1da8564 | a2f9d55d686425c4b47ce150aa1a23ea933055cc | /crossposting/spawnprocess.py | 0fa69d71efbd3ebead59242be16e3f573bf5535b | [] | no_license | wd5/blombum | b31c581f2c36c220164901189be1ba95a8341e0e | fe11efb369fe2cec67af1e79bc8935a266df2f80 | refs/heads/master | 2020-12-25T02:23:30.297939 | 2010-06-29T10:03:31 | 2010-06-29T10:03:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | #!/usr/bin/python
import subprocess
# Fire-and-forget: kick off a search-index rebuild (not waited on).
# NOTE: this is Python 2 code (print statement, str passed to stdin below);
# paths are hard-coded to the author's machine.
subprocess.Popen([
    '/home/nide/code/kanobu/src/manage.py', 'rebuildindex', '--site_id', '4', '--parse', 'none'
])
# Run the Node crossposting script, feeding it a JS-literal array on stdin
# and blocking until it exits.
subprocess.Popen([
    'node', '/home/nide/code/blombum/crossposting/test.js'
], stdin = subprocess.PIPE).communicate('[{somevar: 1}, {somevar: 44}, {somevar: 22}]')
print 'kuku'
"nide@inbox.ru"
] | nide@inbox.ru |
03118278115ad6ae8c93d5f01c0608692b75ac87 | 28a09828c0e74950fddc31312a0f0d564cd5fcf8 | /qa/rpc-tests/listtransactions.py | c2c089651629658c2f354d1694265ce91eeebb3d | [
"MIT"
] | permissive | jestevez/community-source | ba58c0a26ed5f8cebe6e6b5f454b42f9d7b46a8c | d9664b55222acf99f4d9afd28205f14bcc8e238e | refs/heads/master | 2020-05-06T20:20:39.739051 | 2019-03-05T19:05:06 | 2019-03-05T19:05:06 | 180,233,996 | 2 | 0 | MIT | 2019-04-08T21:14:38 | 2019-04-08T21:14:38 | null | UTF-8 | Python | false | false | 10,110 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
    """Deserialize a hex-encoded raw transaction into a CTransaction."""
    stream = BytesIO(hex_str_to_bytes(hexstring))
    parsed = CTransaction()
    parsed.deserialize(stream)
    return parsed
class ListTransactionsTest(BitcoinTestFramework):
    """Functional test exercising the ``listtransactions`` wallet RPC."""

    def __init__(self):
        # Four nodes, continuing from the framework's cached chain.
        super().__init__()
        self.num_nodes = 4
        self.setup_clean_chain = False

    def run_test(self):
        """Check listtransactions output for sends, receives, sendmany and watch-only."""
        # Simple send, 0 to 1:
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid":txid},
                            {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"txid":txid},
                            {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
        # mine a block, confirmations should change:
        self.nodes[0].generate(1)
        self.sync_all()
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid":txid},
                            {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"txid":txid},
                            {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
        # send-to-self: shows up as both a send and a receive entry.
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid":txid, "category":"send"},
                            {"amount":Decimal("-0.2")})
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid":txid, "category":"receive"},
                            {"amount":Decimal("0.2")})
        # sendmany from node1: twice to self, twice to node2:
        send_to = { self.nodes[0].getnewaddress() : 0.11,
                    self.nodes[1].getnewaddress() : 0.22,
                    self.nodes[0].getaccountaddress("from1") : 0.33,
                    self.nodes[1].getaccountaddress("toself") : 0.44 }
        txid = self.nodes[1].sendmany("", send_to)
        self.sync_all()
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"send","amount":Decimal("-0.11")},
                            {"txid":txid} )
        assert_array_result(self.nodes[0].listtransactions(),
                            {"category":"receive","amount":Decimal("0.11")},
                            {"txid":txid} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"send","amount":Decimal("-0.22")},
                            {"txid":txid} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"receive","amount":Decimal("0.22")},
                            {"txid":txid} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"send","amount":Decimal("-0.33")},
                            {"txid":txid} )
        assert_array_result(self.nodes[0].listtransactions(),
                            {"category":"receive","amount":Decimal("0.33")},
                            {"txid":txid, "account" : "from1"} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"send","amount":Decimal("-0.44")},
                            {"txid":txid, "account" : ""} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"receive","amount":Decimal("0.44")},
                            {"txid":txid, "account" : "toself"} )
        # Watch-only: import a multisig script and confirm receives are only
        # visible when include_watchonly is set.
        multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
        self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
        txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
        self.nodes[1].generate(1)
        self.sync_all()
        assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
        assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
                            {"category":"receive","amount":Decimal("0.1")},
                            {"txid":txid, "account" : "watchonly"} )

        # rbf is disabled in Community Core
        # self.run_rbf_opt_in_test()

    # Check that the opt-in-rbf flag works properly, for sent and received
    # transactions.
    def run_rbf_opt_in_test(self):
        """Verify the bip125-replaceable field across opt-in/opt-out chains.

        Currently unused (RBF disabled above); kept for reference.
        """
        # Check whether a transaction signals opt-in RBF itself
        def is_opt_in(node, txid):
            rawtx = node.getrawtransaction(txid, 1)
            for x in rawtx["vin"]:
                if x["sequence"] < 0xfffffffe:
                    return True
            return False

        # Find an unconfirmed output matching a certain txid
        def get_unconfirmed_utxo_entry(node, txid_to_match):
            utxo = node.listunspent(0, 0)
            for i in utxo:
                if i["txid"] == txid_to_match:
                    return i
            return None

        # 1. Chain a few transactions that don't opt-in.
        txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        assert(not is_opt_in(self.nodes[0], txid_1))
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})

        # Tx2 will build off txid_1, still not opting in to RBF.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)

        # Create tx2 using createrawtransaction
        inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.999}
        tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
        txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)

        # ...and check the result
        assert(not is_opt_in(self.nodes[1], txid_2))
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})

        # Tx3 will opt-in to RBF: set nSequence below 0xfffffffe by hand.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
        inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[1].getnewaddress(): 0.998}
        tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx3_modified = txFromHex(tx3)
        tx3_modified.vin[0].nSequence = 0
        tx3 = bytes_to_hex_str(tx3_modified.serialize())
        tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
        txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)

        assert(is_opt_in(self.nodes[0], txid_3))
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})

        # Tx4 will chain off tx3. Doesn't signal itself, but depends on one
        # that does.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
        inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.997}
        tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
        txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)

        assert(not is_opt_in(self.nodes[1], txid_4))
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})

        # Replace tx3, and check that tx4 becomes unknown
        tx3_b = tx3_modified
        tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
        tx3_b = bytes_to_hex_str(tx3_b.serialize())
        tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
        txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
        assert(is_opt_in(self.nodes[0], txid_3b))

        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})

        # Check gettransaction as well:
        for n in self.nodes[0:2]:
            assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")

        # After mining a transaction, it's no longer BIP125-replaceable
        self.nodes[0].generate(1)
        assert(txid_3b not in self.nodes[0].getrawmempool())
        assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
        assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
# Standard functional-test entry point.
if __name__ == '__main__':
    ListTransactionsTest().main()
| [
"48128427+thecripcommunity@users.noreply.github.com"
] | 48128427+thecripcommunity@users.noreply.github.com |
1db9cd06ecd2de3fa93a324083683bf0ef4d8763 | 60088f676a9ba7dd181e4c9c02e71c022a870200 | /minimal-mvt-aiohttp-pg.py | af2207bcfd26a75490f7f5fd537e8d57f29e4972 | [] | no_license | juanrmn/minimal-mvt | a91556d74c2911fd3cec18f36b1b7886e6cb01ea | 9472adf4485a2e9371952b886135633a78eb5e9a | refs/heads/master | 2021-02-10T21:45:23.588336 | 2020-03-11T18:39:51 | 2020-03-11T18:39:51 | 244,422,404 | 0 | 0 | null | 2020-03-02T16:43:55 | 2020-03-02T16:43:55 | null | UTF-8 | Python | false | false | 1,851 | py | import sys
import signal
import asyncio
from aiohttp import web
import aiohttp_cors
import asyncpg
import logging
logging.basicConfig(level=logging.INFO)

# Database to connect to
DATABASE = {
    'user': 'postgres',
    'password': 'XXX',  # placeholder -- supply real credentials before running
    'host': 'XXX',      # placeholder host
    'port': '5432',
    'database': 'postgres'
}
# Table holding pre-rendered Mapbox vector tiles, keyed by (z, x, y).
TILES_TABLE = 'mvt_censustract'
async def get_pool(app):
    """Return the application-wide asyncpg pool, creating it on first use."""
    pool = app.get('db_pool')
    if not pool:
        pool = await asyncpg.create_pool(**DATABASE, loop=app.loop)
        app['db_pool'] = pool
    return pool
async def tile(request):
    """Serve one pre-rendered MVT tile for the requested z/x/y coordinates.

    Returns the raw tile bytes with the vector-tile content type; the body is
    None (empty response) when no row exists for the coordinates.
    """
    z = request.match_info['z']
    x = request.match_info['x']
    y = request.match_info['y']
    logging.info(f'- Requested tile: {z}/{x}/{y}')

    # SECURITY FIX: z/x/y come straight from the URL (untrusted input) and
    # were previously interpolated into the SQL text -- an injection vector.
    # Use asyncpg's $n parameters instead; int() also rejects non-numeric
    # coordinates up front.
    sql = f'''SELECT mvt FROM {TILES_TABLE}
              WHERE z = $1 AND x = $2 AND y = $3;'''

    db_pool = await get_pool(request.app)
    async with db_pool.acquire() as conn:
        res = await conn.fetchval(sql, int(z), int(x), int(y))

    logging.info(f'+ serving tile: {z}/{x}/{y}')
    return web.Response(
        body=res,
        content_type='application/vnd.mapbox-vector-tile'
    )
async def create_app():
    """Build the aiohttp application: one tile route plus permissive CORS."""
    app = web.Application()
    asyncio.set_event_loop(app.loop)
    app.add_routes([web.get('/{z}/{x}/{y}.{ext}', tile)])
    # Allow cross-origin requests from any host, with any method/header.
    permissive = aiohttp_cors.ResourceOptions(
        allow_methods='*',
        allow_credentials=True,
        allow_headers='*',
        expose_headers='*'
    )
    cors = aiohttp_cors.setup(app, defaults={'*': permissive})
    # CORS must be attached to each registered route individually.
    for registered_route in app.router.routes():
        cors.add(registered_route)
    return app
def main_exit_handler(*args, **kwargs):
    # Terminate cleanly when the process manager (e.g. gunicorn) sends SIGTERM.
    sys.exit(0)
signal.signal(signal.SIGTERM, main_exit_handler)
# Module-level `app`: gunicorn's aiohttp worker imports this module and serves it.
app = asyncio.run(create_app())
# Run script with:
# gunicorn minimal-mvt-aiohttp-pg:app -b localhost:8081 -w 1 --worker-class aiohttp.GunicornUVLoopWebWorker
| [
"juanr.gonzalez@gmail.com"
] | juanr.gonzalez@gmail.com |
c5628a99b88ae9056bddcc5bed38fbd8e17d0b8c | 212ba69f343ba25f3b525c3670d67d6338a3f803 | /old/problems/k_sum.py | 2ec6e49a5015aa42e9bdfb5d58e445abce29dfd5 | [] | no_license | jalexanderbryant/python-practice | 5fa6bad5ed78b3eaf4a84535ffeb16ea13319569 | 6a45a8411e9747642d022e4c8c1468ffd3e271df | refs/heads/master | 2021-07-20T06:58:46.805496 | 2020-11-01T21:14:01 | 2020-11-01T21:14:01 | 113,541,365 | 0 | 0 | null | 2021-04-20T18:53:54 | 2017-12-08T06:44:30 | Python | UTF-8 | Python | false | false | 911 | py | """
Given a list of numbers and a number k, return whether any two numbers from the list add up to k.
For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.
Bonus: Can you do this in one pass?
"""
def sum_to_k(list, k):
    """Return True if any two elements of `list` add up to `k`.

    Single pass, O(n) time / O(n) space: for each element, check whether its
    complement (k - element) has already been seen.

    Args:
        list: sequence of numbers (parameter name kept for backward
            compatibility, although it shadows the builtin).
        k: target sum.

    Returns:
        True if some pair of elements sums to k, otherwise False.
    """
    # A set suffices here: the original dict stored complements as values
    # but never read them.
    seen = set()
    for elem in list:
        if k - elem in seen:
            # An earlier element is exactly this element's complement.
            return True
        seen.add(elem)
    return False
if __name__ == '__main__':
    # Demo cases.  The label now reports the k actually passed to sum_to_k;
    # the original hard-coded 'k = 17' in every message, which was wrong for
    # the k=9 and k=7 calls.
    cases = [
        ([10, 15, 3, 7], 17),
        ([10, 15, 3, 8, -1], 9),
        ([10, 15, 3, 8, -1], 7),
        ([10, 1, 31, 8, -11], 7),
    ]
    for nums, k in cases:
        print('list = {}, k = {}'.format(nums, k),
              'result={}'.format(sum_to_k(nums, k)))
"j.alexanderbryant@knights.ucf.edu"
] | j.alexanderbryant@knights.ucf.edu |
51da8e312770d0a2581c84ac2ef664dca607d04f | 3d6bb3df9ca1d0de6f749b927531de0790aa2e1d | /full_segmentation_histogram_creator.py | 97bc397018dc6ce79e45c96098caf6d100fa396d | [] | no_license | standardgalactic/kuhner-python | da1d66a6d638a9a379ba6bae2affdf151f8c27c5 | 30b73554cc8bc9d532c8108b34dd1a056596fec7 | refs/heads/master | 2023-07-07T04:18:30.634268 | 2020-04-06T04:37:48 | 2020-04-06T04:37:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,715 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 1 12:43:28 2016
@author: lpsmith
"""
from __future__ import division
from os import walk
import lucianSNPLibrary as lsl
# Minimum number of SNP markers a segment must have to be counted at all.
nsamples_min = 10 #Arbitrary value: minimum number of samples we require
# Mean-log2-ratio values, bucketed by how many markers support each segment.
data10_12 = []
data13_20 = []
data21_50 = []
data51_500 = []
data501_5000 = []
data5001_50000 = []
data50001_plus = []
dataall =[]
#fullseg_filenames = ["three_formal_cy_omni_mix3_b37RB.txt"]
fullseg_filenames = []
# Collect every file name from the top level of the segmentation output dir
# (the `break` stops os.walk after the first directory level).
for (_, _, f) in walk("full_segmentation_output/"):
    fullseg_filenames += f
    break
# Segments whose Partek and raw-SNP marker counts disagree are logged here.
# NOTE(review): this handle is never closed below -- relies on interpreter exit.
discrepancies = open("full_segmentation_histograms/discrepancies.txt", "w")
# Read every tab-separated segmentation file and bucket each segment's mean
# log2 ratio by its marker count.  (Python 2 print statements below.)
for file in fullseg_filenames:
    handle = open("full_segmentation_output/" + file, "r")
    for line in handle:
        # Columns: chrom, start, end, Partek mean, Partek nmarkers,
        # raw nmarkers, mean log2 ratio.  `chr` shadows the builtin.
        (chr, start, end, pmean, pnmarkers, nmarkers, meanlog2r) = line.rstrip().split("\t")
        if (chr=="chr"):
            continue
        if (pnmarkers != "?"):
            pnmarkers = int(pnmarkers)
            nmarkers = int(nmarkers)
            # Log any Partek-vs-raw marker count mismatch, prefixed by file name.
            if (pnmarkers != nmarkers):
                print "Anomaly in", file, ": different nmarkers from partek vs. raw SNP data:"
                print "  ", line
                line = file + "\t" + line
                discrepancies.write(line)
        # Skip segments supported by too few markers.
        # NOTE(review): if pnmarkers == "?", nmarkers is still a string here and
        # this comparison relies on Python 2 mixed-type ordering -- confirm intent.
        if (nmarkers < nsamples_min):
            continue
        meanlog2r = float(meanlog2r)
        dataall.append(meanlog2r)
        # Bucket by marker-count range (10-12, 13-20, ..., 50001+).
        if (nmarkers < 13):
            data10_12.append(meanlog2r)
        elif (nmarkers < 21):
            data13_20.append(meanlog2r)
        elif (nmarkers < 51):
            data21_50.append(meanlog2r)
        elif (nmarkers < 501):
            data51_500.append(meanlog2r)
        elif (nmarkers < 5001):
            data501_5000.append(meanlog2r)
        elif (nmarkers < 50001):
            data5001_50000.append(meanlog2r)
        elif (nmarkers < 500001):
            data50001_plus.append(meanlog2r)
# Histogram bin width shared by every bucket.
binwidth = 0.001
# Write one histogram per marker-count bucket, plus one over all segments.
lsl.createPrintAndSaveHistogram(data10_12, "full_segmentation_histograms/data10_12.txt", binwidth)
lsl.createPrintAndSaveHistogram(data13_20, "full_segmentation_histograms/data13_20.txt", binwidth)
lsl.createPrintAndSaveHistogram(data21_50, "full_segmentation_histograms/data21_50.txt", binwidth)
lsl.createPrintAndSaveHistogram(data51_500, "full_segmentation_histograms/data51_500.txt", binwidth)
lsl.createPrintAndSaveHistogram(data501_5000, "full_segmentation_histograms/data501_5000.txt", binwidth)
lsl.createPrintAndSaveHistogram(data5001_50000, "full_segmentation_histograms/data5001_50000.txt", binwidth)
lsl.createPrintAndSaveHistogram(data50001_plus, "full_segmentation_histograms/data50001_plus.txt", binwidth)
lsl.createPrintAndSaveHistogram(dataall, "full_segmentation_histograms/dataall.txt", binwidth)
| [
"lpsmith@uw.edu"
] | lpsmith@uw.edu |
e8714022d3cbf4892839c9eca75ddc708a6f2f80 | e312a1a9b17ad4a85577d504f48761888d9c30b2 | /catkin_ws/build/rosserial_server/catkin_generated/generate_cached_setup.py | 9d0af48f3eaf84d5e840dda83dd395d2daa619bf | [] | no_license | scott364/Mobilebot | 1e2092c01fcfb5dd72a5a36f149f1ff5efaaa828 | 53bd6349c1e1d12690c59e8a18ffb8efc6cecfb8 | refs/heads/master | 2023-06-24T22:09:29.709962 | 2021-07-27T22:16:51 | 2021-07-27T22:16:51 | 390,138,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# NOTE: auto-generated by catkin for this workspace; the absolute paths below
# are machine-specific and baked in at generation time.
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in '/home/scott/catkin_ws/devel;/opt/ros/melodic'.split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script
# Render the cached environment for this package's env.sh and write it out.
code = generate_environment_script('/home/scott/catkin_ws/devel/.private/rosserial_server/env.sh')
output_filename = '/home/scott/catkin_ws/build/rosserial_server/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    # print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))
# Mark the generated shell script as executable for its owner.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"scsc1908@colorado.edu"
] | scsc1908@colorado.edu |
f11ba3a4bd09e05c6f1c859a5f312c8b52ff7989 | 2866c8f2a5d7b7882ad72519261b511dbd1bf8c3 | /test.py | 9584b3bdfab1bf50998a17fa2d141c104e62e476 | [] | no_license | ikki407/Otto | f1d878273d793ebaafc32d6eb55d60188f8b471b | 0a686b88425336c70698585724e102f772062df7 | refs/heads/master | 2021-01-11T11:03:23.315780 | 2015-12-30T15:48:45 | 2015-12-30T15:48:45 | 37,064,088 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | #!/usr/bin/env python
from __future__ import print_function
import otto
import random
from hashlib import sha1
from sklearn.feature_extraction import DictVectorizer
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.metrics import make_scorer
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import cross_val_score
print('Loading data...')
# Accumulate every training sample's feature dict and label from the
# project-local `otto` data loader.
allX = []
ally = []
for Id,X,y in otto.train_set:
    allX.append(X)
    ally.append(y)
print('Number of samples: %d' % len(ally))
# Grid over the GBDT subsample ratio only; the key addresses the estimator
# through its pipeline step name.
params = dict(gradientboostingclassifier__subsample=[1.0,0.5])
dv = DictVectorizer(sparse=False)
gbdt = GradientBoostingClassifier(max_features = 5, random_state = 0, verbose = True)
clf = make_pipeline(dv, gbdt)
print('Grid Search')
# 2-fold CV, optimize log-loss, refit the best setting on the full data.
gsearch = GridSearchCV(clf,param_grid=params,cv=2,refit=True,scoring="log_loss", n_jobs = -1)
gsearch.fit(allX,ally)
print(gsearch.best_estimator_)
print('Writing submit file...')
otto.create_submit_file(gsearch.best_estimator_, 'sub_grid1.csv.gz')
# Fingerprint the submission file (Python 2 str concatenation with bytes).
print(sha1("otto\0" + open('sub_grid1.csv.gz','rb').read()).hexdigest())
| [
"IkkiTanaka@Ikki-Tanakano-MacBook-Pro.local"
] | IkkiTanaka@Ikki-Tanakano-MacBook-Pro.local |
99729f87bea17a7c98b4904b1d3c1b4d7b51042d | 0ec6a843bbfc56405bdbc63d285f6e5d65acb93b | /07_Cifar10_100_CNN/13_cifar100_CNN_With.py | 6e877b7bbae9badb15f5bfd50633ad45a81d6554 | [] | no_license | RichardMinsooGo-ML/TF1_2_machie_learning_MNIST_CNN | 579e7061af78a4e2a82eee678ce8a22d3b7b0129 | c22b7f8058939689060e4ce9c447111a4e3eb5f4 | refs/heads/master | 2022-12-15T16:42:37.426986 | 2020-09-11T01:19:00 | 2020-09-11T01:19:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,058 | py | import tensorflow as tf
import numpy as np
# CIFAR-10 데이터를 다운로드 받기 위한 keras의 helper 함수인 load_data 함수를 임포트합니다.
from tensorflow.keras.datasets.cifar100 import load_data
# Number of training epochs.
N_EPISODES = 20
# Samples per mini-batch.
batch_size = 100
# Utility that draws the next training batch.
def next_batch(num, data, labels):
    """Return `num` randomly chosen samples and their matching labels.

    Samples are drawn without replacement; `data` and `labels` must be
    indexable by integer position and have the same length.
    """
    # np.random.permutation replaces the original arange + shuffle + slice
    # sequence with a single call of identical semantics.
    idx = np.random.permutation(len(data))[:num]
    data_shuffle = [data[i] for i in idx]
    labels_shuffle = [labels[i] for i in idx]
    return np.asarray(data_shuffle), np.asarray(labels_shuffle)
# Defines the CNN model graph.
def BUILD_NETWORK_CNN(x):
    """Build the CIFAR-100 CNN graph (TF1 graph mode).

    Args:
        x: float32 placeholder of shape (None, 32, 32, 3) with input images.

    Returns:
        (y_pred, logits): softmax class probabilities and raw logits, both of
        shape (None, 100).

    Note: reads the module-level `keep_prob` placeholder for dropout.
    """
    # Input image.
    x_image = x
    with tf.name_scope('Conv_Layer_01'):
        # First convolutional layer: maps the 3-channel image to 64 features.
        W_conv1 = tf.Variable(tf.truncated_normal(shape=[5, 5, 3, 64], stddev=5e-2))
        b_conv1 = tf.Variable(tf.constant(0.1, shape=[64]))
        h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1)
    with tf.name_scope('Pool_Layer_01'):
        # First pooling layer (3x3 window, stride 2 -> 16x16).
        h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
    with tf.name_scope('Conv_Layer_02'):
        # Second convolutional layer: 64 -> 64 features.
        W_conv2 = tf.Variable(tf.truncated_normal(shape=[5, 5, 64, 64], stddev=5e-2))
        b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]))
        h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2)
    with tf.name_scope('Pool_Layer_02'):
        # Second pooling layer (stride 2 -> 8x8).
        h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
    with tf.name_scope('Conv_Layer_03'):
        # Third convolutional layer: 64 -> 128 features.
        W_conv3 = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 128], stddev=5e-2))
        b_conv3 = tf.Variable(tf.constant(0.1, shape=[128]))
        h_conv3 = tf.nn.relu(tf.nn.conv2d(h_pool2, W_conv3, strides=[1, 1, 1, 1], padding='SAME') + b_conv3)
    with tf.name_scope('Conv_Layer_04'):
        # Fourth convolutional layer: 128 -> 128 features.
        W_conv4 = tf.Variable(tf.truncated_normal(shape=[3, 3, 128, 128], stddev=5e-2))
        b_conv4 = tf.Variable(tf.constant(0.1, shape=[128]))
        h_conv4 = tf.nn.relu(tf.nn.conv2d(h_conv3, W_conv4, strides=[1, 1, 1, 1], padding='SAME') + b_conv4)
    with tf.name_scope('Conv_Layer_05'):
        # Fifth convolutional layer: 128 -> 128 features.
        W_conv5 = tf.Variable(tf.truncated_normal(shape=[3, 3, 128, 128], stddev=5e-2))
        b_conv5 = tf.Variable(tf.constant(0.1, shape=[128]))
        h_conv5 = tf.nn.relu(tf.nn.conv2d(h_conv4, W_conv5, strides=[1, 1, 1, 1], padding='SAME') + b_conv5)
    with tf.name_scope('Dense_Layer_01'):
        # Fully connected layer 1: after two downsamplings the 32x32 image is
        # an 8x8x128 feature map; map it to 384 features.
        W_fc1 = tf.Variable(tf.truncated_normal(shape=[8 * 8 * 128, 384], stddev=5e-2))
        b_fc1 = tf.Variable(tf.constant(0.1, shape=[384]))
        h_conv5_flat = tf.reshape(h_conv5, [-1, 8*8*128])
        h_fc1 = tf.nn.relu(tf.matmul(h_conv5_flat, W_fc1) + b_fc1)
        # Dropout controls model complexity / prevents feature co-adaptation.
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    with tf.name_scope('Output_Layer'):
        # Fully connected layer 2: 384 features -> 100 CIFAR-100 classes.
        W_fc2 = tf.Variable(tf.truncated_normal(shape=[384, 100], stddev=5e-2))
        b_fc2 = tf.Variable(tf.constant(0.1, shape=[100]))
        logits = tf.matmul(h_fc1_drop,W_fc2) + b_fc2
        y_pred = tf.nn.softmax(logits)
    return y_pred, logits
# Placeholders for input images, one-hot labels, and the dropout keep rate.
x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
y = tf.placeholder(tf.float32, shape=[None, 100])
keep_prob = tf.placeholder(tf.float32)
# Download (if needed) and load the CIFAR-100 data.
(X_train, Y_train), (X_test, Y_test) = load_data()
# Convert scalar labels (0..99) to one-hot encoding.
Y_train_one_hot = tf.squeeze(tf.one_hot(Y_train, 100),axis=1)
Y_test_one_hot = tf.squeeze(tf.one_hot(Y_test, 100),axis=1)
# Build the Convolutional Neural Network graph.
y_pred, logits = BUILD_NETWORK_CNN(x)
# Cross-entropy loss, minimized with RMSProp.
with tf.name_scope('Optimizer'):
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    optimizer = tf.train.RMSPropOptimizer(1e-3).minimize(loss)
# Accuracy: fraction of predictions matching the true class.
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Open a session and run the actual training.
with tf.Session() as sess:
    # Initialize all graph variables.
    sess.run(tf.global_variables_initializer())
    Total_batch = int(X_train.shape[0]/batch_size)
    # Materialize the one-hot labels once.  The original called
    # Y_train_one_hot.eval() for every batch, recomputing the same 50k-row
    # one-hot tensor thousands of times.
    Y_train_labels = Y_train_one_hot.eval()
    Y_test_labels = Y_test_one_hot.eval()
    for episode in range(N_EPISODES):
        total_cost = 0
        for i in range(Total_batch):
            batch = next_batch(batch_size, X_train, Y_train_labels)
            # Loss evaluated without dropout, purely for reporting.
            # (The original also evaluated a per-step train_accuracy that was
            # never used -- an extra full forward pass per batch, removed.)
            loss_print = loss.eval(feed_dict={x: batch[0], y: batch[1], keep_prob: 1.0})
            # One optimization step with 20% dropout.
            sess.run(optimizer, feed_dict={x: batch[0], y: batch[1], keep_prob: 0.8})
            total_cost += loss_print
        print("Epoch: %6d, Loss: %2.6f" % (episode+1, total_cost/Total_batch))
    # After training, report accuracy on the 10000 test images in ten
    # randomly-sampled chunks of 1000.
    test_accuracy = 0.0
    for i in range(10):
        test_batch = next_batch(1000, X_test, Y_test_labels)
        test_accuracy = test_accuracy + accuracy.eval(feed_dict={x: test_batch[0], y: test_batch[1], keep_prob: 1.0})
    test_accuracy = test_accuracy / 10
    print("Test Data Accuracy: %2.4f" % test_accuracy)
| [
"noreply@github.com"
] | RichardMinsooGo-ML.noreply@github.com |
25a3912d62dad734ce54ac0d003143bffb64ecc5 | 9f7a2d89efaabb52d9cede98db934b1660fab0ca | /exe49.py | 889c56b1de61f3f0a46fefd962dec12738f3ee96 | [] | no_license | WesleyTi/Exercicios-Python | 0be2d495d1e3b1ed81ee395b4b60a53f51fd0fbe | 48bda146dc80c2765da161c2c9a484fbf9aea435 | refs/heads/master | 2022-12-01T13:06:55.310291 | 2020-08-26T03:22:22 | 2020-08-26T03:22:22 | 290,366,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | #coding: utf-8
# Exercise 009 revisited: show the multiplication table for a number chosen
# by the user, this time using a for loop.
num = int(input('Digite um número para ver sua tabuada: '))
for fator in range(1, 11):
    # Inline the product instead of the intermediate `resultado` variable.
    print('{} x {:2} = {}'.format(num, fator, num * fator))
| [
"you@example.comwesley@worc.com.br"
] | you@example.comwesley@worc.com.br |
87eb88d102f76159683c01cee2a711c5e2d9b455 | 3fbd26091ebbc13913f9c7be1aaf10d477c79536 | /week01/zuoye/maoyan_scrapy/.history/manyan/manyan/spiders/maoyan_20200628205729.py | 1a0055be6d8db820c15c61f6e0e0a0a3b3f37f7a | [] | no_license | shuncon/Python001-class01 | d28faf3d5d8e9ea4cee93bcae7143a26fd8c472e | df19758181cdaf37f30d4b518600fc4612590499 | refs/heads/master | 2022-11-13T19:31:27.019214 | 2020-07-10T14:58:25 | 2020-07-10T14:58:25 | 273,135,541 | 0 | 0 | null | 2020-06-18T03:46:56 | 2020-06-18T03:46:55 | null | UTF-8 | Python | false | false | 63 | py | import scrapy
class Maoyanspider(scrapy.Spider):
    """Work-in-progress Maoyan spider; the spider name is still unset."""
    # Scrapy identifies spiders by this name -- must be filled in before use.
    name= ''
"1428834423@qq.com"
] | 1428834423@qq.com |
66e691f47c861d7a66d9fbbc7773a7d02a050be9 | 4792a301bbcd81e826d42068c3ba852f82a0642b | /slim/deployment/model_deploy.py | 5ac1e3ca368f06cd50ced295c771bd9c491ea73e | [] | no_license | fogfog2/deeplab | 813f6c820f851cc7967df85f650d1553e10ac030 | 17aa6f839c7f2aa6a58b8e9792f9df06282425f9 | refs/heads/master | 2022-12-11T17:03:33.291366 | 2020-09-13T08:10:10 | 2020-09-13T08:10:10 | 289,831,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,842 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deploy Slim models across multiple clones and replicas.
# TODO(sguada) docstring paragraph by (a) motivating the need for the file and
# (b) defining clones.
# TODO(sguada) describe the high-level components of model deployment.
# E.g. "each model deployment is composed of several parts: a DeploymentConfig,
# which captures A, B and C, an input_fn which loads data.. etc
To easily train a model on multiple GPUs or across multiple machines this
module provides a set of helper functions: `create_clones`,
`optimize_clones` and `deploy`.
Usage:
g = tf.Graph()
# Set up DeploymentConfig
config = model_deploy.DeploymentConfig(num_clones=2, clone_on_cpu=True)
# Create the global step on the device storing the variables.
with tf.device(config.variables_device()):
global_step = slim.create_global_step()
# Define the inputs
with tf.device(config.inputs_device()):
images, labels = LoadData(...)
inputs_queue = slim.data.prefetch_queue((images, labels))
# Define the optimizer.
with tf.device(config.optimizer_device()):
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Define the model including the loss.
def model_fn(inputs_queue):
images, labels = inputs_queue.dequeue()
predictions = CreateNetwork(images)
slim.losses.log_loss(predictions, labels)
model_dp = model_deploy.deploy(config, model_fn, [inputs_queue],
optimizer=optimizer)
# Run training.
slim.learning.train(model_dp.train_op, my_log_dir,
summary_op=model_dp.summary_op)
The Clone namedtuple holds together the values associated with each call to
model_fn:
* outputs: The return values of the calls to `model_fn()`.
* scope: The scope used to create the clone.
* device: The device used to create the clone.
DeployedModel namedtuple, holds together the values needed to train multiple
clones:
* train_op: An operation that run the optimizer training op and include
all the update ops created by `model_fn`. Present only if an optimizer
was specified.
* summary_op: An operation that run the summaries created by `model_fn`
and process_gradients.
* total_loss: A `Tensor` that contains the sum of all losses created by
`model_fn` plus the regularization losses.
* clones: List of `Clone` tuples returned by `create_clones()`.
DeploymentConfig parameters:
* num_clones: Number of model clones to deploy in each replica.
* clone_on_cpu: True if clones should be placed on CPU.
* replica_id: Integer. Index of the replica for which the model is
deployed. Usually 0 for the chief replica.
* num_replicas: Number of replicas to use.
* num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas.
* worker_job_name: A name for the worker job.
* ps_job_name: A name for the parameter server job.
TODO(sguada):
- describe side effect to the graph.
- what happens to summaries and update_ops.
- which graph collections are altered.
- write a tutorial on how to use this.
- analyze the possibility of calling deploy more than once.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow.compat.v1 as tf
import tf_slim as slim
# Public API of this module.
__all__ = ['create_clones',
           'deploy',
           'optimize_clones',
           'DeployedModel',
           'DeploymentConfig',
           'Clone',
          ]
# Namedtuple used to represent a clone during deployment.
Clone = collections.namedtuple('Clone',
                               ['outputs',  # Whatever model_fn() returned.
                                'scope',  # The scope used to create it.
                                'device',  # The device used to create.
                               ])
# Namedtuple used to represent a DeployedModel, returned by deploy().
DeployedModel = collections.namedtuple('DeployedModel',
                                       ['train_op',  # The `train_op`
                                        'summary_op',  # The `summary_op`
                                        'total_loss',  # The loss `Tensor`
                                        'clones',  # A list of `Clones` tuples.
                                       ])
# Default parameters for DeploymentConfig
# (mirrors the keyword defaults of DeploymentConfig.__init__).
_deployment_params = {'num_clones': 1,
                      'clone_on_cpu': False,
                      'replica_id': 0,
                      'num_replicas': 1,
                      'num_ps_tasks': 0,
                      'worker_job_name': 'worker',
                      'ps_job_name': 'ps'}
def create_clones(config, model_fn, args=None, kwargs=None):
  """Creates multiple clones according to config using a `model_fn`.

  The returned values of `model_fn(*args, **kwargs)` are collected along with
  the scope and device used to created it in a namedtuple
  `Clone(outputs, scope, device)`

  Note: it is assumed that any loss created by `model_fn` is collected at
  the tf.GraphKeys.LOSSES collection.

  To recover the losses, summaries or update_ops created by the clone use:
  ```python
  losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
  summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, clone.scope)
  update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
  ```

  The deployment options are specified by the config object and support
  deploying one or several clones on different GPUs and one or several replicas
  of such clones.

  The argument `model_fn` is called `config.num_clones` times to create the
  model clones as `model_fn(*args, **kwargs)`.

  If `config` specifies deployment on multiple replicas then the default
  tensorflow device is set appropriately for each call to `model_fn` and for
  the slim variable creation functions: model and global variables will be
  created on the `ps` device, the clone operations will be on the `worker`
  device.

  Args:
    config: A DeploymentConfig object.
    model_fn: A callable. Called as `model_fn(*args, **kwargs)`
    args: Optional list of arguments to pass to `model_fn`.
    kwargs: Optional list of keyword arguments to pass to `model_fn`.

  Returns:
    A list of namedtuples `Clone`.
  """
  clones = []
  args = args or []
  kwargs = kwargs or {}
  with slim.arg_scope([slim.model_variable, slim.variable],
                      device=config.variables_device()):
    # Create clones.
    for i in range(0, config.num_clones):
      with tf.name_scope(config.clone_scope(i)) as clone_scope:
        clone_device = config.clone_device(i)
        with tf.device(clone_device):
          # All clones after the first reuse the variables created by the
          # first clone, so the model weights are shared.
          with tf.variable_scope(tf.get_variable_scope(),
                                 reuse=True if i > 0 else None):
            outputs = model_fn(*args, **kwargs)
          clones.append(Clone(outputs, clone_scope, clone_device))
  return clones
def _gather_clone_loss(clone, num_clones, regularization_losses):
  """Gather the loss for a single clone.

  Args:
    clone: A Clone namedtuple.
    num_clones: The number of clones being deployed.
    regularization_losses: Possibly empty list of regularization_losses
      to add to the clone losses.

  Returns:
    A tensor for the total loss for the clone. Can be None.
  """
  # The return value.
  sum_loss = None
  # Individual components of the loss that will need summaries.
  clone_loss = None
  regularization_loss = None
  # Compute and aggregate losses on the clone device.
  with tf.device(clone.device):
    all_losses = []
    clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
    if clone_losses:
      clone_loss = tf.add_n(clone_losses, name='clone_loss')
      if num_clones > 1:
        # Scale by 1/num_clones so the sum over clones is an average.
        clone_loss = tf.div(clone_loss, 1.0 * num_clones,
                            name='scaled_clone_loss')
      all_losses.append(clone_loss)
    if regularization_losses:
      regularization_loss = tf.add_n(regularization_losses,
                                     name='regularization_loss')
      all_losses.append(regularization_loss)
    if all_losses:
      sum_loss = tf.add_n(all_losses)
  # Add the summaries out of the clone device block.
  if clone_loss is not None:
    tf.summary.scalar('/'.join(filter(None,
                                      ['Losses', clone.scope, 'clone_loss'])),
                      clone_loss)
  if regularization_loss is not None:
    tf.summary.scalar('Losses/regularization_loss', regularization_loss)
  return sum_loss
def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
                    **kwargs):
  """Compute the total loss and its gradients for a single clone.

  Args:
    optimizer: A tf.Optimizer object.
    clone: A Clone namedtuple.
    num_clones: The number of clones being deployed.
    regularization_losses: Possibly empty list of regularization_losses
      to add to the clone losses.
    **kwargs: Dict of kwarg to pass to compute_gradients().

  Returns:
    A tuple (clone_loss, clone_grads_and_vars).
    - clone_loss: A tensor for the total loss for the clone. Can be None.
    - clone_grads_and_vars: List of (gradient, variable) for the clone.
      Can be empty.
  """
  total_clone_loss = _gather_clone_loss(clone, num_clones,
                                        regularization_losses)
  gradients = None
  if total_clone_loss is not None:
    # Gradients are computed on the same device the clone was built on.
    with tf.device(clone.device):
      gradients = optimizer.compute_gradients(total_clone_loss, **kwargs)
  return total_clone_loss, gradients
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
    clones: List of `Clones` created by `create_clones()`.
    optimizer: An `Optimizer` object.
    regularization_losses: Optional list of regularization losses. If None it
      will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
      exclude them.
    **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
    A tuple (total_loss, grads_and_vars).
    - total_loss: A Tensor containing the average of the clone losses
      including the regularization loss.
    - grads_and_vars: A List of tuples (gradient, variable) containing the sum
      of the gradients for each variable.
  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  # NOTE(review): assumes at least one clone produced a loss; tf.add_n of an
  # empty list would raise otherwise.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
def deploy(config,
           model_fn,
           args=None,
           kwargs=None,
           optimizer=None,
           summarize_gradients=False):
  """Deploys a Slim-constructed model across multiple clones.

  The deployment options are specified by the config object and support
  deploying one or several clones on different GPUs and one or several replicas
  of such clones.

  The argument `model_fn` is called `config.num_clones` times to create the
  model clones as `model_fn(*args, **kwargs)`.

  The optional argument `optimizer` is an `Optimizer` object.  If not `None`,
  the deployed model is configured for training with that optimizer.

  If `config` specifies deployment on multiple replicas then the default
  tensorflow device is set appropriately for each call to `model_fn` and for
  the slim variable creation functions: model and global variables will be
  created on the `ps` device, the clone operations will be on the `worker`
  device.

  Args:
    config: A `DeploymentConfig` object.
    model_fn: A callable. Called as `model_fn(*args, **kwargs)`
    args: Optional list of arguments to pass to `model_fn`.
    kwargs: Optional list of keyword arguments to pass to `model_fn`.
    optimizer: Optional `Optimizer` object.  If passed the model is deployed
      for training with that optimizer.
    summarize_gradients: Whether or not add summaries to the gradients.

  Returns:
    A `DeployedModel` namedtuple.
  """
  # Gather initial summaries.
  summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
  # Create Clones.
  clones = create_clones(config, model_fn, args, kwargs)
  first_clone = clones[0]
  # Gather update_ops from the first clone. These contain, for example,
  # the updates for the batch_norm variables created by model_fn.
  update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone.scope)
  train_op = None
  total_loss = None
  # Loss aggregation / gradient application happen on the optimizer device.
  with tf.device(config.optimizer_device()):
    if optimizer:
      # Place the global step on the device storing the variables.
      with tf.device(config.variables_device()):
        global_step = slim.get_or_create_global_step()
      # Compute the gradients for the clones.
      total_loss, clones_gradients = optimize_clones(clones, optimizer)
      if clones_gradients:
        if summarize_gradients:
          # Add summaries to the gradients.
          summaries |= set(_add_gradients_summaries(clones_gradients))
        # Create gradient updates.
        grad_updates = optimizer.apply_gradients(clones_gradients,
                                                 global_step=global_step)
        update_ops.append(grad_updates)
        update_op = tf.group(*update_ops)
        # train_op yields the loss but only after all updates have run.
        with tf.control_dependencies([update_op]):
          train_op = tf.identity(total_loss, name='train_op')
    else:
      # No optimizer: only aggregate the losses (evaluation-style deployment).
      clones_losses = []
      regularization_losses = tf.get_collection(
          tf.GraphKeys.REGULARIZATION_LOSSES)
      for clone in clones:
        with tf.name_scope(clone.scope):
          clone_loss = _gather_clone_loss(clone, len(clones),
                                          regularization_losses)
          if clone_loss is not None:
            clones_losses.append(clone_loss)
          # Only use regularization_losses for the first clone
          regularization_losses = None
      if clones_losses:
        total_loss = tf.add_n(clones_losses, name='total_loss')
    # Add the summaries from the first clone. These contain the summaries
    # created by model_fn and either optimize_clones() or _gather_clone_loss().
    summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
                                       first_clone.scope))
  if total_loss is not None:
    # Add total_loss to summary.
    summaries.add(tf.summary.scalar('total_loss', total_loss))
  if summaries:
    # Merge all summaries together.
    summary_op = tf.summary.merge(list(summaries), name='summary_op')
  else:
    summary_op = None
  return DeployedModel(train_op, summary_op, total_loss, clones)
def _sum_clones_gradients(clone_grads):
"""Calculate the sum gradient for each shared variable across all clones.
This function assumes that the clone_grads has been scaled appropriately by
1 / num_clones.
Args:
clone_grads: A List of List of tuples (gradient, variable), one list per
`Clone`.
Returns:
List of tuples of (gradient, variable) where the gradient has been summed
across all clones.
"""
sum_grads = []
for grad_and_vars in zip(*clone_grads):
# Note that each grad_and_vars looks like the following:
# ((grad_var0_clone0, var0), ... (grad_varN_cloneN, varN))
grads = []
var = grad_and_vars[0][1]
for g, v in grad_and_vars:
assert v == var
if g is not None:
grads.append(g)
if grads:
if len(grads) > 1:
sum_grad = tf.add_n(grads, name=var.op.name + '/sum_grads')
else:
sum_grad = grads[0]
sum_grads.append((sum_grad, var))
return sum_grads
def _add_gradients_summaries(grads_and_vars):
"""Add histogram summaries to gradients.
Note: The summaries are also added to the SUMMARIES collection.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
Returns:
The _list_ of the added summaries for grads_and_vars.
"""
summaries = []
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, tf.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(tf.summary.histogram(var.op.name + ':gradient',
grad_values))
summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
tf.global_norm([grad_values])))
else:
tf.logging.info('Var %s has no gradient', var.op.name)
return summaries
class DeploymentConfig(object):
  """Configuration for deploying a model with `deploy()`.

  You can pass an instance of this class to `deploy()` to specify exactly
  how to deploy the model to build. If you do not pass one, an instance built
  from the default deployment_hparams will be used.
  """

  def __init__(self,
               num_clones=1,
               clone_on_cpu=False,
               replica_id=0,
               num_replicas=1,
               num_ps_tasks=0,
               worker_job_name='worker',
               ps_job_name='ps'):
    """Create a DeploymentConfig.

    The config describes how to deploy a model across multiple clones and
    replicas. The model will be replicated `num_clones` times in each replica.
    If `clone_on_cpu` is True, each clone will placed on CPU.

    If `num_replicas` is 1, the model is deployed via a single process. In that
    case `worker_device`, `num_ps_tasks`, and `ps_device` are ignored.

    If `num_replicas` is greater than 1, then `worker_device` and `ps_device`
    must specify TensorFlow devices for the `worker` and `ps` jobs and
    `num_ps_tasks` must be positive.

    Args:
      num_clones: Number of model clones to deploy in each replica.
      clone_on_cpu: If True clones would be placed on CPU.
      replica_id: Integer. Index of the replica for which the model is
        deployed. Usually 0 for the chief replica.
      num_replicas: Number of replicas to use.
      num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas.
      worker_job_name: A name for the worker job.
      ps_job_name: A name for the parameter server job.

    Raises:
      ValueError: If the arguments are invalid.
    """
    # Validate the replica / parameter-server combination before storing it.
    if num_replicas > 1:
      if num_ps_tasks < 1:
        raise ValueError('When using replicas num_ps_tasks must be positive')
    if num_replicas > 1 or num_ps_tasks > 0:
      if not worker_job_name:
        raise ValueError('Must specify worker_job_name when using replicas')
      if not ps_job_name:
        raise ValueError('Must specify ps_job_name when using parameter server')
    if replica_id >= num_replicas:
      raise ValueError('replica_id must be less than num_replicas')
    self._num_clones = num_clones
    self._clone_on_cpu = clone_on_cpu
    self._replica_id = replica_id
    self._num_replicas = num_replicas
    self._num_ps_tasks = num_ps_tasks
    # Job-qualified device prefixes only apply when a parameter server is
    # in use; in single-process mode they stay empty strings.
    self._ps_device = '/job:' + ps_job_name if num_ps_tasks > 0 else ''
    self._worker_device = '/job:' + worker_job_name if num_ps_tasks > 0 else ''

  # Read-only accessors for the stored configuration.
  @property
  def num_clones(self):
    return self._num_clones

  @property
  def clone_on_cpu(self):
    return self._clone_on_cpu

  @property
  def replica_id(self):
    return self._replica_id

  @property
  def num_replicas(self):
    return self._num_replicas

  @property
  def num_ps_tasks(self):
    return self._num_ps_tasks

  @property
  def ps_device(self):
    return self._ps_device

  @property
  def worker_device(self):
    return self._worker_device

  def caching_device(self):
    """Returns the device to use for caching variables.

    Variables are cached on the worker CPU when using replicas.

    Returns:
      A device string or None if the variables do not need to be cached.
    """
    if self._num_ps_tasks > 0:
      # Cache each variable on the device of the op that consumes it.
      return lambda op: op.device
    else:
      return None

  def clone_device(self, clone_index):
    """Device used to create the clone and all the ops inside the clone.

    Args:
      clone_index: Int, representing the clone_index.

    Returns:
      A value suitable for `tf.device()`.

    Raises:
      ValueError: if `clone_index` is greater or equal to the number of clones.
    """
    if clone_index >= self._num_clones:
      raise ValueError('clone_index must be less than num_clones')
    device = ''
    if self._num_ps_tasks > 0:
      device += self._worker_device
    if self._clone_on_cpu:
      device += '/device:CPU:0'
    else:
      # One GPU per clone: clone i is placed on GPU i.
      device += '/device:GPU:%d' % clone_index
    return device

  def clone_scope(self, clone_index):
    """Name scope to create the clone.

    Args:
      clone_index: Int, representing the clone_index.

    Returns:
      A name_scope suitable for `tf.name_scope()`.

    Raises:
      ValueError: if `clone_index` is greater or equal to the number of clones.
    """
    if clone_index >= self._num_clones:
      raise ValueError('clone_index must be less than num_clones')
    scope = ''
    if self._num_clones > 1:
      scope = 'clone_%d' % clone_index
    return scope

  def optimizer_device(self):
    """Device to use with the optimizer.

    Returns:
      A value suitable for `tf.device()`.
    """
    if self._num_ps_tasks > 0 or self._num_clones > 0:
      return self._worker_device + '/device:CPU:0'
    else:
      return ''

  def inputs_device(self):
    """Device to use to build the inputs.

    Returns:
      A value suitable for `tf.device()`.
    """
    device = ''
    if self._num_ps_tasks > 0:
      device += self._worker_device
    device += '/device:CPU:0'
    return device

  def variables_device(self):
    """Returns the device to use for variables created inside the clone.

    Returns:
      A value suitable for `tf.device()`.
    """
    device = ''
    if self._num_ps_tasks > 0:
      device += self._ps_device
    device += '/device:CPU:0'

    class _PSDeviceChooser(object):
      """Slim device chooser for variables when using PS."""

      def __init__(self, device, tasks):
        self._device = device
        self._tasks = tasks
        self._task = 0  # next ps task to assign (round-robin)

      def choose(self, op):
        # Respect an explicit placement if the op already has one.
        if op.device:
          return op.device
        node_def = op if isinstance(op, tf.NodeDef) else op.node_def
        if node_def.op.startswith('Variable'):
          # Spread variable ops across the ps tasks in round-robin order.
          t = self._task
          self._task = (self._task + 1) % self._tasks
          d = '%s/task:%d' % (self._device, t)
          return d
        else:
          return op.device

    if not self._num_ps_tasks:
      return device
    else:
      chooser = _PSDeviceChooser(device, self._num_ps_tasks)
      return chooser.choose
| [
"s-eungju-n@hanmail.net"
] | s-eungju-n@hanmail.net |
5ed175518b8b11b9c76c3dd60d7b5ce0f445c19a | 84ec34dc0159f28e5085f7696be4bf586eaef32f | /fibonacci/fibbonaaci.py | 5ade7ef78e079aa6c524707b6e117ed9f387780f | [] | no_license | kumaraswins/Coursera-Algorithm-Toolbox | f187431758983a39d187c789ceb377415c7689fa | bc096471a1c0ec3069e54b70a6d4399f75673789 | refs/heads/master | 2022-04-24T10:08:10.972094 | 2020-04-27T05:24:57 | 2020-04-27T05:24:57 | 255,663,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | # python 3
def fib(n):
    """Print (and return) the n-th Fibonacci number; fib(0) = 0, fib(1) = 1.

    Generalized from the original, which raised UnboundLocalError for n < 2
    when called directly (the guard lived at module level instead).
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    print(a)
    return a


def main():
    """Read n from stdin and print the n-th Fibonacci number."""
    n = int(input())
    fib(n)


# Guarding the entry point keeps the module importable without blocking
# on input() (the original script read stdin at import time).
if __name__ == "__main__":
    main()
"kumaraswins@gmail.com"
] | kumaraswins@gmail.com |
46054b908ed5d924e13fa4414505f33c1d67974c | 0fb10e07046aad91be64f0c452a60d76c051efc7 | /mps_20160521/differential.py | 0947572501fdd03b916bfca32add3b8764824962 | [
"MIT"
] | permissive | wararaki718/mps | 9df6bccda0b13895f598b9823d5adc7e26b3c412 | 036263fc780a10fd3866704885ce9d9912e527d5 | refs/heads/master | 2021-06-01T04:03:18.013304 | 2016-05-23T15:41:04 | 2016-05-23T15:41:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | # import library
import math
# define an accepted rate
accepted_error = 0.1
delta_x = 0.01
learning_rate = 0.05
number_of_trial = 1000
# test function
# f(x) = x^2
def test_func(x):
return x ** 2
# differential function
# f'(x) = (f(x_1) - f(x_0)) / (x_1 - x_0)
def diff(f, x):
return (f(x + delta_x) - f(x)) / ((x + delta_x) - x)
# exploring a minimum result
def test(f, x):
# number of trial
i = 1
# exploring
while math.sqrt(x ** 2) > accepted_error and i <= number_of_trial:
print("test case {0}: {1}".format(i, x))
x = x - learning_rate * diff(f, x)
i = i + 1
return x
if __name__ == "__main__":
result = test(test_func, 5)
print("final result: {0}".format(result))
| [
"wararaki@wararaki-no-MacBook-Air.local"
] | wararaki@wararaki-no-MacBook-Air.local |
7f83aa1abe81599af869a8bf8a594a065d07480b | cbf407a1c9f18a6d0b94099586c59f1422933bb5 | /tensorflow_probability/python/distributions/joint_distribution_test.py | 0c69d5acd7131ced38a4030055c85676aa8c2225 | [
"Apache-2.0"
] | permissive | idofr/probability | eef7db7709e0309a6a132d6ce5e2423ae5f9ff17 | c8fa646de563cc3ddc9b375538bf5b613a318a46 | refs/heads/master | 2020-05-05T04:05:34.757494 | 2019-04-04T21:09:57 | 2019-04-04T21:10:45 | 179,697,040 | 1 | 0 | null | 2019-04-05T14:26:21 | 2019-04-05T14:26:21 | null | UTF-8 | Python | false | false | 10,474 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the JointDistributionSequential."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class JointDistributionSequentialTest(tf.test.TestCase, parameterized.TestCase):
  """Unit tests for `tfd.JointDistributionSequential`."""

  def test_sample_log_prob(self):
    d = tfd.JointDistributionSequential(
        [
            tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
            lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
            tfd.Normal(loc=0, scale=2.),
            tfd.Normal,  # Or, `lambda loc, scale: tfd.Normal(loc, scale)`.
            lambda m: tfd.Sample(tfd.Bernoulli(logits=m), 12),
        ],
        validate_args=True)
    # The dependency graph is resolved from the lambdas' argument names.
    self.assertEqual(
        (
            ('e', ()),
            ('scale', ('e',)),
            ('loc', ()),
            ('m', ('loc', 'scale')),
            ('x', ('m',)),
        ),
        d._resolve_graph())
    xs = d.sample(seed=tfp_test_util.test_seed())
    self.assertLen(xs, 5)
    # We'll verify the shapes work as intended when we plumb these back into
    # the respective log_probs.
    ds, _ = d.sample_distributions(value=xs)
    self.assertLen(ds, 5)
    self.assertIsInstance(ds[0], tfd.Independent)
    self.assertIsInstance(ds[1], tfd.Gamma)
    self.assertIsInstance(ds[2], tfd.Normal)
    self.assertIsInstance(ds[3], tfd.Normal)
    self.assertIsInstance(ds[4], tfd.Sample)
    # Static properties.
    self.assertAllEqual(
        [tf.float32, tf.float32, tf.float32, tf.float32, tf.int32],
        d.dtype)
    for expected, actual_tensorshape, actual_shapetensor in zip(
        [[2], [], [], [], [12]],
        d.event_shape,
        self.evaluate(d.event_shape_tensor())):
      self.assertAllEqual(expected, actual_tensorshape)
      self.assertAllEqual(expected, actual_shapetensor)
    for expected, actual_tensorshape, actual_shapetensor in zip(
        [[], [], [], []],
        d.batch_shape,
        self.evaluate(d.batch_shape_tensor())):
      self.assertAllEqual(expected, actual_tensorshape)
      self.assertAllEqual(expected, actual_shapetensor)
    # Joint log-prob must equal the sum of the per-part log-probs.
    expected_jlp = sum(d_.log_prob(x) for d_, x in zip(ds, xs))
    actual_jlp = d.log_prob(xs)
    self.assertAllEqual(*self.evaluate([expected_jlp, actual_jlp]))

  def test_kl_divergence(self):
    d0 = tfd.JointDistributionSequential(
        [
            tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
            tfd.Normal(loc=0, scale=2.),
        ],
        validate_args=True)
    d1 = tfd.JointDistributionSequential(
        [
            tfd.Independent(tfd.Exponential(rate=[10, 12]), 1),
            tfd.Normal(loc=1, scale=1.),
        ],
        validate_args=True)
    # KL of independent joints is the sum of the component KLs.
    expected_kl = sum(tfd.kl_divergence(d0_, d1_) for d0_, d1_
                      in zip(d0.distribution_fn, d1.distribution_fn))
    actual_kl = tfd.kl_divergence(d0, d1)
    other_actual_kl = d0.kl_divergence(d1)
    expected_kl_, actual_kl_, other_actual_kl_ = self.evaluate([
        expected_kl, actual_kl, other_actual_kl])
    self.assertNear(expected_kl_, actual_kl_, err=1e-5)
    self.assertNear(expected_kl_, other_actual_kl_, err=1e-5)

  def test_cross_entropy(self):
    d0 = tfd.JointDistributionSequential(
        [
            tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
            tfd.Normal(loc=0, scale=2.),
        ],
        validate_args=True)
    d1 = tfd.JointDistributionSequential(
        [
            tfd.Independent(tfd.Exponential(rate=[10, 12]), 1),
            tfd.Normal(loc=1, scale=1.),
        ],
        validate_args=True)
    # Cross entropy also decomposes over the independent components.
    expected_xent = sum(
        d0_.cross_entropy(d1_) for d0_, d1_
        in zip(d0.distribution_fn, d1.distribution_fn))
    actual_xent = d0.cross_entropy(d1)
    expected_xent_, actual_xent_ = self.evaluate([expected_xent, actual_xent])
    self.assertNear(actual_xent_, expected_xent_, err=1e-5)

  def test_norequired_args_maker(self):
    """Test that only non-default args are passed through."""
    d = tfd.JointDistributionSequential([tfd.Normal(0., 1.), tfd.Bernoulli])
    with self.assertRaisesWithPredicateMatch(
        ValueError, 'Must pass probs or logits, but not both.'):
      d.sample()

  def test_graph_resolution(self):
    d = tfd.JointDistributionSequential(
        [
            tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
            lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
            tfd.HalfNormal(2.5),
            lambda s: tfd.Normal(loc=0, scale=s),
            tfd.Exponential(2),
            lambda df, loc, _, scale: tfd.StudentT(df, loc, scale),
        ],
        validate_args=True)
    self.assertEqual(
        (('e', ()),
         ('scale', ('e',)),
         ('s', ()),
         ('loc', ('s',)),
         ('df', ()),
         ('x', ('df', 'loc', '_', 'scale'))),
        d._resolve_graph())

  @parameterized.parameters('mean', 'mode', 'stddev', 'variance')
  def test_summary_statistic(self, attr):
    d = tfd.JointDistributionSequential(
        [tfd.Normal(0., 1.), tfd.Bernoulli(logits=0.)],
        validate_args=True)
    expected = tuple(getattr(d_, attr)() for d_ in d.distribution_fn)
    actual = getattr(d, attr)()
    self.assertAllEqual(*self.evaluate([expected, actual]))

  @parameterized.parameters(('covariance',))
  def test_notimplemented_summary_statistic(self, attr):
    d = tfd.JointDistributionSequential([tfd.Normal(0., 1.), tfd.Bernoulli],
                                        validate_args=True)
    with self.assertRaisesWithPredicateMatch(
        NotImplementedError,
        attr + ' is not implemented: JointDistributionSequential'):
      getattr(d, attr)()

  @parameterized.parameters(
      'quantile', 'log_cdf', 'cdf',
      'log_survival_function', 'survival_function',
  )
  def test_notimplemented_evaluative_statistic(self, attr):
    d = tfd.JointDistributionSequential([tfd.Normal(0., 1.), tfd.Bernoulli],
                                        validate_args=True)
    with self.assertRaisesWithPredicateMatch(
        NotImplementedError,
        attr + ' is not implemented: JointDistributionSequential'):
      getattr(d, attr)([0.]*len(d.distribution_fn))

  def test_copy(self):
    pgm = [tfd.Normal(0., 1.), tfd.Bernoulli]
    d = tfd.JointDistributionSequential(pgm, validate_args=True)
    d_copy = d.copy()
    self.assertAllEqual(
        {'distribution_fn': pgm,
         'validate_args': True,
         'name': None},
        d_copy.parameters)

  def test_batch_slicing(self):
    d = tfd.JointDistributionSequential(
        [
            tfd.Exponential(rate=[10, 12, 14]),
            lambda s: tfd.Normal(loc=0, scale=s),
            lambda: tfd.Beta(concentration0=[3, 2, 1], concentration1=1),
        ],
        validate_args=True)
    # Slicing the joint slices the batch dimension of every component.
    d0, d1 = d[:1], d[1:]
    x0 = d0.sample(seed=tfp_test_util.test_seed())
    x1 = d1.sample(seed=tfp_test_util.test_seed())
    self.assertLen(x0, 3)
    self.assertEqual([1], x0[0].shape)
    self.assertEqual([1], x0[1].shape)
    self.assertEqual([1], x0[2].shape)
    self.assertLen(x1, 3)
    self.assertEqual([2], x1[0].shape)
    self.assertEqual([2], x1[1].shape)
    self.assertEqual([2], x1[2].shape)

  def test_sample_shape_propagation_default_behavior(self):
    d = tfd.JointDistributionSequential(
        [
            tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
            lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
            tfd.HalfNormal(2.5),
            lambda s: tfd.Normal(loc=0, scale=s),
            tfd.Exponential(2),
            lambda df, loc, _, scale: tfd.StudentT(df, loc, scale),
        ],
        validate_args=True)
    # By default the sample shape is applied once, at the root.
    x = d.sample([2, 3], seed=tfp_test_util.test_seed())
    self.assertLen(x, 6)
    self.assertEqual((2, 3, 2), x[0].shape)
    self.assertEqual((2, 3), x[1].shape)
    self.assertEqual((2, 3), x[2].shape)
    self.assertEqual((2, 3), x[3].shape)
    self.assertEqual((2, 3), x[4].shape)
    self.assertEqual((2, 3), x[5].shape)
    lp = d.log_prob(x)
    self.assertEqual((2, 3), lp.shape)

  def test_sample_shape_propagation_nondefault_behavior(self):
    d = tfd.JointDistributionSequential(
        [
            tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),  # 0
            lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),  # 1
            tfd.HalfNormal(2.5),  # 2
            lambda s: tfd.Normal(loc=0, scale=s),  # 3
            tfd.Exponential(2),  # 4
            lambda df, loc, _, scale: tfd.StudentT(df, loc, scale),  # 5
        ],
        validate_args=False)  # So log_prob doesn't complain.
    # The following enables the nondefault sample shape behavior.
    d._always_use_specified_sample_shape = True
    sample_shape = (2, 3)
    x = d.sample(sample_shape, seed=tfp_test_util.test_seed())
    self.assertLen(x, 6)
    self.assertEqual(sample_shape + (2,), x[0].shape)
    self.assertEqual(sample_shape * 2, x[1].shape)  # Has 1 arg.
    self.assertEqual(sample_shape * 1, x[2].shape)  # Has 0 args.
    self.assertEqual(sample_shape * 2, x[3].shape)  # Has 1 arg.
    self.assertEqual(sample_shape * 1, x[4].shape)  # Has 0 args.
    # Has 3 args, one being scalar.
    self.assertEqual(sample_shape * 3, x[5].shape)
    lp = d.log_prob(x)
    self.assertEqual(sample_shape * 3, lp.shape)


if __name__ == '__main__':
  tf.test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
b09f43d8449ac36d1c6a2db23edc1b00cf44c6f2 | ff7d2f841de829a358246659b35dac7ac84d2e34 | /InsertTimestamp.py | 96d17d9e3716e5870ffb2ea09c49710875bdbaae | [
"MIT"
] | permissive | fricklerhandwerk/InsertTimestamp | 418f5ce85948634df331b2c4ea3884c7dfadce7e | 29a68309fbbbf7aca91c3e5e9b65e496ae67423e | refs/heads/master | 2021-01-19T12:51:58.645004 | 2017-02-17T22:58:25 | 2017-02-19T14:39:16 | 82,345,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | import sublime_plugin
from datetime import datetime
from datetime import timezone
class InsertTimestampCommand(sublime_plugin.TextCommand):
    """Insert the current local time (ISO-8601 by default) at every selection."""

    def run(self, edit, fmt='%Y-%m-%dT%H:%M:%S%z'):
        # Take "now" in UTC, convert to the local timezone, then format.
        stamp = datetime.now(timezone.utc).astimezone().strftime(fmt)
        for region in self.view.sel():
            if region.empty():
                # Empty region: plain insert, so nothing ends up selected.
                self.view.insert(edit, region.a, stamp)
            else:
                # Non-empty region: replace the selected text with the stamp.
                self.view.replace(edit, region, stamp)
| [
"valentin@fricklerhandwerk.de"
] | valentin@fricklerhandwerk.de |
a7cb86c2e4cbd4332442225c33eccf63b66b7f00 | de4e1332950d37707620c54a9613258c1dd9489c | /dongyeop/4주차/주식가격.py | 66a1fd2587ee46a60471b010445f940fe0c01ebf | [] | no_license | PnuLikeLion9th/Summer_algorithm | 8fe74066b9673fb891b7205f75f808a04c7fe750 | dcfcb6325854b3b4c529451d5c6b162298b53bc1 | refs/heads/master | 2023-07-10T13:57:05.511432 | 2021-08-15T07:50:00 | 2021-08-15T07:50:00 | 378,679,514 | 3 | 10 | null | 2021-08-15T07:50:01 | 2021-06-20T15:32:18 | Python | UTF-8 | Python | false | false | 943 | py | # 브루트포스
# def solution(prices):
# answer=[0]*len(prices)
# for i in range(len(prices)):
# for j in range(i+1,len(prices)):
# if prices[i] <=prices[j]:
# answer[i]+=1
# else:
# answer[i]+=1
# break
# return answer
def solution(prices):
    """For each second, return how long the stock price did not fall.

    Stack-based O(n): the stack holds indices whose "no-drop" period is
    still open; a lower current price closes every period above it.
    """
    n = len(prices)
    answer = [0] * n
    open_periods = []  # indices of prices still waiting for a drop
    for now, price in enumerate(prices):
        # A lower price closes every open period with a higher price.
        while open_periods and price < prices[open_periods[-1]]:
            started = open_periods.pop()
            answer[started] = now - started
        open_periods.append(now)
    # Whatever is left never dropped: it lasted until the end.
    while open_periods:
        started = open_periods.pop()
        answer[started] = n - started - 1
    return answer
"ckehdduq95@gmail.com"
] | ckehdduq95@gmail.com |
a9959f969e1eb4d2abb88b4c50c283f909536ea4 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/H/hanse/vol_essen.py | b2d4485b5664a460fc906ebcf35661445fb64799 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,428 | py | import scraperwiki
import lxml.html
pageCounter = 1
while True:
page = scraperwiki.scrape("http://essen.vol.at/welcome.asp?page=%d" % (pageCounter))
root = lxml.html.fromstring(page)
for entry in root.cssselect('div[class="Entry"]'):
data={
"Name":entry.cssselect('div[class="CompanyName"]')[0].text_content(),
"Street": entry.cssselect('div[class="CompanyStreet"]')[0].text_content(),
"City" : entry.cssselect('div[class="CompanyPlace"]')[0].text_content()
}
scraperwiki.sqlite.save(unique_keys=["Name"], data=data)
if root.cssselect('a[class="Next"]'):
pageCounter=pageCounter+1
else:
break import scraperwiki
import lxml.html
pageCounter = 1
while True:
page = scraperwiki.scrape("http://essen.vol.at/welcome.asp?page=%d" % (pageCounter))
root = lxml.html.fromstring(page)
for entry in root.cssselect('div[class="Entry"]'):
data={
"Name":entry.cssselect('div[class="CompanyName"]')[0].text_content(),
"Street": entry.cssselect('div[class="CompanyStreet"]')[0].text_content(),
"City" : entry.cssselect('div[class="CompanyPlace"]')[0].text_content()
}
scraperwiki.sqlite.save(unique_keys=["Name"], data=data)
if root.cssselect('a[class="Next"]'):
pageCounter=pageCounter+1
else:
break | [
"pallih@kaninka.net"
] | pallih@kaninka.net |
49e40de4d1afb965c252fe55ede685c52e6ea490 | f8b41c788bcf0d895a5270e1e7c4f6085871b770 | /Uge 10/.history/iris_meanshift_20200331121352.py | 5390563e268d99e9985055db1b7919d076f07c2c | [] | no_license | Simonkruse2/Python_afleveringer | 0cb72d372556768d568bd2a5ca5ebecfca79653f | 5af0a492e8ae28c2b39e1f6c7de2bedfcc340c15 | refs/heads/master | 2020-12-22T13:23:53.663778 | 2020-05-22T10:21:54 | 2020-05-22T10:21:54 | 236,796,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | # Exercise meanshift
# load 'iris_data.csv' into a dataframe
# get unique labels (Species column)
# plot with a scatter plot each iris flower sample colored by label (3 different colors)
# use: MeanShift and estimate_bandwidth from sklearn.cluster to first estimate bandwidth and then get the clusters (HINT: estimate_bandwidth() takes an argument: quantile set it to 0.2 for best result
# print out labels, cluster centers and number of clusters (as returned from the MeanShift function
# create a new scatter plot where each flower is colored according to cluster label
# add a dot for the cluster centers
# Compare the 2 plots (colored by actual labels vs. colored by cluster label)
from sklearn.cluster import estimate_bandwidth
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Indlæs filen enten som xlsx(excel fil) eller som .csv
iris = pd.read_excel("iris_data.xlsx")
# Lav one hot encoding på species, så de hver især enten er 0 eller 1 i en art række.
iris_data = pd.get_dummies(iris,columns=['Species'])
print(iris_data.head())
# Tjek efter nullværdier
print('rows before drop n/a',len(iris_data))
missing = iris_data[iris_data.isnull().any(axis=1)]
# Fjern evt. nullværdier, i det her tilfælde er det ingen.
iris_data = iris_data.dropna()
print('rows after',len(iris_data))
iris_data.plot.scatter(x = 1, y =3, color=np.random.rand(50))
plt.show()
N = 50
x = np.random.rand(N)
y = np.random.rand(N)
colors = np.random.rand(N)
area = (30 * np.random.rand(N))**2 # 0 to 15 point radii
plt.scatter(x, y, s=area, c=colors, alpha=0.5)
plt.show()
| [
"Simonkruse2@gmail.com"
] | Simonkruse2@gmail.com |
731bb20a4c55f891b7b01b7882c24962d5ed80ac | 69e9ec4118a05d05f052c1f79dde1581312ea32a | /Exp1_questionnaire/pages.py | 8bb3af2690c39e454f48efdfc67207847deeb407 | [] | no_license | Anyish61/otree_HW2_0707 | 0774a2b4556918fad2793a404752c99ed1a511f9 | f5c3f715ce59f84d133d6f0e513e9dd2fec38202 | refs/heads/master | 2022-11-17T04:48:08.723764 | 2020-07-07T04:11:11 | 2020-07-07T04:11:11 | 277,713,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,945 | py | from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants, WaitingPeriod, GainedAmount
from random import randint
import random
class Questionnaire(Page):
    """One page per round collecting time-preference answers.

    Each participant gets a shuffled ordering of encoded waiting periods,
    generated once and stored in participant.vars, then consumed one entry
    per round.
    """

    form_model = 'player'
    form_fields = [
        'waiting_period',
        'sooner_period',
        'treatment_method',
        'switch_point',
    ]

    def generate_questionnaire_parameters(self):
        """Step 2: take the list of waiting periods, shuffle it, and return it."""
        shuffled_waiting_period = random.sample(WaitingPeriod.list, 8)
        return shuffled_waiting_period

    def setup_questionaire_parameters_pairs(self):
        # If the shuffled order of periods does not exist yet, generate and
        # store it now; otherwise just read the stored order back.
        if Constants.key_params not in self.participant.vars:
            shuffled_waiting_period = self.generate_questionnaire_parameters()
            self.participant.vars[Constants.key_params] = shuffled_waiting_period
        params = self.participant.vars[Constants.key_params]
        # Set this round's parameters and write them to the db.
        idx = self.round_number - 1  # lists are 0-based but round_number starts at 1
        """Step 3: take this round's waiting period and store it on the player."""
        # NOTE(review): each encoded entry appears to be weeks*10 + sooner-offset
        # (offset 0 -> today, 4 -> four weeks) -- confirm against WaitingPeriod.list.
        self.player.waiting_period = int(params[idx] // 10)  # quotient of /10 = waiting weeks
        sooner_period = int(params[idx] % 10)
        if sooner_period == 0:
            self.player.sooner_period = '今天'
        elif sooner_period == 4:
            self.player.sooner_period = '4星期後'

    def is_displayed(self):  # always runs
        # Assign this round's parameters (weeks, sooner period).
        """Step 1: run setup_questionaire_parameters_pairs to assign the weeks."""
        self.setup_questionaire_parameters_pairs()
        return True


page_sequence = [Questionnaire]
| [
"b06303108@ntu.edu.tw"
] | b06303108@ntu.edu.tw |
bda191301750ca690fb5cac1d9f9abe3f859c48c | b773ca4e5f4a8642149316d3aded4c8b1e6037d2 | /sprint-challenge/aq_dashboard.py | e05d226a6c975acfb3676de3141310ccde108ea6 | [
"MIT"
] | permissive | echiyembekeza/DS-Unit-3-Sprint-3-Productization-and-Cloud | c2157e9078ec49b1f59d28220146a197dda3b25c | 64958ae8e9d2310d6c72606109a6ccf456bc5949 | refs/heads/master | 2020-08-04T18:39:27.405320 | 2019-12-11T03:11:28 | 2019-12-11T03:11:28 | 212,239,896 | 0 | 0 | MIT | 2019-10-02T02:27:48 | 2019-10-02T02:27:48 | null | UTF-8 | Python | false | false | 1,662 | py | """OpenAQ Air Quality Dashboard with Flask."""
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
from decouple import config
from os import getenv
import openaq
APP = Flask(__name__)
APP.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
DB = SQLAlchemy(APP)
API = openaq.OpenAQ()
mment = API.measurements(city='Los Angeles', parameter='pm25')
body = mment[1]
def LAquery(k):
LAresults = body['results']
values = []
for k in LAresults:
kvalue = k.get('value')
kdate = k.get('date')
kutc = kdate.get('utc')
values.append((kvalue, kutc))
return values
class Record(DB.Model):
id = DB.Column(DB.Integer, primary_key=True)
datetime = DB.Column(DB.String(25))
value = DB.Column(DB.Float, nullable=False)
def __repr__(self):
return f"<id={self.id}, datetime={self.datetime}, value={self.value}>"
@APP.route('/')
def root():
"""Base view."""
records = Record.query.filter(Record.value>=10).all()
res=''
for rec in records:
res += 'datetime = '+ rec.datetime
res += ", "
res += 'value = '+ str(rec.value)
res += '</br>'
return res
@APP.route('/refresh')
def refresh():
"""Pull fresh data from Open AQ and replace existing data."""
DB.drop_all()
DB.create_all()
API_items = body['results']
for i in API_items:
ivalue = i.get('value')
idate = i.get('date')
iutc = idate.get('utc')
db_item = (Record(datetime=iutc, value=ivalue))
DB.session.add(db_item)
DB.session.commit()
return 'Data refreshed!'
if __name__ == "__main__":
APP.run()
| [
"username@users.noreply.github.com"
] | username@users.noreply.github.com |
0aa502236804754deb0fe6bded9773f39bdce896 | dd2625e9b05b12d8b9bb8727392f2b4cd4eac11f | /All checklists/Data_entry_gr/Dataentry (1).py | 973e883749b592e40f68b1d9a5f92e31542d917c | [] | no_license | billhufnagle/Zea-IT | 07f083a89f34867abdf21bd931f9657255395775 | 7da601002323227afb346f3ab08c898214477175 | refs/heads/master | 2020-03-25T15:59:33.545028 | 2018-08-15T19:09:10 | 2018-08-15T19:09:10 | 143,910,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,265 | py | #GUI application for data entry from the grow room checklist
#Writes to .csv file as well as the growroomwalkthrough table
# in the datacollection database on the "Dataentrylaptop"
#
from tkinter import *
import datetime
import MySQLdb
#creates a class which will become our application
#inside of this we make our application, first with Labels, then entry boxes,
#then buttons, then we place them on the grid LAYOUT
#and then ultimately we creates the methods to be used on text entry or button
#press
class Data_entry:
def __init__(self, master):
self.master = master
master.title("Grow Room Data Entry")
#Labels
self.emptylabel = Label(master, text="")
self.secondempty = Label(master, text="")
self.errorlab = Label(master, text="ERROR: All inputs must be numbers, except for comments", fg="red")
self.workerlab = Label(master, text="Initials of data collector")
self.roomlab = Label(master, text="Room number: ")
self.racklab = Label(master, text="Rack number: ")
self.ltslab = Label(master, text="Lights: ", bg="white")
self.pltslab = Label(master, text="Plants: ")
self.wtrflwlab = Label(master, text="Water Flow: ", bg="white")
self.res50lab = Label(master, text="Reservoir > 50%: ")
self.pmpslab = Label(master, text="Pumps: ", bg="white")
self.ipmlab = Label(master, text="IPM(1,2,3): ")
self.airlab = Label(master, text="Airators: ", bg="white")
self.drpleaklab = Label(master, text="No Drips/Leaks: ")
self.riserlab = Label(master, text="Risers: ", bg="white")
self.commentslab = Label(master, text="---Comments---")
self.dontfitup = Label(master, text = "0 = Good || 1 = Bad", bg="red", fg="white")
self.seconddont = Label(master, text = "0 = Good || 1 = Bad", bg="red", fg="white")
self.legend = Label(master, text = "----Key----")
self.secondlegend = Label(master, text = "----Key----")
self.daily = Label(master, text = "---Daily list---")
self.weekend = Label(master, text = "---Weekend list---")
self.weekendpmps = Label(master, text = "Pumps: ", bg="white")
self.weekendmaniflw = Label(master, text = "Manifold Flow: ", bg="white")
self.weekenddrpleak = Label(master, text = "No Drips/Leaks: ")
self.weekendvisplts = Label(master, text = "Visual on Plants: ")
self.ltsignore = Label(master, text = "Leave blank", fg="navy", bg="white")
self.res50ignore = Label(master, text = "Leave blank", fg="navy")
self.ipmignore = Label(master, text = "Leave blank", fg="navy")
self.airignore = Label(master, text = "Leave blank", fg="navy", bg="white")
self.riserignore = Label(master, text = "Leave blank", fg="navy", bg="white")
self.keylabel = Label(master, text = "---Key---", fg = "black" , bg = "blue")
self.lightskey = Label(master, text = "Lights: Multi, Bar, Diode")
self.plantskey = Label(master, text = "Plants: Necrosis, Overgrown,\
Discolored, Wilted, Brown tipped roots, Brown Roots")
self.ipmkey = Label(master, text = "IPM: Aphids, Thrips, Brown Mold, Damage to Leaves")
self.waterflowkey = Label(master, text = "Waterflow: Where is the issue")
self.pumpskey = Label(master, text = "Pumps: Record # if <90")
self.dripskey = Label(master, text = "Drips: Observations. Why?")
self.reportlab = Label(master, text = "Only send report when you have \
finished SUBMITTING all Racks", bg="navy", fg="white")
#Entry boxes
vcmd = master.register(self.validate) # validate command for int inputs
commentvcmd = master.register(self.commentvalidate) #validate command for str inputs(worker, rack, room, comments)
self.worker = Entry(master, validate="key", validatecommand=(commentvcmd, '%P'))
self.room = Entry(master, validate="key", validatecommand=(commentvcmd, '%P'))
self.rack = Entry(master, validate="key", validatecommand=(commentvcmd, '%P'))
self.lts = Entry(master, validate="key", validatecommand=(vcmd, '%P'))
self.ltscmnt = Entry(master, validate="key", validatecommand=(commentvcmd, '%P'))
self.plts = Entry(master, validate="key", validatecommand=(vcmd, '%P'))
self.pltscmnt = Entry(master, validate="key", validatecommand=(commentvcmd, '%P'))
self.wtrflw = Entry(master, validate="key", validatecommand=(vcmd, '%P'))
self.wtrflwcmnt = Entry(master, validate="key", validatecommand=(commentvcmd, '%P'))
self.res50 = Entry(master, validate="key", validatecommand=(vcmd, '%P'))
self.res50cmnt = Entry(master, validate="key", validatecommand=(commentvcmd, '%P'))
self.pmps = Entry(master, validate="key", validatecommand=(vcmd, '%P'))
self.pmpscmnt = Entry(master, validate="key", validatecommand=(commentvcmd, '%P'))
self.ipm = Entry(master, validate="key", validatecommand=(vcmd, '%P'))
self.ipmcmnt = Entry(master, validate="key", validatecommand=(commentvcmd, '%P'))
self.air = Entry(master, validate="key", validatecommand=(vcmd, '%P'))
self.aircmnt = Entry(master, validate="key", validatecommand=(commentvcmd, '%P'))
self.drpleak = Entry(master, validate="key", validatecommand=(vcmd, '%P'))
self.drpleakcmnt = Entry(master, validate="key", validatecommand=(commentvcmd, '%P'))
self.riser = Entry(master, validate="key", validatecommand=(vcmd, '%P'))
self.risercmnt = Entry(master, validate="key", validatecommand=(commentvcmd, '%P'))
self.riserholding=self.riser.get() #holding of empty strings to check
#Submit button
self.submit_button = Button(master, text="Submit", command=lambda: self.submit())
self.report_button = Button(master, text="Send Email Report",\
command=lambda: self.sendreport())
# LAYOUT
self.workerlab.grid(row=0, column=0, columnspan=3, sticky=W+E)
self.worker.grid(row=1, column=0, columnspan=2, sticky=W+E)
self.emptylabel.grid(row=1, column=3)
self.roomlab.grid(row=2, column=0)
self.room.grid(row=2, column=1, columnspan=1, sticky=W+E)
self.racklab.grid(row=3, column=0)
self.rack.grid(row=3, column=1, columnspan=1, sticky=W+E)
self.secondlegend.grid(row=4, column=2)
self.daily.grid(row=5, column=0)
self.weekend.grid(row=5, column=1)
self.seconddont.grid(row=5, column=2)
self.commentslab.grid(row=5, column=4)
self.ltslab.grid(row=6, column=0, sticky=W+E)
self.ltsignore.grid(row=6, column=1, sticky=W+E)
self.lts.grid(row=6, column=2, columnspan=1, sticky=W+E)
self.ltscmnt.grid(row=6, column=4, columnspan=1, sticky=W+E)
self.pltslab.grid(row=7, column=0, sticky=W+E)
self.weekendvisplts.grid(row=7, column=1, sticky=W+E)
self.plts.grid(row=7, column=2, columnspan=1, sticky=W+E)
self.pltscmnt.grid(row=7, column=4, columnspan=1, sticky=W+E)
self.wtrflwlab.grid(row=8, column=0, sticky=W+E)
self.weekendmaniflw.grid(row=8, column=1, sticky=W+E)
self.wtrflw.grid(row=8, column=2, columnspan=1, sticky=W+E)
self.wtrflwcmnt.grid(row=8, column=4, columnspan=1, sticky=W+E)
self.res50lab.grid(row=9, column=0, sticky=W+E)
self.res50ignore.grid(row=9, column=1, sticky=W+E)
self.res50.grid(row=9, column=2, columnspan=1, sticky=W+E)
self.res50cmnt.grid(row=9, column=4, columnspan=1, sticky=W+E)
self.pmpslab.grid(row=10, column=0, sticky=W+E)
self.weekendpmps.grid(row=10, column=1, sticky=W+E)
self.pmps.grid(row=10, column=2, columnspan=1, sticky=W+E)
self.pmpscmnt.grid(row=10, column=4, columnspan=1, sticky=W+E)
self.ipmlab.grid(row=11, column=0, sticky=W+E)
self.ipmignore.grid(row=11, column=1, sticky=W+E)
self.ipm.grid(row=11, column=2, columnspan=1, sticky=W+E)
self.ipmcmnt.grid(row=11, column=4, columnspan=1, sticky=W+E)
self.airlab.grid(row=12, column=0, sticky=W+E)
self.airignore.grid(row=12, column=1, sticky=W+E)
self.air.grid(row=12, column=2, columnspan=1, sticky=W+E)
self.aircmnt.grid(row=12, column=4, columnspan=1, sticky=W+E)
self.drpleaklab.grid(row=13, column=0, sticky=W+E)
self.weekenddrpleak.grid(row=13, column=1, sticky=W+E)
self.drpleak.grid(row=13, column=2, columnspan=1, sticky=W+E)
self.drpleakcmnt.grid(row=13, column=4, columnspan=3, sticky=W+E)
self.riserlab.grid(row=14, column=0, sticky=W+E)
self.riserignore.grid(row=14, column=1, sticky=W+E)
self.riser.grid(row=14, column=2, columnspan=1, sticky=W+E)
self.risercmnt.grid(row=14, column=4, columnspan=1, sticky=W+E)
self.errorlab.grid(row=16, column=2, columnspan=2, sticky=W)
self.legend.grid(row=17, column=2)
self.dontfitup.grid(row=18, column=2)
self.submit_button.grid(row=18, columnspan=1, column=4, sticky=W+E)
self.keylabel.grid(row=19, columnspan=2, column=1, sticky=W+E)
self.lightskey.grid(row=20, columnspan=2, column=1, sticky=W+E)
self.plantskey.grid(row=21, columnspan=2, column=1, sticky=W+E)
self.ipmkey.grid(row=23, columnspan=2, column=1, sticky=W+E)
self.waterflowkey.grid(row=25, columnspan=2, column=1, sticky=W+E)
self.pumpskey.grid(row=26, columnspan=2, column=1, sticky=W+E)
self.dripskey.grid(row=27, columnspan=2, column=1, sticky=W+E)
self.reportlab.grid(row=28, column=0, columnspan=2, sticky=W+E)
self.report_button.grid(row=29, column=0, columnspan=2, sticky=W+E)
master.grid_columnconfigure(0, minsize=200, weight=1)
master.grid_columnconfigure(1, minsize=50, weight=1)
master.grid_columnconfigure(2, minsize=50, weight=2)
master.grid_columnconfigure(3, minsize=30)
master.grid_columnconfigure(4, weight=2)
self.errorclear()
#set for the dataentry laptop accessing the datacollection db
self.db = MySQLdb.connect("databaseserver", "root", "password", "datacollection")
self.cursor = self.db.cursor()
#Empty checkers used to see if an input is empty, so it can be
#appropriately stored in the DB as a NULL
self.emptynumb = self.lts.get()
self.emptycmnt = self.ltscmnt.get()
# Hide the inline error message; grid_forget() removes the widget from the
# tkinter grid without destroying it, so errorhandle() can re-show it later.
def errorclear(self):
    """Remove the error-message label from the window grid."""
    self.errorlab.grid_forget()
# Re-display the error label when an invalid key input is detected;
# the inverse of errorclear() above.
def errorhandle(self): #error message showing
    """Show the invalid-input error message in the window grid."""
    # NOTE(review): gridded at column=1 here, while the initial layout placed
    # errorlab at row=16, column=2 -- confirm which column is intended.
    self.errorlab.grid(row=16, column=1, columnspan=2, sticky=W)
# tkinter 'key' validatecommand for the free-text entries (worker, rack,
# room and all comment boxes). tkinter passes the prospective text as %P.
def commentvalidate(self, new_text):
    """Accept any text and clear a previously shown error message.

    The original wrapped ``str(new_text)`` in ``try/except ValueError``,
    but tkinter always supplies a ``str`` and ``str()`` never raises
    ValueError, so the reject branch was unreachable -- this validator
    always accepts, which is what is implemented explicitly here.
    """
    self.errorclear()
    return True
# tkinter 'key' validatecommand for integer-only entries: rejects any
# keystroke that would leave non-digit characters in the box, so the
# database never receives a non-integer in a numeric column.
def validate(self, new_text):
    """Return True when *new_text* is empty or parses as an int.

    Clears the error label on every keystroke; shows it (and rejects the
    edit) when the resulting text is not a valid integer. The original
    called errorclear() up to twice per keystroke -- the duplicates were
    redundant and have been removed.
    """
    self.errorclear()
    if not new_text:
        # Allow the box to be emptied.
        return True
    try:
        int(new_text)
    except ValueError:
        self.errorhandle()
        return False
    return True
# Reset the per-rack portion of the form after a successful submit.
# Worker initials and room number are intentionally left in place so the
# operator does not have to retype them for every rack (they can still be
# edited manually).
def clearcells(self):
    """Empty every rack-specific Entry widget (readings and comments)."""
    rack_entries = (
        self.rack,
        self.lts, self.ltscmnt,
        self.plts, self.pltscmnt,
        self.wtrflw, self.wtrflwcmnt,
        self.res50, self.res50cmnt,
        self.pmps, self.pmpscmnt,
        self.ipm, self.ipmcmnt,
        self.air, self.aircmnt,
        self.drpleak, self.drpleakcmnt,
        self.riser, self.risercmnt,
    )
    for entry in rack_entries:
        entry.delete(0, END)
    return
# Send the e-mail report by importing the `report` module, which does its
# work at import time. Python caches imports, so clicking the button a
# second time in the same process will NOT re-run the module body -- the
# program must be restarted to send another report.
def sendreport(self):
    """Trigger the e-mail report (side effect of importing `report`)."""
    import report
#submit method for the data into the database and the csv file
#will only submit if the rack and initials entries are not empty
#other entries are allowed to be empty because of possibly unused
#racks
#the method takes all of the inputs and concatenates them into a string
#separated by commas for input to the .csv file, as well as inputing them
#all into the database
def submit(self):
if self.worker.get()==self.workerempty:
return
if self.rack.get()==self.workerempty:
return
#this same loop is needed for any input that isn't strictly numbers
#so that there are no commas which would cause errors in sql, no quotes
#and backslashes as they could allow sql injection issues
racknum=list(self.rack.get())
for i in range(len(racknum)):
print(racknum[i])
if ord(racknum[i])==92:
racknum[i]='backslash'
if racknum[i]==',':
racknum[i]=';'
if racknum[i]=='"':
racknum[i]='dblequote'
if racknum[i]=="'":
racknum[i]='snglquote'
racknum="'"+"".join(racknum)+"'"
roomnum=list(self.room.get())
for i in range(len(roomnum)):
if ord(roomnum[i])==92:
roomnum[i]='backslash'
if roomnum[i]==',':
roomnum[i]=';'
if roomnum[i]=='"':
roomnum[i]='dblequote'
if roomnum[i]=="'":
roomnum[i]='snglquote'
roomnum="'"+"".join(roomnum)+"'"
#MySQL doesnt allow empty inputs in the fields, so need to set empty
#entry boxes as null
lts=self.lts.get()
plts=self.plts.get()
wtrflw=self.wtrflw.get()
res50=self.res50.get()
pmps=self.pmps.get()
ipm=self.ipm.get()
air=self.air.get()
drpleak=self.drpleak.get()
risers=self.riser.get()
if self.rack.get()==self.emptycmnt:
racknum='NULL'
if self.room.get()==self.emptycmnt:
roomnum='NULL'
if self.lts.get()==self.emptynumb:
lts='NULL'
if self.plts.get()==self.emptynumb:
plts='NULL'
if self.wtrflw.get() == self.emptynumb:
wtrflw='NULL'
if self.res50.get()==self.emptynumb:
res50='NULL'
if self.pmps.get()==self.emptynumb:
pmps='NULL'
if self.ipm.get()==self.emptynumb:
ipm='NULL'
if self.air.get()==self.emptynumb:
air='NULL'
if self.drpleak.get()==self.emptynumb:
drpleak='NULL'
if self.riser.get()==self.emptynumb:
risers='NULL'
print(drpleak)
print (self.air.get())
holdingstring = roomnum + ',' + racknum\
+ "," + lts + "," + plts\
+","+wtrflw+","+res50+","+\
pmps+","+ipm+","+air \
+ "," + drpleak + "," + risers #Concats all the basic data
ltscmnt=list(self.ltscmnt.get())
for i in range(len(ltscmnt)):
if ord(ltscmnt[i])==92:
ltscmnt[i]='backslash'
if ltscmnt[i]==',':
ltscmnt[i]=';'
if ltscmnt[i]=='"':
ltscmnt[i]='dblequote'
if ltscmnt[i]=="'":
ltscmnt[i]='snglquote'
ltscmnt="'"+"".join(ltscmnt)+"'"
pltscmnt=list(self.pltscmnt.get())
for i in range(len(pltscmnt)):
if ord(pltscmnt[i])==92:
pltscmnt[i]='backslash'
if pltscmnt[i]==',':
pltscmnt[i]=';'
if pltscmnt[i]=='"':
pltscmnt[i]='dblequote'
if pltscmnt[i]=="'":
pltscmnt[i]='snglquote'
pltscmnt="'"+"".join(pltscmnt)+"'"
wtrflwcmnt=list(self.wtrflwcmnt.get())
for i in range(len(wtrflwcmnt)):
if ord(wtrflwcmnt[i])==92:
wtrflwcmnt[i]='backslash'
if wtrflwcmnt[i]==',':
wtrflwcmnt[i]=';'
if wtrflwcmnt[i]=='"':
wtrflwcmnt[i]='dblequote'
if wtrflwcmnt[i]=="'":
wtrflwcmnt[i]='snglquote'
wtrflwcmnt="'"+"".join(wtrflwcmnt)+"'"
res50cmnt=list(self.res50cmnt.get())
for i in range(len(res50cmnt)):
if ord(res50cmnt[i])==92:
res50cmnt[i]='backslash'
if res50cmnt[i]==',':
res50cmnt[i]=';'
if res50cmnt[i]=='"':
res50cmnt[i]='dblequote'
if res50cmnt[i]=="'":
res50cmnt[i]='snglquote'
res50cmnt="'"+"".join(res50cmnt)+"'"
pmpscmnt=list(self.pmpscmnt.get())
for i in range(len(pmpscmnt)):
if ord(pmpscmnt[i])==92:
pmpscmnt[i]='backslash'
if pmpscmnt[i]==',':
pmpscmnt[i]=';'
if pmpscmnt[i]=='"':
pmpscmnt[i]='dblequote'
if pmpscmnt[i]=="'":
pmpscmnt[i]='snglquote'
pmpscmnt="'"+"".join(pmpscmnt)+"'"
ipmcmnt=list(self.ipmcmnt.get())
for i in range(len(ipmcmnt)):
if ord(ipmcmnt[i])==92:
ipmcmnt[i]='backslash'
if ipmcmnt[i]==',':
ipmcmnt[i]=';'
if ipmcmnt[i]=='"':
ipmcmnt[i]='dblequote'
if ipmcmnt[i]=="'":
ipmcmnt[i]='snglquote'
ipmcmnt="'"+"".join(ipmcmnt)+"'"
aircmnt=list(self.aircmnt.get())
for i in range(len(aircmnt)):
if ord(aircmnt[i])==92:
aircmnt[i]='backslash'
if aircmnt[i]==',':
aircmnt[i]=';'
if aircmnt[i]=='"':
aircmnt[i]='dblequote'
if aircmnt[i]=="'":
aircmnt[i]='snglquote'
aircmnt="'"+"".join(aircmnt)+"'"
drpleakcmnt=list(self.drpleakcmnt.get())
for i in range(len(drpleakcmnt)):
if ord(drpleakcmnt[i])==92:
drpleakcmnt[i]='backslash'
if drpleakcmnt[i]==',':
drpleakcmnt[i]=';'
if drpleakcmnt[i]=='"':
drpleakcmnt[i]='dblequote'
if drpleakcmnt[i]=="'":
drpleakcmnt[i]='snglquote'
drpleakcmnt="'"+"".join(drpleakcmnt)+"'"
risercmnt=list(self.risercmnt.get())
for i in range(len(risercmnt)):
if ord(risercmnt[i])==92:
risercmnt[i]='backslash'
if risercmnt[i]==',':
risercmnt[i]=';'
if risercmnt[i]=='"':
risercmnt[i]='dblequote'
if risercmnt[i]=="'":
risercmnt[i]='snglquote'
risercmnt="'"+"".join(risercmnt)+"'"
#need to check each entry to make sure it isn't empty, as that would
#cause an error when trying to input to the database
if self.ltscmnt.get()==self.emptycmnt:
ltscmnt='NULL'
if self.pltscmnt.get()==self.emptycmnt:
pltscmnt='NULL'
if self.wtrflwcmnt.get()==self.emptycmnt:
wtrflwcmnt='NULL'
if self.res50cmnt.get()==self.emptycmnt:
res50cmnt='NULL'
if self.pmpscmnt.get()==self.emptycmnt:
pmpscmnt='NULL'
if self.ipmcmnt.get()==self.emptycmnt:
ipmcmnt='NULL'
if self.aircmnt.get()==self.emptycmnt:
aircmnt='NULL'
if self.drpleakcmnt.get()==self.emptycmnt:
drpleakcmnt='NULL'
if self.risercmnt.get()==self.emptycmnt:
risercmnt='NULL'
holdingstring=holdingstring +","+ltscmnt+","+pltscmnt+","+\
wtrflwcmnt+","+res50cmnt+","+pmpscmnt+","+\
ipmcmnt+","+aircmnt+","+\
drpleakcmnt+","+risercmnt
worker=list(self.worker.get()) #Change worker entry input into list so its mutable
for i in range(len(worker)): #check for commas, change to semicolons
if ord(worker[i])==92:
worker[i]='backslash'
if worker[i]==',':
worker[i]=';'
if worker[i]=='"':
worker[i]='dblquote'
if worker[i] == "'":
worker[i]= 'snglquote'
worker="".join(worker)
holdingstring=holdingstring + ",'" + worker + "',"#concatenate worker at end, and add new line character
timestamp=datetime.datetime.now().strftime("'%A','%Y-%m-%d %H:%M:%S'") #keeps DoW, date, H:M:S from date obj
holdingstring+= timestamp#concats all data with timestamp
mysqlholdingstring=holdingstring
holdingstring+="\n"
print (holdingstring)
filename="DailyWalkthrough_db.csv"
output=open(filename, 'a+')
output.write(holdingstring)
output.close()
holdinglist=holdingstring.split(',')
print (len(holdinglist))
mysqlholdingstring='('+mysqlholdingstring+')'
mysqlholdingstring = "".join(mysqlholdingstring)
print (mysqlholdingstring)
#SQL input string to be run
self.cursor.execute("""INSERT INTO growroomwalkthrough (Room,Rack,Lights,\
Plants,WaterFlow,Reservoir50,Pumps,IPM,Airators,NoDripsLeaks,Risers,Lights_commen\
ts,Plants_comments,WaterFlow_comments,Reservoir50_comments,Pumps_comments,IPM_co\
mments,Airators_comments,NoDripsLeaks_comments,Risers_comments,Initials,Day,Date)\
VALUES """ + mysqlholdingstring)
self.db.commit()
#this is the last action before the submit function complete
#so if the cells do not clear then there was some error before here
self.clearcells()
# Application entry point: create the top-level tkinter window, attach the
# data-entry form to it, and hand control to the tkinter event loop
# (mainloop blocks until the window is closed).
root=Tk()
my_gui=Data_entry(root)
root.mainloop()
| [
"whufnagle6@gmail.com"
] | whufnagle6@gmail.com |
bb69c9c8395c9a25148ec73ce40eadd561645c4c | dd5cb6b5e2cb9ddc6a361eff13abc82aae8e49b1 | /mem_app/apps.py | 8faa3b039a1be128ac0106f507c7bc15bca12362 | [] | no_license | djjohnsongeek/memorize | 1d8d13c79915180dc16202faaf4116b6d75eb2d6 | c540de01ffe11164d9256d8c009f17842349005b | refs/heads/master | 2022-12-09T20:41:16.283776 | 2022-10-27T12:38:28 | 2022-10-27T12:38:28 | 199,750,559 | 0 | 0 | null | 2022-12-08T06:05:16 | 2019-07-31T00:58:39 | CSS | UTF-8 | Python | false | false | 88 | py | from django.apps import AppConfig
class MemAppConfig(AppConfig):
    """Django application configuration for the ``mem_app`` app."""
    name = 'mem_app'
| [
"danieleejohnson@gmail.com"
] | danieleejohnson@gmail.com |
99169552959f905fd2575d8516a93e69cb86524c | ea1170d1a6e3cf1cb4f01098c6d0cc8dfd12258b | /setup.py | c87de217f54c9542f4e705a93fbb073378aaedd5 | [
"MIT"
] | permissive | afansv/async-rethinkdb-pool | c95b2ed83065d09863240b9f439d52b4cbd2d5ab | 0d4fa871c0a15a39486b2f1f200678573d59214e | refs/heads/master | 2021-04-06T02:05:40.685662 | 2017-12-20T11:21:25 | 2017-12-20T11:21:25 | 124,897,370 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging configuration for the async-repool distribution."""
from setuptools import setup, find_packages

# The reStructuredText README doubles as the PyPI long description.
with open('README.rst') as readme_handle:
    readme = readme_handle.read()

setup(
    name='async-repool',
    version='0.2.1',
    description="AsyncIO connection pool for RethinkDB",
    long_description=readme,
    author="Bogdan Gladyshev",
    author_email='siredvin.dark@gmail.com',
    url='https://gitlab.com/AnjiProject/async-repool',
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        "rethinkdb>=2.3.0.post6",
    ],
    license="MIT license",
    zip_safe=False,
    keywords='rethinkdb asyncio connection pool',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    tests_require=[],
    setup_requires=[],
)
| [
"siredvin.dark@gmail.com"
] | siredvin.dark@gmail.com |
695d0d073402440740fc8500b7e5f345c02b68c8 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/Learn/PyCharm/Introduction to Python/Condition expressions/Boolean operators/tests.py | 470706706d0ff91198294d3ffe34a83d348960e6 | [
"LicenseRef-scancode-other-permissive"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:539e49395fbd7e0e4b30294456efe7d922a26823d5ac4c0eea6d348bb18cfba3
size 413
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
442012364b8cb87fcd92db37fa9253814865448b | fe4247267a21b4d76bbf3ec3ea560453b93b4c88 | /utils.py | 7b45a802acb8c9bdf2413f88af09312fb34b37a2 | [] | no_license | dudmel/Flask | 0b5715a3256117f584a3405e2e5240647a837495 | f048c2547aff34774e143107a5fd034a8d13e956 | refs/heads/master | 2021-01-11T18:05:22.871050 | 2017-01-19T20:02:54 | 2017-01-19T20:02:54 | 79,489,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,140 | py | import datetime, time
import glob
import os
import consts as consts
# Legacy integer boolean aliases (predate consistent use of True/False in
# this codebase); kept because other modules may import them.
TRUE = 1
FALSE = 0
# Remove cached artifacts from the current working directory.
def clean():
    """Delete every ``*.cache`` file in the current working directory.

    NOTE(review): the original comment claimed ``.pyc`` files were removed
    as well, but the glob only ever matched ``*.cache`` -- the comment was
    wrong, not the code, so behavior is unchanged.
    """
    for cache_path in glob.glob("*.cache"):
        os.remove(cache_path)
# Wrap response data in 'data' key.
def wrap_data(data, msg=None, error=None):
    """Wrap *data* under a ``'data'`` key for an API response.

    A truthy *msg* is stored as ``data['message']`` (so *data* must be a
    mutable mapping in that case); a truthy *error* adds a parallel
    ``{'error': {'message': error}}`` section.
    """
    wrapped = {'data': data}
    if msg:
        wrapped['data']['message'] = msg
    if error:
        wrapped['error'] = {'message': error}
    return wrapped
def get_date_string(date_time, reboot_time):
    """Return *reboot_time* advanced by *date_time* milliseconds.

    Fixes in this revision: the original referenced ``datetime.now`` and
    ``timedelta`` unqualified even though only the ``datetime`` module is
    imported (NameError at call time), and it computed a first ``date``
    value that was never used. The returned value matches the original
    intent: reboot time plus the millisecond offset.
    """
    return reboot_time + datetime.timedelta(milliseconds=date_time)
def cast_to_int(dict, *keys):
    """In place, convert ``dict[key]`` to ``int`` for each key that is
    present with a truthy value.

    (The parameter name ``dict`` shadows the builtin; it is kept for
    backward compatibility with keyword callers.)
    """
    for key in keys:
        value = dict.get(key)
        if value:
            dict[key] = int(value)
def cast_to_int_divide_by_factor(dict, factor, *keys):
    """In place, set ``dict[key] = int(dict[key]) / factor`` (true
    division) for each present, truthy key; a falsy *factor* disables the
    conversion entirely.
    """
    if not factor:
        return
    for key in keys:
        value = dict.get(key)
        if value:
            dict[key] = int(value) / factor
def cast_to_int_multiply_by_factor(dict, factor, *keys):
    """In place, set ``dict[key] = int(dict[key]) * factor`` for each
    present, truthy key; a falsy *factor* disables the conversion.
    """
    if not factor:
        return
    for key in keys:
        value = dict.get(key)
        if value:
            dict[key] = int(value) * factor
def getInterfaceName(number):
    """Map a numeric interface id (given as a string) to its display
    name; unknown or non-numeric ids map to the empty string."""
    display_names = {
        '1': 'Management Port on ODU',
        '101': 'Radio Interface',
    }
    if number.isdigit():
        return display_names.get(number, '')
    return ''
def formatFrequency(freq):
    """Format a frequency value for display, or return None on bad input.

    Values above 1,000,000 are shown divided by 1000 with an '[MHz]'
    suffix; everything else is divided by 1000 and shown with a '[GHz]'
    suffix.
    NOTE(review): the unit labels look swapped relative to the magnitudes
    (the larger value gets MHz) -- confirm the expected input unit before
    changing them; the original behavior is preserved here.

    The original used a bare ``except:`` (which also swallows
    KeyboardInterrupt/SystemExit); it is narrowed to the exceptions that
    ``float()`` and ``str.format`` actually raise.
    """
    try:
        floatFreq = float(freq)
        if floatFreq > 1000000:
            return "{0:0000.00} [MHz]".format(floatFreq / 1000)
        return '{:.3f} [GHz]'.format(floatFreq / 1000)
    except (TypeError, ValueError):
        return None
def getConvertedTimeFromTimeT(number_of_ticks, rebootTime):
    """Convert a device time_t second count into a formatted timestamp.

    Events older than (rebootTime - 1 day) appear to come from a device
    clock that started at the 2005-09-01 epoch, so they are re-based onto
    the actual reboot time; everything else is treated as a normal Unix
    (1970-epoch) timestamp. Returns a string formatted with
    consts.DATE_TIME_FORMAT. (TODO confirm the 2005 epoch assumption --
    it is inferred from the constant below, not documented elsewhere.)
    """
    number_of_seconds = int(number_of_ticks)
    init1970 = datetime.datetime(1970, 1, 1)
    initial2005Date = datetime.datetime(2005, 9, 1)
    date = init1970 + datetime.timedelta(seconds = number_of_seconds)
    rebootTimeWithOneDay = rebootTime - datetime.timedelta(days=1)
    if date < rebootTimeWithOneDay:
        #New date = Event time - "9/1/2005 12:00:00" + reboot time
        newDate = rebootTime + (date - initial2005Date)
        #If future time or time before reboot, do not return new date
        if (newDate > datetime.datetime.now() or newDate < (datetime.datetime.now() - datetime.timedelta(days = 1))):
            # DEBUG
            # NOTE(review): this branch returns the same value as the
            # fall-through below; the commented-out `return ''` suggests
            # out-of-range dates were once meant to be suppressed --
            # confirm intended behavior before simplifying.
            return newDate.strftime(consts.DATE_TIME_FORMAT)
            #return '';
        return newDate.strftime(consts.DATE_TIME_FORMAT)
    return date.strftime(consts.DATE_TIME_FORMAT)
def getSysUpTime(number_of_ticks):
    """Return the datetime at which the system booted, given an SNMP
    sysUpTime tick count (1 tick = 10 ms, i.e. 100 ticks per second)."""
    uptime_seconds = int(number_of_ticks) / 100
    return datetime.datetime.now() - datetime.timedelta(seconds=uptime_seconds)
def getFetureSupportByCapability(capabilityBitmask, index):
    """Return True when character *index* of the capability bitmask
    string is set (anything other than '0').

    Fixes in this revision: the out-of-range branch returned the
    undefined name ``false`` (NameError); the bounds check was off by one
    (``len < index - 1`` still allowed an IndexError for index == len and
    index == len + 1); and the converted ``intIndex`` was computed but
    never used, so a numeric-string *index* raised TypeError. Out-of-range
    indices now report the feature as unsupported.
    (Function name keeps its historical 'Feture' spelling -- callers
    depend on it.)
    """
    capabilities = list(capabilityBitmask)
    intIndex = int(index)
    if intIndex >= len(capabilities):
        return False
    return capabilities[intIndex] != '0'
def get_base_dir():
    """Return the absolute path of the directory containing this module."""
    module_dir = os.path.dirname(__file__)
    return os.path.abspath(module_dir)
| [
"dudu@misterbit.co.il"
] | dudu@misterbit.co.il |
afc616a9f4fb8236030e4eb8ceecb618d9cae8a0 | 480eb12d0d2ac9341ecd924113775379f848c1c6 | /.heroku/python/bin/markdown_py | fdc5bbeb9445538ca1a8e2a44400906f2b7f5eaa | [] | no_license | RobertoMarroquin/pm | 7c62b120b1bb3ad1a6adaeedfc2a27396670eac6 | 065dbd1161f9022fd30daeb40b4d58f2f4c48641 | refs/heads/main | 2023-03-14T02:48:20.320209 | 2021-03-10T14:55:00 | 2021-03-10T14:55:00 | 335,676,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | #!/app/.heroku/python/bin/python
# -*- coding: utf-8 -*-
# Console entry point for Python-Markdown: strip the setuptools wrapper
# suffix ('-script.pyw' or '.exe') from argv[0] so usage/help output shows
# a clean program name, then delegate to markdown's CLI runner and exit
# with its return code.
import re
import sys
from markdown.__main__ import run
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
| [
"robma96@gmail.com"
] | robma96@gmail.com | |
fed00145a3c5c1d30b45f1db88141ea13698f559 | 09a2f88ce6b1d524acb0b08efc59d90792e3c92e | /dictionaries.py | 55fdd9cbf5ef35f2a4a98d9f4ca814794aa0a117 | [] | no_license | chepkoy/python_fundamentals | 3afe6f197449eac0b023ea145123fd98c18c3006 | 1fa58615dc33af0cdf73f654c3f3143d6bff8a96 | refs/heads/master | 2021-08-24T02:11:55.350990 | 2017-12-07T15:54:54 | 2017-12-07T15:54:54 | 112,932,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | from pprint import pprint as pp
urls = {
'Google': 'http://google.com',
'pluralsight': 'http://pluralsight.com',
'Microsoft ': 'http://microsoft.com'
}
# dict constructor accepting iterable series of key-value 2-tuples
names_and_ages = [('allan', 24),('naomi', 19), ('brian', 13)]
d = dict(names_and_ages)
# keyword arguments - requires keys are valid python identifiers
phonetic = dict(a='alfa', b = 'bravo', c = 'charlie')
# Copying
d = dict(goldenrod=0xdaa529, indigo = '0xsffs2', seashell= 0x3433)
e = d.copy()
# using the dict constructor
f = dict(e)
# Extending a dictionary
g = dict(wheat = 0x23434, khaki = 0x87344)
f.update(g)
# Update replaces values corresponding to duplicate keys
stocks = {'GOOG': 891, 'AAPL': 416}
stocks.update({'GOOG': 894, 'YAHOO': 34})
# Iteration is over keys
colors = dict(aquamarine ="#86888", blue = "#DEGD888")
for key in colors:
print("{key} => {value}".format(key=key, value=colors[key]))
# use values for an iterable view onto the series of values
for value in colors.values():
print(value)
# keys method gives iterable view onto keys-not often needed
for key in colors.keys():
print(key)
# Use items for an iterable view onto the series of key-value tuples
for key, value in colors.items():
print("{key} => {value}".format(key=key, value=value))
# Membership in and not in operators work on the keys
symbols = dict(usd = '\u0024', gbp = '\u00a3')
'usd' in symbols
# use del keyword to remove by key
del d[key]
# python standard library pprint module
pp(colors) | [
"allankiplangat22@gmail.com"
] | allankiplangat22@gmail.com |
8562ba8f450a3ced6da3f1576b0ff963b7a108a6 | f76f79d346adc4c51060855a0d647a3cfb695f97 | /OBRecaptcha.py | a66e8e2aab9aea2b43d3d96ea95b6bde10b5c834 | [] | no_license | DandyDrop/OBRecaptcha | ec9b923616180e5e8e5337574a442792f20762d3 | 73dd859a9c4f2a3d75ead08634d0319f1c4f89e7 | refs/heads/main | 2023-08-14T17:51:03.143093 | 2021-09-20T13:35:29 | 2021-09-20T13:35:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,043 | py | import os
import requests
from colorama import Fore
from urllib import parse
from bs4 import BeautifulSoup
def logo():
    """Clear the console and print the application banner.

    NOTE(review): 'cls' is a Windows cmd.exe builtin; on other platforms
    os.system("cls") fails silently and the screen is not cleared.
    """
    os.system("cls")
    print(Fore.CYAN +"          ███████    ███████████  ███████████                                     █████            █████                ")
    print(Fore.CYAN +"        ███░░░░░███ ░░███░░░░░███░░███░░░░░███                                   ░░███            ░░███                 ")
    print(Fore.CYAN +"       ███     ░░███ ░███    ░███ ░███    ░███   ██████   ██████   ██████   ████████  ███████    ██████   ░███████   ██████ ")
    print(Fore.CYAN +"      ░███      ░███ ░██████████  ░██████████   ███░░███ ███░░███ ░░░░░███ ░░███░░███░░░███░    ███░░███  ░███░░███ ░░░░░███ ")
    print(Fore.CYAN +"      ░███      ░███ ░███░░░░░███ ░███░░░░░███ ░███████ ░███ ░░░   ███████  ░███ ░███  ░███    ░███ ░░░   ░███ ░███  ███████ ")
    print(Fore.CYAN +"      ░░███     ███  ░███    ░███ ░███    ░███ ░███░░░  ░███  ███ ███░░███  ░███ ░███  ░███ ███░███  ███ ░███ ░███ ███░░███ ")
    print(Fore.CYAN +"       ░░░███████░   ███████████  █████   █████░░██████ ░░██████ ░░████████  ░███████   ░░█████ ░░██████  ████ █████░░████████")
    print(Fore.CYAN +"         ░░░░░░░    ░░░░░░░░░░░  ░░░░░   ░░░░░  ░░░░░░   ░░░░░░   ░░░░░░░░   ░███░░░     ░░░░░   ░░░░░░  ░░░░ ░░░░░  ░░░░░░░░ ")
    print(Fore.CYAN +"                                                                             ░███                                             ")
    print(Fore.CYAN +"          Crée par Ell10T_4lD3rS0n                                           █████                                            ")
    print(Fore.CYAN +"          Dev with ♥                                                        ░░░░░                                    \n\n" + Fore.RESET)
logo()
# Input #
# Prompt for the reCAPTCHA anchor URL and the opaque request parameters
# (chr, vh, bg) captured from the browser's reload request.
url_anchor = input("Entre l'URL anchor ► ")
var_chr = input("CHR [xx,xx,xx] : ")
var_vh = input("VH : ")
var_bg = input("BG !x* : ")
logo()
# Variables #
# Extract the query-string parameters (site key, origin, version, locale)
# from the anchor URL.
var_k = parse.parse_qs(parse.urlparse(url_anchor).query)['k'][0]
var_co = parse.parse_qs(parse.urlparse(url_anchor).query)['co'][0]
var_v = parse.parse_qs(parse.urlparse(url_anchor).query)['v'][0]
var_hl = parse.parse_qs(parse.urlparse(url_anchor).query)['hl'][0]
# NOTE(review): var_size is assigned but never used -- the literal
# "invisible" is inlined in the payloads below.
var_size = "invisible"
# Get recaptcha-token #
# Fetch the anchor page and scrape the hidden recaptcha-token input.
get_tkn = requests.get(url_anchor)
soup = BeautifulSoup(get_tkn.text,"html.parser")
var_c = soup.find(id="recaptcha-token")['value']
# Get rresp #
# Replay the reload request with the scraped token and captured params.
url_reload = f"https://www.google.com/recaptcha/api2/reload?k={var_k}"
payload = f"v={var_v}&reason=q&c={var_c}&k={var_k}&co={var_co}&hl={var_hl}&size=invisible&chr={var_chr}&vh={var_vh}&bg={var_bg}"
headers = {'Host': 'www.google.com', 'Content-Type': 'application/x-www-form-urlencoded'}
post_rresp = requests.post(url_reload, data=payload, headers=headers)
# Check rresp #
# A "rresp" field in the response means the reload succeeded; emit an
# OpenBullet LoliScript config reproducing the request chain.
if "\"rresp\",\"" in post_rresp.text:
    print(Fore.GREEN + "Bypass Recaptcha Possible" + Fore.RESET)
    post_data = f"v={var_v}&reason=q&c=<recaptcha-token>&k={var_k}&co={var_co}&hl={var_hl}&size=invisible&chr={var_chr}&vh={var_vh}&bg={var_bg}"
    loliscript = f"#GET_recaptcha-token REQUEST GET \"{str(url_anchor)}\"\n\n#recaptcha-token PARSE \"<SOURCE>\" LR \"<input type=\\\"hidden\\\" id=\\\"recaptcha-token\\\" value=\\\"\" \"\\\">\" -> VAR \"recaptcha-token\" \n\n#POST_GET_rresp REQUEST POST \"{str(url_reload)}\" AutoRedirect=FALSE \n  CONTENT \"{str(post_data)}\" \n  CONTENTTYPE \"application/x-www-form-urlencoded\" \n\n#rresp PARSE \"<SOURCE>\" LR \"[\\\"rresp\\\",\\\"\" \"\\\",\" -> VAR \"rresp\" "
    with open("loliscript.txt", "w") as f:
        # NOTE(review): writelines() on a str writes it character by
        # character (same output as write()); the explicit close() is
        # redundant inside the with-block.
        f.writelines(loliscript)
        f.close()
else:
    print(Fore.RED + "Bypass Recaptcha Impossible" + Fore.RESET)
"noreply@github.com"
] | DandyDrop.noreply@github.com |
c71579c82c51f1e0a6ead2b6f2ae860c6142c8b8 | f5e589f51144af30d27cd2da39f134ceda995318 | /ssong/ssong/spiders/singlesong.py | bee1d0ebb5acbe3f98aa874d99ad0b113b67d652 | [] | no_license | umangsuthar/test | e1c865481e14fd8b6d5f8de50d02c0f94b8aa92c | a7171ad67ec6f111e3e05dc675e332ee4752bb2e | refs/heads/master | 2020-04-17T10:58:02.842579 | 2016-08-28T06:46:11 | 2016-08-28T06:46:11 | 66,753,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,757 | py | import scrapy
import time
from scrapy.selector import HtmlXPathSelector
from ssong.items import SsongItem
class scrp(scrapy.Spider):
    """Spider for songsmp3.net: walks the front-page movie list, then each
    movie's page, and yields one item per song with both bitrate links."""
    name = "scr"
    allowed_domains = ["songsmp3.net"]
    start_urls = ["http://songsmp3.net/"]

    def parse(self,response):
        """Collect movie links (paths under /1/) from the front-page list
        boxes and schedule SongParse for each."""
        sel = response.selector.xpath("//div[@class='list_box_2']/div[@class='list_box_inside']")
        for ss in sel.xpath("./ul/li/a/@href").extract():
            if ss.startswith('/1/'):
                s1 = ss
                s1 = "http://songsmp3.net"+s1
                yield scrapy.Request(s1,callback = self.SongParse)

    def SongParse(self,response):
        """Extract the movie name from the page heading and schedule
        Song_Link for every per-song download page."""
        nameOrg = response.selector.xpath("//h1/text()").extract()
        nameOrg = nameOrg[0]
        MovieName = "".join(nameOrg)
        # Drop the trailing 9 characters of the heading (presumably a
        # fixed suffix such as ' mp3 song' -- TODO confirm on site).
        Mname = MovieName[:-9]
        so = response.selector.xpath("//div[@class='download-single-links_box']//div[@class='link-item']")
        for link in so.xpath("./a/@href").extract():
            link = "http://songsmp3.net"+link
            #name12= so.xpath("./a/div[@class='link']/text()").extract()
            yield scrapy.Request(link,callback = self.Song_Link, meta={'name': Mname})
        '''print("\n")
        print(name12)
        print("\n")'''

    def Song_Link(self,response):
        """Build one SsongItem with the movie name (from request meta),
        song name, scrape timestamp and the 128/320 kbps download links."""
        item = SsongItem()
        print("\n")
        #print(response.meta['name'])
        item['MovieName'] = response.meta['name']
        #print(item['MovieName'])
        item['TimeStamp'] = time.time()
        so_link = response.selector.xpath("//div[@class='download-single-links_box']//div[@class='sinlge_link_item']")
        ll = so_link.xpath("./div[@class='link-item_button3']/a/@href").extract()
        na = so_link.xpath("./div[@class='link-item2']/div[@class='link']/text()").extract()
        item['SongName'] = na[0]
        #print(item['SongName'])
        # First extracted link is the 128 kbps file, second the 320 kbps.
        item['MP128'] = ll[0]
        item['MP320'] = ll[1]
        #print("\n")
        #print(item['MP128'])
        #print(item['MP320'])
        #print("\n")
        yield item
| [
"sahajanand@sahajanand-VirtualBox.(none)"
] | sahajanand@sahajanand-VirtualBox.(none) |
318a74534f2ec00ecb9d3d2d90042ac5ad963a45 | 56a7dd75f2a3f45d599ca89aaa9ca45390fbd546 | /ejercicios_preparcialito/parcialito_2/diccionarios/ejercicio_62.py | 28412a76c350229698e62a66e240a63a6c3ce189 | [] | no_license | facundoPri/algoritmo-programacion-i-essaya | e030d74de832b7642ff84a77212f8ea429d560d8 | 5ff7a8fc66f6683d47bc9faf80a35f9902b1e1a3 | refs/heads/master | 2023-04-07T01:04:10.221473 | 2021-04-10T13:05:59 | 2021-04-10T13:05:59 | 299,450,415 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | """
Escribir una función que reciba una cadena y devuelva un diccionario cuyas claves sean las letras y cuyos valores sean la cantidad de apariciones de dicha letra. Por ejemplo, si recibe 'catamarca' debe devolver: {'c':2, 'a':4, 't':1, 'r':1, 'm':1}.
"""
def contar_caracteres(cadena):
"""
Recibe una cadena
Devuelve un diccionarion con la cantidad de veces que aparece cada caracter
"""
contador = {}
for letra in cadena:
contador[letra] = contador.get(letra, 0) + 1
return contador
print(contar_caracteres("facundo"))
| [
"facundo.prieto321@gmail.com"
] | facundo.prieto321@gmail.com |
ec8498ae54869540f229014677d6853284fde9fc | d4c67b2a12e990d4193e7ab06f04824a348067bf | /rl_trainer/ddpg_impl/flower/actor_critic/tf_ddpg_agent.py | 68c2dcbba15a6708b6789a492d9ba35ba24c020f | [
"BSD-3-Clause"
] | permissive | Roboy/nips-2018-ai-for-prosthetics | 2d57688ce85126379793e8643cbf0030c8f56beb | acb69f267a0cc852842828edbbfb47d1840c0a17 | refs/heads/master | 2020-03-26T05:39:25.565440 | 2018-11-01T23:28:08 | 2018-11-01T23:28:08 | 144,567,613 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,073 | py | import numpy as np
from typing import Callable, Collection
import tensorflow as tf
from gym.spaces import Box
from overrides import overrides
from typeguard import typechecked
from rl_trainer.agent import GymAgent
from rl_trainer.agent.replay_buffer import ReplayBuffer, InMemoryReplayBuffer
from rl_trainer.commons import Episode, ExperienceTupleBatch
from rl_trainer.ddpg_impl.flower.actor_critic.tf_model_saver import TFModelSaver
from .action_noise import OrnsteinUhlenbeckActionNoise
from .q_network import OnlineQNetwork
from .policy_network import OnlinePolicyNetwork
class TensorFlowDDPGAgent(GymAgent):
    """DDPG agent: deterministic actor-critic with target networks, an
    experience replay buffer, and Ornstein-Uhlenbeck exploration noise."""

    def __init__(self, state_dim: int, action_space: Box, sess: tf.Session = None,
                 gamma: float = 0.99, replay_buffer: ReplayBuffer = None,
                 actor_noise: Callable = None, tau: float = 0.001,
                 critic_nn: OnlineQNetwork = None, actor_nn: OnlinePolicyNetwork = None,
                 tf_model_saver: TFModelSaver = None):
        action_dim = action_space.shape[0]
        self._gamma = gamma  # discount factor for future rewards
        self._sess = sess if sess else tf.Session()
        # Online critic Q(s, a) and its slowly-tracking target copy Qʹ
        # (soft-updated with rate tau).
        self._Q = critic_nn if critic_nn else OnlineQNetwork(
            sess=self._sess, state_dim=state_dim, action_dim=action_dim)
        self._Qʹ = self._Q.create_target_network(tau=tau)
        # Online deterministic policy μ(s) and its target copy μʹ.
        self._μ = actor_nn if actor_nn else OnlinePolicyNetwork(
            action_bound=action_space.high, sess=self._sess,
            state_dim=state_dim, action_dim=action_dim, action_space=action_space)
        self._μʹ = self._μ.create_target_network(tau=tau)
        with self._sess.graph.as_default():
            self._model_saver = tf_model_saver if tf_model_saver else TFModelSaver()
            self._sess.run(tf.global_variables_initializer())
        self._actor_noise = actor_noise if actor_noise else OrnsteinUhlenbeckActionNoise(
            mu=np.zeros(action_dim))
        self._replay_buffer = replay_buffer if replay_buffer else InMemoryReplayBuffer()
        # Largest critic estimate seen in the latest training batch
        # (diagnostic only).
        self.episode_max_q = 0
        # Sync the target networks with the online ones before training.
        self._update_target_nets()

    def _update_target_nets(self):
        """Move both target networks toward their online counterparts."""
        self._μʹ.update()
        self._Qʹ.update()

    @typechecked
    @overrides
    def act(self, current_state: Collection[float]):
        """Train (once the buffer is warm) and return a noisy action."""
        if self._replay_buffer.has_sufficient_samples():
            self._train()
        s = np.array([current_state]) # pack single state into tf action batch
        action = self._μ(s=s)
        return action[0] + self._actor_noise() # unpack tf batch shape

    def _train(self):
        """One DDPG step: critic update, actor update, target soft-update."""
        batch = self._replay_buffer.sample_batch()
        self._train_critic(batch)
        self._train_actor(batch)
        self._update_target_nets()

    @typechecked
    def _train_critic(self, batch: ExperienceTupleBatch) -> None:
        """Train the critic toward Bellman targets y = r + γ·Qʹ(s2, μʹ(s2))."""
        μʹ = self._μʹ
        γ = self._gamma
        s2 = np.array(batch.states_2)
        dones = batch.states_2_are_terminal
        Qs_s2 = self._Qʹ(s=s2, a=μʹ(s=s2))
        # Terminal transitions contribute only the immediate reward.
        yᵢ = [(r + (1-done)*γ*Q_s2) for r, done, Q_s2 in zip(batch.rewards, dones, Qs_s2)]
        yᵢ = np.array(yᵢ).reshape((-1, 1))
        s = np.array(batch.states_1)
        a = np.array(batch.actions)
        self._Q.train(s=s, a=a, y_i=yᵢ)
        self._log_max_q(batch=batch)

    @typechecked
    def _train_actor(self, batch: ExperienceTupleBatch) -> None:
        """Update the actor policy using the sampled gradient"""
        s = np.array(batch.states_1)
        μ = self._μ
        # Gradient of Q w.r.t. the action, evaluated at the actor's output.
        grads_a = self._Q.grads_a(s=s, a=μ(s))
        assert len(grads_a) == 1
        μ.train(s=s, grads_a=grads_a[0]) # unpack tf batch shape

    @typechecked
    def _log_max_q(self, batch: ExperienceTupleBatch):
        """Record the largest critic estimate on this batch (diagnostics)."""
        s, a = batch.states_1, batch.actions
        q_vals = self._Q(s=s, a=a)
        self.episode_max_q = np.amax(q_vals)

    @typechecked
    @overrides
    def observe_episode(self, episode: Episode):
        """Store the episode's transitions and checkpoint the model."""
        self._replay_buffer.extend(episode.experience_tuples)
        self._model_saver.step(self._sess)

    @typechecked
    @overrides
    def set_seed(self, seed: int):
        """Seed TensorFlow's graph-level random generator."""
        tf.set_random_seed(seed)
| [
"tomas.ruiz.te@gmail.com"
] | tomas.ruiz.te@gmail.com |
eb4a13c90d67f6bd864345caa861e78d8d623710 | a444cf1a175a5a51518e6e1cab8d5f5276c65799 | /laboratorios/internal/migrations/0029_auto_20170703_0059.py | 99c793b7b1c025400be01cdb4522cce2c16d2b19 | [] | no_license | mvasquezb/LaboratoriosPUCP | 9e4eb0cf8cd36aa25454181d08d208a87afbf775 | e68138afd8609341b1b2d08783aab9fb2ba1231c | refs/heads/dev | 2022-12-11T00:42:42.249222 | 2018-02-03T19:07:12 | 2018-02-03T19:07:12 | 90,681,793 | 1 | 3 | null | 2022-12-08T00:00:31 | 2017-05-08T23:34:35 | HTML | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-03 05:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('internal', '0028_auto_20170702_1445'),
]
operations = [
migrations.AlterField(
model_name='client',
name='doc_number',
field=models.CharField(max_length=20),
),
]
| [
"pedromvasquezb@gmail.com"
] | pedromvasquezb@gmail.com |
e1c04602eb11935c3019f76fedd8f5debbf6c2c4 | b9c4c4e2ba9a54cf79169bb2c43e29b6994618f4 | /source/webapp/models.py | 5d3cd21c50b5fbf0b491d7e211c3065189b6e5ec | [] | no_license | big-arturka/exam_9 | 37bf8be08e0fd922bf36b0663babd4611d1ffb04 | 3505e39d9e2110c2912fc7f474e6ec297a8df4dd | refs/heads/master | 2022-12-31T14:06:53.576579 | 2020-10-24T13:24:24 | 2020-10-24T13:24:24 | 306,677,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.db import models
class Photo(models.Model):
image = models.ImageField(upload_to='images', verbose_name='Фото')
signature = models.CharField(max_length=200, verbose_name='Подпись')
created_at = models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')
author = models.ForeignKey(get_user_model(), max_length=50, verbose_name='Автор',
related_name='image_author', on_delete=models.CASCADE)
def fav_by(self, user):
favs = self.favorite_photo.filter(author=user)
return favs
def __str__(self):
return f'{self.signature}-{self.author}'
class Meta:
verbose_name = 'Изображение'
verbose_name_plural = 'Изображения'
class Favorites(models.Model):
photo = models.ForeignKey('webapp.Photo', related_name='favorite_photo', verbose_name='Фото', on_delete=models.CASCADE)
author = models.ForeignKey(get_user_model(), related_name='favorite_author',
verbose_name='Автор', on_delete=models.CASCADE)
def __str__(self):
return f'{self.photo}-{self.author}'
class Meta:
verbose_name = 'Избранное'
verbose_name_plural = 'Избранные'
| [
"arturkrmnlv10@gmail.com"
] | arturkrmnlv10@gmail.com |
1a1dd1f93a63fea7f3054032927803590145ddbe | 9e69cc08bf4dba7332664bb4ea71825994b104c9 | /app/core/management/commands/wait_for_db.py | cceda395bc524f1974cf84e1425101e46421a76d | [
"MIT"
] | permissive | shakyaabiral/recipe-app-api | c2366983d8564b89a62ace1cfd4cbd9b7f47f046 | df9808ac1228db41912e219f6801f7d9fd6b5a6a | refs/heads/master | 2020-05-14T01:23:30.687135 | 2019-05-11T02:59:22 | 2019-05-11T02:59:22 | 181,684,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until database is available"""
def handle(self, *args, **options):
self.stdout.write('waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!!!'))
| [
"vagrant@vector.dev"
] | vagrant@vector.dev |
5f4aab11e41cbd3cd0d09e612d2a35bd928daf3b | d5203f57d89e44c20d3ee344e54c409070b732e4 | /2D_peak_finding.py | bb2f830701eda2bf738e66cef83e3b6e01c47822 | [] | no_license | bjs9yv/Other | 7a52bd921b1a8ff147ca279f425020ebc83c442c | ad7cc85069190cbf54d859b94f17cb30c6029ff6 | refs/heads/master | 2021-01-10T14:09:40.389706 | 2016-02-23T04:40:39 | 2016-02-23T04:40:39 | 52,330,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | # return a local peak in a 2D array
def find_peak(a):
if len(a) <= 3:
return max(a)
middle = len(a)/2
left_lower = a[middle] > a[middle-1]
right_lower = a[middle] > a[middle+1]
if left_lower and right_lower:
# peak found
return a[middle]
elif left_lower and not right_lower:
# upward sloping, peak to the right
find_peak(a[middle:])
elif not left_lower and right_lower:
# downward sloping, peak to the left
find_peak(a[:middle])
else:
# valley, peak on either side, pick right
return find_peak(a[middle:])
if __name__ == "__main__":
print find_peak([1,2,1,11,33])
| [
"bjs9yv@virginia.edu"
] | bjs9yv@virginia.edu |
435181fce498698200dc379f5d88d9529f25d5f8 | 85158da987280192964040ffb941bfac3d5c2644 | /Medium/0230_Kth_Smallest_BST.py | 40473ae198e0a367ade8d78abda335101c54d019 | [] | no_license | concealedtea/Coding-Interview-Prep | 223cce0c63c65c7180239e7abe7dc8ae0f5453d3 | 4bc1f0cabd59456dadf54bdae2f10fa5bdf6c71e | refs/heads/master | 2022-11-19T07:20:29.188922 | 2020-07-30T06:08:46 | 2020-07-30T06:08:46 | 192,228,821 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
def kthSmallest(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: int
"""
def traverse(root):
if root is None:
return []
left = traverse(root.left)
right = traverse(root.right)
return left + [root.val] + right
return traverse(root)[k-1] | [
"juliusfan98@gmail.com"
] | juliusfan98@gmail.com |
46eba0bc20abb0447c20cf415650e475878b0ed9 | 34313dd0b7a915ab3e4beba76f8586ed7d5ae078 | /learning_racer/config/__init__.py | 8929c4ffe52b85e558ab1a19ebfbf604dbb01149 | [
"MIT"
] | permissive | masato-ka/airc-rl-agent | 81d2e9369ab2d24605d3eefc92a9d076b9e49b1c | af675a9ac449ae3819d7f0799c4fc66b0ef6fc82 | refs/heads/master | 2022-07-23T07:58:06.659529 | 2022-07-03T12:54:30 | 2022-07-03T12:57:53 | 239,724,933 | 86 | 30 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | from .config import ConfigReader
| [
"jp6uzv@gmail.com"
] | jp6uzv@gmail.com |
e0cc3404720da69dcd2ca3f838239948afb93ce6 | 54c07f4e781838f50c8220c275d94e69cec40483 | /tabel_competition/pipline/regression/lib/adversarial_validation.py | 653838c534de202d1896a527b3d22fe002841c53 | [] | no_license | hirayukis/kaggle-my-favorite-technique | b39f102dd008731c6516ac8b2c43e1e5ec2b4db7 | 9128d5a098116eb23a39e08d3ede45802c87b89d | refs/heads/master | 2023-04-20T18:43:11.462673 | 2021-05-13T07:38:20 | 2021-05-13T07:38:20 | 261,166,945 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,953 | py | import lightgbm as lgb
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, roc_curve
def plot_roc_feat(y_trues, y_preds, labels, est, filename, cols, x_max=1.0):
fig, ax = plt.subplots(1, 2, figsize=(16, 6))
for i, y_pred in enumerate(y_preds):
y_true = y_trues[i]
fpr, tpr, thresholds = roc_curve(y_true, y_pred)
auc = roc_auc_score(y_true, y_pred)
ax[0].plot(fpr, tpr, label='%s; AUC=%.3f' % (labels[i], auc), marker='o', markersize=1)
ax[0].legend()
ax[0].grid()
ax[0].plot(np.linspace(0, 1, 20), np.linspace(0, 1, 20), linestyle='--')
ax[0].set_title('ROC curve')
ax[0].set_xlabel('False Positive Rate')
ax[0].set_xlim([-0.01, x_max])
_ = ax[0].set_ylabel('True Positive Rate')
values = est.feature_importance()
importance = pd.DataFrame(data=values, index=cols,
columns=['score']).sort_values(by='score',
ascending=False)
sns.barplot(x=importance.score.iloc[:20],
y=importance.index[:20],
orient='h',
palette='Reds_r', ax=ax[1])
ax[1].set_title('Feature Importances')
plt.savefig(filename + "_importance_feature.png")
def adversarial_validate(data, splitnum, filename=""):
train = data[:splitnum]
test = data[splitnum:]
adv_train = train.copy()
adv_test = test.copy()
adv_train['dataset_label'] = 0
adv_test['dataset_label'] = 1
adv_master = pd.concat([adv_train, adv_test], axis=0)
adv_X = adv_master.drop('dataset_label', axis=1)
adv_y = adv_master['dataset_label']
adv_X_train, adv_X_test, adv_y_train, adv_y_test = train_test_split(adv_X,
adv_y,
test_size=0.4,
stratify=adv_y,
random_state=42)
params = {
'task': 'train',
'objective': 'binary',
'metric': 'binary_logloss',
"seed": 42,
}
lgb_train = lgb.Dataset(adv_X_train, adv_y_train)
lgb_valid = lgb.Dataset(adv_X_test, adv_y_test)
lgb_model = lgb.train(params, lgb_train,
num_boost_round=10000,
valid_names=["train", "valid"],
valid_sets=[lgb_train, lgb_valid],
early_stopping_rounds=10,
verbose_eval=-1)
validation = lgb_model.predict(adv_X_test)
plot_roc_feat(
[adv_y_test],
[validation],
['Baseline'],
lgb_model,
filename,
data.columns
)
| [
"ykbhiralmec@gmail.com"
] | ykbhiralmec@gmail.com |
e44c41349de4460990c48fffb6b21e3e203d26ab | 63afdad853f5b0670731205859faa5144015191d | /contact/models.py | 118c339f53556b4f78a85cb2df7f07e23e8a9366 | [] | no_license | georgehuble/django_movie | ef4d870afaf03ab9bf0c6b3d259ec2c56ea04500 | 4c94c171efccff6b070d9a7812dacb1399524724 | refs/heads/master | 2023-05-08T23:09:48.815903 | 2021-05-29T15:00:29 | 2021-05-29T15:00:29 | 371,945,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from django.db import models
class Contact(models.Model):
"""Подписка по email"""
email = models.EmailField()
date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.email | [
"nevajno23@mail.ru"
] | nevajno23@mail.ru |
f09b5ba0c63513fae5eb3bf59e46085513e482a1 | 77166c6ed9b872fa69b454d3682f63527f5f3951 | /arcade/text.py | eb69a21fd6aad532e3894f36ed8a51fdfd3f1d17 | [
"MIT"
] | permissive | biggzlar/arcade | d72d936f3c244a9d5173b6f36bca3ede3382a0ae | fc444db356452660ac6cb2ffe241f0b1a3d4bcf3 | refs/heads/master | 2020-12-14T06:30:18.997456 | 2020-01-18T04:44:03 | 2020-01-18T04:44:03 | 234,668,560 | 1 | 0 | NOASSERTION | 2020-01-18T02:07:41 | 2020-01-18T02:07:40 | null | UTF-8 | Python | false | false | 12,133 | py | # --- BEGIN TEXT FUNCTIONS # # #
from typing import Tuple, Union, cast
import PIL.Image
import PIL.ImageDraw
import PIL.ImageFont
from arcade.sprite import Sprite
from arcade.arcade_types import Color
from arcade.draw_commands import Texture
from arcade.arcade_types import RGBA
from arcade.draw_commands import get_four_byte_color
import pyglet.gl as gl
import pyglet
class Text:
""" Class used for managing text. """
def __init__(self):
self.size = (0, 0)
self.text_sprite_list = None
class CreateText:
""" Class used for managing text """
def __init__(self,
text: str,
color: Color,
font_size: float = 12,
width: int = 20,
align="left",
font_name=('Calibri', 'Arial'),
bold: bool = False,
italic: bool = False,
anchor_x="left",
anchor_y="baseline",
rotation=0):
self.text = text
self.color = color
self.font_size = font_size
self.width = width
self.align = align
self.font_name = font_name
self.bold = bold
self.italic = italic
self.anchor_x = anchor_x
self.anchor_y = anchor_y
self.rotation = rotation
def create_text(text: str,
color: Color,
font_size: float = 12,
width: int = 0,
align="left",
font_name=('Calibri', 'Arial'),
bold: bool = False,
italic: bool = False,
anchor_x: str = "left",
anchor_y: str = "baseline",
rotation=0):
""" Deprecated. Two step text drawing for backwards compatibility. """
import warnings
warnings.warn("create_text has been deprecated, please use draw_text instead.", DeprecationWarning)
my_text = CreateText(text, color, font_size, width, align, font_name, bold, italic, anchor_x, anchor_y, rotation)
return my_text
def render_text(text: CreateText, start_x: float, start_y: float):
""" Deprecated. Two step text drawing for backwards compatibility. """
import warnings
warnings.warn("render_text has been deprecated, please use draw_text instead.", DeprecationWarning)
draw_text(text.text,
start_x,
start_y,
color=text.color,
font_size=text.font_size,
width=text.width,
align=text.align,
font_name=text.font_name,
bold=text.bold,
italic=text.italic,
anchor_x=text.anchor_x,
anchor_y=text.anchor_y,
rotation=text.rotation)
def draw_text(text: str,
start_x: float, start_y: float,
color: Color,
font_size: float = 12,
width: int = 0,
align: str = "left",
font_name: Union[str, Tuple[str, ...]] = ('calibri', 'arial'),
bold: bool = False,
italic: bool = False,
anchor_x: str = "left",
anchor_y: str = "baseline",
rotation: float = 0
):
"""
:param str text: Text to draw
:param float start_x:
:param float start_y:
:param Color color: Color of the text
:param float font_size: Size of the text
:param float width:
:param str align:
:param Union[str, Tuple[str, ...]] font_name:
:param bool bold:
:param bool italic:
:param str anchor_x:
:param str anchor_y:
:param float rotation:
"""
# Scale the font up, so it matches with the sizes of the old code back
# when Pyglet drew the text.
font_size *= 1.25
# Text isn't anti-aliased, so we'll draw big, and then shrink
scale_up = 5
scale_down = 5
font_size *= scale_up
# If the cache gets too large, dump it and start over.
if len(draw_text.cache) > 5000: # type: ignore # dynamic attribute on function obj
draw_text.cache = {} # type: ignore # dynamic attribute on function obj
key = f"{text}{color}{font_size}{width}{align}{font_name}{bold}{italic}"
if key in draw_text.cache: # type: ignore # dynamic attribute on function obj
label = draw_text.cache[key] # type: ignore # dynamic attribute on function obj
text_sprite = label.text_sprite_list[0]
if anchor_x == "left":
text_sprite.center_x = start_x + text_sprite.width / 2
elif anchor_x == "center":
text_sprite.center_x = start_x
elif anchor_x == "right":
text_sprite.right = start_x
else:
raise ValueError(f"anchor_x should be 'left', 'center', or 'right'. Not '{anchor_x}'")
if anchor_y == "top":
text_sprite.center_y = start_y - text_sprite.height / 2
elif anchor_y == "center":
text_sprite.center_y = start_y
elif anchor_y == "bottom" or anchor_y == "baseline":
text_sprite.bottom = start_y
else:
raise ValueError(f"anchor_y should be 'top', 'center', 'bottom', or 'baseline'. Not '{anchor_y}'")
text_sprite.angle = rotation
else:
label = Text()
# Figure out the font to use
font = None
# Font was specified with a string
if isinstance(font_name, str):
try:
font = PIL.ImageFont.truetype(font_name, int(font_size))
except OSError:
# print(f"1 Can't find font: {font_name}")
pass
if font is None:
try:
temp_font_name = f"{font_name}.ttf"
font = PIL.ImageFont.truetype(temp_font_name, int(font_size))
except OSError:
# print(f"2 Can't find font: {temp_font_name}")
pass
# We were instead given a list of font names, in order of preference
else:
for font_string_name in font_name:
try:
font = PIL.ImageFont.truetype(font_string_name, int(font_size))
# print(f"3 Found font: {font_string_name}")
except OSError:
# print(f"3 Can't find font: {font_string_name}")
pass
if font is None:
try:
temp_font_name = f"{font_string_name}.ttf"
font = PIL.ImageFont.truetype(temp_font_name, int(font_size))
except OSError:
# print(f"4 Can't find font: {temp_font_name}")
pass
if font is not None:
break
# Default font if no font
if font is None:
font_names = ("arial.ttf",
'Arial.ttf',
'NotoSans-Regular.ttf',
"/usr/share/fonts/truetype/freefont/FreeMono.ttf",
'/System/Library/Fonts/SFNSDisplay.ttf',
'/Library/Fonts/Arial.ttf')
for font_string_name in font_names:
try:
font = PIL.ImageFont.truetype(font_string_name, int(font_size))
break
except OSError:
# print(f"5 Can't find font: {font_string_name}")
pass
# This is stupid. We have to have an image to figure out what size
# the text will be when we draw it. Of course, we don't know how big
# to make the image. Catch-22. So we just make a small image we'll trash
text_image_size = (10, 10)
image = PIL.Image.new("RGBA", text_image_size)
draw = PIL.ImageDraw.Draw(image)
# Get size the text will be
text_image_size = draw.multiline_textsize(text, font=font)
# Create image of proper size
text_height = text_image_size[1]
text_width = text_image_size[0]
image_start_x = 0
if width == 0:
width = text_image_size[0]
else:
# Wait! We were given a field width.
if align == "center":
# Center text on given field width
field_width = width * scale_up
text_image_size = field_width, text_height
image_start_x = (field_width - text_width) // 2
width = field_width
else:
image_start_x = 0
# If we draw a y at 0, then the text is drawn with a baseline of 0,
# cutting off letters that drop below the baseline. This shoves it
# up a bit.
image_start_y = - font_size * scale_up * 0.02
image = PIL.Image.new("RGBA", text_image_size)
draw = PIL.ImageDraw.Draw(image)
# Convert to tuple if needed, because the multiline_text does not take a
# list for a color
if isinstance(color, list):
color = cast(RGBA, tuple(color))
draw.multiline_text((image_start_x, image_start_y), text, color, align=align, font=font)
image = image.resize((width // scale_down, text_height // scale_down), resample=PIL.Image.LANCZOS)
text_sprite = Sprite()
text_sprite._texture = Texture(key)
text_sprite.texture.image = image
text_sprite.image = image
text_sprite.texture_name = key
text_sprite.width = image.width
text_sprite.height = image.height
if anchor_x == "left":
text_sprite.center_x = start_x + text_sprite.width / 2
elif anchor_x == "center":
text_sprite.center_x = start_x
elif anchor_x == "right":
text_sprite.right = start_x
else:
raise ValueError(f"anchor_x should be 'left', 'center', or 'right'. Not '{anchor_x}'")
if anchor_y == "top":
text_sprite.center_y = start_y + text_sprite.height / 2
elif anchor_y == "center":
text_sprite.center_y = start_y
elif anchor_y == "bottom" or anchor_y == "baseline":
text_sprite.bottom = start_y
else:
raise ValueError(f"anchor_y should be 'top', 'center', 'bottom', or 'baseline'. Not '{anchor_y}'")
text_sprite.angle = rotation
from arcade.sprite_list import SpriteList
label.text_sprite_list = SpriteList()
label.text_sprite_list.append(text_sprite)
draw_text.cache[key] = label # type: ignore # dynamic attribute on function obj
label.text_sprite_list.draw()
draw_text.cache = {} # type: ignore # dynamic attribute on function obj
def draw_text_2(text: str,
start_x: float, start_y: float,
color: Color,
font_size: float = 12,
width: int = 0,
align: str = "left",
font_name: Union[str, Tuple[str, ...]] = ('calibri', 'arial'),
bold: bool = False,
italic: bool = False,
anchor_x: str = "left",
anchor_y: str = "baseline",
rotation: float = 0
):
"""
:param str text: Text to draw
:param float start_x:
:param float start_y:
:param Color color: Color of the text
:param float font_size: Size of the text
:param float width:
:param str align:
:param Union[str, Tuple[str, ...]] font_name:
:param bool bold:
:param bool italic:
:param str anchor_x:
:param str anchor_y:
:param float rotation:
"""
color = get_four_byte_color(color)
label = pyglet.text.Label(text,
font_name=font_name,
font_size=font_size,
x=start_x, y=start_y,
anchor_x=anchor_x, anchor_y=anchor_y,
color=color,
align=align,
bold=bold,
italic=italic,
width=width)
label.draw()
| [
"paul@cravenfamily.com"
] | paul@cravenfamily.com |
d01726e0dbc995c5c88494d9f3cee56d6177e3d0 | de2a9dd0a232960ebbc8e431a16f137aafaa8d3f | /trips/views.py | a9d3a828fab5a8ae9d57b1543bc7d36bf02310f6 | [] | no_license | leanneapichay/travelapp | 26aed8daf3dab9102898e983991ae9acc076c1dd | ef2c97e8ae301d171bbf45126ca41bcbdad888a8 | refs/heads/master | 2020-04-15T19:37:33.681796 | 2019-02-08T04:29:43 | 2019-02-08T04:29:43 | 164,957,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,533 | py | from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from .serializers import *
from .models import *
# View to create trip and all stops within it
class TripViews(APIView):
def post(self, request, format=None):
serializer = TripSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get(self, request, id, format=None):
trip = self.get_object(id)
serializer = TripSerializer(trip)
return Response(serializer.data)
def delete(self, request, pk, format=None):
trip = self.get_object(pk)
trip.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class StopViews(APIView):
def post(self, request, format=None):
serializer = StopSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get(self, request, id, format=None):
stop = self.get_object(id)
serializer = TripSerializer(stop)
return Response(serializer.data)
def delete(self, request, id, format=None):
stop = self.get_object(id)
stop.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class AddFlight(APIView):
def post(self, request, format=None):
serializer = FlightSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get(self, request, id, format=None):
try:
flight = self.get_object(id=id)
except Flight.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = FlightSerializer(flight)
return Response(serializer.data, status=status.HTTP_200_OK)
class GetTripStops(APIView):
def get(self, request, id,format=None):
try:
trip = Trip.objects.get(id=id)
except Trip.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
def get_query(trip_input):
stop_list = []
for stop in Stop.objects.all:
if stop.trip == trip_input:
stop_list.append(stop)
return stops
stops = get_query(trip)
return Response(stops, status=status.HTTP_200_OK)
class BudgetViews(APIView):
def post(self, request, format=None):
serializer = BudgetSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def put(self, request, id, format=None):
budget = self.get_object(id)
serializer = BudgetSerializer(budget, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class BucketListViews(APIView):
def post(self, request, format=None):
serializer = BucketListItemSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, id, format=None):
stop = self.get_object(id)
stop.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class PackingListViews(APIView):
def post(self, request, format=None):
serializer = PackingListItemSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, id, format=None):
stop = self.get_object(id)
stop.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| [
"leanneapichay@gmail.com"
] | leanneapichay@gmail.com |
f91465a0a230dc63c74e15613576ecc57d891906 | 3b7c02225cecf4a382546ba267996e072e0c0e9f | /migrations/versions/ab586224191e_new_fields_in_user.py | f93df7b837a693139244a3656db39bdd41ea7ded | [] | no_license | shashi4bs/blog_flask | d693a29be1d559125eef5e2a2aaae971a6996540 | 304237e815afdacced85f0ad8a66639f62d5692f | refs/heads/master | 2022-12-12T14:33:35.984335 | 2019-07-07T10:00:11 | 2019-07-07T10:00:11 | 195,239,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | """new fields in user
Revision ID: ab586224191e
Revises: af7e9860ce48
Create Date: 2019-05-01 12:14:58.000583
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ab586224191e'
down_revision = 'af7e9860ce48'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('about_me', sa.String(length=140), nullable=True))
op.add_column('user', sa.Column('last_seen', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'last_seen')
op.drop_column('user', 'about_me')
# ### end Alembic commands ###
| [
"shashi4bs@gmail.com"
] | shashi4bs@gmail.com |
64212e5f28688d5608272f8632b0bd60f2b2717a | 5294fd5896f514cbd6db5fb63f2a3473f4e9658e | /app.py | e61f829ad74e8afcd46355e1c72658a719840b17 | [] | no_license | jwpestrak/project-3-phrase-hunter | ffa36a337cf168c23c0652047dec661b4e28b974 | 8c9ae69828d765163ac116aa2cef658cdfe77b1c | refs/heads/master | 2020-05-07T01:22:13.463435 | 2019-04-09T02:55:48 | 2019-04-09T02:55:48 | 180,272,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | from phrasehunter.game import Game # Import your Game class
if __name__ == "__main__": # Create your Dunder Main statement.
# Inside Dunder Main:
game = Game() ## Create an instance of your Game class
game.run() ## Start your game by calling the instance method that starts the game loop
| [
"james.w.pestrak@gmail.com"
] | james.w.pestrak@gmail.com |
d9688ce59735aea7ef8f1d52da614763b7f2d036 | dbe1f4110921a08cb13e22ea325d503bd5627195 | /chuhuo_2.7_clickhouse/bluedon/bdwafd/newscantools/plugins/SiteEngine5_xPagejumpScript.py | 36b3f98ef2796868c8a3a3a6381ac72f04f32ea9 | [] | no_license | Hehouhua/waf_branches | 92dc1b1cbecba20f24ef6c7372dde7caa43f9158 | ca76f3a1ed8150b423474c9e37aee37841a5ee35 | refs/heads/main | 2023-01-07T11:33:31.667688 | 2020-11-03T06:58:33 | 2020-11-03T06:58:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from lib.common import *
def run_domain(http,ob):
list = []
try:
domain = ob['domain']
detail = u''
url = "%s://%s%s" % (ob['scheme'],ob['domain'],ob['base_path'])
expurl="%s%s"%(url,"admin/images/css.css")
url+="api.php?action=logout&forward=http://www.baidu.com"
r,c=requestUrl(http,expurl,ob['task_id'],ob['domain_id'])
if c.find("siteengine")>=0:
res, content = requestUrl(http,url,ob['task_id'],ob['domain_id'])
if res.has_key('location') and res['location'] == 'http://www.baidu.com':
request = getRequest(url)
response = getResponse(res)
list.append(getRecord(ob,ob['scheme']+"://"+ob['domain'],ob['level'],detail,request,response))
except Exception,e:
logging.getLogger().error("File:SITEENGINE5.xpagejumpscript.py, run_domain function :" + str(e) + ",task id:" + ob['task_id'] + ",domain id:" + ob['domain_id'])
write_scan_log(ob['task_id'],ob['domain_id'],"File:SITEENGINE5.xpagejumpscript.py, run_domain function :" + str(e))
#end try
return list
#end def | [
"hanson_wong@qq.com"
] | hanson_wong@qq.com |
64b22fbf6cbc11f93ce0fbe1cbcfbf3742a0011b | f5797644f809f12e65dab6b00e24c1a738bbd787 | /gensim_lda/lda_gensim.py | 02a453afe9ca37e78f1781d0e294ac25d089f464 | [] | no_license | shcup/ML | 436875be51a3620f892eb13e79def8ba7a2cb93b | b761e0d94d0fa4bfd7dda598911d3bc80b7e0fd0 | refs/heads/master | 2020-06-29T13:33:03.593677 | 2018-01-11T18:06:46 | 2018-01-11T18:06:46 | 74,420,501 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys
import logging
import os.path
import unittest
import tempfile
import itertools
import numpy
from gensim.utils import to_unicode
from gensim.interfaces import TransformedCorpus
from gensim.corpora import (bleicorpus, mmcorpus, lowcorpus, svmlightcorpus,
ucicorpus, malletcorpus, textcorpus, indexedcorpus, dictionary)
from gensim.models import (tfidfmodel,word2vec,ldamodel)
print 'start'
train_set=[]
for line in open('articles.txt'):
items = line.strip().split('\t', 1)
if len(items) < 2:
continue
words = items[1].strip().split(' ')
train_set.append(words)
print 'construct dict'
dic = dictionary.Dictionary(train_set)
print 'doc2bow'
corpus = [dic.doc2bow(text) for text in train_set]
print 'ifidf'
tfidf = tfidfmodel.TfidfModel(corpus)
print 'ifidf corpus'
corpus_tfidf = tfidf[corpus]
print 'lda model'
lda = ldamodel.LdaModel(corpus_tfidf, id2word = dic, num_topics = 1000, iterations = 1300, alpha = 0.15, eta = 0.01)
print 'corpus_tfidf'
corpus_lda = lda[corpus_tfidf]
lda.save('lda_model')
| [
"rec@Letv2TTPFD2.(none)"
] | rec@Letv2TTPFD2.(none) |
39a870579ef4ed97598cbc4f4f6818c96489c04f | bf2704bf2a65eda229fe52dc3bc37d30655ad3db | /microsip_consolidador/settings/common.py | 9e54dbf1eb5d8ef31c6c19af059d8f79338e5a89 | [] | no_license | ruff0/microsip_consolidador | 29276c6f96e2f2d3fb9eb06006234e7773c1aa8f | e8763651c5935d12f93a5413ea593dea16043f64 | refs/heads/master | 2021-05-03T22:02:45.045087 | 2014-04-02T00:50:36 | 2014-04-02T00:50:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,516 | py | #encoding:utf-8
# Identificando la ruta del proyecto
import os
import fdb
import sqlite3
from local_settings import MICROSIP_MODULES
# Project root path.
# NOTE(review): str.strip('settings') strips any of the characters
# 's','e','t','i','n','g' from BOTH ends of the path, not the literal suffix
# 'settings' -- paths ending in those letters would be mangled. Confirm.
RUTA_PROYECTO =os.path.dirname(os.path.realpath(__file__)).strip('settings')
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASE_ROUTERS = ['microsip_consolidador.libs.databases_routers.MainRouter']
# Per-company Microsip (Firebird) databases discovered below at import time.
MICROSIP_DATABASES = {}
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': RUTA_PROYECTO + 'data\USERS.sqlite',
        'USER': '',                      # Not used with sqlite3.
        'PASSWORD': '',                  # Not used with sqlite3.
        'HOST': '',                      # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                      # Set to empty string for default. Not used with sqlite3.
        'ATOMIC_REQUESTS': True,
    },
}
# Import-time side effect: read connection records from the local USERS.sqlite
# and register one Firebird DATABASES entry per reachable connection/company.
try:
    users_conn = sqlite3.connect(RUTA_PROYECTO + 'data\USERS.sqlite')
    users_cur = users_conn.cursor()
    users_cur.execute('''SELECT * FROM auth_conexiondb''')
    conexiones_rows = users_cur.fetchall()
    users_conn.close()
    for conexion in conexiones_rows:
        conexion_id = conexion[0]
        conexion_id = "%02d" % conexion_id
        # Positional columns of auth_conexiondb -- presumably
        # (id, ?, ?, host, data folder, user, password); TODO confirm schema.
        host = conexion[3]
        password = conexion[6]
        user = conexion[5]
        carpeta_datos = conexion[4]
        conexion_exitosa = True
        try:
            db= fdb.connect(host=host, user=user, password=password, database="%s\System\CONFIG.FDB"%carpeta_datos )
        except fdb.DatabaseError:
            conexion_exitosa = False
        else:
            cur = db.cursor()
            cur.execute("SELECT NOMBRE_CORTO FROM EMPRESAS")
            empresas_rows = cur.fetchall()
            db.close()
        if conexion_exitosa:
            DATABASES[ '%s-CONFIG'%conexion_id ] = {
                'ENGINE': 'django.db.backends.firebird', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
                'NAME': '%s\System\CONFIG.FDB'% carpeta_datos,
                'USER': user,                      # Not used with sqlite3.
                'PASSWORD': password,                  # Not used with sqlite3.
                'HOST': host,                      # Set to empty string for localhost. Not used with sqlite3.
                'PORT': '3050',                      # Set to empty string for default. Not used with sqlite3.
                'OPTIONS' : {'charset':'ISO8859_1'},
                'ATOMIC_REQUESTS': True,
            }
            for empresa in empresas_rows:
                try:
                    name = '%s\%s.FDB'% (carpeta_datos, empresa[0])
                except UnicodeDecodeError:
                    pass
                else:
                    MICROSIP_DATABASES['%s-%s'%(conexion_id, empresa[0].replace(' ','_'))] = {
                        'ENGINE': 'django.db.backends.firebird', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
                        'NAME': name,
                        'USER': user,                      # Not used with sqlite3.
                        'PASSWORD': password,                  # Not used with sqlite3.
                        'HOST': host,                      # Set to empty string for localhost. Not used with sqlite3.
                        'PORT': '3050',                      # Set to empty string for default. Not used with sqlite3.
                        'OPTIONS' : {'charset':'ISO8859_1'},
                        'ATOMIC_REQUESTS': True,
                    }
                    DATABASES['%s-%s'%(conexion_id, empresa[0].replace(' ','_'))] = {
                        'ENGINE': 'django.db.backends.firebird', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
                        'NAME': name,
                        'USER': user,                      # Not used with sqlite3.
                        'PASSWORD': password,                  # Not used with sqlite3.
                        'HOST': host,                      # Set to empty string for localhost. Not used with sqlite3.
                        'PORT': '3050',                      # Set to empty string for default. Not used with sqlite3.
                        'OPTIONS' : {'charset':'ISO8859_1'},
                        'ATOMIC_REQUESTS': True,
                    }
except sqlite3.Error, e:
    print "Error %s:" % e.args[0]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Mazatlan'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'es-mx'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
#MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_ROOT = os.path.join(RUTA_PROYECTO,'media')
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
MEDIA_URL = os.path.join(RUTA_PROYECTO,'media/')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'dajaxice.finders.DajaxiceFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): the secret key is committed to source control -- it should be
# moved to local_settings/environment before any production deployment.
SECRET_KEY = '3pq$&*)sd$k_olmn@lup_5)-)d=qk-&)18!+5bw7+$z++n2jm@'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'microsip_api.middleware.CustomerMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # 'django.middleware.cache.CacheMiddleware',
    'django.middleware.transaction.TransactionMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'microsip_consolidador.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    (RUTA_PROYECTO + '/templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.request',
    'django.contrib.messages.context_processors.messages'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Settings to send messages through Gmail's SMTP server.
# NOTE(review): credentials are hard-coded placeholders; move real values out
# of source control.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'remitente@gmail.com'
EMAIL_HOST_PASSWORD = 'clavedelcorreo'
EMAIL_PORT = 587
| [
"jesusmaherrera@gmail.com"
] | jesusmaherrera@gmail.com |
a05cbd60ec988ced4a37c9d6c16d84b3f78ffd8f | fdbcd2f19d55b57bafc4deaccafba812a3b56443 | /yolo_video.py | 940de5ac8c747750e76d840028bd352ae4de9c09 | [
"MIT"
] | permissive | mductran/keras-yolo3 | d28ed3715fd14299a401ef90146901db7ac3bccf | 5fecc4bf9dc5764de20cbec45c140412c10873d0 | refs/heads/master | 2020-04-18T02:28:44.287492 | 2019-01-23T10:31:08 | 2019-01-23T10:31:08 | 167,164,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,396 | py | import sys
import argparse
from yolo import YOLO, detect_video
from PIL import Image
def detect_img(yolo):
    """Interactive image-detection loop.

    Repeatedly prompts for an image path on stdin, runs YOLO detection on
    it and displays the annotated result.  An empty input ends the loop so
    the session cleanup below is actually reachable.

    Args:
        yolo: a YOLO instance providing detect_image() and close_session().
    """
    while True:
        img = input('Input image filename:')
        if not img:
            # Blank line = quit; previously the loop could only be left by
            # killing the process, so close_session() was dead code.
            break
        try:
            image = Image.open(img)
        except OSError:
            # Only swallow file-open/decode failures.  The old bare
            # ``except:`` also caught KeyboardInterrupt and SystemExit,
            # trapping the user inside the loop.
            print('Open Error! Try again!')
            continue
        else:
            r_image = yolo.detect_image(image)
            r_image.show()
    yolo.close_session()
FLAGS = None
if __name__ == '__main__':
    # class YOLO defines the default value, so suppress any default here.
    # SUPPRESS keeps unspecified options out of FLAGS entirely, which is what
    # the `"input" in FLAGS` membership tests below rely on.
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    '''
    Command line options
    '''
    parser.add_argument(
        '--model_path', type=str,
        help='path to model weight file, default ' + YOLO.get_defaults("model_path")
    )
    parser.add_argument(
        '--anchors_path', type=str,
        help='path to anchor definitions, default ' + YOLO.get_defaults("anchors_path")
    )
    parser.add_argument(
        '--classes_path', type=str,
        help='path to class definitions, default ' + YOLO.get_defaults("classes_path")
    )
    parser.add_argument(
        '--gpu_num', type=int,
        help='Number of GPU to use, default ' + str(YOLO.get_defaults("gpu_num"))
    )
    parser.add_argument(
        '--image', default=False, action="store_true",
        help='Image detection mode, will ignore all positional arguments'
    )
    '''
    Command line positional arguments -- for video detection mode
    '''
    parser.add_argument(
        "--input", nargs='?', type=str,required=False,default='./path2your_video',
        help = "Video input path"
    )
    parser.add_argument(
        "--output", nargs='?', type=str, default="",
        help = "[Optional] Video output path"
    )
    FLAGS = parser.parse_args()
    if FLAGS.image:
        """
        Image detection mode, disregard any remaining command line arguments
        """
        print("Image detection mode")
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " + FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)))
    elif "input" in FLAGS:
        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)
    else:
        # NOTE(review): unreachable in practice -- "--input" declares an
        # explicit default, so the attribute is always present in FLAGS.
        print("Must specify at least video_input_path. See usage with --help.")
| [
"noreply@github.com"
] | mductran.noreply@github.com |
e4c9ea0294f5f19f9113c2c612678d888d0f5614 | 65dbd639309a9fee3d406f9d4023d137fcf250ac | /web_connector/src/connector.py | 14827697d3705b49ae3e760307a239bc7f8708bd | [] | no_license | uobirlab/butler | 6122eb80f7c06e39ea4cdbee6c86d0911e871f92 | 8489d9f59aa07d914d18d1d2097d9ba205fdb07f | refs/heads/master | 2021-01-18T13:54:52.547440 | 2013-09-09T18:27:45 | 2013-09-09T18:27:45 | 20,443,764 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,097 | py | #!/usr/bin/env python
try:
import requests
except:
print "Need the python-rquests library to connect to the web. Please install it."
raise Exception("Can't find requests package")
ORDERS_FILE_URL="http://www.cs.bham.ac.uk/~burbrcjc/bsf2013/data/orders.txt"
ORDER_COMPLETE_URL="http://www.cs.bham.ac.uk/~burbrcjc/bsf2013/completed.php?order_number="
ACTIVE_ORDERS_URL="http://www.cs.bham.ac.uk/~burbrcjc/bsf2013/active_order.php?active_orders="
ACTIVE_ORDERS_FILE_URL="http://www.cs.bham.ac.uk/~burbrcjc/bsf2013/data/active.txt"
class WebInterface(object):
    """Minimal HTTP client for the orders web service.

    Every operation is a plain GET against one of the module-level URLs.
    """
    def __init__(self):
        pass
    def get_orders(self):
        """Fetch and parse the orders file.

        Returns a list of [f1, f2, f3, name] lists; the order name may
        itself contain spaces, so remaining tokens are re-joined.

        Raises:
            Exception: if the server answered with an HTML (error) page
                instead of the plain-text orders file.
        """
        orders = requests.get(ORDERS_FILE_URL).text
        # BUG FIX: str.find() returns 0 for a match at the very start of the
        # response; the old ``> 0`` test silently accepted such HTML pages.
        if orders.find("<html>") != -1:
            raise Exception("Orders URL bad: html returned!")
        lines = orders.split("\n")
        orders_list = []
        # The last split element is skipped, matching the previous
        # ``lines[:-1]`` behaviour -- the file is expected to end with '\n'.
        for line in lines[:-1]:
            if line == "":
                continue
            order = line.split(" ")
            checked = order[0:3]
            # Re-join the remaining tokens: order names may contain spaces.
            checked.append(" ".join(order[3:]))
            orders_list.append(checked)
        return orders_list
    def mark_order_complete(self, order_id):
        """Tell the server that *order_id* has been completed."""
        requests.get(ORDER_COMPLETE_URL + str(order_id))
    def mark_active_orders(self, order_ids):
        """Publish the list of currently active order ids ('.<id>.<id>...')."""
        orders = "".join("." + str(order) for order in order_ids)
        requests.get(ACTIVE_ORDERS_URL + orders)
    def get_active_orders(self):
        """Fetch the active-orders file and return its raw lines.

        Raises:
            Exception: if the server returned an HTML page (same check as
                get_orders()).
        """
        orders = requests.get(ACTIVE_ORDERS_FILE_URL).text
        if orders.find("<html>") != -1:
            raise Exception("Orders URL bad: html returned!")
        return orders.split("\n")
if __name__=="__main__":
    # Manual smoke test: print every parsed order. (Python 2 file.)
    # rospy.init_node("web_interface_connector")
    connector = WebInterface()
    for i in connector.get_orders():
        print i
    # connector.mark_order_complete(22)
    # connector.mark_active_orders([22])
| [
"cburbridge@gmail.com"
] | cburbridge@gmail.com |
31797b3c34c773bd85a749e9b9a0ba89b518d703 | 5f70fed798c0134381f8d626c1c329a956a35b7b | /Clase10/formato_tabla.py | 2a337e023df5bbe652c7f793a312259f811ac08c | [] | no_license | agmontserrat/Ejercicios_Python_UNSAM | a1a9d4074a4eb7ce549b48d59573aca829060605 | 8160767c98e616e2255912e8b001c19723103957 | refs/heads/main | 2023-05-24T07:01:30.325558 | 2021-06-13T04:32:45 | 2021-06-13T04:32:45 | 347,520,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,657 | py | class FormatoTabla:
    def encabezado(self, headers):
        '''
        Create the table header from *headers*.
        Abstract no-op: concrete formatters override this.
        '''
        pass
    def fila(self, rowdata):
        '''
        Create a single data row of the table from *rowdata*.
        Abstract no-op: concrete formatters override this.
        '''
        pass
class FormatoTablaTXT(FormatoTabla):
    '''
    Plain-text table formatter: 10-character right-aligned columns.
    '''
    def encabezado(self, headers):
        '''Print the header row followed by a dashed separator line.'''
        print(' '.join(f'{h:>10s}' for h in headers), end=' \n')
        print(('-' * 10 + ' ') * len(headers))
    def fila(self, data_fila):
        '''Print one right-aligned data row.'''
        print(' '.join(f'{d:>10s}' for d in data_fila), end=' \n')
class FormatoTablaCSV(FormatoTabla):
    '''
    CSV table formatter (simple comma join; no quoting or escaping).
    '''
    def encabezado(self, headers):
        '''Print the header as one comma-separated line.'''
        linea = ','.join(headers)
        print(linea)
    def fila(self, data_fila):
        '''Print one data row as one comma-separated line.'''
        linea = ','.join(data_fila)
        print(linea)
class FormatoTablaHTML(FormatoTabla):
    '''
    HTML table formatter: one <tr> row of <th>/<td> cells per call.
    '''
    def encabezado(self, headers):
        '''Print the header as a <tr> of <th> cells.'''
        celdas = ''.join(f'<th>{h}</th>' for h in headers)
        print(f'<tr>{celdas}</tr>')
    def fila(self, data_fila):
        '''Print one data row as a <tr> of <td> cells.'''
        celdas = ''.join(f'<td>{d}</td>' for d in data_fila)
        print(f'<tr>{celdas}</tr>')
def crear_formateador(nombre):
    '''
    Factory: return the table formatter instance for *nombre*
    (case-insensitive: 'txt', 'csv' or 'html').

    Raises:
        ValueError: for any other name.
    '''
    clave = nombre.lower()
    if clave == 'txt':
        return FormatoTablaTXT()
    if clave == 'csv':
        return FormatoTablaCSV()
    if clave == 'html':
        return FormatoTablaHTML()
    raise ValueError("Nombre debe ser 'txt', 'csv' o 'html'")
"noreply@github.com"
] | agmontserrat.noreply@github.com |
a7b932f83c1b462ecaa586bcf015affcb611122c | 22309918b0810c221680837032fe5d129f0d2f62 | /Server/sensors/sampleRangeSensor.py | 2c7e08a6537794d707e235a01d79be0634d34a30 | [] | no_license | kanakb/sensor-data-analysis | f846a9128e35987d60abd580e2baca5a8e35f4c2 | 49c577b8d0262e022ba86e1485dfac5d079e5474 | refs/heads/master | 2016-09-09T20:41:39.708263 | 2011-06-06T20:35:17 | 2011-06-06T20:35:17 | 32,226,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | # sampleRangeSensor.py
# serves as an example for how to specify details about a given sensor
# this one has a domain of a range of values
# need GenericSensor to subclass it
from sensors.genericSensor import GenericSensor
from sensors.genericSensor import DataType
# sensor class definition -- inherits from a GenericSensor
class SampleRangeSensor(GenericSensor):
    """Example sensor whose value domain is a range of values (DataType.FloatRange)."""
    def __init__(self):
        # define all fields specific to this sensor
        self.dataType = DataType.FloatRange  # domain: a continuous range of floats
| [
"kanakb@gmail.com@1627692a-01a1-f231-e942-478d199961fa"
] | kanakb@gmail.com@1627692a-01a1-f231-e942-478d199961fa |
a1bed95cd6dc5a558a1dcc440b65970e146efd9a | 91f9ffafe25d234c826105b6ef6e8f925a612f87 | /yolo.py | 3524751b3d9182805315c833e25b46c7e8eb9171 | [
"MIT"
] | permissive | pskrunner14/yolo-detector | 6732938142f2c3505247d495f79d798f972381fe | 295c4500f22efbf0775ba786bdacd4f390631456 | refs/heads/master | 2020-03-22T13:32:42.717276 | 2018-09-30T12:22:59 | 2018-09-30T12:22:59 | 140,115,097 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,623 | py | import os
import logging
import cv2
import keras
import numpy as np
import imageio as io
import tensorflow as tf
import keras.backend as K
# local imports
from utils import (
read_classes,
read_anchors,
generate_colors
)
# imports from `yad2k` project
from yad2k.models.keras_yolo import (
yolo_head,
yolo_boxes_to_corners
)
class YOLO():
    """YOLOv2 real-time object detection using pre-trained model.
    For obtaining the pre-trained model using YOLOv2 weights, see
    YAD2K project: https://github.com/allanzelener/YAD2K.
    Args:
        model_path (str):
            Path to pre-trained model.
        anchors_path (str):
            Path to file conataining YOLO anchor values.
        classes_path (str):
            Path to file containing names of all classes.
        dims (tuple of `float`):
            Dimensions of the frame to detect objects in, as (height, width).
    Raises:
        ValueError: If any arg is missing or length of dims is not 2.
    """
    def __init__(self, model_path=None, anchors_path=None, classes_path=None, dims=None):
        if model_path is None or anchors_path is None or classes_path is None or dims is None or len(dims) != 2:
            raise ValueError('Arguments do not match the specification.')
        self._model = keras.models.load_model(model_path, compile=False)
        self._anchors = read_anchors(anchors_path)
        self._class_names = read_classes(classes_path)
        # dims is (height, width) -- see _scale_boxes; _image_shape keeps the
        # reversed (width, height) order used for clamping in _draw_boxes_cv2.
        self._dims = dims
        self._image_shape = list(reversed([int(x) for x in dims]))
        # Resolution every frame is resized to before being fed to the network.
        self._model_input_dims = (608, 608)
        self._colors = generate_colors(self._class_names)
        self._sess = K.get_session()
        # Build the filtering/NMS graph once; detection calls only run it.
        self._construct_graph()
    @staticmethod
    def _filter_boxes(box_confidence, boxes, box_class_probs, threshold=0.6):
        """Filter out bounding boxes that have highest scores.
        Args:
            box_confidence (tf.Tensor):
                Sigmoid confidence value for potential bounding boxes.
            boxes (tf.Tensor):
                Tensor containing potential bounding boxes' corners.
            box_class_probs (tf.Tensor):
                Softmax probabilities for potential bounding boxes.
            threshold (float, optional):
                Threshold value for minimum score for a bounding box.
        Returns:
            tf.Tensor:
                Filtered box scores.
            tf.Tensor:
                Filtered box corners.
            tf.Tensor:
                Filtered box classes.
        """
        box_scores = box_confidence * box_class_probs # Compute box scores
        # Find box_classes using max box_scores
        # and keep track of the corresponding score
        box_classes = K.argmax(box_scores, axis=-1) # index of max score
        box_class_scores = K.max(box_scores, axis=-1) # actual max score
        # Create a filtering mask based on 'box_class_scores'
        # by using 'threshold' (with probability >= threshold).
        filtering_mask = box_class_scores >= threshold
        # Apply the mask to scores, boxes and classes
        scores = tf.boolean_mask(box_class_scores, filtering_mask)
        boxes = tf.boolean_mask(boxes, filtering_mask)
        classes = tf.boolean_mask(box_classes, filtering_mask)
        return scores, boxes, classes
    @staticmethod
    def _non_max_suppression(scores, boxes, classes, max_boxes=10, iou_threshold=0.5):
        """Applies non-max suppression to bounding boxes.
        Args:
            scores (tf.Tensor):
                Scores of bounding boxes after filtering.
            boxes (tf.Tensor):
                Corner values of bounding boxes after filtering.
            classes (tf.Tensor):
                Classes for bounding boxes after filtering.
            max_boxes (int, optional):
                Max. number of bounding boxes for non-max suppression.
            iou_threshold (float, optional):
                Intersection over union threshold for non-max suppression.
        Returns:
            tf.Tensor:
                Non-max suppressed box scores.
            tf.Tensor:
                Non-max suppressed box corners.
            tf.Tensor:
                Non-max suppressed box classes.
        """
        max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in `tf.image.non_max_suppression`
        K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
        # To get the list of indices corresponding to boxes you keep
        nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes, iou_threshold=iou_threshold)
        # To select only nms_indices from scores, boxes and classes
        scores = K.gather(scores, nms_indices)
        boxes = K.gather(boxes, nms_indices)
        classes = K.gather(classes, nms_indices)
        return scores, boxes, classes
    def _construct_graph(self, max_boxes=10, score_threshold=0.6, iou_threshold=0.5):
        """Creates operations and instantiates them on default graph.
        Args:
            max_boxes (int, optional):
                Max. number of bounding boxes for non-max suppression.
            score_threshold (float, optional):
                Threshold value for min. score for a bounding box for score-filtering.
            iou_threshold (float, optional):
                Intersection over union threshold for non-max suppression.
        """
        # Pipeline: raw model output -> score filtering -> rescale to the
        # original frame size -> non-max suppression.
        yolo_outputs = yolo_head(self._model.output, self._anchors, len(self._class_names))
        box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
        boxes = yolo_boxes_to_corners(box_xy, box_wh) # Convert boxes to be ready for filtering functions
        scores, boxes, classes = self._filter_boxes(box_confidence, boxes, box_class_probs, score_threshold)
        boxes = self._scale_boxes(boxes) # Scale boxes back to original image shape.
        scores, boxes, classes = self._non_max_suppression(scores, boxes, classes, max_boxes, iou_threshold)
        # Save tensors for later evaluation
        self._scores = scores
        self._boxes = boxes
        self._classes = classes
    def detect_image(self, image_path):
        """Detects objects in an image using YOLOv2.
        The annotated result is written to `images/out/<image name>`.
        Args:
            image_path (str):
                Path to image for detection.
        """
        image = io.imread(image_path)
        image_data = self._preprocess_image_cv2(image)
        # Need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
        out_scores, out_boxes, out_classes = self._sess.run([self._scores, self._boxes, self._classes],
                                                            feed_dict={self._model.input: image_data,
                                                                       K.learning_phase(): 0})
        image_name = os.path.split(image_path)[-1]
        logging.info('found {} objects belonging to known classes'.format(len(out_boxes)))
        # Boxes are drawn in place on the original (unresized) image.
        self._draw_boxes_cv2(image, out_scores, out_boxes, out_classes)
        logging.info('saving result in `images/out/{}`'.format(image_name))
        io.imsave(os.path.join('images/out', image_name), image)
    def detect_realtime(self, frame):
        """Detects objects in real-time using YOLOv2.
        Args:
            frame (numpy.ndarray):
                Single frame from the webcam feed to run YOLO detection on.
        Returns:
            numpy.ndarray:
                Output frame data after detection and drawing bounding boxes over it.
        """
        image_data = self._preprocess_image_cv2(frame)
        out_scores, out_boxes, out_classes = self._sess.run([self._scores, self._boxes, self._classes],
                                                            feed_dict={self._model.input: image_data,
                                                                       K.learning_phase(): 0})
        # The frame is annotated in place and also returned for convenience.
        self._draw_boxes_cv2(frame, out_scores, out_boxes, out_classes)
        return frame
    def _preprocess_image_cv2(self, image):
        """Preprocesses and normalizes an image using openCV.
        Resizes to the fixed model input resolution, scales pixel values to
        [0, 1] and adds a leading batch dimension.
        Args:
            image (numpy.ndarray):
                Image to preprocess.
        Returns:
            numpy.ndarray: Preprocessed image data.
        """
        resized_image = cv2.resize(image, self._model_input_dims, interpolation=cv2.INTER_CUBIC)
        image_data = np.array(resized_image, dtype='float32')
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0) # Add batch dimension.
        return image_data
    def _scale_boxes(self, boxes):
        """Scales the predicted boxes in order to be drawable on the image
        Args:
            boxes (tf.Tensor):
                Corner values of bounding boxes.
        Returns:
            tf.Tensor: Scaled corner values for bounding boxes.
        """
        height, width = self._dims
        # Broadcast (h, w, h, w) over the (top, left, bottom, right) corners.
        image_dims = K.stack([height, width, height, width])
        image_dims = K.reshape(image_dims, [1, 4])
        boxes = boxes * image_dims
        return boxes
    def _draw_boxes_cv2(self, image, scores, boxes, classes):
        """Draws bounding boxes on frame using openCV.
        Args:
            image (numpy.ndarray):
                Image on which to draw bounding boxes (modified in place).
            scores (numpy.ndarray):
                Scores for each bounding box.
            boxes (numpy.ndarray):
                Box corners as (top, left, bottom, right) per detection.
            classes (numpy.ndarray):
                Classes associated with each bounding box.
        """
        for i, c in reversed(list(enumerate(classes))):
            predicted_class = self._class_names[c]
            box = boxes[i]
            score = scores[i]
            label = '{} {:.2f}'.format(predicted_class, score)
            top, left, bottom, right = box
            # Round to integer pixels and clamp to the frame bounds
            # (_image_shape is (width, height)).
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(self._image_shape[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(self._image_shape[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))
            text_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_DUPLEX, 0.8, 2)
            cv2.rectangle(image, (left, top), (right, bottom), self._colors[c], 2)
            # Filled background strip above the box so the label stays legible.
            cv2.rectangle(image, (left, top - text_size[0][1] - 10),
                          (left + text_size[0][0] + 10, top), self._colors[c], cv2.FILLED)
            cv2.putText(image, label, (left + 5, top - 5),
                        cv2.FONT_HERSHEY_DUPLEX, 0.8, (0, 0, 0), 2)
    def __del__(self):
        # Best-effort cleanup of the TF session when the object is collected.
        self._sess.close()
"pskrunner14@gmail.com"
] | pskrunner14@gmail.com |
afad583f0de6a7bf43989c94768169a8e17ac412 | e6f80e38fcd5cfd63e2e5f653a11d42ce0799fbb | /dm_result_analysis.py | be8099a0d9387500999fc00c84da8f3bf29d66b3 | [] | no_license | NiuGenen/DataMiningProject | e771e326b37e1f45d546c15658abd57067681f8c | d63173d092ddd284b176a1d71e75f8a616f0cb5d | refs/heads/master | 2020-04-05T04:24:48.818061 | 2018-12-12T02:06:38 | 2018-12-12T02:06:38 | 156,550,278 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,041 | py | import dm_source_data as sd
import pandas as pd
import dm_preprocess_fun as ppf
import dm_csv as dmcsv
import os
import dm_filepath as dmfp
import dm_common as dmc
# ---------------------------------------------------------------------------
# Result analysis: for every prediction file, look up the matching ground
# truth file, accumulate item/element accuracy (with and without the unknown
# label -1) plus a 4x4 confusion matrix, and write one CSV row per file.
# ---------------------------------------------------------------------------
verbose = 1

# Fixed output columns; confusion-matrix columns are appended below.
res_col = [
    'start_time',
    'item_nr',
    'elem_nr',
    'item_right',
    'item_right_percentage',
    'elem_right',
    'elem_right_percentage',
    'item_right_without_unknown',
    'item_right_without_unknown_percentage',
    'elem_right_without_unknown',
    'elem_right_without_knonown_percentage'  # (sic) kept for downstream compatibility
]
# BUG FIX: these header columns used to be appended inside the per-file loop,
# duplicating the sp/s/p columns in the header for every result file after
# the first.  They are now appended exactly once, up front.
for i in range(0, 4):
    for j in range(0, 4):
        res_col.append("sp" + str(i) + str(j))
res_col.append("spun")
for i in range(0, 4):
    res_col.append("s" + str(i))
for i in range(0, 4):
    res_col.append("p" + str(i))

label_files = os.listdir(dmfp.result_real_label_floder_path)
res_files = sorted(os.listdir(dmfp.prediction_result_floder_path))

res_all = []
for res_file in res_files:  # renamed from ``file`` (shadowed a builtin)
    if res_file[0] == '.':
        continue  # skip hidden files such as .DS_Store
    start_time = int(res_file.split('.')[2])
    res_path = os.path.join(dmfp.prediction_result_floder_path, res_file)
    print("Result File : " + res_path)
    rescsv = pd.read_csv(res_path, sep=',')

    # Locate the ground-truth file carrying the same start_time token.
    label_file = ""
    for f in label_files:
        if "." + str(start_time) + "." in f:
            label_file = f
            break
    label_path = os.path.join(dmfp.result_real_label_floder_path, label_file)
    print("Label File : " + label_path)
    labelcsv = pd.read_csv(label_path, sep=',')

    # Column [3] is linkid_tag; the last 6 columns hold the true labels.
    label_data = dict()
    for item in labelcsv.values:
        label_data[item[3]] = item[-6:]

    item_nr = 0
    elem_nr = 0
    item_right = 0
    elem_right = 0
    item_right_without_unknown = 0
    elem_right_without_unknown = 0

    # sp[i][j]: number of elements whose real label is i and whose
    # prediction is j; spun counts elements whose real label is unknown (-1).
    sp = [[0] * 4 for _ in range(4)]
    spun = 0

    # One pass per linkid row of the prediction file; res[1] is the linkid,
    # predictions start at column 2.
    for res in rescsv.values:
        label = label_data[res[1]]
        item_nr += 1
        elem_nr += len(res) - 2
        flag_item = 1
        flag_item_without_unknown = 1
        for k in range(2, len(res)):
            # accuracy counting unknown labels as wrong
            if res[k] == label[k - 2]:
                elem_right += 1
            else:
                flag_item = 0
            # accuracy treating unknown (-1) labels as always right
            if res[k] == label[k - 2] or label[k - 2] == -1:
                elem_right_without_unknown += 1
            else:
                flag_item_without_unknown = 0
            # confusion matrix / unknown counter
            if label[k - 2] != -1:
                sp[int(label[k - 2])][int(res[k])] += 1
            else:
                spun += 1
        item_right += flag_item
        item_right_without_unknown += flag_item_without_unknown

    res_item = []
    res_item.append(start_time)
    res_item.append(item_nr)
    res_item.append(elem_nr)
    res_item.append(item_right)
    res_item.append(item_right / item_nr)
    res_item.append(elem_right)
    res_item.append(elem_right / elem_nr)
    res_item.append(item_right_without_unknown)
    res_item.append(item_right_without_unknown / item_nr)
    res_item.append(elem_right_without_unknown)
    res_item.append(elem_right_without_unknown / elem_nr)
    for i in range(0, 4):
        for j in range(0, 4):
            res_item.append(sp[i][j])
    res_item.append(spun)
    # NOTE(review): a class that never occurs still raises ZeroDivisionError
    # here, exactly as in the original code.
    for i in range(0, 4):
        res_item.append(sp[i][i] / (sp[i][0] + sp[i][1] + sp[i][2] + sp[i][3]))  # sensitivity
    for i in range(0, 4):
        res_item.append(sp[i][i] / (sp[0][i] + sp[1][i] + sp[2][i] + sp[3][i]))  # precision
    print(res_item)
    res_all.append(res_item)

dmcsv.write_list2_into_csv(res_all, res_col, dmfp.result_analysis_path, verbose)
| [
"602131568@qq.com"
] | 602131568@qq.com |
f46f619f2254359a55e3c1510eb5a02268659659 | 2ea51ead1779c44c4f28cecfb8e5ca37d4c5ce4c | /registerCamera.py | 353d560d428361ace632859d62565a90d1d2c03a | [] | no_license | JeffStodd/Viator-Hardware | a1ad9e79420e5f67fa5aaa78ac427743d740f08a | be335ceb205d42954383bc50bf6fb4ec0c9d7f31 | refs/heads/master | 2022-10-18T05:06:43.769560 | 2020-06-11T21:33:15 | 2020-06-11T21:33:15 | 271,649,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import urllib.request
import sys
from uuid import getnode as get_mac
def main(argv):
    """Entry-point stub.
    NOTE(review): branches on ``len(sys.argv)`` rather than the *argv*
    parameter, and both branches are empty -- presumably a placeholder for
    future argument handling.
    """
    if len(sys.argv) > 1:
        pass
    else:
        pass
#search for matching address and update stream fields
def findAndUpdate(content, port):
    """Find the camera whose 'linkedParkingSpot' matches *content* and
    update its stream fields.
    Scans every document in the Firestore 'cameras' collection; only the
    first match is updated (via update()), after which the function returns.
    Prints a message if no camera matches.
    Args:
        content: value compared against each camera's 'linkedParkingSpot'.
        port: streaming port forwarded to update().
    """
    print("Getting Client")
    db = firestore.client()
    print("Searching Address:", content)
    for camera in db.collection(u'cameras').stream():
        fields = camera.to_dict()
        print(fields[u'linkedParkingSpot'])
        if fields[u'linkedParkingSpot'] == content:
            update(camera.reference, port)
            return
    print("Could not find match")
def update(document, port):
    """Write this host's public IP, *port* and MAC address to *document*.
    The public IP is fetched from https://v4.ident.me/.
    NOTE(review): hex(get_mac())[2:] drops leading zeros, so MAC addresses
    beginning with a zero byte serialize with fewer than 12 hex digits --
    confirm consumers tolerate that (otherwise zero-pad with '%012x').
    """
    external_ip = urllib.request.urlopen('https://v4.ident.me/').read().decode('utf8')
    macAddress = hex(get_mac())[2:]
    print("Updating Camera")
    field_updates = {u'ip' : external_ip, u'port': port, u'macAddress': macAddress}
    document.update(field_updates)
    print("Update Successful")
if __name__ == "__main__":
main(sys.argv)
| [
"noreply@github.com"
] | JeffStodd.noreply@github.com |
f06b314effbea49196936e04d020d70611e2ee01 | 3f9dd28efb7fb66b95a7b33ae3d15f6e4d0925f5 | /pydar/format.py | d8e1bb84cbbd5663e6463bfea12ee2347e12622b | [
"MIT"
] | permissive | MomsFriendlyRobotCompany/pydar | 926cdbb9431204b60a0816815904c1b27f415f0d | 20d5a6b382b4f047ba19f8f82a15a67ab3537543 | refs/heads/master | 2020-03-20T11:19:28.451746 | 2019-12-25T01:46:29 | 2019-12-25T01:46:29 | 137,399,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py |
from collections import namedtuple
# Lightweight record pairing a scan payload with its timestamp.
Scan = namedtuple('Scan', 'scan timestamp')
| [
"walchko@users.noreply.github.com"
] | walchko@users.noreply.github.com |
0d43cabd0bac06a70dc5cf138a95ff01cf01d740 | ababe4e82dec68ff7936c87178061b179bb29931 | /mainApp/models.py | 08b8c550e39bdf279372b082b30a3e4eacdc07c2 | [] | no_license | nurhat1/news_aggregator_api | b76d5599c0628da5086940a28cc8110ae3e50a0e | 2377fb5e4486dfed9974471e9027c38a18b11ad1 | refs/heads/master | 2023-06-03T07:31:59.908745 | 2021-06-18T09:19:15 | 2021-06-18T09:19:15 | 378,096,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | from django.db import models
# Create your models here.
class Category(models.Model):
    # News category that feeds are grouped under.
    # verbose labels are Russian ("Category name" / "Category") -- runtime
    # strings, kept as-is.
    name = models.CharField('Название категории', max_length=255)
    # Soft visibility flag (no hard delete).
    is_active = models.BooleanField(default=True)
    class Meta:
        verbose_name = 'Категория'
        verbose_name_plural = 'Категории'
    def __str__(self):
        # Admin-facing label: "Категория <name>".
        return f'Категория {self.name}'
class Feed(models.Model):
    # An RSS news source belonging to exactly one Category; deleting the
    # category cascades to its feeds.
    category = models.ForeignKey(Category, help_text='Категория', on_delete=models.CASCADE)
    name = models.CharField('Название ресурса', max_length=255)
    # Site URL; unique so the same source cannot be registered twice.
    url = models.URLField('Ссылка на ресурс', max_length=255, unique=True)
    rss_link = models.TextField('RSS ссылка ресурса', max_length=255)
    class Meta:
        verbose_name = 'Ресурс'
        verbose_name_plural = 'Ресурсы'
    def __str__(self):
        # Admin-facing label: "Ресурс <name>".
        return f'Ресурс {self.name}'
| [
"nurhat_01.12.97@mail.ru"
] | nurhat_01.12.97@mail.ru |
2e858c17d93645b79fec3bc950bfad4291ef27b3 | 4e96f383d4703ad8ee58869ed91a0c8432c8a051 | /Cura/Cura/cura/Backups/BackupsManager.py | ba6fcab8d75e54207a7423215cf29cc707d74109 | [
"LGPL-3.0-only",
"GPL-3.0-only"
] | permissive | flight7788/3d-printing-with-moveo-1 | b2dba26010c4fa31815bc1d2d0966161a8600081 | 7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0 | refs/heads/Feature_Marlin_with_AlanBoy | 2022-08-30T18:36:44.785058 | 2020-05-30T07:52:58 | 2020-05-30T07:52:58 | 212,583,912 | 0 | 0 | MIT | 2020-05-16T07:39:47 | 2019-10-03T13:13:01 | C | UTF-8 | Python | false | false | 3,039 | py | # Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Dict, Optional, Tuple, TYPE_CHECKING
from UM.Logger import Logger
from cura.Backups.Backup import Backup
if TYPE_CHECKING:
from cura.CuraApplication import CuraApplication
## The BackupsManager is responsible for managing the creating and restoring of
# back-ups.
#
# Back-ups themselves are represented in a different class.
class BackupsManager:
    """Coordinates creating and restoring configuration back-ups.

    The back-up payload itself is handled by the Backup class; this manager
    only wires it to the application and toggles auto-save around the work.
    """

    def __init__(self, application: "CuraApplication") -> None:
        self._application = application

    def createBackup(self) -> Tuple[Optional[bytes], Optional[Dict[str, str]]]:
        """Snapshot the current configuration.

        :return: A (zip bytes, metadata dict) pair rather than a Backup
            object, so plugins only ever see plain data through this API.
        """
        self._disableAutoSave()
        snapshot = Backup(self._application)
        snapshot.makeFromCurrent()
        self._enableAutoSave()
        return snapshot.zip_file, snapshot.meta_data

    def restoreBackup(self, zip_file: bytes, meta_data: Dict[str, str]) -> None:
        """Restore a previously created back-up.

        :param zip_file: Raw bytes of the back-up archive.
        :param meta_data: Metadata for the archive; must carry a
            "cura_release" key or the restore is refused.
        """
        if not meta_data.get("cura_release", None):
            # Refuse to restore archives without a version stamp.
            Logger.log("w", "Tried to restore a backup without specifying a Cura version number.")
            return

        self._disableAutoSave()
        if Backup(self._application, zip_file = zip_file, meta_data = meta_data).restore():
            # A restart is needed for the restore to take effect; close
            # without saving so the just-restored data is not overwritten.
            self._application.windowClosed(save_data = False)

    def _disableAutoSave(self) -> None:
        """Switch auto-save off so it cannot interfere with a backup/restore."""
        auto_save = self._application.getAutoSave()
        if not auto_save:
            # Auto-save only exists once application init has finished.
            Logger.log("e", "Unable to disable the autosave as application init has not been completed")
            return
        auto_save.setEnabled(False)

    def _enableAutoSave(self) -> None:
        """Switch auto-save back on once the backup/restore work is done."""
        auto_save = self._application.getAutoSave()
        if not auto_save:
            # Auto-save only exists once application init has finished.
            Logger.log("e", "Unable to enable the autosave as application init has not been completed")
            return
        auto_save.setEnabled(True)
| [
"t106360212@ntut.org.tw"
] | t106360212@ntut.org.tw |
3c639d64247b4a49b28c974d5d915777ea97abc0 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /egHeSWSjHTgzMysBX_11.py | 07299dceba5a669196df27a142df5458fa762af5 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | """
Create a function that takes a number as an argument and returns half of it.
### Examples
half_a_fraction("1/2") ➞ "1/4"
half_a_fraction("6/8") ➞ "3/8"
half_a_fraction("3/8") ➞ "3/16"
### Notes
Always return the simplified fraction.
"""
def half_a_fraction(fract):
    """Return half of the fraction given as a "num/den" string.

    The result is always reduced to lowest terms (as the module docstring
    promises), e.g. "6/8" -> "3/8" and "4/6" -> "1/3".  The original code
    never simplified, so pre-reduced inputs could yield unreduced output.

    :param fract: Fraction as a string, e.g. "3/8".
    :return: Half the value, as a reduced "num/den" string.
    """
    from fractions import Fraction  # stdlib: exact, auto-reducing rationals
    half = Fraction(fract) / 2  # Fraction("6/8") parses and reduces to 3/4
    # Format explicitly so whole-number results still render as "n/1".
    return '{}/{}'.format(half.numerator, half.denominator)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
37696f05b79e1f2feac4efbff88069ce1a01a3f3 | 1829c4ccef29f7c0074ed2ee07f960d63572b9d6 | /pdf417/__init__.py | f8d4187552e8c5773cd023f155001f0483a12557 | [] | no_license | rutm/pdf417 | c40b602324a765dc8ad0b1ee6f7a25dbe20d3d23 | 91e8f68190baf926883424113f3fbd47418afd17 | refs/heads/master | 2021-01-02T23:03:11.953216 | 2013-11-06T18:30:03 | 2013-11-06T18:30:03 | 7,704,937 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,857 | py | from PIL import Image
from PIL import ImageDraw
from ._pdf417 import PDF417
def write_to_ps(filename, barcode):
    """Write *barcode* to *filename* as a PostScript 1-bit `image` program.

    The barcode object must expose ``bit_columns``, ``code_rows`` and an
    iterable of byte values in ``bits``.
    """
    bytes_per_row = (barcode.bit_columns / 8) + 1
    with open(filename, 'w+') as ps:
        # Caption drawn below the symbol.
        ps.write("/Times findfont\n12 scalefont setfont\n100 80 moveto\n(A PDF417 example.)show\n")
        # Position the origin and scale the image operator's unit square.
        ps.write("stroke\n100 100 translate\n{0} {1} scale\n".format(
            barcode.bit_columns / 2.0,
            barcode.code_rows * 3 / 2.0)
        )
        # width height bits/sample [image matrix] { <hex data> } image
        ps.write("{0} {1} 1 [{2} 0 0 {3} 0 {4}]{{<".format(
            barcode.bit_columns,
            barcode.code_rows,
            barcode.bit_columns,
            -barcode.code_rows,
            barcode.code_rows)
        )
        for position, value in enumerate(barcode.bits):
            # Break the hex stream into one line per row of bytes.
            if position % bytes_per_row == 0:
                ps.write('\n')
            ps.write('{:02X}'.format(value & 0xFF))
        ps.write("\n>}image\nshowpage\n")
def to_bitmap_chunks(barcode):
    """Expand the barcode's byte stream into per-row '0'/'1' strings.

    Each byte in ``barcode.bits`` contributes 8 characters; the resulting
    bit string is cut into chunks of ``8 * bit_rows`` characters.

    :param barcode: Object with ``bits`` (iterable of ints) and ``bit_rows``.
    :return: List of equal-length bit strings (last chunk may be shorter).
    """
    bitmap = ''.join('{:08b}'.format(byte & 0xFF) for byte in barcode.bits)
    chunk_len = 8 * int(barcode.bit_rows)
    # range() instead of the Python-2-only xrange(): identical iteration on
    # Python 2 and makes the helper usable on Python 3.
    return [bitmap[start:start + chunk_len] for start in range(0, len(bitmap), chunk_len)]
def write_to_png(filename, barcode, x_scale=3, y_scale=9, margin=3):
    """Render *barcode* to a PNG file with black modules on white.

    :param filename: Destination path for the PNG.
    :param barcode: Object exposing ``bit_columns``, ``code_rows`` and the
        attributes consumed by to_bitmap_chunks.
    :param x_scale: Pixel width of one module.
    :param y_scale: Pixel height of one module row.
    :param margin: Quiet-zone width in pixels on every side.
    """
    full_width = (barcode.bit_columns * x_scale) + (margin * 2)
    full_height = (barcode.code_rows * y_scale) + (margin * 2)
    image = Image.new("RGB", (full_width, full_height), 'white')
    draw = ImageDraw.Draw(image)
    chunks = to_bitmap_chunks(barcode)
    x = margin
    y = margin
    for line in chunks:
        for bar in line:
            if int(bar):
                # Fill one module; range() replaces the Python-2-only
                # xrange() with identical behavior on both interpreters.
                for xx in range(x, x + x_scale):
                    for yy in range(y, y + y_scale):
                        draw.point((xx, yy), fill='black')
            x += x_scale
        y += y_scale
        x = margin
    del draw
    image.save(filename, 'PNG')
| [
"rockerzz@gmail.com"
] | rockerzz@gmail.com |
4cfb83d1cec52b7b06afc49b583c72b7a81f69cc | 3fb0fa7109826d3bf4cbe069ddfb2bda4928b73e | /src/python/populateItems.py | 0a194f21a8bd04daa72340275aac26739dcecf73 | [
"MIT"
] | permissive | DBMSRow3/DBMSLeague | 6e5950f224e3a655d05be3d5b60f89f0f3b01343 | 76f0b32011f08809b62a3147e9c608d4bbe9a33a | refs/heads/master | 2020-04-10T17:14:31.446051 | 2018-02-26T02:57:22 | 2018-02-26T02:57:22 | 84,141,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | from cassiopeia import riotapi
from cassiopeia.type.core.common import LoadPolicy
import csv
import urllib
import configparser
import mysql.connector
def main():
    """Populate the Item table from the Riot static-data API.

    Reads API and DB credentials from settings.ini, downloads each item's
    icon into img/, and inserts one row per item.  Requires network access
    and a reachable MySQL server.
    """
    config = configparser.ConfigParser()
    config.read('settings.ini')

    riotapi.set_api_key(config.get('LoL API', 'key'))
    riotapi.set_load_policy(LoadPolicy.lazy)
    riotapi.print_calls(False)
    riotapi.set_region('NA')

    cnx = None
    try:
        cnx = mysql.connector.connect(user=config.get('DB', 'username'), password=config.get('DB', 'password'), host=config.get('DB', 'host'), database=config.get('DB', 'database'))
        cursor = cnx.cursor()
        # Parameterized statement: item names/descriptions come from the API
        # and may contain quotes, which both broke the old string-formatted
        # SQL and opened it to injection.
        insert_item = ('INSERT INTO Item (id,name,description,gold,requiredChamp) '
                       'VALUES (%s,%s,%s,%s,%s)')
        items = riotapi.get_items()
        for item in items:
            imageurl = 'http://ddragon.leagueoflegends.com/cdn/6.24.1/img/item/' + str(item.id) + '.png'
            destPath = 'img/item-' + str(item.id) + '.png'
            try:
                # NOTE(review): urllib.urlretrieve is the Python-2 API; on
                # Python 3 this would be urllib.request.urlretrieve.
                urllib.urlretrieve(imageurl, destPath)
            except IOError:
                print("Error retrieving " + str(item.id) + '.png')
            # Not every item is champion-restricted; guard against a missing
            # champion -- presumably None in that case, TODO confirm the
            # cassiopeia contract.
            required_champ = item.required_champion.id if item.required_champion else None
            cursor.execute(insert_item, (item.id, item.name, item.description, item.gold.base, required_champ))
        # Without an explicit commit the inserts are rolled back on close.
        cnx.commit()
        cursor.close()
    except mysql.connector.Error as err:
        # Local import: the original referenced a never-imported `errorcode`
        # name, so every DB error was masked by a NameError.
        from mysql.connector import errorcode
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
    finally:
        # Close exactly once (the original closed twice via try/else and
        # crashed if the connection was never established).
        if cnx is not None:
            cnx.close()


if __name__ == "__main__":
    main()
| [
"donaldsa18@up.edu"
] | donaldsa18@up.edu |
b2111832fc0c4debedac707ba825b9e9fe864ff0 | dcdb7a05d52cd1f9d558a70570b3ecbd85cefbe6 | /apps/blog_sign/urls.py | 80393ab80dbc1e204ef049a5208f8d7c98b708f4 | [] | no_license | GDCenter/blog_django | ea7a9a556292b212a6d5a2de3d02f7b1e9002871 | 5cb12f630618bb49bd955bcc9072339ff3a01387 | refs/heads/master | 2020-09-09T15:51:49.918515 | 2018-05-11T09:21:05 | 2018-05-11T09:21:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | from django.conf.urls import url
from apps.blog_sign import views
urlpatterns = [
url(r'^login$', views.LoginView.as_view(), name='login'),
url(r'^register$', views.Register.as_view(), name='register'),
url(r'^forget$', views.ForgetView.as_view(), name='forget'),
url(r'^active/(?P<token>.*)$', views.ActiveView.as_view(), name='active'),
url(r'^sendemail$', views.send_email, name='sendemail'),
url(r'^logout$', views.user_logout, name='logout'),
]
| [
"liduo945@163.com"
] | liduo945@163.com |
847dd07a7ba0510818c13daa8b6307f3fe3659e9 | a6112b9c7aea4e352abb23ef21e096742c382f9f | /linuxprivchecker.py | bbe897243e3ea9b58a5c439a7f45f009492c0da1 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | ambrosektal/recon2 | b198c9792b08a3d5f49a52eae54ac32dcb174ff6 | 4d3b1f1af133c987be76de0c8bb25d9bccedcf39 | refs/heads/master | 2021-04-26T23:46:41.622351 | 2021-03-20T17:43:36 | 2021-03-20T17:43:36 | 123,853,233 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,312 | py | #!/usr/env python
###############################################################################################################
## [Title]: linuxprivchecker.py -- a Linux Privilege Escalation Check Script
## [Author]: Mike Czumak (T_v3rn1x) -- @SecuritySift
##-------------------------------------------------------------------------------------------------------------
## [Details]:
## This script is intended to be executed locally on a Linux box to enumerate basic system info and
## search for common privilege escalation vectors such as world writable files, misconfigurations, clear-text
## passwords and applicable exploits.
##-------------------------------------------------------------------------------------------------------------
## [Warning]:
## This script comes as-is with no promise of functionality or accuracy. I have no plans to maintain updates,
## I did not write it to be efficient and in some cases you may find the functions may not produce the desired
## results. For example, the function that links packages to running processes is based on keywords and will
## not always be accurate. Also, the exploit list included in this function will need to be updated over time.
## Feel free to change or improve it any way you see fit.
##-------------------------------------------------------------------------------------------------------------
## [Modification, Distribution, and Attribution]:
## You are free to modify and/or distribute this script as you wish. I only ask that you maintain original
## author attribution and not attempt to sell it or incorporate it into any commercial offering (as if it's
## worth anything anyway :)
###############################################################################################################
# conditional import for older versions of python not compatible with subprocess
# Feature-detect the subprocess module (absent on very old interpreters) and
# record which execution path execCmd must use in the module-global
# compatmode flag: 0 = subprocess, 1 = os.popen fallback.
try:
    import subprocess as sub
    compatmode = 0 # newer version of python, no need for compatibility mode
except ImportError:
    import os # older version of python, need to use os instead
    compatmode = 1

# title / formatting
bigline = "================================================================================================="
smlline = "-------------------------------------------------------------------------------------------------"

# Banner (Python 2 print statements -- this script targets legacy interpreters).
print bigline
print "LINUX PRIVILEGE ESCALATION CHECKER"
print bigline
print
# loop through dictionary, execute the commands, store the results, return updated dict
def execCmd(cmdDict):
    """Run every entry's "cmd" through a shell and store the output lines
    in that entry's "results" list; returns the same (mutated) dict.

    Uses subprocess when available, otherwise the os.popen fallback selected
    at import time via the module-global compatmode flag.
    """
    for key in cmdDict:
        entry = cmdDict[key]
        command = entry["cmd"]
        if compatmode == 0:
            # subprocess path: capture stdout (stderr is collected but unused).
            stdout, _ = sub.Popen([command], stdout=sub.PIPE, stderr=sub.PIPE, shell=True).communicate()
            lines = stdout.split('\n')
        else:
            # Legacy path for interpreters without subprocess.
            legacy_pipe = os.popen(command, 'r')
            lines = legacy_pipe.read().split('\n')
        entry["results"] = lines
    return cmdDict
# print results for each previously executed command, no return value
def printResults(cmdDict):
    """Print each entry's "msg" header followed by its non-blank "results"
    lines (as populated by execCmd), one blank-line-separated section per
    entry.  Python 2 print statements: run under a Python 2 interpreter."""
    for item in cmdDict:
        msg = cmdDict[item]["msg"]
        results = cmdDict[item]["results"]
        print "[+] " + msg
        for result in results:
            # Skip the empty strings that split('\n') leaves behind.
            if result.strip() != "":
                print " " + result.strip()
        print
    return
def writeResults(msg, results):
    """Append a "[+] <count> <msg>" header plus each non-blank result line
    to privcheckout.txt in the current directory.

    Fixes: the original emitted no newline separators at all, so the whole
    report landed on a single line (its sibling printResults prints one line
    per result); the file handle is now also closed even if a write fails.

    :param msg: Section header text.
    :param results: Command output lines; the count shown is len-1 because
        the split('\n') in execCmd presumably leaves a trailing empty entry.
    """
    f = open("privcheckout.txt", "a")
    try:
        f.write("[+] " + str(len(results) - 1) + " " + msg)
        if not msg.endswith("\n"):
            f.write("\n")
        for result in results:
            if result.strip() != "":
                f.write(" " + result.strip() + "\n")
    finally:
        f.close()
    return
# Basic system info
print "[*] GETTING BASIC SYSTEM INFO...\n"
results=[]
sysInfo = {"OS":{"cmd":"cat /etc/issue","msg":"Operating System","results":results},
"KERNEL":{"cmd":"cat /proc/version","msg":"Kernel","results":results},
"HOSTNAME":{"cmd":"hostname", "msg":"Hostname", "results":results}
}
sysInfo = execCmd(sysInfo)
printResults(sysInfo)
# Networking Info
print "[*] GETTING NETWORKING INFO...\n"
netInfo = {"NETINFO":{"cmd":"/sbin/ifconfig -a", "msg":"Interfaces", "results":results},
"ROUTE":{"cmd":"route", "msg":"Route", "results":results},
"NETSTAT":{"cmd":"netstat -antup | grep -v 'TIME_WAIT'", "msg":"Netstat", "results":results}
}
netInfo = execCmd(netInfo)
printResults(netInfo)
# File System Info
print "[*] GETTING FILESYSTEM INFO...\n"
driveInfo = {"MOUNT":{"cmd":"mount","msg":"Mount results", "results":results},
"FSTAB":{"cmd":"cat /etc/fstab 2>/dev/null", "msg":"fstab entries", "results":results}
}
driveInfo = execCmd(driveInfo)
printResults(driveInfo)
# Scheduled Cron Jobs
cronInfo = {"CRON":{"cmd":"ls -la /etc/cron* 2>/dev/null", "msg":"Scheduled cron jobs", "results":results},
"CRONW": {"cmd":"ls -aRl /etc/cron* 2>/dev/null | awk '$1 ~ /w.$/' 2>/dev/null", "msg":"Writable cron dirs", "results":results}
}
cronInfo = execCmd(cronInfo)
printResults(cronInfo)
# User Info
print "\n[*] ENUMERATING USER AND ENVIRONMENTAL INFO...\n"
userInfo = {"WHOAMI":{"cmd":"whoami", "msg":"Current User", "results":results},
"ID":{"cmd":"id","msg":"Current User ID", "results":results},
"ALLUSERS":{"cmd":"cat /etc/passwd", "msg":"All users", "results":results},
"SUPUSERS":{"cmd":"grep -v -E '^#' /etc/passwd | awk -F: '$3 == 0{print $1}'", "msg":"Super Users Found:", "results":results},
"HISTORY":{"cmd":"ls -la ~/.*_history; ls -la /root/.*_history 2>/dev/null", "msg":"Root and current user history (depends on privs)", "results":results},
"ENV":{"cmd":"env 2>/dev/null | grep -v 'LS_COLORS'", "msg":"Environment", "results":results},
"SUDOERS":{"cmd":"cat /etc/sudoers 2>/dev/null | grep -v '#' 2>/dev/null", "msg":"Sudoers (privileged)", "results":results},
"LOGGEDIN":{"cmd":"w 2>/dev/null", "msg":"Logged in User Activity", "results":results}
}
userInfo = execCmd(userInfo)
printResults(userInfo)
if "root" in userInfo["ID"]["results"][0]:
print "[!] ARE YOU SURE YOU'RE NOT ROOT ALREADY?\n"
# File/Directory Privs
print "[*] ENUMERATING FILE AND DIRECTORY PERMISSIONS/CONTENTS...\n"
fdPerms = {"WWDIRSROOT":{"cmd":"find / \( -wholename '/home/homedir*' -prune \) -o \( -type d -perm -0002 \) -exec ls -ld '{}' ';' 2>/dev/null | grep /root", "msg":"World Writeable Directories for User/Group 'Root'", "results":results},
"WWDIRS":{"cmd":"find / \( -wholename '/home/homedir*' -prune \) -o \( -type d -perm -0002 \) -exec ls -ld '{}' ';' 2>/dev/null | grep -v /root", "msg":"World Writeable Directories for Users other than Root", "results":results},
"WWFILES":{"cmd":"find / \( -wholename '/home/homedir/*' -prune -o -wholename '/proc/*' -prune \) -o \( -type f -perm -0002 \) -exec ls -l '{}' ';' 2>/dev/null", "msg":"World Writable Files", "results":results},
"SUID":{"cmd":"find / \( -perm -2000 -o -perm -4000 \) -exec ls -ld {} \; 2>/dev/null", "msg":"SUID/SGID Files and Directories", "results":results},
"ROOTHOME":{"cmd":"ls -ahlR /root 2>/dev/null", "msg":"Checking if/root's home folder is accessible", "results":results}
}
fdPerms = execCmd(fdPerms)
printResults(fdPerms)
pwdFiles = {"LOGPWDS":{"cmd":"find /var/log -name '*.log' 2>/dev/null | xargs -l10 egrep 'pwd|password' 2>/dev/null", "msg":"Logs containing keyword 'password'", "results":results},
"CONFPWDS":{"cmd":"find /etc -name '*.c*' 2>/dev/null | xargs -l10 egrep 'pwd|password' 2>/dev/null", "msg":"Config files containing keyword 'password'", "results":results},
"SHADOW":{"cmd":"cat /etc/shadow 2>/dev/null", "msg":"Shadow File (Privileged)", "results":results}
}
pwdFiles = execCmd(pwdFiles)
printResults(pwdFiles)
# Processes and Applications
print "[*] ENUMERATING PROCESSES AND APPLICATIONS...\n"
if "debian" in sysInfo["KERNEL"]["results"][0] or "ubuntu" in sysInfo["KERNEL"]["results"][0]:
getPkgs = "dpkg -l | awk '{$1=$4=\"\"; print $0}'" # debian
else:
getPkgs = "rpm -qa | sort -u" # RH/other
getAppProc = {"PROCS":{"cmd":"ps aux | awk '{print $1,$2,$9,$10,$11}'", "msg":"Current processes", "results":results},
"PKGS":{"cmd":getPkgs, "msg":"Installed Packages", "results":results}
}
getAppProc = execCmd(getAppProc)
printResults(getAppProc) # comment to reduce output
otherApps = { "SUDO":{"cmd":"sudo -V | grep version 2>/dev/null", "msg":"Sudo Version (Check out http://www.exploit-db.com/search/?action=search&filter_page=1&filter_description=sudo)", "results":results},
"APACHE":{"cmd":"apache2 -v; apache2ctl -M; httpd -v; apachectl -l 2>/dev/null", "msg":"Apache Version and Modules", "results":results},
"APACHECONF":{"cmd":"cat /etc/apache2/apache2.conf 2>/dev/null", "msg":"Apache Config File", "results":results}
}
otherApps = execCmd(otherApps)
printResults(otherApps)
print "[*] IDENTIFYING PROCESSES AND PACKAGES RUNNING AS ROOT OR OTHER SUPERUSER...\n"
# find the package information for the processes currently running
# under /root or another super user
procs = getAppProc["PROCS"]["results"]
pkgs = getAppProc["PKGS"]["results"]
supusers = userInfo["SUPUSERS"]["results"]
procdict = {} # dictionary to hold the processes running as super users
for proc in procs: # loop through each process
relatedpkgs = [] # list to hold the packages related to a process
try:
for user in supusers: # loop through the known super users
if (user != "") and (user in proc): # if the process is being run by a super user
procname = proc.split(" ")[4] # grab the process name
if "/" in procname:
splitname = procname.split("/")
procname = splitname[len(splitname)-1]
for pkg in pkgs: # loop through the packages
if not len(procname) < 3: # name too short to get reliable package results
if procname in pkg:
if procname in procdict:
relatedpkgs = procdict[proc] # if already in the dict, grab its pkg list
if pkg not in relatedpkgs:
relatedpkgs.append(pkg) # add pkg to the list
procdict[proc]=relatedpkgs # add any found related packages to the process dictionary entry
except:
pass
for key in procdict:
print " " + key # print the process name
try:
if not procdict[key][0] == "": # only print the rest if related packages were found
print " Possible Related Packages: "
for entry in procdict[key]:
print " " + entry # print each related package
except:
pass
# EXPLOIT ENUMERATION
# First discover the avaialable tools
print
print "[*] ENUMERATING INSTALLED LANGUAGES/TOOLS FOR SPLOIT BUILDING...\n"
devTools = {"TOOLS":{"cmd":"which awk perl python ruby gcc cc vi vim nmap find netcat nc wget tftp ftp 2>/dev/null", "msg":"Installed Tools", "results":results}}
devTools = execCmd(devTools)
printResults(devTools)
print "[+] Related Shell Escape Sequences...\n"
escapeCmd = {"vi":[":!bash", ":set shell=/bin/bash:shell"], "awk":["awk 'BEGIN {system(\"/bin/bash\")}'"], "perl":["perl -e 'exec \"/bin/bash\";'"], "find":["find / -exec /usr/bin/awk 'BEGIN {system(\"/bin/bash\")}' \\;"], "nmap":["--interactive"]}
for cmd in escapeCmd:
for result in devTools["TOOLS"]["results"]:
if cmd in result:
for item in escapeCmd[cmd]:
print " " + cmd + "-->\t" + item
print
print "[*] FINDING RELEVENT PRIVILEGE ESCALATION EXPLOITS...\n"
# Now check for relevant exploits (note: this list should be updated over time; source: Exploit-DB)
# sploit format = sploit name : {minversion, maxversion, exploitdb#, language, {keywords for applicability}} -- current keywords are 'kernel', 'proc', 'pkg' (unused), and 'os'
sploits= { "2.2.x-2.4.x ptrace kmod local exploit":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"3", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.4.20 Module Loader Local Root Exploit":{"minver":"0", "maxver":"2.4.20", "exploitdb":"12", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.22 "'do_brk()'" local Root Exploit (PoC)":{"minver":"2.4.22", "maxver":"2.4.22", "exploitdb":"129", "lang":"asm", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.4.22 (do_brk) Local Root Exploit (working)":{"minver":"0", "maxver":"2.4.22", "exploitdb":"131", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.x mremap() bound checking Root Exploit":{"minver":"2.4", "maxver":"2.4.99", "exploitdb":"145", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.4.29-rc2 uselib() Privilege Elevation":{"minver":"0", "maxver":"2.4.29", "exploitdb":"744", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4 uselib() Privilege Elevation Exploit":{"minver":"2.4", "maxver":"2.4", "exploitdb":"778", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.x / 2.6.x uselib() Local Privilege Escalation Exploit":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"895", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 bluez Local Root Privilege Escalation Exploit (update)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"926", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"bluez"}},
"<= 2.6.11 (CPL 0) Local Root Exploit (k-rad3.c)":{"minver":"0", "maxver":"2.6.11", "exploitdb":"1397", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"MySQL 4.x/5.0 User-Defined Function Local Privilege Escalation Exploit":{"minver":"0", "maxver":"99", "exploitdb":"1518", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"mysql"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2004", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (2)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2005", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (3)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2006", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (4)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2011", "lang":"sh", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.6.17.4 (proc) Local Root Exploit":{"minver":"0", "maxver":"2.6.17.4", "exploitdb":"2013", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 prctl() Local Root Exploit (logrotate)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2031", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Ubuntu/Debian Apache 1.3.33/1.3.34 (CGI TTY) Local Root Exploit":{"minver":"4.10", "maxver":"7.04", "exploitdb":"3384", "lang":"c", "keywords":{"loc":["os"], "val":"debian"}},
"Linux/Kernel 2.4/2.6 x86-64 System Call Emulation Exploit":{"minver":"2.4", "maxver":"2.6", "exploitdb":"4460", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.11.5 BLUETOOTH Stack Local Root Exploit":{"minver":"0", "maxver":"2.6.11.5", "exploitdb":"4756", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"bluetooth"}},
"2.6.17 - 2.6.24.1 vmsplice Local Root Exploit":{"minver":"2.6.17", "maxver":"2.6.24.1", "exploitdb":"5092", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.23 - 2.6.24 vmsplice Local Root Exploit":{"minver":"2.6.23", "maxver":"2.6.24", "exploitdb":"5093", "lang":"c", "keywords":{"loc":["os"], "val":"debian"}},
"Debian OpenSSL Predictable PRNG Bruteforce SSH Exploit":{"minver":"0", "maxver":"99", "exploitdb":"5720", "lang":"python", "keywords":{"loc":["os"], "val":"debian"}},
"Linux Kernel < 2.6.22 ftruncate()/open() Local Exploit":{"minver":"0", "maxver":"2.6.22", "exploitdb":"6851", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.29 exit_notify() Local Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.29", "exploitdb":"8369", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6 UDEV Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8478", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"udev"}},
"2.6 UDEV < 141 Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8572", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"udev"}},
"2.6.x ptrace_attach Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8673", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.29 ptrace_attach() Local Root Race Condition Exploit":{"minver":"2.6.29", "maxver":"2.6.29", "exploitdb":"8678", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Linux Kernel <=2.6.28.3 set_selection() UTF-8 Off By One Local Exploit":{"minver":"0", "maxver":"2.6.28.3", "exploitdb":"9083", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Test Kernel Local Root Exploit 0day":{"minver":"2.6.18", "maxver":"2.6.30", "exploitdb":"9191", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"PulseAudio (setuid) Priv. Escalation Exploit (ubu/9.04)(slack/12.2.0)":{"minver":"2.6.9", "maxver":"2.6.30", "exploitdb":"9208", "lang":"c", "keywords":{"loc":["pkg"], "val":"pulse"}},
"2.x sock_sendpage() Local Ring0 Root Exploit":{"minver":"2", "maxver":"2.99", "exploitdb":"9435", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.x sock_sendpage() Local Root Exploit 2":{"minver":"2", "maxver":"2.99", "exploitdb":"9436", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() ring0 Root Exploit (simple ver)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9479", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6 < 2.6.19 (32bit) ip_append_data() ring0 Root Exploit":{"minver":"2.6", "maxver":"2.6.19", "exploitdb":"9542", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() Local Root Exploit (ppc)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9545", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.19 udp_sendmsg Local Root Exploit (x86/x64)":{"minver":"0", "maxver":"2.6.19", "exploitdb":"9574", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.19 udp_sendmsg Local Root Exploit":{"minver":"0", "maxver":"2.6.19", "exploitdb":"9575", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() Local Root Exploit [2]":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9598", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() Local Root Exploit [3]":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9641", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.1-2.4.37 and 2.6.1-2.6.32-rc5 Pipe.c Privelege Escalation":{"minver":"2.4.1", "maxver":"2.6.32", "exploitdb":"9844", "lang":"python", "keywords":{"loc":["kernel"], "val":"kernel"}},
"'pipe.c' Local Privilege Escalation Vulnerability":{"minver":"2.4.1", "maxver":"2.6.32", "exploitdb":"10018", "lang":"sh", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.18-20 2009 Local Root Exploit":{"minver":"2.6.18", "maxver":"2.6.20", "exploitdb":"10613", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Apache Spamassassin Milter Plugin Remote Root Command Execution":{"minver":"0", "maxver":"99", "exploitdb":"11662", "lang":"sh", "keywords":{"loc":["proc"], "val":"spamass-milter"}},
"<= 2.6.34-rc3 ReiserFS xattr Privilege Escalation":{"minver":"0", "maxver":"2.6.34", "exploitdb":"12130", "lang":"python", "keywords":{"loc":["mnt"], "val":"reiser"}},
"Ubuntu PAM MOTD local /root":{"minver":"7", "maxver":"10.04", "exploitdb":"14339", "lang":"sh", "keywords":{"loc":["os"], "val":"ubuntu"}},
"< 2.6.36-rc1 CAN BCM Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.36", "exploitdb":"14814", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Kernel ia32syscall Emulation Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"15023", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Linux RDS Protocol Local Privilege Escalation":{"minver":"0", "maxver":"2.6.36", "exploitdb":"15285", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.6.37 Local Privilege Escalation":{"minver":"0", "maxver":"2.6.37", "exploitdb":"15704", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.37-rc2 ACPI custom_method Privilege Escalation":{"minver":"0", "maxver":"2.6.37", "exploitdb":"15774", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"CAP_SYS_ADMIN to /root Exploit":{"minver":"0", "maxver":"99", "exploitdb":"15916", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"CAP_SYS_ADMIN to Root Exploit 2 (32 and 64-bit)":{"minver":"0", "maxver":"99", "exploitdb":"15944", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.36.2 Econet Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.36.2", "exploitdb":"17787", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Sendpage Local Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"19933", "lang":"ruby", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.18/19 Privileged File Descriptor Resource Exhaustion Vulnerability":{"minver":"2.4.18", "maxver":"2.4.19", "exploitdb":"21598", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.2.x/2.4.x Privileged Process Hijacking Vulnerability (1)":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"22362", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.2.x/2.4.x Privileged Process Hijacking Vulnerability (2)":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"22363", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Samba 2.2.8 Share Local Privilege Elevation Vulnerability":{"minver":"2.2.8", "maxver":"2.2.8", "exploitdb":"23674", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"samba"}},
"open-time Capability file_ns_capable() - Privilege Escalation Vulnerability":{"minver":"0", "maxver":"99", "exploitdb":"25307", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"open-time Capability file_ns_capable() Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"25450", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
}
# variable declaration
os = sysInfo["OS"]["results"][0]
version = sysInfo["KERNEL"]["results"][0].split(" ")[2].split("-")[0]
langs = devTools["TOOLS"]["results"]
procs = getAppProc["PROCS"]["results"]
kernel = str(sysInfo["KERNEL"]["results"][0])
mount = driveInfo["MOUNT"]["results"]
#pkgs = getAppProc["PKGS"]["results"] # currently not using packages for sploit appicability but my in future
# lists to hold ranked, applicable sploits
# note: this is a best-effort, basic ranking designed to help in prioritizing priv escalation exploit checks
# all applicable exploits should be checked and this function could probably use some improvement
avgprob = []
highprob = []
for sploit in sploits:
lang = 0 # use to rank applicability of sploits
keyword = sploits[sploit]["keywords"]["val"]
sploitout = sploit + " || " + "http://www.exploit-db.com/exploits/" + sploits[sploit]["exploitdb"] + " || " + "Language=" + sploits[sploit]["lang"]
# first check for kernell applicability
if (version >= sploits[sploit]["minver"]) and (version <= sploits[sploit]["maxver"]):
# next check language applicability
if (sploits[sploit]["lang"] == "c") and (("gcc" in str(langs)) or ("cc" in str(langs))):
lang = 1 # language found, increase applicability score
elif sploits[sploit]["lang"] == "sh":
lang = 1 # language found, increase applicability score
elif (sploits[sploit]["lang"] in str(langs)):
lang = 1 # language found, increase applicability score
if lang == 0:
sploitout = sploitout + "**" # added mark if language not detected on system
# next check keyword matches to determine if some sploits have a higher probability of success
for loc in sploits[sploit]["keywords"]["loc"]:
if loc == "proc":
for proc in procs:
if keyword in proc:
highprob.append(sploitout) # if sploit is associated with a running process consider it a higher probability/applicability
break
break
elif loc == "os":
if (keyword in os) or (keyword in kernel):
highprob.append(sploitout) # if sploit is specifically applicable to this OS consider it a higher probability/applicability
break
elif loc == "mnt":
if keyword in mount:
highprob.append(sploitout) # if sploit is specifically applicable to a mounted file system consider it a higher probability/applicability
break
else:
avgprob.append(sploitout) # otherwise, consider average probability/applicability based only on kernel version
print " Note: Exploits relying on a compile/scripting language not detected on this system are marked with a '**' but should still be tested!"
print
print " The following exploits are ranked higher in probability of success because this script detected a related running process, OS, or mounted file system"
for exploit in highprob:
print " - " + exploit
print
print " The following exploits are applicable to this kernel version and should be investigated as well"
for exploit in avgprob:
print " - " + exploit
print
print "Finished"
print bigline
| [
"joe.spann@gmail.com"
] | joe.spann@gmail.com |
22cca10d1314023d365cc0bdaae9d23ec9feeb56 | ac4b9385b7ad2063ea51237fbd8d1b74baffd016 | /.history/utils/ocr/handle_image_20210209170155.py | 17a1ffee42abc92b121aff59d84aa5bebaf2bf31 | [] | no_license | preethanpa/ssoemprep | 76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f | ce37127845253c768d01aeae85e5d0d1ade64516 | refs/heads/main | 2023-03-09T00:15:55.130818 | 2021-02-20T06:54:58 | 2021-02-20T06:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,092 | py | import os
import cv2
import re
import numpy as np
from PIL import Image
import pytesseract
from pytesseract import Output
from fpdf import FPDF
'''
IMAGE HANDLING METHODS
'''
# get grayscale image
def get_grayscale(image):
    """Convert a BGR image to a single-channel grayscale image."""
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return grayscale
# blur removal
def remove_blur(image):
    """Smooth the image with a median filter of aperture 5."""
    aperture = 5
    return cv2.medianBlur(image, aperture)
# noise removal
def remove_noise(image):
    """Denoise a colour image with OpenCV's non-local-means filter."""
    strength = 10
    return cv2.fastNlMeansDenoisingColored(image, None, strength, strength, 7, 15)
#thresholding
def thresholding(image):
    """Binarise an image using Otsu's automatic threshold.

    NOTE(review): cv2 Otsu thresholding requires a single-channel input --
    callers are expected to pass a grayscale image; confirm at call sites.
    """
    flags = cv2.THRESH_BINARY + cv2.THRESH_OTSU
    _, binary = cv2.threshold(image, 0, 255, flags)
    return binary
#dilation
def dilate(image):
    """Grow foreground regions with one pass of a 5x5 all-ones kernel."""
    structuring_element = np.ones((5, 5), dtype=np.uint8)
    return cv2.dilate(image, structuring_element, iterations=1)
#erosion
def erode(image):
    """Shrink foreground regions with one pass of a 5x5 all-ones kernel."""
    structuring_element = np.ones((5, 5), dtype=np.uint8)
    return cv2.erode(image, structuring_element, iterations=1)
def extract_pdf_from_image(fileName='', pdf_path='', action='', psm=3):
    '''
    OCR an image file and save the result as a searchable PDF.

    fileName -- path of the source image
    pdf_path -- directory the generated PDF is written into (created if missing)
    action   -- preprocessing step: 1 = denoise, 2 = grayscale, 3 = median blur;
                any other value (including the default '') leaves the image as-is
    psm      -- Tesseract page-segmentation mode (default 3, fully automatic)

    Returns the path of the generated PDF.
    '''
    print(f'FileName is {fileName}')
    custom_config = r'-l eng --psm ' + str(psm)
    pdfdir = pdf_path
    if not os.path.exists(pdfdir):
        os.makedirs(pdfdir)
    pdfFileName = os.path.basename(fileName).split('.')[0] + '.pdf'
    pdfFilePath = pdfdir + '/' + pdfFileName
    print(f'PDF File Path {pdfFilePath}')
    img = cv2.imread(fileName)
    # FIX: img1 previously stayed None when action was not 1/2/3 (including the
    # default action='') and pytesseract then crashed on a None image; fall
    # back to the unprocessed image instead.
    img1 = img
    if action == 1:
        img1 = remove_noise(img)
    if action == 2:
        img1 = get_grayscale(img)
    if action == 3:
        img1 = remove_blur(img)
    # FIX: custom_config (and therefore the psm parameter) was built but never
    # forwarded to Tesseract; pass it so psm actually takes effect.
    text = pytesseract.image_to_pdf_or_hocr(img1, extension='pdf', config=custom_config)
    with open(pdfFilePath, mode='w+b') as f:
        f.write(text)
    return pdfFilePath
def convert_text_to_pdf(text='', pdf_path='', filename=''):
    '''
    Render plain text as a PDF file.

    text     -- the content to render; written to a scratch file first.
                NOTE(review): the scratch file is opened in binary mode, so
                callers appear to pass bytes -- confirm at call sites.
    pdf_path -- directory the PDF is written into (created if missing)
    filename -- base name (without extension) for both scratch and PDF files
    '''
    tempdir = "/tmp"
    pdfdir = pdf_path
    textFileName = tempdir + '/' + filename + ".txt"
    pdfFileName = pdfdir + '/' + filename + ".pdf"
    if not os.path.exists(tempdir):
        os.makedirs(tempdir)
    # FIX: a stray "(" after this condition made the original block a syntax
    # error; the intent is simply to create the output directory when missing.
    if not os.path.exists(pdfdir):
        os.makedirs(pdfdir)
    pdf = FPDF()
    pdf.add_page()
    # FPDF's built-in Arial only covers latin-1, hence the character stripping
    # in the loop below.
    pdf.set_font("Arial", size=15)
    # Stage the text in a scratch file, then read it back line by line.
    with open(textFileName, mode='w+b') as f:
        f.write(text)
    line = 1
    # FIX: the read handle was previously opened without ever being closed.
    with open(textFileName, "r") as f:
        for x in f:
            # drop em-dashes and curly quotes that Arial/latin-1 cannot encode
            x1 = re.sub(u"(\u2014|\u2018|\u2019|\u201c|\u201d)", "", x)
            # NOTE(review): FPDF documents ln as 0/1/2; an increasing counter
            # behaves like ln>0 (move down, keep x) -- verify this is intended.
            pdf.cell(100, 10, txt=x1, ln=line, align='L')
            line = line + 1
    # save the pdf with name .pdf
    pdf.output(pdfFileName, 'F')
def mark_region(image_path):
    """Find likely text regions in a scanned page and outline them.

    Returns (annotated_image, line_items_coordinates) where each entry of
    line_items_coordinates is [(x, y), (2200, y + h)] -- the top-left corner
    and a fixed-right-edge bottom corner of a detected region.
    NOTE(review): the y/x cut-offs (600/1000/2400/2000) and the fixed 2200
    right edge assume a particular scan resolution -- confirm for new inputs.
    """
    im = cv2.imread(image_path)
    # FIX: "image" was previously only assigned inside the contour branches and
    # the return raised UnboundLocalError when no contour qualified; default to
    # the (un-annotated) input image.
    image = im
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (9, 9), 0)
    thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 30)
    # Dilate to combine adjacent text contours
    # (renamed from "dilate" so the module-level dilate() helper is not shadowed)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
    dilated = cv2.dilate(thresh, kernel, iterations=4)
    # Find contours, highlight text areas, and extract ROIs
    cnts = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    line_items_coordinates = []
    for c in cnts:
        area = cv2.contourArea(c)
        x, y, w, h = cv2.boundingRect(c)
        if y >= 600 and x <= 1000:
            if area > 10000:
                image = cv2.rectangle(im, (x, y), (2200, y + h), color=(255, 0, 255), thickness=3)
                line_items_coordinates.append([(x, y), (2200, y + h)])
        if y >= 2400 and x <= 2000:
            image = cv2.rectangle(im, (x, y), (2200, y + h), color=(255, 0, 255), thickness=3)
            line_items_coordinates.append([(x, y), (2200, y + h)])
    # FIX: removed a stray trailing ")" that made the original return a syntax error
    return image, line_items_coordinates
"{abhi@third-ray.com}"
] | {abhi@third-ray.com} |
613e4670af944ff84ec49616bde20658eee1914b | 9b09d354053fe571eec2e8335248ee3311bc3f48 | /python/tHome/util/hex/dump.py | c72f406e2ca296ca476247d87b1c3ce149029000 | [
"BSD-2-Clause"
] | permissive | TD22057/T-Home | c29d3e6f8924f8661694ac5c6722945e46cf6d8a | 5dc8689f52d87dac890051e540b338b009293ced | refs/heads/master | 2018-11-07T02:28:41.821700 | 2018-08-27T23:38:06 | 2018-08-27T23:38:06 | 44,873,682 | 20 | 14 | BSD-2-Clause | 2018-08-27T23:38:07 | 2015-10-24T16:07:35 | Python | UTF-8 | Python | false | false | 837 | py | #===========================================================================
#
# Dump hex bytes to a table.
#
#===========================================================================
import StringIO
#===========================================================================
def dump( buf ):
    """Format a byte buffer as an uppercase-hex table, 10 bytes per row.

    Returns the table as a string.  Generalized from the original Python-2-only
    implementation (str.encode("hex") does not exist on Python 3): iteration
    items may be 1-char strings (Python 2) or ints (Python 3 bytes).
    """
    pieces = ["---: 00 01 02 03 04 05 06 07 08 09\n"]
    for i, item in enumerate(buf):
        # Python 2 iteration over a str yields 1-char strings; Python 3
        # iteration over bytes yields ints.
        code = item if isinstance(item, int) else ord(item)
        if i % 10 == 0:
            if i > 0:
                pieces.append("\n")
            pieces.append("%03d: " % i)  # row label: offset of first byte in row
        pieces.append("%02X " % code)
    pieces.append("\n")
    return "".join(pieces)
#===========================================================================
| [
"ted.drain@gmail.com"
] | ted.drain@gmail.com |
95d139364528c7fe52cf3bca523c96c6f4958e7a | 326b8651880c0295f2c0c9207dda1c6db54599a2 | /release_notes_generator.py | afbdf7803c0f3692df3a012879f8c415a50e292b | [] | no_license | chris-relaxing/Release-Notes-Generator | 81ba2f379198547dee40048c3d84f8551eef7483 | faf68db6781963bb46191a54f61062d0b2619316 | refs/heads/master | 2021-04-27T21:24:20.893978 | 2018-02-21T22:34:01 | 2018-02-21T22:34:01 | 122,399,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,743 | py | #-------------------------------------------------------------------------------
# Name: Release Notes Generator
# Purpose: Create Word.docx Release Notes on the fly, with minimal input.
#
# Author: Chris Nielsen
# Last Updated: June 15, 2015
# Note: Some of the code used for editing .docx files with Python comes from:
# https://github.com/mikemaccana/python-docx/blob/master/docx.py
#-------------------------------------------------------------------------------
import os
import re
import time
import shutil
import zipfile
from xml.etree import ElementTree as etree
from os.path import abspath, basename, join
from exceptions import PendingDeprecationWarning
from warnings import warn
from Tkinter import *
import tkMessageBox
import tkFileDialog
# Choices offered by the input dialogs.
regionList = ['APAC', 'AUNZ', 'NA', 'SAM', 'India', 'EEU', 'WEU', 'MEA', 'TWN', 'EU', 'KOR', 'HK']
dvnList = ['151E0','15105','15109','151F0','15118','15122','151G0','15131','15135','151H0','15144','15148', '151J0','161E0','161F0','161G0','161H0']
monthList = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
yearList = ['2013', '2014', '2015', '2016', '2017']
# Filled at startup by loadProductTemplates() from the templates folder.
productList = []
versionList = ['1.0', '2.1', '3.0', '4.0']
# Values picked in the GUI; written by the Page dialogs, read by the
# document-generation code after the main loop exits.
selected_region = ''
selected_initDVN = ''
selected_product = ''
selected_month = ''
selected_year = ''
selected_version = ''
# Inputs for testing
##region = "SAM"
##qtr = "Q2"
##year = "2014"
##month = "April"
##product = "2D Signs"
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these # make it easier to copy Word output more easily.
# Prefix -> namespace-URI table used to build qualified OOXML tag names
# (e.g. '{%s}t' % nsprefixes['w'] below in AdvReplace).
nsprefixes = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
           'Drawing'),
    # Properties (core and extended)
    'cp': ('http://schemas.openxmlformats.org/package/2006/metadata/core-pr'
           'operties'),
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': ('http://schemas.openxmlformats.org/officeDocument/2006/extended-'
           'properties'),
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relationsh'
          'ips'),
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/'}
#------------------------------------------------------------------------
class Page(Frame): # A tk Frame widget
    """One screen of the input wizard.

    page == "p1" builds the main selection form (product/region/date/DVN);
    any other value builds the follow-up "version" form, shown only when the
    "Hypothetical" product is selected.  Results are published through the
    module-level selected_* globals.  Relies on the module-level Tk ``root``.
    """
    def __init__(self, parent, page, *args, **kwargs):
        # Build either the first or the second input page into this frame.
        Frame.__init__(self, *args, borderwidth=0, **kwargs)
        self.parent = parent
        self.pack(fill=BOTH, expand=1)
        self.columnconfigure(0, weight = 1)
        self.centerWindow()
        if page == "p1":
            self.initUI(page)
        else:
            self.initPage2(page)
    def initUI(self, page):
        """Lay out page 1: product, region, month, year and DVN inputs."""
        root.title("Release Notes Generator")
        windowBorder = LabelFrame(self, text=" Release Notes Inputs: ", padx=0, pady=0, width=740,height=260)
        windowBorder.grid(row = 0, column = 0, pady=10, padx=10, columnspan = 3, rowspan = 4, sticky='NW')
        region = StringVar()
        initDVN = StringVar()
        month = StringVar()
        year = StringVar()
        product = StringVar()
        # NOTE(review): this "version" StringVar is never read in this method;
        # the version selection happens on page 2 (initPage2).
        version = StringVar()
        select_width = 48
        product.set('Select Product:') # default value
        S = OptionMenu(self, product, *productList)
        S.config(width=select_width)
        S.pack( side = LEFT)
        S.grid(row = 1, column = 0, pady=10, padx=20, sticky='NW')
        region.set('Select Region:') # default value
        O = OptionMenu(self, region, *regionList)
        O.config(width=select_width)
        O.pack( side = LEFT)
        O.grid(row = 1, column = 1, pady=10, padx=20, columnspan = 2, sticky='NW')
        month.set('Select Month:') # default value
        Q = OptionMenu(self, month, *monthList)
        Q.config(width=select_width)
        Q.pack( side = LEFT)
        Q.grid(row = 2, column = 0, pady=10, padx=20, sticky='NW')
        year.set('Select Year:') # default value
        R = OptionMenu(self, year, *yearList)
        R.config(width=select_width)
        R.pack( side = LEFT)
        R.grid(row = 2, column = 1, pady=10, padx=20, columnspan = 2, sticky='NW')
        initDVN.set('Select the initial release DVN:') # default value
        P = OptionMenu(self, initDVN, *dvnList)
        P.config(width=select_width)
        P.pack( side = LEFT)
        P.grid(row = 3, column = 0, pady=10, padx=20, sticky='NW')
        DVN = StringVar()
        Label(self, text = 'DVN:').grid(row = 3, column = 1, pady=15, padx=0, sticky='NE')
        Entry(self, width=6, textvariable = DVN).grid(row = 3, column = 2, pady=15, padx=0, sticky='NW')
        submitButton = LabelFrame(self, text="", padx=0, pady=0, width=740,height=80)
        submitButton.grid(row = 4, column = 0, pady=10, padx=10, columnspan = 3, sticky='NW')
        # The lambda resolves multCommands at click time, so defining it below
        # the Button line is safe.
        Button(self, text = '   Generate Release Notes   ', command = lambda: multCommands(region, initDVN, product, month, year, DVN)).grid(row = 4, columnspan = 3, pady=35, padx=15, sticky='N')
        def multCommands(region, initDVN, product, month, year, DVN):
            # Submit handler: copy widget values into the module-level
            # selected_* globals, validate, then advance or close.
            global selected_region
            global selected_initDVN
            global selected_product
            global selected_month
            global selected_year
            global selected_DVN
            region = str(region.get())
            initDVN = str(initDVN.get())
            month = str(month.get())
            year = str(year.get())
            product = str(product.get())
            DVN = str(DVN.get())
            selected_region = region
            selected_initDVN = initDVN
            selected_product = product
            selected_month = month
            selected_year = year
            # An explicit DVN entry overrides the initial-release DVN choice.
            if DVN <> '':
                selected_DVN = DVN
            else:
                selected_DVN = initDVN
            printInputs(region, initDVN, product, month, year, DVN)
            # This is the logic that determines whether or not to go on to a second
            # page of inputs. A second page of inputs will appear (asking for version (placeholder))
            # if the product selected is "Hypothetical". Otherwise, the root window will close after
            # one page of inputs.
            # -------------------------------
            if selected_product == "Hypothetical":
                self.callback()
            else:
                try:
                    self.close_window()
                except:
                    pass
            # -------------------------------
        def printInputs(region, initDVN, product, month, year, DVN):
            # Echo the selections to the console and raise an error dialog for
            # each dropdown still showing its placeholder text.
            print "The selected region is:", region
            print "The selected initial release DVN is:", initDVN
            print "The selected month is:", month
            print "The selected year is:", year
            print "The selected product is:", product
            print "The selected DVN is:", DVN
            missing_selections = ["Select Region:", "Select Quarter:", "Select Month:", "Select Year:", "Select Product:"]
            e = "Error"
            if product == missing_selections[4]:
                m = "Error. Please select product to continue."
                ThrowError(e, m, "", "")
            elif region == missing_selections[0]:
                m = "Error. Please select region to continue."
                ThrowError(e, m, "", "")
            elif initDVN == missing_selections[1]:
                m = "Error. Please select initial release DVN to continue."
                ThrowError(e, m, "", "")
            elif year == missing_selections[3]:
                m = "Error. Please select year to continue."
                ThrowError(e, m, "", "")
            elif month == missing_selections[2]:
                m = "Error. Please select month to continue."
                ThrowError(e, m, "", "")
            else:
                pass
    def initPage2(self, page):
        """Lay out page 2: the version dropdown (used for "Hypothetical")."""
        windowBorder = LabelFrame(self, text=" More release notes inputs needed: ", padx=0, pady=0, width=740,height=260)
        windowBorder.grid(row = 0, column = 0, pady=10, padx=10, columnspan = 2, rowspan = 4, sticky='NW')
        version = StringVar()
        select_width = 46
        version.set('Select Version:') # default value
        t = OptionMenu(self, version, *versionList)
        t.config(width=select_width)
        t.pack( side = TOP)
        t.grid(row = 1, column = 0, pady=0, padx=20, sticky='NW')
        submitButton = LabelFrame(self, text="", padx=0, pady=0, width=600,height=80)
        submitButton.grid(row = 4, column = 0, pady=10, padx=10, columnspan = 2, sticky='NW')
        Button(self, text = '   Generate Release Notes   ', command = lambda: multCommands2(version)).grid(row = 4, columnspan = 2, pady=35, padx=15, sticky='N')
        def multCommands2(version):
            # Submit handler for page 2: close this page, record the version.
            self.callback()
            printInputs2(version)
        def printInputs2(version):
            # Publish the chosen version through the module-level global.
            global selected_version
            version = str(version.get())
            selected_version = version
            print "The selected version is:", version
    def centerWindow(self):
        """Size the root window to 760x380 and centre it on the screen."""
        w = 760
        h = 380
        sw = root.winfo_screenwidth()
        sh = root.winfo_screenheight()
        # Python 2 integer division: fine here since all operands are ints.
        x = (sw - w)/2
        y = (sh - h)/2
        root.geometry('%dx%d+%d+%d' % (w,h, x, y))
    def onlift(self):
        # Raise this page above its sibling in the stacking order.
        self.lift()
    def close_window(self):
        # Tear down the whole application window.
        root.destroy()
#------------------------------------------------------------------------
class App(Frame): # A tk Frame widget app, allowing for switching between multiple frames
    """Container that stacks the two Page frames and wires their transitions.

    Page 1's callback lifts page 2 to the front; page 2's callback closes it.
    Also installs a confirm-on-close handler on the module-level Tk ``root``.
    """
    def __init__(self, *args, **kwargs):
        Frame.__init__(self, *args, **kwargs)
        root.protocol("WM_DELETE_WINDOW", self.handler)
        p1 = Page(self, 'p1') # Create two Page instances
        p2 = Page(self, 'p2') #
        p1.callback = p2.onlift # Switch to the second window
        p2.callback = p2.close_window # close the second window
        p1.place(x=0, y=0, relwidth=1, relheight=1) # both frames stacked on top of each other
        p2.place(x=0, y=0, relwidth=1, relheight=1) # both frames stacked on top of each other
        p1.onlift()
    def handler(self):
        """Window-close handler: confirm, then tear down and exit the process."""
        if tkMessageBox.askokcancel("Quit?", "Are you sure you want to quit?"):
            root.destroy()
            print "Destoy root window."
            self.master.quit()
            print "Quit main loop."
            sys.exit()
#------------------------------------------------------------------------
#taken from http://stackoverflow.com/questions/458436/adding-folders-to-a-zip-file-using-python
def zipdir(dirPath=None, zipFilePath=None, includeDirInZip=False):
    """Zip the contents of dirPath into zipFilePath (defaults to dirPath+'.zip').

    includeDirInZip controls whether archive member names keep the top-level
    folder name.  On failure to create the archive, shows an error dialog and
    exits the process.
    """
    if not zipFilePath:
        zipFilePath = dirPath + ".zip"
    if not os.path.isdir(dirPath):
        raise OSError("dirPath argument must point to a directory.'%s' does not." % dirPath)
    parentDir, dirToZip = os.path.split(dirPath)
    ##---------------------------------
    def trimPath(path):
        # Convert an absolute path into the relative archive member name.
        try:
            archivePath = path.replace(parentDir, "", 1)
            if parentDir:
                archivePath = archivePath.replace(os.path.sep, "", 1)
            if not includeDirInZip:
                archivePath = archivePath.replace(dirToZip + os.path.sep, "", 1)
            return os.path.normcase(archivePath)
        except:
            print "trimPath failure, exiting.."
            sys.exit()
    ##---------------------------------
    try:
        outFile = zipfile.ZipFile(zipFilePath, "w", compression=zipfile.ZIP_DEFLATED)
    except:
        e = "Error"
        m = "Error. The Release Notes generator is looking for a \"new_rn\" folder in the same directory where the script is running. \nThis folder needs to be created and is where your generated release notes will be stored."
        ThrowError(e, m, generated_folder, "")
        sys.exit()
    for (archiveDirPath, dirNames, fileNames) in os.walk(dirPath):
        for fileName in fileNames:
            filePath = os.path.join(archiveDirPath, fileName)
            outFile.write(filePath, trimPath(filePath))
        # Make sure we get empty directories as well
        # NOTE(review): zipInfo is created but never written to the archive,
        # so empty directories are in fact NOT preserved -- confirm intent.
        if not fileNames and not dirNames:
            zipInfo = zipfile.ZipInfo(trimPath(archiveDirPath) + "/")
    outFile.close()
#------------------------------------------------------------------------
def createSecondaries():
    """Derive secondary values (currently only full_region) from the GUI picks.

    Sets the module-level full_region, e.g. 'Taiwan (TWN)'.  AUNZ is rendered
    as '(AU)' and India is used verbatim.  The quarter-based derivations are
    commented out but the globals are still declared for them.
    """
    global yyyy_q
    global qqyy
    global qq_yy
    global full_region
    region = selected_region
##    qtr = selected_qtr
    # NOTE(review): product/month/year/version below are assigned but never
    # read in this function -- presumably leftovers from the quarter logic.
    product = selected_product
    month = selected_month
    year = selected_year
    version = selected_version
    # Region code -> human-readable region name.
    regionHash = {
        'TWN' : 'Taiwan',
        'APAC' : 'Asia Pacific',
        'WEU' : 'Western Europe',
        'EEU' : 'Eastern Europe',
        'NA' : 'North America',
        'RN' : 'India',
        'India' : 'India',
        'SAM' : 'South America',
        'MEA' : 'Middle East/Africa',
        'AUNZ' : 'Australia/New Zealand',
        'EU' : 'Europe',
        'KOR' : 'South Korea',
        'HK' : 'Hong Kong-China'
    }
##    q = qtr[1:]
##    yy = year[2:]
##    yyyy_q = year+'.'+q
##
##    qqyy = qtr+yy
##    qq_yy = qtr+'/'+yy
    # An unknown region code raises KeyError and the bare except silently
    # terminates the whole program.
    try:
        full_region = regionHash[region]+' ('+region+')'
        if region == "AUNZ":
            full_region = regionHash[region]+' (AU)'
        if region == "India":
            full_region = region
    except:
        sys.exit()
#------------------------------------------------------------------------
def getReplacements():
    """Build the placeholder-to-value map used when rewriting the template XML.

    Values come from the module-level selections captured by the input dialog;
    AUNZ is presented to the templates as the short code 'AU'.
    """
    global selected_region
    adjusted_region = 'AU' if selected_region == 'AUNZ' else selected_region
    # Populated with globals
    return {
        '==YEAR==': selected_year,            # eg. 2014
        '==INITDVN==': selected_initDVN,      # eg. 151F0,15135
        '==REGION==': adjusted_region,        # eg. TWN
        '==MONTH==': selected_month,          # eg. February
        '==FULL_REGION==': full_region,       # eg. Taiwan (TWN)
        '==DVN==': selected_DVN,              # eg. 151F0
    }
#------------------------------------------------------------------------
def readDocument(theDirectory):
    """Parse the XML file at *theDirectory* and return its root element.

    FIX: the original used the Python-2-only ``file`` builtin
    (``file.read(handle)``) and never closed the handle; a ``with`` block
    fixes both while keeping the same return value.
    """
    with open(theDirectory) as xmlDataFile:
        xmlData = xmlDataFile.read()
    return etree.fromstring(xmlData)
#------------------------------------------------------------------------
# Unzip an OpenXML Document and pass the directory back
def unpackTheOpenXMLFile(theOpenXMLFile, uncompressedDirectoryName):
    """Extract an OpenXML (.docx) package into a directory and return that
    directory path.

    FIX: the ZipFile was never closed; the ``with`` block releases the file
    handle as soon as extraction finishes.
    """
    with zipfile.ZipFile(theOpenXMLFile) as theFile:
        theFile.extractall(path=uncompressedDirectoryName)
    return uncompressedDirectoryName
#------------------------------------------------------------------------
# The AdvSearch and AdvReplace were based off of https://github.com/mikemaccana/python-docx/blob/master/docx.py
def findTypeParent(element, tag):
    """ Finds fist parent of element of the given type
    @param object element: etree element (lxml-style: must support getparent())
    @param string the tag parent to search for
    @return object element: the found parent or None when not found
    """
    # FIX: the original looped "while True" and called getparent() on the
    # result unconditionally, so ascending past the root raised
    # AttributeError on None instead of returning None as documented.
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Not found
    return None
#------------------------------------------------------------------------
def AdvReplace(document, search, replace, bs=3):
    # Change this function so that search and replace are arrays instead of strings
    """
    Replace all occurences of string with a different string, return updated
    document
    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.
    What it does:
    It searches the entire document body for text blocks.
    Then scan thos text blocks for replace.
    Since the text to search could be spawned across multiple text blocks,
    we need to adopt some sort of algorithm to handle this situation.
    The smaller matching group of blocks (up to bs) is then adopted.
    If the matching group has more than one block, blocks other than first
    are cleared and all the replacement text is put on first block.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
    append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    """
    # NOTE(review): despite the signature, the "search" and "replace"
    # parameters are immediately overwritten below -- the function actually
    # iterates the module-level replacementHash and applies every pair.
    # NOTE(review): .iteritems() and getparent() are Python-2 / lxml APIs;
    # this module imports xml.etree -- confirm which etree is used at runtime.
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    for k, v in replacementHash.iteritems():
        #print k, v
        search = k
        replace = v
        searchre = re.compile(search)
        # Will match against searchels. Searchels is a list that contains last
        # n text elements found in the document. 1 < n < bs
        searchels = []
        # If using Python 2.6, use newdocument.getiterator() instead of newdocument.iter():
        for element in newdocument.iter():
            if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
                if element.text:
                    # Add this element to searchels
                    searchels.append(element)
                    if len(searchels) > bs:
                        # Is searchels is too long, remove first elements
                        searchels.pop(0)
                    # Search all combinations, of searchels, starting from
                    # smaller up to bigger ones
                    # l = search lenght
                    # s = search start
                    # e = element IDs to merge
                    found = False
                    for l in range(1, len(searchels)+1):
                        if found:
                            break
                        #print "slen:", l
                        for s in range(len(searchels)):
                            if found:
                                break
                            if s+l <= len(searchels):
                                e = range(s, s+l)
                                #print "elems:", e
                                txtsearch = ''
                                for k in e:
                                    txtsearch += searchels[k].text
                                # Searcs for the text in the whole txtsearch
                                match = searchre.search(txtsearch)
                                if match:
                                    found = True
                                    curlen = 0
                                    replaced = False
                                    for i in e:
                                        curlen += len(searchels[i].text)
                                        if curlen > match.start() and not replaced:
                                            # The match occurred in THIS element.
                                            # Puth in the whole replaced text
                                            if isinstance(replace, etree._Element):
                                                # Convert to a list and process
                                                # it later
                                                replace = [replace]
                                            if isinstance(replace, (list, tuple)):
                                                # I'm replacing with a list of
                                                # etree elements
                                                # clear the text in the tag and
                                                # append the element after the
                                                # parent paragraph
                                                # (because t elements cannot have
                                                # childs)
                                                p = findTypeParent(
                                                    searchels[i],
                                                    '{%s}p' % nsprefixes['w'])
                                                searchels[i].text = re.sub(
                                                    search, '', txtsearch)
                                                insindex = p.getparent().index(p)+1
                                                for r in replace:
                                                    p.getparent().insert(
                                                        insindex, r)
                                                    insindex += 1
                                            else:
                                                # Replacing with pure text
                                                searchels[i].text = re.sub(
                                                    search, replace, txtsearch)
                                            replaced = True
                                        else:
                                            # Clears the other text elements
                                            searchels[i].text = ''
    return newdocument
#------------------------------------------------------------------------
def saveElements(document, docName):
    """Serialize *document* and write it into the unpacked docx 'word' folder.

    Only the parts this generator edits are written: any header part, any
    footer part, and the main document.xml; other names are ignored.
    Relies on the module-level ``extraction_dir`` set by setupEnvironment().
    """
    # FIX: the original had three byte-for-byte identical branches for
    # footer/header/document.xml; collapsed into one.  Also closes the output
    # handle (it was previously left open, so the data might not be flushed
    # before the folder is re-zipped).
    if 'footer' in docName or 'header' in docName or docName == 'document.xml':
        theData = etree.tostring(document)
        outputPath = extraction_dir + '\\' + docName
        with open(outputPath, 'w') as theOutputFile:
            theOutputFile.write(theData)
#------------------------------------------------------------------------
def ThrowError(title, message, path, special_note):
    """Show a modal message window with an OK button and block until dismissed.

    Used for both error and completion notices.  Creates its own Tk root
    (shadowing the module-level ``root`` inside this function) and runs its
    own mainloop, so this call blocks the caller.
    """
    root = Tk()
    root.title(title)
    # Window size and centred position on the screen.
    w = 1000
    h = 200
    sw = root.winfo_screenwidth()
    sh = root.winfo_screenheight()
    x = (sw - w)/2
    y = (sh - h)/2
    root.geometry('%dx%d+%d+%d' % (w,h, x, y))
    # Compose the displayed text: message, then path, then any extra note.
    m = message
    m += '\n'
    m += path
    m += special_note
    # NOTE: "w" is reused here (was the window width above) for the Label.
    w = Label(root, text=m, width=240, height=10)
    w.pack()
    b = Button(root, text="OK", command=root.destroy, width=10)
    b.pack()
    mainloop()
#------------------------------------------------------------------------
# Placeholder function to be used if config files are implemented
def readConfig():
    """Placeholder: locate (and eventually read) a config file next to this script."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    print script_dir
#------------------------------------------------------------------------
def getScriptPath():
    """Return the directory containing the script being executed (argv[0])."""
    script_file = os.path.realpath(sys.argv[0])
    return os.path.dirname(script_file)
#------------------------------------------------------------------------
def setupEnvironment():
    """Compute the working-folder paths (relative to the script) into globals.

    scratch_folder   -- where the template .docx is unpacked and edited
    template_folder  -- source .docx templates, one per product
    generated_folder -- destination for the generated release notes
    extraction_dir   -- the 'word' subfolder of the unpacked template
    Windows-style '\\' separators are used throughout this script.
    """
    global script_dir
    global scratch_folder
    global generated_folder
    global template_folder
    global new_rn
    global specific_template
    global extraction_dir
    script_dir = getScriptPath()
    print script_dir
    # NOTE(review): '\s' is not a recognized escape so Python keeps it
    # literally; written as-is but '\\scratch' would be clearer.
    scratch_folder = script_dir+'\scratch'
    print scratch_folder
    template_folder = script_dir+'\\templates'
    print template_folder
    generated_folder = script_dir+'\\new_rn'
    print generated_folder
    extraction_dir = scratch_folder+'\\word'
    print extraction_dir
#------------------------------------------------------------------------
def replaceALL(theDocumentData, replacementHash):
    """Apply every placeholder substitution to *theDocumentData* via AdvReplace."""
    return AdvReplace(theDocumentData, replacementHash, '')
#------------------------------------------------------------------------
def getReleaseNotesName():
    """Compose the output .docx filename from the selected product/region/DVN.

    A product name containing '_' is treated as '<product>_<region>' and split.
    NOTE(review): this function mutates the global selected_initDVN (prepends
    a space), so calling it twice would prepend two spaces -- it is called
    once from the main block.
    """
    underscore = "_"
    # NOTE(review): quarterly_release is computed but never read.
    quarterly_release = 0
    global selected_initDVN
    if selected_initDVN.isdigit():
        quarterly_release = 0
    else:
        quarterly_release = 1
    # Prefix the DVN with a space so it joins cleanly into the filename.
    if selected_initDVN:
        selected_initDVN = " "+selected_initDVN
    else:
        selected_initDVN = ""
    if underscore in selected_product:
        u = selected_product.split(underscore)
        deduced_product = u[0]
        deduced_region = u[1]
        print "deduced product and region are:", deduced_product, deduced_region
        rn_name = deduced_product+" "+deduced_region+selected_initDVN+" Release Notes.docx"
    else:
        rn_name = selected_product+" "+selected_region+selected_initDVN+" Release Notes.docx"
    print "rn_name", rn_name
    return rn_name
#------------------------------------------------------------------------
def loadProductTemplates():
    """Fill the global productList from filenames found in template_folder.

    Each template file's base name (text before the first '.') becomes one
    product choice; editor temp files (names containing '~') are skipped.
    If the folder cannot be listed, shows an error dialog and exits.
    """
    global productList
    print template_folder
    dot = "."
    temp_file = "~"
    try:
        for file_name in os.listdir(template_folder):
            p = file_name.split(dot)
            product_name = p[0]
            print product_name
            if temp_file not in product_name:
                productList.append(product_name)
        print productList
    except:
        e = "Error"
        m = "Error. The Release Notes generator is looking for a \"templates\" folder in the same directory where the script is running. \nThis folder needs to be created and is where your release notes templates will be stored."
        ThrowError(e, m, template_folder, "")
        sys.exit()
#------------------------------------------------------------------------
if __name__ == '__main__':
    # 1) Resolve working folders and discover the available templates.
    setupEnvironment()
    loadProductTemplates()
    # 2) Run the input wizard; selections land in the selected_* globals.
    root = Tk()
    root.resizable(0, 0)
    app = App(root)
    root.mainloop()
    # 3) Derive secondary values and the placeholder substitution map.
    createSecondaries()
    replacementHash = getReplacements()
    # 4) Unpack the product's .docx template into the scratch folder.
    specific_template = template_folder+'\\'+selected_product+'.docx'
    print "specific_template", specific_template
    theDirectory = unpackTheOpenXMLFile(specific_template, scratch_folder)
    # 5) Parse the document body plus all header/footer parts.
    filePath = extraction_dir+'\\'+'document.xml'
    headerPath = extraction_dir+'\\'+'header1.xml'
    headerPath2 = extraction_dir+'\\'+'header2.xml'
    headerPath3 = extraction_dir+'\\'+'header3.xml'
    footerPath = extraction_dir+'\\'+'footer1.xml'
    footerPath2 = extraction_dir+'\\'+'footer2.xml'
    theDocumentData = readDocument(filePath)
    theHeaderData = readDocument(headerPath)
    theHeaderData2 = readDocument(headerPath2)
    theHeaderData3 = readDocument(headerPath3)
    theFooterData = readDocument(footerPath)
    theFooterData2 = readDocument(footerPath2)
    # 6) Substitute the ==PLACEHOLDER== tokens in every part.
    documentBody = replaceALL(theDocumentData, replacementHash)
    documentHeader = replaceALL(theHeaderData, replacementHash)
    documentHeader2 = replaceALL(theHeaderData2, replacementHash)
    documentHeader3 = replaceALL(theHeaderData3, replacementHash)
    documentFooter = replaceALL(theFooterData, replacementHash)
    documentFooter2 = replaceALL(theFooterData2, replacementHash)
    # 7) Write the edited parts back into the unpacked package.
    saveElements(documentBody, 'document.xml')
    saveElements(documentHeader, 'header1.xml')
    saveElements(documentHeader2, 'header2.xml')
    saveElements(documentHeader3, 'header3.xml')
    saveElements(documentFooter, 'footer1.xml')
    saveElements(documentFooter2, 'footer2.xml')
    # 8) Re-zip the scratch folder as the final .docx and notify the user.
    rn_name = getReleaseNotesName()
    new_rn = generated_folder+'\\'+rn_name
    print new_rn
    zipdir(scratch_folder, new_rn)
    ThrowError("Process Complete", "Process complete. New Release Notes were generated and can be found here:", new_rn, "\n\n Note: The new Release Notes must be opened and saved before they will be usable.\n\n")
| [
"chris.relaxing@gmail.com"
] | chris.relaxing@gmail.com |
e29b089058e29bb420886f0d8fc1f588b333e46f | a1be3108f88689bf9fc1f9079d38eb0202ede385 | /app.py | e9e5dc6df89a99aeae54039661763be289c8a9a1 | [] | no_license | umeshaS/API | 21ed4f5a5dd1f583a8af2c5ed63ef6882be4bb8b | 07fc970d499f3d7c7274d6b0140e01ae422aab84 | refs/heads/master | 2023-08-13T10:52:15.799446 | 2021-09-21T04:34:18 | 2021-09-21T04:34:18 | 408,685,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | py | import pickle
from flask import Flask, request, jsonify
import translation as tr
from keywordExtraction import KeywordExtractor
from languageIdentification import LanguageIdentifier
from transliteration import Transliterator
from ScriptIdentification import ScriptIdentifier
app = Flask(__name__)
transliterator = Transliterator()
keywordExtractor = KeywordExtractor()
languageIdentifier = LanguageIdentifier()
translator = tr.SinToEngTranslator()
scriptidentifier = ScriptIdentifier()
# Route for language classification / translation / keyword extraction.
@app.route('/languageIdentification', methods=["POST"])
def classification():
    """Process the posted text and return language-analysis results.

    Expects a JSON body of the form {"text": "..."}.  Pipeline:
      1. Detect whether the text is written in English (Latin) script.
      2. If so, check whether it is romanized Sinhala ("Singlish") and,
         if it is, transliterate it to Sinhala script.
      3. Translate the (possibly transliterated) text from Sinhala to
         English and extract keywords from the translation.

    Returns a JSON array:
    [keywords, english_translation, sinhala_text, is_english_script].
    """
    input_sample = request.json["text"]
    # Text fed to the Sinhala->English translator: defaults to the raw input
    # and is replaced by its transliteration when the input turns out to be
    # romanized Sinhala.
    trans_text = input_sample
    isEnglish = scriptidentifier.isEnglish(input_sample)
    if isEnglish == 1:
        # Latin script: decide whether it is actually romanized Sinhala.
        # "[1]" is the identifier's label for romanized Sinhala.
        pred_romanized_lan = languageIdentifier.languageIdentification(input_sample)
        if pred_romanized_lan == "[1]":
            trans_text = transliterator.singlish2sinhala(input_sample)
    # Every branch of the original ended with the same two steps (it also
    # carried dead initializations and debug prints); they are done once here:
    # translate, then extract keywords from the English translation.
    outputTranslator = translator.sinToEngTranslation(trans_text)
    keywords = keywordExtractor.keywordExtraction(outputTranslator)
    return jsonify(keywords, outputTranslator, trans_text, isEnglish)
if __name__ == '__main__':
    # Start the Flask development server on the default host/port.
    app.run()
| [
"it17148450@my.sliit.lk"
] | it17148450@my.sliit.lk |
5137aaffcf7d05ff965a029baca8cbaf89355706 | f4f7cdc448ed15fe3eaf677fcf5e6fb38d6a617b | /ACNetwork/visualization/total_time_visualizer.py | 1d4de46cedd0225eea7e528e0be07125f0c1227c | [] | no_license | reineltJanis/BachelorThesis | 06142efef6aceb7a02bae87f38c7979df67486d2 | 8d2299dd9bfb8f147a42ede959355b0769839837 | refs/heads/master | 2022-12-15T11:16:55.249409 | 2020-05-05T20:13:00 | 2020-05-05T20:13:00 | 260,041,397 | 1 | 0 | null | 2022-06-22T23:13:06 | 2020-04-29T20:41:46 | Python | UTF-8 | Python | false | false | 1,842 | py | import matplotlib.pyplot as plt
import numpy as np
import os, sys, csv
from pathlib import Path
# From https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/barchart.html#sphx-glr-gallery-lines-bars-and-markers-barchart-py
def autolabel(rects, axes):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
axes.annotate('{}'.format(int(np.round(height,0))),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
if __name__ == "__main__":
NODES = 5
RUN = 1
SET = 1
plt.style.use('fivethirtyeight')
fig = plt.figure(1, figsize=(16,12))
labels = []
width = .9
for i in [5,15,30,50]:
for mode in ['default', 'error00', 'error01', 'error50', 'star', 'ring']:
for algorithm in range(1,3):
if i >= 15 and mode != 'default':
break
times_path = Path(f"../results-a{algorithm}-{mode}-n{i:02d}/times.csv")
print(times_path)
if times_path.exists():
label = f"A{algorithm}\n{str(mode).upper()}\n{i} nodes"
labels.append(label)
data = np.loadtxt(times_path.absolute(), usecols=1, delimiter=', ')
rects = plt.bar(label, np.mean(data)/i, width=width, align='center')
autolabel(rects, plt)
fig.suptitle('Average runtime per node (35 datasets)')
plt.xticks(labels, rotation=0, size=8)
plt.ylabel('time in s', size=16)
fig.subplots_adjust(bottom=0.1)
fig.savefig(f"graphics/total_times.png", dpi=200)
plt.show()
| [
"dev@reinelt.online"
] | dev@reinelt.online |
b15ae00c90717a2a67c39cb9e72a1951ed5f1ae4 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_217/ch21_2019_08_26_19_58_29_478795.py | 8e46bdfeb1e79e43246166f70246709b75ed0188 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | def valor_da_conta(valor):
valor = int(input('Qual valor da conta?:' ))
com10% = valor + valor*(10/100)
return com10%
print("Valor da conta com 10%: R${0}".format(com10%)) | [
"you@example.com"
] | you@example.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.