blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0a435d06d21f40db400cf199480df79d7c0e0d0f | dcc491dd2fa4ece68728255d236fa6e784eef92d | /modules/2.78/bpy/types/PythonController.py | a6975a4e8c4fe364b1b578e3084575a187be1d7d | [
"MIT"
] | permissive | cmbasnett/fake-bpy-module | a8e87d5a95d075e51133307dfb55418b94342f4f | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | refs/heads/master | 2020-03-14T16:06:29.132956 | 2018-05-13T01:29:55 | 2018-05-13T01:29:55 | 131,691,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | class PythonController:
mode = None
module = None
text = None
use_debug = None
| [
"nutti.metro@gmail.com"
] | nutti.metro@gmail.com |
88227d50f6dd56d2617ece74114d5da82ce05283 | 57bf3bfaf5efe943bba0508e7054971141b208d1 | /Emily/homework/beachballanime/RandintBallBounceGame.py | 53937b768412e8824d3f20347533f3dc43d6ee99 | [] | no_license | aseafamily/Coding | 318769b6cc887012d3612b739c474a3d7962e69f | fe7e790bed04aa1cd9e972cdd9c318e0028bcdcb | refs/heads/master | 2022-12-31T08:06:11.718027 | 2020-10-26T00:43:07 | 2020-10-26T00:43:07 | 277,988,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,741 | py | import pygame, sys
from random import *

# --- static "fortune board": eight messages separated by red lines ----------
pygame.init()
screen = pygame.display.set_mode([800,720])
screen.fill([229,174,134])
font = pygame.font.Font(None, 50)
rext1 = font.render('You will win the lottery.', 1, (0,0,0))
screen.blit(rext1, [140, 60])
rext2 = font.render('You will never get the COVID-19.', 1, (0,0,0))
screen.blit(rext2, [140, 145])
# typo fix: "tommorow" -> "tomorrow"
rext3 = font.render('Your IQ will jump up by 50 tomorrow.', 1, (0,0,0))
screen.blit(rext3, [140, 225])
rext4 = font.render('Someone will hand you 5 million dollars.', 1, (0,0,0))
screen.blit(rext4, [140, 305])
rext5 = font.render('Your next salary will be $230,00.', 1, (0,0,0))
screen.blit(rext5, [140, 390])
rext6 = font.render('If you run for president, you will win.', 1, (0,0,0))
screen.blit(rext6, [140, 490])
rext7 = font.render('You will win any competition today.', 1, (0,0,0))
screen.blit(rext7, [140, 585])
# typo fix: "tommrow" -> "tomorrow"
rext8 = font.render('You will be completely happy tomorrow.', 1, (0,0,0))
screen.blit(rext8, [140, 675])
# red separator lines between the fortunes
for i in range(7):
    height = (i+1) * 90
    linenumber1 = [[140, height], [800, height]]
    pygame.draw.lines(screen, [255, 0, 0], True, linenumber1, 1)
# --- bouncing ball: moves for a random number of frames, then freezes -------
my_ball = pygame.image.load('beach_ball.png')
x = 0
y = 0
y_speed = 10
running = True
runner = 0
tery_trandom_tumber = randint(0, 250)
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    if runner < tery_trandom_tumber:
        pygame.time.delay(20)
        # erase the previous ball position with the background colour
        pygame.draw.rect(screen, [229,174,134], [x, y, 90, 90], 0)
        y = y + y_speed
        # bounce off the top and bottom edges (ball sprite is 90 px tall)
        if y > screen.get_height() - 90 or y < 0:
            y_speed = - y_speed
        screen.blit(my_ball, [x, y])
        pygame.display.flip()
        runner = runner + 1
pygame.quit()
pygame.quit() | [
"emilyma@gmail.com"
] | emilyma@gmail.com |
5699dafae03660ced229e5fb381de892c3f83a6d | c4f7b067dbf9efa404d446453cdf2b0839d33fe1 | /src/sensorrunner/devices/SPI/ADC/device.py | 48e20b4d57883c0fadef06f30a025389e38cda66 | [] | no_license | JackBurdick/sensorrunner | 90e05e35381363ad28301b0e28579372fd78c179 | 506772d2ec4887b3890e4555b66bf5548910d020 | refs/heads/master | 2023-07-02T18:26:21.418501 | 2021-02-22T02:27:04 | 2021-02-22T02:27:04 | 298,879,591 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,386 | py | from sensorrunner.devices.sensor.SPI.ADC.light.pt19 import PT19
from gpiozero import MCP3008, Device
# from gpiozero.pins.mock import MockFactory
from gpiozero.pins.native import NativeFactory
# Make every gpiozero device use the native (pure-Python) pin driver.
Device.pin_factory = NativeFactory()
class MDC3800:
    """Wrapper for sensors attached to an MCP3008 SPI ADC.

    ``devices_dict`` maps a friendly device name to a dict with at least
    ``channel`` (MCP3008 channel the sensor is wired to) and ``device_type``
    (a key of ``ALLOWED_DEVICES``), plus an optional ``fn_name`` naming the
    method of the sensor wrapper to call (defaults to ``return_value``).
    One MCP3008 reader is created per distinct channel and shared.
    """

    def __init__(
        self,
        name,
        # devices
        devices_dict,
    ):
        # "fn": None --> resort to default fn
        self.ALLOWED_DEVICES = {"pt19": {"device_class": PT19, "fn": None}}

        if devices_dict is None:
            raise ValueError("no devices specified in `device_dict`")

        # TODO: assure pins are valid/acceptable
        # Pins below are the Pi's hardware SPI bus
        # (clock=11, mosi=10, miso=9, select=8).
        channel_to_device = {}
        devices = {}
        # NOTE: loop variable renamed so it no longer shadows the `name`
        # parameter of this constructor.
        for dev_name, dd in devices_dict.items():
            devices[dev_name] = {}
            cur_dev_class = self.ALLOWED_DEVICES[dd["device_type"]]["device_class"]
            if dd["channel"] not in channel_to_device:
                channel_to_device[dd["channel"]] = MCP3008(
                    channel=dd["channel"],
                    clock_pin=11,
                    mosi_pin=10,
                    miso_pin=9,
                    select_pin=8,
                )
            cur_device = channel_to_device[dd["channel"]]
            cur_device_obj = cur_dev_class(cur_device)
            devices[dev_name]["device_type"] = cur_device_obj

            # bug fix: fns must be listed from the sensor wrapper object (the
            # thing we getattr from below), not from the raw MCP3008 reader.
            available_fns = [
                f
                for f in dir(cur_device_obj)
                if callable(getattr(cur_device_obj, f)) and not f.startswith("_")
            ]

            try:
                dev_fn = dd["fn_name"]
            except KeyError:
                dev_fn = None

            if dev_fn is not None:
                if dev_fn not in available_fns:
                    raise ValueError(
                        f"specified fn ({dev_fn}) for {dev_name} not available for {cur_device_obj}.\n"
                        f"please select from {available_fns}"
                    )
                fn_name = dev_fn
            else:
                fn_name = "return_value"

            try:
                devices[dev_name]["fn"] = getattr(devices[dev_name]["device_type"], fn_name)
            except AttributeError:
                # bug fix: getattr raises AttributeError, not KeyError
                raise ValueError(
                    f"specified fn ({fn_name}) for {dev_name} not available for {cur_device_obj}.\n"
                    f"please select from {available_fns}"
                )
        self.devices = devices

    def return_value(self, name, params):
        """Call the configured fn of device ``name`` and return its value.

        ``params``, when truthy, is passed through as keyword arguments.
        Raises ValueError for a missing, non-string, or unknown name (the
        original code *returned* the ValueError object instead of raising).
        """
        if name is None:
            raise ValueError(
                f"no name specified. please select from {self.devices.keys()}"
            )
        if not isinstance(name, str):
            raise ValueError(f"`name` is expected to be type {str}, not {type(name)}")
        try:
            dev_d = self.devices[name]
        except KeyError:
            raise ValueError(
                f"{name} is not available. please select from {self.devices.keys()}"
            )
        if params:
            value = dev_d["fn"](**params)
        else:
            value = dev_d["fn"]()
        return value

    @staticmethod
    def build_task_params(device_name, device_dict):
        """Translate ``device_dict`` into periodic-task entry specs.

        For each component, produces a dict with:
          - ``name``: ``{device_name}_{component}_{fn_name}``
          - ``task``: dotted path of the celery task to run
          - ``run_every``: ``params.schedule.frequency`` from the component
          - ``kwargs``: ``{"dev_dict": <shallow copy of the component dict,
            plus its own name under "name">}``

        A missing or None ``fn_name`` falls back to ``return_value`` —
        consistent with ``__init__`` (the original raised KeyError when the
        key was absent).
        """
        DEFAULT_FN_NAME = "return_value"
        entry_specs = {}
        for comp_name, comp_dict in device_dict.items():
            dev_dict = comp_dict.copy()
            entry_d = {}
            fn_name = comp_dict.get("fn_name")
            if fn_name is None:
                fn_name = DEFAULT_FN_NAME
            entry_d["name"] = f"{device_name}_{comp_name}_{fn_name}"
            # TODO: make more robust
            entry_d["task"] = "sensorrunner.tasks.devices.MDC3800.tasks.MDC3800_run"
            entry_d["run_every"] = comp_dict["params"]["schedule"]["frequency"]
            if not isinstance(dev_dict, dict):
                raise ValueError(
                    f"run params ({dev_dict}) expected to be type {dict}, not {type(dev_dict)}"
                )
            # add component name (to the copy, so the caller's dict is untouched)
            dev_dict["name"] = comp_name
            entry_d["kwargs"] = {"dev_dict": dev_dict}
            entry_specs[comp_name] = entry_d
        return entry_specs
| [
"jackbburdick@gmail.com"
] | jackbburdick@gmail.com |
d38f60b54240168425be4043fc5872cb806287f3 | 9473cf6880c4d32e82474efcd199d78d065fddfb | /app/auto-reply.py | d7130c7a6d770da98739524e06d9133aaef6fcee | [] | no_license | taufiqade/wabot | de6901e62ed2c4100154ae93fbfa8c7cba81d112 | 7e93b69d90a839f952d9a8b55098cb17a0bc6bd1 | refs/heads/master | 2020-03-12T10:45:14.786524 | 2018-04-22T15:34:11 | 2018-04-22T15:34:11 | 130,580,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | import time , json
from webwhatsapi import WhatsAPIDriver
from webwhatsapi.objects.message import Message
from pprint import pprint
# Start a WhatsApp Web session; the user must scan the QR code once.
driver = WhatsAPIDriver()
print("Waiting for QR")
driver.wait_for_login()
print("Bot started")
# Product catalogue: maps a keyword to a dict with 'price' and 'fup' keys.
with open('data/products.json') as data_file:
    products = json.load(data_file)
# Reply template (Indonesian): package name, price, fair-use policy.
txt = '''
*PAKET %s*
*Harga* : %s
*FUP* : %s
'''
"""
match message with products data
"""
# Poll unread chats forever and auto-reply to each unread message.
while True:
    time.sleep(3)
    print('Checking for more messages')
    for contact in driver.get_unread():
        for message in contact.messages:
            if isinstance(message, Message):
                chat_id = message.id
                sender_id = message.chat_id
                sender = str(message.sender)
                content = message.content.lower()
                # If the text mentions any known product keyword, send one
                # templated reply per matching product...
                if any(reg in content for reg in products):
                    for reg in products:
                        if reg in content:
                            reply_msg = txt % (reg.upper(), products[reg]['price'], products[reg]['fup'])
                            contact.chat.send_message(reply_msg)
                else:
                    # ...otherwise apologise ("sorry, the data you are looking
                    # for is currently unavailable" -- string kept verbatim).
                    reply_msg = 'maaf data yang Anda cari tidak tersedia saat ini.'
                    contact.chat.send_message(reply_msg)
                # contact.chat.send_message(reply_msg)
# contact.chat.send_message(reply_msg) | [
"taufiqadesurya@gmail.com"
] | taufiqadesurya@gmail.com |
eb1babf920093b006230d7ec6c101e59b897093d | cf91f1a6354ba7a803af8382e0ef8bde6175845e | /tests/test_with.py | 1fd2a1f616ddef483c4ca7b17a027e7e1cd824b0 | [] | permissive | mueslo/python-progressbar | a230dc1be0af48015215d10a6b21e1d15005ccb4 | 5621a26b51cddc3ce3f2b62a9e32a28eb60a2f84 | refs/heads/master | 2022-11-10T18:23:08.242413 | 2020-06-25T19:36:56 | 2020-06-25T19:36:56 | 275,635,088 | 0 | 0 | BSD-3-Clause | 2020-06-28T17:29:57 | 2020-06-28T17:29:56 | null | UTF-8 | Python | false | false | 429 | py | import progressbar
def test_with():
    # Context-manager usage: the bar is finished automatically on exit.
    with progressbar.ProgressBar(max_value=10) as bar:
        for step in range(10):
            bar.update(step)
def test_with_stdout_redirection():
    # Same as test_with, but stdout is captured and replayed around the bar.
    with progressbar.ProgressBar(max_value=10, redirect_stdout=True) as bar:
        for step in range(10):
            bar.update(step)
def test_with_extra_start():
    # Redundant start() calls inside the context manager must be harmless.
    with progressbar.ProgressBar(max_value=10) as bar:
        bar.start()
        bar.start()
| [
"Wolph@wol.ph"
] | Wolph@wol.ph |
7502869ebf3862b842a7424cb0d677a4c7f07ef1 | 2a7b3ed9b819425cb8df80c54df0297f6c952f21 | /VisIt_read_vtk/example_read_txt_and_bin.py | 58279eef36df4463a3388a244a8c846774403333 | [] | no_license | EmmanuelSchaan/PythonSnippets | 76287ead36208b53c617509612438e127767cb8b | 410cf42fdb5fd9c9e37fee580e7a2cb7293cac9d | refs/heads/master | 2021-03-28T06:18:19.654831 | 2020-03-20T22:57:22 | 2020-03-20T22:57:22 | 247,845,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,294 | py | from numpy import *
from pylab import *
from scipy import *
from math import *
import struct
#############################################################################################
#############################################################################################
#############################################################################################
#############################################################################################
def readvtk(nomfichier):
  """Read a legacy-VTK STRUCTURED_POINTS dump and return header + fields.

  Layout expected, in order: an ASCII header (time on line 2, vertex counts,
  origin, spacing), then one scalar field D and two 3-component vector
  fields M and B, all stored as big-endian float32 ('>f'), one value per
  cell, i fastest / k slowest.

  Returns (t, Nx, Ny, Nz, x0, y0, z0, dx, dy, dz, D, M1, M2, M3, B1, B2, B3).
  NOTE(review): the D/M/B field names in the file are echoed (printed) but
  never validated against what this function assumes -- confirm the writer.
  """
  #################################### open file #########################
  data = open(nomfichier, 'rb')
  #################################### read header #########################
  data.readline() # ignore 1 line
  line = data.readline()
  t = line.split()[4]
  t = float( t[:len(t)-1] ) # remove the trailing comma on the time value
  data.readline()
  data.readline() # ignore 2 lines
  line = data.readline()
  [other, Nx, Ny, Nz] = line.split() # read nb of vertices
  [Nx, Ny, Nz] = [int(Nx)-1, int(Ny)-1, int(Nz)-1] # vertices - 1 = nb of cells
  line = data.readline()
  [other, x0, y0, z0] = line.split()
  [x0, y0, z0] = [float(x0), float(y0), float(z0)]
  line = data.readline()
  [other, dx, dy, dz] = line.split()
  [dx, dy, dz] = [float(dx), float(dy), float(dz)]
  data.readline() # ignore 1 line
  #################################### print header #########################
  print '##################################'
  print 't= ', t
  print 'Nx, Ny, Nz = ', Nx, Ny, Nz
  print 'x0, y0, z0 = ', x0, y0, z0
  print 'dx, dy, dz = ', dx, dy, dz
  print '##################################'
  #################################### read D #########################
  print 'reading', data.readline()
  data.readline() # ignoring 1 line
  D = zeros((Nx, Ny, Nz))
  for k in range (Nz):
    for j in range (Ny):
      for i in range (Nx):
        bindata = data.read(4) # reads the binary for one number
        D[i,j,k] = struct.unpack('>f', bindata)[0] # big-endian float32
  #################################### read M1, M2, M3 #########################
  data.readline() # ignore 1 line
  print 'reading', data.readline()
  M1 = zeros((Nx, Ny, Nz))
  M2 = zeros((Nx, Ny, Nz))
  M3 = zeros((Nx, Ny, Nz))
  # vector fields store the 3 components interleaved per cell
  for k in range (Nz):
    for j in range (Ny):
      for i in range (Nx):
        bindata = data.read(4) # M1
        M1[i,j,k] = struct.unpack('>f', bindata)[0]
        bindata = data.read(4) # M2
        M2[i,j,k] = struct.unpack('>f', bindata)[0]
        bindata = data.read(4) # M3
        M3[i,j,k] = struct.unpack('>f', bindata)[0]
  #################################### read B1, B2, B3 #########################
  data.readline() # ignore 1 line
  print 'reading', data.readline()
  B1 = zeros((Nx, Ny, Nz))
  B2 = zeros((Nx, Ny, Nz))
  B3 = zeros((Nx, Ny, Nz))
  for k in range (Nz):
    for j in range (Ny):
      for i in range (Nx):
        bindata = data.read(4) # B1
        B1[i,j,k] = struct.unpack('>f', bindata)[0]
        bindata = data.read(4) # B2
        B2[i,j,k] = struct.unpack('>f', bindata)[0]
        bindata = data.read(4) # B3
        B3[i,j,k] = struct.unpack('>f', bindata)[0]
  #################################### close file #########################
  data.close()
  return t, Nx, Ny, Nz, x0, y0, z0, dx, dy, dz, D, M1, M2, M3, B1, B2, B3
#############################################################################################
#############################################################################################
#############################################################################################
#############################################################################################
def slice2d(S, x0, dx, y0, dy):
  """Draw a pcolor plot (figure 1) of the 2-D slice S.

  xx/yy hold the cell-corner coordinates, hence one extra row and column
  compared to S, as pcolor requires.
  """
  Nx = len(S[:, 0])
  Ny = len(S[0, :])
  xi = x0
  xf = x0 + Nx*dx
  yi = y0
  yf = y0 + Ny*dy
  xx = zeros((Nx+1,Ny+1)) # the dimension has to be one greater than for S, for pcolor
  yy = zeros((Nx+1,Ny+1))
  for i in range (Nx+1):
    yy[i,:] = linspace(yi, yf, Ny+1)
  for j in range (Ny+1):
    xx[:,j] = linspace(xi, xf, Nx+1)
  figure(1)
  hold(False)
  pcolor(xx, yy, S)
  #xlabel(r'$x$', fontsize=16)
  #ylabel(r'$y$', fontsize=16)
  hold(True)
  colorbar()
  axis('scaled')
  #title(r'pcolor', fontsize=20)
  draw()
#############################################################################################
#############################################################################################
#############################################################################################
#############################################################################################
def fft_z(S): # here no remapping needed because kx=ky=0
  """Spectrum of S along z, averaged over all (x, y) columns.

  Returns (K, Sk): K are wavenumbers 0..Nz-1 in units of 2*pi/Lz, and
  Sk[k] = sum_{i,j} |FFT_z(S)[i,j,k]| / (Nx*Ny*Nz).
  """
  Nx = len(S[:, 0, 0])
  Ny = len(S[0, :, 0])
  Nz = len(S[0, 0, :])
  K = linspace(0, Nz-1, Nz) # array of wave vectors in unit 2*pi/Lz
  # vectorized: one FFT along axis 2 replaces the original Python double
  # loop over (i, j) -- identical result, C-speed
  Sk = abs( fft(S, axis=2) ).sum(axis=(0, 1)) / (Nx*Ny*Nz)
  return K, Sk # since S is real, a(k) = a(-k) so we reduce the interval
#############################################################################################
#############################################################################################
#############################################################################################
#############################################################################################
def unwrap(S, x0, dx, dy, q, Omega0, t): # coord change in real space before fft
  """Undo the shearing y-offset in real space before taking FFTs.

  Each x-column is shifted in y by -q*Omega0*t*x with linear interpolation
  between the two neighbouring cells and periodic wrap-around in y.
  NOTE(review): Dj must behave as an integer for the indexing below; with
  `from math import *` last in this file's imports, `floor` is math.floor
  (an int under Python 3, a float under Python 2) -- confirm which
  interpreter/import order is in effect.
  """
  Nx = len(S[:, 0, 0])
  Ny = len(S[0, :, 0])
  Nz = len(S[0, 0, :])
  newS = zeros((Nx, Ny, Nz))
  for i in range(Nx):
    for j in range(Ny):
      x = (x0+dx/2) + i * dx # x coordinate of the center of the current cell
      deltay = -q*Omega0*t * x
      dj = deltay/dy - Dj # fractional remainder of the offset, in [0, 1)
      Dj = floor( deltay/dy ) # integer nb of j to offset
      newj = j + Dj
      newS[i,j,:] = (1-dj) * S[i, newj%Ny, :] + dj * S[i, (newj+1)%Ny, :] # interpolation, with periodicity
  return newS
def rewrap(S, x0, dx, dy, q, Omega0, t): # coord change in fourier space after fft, S is the fft
  """Apply the shearing kx-offset in Fourier space (S is the FFT array).

  Each ky-row is shifted in kx by -q*Omega0*t*ky, with linear interpolation
  between the two neighbouring modes and periodic wrap-around in kx.
  Mirrors `unwrap`, which does the analogous shift in real space.
  """
  Nx = len(S[:, 0, 0])
  Ny = len(S[0, :, 0])
  Nz = len(S[0, 0, :])
  dkx = 2*pi / ( (Nx-1) * dx )
  dky = 2*pi / ( (Ny-1) * dy )
  newS = zeros((Nx, Ny, Nz))
  for i in range(Nx):
    for j in range(Ny):
      ky = j * dky # value of ky corresponding to the current j
      deltakx = -q*Omega0*t * ky
      Di = int(floor( deltakx/dkx )) # integer part (int() so it can index)
      di = deltakx/dkx - Di # fractional remainder, in [0, 1)
      newi = i + Di
      # bug fix: the original indexed with the undefined name `inew`
      # (NameError); the variable defined above is `newi`.
      newS[i, j, :] = (1-di) * S[newi%Nx, j, :] + di * S[(newi+1)%Nx, j, :]
  return newS
def fft3d(S):
  # 3-D FFT amplitude of the (unwrapped) field, normalised by the total
  # number of cells, together with the integer wavenumber axis for each dim.
  nx, ny, nz = len(S[:, 0, 0]), len(S[0, :, 0]), len(S[0, 0, :])
  axes = [linspace(0, n - 1, n) for n in (nx, ny, nz)]
  amplitude = abs(fftn(S)) / (nx * ny * nz)
  return axes[0], axes[1], axes[2], amplitude
#############################################################################################
#############################################################################################
#############################################################################################
#############################################################################################
# --- driver: read one VTK dump and plot spectra of the M2 field -------------
nomfichier = 'HGBffttest.0000.vtk'
q = 2.1
Omega0 = 1.e-3
[t, Nx, Ny, Nz, x0, y0, z0, dx, dy, dz, D, M1, M2, M3, B1, B2, B3] = readvtk(nomfichier)
# colour plot of the k=31 slice of M2 (figure 1, drawn inside slice2d)
slice2d(M2[:, :, 31], x0, dx, y0, dy)
# 1-D z-spectrum (figure 2); [:Nz/2+1] keeps the non-redundant half
# (integer division: this is Python-2 style code)
[Kz, M2kz] = fft_z(M2)
figure(2)
hold(False)
plot(Kz[:Nz/2+1], M2kz[:Nz/2+1])
hold(True)
# full 3-D spectrum, plotted along each axis (figures 3-5)
Kx, Ky, Kz, M2k = fft3d(M2)
figure(3)
hold(False)
plot(Kz, M2k[0, 0, :], 'r')
hold(True)
figure(4)
hold(False)
plot(Kx, M2k[:, 0, 0], 'b')
hold(True)
figure(5)
hold(False)
plot(Ky, M2k[0, :, 0], 'g')
hold(True)
"emmanuel.schaan@gmail.com"
] | emmanuel.schaan@gmail.com |
d0d37e651c2edae028110931ad32ab21bd065f7f | 475d4a68cffb87116675c948fe6e90ba9e10ec3a | /articles/urls.py | 0ab5e7a210257371581aaaaab0b560ad6a9b7cf5 | [] | no_license | geoglyphmusic/susancharles | 118ecf30a114c88a39b19a9ff03ce9026909005a | 3f0ad1ec01cd70be1ac94dccdc135bdeecafbe06 | refs/heads/master | 2023-02-14T13:44:39.688311 | 2020-12-29T13:50:08 | 2020-12-29T13:50:08 | 315,355,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | from django.urls import path
from . import views
# Routes for the articles app: the index page plus one detail page per slug.
urlpatterns = [
    path('', views.articles_main_view, name='articles'),
    path('<article_slug>', views.article_view, name='article'),
]
| [
"chrischarles89@yahoo.co.uk"
] | chrischarles89@yahoo.co.uk |
01fd76371431a37e8804b4f2de5e71eb488b3154 | 0e9f73d2ef1239b22e049ef6338362da7dbfb122 | /source/web/Django/FatQuantsDjango/FatQuantsDjango/ticker/migrations/0097_auto_20190514_2147.py | 5d4d4dab7faf5ac58c7e11ed8ee2ae65fe9af49c | [] | no_license | Andy-Mason/FatQuants | 3c4bfafc29834af76b0be40e93b0e210e0ef5056 | edd0e98f4599ef91adbdf4179164769ddd66c62a | refs/heads/master | 2023-01-11T10:57:50.563742 | 2021-08-11T19:04:59 | 2021-08-11T19:04:59 | 73,127,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | # Generated by Django 2.1.7 on 2019-05-14 20:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds two optional columns to Ticker --
    # a nullable float `product_leverage` and a 3-char `unit_type` choice
    # field (Acc/Inc) defaulting to ''. Do not hand-edit the operations.

    dependencies = [
        ('ticker', '0096_auto_20190514_2147'),
    ]

    operations = [
        migrations.AddField(
            model_name='ticker',
            name='product_leverage',
            field=models.FloatField(blank=True, db_column='product_leverage', null=True, verbose_name='Product Leverage'),
        ),
        migrations.AddField(
            model_name='ticker',
            name='unit_type',
            field=models.CharField(blank=True, choices=[('Acc', 'Accumulation'), ('Inc', 'Income')], db_column='unit_type', default='', max_length=3, verbose_name='Unit Type'),
        ),
    ]
| [
"Andy-Mason@users.noreply.github.com"
] | Andy-Mason@users.noreply.github.com |
d35785c21782919b684d35e141d3e18080ac6950 | 53716af48679707648ee0e004d6d22d22a4acda7 | /HW4/homework04.py | ff5f9c5d8e7b3b69770db840572cef4f10abc552 | [] | no_license | LDercher/Algorithms | d1b822d6190dd3bcada6307ce038e9826916920c | 944e8c41aa4db5e5e825a3a1d750a2f05332c636 | refs/heads/master | 2020-03-27T06:31:12.328733 | 2018-12-06T18:36:35 | 2018-12-06T18:36:35 | 146,112,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,363 | py | # imports {{{1
from __future__ import division
from copy import deepcopy
from random import randrange
from collections import deque
from sys import *
#---------------------------------------------------------------------------}}}1
class AdjList: # {{{1
  # Adjacency-list representation of a graph (directed or undirected).
  # Undirected graphs have edge (s,t) if and only if they have edge (t,s);
  # a directed graph might have (s,t) without (t,s).
  #   AdjList.adj      the adjacency list: adj[s] = sorted out-neighbors of s
  #   AdjList.rev      adjacency list of the reverse graph (in-neighbors)
  #   AdjList.directed bool, whether the graph is directed
  #   AdjList.nodes    range(n)
  # Edges may be given at construction or added with add_edge. If A is an
  # AdjList then A[i] is node i's adjacency list, len(A) is the number of
  # nodes (*not* edges), and str(A) is a readable dump (last 3 methods).

  def __init__(self, num_nodes, edges = (), directed = False): # {{{
    # `edges` defaults to an immutable tuple (a shared mutable [] default is
    # a classic Python pitfall); any iterable of (s,t) pairs works.
    self.nodes = range(num_nodes)
    self.adj = [ [] for _ in self.nodes ]
    self.rev = [ [] for _ in self.nodes ]
    self.directed = directed

    for (s,t) in edges:
      self.add_edge(s,t)
    self.sort()
  #--------------------------------------------------------------------------}}}
  def add_edge(self, s, t, try_directed = True): # {{{
    # Adds an edge (s,t). If the graph is undirected, adds (t,s) as well.
    if t not in self.adj[s]:
      self.adj[s].append(t)
      self.rev[t].append(s)
    if not self.directed and try_directed:
      self.add_edge(t, s, try_directed = False)
  #--------------------------------------------------------------------------}}}
  def del_edge(self, s, t, try_directed = True): # {{{
    # Deletes edge (s,t) if it exists; for undirected graphs (t,s) too.
    try:
      t_index = self.adj[s].index(t)
      del self.adj[s][t_index]
      s_index = self.rev[t].index(s)
      # bug fix: the entry must come out of rev[t]; the original deleted
      # from adj[t], corrupting the neighbor lists.
      del self.rev[t][s_index]
    except ValueError:
      pass

    if not self.directed and try_directed:
      self.del_edge(t, s, try_directed = False)
  #--------------------------------------------------------------------------}}}
  def has_edge(self, s, t): # {{{
    return t in self.adj[s]
  #--------------------------------------------------------------------------}}}
  def has_edge_rev(self, s, t): # {{{
    # True when (t,s) is an edge, i.e. (s,t) is an edge of the reverse graph.
    return t in self.rev[s]
  #--------------------------------------------------------------------------}}}
  def degree(self, s): # {{{
    # Undirected: number of neighbors. Directed: out-degree + in-degree.
    if not self.directed:
      return len(self.adj[s])

    out_deg = len(self.adj[s])
    in_deg = len(self.rev[s])
    return out_deg + in_deg
  #--------------------------------------------------------------------------}}}
  def sort(self): # {{{
    # Sort all adjacency lists (kept sorted for deterministic output).
    for n in self.nodes:
      self.adj[n] = sorted(self.adj[n])
      self.rev[n] = sorted(self.rev[n])
  #--------------------------------------------------------------------------}}}
  def reverse(self): # {{{
    # Returns the reverse graph: every edge (s,t) becomes (t,s).
    # bug fixes: the constructor takes the *number* of nodes (the original
    # passed the range object itself), and adj/rev must be swapped (the
    # original copied them straight across, returning an identical graph).
    rev_adjlist = AdjList(len(self.nodes), directed = self.directed)
    rev_adjlist.adj = deepcopy(self.rev)
    rev_adjlist.rev = deepcopy(self.adj)
    return rev_adjlist
  #--------------------------------------------------------------------------}}}
  def __getitem__(self, node): # {{{
    return self.adj[node]
  #--------------------------------------------------------------------------}}}
  def __len__(self): # {{{
    return len(self.nodes)
  #--------------------------------------------------------------------------}}}
  def __str__(self): # {{{
    ret = ""
    for n in self.nodes:
      neighbors = [ str(i) for i in self.adj[n] ]
      ret += str(n) + ": " + " ".join(neighbors) + "\n"
    return ret[:-1]
  #--------------------------------------------------------------------------}}}
#----------------------------------------------------------------------------}}}1
def BFS(G, s): # {{{
  # Breadth-first search of G from source s. Returns (tree, dist): a BFS
  # tree rooted at s (a directed AdjList) and the array of hop distances
  # from s (-1 for unreachable nodes). Uses collections.deque, whose
  # popleft/append from either end are O(1):
  # https://docs.python.org/2/library/collections.html#collections.deque
  seen = [False] * len(G)
  dist = [-1] * len(G)
  seen[s] = True
  dist[s] = 0

  BFS_Tree = AdjList(len(G), directed=True)
  frontier = deque([s])
  while frontier:
    u = frontier.popleft()
    for v in G[u]:
      if seen[v]:
        continue
      seen[v] = True
      dist[v] = dist[u] + 1
      BFS_Tree.add_edge(u, v)
      frontier.append(v)

  return BFS_Tree, dist
#----------------------------------------------------------------------------}}}
def predecessors(BFS_Tree, u, stop_at=None): # {{{
  # Return an array of predecessors of u in the BFS tree. The last element
  # will be the root, and the first will be u. If stop_at is specified, then
  # stop at that ancestor instead of the root of the tree.
  # bug fix: removed leftover Python-2 debug `print` statements (a syntax
  # error under Python 3 and stdout noise under Python 2).
  preds = [u]
  parent = u
  while len(BFS_Tree.rev[parent]) != 0 and parent != stop_at:
    parent = BFS_Tree.rev[parent][0]
    preds.append(parent)
  return preds
#----------------------------------------------------------------------------}}}
def common_ancestor_paths(BFS_Tree, u, v): # {{{
  # u and v live in the same BFS tree and therefore share an ancestor c
  # (at worst the root). Returns (U, V): the tree path u -> c and v -> c.
  chain_u = predecessors(BFS_Tree, u)
  chain_v = predecessors(BFS_Tree, v)
  # Peel the shared tail (root side) off both chains until they diverge;
  # the last element peeled is the lowest common ancestor.
  while len(chain_u) != 0 and len(chain_v) != 0 and chain_u[-1] == chain_v[-1]:
    common_ancestor = chain_u.pop()
    chain_v.pop()
  return ( predecessors(BFS_Tree, u, stop_at=common_ancestor),
           predecessors(BFS_Tree, v, stop_at=common_ancestor) )
#----------------------------------------------------------------------------}}}
def is_cycle(G, seq): # {{{
  # True when seq traces a cycle in G: every consecutive pair must be an
  # edge, and so must (last, first). The empty sequence and 2-sequences
  # (out-and-back over one edge) never count.
  if len(seq) in (0, 2):
    return False
  for prev_node, cur_node in zip(seq, seq[1:]):
    if not G.has_edge(prev_node, cur_node):
      return False
  return G.has_edge(seq[-1], seq[0])
#----------------------------------------------------------------------------}}}
def findCycle(G): # {{{
  # Find a cycle in undirected G if it exists. If one is found, return an array
  # of the nodes in the cycle. If one is not found, return the python value
  # None. For example, if we have a 5-cycle 1 -- 0 -- 5 -- 6 -- 1, then return
  # [1,0,5,6] (or any cyclic permutation of this list). A loop 1 -- 1 is a
  # 0-cycle and you should return [1]. Things like 1 -- 2 -- 1 don't count as
  # cycles since you have to take the same edge back to 1.
  # Implementation: run the recursive DFS helper isCyclic from every
  # not-yet-seen node; the helper fills `cyc` in place while the recursion
  # unwinds, appending the node where the cycle closed twice (hence the pop()
  # below). NOTE(review): the parent is tracked by value (-1 at roots), so
  # behaviour on self-loops / 2-cycles should be verified against the spec
  # above before relying on it.
  # Mark all the vertices as not visited
  seen =[False]*(len(G.nodes))
  cyc = []
  # Call the recursive helper function to detect a cycle in each
  # DFS tree (one per connected component)
  for i in range(len(G.nodes)):
    if seen[i] ==False:
      isCyclic(G,i,seen,-1,cyc)
  if cyc != []:
    cyc.pop() #helper function added node where cycle began twice. Popped to remove
    return cyc
  else:
    return None
#----------------------------------------------------------------------------}}}
def isCyclic(G,v,seen,parent,cyc):
  # DFS helper for findCycle. Explores from v, marking nodes in `seen`.
  # `parent` is the node we arrived from (-1 at a root); reaching any *other*
  # already-seen neighbor means a cycle was closed. On success the cycle's
  # nodes are appended to `cyc` while the recursion unwinds and the (truthy,
  # non-empty) list is returned; returns None when no cycle is found.
  # Mark the current node as seen
  seen[v]= True
  # Recurse on all the vertices adjacent to this vertex
  for i in G.adj[v]:
    # If the node is not seen then recurse on it
    if seen[i]==False :
      if(isCyclic(G,i,seen,v,cyc)):
        cyc.append(i)
        return cyc
    # If an adjacent vertex is seen and not the parent of the current node
    # then there is a cycle
    elif parent!=i:
      cyc.append(i)
      return cyc
  return None
def randgraph(num_nodes): # {{{
  # Random undirected graph on num_nodes nodes with about phi*num_nodes
  # edges (phi = golden ratio ~ 1.618). Duplicate draws are skipped rather
  # than retried, so the edge count is an upper bound; self-loop draws
  # (s, s) are possible. NOTE: `xrange` makes this Python-2 only.
  phi = (1 + 5**0.5)/2
  num_edges = int( num_nodes*phi )

  G = AdjList(num_nodes)
  for _ in xrange(num_edges):
    new_edge = (randrange(num_nodes), randrange(num_nodes))
    if not G.has_edge( *new_edge ):
      G.add_edge( *new_edge )
  G.sort()
  return G
#----------------------------------------------------------------------------}}}
# You can check your findCycle implementation by running this several times and
# checking the output:
# A = randgraph(randrange(25))
# Fixed 5-node example: 2 -- 3 -- 4 -- 2 form a triangle, so a cycle exists.
A = AdjList(5)
A.add_edge(0,1)
A.add_edge(0,2)
A.add_edge(2,3)
A.add_edge(2,4)
A.add_edge(3,4)
print A
C = findCycle(A)
print C
exit()

# NOTE: everything below is unreachable because of the exit() above; remove
# that call to run the randomized stress test. (`print`/`xrange` are Python 2.)
# Once you think your implementation works in general, you might try automating
# the above test with this:
for _ in xrange(10**5):
  A = randgraph(randrange(25))
  C = findCycle(A)
  if C is not None and not is_cycle(A, C):
    print A, C
    break
exit()
| [
"luke.dercher@gmail.com"
] | luke.dercher@gmail.com |
385053382cb462ca295e3ea3ca1df86b6ad1b044 | 99b2aff89dcec2f43cee32a6bdd4c0c43d6c51fa | /tests/contract_tests/growl_tdg_garden/test_growl_tdg_garden_pick_intial_id.py | 1782590265f597e5d879efb03aac96504f4f4d5d | [
"MIT"
] | permissive | baking-bad/pytezos | c4248bde49a5b05521b8cc51eeca588b1a721660 | 19747e3acec2141f06e812025673f497fc07e2d4 | refs/heads/master | 2023-07-06T21:57:09.572985 | 2023-07-05T11:45:27 | 2023-07-05T11:45:27 | 169,243,460 | 115 | 43 | MIT | 2023-07-04T16:28:09 | 2019-02-05T13:12:50 | Python | UTF-8 | Python | false | false | 1,885 | py | import json
from os.path import dirname
from os.path import join
from unittest import TestCase
from pytezos.michelson.forge import forge_micheline
from pytezos.michelson.forge import unforge_micheline
from pytezos.michelson.program import MichelsonProgram
# NOTE(review): these module constants look like template leftovers -- the
# test class below hard-codes its own paths and re-assigns cls.entrypoint.
folder = 'typed_minter'
entrypoint = 'mint_TYPED'
class MainnetOperationTestCaseGROWL_TDG_GARDEN(TestCase):
    """Replay-style tests for a recorded mainnet operation of this contract.

    Fixtures live next to this file: ``__script__.json`` holds the contract
    script and ``pick_intial_id.json`` a recorded ``pick_intial_id`` call.
    NOTE(review): the stray ``f''`` path segments look like generator
    artifacts; ``join(dirname(__file__), '', name)`` is equivalent to
    ``join(dirname(__file__), name)``.
    """
    @classmethod
    def setUpClass(cls):
        # Parse the contract script once for all tests.
        with open(join(dirname(__file__), f'', '__script__.json')) as f:
            script = json.loads(f.read())
        cls.program = MichelsonProgram.match(script['code'])
        # Recorded operation for the `pick_intial_id` entrypoint.
        with open(join(dirname(__file__), f'', f'pick_intial_id.json')) as f:
            operation = json.loads(f.read())
        cls.entrypoint = f'pick_intial_id'
        cls.operation = operation
        # cls.maxDiff = None
    def test_parameters_growl_tdg_garden(self):
        # Decode -> python object -> re-encode (readable) -> decode must
        # round-trip to the same python object.
        original_params = self.program.parameter.from_parameters(self.operation['parameters'])
        py_obj = original_params.to_python_object()
        # pprint(py_obj)
        readable_params = self.program.parameter.from_parameters(original_params.to_parameters(mode='readable'))
        self.assertEqual(py_obj, readable_params.to_python_object())
        self.program.parameter.from_python_object(py_obj)
    def test_lazy_storage_growl_tdg_garden(self):
        # Smoke test: merging the recorded lazy storage diff must not raise.
        storage = self.program.storage.from_micheline_value(self.operation['storage'])
        lazy_storage_diff = self.operation['lazy_storage_diff']
        extended_storage = storage.merge_lazy_diff(lazy_storage_diff)
        py_obj = extended_storage.to_python_object(try_unpack=True, lazy_diff=True)
        # pprint(py_obj)
    def test_parameters_forging(self):
        # Forging then unforging the parameter micheline must be the identity.
        expected = self.operation['parameters'].get('value', {'prim': 'Unit'})
        actual = unforge_micheline(forge_micheline(expected))
        self.assertEqual(expected, actual)
| [
"noreply@github.com"
] | baking-bad.noreply@github.com |
7b1c2611686a4f3edec1423c2f160bef45a811b7 | b39dc1024655e9db5433dbfe326a5dd47d75c3c1 | /myapps/art/views.py | 4dc27b6f66d89509270e441b62ff54af177e72fe | [] | no_license | cjxxu/A_Fiction_web | 381ab9e52714156879ee44978b450d1156346b61 | f9f0fc40bc27f5307de8c61bcc75fca82c75d8ed | refs/heads/master | 2021-06-22T13:16:30.989697 | 2020-12-10T09:05:35 | 2020-12-10T09:05:35 | 144,845,320 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,808 | py | from django.http import JsonResponse
from django.shortcuts import render
from redis_ import rd
# Create your views here.
from django.views.decorators.cache import cache_page
from art.models import Art
from user import helper
import redis_
from art import tasks
@cache_page(30)
def show(request,id):
    """Render the article detail page for article `id` (cached for 30s)."""
    login_user = helper.getLoginInfo(request) # read login info from the session
    # Load the article being read.
    art = Art.objects.get(pk=id)
    # Record this read in the Redis read ranking (ReadTopRank).
    redis_.incrTopRank(id)
    readTopRank = redis_.getReadTopRank(5) # top-5 list of (article, score) pairs
    # locals() hands login_user, art and readTopRank to the template.
    # NOTE(review): @cache_page caches the whole response per URL, so the
    # first viewer's login_user is served to everyone for 30s -- confirm
    # this is intended.
    return render(request,'art/show.html',locals())
def qdArt(request,id):
    """Start an asynchronous "grab to read" attempt for article `id`.

    Returns JSON: code 101 when the user is not logged in, otherwise
    code 201 while the grab task runs in the background.
    """
    # Current logged-in user info from the session.
    login_user = helper.getLoginInfo(request)
    if not login_user:
        return JsonResponse({'msg':'请先登录','code':101})
    # Bug fix: the user id is stored under the 'id' key (see queryQDState);
    # the old code called login_user.get(id), i.e. a lookup by the *article*
    # id passed to this view, which yields None for normal sessions.
    tasks.qdTask.delay(login_user.get('id'), id)  # run asynchronously (Celery delay)
    return JsonResponse({'msg':'正在抢读','code':201})
def queryQDState(request,id):
    """Poll the state of a "grab to read" attempt for article `id`.

    JSON codes: 101 not logged in, 200 grab succeeded, 201 still grabbing,
    300 grab failed.
    """
    login_user = helper.getLoginInfo(request)
    if not login_user:
        return JsonResponse({'msg':'请先登录','code':101})
    uid = login_user.get('id')
    # if rd.hexists('qdArt',uid):
    #     # When one user grabs two books, query the grab state of the most
    #     # recent id rather than the previously grabbed one.
    #     qdId = rd.hget('qdArt', uid) # id of the grabbed book; qdId may differ from id
    # NOTE(review): login_user.get('id') is always truthy for a logged-in
    # user (and `uid` above is unused), so the success branch below is taken
    # unconditionally and the hlen checks are dead code. The commented-out
    # lines suggest the intended condition was rd.hexists('qdArt', uid) --
    # confirm against the qdTask implementation before changing.
    if login_user.get('id'):
        art = Art.objects.get(pk=id)
        return JsonResponse({'msg':'抢读成功','code':200,
                             'art':{'title':art.title,
                                    'author':art.author}
                             })
    if rd.hlen('qdArt') < 5:
        return JsonResponse({'msg': '抢读中', 'code': 201})
    else:
        return JsonResponse({'msg': '抢读失败', 'code': 300})
| [
"747603365@qq.com"
] | 747603365@qq.com |
136a40a1eafdcd384e82287c18065fea36cbab7b | d93b0fdaa67e3fc4e126038ed27de2df29f04b7e | /src/pyats/contrib/creators/file.py | f86e71aa655d84f4f99329cec5c6d6e1b611f769 | [
"Apache-2.0"
] | permissive | egreenspan2/pyats.contrib | b3c3eef11e94bcc842d77d6c2d9ca7154289012a | 83184ae9e5e580ae53a08b898696cef721a3dd72 | refs/heads/master | 2022-07-18T15:41:10.261846 | 2020-05-05T23:50:01 | 2020-05-05T23:50:01 | 262,427,040 | 0 | 0 | Apache-2.0 | 2020-05-08T20:56:07 | 2020-05-08T20:56:07 | null | UTF-8 | Python | false | false | 6,181 | py | import os
import xlrd
import csv
import pathlib
from pyats.topology import loader
from .creator import TestbedCreator
class File(TestbedCreator):
    """ File class (TestbedCreator)
    Creator for the 'file' source. Takes in a CSV or Excel file and outputs the
    corresponding testbed object or file. Alternatively, it can take in a folder
    as path and converts all the CSV and Excel files inside.
    Args:
        path ('str'): The path of the input CSV/Excel file or a folder.
        recurse ('bool') default=False: If a folder is passed in, whether or not
            traversal should include subdirectories.
        encode_password ('bool') default=False: Should generated testbed encode
            its passwords.
    CLI Argument | Class Argument
    ---------------------------------------------
    --path=value | path=value
    --encode-password | encode_password=True
    -r | recurse=True
    pyATS Examples:
        pyats create testbed file --path=test.csv --output=testbed.yaml
        pyats create testbed file --path=folder --output=testbeds -r
    Examples:
        # Create testbed from test.csv with encoded password
        creator = File(path="test.csv", encode_password=True)
        creator.to_testbed_file("testbed.yaml")
        creator.to_testbed_object()
    """
    # NOTE(review): the attributes used below (self._path, self._recurse,
    # self._encode_password) and the helpers (_write_yaml, _construct_yaml,
    # _create_testbed, _cli_replacements) are presumably supplied by the
    # TestbedCreator base class from the arguments declared in
    # _init_arguments -- confirm against the base class.
    def _init_arguments(self):
        """ Specifies the arguments for the creator.
        Returns:
            dict: Arguments for the creator.
        """
        # Map the short CLI flag -r onto the boolean 'recurse' argument.
        self._cli_replacements.setdefault('-r', ('recurse', True))
        return {
            'required': ['path'],
            'optional': {
                'recurse': False,
                'encode_password': False
            }
        }
    def to_testbed_file(self, output_location):
        """ Saves the source data as a testbed file.
        Args:
            output_location ('str'): Where to save the file. When the input
                path is a folder, this is treated as an output directory and
                one YAML file is written per converted input file.
        Returns:
            bool: Indication that the operation is successful or not.
        """
        testbed = self._generate()
        # A list result means the input was a folder: (relative_name, data).
        if isinstance(testbed, list):
            for base, item in testbed:
                self._write_yaml(os.path.join(output_location, base),
                    item, self._encode_password, input_file=self._path)
        else:
            self._write_yaml(output_location, testbed, self._encode_password,
                input_file=self._path)
        return True
    def to_testbed_object(self):
        """ Creates testbed object from the source data.
        Returns:
            Testbed: The created testbed (a list of testbeds when the input
            path is a folder).
        """
        testbed = self._generate()
        if isinstance(testbed, list):
            return [self._create_testbed(data) for _, data in testbed]
        else:
            return self._create_testbed(testbed)
    def _generate(self):
        """ Core implementation of how the testbed data is created.
        Returns:
            dict: The intermediate testbed dictionary. For a folder input,
            returns a list of (output_filename, testbed_dict) tuples instead.
        """
        if not os.path.exists(self._path):
            raise FileNotFoundError('File or directory does not exist: %s'
                                                                % self._path)
        # if is a dir then walk through it
        if os.path.isdir(self._path):
            result = []
            for root, _, files in os.walk(self._path):
                for file in files:
                    input_file = os.path.join(root, file)
                    relative = os.path.relpath(input_file, self._path)
                    # NOTE(review): _read_device_data raises for any file that
                    # is not CSV/Excel, so a folder containing other file
                    # types aborts the whole conversion.
                    devices = self._read_device_data(input_file)
                    # The testbed filename should be same as the file
                    output = os.path.splitext(relative)[0] + '.yaml'
                    result.append((output, self._construct_yaml(devices)))
                # if recursive option is not set, then stop after first level
                if not self._recurse:
                    break
        else:
            devices = self._read_device_data(self._path)
            return self._construct_yaml(devices)
        return result
    def _read_device_data(self, file):
        """ Read device data based on file type.
        Args:
            file ('str'): Path of the file.
        Returns:
            list: List of dictionaries containing device data.
        Raises:
            Exception: If the file extension is not .csv/.xls/.xlsx.
        """
        _, extension = os.path.splitext(file)
        # Check if file is csv or xls
        devices = {}
        if extension == '.csv':
            devices = self._read_csv(file)
        elif extension in {'.xls', '.xlsx'}:
            devices = self._read_excel(file)
        else:
            raise Exception("Given path is not a folder or a CSV/Excel file.")
        return devices
    def _read_csv(self, file_name):
        """ Reads CSV file containing device data.
        The first row is treated as the header and cached in self._keys.
        Args:
            file_name ('str'): Name of the CSV file.
        Returns:
            list: List of dictionaries containing the device attributes from
            each row of the file.
        """
        row_lst = []
        # NOTE(review): the csv docs recommend open(..., newline='') for
        # reader objects; without it, embedded newlines inside quoted fields
        # may be mis-parsed.
        with open(file_name, 'r') as f:
            reader = csv.reader(f)
            self._keys = next(reader)
            for row in reader:
                # Only take key which has value
                row_lst.append({k: v for k, v in dict(
                    zip(self._keys, row)).items() if v})
        return row_lst
    def _read_excel(self, file_name):
        """ Read Excel file containing device data.
        Only the first worksheet is read; row 0 is the header.
        Args:
            file_name ('str'): name of the excel file
        Returns:
            list: List of dictionaries containing device attributes from each
            row of the file.
        """
        row_lst = []
        ws = xlrd.open_workbook(file_name).sheet_by_index(0)
        self._keys = ws.row_values(0)
        for i in range(1, ws.nrows):
            # Only take key which has value
            row_lst.append({k: v for k, v in dict(
                zip(self._keys, ws.row_values(i))).items() if v})
        return row_lst
| [
"tezheng@cisco.com"
] | tezheng@cisco.com |
f874d3d01c4a38618dfbade3415abc56847a31d6 | d0a5b42b75a8884fd62fbbb2138bc03a4ab04c5d | /Projects/sudoku-master/sdk_group.py | 520e5ef0af09e6348ca1c6ac3e36f362491fc3e2 | [
"MIT"
] | permissive | HenziKou/CIS-211 | b0221b42128fdb74fd8ca4c6db133c239bd999bb | b3510b28aa70f3d620f8b317c6cb206536f20fcb | refs/heads/master | 2021-01-09T11:48:56.967065 | 2020-02-22T06:33:04 | 2020-02-22T06:33:04 | 242,289,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,567 | py | """
A "group" is a collection of 9 Sudoku tiles, which
may form a row, a column, or a block (aka 'region'
or 'box').
Constraint propagation are localized here.
"""
from typing import List
import sdk_tile
import logging
# Module logger: default root handler, DEBUG level for this module only.
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class Group(object):
    """A group of 9 Sudoku tiles forming a row, a column, or a block.

    Constraint propagation (naked single / hidden single) is localized here.
    """

    def __init__(self, title: str):
        """Initially empty. The title is used only for debugging output."""
        self.title = title
        self.tiles: List[sdk_tile.Tile] = []

    def add(self, tile: sdk_tile.Tile):
        """Add a tile to this group; a group holds at most 9 tiles."""
        assert len(self.tiles) < 9
        self.tiles.append(tile)

    def __str__(self):
        """Represent as the title followed by the concatenated tile values."""
        values = []
        for tile in self.tiles:
            values.append(tile.value)
        return self.title + " " + "".join(values)

    def attend(self):
        """Announce that we are working on these tiles. A view component
        may make this visible.
        """
        for tile in self.tiles:
            tile.attend()

    def unattend(self):
        """Announce that we are done working on these tiles for now."""
        for tile in self.tiles:
            tile.unattend()

    def is_complete(self) -> bool:
        """True iff every tile holds a value (not the UNKNOWN wild-card)."""
        for tile in self.tiles:
            if tile.value == sdk_tile.UNKNOWN:
                return False
        return True

    def is_consistent(self) -> bool:
        """A group is consistent if it has no duplicates, every tile has
        at least one candidate, and every value has a place to go.
        """
        can_place = set()
        used = set()
        for tile in self.tiles:
            # Every tile must retain at least one candidate.
            if len(tile.candidates) == 0:
                return False
            # No value may appear twice.
            if tile.value in used:
                return False
            elif tile.value != sdk_tile.UNKNOWN:
                used.add(tile.value)
            can_place = can_place | tile.candidates
        # Every choice must still be placeable somewhere in the group.
        if can_place != set(sdk_tile.CHOICES):
            return False
        return True

    def duplicates(self) -> List[str]:
        """One line report per duplicate found."""
        reports = []
        used = set()
        for tile in self.tiles:
            if tile.value == sdk_tile.UNKNOWN:
                continue
            if tile.value in used:
                reports.append("Duplicate in {}: {}, value {}"
                               .format(self.title, self, tile.value))
            else:
                # Bug fix: the original never added seen values to `used`,
                # so the membership test above was always False and no
                # duplicate was ever reported.
                used.add(tile.value)
        return reports

    # ---------------------------------
    # Constraint propagation in a group
    # ----------------------------------

    def naked_single_constrain(self) -> bool:
        """A choice can be used at most once in the group.

        For each choice that has already been used in the group, eliminate
        that choice as a candidate in all the UNKNOWN tiles in the group.
        Returns True if any tile's candidate set changed.
        """
        self.attend()
        changed = False
        remaining = []
        for tile in self.tiles:
            if tile.value != sdk_tile.UNKNOWN:
                remaining.append(tile.value)
        set_remaining = set(remaining)
        for tile in self.tiles:
            if tile.value == sdk_tile.UNKNOWN:
                # `or changed` second so eliminate() is always invoked.
                changed = tile.eliminate(set_remaining) or changed
        self.unattend()
        return changed

    def hidden_single_constrain(self) -> bool:
        """Each choice must be used in the group.

        For each choice not already used in the group, if exactly one tile
        can still hold it, that tile must hold it. This depends on candidate
        narrowing done by naked_single_constrain, so it only works in
        combination with it. Returns True if any tile was assigned.
        """
        self.attend()
        changed = False
        # Consistency: derive the choice set from sdk_tile.CHOICES instead of
        # a hard-coded digit list (matches is_consistent above).
        leftovers = set(sdk_tile.CHOICES)
        for tile in self.tiles:
            leftovers.discard(tile.value)
        for value in leftovers:
            holders = [tile for tile in self.tiles if tile.could_be(value)]
            if len(holders) == 1:
                holders.pop().set_value(value)
                changed = True
        self.unattend()
        return changed
| [
"henzikou@gmail.com"
] | henzikou@gmail.com |
8c2b88a90276e8878d80b2051ba69a0ae3c43a9d | 9f491494ad39b91c906517ceb3008c752c214989 | /NRE_paper_study/ERNIE/ERNIE/code/run_fewrel.py | 6cd39d87b03a062812499b41ae86f44523c8c782 | [
"MIT"
] | permissive | yuwell1999/nlp_paper_study | 0b73b2e8235a4dffc0fa5016c23d7998a15f58a7 | b7772aa9c15d3b8459d9b8c3addb93c575a93ef2 | refs/heads/master | 2022-04-15T22:01:45.526579 | 2020-04-07T14:24:27 | 2020-04-07T14:24:27 | 256,650,641 | 1 | 0 | null | 2020-04-18T02:07:02 | 2020-04-18T02:07:02 | null | UTF-8 | Python | false | false | 23,722 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import simplejson as json
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from knowledge_bert.tokenization import BertTokenizer
from knowledge_bert.modeling import BertForSequenceClassification
from knowledge_bert.optimization import BertAdam
from knowledge_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
# Global logging: timestamp - level - logger name - message, at INFO level.
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
    """One raw sequence-classification example prior to tokenization."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Store the fields of a single example.

        Args:
            guid: Unique identifier for the example.
            text_a: Untokenized text of the first sequence; for single
                sequence tasks only this sequence is given.
            text_b: Optional untokenized text of the second sequence,
                required only for sequence-pair tasks.
            label: Optional gold label, present for train/dev examples
                and absent for test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
class InputFeatures(object):
    """One fully numeric, padded feature set for a single example."""

    def __init__(self, input_ids, input_mask, segment_ids, input_ent, ent_mask, label_id):
        self.input_ids = input_ids      # token ids, padded to max_seq_length
        self.input_mask = input_mask    # 1 for real tokens, 0 for padding
        self.segment_ids = segment_ids  # 0 for sentence A, 1 for sentence B
        self.label_id = label_id        # integer class index
        self.input_ent = input_ent      # entity ids aligned to tokens (-1 = none)
        self.ent_mask = ent_mask        # 1 where a valid entity id is present
class DataProcessor(object):
    """Abstract base class for dataset-specific example loaders."""

    def get_train_examples(self, data_dir):
        """Return the `InputExample`s of the training split."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Return the `InputExample`s of the dev split."""
        raise NotImplementedError()

    def get_labels(self):
        """Return the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_json(cls, input_file):
        """Parse and return the JSON document stored at `input_file`."""
        with open(input_file, "r", encoding='utf-8') as f:
            return json.load(f)
class FewrelProcessor(DataProcessor):
    """Processor for the FewRel relation-classification data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        records = self._read_json(os.path.join(data_dir, "train.json"))
        examples = self._create_examples(records, "train")
        return examples, list({example.label for example in examples})

    def get_dev_examples(self, data_dir):
        """See base class."""
        records = self._read_json(os.path.join(data_dir, "dev.json"))
        return self._create_examples(records, "dev")

    def get_labels(self):
        """Unused placeholder; labels are derived from the data instead."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Build one InputExample per record; text_a is (text, entities)."""
        examples = []
        for index, record in enumerate(lines):
            # Normalize entity scores of 1 down to 0 (mutates the record).
            for ent in record['ents']:
                if ent[1] == 1:
                    ent[1] = 0
            examples.append(InputExample(
                guid="%s-%s" % (set_type, index),
                text_a=(record['text'], record['ents']),
                text_b=None,
                label=record['label']))
        return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, threshold):
    """Loads a data file into a list of `InputBatch`s.

    Marks the head entity with '# ... #' and the tail entity with '$ ... $'
    inside the text, tokenizes with aligned entity labels, and produces
    padded id/mask arrays of length max_seq_length.
    NOTE(review): mutates each example's h/t entity spans in place, and reads
    the hard-coded path "kg_embed/entity2id.txt" relative to the CWD.
    """
    label_list = sorted(label_list)
    label_map = {label : i for i, label in enumerate(label_list)}
    # QID -> integer entity id mapping; first line of the file is a count.
    entity2id = {}
    with open("kg_embed/entity2id.txt") as fin:
        fin.readline()
        for line in fin:
            qid, eid = line.strip().split('\t')
            entity2id[qid] = int(eid)
    features = []
    for (ex_index, example) in enumerate(examples):
        ex_text_a = example.text_a[0]
        h, t = example.text_a[1]
        h_name = ex_text_a[h[1]:h[2]]
        t_name = ex_text_a[t[1]:t[2]]
        # Wrap head with '# ' ... ' #' and tail with '$ ' ... ' $' markers,
        # inserting in left-to-right order so offsets stay valid.
        if h[1] < t[1]:
            ex_text_a = ex_text_a[:h[1]] + "# "+h_name+" #" + ex_text_a[h[2]:t[1]] + "$ "+t_name+" $" + ex_text_a[t[2]:]
        else:
            ex_text_a = ex_text_a[:t[1]] + "$ "+t_name+" $" + ex_text_a[t[2]:h[1]] + "# "+h_name+" #" + ex_text_a[h[2]:]
        # Shift the recorded spans past the inserted marker characters:
        # the earlier entity moves by its own opening marker (2 chars), the
        # later one by both full markers (6 chars).
        if h[1] < t[1]:
            h[1] += 2
            h[2] += 2
            t[1] += 6
            t[2] += 6
        else:
            h[1] += 6
            h[2] += 6
            t[1] += 2
            t[2] += 2
        tokens_a, entities_a = tokenizer.tokenize(ex_text_a, [h, t])
        # Hard abort if both entities did not survive tokenization.
        # NOTE(review): the guard tests x != "UNK" but the debug print tests
        # x[0] != "UNK" -- one of the two is likely wrong.
        if len([x for x in entities_a if x!="UNK"]) != 2:
            print(entities_a, len([x for x in entities_a if x[0]!="UNK"]))
            exit(1)
        tokens_b = None
        if example.text_b:
            # Keep only second-sequence entities scoring above `threshold`.
            tokens_b, entities_b = tokenizer.tokenize(example.text_b[0], [x for x in example.text_b[1] if x[-1]>threshold])
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, entities_a, entities_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]
                entities_a = entities_a[:(max_seq_length - 2)]
        # The convention in BERT is:
        # (a) For sequence pairs:
        # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
        # (b) For single sequences:
        # tokens: [CLS] the dog is hairy . [SEP]
        # type_ids: 0 0 0 0 0 0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambigiously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        ents = ["UNK"] + entities_a + ["UNK"]
        segment_ids = [0] * len(tokens)
        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            ents += entities_b + ["UNK"]
            segment_ids += [1] * (len(tokens_b) + 1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # Map entity labels to ids; -1/0 mask where no known entity aligns.
        input_ent = []
        ent_mask = []
        for ent in ents:
            if ent != "UNK" and ent in entity2id:
                input_ent.append(entity2id[ent])
                ent_mask.append(1)
            else:
                input_ent.append(-1)
                ent_mask.append(0)
        # The [CLS] position is always treated as having an entity slot.
        ent_mask[0] = 1
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding = [0] * (max_seq_length - len(input_ids))
        padding_ = [-1] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
        segment_ids += padding
        input_ent += padding_
        ent_mask += padding
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(input_ent) == max_seq_length
        assert len(ent_mask) == max_seq_length
        label_id = label_map[example.label]
        # Log the first five examples for eyeball verification.
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join(
                [str(x) for x in tokens]))
            logger.info("ents: %s" % " ".join(
                [str(x) for x in ents]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info(
                "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))
        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          input_ent=input_ent,
                          ent_mask=ent_mask,
                          label_id=label_id))
    return features
def _truncate_seq_pair(tokens_a, tokens_b, ents_a, ents_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
ents_a.pop()
else:
tokens_b.pop()
ents_b.pop()
def accuracy(out, labels):
    """Count the rows of `out` whose argmax matches the entry in `labels`."""
    predicted = np.argmax(out, axis=1)
    return (predicted == labels).sum()
def warmup_linear(x, warmup=0.002):
    """Learning-rate multiplier: ramp linearly from 0 to 1 over the first
    `warmup` fraction of training progress `x`, then hold at 1.0."""
    return x / warmup if x < warmup else 1.0
def main():
    """Fine-tune an ERNIE sequence-classification model on FewRel.

    Parses CLI arguments, prepares the model/optimizer (optionally fp16 and
    distributed via apex), converts examples to features, and runs the
    training loop, checkpointing after every epoch.
    NOTE(review): --do_eval is parsed but never used; only --do_train has
    any effect in this script.
    """
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--ernie_model", default=None, type=str, required=True,
                        help="Ernie pre-trained model")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    ## Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        default=False,
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        default=False,
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--threshold', type=float, default=.3)
    args = parser.parse_args()
    processors = FewrelProcessor
    num_labels_task = 80
    # Device selection: single-node (possibly multi-GPU) vs. distributed.
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))
    # Per-step batch size; gradients are accumulated to reach the requested size.
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
    # Seed all RNGs for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    if not args.do_train:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)
    processor = processors()
    num_labels = num_labels_task
    label_list = None
    tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case)
    train_examples = None
    num_train_steps = None
    # label_list is derived from the training data, not from get_labels().
    train_examples, label_list = processor.get_train_examples(args.data_dir)
    num_train_steps = int(
        len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
    # Prepare model
    model, _ = BertForSequenceClassification.from_pretrained(args.ernie_model,
              cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),
              num_labels = num_labels)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    # Entity-output layers of the last encoder block are excluded from training.
    no_grad = ['bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent']
    param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)]
    # Standard BERT recipe: no weight decay on biases and LayerNorm weights.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)
    global_step = 0
    if args.do_train:
        train_features = convert_examples_to_features(
            train_examples, label_list, args.max_seq_length, tokenizer, args.threshold)
        # Load pre-trained KG entity embeddings; row 0 is a zero vector used
        # for the "no entity" (-1 -> 0 after the +1 shift below) slot.
        vecs = []
        vecs.append([0]*100)
        with open("kg_embed/entity2vec.vec", 'r') as fin:
            for line in fin:
                vec = line.strip().split('\t')
                vec = [float(x) for x in vec]
                vecs.append(vec)
        embed = torch.FloatTensor(vecs)
        embed = torch.nn.Embedding.from_pretrained(embed)
        #embed = torch.nn.Embedding(5041175, 100)
        logger.info("Shape of entity embedding: "+str(embed.weight.size()))
        del vecs
        logger.info("***** Running training *****")
        logger.info(" Num examples = %d", len(train_examples))
        logger.info(" Batch size = %d", args.train_batch_size)
        logger.info(" Num steps = %d", num_train_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        all_ent = torch.tensor([f.input_ent for f in train_features], dtype=torch.long)
        all_ent_masks = torch.tensor([f.ent_mask for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ent, all_ent_masks, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        # Per-step loss log written alongside the checkpoints.
        output_loss_file = os.path.join(args.output_dir, "loss")
        loss_fout = open(output_loss_file, 'w')
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                # Keep the entity-id tensor (index 3) on CPU for the embedding
                # lookup; everything else moves to the training device.
                batch = tuple(t.to(device) if i != 3 else t for i, t in enumerate(batch))
                input_ids, input_mask, segment_ids, input_ent, ent_mask, label_ids = batch
                input_ent = embed(input_ent+1).to(device) # -1 -> 0
                # NOTE(review): .half() is applied to the entity embeddings
                # unconditionally, even without --fp16 -- confirm the model
                # expects half-precision entity inputs in fp32 mode.
                loss = model(input_ids, segment_ids, input_mask, input_ent.half(), ent_mask, label_ids)
                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                loss_fout.write("{}\n".format(loss.item()))
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
            # Checkpoint after each epoch, suffixed with the global step.
            model_to_save = model.module if hasattr(model, 'module') else model
            output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(global_step))
            torch.save(model_to_save.state_dict(), output_model_file)
        # Save a trained model
        model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
        torch.save(model_to_save.state_dict(), output_model_file)
| [
"958747457@qq.com"
] | 958747457@qq.com |
bb2f3583ce87e0c49e35263a9eb452014649b3d1 | 7c6248cf1e5e8f4b0a9ae21a139d0ac75ae704e9 | /usr/local/bin/docker-cloud | 8e920495654576ea24cd41dbde8fe8b0ffceda3a | [] | no_license | tn-osimis/highland_builder | 94b299f1baf8c9294f476159af54da7fa65e6878 | 973c5908bb226b4374e390c06d3e88176f219ccf | refs/heads/master | 2021-05-09T14:35:17.278118 | 2018-01-26T15:45:04 | 2018-01-26T15:45:04 | 119,069,264 | 0 | 0 | null | 2018-01-26T15:37:08 | 2018-01-26T15:37:08 | null | UTF-8 | Python | false | false | 326 | #!/usr/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'docker-cloud==1.0.7','console_scripts','docker-cloud'
__requires__ = 'docker-cloud==1.0.7'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('docker-cloud==1.0.7', 'console_scripts', 'docker-cloud')()
)
| [
"tn@osimis.io"
] | tn@osimis.io | |
d5d532ae0c5bdd0ab6358c0d0abd97160acd91bf | 4854dc001b90d6eb2aa0370888ff2ad3ae94207f | /Greedy/BOJ_1931.py | e5c54fb454ab02df21dce00a76e1c57d9e9cf349 | [] | no_license | gocyzhod/algorithm | b565a3075719fd1e7976f229ffeaf3c72b91b173 | b2f304ecce6b0a7a072ff370a78c91b85ca1bbb4 | refs/heads/main | 2023-08-16T20:12:54.500647 | 2021-10-04T07:36:18 | 2021-10-04T07:36:18 | 381,949,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | import sys
input = sys.stdin.readline
N = int(input())
room = [[0]*2 for _ in range(N)]
Num = len(room)
for i in range(N):
a, b = map(int,input().split())
room[i][0] = a
room[i][1] = b
room.sort(key = lambda x : (x[1], x[0]))
count = 1
end_room = room[0][1]
for i in range(1,N):
if room[i][0] >= end_room:
count+=1
end_room = room[i][1]
print(count)
문제 해설 :
이 문제에서는 끝나는 시간과 시작하는 시간이 같을 수가 있다고 조건을 제시하였다.
이 부분을 잘 이해해야한다.
또한 예를 들어
0 11
2 3
3 4
4 5
라고 제시가 주어지면, 23->34->45 라고하여야겠지만, 시작시간을 기준으로 정렬을 한다면 0 11 한가지만 뽑혀 1가지로 답이 나올수도 있다는 점을 유의하여한다.
따라서 0번 인덱스와 1번 인덱스 순으로 정렬을 시켜주면 되는 문제이다.
따라서 lambda를 이용하여 0번 인덱스와 1번 인덱스를 순서대로 정렬을 시켜준다.
| [
"noreply@github.com"
] | gocyzhod.noreply@github.com |
42ab6de1527788b1ff72e1291ae810240bd3d70d | b3edbdc7c673f0f6cc7eee034c48aabc581fa9ae | /01.Import_Module_Package/main.py | 4e7486e29f0fb33f01f1ad6531de9d8a87cc596c | [] | no_license | Oleg-Stukalov/Advanced_Python | d913157dcf74a48db491a020a924f84916139cd1 | b8af98beb06a405d8e26532f7f981395040fcf6c | refs/heads/master | 2022-12-10T13:28:35.325841 | 2020-09-10T12:38:33 | 2020-09-10T12:38:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | from datetime import date
from application import salary
from application.db import people
if __name__ == '__main__':
print(date.today())
print(people.get_employees())
print(salary.calculate_salary())
| [
"serge.rybakov@gmail.com"
] | serge.rybakov@gmail.com |
89f46992c9b73e991430fc581dfd8328dadbe2c0 | 4814eda1fa54376396f227ef18d9f84e087641f2 | /python/stats/stats-switch-slave.py | 78f66fe1c53ede02416ad4d918a2897142acf8c1 | [] | no_license | Travisivart/ADAPTS | 82fcdfa7c07f96ddf4c9061e645b0c24f7c3b623 | babfed70557e02ff98a50cbf5907b3cebf63c5f5 | refs/heads/master | 2021-05-10T15:29:58.258499 | 2018-05-02T00:19:49 | 2018-05-02T00:19:49 | 118,552,761 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,704 | py | import sys, logging
import frenetic
import pymysql
from frenetic.syntax import *
from network_information_base import *
from tornado.ioloop import PeriodicCallback, IOLoop
from functools import partial
hostname = 'localhost'
username = 'mtd'
password = 'mtd'
database = 'mtd'
myConnection = pymysql.connect( host=hostname, user=username, passwd=password, db=database )
def doUpdate( conn, switch, data ) :
cur = conn.cursor()
insert = (switch, data['port_no'], data['rx_packets'], data['tx_packets'], data['rx_bytes'], data['tx_bytes'], data['rx_dropped'], data['tx_dropped'], data['rx_errors'], data['tx_errors'], data['rx_fram_err'], data['rx_over_err'], data['rx_crc_err'], data['collisions'], data['rx_packets'], switch, data['port_no'], data['tx_packets'], switch, data['port_no'], data['rx_bytes'], switch, data['port_no'], data['tx_bytes'], switch, data['port_no'])
cur.execute("INSERT INTO mtd.logs "
"(switch_id, port_id, timestamp, rx_packets, tx_packets, rx_bytes, tx_bytes, rx_dropped, tx_dropped, rx_errors, tx_errors, rx_fram_err, rx_over_err, rx_crc_err, collisions, delta_rx_packets, delta_tx_packets, delta_rx_bytes, delta_tx_bytes) "
"VALUES(%s, %s, NOW(), %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, "
"%s-(select rx_packets from mtd.logs a2 where a2.switch_id = %s and a2.port_id = %s and a2.timestamp = (select max(timestamp) from mtd.logs a22 "
"where a22.switch_id = a2.switch_id and a22.port_id = a2.port_id and a22.timestamp < NOW())),"
"%s-(select tx_packets from mtd.logs a2 where a2.switch_id = %s and a2.port_id = %s and a2.timestamp = (select max(timestamp) from mtd.logs a22 "
"where a22.switch_id = a2.switch_id and a22.port_id = a2.port_id and a22.timestamp < NOW())),"
"%s-(select rx_bytes from mtd.logs a2 where a2.switch_id = %s and a2.port_id = %s and a2.timestamp = (select max(timestamp) from mtd.logs a22 "
"where a22.switch_id = a2.switch_id and a22.port_id = a2.port_id and a22.timestamp < NOW())),"
"%s-(select tx_bytes from mtd.logs a2 where a2.switch_id = %s and a2.port_id = %s and a2.timestamp = (select max(timestamp) from mtd.logs a22 "
"where a22.switch_id = a2.switch_id and a22.port_id = a2.port_id and a22.timestamp < NOW()))"
");", insert)
cur.close()
conn.commit()
oldData = data
class StatsApp1(frenetic.App):
client_id = "stats"
def __init__(self):
frenetic.App.__init__(self)
self.nib = NetworkInformationBase(logging)
def connected(self):
def handle_current_switches(switches):
logging.info("Connected to Frenetic - Stats for switch: " + str(switches.keys()[0]))
dpid = switches.keys()[0]
self.nib.set_dpid(dpid)
self.nib.set_ports( switches[dpid] )
PeriodicCallback(self.count_ports, 1000).start()
self.current_switches(callback=handle_current_switches)
def print_count(self, future, switch):
data = future.result()
# myConnection = pymysql.connect( host=hostname, user=username, passwd=password, db=database )
doUpdate(myConnection, switch, data)
# myConnection.close()
def count_ports(self):
switch_id = self.nib.get_dpid()
# print self.nib.all_ports()
for port in self.nib.all_ports():
ftr = self.port_stats(switch_id, str(port))
f = partial(self.print_count, switch = switch_id)
IOLoop.instance().add_future(ftr, f)
if __name__ == '__main__':
# logging.basicConfig(\
# stream = sys.stderr, \
# format='%(asctime)s [%(levelname)s] %(message)s', level=logging.INFO \
# )
app = StatsApp1()
app.start_event_loop()
| [
"travisneely@gmail.com"
] | travisneely@gmail.com |
bd83125d739186ca8984dec11d65bb01194fda34 | 77f4b39f6587560e3c3ca29ab5dd81ffe1807e5c | /values.py | e4a35d875742091f6d8cf0ba743689940e3e6e52 | [] | no_license | nikosninosg/Modbus-RS-485 | 425e8ac4caa49bd030df7fb906719d25a49ef7ee | 2d6ef3dee95fccc1339a19d579f07cb50494fb6d | refs/heads/master | 2020-07-25T23:42:13.030663 | 2019-12-20T11:46:44 | 2019-12-20T11:46:44 | 208,458,970 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | #!/usr/bin/env python
# Test program for DZT 6001: 80A, Single phase kWh meter, LCD, RS485/Modbus
#
# Home page: https://www.dutchmeters.com/index.php/product/dzt-6001/
# User manual: http://dutchmeters.com/wp-content/uploads/2017/06/DZT-6001-manual.pdf
# Register reference: http://dutchmeters.com/wp-content/uploads/2017/04/DZT6001-Modbus.pdf
import time
from pymodbus.client.sync import ModbusSerialClient as ModbusClient
client = ModbusClient(method='rtu', port='/dev/ttyUSB0', timeout=1, stopbits = 2, bytesize = 8, parity='N', baudrate= 38400)
client.connect()
while True:
# query the device with address 0x1B (unit=0x1B)
# the last readable register has index 0x2c
r = client.read_holding_registers(0x00,0x2c,unit=0x1B) #300=0x12c
print "Voltage: %.1f V" % (int(r.registers[0])/10.0)
print "Current: %.1f A" % (float(r.registers[1])/10.0)
print "Active power: %d W" % r.registers[3]
print "Reactive power: %d W" % r.registers[4]
print "Apparent power: %d W" % r.registers[5]
print "CosPhy: %.3f" % (float(r.registers[6])/1000.0)
print "Active energy: %.2f kWh" % (float(r.registers[7])/100.0)
print "Reactive energy: %.2f kWh" % (float(r.registers[0x11])/100.0)
print "Bit rate: %d " % (1200 << (r.registers[0x2a] - 1))
print "--------------"
time.sleep (5)
| [
"noreply@github.com"
] | nikosninosg.noreply@github.com |
b1ccedbab71f1b853cf34e1014089d47ee4ce1b1 | 36b10bf10e168d2b3177688f1f9b45606118c9ab | /lesson_05/homework5.py | dc7285fdca6a0c4a34e719db2afd08c7af117de9 | [] | no_license | FatDou/pyCourse | 6270fd2f27944fafff743323286d6bd6631048b7 | f979b25d93107b5c5f1b28ef50dc415263e21155 | refs/heads/master | 2021-04-26T23:33:06.766493 | 2018-09-29T02:02:07 | 2018-09-29T02:02:07 | 124,013,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,661 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 17 20:47:16 2018
@author: fatdou
"""
"""
读取文件
"""
from sklearn import svm, datasets
import numpy as np
import os
from functools import reduce
class DataSet:
def __init__(self, name):
self.name = name
#读取文件内容,读到一个list中
def readFile(self):
file = open(self.name)
data = []
for line in file.readlines():
line = line.strip('\n').split(',')
for i in range(len(line)):
line[i] = float(line[i])
data.append(line)
file.close()
return data
def generate_xy(self):
#获得文件内容
datas = readFile(self.name)
#标签
data2 = []
#数据
data1 = datas[:]
for i in range(len(datas)):
#标签
data2.append(datas[i][-1])
#数据
del(data1[i][-1])
# print('\n Original data looks like this: \n', data1)
# print('\nLabels looks like this: \n', data2)
return data1,data2
def get_train_test_set(self,ratio):
#将数据集分成训练集和测试集
#用一个参数来区分多少分成训练集,多少分成测试机
#首先把数据和label生成出来
data,label = self.generate_xy()
data = np.array(data)
label = np.array(label)
print(type(data))
#计算数据长度
n_samples = len(data)
#分训练集和测试集
n_train = n_samples * ratio
n_train = int(n_train)
X_train = data[:n_train]
Y_train = label[:n_train]
X_test = data[n_train:]
Y_test = label[n_train:]
#获得所有的数据
return X_train, Y_train, X_test, Y_test
def main():
data = DataSet('pima-indians-diabetes.txt')
X_train, Y_train, X_test, Y_test = data.get_train_test_set(0.7)
print('X_train: \n', len(X_train))
print('Y_train: \n', len(Y_train))
clf = svm.SVC()
clf.fit(X_train, Y_train)
clf.predict(X_test)
print(clf.predict(X_test))
print(X_test)
test_point = clf.predict(X_test[12].reshape(1,-1))
y_true = Y_test[12]
print('test_point: ', test_point)
print('y_true: ' , y_true)
new_list = list(map(lambda x,y :x == y, clf.predict(X_test),Y_test ) )
tCount = new_list.count(True)
precise = lambda x,y : x / y * 100
print('precise:',precise(tCount, len(new_list)),'%')
if __name__ == '__main__':
main() | [
"pfkmldf4@outlook.com"
] | pfkmldf4@outlook.com |
97a997c29e858c17b6cf726578235b19d6605e7d | 3fa480cf996c4f597013f89c31dacdbd1abde153 | /server/lobby_server_handlers/SendChallengeHandler.py | b6e9094edc90ab35dd56e5e08726c77dda8e6358 | [] | no_license | einhornus/CrazyGo | bdfd3700f912a616384cec1cace822e0a0604e4a | a7ffcd6cc75c515e93b7f9ff11ac412d5a0b7d5e | refs/heads/master | 2021-01-20T04:04:43.913605 | 2017-10-05T16:20:26 | 2017-10-05T16:20:26 | 101,261,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py |
from Server import *
from utils.Validator import *
from utils.print_utils import *
from utils.db_queries import *
from utils.security import *
import settings
import random
from lobby_server_handlers.factory_utils import *
class SendChallengeHandler(Handler):
def get_validator(self):
res = Validator(['id', 'challenger_id', 'title', 'settings'])
return res
def get_type(self):
return 'challenge'
def action(self, request, factory, me):
id = int(request["id"])
challengerId = int(request["challenger_id"])
title = request["title"]
settings = request["settings"]
if isAuthorized(me):
challengerClient = findClient(factory, challengerId)
if not hasattr(me.factory, "stored_challenges"):
me.factory.stored_challenges = {}
challenge_id = random.randint(100, 1000000000)
me.factory.stored_challenges[challenge_id] = (id, challengerId, title, settings)
meClient = findClient(factory, id)
challengerClient.sendLine('new_challenge '+str(id)+';'+str(title)+";"+str(settings)+";"+str(challenge_id))
meClient.sendLine('created_challenge '+str(challenge_id))
else:
me.sendLine(print_error(SECURITY_ERROR, "")) | [
"luckjanovdmitry@yandex.ru"
] | luckjanovdmitry@yandex.ru |
b680364282f0465ffb32dd84c7508cc3a31d9dd8 | a2a9641305adbf31636c3168659da4c3ecd1c3b1 | /model1.py | 3377351cfeb8dce1e7b3cfe7315855e1576c9970 | [] | no_license | kikyou123/DFN | c4b5a3973f2f4bc794aed0374dcd0d79e789d174 | fba22ff0fcb3759843c4ef5eb2f392343d286c27 | refs/heads/master | 2021-07-20T08:09:57.933121 | 2017-10-28T03:48:49 | 2017-10-28T03:48:49 | 108,617,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,211 | py | from op import *
import tensorflow as tf
import os
import numpy as np
def add_upscale(input):
prev_shape = input.get_shape()
size = [2 * int(s) for s in prev_shape[1:3]]
return tf.image.resize_nearest_neighbor(input, size)
def dfl(input, filters, ker = 9):
'''
:param input: input image of size [batch_size, h, w, 3]
:param filter: filter [batch_size, h, w, 81]
:return: output image of size [batch_size, h, w, 3]
'''
# filter_size = ker * ker
# expand_filter = np.reshape(np.eye(filter_size, filter_size), (filter_size, 1, ker, ker))
# expand_filter = np.transpose(expand_filter, (2,3,1,0))
# expand_input = tf.nn.conv2d(input, expand_filter, strides = [1,1,1,1], padding = 'SAME')
# output = expand_input * filter
# output = tf.reduce_sum(output, axis = -1, keep_dims = True)
image_patches = tf.extract_image_patches(input, [1, ker, ker, 1], [1, 1, 1, 1], [1, 1, 1, 1], padding='SAME')
output = tf.reduce_sum(tf.multiply(image_patches, filters), 3, keep_dims=True)
return output
def pred(input, hidden, filter = 9):
# encoder
h1 = lrelu(conv2d(input, 32, d_h = 1, d_w = 1, name = 'h1'))
h2 = lrelu(conv2d(h1, 32, d_h = 2, d_w = 2, name = 'h2'))
h3 = lrelu(conv2d(h2, 64, d_h = 1, d_w = 1, name = 'h3'))
h4 = lrelu(conv2d(h3, 64, d_h = 1, d_w = 1, name = 'h4'))
# middle
h5 = lrelu(conv2d(h4, 128, d_h = 1, d_w = 1, name = 'h5'))
hidden1 = lrelu(conv2d(hidden, 128, d_h = 1, d_w = 1, name = 'hidden_1'))
hidden2 = lrelu(conv2d(hidden1, 128, d_h = 1, d_w = 1, name = 'hidden_2'))
h6 = tf.add(hidden2, h5)
hidden_state = h6
#decoder
h7 = lrelu(conv2d(h6, 64, d_h = 1, d_w = 1, name = 'h7'))
h8 = lrelu(conv2d(h7, 64, d_h = 1, d_w = 1, name = 'h8'))
h9 = add_upscale(h8)
h10 = lrelu(conv2d(h9, 64, d_h = 1, d_w = 1, name = 'h10'))
h11 = lrelu(conv2d(h10, 64, d_h = 1, d_w = 1, name = 'h11'))
h12 = lrelu(conv2d(h11, 128, k_h=1, k_w=1, d_h = 1, d_w = 1, name = 'h12'))
l_filter = conv2d(h12, filter * filter, k_h=1, k_w=1, d_h = 1, d_w = 1, name = 'h13')
l_filter = tf.nn.softmax(l_filter)
#filter
output = dfl(input[:,:,:,-1:], l_filter, filter)
return output, hidden_state
def model(inputs, input_seqlen = 3, target_seqlen = 3, buffer_len = 1, filter = 9, reuse = False):
# inputs : [batch_size, seqlen, image_size, image_size, n_channel]
# return : [batch_size, seqlen, image_size, image_size, n_channel]
with tf.variable_scope('gen') as scope:
if reuse:
scope.reuse_variables()
network_template = tf.make_template('pred', pred)
batch_size, seqlen, image_size, image_size, n_channel = inputs.get_shape().as_list()
inputs = tf.transpose(inputs, (0,2,3,1,4))
inputs = tf.reshape(inputs, (batch_size, image_size, image_size, seqlen*n_channel))
hidden = tf.zeros((batch_size, image_size // 2, image_size // 2, 128))
outputs = []
for i in range(input_seqlen - buffer_len + target_seqlen):
pred_input = inputs[..., 0: buffer_len]
output, hidden = network_template(pred_input, hidden, filter)
inputs = inputs[..., 1:None]
if i >= input_seqlen - buffer_len:
outputs.append(output)
if inputs.get_shape()[-1] == 0:
inputs = output
else:
inputs = tf.concat([inputs, output], axis = 1)
outputs = tf.stack(outputs, axis = 0)
outputs = tf.transpose(outputs, (1,0,2,3,4))
vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = 'gen')
return outputs, vars
def get_loss(targets, outputs, target_seqlen):
outputs = tf.clip_by_value(outputs, np.finfo(np.float32).eps, 1 - np.finfo(np.float32).eps)
loss = -targets * tf.log(outputs) - (1 - targets) * tf.log(1 - outputs)
loss = tf.reduce_mean(loss)
#loss = tf.reduce_mean(tf.square(targets - outputs))
#loss = tf.reduce_mean(tf.abs(targets - outputs))
return loss
def create_optimizers(loss, params, learning_rate):
opt = tf.train.AdamOptimizer(learning_rate).minimize(loss, var_list = params)
return opt
| [
"1163763177@qq.com"
] | 1163763177@qq.com |
f571afedabe7f777765b939014a2f36982fec0f5 | 00f2b2d9c383855bbc7ac3d7f98172aca9999012 | /testPrograms/testingCamera.py | 8e9906c7f3aba4fdd30729c753296c0a49740eaf | [] | no_license | ProducerBill/Vision_System | 87a508dcc04c3bda1695f95f5c6e25d66696f630 | 5e0c809543eb2847adb2fa959f0b41fc8691c207 | refs/heads/master | 2020-07-07T04:08:30.041555 | 2020-03-01T22:09:23 | 2020-03-01T22:09:23 | 203,243,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | import numpy as np
import cv2
from PIL import Image
cap = cv2.VideoCapture(0)
cap.set(3,640)
cap.set(4,480)
#cap.set(5,1)
#cap.set(15,1)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Display the resulting frame
cv2.imshow('frame',gray)
edge = cv2.Canny(gray, 5, 300)
cv2.imshow('edge', edge)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if cv2.waitKey(1) & 0xFF == ord('s'):
cv2.imwrite('C:/capture/test.jpg', gray)
cv2.imwrite('C:/capture/edgetest.jpg', edge)
cv2.imwrite('C:/capture/1bit.png', edge, [cv2.IMWRITE_PNG_BILEVEL, 1])
image_file = Image.open("C:/capture/edgetest.jpg")
image_file = image_file.convert('1')
image_file.save('C:/capture/edgetest-bw.jpg')
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows() | [
"william.c.jones1981@gmail.com"
] | william.c.jones1981@gmail.com |
edd9ba49ef0c1db6c197e3b0069e1e0b71884094 | fabad4997deeedafa6fa501361d459a94a754d70 | /dm_memorytasks/__init__.py | e89aa8e2eb830cb05927e34a68310b681c575e1b | [
"Apache-2.0"
] | permissive | mbrukman/dm_memorytasks | 6119ec1628392d12e4ef94c8e7714b23e458ae09 | 7672decd503d34ea21150e5eeb453f84e91747ae | refs/heads/master | 2020-11-24T17:15:53.123045 | 2019-12-12T16:59:12 | 2019-12-12T16:59:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Python utilities for running dm_memorytasks."""
from dm_memorytasks import _load_environment
from dm_memorytasks._version import __version__
EnvironmentSettings = _load_environment.EnvironmentSettings
load_from_disk = _load_environment.load_from_disk
load_from_docker = _load_environment.load_from_docker
| [
"tomward@google.com"
] | tomward@google.com |
bfdfc1a62852507f68a014cbcc9ad012b1f7e16e | 9139bd5dad2c66f070d1eb01958a11a2af1c9835 | /game-again.py | 224f4ce078f24c31bd6fed0be854de5fba7b5cf7 | [] | no_license | anmolrajaroraa/python-reg-oct | 7223487b864d969e89f9daae2a77522405977f27 | acb62ad7c8acb78f348bdc47e5ed6230808d967c | refs/heads/master | 2020-08-04T09:10:25.152732 | 2019-11-08T08:57:28 | 2019-11-08T08:57:28 | 212,085,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | import pygame
pygame.init()
HEIGHT = 500
WIDTH = 1000
# red green blue (0-255)
BLACK = 0,0,0
WHITE = 255,255,255
RED = 255,0,0
RANDOM_COLOR = 100,150,200
gameboard = pygame.display.set_mode((WIDTH,HEIGHT))
while True:
print("!")
gameboard.fill( BLACK )
pygame.display.update( )
| [
"anmolarora1711@gmail.com"
] | anmolarora1711@gmail.com |
bfed5fdd83ea7f7730c54d041952feeacb748f12 | a9e40fbba7891ed2a5e0eaac79c326003c845891 | /baremetal_network_provisioning/tests/unit/drivers/hp/test_hp_snmp_provisioning_driver.py | 32eb4ad71f3defb5643db68974752910088c78c5 | [
"Apache-2.0"
] | permissive | priya-j/baremetal-network-provisioning | 1c404f9b4762355de70948165b5bc107c9650154 | 7ba50178da76b605a91aff03e0127f2cd4eae93e | refs/heads/master | 2021-01-18T07:43:59.829457 | 2016-01-29T09:42:47 | 2016-01-29T09:42:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,844 | py | # Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from baremetal_network_provisioning.common import constants as hp_const
from baremetal_network_provisioning.db import bm_nw_provision_db as db
from baremetal_network_provisioning.db import bm_nw_provision_models as models
from baremetal_network_provisioning.drivers.hp import (
hp_snmp_provisioning_driver as driver)
import contextlib
import mock
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.tests import base
class TestHPSNMPProvisioningDriver(base.BaseTestCase):
def setUp(self):
super(TestHPSNMPProvisioningDriver, self).setUp()
self.driver = driver.HPSNMPProvisioningDriver()
def test_create_port_with_switch_enabled(self):
"""Test create port for with enabled case."""
port_dict = self._get_port_payload()
bnp_phys_switch = models.BNPPhysicalSwitch
bnp_phys_port = models.BNPPhysicalSwitchPort
bnp_phys_switch.status = 'ENABLED'
bnp_phys_by_mac = 'get_bnp_phys_switch_by_mac'
with contextlib.nested(mock.patch.object(db, 'get_subnets_by_network',
return_value=["subnet"]),
mock.patch.object(db, bnp_phys_by_mac,
return_value=bnp_phys_switch),
mock.patch.object(db, 'get_bnp_phys_port',
return_value=bnp_phys_port)):
self.driver.create_port(port_dict)
def test_create_port_with_switch_disabled(self):
"""Test create port for with disabled case."""
port_dict = self._get_port_payload()
bnp_phys_port = models.BNPPhysicalSwitchPort
bnp_phys_switch = models.BNPPhysicalSwitch
bnp_phys_switch.status = 'DISABLED'
bnp_phys_by_mac = 'get_bnp_phys_switch_by_mac'
with contextlib.nested(mock.patch.object(db, 'get_subnets_by_network',
return_value=["subnet"]),
mock.patch.object(db, bnp_phys_by_mac,
return_value=bnp_phys_switch),
mock.patch.object(db, 'get_bnp_phys_port',
return_value=bnp_phys_port)):
self.assertRaises(ml2_exc.MechanismDriverError,
self.driver.create_port, port_dict)
def test_bind_port_to_segment_success(self):
"""Test bind port to segment for success case."""
port_dict = self._get_port_payload()
bnp_phys_switch = models.BNPPhysicalSwitch
bnp_phys_port = models.BNPPhysicalSwitchPort
bnp_mappings = models.BNPSwitchPortMapping
cred_dict = self._get_credentials_dict()
with contextlib.nested(
mock.patch.object(db,
'get_bnp_phys_switch_by_mac',
return_value=bnp_phys_switch),
mock.patch.object(db,
'get_bnp_phys_port',
return_value=bnp_phys_port),
mock.patch.object(db,
'get_all_bnp_swport_mappings',
return_value=bnp_mappings),
mock.patch.object(self.driver,
'_get_credentials_dict',
return_value=cred_dict),
mock.patch.object(self.driver,
'bind_port_to_segment',
return_value=hp_const.BIND_SUCCESS)):
value = self.driver.bind_port_to_segment(port_dict)
self.assertEqual(value, hp_const.BIND_SUCCESS)
def test_delete_port(self):
"""Test delete neutron port."""
bnp_mappings = models.BNPSwitchPortMapping
bnp_phys_switch = models.BNPPhysicalSwitch
bnp_phys_port = models.BNPPhysicalSwitchPort
bnp_ntrn_port = models.BNPNeutronPort
cred_dict = self._get_credentials_dict()
with contextlib.nested(mock.patch.object(db,
'get_bnp_switch_port_mappings',
return_value=bnp_mappings),
mock.patch.object(db,
'get_bnp_phys_switch',
return_value=bnp_phys_switch),
mock.patch.object(self.driver,
'_get_credentials_dict',
return_value=cred_dict),
mock.patch.object(db,
'get_bnp_phys_switch_port_by_id',
return_value=bnp_phys_port),
mock.patch.object(db,
'get_bnp_neutron_port_by_seg_id',
return_value=bnp_ntrn_port)):
self.driver.delete_port('321f506f-5f0d-435c-9c23-c2a11f78c3e3')
def _get_port_payload(self):
"""Get port payload for processing requests."""
port_dict = {'port':
{'segmentation_id': '1001',
'host_id': 'ironic',
'access_type': hp_const.ACCESS,
'switchports':
[{'port_id': 'Ten-GigabitEthernet1/0/35',
'switch_id': '44:31:92:61:89:d2'}],
'id': '321f506f-5f0d-435c-9c23-c2a11f78c3e3',
'network_id': 'net-id',
'is_lag': False}}
return port_dict
def _get_credentials_dict(self):
creds_dict = {}
creds_dict['ip_address'] = "1.1.1.1"
creds_dict['write_community'] = 'public'
creds_dict['security_name'] = 'test'
creds_dict['security_level'] = 'test'
creds_dict['auth_protocol'] = 'md5'
creds_dict['access_protocol'] = 'test1'
creds_dict['auth_key'] = 'test'
creds_dict['priv_protocol'] = 'aes'
creds_dict['priv_key'] = 'test_priv'
return creds_dict
| [
"selvakumar.s2@hp.com"
] | selvakumar.s2@hp.com |
20ede17c952b40d8bfe9406df93dd193f5dceb68 | b4ddc954a7dc0d24352de64a567c10c9e7231eee | /LeetCode/Pascal_Triangle.py | 19ffa5bc0886c62656dc9045ad7221ae44c9f5e0 | [] | no_license | sharadbhat/Competitive-Coding | 4d80c99093bf05a2213799c95467309cf3e40d07 | 79eec04cc6b1ac69295530bda1575ecb613a769e | refs/heads/master | 2023-07-05T02:25:33.397140 | 2023-06-27T05:38:12 | 2023-06-27T05:38:12 | 78,031,600 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # LeetCode
# https://leetcode.com/problems/pascals-triangle/description/
class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
if numRows == 0:
return []
l = [[1]]
for i in range(1, numRows):
k = [1]
for j in range(1, i):
k.append(l[i - 1][j - 1] + l[i - 1][j])
k.append(1)
l.append(k)
return l
| [
"sharad.mbhat@gmail.com"
] | sharad.mbhat@gmail.com |
342dd966e0ae8e7001e1e29509b365599ace7f56 | 4ddb74db1860561ead2b1127522b6e45d057f76b | /7_bond-distance_histogram.py | 59e3920313fe5abe202d5dd7ec24227392573434 | [] | no_license | itamblyn/FORCE | 25accbb24e049fe011b02fbc0b7a5c35437378ed | b3550159f8568ce35f510acb53418bbcb448fe17 | refs/heads/master | 2020-05-27T10:57:23.482976 | 2012-07-05T23:19:25 | 2012-07-05T23:19:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,756 | py | #! /usr/local/bin/python
import sys
if len (sys.argv) == 1:
print 'usage: ' + sys.argv[0] + ' distance.dat number_of_bond_length_bins [min_bond_length_value, max_bond_length_value]'
import numpy
input_filename = sys.argv[1]
inputFile = open (input_filename,'r')
number_of_bond_length_bins = int(sys.argv[2])
infoline = inputFile.readline() # skip over info line
min_isolated_atom_distance = float(infoline.split()[5])
max_isolated_atom_distance = float(infoline.split()[7])
distance_array = []
for line in inputFile:
distance_array.append(float(line.split()[0]))
inputFile.close()
if len(sys.argv) == 5:
min_bond_length_value = float(sys.argv[3])
max_bond_length_value = float(sys.argv[4])
else:
min_bond_length_value = min(distance_array)
max_bond_length_value = max(distance_array)
histogram = numpy.zeros(int(number_of_bond_length_bins + 1), dtype=numpy.float)
bond_length_bin_size = (max_bond_length_value - min_bond_length_value)/float(number_of_bond_length_bins)
for distance in distance_array:
bin = int((distance - min_bond_length_value)/bond_length_bin_size)
if bin > 0 and bin < len(histogram):
histogram[bin] += 1
histogram /= numpy.sum(histogram)
output_filename = input_filename.rpartition('.')[0] + '.hist'
outputFile = open (output_filename, 'w')
outputFile.write('# distance occupancy, isolated atom distance( ' + str(min_isolated_atom_distance) + ' , ' + str(max_isolated_atom_distance) + ' )\n')
for bin_index in range(number_of_bond_length_bins):
outputFile.write(repr(bin_index*bond_length_bin_size + bond_length_bin_size/2.0 + min_bond_length_value) + ' ')
outputFile.write(str(histogram[bin_index]) + ' ')
outputFile.write('\n')
outputFile.close()
| [
"tamblyn2@sierra"
] | tamblyn2@sierra |
cf1b2dbe6779ecf7958e0746ee7fb772c6a8579f | 680a8f2f04a06e82019c277f1de665ea4f5c13df | /castling/blog.py | b4defe360e402eb09552b5c3a13846db8cdd7fa4 | [] | no_license | narayanahari/castling.fun | d2a6157be4b38507588c95cb4f52d0dd4e213d25 | 69e11d2745fbbf6bbd5ab33d8ece3559b992311c | refs/heads/master | 2021-09-17T09:14:45.017216 | 2018-06-29T17:42:00 | 2018-06-29T17:42:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,577 | py | from flask import (
Blueprint, request, url_for, g, redirect, flash, render_template
)
from werkzeug.exceptions import abort
from castling.auth import login_required
from castling.db import get_db
bp = Blueprint('blog', __name__)
@bp.route('/')
def index():
db = get_db()
posts = db.execute(
'SELECT p.id, p.title, p.body, p.created, p.author_id, u.username'
' FROM post as p JOIN user as u ON p.author_id = u.id'
' ORDER BY created DESC').fetchall();
return render_template('blog/index.html', posts=posts)
@bp.route('/create', methods=('GET', 'POST'))
@login_required
def create():
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
error = None
if not title:
error = 'Title is required'
if error is not None:
flash(error)
else:
db = get_db()
db.execute('insert into post (author_id, title, body) values (?, ?, ?)',
(g.user['id'], title, body))
db.commit()
return redirect(url_for('blog.index'))
return render_template('blog/create.html')
def get_post(id, check_author=True):
post = get_db().execute(
'select p.id, title, body, created, author_id, username'
' from post p join user u on p.author_id = u.id'
' where p.id = ?', (id,)).fetchone()
if post is None:
abort(404, "Post id {0} doesn't exist.".format(id))
if check_author and post['author_id'] != g.user['id']:
abort(403)
return post
@bp.route('/<int:id>/update', methods=('GET', 'POST'))
@login_required
def update(id):
post = get_post(id)
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
error = None
if not title:
error = 'Title required'
elif not body:
error = 'Message content required'
if error is not None:
flash(error)
else:
db = get_db()
db.execute('update post set title = ?, body = ?'
'where author_id=?', (title, body, id))
db.commit()
return redirect(url_for('blog.index'))
return render_template('blog/update.html', post=post)
@bp.route('/<int:id>/delete', methods=('POST',))
@login_required
def delete(id):
get_post(id)
db = get_db()
db.execute('delete from post where id=?', (id,))
db.commit()
return redirect(url_for('blog.index'))
| [
"narhari@amazon.com"
] | narhari@amazon.com |
27bda1b1a714d283c53c57ab24bbbf6842074047 | 46d67307d64323df77aa35c36a6026863efb1258 | /P3/4/a/graph3.py | 3e1c2c735cddfd07fc16de83f2d443876bcf08f7 | [] | no_license | LFRusso/Intro_Fiscomp | 760a3bd8686f368eb024ef27d4b7706add877ea2 | 266e1d4ebf1e60db5035b3ba070f354b98f4f81d | refs/heads/master | 2020-05-16T06:56:15.431994 | 2019-09-17T00:49:38 | 2019-09-17T00:49:38 | 182,862,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | from matplotlib import pyplot as plt
import numpy as np
tau, theta = np.loadtxt('fort.3', delimiter = ',', unpack = True)
plt.plot(tau, theta, linewidth = 1)
plt.xlabel("T")
plt.ylabel("Theta (rad)")
plt.grid()
plt.title("Euler")
plt.savefig("euler.png")
| [
"lf.santos@usp.br"
] | lf.santos@usp.br |
ba79f0a7a16eee2f5d086bd7d5e06adec8636825 | f10d45aecbfccb3f469ab0c4ae55fc0f256c9004 | /Functions/chr_ord.py | 80c8745e0e21329911e636eedb326846d34957cc | [] | no_license | Do-code-ing/Python_Built-ins | c34c1cea19a2cef80ab3a16d050e8825af0feb59 | 03b2f277acde4fce00bb521e3a0b8c0469b39879 | refs/heads/master | 2023-07-29T15:30:00.693005 | 2021-09-04T18:48:18 | 2021-09-04T18:48:18 | 354,467,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | # chr(i) : character(int)
# 유니코드 포인트 정수 값을 입력하면 해당 정수 값의 유니코드 문자열을 반환한다.
# i 가 0 ~ 1,114,111(16진수로 0x10FFFF)를 벗어나면 'ValueError'가 발생한다.
# 정수를 문자로
print(chr(8364))
# '€'
# ord(c) : ordinary character(character)
# 유니코드 문자열이 주어지면 해당 문자의 유니코드 코드 포인트 정수 값을 반환한다.
# chr() 와 반대로 작동한다.
# 문자를 정수로
print(ord("€"))
# 8364 | [
"zxcvbn658@naver.com"
] | zxcvbn658@naver.com |
ea0d2d7415c8d98590a6caf8cc4bb1aa659fd24e | 1457bf059b94e04d4d512012b28a924167c68938 | /NetworkBehaviour/Basics/Normalization_Sparse.py | 164188463a9a59cb81bc31b3411633742dab0ba2 | [] | no_license | YaminaDJOUDI/PymoNNto | e063c81547d41a9841ff8f8071c4d6347ce792da | 807aa7e0ba38cb29ad7839b39f29752da00eee78 | refs/heads/master | 2023-07-08T03:06:41.722292 | 2021-08-04T11:30:52 | 2021-08-04T11:30:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | import numpy as np
scipy.sparse
def normalize_synapse_attr_sparse(src_attr, target_attr, target_value, neurons, synapse_type):
neurons.temp_weight_sum = neurons.get_neuron_vec()
for s in neurons.afferent_synapses[synapse_type]:
if 'sparse' in s.tags:
s.dst.temp_weight_sum += np.array(getattr(s, src_attr).sum(1)).flatten()
else:
s.dst.temp_weight_sum += np.sum(getattr(s, src_attr), axis=1)
neurons.temp_weight_sum /= target_value
for s in neurons.afferent_synapses[synapse_type]:
if 'sparse' in s.tags:
W = getattr(s, target_attr)
W.data /= np.array(neurons.temp_weight_sum[W.indices]).reshape(W.data.shape)
else:
setattr(s, target_attr, getattr(s, target_attr) / (s.dst.temp_weight_sum[:, None]+(s.dst.temp_weight_sum[:, None]==0)))
| [
"mv15go@gmail.com"
] | mv15go@gmail.com |
c4ae6a3d42ad8bf39696371b8e7e988fe8327e55 | 4e972d4ab53aa698ee310bf4b056ab0db600077f | /SQL_connector.py | a91528346126669d35f3cdf3f20818f32315ea8e | [] | no_license | askalach/oncocentre_bot | c8ce827c55a06f6411672d3ee12adf437401097d | 7838fd4ab5555b92aba719a8f4cd9d2b768361bc | refs/heads/main | 2023-05-04T03:16:43.228011 | 2021-05-20T11:10:54 | 2021-05-20T11:10:54 | 367,136,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,806 | py | import logging
import sqlite3
from datetime import datetime
import settings
from data_loader import DataLoader
class SQLite_saver:
def __init__(self, connector: sqlite3.Connection):
self.connector = connector
self.cursor = self.connector.cursor()
# self.connector.set_trace_callback(print)
def close_saver(self):
self.connector.close()
def create_documents(self):
sql = '''
CREATE TABLE IF NOT EXISTS documents (
name TEXT,
protocolid TEXT,
treat_code TEXT,
treat_company_id TEXT,
treat_hash TEXT,
treat_place TEXT,
href TEXT,
author TEXT,
date TEXT
);
'''
self.cursor.execute(sql)
def check_dockument(self, data):
sql = 'SELECT * FROM documents WHERE protocolid = ?'
self.cursor.execute(sql, (data['protocolid'],))
return self.cursor.fetchall()
def add_documents(self, data):
added_documents = []
sql = '''
INSERT INTO documents(
name,
protocolid,
treat_code,
treat_company_id,
treat_hash,
treat_place,
href,
author,
date
) values(?, ?, ?, ?, ?, ?, ?, ?, ?);
'''
for item in data:
if not self.check_dockument(item):
self.cursor.execute(sql, (
item['name'],
item['protocolid'],
item['treat_code'],
item['treat_company_id'],
item['treat_hash'],
item['treat_place'],
item['href'],
item['author'],
item['date']
))
self.connector.commit()
added_documents.append(item)
if added_documents:
logging.info(f'Added {len(added_documents)} new documents:')
for document in added_documents:
logging.info(f"{document['name']}\t{document['author']}\t{document['date']}")
else:
logging.info('no new documents')
return added_documents
def get_document(self, id):
self.cursor.execute('SELECT * FROM documents WHERE protocolid = ?', (id,))
return self.cursor.fetchall()
def main():
with sqlite3.connect(settings.DB) as sqlite_conn:
sqlite_saver = SQLite_saver(sqlite_conn)
sqlite_saver.create_documents()
dl = DataLoader(settings.URL)
sqlite_saver.add_documents(dl.get_data())
# sqlite_saver.close_saver()
if __name__ == '__main__':
main() | [
"askalach@gmail.com"
] | askalach@gmail.com |
d294763355f1cde1ecd4be75eef850fc95e0f6f8 | b5c343216f13ef4e4ee30274cca7a496848ccb9b | /posts/migrations/0012_auto_20200527_1626.py | 694a24f8d3fa9dcc3befcf046763f8f49e5a8164 | [] | no_license | nikhanal/blogproject | 3bced7b88e4822ed647ae7101e73d8254e226965 | eb09107415ebe15475f6425a3c409f41b6d918c1 | refs/heads/master | 2023-08-15T02:25:31.272914 | 2020-06-06T07:55:03 | 2020-06-06T07:55:03 | 267,789,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # Generated by Django 3.0.6 on 2020-05-27 16:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0011_auto_20200527_1508'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='comment_count',
),
migrations.AlterField(
model_name='post',
name='overview',
field=models.TextField(),
),
]
| [
"khanal.nishan28@gmail.com"
] | khanal.nishan28@gmail.com |
96e8a2c7b7b68b5270107da8cea93260d995e304 | 9b15c5f6a65117472983172b0864dbd0f1bc1937 | /qsauto/qsauto/items.py | d344dc18e0acb76a3482242ddcb0ef151b61f824 | [] | no_license | Chunge135/Python_System | c4621ebf5161cb2477817b8959d01b88281eca64 | 396358a78b89e26cd3606199ec206191aed45cb4 | refs/heads/master | 2022-10-25T13:25:21.056269 | 2020-06-19T08:51:38 | 2020-06-19T08:51:38 | 273,444,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class QsautoItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
content = scrapy.Field()
link = scrapy.Field()
| [
"63931102+Chunge135@users.noreply.github.com"
] | 63931102+Chunge135@users.noreply.github.com |
e0af98a161bb2fe76f40a9dab414307691aed916 | cdecfcc56973ae143f04a9e92225c5fc90a052ab | /tracing/tracing/value/diagnostics/reserved_infos.py | 13aedf28520e992994fa0efa641eba6d7f919036 | [
"BSD-3-Clause"
] | permissive | eugenesavenko/catapult | 8e43adab9a4650da4e8e1860f3b9b49936955aae | f2ad70de40a8f739438d89b0c8d5ed6509b3cbe6 | refs/heads/master | 2021-05-05T17:31:51.483972 | 2017-09-13T15:10:56 | 2017-09-13T15:10:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,662 | py | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class _Info(object):
def __init__(self, name, _type=None, entry_type=None):
self._name = name
self._type = _type
if entry_type is not None and self._type != 'GenericSet':
raise ValueError(
'entry_type should only be specified if _type is GenericSet')
self._entry_type = entry_type
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def entry_type(self):
return self._entry_type
ANGLE_REVISIONS = _Info('angleRevisions', 'GenericSet', str)
ARCHITECTURES = _Info('architectures', 'GenericSet', str)
BENCHMARKS = _Info('benchmarks', 'GenericSet', str)
BENCHMARK_START = _Info('benchmarkStart', 'DateRange')
BOTS = _Info('bots', 'GenericSet', str)
BUG_COMPONENTS = _Info('bugComponents', 'GenericSet', str)
BUILDS = _Info('builds', 'GenericSet', int)
CATAPULT_REVISIONS = _Info('catapultRevisions', 'GenericSet', str)
CHROMIUM_COMMIT_POSITIONS = _Info('chromiumCommitPositions', 'GenericSet', int)
CHROMIUM_REVISIONS = _Info('chromiumRevisions', 'GenericSet', str)
GPUS = _Info('gpus', 'GenericSet', str)
GROUPING_PATH = _Info('groupingPath')
LABELS = _Info('labels', 'GenericSet', str)
LOG_URLS = _Info('logUrls', 'GenericSet', str)
MASTERS = _Info('masters', 'GenericSet', str)
MEMORY_AMOUNTS = _Info('memoryAmounts', 'GenericSet', int)
MERGED_FROM = _Info('mergedFrom', 'RelatedHistogramMap')
MERGED_TO = _Info('mergedTo', 'RelatedHistogramMap')
OS_NAMES = _Info('osNames', 'GenericSet', str)
OS_VERSIONS = _Info('osVersions', 'GenericSet', str)
OWNERS = _Info('owners', 'GenericSet', str)
PRODUCT_VERSIONS = _Info('productVersions', 'GenericSet', str)
RELATED_NAMES = _Info('relatedNames', 'GenericSet', str)
SKIA_REVISIONS = _Info('skiaRevisions', 'GenericSet', str)
STORIES = _Info('stories', 'GenericSet', str)
STORYSET_REPEATS = _Info('storysetRepeats', 'GenericSet', int)
STORY_TAGS = _Info('storyTags', 'GenericSet', str)
TAG_MAP = _Info('tagmap', 'TagMap')
TRACE_START = _Info('traceStart', 'DateRange')
TRACE_URLS = _Info('traceUrls', 'GenericSet', str)
V8_COMMIT_POSITIONS = _Info('v8CommitPositions', 'DateRange')
V8_REVISIONS = _Info('v8Revisions', 'GenericSet', str)
WEBRTC_REVISIONS = _Info('webrtcRevisions', 'GenericSet', str)
def GetTypeForName(name):
for info in globals().itervalues():
if isinstance(info, _Info) and info.name == name:
return info.type
def AllInfos():
for info in globals().itervalues():
if isinstance(info, _Info):
yield info
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
bbfd06063961347aeeab79c3e872547e87083676 | f46c230cd989261a913ff5c24e16ddcdf154fbb7 | /apps.py | 23896687f5336785dc53eb76b175ee8c72257de6 | [] | no_license | penolove/WebPtt_django | 17a78b6318a5caf7a25734e1ab9443225de6c631 | c33462d173664867fe9b0fd43faaa8fe11ded64d | refs/heads/master | 2021-05-02T01:02:06.372416 | 2017-07-03T03:53:37 | 2017-07-03T03:53:37 | 78,621,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class pttWebConfig(AppConfig):
name = 'pttWeb'
| [
"penolove15@gmail.com"
] | penolove15@gmail.com |
286e5a84629ddfa8a87808ef1f9d99445655a7e5 | 7e79ca343d8d3246fc783161673550f6e4ae8896 | /tests/test_search.py | 73b1bddf6e9c87215ffb0554d6c68407a13132a2 | [
"MIT"
] | permissive | interrogator/buzz | 5ba0907115aa29efc24f016d1345a0371b91350a | 7627b8ce4a286f65388f0825487441df00055b39 | refs/heads/master | 2023-04-02T03:18:01.691139 | 2020-11-19T12:00:21 | 2020-11-19T12:00:21 | 163,623,092 | 42 | 2 | MIT | 2023-03-25T00:51:45 | 2018-12-30T22:55:18 | Python | UTF-8 | Python | false | false | 2,544 | py | import unittest
from buzz.corpus import Corpus
class TestSearch(unittest.TestCase):
@classmethod
def setUpClass(cls):
""" get_some_resource() is slow, to avoid calling it for each test use setUpClass()
and store the result as class variable
"""
super().setUpClass()
cls.parsed = Corpus("tests/testing-parsed")
cls.loaded = cls.parsed.load()
def test_non_loaded(self):
# todo: find out why .equals isn't the same.
res = self.parsed.depgrep("w/book/ = x/NOUN/")
lres = self.loaded.depgrep("w/book/ = x/NOUN/")
self.assertEqual(len(res), 3)
self.assertTrue(list(res._n) == list(lres._n))
res = self.parsed.depgrep("l/book/")
lres = self.loaded.depgrep("l/book/")
self.assertEqual(len(res), 6)
self.assertTrue(list(res.index) == list(lres.index))
self.assertTrue(list(res._n) == list(lres._n))
def test_bigrams(self):
j = self.loaded.just.words("(?i)jungle")
self.assertEqual(len(j), 6)
big = self.loaded.bigrams.depgrep("l/jungle/", from_reference=True).table(
show=["x"]
)
self.assertTrue("punct" in big.columns)
self.assertEqual(big.shape[1], 5)
no_punct = self.loaded.skip.wordclass.PUNCT
big = no_punct.bigrams.lemma("jungle", from_reference=False).table(show=["x"])
self.assertFalse("punct" in big.columns)
self.assertEqual(big.shape[1], 3)
def test_depgrep(self):
res = self.loaded.depgrep("L/book/")
self.assertEqual(len(res), 3)
res = self.loaded.depgrep('x/^NOUN/ -> l"the"', case_sensitive=False)
sup = self.loaded.depgrep('p/^N/ -> l"the"', case_sensitive=False)
# sup is a superset of res
self.assertTrue(all(i in sup.index for i in res.index))
self.assertEqual(len(sup), 28)
self.assertEqual(len(res), 24)
self.assertTrue((res.x == "NOUN").all())
# let us check this manually
# get all rows whose lemma is 'the'
the = self.loaded[self.loaded["l"] == "the"]
count = 0
# iterate over rows, get governor of the, lookup this row.
# if row is a noun, check that its index is in our results
for (f, s, _), series in the.T.items():
gov = series["g"]
gov = self.loaded.loc[f, s, gov]
if gov.x == "NOUN":
self.assertTrue(gov.name in res.index)
count += 1
self.assertEqual(count, len(res))
| [
"mcddjx@gmail.com"
] | mcddjx@gmail.com |
2d743a13cd7cceb8c568ddc627bf234b71489089 | 3ced0d61d1f8043bfaf2d8f4a6872b1c074a41fd | /articles_scrapers/items/article_item.py | 3e93f823358ec26291f0fd765182bcb08ea54eb3 | [] | no_license | jennettefir/news | 7de750cf22b5c4ca72dc2ec24cacdc6bf76857c8 | 26bf188ac517c69fe5c44db8d1309174b5e38a6e | refs/heads/master | 2023-01-01T03:11:59.212237 | 2020-10-26T17:14:17 | 2020-10-26T17:14:17 | 295,839,858 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | import scrapy
from scrapy.loader.processors import TakeFirst, MapCompose
from articles_scrapers.utils import strip_str
class ArticleItem(scrapy.Item):
title = scrapy.Field(serializer=str,
input_processor=MapCompose(strip_str),
output_processor=TakeFirst())
link = scrapy.Field(serializer=str,
output_processor=TakeFirst())
description = scrapy.Field(serializer=str,
input_processor=MapCompose(strip_str),
output_processor=TakeFirst())
author = scrapy.Field(serializer=str,
output_processor=TakeFirst())
text = scrapy.Field(serializer=str,
output_processor=TakeFirst())
text_filepath = scrapy.Field(serializer=str,
output_processor=TakeFirst())
publication_date = scrapy.Field(serializer=str,
output_processor=TakeFirst())
guid = scrapy.Field(serializer=str,
output_processor=TakeFirst())
categories = scrapy.Field(serializer=str,
output_processor=TakeFirst())
image_url = scrapy.Field(serializer=str,
output_processor=TakeFirst())
credit = scrapy.Field(serializer=str,
output_processor=TakeFirst())
| [
"noreply@github.com"
] | jennettefir.noreply@github.com |
ef60bbe494adc9b1d959a59bee964c438e7f3dcc | 55c13a144da515c697f1b6d2c376d67135f25570 | /IT-Essentials-Oefeningen/3_Condities/slides_opgave3.2_p15.py | 65f5917835614a46c1c351413356262c7e5a0041 | [] | no_license | JensHuskensPXL/1TIN_IT_Essentials | a6135b9937338ada74c03fe09a61e589d6483f2b | 14d978e70200839d46f9c11833d50e4b0df43f89 | refs/heads/master | 2020-03-30T12:34:36.047443 | 2018-10-03T09:00:26 | 2018-10-03T09:00:26 | 151,230,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | print("Is 1/10 groter dan 0.10? " + str(1 / 10 > 0.10))
print("Is 1/10 gelijk aan 0.10? " + str(1 / 10 == 0.10))
print("is 1/3 groter dan 0.33? " + str(1/3 > 0.33))
print("is 1/3 gelijk aan 0.33? " + str(1/3 == 0.33))
print("is (1/3) * 3 groter dan 1? " + str((1 / 3) * 3 > 1))
print("is 1/3 * 3 gelijk aan 1? " + str((1 / 3) * 3 == 1))
| [
"jens.huskens@student.pxl.be"
] | jens.huskens@student.pxl.be |
3e0d7702e1f23ccd54a568d363e1d04484642374 | 1e307f2a4120a0774d0ba4376462a00a8fa642cb | /LEUpdatesScreen.py | 8d0d7737c11c8a4c7776f31cb6aa2bbe4650f426 | [
"MIT"
] | permissive | markusj1201/Le_Utility_1d | 13980d2861ce3a57731e5e4e7ff37bb38fe433d9 | 46f299bcafeb07ee4f009395318213af764cb991 | refs/heads/master | 2020-08-29T16:14:27.416971 | 2019-10-28T20:29:11 | 2019-10-28T20:29:11 | 218,087,824 | 1 | 0 | MIT | 2019-10-28T16:04:13 | 2019-10-28T15:59:47 | Python | UTF-8 | Python | false | false | 239 | py | import sys
sys.path.append('../')
from PyQt5 import QtGui
class Window(QtGui.QMainWindow):
def __init__(self):
super(Window, self).__init__()
self.setGeometry(50, 50, 500, 300)
self.SetWindowTitle()
| [
"noreply@github.com"
] | markusj1201.noreply@github.com |
de198265ca023fde36b1896bd7f7a3c4b83a552d | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /bigg/bigg/torch_ops/tensor_ops.py | 9f544ab7efd4d3e2c752d63f5d72056f16c23cef | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 3,956 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: skip-file
import torch
from torch.nn import Module
from torch.nn.parameter import Parameter
from torch.autograd import Function
import numpy as np
from bigg.common.consts import t_float
class MultiIndexSelectFunc(Function):
@staticmethod
def forward(ctx, idx_froms, idx_tos, *mats):
assert len(idx_tos) == len(idx_froms) == len(mats)
cols = mats[0].shape[1]
assert all([len(x.shape) == 2 for x in mats])
assert all([x.shape[1] == cols for x in mats])
num_rows = sum([len(x) for x in idx_tos])
out = mats[0].new(num_rows, cols)
for i, mat in enumerate(mats):
x_from = idx_froms[i]
x_to = idx_tos[i]
if x_from is None:
out[x_to] = mat.detach()
else:
assert len(x_from) == len(x_to)
out[x_to] = mat[x_from].detach()
ctx.idx_froms = idx_froms
ctx.idx_tos = idx_tos
ctx.shapes = [x.shape for x in mats]
return out
@staticmethod
def backward(ctx, grad_output):
idx_froms, idx_tos = ctx.idx_froms, ctx.idx_tos
list_grad_mats = [None, None]
for i in range(len(idx_froms)):
x_from = idx_froms[i]
x_to = idx_tos[i]
if x_from is None:
grad_mat = grad_output[x_to].detach()
else:
grad_mat = grad_output.new(ctx.shapes[i]).zero_()
grad_mat[x_from] = grad_output[x_to].detach()
list_grad_mats.append(grad_mat)
return tuple(list_grad_mats)
class MultiIndexSelect(Module):
def forward(self, idx_froms, idx_tos, *mats):
return MultiIndexSelectFunc.apply(idx_froms, idx_tos, *mats)
multi_index_select = MultiIndexSelect()
def test_multi_select():
a = Parameter(torch.randn(4, 2))
b = Parameter(torch.randn(3, 2))
d = Parameter(torch.randn(5, 2))
idx_froms = [[0, 1], [1, 2], [3, 4]]
idx_tos = [[4, 5], [0, 1], [2, 3]]
c = multi_index_select(idx_froms, idx_tos, a, b, d)
print('===a===')
print(a)
print('===b===')
print(b)
print('===d===')
print(d)
print('===c===')
print(c)
t = torch.sum(c)
t.backward()
print(a.grad)
print(b.grad)
print(d.grad)
class PosEncoding(Module):
def __init__(self, dim, device, base=10000, bias=0):
super(PosEncoding, self).__init__()
p = []
sft = []
for i in range(dim):
b = (i - i % 2) / dim
p.append(base ** -b)
if i % 2:
sft.append(np.pi / 2.0 + bias)
else:
sft.append(bias)
self.device = device
self.sft = torch.tensor(sft, dtype=t_float).view(1, -1).to(device)
self.base = torch.tensor(p, dtype=t_float).view(1, -1).to(device)
def forward(self, pos):
with torch.no_grad():
if isinstance(pos, list):
pos = torch.tensor(pos, dtype=t_float).to(self.device)
pos = pos.view(-1, 1)
x = pos / self.base + self.sft
return torch.sin(x)
if __name__ == '__main__':
# test_multi_select()
pos_enc = PosEncoding(128, 'cpu')
print(pos_enc([1, 2, 3]))
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
b7b026f7642d82363d9802fe0d817ba66118aad4 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ec_11022-1357/sdB_EC_11022-1357_lc.py | 350257c414f2078d50e83da141fccc17f22aa32c | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[166.190667,-14.236356], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_EC_11022-1357 /sdB_EC_11022-1357_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
0bd82e74ba3c6621cb7fa14b9f43311bc864df59 | 3a28b1a12d0710c06f6360381ad8be6cf3707907 | /modular_model/triHPC/triHPCThermo/HPCAllTrays23CstmVapN2.py | b31256787ee26b7321199ab3098b7e3d1d66394a | [] | no_license | WheatZhang/DynamicModelling | 6ce1d71d3b55176fd4d77a6aedbaf87e25ce4d02 | ea099245135fe73e8c9590502b9c8b87768cb165 | refs/heads/master | 2020-06-15T14:12:50.373047 | 2019-07-05T01:37:06 | 2019-07-05T01:37:06 | 195,319,788 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | def VapN2(P,T,x_N2):
x = (P-5.50184878e+02)/3.71707400e-01
y = (T--1.77763832e+02)/1.81029000e-02
z = (x_N2-9.82420040e-01)/2.44481265e-03
output = \
1*-8.60567815e-01+\
z*1.86073097e+00+\
y*8.60696199e-01+\
x*-4.21414345e-01
y_N2 = output*1.31412243e-03+9.90969573e-01
return y_N2 | [
"1052632241@qq.com"
] | 1052632241@qq.com |
17035f0daddf4031869431bf0f4cdf4e24f57d18 | 15875d185c6952b4215715dfce1ef1c2bccd8f8c | /PythonApp/Python/Python-3.py | 187b6821ba7ad786c4b92cbb5047e1cfbbb4fdbe | [] | no_license | King-Key/Blogger | 8f7cde7c9d86ed94bc341a321719922132eb988b | dcbce25e7da0ea7f52457ddef9f0499aabd9e0b7 | refs/heads/master | 2022-10-04T03:29:20.025579 | 2022-09-04T16:26:39 | 2022-09-04T16:26:39 | 150,231,725 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-11-03 22:55:40
# @Author : King-Key
# @Email : guo_wang_113@163.com
# @Link : https://king-key.github.io
import math
print(dir(math))
file = open("./课程内容差异化调研.md", "wb")
print(file.name)
print(file.closed)
print(file.mode)
file1 = open("test.txt", "w")
file1.write('hello world')
file1.close()
file2 = open("test.txt", "r+")
data = file2.read()
print(data)
print(file2.tell())
import os
os.remove("test.txt")
print(os.getcwd())
print(os.chdir("/home/king-key/下载/"))
try:
file3=open("test.txt","r")
file3.write("hello")
except IOError:
print("error")
else:
print("true")
file3.close() | [
"guo_wang_113@163.com"
] | guo_wang_113@163.com |
401c0dc13225c97610ce48f99cbceb713f804d1b | 6244fc26a16664cd4cd4db096c1938773d9d435a | /custom scripts/create_roc_plot_multiple.py | 885b5594b03111483c6687af32ffd250880f25ec | [] | no_license | Folkert94/bioinf | c3a5d81c708ff046522600cd1ec698acaa8c2f52 | 022150846806b853a1df96ced104368402722bb3 | refs/heads/master | 2020-07-28T09:45:40.092432 | 2019-10-05T10:22:49 | 2019-10-05T10:22:49 | 209,384,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,714 | py | #!/usr/bin/python
# This script reads and parses your previously obtained results out of a blast output file, and a benchmark output file.
# It then creates the corresponding ROC plot, exports the coordinates and calculates the AUC.
import argparse
import numpy
import matplotlib
matplotlib.use('AGG')
import pylab
def parse_blast_results(filename):
"""
Parse every protein pair's e-value out of a BLAST results file.
:param filename: input file with BLAST results.
:return: dictionary with a tuple of two UniProt IDs (key), and the corresponding e-value from BLAST (value).
"""
blast_results = {}
with open(filename,'r') as f:
for line in f:
line = line.rstrip()
arr = line.split("\t")
if len(arr) != 3:
print("Warning: the following line does not have three elements separated by a tab:\n", line)
elif arr[0] == arr[1]:
print("Warning: Comparing protein to itself:", arr[0])
key = (arr[0], arr[1])
if arr[2] == "NA": # Substitute protein pairs whose e-value is
value = 1e6 # not available with an e-value of 1 million
else:
value = float(arr[2])
blast_results[key] = value
return blast_results
def parse_benchmark_results(filename):
"""
Parse every protein pair's classification out of the benchmark file.
:param filename: input file with benchmark classifacations.
:return: dictionary with a tuple of two UniProt IDs (key), and the corresponding call (value).
"""
benchmark_results = {}
with open(filename,'r') as f:
for line in f:
line = line.rstrip()
arr = line.split("\t")
if len(arr) < 3:
print("Warning: the following line does not have three elements separated by a tab:\n", line)
elif arr[0] == arr[1]:
print("Warning: Comparing protein to itself:", arr[0])
# Benchmark classifications are symmetric, so add both possible keys:
key1 = (arr[0],arr[1])
key2 = (arr[1],arr[0])
value = arr[2]
benchmark_results[key1] = value
benchmark_results[key2] = value
return benchmark_results
def integrate(x, y):
"""
Calculate the Area Under the Curve (AUC) for a given list of coordinates
:param x: a list of x-coordinates
:param y: a list of y-coordinates
:return: a float with the surface area under the curve described by x and y
"""
auc = 0.
last_x = x[0]
last_y = y[0]
for cur_x, cur_y in list(zip(x, y))[1:]:
#########################
### START CODING HERE ###
#########################
auc += numpy.trapz([last_y, cur_y], [last_x, cur_x])
#########################
### END CODING HERE ###
#########################
last_x = cur_x
last_y = cur_y
return auc
def roc_plot(blast_evalues, benchmark_dict, png_filename, evalue_blast, threshold, clr):
"""
Draw the ROC plot for a given set of e-values and corresponding benchmark classifications.
:param blast_evalues: the dictionary produced by parse_blast_results()
:param benchmark_dict: the dictionary produced by parse_benchmark_results()
"""
### Create the lists of coordinates
x = [0] # array of the ROC plot's x-coordinates: False Positive Rate = FP/(FP+TN)
y = [0] # array of the ROC plot's y-coordinates: True Positive Rate = TP/(TP+FN)
last_evalue = -1
evalues = [(v, k) for k, v in blast_evalues.items()] # List of tuples consisting of (evalue, protein_pair)
sorted_evalues = sorted(evalues)
for evalue, protein_pair in sorted_evalues:
#########################
### START CODING HERE ###
#########################
# Iterate through the protein pairs, in order of ascending e-value
# Determine whether it is
# different -> actual negative, thus a false positive (x)
# similar -> actual positive, thus a true positive (y)
# Increase the respective value and add a new coordinate for every unique e-value
# If the e-value is the same as the last one, only increase x or y of the last coordinate
# Ignore entries in the benchmark_dict classified as "ambiguous" and decide how to handle blast NA results
if protein_pair not in benchmark_dict:
continue
if benchmark_dict[protein_pair] == "different":
if evalue == last_evalue:
x[-1] += 1
else:
x.append(x[-1] + 1)
y.append(y[-1])
if benchmark_dict[protein_pair] == "similar":
if evalue == last_evalue:
y[-1] += 1
else:
x.append(x[-1])
y.append(y[-1] + 1)
#########################
### END CODING HERE ###
#########################
last_evalue = evalue
# In order to get the rates for every coordinate we divide by the total number (last entry)
x = numpy.array(x) / float(x[-1])
y = numpy.array(y) / float(y[-1])
### Figure out the AUC
auc = integrate(x, y)
### Draw the plot and write it to a file
pylab.plot(x, y, label="BLAST eval={0}, auc={1:.3f}".format(evalue_blast, auc), color=clr)
### Write coordinates to a file
with open(png_filename.split('.')[0] + '_xy.tsv','w') as f:
for a,b in zip(x,y):
f.write(str(a) + '\t' + str(b) + '\n')
return auc
def roc_plot_psi(blast_evalues, benchmark_dict, png_filename, evalue_blast, threshold, clr):
"""
Draw the ROC plot for a given set of e-values and corresponding benchmark classifications.
:param blast_evalues: the dictionary produced by parse_blast_results()
:param benchmark_dict: the dictionary produced by parse_benchmark_results()
"""
### Create the lists of coordinates
x = [0] # array of the ROC plot's x-coordinates: False Positive Rate = FP/(FP+TN)
y = [0] # array of the ROC plot's y-coordinates: True Positive Rate = TP/(TP+FN)
last_evalue = -1
evalues = [(v, k) for k, v in blast_evalues.items()] # List of tuples consisting of (evalue, protein_pair)
sorted_evalues = sorted(evalues)
for evalue, protein_pair in sorted_evalues:
#########################
### START CODING HERE ###
#########################
# Iterate through the protein pairs, in order of ascending e-value
# Determine whether it is
# different -> actual negative, thus a false positive (x)
# similar -> actual positive, thus a true positive (y)
# Increase the respective value and add a new coordinate for every unique e-value
# If the e-value is the same as the last one, only increase x or y of the last coordinate
# Ignore entries in the benchmark_dict classified as "ambiguous" and decide how to handle blast NA results
if protein_pair not in benchmark_dict:
continue
if benchmark_dict[protein_pair] == "different":
if evalue == last_evalue:
x[-1] += 1
else:
x.append(x[-1] + 1)
y.append(y[-1])
if benchmark_dict[protein_pair] == "similar":
if evalue == last_evalue:
y[-1] += 1
else:
x.append(x[-1])
y.append(y[-1] + 1)
#########################
### END CODING HERE ###
#########################
last_evalue = evalue
# In order to get the rates for every coordinate we divide by the total number (last entry)
x = numpy.array(x) / float(x[-1])
y = numpy.array(y) / float(y[-1])
### Figure out the AUC
auc = integrate(x, y)
### Draw the plot and write it to a file
pylab.plot(x, y, label="PSIBLAST eval={0}, auc={1:.3f}".format(evalue_blast, auc), linestyle="dashed", color=clr)
### Write coordinates to a file
with open(png_filename.split('.')[0] + '_xy.tsv','w') as f:
for a,b in zip(x,y):
f.write(str(a) + '\t' + str(b) + '\n')
return auc
def main(blast_results_map, benchmark_results_file, png_file):
pylab.figure()
pylab.plot([0,1],[0,1],'--k')
pylab.xlabel('False Positive Rate')
pylab.ylabel('True Positive Rate')
pylab.title('Plots BLAST/PSI-BLAST')
colors = ['r', 'b', 'g', 'c', 'm', 'y']
evalues = [10.0, 1000.0, 1000000.0]
auc_list = []
for i in range(len(evalues)):
evalue_blast = evalues[i]
# Parse the input files and retrieve every protein pair's e-value and benchmark classification.
blast_evalues = parse_blast_results("{0}/{0}{1}".format(blast_results_map, evalues[i]))
benchmark_results = parse_benchmark_results(benchmark_results_file)
# Draw and save the ROC plot
roc_plot(blast_evalues, benchmark_results, png_file, evalue_blast, evalues[i], colors[i])
for j in range(len(evalues)):
evalue_psiblast = evalues[j]
# Parse the input files and retrieve every protein pair's e-value and benchmark classification.
blast_evalues = parse_blast_results("output_psiblast/output_psiblast{0}".format(evalues[j]))
benchmark_results = parse_benchmark_results(benchmark_results_file)
# Draw and save the ROC plot
roc_plot_psi(blast_evalues, benchmark_results, png_file, evalue_psiblast, evalue_psiblast, colors[j])
pylab.legend()
pylab.savefig(png_file)
# ADDITIONAL CODE FOR AUC VS EVALUE PLOTS
# pylab.close()
#
# pylab.figure()
# pylab.xlabel('log E-values')
# pylab.ylabel('AUC')
# pylab.title('AUC vs E-values PSI-BLAST')
# pylab.plot(numpy.log(evalues), auc_list)
# pylab.savefig("auc-vs-evalue PSI-BLAST.png")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Draw and save a ROC plot to a file")
parser.add_argument("-iblast","--input_blast_results", help="map with tab-separated (PSI-)BLAST results files", required=True)
parser.add_argument("-ibench","--input_benchmark_results", help="tab-separated benchmark classification file", required=True)
parser.add_argument("-o", "--output_png", help="output png file", required=True)
args = parser.parse_args()
blast_results_map = args.input_blast_results
benchmark_file = args.input_benchmark_results
png_file = args.output_png
main(blast_results_map,benchmark_file, png_file)
| [
"folkertstijnman@gmail.com"
] | folkertstijnman@gmail.com |
b86fa99f8c6307b908df6db6c0fd826f50421a8e | 646a83d3de5ff2d2dc0c6f7efbd3f459a6479a63 | /HW2TermStructure/Bond.py | bca23be52fcef20ed362fa35865a4a3a68a11504 | [] | no_license | Wangvory/AdvQuantFin | f35454f04ddcb80e80bd76bcf7e0e378322113ae | c198a7b04d4e88996c4baec2f926d71d566faddf | refs/heads/master | 2020-12-26T20:57:05.019188 | 2020-12-14T19:30:18 | 2020-12-14T19:30:18 | 237,636,629 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,250 | py | import math
class Bond(object):
    """A fixed-coupon bond with YTM and bootstrapped spot-rate calculations.

    Conventions used throughout: ``coupon`` and all rates are percent per
    annum; prices are quoted as a percent of face value; the bisection
    searches run over annual rates in [0, 100] with a 1e-4 price tolerance.
    """
    def __init__(self, name, coupon, issue_date, maturity_date, compounding_frequency_per_annum):
        self._name = name
        self._coupon = coupon
        self._issue_date = issue_date
        self._maturity_date = maturity_date
        self._compounding_frequency_per_annum = compounding_frequency_per_annum
        # Dates appear to be YYYYMMDD integers, so (maturity - issue) / 10000
        # approximates whole years -- TODO confirm with the data source.
        # The "6m" tenor is special-cased to a single coupon payment.
        self._number_of_coupon_payments = 1 if (name == "6m") \
            else int((maturity_date - issue_date) / 10000 * compounding_frequency_per_annum)
        self._price = 0.0
        self._face_value = 1000.0
    def get_name(self):
        return self._name
    def get_coupon(self):
        return self._coupon
    def get_issue_date(self):
        return self._issue_date
    def get_maturity_date(self):
        return self._maturity_date
    def get_compounding_frequency_per_annum(self):
        return self._compounding_frequency_per_annum
    def get_price(self):
        return self._price
    def set_price(self, price, face_value=1000.0):
        # price is quoted as a percent of face value (e.g. 99.5).
        self._price = price
        self._face_value = face_value
    @staticmethod
    def compute_price(face_value, coupon, ytm, number_of_coupon_payments):
        """Present value of the bond; ``coupon`` and ``ytm`` are per-period
        fractions (already divided by 100 and the compounding frequency)."""
        price = 0.0
        for i in range(number_of_coupon_payments):
            price += coupon * face_value / math.pow(1.0 + ytm, i + 1)
        price += face_value / math.pow(1.0 + ytm, number_of_coupon_payments)
        return price
    def compute_ytm(self):
        """Yield to maturity (percent p.a.) found by bisection on [0, 100].

        Returns -1.0 when the bracketing interval does not contain a root.
        """
        ytm, tolerance = 0.0, 0.0001
        a, b, c = 0.0, 100.0, 0.0
        while True:
            # f(r) = model price at candidate rate r minus the quoted market
            # price (both in currency units).
            fa = self.compute_price(self._face_value, self._coupon / 100.0 / self._compounding_frequency_per_annum,
                                    a / 100.0 / self._compounding_frequency_per_annum, self._number_of_coupon_payments) \
                 - self._price / 100.0 * self._face_value
            fb = self.compute_price(self._face_value, self._coupon / 100.0 / self._compounding_frequency_per_annum,
                                    b / 100.0 / self._compounding_frequency_per_annum, self._number_of_coupon_payments) \
                 - self._price / 100.0 * self._face_value
            if math.fabs(fa) <= tolerance:
                ytm = a
                break
            elif math.fabs(fb) <= tolerance:
                ytm = b
                break
            elif fa * fb < 0.0:
                c = (a + b) / 2.0
                fc = self.compute_price(self._face_value, self._coupon / 100.0 / self._compounding_frequency_per_annum,
                                        c / 100.0 / self._compounding_frequency_per_annum,
                                        self._number_of_coupon_payments) \
                     - self._price / 100.0 * self._face_value
                if math.fabs(fc) <= tolerance:
                    ytm = c
                    break
                if fa * fc < 0.0:
                    b = c
                else:
                    a = c
            else:
                print("Problem: Lower and upper bounds of the starting range does not have a root.")
                return -1.0
        return ytm
    def bootstrap_spot_rate(self, spot_rates, interpolatedTenorStart, interpolatedTenorEnd):
        """Bisect on the end-tenor spot rate so this bond reprices exactly.

        Rates for tenors strictly between ``interpolatedTenorStart`` and
        ``interpolatedTenorEnd`` are filled by linear interpolation, and the
        solution is written into ``spot_rates`` in place.  NOTE: on success
        the method returns ``ytm`` which is never updated from 0.0 -- callers
        should rely on the mutated ``spot_rates`` (-1.0 still signals a
        bracketing failure).
        """
        ytm, tolerance = 0.0, 0.0001
        a, b, c = 0.0, 100.0, 0.0
        spot_rates_a, spot_rates_b, spot_rates_c = [], [], []
        for i in range(20):
            spot_rates_a.append(0.0)
            spot_rates_b.append(0.0)
            spot_rates_c.append(0.0)
        while True:
            # Build trial curves for the two bracket ends: copy the known
            # rates, then interpolate linearly up to the candidate end rate.
            for i in range(interpolatedTenorStart + 1):
                spot_rates_a[i] = spot_rates[i]
                spot_rates_b[i] = spot_rates[i]
            for i in range(interpolatedTenorStart + 1, interpolatedTenorEnd + 1):
                spot_rates_a[i] = spot_rates[interpolatedTenorStart] + (
                        a - spot_rates[interpolatedTenorStart]) * (i - interpolatedTenorStart) / (
                        interpolatedTenorEnd - interpolatedTenorStart)
                spot_rates_b[i] = spot_rates[interpolatedTenorStart] + (
                        b - spot_rates[interpolatedTenorStart]) * (i - interpolatedTenorStart) / (
                        interpolatedTenorEnd - interpolatedTenorStart)
            # Convert percent p.a. to per-period fractions before pricing.
            for i in range(len(spot_rates)):
                spot_rates_a[i] = spot_rates_a[i] / 100.0 / self._compounding_frequency_per_annum
                spot_rates_b[i] = spot_rates_b[i] / 100.0 / self._compounding_frequency_per_annum
            fa = self.compute_price_from_spot(self._face_value,
                                              self._coupon / 100.0 / self._compounding_frequency_per_annum,
                                              spot_rates_a,
                                              self._number_of_coupon_payments) - self._price / 100.0 * self._face_value
            fb = self.compute_price_from_spot(self._face_value,
                                              self._coupon / 100.0 / self._compounding_frequency_per_annum,
                                              spot_rates_b,
                                              self._number_of_coupon_payments) - self._price / 100.0 * self._face_value
            if math.fabs(fa) <= tolerance:
                for i in range(interpolatedTenorStart + 1, interpolatedTenorEnd + 1):
                    spot_rates[i] = spot_rates[interpolatedTenorStart] + (a - spot_rates[interpolatedTenorStart]) * (
                            i - interpolatedTenorStart) / (interpolatedTenorEnd - interpolatedTenorStart)
                break
            elif math.fabs(fb) <= tolerance:
                for i in range(interpolatedTenorStart + 1, interpolatedTenorEnd + 1):
                    spot_rates[i] = spot_rates[interpolatedTenorStart] + (b - spot_rates[interpolatedTenorStart]) * (
                            i - interpolatedTenorStart) / (interpolatedTenorEnd - interpolatedTenorStart)
                break
            elif fa * fb < 0.0:
                c = (a + b) / 2.0
                for i in range(interpolatedTenorStart + 1):
                    spot_rates_c[i] = spot_rates[i]
                for i in range(interpolatedTenorStart + 1, interpolatedTenorEnd + 1):
                    spot_rates_c[i] = spot_rates[interpolatedTenorStart] + (c - spot_rates[interpolatedTenorStart]) * (
                            i - interpolatedTenorStart) / (interpolatedTenorEnd - interpolatedTenorStart)
                for i in range(len(spot_rates)):
                    spot_rates_c[i] = spot_rates_c[i] / 100.0 / self._compounding_frequency_per_annum
                fc = self.compute_price_from_spot(self._face_value,
                                                  self._coupon / 100.0 / self._compounding_frequency_per_annum,
                                                  spot_rates_c,
                                                  self._number_of_coupon_payments) - self._price / 100.0 * self._face_value
                if math.fabs(fc) <= tolerance:
                    for i in range(interpolatedTenorStart + 1, interpolatedTenorEnd + 1):
                        spot_rates[i] = spot_rates[interpolatedTenorStart] + (
                                c - spot_rates[interpolatedTenorStart]) * (i - interpolatedTenorStart) / (
                                interpolatedTenorEnd - interpolatedTenorStart)
                    break
                if fa * fc < 0.0:
                    b = c
                else:
                    a = c
            else:
                print("Problem: Lower and upper bounds of the starting range does not have a root.")
                return -1.0
        return ytm
    @staticmethod
    def compute_price_from_spot(face_value, coupon, spot_rates, number_of_coupon_payments):
        """PV discounting each cash flow at its own per-period spot rate
        (``spot_rates`` entries are fractions, not percent)."""
        price = 0.0
        for i in range(number_of_coupon_payments):
            price += coupon * face_value / math.pow(1.0 + spot_rates[i], i + 1)
        price += face_value / math.pow(1.0 + spot_rates[number_of_coupon_payments - 1], number_of_coupon_payments)
        return price
| [
"zhoujohnone@gmail.com"
] | zhoujohnone@gmail.com |
class Spaceship:
    """Crew roster for a spaceship with a fixed seat capacity."""

    SPACESHIP_FULL = "Spaceship is full"
    ASTRONAUT_EXISTS = "Astronaut {} Exists"
    ASTRONAUT_NOT_FOUND = "Astronaut Not Found"
    ASTRONAUT_ADD = "Added astronaut {}"
    ASTRONAUT_REMOVED = "Removed {}"
    ZERO_CAPACITY = 0

    def __init__(self, name: str, capacity: int):
        self.name = name
        self.capacity = capacity
        self.astronauts = []

    def add(self, astronaut_name: str) -> str:
        """Board an astronaut; raise ValueError when full or already aboard."""
        roster = self.astronauts
        if len(roster) == self.capacity:
            raise ValueError(self.SPACESHIP_FULL)
        if astronaut_name in roster:
            raise ValueError(self.ASTRONAUT_EXISTS.format(astronaut_name))
        roster.append(astronaut_name)
        return self.ASTRONAUT_ADD.format(astronaut_name)

    def remove(self, astronaut_name: str) -> str:
        """Disembark an astronaut; raise ValueError when not aboard."""
        if astronaut_name not in self.astronauts:
            raise ValueError(self.ASTRONAUT_NOT_FOUND.format(astronaut_name))
        self.astronauts.remove(astronaut_name)
        return self.ASTRONAUT_REMOVED.format(astronaut_name)
| [
"hristiyan.plamenov.valchev@gmail.com"
] | hristiyan.plamenov.valchev@gmail.com |
a67b3be8bf770a11a0515a42fe9e37b479324764 | 297497957c531d81ba286bc91253fbbb78b4d8be | /testing/web-platform/tests/tools/manifest/utils.py | 5cd53c22e7745bd3656dadd6940aa4d5f33f4f19 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | marco-c/gecko-dev-comments-removed | 7a9dd34045b07e6b22f0c636c0a836b9e639f9d3 | 61942784fb157763e65608e5a29b3729b0aa66fa | refs/heads/master | 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 | NOASSERTION | 2019-09-29T01:27:49 | 2019-09-27T10:44:24 | C++ | UTF-8 | Python | false | false | 2,232 | py | import os
import subprocess
import sys
from typing import Any, Callable, Generic, Optional, Text, TypeVar
T = TypeVar("T")
def rel_path_to_url(rel_path: Text, url_base: Text = "/") -> Text:
    """Map a relative OS path to its URL under *url_base*.

    *url_base* is normalised to start and end with a slash before the
    path (with separators rewritten to "/") is appended.
    """
    assert not os.path.isabs(rel_path), rel_path
    base = url_base
    if base[0] != "/":
        base = "/" + base
    if base[-1] != "/":
        base += "/"
    return base + rel_path.replace(os.sep, "/")
def from_os_path(path: Text) -> Text:
    """Convert an OS-native path to the canonical forward-slash form."""
    assert os.path.sep == "/" or sys.platform == "win32"
    rv = path if "/" == os.path.sep else path.replace(os.path.sep, "/")
    if "\\" in rv:
        raise ValueError("path contains \\ when separator is %s" % os.path.sep)
    return rv
def to_os_path(path: Text) -> Text:
    """Convert a canonical /-separated path back to the native separator."""
    assert os.path.sep == "/" or sys.platform == "win32"
    if "\\" in path:
        raise ValueError("normalised path contains \\")
    return path if os.path.sep == "/" else path.replace("/", os.path.sep)
def git(path: Text) -> Optional[Callable[..., Text]]:
    """Return a helper that runs ``git <cmd> ...`` inside *path*, or None
    when *path* is not inside a git checkout (or git is unavailable)."""
    def gitfunc(cmd: Text, *args: Text) -> Text:
        full_cmd = ["git", cmd] + list(args)
        try:
            return subprocess.check_output(full_cmd, cwd=path, stderr=subprocess.STDOUT).decode('utf8')
        except Exception as e:
            # On Windows the launcher may only exist as git.bat; retry once.
            if sys.platform == "win32" and isinstance(e, WindowsError):
                full_cmd[0] = "git.bat"
                return subprocess.check_output(full_cmd, cwd=path, stderr=subprocess.STDOUT).decode('utf8')
            else:
                raise
    # Probe with rev-parse: a failure means "not a repo / no git", which is
    # reported as None rather than an exception.
    try:
        gitfunc("rev-parse", "--show-toplevel")
    except (subprocess.CalledProcessError, OSError):
        return None
    else:
        return gitfunc
class cached_property(Generic[T]):
    """Descriptor that computes the wrapped method once per instance and
    caches the value in the instance ``__dict__``.  Only ``__get__`` is
    defined (a non-data descriptor), so subsequent attribute lookups hit
    the cached instance attribute and bypass the descriptor entirely."""
    def __init__(self, func: Callable[[Any], T]) -> None:
        self.func = func
        self.__doc__ = getattr(func, "__doc__")
        self.name = func.__name__
    def __get__(self, obj: Any, cls: Optional[type] = None) -> T:
        if obj is None:
            # Class-level access returns the descriptor itself.
            return self
        assert self.name not in obj.__dict__
        rv = obj.__dict__[self.name] = self.func(obj)
        # Record which property names have been cached on this instance.
        obj.__dict__.setdefault("__cached_properties__", set()).add(self.name)
        return rv
| [
"mcastelluccio@mozilla.com"
] | mcastelluccio@mozilla.com |
95271a597babd080f7902f79bf71de01060eb442 | 3531e0b4c11415b21b10e25eec96ca5daa29a097 | /publication_old.py | a21e6ad44a596c2d972eb08ede367a26ebe3318a | [] | no_license | akanksha2806/Characterisation-of-Ties-in-the-Research-World | 428eb321e5a0a879f886c38808a51c93350b420d | aafe541427855815435e8d976147221742a16e31 | refs/heads/master | 2020-04-02T09:10:16.042806 | 2018-07-30T18:44:44 | 2018-07-30T18:44:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,865 | py | import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
import matplotlib as mpl
import pandas as pd
import pymongo
import pprint
from pymongo import MongoClient
from collections import defaultdict
import xlrd
import xlwt
from xlwt import Workbook
import collections
'''Export per-author publication counts by year for India and the USA.'''


def _export_publication_counts(db_name, collection_name, authors_xlsx, output_xls):
    """Count publications per author per year and write them to an .xls file.

    Reads every document carrying both ``authors`` and ``year`` from the
    given MongoDB collection, restricts the report to the author names in
    the first column of *authors_xlsx*, and writes one (author, year, count)
    row per publication year to *output_xls*.  Authors with no publications
    get a single row with count 0.  Row 1 of the output stays blank and a
    blank spacer row separates authors, matching the original layout.
    """
    client = MongoClient()
    collection = client[db_name][collection_name]
    query = {
        "authors": {"$exists": True},
        "year": {"$exists": True}
    }
    projection = {
        "authors": [],
        "year": 1,
    }
    records = list(collection.find(query, projection))

    # Map each author to the list of years in which they published.
    years_by_author = defaultdict(list)
    for record in records:
        for author in record['authors']:
            years_by_author[author].append(record['year'])

    # First column of the input spreadsheet holds the author names to report.
    workbook_in = xlrd.open_workbook(authors_xlsx)
    sheet_in = workbook_in.sheet_by_index(0)
    author_names = [sheet_in.cell_value(row, 0) for row in range(sheet_in.nrows)]

    workbook_out = xlwt.Workbook()
    sheet_out = workbook_out.add_sheet('sheet 1')
    sheet_out.write(0, 0, 'Authors')
    sheet_out.write(0, 1, 'Year')
    sheet_out.write(0, 2, 'Count')
    row = 2
    for author in author_names:
        years = years_by_author[author]
        if not years:
            sheet_out.write(row, 0, author)
            sheet_out.write(row, 2, 0)
            row += 1
        else:
            year_counts = defaultdict(int)
            for year in years:
                year_counts[year] += 1
            for year in sorted(year_counts):
                sheet_out.write(row, 0, author)
                sheet_out.write(row, 1, year)
                sheet_out.write(row, 2, year_counts[year])
                row += 1
        row += 1  # blank spacer row between authors
    workbook_out.save(output_xls)


'''India--------------------------------------------------------------------------'''
_export_publication_counts('solarsystem', 'sun',
                           "/Users/apoorvasingh/Downloads/indiaFilter.xlsx",
                           'Indian publications.xls')
'''USA----------------------------------------------------------------------'''
_export_publication_counts('vehicle', 'car',
                           "/Users/apoorvasingh/Downloads/usaFilter.xlsx",
                           'USA publications.xls')
| [
"apoorvasingh2811@gmail.com"
] | apoorvasingh2811@gmail.com |
f0291b04f1493de5db89fb82e294ecc2556628fe | e0c9ae210ffa579bfbfb109c34988af7b8737f7e | /MotorServicesApp/migrations/0003_remove_area_notes.py | 7f7b1f2f5c62f5b4f5f95ec332d95d495bdfffc1 | [] | no_license | ash018/ServiceDashboard | 509c5aa098bcb03808070924ec37ca581a4b77ea | 230883635e0e325f45ab7b9d77604ac32de6e3da | refs/heads/master | 2020-08-04T06:11:41.866302 | 2019-10-01T06:37:26 | 2019-10-01T06:37:26 | 212,033,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | # Generated by Django 2.1.8 on 2019-09-18 06:03
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.1.8): removes ``Area.Notes``."""
    dependencies = [
        ('MotorServicesApp', '0002_area_notes'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='area',
            name='Notes',
        ),
    ]
| [
"smakash@aci-bd.com"
] | smakash@aci-bd.com |
f36f8c2aaef1d6942cfce8bf2076106ba24351ae | ec2c5a950f6e17d11343b1082c64bd6c313b527d | /flasker/Myself_CSRF.py | f469b808a8bc9c7883aba7e1433df927b7a159f0 | [] | no_license | 0726Huyan/flask-demon | 6ce33f68a8b0c665e270fb3e35d4f820e1f7068f | 0e2714e103c0af3852f5bd5b863f155266d6d856 | refs/heads/master | 2020-06-25T00:57:49.477130 | 2019-07-27T09:48:19 | 2019-07-27T09:48:19 | 199,146,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | from wtforms.csrf.core import CSRF
from hashlib import md5
SECRET_KEY='111111' # could live in a config file and be read via app.config[...]
class Myself_CSRF(CSRF):
    """Custom wtforms CSRF implementation.

    The token is ``md5(SECRET_KEY + per-form secret)``; validation simply
    compares the submitted token with the freshly generated one.
    """
    def setup_form(self, form):
        # Stash the per-form secret (Form_Token.Meta.csrf_secret) for token
        # generation.  The original stored it as ``csrf_context`` but later
        # read ``csrf_secret``, raising AttributeError on every token.
        self.csrf_secret = form.meta.csrf_secret
        return super(Myself_CSRF, self).setup_form(form)
    def generate_csrf_token(self, csrf_token_field):
        # md5 requires bytes on Python 3, so encode the concatenated secrets.
        # A Meta.csrf_secret of None (the default) contributes nothing.
        secret = self.csrf_secret or ''
        token = md5((SECRET_KEY + secret).encode('utf-8')).hexdigest()
        return token
    def validate_csrf_token(self, form, field):
        # Reject the submission when the posted token differs from the
        # currently valid one.
        if field.data != field.current_token:
            raise ValueError('Invalid CSRF')
from flask_wtf import FlaskForm
class Form_Token(FlaskForm):
    """Form base class wired to the custom CSRF implementation above."""
    class Meta:
        csrf=True
        csrf_class=Myself_CSRF
        csrf_secret=None # subclasses supply their own secret when inheriting
"localhost@localhostdeMacBook-Pro.local"
] | localhost@localhostdeMacBook-Pro.local |
b45500ecd7485e86f04ae3a4fb0708ea4bacd640 | 7823c78c2e0123bc89ec845e07e954c6e8e410c4 | /dl_1_2/part1/1-4/normalizing_rows.py | 681f3d2f2e0f286e53fba063e0bf42be878223c7 | [] | no_license | ChambersLiu/deeplearning_homework | 9b550ffe5695874381817c4515f216997686036b | a2c3f7545ca4d077695734d6fee27ec773426430 | refs/heads/master | 2021-08-24T06:02:17.231086 | 2017-12-08T09:50:30 | 2017-12-08T09:50:30 | 113,547,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/12/8 14:58
# @Author : ChambersLiu
# @Software: PyCharm
import numpy as np
def normalizeRows(x):
    """Normalise each row of matrix *x* to unit Euclidean length.

    Args:
        x: numpy array of shape (n, m).

    Returns:
        Array of the same shape where every row has L2 norm 1.
    """
    # keepdims=True keeps the norms as an (n, 1) column so the division
    # broadcasts across each row.  (A leftover debug print of the norms
    # was removed.)
    x_norm = np.linalg.norm(x, axis=1, keepdims=True)
    return x / x_norm
if __name__ == '__main__':
    # Quick demo: normalise a 2x3 example matrix and show the result.
    x = np.array([
        [0, 3, 4],
        [1, 6, 4]
    ])
    normalized_x = normalizeRows(x)
    print(normalized_x)
| [
"changbo89@163.com"
] | changbo89@163.com |
3c09f0181b157f6eb955f3e8ab36c73ce0214183 | bfca3d22d440ebd099c15b1b803472d84aa05a97 | /P0160.py | 8a3c96125ef14af92d92bc0d14caed61c9cfd904 | [] | no_license | chenjiahui1991/LeetCode | 83a8354be70f13867212923995fb35fa1a95b9be | c2b01374942dcba7fbbe7865d13d7599bbc083f3 | refs/heads/master | 2020-03-22T14:13:50.621233 | 2018-10-07T07:05:41 | 2018-10-07T07:05:41 | 140,162,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | # Definition for singly-linked list.
class ListNode(object):
    """Minimal singly-linked list node: ``val`` payload, ``next`` pointer."""
    def __init__(self, x):
        # x: payload value; ``next`` is linked up by the caller afterwards.
        self.val = x
        self.next = None
class Solution(object):
    def getIntersectionNode(self, headA, headB):
        """
        :type head1, head1: ListNode
        :rtype: ListNode
        """
        # Two-cursor walk: when a cursor runs off the end of its list it
        # restarts at the head of the *other* list.  Both cursors therefore
        # travel at most len(A) + len(B) steps and meet exactly at the
        # intersection node -- or both become None together when the lists
        # are disjoint.  Output is identical to the length-difference method.
        if headA is None or headB is None:
            return None
        walker_a, walker_b = headA, headB
        while walker_a is not walker_b:
            walker_a = headB if walker_a is None else walker_a.next
            walker_b = headA if walker_b is None else walker_b.next
        return walker_a
# Demo: build two lists sharing the tail 2 -> 3 and print the value at the
# intersection.  (Assumes an intersection exists; ``.val`` would fail on None.)
headA = ListNode(1)
headA.next = ListNode(2)
headA.next.next = ListNode(3)
headB = ListNode(4)
headB.next = headA.next
s = Solution()
print(s.getIntersectionNode(headA, headB).val)
| [
"chenjh@buaa.edu.cn"
] | chenjh@buaa.edu.cn |
93b3be45620815fce54df382049cb6a916461c03 | 6ee06ad2bbc15cc0f829f99198947ffe8cffe395 | /webscan/admin.py | b8f064eba4084bf5987ad65b805c97ebb1feb56e | [
"Apache-2.0"
] | permissive | heslay/Sec-Tools | b80f07eb98b71a308b359df01ee9f494a99348ac | 11a1ada25e0e1d376580253512fe658dc5f8b257 | refs/heads/master | 2023-09-05T04:35:45.077360 | 2021-11-20T02:05:38 | 2021-11-20T02:05:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,808 | py | from django.contrib import admin
from .models import Category,Item,PortList,FpCategory,FingerPrint
from import_export.admin import ImportExportModelAdmin
from django.db import models
from django.forms import TextInput
# Register your models here.
# Customise the admin site's page header and browser title.
admin.site.site_header = 'Sec-tools 后台'
admin.site.site_title = 'Sec-tools | 后台'
# Navigation categories
@admin.register(Category)
class CategoryAdmin(ImportExportModelAdmin):
    """Admin (with import/export support) for navigation categories."""
    list_display = ['sort','name','add_menu','get_items','icon','icon_data']
    list_editable = ['sort','add_menu','icon']
    search_fields = ('name',)
    list_display_links = ('name',) # fields that link to the edit page
# Navigation items
@admin.register(Item)
class ItemAdmin(ImportExportModelAdmin):
    """Admin for navigation items."""
    list_display = ['title', 'url', 'img_admin','img_width','category']
    list_editable = ['url', 'category','img_width']
    search_fields = ('title', 'url', 'desc')
    list_display_links = ('title',) # fields that link to the edit page
    list_filter = ('category',) # sidebar filter by field
    list_per_page = 10 # records per page (Django default is 100)
# Fingerprint categories
@admin.register(FpCategory)
class ComponentCategory(ImportExportModelAdmin):
    """Admin for fingerprint categories."""
    list_display = ['id','name','get_items']
    search_fields = ('name',)
    list_display_links = ('name',) # fields that link to the edit page
    ordering = ('id',) # default sort field; a '-' prefix means descending
# Common fingerprints
@admin.register(FingerPrint)
class ComponentAdmin(ImportExportModelAdmin):
    """Admin for web-component fingerprints."""
    list_display = ('name','icon_data', 'desc','category')
    list_display_links = ('name',) # fields that link to the edit page
    list_editable = ['category']
    search_fields = ('name', 'desc',)
    list_filter = ('category',) # sidebar filter by field
    readonly_fields = ('icon_data',)
    ordering = ('name',) # default sort field; a '-' prefix means descending
    list_per_page = 15
    fieldsets = (
        ('编辑组件', {
            'fields': ('name', 'desc', 'category', 'icon', 'icon_data')
        }),
    )
    formfield_overrides = {
        models.CharField: {'widget': TextInput(attrs={'size': '59'})},
    }
# Port list
@admin.register(PortList)
class PortListAdmin(ImportExportModelAdmin):
    """Admin for the common-ports reference table."""
    list_display = ('num', 'service', 'protocol', 'status',)
    list_display_links = ('num',) # fields that link to the edit page
    search_fields = ('num', 'service',)
    list_filter = ('protocol','status') # sidebar filter by field
    ordering = ('num', ) # default sort field; a '-' prefix means descending
    list_per_page = 15
"“jianwentaook@163.com”"
] | “jianwentaook@163.com” |
f62ae05cbab3828d87056f0e424b8047131fd5a4 | f38bb187847005fc121bec01fe10042358004a87 | /StarUml/commandline.py | 465cfa18e5e59494e6cc529b1f36441e51f7692d | [] | no_license | loyolastalin/Python_Training_2020 | 17aaee735711ef3b4673479fb06f34fbf8bdba0b | dcee3c17615ff567b57afb67abbb45d63d72812a | refs/heads/master | 2023-08-14T23:44:43.897149 | 2021-10-05T08:20:28 | 2021-10-05T08:20:28 | 278,421,132 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | import sys,os
def main():
    """Print the number of command-line parameters, then each argv entry."""
    arg_count = len(sys.argv) - 1
    print("Total Parameter: {}".format(arg_count))
    for entry in sys.argv:
        print(entry)
main() | [
"loyolastalin@gmail.com"
] | loyolastalin@gmail.com |
c10025495e49e178e839ee495b8d2b7559ca3fc4 | 6b16458a0c80613a66c251511462e7a7d440970e | /packages/pyright-internal/src/tests/samples/variadicTypeVar5.py | 8089b00a89ef9b4f7adfc12be8efb3939e34e3d4 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | ikamensh/pyright | 3bbbb2cf1a1bdbbecb89ef389036756f47ef7114 | 5ea620ad2008de57dcac720a84674bdb712bffc4 | refs/heads/main | 2023-08-26T05:54:43.660282 | 2021-10-30T16:35:06 | 2021-10-30T16:35:06 | 422,952,836 | 0 | 0 | NOASSERTION | 2021-10-30T17:52:03 | 2021-10-30T17:52:02 | null | UTF-8 | Python | false | false | 2,648 | py | # This sample tests the handling of variadic type variables used
# within Callable types.
# pyright: reportMissingModuleSource=false
from typing import Any, Callable, Literal, Protocol, Union
from typing_extensions import TypeVarTuple, Unpack
_Xs = TypeVarTuple("_Xs")
def func1(func: Callable[[int, Unpack[_Xs]], Any]) -> Callable[[Unpack[_Xs]], int]:
...
def func2(func: Callable[[Unpack[_Xs]], int]) -> Callable[[Unpack[_Xs]], int]:
...
def callback1(a: int) -> int:
...
def callback2(a: str) -> int:
...
def callback3(a: str) -> None:
...
def callback4(a: int, b: complex, c: str) -> int:
...
def callback5(a: int, *args: Unpack[_Xs]) -> Union[Unpack[_Xs]]:
...
def callback6(a: int, *args: Any) -> int:
...
def callback7(a: int, b: str, c: str, d: str, *args: Any) -> int:
...
c1 = func1(callback1)
t_c1: Literal["() -> int"] = reveal_type(c1)
c1_1 = c1()
t_c1_1: Literal["int"] = reveal_type(c1_1)
# This should generate an error.
c2 = func1(callback2)
# This should generate an error.
c3 = func2(callback3)
c4 = func1(callback4)
t_c4: Literal["(complex, str) -> int"] = reveal_type(c4)
c4_1 = c4(3j, "hi")
t_c4_1: Literal["int"] = reveal_type(c4_1)
# This should generate an error.
c4_2 = c4(3j)
# This should generate an error.
c4_3 = c4(3j, "hi", 4)
c5 = func1(callback5)
t_c5: Literal["(*_Xs@callback5) -> int"] = reveal_type(c5)
# This should generate an error.
c6_1 = func1(callback6)
# This should generate an error.
c6_2 = func2(callback6)
# This should generate an error.
c7_1 = func1(callback7)
# This should generate an error.
c7_2 = func2(callback7)
class CallbackA(Protocol[Unpack[_Xs]]):
def __call__(self, a: int, *args: Unpack[_Xs]) -> Any:
...
def func3(func: CallbackA[Unpack[_Xs]]) -> Callable[[Unpack[_Xs]], int]:
...
d1 = func3(callback1)
t_d1: Literal["() -> int"] = reveal_type(d1)
# This should generate an error.
d2 = func3(callback2)
# This should generate an error.
d3 = func3(callback3)
d4 = func3(callback4)
t_d4: Literal["(complex, str) -> int"] = reveal_type(d4)
d4_1 = d4(3j, "hi")
t_d4_1: Literal["int"] = reveal_type(d4_1)
# This should generate an error.
d4_2 = d4(3j)
# This should generate an error.
d4_3 = d4(3j, "hi", 4)
def func4(func: Callable[[Unpack[_Xs], int], int]) -> Callable[[Unpack[_Xs]], int]:
...
def callback8(a: int, b: str, c: complex, d: int) -> int:
...
d5_1 = func4(callback1)
t_d5_1: Literal["() -> int"] = reveal_type(d5_1)
# This should generate an error.
d5_2 = func4(callback4)
d5_3 = func4(callback8)
t_d5_3: Literal["(int, str, complex) -> int"] = reveal_type(d5_3)
| [
"erictr@microsoft.com"
] | erictr@microsoft.com |
f8efb8796402968e0d65adeb58b5693319539a4e | ef60f1908dba8f3854148ad1395db43a23caa850 | /libsystem/libsystem/wsgi.py | f884fcdd95b3300a8580e6a00c1f1d0ebd85e469 | [] | no_license | Richardo3/libsystem | 797403038e23778843fc7bc4146bc37eaaa11361 | 8f025a1bfd7e902b6871cac8ccbd85503de67990 | refs/heads/master | 2020-05-04T19:43:50.454937 | 2019-04-05T09:11:47 | 2019-04-05T09:11:47 | 179,405,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | """
WSGI config for libsystem project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already does.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "libsystem.settings")
# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
| [
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
305bd9adfd5fdf771c0106042bf798ea22446ea3 | 717753ad7477992a5e47a6930f64597b75219db5 | /ftenv/bin/easy_install-3.5 | e279077031588a0226ef80533755f84bebf13068 | [] | no_license | freshtracksnyc/ftnyc | 76a01afdc014f7854108cb7dfdd7dc085c3c54c8 | 297ec03010e13aaa5ee40c1c7cb851a014bfb89d | refs/heads/master | 2020-04-13T20:56:26.408186 | 2019-01-28T14:05:17 | 2019-01-28T14:05:17 | 163,443,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | 5 | #!/home/andy/ftnyc/ftenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
# Auto-generated setuptools console script: strip the "-script.py"/".exe"
# suffix from argv[0] so easy_install sees its canonical name, then dispatch.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"freshtracksnyc@gmail.com"
] | freshtracksnyc@gmail.com |
e87040c6a1bd846558f8c253422413cbb91f6f5f | 161daf1046832d25e66858157f95eb226ecf7cdf | /Linear Regression/Single Variable Linear Regression Manually.py | 6919d3f1af1bf449d416df7b20ca966b71574d64 | [] | no_license | Dipeshpal/Machine-Learning | 551552c0f5fc922aa6f9f5ec5d522db983ae6063 | 626516ef9f0d63a67a073eab4fc266fd6510e482 | refs/heads/master | 2022-07-05T22:19:38.050175 | 2019-07-10T09:05:31 | 2019-07-10T09:05:31 | 188,903,340 | 0 | 0 | null | 2022-06-21T22:05:10 | 2019-05-27T20:10:12 | Python | UTF-8 | Python | false | false | 2,101 | py | # Linear Regression
# Import Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# load dataset
dataset = pd.read_csv('headbrain.csv')
# dropping ALL duplicate values
dataset.drop_duplicates(keep=False, inplace=True)
print("Dataset head: ", dataset.head())
print("Dataset shape: ", dataset.shape)
# Correlations Matrix (Visualize Relations between Data)
# From this we can find which param has more relations
correlations = dataset.corr()
sns.heatmap(correlations, square=True, cmap="YlGnBu")
plt.title("Correlations")
plt.show()
# Getting feature (x) and label(y)
# From correlations matrix we found Head Size(cm^3) and Brain Weight(grams) are most co-related data
x = dataset["Head Size(cm^3)"].values
y = dataset["Brain Weight(grams)"].values
# Fitting Line (Model) y = mx + c
# where, m = summation[(x-mean_x)(y-mean_y)]%summation[(x-mean_x)**2]
# c = y - mx
mean_x = np.mean(x)
mean_y = np.mean(y)
# Total number of features
l = len(x)
# numerator = summation[(x-mean_x)(y-mean_y)
# denominator = summation[(x-mean_x)**2
numerator = 0
denominator = 0
for i in range(l):
numerator += (x[i] - mean_x) * (y[i] - mean_y)
denominator += (x[i] - mean_x) ** 2
# m is gradient
m = numerator / denominator
# c is intercept
c = mean_y - (m * mean_x)
print("m: ", m)
print("c: ", c)
# for better visualization (Scaling of data) get max and min point of x
max_x = np.max(x) + 100
min_x = np.min(x) - 100
# X is data points (between max_x and min_y)
X = np.linspace(max_x, min_x, 10)
# model here (we know m and c, already calculated above on sample dataset)
Y = m*X + c
# plotting graph for model
plt.plot(X, Y, color='#58b970', label='Regression Line')
plt.scatter(x, y, c='#ef5424', label='Scatter Plot:n Given Data')
plt.legend()
plt.show()
# Calculate R Square
sst = 0
ssr = 0
for i in range(l):
y_pred = m * x[i] + c
sst += (y[i] - mean_y) ** 2
ssr += (y[i] - y_pred) ** 2
# print("Sum of Squared Total: ", sst)
# print("Sum of Squared due to Regression: ", ssr)
r2 = 1 - (ssr / sst)
print("R Squared: ", r2)
| [
"dipeshpal17@gmail.com"
] | dipeshpal17@gmail.com |
714834b479f46b3a9ea7d245e0736f11a96e7357 | 52efcaacf23e2345d09a1de61610a74df457057f | /auto_derby/__init__.py | 7b72bc0d8b1659b8117b05e1211ef6877a5160d5 | [
"MIT"
] | permissive | debi-derby/auto-derby | 78bc726e8243c8a25ddc13b364b7289f322caaaa | c2e5c138125cac6dc13dbd74045161ca03f6e5cf | refs/heads/master | 2023-09-03T09:03:35.305321 | 2021-11-02T16:18:45 | 2021-11-02T16:18:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | from ._config import config
from .plugin import Plugin
| [
"NateScarlet@Gmail.com"
] | NateScarlet@Gmail.com |
f7842958af9da420f4f2bc0eb30d92ea8dfc953b | 93ab21f992fdf8aec004959cb1c6519abfcca69b | /parameter_estimation_smallBox/parameter_estimation_sim/model_numpy_3D_element_betterFormation.py | 1b638ee956c3da2e9f835226e51025a0aedfd2a0 | [] | no_license | sbo5/irrigation | 1b232d8e2229a36cb2e9c7d5570f7a5ef025daec | 2a780260067b596180f558feb0ab7a6640eba17d | refs/heads/master | 2020-04-10T18:28:15.041926 | 2018-12-10T21:02:16 | 2018-12-10T21:02:16 | 161,205,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,126 | py | """
Created on Wed Oct 31 2018
@author: Song Bo (sbo@ualberta.ca)
This is a 3D Richards equation example simulating the small box.
ode is coded element by element
"""
from __future__ import (print_function, division)
from scipy import io, optimize, integrate
from numpy import diag, zeros, ones, dot, copy, mean, asarray, array, interp
from scipy.linalg import lu
from numpy.linalg import inv, matrix_rank, cond, cholesky, norm
import time
import timeit
import csv
import mpctools as mpc
from casadi import *
import matplotlib.pyplot as plt
start_time = time.time()
# ----------------------------------------------------------------------------------------------------------------------
# Parameters
# ----------------------------------------------------------------------------------------------------------------------
def Loam():
    """van Genuchten-Mualem soil parameter set for loam (m and s units)."""
    n = 1.56
    return {
        'thetaR': 0.078,          # residual volumetric water content
        'thetaS': 0.43,           # saturated volumetric water content
        'alpha': 0.036*100,       # inverse air-entry pressure, 1/m (from 1/cm)
        'n': n,                   # pore-size distribution index
        'm': 1 - 1 / n,
        'Ks': 1.04/100/3600,      # saturated hydraulic conductivity, m/s (from cm/h)
        'neta': 0.5,              # tortuosity/connectivity exponent
        'Ss': 0.00001,            # specific storage
        'mini': 1.e-20,           # tiny epsilon used to dodge divisions by zero
    }
def LoamIni():
    """Loam parameters uniformly scaled by 0.9 -- an initial guess set."""
    n = 1.56* 0.9
    return {
        'thetaR': 0.078*0.9,
        'thetaS': 0.43* 0.9,
        'alpha': 0.036*100*0.9,
        'n': n,
        'm': 1 - 1 / n,
        'Ks': 1.04/100/3600*0.9,
        'neta': 0.5,
        'Ss': 0.00001,
        'mini': 1.e-20,
    }
def LoamOpt():
    """Loam parameters obtained from a previous optimisation run."""
    n = 1.50828922e+00
    return {
        'thetaR': 6.18438096e-02,
        'thetaS': 4.25461746e-01,
        'alpha': 3.71675224e+00,
        'n': n,
        'm': 1 - 1 / n,
        'Ks': 2.99737827e-06,
        'neta': 0.5,
        'Ss': 0.00001,
        'mini': 1.e-20,
    }
def LoamySand():
    """van Genuchten-Mualem parameter set for loamy sand (m and s units)."""
    n = 2.28
    return {
        'thetaR': 0.057,
        'thetaS': 0.41,
        'alpha': 0.124*100,
        'n': n,
        'm': 1 - 1 / n,
        'Ks': 14.59/100/3600,
        'neta': 0.5,
        'Ss': 0.00001,
        'mini': 1.e-20,
    }
def SandyLoam():
    """van Genuchten-Mualem parameter set for sandy loam (m and s units)."""
    n = 1.89
    return {
        'thetaR': 0.065,
        'thetaS': 0.41,
        'alpha': 0.075*100,
        'n': n,
        'm': 1 - 1 / n,
        'Ks': 4.42/100/3600,
        'neta': 0.5,
        'Ss': 0.00001,
        'mini': 1.e-20,
    }
def SiltLoam():
    """van Genuchten-Mualem parameter set for silt loam (m and s units)."""
    n = 1.41
    return {
        'thetaR': 0.067,
        'thetaS': 0.45,
        'alpha': 0.020*100,
        'n': n,
        'm': 1 - 1 / n,
        'Ks': 0.45/100/3600,
        'neta': 0.5,
        'Ss': 0.00001,
        'mini': 1.e-20,
    }
# ----------------------------------------------------------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------------------------------------------------------
def hFun(theta, pars): # Assume all theta are smaller than theta_s
psi = (((((theta/100 - pars['thetaR']) / (pars['thetaS'] - pars['thetaR'] + pars['mini']) + pars['mini']) ** (1. / (-pars['m'] + pars['mini']))
- 1) + pars['mini']) ** (1. / (pars['n'] + pars['mini']))) / (-pars['alpha'] + pars['mini'])
return psi
def thetaFun(psi,pars):
    """van Genuchten retention curve: pressure head -> water content in %.

    ``if_else`` comes from the casadi star import, so ``psi`` is expected to
    be a casadi expression/DM; the result is flattened to a 1-D numpy array.
    Saturated (psi >= 0) nodes get effective saturation Se = 1.
    """
    Se = if_else(psi>=0., 1., (1+abs(psi*pars['alpha']+pars['mini'])**pars['n']+pars['mini'])**(-pars['m']))
    theta = 100*(pars['thetaR']+(pars['thetaS']-pars['thetaR'])*Se)
    theta = theta.full()  # casadi DM -> dense numpy array
    theta = theta.ravel(order='F')
    return theta
def KFun(psi,pars):
    """Mualem relative hydraulic conductivity scaled by Ks.

    ``psi`` is a casadi expression/DM (``if_else`` from the casadi star
    import); returns a flat 1-D numpy array.  The ``mini`` epsilon keeps the
    fractional powers defined at Se = 0 and Se = 1.
    """
    Se = if_else(psi>=0., 1., (1+abs(psi*pars['alpha']+pars['mini'])**pars['n']+pars['mini'])**(-pars['m']))
    K = pars['Ks']*(Se+pars['mini'])**pars['neta']*(1-((1-(Se+pars['mini'])**(1/(pars['m']+pars['mini'])))+pars['mini'])**pars['m']+pars['mini'])**2
    K = K.full()  # casadi DM -> dense numpy array
    K = K.ravel(order='F')
    return K
def CFun(psi,pars):
    """Specific moisture capacity C(psi) = Ss*Se + (thetaS-thetaR)*dSe/dh.

    ``psi`` is a casadi expression/DM (``if_else`` from the casadi star
    import); returns a flat 1-D numpy array.
    """
    Se = if_else(psi>=0., 1., (1+abs(psi*pars['alpha']+pars['mini'])**pars['n']+pars['mini'])**(-pars['m']))
    dSedh=pars['alpha']*pars['m']/(1-pars['m']+pars['mini'])*(Se+pars['mini'])**(1/(pars['m']+pars['mini']))*(1-(Se+pars['mini'])**(1/(pars['m']+pars['mini']))+pars['mini'])**pars['m']
    C = Se*pars['Ss']+(pars['thetaS']-pars['thetaR'])*dSedh
    C = C.full().ravel(order='F')  # casadi DM -> flat numpy array
    return C
def mean_hydra_conductivity(left_boundary, right_boundary):
    """Arithmetic mean of K(psi) at two neighbouring nodes.

    Inter-nodal conductivity for the finite-difference fluxes; reads the
    module-level `pars` global rather than taking it as a parameter.
    """
    lk = KFun(left_boundary, pars)
    rk = KFun(right_boundary, pars)
    mk = (lk+rk)/2
    return mk
# Calculated the initial state
def ini_state_np(thetaIni, p):
    """Build the initial pressure-head profile (top-to-bottom ordering).

    Converts the measured water content(s) `thetaIni` to head via hFun,
    then fills the vertical profile section-by-section: a fixed head of -5
    above the first sensor layer, the measured head in the sensed section,
    and -3 below it. Relies on module-level geometry globals
    (numberOfNodes, nodesInPlane, layersOfSensors, ratio_z).

    Returns (hMatrix, psiIni): full nodal head vector and sensor head(s).
    """
    psiIni = hFun(thetaIni, p)
    # Approach: States in the same section are the same
    hMatrix = np.zeros(numberOfNodes)
    hMatrix[int(layersOfSensors[0] * nodesInPlane * ratio_z):int(layersOfSensors[1] * nodesInPlane * ratio_z)] = -5 # Top
    hMatrix[int(layersOfSensors[1] * nodesInPlane * ratio_z):int(layersOfSensors[2] * nodesInPlane * ratio_z)] = psiIni[0]
    hMatrix[int(layersOfSensors[2] * nodesInPlane * ratio_z):int(layersOfSensors[-1] * nodesInPlane * ratio_z)] = -3 # Bottom
    return hMatrix, psiIni
# Calculated the initial state
def ini_state_np_botToTop(thetaIni, p):
    """Initial head profile reordered bottom-to-top (reverse of ini_state_np)."""
    h_top_down, psi_top_down = ini_state_np(thetaIni, p)
    return h_top_down[::-1], psi_top_down[::-1]
def psiTopFun(hTop0, psi, qTop, dz):
    """Residual for the surface-boundary head; its root gives the ghost value.

    Solved with scipy's fsolve inside RichardsEQN_3D. Implements a
    "switching" boundary condition: the flux term mixes the applied flux
    qTop with max(0, hTop0/dt) -- presumably a ponded-water correction;
    confirm the sign convention (irrigation fluxes here are negative).
    Reads the module-level globals `pars` and `dt`.
    """
    F = psi + 0.5*dz*(-1-(qTop+max(0, hTop0/dt))/KFun(hTop0, pars)) - hTop0 # Switching BC
    # F = psi + dz*(-1-(qTop)/KFun(hTop0, p)) - hTop0 # Only unsat. BC
    # F = psi + dz*(-1-(qTop-max(0, hTop0/dt))/KFun(hTop0, p)) - hTop0
    return F
# def qpet(pet=PET):
# q_pet = pet*()
# def aet():
def RichardsEQN_3D(x, t, u, u1):
    """Right-hand side dpsi/dt of the 3-D Richards equation (for odeint).

    x: nodal pressure heads; t: time (unused in the RHS itself); u: top
    boundary flux (irrigation); u1: passed but unused. For every node the
    six neighbour heads are gathered, with ghost values at domain faces:
    zero-gradient sides/bottom (neighbour = own state, half spacing) and a
    flux/switching condition at the surface solved via fsolve(psiTopFun).
    Relies on module-level geometry globals and `pars`.
    """
    head_pressure = np.zeros(numberOfNodes)
    for i in range(0, numberOfNodes):
        # Grid spacings are reset per node; they are halved at boundary faces.
        dx = lengthOfX / nodesInX # meter
        dy = lengthOfY / nodesInY
        dz = lengthOfZ / nodesInZ
        state = x[i]
        coordinate = positionOfNodes[i]
        for index, item in enumerate(coordinate):
            # print('Working with', i, 'node')
            if index == 0:
                if item == 0:
                    bc_xl = state
                    bc_xr = x[i+1]
                    dx = 0.5*dx
                elif item == nodesInX-1:
                    bc_xl = x[i - 1]
                    bc_xr = state
                    dx = 0.5*dx
                else:
                    bc_xl = x[i - 1]
                    bc_xr = x[i + 1]
            elif index == 1:
                if item == 0:
                    bc_yl = state
                    bc_yr = x[i + nodesInX]
                    dy = 0.5*dy
                elif item == nodesInY-1:
                    bc_yl = x[i - nodesInX]
                    bc_yr = state
                    dy = 0.5*dy
                else:
                    bc_yl = x[i - nodesInX]
                    bc_yr = x[i + nodesInX]
            else:
                if item == 0:
                    bc_zl = state
                    bc_zu = x[i + nodesInPlane]
                    dz = 0.5*dz
                elif item == nodesInZ-1:
                    bc_zl = x[i - nodesInPlane]
                    # KzU1 = hydraulic_conductivity(state)
                    # bc_zu = state + dz*(-1 - u/KzU1)
                    # Surface node: solve the switching BC for the ghost head.
                    bc_zu = optimize.fsolve(psiTopFun, state, args=(state, u, dz))
                    dz = 0.5*dz
                else:
                    bc_zl = x[i - nodesInPlane]
                    bc_zu = x[i + nodesInPlane]
        # Face conductivities (arithmetic means) and head gradients per axis.
        KxL = mean_hydra_conductivity(x[i], bc_xl)
        KxR = mean_hydra_conductivity(x[i], bc_xr)
        deltaHxL = (x[i] - bc_xl) / dx
        deltaHxR = (bc_xr - x[i]) / dx
        KyL = mean_hydra_conductivity(x[i], bc_yl)
        KyR = mean_hydra_conductivity(x[i], bc_yr)
        deltaHyL = (x[i] - bc_yl) / dy
        deltaHyR = (bc_yr - x[i]) / dy
        KzL = mean_hydra_conductivity(x[i], bc_zl)
        KzU = mean_hydra_conductivity(x[i], bc_zu)
        deltaHzL = (x[i] - bc_zl) / dz
        deltaHzU = (bc_zu - x[i]) / dz
        # Flux divergences; temp3 adds the gravity term dK/dz.
        temp0 = 1 / (0.5 * 2 * dx) * (KxR * deltaHxR - KxL * deltaHxL)
        temp1 = 1 / (0.5 * 2 * dy) * (KyR * deltaHyR - KyL * deltaHyL)
        temp2 = 1 / (0.5 * 2 * dz) * (KzU * deltaHzU - KzL * deltaHzL)
        temp3 = 1 / (0.5 * 2 * dz) * (KzU - KzL)
        temp4 = 0 # source term
        temp5 = temp0 + temp1 + temp2 + temp3 - temp4
        # Divide by the storage coefficient C(psi) to get dpsi/dt.
        temp6 = temp5 / CFun(state, pars)
        head_pressure[i] = temp6
    return head_pressure
def simulate(p):
    """Integrate the Richards model over `timeList` with parameter set `p`.

    Steps odeint one interval at a time, feeding the piecewise-constant
    irrigation flux as the top boundary input. Returns
    (h_avg, theta_avg, theta, h). NOTE(review): h_avg is only filled at
    index 0 and never updated inside the loop -- confirm that is intended.
    Relies on module-level globals (timeList, numberOfNodes, thetaIni,
    irrigation, CMatrix, numberOfSensors).
    """
    # define states and measurements arrays
    h = np.zeros(shape=(len(timeList), numberOfNodes))
    theta = np.zeros(shape=(len(timeList), numberOfNodes))
    h0, hIni = ini_state_np_botToTop(thetaIni, p)
    h[0] = h0
    theta0 = thetaFun(h0, p) # Initial state of theta
    theta[0] = theta0[::-1]
    h_avg = np.zeros(shape=(len(timeList), numberOfSensors)) # 1 sensor
    h_avg[0] = hIni
    theta_avg = np.zeros(shape=(len(timeList), numberOfSensors))
    theta_avg[0] = thetaIni
    # Boundary conditions
    qTfun = irrigation
    for i in range(len(timeList)-1): # in ts, end point is timeList[i+1], which is 2682*60
        print('From', i, ' min(s), to ', i+1, ' min(s)')
        ts = [timeList[i], timeList[i + 1]]
        if i == 414:
            # Leftover debugging breakpoint hook; has no effect.
            pass
        y = integrate.odeint(RichardsEQN_3D, h0, ts, args=(qTfun[i], qTfun[i]))
        h0 = y[-1]
        h[i + 1] = h0
        theta0 = thetaFun(h0, p)
        theta0 = theta0[::-1]
        theta[i + 1] = theta0
        # Sensor-averaged water content via the measurement matrix.
        theta_avg[i+1] = np.matmul(CMatrix, theta0)
    return h_avg, theta_avg, theta, h
# ----------------------------------------------------------------------------------------------------------------------
# main
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# Define the geometry
# ----------------------------------------------------------------------------------------------------------------------
ratio_x = 1
ratio_y = 1
ratio_z = 1
ratio_ft_to_m = 0.3048 # m/ft
lengthOfXinFt = 2 # feet
lengthOfYinFt = 4 # feet
lengthOfZinFt = 0.7874 # feet [In order to use 2 sensors of the probe, the minimum soil depth is 150+150+41=341mm or 1.11877ft)]
# The maximum height is 60.50 cm or 1.98 ft
lengthOfX = lengthOfXinFt * ratio_ft_to_m # meter. Should be around 61 cm
lengthOfY = lengthOfYinFt * ratio_ft_to_m # meter. Should be around 121.8 cm
# lengthOfZ = lengthOfZinFt * ratio_ft_to_m # meter. Should be around 60.5 cm
lengthOfZ = 0.24
nodesInX = int(2*ratio_x)
nodesInY = int(2*ratio_y)
nodesInZ = int(24*ratio_z)
nodesInPlane = nodesInX*nodesInY
numberOfNodes = nodesInPlane*nodesInZ
dx = lengthOfX/nodesInX # meter
dy = lengthOfY/nodesInY
dz = lengthOfZ/nodesInZ
# Label the nodes
positionOfNodes = []
for k in range(0, nodesInZ):
for j in range(0, nodesInY):
for i in range(0, nodesInX):
positionOfNodes.append([i, j, k])
# ----------------------------------------------------------------------------------------------------------------------
# Sensors
# ----------------------------------------------------------------------------------------------------------------------
numberOfSensors = 1
layersOfSensors = np.array([0, 5, 20, nodesInZ]) # beginning = top & end = bottom
# C matrix
start = layersOfSensors[1] * ratio_z
end = layersOfSensors[2] * ratio_z
difference = end - start
CMatrix = np.zeros((numberOfSensors, numberOfNodes))
for i in range(0, numberOfSensors):
CMatrix[i][start * nodesInPlane: end * nodesInPlane] = 1. / ((end - start) * nodesInPlane)
start += difference * ratio_z
end += difference * ratio_z
# ----------------------------------------------------------------------------------------------------------------------
# Time interval
# ----------------------------------------------------------------------------------------------------------------------
ratio_t = 1
dt = 60.0*ratio_t # second
timeSpan = 15
interval = int(timeSpan*60/dt)
timeList_original = np.arange(0, timeSpan+1)*dt/ratio_t
timeList = np.arange(0, interval+1)*dt
# ----------------------------------------------------------------------------------------------------------------------
# Inputs: irrigation
# ----------------------------------------------------------------------------------------------------------------------
irrigation = np.zeros(len(timeList))
for i in range(0, len(irrigation)): # 1st node is constant for 1st temporal element. The last node is the end of the last temporal element.
if i in range(0, 180):
irrigation[i] = -0.050/86400
elif i in range(180, 540):
irrigation[i] = -0.010/86400
else:
irrigation[i] = 0
# ----------------------------------------------------------------------------------------------------------------------
# Parameters
# ----------------------------------------------------------------------------------------------------------------------
pars = Loam()
# pars_ini = LoamIni()
# pars_opt = LoamOpt()
# ----------------------------------------------------------------------------------------------------------------------
# Initial measurements
# ----------------------------------------------------------------------------------------------------------------------
thetaIni = array([21.6]) # left 21.6, right 18.6
# ----------------------------------------------------------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------------------------------------------------------
h_i, theta_i, theta_i_all, h_i_all = simulate(pars)
# ----------------------------------------------------------------------------------------------------------------------
# Plots
# ----------------------------------------------------------------------------------------------------------------------
plt.figure()
# plt.plot(timeList_original/dt*ratio_t, theta_e[:, 0], 'b-.', label=r'$theta_1$ measured')
plt.plot(timeList/dt, theta_i[:, 0], 'y--', label=r'$theta_1$ initial')
# plt.plot(timeList/dt*ratio_t, theta_opt[:, 0], 'r--', label=r'$theta_1$ optimized')
plt.xlabel('Time, t (min)')
plt.ylabel('Water content (%)')
plt.legend(loc='best')
plt.show()
plt.figure()
# plt.plot(timeList_original/dt*ratio_t, theta_e[:, 0], 'b-.', label=r'$theta_1$ measured')
plt.plot(timeList/dt, theta_i_all[:, 0], 'y--', label=r'$theta_1$ initial_Top')
# plt.plot(timeList/dt*ratio_t, theta_opt[:, 0], 'r--', label=r'$theta_1$ optimized')
plt.xlabel('Time, t (min)')
plt.ylabel('Water content (%)')
plt.legend(loc='best')
plt.show()
print('Time elapsed: {:.3f} sec'.format(time.time() - start_time)) | [
"sbo@ualberta.ca"
] | sbo@ualberta.ca |
aeb178754d3e11d4c0785eac82d396cb1a9efc7e | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/prime-big-431.py | 6dff84319c64e9671d5fbc210e23958e95c5317e | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,705 | py | # Get the n-th prime starting from 2
def get_prime(n:int) -> int:
    """Return the n-th prime number, counting 2 as the first."""
    value:int = 1
    found:int = 0
    while True:
        value = value + 1
        if is_prime(value):
            found = found + 1
            if found == n:
                return value
    return 0 # unreachable
def is_prime(x:int) -> bool:
    """Trial division: True when no integer in [2, x) divides x.

    Matches the original's convention of returning True for x <= 2,
    including 0 and 1 (callers only pass candidates >= 2).
    """
    d:int = 2
    while d < x:
        if x % d == 0:
            return False
        d = d + 1
    return True
def is_prime2(x:int, x2:int) -> bool:
    """Trial-division primality test; x2 is an unused padding parameter."""
    divisor:int = 2
    while divisor < x:
        if x % divisor == 0:
            return False
        divisor = divisor + 1
    return True
def is_prime3(x:int, x2:int, x3:int) -> bool:
    """Trial division: True iff no d in [2, x) divides x.

    x2/x3 and the div2..div5 locals are unused -- presumably benchmark
    padding in this synthetic test case. Returns True for x <= 2.
    """
    div:int = 2
    div2:int = 2
    div3:int = 2
    div4:int = 2
    div5:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
def is_prime4(x:int, x2:int, x3:int, x4:int) -> bool:
    """Trial division: True iff no d in [2, x) divides x.

    Fix: the second local was a corrupted template placeholder
    ("$TypedVar = 2"), which is a syntax error; restored to
    `div2:int = 2` to match the sibling is_prime* functions.
    x2..x4 and div2..div5 are unused padding. Returns True for x <= 2.
    """
    div:int = 2
    div2:int = 2
    div3:int = 2
    div4:int = 2
    div5:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
def is_prime5(x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
    """Trial division: True iff no d in [2, x) divides x.

    x2..x5 and div2..div5 are unused -- presumably benchmark padding in
    this synthetic test case. Returns True for x <= 2.
    """
    div:int = 2
    div2:int = 2
    div3:int = 2
    div4:int = 2
    div5:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
# Input parameter: how many primes to print.
# n2..n5 and i2..i5 below are unused duplicates -- presumably benchmark
# padding in this synthetic test case.
n:int = 15
n2:int = 15
n3:int = 15
n4:int = 15
n5:int = 15
# Loop counter over [1, n]
i:int = 1
i2:int = 1
i3:int = 1
i4:int = 1
i5:int = 1
# Print the first n primes, one per line.
while i <= n:
    print(get_prime(i))
    i = i + 1
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
e395b5c154ed0bfe02ce059d10aa1b8d0d7f022e | 5f682f5852383ea443238266fe47bdec1850ef8b | /archive/sp18/a5/scenarios/1.py | 4075c82270c521b96ac97e76d485ab204de15109 | [
"BSD-3-Clause"
] | permissive | 0xcf/decal-labs | b1d851df0c55871aa12d668aed167b79d09c2ba2 | 3acb62af3eff4a1dbbe875e81ec1485d9d10c44b | refs/heads/master | 2023-04-08T22:05:03.974384 | 2023-03-22T04:39:51 | 2023-03-22T04:39:51 | 119,335,175 | 3 | 36 | BSD-3-Clause | 2023-03-22T04:39:52 | 2018-01-29T05:16:17 | Python | UTF-8 | Python | false | false | 260 | py | import lib.util as util
# Bring down the network interface that carries the default route.
command = "ifdown {iface}"
routing_entries = util.get_default_routing_information()
# First routing entry that is a default gateway, or None if there is none.
default_entry = next(
    (e for e in routing_entries if util.is_default_gateway(e)),
    None
)
# NOTE(review): if no default route exists, default_entry is None and the
# attribute access below raises AttributeError -- confirm that is acceptable.
util.run(command.format(iface=default_entry.iface))
| [
"c2.tonyc2@gmail.com"
] | c2.tonyc2@gmail.com |
0d45fe0579332cf72bfb0617550ec23452aea3aa | a0c24087e281d33f73306f0399184cfc153ab9d5 | /factor.py | 844ad4437e1700649f4a2d4d7a9741a76ccafadb | [] | no_license | creasyw/project_euler | c94215886390cd2606c2c92af79008de4d67146f | b0584fb01ba2a255b2049a8cdc24fba98f46aff0 | refs/heads/master | 2021-11-19T10:39:48.946028 | 2021-10-12T03:59:34 | 2021-10-12T03:59:34 | 4,611,853 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | def factorize(n):
""" Provide factorization in a list """
import math
res = []
# iterate over all even numbers first.
while n % 2 == 0:
res.append(2)
n //= 2
# try odd numbers up to sqrt(n)
limit = math.sqrt(n+1)
i = 3
while i <= limit:
if n % i == 0:
res.append(i)
n //= i
limit = math.sqrt(n+i)
else:
i += 2
if n != 1:
res.append(n)
return res
def pull_off_factors(n, p, output_list):
    """Divide every factor p out of n.

    Appends (p, multiplicity) to output_list when p divides n at least
    once, and returns n with all factors of p removed.
    """
    multiplicity = 0
    while n % p == 0:
        n //= p
        multiplicity += 1
    if multiplicity != 0:
        output_list.append((p, multiplicity))
    return n
def prime_factors(n):
    """Factorize n, returning a list of (prime, exponent) pairs.

    Strips the primes below 30 first, then scans wheel-30 candidates
    c + p for p in other_primes (all residues coprime to 30), stopping
    once c exceeds sqrt(n)+1; any leftover n > 1 is itself prime.
    """
    output_list = []
    primes = [2,3,5,7,11,13,17,19,23,29]
    other_primes = [1,7,11,13,17,19,23,29]
    for p in primes:
        n = pull_off_factors(n, p, output_list)
    c = 0
    while True:
        # Bound shrinks as factors are divided out of n.
        top = n**0.5 + 1
        c += 30
        if c > top:
            if n != 1:
                output_list.append((n,1))
            return output_list
        # Candidates c+1, c+7, ..., c+29; composites are harmless because
        # their prime factors were already removed.
        for p in other_primes:
            n = pull_off_factors(n, c+p, output_list)
#def factors(n):
# """ Provide all factors in a list """
# factors = prime_factors(n)
#
# all = [1]
# for p,e in factors:
# prev = all[:]
# pn = 1
# for i in range(e):
# pn *= p
# all.extend([a*pn for a in prev])
# all.sort()
# return all
# a much concise version
def factors(n):
    """Return all positive divisors of n in ascending order.

    Improvement: materialize the result as a list -- the old version
    returned a lazy, single-use `filter` object. Iteration behaviour is
    unchanged, but the result can now be indexed and reused.
    """
    return [i for i in range(1, n + 1) if n % i == 0]
| [
"creasywuqiong@gmail.com"
] | creasywuqiong@gmail.com |
6cd6e5909a0368323c8af0e4fa9a44957c2f0f36 | 5636cb0c282d03e91a830d30cec3bd54c225bd3b | /P_05_AlgorithmiqueProgrammation/03_Tris/TD_01_Bulles/programmes/tri_bulles.py | 3cb89b551a595d40d3e8a838803994b50a2c38c8 | [] | no_license | xpessoles/Informatique | 24d4d05e871f0ac66b112eee6c51cfa6c78aea05 | 3cb4183647dc21e3acbcbe0231553a00e41e4e55 | refs/heads/master | 2023-08-30T21:10:56.788526 | 2021-01-26T20:57:51 | 2021-01-26T20:57:51 | 375,464,331 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | import random
def tri_bulles_naif(l):
    """Naive bubble sort: n-1 full passes over the list, in place."""
    derniere = len(l) - 1
    for _ in range(derniere):
        for j in range(derniere):
            if l[j] > l[j + 1]:
                l[j], l[j + 1] = l[j + 1], l[j]
def tri_bulles(l):
    """Bubble sort in place, ascending.

    Improvement: a pass that performs no swap terminates the sort early,
    giving O(n) behaviour on already-sorted input; the shrinking inner
    range (len(l)-i-1) is kept from the original.
    """
    for i in range(0, len(l) - 1):
        swapped = False
        for j in range(0, len(l) - i - 1):
            if l[j] > l[j + 1]:
                l[j], l[j + 1] = l[j + 1], l[j]
                swapped = True
        if not swapped:
            break
"""
l = [random.randint(0,10) for i in range(10)]
print(l)
tri_bulles_naif(l)
print(l)
"""
l = [random.randint(0,10) for i in range(10)]
print(l)
tri_bulles(l)
print(l)
| [
"xpessoles.ptsi@free.fr"
] | xpessoles.ptsi@free.fr |
d69be9222ffe98bad85ffbde662445a8ef068221 | 8a74d61f5fc523f633ba3a15fbf7560c7b1b4d22 | /homework_one/nth_fibonacii/nth_fibonacci.py | ebcb66255eae30a15ea2f959767079a9ae9695ab | [] | no_license | MariaKrusteva/HackBulgaria | 9bb9488b72cadb1fca4f0b3bdebf0dc8fd6c3bc6 | a753426c50464f97555e90c5b1b89fe81212bec7 | refs/heads/master | 2020-05-29T19:19:21.688183 | 2014-03-24T14:51:28 | 2014-03-24T14:51:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | def nth_fibonacci(n):
if n == 1:
return (1)
elif n == 2:
return (1)
else:
return nth_fibonacci(n-1) + nth_fibonacci(n-2)
| [
"mimito_68@abv.bg"
] | mimito_68@abv.bg |
052907234bd35f10b44602a79f07bbea1423c64b | 0485085be4c2078665e5cd3141c96835e003612b | /subset/build/lib/ember/__init__.py | 66bec60d989ad6d8141edbe948b0509ff0235886 | [] | no_license | 6r0k3d/ml-intro | d20e0ee6b25aa69656954197dc9d1336a628f2ce | 0ade59e3e2d67d342040fffc7cb2ec2274489e53 | refs/heads/master | 2020-04-17T19:42:46.771998 | 2019-02-25T19:48:48 | 2019-02-25T19:48:48 | 166,875,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,819 | py | # -*- coding: utf-8 -*-
import os
import json
import tqdm
import numpy as np
import pandas as pd
import lightgbm as lgb
import multiprocessing
from .features import PEFeatureExtractor
def raw_feature_iterator(file_paths):
    """Yield raw feature lines, one at a time, from each input file in order."""
    for file_path in file_paths:
        with open(file_path, "r") as handle:
            yield from handle
def vectorize(irow, raw_features_string, X_path, y_path, nrows):
    """
    Vectorize one raw-features JSON line and write row `irow` of the
    on-disk X/y arrays (opened as writable memmaps so worker processes
    can fill rows independently).
    """
    extractor = PEFeatureExtractor()
    raw_features = json.loads(raw_features_string)
    feature_vector = extractor.process_raw_features(raw_features)
    # NOTE(review): this print fires once per sample -- looks like leftover
    # debugging output; confirm before removing.
    print(feature_vector)
    y = np.memmap(y_path, dtype=np.float32, mode="r+", shape=nrows)
    y[irow] = raw_features["label"]
    X = np.memmap(X_path, dtype=np.float32, mode="r+", shape=(nrows, extractor.dim))
    X[irow] = feature_vector
def vectorize_unpack(args):
    """
    Pass-through that unpacks an argument tuple into vectorize(); needed
    because multiprocessing's imap delivers a single argument per task.
    """
    return vectorize(*args)
def vectorize_subset(X_path, y_path, raw_feature_paths, nrows):
    """
    Vectorize one data subset (train or test) and write it to disk.

    Creates the X (nrows x feature-dim) and y (nrows) files up front,
    then lets a process pool fill rows in parallel; each worker reopens
    the files as "r+" memmaps inside vectorize().
    """
    # Create space on disk to write features to; `del` drops the handles so
    # only the worker processes write to the files afterwards.
    extractor = PEFeatureExtractor()
    X = np.memmap(X_path, dtype=np.float32, mode="w+", shape=(nrows, extractor.dim))
    y = np.memmap(y_path, dtype=np.float32, mode="w+", shape=nrows)
    del X, y
    # Distribute the vectorization work; tqdm just drains the iterator for
    # a progress bar, the results themselves are written as side effects.
    pool = multiprocessing.Pool()
    argument_iterator = ((irow, raw_features_string, X_path, y_path, nrows)
                         for irow, raw_features_string in enumerate(raw_feature_iterator(raw_feature_paths)))
    for _ in tqdm.tqdm(pool.imap_unordered(vectorize_unpack, argument_iterator), total=nrows):
        pass
def create_vectorized_features(data_dir):
    """
    Create feature vectors from the raw-feature JSONL files in data_dir
    and write them to X_train/y_train and X_test/y_test .dat files.

    NOTE(review): the row counts (7000 train / 1750 test) and the single
    train shard (range(1)) are hard-coded for this subset, while
    create_metadata below reads 6 train shards -- confirm which layout
    this checkout actually uses.
    """
    print("Vectorizing training set")
    X_path = os.path.join(data_dir, "X_train.dat")
    y_path = os.path.join(data_dir, "y_train.dat")
    raw_feature_paths = [os.path.join(data_dir, "train_features_{}.jsonl".format(i)) for i in range(1)]
    vectorize_subset(X_path, y_path, raw_feature_paths, 7000)
    print("Vectorizing test set")
    X_path = os.path.join(data_dir, "X_test.dat")
    y_path = os.path.join(data_dir, "y_test.dat")
    raw_feature_paths = [os.path.join(data_dir, "test_features.jsonl")]
    vectorize_subset(X_path, y_path, raw_feature_paths, 1750)
def read_vectorized_features(data_dir, subset=None):
    """
    Open the vectorized feature files as read-only memory-mapped arrays.

    subset: "train" -> (X_train, y_train); "test" -> (X_test, y_test);
    None -> (X_train, y_train, X_test, y_test); any other value -> None.
    Row counts (7000/1750) must match what create_vectorized_features wrote.
    """
    if subset is not None and subset not in ["train", "test"]:
        return None
    ndim = PEFeatureExtractor.dim
    X_train = None
    y_train = None
    X_test = None
    y_test = None
    if subset is None or subset == "train":
        X_train_path = os.path.join(data_dir, "X_train.dat")
        y_train_path = os.path.join(data_dir, "y_train.dat")
        X_train = np.memmap(X_train_path, dtype=np.float32, mode="r", shape=(7000, ndim))
        y_train = np.memmap(y_train_path, dtype=np.float32, mode="r", shape=7000)
        if subset == "train":
            return X_train, y_train
    if subset is None or subset == "test":
        X_test_path = os.path.join(data_dir, "X_test.dat")
        y_test_path = os.path.join(data_dir, "y_test.dat")
        X_test = np.memmap(X_test_path, dtype=np.float32, mode="r", shape=(1750, ndim))
        y_test = np.memmap(y_test_path, dtype=np.float32, mode="r", shape=1750)
        if subset == "test":
            return X_test, y_test
    return X_train, y_train, X_test, y_test
def read_metadata_record(raw_features_string):
    """Parse one raw-features JSON line and keep only its metadata fields."""
    record = json.loads(raw_features_string)
    return {key: record[key] for key in ("sha256", "appeared", "label")}
def create_metadata(data_dir):
    """
    Extract per-sample metadata from the raw-feature JSONL files, tag each
    record with its subset, write metadata.csv into data_dir and return
    the resulting dataframe.

    NOTE(review): reads 6 train shards (range(6)) while
    create_vectorized_features above reads only 1 -- confirm which layout
    this checkout actually uses.
    """
    pool = multiprocessing.Pool()
    train_feature_paths = [os.path.join(data_dir, "train_features_{}.jsonl".format(i)) for i in range(6)]
    train_records = list(pool.imap(read_metadata_record, raw_feature_iterator(train_feature_paths)))
    train_records = [dict(record, **{"subset": "train"}) for record in train_records]
    test_feature_paths = [os.path.join(data_dir, "test_features.jsonl")]
    test_records = list(pool.imap(read_metadata_record, raw_feature_iterator(test_feature_paths)))
    test_records = [dict(record, **{"subset": "test"}) for record in test_records]
    metadf = pd.DataFrame(train_records + test_records)[["sha256", "appeared", "subset", "label"]]
    metadf.to_csv(os.path.join(data_dir, "metadata.csv"))
    return metadf
def read_metadata(data_dir):
    """Load a previously written metadata.csv from data_dir as a DataFrame."""
    csv_path = os.path.join(data_dir, "metadata.csv")
    return pd.read_csv(csv_path, index_col=0)
def train_model(data_dir):
    """
    Train a LightGBM binary classifier on the vectorized training set
    found in data_dir and return the booster.
    """
    # Read data
    X_train, y_train = read_vectorized_features(data_dir, subset="train")
    # Filter unlabeled data (label -1 marks unlabeled samples here).
    train_rows = (y_train != -1)
    # Train with LightGBM's default binary objective settings.
    lgbm_dataset = lgb.Dataset(X_train[train_rows], y_train[train_rows])
    lgbm_model = lgb.train({"application": "binary"}, lgbm_dataset)
    return lgbm_model
def predict_sample(lgbm_model, file_data):
    """
    Score one raw PE file (bytes) with a trained LightGBM model and
    return its scalar prediction (first element of the 1-sample batch).
    """
    extractor = PEFeatureExtractor()
    features = np.array(extractor.feature_vector(file_data), dtype=np.float32)
    return lgbm_model.predict([features])[0]
| [
"6r0k3d@gmail.com"
] | 6r0k3d@gmail.com |
5b6cd49c625a8e8bae87cd66c435ae0c843751d7 | c287ab4324bbfdd67daa1df3327c3aafb4fd75a7 | /ValcanoModel.py | 5e35c98f8e97df3df2d2ccef64bc33aaaccf23d6 | [] | no_license | AkshayAltics/Python_Study | f8c4d316e45fc23defc1817768715cba31eeada4 | c6e85788026b44c086da497de26dcd0662b28509 | refs/heads/master | 2020-04-28T15:00:23.790316 | 2019-03-15T05:50:15 | 2019-03-15T05:50:15 | 175,356,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,564 | py | '''
Created on 13-Mar-2019
@author: Admin
'''
import pandas
from geopy.geocoders import ArcGIS
import folium
df=pandas.read_csv("Volcanoes.txt")
lat=list(df["LAT"])
lon=list(df["LON"])
elev=list(df["ELEV"])
name=list(df["NAME"])
html = """
Volcano name:<br>
<a href="https://www.google.com/search?q=%%22%s%%22" target="_blank">%s</a><br>
Height: %s m
"""
def color_producer(elevation):
    """Map an elevation value to a marker colour bucket.

    < 1000 -> "green"; 1000 (inclusive) to 3000 (exclusive) -> "blue";
    3000 and above -> "red".
    """
    if elevation >= 3000:
        return "red"
    if elevation >= 1000:
        return "blue"
    return "green"
return "red"
map=folium.Map
map=folium.Map(location=[38.59, -99.82],zoom_start=6,tiles="Mapbox Bright")
map.save("Valcanos1.html")
fg=folium.FeatureGroup(name="Valcanoes")
fg.add_child(folium.Marker(location=[18.499097, 73.828352],popup="HII im Marker",icon=folium.Icon(color="green")))
for lt,ln,add,name in zip(lat,lon,elev,name):
iframe = folium.IFrame(html=html % (name,name,add), width=200, height=100)
fg.add_child(folium.CircleMarker(location=[lt,ln],radius=6, popup=folium.Popup(iframe),fill_color=color_producer(add),color="gray",fill_opacity=0.9))
fgp=folium.FeatureGroup(name="Population")
fgp.add_child(folium.GeoJson(data=open("world.json",'r',encoding='utf-8-sig').read(),style_function=lambda x:{'fillColor':'green' if x["properties"]["POP2005"]<10000000
else 'orange' if 10000000<= x["properties"]["POP2005"]<20000000 else 'red' if 20000000<= x["properties"]["POP2005"]<50000000 else 'black'}))
df=pandas.read_csv("f://CodeRepo/demo_c.txt")
print(df)
nom=ArcGIS()
df["Address"]=df["Address"]+","+df["City"]+","+df["State"]+","+df["Country"]
print(df)
'''
#df["Co-ordinates"]=df["Address"].apply(nom.geocode)
print(df["Co-ordinates"])
print(df["Co-ordinates"][0].latitude)
print(df["Co-ordinates"][0].longitude)
df["Latitude"]=df["Co-ordinates"][0].longitude
df["Latitude"]=df["Co-ordinates"].apply(lambda x:x.latitude if x!=None else None)
df["longitude"]=df["Co-ordinates"].apply(lambda x:x.longitude if x!=None else None)
print(df)'''
lat1=list([18.20,18.30,18.40,18.50])
lon1=list([73.10,73.20,73.40,73.50])
address=list(df["Address"])
html = """<h4>Volcano information:</h4>
Height: %s m
"""
fgi=folium.FeatureGroup(name="icon")
for lt,ln,add in zip(lat1,lon1,address):
iframe1 = folium.IFrame(html=html % str(ln), width=200, height=100)
fgi.add_child(folium.Marker(location=[lt,ln],popup=folium.Popup(iframe1),icon=folium.Icon(color="green")))
map.add_child(fgi)
map.add_child(fgp)
map.add_child(fg)
map.add_child(folium.LayerControl())
map.save("Valcanos1.html")
print("DONE") | [
"akshay@alticssystems.com"
] | akshay@alticssystems.com |
02462adad604ab5a51df03f9d2cf5bb78fe4fadc | 159deaaef070ee937b12412dddc9369563c2f0b4 | /Section 5/starter_code/tests/integration/models/test_item.py | 8665d35002f3bdc75cdb1082580a6c7e64c72588 | [] | no_license | kaloyansabchev/Udemy-Automated-Software-Testing-with-Python | b92067d0af92b3e86150f97c0f07ba6736c64311 | a954b09a88961b06bf3267a483df2522c4ec5447 | refs/heads/main | 2023-03-20T08:23:54.177674 | 2021-03-16T17:15:58 | 2021-03-16T17:15:58 | 344,233,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | from models.item import ItemModel
from tests.base_test import BaseTest
class ItemTest(BaseTest):
    """Integration test for ItemModel persistence (runs against the
    app/database context provided by BaseTest)."""
    def test_crud(self):
        """Save an item and verify it can be found by name."""
        with self.app_context():
            item = ItemModel('test', 19.99)
            item.save_to_db()
            self.assertIsNotNone(ItemModel.find_by_name('test'))
"77458843+kaloyansabchev@users.noreply.github.com"
] | 77458843+kaloyansabchev@users.noreply.github.com |
28d548a1e5a496fcbd33a800f11e1a3da376399f | dba84ea46f15207025cd7f255df83516a144657b | /my_blog/migrations/0003_alter_comment_post.py | d8bc5aee945094e0995d2a225db83d749b16b469 | [] | no_license | Duvie728/CRUD-DJANGO-APPLICATION | cb151c6c04b366adee06deda957f9c655c243b8a | f47c7e676c6a2754b54335d1adebc997fadcc7c7 | refs/heads/main | 2023-05-04T11:08:02.726068 | 2021-05-21T15:44:34 | 2021-05-21T15:44:34 | 363,314,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # Generated by Django 3.2 on 2021-05-03 09:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: re-point Comment's 'Post' FK at
    my_blog.post with CASCADE delete and a 'comments' reverse accessor.
    (The capitalized field name 'Post' is unconventional but matches the
    model as defined.)"""
    dependencies = [
        ('my_blog', '0002_comment'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='Post',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='my_blog.post'),
        ),
    ]
| [
"80975277+Duvie728@users.noreply.github.com"
] | 80975277+Duvie728@users.noreply.github.com |
652601604241c2e2ee692a47f47ced4f2df80ffb | ae2abae80805067bd31797a6572f468cb5de9a29 | /1/25.py | 9c12d77878448b48996e2141a94c5ec2cd6d5159 | [] | no_license | nikuzuki/I111_Python_samples | 9f3f8a16cce2ad28d84fa9e0ce9c93b710f7e0c6 | 42aec52e1680e6837845bfb2ff374d44f51092be | refs/heads/master | 2020-03-19T02:40:52.515465 | 2018-06-01T05:46:29 | 2018-06-01T05:46:29 | 135,650,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | # forループ - 繰り返し実行
# (2i - 1)^2のiが1~nの時の総和を計算
n = int(input()) # 標準入力で整数値nを代入
ans_sum = 0
for i in range(1, 2*n, 2): # 1から~2*nまで繰り返す、iは2ずつ増える
ans_sum = ans_sum + i * i
print(ans_sum)
| [
"nikuzuki29@gmail.com"
] | nikuzuki29@gmail.com |
1085ba45a8f735ea9ea5fa371a548f5de125ee1a | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/leap/a3b8b55879a04bce804d9c199db55772.py | c6ce2afde394c9c920e45ec48d6cd4dde93f53ae | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 416 | py | __author__ = 'Ben'
# on every year that is evenly divisible by 4
# except every year that is evenly divisible by 100
# unless the year is also evenly divisible by 400
def is_leap_year(year):
    """Gregorian leap-year rule: every 4th year, except centuries that are
    not divisible by 400."""
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
2e75c1475e8e6feedd4344f417482e182a58a796 | f9ba9eccf8b7706a2d5e23512fe0897c4727177a | /02/code/get_next_RANDU_int.py | 63f28b5b495f09d413e82c65439d2c7d78453142 | [] | no_license | vincentmader/FSIM-exercises | 80be1654b9667585db49f787a1d02c1a9f40ad15 | 31f4ecd7fb747dee54edad08fd7629bb87012ba6 | refs/heads/master | 2023-03-01T06:45:27.085711 | 2021-02-10T23:06:13 | 2021-02-10T23:06:13 | 317,183,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | from numpy import uint64, power
def main(previous_int):
    """One step of the RANDU LCG: x' = 65539 * x mod 2**31.

    All operands are numpy uint64, so 65539 * (2**31 - 1) < 2**48 cannot
    wrap the 64-bit arithmetic.
    """
    seed = uint64(previous_int)
    multiplier = uint64(65539)
    modulus = power(uint64(2), uint64(31))
    return (multiplier * seed) % modulus
| [
"vincent.mader@icloud.com"
] | vincent.mader@icloud.com |
56469db01ea249ddea32bb268aa7dbfd67e1ee3b | f51bbc406fc8ada180cc2ed6521314cf5857cd18 | /web/apps/lms/views.py | b6b069a7c5546e0b4b7e49fcf625dcb85499a537 | [
"MIT"
] | permissive | QuantEdu/quant | f54f0c80dd3ef1cec4108ad519eb2a16dfb40a6a | 41fa00cb0b8be6c5cf67b7a334d4340163255160 | refs/heads/master | 2021-07-15T05:52:37.923473 | 2018-11-27T06:09:50 | 2018-11-27T06:09:50 | 118,225,099 | 0 | 0 | MIT | 2018-11-23T08:15:32 | 2018-01-20T08:40:27 | HTML | UTF-8 | Python | false | false | 143 | py | # Django core
from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the LMS landing page template (no extra context passed)."""
    return render(request, 'lms/index.html')
| [
"harchenko.grape@gmail.com"
] | harchenko.grape@gmail.com |
9a6a545c995d5073bf0eabc225e82eafbdc90914 | 426ec0e464993949e594cf044fd206b53a7d9120 | /open_set.py | 54330d050c38c23d63ffa903bc9d19031b192d4e | [] | no_license | HubertTang/DeepMir | 05024bf4a8c535d9dd0bf77f97160dba8ebcb374 | cc2ec565fbcf1bcc2d6042ec776912d06c2aadc2 | refs/heads/master | 2020-06-12T03:55:36.682052 | 2019-07-08T07:33:11 | 2019-07-08T07:33:11 | 194,187,136 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,411 | py | import numpy as np
import pandas as pd
from Bio import SeqIO
import scipy.stats as stats
from keras.models import load_model
from keras import backend as K
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
plt.switch_backend('agg')
from sklearn.metrics import roc_curve, auc
import random
import seaborn as sns
import os
import sys
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parent_dir)
import utils
import rna_dataset
random.seed(66)
def plot_normal(data, data_label, dir_name, fig_name, line_color='coral'):
    """Fit a normal pdf to `data` and save the curve to dir_name/fig_name.png.

    NOTE(review): line_color is currently unused -- the coloured plot call
    is commented out below.
    """
    # plot and save the normal distribution of the given data
    m = np.mean(data)
    sig = np.std(data)
    # Evaluate the fitted pdf on a fixed probability axis [-0.5, 1.5].
    x = np.linspace(-0.5, 1.5, 200)
    y = stats.norm.pdf(x, m, sig)
    # plt.plot(x, y, color=line_color)
    plt.plot(x, y, alpha=0.7, label=data_label)
    # plt.hist(data, bins=100, density=True, stacked=True, alpha=0.7)
    # plt.hist(data, bins=50, alpha=0.7)
    plt.grid()
    plt.title(fig_name)
    plt.xlabel('Probability')
    plt.ylabel('Normal Distribution')
    plt.legend()
    plt.savefig(f"{dir_name}/{fig_name}.png")
    plt.close('all')
def plot_dense(data, data_label, dir_name, fig_name):
    """Save a seaborn KDE (probability density) of `data` to
    dir_name/fig_name.png."""
    # plot and save the probability density of the given data
    plt.figure()
    sns.kdeplot(data, shade=True, label=data_label, alpha=.7)
    plt.grid()
    plt.title(fig_name)
    plt.xlabel('Probability')
    plt.ylabel('Density')
    plt.legend()
    plt.savefig(f"{dir_name}/{fig_name}.png")
    plt.close('all')
def to_percent(y, position):
    """Axis-tick formatter: fraction -> percentage string.

    `position` is required by matplotlib's FuncFormatter signature but
    is not used.
    """
    return "{}%".format(100 * y)
def plot_hist(data, data_label, dir_name, fig_name):
    """Save a broken-axis percentage histogram of `data`.

    Two stacked axes share the x axis: the top shows the 0.8-1.0 band,
    the bottom the 0-0.1 band, with small diagonal marks indicating the
    break. The weights make bar heights fractions of len(data).
    """
    # plot and save the probability histgram of the given data
    # plt.figure()
    f, (ax, ax2) = plt.subplots(2, 1, sharex=True)
    # ax.hist(data, bins=100, density=True, alpha=0.7, label=data_label)
    # ax2.hist(data, bins=100, density=True, alpha=0.7)
    # formatter = FuncFormatter(to_percent)
    ax.hist(data, bins=100, weights= [1./ len(data)] * len(data), alpha=0.7, label=data_label)
    ax2.hist(data, bins=100, weights= [1./ len(data)] * len(data), alpha=0.7)
    # ax.gca().yaxis.set_major_formatter(formatter)
    # ax2.gca().yaxis.set_major_formatter(formatter)
    ax.set_ylim(0.8, 1.0)
    ax2.set_ylim(0, 0.1)
    # Hide the facing spines and move the ticks so the two panels read as
    # one axis with a gap.
    ax.spines['bottom'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax.xaxis.tick_top()
    ax.tick_params(labeltop=False)
    ax2.xaxis.tick_bottom()
    d = .015 # how big to make the diagonal lines in axes coordinates
    # arguments to pass to plot, just so we don't keep repeating them
    kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
    ax.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
    ax.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
    kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
    ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
    ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
    # sns.distplot(data, hist=True, bins=100, label=data_label)
    ax.grid()
    ax2.grid()
    # ax.title(fig_name)
    plt.xlabel('Probability')
    plt.ylabel('Percentage')
    ax.legend()
    plt.savefig(f"{dir_name}/{fig_name}.png")
    plt.close('all')
def softmax2npy(model_name, test_file, num_classes):
    """Run the saved Keras model over every sequence in ``data/<test_file>``
    and save the raw softmax output as ``<model stem>/<test stem>.npy``.

    NOTE(review): relies on project-level helpers (``load_model``, ``utils``,
    ``rna_dataset``); inputs appear to be one-hot encoded 200x4 sequences --
    confirm against rna_dataset.RNA_onehot.
    """
    model = load_model(model_name)
    model.summary()
    # load test dataset
    test_file_path = f"data/{test_file}"
    num_tests = utils.count_lines(test_file_path)
    # shuffle=False keeps predictions aligned with the input file order
    test_generator = rna_dataset.RNA_onehot(test_file_path, num_tests, batch_size=256, dim=(200, 4),
                                            num_channels=1, num_classes=num_classes, shuffle=False)
    # prediction
    print('Start predicting ... ...')
    prediction = model.predict_generator(test_generator, workers=6,
                                         use_multiprocessing=True, verbose=1)
    # prediction = model.predict_generator(test_generator, verbose=1)
    prediction = model.predict_generator(test_generator, workers=6,
                                         use_multiprocessing=True, verbose=1) if False else prediction
    np.save(f"{model_name.split('.').pop(0)}/{test_file.split('.').pop(0)}.npy", prediction)
    # release the memory occupied by GPU
    K.clear_session()
def out_plot(dir_name, array_file):
    """Load a saved softmax array, reduce each row to its max class
    probability, and plot the broken-axis histogram for it."""
    arr = np.load(f"{dir_name}/{array_file}.npy")
    # one confidence value (top class probability) per input sequence
    arr_1d = [np.max(r) for r in arr]
    # plot_normal(arr_1d, array_file, dir_name, 'test')
    print(np.max(arr_1d))
    # plot_dense(arr_1d, array_file, dir_name, 'test_dense')
    plot_hist(arr_1d, array_file, dir_name, 'test_broken_hist')
def draw_roc(model_name, posi_data, neg_data):
    """Draw an ROC curve for the model's top-class confidence, using
    ``num_seq`` randomly sampled positives and negatives, and save it as
    ``<model_name>/ROC.png``.  Also reports the Youden-index cutoff."""
    # initialization
    num_seq = 1700
    # generate the label and score
    posi_arr = np.load(f"{model_name}/{posi_data}.npy")
    # random.shuffle shuffles the rows of the array in place
    random.shuffle(posi_arr)
    posi_1d = [np.max(r) for r in posi_arr][:num_seq]
    neg_arr = np.load(f"{model_name}/{neg_data}.npy")
    random.shuffle(neg_arr)
    neg_1d = [np.max(r) for r in neg_arr][:num_seq]
    print(len(posi_1d), posi_1d[:10], len(neg_1d), neg_1d[:10])
    y_label = np.append(np.ones((num_seq,)), np.zeros((num_seq,)))
    posi_1d.extend(neg_1d)
    y_score = np.array(posi_1d)
    # print(y_label.shape, y_label[149998:150002], y_score.shape, y_score[149998:150002])
    # calculate the result of roc
    fpr, tpr, threshold = roc_curve(y_label, y_score)
    roc_auc = auc(fpr, tpr)
    # find the optimal cut off based on the Youden index
    # Youden Index = sensitiveity + specificity - 1
    # tpr = sensitivity
    # fpr = 1 - specificity
    youden_index = tpr - fpr
    optimal_thres = threshold[youden_index.argmax()]
    # plot the roc
    plt.figure()
    lw = 2
    plt.plot(fpr, tpr, color='darkorange',
             lw=lw, label=f'ROC curve (area = {roc_auc:.5f})\n(cutoff = {optimal_thres:.5f})')
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title(model_name)
    plt.legend(loc="lower right")
    plt.savefig(f"{model_name}/ROC.png")
    plt.close('all')
def paper_plot(dir_name):
    """Produce the publication figures: a broken-axis percentage histogram of
    miRNA vs. negative confidences (``test_paper_hist.png``) and an ROC curve
    over *all* samples (``test_paper_ROC_2.png``)."""
    # plot the miRNA with nagetive inputs
    # draw the precentage histgram
    mirna_arr = np.load(f"{dir_name}/mi_balance_60_50_test.npy")
    mirna_arr_1d = [np.max(r) for r in mirna_arr]
    other_arr = np.load(f"{dir_name}/neg_other.npy")
    other_arr_1d = [np.max(r) for r in other_arr]
    f, (ax, ax2) = plt.subplots(2, 1, sharex=True)
    # ax.hist(data, bins=100, density=True, alpha=0.7, label=data_label)
    # ax2.hist(data, bins=100, density=True, alpha=0.7)
    # formatter = FuncFormatter(to_percent)
    ax.hist(mirna_arr_1d, color='C1', bins=100, weights= [1./ len(mirna_arr_1d)] * len(mirna_arr_1d), alpha=0.6, label='miRNA')
    ax2.hist(mirna_arr_1d, color='C1', bins=100, weights= [1./ len(mirna_arr_1d)] * len(mirna_arr_1d), alpha=0.6)
    ax.hist(other_arr_1d, color='C2', bins=100, weights= [1./ len(other_arr_1d)] * len(other_arr_1d), alpha=0.6, label='negative')
    ax2.hist(other_arr_1d, color='C2', bins=100, weights= [1./ len(other_arr_1d)] * len(other_arr_1d), alpha=0.6)
    # ax.gca().yaxis.set_major_formatter(formatter)
    # ax2.gca().yaxis.set_major_formatter(formatter)
    # ax.set_ylim(0.8, 1.0)
    # ax2.set_ylim(0, 0.045)
    ax.set_ylim(0.6, 0.9)
    ax2.set_ylim(0, 0.06)
    # hide facing spines so the two panels read as one broken y-axis
    ax.spines['bottom'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax.xaxis.tick_top()
    ax.tick_params(labeltop=False)
    ax2.xaxis.tick_bottom()
    d = .015  # how big to make the diagonal lines in axes coordinates
    # arguments to pass to plot, just so we don't keep repeating them
    kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
    ax.plot((-d, +d), (-d, +d), **kwargs)        # top-left diagonal
    ax.plot((1 - d, 1 + d), (-d, +d), **kwargs)  # top-right diagonal
    kwargs.update(transform=ax2.transAxes)  # switch to the bottom axes
    ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs)  # bottom-left diagonal
    ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal
    # sns.distplot(data, hist=True, bins=100, label=data_label)
    ax.grid()
    ax2.grid()
    # ax.title(fig_name)
    plt.xlabel('Probability')
    plt.ylabel('Percentage')
    ax.legend(loc="upper left")
    plt.savefig(f"{dir_name}/test_paper_hist.png")
    plt.close('all')
    # draw the roc curve
    num_seq = 1700  # NOTE(review): unused here -- the full arrays are kept below
    # generate the label and score
    posi_arr = np.load(f"{dir_name}/mi_balance_60_50_test.npy")
    random.shuffle(posi_arr)
    # posi_1d = [np.max(r) for r in posi_arr][:num_seq]
    posi_1d = [np.max(r) for r in posi_arr]
    neg_arr = np.load(f"{dir_name}/neg_other.npy")
    random.shuffle(neg_arr)
    # neg_1d = [np.max(r) for r in neg_arr][:num_seq]
    neg_1d = [np.max(r) for r in neg_arr]
    print(len(posi_1d), posi_1d[:10], len(neg_1d), neg_1d[:10])
    # y_label = np.append(np.ones((num_seq,)), np.zeros((num_seq,)))
    y_label = np.append(np.ones((len(posi_1d),)), np.zeros((len(neg_1d),)))
    posi_1d.extend(neg_1d)
    y_score = np.array(posi_1d)
    # print(y_label.shape, y_label[149998:150002], y_score.shape, y_score[149998:150002])
    # calculate the result of roc
    fpr, tpr, threshold = roc_curve(y_label, y_score)
    roc_auc = auc(fpr, tpr)
    # find the optimal cut off based on the Youden index
    # Youden Index = sensitiveity + specificity - 1
    # tpr = sensitivity
    # fpr = 1 - specificity
    youden_index = tpr - fpr
    optimal_thres = threshold[youden_index.argmax()]
    # NOTE(review): hard-coded magic index into the threshold array; only
    # meaningful for one particular dataset size -- confirm before reuse.
    optimal_index = 20500
    print(fpr[optimal_index], threshold[optimal_index])
    print("highest:", optimal_thres)
    # plot the roc
    plt.figure()
    lw = 2
    plt.plot(fpr, tpr, color='darkorange',
             lw=lw, label=f'ROC curve (area = {roc_auc:.5f})')
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--', label='Random ROC curve')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    # plt.title(model_name)
    plt.legend(loc="lower right")
    plt.savefig(f"{dir_name}/test_paper_ROC_2.png")
    plt.close('all')
def paper_plot_hist_line(dir_name):
    """Line-plot version of the paper histogram: percentage-per-bin curves for
    miRNA vs. negative confidences on a broken y-axis, saved as
    ``test_paper_hist_line.png``."""
    # plot the miRNA with nagetive inputs
    # draw the precentage histgram
    mirna_arr = np.load(f"{dir_name}/mi_balance_60_50_test.npy")
    mirna_arr_1d = [np.max(r) for r in mirna_arr]
    other_arr = np.load(f"{dir_name}/neg_other.npy")
    other_arr_1d = [np.max(r) for r in other_arr]
    # 100 equal-width bins over [0, 1]
    bins = [0.01 * i for i in range(101)]
    f, (ax, ax2) = plt.subplots(2, 1, sharex=True)
    m, edges = np.histogram(mirna_arr_1d, bins)
    m = 100 * m / len(mirna_arr_1d)  # counts -> percentage
    centers = 0.5 * (edges[1:] + edges[:-1])
    o, _ = np.histogram(other_arr_1d, bins)
    o = 100 * o / len(other_arr_1d)
    ax.plot(centers, m, color='C1', label='miRNA')
    ax2.plot(centers, m, color='C1')
    ax.plot(centers, o, color='blue', label='negative', linestyle="--")
    ax2.plot(centers, o, color='blue', linestyle="--")
    ax.set_ylim(60, 90)
    ax2.set_ylim(0, 6)
    # hide facing spines so the two panels read as one broken y-axis
    ax.spines['bottom'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax.xaxis.tick_top()
    ax.tick_params(labeltop=False)
    ax2.xaxis.tick_bottom()
    d = .015  # how big to make the diagonal lines in axes coordinates
    # arguments to pass to plot, just so we don't keep repeating them
    kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
    ax.plot((-d, +d), (-d, +d), **kwargs)        # top-left diagonal
    ax.plot((1 - d, 1 + d), (-d, +d), **kwargs)  # top-right diagonal
    kwargs.update(transform=ax2.transAxes)  # switch to the bottom axes
    ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs)  # bottom-left diagonal
    ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal
    # sns.distplot(data, hist=True, bins=100, label=data_label)
    ax.grid()
    ax2.grid()
    # ax.title(fig_name)
    plt.xlabel('Probability')
    plt.ylabel('Percentage')
    ax.legend(loc="upper left")
    plt.savefig(f"{dir_name}/test_paper_hist_line.png")
    plt.close('all')
def paper_plot_others(dir_name):
    """Overlayed percentage histograms of model confidence for three negative
    families (untrained miRNA, tRNA, CDBOX), saved as
    ``test_paper_hist_others.png``."""
    # plot the miRNA with nagetive inputs
    mirna = np.load(f"{dir_name}/neg_mirna_60.npy")
    mirna_1d = [np.max(r) for r in mirna]
    trna = np.load(f"{dir_name}/neg_trna.npy")
    trna_1d = [np.max(r) for r in trna]
    cdbox = np.load(f"{dir_name}/neg_cdbox.npy")
    cdbox_1d = [np.max(r) for r in cdbox]
    # weights normalise bar heights so each bar shows a fraction of the sample
    plt.hist(mirna_1d, color='C1', bins=100, weights= [1./ len(mirna_1d)] * len(mirna_1d), alpha=0.6, label='untrained miRNA')
    plt.hist(trna_1d, color='C2', bins=100, weights= [1./ len(trna_1d)] * len(trna_1d), alpha=0.6, label='tRNA')
    plt.hist(cdbox_1d, color='C3', bins=100, weights= [1./ len(cdbox_1d)] * len(cdbox_1d), alpha=0.6, label='CDBOX')
    plt.grid()
    # ax.title(fig_name)
    plt.xlabel('Probability')
    plt.ylabel('Percentage')
    # ax.legend(loc="upper left")
    plt.legend()
    plt.savefig(f"{dir_name}/test_paper_hist_others.png")
    plt.close('all')
def paper_plot_line_others(dir_name):
    """Line-plot version of the three-negative-family figure
    (untrained miRNA, tRNA, CDBOX), saved as ``test_paper_line_others.png``."""
    # plot the miRNA with nagetive inputs
    mirna = np.load(f"{dir_name}/neg_mirna_60.npy")
    mirna_1d = [np.max(r) for r in mirna]
    trna = np.load(f"{dir_name}/neg_trna.npy")
    trna_1d = [np.max(r) for r in trna]
    cdbox = np.load(f"{dir_name}/neg_cdbox.npy")
    cdbox_1d = [np.max(r) for r in cdbox]
    # 100 equal-width bins over [0, 1]
    bins = [0.01 * i for i in range(101)]
    m, edges = np.histogram(mirna_1d, bins)
    m = 100 * m / len(mirna_1d)  # counts -> percentage
    centers = 0.5 * (edges[1:] + edges[:-1])
    t, _ = np.histogram(trna_1d, bins)
    t = 100 * t / len(trna_1d)
    c, _ = np.histogram(cdbox_1d, bins)
    c = 100 * c / len(cdbox_1d)
    plt.plot(centers, m, color='C1', label='Untrained miRNA')
    plt.plot(centers, t, color='C2', label='tRNA', linestyle="--")
    plt.plot(centers, c, color='C3', label='CDBOX', linestyle=":")
    plt.grid()
    # ax.title(fig_name)
    plt.xlabel('Probability')
    plt.ylabel('Percentage')
    # ax.legend(loc="upper left")
    plt.legend()
    plt.savefig(f"{dir_name}/test_paper_line_others.png")
    plt.close('all')
if __name__ == '__main__':
    # Driver: pick the model/dataset names, make the output directory, then
    # run whichever analysis step is currently uncommented (paper_plot here).
    model_n = 'mi_balance_60_50'
    # test_file_ba_list = ['mi_balance_60_50_test', 'neg_mirna_60', 'neg_trna', 'neg_cdbox', 'neg_other']
    # test_file_ba_list = ['mi_imbalance_60_50_test', 'neg_mirna_60', 'neg_trna', 'neg_cdbox', 'neg_other']
    test_file_name = 'mi_balance_60_50_test'
    test_file_ba_list = ['mi_balance_60_50_test', 'neg_other']
    # create the per-model output directory if it does not already exist
    try:
        os.mkdir(model_n)
    except FileExistsError:
        pass
    # # generate prediction result from a list of dataset
    # for test_file_name in test_file_ba_list:
    #     softmax2npy(model_name=f'{model_n}.h5',
    #                 test_file=f'{test_file_name}.csv',
    #                 num_classes=165)
    # # generate prediction result from one specific dataset file
    # softmax2npy(model_name=f'{model_n}.h5',
    #             test_file=f'{test_file_name}.csv',
    #             num_classes=165)
    # # out_plot(test_file_name)
    # for plot_arr in test_file_ba_list:
    #     out_plot(model_n, plot_arr)
    # draw_roc(model_n, 'mi_balance_60_50_test', 'neg_other')
    # temp test
    # out_plot(model_n, test_file_name)
    paper_plot(model_n)
    # paper_plot_others(model_n)
    # paper_plot_line_others(model_n)
    # paper_plot_hist_line(model_n)
| [
"yukpok.tong@gmail.com"
] | yukpok.tong@gmail.com |
b2f49412d10ffc208d7f6b5a2f250dbb861a886e | 9913fb6756240ede8b65ac2f59f8693570ac401e | /database/admin.py | e1b433ea6868382eef94103df3e704cee61a3cdc | [] | no_license | R-Prince/scrumb_epos | 2e8ededa86cdf6cd5e26a58dea0b827678cc7de0 | c68dfa479e739ee67fbe9df851cdbba0472509c7 | refs/heads/master | 2023-04-19T13:13:02.702763 | 2021-05-16T21:19:15 | 2021-05-16T21:19:15 | 367,925,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | from django.contrib import admin
from .models import Sku, SkuData
# Sku data Admin
class SkuDataAdminInline(admin.TabularInline):
    """Tabular inline so SkuData rows can be edited on the parent Sku page."""
    model = SkuData
    # sales_total is shown read-only; presumably computed elsewhere -- confirm in models.
    readonly_fields = ('sales_total',)
# Sku Admin
class SkuAdmin(admin.ModelAdmin):
    """Admin for Sku with its SkuData rows embedded inline."""
    inlines = (SkuDataAdminInline,)
    # explicit field order on the Sku change form
    fields = ('sku', 'total_units', 'sku_description',)
# register Sku with the customised ModelAdmin on the default admin site
admin.site.register(Sku, SkuAdmin)
| [
"reneprince93@icloud.com"
] | reneprince93@icloud.com |
bcd6703adf6b6efc47c805564e9a629a40f3d9f8 | 76755cc00a3163db22f2daf4c1be59b52b9ebea5 | /Experiments/BBB/thermometer.py | 371b0c01d316d4cfc5fbc808f3a66df534f2ae49 | [] | no_license | romanomatthew23/lavaLamp | f1722b16c3c2a575e769d2fa3994e0e57348c0ee | 02716ba8c47f3e2ba65df2fe104da47d7bcb28ab | refs/heads/master | 2021-01-11T02:10:33.610198 | 2016-11-29T02:42:23 | 2016-11-29T02:42:23 | 70,098,623 | 0 | 0 | null | 2016-11-11T20:53:01 | 2016-10-05T20:54:21 | Matlab | UTF-8 | Python | false | false | 512 | py | import Adafruit_BBIO.ADC as ADC
from time import sleep
# NOTE(review): Python 2 script (statement-form print) for a BeagleBone Black
# analog temperature sensor on pin P9_40; will not run under Python 3 as-is.
ADC.setup()
delay = 1  # seconds between samples
while(True):
    value = ADC.read("P9_40")
    value = ADC.read("P9_40") #the library has a bug that requires sampling twice
    # ADC.read returns a 0..1 fraction of the 1.8V reference
    voltage = value * 1.8 #1.8V
    tempC = (voltage - 0.5) * 100 #convert from 10mv/deg with 500mV offset
    tempF = (tempC * 9.0 / 5.0) + 32.0
    #print "value = %f" % value
    print "voltage = %f" % voltage
    print "temp (deg C) = %f" % tempC
    print "temp (deg F) = %f" % tempF
    sleep(delay)
| [
"noreply@github.com"
] | romanomatthew23.noreply@github.com |
2c49c987c699a45efe461600eacfd0cc0c231961 | 6a4d58481d4c9709467dc799546807ef2c1720e6 | /Row data/Рейтинг регионов по численности среднего класса/to_csv.py | 5d7e73fdbe705387aca40ccad1a0a57ba7bd53a3 | [] | no_license | lnetw/Hackathon_project | 611aa6238d60b5ffd5fcf43c2f3deea027ce555d | fe0622c033f4557aebf060488e96937a814f40bb | refs/heads/master | 2023-04-22T04:50:36.307013 | 2020-12-14T12:09:15 | 2020-12-14T12:09:15 | 291,506,714 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | import pandas as pd
# Parse output.txt (three lines per record: rank, region name, middle-class
# share with a comma decimal separator) into a DataFrame, then write it to
# CSV and Excel.
COLUMNS = ['Место',
           'Регион',
           'Доля семей, относящихся к среднему классу, %',
           ]

# Accumulate rows in a plain list and build the DataFrame once at the end:
# DataFrame.append was O(n^2) per call and was removed in pandas 2.0.
rows = []
count_string = 0
dict_append = {}
with open('output.txt', encoding='UTF-8') as file:
    for line in file:
        # rstrip('\n') instead of line[:-1]: does not eat the last character
        # when the final line has no trailing newline
        value = line.rstrip('\n')
        if count_string == 0:
            dict_append['Место'] = int(value)
        elif count_string == 1:
            dict_append['Регион'] = value
        elif count_string == 2:
            # Russian-locale decimal comma -> dot before float conversion
            dict_append['Доля семей, относящихся к среднему классу, %'] = float(value.replace(',', '.'))
        count_string += 1
        if count_string >= 3:
            rows.append(dict_append)
            dict_append = {}
            count_string = 0

table = pd.DataFrame(rows, columns=COLUMNS)
table.to_csv('table_output.csv', index=False, header=True)
table.to_excel('table_excel.xlsx', index=False, header=True)  # requires an Excel writer engine (openpyxl)
print(table)
| [
"max7ermak@gmail.com"
] | max7ermak@gmail.com |
8ebfda79d97103cc841ba79f1f71822028d38762 | ef525d588a520c9ddb0e22a749a975a0a71d92cd | /loss/soft_t_test_loss.py | 5f76ec93e46b9f95c7e40a3720a56861aecfccce | [] | no_license | 502463708/Microcalcification_Detection | a57cdc80430435e3fb810ff6d1f9b3c30174a0ee | 0f59ab8c614141df3bcea42704b2ee940c43038d | refs/heads/master | 2022-03-27T19:01:22.283957 | 2020-01-07T11:55:07 | 2020-01-07T11:55:07 | 206,355,880 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,959 | py | """
This file implements another version of t-test loss which is derived from the original version of implemented in
t_test_loss_v3.py. This version implements a novel method to pick up soft positive pixels
"""
import numpy as np
import torch
import torch.nn as nn
class SoftTTestLoss(nn.Module):
    """T-test style loss that additionally treats the top ``sp_ratio`` fraction
    of residue pixels as "soft positives".

    The loss pushes the mean residue of (true + soft) positive pixels above
    ``beta`` and the mean residue of negative pixels toward zero, while
    penalising the variance of each group.
    """

    def __init__(self, beta=0.8, lambda_p=1, lambda_n=0.1, sp_ratio=0.01):
        super(SoftTTestLoss, self).__init__()
        self.beta = beta            # target margin for the positive mean residue
        self.lambda_p = lambda_p    # weight of the negative-group variance term
        self.lambda_n = lambda_n    # weight of the positive-group variance term
        self.sp_ratio = sp_ratio    # fraction of pixels picked as soft positives
        return

    def forward(self, residues, pixel_level_labels, logger=None):
        """Compute the loss.

        Args:
            residues: float tensor of shape (B, C, H, W); requires CUDA.
            pixel_level_labels: binary tensor of shape (B, H, W) or (B, C, H, W).
            logger: optional object with write_and_print(); falls back to print.
        """
        assert torch.is_tensor(residues)
        assert torch.is_tensor(pixel_level_labels)
        assert len(residues.shape) == 4  # shape: B, C, H, W

        # add the missing dimension of channel in pixel_level_labels
        if len(pixel_level_labels.shape) == 3:
            pixel_level_labels = pixel_level_labels.unsqueeze(dim=1)
        assert residues.shape == pixel_level_labels.shape

        # pixel_level_labels must be a tensor on gpu devices
        if pixel_level_labels.device.type != 'cuda':
            pixel_level_labels = pixel_level_labels.cuda()

        # reshaped into vectors
        residues = residues.view(-1)
        pixel_level_labels = pixel_level_labels.view(-1)

        # generate soft positive pixel indexes: threshold at the K-th largest residue
        residues_np = residues.cpu().detach().numpy()
        # BUG FIX: K must be at least 1 -- with K == 0 (sp_ratio * N < 1) the
        # original slice [-0:] selected the WHOLE array, so the threshold
        # became the global minimum and every pixel was marked soft-positive.
        K = max(1, int(self.sp_ratio * len(residues_np)))
        residue_threshold = residues_np[np.argpartition(residues_np, -K)[-K:]].min()
        soft_positive_pixel_idx = (residues >= residue_threshold).bool()

        # bool variable for the following torch.masked_select() operation
        true_positive_pixel_idx = pixel_level_labels.bool()

        positive_pixel_idx = true_positive_pixel_idx | soft_positive_pixel_idx
        negative_pixel_idx = ~positive_pixel_idx

        # split residues into positive and negative one
        positive_residue_pixels = torch.masked_select(residues, positive_pixel_idx)
        negative_residue_pixels = torch.masked_select(residues, negative_pixel_idx)

        loss = torch.FloatTensor([0]).cuda()

        if positive_residue_pixels.shape[0] > 0:
            # hinge: positives should average above beta
            mean_residue_pixels_positive = positive_residue_pixels.mean()
            loss += torch.max(self.beta - mean_residue_pixels_positive, torch.FloatTensor([0]).cuda())

            # calculate variance only when the number of the positive pixels > 1
            if positive_residue_pixels.shape[0] > 1:
                var_residue_pixels_positive = positive_residue_pixels.var()
                loss += self.lambda_n * var_residue_pixels_positive

        if negative_residue_pixels.shape[0] > 0:
            # negatives should average as low as possible
            mean_residue_pixels_negative = negative_residue_pixels.mean()
            loss += mean_residue_pixels_negative

            # calculate variance only when the number of the negative pixels > 1
            if negative_residue_pixels.shape[0] > 1:
                var_residue_pixels_negative = negative_residue_pixels.var()
                loss += self.lambda_p * var_residue_pixels_negative

        log_message = 'num_p: {}, num_n: {}, m_r_p: {:.4f}, m_r_n: {:.4f}, v_r_p: {:.4f}, v_r_n: {:.4f}, loss: {:.4f}'.format(
            positive_residue_pixels.shape[0],
            negative_residue_pixels.shape[0],
            mean_residue_pixels_positive.item() if positive_residue_pixels.shape[0] > 0 else -1,
            mean_residue_pixels_negative.item() if negative_residue_pixels.shape[0] > 0 else -1,
            var_residue_pixels_positive.item() if positive_residue_pixels.shape[0] > 1 else -1,
            var_residue_pixels_negative.item() if negative_residue_pixels.shape[0] > 1 else -1,
            loss.item())

        if logger is not None:
            logger.write_and_print(log_message)
        else:
            print(log_message)

        return loss

    def get_name(self):
        """Return the human-readable name of this loss."""
        return 'SoftTTestLoss'
| [
"502463708@qq.com"
] | 502463708@qq.com |
070e02d77bd4b1bfbabfa3a0b3c1e86dde6faa0f | 6e857a4ba164cdbd63a11f1e3a7085c9fae2e5d9 | /pdf_server_tests.py | 74d1e91c8b85eca2824e01620b0f6a7390680e49 | [] | no_license | CT-Data-Collaborative/reports | d95fb7c031c96ec30b9a103f6531f1fea5331004 | 834b82f7c17b6b4ff963e07f8a41dd87f2d43689 | refs/heads/master | 2021-01-17T05:12:09.900976 | 2020-01-14T15:37:43 | 2020-01-14T15:37:43 | 39,834,756 | 2 | 0 | null | 2020-03-13T20:39:23 | 2015-07-28T13:14:42 | JavaScript | UTF-8 | Python | false | false | 3,191 | py | ## Imports
# the flask application we're testing
import pdf_server
# unittest package
import unittest
# os file and pathing utilities
from os import path, mkdir, unlink, write, close, path as path
# temp file creation
from tempfile import mkstemp
# file comparing tool
import filecmp
##
class TownProfileTest(unittest.TestCase):
    """Golden-file test: POSTing the Hartford town-profile payload to
    /download must reproduce the stored reference PDF byte-for-byte."""

    def setUp(self):
        # test client setup
        # application setup
        pdf_server.app.config["TESTING"] = True
        self.app = pdf_server.app.test_client()
        self.tempFiles = []
        # if temp directory doesn't exist, create one.
        if not path.isdir("temp"):
            mkdir("temp")

    def tearDown(self):
        # Remove temp files from tests
        for tempFile in self.tempFiles:
            unlink(tempFile)

    def test_town_profile(self):
        # Get file path to testing standard
        standardFile = path.abspath("static/tests/Hartford.pdf")
        # Get Town profile mock request from file.
        # FIX: close the fixture file via a context manager (was left open).
        with open("static/tests/Hartford.json") as mockFile:
            townProfileMock = mockFile.read()
        # Get response to simulated request
        res = self.app.post('/download', data = {"data" : townProfileMock})
        # Create temporary file, get handle and path
        tempHandle, tempFile = mkstemp(dir="temp")
        # store path in list of temp paths to remove later.
        self.tempFiles.append(tempFile)
        # Write request response to temp file
        write(tempHandle, res.data)
        # Close temp file handle
        close(tempHandle)
        # Assert tempfile is the same as current testing standard
        assert filecmp.cmp(tempFile, standardFile), "Town Profile does not match Standard!"
class ConnectTest(unittest.TestCase):
    """Golden-file test: POSTing the statewide CONNECT payload to /download
    must reproduce the stored reference PDF byte-for-byte."""

    def setUp(self):
        # test client setup
        # application setup
        pdf_server.app.config["TESTING"] = True
        self.app = pdf_server.app.test_client()
        self.tempFiles = []
        # if temp directory doesn't exist, create one.
        if not path.isdir("temp"):
            mkdir("temp")

    def tearDown(self):
        # Remove temp files from tests
        for tempFile in self.tempFiles:
            unlink(tempFile)

    def test_connect(self):
        # Get file path to testing standard
        standardFile = path.abspath("static/tests/statewide_connect_report.pdf")
        # Get CONNECT mock request from file.
        # FIX: close the fixture file via a context manager (was left open).
        with open("static/tests/statewide_connect_report.json") as mockFile:
            connectMock = mockFile.read()
        # Get response to simulated request
        res = self.app.post('/download', data = {"data" : connectMock})
        # Create temporary file, get handle and path
        tempHandle, tempFile = mkstemp(dir="temp")
        # store path in list of temp paths to remove later.
        self.tempFiles.append(tempFile)
        # Write request response to temp file
        write(tempHandle, res.data)
        # Close temp file handle
        close(tempHandle)
        # Assert tempfile is the same as current testing standard
        assert filecmp.cmp(tempFile, standardFile), "CONNECT Report does not match Standard!"
if __name__ == "__main__":
unittest.main() | [
"brendan.james.swiniarski@gmail.com"
] | brendan.james.swiniarski@gmail.com |
288fe026c2871de7b88efa1d019ca2cc8cd537b6 | a67e55cee1a3d610939efe5794320d64784436b0 | /Sum of Primes/main.py | 8aca8aacbce132c070f9dfddb0d4f15b61db4ed8 | [] | no_license | AhmedFat7y/codeeval-challenges | ad90163d4a147532a60ae3d8b74c73c1b652228e | e1c49e1a067f7c623fda87d8fd9f0f86ff416b90 | refs/heads/master | 2020-02-26T17:05:01.242591 | 2016-10-02T15:14:10 | 2016-10-02T15:14:10 | 69,395,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | result = 1 + 2 + 3
last_prime = 3
# for i in range(5, 2, 1000):
| [
"ahmed.abdel-wahab@robustastudio.com"
] | ahmed.abdel-wahab@robustastudio.com |
8f42d63d09594a9151bdfc9600cf61be6e19b38d | 11490d66dc07a405862a9bc44a762f70a3764d41 | /Checkbook.py | 2d154945291199a66ae6aec1cfac7ab746a9a40f | [] | no_license | amkirby/Checkbook | 46efb6c5d78145842e9f0915850f486f17581102 | aed67af1d63a08a6f08d56768a08012ac928b4f3 | refs/heads/master | 2023-09-01T09:18:16.483629 | 2023-05-10T15:43:52 | 2023-05-10T15:43:52 | 43,181,688 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,326 | py | import locale
from typing import Any, Callable, List, Optional
import CheckbookTransaction as CBT
import ConfigurationProcessor as Conf
from DateProcessor import DateProcessor
# Module-level configuration accessor (file names, column sizes, separators).
conf = Conf.ConfigurationProcessor()
# Horizontal rule printed between register rows; width matches the sum of the
# configured column sizes plus one separator per column.
ROW_SEP = '\n' + (conf.get_property("HLINE_CHAR") * (sum(conf.get_property("SIZE_LIST")) + len(conf.get_property("SIZE_LIST")))) + '\n'
class Checkbook:
"""A class that represents a checkbook
Attributes:
check_register (list) : contains instances of CheckbookTransaction
"""
    def __init__(self):
        """Initializes an empty check register"""
        # ordered list of CheckbookTransaction instances
        self.check_register: List[CBT.CheckbookTransaction] = []
        # default register file name from configuration; updated by load()
        self.file_name: str = conf.get_property("FILE_NAME")
        # dirty flag: True when the register has unsaved changes
        self.edited: bool = False
    def create_based_on_list(self, cbt_list: List[CBT.CheckbookTransaction]) -> None:
        """Replace the register with ``cbt_list`` (aliased, not copied)."""
        self.check_register = cbt_list
def add(self, cbt_list: List[CBT.CheckbookTransaction]) -> None:
"""Adds the specified list to the checkbook
Args:
cbt_list (list) : contains values in the order of the CBT.KEYS that
are used to create a transaction
"""
cbt = CBT.CheckbookTransaction()
for i in range(len(cbt_list)):
cbt.set_value(CBT.KEYS[i], cbt_list[i])
self.check_register.append(cbt)
self.edited = True
def add_single_trans(self, cbt: CBT.CheckbookTransaction) -> None:
"""Adds a CheckbookTransaction to the register
Args:
cbt (CheckbookTransaction) : the CBT to be added to the checkbook
"""
self.check_register.append(cbt)
self.edited = True
    def load(self, file_name: str, load_function: Callable[[str], List[CBT.CheckbookTransaction]]) -> None:
        """Tries to load the specified file name into the check register

        Searches every directory in the PATH_FOR_REGISTERS config property
        (plus the current directory) and keeps the first path whose load
        yields a non-empty register.

        Args:
            file_name (string) : the file to load into the checkbook
            load_function (function): function used to load the checkbook
        """
        self.file_name = file_name
        list_of_paths = conf.get_property("PATH_FOR_REGISTERS").split(";")
        list_of_paths.append("./")  # always check current directory
        full_path = ""
        for path in list_of_paths:
            full_path = path + file_name
            self.check_register = load_function(full_path)
            if(len(self.check_register) > 0):
                self.file_name = full_path
                break
        if(len(self.check_register) == 0):
            # file doesn't exist so it will be created
            # NOTE(review): re-runs load_function on the *last* path tried while
            # recording the bare file_name; presumably load_function returns an
            # empty register for a missing file -- confirm.
            self.file_name = file_name
            self.check_register = load_function(full_path)
def clear(self) -> None:
"""Clears the checkbook"""
del self.check_register[:]
CBT.CheckbookTransaction.reset_uid()
    def save(self, save_function: Callable[[str, List[CBT.CheckbookTransaction]], None]) -> None:
        """Persist the register via ``save_function`` (historically XML) and
        clear the dirty flag.

        Args:
            save_function (function): function used to save the checkbook;
                called with (file_name, check_register)
        """
        save_function(self.file_name, self.check_register)
        self.edited = False
    def is_edited(self) -> bool:
        """Returns if the checkbook has been edited

        Returns:
            boolean: True if checkbook has unsaved changes, False otherwise
        """
        return self.edited
    def set_edited(self, edit: bool) -> None:
        """Sets the edited (dirty) status to the specified value

        Args:
            edit (boolean) : the state to set if the checkbook is edited
        """
        self.edited = edit
def get_transaction_type(self, trans_type: str) -> List[CBT.CheckbookTransaction]:
"""Gets all transactions with the specified trans type
Args:
trans_type (string) : the transaction type to gather
Returns:
list: a list of transactions with the specified trans type
"""
return_list: List[CBT.CheckbookTransaction] = []
for elem in self.check_register:
if elem.get_dictionary().get("Trans") == trans_type:
return_list.append(elem)
return return_list
def get_category(self, cat: str) -> List[CBT.CheckbookTransaction]:
"""Gets all transactions with the specified category
Args:
cat (string) : the category to gather
Returns:
list: a list of transactions with the specified category
"""
return_list: List[CBT.CheckbookTransaction] = []
cat_list = [x.strip() for x in cat.split(",")]
for category in cat_list:
for elem in self.check_register:
if str(elem.get_dictionary().get("Category")).lower() == category.lower():
return_list.append(elem)
return return_list
def get_month(self, date_processor: DateProcessor) -> List[CBT.CheckbookTransaction]:
"""Gets all transactions with the specified month
Args:
date_processor (DateProcessor): The date range
Returns:
list: a list of transactions with the specified month
"""
return_list: List[CBT.CheckbookTransaction] = []
for elem in self.check_register:
date: Any = elem.get_dictionary().get("Date")
if (date_processor.date_within_range(date)):
return_list.append(elem)
return return_list
    def _process_date_range(self, month_str: str):
        """Parse a month/year range string into
        (month_start, month_end, year_start, year_end).

        Accepted forms: "M", "M-M", "Y", "Y-Y", "M Y", "M-M Y-Y" -- month and
        year parts separated by a space, each part optionally a hyphen range.
        A lone value in 1..12 is treated as a month, anything else as a year.
        Unspecified parts default to the full range (months 1-12,
        years 1998-9999).
        """
        month_start = 1
        month_end = 12
        year_start = 1998
        year_end = 9999
        vals = month_str.split()  # separate month and year by spaces
        if(len(vals) == 1):
            # could be month (range) or year (range)
            ranges = vals[0].split("-")
            ranges = [int(i) for i in ranges]
            # only the FIRST value decides month-vs-year for the whole range
            if(ranges[0] >= 1 and ranges[0] <= 12):
                #month value
                month_start, month_end = self._get_start_end_values(ranges)
            else:
                # assumed year value
                year_start, year_end = self._get_start_end_values(ranges)
        elif(len(vals) == 2):
            # both month (range) and year (range)
            month_ranges = vals[0].split("-")
            month_ranges = [int(i) for i in month_ranges]
            year_ranges = vals[1].split("-")
            year_ranges = [int(i) for i in year_ranges]
            month_start, month_end = self._get_start_end_values(month_ranges)
            year_start, year_end = self._get_start_end_values(year_ranges)

        return month_start, month_end, year_start, year_end
def _get_start_end_values(self, ranges : List[int]):
start = -1
end = -1
if(len(ranges) == 1):
start = end = ranges[0]
else:
start = ranges[0]
end = ranges[1]
return start, end
def _validate_date_ranges(self, month_start: int, month_end: int, year_start: int, year_end: int) -> bool:
is_valid = True
month_valid = False
year_valid = False
if(month_start <= month_end and 1 <= month_start <= 12 and 1 <= month_end <= 12):
month_valid = True
if(year_start <= year_end and 1998 <= year_start <= 9999 and 1998 <= year_end <= 9999):
year_valid = True
is_valid = month_valid and year_valid
return is_valid
def get_description(self, search_term: str) -> List[CBT.CheckbookTransaction]:
return_list: List[CBT.CheckbookTransaction] = []
if(type(search_term) is not str):
search_term = str(search_term)
for cbt in self.check_register:
transaction_desc = str(cbt.get_value("Desc"))
if(search_term.lower() in transaction_desc.lower()):
return_list.append(cbt)
return return_list
def get_total_for_trans(self, trans: str) -> float:
"""Get the total amount for the specified trans type
Args:
trans (string) : the transaction type that is totaled
Returns:
float: Total amount for the specified trans type
"""
trans_list = self.get_transaction_type(trans)
total = 0.0
for elem in trans_list:
total += elem.get_amount()
return total
def get_total_for_trans_month(self, trans: str, date_processor: DateProcessor) -> float:
"""Get the total for the specified transaction in the specified month
Args:
trans (string) : the transaction type to total
date_processor (DateProcessor): The date range
Returns:
float: Total amount for the specified trans type for the specified month
"""
month_list = self.get_month(date_processor)
total = 0.0
for elem in month_list:
if elem.get_value("Trans") == trans:
total += elem.get_amount()
return total
def get_total_for_cat(self, category: str) -> float:
"""Get the total for the specified category
Args:
category (string) : The category to total
Returns:
float: Total amount for the specified category
"""
cat_list = self.get_category(category)
total = 0.0
for elem in cat_list:
total += elem.get_amount()
return total
def get_total(self) -> float:
"""Gets the total for the register
Returns:
float: Total amount for the checkbook
"""
total = 0.0
for elem in self.check_register:
total += elem.get_amount()
return total
def find_transaction(self, in_trans: int) -> CBT.CheckbookTransaction:
"""Gets the specified transaction number from the register
Args:
in_trans (int) : the transaction to gather
Returns:
CheckbookTransaction: The specified transaction
"""
transaction: Any = None #CBT.CheckbookTransaction()
for currentTrans in self.check_register:
if int(currentTrans.get_value("Num")) == in_trans:
transaction = currentTrans
return transaction
def find_transactions(self, in_trans: List[str]) -> List[CBT.CheckbookTransaction]:
"""Gets the specified transaction number from the register
Args:
in_trans (int) : the transaction to gather
Returns:
CheckbookTransaction: The specified transaction
"""
transactions: List[CBT.CheckbookTransaction] = [] #CBT.CheckbookTransaction()
for num in in_trans:
for currentTrans in self.check_register:
if int(currentTrans.get_value("Num")) == int(num):
transactions.append(currentTrans)
return transactions
    def get_file_name(self) -> str:
        """Return the path/name of the register file backing this checkbook."""
        return self.file_name
def _gen_total_line_print(self) -> str:
"""creates the total line at the bottom of the register
Returns:
str: The total line for the checkbook
"""
string = conf.get_property("VLINE_CHAR")
# format total: text
format_string = '{:>' + str(sum(conf.get_property("SIZE_LIST")[:-2]) + 4) + '}'
string += format_string.format("Total : ")
# format amount
format_string = '{:^' + str((conf.get_property("SIZE_LIST")[-2])) + '}'
string += format_string.format(locale.currency(self.get_total(), grouping=conf.get_property("THOUSAND_SEP")))
# format final bar
format_string = '{:>' + str((conf.get_property("SIZE_LIST")[-1]) + 2) + '}'
string += format_string.format(conf.get_property("VLINE_CHAR"))
return string
def _gen_header_print(self) -> str:
"""Creates the header line at the top of the register
Returns:
str: The header line for the checkbook
"""
header = ROW_SEP
header += conf.get_property("VLINE_CHAR")
for i in range(len(CBT.KEYS)):
header_length = conf.get_property("SIZE_LIST")[i]
format_string = '{:^' + str(header_length) + '}'
header += format_string.format(CBT.KEYS[i]) + conf.get_property("VLINE_CHAR")
return header
def _gen_trans_print(self, print_list: Optional[List[CBT.CheckbookTransaction]]=None) -> str:
"""Creates the print for each transaction in the register
Args:
print_list (list) : the list of CBTs to loop through to generate
the transaction print. If None, loop through
the whole checkbook
Returns:
str: The print for the transactions in the checkbook
"""
iter_list: List[CBT.CheckbookTransaction] = []
if print_list is None:
iter_list = self.check_register
else:
iter_list = print_list
string = ''
for elem in iter_list:
string += str(elem)
string += ROW_SEP
return string
def get_specific_print(self, key: str, value: Any) -> str:
"""Print a subset of the checkbook
Args:
key (string) : the key to to get the subset from
value (int | string) : the value from key to get
Returns:
str: The print for a subset of transactions based on the specified input
"""
string = self._gen_header_print()
string += ROW_SEP
string += self._gen_trans_print(self.get_specific_list(key, value))
return string
def get_specific_list(self, key: str, value: Any) -> List[CBT.CheckbookTransaction]:
"""Gets the subset list based on the given input
Args:
key (string) : the key to to get the subset from
value (int | string) : the value from key to get
Returns:
list: A list of a subset of transactions based on the specified input
"""
func = self.specific_print_functions[key.capitalize()]
func_param: Any = None
if "Date" == key.capitalize():
func_param = DateProcessor(value)
elif value.isdigit():
func_param = int(value)
else:
func_param = value.capitalize()
return_list = func(self, func_param)
return return_list
    def get_register(self):
        """Return the live list of transactions (not a copy)."""
        return self.check_register
    def order_by(self, key: str) -> None:
        """Sort the register in place by the given transaction field."""
        self.check_register.sort(key=lambda cbt: cbt.get_value(key))
def delete_transaction(self, cbt:CBT.CheckbookTransaction) -> None:
self.get_register().remove(cbt)
self.edited = True
def delete_transactions(self, cbts:List[CBT.CheckbookTransaction]) -> None:
for cbt in cbts:
self.get_register().remove(cbt)
self.edited = True
def __str__(self):
"""A string representation of a checkbook
Returns:
str: The print for the checkbook
"""
string = self._gen_header_print()
string += ROW_SEP
string += self._gen_trans_print()
string += self._gen_total_line_print()
string += ROW_SEP
return string
specific_print_functions = {
"Date": get_month,
"Trans": get_transaction_type,
"Category": get_category,
"Desc": get_description
}
| [
"17akirby@gmail.com"
] | 17akirby@gmail.com |
0a440679fed74487e89e74758aff6e6101b05743 | 9024050ed26a4b3e712a9da64ee3b53d029f1c2d | /blog/models.py | 8666c8409d956537a0413c46b2d2818bcf767a6a | [] | no_license | eyurvati/my-first-blog | 8674244086f4ac11101d7420534c7cfe18aaa851 | 333c69c34a8a3391b1a8ea7aa901430524153003 | refs/heads/master | 2021-01-22T21:07:06.234536 | 2017-03-18T16:22:04 | 2017-03-18T16:22:04 | 85,394,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog entry written by a Django auth user."""
    # NOTE(review): ForeignKey without on_delete is pre-Django-2.0 syntax.
    author = models.ForeignKey('auth.User')
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    # published_date stays NULL until publish() is called.
    published_date = models.DateTimeField(blank=True, null=True)
    is_public = models.BooleanField(default=True)
    def publish(self):
        """Stamp the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
# Create your models here.
| [
"eyurvati@gmail.com"
] | eyurvati@gmail.com |
feaa11ac9c9654dcac5b82c4723fcf59931647f2 | ce60f76c6ad4c48fd6182240b302ee057809cc66 | /extra/jobqueue/dispatcher.py | a9f043e8fc7ee4f9dd606e8201f33c3083a2c6dd | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | bumps/bumps | 8ae10e8d15c0aa64e0bab6e00e7fabb2ca1b0860 | 2594e69567d534b434dc0eae727b77fdeff411d4 | refs/heads/master | 2023-08-22T17:56:49.987181 | 2023-07-26T14:22:23 | 2023-07-26T14:22:23 | 2,799,064 | 48 | 28 | NOASSERTION | 2023-07-26T14:22:24 | 2011-11-17T22:22:02 | Python | UTF-8 | Python | false | false | 6,471 | py |
from datetime import datetime, timedelta
import logging
from sqlalchemy import and_, or_, func, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from . import runjob, store, db, notify
from .db import Job, ActiveJob
class Scheduler(object):
    """Job-queue front end: submits, dispatches and records jobs.

    Thin coordination layer between the SQL job table (``db``), the
    on-disk artifact store (``store``) and user notification (``notify``).
    """
    def __init__(self):
        # Open the shared database connection used by every Session below.
        db.connect()
    def jobs(self, status=None):
        """Return job ids ordered by priority, optionally filtered by status."""
        session = db.Session()
        if status:
            jobs = (session.query(Job)
                    .filter(Job.status==status)
                    .order_by(Job.priority)
                    )
        else:
            jobs = (session.query(Job)
                    .order_by(Job.priority)
                    )
        return [j.id for j in jobs]
    def submit(self, request, origin):
        """Queue a new job and return its id.

        Args:
            request (dict): must contain 'name' and 'notify'
            origin (str): host the request came from

        Returns:
            int: the id of the newly created job
        """
        session = db.Session()
        # Number of jobs by this user (same notify address or same origin)
        # in the last 30 days; used as the priority so heavy users sink
        # to the back of the queue.
        n = (session.query(Job)
             .filter(or_(Job.notify==request['notify'],Job.origin==origin))
             .filter(Job.date >= datetime.utcnow() - timedelta(30))
             .count()
             )
        job = Job(name=request['name'],
                  notify=request['notify'],
                  origin=origin,
                  priority=n)
        session.add(job)
        session.commit()
        store.create(job.id)
        store.put(job.id,'request',request)
        return job.id
    def _getjob(self, id):
        """Return the Job row with the given id, or None."""
        session = db.Session()
        return session.query(Job).filter(Job.id==id).first()
    def results(self, id):
        """Return stored results for a job, or at least its status."""
        job = self._getjob(id)
        try:
            return runjob.results(id)
        except KeyError:
            # No stored results yet; fall back to the job status alone.
            if job:
                return { 'status': job.status }
            else:
                return { 'status': 'UNKNOWN' }
    def status(self, id):
        """Return the status string for a job id ('UNKNOWN' if absent)."""
        job = self._getjob(id)
        return job.status if job else 'UNKNOWN'
    def info(self,id):
        """Return the original submission request for a job."""
        request = store.get(id,'request')
        return request
    def cancel(self, id):
        """Mark a pending or running job for cancellation."""
        session = db.Session()
        (session.query(Job)
         .filter(Job.id==id)
         # Bug fix: ``in_`` takes a single iterable of values, not varargs;
         # the original ``in_('ACTIVE','PENDING')`` raised TypeError.
         # synchronize_session=False because 'evaluate' cannot handle IN.
         .filter(Job.status.in_(['ACTIVE', 'PENDING']))
         .update({ 'status': 'CANCEL' }, synchronize_session=False)
         )
        session.commit()
    def delete(self, id):
        """
        Delete any external storage associated with the job id. Mark the
        job as deleted.
        """
        session = db.Session()
        (session.query(Job)
         .filter(Job.id == id)
         .update({'status': 'DELETE'})
         )
        # Bug fix: the status change was never committed, so it was lost
        # when the session closed.
        session.commit()
        store.destroy(id)
    def nextjob(self, queue):
        """
        Make the next PENDING job active, where pending jobs are sorted
        by priority. Priority is assigned on the basis of usage and the
        order of submissions.
        """
        session = db.Session()
        # Subquery: lowest job id among the pending jobs that share the
        # minimum priority.
        _priority = select([func.min(Job.priority)],
                           Job.status=='PENDING')
        min_id = select([func.min(Job.id)],
                        and_(Job.priority == _priority,
                             Job.status == 'PENDING'))
        for _ in range(10):  # Retry on conflict with a competing dispatcher
            try:
                job = session.query(Job).filter(Job.id==min_id).one()
            except NoResultFound:
                return {'request': None}
            # Mark the job as active and record it in the active queue
            (session.query(Job)
             .filter(Job.id == job.id)
             .update({'status': 'ACTIVE',
                      'start': datetime.utcnow(),
                      }))
            activejob = db.ActiveJob(jobid=job.id, queue=queue)
            session.add(activejob)
            # The first process to record the job in the active list wins
            # and flips the status from PENDING to ACTIVE.  On a conflict
            # (IntegrityError) roll back and pick the next pending job.
            try:
                session.commit()
            except IntegrityError:
                session.rollback()
                continue
            break
        else:
            logging.critical('dispatch could not assign job %s'%job.id)
            raise IOError('dispatch could not assign job %s'%job.id)
        request = store.get(job.id,'request')
        # No reason to include time; email or twitter does that better than
        # we can without client locale information.
        notify.notify(user=job.notify,
                      msg=job.name+" started",
                      level=1)
        return { 'id': job.id, 'request': request }
    def postjob(self, id, results):
        """Record the results of a finished job and notify its owner."""
        # TODO: redundancy check, confirm queue, check sig, etc.
        session = db.Session()
        (session.query(Job)
         .filter(Job.id == id)
         .update({'status': results.get('status','ERROR'),
                  'stop': datetime.utcnow(),
                  })
         )
        (session.query(ActiveJob)
         .filter(ActiveJob.jobid == id)
         .delete())
        try:
            session.commit()
        except Exception:
            # Bug fix: was a bare ``except:`` that also swallowed
            # SystemExit/KeyboardInterrupt.
            session.rollback()
        # Save results
        store.put(id,'results',results)
        # Post notification
        job = self._getjob(id)
        if job.status == 'COMPLETE':
            if 'value' in results:
                status_msg = " ended with %s"%results['value']
            else:
                status_msg = " complete"
        elif job.status == 'ERROR':
            status_msg = " failed"
        elif job.status == 'CANCEL':
            status_msg = " cancelled"
        else:
            status_msg = " with status "+job.status
        # Note: no reason to include time; twitter or email will give it.
        notify.notify(user=job.notify,
                      msg=job.name+status_msg,
                      level=2)
| [
"paul.kienzle@nist.gov"
] | paul.kienzle@nist.gov |
c68970ff0fc2d6862fba3d855d35206ab5274279 | a308557b463b03d7031d49548ba8d98885325871 | /十进制转二进制.py | 85f3cb2a4ad9e98426312361404c96156ac88569 | [] | no_license | LittleWhaleRx/python_files | 835d74ebdb2fc142209f492f4101b1ac6db69863 | a4c5a6600158a032511128e219740798330971db | refs/heads/master | 2020-04-02T03:46:48.932974 | 2018-11-01T05:57:00 | 2018-11-01T05:57:00 | 153,983,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | def gcd(x):
if x == 1:
print(x,end = ' ')
else:
y = x%2
gcd(x//2)
print(y, end=' ')
# Interactive driver: read a base-10 integer (Chinese prompt) and print
# its binary digits via gcd().  Raises ValueError on non-numeric input.
x = int(input('请输入要转换成二进制的十进制数字:'))
print('转换为二进制为:',end = ' ')
gcd(x)
"576440764@qq.com"
] | 576440764@qq.com |
eebbab8cc0fe982d9573dbef8fc19af5181a7c48 | 9b77f1e31d5901924431a2a3164312cc346bde4f | /ADI_MINI_PROJECT/blog/views.py | 77aca8b4054fcba1c1dd859c800aa3a307556c0c | [] | no_license | Adi19471/Djnago_Code-Daily | c2184bf21db5c8d4b3c4098fbd593e4949375ae8 | 03b1b70d3e187fe85eb24e88b7ef3391b14aa98c | refs/heads/master | 2023-08-14T14:36:36.144243 | 2021-09-20T12:52:46 | 2021-09-20T12:52:46 | 375,690,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,673 | py | from django.shortcuts import render,HttpResponseRedirect
from .forms import SignupForm,LoginForm,PostForm
from django.contrib import messages
from django.contrib.auth import authenticate,login,logout
from .models import Post
# home page
def home(request):
    """Render the landing page with every post."""
    context = {'posts': Post.objects.all()}
    return render(request, 'blog/home.html', context)
#about page
def about(request):
    """Render the static about page."""
    return render(request, 'blog/about.html')
# contact page
def contact(request):
    """Render the static contact page."""
    return render(request, 'blog/contact.html')
# dahsboard page
def dashbord(request):
    """Show the post dashboard; anonymous users are sent to login."""
    if not request.user.is_authenticated:
        return HttpResponseRedirect('/login/')
    posts = Post.objects.all()
    messages.info(request, 'you enter DashBoard....!!!', 'dont you want dashboed then click okay')
    return render(request, 'blog/dashbord.html', {'posts': posts})
# logout page
def user_logout(request):
    """Log the current user out and return to the home page."""
    logout(request)
    return HttpResponseRedirect('/')
#signup page
def user_signup(request):
    """Register a new author account.

    A valid POST saves the user; the (possibly bound) form is always
    re-rendered on the signup template.
    """
    if request.method != "POST":
        form = SignupForm()
    else:
        form = SignupForm(request.POST)
        if form.is_valid():
            messages.info(request,'Congratulation..!! You have become a Author')
            form.save()
    return render(request, 'blog/signup.html', {'form': form})
# login page
def user_login(request):
    # Authenticate an existing user.  Visitors who are already logged in
    # are bounced straight to the dashboard.
    if not request.user.is_authenticated:
        if request.method == "POST":
            form = LoginForm(request=request, data=request.POST)
            if form.is_valid():
                uname = form.cleaned_data['username']
                upass = form.cleaned_data['password']
                user = authenticate(username=uname, password=upass)
                if user is not None:
                    login(request, user)
                    messages.success(request, 'Logged in Successfully !!')
                    return HttpResponseRedirect('/dashbord/')
        else:
            # GET request: show an unbound form.  NOTE(review): on an
            # invalid POST the bound form falls through to this render
            # call below, so field errors are displayed.
            form = LoginForm()
        return render(request, 'blog/login.html', {'form':form})
    else:
        return HttpResponseRedirect('/dashbord/')
# add new post
def add_post(request):
    """Create a new blog post.

    GET renders an empty form; a valid POST saves the post and resets
    the form.  Anonymous users are redirected to the login page.
    """
    if request.user.is_authenticated:
        if request.method =='POST':
            form = PostForm(request.POST)
            if form.is_valid():
                ti = form.cleaned_data['title']
                de = form.cleaned_data['desc']
                dt = form.cleaned_data['date_time']
                user = Post(title=ti,desc=de,date_time=dt)
                user.save()
                messages.warning(request,'you go to dashboard MENU okay....?')
                form = PostForm()
        else:
            form = PostForm()
        return render(request,'blog/addpost.html',{'form':form})
    else:
        # Bug fix: was `HttpresponseRedirect` (lowercase r) -- a NameError
        # whenever an anonymous user hit this view.
        return HttpResponseRedirect('/login/')
# update post
def update_post(request, id):
    """Edit an existing post.

    GET renders the pre-filled form; a valid POST saves the changes.
    Anonymous users are redirected to the login page.
    """
    if request.user.is_authenticated:
        pi = Post.objects.get(pk=id)
        if request.method == 'POST':
            form = PostForm(request.POST, instance=pi)
            if form.is_valid():
                form.save()
        else:
            form = PostForm(instance=pi)
        return render(request, 'blog/update.html', {'form': form})
    else:
        # Bug fix: was `HttpresponseRedirect` (lowercase r) -- a NameError
        # whenever an anonymous user hit this view.
        return HttpResponseRedirect('/login/')
# delete post
# def delete_post(request,id):
# if request.user.is_authenticated:
# if request.method == 'POST':
# pi = Post.objects.get(pk = id)
# pi.delete()
# return HttpresponseRedirect('/dashbord/'
# else:
# return HttpresponseRedirect('/login/')
def delete_post(request, id):
    """Delete a post on POST and return to the dashboard.

    NOTE: an authenticated GET falls through and implicitly returns
    None, matching the original behavior.
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect('/login/')
    if request.method == 'POST':
        Post.objects.get(pk=id).delete()
        return HttpResponseRedirect('/dashbord/')
| [
"akumatha@gmail.com"
] | akumatha@gmail.com |
3184b1daec047b0a000f90524e73ffa75afdad91 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/4/k3t.py | 9810316e4d55d2272be6a2e8490993ef2354650e | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Python 2 script.  Prints the quoted payload of a 'k3T' line:
    # assumes the opening and closing double quotes arrive as standalone
    # whitespace-separated tokens -- TODO confirm against input format.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    # Interpret each line of the file: lines whose first token is 'k3T'
    # print their payload; any other line aborts with ERROR.
    # NOTE(review): a blank line would raise IndexError on data[0].
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'k3T':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
9dc494e27d3e05e6996225243aa0e6e6a642219a | 6ca8515012d063cff552258389ca51b5cc25141d | /Analysis/T_analysis/struc/chi_ang(sin theta).py | 490cf4d6f12d693b251279130fcb331d88856633 | [] | no_license | shockingpants/Scripts_MD | db660d61cdd6af4646d5fbe3c4a19990ac95f4f2 | d1bafc175b0cdbf6e4dce83c2dd6a1147e448d88 | refs/heads/master | 2021-01-01T18:33:21.878879 | 2012-10-03T04:40:00 | 2012-10-03T04:40:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,369 | py | #!/opt/local/bin/python2.7
# chi_ang.py
# Extracts chi_ang from gromacs file
#=============== MODULES ======================
##{{{
#----------------- Suppress stdout --------------------
import sys
class NullDevice():
def write(self, s):
pass
ori = sys.stdout # keep a reference to STDOUT
sys.stdout = NullDevice() # redirect the real STDOUT
#--------------------------------------------------------
from numpy import *
import numpy as np
import scipy
import scipy.stats as ss
import os
import pprint
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
import MDAnalysis
from strtup import *
import math
import shelve
import shelve_manager as SM
import mpl_manager as MM
import re
import time
import atexit
import argparse
#========= R-python interface ===============
import rpy2.robjects as ro
R=ro.r
from rpy2.robjects.packages import importr
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
R.library('bio3d')
R.library('optparse')
R.library('cluster')
R.library('ncdf')
R.library('class')
R.library('stats4')
R.library('energy')
sys.stdout = ori # turn STDOUT back on
##}}}
#==============================================
###############################################
#### Script Startup
###############################################
#============================
# Save Load
#============================
mode=[] # 0 --> run only 00 --> run or load 1 --> run and save 2 --> load
filename='chi_ang0.db'
mode,filename = SM.saveload(mode, filename) # Run this for checks.
if mode == 2 or mode == 1 or mode==0.1:
ds=SM.get_set(filename) # Loads saved data
start_time=time.time() # Start recording time
#============================
# Plotting
#============================
MPL=MM.mpl([],'CCA_graph.pdf')
DPI=150
###############################################
#### Functions
###############################################
##{{{
def distance(coord, ref, n=3, t=None):
    """Per-atom Euclidean distance of every frame from a reference structure.

    `coord` is an (n_coords, n_frames) array whose rows come in groups of
    `n` (e.g. x/y/z triplets per atom) and `ref` is the flat reference of
    length n_coords.  For each group the squared per-frame differences are
    summed, giving one distance per atom per frame.

    Fixes/changes vs. the original:
    - mutable default argument ``t=[]`` replaced with ``t=None``
      (any truthy ``t``, e.g. 'y', still prints the elapsed time);
    - the per-group vstack loop is replaced with a single vectorised
      reshape/sum (same result, no quadratic stacking);
    - for a single group the original returned a 1-D array, which is
      preserved here.
    """
    import time
    s_time = time.time()
    coord = np.asarray(coord)
    n_coords = np.shape(coord)[0]
    if n_coords != len(ref):
        # Checks dimension between coord and ref
        print(np.shape(coord))
        print(len(ref))
        raise Exception('Dimensions do not match. Make sure its n*p, 1*n')
    if n_coords % n != 0:
        # Check dimension against n
        print(n_coords)
        raise Exception('This only works with system with {0:0.0f} dimension.'.format(n))
    ref = np.asarray(ref, dtype=float)
    # Subtract the reference per row (equivalent to the meshgrid trick),
    # square, then sum each consecutive group of n rows in one pass.
    diff2 = (coord - ref[:, None]) ** 2
    sums = diff2.reshape(n_coords // n, n, -1).sum(axis=1)
    if n_coords // n == 1:
        sums = sums[0]  # original returned a 1-D array for a single group
    if t:
        print(time.time() - s_time)
    return np.sqrt(sums)
##}}}
###############################################
#### Classes
###############################################
##{{{
class MyClass(object):
    # Unused scaffold listing common dunder hooks; every body is a stub.
    # NOTE(review): __getattr__/__setattr__/__delattr__ and the item
    # hooks are missing their name/key/value arguments, so Python would
    # raise TypeError if these protocols were ever actually invoked.
    def __init__(self):
        pass
    def __call__(self):
        pass
    def __str__(self):
        pass
    def __getattr__(self):
        pass
    def __setattr__(self):
        pass
    def __delattr__(self):
        pass
    def __getitem__(self):
        pass
    def __setitem__(self):
        pass
    def __delitem__(self):
        pass
###############################################
#### Defining Parameters
###############################################
##{{{ Param
print 'Defining parameters...'
if mode==0 or mode==1 :
# Mode 0 --> run
param={}
param['step']=0.1 # Step
param['start']=0 # Start
param['end']=50 # End
if mode==1:
ds.add_param(param)
elif mode==2 or mode==0.1:
ds.get_param()
##}}}
###############################################
#### Main scripts
###############################################
##{{{
#=====================================
# Extracting Chi
#=====================================
##{{{ Loading Chi Angles
print 'Opening and extracting from chi_ang file... ('+tim()+')'
with SM.sl(['chi_ang','chi_resid'],ds,2,mode):
folder='chi_angles' # Folder that stores the temporary chi files
# Process shell script
if os.path.isfile(folder+'/chi.log') and os.path.isfile(folder+'/chi_index.txt'):
print 'chi_index.txt exist. Assumes that chi angles have already been processed by gromacs.\nProceeding...'
else:
os.environ['FOLD']=folder
os.system('./chi_ang.sh')
# Extract data from chi files
name=genfromtxt(folder+'/chi_index.txt', delimiter=" ", dtype=('|S20, |S20'))
for ind,(res,i) in enumerate(name):
print "Loading {0:s}...".format(i)
if ind == 0:
chi_ang=genfromtxt(folder+'/'+i,skip_header=12, dtype=(float, float))[:,1]
else:
chi_ang=np.vstack((chi_ang,genfromtxt(folder+'/'+i,skip_header=12, dtype=(float, float))[:,1]))
chi_resid=name['f0'] # resid of residues with chi
# Remove files
import shutil
shutil.rmtree(folder) #remove Chi Angles
#remove extra trajectory
try:
os.remove('../../prod_vac.trr')
except OSError:
filen=raw_input('prod_vac.trr not found. What is the name of the temporary gromacs trajectory?\n')
try:
os.remove('../../'+filen)
except OSError:
print 'Unable to remove temporary file, please remove it manually.'
print '{0:0.1f}s has elapsed.'.format(time.time()-start_time)
##}}}
#=====================================
# Extracting xyz
#=====================================
##{{{
print 'Loading from structural file... ('+tim()+')'
#---------------------------------------------------------
struc_file='dat0.db'
struc=SM.get_set(struc_file) #import data shelve
b3d=importr('bio3d') #import bio3d
with SM.sl(['xyz','fluc','ave','cij'],struc,2,2):
pass
# Calculating distance from avg struture
xyz2=np.array(xyz)
xyz2=xyz2.T
with SM.sl(['D'],ds,2,mode):
D=distance(xyz2,np.array(ave),t='y') #D of RMDS. Distance from ave structure
print '{0:0.1f}s has elapsed.'.format(time.time()-start_time)
##}}}
#=====================================
# Canonical Correlation Analysis
#=====================================
##{{{ Performing CCA
print 'Performing CCA... ('+tim()+')'
#-----------------Calculations--------------------
#with SM.sl([],ds,1,mode):
CCA=importr('CCA')
with SM.sl(['chi_ang_trans'],ds,1,mode):
chi_ang_trans=np.cos(chi_ang[0])
chi_ang_trans=np.vstack((chi_ang_trans,np.sin(chi_ang[0])))
for i in chi_ang[1:]:
chi_ang_trans=np.vstack((chi_ang_trans,np.cos(i)))
chi_ang_trans=np.vstack((chi_ang_trans,np.sin(i)))
with SM.sl(['CCA_rslt'],ds,1,mode):
CCA_rslt=CCA.cc(D.T,chi_ang_trans.T)
CCA_eigva=CCA_rslt.rx2('cor')
CCA_eigveX=CCA_rslt.rx2('xcoef') #Distance
CCA_eigveY2=np.array(CCA_rslt.rx2('ycoef')).T #Chi_ang in terms of sin and cos
CCA_eigveY=[]
for i in CCA_eigveY2:
CCA_eigveY.append(sqrt(i[::2]**2+i[1::2]**2))
CCA_eigveY=np.vstack(CCA_eigveY)
CCA_eigveY=CCA_eigveY.T
#-----------Secondary Calculations----------------
#----------------Plotting-------------------------
print 'Plotting my results.. ('+tim()+')'
#-------------------------------
# Scatter and Projections
#-------------------------------
##{{{
for i in xrange(4):
# http://matplotlib.org/examples/pylab_examples/scatter_hist.html
#=====================
# Setting plots up
#=====================
##{{{
#----------Setting up Dimensions
width1,width2,width3=0.12,0.58,0.06
hgap1,hgap2=0.005,0.11
left1= 0.05
left2=left1+width1+hgap1
left3=left2+width2+hgap2
height1,height2=0.62,0.15
vgap1=0.008
height3=height1+vgap1+height2
bottom1=0.110
bottom2=bottom1+height1+vgap1
bottom3=bottom1
rect_v1v2= [left2, bottom1, width2, height1]
rect_v1 = [left1, bottom1, width1, height1]
rect_v2 = [left2, bottom2, width2, height2]
rect_cb=[left3,bottom3,width3,height3]
fig=vars()['fig'+str(i)]=plt.figure(i,figsize=(11.69, 8.27),dpi=DPI)
##}}}
#=====================
# Plotting Vectors
#=====================
##{{{
ax_v1v2 = plt.axes(rect_v1v2)
ax_v1 = plt.axes(rect_v1,sharey=ax_v1v2)
ax_v2 = plt.axes(rect_v2,sharex=ax_v1v2)
# Labels and titles
ax_v1.get_yaxis().set_visible(False) #make ticks invisible
ax_v2.get_xaxis().set_visible(False)
plt.setp(ax_v1.xaxis.get_ticklabels(),size=7,rotation=90) #Rotate ticks and change font size
plt.setp(ax_v2.yaxis.get_ticklabels(),size=7)
ax_v1.set_title('Displacement from ave',position=(-0.1,0.5),transform=ax_v1.transAxes,rotation='vertical',fontsize='9')
ax_v2.set_title('Chi_Angle',fontsize='9')
ax_v2.set_ylabel('Contribution to vector',fontsize='7')
nn=np.array
#-----------------------------------------------------------------------------
# Side graphs
v1=abs(nn(CCA_eigveX)[:,i]/max(nn(CCA_eigveX)[:,i])) #Distances normalized into percentages
v1=v1/sum(v1)
bar11=ax_v1.barh(arange(len(v1)),v1,align='center',edgecolor='#ff5252',color='#ffcdcd')
v2=abs(nn(CCA_eigveY)[:,i]/max(nn(CCA_eigveY)[:,i])) #chi_ang normalized
v2=v2/sum(v2)
bar21=ax_v2.bar(arange(len(v2)),v2,align='center',edgecolor='#ff5252',color='#ffcdcd')
ax_v1.set_xlim(ax_v1.get_xlim()[::-1]) #Flip bar graph
#-----------------------------------------------------------------------------
# Main Graphs
X,Y=np.meshgrid(v2,v1)
Z=(X*Y)
surf=ax_v1v2.pcolor(Z,cmap='Reds',rasterized=True)
ax_v1v2.autoscale(enable=True, axis='both', tight=True)
Ylabel=struc.ca_names #distance
Xlabel=chi_resid #chi_angle
# Set xaxis ticks
ax_v1v2.set_xticks(arange(len(Xlabel)))
ax_v1v2.set_xticklabels(Xlabel,size=5)
plt.setp(ax_v1v2.xaxis.get_ticklabels(),va='top',ha='left',rotation=90) #Rotates
# Set yaxis ticks
ax_v1v2.yaxis.tick_right()
ax_v1v2.set_yticks(arange(len(Ylabel)))
ax_v1v2.set_yticklabels(Ylabel,size=4)
plt.setp(ax_v1v2.yaxis.get_ticklabels(),va='bottom',ha='left')
#ax_v1v2.grid(True)
#-----------------------------------------------------------------------------
# Colorbar and creating color bar axes
ax_cm=plt.axes(rect_cb)
cb=plt.colorbar(surf,cax=ax_cm)
plt.setp(ax_cm.get_xticklabels(),size=6)
ax_cm.yaxis.set_ticks(np.linspace(ax_cm.yaxis.get_ticklocs()[0],ax_cm.yaxis.get_ticklocs()[-1],3))
ax_cm.yaxis.set_ticklabels(['low','med','high'])
fig.text(0.1,0.96,os.getcwd(),fontsize=9)
fig.text(0.1,0.92,'correlation coefficient = {0:0.2f}'.format(CCA_eigva[i]),fontsize=9)
MPL.show()
##}}}
##}}}
#---------------------
# Plotting Scatter
#---------------------
##{{{
fig8=plt.figure(8,figsize=(11.69,8.27),dpi=DPI)
for i in xrange(4):
ax_cluster=fig8.add_subplot(2,2,i+1)
print 'Projecting data onto eigenvectors {0:0.0f}... ('.format(i+1)+tim()+')'
proj_dis=np.dot(D.T,nn(CCA_eigveX)[:,i]) #distance
proj_chi=np.dot(chi_ang.T,nn(CCA_eigveY)[:,i]) #chi_ang
# Plotting clusters
lin=ax_cluster.scatter(proj_chi,proj_dis,c=arange(len(proj_dis)),cmap='gist_rainbow',marker='o',lw=0,s=2, rasterized=True)
ax_cluster.set_ylabel('Distance proj'+str(i+1),fontsize=10)
ax_cluster.set_xlabel('Chi Angle proj'+str(i+1),fontsize=10)
cb=plt.colorbar(lin,ax=ax_cluster)
cb.set_label('Frames',fontsize=8)
cb.set_ticks([0,1])
cb.set_ticklabels(['0','.'])
fig8.text(0.1,0.96,os.getcwd(),fontsize=9)
MPL.show()
##}}}
#----------------------
# Correlation bar
#----------------------
##{{{
fig7=plt.figure(7,figsize=(11.69, 8.27),dpi=DPI)
ax17=fig7.add_subplot(111)
b117=ax17.bar(arange(len(CCA_eigva)),CCA_eigva,width=0.8,color='#9f9f9f')
ax17.set_xlabel('Eigenvector number')
ax17.set_ylabel('Correlation coefficient value')
ax17.set_title('Canonical Correlation Analysis')
MPL.show()
##}}}
##}}}
| [
"shockingpants@hotmail.com"
] | shockingpants@hotmail.com |
52a6c0f1d64c388941cf72c08bfda36968ed4e20 | b1695448cab31b976e17b11e085d90315cb1e5d6 | /UCLA/data_mining_cs145/satisfactory/Random Forest/rf.py | dde5d7206ffee2354b4fb9b0a010379c1ccb5c30 | [] | no_license | ehtk0502/my_files | be9bc1d0a32ba82d8b0cf18b486bd6d372322412 | 516baec4fd2f467dca611fea42c16f376873c7d7 | refs/heads/master | 2023-01-24T06:05:11.116602 | 2020-05-22T07:38:51 | 2020-05-22T07:38:51 | 170,579,241 | 0 | 0 | null | 2023-01-24T01:29:00 | 2019-02-13T21:03:32 | Jupyter Notebook | UTF-8 | Python | false | false | 5,566 | py | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
# Features we're using
# HEADERS = ["attributes_Alcohol", "attributes_casual", "attributes_romantic", "attributes_intimate", "attributes_classy",
# "attributes_hipster", "attributes_touristy", "attributes_trendy", "attributes_upscale", "attributes_BestNights_M",
# "attributes_BestNights_Tu", "attributes_BestNights_W", "attributes_BestNights_Th", "attributes_BestNights_F",
# "attributes_BestNights_Sat", "attributes_BestNights_Sun", "attributes_BikeParking", "attributes_Caters",
# "attributes_HasTV", "attributes_OutdoorSeating", "attributes_RestaurantsTakeOut", "attributes_WiFi",
# "attributes_stars", "attributes_NoiseLevel", "attributes_RestaurantsGoodForGroups", "is_open", "is_restaurant",
# "attributes_WheelchairAccessible", "attributes_RestaurantsPriceRange2", "attributes_RestaurantsReservations",
# "attributes_RestaurantsDelivery", "attributes_GoodForKids", "attributes_BusinessAcceptsCreditCards",
# "attributes_BusinessParking", "attributes_RestaurantsAttire", "review_count", "attributes_GoodForMeal",
# "attributes_RestaurantsTableService", "average_stars"]
HEADERS = ['average_stars', 'attributes_Alcohol', 'attributes_stars', 'attributes_WiFi', 'attributes_GoodForMeal']
def rf_classifier(features, target):
    """Fit a default RandomForestClassifier on (features, target) and return it."""
    model = RandomForestClassifier()
    model.fit(features, target)
    return model
if __name__ == "__main__":
print("Reading csv's...")
business = pd.read_csv('datasets/business_attributes.csv', encoding='latin1')
users = pd.read_csv('datasets/users.csv', encoding='latin1')
expected = pd.read_csv('datasets/validate_queries.csv', encoding='latin1')
final_test = pd.read_csv('datasets/test_queries.csv', encoding='latin1')
# users dataset also has a column called review_count,
# we want to take the columns we need here
users = users[['average_stars', 'user_id']]
print("Merging data...")
# Training data
traindata = pd.merge_ordered(business, expected, how='right', on='business_id')
train_x = pd.merge_ordered(traindata, users, how='left', on='user_id')
# Testing data (to submit)
testdata = pd.merge(business, final_test, how='right', on='business_id')
final_test_x = pd.merge(testdata, users, how='left', on='user_id')
print("Filter data for testing and training")
train_x.drop(columns=['business_id', 'user_id'], axis=1, inplace=True)
# Un-comment if you want to check RMSE
train_x, test_x, train_y, test_y = train_test_split(train_x[HEADERS], train_x['stars'], train_size=0.7)
# Un-comment if you don't want to check RMSE
# train_x, train_y = train_x[HEADERS], train_x['stars']
final_test_x.drop(columns=['business_id', 'user_id'], axis=1, inplace=True)
final_test_x = final_test_x[HEADERS]
# Output the csvs
train_x.to_csv('datasets/train_x.csv')
train_y.to_csv('datasets/train_y.csv')
final_test_x.to_csv('datasets/final_test_x.csv')
# Un-comment if you want to check RMSE
test_x.to_csv('datasets/test_x.csv')
test_y.to_csv('datasets/test_y.csv')
print("Training model...")
trained_model = rf_classifier(train_x, train_y)
# Print the feature ranking
# print("Feature ranking:")
# importances = trained_model.feature_importances_
# std = np.std([tree.feature_importances_ for tree in trained_model.estimators_],
# axis=0)
# indices = np.argsort(importances)[::-1]
# for f in range(train_x.shape[1]):
# print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
# plt.figure()
# plt.title("Feature importances")
# plt.bar(range(train_x.shape[1]), importances[indices],
# color="r", yerr=std[indices], align="center")
# plt.xticks(range(train_x.shape[1]), indices)
# plt.xlim([-1, train_x.shape[1]])
# plt.show()
# Un-comment if you want to check RMSE
print("Check accuracy...")
test_predictions = trained_model.predict_proba(test_x)
train_predictions = trained_model.predict_proba(train_x)
# predict_proba provides probability that that row is classified as 1, 2, 3, 4, 5
# We need to change this into a single decimal value for RMSE
# Weigh the classification of that column by the probability
# *1 *2 *3 *4 *5
# [ 0.4, 0.2, 0.3, 0.1, 0 ] => [ 0.4, 0.4, 0.9, 0.4, 0 ] => [ 2.1 ]
for i in range(test_predictions.shape[1]):
test_predictions[:,i] *= (i+1)
train_predictions[:,i] *= (i+1)
# [ 0.4, 0.4, 0.9, 0.4 ] => [ 2.1 ]
test_predictions = np.sum(test_predictions, axis=1)
train_predictions = np.sum(train_predictions, axis=1)
print("Train RMSE: ", mean_squared_error(train_y, train_predictions))
print("Test RMSE: ", mean_squared_error(test_y, test_predictions))
# End un-comment if you want to check RMSE
print("Making predictions...")
predictions = trained_model.predict_proba(final_test_x)
for i in range(predictions.shape[1]):
predictions[:,i] *= (i+1)
predictions = np.sum(predictions, axis=1)
print("Making csv...")
predictions = pd.DataFrame(predictions)
predictions.to_csv('datasets/predictions.csv')
| [
"ehtk0502@gmail.com"
] | ehtk0502@gmail.com |
35a9a876dc10d8de63623e6d3d37890bb3842900 | bea3febeda4c0688dfbb2db584ab4f7d710040e0 | /django/cbv/person/views.py | c954514b57714390b9b1210f810dd5c51ab31499 | [] | no_license | airpong/TIL-c9 | c471ac73e23716cf677ba590dd6099e584c42883 | 069cc53820a09cd9787765ad41ba7e792dc342b5 | refs/heads/master | 2022-12-12T22:26:23.147651 | 2019-06-27T08:24:44 | 2019-06-27T08:24:44 | 166,777,129 | 0 | 0 | null | 2022-11-22T03:46:57 | 2019-01-21T08:34:01 | Python | UTF-8 | Python | false | false | 999 | py | from django.shortcuts import render,redirect
from .models import Person
from .forms import PersonForm
from django.views.generic import ListView,CreateView
from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
# def list(request):
# people = Person.objects.all()
# return render(request,'person/person_list.html',{'people':people})
class PersonList(ListView):
model = Person
context_object_name = 'people'
# def create(request):
# if request.method == 'GET':
# form = PersonForm()
# return render(request,'person/person_form.html',{'form':form})
# else:
# last_name = request.POST.get('last_name')
# email = request.POST.get('email')
# age = request.POST.get('age')
# Person.objects.create(last_name=last_name,email=email,age=age)
# return redirect('list')
class PersonCreate(LoginRequiredMixin,CreateView):
model = Person
form_class = PersonForm
success_url = '/person/' | [
"giponge@gmail.com"
] | giponge@gmail.com |
1bea9bc3616fe721a74dbcd53630aec212558032 | 8420f7c680f1b3b66d7f903b9986fdd533ce63d9 | /examples/example_05_custom_parameter.py | ac1416041fcbbc99d18abbfcf8370ba211ebd2c4 | [
"BSD-3-Clause"
] | permissive | ilonajulczuk/pypet | 9cef890cc856a769441aef983e4367fee56d1d12 | 99dd37243c30178d3dc02798dcc6aa9320b6c213 | refs/heads/master | 2020-12-26T11:21:16.691896 | 2014-04-16T07:53:37 | 2014-04-16T07:53:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,240 | py | __author__ = 'Robert Meyer'
import numpy as np
import inspect
from pypet.environment import Environment
from pypet.parameter import Parameter, ArrayParameter
from pypet.trajectory import Trajectory
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Here we will see how we can write our own custom parameters and how we can use
# it with a trajectory.
# Now we want to do a more sophisticated simulations, we will integrate a differential equation
# with an Euler scheme
# Let's first define our job to do
def euler_scheme(traj, diff_func):
"""Simulation function for Euler integration.
:param traj:
Container for parameters and results
:param diff_func:
The differential equation we want to integrate
"""
steps = traj.steps
initial_conditions = traj.initial_conditions
dimension = len(initial_conditions)
# This array will collect the results
result_array = np.zeros((steps,dimension))
# Get the function parameters stored into `traj` as a dictionary
# with the (short) names as keys :
func_params_dict = traj.func_params.f_to_dict(short_names=True, fast_access=True)
# Take initial conditions as first result
result_array[0] = initial_conditions
# Now we compute the Euler Scheme steps-1 times
for idx in range(1,steps):
result_array[idx] = diff_func(result_array[idx-1], **func_params_dict) * traj.dt + \
result_array[idx-1]
# Note the **func_params_dict unzips the dictionary, it's the reverse of **kwargs in function
# definitions!
#Finally we want to keep the results
traj.f_add_result('euler_evolution', data=result_array, comment='Our time series data!')
# Ok, now we want to make our own (derived) parameter that stores source code of python functions.
# We do NOT want a parameter that stores an executable function. This would complicate
# the problem a lot. If you have something like that in mind, you might wanna take a look
# at the marshal (http://docs.python.org/2/library/marshal) module
# or dill (https://pypi.python.org/pypi/dill) package.
# Our intention here is to define a parameter that we later on use as a derived parameter
# to simply keep track of the source code we use ('git' would be, of course, the better solution
# but this is just an illustrative example)
class FunctionParameter(Parameter):
# We can go for a a cheap solution and make use of the function `_convert_data` of the parent.
# This gets called before adding data to the parameter to turn numpy arrays
# into read-only numpy arrays. But we will use the function for our purpose to extract
# the source code:
def _convert_data(self, val):
if callable(val):
return inspect.getsource(val)
else:
return super(FunctionParameter,self)._convert_data(val)
# For more complicate parameters you might consider implementing:
# `f_supports` (we do not need it since we convert the data to stuff the parameter already
# supports, and that is strings!)
#
# and
# the private functions
#
# `_values_of_same_type` (to tell whether data is similar, i.e. of two data items agree in their
# type, this is important to only allow exploration within the same dimension.
# For instance, a parameter that stores integers, should only explore integers etc.)
#
# and
#
# `_equal_values` (to tell if two data items are equal. This is important for merging if you
# want to erase duplicate parameter points. The trajectory needs to know when a
# parameter space point was visited before.)
#
# and
#
# `_store` (to be able to turn complex data into basic types understood by the storage service)
#
# and
#
# `_load` (to be able to recover your complex data form the basic types understood by the storage
# service)
#
# But for now we will rely on the parent functions and hope for the best!
# Ok now let's follow the ideas in the final section of the cookbook and let's
# have a part in our simulation that only defines the parameters.
def add_parameters(traj):
"""Adds all necessary parameters to the `traj` container"""
traj.f_add_parameter('steps', 10000, comment='Number of time steps to simulate')
traj.f_add_parameter('dt', 0.01, comment='Step size')
# Here we want to add the initial conditions as an array parameter. We will simulate
# a 3-D differential equation, the Lorenz attractor.
traj.f_add_parameter(ArrayParameter,'initial_conditions', np.array([0.0,0.0,0.0]),
comment = 'Our initial conditions, as default we will start from'
' origin!')
# We will group all parameters of the Lorenz differential equation into the group 'func_params'
traj.f_add_parameter('func_params.sigma', 10.0)
traj.f_add_parameter('func_params.beta', 8.0/3.0)
traj.f_add_parameter('func_params.rho', 28.0)
#For the fun of it we will annotate the group
traj.func_params.v_annotations.info='This group contains as default the original values chosen ' \
'by Edward Lorenz in 1963. Check it out on wikipedia ' \
'(https://en.wikipedia.org/wiki/Lorenz_attractor)!'
# We need to define the lorenz function, we will assume that the value array is 3 dimensional,
# First dimension contains the x-component, second y-component, and third the z-component
def diff_lorenz(value_array, sigma, beta, rho):
"""The Lorenz attractor differential equation
:param value_array: 3d array containing the x,y, and z component values.
:param sigma: Constant attractor parameter
:param beta: FConstant attractor parameter
:param rho: Constant attractor parameter
:return: 3d array of the Lorenz system evaluated at `value_array`
"""
diff_array = np.zeros(3)
diff_array[0] = sigma * (value_array[1]-value_array[0])
diff_array[1] = value_array[0] * (rho - value_array[2]) - value_array[1]
diff_array[2] = value_array[0] * value_array[1] - beta * value_array[2]
return diff_array
# And here goes our main function
def main():
env = Environment(trajectory='Example_05_Euler_Integration',
filename='experiments/example_05/HDF5/example_05.hdf5',
file_title='Example_05_Euler_Integration',
log_folder='experiments/example_05/LOGS/',
comment = 'Go for Euler!')
traj = env.v_trajectory
trajectory_name = traj.v_name
# 1st a) phase parameter addition
add_parameters(traj)
# 1st b) phase preparation
# We will add the differential equation (well, its source code only) as a derived parameter
traj.f_add_derived_parameter(FunctionParameter,'diff_eq', diff_lorenz,
comment='Source code of our equation!')
# We want to explore some initial conditions
traj.f_explore({'initial_conditions' : [
np.array([0.01,0.01,0.01]),
np.array([2.02,0.02,0.02]),
np.array([42.0,4.2,0.42])
]})
# 3 different conditions are enough for an illustrative example
# 2nd phase let's run the experiment
# We pass `euler_scheme` as our top-level simulation function and
# the Lorenz equation 'diff_lorenz' as an additional argument
env.f_run(euler_scheme, diff_lorenz)
# We don't have a 3rd phase of post-processing here
# 4th phase analysis.
# I would recommend to do post-processing completely independent from the simulation,
# but for simplicity let's do it here.
# Let's assume that we start all over again and load the entire trajectory new.
# Yet, there is an error within this approach, do you spot it?
del traj
traj = Trajectory(filename='experiments/example_05/HDF5/example_05.hdf5')
# We will only fully load parameters and derived parameters.
# Results will be loaded manually later on.
try:
# However, this will fail because our trajectory does not know how to
# build the FunctionParameter. You have seen this coming, right?
traj.f_load(name=trajectory_name,load_parameters=2,
load_derived_parameters=2,load_results=1)
except ImportError as e:
print 'That did\'nt work, I am sorry. %s ' % e.message
# Ok, let's try again but this time with adding our parameter to the imports
traj = Trajectory(filename='experiments/example_05/HDF5/example_05.hdf5',
dynamically_imported_classes=FunctionParameter)
# Now it works:
traj.f_load(name=trajectory_name,load_parameters=2,
load_derived_parameters=2,load_results=1)
#For the fun of it, let's print the source code
print '\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq
# Let's get the exploration array:
initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
# Now let's plot our simulated equations for the different initial conditions:
# We will iterate through the run names
for idx, run_name in enumerate(traj.f_get_run_names()):
#Get the result of run idx from the trajectory
euler_result = traj.results.f_get(run_name).euler_evolution
# Now we manually need to load the result. Actually the results are not so large and we
# could load them all at once. But for demonstration we do as if they were huge:
traj.f_load_item(euler_result)
euler_data = euler_result.data
#Plot fancy 3d plot
fig = plt.figure(idx)
ax = fig.gca(projection='3d')
x = euler_data[:,0]
y = euler_data[:,1]
z = euler_data[:,2]
ax.plot(x, y, z, label='Initial Conditions: %s' % str(initial_conditions_exploration_array[idx]))
plt.legend()
plt.show()
# Now we free the data again (because we assume its huuuuuuge):
del euler_data
euler_result.f_empty()
# You have to click through the images to stop the example_05 module!
if __name__ == '__main__':
main()
| [
"robert.meyer@ni.tu-berlin.de"
] | robert.meyer@ni.tu-berlin.de |
18f7fd778281764630b6d87c06f297330644c9a1 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_17/models/reference_with_type.py | 686da10b71d57d6ece1d116f5a7668ce8c35aa23 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 4,557 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.17
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_17 import models
class ReferenceWithType(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'resource_type': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'resource_type': 'resource_type'
}
required_args = {
}
def __init__(
self,
id=None, # type: str
name=None, # type: str
resource_type=None, # type: str
):
"""
Keyword args:
id (str): A globally unique, system-generated ID. The ID cannot be modified.
name (str): The resource name, such as volume name, pod name, snapshot name, and so on.
resource_type (str): Type of the object (full name of the endpoint). Valid values are `hosts`, `host-groups`, `network-interfaces`, `pods`, `ports`, `pod-replica-links`, `subnets`, `volumes`, `volume-snapshots`, `volume-groups`, `directories`, `policies/nfs`, `policies/smb`, and `policies/snapshot`, etc.
"""
if id is not None:
self.id = id
if name is not None:
self.name = name
if resource_type is not None:
self.resource_type = resource_type
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReferenceWithType`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReferenceWithType`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReferenceWithType`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReferenceWithType`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ReferenceWithType, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReferenceWithType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
08195f511eddf027330cb04042e386639b04923b | 1ff701332e1518a6b184da5c64e72c25e3128525 | /python-testsuite/common/read_helper.py | 7ed4531188db71c7e9023c8dde64b2c89b74bb9e | [
"MIT"
] | permissive | enyx-opensource/nxaccess-hls-framework | df1e0e9e30958206c6549ad0dc508a791a5431ca | f001c596bbda7ce216e7db9ed3e93131b2a3a308 | refs/heads/master | 2022-03-07T10:45:01.481637 | 2022-02-09T13:53:05 | 2022-02-09T13:53:05 | 185,207,033 | 5 | 6 | NOASSERTION | 2022-02-09T13:47:50 | 2019-05-06T13:54:33 | C++ | UTF-8 | Python | false | false | 3,374 | py | from threading import Thread
from time import sleep
from enum import IntEnum
from common.wait import waitFor
from enyx_oe import DataBuffer
from enyx_oe import read as swig_read
from enyx_oe import AsyncReader as swig_AsyncReader
from enyx_oe import CollectionMetadata, BaseMessage, FailedMessage
from enyx_oe import TCPReplyPayload, TCPStatusMessage
def read(oue_manager):
return bytes(swig_read(oue_manager))
class MessageType(IntEnum):
RawMessage = 0,
UpdatedMessage = 1,
TCPReplyPayload = 2,
TCPStackStatus = 3,
KillSwitchEvent = 4,
FailedMessage = 5,
DataSourceReportingMessage = 6
class TcpStatus(IntEnum):
# closed = 0,
# listen =1 ,
# syn_rcvd =2,
# syn_sent =3,
# established =4,
# close_wait =5,
# last_ack =6,
# fin_wait_1 =7,
# fin_wait_2 =8,
# closing =9 ,
# time_wait = 10
closed = 0,
opening = 1,
established = 2,
closing = 3
def get_metadata(data_read):
assert(isinstance(data_read, bytes))
data = DataBuffer(data_read)
metadata = BaseMessage(data.view()).metadata()
ret = {}
ret['type'] = metadata.messageType()
ret['Type'] = MessageType(metadata.messageType())
ret['collection_id'] = metadata.collectionId()
ret['session_id'] = metadata.tcpSessionId()
ret['length'] = metadata.length()
ret['timestamp'] = metadata.timestamp()
ret['status'] = metadata.status() # this is vector of uint8_t
return ret
def get_msg_type(data_read):
return get_metadata(data_read)['type']
def get_msg_length(data_read):
return get_metadata(data_read)['length']
def get_failed_message_reason(data_read):
assert(isinstance(data_read, bytes))
assert(get_msg_type(data_read) == CollectionMetadata.MessageType_FailedMessage)
data = DataBuffer(data_read)
ret = {}
ret['tcp'] = FailedMessage(data.view()).tpcSessionNotOpened()
ret['fifo'] = FailedMessage(data.view()).listOfFieldsBelowThreshold()
return ret
def get_tcp_stack_message_status(data_read):
assert(isinstance(data_read, bytes))
assert(get_msg_type(data_read) == CollectionMetadata.MessageType_TCPStackStatus)
data = DataBuffer(data_read)
return TCPStatusMessage(data.view()).status()
def get_tcp_reply_message(data_read):
assert(isinstance(data_read, bytes))
assert(get_msg_type(data_read) == CollectionMetadata.MessageType_TCPReplyPayload)
data = DataBuffer(data_read)
msg = TCPReplyPayload(data.view()).message()
return bytes(DataBuffer(msg).value())
class AsyncReader(Thread):
def __init__(self, oue_manager):
self.oue_manager = oue_manager
self.reader = swig_AsyncReader(oue_manager)
self.messages = list()
self.should_stop = False
Thread.__init__(self)
self.start()
def wait_for(self, number_of_message, timeout=1):
def check_nb_message():
return len(self.messages) >= number_of_message
return waitFor(check_nb_message, timeout)
def stop(self):
self.should_stop = True
self.join()
def run(self):
self.reader.start()
while not self.should_stop:
vector_msg = self.reader.pop()
msg = bytes(vector_msg)
if len(msg) > 0:
self.messages.append(msg)
else:
sleep(0.05)
self.reader.stop()
| [
"antoine.bernardeau@enyx.fr"
] | antoine.bernardeau@enyx.fr |
0f7ec680721030d047f06b1b94341a7c982454b5 | 402ef712e2d98bb616e64eb7d57145a643ad61d7 | /backend/mobile_testing_app__15569/wsgi.py | 7832e8909382ddeb029a4fbf984861ab927942fb | [] | no_license | crowdbotics-apps/mobile-testing-app--15569 | ce5299c1dc7b5ebf531043dbe7614c7206880ce0 | 5c6e5f045a9ba80592e81584ac7c88ea53eabdfa | refs/heads/master | 2023-01-24T21:54:07.548231 | 2020-11-24T06:01:05 | 2020-11-24T06:01:05 | 315,533,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | """
WSGI config for mobile_testing_app__15569 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobile_testing_app__15569.settings')
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
d4d527b421b1ffb54d22cc8f23e2769a3893d095 | be33921cf65768da98eb4d1e8dd885b2730308dd | /Test/Hello.py | 5043a771613217bb7268c79601f9677e93cdac94 | [] | no_license | mohsin-aslam/Python-Docs-Files | 1b3ed6c7958f44a56bb05bbd89cded8a3f402439 | 30009605d14cfed65436ca34a883b6e3f5fde432 | refs/heads/master | 2020-04-02T17:24:23.594678 | 2018-10-25T11:13:09 | 2018-10-25T11:13:09 | 154,656,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | import numpy as np
abc = np.array([556,213,312,212,543])
c = abc.std()
d= abc.var()
import nltk as nt
tokens = nt.word_tokenize('It is a chair. my name is Mohsin Aslam. I went thererere "ssas LOL Pakistan:)"')
tagged = nt.pos_tag(tokens)
sentiment = nt.sent_tokenize('It is a chair. my name is yasir . I went thererere "ssas LOL :)"')
entities = nt.chunk.ne_chunk(tagged)
from textblob import TextBlob
text = "وقال متحدث باسم الحوثيين إن هذا الهجوم أعقبه هجوم آخر استهدف العاملين في مجال الطوارئ في أرحب، على بعد 40 كيلومترا (25 ميلا) من مدينة صنعاء."
from langdetect import detect
lang = detect(text)
import requests
url = 'http://translate.google.com/translate_a/t'
params = {
"text": text,
"sl": lang,
"tl": "en",
"client": "p"}
print(requests.get(url, params=params).content)
if(lang == 'en'):
blob = TextBlob(text)
sent = blob.sentiment
print(sent)
else:
import requests
url = 'http://translate.google.com/translate_a/t'
params = {
"text": text,
"sl": lang,
"tl": "en",
"client": "p"
}
print(requests.get(url, params=params).content)
blob = TextBlob('great')
sent = blob.sentiment
print(sent)
#en_blob = TextBlob(u'Simple is better than complex.')
from langdetect import detect
# print(detect(text))
b1 = TextBlob("And now for something completely different.")
print(b1.parse())
| [
"mohsin.alsam@github.com"
] | mohsin.alsam@github.com |
8ef1c4074a8008b323b7858f94bd6e86f09a5fc3 | 0fa4f25759b6ec123b297775bfe5310be5b8637a | /GetTranscript.py | a88c0c1a0d32478e02b5c8ddf3fd47ef18b07f82 | [
"MIT"
] | permissive | YingRushi-mac/Ahnu | 115eabcb0665c53d414ed622473312857c3dcfc5 | 841cdec44ac9a323d6ffabcb643ed7b65e864dc6 | refs/heads/master | 2023-03-17T16:02:32.855369 | 2018-05-13T10:05:16 | 2018-05-13T10:05:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | """
自己查成绩实在是麻烦,一次次的查,一个爬虫解决
Author: Aber Sheeran
Time: 2017-12-24
"""
from Base import Base
class GetTheFuckTranscripts(Base):
"""获取期末成绩"""
def get_transcripts(self, year, semester):
"""获取特定学年特定学期的成绩"""
html = self.get_page("http://jwgl.ahnu.edu.cn/query/cjquery/index?action=ok&xkxn=%s&xkxq=%s" % (year, semester))
# 一波切割字符串骚操作,不用re也不用xpath或者bs
table = html.split("table")[2]
return "{0}{1}{2}{3}{4}".format(
'<html>\n<head>\n<link rel="stylesheet" href="https://agent.cathaysian.cn/cloudflare?url=ajax/libs/pure/1.0.0/pure-min.css">\n</head>\n',
'<body>\n<table class="pure-table pure-table-bordered "',
table.replace('class="thtd"', 'align="center"'),
"table>\n</body>",
'\n</html>',
)
def check_transcripts_integrity(self, html):
"""检查成绩的完整性"""
from bs4 import BeautifulSoup
page = BeautifulSoup(html, "lxml")
for each in page.find_all("tr")[1:-1]:
if each.find_all("td")[7].text == "":
return False
return True
if __name__ == "__main__":
main = GetTheFuckTranscripts({
"username":"",
"password":""
})
data = main.get_transcripts("2017-2018", "2")
print(data)
| [
"1191170766@qq.com"
] | 1191170766@qq.com |
019f0fafc2964e1fc09c65a2372b3c0bff47223d | ba47b609ca10ce5cf576297973a000271d77fec5 | /Lab 01/q3.py | 998d15fc86338a56e202227c14e24c9e23c6a2c3 | [] | no_license | saisandeep2484/SciCompLab | bcaf27b44c269848c0eec57cf3dd98e6dc2f51ad | 87e797fd85799045ef7cd5b862f945f1d780de3f | refs/heads/main | 2023-04-17T22:09:31.494379 | 2021-04-26T10:52:04 | 2021-04-26T10:52:04 | 361,713,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,535 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
print("")
print("-------------q3-------------")
print("")
def f(x):
return (-math.sin(x)+(x/2))
def Df(x):
return (-math.cos(x)+(1/2))
def BisectionMethod():
print("Method Used : Bisection Method")
a = (math.pi)/2
b = math.pi
eps = 1/(pow(10,1)) #epsilon
print("Epsilon : 10^(-1)")
n = math.ceil((math.log(b-a) - math.log(eps))/math.log(2))
print("n (number of iterations) :",n)
while n>0:
n = n-1
c = (a+b)/2
if f(b)*f(c)<=0:
a = c
else:
b = c
print("aprroximate root of f(x) = x/2 - sinx in the interval [pi/2,pi] :",c)
print("")
return c
def NewtonsMethod(x_init):
print("Method Used : Newton's Method")
x0 = x_init #initial estimate
print("Intial Estimate (from Bisection Method) :",x0)
eps = 1/(2*pow(10,7))
print("Epsilon : 0.5 x 10^(-7)")
root = x0
n = 0
while 1:
n+=1
Dfx0 = Df(x0)
if Dfx0 == 0:
print("First Derivative is 0")
break
x1 = x0 - f(x0)/Dfx0
if abs(x1-x0) <= eps:
root = x1
break
else:
x0 = x1
print("n (number of iterations) :",n)
print("The root (accurate upto 7 decimal places) of f(x) = x/2 - sinx in the interval [pi/2,pi] :",root)
print("")
approx_root = BisectionMethod()
NewtonsMethod(approx_root)
| [
"noreply@github.com"
] | saisandeep2484.noreply@github.com |
235f40b873b377065055784a18c708cd33c11a20 | dbdb9b2102b25808f0363d50ff85626921b1c70a | /rest_api_3_product/settings.py | baee96ac579b1af253e75334bad35faaa15bf71c | [] | no_license | vinodkumar96/rest_api_3_product | dee834e704c25c812ba94a682641ab7f9bcabd44 | b0be03b80d7b59ef4d81e02f977c5ed1df862709 | refs/heads/master | 2020-07-04T20:38:15.552542 | 2019-08-14T19:08:09 | 2019-08-14T19:08:09 | 202,409,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | """
Django settings for rest_api_3_product project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd56v)@7t(80-417mdh)+3++!d5hd($la5m$w*b4xum9vjfnx)u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Rapp_3_ModelClass.apps.Rapp3ModelclassConfig',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'rest_api_3_product.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'rest_api_3_product.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"vinodkumaryv96@gmail.com"
] | vinodkumaryv96@gmail.com |
ca406833c0c73a83ba691135a58ff4bc5566f385 | 3a5b6b0c84256ca497e88b7bfddb6fb0cf273e5c | /main.py | 73072fc251e020664bef0247895a6e5cbe4c8e6c | [] | no_license | JohnSchaumleffel/build-a-blog | 09a868b03bd007c1cbc439eaf7e2238e68f75932 | 2163113c8b6af3f2c71adf8a7b5f26e19fd59b56 | refs/heads/master | 2020-03-31T15:39:15.344154 | 2018-10-14T15:09:25 | 2018-10-14T15:09:25 | 152,345,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | from flask import Flask, request, redirect, render_template, session, flash
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://build-a-blog:password@localhost:8889/build-a-blog'
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
class Blog(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(120))
body = db.Column(db.String(1000))
def __init__(self, title, body):
self.title = title
self.body = body
@app.route('/blog', methods=['POST', 'GET'])
def blog():
blogs = Blog.query.all()
id = request.query_string
if request.method == 'GET':
if not id:
return render_template('blog.html', blogs=blogs)
else:
b = int(request.args.get('b'))
blog = Blog.query.get(b)
return render_template('singlepost.html', blog=blog)
@app.route('/newpost', methods=['POST', 'GET'])
def new_post():
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
if not title:
flash('Title can not be blank.')
return redirect('/newpost')
if not body:
flash('Please enter a post.')
return redirect('/newpost')
else:
new_post = Blog(title, body)
db.session.add(new_post)
db.session.commit()
b = new_post.id
blog = Blog.query.get(b)
return render_template('singlepost.html', blog=blog)
return redirect('/blog')
return render_template('newpost.html')
app.run() | [
"johnschaumleffel@gmail.com"
] | johnschaumleffel@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.