blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
15592a4780200d155b607e06d039b8f6cc76f932 | 187313a571fa75c32090dc04d5159425c068b015 | /practicals/06_python_2015/nb_exons.py | b95b648b97fe380acc163dacddaf8747bfc2089f | [] | no_license | dputhier/jgb53d-bd-prog_github | 51e02f15fb85acb0713f6278a94140ae71dae8b8 | ca7430bc50b557b3ac9da18dd0baba8e3af79190 | refs/heads/master | 2021-10-29T14:13:50.209213 | 2018-12-03T22:30:09 | 2018-12-03T22:30:09 | 70,686,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,946 | py | # -*- coding: utf-8 -*-
# The "coding" declaration above allows accented characters in comments.
"""
Author D. Puthier
date: Wed Oct 28 16:44:29 CET 2015
program: nb_exons.py
This program is intended to compute the number of exons in the gtf file.
"""
# The re module provides regular-expression substitution on strings.
import re

# Maps each transcript id to the number of exon records seen for it.
nb_exons = dict()

# Pre-compile the two patterns used to isolate the transcript id
# (compiling once instead of re-parsing the pattern on every exon line):
# everything up to 'transcript_id "', then from the closing quote onward.
_PREFIX_RE = re.compile(r'.* transcript_id "')
_SUFFIX_RE = re.compile('".*\n')

# Remember to adapt the path to the file.
# A context manager guarantees the handle is closed even on error
# (the original opened the file and never closed it).
with open("../data/refGene_hg38.gtf", "r") as file_handler:
    for line in file_handler:
        # Split the tab-separated GTF columns.
        columns = line.split("\t")
        if columns[2] == "exon":
            # Strip everything up to (and including) 'transcript_id "',
            # then strip from the closing quote to the end of the line,
            # leaving only the transcript identifier from column 9.
            tx_name = _PREFIX_RE.sub("", columns[8])
            tx_name = _SUFFIX_RE.sub("", tx_name)
            # Increment the exon count, starting from 0 for unseen ids;
            # dict.get replaces the original if-not-get/else dance.
            nb_exons[tx_name] = nb_exons.get(tx_name, 0) + 1
# Pour toutes les clefs du dictionnaire nb_exons
for tx_name in nb_exons:
print(tx_name + "\t" + str(nb_exons[tx_name])) | [
"puthier@gmail.com"
] | puthier@gmail.com |
a2cbb64e6ca5306f7716b8b05e7503a78cc5cc68 | 9eef34128b8574f5ceeb5de85d23da0b4f271693 | /accounts/migrations/0002_auto_20180922_0513.py | 49e2401d189612170f2d301f3e035659a80bec9c | [] | no_license | MiJamil/My-first-djangoapp | ef6cae484951745bf6cd8051f49047bc5cc12afe | 6df3fd60dd411a159339e55d9515249ba0956d1e | refs/heads/master | 2020-04-01T18:13:35.957598 | 2019-08-06T02:47:37 | 2019-08-06T02:47:37 | 153,479,819 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-09-22 05:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename UserProfile.Phome to Phone (typo fix).

    NOTE(review): this is implemented as RemoveField + AddField rather
    than RenameField, so any data stored in the old 'Phome' column is
    dropped by this migration -- presumably acceptable for a typo fix,
    but confirm before reusing this pattern. Historical migrations must
    not be edited once applied.
    """

    dependencies = [
        ('accounts', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='userprofile',
            name='Phome',
        ),
        migrations.AddField(
            model_name='userprofile',
            name='Phone',
            field=models.CharField(default='', max_length=20),
        ),
    ]
| [
"m.mdislam93@gmail.com"
] | m.mdislam93@gmail.com |
c3f1df372c3cd9580a5d053a7825131737d34df7 | a199e8cdd97d7b698bafb22707c488a5984ea360 | /elastic_VTI_solvers/elastic_VTI_example.py | baa1d8942f0d62d7e0f4fc1e0fd0fee2c44907d2 | [
"MIT"
] | permissive | ckhimji/IRP_testing | c1b5ed3b81386fe3a150d032e1a05e7e7d6ceb5f | f3d69af7987d182bdd7c5d659d3fa9ed02e792cf | refs/heads/master | 2022-11-17T10:42:41.939780 | 2020-07-11T15:32:27 | 2020-07-11T15:32:27 | 278,889,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,347 | py | import numpy as np
from argparse import ArgumentParser
from devito import configuration
from devito.logger import info
from elastic_VTI_solvers.wavesolver import ElasticVTIWaveSolver
from examples.seismic import setup_geometry
from model_VTI import ModelElasticVTI
def elastic_VTI_setup(origin=(0., 0., 0.), spacing=(15.0, 15.0, 15.0), shape=(50, 50, 50), space_order=4, vp=2.0, vs=1.0, rho=1.8,
                      epsilon=0.25, delta=0.10, gamma=0.05,
                      nbl=10, tn=500., constant=False, **kwargs):
    """Build an elastic VTI model, acquisition geometry and wave solver.

    vp/vs/rho are velocities and density; epsilon/delta/gamma are the
    Thomsen anisotropy parameters; nbl is the absorbing-layer width and
    tn the simulation end time.  NOTE(review): `constant` is accepted but
    never used here -- presumably consumed by callers; confirm.
    Returns a ready-to-run ElasticVTIWaveSolver.
    """
    model = ModelElasticVTI(origin=origin, spacing=spacing, shape=shape, space_order=space_order,
                            vp=vp, vs=vs, rho=rho, epsilon=epsilon, delta=delta, gamma=gamma, nbl=nbl,
                            dtype=kwargs.pop('dtype', np.float32), **kwargs)
    # Source and receiver geometries
    geometry = setup_geometry(model, tn)
    # Create solver object to provide relevant operators
    solver = ElasticVTIWaveSolver(model, geometry, space_order=space_order, **kwargs)
    return solver
def run(shape=(50, 50), spacing=(20.0, 20.0), tn=1000.0,
        space_order=4, nbl=40, autotune=False, constant=False, **kwargs):
    """Set up the VTI solver and run one forward simulation.

    Returns (gflopss, oi, timings, [rec1, rec2, v, tau]) where the last
    element holds the two receiver recordings and the wavefields.
    """
    solver = elastic_VTI_setup(shape=shape, spacing=spacing, nbl=nbl, tn=tn,
                               space_order=space_order, constant=constant, **kwargs)
    info("Applying Forward")
    # Define receiver geometry (spread across x, just below surface)
    rec1, rec2, v, tau, summary = solver.forward(autotune=autotune)
    return (summary.gflopss, summary.oi, summary.timings,
            [rec1, rec2, v, tau])
def test_elastic_VTI():
    """Smoke-test the default forward run against reference receiver norms."""
    _, _, _, [rec1, rec2, v, tau] = run()
    # Flatten each receiver's data and compare its L2 norm to values
    # recorded from a known-good run of this configuration.
    norm = lambda x: np.linalg.norm(x.data.reshape(-1))
    assert np.isclose(norm(rec1), 23.7273, atol=1e-3, rtol=0)
    assert np.isclose(norm(rec2), 0.99306, atol=1e-3, rtol=0)
if __name__ == "__main__":
    # Command-line driver: choose 2D/3D presets and forward the rest of
    # the options (space order, boundary layers, optimization level,
    # auto-tuning) to run().
    description = ("Example script for a set of elastic operators.")
    parser = ArgumentParser(description=description)
    parser.add_argument('--2d', dest='dim2', default=False, action='store_true',
                        help="Preset to determine the physical problem setup")
    parser.add_argument("-so", "--space_order", default=4,
                        type=int, help="Space order of the simulation")
    parser.add_argument("--nbl", default=40,
                        type=int, help="Number of boundary layers around the domain")
    parser.add_argument("--constant", default=False, action='store_true',
                        help="Constant velocity model, default is a two layer model")
    parser.add_argument("-opt", default="advanced",
                        choices=configuration._accepted['opt'],
                        help="Performance optimization level")
    parser.add_argument('-a', '--autotune', default='off',
                        choices=(configuration._accepted['autotuning']),
                        help="Operator auto-tuning mode")
    args = parser.parse_args()

    # 2D preset parameters
    if args.dim2:
        shape = (150, 150)
        spacing = (10.0, 10.0)
        tn = 750.0
    # 3D preset parameters
    else:
        shape = (150, 150, 150)
        spacing = (10.0, 10.0, 10.0)
        tn = 1250.0

    run(shape=shape, spacing=spacing, nbl=args.nbl, tn=tn, opt=args.opt,
        space_order=args.space_order, autotune=args.autotune, constant=args.constant)
| [
"noreply@github.com"
] | noreply@github.com |
55d08469ece7444288c08eaf48facc922824714d | 8ed19af680ce46ae33f9c05681356432f60e2cb9 | /ssh-reset-mavlink-router.py | 3ac617dc44e2d09fea619fb5f75222cb0df7c3a5 | [] | no_license | themcfadden/bin | 648f3c074531f4d59d6c13d925bb54c6e7a1a5d8 | 7d43f30e7c50d1cd386478f6aba8e5798098c466 | refs/heads/master | 2023-08-03T11:21:59.614047 | 2023-08-01T16:11:04 | 2023-08-01T16:11:04 | 89,616,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,135 | py | #!/usr/bin/env python3
"""
SSH systemctl restart mavlink-router.
Usage: ssh-reset-mavlink-router.py
"""
import os
import sys
import time
import traceback
import pexpect
MK1_PASSWORD = "oelinux123"
def ssh_password(child, passwords):
    """
    Use expect to send passwords and process response
    """
    # NOTE(review): this function appears to be dead code -- ssh_command()
    # calls scp_password() instead, which duplicates this logic with one
    # extra '100%' pattern. Consider consolidating.
    ssh_pass_fail = 'try again.'
    # Try each candidate password in order until one is accepted.
    for password in passwords:
        #now we should be sending the password
        child.sendline(password)
        i = child.expect([pexpect.TIMEOUT, ssh_pass_fail, '[#$]'])
        if i == 0: #timeout
            print('ERROR!')
            print('SSH COULD NOT LOGIN:')
            print(child.before, child.after)
            return None
        if i == 1: #wrong password
            print('Wrong Password! ('+ password +')')
            continue
        if i == 2: #success -- a shell prompt appeared
            print('SUCCESS LOGGING IN!')
            return child
    # Every candidate password was rejected.
    return None
def ssh_cli_command(child, ssh_cmd_string):
    """Send a shell command over an established pexpect SSH session.

    Returns the child on success (a shell prompt was seen afterwards),
    or None if the prompt never came back (timeout).
    """
    child.sendline(ssh_cmd_string)
    outcome = child.expect([pexpect.TIMEOUT, '[#$]'])
    if outcome == 1:
        # Prompt re-appeared: the command was dispatched.
        return child
    # Timed out waiting for a prompt; dump buffers to aid debugging.
    print("Did not get to path")
    print(child.before, child.after)
    return None
def ssh_command(ssh_command_string, passwords):
    """
    Use expect to issue ssh command.

    Spawns `ssh_command_string`, drives the interactive handshake
    (host-key prompt, password prompt, error cases) and returns the
    logged-in pexpect child, or None / sys.exit() on failure.
    """
    #define some variables
    ssh_newkey = 'Are you sure you want to continue connecting'
    ssh_wrongkey = 'Host key verification failed.'
    ssh_device_offline = 'No route to host'
    #ssh_last_login = "Last login:"
    child = pexpect.spawn(ssh_command_string)
    # State machine over ssh's interactive output; loop until we either
    # reach a prompt, hit a fatal condition, or get the password request.
    while True:
        i = child.expect([pexpect.TIMEOUT, ssh_newkey, ssh_wrongkey,
                          '[#$]', 'Connection refused', ssh_device_offline,
                          'password: '])
        if i == 0: # Timeout
            print('ERROR!')
            print('SSH could not login. Here is what SSH said:')
            print(child.before, child.after)
            return None
        if i == 1: # SSH does not have the public key. Just accept it.
            #print(child.before, child.after)
            child.sendline('yes')
            time.sleep(0.1)
        elif i == 2:
            print('KEY CHANGE. HANDLE THIS SCENARIO.')
            #print(child.before, child.after)
            sys.exit(1)
            #child.close(force=True)
            #child = pexpect.spawn(ssh_command)
            #continue
            return None
        if i == 3: #already logged in and ssh multiplexing is occurring
            print('LOGGED IN (already).')
            #print(child.before, child.after)
            return child
        if i == 7:
            # NOTE(review): the expect list above has only 7 patterns
            # (indices 0-6), so i == 7 can never match -- this branch is
            # dead code. The real login path is i == 6 below.
            print('LOGGED IN.')
            return child
        if i == 4:
            print('SSH NOT RUNNING ON REMOTE HOST')
            #print(child.before, child.after)
            sys.exit(-1)
        if i == 5:
            print('IP Unreachable')
            #print(child.before, child.after)
            sys.exit(-1)
        elif i == 6:
            # Password prompt seen: leave the loop and authenticate.
            #print('Got Password Request')
            #print(child.before, child.after)
            break
    child = scp_password(child, passwords)
    return child
def scp_password(child, passwords):
    """
    Use expect to send passwords and process response.

    Tries each candidate password in turn; returns the child once either
    a transfer completes ('100%') or a shell prompt appears, else None.
    """
    ssh_pass_fail = 'try again.'
    for password in passwords:
        #now we should be sending the password
        child.sendline(password)
        i = child.expect([pexpect.TIMEOUT, ssh_pass_fail, '100%', '[#$]'])
        if i == 0: #timeout
            print('ERROR!')
            print('SSH COULD NOT LOGIN:')
            print(child.before, child.after)
            return None
        if i == 1: #wrong password
            print('Wrong Password! ('+ password +')')
            continue
        if i == 2: #success -- scp progress reached 100%
            #print('FILE COPIED')
            return child
        if i == 3: #success -- shell prompt (interactive ssh login)
            #print('LOGGED IN')
            return child
    # All candidate passwords were rejected.
    return None
def main(opts, args):
    """Log into the (hard-coded) vehicle over SSH and restart mavlink-router.

    NOTE(review): host, user and passwords are hard-coded below; `opts`
    and `args` are accepted but unused.
    """
    # vars
    # Terminal size is read but only used by the commented-out resize
    # command below, so lin/col are effectively unused.
    lin, col = os.popen('stty size', 'r').read().split()
    # resize_command="COLUMNS=" + col + ";LINES=" + lin + ";export COLUMNS LINES;\n"
    host = "192.168.1.222"
    user = 'root'
    passwords = [MK1_PASSWORD, 'oelinux123', 'pw3']
    ssh_clearkey = 'ssh-keygen -f "' + os.path.expanduser("~") + \
        '/.ssh/known_hosts" -R ' + host + ' &>/dev/null'
    # clear any existing key to facilitate quickly moving from device to device,
    # or same device with new keys
    os.system(ssh_clearkey)
    # make connection
    ssh_command_string = ('/usr/bin/ssh -l %s %s'%(user, host))
    child = ssh_command(ssh_command_string, passwords)
    if child is None:
        print('Could Not Login.')
        sys.exit(1)
    ssh_cli_command(child, "systemctl restart mavlink-router")
    print("mavlink router reset")
if __name__ == '__main__':
try:
opts = [opt for opt in sys.argv[1:] if opt.startswith("-")]
args = [arg for arg in sys.argv[1:] if not arg.startswith("-")]
main(opts, args)
except(Exception) as e:
print(str(e))
traceback.print_exc()
sys.exit(1)
| [
"matt.mcfadden@tealdrones.com"
] | matt.mcfadden@tealdrones.com |
18bb104561c4f2f543251644d9fcd7289c25a575 | 4125bad7406f9f44a5a83101cee4b81142c8de73 | /paypro/hooks.py | 051b4e66bee3cec7c7a31cfea7dd780329f110e4 | [
"MIT"
] | permissive | lightningmaharaja/payPRO | 1eb3e271864d3d4b4c2473b61f65aac5c1ad39fe | 15389ce24bd3b5825c65f91ad2f85a9a29342e5f | refs/heads/master | 2023-03-15T14:18:27.097526 | 2020-11-09T10:35:31 | 2020-11-09T10:35:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "paypro"
app_title = "Paypro"
app_publisher = "Teampro"
app_description = "Payroll Management"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "barathprathosh@groupteampro.com"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/paypro/css/paypro.css"
# app_include_js = "/assets/paypro/js/paypro.js"
# include js, css files in header of web template
# web_include_css = "/assets/paypro/css/paypro.css"
# web_include_js = "/assets/paypro/js/paypro.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "paypro.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "paypro.install.before_install"
# after_install = "paypro.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "paypro.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "paypro.tasks.all"
# ],
# "daily": [
# "paypro.tasks.daily"
# ],
# "hourly": [
# "paypro.tasks.hourly"
# ],
# "weekly": [
# "paypro.tasks.weekly"
# ]
# "monthly": [
# "paypro.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "paypro.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "paypro.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "paypro.task.get_dashboard_data"
# }
| [
"subash13579@gmail.com"
] | subash13579@gmail.com |
8fe386036ae3d2b2bc96e016ee126c685860a1bb | 785f6cdfd819fa57cd50d2b3d930009686832384 | /Docx/Use_ReadDocx.py | 1d14364f3f47bcea3d050b25d650b2137afe6f59 | [] | no_license | dixit5sharma/Individual-Automations | b5da6e4abafd11f74556a07d709b2a05a3a50052 | aab85aa19d4cc74f3ecb7ba0151193320a84dd6d | refs/heads/master | 2020-04-14T13:46:12.354063 | 2019-01-19T10:55:37 | 2019-01-19T10:55:37 | 163,878,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | import ReadDocx
filename = "Files\\demo.docx"
print(ReadDocx.getText(filename)) | [
"dixit5sharma@gmail.com"
] | dixit5sharma@gmail.com |
a4172035efb4e703eaf933bf8be065a1c6214edd | 14e3a6d5d5ef1a7fc576c0670361fc908630b495 | /siem_integrations/clx_query_service/manage.py | 7d1c4283d89a7e6b0b9ec1217baeea5034f89afd | [
"Apache-2.0"
] | permissive | rapidsai/clx | 3b6e49b53704de7f81fcd923ae88148a6ed5f031 | 68c14f460b5d3ab41ade9b2450126db0d2536745 | refs/heads/branch-23.04 | 2023-05-25T09:37:15.553353 | 2023-05-19T16:07:00 | 2023-05-19T16:07:00 | 186,716,715 | 169 | 93 | Apache-2.0 | 2023-05-19T16:07:02 | 2019-05-14T23:47:32 | Jupyter Notebook | UTF-8 | Python | false | false | 637 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django administrative commands."""
    # Default the settings module unless the caller already exported one.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "clx_query_service.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| [
"bhargavsuryadevara@gmail.com"
] | bhargavsuryadevara@gmail.com |
e25b350871e12d31f6b6bc62c04e5aba3c26130e | 5db3009eb36afe7110ed5402be3a9e570c58c540 | /my_plugins/YouCompleteMe/third_party/ycmd/third_party/jedi_deps/jedi/test/completion/docstring.py | 2b9f3481cf5fd27532a2eb46fe7d83f487fbd3c2 | [
"GPL-3.0-only",
"GPL-1.0-or-later",
"MIT"
] | permissive | imfangli/vimrc | ced2c6caece1cf19421c6ea7deb017bec4ca3a27 | d2d14e7d083d70cc8627ddccb5b99c53c3c38be3 | refs/heads/master | 2022-02-01T00:34:31.855421 | 2022-01-22T15:57:28 | 2022-01-22T15:57:28 | 211,766,038 | 2 | 0 | MIT | 2019-09-30T03:15:03 | 2019-09-30T03:15:02 | null | UTF-8 | Python | false | false | 3,723 | py | """ Test docstrings in functions and classes, which are used to infer types """
# -----------------
# sphinx style
# -----------------
def sphinxy(a, b, c, d, x):
""" asdfasdf
:param a: blablabla
:type a: str
:type b: (str, int)
:type c: random.Random
:type d: :class:`random.Random`
:param str x: blablabla
:rtype: dict
"""
#? str()
a
#? str()
b[0]
#? int()
b[1]
#? ['seed']
c.seed
#? ['seed']
d.seed
#? ['lower']
x.lower
#? dict()
sphinxy()
# wrong declarations
def sphinxy2(a, b, x, y, z):
"""
:param a: Forgot type declaration
:type a:
:param b: Just something
:type b: ``
:param x: Just something without type
:param y: A function
:type y: def l(): pass
:param z: A keyword
:type z: return
:rtype:
"""
#?
a
#?
b
#?
x
#?
y
#?
z
#?
sphinxy2()
def sphinxy_param_type_wrapped(a):
"""
:param str a:
Some description wrapped onto the next line with no space after the
colon.
"""
#? str()
a
# local classes -> github #370
class ProgramNode():
pass
def local_classes(node, node2):
"""
:type node: ProgramNode
... and the class definition after this func definition:
:type node2: ProgramNode2
"""
#? ProgramNode()
node
#? ProgramNode2()
node2
class ProgramNode2():
pass
def list_with_non_imports(lst):
"""
Should be able to work with tuples and lists and still import stuff.
:type lst: (random.Random, [collections.defaultdict, ...])
"""
#? ['seed']
lst[0].seed
import collections as col
# use some weird index
#? col.defaultdict()
lst[1][10]
def two_dots(a):
"""
:type a: json.decoder.JSONDecoder
"""
#? ['raw_decode']
a.raw_decode
# sphinx returns
def return_module_object():
"""
:rtype: :class:`random.Random`
"""
#? ['seed']
return_module_object().seed
# -----------------
# epydoc style
# -----------------
def epydoc(a, b):
""" asdfasdf
@type a: str
@param a: blablabla
@type b: (str, int)
@param b: blablah
@rtype: list
"""
#? str()
a
#? str()
b[0]
#? int()
b[1]
#? list()
epydoc()
# Returns with param type only
def rparam(a,b):
"""
@type a: str
"""
return a
#? str()
rparam()
# Composite types
def composite():
"""
@rtype: (str, int, dict)
"""
x, y, z = composite()
#? str()
x
#? int()
y
#? dict()
z
# Both docstring and calculated return type
def both():
"""
@rtype: str
"""
return 23
#? str() int()
both()
class Test(object):
def __init__(self):
self.teststr = ""
"""
# jedi issue #210
"""
def test(self):
#? ['teststr']
self.teststr
# -----------------
# statement docstrings
# -----------------
d = ''
""" bsdf """
#? str()
d.upper()
# -----------------
# class docstrings
# -----------------
class InInit():
def __init__(self, foo):
"""
:type foo: str
"""
#? str()
foo
class InClass():
"""
:type foo: str
"""
def __init__(self, foo):
#? str()
foo
class InBoth():
"""
:type foo: str
"""
def __init__(self, foo):
"""
:type foo: int
"""
#? str() int()
foo
def __init__(foo):
"""
:type foo: str
"""
#? str()
foo
# -----------------
# Renamed imports (#507)
# -----------------
import datetime
from datetime import datetime as datetime_imported
def import_issues(foo):
"""
@type foo: datetime_imported
"""
#? datetime.datetime()
foo
| [
"fangli@zhiai2019.com"
] | fangli@zhiai2019.com |
d26b26be22550b430e392792287c16f88d629e86 | 8826ef8f6a206a3d995bf21ebd9f26a940933d46 | /project/app/main.py | d1454fbcc58a29034b8dbf1fb83b8ede93a4b972 | [] | no_license | caetasousa/FastCar-FastApi-SqlModel-Alembic-Docker | ff8f14303a2c06dfcfae7f04f2a46a7ab30cc40c | f3a80c276113618b163c66bee062cdfa3300e898 | refs/heads/main | 2023-08-25T17:05:19.302365 | 2021-10-17T01:53:00 | 2021-10-17T01:53:00 | 417,993,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | from fastapi import FastAPI
from app.api.routes import router as api_router
app = FastAPI()
@app.get("/ping")
async def pong():
return {"ping": "pong!"}
app.include_router(api_router, prefix="/api") | [
"root@LAPTOP-BM9S8AHN.localdomain"
] | root@LAPTOP-BM9S8AHN.localdomain |
465619035f486d9994e385124ffd90f3755d09a0 | 6a2d106c896f62018198f90e8d3a3323f25a0624 | /bot.py | f94fb37b48c158201547ca188b98ddcad4b568cd | [
"MIT"
] | permissive | arya2krishnan/covidPredictorBot | d02851b680ed5ae93d6ab5d7ad713f34776c21b1 | 18215bd30117bf3aba32e9c45847118988a94bb6 | refs/heads/main | 2023-01-24T12:23:03.794297 | 2020-11-15T22:38:46 | 2020-11-15T22:38:46 | 312,965,622 | 0 | 1 | MIT | 2020-11-15T06:19:06 | 2020-11-15T05:36:19 | Python | UTF-8 | Python | false | false | 1,381 | py | import discord
import newcovidpredictor
client = discord.Client()
@client.event
async def on_ready():
    # Fired once the bot has connected and completed the gateway handshake.
    print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
    """Reply to the '$covidpred' command with the current COVID-19 forecast."""
    # Never react to our own messages (prevents reply loops).
    if message.author == client.user:
        return
    if message.content.startswith('$covidpred'):
        # Run the predictor only when the command is actually issued; the
        # original recomputed the forecast for every message on the server.
        end, tomorrow, new_tot, diff, pending, hospital, total = newcovidpredictor.predictor()
        embed = discord.Embed(title='COVID Predictor',
                              description='Hello. Thank you for running the COVID-19 Predictor. \n\n'
                              'We predict there will be ' + str(tomorrow) + ' new cases tomorrow and ' + str(new_tot) + ' '
                              'total cases tomorrow. \n\n'
                              'Today there are ' + str(pending) + ' pending cases yet to be deemed positive/negative. \n\n'
                              'There are currently ' + str(hospital) + ' hospitalized COVID-19 patients. \n\n'
                              'There were ' + str(total) + ' total cases today. \n\n'
                              'We predict that COVID-19 will end ' + str(end) + ' days from April 1st 2020. \n\n'
                              'Thank you come back tomorrow for a new prediction.')
        await message.channel.send(embed=embed)
client.run('Your Token Here')
| [
"noreply@github.com"
] | noreply@github.com |
b763c6f7ccf02fc091dbceba1f1aa1bff14ba011 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /8qD23E6XRMaWhyJ5z_9.py | 4d2e12540b5fba5e913c81d0957e9d467412bb06 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py |
def happiness_number(s):
happy = 0
sad = 0
happy += s.count(":)")
happy += s.count("(:")
sad -= s.count(":(")
sad -= s.count("):")
return happy + sad
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
94074b5e17d457e7a9d022d4332e0c95e6d45fa4 | c471e8d4d5cf59a68ccfbe79037cb256505b5502 | /venv/lib/python3.8/site-packages/hass_nabucasa/__init__.py | fb1c329404b66c3ff972eece6c2260f24dbd7c43 | [
"Apache-2.0"
] | permissive | vsevolodpohvalenko/home-assistant | b7fc37537929cc2c9989df357a8b76eb4de849e3 | 4ae19b7d5d843c65ba700922c1814755257eb4e0 | refs/heads/master | 2023-07-16T08:05:07.126996 | 2021-08-30T11:52:35 | 2021-08-30T11:52:35 | 401,318,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,947 | py | """Component to integrate the Home Assistant cloud."""
import asyncio
from datetime import datetime, timedelta
import json
import logging
from pathlib import Path
from typing import Awaitable, Callable, Coroutine, List
import aiohttp
import async_timeout
from atomicwrites import atomic_write
from jose import jwt
from .auth import CognitoAuth
from .client import CloudClient
from .cloudhooks import Cloudhooks
from .const import CONFIG_DIR, MODE_DEV, SERVERS, STATE_CONNECTED
from .google_report_state import GoogleReportState
from .iot import CloudIoT
from .remote import RemoteUI
from .utils import UTC, gather_callbacks, parse_date, utcnow
from .voice import Voice
_LOGGER = logging.getLogger(__name__)
class Cloud:
    """Store the configuration of the cloud connection."""

    def __init__(
        self,
        client: CloudClient,
        mode: str,
        cognito_client_id=None,
        user_pool_id=None,
        region=None,
        relayer=None,
        google_actions_report_state_url=None,
        subscription_info_url=None,
        cloudhook_create_url=None,
        remote_api_url=None,
        alexa_access_token_url=None,
        account_link_url=None,
        voice_api_url=None,
        acme_directory_server=None,
        thingtalk_url=None,
    ):
        """Create an instance of Cloud."""
        # Callbacks run by start()/stop() respectively.
        self._on_start: List[Callable[[], Awaitable[None]]] = []
        self._on_stop: List[Callable[[], Awaitable[None]]] = []
        self.mode = mode
        self.client = client
        # Auth tokens; populated by login()/start(), cleared by logout().
        self.id_token = None
        self.access_token = None
        self.refresh_token = None
        self.iot = CloudIoT(self)
        self.google_report_state = GoogleReportState(self)
        self.cloudhooks = Cloudhooks(self)
        self.remote = RemoteUI(self)
        self.auth = CognitoAuth(self)
        self.voice = Voice(self)

        # Set reference
        self.client.cloud = self

        # In development mode every endpoint is supplied explicitly by
        # the caller instead of being resolved from the SERVERS table.
        if mode == MODE_DEV:
            self.cognito_client_id = cognito_client_id
            self.user_pool_id = user_pool_id
            self.region = region
            self.relayer = relayer
            self.google_actions_report_state_url = google_actions_report_state_url
            self.subscription_info_url = subscription_info_url
            self.cloudhook_create_url = cloudhook_create_url
            self.remote_api_url = remote_api_url
            self.alexa_access_token_url = alexa_access_token_url
            self.acme_directory_server = acme_directory_server
            self.account_link_url = account_link_url
            self.voice_api_url = voice_api_url
            self.thingtalk_url = thingtalk_url
            return

        # Production/staging: look the endpoints up by mode name.
        info = SERVERS[mode]

        self.cognito_client_id = info["cognito_client_id"]
        self.user_pool_id = info["user_pool_id"]
        self.region = info["region"]
        self.relayer = info["relayer"]
        self.google_actions_report_state_url = info["google_actions_report_state_url"]
        self.subscription_info_url = info["subscription_info_url"]
        self.cloudhook_create_url = info["cloudhook_create_url"]
        self.remote_api_url = info["remote_api_url"]
        self.alexa_access_token_url = info["alexa_access_token_url"]
        self.account_link_url = info["account_link_url"]
        self.voice_api_url = info["voice_api_url"]
        self.acme_directory_server = info["acme_directory_server"]
        self.thingtalk_url = info["thingtalk_url"]

    @property
    def is_logged_in(self) -> bool:
        """Get if cloud is logged in."""
        return self.id_token is not None

    @property
    def is_connected(self) -> bool:
        """Return True if we are connected."""
        return self.iot.state == STATE_CONNECTED

    @property
    def websession(self) -> aiohttp.ClientSession:
        """Return websession for connections."""
        return self.client.websession

    @property
    def subscription_expired(self) -> bool:
        """Return a boolean if the subscription has expired."""
        # A 7-day grace period is granted past the nominal expiration.
        return utcnow() > self.expiration_date + timedelta(days=7)

    @property
    def expiration_date(self) -> datetime:
        """Return the subscription expiration as a UTC datetime object."""
        # The JWT claim holds only a date; anchor it to midnight UTC.
        return datetime.combine(
            parse_date(self.claims["custom:sub-exp"]), datetime.min.time()
        ).replace(tzinfo=UTC)

    @property
    def username(self) -> str:
        """Return the subscription username."""
        return self.claims["cognito:username"]

    @property
    def claims(self):
        """Return the claims from the id token."""
        return self._decode_claims(self.id_token)

    @property
    def user_info_path(self) -> Path:
        """Get path to the stored auth."""
        return self.path("{}_auth.json".format(self.mode))

    def register_on_start(self, on_start_cb: Callable[[], Awaitable[None]]):
        """Register an async on_start callback."""
        self._on_start.append(on_start_cb)

    def register_on_stop(self, on_stop_cb: Callable[[], Awaitable[None]]):
        """Register an async on_stop callback."""
        self._on_stop.append(on_stop_cb)

    def path(self, *parts) -> Path:
        """Get config path inside cloud dir.

        Async friendly.
        """
        return Path(self.client.base_path, CONFIG_DIR, *parts)

    def run_task(self, coro: Coroutine) -> Coroutine:
        """Schedule a task.

        Return a coroutine.
        """
        return self.client.loop.create_task(coro)

    def run_executor(self, callback: Callable, *args) -> asyncio.Future:
        """Run function inside executore.

        Return a awaitable object.
        """
        return self.client.loop.run_in_executor(None, callback, *args)

    async def fetch_subscription_info(self):
        """Fetch subscription info."""
        # Refresh the auth token first so the request is authorized.
        await self.auth.async_check_token()
        return await self.websession.get(
            self.subscription_info_url, headers={"authorization": self.id_token}
        )

    async def login(self, email: str, password: str) -> None:
        """Log a user in."""
        async with async_timeout.timeout(30):
            await self.auth.async_login(email, password)

        self.run_task(self.start())

    async def logout(self) -> None:
        """Close connection and remove all credentials."""
        await self.stop()

        self.id_token = None
        self.access_token = None
        self.refresh_token = None

        # Cleanup auth data
        if self.user_info_path.exists():
            await self.run_executor(self.user_info_path.unlink)

        await self.client.cleanups()

    def write_user_info(self) -> None:
        """Write user info to a file."""
        base_path = self.path()
        if not base_path.exists():
            base_path.mkdir()

        # atomic_write avoids leaving a half-written credentials file.
        with atomic_write(self.user_info_path, overwrite=True) as fp:
            fp.write(
                json.dumps(
                    {
                        "id_token": self.id_token,
                        "access_token": self.access_token,
                        "refresh_token": self.refresh_token,
                    },
                    indent=4,
                )
            )
        # Tokens are secrets: restrict the file to the owner.
        self.user_info_path.chmod(0o600)

    async def start(self):
        """Start the cloud component."""

        def load_config():
            """Load previously stored auth tokens from disk (blocking)."""
            # Ensure config dir exists
            base_path = self.path()
            if not base_path.exists():
                base_path.mkdir()

            if not self.user_info_path.exists():
                return None

            try:
                return json.loads(self.user_info_path.read_text())
            except (ValueError, OSError) as err:
                # Corrupt/unreadable auth file: tell the user and continue
                # as logged-out rather than crashing startup.
                path = self.user_info_path.relative_to(self.client.base_path)
                self.client.user_message(
                    "load_auth_data",
                    "Home Assistant Cloud error",
                    f"Unable to load authentication from {path}. [Please login again](/config/cloud)",
                )
                _LOGGER.warning(
                    "Error loading cloud authentication info from %s: %s", path, err
                )
                return None

        if not self.is_logged_in:
            info = await self.run_executor(load_config)
            if info is None:
                # No previous token data
                return

            self.id_token = info["id_token"]
            self.access_token = info["access_token"]
            self.refresh_token = info["refresh_token"]

            await self.client.logged_in()

        await gather_callbacks(_LOGGER, "on_start", self._on_start)

    async def stop(self):
        """Stop the cloud component."""
        if not self.is_logged_in:
            return

        await gather_callbacks(_LOGGER, "on_stop", self._on_stop)

    @staticmethod
    def _decode_claims(token):
        """Decode the claims in a token."""
        # Signature is NOT verified here; callers trust the token source.
        return jwt.get_unverified_claims(token)
| [
"vsevolod.pohvalenko@gmail.com"
] | vsevolod.pohvalenko@gmail.com |
04859fbd85175a3cb70f7db15f7f3fa3793ea838 | 4363315facc6ce5c3982e7f2f300cb6be2501a37 | /allennlp_spatial_reasoning_NLVR/DeepSpRL/NLVR2_image_together/train_test_functions/train_funcs.py | c002c0919844c6c8676290d3c16b1b62b4d6c01d | [] | no_license | HLR/DeepSpRL | cb80e8376687dd6919358f1b9745dda00c11f33c | d566a3711013817a122fbc113f8df779ee9d81de | refs/heads/master | 2020-03-31T13:40:20.642771 | 2019-09-04T12:09:46 | 2019-09-04T12:09:46 | 152,264,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,581 | py | '''
Institution: Tulane University
Name: Chen Zheng
Date: 10/23/2018
Purpose: Some functions help to train process.
'''
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import sys
sys.path.append('../')
from config.first_config import CONFIG
from train_test_functions.test_funcs import testIters
import time
import math
def asMinutes(s):
    """Format a duration of *s* seconds as 'Mm Ss' (integer display)."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (minutes, seconds)
def timeSince(since, percent):
    """Return 'elapsed (- estimated remaining)' given a start time and the
    fraction of work already completed."""
    elapsed = time.time() - since
    estimated_total = elapsed / (percent)
    remaining = estimated_total - elapsed
    return '%s (- %s)' % (asMinutes(elapsed), asMinutes(remaining))
def begin_to_train(input1, input2, input3, input_total, input_sen, input1_len, input2_len, input3_len,
                   input_total_len, input_sen_len, target, model, optimizer, criterion, hidden_size):
    """Run one optimization step on a single mini-batch.

    Returns a tuple ``(loss_value, nbr_correct)`` where ``nbr_correct`` is
    the number of argmax predictions matching ``target`` in this batch.
    """
    # hidden_tensor = model.initHidden(hidden_size)
    optimizer.zero_grad()
    # input_length = input_sen.size(0)
    y_pred = model(input1, input2, input3, input_total, input_sen, input1_len, input2_len, input3_len,
                   input_total_len, input_sen_len, CONFIG['batch_size'], CONFIG['embed_size'], CONFIG['hidden_size'])
    # Hard predictions: argmax over the class dimension.
    _, predicted = torch.max(y_pred, 1)
    correct = (predicted == target.view(-1)).sum().item()
    #print("correct = ", correct)
    loss = criterion(y_pred, target.view(-1))
    # print(y_pred.size(), target.view(-1).size())
    loss.backward()
    optimizer.step()
    # print("correct = ", correct, ' loss=', loss.item())
    return loss.item(), correct
def trainIters(input1, input2, input3, input_total, input_sen, input1_len, input2_len, input3_len,
               input_total_len, input_sen_len, target, model, hidden_size,
               input_0_test, input_1_test, input_2_test, input_total_test, input_tensor_test,
               input_0_len_test, input_1_len_test, input_2_len_test, input_total_len_test, input_length_test, target_test
               ):
    """Training driver: iterates epochs x batches, logging train/test results
    to the files named in CONFIG and saving the model state dict at the end.
    """
    # start = time.time()
    print_loss_total = 0  # Reset every print_every
    print_acc_total = 0
    # plot_loss_total = 0  # Reset every plot_every

    optimizer = optim.Adam(model.parameters(), lr=CONFIG['learning_rate'])
    criterion = torch.nn.CrossEntropyLoss()

    # Record the full CONFIG at the top of both result files.
    f_train = open(CONFIG['save_train_result_dir'], 'w')
    f_test = open(CONFIG['save_test_result_dir'], 'w')
    for key, value in CONFIG.items():
        f_train.write(str(key) + ' : ' + str(value) + '\n')
        f_test.write(str(key) + ' : ' + str(value) + '\n')

    for iter in range(1, CONFIG['n_iters'] + 1):
        bad_count = 0  # batches skipped due to exceptions in the guarded call
        print('it is the ', iter, 'iteration')
        for i in range(0, input1.size()[0], CONFIG['batch_size']):
            # print('----->',input3_len[i])
            # print('come here')
            # print(len(input1_len[i:i+CONFIG['batch_size']]), input1_len[i:i+CONFIG['batch_size']])
            # print(len(input2_len[i:i + CONFIG['batch_size']]), input2_len[i:i + CONFIG['batch_size']])
            # print(len(input3_len[i:i + CONFIG['batch_size']]), input3_len[i:i + CONFIG['batch_size']])
            # print('-------------------------------------------------------------')
            # NOTE(review): the same batch is trained once here and once again
            # inside the try block below — two optimizer steps per batch.
            # This looks like a leftover; confirm intent before removing.
            loss, correct = begin_to_train(input1[i:i+CONFIG['batch_size']],
                                           input2[i:i+CONFIG['batch_size']],
                                           input3[i:i+CONFIG['batch_size']],
                                           input_total[i:i+CONFIG['batch_size']],
                                           input_sen[i:i+CONFIG['batch_size']],
                                           input1_len[i:i+CONFIG['batch_size']],
                                           input2_len[i:i+CONFIG['batch_size']],
                                           input3_len[i:i+CONFIG['batch_size']],
                                           input_total_len[i:i+CONFIG['batch_size']],
                                           input_sen_len[i:i+CONFIG['batch_size']],
                                           target[i:i+CONFIG['batch_size']],
                                           model, optimizer, criterion, hidden_size)
            # loss, correct = begin_to_train(input1[i], input2[i], input3[i], input_sen[i], input1_len[i], input2_len[i],
            #                       input3_len[i], input_sen_len[i], target[i], model, optimizer, criterion, hidden_size)
            print_loss_total += loss
            print_acc_total += correct
            try:
                loss, correct = begin_to_train(input1[i:i + CONFIG['batch_size']],
                                               input2[i:i + CONFIG['batch_size']],
                                               input3[i:i + CONFIG['batch_size']],
                                               input_total[i:i + CONFIG['batch_size']],
                                               input_sen[i:i + CONFIG['batch_size']],
                                               input1_len[i:i + CONFIG['batch_size']],
                                               input2_len[i:i + CONFIG['batch_size']],
                                               input3_len[i:i + CONFIG['batch_size']],
                                               input_total_len[i:i + CONFIG['batch_size']],
                                               input_sen_len[i:i + CONFIG['batch_size']],
                                               target[i:i + CONFIG['batch_size']],
                                               model, optimizer, criterion, hidden_size)
                print_loss_total += loss
                print_acc_total += correct
            except:
                # print('the', i, 'th data has problem')
                bad_count += 1
                pass
        # if (iter*(input1.size()[0])+i) % CONFIG['print_every'] == 0:
        #     print_loss_avg = print_loss_total / CONFIG['print_every']
        #     print_loss_total = 0
        #     print('%s (%d %d%%) %.4f' % (timeSince(start, iter / CONFIG['n_iters']),
        #                                  iter, iter / CONFIG['n_iters'] * 100, print_loss_avg))
        if iter % 1 == 0:
            '''
            train part
            '''
            print_loss_avg = float(print_loss_total) / (input1_len.size()[0] // CONFIG['batch_size'])
            print('training acc is: ', float(print_acc_total) / (input1_len.size()[0] - bad_count), ', training loss is: ', print_loss_avg,
                  ', total training size is: ', (input1_len.size()[0] - bad_count))
            f_train.write('training acc is: ' + str(float(print_acc_total) / (input1_len.size()[0] - bad_count)) +
                          ', training loss is: ' + str(print_loss_avg) +
                          ', total training size is: ' + str((input1_len.size()[0] - bad_count)) + '\n')
            f_train.flush()
            print_acc_total = 0
            print_loss_total = 0
            '''
            test part
            '''
            test_res = testIters(input_0_test, input_1_test, input_2_test, input_total_test, input_tensor_test, input_0_len_test, input_1_len_test,
                                 input_2_len_test, input_total_len_test, input_length_test, target_test, model, CONFIG['hidden_size'])
            f_test.write(test_res)
            f_test.flush()

    # after training, save model
    f_train.close()
    f_test.close()
    torch.save(model.state_dict(), CONFIG['save_checkpoint_dir'])
    # model.save_state_dict(CONFIG['save_checkpoint_dir'])
    # load previously training model:
    # model.load_state_dict(torch.load('mytraining.pt'))
| [
"zhengchen02@baidu.com"
] | zhengchen02@baidu.com |
e2c6029f4567166af805e894ef6dbf2ccd3b4e48 | 86af752602afec596b50a6dee07d824a36698d81 | /sales/views.py | 9e0700b077ab0627ef3f9ef1f4d3db285be98eac | [] | no_license | ijharulislam/e-commece | 9f78d2516de7594d3b5ae60d09dfe245f13a617b | cbcdac6f4035804618835ac1045fcc685e746221 | refs/heads/master | 2021-01-10T09:42:16.506852 | 2015-05-24T02:10:30 | 2015-05-24T02:10:30 | 36,151,582 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,058 | py | from django.db import transaction
from django.db.models import Q
from django.conf import settings
from django.http import HttpResponseRedirect, HttpResponseBadRequest
from django.http.response import Http404
from django.core.urlresolvers import reverse
from django.template import Context
from django.template.loader import get_template
from django.shortcuts import render, get_object_or_404
from geo.models import Address
from sales.models import Cart, Order, PaymentMethod
from sales.forms import AddressForm
from catalog.views import CatalogBaseView, get_default_currency
from ideapub.exceptions import IdeapubError
from config.views import BaseView
from ideapub.utils.helpers import send_mail
@transaction.atomic
def add_to_cart(request):
    """Add ``quantity`` copies of a book (from POST data) to the session cart.

    Creates a new cart and stores its id in the session on first use. AJAX
    calls get the rendered basket fragment; others redirect to checkout.
    """
    book_id = int(request.POST['book_id'])

    # Checking if user already has cart in session
    # otherwise create a new cart for the user
    if 'cart_id' in request.session:
        cart_id = int(request.session['cart_id'])
        cart = Cart.get_cart(cart_id)
    else:
        cart = Cart.get_cart()
        request.session['cart_id'] = cart.id

    try:
        quantity = int(request.POST['quantity'])
        if quantity > 0:
            cart.add_item(book_id, quantity, request.user)
        else:
            # Zero/negative quantities are rejected the same way as non-numeric.
            raise ValueError()
    except ValueError:
        return HttpResponseBadRequest('Book quantity is not correct, please enter one or more books in numbers.')

    if request.is_ajax():
        default_currency = get_default_currency(request)
        return render(request, 'cart_basket.html', {'cart': cart, 'default_currency': default_currency})

    return HttpResponseRedirect(reverse('sales_checkout_cart'))
@transaction.atomic
def remove_from_cart(request):
    """Remove one book (from POST data) from the session cart.

    Falls back to an unsaved empty Cart when the session has no cart (e.g.
    the session expired), so rendering still works.
    """
    book_id = int(request.POST['book_id'])

    # Checking if user session has cart or session may already flushed
    # Cart an empty cart for user
    if 'cart_id' in request.session:
        cart_id = int(request.session['cart_id'])
        cart = Cart.get_cart(cart_id)
        cart.remove_item(book_id)
    else:
        cart = Cart()

    if request.is_ajax():
        default_currency = get_default_currency(request)
        return render(request, 'cart_basket.html', {'cart': cart, 'default_currency': default_currency})

    return HttpResponseRedirect(reverse('sales_checkout_cart'))
@transaction.atomic
def remove_all_from_cart(request):
    """Empty the session cart entirely (POST only)."""
    if request.method == 'POST':
        if 'cart_id' in request.session:
            cart_id = int(request.session['cart_id'])
            cart = Cart.get_cart(cart_id)
            cart.remove_all_items()
        else:
            # No cart in session: render against an unsaved empty cart.
            cart = Cart()

        if request.is_ajax():
            default_currency = get_default_currency(request)
            return render(request, 'cart_basket.html', {'cart': cart, 'default_currency': default_currency})

    return HttpResponseRedirect(reverse('sales_checkout_cart'))
class CheckoutBaseView(CatalogBaseView):
    """
    Base checkout steps view

    Subclasses define ``step_active``, ``steps_processed`` and
    ``get_breadcrumbs()``; this base injects them into the template context.
    """
    style_name = 'sales'
    checkout_template_name = 'checkout_base.html'

    def get_context_data(self, **kwargs):
        breadcrumbs = self.get_breadcrumbs()
        return super(CheckoutBaseView, self).get_context_data(
            breadcrumbs=breadcrumbs,
            step_active=self.step_active,
            steps_processed=self.steps_processed,
            checkout_template_name=self.checkout_template_name,
            **kwargs)
class CheckoutCartView(CheckoutBaseView):
    """
    Display user shopping cart

    GET shows the cart held in the session (or an empty one); POST updates
    the quantity of a single book already in the cart.
    """
    step_active = 'cart'
    steps_processed = ()
    template_name = 'checkout_cart.html'

    @classmethod
    def get_breadcrumbs(cls):
        return ({'name': 'Shopping Cart', 'url': reverse('sales_checkout_cart')},)

    def get(self, request):
        if 'cart_id' in request.session:
            cart_id = int(request.session['cart_id'])
            cart = Cart.get_cart(cart_id)
        else:
            cart = Cart()

        return super(CheckoutCartView, self).get(request, cart=cart)

    def post(self, request):
        error = None
        message = None
        if 'cart_id' in request.session:
            cart_id = int(request.session['cart_id'])
            book_id = int(request.POST['book_id'])
            cart = Cart.get_cart(cart_id)
            try:
                quantity = int(request.POST['quantity'])
                if quantity > 0:
                    cart.update_item(book_id, quantity)
                    message = 'Your shopping cart has been updated.'
                else:
                    raise ValueError()
            except ValueError:
                # Fixed typo ("boos" -> "books"); now consistent with the
                # message used by add_to_cart.
                error = 'Book quantity is not correct, please enter one or more books in numbers.'
        else:
            cart = Cart()

        return super(CheckoutCartView, self).get(request, cart=cart, error=error, message=message)
class CheckoutAddressView(CheckoutBaseView):
    """
    Base checkout view for billing and shipping address

    Subclasses set ``current_step``, ``next_step`` and
    ``session_address_key`` (the session slot the chosen address id goes in).
    """
    template_name = 'checkout_address.html'

    def get_context_data(self, **kwargs):
        # Offer addresses the visitor may reuse: those tied to the
        # authenticated user (by email or customer FK) plus any addresses
        # created earlier in this session.
        context = super(CheckoutAddressView, self).get_context_data(**kwargs)
        addresses_filter = None
        request = self.request

        if request.user.is_authenticated():
            addresses_filter = Q(email__iexact=request.user.email) | Q(customer=request.user)

        addresses_ids = request.session.get('addresses', None)
        if addresses_ids:
            addresses_filter = (addresses_filter | Q(id__in=addresses_ids)) if addresses_filter else Q(id__in=addresses_ids)

        if addresses_filter:
            context['addresses'] = list(Address.objects.filter(addresses_filter))

        context['current_step'] = self.current_step
        return context

    def get(self, request, **kwargs):
        # Fresh form; drop any previously selected address for this step.
        form = AddressForm()
        if self.session_address_key in request.session:
            del request.session[self.session_address_key]

        return super(CheckoutAddressView, self).get(request, form=form, **kwargs)

    def post(self, request):
        # Either an existing address id was picked, or a new address form
        # was submitted. On success, store the id and advance to next_step.
        form = AddressForm(request.POST)
        address_id = request.POST['address_id']

        if address_id:
            address_id = int(address_id)
            address = get_object_or_404(Address, id=address_id)

            # Binding address permenantly to authenticated user
            if address.customer is None and request.user.is_authenticated():
                address.customer = request.user
                address.save()

            request.session[self.session_address_key] = address_id
            return HttpResponseRedirect(reverse(self.next_step))

        if form.is_valid():
            data = form.cleaned_data
            customer = request.user if request.user.is_authenticated() else None
            address = Address.objects.create(
                customer=customer,
                first_name=data['first_name'],
                last_name=data['last_name'],
                email=data['email'],
                address1=data['address1'],
                address2=data['address2'],
                phone_number=data['phone_number'],
                fax_number=data['fax_number'],
                zip_or_postal_code=data['zip_or_postal_code'],
                city=data['city'],
                country=data['country'],
                state=data['state'],
                company=data['company'],
                created_by=str(request.user),
                updated_by=str(request.user))

            # Remember session-created addresses so they can be reused for
            # the other address step.
            addresses = self.request.session.get('addresses', [])
            addresses.append(address.id)
            self.request.session['addresses'] = addresses

            request.session[self.session_address_key] = address.id
            return HttpResponseRedirect(reverse(self.next_step))

        return super(CheckoutAddressView, self).get(request, form=form)
class CheckoutBillingView(CheckoutAddressView):
    """
    User billing address for order
    """
    step_active = 'billing'
    steps_processed = ['cart']
    current_step = 'sales_checkout_billing'
    next_step = 'sales_checkout_shipping'
    # Session slot where the chosen billing address id is stored.
    session_address_key = 'billing_address'

    @classmethod
    def get_breadcrumbs(cls):
        return ({'name': 'Billing Address', 'url': reverse('sales_checkout_billing')},)
class CheckoutShippingView(CheckoutAddressView):
    """
    Display user shipping address
    """
    step_active = 'shipping'
    steps_processed = ['cart', 'billing']
    current_step = 'sales_checkout_shipping'
    next_step = 'sales_checkout_payment'
    # Session slot where the chosen shipping address id is stored.
    session_address_key = 'shipping_address'

    @classmethod
    def get_breadcrumbs(cls):
        return ({'name': 'Shipping Address', 'url': reverse('sales_checkout_shipping')},)
class CheckoutPaymentView(CheckoutBaseView):
    """
    Display payment method for checkout

    Stores the chosen method (and PO number, when applicable) in the session
    and advances to the order step.
    """
    step_active = 'payment'
    steps_processed = ['cart', 'billing', 'shipping']
    template_name = 'checkout_payment.html'

    def get_context_data(self, **kwargs):
        payment_methods = PaymentMethod.get_all()
        return super(CheckoutPaymentView, self).get_context_data(payment_methods=payment_methods, **kwargs)

    @classmethod
    def get_breadcrumbs(cls):
        return ({'name': 'Payment', 'url': reverse('sales_checkout_payment')},)

    def get(self, request):
        # Pre-fill from session so revisiting the step keeps the selection.
        po_number = request.session.get('po_number', None)
        payment_method = request.session.get('payment_method', None)
        return super(CheckoutPaymentView, self).get(request, payment_method=payment_method, po_number=po_number)

    def post(self, request):
        error = None
        payment_method = request.POST.get('payment_method', None)
        payment_methods = dict(PaymentMethod.ALL)
        if payment_method and payment_method in payment_methods:
            if payment_method == PaymentMethod.PURCHASE_ORDER:
                # Purchase orders additionally require a PO number.
                po_number = request.POST['po_number']
                if po_number:
                    request.session['po_number'] = po_number
                    request.session['payment_method'] = payment_method
                    return HttpResponseRedirect(reverse('sales_checkout_order'))
                else:
                    error = 'Please provide purchase order number.'
            else:
                # Drop any stale PO number from a previous selection.
                if 'po_number' in request.session:
                    del request.session['po_number']
                request.session['payment_method'] = payment_method
                return HttpResponseRedirect(reverse('sales_checkout_order'))
        else:
            error = 'Please select payment method'

        return super(CheckoutPaymentView, self).get(request, error=error, payment_method=payment_method)
class CheckoutOrderView(CheckoutBaseView):
    """
    Display user order information

    GET shows the final summary; POST places the order and clears the
    checkout state from the session.
    """
    step_active = 'order'
    steps_processed = ['cart', 'billing', 'shipping', 'payment']
    template_name = 'checkout_order.html'
    decorators = [transaction.atomic]

    @classmethod
    def get_breadcrumbs(cls):
        return ({'name': 'Order', 'url': reverse('sales_checkout_order')},)

    def get(self, request, **kwargs):
        # Only show the summary when every previous step left its data in
        # the session; otherwise restart from the cart step.
        if ('cart_id' in request.session
                and 'payment_method' in request.session
                and CheckoutBillingView.session_address_key in request.session
                and CheckoutShippingView.session_address_key in request.session):
            cart = Cart.get_cart(int(request.session['cart_id']))
            payment_method = PaymentMethod.ALL_METHODS[request.session['payment_method']]
            billing_address = get_object_or_404(
                Address, id=int(request.session[CheckoutBillingView.session_address_key]))
            shipping_address = get_object_or_404(
                Address, id=int(request.session[CheckoutShippingView.session_address_key]))

            return super(CheckoutOrderView, self).get(
                request, cart=cart, payment_method=payment_method, billing_address=billing_address,
                shipping_address=shipping_address, **kwargs)

        return HttpResponseRedirect(reverse('sales_checkout_cart'))

    def post(self, request):
        error = None
        try:
            cart_id = request.session['cart_id']
            payment_method = request.session['payment_method']
            po_number = request.session.get('po_number', None)
            billing_address_id = request.session[CheckoutBillingView.session_address_key]
            shipping_address_id = request.session[CheckoutShippingView.session_address_key]
            if request.user.is_authenticated():
                user = request.user
                username = str(user)
            else:
                user = None
                username = str(request.user)

            currency_code = self.request.session.get('default_currency', self.primary_currency.code)
            order = Order.objects.place(cart_id, billing_address_id, shipping_address_id,
                                        payment_method, po_number, currency_code, user, username)

            # Checkout complete: drop all step data from the session.
            del request.session['cart_id']
            del request.session['payment_method']
            del request.session['billing_address']
            del request.session['shipping_address']
            request.session['order_confirmed'] = True

            if payment_method == PaymentMethod.CREDIT_CARD:
                return HttpResponseRedirect(reverse('payments_process_online', args=[order.id, order.receipt_code]))

            return HttpResponseRedirect(reverse('sales_checkout_receipt', args=[order.id, order.receipt_code]))
        except IdeapubError as e:
            # Bug fix: this module imports IdeapubError; the previous
            # "except DoorsaleError" referenced an undefined name and would
            # have raised a NameError whenever order placement failed.
            error = e.message

        return self.get(request, error=error)
class CheckoutReceiptView(CheckoutBaseView):
    """
    Display user order receipt

    On the first visit after checkout (session flag ``order_confirmed``)
    also e-mails a confirmation to the billing address.
    """
    step_active = 'receipt'
    steps_processed = []
    template_name = 'checkout_receipt.html'

    def get_breadcrumbs(self):
        return ({'name': 'Order Receipt', 'url': reverse('sales_checkout_receipt',
                                                         args=[self.order_id, self.receipt_code])},)

    def get(self, request, order_id, receipt_code):
        order_id = int(order_id)
        # Pop so the confirmation e-mail is only sent once per order.
        order_confirmed = request.session.pop('order_confirmed', None)
        try:
            order = Order.objects.prefetch_related('billing_address', 'shipping_address', 'payment_method',
                                                   'currency', 'items').get(id=order_id, receipt_code=receipt_code)
        except Order.DoesNotExist:
            raise Http404()

        if order_confirmed:
            # Sending order confirmation email to user's billing email address
            context = Context(
                {'order': order, 'user': request.user, 'SITE_NAME': self.get_config('SITE_NAME'), 'DOMAIN': self.get_config('DOMAIN')})
            msg_subject = get_template("sales/email/order_confirmation_subject.txt").render(context)
            # NOTE(review): body template path and DOMAIN value look
            # inconsistent with the subject template above (absolute-style
            # path; DOMAIN set from SITE_NAME) — confirm intent.
            context = Context({'order': order, 'user': request.user,
                               'SITE_NAME': self.get_config('SITE_NAME'), 'DOMAIN': self.get_config('SITE_NAME')})
            msg_text = get_template("/templates/email/order_confirmation.html").render(context)
            to_email = '%s <%s>' % (order.billing_address.get_name(), order.billing_address.email)
            send_mail(msg_subject, msg_text, [to_email], True)

        self.order_id = order_id
        self.receipt_code = receipt_code
        return super(CheckoutReceiptView, self).get(request, order=order, order_confirmed=order_confirmed)
class PrintReceiptView(BaseView):
    """
    Print user order receipt

    Printer-friendly version of the receipt; requires the matching
    ``receipt_code`` so receipts cannot be enumerated by order id alone.
    """
    template_name = 'print_receipt.html'

    def get(self, request, order_id, receipt_code):
        order_id = int(order_id)
        try:
            order = Order.objects.prefetch_related(
                'billing_address', 'shipping_address', 'payment_method',
                'currency', 'items').get(id=order_id, receipt_code=receipt_code)
        except Order.DoesNotExist:
            raise Http404()

        return super(PrintReceiptView, self).get(request, order=order)
| [
"rajbp02@gmail.com"
] | rajbp02@gmail.com |
b7bc6489f228ace5d7d44cd54e9fe2572d8a1f8d | 4865b66089f6ca2b6ffbb6522b82aa7dea6eade8 | /RNS/__init__.py | de31d78880d11807a3c7d7a823aae857cd87a120 | [
"MIT"
] | permissive | scottrcarlson/Reticulum | 5163acc6eed5ea99fdb484e4fa904c9c7afc3df7 | 8e19d5bd97a109935d5b748cb91f80de529a8d06 | refs/heads/master | 2020-09-02T11:46:23.145964 | 2019-11-10T12:56:04 | 2019-11-10T12:56:04 | 219,214,357 | 0 | 0 | MIT | 2019-11-02T21:10:12 | 2019-11-02T21:10:11 | null | UTF-8 | Python | false | false | 1,940 | py | import os
import sys
import glob
import time
import random
from .Reticulum import Reticulum
from .Identity import Identity
from .Link import Link
from .Transport import Transport
from .Destination import Destination
from .Packet import Packet
from .Packet import PacketReceipt
from .Resource import Resource
modules = glob.glob(os.path.dirname(__file__)+"/*.py")
__all__ = [ os.path.basename(f)[:-3] for f in modules if not f.endswith('__init__.py')]
LOG_CRITICAL = 0
LOG_ERROR = 1
LOG_WARNING = 2
LOG_NOTICE = 3
LOG_INFO = 4
LOG_VERBOSE = 5
LOG_DEBUG = 6
LOG_EXTREME = 7
LOG_STDOUT = 0x91
LOG_FILE = 0x92
loglevel = LOG_NOTICE
logfile = None
logdest = LOG_STDOUT
logtimefmt = "%Y-%m-%d %H:%M:%S"
random.seed(os.urandom(10))
def loglevelname(level):
    """Map a numeric log level to its display name ('Unknown' if unmapped)."""
    names = {
        LOG_CRITICAL: "Critical",
        LOG_ERROR: "Error",
        LOG_WARNING: "Warning",
        LOG_NOTICE: "Notice",
        LOG_INFO: "Info",
        LOG_VERBOSE: "Verbose",
        LOG_DEBUG: "Debug",
        LOG_EXTREME: "Extra",
    }
    return names.get(level, "Unknown")
def log(msg, level=3):
    """Emit *msg* when *level* is within the module-level ``loglevel``.

    Destination is stdout or the configured ``logfile`` depending on
    ``logdest``. NOTE: ``timestamp`` below is assigned but unused.
    """
    # TODO: not thread safe
    if loglevel >= level:
        timestamp = time.time()
        logstring = "["+time.strftime(logtimefmt)+"] ["+loglevelname(level)+"] "+msg
        if (logdest == LOG_STDOUT):
            print(logstring)

        if (logdest == LOG_FILE and logfile != None):
            file = open(logfile, "a")
            file.write(logstring+"\n")
            file.close()
def rand():
    """Return a pseudo-random float in [0.0, 1.0)."""
    return random.random()
def hexprint(data):
    """Print the delimited hex representation of *data*.

    Bug fix: previously called ``hexrep(hexrep)``, passing the function
    object instead of *data*.
    """
    print(hexrep(data))

def hexrep(data, delimit=True):
    """Return *data* as two-digit hex byte values, ':'-delimited by default.

    Iterates characters and uses ord(), so *data* is expected to be a str.
    """
    delimiter = ":" if delimit else ""
    return delimiter.join("{:02x}".format(ord(c)) for c in data)

def prettyhexrep(data):
    """Return an undelimited hex representation wrapped in angle brackets."""
    return "<" + "".join("{:02x}".format(ord(c)) for c in data) + ">"
def panic():
    """Terminate the process immediately (exit code 255), skipping cleanup."""
    exit_code = 255
    os._exit(exit_code)
"mark@adepta.io"
] | mark@adepta.io |
d58f040c51ca400611d8cdbbf0e0e9810f30c476 | b81711ae06421c2168a54e4b59f9533603a5f154 | /Weekend/LetCode Problems/ReverseString.py | 49e8ff7c03e7a17b12876d779a1f456486956d8e | [] | no_license | vxela/altera-batch5- | 359b69ca0c69f16afd412a90c55880595b198113 | a0934917dfe1bbb0720b15e046d6a47c55428579 | refs/heads/master | 2022-06-18T04:13:48.414471 | 2020-03-23T08:45:19 | 2020-03-23T08:45:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | s = [1, 2, 3, 4, 5, 6, 7]
print(s)
for i in range(len(s)//2) :
temp = s[i]
s[i] = s[len(s)-1-i]
s[len(s)-1-i] = temp
print(s) | [
"sholeh@alterra.id"
] | sholeh@alterra.id |
fa751f128d9ce6cc8de27b5d0d8262f701ca0df7 | 1dc727f5b326dd984962efa4d982ed9fe036c8fc | /cmsplugin_hanz_card/cms_plugins.py | 9342ebd45a4d621b861df6bbe5db794242c93700 | [] | no_license | hanztura/iamhanz | 2a7380dfe5aa9f05d72fdc1d77d77c950692d30c | 1aeee4c3404ed5048a48187e8b75f0e958c042ba | refs/heads/master | 2021-08-30T22:51:53.916315 | 2017-12-19T18:13:44 | 2017-12-19T18:13:44 | 113,453,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | from django.utils.translation import ugettext as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cmsplugin_filer_image.cms_plugins import FilerImagePlugin
from .models import Card
from .forms import CardForm
@plugin_pool.register_plugin
class CardPlugin(FilerImagePlugin):
    """django CMS plugin that renders a filer image as a styled "card".

    Reuses FilerImagePlugin's image/link machinery but swaps in the Card
    model/form and a card template, and reorganizes the admin fieldsets.
    """
    cache = False
    form = CardForm
    model = Card
    name = _("Card Plugin")
    render_template = "plugins/card_plugin.html"
    # Admin layout: card basics first, then image sizing, alignment, and a
    # collapsed "More" section for links and extra attributes.
    fieldsets = (
        (_('Card'), {
            'fields': [
                'style',
                'card_title',
                'caption_text',
                'image',
                'image_url',
                'alt_text',
            ]
        }),
        (_('Image resizing options'), {
            'fields': (
                'use_original_image',
                ('width', 'height',),
                ('crop', 'upscale',),
                'thumbnail_option',
                'use_autoscale',
            )
        }),
        (None, {
            'fields': ('alignment',)
        }),
        (_('More'), {
            'classes': ('collapse',),
            'fields': (
                'free_link',
                'page_link',
                'file_link',
                ('original_link', 'target_blank',),
                'link_attributes',
                'description',
            ),
        }),
    )
"hctura.official@gmail.com"
] | hctura.official@gmail.com |
d0c04d8d6b0caebcc5131c6d7c9185c6da08fb8a | b7ebcfa8429948745dbd9fb11f6d13c6905e9aa1 | /lib/panda/_obj.py | fd7b9191f276e967f2b4dc2a6fbb176e63be53ec | [] | no_license | SiewYan/PandaTree | c00c83a92044b59d460dd2d9a4319eef9f777045 | 5d2da2dc5d419c498a3a14870197aad360d6b071 | refs/heads/master | 2020-12-30T12:35:36.718617 | 2018-02-01T16:25:54 | 2018-02-01T16:25:54 | 91,395,990 | 0 | 1 | null | 2017-05-16T00:16:27 | 2017-05-16T00:16:27 | null | UTF-8 | Python | false | false | 2,279 | py | from base import Definition
from oneliner import Include
from constexpr import Constant, Enum
from refbranch import RefBranch
from refvbranch import RefVectorBranch
from generic import GenericBranch
from objbranch import ObjBranch
from branch import Branch
from reference import Reference
from function import Function
from obj import Object
def __init__(self, name, source):
    """
    Constructor called either by PhysicsObject or Tree.
    Parse the source text block and collect all information on this object.

    Reads *source* line by line, trying each definition parser in order
    (first match wins, signalled by Definition.NoMatch on failure). Note the
    order matters: more specific branch types are tried before generic ones.
    Parsing stops at a blank line or the first line no parser accepts.
    """
    self.name = name

    self.includes = []
    self.constants = []
    self.enums = []
    self.objbranches = []
    self.branches = []
    self.references = []
    self.functions = []

    while True:
        line = source.readline()
        line = line.strip()
        if line == '':
            break

        try:
            self.includes.append(Include(line))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.enums.append(Enum(line, source))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.constants.append(Constant(line, source))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.branches.append(RefBranch(line))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.branches.append(RefVectorBranch(line))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.objbranches.append(ObjBranch(line))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.branches.append(Branch(line))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.branches.append(GenericBranch(line))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.references.append(Reference(line))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.functions.append(Function(line, source))
            continue
        except Definition.NoMatch:
            pass

        # No parser recognized the line: end of this object's block.
        break

# Monkey-patch the parser onto the Object class imported above.
Object.__init__ = __init__
| [
"yiiyama@mit.edu"
] | yiiyama@mit.edu |
db43ee1f9ac5c213db5b0a430f22e316fd4513b6 | 842c21f0a93c5e2ebc43501f8b9f3fd25c39851a | /shepherd.py | b0dab82befea3d847fec33adfdee43485b074fb6 | [] | no_license | mortrevere/shepherd | 1062ac6117d6bf1613c14bc96ce76ba87f986f01 | b2fa572f4893bd11b9ced3d01effd8582e35311e | refs/heads/master | 2020-06-27T04:15:20.035574 | 2019-08-08T09:25:02 | 2019-08-08T09:25:02 | 199,841,961 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,609 | py | #! /usr/local/bin/python3.7
import socket
import sys
from http.server import BaseHTTPRequestHandler, HTTPServer
import asyncio
import json
import os
import time
hosts = {};
class S(BaseHTTPRequestHandler):
    """HTTP handler serving the collected host reports as a JSON document."""

    def _set_headers(self):
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        # Allow the dashboard page to fetch this from another origin.
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()

    def do_GET(self):
        self._set_headers()
        # Attach the current git HEAD so clients can compare each host's
        # reported release id against the latest one.
        latest_release = os.popen('git rev-parse HEAD').read().strip()
        for hostname in hosts:
            hosts[hostname]['latest_release'] = latest_release
        self.wfile.write(bytes(json.dumps(hosts), 'utf-8'))

    def log_message(self, format, *args):
        # Silence the default per-request access log.
        return
def http_server(server_class=HTTPServer, handler_class=S, port=8088):
    """Run the blocking JSON HTTP endpoint forever (meant for an executor)."""
    server_address = ('', port)
    httpd = server_class(server_address, handler_class)
    print('Starting httpd...')
    httpd.serve_forever()
async def handle_client(reader, writer):
    """Read one agent's full report from the socket, then parse it.

    Accumulates 255-byte chunks into *buf* until EOF or an empty read, then
    hands the whole buffer to parseHostReport.
    """
    request = None
    buf = ''
    # One progress dot per connection.
    print('.', end='')
    sys.stdout.flush()
    while True:
        if reader.at_eof():
            print('closed')
            break
        try:
            request = (await reader.read(255)).decode('utf8')
            if not request:
                writer.close()
                break
            buf += request
        except Exception as e:
            # NOTE(review): no break here — a persistently failing reader
            # would loop until at_eof() becomes true; confirm intended.
            print(e)
            writer.close()
    try:
        parseHostReport(buf)
    except:
        # Malformed report: dump it for debugging and keep serving.
        print(buf)
        pass
def parseHostReport(report):
    """Parse one plain-text host report into the module-level ``hosts`` dict.

    The format is line-oriented with fixed positions: release id, hostname,
    ip, disk usage, time, load averages, internet/dns status, ram, temp,
    cpu, followed by variable sections (PING blocks, PROCESSES,
    WEBSERVICES). NOTE(review): field offsets are assumed from the indexing
    below — confirm against the agent script that emits the report.
    """
    host = {'updatedAt': time.time()}
    lines = report.split('\n')
    linesChunks = [line.split(' ') for line in lines]
    linesChunks = [[chunk for chunk in chunks if chunk != ''] for chunks in linesChunks] #python
    hostname = lines[1]
    host['release_id'] = lines[0]
    host['ip'] = linesChunks[2][0]
    host['disk_usage'] = linesChunks[3][-2]
    host['time'] = linesChunks[4][0]
    load = linesChunks[5]
    # Strip the trailing comma from the first two load averages.
    host['load'] = [load[-3][0:-1], load[-2][0:-1], load[-1]]
    host['internet'] = (linesChunks[6][1] == 'up')
    host['dns'] = (linesChunks[7][1] == 'up')
    ram = linesChunks[8]
    host['ram'] = int(ram[2])*100/int(ram[1])
    # Temperatures reported in millidegrees; average and convert.
    temp = [int(a) for a in linesChunks[9][1:]]
    host['temp'] = sum(temp)/(1000*len(temp))
    host['cpu'] = float(linesChunks[10][1])
    LANpings = {}
    processes = []
    ws = []
    currentLANpeer = ''
    i = 11
    for line in lines[i:]:
        cline = line.strip()
        chunks = cline.split(' ')
        chunks = [chunk for chunk in chunks if chunk != '']
        if len(chunks) != 0:
            if chunks[0] == 'PING':
                # Peer defaults to False until a '64 bytes from ...' reply
                # line provides a round-trip time.
                currentLANpeer = chunks[1]
                LANpings[currentLANpeer] = False
            if chunks[0] == '64':
                LANpings[currentLANpeer] = chunks[-2].split('=')[1]
            if chunks[0] == 'PROCESSES':
                processLines = linesChunks[i+1:i+1+int(chunks[1])]
                processes = [{'load' : a[0], 'pid' : a[1], 'user' : a[2], 'process' : a[3]} for a in processLines]
            if chunks[0] == 'WEBSERVICES' and int(chunks[1]) != 0:
                wsLines = linesChunks[i+1:i+1+int(chunks[1])]
                ws = [{'port' : a[0][3:], 'process' : a[1]} for a in wsLines]
        i += 1
    if hostname:
        host['LAN'] = LANpings
        host['processes'] = processes
        host['webservices'] = ws
        hosts[hostname] = host
# Wire everything onto one event loop: the TCP report collector on :7777
# plus the blocking HTTP JSON endpoint running in a worker thread.
loop = asyncio.get_event_loop()
loop.create_task(asyncio.start_server(handle_client, '', 7777))
loop.run_in_executor(None, http_server)
loop.run_forever()
| [
"leo.vincent1337@gmail.com"
] | leo.vincent1337@gmail.com |
31d9a115cbd2a43f5ea11e98d4b3a4cde1224566 | 6bdb32ddbd72c4337dab12002ff05d6966538448 | /gridpack_folder/mc_request/LHEProducer/Spin-0/Radion_ZZ_ZlepZhad/Radion_ZZ_ZlepZhad_narrow_M2500_13TeV-madgraph_cff.py | 55b8ce6b6aafef4854d46112ee4cdb202e8e7861 | [] | no_license | cyrilbecot/DibosonBSMSignal_13TeV | 71db480de274c893ba41453025d01bfafa19e340 | d8e685c40b16cde68d25fef9af257c90bee635ba | refs/heads/master | 2021-01-11T10:17:05.447035 | 2016-08-17T13:32:12 | 2016-08-17T13:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | import FWCore.ParameterSet.Config as cms
# link to cards:
# https://github.com/cms-sw/genproductions/tree/b9fddd83b7d8e490347744408902940547e8135f/bin/MadGraph5_aMCatNLO/cards/production/13TeV/exo_diboson/Spin-0/Radion_ZZ_ZlepZhad/Radion_ZZ_ZlepZhad_narrow_M2500
# External LHE producer: runs the MadGraph gridpack for the
# Radion -> ZZ (Zlep Zhad), narrow width, M = 2500 GeV sample.
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
    args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.2.2/exo_diboson/Spin-0/Radion_ZZ_ZlepZhad/narrow/v1/Radion_ZZ_ZlepZhad_narrow_M2500_tarball.tar.xz'),
    nEvents = cms.untracked.uint32(5000),
    numberOfParameters = cms.uint32(1),
    outputFile = cms.string('cmsgrid_final.lhe'),
    scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
| [
"sudha.ahuja@cern.ch"
] | sudha.ahuja@cern.ch |
579528bb6dac8b7a786b56c7fa8aebcbc771d0bc | dd15b5ed1050bdd6de3d9a0ee0c448d2ccba09e0 | /assets/python/mm_surface.py | f39a152bc61b12bf8992ad5c81b8cbbfa09dac2c | [] | no_license | rblack42/nffs-2021-symposium | 7f5c581fb46c23dd6896a37e0ac429b22d9de823 | 496696a43958fdf6ad5870b730675ed0b097e8cc | refs/heads/master | 2023-02-24T02:16:01.579345 | 2021-01-27T21:47:15 | 2021-01-27T21:47:15 | 329,161,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,925 | py | import math
class Surface(object):
    """Circular-arc flying surface with rounded tips and simple dihedral.

    Camber is given as a percent of chord. NOTE(review): several methods
    still emit debug prints; units of the inputs are not specified here.
    """

    def __init__(self,
                 span,           # with dihedral
                 chord,          # root chord
                 camber,         # root camber
                 tip_radius,     # outer LE tip radius
                 center_span,    # center section span (<= span)
                 tip_elevation   # dihedral tip elevation
                 ):
        self.root_chord = chord
        # Convert camber from percent-of-chord to an absolute height.
        self.root_camber = camber * chord / 100
        self.center_span = center_span
        self.tip_radius = tip_radius
        self.span = span
        self.tip_elevation = tip_elevation

    def radius(self, c, t):
        # Radius of the circular arc with chord c and height (camber) t.
        return c**2/(8*t) + t/2

    def arc_height(self, x_percent, chord, camber):
        # Height of the circular-arc camber line at x_percent of the chord.
        xr = x_percent * chord
        rad = self.radius(chord, camber)
        cx = chord/2
        cy = -(rad-camber)
        fact = math.sqrt(rad**2 - (xr - cx)**2)
        xh = cy + fact
        print(xr,xh, rad, camber, cx,cy,rad,fact)
        return xh

    def get_chord(self, y):
        # Local chord at spanwise station y; constant until the rounded tip
        # region, then follows the tip-radius circle.
        r = self.tip_radius
        c = self.root_chord
        yt = y - (self.span/2 - r)
        print("->",y,r, yt)
        if yt < 0:
            return c
        f = r**2 - yt**2
        print("F:",f)
        return c - r + math.sqrt(f)

    def gen_mesh(self,nx, ny):
        # Print an (nx+1) x (ny+1) grid of planform points over the half span.
        dx = 1.0/nx
        dy = 1.0/ny
        print(dx,dy)
        for y in range(ny+1):
            yr = y * dy * self.span/2  # 0-span
            ch = self.get_chord(yr)
            # Trailing edge kept straight: offset x by the chord reduction.
            x0 = self.root_chord - ch;
            for x in range(nx+1):
                xr = x0 + x * dx * ch
                print("(%3.2f,%3.2f)" % (xr,yr), end="")
            print()

    def run(self):
        # Report dihedral angle (degrees) and emit the planform mesh.
        tip_span = (self.span - self.center_span)/2
        self.dihedral_angle = \
            math.atan2(self.tip_elevation, tip_span)
        print (self.dihedral_angle * 180/math.pi)
        self.gen_mesh(nx=5,ny=50)
if __name__ == "__main__":
s = Surface(18,5,6,2,10,1.75);
s.run()
| [
"roie.black@gmail.com"
] | roie.black@gmail.com |
d468d136d9cebb4e44f01f359725a327063eb133 | 241dc11ca83565b0e4626277c2b4226d2bb2a7d0 | /Dhein_Elegans_Projects/Code/time_evol.py | ea4bc4da2e81a8b4b147a217fcdce107a1fb051d | [] | no_license | SES591/C.-elegans | 7badaaf0317e6b5f67fd41e6a9d867d2f569a2cd | 08f0ef49f7002dd4847b27c7dc9afac8e75da989 | refs/heads/master | 2016-08-12T13:39:38.032623 | 2016-05-05T23:26:30 | 2016-05-05T23:26:30 | 50,062,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,496 | py | #!/usr/bin/python
#bioinfo.py
__author__ = '''Hyunju Kim'''
import os
import sys
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from collections import OrderedDict
import input_net as inet
import updating_rule as ur
################# BEGIN: decimal_to_binary(nodes_list, decState, Nbr_States=2) ########################
def decimal_to_binary(nodes_list, decState, Nbr_States=2): # more left in the nodes list means higher order of 2 in binary
biStates = {}
x = len(nodes_list) -1
for u in nodes_list:
biStates[u] = decState / np.power(Nbr_States, x)
decState = decState % np.power(Nbr_States, x)
x = x - 1
return biStates
################# END: decimal_to_binary(nodes_list, decState, Nbr_States=2) ########################
################# BEGIN: binary_to_decimal(nodes_list, biStates, Nbr_States=2) ########################
def binary_to_decimal(nodes_list, biStates, Nbr_States=2): # more left in the nodes list means higher order of 2 in binary
decState = 0
x = len(nodes_list) -1
for u in nodes_list:
decState = decState + biStates[u] * np.power(Nbr_States, x)
x = x - 1
return decState
################# END: binary_to_decimal(nodes_list, biStates, Nbr_States=2) ########################
################# BEGIN: biological_sequence(net, nodes_list, Nbr_States=2) ########################
def biological_sequence(net, nodes_list, bio_initStates, fileName, Nbr_States=2):
bioSeq = []
currBiStates = bio_initStates
finished = False
while(not finished):
oneDiff = 0
prevBiStates = currBiStates.copy()
bioSeq.append(prevBiStates)
currBiStates = ur.sigmoid_updating(net, prevBiStates)
for u in nodes_list:
if abs(prevBiStates[u] - currBiStates[u]) > 0:
oneDiff = 1
break
finished = (oneDiff < 1)
OUTPUT_FILE = open(fileName, 'w')
OUTPUT_FILE.write('time step')
for u in nodes_list:
OUTPUT_FILE.write('\t%s'%(u))
OUTPUT_FILE.write('\n')
for i in range(len(bioSeq)):
OUTPUT_FILE.write('%d'%i)
for u in nodes_list:
OUTPUT_FILE.write('\t%d'%(bioSeq[i][u]))
OUTPUT_FILE.write('\n')
#return bioSeq
################# END: biological_sequence(net, nodes_list, Nbr_States=2) ########################
<<<<<<< HEAD
################# BEGIN: time_series_en(net, nodes_list, Nbr_States=2, MAX_TimeStep=20, Transition_Step=0) ########################
def time_series_all(net, nodes_list, Nbr_Initial_States, Nbr_States, MAX_TimeStep=20):
=======
################# BEGIN: ensemble_time_series(net, nodes_list, Nbr_States=2, MAX_TimeStep=20, Transition_Step=0) ########################
def time_series(net, nodes_list, Nbr_Initial_States, Nbr_States, MAX_TimeStep=20):
>>>>>>> origin/master
'''
Description:
-- compute TE for every pair of nodes using distribution from all possible initial conditions or an arbitrary set of initial conditions
Arguments:
-- 1. net
-- 2. nodes_list
-- 3. Initial_States_List
-- 4. Nbr_States
-- 5. MAX_TimeStep
Return:
-- 1. timeSeriesData
'''
#Nbr_Nodes = len(net.nodes())
#Nbr_All_Initial_States = np.power(Nbr_States, Nbr_Nodes)
timeSeriesData = {}
for n in net.nodes():
timeSeriesData[n] = {}
for initState in range(Nbr_Initial_States):
timeSeriesData[n][initState] = []
for initDecState in range(Nbr_Initial_States):
currBiState = decimal_to_binary(nodes_list, initDecState, Nbr_States)
for step in range(MAX_TimeStep):
prevBiState = currBiState.copy()
for n in nodes_list:
timeSeriesData[n][initDecState].append(prevBiState[n])
currBiState = ur.sigmoid_updating(net, prevBiState)
return timeSeriesData
################# END: time_series_en(net, nodes_list, Nbr_States=2, MAX_TimeStep=20) ########################
################# BEGIN: net_state_transition_map(net, nodes_list, Nbr_States=2) ########################
def net_state_transition(net, nodes_list, Nbr_States=2):
'''
Arguments:
1. net
2. Nbr_States
Return:
1. decStateTransMap
'''
Nbr_Nodes = len(net.nodes())
Nbr_All_Initial_States = np.power(Nbr_States, Nbr_Nodes)
decStateTransMap = nx.DiGraph()
for prevDecState in range(Nbr_All_Initial_States):
prevBiState = decimal_to_binary(nodes_list, prevDecState, Nbr_States)
currBiState = ur.sigmoid_updating(net, prevBiState)
currDecState = binary_to_decimal(nodes_list, currBiState, Nbr_States)
decStateTransMap.add_edge(prevDecState, currDecState)
return decStateTransMap
################# END: net_state_transition_map(net, nodes_list, Nbr_States=2) ########################
################# BEGIN: find_attractor_old(decStateTransMap) ########################
def find_attractor_old(decStateTransMap):
'''
Arguments:
1. decStateTransMap
Return:
1. attractor
'''
attractor_list = nx.simple_cycles(decStateTransMap) #in case of deterministic system, any cycle without considering edge direction will be directed cycle.
attractors = {}
attractors['fixed'] = []
attractors['cycle'] = []
for u in attractor_list:
if len(u) == 1:
attractors['fixed'].append(u)
else:
attractors['cycle'].append(u)
return attractors
################# END: find_attractor_old(decStateTransMap) ########################
################# BEGIN: attractor_analysis(decStateTransMap) ########################
def find_attractor(decStateTransMap):
'''
Arguments:
-- 1. decStateTransMap
Return:
-- attractor
'''
attractor_list = nx.simple_cycles(decStateTransMap) #in case of deterministic system, any cycle without considering edge direction will be directed cycle.
attractors = {}
#attractors['fixed'] = []
#attractors['cycle'] = []
undirectedMap = nx.DiGraph.to_undirected(decStateTransMap)
for u in attractor_list:
attractors[u[0]] = {}
if len(u) == 1:
attractors[u[0]]['type'] = 'fixed'
else:
attractors[u[0]]['type'] = 'cycle'
for v in attractors.iterkeys():
basin = nx.node_connected_component(undirectedMap, v)
attractors[v]['basin'] = basin
attractors[v]['basin-size'] = len(basin)
sorted_attractors = OrderedDict(sorted(attractors.items(), key=lambda kv: kv[1]['basin-size'], reverse=True))
return sorted_attractors
################# END: attractor_analysis(decStateTransMap) ########################
################# BEGIN: time_series_pa(net, nodes_list, Initial_States_List, Nbr_States=2, MAX_TimeStep=20) ########################
def time_series_pa(net, nodes_list, Initial_States_List, Nbr_States, MAX_TimeStep=20):
'''
Description:
-- compute TE for every pair of nodes using distribution from all initial conditions that converge to the primary or biological attractor
Arguments:
-- 1. net
-- 2. nodes_list
-- 3. Initial_States_List
-- 4. Nbr_States
-- 5. MAX_TimeStep
Return:
-- 1. timeSeriesData (only for primary attractor)
'''
timeSeriesData = {}
for n in net.nodes():
timeSeriesData[n] = {}
for initState in range(len(Initial_States_List)):
timeSeriesData[n][initState] = []
for initState in range(len(Initial_States_List)):
initDecState = Initial_States_List[initState]
currBiState = decimal_to_binary(nodes_list, initDecState, Nbr_States)
for step in range(MAX_TimeStep):
prevBiState = currBiState.copy()
for n in nodes_list:
timeSeriesData[n][initState].append(prevBiState[n])
currBiState = ur.sigmoid_updating(net, prevBiState)
return timeSeriesData
################# END: time_series_pa(net, nodes_list, Nbr_States=2, MAX_TimeStep=20) ########################
################# BEGIN: time_series_one(net, nodes_list, Initial_State, Nbr_States=2, MAX_TimeStep=20) ########################
def time_series_one(net, nodes_list, Initial_State, Nbr_States, MAX_TimeStep=20):
'''
Description:
-- compute TE for every pair of nodes using distribution from all initial conditions that converge to the primary or biological attractor
Arguments:
-- 1. net
-- 2. nodes_list
-- 3. Initial_States_List
-- 4. Nbr_States
-- 5. MAX_TimeStep
Return:
-- 1. timeSeriesData (only for primary attractor)
'''
timeSeriesData = {}
for n in net.nodes():
timeSeriesData[n] = {}
timeSeriesData[n][0] = []
currBiState = Initial_State
for step in range(MAX_TimeStep):
prevBiState = currBiState.copy()
for n in nodes_list:
timeSeriesData[n][0].append(prevBiState[n])
currBiState = ur.sigmoid_updating(net, prevBiState)
return timeSeriesData
################# END: time_series_one(net, nodes_list, Initial_State, Nbr_States=2, MAX_TimeStep=20) ########################
def main():
'''
print "time_evol module is the main code."
## to import a network of 3-node example
EDGE_FILE = '../data/example/example-net-edges.dat'
NODE_FILE = '../data/example/example-net-nodes.dat'
net = inet.read_network_from_file(EDGE_FILE, NODE_FILE)
nodes_list = inet.build_nodes_list(NODE_FILE)
## to obtain time series data for all possible initial conditions for 3-node example network
timeSeriesData = ensemble_time_series(net, nodes_list, 2, 10)#, Nbr_States=2, MAX_TimeStep=20)
initState = 1
biStates = decimal_to_binary(nodes_list, initState)
print 'initial state', biStates
## to print time series data for each node: a, b, c starting particualr decimal inital condition 1
print 'a', timeSeriesData['a'][1]
print 'b', timeSeriesData['b'][1]
print 'c', timeSeriesData['c'][1]
## to obtain and visulaize transition map in the network state space
decStateTransMap = net_state_transition(net, nodes_list)
nx.draw(decStateTransMap)
plt.show()
## to find fixed point attractors and limited cycle attractors with given transition map.
attractors = find_attractor(decStateTransMap)
print attractors
'''
## to obtain biological sequence for the Fission Yeast Cell-Cycle Net starting from biological inital state
EDGE_FILE = '../data/fission-net/fission-net-edges.txt'
NODE_FILE = '../data/fission-net/fission-net-nodes.txt'
<<<<<<< HEAD
#BIO_INIT_FILE = '../data/fission-net/fission-net-bioSeq-initial.txt'
=======
BIO_INIT_FILE = '../data/fission-net/fission-net-bioSeq-initial.txt'
>>>>>>> origin/master
net = inet.read_network_from_file(EDGE_FILE, NODE_FILE)
nodes_list = inet.build_nodes_list(NODE_FILE)
bio_initStates = inet.read_init_from_file(BIO_INIT_FILE)
outputFile = '../results/fission-net/fission-net-bioSeq.txt'
bioSeq = biological_sequence(net, nodes_list, bio_initStates, outputFile)
if __name__=='__main__':
main()
| [
"Kelle Dhein"
] | Kelle Dhein |
d3fc31344ad05d1cccd859ad51a3d6332059f748 | 8b7559f7b69173109d7b6e89ab912dbb8b675c3f | /main/tests/test_models.py | 104c45dcd9bc75e5d3b2024147d13fa149a12099 | [] | no_license | GoodnessEzeokafor/django-bookstore | 7859b74ad0bddd32415b6bd917d37c008ba38a73 | dc47e7fe201cf2a62a93c30730fa1e72a6707f93 | refs/heads/master | 2023-02-14T14:13:58.941227 | 2021-01-08T10:14:29 | 2021-01-08T10:14:29 | 327,135,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | from decimal import Decimal
from django.test import TestCase
from main import models
class TestModel(TestCase):
def test_active_manager_works(self):
models.Product.objects.create(
name="The cathedral and the bazaar",
price=Decimal("10.0")
)
models.Product.objects.create(
name="Pride and Prejudice",
price=Decimal("2.00")
)
models.Product.objects.create(
name="A Tale of Two Cities",
price = Decimal("2.00"),
active=False
)
self.assertEqual(len(models.Product.objects.active()), 2) | [
"gootech442@yahoo.com"
] | gootech442@yahoo.com |
af58e7975c92cb99766c3323524c22904d9ae372 | 55fae905406470ed60aadf3460115c1b1b0a49d9 | /chap2/02-7_bool.py | 62bc409ec6b1043cfc516b447caa4745b8cf1617 | [] | no_license | junmin98/JumpToPython_practice | e858abe8bb397a8ac39f50fcc0738d6b6cfd3dd7 | 159d43758aad298da661313618139eae943a3bab | refs/heads/main | 2023-06-07T23:04:46.503935 | 2023-06-02T15:50:43 | 2023-06-02T15:50:43 | 322,387,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | a = True
b = False
print(1 == 1)
print(bool(0)) # False
print(bool('')) # False | [
"noreply@github.com"
] | noreply@github.com |
ad7b0b5fccd2951a4d8e3d28056322b5a64c1f14 | f9646f1a269b0108b174b68172424f19ea563da5 | /lande/utilities/shell.py | b81a52fb5394cf59fcfe24f8cce4cf478e85e955 | [] | no_license | zimmerst/PhD-python | 07a4ef2dd66e2bc9ac08861a04acbf934cb0ae49 | 21d24c0ae70925201b05f73c8044cc39639f8859 | refs/heads/master | 2020-12-26T04:56:27.165230 | 2014-01-27T00:55:17 | 2014-01-27T00:55:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py |
def format_command(*args, **kwargs):
r""" Create a string suitable for running a shell program command where
*args are the positional arguments for the command and
**kwargs are the keyword arguments for the script
For example:
>>> print format_command('ls','-al', '--author')
ls \
-al \
--author
>>> print format_command('gtlike', evfile='ft1.fits')
gtlike \
evfile=ft1.fits
If you need parameters with dashes, you can pass in a dictionary:
>>> print format_command('du', '-h', {'--max-depth':3})
du \
-h \
--max-depth=3
This function is not (yet) very robust, but does what I need.
"""
line_break = ' \\' # slash
tab=' '
sep = '\n'.join([line_break,tab])
args=list(args)
for i,v in enumerate(args):
if isinstance(v,dict):
kwargs.update(args.pop(i))
if args < 1: raise Exception("Command name must be passed into script")
return sep.join(map(str,args) + ['%s=%s' % (a,b) for a,b in kwargs.items()])
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"lande@37a9682d-6443-41a2-8582-b44379b6e86f"
] | lande@37a9682d-6443-41a2-8582-b44379b6e86f |
20bb0ef25901482c47de8542f21e7e78fb02f09f | 614cad3588af9c0e51e0bb98963075e3195e92f5 | /models/vote_net/backbone_module.py | 674167186b7eb5fdbacd2d4702c1c38abea4bcc9 | [] | no_license | dragonlong/haoi-pose | 2810dae7f9afd0a26b3d0a5962fd9ae8a5abac58 | 43388efd911feecde588b27a753de353b8e28265 | refs/heads/master | 2023-07-01T14:18:29.029484 | 2021-08-10T10:57:42 | 2021-08-10T10:57:42 | 294,602,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,862 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, '../models/pointnet2'))
from pointnet2_modules import PointnetSAModuleVotes, PointnetFPModule
class Pointnet2Backbone(nn.Module):
r"""
Backbone network for point cloud feature learning.
Based on Pointnet++ single-scale grouping network.
Parameters
----------
input_feature_dim: int
Number of input channels in the feature descriptor for each point.
e.g. 3 for RGB.
"""
def __init__(self, input_feature_dim=0):
super().__init__()
self.sa1 = PointnetSAModuleVotes(
npoint=2048,
radius=0.2,
nsample=64,
mlp=[input_feature_dim, 64, 64, 128],
use_xyz=True,
normalize_xyz=True
)
self.sa2 = PointnetSAModuleVotes(
npoint=1024,
radius=0.4,
nsample=32,
mlp=[128, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.sa3 = PointnetSAModuleVotes(
npoint=512,
radius=0.8,
nsample=16,
mlp=[256, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.sa4 = PointnetSAModuleVotes(
npoint=256,
radius=1.2,
nsample=16,
mlp=[256, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.fp1 = PointnetFPModule(mlp=[256+256,256,256])
self.fp2 = PointnetFPModule(mlp=[256+256,256,256])
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, pointcloud: torch.cuda.FloatTensor, end_points=None):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_feature_dim) tensor
Point cloud to run predicts on
Each point in the point-cloud MUST
be formated as (x, y, z, features...)
Returns
----------
end_points: {XXX_xyz, XXX_features, XXX_inds}
XXX_xyz: float32 Tensor of shape (B,K,3)
XXX_features: float32 Tensor of shape (B,K,D)
XXX-inds: int64 Tensor of shape (B,K) values in [0,N-1]
"""
if not end_points: end_points = {}
batch_size = pointcloud.shape[0]
xyz, features = self._break_up_pc(pointcloud)
# --------- 4 SET ABSTRACTION LAYERS ---------
xyz, features, fps_inds = self.sa1(xyz, features)
end_points['sa1_inds'] = fps_inds
end_points['sa1_xyz'] = xyz
end_points['sa1_features'] = features
xyz, features, fps_inds = self.sa2(xyz, features) # this fps_inds is just 0,1,...,1023
end_points['sa2_inds'] = fps_inds
end_points['sa2_xyz'] = xyz
end_points['sa2_features'] = features
xyz, features, fps_inds = self.sa3(xyz, features) # this fps_inds is just 0,1,...,511
end_points['sa3_xyz'] = xyz
end_points['sa3_features'] = features
xyz, features, fps_inds = self.sa4(xyz, features) # this fps_inds is just 0,1,...,255
end_points['sa4_xyz'] = xyz
end_points['sa4_features'] = features
# --------- 2 FEATURE UPSAMPLING LAYERS --------
features = self.fp1(end_points['sa3_xyz'], end_points['sa4_xyz'], end_points['sa3_features'], end_points['sa4_features'])
features = self.fp2(end_points['sa2_xyz'], end_points['sa3_xyz'], end_points['sa2_features'], features)
end_points['fp2_features'] = features
end_points['fp2_xyz'] = end_points['sa2_xyz']
num_seed = end_points['fp2_xyz'].shape[1]
end_points['fp2_inds'] = end_points['sa1_inds'][:,0:num_seed] # indices among the entire input point clouds
return end_points
if __name__=='__main__':
backbone_net = Pointnet2Backbone(input_feature_dim=0).cuda()
print(backbone_net)
backbone_net.eval()
out = backbone_net(torch.rand(16,20000,3).cuda())
for key in sorted(out.keys()):
print(key, '\t', out[key].shape)
| [
"lxiaol9@vt.edu"
] | lxiaol9@vt.edu |
e54990b791469e8f9788843e62d9cbd5ba1586b7 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Py Box/Games/Connect4.py | f10c212bbe6d558099c3a54e5c383f2009f477de | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:1559de38727aac969fef6c397825e86b6e68b16dacaafbe4b2f54499954aaaa9
size 5271
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
0af95378e0e392f99cf06587dc97eef7e8859d13 | ef2ea1152afc07e1341abdc99b037f2c803a0a68 | /test_cnn.py | 6de00fafda4942ffd6bbc0f62aafb20aaa792164 | [
"Apache-2.0"
] | permissive | Diriba-Getch/CNN-Multi-Label-Text-Classificati2on | 484a82ed66e7266fb565ebe834e2c7842d1d2f91 | 0792c0f244b8190e097da42e8719c8bb03573e14 | refs/heads/master | 2023-05-14T16:22:32.973452 | 2021-05-27T14:47:21 | 2021-05-27T14:47:21 | 362,522,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,167 | py | # -*- coding:utf-8 -*-
import os
import time
import numpy as np
import tensorflow as tf
import data_helpers
# Parameters
# ==================================================
logger = data_helpers.logger_fn('tflog', 'test-{}.log'.format(time.asctime()))
MODEL = input("☛ Please input the model file you want to test, it should be like(1490175368): ")
while not (MODEL.isdigit() and len(MODEL) == 10):
MODEL = input('✘ The format of your input is illegal, it should be like(1490175368), please re-input: ')
logger.info('✔︎ The format of your input is legal, now loading to next step...')
CLASS_BIND = input("☛ Use Class Bind or Not?(Y/N) \n")
while not (CLASS_BIND.isalpha() and CLASS_BIND.upper() in ['Y', 'N']):
CLASS_BIND = input('✘ The format of your input is illegal, please re-input: ')
logger.info('✔︎ The format of your input is legal, now loading to next step...')
CLASS_BIND = CLASS_BIND.upper()
TRAININGSET_DIR = 'Train.json'
VALIDATIONSET_DIR = 'Validation.json'
TESTSET_DIR = 'Test.json'
MODEL_DIR = 'runs/' + MODEL + '/checkpoints/'
SAVE_FILE = 'predictions.txt'
# Data loading params
tf.flags.DEFINE_string("training_data_file", TRAININGSET_DIR, "Data source for the training data.")
tf.flags.DEFINE_string("validation_data_file", VALIDATIONSET_DIR, "Data source for the validation data")
tf.flags.DEFINE_string("test_data_file", TESTSET_DIR, "Data source for the test data")
tf.flags.DEFINE_string("checkpoint_dir", MODEL_DIR, "Checkpoint directory from training run")
tf.flags.DEFINE_string("use_classbind_or_not", CLASS_BIND, "Use the class bind info or not.")
# Model Hyperparameters
tf.flags.DEFINE_integer("pad_seq_len", 150, "Recommand padding Sequence length of data (depends on the data)")
tf.flags.DEFINE_integer("embedding_dim", 100, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_integer("embedding_type", 1, "The embedding type (default: 1)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_classes", 367, "Number of labels (depends on the task)")
tf.flags.DEFINE_integer("top_num", 2, "Number of top K prediction classess (default: 3)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
tf.flags.DEFINE_boolean("gpu_options_allow_growth", True, "Allow gpu options growth")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
dilim = '-' * 100
logger.info('\n'.join([dilim, *['{:>50}|{:<50}'.format(attr.upper(), value)
for attr, value in sorted(FLAGS.__flags.items())], dilim]))
def test_cnn():
"""Test CNN model."""
# Load data
logger.info("✔ Loading data...")
logger.info('Recommand padding Sequence length is: {}'.format(FLAGS.pad_seq_len))
logger.info('✔︎ Test data processing...')
test_data = data_helpers.load_data_and_labels(FLAGS.test_data_file, FLAGS.num_classes, FLAGS.embedding_dim)
logger.info('✔︎ Test data padding...')
x_test, y_test = data_helpers.pad_data(test_data, FLAGS.pad_seq_len)
y_test_bind = test_data.labels_bind
# Build vocabulary
VOCAB_SIZE = data_helpers.load_vocab_size(FLAGS.embedding_dim)
pretrained_word2vec_matrix = data_helpers.load_word2vec_matrix(VOCAB_SIZE, FLAGS.embedding_dim)
# Load cnn model
logger.info("✔ Loading model...")
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
logger.info(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
session_conf.gpu_options.allow_growth = FLAGS.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# pre-trained_word2vec
pretrained_embedding = graph.get_operation_by_name("embedding/W").outputs[0]
# Tensors we want to evaluate
logits = graph.get_operation_by_name("output/logits").outputs[0]
# Generate batches for one epoch
batches = data_helpers.batch_iter(list(zip(x_test, y_test, y_test_bind)),
FLAGS.batch_size, 1, shuffle=False)
# Collect the predictions here
all_predicitons = []
eval_loss, eval_rec, eval_acc, eval_counter = 0.0, 0.0, 0.0, 0
for batch_test in batches:
x_batch_test, y_batch_test, y_batch_test_bind = zip(*batch_test)
feed_dict = {
input_x: x_batch_test,
dropout_keep_prob: 1.0
}
batch_logits = sess.run(logits, feed_dict)
if FLAGS.use_classbind_or_not == 'Y':
predicted_labels = data_helpers.get_label_using_logits_and_classbind(
batch_logits, y_batch_test_bind, top_number=FLAGS.top_num)
if FLAGS.use_classbind_or_not == 'N':
predicted_labels = data_helpers.get_label_using_logits(batch_logits, top_number=FLAGS.top_num)
all_predicitons = np.append(all_predicitons, predicted_labels)
cur_rec, cur_acc = 0.0, 0.0
for index, predicted_label in enumerate(predicted_labels):
rec_inc, acc_inc = data_helpers.cal_rec_and_acc(predicted_label, y_batch_test[index])
cur_rec, cur_acc = cur_rec + rec_inc, cur_acc + acc_inc
cur_rec = cur_rec / len(y_batch_test)
cur_acc = cur_acc / len(y_batch_test)
eval_rec, eval_acc, eval_counter = eval_rec + cur_rec, eval_acc + cur_acc, eval_counter + 1
logger.info("✔︎ validation batch {} finished.".format(eval_counter))
eval_rec = float(eval_rec / eval_counter)
eval_acc = float(eval_acc / eval_counter)
logger.info("☛ Recall {:g}, Accuracy {:g}".format(eval_rec, eval_acc))
np.savetxt(SAVE_FILE, list(zip(all_predicitons)), fmt='%s')
logger.info("✔ Done.")
if __name__ == '__main__':
test_cnn()
| [
"chinawolfman@hotmail.com"
] | chinawolfman@hotmail.com |
12a0e0d2aeaf443a2c76350a7f0a7ce2f8a41329 | 3d85f716aaeb42592adcdeabd7cb76152b8104ed | /pugh_torch/tests/datasets/classification/test_cifar.py | 616cc78f11d59e21948511350baec7cb988778dc | [
"MIT"
] | permissive | BrianPugh/pugh_torch | a78836b9c286814b8df5df57d67b8dbfc8d6412d | d620a518d78ec03556c5089bfc76e4cf7bd0cd70 | refs/heads/master | 2023-02-14T16:53:07.625650 | 2020-11-03T16:23:22 | 2020-11-03T16:23:22 | 294,986,957 | 4 | 1 | MIT | 2020-11-03T16:23:23 | 2020-09-12T16:54:00 | Python | UTF-8 | Python | false | false | 998 | py | """
Really only tests cifar10, but cifar100 shares everything.
"""
import pytest
from pugh_torch.datasets.classification import CIFAR10
from torchvision import transforms
from torch.utils.data import DataLoader
from tqdm import tqdm
@pytest.fixture
def train(tmp_path):
return CIFAR10(split="train")
@pytest.fixture
def val(tmp_path):
return CIFAR10(split="val")
def assert_imagenet(loader):
""""""
bar = tqdm(loader)
for i, (image, label) in enumerate(bar):
assert image.max() <= 1
assert image.min() >= 0
assert image.shape == (16, 3, 32, 32)
assert label.shape == (16,)
assert label.max() <= 9
assert label.min() >= 0
@pytest.mark.dataset
def test_val_get(val):
loader = DataLoader(val, batch_size=16, drop_last=True, shuffle=True)
assert_imagenet(loader)
@pytest.mark.dataset
def test_train_get(train):
loader = DataLoader(train, batch_size=16, drop_last=True, shuffle=True)
assert_imagenet(loader)
| [
"noreply@github.com"
] | noreply@github.com |
ea66c9e0116ad4c114f43c85859b9d1075caba0a | 8ac002b27782b8ad8df977f062959846bf567de5 | /test_sudocube/module/test_data_extraction.py | 39170796c8230a2bc30916702d191772a8bfe74e | [] | no_license | pobed2/sudocubes | 820e38154ce7f99af84e8055bfb98484d020de85 | e5cb1d4f472b084dedd8cc73db60096dab55eba3 | refs/heads/master | 2021-01-01T18:49:41.484018 | 2013-11-19T23:14:11 | 2013-11-19T23:14:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | #coding: utf-8
import unittest
import cv2
from Kinocto.sudocube.sudocube_factory import SudocubeFactory
from Kinocto.test.test_sudocube.test_images.test_images_parameters import *
class ModuleTestDataExtraction(unittest.TestCase):
def setUp(self):
self._sudocube_factory = SudocubeFactory()
def test_should_detect_right_numbers_in_front_facing_image(self):
image = self._read_image(first_front_image_path)
sudocube = self._sudocube_factory.create_sudocube_from_image(image)
expected = first_front_image_expected
self._assert_same_unsolved_sudocube(expected, sudocube)
def test_should_detect_right_numbers_in_another_front_facing_image(self):
image = self._read_image(second_front_image_path)
sudocube = self._sudocube_factory.create_sudocube_from_image(image)
expected = second_front_image_expected
self._assert_same_unsolved_sudocube(expected, sudocube)
def _assert_same_unsolved_sudocube(self, expected, given):
data = given._data
self.assertEqual(expected["front"], data["front"])
self.assertEqual(expected["side"], data["side"])
self.assertEqual(expected["top"], data["top"])
self.assertEqual(expected["red square"], data["red square"])
def _read_image(self, path):
rgb = cv2.imread(path)
return rgb | [
"pobed2@gmail.com"
] | pobed2@gmail.com |
3f6a8661a1e93d80f971df67da5aee4c9b53e2a1 | 556d821af0042dcc1a68a7235cf37f84dc473676 | /bin/make_shape.py | b97d4b3cbd86e2a5daea1ea87ed09d4a5ba01f29 | [
"MIT"
] | permissive | Ded316/mujoco-worldgen | 6f685b38b3a09ced1c600f89339b8355e58c4061 | 39f52b1b47aed499925a6a214b58bdbdb4e2f75e | refs/heads/master | 2020-08-30T00:33:13.314079 | 2019-08-21T20:43:55 | 2019-09-17T15:41:25 | 218,215,970 | 1 | 0 | MIT | 2019-10-29T06:08:28 | 2019-10-29T06:08:28 | null | UTF-8 | Python | false | false | 6,951 | py | #!/usr/bin/env python
# Make STL files for simple geometric shapes
# This file is expected to be edited and ran, or used as a module
# It's a bit of a hack, but it makes objects that work!
import os
import numpy as np
from collections import OrderedDict
from itertools import chain, combinations, product
import xmltodict
from pyhull.convex_hull import ConvexHull
from stl.mesh import Mesh
from mujoco_worldgen.util.path import worldgen_path
# Basic (half) unit length we normalize to
u = 0.038 # 38 millimeters in meters
def norm(shape):
''' Center a shape over the origin, and scale it to fit in unit box '''
mins, maxs = np.min(shape, axis=0), np.max(shape, axis=0)
return (shape - (maxs + mins) / 2) / (maxs - mins) * u * 2
def roll3(points):
''' Return a set of rotated 3d points (used for construction) '''
return np.vstack([np.roll(points, i) for i in range(3)])
def subdivide(shape):
''' Take a triangulated sphere and subdivide each face. '''
# https://medium.com/game-dev-daily/d7956b825db4 - Icosahedron section
hull = ConvexHull(shape)
radius = np.mean(np.linalg.norm(hull.points, axis=1)) # Estimate radius
edges = set(chain(*[combinations(v, 2) for v in hull.vertices]))
midpoints = np.mean(hull.points.take(list(edges), axis=0), axis=1)
newpoints = midpoints / np.linalg.norm(midpoints, axis=1)[:, None] * radius
return norm(np.vstack((hull.points, newpoints)))
def top(shape):
    ''' Get only the top half (z >= 0) points in a shape. '''
    above_plane = shape[:, 2] >= 0
    return shape[np.where(above_plane)]
phi = (1 + 5 ** .5) / 2  # Golden ratio
ihp = 1 / phi  # Inverted golden ratio (phi backwards)

# Construct tetrahedron from unit axes and projected (ones) point
tetra = norm(np.vstack((np.ones(3), np.eye(3))))
# Construct cube from tetrahedron and inverted tetrahedron
cube = np.vstack((tetra, -tetra))
# Construct octahedron from unit axes and inverted unit axes
octa = norm(np.vstack((np.eye(3), -np.eye(3))))
# Construct icosahedron from (phi, 1) planes
ico_plane = np.array(list(product([1, -1], [phi, -phi], [0])))
icosa = norm(roll3(ico_plane))
# Construct dodecahedron from unit cube and (phi, ihp) planes
dod_cube = np.array(list(product(*([(-1, 1)] * 3))))
dod_plane = np.array(list(product([ihp, -ihp], [phi, -phi], [0])))
dodeca = norm(np.vstack((dod_cube, roll3(dod_plane))))

# Subdivided icosahedrons
sphere80 = subdivide(icosa)  # pentakis icosidodecahedron
sphere320 = subdivide(sphere80)  # 320 sided spherical polyhedra
sphere1280 = subdivide(sphere320)  # 1280 sided spherical polyhedra

# Josh's shapes (basic length 38mm)
line = np.linspace(0, 2 * np.pi, 100)
circle = np.c_[np.cos(line), np.sin(line), np.zeros(100)] * u
halfsphere = np.r_[circle, top(sphere1280)]
cone = np.r_[circle, np.array([[0, 0, 2 * u]])]
h = u * .75 ** .5  # half-height of the hexagon
halfagon = np.array([[u, 0, 0], [u / 2, h, 0], [-u / 2, h, 0]])
hexagon = np.r_[halfagon, -halfagon]
hexprism = np.r_[hexagon, hexagon + np.array([[0, 0, 2 * u]])]
triangle = np.array([[u, 0, 0], [-u, 0, 0], [0, 2 * h, 0]])
# NOTE(review): this rebinds ``tetra`` — the platonic tetrahedron defined
# above is discarded from here on; only this triangle-based one is exported.
tetra = np.r_[triangle, np.array([[0, h, 2 * u]])]
triprism = np.r_[triangle, triangle + np.array([[0, 0, 2 * u]])]
square = np.array([[u, u, 0], [-u, u, 0], [-u, -u, 0], [u, -u, 0]])
pyramid = np.r_[square, np.array([[0, 0, u * 2]])]
def build_stl(name, points):
    ''' Given a set point points, make a STL file of the convex hull. '''
    # NOTE(review): ``name`` is unused here; kept for interface parity with
    # build_xml() and make_shape().
    points = np.array(points)
    points -= np.min(points, axis=0)  # Move bound to origin
    hull = ConvexHull(points)
    shape = Mesh(np.zeros(len(hull.vertices), dtype=Mesh.dtype))
    for i, vertex in enumerate(hull.vertices):
        shape.vectors[i] = hull.points[vertex][::-1]  # Turn it inside out
    # Bounding-box extents; the shape now starts at the origin, so the max
    # along each axis is the full size.
    size = np.max(hull.points, axis=0)
    return shape, size
def build_xml(name, size):
    '''Build the MuJoCo XML dict that accompanies the STL for *name*.

    *size* is the full bounding-box extent of the mesh; an annotation body
    with a half-size box geom is emitted alongside the mesh body.
    '''
    stl_path = os.path.join(os.path.join('shapes', name), name + '.stl')
    mesh = OrderedDict([('@name', name),
                        ('@file', stl_path),
                        ('@scale', '1 1 1')])
    asset = OrderedDict(mesh=mesh)
    # Random RGB color with full alpha.
    rgba = ' '.join(str(channel) for channel in np.random.uniform(size=3)) + ' 1'
    halfsize = ' '.join([str(component) for component in size * 0.5])
    # One slide and one hinge joint per principal axis.
    axes = ('0 0 1', '0 1 0', '1 0 0')
    joints = [
        OrderedDict([('@name', '%s%d' % (joint_kind, index)),
                     ('@type', joint_kind),
                     ('@pos', '0 0 0'),
                     ('@axis', axis),
                     ('@damping', '10')])
        for joint_kind in ('slide', 'hinge')
        for index, axis in enumerate(axes)
    ]
    mesh_geom = OrderedDict([('@name', name),
                             ('@pos', '0 0 0'),
                             ('@type', 'mesh'),
                             ('@mesh', name),
                             ('@rgba', rgba)])
    mesh_body = OrderedDict([('@name', name),
                             ('@pos', '0 0 0'),
                             ('geom', mesh_geom),
                             ('joint', joints)])
    bound_geom = OrderedDict([('@type', 'box'),
                              ('@pos', '0 0 0'),
                              ('@size', halfsize)])
    bound_body = OrderedDict([('@name', 'annotation:outer_bound'),
                              ('@pos', halfsize),
                              ('geom', bound_geom)])
    worldbody = OrderedDict(body=[mesh_body, bound_body])
    return OrderedDict(mujoco=OrderedDict(asset=asset, worldbody=worldbody))
def make_shape(name, points):
    ''' Make the STL and XML, and save both to the proper directories. '''
    # Make the STL and XML
    shape, size = build_stl(name, points)
    xml_dict = build_xml(name, size)
    # Make the directory to save files to if we have to
    xml_dirname = worldgen_path('assets', 'xmls', 'shapes', name)
    stl_dirname = worldgen_path('assets', 'stls', 'shapes', name)
    os.makedirs(xml_dirname, exist_ok=True)
    os.makedirs(stl_dirname, exist_ok=True)
    # Save the STL and XML to our new directories
    shape.save(os.path.join(stl_dirname, name + '.stl'))
    with open(os.path.join(xml_dirname, 'main.xml'), 'w') as f:
        f.write(xmltodict.unparse(xml_dict, pretty=True))
# All shapes this script knows how to export, keyed by output name.
shapes_to_build = {'cube': cube,
                   'octa': octa,
                   'icosa': icosa,
                   'dodeca': dodeca,
                   'sphere80': sphere80,
                   'sphere320': sphere320,
                   'sphere1280': sphere1280,
                   'halfsphere': halfsphere,
                   'cone': cone,
                   'hexprism': hexprism,
                   'tetra': tetra,
                   'triprism': triprism,
                   'pyramid': pyramid}
# Smaller subset actually exported below (the full set is kept for
# manual editing, per the file header).
test_shapes_to_build = {'tetra': tetra,
                        'triprism': triprism,
                        'pyramid': pyramid}
if __name__ == '__main__':
    for name, points in test_shapes_to_build.items():
        make_shape(name, points)
| [
"todor.m.markov@gmail.com"
] | todor.m.markov@gmail.com |
844eb6e55297c84d9f5d6f7cda1364b1996d71df | 8b9dd576d28aa315c41c7ac2cf328e661890c65c | /src/ltool/rules/rulecate.py | 558e044fc96c608820673fa861cd228b022b9458 | [] | no_license | the-champions-of-capua/xsqlmb | 72fdd8bd09d59d37b2900887a3a68fe92e706846 | 6f1446dd76bf9ba0a533a14642f3c9b0bd14ea28 | refs/heads/master | 2020-05-23T12:42:11.291948 | 2019-05-15T06:26:44 | 2019-05-15T06:26:44 | 181,443,773 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,339 | py | # coding:utf-8
# Mapping source: one line per ModSecurity / OWASP CRS rule file, each
# followed by a Chinese description wrapped in full-width parentheses.
# Parsed by get_kv_of_rukes() below.
res_pratean = """
REQUEST-903.9001-DRUPAL-EXCLUSION-RULES.conf(规则应用示例)
REQUEST-903.9002-WORDPRESS-EXCLUSION-RULES.conf(规则应用示例)
REQUEST-910-IP-REPUTATION.conf(可疑IP匹配)
REQUEST-911-METHOD-ENFORCEMENT.conf(强制方法)
REQUEST-912-DOS-PROTECTION.conf(DOS攻击)
REQUEST-913-SCANNER-DETECTION.conf(扫描器检测)
REQUEST-920-PROTOCOL-ENFORCEMENT.conf(HTTP协议规范相关规则)
REQUEST-921-PROTOCOL-ATTACK.conf(协议攻击)
- 举例:HTTP Header Injection Attack、HTTP参数污染
REQUEST-930-APPLICATION-ATTACK-LFI.conf(应用攻击-路径遍历)
REQUEST-931-APPLICATION-ATTACK-RFI.conf(远程文件包含)
REQUEST-932-APPLICATION-ATTACK-RCE.conf(远程命令执行)
REQUEST-933-APPLICATION-ATTACK-PHP.conf(PHP注入攻击)
REQUEST-941-APPLICATION-ATTACK-XSS.conf(XSS注入攻击)
REQUEST-942-APPLICATION-ATTACK-SQLI.conf(SQL注入攻击)
REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION.conf(会话固定)
REQUEST-949-BLOCKING-EVALUATION.conf(引擎上下文联合评估)
RESPONSE-950-DATA-LEAKAGES.conf(信息泄露)
RESPONSE-951-DATA-LEAKAGES-SQL.conf(SQL信息泄露)
RESPONSE-952-DATA-LEAKAGES-JAVA.conf(JAVA源代码泄露)
RESPONSE-953-DATA-LEAKAGES-PHP.conf(PHP信息泄露)
RESPONSE-954-DATA-LEAKAGES-IIS.conf(IIS信息泄露)
REQUEST-905-COMMON-EXCEPTIONS.conf(常见示例)
REQUEST-901-INITIALIZATION.conf(引擎初始化)
modsecurity.conf(引擎内置补丁规则和设置)
localized.conf(自定义规则过滤)
dynamic.conf(自定义规则访问控制)
RESPONSE-980-CORRELATION.conf(内置关联规则)
REQUEST-900-EXCLUSION-RULES-BEFORE-CRS.conf.example(引擎规则解释器)
RESPONSE-959-BLOCKING-EVALUATION.conf(引擎上下文联合评估)"""
import re
def get_kv_of_rukes():
    """Map each rule file name to its Chinese description.

    Parses ``res_pratean``, whose lines look like
    ``NAME.conf(description)`` with *full-width* parentheses.  Lines
    without such an entry are skipped.

    :return: dict mapping file name (group 1) to description (group 2).
    """
    regexp = {}
    # The descriptions in res_pratean use full-width parentheses ((...)),
    # so the pattern must match those characters.  The previous ASCII
    # pattern "((.*?))" was two nested regex groups around a lazy ".*?",
    # which always captured an empty description.
    pattern = re.compile(r"^(.*?\.conf)((.*?))")
    for line in res_pratean.split("\n"):
        matched = pattern.match(line)
        if matched:
            # setdefault keeps the first description seen for a file name.
            regexp.setdefault(matched.group(1), matched.group(2))
    return regexp
def get_rule_cate_by_filepath(filepath):
    """Return the category description for a rule file path.

    Falls back to "自定义规则" (custom rule) when the file name is not a
    known CRS rule file.

    :param filepath: slash-separated path; only the last component is used.
    """
    try:
        filename = filepath.split("/")[-1]
        return get_kv_of_rukes()[filename]
    # Narrowed from a bare ``except`` so genuine programming errors are
    # not silently swallowed; KeyError = unknown file name, AttributeError
    # = non-string filepath.
    except (KeyError, AttributeError):
        return "自定义规则"
| [
"meigea@0528@gmail.com"
] | meigea@0528@gmail.com |
2cb17da1f2ca89667aa5351107d28665b8883899 | 177cf66abc14a3779146d18b87ab941f41b01efc | /data_preprocess/data_preprocess.py | 180ae9e14f446cdf3209f55e05522158e8820b05 | [] | no_license | liang-8421/text_classification_pytorch | 446b34f76233d1612c9644766e9da0164c25e6f6 | c1bb91937f3be5cbba0b4b429065b79dcb6a3b4c | refs/heads/main | 2023-06-13T22:06:11.330652 | 2021-07-11T13:12:56 | 2021-07-11T13:12:56 | 384,947,212 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,593 | py | # -*- coding: utf-8 -*-
# @Time : 2021/6/6 19:47
# @Author : miliang
# @FileName: data_preprocess.py.py
# @Software: PyCharm
from config import Config
import pandas as pd
def Get_class2id(file_path):
    """Read one class label per line and build both label/index mappings.

    :param file_path: text file with one label per line (UTF-8).
    :return: tuple (class2id, id2class) mapping label -> index and
             index -> label, in file order.
    """
    with open(file_path, "r", encoding="utf-8") as handle:
        labels = [line.strip() for line in handle]
    class2id = {label: index for index, label in enumerate(labels)}
    id2class = {index: label for index, label in enumerate(labels)}
    return class2id, id2class
# Module-level state shared by get_csv()/restore_label(): the project
# config and the label mappings loaded from class.txt.
config = Config()
class2id, id2class = Get_class2id(config.origin_data_dir + "class.txt")
def get_csv(file_path):
    """Load a tab-separated "text<TAB>label_id" file into a DataFrame.

    The numeric label ids are converted back to their class-name strings
    via restore_label().  Assumes every line has exactly two tab-separated
    fields; a malformed line would raise IndexError.
    """
    text_list, label_list = [], []
    with open(file_path, "r", encoding="utf-8") as fr:
        for i in fr:
            item = i.strip().split("\t")
            text_list.append(item[0])
            label_list.append(item[1])
    df = pd.DataFrame({
        "text": text_list,
        "label": label_list
    })
    # Map label ids (strings at this point) to class-name strings.
    df["label"] = df["label"].apply(restore_label)
    return df
def restore_label(label_id):
    """Translate a label id (int or numeric string) to its class name."""
    return id2class[int(label_id)]
if __name__ == '__main__':
    # Convert the three tab-separated splits to CSV with readable labels.
    train_df = get_csv(config.origin_data_dir + "train.txt")
    dev_df = get_csv(config.origin_data_dir + "dev.txt")
    test_df = get_csv(config.origin_data_dir + "test.txt")
    #
    # # Write the CSV files.
    train_df.to_csv(config.source_data_dir + "train.csv", index=False, encoding="utf-8")
    dev_df.to_csv(config.source_data_dir + "dev.csv", index=False, encoding="utf-8")
    test_df.to_csv(config.source_data_dir + "test.csv", index=False, encoding="utf-8")
| [
"2937198838@qq.com"
] | 2937198838@qq.com |
4cd35535f7781c80ba779ff265828851cf0927a2 | 32bebb34124cfd06705e20276524a19d2a938e0c | /dev/old/ray_drawing_split.py | 04f9c6b7c753079e0f88f333c9b2b942522b7309 | [] | no_license | ecpoppenheimer/TensorFlowRayTrace | 48c512141119ebf61f9946ec3547cbdb1df03115 | 18116946f0ac53bfca5726fc38ef1de99015557c | refs/heads/master | 2022-09-29T08:39:50.473225 | 2022-06-09T14:07:18 | 2022-06-09T14:07:18 | 171,573,566 | 4 | 1 | null | 2022-06-08T16:48:16 | 2019-02-20T00:41:09 | Python | UTF-8 | Python | false | false | 1,909 | py | # ray drawing test
import itertools
import math
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import tfrt.TFRayTrace as tfrt
import tfrt.drawing as drawing
import tfrt.OpticsUtilities as outl
from tfrt.spectrumRGB import rgb
# Endless iterators the key handler steps through: matplotlib line styles
# and colormaps (starting with the physical wavelength-to-RGB map).
STYLES = itertools.cycle(["-", "--", "-.", ":"])
COLORMAPS = itertools.cycle(
    [
        mpl.colors.ListedColormap(rgb()),
        plt.get_cmap("viridis"),
        plt.get_cmap("seismic"),
        plt.get_cmap("spring"),
        plt.get_cmap("winter"),
        plt.get_cmap("brg"),
        plt.get_cmap("gist_ncar"),
    ]
)
def get_rays(count=50):
    """Build *count* vertical rays spanning the visible spectrum.

    Each row is [x_start, y_start, x_end, y_end, wavelength]; the x
    position equals the wavelength so the rays fan across the axes.
    """
    spectrum = np.linspace(drawing.VISIBLE_MIN, drawing.VISIBLE_MAX, count)
    rays = [[wavelength, 0.1, wavelength, 0.9, wavelength]
            for wavelength in spectrum]
    return np.array(rays)
def on_key(event, drawer):
    # Message loop called whenever a key is pressed on the figure:
    #   t: show test rays      c: clear rays
    #   n: narrow wavelength limits   m: full visible range
    #   i: next line style     u: next colormap    d: redraw
    if event.key == "t":
        drawer.rays = get_rays()
    elif event.key == "c":
        drawer.rays = None
    elif event.key == "n":
        drawer.set_wavelength_limits(0.450, 0.650)
    elif event.key == "m":
        drawer.set_wavelength_limits(drawing.VISIBLE_MIN, drawing.VISIBLE_MAX)
    elif event.key == "i":
        drawer.style = next(STYLES)
    elif event.key == "u":
        drawer.colormap = next(COLORMAPS)
    elif event.key == "d":
        drawer.draw()
    # Always refresh the canvas so the change is visible immediately.
    drawing.redraw_current_figure()
if __name__ == "__main__":
    # Keep matplotlib's built-in key bindings from shadowing ours.
    drawing.disable_figure_key_commands()
    # set up the figure and axes
    fig, ax = plt.subplots(1, 1, figsize=(15, 9))

    # configure axes
    ax.set_aspect("equal")
    ax.set_xbound(0, 1)
    ax.set_ybound(0, 1)

    # set up drawer
    drawer = drawing.RayDrawer(
        ax, rays=get_rays(), style=next(STYLES), colormap=next(COLORMAPS)
    )
    drawer.draw()

    # hand over to user
    fig.canvas.mpl_connect("key_press_event", lambda event: on_key(event, drawer))
    plt.show()
| [
"ecpoppenheimer@gmail.com"
] | ecpoppenheimer@gmail.com |
69514aeaf459f434fbf702f46da5b26a01c4ab4d | 37815f4d3201fc1e29f6aab014d134f61b4acf8c | /main_project/main_project/settings.py | 3968fa990c59d58ba1bf755a72384490f474ac47 | [
"MIT"
] | permissive | mayc2/MStream | 92dc34440896170c7dc4efd6ce4772547dcd769b | f5ec884a69499ecf90414c36dadb6c17f7fd6b95 | refs/heads/master | 2020-12-24T14:35:37.061933 | 2015-02-06T03:22:22 | 2015-02-06T03:22:22 | 26,694,223 | 0 | 0 | null | 2014-11-16T18:38:51 | 2014-11-15T21:15:20 | PHP | UTF-8 | Python | false | false | 2,868 | py | """
Django settings for main_project project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key and DB credentials below are committed in
# source — move them to environment variables before deploying.
SECRET_KEY = 'p%z2jqyf&q_8w)-ec)pm*&ahvpsm44^*hfmlsquo%owuef+hix'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# NOTE(review): TEMPLATE_DEBUG is obsolete in Django >= 1.8 (the TEMPLATES
# setting is used instead); harmless but redundant here.
TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'accounts',
)

# noinspection PyUnresolvedReferences
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'main_project.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'main_project.wsgi.application'

# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'mstream',
        'USER': 'mayc',
        'PASSWORD': 'PASSWORD123',
        'HOST': 'localhost',
        'PORT': '3306',
    }
}

# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/

LANGUAGE_CODE = 'en-us'

# NOTE(review): 'EST' is a fixed-offset zone name; 'America/New_York'
# would honor daylight saving — confirm which is intended.
TIME_ZONE = 'EST'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/

STATIC_URL = '/static/'
| [
"paulc183@gmail.com"
] | paulc183@gmail.com |
971eddb84ca8f99728885698226ec131063f72c1 | f049ffaf6a9bd0a8c8a0fb785f016b73651778f3 | /util/connect_db.py | 1fc548398a2cf5047445e065c2cf23a708ff0ea7 | [] | no_license | hemj132/Ainterface | 031b2b6974d352afba387967ab3c21c9009626e7 | 6ca5185e02b823abb17047cc28abbbd4fdc5ac30 | refs/heads/master | 2020-05-03T00:34:10.307747 | 2019-03-29T01:53:41 | 2019-03-29T01:53:41 | 178,313,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | #coding:utf-8
import pymysql
import json
class OperationMysql:
    """Thin helper around a single pymysql connection.

    NOTE(review): connection parameters (including the root password) are
    hard-coded — consider loading them from configuration.
    """

    def __init__(self):
        self.conn = pymysql.connect(
            host='192.168.199.162',
            port=3306,
            user='root',
            passwd='root',
            db='game_api_developmentv3',
            charset='utf8',
        )
        self.cur = self.conn.cursor()

    # Fetch a single row for the given SQL statement.
    def search_one(self, sql):
        self.cur.execute(sql)
        result = self.cur.fetchone()
        # NOTE(review): closing the connection here makes the instance
        # single-use — a second search_one() call will fail. Confirm this
        # is intended before reusing the helper elsewhere.
        self.conn.close()
        return result
if __name__ == '__main__':
    # Smoke test: fetch one row from `users`. Note the Python 2 print
    # statement — this module is not Python 3 compatible as written.
    op_mysql = OperationMysql()
    res = op_mysql.search_one("SELECT *from users limit 1;")
    print res
| [
"717474445@qq.com"
] | 717474445@qq.com |
d04d6df927667a5e8f413d32185bf9ec4e892f1a | f1c41d441f31f82e44a32e69cf697594e6f81f03 | /homeassistant/components/broadlink/device.py | c8751182cb9efc877e1f9eeca3d2089a05e2cb47 | [
"Apache-2.0"
] | permissive | titilambert/home-assistant | 363240782fb74b47251ccb5f2e518ab5ff790aa9 | a2651845f379992231fd7b9c8458828036296ee0 | refs/heads/dev | 2023-01-23T04:19:40.006676 | 2020-08-26T16:03:03 | 2020-08-26T16:03:03 | 56,938,172 | 4 | 0 | Apache-2.0 | 2023-01-13T06:01:54 | 2016-04-23T19:56:36 | Python | UTF-8 | Python | false | false | 5,673 | py | """Support for Broadlink devices."""
import asyncio
from functools import partial
import logging
import broadlink as blk
from broadlink.exceptions import (
AuthenticationError,
AuthorizationError,
BroadlinkException,
ConnectionClosedError,
DeviceOfflineError,
)
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_NAME, CONF_TIMEOUT, CONF_TYPE
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from .const import DEFAULT_PORT, DOMAIN, DOMAINS_AND_TYPES
from .updater import get_update_manager
_LOGGER = logging.getLogger(__name__)
def get_domains(device_type):
    """Return the domains available for a device type."""
    supported = set()
    for domain, types in DOMAINS_AND_TYPES:
        if device_type in types:
            supported.add(domain)
    return supported
class BroadlinkDevice:
    """Manages a Broadlink device."""

    def __init__(self, hass, config):
        """Initialize the device."""
        self.hass = hass
        self.config = config
        self.api = None
        self.update_manager = None
        self.fw_version = None
        # None until the first auth attempt resolves; False once the
        # device is known to be locked (see _async_handle_auth_error).
        self.authorized = None
        # Listener-removal callbacks, executed on unload.
        self.reset_jobs = []

    @property
    def name(self):
        """Return the name of the device."""
        return self.config.title

    @property
    def unique_id(self):
        """Return the unique id of the device."""
        return self.config.unique_id

    @staticmethod
    async def async_update(hass, entry):
        """Update the device and related entities.

        Triggered when the device is renamed on the frontend.
        """
        device_registry = await dr.async_get_registry(hass)
        device_entry = device_registry.async_get_device(
            {(DOMAIN, entry.unique_id)}, set()
        )
        device_registry.async_update_device(device_entry.id, name=entry.title)
        await hass.config_entries.async_reload(entry.entry_id)

    async def async_setup(self):
        """Set up the device and related entities."""
        config = self.config

        api = blk.gendevice(
            config.data[CONF_TYPE],
            (config.data[CONF_HOST], DEFAULT_PORT),
            bytes.fromhex(config.data[CONF_MAC]),
            name=config.title,
        )
        api.timeout = config.data[CONF_TIMEOUT]

        try:
            await self.hass.async_add_executor_job(api.auth)
        except AuthenticationError:
            # Device is locked: start the reauth flow instead of retrying.
            await self._async_handle_auth_error()
            return False
        except (DeviceOfflineError, OSError):
            # Transient failure: let Home Assistant retry the setup later.
            raise ConfigEntryNotReady
        except BroadlinkException as err:
            _LOGGER.error(
                "Failed to authenticate to the device at %s: %s", api.host[0], err
            )
            return False

        self.api = api
        self.authorized = True

        update_manager = get_update_manager(self)
        coordinator = update_manager.coordinator
        await coordinator.async_refresh()
        if not coordinator.last_update_success:
            raise ConfigEntryNotReady()

        self.update_manager = update_manager
        self.hass.data[DOMAIN].devices[config.entry_id] = self
        self.reset_jobs.append(config.add_update_listener(self.async_update))

        try:
            self.fw_version = await self.hass.async_add_executor_job(api.get_fwversion)
        except (BroadlinkException, OSError):
            # Firmware version is informational only; ignore failures.
            pass

        # Forward entry setup to related domains.
        tasks = (
            self.hass.config_entries.async_forward_entry_setup(config, domain)
            for domain in get_domains(self.api.type)
        )
        for entry_setup in tasks:
            self.hass.async_create_task(entry_setup)

        return True

    async def async_unload(self):
        """Unload the device and related entities."""
        if self.update_manager is None:
            # Setup never completed, so there is nothing to tear down.
            return True

        while self.reset_jobs:
            self.reset_jobs.pop()()

        tasks = (
            self.hass.config_entries.async_forward_entry_unload(self.config, domain)
            for domain in get_domains(self.api.type)
        )
        results = await asyncio.gather(*tasks)
        return all(results)

    async def async_auth(self):
        """Authenticate to the device."""
        try:
            await self.hass.async_add_executor_job(self.api.auth)
        except (BroadlinkException, OSError) as err:
            _LOGGER.debug(
                "Failed to authenticate to the device at %s: %s", self.api.host[0], err
            )
            if isinstance(err, AuthenticationError):
                await self._async_handle_auth_error()
            return False
        return True

    async def async_request(self, function, *args, **kwargs):
        """Send a request to the device."""
        request = partial(function, *args, **kwargs)
        try:
            return await self.hass.async_add_executor_job(request)
        except (AuthorizationError, ConnectionClosedError):
            # Session may have expired: re-authenticate once and retry.
            if not await self.async_auth():
                raise
            return await self.hass.async_add_executor_job(request)

    async def _async_handle_auth_error(self):
        """Handle an authentication error."""
        if self.authorized is False:
            # A reauth flow has already been started; don't start another.
            return

        self.authorized = False

        _LOGGER.error(
            "The device at %s is locked for authentication. Follow the configuration flow to unlock it",
            self.config.data[CONF_HOST],
        )
        self.hass.async_create_task(
            self.hass.config_entries.flow.async_init(
                DOMAIN,
                context={"source": "reauth"},
                data={CONF_NAME: self.name, **self.config.data},
            )
        )
| [
"noreply@github.com"
] | noreply@github.com |
1d1585c6cd01744d768b0a0412fb34e50bda988f | 20e5d1bf167ac56db3f0bb784e182898a793529f | /src/data/cages.py | 02db8d6c065fc390a47c11e7bd86bd59a33bc2d7 | [] | no_license | ericodex/Python_mongoDB | fd84918afdc9522a7afd149dee72269fcb7a6e9a | af8746f5082fe67abfcaa4d27c858ca2b7e4d789 | refs/heads/master | 2020-06-17T22:53:29.205985 | 2019-07-10T14:38:45 | 2019-07-10T14:38:45 | 196,089,048 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | import datetime # Date time, otimização do uso de unidade de tempo no banco de dados.
import mongoengine as me
from data.bookings import Booking
class Cage(me.Document):
    """Schema definition for a cage document (mongoengine)."""

    # Set when the document is created (callable default, evaluated per doc).
    registered_date = me.DateTimeField(default=datetime.datetime.now)
    name = me.StringField(required=True)
    price = me.FloatField(required=True)
    square_meters = me.FloatField(required=True)
    is_carpeted = me.BooleanField(required=True)
    has_toys = me.BooleanField(required=True)
    allow_dangerous_snakes = me.BooleanField(default=False)
    # Bookings are embedded inside the cage document rather than stored in
    # a separate collection.
    bookings = me.EmbeddedDocumentListField(Booking)

    # Stored in the 'cages' collection on the connection aliased 'core'.
    meta = {
        'db_alias': 'core',
        'collection': 'cages'
    }
| [
"eric.075315@gmail.com"
] | eric.075315@gmail.com |
161c467f5b64ffc651b0161eecd5079017b1d8f1 | c0de030b078a49d246f9f6eb27e0c3707719904c | /atharva_theme_general/__manifest__.py | 498f206d592979a32892849044e7a42600e180b4 | [] | no_license | cokotracy/Pinnacle | 5cc728b3c67d694ac57589c9b3aac026433f5ca7 | 3f051397de002ba589f4109f65666a74e36080ad | refs/heads/main | 2023-02-26T02:34:42.337556 | 2021-02-04T13:41:09 | 2021-02-04T13:41:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,625 | py | # -*- coding: utf-8 -*-
{
'name': "Atharva Theme General",
'category': 'Website',
'sequence': 5,
'summary': """Atharva Theme General""",
'version': '2.6',
'author': 'Atharva System',
'support': 'support@atharvasystem.com',
'website' : 'http://www.atharvasystem.com',
'license' : 'OPL-1',
'description': """
Base Module for all themes by Atharva System""",
'depends': [
'website_sale_wishlist',
'website_sale_stock',
'website_sale_comparison',
'website_mass_mailing',
'website_blog'
],
'data': [
'security/ir.model.access.csv',
'views/assets.xml',
'views/website_menu_views.xml',
'views/res_config_settings_views.xml',
'views/category_configure_views.xml',
'views/category_views.xml',
'views/multitab_configure_views.xml',
'views/product_brand_views.xml',
'views/product_tabs_views.xml',
'views/product_tags_views.xml',
'views/custom_shop_views.xml',
'views/blog_configure_views.xml',
'views/templates.xml',
'views/megamenu_templates.xml',
'views/dynamic_snippets.xml',
'views/breadcrumb_templates.xml',
'views/custom_shop_templates.xml',
'views/product_brand_page.xml',
'views/header_footer_template.xml',
'views/product_quick_view_template.xml'
],
'demo': [
'data/demo.xml',
],
'price': 4.00,
'currency': 'EUR',
'images': ['static/description/atharva-theme-general-banner.png'],
'installable': True,
'application': True
}
| [
"support@atharvasystem.com"
] | support@atharvasystem.com |
65da8f31eec34e35df36db0edc77988d9760b5bb | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/test/test_plat_product_get_list_request.py | f8948ba21e7402d29e9aed6e09e6cc8e9cb8dcca | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 738 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.platproduct.model.plat_product_get_list_request import PlatProductGetListRequest
class TestPlatProductGetListRequest(unittest.TestCase):
    """PlatProductGetListRequest unit test stubs (generated)."""

    def setUp(self):
        # No shared fixtures are needed for these generated stubs.
        pass

    def tearDown(self):
        pass

    def testPlatProductGetListRequest(self):
        """Test PlatProductGetListRequest"""
        # FIXME: construct object with mandatory attributes with example values
        # model = PlatProductGetListRequest()  # noqa: E501
        pass
| [
"tokimekiyxp@foxmail.com"
] | tokimekiyxp@foxmail.com |
38611a283e055ff5824b88d92fcf6d1216fec345 | bc3da13c8e6427553d4e917290d65d66d54e4b77 | /pharma/migrations/0004_auto_20201219_0620.py | 3fdd5049dfff5793d2708d166460350daed4621f | [
"MIT"
] | permissive | RishiMenon2004/med-bay | 7a8f6f0ea47ae23a4e78a0c87e3e52745b09f327 | ed039b1bf3b10fb1b5097567df28fb4575c95b18 | refs/heads/main | 2023-08-18T17:51:04.062791 | 2021-03-23T10:33:52 | 2021-03-23T10:33:52 | 407,439,729 | 0 | 0 | MIT | 2021-09-17T07:01:28 | 2021-09-17T07:01:27 | null | UTF-8 | Python | false | false | 541 | py | # Generated by Django 3.0.7 on 2020-12-19 06:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Depends on the latest patients/pharma migrations so the altered
    # relation target ('patients.Cases') exists.
    dependencies = [
        ('patients', '0003_cases_appointed_date'),
        ('pharma', '0003_prescription_status'),
    ]

    operations = [
        # Make Prescription.case a nullable one-to-one: deleting the Cases
        # row keeps the prescription and sets the link to NULL.
        migrations.AlterField(
            model_name='prescription',
            name='case',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='patients.Cases'),
        ),
    ]
| [
"marudhupaandian@gmail.com"
] | marudhupaandian@gmail.com |
ab3cd9228918e57b3233b0500f80929d0d936bb2 | 85a247759d026d03eb8b625667a5aa99bdace5b0 | /deep_learning_nano_degree/4_recurrent_neural_networks/language_translation/language_translation.py | 2e5ce1af4051fd693596d7f5a0a4da351d800331 | [] | no_license | mkao006/dl_udacity | 000d325bdeb507f2b57a25e592c34ec72287cb06 | a3f795b3c66c16946b0cdab0255cc1d79fc0f82f | refs/heads/master | 2022-12-15T06:50:49.403783 | 2018-01-30T23:45:52 | 2018-01-30T23:45:52 | 93,637,489 | 0 | 0 | null | 2022-12-07T23:59:22 | 2017-06-07T13:19:11 | Jupyter Notebook | UTF-8 | Python | false | false | 18,903 | py | import time
import numpy as np
import tensorflow as tf
import helper
import problem_unittests as tests
# Parallel corpus: one sentence per line, English/French aligned by line.
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
def inspect_data(source_text, target_text, view_sentence_range=(0, 10)):
    """Print corpus statistics and a sample of aligned sentence pairs."""
    unique_words = set(source_text.split())
    print('Dataset Stats')
    print('Roughly the number of unique words: {}'.format(len(unique_words)))

    source_sentences = source_text.split('\n')
    sentence_lengths = [len(sentence.split()) for sentence in source_sentences]
    print('Number of sentences: {}'.format(len(source_sentences)))
    print('Average number of words in a sentence: {}'.format(
        np.average(sentence_lengths)))

    start, stop = view_sentence_range
    print()
    print('English sentences {} to {}:'.format(start, stop))
    print('\n'.join(source_sentences[start:stop]))
    print()
    print('French sentences {} to {}:'.format(start, stop))
    print('\n'.join(target_text.split('\n')[start:stop]))
def text_to_ids(source_text, target_text, source_vocab_to_int,
                target_vocab_to_int):
    """
    Convert source and target text to proper word ids
    :param source_text: String that contains all the source text.
    :param target_text: String that contains all the target text.
    :param source_vocab_to_int: Dictionary to go from the source words to an id
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: A tuple of lists (source_id_text, target_id_text)
    """
    source_id_text = []
    for sentence in source_text.split('\n'):
        source_id_text.append(
            [source_vocab_to_int[word] for word in sentence.split()])

    # Every target sentence is terminated with the <EOS> id so the decoder
    # learns when to stop.
    eos_id = target_vocab_to_int['<EOS>']
    target_id_text = []
    for sentence in target_text.split('\n'):
        ids = [target_vocab_to_int[word] for word in sentence.split()]
        ids.append(eos_id)
        target_id_text.append(ids)

    return source_id_text, target_id_text
def model_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate, keep probability)
    """
    # NOTE(review): ``input`` shadows the Python builtin; kept as-is since
    # it is local to this function.
    input = tf.placeholder(tf.int32, shape=[None, None], name='input')
    targets = tf.placeholder(tf.int32, shape=[None, None], name='target')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    return input, targets, learning_rate, keep_prob
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
    """
    Preprocess target data for decoding
    :param target_data: Target Placeholder
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param batch_size: Batch Size
    :return: Preprocessed target data
    """
    go_id = target_vocab_to_int['<GO>']
    # Drop the last token of every sequence: the decoder only predicts it,
    # it never consumes it as input.
    truncated_data = tf.strided_slice(
        input_=target_data,
        begin=[0, 0],
        end=[batch_size, -1],
        strides=[1, 1])
    # Prepend the <GO> id so every decoder input sequence starts with it.
    start_signal = tf.fill(dims=[batch_size, 1], value=go_id)
    processed_decoding_input = tf.concat(
        [start_signal, truncated_data], axis=1)
    return processed_decoding_input
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
    """
    Create encoding layer
    :param rnn_inputs: Inputs for the RNN
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param keep_prob: Dropout keep probability
    :return: RNN state
    """
    cell = tf.contrib.rnn.BasicLSTMCell(num_units=rnn_size)
    cell_with_dropout = tf.contrib.rnn.DropoutWrapper(
        cell=cell, output_keep_prob=keep_prob)
    # NOTE(review): [cell] * num_layers reuses one cell instance for every
    # layer; newer TF 1.x versions reject this — confirm the pinned TF
    # version supports it.
    encoder = tf.contrib.rnn.MultiRNNCell(
        cells=[cell_with_dropout] * num_layers)
    # Only the final state is needed; the per-step outputs are discarded.
    _, encoder_state = tf.nn.dynamic_rnn(cell=encoder,
                                         inputs=rnn_inputs,
                                         dtype=tf.float32)
    return encoder_state
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
                         sequence_length, decoding_scope, output_fn,
                         keep_prob):
    """
    Create a decoding layer for training
    :param encoder_state: Encoder State
    :param dec_cell: Decoder RNN Cell
    :param dec_embed_input: Decoder embedded input
    :param sequence_length: Sequence Length
    :param decoding_scope: TenorFlow Variable Scope for decoding
    :param output_fn: Function to apply the output layer
    :param keep_prob: Dropout keep probability
    :return: Train Logits
    """
    # Teacher forcing: the decoder is fed the embedded ground-truth tokens.
    train_decoder_function = tf.contrib.seq2seq.simple_decoder_fn_train(
        encoder_state=encoder_state)
    train_pred, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(
        cell=dec_cell,
        decoder_fn=train_decoder_function,
        inputs=dec_embed_input,
        sequence_length=sequence_length,
        scope=decoding_scope)
    # Project the RNN outputs to vocabulary-sized logits.
    logit = output_fn(train_pred)
    return logit
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings,
                         start_of_sequence_id, end_of_sequence_id,
                         maximum_length, vocab_size, decoding_scope,
                         output_fn, keep_prob):
    # NOTE (Michael): Need to double check where the 'keep_prob' goes.
    """
    Create a decoding layer for inference
    :param encoder_state: Encoder state
    :param dec_cell: Decoder RNN Cell
    :param dec_embeddings: Decoder embeddings
    :param start_of_sequence_id: GO ID
    :param end_of_sequence_id: EOS Id
    :param maximum_length: The maximum allowed time steps to decode
    :param vocab_size: Size of vocabulary
    :param decoding_scope: TensorFlow Variable Scope for decoding
    :param output_fn: Function to apply the output layer
    :param keep_prob: Dropout keep probability
    :return: Inference Logits
    """
    # At inference time the decoder feeds its own previous prediction back
    # in, starting from <GO> and stopping at <EOS> or maximum_length - 1.
    infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference(
        output_fn=output_fn,
        encoder_state=encoder_state,
        embeddings=dec_embeddings,
        start_of_sequence_id=start_of_sequence_id,
        end_of_sequence_id=end_of_sequence_id,
        maximum_length=maximum_length - 1,
        num_decoder_symbols=vocab_size)
    infer_logit, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(
        cell=dec_cell,
        decoder_fn=infer_decoder_fn,
        scope=decoding_scope)
    return infer_logit
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size,
                   sequence_length, rnn_size, num_layers, target_vocab_to_int,
                   keep_prob):
    """
    Create decoding layer
    :param dec_embed_input: Decoder embedded input
    :param dec_embeddings: Decoder embeddings
    :param encoder_state: The encoded state
    :param vocab_size: Size of vocabulary
    :param sequence_length: Sequence Length
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param keep_prob: Dropout keep probability
    :return: Tuple of (Training Logits, Inference Logits)
    """
    cell = tf.contrib.rnn.BasicLSTMCell(num_units=rnn_size)
    cell_with_dropout = tf.contrib.rnn.DropoutWrapper(
        cell=cell, output_keep_prob=keep_prob)
    # NOTE(review): the same wrapped cell object is repeated for every
    # layer; depending on the TF version this shares state/variables
    # between layers -- confirm that this is intended.
    decoder = tf.contrib.rnn.MultiRNNCell(
        cells=[cell_with_dropout] * num_layers)
    with tf.variable_scope('decoding_scope') as decoding_scope:
        # NOTE (Michael): Need to double check the activation function
        # Linear projection from decoder outputs to vocabulary logits.
        output_fn = (lambda x: tf.contrib.layers.fully_connected(
            inputs=x,
            num_outputs=vocab_size,
            activation_fn=None,
            scope=decoding_scope))
        train_logit = decoding_layer_train(
            encoder_state=encoder_state,
            dec_cell=decoder,
            dec_embed_input=dec_embed_input,
            sequence_length=sequence_length,
            decoding_scope=decoding_scope,
            output_fn=output_fn,
            keep_prob=keep_prob)
    # Re-enter the same scope with reuse=True so that the inference
    # decoder shares the weights created by the training decoder above.
    with tf.variable_scope('decoding_scope', reuse=True) as decoding_scope:
        start_of_sequence_id = target_vocab_to_int['<GO>']
        end_of_sequence_id = target_vocab_to_int['<EOS>']
        infer_logit = decoding_layer_infer(
            encoder_state=encoder_state,
            dec_cell=decoder,
            dec_embeddings=dec_embeddings,
            start_of_sequence_id=start_of_sequence_id,
            end_of_sequence_id=end_of_sequence_id,
            maximum_length=sequence_length - 1,
            vocab_size=vocab_size,
            decoding_scope=decoding_scope,
            output_fn=output_fn,
            keep_prob=keep_prob)
    return train_logit, infer_logit
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
                  sequence_length, source_vocab_size, target_vocab_size,
                  enc_embedding_size, dec_embedding_size, rnn_size, num_layers,
                  target_vocab_to_int):
    """Wire the encoder and decoder into the full seq2seq network.

    :param input_data: Input placeholder
    :param target_data: Target placeholder
    :param keep_prob: Dropout keep probability placeholder
    :param batch_size: Batch Size
    :param sequence_length: Sequence Length
    :param source_vocab_size: Source vocabulary size
    :param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: Tuple of (Training Logits, Inference Logits)
    """
    # Encoder: embed the source tokens, then reduce them to a state.
    enc_embedded = tf.contrib.layers.embed_sequence(
        ids=input_data,
        vocab_size=source_vocab_size,
        embed_dim=enc_embedding_size)
    enc_state = encoding_layer(rnn_inputs=enc_embedded,
                               rnn_size=rnn_size,
                               num_layers=num_layers,
                               keep_prob=keep_prob)
    # Decoder input: targets shifted right with <GO> prepended.
    dec_input = process_decoding_input(
        target_data=target_data,
        target_vocab_to_int=target_vocab_to_int,
        batch_size=batch_size)
    # Decoder embeddings are a plain trainable lookup table.
    dec_embedding_weights = tf.Variable(
        tf.random_uniform([target_vocab_size, dec_embedding_size]))
    dec_embed_input = tf.nn.embedding_lookup(
        params=dec_embedding_weights,
        ids=dec_input)
    # Feed the encoder state and embedded targets to the decoder to get
    # the training and inference logits.
    return decoding_layer(
        dec_embed_input=dec_embed_input,
        dec_embeddings=dec_embedding_weights,
        encoder_state=enc_state,
        vocab_size=target_vocab_size,
        sequence_length=sequence_length,
        rnn_size=rnn_size,
        num_layers=num_layers,
        target_vocab_to_int=target_vocab_to_int,
        keep_prob=keep_prob)
def get_accuracy(target, logits):
    """Fraction of positions where argmax(logits) equals the target id.

    Both arrays are right-padded with zeros along the sequence axis so
    they can be compared even when their sequence lengths differ.
    """
    seq_len = max(target.shape[1], logits.shape[1])
    target_pad = seq_len - target.shape[1]
    if target_pad:
        target = np.pad(
            target,
            [(0, 0), (0, target_pad)],
            'constant')
    logits_pad = seq_len - logits.shape[1]
    if logits_pad:
        logits = np.pad(
            logits,
            [(0, 0), (0, logits_pad), (0, 0)],
            'constant')
    return np.mean(np.equal(target, np.argmax(logits, 2)))
def train_model(source,
                target,
                epochs=10,
                batch_size=128,
                rnn_size=256,
                num_layers=3,
                encoding_embedding_size=256,
                decoding_embedding_size=256,
                learning_rate=0.001,
                keep_probability=0.5,
                save_path='checkpoints/dev'):
    ''' Wrapper to train the model

    Builds the training graph, runs the optimization loop while printing
    per-batch train/validation accuracy, saves the checkpoint to
    save_path and records its location via helper.save_params.

    The first batch_size sentence pairs are held out as the validation
    set; the remaining pairs are used for training.
    '''
    train_source = source[batch_size:]
    train_target = target[batch_size:]
    valid_source = helper.pad_sentence_batch(source[:batch_size])
    valid_target = helper.pad_sentence_batch(target[:batch_size])
    # Reload the vocabularies produced by the preprocessing step.
    (source_int_text, target_int_text), (source_vocab_to_int,
                                         target_vocab_to_int), _ = (
        helper.load_preprocess())
    max_source_sentence_length = max([len(sentence)
                                      for sentence in source_int_text])
    # Build the graph
    train_graph = tf.Graph()
    with train_graph.as_default():
        input_data, targets, lr, keep_prob = model_inputs()
        # Default sequence length; overridden per batch via the feed dict.
        sequence_length = tf.placeholder_with_default(
            max_source_sentence_length, None, name='sequence_length')
        input_shape = tf.shape(input_data)
        # According to the original paper, reversing the input actually
        # improves the model.
        train_logits, inference_logits = seq2seq_model(
            input_data=tf.reverse(input_data, [-1]),
            target_data=targets,
            keep_prob=keep_prob,
            batch_size=batch_size,
            sequence_length=sequence_length,
            source_vocab_size=len(source_vocab_to_int),
            target_vocab_size=len(target_vocab_to_int),
            encoding_embedding_size=encoding_embedding_size,
            decoding_embedding_size=decoding_embedding_size,
            rnn_size=rnn_size,
            num_layers=num_layers,
            target_vocab_to_int=target_vocab_to_int)
        # Name the tensor so translate() can fetch it from the saved graph.
        tf.identity(inference_logits, 'logits')
        with tf.name_scope("optimization"):
            # Loss function
            cost = tf.contrib.seq2seq.sequence_loss(
                train_logits,
                targets,
                tf.ones([input_shape[0], sequence_length]))
            # Optimizer
            optimizer = tf.train.AdamOptimizer(lr)
            # Gradient Clipping
            gradients = optimizer.compute_gradients(cost)
            capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var)
                                for grad, var in gradients if grad is not None]
            train_op = optimizer.apply_gradients(capped_gradients)
    # Train the model
    with tf.Session(graph=train_graph) as sess:
        sess.run(tf.global_variables_initializer())
        for epoch_i in range(epochs):
            for batch_i, (source_batch, target_batch) in enumerate(
                    helper.batch_data(train_source, train_target, batch_size)):
                start_time = time.time()
                _, loss = sess.run(
                    [train_op, cost],
                    {input_data: source_batch,
                     targets: target_batch,
                     lr: learning_rate,
                     sequence_length: target_batch.shape[1],
                     keep_prob: keep_probability})
                # Dropout is disabled (keep_prob=1.0) when evaluating.
                batch_train_logits = sess.run(
                    inference_logits,
                    {input_data: source_batch, keep_prob: 1.0})
                batch_valid_logits = sess.run(
                    inference_logits,
                    {input_data: valid_source, keep_prob: 1.0})
                train_acc = get_accuracy(target_batch, batch_train_logits)
                valid_acc = get_accuracy(
                    np.array(valid_target), batch_valid_logits)
                # NOTE(review): end_time is computed but never reported.
                end_time = time.time()
                print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
                      .format(epoch_i, batch_i,
                              len(source_int_text) // batch_size,
                              train_acc, valid_acc, loss))
        # Save Model
        saver = tf.train.Saver()
        saver.save(sess, save_path)
        print('Model Trained and Saved')
    # Save parameters for checkpoint
    helper.save_params(save_path)
def sentence_to_seq(sentence, vocab_to_int):
    """Convert a sentence to a sequence of word ids.

    The sentence is lower-cased and split on whitespace; words missing
    from the vocabulary are mapped to the '<UNK>' id.

    :param sentence: String
    :param vocab_to_int: Dictionary to go from the words to an id
    :return: List of word ids
    """
    ids = []
    for word in sentence.lower().split():
        ids.append(vocab_to_int.get(word, vocab_to_int['<UNK>']))
    return ids
def translate(sentence):
    """Translate an English *sentence* with the saved model and print the
    input word ids/words and the predicted French word ids/words.

    :param sentence: English sentence to translate
    """
    (_,
     (source_vocab_to_int, target_vocab_to_int),
     (source_int_to_vocab, target_int_to_vocab)) = helper.load_preprocess()
    load_path = helper.load_params()
    # Bug fix: previously this used the module-level 'translate_sentence'
    # global instead of the 'sentence' parameter, so any argument other
    # than that global was silently ignored.
    translate_sentence_ind = sentence_to_seq(
        sentence, source_vocab_to_int)
    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        # Load saved model
        loader = tf.train.import_meta_graph(load_path + '.meta')
        loader.restore(sess, load_path)
        input_data = loaded_graph.get_tensor_by_name('input:0')
        logits = loaded_graph.get_tensor_by_name('logits:0')
        keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
        # Dropout is disabled (keep_prob=1.0) at inference time.
        translate_logits = sess.run(
            logits, {input_data: [translate_sentence_ind], keep_prob: 1.0})[0]
    print('Input')
    print(' Word Ids: {}'.format(
        [i for i in translate_sentence_ind]))
    print(' English Words: {}'.format(
        [source_int_to_vocab[i] for i in translate_sentence_ind]))
    print('\nPrediction')
    print(' Word Ids: {}'.format(
        [i for i in np.argmax(translate_logits, 1)]))
    print(' French Words: {}'.format(
        [target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
# Inspect data
inspect_data(source_text, target_text, (0, 5))
# Unit tests: verify every building block before training.
tests.test_text_to_ids(text_to_ids)
tests.test_model_inputs(model_inputs)
tests.test_process_decoding_input(process_decoding_input)
tests.test_encoding_layer(encoding_layer)
tests.test_decoding_layer_train(decoding_layer_train)
tests.test_decoding_layer_infer(decoding_layer_infer)
tests.test_decoding_layer(decoding_layer)
tests.test_seq2seq_model(seq2seq_model)
tests.test_sentence_to_seq(sentence_to_seq)
# Preprocess all the data and save it
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
# Load the preprocessed data
((source_int_text, target_int_text),
 (source_vocab_to_int, target_vocab_to_int),
 _) = helper.load_preprocess()
# Train the model
train_model(source_int_text,
            target_int_text,
            epochs=10,
            batch_size=256,
            rnn_size=256,
            num_layers=3,
            encoding_embedding_size=256,
            decoding_embedding_size=256,
            learning_rate=0.001,
            keep_probability=0.5,
            save_path='checkpoints/dev')
# Translate a sample sentence with the trained model.
translate_sentence = 'he saw a old yellow truck .'
translate(translate_sentence)
| [
"mkao006@gmail.com"
] | mkao006@gmail.com |
29d59797e23ce5b3c1c01c4ca79eddeffbe0447a | 981da55bfc1a0eb8ff5584b6c7a88c6f10c43d15 | /blog/urls.py | aa76875e4bdcb8e7db600228fd0dc668f5a987ad | [] | no_license | Rih/djangogirls | 1055e14737e3c8d73b363417e2f416294d9871a8 | c4439f8c27e40092bc02193cbc80cc670f59b73c | refs/heads/master | 2020-03-22T08:14:07.251838 | 2019-04-04T11:53:17 | 2019-04-04T11:53:17 | 139,753,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^$', views.post_list),
] | [
"rodrigo.ediaz.f@gmail.com"
] | rodrigo.ediaz.f@gmail.com |
456a38ad9b87e1b826c521e146df928c90163e88 | 0fbd56d4a2ee512cb47f557bea310618249a3d2e | /official/vision/beta/modeling/layers/roi_sampler.py | 46b4c349839f207291fc2ca42a601d9eaabce92c | [
"Apache-2.0"
] | permissive | joppemassant/models | 9968f74f5c48096f3b2a65e6864f84c0181465bb | b2a6712cbe6eb9a8639f01906e187fa265f3f48e | refs/heads/master | 2022-12-10T01:29:31.653430 | 2020-09-11T11:26:59 | 2020-09-11T11:26:59 | 294,675,920 | 1 | 1 | Apache-2.0 | 2020-09-11T11:21:51 | 2020-09-11T11:21:51 | null | UTF-8 | Python | false | false | 5,978 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ROI sampler."""
# Import libraries
import tensorflow as tf
from official.vision.beta.modeling.layers import box_matcher
from official.vision.beta.modeling.layers import box_sampler
from official.vision.beta.ops import box_ops
@tf.keras.utils.register_keras_serializable(package='Vision')
class ROISampler(tf.keras.layers.Layer):
  """Samples ROIs and assigns each sampled ROI its groundtruth targets."""

  def __init__(self,
               mix_gt_boxes=True,
               num_sampled_rois=512,
               foreground_fraction=0.25,
               foreground_iou_threshold=0.5,
               background_iou_high_threshold=0.5,
               background_iou_low_threshold=0,
               **kwargs):
    """Initializes the ROI sampler.

    Args:
      mix_gt_boxes: bool, whether the groundtruth boxes are appended to
        the proposed ROIs before matching and sampling.
      num_sampled_rois: int, the number of ROIs sampled per image.
      foreground_fraction: float in [0, 1], fraction of the sampled ROIs
        drawn from foreground boxes.
      foreground_iou_threshold: float, IoU above which a box counts as
        positive (foreground).
      background_iou_high_threshold: float, upper IoU bound of the
        background (negative) interval.
      background_iou_low_threshold: float, lower IoU bound of the
        background (negative) interval.
      **kwargs: other key word arguments passed to the base Layer.
    """
    self._config_dict = {
        'mix_gt_boxes': mix_gt_boxes,
        'num_sampled_rois': num_sampled_rois,
        'foreground_fraction': foreground_fraction,
        'foreground_iou_threshold': foreground_iou_threshold,
        'background_iou_high_threshold': background_iou_high_threshold,
        'background_iou_low_threshold': background_iou_low_threshold,
    }
    # The matcher labels ROIs as positive/negative/ignored; the sampler
    # then draws the requested foreground/background mixture from them.
    self._matcher = box_matcher.BoxMatcher(foreground_iou_threshold,
                                           background_iou_high_threshold,
                                           background_iou_low_threshold)
    self._sampler = box_sampler.BoxSampler(num_sampled_rois,
                                           foreground_fraction)
    super(ROISampler, self).__init__(**kwargs)

  def call(self, boxes, gt_boxes, gt_classes):
    """Matches proposals against groundtruth and subsamples them.

    Args:
      boxes: [batch_size, N, 4] proposed boxes in [ymin, xmin, ymax, xmax]
        format, in scaled-image coordinates.
      gt_boxes: [batch_size, MAX_NUM_INSTANCES, 4] groundtruth boxes,
        possibly padded with -1 for invalid coordinates.
      gt_classes: [batch_size, MAX_NUM_INSTANCES] groundtruth classes,
        possibly padded with -1 for invalid classes.

    Returns:
      Tuple of (sampled_rois, sampled_gt_boxes, sampled_gt_classes,
      sampled_gt_indices), each with K = num_sampled_rois entries per
      image.  sampled_gt_indices indexes into the original `gt_boxes`,
      i.e. gt_boxes[sampled_gt_indices[:, i]] = sampled_gt_boxes[:, i].
    """
    if self._config_dict['mix_gt_boxes']:
      # Cast first so the matcher below also sees the cast gt boxes.
      gt_boxes = tf.cast(gt_boxes, dtype=boxes.dtype)
      boxes = tf.concat([boxes, gt_boxes], axis=1)
    (matched_gt_boxes, matched_gt_classes, matched_gt_indices,
     positive_matches, negative_matches, ignored_matches) = self._matcher(
         boxes, gt_boxes, gt_classes)
    sampled_indices = self._sampler(positive_matches, negative_matches,
                                    ignored_matches)
    rois, roi_gt_boxes, roi_gt_classes, roi_gt_indices = (
        box_ops.gather_instances(sampled_indices, boxes, matched_gt_boxes,
                                 matched_gt_classes, matched_gt_indices))
    return rois, roi_gt_boxes, roi_gt_classes, roi_gt_indices

  def get_config(self):
    return self._config_dict

  @classmethod
  def from_config(cls, config):
    return cls(**config)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
1dba441eba9e895c8b00e03309a0bcd68e736e31 | d61d05748a59a1a73bbf3c39dd2c1a52d649d6e3 | /chromium/mojo/public/tools/bindings/pylib/mojom/generate/test_support.py | eb394619d2bf4855522d7157dff0d13e87c59850 | [
"BSD-3-Clause"
] | permissive | Csineneo/Vivaldi | 4eaad20fc0ff306ca60b400cd5fad930a9082087 | d92465f71fb8e4345e27bd889532339204b26f1e | refs/heads/master | 2022-11-23T17:11:50.714160 | 2019-05-25T11:45:11 | 2019-05-25T11:45:11 | 144,489,531 | 5 | 4 | BSD-3-Clause | 2022-11-04T05:55:33 | 2018-08-12T18:04:37 | null | UTF-8 | Python | false | false | 6,092 | py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import traceback
import module as mojom
# Support for writing mojom test cases.
# RunTest(fn) will execute fn, catching any exceptions. fn should return
# the number of errors that are encountered.
#
# EXPECT_EQ(a, b) and EXPECT_TRUE(b) will print error information if the
# expectations are not true and return a non zero value. This allows test cases
# to be written like this
#
# def Foo():
# errors = 0
# errors += EXPECT_EQ('test', test())
# ...
# return errors
#
# RunTest(Foo)
def FieldsAreEqual(field1, field2):
  """Returns True when the two struct fields are equivalent."""
  if field1 == field2:
    return True
  if field1.name != field2.name:
    return False
  if not KindsAreEqual(field1.kind, field2.kind):
    return False
  if field1.ordinal != field2.ordinal:
    return False
  return field1.default == field2.default
def KindsAreEqual(kind1, kind2):
  """Returns True when |kind1| and |kind2| describe the same mojom kind."""
  # Fast path: identical objects, or types that define value equality.
  if kind1 == kind2:
    return True
  # Kinds of different classes or different specs can never match.
  if kind1.__class__ != kind2.__class__ or kind1.spec != kind2.spec:
    return False
  if kind1.__class__ == mojom.Kind:
    # Simple kinds are fully described by their spec string.
    return kind1.spec == kind2.spec
  if kind1.__class__ == mojom.Struct:
    # Structs must agree on name, spec and every field, in order.
    if kind1.name != kind2.name or \
       kind1.spec != kind2.spec or \
       len(kind1.fields) != len(kind2.fields):
      return False
    for i in range(len(kind1.fields)):
      if not FieldsAreEqual(kind1.fields[i], kind2.fields[i]):
        return False
    return True
  if kind1.__class__ == mojom.Array:
    # Arrays match when their element kinds match.
    return KindsAreEqual(kind1.kind, kind2.kind)
  # Unsupported kind class: report it and treat the kinds as unequal.
  print 'Unknown Kind class: ', kind1.__class__.__name__
  return False
def ParametersAreEqual(parameter1, parameter2):
  """Returns True when the two method parameters are equivalent."""
  if parameter1 == parameter2:
    return True
  if parameter1.name != parameter2.name:
    return False
  if parameter1.ordinal != parameter2.ordinal:
    return False
  if parameter1.default != parameter2.default:
    return False
  return KindsAreEqual(parameter1.kind, parameter2.kind)
def MethodsAreEqual(method1, method2):
  """Returns True when the two interface methods are equivalent."""
  if method1 == method2:
    return True
  if method1.name != method2.name:
    return False
  if method1.ordinal != method2.ordinal:
    return False
  if len(method1.parameters) != len(method2.parameters):
    return False
  for param1, param2 in zip(method1.parameters, method2.parameters):
    if not ParametersAreEqual(param1, param2):
      return False
  return True
def InterfacesAreEqual(interface1, interface2):
  """Returns True when the two interfaces are equivalent."""
  if interface1 == interface2:
    return True
  if interface1.name != interface2.name:
    return False
  if len(interface1.methods) != len(interface2.methods):
    return False
  for method1, method2 in zip(interface1.methods, interface2.methods):
    if not MethodsAreEqual(method1, method2):
      return False
  return True
def ModulesAreEqual(module1, module2):
  """Returns True when the two mojom modules are equivalent."""
  if module1 == module2:
    return True
  if (module1.name != module2.name or
      module1.namespace != module2.namespace or
      len(module1.structs) != len(module2.structs) or
      len(module1.interfaces) != len(module2.interfaces)):
    return False
  for struct1, struct2 in zip(module1.structs, module2.structs):
    if not KindsAreEqual(struct1, struct2):
      return False
  for iface1, iface2 in zip(module1.interfaces, module2.interfaces):
    if not InterfacesAreEqual(iface1, iface2):
      return False
  return True
# Builds and returns a Module suitable for testing/
def BuildTestModule():
module = mojom.Module('test', 'testspace')
struct = module.AddStruct('teststruct')
struct.AddField('testfield1', mojom.INT32)
struct.AddField('testfield2', mojom.Array(mojom.INT32), 42)
interface = module.AddInterface('Server')
method = interface.AddMethod('Foo', 42)
method.AddParameter('foo', mojom.INT32)
method.AddParameter('bar', mojom.Array(struct))
return module
# Tests if |module| is as built by BuildTestModule(). Returns the number of
# errors
def TestTestModule(module):
errors = 0
errors += EXPECT_EQ('test', module.name)
errors += EXPECT_EQ('testspace', module.namespace)
errors += EXPECT_EQ(1, len(module.structs))
errors += EXPECT_EQ('teststruct', module.structs[0].name)
errors += EXPECT_EQ(2, len(module.structs[0].fields))
errors += EXPECT_EQ('testfield1', module.structs[0].fields[0].name)
errors += EXPECT_EQ(mojom.INT32, module.structs[0].fields[0].kind)
errors += EXPECT_EQ('testfield2', module.structs[0].fields[1].name)
errors += EXPECT_EQ(mojom.Array, module.structs[0].fields[1].kind.__class__)
errors += EXPECT_EQ(mojom.INT32, module.structs[0].fields[1].kind.kind)
errors += EXPECT_EQ(1, len(module.interfaces))
errors += EXPECT_EQ('Server', module.interfaces[0].name)
errors += EXPECT_EQ(1, len(module.interfaces[0].methods))
errors += EXPECT_EQ('Foo', module.interfaces[0].methods[0].name)
errors += EXPECT_EQ(2, len(module.interfaces[0].methods[0].parameters))
errors += EXPECT_EQ('foo', module.interfaces[0].methods[0].parameters[0].name)
errors += EXPECT_EQ(mojom.INT32,
module.interfaces[0].methods[0].parameters[0].kind)
errors += EXPECT_EQ('bar', module.interfaces[0].methods[0].parameters[1].name)
errors += EXPECT_EQ(
mojom.Array,
module.interfaces[0].methods[0].parameters[1].kind.__class__)
errors += EXPECT_EQ(
module.structs[0],
module.interfaces[0].methods[0].parameters[1].kind.kind)
return errors
def PrintFailure(string):
  """Prints |string| together with the file/line of the failed expectation.

  NOTE(review): assumes it is invoked via EXPECT_EQ/EXPECT_TRUE, so the
  frame three entries from the top of the stack is the test body --
  calling it directly would report the wrong location.
  """
  stack = traceback.extract_stack()
  frame = stack[len(stack)-3]
  sys.stderr.write("ERROR at %s:%d, %s\n" % (frame[0], frame[1], string))
  print "Traceback:"
  # Dump the stack up to (but not including) the EXPECT_* helper.
  for line in traceback.format_list(stack[:len(stack)-2]):
    sys.stderr.write(line)
def EXPECT_EQ(a, b):
  """Reports a failure when a != b; returns the error count (0 or 1)."""
  if a == b:
    return 0
  PrintFailure("%s != %s" % (a, b))
  return 1
def EXPECT_TRUE(a):
  """Reports a failure when a is falsy; returns the error count (0 or 1)."""
  if a:
    return 0
  PrintFailure('Expecting True')
  return 1
def RunTest(fn):
  """Runs test function |fn| and returns its error count.

  |fn| should return the number of errors it found.  Any exception it
  raises is caught, its traceback is printed to stderr, and the run is
  counted as a single error.
  """
  sys.stdout.write('Running %s...' % fn.__name__)
  try:
    errors = fn()
  except:
    # Bug fix: print_exc's first positional parameter is 'limit', not the
    # output stream; passing sys.stderr positionally raised a TypeError
    # instead of printing the traceback.
    traceback.print_exc(file=sys.stderr)
    errors = 1
  if errors == 0:
    sys.stdout.write('OK\n')
  elif errors == 1:
    sys.stdout.write('1 ERROR\n')
  else:
    sys.stdout.write('%d ERRORS\n' % errors)
  return errors
| [
"csineneo@gmail.com"
] | csineneo@gmail.com |
816c44aeab0edee9437c9a9d14a89d6d3f9adb72 | f2c296c34f5fcbc58f242a223086851150752b25 | /modules/form_bucket.py | d8e98435acd61c37893eb56255401e0194e4eaae | [] | no_license | chandradee23/ec2_admin | 82f6b416742a9db182b10678e98590e5db5090cc | ab569576df2b01a6faaa3923c2fc9df2bfb939ca | refs/heads/master | 2020-05-07T11:01:14.167430 | 2019-02-16T00:45:23 | 2019-02-16T00:45:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,744 | py | #echo s3fs#$BUCKET /mnt/$BUCKET fuse _netdev,uid=userbda,gid=analytics,allow_other,umask=002,endpoint=us-east-2 0 0 >> /etc/fstab
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 1 23:33:56 2018
@author: dfischer
"""
from PySide2.QtWidgets import *
from modules.functions import *
from modules import SettingsManager as sm, functions
class BucketForm(QDialog):
def __init__(self,parent):
super(BucketForm,self).__init__(parent)
self.settings = sm.settingsManager()
self.parent = parent
self.setMinimumWidth(400)
self.setWindowTitle("Bucket Connector")
self.api_access = self.settings.getParam('aws_access_key_id')
self.api_secret = self.settings.getParam('aws_secret_access_key')
self.region = self.settings.getParam('region')
self.user = self.settings.getParam("user")
self.mainLayout = QVBoxLayout()
self.session = self.settings.getSession()
s3 = self.session.resource("s3")
self.buckets = {x.name: x for x in list(s3.buckets.all())}
self.combo_bucket = QComboBox()
self.combo_bucket.addItems(list(self.buckets.keys()))
self.connect = QPushButton("Connect")
self.connect.clicked.connect(self.fm_connect)
self.mainLayout.addWidget(self.combo_bucket)
self.mainLayout.addWidget(self.connect)
self.setLayout(self.mainLayout)
def fm_connect(self):
settings = SettingsManager.settingsManager()
cmd_limpiar = 'cat /etc/fstab | grep -v {} > /etc/fstab.tmp;'.format(self.combo_bucket.currentText())
cmd_move = 'mv /etc/fstab.tmp /etc/fstab;'
cmd_fstab = 'echo s3fs#{bucket} /home/{user}/{bucket} fuse ' \
'_netdev,uid={user},allow_other,umask=002,endpoint={region} 0 0 ' \
'>> /etc/fstab;'.format(bucket = self.combo_bucket.currentText(),
region = self.region,
user = self.user)
cmd_api = 'echo '+self.api_access + ':' + self.api_secret+' > /etc/passwd-s3fs;' \
'chmod 640 /etc/passwd-s3fs;'
cmd_mount = 'mkdir /home/{user}/{bucket};mount /home/{user}/{bucket}'.format(bucket =self.combo_bucket.currentText(),
user = self.user)
cmd = cmd_limpiar +cmd_move + cmd_fstab + cmd_api + cmd_mount
print(cmd)
functions.run_script(cmd)
self.close()
if __name__ == '__main__':
    # Manual test harness: shows the dialog standalone, without the app.
    app = QApplication(sys.argv)
    window = BucketForm(None)
    window.show()
    # sys.exit(app.exec_())
    app.exec_()
| [
"danielfm123@gmail.com"
] | danielfm123@gmail.com |
1a8972f5e58acab710e90a551d29145682b229c8 | 2d91e44b4c474b826ee109d7eebace6a07e22f7e | /test/test_del_group.py | cb0c7b1b5e7cfd9848b14e85fb73c8ad82e4d63d | [
"Apache-2.0"
] | permissive | Mai-ah/python4testers | 5c0fa3416bd4393813090960db3412a38766ee94 | 457751759c77d9fb06d8567aaf93787928503456 | refs/heads/master | 2021-01-22T07:22:52.016208 | 2017-04-19T22:38:35 | 2017-04-19T22:38:35 | 81,809,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | # -*- coding: utf-8 -*-
from model.group import Group
import random
def test_del_some_group(app, db, check_ui):
    """Deletes a random group and verifies the DB (and optionally the UI)."""
    # Make sure there is at least one group to delete.
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="test"))
    groups_before = db.get_group_list()
    victim = random.choice(groups_before)
    app.group.del_group_by_id(victim.id)
    groups_after = db.get_group_list()
    groups_before.remove(victim)
    assert groups_before == groups_after
    if check_ui:
        assert sorted(groups_after, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
| [
"milena.zahorska@stxnext.pl"
] | milena.zahorska@stxnext.pl |
8ec2d2db2d68e554d38973660c0b8a31ff674532 | 0dcd04348eb8364973d775380a242066cfcf42e0 | /src/parser.py | 5309d3074a42d89dce8f684618ea8c805250d0d0 | [] | no_license | zatserkl/donation-analytics-project | b5877511b0efb550f79c5076fa9be40d3e32a282 | 1827f9e43baa4c2faa6d675f4a57ce181011e297 | refs/heads/master | 2021-05-01T03:22:05.103981 | 2018-02-14T04:23:22 | 2018-02-14T04:23:22 | 121,191,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,474 | py | # Andriy Zatserklyaniy <zatserkl@gmail.com> Feb 12, 2018
from __future__ import print_function # to run this code under python27
class LineParser:
    """Parses one FEC contribution record and runs basic validity checks.

    A record is a sequence of 21 column values (named as in the FEC data
    dictionary).  ``parse`` returns True for a valid individual
    contribution and fills ``recipient``, ``donor_name``, ``zip_code``,
    ``year`` and ``amount``; it returns False for every invalid record.
    """

    def __init__(self):
        # When True, every rejected line is explained on stdout.
        self.debug = False
        # Make the data attributes exist even before the first parse().
        self.clear()

    def clear(self):
        """Clear the data fields before reading them from the line."""
        self.recipient = ""
        self.donor_name = ""
        self.zip_code = ""  # kept as a string to preserve leading zeros
        self.year = ""
        self.amount = 0.

    def _fail(self, *message):
        """Optionally explain why a line was rejected; always return False."""
        if self.debug:
            print(*message)
        return False

    def parse(self, line):
        """Parse one record, assign the data fields and run basic checks.

        :param line: sequence of the 21 column values of one input record
        :return: True for a valid line, False otherwise
        """
        self.clear()
        # Column indices, named exactly as in the FEC documentation.
        (CMTE_ID, AMNDT_IND, RPT_TP, TRANSACTION_PGI, IMAGE_NUM,
         TRANSACTION_TP, ENTITY_TP, NAME, CITY, STATE, ZIP_CODE, EMPLOYER,
         OCCUPATION, TRANSACTION_DT, TRANSACTION_AMT, OTHER_ID, TRAN_ID,
         FILE_NUM, MEMO_CD, MEMO_TEXT, SUB_ID) = range(21)

        # Basic checks, see chapter "Input files" of the Challenge description.
        if len(line[OTHER_ID]) > 0:
            # A non-empty OTHER_ID marks a non-individual contribution.
            return self._fail("not empty OTHER_ID:", line[OTHER_ID])
        if len(line[TRANSACTION_DT]) < 8:
            return self._fail("empty TRANSACTION_DT")
        if len(line[NAME]) == 0:
            return self._fail("empty NAME")
        if len(line[CMTE_ID]) == 0:
            return self._fail("empty CMTE_ID")
        if len(line[TRANSACTION_AMT]) == 0:
            return self._fail("empty TRANSACTION_AMT")
        if len(line[ZIP_CODE]) < 5:
            return self._fail("Malformed ZIP_CODE")

        try:
            self.amount = float(line[TRANSACTION_AMT])
        except ValueError as e:
            return self._fail("Malformed contribution amount:", e)
        if self.amount <= 0:
            return self._fail("Malformed TRANSACTION_AMT:", line[TRANSACTION_AMT])

        try:
            # Convert the MMDDYYYY date to make sure that it is well-formed.
            month = int(line[TRANSACTION_DT][:2])
            day = int(line[TRANSACTION_DT][2:4])
            year = int(line[TRANSACTION_DT][4:])
        except ValueError as e:
            return self._fail("Malformed date:", e)
        if year < 1900 or year > 2100:
            return self._fail("Malformed date")
        if month < 1 or month > 12:
            return self._fail("Malformed date")
        if day < 1 or day > 31:
            return self._fail("Malformed date")

        self.recipient = line[CMTE_ID]
        self.donor_name = line[NAME]
        self.zip_code = line[ZIP_CODE][:5]  # keep only the 5-digit prefix
        self.year = line[TRANSACTION_DT][4:]
        return True
| [
"zatserkl@gmail.com"
] | zatserkl@gmail.com |
7c46d8a9418deb8e9a5a2c6c23f07e133a2c2d90 | 20a6fb2ff61e01c30c73079f7f90f2489b5a9123 | /spider_wooyun.py | 4089ebdffb0d77cfcf8a78ec7cd128c2d295a613 | [] | no_license | JASON0916/spiders | 4e20da55d6c3782dd410da05ec7bd5a970fb5031 | f39b487cd96081abc82102ceb19133e2d3f7abb8 | refs/heads/master | 2020-12-24T08:30:28.694780 | 2016-08-12T02:51:44 | 2016-08-12T02:51:44 | 30,913,988 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,007 | py | Enter file contents here
import mysql.connector
import sys, os
import urllib.request
import re
import itertools
import base64
# Search keyword; only search_item needs to be changed for new searches.
search_item='金融'
# Convert the keyword to a bytes string, then base64-encode it (the site
# expects the query keyword base64-encoded in the URL).
bytesString = search_item.encode(encoding="utf-8")
encodestr = base64.b64encode(bytesString)
# MySQL connection settings.
user = 'root'
pwd = ''
host = '127.0.0.1'
db = 'test'
data_file = 'wooyun.dat'
# SQL statements used below: table creation, insertion, row count and
# duplicate removal.
create_table_sql = "CREATE TABLE IF NOT EXISTS mytable ( serial_number_sql varchar(100), title_sql varchar(100), \
loophole_type_sql varchar(100) , industry_sql varchar(100) , author_sql varchar(100) , yield_time_sql varchar(100), \
loophole_mood_sql varchar(100), hazard_rating_sql varchar(100), reveal_mood_sql varchar(200),\
detail_sql varchar(5000), repair_sql varchar(2000), path_sql varchar(50))\
CHARACTER SET utf8"
insert_sql = "INSERT INTO mytable (serial_number_sql, title_sql, loophole_type_sql, industry_sql, \
author_sql, yield_time_sql, loophole_mood_sql, hazard_rating_sql, reveal_mood_sql, \
detail_sql, repair_sql, path_sql) VALUES ( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
query_num_sql="select count(serial_number_sql) from mytable"
remove_duplicated_sql="delete from mytable where serial_number_sql in (select serial_number_sql from \
(select serial_number_sql from mytable group by serial_number_sql having count(serial_number_sql) > 1)as total)"
# Open the MySQL connection and cursor shared by the whole script.
cnx = mysql.connector.connect(user=user, password=pwd, host=host, database=db)
cursor = cnx.cursor()
def create_table_sql_api(a):
    """Execute the CREATE TABLE statement |a| on the module-level cursor.

    Prints the MySQL error and exits the whole program on failure.
    """
    try:
        cursor.execute(a)
    except mysql.connector.Error as err:
        print("create table 'mytable' failed.")
        print("Error: {}".format(err.msg))
        sys.exit()
def insert_sql_api(a,b):
    """Execute the parameterized INSERT statement |a| with values |b|.

    Prints the MySQL error and exits the whole program on failure.
    """
    try:
        cursor.execute(a,b)
    except mysql.connector.Error as err:
        print("insert table 'mytable' failed.")
        print("Error: {}".format(err.msg))
        sys.exit()
# Create the result table (the script exits if this fails).
create_table_sql_api(create_table_sql)
# mysql database
# Search entry URL, with the base64-encoded keyword as query parameter.
starturl="http://www.wooyun.org/searchbug.php?q="+encodestr.decode()
loophole=[]
nextpage=[]
result=[]
# Variables related to page navigation (filled in by get_nextpage/geturl).
def get_html_response(url):
    """Download *url* and return its body decoded as UTF-8 text."""
    return urllib.request.urlopen(url).read().decode('utf-8')
def geturl(starturl):
    """Return the vulnerability-detail URL paths found on *starturl*."""
    page = get_html_response(starturl)
    return re.findall(r'/bugs/wooyun-\w*-\w*\b', page)
def get_nextpage(starturl):
    """Append the relative URL of every additional result page to the
    module-level `nextpage` list and return that list.

    The total page count is scraped from text such as "12 页" on the first
    results page; if no marker is found, num_p stays 0 and nothing is added.
    """
    d=get_html_response(starturl)
    num_p=0
    # Extract the total page count from text such as "12 页" ("12 pages").
    num=re.findall(r'\d*\s页',d)
    for i in num:
        i=re.sub(r'\s页','',i)
        num_p=i  # keeps the LAST match, as a string
    # NOTE(review): range(1, N) yields pNO values 1..N-1 — presumably pNO is
    # 0-based with page 0 being `starturl` itself; confirm against the site.
    for x in range(1,int(num_p)):
        x='searchbug.php?q='+encodestr.decode()+'&pNO='+str(x)
        nextpage.append(x)
    return nextpage
def download_img(url):
    """Download one attachment image from a WooYun report into D:\\wooyun.

    The local filename is the URL basename (everything after .../upload/<id>/).
    """
    img_name = re.sub(r'http://wooyun.org/upload/\d*/', '', url)
    # Fix: the original path literal 'D:\wooyun\%s' relied on the invalid
    # escape sequence '\w' (SyntaxWarning on modern Python, an error in the
    # future); the doubled backslashes below produce the identical path.
    # The original also bound the result to a local named `download_img`,
    # shadowing this function — the return value is unused, so just call it.
    urllib.request.urlretrieve(url, 'D:\\wooyun\\%s' % img_name)
def download_html(i,title):
    """Save raw page text *i* as D:\\wooyun_html\\<title>.html and return the
    drive-relative path ('wooyun_html\\<title>.html') stored in the DB."""
    html_path = 'D:\\wooyun_html\\' + title + '.html'
    with open(html_path, 'w+', encoding='utf-8') as out:
        out.write(i)
    return 'wooyun_html\\' + title + '.html'
# Collect the report URLs from every result page.
# NOTE(review): only pages returned by get_nextpage() are scraped here — the
# first results page (`starturl` itself) is never passed to geturl(); confirm
# whether pNO pagination duplicates it or its hits are silently skipped.
for i in get_nextpage(starturl):
    result+=geturl('http://wooyun.org/'+i)
# scan the per-bug URLs into `result`
result=set(result)# drop duplicate report URLs
# Per-report scratch variables, overwritten/cleared on each loop pass below.
serial_number_p=''
title_p=''
refered_industry_p=''
author_p=''
yield_time_p=''
loophole_type_p=''
loophole_mood_p=''
hazard_rating_p=''
reveal_mood_p=[]
detail_p=[]
repair_p=''
final=[]               # row assembled for the SQL insert
if_updated=False       # set when an already-stored report changed
updated_serial_num=[]  # serial numbers of changed reports (for the mail alert)
# vulnerability-related state defined above
cursor.execute(query_num_sql)
num_t=list(cursor.fetchall())
num_t=list(num_t[0])
num=int(num_t[0])      # row count before this crawl (used for the mail diff)
# Main crawl loop: fetch each report page, regex-scrape its fields, download
# its images, archive the HTML, and insert a row unless an identical row is
# already stored.
for i in result:
    # NOTE(review): `encodestr` is bytes — re.sub(str_pattern, bytes, str)
    # raises TypeError whenever `search_item` actually matches inside the
    # URL; this only works today because report URLs never contain it.
    k=get_html_response('http://wooyun.org/'+re.sub(search_item,encodestr,i))# download the report page into k
    # --- basic field extraction (regexes against the raw HTML) ---
    serial_number=re.findall(r'">WooYun-\w{4}-\w*',k)
    title=re.findall(r'漏洞标题:.*.</h3>',k)
    refered_industry=re.findall(r'相关厂商:.*.',k)
    author=re.findall(r'<a href="http://www.wooyun.org/whitehats/\S*">',k)
    yield_time=re.findall(r'提交时间:.*.',k)
    loophole_type=re.findall(r'漏洞类型:.*.',k)
    hazard_rating=re.findall(r'危害等级:.*.</h3>',k)
    loophole_mood=re.findall(r'漏洞状态:\s*\S*\s*</h3>',k)
    # --- detail extraction ---
    reveal_mood=re.findall(r'\d*-\d*-\d*:\s*\S*<br/>',k)
    detail=re.findall(r'<p class="detail">.*.</p>',k)
    repair=re.findall(r'修复方案:</h3>\s*<p class="detail">.*.\s*</p>',k)
    # --- basic field clean-up: strip labels/markup; each loop keeps only the
    # LAST match in its *_p variable ---
    for j in serial_number:
        j=re.sub(r'">','',j)
        serial_number_p=j
    for j in title:
        j=re.sub('漏洞标题:\t\t','',j)
        j=re.sub(r'\s</h3>','',j)
        title_p=j
    for j in refered_industry:
        j=re.sub(r'相关厂商:\t\t<a href="http://www.wooyun.org/corps/','',j)
        j=re.sub(r'">\r','',j)
        refered_industry_p=j
    for j in author:
        j=re.sub(r'<a href="http://www.wooyun.org/whitehats/','',j)
        j=re.sub(r'">','',j)
        author_p=j
    for j in yield_time:
        j=re.sub(r'提交时间:\t\t','',j)
        j=re.sub(r'</h3>\r','',j)
        yield_time_p=j
    for j in loophole_type:
        j=re.sub(r'漏洞类型:\t\t','',j)
        j=re.sub(r'</h3>\r','',j)
        loophole_type_p=j
    for j in hazard_rating:
        j=re.sub(r'危害等级:\t\t','',j)
        j=re.sub(r'</h3>','',j)
        hazard_rating_p=j
    for j in loophole_mood:
        j=re.sub(r'漏洞状态:\s*','',j)
        j=re.sub(r'\s*</h3>','',j)
        loophole_mood_p=j
    # --- detail clean-up ---
    for j in reveal_mood:
        j=re.sub('<br/>','',j)
        reveal_mood_p.append(j)
    for j in detail:# clean each detail paragraph
        j=re.sub(r':\s',':',j)
        j=re.sub(r'<p class="detail">','',j)
        j=re.sub(r'</p>','',j)
        j=re.sub(r'"\starget="_blank"><img\ssrc="/upload/.*.width="600"/></a>',',',j)
        j=re.sub(r'<a href="',' http://wooyun.org',j)
        j=re.sub(r'对本漏洞信息进行评价,.*.备学习价值','',j)
        detail_p.append(j)
    for j in repair:# clean the repair/fix section
        j=re.sub(r'</br>','',j)
        j=re.sub(r'</p>','',j)
        j=re.sub(r'修复方案:</h3>','',j)
        j=re.sub(r'<p\sclass="detail">','',j)
        j=re.sub(r':',':',j)
        j=j.split()
        repair_p=j
    # Flatten each scratch value into a plain string (chain(*s) over a string
    # iterates its characters, so joining reproduces the string unchanged).
    serial_number_str= "".join(itertools.chain(*serial_number_p))
    title_str="".join(itertools.chain(*title_p))
    loophole_type_str="".join(itertools.chain(*loophole_type_p))
    refered_industry_str="".join(itertools.chain(*refered_industry_p))
    author_str="".join(itertools.chain(*author_p))
    yield_time_str="".join(itertools.chain(*yield_time_p))
    loophole_mood_str="".join(itertools.chain(*loophole_mood_p))
    hazard_rating_str="".join(itertools.chain(*hazard_rating_p))
    detail_str="".join(itertools.chain(*detail_p))
    reveal_mood_str="".join(itertools.chain(*reveal_mood_p))
    repair_str="".join(itertools.chain(*repair_p))
    # Mirror every attachment image referenced by the detail text.
    img=re.findall(r'http://wooyun.org/upload/\d*/\w*\.\w{3}',detail_str)
    for j in img:
        download_img(j)
    path=download_html(k,serial_number_str)
    # collect the cleaned strings into `final`; converted to a tuple for SQL
    final.append(serial_number_str)
    final.append(title_str)
    final.append(loophole_type_str)
    final.append(refered_industry_str)
    final.append(author_str)
    final.append(yield_time_str)
    final.append(loophole_mood_str)
    final.append(hazard_rating_str)
    final.append(reveal_mood_str)
    final.append(detail_str)
    final.append(repair_str)
    final.append(path)
    # NOTE(review): string-interpolated SQL — a serial number containing a
    # quote would break/inject this query; prefer a %s parameter.
    query_update_sql="select * from mytable where serial_number_sql = '%s'"%serial_number_str
    cursor.execute(query_update_sql)
    # Insert only when the stored row differs from the freshly scraped one;
    # any difference also flags the report for the update e-mail.
    if final==list(cursor.fetchall()):
        pass
    else:
        if_updated=True
        updated_serial_num.append(serial_number_str)
        insert_sql_api(insert_sql,tuple(final))
    detail_p.clear()
    reveal_mood_p.clear()
    final.clear()
cursor.execute(remove_duplicated_sql)# remove duplicated rows
# Re-count rows to see how many new vulnerabilities this crawl added.
cursor.execute(query_num_sql)
num_t=list(cursor.fetchall())
num_t=list(num_t[0])
num2=int(num_t[0])
# --- mail alert module ---
# Sends a notification when new rows were added or existing reports changed.
# NOTE(review): server address, credentials, sender and recipient are all
# empty placeholders here — they must be filled in before this can send.
if(num2>num or if_updated):
    import smtplib
    import email.mime.multipart
    import email.mime.text
    msg=email.mime.multipart.MIMEMultipart()
    msg['from']=''
    msg['to']=''
    msg['subject']='告警邮件'
    content='您好,您的漏洞库新增%d'%(num2-num)+'条漏洞,请速去查看。'
    if num2>num:
        content+='从第%d条开始为新增的漏洞。'%num
    if if_updated:
        content+='以下乌云漏洞状态有更新:\n'
        for i in updated_serial_num:
            content+=i
            content+=','
    txt=email.mime.text.MIMEText(content)
    msg.attach(txt)
    # NOTE(review): the first assignment (smtp=smtplib) is dead — it is
    # immediately overwritten by the SMTP() instance.
    smtp=smtplib
    smtp=smtplib.SMTP()
    smtp.connect('','25')
    #example:address of smtp server
    smtp.login('','')
    #sender and keyword
    smtp.sendmail('','',str(msg))
    #sender & acceptor
    smtp.quit()
cnx.commit()
cursor.close()
cnx.close()
| [
"JASON0916PHOENIX@GMAIL.COM"
] | JASON0916PHOENIX@GMAIL.COM |
47d8eed9075c33a4d57f2b6b83d5a4a59086c59c | 60070c509749086eb385013160ee537e28f0fbf5 | /apitaqueria/apitaqueria/settings.py | b3fd6050544de3aaa48d4185f857d6bc3afa25cc | [] | no_license | arturoBP/taqueria | 0709de10fbb6fdbd85dcfe4777608dfab2db8856 | 0c59e06760fd7bcb6150238988274b011adf73a3 | refs/heads/master | 2021-01-20T05:13:22.685105 | 2017-03-04T03:34:02 | 2017-03-04T03:34:02 | 83,857,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,216 | py | """
Django settings for apitaqueria project.
Generated by 'django-admin startproject' using Django 1.9.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; load it from the
# environment and rotate it before any production deployment.
SECRET_KEY = '01uwvyda*6qg6m&5%_q0dv+74t7i%&$&$!-im8_qz_%db!w(2^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'apitacos',
    'rest_framework'
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'apitaqueria.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'apitaqueria.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Development default: file-based SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
# NOTE(review): Django language codes are conventionally lowercase ('es');
# confirm 'ES' resolves as intended.
LANGUAGE_CODE = 'ES'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"fatima beltran"
] | fatima beltran |
d149b5add1701e0ca3c77c9ad09001bfccd01188 | 99ea5ac1c5f09a569b2373474d1fc0b2be71f6b0 | /clean_plots.py | b1802a796535099ef87d3480069c9c5334f76a99 | [] | no_license | whiteted-strats/Lag_Analyser | e53e0cbcbaad1fea773d2c7ab77d20252c7b46f4 | 1ce60852adf0898751002412f4b44351f3c76254 | refs/heads/master | 2020-04-25T00:47:43.912801 | 2019-02-26T22:28:06 | 2019-02-26T22:28:06 | 172,388,854 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py |
from PIL import Image
import os
# Crop every frame in tmp_plots/ from 384x288 down to the 320x240 plot area
# and write the result under plots/ with the same filename.
for f in os.listdir("tmp_plots"):
    im = Image.open(os.path.join("tmp_plots", f))
    # Validate explicitly instead of `assert`, which is silently stripped
    # when Python runs with -O and would let wrong-sized images through.
    if im.size != (320 + 64, 240 + 48):
        raise ValueError("unexpected image size %r for %r" % (im.size, f))
    # Crop box is (left, upper, right, lower): 32..352 x 34..274.
    im.crop((32, 34, 352, 274)).save(os.path.join("plots", f))
"noreply@github.com"
] | noreply@github.com |
3c1535a2a07ed38a85551fd89caf43b2d1e16f7a | c1d63e4d798d9895419994c2ec87563246d24bab | /textchange/migrations/0010_auto_20151129_2212.py | 4f2982c4496af438018de6b91c01f02a9eb00792 | [
"MIT"
] | permissive | ProgrammingJoe/Texchange | 5d336dc4b2919c44a0c63bd58a0f939f3b16d498 | f0992f62092bf2362669cc3a8454be9d428e34bc | refs/heads/master | 2021-01-24T17:06:38.807892 | 2017-11-23T01:09:36 | 2017-11-23T01:09:36 | 36,322,874 | 0 | 0 | null | 2017-11-23T00:52:43 | 2015-05-26T20:48:06 | JavaScript | UTF-8 | Python | false | false | 427 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Removes the Feedback model: drops its `user` foreign key first, then
    deletes the model itself (as generated by makemigrations)."""

    dependencies = [
        ('textchange', '0009_auto_20151129_2210'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='feedback',
            name='user',
        ),
        migrations.DeleteModel(
            name='Feedback',
        ),
    ]
| [
"joeczepil@gmail.com"
] | joeczepil@gmail.com |
3404f3e49a25f966611424cfd7a383685b804ee8 | df4ab1a98e59bb995bf9866041feccf732918ceb | /mysite/polls/migrations/0002_auto_20201113_1920.py | 185f3600ef609697b65291de4c776ac57f324b3e | [] | no_license | zhasny17/Django-tutorial | 4ca7a630c11f5087c41bb144188f993245a69f4d | 8c299f285d91675b941876a607cae6e77d8ff6bb | refs/heads/master | 2023-01-20T21:29:03.209266 | 2020-11-17T20:40:57 | 2020-11-17T20:40:57 | 308,008,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | # Generated by Django 3.1.2 on 2020-11-13 19:20
from django.db import migrations
class Migration(migrations.Migration):
    """Renames Question.pud_date to pub_date (fixes the field-name typo from
    the initial migration)."""

    dependencies = [
        ('polls', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='question',
            old_name='pud_date',
            new_name='pub_date',
        ),
    ]
| [
"lucasluz@fortalsistemas.com.br"
] | lucasluz@fortalsistemas.com.br |
9703ffbfa97298169431afeb7f45ea557aa014ec | 4b0569763f6b5dcaee25369ea35560af27c89f09 | /tests/test_average_pixels.py | c45c45f52c1c0926e61a1a5646573ba2e513f09e | [
"MIT"
] | permissive | liviu-/average-pixels | 4ddddaab466d5743144d7f62d689546dc85da847 | 0f798fbef40c5fae66271f18566cfbdb9def3505 | refs/heads/develop | 2022-11-27T05:05:01.624361 | 2016-09-01T19:57:18 | 2016-09-01T19:57:18 | 60,898,052 | 8 | 0 | MIT | 2022-11-22T01:08:45 | 2016-06-11T08:29:56 | Python | UTF-8 | Python | false | false | 1,567 | py | import os
import glob
from average_pixels import average_pixels as ap
from average_pixels import SAVE_DIR
from average_pixels.average_pixels import WIDTH, HEIGHT, MAX_INTENSITY
SAVE_DIR_TEST = SAVE_DIR + '_test'
def download_images(term='anything', count=10):
    # Fetch `count` images for `term` via the library's image search/save.
    return ap.save_images(term, count)
# Module-level fixture: reuse cached test images if present, otherwise
# download a fresh set.  NOTE(review): this runs at import time, so merely
# collecting these tests can trigger network downloads — consider a fixture.
filenames = glob.glob(SAVE_DIR_TEST + "/*") if os.path.exists(SAVE_DIR_TEST) else download_images()
image = ap.average_images(filenames)
def test_default_average_images_size():
    # Bug fix: `assert WIDTH, HEIGHT == ...` parsed as `assert WIDTH` with
    # `HEIGHT == ...` as the (never-evaluated-on-success) assertion message,
    # so the test could never fail.  Compare the tuple explicitly.
    # NOTE(review): numpy image shapes are usually (rows, cols) ==
    # (HEIGHT, WIDTH); the author wrote (WIDTH, HEIGHT) — confirm the axis
    # order against average_images().
    assert (WIDTH, HEIGHT) == ap.average_images(filenames).shape
def test_modified_average_images_size():
    global WIDTH
    global HEIGHT
    WIDTH += 100
    HEIGHT += 100
    # Bug fix: the bare tuple assert (`assert WIDTH, HEIGHT == ...`) always
    # passed; compare the full tuple instead.
    # NOTE(review): these `global` statements rebind THIS module's imported
    # copies of WIDTH/HEIGHT only — they do not change average_pixels' own
    # module globals, so average_images() presumably still renders at the
    # original size; verify this test exercises what its name claims.
    assert (WIDTH, HEIGHT) == ap.average_images(filenames).shape
def test_modified_average_images_size_not_square():
    global WIDTH
    global HEIGHT
    WIDTH -= 100
    # Bug fix: same always-passing bare tuple assert as the tests above.
    # NOTE(review): relies on the previous test having already bumped WIDTH
    # and HEIGHT (+100) — the tests are order-dependent; confirm intent.
    assert (WIDTH, HEIGHT) == ap.average_images(filenames).shape
def test_offset_image_over_max_intensity():
    """Offsetting past the maximum must still clamp pixels to MAX_INTENSITY."""
    over_the_top = MAX_INTENSITY + 100
    offset = ap.offset_image(image, over_the_top)
    assert offset.max() <= MAX_INTENSITY
def test_unweighted_images_are_the_same_different_runs():
    """Without weighting, averaging the same files is deterministic."""
    first = ap.average_images(filenames, weighted=False)
    second = ap.average_images(filenames, weighted=False)
    assert (first == second).all()
def test_weighted_images_are_differet_different_runs():
    # Consistency fix: the locals were named "unweighted_*" although both
    # calls pass weighted=True (copy-paste from the previous test).
    weighted_first = ap.average_images(filenames, weighted=True)
    weighted_second = ap.average_images(filenames, weighted=True)
    # NOTE(review): `.all()` demands EVERY pixel differ between runs; if
    # "different" only means "not identical", `(a != b).any()` is the right
    # check — confirm against the randomness in average_images().
    assert (weighted_first != weighted_second).all()
| [
"hi@liviu.me"
] | hi@liviu.me |
a07113903ecfd7a15611e6321b364d3204f11ee3 | a26f5826cb08e759aeef857cab33259f61e736dc | /ramid/fake_data.py | 38e6b96f3b662b1aac1d2115bb5b2d7a22bc3704 | [] | no_license | Sijiu/talkpy-ramid | 3cdbb414a924e4a98e78065c8ba3fa456ed11357 | f708d0d5894d64ab74f4a4f118e93f3dc819ab7b | refs/heads/master | 2020-05-03T00:47:53.401515 | 2019-03-29T03:58:10 | 2019-03-29T03:58:10 | 178,318,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: mxh @time:2019/3/29 10:43
"""
def get_orders():
    """Return a fixed list of fake order dicts ({'name': str, 'price': float})."""
    items = (
        ('Cereal', 4.99),
        ('Cheese', 2.15),
        ('Milk', 6.99),
        ('Oranges', 2.54),
        ('Apples', 1.99),
        ('Bread', 2.99),
    )
    return [{'name': name, 'price': price} for name, price in items]
def get_greetings():
    """Return the fixed list of names to greet."""
    names = ("Tom", "Bob")
    return list(names)
"mxh403@163.com"
] | mxh403@163.com |
1121f6a53acf5a9c8ef5d493abbc9e47d48c8622 | f5215d2abeeffab1f2340cd2e9584c65f4c8b091 | /new code/configure.py | 8f8e71475ea62163677dfb733a1e3b12fd2b3cf5 | [] | no_license | pengyanhui/LineaRE | aebca037d7e49bb7ff58723f65e4d55d085b5cad | 506bc1377ade11c7e1657291cf30f6358b536d71 | refs/heads/master | 2022-05-09T01:20:05.024645 | 2022-03-15T23:16:30 | 2022-03-15T23:16:30 | 233,355,178 | 25 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,681 | py | import json
import logging
import torch
class Configure(object):
    """Typed view over a JSON hyper-parameter file.

    Every scalar option in the file becomes an attribute of the same name;
    'device' is wrapped in torch.device and 'multiGPU' is exposed as
    `multi_gpu`.  When multi-GPU is requested the available GPU count is
    sanity-checked exactly as before: exit on zero GPUs, warn on one.
    """

    # JSON keys copied verbatim onto attributes of the same name.
    _PLAIN_KEYS = (
        'dim', 'norm_p', 'alpha', 'beta', 'gamma',
        'learning_rate', 'decay_rate', 'batch_size', 'neg_size',
        'regularization', 'drop_rate', 'test_batch_size',
        'data_path', 'save_path',
        'max_step', 'valid_step', 'log_step', 'test_log_step',
        'optimizer', 'init_checkpoint', 'use_old_optimizer',
        'sampling_rate', 'sampling_bias',
    )

    def __init__(self, config_path):
        with open(config_path, 'r') as fjson:
            json_config = json.load(fjson)
        for key in self._PLAIN_KEYS:
            setattr(self, key, json_config[key])
        self.device = torch.device(json_config['device'])
        self.multi_gpu = json_config['multiGPU']
        if self.multi_gpu:
            gpu_count = torch.cuda.device_count()
            if gpu_count == 0:
                logging.error('no GPUs!!!\n')
                exit(-1)
            if gpu_count == 1:
                logging.error('only one GPU!!!\nwill use only one GPU!!!')

    def setting(self, new_path):
        """Re-load every attribute from a different config file."""
        self.__init__(new_path)
# Import-time side effect: loads the FB15k config as soon as this module is
# imported; raises FileNotFoundError when run outside the project root.
config = Configure('config/config_FB15k.json')
| [
"noreply@github.com"
] | noreply@github.com |
a6a68143b6886cc7011672847f3ca8144462f48b | 1a48130b5b71dee249e66531290ddc5cba19d93a | /add_event_test.py | a69b87d27c27001684c1a71046c3e308a69aa45e | [] | no_license | HardyHu/FirstAutomationFramework | 456bf30c40354e67885d753874ff14bb884755ae | b770a7b361ed7f8c0dc635287d27d0f4fd9d8c49 | refs/heads/master | 2020-03-23T04:41:04.620754 | 2018-07-16T08:18:55 | 2018-07-16T08:18:55 | 137,856,242 | 0 | 0 | null | 2018-06-19T07:43:31 | 2018-06-19T07:33:38 | null | UTF-8 | Python | false | false | 2,370 | py | # -*- coding: utf8 -*-
__author__ = 'HardyHu'
import unittest
import requests
import os,sys
# Make the project root importable so `db_fixture` resolves when this file
# is run directly from its own directory.
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
from db_fixture import test_data
class AddEventTest(unittest.TestCase):
    """API tests for /api/add_event/ (parameter validation + success path)."""

    def setUp(self):
        self.base_url = 'http://127.0.0.1:8000/api/add_event/'

    def tearDown(self):
        # Every test stores the decoded response in self.result via _post.
        print(self.result)

    def _post(self, payload):
        """POST *payload* to the endpoint, cache and return the JSON body."""
        response = requests.post(self.base_url, data=payload)
        self.result = response.json()
        return self.result

    def test_add_event_all_null(self):
        """All parameters empty."""
        payload = {'eid':'','':'','limit':'','address':'','start_time':'',}
        result = self._post(payload)
        self.assertEqual(result['status'],10021)
        self.assertEqual(result['message'],'parameter error')

    def test_add_event_eid_exist(self):
        """Event id already exists."""
        payload = {'eid':1,'name':'一加6发布会','limit':2000,'address':'深圳宝体','start_time':'2017'}
        result = self._post(payload)
        self.assertEqual(result['status'],10022)
        self.assertEqual(result['message'],'event id already exists')

    def test_add_event_name_exist(self):
        """Event name already exists."""
        payload = {'eid':11,'name':'红米Pro发布会','limit':2000,'address':'深圳宝体','start_time':'2017'}
        result = self._post(payload)
        self.assertEqual(result['status'],10023)
        self.assertEqual(result['message'],'event name already exists')

    def test_add_event_data_type_error(self):
        """Malformed start_time is rejected."""
        payload = {'eid':8,'name':'一加6手机发布会','limit':2000,'address':'深圳宝体','start_time':'2017'}
        result = self._post(payload)
        self.assertEqual(result['status'],10024)
        self.assertIn('start_time format error.',result['message'])

    def test_add_event_success(self):
        """A fully valid event is accepted."""
        payload = {'eid':8,'name':'一加6手机发布会','limit':2000,
                   'address':'深圳宝体','start_time':'2018-05-20 13:14:20'}
        result = self._post(payload)
        self.assertEqual(result['status'],200)
        self.assertEqual(result['message'],'add event success')
if __name__ == '__main__':
    test_data.init_data() # seed the API's database with fixture data first
    unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
461cf65f28dea329b58a8685b0d9103c062b6eb9 | a3e722e34bc0e1b8d05920b637fa0501775494cf | /app/helpers.py | ee801000e8c68617f276c3ac28c6373cdbd09ccb | [] | no_license | LachlanMarnham/PersonalWebsite | 09a6ad99372ad1f8f10440919e1f50965de76717 | ef8712e763ae64a8fc8c2e688bb567d4462b9b21 | refs/heads/master | 2022-12-10T15:24:43.269400 | 2019-05-05T23:17:00 | 2019-05-05T23:17:00 | 136,801,777 | 0 | 0 | null | 2022-09-16T17:54:18 | 2018-06-10T11:09:16 | JavaScript | UTF-8 | Python | false | false | 2,836 | py | from typing import Tuple
from collections import namedtuple
from enum import Enum, unique
from flask import url_for
# Immutable record for one navigation entry: display title, resolved URL,
# and the NavContext used to highlight the active item.
MenuItem = namedtuple('MenuItem', ('title', 'url', 'context'))
@unique
class NavContext(Enum):
    """Identifies which navigation entry a rendered page belongs to.

    Values are stable ints; @unique forbids accidental aliases.
    """
    home = 0
    music = 1
    projects = 2
    partita = 3
    cv = 4
def __init__(self):
self.home = MenuItem(
title='Home',
url=url_for('app.home'),
context=NavContext.home,
)
self.music = MenuItem(
title='Music',
url=url_for('app.music'),
context=NavContext.music,
)
self.projects = MenuItem(
title='Projects',
url=url_for('app.projects'),
context=NavContext.projects,
)
self.partita = MenuItem(
title='Partita',
url=url_for('app.partita'),
context=NavContext.partita,
)
self.cv = MenuItem(
title='CV',
url=url_for('app.cv'),
context=NavContext.cv,
)
def items(self) -> Tuple[MenuItem, MenuItem, MenuItem, MenuItem, MenuItem]:
return (
self.home,
self.music,
self.projects,
self.partita,
self.cv
)
class MainMenu(NavBarMenu):
    """Main-page menu: only a subset of the nav bar's items (no 'Home')."""

    def items(self) -> Tuple[MenuItem, MenuItem, MenuItem, MenuItem]:
        return (self.music, self.projects, self.partita, self.cv)
class MusicVideos:
    """Embeddable YouTube URLs for the music page, plus the iframe options
    the templates apply to every embed."""

    frame_border = 0
    allowed_options = "accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"

    # On-page display order of the videos (attribute names).
    _DISPLAY_ORDER = (
        '_claire_de_lune',
        '_rumores_de_la_caleta',
        '_se_ela_perguntar',
        '_asturias',
        '_nocturne',
        '_caprico_arabe',
        '_recuerdos_de_la_alhambra',
        '_prelude',
        '_registro',
    )

    def __init__(self):
        self._claire_de_lune = "https://www.youtube.com/embed/NhCcd-7DxAk?wmode=transparent"
        self._se_ela_perguntar = "https://www.youtube.com/embed/cTjFNhBM-pw"
        self._rumores_de_la_caleta = "https://www.youtube.com/embed/adUsXe464ow"
        self._recuerdos_de_la_alhambra = "https://www.youtube.com/embed/x6OhZz1qHMI"
        self._caprico_arabe = "https://www.youtube.com/embed/PpdDOV3NmCM"
        self._asturias = "https://www.youtube.com/embed/wfb5orlwtHE"
        self._nocturne = "https://www.youtube.com/embed/MqQdqJ7osYQ"
        self._prelude = "https://www.youtube.com/embed/Cb3uU9kUPgo"
        self._registro = "https://www.youtube.com/embed/3qPen_gpJX4"

    @property
    def urls(self) -> Tuple[str, str, str, str, str, str, str, str, str]:
        """The nine embed URLs in on-page display order."""
        return tuple(getattr(self, name) for name in self._DISPLAY_ORDER)
| [
"lachlan.marnham@gmail.com"
] | lachlan.marnham@gmail.com |
66e34708e4306f979f9fb0805e11116b0f55e041 | 176ebc25c8f2cd361ca9135fc2c2b1df32386371 | /horizen_code/tools/train.py | 90888007f4750ba7db351b7ab79170086176b198 | [
"MIT"
] | permissive | yangJirui/DOTA-DOAI | a3ba7c0e17ebad69eb24ad373d3ffb74f84b2cb3 | aec4a2085d62a941c1b3f50fc9a452b2c8d3cd9f | refs/heads/master | 2022-02-23T07:42:52.565128 | 2019-04-16T07:05:05 | 2019-04-16T07:05:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,387 | py | # -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import tensorflow.contrib.slim as slim
import os, sys
import numpy as np
import time
# Make the repo root importable so libs/, data/ and help_utils/ resolve when
# this script is run from its own directory.
sys.path.append("../")
from libs.configs import cfgs
from libs.networks import build_whole_network
from data.io.read_tfrecord import next_batch
from libs.box_utils import show_box_in_tensor
from libs.box_utils.coordinate_convert import back_forward_convert
from libs.box_utils.boxes_utils import get_horizen_minAreaRectangle
from help_utils import tools
# Restrict TensorFlow to the GPUs configured for this experiment.
os.environ["CUDA_VISIBLE_DEVICES"] = cfgs.GPU_GROUP
def train():
    """Build the rotated-box detection network and run the TF1 training loop.

    Reads batches from tfrecords, optimizes RPN + Fast R-CNN losses (plus an
    optional supervised mask loss), logs scalars/images to TensorBoard, and
    periodically saves checkpoints until cfgs.MAX_ITERATION steps.
    """
    if not cfgs.USE_CONCAT:
        faster_rcnn = build_whole_network.DetectionNetwork(base_network_name=cfgs.NET_NAME,
                                                           is_training=True)
    else:
        from libs.networks import build_whole_network_Concat
        faster_rcnn = build_whole_network_Concat.DetectionNetwork(base_network_name=cfgs.NET_NAME,
                                                                  is_training=True)
    with tf.name_scope('get_batch'):
        img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \
            next_batch(dataset_name=cfgs.DATASET_NAME,  # 'pascal', 'coco'
                       batch_size=cfgs.BATCH_SIZE,
                       shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
                       is_training=True)
    '''
    r_gtboxes_and_label is (x, y, w, h, theta, label)
    gtboxes_and_label is (xmin, ymin, xmax, ymax, label)
    '''
    # Convert 8-point ground-truth boxes to (x, y, w, h, theta, label) on the
    # CPU via py_func, then also derive the axis-aligned 5-tuple form.
    # gtboxes_and_label = tf.reshape(gtboxes_and_label_batch, [-1, 5])
    gtboxes_and_label = tf.py_func(back_forward_convert,
                                   inp=[tf.squeeze(gtboxes_and_label_batch, 0)],
                                   Tout=tf.float32)
    r_gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 6])  # [x, y, w, h, theta, label]
    gtboxes_and_label = get_horizen_minAreaRectangle(gtboxes_and_label, tf.shape(img_batch)[1:])
    gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 5])
    biases_regularizer = tf.no_regularizer
    weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY)
    # list as many types of layers as possible, even if they are not used now
    with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, \
                         slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
                        weights_regularizer=weights_regularizer,
                        biases_regularizer=biases_regularizer,
                        biases_initializer=tf.constant_initializer(0.0)):
        final_bbox, final_scores, final_category, loss_dict = faster_rcnn.build_whole_detection_network(
            input_img_batch=img_batch,
            gtboxes_batch=gtboxes_and_label,
            gtboxes_r_batch=r_gtboxes_and_label)
    # ----------------------------------------------------------------------------------------------------build loss
    weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses())
    rpn_location_loss = loss_dict['rpn_loc_loss']
    rpn_cls_loss = loss_dict['rpn_cls_loss']
    rpn_total_loss = rpn_location_loss + rpn_cls_loss
    fastrcnn_cls_loss = loss_dict['fastrcnn_cls_loss']
    fastrcnn_loc_loss = loss_dict['fastrcnn_loc_loss']
    fastrcnn_total_loss = fastrcnn_cls_loss + fastrcnn_loc_loss
    if cfgs.USE_SUPERVISED_MASK:
        mask_total_loss = loss_dict['mask_loss']
        total_loss = rpn_total_loss + fastrcnn_total_loss + weight_decay_loss + mask_total_loss
    else:
        total_loss = rpn_total_loss + fastrcnn_total_loss + weight_decay_loss
    # ____________________________________________________________________________________________________build loss
    # ---------------------------------------------------------------------------------------------------add summary
    tf.summary.scalar('RPN_LOSS/cls_loss', rpn_cls_loss)
    tf.summary.scalar('RPN_LOSS/location_loss', rpn_location_loss)
    tf.summary.scalar('RPN_LOSS/rpn_total_loss', rpn_total_loss)
    tf.summary.scalar('FAST_LOSS/fastrcnn_cls_loss', fastrcnn_cls_loss)
    tf.summary.scalar('FAST_LOSS/fastrcnn_location_loss', fastrcnn_loc_loss)
    tf.summary.scalar('FAST_LOSS/fastrcnn_total_loss', fastrcnn_total_loss)
    tf.summary.scalar('LOSS/total_loss', total_loss)
    tf.summary.scalar('LOSS/regular_weights', weight_decay_loss)
    if cfgs.USE_SUPERVISED_MASK:
        tf.summary.scalar('LOSS/mask_loss', mask_total_loss)
    gtboxes_in_img = show_box_in_tensor.draw_boxes_with_categories(img_batch=img_batch,
                                                                   boxes=gtboxes_and_label[:, :-1],
                                                                   labels=gtboxes_and_label[:, -1])
    if cfgs.ADD_BOX_IN_TENSORBOARD:
        detections_in_img = show_box_in_tensor.draw_boxes_with_categories_and_scores(img_batch=img_batch,
                                                                                     boxes=final_bbox,
                                                                                     labels=final_category,
                                                                                     scores=final_scores)
        tf.summary.image('Compare/final_detection', detections_in_img)
    tf.summary.image('Compare/gtboxes', gtboxes_in_img)
    # ___________________________________________________________________________________________________add summary
    global_step = slim.get_or_create_global_step()
    # Step-wise LR schedule: LR, LR/10, LR/100 at the two DECAY_STEP points.
    lr = tf.train.piecewise_constant(global_step,
                                     boundaries=[np.int64(cfgs.DECAY_STEP[0]), np.int64(cfgs.DECAY_STEP[1])],
                                     values=[cfgs.LR, cfgs.LR / 10., cfgs.LR / 100.])
    tf.summary.scalar('lr', lr)
    optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)
    # optimizer = tf.train.AdamOptimizer(lr)
    # ---------------------------------------------------------------------------------------------compute gradients
    gradients = faster_rcnn.get_gradients(optimizer, total_loss)
    # enlarge_gradients for bias
    if cfgs.MUTILPY_BIAS_GRADIENT:
        gradients = faster_rcnn.enlarge_gradients_for_bias(gradients)
    if cfgs.GRADIENT_CLIPPING_BY_NORM:
        with tf.name_scope('clip_gradients_YJR'):
            gradients = slim.learning.clip_gradient_norms(gradients,
                                                          cfgs.GRADIENT_CLIPPING_BY_NORM)
    # _____________________________________________________________________________________________compute gradients
    # train_op
    train_op = optimizer.apply_gradients(grads_and_vars=gradients,
                                         global_step=global_step)
    summary_op = tf.summary.merge_all()
    init_op = tf.group(
        tf.global_variables_initializer(),
        tf.local_variables_initializer()
    )
    restorer, restore_ckpt = faster_rcnn.get_restorer()
    saver = tf.train.Saver(max_to_keep=15)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if not restorer is None:
            restorer.restore(sess, restore_ckpt)
            print('restore model')
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess, coord)
        summary_path = os.path.join(cfgs.SUMMARY_PATH, cfgs.VERSION)
        tools.mkdir(summary_path)
        summary_writer = tf.summary.FileWriter(summary_path, graph=sess.graph)
        # Main loop: plain step unless this step is a log step (print losses)
        # or a summary step (flush TensorBoard summaries).
        for step in range(cfgs.MAX_ITERATION):
            training_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            if step % cfgs.SHOW_TRAIN_INFO_INTE != 0 and step % cfgs.SMRY_ITER != 0:
                _, global_stepnp = sess.run([train_op, global_step])
            else:
                if step % cfgs.SHOW_TRAIN_INFO_INTE == 0 and step % cfgs.SMRY_ITER != 0:
                    start = time.time()
                    if cfgs.USE_SUPERVISED_MASK:
                        _, global_stepnp, img_name, rpnLocLoss, rpnClsLoss, rpnTotalLoss, \
                        fastrcnnLocLoss, fastrcnnClsLoss, fastrcnnTotalLoss, maskLoss, totalLoss = \
                            sess.run(
                                [train_op, global_step, img_name_batch, rpn_location_loss, rpn_cls_loss, rpn_total_loss,
                                 fastrcnn_loc_loss, fastrcnn_cls_loss, fastrcnn_total_loss, mask_total_loss, total_loss])
                    else:
                        _, global_stepnp, img_name, rpnLocLoss, rpnClsLoss, rpnTotalLoss, \
                        fastrcnnLocLoss, fastrcnnClsLoss, fastrcnnTotalLoss, totalLoss = \
                            sess.run(
                                [train_op, global_step, img_name_batch, rpn_location_loss, rpn_cls_loss, rpn_total_loss,
                                 fastrcnn_loc_loss, fastrcnn_cls_loss, fastrcnn_total_loss, total_loss])
                    end = time.time()
                    if cfgs.USE_SUPERVISED_MASK:
                        print(""" {}: step{} image_name:{} |\t
                                  rpn_loc_loss:{} |\t rpn_cla_loss:{} |\t rpn_total_loss:{} |
                                  fast_rcnn_loc_loss:{} |\t fast_rcnn_cla_loss:{} |\t fast_rcnn_total_loss:{} |
                                  mask_loss:{} |\t total_loss:{} |\t per_cost_time:{}s""" \
                              .format(training_time, global_stepnp, str(img_name[0]), rpnLocLoss, rpnClsLoss,
                                      rpnTotalLoss, fastrcnnLocLoss, fastrcnnClsLoss, fastrcnnTotalLoss, maskLoss,
                                      totalLoss, (end - start)))
                    else:
                        print(""" {}: step{} image_name:{} |\t
                                  rpn_loc_loss:{} |\t rpn_cla_loss:{} |\t rpn_total_loss:{} |
                                  fast_rcnn_loc_loss:{} |\t fast_rcnn_cla_loss:{} |\t fast_rcnn_total_loss:{} |
                                  total_loss:{} |\t per_cost_time:{}s""" \
                              .format(training_time, global_stepnp, str(img_name[0]), rpnLocLoss, rpnClsLoss,
                                      rpnTotalLoss, fastrcnnLocLoss, fastrcnnClsLoss, fastrcnnTotalLoss, totalLoss,
                                      (end - start)))
                else:
                    if step % cfgs.SMRY_ITER == 0:
                        _, global_stepnp, summary_str = sess.run([train_op, global_step, summary_op])
                        summary_writer.add_summary(summary_str, global_stepnp)
                        summary_writer.flush()
            if (step > 0 and step % cfgs.SAVE_WEIGHTS_INTE == 0) or (step == cfgs.MAX_ITERATION - 1):
                save_dir = os.path.join(cfgs.TRAINED_CKPT, cfgs.VERSION)
                tools.mkdir(save_dir)
                save_ckpt = os.path.join(save_dir, 'voc_' + str(global_stepnp) + 'model.ckpt')
                saver.save(sess, save_ckpt)
                print(' weights had been saved')
        coord.request_stop()
        coord.join(threads)
# Script entry point: kick off training when executed directly.
if __name__ == '__main__':
    train()
| [
"1192150908@qq.com"
] | 1192150908@qq.com |
6ea3fc70974521946eb18ef5d9e5c911a97d513d | 84e9a9a57001404af4595b60eeb817415592600b | /src/lisa_drive/scripts/vel_stabilizer.py | 5e7dc75fd5e92b1d80c4eb1df1aeeae521dc3e26 | [
"MIT"
] | permissive | ncos/lisa | f0589e18bea6e5b88abb2502ebb6406cdc09ef2c | 81d3197b41bd2746ab51ce5a6c2bfa461b9d3e23 | refs/heads/master | 2021-01-22T15:00:27.559370 | 2019-05-09T21:17:47 | 2019-05-09T21:17:47 | 33,986,312 | 0 | 0 | null | 2015-04-15T09:55:55 | 2015-04-15T09:55:54 | null | UTF-8 | Python | false | false | 1,897 | py | #!/usr/bin/env python
# ROS node: subscribes to DVS346 odometry and publishes an integral-style
# velocity command on /cmd_vel_2 to hold a target speed.
import roslib; roslib.load_manifest('lisa_drive')  # legacy rosbuild manifest load
import rospy
import sys
import argparse
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
import numpy as np
import matplotlib.pyplot as plt
class VelocityControl:
    """Integral speed controller: accumulates (target - measured) * alpha
    into `self.power` on every odometry message and publishes it as the
    linear.x of a Twist on /cmd_vel_2.

    NOTE(review): pure integrator with no windup clamp — `power` can grow
    without bound if the vehicle cannot reach `target_speed`; confirm the
    downstream consumer tolerates that.
    """
    def __init__(self, target_speed, alpha):
        # target_speed: desired speed (odometry units); alpha: integral gain.
        self.target_speed = target_speed
        self.alpha = alpha
        self.pub = rospy.Publisher('/cmd_vel_2', Twist, queue_size=20)
        self.sub = rospy.Subscriber('/DVS346/odom', Odometry, self.callback)
        # NOTE(review): opens an interactive matplotlib figure that is never
        # drawn to anywhere in this class — possibly leftover plotting code.
        plt.ion()
        plt.show()
        # self.sub = rospy.Subscriber("/cmd_vel", Twist, self.callback)
        self.speed = 0.05      # NOTE(review): never read — apparently unused
        self.messages = 0      # count of odometry messages received
        self.power = 0         # accumulated command published on /cmd_vel_2

    def publish(self,lin_pow):
        """Publish *lin_pow* as the linear.x of a Twist on /cmd_vel_2."""
        msg = Twist()
        msg.linear.x = lin_pow
        # Publish the message
        self.pub.publish(msg)
        # Log info
        # rospy.loginfo("Message published to /cmd_vel_2: \n\tLinear.x: %f\n\tAngular.z: %f", lin_pow, ang_pow)

    def callback(self, data):
        """Odometry callback: update the integral term and publish it."""
        lin_x = data.twist.twist.linear.x
        lin_y = data.twist.twist.linear.y
        lin_z = data.twist.twist.linear.z
        # Log info
        # rospy.loginfo("Message received from /DVS346/odom: \n\tLinear components: [%f, %f, %f]" % (lin_x, lin_y, lin_z))
        # Speed = Euclidean norm of the 3-D linear velocity.
        vel_comps = np.array([lin_x, lin_y, lin_z]).reshape((3, 1))
        vel_comps = vel_comps.reshape((3, 1))  # NOTE(review): redundant reshape
        spd = np.linalg.norm(vel_comps)
        self.messages = self.messages + 1
        self.power += ((self.target_speed - spd) * self.alpha)
        # Invoke method to publish message
        self.publish(self.power)
def main():
    """Parse CLI options, start the ROS node and spin until shutdown."""
    parser = argparse.ArgumentParser(description='code')
    parser.add_argument('-t', '--target_speed', type=float, default=0.3, help='Target speed of vehicle')
    parser.add_argument('-a', '--alpha', type=float, default=0.1, help='Alpha value for updating power')
    args = parser.parse_args()
    rospy.init_node('plotter_util', anonymous=True)
    # The controller keeps itself alive via its rospy subscriptions, so the
    # instance does not need to be bound to a name here.
    VelocityControl(args.target_speed, args.alpha)
    rospy.spin()


if __name__ == '__main__':
    main()
| [
"bcolebro@umd.edu"
] | bcolebro@umd.edu |
fed85901a7eb3200b3c289b1fe6e387840863bd0 | df071775e4cc39a56669469c7f59c253e618ddb6 | /backend/simps_restful/wsgi.py | 6965020abda571a96371b0617c74fcdab850a18f | [] | no_license | LNMIIT-CODING/simps-cnb | 371cb8742192ea51916ae4bcafddaf080e49d4d3 | 3a25100c65355c824f3eef24aadf3b99c436830e | refs/heads/master | 2023-03-20T18:30:57.494193 | 2021-02-26T13:51:50 | 2021-02-26T13:51:50 | 342,176,780 | 0 | 3 | null | 2021-02-26T13:51:51 | 2021-02-25T08:37:57 | Python | UTF-8 | Python | false | false | 403 | py | """
WSGI config for simps_restful project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'simps_restful.settings')
application = get_wsgi_application()
| [
"dreadarceus@gmail.com"
] | dreadarceus@gmail.com |
2a3b4c6c3858e836062f22a4320698cb132e99a1 | 8ec293c17d5a8635c4bf06cdd5b264429ec94d48 | /INF/INF tc1/BE1/temp.py | 60beeba0121e603c50e038f09dd6545edd9141e7 | [] | no_license | EmelineGOT/Centrale | 79636116ac18e3f5a31e67477d910edb0b105d9d | 8661f5010fbe60db3443ff0fc90e8df44d9dce4d | refs/heads/master | 2020-04-03T15:50:13.616829 | 2018-10-30T12:24:46 | 2018-10-30T12:24:46 | 155,379,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py |
a=int(input('Donner un entier:')) # Ask the user for the integer a
if (a%2==0): # If the remainder of the Euclidean division of a by 2 is zero...
    print(a,"est pair.") # ...the integer is even
else:
    print(a,"est impair.") # Otherwise the integer is odd
import math;
def quadratique(a, b, c):
    """Print the real solutions of a*x**2 + b*x + c = 0.

    Degenerate cases (a == 0) are reported as a single linear root, and
    the message "c=0" is printed when both a and b are zero.
    """
    if a == 0:
        # Linear (or fully degenerate) equation.
        if b != 0:
            print("Racine simple x = ", -c / b)
        else:
            print("c=0")
        return

    delta = b * b - 4 * a * c
    if delta < 0:
        print("Pas de racines réelles")
    elif delta > 0:
        # Two distinct real roots; compute the square root once.
        racine = math.sqrt(delta)
        print("x1 = ", (-b + racine) / (2 * a))
        print("x2 = ", (-b - racine) / (2 * a))
    else:
        # delta == 0: one double root.
        print("Racine double x1=x2=", -b / (2 * a))
import random;
def moyenne(a, b):
    """Return the arithmetic mean of ``a`` and ``b`` (always a float)."""
    somme = a + b
    return somme / 2
# Draw 100 samples from a Gaussian with mean 16 and standard deviation 2.
echantillon=[random.gauss(16,2) for n in range(100)]
# Fold the squared deviations pairwise through moyenne(); NOTE(review):
# this is an exponentially-weighted average, not the uniform mean, so the
# printed value only approximates the sample standard deviation.
m=(echantillon[0]-16)**2
for i in range(1,100):
    m=moyenne(m,(echantillon[i]-16)**2)
print(math.sqrt(m))
# Same folding applied to E[X^2] - 16^2; NOTE(review): m2 can come out
# negative for an unlucky sample, in which case math.sqrt raises.
carre=[i**2 for i in echantillon]
m2=carre[0]-16**2
for i in range(1,100):
    m2=moyenne(m2,carre[i]-16**2)
print(math.sqrt(m2))
| [
"got.emeline@gmail.com"
] | got.emeline@gmail.com |
cf4527d6e9245e38d0554d1b83b1a2874c00b008 | 0fcef1d34be0d8375c5ada73697f48fd8d507ea0 | /0x0B-python-input_output/1-write_file.py | a124ded86b4ad3d8579c482284a8e5969e6c7a7f | [] | no_license | jdcera4/holbertonschool-higher_level_programming | 1a81c43fa30ff2d9324bc1d3878d2769491d03a6 | 15cc85c2a6a9b8aaa209d47d393f27a6fa8615f2 | refs/heads/master | 2023-08-17T22:25:39.599851 | 2021-09-27T22:26:03 | 2021-09-27T22:26:03 | 361,773,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | #!/usr/bin/python3
"""function that writes a string to a text file (UTF8)
and returns the number of characters written
"""
def write_file(filename="", text=""):
"""Print text in the filename"""
with open(filename, "w", encoding="utf-8") as f:
cont = f.write(text)
return cont
| [
"jdcera4@gmail.com"
] | jdcera4@gmail.com |
e6c36884b98e2a9d2654d352c67bc82e1b7d2659 | 9c18348e6bcb255fe187743907c262e842966829 | /semestres/5_semestre/Estructura discreta/unidad-4-estructura/mcd_metodo_clase.py | acfe18b25035c26688aee80b5ab112d827358973 | [] | no_license | ricardovergarat/A-U | 0d912f561f313be94ffdabeb0eac1a25ef4daea5 | 5f85dae83ad45f21de9c7f066fcd011dd7d8869f | refs/heads/master | 2021-09-11T14:45:45.406639 | 2021-08-26T17:06:30 | 2021-08-26T17:06:30 | 204,372,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | from el_algoritmo_euclidiano import *
def obtener_mcd_metodo_clase(a, b):
    """Return gcd(a, b) read from the Euclidean-algorithm trace table.

    ``algoritmo_euclidiano`` is assumed to return a list of rows whose
    next-to-last row carries the last non-zero remainder (the GCD) in
    column 6 -- TODO confirm against el_algoritmo_euclidiano.
    """
    lista = algoritmo_euclidiano(a, b)
    # Negative indexing replaces the equivalent lista[len(lista) - 2].
    return lista[-2][6]
"ricardovergarat1@gmail.com"
] | ricardovergarat1@gmail.com |
87920170a74947b9f89c3e958893179fc55c33ba | d7f6c6c480375cff847712e754ae37e81aa31d46 | /make_game/14_grid.py | aa25feaf674a9297cf0a77aacdc6c6fd27e9e3a8 | [] | no_license | junwoopark93/pythonworkspace | fc0804697a0ede0fbbc65283f2e6e77cf428c8e5 | fa1c8994a997fdd88bf5318203faccfdf71f7682 | refs/heads/master | 2023-01-23T00:53:13.118219 | 2020-12-12T14:17:25 | 2020-12-12T14:17:25 | 317,523,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,819 | py | from tkinter import *
# Build a calculator-style keypad laid out with the Tk grid geometry
# manager.  Buttons are created row by row; every cell stretches to fill
# its grid cell (sticky on all four sides) with a 3px margin.
root = Tk()
root.title("Park's UI")
root.geometry("640x480")

# Early pack/grid experiments kept for reference:
# btn1 = Button(root, text="button1")
# btn2 = Button(root, text="button2")
# #btn1.pack(side="left")
# #btn2.pack(side="left")
# btn1.grid(row=0, column=0)
# btn2.grid(row=0, column=1)

# Row 0: function keys.
btn_f16 = Button(root,text="F16", width=5, height=2)
btn_f17 = Button(root,text="F17", width=5, height=2)
btn_f18 = Button(root,text="F18", width=5, height=2)
btn_f19 = Button(root,text="F19", width=5, height=2)
btn_f16.grid(row=0, column=0, sticky=N+E+W+S, padx=3, pady=3)
btn_f17.grid(row=0, column=1, sticky=N+E+W+S, padx=3, pady=3)
btn_f18.grid(row=0, column=2, sticky=N+E+W+S, padx=3, pady=3)
btn_f19.grid(row=0, column=3, sticky=N+E+W+S, padx=3, pady=3)

# Row 1: clear / equals / operators.
btn_clear = Button(root, text="clear", width=5, height=2)
btn_equal = Button(root, text="=", width=5, height=2)
btn_div = Button(root, text="/", width=5, height=2)
btn_mul = Button(root, text="*", width=5, height=2)
btn_clear.grid(row=1, column=0, sticky=N+E+W+S, padx=3, pady=3)
btn_equal.grid(row=1, column=1, sticky=N+E+W+S, padx=3, pady=3)
btn_div.grid(row=1, column=2, sticky=N+E+W+S, padx=3, pady=3)
btn_mul.grid(row=1, column=3, sticky=N+E+W+S, padx=3, pady=3)

# Row 2: 7 8 9 and minus.
btn_7 = Button(root, text="7", width=5, height=2)
btn_8 = Button(root, text="8", width=5, height=2)
btn_9 = Button(root, text="9", width=5, height=2)
btn_sub = Button(root, text="-", width=5, height=2)
btn_7.grid(row=2, column=0, sticky=N+E+W+S, padx=3, pady=3)
btn_8.grid(row=2, column=1, sticky=N+E+W+S, padx=3, pady=3)
btn_9.grid(row=2, column=2, sticky=N+E+W+S, padx=3, pady=3)
btn_sub.grid(row=2, column=3, sticky=N+E+W+S, padx=3, pady=3)

# Row 3: 4 5 6 and plus.
btn_4 = Button(root, text="4", width=5, height=2)
btn_5 = Button(root, text="5", width=5, height=2)
btn_6 = Button(root, text="6", width=5, height=2)
btn_add = Button(root, text="+", width=5, height=2)
btn_4.grid(row=3, column=0, sticky=N+E+W+S, padx=3, pady=3)
btn_5.grid(row=3, column=1, sticky=N+E+W+S, padx=3, pady=3)
btn_6.grid(row=3, column=2, sticky=N+E+W+S, padx=3, pady=3)
btn_add.grid(row=3, column=3, sticky=N+E+W+S, padx=3, pady=3)

# Rows 4-5: 1 2 3, enter spanning two rows, wide 0 and the decimal point.
btn_1 = Button(root, text="1", width=5, height=2)
btn_2 = Button(root, text="2", width=5, height=2)
btn_3 = Button(root, text="3", width=5, height=2)
btn_enter = Button(root, text="enter", width=5, height=2)
btn_1.grid(row=4, column=0, sticky=N+E+W+S, padx=3, pady=3)
btn_2.grid(row=4, column=1, sticky=N+E+W+S, padx=3, pady=3)
btn_3.grid(row=4, column=2, sticky=N+E+W+S, padx=3, pady=3)
btn_enter.grid(row=4, column=3, rowspan=2, sticky=N+E+W+S, padx=3, pady=3)

btn_0 = Button(root, text="0", width=5, height=2)
btn_dot = Button(root, text=".", width=5, height=2)
btn_0.grid(row=5, column=0, columnspan=2, sticky=N+E+W+S, padx=3, pady=3)
btn_dot.grid(row=5, column=2, sticky=N+E+W+S, padx=3, pady=3)

# Enter the Tk event loop; no command callbacks are wired up yet.
root.mainloop()
| [
"pjw040293@gmail.com"
] | pjw040293@gmail.com |
8b0d0af563b04234591e79e11b078ff9d9f40eb4 | 9547e94056acc29caa76aa3d55ee6dea501e5f6f | /guess/guess/guess/wsgi.py | 2640ca849accf6e7377f2e6613838b453803daa7 | [
"MIT"
] | permissive | PeteCoward/teach-python | bd1cdaa08af9fe7cac22e19e69dd6f644a505496 | 2a63ece83151631ab4dcf868c361acdfe4e6c85f | refs/heads/master | 2021-05-04T10:37:29.998473 | 2017-08-19T06:46:15 | 2017-08-19T06:46:15 | 45,660,962 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for guess project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "guess.settings")
application = get_wsgi_application()
| [
"peter.coward@gmail.com"
] | peter.coward@gmail.com |
cf699af21b0d0fd761b081328d5a6b79c0af8136 | 90e55952cd6890498f972e1896a8ae815134ee40 | /whogotit/profiles/models.py | e798afd4366f16e2be076fe0219f10508faf2fef | [] | no_license | stonaz/whogotit | 50e6b7f3abc3dd1da46f8a6c54ef78a18a909f88 | e7493fbefbaca6362e8bc737976e6a77bcb79100 | refs/heads/master | 2021-04-14T19:44:37.321862 | 2018-05-09T15:28:25 | 2018-05-09T15:28:25 | 126,524,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,393 | py | from datetime import datetime
from django.contrib.auth.models import User
from django.contrib.gis.db import models
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from django.utils.http import int_to_base36
from django.template.loader import render_to_string
from django.core.mail import send_mail
from django.contrib.auth.tokens import default_token_generator as token_generator
from django.conf import settings
from email_null import EmailNullField
def now():
    """Return the current date and time in UTC as an aware datetime."""
    # datetime.now(tz) produces the aware value in a single step; it is
    # equivalent to utcnow().replace(tzinfo=utc) but avoids the naive
    # intermediate (utcnow() is deprecated in modern Python).
    return datetime.now(utc)
#def get_user_email(user):
# print type( user)
# u = User.objects.get(id= user)
# email =u.email
# return email
class UserProfile(models.Model):
    """Per-user profile: contact details plus visibility/notification flags."""
    # This line is required. Links UserProfile to a User model instance.
    user = models.OneToOneField(settings.AUTH_USER_MODEL)

    # The additional attributes we wish to include.
    # Contact email; nullable via EmailNullField so the unique constraint
    # tolerates many empty profiles.
    profile_email = EmailNullField(blank=True, null=True,unique=True)
    phone = models.CharField( _("Telefono"),max_length=20,help_text=_("Telefono"),blank=True,null=True)
    # Whether the phone / email is shown publicly.
    publish_phone = models.BooleanField(default=False)
    publish_email = models.BooleanField(default=True)
    # Opt-in notification flags.
    notify_wishlist = models.BooleanField(default=False)
    notify_added_books = models.BooleanField(default=False)

    # Override the __unicode__() method to return out something meaningful!
    def __unicode__(self):
        return self.user.username
class PasswordResetManager(models.Manager):
    """ Password Reset Manager """

    def create_for_user(self, user):
        """ create password reset for specified user

        ``user`` may be a User instance or (as unicode) a profile email
        address.  Generates a token, persists a PasswordReset row and
        emails the reset link to the profile email.
        """
        # support passing email address too
        if type(user) is unicode:
            userprofile = UserProfile.objects.get(profile_email=user)
            user = User.objects.get(id=userprofile.user_id)
        # NOTE(review): when a User object is passed directly, the
        # ``userprofile`` name below is never bound, so send_mail() will
        # raise NameError -- confirm callers only pass email strings.
        temp_key = token_generator.make_token(user)

        # save it to the password reset model
        password_reset = PasswordReset(user=user, temp_key=temp_key)
        password_reset.save()

        # NOTE(review): leftover debug prints below leak the user id and
        # the full reset message to stdout.
        print user.id
        print int_to_base36(5000)

        # send the password reset email
        subject = _("Password reset email sent")
        message = render_to_string("profiles/email_messages/password_reset_key_message.txt", {
            "user": user,
            "uid": int_to_base36(user.id),
            "temp_key": temp_key,
            "site_url": settings.SITE_URL,
            "site_name": settings.SITE_NAME
        })
        print message
        send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [userprofile.profile_email])
        return password_reset
class PasswordReset(models.Model):
    """
    Password reset Key

    One row per issued reset token; ``reset`` flips once the token is used.
    """
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("user"))
    temp_key = models.CharField(_("temp_key"), max_length=100)
    # Issue time; ``now`` (module-level helper) supplies an aware UTC value.
    timestamp = models.DateTimeField(_("timestamp"), default=now)
    reset = models.BooleanField(_("reset yet?"), default=False)

    objects = PasswordResetManager()

    class Meta:
        verbose_name = _('password reset')
        verbose_name_plural = _('password resets')
        app_label = 'profiles'

    def __unicode__(self):
        return "%s (key=%s, reset=%r)" % (
            self.user.username,
            self.temp_key,
            self.reset
        )
| [
"stefano.tonazzi@gmail.com"
] | stefano.tonazzi@gmail.com |
d8f8388ccf0bde786d3c4b612af5b9f908999b36 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v5/googleads-py/google/ads/googleads/v5/errors/types/keyword_plan_idea_error.py | 7317f5950bfe3158aa862fb8e1d10e4d1711708a | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Proto module manifest for this generated file; produced by the Google Ads
# code generator -- do not edit manually.
__protobuf__ = proto.module(
    package='google.ads.googleads.v5.errors',
    marshal='google.ads.googleads.v5',
    manifest={
        'KeywordPlanIdeaErrorEnum',
    },
)


class KeywordPlanIdeaErrorEnum(proto.Message):
    r"""Container for enum describing possible errors from
    KeywordPlanIdeaService.
    """
    class KeywordPlanIdeaError(proto.Enum):
        r"""Enum describing possible errors from KeywordPlanIdeaService."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        URL_CRAWL_ERROR = 2
        INVALID_VALUE = 3


__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
3c7165e1c6f28d3dec906fe46f353875353e70cf | 8e99450be41b6a448f50b58b63fe0650765e6fa1 | /pliki/nauka09.py | df2d9bfafc5c3ac658975c95a26e7ed999217458 | [] | no_license | adrianmikol/2020-11-21-python | 2cf7fd1234603db3c224f647eb90a4c727741898 | 890a3fbdd242c87654b919c68ea959839b6b2ad8 | refs/heads/main | 2023-02-27T23:49:10.103642 | 2021-01-31T15:44:18 | 2021-01-31T15:44:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | import pickle
dane = {
"imie" : "Mateusz",
"szkolenia" : { "Linux", "Python", "git" },
"xyz" : [5,6,7,8,9],
"test4" : range(5,10)
}
x = pickle.dump(dane, open("nauka09.dat", "wb"))
| [
"m.adamowski@alx.pl"
] | m.adamowski@alx.pl |
fde91efd3a8d9eba07eafe0198bc165fb6784d17 | e87b7192c836b680ec36c2b93aa07d8cbb297a6f | /kbengine_spaceship_demos_assets/scripts/cell/interfaces/Arsenal.py | da47f2ae2a75cb59a6266e600911354f7a92c9ff | [] | no_license | petergjh/spacecraft_demos | 95ee993e0e4e59457cf3a3227e6d695c27eed73a | 53833e089e50ea54bd1ef319a8797ebdec89b456 | refs/heads/master | 2020-12-10T03:36:14.764199 | 2018-07-18T09:14:03 | 2018-07-18T09:14:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,977 | py | # -*- coding: utf-8 -*-
import KBEngine
import GameConfigs
import SCDefine
import d_entities
import math
import Math
from KBEDebug import *
class Arsenal:
    """Weapon-inventory mixin for a KBEngine cell entity.

    Tracks owned weapons in ``self.weapons`` (a dict keyed by weapon entity
    id -- expected to be declared on the entity definition, it is never
    created here; verify) and per-weapon cooldown ticks in
    ``self.coolTimeList``.
    """

    def __init__(self):
        # weaponID -> remaining cooldown ticks.
        self.coolTimeList = {}
        # self.addTimer(1,1,SCDefine.TIMER_TYPE_WEAPON_CD)

    def hasWeapon(self, weapon):
        """Return True if the given weapon entity id is in the inventory.

        Bug fix: the original body tested the undefined name ``entityID``
        (NameError on every call); the ``weapon`` parameter is the id.
        """
        return weapon in self.weapons

    def reqWeaponList(self, exposed):
        """Client request: send the weapon inventory back to the client."""
        if self.id != exposed:
            return
        DEBUG_MSG("Arsenal::reqWeaponList: weapons = %s" % str(self.weapons))
        self.client.onReqWeaponList(self.weapons)

    def addWeapon(self, entityCall):
        """Register a weapon entityCall, keyed by its entity id."""
        self.weapons[entityCall.id] = entityCall

    def removeWeapon(self, entityID):
        """Drop the weapon with the given id (KeyError if absent)."""
        del self.weapons[entityID]

    def canUseWeapon(self, weaponID):
        """Check availability; start the cooldown when the weapon is usable."""
        cd = self.coolTimeList.get(weaponID, 0)
        # NOTE(review): True when the weapon *data* is missing, so the name
        # reads inverted -- confirm d_entities.datas lookup semantics.
        haveWeapon = d_entities.datas[weaponID] is None
        self.client.canUseWeaponResult(cd, haveWeapon)
        if cd > 0 or haveWeapon:
            return False
        self.coolTimeList[weaponID] = d_entities.datas[weaponID]["cd"]
        return True

    def onTimer(self, tid, userArg):
        """
        KBEngine method.
        Engine callback fired when a timer elapses.
        """
        #DEBUG_MSG("%s::onTimer: %i, tid:%i, arg:%i" % (self.getScriptName(), self.id, tid, userArg))
        if SCDefine.TIMER_TYPE_WEAPON_CD == userArg:
            self.onCoolDownTick()

    def onCoolDownTick(self):
        """
        onCoolDownTick
        Poll all cooldowns once and decrement every active one by a tick.
        """
        DEBUG_MSG("onCoolDownTick")
        for weapon in self.coolTimeList:
            if self.coolTimeList[weapon] > 0:
                self.coolTimeList[weapon] -= 1

    def reqUseWeapon(self, exposed, position, direction, destForward, weaponID):
        """
        defined.
        Fire the given weapon toward the target by spawning its projectile
        entity slightly ahead of the ship.
        """
        if exposed != self.id:
            return
        # if self.canUseWeapon(weaponID):
        #     return
        datas = d_entities.datas[weaponID]
        if datas is None:
            ERROR_MSG("SpawnPoint::spawn:%i not found." % weaponID)
            return
        DEBUG_MSG("Arsenal::reqUseWeapon(%i):weaponID=%i,position:(%f,%f,%f)" % (self.id, weaponID, position.x, position.y, position.z))
        delayBornTime = datas["delayBornTime"] * 0.001
        # Offset the spawn point along destForward to compensate for the
        # ship's own motion during the spawn delay.
        position = position + Math.Vector3(destForward) * (self.cruiseSpeed * 0.1 * delayBornTime)
        params = {
            "ownerID": self.id,
            "uid": datas["id"],
            "utype": datas["etype"],
            "modelID": datas["modelID"],
            "dialogID": datas["dialogID"],
            "name": datas["name"],
            "CD": datas["cd"],
            "destForward": destForward,
            "descr": datas.get("descr", ''),
        }
        e = KBEngine.createEntity(datas["entityType"], self.spaceID, position, direction, params)
        self.changeState(GameConfigs.ENTITY_STATE_FIGHT)
| [
"760905035@qq.com"
] | 760905035@qq.com |
290df94161503fe64508cd16fdcb1b54fe6edb58 | 170b1fc17e29c0d96b1f7c0970f85b1328597447 | /manage.py | abf3cfbaf070bc7f28e52cca9c3d5f7c8a21b862 | [] | no_license | adiela/directory-backend | 5446c32b12e0a6ebf28c703d08cf9dafc2456f6a | 75bf3b881eb381a94c9cf797447f53a507fdc03b | refs/heads/main | 2023-04-14T20:18:45.925725 | 2021-04-21T20:46:40 | 2021-04-21T20:46:51 | 349,205,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
from decouple import config
def main():
    """Run Django administrative tasks from the command line."""
    # The settings module name is read from the environment (via
    # python-decouple) rather than being hard-coded here.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', config('DJANGO_SETTINGS_MODULE'))
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"adiela.abishua@gmail.com"
] | adiela.abishua@gmail.com |
3123b2e166e0c4d6732e9496e51de271ea7f14b1 | 512f48fdcfa78e322526cf47163110009b84bf73 | /rapid7vmconsole/models/privileges.py | 62da3458bc049d084294218fcc09b1e20475b6aa | [
"MIT"
] | permissive | confluentinc/vm-console-client-python | 9a0f540c0113acf68ee9dc914715bc255e4d99f4 | ccbd944a0e0333c73e098b769fe4c82755d29874 | refs/heads/master | 2023-07-18T10:33:58.909287 | 2021-09-02T20:52:20 | 2021-09-02T20:52:20 | 402,559,283 | 0 | 0 | MIT | 2021-09-02T20:49:56 | 2021-09-02T20:49:56 | null | UTF-8 | Python | false | false | 4,944 | py | # coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
# Generated swagger model -- edits here are normally made in the codegen
# templates, not by hand.
class Privileges(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'links': 'list[Link]',
        'resources': 'list[str]'
    }

    attribute_map = {
        'links': 'links',
        'resources': 'resources'
    }

    def __init__(self, links=None, resources=None):  # noqa: E501
        """Privileges - a model defined in Swagger"""  # noqa: E501
        self._links = None
        self._resources = None
        self.discriminator = None
        if links is not None:
            self.links = links
        if resources is not None:
            self.resources = resources

    @property
    def links(self):
        """Gets the links of this Privileges.  # noqa: E501

        :return: The links of this Privileges.  # noqa: E501
        :rtype: list[Link]
        """
        return self._links

    @links.setter
    def links(self, links):
        """Sets the links of this Privileges.

        :param links: The links of this Privileges.  # noqa: E501
        :type: list[Link]
        """
        self._links = links

    @property
    def resources(self):
        """Gets the resources of this Privileges.  # noqa: E501

        :return: The resources of this Privileges.  # noqa: E501
        :rtype: list[str]
        """
        return self._resources

    @resources.setter
    def resources(self, resources):
        """Sets the resources of this Privileges.

        Validates that every entry is one of the known privilege names.

        :param resources: The resources of this Privileges.  # noqa: E501
        :type: list[str]
        """
        allowed_values = ["all-permissions", "create-reports", "configure-global-settings", "manage-sites", "manage-tags", "manage-static-asset-groups", "manage-dynamic-asset-groups", "manage-scan-templates", "manage-report-templates", "manage-scan-engines", "submit-vulnerability-exceptions", "approve-vulnerability-exceptions", "delete-vulnerability-exceptions", "manage-vuln-investigations", "view-vuln-investigations", "create-tickets", "close-tickets", "assign-ticket-assignee", "manage-site-access", "manage-asset-group-access", "manage-report-access", "use-restricted-report-sections", "manage-policies", "view-asset-group-asset-data", "manage-asset-group-assets", "view-site-asset-data", "specify-site-metadata", "purge-site-asset-data", "specify-scan-targets", "assign-scan-engine", "assign-scan-template", "manage-site-credentials", "manage-scan-alerts", "schedule-automatic-scans", "start-unscheduled-scans"]  # noqa: E501
        if not set(resources).issubset(set(allowed_values)):
            raise ValueError(
                "Invalid values for `resources` [{0}], must be a subset of [{1}]"  # noqa: E501
                .format(", ".join(map(str, set(resources) - set(allowed_values))),  # noqa: E501
                        ", ".join(map(str, allowed_values)))
            )
        self._resources = resources

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(Privileges, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Privileges):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"zachary_youtz@rapid7.com"
] | zachary_youtz@rapid7.com |
0546efda0b5f0a7c3fa89c0af1a6b8f52c07b48e | 27d43a78ad85c8182489bd2b95578597780fdf15 | /isprinkle-server/isprinkle-server | 87617372d09467aae99bfdc0aec27816b5674d07 | [] | no_license | djsmith42/isprinkle | 1ba1c2630f5a5365f8f9a54a39075c01e44ad4ac | b154c8cac8060d4c3d5c1e570986455c5fbab686 | refs/heads/master | 2016-09-05T10:01:08.785592 | 2015-08-29T04:10:47 | 2015-08-29T04:10:47 | 4,769,808 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | #!/usr/bin/env python
import os, sys, time, signal, datetime, uuid
from webservice import iSprinkleWebService
from wateringservice import iSprinkleWateringService
from persister import iSprinklePersister
shutdown_requested = False
THREAD_TIMEOUT_SECONDS = 5.0
def handle_sigterm(signal, func=None):
    """SIGTERM handler: request a clean shutdown of the main loop.

    NOTE(review): the first parameter shadows the ``signal`` module and the
    second is actually the interrupted stack frame per the signal-handler
    convention -- confirm before renaming.
    """
    print 'Received SIGTERM'
    global shutdown_requested
    shutdown_requested = True
if __name__ == '__main__':
    # Install the shutdown handler, then load the persisted watering model.
    signal.signal(signal.SIGTERM, handle_sigterm)

    model = iSprinklePersister().load()
    print 'Loaded %d saved watering%s:' % (
        len(model.get_waterings()),
        '' if len(model.get_waterings()) == 1 else 's')
    for watering in model.get_waterings():
        print watering

    # Start the watering scheduler and the HTTP front end; any startup
    # failure aborts the process.
    try:
        wateringservice = iSprinkleWateringService(model)
        wateringservice.start()
        webservice = iSprinkleWebService(model)
        webservice.start()
    except IOError as (errno, strerror):
        print 'Could not startup: %s, code %d.' % (strerror, errno)
        sys.exit()
    except:
        print 'Could not startup: unknown error'
        raise

    # Idle until SIGTERM (handler above) or Ctrl-C requests shutdown.
    try:
        while not shutdown_requested:
            time.sleep(0.5)
    except KeyboardInterrupt:
        pass

    # Stop both service threads and give each a bounded time to exit.
    webservice.stop()
    wateringservice.stop()
    webservice.join(THREAD_TIMEOUT_SECONDS)
    wateringservice.join(THREAD_TIMEOUT_SECONDS)
| [
"dave@thesmithfam.org"
] | dave@thesmithfam.org | |
3a427aa4224c49b21ff8fe2aa4a4b2fc97d65227 | 695ec68b5e3e1c4b038b472cabced8944f479ca7 | /backend/db/patient.py | 464a989a9cb27c1d1baa77e94436a8092fe897da | [
"MIT"
] | permissive | wooque/openpacs | 1ca5326ea98be6f40129684a408705bd2c3a4693 | 4524bc7fade0934a70a53bb311f302828cc56905 | refs/heads/master | 2022-07-22T11:34:25.891686 | 2019-09-25T13:13:49 | 2019-09-25T13:13:49 | 210,719,218 | 4 | 1 | MIT | 2022-07-05T21:35:09 | 2019-09-25T00:07:45 | Python | UTF-8 | Python | false | false | 2,633 | py | from db.table import Table
from db.study import Study
from db.series import Series
from pypika.pseudocolumns import PseudoColumn
class Patient(Table):
    """Data-access helper for the ``patients`` table.

    Queries are built with pypika via the base ``Table`` helpers and run on
    the connection held by ``self.conn`` -- see db.table for the contract.
    """
    name = 'patients'

    async def sync_db(self):
        """Create the patients table and its patient_id index if missing."""
        await self.exec("""
            CREATE TABLE IF NOT EXISTS patients (
                id SERIAL PRIMARY KEY,
                patient_id TEXT UNIQUE NOT NULL,
                name TEXT NOT NULL,
                birth_date TEXT,
                sex TEXT,
                meta JSONB
            );
        """)
        await self.exec("""
            CREATE INDEX IF NOT EXISTS patients_patient_id ON patients(patient_id);
        """)

    async def insert_or_select(self, data):
        """Return the existing row for data['patient_id'], or insert one.

        On a concurrent insert the ON CONFLICT clause refreshes the name
        and still yields the row id; only {'id': ...} is returned then.
        """
        q = self.select('*').where(self.table.patient_id == data['patient_id'])
        p = await self.fetchone(q)
        if p:
            return p
        q = self.insert().columns(
            'patient_id', 'name', 'birth_date', 'sex',
        ).insert((
            data['patient_id'], data['patient_name'],
            data['patient_birth_date'], data['patient_sex'],
        ),).on_conflict('patient_id').do_update(
            self.table.name, PseudoColumn('EXCLUDED.name'),
        ).returning('id')
        patient_id = await self.fetchval(q)
        return {'id': patient_id}

    async def get_extra(self, patient_id):
        """Return the patient row with nested studies -> series -> files.

        Fetches each level in one query and stitches the tree in memory.
        """
        # Imported here (not at module level) -- presumably to avoid a
        # circular import with db.files; verify.
        from db.files import Files
        q = self.select('*').where(self.table.id == patient_id)
        patient = await self.fetchone(q)
        patient = dict(patient)
        StudyT = Study(self.conn)
        q = StudyT.select('*').where(
            StudyT.table.patient_id == patient_id
        )
        studies_data = await self.fetch(q)
        studies_data = [dict(s) for s in studies_data]
        # Index studies by id; 'series' starts as a dict for O(1) stitching.
        studies = {}
        for s in studies_data:
            s['series'] = {}
            studies[s['id']] = s
        SeriesT = Series(self.conn)
        q = SeriesT.select('*').where(
            SeriesT.table.study_id.isin(list(studies.keys()))
        )
        series_data = await self.fetch(q)
        series_data = [dict(s) for s in series_data]
        for s in series_data:
            s['files'] = []
            studies[s['study_id']]['series'][s['id']] = s
        FilesT = Files(self.conn)
        q = FilesT.select('*').where(FilesT.table.study_id.isin(list(studies.keys())))
        files = await self.fetch(q)
        files = [dict(f) for f in files]
        for f in files:
            studies[f['study_id']]['series'][f['series_id']]['files'].append(f)
        # Flatten the per-study series dicts back into lists for the caller.
        for s in studies.values():
            s['series'] = list(s['series'].values())
        patient['studies'] = list(studies.values())
        return patient
| [
"contact@vukmirovic.org"
] | contact@vukmirovic.org |
1e62a0ec5f56aed2c6fcd80abc1a40a459dd63b0 | d1b407466f57ac998b84d06b66af997d1f009706 | /t100data/settings.py | 307c71e5277f83e10e132dfca0143725a2e1020e | [] | no_license | jmshulett/assignment45 | 4727f46fec7616c31f38fb502a1f796c628d9cef | b0a6fe27ff658ca9f11c05bc40f7986c2989a8ef | refs/heads/main | 2023-08-23T13:56:14.774451 | 2021-10-24T17:08:29 | 2021-10-24T17:08:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,484 | py | """
Django settings for t100data project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-n37!d16vs9hxo!wtm9fcei9sethe2@=44$k4tav3xyx_c7a(8('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home.apps.HomeConfig',
'air_carrier_market_data.apps.AirCarrierMarketDataConfig',
'django.contrib.humanize',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 't100data.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 't100data.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"noreply@github.com"
] | noreply@github.com |
982b9bf2c89ff1eb8bd5a4efbb592ba072bf5008 | c169ba2ac6719b6668ac69861f6dcbaeb2635f9c | /setup.py | 9459fb034c6a05c8f83e9317509d648b6846a704 | [
"MIT"
] | permissive | lhillber/tmps | 7652eba1519170513fdd108c795c1ae6378b5801 | f5f12cfa5a5e180c1947b138e0c42787a5de90ea | refs/heads/master | 2022-03-06T14:41:19.522604 | 2019-09-30T18:31:23 | 2019-09-30T18:31:23 | 104,544,591 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="tmps",
version="0.1.dev2",
author="Logan Hillberry",
author_email="lhillberry@gmail.com",
description="thermo-magnetic particle simulator",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/lhillber/tmps",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| [
"lhillberry@sensoryinc.com"
] | lhillberry@sensoryinc.com |
2425e893da55a228ed9df844aa45c63afeec10c7 | c36996f302ced350a5264f366eec608b52f65b9a | /__init__.py | 4f4eb0e52ad20eb25360dab0422cc4e6c8a674a4 | [
"MIT"
] | permissive | parkus/arc | 1d99fbc2cd703c1350858bfa619881e8c78fe5a8 | a3df293e49531c1d0e7dfac71794f1896925835b | refs/heads/master | 2020-05-17T18:33:34.950538 | 2015-04-28T20:14:19 | 2015-04-28T20:14:19 | 33,963,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | from arc_functions import * | [
"parke.loyd@gmail.com"
] | parke.loyd@gmail.com |
8c462a9504616211d1a864ac6f1a00d0a2cba936 | b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e | /pypos/pypos-000/pypos.py | 1d36f8dacb776baa51da76f15440425f8f40a5f8 | [] | no_license | pglen/pgpygtk | 4d1405478a714f003984cf3e3db04ff1f767470b | 33f58010e304f1a312f2356de453ecedb7aa21ef | refs/heads/master | 2021-01-22T01:18:52.238415 | 2019-01-01T01:37:24 | 2019-01-01T01:37:24 | 102,215,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,615 | py | #!/usr/bin/env python
# NOTE(review): legacy Python 2 / PyGTK 2 code (print statements,
# "except ExcType, err" syntax) -- this module will not run under Python 3.
import os, sys, getopt, signal
import gobject, gtk, pango
# ------------------------------------------------------------------------
# This is open source sticker program. Written in python.
GAP = 4 # Gap in pixels
TABSTOP = 4
FGCOLOR = "#000000"
BGCOLOR = "#ffff88"
version = 1.0
verbose = False
# Where things are stored (backups, orgs, macros)
config_dir = os.path.expanduser("~/.pypos")
def OnExit(win):
    # Quit the GTK main loop when the toplevel window is destroyed.
    gtk.main_quit()
def help():
    # Print command-line usage and the option summary to stdout.
    print
    print "Pypos version: ", version
    print
    print "Usage: " + os.path.basename(sys.argv[0]) + " [options] [[filename] ... [filenameN]]"
    print
    print "Options:"
    print
    print "        -d level  - Debug level 1-10. (Limited implementation)"
    print "        -v        - Verbose (to stdout and log)"
    print "        -c        - Dump Config"
    print "        -h        - Help"
    print
def area_motion(self, area, event):
    # Pointer-motion handler; a drag is detected while button 1 is held.
    # NOTE(review): defined but never connected to any widget in this file.
    print "window motion event",  event.state, event.x, event.y
    if event.state & gtk.gdk.BUTTON1_MASK:
        print "drag"
# Start of program:
if __name__ == '__main__':
    # Create the per-user config directory if it does not exist yet;
    # mkdir errors are deliberately ignored and re-checked just below.
    try:
        if not os.path.isdir(config_dir):
            os.mkdir(config_dir)
    except: pass
    # Let the user know it needs fixin'
    if not os.path.isdir(config_dir):
        print "Cannot access config dir:", config_dir
        sys.exit(1)
    opts = []; args = []
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hv")
    except getopt.GetoptError, err:
        print "Invalid option(s) on command line:", err
        sys.exit(1)
    #print "opts", opts, "args", args
    # Process command-line switches.
    # NOTE(review): "-d" is parsed here but not declared in the getopt
    # spec "hv", so "-d" on the command line actually raises GetoptError.
    for aa in opts:
        if aa[0] == "-d":
            try:
                pgdebug = int(aa[1])
            except:
                pgdebug = 0
        if aa[0] == "-h": help(); exit(1)
        if aa[0] == "-v": verbose = True
        #if aa[0] == "-x": clear_config = True
        #if aa[0] == "-c": show_config = True
        #if aa[0] == "-t": show_timing = True
    if verbose:
        print "PyPos running on", "'" + os.name + "'", \
            "GTK", gtk.gtk_version, "PyGtk", gtk.pygtk_version
    # Build a centered toplevel window sized to 3/4 of the screen and
    # enter the GTK main loop; OnExit() terminates it on destroy.
    www = gtk.gdk.screen_width(); hhh = gtk.gdk.screen_height();
    window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    #window.set_decorated(False)
    window.set_position(gtk.WIN_POS_CENTER)
    window.set_default_size(3*www/4, 3*hhh/4)
    window.set_flags(gtk.CAN_FOCUS | gtk.SENSITIVE)
    window.connect("destroy", OnExit)
    window.show_all()
    gtk.main()
| [
"peterglen99@gmail.com"
] | peterglen99@gmail.com |
7c2282ccb89860811bd007ff2cc35647d217322f | 02023fb46d07eb23a21d733062500dbabe636998 | /loginlogout/urls_log.py | 43dccd6ae4c936d85fee98677eceb5b66edff9fe | [] | no_license | SamSSupreme/myrepo | 7c4c5d1cb1ba44ac1751b5b6a0c2bab8ab5a3ee9 | a9a72431a840cfa5358dcf38038dd29815c24005 | refs/heads/master | 2020-04-03T21:55:24.077366 | 2018-11-08T21:14:28 | 2018-11-08T21:14:28 | 155,587,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from django.urls import re_path
from . import views
urlpatterns = [
re_path(r'^login/$', views.login),
re_path(r'^logout/$', views.logout),
] | [
"danya.samusenkov@gmail.com"
] | danya.samusenkov@gmail.com |
28b8940fb39efb304eb29f75d9a59657e3bea185 | 89a331cf47fc7b9765de9abf8d4fd53dd6fe399d | /final/support.py | a454ff767372d7632a8dc61735c1c021259d9034 | [] | no_license | devangsharma14/Skin-Cancer | 11dc5921a46a1b346d73a05f6eb9d11858bf0665 | 88de8cdb3d4cd87033ac83a3aef1e6b4fdd0c0d7 | refs/heads/main | 2023-03-28T08:42:45.681973 | 2021-03-30T10:49:33 | 2021-03-30T10:49:33 | 338,076,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,590 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 27 11:24:25 2021
@author: Devang
"""
import glob
import boto3
from flask import send_file
import os
import numpy as np
import torch
from PIL import Image
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import models
def get_filename(bucket):
    """
    Return the key of the most recently modified object in an S3 bucket.

    :param bucket: S3 bucket name
    :return: key (str) of the newest object in the bucket
    """
    client = boto3.client('s3')
    contents = client.list_objects_v2(Bucket=bucket)['Contents']

    def modified_at(entry):
        # Sort key: last-modified time as an integer UNIX timestamp.
        return int(entry['LastModified'].timestamp())

    newest_first = sorted(contents, key=modified_at, reverse=True)
    return newest_first[0]['Key']
def download_file(file_name, bucket):
    """
    Download an object from S3 into the local ``downloads/`` folder and
    return it to the client as a Flask file attachment.

    :param file_name: key of the object to fetch
    :param bucket: S3 bucket name
    :return: Flask response streaming the downloaded file
    """
    local_path = f"downloads/{file_name}"
    boto3.resource('s3').Bucket(bucket).download_file(file_name, local_path)
    return send_file(local_path, as_attachment=True)
def load_input_image(img_path):
    """
    Load an image from disk and preprocess it into a 4-D tensor
    (1 x 3 x 299 x 299) suitable for an Inception-style network.

    :param img_path: path to the image file
    :return: normalized torch tensor with a leading batch dimension
    """
    rgb = Image.open(img_path).convert('RGB')
    pipeline = transforms.Compose([
        transforms.Resize(size=(299, 299)),
        transforms.CenterCrop(299),
        transforms.ToTensor(),
        # ImageNet channel statistics.
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    tensor = pipeline(rgb)
    # Keep only the first three channels, then add the batch axis.
    return tensor[:3, :, :].unsqueeze(0)
def predict_skin_cancer(model, class_names, img_path):
    """
    Run the classifier on a single image and return the predicted label.

    :param model: trained torch model
    :param class_names: sequence mapping class index -> label
    :param img_path: path to the image to classify
    :return: predicted class label from *class_names*
    """
    batch = load_input_image(img_path)
    net = model.cpu()
    net.eval()
    predicted_index = torch.argmax(net(batch))
    return class_names[predicted_index]
def set_parameter_requires_grad(model, feature_extracting):
    """
    Freeze a model's weights when it is used as a fixed feature extractor.

    :param model: torch model whose parameters may be frozen
    :param feature_extracting: when True, disable gradients on every
        parameter; when False, leave the model untouched
    """
    if not feature_extracting:
        return
    for weight in model.parameters():
        weight.requires_grad = False
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    """Build a torchvision model for transfer learning.

    Replaces the final classifier layer with a fresh ``num_classes``-way
    head and (optionally) freezes the backbone via
    set_parameter_requires_grad.

    :param model_name: one of "resnet", "alexnet", "vgg", "squeezenet",
        "densenet", "inception"
    :param num_classes: number of output classes for the new head
    :param feature_extract: when True, backbone weights are frozen so only
        the new head trains
    :param use_pretrained: load ImageNet-pretrained weights
    :return: tuple (model, expected_input_size)

    NOTE(review): an unknown model_name prints a message and calls exit(),
    terminating the whole process -- raising ValueError would be kinder to
    callers; left as-is to preserve behavior.
    """
    # Initialize these variables which will be set in this if statement. Each of these
    # variables is model specific.
    model_ft = None
    input_size = 0
    if model_name == "resnet":
        """ Resnet18
        """
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224
    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224
    elif model_name == "vgg":
        """ VGG11_bn
        """
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224
    elif model_name == "squeezenet":
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # SqueezeNet classifies with a 1x1 conv, not a Linear layer.
        model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
        model_ft.num_classes = num_classes
        input_size = 224
    elif model_name == "densenet":
        """ Densenet
        """
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224
    elif model_name == "inception":
        """ Inception v3
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxilary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs,num_classes)
        input_size = 299
    else:
        print("Invalid model name, exiting...")
        exit()
    return model_ft, input_size
| [
"noreply@github.com"
] | noreply@github.com |
73801716579f06b4fea1bcac3e99b7ab239a38bc | b0dfba93bee2a5483295ce2e827164c76f9ab66c | /09_Special Pythagorean triplet.py | 14aa2db9622f9892f2f30d0ff6544953ed01af41 | [] | no_license | kasanitej/Project-Euler-Python | bf9f25ff5b136616c6b6f19f8a0a5d65ab61c9f6 | 52159a345b0544cc60e37c872b2799f70d671277 | refs/heads/master | 2020-03-22T08:19:18.620451 | 2018-07-23T19:19:47 | 2018-07-23T19:19:47 | 139,759,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | from math import sqrt
from math import sqrt

# Collect every Pythagorean triplet (x, y, z) with 1 <= x < 333 and
# x <= y < 500.  The original tested "z % 1 == 0" on the float returned
# by sqrt(), which is fragile; here the candidate hypotenuse is rounded
# to the nearest integer and verified with exact integer arithmetic.
triplets = []
for x in range(1, 333):
    for y in range(x, 500):
        z = int(sqrt(x ** 2 + y ** 2) + 0.5)  # nearest-integer candidate
        if z * z == x ** 2 + y ** 2:
            triplets.append((x, y, z))
print(triplets)

# Project Euler 9: the unique triplet whose members sum to 1000.
sum1 = [(x, y, z) for (x, y, z) in triplets if x + y + z == 1000]
print(sum1)
print(sum1[0][0] * sum1[0][1] * sum1[0][2])
"noreply@github.com"
] | noreply@github.com |
2c7462f9a75f58b2ed2e7b8ccb191d0fef666f05 | a096eb61ca7c2146c41cba8affabf3daafd6373f | /instagram/migrations/0002_auto_20210712_1000.py | 0dfbffa775e0279e5eb9e55f6acaad91c8b616d9 | [
"MIT"
] | permissive | Kiru-axis/Instagram_Clone | 2a32ea3cdbc85f30c429a729ab571abc676d62c0 | 07937945b7ce7310c886335a4d077731fe1b1250 | refs/heads/master | 2023-06-20T04:42:23.900578 | 2021-07-12T07:13:19 | 2021-07-12T07:13:19 | 385,066,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | # Generated by Django 3.2.5 on 2021-07-12 07:00
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: switches the Post.image and
    # Profile.profile_picture columns to Cloudinary-backed fields.
    dependencies = [
        ('instagram', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='image',
            field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='post'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='profile_picture',
            field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='images'),
        ),
    ]
| [
"peter.kiru@student.moringaschool.com"
] | peter.kiru@student.moringaschool.com |
4fb9881f8d25ca657aa47ab7c4b51b8976af9eed | fa5cb3cb27132a330673650afa1d68dd35f15251 | /tests/datastore_memcache/test_multiple_dbs.py | b83d7dfcc1be82e7947678f63af35397e02cebe7 | [
"Apache-2.0"
] | permissive | jbeveland27/newrelic-python-agent | 95b4fdf253915100bc62bbd143066f589efc3ab9 | 86c78370ace1eba18e05de5e37aadb880f5f3ac4 | refs/heads/main | 2023-07-12T06:40:58.741312 | 2021-08-19T23:37:14 | 2021-08-19T23:37:14 | 398,122,410 | 1 | 0 | Apache-2.0 | 2021-08-20T01:38:35 | 2021-08-20T01:38:33 | null | UTF-8 | Python | false | false | 4,162 | py | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import memcache
from newrelic.api.background_task import background_task
from testing_support.fixtures import (validate_transaction_metrics,
override_application_settings)
from testing_support.db_settings import memcached_settings
from testing_support.util import instance_hostname
DB_MULTIPLE_SETTINGS = memcached_settings()
# Settings
# Agent settings toggling datastore instance reporting on/off; the two
# test functions below run once with each variant.
_enable_instance_settings = {
    'datastore_tracer.instance_reporting.enabled': True,
}
_disable_instance_settings = {
    'datastore_tracer.instance_reporting.enabled': False,
}
# Metrics
# Expected metric (name, count) pairs produced by one set_multi and one
# get_multi (see exercise_memcached below).
_base_scoped_metrics = (
        ('Datastore/operation/Memcached/get_multi', 1),
        ('Datastore/operation/Memcached/set_multi', 1),
)
_base_rollup_metrics = (
        ('Datastore/all', 2),
        ('Datastore/allOther', 2),
        ('Datastore/Memcached/all', 2),
        ('Datastore/Memcached/allOther', 2),
        ('Datastore/operation/Memcached/set_multi', 1),
        ('Datastore/operation/Memcached/get_multi', 1),
)
_disable_scoped_metrics = list(_base_scoped_metrics)
_disable_rollup_metrics = list(_base_rollup_metrics)
_enable_scoped_metrics = list(_base_scoped_metrics)
_enable_rollup_metrics = list(_base_rollup_metrics)
# When two memcached instances are configured, also expect per-instance
# metric names (count None -- presumably "presence not asserted"; confirm
# against validate_transaction_metrics semantics).
if len(DB_MULTIPLE_SETTINGS) > 1:
    memcached_1 = DB_MULTIPLE_SETTINGS[0]
    memcached_2 = DB_MULTIPLE_SETTINGS[1]
    host_1 = instance_hostname(memcached_1['host'])
    port_1 = memcached_1['port']
    host_2 = instance_hostname(memcached_2['host'])
    port_2 = memcached_2['port']
    instance_metric_name_1 = 'Datastore/instance/Memcached/%s/%s' % (host_1,
            port_1)
    instance_metric_name_2 = 'Datastore/instance/Memcached/%s/%s' % (host_2,
            port_2)
    _enable_rollup_metrics.extend([
        (instance_metric_name_1, None),
        (instance_metric_name_2, None),
    ])
    _disable_rollup_metrics.extend([
        (instance_metric_name_1, None),
        (instance_metric_name_2, None),
    ])
def exercise_memcached(client, multi_dict):
    """Issue one bulk write followed by one bulk read against *client*,
    so each instrumented datastore operation is recorded exactly once."""
    client.set_multi(multi_dict)
    requested_keys = multi_dict.keys()
    client.get_multi(requested_keys)
transaction_metric_prefix = 'test_multiple_dbs:test_multiple_datastores'
# Exercises both configured memcached instances inside one background
# transaction with instance reporting ENABLED; the validator decorator
# presumably checks the recorded metrics against _enable_* expectations.
@pytest.mark.skipif(len(DB_MULTIPLE_SETTINGS) < 2,
        reason='Test environment not configured with multiple databases.')
@override_application_settings(_enable_instance_settings)
@validate_transaction_metrics(transaction_metric_prefix+'_enabled',
        scoped_metrics=_enable_scoped_metrics,
        rollup_metrics=_enable_rollup_metrics,
        background_task=True)
@background_task()
def test_multiple_datastores_enabled(memcached_multi):
    memcached1 = DB_MULTIPLE_SETTINGS[0]
    memcached2 = DB_MULTIPLE_SETTINGS[1]
    settings = [memcached1, memcached2]
    # One client spanning both servers: "host:port" strings.
    servers = ["%s:%s" % (x['host'], x['port']) for x in settings]
    client = memcache.Client(servers=servers)
    exercise_memcached(client, memcached_multi)
# Same scenario as above but with instance reporting DISABLED, validated
# against the _disable_* metric expectations.
@pytest.mark.skipif(len(DB_MULTIPLE_SETTINGS) < 2,
        reason='Test environment not configured with multiple databases.')
@override_application_settings(_disable_instance_settings)
@validate_transaction_metrics(transaction_metric_prefix+'_disabled',
        scoped_metrics=_disable_scoped_metrics,
        rollup_metrics=_disable_rollup_metrics,
        background_task=True)
@background_task()
def test_multiple_datastores_disabled(memcached_multi):
    memcached1 = DB_MULTIPLE_SETTINGS[0]
    memcached2 = DB_MULTIPLE_SETTINGS[1]
    settings = [memcached1, memcached2]
    # One client spanning both servers: "host:port" strings.
    servers = ["%s:%s" % (x['host'], x['port']) for x in settings]
    client = memcache.Client(servers=servers)
    exercise_memcached(client, memcached_multi)
| [
"noreply@github.com"
] | noreply@github.com |
1adf1bdb1f54c7630e2de726c7944c29d6553e38 | 56e3b92f011214980e48ba55685f908a3afb5630 | /Model/app.py | ff68d0b799892f727bd7c87b85c63a466804beb8 | [] | no_license | anuanmol/Air_pollution_Analysis | af2dc4cee3546733eed2ff0a975d64845ebba87a | b6962509281a776f4dd68ee9edb0d2807aba6911 | refs/heads/main | 2023-07-09T20:36:37.988096 | 2021-08-14T09:14:28 | 2021-08-14T09:14:28 | 393,097,859 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 14,931 | py | import streamlit as st
import pickle
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from fuzzywuzzy import process
import requests
import plotly.express as px
from copy import deepcopy
import json
import os
from PIL import Image
import time
# import folium
# set mapbox token
# NOTE(review): this Mapbox access token is committed to source control;
# move it to an environment variable or secrets store and rotate the key.
px.set_mapbox_access_token('pk.eyJ1IjoiYW5tb2w5IiwiYSI6ImNrczM2MWxmYzJhNm0zMnM3d25pZXJqcW0ifQ.e2XnfHzo0cEwS1pQemxH3w')
def validate_api(api):
    """
    Check whether a Climacell API key is usable by issuing a single
    realtime-weather request for a dummy location (0, 0).

    :param api: 32-char API key
    :return: True if the key is accepted (HTTP 200), False otherwise
    """
    probe_url = "https://api.climacell.co/v3/weather/realtime"
    query = {
        'lat': '0',
        'lon': '0',
        'fields': 'temp',
        'apikey': str(api),
        'unit_system': 'si',
    }
    reply = requests.request('GET', probe_url, params=query)
    return reply.status_code == 200
def run_app():
    """
    A function to run
    the main part of the program

    Renders the Streamlit weather UI: the user picks a location by
    coordinates, free-text country, or dropdown; realtime temperatures are
    fetched from the Climacell API and shown as a table plus a Plotly
    mapbox scatter plot.
    """
    # NOTE(review): Climacell API keys are hardcoded in call_api()/make_req()
    # below -- move them to configuration/secrets before shipping.
    def load_data(path):
        """
        A function load data
        :param path: a path to the file source
        :return: pandas.DataFrame instance
        """
        df = pd.read_csv(path)
        return df
    def match_country(custom_input, df):
        """
        Match user input to available
        countries in the
        :param custom_input: text input for country
        :param df: main data
        :return: matching country as str
        """
        # Store unique country names
        unique_countries = set(df['country'].unique())
        # Find all matches for user_input
        match = process.extractOne(custom_input, unique_countries)
        # Accept only when the best fuzzy match scores at least 80
        if match[1] >= 80:
            return match[0]
        else:
            return 'No match'
    def top25(df, country):
        """
        Subset for the top <25
        cities of the given country
        :param df: a dataset containing coords
        for cities and countries
        :param country: a country matched from
        user input
        :return: pandas.DataFrame containing
        coords for top 25 cities
        of given country
        """
        # Subset for cities of given country
        subset = df[df['country'] == country][['city_ascii', 'lat',
                                               'lng', 'population']]
        # Extract top 25 based on population size
        subset_sorted = subset.sort_values('population',
                                           ascending=False).iloc[:25]
        # Rename lng column to lon
        subset_sorted['lon'] = subset_sorted['lng']
        # Drop lng column
        subset_sorted.drop('lng', axis='columns', inplace=True)
        # Reorder columns
        subset_sorted = subset_sorted[['city_ascii', 'lat',
                                       'lon', 'population']]
        return subset_sorted.reset_index().drop('index', axis='columns')
    def call_api(cities_df, temp_unit):
        """
        Get current weather data
        for top25 cities from cities_df
        based on lat/lon
        :param temp_unit: value got from the user input radio btns
        :param cities_df: pandas.DataFrame with cities sorted by pop
        :return: (200, augmented df) on success, (400, None) if any
            per-city request failed
        """
        # Realtime endpoint
        weather_endpoint = "https://api.climacell.co/v3/weather/realtime"
        # Map the UI unit to the API's unit_system value
        if temp_unit == '°C':
            temp_unit = 'si'
        else:
            temp_unit = 'us'
        # Query params
        params = {
            'unit_system': temp_unit,
            'fields': 'temp',
            # 'apikey': 'ke4PuUV8m6cepBeVZmfbe0gG7J3VdU2e',
            'apikey': '69GZYHnjqfMs3eQrnUDbWw0fbTck4S4l',
            'lat': '',
            'lon': ''
        }
        def call(row):
            """
            Function to return realtime temperature
            for each lat, lon
            """
            # Build querystring params
            params['lat'] = str(row['lat'])
            params['lon'] = str(row['lon'])
            # Make an API call
            response = requests.request("GET", weather_endpoint, params=params)
            if response.status_code == 200:
                response = json.loads(response.content)
                # Update row
                return round(float(response['temp']['value']), 1)
            else:
                # Sentinel marking a failed request for this city
                response = '<400>'
            return response
        # Call for API for each row
        cities_df['Temperature'] = cities_df.apply(call, axis=1)
        # Create a column to resize the scatter plot dots
        cities_df['size'] = 15
        # Rename columns
        cities_df.rename(columns={'city_ascii': 'City'}, inplace=True)
        if 'population' in cities_df.columns:
            # NOTE(review): axis=True is unusual -- presumably intended as
            # axis=1 / axis='columns'; confirm against pandas drop() semantics.
            cities_df.drop('population', axis=True, inplace=True)
        # Check for status code
        if '<400>' in list(cities_df['Temperature']):
            return 400, None
        else:
            return 200, cities_df
    def map_plot(df, country):
        """
        A function to plot a scatter_mapbox
        of plotly
        :param country: a country input by user
        :param df: pandas.DataFrame containing temperature
        and cities data
        :return: plotly figure
        """
        # Change the zoom level according to the shape of df
        size = df.shape[0]
        if size == 25:
            zoom = 3
        elif size == 20:
            zoom = 4
        else:
            zoom = 5
        # Get time for the moment
        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # Construct the figure
        fig = px.scatter_mapbox(df, hover_data=['Temperature', 'City'],
                                lat='lat', lon='lon',
                                color='Temperature', size='size',
                                color_continuous_scale=px.colors.cyclical.IceFire,
                                zoom=zoom)
        fig.update_traces(textposition='top center')
        fig.update_layout(title_text=f'Temperatures for {now}, {country.title()}', title_x=0.5)
        return fig
    def make_req(lat, lon, unit_system):
        """
        A vanilla function to make
        API call based on lat, lon
        """
        endpoint = "https://api.climacell.co/v3/weather/realtime"
        params = {
            'lat': lat, 'lon': lon,
            'fields': 'temp', 'unit_system': unit_system,
            # 'apikey': 'ke4PuUV8m6cepBeVZmfbe0gG7J3VdU2e'
            'apikey': '69GZYHnjqfMs3eQrnUDbWw0fbTck4S4l'
        }
        res = requests.request("GET", endpoint, params=params)
        # If successful
        if res.status_code == 200:
            response = json.loads(res.content)
            # Build df
            df_dict = {
                'lat': [lat],
                'lon': [lon],
                'Temperature': [round(response['temp']['value'], 1)],
                'size': [15]
            }
            df = pd.DataFrame(df_dict, index=[0])
        else: # set df to none if other status codes
            df = None
        return df, res.status_code
    def plot_single(df):
        """
        Vanilla function to
        plot scatter_mapbox based on single
        location
        """
        # Get time for the moment
        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # Construct the figure
        fig = px.scatter_mapbox(df, hover_data=['Temperature'],
                                lat='lat', lon='lon',
                                size='size',
                                color_continuous_scale=px.colors.cyclical.IceFire,
                                zoom=14)
        # Align text to the center
        fig.update_traces(textposition='top center')
        # Set title to the plot
        fig.update_layout(title_text=f'Temperatures for {now}, at ({df["lat"][0]},'
                                     f' {df["lon"][0]})', title_x=0.5)
        return fig
    # Load cities data with locations
    cities = load_data('dataall.csv')
    # Set a title
    st.title('')
    # Create radio options for location input
    st.subheader('Location:')
    action = st.radio('',
                      ['Coordinate(lati, long)', 'Enter Country Name', 'Choose Here'])
    unit = st.radio('Choose the unit for temperature:',
                    ['°C', '°F'])
    # Depending on action
    if action == 'Coordinate(lati, long)':
        # Create two columns to insert inputs side by side
        # NOTE(review): st.beta_columns is the old beta API -- presumably
        # st.columns in current Streamlit; confirm against the pinned version.
        col1, col2 = st.beta_columns(2)
        with col1: # latitude input
            latitude = st.text_input('Latitude (lat):')
        with col2: # longitude input
            longitude = st.text_input('Longitude (lon):')
        # Leave instructions to get the coords
        # st.markdown('<small>If you don\'t know your coordinate '
        #             'location, go to <a href="https://www.latlong.net/">this</a> link. '
        #             '</small>',
        #             unsafe_allow_html=True)
        # If both fields are filled
        if latitude and longitude:
            # Call API and store as a single df
            temp_df, status_code = make_req(latitude, longitude, {'°C': 'si', '°F': 'us'}[unit])
            if status_code == 200:
                # Plot a single point
                plot = plot_single(temp_df)
                # Display dataframe too
                st.table(temp_df[['lat', 'lon', 'Temperature']])
                # Display as plotly chart
                st.plotly_chart(plot)
            elif status_code == 400:
                st.error('Invalid coordinates. Please try again!')
            else:
                st.error('Too many requests. Please try again in an hour...')
    elif action == 'Enter Country Name':
        user_input = st.text_input('Enter country (basic string matching '
                                   'is enabled under the hood):', max_chars=60)
        if user_input:
            # Match the input to existing countries
            country_input = match_country(user_input, cities)
            # If country matches
            if country_input != 'No match':
                # Inform the user about their option
                st.markdown(f"Matched **{country_input}**")
                # Create waiting event while getting temp data from API
                with st.spinner('Hang on... Fetching realtime temperatures...'):
                    # Subset for top <=25 cities of the country choice
                    top_cities = top25(cities, country_input)
                    # Store results of API call
                    status, temperatures = call_api(cities_df=top_cities, temp_unit=unit)
                # If request successful
                if status == 200:
                    # Show dataframe
                    st.dataframe(temperatures.drop('size', axis=1)) # TODO add a df toggler
                    # Create a waiting event while plotting
                    with st.spinner("Little more... Plotting the results..."):
                        # Inform the user to hover over points
                        st.subheader('Hover over the points and drag around to see temperatures')
                        # Display the plotly chart using returned data
                        st.plotly_chart(map_plot(top_cities, country_input))
                else: # if status code != 200, it means too many requests
                    st.error('Too many requests. Please try again in an hour')
            else: # if country_input == 'No match'
                st.error('Could not find a match from the database. Try again...')
    else: # If user chooses to input via dropdown
        # Create a dropdown
        country_input = st.selectbox('Choose country',
                                     sorted([''] + list(cities['country'].unique())))
        # If user choose a country from dropdown
        if country_input:
            # Inform the user about their option
            st.markdown(f"You chose **{country_input}**")
            # Create waiting event while getting temp data from API
            with st.spinner('Getting realtime temperatures...'):
                # Subset for top <=25 cities of the country choice
                top_cities = top25(cities, country_input)
                # Store results of API call
                status, temperatures = call_api(cities_df=top_cities, temp_unit=unit)
            # If request successful
            if status == 200:
                # Show dataframe
                st.dataframe(temperatures.drop('size', axis=1)) # TODO add a df toggler
                # Create a waiting event while plotting
                with st.spinner("Little more... Plotting the results..."):
                    # Inform the user to hover over points
                    st.subheader('Hover over the points to see temperatures')
                    # Display the plotly chart using returned data
                    st.plotly_chart(map_plot(top_cities, country_input))
            else: # if status code != 200, it means too many requests
                st.error('Too many requests. Please try again in an hour')
def main():
    """Streamlit entry point: renders the AQI landing page and dispatches
    to the selected section."""
    st.markdown('''<center><h1><b>AQI</b></center></h1>''', unsafe_allow_html=True)
    # st.header("AQI")
    # NOTE(review): `name` is assigned but never used.
    name = "modelall.sav"
    # NOTE(review): unpickling a model file executes arbitrary code if the
    # file is ever replaced -- only load trusted artifacts.
    model = pickle.load(open('modelall.sav','rb'))
    # st.markdown('''<center><img src="https://user-images.githubusercontent.com/51512071/128558138-0b069fe8-71ac-4a68-8877-4690e6937f0f.png"></center><br><hr>''', unsafe_allow_html=True)
    st.markdown('''<br>''', unsafe_allow_html=True)
    ch = st.selectbox("",['--select--','About','Study','Live'])
    st.markdown('''<br>''', unsafe_allow_html=True)
    if ch == '--select--':
        st.markdown("<center><img src='https://user-images.githubusercontent.com/51512071/128558576-8290e14f-450d-42c0-b8ab-b036e117c9ac.png'></center>", unsafe_allow_html=True)
        st.markdown('''''')
    if ch == 'Live':
        run_app()
    # NOTE(review): 'Predict' is not among the selectbox options above, so
    # this branch is unreachable; additionally the six collected inputs
    # (pm25..nox) are ignored and predict() is fed a hardcoded 10-feature
    # vector -- confirm the intended feature list before wiring this up.
    if ch == 'Predict':
        st.subheader("PM2.5")
        pm25 = st.number_input("", key=1)
        pm10 = st.number_input("", key=2)
        co = st.number_input("", key=3)
        no = st.number_input("", key=4)
        so = st.number_input("", key=5)
        nox = st.number_input("", key=6)
        p = model.predict([[25, 35, 95, 100, 50, 78, 55, 15, 10, 5]])
        st.write(p)
    if ch == 'Study':
        pass
if __name__ == "__main__":
    main()
"noreply@github.com"
] | noreply@github.com |
f19c41f917adc71b85eb4842e9c5e8595add38fd | caab2457e8f4961ec7e3725f5060f899161e0c7e | /Algorithm_And_Data_Structure/hashtable/magic_dictionary.py | 8ee20070b40976f4dff13214799e169e56d89365 | [] | no_license | omidziaee/DataStructure | 7ea9991737ecbf97bd17ec9aea1db0383c5a390c | 66a4325c5999535e64e8e985bac4e3a96108bf1a | refs/heads/master | 2021-06-06T17:37:42.003610 | 2020-04-05T03:38:02 | 2020-04-05T03:38:02 | 148,665,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,483 | py | '''
Created on Feb 23, 2020
@author: omid
Implement a magic directory with buildDict, and search methods.
For the method buildDict, you'll be given a list of non-repetitive words to build a dictionary.
For the method search, you'll be given a word, and judge whether if you modify exactly one
character into another character in this word, the modified word is in the dictionary you just built.
Example 1:
Input: buildDict(["hello", "leetcode"]), Output: Null
Input: search("hello"), Output: False
Input: search("hhllo"), Output: True
Input: search("hell"), Output: False
Input: search("leetcoded"), Output: False
Note:
You may assume that all the inputs are consist of lowercase letters a-z.
For contest purpose, the test data is rather small by now. You could think about highly efficient algorithm after the contest.
Please remember to RESET your class variables declared in class MagicDictionary, as static/class variables are
persisted across multiple test cases. Please see here for more details.
'''
class MagicDictionary(object):
def __init__(self):
"""
Initialize your data structure here.
"""
import collections
self.dic = collections.defaultdict(list)
def buildDict(self, dict):
"""
Build a dictionary through a list of words
:type dict: List[str]
:rtype: None
"""
for word in dict:
self.dic[len(word)].append(word)
def search(self, word):
"""
Returns if there is any word in the trie that equals to the given word after modifying exactly one character
:type word: str
:rtype: bool
"""
if len(word) not in self.dic:
return False
for candidate in self.dic[len(word)]:
counter = 0
for i, ch in enumerate(candidate):
if ch != word[i]:
counter += 1
if counter == 1:
return True
return False
# Your MagicDictionary object will be instantiated and called as such:
# obj = MagicDictionary()
# obj.buildDict(dict)
# param_2 = obj.search(word)
'''
Java:
class MagicDictionary {
Map<Integer, List<String>> dic;
/** Initialize your data structure here. */
public MagicDictionary() {
dic = new HashMap<>();
}
/** Build a dictionary through a list of words */
public void buildDict(String[] dict) {
for(String word: dict){
int len = word.length();
if(!dic.containsKey(len)){
dic.put(len, new ArrayList<String>());
}
dic.get(len).add(word);
}
}
/** Returns if there is any word in the trie that equals to the given word after modifying exactly one character */
public boolean search(String word) {
int len = word.length();
if (!dic.containsKey(len)) {
return false;
}
for (String s : dic.get(len)) {
int count = 0;
for (int i = 0; i < len; ++i) {
if (word.charAt(i) != s.charAt(i)) {
++count;
}
}
if (count == 1) {
return true;
}
}
return false;
}
}
/**
* Your MagicDictionary object will be instantiated and called as such:
* MagicDictionary obj = new MagicDictionary();
* obj.buildDict(dict);
* boolean param_2 = obj.search(word);
*/ | [
"omid@192.168.2.15"
] | omid@192.168.2.15 |
ed2cdf64be7e759b23041fe811fe97aeafd47736 | 1db1dd6f8a76d61c37e66cd7eaede726bd053f06 | /projectsample1/app/serializers.py | ac9001703362d058987982f2d7005c1abadf53e7 | [] | no_license | CDACFRTP-APR21/G6-PythonShoutNest | 9b90463c147dffe566ced658d8b4fd9d20af4fc7 | 31ebae49137f94a8f7a662eb985f6920208e85b7 | refs/heads/main | 2023-06-03T05:34:52.018724 | 2021-06-26T09:31:38 | 2021-06-26T09:31:38 | 380,444,454 | 0 | 1 | null | 2021-06-26T09:38:25 | 2021-06-26T07:40:09 | Python | UTF-8 | Python | false | false | 1,610 | py | from django.db import models
from django.db.models import fields
from rest_framework import serializers
from app.models import Reports, User,Shouts,Friends
""" class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('userId',
'userName',
'emailId',
'password',
'admin_verify') """
class UserSerializer(serializers.ModelSerializer):
    # DRF serializer for User; password is write-only so it is accepted on
    # create/update but never echoed back in API responses.
    class Meta:
        model = User
        fields = ('userId',
        'userName',
        'emailId',
        'password',
        'admin_verify',
        'firstName',
        'lastName',
        'DateOfBirth',
        'MobileNo',
        'profilePic')
        extra_kwargs = {
            'password': {'write_only': True}
        }
class ShoutSerializer(serializers.ModelSerializer):
    # DRF serializer exposing a Shout post and its upload metadata.
    class Meta:
        model= Shouts
        fields=(
            'shoutId',
            'userId',
            'path',
            'caption',
            'type',
            'uploadDate'
        )
class FriendsSerializer(serializers.ModelSerializer):
    # DRF serializer for a friendship edge between two users.
    class Meta:
        model=Friends
        fields=(
            'userId',
            'friendId',
            'status'
        )
class ReportsSerializer(serializers.ModelSerializer):
    # DRF serializer for a user's report filed against a shout.
    class Meta:
        model=Reports
        fields=(
            'reportId',
            'shoutId',
            'userId',
            'reason'
        )
| [
"noreply@github.com"
] | noreply@github.com |
b90f2ec4cb0dc5d6cc682ef6baaf6cf27eab2d04 | ca16fc994a6a4994b5f20c45a47cbff66f041e2a | /store_owner/migrations/0001_initial.py | 1cf62befa13f94601df2aecc1f521211d0d312b6 | [] | no_license | hackathonteam16/hackathon_web | fbbe1dfcb4ee49c4978047858e0169d9a5af0c60 | 4b58e5bdd9cb558642e1f6f28e7b4adce0df7c22 | refs/heads/master | 2023-04-29T06:53:13.570455 | 2021-05-20T20:25:30 | 2021-05-20T20:25:30 | 369,142,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | # Generated by Django 3.2.3 on 2021-05-20 10:58
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Shop',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('shop_name', models.CharField(max_length=200, null=True)),
('description', models.CharField(max_length=200, null=True)),
],
),
]
| [
"mohakoune@gmail.com"
] | mohakoune@gmail.com |
b055d8ae6cafcbe25b727929949414109497dfbf | fe91e0f7f74c3156a5c194713a69d9846b9e26a2 | /flask_app/blueprints/api/blueprint.py | 9493ed788ee23a4f569ba5bbd705e244e4682ac4 | [
"BSD-3-Clause"
] | permissive | getslash/backslash | cbf963006e3de565a1512f79c6c9ab84e705c67e | 67554c039f8ac6a648deb191cc7fb69480f28253 | refs/heads/develop | 2023-01-10T22:26:11.666887 | 2022-06-17T05:06:00 | 2022-06-17T05:06:00 | 23,376,788 | 17 | 15 | NOASSERTION | 2022-12-27T16:17:59 | 2014-08-27T04:30:58 | Python | UTF-8 | Python | false | false | 1,268 | py | import functools
from flask import Blueprint
from flask_simple_api import SimpleAPI
from ... import activity
from ...utils.api_utils import (auto_render, requires_login,
requires_login_or_runtoken)
# Flask blueprint mounting every API endpoint under /api, wrapped by
# SimpleAPI.  _api_info accumulates per-endpoint metadata (filled in by the
# API decorator below) and is served by get_api_info.
blueprint = Blueprint('api', __name__, url_prefix='/api')
api = SimpleAPI(blueprint)
_api_info = {'endpoints': {}}
def API(func=None, require_real_login=False, generates_activity=True, require_login=True, version=1):
if func is None:
return functools.partial(API, require_real_login=require_real_login, generates_activity=generates_activity, require_login=require_login, version=version)
returned = auto_render(func)
endpoint_info = _api_info['endpoints'][func.__name__] = {}
endpoint_info['login_required'] = require_login
endpoint_info['version'] = version
if generates_activity:
returned = activity.updates_last_active(returned)
if require_login:
if require_real_login:
returned = requires_login(returned)
else:
returned = requires_login_or_runtoken(returned)
return api.include(returned)
@blueprint.route('/', methods=['OPTIONS'], strict_slashes=False)
def get_api_info():
from flask import jsonify
return jsonify(_api_info)
| [
"vmalloc@gmail.com"
] | vmalloc@gmail.com |
b992279df4179e343cd86a13c730cb7d56b36b83 | 96909e3b2eb787afa739f3020a9292afae61b0b5 | /web/__init__.py | f2ab81fbc93b8f2f236e99d276ed434f89b742c1 | [] | no_license | fengges/se | 09bd6306f67d78fe0f51286ab41f629237fcf4d6 | 51e199a7fc5f7666063a556f41669a6a8b4fe37d | refs/heads/master | 2020-03-27T04:29:32.207191 | 2018-08-24T05:47:40 | 2018-08-24T05:47:40 | 145,944,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,314 | py |
# author :feng
# time :2018/1/25
# function : application initialization
# registers the blueprints/routes
import os
from main import Query
from flask import Flask,json,request
app = Flask(__name__)
# Code -> "k" lookup table used to configure the query engine.
# Codes look like discipline codes (e.g. '08' with '08xx' sub-codes);
# the meaning of "k" is not visible here — presumably a per-subject
# result/count parameter consumed by main.Query. TODO confirm.
subject = [{"code": '01', "k": 46}, {"code": '02', "k": 98}, {"code": '03', "k": 98},
           {"code": '04', "k": 88}, {"code": '05', "k": 98}, {"code": '06', "k": 28},
           {"code": '07', "k": 54}, {"code": '0701', "k": 64}, {"code": '0702', "k": 30},
           {"code": '0703', "k": 52}, {"code": '0705', "k": 16}, {"code": '0706', "k": 12},
           {"code": '0707', "k": 14}, {"code": '0709', "k": 98}, {"code": '0710', "k": 98},
           {"code": '0712', "k": 10}, {"code": '08', "k": 50}, {"code": '0801', "k": 26},
           {"code": '0802', "k": 98}, {"code": '0803', "k": 14}, {"code": '0804', "k": 12},
           {"code": '0805', "k": 98}, {"code": '0806', "k": 12}, {"code": '0807', "k": 38},
           {"code": '0808', "k": 98}, {"code": '0809', "k": 52}, {"code": '0810', "k": 98},
           {"code": '0811', "k": 22}, {"code": '0812', "k": 72}, {"code": '0813', "k": 30},
           {"code": '0814', "k": 68}, {"code": '0815', "k": 14}, {"code": '0816', "k": 14},
           {"code": '0817', "k": 98}, {"code": '0818', "k": 14}, {"code": '0819', "k": 18},
           {"code": '0820', "k": 18}, {"code": '0821', "k": 18}, {"code": '0823', "k": 24},
           {"code": '0824', "k": 14}, {"code": '0825', "k": 26}, {"code": '0826', "k": 10},
           {"code": '0827', "k": 12}, {"code": '0828', "k": 36}, {"code": '0829', "k": 14},
           {"code": '0830', "k": 82}, {"code": '0831', "k": 16}, {"code": '0832', "k": 28},
           {"code": '09', "k": 74}, {"code": '10', "k": 98}, {"code": '11', "k": 14},
           {"code": '12', "k": 98}]
# Global query engine, built once at import time.
a = Query(subject)
@app.route('/search', methods=['GET', 'POST'])
def index6():
    """Search endpoint: run the query with optional school/code filters.

    Accepts the JSON payload either as the raw request body or as a
    form/query value named ``data``. A filter entry containing "all" is
    treated as "no restriction" and removed before querying.
    """
    raw = request.data
    if len(raw) == 0:
        raw = request.values['data']
    payload = json.loads(raw)
    keyword = payload['keyword']
    # Key name "filer" is what the client sends — kept as-is.
    filters = payload.get('filer', {})
    if "school" in filters and "all" in filters["school"]:
        del filters["school"]
    if "code" in filters and "all" in filters["code"]:
        del filters["code"]
    result = a.do_query(keyword, filters)
    return json.jsonify(result)
| [
"1059387928@qq.com"
] | 1059387928@qq.com |
312251e0b9dadff3dd07e922e12f3799828a2905 | 4c56bbe4a2ed47ce4d1f18a13a89d0180d02f1bb | /venv/Scripts/pip3-script.py | ea9ba72ed63332a6d7195d3e2692787496295b84 | [] | no_license | spiritanand/Prime-Check | ba16ad565ffa26d27b99107b43b500349acda5d9 | cf8a6831f9fc591401bd6e46a0bcdbf645089f72 | refs/heads/master | 2023-02-12T21:04:44.802116 | 2020-03-21T12:06:33 | 2020-03-21T12:06:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | #!C:\Users\surya\PycharmProjects\primer\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"suryansh.anand007@gmail.com"
] | suryansh.anand007@gmail.com |
2a66489cb6a9a1d4ce60b4f659a14f264a284583 | ebaa9244a881dceb14dbbecf61e86b429516e332 | /appium/爱买店.py | f45b82c80c8af94801d694e5ac580220822ef020 | [] | no_license | lw2000017/ibuy_test | 249737f5b2d172615b09fb576ff736bb4b23ff54 | b0b8c1686eaab76a9a6d513b75fbeacb5fd122c9 | refs/heads/master | 2020-05-22T12:18:15.752000 | 2019-06-25T10:08:55 | 2019-06-25T10:08:55 | 186,334,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,542 | py | # -*- coding:utf-8 -*-
# @Time :2019/4/16 14:53
# @Author :LW
# @File :爱买店.py
from appium import webdriver
import time
def Start_appium():
    """Start an Appium session against the local server (port 4723),
    wait for the app's main activity, and return the driver."""
    device = '127.0.0.1:62001'  # device serial (emulator)
    pack = 'cn.iiibest.app'  # app package name
    activity = 'com.ibuyproject.MainActivity'  # app main activity
    desired_caps = {}
    desired_caps['device'] = 'android'
    desired_caps['platformName'] = 'Android'
    desired_caps['platformVersion'] = '5.1.1'
    desired_caps['deviceName'] = device
    desired_caps['appPackage'] = pack
    desired_caps['appActivity'] = activity
    driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
    # time.sleep(8)
    driver.wait_activity('com.ibuyproject.MainActivity', 10)
    return driver
def Quit(driver):
    """End the Appium session and release the device."""
    driver.quit()
def login_pwd(driver):
    """Log in with a hard-coded phone number and password via the UI.

    Locators are UiAutomator text/class selectors. The [1] and [3] indices
    assume the login screen's current widget order — fragile; confirm
    after any UI change.
    """
    # time.sleep(3)  # sleep 3s to wait for the app to start
    loc_text = 'new UiSelector().text(" 使用密码登录 ")'  # "log in with password" link
    driver.find_element_by_android_uiautomator(loc_text).click()
    time.sleep(3)
    loc_text = 'new UiSelector().text("请输入手机号")'  # phone-number input placeholder
    driver.find_element_by_android_uiautomator(loc_text).send_keys('15211111011')
    time.sleep(2)
    loc_text1 = 'new UiSelector().className("android.widget.EditText")'
    # second EditText is presumably the password field (index-based lookup)
    driver.find_elements_by_android_uiautomator(loc_text1)[1].send_keys('123456')
    time.sleep(2)
    loc_text2 = 'new UiSelector().className("android.widget.TextView")'
    # fourth TextView is presumably the login button (index-based lookup)
    driver.find_elements_by_android_uiautomator(loc_text2)[3].click()
    time.sleep(3)
def My_sign(driver):
    """Open the "My" tab and perform the daily check-in if not done yet.

    If the "invite friends to sign in" banner text is found, the account
    is treated as already checked in; otherwise taps the "go sign in"
    button and dismisses the popup with a fixed-coordinate tap (the close
    button reportedly cannot be located as an element).
    """
    loc_text = 'new UiSelector().text("我的")'  # "My" tab
    driver.find_element_by_android_uiautomator(loc_text).click()
    time.sleep(2)
    loc_text2 = 'new UiSelector().className("android.widget.ImageView")'
    # second ImageView opens the check-in page (index-based lookup)
    driver.find_elements_by_android_uiautomator(loc_text2)[1].click()
    time.sleep(2)
    loc_text = 'new UiSelector().text("邀好友签到,领更多奖励金")'
    cc = driver.find_element_by_android_uiautomator(loc_text).get_attribute('name')
    if cc == '邀好友签到,领更多奖励金':
        print('已签到')
    else:
        loc_text = 'new UiSelector().text("去签到")'  # "go sign in" button
        driver.find_element_by_android_uiautomator(loc_text).click()
        time.sleep(2)
        # dismiss the popup by tapping the close button's coordinates
        driver.tap((40, 80), 100)
        print('签到成功')
if __name__ == '__main__':
    # start the Appium session and get a driver
    driver = Start_appium()
    # log in with the fixed test account
    login_pwd(driver=driver)
    # open "My" tab and do the daily check-in
    My_sign(driver=driver)
    # end the session
    Quit(driver=driver)
# loc_text = 'new UiSelector().text("会员中心")'
# driver.find_element_by_android_uiautomator(loc_text).click()
# loc_text = 'new UiSelector().className("android.widget.TextView")'
# czz = driver.find_elements_by_android_uiautomator(loc_text)[4].get_attribute("name")
# # print(czz)
# loc_text = 'new UiSelector().className("android.widget.ImageView")'
# driver.find_elements_by_android_uiautomator(loc_text)[1].click()
# time.sleep(2)
# loc_text = 'new UiSelector().text("去签到")'
# driver.find_element_by_android_uiautomator(loc_text).click()
# time.sleep(2)
# loc_text = 'new UiSelector().text("立即签到")'
# driver.find_element_by_android_uiautomator(loc_text).click()
# time.sleep(2)
# driver.tap((40, 80), 100) # 实在是获取不到元素,只能靠点击关闭按钮坐标了
# time.sleep(2)
# loc_text = 'new UiSelector().text("邀好友签到,领更多奖励金")'
# print(assert driver.find_element_by_android_uiautomator(loc_text))
# assert driver.find_element_by_android_uiautomator(loc_text).text
| [
"1063681467@qq.com"
] | 1063681467@qq.com |
3dc723fb32a6c2be36f941fbfb3a83459bebe914 | f7c923c11184d184d40221b5f7119b3aa2b263da | /src/apps/book_app/admin.py | b17b64b1c7b57d8c1ee322812062c36f037dca2f | [] | no_license | skiesx/book_djangostars | 0c6bf7acda144cdb1095f31d940100368cf6722a | c54681524c2264c3110525bf3ece53254fb17b77 | refs/heads/master | 2020-04-03T13:50:53.965770 | 2018-10-30T01:19:19 | 2018-10-30T01:19:19 | 155,300,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | from django.contrib.admin.models import LogEntry
from django.contrib import admin
from apps.book_app.models import *
admin.site.register(WebRequest)
admin.site.register(Book)
class LogEntryBookAdmin(admin.ModelAdmin):
list_display = readonly_fields = [
'pk', 'action_time', 'object_id', 'object_title', 'action_flag'
]
admin.site.register(LogBook, admin_class=LogEntryBookAdmin)
class LogEntryAdmin(admin.ModelAdmin):
list_display = [
'pk', 'object_repr', 'action_time', 'user', 'content_type', 'object_id', 'action_flag', 'change_message'
]
readonly_fields = [
'user', 'action_time', 'content_type', 'object_id', 'object_repr', 'change_message', 'action_flag'
]
admin.site.register(LogEntry, admin_class=LogEntryAdmin)
| [
"SkiesX@ex.ua"
] | SkiesX@ex.ua |
e9e3cda5266717a5660707d3e5cbb04a54cdf11c | 34a7e30c3ceafb06c9a21c59c88c3ea5a6e91388 | /python/datagen/addPriority.py | dbbff881d7bd7fd56ded7dd0a280ef0ad32f27fd | [] | no_license | DinoBektesevic/DinoBektesevic.github.io | 91643f54411d214e7552e9ef2e1e0fbece5fb841 | be8cc8b3b2b58cbc1517593377228ff541fd515c | refs/heads/main | 2023-05-29T22:39:23.801299 | 2021-06-10T02:55:12 | 2021-06-10T02:55:12 | 364,038,461 | 0 | 0 | null | 2021-05-10T20:30:01 | 2021-05-03T19:27:07 | HTML | UTF-8 | Python | false | false | 1,301 | py | import glob
import pandas as pd
import numpy
import seaborn as sns
import matplotlib.pyplot as plt
# filename = "mjd-59662-sdss-simple-expanded.csv"
allfiles = glob.glob("testDir/mjd*-simple-expanded.csv")
for filename in allfiles:
df = pd.read_csv(filename, index_col=0)
newfilename = filename.strip(".csv") + "-priority.csv"
priority = numpy.array([-1]*len(df))
completion = numpy.array([-1]*len(df))
df["priority"] = priority
df["completion"] = completion
fields = list(set(df[df.objType=="sdss field"]["fieldID"]))
fieldPriority = numpy.random.choice([0,1,2,3,4,5], size=len(fields))
fieldCompletion = numpy.random.uniform(high=100, size=len(fields))
for field, priority, completion in zip(fields, fieldPriority, fieldCompletion):
# check if its scheduled
sched = list(df[df.fieldID==field]["scheduled"])
if True in sched:
# give all scheduled plates high priority
priority = 0
df["priority"].loc[df["fieldID"]==field] = priority
df["completion"].loc[df["fieldID"]==field] = completion
df.reset_index()
df.to_csv(newfilename)
# sns.scatterplot(x="fieldID", y="completion", data=df[df.objType=="sdss field"])
# plt.show()
# import pdb; pdb.set_trace()
# print(fields) | [
"csayres@uw.edu"
] | csayres@uw.edu |
b108a1c031785469143ca7c1f5b9ae87b78aa63d | 057b5b3044cb9b5b8078a42b1b16b60b86d5d612 | /welcome/models/__init__.py | cdfd4b338177d3a17d959c8c69080c82b044fdad | [
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | vincentrm89/django-ex | 202063969cde0f56bf786d1b511b550f44985a3e | feaf60ba19f21d0155c69850c0f9a1ab478b4b83 | refs/heads/master | 2020-08-31T06:07:10.698771 | 2019-10-30T23:17:32 | 2019-10-30T23:17:32 | 218,618,179 | 0 | 0 | null | 2019-10-30T20:25:15 | 2019-10-30T20:25:14 | null | UTF-8 | Python | false | false | 59 | py | from .pageview import PageView
from .mission import Mission | [
"vincentrm89@gmail.com"
] | vincentrm89@gmail.com |
e060fc4d2e2654c3df2dbe29d697eb692a0cbbd4 | db94f342091c298f49adc49dc052d9541934d960 | /tests/__init__.py | 8503b79fbcdd5e7a09275f846c27cb2f0a98953b | [] | no_license | dhanshew72/zonar-wishlist | 590f78ee962cb38da49c6ca928249054d7570805 | 664f2ffb9d3643cc3540874badad9af84941f3ff | refs/heads/main | 2023-08-22T14:20:07.647361 | 2021-10-07T20:44:54 | 2021-10-07T20:44:54 | 414,752,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | import os
import sys

# Make the project's src/ directory importable when running the test suite.
_SRC_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "src")
sys.path.append(_SRC_DIR)
| [
"dhanshew@infoblox.com"
] | dhanshew@infoblox.com |
99eacc90992f58160c82a3091b537b0fcdf37c30 | 4b1fc2fb34f15c2cb7c159ad75bb639dee520819 | /pydocker/netclass.py | 2184fb80f7cb7eddc00e0de626b2c9adda66e47c | [
"Apache-2.0"
] | permissive | Hyperpilotio/experimental | df7afb5a2ab45c70daeece5ec820cdf5d9543377 | 67fdb3136c05a2289dbaedb73ebeccc3fde7248b | refs/heads/master | 2021-03-22T00:21:56.404190 | 2018-03-04T04:30:40 | 2018-03-04T04:30:40 | 83,843,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,837 | py | """
Network utilies class
Current assumptions:
- Manual entry of max throughput possible
- Each BE container has their own IP address
- Not managing bursts for now
"""
__author__ = "Christos Kozyrakis"
__email__ = "christos@hyperpilot.io"
__copyright__ = "Copyright 2017, HyperPilot Inc"
import subprocess
import re
import time
import datetime as dt
class NetClass(object):
  """Network bandwidth isolation using an HTB qdisc and iptables marks.

  Egress packets on ``iface_ext`` that originate from registered container
  IPs are marked in the mangle/PREROUTING chain and steered into HTB class
  1:10, whose rate/ceil can be changed at runtime with :meth:`setBwLimit`.
  Everything else falls into the default class 1:1 at the full link rate.

  Useful documents and examples:
  - Creating multiple htb service classes:
    http://luxik.cdi.cz/~devik/qos/htb/manual/userg.htm
  - Classifying packets with filters
    http://lartc.org/howto/lartc.qdisc.filters.html
  - Common iptables commands
    http://www.thegeekstuff.com/2011/06/iptables-rules-examples
  """

  def __init__(self, iface_ext, iface_cont, max_bw_mbps, link_bw_mbps):
    """Flush mangle rules and (re)install the HTB hierarchy on iface_ext.

    Args:
      iface_ext: external (shaped) interface name.
      iface_cont: interface the containers are attached to.
      max_bw_mbps: initial rate/ceiling for the shaped class 1:10.
      link_bw_mbps: physical link rate, used for the default class 1:1.

    Raises:
      Exception: if iptables cannot be flushed or the qdisc setup fails.
    """
    self.iface_ext = iface_ext
    self.iface_cont = iface_cont
    self.max_bw_mbps = max_bw_mbps
    self.link_bw_mbps = link_bw_mbps
    self.cont_ips = set()  # container IPs that currently have a mark rule
    self.mark = 6  # fwmark shared by all shaped container traffic
    # reset IP tables
    try:
      subprocess.check_call(('iptables -t mangle -F').split())
    except subprocess.CalledProcessError:
      raise Exception('Could not reset iptables')
    # Best effort: deleting the root qdisc fails when none is installed,
    # which is fine (previously a bare ``except:``; narrowed so unrelated
    # errors such as a missing tc binary are no longer swallowed).
    try:
      subprocess.check_call(('tc qdisc del dev %s root' % self.iface_ext).split())
    except subprocess.CalledProcessError:
      pass
    # Replace the root qdisc with HTB. The add/del/add dance plus the
    # htb_rate_est toggle is needed to make HTB export rate estimates.
    try:
      subprocess.check_call(('tc qdisc add dev %s root handle 1: htb default 1' \
                             % self.iface_ext).split())
      subprocess.check_call(('echo 1 > /sys/module/sch_htb/parameters/htb_rate_est'), shell=True)
      subprocess.check_call(('tc qdisc del dev %s root' % self.iface_ext).split())
      subprocess.check_call(('tc qdisc add dev %s root handle 1: htb default 1' \
                             % self.iface_ext).split())
      subprocess.check_call(('tc class add dev %s parent 1: classid 1:1 htb rate %dmbit ceil %dmbit' \
                             % (self.iface_ext, self.link_bw_mbps, self.link_bw_mbps)).split())
      subprocess.check_call(('tc class add dev %s parent 1: classid 1:10 htb rate %dmbit ceil %dmbit' \
                             % (self.iface_ext, self.max_bw_mbps, self.max_bw_mbps)).split())
      subprocess.check_call(('tc filter add dev %s parent 1: protocol all prio 10 handle %d fw flowid 1:10' \
                             % (self.iface_ext, self.mark)).split())
    except subprocess.CalledProcessError:
      raise Exception('Could not setup htb qdisc')

  def addIPtoFilter(self, cont_ip):
    """Start shaping traffic from *cont_ip* by adding its mark rule."""
    if cont_ip in self.cont_ips:
      raise Exception('Duplicate filter for IP %s' % cont_ip)
    self.cont_ips.add(cont_ip)
    try:
      subprocess.check_call(('iptables -t mangle -A PREROUTING -i %s -s %s -j MARK --set-mark %d' \
                             % (self.iface_cont, cont_ip, self.mark)).split())
    except subprocess.CalledProcessError:
      raise Exception('Could not add iptable filter for %s' % cont_ip)

  def removeIPfromFilter(self, cont_ip):
    """Stop shaping traffic from *cont_ip* by deleting its mark rule."""
    if cont_ip not in self.cont_ips:
      raise Exception('Not existing filter for %s' % cont_ip)
    self.cont_ips.remove(cont_ip)
    try:
      subprocess.check_call(('iptables -t mangle -D PREROUTING -i %s -s %s -j MARK --set-mark %d' \
                             % (self.iface_cont, cont_ip, self.mark)).split())
    except subprocess.CalledProcessError:
      # Fixed message: this is the removal path (previously said "add").
      raise Exception('Could not remove iptable filter for %s' % cont_ip)

  def setBwLimit(self, bw_mbps):
    """Set rate and ceiling of the shaped class 1:10 to *bw_mbps* mbit/s."""
    # `tc class replace` works whether or not the class already exists.
    try:
      subprocess.check_call(('tc class replace dev %s parent 1: classid 1:10 htb rate %dmbit ceil %dmbit' \
                             % (self.iface_ext, bw_mbps, bw_mbps)).split())
    except subprocess.CalledProcessError:
      raise Exception('Could not change htb class rate')

  @staticmethod
  def _parse_sent_mbits(text):
    """Map each HTB minor class id in `tc -s class show` output to the
    cumulative megabits reported on its "Sent ... bytes" line."""
    results = {}
    pattern = r'class htb 1:(?P<cls>\d+).*?\n.*?Sent (?P<nbytes>\d+) bytes'
    for m in re.finditer(pattern, text, re.DOTALL):
      results[int(m.group('cls'))] = 8.0 * int(m.group('nbytes')) / 1000 / 1000
    return results

  @staticmethod
  def _parse_rate_mbps(text):
    """Map each HTB minor class id to its estimated rate in mbps, taken
    from the "rate Nbit" line that follows the class header."""
    results = {}
    pattern = r'class htb 1:(?P<cls>\d+).*?\n.*?rate (?P<rate>\d+)bit'
    for m in re.finditer(pattern, text, re.DOTALL):
      results[int(m.group('cls'))] = int(m.group('rate')) / 1000000.0
    return results

  def _read_tc_text(self):
    """Return the decoded output of `tc -s class show dev <iface_ext>`."""
    out = subprocess.check_output(('tc -s class show dev %s' % self.iface_ext).split())
    # check_output returns bytes on py3; the regex parsers expect str.
    if isinstance(out, bytes):
      out = out.decode('utf-8', 'replace')
    return out

  def getBwStatsBlocking(self):
    """Block for ~1 second and return {class_id: average mbps} over the window.

    Fixes the original implementation, which iterated ``dict.iterkeys()``
    (the builtin type — a NameError/py2-ism) and mis-parenthesized the
    average so only the starting sample was divided by the elapsed time.
    """
    starting_value = self._parse_sent_mbits(self._read_tc_text())
    starting_time = dt.datetime.now()
    time.sleep(1)
    ending_value = self._parse_sent_mbits(self._read_tc_text())
    ending_time = dt.datetime.now()
    elapsed_time = (ending_time - starting_time).total_seconds()
    results = {}
    for cls in ending_value:
      # Classes that appeared mid-window count from zero megabits.
      delta_mbits = ending_value[cls] - starting_value.get(cls, 0.0)
      results[cls] = delta_mbits / elapsed_time
    return results

  def getBwStats(self):
    """Non-blocking read of HTB's own rate estimates, {class_id: mbps}."""
    return self._parse_rate_mbps(self._read_tc_text())
| [
"kozyraki@stanford.edu"
] | kozyraki@stanford.edu |
326d041bf3c62db88e54e60c05578758e5dbe54d | 2bb7d296bb159540d5dd1f201b88f13a2c4f36e0 | /project/tests/test_summaries.py | d43f1236e0702d4efa3c5c159ddaa98895d0fdbd | [] | no_license | heizerbalazs/fastapi-tdd-docker | 96e4394e14b52b448c07b66f8037c3ee2a31561e | 42b4c2e9115294586fabdf2ce39006bf314884a0 | refs/heads/master | 2022-12-20T06:17:31.122002 | 2020-09-27T08:50:04 | 2020-09-27T08:50:04 | 296,577,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,815 | py | # project/tests/test_summaries.py
import json
def test_create_summary(test_app_with_db):
    """POST /summaries/ returns 201 and echoes the submitted URL."""
    payload = json.dumps({"url": "https://foo.bar"})
    response = test_app_with_db.post("/summaries/", data=payload)
    assert response.status_code == 201
    assert response.json()["url"] == "https://foo.bar"
def test_create_summaries_invalid_json(test_app):
    """An empty JSON body fails validation with a 422."""
    response = test_app.post("/summaries/", data=json.dumps({}))
    expected_error = {
        "loc": ["body", "pyload", "url"],  # "pyload" mirrors the app's field name
        "msg": "field required",
        "type": "value_error.missing",
    }
    assert response.status_code == 422
    assert response.json() == {"detail": [expected_error]}
def test_read_summary(test_app_with_db):
    """A created summary can be fetched back by its id."""
    created = test_app_with_db.post(
        "/summaries/", data=json.dumps({"url": "https://foo.bar"})
    )
    summary_id = created.json()["id"]
    response = test_app_with_db.get(f"/summaries/{summary_id}/")
    assert response.status_code == 200
    body = response.json()
    assert body["id"] == summary_id
    assert body["url"] == "https://foo.bar"
    assert body["summary"]
    assert body["created_at"]
def test_read_summary_incorrect_id(test_app_with_db):
    """Fetching a nonexistent summary id yields a 404 with a detail message."""
    response = test_app_with_db.get("/summaries/999/")
    assert response.status_code == 404
    assert response.json()["detail"] == "Summary not found"
def test_read_all_summaries(test_app_with_db):
    """The list endpoint contains a freshly created summary exactly once."""
    created = test_app_with_db.post(
        "/summaries/", data=json.dumps({"url": "https://foo.bar"})
    )
    summary_id = created.json()["id"]
    response = test_app_with_db.get("/summaries/")
    assert response.status_code == 200
    matches = [item for item in response.json() if item["id"] == summary_id]
    assert len(matches) == 1
| [
"heizer.balazs@gmail.com"
] | heizer.balazs@gmail.com |
ca9b5394dc6522949a1d8a88828a4ac6086710d5 | 38be6da813f2d230a90d1ac4c7deb81ca6221be0 | /ad_hoc/beginner/timus/F1000/Solution.py | d913c165f7dad707131916fd5663b31f6f25c1a3 | [
"MIT"
] | permissive | MdAman02/problem_solving | c8c0ce3cd5d6daa458cb0a54ac419c7518bdbe1f | 1cb731802a49bbb247b332f2d924d9440b9ec467 | refs/heads/dev | 2022-09-13T09:40:51.998372 | 2022-09-04T14:15:17 | 2022-09-04T14:15:17 | 256,194,798 | 0 | 0 | MIT | 2020-04-16T19:08:24 | 2020-04-16T11:27:57 | Java | UTF-8 | Python | false | false | 251 | py | # problem name: A+B Problem
# problem link: https://acm.timus.ru/problem.aspx?space=1&num=1000
# contest link: (?)
# time: (?)
# author: reyad
# other tags: (?)
# difficulty level: beginner
# Read the two integers from the single input line and print their sum.
x, y = map(int, input().split())
print(x + y)
"reyadussalahin@gmail.com"
] | reyadussalahin@gmail.com |
b454a7571ea51919efa5e03634793990c193a480 | 2571b3ad135e7085156817c929d6bc68fb0bec97 | /pilha1.py | f787e866eaccb50bee239f5d7cc5eb96610ed693 | [] | no_license | pmalvees/atividades-pilha-est-tica | 4d86d9d1e7ba89459f9f938b69d1fdd5614ba2c6 | 71b1845d4052e3fee18baa92eb9bb1a7b496f810 | refs/heads/main | 2023-07-13T01:44:59.123955 | 2021-08-18T23:06:12 | 2021-08-18T23:06:12 | 397,755,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,038 | py | class Pilha1:
    def __init__(self):
        """Create an empty stack; ``tamanho`` mirrors the element count."""
        self.elementos = []
        self.tamanho = 0
def empilha(self, valor):
self.elementos.append(valor)
self.tamanho += 1
return valor
def desempilha(self):
if self.esta_vazia():
raise Exception('Pilha Vazia, operação não pode ser realizada')
else:
topo = self.topo()
self.elementos = self.elementos[:-1]
self.tamanho -= 1
return topo
    def topo(self):
        """Return (without removing) the top element; IndexError if empty."""
        return self.elementos[-1]
def retorna_todos_elementos(self):
for elemento in self.elementos:
yield elemento
    def esta_vazia(self):
        """Return True when the stack holds no elements."""
        return self.tamanho == 0
def menor_elemento(self):
if self.esta_vazia():
raise Exception('Pilha Vazia, operação não pode ser realizada')
else:
elementos_copia = list(self.retorna_todos_elementos())
elementos_copia.sort()
return elementos_copia[0]
def maior_elemento(self):
if self.esta_vazia():
raise Exception('Pilha Vazia, operação não pode ser realizada')
else:
elementos_copia = list(self.retorna_todos_elementos())
print(elementos_copia)
elementos_copia.sort(reverse=True)
return elementos_copia[0]
def media_elementos(self):
if self.esta_vazia():
raise Exception('Pilha Vazia, operação não pode ser realizada')
else:
soma = 0
for elemento in self.elementos:
soma += elemento
return soma / self.tamanho
def comparar(self, pilha12):
if self.tamanho != pilha12.tamanho:
return False
else:
iguais = True
elementos_2 = list(pilha12.retorna_todos_elementos())
for i in range(self.tamanho):
if not self.elementos[i] == elementos_2[i]:
iguais = False
break
return iguais
| [
"pmalvees@gmail.com"
] | pmalvees@gmail.com |
a7127f2690e2da8ab0f8bf832c908d3211dd21a2 | b98cd0e4c06d48fc197e5f10468f359a70305676 | /tensorflow_graphics/physics/demos/finger_plot.py | 1e7f6cbe19259abe9d7910b60963d9a51845f160 | [
"Apache-2.0"
] | permissive | aespielberg/graphics | e2b515b35ae8f228d9cb68476bc10be1dbb88d63 | 28a5c1a51565a9c30c82290eb420e9854d588882 | refs/heads/main | 2023-06-26T18:29:31.343006 | 2021-02-23T19:38:29 | 2021-02-23T19:38:29 | 341,668,523 | 0 | 0 | Apache-2.0 | 2021-02-23T19:37:08 | 2021-02-23T19:37:07 | null | UTF-8 | Python | false | false | 7,320 | py | import sys
sys.path.append('..')
import random
import os
from simulation import Simulation, get_bounding_box_bc
import time
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import tensorflow.contrib.layers as ly
from vector_math import *
import export
lr = 2  # gradient-descent learning rate
gamma = 0.0  # weight of the terminal-velocity term in the loss
sample_density = 40  # particles per group edge (group_num_particles = density^2)
multi_target = True  # True: sample goals inside a box; False: one fixed goal
group_num_particles = sample_density**2
goal_pos = np.array([0.5, 0.6])  # center of the goal-sampling region
goal_range = np.array([0.15, 0.15])  # half-extent of the goal-sampling region
if not multi_target:
  goal_pos = np.array([0.6, 0.6])
  goal_range = np.zeros((2,), dtype = np.float32)
batch_size = 1
actuation_strength = 4  # scales the controller's tanh output
config = 'B'
if config == 'B':
  # Finger: two actuated side segments plus a passive block on top.
  num_groups = 3
  group_offsets = [(1, 0), (1.5, 0), (1, 2)]
  group_sizes = [(0.5, 2), (0.5, 2), (1, 1)]
  actuations = [0, 1]  # indices of the actuated groups
  head = 2  # group whose position/velocity drive the loss
  gravity = (0, 0)
else:
  print('Unknown config {}'.format(config))
num_particles = group_num_particles * num_groups
def particle_mask(start, end):
  """Return a (1, num_particles) float32 mask: 1.0 for indices in [start, end)."""
  idx = tf.range(0, num_particles)
  in_range = tf.logical_and(start <= idx, idx < end)
  return tf.cast(in_range, tf.float32)[None, :]
def particle_mask_from_group(g):
  """Return the mask selecting every particle belonging to group *g*."""
  first = g * group_num_particles
  return particle_mask(first, first + group_num_particles)
# NN weights: one linear layer mapping the stacked per-group observations
# (6 values per group: mean pos, mean vel, goal term) to one actuation
# signal per actuated group.
W1 = tf.Variable(
    0.02 * tf.random.normal(shape=(len(actuations), 6 * len(group_sizes))),
    trainable=True)
b1 = tf.Variable([0.0] * len(actuations), trainable=True)
def main(sess):
  """Build the differentiable simulation + NN controller graph and train it.

  The controller observes each group's mean position/velocity plus the
  (normalized) goal and emits one tanh actuation per actuated group; the
  loss is the head group's squared distance to a randomly sampled goal,
  plus gamma * squared terminal velocity. Trains forever (100000 iters),
  logging the L2 distance per iteration.
  """
  t = time.time()
  goal = tf.compat.v1.placeholder(dtype=tf.float32, shape=[batch_size, 2], name='goal')
  # Define your controller here
  def controller(state):
    controller_inputs = []
    for i in range(num_groups):
      mask = particle_mask(i * group_num_particles,
                           (i + 1) * group_num_particles)[:, None, :] * (
                               1.0 / group_num_particles)
      pos = tf.reduce_sum(input_tensor=mask * state.position, axis=2, keepdims=False)
      vel = tf.reduce_sum(input_tensor=mask * state.velocity, axis=2, keepdims=False)
      controller_inputs.append(pos)
      controller_inputs.append(vel)
    if multi_target:
      controller_inputs.append((goal - goal_pos) / goal_range)
    else:
      controller_inputs.append(goal)
    # Batch, dim
    controller_inputs = tf.concat(controller_inputs, axis=1)
    assert controller_inputs.shape == (batch_size, 6 * num_groups), controller_inputs.shape
    controller_inputs = controller_inputs[:, :, None]
    assert controller_inputs.shape == (batch_size, 6 * num_groups, 1)
    # Batch, 6 * num_groups, 1
    intermediate = tf.matmul(W1[None, :, :] +
                             tf.zeros(shape=[batch_size, 1, 1]), controller_inputs)
    # Batch, #actuations, 1
    assert intermediate.shape == (batch_size, len(actuations), 1)
    assert intermediate.shape[2] == 1
    intermediate = intermediate[:, :, 0]
    # Batch, #actuations
    actuation = tf.tanh(intermediate + b1[None, :]) * actuation_strength
    debug = {'controller_inputs': controller_inputs[:, :, 0], 'actuation': actuation}
    total_actuation = 0
    zeros = tf.zeros(shape=(batch_size, num_particles))
    for i, group in enumerate(actuations):
      act = actuation[:, i:i+1]
      assert len(act.shape) == 2
      mask = particle_mask_from_group(group)
      act = act * mask
      # First PK stress here
      act = make_matrix2d(zeros, zeros, zeros, act)
      # Convert to Kirchhoff stress
      total_actuation = total_actuation + act
    return total_actuation, debug
  res = (40, 40)
  bc = get_bounding_box_bc(res)
  if config == 'B':
    bc[0][:, :, :7] = -1 # Sticky
    bc[1][:, :, :7] = 0 # Sticky
  sim = Simulation(
      dt=0.005,
      num_particles=num_particles,
      grid_res=res,
      gravity=gravity,
      controller=controller,
      batch_size=batch_size,
      bc=bc,
      sess=sess)
  print("Building time: {:.4f}s".format(time.time() - t))
  final_state = sim.initial_state['debug']['controller_inputs']
  s = head * 6
  # head group's observation slice: [pos(2), vel(2)] at offset s
  final_position = final_state[:, s:s+2]
  final_velocity = final_state[:, s + 2: s + 4]
  loss1 = tf.reduce_mean(input_tensor=tf.reduce_sum(input_tensor=(final_position - goal) ** 2, axis = 1))
  loss2 = tf.reduce_mean(input_tensor=tf.reduce_sum(input_tensor=final_velocity ** 2, axis = 1))
  loss = loss1 + gamma * loss2
  # Lay particles out on a regular grid inside each group's rectangle.
  initial_positions = [[] for _ in range(batch_size)]
  for b in range(batch_size):
    for i, offset in enumerate(group_offsets):
      for x in range(sample_density):
        for y in range(sample_density):
          scale = 0.2
          u = ((x + 0.5) / sample_density * group_sizes[i][0] + offset[0]
              ) * scale + 0.2
          v = ((y + 0.5) / sample_density * group_sizes[i][1] + offset[1]
              ) * scale + 0.1
          initial_positions[b].append([u, v])
  assert len(initial_positions[0]) == num_particles
  initial_positions = np.array(initial_positions).swapaxes(1, 2)
  sess.run(tf.compat.v1.global_variables_initializer())
  initial_state = sim.get_initial_state(
      position=np.array(initial_positions), youngs_modulus=10)
  trainables = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
  sim.set_initial_state(initial_state=initial_state)
  sym = sim.gradients_sym(loss, variables=trainables)
  sim.add_point_visualization(pos=goal, color=(0, 1, 0), radius=3)
  sim.add_vector_visualization(pos=final_position, vector=final_velocity, color=(0, 0, 1), scale=50)
  sim.add_point_visualization(pos=final_position, color=(1, 0, 0), radius=3)
  if multi_target:
    fout = open('multi_target_{}.log'.format(lr), 'w')
  else:
    fout = open('single_target_{}.log'.format(lr), 'w')
  # Optimization loop
  for it in range(100000):
    t = time.time()
    goal_input = ((np.random.random([batch_size, 2]) - 0.5) * goal_range + goal_pos)
    print('train...')
    memo = sim.run(
        initial_state=initial_state,
        num_steps=150,
        iteration_feed_dict={goal: goal_input},
        loss=loss)
    grad = sim.eval_gradients(sym=sym, memo=memo)
    # plain SGD step on the controller weights
    gradient_descent = [
        v.assign(v - lr * g) for v, g in zip(trainables, grad)
    ]
    sess.run(gradient_descent)
    print('Iter {:5d} time {:.3f} loss {}'.format(
        it, time.time() - t, memo.loss))
    loss_cal = memo.loss
    if False: #i % 5 == 0:
      sim.visualize(memo, batch = 0, interval = 5)
      # sim.visualize(memo, batch = 1)
    print('L2:', loss_cal ** 0.5)
    print(it, 'L2 distance: ', loss_cal ** 0.5, file = fout)
    # Leftover validation loop, kept disabled for reference:
    '''
    print('valid...')
    loss_cal = 0
    for goal_input in goal_valid:
      memo = sim.run(
        initial_state=initial_state,
        num_steps=80,
        iteration_feed_dict={goal: goal_input},
        loss=loss)
      print('time {:.3f} loss {:.4f}'.format(
          time.time() - t, memo.loss))
      loss_cal = loss_cal + memo.loss
      if i == 0:# memo.loss < 1e-4:
        for b in vis_id:
          sim.visualize(memo, batch = b, export = exp)
        exp.export()
    print('valid loss {}'.format(loss_cal / len(goal_valid)))
    print('==============================================')
    '''
sess_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
sess_config.gpu_options.per_process_gpu_memory_fraction = 0.4
with tf.compat.v1.Session(config=sess_config) as sess:
main(sess=sess)
| [
"aespielberg@csail.mit.edu"
] | aespielberg@csail.mit.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.