blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9cf0f32bf084dae5f77deaf0c8b0fa769f272b68 | f4c2244632310fdeb0578a3112ef75ce9727fe3b | /Hackerrank/Python/Finding-the-percentage.py | 3d60e194c10276705f4c427cb0098d6b2b9f5052 | [] | no_license | KUMAWAT55/Monk-Code | 1f4ba5709d5ebd64cbd72c1cfdb49f6b7f38d3f7 | 9920ccc74aa26ccf7bb6e530be400081a93e9c1c | refs/heads/master | 2021-05-14T02:10:02.985763 | 2018-02-16T17:58:53 | 2018-02-16T17:58:53 | 43,749,518 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
# HackerRank "Finding the percentage": read n, then n lines of
# "name mark mark ...", store name -> list of float marks, then read a
# query name and print the average of that student's marks to 2 decimals.
# NOTE: written for Python 2 (raw_input and the print statement).
n=input()
dic={}
lis=[]
for i in range(n):
    # each record line: student name followed by the marks
    a=raw_input()
    lis=a.split()
    d=lis[0]
    lis.remove(lis[0])
    lst1=list(map(float,lis))
    dic[d]=lst1
name=raw_input()
if name in dic:
    marks=dic[name]
    t=0
    # sum the marks, then divide by their count for the average
    for i in marks:
        t=t+i
    t=t/len(marks)
    print "%.2f" %t
| [
"noreply@github.com"
] | noreply@github.com |
80c33d1bb5c426a16cacf906af5e9b05cce93b96 | 8542fbf185f9d1e7776520360d0eb1653a5cf0c4 | /Movie DDRental/src/MovieDDL/controller/ControllerRental.py | b11eeeac662de63d46ae1658003c2e702383a12d | [] | no_license | Skysoulther/Python-assignments-A1 | 7e1d520b6eb33dde06df96eeca3d11a0966b4ded | 85bd43fbf4a82d2cc31a90fd65b17e403f7f07fd | refs/heads/master | 2021-01-16T23:03:26.112330 | 2017-01-15T18:24:18 | 2017-01-15T18:24:18 | 71,887,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,131 | py | '''
Created on 6 Nov 2016
@author: DDL
'''
from MovieDDL.domain.Entities import Rental
from MovieDDL.controller.UndoController import Operation
from MovieDDL.controller.ControllerExceptions import ControllerException
from MovieDDL.repository.SortingFilter import SortingFilter
import datetime
class rentalController:
    '''
    Controller for rental use-cases: renting/returning movies, rental
    statistics (late rentals, most rented movies, most active clients)
    and undo/redo support.  Collaborates with the movie, client and
    rental repositories plus a validator and an undo controller.
    '''
    def __init__(self,repoMovie,repoClient,repoRental,validator,undo):
        '''
        Creates a controller for rentals
        '''
        self._undoControl=undo
        self._repositoryRental=repoRental
        self._repositoryMovie=repoMovie
        self._repositoryClient=repoClient
        self.__validator=validator
        # Dispatch table used by undo()/redo(): maps the operation names
        # recorded in Operation objects to the callables that execute them.
        self.__functions={"add_movie":self._repositoryMovie.add_movie,
                          "remove_movie":self._repositoryMovie.remove_movie,
                          "edit_movie":self._repositoryMovie.update_movie,
                          "add_client":self._repositoryClient.add_client,
                          "remove_client":self._repositoryClient.remove_client,
                          "edit_client":self._repositoryClient.update_client,
                          "remove_rentals":self._repositoryRental.remove_rentals,
                          "add_rentals":self._repositoryRental.add_rentals,
                          "rent_movie":self._repositoryRental.add_rental,
                          "unrent_movie":self._remove_rental,
                          "return_movie":self._return_rental,
                          "unreturn_movie":self._disable_return}
    def _generate_rentalID(self,Id):
        '''
        Generates the next free rental ID for client `Id`.
        Raises ControllerException when the client currently holds an
        overdue, still-unreturned movie.
        '''
        Rents=self._repositoryRental.get_all()
        for key in Rents:
            # look for rentals of this client that passed their due date
            if Rents[key].get_rclientId()==Id and Rents[key].get_dueDate()<datetime.date.today():
                client=self._repositoryClient.return_client_Id(Id)
                movie=self._repositoryMovie.return_movie_Id(Rents[key].get_rmovieId())
                # an unavailable movie here means it was never returned
                if not movie.get_availability():
                    raise ControllerException("The client: '"+str(client.get_clientID())+" - "+str(client.get_clientName())+"' can't rent this movie!\nThe movie: '"+str(movie.get_Id())+" - "+str(movie.get_title())+"' passed the due date!\n")
        # smallest positive integer not already used as a rental ID
        i=1
        while i in Rents:
            i+=1
        return i
    def _validateID(self,Id):
        '''
        Validates ID
        Output: True when the ID can be converted to an integer
        Exceptions: Controller Exception when ID is not a number
        '''
        try:
            Id=int(Id)
        except ValueError:
            raise ControllerException("The ID should be a number!\n")
        return True
    def remove_rentals(self,client):
        '''
        Remove all rentals of a client from the repository (used when the
        client itself is deleted) and record undo/redo operations
        Input: client - a client Object
        Output: -
        Exceptions: Controller Exception when Id is not valid
        '''
        Id=client.get_clientID()
        self.__validator.validateID(Id)
        removedRentals=self._repositoryRental.remove_rentals(Id)
        # undo restores the client together with the rentals removed here
        self._undoControl.store_undo([Operation("add_client",[client]),Operation("add_rentals",[removedRentals])])
        self._undoControl.store_redo([Operation("remove_client",[client.get_clientID()]),Operation("remove_rentals",[client.get_clientID()])])
    def add_rentals(self,rentals):
        '''
        Add some rentals back into the repository (undo helper)
        Input: rentals - a list of rentals
        Output: -
        Exceptions: Controller Exception when Id is not valid
        '''
        self._repositoryRental.add_rentals(rentals)
    def _remove_rental(self,Id):
        '''
        Remove a rental from the repository and mark its movie available
        again (undo helper for rent_movie)
        Input: Id - the ID of the rental
        Output: -
        Exceptions: Controller Exception when the Id of the rental is not valid
        '''
        rental=self._repositoryRental.remove_rental(Id)
        self._repositoryMovie.change_availability(rental.get_rmovieId(),True)
    def get_allRentals(self):
        '''
        Returns the dictionary of rentals kept by the repository
        '''
        return self._repositoryRental.get_all()
    def checks_movie(self,Id):
        '''
        Checks if a movie is in the list of availableMovies or not
        Input: Id - a number
        Output: True when the movie is available
        Exceptions: ControllerException from invalid Id or if the movie is not available
        '''
        self._validateID(Id)
        Id=int(Id)
        available=self._repositoryMovie.get_available()
        if not Id in available:
            raise ControllerException("The movie with the ID: "+str(Id)+" is not in the available list!\n")
        return True
    def checks_client(self,Id):
        '''
        Checks if the client exists or not
        Input: Id - a number
        Output: True when the client exists
        Exceptions: ControllerException from invalid Id or if the client can't be found
        '''
        self._validateID(Id)
        Id=int(Id)
        available=self._repositoryClient.get_all()
        if not Id in available:
            raise ControllerException("There is not client with the ID: "+str(Id)+"!\n")
        return True
    def rent_movie(self,rental):
        '''
        Add a rental in the list of rentals and record undo/redo operations
        Input: rental - a list with the field values of a rental; the
               generated rental id is inserted in front, so the caller's
               list is mutated in place
        Output: -
        Exceptions: StoreException if the data from rental is invalid
        '''
        # rental[1] is passed as the client id to the ID generator --
        # TODO(review): confirm this matches the Rental(...) field order
        rentalId=self._generate_rentalID(rental[1])
        rental.insert(0,rentalId)
        rental=Rental(rental[0],rental[1],rental[2],rental[3],rental[4])
        self.__validator.validateRental(rental)
        self._repositoryRental.add_rental(rental)
        # renting a movie makes it unavailable for other clients
        self._repositoryMovie.change_availability(rental.get_rmovieId(),False)
        self._undoControl.store_undo([Operation("unrent_movie",[rental.get_rentalId()])])
        self._undoControl.store_redo([Operation("rent_movie",[rental])])
    def checks_movie2(self,Id):
        '''
        Checks that a movie exists and is currently rented (not available)
        Input: Id - a number
        Output: True when the movie exists and is rented
        Exceptions: ControllerException from invalid Id, missing movie or
                    a movie that is not rented
        '''
        self._validateID(Id)
        Id=int(Id)
        alls=self._repositoryMovie.get_all()
        available=self._repositoryMovie.get_available()
        if not Id in alls:
            raise ControllerException("There is no movie with the ID: "+str(Id)+"!\n")
        if Id in available:
            raise ControllerException("The movie with the ID: "+str(Id)+" is not rented!\n")
        return True
    def return_rental(self,clientId,movieId):
        '''
        Mark a rental as returned, make the movie available again and
        record undo/redo operations
        Input: clientId - the Id of the client
               movieId - the Id of the movie
        Output: -
        Exceptions: -
        '''
        clientId=int(clientId)
        movieId=int(movieId)
        rental=self._repositoryRental.return_rental(clientId,movieId)
        self._repositoryMovie.change_availability(rental.get_rmovieId(),True)
        self._undoControl.store_undo([Operation("unreturn_movie",[clientId,movieId])])
        self._undoControl.store_redo([Operation("return_movie",[clientId,movieId])])
    def _return_rental(self,clientId,movieId):
        '''
        Same as return_rental but without recording undo/redo history
        (used as the redo handler in the dispatch table)
        Input: clientId - the Id of the client
               movieId - the Id of the movie
        Output: -
        Exceptions: -
        '''
        clientId=int(clientId)
        movieId=int(movieId)
        rental=self._repositoryRental.return_rental(clientId,movieId)
        self._repositoryMovie.change_availability(rental.get_rmovieId(),True)
    def _disable_return(self,clientId,movieId):
        '''
        Set as unreturned a rental from the list of rentals and mark the
        movie as rented again (undo helper for return_rental)
        Input: clientId - the Id of the client
               movieId - the Id of the movie
        Output: -
        Exceptions: -
        '''
        rental=self._repositoryRental.unreturn_rental(clientId,movieId)
        self._repositoryMovie.change_availability(rental.get_rmovieId(),False)
    def all_rentals(self):
        '''
        Returns the list of currently rented (unavailable) movies
        Input: -
        Output: rentedMovies - the list of rented movie objects
        Exceptions: -
        '''
        rentedMovies=[]
        allMovies=self._repositoryMovie.get_all()
        for key in allMovies:
            if not allMovies[key].get_availability():
                rentedMovies.append(allMovies[key])
        return rentedMovies
    def _calculate_lateDays(self,dueDate,returnDate):
        '''
        Calculates the late rental days of a rental
        Input: dueDate - due date of the rental
               returnDate - return date of the movie (None if not returned)
        Output: delta.days - number of late rental days of a rental
        Exceptions: -
        '''
        today=datetime.date.today()
        # zero-length timedelta as the "not late" default
        delta=today-today
        if today>dueDate:
            if returnDate==None:
                # still not returned: late since the due date
                delta=today-dueDate
            elif returnDate>dueDate:
                delta=returnDate-dueDate
        return delta.days
    def _calculate_days(self,rentDate,returnDate):
        '''
        Calculates the rental days of a rental
        Input: rentDate - rent date of the rental
               returnDate - return date of the movie (None if not returned)
        Output: delta.days - number of days the movie was/has been rented
        Exceptions: -
        '''
        today=datetime.date.today()
        if returnDate==None:
            # still rented: count up to today
            delta=today-rentDate
        else:
            delta=returnDate-rentDate
        return delta.days
    def late_rentals(self):
        '''
        Returns the late rentals as data-transfer objects sorted by how
        late they are
        Input: -
        Output: dtoList - list of objectRentalCount(movie, late days)
        Exceptions: -
        '''
        dtoList=[]
        list1=[]
        allMovies=self._repositoryMovie.get_all()
        rentals=self._repositoryRental.get_all()
        for key in rentals:
            dueDate=rentals[key].get_dueDate()
            returnDate=rentals[key].get_returnedDate()
            delta=self._calculate_lateDays(dueDate, returnDate)
            if delta>0:
                movieId=rentals[key].get_rmovieId()
                list1.append([movieId,delta])
        for element in list1:
            dtoList.append(objectRentalCount(allMovies[element[0]],element[1]))
        dtoList=SortingFilter.gnomeSort(dtoList,SortingFilter.comparison2)
        return dtoList
    def active_clients(self):
        '''
        Returns the clients sorted by the total number of days they kept
        rented movies
        Input: -
        Output: dtoList - list of objectRentalCount(client, rented days)
        Exceptions: -
        '''
        list1={}
        dtoList=[]
        allClients=self._repositoryClient.get_all()
        for key in allClients:
            list1[key]=0
        rentals=self._repositoryRental.get_all()
        for key in rentals:
            rentDate=rentals[key].get_rentDate()
            returnDate=rentals[key].get_returnedDate()
            delta=self._calculate_days(rentDate, returnDate)
            clientId=rentals[key].get_rclientId()
            list1[clientId]+=delta
        for key in list1:
            dtoList.append(objectRentalCount(allClients[key],list1[key]))
        dtoList=SortingFilter.gnomeSort(dtoList,SortingFilter.comparison2)
        return dtoList
    def most_rented(self,option):
        '''
        Returns the movies sorted by how much they were rented
        Input: option - 1 sorts by total rented days, anything else by
               number of rentals
        Output: dtoList - list of objectRentalCount(movie, days or count)
        Exceptions: -
        '''
        dtoList=[]
        list1={}
        allMovies=self._repositoryMovie.get_all()
        for key in allMovies:
            # per movie: [total rented days, number of rentals]
            list1[key]=[0,0]
        rentals=self._repositoryRental.get_all()
        for key in rentals:
            rentDate=rentals[key].get_rentDate()
            returnDate=rentals[key].get_returnedDate()
            delta=self._calculate_days(rentDate, returnDate)
            movieId=rentals[key].get_rmovieId()
            list1[movieId][0]+=delta
            list1[movieId][1]+=1
        if option==1:
            for key in list1:
                dtoList.append(objectRentalCount(allMovies[key],list1[key][0]))
        else:
            for key in list1:
                dtoList.append(objectRentalCount(allMovies[key],list1[key][1]))
        dtoList=SortingFilter.gnomeSort(dtoList,SortingFilter.comparison2)
        return dtoList
    def undo(self):
        '''
        Undo function: replays the stored inverse operations through the
        dispatch table
        '''
        operations=self._undoControl.load_undo()
        for operation in operations:
            params=operation.get_parameters()
            name=operation.get_name()
            self.__functions[name](*params)
    def redo(self):
        '''
        Redo function: replays the stored operations through the dispatch
        table
        '''
        operations=self._undoControl.load_redo()
        for operation in operations:
            params=operation.get_parameters()
            name=operation.get_name()
            self.__functions[name](*params)
####################################################################################
class objectRentalCount:
    """Data-transfer object pairing an entity (movie/client) with a
    rental figure (a count of rentals or a number of days)."""

    def __init__(self, obiect, rentalCount):
        """Wrap an entity together with its rental figure.

        obiect      - the wrapped object (movie or client)
        rentalCount - number of times/days the object was rented
        """
        self.__wrapped = obiect
        self.__rental_figure = rentalCount

    def get_count(self):
        """Return the rental count/duration."""
        return self.__rental_figure

    def get_movie(self):
        """Return the wrapped object."""
        return self.__wrapped

    def __lt__(self, objectRental):
        """Order instances by their rental figure."""
        return self.get_count() < objectRental.get_count()

    def __str__(self):
        """Render the wrapped object left-padded to 15 characters,
        followed by the rental figure."""
        return str(self.get_movie()).ljust(15) + str(self.get_count())
################################################################################# | [
"skysoulther@gmail.com"
] | skysoulther@gmail.com |
3f76c5219763ce65b0e9516ffd62f0b5c83303d7 | b603ec8037a2aee7dc0426cc0baffb5889011a3f | /Final_Task/transformers.py | 7824d1269b9e612c196418402806bad5950fa7d0 | [] | no_license | SiarheiZhamoidzik/Python-Course | 30ae0398229ba44424ebff19a7054dc9855c6126 | 54dad20a4f6aad9d08bca7a260921f5484eda668 | refs/heads/master | 2023-06-05T23:50:41.265452 | 2021-06-23T07:31:20 | 2021-06-23T07:31:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | #from collections import defaultdict as dd
#from datetime import datetime
#from copy import deepcopy
#from datetime import datetime
#import re
class FeatureTransformer:
    """Namespace for feature-engineering helpers (currently all stubs)."""
    @staticmethod
    def transform_date(date):
        # TODO: not implemented -- intended to normalise/parse `date`.
        pass
    @staticmethod
    def price_to_float(price):
        # TODO: not implemented -- intended to convert a price string to float.
        pass
| [
"Egor_Pilat@epam.com"
] | Egor_Pilat@epam.com |
159e1bbb69f50777b2ba294e3298a272b72dcb2a | 7f4c82f7eb8d2805e378586f14e214cdaacfdb4a | /books/model/CommentList.py | 4c2ece791575dd51f9a9ea502c5e8bd24457084a | [
"MIT"
] | permissive | deepubansal/books-python-wrappers | 5a922267ec8382b3542638d894c96f4891b57bf5 | 51210c8d557a32564f976a56214d3c0807f46a90 | refs/heads/master | 2022-12-05T11:25:01.694021 | 2020-08-29T07:35:23 | 2020-08-29T07:35:23 | 288,738,813 | 0 | 0 | MIT | 2020-08-29T07:35:24 | 2020-08-19T13:26:04 | Python | UTF-8 | Python | false | false | 1,042 | py | #$Id$
from books.model.PageContext import PageContext
class CommentList:
    """Container pairing a list of comment objects with pagination info."""

    def __init__(self):
        """Start with no comments and a fresh PageContext."""
        self.comments = []
        self.page_context = PageContext()

    def set_comments(self, comment):
        """Append a single comment object to the collection.

        Args:
            comment(instance): Comment object.
        """
        self.comments.append(comment)

    def get_comments(self):
        """Return the comment objects collected so far.

        Returns:
            list: List of comments object.
        """
        return self.comments

    def set_page_context(self, page_context):
        """Replace the pagination context.

        Args:
            page_context(instance): Page context object.
        """
        self.page_context = page_context

    def get_page_context(self):
        """Return the current pagination context.

        Returns:
            instance: Page context object.
        """
        return self.page_context
| [
"sahaya.ramesh@zohocorp.com"
] | sahaya.ramesh@zohocorp.com |
56268a103fe83d78d38f018dedf55473c7f45c3a | b089108f89769712e7d5be265f19f87f2912b6e3 | /Práctica 7/Ej14.py | 46f7b862b0a1da05e33538ac3e8c560c9137aa18 | [] | no_license | Asintesc/PROGRAMACION | f8dc02950dfde03204008bca1f2327fb3841396f | bd42203f7218c30111251cb9a8f4feb8486569b1 | refs/heads/master | 2020-08-20T17:38:32.454570 | 2019-12-10T15:54:20 | 2019-12-10T15:54:20 | 216,049,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | diccionari = {
1: 'Enero',
2: 'Febrero',
3: 'Marzo',
4: 'Abril',
5: 'Mayo',
6: 'Junio',
7: 'Julio',
8: 'Agosto',
9: 'Septiembre',
10: 'Octubre',
11: 'Noviembre',
12: 'Diciembre'
}
fecha = input("Dime la fecha: ")
def fecha_mes(d, f):
    """Print a "dd/mm/yyyy" date as "<dd> de <month name> de <yyyy>"
    using the month-name lookup `d`, and return the month number."""
    parts = f.split("/")
    month = int(parts[1])
    print(parts[0], 'de', d[month], 'de', parts[2])
    return month
fecha_mes(diccionari, fecha)
| [
"albeertsc@gmail.com"
] | albeertsc@gmail.com |
96b63c7726f7ad96496b3359c66f0089bdb68609 | 663e7987c923fac93c96921e6a6bd4eebdc43873 | /P3_Implement_SLAM/helpers_test.py | 2af7b3d7c15221776f23121e70ad022f7a68df05 | [] | no_license | boyshen/udacity-cv | dcbc376f33c3f894149efe635d55712d967c9627 | 974b2520231be31e9c6905606b3ed08d75448f8e | refs/heads/master | 2020-04-29T21:12:49.568127 | 2019-05-08T04:25:26 | 2019-05-08T04:25:26 | 176,406,146 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,294 | py | from robot_class import robot
from math import *
import random
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# --------
# this helper function displays the world that a robot is in
# it assumes the world is a square grid of some given size
# and that landmarks is a list of landmark positions(an optional argument)
def display_world(world_size, position, landmarks=None):
    """Plot a square robot world of side `world_size`, the robot at
    `position` (red 'o') and optional `landmarks` (purple 'x').

    position  -- [x, y] robot location
    landmarks -- optional list of [x, y] landmark positions
    """
    # using seaborn, set background grid to gray
    sns.set_style("dark")
    # Plot grid of values
    # NOTE(review): world_grid is built but never drawn -- presumably a
    # leftover from an earlier heat-map version; confirm before removing.
    world_grid = np.zeros((world_size+1, world_size+1))
    # Set minor axes in between the labels
    ax=plt.gca()
    cols = world_size+1
    rows = world_size+1
    ax.set_xticks([x for x in range(1,cols)],minor=True )
    ax.set_yticks([y for y in range(1,rows)],minor=True)
    # Plot grid on minor axes in gray (width = 1)
    plt.grid(which='minor',ls='-',lw=1, color='white')
    # Plot grid on major axes in larger width
    plt.grid(which='major',ls='-',lw=2, color='white')
    # Create an 'o' character that represents the robot
    # ha = horizontal alignment, va = vertical
    ax.text(position[0], position[1], 'o', ha='center', va='center', color='r', fontsize=30)
    # Draw landmarks if they exists
    if(landmarks is not None):
        # loop through all path indices and draw a dot (unless it's at the car's location)
        for pos in landmarks:
            if(pos != position):
                ax.text(pos[0], pos[1], 'x', ha='center', va='center', color='purple', fontsize=20)
    # Display final result
    plt.show()
# --------
# this routine makes the robot data
# the data is a list of measurements and movements: [measurements, [dx, dy]]
# collected over a specified number of time steps, N
#
def make_data_test(N, num_landmarks, world_size, measurement_range, motion_noise,
                       measurement_noise, distance):
    """Simulate a robot walk and return (data, landmarks, final_position).

    data is a list of N-1 time steps, each [measurements, [dx, dy]].
    The whole simulation is re-run from scratch until every landmark has
    been sensed at least once.
    """
    # check if data has been made
    complete = False
    while not complete:
        data = []
        # make robot and landmarks
        r = robot(world_size, measurement_range, motion_noise, measurement_noise)
        r.make_landmarks(num_landmarks)
        # seen[i] becomes True once landmark i appears in a measurement
        seen = [False for row in range(num_landmarks)]
        # guess an initial motion (random heading, fixed step distance)
        orientation = random.random() * 2.0 * pi
        dx = cos(orientation) * distance
        dy = sin(orientation) * distance
        for k in range(N-1):
            # collect sensor measurements in a list, Z
            Z = r.sense()
            # check off all landmarks that were observed
            for i in range(len(Z)):
                seen[Z[i][0]] = True
            # move
            while not r.move(dx, dy):
                # if we'd be leaving the robot world, pick instead a new direction
                orientation = random.random() * 2.0 * pi
                dx = cos(orientation) * distance
                dy = sin(orientation) * distance
            # collect/memorize all sensor and motion data
            data.append([Z, [dx, dy]])
        # we are done when all landmarks were observed; otherwise re-run
        complete = (sum(seen) == num_landmarks)
    print(' ')
    print('Landmarks: ', r.landmarks)
    print(r)
    robot_coordinate = (r.x,r.y)
    return data,r.landmarks,robot_coordinate
| [
"yangwayu999@qq.com"
] | yangwayu999@qq.com |
5b999742e90a5b35598a16d0af48b143d26136b8 | 4f24e195835c07e451a6b250ad8c056cd8858164 | /socket/nodemcu/nodemcu.py | 8e3e33c84413bac45c95457f90b280694617df9a | [] | no_license | nano-2-ly/nodeMCU-server | e16d1ea05ea53bef3641585fc64ee7fa3f49c688 | b7d58afec687efa2418c062e86e0cd791aac26b3 | refs/heads/master | 2020-07-04T10:00:11.316270 | 2019-08-14T01:27:22 | 2019-08-14T01:27:22 | 202,249,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | # Echo server program
# Minimal TCP server for a NodeMCU board: accepts a connection, echoes
# back the first payload received and stores (client ip, payload) in a
# local SQLite database.
import socket
import sqlite3

conn_db = sqlite3.connect('./nodemcu_DB.db')
cs = conn_db.cursor()
query = "CREATE TABLE IF NOT EXISTS data (ip VARCHAR(255), data VARCHAR(255))"
cs.execute(query)

HOST = ''                 # Symbolic name meaning all available interfaces
PORT = 8000              # Arbitrary non-privileged port
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind((HOST, PORT))
    s.listen(1)
    while True:
        conn, addr = s.accept()
        print(addr)
        # NOTE(review): only one recv is handled per connection, `conn` is
        # never closed, and an empty payload breaks out of the accept loop
        # entirely (shutting the server down) -- confirm this is intended.
        data = conn.recv(1024)
        if not data: break
        print(data)
        conn.sendall(data)
        # parameterised INSERT -- safe against SQL injection
        query = "INSERT into data values (?, ?)"
        cs.execute(query,(addr[0], data.decode()))
        conn_db.commit()
"dong991207@unist.ac.kr"
] | dong991207@unist.ac.kr |
243f9844bc2368e7521d055910a36de2f944497b | c03225c395b5b0d8cdd79e88f78c13666c7c953a | /utils/dataset/__init__.py | 816fb0be6aedd8710f9820a6ca430f6ba99fe353 | [] | no_license | zhaoyin214/cv_lab_pytorch | de52bacd098c378cd06a80f6f77a76b42d7d0e42 | 4ccaf973678943f6b8e3b07d053cc447d5d57e76 | refs/heads/master | 2022-11-17T06:48:58.121305 | 2020-07-15T23:48:33 | 2020-07-15T23:48:33 | 272,972,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | from .load import load_ibug_300w | [
"zhaoyin214@qq.com"
] | zhaoyin214@qq.com |
44e91287be6fa1cf1f09cf83e30d086e58f1966e | 2e4e85c1e871eb5e258d6bd03ad20541757c9095 | /change_tracker/frcnn_object_detector.py | 8d20a119e6bcf084ba5996ae670a6bde0101bf2a | [] | no_license | emreegriboz/od_api_tf_my_notebooks | 222f734985ad19044f192d9b8c57d6602829a957 | c66d760d8cf60ffd2c96dc7ad2cc02fecab60dd5 | refs/heads/master | 2022-01-12T21:21:24.639884 | 2019-05-23T15:33:52 | 2019-05-23T15:33:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,751 | py | import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops
from object_detection.builders import model_builder
from google.protobuf import text_format
import tensorflow as tf
from object_detection.protos import model_pb2
import time
from IPython.display import clear_output
from keras.models import load_model
import keras.backend as K
from math import floor, ceil
# Fail fast: the frozen-graph loading code below relies on TF 1.9+ APIs.
if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
  raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')
def batch_gen(l, n):
    """Yield successive chunks of at most n items from l (last chunk may
    be shorter)."""
    for start in range(0, len(l), n):
        # slicing clamps automatically at the end of the sequence
        yield l[start:start + n]
def split_output_dicts(outputs, images, score_tresh=.9):
    """Flatten per-batch detector outputs into one dict per image.

    outputs     -- list of batched output dicts (one per batch); each value
                   is assumed indexable per image along axis 0
    images      -- flat list of the input images, in the same order
    score_tresh -- detections scoring <= this threshold are dropped
    Returns a list of per-image dicts with an added 'image_np' key.
    """
    output_splitted = []
    i=0
    for big_dict in outputs:
        # one entry per image in this batch
        for j in range(len(big_dict["num_detections"])):
            output_dict = {}
            for k in big_dict.keys():
                output_dict[k] = big_dict[k][j]
            # indices of detections above the confidence threshold
            filter_scores_idx = np.where(output_dict['detection_scores'] > score_tresh)[0]
            for k in output_dict.keys():
                try:
                    output_dict[k] = output_dict[k][filter_scores_idx]
                except:
                    # non-indexable entries (e.g. scalar num_detections)
                    # are kept unchanged
                    continue
            image_np = images[i]
            output_dict["image_np"] = image_np
            output_splitted.append(output_dict)
            i+=1
    return output_splitted
class FRCNN_Object_detector:
    """Faster R-CNN detector backed by a frozen TensorFlow 1.x graph.

    The frozen inference graph is loaded once at construction; inference
    runs in a fresh session per batch.  Besides the standard detection
    outputs, an intermediate ResNet-50 activation is extracted as a
    feature embedding.
    """
    def __init__(self, graph_path, memory_fraction=0.9):
        # NOTE(review): memory_fraction is accepted but never used (only
        # allow_growth is set) -- confirm whether it should configure
        # per_process_gpu_memory_fraction.
        print ("detection model")
        self.graphPath = graph_path
        self.tfConfig = tf.ConfigProto()
        self.tfConfig.gpu_options.allow_growth = True
        self.od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(graph_path, 'rb') as fid:
            serialized_graph = fid.read()
            self.od_graph_def.ParseFromString(serialized_graph)
            # import into the default graph so later sessions can use it
            tf.import_graph_def(self.od_graph_def, name='')
        print ("done")
    def run_inference_for_batch(self, images):
        """Run the detector on a batch of images.

        Returns one dict with batched detection outputs (and
        'detection_masks' when the graph provides it) plus a 'features'
        tensor taken from a ResNet-50 block3 activation.
        """
        with tf.Session(config=self.tfConfig) as sess:
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            # fetch only the output tensors this graph actually exposes
            for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                        tensor_name)
            # intermediate backbone activation used as an image embedding
            features = tf.get_default_graph().get_tensor_by_name(
                'FirstStageFeatureExtractor/resnet_v1_50/resnet_v1_50/block3/unit_6/bottleneck_v1/Relu:0')
            tensor_dict['features'] = tf.expand_dims(features, 0)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            # Run inference
            output_dict = sess.run(tensor_dict,
                                   feed_dict={image_tensor: images})
            # all outputs are float32 numpy arrays, so convert types as appropriate
            output_dict['num_detections'] = output_dict['num_detections'].astype(np.uint8)
            output_dict['detection_classes'] = output_dict[
                'detection_classes'].astype(np.uint8)
            output_dict['detection_boxes'] = output_dict['detection_boxes']
            output_dict['detection_scores'] = output_dict['detection_scores']
            # drop the extra batch dimension added by expand_dims above
            output_dict['features'] = output_dict['features'][0]
            return output_dict
    def run_inference_for_single_image(self, image):
        # NOTE(review): run_inference_for_batch returns a dict, so indexing
        # with [0] raises KeyError -- looks like a bug; this probably needs
        # to go through split_output_dicts instead.  Confirm with callers.
        return self.run_inference_for_batch([image])[0]
    def run_inference_for_images(self, images, batch_size=16, log=False):
        """Run inference over a list of images in batches and return one
        score-filtered output dict per image (see split_output_dicts)."""
        if log:
            print ("detect all")
        outputs = []
        progress = 0
        total_images = len(images)
        image_batch = batch_gen(images, batch_size)
        for img_b in image_batch:
            output_dict = self.run_inference_for_batch(img_b)
            outputs.append(output_dict)
            progress += 1
            if log:
                # fraction of images processed so far (overwrites the line)
                print (str(min(progress*batch_size, len(images))/len(images))[:5], end="\r")
        print ("")
        return split_output_dicts(outputs, images)
| [
"bbaltac.mihai@gmail.com"
] | bbaltac.mihai@gmail.com |
c0e5794c8fba8e6317e8362da2e6120a576bd597 | a69673b0cc47c7bd4156cc5181e6a60698d1e5de | /application/admin/delete_db.py | cc5ab0ff0e7ce40531bc66a78002e8ad3e77774f | [] | no_license | tible/HT-Match-Predictor-Py | e33fc9882250636cf50d73dc0ed7ba45a38e6fd2 | 25a3561ae1ffe68a944e9f41ee363e2ab1cfe108 | refs/heads/master | 2022-07-10T21:53:17.628218 | 2020-05-19T14:25:54 | 2020-05-19T14:25:54 | 265,358,319 | 1 | 0 | null | 2020-05-19T20:28:09 | 2020-05-19T20:28:08 | null | UTF-8 | Python | false | false | 449 | py | import os
from sqlalchemy_utils import drop_database
import application.dialog_windows as dw
import global_library
def delete_database():
    """Drop the application's database file if it exists.

    Shows an info dialog on success and an error dialog when the file is
    missing (dialog texts are in Romanian).
    """
    if os.path.exists(global_library.database_file_path):
        drop_database(global_library.database_file_uri)
        dw.show_info_window_in_thread(title='Succes!', message='Baza de date a fost stearsa.')
    else:
        dw.show_error_window_in_thread(title='Esec!', message='Baza de date nu exista')
| [
"alex.zanardi@gmail.com"
] | alex.zanardi@gmail.com |
d7e694d8b7e339f353fe621aef7be75b1bd0d979 | 9a1b033774e371bd6442048f43e862dfb71abed7 | /Lists As Stacks And Queues/Exercises/Cups_and_Bottles.py | 57a94258fc2f957d4054343b06ab2bb9d026c989 | [] | no_license | mialskywalker/PythonAdvanced | ea4fde32ba201f6999cd0d59d1a95f00fb5f674b | c74ad063154c94b247aaf73b7104df9c6033b1a5 | refs/heads/master | 2023-03-09T00:13:28.471328 | 2021-02-24T15:21:11 | 2021-02-24T15:21:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | from collections import deque
# "Cups and Bottles": pour water from bottles (taken from the end of the
# list) into cups (served front-to-back from the queue).  Overflow counts
# as wasted water; a partially filled cup goes back to the front of the
# queue with its remaining capacity.
cups_capacity = deque(int(el) for el in input().split())
bottles_capacity = [int(el) for el in input().split()]
wasted_water = 0
while True:
    # stop as soon as either cups or bottles run out
    if not cups_capacity or not bottles_capacity:
        break
    bottle = bottles_capacity.pop()
    cup = cups_capacity.popleft()
    total = bottle - cup
    if total >= 0:
        # bottle had enough: the leftover is wasted
        wasted_water += total
    elif total < 0:
        # cup not full yet: re-queue it with its remaining capacity
        cups_capacity.appendleft(abs(total))
if not cups_capacity:
    print(f"Bottles: {' '.join(map(str, bottles_capacity))}")
    print(f"Wasted litters of water: {wasted_water}")
elif not bottles_capacity:
    print(f"Cups: {' '.join(map(str, cups_capacity))}")
    print(f"Wasted litters of water: {wasted_water}")
"kalqga123@gmail.com"
] | kalqga123@gmail.com |
04ee7321aedd012f9d423909892f46d094c42241 | de570029bc591e7fe8371151d9376803fc501561 | /Facial Expression Recognition/project/com/dao/DatasetDAO.py | dd42591ea724f86f124f310c574879d29ea8eb4a | [] | no_license | MaulikZalavadiya/final_year_project | fdc82cb177dbfb931bb69b58ad7ccb9867caf276 | 7b2c30dec3d832bae79da8673f04fce76ff23233 | refs/heads/main | 2023-01-23T01:58:27.100437 | 2020-11-25T09:43:23 | 2020-11-25T09:43:23 | 315,892,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | from project import db
from project.com.vo.DatasetVO import DatasetVO
from datetime import datetime
class DatasetDAO:
    """Data-access object for DatasetVO rows (Flask-SQLAlchemy session)."""
    def insertDataset(self, datasetVO):
        # Persist a new DatasetVO instance.
        db.session.add(datasetVO)
        db.session.commit()
    def viewDataset(self):
        # Return all DatasetVO rows.
        print("hello from viewDataset")  # NOTE(review): leftover debug print
        datasetList = DatasetVO.query.all()
        print("datasetList=", datasetList)  # NOTE(review): leftover debug print
        return datasetList
    def deleteDataset(self, datasetVO):
        # Delete the row whose primary key matches datasetVO.datasetId and
        # return the deleted entity.
        datasetList = DatasetVO.query.get(datasetVO.datasetId)
        db.session.delete(datasetList)
        db.session.commit()
        return datasetList
| [
"maulik@aksharmineche.com"
] | maulik@aksharmineche.com |
2a8f39b09a2cb2ea1eafb3b7598ac5640ccedeef | 07149c3efeb879a3f7dbe735d8efb4427323d455 | /src/hubmap.py | 6866ba93f30209c2b3303847675a0bbf666d5f19 | [] | no_license | Bdl-1989/KidneyRepo | c5bac80603902a05760e84c9e0db533cae726d4f | c8f02d6a351b29b35e03627d4116f474249e4377 | refs/heads/main | 2023-02-11T00:30:51.848849 | 2021-01-05T14:27:20 | 2021-01-05T14:27:20 | 327,018,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,648 | py | from common import *
import tifffile as tiff
import json
data_dir = os.getcwd() + '/data'
def read_tiff(image_file):
    """Load a TIFF and return it as a contiguous array.

    A channel-first (3, H, W) image is transposed to (H, W, 3); anything
    else is returned exactly as tifffile read it.
    """
    img = tiff.imread(image_file)
    if img.shape[0] == 3:
        img = np.ascontiguousarray(img.transpose(1, 2, 0))
    return img
# --- rle ---------------------------------
def rle_decode(rle, height, width, fill=255):
    """Decode a 1-indexed, column-major run-length string into a
    (height, width) uint8 mask filled with `fill` on the runs."""
    tokens = rle.split()
    starts = np.asarray(tokens[0::2], dtype=int) - 1   # RLE positions are 1-based
    lengths = np.asarray(tokens[1::2], dtype=int)
    flat = np.zeros(height * width, dtype=np.uint8)
    for begin, run in zip(starts, lengths):
        flat[begin:begin + run] = fill
    # runs are laid out column-major, hence reshape to (width, height) then transpose
    return np.ascontiguousarray(flat.reshape(width, height).T)
def rle_encode(mask):
    """Encode a binary mask as a 1-indexed, column-major run-length
    string of alternating "start length" pairs."""
    # flatten column-major and pad with zeros so edge runs are detected
    flat = np.concatenate([[0], mask.T.flatten(), [0]])
    boundaries = np.where(flat[1:] != flat[:-1])[0] + 1
    # convert every second entry from "run end" to "run length"
    boundaries[1::2] -= boundaries[::2]
    return ' '.join(str(v) for v in boundaries)
# --- tile ---------------------------------
def to_tile(image, mask, scale, size, step, min_score):
    """Cut a (rescaled) slide into overlapping square tiles.

    image     -- HWC RGB image
    mask      -- matching HW mask, or None
    scale     -- resize factor applied before tiling
    size/step -- tile side length and stride, in rescaled pixels
    min_score -- minimum saturation-based tissue score for keeping a tile
    Returns a dict with the rescaled image/mask, the kept tiles, the kept
    tile centres ('coord': [cx, cy, score]) and the rejected centres.
    """
    half = size//2
    image_small = cv2.resize(image, dsize=None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
    #make score
    # tissue score: fraction of sufficiently saturated pixels, computed on
    # a 1/32 thumbnail in HSV space and resized back to full size
    height, width, _ = image_small.shape
    vv = cv2.resize(image_small, dsize=None, fx=1 / 32, fy=1 / 32, interpolation=cv2.INTER_LINEAR)
    vv = cv2.cvtColor(vv, cv2.COLOR_RGB2HSV)
    # image_show('v[0]', v[:,:,0])
    # image_show('v[1]', v[:,:,1])
    # image_show('v[2]', v[:,:,2])
    # cv2.waitKey(0)
    vv = (vv[:, :, 1] > 32).astype(np.float32)
    vv = cv2.resize(vv, dsize=(width, height), interpolation=cv2.INTER_LINEAR)
    #make coord
    # candidate tile centres on a regular grid, plus one extra row/column
    # flush with the right/bottom edge
    xx = np.array_split(np.arange(half, width - half), np.floor((width - size) / step))
    yy = np.array_split(np.arange(half, height - half), np.floor((height - size) / step))
    # xx = [int(x.mean()) for x in xx]
    # yy = [int(y.mean()) for y in yy]
    xx = [int(x[0]) for x in xx] + [width-half]
    yy = [int(y[0]) for y in yy] + [height-half]
    coord = []
    reject = []
    for cy in yy:
        for cx in xx:
            # mean tissue score over this tile decides keep vs reject
            cv = vv[cy - half:cy + half, cx - half:cx + half].mean()
            if cv>min_score:
                coord.append([cx,cy,cv])
            else:
                reject.append([cx,cy,cv])
    #-----
    if 1:
        tile_image = []
        for cx,cy,cv in coord:
            t = image_small[cy - half:cy + half, cx - half:cx + half]
            assert (t.shape == (size, size, 3))
            tile_image.append(t)
    if mask is not None:
        mask_small = cv2.resize(mask, dsize=None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
        tile_mask = []
        for cx,cy,cv in coord:
            t = mask_small[cy - half:cy + half, cx - half:cx + half]
            assert (t.shape == (size, size))
            tile_mask.append(t)
    else:
        mask_small = None
        tile_mask = None
    return {
        'image_small': image_small,
        'mask_small' : mask_small,
        'tile_image' : tile_image,
        'tile_mask' : tile_mask,
        'coord' : coord,
        'reject' : reject,
    }
def to_mask(tile, coord, height, width, scale, size, step, min_score):
    """Stitch per-tile predictions back into one (height, width) mask.

    Overlapping tiles are combined with an element-wise maximum.
    `tile[t]` must be a (size, size) array whose centre is `coord[t]`
    as produced by `to_tile`.  `scale`, `step` and `min_score` are
    unused here; they are kept for signature symmetry with `to_tile`.

    Removed: a dead count-and-average combination path that was
    disabled behind `if 0:` and the constant `if 1:` wrapper around
    the live maximum path — behaviour is unchanged.
    """
    half = size // 2
    mask = np.zeros((height, width), np.float32)
    for t, (cx, cy, cv) in enumerate(coord):
        mask[cy - half:cy + half, cx - half:cx + half] = np.maximum(
            mask[cy - half:cy + half, cx - half:cx + half], tile[t])
    return mask
# --draw ------------------------------------------
def mask_to_inner_contour(mask):
    """Return a boolean map of mask pixels lying on the inner contour.

    A pixel belongs to the inner contour when it is foreground
    (mask > 0.5) and at least one of its four axis-aligned neighbours
    differs from it.  The mask is reflect-padded by one pixel so that
    border pixels are compared against mirrored neighbours.
    """
    fg = mask > 0.5
    padded = np.pad(fg, 1, mode='reflect')
    center = padded[1:-1, 1:-1]
    neighbours = (padded[:-2, 1:-1], padded[2:, 1:-1],
                  padded[1:-1, :-2], padded[1:-1, 2:])
    differs = np.zeros_like(fg)
    for nb in neighbours:
        differs |= (center != nb)
    return fg & differs
def draw_contour_overlay(image, mask, color=(0,0,255), thickness=1):
    """Paint the inner contour of `mask` onto `image` in-place.

    With thickness 1 the contour pixels are coloured directly; for
    larger thickness a filled circle of radius thickness//2 is drawn
    at every contour pixel.  Returns the (mutated) image.
    """
    contour = mask_to_inner_contour(mask)
    if thickness == 1:
        image[contour] = color
    else:
        radius = max(1, thickness // 2)
        ys, xs = np.where(contour)
        for y, x in zip(ys, xs):
            cv2.circle(image, (x, y), radius, color, lineType=cv2.LINE_4)
    return image
"dl.super@hotmail.com"
] | dl.super@hotmail.com |
619678a330185af0106634f96800527442f9aecb | 471cab6ba89d01cf0361722ee02cbc33b2787c49 | /_exercicios/ex067_a15_tabuada_3.0.py | 4dcf60c62e864919f32aae0deafd54ec62852c5e | [] | no_license | ercris990/_curso_gg_py_3 | 2e9d6445ec9b0f40bca36bbfd684a09d5b91cb3b | fe4ab944be911edf51683fdab0d6aa452ed96b12 | refs/heads/master | 2022-11-30T02:00:53.947613 | 2020-08-09T10:59:41 | 2020-08-09T10:59:41 | 282,326,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | num = 0
# Multiplication-table ("tabuada") console loop: repeatedly asks for a
# number and prints its 1..10 table; a value <= 0 exits the loop.
while True:
    num = int(input('Digite um valor para ver a tabuada: '))
    print('_-_' * 13)
    # Non-positive input ends the program (separator is printed first).
    if num <= 0:
        break
    # Print num x 1 .. num x 10.
    for i in range(1, 11):
        print(f'{num} x {i} = {num * i}')
    print('=+=' * 13)
print('PROGRAMA TABUADA ENCERADO. Volte sempre!')
| [
"ercris990@gmail.com"
] | ercris990@gmail.com |
416d169e47bff48b0597e33ac2869e41bcc7c7d2 | c2b222336e5595b0c73c78be51ba34c4ea6c8f40 | /inClass03nap.py | 291378742e09bb2c20b75bb37fd7064dce874f56 | [] | no_license | npaul007/DataStructuresHomeWorkCSC212 | 794cad073cc1cedbc8b3ce8a6991cd1f933bbe03 | cbcd84b8fec91bc410e531c380d8fcde7550a6b9 | refs/heads/master | 2020-05-23T12:26:54.866907 | 2017-04-23T03:23:58 | 2017-04-23T03:23:58 | 84,768,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,793 | py | # Implementation of an Unordered List ADT as a linked list. The list
# is accessed through a reference to the first element, head.
# Adopted from Section 3.9 of the textbook.
# Node class representing a node in a linked list
# A node contains data and reference to the next node
# Adopted from Section 3.9 in the textbook
class Node:
    """A node of a singly linked list: a data payload plus a reference
    to the next node (None at the end of the list)."""

    def __init__(self, init_data):
        """Create a node holding `init_data`; `next` starts as None."""
        self.data = init_data
        self.next = None

    def get_data(self):
        """Return the node's data."""
        return self.data

    def get_next(self):
        """Return the next node, or None if this is the last node."""
        return self.next

    def set_data(self, new_data):
        """Replace the node's data.

        Fixed: the body previously assigned the undefined name
        `newdata`, raising NameError on every call.
        """
        self.data = new_data

    def set_next(self, new_next):
        """Make this node point at `new_next`."""
        self.next = new_next
class UnorderedList:
    """Unordered list ADT backed by a singly linked list of Node objects.

    The list is accessed only through `head`, a reference to the first
    node (None when the list is empty).
    """

    def __init__(self):
        # Empty list: no head node.
        self.head = None

    def is_empty(self):
        """Return True if the list has no elements."""
        return self.head is None

    def add(self, item):
        """Insert `item` at the head of the list in O(1)."""
        temp = Node(item)
        temp.set_next(self.head)
        self.head = temp

    def size(self):
        """Return the number of elements by traversing the list (O(n))."""
        count = 0
        current = self.head
        while current is not None:
            count += 1
            current = current.get_next()
        return count

    def search(self, item):
        """Return True if `item` occurs in the list, False otherwise."""
        current = self.head
        while current is not None:
            if current.get_data() == item:
                return True
            current = current.get_next()
        return False

    def remove(self, item):
        """Remove the first occurrence of `item`; no-op if absent."""
        current = self.head
        previous = None
        while current is not None:
            if current.get_data() == item:
                if previous is None:
                    # Deleting the head node.
                    self.head = current.get_next()
                else:
                    previous.set_next(current.get_next())
                return
            previous = current
            current = current.get_next()

    def print_list(self):
        """Print all elements, head first, in Python list format.

        Fixed: the loop no longer calls self.size() on every iteration,
        which made printing O(n^2); a plain traversal suffices.
        """
        array = []
        current = self.head
        while current is not None:
            array.append(current.get_data())
            current = current.get_next()
        print(array)

    def replace_element(self, position, newValue):
        """Replace the element at `position` (0 = head) with `newValue`.

        Out-of-range positions are silently ignored, matching the
        original behaviour.  Fixed: no per-iteration self.size() call,
        so this is now O(n) instead of O(n^2), and traversal stops as
        soon as the position is found.
        """
        current = self.head
        count = 0
        while current is not None:
            if count == position:
                current.data = newValue
                return
            current = current.get_next()
            count += 1
def main():
    """Smoke-test UnorderedList: build a list, print it, replace an
    element, and print it again."""
    # create a list and add some elements to it
    aList = UnorderedList()
    print("Adding 3, 5, 8, and 11 to the list.")
    aList.add(3)
    aList.add(5)
    aList.add(8)
    # add() inserts at the head, so 11 (added last) becomes the head
    # and the list prints as [11, 8, 5, 3]
    aList.add(11)
    aList.print_list()
    # replace the element at position 2 counted from the head (the 5)
    aList.replace_element(2,100)
    # check that replace method works
    aList.print_list()


if __name__ == "__main__":
    main()
| [
"npaulemon@gmail.com"
] | npaulemon@gmail.com |
833b2cedd23e6d055dabd3abc3309d7d8b3dd5c7 | 727feea87e535902c39324f49b6bdc68798bc640 | /qad_api/__version__.py | 3d8040fa073c4e2cdb5809d99d9ee9a50a2aab44 | [
"Apache-2.0"
] | permissive | stjordanis/qad-api | b1cb510e77476f517279a4adeb574a997add0133 | 123200d1a5044b8e38a73b3c40f0870ff603c1be | refs/heads/master | 2022-12-22T19:19:14.236532 | 2020-10-06T07:14:12 | 2020-10-06T07:14:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | # Copyright 2020 HQS Quantum Simulations GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The version file."""
__version__ = '0.1dev'
| [
"sebastian.lehmann@quantumsimulations.de"
] | sebastian.lehmann@quantumsimulations.de |
346c684e98233a116ab34c44eac7e3ba8cb16476 | c11f738961c5d699ce91100d7c33b5877a814477 | /userpro/models.py | 227d38f0ee2fa5431d175416f1c7441859ad6ad2 | [] | no_license | dotaing/icqb | 0c29e38c0fd673a9056871df262dc83b0270ec11 | 97bc47f8a3d2625648ea89136c83dd668e801d7b | refs/heads/master | 2021-01-01T05:21:06.414451 | 2016-05-09T10:18:34 | 2016-05-09T10:18:34 | 58,362,651 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | py | #-*- coding:UTF-8 -*-
from django.db import models
class UserGroup(models.Model):
"""用户权限分组"""
name = models.CharField(unique=type,max_length=20,verbose_name="组名称")
permission = models.CharField(max_length=500,verbose_name="组所有权限")
is_doc = models.CharField(max_length=200,verbose_name="注释")
def __unicode__(self):
return self.name
class AccountUser(models.Model):
"""用户表"""
username =models.CharField(unique=type,max_length=30,verbose_name="登陆名")
userpasswd = models.CharField(max_length=50,verbose_name="登陆密码")
is_lock = models.BooleanField(default=False,verbose_name="是否锁定")
in_group =models.ForeignKey(UserGroup)
is_superman = models.IntegerField(max_length=4,verbose_name="管理员")
lastlogin_host = models.CharField(max_length=50,verbose_name="最后登录地址")
create_time = models.DateTimeField(auto_now_add=True,verbose_name="创建时间")
lastlogin_time = models.DateTimeField(auto_now_add=True,verbose_name="最后登陆时间")
class UserSessionCache(models.Model):
    """Per-user session cache (original docstring said 用户表, "user table")."""
    # Fixed: `unique=type` -> `unique=True` on both columns (the builtin
    # `type` was being passed by mistake).
    session_key = models.CharField(unique=True, max_length=40, verbose_name="每个用户session_key")
    username = models.CharField(unique=True, max_length=50, verbose_name="登录用户名")
    expire_date = models.DateTimeField(auto_now_add=True, verbose_name="登陆时间")
class ViePerUrl(models.Model):
    """View / permission / URL mapping (translated from: 试图,权限,Url)."""
    title = models.CharField(max_length=50, verbose_name="权限名")
    url = models.CharField(max_length=50, verbose_name="访问URL")
    # menu_type verbose_name says: menu level — 1: top-level menu,
    # 2: second-level menu, 3: application method.
    menu_type = models.IntegerField(max_length=1, verbose_name="菜单级别1:顶级菜单,2:二级菜单,3:应用方法")
    # NOTE(review): IntegerField ignores max_length — verify intent;
    # `in_menu` presumably references a parent menu id (confirm with callers).
    in_menu = models.IntegerField(max_length=50, verbose_name="id")
"9512727@qq.com"
] | 9512727@qq.com |
46449184dd1392a243a1a920c645acf77d085a56 | 96bc060c502db8b2dd4308bf0ec4d12422ef3f61 | /portfolio/migrations/0001_initial.py | 2d77c7409c77457420039f7ded4d21ce32d58203 | [] | no_license | kdh92417/new_stock_api | 0b5c1708d5e6d089ce300eb1eeb64c268d111af3 | 271978a3f33daa66930e7a101d802e6d88892d4c | refs/heads/master | 2023-08-11T08:26:06.852435 | 2021-09-22T13:05:01 | 2021-09-22T13:05:01 | 395,270,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,624 | py | # Generated by Django 3.2.7 on 2021-09-21 20:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('company', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Portfolio',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('content', models.CharField(max_length=1000)),
('total_like', models.IntegerField(default=0)),
('search_count', models.IntegerField(default=0)),
('create_date', models.DateTimeField(auto_now_add=True, null=True)),
('modify_date', models.DateTimeField(auto_now=True, null=True)),
],
options={
'db_table': 'portfolios',
},
),
migrations.CreateModel(
name='PortfolioStock',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('shares_count', models.IntegerField(default=0)),
('shares_amount', models.IntegerField(default=0)),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='company.company')),
('portfolio', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolio.portfolio')),
],
options={
'db_table': 'portfolio_stocks',
},
),
migrations.AddField(
model_name='portfolio',
name='company',
field=models.ManyToManyField(through='portfolio.PortfolioStock', to='company.Company'),
),
migrations.AddField(
model_name='portfolio',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='portfolio_user', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='LikePortfolio',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('portfolio', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolio.portfolio')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'like_portfolio',
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_name', models.CharField(max_length=50)),
('content', models.CharField(max_length=1000)),
('create_date', models.DateTimeField(auto_now_add=True, null=True)),
('modify_date', models.DateTimeField(auto_now=True, null=True)),
('portfolio', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolio.portfolio')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'comments',
},
),
]
| [
"kdh92417@gmail.com"
] | kdh92417@gmail.com |
03432da73f21498b90e123b644298e3e39f491fb | 016f273695785240bb01f416c812ddc072586ef5 | /PMI/2016/ShamugijaLika/task_1_18.py | ecfedbda2bdb540c0868538454c6ff5a1021cf27 | [] | no_license | Rokkitt/rgr | 55fcf2ea45166d05fbcfdbe4c81ac78951ed50a5 | 64d02b45f1e1d5a4aff3338bc8b1faa5165797ae | refs/heads/master | 2021-01-21T10:41:45.289912 | 2017-04-08T18:53:23 | 2017-04-08T18:53:23 | 83,471,719 | 0 | 3 | null | 2022-03-23T12:08:24 | 2017-02-28T19:36:10 | Python | UTF-8 | Python | false | false | 660 | py | #Задача 1. Вариант 18.
#Напишите программу, которая будет сообщать род деятельности и псевдоним
#под которым скрывается Мария Магдалена фон Лош. После вывода информации
#программа должна дожидаться пока пользователь нажмет Enter для выхода.
#Shamugija L.G.
#13.03.2017
print("Затянувшаяся дискуссия означает, что обе стороны не правы.")
print("\n\t\t\t\t\t\tВольтер")
print("Explore")
input("\nPress Enter")
| [
"shelley.bass.li@gmail.com"
] | shelley.bass.li@gmail.com |
96725e15b759b87dd5e711c6eaba643f5e3dc82a | 6cbc748a19758c702b2ebcc4085157e42bdb8436 | /Class9_[18-11-2018]/zip.py | 41c0ce1a52a2cab3abc3c846845cb2647ad77ef5 | [] | no_license | rohitgit7/Ethans_Python | 5a426410a2ef34b5fa08d04c9bd807860ce5fb55 | 621b28d477764b5a98c8182eaa7c61aaea930890 | refs/heads/master | 2023-01-12T19:57:10.638671 | 2020-11-16T12:12:33 | 2020-11-16T12:12:33 | 313,290,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | #Zip() aggregates elements based on the iterables passed, and returns an iterator of tuples
# Demonstrates zip(): aggregates elements from the iterables passed and
# (in Python 2) returns a list of tuples.
# NOTE(review): this file uses Python 2 print statements and indexes the
# result of zip() directly; it will not run under Python 3, where zip()
# returns a lazy iterator.
lt1 = [1,2,3,4,5]
lt2 = ['one','two','three']
lt3 = ['a','b','c','d']
ret = zip(lt1,lt3,lt2)
# zip stops at the shortest input — here lt2, with 3 elements
print ret #will print the list with smallest no. of elements(here lt2)
print "ret[0]",ret[0]
print "type(ret)",type(ret)
print "type(ret[0])",type(ret[0])
# zip(*xyz) transposes ("unzips") a zipped sequence
print zip(*ret)
l1,l2,l3 = zip(*ret)
print list(l1),l2,l3
# building a dictionary from parallel key/value lists
keys = ['a','b','c']
values = [1,2,3]
d = dict(zip(keys,values))
print d | [
"rohitgit7@users.noreply.github.com"
] | rohitgit7@users.noreply.github.com |
058afedbf094af580771ffe9fc6a5a0d485bc5be | 82779f61bb540c5eb5376746d04930de9e5e9d08 | /traveler/urls.py | 464a7af40a432eb867e1c073f8f7b5a0e6575479 | [] | no_license | vijayraval406/TravellerApp | fe6f7dff963ac7d99643abf7e8b25c621dbe043a | 3abf631db060e6df91668b39986e3a9378933eff | refs/heads/master | 2023-06-20T12:00:47.660808 | 2021-07-21T15:58:56 | 2021-07-21T15:58:56 | 388,171,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,052 | py | from django.urls import path
from . import views
urlpatterns = [
path('',views.index,name='index'),
path('contact/',views.contact,name='contact'),
path('destination/',views.destination,name='destination'),
path('pricing/',views.pricing,name='pricing'),
path('signup/',views.signup,name='signup'),
path('login/',views.login,name='login'),
path('enter_otp/',views.enter_otp,name='enter_otp'),
path('enter_email/',views.enter_email,name='enter_email'),
path('verify_forgot_otp/',views.verify_forgot_otp,name='verify_forgot_otp'),
path('update_password/',views.update_password,name='update_password'),
path('logout/',views.logout,name='logout'),
path('change_password/',views.change_password,name='change_password'),
path('edit_profile/',views.edit_profile,name='edit_profile'),
path('seller_index/',views.seller_index,name='seller_index'),
path('seller_edit_profile/',views.seller_edit_profile,name='seller_edit_profile'),
path('seller_change_password/',views.seller_change_password,name='seller_change_password'),
path('seller_add_flight/',views.seller_add_flight,name='seller_add_flight'),
path('seller_view_products/',views.seller_view_products,name='seller_view_products'),
path('seller_flight_detail/<int:pk>/',views.seller_flight_detail,name='seller_flight_detail'),
path('seller_edit_flight/<int:pk>/',views.seller_edit_flight,name='seller_edit_flight'),
path('index_datetime/',views.index_datetime,name='index_datetime'),
path('seller_delete_flight/<int:pk>/',views.seller_delete_flight,name='seller_delete_flight'),
path('seller_add_cab',views.seller_add_cab,name='seller_add_cab'),
path('seller_add_hotel',views.seller_add_hotel,name='seller_add_hotel'),
path('seller_cab_detail/<int:pk>/',views.seller_cab_detail,name='seller_cab_detail'),
path('seller_hotel_detail/<int:pk>/',views.seller_hotel_detail,name='seller_hotel_detail'),
path('seller_edit_hotel/<int:pk>/',views.seller_edit_hotel,name='seller_edit_hotel'),
path('seller_delete_hotel/<int:pk>/',views.seller_delete_hotel,name='seller_delete_hotel'),
path('seller_edit_cab/<int:pk>/',views.seller_edit_cab,name='seller_edit_cab'),
path('seller_delete_cab/<int:pk>/',views.seller_delete_cab,name='seller_delete_cab'),
path('hotel_view_products/',views.hotel_view_products,name='hotel_view_products'),
path('user_flight_detail/<int:pk>/',views.user_flight_detail,name='user_flight_detail'),
path('user_cab_detail/<int:pk>/',views.user_cab_detail,name='user_cab_detail'),
path('hotel_detail/<int:pk>/',views.hotel_detail,name='hotel_detail'),
path('user_hotel_detail/<int:pk>/',views.user_hotel_detail,name='user_hotel_detail'),
path('user_add_wishlist/<int:pk>/',views.user_add_wishlist,name='user_add_wishlist'),
path('mywishlist/',views.mywishlist,name='mywishlist'),
path('user_remove_wishlist/<int:pk>',views.user_remove_wishlist,name='user_remove_wishlist'),
path('hotel_add_cart/<int:pk>/',views.hotel_add_cart,name='hotel_add_cart'),
path('flight_add_cart/<int:pk>/',views.flight_add_cart,name='flight_add_cart'),
path('cab_add_cart/<int:pk>/',views.cab_add_cart,name='cab_add_cart'),
path('mycart/',views.mycart,name='mycart'),
path('my_booking_order/',views.my_booking_order,name='my_booking_order'),
path('hotel_remove_cart/<int:pk>/',views.hotel_remove_cart,name='hotel_remove_cart'),
path('flight_remove_cart/<int:pk>/',views.flight_remove_cart,name='flight_remove_cart'),
path('cab_remove_cart/<int:pk>/',views.cab_remove_cart,name='cab_remove_cart'),
path('hotel_change_qty/',views.hotel_change_qty,name='hotel_change_qty'),
path('flight_change_qty/',views.flight_change_qty,name='flight_change_qty'),
path('cab_change_qty/',views.cab_change_qty,name='cab_change_qty'),
path('seller_add_hotelgallery/',views.seller_add_hotelgallery,name='seller_add_hotelgallery'),
path('pay/',views.initiate_payment,name='pay'),
path('callback/',views.callback,name='callback'),
] | [
"rvr10081985@gmail.com"
] | rvr10081985@gmail.com |
aee868eb2597469429538bbd075d10a018a753ac | 6fe2d3c27c4cb498b7ad6d9411cc8fa69f4a38f8 | /algorithms/algorithms-python/leetcode_medium/Question_111_Combination_Sum_III.py | 96ac21f6e928162be84fa4ea48977d9e38d1fd35 | [] | no_license | Lanceolata/code | aae54af632a212c878ce45b11dab919bba55bcb3 | f7d5a7de27c3cc8a7a4abf63eab9ff9b21d512fb | refs/heads/master | 2022-09-01T04:26:56.190829 | 2021-07-29T05:14:40 | 2021-07-29T05:14:40 | 87,202,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | #!/usr/bin/python
# coding: utf-8
class Solution(object):
    """LeetCode 216 — Combination Sum III.

    Find all combinations of exactly k distinct digits from 1..9 whose
    sum is n.  Each digit may be used at most once; combinations are
    produced in ascending lexicographic order.
    """

    def combinationSum3(self, k, n):
        """
        :type k: int
        :type n: int
        :rtype: List[List[int]]
        """
        results = []
        self.helper(k, n, 1, [], results)
        return results

    def helper(self, k, n, l, vec, res):
        """DFS: extend `vec` with digits >= l so the remainder n reaches 0."""
        if n < 0:
            return
        if n == 0 and len(vec) == k:
            res.append(list(vec))
            return
        for digit in range(l, 10):
            # Extend with `digit` and recurse on the remaining sum; building
            # a fresh list avoids the explicit append/pop backtracking.
            self.helper(k, n - digit, digit + 1, vec + [digit], res)
| [
"lanceolatayuan@gmail.com"
] | lanceolatayuan@gmail.com |
5c4481583616b552e258f790a750fce1afc4245a | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/MPOA-EXT-MIB.py | 8cccf569872dba8ee2a1f2f3442c0ee661ce4a91 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 10,427 | py | #
# PySNMP MIB module MPOA-EXT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MPOA-EXT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:14:59 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
Boolean, extensions = mibBuilder.importSymbols("CENTILLION-ROOT-MIB", "Boolean", "extensions")
mpcIndex, = mibBuilder.importSymbols("MPOA-MIB", "mpcIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter32, Integer32, NotificationType, Gauge32, MibIdentifier, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Bits, iso, TimeTicks, Counter64, ObjectIdentity, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Integer32", "NotificationType", "Gauge32", "MibIdentifier", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Bits", "iso", "TimeTicks", "Counter64", "ObjectIdentity", "Unsigned32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
RowStatus, TruthValue = mibBuilder.importSymbols("SNMPv2-TC-v1", "RowStatus", "TruthValue")
cnMpoaExt = MibIdentifier((1, 3, 6, 1, 4, 1, 930, 3, 7))
cnMpcConfigTable = MibTable((1, 3, 6, 1, 4, 1, 930, 3, 7, 2), )
if mibBuilder.loadTexts: cnMpcConfigTable.setStatus('mandatory')
if mibBuilder.loadTexts: cnMpcConfigTable.setDescription('The MPOA Bay Networks proprietary Client Configuration Table. This table contains configuration information for all MPOA Clients which this agent manages.')
cnMpcConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 930, 3, 7, 2, 1), ).setIndexNames((0, "MPOA-MIB", "mpcIndex"))
if mibBuilder.loadTexts: cnMpcConfigEntry.setStatus('mandatory')
if mibBuilder.loadTexts: cnMpcConfigEntry.setDescription('MPOA Client Bay Networks Configuration Entry. Each entry contains configuration information for one MPOA Client.')
cnMpcShareControlVccs = MibTableColumn((1, 3, 6, 1, 4, 1, 930, 3, 7, 2, 1, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cnMpcShareControlVccs.setStatus('mandatory')
if mibBuilder.loadTexts: cnMpcShareControlVccs.setDescription('This Parameter enables VCC sharing for MPOA Control VCCs if set to true. LLC encapsulation is always signaled, regardless of sharing.')
cnMpcShareDataVccs = MibTableColumn((1, 3, 6, 1, 4, 1, 930, 3, 7, 2, 1, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cnMpcShareDataVccs.setStatus('mandatory')
if mibBuilder.loadTexts: cnMpcShareDataVccs.setDescription('This parameter enables VCC sharing for MPOA Data VCCs if set to true. LLC encapsulation is always signaled, regardless of sharing.')
cnMpcValidEntryCheckInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 930, 3, 7, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cnMpcValidEntryCheckInterval.setStatus('mandatory')
if mibBuilder.loadTexts: cnMpcValidEntryCheckInterval.setDescription('This parameter specifies the interval in seconds, to check LOCAL IP FDB entries in the Valid state for minimum activity.')
cnMpcMinFlowPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 930, 3, 7, 2, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cnMpcMinFlowPacketCount.setStatus('mandatory')
if mibBuilder.loadTexts: cnMpcMinFlowPacketCount.setDescription('This parameter specifies the minimum number of packets to be forwarded by a Local FDB Entry in the Valid state in cnMpcValidEntryCheckInterval to maintain minimum activity level. If minimum activity is not maintained, the entry is deleted.')
cnMpoaIpVerification = MibIdentifier((1, 3, 6, 1, 4, 1, 930, 3, 7, 3))
cnMpoaIpVerificationTableType = MibScalar((1, 3, 6, 1, 4, 1, 930, 3, 7, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("exclusion", 2), ("inclusion", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cnMpoaIpVerificationTableType.setStatus('mandatory')
if mibBuilder.loadTexts: cnMpoaIpVerificationTableType.setDescription('This object controls the type of verification table that is being used. This object is used in combination with the status and download object and the IP verification table. Any change made to this object must be downloaded to the switch cards using the cnMpoaIpVerificationTableDownload object before the settings actually take effect. To enable a verification table, the table type must be set to exclusion or inclusion, enabled using the table status object and then downloaded to the cards using the download object. To delete the IP verification information, you must set the table status object to clear and then downloaded to the cards using the download object. When the information is deleted, the table type is read as unknown. To change the table type between exclusion and inclusion, you must first delete the IP verification information and then recreate it.')
cnMpoaIpVerificationTableStatus = MibScalar((1, 3, 6, 1, 4, 1, 930, 3, 7, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2), ("clear", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cnMpoaIpVerificationTableStatus.setStatus('mandatory')
if mibBuilder.loadTexts: cnMpoaIpVerificationTableStatus.setDescription('This object is used to enable, disable or clear the IP Verification information. Any change to this object information must be downloaded to the switch cards using the cnMpoaIpVerificationTableDownload object. An empty IP verification table will yield disable on a get.')
cnMpoaIpVerificationTableDownload = MibScalar((1, 3, 6, 1, 4, 1, 930, 3, 7, 3, 3), Boolean()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cnMpoaIpVerificationTableDownload.setStatus('mandatory')
if mibBuilder.loadTexts: cnMpoaIpVerificationTableDownload.setDescription('Setting this object to true(1) causes the MPOA IP Verification Table information to be downloaded to all of the cards in the switch that support MPOA Clients (MPCs). You must download the IP Verification Table information to the cards before it will become effective when you are dynamically configuring this feature. The IP Verification Table is automatically downloaded to the MPC configured cards at card initialization. When read, this object always returns false(2).')
cnMpoaIpVerificationTable = MibTable((1, 3, 6, 1, 4, 1, 930, 3, 7, 3, 4), )
if mibBuilder.loadTexts: cnMpoaIpVerificationTable.setStatus('mandatory')
if mibBuilder.loadTexts: cnMpoaIpVerificationTable.setDescription('The MPC IP Verification Table is either an inclusion or exclusion list as indicated by the cnMpoaIpVerificationTableType object. Any change to this table must be downloaded to the switch cards using the cnMpoaIpVerificationTableDownload object.')
cnMpoaIpVerificationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 930, 3, 7, 3, 4, 1), ).setIndexNames((0, "MPOA-EXT-MIB", "cnMpoaIpVerificationAddress"), (0, "MPOA-EXT-MIB", "cnMpoaIpVerificationMask"))
if mibBuilder.loadTexts: cnMpoaIpVerificationEntry.setStatus('mandatory')
if mibBuilder.loadTexts: cnMpoaIpVerificationEntry.setDescription('Each row of the cnMpoaIpVerificationTable consists of an IP address and IP mask that is used to identify a range of addresses that are included or excluded when creating MPOA IP shortcuts. This cnMpoaIpVerificationStatus object is used to control adding or deleting each row.')
cnMpoaIpVerificationAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 930, 3, 7, 3, 4, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cnMpoaIpVerificationAddress.setStatus('mandatory')
if mibBuilder.loadTexts: cnMpoaIpVerificationAddress.setDescription('This object is one of the two keys used to access the cnMpoaIpVerificationTable entries. This object contains an IP address used in conjunction with the cnMpoaIpVerificationMask to identify a range of one or more IP addresses.')
cnMpoaIpVerificationMask = MibTableColumn((1, 3, 6, 1, 4, 1, 930, 3, 7, 3, 4, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cnMpoaIpVerificationMask.setStatus('mandatory')
if mibBuilder.loadTexts: cnMpoaIpVerificationMask.setDescription('This object is one of the two keys used to access the cnMpoaIpVerificationTable entries. This object contains an IP mask used in conjunction with the cnMpoaIpVerificationAddress to identify a range of one or more IP addresses.')
cnMpoaIpVerificationStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 930, 3, 7, 3, 4, 1, 3), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cnMpoaIpVerificationStatus.setStatus('mandatory')
if mibBuilder.loadTexts: cnMpoaIpVerificationStatus.setDescription('Use this object to add or delete rows in the cnMpoaIpVerificationTable. To create new rows, use createAndGo(4) or createAndWait(5). To delete entries use destroy(6). A valid row will have the status of active(1) on a get.')
mibBuilder.exportSymbols("MPOA-EXT-MIB", cnMpoaIpVerificationMask=cnMpoaIpVerificationMask, cnMpoaIpVerificationTable=cnMpoaIpVerificationTable, cnMpoaIpVerificationAddress=cnMpoaIpVerificationAddress, cnMpcShareDataVccs=cnMpcShareDataVccs, cnMpcConfigTable=cnMpcConfigTable, cnMpcValidEntryCheckInterval=cnMpcValidEntryCheckInterval, cnMpoaIpVerification=cnMpoaIpVerification, cnMpoaIpVerificationTableType=cnMpoaIpVerificationTableType, cnMpoaIpVerificationTableStatus=cnMpoaIpVerificationTableStatus, cnMpcMinFlowPacketCount=cnMpcMinFlowPacketCount, cnMpcConfigEntry=cnMpcConfigEntry, cnMpcShareControlVccs=cnMpcShareControlVccs, cnMpoaExt=cnMpoaExt, cnMpoaIpVerificationEntry=cnMpoaIpVerificationEntry, cnMpoaIpVerificationStatus=cnMpoaIpVerificationStatus, cnMpoaIpVerificationTableDownload=cnMpoaIpVerificationTableDownload)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
32dfb20f883bdf96177a04c14bb4d9b15114f4a0 | 645877ca2c70cdb573e729aa2873f91057dbb990 | /Frontend/Password_Window_S.py | fd6349dcdb908ab0ae4f2b6b030f80143541ad4c | [] | no_license | Jefferson-9907/SYSTEM_IFAP | aa9c7fdc8c7cc4c5b1c9061016b6fece23ca12ab | 4c79258ec7dcbf92dfbd3cd0ec2841f016b31111 | refs/heads/main | 2023-07-20T20:08:37.399487 | 2021-08-14T01:03:48 | 2021-08-14T01:03:48 | 395,813,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,167 | py | # Import Modules
from _datetime import datetime
from tkinter import *
import mariadb
from tkinter import messagebox, ttk
class Password_S:
def __init__(self, root):
self.root = root
self.root.title("SYST_CONTROL--›Usuarios")
self.root.attributes('-fullscreen', True)
self.root.resizable(False, False)
imagenes = {
'fondo': PhotoImage(file='./recursos/FONDO_PRINCIPAL1.png'),
}
self.barra1 = Label(self.root)
self.barra1.config(bg='black', padx=681, pady=20)
self.barra1.grid(row=0, column=0, sticky='w', padx=0, pady=0)
self.barra2 = Label(self.root)
self.barra2.config(bg="#a27114", padx=681, pady=10)
self.barra2.grid(row=0, column=0, sticky='w', padx=0, pady=0)
self.texto1 = Label(self.root, text='SYSTEM CONTROL (CAMBIAR CONTRASEÑA)')
self.texto1.config(font=("Britannic", 20, "bold"), fg='black', bg="#a27114")
self.texto1.grid(row=0, column=0, sticky='w', padx=475, pady=0)
# =============================================================
# FONDO PANTALLA PRINCIPAL
# =============================================================
self.img_p_pr = Label(self.root, image=imagenes['fondo'], compound=TOP)
self.img_p_pr.image = imagenes['fondo']
self.img_p_pr.grid(row=1, column=0, sticky='NW', padx=0, pady=0)
# =============================================================
# CREACIÓN DE LA BARRA DE MENÚ
# =============================================================
self.menubarra = Menu(self.root)
# =============================================================
# CREACIÓN DEL MENÚ
# =============================================================
self.menubarra.add_cascade(label='ALUMNOS')
self.root.config(menu=self.menubarra)
self.menus = Menu(self.root)
self.Column1 = Menu(self.menus, tearoff=0)
# =============================================================
# AÑADIENDO OPCIONES AL MENÚ PRINCIPAL
# =============================================================
self.menus.add_cascade(label='INICIO', menu=self.Column1)
self.Column1.add_command(label='Menú Inicio', command=self.principal_btn)
self.Column2 = Menu(self.menus, tearoff=0)
self.root.config(menu=self.menus)
# =============================================================
# AÑADIENDO OPCIONES AL MENÚ ALUMNO
# =============================================================
self.menus.add_cascade(label='ALUMNOS', menu=self.Column2)
self.Column3 = Menu(self.menus, tearoff=0)
self.root.config(menu=self.menus)
self.cuaderno = ttk.Notebook(self.root, width=1000, height=625)
self.cuaderno.grid(row=1, column=0, sticky='nw', padx=10, pady=5)
# =============================================================
# CREACIÓN DEL MENÚ ASESORES
# =============================================================
self.menus.add_cascade(label='ASESORES', menu=self.Column3)
self.Column3.add_command(label='Menú Asesores', command=self.assesor_btn)
self.Column4 = Menu(self.menus, tearoff=0)
self.root.config(menu=self.menus)
# =============================================================
# CREACIÓN DEL DE MENÚ CURSOS
# =============================================================
self.menus.add_cascade(label='CURSOS', menu=self.Column4)
self.Column4.add_command(label='Menú Cursos', command=self.courses_btn)
self.Column5 = Menu(self.menus, tearoff=0)
self.root.config(menu=self.menus)
# =============================================================
# CREACIÓN DEL DE MENÚ AYUDA
# =============================================================
self.menus.add_cascade(label='USUARIOS', menu=self.Column5)
self.Column5.add_command(label='Cambiar Usuario', command=self.logout)
self.Column5.add_command(label='Cambiar Contraseña')
self.Column5.add_separator()
self.Column5.add_command(label='Cerrar Sesión', command=self.salir_principal)
self.Column5.add_separator()
self.Column6 = Menu(self.menus, tearoff=0)
self.root.config(menu=self.menus)
# =============================================================
# CREACIÓN DEL DE MENÚ INFO
# =============================================================
self.menus.add_cascade(label='INFO', menu=self.Column6)
self.Column6.add_command(label='Sobre IFAP®', command=self.caja_info_ifap)
self.Column6.add_separator()
self.Column6.add_command(label='Sobre SIST_CONTROL (IFAP®)', command=self.caja_info_sist)
self.Column6.add_separator()
self.root.config(menu=self.menus)
data = datetime.now()
fomato_f = " %A %d/%B/%Y %H:%M:%S %p "
self.footer = Label(self.root, text=' FECHA Y HORA DE INGRESO: ', font=("Cooper Black", 10), bg='Honeydew2',
relief=RIDGE)
self.footer.place(x=0, y=703)
self.footer_1 = Label(self.root, text=str(data.strftime(fomato_f)), font=("Lucida Console", 10), bg='Honeydew2',
relief=RIDGE)
self.footer_1.place(x=212, y=704)
self.footer_4 = Label(self.root, text='J.C.F DESING® | Derechos Reservados 2021', width=195, bg='black',
fg='white')
self.footer_4.place(x=0, y=725)
# Manage Frame
self.Manage_Frame = Frame(self.root, relief=RIDGE, bd=4, bg='#0d1e24')
self.Manage_Frame.place(x=200, y=200, width=600, height=300)
self.m_title = Label(self.Manage_Frame, text="-ADMINISTAR USUARIOS-\nCAMBIAR CONTRASEÑA",
font=("Copperplate Gothic Bold", 16, "bold"), bg='#0d1e24', fg="White")
self.m_title.grid(row=0, column=0, columnspan=1, padx=10, pady=30)
# Variables
self.username = StringVar()
self.old_password = StringVar()
self.new_password = StringVar()
self.User_Frame = Frame(self.Manage_Frame, bg='#0d1e24')
self.User_Frame.grid(row=1, column=0, padx=100, pady=10)
self.lbl_us = Label(self.User_Frame, text="USUARIO", width='10', font=('Copperplate Gothic Bold', 10),
bg='#808080', fg="#0A090C")
self.lbl_us.grid(row=1, column=0, padx=0, pady=5, sticky="E")
self.e_us = Entry(self.User_Frame, textvariable=self.username, bd=5, relief=GROOVE)
self.e_us.grid(row=1, column=1, padx=10, pady=5, sticky="W")
self.l_c_ant = Label(self.User_Frame, text="CONTRASEÑA ANTERIOR", font=('Copperplate Gothic Bold', 10),
bg='#808080',
fg="#0A090C")
self.l_c_ant.grid(row=2, column=0, padx=0, pady=5, sticky="S")
self.e_c_ant = Entry(self.User_Frame, show="*", textvariable=self.old_password, bd=5, relief=GROOVE)
self.e_c_ant.grid(row=2, column=1, padx=10, pady=5, sticky="W")
self.l_n_cont = Label(self.User_Frame, text="NUEVA CONTRASEÑA", font=('Copperplate Gothic Bold', 10),
bg='#808080',
fg="#0A090C")
self.l_n_cont.grid(row=3, column=0, padx=0, pady=5, sticky="E")
self.e_n_cont = Entry(self.User_Frame, show="*", textvariable=self.new_password, bd=5, relief=GROOVE)
self.e_n_cont.grid(row=3, column=1, padx=10, pady=5, sticky="W")
self.chg_btn = Button(self.Manage_Frame, text="CAMBIAR CONTRASEÑA", font=('Copperplate Gothic Bold', 10),
bg="#00A1E4",
fg="#FFFCF9", relief=GROOVE, width=20, command=self.change_pass)
self.chg_btn.grid(row=2, column=0, padx=200, pady=5)
def change_pass(self):
if self.username.get() == '':
messagebox.showerror("SYST_CONTROL(IFAP®) (ERROR)", "POR FAVOR INGRESE EL CAMPO: USUARIO")
elif self.old_password.get() == "":
messagebox.showerror("SYST_CONTROL(IFAP®) (ERROR)", "POR FAVOR INGRESE EL CAMPO: CONTRASEÑA ANTERIOR")
elif self.new_password.get() == "":
messagebox.showerror("SYST_CONTROL(IFAP®) (ERROR)", "POR FAVOR INGRESE EL CAMPO: CONTRASEÑA NUEVA")
else:
self.connect = mariadb.connect(host="localhost", user="root", passwd="", database="system_bd_ifap")
self.curr = self.connect.cursor()
sql = "SELECT contraseña FROM usuarios WHERE usuario='" + self.username.get() + "' and contraseña='" \
+ self.old_password.get() + "'"
self.curr.execute(sql)
if self.curr.fetchall():
self.connect = mariadb.connect(host="localhost", user="root", passwd="", database="system_bd_ifap")
self.curr = self.connect.cursor()
self.sql = f"""UPDATE usuarios SET contraseña='{self.new_password.get()}'\
WHERE usuario='{self.username.get()}'"""
self.curr.execute(self.sql)
self.connect.commit()
messagebox.showinfo("SYST_CONTROL(IFAP®)", "CAMBIO DE CONTRASEÑA EXITOSO\nUSUARIO: " +
self.username.get() + "\nCONTRASEÑA: " + self.new_password.get())
else:
messagebox.showerror("ERROR!!!", "NO SE PUDO REALIZAR LA ACCIÓN: CAMBIO DE CONTRASEÑA.\n"
"\t(INGRESE SU CREDENCIALES ACTUALES)")
self.username.set('')
def logout(self):
self.root.destroy()
from Login_Window import Login
st_root = Tk()
Login(st_root)
st_root.mainloop()
def principal_btn(self):
self.root.destroy()
from Secretaria.Principal_Window_S import Principal_S
st_root = Tk()
Principal_S(st_root)
st_root.mainloop()
def student_btn(self):
self.root.destroy()
from Secretaria.Student_Window_S import Student_S
st_root = Tk()
Student_S(st_root)
st_root.mainloop()
def assesor_btn(self):
self.root.destroy()
from Secretaria.Assesor_Window_S import Assesor_S
st_root = Tk()
Assesor_S(st_root)
st_root.mainloop()
def courses_btn(self):
self.root.destroy()
from Secretaria.Course_Window_S import Course_S
st_root = Tk()
Course_S(st_root)
st_root.mainloop()
def salir_principal(self):
self.sa = messagebox.askyesno('CERRAR SESIÓN', 'CERRAR SYST_CONTROL(IFAP®)')
if self.sa:
exit()
# =============================================================
# FUNCIÓN CAJA DE INFORMACIÓN DEL INSTITUTO(INFO)
# =============================================================
def caja_info_ifap(self):
self.men1 = messagebox.showinfo('SIST_CONTROL (IFAP®)', 'INSTITUTO DE FORMACIÓN ACADEMICA PROEZAS(IFAP®)')
# =============================================================
# FUNCIÓN CAJA DE INFORMACIÓN DEL SISTEMA(INFO)
# =============================================================
def caja_info_sist(self):
self.men2 = messagebox.showinfo('SIST_CONTROL (IFAP®)',
'SIST_CONTROL (IFAP®) v2.0\n'
'El uso de este software queda sujeto a los términos y condiciones del '
'contrato "BJM DESING®-CLIENTE". \n'
'El uso de este software queda sujeto a su contrato. No podrá utilizar '
'este software para fines de distribución\n'
'total o parcial.\n\n\n© 2021 BJM DESING®. Todos los derechos reservados')
if __name__ == '__main__':
root = Tk()
application = Password_S(root)
root.mainloop()
| [
"jeffersoncaguafigueroa@gmail.com"
] | jeffersoncaguafigueroa@gmail.com |
c3f2b8f0d8c6f079bf8e9c35baec97c029ca844c | e2a7d114abe00c293a661506382aabc991144dd5 | /Services/ResponderMensagem.py | 990231df1d928d5684bfabb381b8b747df9ebe4f | [] | no_license | alexaugustobr/impacta-desenvolvimento-aplicacoes-distribuidas | 57978053cbacd678802f830bc838c5e756e6b770 | c449dc7aa5c790e09a596971b507ced6308e3f94 | refs/heads/master | 2021-09-14T12:25:54.427637 | 2018-05-13T20:44:54 | 2018-05-13T20:44:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | from Server import mensagens
from Models.Mensagem import Mensagem
from Services.CadastrarMensagem import cadastrarMensagem
def responderMensagem(id, resposta):
for mensagem in mensagens:
if str(mensagem["id"]) == str(id):
resposta = cadastrarMensagem(resposta)
mensagem['respostas'].append(resposta["id"])
return resposta
return None | [
"alex.augusto@cartorios.com.vc"
] | alex.augusto@cartorios.com.vc |
4226d913c82fc3fd3d68a44df6697fe697b6cc5c | ca5b5c217e0053645c2664d777699e9a5050715e | /tex/gen_links.py | 2a733f8bffe856d7ac4c2dffecd46daa7733bfae | [
"MIT"
] | permissive | rodluger/starrynight | 1405ffdb5a0dd0fefc0ae34e7cdaf7eab4735356 | d3f015e466621189cb271d4d18b538430b14a557 | refs/heads/master | 2021-10-26T03:32:15.220725 | 2021-10-22T15:16:48 | 2021-10-22T15:16:48 | 236,542,672 | 7 | 1 | MIT | 2020-06-03T19:51:10 | 2020-01-27T16:58:05 | Jupyter Notebook | UTF-8 | Python | false | false | 881 | py | from __future__ import print_function
import subprocess
import os
# Generate the github links
hash = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("utf-8")[:-1]
slug = "rodluger/starrynight"
with open("gitlinks.tex", "w") as f:
print(
r"\newcommand{\codelink}[1]{\href{https://github.com/%s/blob/%s/tex/figures/#1.py}{\codeicon}\,\,}"
% (slug, hash),
file=f,
)
print(
r"\newcommand{\animlink}[1]{\href{https://github.com/%s/blob/%s/tex/figures/#1.gif}{\animicon}\,\,}"
% (slug, hash),
file=f,
)
print(
r"\newcommand{\prooflink}[1]{\href{https://github.com/%s/blob/%s/tex/proofs/#1.ipynb}{\raisebox{-0.1em}{\prooficon}}}"
% (slug, hash),
file=f,
)
print(
r"\newcommand{\cilink}[1]{\href{https://dev.azure.com/%s/_build}{#1}}" % (slug),
file=f,
)
| [
"rodluger@gmail.com"
] | rodluger@gmail.com |
44cd07d49d544ba58d1b0d5a4e6f1e31628e7fb4 | 200d2ab5cb13df314d7fa9a7e281000d32b72b5e | /asd.py | 44660173276579fdf4cf39b4c433e56de2460401 | [] | no_license | xowjd258/ML_with_python | 246bece6a22177a5b4231c8b4cdbb2602607ffbd | 814e289a25768caf3fb0780e3d25e03a81b54233 | refs/heads/master | 2020-08-02T16:24:39.304517 | 2019-11-24T07:46:04 | 2019-11-24T07:46:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch
# In[2]:
torch.empty(5,3)
# In[3]:
x=torch.rand(5,3)
# In[4]:
print(x)
# In[5]:
torch.zeros(5,3,dtype=torch.long)
# In[6]:
a=[
[1,2,3,4],[2,3,4,5]
]
# In[8]:
torch.FloatTensor(a)
x = torch.tensor(a)
# In[10]:
print(x.size())
# In[11]:
#행렬은 같은모양끼리만 더할 수 있다.
print(x+3)
# In[16]:
y=torch.tensor([[1],[2]])
print(x+y)
print(x*y)
# In[23]:
y[:,:]
# In[29]:
torch.cuda.is_available()
# In[ ]:
| [
"noreply@github.com"
] | noreply@github.com |
fa20d3ae1f8e6295713b6a8f217a871b4d843616 | b6f4393777d4f6a3a8b599700ce3405be76c4bc4 | /Apple-Music/Leticia/api/models.py | eb394de811af6473a82f9e7f5f7aa8d11e8e4c24 | [] | no_license | azatnt/Apple-Music-rest_framework- | b13897dd40337384469df269cdf46bd085487442 | 09b7e602078a6d82f63725b757bb657afd221776 | refs/heads/main | 2023-02-10T16:45:47.618860 | 2021-01-14T14:37:25 | 2021-01-14T14:37:25 | 326,934,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | from django.db import models
import string
import random
def generate_unique_code():
length = 6
while True:
code = ''.join(random.choices(string.ascii_uppercase, k=length))
if Room.objects.filter(code=code).count() == 0:
break
return code
class Room(models.Model):
code = models.CharField(
max_length=8, default=generate_unique_code, unique=True)
host = models.CharField(max_length=50, unique=True)
guest_can_pause = models.BooleanField(default=False, null=False)
votes_to_skip = models.IntegerField(null=False, default=2)
created_at = models.DateTimeField(auto_now_add=True)
current_song = models.CharField(max_length=50, null=True)
def __str__(self):
return self.code
| [
"58590243+pr1nce07@users.noreply.github.com"
] | 58590243+pr1nce07@users.noreply.github.com |
def63f86d5ae9ed379eaafccbf5fab2b06e4b68a | eb2998606d9b8e6ad9ecdb5a602f2060cb9f5d55 | /scripts/guiScripts/pathLists.py | 2ca77076d1a8b3f7164f0461b01799bb4bcc6e3b | [] | no_license | hirokimoto/CodeAnalysis | 5faa2fdf8ce2502c2d9434e2127fa0871f85e941 | 5f8bc2962dfc366c5eafdfac17801f56decc857a | refs/heads/master | 2021-01-19T15:50:02.866043 | 2017-04-14T18:39:10 | 2017-04-14T18:39:10 | 88,230,836 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#pathLists.py
#Defines lists which will be used on the GUI and main to synchronize
#Created: ssia@keystonestrategy.com
#Last Updated: 11/16/2016
def returnPaths():
pathList = ["rampout", "diffout", "inputFolder", "langDef", "undpath", "strings", "depout", "profout", "vizout"]
auxPathList = ["tools", "code", "thirdparty", "deployments"]
return (pathList, auxPathList) | [
"hiroki.moto.pro@outlook.com"
] | hiroki.moto.pro@outlook.com |
df04a0ca32872fae075e270fad43a2e7b4c7e86f | 0b8b6b6ae8e3544f214c18aab5a023ce45e4e763 | /Partie 3/app/boids.py | 519c11a75df78d5302fd98dad42241d59913a69e | [] | no_license | 3201101/3I025 | 7b86ac478fa85cf73d88543b60f0464f0a7d4661 | 89dbbb1f3d88bf9d69f625d4b2f65576cf0b09aa | refs/heads/master | 2021-01-13T00:37:37.627313 | 2016-04-12T00:35:10 | 2016-04-12T00:35:10 | 51,313,851 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,271 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# multirobot.py
# Contact (ce fichier uniquement): nicolas.bredeche(at)upmc.fr
#
# Description:
# Template pour simulation mono- et multi-robots type khepera/e-puck/thymio
# Ce code utilise pySpriteWorld, développé par Yann Chevaleyre (U. Paris 13)
#
# Dépendances:
# Python 2.x
# Matplotlib
# Pygame
#
# Historique:
# 2016-03-28__23:23 - template pour 3i025 (IA&RO, UPMC, licence info)
#
# Aide: code utile
# - Partie "variables globales"
# - La méthode "step" de la classe Agent
# - La fonction setupAgents (permet de placer les robots au début de la simulation)
# - La fonction setupArena (permet de placer des obstacles au début de la simulation)
# - il n'est pas conseillé de modifier les autres parties du code.
#
import sys,os
cwd = os.getcwd()
sys.path.append(cwd.replace("app", "pySpriteWorld", 1))
from robosim import *
from random import random, shuffle
import time
import sys
import atexit
from itertools import count
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
''' Aide '''
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
#game.setMaxTranslationSpeed(3) # entre -3 et 3
# size of arena:
# screenw,screenh = taille_terrain()
# OU: screen_width,screen_height
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
''' variables globales '''
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
game = Game()
agents = []
screen_width=32*20 #512,768,... -- multiples de 32
screen_height=32*20 #512,768,... -- multiples de 32
nbAgents = 40
maxSensorDistance = 30 # utilisé localement.
maxRotationSpeed = 5
SensorBelt = [-170,-80,-40,-20,+20,40,80,+170] # angles en degres des senseurs
maxIterations = -1 # infinite: -1
showSensors = True
frameskip = 0 # 0: no-skip. >1: skip n-1 frames
verbose = True
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
''' Classe Agent/Robot '''
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
class Agent(object):
agentIdCounter = 0 # use as static
id = -1
robot = -1
name = "Equipe Alpha" # A modifier avec le nom de votre équipe
def __init__(self,robot):
self.id = Agent.agentIdCounter
Agent.agentIdCounter = Agent.agentIdCounter + 1
#print "robot #", self.id, " -- init"
self.robot = robot
def getRobot(self):
return self.robot
def step(self):
#print "robot #", self.id, " -- step"
p = self.robot
# actions
rotation = 0
for i,impact in enumerate(sensors[p]):
if impact.layer == 'player' and impact.dist_from_border < maxSensorDistance:
if impact.dist_from_border > maxSensorDistance/2 :
rotation += SensorBelt[i] * (impact.dist_from_border / ( 2 * maxSensorDistance))
else :
rotation -= SensorBelt[i] * (1 - impact.dist_from_border / ( 2 *maxSensorDistance))
elif impact.dist_from_border < maxSensorDistance :
rotation -= SensorBelt[i] * (1 - impact.dist_from_border / maxSensorDistance)
p.rotate(rotation) # normalisé -1,+1 -- valeur effective calculé avec maxRotationSpeed et maxTranslationSpeed
p.forward(1) # normalisé -1,+1
#Exemple: comment récuperer le senseur #2
#dist = sensor_infos[2].dist_from_border
#if dist > maxSensorDistance:
# dist = maxSensorDistance # borne
# monitoring - affiche diverses informations sur l'agent et ce qu'il voit.
# pour ne pas surcharger l'affichage, je ne fais ca que pour le player 1
if verbose == True and self.id == 0:
efface() # j'efface le cercle bleu de l'image d'avant
color( (0,0,255) )
circle( *game.player.get_centroid() , r = 22) # je dessine un rond bleu autour de ce robot
print "\n# Current robot at " + str(p.get_centroid()) + " with orientation " + str(p.orientation())
#sensor_infos = sensors[p] # sensor_infos est une liste de namedtuple (un par capteur).
for i,impact in enumerate(sensors[p]): # impact est donc un namedtuple avec plein d'infos sur l'impact: namedtuple('RayImpactTuple', ['sprite','layer','x', 'y','dist_from_border','dist_from_center','rel_angle_degree','abs_angle_degree'])
if impact.dist_from_border > maxSensorDistance:
print "- sensor #" + str(i) + " touches nothing"
else:
print "- sensor #" + str(i) + " touches something at distance " + str(impact.dist_from_border)
print " - rotation = " + str(rotation)
if impact.layer == 'joueur':
playerTMP = impact.sprite
print " - type: robot no." + str(playerTMP.numero)
print " - x,y = " + str( playerTMP.get_centroid() ) + ")" # renvoi un tuple
print " - orientation = " + str( playerTMP.orientation() ) + ")" # p/r au "nord"
elif impact.layer == 'obstacle':
print " - type obstacle"
else:
print " - type boundary of window"
return
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
''' Fonctions init/step '''
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
def setupAgents():
global screen_width, screen_height, nbAgents, agents, game
# Make agents
nbAgentsCreated = 0
for i in range(nbAgents):
while True:
p = -1
while p == -1: # p renvoi -1 s'il n'est pas possible de placer le robot ici (obstacle)
p = game.add_players( (random()*screen_width , random()*screen_height) , None , tiled=False)
if p:
p.oriente( random()*360 )
p.numero = nbAgentsCreated
nbAgentsCreated = nbAgentsCreated + 1
agents.append(Agent(p))
break
game.mainiteration()
def setupArena():
for i in range(6,13):
addObstacle(row=3,col=i)
for i in range(3,10):
addObstacle(row=12,col=i)
addObstacle(row=4,col=12)
addObstacle(row=5,col=12)
addObstacle(row=6,col=12)
addObstacle(row=11,col=3)
addObstacle(row=10,col=3)
addObstacle(row=9,col=3)
def stepWorld():
# chaque agent se met à jour. L'ordre de mise à jour change à chaque fois (permet d'éviter des effets d'ordre).
shuffledIndexes = [i for i in range(len(agents))]
shuffle(shuffledIndexes) ### TODO: erreur sur macosx
for i in range(len(agents)):
agents[shuffledIndexes[i]].step()
return
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
''' Fonctions internes '''
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
def addObstacle(row,col):
# le sprite situe colone 13, ligne 0 sur le spritesheet
game.add_new_sprite('obstacle',tileid=(0,13),xy=(col,row),tiled=True)
class MyTurtle(Turtle): # also: limit robot speed through this derived class
maxRotationSpeed = maxRotationSpeed # 10, 10000, etc.
def rotate(self,a):
mx = MyTurtle.maxRotationSpeed
Turtle.rotate(self, max(-mx,min(a,mx)))
def onExit():
print "\n[Terminated]"
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
''' Main loop '''
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
init('vide3',MyTurtle,screen_width,screen_height) # display is re-dimensioned, turtle acts as a template to create new players/robots
game.auto_refresh = False # display will be updated only if game.mainiteration() is called
game.frameskip = frameskip
atexit.register(onExit)
#setupArena()
setupAgents()
game.mainiteration()
iteration = 0
while iteration != maxIterations:
# c'est plus rapide d'appeler cette fonction une fois pour toute car elle doit recalculer le masque de collision,
# ce qui est lourd....
sensors = throw_rays_for_many_players(game,game.layers['joueur'],SensorBelt,max_radius = maxSensorDistance+game.player.diametre_robot() , show_rays=showSensors)
stepWorld()
game.mainiteration()
iteration = iteration + 1
| [
"loglisci@hotmail.fr"
] | loglisci@hotmail.fr |
da07c9bf4e4dfa6fedec67e45efc284753925f26 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02952/s489061530.py | c11b8cfc760ea2a0ea44bc3ca92b0888dbd71b04 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | s=str(input())
n=len(s)
s=int(s)
if n==1:
print(s)
elif n==2:
print(9)
elif n==3:
print(10+s-100)
elif n==4:
print(909)
elif n==5:
print(910+s-10000)
else:
print(90909) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e5de502355aee9790733fb5254d6bc4f05305577 | 0024f0d37290bca5e15ec2c5a2d569cb03a7acd5 | /main/urls.py | 8836528307934fd1111e7ad2e8dea60eacfdcc2b | [] | no_license | Anjali-Joy/covidSafe | 78d9483f248923a7cc6905ad419bdcfc68ba12a6 | 0b32b0f3c9d3469804f104913c10f1d299eeb633 | refs/heads/master | 2023-01-24T04:10:23.776063 | 2020-11-30T18:43:33 | 2020-11-30T18:43:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | """covidSafe URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
urlpatterns = [
,
]
| [
"neharosegeorge@gmail.com"
] | neharosegeorge@gmail.com |
bb9751619e50440d869dca9747dcc8bbc008e715 | 6e54ce02441211085d2dff6a3d9ef4127b2902e8 | /manage.py | 4af45206f7a7f222d5204c3ece4239dd1cb6c8ef | [] | no_license | jeffplata/flask-base-app | 3079ec96e8c80109ce56ac989a4e0f86a69279b9 | 69843974d92bc762888d5f86186287b8018461e9 | refs/heads/master | 2022-12-29T07:52:36.849474 | 2020-10-12T00:30:43 | 2020-10-12T00:30:43 | 303,238,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | """This file sets up a command line manager.
Use "python manage.py" for a list of available commands.
Use "python manage.py runserver" to start the develop web on localhost:5000.
Use "python manage.py runserver --help" for additional runserver options.
"""
from flask_migrate import MigrateCommand
from flask_script import Manager
from app import create_app
from app.commands import InitDbCommand
# from app.setup import UploadUsers
# Setup Flask-Script with command line commands
manager = Manager(create_app)
manager.add_command('db', MigrateCommand)
manager.add_command('init_db', InitDbCommand)
# manager.add_command('upload', UploadUsers)
if __name__ == "__main__":
# python manage.py # shows available commands
# python manage.py runserver --help # shows available runserver options
manager.run()
| [
"rjreside@nfa.gov.ph.local"
] | rjreside@nfa.gov.ph.local |
26cc986e7a163cb9431a684a0d09b876fd8e7f95 | 354ee2dc23ac7a58bbe512c98d9febf2bfa47249 | /py/bee/bee_runner.py | f83143f45c83d19b38d84cf6b3400fc6a6487785 | [] | no_license | domingoxx/bee-leader | 61ccb82a48bfa0d552a5d3c6f038a160dfae156f | 6500757e7277fc7af3aa91d2640376e39420d2fb | refs/heads/main | 2023-05-30T07:21:26.501833 | 2021-06-08T13:33:23 | 2021-06-08T13:33:23 | 372,505,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,305 | py | import sys,threading, subprocess, signal, os
commandArgs = " ".join(sys.argv[1:])
def beeDataPath(dirname):
return f"/opt/data/{dirname}/bee"
def kill_child_processes(parent_pid, sig=signal.SIGTERM):
ps_command = subprocess.Popen("ps -o pid --ppid %d --noheaders" % parent_pid, shell=True, stdout=subprocess.PIPE)
ps_output = ps_command.stdout.read()
ps_output = str(ps_output, encoding="utf-8")
retcode = ps_command.wait()
assert retcode == 0, "ps command returned %d" % retcode
for pid_str in ps_output.split("\n")[:-1]:
os.kill(int(pid_str), sig)
class Bee(threading.Thread):
def __init__(self, datadir, callback):
threading.Thread.__init__(self)
self.datadir = datadir
self.node_id = datadir
self.callback = callback
self.bee_process = None
def run(self):
self.startBee(self.datadir, self.callback)
def isRunning(self):
return self.bee_process != None and self.bee_process.returncode == None
def shutdown(self):
if self.bee_process.returncode == None:
print('send terminate bee', flush=True)
kill_child_processes(self.bee_process.pid)
self.bee_process.terminate()
self.bee_process.wait(5)
if self.bee_process.returncode == None:
print(f'send kill bee {self.bee_process.pid}', flush=True)
subprocess.check_output(f"kill -9 {self.bee_process.pid}", shell=True)
def startBee(self, dirname, callback):
print('开始启动')
# --clef-signer-enable --clef-signer-endpoint http://127.0.0.1:8551
cmd = f"""
bee start {commandArgs} --password {dirname} --data-dir {beeDataPath(dirname)}
"""
print(cmd)
self.bee_process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stdin=subprocess.PIPE)
success = False
while self.bee_process.returncode == None:
result = self.bee_process.stdout.readline()
logStr = str(result, encoding="utf-8")
print(logStr, flush=True)
if logStr.find("using ethereum address") != -1:
splitList = logStr.split("using ethereum address")
address = splitList[1][1:-2]
print(address)
success = True
callback(address)
self.bee_process.poll()
if success == False:
callback(None)
print('Bee退出',flush=True)
| [
"wangchenxu92@gmail.com"
] | wangchenxu92@gmail.com |
8566bb358d3e0f236f623eb78b073d0d7776e85a | 77dcc06d8237c98218924a16b3138c857c405721 | /mysite/settings.py | 7b36e6d9443e22f10a8b8d9f3ab5dd262027efff | [] | no_license | gtoerner/django_shopcart1 | 8a7f10f21e007dfca17b2be5e51a57ada6842481 | 6aee9eaa3ba604846fccf318ba50ab94ab7011da | refs/heads/master | 2020-03-25T22:41:36.276309 | 2018-08-10T04:44:37 | 2018-08-10T04:44:37 | 144,236,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,503 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.15.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o_d(*+ej16mdfb8a$hci@%z0ubwht+b67_l$^+$&bz!kl$b$tm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'shop.apps.ShopConfig',
'cart.apps.CartConfig',
'orders.apps.OrdersConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
# Request/response pipeline; order matters (sessions before auth,
# auth before messages).
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Project `cart` app context processor (makes the cart
                # available in every template).
                'cart.context_processors.cart',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
#STATIC_ROOT = os.path.join(BASE_DIR, 'products/')
#MEDIA_URL = '/media/'
#MEDIA_ROOT = os.path.join(BASE_DIR, 'media_products/')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Session key under which the cart app stores its data.
CART_SESSION_ID = 'cart'
| [
"gtoerner@gmail.com"
] | gtoerner@gmail.com |
0823f5ea3e81c4e1e9f4032ea6744467374ffecf | da00754b095f2783166aa50eaab83d6d0875488b | /blog/models.py | 14f1278870c00fa950830261027e1091b8248466 | [] | no_license | Krom121/Pytonixs-v2 | 0e5b6c575c16715b0be471b2472c59a665c1317a | 087ddcffe2c414b0da78f7d994f9990594031cae | refs/heads/master | 2022-12-11T18:29:00.077621 | 2020-01-22T14:32:02 | 2020-01-22T14:32:02 | 235,055,686 | 0 | 0 | null | 2022-12-08T03:28:59 | 2020-01-20T08:43:55 | CSS | UTF-8 | Python | false | false | 3,305 | py | from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.contrib.auth.models import User
from ckeditor_uploader.fields import RichTextUploadingField
from ckeditor.fields import RichTextField
from imagekit.models import ImageSpecField, ProcessedImageField
from imagekit.processors import ResizeToFill
from taggit.managers import TaggableManager
###### POST CREATOR MODEL ########
class Author(models.Model):
    """Blog author: a one-to-one profile extension of the Django auth user."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Avatar, resized to a 50x50 PNG thumbnail on upload.
    profile_image = ProcessedImageField(upload_to='author_profile/',processors=[ResizeToFill(50, 50)],format='PNG',options={'quality': 60})
    def __str__(self):
        return self.user.username
##### POST BLOG CATEGORY MODEL ######
class Category(models.Model):
    """Editorial category; attached to posts via Post.categories (M2M)."""
    title = models.CharField(max_length=30,blank=True,null=True)
    def __str__(self):
        return self.title
##### MAIN POST MODEL PUBLISHED MANAGER #######
class PublishedManager(models.Manager):
    """Manager whose default queryset only returns posts with status 'published'."""
    def get_queryset(self):
        return super(PublishedManager,self).get_queryset().filter(status='published')
class Post(models.Model):
    """Blog post with image variants, tags, categories and a draft/publish workflow."""
    STATUS_CHOICES = (
        ('draft', 'Draft'),
        ('published', 'Published'),
    )
    title = models.CharField(max_length=100, blank=True,null=True)
    # Slug is only unique per publish date, allowing reuse across days.
    slug = models.SlugField(blank=True, null=True,unique_for_date='publish')
    author = models.ForeignKey(Author, on_delete=models.CASCADE,related_name='blog_posts')
    categories = models.ManyToManyField(Category)
    featured = models.BooleanField(default=False)
    tags = TaggableManager()
    # Pre-sized image variants produced at upload time.
    image_thumbnail = ProcessedImageField(upload_to='thumnail_headers/',processors=[ResizeToFill(400, 350)],format='PNG',options={'quality': 60})
    blog_header_image = ProcessedImageField(upload_to='blog_headers/',processors=[ResizeToFill(750, 500)],format= 'PNG',options={'quality': 60})
    alt = models.CharField(max_length=100, verbose_name='alternative text', blank=True,null=True)
    description = RichTextUploadingField(blank=True,null=True)
    # Denormalised counter; presumably maintained by the comment views -- TODO confirm.
    comment_count = models.IntegerField(default=0)
    publish = models.DateTimeField(default=timezone.now)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='draft')
    # NOTE(review): this is the only explicit manager, so Django will not add
    # the default `objects` manager automatically -- confirm callers use Post.published.
    published = PublishedManager() # PUBLISHED POST MANAGER.
    def get_absolute_url(self):
        return reverse("post-detail", kwargs={
            "slug": self.slug,
            "pk": self.pk
        })
    class Meta:
        ordering = ('-publish',)
    def __str__(self):
        return self.title
##### POST COMMENT MODEL ######
class Comment(models.Model):
    """Reader comment attached to a Post, oldest first."""
    post = models.ForeignKey(Post,on_delete=models.CASCADE,related_name='comments')
    name = models.CharField(max_length=80,blank=True, null=True)
    email = models.EmailField(max_length=150, blank=True, null=True)
    your_comment = RichTextField(blank=True, null=True)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    # Presumably a moderation flag (hide without deleting) -- TODO confirm usage.
    active = models.BooleanField(default=True)
    class Meta:
        ordering = ('created',)
def __str__(self):
return 'Commented by {} on {}'.format(self.name, self.post) | [
"stephenmacgregor9@gmail.com"
] | stephenmacgregor9@gmail.com |
2b871da9be741dbd1b9ef07d92c2e2156e9f0b24 | 16eccb09227958ef909c745b76df9c9eb264fa3d | /blog/migrations/0004_auto_20191111_0432.py | 0c3bc1cf60c80bb7dfc38a1852bff9e929778b6a | [] | no_license | yosephbernandus/my-first-django | f53f48b5e7cc60e8ad5ca14ba1d9c9a607d91a6d | a1ad6ac815eb548e49e20555279c66d7001731c6 | refs/heads/master | 2020-09-11T22:13:00.086623 | 2019-11-20T12:40:30 | 2019-11-20T12:40:30 | 222,206,869 | 0 | 0 | null | 2019-11-20T12:40:31 | 2019-11-17T06:26:53 | Python | UTF-8 | Python | false | false | 405 | py | # Generated by Django 2.2.6 on 2019-11-11 04:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_auto_20191111_0310'),
]
operations = [
migrations.AlterField(
model_name='post',
name='tag',
field=models.ManyToManyField(related_name='tags', to='blog.Tag'),
),
]
| [
"yosephbernandus@gmail.com"
] | yosephbernandus@gmail.com |
9ff36f0d84b6706713ba5db7bb10bfe7f268b71d | 0ca383fe9093940e549b38a06ad1b9f199d9b675 | /water_quality_index/taskOne.py | 55e85c30b74ef8b2bc1a14722e2164a63aa9dd05 | [] | no_license | harshithanjaniathi/HYDROTEAM8 | b79911929d378e7dfa5f78abd351c179ca09ddc4 | abae29d75d278d3fbdaa51e1d87349a1c452c13a | refs/heads/master | 2022-12-24T01:58:45.713576 | 2020-10-03T06:30:24 | 2020-10-03T06:30:24 | 300,853,287 | 0 | 0 | null | 2020-10-03T10:38:40 | 2020-10-03T10:25:41 | null | UTF-8 | Python | false | false | 2,347 | py | import tkinter as tk
from selectPage import SelectPage
from vars import Vars
class TaskOne(SelectPage):
    """Water Quality Index input page.

    Shows one labelled, keystroke-validated entry per water-quality
    parameter, a Calculate button, and a label displaying the weighted
    sum of the entered values (or an error message).
    """
    def __init__(self, parent, controller):
        # Parameter display names double as keys into self.weights,
        # self.textFields and self.labelFields.
        self.pH = "pH"
        self.temp = "Temperature"  # fixed display typo: was "Tempurature"
        self.wind = "Turbidity"
        self.tdv = "Total Dissolved Values"
        self.ns = "Nitrates"
        self.fc = "Fecal Coliform"
        self.words = [self.pH, self.wind, self.temp, self.tdv, self.ns, self.fc]
        # Relative weight of each parameter in the index.
        self.weights = {
            self.pH: 0.11,
            self.temp: 0.10,
            self.wind: 0.08,
            self.tdv: 0.07,
            self.ns: 0.10,
            self.fc: 0.16
        }
        self.textFields = {}
        self.labelFields = {}
        self.cal_value = tk.StringVar()
        self.cal_value.set("Not Calculated")
        SelectPage.__init__(self, parent, controller)
        # One label + validated entry row per parameter.
        for idx, i in enumerate(self.words):
            self.labelFields[i] = tk.Label(self, text=i, font=Vars.LABEL_FONT)
            # validate='key' re-checks the field on every keystroke; '%P'
            # passes the would-be new value to validate_float.
            self.textFields[i] = tk.Entry(self, validate='key',
                                          vcmd=(controller.register(self.validate_float), '%P'))
            self.labelFields[i].place(x=200, y=50 + (50 * idx))
            self.textFields[i].place(x=450, y=50 + (50 * idx))
        self.calculate = tk.Button(self, text="Calculate",
                                   command=lambda: self._calculate_wqi(),
                                   padx=10,
                                   pady=10)
        self.calculate.place(x=350, y=350, width=125, height=35)
        self.curr_value = tk.Label(self, textvariable=self.cal_value, font=Vars.LABEL_FONT)
        self.curr_value.place(x=500, y=350)

    def validate_float(self, inp, empty=0):
        """Return True if `inp` parses as a float.

        With `empty` falsy (keystroke validation) an empty string is
        accepted so the user can clear the field; with `empty` truthy the
        empty string is rejected.
        """
        try:
            if inp != "" or empty:
                float(inp)
        except (TypeError, ValueError):  # narrowed from a bare except
            return False
        return True

    def _calculate_wqi(self):
        """Validate every field, then show the weighted sum or an error text."""
        validate = True
        for i in self.words:
            validate = validate and self.validate_float(self.textFields[i].get(), 1)
        total = 0  # renamed from `sum`, which shadowed the builtin
        if validate:
            for i in self.words:
                total += (self.weights[i] * float(self.textFields[i].get()))
            self.cal_value.set(total)
        else:
            self.cal_value.set("Enter all Inputs")
| [
"cipher.bsd@gmail.com"
] | cipher.bsd@gmail.com |
93698e028445ae34d152462b1fb93ed46657de65 | 4c6d87577d9f12c08b5cd23ca294e0c94ebf92fd | /app.py | 62a558972aadd15b280de597e2cfa26fc75d31f3 | [] | no_license | wonhee0803/web-midtem-test | 732b3aba9650e1926ebc56b00cfc72f134dc22d8 | 7353ad8e39ea0f608b52bb6436145cd76ea05647 | refs/heads/master | 2023-01-05T09:20:58.636442 | 2020-10-29T13:38:09 | 2020-10-29T13:38:09 | 303,866,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | from flask import Flask, render_template
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
import os
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
bootstrap = Bootstrap(app)
# NOTE(review): hard-coded secret key; load it from an environment variable
# or config file before deploying.
app.config['SECRET_KEY'] = 'some string no one can guess'
class DeleteForm(FlaskForm):
    """Board form: a required text field ("작성" = write) and a delete ("삭제") button."""
    blank = StringField("작성", validators=[DataRequired()])
    delete = SubmitField('삭제')
@app.route('/')
def hello_world():
    """Landing page."""
    return render_template('index.html')
@app.route('/board')
def board():
    """Board page: renders board.html with a fresh DeleteForm instance."""
    return render_template('board.html', form=DeleteForm())
@app.route('/post')
def post():
    """Single post page."""
    return render_template('post.html')
@app.route('/author')
def author():
    """Author page."""
    return render_template('author.html')
@app.route('/writing')
def writing():
    """Writing (compose) page."""
    return render_template('writing.html')
@app.errorhandler(404)
def page_not_found(e):
    """Custom 404 page; returns the template together with a 404 status code."""
    return render_template('404.html'), 404
if __name__ == '__main__':
    # Development entry point; use a real WSGI server in production.
    app.run()
| [
"noreply@github.com"
] | noreply@github.com |
3d6543fe5c68e08ed789ad69df3e48da6f7addc9 | 119068a3c18a27a36d2c9cacb7a416a85443a7ff | /Fakerproject1/MyApp1/views.py | 1f4228d68d5a783579a97b92c51f78265bdbd42c | [] | no_license | Poojathimmanna/Faker_project | b4d49a1d4bb5f67319e3d25bf1f289f7367ead7e | 750020a49e7936b52a1c76b84f88ba1e0686a5b1 | refs/heads/master | 2023-04-01T04:37:20.820297 | 2021-04-15T09:56:21 | 2021-04-15T09:56:21 | 358,217,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | from django.shortcuts import render
from MyApp1.models import Student
def view1(request):
    """Render 'MyApp1/1.html' with every Student record under key "std"."""
    context = {"std": Student.objects.all()}
    return render(request, 'MyApp1/1.html', context)
| [
"poojathimmanna@gmail.com"
] | poojathimmanna@gmail.com |
a40845fe784984a2a2ef36f79556424959d0fcd3 | 5689bffe9a9594e52f934542994db464ed095d71 | /08_unittest/test05_assert_exercises.py | 1d6a1bd6fa8d1033455a67c98cde5e33428fe349 | [] | no_license | WenhaoChen0907/Web-automation | 5c0e2c61e247f32b0e5f2f2a33c9f8cc6e73dc20 | 5488f2c62016f02c934b709e7e9e6ea831d9891c | refs/heads/master | 2023-03-07T13:31:10.265019 | 2021-02-15T06:33:50 | 2021-02-15T06:33:50 | 338,986,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,618 | py | # iwebshop正向登录代码练习
import unittest
import sys
import time
from time import sleep
from selenium import webdriver
class IwebLogin(unittest.TestCase):
    """Positive login test for a local iWebShop install (Selenium + Firefox)."""
    def setUp(self):
        # Fresh browser per test, pointed at the local shop front page.
        self.driver = webdriver.Firefox()
        self.driver.maximize_window()
        self.driver.implicitly_wait(30)
        self.driver.get("http://localhost/iwebshop/")
    def tearDown(self):
        sleep(2)
        self.driver.quit()
    def testLogin(self):
        """Log in as admin and assert the user name appears in the login info."""
        driver = self.driver
        driver.find_element_by_link_text("登录").click()
        driver.find_element_by_css_selector("input[alt*='邮箱']").send_keys("admin")
        driver.find_element_by_css_selector("input[alt*='密码']").send_keys("123456")
        driver.find_element_by_css_selector(".submit_login").click()
        sleep(3)
        # Read the login status text
        text = driver.find_element_by_css_selector(".loginfo").text
        # Assert, taking a screenshot on failure
        try:
            self.assertIn("admin", text)
        except AssertionError:
            # driver.get_screenshot_as_file("../images/img2.jpg")
            # Screenshot name carries a timestamp (recommended pattern)...
            now = time.strftime("%Y_%m_%d %H_%M_%S")
            # ...plus the assertion-failure message
            rep = sys.exc_info()[1]
            driver.get_screenshot_as_file("../images/%s--%s.jpg" % (now, rep))
            # Re-raise so the test is still reported as failed
            raise AssertionError
        sleep(3)
        driver.find_element_by_css_selector(".reg").click()
if __name__ == '__main__':
    # Run every test*-prefixed method in this module via the unittest runner
    unittest.main()
| [
"18738127274@163.com"
] | 18738127274@163.com |
b28c6969f1c0ba1babff55ae8e4e2731fe7d109a | 377adaa60de35b5afd2fb4e211f57db41c631cf5 | /tools/svglit/metaPath.py | 58a628c5645fb64147f8e59593a695c78cacc751 | [] | no_license | tyrande/spbot | c071a73824a86f55b4d2908f27a4d46810f5fde7 | 9964481986350190381de3c6bfda606bfb5a6875 | refs/heads/master | 2016-09-05T23:19:29.668489 | 2014-01-16T15:58:35 | 2014-01-16T15:58:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,167 | py | from point import point
from line import line
from curve import curve
from qcurve import qcurve
import math, re
class metaPath:
    """Parses an SVG path data string into point/line/curve/qcurve segments.

    The data string is normalised with regex substitutions into
    ';'-separated "<command>:<params>" chunks, then each command is turned
    into a segment object.  The previous segment is kept in _obj so that
    relative commands and smooth-curve reflections (S/s, T/t) can chain
    off its end point.  Arc commands ('A'/'a') are recognised but not
    implemented; unknown commands stop the parse.
    """
    def __init__(self, dataStr):
        self.curves = []
        if not re.match(r"^[Mm]", dataStr):
            # bugfix: raising a plain string is illegal; raise a real exception.
            raise ValueError("no start point")
        # Normalise: tag commands, then comma-separate the parameter lists.
        _ds = re.sub(r"([Mm])", r"\1:", dataStr)
        _ds = re.sub(r"([LHVCSQTAZlhvcsqtaz])", r";\1:", _ds)
        _ds = re.sub(r"-", r",-", _ds)
        _ds = re.sub(r"([:,]),", r"\1", _ds)
        for c in _ds.split(";"):
            _dt = c.split(":")
            if len(_dt) < 2:
                break
            _command = re.sub(r"\s", "", _dt[0])
            _ss = re.sub("-", ",-", _dt[1])
            _ss = re.sub(r"\s", ",", _ss)
            _ss = re.sub(r"^[,]+", '', _ss)
            _ss = re.sub(r"[,]+", ',', _ss)
            _ps = _ss.split(",")
            if 'M' == _command or 'm' == _command:
                # NOTE(review): 'm' (relative moveto) is treated as absolute;
                # only correct when it is the first command of the path.
                _obj = point(_ps[0], _ps[1])
            elif 'L' == _command:
                _obj = line(_obj.end, point(_ps[0], _ps[1]))
            elif 'l' == _command:
                _obj = line(_obj.end, _obj.end.move(_ps[0], _ps[1]))
            elif 'H' == _command:
                _obj = line(_obj.end, point(_ps[0], _obj.end.y))
            elif 'h' == _command:
                _obj = line(_obj.end, _obj.end.move(_ps[0], 0))
            elif 'V' == _command:
                # bugfix: V/v carry a single coordinate, so it is _ps[0]
                # (previously _ps[1], which raised IndexError).
                _obj = line(_obj.end, point(_obj.end.x, _ps[0]))
            elif 'v' == _command:
                _obj = line(_obj.end, _obj.end.move(0, _ps[0]))
            elif 'C' == _command:
                _obj = curve(_obj.end, point(_ps[0], _ps[1]), point(_ps[2], _ps[3]), point(_ps[4], _ps[5]))
            elif 'c' == _command:
                _obj = curve(_obj.end, _obj.end.move(_ps[0], _ps[1]), _obj.end.move(_ps[2], _ps[3]), _obj.end.move(_ps[4], _ps[5]))
            elif 'S' == _command:
                _obj = curve(_obj.end, _obj.refCon(), point(_ps[0], _ps[1]), point(_ps[2], _ps[3]))
            elif 's' == _command:
                _obj = curve(_obj.end, _obj.refCon(), _obj.end.move(_ps[0], _ps[1]), _obj.end.move(_ps[2], _ps[3]))
            elif 'Q' == _command:
                _obj = qcurve(_obj.end, point(_ps[0], _ps[1]), point(_ps[2], _ps[3]))
            elif 'q' == _command:
                _obj = qcurve(_obj.end, _obj.end.move(_ps[0], _ps[1]), _obj.end.move(_ps[2], _ps[3]))
            elif 'T' == _command:
                _obj = qcurve(_obj.end, _obj.refCon(), point(_ps[0], _ps[1]))
            elif 't' == _command:
                _obj = qcurve(_obj.end, _obj.refCon(), _obj.end.move(_ps[0], _ps[1]))
            elif 'A' == _command:
                ''  # arcs not supported yet
            else: break
            self.curves.append(_obj)

    def output(self):
        """Concatenate the output() of every parsed segment."""
        parts = ""  # renamed from `str`, which shadowed the builtin
        for c in self.curves:
            parts += c.output()
        return parts
if __name__ == "__main__":
    # Smoke test: parse and re-emit a sample SVG path (Python 2 script).
    ds = 'M352,368H160c-8.844,0-16-7.156-16-16s7.156-16,16-16h192c8.844,0,16,7.156,16,16S360.844,368,352,368z'
    g = metaPath(ds)
    print g.output()
# ds1 = "M200,300 Q400,50 600,300 T1000,300"
# g1 = metaPath(ds1)
# print g1.output() | [
"xuekun.s@gmail.com"
] | xuekun.s@gmail.com |
96fd031274243132b38f7eb70a57d295d5cdd79e | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/sy9.py | dbeb1cd55cf03ba4853cb5971ccaa811db6542f3 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Print the quoted payload: if the token list is delimited by '"' tokens,
    # join and print everything between them; a bare pair of quotes prints a
    # blank line.  (Python 2 print-statement syntax.)
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    # Interpret fileName line by line: lines whose first token is 'sy9' are
    # print commands; anything else prints ERROR and aborts.
    # NOTE(review): a blank line would raise IndexError on data[0] -- confirm
    # inputs never contain empty lines.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'sy9':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
    # Usage: python sy9.py <script-file>
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
5edf93f12059539e3393547eb7eda1639274dad2 | bd2ce1aa961845820457c9873fa92eb5ea5f1b4a | /backend/mongo.py | 77e02959f1575f33aba999c462b482e88a631ba9 | [] | no_license | janslee/crisis_predictor | 0ca8447209032c491c7e41bc4cf5a1c7ffaef7e7 | 55a135473a815c372f449b68b77bdc9810aa9efc | refs/heads/master | 2020-05-16T12:06:51.804927 | 2018-11-06T09:10:30 | 2018-11-06T09:10:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | from pymongo import MongoClient
import csv
import json
# Connection settings (unused below -- the URI passed to MongoClient
# duplicates them).
# NOTE(review): credentials are hard-coded here and in the URI; move them
# to environment variables or a config file.
connection_params = {
    'user': 'master',
    'password': 'stanford1',
    'host':'ds041167.mlab.com',
    'port':'41167',
    'namespace': 'crisis'
}
client = MongoClient('mongodb://master:stanford1@ds041167.mlab.com:41167/crisis')
db = client.crisis
# For each sentiment.csv row dated 01/01/2012: average the per-keyword
# sentiment scores and insert one document into the database.
with open('sentiment.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    for row in csv_reader:
        if row[1] == "01/01/2012":
            # SECURITY: eval() executes arbitrary code embedded in the CSV;
            # prefer json.loads / ast.literal_eval for data in a known format.
            features = eval(row[3])
            #features = json.loads(n)
            array = features["keywords"]
            sentiment = 0
            count = 0
            # Average the scores of keywords that carry a sentiment entry.
            for thing in array:
                if "sentiment" not in thing:
                    continue
                sentiment = sentiment + float(thing["sentiment"]["score"])
                count = count + 1
            if (count == 0):
                sentiment = 0
            else:
                sentiment = sentiment/count
            posts = db.posts
            post = {
                'country': row[0],
                'content': 2,
                'data': sentiment
            }
            # NOTE(review): 'sentimment' looks like a typo for 'sentiment';
            # confirm the intended collection name before renaming.
            result = db.sentimment.insert(post)
| [
"tejav@stanford.edu"
] | tejav@stanford.edu |
def sublist_max_2(profits):
    """Return the maximum sum over contiguous sublists of `profits`.

    Uses Kadane's algorithm: O(n) instead of the previous O(n^2) double
    loop, which additionally never considered single-element sublists
    (e.g. [10, -5] wrongly returned 5 instead of 10).  The empty sublist
    counts, so the result is never negative: an empty or all-negative
    input yields 0, matching the original behaviour for those inputs.
    """
    max_profit = 0
    current = 0
    for profit in profits:
        # Best sum of a sublist ending at this element.
        current = max(profit, current + profit)
        max_profit = max(max_profit, current)
    return max_profit
# Demo output; expected maxima are 15 and 8 respectively.
print(sublist_max_2([4, 3, 8, -2, -5, -3, -5, -3]))
print(sublist_max_2([2, 3, 1, -1, -2, 5, -1, -1]))
print(sublist_max_2([7, -3, 14, -8, -5, 6, 8, -5, -4, 10, -1, 8])) | [
"49581644+YONGJINJO@users.noreply.github.com"
] | 49581644+YONGJINJO@users.noreply.github.com |
ee7845df3aecfb80de476bbf727aca8a2ade8529 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R2/benchmark/startQiskit_QC118.py | 5c08e2c7132cd11baec4e6f28b5a87f2de870cc0 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,785 | py | # qubit number=3
# total number=20
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
# Filename prefix for circuit drawings (referenced by commented-out draw calls).
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit-strings position by position.

    Matches the original implementation's quirk of returning the XOR
    string in reverse order.
    """
    out = ''
    for i in range(len(s)):
        # Prepend so the final string comes out reversed.
        out = str(int(s[i]) ^ int(t[i])) + out
    return out
def bitwise_dot(s: str, t: str) -> str:
    """Return the inner product of bit-strings s and t modulo 2, as '0'/'1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the oracle O_f over n control qubits and one target qubit.

    Truth-table construction: for every n-bit input with f(input) == "1",
    X gates map that input pattern onto the all-ones state, a
    multi-controlled Toffoli flips the target, and the X gates are undone.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Flip the zero bits so this input pattern becomes all-ones...
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # ...then undo the flips.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Assemble a Bernstein-Vazirani style circuit for oracle f on n qubits.

    Qubit n is the oracle target, prepared in |1> and put into
    superposition so the oracle acts by phase kickback.  The fixed gate
    run after "circuit begin" appears to be a benchmark-specific
    perturbation (this file is a generated benchmark) -- do not assume it
    is part of the textbook algorithm.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    prog.h(input_qubit[1]) # number=1
    prog.h(input_qubit[2]) # number=17
    prog.cz(input_qubit[0],input_qubit[2]) # number=18
    prog.h(input_qubit[2]) # number=19
    prog.x(input_qubit[2]) # number=12
    prog.cx(input_qubit[0],input_qubit[2]) # number=13
    prog.h(input_qubit[1]) # number=7
    prog.cz(input_qubit[2],input_qubit[1]) # number=8
    prog.h(input_qubit[1]) # number=9
    prog.cx(input_qubit[2],input_qubit[1]) # number=4
    prog.y(input_qubit[1]) # number=14
    prog.cx(input_qubit[2],input_qubit[1]) # number=10
    prog.z(input_qubit[2]) # number=3
    prog.y(input_qubit[2]) # number=5
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate prog on the Aer statevector backend and return a dict
    mapping "|bitstring>" labels to the corresponding amplitudes."""
    state_backend = Aer.get_backend('statevector_simulator')
    statevec = execute(prog, state_backend).result()
    quantum_state = statevec.get_statevector()
    # Recover the qubit count from the state length (2 ** qubits entries).
    qubits = round(log2(len(quantum_state)))
    quantum_state = {
        "|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
        for i in range(2 ** qubits)
    }
    return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Run prog for `shots` shots on the Aer backend named `backend_str`.

    Returns the measurement counts, the labelled statevector, the most
    frequent outcome `a` (bit-reversed), and the expected value `b`.
    """
    # Q: which backend should we use?
    # get state vector
    quantum_state = get_statevector(prog)
    # get simulate results
    # provider = IBMQ.load_account()
    # backend = provider.get_backend(backend_str)
    # qobj = compile(prog, backend, shots)
    # job = backend.run(qobj)
    # job.result()
    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    # Most common bitstring, reversed to undo qiskit's qubit ordering.
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        # "state": statevec,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """011 . x + 1"""
    return bitwise_xor(bitwise_dot("011", rep), "1")
def bernstein_test_2(rep: str):
    """000 . x + 0"""
    return bitwise_xor(bitwise_dot("000", rep), "0")
def bernstein_test_3(rep: str):
    """111 . x + 1"""
    return bitwise_xor(bitwise_dot("111", rep), "1")
if __name__ == "__main__":
    # Bernstein-Vazirani instance with hidden string a="11" and bias b="1".
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot =4000
    writefile = open("../data/startQiskit_QC118.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    # Run on the real ibmq_5_yorktown device (requires saved IBMQ credentials),
    # after transpiling against the matching fake backend's coupling map.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = provider.get_backend("ibmq_5_yorktown")
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
5f1c21424845a3cf18b69179939ab7c25bf02e94 | becc4f2b55204034e4fc2260e3bba202a0d997d4 | /liam_chatbot/interact.py | dce636c795bd53b39e1f16ddc10539bb0b3bc3c2 | [
"MIT"
] | permissive | soneo1127/liambot | 0002340f6f88f9e5f18bcda3912c41180ac051fe | 3a6640c4e9af4718548c939abf002bf2a9d5fdcd | refs/heads/master | 2022-11-29T12:07:10.510763 | 2019-11-02T11:36:58 | 2019-11-02T11:36:58 | 219,141,933 | 1 | 1 | null | 2022-11-26T20:32:09 | 2019-11-02T11:06:09 | Python | UTF-8 | Python | false | false | 10,730 | py | # # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# utf-8をシェルで出力するおまじない https://qiita.com/ikuyukida/items/89e70d6516b5051dba7b
"""
import sys
import io
sys.stdin = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
"""
import logging
import random
from argparse import ArgumentParser
from itertools import chain
from pprint import pformat
import warnings
import torch
import torch.nn.functional as F
from pytorch_transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer
from liam_chatbot.train import SPECIAL_TOKENS, build_input_from_segments, add_special_tokens_
from liam_chatbot.utils import get_dataset_personalities, download_pretrained_model
import MeCab
wakati = MeCab.Tagger('-Owakati') # MeCab tokenizer in wakati (space-separated) output mode
# neo_wakati = MeCab.Tagger('-Owakati -d /usr/lib/mecab/dic/mecab-ipadic-neologd/') #追加辞書を適用
def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):
    """ Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
        Args:
            logits: logits distribution shape (vocabulary size)
            top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
            top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
                whose total probability mass is greater than or equal to the threshold top_p.
                In practice, we select the highest probability tokens whose cumulative probability mass exceeds
                the threshold top_p.
            threshold: a minimal threshold to keep logits
    """
    # NOTE: `logits` is modified in place (filtered entries set to
    # filter_value) and also returned.
    assert logits.dim() == 1  # Only work for batch size 1 for now - could update but it would obfuscate a bit the code
    top_k = min(top_k, logits.size(-1))
    if top_k > 0:
        # Remove all tokens with a probability less than the last token in the top-k tokens
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value
    if top_p > 0.0:
        # Compute cumulative probabilities of sorted tokens
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Remove tokens with cumulative probability above the threshold
        sorted_indices_to_remove = cumulative_probabilities > top_p
        # Shift the indices to the right to keep also the first token above the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        # Back to unsorted indices and set them to -infinity
        indices_to_remove = sorted_indices[sorted_indices_to_remove]
        logits[indices_to_remove] = filter_value
    indices_to_remove = logits < threshold
    logits[indices_to_remove] = filter_value
    return logits
def sample_sequence(personality, history, tokenizer, model, args, current_output=None):
    """Autoregressively sample up to args.max_length reply tokens.

    Rebuilds the model input from persona + history + partial output at
    every step, applies temperature plus top-k/top-p filtering, and stops
    once a special token is drawn (resampling it while fewer than
    args.min_length tokens have been produced).  Returns the reply token
    ids; `current_output`, when supplied, is extended in place.
    """
    special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
    if current_output is None:
        current_output = []
    for i in range(args.max_length):
        instance, _ = build_input_from_segments(personality, history, current_output, tokenizer, with_eos=False)
        input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
        token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)
        logits = model(input_ids, token_type_ids=token_type_ids)
        if isinstance(logits, tuple):  # for gpt2 and maybe others
            logits = logits[0]
        # Only the logits of the last position matter for the next token.
        logits = logits[0, -1, :] / args.temperature
        logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
        probs = F.softmax(logits, dim=-1)
        prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
        if i < args.min_length and prev.item() in special_tokens_ids:
            while prev.item() in special_tokens_ids:
                if probs.max().item() == 1:
                    warnings.warn("Warning: model generating special token with probability 1.")
                    break  # avoid infinitely looping over special token
                prev = torch.multinomial(probs, num_samples=1)
        if prev.item() in special_tokens_ids:
            break
        current_output.append(prev.item())
    return current_output
def run():
    """Interactive console chat: parse CLI args, load tokenizer/model,
    pick a random personality, then loop reading input and printing the
    sampled reply."""
    parser = ArgumentParser()
    parser.add_argument("--dataset_path", type=str, default="", help="Path or url of the dataset. If empty download from S3.")
    parser.add_argument("--dataset_cache", type=str, default='./dataset_cache', help="Path or url of the dataset cache")
    parser.add_argument("--model", type=str, default="gpt", help="Model type (gpt or gpt2)")
    parser.add_argument("--model_checkpoint", type=str, default="", help="Path, url or short name of the model")
    parser.add_argument("--max_history", type=int, default=2, help="Number of previous utterances to keep in history")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device (cuda or cpu)")
    parser.add_argument("--no_sample", action='store_true', help="Set to use greedy decoding instead of sampling")
    parser.add_argument("--max_length", type=int, default=20, help="Maximum length of the output utterances")
    parser.add_argument("--min_length", type=int, default=1, help="Minimum length of the output utterances")
    parser.add_argument("--seed", type=int, default=42, help="Seed")
    # NOTE(review): type=int with a float default -- passing --temperature on
    # the command line coerces it to int; should probably be type=float.
    parser.add_argument("--temperature", type=int, default=0.7, help="Sampling softmax temperature")
    parser.add_argument("--top_k", type=int, default=0, help="Filter top-k tokens before sampling (<=0: no filtering)")
    parser.add_argument("--top_p", type=float, default=0.9, help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__file__)
    logger.info(pformat(args))
    if args.model_checkpoint == "":
        args.model_checkpoint = download_pretrained_model()
    # Seed all RNGs for reproducible sampling.
    random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    logger.info("Get pretrained model and tokenizer")
    tokenizer_class = GPT2Tokenizer
    tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)
    model_class = GPT2LMHeadModel
    model = model_class.from_pretrained(args.model_checkpoint)
    model.to(args.device)
    add_special_tokens_(model, tokenizer)
    logger.info("Sample a personality")
    personalities = get_dataset_personalities(tokenizer, args.dataset_path, args.dataset_cache)
    personality = random.choice(personalities)
    logger.info("Selected personality: %s", tokenizer.decode(chain(*personality)))
    history = []
    while True:
        raw_text = input(">>> ")
        while not raw_text:
            print('Prompt should not be empty!')
            raw_text = input(">>> ")
        # Tokenise the Japanese input into space-separated words with MeCab
        # before BPE-encoding it.
        wakati_text = wakati.parse(raw_text).strip()
        # print(wakati_text)
        history.append(tokenizer.encode(wakati_text))
        with torch.no_grad():
            out_ids = sample_sequence(personality, history, tokenizer, model, args)
        history.append(out_ids)
        # Keep only the last max_history exchanges (pairs) plus the reply.
        history = history[-(2*args.max_history+1):]
        out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
        # Strip the wakati spaces back out for display.
        out_text = out_text.replace(" ", "")
        print(out_text)
# ---------------------------------------------------------------------------
# Module-level setup used by reply(): duplicates run()'s argument parsing and
# loads the fine-tuned "liam1" model with fixed seeds on CPU.
# NOTE(review): parsing CLI args and loading a model as an import side effect
# is fragile (breaks any importer with unrelated argv); consider moving this
# into an explicit init function.
# ---------------------------------------------------------------------------
parser = ArgumentParser()
parser.add_argument("--dataset_path", type=str, default="", help="Path or url of the dataset. If empty download from S3.")
parser.add_argument("--dataset_cache", type=str, default='./dataset_cache', help="Path or url of the dataset cache")
parser.add_argument("--model", type=str, default="gpt", help="Model type (gpt or gpt2)")
parser.add_argument("--model_checkpoint", type=str, default="", help="Path, url or short name of the model")
parser.add_argument("--max_history", type=int, default=2, help="Number of previous utterances to keep in history")
parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device (cuda or cpu)")
parser.add_argument("--no_sample", action='store_true', help="Set to use greedy decoding instead of sampling")
parser.add_argument("--max_length", type=int, default=20, help="Maximum length of the output utterances")
parser.add_argument("--min_length", type=int, default=1, help="Minimum length of the output utterances")
parser.add_argument("--seed", type=int, default=42, help="Seed")
# NOTE(review): type=int with a float default, same issue as in run().
parser.add_argument("--temperature", type=int, default=0.7, help="Sampling softmax temperature")
parser.add_argument("--top_k", type=int, default=0, help="Filter top-k tokens before sampling (<=0: no filtering)")
parser.add_argument("--top_p", type=float, default=0.9, help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
logger.info(pformat(args))
if args.model_checkpoint == "":
    args.model_checkpoint = download_pretrained_model()
random.seed(42)
torch.random.manual_seed(42)
torch.cuda.manual_seed(42)
logger.info("Get pretrained model and tokenizer")
tokenizer_class = GPT2Tokenizer
tokenizer = tokenizer_class.from_pretrained("liam_chatbot/liam1")
model_class = GPT2LMHeadModel
model = model_class.from_pretrained("liam_chatbot/liam1")
model.to("cpu")
add_special_tokens_(model, tokenizer)
logger.info("Sample a personality")
personalities = get_dataset_personalities(tokenizer, "liam_chatbot/data/liam", "liam_chatbot/dataset_cache_liam_124M")
personality = random.choice(personalities)
logger.info("Selected personality: %s", tokenizer.decode(chain(*personality)))
def reply(input_text):
    """Generate one chatbot response for *input_text* and return it.

    Relies on module-level state set up by the surrounding script:
    ``wakati`` (MeCab segmenter), ``tokenizer``, ``model``, ``args``,
    ``personality`` and ``sample_sequence``.
    """
    history = []
    # Word-segment the raw Japanese input before tokenizing.
    segmented = wakati.parse(input_text).strip()
    print(segmented)
    history.append(tokenizer.encode(segmented))
    with torch.no_grad():
        out_ids = sample_sequence(personality, history, tokenizer, model, args)
    history.append(out_ids)
    # Keep only the most recent turns, as configured by --max_history.
    history = history[-(2 * args.max_history + 1):]
    decoded = tokenizer.decode(out_ids, skip_special_tokens=True)
    # Remove the spaces introduced by word segmentation.
    return decoded.replace(" ", "")
# Entry point: launch the interactive chat loop defined earlier in this file.
if __name__ == "__main__":
run()
| [
"osone.hiroyuki.su@alumni.tsukuba.ac.jp"
] | osone.hiroyuki.su@alumni.tsukuba.ac.jp |
15fcc498298fb27365a93e3595794528564152ce | 9a2fd5e27d3f811cb18763ed388c2d56ae9907b6 | /爬虫练习/gupiao.py | ee4595015acd484f424596fda32dc78170398d30 | [] | no_license | wzc-ob/PycharmProjects | 5297ce60bade883495e5dbdb614131d31c47682e | 09f5ad6004dbdc83d456cabd78b769fde13d5357 | refs/heads/master | 2020-05-05T07:12:38.789400 | 2019-04-06T10:06:08 | 2019-04-06T10:06:08 | 179,817,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,007 | py | import re
import requests
from bs4 import BeautifulSoup
import traceback
def getHTMLText(url, code='UTF-8'):
    """Fetch *url* and return the response body decoded as *code*.

    Returns the empty string on any failure (best-effort scraper helper).
    """
    headers = {'user-agent': 'Mozilla/5.0'}
    try:
        resp = requests.get(url, headers=headers, timeout=30)
        resp.encoding = code
        return resp.text
    except:
        # Deliberately swallow every error so callers can just skip the page.
        return ""
def getStockList(lst, stockURL):
    """Append every 'sh'/'sz' + 6-digit stock code linked from *stockURL* to *lst*."""
    html = getHTMLText(stockURL, 'GB2312')
    soup = BeautifulSoup(html, 'html.parser')
    for anchor in soup.find_all('a'):
        try:
            href = anchor.attrs['href']
            lst.append(re.findall(r'[s][hz]\d{6}', href)[0])
        except:
            # Anchors without an href or without a stock code are skipped.
            continue
def getStockInfo(lst, stockURL, fpath):
    """For each code in *lst*, scrape its detail page and append one dict per line to *fpath*.

    Prints a carriage-return progress percentage as it goes; pages that fail
    to parse are logged via traceback and skipped.
    """
    done = 0
    total = len(lst)
    for code in lst:
        page_url = stockURL + code + ".html"
        print(page_url)
        html = getHTMLText(page_url)
        try:
            if html == '':
                continue
            info = {}
            soup = BeautifulSoup(html, 'html.parser')
            stockInfo = soup.find('div', attrs={'class': 'stock-bets'})
            name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]
            print(name.text.split()[0])
            info.update({'股票名称': name.text.split()[0]})
            keyList = stockInfo.find_all('dt')
            valueList = stockInfo.find_all('dd')
            # <dt>/<dd> pairs carry the per-field labels and values.
            for i in range(len(keyList)):
                info[keyList[i].text] = valueList[i].text
            with open(fpath, 'a', encoding='UTF-8') as f:
                f.write(str(info) + '\n')
            done = done + 1
            print('\r当前进度:{:.2f}%'.format(done * 100 / total), end='')
        except:
            traceback.print_exc()
            continue
def main():
    """Entry point: collect all stock codes, then scrape each one's details."""
    stock_list_url = 'http://quote.eastmoney.com/stocklist.html'
    stock_info_url = 'https://gupiao.baidu.com/stock/'
    output_file = 'E://BaiduStockInfo(1).txt'
    codes = []
    getStockList(codes, stock_list_url)
    getStockInfo(codes, stock_info_url, output_file)
main()
"43775612+wzc-ob@users.noreply.github.com"
] | 43775612+wzc-ob@users.noreply.github.com |
ed61ba463ce8957822c31da9eb498f9c5fdf53b7 | 75f13621d926b968474d1ffe0aeb40ab8394358a | /shapeEditor/tms/apps.py | 1c028b4e44e4fe1975bcf3aaf811ab8df84930d1 | [] | no_license | kamzmaseno/ShapeEditor | 2bc97c72ee76d499ea9915f5a6100677baaf5da0 | 121fef1cc39066b6d8ce3ee0a24af17581be6515 | refs/heads/master | 2021-01-20T21:07:22.968076 | 2016-06-18T12:44:02 | 2016-06-18T12:44:02 | 61,432,986 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class TmsConfig(AppConfig):
name = 'tms'
| [
"gatugimato@gmail.com"
] | gatugimato@gmail.com |
af20a58c05e5138a7719bb4e16dae03e8f9a9d7d | ee6c0f161b6f3d524a7c137b53531107ba74367e | /myapp/views.py | 1582e40ea4944acedfb486795840f5c9cf79752e | [] | no_license | prachikalbhor/quiz-app-master | 18dd37acc4796a30aba1579b16bae0634816e971 | 16ff018dc39726ddfaff63af0f357bedd5c3cd39 | refs/heads/master | 2022-04-15T17:14:57.048898 | 2020-04-14T10:24:28 | 2020-04-14T10:24:28 | 255,565,254 | 0 | 0 | null | 2020-04-14T10:22:45 | 2020-04-14T09:24:44 | JavaScript | UTF-8 | Python | false | false | 1,713 | py | from django.shortcuts import render,redirect
from .models import Questions,Review
# Create your views here.
def home(request):
    """Serve the quiz landing page."""
    return render(request, 'home.html')
def allreview(request):
    """List every submitted review."""
    all_reviews = Review.objects.all()
    return render(request, 'allreviews.html', {'reviews': all_reviews})
def test(request):
# Render the quiz (GET) or grade a submission (POST).
# NOTE(review): results are kept in module-level globals (counter, counter2,
# id_of_incorrect), so concurrent users would overwrite each other — confirm
# this app is single-user only.
question= Questions.objects.all()
list_of_id=[q.id for q in question]
global counter
counter = 0
global counter2
counter2=0
global id_of_incorrect
id_of_incorrect=[]
context={
'question':question,
}
if request.POST:
# Drop the CSRF token (first POST key), keep the answer fields.
list_of_input = list(request.POST)[1:]
list_of_input = [int(request.POST[x]) for x in list_of_input]
list_of_ans = [q.right for q in question]
# Tally correct vs incorrect answers; remember which questions were missed.
for i in range(len(list_of_input)):
if(list_of_input[i] == list_of_ans[i]):
counter=counter+1
else:
counter2=counter2+1
id_of_incorrect.append(list_of_id[i])
return redirect('review')
return render(request,'question.html',context)
def review(request):
# Show the missed questions plus score (GET) or store a user review (POST).
# Reads the module-level globals populated by test() above.
question=[]
for i in id_of_incorrect:
question.append(Questions.objects.get(id=i))
context={
'question':question,
'correct':counter,
'incorrect':counter2,
# NOTE(review): divides by counter+counter2 — raises ZeroDivisionError if
# the quiz had no questions; confirm that cannot happen.
'p':(counter2/(counter+counter2))*100
}
if request.POST:
name = request.POST['name']
review = request.POST['review']
stars=request.POST['stars']
Review.objects.create(name=name,review=review,rating=stars)
return redirect('/')
return render(request,'review.html',context)
"noreply@github.com"
] | noreply@github.com |
e3aaeaaf28467cd124290d5f46c50d80f92b0aea | 75055956f43a467ed62724683a1aa116207c12c9 | /demo/weather.py | 5ea55a0e7bce5ef7c3133782b550b5cc0efe9877 | [] | no_license | Eagle112/pyautogui | 4ea642ad52d32b00fab86a6347a9a1d293f154ab | be9ebf5325cbde30d59ffd94f427f9aa77c63070 | refs/heads/main | 2023-07-09T07:18:27.275407 | 2023-06-22T13:56:35 | 2023-06-22T13:56:35 | 355,157,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # encoding:utf-8
import requests, json
from bs4 import BeautifulSoup
#api地址
def getWeather(city):
    """Return the 7-day forecast nodes (``<p class="wea">``) for *city*.

    Looks up the weather.com.cn city code in the local ``city.json`` file,
    fetches the city's forecast page and returns the matching BeautifulSoup
    elements.

    :param city: city name used as a key into city.json.
    :returns: list of bs4 elements, one per forecast day.
    """
    # Fix: the file handle was previously opened and never closed; use a
    # context manager so it is released even if json.load raises.
    with open('city.json', 'rb') as f:
        cities = json.load(f)
    citycode = cities.get(city)
    # NOTE(review): if *city* is missing, citycode is None and the
    # concatenation below raises TypeError — confirm callers validate input.
    url = 'http://www.weather.com.cn/weather/' + citycode + '.shtml'
    response = requests.get(url)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, "lxml")
    weather_lis = soup.select('.c7d > ul li p.wea')
    # The 7-day weather entries.
    return weather_lis
# print(soup.select('.c7d > ul li p.wea')[0].string.find('雨') != -1) | [
"yinghongbin@appledeMacBook-Pro.local"
] | yinghongbin@appledeMacBook-Pro.local |
9771714fa9460a5eb150ffff0bd397386631123b | 0568572ff27fb32e1c407c27f70a850eb9c2005f | /hw-2.py | 23f4bb94b05c1d7e66b884bc3e434104586f484e | [] | no_license | ElinaKirillova/lesson2 | 8f0981d91c4764a991d9756a7755de0c59ef2e9f | 6a7d9bd231c7921f5cfbf727fc68f6b18edeb169 | refs/heads/master | 2020-05-22T00:35:57.119749 | 2017-03-25T09:04:33 | 2017-03-25T09:04:33 | 84,656,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | journals =[
{'school_class': '1a', 'scores': [5, 4, 3, 2, 5, 4, 4, 4, 5, 5, 3, 5]},
{'school_class': '1b', 'scores': [5, 5, 4, 3, 2, 5, 4, 5, 4, 3]},
{'school_class': '2a', 'scores': [5, 4, 3, 4, 3, 3, 3, 4, 5, 4, 5]}
]
def calc_mean(array):
    """Return the arithmetic mean of *array* (raises ZeroDivisionError when empty)."""
    total = sum(array)
    return total / len(array)
# Print each class's average score, then the school-wide average.
all_classes_means = 0
all_classes_count = len(journals)
for journal in journals:
    mean = calc_mean(journal['scores'])
    all_classes_means += mean
    print('%s -- %0.1f' % (journal['school_class'], mean))
school_mean = all_classes_means / all_classes_count
print('Средний балл по школе — %0.1f' % school_mean)
"asfaganova@MacBook-Air-Elina.local"
] | asfaganova@MacBook-Air-Elina.local |
61b227fb19c0098e0d8449df91b59cc77ac3049d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_142/662.py | e253a45877c6e9142d258233d25715ca05f57e07 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | if __name__ == "__main__":
with open("A-small-attempt1.in", 'r') as inputf:
outputf=open("A_out.out",'w')
line=inputf.readline()
line=line.rstrip('\n')
test_num=int(line)
for test in range(test_num):
line = inputf.readline()
line = line.rstrip('\n')
n = int(line)
analysis = [[[[]]],[[[]]]]
j = [0, 0]
for i in range(n):
line = inputf.readline()
line = line.rstrip('\n')
temp = line[0]
analysis[i][0][0]=temp
count = 0
char_c = len(line)
for char in line:
if char == temp:
count = count + 1
else:
analysis[i][j[i]].append(count)
temp = char
j[i] = j[i]+1
count = 1
analysis[i].append([temp])
char_c = char_c-1
if char_c == 0:
analysis[i][j[i]].append(count)
change = 0
pos = True
if j[0]!=j[1]:
result = "Case #%d: Fegla Won"%(test+1)
outputf.write(result)
pos = False
else:
for k in range(j[0]+1):
if analysis[0][k][0] != analysis[1][k][0]:
result = "Case #%d: Fegla Won"%(test+1)
outputf.write(result)
pos = False
break
else:
if analysis[0][k][1] > analysis[1][k][1]:
change = change + analysis[0][k][1] - analysis[1][k][1]
else:
change = change - analysis[0][k][1] + analysis[1][k][1]
if pos == True:
result = "Case #%d: %d" %(test+1, change)
outputf.write(result)
if test != test_num - 1:
outputf.write('\n')
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
797c22f5c1259641bc1dfd92fdf2388f63a890f7 | a2096f8b24db6ae3d563ccb64fd826fc70ae6b30 | /test/unit/model/v2/test_verifier_section.py | 867158fe26aa05f4efab10b58f2dcb46a27e10d0 | [
"MIT",
"CC-BY-ND-4.0"
] | permissive | kunstkomputer/molecule | 70ca3d3f0b26536e03dc2d39b1a5433d0d61963c | 3eea75f2e85efa649eda235b1f42b115e99b1a97 | refs/heads/master | 2020-03-11T04:30:21.119602 | 2018-04-23T12:21:55 | 2018-04-23T12:22:54 | 129,778,013 | 0 | 0 | MIT | 2018-04-16T17:04:36 | 2018-04-16T17:04:36 | null | UTF-8 | Python | false | false | 5,205 | py | # Copyright (c) 2015-2017 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import pytest
from molecule.model import schema_v2
@pytest.fixture
def _model_verifier_section_data():
return {
'verifier': {
'name': 'testinfra',
'enabled': True,
'directory': 'foo',
'options': {
'foo': 'bar'
},
'env': {
'FOO': 'foo',
'FOO_BAR': 'foo_bar',
},
'additional_files_or_dirs': [
'foo',
],
'lint': {
'name': 'flake8',
'enabled': True,
'options': {
'foo': 'bar',
},
'env': {
'FOO': 'foo',
'FOO_BAR': 'foo_bar',
},
},
}
}
@pytest.mark.parametrize(
'_config', ['_model_verifier_section_data'], indirect=True)
def test_verifier(_config):
assert {} == schema_v2.validate(_config)
@pytest.fixture
def _model_verifier_errors_section_data():
return {
'verifier': {
'name': int(),
'enabled': str(),
'directory': int(),
'options': [],
'env': {
'foo': 'foo',
'foo-bar': 'foo-bar',
},
'additional_files_or_dirs': [
int(),
],
'lint': {
'name': int(),
'enabled': str(),
'options': [],
'env': {
'foo': 'foo',
'foo-bar': 'foo-bar',
},
},
}
}
@pytest.mark.parametrize(
'_config', ['_model_verifier_errors_section_data'], indirect=True)
def test_verifier_has_errors(_config):
x = {
'verifier': [{
'name': ['must be of string type'],
'lint': [{
'enabled': ['must be of boolean type'],
'name': ['must be of string type'],
'env': [{
'foo': ["value does not match regex '^[A-Z0-9_-]+$'"],
'foo-bar': ["value does not match regex '^[A-Z0-9_-]+$'"],
}],
'options': ['must be of dict type'],
}],
'enabled': ['must be of boolean type'],
'env': [{
'foo': ["value does not match regex '^[A-Z0-9_-]+$'"],
'foo-bar': ["value does not match regex '^[A-Z0-9_-]+$'"],
}],
'directory': ['must be of string type'],
'additional_files_or_dirs': [{
0: ['must be of string type'],
}],
'options': ['must be of dict type'],
}]
}
assert x == schema_v2.validate(_config)
@pytest.fixture
def _model_verifier_allows_ansible_lint_section_data():
return {
'verifier': {
'name': 'testinfra',
'lint': {
'name': 'flake8',
},
}
}
@pytest.fixture
def _model_verifier_allows_goss_section_data():
return {
'verifier': {
'name': 'goss',
'lint': {
'name': 'flake8',
},
}
}
@pytest.mark.parametrize(
'_config', [
('_model_verifier_allows_ansible_lint_section_data'),
('_model_verifier_allows_goss_section_data'),
],
indirect=True)
def test_verifier_allows_name(_config):
assert {} == schema_v2.validate(_config)
@pytest.fixture
def _model_verifier_errors_invalid_section_data():
return {
'verifier': {
'name': str(),
'lint': {
'name': str(),
},
}
}
@pytest.mark.parametrize(
'_config', ['_model_verifier_errors_invalid_section_data'], indirect=True)
def test_verifier_invalid_verifier_name_has_errors(_config):
x = {
'verifier': [{
'lint': [{
'name': ['unallowed value ']
}],
'name': ['unallowed value ']
}]
}
assert x == schema_v2.validate(_config)
| [
"noreply@github.com"
] | noreply@github.com |
fced50c84cb7719556400b17068c0514cd1b5fc6 | 42132c3853a9d1a8377bd8373d15c54ee6256635 | /test_5.py | 7f7c05e6ba43efc381540e08e629800b44ba4326 | [] | no_license | Djet78/hillel_homework_repository | a767b6de504e3482600e2b7cfbc24ed46a6cc3b1 | a2709c16e34b7aea0f8e042cc578efc6e96bcefe | refs/heads/master | 2020-03-16T20:07:47.655961 | 2018-06-26T06:04:08 | 2018-06-26T06:04:08 | 132,947,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | # ----------------------- task 5 ------------------------
def nearest_to_ten(first_num, second_num):
"""
:param first_num:
:param second_num:
:return: nearest number to ten
"""
first_distance = abs(first_num - 10)
second_distance = abs(second_num - 10)
if first_distance > second_distance:
return second_num
else:
return first_num
print(nearest_to_ten(-10.1, -9.8))
print(nearest_to_ten(10.1, 9.8))
| [
"vacheslavuslistyj@gmail.com"
] | vacheslavuslistyj@gmail.com |
326f720d3f00ce6fea68425c9d1ebfbc2906b8df | 9de9bcd87e3f15f743de436d669feb979e55f005 | /timesketch/lib/analyzers/ssh_sessionizer_test.py | a432041fce5b9ee04a020c5228287f633afbcdc1 | [
"Apache-2.0"
] | permissive | jorlamd/timesketch | 97b1f08e9797837672a51bc817426ae61f5fb529 | c7704bede82747d42a8579a264d2b385b93d6dee | refs/heads/master | 2020-12-04T02:54:57.496194 | 2019-11-12T21:07:21 | 2019-11-12T21:07:21 | 230,008,261 | 0 | 0 | Apache-2.0 | 2019-12-24T22:09:17 | 2019-12-24T22:09:16 | null | UTF-8 | Python | false | false | 6,551 | py | """Tests for SSHSessionizerSketchPlugin"""
from __future__ import unicode_literals
import mock
from timesketch.lib.analyzers.ssh_sessionizer import SSHSessionizerSketchPlugin
from timesketch.lib.testlib import BaseTest
from timesketch.lib.testlib import MockDataStore
# TODO _create_mock_event will be renamed in another pull request. It's name
# should be also changed here.
from timesketch.lib.analyzers.sequence_sessionizer_test \
import _create_mock_event
# Message attributes for events that represent one mock SSH session.
one_ssh_session_args = [{
'message':
'[sshd] [1]: Connection from 1.1.1.1 port 1 on 1.1.1.1 port 1'
}, {
'message': '[sshd] [1]: Accepted certificate ID'
}]
# Message attributes for events that represent two mock SSH sessions.
many_ssh_session_args = [{
'message':
'[sshd] [1]: Connection from 1.1.1.1 port 1 on 1.1.1.1 port 1'
}, {
'message': '[sshd] [1]: Accepted certificate ID'
}, {
'message':
'[sshd] [2]: Connection from 2.2.2.2 port 2 on 2.2.2.2 port 2'
}, {
'message': '[sshd] [2]: Accepted certificate ID'
}]
# Message attributes for a SSH event that is not a connection SSH event
no_ssh_session_args = [{
'message': '[sshd] [0]: Loaded keys'
}]
class TestSSHSessionizerPlugin(BaseTest):
"""Tests the functionality of the ssh sessionizing sketch analyzer."""
@mock.patch('timesketch.lib.analyzers.interface.ElasticsearchDataStore',
MockDataStore)
def test_sessionizer(self):
"""Test basic ssh sessionizer functionality."""
index = 'test_index'
sketch_id = 1
sessionizer = SSHSessionizerSketchPlugin(index, sketch_id)
self.assertIsInstance(sessionizer, SSHSessionizerSketchPlugin)
self.assertEqual(index, sessionizer.index_name)
self.assertEqual(sketch_id, sessionizer.sketch.id)
@mock.patch('timesketch.lib.analyzers.interface.ElasticsearchDataStore',
MockDataStore)
def test_session_starts_with_connection_event(self):
"""Test a session is created if it starts with SSH connection event."""
index = 'test_index'
sketch_id = 1
sessionizer = SSHSessionizerSketchPlugin(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore, 0, 1, one_ssh_session_args)
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of ssh_session sessions created: 1'
)
session_id = '1.1.1.1_1'
#pylint: disable=unexpected-keyword-arg
event = datastore.get_event('test_index', '0', stored_events=True)
self.assertEqual(event['_source']['session_id']['ssh_session'],
session_id)
@mock.patch('timesketch.lib.analyzers.interface.ElasticsearchDataStore',
MockDataStore)
def test_all_events_from_session_are_labeled(self):
"""Test one SSH session of events is finded and allocated correctly."""
index = 'test_index'
sketch_id = 1
sessionizer = SSHSessionizerSketchPlugin(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore, 0, 2, one_ssh_session_args, [1])
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of ssh_session sessions created: 1'
)
session_id = '1.1.1.1_1'
#pylint: disable=unexpected-keyword-arg
event = datastore.get_event('test_index', '0', stored_events=True)
self.assertEqual(event['_source']['session_id']['ssh_session'],
session_id)
event = datastore.get_event('test_index', '101', stored_events=True)
self.assertEqual(event['_source']['session_id']['ssh_session'],
session_id)
@mock.patch('timesketch.lib.analyzers.interface.ElasticsearchDataStore',
MockDataStore)
def test_session_doesnt_start_with_no_connection_event(self):
"""Test a session is not created if it doesn't start with SSH connection
event."""
index = 'test_index'
sketch_id = 1
sessionizer = SSHSessionizerSketchPlugin(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore, 0, 1, no_ssh_session_args)
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of ssh_session sessions created: 0'
)
#pylint: disable=unexpected-keyword-arg
event = datastore.get_event('test_index', '0', stored_events=True)
self.assertNotIn('session_id', event['_source'])
@mock.patch('timesketch.lib.analyzers.interface.ElasticsearchDataStore',
MockDataStore)
def test_multiple_sessions(self):
"""Test multiple sessions are found and allocated correctly."""
index = 'test_index'
sketch_id = 1
sessionizer = SSHSessionizerSketchPlugin(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore,
0,
4,
many_ssh_session_args,
time_diffs=[1, 1, 1])
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of ssh_session sessions created: 2'
)
session_id_1 = '1.1.1.1_1'
session_id_2 = '2.2.2.2_2'
#pylint: disable=unexpected-keyword-arg
event = datastore.get_event('test_index', '0', stored_events=True)
self.assertEqual(event['_source']['session_id']['ssh_session'],
session_id_1)
event = datastore.get_event('test_index', '101', stored_events=True)
self.assertEqual(event['_source']['session_id']['ssh_session'],
session_id_1)
event = datastore.get_event('test_index', '202', stored_events=True)
self.assertEqual(event['_source']['session_id']['ssh_session'],
session_id_2)
event = datastore.get_event('test_index', '303', stored_events=True)
self.assertEqual(event['_source']['session_id']['ssh_session'],
session_id_2)
| [
"tomchop@gmail.com"
] | tomchop@gmail.com |
ec4edc8cbff797fbd1e94da6de3c6cd670d3a6b9 | 867fe6404f05b6153813bcc1ee83dc6511c0de97 | /python_drill2/testCode4.py | 3467c5610e32d793f3b10dc9cc03ce14c772e97a | [] | no_license | eriatlovj/The-Tech-Academy-Basic-Python-Projects | 5dc41b75252d274995c82525b32c328515ea71ef | 2887fe6b73b34d7368d99ccded173b8efe777e43 | refs/heads/master | 2020-09-06T04:01:01.680251 | 2020-02-19T18:20:14 | 2020-02-19T18:20:14 | 220,315,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | import os
def writeData():
    """Append the sample payload ('Hello world!' reversed) to test.txt."""
    payload = '!dlrow olleH'
    with open('test.txt', 'a') as out:
        out.write(payload)
def openFile():
    """Print the current contents of test.txt.

    Fix: the original called ``f.close()`` inside the ``with`` block, which is
    redundant — the context manager already closes the file on exit.
    """
    with open('test.txt', 'r') as f:
        data = f.read()
    print(data)
# Demo: append the payload, then dump the file's contents.
if __name__== "__main__":
writeData()
openFile()
| [
"56607475+eriatlovj@users.noreply.github.com"
] | 56607475+eriatlovj@users.noreply.github.com |
b3eecc48b5a6655fb0ae16960cff65aa207ed89d | a6ef13387c24c719a0dcfeb173521cd70beac282 | /devops/day4/ding_Robot.py | 8f01ac1bc53391322f4ad3edd35ab0fd70672935 | [] | no_license | youjiahe/python | f60472d61daf58b7f5bb6aa557949de4babf8c9c | 74eb4c5ba211ae5ffed2040576e5eead75d16e7d | refs/heads/master | 2020-03-31T02:35:55.787809 | 2019-12-02T16:32:54 | 2019-12-02T16:32:54 | 151,831,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | #!/usr/bin/env python3
import json
import requests
import sys
def send_msg(url, reminders, msg):
    """Send a plain-text message to a DingTalk robot webhook.

    Fix: the parameter was misspelled ``remiders``, so the body silently read
    the module-level ``reminders`` global instead of its own argument. The
    positional call site is unaffected by the rename.

    :param url: webhook URL including the access token.
    :param reminders: phone numbers to @-mention.
    :param msg: message body text.
    :returns: the raw response text from the DingTalk API.
    """
    headers = {'Content-Type': 'application/json;charset=utf-8'}
    data = {
        "msgtype": "text",  # send as plain text
        "at": {
            "atMobiles": reminders,
            "isAtAll": False,  # do not @-mention everyone
        },
        "text": {
            "content": msg,  # message body
        },
    }
    r = requests.post(url, data=json.dumps(data), headers=headers)
    return r.text
# CLI usage: python ding_Robot.py "message text"
# NOTE(review): a live access token is hard-coded in the URL below — it should
# be moved to configuration/environment and rotated.
if __name__ == '__main__':
msg = sys.argv[1]
reminders= ['13676240551']
url = 'https://oapi.dingtalk.com/robot/send?access_token=47f4ae71f59ee1624cf30a4f6a4641fac15478aeec406c7f952556906096d790'
print(send_msg(url,reminders,msg))
"youjiahe@163.com"
] | youjiahe@163.com |
27c30b4e28cd6209d4467194ca8a6e7dbac36009 | cb2c548c83aeb88a75eaa8615a2b1e0aeddb90f4 | /enterprise_exam/netease/tiaozhuzi.py | a854069592a22d85cd4a835b5569efe46804f7be | [] | no_license | SuMarsss/exam | ee961c93d3de225420bcf9b923053d510d7af5d1 | 5f387e258b43ef1b3179186cabfd87be4d3a250e | refs/heads/master | 2023-05-30T10:25:24.172703 | 2021-06-09T07:35:57 | 2021-06-09T07:35:57 | 263,010,932 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,755 | py | # T = int(input())
# # T =1
# for t in range(T):
# n,k = list(map(int, input().split()))
# h = list(map(int, input().split()))
#
# # n,k = [10,5]
# # h = "50590387 8028493 660013516 226575217 85137277 569716449 31075276 331696 27381455 503700401"
# # h = list(map(int,h.split()))
# # n,k = [5,3]
# # h = "6 2 4 3 8"
# # h = list(map(int, h.split()))
#
# if n != len(h):
# print("len not equal")
# HFlag = 0 # 超能力标志位,未使用为0,使用后置1
# slist = [(0, HFlag)]
# FindFlag = 0# 结果标志位,0输出NO,1输出YES
# count = 0
# while slist and not FindFlag: # slist栈不为空,一直循环
# i,HFlag = slist.pop(0) # 出栈
# count += 1
# # if count > 1000000000:
# # print("time out")
# # break
# mmax, Hmax = 0 , 0
# for j in range(min(i+k,n-1),i,-1):# 从当前位置遍历到k个可能位置
# # if j>=n: # 如果重复 或者超限
# # continue
# if h[j]> h[i] and h[j]>Hmax and HFlag == 0: # 使用超能力找到最后一个位置 返回YES
# if j == n-1:
# FindFlag = 1
# print("YES")
# break
# Hmax = h[j]
# slist.append((j, HFlag+1))
# elif h[j]<= h[i] and h[j]>mmax: # 小于等于 可跳
# mmax = h[j]
# slist.append((j, HFlag)) # 入栈
# if j == n-1: # 找到最后一个位置 返回YES
# FindFlag = 1
# print("YES")
# break
#
# if not FindFlag: # slit为空(遍历为所有可能)时,标志仍为0
# print("NO")
#
#
# "YES NO YES YES YES NO YES YES YES NO"
# #%%
# # t = int(input())
# t = 1
# for _ in range(t):
# # n, k = list(map(int, input().split()))
# # li = list(map(int, input().split()))
# t = "5 3"
# n, k = list(map(int, t.split()))
# h = "6 2 4 3 8"
# li = list(map(int,h.split()))
# dp = [[False, 1] for _ in range(n)]
# dp[0][0] = True
# for i in range(1, n):
# flag = False
# max1 = 0
# for j in range(max(0, i - k), i):
# if dp[j][0] == False:
# continue
# elif li[j] >= li[i]:
# dp[i] = dp[j].copy()
# max1 = max(max1, dp[j][1])
# dp[i][1] = max1
# flag = True
# elif li[j] < li[i] and not flag and dp[j][1]:
# dp[i] = dp[j].copy()
# dp[i][1] = 0
# if dp[-1][0]:
# print('YES')
# else:
# print('NO')
#%%dp
# DP solution for the "jumping pillars" problem: from pillar i you may jump up
# to k pillars forward onto any pillar of equal or lower height; once per game
# a "superpower" lets you land on a strictly higher pillar. Print YES if the
# last pillar is reachable.
#
# Fix: the original contained a stray line ``16..22`` inside the final
# ``else:`` branch, which is a syntax error and prevented the script from
# running at all; it has been removed.
T = int(input())
for t in range(T):
    n, k = list(map(int, input().split()))
    h = list(map(int, input().split()))
    # slist[i][0]: pillar i reachable; slist[i][1]: superpowers spent (0 or 1).
    slist = [[False, 1]] * n
    slist[0] = [True, 0]
    for i in range(1, n):
        # Try every pillar within jumping range behind i.
        for j in range(max(0, i - k), i):
            if slist[j][0]:
                if h[j] >= h[i]:
                    # Normal jump: keep the cheaper superpower count.
                    slist[i] = [True, min(slist[j][1], slist[i][1])]
                if h[j] < h[i] and slist[j][1] == 0:
                    # Higher pillar: only reachable by spending the superpower.
                    slist[i] = [True, min(slist[j][1] + 1, slist[i][1])]
    if slist[-1][0] == True:
        print("YES")
    else:
        print("NO")
| [
"1204568237@qq.com"
] | 1204568237@qq.com |
1f776ab59de60a4dc27a7ae37e53c3e7898ac312 | f8384e2f19b57a6783f4206c589adf8fb83d62b4 | /sensor_process.py | f798eb8762d81ec2eaedbacdf7d7405064f4c625 | [] | no_license | iandeboisblanc/Amber | 7fd69ef0800e8a05a9db37fcb853f97714b32e5a | 4e0922759179aeaa1b312ea138cc4d49eaef83e6 | refs/heads/master | 2023-04-07T05:05:50.645370 | 2019-06-09T04:58:08 | 2019-06-09T05:07:03 | 189,902,775 | 0 | 0 | null | 2023-03-31T14:41:26 | 2019-06-02T22:48:06 | Python | UTF-8 | Python | false | false | 944 | py | from multiprocessing import Process
import redis
class SensorProcess():
    """Base class that runs a sensor-reading loop in its own subprocess.

    Subclasses override :meth:`run` with the loop that reads their sensor and
    publishes values to Redis.
    """

    def __init__(self, name):
        # Used to label the spawned process for debugging.
        self.name = name

    def _make_target(self):
        """Build the callable executed inside the child process."""
        def target(redis_conn_pool):
            client = redis.Redis(connection_pool=redis_conn_pool)
            self.run(client)
        return target

    def start(self, redis_conn_pool):
        """Starts the process, triggering run() to be called in subprocess."""
        # I think we're supposed to pass mutable resources like this?
        proc = Process(
            target=self._make_target(),
            args=(redis_conn_pool,),
            name="SensorProcess-{}".format(self.name),
        )
        proc.start()

    def run(self, redis_client):
        """Main entry point for this SensorProcess.

        Subclasses should override this with the behavior they need to
        continuously read their sensors and publish values to Redis.
        """
        raise NotImplementedError
"grady.hsimon@gmail.com"
] | grady.hsimon@gmail.com |
edcb1a2c177f6634d25b679f32eaa3d10997b8ca | b6aed63c49d24b4c3e2d5be6795ecbcf0a793653 | /examples/feature_engineering/get_scdv.py | 13cd8123885cea7a8d6159052e017ea37f9643c2 | [] | no_license | sidhee-hande/nlp-recipes-ja | 713f053a3cc907a314c6575a0ce65de2b36076c9 | 8ac5e898864137841de8b03c11da34815009af24 | refs/heads/master | 2023-04-25T03:41:33.536244 | 2021-04-10T23:07:45 | 2021-04-10T23:07:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | from konoha import WordTokenizer
import neologdn
import numpy as np
from utils_nlp.dataset.livedoor import load_pandas_df
from utils_nlp.features import scdv
from utils_nlp.models.pretrained_embeddings.word2vec import load_pretrained_vectors
# Demo: build SCDV document vectors for 10 livedoor articles.
if __name__ == '__main__':
df = load_pandas_df(nrows=10)
# Normalization
df['text'] = df['text'].apply(neologdn.normalize)
tokenizer = WordTokenizer('MeCab')
# NOTE(review): each element here is a lazy map object, not a token list —
# confirm scdv.create actually consumes iterators.
docs = np.array([
map(str, tokenizer.tokenize(text)) for text in df['text']
])
print(docs.shape)
# (10,)
word_vec = load_pretrained_vectors('data')
# NOTE(review): this rebinds the name `scdv`, shadowing the imported module.
scdv = scdv.create(docs, word_vec, n_components=10)
print(scdv.shape)
# (10, 3000)
| [
"upura0@gmail.com"
] | upura0@gmail.com |
9706c26c8869c0333343a0dae2cbbd2467b37e93 | 94a2c4417c1fdd8577a75b09a17912ebae129e6c | /test/test_prop_is.py | e211fe127da287bfb4f0504b7a588929a7f6c795 | [
"MIT"
] | permissive | slavaGanzin/ramda.py | ad88a3cf6e7eb1461d4a09aad35ae1c18ca32db8 | 634bfbe0dcb300315ded327756cb3e33241589b8 | refs/heads/master | 2023-01-23T04:43:48.485314 | 2023-01-06T10:11:53 | 2023-01-06T10:11:53 | 142,413,822 | 68 | 7 | MIT | 2021-12-22T13:59:56 | 2018-07-26T08:43:31 | Python | UTF-8 | Python | false | false | 278 | py | from ramda import *
from ramda.private.asserts import *
from numbers import Number
def test_prop_is():
    """prop_is reports whether a named property is an instance of a type."""
    cases = [
        ({"x": 1, "y": 2}, True),
        ({"x": "foo"}, False),
        ({}, False),
    ]
    for obj, expected in cases:
        assert_equal(prop_is(Number, "x", obj), expected)
| [
"slava.ganzin@gmail.com"
] | slava.ganzin@gmail.com |
fbee2b55008deb726fe9493adc2b602013480099 | 3a4cea5ad919e01fc72934f6e7cb34e57ebda887 | /tests/test_pipe_seisspark_modules.py | ac99915b237f1f474ef59126298d94e1efc254f4 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | kdeyev/SeisSpark | ca2d4e4b2ea0a07ca424f8d5aef6cf22e8587b3f | 528d22143acb72e78ed310091db07eb5d731ca09 | refs/heads/master | 2022-09-05T20:42:31.122894 | 2021-09-18T14:02:51 | 2021-09-18T14:02:51 | 97,557,820 | 11 | 6 | Apache-2.0 | 2022-08-10T22:22:34 | 2017-07-18T05:50:52 | Python | UTF-8 | Python | false | false | 1,815 | py | # =============================================================================
# Copyright (c) 2021 SeisSpark (https://github.com/kdeyev/SeisSpark).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from seisspark.seisspark_context import SeisSparkContext
from seisspark_modules.sufilter import SUfilter
from seisspark_modules.suimp2d import SUimp2d, SUimp2dParams
from seisspark_modules.susort import SUsort
from su_rdd.kv_operations import gather_from_rdd_gather_tuple
def test_build_and_run_modules(seisspark_context: SeisSparkContext) -> None:
    """Smoke-test chaining SUimp2d -> SUsort -> SUfilter over one RDD."""
    n_shots = 10
    n_receivers = 5
    producer = SUimp2d(id="1", name="b")
    producer.set_paramters(SUimp2dParams(nshot=n_shots, nrec=n_receivers))
    sorter = SUsort(id="1", name="b")
    bandpass = SUfilter(id="1", name="b")
    print(bandpass.params_schema)
    # Wire the three modules into a pipeline.
    producer.init_rdd(seisspark_context, None)
    sorter.init_rdd(seisspark_context, producer.rdd)
    bandpass.init_rdd(seisspark_context, sorter.rdd)
    first_gather = gather_from_rdd_gather_tuple(bandpass.rdd.first())
    assert len(first_gather.traces) == n_receivers
    print(first_gather.traces[0].buffer)
| [
"kostya.deev@bluware.com"
] | kostya.deev@bluware.com |
765c13a3d98c4b1797f5687016e83f23d80773fb | 7b1917c526a26940bbfb4e0518719d277d5acf3d | /src/hellobioinformatics.py | 69cd5e925fe22adee1da19399c9d2aa1a8ee3793 | [] | no_license | tcloudb/Biological-Databases-Assignment-1 | e21c6316105bbb824411e81673d9380b400294ba | 4c3e68e7cf6578e4040af0d84e9f2c9d4d10bec4 | refs/heads/master | 2021-01-19T04:24:37.504146 | 2017-05-11T18:02:43 | 2017-05-11T18:02:43 | 87,369,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29 | py | print("Hello Bioinformatics") | [
"tcloudb@gmail.com"
] | tcloudb@gmail.com |
db193ead0fdb7a4c4c2e4c6e535a38d266999794 | bb4ccc87eb582a763d88671677f88052889c969e | /python/simulation/new_method_vs_old.py | a719fca3ccc824985789b43b7cc4ae72b600cdd2 | [] | no_license | victor-zheng/book | ff63e66683abb3fe654c70249e0f4d5118830999 | f9bc443d4f7f7f5abf676f89bf5d94f0b856988d | refs/heads/master | 2021-05-12T11:55:39.712245 | 2018-05-09T08:43:46 | 2018-05-09T08:43:46 | 117,399,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,374 | py | import numpy as np
import matplotlib.pyplot as plt
#user input
period_step = 100 # simulation step number in one period
period_num = 2 # how many cycles you want to calculate
Vdc = 1 # bus voltage
Vref = 1.4 # regulator output the voltage reference
#generate input variable
total_step = period_step*period_num #
pi_2 = 2*np.pi #const 2PI
pi_div_3 = np.pi/3 #const PI/3
pi_div_6 = np.pi/6
sqrt3 = np.sqrt(3)
theta_input = np.linspace(0,pi_2*period_num,total_step,endpoint=True) #
TA = np.ndarray(shape=(total_step), dtype=float)
TB = np.ndarray(shape=(total_step), dtype=float)
TC = np.ndarray(shape=(total_step), dtype=float)
TA_new = np.ndarray(shape=(total_step), dtype=float)
TB_new = np.ndarray(shape=(total_step), dtype=float)
TC_new = np.ndarray(shape=(total_step), dtype=float)
#calculate the alpha. it is
if Vref <= 0.866:
alpha = 0.0
print('linear')
elif Vref <= 1.0: #OVM1 range
y = sqrt3/4 - sqrt3/2*np.sqrt(Vref*Vref - 3/4)
x = 1 - y/sqrt3
alpha = np.arctan(y/x)
print('OVM 1')
else: #OVM2 range
y = np.sqrt(3/4*Vref*Vref - 9/16) - sqrt3/4
x = 1 + y/sqrt3
alpha = np.arctan(y/x)
gain = pi_div_6/(pi_div_6 - alpha)
offset = alpha * 0.955
print('OVM 2')
#calculate output
for i in range (0,total_step):
cycle_cnt = (int)(i/period_step)
theta_internal = theta_input[i] - (float)(cycle_cnt)*pi_2
#sector 1
if theta_internal < pi_div_3:
theta = theta_internal
T1 = Vref*(np.cos(theta) - np.sin(theta)/sqrt3)
T2 = Vref*(2/sqrt3)*np.sin(theta)
T1_T2 = T1 + T2
if Vref <= 0.866: #linear range
T1 = T1
T2 = T2
T1_new = T1
T2_new = T2
elif Vref <= 1.0: #OVM1 range
if T1_T2 > 1.0:
T1 = T1 / T1_T2
T2 = T2 / T1_T2
T1_new = T1
T2_new = T2
else: #OVM2 range
if T1 > 1.0:
T1 = 1.0
T2 = 0
T1_new = T1
T2_new = T2
elif T2 > 1.0:
T1 = 0
T2 = 1.0
T1_new = T1
T2_new = T2
else:
T1_temp = T1 / T1_T2
T1_new = (T1_temp - offset) * gain
T2_new = 1.0 - T1_new
theta_temp = (theta - alpha)*(pi_div_6/(pi_div_6 - alpha))
T1 = Vref*(np.cos(theta_temp) - np.sin(theta_temp)/sqrt3)
T2 = Vref*(2/sqrt3)*np.sin(theta_temp)
T1_T2 = T1 + T2
T1 = T1 / T1_T2
T2 = T2 / T1_T2
TA[i] =+Vdc/2*T1 + Vdc/2*T2
TB[i] =-Vdc/2*T1 + Vdc/2*T2
TC[i] =-Vdc/2*T1 - Vdc/2*T2
TA_new[i] =+Vdc/2*T1_new + Vdc/2*T2_new
TB_new[i] =-Vdc/2*T1_new + Vdc/2*T2_new
TC_new[i] =-Vdc/2*T1_new - Vdc/2*T2_new
#sector 2
elif theta_internal < 2*pi_div_3:
theta = theta_internal - pi_div_3
T1 = Vref*(np.cos(theta) - np.sin(theta)/sqrt3)
T2 = Vref*(2/sqrt3)*np.sin(theta)
T1_T2 = T1 + T2
if Vref <= 0.866:
T1 = T1
T2 = T2
T1_new = T1
T2_new = T2
elif Vref <= 1.0:
if T1_T2 > 1:
T1 = T1 / T1_T2
T2 = T2 / T1_T2
T1_new = T1
T2_new = T2
else:
if T1 > 1.0:
T1 = 1.0
T2 = 0
T1_new = T1
T2_new = T2
elif T2 > 1.0:
T1 = 0
T2 = 1.0
T1_new = T1
T2_new = T2
else:
T1_temp = T1 / T1_T2
T1_new = (T1_temp - offset) * gain
T2_new = 1.0 - T1_new
theta_temp = (theta - alpha)*(pi_div_6/(pi_div_6 - alpha))
T1 = Vref*(np.cos(theta_temp) - np.sin(theta_temp)/sqrt3)
T2 = Vref*(2/sqrt3)*np.sin(theta_temp)
T1_T2 = T1 + T2
T1 = T1 / T1_T2
T2 = T2 / T1_T2
TA[i] =+Vdc/2*T1 - Vdc/2*T2
TB[i] =+Vdc/2*T1 + Vdc/2*T2
TC[i] =-Vdc/2*T1 - Vdc/2*T2
TA_new[i] =+Vdc/2*T1_new - Vdc/2*T2_new
TB_new[i] =+Vdc/2*T1_new + Vdc/2*T2_new
TC_new[i] =-Vdc/2*T1_new - Vdc/2*T2_new
#sector 3
elif theta_internal < 3*pi_div_3:
theta = theta_internal - 2*pi_div_3
T1 = Vref*(np.cos(theta) - np.sin(theta)/sqrt3)
T2 = Vref*(2/sqrt3)*np.sin(theta)
T1_T2 = T1 + T2
if Vref <= 0.866:
T1 = T1
T2 = T2
T1_new = T1
T2_new = T2
elif Vref <= 1.0:
if T1_T2 > 1:
T1 = T1 / T1_T2
T2 = T2 / T1_T2
T1_new = T1
T2_new = T2
else:
if T1 > 1.0:
T1 = 1.0
T2 = 0
T1_new = T1
T2_new = T2
elif T2 > 1.0:
T1 = 0
T2 = 1.0
T1_new = T1
T2_new = T2
else:
T1_temp = T1 / T1_T2
T1_new = (T1_temp - offset) * gain
T2_new = 1.0 - T1_new
theta_temp = (theta - alpha)*(pi_div_6/(pi_div_6 - alpha))
T1 = Vref*(np.cos(theta_temp) - np.sin(theta_temp)/sqrt3)
T2 = Vref*(2/sqrt3)*np.sin(theta_temp)
T1_T2 = T1 + T2
T1 = T1 / T1_T2
T2 = T2 / T1_T2
TA[i] =-Vdc/2*T1 - Vdc/2*T2
TB[i] =+Vdc/2*T1 + Vdc/2*T2
TC[i] =-Vdc/2*T1 + Vdc/2*T2
TA_new[i] =-Vdc/2*T1_new - Vdc/2*T2_new
TB_new[i] =+Vdc/2*T1_new + Vdc/2*T2_new
TC_new[i] =-Vdc/2*T1_new + Vdc/2*T2_new
#sector 4
elif theta_internal < 4*pi_div_3:
theta = theta_internal - 3*pi_div_3
T1 = Vref*(np.cos(theta) - np.sin(theta)/sqrt3)
T2 = Vref*(2/sqrt3)*np.sin(theta)
T1_T2 = T1 + T2
if Vref <= 0.866:
T1 = T1
T2 = T2
T1_new = T1
T2_new = T2
elif Vref <= 1.0:
if T1_T2 > 1:
T1 = T1 / T1_T2
T2 = T2 / T1_T2
T1_new = T1
T2_new = T2
else:
if T1 > 1.0:
T1 = 1.0
T2 = 0
T1_new = T1
T2_new = T2
elif T2 > 1.0:
T1 = 0
T2 = 1.0
T1_new = T1
T2_new = T2
else:
T1_temp = T1 / T1_T2
T1_new = (T1_temp - offset) * gain
T2_new = 1.0 - T1_new
theta_temp = (theta - alpha)*(pi_div_6/(pi_div_6 - alpha))
T1 = Vref*(np.cos(theta_temp) - np.sin(theta_temp)/sqrt3)
T2 = Vref*(2/sqrt3)*np.sin(theta_temp)
T1_T2 = T1 + T2
T1 = T1 / T1_T2
T2 = T2 / T1_T2
TA[i] =-Vdc/2*T1 - Vdc/2*T2
TB[i] =+Vdc/2*T1 - Vdc/2*T2
TC[i] =+Vdc/2*T1 + Vdc/2*T2
TA_new[i] =-Vdc/2*T1_new - Vdc/2*T2_new
TB_new[i] =+Vdc/2*T1_new - Vdc/2*T2_new
TC_new[i] =+Vdc/2*T1_new + Vdc/2*T2_new
#sector 5
elif theta_internal < 5*pi_div_3:
theta = theta_internal - 4*pi_div_3
T1 = Vref*(np.cos(theta) - np.sin(theta)/sqrt3)
T2 = Vref*(2/sqrt3)*np.sin(theta)
T1_T2 = T1 + T2
if Vref <= 0.866:
T1 = T1
T2 = T2
T1_new = T1
T2_new = T2
elif Vref <= 1.0:
if T1_T2 > 1:
T1 = T1 / T1_T2
T2 = T2 / T1_T2
T1_new = T1
T2_new = T2
else:
if T1 > 1.0:
T1 = 1.0
T2 = 0
T1_new = T1
T2_new = T2
elif T2 > 1.0:
T1 = 0
T2 = 1.0
T1_new = T1
T2_new = T2
else:
T1_temp = T1 / T1_T2
T1_new = (T1_temp - offset) * gain
T2_new = 1.0 - T1_new
theta_temp = (theta - alpha)*(pi_div_6/(pi_div_6 - alpha))
T1 = Vref*(np.cos(theta_temp) - np.sin(theta_temp)/sqrt3)
T2 = Vref*(2/sqrt3)*np.sin(theta_temp)
T1_T2 = T1 + T2
T1 = T1 / T1_T2
T2 = T2 / T1_T2
TA[i] =-Vdc/2*T1 + Vdc/2*T2
TB[i] =-Vdc/2*T1 - Vdc/2*T2
TC[i] =+Vdc/2*T1 + Vdc/2*T2
TA_new[i] =-Vdc/2*T1_new + Vdc/2*T2_new
TB_new[i] =-Vdc/2*T1_new - Vdc/2*T2_new
TC_new[i] =+Vdc/2*T1_new + Vdc/2*T2_new
#sector 6
else:
theta = theta_internal - 5*pi_div_3
T1 = Vref*(np.cos(theta) - np.sin(theta)/sqrt3)
T2 = Vref*(2/sqrt3)*np.sin(theta)
T1_T2 = T1 + T2
if Vref <= 0.866:
T1 = T1
T2 = T2
T1_new = T1
T2_new = T2
elif Vref <= 1.0:
if T1_T2 > 1:
T1 = T1 / T1_T2
T2 = T2 / T1_T2
T1_new = T1
T2_new = T2
else:
if T1 > 1.0:
T1 = 1.0
T2 = 0
T1_new = T1
T2_new = T2
elif T2 > 1.0:
T1 = 0
T2 = 1.0
T1_new = T1
T2_new = T2
else:
T1_temp = T1 / T1_T2
T1_new = (T1_temp - offset) * gain
T2_new = 1.0 - T1_new
theta_temp = (theta - alpha)*(pi_div_6/(pi_div_6 - alpha))
T1 = Vref*(np.cos(theta_temp) - np.sin(theta_temp)/sqrt3)
T2 = Vref*(2/sqrt3)*np.sin(theta_temp)
T1_T2 = T1 + T2
T1 = T1 / T1_T2
T2 = T2 / T1_T2
TA[i] =+Vdc/2*T1 + Vdc/2*T2
TB[i] =-Vdc/2*T1 - Vdc/2*T2
TC[i] =+Vdc/2*T1 - Vdc/2*T2
TA_new[i] =+Vdc/2*T1_new + Vdc/2*T2_new
TB_new[i] =-Vdc/2*T1_new - Vdc/2*T2_new
TC_new[i] =+Vdc/2*T1_new - Vdc/2*T2_new
#output
plt.plot(theta_input,TB,color='red',linewidth=1,linestyle='-')
plt.plot(theta_input,TA_new,color='blue',linewidth=1,linestyle='-')
#plt.plot(theta_input,TC,color='green',linewidth=1,linestyle='-')
plt.show()
| [
"yi@surface"
] | yi@surface |
d70d328ae9c6d2d57d96e74cc9e2196f7104bcb4 | 69b757e0abc78c52befee2940a6607cdd76e1438 | /Lab 03 -- The Wave Equation - Steady State and Resonance/src/problem 3-4b part ii.py | 69e84f1d84c083525ba38320a0716970a4e62600 | [] | no_license | y624745579/python-for-scientific-computing | 71d4235ed967ebe166779c3e75c6458ac6a12da1 | e452e5c3ec666bbd3f6b6c8e834e41b788848dd6 | refs/heads/master | 2023-03-15T11:52:31.122953 | 2019-12-16T19:30:53 | 2019-12-16T19:30:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 11:13:18 2019
@author: dboyce5
"""
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
#we wish to solve the equation g''(x) = (-mu * omega**2 / T) * g(x) as an eigenvalue problem
# we will solve it in the form A g = lambda B g, where A is a matrix describing the linear operation (the second derivative) acting on g in the lhs of the equation, lambda is the eigenvalues, and B is an identity matrix with the endpoints made to be zero so that we are solving a generalized eigenvalue problem.
#some constants
mu = 0.003
T = 127
# two boundary conditions
a = 0 #far left x coordinate of boundary condition for g(x); g(a) = 0
L = 1.2 #far right x coordinate of boundary condition for g(x); g'(L) = 2*g(L)
N = 30 #number of grid points we wish to use
x,h = np.linspace(a,L,N,retstep = True)
#now we'll define the A and B matrices
A = np.zeros((N,N))
A[0,0] = 1
A[-1,-1] = -2 + 3/(2*h)
A[-1,-2] = (-2)/h
A[-1,-3] = 1/(2*h)
for n in range(1,N-1):
A[n,n-1] = h**(-2)
A[n,n] = -2 * h**(-2)
A[n,n+1] = h**(-2)
B = np.identity(N)
B[0,0] = 0
B[-1,-1] = 0
#now we will find the eigenvalues and vectors of this problem
vals, vecs = la.eig(A,B)
#because lambda = (-mu * omega**2 / T), we can find omega (the eigenfrequencies)
omega = np.sqrt(-T * np.real(vals) / mu)
# now we'll sort the eigenvalues and eigenvectors
ind = np.argsort(omega)
omega=omega[ind]
vecs = vecs[:,ind]
plt.figure()
plt.plot(x,vecs[:,0])
plt.plot(x,vecs[:,1])
plt.plot(x,vecs[:,2])
plt.show()
| [
"danielwboyce@gmail.com"
] | danielwboyce@gmail.com |
81e43dc7f6f67c70626ca1cefee9b9d5d50febca | 430d2da1bd471d0e3c9b78cc39103b0f5480d923 | /myblog/myblog/urls.py | 195af32e35bd95500fca9237a9aaed6c20cc42e7 | [] | no_license | bigharshrag/sm-website | 9d7feb5aaf43ab7729b88f6052aca787c4c0d46f | 7050adc05572089e4c0c9a807d9e4414a2d3b9b3 | refs/heads/master | 2016-08-11T21:22:11.327662 | 2016-03-06T21:23:30 | 2016-03-06T21:23:30 | 44,806,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | """myblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),
url(r'', include('blog.urls')),
]
| [
"garg.rishabh22@gmail.com"
] | garg.rishabh22@gmail.com |
dc7407be2eac74329684c8bbfcf4fcb7843780dd | d7388b0405dc60189203d062ee359a66d718449e | /FCG/online/PlayerClient.py | 04fc22a0c6f5c6eed175bfca3ab339a5c0def06a | [] | no_license | Defratino/FCG | 6032dda72f5660e55bb2d30b003f37cf9811011e | fde6a39635e525a31aebdf513d74fbc98b852b21 | refs/heads/master | 2020-03-12T06:03:48.947757 | 2018-04-23T13:40:14 | 2018-04-23T13:40:14 | 130,477,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,555 | py | import pygame
import socket
import threading
import Client
import online_constants
import display_constants
import colors
class PlayerClient(threading.Thread):
def __init__(self, ip, port, client_id):
threading.Thread.__init__(self)
self.client = Client.Client(ip, port, client_id)
self.game_display = None
self.clock = None
def run(self):
self.pygame_starter()
self.input_client()
def input_client(self):
to_send = pygame.key.get_pressed()
try:
while to_send != online_constants.END_OF_COMMUNICATION:
self.pygame_update()
to_send = pygame.key.get_pressed()
if to_send[pygame.K_x]:
self.client.update(online_constants.END_OF_COMMUNICATION)
else:
self.client.update(str(to_send))
self.client.socket.close()
except socket.error:
self.client.socket.close()
def pygame_starter(self):
pygame.init()
self.game_display = pygame.display.set_mode((display_constants.DISPLAY_WIDTH, display_constants.DISPLAY_HEIGHT))
self.clock = pygame.time.Clock()
def pygame_update(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.client.socket.close()
pygame.quit()
quit()
self.game_display.fill(colors.BACKGROUND)
'''
Dear Daniel,
self.client.last_data is where all data from all players (including you) is stored, as a string.
Typical one would look like this:
1:a#2:b#
There are 2 clients (1 and 2 are their ids), 1 is saying a, and 2 is saying b.
Players are separated with online_constants.PLAYER_SEPERATOR and the ids are separated from the data with
online_constants.ID_SEPERATOR.
Currently, if you run the Server.py file and this file together you will see printed the keys you press, if you
press x (the key), the server will disconnect and so will you.
Take a look in the main() function in this file to see how to start a player.
The data from the server is the pygame.key.get_pressed() of each player, use it to make other players move on
screen, because i don't wanna do it.
Take a look at the code in this class to see what is being called where, and please don't fuck this up.
Suck a fat one,
Ely.
'''
dtime = self.clock.tick(display_constants.FPS)
pygame.display.update()
def _input_client(self):
to_send = ''
try:
while to_send != online_constants.END_OF_COMMUNICATION:
to_send = raw_input()
self.client.update(to_send)
print self.client.last_data
self.client.socket.close()
except socket.error:
print 'Disconnected from server'
self.client.socket.close()
def _old_client(self):
chars = ['a', 'b', 'c', 'd', 'e', 'f', online_constants.END_OF_COMMUNICATION]
for c in chars:
self.client.update(c)
print self.client.last_data
self.client.socket.close()
def main():
pc = PlayerClient('127.0.0.1', online_constants.PORT, 2)
pc.start()
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | noreply@github.com |
a0f654a97c55a0d9d61f08e7bef2078a6a016bdc | 2aa00a04b1132159155879ce339cb9c491b85ee1 | /Module2/hop/musicstore/payment/views.py | ee471defc05a4ea8489d071892fb41973bc99756 | [] | no_license | datphan126/CS260_Django | 187eefb6fbd6c486f772f26e7a3ec421568deb20 | 49573ad24b120f78d38571971c852d8c690782cb | refs/heads/master | 2020-08-07T05:25:02.655628 | 2020-01-30T03:13:16 | 2020-01-30T03:13:16 | 213,311,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | from django.shortcuts import render
# Create your views here.
# Set default payment type to creditcard
def pay(request, payment_type="Creditcard",discount=0):
# 'city' has value 'Seattle' or '' if city not in url
# 'zipcode' has value '98101' or '' if zipcode not in url
city = request.GET.get('city', 'N/A')
zipcode = request.GET.get('zipcode', 'N/A')
# Create an address dictionary
address = {'city':city, 'zipcode':zipcode}
return render(request,'payment/pay.html', {'address': address, 'payment_type' : payment_type, 'discount': discount})
def help(request):
return render(request,'payment/help.html')
| [
"dphant126@gmail.com"
] | dphant126@gmail.com |
d82ad526feef8c98d335aa9b18ab22ab1b55e45c | 982194d558b20c32c70ae22bd1935f1728321c56 | /download.py | f98197d588495bb7a8bb01b16cfac373cefeef40 | [] | no_license | maconel/stockwatcher4py | 96502e9f7dca40c348d45ea23fd379915979aea7 | 12847d07283fd4acb82d8be4b399e2c55a795e27 | refs/heads/master | 2016-09-06T14:25:12.764616 | 2008-11-04T02:42:41 | 2008-11-04T02:42:41 | 32,499,523 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 546 | py | # -*- coding: gb2312 -*-
import httplib
#传入股票的代码或简写或名字,下载包含股票信息的html页面,返回html页面的内容。
def download(stockSymbols):
try:
url = "/p/pl.php?sc=1&st=1&code="
for symbol in stockSymbols:
url = url + symbol + ","
httpConn = httplib.HTTPConnection("stock.business.sohu.com")
httpConn.request("GET", url)
resp = httpConn.getresponse()
return resp.read()
except httplib.socket.error:
return None
| [
"lenocam@528a26e0-fc4f-0410-9ef4-f340f0ff023a"
] | lenocam@528a26e0-fc4f-0410-9ef4-f340f0ff023a |
73a95223af8b6bcebbae53588c38d1b3ee61928e | 14722f3f263018d11c83634ad27a5995b646a137 | /web-crawling/recursive-crawler/myproj/myproj/middlewares.py | e21161770b57182e657e45dbcbcf456c6dc84a9e | [
"MIT"
] | permissive | ayushnagar123/Data-Science | 509046e963c6fe1e29b2f7d957ef02ce545db6d4 | 22fa3a2d2eee1adaf4be51663b61bcae587cfe21 | refs/heads/master | 2020-06-28T16:10:45.626000 | 2020-03-29T00:24:12 | 2020-03-29T00:24:12 | 200,277,441 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,597 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class MyprojSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class MyprojDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"an431999@gmail.com"
] | an431999@gmail.com |
ce4ca846b3f0da4e32b60528ef313c47c6527e6a | 4376b6dcd01c12844c27916effc0448826605828 | /tools/x86prime.py | 5eed0229c9e8f59aae23401c5838f1139801e5d2 | [] | no_license | PtxDK/compSys-e2018-pub | 5111fcfbe12b12e56d90da1a58c9de5b0683af23 | 79fd8c830562fbbaa3ae8dee860b8ac3f28e2d0b | refs/heads/master | 2020-03-31T08:52:24.537821 | 2018-10-08T07:35:02 | 2018-10-08T07:35:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,132 | py | #!/usr/bin/env python3
import sys, re, os, mimetypes, argparse, requests
# -f <name of file> translates and assembles file
# -txl transform gcc output to x86prime
# -asm assemble x86prime into byte stream
# -list list (transformed and/or assembled) program
# -show show each simulation step (requires -run)
# -tracefile <name of file> create a trace file for later verification (requires -run)
# -run <name of function> starts simulation at indicated function (requires -asm)
parser = argparse.ArgumentParser(description='Transform gcc output to x86\', assemble and simulate.')
parser.add_argument('-f', dest='file',
help='translates and assembles file')
parser.add_argument('-asm', dest='asm', action='store_const',
const=True, default=False,
help='assemble x86prime into byte stream')
parser.add_argument('-txl', dest='txl', action='store_const',
const=True, default=False,
help='transform gcc output to x86prime')
parser.add_argument('-list', dest='list', action='store_const',
const=True, default=False,
help='list (transformed and/or assembled) program')
parser.add_argument('-show', dest='show', action='store_const',
const=True, default=False,
help='show each simulation step (requires -run)')
parser.add_argument('-tracefile', dest='tracefile',
help='create a trace file for later verification (requires -run)')
parser.add_argument('-run', dest='procedure',
help='starts simulation at indicated procedure (requires -asm)')
args = parser.parse_args()
if args.file==None :
print("Program needs an input file.\n")
parser.print_help()
exit()
extensions = args.file.split(".")
fileextension = extensions[-1]
if fileextension != "s":
print("The input is expected to be a assembler program; fileextension 's'.\n")
exit()
if args.file==None :
print("Program needs an input file.\n")
parser.print_help()
exit()
if not os.path.isfile(args.file):
print("Input file does not exist: "+args.file+"\n")
exit()
file = open(args.file, 'r')
args.fileCont = file.read()
file.close()
# x86prime Online location
URL = "http://topps.diku.dk/compsys/x86prime.php"
# defining a params dict for the parameters to be sent to the API
DATA = {'file':args.fileCont, 'txl':args.txl, 'asm':args.asm, 'list':args.list, 'show':args.show, 'tracefile':args.tracefile, 'procedure':args.procedure}
# sending get request and saving the response as response object
r = requests.post(url = URL, data = DATA)
URLDIR = "http://topps.diku.dk/compsys/x86prime_runs/"
# extracting data in json format
runid = r.text
error = requests.get(url = URLDIR+runid+".error")
output = requests.get(url = URLDIR+runid+".out")
if error.text != "":
print(error.text)
exit()
else:
output = requests.get(url = URLDIR+runid+".out")
print(output.text)
if args.tracefile != None:
trace = requests.get(url = URLDIR+runid+".trace")
file = open(args.tracefile, 'w')
args.fileCont = file.write(trace.text)
file.close()
| [
"kirkedal@acm.org"
] | kirkedal@acm.org |
a9c16ccb95144821a4c1c4ca84a2f1682c2c148a | 9c6411a79d0352d2e03e389902f7e5bc1036afa9 | /lgb/src/0_data_preprocess.py | ec80ca4658c396e19012287bab6abd2a4230b438 | [] | no_license | zhenghaoj/OGeek-1 | b426336306bb00e47e5633e2a2c61e262f7b4f98 | fb2a8b8c43509a4bffbf0d16089b8ab436cee257 | refs/heads/master | 2020-04-01T16:18:50.521634 | 2018-10-16T23:27:55 | 2018-10-16T23:27:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,332 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import sys
import json
import time
import logging
import pandas as pd
import numpy as np
sys.path.append("conf")
import config
sys.path.append("tool")
import nlp_basic
logging.basicConfig(
level = logging.INFO,
format = "[%(asctime)s] %(message)s",
datefmt = "%Y-%m-%d %H:%M:%S",
)
all_segments = set()
def get_segment(text, tag):
segs = list(nlp_basic.segment(text))
if tag == "train":
for seg in segs:
if seg not in all_segments:
all_segments.add(seg)
return "|".join(segs)
def move_useless_char(s):
return re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[+??!,。??、~@#¥%……&*()]+", "", s)
def parse_query_prediction(query_prediction, data_tag):
if query_prediction == "":
return 0, "\t".join(["PAD"] * 10), "\t".join(['0.000'] * 10)
json_data = json.loads(query_prediction)
result = sorted(json_data.items(), key=lambda d:d[1], reverse = True)
texts = [get_segment(move_useless_char(item[0]), data_tag) for item in result]
scores = [item[1] for item in result]
n = len(texts)
return n, "\t".join(texts + ["PAD"]*(10-n)), "\t".join(scores + ['0.000']*(10-n))
def load_data(input_file, output_file, data_tag):
with open(output_file, 'w') as fo:
fo.write("\t".join([
"prefix", "title", "tag", "label", "flag", "num",
"\t".join(["text_"+str(i+1) for i in range(10)]),
"\t".join(["score_"+str(i+1) for i in range(10)])
]))
fo.write("\n")
with open(input_file, 'r', encoding="utf-8") as f:
for line in f:
line = line.strip()
if not line:
continue
if data_tag == "test":
prefix, query_prediction, title, tag = line.split("\t")
label = "-1"
else:
prefix, query_prediction, title, tag, label = line.split("\t")
prefix = move_useless_char(prefix)
title = move_useless_char(title)
n, prediction_text, prediction_score = parse_query_prediction(query_prediction, data_tag)
fo.write("\t".join([
get_segment(prefix, data_tag), get_segment(title, data_tag), tag, label, data_tag, str(n), prediction_text, prediction_score
]))
fo.write("\n")
def get_word_vector():
logging.info("load word vectors ...")
with open(config.WORD_VECTORS_FILE, 'w') as fo:
with open(config.ORI_WORD_VECTORS_FILE, 'r', encoding="utf-8") as f:
for line in f:
line = line.strip()
if not line:
continue
tokens = line.split()
word = tokens[0]
if word in all_segments:
fo.write(line)
fo.write("\n")
def merge_data():
train_data = pd.read_csv(config.TRAIN_DATA_FILE, sep="\t", encoding="utf-8", low_memory=False)
valid_data = pd.read_csv(config.VALID_DATA_FILE, sep="\t", encoding="utf-8", low_memory=False)
test_data = pd.read_csv(config.cTEST_DATA_FILE, sep="\t", encoding="utf-8", low_memory=False)
def main():
data_type = "test_A"
time_point = time.time()
if data_type == "test_A":
logging.info("load train data ...")
load_data(config.ORI_TRAIN_DATA_FILE, config.TRAIN_DATA_FILE, "train")
logging.info("load train data cost time: "+str(time.time()-time_point))
logging.info("load valid data ...")
time_point = time.time()
load_data(config.ORI_VALID_DATA_FILE, config.VALID_DATA_FILE, "valid")
logging.info("load valid data cost time: "+str(time.time()-time_point))
logging.info("load test data ...")
time_point = time.time()
load_data(config.ORI_TEST_DATA_FILE, config.TEST_DATA_FILE, "test")
logging.info("load test data cost time: "+str(time.time()-time_point))
logging.info("get word vector ...")
time_point = time.time()
get_word_vector()
logging.info("get word vector cost time: "+str(time.time()-time_point))
elif data_type == "test_B":
logging.info("load test B data ...")
time_point = time.time()
load_data(config.ORI_TEST_B_DATA_FILE, config.TEST_B_DATA_FILE, "test")
logging.info("load test B data cost time: "+str(time.time()-time_point))
else:
logging.error("wrong data_type!")
train_data = pd.read_csv(config.TRAIN_DATA_FILE, sep="\t", encoding="utf-8", low_memory=False)
valid_data = pd.read_csv(config.VALID_DATA_FILE, sep="\t", encoding="utf-8", low_memory=False)
if data_type == "test_A":
test_data = pd.read_csv(config.TEST_DATA_FILE, sep="\t", encoding="utf-8", low_memory=False)
elif data_type == "test_B":
test_data = pd.read_csv(config.TEST_B_DATA_FILE, sep="\t", encoding="utf-8", low_memory=False)
else:
logging.error("wrong data_type!")
data = pd.concat([train_data, valid_data, test_data])
data.to_csv(config.ORI_DATA_FILE, sep="\t", index=False, encoding="utf-8")
logging.info("done!")
if __name__ == "__main__":
main()
| [
"yangxiaohan@xiaomi.xom"
] | yangxiaohan@xiaomi.xom |
43d6be02ad53d10fe603d86bb6d22aa3a1315b94 | 7b1ca1f076afd2ae6f3630db892a8312b7a4a4f3 | /CivitasScrapers/providers/CivitasScrapers/en/hosters/bs.py | a1d62d7597c89269d4c5d6196eb0cadeccda899a | [] | no_license | WeedOverfeed/ScraperPackages | 743066bb3be0e843e193210540042b18e8e3d6b7 | 5626b1f14f3bbae10ba69d0cdd40036819ef997f | refs/heads/master | 2020-04-26T14:58:02.356732 | 2019-03-03T22:27:04 | 2019-03-03T22:27:04 | 173,632,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,016 | py | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import re
import json
import urlparse
from providerModules.CivitasScrapers import cache
from providerModules.CivitasScrapers import cleantitle
from providerModules.CivitasScrapers import client
from providerModules.CivitasScrapers import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['bs.to']
self.base_link = 'https://www.bs.to/'
self.api_link = 'api/%s'
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = self.__search([localtvshowtitle] + source_utils.aliases_to_array(aliases), year)
if not url and tvshowtitle != localtvshowtitle: url = self.__search([tvshowtitle] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url: return
return url + "%s/%s" % (season, episode)
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
j = self.__get_json(url)
j = [i for i in j['links'] if 'links' in j]
j = [(i['hoster'].lower(), i['id']) for i in j]
j = [(re.sub('hd$', '', i[0]), i[1], 'HD' if i[0].endswith('hd') else 'SD') for i in j]
j = [(i[0], i[1], i[2]) for i in j]
for hoster, url, quality in j:
valid, hoster = source_utils.is_host_valid(hoster, hostDict)
if not valid: continue
sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': ('watch/%s' % url), 'direct': False, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
try: return self.__get_json(url)['fullurl']
except: return
def __get_json(self, api_call):
try:
headers = bs_finalizer().get_header(api_call)
result = client.request(urlparse.urljoin(self.base_link, self.api_link % api_call), headers=headers)
return json.loads(result)
except:
return
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = cache.get(self.__get_json, 12, "series")
r = [(i.get('id'), i.get('series')) for i in r]
r = [(i[0], i[1], re.findall('(.+?) \((\d{4})\)?', i[1])) for i in r if cleantitle.get(i[1]) in t]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if i[2] in y][0]
return 'series/%s/' % r
except:
return
#############################################################
import sys
import time
import json as j
import base64 as l1
import hmac as l11ll1
import hashlib as l1ll
class bs_finalizer:
    # Deliberately obfuscated helper that builds the authentication header for
    # the API. The aliased imports above show what it relies on: base64 (l1),
    # hmac (l11ll1), hashlib (l1ll), json (j), sys and time. l1111() decodes
    # the obfuscated unicode literals into plain strings at runtime.
    def __init__(self):
        self.l1lll1 = sys.version_info[0] == 2  # True when running on Python 2
        self.l11 = 26
        self.l1l1l1 = 2048  # base code-point offset used by the decoder
        self.l11l = 7       # rolling modulus used by the decoder
        self.l1l1 = False
        try:
            # decoded at runtime; presumably token name / HMAC secret — TODO confirm
            self.l11l1l = self.l1111(u"ࡸࡪࡾ࡞ࡨ࡙ࡹࡲࡘࡻ࠶ࡒࡈࡣࡵࡧࡈࡍࡪࡸࡻࡆࡈࡷࡩࡲࡉ࡙ࡃ࠼ࡴࡩ࠳")
            self.l1l111 = self.l1111(u"ࡋࡪࡩࡱࡰࡼࡹ࠸ࡱࡳ࠵ࡷ࠳ࡋࡽ࠶ࡌ࠸ࡥ࠷ࡹࡔ࠺ࡨࡤࡵࡩࡿ࠳")
        except:
            pass
    def l1111(self, ll):
        # String decoder: the final character carries a rotation value; each
        # remaining character is shifted down by self.l1l1l1 plus a
        # position-dependent offset modulo self.l11l.
        l1ll11 = ord(ll[-1]) - self.l1l1l1
        ll = ll[:-1]
        if ll:
            l111l1 = l1ll11 % len(ll)
        else:
            l111l1 = 0
        if self.l1lll1:
            l111 = u''.join([unichr(ord(l1111l) - self.l1l1l1 - (l1l11l + l1ll11) % self.l11l) for l1l11l, l1111l in
                             enumerate(ll[:l111l1] + ll[l111l1:])])
        else:
            # NOTE(review): this non-Python-2 branch also calls unichr, which
            # does not exist on Python 3 — as written the class only works on
            # Python 2; confirm before porting.
            l111 = ''.join([unichr(ord(l1111l) - self.l1l1l1 - (l1l11l + l1ll11) % self.l11l) for l1l11l, l1111l in
                            enumerate(ll[:l111l1] + ll[l111l1:])])
        if self.l1l1:
            return str(l111)
        else:
            return l111
    def get_header(self, string):
        # Returns the two decoded header fields expected by the API.
        return {self.l1111(u"ࡄࡖ࠱࡙ࡵࡦࡰࠥ"): self.l111ll(string), self.l1111(u"ࡘࡷࡪࡸ࠭ࡂࡩࡨࡲࡹࠦ"): self.l1111(u"ࡧࡹࡢࡰࡧࡶࡴࡤ࠽")}
    def l111ll(self, l1lll):
        # Builds the token: a base64-encoded JSON object containing the decoded
        # key, the current unix timestamp and an HMAC signature (see l1l11).
        l11l11 = int(time.time())
        l11lll = {self.l1111(u"ࡱࡷࡥࡰࡩࡧࡼࠫ"): self.l11l1l, self.l1111(u"ࡸࡳࡥࡴࡶࡤࡱࡵࡑ"): l11l11,
                  self.l1111(u"ࡩࡤࡧࡇ"): self.l1l11(l11l11, l1lll)}
        return l1.b64encode(j.dumps(l11lll).encode(self.l1111(u"ࡻࡴࡧ࠻ࠩ")))
    def l1l11(self, l11l11, l1l1l):
        # HMAC-SHA256 over "<timestamp><sep><api_call>" using the decoded secret.
        l1ll1 = self.l1l111.encode(self.l1111(u'ࡺࡺࡦ࠺ࡒ'))
        l1l1ll = str(l11l11) + self.l1111(u'࠵ࠛ') + str(l1l1l)
        l1l1ll = l1l1ll.encode(self.l1111(u'ࡦࡹࡣࡪ'))
        l1lllll = l11ll1.new(l1ll1, l1l1ll, digestmod=l1ll.sha256)
        return l1lllll.hexdigest()
| [
"vanLeeuwenDaan@outlook.com"
] | vanLeeuwenDaan@outlook.com |
bafddbac9090d52a9bf31d8ace6a5b456b428687 | 3228c7ab30e7d1b8a51454b470b4b94a6c810cbf | /CSDGAN/classes/tabular/TabularNetD.py | eab4f746b12cc612bd2c2d248e7363bad142fd55 | [
"MIT"
] | permissive | GAIMJKP/CSDGAN | ca0cdf66f0219dd50aaa4a4ffbab305ad30fe4f9 | 712be213e59b32a79a4970684d726af63616edaf | refs/heads/master | 2022-03-30T04:44:18.298640 | 2020-01-25T00:13:01 | 2020-01-25T00:13:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,550 | py | import torch.nn as nn
import torch
from CSDGAN.classes.NetUtils import NetUtils, GaussianNoise
import torch.optim as optim
import numpy as np
# Discriminator class
class TabularNetD(nn.Module, NetUtils):
    """Conditional GAN discriminator for tabular data.

    A single hidden layer (LeakyReLU) over the concatenation of a data row and
    its label embedding, followed by a sigmoid unit for real/fake
    classification. Gaussian input noise is applied for regularization.
    Training bookkeeping (loss history, D(x)/D(G(z)) statistics) is mixed in
    from NetUtils.
    """
    def __init__(self, device, H, out_dim, nc, noise, lr=2e-4, beta1=0.5, beta2=0.999, wd=0):
        # device: torch device; H: hidden width; out_dim: data row width;
        # nc: number of classes (label embedding width); noise: sigma of the
        # Gaussian input noise; lr/beta1/beta2/wd: Adam hyperparameters.
        super().__init__()
        NetUtils.__init__(self)
        self.name = "Discriminator"
        self.device = device
        self.loss_real = None
        self.loss_fake = None
        self.noise = GaussianNoise(device=self.device, sigma=noise)
        # Layers
        self.fc1 = nn.Linear(out_dim + nc, H, bias=True)
        self.output = nn.Linear(H, 1, bias=True)
        self.act = nn.LeakyReLU(0.2)
        self.m = nn.Sigmoid()
        # Loss and Optimizer
        self.loss_fn = nn.BCELoss()  # BCE Loss
        self.opt = optim.Adam(self.parameters(), lr=lr, betas=(beta1, beta2), weight_decay=wd)
        # Record history of training
        self.init_layer_list()
        self.init_history()
        self.update_hist_list()
        self.D_x = []  # Per step
        self.Avg_D_reals = []  # D_x across epochs
        self.D_G_z1 = []  # Per step
        self.Avg_D_fakes = []  # Store D_G_z1 across epochs
        # Initialize weights
        self.weights_init()
    def forward(self, row, labels):
        """
        :param row: Row of input data to discriminate on
        :param labels: Label embedding
        :return: Binary classification (sigmoid activation on a single unit hidden layer)
        """
        row = self.noise(row)
        x = torch.cat([row, labels], 1)
        x = self.act(self.fc1(x))
        return self.m(self.output(x))
    def train_one_step_real(self, output, label):
        # One backward pass on a real batch; gradients accumulate until
        # combine_and_update_opt() steps the optimizer.
        self.zero_grad()
        self.loss_real = self.loss_fn(output, label)
        self.loss_real.backward()
        self.D_x.append(output.mean().item())
    def train_one_step_fake(self, output, label):
        # One backward pass on a fake batch (no zero_grad: gradients are
        # intentionally accumulated on top of the real-batch pass).
        self.loss_fake = self.loss_fn(output, label)
        self.loss_fake.backward()
        self.D_G_z1.append(output.mean().item())
    def combine_and_update_opt(self):
        # Record combined real+fake loss, apply the optimizer step and store
        # the weight/grad norms for diagnostics.
        self.loss.append(self.loss_real.item() + self.loss_fake.item())
        self.opt.step()
        self.store_weight_and_grad_norms()
    def next_epoch_discrim(self):
        """Discriminator specific actions"""
        self.Avg_D_reals.append(np.mean(self.D_x))  # Mean of means is not exact, but close enough for our purposes
        self.D_x = []
        self.Avg_D_fakes.append(np.mean(self.D_G_z1))
        self.D_G_z1 = []
| [
"aj.gray619@gmail.com"
] | aj.gray619@gmail.com |
712905a2830d7d74c9e8d58ab90cc13afff73f85 | 51bca51cc0d81cf15cae4422a0d70deea79f51bd | /Recursos/PostProcesamiento/testingEscenaHOGSVM.py | 0c5a331cc30656800afd2327867b59192e0054d3 | [] | no_license | DetectorPersonas/PeopleDectectionCountingSys | fae6dd862bf3622d5a0939dfb0fb5953d034a098 | 7d6800026ce6b04c6454305de7dacf3d69bb1bc5 | refs/heads/master | 2021-05-10T17:42:19.283402 | 2018-01-23T21:02:43 | 2018-01-23T21:02:43 | 118,552,409 | 1 | 1 | null | 2020-08-07T17:05:44 | 2018-01-23T03:28:40 | null | UTF-8 | Python | false | false | 21,588 | py | import sys, glob, argparse
import numpy as np
import math, cv2
from scipy.stats import multivariate_normal
import time
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from matplotlib import pyplot as plt
import matplotlib.patches as patches
def predecir(classifier, carHOG, numimag):
    """Classify *numimag* HOG descriptors with a fitted binary classifier.

    A prediction of -1 is reported as 'NO PERSONA', anything else as
    'PERSONA'. Per-image class probabilities are printed and collected.

    :param classifier: fitted estimator exposing predict() and predict_proba()
    :param carHOG: (numimag, n_features) matrix, one descriptor per row
    :param numimag: number of rows of carHOG to evaluate
    :return: (probs, preds) where probs is the list of per-image probability
             arrays and preds is a float64 vector of the raw predictions
    """
    cont = 0
    probs = []
    preds = []
    for i in range(0, numimag):
        v = carHOG[i, :].reshape(1, -1)  # predict() expects a 2-D batch of one
        pred = classifier.predict(v)[0]
        preds.append(pred)
        if pred == -1:
            resultado = 'NO PERSONA'
        else:
            resultado = 'PERSONA'
            cont = cont + 1
        prob = classifier.predict_proba(v)
        print("Imagen", i + 1, resultado, prob)
        # Removed from the original: unused ppersona/pnopersona locals and a
        # discarded np.reshape(prob, (1, -1)) whose result was never stored.
        probs.append(prob)
    print("PERSONAS:", cont, "NO PERSONAS:", numimag - cont)
    # np.asfarray was removed in NumPy 2.0; asfarray(..., dtype='i') coerced
    # the non-float dtype to float64, so this reproduces the original result.
    preds = np.asarray(preds, dtype=np.float64)
    return probs, preds
def get_args():
    """Parse the command-line options for the HOG/SVM scene-testing script.

    Defaults point at the author's local Windows paths; override them on the
    command line for other machines. (Note: '-ther' help text spells
    'Thereshold' — left as-is because help strings are runtime output.)
    """
    parser = argparse.ArgumentParser()
    # Feature / description files for the two sliding-window strategies
    parser.add_argument('-tcb' , "--tcarbordes", help="Directory of txt with characteristics TEST BORDES" , default='E:/DocumentosOSX/TrabajoGrado/TecnicasBarrido/Bordes/CaracteristicasHOGBordes.txt')
    parser.add_argument('-tce' , "--tcarex", help="Directory of txt with characteristics TEST EXHAUSTIVO" , default='E:/DocumentosOSX/TrabajoGrado/TecnicasBarrido/Exhaustivo/CaracteristicasHOGExhaustivo.txt')
    parser.add_argument('-tdb' , "--tdesbordes", help="Directory of txt with description BORDES" , default='E:/DocumentosOSX/TrabajoGrado/TecnicasBarrido/Bordes/BarridoBordes/DescripcionBarridoBordes.txt')
    parser.add_argument('-tde' , "--tdesex", help="Directory of txt with description EXHAUSTIVO" , default='E:/DocumentosOSX/TrabajoGrado/TecnicasBarrido/Exhaustivo/BarridoVentana/DescripcionBarridoExhaustivo.txt')
    parser.add_argument('-ch' , "--ncha", help="Number of characteristics" , default=3780, type=int)
    # Pre-trained model locations
    parser.add_argument('-dsvm', "--dirsvm", help="Directorio de donde se carga el SVM" , default='E:/DocumentosOSX/Desktop/fisherpython/ModeloSinFV/')
    parser.add_argument('-drf' , "--dirrf" , help="Directorio de donde se carga el RF" , default='E:/DocumentosOSX/Desktop/fisherpython/ModeloSinFV/')
    parser.add_argument('-nmodsvm' , "--namemodelSVM" , help="Nombre del modelo a cargar SVM" , default='SVM.pkl')
    parser.add_argument('-nmodrf' , "--namemodelRF" , help="Nombre del modelo a cargar RF" , default='RF.pkl')
    # Runtime behaviour
    parser.add_argument('-tipo' , "--tipovent" , help="Ventaneo a utilizar 0(Exhaustivo) - 1(Bordes)" , default=1, type=int)
    parser.add_argument('-gra' , "--visualize" , help="Mostrar resultados graficos 0(No) - 1(Si)" , default=0, type=int)
    parser.add_argument('-ther' , "--thereshold" , help="Thereshold para NMS" , default=0.35, type=float)
    parser.add_argument('-fr' , "--frame" , help="Directorio del frame" , default='E:/DocumentosOSX/TrabajoGrado/TecnicasBarrido/Exhaustivo/BarridoVentana/Factor1.JPG')
    parser.add_argument('-gr' , "--ground" , help="Directorio del txt con ground truth" , default='E:\DocumentosOSX\TrabajoGrado/coord.txt')
    parser.add_argument('-dif' , "--diferencia" , help="Thereshold para diferencia BBs" , default=0.35, type=float)
    args = parser.parse_args()
    return args
def getchara(tc, numcar=None):
    """Load a flat text file of HOG features (one float per line).

    :param tc: path of the feature file
    :param numcar: number of features per image; defaults to the module-level
                   args.ncha so existing single-argument callers keep working
    :return: (features, numimag) with features shaped (numimag, numcar),
             dtype float32
    """
    if numcar is None:
        numcar = args.ncha  # argparse namespace created in __main__
    arreglo = []
    print('Leyendo caracteristicas del .txt ...')
    contador = 0
    # 'with' guarantees the handle is closed (the original leaked it)
    with open(tc, 'r') as c:
        for cara in c:
            arreglo.append(float(cara))
            contador = contador + 1
    numimag = int(contador / numcar)
    print("Imagenes Leidas:", numimag)
    # np.asfarray was removed in NumPy 2.0; dtype 'f' (float32) is preserved
    arreglo = np.asarray(arreglo, dtype='f')
    arreglo = np.reshape(arreglo, (numimag, -1))
    print('Matriz de caracteristicas leidas', arreglo.shape)
    return arreglo, numimag
def leerDescripcion(directorio):
    """Parse a sliding-window description file.

    File layout: line 0 = window width, line 1 = window height, line 2 is a
    separator, then for each scale a run of coordinate values terminated by a
    line containing only '*'.

    :param directorio: path of the description file
    :return: (ancho, alto, coordenadas, coordenadas2) where coordenadas is a
             list with one (n, 2) float array per scale and coordenadas2
             stacks all scales into a single (m, 2) array
    """
    with open(directorio, 'r') as archivo:  # the original leaked the handle
        caracter = archivo.readlines()
    caracter = [x.strip() for x in caracter]
    ancho = caracter[0]
    alto = caracter[1]
    caracter = caracter[3:]  # line index 2 is a separator and is discarded
    escalas = []
    coordenadas = []
    coordenadas2 = []
    inicio = 0
    # Split the remaining lines into per-scale chunks delimited by '*'
    for i in range(0, len(caracter)):
        if caracter[i] == '*':
            escalas.append(caracter[inicio:i])
            inicio = i + 1
    # One (n, 2) array per scale. np.asfarray was removed in NumPy 2.0;
    # float64 matches its default behaviour.
    for esc in escalas:
        coordenadas.append(np.reshape(np.asarray(esc, dtype=np.float64), (-1, 2)))
    # Flat concatenation of every scale
    for esc in escalas:
        for elemento in esc:
            coordenadas2.append(elemento)
    coordenadas2 = np.asarray(coordenadas2, dtype=np.float64)
    coordenadas2 = np.reshape(coordenadas2, (-1, 2))
    return ancho, alto, coordenadas, coordenadas2
def NMS(ancho,alto,coordenadas,coordenadas2,scores,preds,th,visualize,frame):
    """Apply non-maximum suppression to the positive detections.

    :param ancho, alto: base window width/height at scale 0 (strings or numbers)
    :param coordenadas: list of (n, 2) arrays of (y, x) window origins, one per scale
    :param coordenadas2: flat stack of all coordinates (unused here)
    :param scores: per-window predict_proba outputs, same order as preds
    :param preds: per-window class predictions (1 = person)
    :param th: overlap threshold for suppression
    :param visualize: unused inside this function (figures are shown by the caller)
    :param frame: path of the frame image drawn behind the boxes
    :return: (k, 7) array of surviving boxes: X1, Y1, X2, Y2, SCORE, ANCHO, ALTO
    """
    print("Ejecutando algoritmo de NMS...")
    ancho = float(ancho)
    alto = float(alto)
    boxes = []
    boxesReales = []
    pick = []
    overlapThresh = th
    # Algoritmo para dibujar las personas encontradas sin NMS
    # Aqui se incluye el cambio de escala segun el barrido
    # (windows at scale i were taken from an image downscaled by 2**i, so
    # their coordinates and sizes are multiplied back by that factor here)
    for i in range (0,len(coordenadas)):
        factor = math.pow(2,i)
        #print ("Calculando boxes de escala",i,"con factor",factor)
        coresc = coordenadas[i]
        for j in range(0,len(coresc)):
            y1 = float(coresc[j,0]) * factor
            x1 = float(coresc[j,1]) * factor
            #print x1,y1,ancho*factor,alto*factor,i
            boxes.append(x1)
            boxes.append(y1)
            boxes.append(ancho * factor)
            boxes.append(alto * factor)
            boxes.append(i)
    boxes = np.asfarray(boxes)
    boxes = np.reshape(boxes,(-1,5))
    #print boxes
    # Boxes: X,Y,ANCHO,ALTO,ESCALA A LA QUE PERTENECE (PARA CAMBIAR COLOR)
    img = cv2.imread(frame,0)
    fig,ax = plt.subplots(1)
    plt.title("Resultados de las predicciones sin NMS")
    plt.axis('off')
    ax.imshow(img, cmap = 'gray')
    # Draw every positive detection (red = scale 0, blue = other scales) and
    # build boxesReales with the corner coordinates needed for NMS.
    for i in range(0,len(boxes)):
        if preds[i] == 1:
            if boxes[i,4] ==0:
                color = 'r'
            else:
                color = 'b'
            rect = patches.Rectangle((boxes[i,0],boxes[i,1]),boxes[i,2],boxes[i,3],linewidth=1,edgecolor=color,facecolor='none')
            ax.add_patch(rect)
            personaX1 = boxes[i,0]
            personaY1 = boxes[i,1]
            personaX2 = personaX1 + boxes[i,2]
            personaY2 = personaY1 + boxes[i,3]
            scor = scores[i]
            personaScore = max(scor[0,1],scor[0,0])
            # Construccion de BoxesReales
            boxesReales.append(personaX1)
            boxesReales.append(personaY1)
            boxesReales.append(personaX2)
            boxesReales.append(personaY2)
            boxesReales.append(personaScore)
            boxesReales.append(boxes[i,2])
            boxesReales.append(boxes[i,3])
    # boxesReales = X1,Y1,X2,Y2,SCORE(MAX),ANCHO,ALTO
    boxesReales = np.asfarray(boxesReales)
    boxesReales = np.reshape(boxesReales,(-1,7))
    boxesIniciales = len(boxesReales)
    # Inicio de algoritmo de NMS
    x1 = boxesReales[:,0]
    y1 = boxesReales[:,1]
    x2 = boxesReales[:,2]
    y2 = boxesReales[:,3]
    sc = boxesReales[:,4]
    idxs = np.argsort(boxesReales[:,4])
    area = (boxesReales[:,2] - boxesReales[:,0] + 1) * (boxesReales[:,3] - boxesReales[:,1] + 1)
    #print ("Indices a analizar:",idxs)
    while len(idxs) > 0:
        # Recorro BB de abajo hacia arriba (mayor a menor score)
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)
        suppress = [last]
        for pos in range(0, last):
            # Itero sobre todos los BB que hay
            j = idxs[pos]
            #print ("Analizando traslape de imagen",i,"con",j)
            # Encuentro coordenadas para el traslape
            xx1 = max(x1[i], x1[j])
            yy1 = max(y1[i], y1[j])
            xx2 = min(x2[i], x2[j])
            yy2 = min(y2[i], y2[j])
            # Encuentro area de traslape
            w = max(0, xx2 - xx1 + 1)
            h = max(0, yy2 - yy1 + 1)
            # Computo la relacion de area de traslape con area real
            overlap = float(w * h) / area[j]
            #print ("Overlap:",overlap)
            # Si se sobrepasa el area de traslape, borro esa caja
            # (only when the two scores differ by more than args.diferencia;
            # near-equal scores are kept as distinct detections)
            if overlap > overlapThresh:
                #print("Se suprimio la imagen",i)
                scor1 = sc[i]
                scor2 = sc[j]
                if abs(scor1-scor2) > args.diferencia:
                    suppress.append(pos)
        # Borro indices de boxes que ya no estan
        #print ("Supress:",suppress)
        #print ("IDSX:",idxs)
        idxs = np.delete(idxs, suppress)
        #print ("IDSX2:",idxs)
    #print ("PICK",pick)
    boxesFinales = len(boxesReales[pick])
    print ("Boxes Iniciales:",boxesIniciales,"Boxes Finales:",boxesFinales)
    boxesReales = boxesReales[pick]
    #print boxesReales
    # Second figure: the surviving boxes after suppression
    fig2,ax2 = plt.subplots(1)
    ax2.imshow(img, cmap = 'gray')
    plt.title("Resultados de las predicciones con NMS")
    plt.axis('off')
    for i in range(0,len(boxesReales)):
        rect = patches.Rectangle((boxesReales[i,0],boxesReales[i,1]),boxesReales[i,5],boxesReales[i,6],linewidth=1,edgecolor='r',facecolor='none')
        ax2.add_patch(rect)
    return boxesReales
def validacion(boxesReales,ground):
    """Match detected boxes against the ground truth and split them into
    true positives, false positives and false negatives.

    :param boxesReales: (k, 7) detections from NMS (X1,Y1,X2,Y2,SCORE,W,H)
    :param ground: path of a text file with one value per line, reshaped to
                   (n, 4) rows of X1, Y1, WIDTH, HEIGHT
    :return: (verPositivos, falPositivos, falNegativos, coordenadas) where the
             first two are (m, 4) boxes (X1,Y1,W,H), falNegativos is a count
             and coordenadas is the parsed ground truth.

    NOTE(review): when len(coordenadas) == len(boxesReales) and both are
    non-empty, none of the four cases below matches and the function falls
    through returning None — confirm whether that situation can occur.
    """
    verPositivos = []
    falPositivos = []
    falNegativos = []
    # Lectura del txt con el Ground Truth
    c = open(ground,'r')
    coordenadas = c.readlines()
    coordenadas = np.asfarray(coordenadas)
    coordenadas = np.reshape(coordenadas,(-1,4))
    # Informacion del Ground Truth
    x1t = coordenadas[:,0]
    y1t = coordenadas[:,1]
    anchost = coordenadas[:,2]
    altost = coordenadas[:,3]
    x2t = x1t + anchost
    y2t = y1t + altost
    # Informacion de los BB detectados
    x1e = boxesReales[:,0]
    y1e = boxesReales[:,1]
    anchose = boxesReales[:,5]
    altose = boxesReales[:,6]
    x2e = boxesReales[:,2]
    y2e = boxesReales[:,3]
    # Re-acomodacion de boxesReales: keep only X1, Y1, W, H for the output
    auxiliar = []
    for i in range(0,len(boxesReales)):
        auxiliar.append(boxesReales[i,0])
        auxiliar.append(boxesReales[i,1])
        auxiliar.append(boxesReales[i,5])
        auxiliar.append(boxesReales[i,6])
    boxesReales = np.asfarray(auxiliar)
    boxesReales = np.reshape(boxesReales,(-1,4))
    # Calculo centros de los BB teoricos
    centrosXt = (x1t + (x1t + anchost))/2
    centrosYt = (y1t + (y1t + altost))/2
    # Calculo centros de los BB experimentales
    centrosXe = (x1e + (x1e + anchose))/2
    centrosYe = (y1e + (y1e + altose))/2
    # Caso 1: No se predijo ninguna persona cuando en realidad si hay
    # verPositivos = 0, falPositivos = 0, falNegativos = por lo menos len(teoricos)
    if len(boxesReales)==0 and len(coordenadas)!=0:
        print ("\n\nCASO 1: NO SE PREDICEN PERSONAS CUANDO EN REALIDAD HAY")
        verPositivos = []
        falPositivos = []
        falNegativos = len(coordenadas)
        return verPositivos,falPositivos,falNegativos,coordenadas
    # Caso 2: Se predicen personas cuando en realidad no hay
    # verPositivos = 0,falPositivos = boxesReales, falNegativos=len(coordenadas)
    if len(boxesReales)!=0 and len(coordenadas)==0:
        print ("\n\nCASO 2: SE PREDICEN PERSONAS CUANDO EN REALIDAD NO HAY")
        verPositivos = []
        falPositivos = boxesReales
        falNegativos = 0
        return verPositivos,falPositivos,falNegativos,coordenadas
    # Caso 3: Se predicen mas personas de las que realmente hay
    if len(coordenadas) < len(boxesReales):
        print ("\n\nCASO 3: MAS PERSONAS DE LAS QUE REALMENTE HAY")
        indices = [];
        # Algoritmo para determinar pares de BB mas cercanos y que se traslapen:
        # for each ground-truth box, pick the closest overlapping detection
        for i in range(0,len(coordenadas)):
            indice = -1
            # Recorro los BB teoricos
            Cxt = centrosXt[i]
            Cyt = centrosYt[i]
            dif = 10000000
            for j in range(0,len(boxesReales)):
                # Recorro los BB experimentales
                Cxe = centrosXe[j]
                Cye = centrosYe[j]
                if (math.sqrt(math.pow(abs(Cxt - Cxe),2) + math.pow(abs(Cyt - Cye),2)) < dif):
                    # Encontrar el area de traslape
                    xx1 = max(x1t[i], x1e[j])
                    yy1 = max(y1t[i], y1e[j])
                    xx2 = min(x2t[i], x2e[j])
                    yy2 = min(y2t[i], y2e[j])
                    # Encuentro area de traslape
                    w = max(0, xx2 - xx1 + 1)
                    h = max(0, yy2 - yy1 + 1)
                    # Tienen que traslaparse para que sea valida la deteccion
                    if w*h != 0:
                        dif = math.sqrt(math.pow(abs(Cxt - Cxe),2) + math.pow(abs(Cyt - Cye),2))
                        indice = j
            if indice != -1:
                indices.append(indice)
        # Algoritmo que determina indices de los fallidos
        indices2 = []
        for i in range(0,len(boxesReales)):
            if not i in indices:
                indices2.append(i)
        #print ("indices",indices)
        #print ("boxes reales",boxesReales,boxesReales.shape)
        boxesAcertadas = boxesReales[indices]
        #print ("boxes acertadas",boxesAcertadas)
        boxesFallidas = boxesReales[indices2]
        #print ("boxes fallidas", boxesFallidas)
        verPositivos = boxesAcertadas
        falPositivos = boxesFallidas
        falNegativos = len(coordenadas) - len(verPositivos)
        return verPositivos,falPositivos,falNegativos,coordenadas
    # Caso 4: Se predicen menos personas de las que realmente hay
    if len(coordenadas) > len(boxesReales):
        print ("\n\nCASO 4: MENOS PERSONAS DE LAS QUE REALMENTE HAY")
        indices = []
        yaEvaluados = []
        distancias = []
        # Algoritmo para determinar pares de BB mas cercanos y que se traslapen:
        # for each detection, pick the closest overlapping ground-truth box
        for i in range(0,len(boxesReales)):
            indice = -1
            indice2 = -1
            # Recorro los BB experimentales
            Cxe = centrosXe[i]
            Cye = centrosYe[i]
            dif = 10000000
            for j in range(0,len(coordenadas)):
                # Recorro los BB teoricos
                Cxt = centrosXt[j]
                Cyt = centrosYt[j]
                #print "experimental",i,"teorica",j,"dife",math.sqrt(math.pow(abs(Cxt - Cxe),2) + math.pow(abs(Cyt - Cye),2)),"difac",dif
                if (math.sqrt(math.pow(abs(Cxt - Cxe),2) + math.pow(abs(Cyt - Cye),2)) < dif):
                    # Encontrar el area de traslape
                    xx1 = max(x1t[j], x1e[i])
                    yy1 = max(y1t[j], y1e[i])
                    xx2 = min(x2t[j], x2e[i])
                    yy2 = min(y2t[j], y2e[i])
                    # Encuentro area de traslape
                    w = max(0, xx2 - xx1 + 1)
                    h = max(0, yy2 - yy1 + 1)
                    # Tienen que traslaparse para que sea valida la deteccion
                    if w*h != 0:
                        dif = math.sqrt(math.pow(abs(Cxt - Cxe),2) + math.pow(abs(Cyt - Cye),2))
                        indice2 = j
                        indice = i
                        d = dif
            if indice!= -1:
                indices.append(indice)
                yaEvaluados.append(indice2)
                distancias.append(d)
        # Algoritmo para descartar multiples traslapes en un mismo punto del GT
        # (if two detections matched the same ground-truth box, keep the closer one)
        nuevoEvaluados = []
        nuevoIndices = []
        nuevoDistancias = []
        for i in range(0,len(indices)):
            teorico = yaEvaluados[i]
            if not teorico in nuevoEvaluados:
                nuevoEvaluados.append(teorico)
                nuevoIndices.append(indices[i])
                nuevoDistancias.append(distancias[i])
            else:
                # Buscar la escena
                for j in range(0,len(nuevoEvaluados)):
                    # Si la encuentra
                    if nuevoEvaluados[j] == teorico:
                        if nuevoDistancias[j] > distancias[i]:
                            nuevoEvaluados[j] = teorico
                            nuevoIndices[j] = indices[i]
                            nuevoDistancias[j] = distancias[i]
        indices = nuevoIndices
        yaEvaluados = nuevoEvaluados
        distancias = nuevoDistancias
        #print indices
        #print yaEvaluados
        #print distancias
        # Algoritmo que determina indices de los fallidos
        indices2 = []
        for i in range(0,len(boxesReales)):
            if not i in indices:
                indices2.append(i)
        #print ("indices",indices)
        #print ("boxes reales",boxesReales,boxesReales.shape)
        boxesAcertadas = boxesReales[indices]
        #print ("boxes acertadas",boxesAcertadas)
        boxesFallidas = boxesReales[indices2]
        #print ("boxes fallidas", boxesFallidas)
        verPositivos = boxesAcertadas
        falPositivos = boxesFallidas
        falNegativos = len(coordenadas) - len(verPositivos)
        return verPositivos,falPositivos,falNegativos,coordenadas
def escribirReporte(therNMS, therDIF, VP, FP, FN, P, R, F1, folder, bandera):
    """Append one semicolon-separated result line to the report CSV.

    Note: the line is written regardless of *bandera*; bandera == 1 only
    controls the confirmation message (this mirrors the original behaviour).

    :param therNMS: NMS overlap threshold used
    :param therDIF: score-difference threshold used
    :param VP, FP, FN: true positives, false positives, false negatives
    :param P, R, F1: precision, recall and F1 score
    :param folder: path of the CSV file (opened in append mode)
    :param bandera: 1 to print a confirmation message
    """
    if bandera == 1:
        print("Se escribió el reporte de esta escena")
    linea = str(therNMS) + ";" + str(therDIF) + ";" + str(VP) + ";" + str(FP) + ";" + str(FN) + ";" + str(P) + ";" + str(R) + ";" + str(F1) + "\n"
    # 'with' guarantees the handle is flushed and closed (the original leaked it)
    with open(folder, 'a') as archivo:
        archivo.write(linea)
if __name__ == '__main__':
    # Pipeline: parse args -> load window descriptions + HOG features ->
    # classify every window -> NMS -> compare against ground truth -> report.
    # Obtencion de parametros
    args = get_args()
    # Lectura de la posicion de los parches
    # coordenadas = Lista donde cada espacio hace referencia a una escala
    # coordenadas2 = Arreglo donde estan las coordenadas de todas las imagenes
    if args.tipovent == 1:
        # Ventaneo por Bordes
        ancho,alto,coordenadas,coordenadas2 = leerDescripcion(args.tdesbordes)
        carHOG,numimag = getchara(args.tcarbordes)
    else:
        # Ventaneo exhaustivo
        ancho,alto,coordenadas,coordenadas2 = leerDescripcion(args.tdesex)
        carHOG,numimag = getchara(args.tcarex)
    # Importacion de los clasificadores
    classifier1 = joblib.load(args.dirsvm + args.namemodelSVM)
    # Evaluacion de clasificadores
    scores1,preds1 = predecir(classifier1,carHOG,numimag)
    # Non Maxima Supression
    boxesReales = NMS(ancho,alto,coordenadas,coordenadas2,scores1,preds1,args.thereshold,args.visualize,args.frame)
    # Validacion
    verPositivos,falPositivos,falNegativos,coordenadas = validacion(boxesReales,args.ground)
    # Calculo de Errores (precision / recall / F1)
    pre = float(len(verPositivos))/float(((len(verPositivos))+(len(falPositivos))))
    re = float(len(verPositivos))/float((len(verPositivos)+float(falNegativos)))
    f1 = float((2*(re*pre)))/float((re +pre))
    # Impresion de Resultados
    print("Personas REALES en la escena:",len(coordenadas))
    print("Personas totales CONTADAS en la escena:",len(verPositivos) + len(falPositivos))
    print("Personas correctas CONTADAS en la escena:",len(verPositivos))
    print("Personas incorrectas CONTADAS en la escena:",len(falPositivos))
    print("Personas que faltaron por contar:",falNegativos)
    print("Precision:",pre)
    print("Recall:",re)
    print("F1 SCORE:",f1)
    # Dibujar BB: green = true positives, red = false positives, blue = ground truth
    fig,ax = plt.subplots(1)
    plt.title("Resultados del conteo")
    img = cv2.imread(args.frame,0)
    ax.imshow(img, cmap = 'gray')
    for i in range(0,len(verPositivos)):
        rect = patches.Rectangle((verPositivos[i,0],verPositivos[i,1]),verPositivos[i,2],verPositivos[i,3],linewidth=1,edgecolor='g',facecolor='none')
        ax.add_patch(rect)
    for i in range(0,len(falPositivos)):
        rect = patches.Rectangle((falPositivos[i,0],falPositivos[i,1]),falPositivos[i,2],falPositivos[i,3],linewidth=1,edgecolor='r',facecolor='none')
        ax.add_patch(rect)
    for i in range(0,len(coordenadas)):
        rect = patches.Rectangle((coordenadas[i,0],coordenadas[i,1]),coordenadas[i,2],coordenadas[i,3],linewidth=1,edgecolor='b',facecolor='none')
        ax.add_patch(rect)
    green_patch = patches.Patch(color='g', label='Positivos Verdaderos')
    red_patch = patches.Patch(color='r', label='Positivos Falsos')
    blue_patch = patches.Patch(color='b', label='Ground Truth')
    plt.axis('off')
    plt.legend(handles=[red_patch,blue_patch,green_patch])
    # Metric summary rendered onto the figure
    ax.text(500, 660,"Precision:" + str(pre))
    ax.text(500, 680,"Recall:" + str(re))
    ax.text(500, 700,"F1 Score:" + str(f1))
    ax.text(0, 660,"Personas correctas totales: " + str(len(coordenadas)))
    ax.text(0, 680,"Personas correctas contadas: " + str(len(verPositivos)))
    ax.text(0, 700,"Personas incorrectas contadas: " + str(len(falPositivos)))
    ax.text(0, 720,"Personas que faltaron por contar:" + str(falNegativos))
    if args.visualize == 1:
        plt.show();
    escribirReporte(args.thereshold,args.diferencia,len(verPositivos),len(falPositivos),falNegativos,pre,re,f1,'E:\DocumentosOSX\TrabajoGrado/Reporte.csv',args.visualize)
| [
"andresfelipelunac@gmail.com"
] | andresfelipelunac@gmail.com |
be320a449fe3bf7958632efd9d081e35040cd7ce | 167923e16f1b6fcb40d979b38eae6348b97d9d35 | /ops.py | a107b7cdb7043580d5685063bc17ca1a739399ee | [] | no_license | halimiqi/www21 | e6cbf04a6034d62c798fa7effa2b9055cdcd05c4 | 0603c819123d29220a0ddd9bd6ebf86c8b60d099 | refs/heads/master | 2023-02-27T00:07:34.640575 | 2021-02-07T13:16:32 | 2021-02-07T13:16:32 | 329,658,371 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,278 | py | from tensorflow.contrib.layers.python.layers import batch_norm
import numpy as np
#from tensorflow.python.layers import batch_norm
def batch_normal(input, scope="scope", reuse=False):
    """Thin wrapper around tf.contrib batch_norm with the project's defaults."""
    return batch_norm(input,
                      epsilon=1e-5,
                      decay=0.9,
                      scale=True,
                      scope=scope,
                      reuse=reuse,
                      updates_collections=None)
def print_similarity(pred, target_label_dict):
    """Report how well each target's strongest predicted cluster matches its label.

    For every target node, takes the column of its argmax cluster and measures
    the overlap with the target's 0/1 label vector, normalized by the label mass.

    :param pred: (n_nodes, n_clusters) soft assignment matrix
    :param target_label_dict: maps a target row index to its label vector over nodes
    :return: mean similarity over all targets (now returned for consistency
             with print_mu / print_mu2, which already return their metric;
             callers that ignored the previous None return are unaffected)
    """
    similar_list = []
    for target in target_label_dict:
        G_res_index = np.argmax(pred[target, :])  # cluster the target falls into
        G_one = pred[:, G_res_index]              # that cluster's column
        real_y = target_label_dict[target]
        G_similar = np.sum(real_y * G_one) / np.sum(real_y)
        if target % 1000 == 0:                    # throttle per-target logging
            print("the target:%d. The similarity is %f" % (target, G_similar))
            print("The target values are:")
            print(pred[target, :])
        similar_list.append(G_similar)
    mean_similarity = np.mean(similar_list)
    print("check mean similar values")
    print(mean_similarity)
    return mean_similarity
def print_mu(target_list, pred_dis_res, n_clusters):
    """Print and return the mu_1 metric averaged over all target groups.

    For each group, mu_1 rewards the group's targets spreading over many
    clusters (numerator) and penalizes crowding into one cluster (denominator).
    """
    scores = []
    for targets in target_list:
        group_pred = pred_dis_res[targets]
        assigned = np.argmax(group_pred, axis=1)
        spread = len(np.unique(assigned)) - 1
        denom = np.max([n_clusters - 1, 1]) * np.max(np.bincount(assigned))
        scores.append(spread / denom)
    mu_1 = np.mean(scores)
    print("The mu_1 is:%f" % (mu_1))
    return mu_1
def print_mu2(target_list, pred_dis_res, n_clusters):
    """Print and return the mu_2 metric averaged over all target groups.

    First buckets every target by its argmax cluster; then, per group, counts
    how many *other* targets share the clusters this group occupies, normalized
    by the number of targets outside the group.
    """
    # Bucket all targets by their winning cluster.
    cluster_members = [[] for _ in range(n_clusters)]
    total_targets = 0
    for targets in target_list:
        for target in targets:
            winner = np.argmax(pred_dis_res[target])
            cluster_members[winner].append(target)
            total_targets += 1
    # Score each group against the occupancy of its clusters.
    scores = []
    for targets in target_list:
        winners = np.unique(np.argmax(pred_dis_res[targets], axis=1))
        occupied = sum(len(cluster_members[idx]) for idx in winners)
        occupied -= len(targets)  # exclude the group's own members
        denom = np.max([total_targets - len(targets), 1])
        scores.append(occupied / denom)
    mu_2 = np.mean(scores)
    print("The mu2 is %f" % (mu_2))
    return mu_2
| [
"zhanghl1996@163.com"
] | zhanghl1996@163.com |
637bb3d2c2ec2fdc5a8bfa3d383807a9429896f0 | c5c1560bacd9fdac9aa623deee7f878f98021d02 | /str_token.py | f04267f0213e76c41bc96d74c5a2273606bc5583 | [] | no_license | shirouzu/teaching | 6fef8a5685880318d9f426f48fb8610143ff521e | 6daa6c5008c6fdfb294b196a31f60a01d8610775 | refs/heads/master | 2022-12-08T10:32:18.317415 | 2020-08-22T07:10:43 | 2020-08-22T07:10:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | # global keyword has risk, H.Shirouzu
next_str = None
# if targ_str is None, tokenize trail substring
def str_token(targ_str=None):
global next_str
if not targ_str:
if not next_str:
return None
targ_str = next_str
tlist = targ_str.split(maxsplit=1)
if len(tlist) == 2:
next_str = tlist[1]
return tlist[0]
elif len(tlist) == 1:
next_str = None
return tlist[0]
else: # len(tlist) == 0
next_str = None
return None
def test1():
command_line = "hello.exe harg1 harg2 harg3"
ret = str_token(command_line)
while ret:
print("cmdline", ret)
ret = str_token()
print()
def test2():
command_line1 = "hello.exe harg1 harg2 harg3"
command_line2 = "byebye.exe barg1 barg2 barg3"
ret1 = str_token(command_line1)
ret2 = str_token(command_line2)
while ret1 and ret2:
print("cmdline1", ret1)
ret1 = str_token()
print("cmdline2", ret2)
ret2 = str_token()
print()
test1()
# output is following.
#
# cmdline hello.exe
# cmdline harg1
# cmdline harg2
# cmdline harg3
test2() # why test2 is not good? how to fix it?
# output is following.
#
# cmdline1 hello.exe
# cmdline2 byebye.exe
# cmdline1 barg1 <-- what?
# cmdline2 barg2
| [
"noreply@github.com"
] | noreply@github.com |
f2ac05164c08227219172faf8205fd443bc1bc92 | 9c507b6008919e1d3ab17921b0f483a0660dbc0c | /tc_lab13_convo_mnist_1.0_softmax.py | 331257c25181c66215d6405b8e8b5bd6f0460517 | [
"Apache-2.0"
] | permissive | ltchuan/tensorflow-mnist-tutorial | 884af2ebf5d9122e5ceee58ba8fd0075590d2757 | 98774bffa79c19c86b54b29e6bc1d4164007aadd | refs/heads/master | 2020-05-24T23:33:25.318156 | 2017-03-16T07:37:37 | 2017-03-16T07:37:37 | 84,890,639 | 0 | 0 | null | 2017-03-14T01:16:00 | 2017-03-14T01:16:00 | null | UTF-8 | Python | false | false | 6,927 | py | # encoding: UTF-8
# Copyright 2016 Google.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math as math
import tensorflow as tf
import tensorflowvisu
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
tf.set_random_seed(0)
# neural network with 1 layer of 10 softmax neurons
#
# · · · · · · · · · · (input data, flattened pixels) X [batch, 784] # 784 = 28 * 28
# \x/x\x/x\x/x\x/x\x/ -- fully connected layer (softmax) W [784, 10] b[10]
# · · · · · · · · Y [batch, 10]
# The model is:
#
# Y = softmax( X * W + b)
# X: matrix for 100 grayscale images of 28x28 pixels, flattened (there are 100 images in a mini-batch)
# W: weight matrix with 784 lines and 10 columns
# b: bias vector with 10 dimensions
# +: add with broadcasting: adds the vector to each line of the matrix (numpy)
# softmax(matrix) applies softmax on each line
# softmax(line) applies an exp to each value then divides by the norm of the resulting line
# Y: output matrix with 100 lines and 10 columns
# Download images and labels into mnist.test (10K images+labels) and mnist.train (60K images+labels)
mnist = read_data_sets("data", one_hot=True, reshape=False, validation_size=0)
# Learning rate parameters
lr_min = 0.0001
lr_max = 0.003
pkeep_input = 0.75
learning_rate = tf.placeholder(tf.float32)
pkeep = tf.placeholder(tf.float32)
# input X: 28x28 grayscale images, the first dimension (None) will index the images in the mini-batch
X = tf.placeholder(tf.float32, [None, 28, 28, 1])
# correct answers will go here
Y_ = tf.placeholder(tf.float32, [None, 10])
# weights W[784, (Layer 1)] 784=28*28
# Layer 1: 6 channels
W1 = tf.Variable(tf.truncated_normal([6, 6, 1, 6], stddev=0.1))
# biases b[6]
b1 = tf.Variable(tf.ones([6])/10)
# Layer 2: 8 channels
W2 = tf.Variable(tf.truncated_normal([5, 5, 6, 12], stddev=0.1))
b2 = tf.Variable(tf.ones([12])/10)
# Layer 3: 12 channels
W3 = tf.Variable(tf.truncated_normal([4, 4, 12, 24], stddev=0.1))
b3 = tf.Variable(tf.ones([24])/10)
# Layer 4: normal fully connected layer, 200 neurons
W4 = tf.Variable(tf.truncated_normal([7*7*24, 200], stddev=0.1))
b4 = tf.Variable(tf.ones([200])/10)
# Output layer connection
W_o = tf.Variable(tf.truncated_normal([200, 10], stddev=0.1))
b_o = tf.Variable(tf.ones([10])/10)
# The model
# Layers + dropout
Y1cnv = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding='SAME')
Y1 = tf.nn.relu(Y1cnv + b1)
Y2cnv = tf.nn.conv2d(Y1, W2, strides=[1, 2, 2, 1], padding='SAME')
Y2 = tf.nn.relu(Y2cnv + b2)
Y3cnv = tf.nn.conv2d(Y2, W3, strides=[1, 2, 2, 1], padding='SAME')
Y3 = tf.nn.relu(Y3cnv + b3)
Y3_flat = tf.reshape(Y3, [-1, 7*7*24])
Y4 = tf.nn.relu(tf.matmul(Y3_flat, W4) + b4)
Y4d = tf.nn.dropout(Y4, pkeep)
# Output
Y_o_logits = tf.matmul(Y4d, W_o) + b_o
Y_o = tf.nn.softmax(Y_o_logits)
# loss function: cross-entropy = - sum( Y_i * log(Yi) )
# Y_o: the computed output vector
# Y_: the desired output vector
# cross-entropy
# log takes the log of each element, * multiplies the tensors element by element
# reduce_mean will add all the components in the tensor
# so here we end up with the total cross-entropy for all images in the batch
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Y_o_logits, labels=Y_) # normalized later,
cross_entropy = tf.reduce_mean(cross_entropy)*100
# accuracy of the trained model, between 0 (worst) and 1 (best)
correct_prediction = tf.equal(tf.argmax(Y_o, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# training, learning rate adaptive
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
# matplotlib visualisation
allweights = tf.concat([tf.reshape(W_o, [-1]), tf.reshape(W1, [-1]), tf.reshape(W2, [-1]), tf.reshape(W3, [-1]), tf.reshape(W4, [-1])], 0)
allbiases = tf.concat([tf.reshape(b_o, [-1]), tf.reshape(b1, [-1]), tf.reshape(b2, [-1]), tf.reshape(b3, [-1]), tf.reshape(b4, [-1])], 0)
I = tensorflowvisu.tf_format_mnist_images(X, Y_o, Y_) # assembles 10x10 images by default
It = tensorflowvisu.tf_format_mnist_images(X, Y_o, Y_, 1000, lines=25) # 1000 images on 25 lines
datavis = tensorflowvisu.MnistDataVis()
# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# You can call this function in a loop to train the model, 100 images at a time
def training_step(i, update_test_data, update_train_data):
    """Run one optimization step on a 100-image batch.

    i: global step index, used for the learning-rate decay schedule and as
       the x-coordinate for the visualisation curves.
    update_test_data: if true, also evaluate on the full MNIST test set.
    update_train_data: if true, record training accuracy/loss for plotting.

    Relies on module-level globals: sess, mnist, the graph ops (accuracy,
    cross_entropy, train_step, I, It, allweights, allbiases), placeholders
    (X, Y_, pkeep, learning_rate), datavis, lr_min/lr_max and pkeep_input.
    """
    # adaptive learning rate: exponential decay from lr_max towards lr_min
    learning_rate_input = lr_min + (lr_max - lr_min)*math.exp(-i/2000)
    # training on batches of 100 images with 100 labels
    batch_X, batch_Y = mnist.train.next_batch(100)
    # compute training values for visualisation (pkeep=1 disables dropout
    # so evaluation is deterministic)
    if update_train_data:
        a, c, im, w, b = sess.run([accuracy, cross_entropy, I, allweights, allbiases], feed_dict={X: batch_X, Y_: batch_Y, pkeep: 1})
        datavis.append_training_curves_data(i, a, c)
        datavis.append_data_histograms(i, w, b)
        datavis.update_image1(im)
        print(str(i) + ": accuracy:" + str(a) + " loss: " + str(c) + " (lr:" + str(learning_rate_input) + ")")
    # compute test values for visualisation (again with dropout disabled)
    if update_test_data:
        a, c, im = sess.run([accuracy, cross_entropy, It], feed_dict={X: mnist.test.images, Y_: mnist.test.labels, pkeep: 1})
        datavis.append_test_curves_data(i, a, c)
        datavis.update_image2(im)
        print(str(i) + ": ********* epoch " + str(i*100//mnist.train.images.shape[0]+1) + " ********* test accuracy:" + str(a) + " test loss: " + str(c))
    # the backpropagation training step (dropout active via pkeep_input)
    sess.run(train_step, feed_dict={X: batch_X, Y_: batch_Y, learning_rate: learning_rate_input, pkeep: pkeep_input})
datavis.animate(training_step, iterations=10000+1, train_data_update_freq=20, test_data_update_freq=100, more_tests_at_start=True)
# to save the animation as a movie, add save_movie=True as an argument to datavis.animate
# to disable the visualisation use the following line instead of the datavis.animate line
# for i in range(2000+1): training_step(i, i % 50 == 0, i % 10 == 0)
print("max test accuracy: " + str(datavis.get_max_test_accuracy()))
# final max test accuracy = 0.9268 (10K iterations). Accuracy should peak above 0.92 in the first 2000 iterations.
| [
"ltchuan@gmail.com"
] | ltchuan@gmail.com |
71bf52c3f75e834fe7938987cc7b559aa46b54db | ab0e9b543852bc2d3c828b2351c30d1626f0b321 | /CustomProceduralRiggingTool/CustomProceduralRigTool/rigLib/base/controlShape/unitSliderControl.py | f55103622c28f26d51caf910f83abbbaf7302f2a | [] | no_license | tHeBeStXu/CustomProceduralRigTool | 397011b9519a3e5382aec5aee6115f3e6a14a802 | 003fa61b460d8e76c026f47913ebdab5c0cbfef8 | refs/heads/master | 2021-07-13T09:02:07.697909 | 2020-07-09T07:28:27 | 2020-07-09T07:28:27 | 157,082,564 | 15 | 3 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | import maya.cmds as cmds
def createShape(prefix=''):
"""
create a unit slider for blend operation
:param prefix: str, prefix of the control
:param scale: float, scale of the control
:return: str, ctrlBox of the unitSliderControl
"""
Ctrl = cmds.circle(radius=0.2, nr=(1, 0, 0), n=prefix + '_Ctrl')[0]
cmds.transformLimits(Ctrl, tx=(0, 0), ty=(0, 1), tz=(0, 0), etx=(1, 1), ety=(1, 1), etz=(1, 1))
CtrlBox = cmds.curve(d=1, p=[(0, 0, 0), (0, 1, 0)], k=[0, 1], n=prefix + '_CtrlBox')
parentCrvShape = cmds.listRelatives(CtrlBox, s=1)
cmds.setAttr(parentCrvShape[0] + '.template', 1)
cmds.parent(Ctrl, CtrlBox)
cmds.makeIdentity(CtrlBox, apply=1, t=1, r=1, s=1, n=0)
cmds.select(cl=1)
return CtrlBox
| [
"328665042@qq.com"
] | 328665042@qq.com |
dbf83c7d3adc355a74fec95bf764ff880f412e5d | 1c2584495c65f28b08830bef2fc51f91adc61c71 | /query_results/extractEntitiesMod.py | 0bb405c4a2268013d9fa26d621f5787931e884f7 | [] | no_license | locta66/TweetEventDetection | 6db46bdc0727bfa33768f72f9b97251da12969e6 | a55740afe5358b8958d4821785029f3c1d848045 | refs/heads/master | 2020-03-14T05:10:52.692916 | 2018-05-02T07:18:34 | 2018-05-02T07:34:32 | 131,458,887 | 0 | 0 | null | 2018-04-29T02:07:31 | 2018-04-29T02:07:30 | null | UTF-8 | Python | false | false | 11,154 | py | #!/usr/bin/python
import sys
import os
import re
import subprocess
from signal import *
BASE_DIR = 'twitter_nlp.jar'
if os.environ.has_key('TWITTER_NLP'):
BASE_DIR = os.environ['TWITTER_NLP']
sys.path.append('%s/python' % (BASE_DIR))
sys.path.append('%s/python/ner' % (BASE_DIR))
sys.path.append('%s/hbc/python' % (BASE_DIR))
import Features
import twokenize
from LdaFeatures import LdaFeatures
from Dictionaries import Dictionaries
from Vocab import Vocab
sys.path.append('%s/python/cap' % (BASE_DIR))
sys.path.append('%s/python' % (BASE_DIR))
import cap_classifier
import pos_tagger_stdin
import chunk_tagger_stdin
import event_tagger_stdin
def GetNer(ner_model, memory="256m"):
    """Spawn the Mallet-based NER tagger as a line-oriented subprocess.

    ner_model: model file name under models/ner/ (relative to BASE_DIR).
    memory: JVM max heap size passed to -Xmx.
    Returns the subprocess.Popen handle with piped stdin/stdout/stderr.
    """
    command = 'java -Xmx%s -cp %s/mallet-2.0.6/lib/mallet-deps.jar:%s/mallet-2.0.6/class cc.mallet.fst.SimpleTaggerStdin --weights sparse --model-file %s/models/ner/%s' % (
        memory, BASE_DIR, BASE_DIR, BASE_DIR, ner_model)
    return subprocess.Popen(command,
                            shell=True,
                            close_fds=True,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
def GetLLda():
    """Spawn the LabeledLDA inference binary as a line-oriented subprocess.

    Returns the subprocess.Popen handle with piped stdin/stdout/stderr.
    """
    command = '%s/hbc/models/LabeledLDA_infer_stdin.out %s/hbc/data/combined.docs.hbc %s/hbc/data/combined.z.hbc 100 100' % (
        BASE_DIR, BASE_DIR, BASE_DIR)
    return subprocess.Popen(command,
                            shell=True,
                            close_fds=True,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
class EntityExtractor:
    """Drives the twitter_nlp pipeline over tweets, one line at a time.

    Combines an optional POS tagger, chunker and event tagger with a
    Mallet CRF NER subprocess and an optional LabeledLDA subprocess that
    classifies the extracted entity spans into types. Python 2 code
    (uses dict.has_key and byte/str mixing).
    """
    def __init__(self, pos, chunk, event, classify, mallet_memory='256m'):
        """Start the tagger subprocesses and load dictionaries.

        pos/chunk/event: enable the corresponding taggers (chunk and event
            both additionally require pos).
        classify: enable LabeledLDA entity-type classification.
        mallet_memory: JVM heap size for the Mallet NER process.
        """
        self.clear_line_counter()
        self.posTagger = pos_tagger_stdin.PosTagger() if pos else None
        self.chunkTagger = chunk_tagger_stdin.ChunkTagger() if chunk and pos else None
        self.eventTagger = event_tagger_stdin.EventTagger() if event and pos else None
        self.llda = GetLLda() if classify else None
        # Pick the NER model matching the feature set that will be available.
        if pos and chunk:
            self.ner_model = 'ner.model'
        elif pos:
            self.ner_model = 'ner_nochunk.model'
        else:
            self.ner_model = 'ner_nopos_nochunk.model'
        self.ner = GetNer(self.ner_model, memory=mallet_memory)
        self.fe = Features.FeatureExtractor('%s/data/dictionaries' % (BASE_DIR))
        self.capClassifier = cap_classifier.CapClassifier()
        self.vocab = Vocab('%s/hbc/data/vocab' % (BASE_DIR))
        # dictMap: 1-based index -> dictionary name (order of the data file).
        self.dictMap = {}
        self.dictMap = self.dictMap
        i = 1
        for line in open('%s/hbc/data/dictionaries' % (BASE_DIR)):
            dictionary = line.rstrip('\n')
            self.dictMap[i] = dictionary
            i += 1
        # dict2index: inverse mapping, dictionary name -> index.
        self.dict2index = {}
        for i in self.dictMap.keys():
            self.dict2index[self.dictMap[i]] = i
        if self.llda:
            self.dictionaries = Dictionaries('%s/data/LabeledLDA_dictionaries3' % (BASE_DIR), self.dict2index)
        # entityMap: known entity string -> id, only needed for LabeledLDA.
        self.entityMap = {}
        i = 0
        if self.llda:
            for line in open('%s/hbc/data/entities' % (BASE_DIR)):
                entity = line.rstrip('\n')
                self.entityMap[entity] = i
                i += 1
        # dict2label: dictionary name -> human-readable entity type label.
        self.dict2label = {}
        for line in open('%s/hbc/data/dict-label3' % (BASE_DIR)):
            (dictionary, label) = line.rstrip('\n').split(' ')
            self.dict2label[dictionary] = label
    def clear_line_counter(self):
        """Reset the processed-line counter used by line_counter()."""
        self.nlines = 0
    def trigger_line_counter(self):
        """Restart the NER subprocess (works around a Mallet memory leak)."""
        self.ner.stdin.close()
        self.ner.stdout.close()
        os.kill(self.ner.pid, SIGTERM) # Need to do this for python 2.4
        self.ner.wait()
        self.ner = GetNer(self.ner_model)
    def line_counter(self):
        """Count a processed line; restart the NER process every 50000 lines."""
        self.nlines += 1
        if self.nlines % 50000 == 0:
            self.trigger_line_counter()
            self.clear_line_counter()
    def parse_lines(self, lines):
        """Tag a list of tweet strings.

        Returns one string per input line in which each token is rendered
        as word/nerTag[/pos][/chunk][/event], the optional fields appearing
        only when the corresponding tagger is enabled.
        """
        res = []
        for line in lines:
            # nLines = 1
            line = line.encode('utf-8', "ignore")
            words = twokenize.tokenize(line)
            seq_features = []
            tags = []
            # Heuristic: trust capitalization features only for well-cased text.
            goodCap = self.capClassifier.Classify(words) > 0.9
            if self.posTagger:
                pos = self.posTagger.TagSentence(words)
                pos = [re.sub(r':[^:]*$', '', p) for p in pos]  # remove weights
            else:
                pos = None
            # Chunking the tweet
            if self.posTagger and self.chunkTagger:
                word_pos = zip(words, [p.split(':')[0] for p in pos])
                chunk = self.chunkTagger.TagSentence(word_pos)
                chunk = [c.split(':')[0] for c in chunk]  # remove weights
            else:
                chunk = None
            # Event tags
            if self.posTagger and self.eventTagger:
                events = self.eventTagger.TagSentence(words, [p.split(':')[0] for p in pos])
                events = [e.split(':')[0] for e in events]
            else:
                events = None
            quotes = Features.GetQuotes(words)
            # One feature line per token; the NER process expects them
            # tab-joined on a single input line.
            for i in range(len(words)):
                features = self.fe.Extract(words, pos, chunk, i, goodCap) + ['DOMAIN=Twitter']
                if quotes[i]:
                    features.append("QUOTED")
                seq_features.append(" ".join(features))
            self.ner.stdin.write(("\t".join(seq_features) + "\n").encode('utf8'))
            # The NER process answers with one tag per token, one per line.
            for i in range(len(words)):
                tags.append(self.ner.stdout.readline().rstrip('\n').strip(' '))
            features = LdaFeatures(words, tags)
            # Extract and classify entities
            for i in range(len(features.entities)):
                # type = None
                wids = [str(self.vocab.GetID(x.lower())) for x in features.features[i] if self.vocab.HasWord(x.lower())]
                if self.llda and len(wids) > 0:
                    entityid = "-1"
                    if self.entityMap.has_key(features.entityStrings[i].lower()):
                        entityid = str(self.entityMap[features.entityStrings[i].lower()])
                    labels = self.dictionaries.GetDictVector(features.entityStrings[i])
                    # No dictionary matched: fall back to a uniform prior.
                    if sum(labels) == 0:
                        labels = [1 for _ in labels]
                    self.llda.stdin.write("\t".join([entityid, " ".join(wids), " ".join([str(x) for x in labels])]) + "\n")
                    sample = self.llda.stdout.readline().rstrip('\n')
                    # NOTE(review): the [4:len(sample)-8] slice assumes a fixed
                    # framing in the LabeledLDA output line -- confirm against
                    # LabeledLDA_infer_stdin's output format.
                    labels = [self.dict2label[self.dictMap[int(x)]] for x in sample[4:len(sample) - 8].split(' ')]
                    # Majority vote over the sampled labels.
                    count = {}
                    for label in labels:
                        count[label] = count.get(label, 0.0) + 1.0
                    maxL = None
                    maxP = 0.0
                    for label in count.keys():
                        p = count[label] / float(len(count))
                        if p > maxP or maxL == None:
                            maxL = label
                            maxP = p
                    # Rewrite the span with B-/I- tags of the winning type,
                    # or erase it when the vote says 'None'.
                    if maxL != 'None':
                        tags[features.entities[i][0]] = "B-%s" % (maxL)
                        for j in range(features.entities[i][0] + 1, features.entities[i][1]):
                            tags[j] = "I-%s" % (maxL)
                    else:
                        tags[features.entities[i][0]] = "O"
                        for j in range(features.entities[i][0] + 1, features.entities[i][1]):
                            tags[j] = "O"
                else:
                    # No classifier (or no known words): keep a generic type.
                    tags[features.entities[i][0]] = "B-ENTITY"
                    for j in range(features.entities[i][0] + 1, features.entities[i][1]):
                        tags[j] = "I-ENTITY"
            # Assemble word/tag plus whatever optional layers are enabled.
            output = ["%s/%s" % (words[x], tags[x]) for x in range(len(words))]
            if pos:
                output = ["%s/%s" % (output[x], pos[x]) for x in range(len(output))]
            if chunk:
                output = ["%s/%s" % (output[x], chunk[x]) for x in range(len(output))]
            if events:
                output = ["%s/%s" % (output[x], events[x]) for x in range(len(output))]
            res.append(" ".join(output))
            # seems like there is a memory leak comming from mallet, so just restart it every 1,000 tweets or so
            # if nLines % 10000 == 0:
            # self.trigger_line_counter()
            self.line_counter()
        return res
    def close(self):
        """Shut down both subprocesses and drop all loaded resources.

        NOTE(review): unconditionally touches self.llda and self.dictionaries,
        which only exist when classify=True was passed -- confirm callers
        never close() an instance created with classify=False.
        """
        self.ner.stdin.close()
        self.ner.stdout.close()
        self.ner.terminate()
        self.ner.wait()
        self.llda.stdin.close()
        self.llda.stdout.close()
        self.llda.terminate()
        self.llda.wait()
        del self.ner, self.llda
        del self.dict2index, self.dict2label, self.dictionaries, self.dictMap, self.entityMap
        del self.chunkTagger, self.eventTagger, self.posTagger
        del self.capClassifier, self.fe, self.vocab
def append_endline(string):
    """Terminate a protocol message.

    Strings already ending in a newline are returned unchanged; otherwise
    a newline plus a single space is appended (the reader strips it).
    """
    if not string.endswith('\n'):
        string = string + '\n '
    return string
# Module-level singleton managed by the stdin command loop below.
entityExtractor = None
if __name__ == "__main__":
    # Line-oriented command protocol over stdin/stdout. Each request is two
    # lines: a command line and a parameter line. Each response is two lines:
    # a state line and a result line (the 'execute array' command streams
    # its results instead).
    while True:
        command = str(sys.stdin.readline()).strip()
        params = str(sys.stdin.readline()).strip()
        if 'open' in command:
            # Params are flags: -p enables POS tagging, -c classification.
            pos = True if '-p' in params else False
            classify = True if '-c' in params else False
            try:
                entityExtractor = EntityExtractor(pos=pos, chunk=False, event=False, classify=classify)
                state = 'open success'
            except:
                entityExtractor = None
                state = 'error: open failed'
            finally:
                res = ' '
        elif 'close' in command:
            try:
                entityExtractor.close()
                del entityExtractor
                state = 'close success'
            except:
                state = 'error: close failed'
            finally:
                res = ' '
                entityExtractor = None
        elif 'ex' == command:
            # Tag a single line of text; result is the tagged string.
            text = params.strip()
            try:
                if entityExtractor:
                    res = str(entityExtractor.parse_lines([text, ])[0])
                    state = 'su'
                else:
                    res = ' '
                    state = 'error: extractor not exist'
            except:
                res = ' '
                state = 'error: ner failed'
        elif 'execute array' == command:
            # Params give the number of following input lines; each tagged
            # result is streamed back, terminated by a 'suc' line.
            num = int(params.strip())
            for i in range(num):
                text = str(sys.stdin.readline()).strip()
                res = str(entityExtractor.parse_lines([text, ])[0])
                sys.stdout.write(append_endline(res).encode('utf8'))
                sys.stdout.flush()
            sys.stdout.write(append_endline('suc').encode('utf8'))
            sys.stdout.flush()
            continue
        else:
            # 'instructuion' (sic) is part of the wire protocol; left as-is.
            state = 'error: no instructuion'
            res = ' '
        sys.stdout.write(append_endline(state).encode('utf8'))
        sys.stdout.write(append_endline(res).encode('utf8'))
        sys.stdout.flush()
"dong17.14@163.com"
] | dong17.14@163.com |
249edc0e5fb7c5fae23b6d8c5752ffa60b404a5b | 60aa3bcf5ace0282210685e74ee8ed31debe1769 | /base/lib/uu.py | 6ee9f9acad9bccce569ad6152e8da80b8e368319 | [] | no_license | TheBreadGuy/sims4-ai-engine | 42afc79b8c02527353cc084117a4b8da900ebdb4 | 865212e841c716dc4364e0dba286f02af8d716e8 | refs/heads/master | 2023-03-16T00:57:45.672706 | 2016-05-01T17:26:01 | 2016-05-01T17:26:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,505 | py | import binascii
import os
import sys
__all__ = ['Error', 'encode', 'decode']
class Error(Exception):
    """Raised for uuencode/uudecode framing errors (bad or missing header,
    truncated input, refusal to overwrite an existing output file)."""
    __qualname__ = 'Error'
def encode(in_file, out_file, name=None, mode=None):
    """Uuencode the contents of in_file onto out_file.

    in_file/out_file may each be '-' (stdin/stdout), a filename, or an
    already-open binary file object. name and mode populate the 'begin'
    header line; when the input is a path they default to its basename and
    permissions, otherwise to '-' and 0o666. Files opened here are closed
    on exit; caller-supplied file objects are left open.
    """
    to_close = []
    try:
        # Resolve the input; a path also supplies header defaults.
        if in_file == '-':
            in_file = sys.stdin.buffer
        elif isinstance(in_file, str):
            if name is None:
                name = os.path.basename(in_file)
            if mode is None:
                try:
                    mode = os.stat(in_file).st_mode
                except AttributeError:
                    pass
            in_file = open(in_file, 'rb')
            to_close.append(in_file)
        # Resolve the output the same way.
        if out_file == '-':
            out_file = sys.stdout.buffer
        elif isinstance(out_file, str):
            out_file = open(out_file, 'wb')
            to_close.append(out_file)
        # Fall back to the conventional anonymous header values.
        if name is None:
            name = '-'
        if mode is None:
            mode = 438
        # Header, then the payload in 45-byte chunks, then the trailer.
        header = 'begin %o %s\n' % (mode & 511, name)
        out_file.write(header.encode('ascii'))
        chunk = in_file.read(45)
        while chunk:
            out_file.write(binascii.b2a_uu(chunk))
            chunk = in_file.read(45)
        out_file.write(b' \nend\n')
    finally:
        for handle in to_close:
            handle.close()
def decode(in_file, out_file=None, mode=None, quiet=False):
    """Uudecode in_file into out_file.

    in_file may be '-' (stdin), a filename, or an open binary file object.
    out_file defaults to the name from the 'begin' header (refusing to
    overwrite an existing file); it may also be '-', a filename, or an open
    binary file object. mode overrides the permissions from the header.
    quiet suppresses warnings about lines from broken uuencoders.

    Raises Error when no valid 'begin' header is found, when the default
    output file already exists, or when the input ends before 'end'.

    Fixes over the previous revision (decompilation artifacts):
    - the data loop used nested `while` statements where `if`s were meant,
      which hung forever once the 'end' line was reached and spun forever
      emitting warnings when quiet was false;
    - `os.path.chmod` does not exist (its AttributeError was silently
      swallowed, so output permissions were never set); use os.chmod.
    """
    opened_files = []
    if in_file == '-':
        in_file = sys.stdin.buffer
    elif isinstance(in_file, str):
        in_file = open(in_file, 'rb')
        opened_files.append(in_file)
    try:
        # Scan for a 'begin' header whose mode field is valid octal.
        while True:
            hdr = in_file.readline()
            if not hdr:
                raise Error('No valid begin line found in input file')
            if not hdr.startswith(b'begin'):
                continue
            hdrfields = hdr.split(b' ', 2)
            if len(hdrfields) == 3 and hdrfields[0] == b'begin':
                try:
                    int(hdrfields[1], 8)
                    break
                except ValueError:
                    pass
        if out_file is None:
            # Default output path comes from the header's name field.
            out_file = hdrfields[2].rstrip(b' \t\r\n\x0c').decode('ascii')
            if os.path.exists(out_file):
                raise Error('Cannot overwrite existing file: %s' % out_file)
        if mode is None:
            mode = int(hdrfields[1], 8)
        if out_file == '-':
            out_file = sys.stdout.buffer
        elif isinstance(out_file, str):
            fp = open(out_file, 'wb')
            # out_file is still the path string here; apply the header mode.
            os.chmod(out_file, mode)
            out_file = fp
            opened_files.append(out_file)
        # Decode data lines until the 'end' trailer.
        s = in_file.readline()
        while s.strip(b' \t\r\n\x0c') != b'end':
            try:
                data = binascii.a2b_uu(s)
            except binascii.Error as v:
                # Workaround for broken uuencoders: trust the length byte
                # and decode only the bytes it promises.
                nbytes = ((s[0] - 32 & 63)*4 + 5)//3
                data = binascii.a2b_uu(s[:nbytes])
                if not quiet:
                    sys.stderr.write('Warning: %s\n' % v)
            out_file.write(data)
            s = in_file.readline()
            if not s:
                raise Error('Truncated input file')
    finally:
        for f in opened_files:
            f.close()
def test():
    """Small command-line driver: uuencode by default, -d to decode.

    Positional arguments select the input and output (default stdin/stdout).
    -t marks the data as text; it is only honoured for the file side of the
    pipe and rejected for stdin/stdout.
    """
    import optparse
    parser = optparse.OptionParser(usage='usage: %prog [-d] [-t] [input [output]]')
    parser.add_option('-d', '--decode', dest='decode', help='Decode (instead of encode)?', default=False, action='store_true')
    parser.add_option('-t', '--text', dest='text', help='data is text, encoded format unix-compatible text?', default=False, action='store_true')
    (options, args) = parser.parse_args()
    if len(args) > 2:
        # parser.error already exits; sys.exit is a defensive fallback.
        parser.error('incorrect number of arguments')
        sys.exit(1)
    # Binary std streams are the defaults; filenames override them below.
    input = sys.stdin.buffer
    output = sys.stdout.buffer
    if len(args) > 0:
        input = args[0]
    if len(args) > 1:
        output = args[1]
    if options.decode:
        if options.text:
            if isinstance(output, str):
                output = open(output, 'wb')
            else:
                print(sys.argv[0], ': cannot do -t to stdout')
                sys.exit(1)
        decode(input, output)
    else:
        if options.text:
            if isinstance(input, str):
                input = open(input, 'rb')
            else:
                print(sys.argv[0], ': cannot do -t from stdin')
                sys.exit(1)
        encode(input, output)
# Allow running this module directly as a command-line uuencode/uudecode tool.
if __name__ == '__main__':
    test()
| [
"jp@bellgeorge.com"
] | jp@bellgeorge.com |
fc88b01f572ee5e686b4e934fd4d14b69c47ff05 | c0595eca0c5cbc5cad7b9cd84ecc8f5c5b6e1a84 | /app/auth/forms.py | de1fa77ad193937198163c7277f77ed238719d82 | [] | no_license | libinglove/flask_app | e8f394d71e46a3531ac5fd43e2eab5b765752a24 | e3c19e7ef64de49ac257d2539e434d664c4d53ec | refs/heads/master | 2020-04-15T04:36:16.423571 | 2019-01-12T01:26:35 | 2019-01-12T01:26:35 | 164,389,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,185 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import Required, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
from ..models import User
class LoginForm(FlaskForm):
    """Sign-in form: email + password with an optional remember-me flag."""
    email = StringField('Email', validators=[Required(), Length(1, 64),
                                             Email()])
    password = PasswordField('Password', validators=[Required()])
    remember_me = BooleanField('Keep me logged in')
    submit = SubmitField('登录')
class RegistrationForm(FlaskForm):
    """New-account form; enforces unique email/username via DB lookups."""
    email = StringField('Email', validators=[Required(), Length(1, 64),
                                             Email()])
    # Usernames must start with a letter; dots and underscores allowed after.
    username = StringField('Username', validators=[
        Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                          'Usernames must have only letters, '
                                          'numbers, dots or underscores')])
    password = PasswordField('Password', validators=[
        Required(), EqualTo('password2', message='Passwords must match.')])
    password2 = PasswordField('Confirm password', validators=[Required()])
    submit = SubmitField('Register')
    def validate_email(self, field):
        """WTForms inline validator: reject emails already in the DB."""
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')
    def validate_username(self, field):
        """WTForms inline validator: reject usernames already in the DB."""
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already in use.')
class ChangePasswordForm(FlaskForm):
    """Password change for a logged-in user: old password + new pair."""
    old_password = PasswordField('Old password', validators=[Required()])
    password = PasswordField('New password', validators=[Required(), EqualTo('password2', message='Passwords must match')])
    password2 = PasswordField('Confirm new password', validators=[Required()])
    submit = SubmitField('Update Password')
class PasswordResetRequestForm(FlaskForm):
    """Step 1 of password reset: collect the account email address."""
    email = StringField('Email',validators=[Required(),Length(1,64),Email()])
    submit = SubmitField('重设密码')
class PasswordResetForm(FlaskForm):
    """Step 2 of password reset: verify the email and set a new password."""
    email = StringField('Email',validators=[Required(),Length(1,64),Email()])
    password = PasswordField('新密码',validators=[Required(),EqualTo('password2',message='密码必须匹配')])
    password2 = PasswordField('确认密码',validators=[Required()])
    submit = SubmitField('重设密码')
    def validate_email(self,field):
        """WTForms inline validator: the email must belong to a known user."""
        if User.query.filter_by(email=field.data).first() is None :
            raise ValidationError('错误的邮箱地址')
class ChangeEmailForm(FlaskForm):
    """Email change form: new address plus current password for re-auth."""
    email = StringField('New Email', validators=[Required(), Length(1, 64),
                                                 Email()])
    password = PasswordField('Password', validators=[Required()])
    submit = SubmitField('Update Email Address')
    def validate_email(self, field):
        """WTForms inline validator: reject emails already in the DB."""
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')
| [
"18310275531@163.com"
] | 18310275531@163.com |
e06f579b86ff58dcdec29c650eeab1896303af0d | 8e082fd485d8f9bf79c8cf9fc5fb44d8ac2b2968 | /Test2.py | b09b622623c70a8b8404140f3ba74a851361c2a3 | [] | no_license | igor94grozdanic/GitHub1 | c6e4fda53d14f5f1cb8b8f4c7d89ee81e2cbb3ac | bd7d3a69448bb954c1d65fe545ad38834f17ca26 | refs/heads/master | 2022-12-24T14:29:00.756168 | 2020-10-04T20:36:45 | 2020-10-04T20:36:45 | 300,065,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | print("This line will be printed 3.") | [
"igor94grozdanic@gmail.com"
] | igor94grozdanic@gmail.com |
aac4d5fd43519d3e8b5e64343338316a33460a65 | 8dc84558f0058d90dfc4955e905dab1b22d12c08 | /third_party/catapult/telemetry/telemetry/web_perf/metrics/rendering_stats_unittest.py | e96c06c9f476e5202881973b78f825e7781a8062 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0"
] | permissive | meniossin/src | 42a95cc6c4a9c71d43d62bc4311224ca1fd61e03 | 44f73f7e76119e5ab415d4593ac66485e65d700a | refs/heads/master | 2022-12-16T20:17:03.747113 | 2020-09-03T10:43:12 | 2020-09-03T10:43:12 | 263,710,168 | 1 | 0 | BSD-3-Clause | 2020-05-13T18:20:09 | 2020-05-13T18:20:08 | null | UTF-8 | Python | false | false | 24,876 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
import unittest
from telemetry.timeline import async_slice
from telemetry.timeline import model
from telemetry.util import perf_tests_helper
from telemetry.util import statistics
from telemetry.web_perf import timeline_interaction_record as tir_module
from telemetry.web_perf.metrics import rendering_stats
class MockTimer(object):
  """Fake global timer, in milliseconds.

  Produces random step durations while keeping a single monotonically
  increasing clock, so every mock trace event gets a consistent timestamp.
  """

  def __init__(self):
    self.milliseconds = 0

  def Advance(self, low=0.1, high=1):
    # Move the clock forward by a random amount and report the step taken.
    step = random.uniform(low, high)
    self.milliseconds = self.milliseconds + step
    return step

  def AdvanceAndGet(self, low=0.1, high=1):
    # Advance the clock, then report the new absolute time.
    self.Advance(low, high)
    return self.milliseconds
class MockVblankTimer(object):
  """Fake vblank timer, in microseconds.

  Produces random integer step durations while keeping a single
  monotonically increasing clock, so mock DRM trace events get
  consistent timevals.
  """

  def __init__(self):
    self.microseconds = 200000000

  def TvAdvance(self, low=100, high=1000):
    # Move the clock forward by a random whole number of microseconds.
    step = random.randint(low, high)
    self.microseconds = self.microseconds + step
    return step

  def TvAdvanceAndGet(self, low=100, high=1000):
    # Advance the clock, then report the new absolute time.
    self.TvAdvance(low, high)
    return self.microseconds
class ReferenceRenderingStats(object):
  """Holds the expected values that RenderingStats should reproduce.

  Each attribute is a list of per-interaction-range lists; AppendNewRange
  opens a fresh empty range on all four series at once.
  """

  def __init__(self):
    self.frame_timestamps = []
    self.frame_times = []
    self.approximated_pixel_percentages = []
    self.checkerboarded_pixel_percentages = []

  def AppendNewRange(self):
    # Start a new (empty) range on every tracked series.
    for series in (self.frame_timestamps,
                   self.frame_times,
                   self.approximated_pixel_percentages,
                   self.checkerboarded_pixel_percentages):
      series.append([])
class ReferenceInputLatencyStats(object):
  """ Stores expected data for comparison with actual input latency stats """
  def __init__(self):
    # (event name, latency) pairs expected from the latency metric.
    self.input_event_latency = []
    # The async sub-slice events the latencies were derived from.
    self.input_event = []
def AddSurfaceFlingerStats(mock_timer, thread, first_frame,
                           ref_stats=None):
  """Appends a mock SurfaceFlinger vsync event to the given thread.

  mock_timer: timer used to produce the event timestamp.
  thread: the timeline model thread that receives the new slice.
  first_frame: whether this is the first frame within an action's bounds.
  ref_stats: optional ReferenceRenderingStats collecting expected values.
  """
  event_data = {'frame_count': 1,
                'refresh_period': 16.6666}
  event_time = mock_timer.AdvanceAndGet()
  # Record the event as a zero-duration complete slice on the thread.
  thread.PushCompleteSlice(
      'SurfaceFlinger', 'vsync_before',
      event_time, duration=0.0, thread_timestamp=None, thread_duration=None,
      args={'data': event_data})
  if not ref_stats:
    return
  # A frame was output only when frame_count is 1.
  if event_data['frame_count'] == 1:
    if not first_frame:
      # Frame time is the delta from the previous frame in this range.
      previous = ref_stats.frame_timestamps[-1][-1]
      ref_stats.frame_times[-1].append(event_time - previous)
    ref_stats.frame_timestamps[-1].append(event_time)
def AddDrmEventFlipStats(mock_timer, vblank_timer, thread,
                         first_frame, ref_stats=None):
  """ Adds a random drm flip complete event.

  mock_timer: timer producing the trace-event timestamp.
  vblank_timer: timer producing the vblank timeval carried in the event.
  thread: The timeline model thread to which the event will be added.
  first_frame: Is this the first frame within the bounds of an action?
  ref_stats: A ReferenceRenderingStats object to record expected values.
  """
  # Create random data and timestamp for drm thread flip complete stats.
  # NOTE(review): int/int division -- sec/usec split relies on Python 2
  # floor division semantics (this file uses xrange elsewhere).
  vblank_timeval = vblank_timer.TvAdvanceAndGet()
  vblank_tv_sec = vblank_timeval / 1000000
  vblank_tv_usec = vblank_timeval % 1000000
  data = {'frame_count': 1,
          'vblank.tv_usec': vblank_tv_usec,
          'vblank.tv_sec': vblank_tv_sec}
  timestamp = mock_timer.AdvanceAndGet()
  # Add a slice with the event data to the given thread.
  thread.PushCompleteSlice(
      'benchmark,drm', 'DrmEventFlipComplete',
      timestamp, duration=0.0, thread_timestamp=None, thread_duration=None,
      args={'data': data})
  if not ref_stats:
    return
  # Add vblank timeval only if a frame was output.
  # Reference timestamps come from the vblank timeval (in ms), not from
  # the trace-event timestamp.
  cur_timestamp = vblank_tv_sec * 1000.0 + vblank_tv_usec / 1000.0
  if not first_frame:
    # Add frame_time if this is not the first frame in within the bounds of an
    # action.
    prev_timestamp = ref_stats.frame_timestamps[-1][-1]
    ref_stats.frame_times[-1].append(cur_timestamp - prev_timestamp)
  ref_stats.frame_timestamps[-1].append(cur_timestamp)
def AddDisplayRenderingStats(mock_timer, thread, first_frame,
                             ref_stats=None):
  """Appends a mock display rendering stats event to the given thread.

  mock_timer: timer used to produce the event timestamp.
  thread: the timeline model thread that receives the new slice.
  first_frame: whether this is the first frame within an action's bounds.
  ref_stats: optional ReferenceRenderingStats collecting expected values.
  """
  event_data = {'frame_count': 1}
  event_time = mock_timer.AdvanceAndGet()
  # Record the event as a zero-duration complete slice on the thread.
  thread.PushCompleteSlice(
      'benchmark', 'BenchmarkInstrumentation::DisplayRenderingStats',
      event_time, duration=0.0, thread_timestamp=None, thread_duration=None,
      args={'data': event_data})
  if not ref_stats:
    return
  if not first_frame:
    # Frame time is the delta from the previous frame in this range.
    previous = ref_stats.frame_timestamps[-1][-1]
    ref_stats.frame_times[-1].append(event_time - previous)
  ref_stats.frame_timestamps[-1].append(event_time)
def AddImplThreadRenderingStats(mock_timer, thread, first_frame,
                                ref_stats=None):
  """ Adds a random impl thread rendering stats event.

  mock_timer: timer producing the event timestamp.
  thread: The timeline model thread to which the event will be added.
  first_frame: Is this the first frame within the bounds of an action?
  ref_stats: A ReferenceRenderingStats object to record expected values.
  """
  # Create random data and timestamp for impl thread rendering stats.
  # Content areas are random so the percentage math below is exercised
  # with varying values.
  data = {'frame_count': 1,
          'visible_content_area': random.uniform(0, 100),
          'approximated_visible_content_area': random.uniform(0, 5),
          'checkerboarded_visible_content_area': random.uniform(0, 5)}
  timestamp = mock_timer.AdvanceAndGet()
  # Add a slice with the event data to the given thread.
  thread.PushCompleteSlice(
      'benchmark', 'BenchmarkInstrumentation::ImplThreadRenderingStats',
      timestamp, duration=0.0, thread_timestamp=None, thread_duration=None,
      args={'data': data})
  if not ref_stats:
    return
  # Add timestamp only if a frame was output
  if data['frame_count'] == 1:
    if not first_frame:
      # Add frame_time if this is not the first frame in within the bounds of an
      # action.
      prev_timestamp = ref_stats.frame_timestamps[-1][-1]
      ref_stats.frame_times[-1].append(timestamp - prev_timestamp)
    ref_stats.frame_timestamps[-1].append(timestamp)
    # Expected percentages: (partial area / visible area) * 100, rounded to
    # 3 decimals; DivideIfPossibleOrZero guards against a zero visible area.
    ref_stats.approximated_pixel_percentages[-1].append(
        round(statistics.DivideIfPossibleOrZero(
            data['approximated_visible_content_area'],
            data['visible_content_area']) * 100.0, 3))
    ref_stats.checkerboarded_pixel_percentages[-1].append(
        round(statistics.DivideIfPossibleOrZero(
            data['checkerboarded_visible_content_area'],
            data['visible_content_area']) * 100.0, 3))
def AddInputLatencyStats(mock_timer, start_thread, end_thread,
                         ref_latency_stats=None):
  """ Adds a random input latency stats event.

  Emits one gesture-scroll-update latency slice and one main-thread
  scroll-update latency slice, each as an async slice with a sub-slice
  carrying the latency component timestamps, plus a dummy impl-thread
  rendering stats event so the resulting timeline is accepted by
  RenderingStats.

  mock_timer: timer producing the component times (ms, stored as us).
  start_thread: The start thread on which the async slice is added.
  end_thread: The end thread on which the async slice is ended.
  ref_latency_stats: A ReferenceInputLatencyStats object for expected values.
  """
  # Component timestamps are in microseconds (timer ms * 1000).
  original_comp_time = mock_timer.AdvanceAndGet(2, 4) * 1000.0
  ui_comp_time = mock_timer.AdvanceAndGet(2, 4) * 1000.0
  begin_comp_time = mock_timer.AdvanceAndGet(2, 4) * 1000.0
  forward_comp_time = mock_timer.AdvanceAndGet(2, 4) * 1000.0
  end_comp_time = mock_timer.AdvanceAndGet(10, 20) * 1000.0
  data = {rendering_stats.ORIGINAL_COMP_NAME: {'time': original_comp_time},
          rendering_stats.UI_COMP_NAME: {'time': ui_comp_time},
          rendering_stats.BEGIN_COMP_NAME: {'time': begin_comp_time},
          rendering_stats.END_COMP_NAME: {'time': end_comp_time}}
  timestamp = mock_timer.AdvanceAndGet(2, 4)
  # Gesture scroll update: parent InputLatency slice with one sub-slice
  # holding the component data.
  tracing_async_slice = async_slice.AsyncSlice(
      'benchmark', 'InputLatency', timestamp)
  async_sub_slice = async_slice.AsyncSlice(
      'benchmark', rendering_stats.GESTURE_SCROLL_UPDATE_EVENT_NAME, timestamp)
  async_sub_slice.args = {'data': data}
  async_sub_slice.parent_slice = tracing_async_slice
  async_sub_slice.start_thread = start_thread
  async_sub_slice.end_thread = end_thread
  tracing_async_slice.sub_slices.append(async_sub_slice)
  tracing_async_slice.start_thread = start_thread
  tracing_async_slice.end_thread = end_thread
  start_thread.AddAsyncSlice(tracing_async_slice)
  # Add scroll update latency info.
  scroll_update_data = {
      rendering_stats.BEGIN_SCROLL_UPDATE_COMP_NAME: {'time': begin_comp_time},
      rendering_stats.FORWARD_SCROLL_UPDATE_COMP_NAME:
          {'time': forward_comp_time},
      rendering_stats.END_COMP_NAME: {'time': end_comp_time}
  }
  scroll_async_slice = async_slice.AsyncSlice(
      'benchmark', 'InputLatency', timestamp)
  scroll_async_sub_slice = async_slice.AsyncSlice(
      'benchmark', rendering_stats.MAIN_THREAD_SCROLL_UPDATE_EVENT_NAME,
      timestamp)
  scroll_async_sub_slice.args = {'data': scroll_update_data}
  scroll_async_sub_slice.parent_slice = scroll_async_slice
  scroll_async_sub_slice.start_thread = start_thread
  scroll_async_sub_slice.end_thread = end_thread
  scroll_async_slice.sub_slices.append(scroll_async_sub_slice)
  scroll_async_slice.start_thread = start_thread
  scroll_async_slice.end_thread = end_thread
  start_thread.AddAsyncSlice(scroll_async_slice)
  # Also add some dummy frame statistics so we can feed the resulting timeline
  # to RenderingStats.
  AddImplThreadRenderingStats(mock_timer, end_thread, False)
  if not ref_latency_stats:
    return
  ref_latency_stats.input_event.append(async_sub_slice)
  ref_latency_stats.input_event.append(scroll_async_sub_slice)
  # Expected latencies are end-minus-start component deltas, converted
  # from microseconds back to milliseconds.
  ref_latency_stats.input_event_latency.append((
      rendering_stats.GESTURE_SCROLL_UPDATE_EVENT_NAME,
      (data[rendering_stats.END_COMP_NAME]['time'] -
       data[rendering_stats.ORIGINAL_COMP_NAME]['time']) / 1000.0))
  scroll_update_time = (
      scroll_update_data[rendering_stats.END_COMP_NAME]['time'] -
      scroll_update_data[rendering_stats.BEGIN_SCROLL_UPDATE_COMP_NAME]['time'])
  ref_latency_stats.input_event_latency.append((
      rendering_stats.MAIN_THREAD_SCROLL_UPDATE_EVENT_NAME,
      scroll_update_time / 1000.0))
class RenderingStatsUnitTest(unittest.TestCase):
  """Tests for rendering_stats.RenderingStats aggregation.

  Each test builds a synthetic trace timeline (processes, threads and
  timeline-marker slices) using the module-level Add*Stats helpers, while
  accumulating the expected values in Reference*Stats objects, then checks
  that RenderingStats picks the correct event source and matches the
  reference.  NOTE(review): relies on model, rendering_stats, tir_module,
  perf_tests_helper and the Mock*/Reference*/Add* helpers defined elsewhere
  in this file -- semantics assumed from usage here.
  """
  def testBothSurfaceFlingerAndDisplayStats(self):
    """When SurfaceFlinger data is present, only its frames are counted."""
    timeline = model.TimelineModel()
    timer = MockTimer()
    ref_stats = ReferenceRenderingStats()
    ref_stats.AppendNewRange()
    surface_flinger = timeline.GetOrCreateProcess(pid=4)
    surface_flinger.name = 'SurfaceFlinger'
    surface_flinger_thread = surface_flinger.GetOrCreateThread(tid=41)
    renderer = timeline.GetOrCreateProcess(pid=2)
    browser = timeline.GetOrCreateProcess(pid=3)
    browser_main = browser.GetOrCreateThread(tid=31)
    browser_main.BeginSlice('webkit.console', 'ActionA',
                            timer.AdvanceAndGet(2, 4), '')
    # Create SurfaceFlinger stats and display rendering stats.
    for i in xrange(0, 10):
      first = (i == 0)
      AddSurfaceFlingerStats(timer, surface_flinger_thread, first, ref_stats)
      timer.Advance(2, 4)
    for i in xrange(0, 10):
      first = (i == 0)
      AddDisplayRenderingStats(timer, browser_main, first, None)
      timer.Advance(5, 10)
    browser_main.EndSlice(timer.AdvanceAndGet())
    timer.Advance(2, 4)
    browser.FinalizeImport()
    renderer.FinalizeImport()
    timeline_markers = timeline.FindTimelineMarkers(['ActionA'])
    records = [tir_module.TimelineInteractionRecord(e.name, e.start, e.end)
               for e in timeline_markers]
    stats = rendering_stats.RenderingStats(
        renderer, browser, surface_flinger, None, records)
    # Compare rendering stats to reference - Only SurfaceFlinger stats should
    # count
    self.assertEquals(stats.frame_timestamps, ref_stats.frame_timestamps)
    self.assertEquals(stats.frame_times, ref_stats.frame_times)
  def testBothDrmAndDisplayStats(self):
    """When DRM flip events are present, only they contribute frame stats."""
    timeline = model.TimelineModel()
    timer = MockTimer()
    vblank_timer = MockVblankTimer()
    ref_stats = ReferenceRenderingStats()
    ref_stats.AppendNewRange()
    gpu = timeline.GetOrCreateProcess(pid=6)
    gpu.name = 'GPU Process'
    gpu_drm_thread = gpu.GetOrCreateThread(tid=61)
    renderer = timeline.GetOrCreateProcess(pid=2)
    browser = timeline.GetOrCreateProcess(pid=3)
    browser_main = browser.GetOrCreateThread(tid=31)
    browser_main.BeginSlice('webkit.console', 'ActionA',
                            timer.AdvanceAndGet(2, 4), '')
    vblank_timer.TvAdvance(2000, 4000)
    # Create drm flip stats and display rendering stats.
    for i in xrange(0, 10):
      first = (i == 0)
      AddDrmEventFlipStats(timer, vblank_timer, gpu_drm_thread,
                           first, ref_stats)
      timer.Advance(2, 4)
      vblank_timer.TvAdvance(2000, 4000)
    for i in xrange(0, 10):
      first = (i == 0)
      AddDisplayRenderingStats(timer, browser_main, first, None)
      timer.Advance(5, 10)
      vblank_timer.TvAdvance(5000, 10000)
    browser_main.EndSlice(timer.AdvanceAndGet())
    timer.Advance(2, 4)
    vblank_timer.TvAdvance(2000, 4000)
    browser.FinalizeImport()
    renderer.FinalizeImport()
    timeline_markers = timeline.FindTimelineMarkers(['ActionA'])
    records = [tir_module.TimelineInteractionRecord(e.name, e.start, e.end)
               for e in timeline_markers]
    stats = rendering_stats.RenderingStats(
        renderer, browser, None, gpu, records)
    # Compare rendering stats to reference - Only drm flip stats should
    # count
    self.assertEquals(stats.frame_timestamps, ref_stats.frame_timestamps)
    self.assertEquals(stats.frame_times, ref_stats.frame_times)
  def testBothDisplayAndImplStats(self):
    """Display rendering stats win over impl-thread stats."""
    timeline = model.TimelineModel()
    timer = MockTimer()
    ref_stats = ReferenceRenderingStats()
    ref_stats.AppendNewRange()
    renderer = timeline.GetOrCreateProcess(pid=2)
    browser = timeline.GetOrCreateProcess(pid=3)
    browser_main = browser.GetOrCreateThread(tid=31)
    browser_main.BeginSlice('webkit.console', 'ActionA',
                            timer.AdvanceAndGet(2, 4), '')
    # Create main, impl, and display rendering stats.
    for i in xrange(0, 10):
      first = (i == 0)
      AddImplThreadRenderingStats(timer, browser_main, first, None)
      timer.Advance(2, 4)
    for i in xrange(0, 10):
      first = (i == 0)
      AddDisplayRenderingStats(timer, browser_main, first, ref_stats)
      timer.Advance(5, 10)
    browser_main.EndSlice(timer.AdvanceAndGet())
    timer.Advance(2, 4)
    browser.FinalizeImport()
    renderer.FinalizeImport()
    timeline_markers = timeline.FindTimelineMarkers(['ActionA'])
    records = [tir_module.TimelineInteractionRecord(e.name, e.start, e.end)
               for e in timeline_markers]
    stats = rendering_stats.RenderingStats(
        renderer, browser, None, None, records)
    # Compare rendering stats to reference - Only display stats should count
    self.assertEquals(stats.frame_timestamps, ref_stats.frame_timestamps)
    self.assertEquals(stats.frame_times, ref_stats.frame_times)
  def testRangeWithoutFrames(self):
    """An interaction record containing no frames yields an empty range."""
    timer = MockTimer()
    timeline = model.TimelineModel()
    # Create a renderer process, with a main thread and impl thread.
    renderer = timeline.GetOrCreateProcess(pid=2)
    renderer_main = renderer.GetOrCreateThread(tid=21)
    renderer_compositor = renderer.GetOrCreateThread(tid=22)
    # Create 10 main and impl rendering stats events for Action A.
    renderer_main.BeginSlice('webkit.console', 'ActionA',
                             timer.AdvanceAndGet(2, 4), '')
    for i in xrange(0, 10):
      first = (i == 0)
      AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
    timer.Advance(2, 4)
    # Create 5 main and impl rendering stats events not within any action.
    for i in xrange(0, 5):
      first = (i == 0)
      AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
    # Create Action B without any frames. This should trigger
    # NotEnoughFramesError when the RenderingStats object is created.
    # NOTE(review): despite the comment above, no exception is asserted here;
    # the test only checks that the second range stays empty -- confirm intent.
    renderer_main.BeginSlice('webkit.console', 'ActionB',
                             timer.AdvanceAndGet(2, 4), '')
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
    renderer.FinalizeImport()
    timeline_markers = timeline.FindTimelineMarkers(['ActionA', 'ActionB'])
    records = [tir_module.TimelineInteractionRecord(e.name, e.start, e.end)
               for e in timeline_markers]
    stats = rendering_stats.RenderingStats(renderer, None, None, None, records)
    self.assertEquals(0, len(stats.frame_timestamps[1]))
  def testFromTimeline(self):
    """End-to-end aggregation across several interaction records."""
    timeline = model.TimelineModel()
    # Create a browser process and a renderer process, and a main thread and
    # impl thread for each.
    browser = timeline.GetOrCreateProcess(pid=1)
    browser_main = browser.GetOrCreateThread(tid=11)
    browser_compositor = browser.GetOrCreateThread(tid=12)
    renderer = timeline.GetOrCreateProcess(pid=2)
    renderer_main = renderer.GetOrCreateThread(tid=21)
    renderer_compositor = renderer.GetOrCreateThread(tid=22)
    timer = MockTimer()
    renderer_ref_stats = ReferenceRenderingStats()
    browser_ref_stats = ReferenceRenderingStats()
    browser_ref_stats.AppendNewRange()
    renderer_ref_stats.AppendNewRange()
    # Add display rendering stats.
    browser_main.BeginSlice('webkit.console', 'Action0',
                            timer.AdvanceAndGet(2, 4), '')
    for i in xrange(0, 10):
      first = (i == 0)
      AddDisplayRenderingStats(timer, browser_main, first, browser_ref_stats)
      timer.Advance(5, 10)
    browser_main.EndSlice(timer.AdvanceAndGet(2, 4))
    # Create 10 main and impl rendering stats events for Action A.
    renderer_main.BeginSlice('webkit.console', 'ActionA',
                             timer.AdvanceAndGet(2, 4), '')
    renderer_ref_stats.AppendNewRange()
    browser_ref_stats.AppendNewRange()
    for i in xrange(0, 10):
      first = (i == 0)
      AddImplThreadRenderingStats(
          timer, renderer_compositor, first, renderer_ref_stats)
      AddImplThreadRenderingStats(
          timer, browser_compositor, first, None)
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
    # Create 5 main and impl rendering stats events not within any action.
    for i in xrange(0, 5):
      first = (i == 0)
      AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
      AddImplThreadRenderingStats(timer, browser_compositor, first, None)
    # Create 10 main and impl rendering stats events for Action B.
    renderer_main.BeginSlice('webkit.console', 'ActionB',
                             timer.AdvanceAndGet(2, 4), '')
    renderer_ref_stats.AppendNewRange()
    browser_ref_stats.AppendNewRange()
    for i in xrange(0, 10):
      first = (i == 0)
      AddImplThreadRenderingStats(
          timer, renderer_compositor, first, renderer_ref_stats)
      AddImplThreadRenderingStats(
          timer, browser_compositor, first, None)
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
    # Create 10 main and impl rendering stats events for Action A.
    renderer_main.BeginSlice('webkit.console', 'ActionA',
                             timer.AdvanceAndGet(2, 4), '')
    renderer_ref_stats.AppendNewRange()
    browser_ref_stats.AppendNewRange()
    for i in xrange(0, 10):
      first = (i == 0)
      AddImplThreadRenderingStats(
          timer, renderer_compositor, first, renderer_ref_stats)
      AddImplThreadRenderingStats(
          timer, browser_compositor, first, None)
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
    timer.Advance(2, 4)
    browser.FinalizeImport()
    renderer.FinalizeImport()
    timeline_markers = timeline.FindTimelineMarkers(
        ['Action0', 'ActionA', 'ActionB', 'ActionA'])
    records = [tir_module.TimelineInteractionRecord(e.name, e.start, e.end)
               for e in timeline_markers]
    stats = rendering_stats.RenderingStats(
        renderer, browser, None, None, records)
    # Compare rendering stats to reference.
    self.assertEquals(stats.frame_timestamps,
                      browser_ref_stats.frame_timestamps)
    self.assertEquals(stats.frame_times, browser_ref_stats.frame_times)
    self.assertEquals(stats.approximated_pixel_percentages,
                      renderer_ref_stats.approximated_pixel_percentages)
    self.assertEquals(stats.checkerboarded_pixel_percentages,
                      renderer_ref_stats.checkerboarded_pixel_percentages)
  def testInputLatencyFromTimeline(self):
    """Latency events are collected per record and flattened per category."""
    timeline = model.TimelineModel()
    # Create a browser process and a renderer process.
    browser = timeline.GetOrCreateProcess(pid=1)
    browser_main = browser.GetOrCreateThread(tid=11)
    renderer = timeline.GetOrCreateProcess(pid=2)
    renderer_main = renderer.GetOrCreateThread(tid=21)
    timer = MockTimer()
    ref_latency = ReferenceInputLatencyStats()
    # Create 10 input latency stats events for Action A.
    renderer_main.BeginSlice('webkit.console', 'ActionA',
                             timer.AdvanceAndGet(2, 4), '')
    for _ in xrange(0, 10):
      AddInputLatencyStats(timer, browser_main, renderer_main, ref_latency)
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
    # Create 5 input latency stats events not within any action.
    timer.Advance(2, 4)
    for _ in xrange(0, 5):
      AddInputLatencyStats(timer, browser_main, renderer_main, None)
    # Create 10 input latency stats events for Action B.
    renderer_main.BeginSlice('webkit.console', 'ActionB',
                             timer.AdvanceAndGet(2, 4), '')
    for _ in xrange(0, 10):
      AddInputLatencyStats(timer, browser_main, renderer_main, ref_latency)
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
    # Create 10 input latency stats events for Action A.
    renderer_main.BeginSlice('webkit.console', 'ActionA',
                             timer.AdvanceAndGet(2, 4), '')
    for _ in xrange(0, 10):
      AddInputLatencyStats(timer, browser_main, renderer_main, ref_latency)
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
    browser.FinalizeImport()
    renderer.FinalizeImport()
    latency_events = []
    timeline_markers = timeline.FindTimelineMarkers(
        ['ActionA', 'ActionB', 'ActionA'])
    records = [tir_module.TimelineInteractionRecord(e.name, e.start, e.end)
               for e in timeline_markers]
    for record in records:
      if record.GetBounds().is_empty:
        continue
      latency_events.extend(rendering_stats.GetLatencyEvents(
          browser, record.GetBounds()))
    self.assertEquals(latency_events, ref_latency.input_event)
    event_latency_result = rendering_stats.ComputeEventLatencies(latency_events)
    self.assertEquals(event_latency_result,
                      ref_latency.input_event_latency)
    stats = rendering_stats.RenderingStats(
        renderer, browser, None, None, records)
    self.assertEquals(
        perf_tests_helper.FlattenList(stats.input_event_latency),
        [latency for name, latency in ref_latency.input_event_latency
         if name != rendering_stats.MAIN_THREAD_SCROLL_UPDATE_EVENT_NAME])
    self.assertEquals(
        perf_tests_helper.FlattenList(stats.main_thread_scroll_latency),
        [latency for name, latency in ref_latency.input_event_latency
         if name == rendering_stats.MAIN_THREAD_SCROLL_UPDATE_EVENT_NAME])
    self.assertEquals(
        perf_tests_helper.FlattenList(stats.gesture_scroll_update_latency),
        [latency for name, latency in ref_latency.input_event_latency
         if name == rendering_stats.GESTURE_SCROLL_UPDATE_EVENT_NAME])
| [
"arnaud@geometry.ee"
] | arnaud@geometry.ee |
f46478898719688e54f93619c63e8f87a40c6778 | 83016459a416564b6d5148eb49e9ad2bfb4e0b50 | /libreria_complejos.py | 5a8dd16f4153445105c3ef17b512ae5bd9dea036 | [] | no_license | CarlosOrduz777/Proyecto_complejos | de5a23101d7b94f5fb7c9f820fdbce87087dea4c | 18cb74f3077fa7b1966c29b29a7fe82260ce45ee | refs/heads/master | 2022-12-25T02:38:33.938592 | 2020-09-30T16:29:00 | 2020-09-30T16:29:00 | 286,089,461 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,123 | py | #Libreria de Numeros Complejos
#Fecha: 4/08/2020
#Autor: Carlos Javier Orduz Trujillo
from math import pi,atan,atan2,sin,cos,sqrt
def sumar_complejos(c1, c2):
    """Add two complex numbers given as [real, imag] pairs.

    (list, list) -> list
    """
    real = c1[0] + c2[0]
    imag = c1[1] + c2[1]
    return [real, imag]
def producto_complejos(c1, c2):
    """Multiply two complex numbers given as [real, imag] pairs.

    (list, list) -> list
    """
    a, b = c1
    c, d = c2
    # (a + bi)(c + di) = (ac - bd) + (ad + cb)i
    return [a * c - b * d, a * d + c * b]
def resta_complejos(c1, c2):
    """Subtract the second complex number from the first ([real, imag] pairs).

    (list, list) -> list
    """
    return [x - y for x, y in zip(c1, c2)]
def division_complejos(c1, c2):
    """Divide two complex numbers given as [real, imag] pairs.

    Prints a warning and returns None when dividing by zero.
    (list, list) -> list
    """
    try:
        num_re, num_im = c1[0], c1[1]
        den_re, den_im = c2[0], c2[1]
        denom = den_re ** 2 + den_im ** 2
        real = (num_re * den_re + num_im * den_im) / denom
        imag = (den_re * num_im - num_re * den_im) / denom
        return [real, imag]
    except ZeroDivisionError:
        print("No puedes dividir por cero")
def modulo_complejo(c):
    """Return the magnitude of a complex number [real, imag], rounded to
    4 decimal places.

    (list) -> float
    """
    magnitude = sqrt(c[0] ** 2 + c[1] ** 2)
    return round(magnitude, 4)
def conjugado(c):
    """Return the conjugate of a complex number [real, imag].

    (list) -> list
    """
    real, imag = c
    return [real, -imag]
def cartesiano_a_polar(c):
    """Convert a cartesian complex number [real, imag] to polar form
    [modulus, angle], with the angle rounded to 4 decimal places.

    (list) -> list
    """
    angle = round(atan2(c[1], c[0]), 4)
    return [modulo_complejo(c), angle]
def polar_a_cartesiano(c):
    """Convert a polar complex number [modulus, angle] to its approximate
    cartesian representation [real, imag]."""
    modulus, angle = c
    return [modulus * cos(angle), modulus * sin(angle)]
def fase_complejo(c):
    """Return the phase (argument) of a cartesian complex number [real, imag],
    rounded to 4 decimal places."""
    real, imag = c
    return round(atan2(imag, real), 4)
| [
"carlos.orduz@mail.escuelaing.edu.co"
] | carlos.orduz@mail.escuelaing.edu.co |
931d1d4f3cfb9a546c89598dd1f4e43ee3dffe6c | 3d3f2b56b073a8de123aeed141cf03932cebe456 | /engine/utils/constants.py | b0bc8b78e7d79100b6fdf30a180bce70776cf11e | [] | no_license | wilson-calixto/angular-electron-with-python | d8c9ec05294a21d07fc656567f94b5fc55af056f | a1a5e84bb58a51822857bbb5561887ac8150191c | refs/heads/main | 2023-04-11T19:01:55.286606 | 2021-05-04T15:43:24 | 2021-05-04T15:43:24 | 354,842,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | NEW_PDI_CROP='NEW_PDI_CROP'
# String tags identifying annotation / crop types used elsewhere in the engine.
WITH_WHITE_SCREEN='WITH_WHITE_SCREEN'
HDMI_ANN='HDMI_ANN'
# NOTE(review): value deliberately differs from the constant name -- confirm
# consumers expect the literal 'gray_level_crop'.
GRAY_LEVEL_ANN='gray_level_crop'
MURA_ANN='mura_ann'
# NOTE(review): meaning of 106 is not visible here -- confirm against the
# pattern table it indexes.
WHITE_AND_BLACK_PATTERN=106
| [
"wilsoncalisto2012@gmail.com"
] | wilsoncalisto2012@gmail.com |
405b40fed8c06654f5d01ec7edc2fac779de95ac | 6910b127858c5dab983fab4da044a6742875567f | /xsxc/yike.py | f7c76ebc0fb67ec47a7cddac36def7a31651b6dc | [] | no_license | amperwang/hitom | 58422d5129344e7fb4adb2eca61c3a0eb614d179 | 4887b0486edbcdd8bfc8b6a81f81dcead1b37aa0 | refs/heads/master | 2020-04-15T22:16:28.963029 | 2019-03-24T08:43:35 | 2019-03-24T08:43:35 | 165,066,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | from flask import Flask, render_template, url_for, request, jsonify
from flask_sqlalchemy import SQLAlchemy
import config
# Flask application wired to SQLAlchemy; connection settings (database URI
# etc.) come from the external `config` module.
app = Flask(__name__)
app.config.from_object(config)
db = SQLAlchemy(app)
class Student(db.Model):
    """One course sign-up record; phoneNum is the natural unique key."""
    __tablename__ = 'students'
    id = db.Column(db.Integer, primary_key = True, autoincrement = True)
    # Unique per student -- duplicate registrations are rejected in addInfo()
    phoneNum = db.Column(db.String(11), nullable = False, unique = True)
    userName = db.Column(db.String(16), nullable = False)
    course = db.Column(db.String(10), nullable = False)
    # NOTE(review): purpose of Number is not visible here (seat count? group
    # size?) -- confirm with the client that posts it.
    Number = db.Column(db.Integer, nullable = False)
@app.route('/yike')
def index():
    """Serve the sign-up page (templates/index.html)."""
    return render_template('index.html')
@app.route('/info', methods = ['POST'])
def addInfo():
    """Register a student from a JSON POST body.

    Expects the keys phoneNum, userName, course and Number; returns JSON
    {'result': 'success' | 'already_exist' | 'error'} -- 'error' covers any
    missing field.
    """
    ntfInfo = {'result' : 'error'}
    if request.method == 'POST':
        data = request.get_json()
        phoneNum = data.get('phoneNum')
        userName = data.get('userName')
        course = data.get('course')
        Number = data.get('Number')
        # PEP 8 (E711): compare against None with `is not`, never `!=`.
        if phoneNum is not None and userName is not None \
                and course is not None and Number is not None:
            # Reject duplicates -- phoneNum is unique in the schema.
            if Student.query.filter_by(phoneNum = phoneNum).first() is None:
                student = Student(phoneNum = phoneNum, userName = userName, course = course, Number = Number)
                db.session.add(student)
                db.session.commit()
                ntfInfo['result'] = 'success'
            else:
                ntfInfo['result'] = 'already_exist'
    return jsonify(ntfInfo)
if __name__ == '__main__':
    # Development server with the debugger on; not for production use.
    app.debug = True
    app.run()
| [
"wfwang1123@163.com"
] | wfwang1123@163.com |
e54cb9984892e2ea197e14be2a17bc7122fbe045 | 11dfdd1b1027c96a1b541d8b6de899518dec32ab | /test.py | 1172b9e4010dfbae9732ba6fe0431a7ee105ef05 | [] | no_license | AnthonyQuantum/python-course-tasks | 353db4e92392e328b411fdb91b9382a5c2dd6ddf | 0183195e1e3a0c89e4b6c17813fec799d31c5b81 | refs/heads/master | 2020-08-07T00:59:05.458581 | 2019-10-06T19:27:52 | 2019-10-06T19:27:52 | 213,230,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | print(AssertionError.mro()) | [
"noreply@github.com"
] | noreply@github.com |
8ac2981396ce3c864570de3b0c3c3b5ea3581540 | 3fefbb4aad2e49552de61ea867ca434979684cd2 | /routine/base/models.py | 50d84dfab04d947562f44cd446616210b0aefb31 | [] | no_license | riddhi0811/to-do-app | 9519613262631a4914b105c703e3a8b8f8af12d6 | 014956095d7ddccaf71130136341b577592c7aa5 | refs/heads/master | 2023-06-03T05:37:54.694936 | 2021-07-02T13:02:46 | 2021-07-02T13:02:46 | 382,345,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Task(models.Model):
    """A single to-do item, optionally tied to a user account."""
    # CASCADE: deleting the user removes their tasks; null/blank allow
    # ownerless tasks.
    user= models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)
    title= models.CharField(max_length=200)
    description= models.TextField(null=True,blank=True)
    # False = still to do; also drives the default ordering below.
    complete= models.BooleanField(default=False)
    created= models.DateTimeField(auto_now_add=True)
    def __str__(self):
        """Use the title as the human-readable representation."""
        return self.title
    class Meta:
        # Incomplete tasks (complete=False) sort before completed ones.
        ordering= ['complete']
"siddhidubey47@gmail.com"
] | siddhidubey47@gmail.com |
36dac1185836a41d62e7890b1a1d00a3ead74cbf | b6ee46de1a2cab744aedd942942265dad09a0779 | /Stack/Easy/1441. Build an Array with Stack Operations/solution.py | 8b4ffc07376780a7d7b582575a809b53ee5a7485 | [
"MIT"
] | permissive | tintindas/leetcode-solutions | 0e01245036a9bd36e93b51b8c8c1ae3b8ca9554c | eb97254dafddffccbce048ef04aea1e934277282 | refs/heads/main | 2023-04-20T03:11:15.218001 | 2021-05-20T17:11:28 | 2021-05-20T17:11:28 | 346,100,322 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | from typing import List
class Solution:
def buildArray(self, target: List[int], n: int) -> List[str]:
i = 0
j = 1
res = []
while i < len(target):
if target[i] == j:
res.append("Push")
i += 1
else:
res.extend(["Push", "Pop"])
j += 1
return res | [
"upamanyu.das.work@gmail.com"
] | upamanyu.das.work@gmail.com |
64bf3f596ddb68f5fd19cd6ddf3d2a13a5a0cebe | c377e63059c8176d404644305af5980662368679 | /questions_api/views.py | 6ee550131cf6a312d4b8fd29b337910526030eb0 | [
"MIT"
] | permissive | v-yadykin/questions | bf4cb4c8402d62257f691c6235b62033b4426653 | 83f6dd0e44b42b1bce6a2cdf94b3f892392f5170 | refs/heads/master | 2023-09-01T05:09:19.049570 | 2020-07-03T15:03:59 | 2020-07-03T19:20:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,877 | py | from rest_framework.views import APIView
from rest_framework import viewsets
from rest_framework import filters
from rest_framework.response import Response
from rest_framework import status
from rest_framework.authentication import TokenAuthentication, SessionAuthentication
from rest_framework.permissions import IsAuthenticated, IsAdminUser
import requests
from questions_api import serializers
from questions_api import models
INTELLECT_API_URL = 'http://vega.fcyb.mirea.ru/intellectapi/'
class UserViewSet(viewsets.ModelViewSet):
    """Full CRUD API for users, restricted to admin accounts.

    Supports exact-match search on the Telegram login: ?search=<tg_login>.
    """
    serializer_class = serializers.UserSerializer
    queryset = models.User.objects.all()
    filter_backends = (filters.SearchFilter,)
    search_fields = ('=tg_login',)
    permission_classes = (IsAdminUser,)
class QuestionViewSet(viewsets.ModelViewSet):
    """Full CRUD API for questions with exact-match filtering on status
    (?search=<status>)."""
    serializer_class = serializers.QuestionSerializer
    queryset = models.Question.objects.all()
    filter_backends = (filters.SearchFilter,)
    search_fields = ('=status',)
class GetResourceApiView(APIView):
    """Proxy endpoint: forwards a question's text to the external Intellect
    phrase-search service and relays the matched resources."""
    serializer_class = serializers.GetResourceSerializer
    def post(self, request):
        """Validate the payload, query the Intellect API, return its matches.

        Responds 400 on invalid input; when the upstream call fails, relays
        the upstream status code with a short error message.
        """
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
        # NOTE(review): this reads `serializer.question`, not validated_data --
        # confirm the serializer really exposes that attribute after is_valid().
        question_text = serializer.question.text
        data = {
            'phrase': question_text,
            'collection': 1,
            'batch-start': serializer.validated_data['batch_start'],
            'batch-size': serializer.validated_data['batch_size']
        }
        r = requests.post(INTELLECT_API_URL+'search-phrase', data=data)
        if not status.is_success(r.status_code):
            return Response('Failed to get resources', r.status_code)
        return Response({'question': question_text, **r.json()})
"VEL4EG@yandex.ru"
] | VEL4EG@yandex.ru |
a335c4193efcda24ba5d4b087319a3d44cf95b76 | 2183156fec0cf3f704a3f78ffe672ed90090fe8a | /problem_examples/signature/fastbit/checker.py | 5a81871ed827c667851116f9acd4b7a5cf4cca97 | [] | no_license | DMOJ/docs | 3714ee8ba36a1c35c9270b481345dffc2d302eb0 | 15ce3a0d6397e81c3a903ecca7184bce2441d17e | refs/heads/master | 2023-07-20T20:02:56.743010 | 2023-07-10T17:09:59 | 2023-07-10T17:09:59 | 31,271,968 | 25 | 126 | null | 2023-07-22T17:45:28 | 2015-02-24T17:23:40 | Python | UTF-8 | Python | false | false | 427 | py | from dmoj.result import CheckerResult
from dmoj.utils.unicode import utf8text
def check(process_output, judge_output, judge_input, point_value, submission_source, **kwargs):
    # Custom DMOJ checker: accept iff the program printed exactly one line
    # reading "Correct." (surrounding whitespace ignored).
    result = utf8text(process_output.rstrip()).split('\n')
    if len(result) != 1 or result[0].strip() != 'Correct.':
        return CheckerResult(False, 0)
    # Code-golf scoring: full points only when the submission source is under
    # 560 bytes, otherwise half points.
    return CheckerResult(True, int(point_value) / (1 if len(submission_source) < 560 else 2))
| [
"vulcainus@gmail.com"
] | vulcainus@gmail.com |
8b92c3aa92ffdd47189bf395cbe5c5ac6bb78180 | 5b48a2afed9f1021952a64331cc3977393cfdc3b | /ConfigMaker.py | db795b02cc24b658a3bd7d5902f130e1d24d012a | [] | no_license | snagultoof/UAEConfigMaker | ab167343b0b26dba6cc6fbb6e05e341e618e38c9 | b73a1e5d3d0829616a94645bb337ed1ca6c6f0de | refs/heads/master | 2021-01-21T05:10:14.315182 | 2017-02-23T22:25:20 | 2017-02-23T22:25:20 | 83,138,815 | 0 | 0 | null | 2017-02-25T14:56:05 | 2017-02-25T14:56:04 | null | UTF-8 | Python | false | false | 33,527 | py | import glob, platform
from urllib.request import *
#from urllib.request import urlopen, Request
import urllib
import ssl
import os
import math
import shutil
class bcolors:
    """ANSI escape codes for coloured terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    def disable(self):
        """Blank out the colour codes so subsequent output is plain text."""
        self.HEADER = ''
        self.OKBLUE = ''
        self.OKGREEN = ''
        self.WARNING = ''
        self.FAIL = ''
        self.ENDC = ''
def left(s, amount):
    """Return the first `amount` characters of `s` (BASIC-style Left$)."""
    prefix = s[:amount]
    return prefix
def right(s, amount):
    """Return the last `amount` characters of `s` (BASIC-style Right$).

    Fix: the previous `s[-amount:]` returned the WHOLE string when
    amount == 0 (because s[-0:] is s[0:]), e.g. when a fixes-file line ends
    with "|"; Right$(s, 0) must be the empty string.  Requests longer than
    the string still return the whole string, as before.
    """
    return s[len(s) - amount:]
def mid(s, point, amount):
    """Return `amount` characters of `s` starting at 0-based `point` (Mid$)."""
    start = point
    stop = point + amount
    return s[start:stop]
def midAMOS(s, point, amount):
    """1-based Mid$ as in AMOS BASIC; position 0 is treated as position 1."""
    if point == 0:
        point = 1
    start = point - 1
    return s[start:start + amount]
def MakeFullCD32Name(inName):
    """Map a CD32 ISO base name to its full display name.

    Looks the name up in Settings/CD32ISO_Longname_Fixes.txt, where every
    line has the form "short|long"; returns the name unchanged when the
    file or a matching entry is absent.
    """
    fixes_path = "Settings/CD32ISO_Longname_Fixes.txt"
    if os.path.isfile(fixes_path):
        with open(fixes_path) as handle:
            lines = [entry.strip() for entry in handle.readlines()]
        for line in lines:
            sep = line.find("|")
            if sep > -1:
                short_name = line[:sep]
                # Mirrors the original right() slice, including its
                # whole-string result when "|" is the final character.
                long_name = line[-(len(line) - sep - 1):]
                if inName == short_name:
                    inName = long_name
    return inName
def MakeFullName(inName):
    """Expand a CamelCase WHDLoad slave folder name into a display name.

    Inserts spaces before capitals/digits (AMOS-ported scanning loop), then
    applies hard-coded tidy-ups (CD32/AGA/memory/disk tags), per-title
    overrides from Settings/WHD_Longname_Fixes.txt ("find|replace" lines),
    and finally language-suffix rules (" De" -> " (Deutsch)" etc.).
    """
    ## special "clean up" rules
    inName = inName.replace("'n'"," 'n'")
    inName = inName.replace("+"," +")
    inName = inName.replace("&"," &")
    oldName = inName
    # "___" sentinel gives the scanning loop safe look-ahead past the end.
    inName = inName+"___"
    firstlength=len(inName)
    ## special loop
    ## for A in range(2,firstlength-3):
    A=1
    B=len(oldName)
    # 1-based scan (AMOS heritage); B grows with every inserted space so the
    # loop still covers the original characters.
    while A<len(inName) and A<B:
        A=A+1
        PREVCHAR2=ord(midAMOS(inName,A-2,1))
        PREVCHAR=ord(midAMOS(inName,A-1,1))
        THISCHAR=ord(midAMOS(inName,A,1))
        NECKCHAR=ord(midAMOS(inName,A+1,1))
        NECKCHAR2=ord(midAMOS(inName,A+2,1))
        ## ===== add spaces
        if THISCHAR>=65 and THISCHAR<=90:
            #Rem we are a capital letter
            ## ' special MB rule
            # NOTE(review): this branch can never fire -- THISCHAR cannot be
            # both "M" and a digit; the digit test likely meant another index.
            if chr(THISCHAR)=="M" and chr(NECKCHAR)=="B" and(THISCHAR>=48 and THISCHAR<=57):
                pass
            ## two underscores ... ignore
            elif PREVCHAR==95 and PREVCHAR2==95:
                pass
            ## previous is a capital A, but not part of AGA
            elif PREVCHAR==65 and THISCHAR != 71 and NECKCHAR != 65:
                inName = AddSpace(inName,A)
                A=A+1
                B=B+1
            ## and the previous letter is not a space , and not also capital, or dash
            elif PREVCHAR != 32 and PREVCHAR != 45 and not(PREVCHAR>=65 and PREVCHAR<=90):
                inName = AddSpace(inName,A)
                A=A+1
                B=B+1
        ## ' =====: Rem we are a number
        elif THISCHAR>=48 and THISCHAR<=57:
            ## 'and previous number was not a number and not a space
            if not(PREVCHAR>=48 and PREVCHAR<=57) and PREVCHAR!=32:
                inName = AddSpace(inName,A)
                A=A+1
                B=B+1
        if A>firstlength:
            break
    ## dirty manual fixes
    inName=inName.replace("  "," ")
    inName=inName.replace("___","")
    inName=inName.replace("CD 32","CD32")
    inName=inName.replace(" CD32"," [CD32]")
    inName=inName.replace(" CDTV"," [CDTV]")
    inName=inName.replace(" AGA"," [AGA]")
    inName=inName.replace(" 512 Kb"," (512Kb)")
    inName=inName.replace(" 1 MB"," (1MB)")
    inName=inName.replace(" 2 MB"," (2MB)")
    inName=inName.replace(" 4 MB"," (4MB)")
    inName=inName.replace(" 8 MB"," (8MB)")
    inName=inName.replace(" 1 Disk"," (1 Disk)")
    inName=inName.replace(" 2 Disk"," (2 Disk)")
    inName=inName.replace(" 3 Disk"," (3 Disk)")
    inName=inName.replace(" 4 Disk"," (4 Disk)")
    inName=inName.replace(" Files"," (Files)")
    inName=inName.replace(" Image"," (Image)")
    inName=inName.replace(" Chip"," (Chip)")
    inName=inName.replace(" Fast"," (Fast)")
    inName=inName.replace("(Fast) Break","Fast Break")
    inName=inName.replace("R³sselsheim","Russelsheim")
    ##' check the txt file
    fname = "Settings/WHD_Longname_Fixes.txt"
    content = ""
    if os.path.isfile(fname)==True:
        with open(fname) as f:
            content = f.readlines()
            content = [x.strip() for x in content]
            f.close()
        for ThisLine in content:
            if ThisLine.find("|") > -1:
                FindPart = left(ThisLine,ThisLine.find("|"))
                ReplacePart = right(ThisLine,len(ThisLine)-ThisLine.find("|")-1)
                if inName == FindPart:
                    inName = ReplacePart
    # language rules
    language=right(inName,3)
    if language == " De":
        inName=left(inName,len(inName)-3)+" (Deutsch)"
    elif language==" Pl":
        inName=left(inName,len(inName)-3)+" (Polski)"
    elif language==" It":
        inName=left(inName,len(inName)-3)+" (Italiano)"
    elif language==" Dk":
        inName=left(inName,len(inName)-3)+" (Dansk)"
    elif language==" Es":
        inName=left(inName,len(inName)-3)+" (Espanol)"
    elif language==" Fr":
        inName=left(inName,len(inName)-3)+" (Francais)"
    elif language==" Cz":
        inName=left(inName,len(inName)-3)+" (Czech)"
    elif language==" Se":
        inName=left(inName,len(inName)-3)+" (Svenska)"
    elif language==" Fi":
        inName=left(inName,len(inName)-3)+" (Finnish)"
    return inName
def AddSpace(inBit, pos):
    """Insert a single space before 1-based position `pos` of `inBit`."""
    head = left(inBit, pos - 1)
    tail = right(inBit, len(inBit) - pos + 1)
    return head + " " + tail
def DownloadUpdate(infile):
    """Fetch the latest copy of `infile` from the project's GitHub master
    branch and overwrite the local file of the same name.

    Best-effort: any download failure is reported on stdout and ignored.
    """
    # GetFile = "http://www.djcresswell.com/RetroPie/ConfigMaker/" +infile
    GetFile = "https://raw.githubusercontent.com/HoraceAndTheSpider/UAEConfigMaker/master/" +infile
    PutFile = "" + infile
    try:
        a = urllib.request.urlretrieve(GetFile, PutFile)
        print ("Update downloaded for " + bcolors.OKBLUE+ infile + bcolors.ENDC + ".")
    # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt and
    # SystemExit; Exception still covers every URL/HTTP/socket error.
    except Exception:
        print ("No update downloaded for " + bcolors.FAIL + infile + bcolors.ENDC + ". (Web Page not found)")
    return
def ChexList(inFile, GameName):
    """Return True when `GameName` appears as a whole line in Settings/<inFile>.

    A missing settings file simply means "not listed" and yields False.
    """
    list_path = "Settings/" + inFile
    if not os.path.isfile(list_path):
        return False
    with open(list_path) as handle:
        entries = [raw.strip() for raw in handle.readlines()]
    return any(entry == GameName for entry in entries)
def FindHostOption(inOption):
    """Look up `inOption` in hostconfig.uaetemp and return its value.

    Matches the first line that starts with the option name, then strips the
    name, every '=' character and surrounding whitespace; returns "" when
    the file or the option is absent.
    """
    host_path = "hostconfig.uaetemp"
    if not os.path.isfile(host_path):
        return ""
    with open(host_path) as handle:
        lines = [raw.strip() for raw in handle.readlines()]
    for candidate in lines:
        if candidate[:len(inOption)] == inOption:
            value = candidate.replace(inOption, "")
            return value.replace("=", "").strip()
    return ""
def DoScan(inputdir,pathname):
if os.path.isdir(inputdir + pathname) == True:
print("Config Save Path: " + bcolors.OKBLUE + inputdir + bcolors.ENDC )
print("Games Files Path: " + bcolors.BOLD + bcolors.OKBLUE + pathname + bcolors.ENDC )
print()
else:
print("Specified Scan path "+ bcolors.FAIL + inputdir + pathname + bcolors.ENDC + " does not exist")
return
##' what type of scan is it -- default , Whdload folder
if pathname.find("WHDLoad_HDF")>-1 and pathname.lower().find(".hdf"):
scanmode="WHDLoadHDF"
hdsettings=",0"
elif pathname.find("WHDLoad")>-1:
scanmode="WHDLoad"
hdsettings=",0"
elif pathname.lower().find(".hdf")>-1:
scanmode="HDF"
hdsettings=",32,1,2,512,50,,uae"
elif pathname.lower().find(".adf")>-1:
scanmode="ADF"
elif pathname.lower().find("cd32")>-1:
scanmode="CD32"
elif pathname.lower().find("cdtv")>-1:
scanmode="CD32"
else:
scanmode="WHDLoad"
hdsettings=",0"
print("Scan Mode: " + bcolors.BOLD + bcolors.HEADER + scanmode + bcolors.ENDC)
print()
thefilter = ""
count = 1
SkipAll = 0
QuitButton = FindHostOption("button_for_quit")
MenuButton = FindHostOption("button_for_menu")
if QuitButton == "": QuitButton = -1
if MenuButton == "": MenuButton = -1
## cycle through all folders / files
for filename in glob.glob(inputdir + pathname+"/*"):
# WHDLOAD mode needs folders, the rest need files
if scanmode == "WHDLoad":
typetest = os.path.isdir(filename)
else:
typetest = os.path.isfile(filename)
thisfile = filename.replace(inputdir+pathname+"/","")
## # type filter applies
## # elif scanmode == "CD32" and (right(thisfile.lower(),4) != ".iso" and right(thisfile.lower(),4) != ".cue"):
## # not a folder and no sub cue or iso file
## elif scanmode == "CD32" and (os.path.isdir(filename) == False or os.path.isfile(filename + "/" + thisfile + ".cue")==False):
## pass
# name filter applies
if thefilter != '' and thisfile.find(thefilter) <0:
pass
# WHDLOAD will accept .zip
# WHDLOAD mode needs folders, mostly
# HDF file extension must be .hdf
# CD32 file extension must be .iso
# CD32 folders need sub file with extension as .cue
elif (scanmode == "WHDLoad" and os.path.isfile(filename) == True and right(thisfile.lower(),4) == ".zip") or \
(scanmode == "WHDLoad" and os.path.isdir(filename) == True) or \
(scanmode == "HDF" and os.path.isfile(filename) == True and right(thisfile.lower(),4) == ".hdf") or \
(scanmode == "CD32" and os.path.isfile(filename)==True and right(thisfile.lower(),4) == ".iso") or \
(scanmode == "CD32" and os.path.isdir(filename) == True and os.path.isfile(filename + "/" + thisfile + ".cue")==True):
## print ("Processed: " + bcolors.OKBLUE +str(count) + bcolors.ENDC )
## print ()
tempname = thisfile.replace("R³sselsheim","Russelsheim")
print (bcolors.OKBLUE +str(count) + bcolors.ENDC + ": Processing Game: " + bcolors.BOLD + tempname + bcolors.ENDC)
if thisfile.lower().endswith(".zip")==True:
thisfile=left(thisfile,len(thisfile)-4)
# standard 'expand name' thing
if scanmode=="WHDLoad":
fullgamename = MakeFullName(thisfile)
elif scanmode=="CD32":
fullgamename = MakeFullCD32Name(thisfile)
# there may be alternative one for TOSEC CD32 images....
print ()
print (" Full Name: " + bcolors.OKGREEN + fullgamename + bcolors.ENDC)
# normal method for selection
if fullgamename.find("AGA") > -1:
MachineType="A1200/020"
elif scanmode == "CD32":
MachineType="CD32"
elif fullgamename.find("AGA") > -1:
MachineType="A1200/020"
else:
MachineType="A600+"
# check if config already exists - yes/no to overwrite
CreateConfig = True
answer = ""
if os.path.isfile(inputdir+fullgamename+".uae") == True and SkipAll==0:
while answer != "Y" and answer !="N" and answer !="S" and answer != "A":
answer = input (bcolors.OKBLUE + " Config already exists - overwrite? "+"(Yes/No/Always/Skip) " + bcolors.ENDC)
if answer == "a" or answer =="s" or answer == "n" or answer == "y":
answer = answer.upper()
print()
elif os.path.isfile(inputdir+fullgamename+".uae") == True and SkipAll == -1:
CreateConfig = False
print(bcolors.OKBLUE + " Skipping existing file."+bcolors.ENDC)
print()
# process the answers
if answer == "N":
CreateConfig = False
elif answer == "Y":
CreateConfig = True
elif answer == "A":
SkipAll = 1
elif answer == "S":
SkipAll = -1
# what to do 'automatically'
if SkipAll == 1:
CreateConfig = True
## elif SkipAll == -1:
## CreateConfig = False
# this is where we start the code to actually build the config with chnages
if CreateConfig == True:
# check other parameters
# hardware options
# ======== SYSTEM HARDWARE =======
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# override the machine type, if on a list
if ChexList("System_A500.txt",thisfile) == True:
MachineType = "A500"
elif ChexList("System_A1200.txt",thisfile) == True:
MachineType = "A1200/020"
elif ChexList("System_A4000.txt",thisfile) == True:
MachineType = "A4000/040"
elif ChexList("System_CD32.txt",thisfile) == True:
MachineType = "CD32"
# PRESETS: CPU / chipset/ kickstart
Z3Ram=0
if MachineType=="A500":
ChipSet="OCS"
ACpu="68000"
Kickstart="kick13.rom"
KickstartExt=""
FastRam=0
ChipRam=1
ClockSpeed=0
elif MachineType=="A600+" or MachineType == "":
ChipSet="ECS_Agnus"
ACpu="68020"
Kickstart="kick31.rom"
KickstartExt=""
ChipRam=4
FastRam=8
ClockSpeed=14
elif MachineType=="A1200":
ChipSet="AGA"
ACpu="68ec020"
Kickstart="kick30.rom"
KickstartExt=""
ChipRam=4
FastRam=0
ClockSpeed=14
elif MachineType=="A1200/020":
ChipSet="AGA"
ACpu="68020"
Kickstart="kick31.rom"
KickstartExt=""
ChipRam=4
FastRam=4
ClockSpeed=14
elif MachineType=="A4000":
ChipSet="AGA"
ACpu="68040"
Kickstart="kick31.rom"
KickstartExt=""
ChipRam=4
FastRam=8
ClockSpeed=28
elif MachineType=="CD32":
ChipSet="AGA"
ACpu="68ec020"
Kickstart="cd32kick31.rom"
KickstartExt="cd32ext.rom"
ChipRam=4
FastRam=0
ClockSpeed=14
#'======== MEMORY SETTINGS =======
#' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' when we want different chip ram!!
OldChipRam=ChipRam
for LCOUNT in range(0 , 4):
ChipRam = int(math.pow(2,LCOUNT))/2
if ChipRam >= 1:
ChipRam = int(ChipRam)
if ChexList("Memory_ChipRam_"+str(ChipRam)+".txt",thisfile)==True:
ChipRam = int(ChipRam*2)
break
ChipRam = OldChipRam
#' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' when we want different fast ram!!
OldFastRam=FastRam
for LCOUNT in range(0 , 4):
FastRam = int(math.pow(2,LCOUNT))
if ChexList("Memory_FastRam_"+str(FastRam)+".txt",thisfile)==True:
break
FastRam = OldFastRam
#' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' when we want different Z3 ram!!
for LCOUNT in range(0 , 8):
Z3Ram = int(math.pow(2,LCOUNT))
if ChexList("Memory_Z3Ram_"+str(Z3Ram)+".txt",thisfile)==True:
break
Z3Ram=0
#'======== CHIPSET SETTINGS =======
#' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' sprite collisions
SprtCol="playfields"
if ChexList("Chipset_CollisionLevel_playfields.txt",thisfile)==True : SprtCol="playfields"
if ChexList("Chipset_CollisionLevel_none.txt",thisfile)==True : SprtCol="none"
if ChexList("Chipset_CollisionLevel_sprites.txt",thisfile)==True : SprtCol="sprites"
if ChexList("Chipset_CollisionLevel_full.txt",thisfile)==True : SprtCol="full"
#' imm. blits & fast copper
FastCopper = not ChexList("Chipset_NoFastCopper.txt",thisfile)
ImmediateBlits = ChexList("Chipset_ImmediateBlitter.txt",thisfile)
#'======== CPU SETTINGS =======
#' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' max emu speed
ACpuSpeed = "real"
if ChexList("CPU_MaxSpeed.txt",thisfile)==True : ACpuSpeed="max"
#' clock speed
if ChexList("CPU_ClockSpeed_7.txt",thisfile)==True : ClockSpeed=7
if ChexList("CPU_ClockSpeed_14.txt",thisfile)==True : ClockSpeed=14
if ChexList("CPU_ClockSpeed_28.txt",thisfile)==True : ClockSpeed=28
#' 24 bit addressing / compatible CPU / JIT Cache
_24BitAddress = not ChexList("CPU_No24BitAddress.txt",thisfile)
CompatibleCpu = ChexList("CPU_Compatible.txt",thisfile)
CycleExact = ChexList("CPU_CycleExact.txt",thisfile)
UseJIT = not ChexList("CPU_NoJIT.txt",thisfile)
#UseJIT =ChexList("CPU_ForceJIT.txt",thisfile)
#'======== DISPLAY SETTINGS =======
#' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' screen Y/X Offsets
ScrnOffsetY=0
for Z in range(-16, 16):
if ChexList("Screen_OffsetY_"+str(Z)+".txt",thisfile) ==True : ScrnOffsetY=Z
ScrnOffsetX=0
for Z in range(-16, 16):
if ChexList("Screen_OffsetX_"+str(Z)+".txt",thisfile) : ScrnOffsetX=Z
#' screen heights
ScrnHight=240
if ChexList("Screen_Height_270.txt",thisfile)==True : ScrnHight=270
if ChexList("Screen_Height_262.txt",thisfile)==True : ScrnHight=262
if ChexList("Screen_Height_256.txt",thisfile)==True : ScrnHight=256
if ChexList("Screen_Height_240.txt",thisfile)==True : ScrnHight=240
if ChexList("Screen_Height_224.txt",thisfile)==True : ScrnHight=224
if ChexList("Screen_Height_216.txt",thisfile)==True : ScrnHight=216
if ChexList("Screen_Height_200.txt",thisfile)==True : ScrnHight=200
#' screen widths
ScrnWidth=320
if ChexList("Screen_Width_384.txt",thisfile)==True : ScrnWidth=384
if ChexList("Screen_Width_352.txt",thisfile)==True : ScrnWidth=352
if ChexList("Screen_Width_320.txt",thisfile)==True : ScrnWidth=320
if ChexList("Screen_Width_768.txt",thisfile)==True : ScrnWidth=768
if ChexList("Screen_Width_704.txt",thisfile)==True : ScrnWidth=704
if ChexList("Screen_Width_640.txt",thisfile)==True : ScrnWidth=640
#' extras
_Aspect = bool(ChexList("Screen_Force43Aspect.txt",thisfile))
if FindHostOption("gfx_correct_aspect") !="" : _Aspect = FindHostOption("gfx_correct_aspect")
UseNTSC = ChexList("Screen_ForceNTSC.txt",thisfile)
#'======== CONTROL SETTINGS =======
#' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' mouse / mouse 2 / CD32
UseMouse1 = ChexList("Control_Port0_Mouse.txt",thisfile)
UseMouse2 = ChexList("Control_Port1_Mouse.txt",thisfile)
UseCD32Pad = ChexList("Control_CD32.txt",thisfile)
if scanmode=="CD32" : UseCD32Pad = True
#'======== MISC SETTINGS =======
#' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' BSD Socket / Floppy Speed etc
UseBSDS = ChexList("Misc_BSDSocket.txt",thisfile)
FloppySpeed=800
Disk = ["","","",""]
if ChexList("Floppy_Speed_100.txt",thisfile) == True : FloppySpeed = 100
if ChexList("Floppy_Speed_200.txt",thisfile) == True : FloppySpeed = 200
if ChexList("Floppy_Speed_400.txt",thisfile) == True : FloppySpeed = 400
if ChexList("Floppy_Speed_800.txt",thisfile) == True : FloppySpeed = 800
#'======== SETUP CONFIG =======
#' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' ....
#print("we are making a config ....")
fname = inputdir + fullgamename + ".uae"
shutil.copyfile("uaeconfig.uaetemp", fname)
if os.path.isfile(fname)==False:
print (bcolors.fail + "Error creating config." + bcolors.ENDC)
else:
print (" Editing File: " + bcolors.HEADER + fname + bcolors.ENDC)
# put the text from the file into a string
text_file = open(fname, "r")
ConfigText = text_file.read()
text_file.close()
# all the major find/replaces
# game / path
ConfigText = ConfigText.replace("<<game>>",thisfile)
ConfigText = ConfigText.replace("<<fullgame>>",fullgamename)
ConfigText = ConfigText.replace("<<hdpath>>",pathname)
ConfigText = ConfigText.replace("<<quitbutton>>",str(QuitButton))
ConfigText = ConfigText.replace("<<menubutton>>",str(MenuButton))
# screens
ConfigText = ConfigText.replace("<<screenheight>>",str(ScrnHight))
if ScrnWidth<321 : ScrnWidth=ScrnWidth*2
ConfigText = ConfigText.replace("<<screenwidth>>",str(ScrnWidth))
ConfigText = ConfigText.replace("<<offset_y>>",str(ScrnOffsetY))
ConfigText = ConfigText.replace("<<offset_x>>",str(ScrnOffsetX))
ConfigText = ConfigText.replace("<<43aspect>>",str(_Aspect))
ConfigText = ConfigText.replace("<<ntsc>>",str(bool(0-UseNTSC)))
# memory
ConfigText = ConfigText.replace("<<chipmem>>",str(ChipRam))
ConfigText = ConfigText.replace("<<fastmem>>",str(FastRam))
ConfigText = ConfigText.replace("<<z3mem>>",str(Z3Ram))
if Z3Ram>0 and (ACpu != "68020" and ACpu != "68040"):
ACpu="68020"
# chipset
ConfigText = ConfigText.replace("<<chipset>>",ChipSet)
ConfigText = ConfigText.replace("<<spritecollision>>",SprtCol)
ConfigText = ConfigText.replace("<<fastcopper>>",str(bool(0-FastCopper)))
ConfigText = ConfigText.replace("<<immediateblitter>>",str(bool(0-ImmediateBlits)))
# cpu
ConfigText = ConfigText.replace("<<kickstart>>",Kickstart)
ConfigText = ConfigText.replace("<<extkickstart>>",KickstartExt)
ConfigText = ConfigText.replace("<<cputype>>",ACpu)
ConfigText = ConfigText.replace("<<cpuspeed>>",ACpuSpeed)
if ClockSpeed==14:
ClockSpeed=1024
elif ClockSpeed==28:
ClockSpeed=128
else:
ClockSpeed=0
ConfigText = ConfigText.replace("<<clockspeed>>",str(ClockSpeed))
ConfigText = ConfigText.replace("<<cpucompatible>>",str(bool(0-CompatibleCpu)))
ConfigText = ConfigText.replace("<<cycleexact>>",str(bool(0-CompatibleCpu)))
ConfigText = ConfigText.replace("<<24bitaddress>>",str(bool(0-CycleExact)))
if UseJIT==False:
ConfigText = ConfigText.replace("<<jitcache>>","0")
else:
ConfigText = ConfigText.replace("<<jitcache>>","8192")
# misc
ConfigText = ConfigText.replace("<<bsdsocket>>",str(bool(0-UseBSDS)))
# hard disk files
DiskNr=0
if scanmode=="HDF" and os.path.isfile (pathname + thisfile.replace(".hdf","")+"_savedisk.adf") == True:
DiskNr=1
ConfigText = ConfigText.replace("<<diskpath0>>",pathname)
ConfigText = ConfigText.replace("<<disk0>>",thisfile.replace(".hdf","")+"_savedisk.adf")
ConfigText = ConfigText.replace("<<disktype0>>","0")
# disable the HDF parameter
else:
ConfigText = ConfigText.replace("hardfile2=",";hardfile2=")
ConfigText = ConfigText.replace("filesystem2=rw,DH2",";filesystem2=rw,DH2")
for LCOUNT in range(DiskNr,4):
ConfigText = ConfigText.replace("<<diskpath" + str(LCOUNT)+">>",pathname)
ConfigText = ConfigText.replace("<<disk" + str(LCOUNT)+">>",Disk[LCOUNT])
## print ("disk... "+Disk[LCOUNT])
if Disk[LCOUNT] == "":
DiskOn = "0"
else:
DiskNr=DiskNr+1
DiskOn = "1"
ConfigText = ConfigText.replace(";floppy" + str(LCOUNT),"floppy" + str(LCOUNT))
ConfigText = ConfigText.replace("<<disktype"+ str(LCOUNT) + ">>",str(DiskOn))
ConfigText = ConfigText.replace("<<floppyspeed>>",str(FloppySpeed))
ConfigText = ConfigText.replace("<<totaldisks>>",str(DiskNr))
if MachineType == "CD32":
ConfigText = ConfigText.replace("uaehf1=",";uaehf1=")
ConfigText = ConfigText.replace("uaehf0=",";uaehf0=")
ConfigText = ConfigText.replace("filesystem2=",";filesystem2=")
ConfigText = ConfigText.replace("<<cd32mode>>","1")
else:
ConfigText = ConfigText.replace("<<cd32mode>>","0")
# controls (TO BE WORKED ON)
if UseMouse1==True:
ConfigText = ConfigText.replace("pandora.custom_dpad=1",pathname)
ConfigText = ConfigText.replace("<<port0>>","mouse")
ConfigText = ConfigText.replace("<<port0mode>>","mousenowheel")
## if UseMouse2==True:
## ConfigText = ConfigText.replace("<<port1>>","mouse")
## ConfigText = ConfigText.replace("<<port1mode>>","mousenowheel")
if UseCD32Pad==True:
ConfigText = ConfigText.replace("<<port0>>","joy2")
ConfigText = ConfigText.replace("<<port0mode>>","cd32joy")
ConfigText = ConfigText.replace("<<port1>>","joy1")
ConfigText = ConfigText.replace("<<port1mode>>","cd32joy")
else:
ConfigText = ConfigText.replace("<<port0>>","joy2")
ConfigText = ConfigText.replace("<<port0mode>>","djoy")
ConfigText = ConfigText.replace("<<port1>>","joy1")
ConfigText = ConfigText.replace("<<port1mode>>","djoy")
# save out the config changes
text_file = open(fname, "w")
text_file.write(ConfigText)
text_file.close()
print ()
count = count + 1
print ("Folder Scan of "+ pathname +" Complete.")
print ()
return
## main section starting here...
# Flat script entry point: print a banner, pick the scan root for this host,
# refresh the per-game settings lists from the web, then scan each games folder.
print()
print(bcolors.BOLD + bcolors.OKBLUE + "HoraceAndTheSpider" + bcolors.ENDC + "'s " + bcolors.BOLD + "UAE Configuration Maker" + bcolors.ENDC + bcolors.OKGREEN + " (v2.1)" + bcolors.ENDC + " | " + "" + bcolors.FAIL + "www.ultimateamiga.co.uk" + bcolors.ENDC)
print()

## initialisations
# Disable https certificate verification so the downloads below work on hosts
# with broken cert stores.  Only AttributeError is expected here (older
# Pythons lack _create_unverified_context); the original bare `except:`
# would have hidden any other failure.
try:
    ssl._create_default_https_context = ssl._create_unverified_context
except AttributeError:
    pass

## -------- input dir ... i.e. where we will scan for Sub-Folders
if platform.system() == "Darwin":
    inputdir = "/Volumes/roms/amiga/"
    # NOTE(review): this developer-machine override immediately shadows the
    # path above; kept as-is to preserve behaviour.
    inputdir = "/Users/horaceandthespider/Documents/Gaming/AmigaWHD/WorkingFolder2/"
## -------- I SEE YOU AINGER! o_o
elif platform.node() == "RAVEN":
    inputdir = "C:\\Users\\oaing\\Desktop\\whdload\\"
else:
    inputdir = "//home/pi/RetroPie/roms/amiga/"

# paths/folders if needed
os.makedirs("Settings", exist_ok=True)

## we can go through all files in 'settings' and attempt a download of the file
for filename in glob.glob('Settings/*.txt'):
    DownloadUpdate(filename)

## do similar for the config template; it is essential, so bail out if absent.
DownloadUpdate("uaeconfig.uaetemp")
if not os.path.isfile("uaeconfig.uaetemp"):
    print(bcolors.FAIL + "Essential file: " + bcolors.BOLD + bcolors.OKBLUE + "uaeconfig.uaetemp" + bcolors.FAIL + bcolors.ENDC + " missing."+ bcolors.ENDC)
    raise SystemExit
print()

## go through the paths
##DoScan(inputdir,"Games_WHDLoad_DomTest")
##raise SystemExit
DoScan(inputdir, "Games_WHDLoad")
DoScan(inputdir, "Games_WHDLoad_AGA")
DoScan(inputdir, "Games_WHDLoad_CDTV")
DoScan(inputdir, "Games_WHDLoad_CD32")
DoScan(inputdir, "Games_WHDLoad_DemoVersions")
DoScan(inputdir, "Games_WHDLoad_AltVersions")
DoScan(inputdir, "Games_WHDLoad_AltLanguage")
DoScan(inputdir, "Games_WHDLoad_AGACD32_AltLanguage")
DoScan(inputdir, "Games_WHDLoad_AGACD32_AltVersions")
DoScan(inputdir, "Games_WHDLoad_Unofficial")
DoScan(inputdir, "Games_HDF")
DoScan(inputdir, "Games_CD32")
DoScan(inputdir, "Games_WHDLoad_HDF")
#DoScan(inputdir,"Games_CDTV")
#DoScan(inputdir,"Games_ADF")
#DoScan(inputdir,"Games_Script_Unreleased")
DoScan(inputdir, "Demos_WHDLoad")
raise SystemExit
| [
"horaceandthespider@hotmail.com"
] | horaceandthespider@hotmail.com |
0a607ad298916549426f6f843ef6ce749fadc185 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03610/s415894874.py | 9a33970973344ddf348592e3209a4248803c0737 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | s = list(input())
# Keep only the characters of s at even indices (0, 2, 4, ...) and print them.
t = s[::2]
print(''.join(t))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5f78dc2017f0e9588d5ed2188d02785b189ec637 | 0bb474290e13814c2498c086780da5096453da05 | /abc133/E/main.py | 20212d42d80ef6a27ba00b0743cbd41a23b91777 | [] | no_license | ddtkra/atcoder | 49b6205bf1bf6a50106b4ae94d2206a324f278e0 | eb57c144b5c2dbdd4abc432ecd8b1b3386244e30 | refs/heads/master | 2022-01-25T15:38:10.415959 | 2020-03-18T09:22:08 | 2020-03-18T09:22:08 | 208,825,724 | 1 | 0 | null | 2022-01-21T20:10:20 | 2019-09-16T14:51:01 | Python | UTF-8 | Python | false | false | 790 | py | #!/usr/bin/env python3
import sys
MOD = 1000000007  # type: int


def solve(N: int, K: int, a: "List[int]", b: "List[int]"):
    """ABC133 E: count K-colorings of the tree with edges a[i]-b[i] (1-indexed)
    such that any two vertices within distance 2 receive different colors.

    Prints the count modulo MOD and also returns it (the original stub
    returned None without printing anything; callers may ignore the return).
    """
    # Adjacency list for vertices 1..N.
    adj = [[] for _ in range(N + 1)]
    for u, v in zip(a, b):
        adj[u].append(v)
        adj[v].append(u)

    # Root the tree at vertex 1: the root itself has K choices; the i-th
    # child (0-based) of a vertex must differ from the vertex, from its
    # grandparent (if any), and from the i siblings colored before it.
    ans = K % MOD
    stack = [(1, 0)]  # (vertex, parent); parent 0 means "no parent"
    while stack:
        v, parent = stack.pop()
        base = K - 1 if parent == 0 else K - 2
        child_index = 0
        for w in adj[v]:
            if w == parent:
                continue
            # max(..., 0) makes the answer 0 when K is too small.
            ans = ans * max(base - child_index, 0) % MOD
            child_index += 1
            stack.append((w, v))
    print(ans)
    return ans
# Generated by 1.1.4 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
    """Read "N K" followed by N-1 edges from stdin and hand them to solve()."""
    tokens = sys.stdin.read().split()
    pos = iter(tokens)
    N = int(next(pos))  # type: int
    K = int(next(pos))  # type: int
    a = []  # type: "List[int]"
    b = []  # type: "List[int]"
    for _ in range(N - 1):
        a.append(int(next(pos)))
        b.append(int(next(pos)))
    solve(N, K, a, b)


if __name__ == '__main__':
    main()
| [
"deritefully@gmail.com"
] | deritefully@gmail.com |
92df5f0ae14e23c0600fd57b407368f340103547 | 4b431704fa58900a7b848aada3d10949be76ba65 | /student/views.py | 8ed0983ae38fa181f3a834d1c67585b80d645e7b | [] | no_license | priyankaonly1/Session_project | 1b5e48a77753cfa87c93fff7463d758cf0f1dcd8 | 41529270c0390627824b6de1aed6fdf4bb75a95c | refs/heads/main | 2023-06-03T04:21:38.411008 | 2021-06-17T10:32:13 | 2021-06-17T10:32:13 | 377,792,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | from django.shortcuts import render
# Create your views here.
def setsession(request):
    """Seed the session with a demo first/last name, then render the confirmation page."""
    session = request.session
    session['name'] = 'sonam'
    session['lname'] = 'Jha'
    return render(request, 'student/setsession.html')
# def delsession(request):
# if 'name' in request.session:
# del request.session['name']
# return render(request, 'student/delsession.html')
def getsession(request):
    """Fetch the stored names from the session (None when absent) and display them."""
    context = {
        'name': request.session.get('name'),
        'lname': request.session.get('lname'),
    }
    return render(request, 'student/getsession.html', context)
def delsession(request):
    # Clears all session data for this client before rendering the page.
    request.session.flush()
return render(request, 'student/delsession.html') | [
"priyankabiswasonly1@gmail.com"
] | priyankabiswasonly1@gmail.com |
8459423345d61a3988b5a726a810f1e52cd1eab4 | 4ef7725748abef45ff23053b7b2e87af1f93d24f | /Day_of_the_Programmer.py | 581079469673756a1f7d837c1c1184d7b6099c0b | [] | no_license | akash-yadagouda/code-library | 6020656cadd53d4fde4abc873df70f9d913a6406 | 004aa7627d54415b3e922a0c1d8ef5a2d2edfedb | refs/heads/master | 2022-07-09T22:38:08.090646 | 2020-05-17T06:04:26 | 2020-05-17T06:04:26 | 236,165,799 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py |
# HackerRank "Day of the Programmer": print the date of the 256th day of the
# given year under the calendar Russia used at the time (Julian before 1918,
# Gregorian afterwards, with the 1918 transition as a special case).
# The original printed unquoted date literals (a syntax error), hard-coded
# the years 2016/2017, and used a broken leap-year test.

def check(a):
    """Return True if year `a` is a leap year in the calendar in force that year."""
    if a < 1918:
        # Julian rule: every 4th year is a leap year.
        return a % 4 == 0
    # Gregorian rule.
    return a % 4 == 0 and (a % 100 != 0 or a % 400 == 0)


def day_of_programmer(year):
    """Return the 256th day of `year` formatted as dd.mm.yyyy."""
    if year == 1918:
        # Feb 1-13 were skipped in Russia's 1918 switch to the Gregorian calendar.
        return "26.09.1918"
    day = 12 if check(year) else 13
    return "%d.09.%d" % (day, year)


if __name__ == '__main__':
    year = int(input())
    print(day_of_programmer(year))
| [
"noreply@github.com"
] | noreply@github.com |
ee5638f427e266afc5d5855606c34b7c76ac09b2 | c68d36ed1d36ede96a5a22e1052c73b8515feaae | /HyperNews Portal/task/hypernews/news/views.py | 3cefca44c6ba79a74ca61d32697ea15338fb602a | [] | no_license | wangpengda1210/HyperNews-Portal | dd531889666794c11158dc92a9dcdb03293d409b | 436e257dd315999187650dedf3dce2ff12267a77 | refs/heads/main | 2023-03-03T03:22:59.644304 | 2021-02-09T00:19:23 | 2021-02-09T00:19:23 | 336,978,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,124 | py | from django.shortcuts import render
from django.views import View
from django.http import Http404, QueryDict
from django.shortcuts import redirect
import datetime
from collections import defaultdict
import json
from hypernews.settings import NEWS_JSON_PATH
# Load the article store once, at import time.
with open(NEWS_JSON_PATH, 'r') as f:
    news_list = json.load(f)
# Convert each article's 'created' string into a datetime so the views can
# group by .date() and templates can format it.
for news in news_list:
    news['created'] = datetime.datetime.strptime(news['created'], '%Y-%m-%d %H:%M:%S')
# Create your views here.
class IndexView(View):
    """Site root: bounce straight to the news listing."""

    def get(self, request, *args, **kwargs):
        # Relative redirect, resolved against the current URL.
        return redirect("news/")
class NewsContentView(View):
    """Show one article, looked up by its numeric `link` id; 404 when unknown."""

    def get(self, request, link, *args, **kwargs):
        wanted = int(link)
        for article in news_list:
            if article['link'] == wanted:
                return render(request, 'news/news_content.html',
                              context={
                                  'title': article['title'],
                                  'created': article['created'],
                                  'text': article['text'],
                              })
        raise Http404
class AllNewsView(View):
    """List all articles grouped by creation date, optionally filtered by ?q=.

    Fixes: the original built `defaultdict()` with no default factory (a
    plain dict in disguise) and then fell back to `.setdefault`; it also
    re-implemented `QueryDict.get` by hand.  Behaviour is unchanged.
    """

    def get(self, request, *args, **kwargs):
        # An empty keyword matches every title.
        keyword = request.GET.get('q', '')
        grouped = defaultdict(list)  # creation date -> matching articles
        for news in news_list:
            if keyword in news['title']:
                grouped[news['created'].date()].append(news)
        time_dict = [{'created': day, 'value': items} for day, items in grouped.items()]
        return render(request, 'news/news_all.html',
                      context={'time_dict': time_dict})
class CreateNewsView(View):
    """GET renders the creation form; POST appends the new article in memory."""

    def get(self, request, *args, **kwargs):
        return render(request, 'news/news_create.html')

    def post(self, request, *args, **kwargs):
        article = {
            'title': request.POST.get('title'),
            'text': request.POST.get('text'),
            'created': datetime.datetime.now(),
            # NOTE(review): assumes existing links are exactly 1..len(news_list);
            # otherwise duplicate ids are possible — confirm against the JSON store.
            'link': len(news_list) + 1,
        }
        news_list.append(article)
        return redirect('/news/')
| [
"515484505@qq.com"
] | 515484505@qq.com |
620f4e5be97f032147d6ed2f55233997dc8baf7d | 765fe526adfb30e37f47e84a732d3d4b05197e9a | /tripadvisor/items.py | bb3029f03b8c355f09e17f3e5dfbbd650256f7c9 | [] | no_license | YShinohara07/TripAdvisor_Scrapy | 4059670e29c61b5203123fd8864205bc6f45591f | a2d68d4630113bb878d0dfaf93e5d2ee1504f4e8 | refs/heads/master | 2020-12-21T11:41:10.075252 | 2020-01-30T23:02:14 | 2020-01-30T23:02:14 | 236,420,285 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class TripadvisorItem(scrapy.Item):
    """Scrapy item holding one scraped TripAdvisor listing record.

    Each attribute is a scrapy.Field container; the spider decides what it
    stores, so the notes below are inferred from the names — verify against
    the spider.
    """
    # define the fields for your item here like:
    title = scrapy.Field()
    rating = scrapy.Field()
    total_reviews = scrapy.Field()
    walking_grade = scrapy.Field()
    restaurant = scrapy.Field()
    attraction = scrapy.Field()
    page_link = scrapy.Field()
    amenities = scrapy.Field()
    address = scrapy.Field()
    #display_price = scrapy.Field()
    # Presumably per-rating review counts (excellent ... terrible buckets).
    num_excellent = scrapy.Field()
    num_good = scrapy.Field()
    num_avg = scrapy.Field()
    num_poor = scrapy.Field()
    num_bad = scrapy.Field()
| [
"yasuhiro.yks@gmail.com"
] | yasuhiro.yks@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.