blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c258b35a94733feceb797ff62582f7f2124933d7 | 632d7759536ed0726499c2d52c8eb13b5ab213ab | /Data/Packages/pygments/all/pygments/lexers/scripting.py | bffd8c0036ee4125ba80cc336285a932ba6d69e4 | [] | no_license | Void2403/sublime_text_3_costomize | e660ad803eb12b20e9fa7f8eb7c6aad0f2b4d9bc | c19977e498bd948fd6d8f55bd48c8d82cbc317c3 | refs/heads/master | 2023-08-31T21:32:32.791574 | 2019-05-31T11:46:19 | 2019-05-31T11:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56,509 | py | # -*- coding: utf-8 -*-
"""
pygments.lexers.scripting
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for scripting and embedded languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, combined, \
words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Whitespace
from pygments.util import get_bool_opt, get_list_opt, iteritems
__all__ = ['LuaLexer', 'MoonScriptLexer', 'ChaiscriptLexer', 'LSLLexer',
'AppleScriptLexer', 'RexxLexer', 'MOOCodeLexer', 'HybrisLexer']
class LuaLexer(RegexLexer):
    """
    For `Lua <http://www.lua.org>`_ source code.

    Additional options accepted:

    `func_name_highlighting`
        If given and ``True``, highlight builtin function names
        (default: ``True``).
    `disabled_modules`
        If given, must be a list of module names whose function names
        should not be highlighted. By default all modules are highlighted.

        To get a list of allowed modules have a look into the
        `_lua_builtins` module:

        .. sourcecode:: pycon

            >>> from pygments.lexers._lua_builtins import MODULES
            >>> MODULES.keys()
            ['string', 'coroutine', 'modules', 'io', 'basic', ...]
    """

    name = 'Lua'
    aliases = ['lua']
    filenames = ['*.lua', '*.wlua']
    mimetypes = ['text/x-lua', 'application/x-lua']

    tokens = {
        'root': [
            # lua allows a file to start with a shebang
            (r'#!(.*?)$', Comment.Preproc),
            default('base'),
        ],
        'base': [
            # long comments: --[[ ... ]] with optional '=' padding
            (r'(?s)--\[(=*)\[.*?\]\1\]', Comment.Multiline),
            (r'--.*$', Comment.Single),
            (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float),
            (r'(?i)\d+e[+-]?\d+', Number.Float),
            (r'(?i)0x[0-9a-f]*', Number.Hex),
            (r'\d+', Number.Integer),
            (r'\n', Text),
            (r'[^\S\n]', Text),
            # multiline strings
            (r'(?s)\[(=*)\[.*?\]\1\]', String),
            (r'(==|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#])', Operator),
            (r'[\[\]{}().,:;]', Punctuation),
            (r'(and|or|not)\b', Operator.Word),
            (r'(break|do|else|elseif|end|for|if|in|repeat|return|then|until|'
             r'while)\b', Keyword),
            (r'(local)\b', Keyword.Declaration),
            (r'(true|false|nil)\b', Keyword.Constant),
            (r'(function)\b', Keyword, 'funcname'),
            (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name),
            ("'", String.Single, combined('stringescape', 'sqs')),
            ('"', String.Double, combined('stringescape', 'dqs'))
        ],
        'funcname': [
            (r'\s+', Text),
            # Fixed: raw strings so '\w', '\.' and '\(' are regex escapes
            # rather than invalid Python string escapes (these warn on
            # modern CPython and will eventually become errors).
            (r'(?:([A-Za-z_]\w*)(\.))?([A-Za-z_]\w*)',
             bygroups(Name.Class, Punctuation, Name.Function), '#pop'),
            # inline function
            (r'\(', Punctuation, '#pop'),
        ],
        # if I understand correctly, every character is valid in a lua string,
        # so this state is only for later corrections
        'string': [
            ('.', String)
        ],
        'stringescape': [
            (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape)
        ],
        'sqs': [
            ("'", String, '#pop'),
            include('string')
        ],
        'dqs': [
            ('"', String, '#pop'),
            include('string')
        ]
    }

    def __init__(self, **options):
        """Read lexer options and precompute the set of builtin function
        names to highlight (all modules minus ``disabled_modules``)."""
        self.func_name_highlighting = get_bool_opt(
            options, 'func_name_highlighting', True)
        self.disabled_modules = get_list_opt(options, 'disabled_modules', [])

        self._functions = set()
        if self.func_name_highlighting:
            from pygments.lexers._lua_builtins import MODULES
            for mod, func in iteritems(MODULES):
                if mod not in self.disabled_modules:
                    self._functions.update(func)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Post-process plain ``Name`` tokens: known builtins become
        ``Name.Builtin`` and dotted names are split into
        name / ``.`` / name triples."""
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if value in self._functions:
                    yield index, Name.Builtin, value
                    continue
                elif '.' in value:
                    # The 'base' name pattern allows at most one dot, so
                    # this split always yields exactly two parts.
                    a, b = value.split('.')
                    yield index, Name, a
                    yield index + len(a), Punctuation, u'.'
                    yield index + len(a) + 1, Name, b
                    continue
            yield index, token, value
class MoonScriptLexer(LuaLexer):
    """
    For `MoonScript <http://moonscript.org>`_ source code.

    MoonScript compiles to Lua; this lexer inherits the builtin-function
    highlighting machinery from :class:`LuaLexer` but defines its own
    token table.

    .. versionadded:: 1.5
    """
    name = "MoonScript"
    aliases = ["moon", "moonscript"]
    filenames = ["*.moon"]
    mimetypes = ['text/x-moonscript', 'application/x-moonscript']
    tokens = {
        'root': [
            # a file may start with a shebang
            (r'#!(.*?)$', Comment.Preproc),
            default('base'),
        ],
        'base': [
            ('--.*$', Comment.Single),
            (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float),
            (r'(?i)\d+e[+-]?\d+', Number.Float),
            (r'(?i)0x[0-9a-f]*', Number.Hex),
            (r'\d+', Number.Integer),
            (r'\n', Text),
            (r'[^\S\n]+', Text),
            # Lua-style long strings: [[ ... ]] with optional '=' padding
            (r'(?s)\[(=*)\[.*?\]\1\]', String),
            # thin and fat arrows (function literals)
            (r'(->|=>)', Name.Function),
            # :name -- self-reference shorthand
            (r':[a-zA-Z_]\w*', Name.Variable),
            (r'(==|!=|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#!.\\:])', Operator),
            (r'[;,]', Punctuation),
            (r'[\[\]{}()]', Keyword.Type),
            # name: -- table key shorthand
            (r'[a-zA-Z_]\w*:', Name.Variable),
            (words((
                'class', 'extends', 'if', 'then', 'super', 'do', 'with',
                'import', 'export', 'while', 'elseif', 'return', 'for', 'in',
                'from', 'when', 'using', 'else', 'and', 'or', 'not', 'switch',
                'break'), suffix=r'\b'),
                Keyword),
            (r'(true|false|nil)\b', Keyword.Constant),
            (r'(and|or|not)\b', Operator.Word),
            (r'(self)\b', Name.Builtin.Pseudo),
            # @field / @@classfield instance and class member access
            (r'@@?([a-zA-Z_]\w*)?', Name.Variable.Class),
            (r'[A-Z]\w*', Name.Class),  # proper name
            (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name),
            ("'", String.Single, combined('stringescape', 'sqs')),
            ('"', String.Double, combined('stringescape', 'dqs'))
        ],
        'stringescape': [
            (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape)
        ],
        'sqs': [
            ("'", String.Single, '#pop'),
            (".", String)
        ],
        'dqs': [
            ('"', String.Double, '#pop'),
            (".", String)
        ]
    }
    def get_tokens_unprocessed(self, text):
        """Post-process the inherited Lua token stream: MoonScript
        classifies ``.`` as an operator rather than punctuation."""
        # set . as Operator instead of Punctuation
        for index, token, value in LuaLexer.get_tokens_unprocessed(self, text):
            if token == Punctuation and value == ".":
                token = Operator
            yield index, token, value
class ChaiscriptLexer(RegexLexer):
    """
    For `ChaiScript <http://chaiscript.com/>`_ source code.

    .. versionadded:: 2.0
    """
    name = 'ChaiScript'
    aliases = ['chai', 'chaiscript']
    filenames = ['*.chai']
    mimetypes = ['text/x-chaiscript', 'application/x-chaiscript']
    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'^\#.*?\n', Comment.Single)
        ],
        # JavaScript-style disambiguation: after an operator or opening
        # punctuation a '/' begins a regex literal, not division.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            default('#pop')
        ],
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            include('commentsandwhitespace'),
            (r'\n', Text),
            (r'[^\S\n]+', Text),
            # NOTE(review): the '\.\.' alternative is concatenated directly
            # with the following group (no '|' between the two adjacent
            # string literals), so '..' only matches when immediately
            # followed by one of those operators -- looks like a missing
            # '|'; confirm intent against upstream Pygments.
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|\.\.'
             r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'[=+\-*/]', Operator),
            (r'(for|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch'
             r')\b', Keyword, 'slashstartsregex'),
            (r'(var)\b', Keyword.Declaration, 'slashstartsregex'),
            (r'(attr|def|fun)\b', Keyword.Reserved),
            (r'(true|false)\b', Keyword.Constant),
            (r'(eval|throw)\b', Name.Builtin),
            # backtick-quoted operator-function references, e.g. `+`
            (r'`\S+`', Name.Builtin),
            (r'[$a-zA-Z_]\w*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"', String.Double, 'dqstring'),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ],
        # double-quoted strings support ${...} interpolation
        'dqstring': [
            (r'\$\{[^"}]+?\}', String.Interpol),
            (r'\$', String.Double),
            (r'\\\\', String.Double),
            (r'\\"', String.Double),
            (r'[^\\"$]+', String.Double),
            (r'"', String.Double, '#pop'),
        ],
    }
class LSLLexer(RegexLexer):
    """
    For Second Life's Linden Scripting Language source code.

    .. versionadded:: 2.0
    """

    name = 'LSL'
    aliases = ['lsl']
    filenames = ['*.lsl']
    mimetypes = ['text/x-lsl']
    flags = re.MULTILINE

    # The following class attributes are large, word-bounded alternation
    # regexes enumerating LSL's fixed vocabulary (keywords, builtin
    # functions, constants).  Order matters: they are tried top-to-bottom
    # in the 'root' state below, before the generic identifier rule.
    lsl_keywords = r'\b(?:do|else|for|if|jump|return|while)\b'
    lsl_types = r'\b(?:float|integer|key|list|quaternion|rotation|string|vector)\b'
    lsl_states = r'\b(?:(?:state)\s+\w+|default)\b'
    lsl_events = r'\b(?:state_(?:entry|exit)|touch(?:_(?:start|end))?|(?:land_)?collision(?:_(?:start|end))?|timer|listen|(?:no_)?sensor|control|(?:not_)?at_(?:rot_)?target|money|email|run_time_permissions|changed|attach|dataserver|moving_(?:start|end)|link_message|(?:on|object)_rez|remote_data|http_re(?:sponse|quest)|path_update|transaction_result)\b'
    # all ll* builtin library functions
    lsl_functions_builtin = r'\b(?:ll(?:ReturnObjectsBy(?:ID|Owner)|Json(?:2List|[GS]etValue|ValueType)|Sin|Cos|Tan|Atan2|Sqrt|Pow|Abs|Fabs|Frand|Floor|Ceil|Round|Vec(?:Mag|Norm|Dist)|Rot(?:Between|2(?:Euler|Fwd|Left|Up))|(?:Euler|Axes)2Rot|Whisper|(?:Region|Owner)?Say|Shout|Listen(?:Control|Remove)?|Sensor(?:Repeat|Remove)?|Detected(?:Name|Key|Owner|Type|Pos|Vel|Grab|Rot|Group|LinkNumber)|Die|Ground|Wind|(?:[GS]et)(?:AnimationOverride|MemoryLimit|PrimMediaParams|ParcelMusicURL|Object(?:Desc|Name)|PhysicsMaterial|Status|Scale|Color|Alpha|Texture|Pos|Rot|Force|Torque)|ResetAnimationOverride|(?:Scale|Offset|Rotate)Texture|(?:Rot)?Target(?:Remove)?|(?:Stop)?MoveToTarget|Apply(?:Rotational)?Impulse|Set(?:KeyframedMotion|ContentType|RegionPos|(?:Angular)?Velocity|Buoyancy|HoverHeight|ForceAndTorque|TimerEvent|ScriptState|Damage|TextureAnim|Sound(?:Queueing|Radius)|Vehicle(?:Type|(?:Float|Vector|Rotation)Param)|(?:Touch|Sit)?Text|Camera(?:Eye|At)Offset|PrimitiveParams|ClickAction|Link(?:Alpha|Color|PrimitiveParams(?:Fast)?|Texture(?:Anim)?|Camera|Media)|RemoteScriptAccessPin|PayPrice|LocalRot)|ScaleByFactor|Get(?:(?:Max|Min)ScaleFactor|ClosestNavPoint|StaticPath|SimStats|Env|PrimitiveParams|Link(?:PrimitiveParams|Number(?:OfSides)?|Key|Name|Media)|HTTPHeader|FreeURLs|Object(?:Details|PermMask|PrimCount)|Parcel(?:MaxPrims|Details|Prim(?:Count|Owners))|Attached|(?:SPMax|Free|Used)Memory|Region(?:Name|TimeDilation|FPS|Corner|AgentCount)|Root(?:Position|Rotation)|UnixTime|(?:Parcel|Region)Flags|(?:Wall|GMT)clock|SimulatorHostname|BoundingBox|GeometricCenter|Creator|NumberOf(?:Prims|NotecardLines|Sides)|Animation(?:List)?|(?:Camera|Local)(?:Pos|Rot)|Vel|Accel|Omega|Time(?:stamp|OfDay)|(?:Object|CenterOf)?Mass|MassMKS|Energy|Owner|(?:Owner)?Key|SunDirection|Texture(?:Offset|Scale|Rot)|Inventory(?:Number|Name|Key|Type|Creator|PermMask)|Permissions(?:Key)?|StartParameter|List(?:Length|EntryType)|Date|Agent(?:Size|Info|Language|List)|LandOwnerAt|NotecardLine|Script(?:Name|State))|(?:Get|Reset|GetAndReset)Time|PlaySound(?:Slave)?|LoopSound(?:Master|Slave)?|(?:Trigger|Stop|Preload)Sound|(?:(?:Get|Delete)Sub|Insert)String|To(?:Upper|Lower)|Give(?:InventoryList|Money)|RezObject|(?:Stop)?LookAt|Sleep|CollisionFilter|(?:Take|Release)Controls|DetachFromAvatar|AttachToAvatar(?:Temp)?|InstantMessage|(?:GetNext)?Email|StopHover|MinEventDelay|RotLookAt|String(?:Length|Trim)|(?:Start|Stop)Animation|TargetOmega|RequestPermissions|(?:Create|Break)Link|BreakAllLinks|(?:Give|Remove)Inventory|Water|PassTouches|Request(?:Agent|Inventory)Data|TeleportAgent(?:Home|GlobalCoords)?|ModifyLand|CollisionSound|ResetScript|MessageLinked|PushObject|PassCollisions|AxisAngle2Rot|Rot2(?:Axis|Angle)|A(?:cos|sin)|AngleBetween|AllowInventoryDrop|SubStringIndex|List2(?:CSV|Integer|Json|Float|String|Key|Vector|Rot|List(?:Strided)?)|DeleteSubList|List(?:Statistics|Sort|Randomize|(?:Insert|Find|Replace)List)|EdgeOfWorld|AdjustSoundVolume|Key2Name|TriggerSoundLimited|EjectFromLand|(?:CSV|ParseString)2List|OverMyLand|SameGroup|UnSit|Ground(?:Slope|Normal|Contour)|GroundRepel|(?:Set|Remove)VehicleFlags|(?:AvatarOn)?(?:Link)?SitTarget|Script(?:Danger|Profiler)|Dialog|VolumeDetect|ResetOtherScript|RemoteLoadScriptPin|(?:Open|Close)RemoteDataChannel|SendRemoteData|RemoteDataReply|(?:Integer|String)ToBase64|XorBase64|Log(?:10)?|Base64To(?:String|Integer)|ParseStringKeepNulls|RezAtRoot|RequestSimulatorData|ForceMouselook|(?:Load|Release|(?:E|Une)scape)URL|ParcelMedia(?:CommandList|Query)|ModPow|MapDestination|(?:RemoveFrom|AddTo|Reset)Land(?:Pass|Ban)List|(?:Set|Clear)CameraParams|HTTP(?:Request|Response)|TextBox|DetectedTouch(?:UV|Face|Pos|(?:N|Bin)ormal|ST)|(?:MD5|SHA1|DumpList2)String|Request(?:Secure)?URL|Clear(?:Prim|Link)Media|(?:Link)?ParticleSystem|(?:Get|Request)(?:Username|DisplayName)|RegionSayTo|CastRay|GenerateKey|TransferLindenDollars|ManageEstateAccess|(?:Create|Delete)Character|ExecCharacterCmd|Evade|FleeFrom|NavigateTo|PatrolPoints|Pursue|UpdateCharacter|WanderWithin))\b'
    # builtin constants, grouped by LSL type
    lsl_constants_float = r'\b(?:DEG_TO_RAD|PI(?:_BY_TWO)?|RAD_TO_DEG|SQRT2|TWO_PI)\b'
    lsl_constants_integer = r'\b(?:JSON_APPEND|STATUS_(?:PHYSICS|ROTATE_[XYZ]|PHANTOM|SANDBOX|BLOCK_GRAB(?:_OBJECT)?|(?:DIE|RETURN)_AT_EDGE|CAST_SHADOWS|OK|MALFORMED_PARAMS|TYPE_MISMATCH|BOUNDS_ERROR|NOT_(?:FOUND|SUPPORTED)|INTERNAL_ERROR|WHITELIST_FAILED)|AGENT(?:_(?:BY_(?:LEGACY_|USER)NAME|FLYING|ATTACHMENTS|SCRIPTED|MOUSELOOK|SITTING|ON_OBJECT|AWAY|WALKING|IN_AIR|TYPING|CROUCHING|BUSY|ALWAYS_RUN|AUTOPILOT|LIST_(?:PARCEL(?:_OWNER)?|REGION)))?|CAMERA_(?:PITCH|DISTANCE|BEHINDNESS_(?:ANGLE|LAG)|(?:FOCUS|POSITION)(?:_(?:THRESHOLD|LOCKED|LAG))?|FOCUS_OFFSET|ACTIVE)|ANIM_ON|LOOP|REVERSE|PING_PONG|SMOOTH|ROTATE|SCALE|ALL_SIDES|LINK_(?:ROOT|SET|ALL_(?:OTHERS|CHILDREN)|THIS)|ACTIVE|PASSIVE|SCRIPTED|CONTROL_(?:FWD|BACK|(?:ROT_)?(?:LEFT|RIGHT)|UP|DOWN|(?:ML_)?LBUTTON)|PERMISSION_(?:RETURN_OBJECTS|DEBIT|OVERRIDE_ANIMATIONS|SILENT_ESTATE_MANAGEMENT|TAKE_CONTROLS|TRIGGER_ANIMATION|ATTACH|CHANGE_LINKS|(?:CONTROL|TRACK)_CAMERA|TELEPORT)|INVENTORY_(?:TEXTURE|SOUND|OBJECT|SCRIPT|LANDMARK|CLOTHING|NOTECARD|BODYPART|ANIMATION|GESTURE|ALL|NONE)|CHANGED_(?:INVENTORY|COLOR|SHAPE|SCALE|TEXTURE|LINK|ALLOWED_DROP|OWNER|REGION(?:_START)?|TELEPORT|MEDIA)|OBJECT_(?:(?:PHYSICS|SERVER|STREAMING)_COST|UNKNOWN_DETAIL|CHARACTER_TIME|PHANTOM|PHYSICS|TEMP_ON_REZ|NAME|DESC|POS|PRIM_EQUIVALENCE|RETURN_(?:PARCEL(?:_OWNER)?|REGION)|ROO?T|VELOCITY|OWNER|GROUP|CREATOR|ATTACHED_POINT|RENDER_WEIGHT|PATHFINDING_TYPE|(?:RUNNING|TOTAL)_SCRIPT_COUNT|SCRIPT_(?:MEMORY|TIME))|TYPE_(?:INTEGER|FLOAT|STRING|KEY|VECTOR|ROTATION|INVALID)|(?:DEBUG|PUBLIC)_CHANNEL|ATTACH_(?:AVATAR_CENTER|CHEST|HEAD|BACK|PELVIS|MOUTH|CHIN|NECK|NOSE|BELLY|[LR](?:SHOULDER|HAND|FOOT|EAR|EYE|[UL](?:ARM|LEG)|HIP)|(?:LEFT|RIGHT)_PEC|HUD_(?:CENTER_[12]|TOP_(?:RIGHT|CENTER|LEFT)|BOTTOM(?:_(?:RIGHT|LEFT))?))|LAND_(?:LEVEL|RAISE|LOWER|SMOOTH|NOISE|REVERT)|DATA_(?:ONLINE|NAME|BORN|SIM_(?:POS|STATUS|RATING)|PAYINFO)|PAYMENT_INFO_(?:ON_FILE|USED)|REMOTE_DATA_(?:CHANNEL|REQUEST|REPLY)|PSYS_(?:PART_(?:BF_(?:ZERO|ONE(?:_MINUS_(?:DEST_COLOR|SOURCE_(ALPHA|COLOR)))?|DEST_COLOR|SOURCE_(ALPHA|COLOR))|BLEND_FUNC_(DEST|SOURCE)|FLAGS|(?:START|END)_(?:COLOR|ALPHA|SCALE|GLOW)|MAX_AGE|(?:RIBBON|WIND|INTERP_(?:COLOR|SCALE)|BOUNCE|FOLLOW_(?:SRC|VELOCITY)|TARGET_(?:POS|LINEAR)|EMISSIVE)_MASK)|SRC_(?:MAX_AGE|PATTERN|ANGLE_(?:BEGIN|END)|BURST_(?:RATE|PART_COUNT|RADIUS|SPEED_(?:MIN|MAX))|ACCEL|TEXTURE|TARGET_KEY|OMEGA|PATTERN_(?:DROP|EXPLODE|ANGLE(?:_CONE(?:_EMPTY)?)?)))|VEHICLE_(?:REFERENCE_FRAME|TYPE_(?:NONE|SLED|CAR|BOAT|AIRPLANE|BALLOON)|(?:LINEAR|ANGULAR)_(?:FRICTION_TIMESCALE|MOTOR_DIRECTION)|LINEAR_MOTOR_OFFSET|HOVER_(?:HEIGHT|EFFICIENCY|TIMESCALE)|BUOYANCY|(?:LINEAR|ANGULAR)_(?:DEFLECTION_(?:EFFICIENCY|TIMESCALE)|MOTOR_(?:DECAY_)?TIMESCALE)|VERTICAL_ATTRACTION_(?:EFFICIENCY|TIMESCALE)|BANKING_(?:EFFICIENCY|MIX|TIMESCALE)|FLAG_(?:NO_DEFLECTION_UP|LIMIT_(?:ROLL_ONLY|MOTOR_UP)|HOVER_(?:(?:WATER|TERRAIN|UP)_ONLY|GLOBAL_HEIGHT)|MOUSELOOK_(?:STEER|BANK)|CAMERA_DECOUPLED))|PRIM_(?:TYPE(?:_(?:BOX|CYLINDER|PRISM|SPHERE|TORUS|TUBE|RING|SCULPT))?|HOLE_(?:DEFAULT|CIRCLE|SQUARE|TRIANGLE)|MATERIAL(?:_(?:STONE|METAL|GLASS|WOOD|FLESH|PLASTIC|RUBBER))?|SHINY_(?:NONE|LOW|MEDIUM|HIGH)|BUMP_(?:NONE|BRIGHT|DARK|WOOD|BARK|BRICKS|CHECKER|CONCRETE|TILE|STONE|DISKS|GRAVEL|BLOBS|SIDING|LARGETILE|STUCCO|SUCTION|WEAVE)|TEXGEN_(?:DEFAULT|PLANAR)|SCULPT_(?:TYPE_(?:SPHERE|TORUS|PLANE|CYLINDER|MASK)|FLAG_(?:MIRROR|INVERT))|PHYSICS(?:_(?:SHAPE_(?:CONVEX|NONE|PRIM|TYPE)))?|(?:POS|ROT)_LOCAL|SLICE|TEXT|FLEXIBLE|POINT_LIGHT|TEMP_ON_REZ|PHANTOM|POSITION|SIZE|ROTATION|TEXTURE|NAME|OMEGA|DESC|LINK_TARGET|COLOR|BUMP_SHINY|FULLBRIGHT|TEXGEN|GLOW|MEDIA_(?:ALT_IMAGE_ENABLE|CONTROLS|(?:CURRENT|HOME)_URL|AUTO_(?:LOOP|PLAY|SCALE|ZOOM)|FIRST_CLICK_INTERACT|(?:WIDTH|HEIGHT)_PIXELS|WHITELIST(?:_ENABLE)?|PERMS_(?:INTERACT|CONTROL)|PARAM_MAX|CONTROLS_(?:STANDARD|MINI)|PERM_(?:NONE|OWNER|GROUP|ANYONE)|MAX_(?:URL_LENGTH|WHITELIST_(?:SIZE|COUNT)|(?:WIDTH|HEIGHT)_PIXELS)))|MASK_(?:BASE|OWNER|GROUP|EVERYONE|NEXT)|PERM_(?:TRANSFER|MODIFY|COPY|MOVE|ALL)|PARCEL_(?:MEDIA_COMMAND_(?:STOP|PAUSE|PLAY|LOOP|TEXTURE|URL|TIME|AGENT|UNLOAD|AUTO_ALIGN|TYPE|SIZE|DESC|LOOP_SET)|FLAG_(?:ALLOW_(?:FLY|(?:GROUP_)?SCRIPTS|LANDMARK|TERRAFORM|DAMAGE|CREATE_(?:GROUP_)?OBJECTS)|USE_(?:ACCESS_(?:GROUP|LIST)|BAN_LIST|LAND_PASS_LIST)|LOCAL_SOUND_ONLY|RESTRICT_PUSHOBJECT|ALLOW_(?:GROUP|ALL)_OBJECT_ENTRY)|COUNT_(?:TOTAL|OWNER|GROUP|OTHER|SELECTED|TEMP)|DETAILS_(?:NAME|DESC|OWNER|GROUP|AREA|ID|SEE_AVATARS))|LIST_STAT_(?:MAX|MIN|MEAN|MEDIAN|STD_DEV|SUM(?:_SQUARES)?|NUM_COUNT|GEOMETRIC_MEAN|RANGE)|PAY_(?:HIDE|DEFAULT)|REGION_FLAG_(?:ALLOW_DAMAGE|FIXED_SUN|BLOCK_TERRAFORM|SANDBOX|DISABLE_(?:COLLISIONS|PHYSICS)|BLOCK_FLY|ALLOW_DIRECT_TELEPORT|RESTRICT_PUSHOBJECT)|HTTP_(?:METHOD|MIMETYPE|BODY_(?:MAXLENGTH|TRUNCATED)|CUSTOM_HEADER|PRAGMA_NO_CACHE|VERBOSE_THROTTLE|VERIFY_CERT)|STRING_(?:TRIM(?:_(?:HEAD|TAIL))?)|CLICK_ACTION_(?:NONE|TOUCH|SIT|BUY|PAY|OPEN(?:_MEDIA)?|PLAY|ZOOM)|TOUCH_INVALID_FACE|PROFILE_(?:NONE|SCRIPT_MEMORY)|RC_(?:DATA_FLAGS|DETECT_PHANTOM|GET_(?:LINK_NUM|NORMAL|ROOT_KEY)|MAX_HITS|REJECT_(?:TYPES|AGENTS|(?:NON)?PHYSICAL|LAND))|RCERR_(?:CAST_TIME_EXCEEDED|SIM_PERF_LOW|UNKNOWN)|ESTATE_ACCESS_(?:ALLOWED_(?:AGENT|GROUP)_(?:ADD|REMOVE)|BANNED_AGENT_(?:ADD|REMOVE))|DENSITY|FRICTION|RESTITUTION|GRAVITY_MULTIPLIER|KFM_(?:COMMAND|CMD_(?:PLAY|STOP|PAUSE|SET_MODE)|MODE|FORWARD|LOOP|PING_PONG|REVERSE|DATA|ROTATION|TRANSLATION)|ERR_(?:GENERIC|PARCEL_PERMISSIONS|MALFORMED_PARAMS|RUNTIME_PERMISSIONS|THROTTLED)|CHARACTER_(?:CMD_(?:(?:SMOOTH_)?STOP|JUMP)|DESIRED_(?:TURN_)?SPEED|RADIUS|STAY_WITHIN_PARCEL|LENGTH|ORIENTATION|ACCOUNT_FOR_SKIPPED_FRAMES|AVOIDANCE_MODE|TYPE(?:_(?:[A-D]|NONE))?|MAX_(?:DECEL|TURN_RADIUS|(?:ACCEL|SPEED)))|PURSUIT_(?:OFFSET|FUZZ_FACTOR|GOAL_TOLERANCE|INTERCEPT)|REQUIRE_LINE_OF_SIGHT|FORCE_DIRECT_PATH|VERTICAL|HORIZONTAL|AVOID_(?:CHARACTERS|DYNAMIC_OBSTACLES|NONE)|PU_(?:EVADE_(?:HIDDEN|SPOTTED)|FAILURE_(?:DYNAMIC_PATHFINDING_DISABLED|INVALID_(?:GOAL|START)|NO_(?:NAVMESH|VALID_DESTINATION)|OTHER|TARGET_GONE|(?:PARCEL_)?UNREACHABLE)|(?:GOAL|SLOWDOWN_DISTANCE)_REACHED)|TRAVERSAL_TYPE(?:_(?:FAST|NONE|SLOW))?|CONTENT_TYPE_(?:ATOM|FORM|HTML|JSON|LLSD|RSS|TEXT|XHTML|XML)|GCNP_(?:RADIUS|STATIC)|(?:PATROL|WANDER)_PAUSE_AT_WAYPOINTS|OPT_(?:AVATAR|CHARACTER|EXCLUSION_VOLUME|LEGACY_LINKSET|MATERIAL_VOLUME|OTHER|STATIC_OBSTACLE|WALKABLE)|SIM_STAT_PCT_CHARS_STEPPED)\b'
    lsl_constants_integer_boolean = r'\b(?:FALSE|TRUE)\b'
    lsl_constants_rotation = r'\b(?:ZERO_ROTATION)\b'
    lsl_constants_string = r'\b(?:EOF|JSON_(?:ARRAY|DELETE|FALSE|INVALID|NULL|NUMBER|OBJECT|STRING|TRUE)|NULL_KEY|TEXTURE_(?:BLANK|DEFAULT|MEDIA|PLYWOOD|TRANSPARENT)|URL_REQUEST_(?:GRANTED|DENIED))\b'
    lsl_constants_vector = r'\b(?:TOUCH_INVALID_(?:TEXCOORD|VECTOR)|ZERO_VECTOR)\b'
    # identifiers that exist but are broken, deprecated, illegal or
    # unimplemented -- highlighted as errors below
    lsl_invalid_broken = r'\b(?:LAND_(?:LARGE|MEDIUM|SMALL)_BRUSH)\b'
    lsl_invalid_deprecated = r'\b(?:ATTACH_[LR]PEC|DATA_RATING|OBJECT_ATTACHMENT_(?:GEOMETRY_BYTES|SURFACE_AREA)|PRIM_(?:CAST_SHADOWS|MATERIAL_LIGHT|TYPE_LEGACY)|PSYS_SRC_(?:INNER|OUTER)ANGLE|VEHICLE_FLAG_NO_FLY_UP|ll(?:Cloud|Make(?:Explosion|Fountain|Smoke|Fire)|RemoteDataSetRegion|Sound(?:Preload)?|XorBase64Strings(?:Correct)?))\b'
    lsl_invalid_illegal = r'\b(?:event)\b'
    lsl_invalid_unimplemented = r'\b(?:CHARACTER_(?:MAX_ANGULAR_(?:ACCEL|SPEED)|TURN_SPEED_MULTIPLIER)|PERMISSION_(?:CHANGE_(?:JOINTS|PERMISSIONS)|RELEASE_OWNERSHIP|REMAP_CONTROLS)|PRIM_PHYSICS_MATERIAL|PSYS_SRC_OBJ_REL_MASK|ll(?:CollisionSprite|(?:Stop)?PointAt|(?:(?:Refresh|Set)Prim)URL|(?:Take|Release)Camera|RemoteLoadScript))\b'
    lsl_reserved_godmode = r'\b(?:ll(?:GodLikeRezObject|Set(?:Inventory|Object)PermMask))\b'
    lsl_reserved_log = r'\b(?:print)\b'
    lsl_operators = r'\+\+|\-\-|<<|>>|&&?|\|\|?|\^|~|[!%<>=*+\-/]=?'

    tokens = {
        'root':
        [
            (r'//.*?\n', Comment.Single),
            (r'/\*', Comment.Multiline, 'comment'),
            (r'"', String.Double, 'string'),
            # vocabulary tables above, most specific first
            (lsl_keywords, Keyword),
            (lsl_types, Keyword.Type),
            (lsl_states, Name.Class),
            (lsl_events, Name.Builtin),
            (lsl_functions_builtin, Name.Function),
            (lsl_constants_float, Keyword.Constant),
            (lsl_constants_integer, Keyword.Constant),
            (lsl_constants_integer_boolean, Keyword.Constant),
            (lsl_constants_rotation, Keyword.Constant),
            (lsl_constants_string, Keyword.Constant),
            (lsl_constants_vector, Keyword.Constant),
            (lsl_invalid_broken, Error),
            (lsl_invalid_deprecated, Error),
            (lsl_invalid_illegal, Error),
            (lsl_invalid_unimplemented, Error),
            (lsl_reserved_godmode, Keyword.Reserved),
            (lsl_reserved_log, Keyword.Reserved),
            (r'\b([a-zA-Z_]\w*)\b', Name.Variable),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d*', Number.Float),
            (r'(\d+\.\d*|\.\d+)', Number.Float),
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            (r'\d+', Number.Integer),
            (lsl_operators, Operator),
            # ':' / ':=' are not valid LSL operators
            (r':=?', Error),
            (r'[,;{}()\[\]]', Punctuation),
            (r'\n+', Whitespace),
            (r'\s+', Whitespace)
        ],
        # block comments nest via '#push'
        'comment':
        [
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        # only \n, \t, \" and \\ are valid LSL string escapes; any other
        # backslash sequence is flagged as an error
        'string':
        [
            (r'\\([nt"\\])', String.Escape),
            (r'"', String.Double, '#pop'),
            (r'\\.', Error),
            (r'[^"\\]+', String.Double),
        ]
    }
class AppleScriptLexer(RegexLexer):
"""
For `AppleScript source code
<http://developer.apple.com/documentation/AppleScript/
Conceptual/AppleScriptLangGuide>`_,
including `AppleScript Studio
<http://developer.apple.com/documentation/AppleScript/
Reference/StudioReference>`_.
Contributed by Andreas Amann <aamann@mac.com>.
.. versionadded:: 1.0
"""
name = 'AppleScript'
aliases = ['applescript']
filenames = ['*.applescript']
flags = re.MULTILINE | re.DOTALL
Identifiers = r'[a-zA-Z]\w*'
# XXX: use words() for all of these
Literals = ('AppleScript', 'current application', 'false', 'linefeed',
'missing value', 'pi', 'quote', 'result', 'return', 'space',
'tab', 'text item delimiters', 'true', 'version')
Classes = ('alias ', 'application ', 'boolean ', 'class ', 'constant ',
'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ',
'real ', 'record ', 'reference ', 'RGB color ', 'script ',
'text ', 'unit types', '(?:Unicode )?text', 'string')
BuiltIn = ('attachment', 'attribute run', 'character', 'day', 'month',
'paragraph', 'word', 'year')
HandlerParams = ('about', 'above', 'against', 'apart from', 'around',
'aside from', 'at', 'below', 'beneath', 'beside',
'between', 'for', 'given', 'instead of', 'on', 'onto',
'out of', 'over', 'since')
Commands = ('ASCII (character|number)', 'activate', 'beep', 'choose URL',
'choose application', 'choose color', 'choose file( name)?',
'choose folder', 'choose from list',
'choose remote application', 'clipboard info',
'close( access)?', 'copy', 'count', 'current date', 'delay',
'delete', 'display (alert|dialog)', 'do shell script',
'duplicate', 'exists', 'get eof', 'get volume settings',
'info for', 'launch', 'list (disks|folder)', 'load script',
'log', 'make', 'mount volume', 'new', 'offset',
'open( (for access|location))?', 'path to', 'print', 'quit',
'random number', 'read', 'round', 'run( script)?',
'say', 'scripting components',
'set (eof|the clipboard to|volume)', 'store script',
'summarize', 'system attribute', 'system info',
'the clipboard', 'time to GMT', 'write', 'quoted form')
References = ('(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)',
'first', 'second', 'third', 'fourth', 'fifth', 'sixth',
'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back',
'before', 'behind', 'every', 'front', 'index', 'last',
'middle', 'some', 'that', 'through', 'thru', 'where', 'whose')
Operators = ("and", "or", "is equal", "equals", "(is )?equal to", "is not",
"isn't", "isn't equal( to)?", "is not equal( to)?",
"doesn't equal", "does not equal", "(is )?greater than",
"comes after", "is not less than or equal( to)?",
"isn't less than or equal( to)?", "(is )?less than",
"comes before", "is not greater than or equal( to)?",
"isn't greater than or equal( to)?",
"(is )?greater than or equal( to)?", "is not less than",
"isn't less than", "does not come before",
"doesn't come before", "(is )?less than or equal( to)?",
"is not greater than", "isn't greater than",
"does not come after", "doesn't come after", "starts? with",
"begins? with", "ends? with", "contains?", "does not contain",
"doesn't contain", "is in", "is contained by", "is not in",
"is not contained by", "isn't contained by", "div", "mod",
"not", "(a )?(ref( to)?|reference to)", "is", "does")
Control = ('considering', 'else', 'error', 'exit', 'from', 'if',
'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to',
'try', 'until', 'using terms from', 'while', 'whith',
'with timeout( of)?', 'with transaction', 'by', 'continue',
'end', 'its?', 'me', 'my', 'return', 'of', 'as')
Declarations = ('global', 'local', 'prop(erty)?', 'set', 'get')
Reserved = ('but', 'put', 'returning', 'the')
StudioClasses = ('action cell', 'alert reply', 'application', 'box',
'browser( cell)?', 'bundle', 'button( cell)?', 'cell',
'clip view', 'color well', 'color-panel',
'combo box( item)?', 'control',
'data( (cell|column|item|row|source))?', 'default entry',
'dialog reply', 'document', 'drag info', 'drawer',
'event', 'font(-panel)?', 'formatter',
'image( (cell|view))?', 'matrix', 'menu( item)?', 'item',
'movie( view)?', 'open-panel', 'outline view', 'panel',
'pasteboard', 'plugin', 'popup button',
'progress indicator', 'responder', 'save-panel',
'scroll view', 'secure text field( cell)?', 'slider',
'sound', 'split view', 'stepper', 'tab view( item)?',
'table( (column|header cell|header view|view))',
'text( (field( cell)?|view))?', 'toolbar( item)?',
'user-defaults', 'view', 'window')
StudioEvents = ('accept outline drop', 'accept table drop', 'action',
'activated', 'alert ended', 'awake from nib', 'became key',
'became main', 'begin editing', 'bounds changed',
'cell value', 'cell value changed', 'change cell value',
'change item value', 'changed', 'child of item',
'choose menu item', 'clicked', 'clicked toolbar item',
'closed', 'column clicked', 'column moved',
'column resized', 'conclude drop', 'data representation',
'deminiaturized', 'dialog ended', 'document nib name',
'double clicked', 'drag( (entered|exited|updated))?',
'drop', 'end editing', 'exposed', 'idle', 'item expandable',
'item value', 'item value changed', 'items changed',
'keyboard down', 'keyboard up', 'launched',
'load data representation', 'miniaturized', 'mouse down',
'mouse dragged', 'mouse entered', 'mouse exited',
'mouse moved', 'mouse up', 'moved',
'number of browser rows', 'number of items',
'number of rows', 'open untitled', 'opened', 'panel ended',
'parameters updated', 'plugin loaded', 'prepare drop',
'prepare outline drag', 'prepare outline drop',
'prepare table drag', 'prepare table drop',
'read from file', 'resigned active', 'resigned key',
'resigned main', 'resized( sub views)?',
'right mouse down', 'right mouse dragged',
'right mouse up', 'rows changed', 'scroll wheel',
'selected tab view item', 'selection changed',
'selection changing', 'should begin editing',
'should close', 'should collapse item',
'should end editing', 'should expand item',
'should open( untitled)?',
'should quit( after last window closed)?',
'should select column', 'should select item',
'should select row', 'should select tab view item',
'should selection change', 'should zoom', 'shown',
'update menu item', 'update parameters',
'update toolbar item', 'was hidden', 'was miniaturized',
'will become active', 'will close', 'will dismiss',
'will display browser cell', 'will display cell',
'will display item cell', 'will display outline cell',
'will finish launching', 'will hide', 'will miniaturize',
'will move', 'will open', 'will pop up', 'will quit',
'will resign active', 'will resize( sub views)?',
'will select tab view item', 'will show', 'will zoom',
'write to file', 'zoomed')
StudioCommands = ('animate', 'append', 'call method', 'center',
'close drawer', 'close panel', 'display',
'display alert', 'display dialog', 'display panel', 'go',
'hide', 'highlight', 'increment', 'item for',
'load image', 'load movie', 'load nib', 'load panel',
'load sound', 'localized string', 'lock focus', 'log',
'open drawer', 'path for', 'pause', 'perform action',
'play', 'register', 'resume', 'scroll', 'select( all)?',
'show', 'size to fit', 'start', 'step back',
'step forward', 'stop', 'synchronize', 'unlock focus',
'update')
StudioProperties = ('accepts arrow key', 'action method', 'active',
'alignment', 'allowed identifiers',
'allows branch selection', 'allows column reordering',
'allows column resizing', 'allows column selection',
'allows customization',
'allows editing text attributes',
'allows empty selection', 'allows mixed state',
'allows multiple selection', 'allows reordering',
'allows undo', 'alpha( value)?', 'alternate image',
'alternate increment value', 'alternate title',
'animation delay', 'associated file name',
'associated object', 'auto completes', 'auto display',
'auto enables items', 'auto repeat',
'auto resizes( outline column)?',
'auto save expanded items', 'auto save name',
'auto save table columns', 'auto saves configuration',
'auto scroll', 'auto sizes all columns to fit',
'auto sizes cells', 'background color', 'bezel state',
'bezel style', 'bezeled', 'border rect', 'border type',
'bordered', 'bounds( rotation)?', 'box type',
'button returned', 'button type',
'can choose directories', 'can choose files',
'can draw', 'can hide',
'cell( (background color|size|type))?', 'characters',
'class', 'click count', 'clicked( data)? column',
'clicked data item', 'clicked( data)? row',
'closeable', 'collating', 'color( (mode|panel))',
'command key down', 'configuration',
'content(s| (size|view( margins)?))?', 'context',
'continuous', 'control key down', 'control size',
'control tint', 'control view',
'controller visible', 'coordinate system',
'copies( on scroll)?', 'corner view', 'current cell',
'current column', 'current( field)? editor',
'current( menu)? item', 'current row',
'current tab view item', 'data source',
'default identifiers', 'delta (x|y|z)',
'destination window', 'directory', 'display mode',
'displayed cell', 'document( (edited|rect|view))?',
'double value', 'dragged column', 'dragged distance',
'dragged items', 'draws( cell)? background',
'draws grid', 'dynamically scrolls', 'echos bullets',
'edge', 'editable', 'edited( data)? column',
'edited data item', 'edited( data)? row', 'enabled',
'enclosing scroll view', 'ending page',
'error handling', 'event number', 'event type',
'excluded from windows menu', 'executable path',
'expanded', 'fax number', 'field editor', 'file kind',
'file name', 'file type', 'first responder',
'first visible column', 'flipped', 'floating',
'font( panel)?', 'formatter', 'frameworks path',
'frontmost', 'gave up', 'grid color', 'has data items',
'has horizontal ruler', 'has horizontal scroller',
'has parent data item', 'has resize indicator',
'has shadow', 'has sub menu', 'has vertical ruler',
'has vertical scroller', 'header cell', 'header view',
'hidden', 'hides when deactivated', 'highlights by',
'horizontal line scroll', 'horizontal page scroll',
'horizontal ruler view', 'horizontally resizable',
'icon image', 'id', 'identifier',
'ignores multiple clicks',
'image( (alignment|dims when disabled|frame style|scaling))?',
'imports graphics', 'increment value',
'indentation per level', 'indeterminate', 'index',
'integer value', 'intercell spacing', 'item height',
'key( (code|equivalent( modifier)?|window))?',
'knob thickness', 'label', 'last( visible)? column',
'leading offset', 'leaf', 'level', 'line scroll',
'loaded', 'localized sort', 'location', 'loop mode',
'main( (bunde|menu|window))?', 'marker follows cell',
'matrix mode', 'maximum( content)? size',
'maximum visible columns',
'menu( form representation)?', 'miniaturizable',
'miniaturized', 'minimized image', 'minimized title',
'minimum column width', 'minimum( content)? size',
'modal', 'modified', 'mouse down state',
'movie( (controller|file|rect))?', 'muted', 'name',
'needs display', 'next state', 'next text',
'number of tick marks', 'only tick mark values',
'opaque', 'open panel', 'option key down',
'outline table column', 'page scroll', 'pages across',
'pages down', 'palette label', 'pane splitter',
'parent data item', 'parent window', 'pasteboard',
'path( (names|separator))?', 'playing',
'plays every frame', 'plays selection only', 'position',
'preferred edge', 'preferred type', 'pressure',
'previous text', 'prompt', 'properties',
'prototype cell', 'pulls down', 'rate',
'released when closed', 'repeated',
'requested print time', 'required file type',
'resizable', 'resized column', 'resource path',
'returns records', 'reuses columns', 'rich text',
'roll over', 'row height', 'rulers visible',
'save panel', 'scripts path', 'scrollable',
'selectable( identifiers)?', 'selected cell',
'selected( data)? columns?', 'selected data items?',
'selected( data)? rows?', 'selected item identifier',
'selection by rect', 'send action on arrow key',
'sends action when done editing', 'separates columns',
'separator item', 'sequence number', 'services menu',
'shared frameworks path', 'shared support path',
'sheet', 'shift key down', 'shows alpha',
'shows state by', 'size( mode)?',
'smart insert delete enabled', 'sort case sensitivity',
'sort column', 'sort order', 'sort type',
'sorted( data rows)?', 'sound', 'source( mask)?',
'spell checking enabled', 'starting page', 'state',
'string value', 'sub menu', 'super menu', 'super view',
'tab key traverses cells', 'tab state', 'tab type',
'tab view', 'table view', 'tag', 'target( printer)?',
'text color', 'text container insert',
'text container origin', 'text returned',
'tick mark position', 'time stamp',
'title(d| (cell|font|height|position|rect))?',
'tool tip', 'toolbar', 'trailing offset', 'transparent',
'treat packages as directories', 'truncated labels',
'types', 'unmodified characters', 'update views',
'use sort indicator', 'user defaults',
'uses data source', 'uses ruler',
'uses threaded animation',
'uses title from previous column', 'value wraps',
'version',
'vertical( (line scroll|page scroll|ruler view))?',
'vertically resizable', 'view',
'visible( document rect)?', 'volume', 'width', 'window',
'windows menu', 'wraps', 'zoomable', 'zoomed')
tokens = {
'root': [
(r'\s+', Text),
(u'¬\\n', String.Escape),
(r"'s\s+", Text), # This is a possessive, consider moving
(r'(--|#).*?$', Comment),
(r'\(\*', Comment.Multiline, 'comment'),
(r'[(){}!,.:]', Punctuation),
(u'(«)([^»]+)(»)',
bygroups(Text, Name.Builtin, Text)),
(r'\b((?:considering|ignoring)\s*)'
r'(application responses|case|diacriticals|hyphens|'
r'numeric strings|punctuation|white space)',
bygroups(Keyword, Name.Builtin)),
(u'(-|\\*|\\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\\^)', Operator),
(r"\b(%s)\b" % '|'.join(Operators), Operator.Word),
(r'^(\s*(?:on|end)\s+)'
r'(%s)' % '|'.join(StudioEvents[::-1]),
bygroups(Keyword, Name.Function)),
(r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)),
(r'\b(as )(%s)\b' % '|'.join(Classes),
bygroups(Keyword, Name.Class)),
(r'\b(%s)\b' % '|'.join(Literals), Name.Constant),
(r'\b(%s)\b' % '|'.join(Commands), Name.Builtin),
(r'\b(%s)\b' % '|'.join(Control), Keyword),
(r'\b(%s)\b' % '|'.join(Declarations), Keyword),
(r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin),
(r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin),
(r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin),
(r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute),
(r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin),
(r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin),
(r'\b(%s)\b' % '|'.join(References), Name.Builtin),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r'\b(%s)\b' % Identifiers, Name.Variable),
(r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float),
(r'[-+]?\d+', Number.Integer),
],
'comment': [
('\(\*', Comment.Multiline, '#push'),
('\*\)', Comment.Multiline, '#pop'),
('[^*(]+', Comment.Multiline),
('[*(]', Comment.Multiline),
],
}
class RexxLexer(RegexLexer):
    """
    `Rexx <http://www.rexxinfo.org/>`_ is a scripting language available for
    a wide range of different platforms with its roots found on mainframe
    systems. It is popular for I/O- and data based tasks and can act as glue
    language to bind different applications together.
    .. versionadded:: 2.0
    """
    name = 'Rexx'
    aliases = ['rexx', 'arexx']
    filenames = ['*.rexx', '*.rex', '*.rx', '*.arexx']
    mimetypes = ['text/x-rexx']
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s', Whitespace),
            (r'/\*', Comment.Multiline, 'comment'),
            (r'"', String, 'string_double'),
            (r"'", String, 'string_single'),
            (r'[0-9]+(\.[0-9]+)?(e[+-]?[0-9])?', Number),
            # Label directly followed by the PROCEDURE keyword.
            (r'([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b',
             bygroups(Name.Function, Whitespace, Operator, Whitespace,
                      Keyword.Declaration)),
            # Plain label.
            (r'([a-z_]\w*)(\s*)(:)',
             bygroups(Name.Label, Whitespace, Operator)),
            include('function'),
            include('keyword'),
            include('operator'),
            (r'[a-z_]\w*', Text),
        ],
        'function': [
            (words((
                'abbrev', 'abs', 'address', 'arg', 'b2x', 'bitand', 'bitor', 'bitxor',
                'c2d', 'c2x', 'center', 'charin', 'charout', 'chars', 'compare',
                'condition', 'copies', 'd2c', 'd2x', 'datatype', 'date', 'delstr',
                'delword', 'digits', 'errortext', 'form', 'format', 'fuzz', 'insert',
                'lastpos', 'left', 'length', 'linein', 'lineout', 'lines', 'max',
                'min', 'overlay', 'pos', 'queued', 'random', 'reverse', 'right', 'sign',
                'sourceline', 'space', 'stream', 'strip', 'substr', 'subword', 'symbol',
                'time', 'trace', 'translate', 'trunc', 'value', 'verify', 'word',
                'wordindex', 'wordlength', 'wordpos', 'words', 'x2b', 'x2c', 'x2d',
                'xrange'), suffix=r'(\s*)(\()'),
             bygroups(Name.Builtin, Whitespace, Operator)),
        ],
        'keyword': [
            (r'(address|arg|by|call|do|drop|else|end|exit|for|forever|if|'
             r'interpret|iterate|leave|nop|numeric|off|on|options|parse|'
             r'pull|push|queue|return|say|select|signal|to|then|trace|until|'
             r'while)\b', Keyword.Reserved),
        ],
        'operator': [
            (r'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||'
             r'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|'
             r'¬>>|¬>|¬|\.|,)', Operator),
        ],
        'string_double': [
            (r'[^"\n]+', String),
            (r'""', String),  # Doubled quote is an escaped quote in Rexx.
            (r'"', String, '#pop'),
            (r'\n', Text, '#pop'), # Stray linefeed also terminates strings.
        ],
        'string_single': [
            # Fix: '+' was missing here, so the lexer emitted one String
            # token per character; now consistent with 'string_double'.
            (r'[^\'\n]+', String),
            (r'\'\'', String),
            (r'\'', String, '#pop'),
            (r'\n', Text, '#pop'), # Stray linefeed also terminates strings.
        ],
        'comment': [
            (r'[^*]+', Comment.Multiline),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'\*', Comment.Multiline),
        ]
    }
    _c = lambda s: re.compile(s, re.MULTILINE)
    _ADDRESS_COMMAND_PATTERN = _c(r'^\s*address\s+command\b')
    _ADDRESS_PATTERN = _c(r'^\s*address\s+')
    _DO_WHILE_PATTERN = _c(r'^\s*do\s+while\b')
    _IF_THEN_DO_PATTERN = _c(r'^\s*if\b.+\bthen\s+do\s*$')
    _PROCEDURE_PATTERN = _c(r'^\s*([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b')
    _ELSE_DO_PATTERN = _c(r'\belse\s+do\s*$')
    _PARSE_ARG_PATTERN = _c(r'^\s*parse\s+(upper\s+)?(arg|value)\b')
    # (pattern, weight) pairs scored by analyse_text(); heavier weights mark
    # constructs that are stronger evidence of Rexx source.
    PATTERNS_AND_WEIGHTS = (
        (_ADDRESS_COMMAND_PATTERN, 0.2),
        (_ADDRESS_PATTERN, 0.05),
        (_DO_WHILE_PATTERN, 0.1),
        (_ELSE_DO_PATTERN, 0.1),
        (_IF_THEN_DO_PATTERN, 0.1),
        (_PROCEDURE_PATTERN, 0.5),
        (_PARSE_ARG_PATTERN, 0.2),
    )
    def analyse_text(text):
        """
        Check for initial comment and patterns that distinguish Rexx from
        other C-like languages.  Returns a score in [0.0, 1.0].
        """
        if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE):
            # Header matches MVS Rexx requirements, this is certainly a Rexx
            # script.
            return 1.0
        elif text.startswith('/*'):
            # Header matches general Rexx requirements; the source code might
            # still be any language using C comments such as C++, C# or Java.
            lowerText = text.lower()
            result = sum(weight
                         for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS
                         if pattern.search(lowerText)) + 0.01
            return min(result, 1.0)
        # Fix: return an explicit score instead of falling off the end
        # (implicit None) when the text does not start with a C comment.
        return 0.0
class MOOCodeLexer(RegexLexer):
    """
    For `MOOCode <http://www.moo.mud.org/>`_ (the MOO scripting
    language).
    .. versionadded:: 0.9
    """
    name = 'MOOCode'
    filenames = ['*.moo']
    aliases = ['moocode', 'moo']
    mimetypes = ['text/x-moocode']
    # Single-state lexer; rules are tried in order, so specific patterns
    # (keywords, builtins) must precede the catch-all identifier rule.
    tokens = {
        'root': [
            # Numbers
            (r'(0|[1-9][0-9_]*)', Number.Integer),
            # Strings (with \\ and \" escapes)
            (r'"(\\\\|\\"|[^"])*"', String),
            # exceptions
            (r'(E_PERM|E_DIV)', Name.Exception),
            # db-refs: numeric object ids (#123, #-1) or corified $names
            (r'((#[-0-9]+)|(\$\w+))', Name.Entity),
            # Keywords
            (r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while'
             r'|endwhile|break|continue|return|try'
             r'|except|endtry|finally|in)\b', Keyword),
            # builtins
            (r'(random|length)', Name.Builtin),
            # special variables
            (r'(player|caller|this|args)', Name.Variable.Instance),
            # skip whitespace
            (r'\s+', Text),
            (r'\n', Text),
            # other operators
            (r'([!;=,{}&|:.\[\]@()<>?]+)', Operator),
            # function call: an identifier immediately followed by '('
            (r'(\w+)(\()', bygroups(Name.Function, Operator)),
            # variables
            (r'(\w+)', Text),
        ]
    }
class HybrisLexer(RegexLexer):
    """
    For `Hybris <http://www.hybris-lang.org>`_ source code.
    .. versionadded:: 1.4
    """
    name = 'Hybris'
    aliases = ['hybris', 'hy']
    filenames = ['*.hy', '*.hyb']
    mimetypes = ['text/x-hybris', 'application/x-hybris']
    # DOTALL lets the /* ... */ rule span multiple lines.
    flags = re.MULTILINE | re.DOTALL
    tokens = {
        'root': [
            # method names: "function/method/operator name("
            (r'^(\s*(?:function|method|operator\s+)+?)'
             r'([a-zA-Z_]\w*)'
             r'(\s*)(\()', bygroups(Keyword, Name.Function, Text, Operator)),
            (r'[^\S\n]+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'@[a-zA-Z_][\w.]*', Name.Decorator),
            # statement and flow-control keywords
            (r'(break|case|catch|next|default|do|else|finally|for|foreach|of|'
             r'unless|if|new|return|switch|me|throw|try|while)\b', Keyword),
            (r'(extends|private|protected|public|static|throws|function|method|'
             r'operator)\b', Keyword.Declaration),
            (r'(true|false|null|__FILE__|__LINE__|__VERSION__|__LIB_PATH__|'
             r'__INC_PATH__)\b', Keyword.Constant),
            # 'class'/'import' push a dedicated state to color the name.
            (r'(class|struct)(\s+)',
             bygroups(Keyword.Declaration, Text), 'class'),
            (r'(import|include)(\s+)',
             bygroups(Keyword.Namespace, Text), 'import'),
            # built-in library functions
            (words((
                'gc_collect', 'gc_mm_items', 'gc_mm_usage', 'gc_collect_threshold',
                'urlencode', 'urldecode', 'base64encode', 'base64decode', 'sha1', 'crc32', 'sha2',
                'md5', 'md5_file', 'acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', 'cosh', 'exp',
                'fabs', 'floor', 'fmod', 'log', 'log10', 'pow', 'sin', 'sinh', 'sqrt', 'tan', 'tanh',
                'isint', 'isfloat', 'ischar', 'isstring', 'isarray', 'ismap', 'isalias', 'typeof',
                'sizeof', 'toint', 'tostring', 'fromxml', 'toxml', 'binary', 'pack', 'load', 'eval',
                'var_names', 'var_values', 'user_functions', 'dyn_functions', 'methods', 'call',
                'call_method', 'mknod', 'mkfifo', 'mount', 'umount2', 'umount', 'ticks', 'usleep',
                'sleep', 'time', 'strtime', 'strdate', 'dllopen', 'dlllink', 'dllcall', 'dllcall_argv',
                'dllclose', 'env', 'exec', 'fork', 'getpid', 'wait', 'popen', 'pclose', 'exit', 'kill',
                'pthread_create', 'pthread_create_argv', 'pthread_exit', 'pthread_join', 'pthread_kill',
                'smtp_send', 'http_get', 'http_post', 'http_download', 'socket', 'bind', 'listen',
                'accept', 'getsockname', 'getpeername', 'settimeout', 'connect', 'server', 'recv',
                'send', 'close', 'print', 'println', 'printf', 'input', 'readline', 'serial_open',
                'serial_fcntl', 'serial_get_attr', 'serial_get_ispeed', 'serial_get_ospeed',
                'serial_set_attr', 'serial_set_ispeed', 'serial_set_ospeed', 'serial_write',
                'serial_read', 'serial_close', 'xml_load', 'xml_parse', 'fopen', 'fseek', 'ftell',
                'fsize', 'fread', 'fwrite', 'fgets', 'fclose', 'file', 'readdir', 'pcre_replace', 'size',
                'pop', 'unmap', 'has', 'keys', 'values', 'length', 'find', 'substr', 'replace', 'split',
                'trim', 'remove', 'contains', 'join'), suffix=r'\b'),
             Name.Builtin),
            # standard-library class names
            (words((
                'MethodReference', 'Runner', 'Dll', 'Thread', 'Pipe', 'Process',
                'Runnable', 'CGI', 'ClientSocket', 'Socket', 'ServerSocket',
                'File', 'Console', 'Directory', 'Exception'), suffix=r'\b'),
             Keyword.Type),
            (r'"(\\\\|\\"|[^"])*"', String),
            (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char),
            # attribute access after '.'
            (r'(\.)([a-zA-Z_]\w*)',
             bygroups(Operator, Name.Attribute)),
            (r'[a-zA-Z_]\w*:', Name.Label),
            (r'[a-zA-Z_$]\w*', Name),
            (r'[~^*!%&\[\](){}<>|+=:;,./?\-@]+', Operator),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+L?', Number.Integer),
            (r'\n', Text),
        ],
        # Consume exactly the class/struct name, then return to 'root'.
        'class': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        # Consume a dotted module path (optionally ending in '*').
        'import': [
            (r'[\w.]+\*?', Name.Namespace, '#pop')
        ],
    }
| [
"guan2296107714@126.com"
] | guan2296107714@126.com |
23a49efc8591c205a10937fedb1c1746d6e4af4e | c8c021ce530a84b6a982c667068d16249e666479 | /archive/UI/demo_google_speech_api.py | 9306fc6078a8422cf7d6f6371ad38c34f03e279a | [] | no_license | SamuelHill/recipeAssistant | ff4f6bbec74f3513ee5f6cf60230711d17057b22 | a5d7a928c81d398e7fa7cc69ac7ccad8a49bd43e | refs/heads/master | 2022-08-27T00:32:24.683274 | 2022-08-05T09:07:29 | 2022-08-05T09:07:29 | 131,057,569 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,022 | py | # Copyrig#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Speech API sample application using the streaming API.
NOTE: This module requires the additional dependency `pyaudio`. To install
using pip:
pip install pyaudio
Example usage:
python transcribe_streaming_mic.py
"""
# [START import_libraries]
from __future__ import division
import re
import sys
import os
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
import pyaudio
from six.moves import queue
# [END import_libraries]
# Audio recording parameters
RATE = 16000  # sample rate in Hz expected by the streaming config below
CHUNK = int(RATE / 10)  # 100ms
# Module-import side effect: points the Google client libraries at a local
# service-account key file (must exist in the working directory).
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "demo_googleAuth2.json"
class MicrophoneStream(object):
    """Opens a recording stream as a generator yielding the audio chunks."""
    # NOTE(review): this class attribute appears unused inside the class;
    # listen_print_loop() below uses a *local* variable of the same name.
    # Confirm before removing.
    transcript_result = ""
    def __init__(self, rate, chunk):
        # rate: sample rate in Hz; chunk: frames per buffered block.
        self._rate = rate
        self._chunk = chunk
        # Create a thread-safe buffer of audio data
        self._buff = queue.Queue()
        self.closed = True
    def __enter__(self):
        # Open the PyAudio input stream and start buffering audio.
        self._audio_interface = pyaudio.PyAudio()
        self._audio_stream = self._audio_interface.open(
            format=pyaudio.paInt16,
            # The API currently only supports 1-channel (mono) audio
            # https://goo.gl/z757pE
            channels=1, rate=self._rate,
            input=True, frames_per_buffer=self._chunk,
            # Run the audio stream asynchronously to fill the buffer object.
            # This is necessary so that the input device's buffer doesn't
            # overflow while the calling thread makes network requests, etc.
            stream_callback=self._fill_buffer,
        )
        self.closed = False
        return self
    def __exit__(self, type, value, traceback):
        # Stop the stream and release all PyAudio resources.
        self._audio_stream.stop_stream()
        self._audio_stream.close()
        self.closed = True
        # Signal the generator to terminate so that the client's
        # streaming_recognize method will not block the process termination.
        self._buff.put(None)
        self._audio_interface.terminate()
    def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
        """Continuously collect data from the audio stream, into the buffer."""
        # Runs on PyAudio's callback thread; must stay fast and non-blocking.
        self._buff.put(in_data)
        return None, pyaudio.paContinue
    def generator(self):
        """Yield consecutive byte chunks of audio until the stream closes.
        Blocks for the first chunk, then drains everything already buffered
        so each yielded bytes object is as large as possible.
        """
        while not self.closed:
            # Use a blocking get() to ensure there's at least one chunk of
            # data, and stop iteration if the chunk is None, indicating the
            # end of the audio stream.
            chunk = self._buff.get()
            if chunk is None:
                return
            data = [chunk]
            # Now consume whatever other data's still buffered.
            while True:
                try:
                    chunk = self._buff.get(block=False)
                    if chunk is None:
                        return
                    data.append(chunk)
                except queue.Empty:
                    break
            yield b''.join(data)
# [END audio_stream]
def listen_print_loop(responses):
    """Iterates through server responses, prints them, and returns the text
    of the last *final* transcript (empty string if none arrived).
    The responses passed is a generator that will block until a response
    is provided by the server.
    Each response may contain multiple results, and each result may contain
    multiple alternatives; for details, see https://goo.gl/tjCPAU. Here we
    print only the transcription for the top alternative of the top result.
    In this case, responses are provided for interim results as well. If the
    response is an interim one, print a line feed at the end of it, to allow
    the next result to overwrite it, until the response is a final one. For the
    final one, print a newline to preserve the finalized transcription.
    """
    num_chars_printed = 0
    # Fix: initialise before the loop -- previously this name was only bound
    # inside the is_final branch, so an empty response stream (or one with
    # no final result) raised UnboundLocalError at the return statement.
    transcript_result = ""
    for response in responses:
        if not response.results:
            continue
        # The `results` list is consecutive. For streaming, we only care about
        # the first result being considered, since once it's `is_final`, it
        # moves on to considering the next utterance.
        result = response.results[0]
        if not result.alternatives:
            continue
        # Display the transcription of the top alternative.
        transcript = result.alternatives[0].transcript
        # Display interim results, but with a carriage return at the end of the
        # line, so subsequent lines will overwrite them.
        #
        # If the previous result was longer than this one, we need to print
        # some extra spaces to overwrite the previous result
        overwrite_chars = ' ' * (num_chars_printed - len(transcript))
        if not result.is_final:
            sys.stdout.write(transcript + overwrite_chars + '\r')
            sys.stdout.flush()
            num_chars_printed = len(transcript)
        else:
            print(transcript + overwrite_chars)
            transcript_result = transcript + overwrite_chars
            # Exit recognition if any of the transcribed phrases could be
            # one of our keywords.
            if re.search(r'\b(exit|quit)\b', transcript, re.I):
                print('Exiting..')
                break
            num_chars_printed = 0
    return transcript_result
def main():
    """Stream microphone audio to the Cloud Speech API and return the last
    final transcript produced by listen_print_loop()."""
    # See http://g.co/cloud/speech/docs/languages
    # for a list of supported languages.
    recognizer = speech.SpeechClient()
    recognition_config = types.RecognitionConfig(
        encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=RATE,
        language_code='en-US')  # a BCP-47 language tag
    streaming_config = types.StreamingRecognitionConfig(
        config=recognition_config,
        interim_results=True)
    with MicrophoneStream(RATE, CHUNK) as mic:
        # Lazily wrap each raw audio chunk in a streaming request.
        request_stream = (
            types.StreamingRecognizeRequest(audio_content=content)
            for content in mic.generator())
        responses = recognizer.streaming_recognize(streaming_config,
                                                   request_stream)
        # Now, put the transcription responses to use.
        return listen_print_loop(responses)
if __name__ == '__main__':
main()
| [
"nneomaoradiegwu2019@u.northwestern.edu"
] | nneomaoradiegwu2019@u.northwestern.edu |
a01d755c67530c2e2ac00511cf45e2da524b4964 | 8da16fb123b64cd7176852ba70c18ff6eeb6ee8b | /Python/Tutorial/Colections/Lists/matriz.py | 8b5a67adff0585583cf80c3d3ec5db48daf004c4 | [] | no_license | Carrazza/Tutoriais | 67ca743532981658a0b8f5e7efbb3d498671e262 | 9098be2b2ccaf8b4cc75250b09b73267056e7f21 | refs/heads/master | 2022-10-12T18:41:29.624003 | 2020-06-11T21:55:59 | 2020-06-11T21:55:59 | 271,637,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | matriz = []
for x in range(10):
matriz.append([])
for y in range(10):
matriz[x].append(y)
print(matriz) | [
"brunocarrazza@gmail.com"
] | brunocarrazza@gmail.com |
495e3c3956b6601de7ec38f5589268de8a90e8f0 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /DaVinciDev_v38r1p1/InstallArea/x86_64-slc6-gcc49-opt/python/StrippingArchive/Stripping15/StrippingDiMuonNew.py | b2c1acc2a142435c53e815e58b509aa5621c7df9 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,139 | py | '''
Inclusive DiMuon lines based on the lines by Gaia Lanfranchi, Alessio Sarti,
with inputs from Joel Bressieux, Giulia Manca, Matthew Needham and Patrick Robbe.
Including the following lines:
1. DiMuonLine
FullDST (keep 10%) and MicroDST
2. DiMuonSameSignLine
FullDST (keep 1%) and MicroDST (keep 10%)
3. DiMuonExclusiveLine
Selection is the same as DiMuonLine at present,
a cut of "1mm upstream of any PV" applied further
4. DiMuonNoPVLine
Using the same selection as DiMuonExclusiveLine at present (except the PV cut)
Requring no reconstructed PV.
5. DiMuonHighMassLine
6. DiMuonHighMassSameSignLine
7. DiMuonLowMassLine
Keep Hlt2UnbiasedDiMuonLowMassDecision triggered events
8. Jpsi2MuMuLine
9. Psi2MuMuLine
10. DiMuonDetachedLine
11. Jpsi2MuMuDetachedLine
More details can be found here:
http://indico.cern.ch/contributionDisplay.py?contribId=2&confId=100755
--------------------------
To include lines for DiMuon stream
--------------------------
from StrippingSelections.StrippingDiMuonNew import DiMuonConf
from StrippingSelections.StrippingDiMuonNew import config_default as config_FullDSTDiMuon
FullDSTDiMuonConf = DiMuonConf( name = None, config =config_FullDSTDiMuon )
stream.appendLines( FullDSTDiMuonConf.lines() )
--------------------------
For MicroDST
--------------------------
from StrippingSelections.StrippingDiMuonNew import DiMuonConf
from StrippingSelections.StrippingDiMuonNew import config_microDST as MicroDSTDiMuon
MicroDSTDiMuonConf = DiMuonConf( name = 'MicroDST', config = MicroDSTDiMuon )
stream.appendLines( MicroDSTDiMuonConf.lines() )
'''
__author__=['Jibo He']
__date__ = '30/09/2010'
__version__= '$Revision: 1.0 $'
__all__ = (
'DiMuonConf'
)
config_default= {
'MicroDST' : False ,
# DiMuon line
'DiMuon_Prescale' : 1. ,
'DiMuon_Postscale' : 1. ,
'DiMuon_checkPV' : False ,
'DiMuon_MuonPT' : 650. , # MeV
'DiMuon_MuonP' : -8000. , # MeV, no cut now
'DiMuon_MuonTRCHI2DOF' : 5. ,
'DiMuon_MinMass' : 2900. , # MeV
'DiMuon_VCHI2PDOF' : 20. ,
'DiMuon_PT' : 3000. , # MeV, no cut now
# DiMuon Same Sign line
'DiMuonSameSign_Prescale' : 0.05 ,
'DiMuonSameSign_Postscale' : 1. ,
'DiMuonSameSign_checkPV' : False ,
# DiMuonPrescaled line
'DiMuonPrescaled_Prescale' : 0.1 ,
'DiMuonPrescaled_Postscale' : 1. ,
'DiMuonPrescaled_checkPV' : False ,
'DiMuonPrescaled_MuonPT' : 650. , # MeV
'DiMuonPrescaled_MuonP' : -8000. , # MeV, no cut now
'DiMuonPrescaled_MuonTRCHI2DOF' : 5. ,
'DiMuonPrescaled_MinMass' : 2900. , # MeV
'DiMuonPrescaled_VCHI2PDOF' : 20. ,
'DiMuonPrescaled_PT' : -1000. , # MeV, no cut now
# DiMuonExclusive line
'DiMuonExclusive_Prescale' : 0. ,
'DiMuonExclusive_Postscale' : 1. ,
'DiMuonExclusive_checkPV' : True ,
'DiMuonExclusive_MuonPT' : 650. , # MeV
'DiMuonExclusive_MuonP' : -8000. , # MeV, no cut now
'DiMuonExclusive_MuonTRCHI2DOF' : 5. ,
'DiMuonExclusive_MinMass' : 2900. ,
'DiMuonExclusive_VCHI2PDOF' : 20. ,
'DiMuonExclusive_PT' : -1000. , # MeV, no cut now
'DiMuonExclusive_DZ' : -1. , # mm, upstream of any PV
# DiMuonNoPV line
'DiMuonNoPV_Prescale' : 1. ,
'DiMuonNoPV_Postscale' : 1. ,
# DiMuon High Mass line
'DiMuonHighMass_Prescale' : 1. ,
'DiMuonHighMass_Postscale' : 1. ,
'DiMuonHighMass_checkPV' : False ,
'DiMuonHighMass_MuonPT' : 650. , # MeV
'DiMuonHighMass_MuonP' : -8000. ,
'DiMuonHighMass_MuonTRCHI2DOF' : 5. ,
'DiMuonHighMass_MinMass' : 8500. , # MeV
'DiMuonHighMass_VCHI2PDOF' : 20. ,
'DiMuonHighMass_PT' : -1000. , # MeV, no cut now
# DiMuon High Mass Same Sign line
'DiMuonHighMassSameSign_Prescale' : 0.5 ,
'DiMuonHighMassSameSign_Postscale' : 1. ,
'DiMuonHighMassSameSign_checkPV' : False ,
# DiMuon Low Mass line
'DiMuonLowMass_Prescale' : 1. ,
'DiMuonLowMass_Postscale' : 1. ,
'DiMuonLowMass_checkPV' : False ,
'DiMuonLowMass_MuonPT' : 650. , # MeV
'DiMuonLowMass_MuonP' : -8000. , # MeV, no cut now
'DiMuonLowMass_MuonTRCHI2DOF' : 5. ,
'DiMuonLowMass_MinMass' : 500. , # MeV
'DiMuonLowMass_VCHI2PDOF' : 20. ,
'DiMuonLowMass_PT' : -1000. , # MeV, no cut now
# Jpsi2MuMu line
'Jpsi2MuMu_Prescale' : 1. ,
'Jpsi2MuMu_Postscale' : 1. ,
'Jpsi2MuMu_checkPV' : False ,
'Jpsi2MuMu_MuonPT' : 650. , # MeV
'Jpsi2MuMu_MuonP' : 8000. , # MeV
'Jpsi2MuMu_MuonPIDmu' : 0. ,
'Jpsi2MuMu_MuonTRCHI2DOF' : 5. ,
'Jpsi2MuMu_MinMass' : 3010. , # MeV
'Jpsi2MuMu_MaxMass' : 3170. , # MeV
'Jpsi2MuMu_VCHI2PDOF' : 20. ,
'Jpsi2MuMu_PT' : 3000. , # MeV
# Psi2MuMu line
'Psi2MuMu_Prescale' : 1. ,
'Psi2MuMu_Postscale' : 1. ,
'Psi2MuMu_checkPV' : False ,
'Psi2MuMu_ParticleName' : "'psi(2S)'", # Particle Name, like "'psi(2S)'"
'Psi2MuMu_MuonPT' : 1000. , # MeV
'Psi2MuMu_MuonP' : 8000. , # MeV
'Psi2MuMu_MuonPIDmu' : 0. ,
'Psi2MuMu_MuonTRCHI2DOF' : 5. ,
'Psi2MuMu_MassWindow' : 120. , # MeV
'Psi2MuMu_VCHI2PDOF' : 20. ,
'Psi2MuMu_PT' : 3000. , # MeV
# DiMuonDetached line
'DiMuonDetached_Prescale' : 0. ,
'DiMuonDetached_Postscale' : 1. ,
'DiMuonDetached_MuonPT' : 500. , # MeV
'DiMuonDetached_MuonP' : -8000. , # MeV, no cut now
'DiMuonDetached_MuonPIDmu' : -5. ,
'DiMuonDetached_MuonTRCHI2DOF' : 5. ,
'DiMuonDetached_MinMass' : 2950. ,
'DiMuonDetached_VCHI2PDOF' : 20. ,
'DiMuonDetached_PT' : -1000. , # MeV, no cut now
'DiMuonDetached_DLS' : 5. , # mm, upstream of any PV
# Jpsi2MuMuDetached line
'Jpsi2MuMuDetached_Prescale' : 1. ,
'Jpsi2MuMuDetached_Postscale' : 1. ,
'Jpsi2MuMuDetached_MuonPT' : 500. , # MeV
'Jpsi2MuMuDetached_MuonP' : -8000. , # MeV, no cut now
'Jpsi2MuMuDetached_MuonPIDmu' : -5. ,
'Jpsi2MuMuDetached_MuonTRCHI2DOF' : 5. ,
'Jpsi2MuMuDetached_MinMass' : 2976.916, # MeV
'Jpsi2MuMuDetached_MaxMass' : 3216.916, # MeV
'Jpsi2MuMuDetached_VCHI2PDOF' : 20. ,
'Jpsi2MuMuDetached_PT' : -1000. , # MeV
'Jpsi2MuMuDetached_DLS' : 3. ,
# Psi2MuMuDetachedDetached line
'Psi2MuMuDetached_Prescale' : 1. ,
'Psi2MuMuDetached_Postscale' : 1. ,
'Psi2MuMuDetached_ParticleName' : "'psi(2S)'", # Particle Name, like "'psi(2S)'"
'Psi2MuMuDetached_MuonPT' : 500. , # MeV
'Psi2MuMuDetached_MuonP' : -8000. , # MeV, no cut now
'Psi2MuMuDetached_MuonPIDmu' : -5. ,
'Psi2MuMuDetached_MuonTRCHI2DOF' : 5. ,
'Psi2MuMuDetached_MassWindow' : 120. , # MeV
'Psi2MuMuDetached_VCHI2PDOF' : 20. ,
'Psi2MuMuDetached_PT' : -1000. , # MeV, no cut now
'Psi2MuMuDetached_DLS' : 5.
}
config_microDST= {
'MicroDST' : True ,
# DiMuon line
'DiMuon_Prescale' : 1. ,
'DiMuon_Postscale' : 1. ,
'DiMuon_checkPV' : False ,
'DiMuon_MuonPT' : 650. , # MeV
'DiMuon_MuonP' : -8000. , # MeV, no cut now
'DiMuon_MuonTRCHI2DOF' : 5. ,
'DiMuon_MinMass' : 2900. , # MeV
'DiMuon_VCHI2PDOF' : 20. ,
'DiMuon_PT' : 3000. , # MeV
# DiMuon Same Sign line
'DiMuonSameSign_Prescale' : 0.05 ,
'DiMuonSameSign_Postscale' : 1. ,
'DiMuonSameSign_checkPV' : False ,
# DiMuonPrescaled line
'DiMuonPrescaled_Prescale' : 0.1 ,
'DiMuonPrescaled_Postscale' : 1. ,
'DiMuonPrescaled_checkPV' : False ,
'DiMuonPrescaled_MuonPT' : 650. , # MeV
'DiMuonPrescaled_MuonP' : -8000. , # MeV, no cut now
'DiMuonPrescaled_MuonTRCHI2DOF' : 5. ,
'DiMuonPrescaled_MinMass' : 2900. , # MeV
'DiMuonPrescaled_VCHI2PDOF' : 20. ,
'DiMuonPrescaled_PT' : -1000. , # MeV, no cut now
# DiMuonExclusive line
'DiMuonExclusive_Prescale' : 1. ,
'DiMuonExclusive_Postscale' : 1. ,
'DiMuonExclusive_checkPV' : True ,
'DiMuonExclusive_MuonPT' : 650. , # MeV
'DiMuonExclusive_MuonP' : -8000. , # MeV, no cut now
'DiMuonExclusive_MuonTRCHI2DOF' : 5. ,
'DiMuonExclusive_MinMass' : 2900. ,
'DiMuonExclusive_VCHI2PDOF' : 20. ,
'DiMuonExclusive_PT' : -1000. , # MeV, no cut now
'DiMuonExclusive_DZ' : -1. , # mm, upstream of any PV
# DiMuonNoPV line
'DiMuonNoPV_Prescale' : 1. ,
'DiMuonNoPV_Postscale' : 1. ,
# DiMuon High Mass line
'DiMuonHighMass_Prescale' : 1. ,
'DiMuonHighMass_Postscale' : 1. ,
'DiMuonHighMass_checkPV' : True ,
'DiMuonHighMass_MuonPT' : 650. , # MeV
'DiMuonHighMass_MuonP' : -8000. ,
'DiMuonHighMass_MuonTRCHI2DOF' : 5. ,
'DiMuonHighMass_MinMass' : 8000. , # MeV
'DiMuonHighMass_VCHI2PDOF' : 20. ,
'DiMuonHighMass_PT' : -1000. , # MeV, no cut now
# DiMuon High Mass Same Sign line
'DiMuonHighMassSameSign_Prescale' : 1. ,
'DiMuonHighMassSameSign_Postscale' : 1. ,
'DiMuonHighMassSameSign_checkPV' : True ,
# DiMuon Low Mass line
'DiMuonLowMass_Prescale' : 1. ,
'DiMuonLowMass_Postscale' : 1. ,
'DiMuonLowMass_checkPV' : True ,
'DiMuonLowMass_MuonPT' : 650. , # MeV
'DiMuonLowMass_MuonP' : -8000. , # MeV, no cut now
'DiMuonLowMass_MuonTRCHI2DOF' : 5. ,
'DiMuonLowMass_MinMass' : 500. , # MeV
'DiMuonLowMass_VCHI2PDOF' : 20. ,
'DiMuonLowMass_PT' : -1000. , # MeV, no cut now
# Jpsi2MuMu line
'Jpsi2MuMu_Prescale' : 1. ,
'Jpsi2MuMu_Postscale' : 1. ,
'Jpsi2MuMu_checkPV' : True ,
'Jpsi2MuMu_MuonPT' : 650. , # MeV
'Jpsi2MuMu_MuonP' : -8000. , # MeV, no cut now
'Jpsi2MuMu_MuonPIDmu' : 0. ,
'Jpsi2MuMu_MuonTRCHI2DOF' : 5. ,
'Jpsi2MuMu_MinMass' : 3010. , # MeV
'Jpsi2MuMu_MaxMass' : 3170. , # MeV
'Jpsi2MuMu_VCHI2PDOF' : 20. ,
'Jpsi2MuMu_PT' : 3000.0 , # MeV
# Psi2MuMu line
'Psi2MuMu_Prescale' : 1. ,
'Psi2MuMu_Postscale' : 1. ,
'Psi2MuMu_checkPV' : True ,
'Psi2MuMu_ParticleName' : "'psi(2S)'", # Particle Name, like "'psi(2S)'"
'Psi2MuMu_MuonPT' : 1000. , # MeV
'Psi2MuMu_MuonP' : 8000. , # MeV
'Psi2MuMu_MuonPIDmu' : 0. ,
'Psi2MuMu_MuonTRCHI2DOF' : 5. ,
'Psi2MuMu_MassWindow' : 120. , # MeV
'Psi2MuMu_VCHI2PDOF' : 20. ,
'Psi2MuMu_PT' : 2000. , # MeV
# DiMuonDetached line
'DiMuonDetached_Prescale' : 1. ,
'DiMuonDetached_Postscale' : 1. ,
'DiMuonDetached_MuonPT' : 500. , # MeV
'DiMuonDetached_MuonP' : -8000. , # MeV, no cut now
'DiMuonDetached_MuonPIDmu' : -5. ,
'DiMuonDetached_MuonTRCHI2DOF' : 5. ,
'DiMuonDetached_MinMass' : 2950. ,
'DiMuonDetached_VCHI2PDOF' : 20. ,
'DiMuonDetached_PT' : -1000. , # MeV, no cut now
'DiMuonDetached_DLS' : 5. , # mm, upstream of any PV
# Jpsi2MuMuDetached line
'Jpsi2MuMuDetached_Prescale' : 1. ,
'Jpsi2MuMuDetached_Postscale' : 1. ,
'Jpsi2MuMuDetached_MuonPT' : 500. , # MeV
'Jpsi2MuMuDetached_MuonP' : -8000. , # MeV, no cut now
'Jpsi2MuMuDetached_MuonPIDmu' : -5. ,
'Jpsi2MuMuDetached_MuonTRCHI2DOF' : 5. ,
'Jpsi2MuMuDetached_MinMass' : 2976.916, # MeV
'Jpsi2MuMuDetached_MaxMass' : 3216.916, # MeV
'Jpsi2MuMuDetached_VCHI2PDOF' : 20. ,
'Jpsi2MuMuDetached_PT' : -1000. , # MeV
'Jpsi2MuMuDetached_DLS' : 3. ,
# Psi2MuMuDetachedDetached line
'Psi2MuMuDetached_Prescale' : 1. ,
'Psi2MuMuDetached_Postscale' : 1. ,
'Psi2MuMuDetached_ParticleName' : "'psi(2S)'", # Particle Name, like "'psi(2S)'"
'Psi2MuMuDetached_MuonPT' : 500. , # MeV
'Psi2MuMuDetached_MuonP' : -8000. , # MeV, no cut now
'Psi2MuMuDetached_MuonPIDmu' : -5. ,
'Psi2MuMuDetached_MuonTRCHI2DOF' : 5. ,
'Psi2MuMuDetached_MassWindow' : 120. , # MeV
'Psi2MuMuDetached_VCHI2PDOF' : 20. ,
'Psi2MuMuDetached_PT' : -1000. , # MeV, no cut now
'Psi2MuMuDetached_DLS' : 5.
}
from Gaudi.Configuration import *
from GaudiConfUtils.ConfigurableGenerators import FilterDesktop
from Configurables import LoKi__VoidFilter
from PhysSelPython.Wrappers import Selection, DataOnDemand, EventSelection
from StrippingConf.StrippingLine import StrippingLine
from StrippingUtils.Utils import LineBuilder
class DiMuonConf(LineBuilder):
    """Builder for the inclusive dimuon stripping lines.

    Builds every DiMuon / J/psi / psi(2S) selection (prompt, same-sign,
    prescaled, exclusive, no-PV, high/low-mass and detached variants) from a
    single ``config`` dictionary, then registers a subset of the lines
    depending on the ``MicroDST`` flag.
    """
    # Keys the user-supplied `config` dict is validated against by LineBuilder.
    __configuration_keys__ = (
        "MicroDST",
        # DiMuon line
        'DiMuon_Prescale',
        'DiMuon_Postscale',
        'DiMuon_checkPV',
        'DiMuon_MuonPT',
        'DiMuon_MuonP',
        'DiMuon_MuonTRCHI2DOF',
        'DiMuon_MinMass',
        'DiMuon_VCHI2PDOF',
        'DiMuon_PT',
        # DiMuon Same Sign line
        'DiMuonSameSign_Prescale',
        'DiMuonSameSign_Postscale',
        'DiMuonSameSign_checkPV',
        # DiMuonPrescaled line
        'DiMuonPrescaled_Prescale',
        'DiMuonPrescaled_Postscale',
        'DiMuonPrescaled_checkPV',
        'DiMuonPrescaled_MuonPT',
        'DiMuonPrescaled_MuonP',
        'DiMuonPrescaled_MuonTRCHI2DOF',
        'DiMuonPrescaled_MinMass',
        'DiMuonPrescaled_VCHI2PDOF',
        'DiMuonPrescaled_PT',
        # DiMuonExclusive line
        'DiMuonExclusive_Prescale',
        'DiMuonExclusive_Postscale',
        'DiMuonExclusive_checkPV',
        'DiMuonExclusive_MuonPT',
        'DiMuonExclusive_MuonP',
        'DiMuonExclusive_MuonTRCHI2DOF',
        'DiMuonExclusive_MinMass',
        'DiMuonExclusive_VCHI2PDOF',
        'DiMuonExclusive_PT',
        'DiMuonExclusive_DZ',
        # DiMuonNoPV line
        'DiMuonNoPV_Prescale',
        'DiMuonNoPV_Postscale',
        # DiMuon High Mass line
        'DiMuonHighMass_Prescale',
        'DiMuonHighMass_Postscale',
        'DiMuonHighMass_checkPV',
        'DiMuonHighMass_MuonPT',
        'DiMuonHighMass_MuonP',
        'DiMuonHighMass_MuonTRCHI2DOF',
        'DiMuonHighMass_MinMass',
        'DiMuonHighMass_VCHI2PDOF',
        'DiMuonHighMass_PT',
        # DiMuonHighMassSameSign line
        'DiMuonHighMassSameSign_Prescale',
        'DiMuonHighMassSameSign_Postscale',
        'DiMuonHighMassSameSign_checkPV',
        # DiMuon Low Mass line
        'DiMuonLowMass_Prescale',
        'DiMuonLowMass_Postscale',
        'DiMuonLowMass_checkPV',
        'DiMuonLowMass_MuonPT',
        'DiMuonLowMass_MuonP',
        'DiMuonLowMass_MuonTRCHI2DOF',
        'DiMuonLowMass_MinMass',
        'DiMuonLowMass_VCHI2PDOF',
        'DiMuonLowMass_PT',
        # Tight Jpsi line
        'Jpsi2MuMu_Prescale',
        'Jpsi2MuMu_Postscale',
        'Jpsi2MuMu_checkPV',
        'Jpsi2MuMu_MuonPT',
        'Jpsi2MuMu_MuonP',
        'Jpsi2MuMu_MuonPIDmu',
        'Jpsi2MuMu_MuonTRCHI2DOF',
        'Jpsi2MuMu_MinMass',
        'Jpsi2MuMu_MaxMass',
        'Jpsi2MuMu_VCHI2PDOF',
        'Jpsi2MuMu_PT',
        # Tight Psi(2S) line
        'Psi2MuMu_Prescale',
        'Psi2MuMu_Postscale',
        'Psi2MuMu_checkPV',
        'Psi2MuMu_ParticleName',
        'Psi2MuMu_MuonPT',
        'Psi2MuMu_MuonP',
        'Psi2MuMu_MuonPIDmu',
        'Psi2MuMu_MuonTRCHI2DOF',
        'Psi2MuMu_MassWindow',
        'Psi2MuMu_VCHI2PDOF',
        'Psi2MuMu_PT',
        # DiMuonDetached line
        'DiMuonDetached_Prescale',
        'DiMuonDetached_Postscale',
        'DiMuonDetached_MuonPT',
        'DiMuonDetached_MuonP',
        'DiMuonDetached_MuonPIDmu',
        'DiMuonDetached_MuonTRCHI2DOF',
        'DiMuonDetached_MinMass',
        'DiMuonDetached_VCHI2PDOF',
        'DiMuonDetached_PT',
        'DiMuonDetached_DLS',
        # Jpsi2MuMuDetached line
        'Jpsi2MuMuDetached_Prescale',
        'Jpsi2MuMuDetached_Postscale',
        'Jpsi2MuMuDetached_MuonPT',
        'Jpsi2MuMuDetached_MuonP',
        'Jpsi2MuMuDetached_MuonPIDmu',
        'Jpsi2MuMuDetached_MuonTRCHI2DOF',
        'Jpsi2MuMuDetached_MinMass',
        'Jpsi2MuMuDetached_MaxMass',
        'Jpsi2MuMuDetached_VCHI2PDOF',
        'Jpsi2MuMuDetached_PT',
        'Jpsi2MuMuDetached_DLS',
        # Psi2MuMuDetached line
        'Psi2MuMuDetached_Prescale',
        'Psi2MuMuDetached_Postscale',
        'Psi2MuMuDetached_ParticleName',
        'Psi2MuMuDetached_MuonPT',
        'Psi2MuMuDetached_MuonP',
        'Psi2MuMuDetached_MuonPIDmu',
        'Psi2MuMuDetached_MuonTRCHI2DOF',
        'Psi2MuMuDetached_MassWindow',
        'Psi2MuMuDetached_VCHI2PDOF',
        'Psi2MuMuDetached_PT',
        'Psi2MuMuDetached_DLS'
        )
    def __init__(self, name, config):
        """Build all selections/lines from `config` and register them.

        :param name:   prefix used for every selection and line name
                       (``None`` is treated as the empty string).
        :param config: dict with exactly the keys in __configuration_keys__.
        """
        LineBuilder.__init__(self, name, config)
        # if name not set outside, set it to empty
        if name == None:
            name = ""
        """
        DiMuon line
        """
        self.SelDiMuon = filterDiMuon( name + 'DiMuon',
                                       MuonPT        = config['DiMuon_MuonPT'],
                                       MuonP         = config['DiMuon_MuonP'],
                                       MuonTRCHI2DOF = config['DiMuon_MuonTRCHI2DOF'],
                                       MuMuMinMass   = config['DiMuon_MinMass'],
                                       MuMuVCHI2PDOF = config['DiMuon_VCHI2PDOF'],
                                       MuMuPT        = config['DiMuon_PT']
                                       )
        self.DiMuonLine = StrippingLine( name + 'DiMuonInc' + 'Line',
                                         prescale  = config['DiMuon_Prescale'],
                                         postscale = config['DiMuon_Postscale'],
                                         checkPV   = config['DiMuon_checkPV'],
                                         selection = self.SelDiMuon
                                         )
        """
        DiMuon same sign line
        """
        # NOTE: the same-sign selection reuses the opposite-sign DiMuon_* cuts.
        self.SelDiMuonSameSign = filterDiMuonSameSign( name + 'DiMuonSameSign',
                                                       MuonPT        = config['DiMuon_MuonPT'],
                                                       MuonP         = config['DiMuon_MuonP'],
                                                       MuonTRCHI2DOF = config['DiMuon_MuonTRCHI2DOF'],
                                                       MuMuMinMass   = config['DiMuon_MinMass'],
                                                       MuMuVCHI2PDOF = config['DiMuon_VCHI2PDOF'],
                                                       MuMuPT        = config['DiMuon_PT']
                                                       )
        self.DiMuonSameSignLine = StrippingLine( name + 'DiMuonSameSign' + 'Line',
                                                 prescale  = config['DiMuonSameSign_Prescale'],
                                                 postscale = config['DiMuonSameSign_Postscale'],
                                                 checkPV   = config['DiMuonSameSign_checkPV'],
                                                 selection = self.SelDiMuonSameSign
                                                 )
        """
        DiMuonPrescaled line
        """
        self.SelDiMuonPrescaled = filterDiMuon( name + 'DiMuonPrescaled',
                                                MuonPT        = config['DiMuonPrescaled_MuonPT'],
                                                MuonP         = config['DiMuonPrescaled_MuonP'],
                                                MuonTRCHI2DOF = config['DiMuonPrescaled_MuonTRCHI2DOF'],
                                                MuMuMinMass   = config['DiMuonPrescaled_MinMass'],
                                                MuMuVCHI2PDOF = config['DiMuonPrescaled_VCHI2PDOF'],
                                                MuMuPT        = config['DiMuonPrescaled_PT']
                                                )
        self.DiMuonPrescaledLine = StrippingLine( name + 'DiMuonPrescaled' + 'Line',
                                                  prescale  = config['DiMuonPrescaled_Prescale'],
                                                  postscale = config['DiMuonPrescaled_Postscale'],
                                                  checkPV   = config['DiMuonPrescaled_checkPV'],
                                                  selection = self.SelDiMuonPrescaled
                                                  )
        """
        DiMuonExclusiveline
        """
        self.SelDiMuonExclusive = filterDiMuonAndDZ( name + 'DiMuonExclusive',
                                                     MuonPT        = config['DiMuonExclusive_MuonPT'],
                                                     MuonP         = config['DiMuonExclusive_MuonP'],
                                                     MuonTRCHI2DOF = config['DiMuonExclusive_MuonTRCHI2DOF'],
                                                     MuMuMinMass   = config['DiMuonExclusive_MinMass'],
                                                     MuMuVCHI2PDOF = config['DiMuonExclusive_VCHI2PDOF'],
                                                     MuMuPT        = config['DiMuonExclusive_PT'],
                                                     MuMuDZ        = config['DiMuonExclusive_DZ']
                                                     )
        self.DiMuonExclusiveLine = StrippingLine( name + 'DiMuonExclusive' + 'Line',
                                                  prescale  = config['DiMuonExclusive_Prescale'],
                                                  postscale = config['DiMuonExclusive_Postscale'],
                                                  checkPV   = config['DiMuonExclusive_checkPV'],
                                                  selection = self.SelDiMuonExclusive
                                                  )
        """
        DiMuonNoPVline
        """
        # NOTE: reuses the DiMuonExclusive_* cuts; the FILTER below requires
        # that the event has no reconstructed primary vertex.
        self.SelDiMuonNoPV = filterDiMuon( name + 'DiMuonNoPV',
                                           MuonPT        = config['DiMuonExclusive_MuonPT'],
                                           MuonP         = config['DiMuonExclusive_MuonP'],
                                           MuonTRCHI2DOF = config['DiMuonExclusive_MuonTRCHI2DOF'],
                                           MuMuMinMass   = config['DiMuonExclusive_MinMass'],
                                           MuMuVCHI2PDOF = config['DiMuonExclusive_VCHI2PDOF'],
                                           MuMuPT        = config['DiMuonExclusive_PT']
                                           )
        # NOTE(review): the line is built from self.SelDiMuonExclusive, not
        # from the self.SelDiMuonNoPV selection created just above — confirm
        # this is intentional.
        self.DiMuonNoPVLine = StrippingLine( name + 'DiMuonNoPV' + 'Line',
                                             prescale  = config['DiMuonNoPV_Prescale'],
                                             postscale = config['DiMuonNoPV_Postscale'],
                                             checkPV   = False,
                                             FILTER = { 'Code'      : "CONTAINS('Rec/Vertex/Primary')<0.5" ,
                                                        'Preambulo' : [ 'from LoKiTracks.decorators import *' ,
                                                                        'from LoKiCore.functions import *' ]
                                                        },
                                             selection = self.SelDiMuonExclusive
                                             )
        """
        DiMuon High Mass line
        """
        self.SelDiMuonHighMass = filterDiMuon( name + 'DiMuonHighMass',
                                               MuonPT        = config['DiMuonHighMass_MuonPT'],
                                               MuonP         = config['DiMuonHighMass_MuonP'],
                                               MuonTRCHI2DOF = config['DiMuonHighMass_MuonTRCHI2DOF'],
                                               MuMuMinMass   = config['DiMuonHighMass_MinMass'],
                                               MuMuVCHI2PDOF = config['DiMuonHighMass_VCHI2PDOF'],
                                               MuMuPT        = config['DiMuonHighMass_PT']
                                               )
        self.DiMuonHighMassLine = StrippingLine( name + 'DiMuonHighMass' + 'Line',
                                                 prescale  = config['DiMuonHighMass_Prescale'],
                                                 postscale = config['DiMuonHighMass_Postscale'],
                                                 checkPV   = config['DiMuonHighMass_checkPV'],
                                                 selection = self.SelDiMuonHighMass
                                                 )
        """
        DiMuon High Mass Same Sign line
        """
        self.SelDiMuonHighMassSameSign = filterDiMuonSameSign( name + 'DiMuonHighMassSameSign',
                                                               MuonPT        = config['DiMuonHighMass_MuonPT'],
                                                               MuonP         = config['DiMuonHighMass_MuonP'],
                                                               MuonTRCHI2DOF = config['DiMuonHighMass_MuonTRCHI2DOF'],
                                                               MuMuMinMass   = config['DiMuonHighMass_MinMass'],
                                                               MuMuVCHI2PDOF = config['DiMuonHighMass_VCHI2PDOF'],
                                                               MuMuPT        = config['DiMuonHighMass_PT']
                                                               )
        self.DiMuonHighMassSameSignLine = StrippingLine( name + 'DiMuonHighMassSameSign' + 'Line',
                                                         prescale  = config['DiMuonHighMassSameSign_Prescale'],
                                                         postscale = config['DiMuonHighMassSameSign_Postscale'],
                                                         checkPV   = config['DiMuonHighMassSameSign_checkPV'],
                                                         selection = self.SelDiMuonHighMassSameSign
                                                         )
        """
        DiMuon Low Mass line
        """
        self.SelDiMuonLowMass = filterDiMuon( name + 'DiMuonLowMass',
                                              MuonPT        = config['DiMuonLowMass_MuonPT'],
                                              MuonP         = config['DiMuonLowMass_MuonP'],
                                              MuonTRCHI2DOF = config['DiMuonLowMass_MuonTRCHI2DOF'],
                                              MuMuMinMass   = config['DiMuonLowMass_MinMass'],
                                              MuMuVCHI2PDOF = config['DiMuonLowMass_VCHI2PDOF'],
                                              MuMuPT        = config['DiMuonLowMass_PT']
                                              )
        # Low-mass line additionally requires the Hlt2 low-mass trigger decision.
        self.DiMuonLowMassLine = StrippingLine( name + 'DiMuonLowMass' + 'Line',
                                                HLT       = "HLT_PASS('Hlt2DiMuonLowMassDecision')",
                                                prescale  = config['DiMuonLowMass_Prescale'],
                                                postscale = config['DiMuonLowMass_Postscale'],
                                                checkPV   = config['DiMuonLowMass_checkPV'],
                                                selection = self.SelDiMuonLowMass
                                                )
        """
        Jpsi-> mumu tight line
        """
        self.SelJpsi2MuMu = filterJpsi2MuMu( name + 'Jpsi2MuMu',
                                             MuonPT        = config['Jpsi2MuMu_MuonPT'],
                                             MuonP         = config['Jpsi2MuMu_MuonP'],
                                             MuonPIDmu     = config['Jpsi2MuMu_MuonPIDmu'],
                                             MuonTRCHI2DOF = config['Jpsi2MuMu_MuonTRCHI2DOF'],
                                             MuMuMinMass   = config['Jpsi2MuMu_MinMass'],
                                             MuMuMaxMass   = config['Jpsi2MuMu_MaxMass'],
                                             MuMuVCHI2PDOF = config['Jpsi2MuMu_VCHI2PDOF'],
                                             MuMuPT        = config['Jpsi2MuMu_PT']
                                             )
        self.Jpsi2MuMuLine = StrippingLine( name + 'Jpsi2MuMu' + 'Line',
                                            prescale  = config['Jpsi2MuMu_Prescale'],
                                            postscale = config['Jpsi2MuMu_Postscale'],
                                            checkPV   = config['Jpsi2MuMu_checkPV'],
                                            selection = self.SelJpsi2MuMu
                                            )
        """
        Psi(2S)->mumu tight line
        """
        self.SelPsi2MuMu = filterSignal( name + 'Psi2MuMu',
                                         ParticleName  = config['Psi2MuMu_ParticleName'],
                                         MuonPT        = config['Psi2MuMu_MuonPT'],
                                         MuonP         = config['Psi2MuMu_MuonP'],
                                         MuonPIDmu     = config['Psi2MuMu_MuonPIDmu'],
                                         MuonTRCHI2DOF = config['Psi2MuMu_MuonTRCHI2DOF'],
                                         MuMuMassWindow= config['Psi2MuMu_MassWindow'],
                                         MuMuVCHI2PDOF = config['Psi2MuMu_VCHI2PDOF'],
                                         MuMuPT        = config['Psi2MuMu_PT']
                                         )
        self.Psi2MuMuLine = StrippingLine( name + 'Psi2MuMu' + 'Line',
                                           prescale  = config['Psi2MuMu_Prescale'],
                                           postscale = config['Psi2MuMu_Postscale'],
                                           checkPV   = config['Psi2MuMu_checkPV'],
                                           selection = self.SelPsi2MuMu
                                           )
        """
        DiMuonDetachedline
        """
        self.SelDiMuonDetached = filterDiMuonDetached( name + 'DiMuonDetached',
                                                       MuonPT        = config['DiMuonDetached_MuonPT'],
                                                       MuonP         = config['DiMuonDetached_MuonP'],
                                                       MuonPIDmu     = config['DiMuonDetached_MuonPIDmu'],
                                                       MuonTRCHI2DOF = config['DiMuonDetached_MuonTRCHI2DOF'],
                                                       MuMuMinMass   = config['DiMuonDetached_MinMass'],
                                                       MuMuVCHI2PDOF = config['DiMuonDetached_VCHI2PDOF'],
                                                       MuMuPT        = config['DiMuonDetached_PT'],
                                                       MuMuDLS       = config['DiMuonDetached_DLS']
                                                       )
        self.DiMuonDetachedLine = StrippingLine( name + 'DiMuonDetached' + 'Line',
                                                 prescale  = config['DiMuonDetached_Prescale'],
                                                 postscale = config['DiMuonDetached_Postscale'],
                                                 checkPV   = True,
                                                 selection = self.SelDiMuonDetached
                                                 )
        """
        Jpsi2MuMuDetached tight line
        """
        self.SelJpsi2MuMuDetached = filterJpsi2MuMuDetached( name + 'Jpsi2MuMuDetached',
                                                             MuonPT        = config['Jpsi2MuMuDetached_MuonPT'],
                                                             MuonP         = config['Jpsi2MuMuDetached_MuonP'],
                                                             MuonPIDmu     = config['Jpsi2MuMuDetached_MuonPIDmu'],
                                                             MuonTRCHI2DOF = config['Jpsi2MuMuDetached_MuonTRCHI2DOF'],
                                                             MuMuMinMass   = config['Jpsi2MuMuDetached_MinMass'],
                                                             MuMuMaxMass   = config['Jpsi2MuMuDetached_MaxMass'],
                                                             MuMuVCHI2PDOF = config['Jpsi2MuMuDetached_VCHI2PDOF'],
                                                             MuMuPT        = config['Jpsi2MuMuDetached_PT'],
                                                             MuMuDLS       = config['Jpsi2MuMuDetached_DLS']
                                                             )
        self.Jpsi2MuMuDetachedLine = StrippingLine( name + 'Jpsi2MuMuDetached' + 'Line',
                                                    prescale  = config['Jpsi2MuMuDetached_Prescale'],
                                                    postscale = config['Jpsi2MuMuDetached_Postscale'],
                                                    checkPV   = True,
                                                    selection = self.SelJpsi2MuMuDetached
                                                    )
        """
        Psi2MuMuDetached line
        """
        self.SelPsi2MuMuDetached = filterSignalDetached( name + 'Psi2MuMuDetached',
                                                         ParticleName  = config['Psi2MuMuDetached_ParticleName'],
                                                         MuonPT        = config['Psi2MuMuDetached_MuonPT'],
                                                         MuonP         = config['Psi2MuMuDetached_MuonP'],
                                                         MuonPIDmu     = config['Psi2MuMuDetached_MuonPIDmu'],
                                                         MuonTRCHI2DOF = config['Psi2MuMuDetached_MuonTRCHI2DOF'],
                                                         MuMuMassWindow= config['Psi2MuMuDetached_MassWindow'],
                                                         MuMuVCHI2PDOF = config['Psi2MuMuDetached_VCHI2PDOF'],
                                                         MuMuPT        = config['Psi2MuMuDetached_PT'],
                                                         MuMuDLS       = config['Psi2MuMuDetached_DLS']
                                                         )
        self.Psi2MuMuDetachedLine = StrippingLine( name + 'Psi2MuMuDetached' + 'Line',
                                                   prescale  = config['Psi2MuMuDetached_Prescale'],
                                                   postscale = config['Psi2MuMuDetached_Postscale'],
                                                   checkPV   = True,
                                                   selection = self.SelPsi2MuMuDetached
                                                   )
        # MicroDST output carries the inclusive/same-sign/prescaled lines;
        # full-DST output carries the exclusive and no-PV lines instead.
        if config['MicroDST']:
            self.registerLine( self.DiMuonLine )
            self.registerLine( self.DiMuonSameSignLine )
            self.registerLine( self.DiMuonPrescaledLine )
        else:
            self.registerLine( self.DiMuonExclusiveLine )
            self.registerLine( self.DiMuonNoPVLine )
        # The remaining lines are registered for both output formats.
        self.registerLine( self.DiMuonHighMassLine )
        self.registerLine( self.DiMuonHighMassSameSignLine )
        self.registerLine( self.DiMuonLowMassLine )
        self.registerLine( self.Jpsi2MuMuLine )
        self.registerLine( self.Psi2MuMuLine )
        self.registerLine( self.DiMuonDetachedLine )
        self.registerLine( self.Jpsi2MuMuDetachedLine )
        self.registerLine( self.Psi2MuMuDetachedLine )
def filterDiMuon( name,
                  MuonPT,
                  MuonP,
                  MuonTRCHI2DOF,
                  MuMuMinMass,
                  MuMuVCHI2PDOF,
                  MuMuPT
                  ):
    """Select StdLooseDiMuon candidates passing muon- and dimuon-level cuts."""
    # Per-muon requirements (cut values interpolated from the arguments).
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s)" % locals()
    # Dimuon-candidate requirements: mass, vertex quality, transverse momentum.
    mumu_cut = "(MM > %(MuMuMinMass)s) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s)" % locals()
    source_sel = DataOnDemand( Location = 'Phys/StdLooseDiMuon/Particles' )
    dimuon_filter = FilterDesktop( Code = " & ".join([muon_cut, mumu_cut]) )
    return Selection( name + "_SelMuMu",
                      Algorithm = dimuon_filter,
                      RequiredSelections = [ source_sel ]
                      )
def filterDiMuonSameSign( name,
                          MuonPT,
                          MuonP,
                          MuonTRCHI2DOF,
                          MuMuMinMass,
                          MuMuVCHI2PDOF,
                          MuMuPT
                          ):
    """Select same-sign StdLooseDiMuonSameSign candidates with the given cuts."""
    # Per-muon requirements (cut values interpolated from the arguments).
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s)" % locals()
    # Dimuon-candidate requirements: mass, vertex quality, transverse momentum.
    mumu_cut = "(MM > %(MuMuMinMass)s) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s *MeV)" % locals()
    source_sel = DataOnDemand( Location = 'Phys/StdLooseDiMuonSameSign/Particles' )
    dimuon_filter = FilterDesktop( Code = " & ".join([muon_cut, mumu_cut]) )
    return Selection( name + "_SelMuMuSS",
                      Algorithm = dimuon_filter,
                      RequiredSelections = [ source_sel ]
                      )
def filterJpsi2MuMu( name,
                     MuonPT,
                     MuonP,
                     MuonPIDmu,
                     MuonTRCHI2DOF,
                     MuMuMinMass,
                     MuMuMaxMass,
                     MuMuVCHI2PDOF,
                     MuMuPT
                     ):
    """Select StdLooseJpsi2MuMu candidates in a mass window with muon PID cuts."""
    # Per-muon requirements including the muon PID hypothesis.
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MINTREE('mu+'==ABSID,PIDmu) > %(MuonPIDmu)s) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s)" % locals()
    # Candidate requirements: two-sided mass window, vertex quality, PT.
    mumu_cut = "(MM > %(MuMuMinMass)s) & (MM < %(MuMuMaxMass)s) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s)" % locals()
    source_sel = DataOnDemand( Location = 'Phys/StdLooseJpsi2MuMu/Particles' )
    jpsi_filter = FilterDesktop( Code = " & ".join([muon_cut, mumu_cut]) )
    return Selection( name + "_SelJpsi2MuMu",
                      Algorithm = jpsi_filter,
                      RequiredSelections = [ source_sel ]
                      )
def filterSignal( name,
                  ParticleName,
                  MuonPT,
                  MuonP,
                  MuonPIDmu,
                  MuonTRCHI2DOF,
                  MuMuMassWindow,
                  MuMuVCHI2PDOF,
                  MuMuPT
                  ):
    """Select StdLooseDiMuon candidates within a mass window of `ParticleName`."""
    # Per-muon requirements including the muon PID hypothesis.
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MINTREE('mu+'==ABSID,PIDmu) > %(MuonPIDmu)s) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s)" % locals()
    # Symmetric mass window around the named particle, plus vertex/PT cuts.
    mumu_cut = "(ADMASS(%(ParticleName)s) < %(MuMuMassWindow)s *MeV) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s *MeV)" % locals()
    source_sel = DataOnDemand( Location = 'Phys/StdLooseDiMuon/Particles' )
    signal_filter = FilterDesktop( Code = " & ".join([muon_cut, mumu_cut]) )
    return Selection( name + "_SelP2MuMu",
                      Algorithm = signal_filter,
                      RequiredSelections = [ source_sel ]
                      )
def filterDZ( name,
              DZAnyPV,
              MySelection ):
    """Event-level filter on the distance in z between the candidates of
    `MySelection` and any primary vertex."""
    # Code references minMyZ/minPVZ, which the Preambulo defines below.
    void_code = " ( minMyZ - minPVZ ) < %(DZAnyPV)s*mm " % locals()
    preambulo = [ "from LoKiPhys.decorators import *",
                  "minMyZ = SOURCE('%s') >> min_value( VFASPF(VZ) )" % (MySelection.outputLocation()),
                  "minPVZ = VSOURCE('Rec/Vertex/Primary') >> min_value(VZ) "
                  ]
    void_filter = LoKi__VoidFilter( name + 'filterDZ',
                                    Code = void_code,
                                    Preambulo = preambulo
                                    )
    return EventSelection ( void_filter )
def filterDiMuonAndDZ( name,
                       MuonPT,
                       MuonP,
                       MuonTRCHI2DOF,
                       MuMuMinMass,
                       MuMuVCHI2PDOF,
                       MuMuPT,
                       MuMuDZ
                       ):
    """Select StdLooseDiMuon candidates with an additional BPVVDZ requirement."""
    # Per-muon requirements (cut values interpolated from the arguments).
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s)" % locals()
    # Candidate requirements plus the vertex-detachment (DZ) cut.
    mumu_cut = "(MM > %(MuMuMinMass)s) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s) & (BPVVDZ < %(MuMuDZ)s*mm)" % locals()
    source_sel = DataOnDemand( Location = 'Phys/StdLooseDiMuon/Particles' )
    dimuon_filter = FilterDesktop( Code = " & ".join([muon_cut, mumu_cut]) )
    return Selection( name + "_SelMuMu",
                      Algorithm = dimuon_filter,
                      RequiredSelections = [ source_sel ]
                      )
def filterDiMuonDetached( name,
                          MuonPT,
                          MuonP,
                          MuonPIDmu,
                          MuonTRCHI2DOF,
                          MuMuMinMass,
                          MuMuVCHI2PDOF,
                          MuMuPT,
                          MuMuDLS
                          ):
    """Select detached StdLooseDiMuon candidates (decay-length significance cut)."""
    # Per-muon requirements including the muon PID hypothesis.
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s) & (MINTREE('mu+'==ABSID,PIDmu) > %(MuonPIDmu)s)" % locals()
    # Candidate requirements plus the decay-length-significance (DLS) cut.
    mumu_cut = "(MM > %(MuMuMinMass)s) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s) & (BPVDLS>%(MuMuDLS)s)" % locals()
    source_sel = DataOnDemand( Location = 'Phys/StdLooseDiMuon/Particles' )
    dimuon_filter = FilterDesktop( Code = " & ".join([muon_cut, mumu_cut]) )
    return Selection( name + "_SelMuMu",
                      Algorithm = dimuon_filter,
                      RequiredSelections = [ source_sel ]
                      )
def filterJpsi2MuMuDetached( name,
                             MuonPT,
                             MuonP,
                             MuonPIDmu,
                             MuonTRCHI2DOF,
                             MuMuMinMass,
                             MuMuMaxMass,
                             MuMuVCHI2PDOF,
                             MuMuPT,
                             MuMuDLS
                             ):
    """Select detached StdLooseJpsi2MuMu candidates in a mass window."""
    # Per-muon requirements including the muon PID hypothesis.
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s) & (MINTREE('mu+'==ABSID,PIDmu) > %(MuonPIDmu)s)" % locals()
    # Candidate requirements: mass window, vertex quality, PT, detachment.
    mumu_cut = "(MM > %(MuMuMinMass)s) & (MM < %(MuMuMaxMass)s) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s) & (BPVDLS>%(MuMuDLS)s)" % locals()
    source_sel = DataOnDemand( Location = 'Phys/StdLooseJpsi2MuMu/Particles' )
    jpsi_filter = FilterDesktop( Code = " & ".join([muon_cut, mumu_cut]) )
    return Selection( name + "_SelJpsi2MuMu",
                      Algorithm = jpsi_filter,
                      RequiredSelections = [ source_sel ]
                      )
def filterSignalDetached( name,
                          ParticleName,
                          MuonPT,
                          MuonP,
                          MuonPIDmu,
                          MuonTRCHI2DOF,
                          MuMuMassWindow,
                          MuMuVCHI2PDOF,
                          MuMuPT,
                          MuMuDLS
                          ):
    """Select detached StdLooseDiMuon candidates near the mass of `ParticleName`."""
    # Per-muon requirements including the muon PID hypothesis.
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MINTREE('mu+'==ABSID,PIDmu) > %(MuonPIDmu)s) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s)" % locals()
    # Symmetric mass window plus vertex, PT and detachment (DLS) cuts.
    mumu_cut = "(ADMASS(%(ParticleName)s) < %(MuMuMassWindow)s *MeV) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s *MeV) & (BPVDLS>%(MuMuDLS)s)" % locals()
    source_sel = DataOnDemand( Location = 'Phys/StdLooseDiMuon/Particles' )
    signal_filter = FilterDesktop( Code = " & ".join([muon_cut, mumu_cut]) )
    return Selection( name + "_SelP2MuMu",
                      Algorithm = signal_filter,
                      RequiredSelections = [ source_sel ]
                      )
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
6a5307ad7db7ca33697b63b6436b59f2d9a19557 | 847273de4b1d814fab8b19dc651c651c2d342ede | /.history/solve_20180621175952.py | 3582f849f7b735ae37b11bf925233f5c6574c087 | [] | no_license | Los4U/sudoku_in_python | 0ba55850afcffeac4170321651620f3c89448b45 | 7d470604962a43da3fc3e5edce6f718076197d32 | refs/heads/master | 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py |
def findNextCellToFill(grid, i, j):
    """Return (row, col) of the next empty cell (value 0) in a 9x9 grid.

    First scans the sub-rectangle with rows >= i and cols >= j; if no empty
    cell lies there, falls back to a full-grid scan. Returns (-1, -1) when
    the grid has no empty cell at all.
    """
    ahead = [(x, y) for x in range(i, 9) for y in range(j, 9) if grid[x][y] == 0]
    if ahead:
        return ahead[0]
    anywhere = [(x, y) for x in range(9) for y in range(9) if grid[x][y] == 0]
    if anywhere:
        return anywhere[0]
    return -1, -1
def isValid(grid, i, j, e):
    """Return True if digit `e` may legally be placed at cell (i, j).

    Legal means `e` does not already appear in row i, column j, or the
    3x3 section containing (i, j).
    """
    # Row and column checks first — cheapest to reject on.
    if any(grid[i][col] == e for col in range(9)):
        return False
    if any(grid[row][j] == e for row in range(9)):
        return False
    # Top-left corner of the 3x3 section containing (i, j).
    sec_x, sec_y = 3 * (i // 3), 3 * (j // 3)
    return all(grid[x][y] != e
               for x in range(sec_x, sec_x + 3)
               for y in range(sec_y, sec_y + 3))
def solveSudoku(grid, i=0, j=0):
    """Solve the 9x9 sudoku `grid` in place by recursive backtracking.

    Empty cells hold 0. Returns True when a solution was filled in,
    False when the current partial grid is unsolvable.
    """
    i, j = findNextCellToFill(grid, i, j)
    if i == -1:
        # No empty cell left: the grid is complete.
        return True
    for candidate in range(1, 10):
        if not isValid(grid, i, j, candidate):
            continue
        grid[i][j] = candidate
        if solveSudoku(grid, i, j):
            return True
        # Backtrack: undo the tentative placement.
        grid[i][j] = 0
    return False
# Demo puzzle (0 marks an empty cell). Renamed from `input` so the
# built-in input() is no longer shadowed.
puzzle = [[5,1,7,6,0,0,0,3,4],[2,8,9,0,0,4,0,0,0],[3,4,6,2,0,5,0,9,0],[6,0,2,0,0,0,0,1,0],[0,3,8,0,0,6,0,4,7],[0,0,0,0,0,0,0,0,0],[0,9,0,0,0,0,0,7,8],[7,0,3,4,0,0,5,6,0],[0,0,0,0,0,0,0,0,0]]
solveSudoku(puzzle)
"inz.kamil.wos@gmail.com"
] | inz.kamil.wos@gmail.com |
87698240dedfa49bb2aabfe1ee93c284e48fef0d | 0296303ffce6c25fc300016985daa042afa581c0 | /ThreadTest/Threadtest2.py | 3aca4a640ac63dffc57c2c1018434f4b80780b4e | [] | no_license | xerxesnoPT/ThreadTest | 24ebe2315dad30cad0a550437914302fcef04e18 | 5856faa560a6200357a09d9f02bda578604b4ceb | refs/heads/master | 2021-01-25T04:59:01.539730 | 2017-06-28T14:28:22 | 2017-06-28T14:28:22 | 93,497,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # -*- coding: utf-8 -*-
from threading import Thread
import threading
def execute(name):
    """Print a greeting five times, tagged with the current thread's name."""
    count = 0
    while count < 5:
        print('我的名字是 %s ' % name + threading.current_thread().name)
        count += 1
def main():
    """Run the two demo threads; the first is joined before the second starts."""
    worker_jack = Thread(target=execute, name='1', args=('jack',))
    worker_tom = Thread(target=execute, name='2', args=('Tom',))
    worker_jack.start()
    worker_jack.join()
    # NOTE(review): the second thread is started but never joined.
    worker_tom.start()
main()
| [
"346287244@qq.com"
] | 346287244@qq.com |
a7397e10786125cdc8ee81286b7a97fdbc6f1f78 | 38b8bceafb4d80afc7c77196eb9ee99694191bcf | /wxpython/grid2.py | e9749835196d535abce07a36ed5223c8b385ea9f | [] | no_license | tangc1986/PythonStudy | f6c5b384874e82fbf0b5f51cfb7a7a89a48ec0ff | 1ed1956758e971647426e7096ac2e8cbcca585b4 | refs/heads/master | 2021-01-23T20:39:23.930754 | 2017-10-08T07:40:32 | 2017-10-08T07:42:38 | 42,122,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | # -*- coding: UTF-8 -*-
__author__ = 'tangchao'
import wx
import wx.grid
class TestFrame(wx.Frame):
    """Frame demonstrating custom row/column header labels on a wx.grid.Grid."""
    rowLabels = ["uno", "dos", "tres", "quatro", "cinco"]
    colLabels = ["homer", "marge", "bart", "lisa", "mnaggie"]
    def __init__(self):
        wx.Frame.__init__(self, None, title="Grid Headers",
                          size=(500, 200))
        grid = wx.grid.Grid(self)
        grid.CreateGrid(5, 5)
        # Apply the custom header labels.
        for idx, (row_name, col_name) in enumerate(zip(self.rowLabels, self.colLabels)):
            grid.SetRowLabelValue(idx, row_name)
            grid.SetColLabelValue(idx, col_name)
        # Fill each cell with the pair of header labels it sits under.
        for r, row_name in enumerate(self.rowLabels):
            for c, col_name in enumerate(self.colLabels):
                grid.SetCellValue(r, c, "(%s, %s)" % (row_name, col_name))
# Stand-alone demo: create the application, show the frame, enter the loop.
app = wx.PySimpleApp()
frame = TestFrame()
frame.Show()
app.MainLoop()
| [
"tangc1986@gmail.com"
] | tangc1986@gmail.com |
f870f70bfe5f8b58c4498a23bcc61fb7fcb14708 | 584189cb433a6c41c2a87df4d4541c41a1c47275 | /main_semantic_rotate.py | 19b4e441fb4b858bc73ca01f94c9928ded2d76d9 | [
"MIT"
] | permissive | ChalieChang1028/Semantify-NN | b4782102cb89f6b03d3ac33173bdf0d6abd16c5e | d641e413955f1a1f0b742313b48c8c0ad4df8278 | refs/heads/master | 2023-03-16T08:06:05.842953 | 2020-06-15T15:09:55 | 2020-06-15T15:09:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,718 | py | import faulthandler;
faulthandler.enable()
import numpy as np
import os, sys, random, time, math, argparse
from utils.setup_mnist import MNIST
from utils.setup_cifar import CIFAR
from utils.setup_gtsrb import GTSRB
import utils.save_nlayer_weights as nl
from utils.utils import generate_data
from algo_Semantic import Semantic
def handle_parser(parser):
    """Attach all command-line options for the verification run to `parser`.

    Covers model/dataset selection, perturbation parameters (eps/delta/subdiv),
    network architecture, target selection, and several experimental flags.
    Returns the same parser for chaining.
    """
    # --- model / dataset selection ---
    parser.add_argument('--model',
                        default="mnist",
                        choices=["mnist", "cifar", "gtsrb"],
                        help='model to be used')
    # --- perturbation / splitting parameters ---
    parser.add_argument('--eps',
                        default=100.0,
                        type=float,
                        help="theta for verification")
    parser.add_argument('--delta',
                        default=0.5,
                        type=float,
                        help="size of explicit splits")
    parser.add_argument('--subdiv',
                        default=5,
                        type=int,
                        help="num divisions")
    # --- architecture ---
    parser.add_argument('--hidden',
                        default=1024,
                        type=int,
                        help="number of hidden neurons per layer")
    parser.add_argument('--numlayer',
                        default=2,
                        type=int,
                        help='number of layers in the model')
    # --- evaluation set ---
    parser.add_argument('--numimage',
                        default=2,
                        type=int,
                        help='number of images to run')
    parser.add_argument('--startimage',
                        default=0,
                        type=int,
                        help='start image')
    parser.add_argument('--hsl',
                        default="lighten",
                        choices=["lighten", "saturate"],
                        help='model to be used')
    parser.add_argument('--norm',
                        default="i",
                        type=str,
                        choices=["i", "1", "2"],
                        help='perturbation norm: "i": Linf, "1": L1, "2": L2')
    # --- bound-computation options ---
    parser.add_argument('--LP',
                        action="store_true",
                        help='use LP to get bounds for final output')
    parser.add_argument('--LPFULL',
                        action="store_true",
                        help='use FULL LP to get bounds for output')
    parser.add_argument('--quad',
                        action="store_true",
                        help='use quadratic bound to imporve 2nd layer output')
    parser.add_argument('--warmup',
                        action="store_true",
                        help='warm up before the first iteration')
    parser.add_argument('--modeltype',
                        default="vanilla",
                        choices=["vanilla", "dropout", "distill", "adv_retrain"],
                        help="select model type")
    parser.add_argument('--targettype',
                        default="top2",
                        choices=["untargeted", "least", "top2", "random"],
                        help='untargeted minimum distortion')
    parser.add_argument('--steps',
                        default=15,
                        type=int,
                        help='how many steps to binary search')
    parser.add_argument('--activation',
                        default="relu",
                        choices=["relu", "tanh", "sigmoid", "arctan", "elu", "hard_sigmoid", "softplus"])
    # --- experimental / debugging flags ---
    parser.add_argument('--test_minUB',
                        action="store_true",
                        help='test the idea of minimize UB of g(x) in Fast-Lin')
    parser.add_argument('--test_estLocalLips',
                        action="store_true",
                        help='test the idea of estimating local lipschitz constant using Fast-Lin')
    parser.add_argument('--test_probnd',
                        default="none",
                        choices=["gaussian_iid", "gaussian_corr", "uniform", "none"],
                        help="select input distribution")
    parser.add_argument('--test_weightpert',
                        action="store_true",
                        help="perturb weight matrices")
    return parser
if __name__ == "__main__":
#### parser ####
parser = argparse.ArgumentParser(description='compute activation bound for CIFAR and MNIST')
parser = handle_parser(parser)
args = parser.parse_args()
nhidden = args.hidden
# quadratic bound only works for ReLU
assert ((not args.quad) or args.activation == "relu")
# for all activations we can use general framework
targeted = True
if args.targettype == "least":
target_type = 0b0100
elif args.targettype == "top2":
target_type = 0b0001
elif args.targettype == "random":
target_type = 0b0010
elif args.targettype == "untargeted":
target_type = 0b10000
targeted = False
if args.modeltype == "vanilla":
suffix = ""
else:
suffix = "_" + args.modeltype
# try models/mnist_3layer_relu_1024
activation = args.activation
modelfile = "models/" + args.model + "_" + str(args.numlayer) + "layer_" + activation + "_" + str(nhidden) + suffix
if not os.path.isfile(modelfile):
# if not found, try models/mnist_3layer_relu_1024_1024
modelfile += ("_" + str(nhidden)) * (args.numlayer - 2) + suffix
# if still not found, try models/mnist_3layer_relu
if not os.path.isfile(modelfile):
modelfile = "models/" + args.model + "_" + str(args.numlayer) + "layer_" + activation + "_" + suffix
# if still not found, try models/mnist_3layer_relu_1024_best
if not os.path.isfile(modelfile):
modelfile = "models/" + args.model + "_" + str(args.numlayer) + "layer_" + activation + "_" + str(
nhidden) + suffix + "_best"
if not os.path.isfile(modelfile):
raise (RuntimeError("cannot find model file"))
if args.LP or args.LPFULL:
# use gurobi solver
import gurobipy as grb
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# with tf.Session(config=config) as sess:
if args.model == "mnist":
data = MNIST()
model = nl.NLayerModel([nhidden] * (args.numlayer - 1), modelfile, activation=activation)
elif args.model == "cifar":
data = CIFAR()
model = nl.NLayerModel([nhidden] * (args.numlayer - 1), modelfile, image_size=32, image_channel=3,
activation=activation)
elif args.model == "gtsrb":
data = GTSRB()
model = nl.NLayerModel([nhidden] * (args.numlayer - 1), modelfile, image_size=28, image_channel=3,
activation=activation, num_labels = 43)
else:
raise (RuntimeError("unknown model: " + args.model))
print("Evaluating", modelfile)
sys.stdout.flush()
random.seed(1215)
np.random.seed(1215)
"""
Generate data
"""
inputs, targets, true_labels, true_ids, img_info = generate_data(data, samples=args.numimage, targeted=targeted,
random_and_least_likely=True,
target_type=target_type,
predictor=model.model.predict,
start=args.startimage)
# get the logit layer predictions
preds = model.model.predict(inputs)
Nsamp = 0
r_sum = 0.0
r_gx_sum = 0.0
"""
Start computing robustness bound
"""
print("starting robustness verification on {} images!".format(len(inputs)))
sys.stdout.flush()
sys.stderr.flush()
total_time_start = time.time()
# compute worst case bound: no need to pass in sess, model and data
# just need to pass in the weights, true label, norm, x0, prediction of x0, number of layer and eps
Semantic_BND = Semantic(model)
total_verifiable = 0
lower, upper = 0.0, 0.0
for i in range(len(inputs)):
Nsamp += 1
p = args.norm
predict_label = np.argmax(true_labels[i])
target_label = np.argmax(targets[i])
theta = args.eps
theta_delta = args.delta
verifiable = True
divisions = args.subdiv
start = time.time()
lower_bound, upper_bound = -theta, theta
for t in range(int(np.ceil(theta / theta_delta))):
start_1 = time.time()
# run CROWN
robustness = Semantic_BND.certify_eps_implicit(predict_label, target_label, t * theta_delta,
(t + 1) * theta_delta, inputs[i], divisions=divisions)
print("verified", time.time() - start_1)
# check if the provided eps is verifiable
if robustness <= 0:
lower_bound = -1* t * theta_delta
upper_bound = t * theta_delta
verifiable = False
lower_bound = -1 * t * theta_delta
upper_bound = t * theta_delta
break
start_1 = time.time()
# run CROWN
robustness = Semantic_BND.certify_eps_implicit(predict_label, target_label, -(t + 1) * theta_delta,
-t * theta_delta, inputs[i], divisions=divisions)
print("verified", time.time() - start_1)
# check if the provided eps is verifiable
if robustness <= 0:
lower_bound = -1 * t * theta_delta
upper_bound = (t + 1) * theta_delta
verifiable = False
break
if verifiable:
total_verifiable += 1
print("[L1] model = {}, seq = {}, id = {}, true_class = {}, target_class = {}, info = {}, "
"verifiable = {}, lower_bound = {}, upper_bound = {}, time = {:.4f}, total_time = {:.4f}"
.format(modelfile, i, true_ids[i], predict_label, target_label, img_info[i],
verifiable, lower_bound, upper_bound, time.time() - start, time.time() - start))
lower += lower_bound
upper += upper_bound
sys.stdout.flush()
sys.stderr.flush()
print("[L0] model = {}, info = {}, numimage = {}, lower_bound_avg = {}, uper_bound_avg = {}, total verifiable = {:.2f}%, time = {:.4f}, total_time = {:.4f}".format(modelfile, img_info[i], Nsamp, lower/Nsamp, upper/Nsamp, 100 * total_verifiable / Nsamp, time.time() - start, time.time() - total_time_start))
sys.stdout.flush()
sys.stderr.flush() | [
"jeetjmohapatra@gmail.com"
] | jeetjmohapatra@gmail.com |
5b46c342299287a5827b31764025e34f373be71a | e173d2c3b0d6c0a91ef22d3d119ae44335827396 | /imdb_lstm.py | 873ccb1d9aeb86a394a243cef29ffbf74509b021 | [] | no_license | angetato/Custom-Optimizer-on-Keras | a3e6e87b3a547119f4bfb0d0c334dbe8c5a1aab5 | 5e0ffe1290a01c79dba1d7974df16d41dcae2ccd | refs/heads/master | 2020-04-23T04:12:03.375760 | 2020-02-06T07:32:47 | 2020-02-06T07:32:47 | 170,901,679 | 24 | 8 | null | null | null | null | UTF-8 | Python | false | false | 3,877 | py | '''Trains an LSTM model on the IMDB sentiment classification task.
The dataset is actually too small for LSTM to be of any advantage
compared to simpler, much faster methods such as TF-IDF + LogReg.
# Notes
- RNNs are tricky. Choice of batch size is important,
choice of loss and optimizer is critical, etc.
Some configurations won't converge.
- LSTM loss decrease patterns during training can be quite different
from what you see with CNNs/MLPs/etc.
'''
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM
from keras.datasets import imdb
from keras.utils import plot_model
from adam import Adam
from aadam import AAdam
from sgd import SGD
from asgd import ASGD
from adagrad import Adagrad
from aadagrad import AAdagrad
import numpy as np
import pandas as pd
max_features = 5000
# cut texts after this number of words (among top max_features most common words)
maxlen = 80
batch_size = 32
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(LSTM(32, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))
plot_model(model, to_file='model_imdb.png',show_shapes=True)
results_acc = []
result_acc= []
results_loss = []
result_loss = []
test_acc_results = []
test_loss_results = []
l= [Adam(lr=0.001,amsgrad = True), AAdam(lr=0.001,amsgrad = True),Adam(lr=0.001,amsgrad = False), AAdam(lr=0.001,amsgrad = False),Adagrad(),AAdagrad(),SGD(),ASGD() ] #, Adam(lr=0.001, amsgrad = True), AAdam(lr=0.001, amsgrad = True)]
for opt in l:
model.compile(loss='binary_crossentropy',
optimizer=opt,
metrics=['accuracy'])
#model.save_weights('initial_weights_imdb.h5')
model.load_weights('initial_weights_imdb.h5')
initial_weights = model.get_weights()
result_acc = []
result_loss = []
test_loss = []
test_acc = []
for i in range (2):
model.set_weights(initial_weights)
result_acc_e = []
result_loss_e = []
test_acc_e = []
test_loss_e = []
for j in range (10):
history = model.fit(x_train, y_train,batch_size=batch_size,epochs=1,verbose=0)
'''if j % 2 == 0 :
test_loss_j, test_acc_j = model.evaluate(x_test, y_test)
test_acc_e.append(test_acc_j)
test_loss_e.append(test_loss_j)'''
result_acc_e.append(history.history['acc'][0])
result_loss_e.append(history.history['loss'][0])
test_loss.append(test_loss_e)
test_acc.append(test_acc_e)
result_acc.append(result_acc_e)
result_loss.append(result_loss_e)
print("##### NEW OPTIMIZER #####")
print(opt)
print(np.mean(result_acc,axis=0))
print(np.mean(result_loss,axis=0))
print(np.mean(test_acc,axis=0))
print(np.mean(test_loss,axis=0))
results_acc.append(np.mean(result_acc,axis=0))
results_loss.append(np.mean(result_loss,axis=0))
test_acc_results.append(np.mean(test_acc,axis=0))
test_loss_results.append(np.mean(test_loss,axis=0))
df = pd.DataFrame(results_acc)
df.to_csv("results/imdb_acc_train_lstm.csv")
df = pd.DataFrame(results_loss)
df.to_csv("results/imdb_loss_train_lstm.csv")
df = pd.DataFrame(test_acc_results)
df.to_csv("results/imdb_acc_test_lstm.csv")
df = pd.DataFrame(test_loss_results)
df.to_csv("results/imdb_loss_test_lstm.csv")
| [
"noreply@github.com"
] | angetato.noreply@github.com |
3a8a43bea8ce7431e19f70707e05a2eee6bca393 | 68543ebd7a7cbb5e5d092f2380dbbbd05a923b90 | /scripts/extra/find_class.py | e47db117e299573242800b6f6cf94c042f437d2b | [] | no_license | chromatices/Rotate_box_mAP | d459cb495b09916b0e8a175aefb8c6fdf392c173 | 013d45731e1c7b227a27ff4848c1ca5e3fe88855 | refs/heads/master | 2021-07-09T20:30:56.347629 | 2021-04-29T13:02:11 | 2021-04-29T13:02:11 | 243,504,877 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | import sys
import os
import glob
# make sure that the cwd() in the beginning is the location of the python script (so that every path makes sense)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
if len(sys.argv) != 2:
print("Error: wrong format.\nUsage: python find_class.py [class_name]")
sys.exit(0)
searching_class_name = sys.argv[1]
def find_class(class_name):
    """Print each *.txt file in the cwd that lists ``class_name``.

    A file matches when the first whitespace-separated token of any of
    its lines equals ``class_name``. Prints a fallback message when no
    file matches; output format is unchanged from the original.
    """
    file_list = sorted(glob.glob('*.txt'))
    file_found = False
    for txt_file in file_list:
        # read the file's lines, stripped of trailing whitespace/newlines
        with open(txt_file) as f:
            lines = [x.strip() for x in f.readlines()]
        for line in lines:
            tokens = line.split()
            # Guard against blank lines, which would crash on tokens[0].
            if not tokens:
                continue
            # Compare against the parameter. The original shadowed
            # `class_name` inside the loop and silently compared against
            # the module-level global `searching_class_name` instead.
            if tokens[0] == class_name:
                print(" " + txt_file)
                file_found = True
                break
    if not file_found:
        print(" No file found with that class")
parent_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
parent_path = os.path.abspath(os.path.join(parent_path, os.pardir))
GT_PATH = os.path.join(parent_path, 'input','ground-truth')
DR_PATH = os.path.join(parent_path, 'input','rotated-horizontal-detection-results')
print("ground-truth folder:")
os.chdir(GT_PATH)
find_class(searching_class_name)
print("rotated-horizontal-detection-results folder:")
os.chdir(DR_PATH)
find_class(searching_class_name)
| [
"pjh403@naver.com"
] | pjh403@naver.com |
ed979ab630c289963e03474eb637faa48e40ab30 | ccdd61e4813c8a0a9f28d23c2ee5b02790cc7456 | /find_kallsyms.py | 6abcbcbc6093bfc8366c561666dd2fec2826f1f2 | [] | no_license | freemanZYQ/ida-kallsyms | 65084ffa65c3d8456fc227b7391ed0c87fbdbd50 | 4f0beb659a3b65e4b1c5056ad9ebba6ac4572b21 | refs/heads/master | 2020-08-15T07:51:23.810578 | 2019-10-10T18:01:48 | 2019-10-10T18:04:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,930 | py | #!/usr/bin/env python3
import logging
import struct
import sys
def try_parse_token_index(rodata, endianness, offset):
    """Try to decode 256 strictly increasing u16 values at ``offset``.

    The first value must be 0 (asserted). Returns the list of 256
    indices, or None as soon as monotonicity is violated.
    """
    fmt = endianness + 'H'
    first, = struct.unpack(fmt, rodata[offset:offset + 2])
    assert first == 0, 'The first token index must be 0'
    result = [first]
    for pos in range(offset + 2, offset + 512, 2):
        value, = struct.unpack(fmt, rodata[pos:pos + 2])
        if value <= result[-1]:
            # Not strictly increasing -> this is not a token index table.
            return None
        result.append(value)
    return result
def find_token_indices(rodata, endianness):
    """Yield (offset, indices) for every candidate kallsyms_token_index
    array found in ``rodata``."""
    token_index_offset = 0
    while True:
        # kallsyms_token_index is an array of monotonically increasing 256
        # shorts, the first of which is 0. It is located right after
        # kallsyms_token_table, which is a sequence of null-terminated strings.
        # Therefore, look for 1+2 consecutive zeroes.
        token_index_offset = rodata.find(
            b'\x00\x00\x00', token_index_offset) + 1
        # find() returns -1 when no match remains, so after +1 a value of
        # 0 means the scan is exhausted.
        if token_index_offset == 0:
            break
        token_index = try_parse_token_index(
            rodata, endianness, token_index_offset)
        if token_index is not None:
            yield token_index_offset, token_index
def try_parse_token_table(rodata, token_index, start_offset, end_offset):
    """Try to split rodata[start_offset:end_offset] into 256
    NUL-terminated tokens.

    Token i starts at start_offset + token_index[i]. Returns the tokens
    with their terminators stripped, or None if any token is malformed.
    """
    # Each token ends where the next one begins; the last ends at end_offset.
    bounds = [start_offset + idx for idx in token_index] + [end_offset]
    tokens = []
    for begin, end in zip(bounds, bounds[1:]):
        candidate = rodata[begin:end]
        # An embedded NUL or a missing trailing NUL disqualifies the table.
        if b'\x00' in candidate[:-1] or candidate[-1] != 0:
            return None
        tokens.append(candidate[:-1])
    return tokens
def find_token_tables(rodata, token_index, token_index_offset):
    """Yield (offset, tokens) for candidate kallsyms_token_table blobs
    that end right before ``token_index_offset``."""
    last_token_offset = token_index_offset
    while True:
        # kallsyms_token_table is a sequence of 256 null-terminated strings.
        # Find the last token by looking for a trailing \0.
        token_table_end_offset = last_token_offset
        last_token_offset = rodata.rfind(
            b'\x00', 0, last_token_offset - 1) + 1
        # rfind() returns -1 when nothing is left, so 0 after +1 ends the scan.
        if last_token_offset == 0:
            break
        # The last kallsyms_token_index element corresponds to the last token.
        # Use that information to locate kallsyms_token_table.
        token_table_offset = last_token_offset - token_index[-1]
        if token_table_offset < 0:
            continue
        token_table = try_parse_token_table(
            rodata, token_index, token_table_offset, token_table_end_offset)
        if token_table is not None:
            yield token_table_offset, token_table
def find_markers(rodata, endianness, token_table_offset):
    """Yield (offset, markers) for candidate kallsyms_markers arrays
    that end right before ``token_table_offset``.

    Both the 4-byte and the 8-byte element layouts are tried; the scan
    walks backwards until it reaches the leading 0 element.
    """
    # In 4.20 the size of markers was reduced to 4 bytes.
    for marker_fmt, marker_size in (
            (endianness + 'I', 4),
            (endianness + 'Q', 8),
    ):
        first = True
        marker_offset = token_table_offset - marker_size
        markers = []
        while True:
            # kallsyms_markers is an array of monotonically increasing offsets,
            # which starts with 0. It is aligned on an 8-byte boundary, so if
            # the element size is 4 bytes and their number is odd, it is zero-
            # padded at the end.
            marker, = struct.unpack(
                marker_fmt, rodata[marker_offset:marker_offset + marker_size])
            if first:
                first = False
                if marker == 0 and marker_size == 4:
                    # Skip padding.
                    marker_offset -= marker_size
                    continue
            elif len(markers) > 0 and marker >= markers[-1]:
                # The array is not monotonically increasing.
                return
            markers.append(marker)
            if marker == 0:
                # We found the first element.
                break
            marker_offset -= marker_size
        if marker_size == 4 and len(markers) == 2:
            # Marker size must be 8 bytes, and we must be taking the upper
            # part, which is always 0, for the first marker.
            continue
        # Markers were collected back-to-front; restore array order.
        markers.reverse()
        yield marker_offset, markers
def is_name_ok(rodata, token_lengths, offset):
    """Check that the length-prefixed name entry at ``offset`` expands
    to a non-empty name of fewer than 128 characters.

    The caller must have verified that the entry does not span past the
    end of kallsyms_names.
    """
    count = rodata[offset]
    # A name is built from 1..127 tokens; anything else is invalid.
    if not 0 < count < 128:
        return False
    total = 0
    for pos in range(offset + 1, offset + 1 + count):
        total += token_lengths[rodata[pos]]
        if total >= 128:
            # Expanded name would be 128 characters or longer.
            return False
    return True
def extract_name(rodata, token_table, offset):
    """Expand the (already validated) name entry at ``offset`` into the
    concatenated token bytes."""
    count = rodata[offset]
    # The count byte is followed by `count` token numbers; look each one
    # up in the token table and join the pieces.
    parts = [token_table[rodata[offset + 1 + i]] for i in range(count)]
    return b''.join(parts)
def find_num_syms(rodata, endianness, token_table, markers_offset):
    """Scan backwards from kallsyms_markers to locate kallsyms_num_syms
    and decode kallsyms_names.

    Yields (num_syms_offset, names) when a consistent layout is found;
    yields nothing otherwise.
    """
    # kallsyms_names is a sequence of length-prefixed entries ending with
    # padding to an 8-byte boundary, followed by kallsyms_markers.
    # Unfortunately, some guesswork is required to locate the start of
    # kallsyms_names given that we know the start of kallsyms_markers.
    num_syms_fmt = endianness + 'I'
    token_lengths = [len(token) for token in token_table]
    # Indexed by (markers_offset - offset - 1). Each element is a number of
    # name entries that follow the respective offset, or None if that offset is
    # not a start of a valid name entry.
    name_counts = []
    # Whether offset still points to one of the trailing zeroes.
    trailing_zeroes = True
    offset = markers_offset
    while offset >= 9:
        offset -= 1
        if rodata[offset] != 0:
            # Trailing zeroes have ended.
            trailing_zeroes = False
        next_name_offset = offset + rodata[offset] + 1
        if next_name_offset >= markers_offset:
            # The current name entry spans past the end of kallsyms_names. This
            # is allowed if we are still looking at trailing zeroes.
            name_counts.append(0 if trailing_zeroes else None)
            continue
        next_name_count = name_counts[markers_offset - next_name_offset - 1]
        if next_name_count is None:
            # The next name entry is invalid, which means the current name
            # entry cannot be valid.
            name_counts.append(None)
            continue
        if is_name_ok(rodata, token_lengths, offset):
            # The current name entry is valid. Check whether it is preceded by
            # kallsyms_num_syms value, which is consistent with the number of
            # name entries we've seen so far.
            name_counts.append(next_name_count + 1)
            num_syms1, = struct.unpack(num_syms_fmt, rodata[offset - 4:offset])
            if name_counts[-1] == num_syms1:
                num_syms_offset = offset - 4
                break
            num_syms2, = struct.unpack(
                num_syms_fmt, rodata[offset - 8:offset - 4])
            if name_counts[-1] == num_syms2:
                num_syms_offset = offset - 8
                break
        else:
            # The current name entry is not valid. This is allowed if we are
            # still looking at trailing zeroes.
            name_counts.append(0 if trailing_zeroes else None)
    else:
        # Loop exhausted without a break: no consistent num_syms found.
        return
    # We've found kallsyms_names, now parse it.
    names = []
    for _ in range(name_counts[-1]):
        names.append(extract_name(rodata, token_table, offset).decode())
        offset += rodata[offset] + 1
    yield num_syms_offset, names
def get_addresses(rodata, endianness, num_syms_offset, num_syms):
    """Decode kallsyms_offsets plus kallsyms_relative_base into absolute
    symbol addresses.

    Only the non-percpu "relative base" layout is understood. Returns
    (addresses_offset, addresses).
    """
    base, = struct.unpack(
        endianness + 'Q', rodata[num_syms_offset - 8:num_syms_offset])
    start = num_syms_offset - 8 - num_syms * 4
    # The offsets array is 8-byte aligned; step back over any padding.
    if start % 8 != 0:
        start -= 4
    entry_fmt = endianness + 'i'
    addresses = []
    for pos in range(start, start + num_syms * 4, 4):
        raw, = struct.unpack(entry_fmt, rodata[pos:pos + 4])
        # Non-negative entries are absolute; negative ones encode an
        # offset from the relative base.
        addresses.append(raw if raw >= 0 else base - 1 - raw)
    return start, addresses
def find_kallsyms_in_rodata(rodata, endianness):
    """Locate the kallsyms tables in ``rodata`` and return an iterable
    of (address, name) pairs, or an empty list when nothing is found.

    Works backwards from kallsyms_token_index candidates through
    token_table, markers and names to addresses; the first fully
    consistent chain wins.
    """
    for token_index_offset, token_index in find_token_indices(
            rodata, endianness):
        logging.debug(
            '0x%08X: kallsyms_token_index=%s',
            token_index_offset, token_index)
        for token_table_offset, token_table in find_token_tables(
                rodata, token_index, token_index_offset):
            logging.debug(
                '0x%08X: kallsyms_token_table=%s',
                token_table_offset, token_table)
            for markers_offset, markers in find_markers(
                    rodata, endianness, token_table_offset):
                logging.debug(
                    '0x%08X: kallsyms_markers=%s',
                    markers_offset, markers)
                for num_syms_offset, names in find_num_syms(
                        rodata, endianness, token_table, markers_offset):
                    logging.debug(
                        '0x%08X: kallsyms_num_syms=%s',
                        num_syms_offset, len(names))
                    addresses_offset, addresses = get_addresses(
                        rodata, endianness, num_syms_offset, len(names))
                    # kallsyms_token_index (256 u16s) is the last table.
                    kallsyms_end = token_index_offset + (256 * 2)
                    kallsyms_size = kallsyms_end - addresses_offset
                    logging.debug(
                        '0x%08X: kallsyms[0x%08X]',
                        addresses_offset, kallsyms_size)
                    return zip(addresses, names)
    return []
if __name__ == '__main__':
    # CLI entry point: dump every symbol found in a raw .rodata blob.
    logging.basicConfig(level=logging.DEBUG)
    if len(sys.argv) != 3:
        print('Usage: {} PATH ENDIANNESS'.format(sys.argv[0]))
        sys.exit(1)
    rodata_path, endianness = sys.argv[1:]
    with open(rodata_path, 'rb') as fp:
        rodata = bytearray(fp.read())
    for address, name in find_kallsyms_in_rodata(rodata, endianness):
        print('{:016X} {}'.format(address, name))
| [
"mephi42@gmail.com"
] | mephi42@gmail.com |
6081ede0f1bbbf19c3f269d58a1c1ce19a7644f3 | 111a7ecc379f367e44d7174596592373c39beacd | /实验6/Otsuhold.py | e401f9671c8432de52345f279ee5c4b65d1ccbd0 | [] | no_license | TenWoods/OpenCVLearn | 8a966fac83bdb49414a266cbc0f679281c6d11d0 | 5255087ccd6f228bc715a8e6a54e4b4d814bbb7b | refs/heads/master | 2020-04-09T05:19:26.628791 | 2018-12-19T15:42:26 | 2018-12-19T15:42:26 | 160,060,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('noisy.tif',0)
# global thresholding
ret1,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
# Otsu's thresholding
ret2,th2 = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# Otsu's thresholding after Gaussian filtering
blur = cv2.GaussianBlur(img,(5,5),0)
ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# plot all the images and their histograms
images = [img, 0, th1,
img, 0, th2,
blur, 0, th3]
titles = ['Original Noisy Image','Histogram','Global Thresholding (v=127)',
'Original Noisy Image','Histogram',"Otsu's Thresholding",
'Gaussian filtered Image','Histogram',"Otsu's Thresholding"]
for i in range(3):
plt.subplot(3,3,i*3+1),plt.imshow(images[i*3],'gray')
plt.title(titles[i*3]), plt.xticks([]), plt.yticks([])
plt.subplot(3,3,i*3+2),plt.hist(images[i*3].ravel(),256)
plt.title(titles[i*3+1]), plt.xticks([]), plt.yticks([])
plt.subplot(3,3,i*3+3),plt.imshow(images[i*3+2],'gray')
plt.title(titles[i*3+2]), plt.xticks([]), plt.yticks([])
plt.show()
| [
"yangwx0905@sina.com"
] | yangwx0905@sina.com |
23c9567258f5693d54873d0270f84d0a3a2feb74 | 29cc64194bf5bbd699db2ef400a6a149dd45c09d | /home/migrations/0006_auto_20170703_0755.py | f6a3e4de0000d5f217f59b567589be7179e582e3 | [] | no_license | jianshen92/egg_cms | 8fc6a50a732b6510115d59affa6736569aab1166 | aa21790c91684085c806fad48e30ba8b5ae6e9e1 | refs/heads/master | 2022-12-15T17:54:41.108692 | 2017-12-18T11:22:09 | 2017-12-18T11:22:09 | 92,932,356 | 2 | 1 | null | 2022-12-07T23:56:38 | 2017-05-31T10:01:11 | CSS | UTF-8 | Python | false | false | 558 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-03 07:55
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: renames HomePage.test_live to
    # live_youtube_channel and drops the live_youtube_id field.
    dependencies = [
        ('home', '0005_homepage_test_live'),
    ]
    operations = [
        migrations.RenameField(
            model_name='homepage',
            old_name='test_live',
            new_name='live_youtube_channel',
        ),
        migrations.RemoveField(
            model_name='homepage',
            name='live_youtube_id',
        ),
    ]
| [
"jianshen@dangodigital.com"
] | jianshen@dangodigital.com |
2ff18d199c7dc6c74ba3d86a5b370656662df71e | f888c29c477dee563e1197a78ff933c8a257e5ef | /M18/p1/build_search_index.py | 7a129cab13c5d063f06b6d892ae60bad46dc1142 | [] | no_license | pvsteja/cspp1-assignments | 3b95fc969636d2327a542a5f4c5840404b575a37 | 8d361d8569c67ce6a633dbeeae751ea9515eb280 | refs/heads/master | 2020-03-24T23:03:52.712214 | 2018-08-25T12:31:49 | 2018-08-25T12:31:49 | 142,858,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,623 | py | '''
Tiny Search Engine - Part 1 - Build a search index
In this programming assingment you are given with some text documents as input.
Complete the program below to build a search index. Don't worry, it is explained below.
A search index is a python dictionary.
The keys of this dictionary are words contained in ALL the input text documents.
The values are a list of documents such that the key/word appears in each document atleast once.
The document in the list is represented as a tuple.
The tuple has 2 items. The first item is the document ID.
Document ID is represented by the list index.
For example: the document ID of the third document in the list is 2
The second item of the tuple is the frequency of the word occuring in the document.
Here is the sample format of the dictionary.
{
word1: [(doc_id, frequency),(doc_id, frequency),...],
word2: [(doc_id, frequency),(doc_id, frequency),...],
.
.
}
'''
# helper function to load the stop words from a file
def load_stopwords(filename):
    '''
    Read one stop word per line from ``filename`` and return a dict
    mapping each stripped word to 0 (used for O(1) membership tests).
    '''
    with open(filename, 'r') as handle:
        return {line.strip(): 0 for line in handle}
def word_list(text):
    '''
    Normalize each document in ``text``: drop every character that is
    not alphanumeric or a space, lower-case the result and split it on
    whitespace. Returns a list of token lists, one per document.
    The input list is left untouched (the original mutated it in place).
    '''
    cleaned_docs = []
    for doc in text:
        # Single pass per document; the original called str.replace once
        # per offending character (quadratic) and wrote the intermediate
        # results back into the caller's list.
        cleaned = ''.join(c for c in doc if c.isalnum() or c == ' ')
        cleaned_docs.append(cleaned.lower().split())
    return cleaned_docs
def build_search_index(docs):
    '''
    Build an inverted index from ``docs``.

    Returns a dict mapping each non-stopword to a sorted list of
    (doc_id, frequency) tuples, where doc_id is the document's position
    in ``docs`` and frequency its occurrence count in that document.
    Requires a stopwords.txt file in the current directory.
    '''
    srch_indx = {}
    stop_words = load_stopwords('stopwords.txt')
    documents = word_list(docs)
    for i, j in enumerate(documents):
        for word in j:
            # A (doc_id, count) pair is appended once per occurrence of
            # the word; the set() pass below collapses the duplicates.
            if (word not in stop_words) and (word not in srch_indx):
                srch_indx[word] = [(i, j.count(word))]
            elif (word not in stop_words) and (word in srch_indx):
                srch_indx[word] += [(i, j.count(word))]
    for word in srch_indx:
        srch_indx[word] = sorted(list(set(srch_indx[word])))
    return srch_indx
# helper function to print the search index
# use this to verify how the search index looks
def print_search_index(index):
    '''
    Print every word of the search index in alphabetical order, one per
    line, followed by its posting list.
    '''
    for word in sorted(index):
        print(word, " - ", index[word])
# main function that loads the docs from files
def main():
    '''
    Read a count N then N documents from stdin, build the search index
    and print it.
    '''
    # empty document list
    documents = []
    # number of documents to read
    lines = int(input())
    # iterate through N times and add documents to the list
    for i in range(lines):
        documents.append(input())
        # NOTE(review): this increment is a no-op -- `range` already
        # advances `i` on every iteration.
        i += 1
    # print(word_list(documents))
    # print(build_search_index(word_list(documents)))
    # call print to display the search index
    print_search_index(build_search_index(documents))
| [
"tejapathri6@msitprogram.net"
] | tejapathri6@msitprogram.net |
385f06f1167d365bef81540fdd203d40fdaff0c3 | eb4d843eff0f0990f3103d1e71fe484a26d8fd62 | /0x0A-i18n/3-app.py | 124f7c63d8cd19a9d4b8aa8de6d7b25a7248c11c | [] | no_license | abu-bakarr/holbertonschool-web_back_end | d43ad7fd21b5ef1bdbc0b8236f444ca2b605cbf9 | 0c235315b6c67e4cf26977c80f51e995da762fb1 | refs/heads/main | 2023-02-03T14:38:28.516854 | 2020-12-23T18:18:08 | 2020-12-23T18:18:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | #!/usr/bin/env python3
"""[basic flask app]
"""
from flask import Flask, request, render_template
from flask_babel import Babel
app = Flask(__name__)
class Config():
    """Flask-Babel configuration: supported languages plus the default
    locale and timezone."""
    LANGUAGES = ["en", "fr"]
    BABEL_DEFAULT_LOCALE = "en"
    BABEL_DEFAULT_TIMEZONE = "UTC"
app.config.from_object(Config)
babel = Babel(app)
@babel.localeselector
def get_locale():
    """Select the locale for the current request.

    Returns:
        str: the best match among Config.LANGUAGES according to the
        request's Accept-Language header.
    """
    return request.accept_languages.best_match(Config.LANGUAGES)
@app.route('/')
def hello_world():
    """Serve the index page.

    Returns:
        str: the rendered 3-index.html template.
    """
    return render_template('3-index.html')
if __name__ == "__main__":
app.run(host="0.0.0.0", port="5000")
| [
"yesid.dev93@gmail.com"
] | yesid.dev93@gmail.com |
4322e050d3909e3df8ab3c4a1ef098fa511d9eb0 | ab4f74d127bfc89813ee359bb9c779eca5426ddc | /script/label_image.runfiles/org_tensorflow/tensorflow/contrib/signal/python/ops/mfcc_ops.py | 65b5335b3ae79e1ab35ee59ea3fb837590a4b44e | [
"MIT"
] | permissive | harshit-jain-git/ImageNET | cdfd5a340b62862ad8d1cc3b9a0f30cccc481744 | 1cd4c2b70917e4709ce75422c0205fe3735a1b01 | refs/heads/master | 2022-12-11T12:47:46.795376 | 2017-12-19T05:47:26 | 2017-12-19T05:47:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | /home/co/Documents/ImageClassifier/tensorflow/tensorflow/contrib/signal/python/ops/mfcc_ops.py | [
"harshitjain1371999@gmail.com"
] | harshitjain1371999@gmail.com |
7a0acd889fe040d4b68a8df5988526eb46bc755e | 0528cf2e0bb8c48d0a55d5c429c61257476a97d0 | /DemonScar/potionclass.py | 64ddd2e857435b33ac54f95a1de1f531ea35c27a | [] | no_license | waenriqu/ProyectoPython | fb6886be1f59b7ce336736511c9ae8224592866d | 99376ffd26780e54a27cb94acc28466cd976c3bf | refs/heads/master | 2021-01-10T00:52:59.605426 | 2013-01-09T02:25:50 | 2013-01-09T02:25:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | class Potion:
name="Potion"
recover=25
def recoverSet(self, cant):
recover=cant
| [
"mistery7@hotmail.es"
] | mistery7@hotmail.es |
bdbbf5654ff113436137c7ad543ce8236591f985 | e76361c888f404804a8e7c0e07f17f201598389e | /core/python/kungfu/finance/ledger.py | 68244c9db5e06cc7d32f0d921c9cb1911daac63b | [
"Apache-2.0"
] | permissive | peer2peer/kungfu | e069653205eff04b7f4dd16970a1a60d278b1a3a | 1ec448efffee99cc5d0c54118380ae792c0367be | refs/heads/master | 2020-06-18T02:48:36.258376 | 2019-07-08T07:07:43 | 2019-07-08T07:07:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,230 | py |
from kungfu.finance.position import *
from kungfu.wingchun.utils import *
from kungfu.wingchun.constants import *
class Ledger:
    """Account book tracking available cash, realized PnL and one
    position per symbol.

    Equity figures are derived from the positions: future positions
    contribute margin plus position PnL, all others contribute market
    value (see ``dynamic_equity``).
    """
    def __init__(self, **kwargs):
        self._initial_equity = kwargs.pop("initial_equity", 0.0)
        self._static_equity = kwargs.pop("static_equity", 0.0)
        self._avail = kwargs.pop("avail", 0.0)
        self._realized_pnl = kwargs.pop("realized_pnl", 0.0)
        self._positions = kwargs.pop("positions", {})
        # Give every preloaded position a back-reference to this ledger.
        for pos in self._positions.values():
            pos.ledger = self
        if self._initial_equity <= 0.0:
            self._initial_equity = self.dynamic_equity # fill initial equity
        if self._static_equity <= 0.0:
            self._static_equity = self.dynamic_equity
    @property
    def avail(self):
        # Cash available for trading.
        return self._avail
    @avail.setter
    def avail(self, value):
        self._avail = value
    @property
    def message(self):
        # Snapshot of all account figures as a plain dict.
        return {
            "avail": self.avail,
            "margin": self.margin,
            "market_value": self.market_value,
            "initial_equity": self.initial_equity,
            "dynamic_equity": self.dynamic_equity,
            "static_equity": self.static_equity,
            "realized_pnl": self.realized_pnl,
            "unrealized_pnl": self.unrealized_pnl
        }
    @property
    def positions(self):
        return self._positions.values()
    @property
    def margin(self):
        # Total margin across all positions.
        return sum([position.margin for position in self._positions.values()])
    @property
    def market_value(self):
        # Total market value across all positions.
        return sum([position.market_value for position in self._positions.values()])
    @property
    def initial_equity(self):
        return self._initial_equity
    @property
    def static_equity(self):
        return self._static_equity
    @property
    def dynamic_equity(self):
        # Cash plus the equity contribution of every position: futures
        # count margin + position PnL, everything else its market value.
        total_value = self.avail
        for pos in self._positions.values():
            if pos.instrument_type == InstrumentType.Future:
                total_value += (pos.margin + pos.position_pnl)
            else:
                total_value += pos.market_value
        return total_value
    @property
    def realized_pnl(self):
        return self._realized_pnl
    @realized_pnl.setter
    def realized_pnl(self, value):
        self._realized_pnl = value
    @property
    def unrealized_pnl(self):
        return sum([position.unrealized_pnl for position in self._positions.values()])
    def apply_quote(self, quote):
        # Route a market quote to the matching position.
        self._get_position(quote.instrument_id, quote.exchange_id).apply_quote(quote)
    def apply_trade(self, trade):
        # Route a trade fill to the matching position.
        self._get_position(trade.instrument_id, trade.exchange_id).apply_trade(trade)
    def _get_position(self, instrument_id, exchange_id):
        # Find-or-create the position for this symbol. NOTE(review): the
        # class name "StockPostion" (sic) comes from the imported module.
        symbol_id = get_symbol_id(instrument_id, exchange_id)
        if symbol_id not in self._positions:
            instrument_type = get_instrument_type(instrument_id, exchange_id)
            cls = StockPostion if instrument_type == InstrumentType.Stock else FuturePosition
            self._positions[symbol_id] = cls(ledger = self, instrument_id = instrument_id, exchange_id = exchange_id, instrument_type = instrument_type)
        return self._positions[symbol_id]
| [
"qing.lu@taurus.ai"
] | qing.lu@taurus.ai |
defbb44e1d411385fef7704cea33d25e54f21421 | d80bfb056d381b023c4d1ec52f6fe070a478b8d3 | /auto_download.py | 58136dedf2e52744290845a450ebc76672ae4245 | [
"MIT"
] | permissive | katsukixyz/izone_archiver | f1860cd7e0fb6994e989c19153ffbbe759fb6ecd | 0e9923bdd8cc323c7a9d7d3abb05bb46c55df637 | refs/heads/master | 2023-03-05T13:53:53.833042 | 2021-02-07T18:19:04 | 2021-02-07T18:19:04 | 335,512,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,536 | py | from bs4 import BeautifulSoup
import requests
import time
import json
import os
from tqdm import tqdm
import datetime
import urllib
headers = {
'Referer': "https://www.vlive.tv/channel/C1B7AF/board/5464"
}
all_vlives_endpoint = "https://www.vlive.tv/globalv-web/vam-web/post/v1.0/board-5464/posts?appId=8c6cc7b45d2568fb668be6e05b6e5a3b&fields=attachments,author,availableActions,board%7BboardId,title,boardType,payRequired,includedCountries,excludedCountries%7D,channel%7BchannelName,channelCode%7D,commentCount,contentType,createdAt,emotionCount,excludedCountries,includedCountries,isCommentEnabled,isHiddenFromStar,lastModifierMember,notice,officialVideo,plainBody,postId,postVersion,reservation,starReactions,targetMember,thumbnail,title,url,viewerEmotionId,writtenIn,sharedPosts,originPost{}&sortType=LATEST&limit=100&gcc=KR"
def get_partial_list(afterStr):
    # Fetch one page (up to 100 posts) of the channel board listing.
    # afterStr is '' for the first page or '&after=<cursor>' thereafter;
    # it is spliced into the endpoint via str.format. Returns the
    # response's paging metadata and the list of post objects.
    obj = requests.get(all_vlives_endpoint.format(afterStr), headers = headers).json()
    return obj['paging'], obj['data']
def get_all_vlives():
    """Page through the whole board listing and return every post."""
    links = []
    i = 0
    while True:
        if i == 0:
            # First request: no cursor yet. An empty paging object means
            # there is only one page.
            pagingParams, partialData = get_partial_list('')
            links.extend(partialData)
            if len(pagingParams) == 0:
                break
            i += 1
        else:
            # Follow the 'after' cursor from the previous response until
            # the server stops providing one.
            pagingParams, partialData = get_partial_list("&after="+pagingParams['nextParams']['after'])
            links.extend(partialData)
            if 'nextParams' not in pagingParams:
                # reached end of list
                break
    return links
def download_elements(vlive):
    """Download the video, captions and title for one vlive post into
    D:/izone/<YYYYMMDDhhmm>_<videoSeq>/.

    If the directory already exists, only the missing pieces (.mp4,
    caption .vtt files, title.txt) are fetched. Otherwise everything is
    downloaded; when another directory with the same video id and a
    timestamp within 10 minutes exists, that directory is renamed to the
    new timestamp instead of re-downloading.
    """
    attempts = 0
    title = vlive['title']
    # createdAt is in milliseconds since the epoch; format as UTC YYYYMMDDhhmm.
    date = datetime.datetime.utcfromtimestamp(vlive['createdAt']/1000).strftime("%Y%m%d%H%M")
    video_id = str(vlive['officialVideo']['videoSeq'])
    postUrl = vlive['url']
    vodId = vlive['officialVideo']['vodId']
    print(date, video_id, title)
    video_path = "D:/izone/" + date + '_' +video_id
    naver_link_endpoint = "https://www.vlive.tv/globalv-web/vam-web/video/v1.0/vod/%s/inkey?appId=8c6cc7b45d2568fb668be6e05b6e5a3b&gcc=KR"
    headers = {
        'Referer': postUrl
    }
    # First obtain the playback key ("inkey") for this video.
    naver_link_r = requests.get(naver_link_endpoint % video_id, headers = headers).json()
    #testing purposes
    if not 'inkey' in naver_link_r:
        print(naver_link_r)
    naver_key = naver_link_r['inkey']
    naver_link = "https://apis.naver.com/rmcnmv/rmcnmv/vod/play/v2.0/%s?key=%s" % (vodId, naver_key)
    video_r = requests.get(naver_link).json()
    video_res = video_r['videos']['list']
    # Pick the rendition with the greatest height (best quality).
    sorted_video_res = sorted(video_res, key = lambda k: k['encodingOption']['height'])
    video_link = sorted_video_res[-1]['source']
    print(video_link)
    if os.path.exists(video_path):
        # Directory already exists: fill in whatever is missing. The
        # trailing `break` means only the top-level directory entry of
        # os.walk is ever processed.
        for roots, dirs, files in os.walk(video_path):
            if 'captions' in video_r:
                #if the video has captions
                for language in video_r['captions']['list']:
                    code_and_type = language['language'] + '-' + language['type']
                    sub_link = language['source']
                    if not os.path.exists(video_path + '/' + code_and_type + '/'):
                        os.mkdir(video_path + '/' + code_and_type)
                    urllib.request.urlretrieve(sub_link, video_path + '/' + code_and_type + '/' + code_and_type + ".vtt")
                    # sub_r = requests.get(sub_link)
                    # with open(video_path + '/' + code_and_type + '/' + code_and_type + '.vtt', 'wb') as f:
                    #     f.write(sub_r.content)
                    print('Acquired ' + code_and_type + '.vtt')
            if not any('.mp4' in x for x in files):
                #no video
                # NOTE(review): bare except retries up to 5 times and
                # swallows the underlying error.
                while attempts < 5:
                    try:
                        urllib.request.urlretrieve(video_link, video_path + '/' + video_id + '.mp4')
                        # vid_r = requests.get(video_link)
                        # with open(video_path + '/' + video_id + '.mp4', 'wb') as f:
                        #     f.write(vid_r.content)
                        print('Acquired ' + video_id + '.mp4')
                        break
                    except:
                        attempts += 1
                        pass
            if not 'title.txt' in files:
                #no title
                with open(video_path + '/' + 'title.txt', 'w', encoding = 'utf-8') as f:
                    f.write(title)
                print('Acquired title.txt')
            #top level dir
            break
    else:
        # should not happen in auto_download
        # Look for an existing directory with the same video id but a
        # different timestamp.
        matching_id_dir = [x for x in os.listdir("D:/izone/") if video_id in x.split("_")[1]]
        if len(matching_id_dir) != 0:
            matching_id_date = matching_id_dir[0].split("_")[0]
            if not matching_id_date == date:
                print('SAME VIDEO ID EXISTS, DIFFERENT DATE: ', date, video_id)
                print(matching_id_dir[0])
                #if new time is less than 10 minutes apart from matching id date
                if (datetime.datetime.strptime(date, "%Y%m%d%H%M") - datetime.datetime.strptime(matching_id_date, "%Y%m%d%H%M")).total_seconds() < 600:
                    print('Updating date...')
                    os.rename("D:/izone/"+matching_id_dir[0], "D:/izone/" + date + '_' + video_id)
        else:
            # Fresh video: create the directory and download everything.
            os.mkdir(video_path)
            while attempts < 5:
                try:
                    urllib.request.urlretrieve(video_link, video_path + '/' + video_id+'.mp4')
                    # vid_r = requests.get(video_link)
                    # with open(video_path + '/' + video_id + '.mp4', 'wb') as f:
                    #     f.write(vid_r.content)
                    print('Acquired ' + video_id + '.mp4')
                    break
                except:
                    attempts += 1
                    pass
            if 'captions' in video_r:
                #if video has captions
                for language in video_r['captions']['list']:
                    code_and_type = language['language'] + '-' + language['type']
                    sub_link = language['source']
                    os.mkdir(video_path + '/' + code_and_type)
                    urllib.request.urlretrieve(sub_link, video_path + '/' + code_and_type + '/' + code_and_type + ".vtt")
                    # sub_r = requests.get(sub_link)
                    # with open(video_path + '/' + code_and_type + '/' + code_and_type + '.vtt', 'wb') as f:
                    #     f.write(sub_r.content)
                    print('Acquired ' + code_and_type + '.vtt')
            with open(video_path + '/' + 'title.txt', 'w', encoding = 'utf-8') as f:
                f.write(title)
            print('Acquired title.txt')
# Poll the board every 5 minutes; when the number of posts grows, wait
# until the newest post is no longer a live stream, then download it.
j = 0
while True:
    links = get_all_vlives()
    print('# of videos found: ' + str(len(links)))
    if j == 0:
        # First iteration: establish the baseline post count.
        num_vids = len(links)
    if len(links) > num_vids:
        print('New Vlive found.')
        #NEW VLIVE
        while True:
            links = get_all_vlives()
            if 'status' in links[0]['officialVideo'] and 'ON_AIR' == links[0]['officialVideo']['status']:
                # new vlive is an ongoing livestream; re-check in 2 minutes
                print('Ongoing.')
                time.sleep(120)
            else:
                print('Not ongoing.')
                #new vlive is not an ongoing livestream
                break
        download_elements(links[0])
    else:
        pass
    num_vids = len(links)
    j += 1
    time.sleep(300)
| [
"katsuki@katsuki.xyz"
] | katsuki@katsuki.xyz |
ba3716f0dc54c992ee423cea11f9fbcde6fadde9 | 9cc3135d5fcd781c0542a905c61dc19b0ceeffef | /alien_colors_ver1-1.py | cad614ac833cdc33de423b5b07873c40dfe1f32c | [] | no_license | bkalcho/python-crash-course | 411d8af223fb6974d4f890c0f82c9e56b062359c | 8425649a2ecd5abeeb438e816400f270d937758e | refs/heads/master | 2022-09-11T13:47:56.837256 | 2022-08-23T10:04:35 | 2022-08-23T10:04:35 | 69,810,386 | 14 | 8 | null | 2022-08-23T10:04:36 | 2016-10-02T17:14:41 | Python | UTF-8 | Python | false | false | 185 | py | # Author: Bojan G. Kalicanin
# Date: 29-Sep-2016
# If alien color is not green nothing will be printed
alien_color = 'red'
if alien_color == 'green':
print('You earned 5 points.') | [
"bojan.g.kalicanin@gmail.com"
] | bojan.g.kalicanin@gmail.com |
ca92e0ffc2eba7844653088c1a43f89204a6c7ee | b83191179deceba77983fa31ae7543f69115ef45 | /reflectometry/offspec/kineticraw.py | 98d7a2d5e75ad4d0a8e03764267eb72bbe593868 | [] | no_license | mantidproject/scriptrepository | ed6d605b4cd9d1921f953d375d2f4a2fab23e1e8 | f563d4f24eb90bd2288e85135793178f84089d42 | refs/heads/master | 2023-08-04T08:27:44.148595 | 2023-07-25T13:21:27 | 2023-07-25T13:21:27 | 9,932,097 | 3 | 2 | null | 2022-04-01T13:24:36 | 2013-05-08T08:41:34 | Python | UTF-8 | Python | false | false | 48,635 | py | import offspec_offset2 as nr
reload(nr)
nr.current_detector = nr.old_detector
from math import *
import numpy as np
import os,sys,re, time
def getLog(w,log_name):
# Get handle to the workspace
try:
h=mtd[w]
except:
print "Can't get Workspace handle"
#
# Get access to SampleDetails
s=h.getSampleDetails().getLogData(log_name).value
return s
def writemap_csv(wksp,times,fname):
dir=os.path.dirname(fname+'Qscale.csv')
try:
os.stat(dir)
except:
os.mkdir(dir)
f=open(fname+'Qscale.csv','w')
w1=mtd[wksp]
xarray=w1.readX(0)
npts=len(xarray)
nhist=w1.getNumberHistograms()
x1=np.zeros(npts-1)
for i in range(npts-1):
x1[i]=(xarray[i]+xarray[i+1])/2.0
s=""
for i in range(npts-2):
s+="%g," % (x1[i])
s+="%g\n" % (x1[npts-2])
f.write(s)
f.close()
f=open(fname+'timeScale.csv','w')
s=""
for i in range(len(times)-1):
s+="%g," % (times[i])
s+="%g\n" % (times[len(times)-1])
f.write(s)
f.close()
f=open(fname+'ZData.csv','w')
s=""
for i in range(nhist):
yarray=w1.readY(i)
s=""
for j in range(npts-2):
s+="%g," % (yarray[j])
s+="%g\n" % (yarray[npts-2])
f.write(s)
f.close()
f=open(fname+'Errors.csv','w')
for i in range(nhist):
earray=w1.readE(i)
s=""
for j in range(npts-2):
s+="%g," % (earray[j])
s+="%g\n" % (earray[npts-2])
f.write(s)
f.close()
def writeXYE(wksp,fname):
a1=Mantid.getMatrixWorkspace(wksp)
x1=a1.readX(0)
X1=n.zeros((len(x1)-1))
for i in range(0,len(x1)-1):
X1[i]=(x1[i]+x1[i+1])/2.0
y1=a1.readY(0)
e1=a1.readE(0)
f=open(fname,'w')
for i in range(len(X1)):
s=""
s+="%g " % X1[i]
s+="%g " % y1[i]
s+="%g\n" % e1[i]
f.write(s)
f.close()
def loadlatest(currentrun=None):
path = "Z:/"
runfiles = os.listdir(path)
targetname = "OFFSPEC000"+currentrun
endings = []
for file in runfiles:
filelist = file.split('.n',1)
if targetname in filelist[0]:
try:
endings.append(filelist[1])
except: pass
sortedendings = sorted(endings)
print targetname+'.n'+sortedendings[-1]
return targetname+'.n'+sortedendings[-1]
def loaddata(rnum, path = 'u://',loadcrpt=0):
try:
Load(Filename=path+'OFFSPEC000'+str(rnum)+'.nxs', OutputWorkspace=str(rnum), LoaderName='LoadEventNexus', LoaderVersion=1, LoadMonitors=True)
except:
try:
if loadcrpt == 0:
updatefile=loadlatest(str(rnum))
Load(Filename='z:/'+updatefile, OutputWorkspace=str(rnum), LoaderName='LoadEventNexus', LoaderVersion=1, LoadMonitors=True)
else:
print 'trying to load crpt snapshot'
Load(Filename='z:/snapshot_crpt.nxs', OutputWorkspace=str(rnum), LoaderName='LoadEventNexus', LoaderVersion=1, LoadMonitors=True)
except:
raise Exception('Could not find data')
return str(rnum)
def timeslice(rnum,btime,etime,output,loadcrpt=0):
loaddata(rnum,loadcrpt=loadcrpt)
try:
FilterByTime(InputWorkspace=str(rnum), OutputWorkspace=str(rnum)+'_slice', StartTime=btime,StopTime=etime)
except:
raise Exception('Error in slicing')
Rebin(str(rnum)+'_slice','5.0,20.0,100000.0',PreserveEvents=False,OutputWorkspace=str(rnum)+'_slicereb')
a1=mtd[str(rnum)]
gr=a1.getRun()
tamps=gr.getProtonCharge()
print 'tamps=',str(tamps)
a2=mtd[str(rnum)+'_slice']
ua=a2.getRun().getProtonCharge()
print 'ua=',str(ua)
monnorm=mtd[str(rnum)+'_monitors']*ua/tamps
Rebin(monnorm,'5.0,20.0,100000.0',OutputWorkspace=str(rnum)+'monreb')
ConjoinWorkspaces(str(rnum)+'monreb',str(rnum)+'_slicereb',CheckOverlapping=False)
RenameWorkspace(str(rnum)+'monreb',OutputWorkspace=output+'_'+str(btime)+'-'+str(etime))
DeleteWorkspace(str(rnum))
DeleteWorkspace(str(rnum)+'_monitors')
DeleteWorkspace(str(rnum)+'_slice')
return output+'_'+str(btime)+'-'+str(etime)
def doDSSCorrections(wksp,angle=1.2,nper=0,Nqx=200,Nqz=200):
# get the lambda and theta arrays from the original data
thetaf=[]
if nper == 0:
suffix1='detnorm'
suffix2='qxqz'
suffix3='qxlam'
else:
suffix1='detnorm_'+str(nper)
suffix2='qxqz_'+str(nper)
suffix3='qxlam_'+str(nper)
a1=mtd[wksp+suffix1]
nth=a1.getNumberHistograms()
ntc=len(a1.dataY(0))
thetaf=a1.getAxis(1).extractValues()
thetaf=thetaf*pi/180.0
lambda0=a1.getAxis(0).extractValues()
lambda1=[]
for i in range(len(lambda0)-1):
lambda1.append(0.5*(lambda0[i]+lambda0[i+1]))
dthf=float(nth-1)/(thetaf[-1]-thetaf[0])
dlam=float(ntc-1)/(lambda1[-1]-lambda1[0])
# get the qx and qz arrays from the data we just created
a2=mtd[wksp+suffix2]
lmin=lambda0[0]
lmax=lambda0[-1]
lamstep=(lmax-lmin)/(Nqz-1)
lam2=[]
for i in range(Nqz):
lam2.append(lmin+i*lamstep)
qz=a2.getAxis(1).extractValues()
qx=a2.getAxis(0).extractValues()
cthetai=cos(angle*pi/180.0)
sthetai=sin(angle*pi/180.0)
thetai=angle*pi/180.0
thetaf0=thetaf[0]
lambda0=lambda1[0]
for i in range(Nqz):
qzi=qz[i]
#qzi=lam2[i]
pi2qz=2.0*pi/qzi
for j in range(Nqx):
qxj=qx[j]
ang=(qzi*cthetai-qxj*sthetai)/sqrt(qzi*qzi+qxj*qxj)
#ang=cthetai-(qxj*qzi/(2.0*pi))
ang=min(1.0,ang)
ang=asin(ang)
#ang=acos(ang)
if qxj==0.0:
ang=thetai
else:
ang=pi-ang-atan(qzi/qxj)
if ang > pi:
ang=ang-pi
lam=pi2qz*(sthetai+sin(ang))
#lam=qzi
xind=(ang-thetaf0)*dthf
yind=(lam-lambda0)*dlam
indy=int(yind)
indx=int(xind)
if indy >= 0 and indy <= ntc-2 and indx >= 0 and indx <= nth-2:
dyind=yind-float(indy)
dxind=xind-float(indx)
ofsp00=a1.dataY(indx)[indy]
ofsp01=a1.dataY(indx)[indy+1]
ofsp10=a1.dataY(indx+1)[indy]
ofsp11=a1.dataY(indx+1)[indy+1]
offsp1=(1.0-dxind)*ofsp00+dxind*ofsp10
offsp2=(1.0-dxind)*ofsp01+dxind*ofsp11
a2.dataY(i)[j]=(1.0-dyind)*offsp1+dyind*offsp2
ofsp00=a1.dataE(indx)[indy]
ofsp00=ofsp00*ofsp00
ofsp01=a1.dataE(indx)[indy+1]
ofsp01=ofsp01*ofsp01
ofsp10=a1.dataE(indx+1)[indy]
ofsp10=ofsp10*ofsp10
ofsp11=a1.dataE(indx+1)[indy+1]
ofsp11=ofsp11*ofsp11
offsp1=((1.0-dxind)*(1.0-dxind))*ofsp00+(dxind*dxind)*ofsp10
offsp2=((1.0-dxind)*(1.0-dxind))*ofsp01+(dxind*dxind)*ofsp11
a2.dataE(i)[j]=sqrt(abs((1.0-dyind)*(1.0-dyind)*offsp1+dyind*dyind*offsp2))
else:
a2.dataY(i)[j]=0.0
a2.dataE(i)[j]=0.0
w1=mtd[wksp+suffix2]*1.0
w2=mtd[wksp+suffix2]
a2=w1.getAxis(1)
def DSqxqz(run1,wksp,angle=1.2,qxqzlimits='-5e-4,5e-4,0.02,0.1',binning1=["1.5","0.02","14.0","2"],Nqx=200,Nqz=200,withpol=1):
halftheta = angle/2.0
#binning1=["1.0","0.05","14.0","2"]
if withpol==1:
nr.nrPNRFn(run1,wksp,str(halftheta),'none',"114","112","116",binning1,"",'0',['2','1'],'0',dofloodnorm=2)
else:
nr.nrNRFn(run1,wksp,str(halftheta),'none',"114","112","116",binning1,"",dofloodnorm=2)
# Delete the norm and RvQ workspaces as they have the wrong angle
DeleteWorkspace(wksp+'RvQ')
DeleteWorkspace(wksp+'norm')
ConvertSpectrumAxis(InputWorkspace=wksp+"detnorm", OutputWorkspace=wksp+"detnorm", Target='SignedTheta')
ConvertToReflectometryQ(InputWorkspace=wksp+'detnorm', OverrideIncidentTheta=True, IncidentTheta=angle, Extents=qxqzlimits, OutputAsMDWorkspace=False, OutputWorkspace=wksp+"qxqz", NumberBinsQx=Nqx, NumberBinsQz=Nqz)
if withpol == 1:
doDSSCorrections(wksp,angle,1,Nqx=Nqx,Nqz=Nqz)
doDSSCorrections(wksp,angle,2,Nqx=Nqx,Nqz=Nqz)
else:
doDSSCorrections(wksp,angle,0,Nqx=Nqx,Nqz=Nqz)
def _offspecslice_simple(rnum,btime,etime,qmin,qmax,output, binning,theta=0.7, DB="LDDB05k",spec=114,loadcrpt=0):
wksp =timeslice(rnum,btime,etime,output,loadcrpt=loadcrpt)
nr.nrNRFn("",wksp,str(theta),DB,spec,"105","122",binning,"",usewkspname=1)
DeleteWorkspace(wksp)
DeleteWorkspace(wksp+'norm')
def offspecslice2(rnum,qmin,qmax,output,start = 0, tslice=None,nslices = None,sarray=[], theta=0.7, binning=["1.5","0.02","14.0","2"],spec=114,loadcrpt=0):
slicearray = sarray[:]
slicenames=[] #this will be a list of workspace names for all the slices created
datatimes = [] # this will contain the meantime for each dataset
if tslice or nslices: # if tslice or nslices exist they will take precedence over slicearray
testws = loaddata(rnum,loadcrpt=loadcrpt)
runtotaltime = getLog(testws, 'duration')
print "Total runtime in seconds: " + str(runtotaltime)
DeleteWorkspace(testws)
if nslices:
tslice = ceil((runtotaltime - start)/(nslices))
slicearray.append(start)
while slicearray[-1] < runtotaltime:
slicearray.append(slicearray[-1]+tslice)
slicearray[-1] = runtotaltime # lastentry is some random number > than total runduration, set equal to runduration, this means the last slice has a different length to the others
print "Time boundaries:\n"
print slicearray
print "Start making slices:\n"
for idx in range(len(slicearray)):
try:
start = slicearray[idx]; end = slicearray[idx+1]
datatimes.append(0.5*(start+end)) # calculate the time for this dataset for saving later
print "\nCreated slice "+str(datatimes[-1])
_offspecslice_simple(rnum, start, end, qmin, qmax, output, binning=binning, theta=theta,spec=spec,loadcrpt=loadcrpt)
except:
print datatimes
break
def offspecPlot(wksp, (xmin, xmax), (ymin,ymax), (zmin,zmax),logscale='z'):
p = plot2D(wksp)
l=p.activeLayer()
l.setScale(0, ymin, ymax)
l.setScale(1, zmin, zmax)
l.setScale(2, xmin, xmax)
if 'z' in logscale:
l.setAxisScale(1, zmin, zmax, Layer.Log10)
elif 'x' in logscale:
l.setAxisScale(2, xmin, xmax, Layer.Log10)
elif 'y' in logscale:
l.setAxisScale(0, ymin, ymax, Layer.Log10)
def QxQzcuts(name, qzmin=None, qzmax=None,plot=False):
if qzmin:
outputname = name+'_cut_'+str(qzmin)+'-'+str(qzmax)
else:
outputname = name+'_cut_all'
Transpose(InputWorkspace=name, OutputWorkspace=outputname)
Integration(InputWorkspace=outputname, RangeLower=qzmin, RangeUpper=qzmax, OutputWorkspace=outputname)
Transpose(InputWorkspace=outputname, OutputWorkspace=outputname)
if plot:
plot(outputname,0, tool='plot_spectrum', error_bars=True)
yscale('log')
def offspecQplot(rnum,qmin,qmax,output, nslices=None,sarray = [], angle=0.7,Nqx=50,Nqz=50,qxqzlimits='-2e-4,2e-4,0.01,0.05', zmin=1e-4, zmax=0.01,qzmin=None,qzmax=None,binning=["1.5","0.02","14.0","2"],spec=114,loadcrpt=0):
limitlist = qxqzlimits.split(',')
xmin=float(limitlist[0]); xmax=float(limitlist[1])
ymin=float(limitlist[2]);ymax=float(limitlist[3])
offspecslice2(rnum,qmin,qmax,'wrong',nslices = nslices,sarray=sarray, theta=angle/2.0,spec=spec,loadcrpt=loadcrpt)
names=mtd.getObjectNames()
for name in names:
m = re.search('^wrong',name)
if m:
n = re.search('^wrong{1}(.*)detnorm{1}$', name)
if n:
print name
newname = re.sub('wrong',output, name)
newname = re.sub('detnorm','',newname)
print "newname: "+newname
ConvertSpectrumAxis(InputWorkspace=name, OutputWorkspace=name, Target='SignedTheta')
try:
ConvertToReflectometryQ(InputWorkspace=name, OverrideIncidentTheta=True, IncidentTheta=angle, Extents=qxqzlimits, OutputAsMDWorkspace=False, OutputWorkspace=newname+"qxqz", NumberBinsQx=Nqx, NumberBinsQz=Nqz)
except:
ConvertToReflectometryQ(InputWorkspace=name, OverrideIncidentTheta=True, IncidentTheta=angle, Extents=qxqzlimits, OutputAsMDWorkspace=False, OutputWorkspace=newname+"qxqz", NumberBinsQx=Nqx, NumberBinsQz=Nqz,OutputVertexes='somevertexes')
CloneWorkspace(name,OutputWorkspace=newname+'detnorm')
doDSSCorrections(newname,angle,0,Nqx=Nqx,Nqz=Nqz)
DeleteWorkspace(newname+'detnorm')
offspecPlot(newname+'qxqz', (xmin, xmax), (ymin,ymax), (zmin,zmax),logscale='z')
QxQzcuts(newname+'qxqz',qzmin,qzmax)
DeleteWorkspace(mtd[name])
offspecslice2(rnum,qmin,qmax,output,nslices = nslices,sarray=sarray, theta=angle,spec=spec,loadcrpt=loadcrpt)
def chopit(rnum,btime,etime,tslice,output,slicearray=None,usearray=0,sf=1.0, save = True,binning=["1.5","0.02","14.0","2"],loadcrpt=0):
nslice=int((etime*1.0-btime)/(tslice*1.0))
slicenames=[]
print 'nslice=',str(nslice)
if usearray==0:
slicearray=[]
slicearray.append(btime)
for i in range(1,nslice+1):
slicearray.append(btime+(i*tslice))
if slicearray[-1] < etime:
slicearray.append(etime)
nslice=nslice+1
for i in range(nslice-1):
btime2=slicearray[i]
etime2=slicearray[i+1]
try:
wksp=timeslice(rnum,btime2,etime2,output,loadcrpt=loadcrpt)
slicenames.append(wksp)
print slicenames
except:
print 'time slicing failed'
break
nr.nrNRFn("",wksp,"0.700","LDDB05k","114","110","120",binning,"",usewkspname=1,sf=sf)
#Rebin(wksp+"RvQ","0.011,-0.025,0.09",OutputWorkspace=wksp+"RvQ")
Rebin(wksp+"RvQ","0.011,-0.02,0.09",OutputWorkspace=wksp+"RvQ")
DeleteWorkspace(wksp)
DeleteWorkspace(wksp+'detnorm')
DeleteWorkspace(wksp+'norm')
CloneWorkspace(slicenames[0]+'RvQ',OutputWorkspace=output+'_allslices' )
for i in range(1,len(slicenames)):
ConjoinWorkspaces(output+'_allslices',slicenames[i]+'RvQ',CheckOverlapping=0)
DeleteWorkspace(slicenames[0]+'RvQ')
datatimes=[]
for i in range(nslice-1):
datatimes.append(0.5*(slicearray[i]+slicearray[i+1]))
writemap_csv(output+'_allslices',datatimes,'C:/everything/userthings/'+output+'/'+output)
if save:
print "\n Trying to save the following slices: \n"
saveslices(output+'_allslices','C:/everything/userthings/'+output+'/')
def saveslices(inputwksp, dir = None):
if dir:
userdirectory = dir
else:
userdirectory = "C:/everything/userthings/"
spectrum = 0
print spectrum
while True:
try:
filename = userdirectory + inputwksp + "_" + str(spectrum) + ".dat"
SaveAscii(inputwksp, filename, SpectrumList = [spectrum], WriteSpectrumID = False, CommentIndicator = "#", Separator = "Tab", ColumnHeader = False)
spectrum += 1
print spectrum
except:
print "End of slices reached, this one does not exist: " + str(spectrum)
break
def slice_the_data(rnum,output,start = 0, tslice=None,nslices = None ,sarray=[],usearray=0,sf=1.0,userdirectory = 'U://',binning=["1.5","0.02","14.0","2"],loadcrpt=0):
slicearray = sarray[:]
slicenames=[] #this will be a list of workspace names for all the slices created
datatimes = [] # this will contain the meantime for each dataset
if tslice or nslices: # if tslice or nslices exist they will take precedence over slicearray
testws = loaddata(rnum,loadcrpt=loadcrpt)
runtotaltime = getLog(testws, 'duration')
print "Total runtime in seconds: " + str(runtotaltime)
DeleteWorkspace(testws)
if nslices:
tslice = ceil((runtotaltime - start)/(nslices))
slicearray.append(start)
while slicearray[-1] < runtotaltime:
slicearray.append(slicearray[-1]+tslice)
slicearray[-1] = runtotaltime # lastentry is some random number > than total runduration, set equal to runduration, this means the last slice has a different length to the others
print "Time boundaries:\n"
print slicearray
print "Start making slices:\n"
for idx in range(len(slicearray)):
try:
start = slicearray[idx]; end = slicearray[idx+1]
datatimes.append(0.5*(start+end)) # calculate the time for this dataset for saving later
wksp = timeslice(rnum,start,end,output,loadcrpt=loadcrpt)
except:
break
slicenames.append(wksp)
nr.nrNRFn("",wksp,"0.7","LDDB05k","114","110","118",binning,"",usewkspname=1,sf=sf)
Rebin(wksp+"RvQ","0.011,-0.01,0.09",OutputWorkspace=wksp+"RvQ")
DeleteWorkspace(wksp)
DeleteWorkspace(wksp+'detnorm')
DeleteWorkspace(wksp+'norm')
if idx == 0:
CloneWorkspace(slicenames[0]+'RvQ',OutputWorkspace=output+'_allslices')
else:
ConjoinWorkspaces(output+'_allslices',slicenames[-1]+'RvQ',CheckOverlapping=0)
DeleteWorkspace(slicenames[0]+'RvQ')
writemap_csv(output+'_allslices',datatimes,userdirectory + output + '/'+output)
print "\n Trying to save the following slices: \n"
saveslices(output+'_allslices', userdirectory+ output + '/')
def offspecslice(rnum,btime,etime,qmin,qmax,output,spec=114,loadcrpt=0):
wksp=timeslice(rnum,btime,etime,output,loadcrpt=loadcrpt)
nr.nrNRFn("",wksp,"0.7","LDDB05k",spec,"110","118",binning,"",usewkspname=1)
ConvertUnits(wksp+'detnorm','MomentumTransfer',OutputWorkspace=wksp+'detnormQ')
Rebin(wksp+'detnormQ','0.011,-0.01,0.09',OutputWorkspace=wksp+'detnormQ')
Integration(wksp+'detnormQ',qmin,qmax,OutputWorkspace=wksp+'detnormQ_Int')
Transpose(wksp+'detnormQ_Int',OutputWorkspace=wksp+'detnormQ_'+str(qmin)+'_'+str(qmax))
binning=["1.5","0.02","14.0","2"]
combine_binning=["0.0085","-0.015","0.3"]
times = [i for i in range(0,19800,1800)]
times2= [i for i in range(25200,36000,7200)]
timearray = times+times2+[36644]
#[0, 1800, , 900, 1200, 1500,1800,2100,2400,2700,3000,3300,3600,7200,10800,14400,18000, 36644.0]
#offspecQplot('35723',0.01,0.06,'test2',nslices=20,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
QxQzcuts('test2_3665.0-7330.0qxqz', qzmin=0.03, qzmax=0.033)
nr.nrDBFn("34193+34195","w93","34194","w94","LDDB05","108","120","6.0",binning,"",fitspline=10,diagnostics="0")
nr.nrDBFn("34220+34222","w20","34221","w21","LDDB05piezo","108","120","8.0",binning,"",fitspline=10,diagnostics="0")
saveslices("Mg1_loading1_1160mbar_chopit_60sec_allslices")
#Old Direct Beam
#nr.nrDBFn("35737","w37","35738","w38","LDDB05k","108","120","4.5",binning,"",fitspline=10,diagnostics="0")
#nr.nrDBFn("35739","w39","35740","w40","LDDB05s","108","120","5.3",binning,"",fitspline=10,diagnostics="0")
#New Direct Beam
nr.nrDBFn("35816+35818+35820+35822+35824+35826+35828+35830","w37","35817+35819+35821+35823+35825+35827+35829","w38","LDDB05k","108","120","10.0",binning,"",fitspline=10,diagnostics="0")
nr.nrDBFn("35785+35787+35789+35791+35793+35795+35797+35799+35801+35803+35805+35807+35809+35811+35813+35815","w39","35786+35788+35790+35792+35794+35796+35798+35800+35802+35804+35806+35808+35810+35812+35814","w40","LDDB05s","108","120","8.0",binning,"",fitspline=10,diagnostics="0")
#nr.nrDBFn("35816","w37","35817","w38","LDDB05k","108","120","4.5",binning,"",fitspline=10,diagnostics="0")
#Copy and paste the list called 'j' here from the output bar below,
j=[0, 3601.0, 7202.0, 10803.0, 14404.0, 18005.0, 21606.0, 25207.0, 28808.0, 32409.0, 36010.0, 39611.0, 43212.0, 46813.0, 50414.0, 54015.0, 57616.0, 61217.0, 64818.0, 68419.0, 72019.0]
qzmin = 0.022
qzmax=0.035
for i in range(len(j)-1):
QxQzcuts('test2_'+str(j[i])+'-'+str(j[i+1])+'qxqz', qzmin=qzmin, qzmax=qzmax,plot=False)
SaveAscii('test2_'+str(j[i])+'-'+str(j[i+1])+'qxqz'+'_cut_'+str(qzmin)+'-'+str(qzmax), 'U:/VanWell/July_2015/Cuts/test2_'+str(j[i])+'-'+str(j[i+1])+'qxqz'+'_cut_'+str(qzmin)+'-'+str(qzmax)+'.dat', WriteSpectrumID = False, CommentIndicator = "#", Separator = "Tab", ColumnHeader = False)
print os.getcwd()
#
#=====================================================================================================================================================
#
###########################
#Mg-1
###########################
#Virgin Sample in Air
#Start Time: 14/07/2015 14:53
#Folder Name Pictures: ISIS/July2015/ NA
nr.nrNRFn("35715","Mg1_VirginAir_1000mbar_030C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35716","Mg1_VirginAir_1000mbar_030C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg1_VirginAir_1000mbar_030C_th=0.5RvQ,Mg1_VirginAir_1000mbar_030C_th=2.0RvQ","Mg1_VirginAir_1000mbar_030C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#Run 35717 Not usefull.
#Increased temperature to T=80 C in about 5 min before start of the run.
# <T<81.6 <T_heater<91 C
#Some dirt on the O-ring of the top part of the caused a failed attempt to vacuum pump the sample.
#Virgin Sample in Vacuum
#Start Time: 14/07/2015 15:22
#Folder Name Pictures: ISIS/July2015/ NA
nr.nrNRFn("35718","Mg1_Virgin_0000mbar_080C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35719","Mg1_Virgin_0000mbar_080C_th=1.7","1.7","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg1_Virgin_0000mbar_080C_th=0.5RvQ,Mg1_Virgin_0000mbar_080C_th=1.7RvQ","Mg1_Virgin_0000mbar_080C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#Virgin Sample in Vacuum: Kinetic run
#Start Time: 14/07/2015 18:03
#Folder Name Pictures: ISIS/July2015/ NA
chopit2(35720,'Mg1_Virgin_0000mbar_080C_th0.7_600', nslices=1)
#chopit(35720,0,601,60,'Mg1_Virgin_0000mbar_080C_th0.7_60')
#noticed from plots that graphs are different from static measurements
loaddata(35720)
#Scanned realized theta, th=0.707
#Start Time: 14/07/2015 18:50
#Folder Name Pictures: ISIS/July2015/ NA
chopit(35721,0,601,600,'Mg1_Virgin_0000mbar_080C_th0.7_600')
#offset in time considering the pictures: 2minutes earlier than Time at Offspec
#Start Time: 14/07/2015 19:39:50
#Stop Time: 15/07/2015 05:50:50
#Total length 10:10:14
#Folder Name Pictures: ISIS/July2015/Mg1/loading1_1200mbar_080C
chopit(35722,120,1321,60,'Mg1_loading1_1200mbar_080C_60s')
chopit(35722,120,3721,300,'Mg1_loading1_1200mbar_080C_300s')
chopit(35722,120,7321,600,'Mg1_loading1_1200mbar_080C_600s')
chopit(35722,120,36600,1800,'Mg1_loading1_1200mbar_080C_1800s')
chopit(35722,120,36600,3600,'Mg1_loading1_1200mbar_080C_3600s')
#2:00 start to increase the pressure (97230 at Project_X pressure software) Flow=100 sscm, Vout=0V
#2:30 P=150 mbar
#3:00 P=250
#4:00 P=400
#4:45 P=500
#5:10 P=600
#6:49 P=800
#7:50 P=900
#9:06 P=1000
#11:21 P=1100 flow to 10 sccm
#14:30 P=1185
#14:55 P=1200
#16:18 P=1220
#19:46 P=1200
#24:55 P=1150 mbar
#Change of Temperature to 70 degrees.
#Start Time: 15/07/2015 05:50:53
#Stop Time: 16/07/2015 01:51:12
#Total length:20:00:19
#Folder Name Pictures: ISIS/July2015/Mg1/loading1_1200mbar_080C (Unchanged as compared with T=80 C)
chopit(35723,0,1201,60,'Mg1_loading1_1200mbar_070C_60s')
chopit(35723,0,3601,600,'Mg1_loading1_1200mbar_070C_600s')
chopit(35723,0,72001,1800,'Mg1_loading1_1200mbar_070C_1800s')
chopit(35723,0,72001,3600,'Mg1_loading1_1200mbar_070C_3600s')
offspecslice(35723,120,3701,0.015,0.04,"Mg1_loading1_1200mbar_070C_th0.7_1800_offspec",spec=116.5)
#Static Measurement Mg1_loaded1_1200mbar_070C
#Start Time: 16/07/2015 02:00:00
#Stop Time: 16/07/2015 ????
#Longer Since beam was down for about 16 minutes.
#Folder Name Pictures: NA
nr.nrNRFn("35724","Mg1_loaded1_1200mbar_070C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35725","Mg1_loaded1_1200mbar_070C_th=1.7","1.7","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg1_loaded1_1200mbar_070C_th=0.5RvQ,Mg1_loaded1_1200mbar_070C_th=1.7RvQ","Mg1_loaded1_1200mbar_070C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#Unloading1
#Start Time: 16/07/2015 03:40:54
#Stop Time: 16/07/2015 09:54:51
#Total length: 06:13:57
#Folder Name Pictures: ISIS/July2015/Mg1/unloading1_0000mbar_070C
chopit(35726,0,1201,60,'Mg1_unloading1_0000mbar_070C_60s')
chopit(35726,0,3601,300,'Mg1_unloading1_0000mbar_070C_300s')
chopit(35726,0,18001,600,'Mg1_unloading1_0000mbar_070C_600s')
chopit(35726,0,21601,1800,'Mg1_unloading1_0000mbar_070C_1800s')
#01:00: Started to decrease pressure from 1180mbar to 0mbar.
#01:30: Final pressure of 0000mbar reached.
reload(nr)
nr.current_detector = nr.old_detector
offspecQplot('35726',0.01,0.06,'Mg1_unloading1_0000mbar_070C_OFFSPEC',spec="117",nslices=2,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#Static Measurement Mg1_unloaded1_0000mbar_070C
#Start Time: 16/07/2015 09:59:05
#Stop Time: 16/07/2015 11:00:42
#Folder Name Pictures: NA
nr.nrNRFn("35727","Mg1_unloaded1_0000mbar_070C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35728","Mg1_unloaded1_0000mbar_070C_th=1.7","1.7","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg1_unloaded1_0000mbar_070C_th=0.5RvQ,Mg1_unloaded1_0000mbar_070C_th=1.7RvQ","Mg1_unloaded1_0000mbar_070C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#loading2
#Start Time: 16/07/2015 11:19:36
#Stop Time: 16/07/2015 15:20:01
#Total length: 4:00:25
#Folder Name Pictures: ISIS/July2015/Mg1/loading2_1200mbar_070C
chopit(35729,0,601,60,'Mg1_loading2_1200mbar_070C_60s')
chopit(35729,0,3601,300,'Mg1_loading2_1200mbar_070C_300s')
chopit(35729,0,3601,600,'Mg1_loading2_1200mbar_070C_600s')
chopit(35729,0,14401,1800,'Mg1_loading2_1200mbar_070C_1800s')
#240056 at Project_X pressure software corresponds with 00:00
#00:30: Started to increase pressure from 0mbar to 1200mbar. Flow=10 sscm, Vout=0V
#01:10: 100 mbar
#01:42: 200 mbar
#02:58: 400 mbar
#03:30 500 mbar
#04:10 600 mbar
#05:20 800 mbar
#06:05 900 mbar
#07:00 1000 mbar
#08:00 1100 mbar
#10:00 1200 mbar
offspecQplot('35729',0.01,0.06,'Mg1_loading2_1200mbar_070C_OFFSPEC',nslices=4,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#Static Measurement Mg1_loaded2_1200mbar_070C
#Start Time: 16/07/2015 09:59:05
#Stop Time: 16/07/2015 10:59:34
#Folder Name Pictures: NA
nr.nrNRFn("35730","Mg1_loaded2_1200mbar_070C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35731","Mg1_loaded2_1200mbar_070C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg1_loaded2_1200mbar_070C_th=0.5RvQ,Mg1_loaded2_1200mbar_070C_th=2.0RvQ","Mg1_loaded2_1200mbar_070C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#kinetic measurement Mg1_unloading2_0000mbar_070C
#Start Time: 16/07/2015 16:51:14
#Stop Time: 16/07/2015 21:57:36
#Total length: 5:06:22
#Folder Name Pictures: ISIS/July2015/Mg1/unloading2_0000mbar_070C
chopit(35732,0,1201,120,'Mg1_unloading2_0000mbar_070C_th0.7_120sec')
chopit(35732,0,3601,600,'Mg1_unloading2_0000mbar_070C_th0.7_600sec')
chopit(35732,0,18001,1800,'Mg1_unloading2_0000mbar_070C_1800s')
#01:00: Started to decrease pressure from 1180mbar to 0mbar.
#02:00: Final pressure of 0000mbar reached.
#03:00: Pictures started.
offspecQplot('35732',0.01,0.06,'Mg1_unloading2_0000mbar_070C_OFFSPEC',nslices=10,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#Static Measurement Mg1_unloaded2_0000mbar_070C
#Start Time: 16/07/2015 22:03:39
#Stop Time: 16/07/2015 23:04:05
#Folder Name Pictures: NA
nr.nrNRFn("35733","Mg1_unloaded2_0000mbar_070C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35734","Mg1_unloaded2_0000mbar_070C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg1_unloaded2_0000mbar_070C_th=0.5RvQ,Mg1_unloaded2_0000mbar_070C_th=2.0RvQ","Mg1_unloaded2_0000mbar_070C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#kinetic measurement Mg1_unloaded2_000mbar_cooling070to030C
#in run title: Mg1_unloading_0000mbar_070C !!!!!!!!!!!!!!
#no piezoslit installed !!!!!!! This means that effective 'sample slit' = 70*sim(0.7) = 0.85 mm
#Start Time: 16/07/2015 23:18:54
#Stop Time: 16/07/2015 00:15
#Total length: 57:10
#Folder Name Pictures: NA
chopit(35735,0,1201,120,'Mg1_unloading2_0000mbar_cooling070to030C_th0.7_120sec')
chopit(35735,0,3001,300,'Mg1_unloading2_0000mbar_cooling070to030C_th0.7_300sec')
#23:15: Started to decrease temperature: set value -> 30C, sample T starts with 80C !
#23:21 T_sample=75C
#23:28 65C
#23:33 60C
#23:48 50C
#00:00 45C
#00:15 40C
#during cooling no changes visible
#Install piezo slit
#Vent sample cell at 00:33
#kinetic measurement Mg1_unloaded2_air_030C
#Start Time: 17/07/2015 00:36:27
#Stop Time: 17/07/2015 01:36
#Total length: 1:00
#Folder Name Pictures: NA
chopit(35736,0,1201,120,'Mg1_unloading2_air_030C_th0.7_120sec')
chopit(35736,0,3001,300,'Mg1_unloading2_air_030C_th0.7_300sec')
#sample changes as a result of the air! after 50 min no changes visible
offspecQplot('35736',0.01,0.06,'Mg1_unloading2_air_070C_OFFSPEC',nslices=2,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#######################
#direct beam measurements
#######################
#for kinetic measurements:
#piezo slit should be in the beam. This slit was removed and then the sample taken out.
#Piezo slit replaced and aligned without sample in the straight beam (theta=0)
# intensity was too high. width 1st slit changed from 30 to 3 mm, then coutrate 0.4 kHz
# runs 35737 and 3538: Start Tine: 17/07/2015 02:39:01; Stop time: 17/07/2015 3:42:27
#for static measurements:
#no piezo slit
# runs 35739 and 35740: Start Tine: 17/07/2015 3:46:03; Stop time: 17/07/2015 4:46
nr.nrDBFn("35737","w37","35738","w38","LDDB05k","108","120","4.5",binning,"",fitspline=10,diagnostics="0")
nr.nrDBFn("35739","w39","35740","w40","LDDB05s","108","120","5.3",binning,"",fitspline=10,diagnostics="0")
##################
#Mg-1
#################
#Static Measurement Mg1_unloaded2Air_1000mbar_030C
#Start Time: 17/07/2015 05;30:49
#Stop Time: 17/07/2015 06:33:55
#Total length: 1:03:06
#Folder Name Pictures: NA
nr.nrNRFn("35741","Mg1_unloaded2_air_030C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35742","Mg1_unloaded2_air_030C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg1_unloaded2_air_030C_th=0.5RvQ,Mg1_unloaded2_air_030C_th=2.0RvQ","Mg1_unloaded2_air_030C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#Mg1_unloaded2_Air_21072015_13:00_30C
nr.nrNRFn("35779","Mg1_unloaded2_Air_21072015_13:00_30C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35780","Mg1_unloaded2_Air_21072015_13:00_30C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg1_unloaded2_Air_21072015_13:00_30C_th=0.5RvQ,Mg1_unloaded2_Air_21072015_13:00_30C_th=2.0RvQ","Mg1_unloaded2_Air_21072015_13:00_30C_anglesCombined","0","","","0",combine_binning,1.0,"2")
######################################
#Hf-1
######################################
#Static Measurement Hf1_Virgin_0000mbar_120C
#Start Time: 17/07/2015 07;02:39
#Stop Time: 17/07/2015 08:04;46
#Total length: 1:03:06
#Folder Name Pictures: NA
nr.nrNRFn("35743","Hf1_Virgin_0000mbar_120C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35744","Hf1_Virgin_0000mbar_120C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Hf1_Virgin_0000mbar_120C_th=0.5RvQ,Hf1_Virgin_0000mbar_120C_th=2.0RvQ","Hf1_Virgin_0000mbar_120C_anglesCombined","0","","","0",combine_binning,1.0,"2")
offspecQplot('35744',0.01,0.06,'Hf1_Virgin_0000mbar_120C_OFFSPEC',nslices=1,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#Kinetic Measurement Hf1_loading_0010mbar_120C
#Start Time: 17/07/2015 08:22:54
#Stop Time: 17/07/2015 16:12:47
#Total length: 07:49;53
#Folder Name Pictures: NA
chopit(35745,480,1681,300,'Hf1_loading1_0010mbar_120C_th0.7_300sec')
chopit(35745,480,6481,600,'Hf1_loading1_0010mbar_120C_th0.7_600sec')
chopit(35745,480,27481,1800,'Hf1_loading1_0010mbar_120C_th0.7_1800sec')
chopit(35745,480,27481,3600,'Hf1_loading1_0010mbar_120C_th0.7_3600sec')
#5520 at Project_X pressure software corresponds with 00:00
#08:00 Started to increase pressure from 0mbar to 10mbar. Flow=10 sscm, Vout=0V. Ppump=2.94
#08:15 10 mbar
#2:20:00 increased Vout to 9.9V (auto) Ppum 3.69
offspecQplot('35745',0.01,0.06,'Hf1_loading1_0010mbar_120C_OFFSPEC',nslices=7,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#Kinetic Measurement Hf1_loading1_1000mbar_120C
#Start Time: 17/07/2015 16:16:59
#Stop Time: 17/07/2015 19:17:00
#Total length: 03:14:01
#Folder Name Pictures: NA
chopit(35746,0,1201,60,'Hf1_loading1_1000mbar_120C_th0.7_60sec')
chopit(35746,0,10801,600,'Hf1_loading1_1000mbar_120C_th0.7_600sec')
#01:00 150 mbar
#02:00 300 mbar
#03;30 600 mbar
#05:00 800 mbar
#06:00 900 mbar
#07:30 1000 mbar
offspecQplot('35746',0.01,0.06,'Hf1_loading1_1000mbar_120C_OFFSPEC',nslices=6,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#Kinetic Measurement Hf1_unloading1_0000mbar_120C
#Start Time: 17/07/2015 19:34:23
#Stop Time: 17/07/2015 20:35:00
#Total length: 1:00:37
#Folder Name Pictures: NA
chopit(35747,0,601,60,'Hf1_unloading1_0000mbar_120C_60sec')
chopit(35747,0,3601,300,'Hf1_unloading1_0000mbar_120C_300sec')
#chopit(35747,0,3601,600,'Hf1_unloading1_0000mbar_120C_th0.7_600sec')
#01:00 0 mbar
offspecQplot('35747',0.01,0.06,'Hf1_unloading1_0000mbar_120C_OFFSPEC',nslices=4,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#Kinetic Measurement Hf1_unloading1_Air_120C
#Start Time: 17/07/2015 20:37:34
#Stop Time: 17/07/2015 21:08:14
#Total length: 30:40
#Folder Name Pictures: NA
chopit(35748,0,1801,300,'Hf1_unloading1_Air_120C_300sec')
#Delay between allowing air to enter the cell and the start of the measurement of about 2 min.
#Relatively Large initial effect seen. (almost) nothing afterwards)
offspecQplot('35748',0.01,0.06,'Hf1_unloading1_Air_120C_OFFSPEC',nslices=2,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
###########################################################################################
#Mg-2
###########################################################################################
#Virgin Sample in Vacuum
#Start Time: 17/07/2015 22:12:25
#Stop Time: 17/07/2015 23:12:54
#Total length: 1:00:29
#Folder Name Pictures: ISIS/July2015/ NA
nr.nrNRFn("35749","Mg2_Virgin_0000mbar_070C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35750","Mg2_Virgin_0000mbar_070C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg2_Virgin_0000mbar_070C_th=0.5RvQ,Mg2_Virgin_0000mbar_070C_th=2.0RvQ","Mg2_Virgin_0000mbar_070C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#loading1
#Start Time: 17/07/2015 23:29:13
#Stop Time: 18/07/2015 22:56:30
#Total length: 23:33:17
#Folder Name Pictures: ISIS/July2015/Mg2/Mg2_loading1_1200mbar_070C
chopit(35751,120,721,60,'Mg2_loading1_1200mbar_th0.7_60s')
chopit(35751,120,3721,300,'Mg2_loading1_1200mbar_070C_300s')
chopit(35751,120,84721,3600,'Mg2_loading1_1200mbar_070C_3600s')
#stopped since DAQ did not work properly.
offspecQplot('35751',0.01,0.06,'Mg2_loading1_1200mbar_070C_OFFSPEC',nslices=10,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#Start Time: 18/07/2015 23:04:11
#Stop Time: 19/07/2015 08:04:29
#Total length: 9:00;18
#Folder Name Pictures: ISIS/July2015/Mg2/Mg2_loading1_1200mbar_070C
chopit(35752,0,32401,3600,'Mg2_loading1_1200mbar_3600sec-2')
offspecQplot('35752',0.01,0.06,'Mg2_loading1_1200mbar_070C_OFFSPEC-2',nslices=5,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#Loaded1 Sample
#Start Time: 19/07/2015 08:07:55
#Stop Time: 19/07/2015 09:38:25
#Total length: 1:00:30
#Folder Name Pictures: ISIS/July2015/ NA
nr.nrNRFn("35753","Mg2_loaded1_1200mbar_070C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35754","Mg2_loaded1_1200mbar_070C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg2_loaded1_1200mbar_070C_th=0.5RvQ,Mg2_loaded1_1200mbar_070C_th=2.0RvQ","Mg2_loaded1_1200mbar_070C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#unloading1 Sample
#Start Time: 19/07/2015 09:27:23
#Stop Time: 19/07/2015 17:12:39
#Total length: 07:45:16
#Folder Name Pictures: ISIS/July2015/Mg2/Mg2_unloading1_0000mbar_070C
chopit(35755,0,601,60,'Mg2_unloading1_0000mbar_070C_th0.7_60sec')
chopit(35755,0,3601,300,'Mg2_unloading1_0000mbar_070C_th0.7_300sec')
chopit(35755,0,27001,1800,'Mg2_unloading1_0000mbar_070C_th0.7_1800sec')
#1:00 Final pressure of 0 mbar reached.
offspecQplot('35755',0.01,0.06,'Mg2_unloading1_0000mbar_070C_OFFSPEC',nslices=7,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#unloaded1 Sample
#Start Time: 19/07/2015 17:15:45
#Stop Time: 19/07/2015 18:19:41
#Total length: 1:03:56
#Folder Name Pictures: ISIS/July2015/ NA
nr.nrNRFn("35756","Mg2_unloaded1_0000mbar_070C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35757","Mg2_unloaded1_0000mbar_070C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg2_unloaded1_0000mbar_070C_th=0.5RvQ,Mg2_unloaded1_0000mbar_070C_th=2.0RvQ","Mg2_unloaded1_0000mbar_070C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#Start Time: 19/07/2015 18:32:46
#Stop Time: 19/07/2015 23:10:01
#Total length: 4:37:15
#Folder Name Pictures: ISIS/July2015/Mg2/Mg2_loading2_1200mbar_070C
chopit(35758,0,601,60,'Mg2_loading2_1200mbar_070C_th0.7_60sec')
chopit(35758,0,3601,300,'Mg2_loading2_1200mbar_070C_th0.7_300sec')
chopit(35758,0,16201,1800,'Mg2_loading2_1200mbar_070C_th0.7_1800sec')
#214550 at Project_X pressure software corresponds with 00:00
#00:39 Started to increase pressure from 0mbar to 1200mbar. Flow=10 sscm, Vout=0V. Ppump=2.94
#08:30 Final pressure reached
offspecQplot('35758',0.01,0.06,'Mg2_loading2_0000mbar_070C_OFFSPEC',nslices=4,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#loaded2 Sample
#Start Time: 19/07/2015 23:13:53
#Stop Time: 20/07/2015 00:14:22
#Total length: 1:00:29
#Folder Name Pictures: ISIS/July2015/ NA
nr.nrNRFn("35759","Mg2_loaded2_1200mbar_070C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35760","Mg2_loaded2_1200mbar_070C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg2_loaded2_1200mbar_070C_th=0.5RvQ,Mg2_loaded2_1200mbar_070C_th=2.0RvQ","Mg2_loaded2_1200mbar_070C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#Start Time: 20/07/2015 00:28:42
#Stop Time: 20/07/2015 06:45:27
#Total length: 6:16:45
#Folder Name Pictures: ISIS/July2015/Mg2/Mg2_unloading2_0100mbar_070C
chopit(35761,0,601,60,'Mg2_unloading2_0100mbar_070C_th0.7_60sec')
chopit(35761,0,3601,300,'Mg2_unloading2_0100mbar_070C_th0.7_300sec')
chopit(35761,0,21601,1800,'Mg2_unloading2_0100mbar_070C_th0.7_1800sec')
#236000 at Project_X pressure software corresponds with 00:00
#00:25 Started to decrease pressure from 1200mbar to 100mbar. Flow=10 sscm, Vout=5V. Ppump=
#03:00 Camara Switched on.
#03:00 200 mbar reached
offspecQplot('35761',0.01,0.06,'Mg2_unloading2_0100mbar_070C_OFFSPEC',nslices=6,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#unloaded2 Sample @100 mbar
#Start Time: 20/07/2015
#Stop Time: 20/07/2015
#Total length: 1:00:29
#Folder Name Pictures: ISIS/July2015/ NA
nr.nrNRFn("35762","Mg2_unloaded2_0100mbar_070C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35763","Mg2_unloaded2_0100mbar_070C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg2_unloaded2_0100mbar_070C_th=0.5RvQ,Mg2_unloaded2_0100mbar_070C_th=2.0RvQ","Mg2_unloaded2_0100mbar_070C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#Start Time: 20/07/2015 08:02:28
#Stop Time: 20/07/2015 12:09:55
#Total length: 04:07:28
#Folder Name Pictures: ISIS/July2015/Mg2/Mg2_unloading2_0040mbar_070C
chopit(35764,0,301,60,'Mg2_unloading2_0040mbar_070C_th0.7_60sec')
chopit(35764,0,7201,600,'Mg2_unloading2_0040mbar_070C_th0.7_600sec')
chopit(35764,0,14401,1800,'Mg2_unloading2_0040mbar_070C_th0.7_1800sec')
#263400 at Project_X pressure software corresponds with 00:00
#00:15 Started to decrease pressure from 1200mbar to 100mbar. Flow=10 sscm, Vout=5V. Ppump=
#01:30 70 mbar
#03:00 50 mbar
#05:30 40 mbar
offspecQplot('35764',0.01,0.06,'Mg2_unloading2_0040mbar_070C_OFFSPEC',nslices=6,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#unloaded2 Sample @ 40 mbar
#Start Time:20/07/2015 12:12:45
#Stop Time: 20/07/2015 13:13:14
#Total length: 1:00:29
#Folder Name Pictures: ISIS/July2015/ NA
nr.nrNRFn("35765","Mg2_unloaded2_0040mbar_070C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35766","Mg2_unloaded2_0040mbar_070C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg2_unloaded2_0040mbar_070C_th=0.5RvQ,Mg2_unloaded2_0040mbar_070C_th=2.0RvQ","Mg2_unloaded2_0040mbar_070C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#unloading2 Sample @ 0 mbar
#Start Time: 20/07/2015 14:04:22
#Stop Time: 21/07/2015 01:09:09
#Total length: 11:04:47
#Folder Name Pictures: ISIS/July2015/Mg2/Mg2_unloading2_0000mbar_070C
chopit(35767,0,601,60,'Mg2_unloading2_0000mbar_070C_th0.7_60sec')
chopit(35767,0,2401,300,'Mg2_unloading2_0000mbar_070C_th0.7_300sec')
chopit(35767,0,14401,600,'Mg2_unloading2_0000mbar_070C_th0.7_600sec')
chopit(35767,0,39601,1800,'Mg2_unloading2_0000mbar_070C_th0.7_1800sec')
#instantaneously set to vacuum
offspecQplot('35767',0.01,0.06,'Mg2_unloading2_0000mbar_070C_OFFSPEC',nslices=11,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#unloaded2 Sample @ 0 mbar
#Start Time:21/07/2015 01:12:23
#Stop Time: 21/07/2015 02:12:53
#Total length: 01:00:30
#Folder Name Pictures: ISIS/July2015/ NA
nr.nrNRFn("35768","Mg2_unloaded2_0000mbar_070C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35769","Mg2_unloaded2_0000mbar_070C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg2_unloaded2_0000mbar_070C_th=0.5RvQ,Mg2_unloaded2_0000mbar_070C_th=2.0RvQ","Mg2_unloaded2_0000mbar_070C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#unloading2 Sample @Air
#Start Time: 21/07/2015 02:23:21
#Stop Time: 21/07/2015 03:23:45
#Total length: 1:00:24
#Folder Name Pictures: ISIS/July2015/Mg2/Mg2_unloading2_0000mbar_070C
chopit(35770,0,601,60,'Mg2_unloading2_air_030C_th0.7_60sec')
chopit(35770,0,3601,300,'Mg2_unloading2_air_030C_th0.7_300sec')
#First minute of unloading not captured since valve in blockhouse had to be opened.
#Gradual decrease of temperature during the run.
offspecQplot('35770',0.01,0.06,'Mg2_unloading2_air_030C_OFFSPEC',nslices=2,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
##############
#Mg2_unloaded2_Air_21072015_13:00_30C
nr.nrNRFn("35781","Mg2_unloaded2_Air_21072015_13:00_30C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35782","Mg2_unloaded2_Air_21072015_13:00_30C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg2_unloaded2_Air_21072015_13:00_30C_th=0.5RvQ,Mg2_unloaded2_Air_21072015_13:00_30C_th=2.0RvQ","Mg2_unloaded2_Air_21072015_13:00_30C_anglesCombined","0","","","0",combine_binning,1.0,"2")
##############
############################################################################################################
#Mg-3
############################################################################################################
#General Remark: Sample looks extremely dirty!
#Virgin state looks reasonably similar to Mg-1 and Mg-2
#Virgin Sample @ 0 mbar
#Start Time:21/07/2015 03:43:40
#Stop Time: 21/07/2015
#Total length:
#Folder Name Pictures: ISIS/July2015/ NA
nr.nrNRFn("35771","Mg3_Virgin_0000mbar_070C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35772","Mg3_Virgin_0000mbar_070C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg3_Virgin_0000mbar_070C_th=0.5RvQ,Mg3_Virgin_0000mbar_070C_th=2.0RvQ","Mg3_Virgin_0000mbar_070C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#Gradual increase of Temperature from 30 to 70C During the run. (reasonably fast)
#loading1 @ 300 mbar
#Start Time:21/07/2015 04:56:38
#Stop Time: 21/07/2015 06:47:22
#Total length: 1:50:44
#Folder Name Pictures: ISIS/July2015/Mg3/Mg3_loading1_0300mbar_070C
chopit(35773,0,1801,60,'Mg3_loading1_0300mbar_070C_60s')
chopit(35773,0,6001,300,'Mg3_loading1_0300mbar_070C_300s')
chopit(35773,0,6001,600,'Mg3_loading1_0300mbar_070C_600s')
# 338627 t Project_X pressure software corresponds with 00:00
# 01:00 150 mbar
# 02:00
offspecQplot('35773',0.01,0.06,'Mg3_loading1_0300mbar_070C_OFFSPEC',nslices=5,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#loaded1 @ 300 mbar
#Start Time:21/07/2015 06:52:25
#Stop Time: 21/07/2015
#Total length:
#Folder Name Pictures: ISIS/July2015/ NA
nr.nrNRFn("35774","Mg3_loaded1_0300mbar_070C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35775","Mg3_loaded1_0300mbar_070C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg3_loaded1_0300mbar_070C_th=0.5RvQ,Mg3_loaded1_0300mbar_070C_th=2.0RvQ","Mg3_loaded1_0300mbar_070C_anglesCombined","0","","","0",combine_binning,1.0,"2")
#unloading1 @ 000 mbar
#Start Time:21/07/2015 8:07:22
#Stop Time: 21/07/2015 09:20:54
#Total length: 1:13:32
#Folder Name Pictures: ISIS/July2015/Mg3/Mg3_unloading1_0000mbar_070C
chopit(35776,0,601,60,'Mg3_unloading1_0000mbar_070C_60s')
chopit(35776,0,4201,300,'Mg3_unloading1_0000mbar_070C_300s')
chopit(35776,0,4201,600,'Mg3_unloading1_0000mbar_070C_600s')
# instantaneous vacuum at start of run
offspecQplot('35776',0.01,0.06,'Mg3_unloading1_0000mbar_070C_OFFSPEC',nslices=5,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#unloaded1 @ 000 mbar
#Start Time:21/07/2015 9:24:54
#Stop Time: 21/07/2015
#Total length:
#Folder Name Pictures: ISIS/July2015/ NA
nr.nrNRFn("35777","Mg3_unloaded1_0000mbar_070C_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35778","Mg3_unloaded1_0000mbar_070C_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("Mg3_unloaded1_0000mbar_070C_th=0.5RvQ,Mg3_unloaded1_0000mbar_070C_th=2.0RvQ","Mg3_unloaded1_0000mbar_070C_anglesCombined","0","","","0",combine_binning,1.0,"2")
###########################################################################################################
#cleaned substrate
nr.nrNRFn("35783","cleaned_substrate_th=0.5","0.5","LDDB05s","114","110","118",binning,"")
nr.nrNRFn("35784","cleaned_substrate_th=2.0","2.0","LDDB05s","114","110","118",binning,"")
nr.NRCombineDatafn("cleaned_substrate_th=0.5RvQ,cleaned_substrate_th=2.0RvQ","cleaned_substrate_anglesCombined","0","","","0",combine_binning,1.0,"2")
#################################################################################3
#looking at off-spec intensities
offspecQplot('35726',0.01,0.06,'Mg1_unloading1_0000mbar_070C_OFFSPEC',nslices=6,sarray = [] , Nqx=150, Nqz=150, zmin=5e-7, zmax=0.01)
#j=[0, 3601.0, 7202.0, 10803.0, 14404.0, 18005.0, 21606.0, 25207.0, 28808.0, 32409.0, 36010.0, 39611.0, 43212.0, 46813.0, 50414.0, 54015.0, 57616.0, 61217.0, 64818.0, 68419.0, 72019.0]
j=[0, 3740.0, 7480.0, 11220.0, 14960.0, 18700.0, 22437.0]
qzmin = 0.025
qzmax=0.030
for i in range(len(j)-1):
QxQzcuts('Mg1_unloading1_0000mbar_070C_OFFSPEC_'+str(j[i])+'-'+str(j[i+1])+'qxqz', qzmin=qzmin, qzmax=qzmax,plot=False)
SaveAscii('Mg1_unloading1_0000mbar_070C_OFFSPEC_'+str(j[i])+'-'+str(j[i+1])+'qxqz'+'_cut_'+str(qzmin)+'-'+str(qzmax), 'U:/VanWell/July_2015/Cuts/Mg1_unloading1_0000mbar_070C_OFFSPEC_'+str(j[i])+'-'+str(j[i+1])+'qxqz'+'_cut_'+str(qzmin)+'-'+str(qzmax)+'.dat', WriteSpectrumID = False, CommentIndicator = "#", Separator = "Tab", ColumnHeader = False)
New function: can be used like this:
chopit2(34253, 'test', tslice = 30000, userdirectory = "U:/vanWell/April_2015/savetest/" )
or like this:
chopit2(34253, 'test', nslices = 5, userdirectory = "U:/vanWell/April_2015/savetest/" )
or like this:
chopit2(35755, 'test', start = 10000, nslices = 4, userdirectory = "U:/vanWell/April_2015/savetest/",loadcrpt=1 )
saves the individual data slices as dat files as well in the folders already created.
help(chopit2)
############################################
| [
"mantid-publisher@builds.mantidproject.org"
] | mantid-publisher@builds.mantidproject.org |
ccab0aa42ec4c0223f0a0dc999e0a97fcb427b0c | 56cf6ed165c4fe90782dc03c60f5a976d33064a8 | /batch/rootplots/finalplots.py | 2118fdf2fdc0e850d7fae41f9e33d60ee63c8444 | [] | no_license | aminnj/scouting | 36b5a08927e8fa6061cbd1b70ce23b674c56bcc1 | ed7bd442aaa1f53b21378d2a7fbf10ca7869ecc7 | refs/heads/master | 2021-06-24T05:09:20.248605 | 2021-04-11T05:11:27 | 2021-04-11T05:11:27 | 208,143,579 | 1 | 2 | null | 2019-11-25T22:49:29 | 2019-09-12T20:47:01 | Jupyter Notebook | UTF-8 | Python | false | false | 14,830 | py | import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import glob
from matplotlib.colors import LogNorm
from tqdm.auto import tqdm
import time
import re
import subprocess
import json
import requests
import uproot4
from yahist import Hist1D, Hist2D
def set_plotting_style():
    """Configure matplotlib rcParams for a ROOT/CMS-like plotting style.

    Sets fonts, legend layout, tick/label sizes, subplot margins and axis
    formatter behaviour globally. Call once before producing any figures.
    """
    from matplotlib import rcParams
    rcParams["font.family"] = "sans-serif"
    rcParams["font.sans-serif"] = ["Helvetica", "Arial", "Liberation Sans", "Bitstream Vera Sans", "DejaVu Sans"]
    rcParams['legend.fontsize'] = 11
    rcParams['legend.labelspacing'] = 0.2
    rcParams['hatch.linewidth'] = 0.5  # https://stackoverflow.com/questions/29549530/how-to-change-the-linewidth-of-hatch-in-matplotlib
    rcParams['axes.xmargin'] = 0.0  # rootlike, no extra padding within x axis
    rcParams['axes.formatter.use_mathtext'] = True
    rcParams['legend.framealpha'] = 0.65
    # NOTE: the original set 'axes.labelsize' twice with the same value;
    # the redundant duplicate assignment has been removed.
    rcParams['axes.labelsize'] = 'x-large'
    rcParams['axes.titlesize'] = 'large'
    rcParams['xtick.labelsize'] = 'large'
    rcParams['ytick.labelsize'] = 'large'
    rcParams['figure.subplot.hspace'] = 0.1
    rcParams['figure.subplot.wspace'] = 0.1
    rcParams['figure.subplot.right'] = 0.97
    rcParams['figure.subplot.top'] = 0.92
    rcParams['figure.max_open_warning'] = 0
    rcParams['figure.dpi'] = 100
    rcParams["axes.formatter.limits"] = [-5, 4]  # scientific notation if log(y) outside this
def add_cms_info_1d(ax, typ="Preliminary", lumi="101", xtype=0.105):
    """Stamp the CMS label, analysis type and luminosity above a 1D axes.

    ``lumi=None`` suppresses the integrated-luminosity number and prints
    only "(13 TeV)" on the right-hand side.
    """
    left_kwargs = dict(horizontalalignment='left', verticalalignment='bottom',
                       transform=ax.transAxes, name="Arial")
    ax.text(0.0, 1.01, "CMS", weight="bold", size=15, **left_kwargs)
    ax.text(xtype, 1.01, typ, style="italic", size=14, **left_kwargs)
    if lumi is None:
        lumi_text = "(13 TeV)"
    else:
        lumi_text = "%s fb${}^\mathregular{-1}$ (13 TeV)" % (lumi)
    ax.text(0.99, 1.01, lumi_text, horizontalalignment='right',
            verticalalignment='bottom', transform=ax.transAxes, size=13)
def add_cms_info_2d(ax, typ="Preliminary", lumi="101", xtype=0.15):
    """Stamp the CMS label, analysis type and luminosity above a 2D axes.

    Bug fix: ``typ`` was previously ignored and the second label was
    hard-coded to "Preliminary" (unlike :func:`add_cms_info_1d`); it is now
    honoured. The default value keeps the previous output unchanged.
    """
    ax.text(0.0, 1.01, "CMS", horizontalalignment='left', verticalalignment='bottom',
            transform=ax.transAxes, name="Arial", weight="bold", size=14)
    ax.text(xtype, 1.01, typ, horizontalalignment='left', verticalalignment='bottom',
            transform=ax.transAxes, name="Arial", style="italic", size=13)
    # Raw string avoids the invalid "\m" escape warning; value is identical.
    ax.text(0.99, 1.01, r"%s fb${}^\mathregular{-1}$ (13 TeV)" % (lumi),
            horizontalalignment='right', verticalalignment='bottom',
            transform=ax.transAxes, size=12)
def to_yahist(h, overflow=False):
    """Convert an uproot TH1/TH2 object into a yahist Hist1D/Hist2D.

    For 1D histograms, ``overflow=True`` folds the under/overflow bins into
    the first/last visible bins before building the Hist1D. 2D histograms
    are always converted without flow bins.
    """
    if "TH1" not in str(type(h)):
        counts, ex, ey = h.to_numpy(flow=False)
        return Hist2D.from_bincounts(counts.T, (ex, ey))
    counts, edges = h.to_numpy(flow=overflow)
    if overflow:
        # Fold flow bins into the visible edges, then drop them.
        counts[1] += counts[0]
        counts[-2] += counts[-1]
        counts = counts[1:-1]
        edges = edges[1:-1]
    return Hist1D.from_bincounts(counts, edges)
# Apply the global CMS-style matplotlib configuration before any figure is made.
set_plotting_style()
# Earlier colour scheme (explicit RGB triplets) kept for reference:
# model_info = {
# ("bphi",0.5,1): dict(label=r"B$\rightarrow\phi$ (0.5GeV,c$\tau$=1mm)", color=[0.98,0.85,0.29], fname="output_BToPhi_mphi0p5_ctau1mm.root"),
# ("bphi",2,10): dict(label=r"B$\rightarrow\phi$ (2GeV,c$\tau$=10mm)", color=[0.94,0.58,0.21], fname="output_BToPhi_mphi2_ctau10mm.root"),
# ("bphi",4,100): dict(label=r"B$\rightarrow\phi$ (4GeV,c$\tau$=100mm)", color=[0.92,0.28,0.15], fname="output_BToPhi_mphi4_ctau100mm.root"),
# ("hzd",2,100): dict(label=r"H$\rightarrow \mathrm{Z_d Z_d}$ (2GeV,c$\tau$=100mm)", color=[0.46,0.98,0.73], fname="output_HToZdZdTo2Mu2X_mzd2_ctau100mm.root"),
# ("hzd",8,10): dict(label=r"H$\rightarrow \mathrm{Z_d Z_d}$ (8GeV,c$\tau$=10mm)", color=[0.33,0.73,0.98], fname="output_HToZdZdTo2Mu2X_mzd8_ctau10mm.root"),
# ("hzd",15,1): dict(label=r"H$\rightarrow \mathrm{Z_d Z_d}$ (15GeV,c$\tau$=1mm)", color=[0.53,0.10,0.96], fname="output_HToZdZdTo2Mu2X_mzd15_ctau1mm.root"),
# }
# Signal-sample metadata, keyed by (model, mass-like parameter, ctau in mm):
# legend label, matplotlib colour-cycle entry, and MC histogram file name.
model_info = {
    ("bphi",0.5,1): dict(label=r"B$\rightarrow\phi$ (0.5GeV,c$\tau$=1mm)", color="C0", fname="output_BToPhi_mphi0p5_ctau1mm.root"),
    ("bphi",2,10): dict(label=r"B$\rightarrow\phi$ (2GeV,c$\tau$=10mm)", color="C1", fname="output_BToPhi_mphi2_ctau10mm.root"),
    ("bphi",4,100): dict(label=r"B$\rightarrow\phi$ (4GeV,c$\tau$=100mm)", color="C2", fname="output_BToPhi_mphi4_ctau100mm.root"),
    ("hzd",2,100): dict(label=r"H$\rightarrow \mathrm{Z_d Z_d}$ (2GeV,c$\tau$=100mm)", color="C4", fname="output_HToZdZdTo2Mu2X_mzd2_ctau100mm.root"),
    ("hzd",8,10): dict(label=r"H$\rightarrow \mathrm{Z_d Z_d}$ (8GeV,c$\tau$=10mm)", color="C3", fname="output_HToZdZdTo2Mu2X_mzd8_ctau10mm.root"),
    ("hzd",15,1): dict(label=r"H$\rightarrow \mathrm{Z_d Z_d}$ (15GeV,c$\tau$=1mm)", color="C5", fname="output_HToZdZdTo2Mu2X_mzd15_ctau1mm.root"),
}
# Create the output directory for all plots produced below.
os.system("mkdir -p plots_selection")
def plot_1():
    """Signal-MC lxy distribution before and after the material veto."""
    with uproot4.open("mcoutputs/main/output_HToZdZdTo2Mu2X_mzd8_ctau10mm.root") as fin:
        fig, ax = plt.subplots()
        label = model_info[("hzd",8,10)]["label"]
        h_before = to_yahist(fin["DV_rho_tot"], overflow=False).rebin(2)
        h_after = to_yahist(fin["DV_rho_matveto"], overflow=False).rebin(2)
        # Veto efficiency quoted in the legend, in percent.
        eff = h_after.integral / h_before.integral * 100.
        h_before.plot(ax=ax, label=f"{label}, before veto", color="k", lw=2.0)
        h_after.plot(ax=ax, label=f"{label}, after veto (eff. = {eff:.1f}%)", color="C3", lw=1.0)
        add_cms_info_1d(ax, lumi=None, typ="Simulation")
        ax.set_ylim(bottom=0.)
        ax.set_ylabel("Unweighted events", ha="right", y=1.)
        ax.set_xlabel(r"$l_\mathrm{xy}$ (cm)", ha="right", x=1., labelpad=-1.0)
        fname = "plots_selection/signal_passL1_lxy_materialveto.pdf"
        print(fname)
        fig.savefig(fname)
        os.system(f"ic {fname}")
def plot_2():
    """Data lxy distribution before and after the material veto (log-y).

    Fix: the original computed a veto efficiency into a local ``eff`` that
    was never used in the legend or elsewhere; the dead computation has
    been removed.
    """
    with uproot4.open("dataoutputs/main/output.root") as f:
        fig, ax = plt.subplots()
        label = r"Data"
        h1 = to_yahist(f["DV_rho_tot"], overflow=False)
        h1.plot(ax=ax, label=f"{label}, before veto", color="k", lw=2.0)
        h2 = to_yahist(f["DV_rho_matveto"], overflow=False)
        h2.plot(ax=ax, label=f"{label}, after veto", color="C3", lw=1.0)
        add_cms_info_1d(ax)
        ax.set_yscale("log")
        ax.set_ylabel("Events", ha="right", y=1.)
        ax.set_xlabel(r"$l_\mathrm{xy}$ (cm)", ha="right", x=1., labelpad=-1.0)
        fname = "plots_selection/data_passL1_lxy_materialveto.pdf"
        print(fname)
        fig.savefig(fname)
        os.system(f"ic {fname}")
def plot_3():
    """2D maps of DV positions (x-y and rho-z), before and after the veto."""
    # Histogram name lookup replaces the original if/elif chain.
    hist_names = {
        ("xy", "all"): "DV_y_vs_x_tot",
        ("xy", "pass"): "DV_y_vs_x_matveto",
        ("rhoz", "all"): "DV_rho_vs_z_tot",
        ("rhoz", "pass"): "DV_rho_vs_z_matveto",
    }
    with uproot4.open("dataoutputs/main/output.root") as f:
        for saxis in ["xy", "rhoz"]:
            if saxis == "xy":
                xlabel, ylabel = "DV x (cm)", "DV y (cm)"
            else:
                xlabel, ylabel = "DV z (cm)", r"DV $\rho$ (cm)"
            for which in ["all", "pass"]:
                fig, ax = plt.subplots()
                h = to_yahist(f[hist_names[(saxis, which)]])
                h.plot(ax=ax, logz=True, cmap="viridis")
                add_cms_info_2d(ax)
                ax.set_xlabel(xlabel)
                ax.set_ylabel(ylabel)
                # Square pixels for x-y; stretch rho vs z for readability.
                ax.set_aspect(1.0 if saxis == "xy" else 2.5)
                fname = f"plots_selection/passL1_DV_{saxis}_{which}.pdf"
                print(fname)
                fig.savefig(fname)
                os.system(f"ic {fname}")
def plot_4():
    """Profile of average reconstructed nDV / nMuon versus run number."""
    # Per-quantity y-axis windows for the profile plots.
    ylims = {"nDV": (1.4, 1.6), "nMuon": (2.4, 2.6)}
    with uproot4.open("dataoutputs/nm1/output.root") as f:
        for which in ["nDV", "nMuon"]:
            fig, ax = plt.subplots()
            # Restrict to runs >= 300000, coarsen the run axis, then profile.
            prof = to_yahist(f[f"{which}_vs_run"]).restrict(300000, None).rebin(2, 1).profile("x")
            prof.plot(ax=ax, show_errors=True, ms=2., color="k")
            add_cms_info_1d(ax)
            ax.set_xlabel("run number")
            ax.set_ylabel(f"average reco. {which}")
            ax.set_ylim(*ylims[which])
            fname = f"plots_selection/passL1_{which}_vs_run.pdf"
            print(fname)
            fig.savefig(fname)
            os.system(f"ic {fname}")
def plot_5():
    """Overlay data and signal-MC N-1 distributions of DV quality variables.

    Fixes relative to the original:
    - both ROOT files are now opened with context managers, so they are
      closed even if plotting raises (previously ``close()`` was only
      reached on the success path);
    - the unused local ``ylabel`` has been removed;
    - the lxy axis label is a raw string, avoiding the invalid "\\m" escape
      warning (string value unchanged).
    """
    with uproot4.open("dataoutputs/nm1/output.root") as f_data, \
         uproot4.open("mcoutputs/nm1/output_HToZdZdTo2Mu2X_mzd8_ctau10mm.root") as f_mc:
        for which, xlabel in [
                ("xError", "DV x Error (cm)"),
                ("yError", "DV y Error (cm)"),
                ("zError", "DV z Error (cm)"),
                ("chi2ndof", "DV chi2/ndof"),
                ("lxy", r"$l_\mathrm{xy}$ (cm)"),
                ]:
            fig, ax = plt.subplots()
            hname = f"{which}_inc"
            h1 = to_yahist(f_data[hname])
            h1.plot(ax=ax, color="k", label="Data")
            label = model_info[("hzd",8,10)]["label"]
            h2 = to_yahist(f_mc[hname])
            h2 *= h1.integral / h2.integral  # normalize MC to the data yield
            h2.plot(ax=ax, color="C3", label=label)
            add_cms_info_1d(ax)
            ax.set_xlabel(xlabel, ha="right", x=1., labelpad=-1.0)
            ax.set_ylabel("Events", ha="right", y=1.)
            ax.set_yscale("log")
            fname = f"plots_selection/passL1_DV_{which}.pdf"
            print(fname)
            fig.savefig(fname)
            os.system(f"ic {fname}")
def plot_6():
    """Compare normalized data (low/high dimuon mass) and signal-MC shapes
    of the selection variables, one plot per variable and lxy bin.

    Histograms whose names contain "_lxy" are cached from the data file and
    from each signal-MC file in ``model_info``; for every (variable, lxy-bin)
    combination a normalized overlay is drawn, with an optional vertical
    line marking the selection cut value.
    """
    # Cache all lxy-binned data histograms, keyed by their cycle-stripped
    # ROOT name (the ";1" cycle suffix is removed).
    f_data = uproot4.open("dataoutputs/main/output.root")
    hists_data = dict()
    for k,v in f_data.items():
        if "_lxy" not in k: continue
        k = str(k).rsplit(";",1)[0]
        hists_data[k] = to_yahist(v)
    f_data.close()
    # Same caching for each signal model; histograms are re-wrapped as
    # Hist1D to attach the model's legend label and colour.
    hists_mc = dict()
    for mk,model in model_info.items():
        print(mk, model)
        hists_mc[mk] = dict()
        fname = model["fname"]
        f_mc = uproot4.open(f"mcoutputs/main/{fname}")
        for k,v in f_mc.items():
            if "_lxy" not in k: continue
            k = str(k).rsplit(";",1)[0]
            h = to_yahist(v)
            h = Hist1D(h, label=model["label"], color=model["color"])
            hists_mc[mk][k] = h
        f_mc.close()
    # Tuples of (histogram-name prefix, x-axis label, log-y?, cut value to
    # draw as a vertical line, or None for no line).
    for which, xlabel, log, line in [
            ("dimupt_full", r"dimuon $p_\mathrm{T}$", False, 25.),
            ("mu2pt_trig", r"trailing muon $p_\mathrm{T}$", False, None),
            ("mu2eta_trig", r"trailing muon $\eta$", False, None),
            ("mu2chi2ndof_trig", r"Trailing muon $\chi^2/\mathrm{ndof}$", False, 3.),
            ("mu2trkmeas_trig", r"Trailing muon tracker layers with meas.", False, 6.),
            ("absdphimudv_passid", r"|$\Delta\phi(\mu,\vec{\mathrm{DV}})$|", True, 0.02),
            ("absdphimumu_passid", r"|$\Delta\phi(\mu_1,\mu_2)$|", False, 2.8),
            ("mu2trackiso_passkin", r"Trailing muon relative track isolation", True, 0.1),
            ("mu2drjet_passkin", r"$\Delta R(\mu_2,\mathrm{jet})$", True, 0.3),
            ("mu2excesshits_baseline", r"Trailing muon n(valid-expected) pixel hits", False, 0.5),
            ("logabsetaphi_baseline", r"$\mathrm{log_{10}abs}(\Delta\eta_{\mu\mu}/\Delta\phi_{\mu\mu})$", False, 1.25),
            ("mindxy_extraiso", r"minimum $|d_\mathrm{xy}|$", True, None),
            ("mindxysig_extraiso", r"minimum $d_\mathrm{xy}$ significance", True, 2.),
            ("mindxyscaled_extraiso", r"minimum lifetime-scaled |$d_\mathrm{xy}$|", True, 0.1),
            ("mu2pt_incl", r"trailing muon $p_\mathrm{T}$", False, None),
            ("mu2eta_incl", r"trailing muon $\eta$", False, None),
            ("mu2chi2ndof_incl", r"Trailing muon $\chi^2/\mathrm{ndof}$", False, 3.),
            ("mu2trkmeas_incl", r"Trailing muon tracker layers with meas.", False, 6.),
            ("absdphimudv_incl", r"|$\Delta\phi(\mu\mu,\vec{\mathrm{DV}})$|", True, 0.02),
            ("absdphimumu_incl", r"|$\Delta\phi(\mu_1,\mu_2)$|", False, 2.8),
            ("mu2trackiso_incl", r"Trailing muon relative track isolation", True, 0.1),
            ("mu2drjet_incl", r"$\Delta R(\mu_2,\mathrm{jet})$", True, 0.3),
            ("mu2excesshits_incl", r"Trailing muon n(valid-expected) pixel hits", False, 0.5),
            ("logabsetaphi_incl", r"$\mathrm{log_{10}abs}(\Delta\eta_{\mu\mu}/\Delta\phi_{\mu\mu})$", False, 1.25),
            ("mindxy_incl", r"minimum $|d_\mathrm{xy}|$", True, None),
            ("mindxysig_incl", r"minimum $d_\mathrm{xy}$ significance", True, 2.),
            ("mindxyscaled_incl", r"minimum lifetime-scaled |$d_\mathrm{xy}$|", True, 0.1),
            ]:
        # Collect every lxy bin available for this variable by stripping the
        # trailing mass-region suffix ("_lowmass"/"_highmass") from the keys.
        hnames = set([k.rsplit("_",1)[0] for k in hists_data.keys() if k.startswith(which)])
        for basehname in hnames:
            # Decode the lxy bin edges from a name fragment like "lxy0p1to1p0"
            # ("p" stands in for the decimal point).
            lxystr = basehname.split("_lxy",1)[1].split("_")[0]
            lxylow, lxyhigh = list(map(float, lxystr.replace("p",".").split("to")))
            fig, ax = plt.subplots()
            h = hists_data[f"{basehname}_lowmass"]
            N = h.integral
            h = h.normalize()
            label = "Data (mass < 5 GeV)"
            # Only the inclusive dimuon-pT plot quotes the raw event counts.
            if which in ["dimupt_full"]:
                label += f" [N = {int(N):,}]"
            h.plot(ax=ax, show_errors=True, color="k", label=label, ms=3.5)
            h = hists_data[f"{basehname}_highmass"]
            N = h.integral
            h = h.normalize()
            label = "Data (mass > 5 GeV)"
            if which in ["dimupt_full"]:
                label += f" [N = {int(N):,}]"
            h.plot(ax=ax, show_errors=True, color="b", label=label, ms=3.5)
            # Overlay every signal model (mass-inclusive), shape-normalized.
            for mk in hists_mc.keys():
                h = hists_mc[mk][f"{basehname}_allmass"]
                h = h.normalize()
                h.plot(ax=ax, histtype="step")
            # Vertical dashed line marking the selection cut, if defined.
            if line is not None:
                ax.axvline(line,color="red",linestyle="--")
            add_cms_info_1d(ax)
            ax.set_xlabel(xlabel, ha="right", x=1., labelpad=-1.0)
            ax.set_ylabel("Fraction of events", ha="right", y=1.)
            ax.set_title(rf"{lxylow} cm < $l_\mathrm{{xy}}$ < {lxyhigh} cm", color=(0.2,0.2,0.2))
            if log:
                ax.set_yscale("log")
            fname = f"plots_selection/{basehname}.pdf"
            print(fname)
            fig.savefig(fname)
            # os.system(f"ic {fname}")
if __name__ == "__main__":
    # Individual plots are toggled by (un)commenting the calls below;
    # currently only plot_6 (lxy-binned selection-variable comparisons)
    # is produced when the script is run directly.
    pass
    # # plot_1()
    # plot_2()
    # plot_3()
    # # plot_4()
    # plot_5()
    plot_6()
| [
"amin.nj@gmail.com"
] | amin.nj@gmail.com |
44fc97f12a2e510958c28f70f62cf21130b4828a | 1f67ed4f987bc85176cce05dfd6c42fd48441190 | /maskrcnn.py | 9082d048a38b3918a86e0477138949a626baa3c2 | [
"MIT"
] | permissive | zhjpqq/mask-rcnn-pytorch | b569c9aa9342f0db9cc040653f1387b451f3bac0 | 9c0fd6c47bc8dc89b8cf6535cba723805174978b | refs/heads/master | 2021-04-15T08:46:08.735296 | 2018-03-20T05:19:39 | 2018-03-20T05:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,997 | py | from backbone.resnet_101_fpn import ResNet_101_FPN
from proposal.rpn import RPN
from head.cls_bbox import ClsBBoxHead_fc as ClsBBoxHead
from head.mask import MaskHead
from pooling.roi_align import RoiAlign
from util.utils import calc_iou, calc_maskrcnn_loss, coord_corner2center, coord_center2corner
import os
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from configparser import ConfigParser
# TODO: speed up training and inference
# TODO: optimize GPU memory consumption
class MaskRCNN(nn.Module):
"""Mask R-CNN model.
References: https://arxiv.org/pdf/1703.06870.pdf
Notes: In comments below, we assume N: batch size, M: number of roi,
C: feature map channel, H: image height, W: image width,
(x1, y1, x2, y2) stands for top-left and bottom-right coord of bounding box,
without normalization, (x, y, w, h) stands for center coord, height and
width of bounding box.
"""
def __init__(self, num_classes, img_size):
super(MaskRCNN, self).__init__()
self.config = ConfigParser()
self.config.read(os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.ini"))
self.num_classes = num_classes
self.fpn = ResNet_101_FPN()
self.rpn = RPN(dim=256)
self.roi_align = RoiAlign(grid_size=(14, 14))
self.cls_box_head = ClsBBoxHead(depth=256, pool_size=14, num_classes=num_classes)
self.mask_head = MaskHead(depth=256, pool_size=14, num_classes=num_classes,
img_size=img_size)
    def forward(self, x, gt_classes=None, gt_bboxes=None, gt_masks=None):
        """
        Args:
            x: image data. NxCxHxW.
            gt_classes: NxM, ground truth class ids. Required in training mode.
            gt_bboxes: NxMx4(x1, y1, x2, y2), ground truth bounding boxes.
                Required in training mode.
            gt_masks: NxMxHxW, ground truth masks. Required in training mode.
        Returns:
            In eval mode: ``result`` only. In training mode: ``(result, loss)``
            where ``loss`` is the summed RPN classification/regression loss
            plus the head loss from :func:`calc_maskrcnn_loss`.

            result(list of lists of dict): Outer list composed of mini-batch, inner list
                composed of detected objects per image, dict composed of "cls_pred": class id,
                "bbox_pred" : bounding-box with tuple (x1, y1, x2, y2), "mask_pred" : mask
                prediction with tuple (H,W).
                So, result[0][0]['cls_pred'] stands for class id of the first detected objects
                in first image of mini-batch.
        """
        # P6 is only fed to the RPN; the prediction heads pool from P2-P5.
        p2, p3, p4, p5, p6 = self.fpn(x)
        rpn_features_rpn = [p2, p3, p4, p5, p6]
        fpn_features = [p2, p3, p4, p5]
        # Per-image (height, width) tensor on the same device/dtype as x,
        # used by the RPN to clip proposals to the image bounds.
        img_shape = x.data.new(x.size(0), 2).zero_()
        img_shape[:, 0] = x.size(2)
        img_shape[:, 1] = x.size(3)
        rois, rpn_loss_cls, rpn_loss_bbox = self.rpn(rpn_features_rpn, gt_bboxes, img_shape)
        cls_targets, bbox_targets, mask_targets = None, None, None
        if self.training:
            # Training requires ground truth to sample rois and build targets.
            assert gt_classes is not None
            assert gt_bboxes is not None
            assert gt_masks is not None
            gen_result = self._generate_targets(rois, gt_classes, gt_bboxes, gt_masks)
            rois, cls_targets, bbox_targets, mask_targets = gen_result
        # Pool features for each roi from the FPN level matching its scale.
        rois_pooling = self._roi_align_fpn(fpn_features, rois, x.size(2), x.size(3))
        cls_prob, bbox_reg = self.cls_box_head(rois_pooling)
        mask_prob = self.mask_head(rois_pooling)
        result = self._process_result(x.size(0), rois, cls_prob, bbox_reg, mask_prob)
        if self.training:
            # reshape back to (NxM) from NxM
            cls_targets = cls_targets.view(-1)
            bbox_targets = bbox_targets.view(-1, bbox_targets.size(2))
            mask_targets = mask_targets.view(-1, mask_targets.size(2), mask_targets.size(3))
            maskrcnn_loss = calc_maskrcnn_loss(cls_prob, bbox_reg, mask_prob, cls_targets,
                                               bbox_targets, mask_targets)
            loss = rpn_loss_cls + rpn_loss_bbox + maskrcnn_loss
            return result, loss
        else:
            return result
    def _process_result(self, batch_size, proposals, cls_prob, bbox_reg, mask_prob):
        """Process heads output to get the final result.

        Decodes per-RoI class probabilities, bbox regression deltas and mask
        logits into per-image lists of prediction dicts (see ``forward``).
        Background (class id 0) and predictions below the configured class
        threshold are dropped (their dict keeps all-None values).
        """
        result = []
        # reshape back to NxM from (NxM)
        cls_prob = cls_prob.view(batch_size, -1, cls_prob.size(1))
        bbox_reg = bbox_reg.view(batch_size, -1, bbox_reg.size(1), bbox_reg.size(2))
        mask_prob = mask_prob.view(batch_size, -1, mask_prob.size(1), mask_prob.size(2),
                                   mask_prob.size(3))
        # Per-RoI winning class and its probability.
        cls_id_prob, cls_id = torch.max(cls_prob, 2)
        cls_threshold = float(self.config['Test']['cls_threshold'])
        # remove background and predicted ids whose probability below threshold.
        keep_index = (cls_id > 0) & (cls_id_prob >= cls_threshold)
        for i in range(cls_prob.size(0)):
            objects = []
            for j in range(cls_prob.size(1)):
                pred_dict = {'cls_pred': None, 'bbox_pred': None, 'mask_pred': None}
                if keep_index[i, j].all():
                    pred_dict['cls_pred'] = cls_id[i, j]
                    # Bbox deltas are class-specific: index by the winning class.
                    dx, dy, dw, dh = bbox_reg[i, j, cls_id[i, j], :]
                    # Apply the R-CNN inverse transform: delta + proposal -> box.
                    x, y, w, h = coord_corner2center(proposals[i, j, :])
                    px, py = w * dx + x, h * dy + y
                    pw, ph = w * torch.exp(dw), h * torch.exp(dh)
                    px1, py1, px2, py2 = coord_center2corner((px, py, pw, ph))
                    pred_dict['bbox_pred'] = (px1, py1, px2, py2)
                    # Binarize the mask probabilities at the configured threshold.
                    mask_threshold = self.config['Test']['mask_threshold']
                    pred_dict['mask_pred'] = mask_prob[i, j] >= mask_threshold
                objects.append(pred_dict)
            result.append(objects)
        return result
    def _generate_targets(self, proposals, gt_classes, gt_bboxes, gt_masks):
        """Process proposals from RPN to generate rois to feed predict heads, and
        corresponding head targets.

        Each (proposal, gt) pair is stored in a positive (IoU >= 0.5) or
        negative slot along a size-2 axis, then the pairs are flattened and
        sampled to roughly a 1:3 positive:negative ratio.

        Args:
            proposals: NxMx5(idx, x1, y1, x2, y2), proposals from RPN.
            gt_classes: NxR, ground truth class ids.
            gt_bboxes: NxRx4(x1, y1, x2, y2), ground truth bounding boxes.
            gt_masks: NxRxHxW, ground truth masks.
        Returns:
            rois: NxSx5(idx, x1, y1, x2, y2), rois to feed RoIAlign.
            cls_targets: NxS, train targets for classification.
            bbox_targets: NxSx4(x, y, w, h), train targets for bounding box regression.
            mask_targets: NxSxHxW, train targets for mask prediction.
        Notes: In above, M: number of rois from RPN, R: number of ground truth objects,
            S: number of rois to train.
        """
        train_rois_num = int(self.config['Train']['train_rois_num'])
        batch_size = proposals.size(0)
        num_proposals = proposals.size(1)
        num_gt_bboxes = gt_bboxes.size(1)
        mask_size = (28, 28)
        # Axis of size 2 = [negative slot, positive slot] for each (proposal, gt) pair.
        rois = proposals.new(batch_size, num_proposals, num_gt_bboxes, 2, 5).zero_()
        cls_targets = gt_classes.new(batch_size, num_proposals, num_gt_bboxes, 2).zero_()
        bbox_targets = gt_bboxes.new(batch_size, num_proposals, num_gt_bboxes, 2, 4).zero_()
        mask_targets = gt_masks.new(batch_size, num_proposals, num_gt_bboxes, 2, mask_size[0],
                                    mask_size[1]).zero_()
        for i in range(batch_size):
            for j in range(num_proposals):
                for k in range(num_gt_bboxes):
                    iou = calc_iou(proposals[i, j, 1:], gt_bboxes[i, k, :])
                    # IoU >= 0.5 -> positive slot (1), otherwise negative slot (0).
                    pos_neg_idx = 1
                    if iou < 0.5:
                        pos_neg_idx = 0
                    rois[i, j, k, pos_neg_idx, :] = proposals[i, j, :]
                    cls_targets[i, j, k, pos_neg_idx] = gt_classes[i, k]
                    # transform bbox coord from (x1, y1, x2, y2) to (x, y, w, h).
                    x, y, w, h = coord_corner2center(proposals[i, j, 1:])
                    gt_x, gt_y, gt_w, gt_h = coord_corner2center(gt_bboxes[i, k, :])
                    # calculate bbox regression targets, see RCNN paper for the formula.
                    tx, ty = (gt_x - x) / w, (gt_y - y) / h
                    tw, th = torch.log(gt_w / w), torch.log(gt_h / h)
                    bbox_targets[i, j, k, pos_neg_idx, :] = torch.cat([tx, ty, tw, th])
                    # mask target is intersection between proposal and ground truth mask.
                    # downsample to size typical 28x28.
                    # NOTE(review): the mask is indexed [x1:x2, y1:y2]; whether gt_masks
                    # is (H, W) or (W, H) per object is not visible here — confirm.
                    x1, y1, x2, y2 = proposals[i, j, 1:]
                    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
                    if x1 < x2 and y1 < y2:
                        mask = gt_masks[i, k, x1:x2, y1:y2].unsqueeze(0)
                        mask_resize = F.adaptive_avg_pool2d(Variable(mask), output_size=mask_size)
                        mask_targets[i, j, k, pos_neg_idx, :, :] = mask_resize.data
        # Flatten the (proposal, gt) pair axes into one candidate axis.
        rois = rois.view(batch_size, num_proposals * num_gt_bboxes, 2, -1)
        cls_targets = cls_targets.view(batch_size, num_proposals * num_gt_bboxes, 2)
        bbox_targets = bbox_targets.view(batch_size, num_proposals * num_gt_bboxes, 2, -1)
        mask_targets = mask_targets.view(batch_size, num_proposals * num_gt_bboxes, 2,
                                         mask_size[0], mask_size[1])
        # train_rois should have 1:3 positive negative ratio, see Mask R-CNN paper.
        rois_neg = rois[:, :, 0, :]
        rois_pos = rois[:, :, 1, :]
        cls_targets_neg = cls_targets[:, :, 0]
        cls_targets_pos = cls_targets[:, :, 1]
        bbox_targets_neg = bbox_targets[:, :, 0, :]
        bbox_targets_pos = bbox_targets[:, :, 1, :]
        mask_targets_pos = mask_targets[:, :, 1, :, :]
        neg_num = rois_neg.size(1)
        pos_num = rois_pos.size(1)
        # 75% negatives, remainder positives, clamped to what is available.
        sample_size_neg = int(0.75 * train_rois_num)
        sample_size_pos = train_rois_num - sample_size_neg
        sample_size_neg = sample_size_neg if sample_size_neg <= neg_num else neg_num
        sample_size_pos = sample_size_pos if sample_size_pos <= pos_num else pos_num
        # Same random sample indices are reused for every image in the batch.
        sample_index_neg = random.sample(range(neg_num), sample_size_neg)
        sample_index_pos = random.sample(range(pos_num), sample_size_pos)
        rois_neg_sampled = rois_neg[:, sample_index_neg, :]
        rois_pos_sampled = rois_pos[:, sample_index_pos, :]
        cls_targets_neg_sampled = cls_targets_neg[:, sample_index_neg]
        cls_targets_pos_sampled = cls_targets_pos[:, sample_index_pos]
        bbox_targets_neg_sampled = bbox_targets_neg[:, sample_index_neg, :]
        bbox_targets_pos_sampled = bbox_targets_pos[:, sample_index_pos, :]
        mask_targets_pos_sampled = mask_targets_pos[:, sample_index_pos, :, :]
        rois = torch.cat([rois_neg_sampled, rois_pos_sampled], 1)
        cls_targets = torch.cat([cls_targets_neg_sampled, cls_targets_pos_sampled], 1)
        bbox_targets = torch.cat([bbox_targets_neg_sampled, bbox_targets_pos_sampled], 1)
        # mask targets only define on positive rois.
        mask_targets = mask_targets_pos_sampled
        return rois, Variable(cls_targets), Variable(bbox_targets), Variable(mask_targets)
    def _roi_align_fpn(self, fpn_features, rois, img_width, img_height):
        """When use fpn backbone, set RoiAlign use different levels of fpn feature pyramid
        according to RoI size (FPN paper, Eq. 1: level ~ log2(sqrt(wh)/alpha)).

        Args:
            fpn_features: (p2, p3, p4, p5),
            rois: NxMx5(n, x1, y1, x2, y2), RPN proposals.
            img_width: Input image width.
            img_height: Input image height.
        Returns:
            rois_pooling: (NxM)xCxHxW, rois after use RoIAlign.
        """
        # Flatten NxMx4 to (NxM)x4
        rois_reshape = rois.view(-1, rois.size(-1))
        bboxes = rois_reshape[:, 1:]
        bbox_indexes = rois_reshape[:, 0]
        # Buckets: pooled features per image, and boxes/indexes per pyramid level.
        rois_pooling_batches = [[] for _ in range(rois.size(0))]
        bbox_levels = [[] for _ in range(len(fpn_features))]
        bbox_idx_levels = [[] for _ in range(len(fpn_features))]
        # iterate bbox to find which level of pyramid features to feed.
        for idx, bbox in enumerate(bboxes):
            # in feature pyramid network paper, alpha is 224 and image short side 800 pixels,
            # for using of small image input, like maybe short side 256, here alpha is
            # parameterized by image short side size.
            alpha = 224 * (img_width if img_width <= img_height else img_height) / 800
            bbox_width = torch.abs(rois.new([bbox[0] - bbox[2]]).float())
            bbox_height = torch.abs(rois.new([bbox[1] - bbox[3]]).float())
            log2 = torch.log(torch.sqrt(bbox_height * bbox_width)) / torch.log(
                rois.new([2]).float()) / alpha
            level = torch.floor(4 + log2) - 2  # minus 2 to make level 0 indexed
            # rois small or big enough may get level below 0 or above 3.
            level = int(torch.clamp(level, 0, 3))
            bbox = bbox.type_as(bboxes).unsqueeze(0)
            bbox_idx = rois.new([bbox_indexes[idx]]).int()
            bbox_levels[level].append(bbox)
            bbox_idx_levels[level].append(bbox_idx)
        # Pool each non-empty level in one RoIAlign call, then scatter results
        # back to their source image so ordering is grouped per image.
        for level in range(len(fpn_features)):
            if len(bbox_levels[level]) != 0:
                bbox = Variable(torch.cat(bbox_levels[level]))
                bbox_idx = Variable(torch.cat(bbox_idx_levels[level]))
                roi_pool_per_level = self.roi_align(fpn_features[level], bbox, bbox_idx)
                for idx, batch_idx in enumerate(bbox_idx_levels[level]):
                    rois_pooling_batches[int(batch_idx)].append(roi_pool_per_level[idx])
        rois_pooling = torch.cat([torch.cat(i) for i in rois_pooling_batches])
        rois_pooling = rois_pooling.view(-1, fpn_features[0].size(1), rois_pooling.size(1),
                                         rois_pooling.size(2))
        return rois_pooling
| [
"geeshangxu@gmail.com"
] | geeshangxu@gmail.com |
8397f3dff7065369c3241f30cb194735f66008e4 | a8927d693f885e202021379da0244d5991fdcba5 | /classe3/exercice1.py | 557cb5348b2692c4435cc76b6c1362fb461548ff | [] | no_license | HassanHbar/pynet_ansible | 28c007193c612752b212763c3f38d0f5c024dc3b | a0cc9cd696bf1e9d0448876d39022da1140a55be | refs/heads/master | 2020-04-15T13:37:21.042826 | 2016-08-31T10:32:43 | 2016-08-31T10:32:43 | 58,642,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,315 | py | #!/usr/bin/env python
'''
Using SNMPv3 create a script that detects router configuration changes.
If the running configuration has changed, then send an email notification to
identifying the router that changed and the time that it changed.
'''
import cPickle as pickle
import datetime
import os.path
import email_helper
from snmp_helper import snmp_get_oid_v3, snmp_extract
#This constant permit us to know if there is a change during the first
# 5 minutes
RELOAD_WINDOW = 30000
ip_addr1 = "184.105.247.70"
ip_addr2 = "184.105.247.71"
a_user = 'pysnmp'
my_key = "galileo1"
auth_key = my_key
encrypt_key = my_key
snmp_user = (a_user, auth_key, encrypt_key)
pynet_rtr1 = (ip_addr1, 161)
pynet_rtr2 = (ip_addr2, 161)
net_dev_file = 'netdev.pkl'
def extract_snmp_data_from_devices(a_device):
    '''
    Query one device over SNMPv3 and return [sys_name, uptime, last_changed].

    OIDs queried (in return order):
      SYS_NAME          sysName (string),
      SYS_UPTIME        sysUpTime in hundredths of a second (int),
      RUN_LAST_CHANGED  ccmHistoryRunningLastChanged (int) — presumably the
                        Cisco config-management MIB; TODO confirm.
    Values that parse as int are returned as int; otherwise the raw string is
    kept (the ValueError fallback re-appends `value`, so it relies on the
    exception coming from int(), not from the SNMP call itself).
    '''
    RUN_LAST_CHANGED = '1.3.6.1.4.1.9.9.43.1.1.1.0'
    SYS_NAME = '1.3.6.1.2.1.1.5.0'
    SYS_UPTIME = '1.3.6.1.2.1.1.3.0'
    snmp_results = []
    for oid in (SYS_NAME, SYS_UPTIME, RUN_LAST_CHANGED):
        try:
            value = snmp_extract(snmp_get_oid_v3(a_device, snmp_user, oid=oid))
            snmp_results.append(int(value))
        except ValueError:
            snmp_results.append(value)
    return snmp_results
def extract_saved_data(file_name):
    '''
    Return the dict previously pickled to file_name.

    Returns an empty dict when the file does not exist, is empty, or cannot
    be unpickled.

    Fixes vs. the previous version: the file_name parameter is now actually
    used (it used to open the global net_dev_file regardless), and an empty
    pickle file no longer crashes — pickle.load raises EOFError at EOF, not
    the IOError that was being caught.
    '''
    # Check that the pickle file exists
    if not os.path.isfile(file_name):
        return {}
    net_devices = {}
    try:
        # binary mode: pickle data is bytes, not text
        with open(file_name, 'rb') as f:
            net_devices = pickle.load(f)
    except (EOFError, pickle.UnpicklingError, IOError):
        # empty or corrupt file -> behave as if nothing was saved
        pass
    return net_devices
def save_data_to_file(file_name, data_dict):
    '''
    Persist data_dict to file_name; the extension selects the output format.

    Only the 'pkl' (pickle) format is implemented; other extensions are
    silently ignored, matching the original behaviour. Raises ValueError
    when file_name has no extension at all.

    Generalized vs. the previous version: names containing extra dots
    (e.g. 'net.dev.pkl') are now accepted — os.path.splitext takes the last
    extension instead of requiring exactly one '.'. The file is opened in
    binary mode, which pickle requires on Python 3.
    '''
    _, ext = os.path.splitext(file_name)
    if not ext:
        raise ValueError("Invalid file name: {0}".format(file_name))
    out_format = ext[1:]  # drop the leading '.'
    if out_format == 'pkl':
        with open(file_name, 'wb') as f:
            pickle.dump(data_dict, f)
def email_notification(router, time):
    '''
    Send a change-notification e-mail for one router.

    `time` is a sysUpTime-style tick count in hundredths of a second; it is
    rendered once as a timedelta and reused in both the subject and body.
    '''
    sender = 'hassanh@mhdinfotech.com'
    recepient = 'hassanhbar@gmail.com'
    # format the change time once instead of twice
    when = str(datetime.timedelta(seconds=time/100))
    subject = router + ' has a configuration change at ' + when
    message = '''
    Hi,
    this is to inform you that {0} had a configuration change at {1}.
    Best regards,
    Hassan HBAR.
    '''.format(router, when)
    email_helper.send_mail(recepient, subject, message, sender)
def main():
    '''
    Check if the running-configuration has changed, send an email notification when
    this occurs. the logic here is the following:
    1) We extract saved data
    2) we request SNMP data
    3) We compare requested SNMP data and saved data to define if config was changed
    4) save SNMP data in the pickle file

    Python 2 script: uses print statements.
    '''
    snmp_data = {}  # NOTE(review): never used below — candidate for removal
    current_data = {}
    saved_data = extract_saved_data(net_dev_file)
    for a_device in (pynet_rtr1, pynet_rtr2):
        device_name, uptime, last_changed = extract_snmp_data_from_devices(a_device)
        current_data[device_name] = {'device_name':device_name,\
                                     'uptime':uptime, 'last_changed':last_changed}
        print "\nConnected to device = {0}".format(device_name)
        print "Last changed timestamp = {0}".format(last_changed)
        print "Uptime = {0}".format(uptime)
        # see if this device has been previously saved
        if device_name in saved_data.keys():
            snmp_saved_data = saved_data[device_name]
            print "{0} Already Saved {1}".format(device_name, (35 - len(device_name))*'.'),
            #Check for a reboot (did uptime decrease or last_changed decrease?)
            if uptime < snmp_saved_data['uptime'] or last_changed < snmp_saved_data['last_changed']:
                # RELOAD_WINDOW: change within the first 5 minutes after a
                # reload is attributed to the reload itself, not an edit
                if last_changed <= RELOAD_WINDOW:
                    print "DEVICE RELOADED...not changed"
                else:
                    print "DEVICE RELOADED...and changed, email notification is sent"
                    email_notification(device_name, last_changed)
            # running-config last_changed is the same
            elif last_changed == snmp_saved_data['last_changed']:
                print "not changed"
            # running-config was modified
            elif last_changed > snmp_saved_data['last_changed']:
                print "CHANGED, email notification is sent"
                email_notification(device_name, last_changed)
        else:
            # New device, just save it
            print "{0} {1}".format(device_name, (35 - len(device_name))*'.'),
            print "saving new device"
    # Write the devices to pickle file
    save_data_to_file(net_dev_file, current_data)
if __name__ == "__main__":
main()
| [
"Hassanhbar@gmail.com"
] | Hassanhbar@gmail.com |
42a51fbfbf765fe3650c8ab9c41927a8259c62ff | 9a0ada115978e9600ad7f1eab65fcc8825f637cf | /work_in_progress/_old/stage_aligment_convert/remove_stage_point.py | 45542356653d90923ad1ca5276940178c3a9f832 | [] | no_license | ver228/work-in-progress | c1971f8d72b9685f688a10e4c5a1b150fa0812da | ef5baecc324da4550f81edb0513d38f039ee3429 | refs/heads/master | 2018-12-16T22:18:55.457290 | 2018-09-14T09:27:49 | 2018-09-14T09:27:49 | 56,165,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,965 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 7 19:45:26 2017
@author: ajaver
"""
import tables
import numpy as np
import os
import pymysql
from tierpsy.analysis.contour_orient.correctVentralDorsal import switchCntSingleWorm
if __name__ == '__main__':
    # Pull every experiment row from the local single_worm_db and run the
    # ventral/dorsal contour switch on each existing skeletons file.
    conn = pymysql.connect(host='localhost', database='single_worm_db')
    cur = conn.cursor(pymysql.cursors.DictCursor)
    sql = '''
    SELECT *
    FROM experiments_full
    '''
    cur.execute(sql)
    f_data = cur.fetchall()
    for irow, row in enumerate(f_data):
        fpath = os.path.join(row['results_dir'], row['base_name'])
        masked_file = fpath + '.hdf5'  # NOTE(review): built but unused below
        skeletons_file = fpath + '_skeletons.hdf5'
        if os.path.exists(skeletons_file):
            # progress indicator: "<current> <total>"
            print(irow+1, len(f_data))
            switchCntSingleWorm(skeletons_file)
            # The block below is the previous behaviour (removing stage
            # alignment results and derived files); kept for reference.
#            with tables.File(skeletons_file, 'r+') as fid:
#                if '/stage_movement' in fid:
#                    exit_flag = fid.get_node('/stage_movement')._v_attrs['has_finished']
#                    if exit_flag > 0:
#                        frame_diffs = fid.get_node('/stage_movement/frame_diffs')[:]
#                        if exit_flag > 1 or np.any(frame_diffs<0):
#
#                            print(exit_flag, irow, row['base_name'])
#                            if '/stage_movement' in fid:
#                                fid.remove_node('/stage_movement', recursive=True)
#                            if '/provenance_tracking/STAGE_ALIGMENT' in fid:
#                                fid.remove_node('/provenance_tracking/STAGE_ALIGMENT', recursive=True)
#
#            for ext in ['_features.hdf5', '.wcon.zip']:
#                fname = fpath + ext
#                if os.path.exists(fname):
#                    os.remove(fname)
"ajaver@MRC-8791.local"
] | ajaver@MRC-8791.local |
62f18555406a30cd5f9b3427fe90ead7c3d09475 | 6f2ea51d289a3ffb91abbc318931adb8e800f17c | /02_pygame_loop.py | b762363a57878ef9a6ef36332521e431200e1da9 | [] | no_license | kathcode/PyGame | d91833ecfdcaefa31abd8d4c1994550b23f58167 | a4055eecab0a639c1168f1542665ec2921be8282 | refs/heads/master | 2018-09-18T21:07:37.274764 | 2018-06-06T05:01:40 | 2018-06-06T05:01:40 | 125,936,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | # Import the pygame library
import pygame
# Initialize the game engine
pygame.init()
# Colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
# Dimensions
dimensions = (700, 500)
screen = pygame.display.set_mode(dimensions)
# Window title
pygame.display.set_caption("Kath learning pygame")
# Iterate until the user clicks on the close button.
close_window = False
# It is used to manage how quickly the screen is updated
clock = pygame.time.Clock()
# ---------- Main Loop of the Program ----------
# Dictionary of events: maps a pygame event type to the message printed
# when that event is seen (QUIT additionally ends the loop below).
events = {
    pygame.QUIT: "The user requested to leave.",
    pygame.KEYDOWN: "The user pressed a key.",
    pygame.KEYUP: "The user released a key.",
    pygame.MOUSEBUTTONDOWN: "The user pressed a mouse button"
}
while not close_window:
    for event in pygame.event.get():
        # If the event is in the event dictionary
        if event.type in events:
            # Print the message
            print(events[event.type])
        # Close
        if event.type == pygame.QUIT:
            close_window = True
    # Cleare the screen
    screen.fill(WHITE)
    # Update the screen
    pygame.display.flip()
    # Limited to 20 frames per second
    clock.tick(20)
# Close the program
pygame.quit()
| [
"noreply@github.com"
] | kathcode.noreply@github.com |
9cb2d61e079371fcc100cf7044132bb2dd66c7b8 | 0e01f9b8479124d346a57efde464a1982f11b187 | /Game/airplaneDay01.py | e73dd605224997e255fadc0213b5befb8c6d9cb4 | [] | no_license | arlose/MathAndAlgorithm | 60011537e06065a95f11950d1c327f2c05a4b1b8 | 1be4f94cfde96db45957cf7bf355daef1e465fbb | refs/heads/master | 2022-12-10T13:26:27.472708 | 2019-06-21T09:26:48 | 2019-06-21T09:26:48 | 173,125,553 | 0 | 0 | null | 2022-11-22T03:32:31 | 2019-02-28T14:21:20 | Jupyter Notebook | UTF-8 | Python | false | false | 951 | py | import pygame #导入pygame工具包
import time #导入时间工具
pygame.init() # initialize all pygame modules
sc=pygame.display.set_mode((480,800),0,32) # create a window of the given size
bg=pygame.image.load('background.png') # load the background image
# load the two plane animation frames
hero0 = pygame.image.load('hero0.png')
hero1 = pygame.image.load('hero1.png')
# plane position and an animation frame counter
herox=240
heroy=400
num=0
while True:
    sc.blit(bg,(0,0)) # draw the background
    # advance the animation counter
    num = num+1
    # alternate the two frames to animate the engine exhaust
    if num%2 ==1:
        sc.blit(hero0,(herox,heroy)) # draw plane frame 1
    else:
        sc.blit(hero1,(herox,heroy)) # draw plane frame 2
    # pause a little each frame (0.01 s)
    time.sleep(0.01)
    pygame.display.update() # refresh the screen
    # handle the window close button
    # NOTE(review): pygame.quit() is called without leaving the loop, so the
    # next blit after closing will raise — presumably relied on to end the
    # script; confirm intended behaviour.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
| [
"arlose.fj@gmail.com"
] | arlose.fj@gmail.com |
a2f0d52c017d46c8f915230ab28e22f1cf8ab942 | 14035a4b00a4306d1d75456cf63b499e551f838e | /vad_file.py | bd7f4046f004b5a5e0dc8acbc9dadcc4f79d167a | [] | no_license | monisankha/ClusterGAN_Diar | bb0cdafa903c6ac34eaa50ebca3b4ba487a0ef70 | f27a7ebe274ef5060347f3cccb65bff4eb7a1595 | refs/heads/master | 2022-12-05T13:53:43.529709 | 2020-08-29T18:35:49 | 2020-08-29T18:35:49 | 217,415,707 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,712 | py | import os, glob
import numpy as np
import decimal
import argparse
import scipy.signal
def round_half_up(number):
    """Round *number* to the nearest integer with ties going away from zero."""
    as_decimal = decimal.Decimal(number)
    rounded = as_decimal.quantize(decimal.Decimal('1'), rounding=decimal.ROUND_HALF_UP)
    return int(rounded)
def vad_file(wavFile, rttmFile):
    """Build a frame-level voice-activity vector from an RTTM file.

    The wav duration is obtained by shelling out to `soxi -D` (sox must be
    installed). One frame = 1/100 s; frames covered by any RTTM segment
    (fields 3 = onset, 4 = duration, in seconds) are set to 1.

    Returns a numpy int array of length duration * 100.

    Fix vs. the previous version: the RTTM file handle is now closed (it was
    opened and never closed, and shadowed the builtin name `file`).
    """
    frame_rate = 100
    duration = os.popen('soxi -D ' + wavFile).readlines()[0][:-1]
    total_frame = float(duration) * float(frame_rate)
    vad = [0] * int(total_frame)
    with open(rttmFile, 'r') as rttm:
        for line in rttm:
            a = line.split(' ')  # RTTM is space-delimited
            f1 = int(round_half_up(float(a[3]) * frame_rate))  # start frame
            f2 = int(round_half_up((float(a[3]) + float(a[4])) * frame_rate))  # end frame
            vad[f1:f2] = [1] * (f2 - f1)
    return np.asarray(vad)
def data_prep_vad(wavFile, rttmFile):
    """Write the wav path to ./wavList and dump its frame-level VAD to CSV.

    The CSV (one 0/1 per line) is written to ./vad/kaldiVAD/<basename>.csv,
    creating the directory if needed.

    Fix vs. the previous version: both output files are opened with `with`
    so they are closed even if writing fails part-way.
    """
    path = os.getcwd()
    with open(os.path.join(path, "wavList"), 'w') as wav_list:
        wav_list.write("{:s}\n".format(wavFile))
    # basename without the '.wav' extension
    file_name = wavFile.split('/')[-1][:-4]
    vad = vad_file(wavFile, rttmFile)
    iter_path = path + '/vad/kaldiVAD/'
    if not os.path.exists(iter_path):
        os.makedirs(iter_path)
    with open(os.path.join(iter_path, file_name + ".csv"), 'w') as out:
        for flag in vad:
            out.write("{:d}\n".format(flag))
parser = argparse.ArgumentParser('')
parser.add_argument('--wavFile', type=str)
parser.add_argument('--rttmFile', type=str)
args = parser.parse_args()
wavFile = args.wavFile
rttmFile = args.rttmFile
data_prep_vad(wavFile, rttmFile)
| [
"monisankha.pal@gmail.com"
] | monisankha.pal@gmail.com |
fc4489fe4def15e7a8ccd94df2f27d10fc6dad76 | 537259790008b727c03c56ec55a6aaaeeeaf65a3 | /scrapers/tvrelease_scraper.py | 533a9a8e18bb3485693ce0a1c03222774e2bd2a3 | [] | no_license | djbijo/salts | a5781ac9958b77c2acfacf4f73a5286e0b91d8e2 | 9eaa736701833eedf6796403da33d648aaf348f8 | refs/heads/master | 2020-12-11T03:26:15.843807 | 2015-04-09T18:35:45 | 2015-04-09T18:35:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,354 | py | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import urllib
import urlparse
import re
import xbmcaddon
from salts_lib import log_utils
from salts_lib.constants import VIDEO_TYPES
from salts_lib.db_utils import DB_Connection
from salts_lib.constants import QUALITIES
BASE_URL = 'http://tv-release.net'
QUALITY_MAP = {'MOVIES-XVID': QUALITIES.MEDIUM, 'TV-XVID': QUALITIES.HIGH, 'TV-MP4': QUALITIES.HIGH,
'TV-480P': QUALITIES.HIGH, 'MOVIES-480P': QUALITIES.HIGH, 'TV-720P': QUALITIES.HD, 'MOVIES-720P': QUALITIES.HD}
class TVReleaseNet_Scraper(scraper.Scraper):
    """SALTS blog-style scraper for tv-release.net (movies and TV episodes)."""
    base_url = BASE_URL
    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        """Read the user-configured base URL from the Kodi addon settings."""
        self.timeout = timeout
        self.db_connection = DB_Connection()
        self.base_url = xbmcaddon.Addon().getSetting('%s-base_url' % (self.get_name()))
    @classmethod
    def provides(cls):
        """Video types this scraper can search for."""
        return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.EPISODE])
    @classmethod
    def get_name(cls):
        """Display name, also used to build per-scraper setting ids."""
        return 'TVRelease.Net'
    def resolve_link(self, link):
        # scraped links are already direct hoster URLs; nothing to resolve
        return link
    def format_source_label(self, item):
        """Label shown to the user for one source: '[quality] host'."""
        return '[%s] %s' % (item['quality'], item['host'])
    def get_sources(self, video):
        """Scrape the post page for hoster links, skipping .rar archives.

        Quality is derived from the post's category via QUALITY_MAP when the
        category can be extracted from the page.
        """
        source_url = self.get_url(video)
        hosters = []
        if source_url:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=.5)
            q_str = ''
            match = re.search('>Category.*?td_col">([^<]+)', html)
            if match:
                q_str = match.group(1).upper()
            pattern = "td_cols.*?href='([^']+)"
            for match in re.finditer(pattern, html):
                url = match.group(1)
                # archives cannot be streamed; skip them
                if re.search('\.rar(\.|$)', url):
                    continue
                hoster = {'multi-part': False, 'class': self, 'views': None, 'url': url, 'rating': None, 'direct': False}
                hoster['host'] = urlparse.urlsplit(url).hostname
                hoster['quality'] = self._get_quality(video, hoster['host'], QUALITY_MAP.get(q_str, None))
                hosters.append(hoster)
        return hosters
    def get_url(self, video):
        """Resolve the post URL for this video via the base blog-search helper."""
        return self._blog_get_url(video, delim=' ')
    @classmethod
    def get_settings(cls):
        """Append the blog-scraper settings (result age filter, auto-select)."""
        settings = super(TVReleaseNet_Scraper, cls).get_settings()
        settings = cls._disable_sub_check(settings)
        name = cls.get_name()
        settings.append(' <setting id="%s-filter" type="slider" range="0,180" option="int" label=" Filter results older than (0=No Filter) (days)" default="30" visible="eq(-6,true)"/>' % (name))
        settings.append(' <setting id="%s-select" type="enum" label=" Automatically Select" values="Most Recent|Highest Quality" default="0" visible="eq(-7,true)"/>' % (name))
        return settings
    def search(self, video_type, title, year):
        """Search the site; the category filter depends on the video type."""
        search_url = urlparse.urljoin(self.base_url, '/?s=')
        search_url += urllib.quote(title)
        if video_type == VIDEO_TYPES.EPISODE:
            search_url += '&cat=TV-XviD,TV-Mp4,TV-720p,TV-480p,'
        else:
            search_url += '&cat=Movies-XviD,Movies-720p,Movies-480p'
        html = self._http_get(search_url, cache_limit=.25)
        pattern = "posts_table.*?<a[^>]+>(?P<quality>[^<]+).*?href='(?P<url>[^']+)'>(?P<post_title>[^<]+).*?(?P<date>[^>]+)</td></tr>"
        date_format = '%Y-%m-%d %H:%M:%S'
        return self._blog_proc_results(html, pattern, date_format, video_type, title, year)
    def _http_get(self, url, cache_limit=8):
        """Fetch a page through the base class's caching HTTP helper."""
        return super(TVReleaseNet_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, cache_limit=cache_limit)
| [
"tknorris@gmail.com"
] | tknorris@gmail.com |
935dc1a21aaa9f5479016b944facd3f3ac49a78b | 2bcaca13b7145bea978b1c89d36c30651a01f5f5 | /22b-pygame.py | 195e9ce2cb2666e053ce70921717e1b29881a6bf | [] | no_license | jonaskrogell/adventofcode2017 | 44d566aed0e074d2f0a0c84babf00204c9c6f7a9 | 6a5e080dfe83735c6f868e868002986ef55cd9db | refs/heads/master | 2021-09-01T11:01:17.954667 | 2017-12-26T17:06:09 | 2017-12-26T17:06:09 | 113,969,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,697 | py | #!env python3
import sys
import pygame
import time
pygame.init()
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
BLUE = ( 0, 0, 255)
GREEN = ( 0, 255, 0)
RED = (255, 0, 0)
# cell colors for the three infection states; the first (colorful) palette
# is immediately overridden by the greyscale palette below
infected = (245, 30, 30)
warning = (245, 137, 30)
marked = (245, 30, 137)
infected = (255, 255, 255)
warning = (150, 150, 150)
marked = (100, 100, 100)
size = [1920, 1080]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("AoC 22")
clock = pygame.time.Clock()
def renderMap(cur_x, cur_y):
    """Print the virus map to stdout with a margin around its bounding box.

    The current position (cur_x, cur_y) is bracketed as [x]. Reads the
    module-level `virusmap` dict keyed by (x, y); unknown cells print '.'.
    """
    # scan all keys for the map's bounding box
    min_x = None
    min_y = None
    max_x = None
    max_y = None
    for key in virusmap:
        if min_x is None or key[0] < min_x:
            min_x = key[0]
        if max_x is None or key[0] > max_x:
            max_x = key[0]
        if min_y is None or key[1] < min_y:
            min_y = key[1]
        if max_y is None or key[1] > max_y:
            max_y = key[1]
    print('Map size (x,y):', min_x, '-', max_x, ',', min_y, '-', max_y)
    margin = 3
    for y in range(min_y - margin, max_y + 1 + margin):
        for x in range(min_x - margin, max_x + 1 + margin):
            item = '.'
            if (x, y) in virusmap:
                item = virusmap[x, y]
            print(item, end='')
            # separator: '[' before / ']' after the current cell, else a space
            if y == cur_y and x == cur_x - 1:
                print('[', end='')
            elif y == cur_y and x == cur_x:
                print(']', end='')
            else:
                print(' ', end='')
        print()
# Read the initial grid from stdin into virusmap[(x, y)] = character.
virusmap = {}
y = 0
for row in sys.stdin.read().split('\n'):
    if len(row.strip()) == 0:
        continue
    x = 0
    for dot in row:
        virusmap[x, y] = dot
        x += 1
    y += 1
# Start the virus carrier in the middle of the (square, odd-sized) input.
x = int((x - 1) / 2)
y = int((y - 1) / 2)
print('Starting position (x,y):', x, y)
# renderMap(x, y)
directions = ['up', 'right', 'down', 'left']
direction = 0
infections = 0
screen.fill(BLACK)
def draw(x, y, color):
    """Paint one grid cell at map coordinates (x, y) in the given color."""
    zoom = 6
    # shift the map origin, scale to pixels, and center on the screen
    px = (x - 25) * zoom + int(size[0] / 2)
    py = (y - 10) * zoom + int(size[1] / 2)
    pygame.draw.rect(screen, color, [px, py, zoom - 1, zoom - 1])
# Run 1,000,000 bursts of the part-2 virus automaton, rendering every 278th
# frame to 22-images/ as a numbered PNG.
c = 0  # saved-frame counter
for step in range(1000000):
    # clock.tick(100)
    # NOTE(review): this `break` only exits the event for-loop, not the
    # simulation loop — closing the window has no effect here.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            break
    if step % 100000 == 0:
        print('Step:', step, 'Direction:', directions[direction], 'Pos (x,y):', x, y)
    # state machine: clean(.) -> weakened(W) -> infected(#) -> flagged(F) -> clean
    if (x, y) in virusmap and virusmap[x, y] != '.':
        if virusmap[x, y] == '#':
            # turn right
            direction = (direction + 1) % len(directions)
            virusmap[x, y] = 'F'
            draw(x, y, marked)
        elif virusmap[x, y] == 'W':
            virusmap[x, y] = '#'
            infections += 1
            draw(x, y, infected)
        elif virusmap[x, y] == 'F':
            # turn in reverse
            direction = (direction + 2) % len(directions)
            virusmap[x, y] = '.'
            draw(x, y, BLACK)
    else:
        # turn left
        direction = (direction - 1) % len(directions)
        # infect
        virusmap[x, y] = 'W'
        draw(x, y, warning)
    # move one cell in the current direction
    if directions[direction] == 'up':
        y -= 1
    if directions[direction] == 'down':
        y += 1
    if directions[direction] == 'right':
        x += 1
    if directions[direction] == 'left':
        x -= 1
    draw(x, y, RED)
    if step % 278 == 0:
        pygame.display.flip()
        pygame.image.save(screen, '22-images/{num:06d}.png'.format(num=c))
        c += 1
    # renderMapGame(x, y)
# renderMapGame(x, y)
print('Total infections:', infections)
# Keep the window alive; the inner break again fails to reach the outer loop.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            break
    pygame.display.flip()
    time.sleep(1)
pygame.quit()
"jonas@krogell.se"
] | jonas@krogell.se |
eaf9697142f3c3468172273bd83cf5309fa1211d | 7da856d91cba898924088874d626446f933dcab9 | /test_fixture8.py | 1f1c705bab3cc1e9a550d0c885dd2f93c15e4764 | [] | no_license | Notker367/Autotest | cee3fbd5b8453da37930da507bc7fd65faf5c43a | 8a8a7a9d1f33dc10ac3054bebe2827aea5ad17bf | refs/heads/master | 2023-03-21T11:59:54.570108 | 2021-03-07T12:54:26 | 2021-03-07T12:54:26 | 303,544,830 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | import pytest
from selenium import webdriver
link = "http://selenium1py.pythonanywhere.com/"
@pytest.fixture(scope="function")
def browser():
    """Start a fresh Chrome WebDriver per test and quit it afterwards."""
    print("\nstart browser for test..")
    driver = webdriver.Chrome(executable_path=r"D:\WebDrvers\Chrome\chromedriver_win32\chromedriver.exe")
    yield driver
    # teardown runs after the test finishes
    print("\nquit browser..")
    driver.quit()
class TestMainPage1():
    """Smoke/regression checks on the main page of the demo shop."""
    @pytest.mark.smoke
    def test_guest_should_see_login_link(self, browser):
        """A guest user can see the login link on the main page."""
        browser.get(link)
        browser.find_element_by_css_selector("#login_link")
    @pytest.mark.regression
    def test_guest_should_see_basket_link_on_the_main_page(self, browser):
        """A guest user can see the basket link on the main page."""
        browser.get(link)
        browser.find_element_by_css_selector(".basket-mini .btn-group > a")
"notstrauss@gmail.com"
] | notstrauss@gmail.com |
bd288c568dd8ea0b72470388549c92443ff19e78 | cedf43531497300b20f76597e6aa4053f866dde7 | /check-expiradate-domains/get_domain_msg-old.py | a555d19d04260b8025271dfb85d8479911e8fe40 | [] | no_license | MoeList/check_domain_info | 2ccbedfb4f7dcc6db1d0faafc159ea080e7fe0d7 | d2bf17493d9877bff7dadc0c4a6610ff23a79bd7 | refs/heads/master | 2021-12-15T10:17:12.994419 | 2017-08-12T09:29:52 | 2017-08-12T09:29:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,956 | py | #!/usr/bin/env python
# encoding: utf-8
#by luwen
import re
import sys
import time
import json
import urllib
import urllib2
import smtplib
import datetime
from email.mime.text import MIMEText
from email.header import Header
def sendmail(from_addr, password, to_addr, smtpServer, subject, content):
    """Send a UTF-8 plain-text mail via the given SMTP server (port 25).

    to_addr is a list of recipient addresses.
    """
    msg = MIMEText(content, 'plain', 'utf-8')
    msg['From'] = from_addr
    msg['To'] = ','.join(to_addr)
    msg['Subject'] = subject
    server = smtplib.SMTP(smtpServer, 25)
    server.login(from_addr, password)
    server.sendmail(from_addr, to_addr, msg.as_string())
    server.quit()
def get_domain(reqUrl, key, reqDomain):
    """POST a whois lookup for reqDomain and return the parsed JSON reply.

    Legacy Python 2 networking (urllib/urllib2).
    """
    payload = urllib.urlencode({'key': key, 'host': reqDomain})
    request = urllib2.Request(reqUrl, payload)
    return json.load(urllib2.urlopen(request))
if __name__ == "__main__":
    # e-mail settings (sender, recipients, SMTP credentials)
    from_addr = '***@**.com'
    to_addr = ['**@**.com']
    #to_addr = ['luwen@jf.com']
    password = '****'
    smtpServer = 'smtp.**.com'
    # whois query endpoint and API key
    url = 'http://api.91cha.com/whois'
    key = '*********************'
    # today's date as YYYY-MM-DD
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    todayStr = today.split('-')
    # build a datetime for today
    #todayStr = datetime.datetime.strptime(today,'%Y-%M-%d')
    d1 = datetime.datetime(int(todayStr[0]), int(todayStr[1]), int(todayStr[2]))
    with open('domain.txt') as file:
        for domain in file:
            host = domain.strip('\n')
            try:
                pass
                # NOTE(review): the `pass` above is a leftover no-op
                msgHost = get_domain(url,key,host)
            except Exception,e:
                print Exception,":",e
                continue
            # check whether the query succeeded (state 1 = OK)
            if msgHost['state'] != 1:
                errorCode = msgHost['state']
                subject = '%s域名查询出错' %host
                content = "%s域名查询出错,错误代码%s,请检查\n" %(host,errorCode)
                to_addr = ['**@jf.com']
                sendmail(from_addr,password,to_addr,smtpServer,subject,content)
                time.sleep(10)
                continue
            # expiry date reported by the whois service
            expired = msgHost['data']['expiretime']
            # split the expiry date into year/month/day
            expiredStr = expired.split('-')
            d2 = datetime.datetime(int(expiredStr[0]), int(expiredStr[1]), int(expiredStr[2]))
            # days remaining until expiry
            periodTime = (d2 - d1).days
            print periodTime
            # notify when the domain expires within 60 days
            if periodTime < 60:
                subject = '%s域名即将过期提醒邮件' %host
                content = "%s域名还有%s天过期,请注意续费\n" %(host,periodTime)
                sendmail(from_addr,password,to_addr,smtpServer,subject,content)
                time.sleep(10)
| [
"mikeluwen@gmail.com"
] | mikeluwen@gmail.com |
bffe670277917a2e2e2d58df614bf6055207db99 | b845ab78628211a377ccc9bcfd0ba727ef4a2849 | /tempmail_api/models/rpc.py | 58984947e1e8e4054e8c462c6e6dd8d8cafbdf5a | [] | no_license | MD-Levitan/TempMailApi | fbe111d88278b0b21914a2bfded216e42d959426 | be4da628b8a1786d20aad3cb573396ff830660ff | refs/heads/main | 2023-04-01T04:04:12.832920 | 2021-04-06T09:16:11 | 2021-04-06T09:16:11 | 322,036,292 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,598 | py | import json
from dataclasses import dataclass
from typing import List, Optional, Any
@dataclass
class JsonRpcMessage:
    """Base JSON-RPC 2.0 envelope: protocol version and message id."""
    jsonrpc: str = "2.0"  # JSON-RPC protocol version
    id: str = "jsonrpc"   # message correlation id

    def __init__(self, data: dict):
        """Populate the envelope from a parsed message dict (defaults if None)."""
        source = data if data is not None else {}
        self.jsonrpc = source.get("jsonrpc", "2.0")
        self.id = source.get("id", "jsonrpc")

    def json(self) -> dict:
        """Return the envelope as a plain dict."""
        return {"jsonrpc": self.jsonrpc, "id": self.id}
@dataclass
class JsonRpcRequest(JsonRpcMessage):
    """JSON-RPC request: the shared envelope plus a method and its params."""

    method: str = None  # name of the remote procedure to invoke
    params: dict = None  # parameters passed to the method

    def __init__(self, data: dict):
        """Fill envelope fields via the base class, then method/params.

        When *data* is None, ``method``/``params`` stay as class-level
        defaults (no instance attributes are created for them), so they do
        not appear in ``self.__dict__`` when serializing.
        """
        super().__init__(data)
        if data is not None:
            self.method = data.get("method", None)
            self.params = data.get("params", None)

    def json(self) -> str:
        """Serialize the request to a JSON string.

        BUGFIX: the return annotation was ``dict`` but this method returns
        ``json.dumps(...)`` -- a *string* -- unlike ``JsonRpcMessage.json``.
        A null ``params`` is replaced by an empty object so the request
        always carries a params member.
        """
        result = super().json()
        result.update(self.__dict__)
        if self.params is None:
            result.update({"params": dict()})
        return json.dumps(result)
@dataclass
class Error:
    """JSON-RPC error payload."""

    code: int = None      # numeric error code
    subCode = None        # vendor-specific sub-code (plain class attr, not a field)
    message: str = None   # human-readable description

    def __init__(self, data: dict):
        """Adopt *data* wholesale as this instance's attribute dict.

        The dict object itself is bound (not copied), so later attribute
        writes on the instance mutate the caller's dict as well.
        """
        if data is None:
            return
        self.__dict__ = data
@dataclass
class JsonRpcResponse(JsonRpcMessage):
    """JSON-RPC response: envelope plus either a result or an error."""

    result: dict = None  # payload returned on success
    error: dict = None   # wrapped as an Error instance when data is given

    def __init__(self, data: dict):
        """Fill envelope fields, then pull result/error out of *data*.

        With data=None the class-level defaults are left untouched,
        mirroring the other message classes in this module.
        """
        super().__init__(data)
        if data is None:
            return
        self.result = data.get("result", None)
        self.error = Error(data.get("error", None))
| [
"ovsyanka@protonmail.com"
] | ovsyanka@protonmail.com |
5b9a0e8151fc4c44ee36a6bf9630696e3772d3bf | c9500ad778b8521aaa85cb7fe3239989efaa4799 | /plugins/proofpoint_tap/unit_test/test_get_blocked_clicks.py | 20515642f86f5bf197f87ee9b53be6017f8d31ab | [
"MIT"
] | permissive | rapid7/insightconnect-plugins | 5a6465e720f114d71b1a82fe14e42e94db104a0b | 718d15ca36c57231bb89df0aebc53d0210db400c | refs/heads/master | 2023-09-01T09:21:27.143980 | 2023-08-31T10:25:36 | 2023-08-31T10:25:36 | 190,435,635 | 61 | 60 | MIT | 2023-09-14T08:47:37 | 2019-06-05T17:05:12 | Python | UTF-8 | Python | false | false | 3,159 | py | import sys
import os
from unittest.mock import patch
from komand_proofpoint_tap.actions.get_blocked_clicks import GetBlockedClicks
from insightconnect_plugin_runtime.exceptions import PluginException
from komand_proofpoint_tap.util.exceptions import ApiException
from test_util import Util
from unittest import TestCase
from parameterized import parameterized
sys.path.append(os.path.abspath("../"))
@patch("requests.request", side_effect=Util.mocked_requests_get)
class TestGetBlockedClicks(TestCase):
    """Unit tests for the GetBlockedClicks action.

    All HTTP traffic is intercepted by patching ``requests.request`` with
    ``Util.mocked_requests_get``, so the tests run fully offline against
    the fixture files under ``inputs/`` and ``expected/``.
    """

    @classmethod
    def setUpClass(cls) -> None:
        # Build the action once with a stubbed connection; shared by all tests.
        cls.action = Util.default_connector(GetBlockedClicks())

    # Each case: [test name, input fixture, expected-output fixture].
    @parameterized.expand(
        [
            [
                "blocked_clicks",
                Util.read_file_to_dict("inputs/get_blocked_clicks.json.inp"),
                Util.read_file_to_dict("expected/get_blocked_clicks.json.exp"),
            ],
            [
                "blocked_clicks_cleared_status",
                Util.read_file_to_dict("inputs/get_blocked_clicks_cleared_status.json.inp"),
                Util.read_file_to_dict("expected/get_blocked_clicks_cleared_status.json.exp"),
            ],
            [
                "blocked_clicks_without_url",
                Util.read_file_to_dict("inputs/get_blocked_clicks_without_url.json.inp"),
                Util.read_file_to_dict("expected/get_blocked_clicks_without_url.json.exp"),
            ],
            [
                "blocked_clicks_without_time_start",
                Util.read_file_to_dict("inputs/get_blocked_clicks_without_time_start.json.inp"),
                Util.read_file_to_dict("expected/get_blocked_clicks_without_time_start.json.exp"),
            ],
            [
                "blocked_clicks_without_time_end",
                Util.read_file_to_dict("inputs/get_blocked_clicks_without_time_end.json.inp"),
                Util.read_file_to_dict("expected/get_blocked_clicks_without_time_end.json.exp"),
            ],
            [
                "blocked_clicks_without_time_start_end",
                Util.read_file_to_dict("inputs/get_blocked_clicks_without_time_start_end.json.inp"),
                Util.read_file_to_dict("expected/get_blocked_clicks_without_time_start_end.json.exp"),
            ],
        ]
    )
    def test_get_blocked_clicks(self, mock_request, test_name, input_params, expected):
        """Happy-path cases: the action output must match the fixture exactly."""
        actual = self.action.run(input_params)
        self.assertDictEqual(actual, expected)

    # Each case: [test name, input fixture, expected cause, expected assistance].
    @parameterized.expand(
        [
            [
                "blocked_clicks_timerange_invalid",
                Util.read_file_to_dict("inputs/get_blocked_clicks_timerange_invalid.json.inp"),
                PluginException.causes[PluginException.Preset.BAD_REQUEST],
                PluginException.assistances[PluginException.Preset.BAD_REQUEST],
            ],
        ]
    )
    def test_get_blocked_clicks_raise_exception(self, mock_request, test_name, input_params, cause, assistance):
        """An invalid time range must raise ApiException with BAD_REQUEST details."""
        with self.assertRaises(ApiException) as error:
            self.action.run(input_params)
        self.assertEqual(error.exception.cause, cause)
        self.assertEqual(error.exception.assistance, assistance)
| [
"noreply@github.com"
] | rapid7.noreply@github.com |
fdd4c896536b5c22b9f6cf002acf1d30ca14b1ba | ddc3d96e7ce83f696df888f53673163da546b79d | /letterGame.py | 1f2d330779e9701585172a88edfbd8182e69741b | [] | no_license | YesManKablam/CountdownLetterGame | 9e2e77983b6d2d9d4048976d6a77c2be13862e83 | 655a259f504a73e4d9b94f09ecda805576c79fdc | refs/heads/master | 2021-01-10T14:39:10.648537 | 2016-03-31T14:08:28 | 2016-03-31T14:08:28 | 52,441,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,689 | py | # G00301273
# John Conor Kenny
# Countdown Letter Game: build a random 9-letter rack and search the word
# list for the longest word that is an arrangement of (some of) its letters.
# Imports a timer function to see how long it takes to run the program
import time
start_time = time.time()
# Opens the word list (one word per line) and assigns it to a list
with open('newDic.txt', 'r') as fileopen:
    words = [line.strip() for line in fileopen]
# Imports the random function and sets up lists for the final anagram, the vowels and the consonants
import random
anagram = []
vowels = ['a','e','i','o','u']
consonant = ['b','c','d','f','g','h','j','k','l','m','n','p','q','r','s','t','v','x','z','w','y']
# Countdown-style rack: 3 vowels and 4 consonants are mandatory,
# then the remaining 2 slots are filled with either.
for i in range (0,3):
    anagram.append(random.choice(vowels))
for i in range (0,4):
    anagram.append(random.choice(consonant))
for i in range (0,2):
    anagram.append(random.choice(vowels + consonant))
# Imports the shuffle function and jumbles the anagram so that it doesn't look like it follows a pattern
from random import shuffle
shuffle(anagram)
# Prints the anagram as a plain string (no commas or brackets)
print (''.join(anagram))
# Fixed racks kept for testing/benchmarking:
#anagram = ['a','u','c','t','i','o','n','e','d']
#anagram = ['a','r','r','o','g','a','n','t','t']
#anagram = ['t','e','s','t','t','t','t','t','t']
# permutations(seq, k) yields every ordered arrangement of k characters, so
# joining each tuple produces every candidate "word" of that length.
# For example permutations("test", 2) yields "te", "ts", ... "st".
# The anagram is always 9 characters long, so trying lengths 9, 8, 7, ...
# finds the longest dictionary matches first.
# A previous loop-based helper, kept for reference:
#def finder(ana):
#    from itertools import permutations
#    perms = []
#    perms += [''.join(p) for p in permutations(ana)]
#    results = (set(words) & set(perms))
#    if not results:
#        perms = []
#        for i in range (0,9):
#            j = 0
#            perms += [''.join(p) for p in permutations(ana, 2)]
#            j = j + 1
#            results = (set(words) & set(perms))
#        return (results)
#    else:
#        return (results)
#a = finder(anagram)
#print (a)
# The permutation cascade below stops at the longest length that produces a
# match. Ugly as sin, but faster in practice: the longest candidates are
# tried first, and each shorter permutation set is generated faster than the
# last. Most results are around 5 characters long, though, so the saving is
# usually modest.
from itertools import permutations
perms = []
perms += [''.join(p) for p in permutations(anagram)]
results = (set(words) & set(perms))
if not results:
    perms = [''.join(p) for p in permutations(anagram, (len(anagram) - 1))]
    results = (set(words) & set(perms))
    if not results:
        perms = []
        perms = [''.join(p) for p in permutations(anagram, 7)]
        results = (set(words) & set(perms))
        if not results:
            perms = []
            perms = [''.join(p) for p in permutations(anagram, 6)]
            results = (set(words) & set(perms))
            if not results:
                perms = []
                perms = [''.join(p) for p in permutations(anagram, 5)]
                results = (set(words) & set(perms))
                if not results:
                    perms = []
                    perms = [''.join(p) for p in permutations(anagram, 4)]
                    results = (set(words) & set(perms))
                    if not results:
                        perms = []
                        perms = [''.join(p) for p in permutations(anagram, 3)]
                        results = (set(words) & set(perms))
print (results)
# `results` holds only the words present in both the perms list and the word
# list; converting both to sets keeps unique items and makes `&` fast.
#results = (set(words) & set(perms))
# This line would order the output by word length:
#sortedwords = sorted(results, key=len)
# ...and these would print the overlap plus the longest word in it:
#print (results)
#print (sortedwords[-1])
# Displays the running time of the program
print("--- %s seconds ---" % (time.time() - start_time))
| [
"conorkenny183@gmail.com"
] | conorkenny183@gmail.com |
31c3750fdc7139640b5abca1dc42a63d24e2ff42 | 5eb17611ffa175da388888c5a9b866c674865d5b | /birbcam/exposureadjust/sleep.py | db04a2e91ca41a376dbfbb3e2de6984665a56172 | [
"MIT"
] | permissive | stogs/birbcam | 2823739e740b91fdeb28f96c0bcc061299a220be | b44c95744d81d063f12dfb2521019ff89787c45a | refs/heads/main | 2023-07-16T19:11:48.911057 | 2021-08-30T23:24:21 | 2021-08-30T23:24:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | from .exposurestate import ExposureState
from time import time
import logging
class Sleep(ExposureState):
    """Exposure-adjustment state that idles for a fixed period.

    While sleeping, ``update`` is a no-op; once the release time passes it
    calls the base class's ``_changeState`` to leave this state
    (target-state selection is handled by ExposureState).
    """
    def __init__(self, waitTime):
        super().__init__()
        # Absolute wall-clock time (seconds since epoch) when the sleep ends.
        self._releaseTime = time() + waitTime
        logging.info(f"[Sleep] for {waitTime}")
    def update(self, camera, image):
        # Still sleeping: ignore this frame.
        if time() < self._releaseTime:
            return
        # Sleep elapsed: request a state transition.
        self._changeState(None)
| [
"gallahad@me.com"
] | gallahad@me.com |
9751b47661d97074ea93280984aa3a93a3a7246f | 6b81296eff6aac2b81326a3f97a7240321d085d1 | /pycampaign06[for loop].py | 6fa1c9058afbcf87d31d21acee1273479a816d0b | [
"Unlicense"
] | permissive | EssamSami5155/PyCampaign20 | 0d267c586e6060824c147a54a1cbc8d01c672e87 | 7c8dba63de1a499742c748a1b85d00eeebbb38d6 | refs/heads/master | 2022-12-17T06:32:49.112717 | 2020-09-20T09:49:51 | 2020-09-20T09:49:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,738 | py | # Repeating events
# Using for loop
# Lesson script: repeated turtle drawings and loop exercises. Runs top to
# bottom, opening a turtle window and prompting for input along the way.
import turtle # turtle is a library that helps us to draw.
turtle.color("blue")
turtle.forward(100)
turtle.right(45)
turtle.color("green")
turtle.forward(50)
turtle.right(45)
turtle.color("grey")
turtle.forward(100)
# turtle commands
# right(x) - rotate right x degrees.
# left(x) - rotate left x degrees.
# color("x") - change pen color to x.
# forward(x) - move forward x.
# backward(x) - move backward x.
# drawing a square with turtle
import turtle
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
# this is a bad way to complete this task.
# we can use loops to make this task easier.
# Loops allow us to repeat the same line of code as often as we want.
# example:
import turtle
for steps in range(4): # a for loop lets us specify how many times this block runs.
    turtle.forward(65)
    turtle.left(90)
# in this code "steps" is a variable. we can name it anything.
# Nested loops
import turtle
for steps in range(4):
    turtle.forward(100)
    turtle.right(90)
    for moresteps in range(4):
        turtle.forward(50)
        turtle.right(90)
# variables inside a loop
import turtle
shawki=8
for steps in range(shawki):
    turtle.forward(100)
    turtle.right(360/shawki)
    for moresteps in range(shawki):
        turtle.forward(50)
        turtle.right(360/shawki)
# In python counting starts at 0. But we can specify numbers to count to or from.
for steps in range(1,10,2):
    print(steps)
# here counting runs from 1 up to (but not including) 10, stepping by 2.
# we can also tell python exactly what values we want to use in the loop.
for steps in [1,2,3,4,5]:
    print(steps)
# we don't even have to use numbers.
import turtle
for steps in ["red","blue","green","black"]:
    turtle.color(steps)
    turtle.forward(100)
    turtle.right(90)
    print(steps)
# Drawing a nested object
import turtle
print("Today we are going to draw an object using turtle librery in python.")
print("Tell us your opinion")
user=int(input("How many sides the object will have?\n"))
for steps in range(user):
    turtle.forward(160)
    turtle.right(360/user)
    for moresteps in range(user):
        turtle.forward(50)
        turtle.right(360/user)
# displaying the fibonacci series using a for loop
first=0
second=1
n=int(input("enter how many numbers you want in this series: "))
for i in range(n):
    print(first)
    temp=first
    first=second
    second=temp+second
# display the sum of the series: 1,3,5,7,9,11.......1119 using a list
# first method - arithmetic-series formula: (first + last) * count / 2
first = 1
listf=[]
while first<=1119:
    listf.append(first)
    first=first+2
num=len(listf)
v1=listf[0]
v2=listf[-1]
# BUGFIX: this variable was previously named `sum`, shadowing the builtin
# sum() and making the "sixth method" below crash with
# "TypeError: 'float' object is not callable".
series_sum=(v1+v2)*num/2
print(series_sum)
# second method
first = 1
total=0
listf=[]
while first<=1119:
    listf.append(first)
    first=first+2
for steps in listf:
    total=total+steps
print(total)
# third method
# list() converts a range to a list
# range() creates a range of numbers:
# here it runs from 1 up to (but not including) 1121,
# and the third argument is the gap between two numbers.
c=list(range(1,1121,2))
total=0
for steps in c:
    total=total+steps
print(total)
# fourth method
# without using a list
total=0
for steps in range(1,1121,2):
    total=total+steps
    #or total+=steps
print(total)
# fifth method
# using a while loop
total=0
j=1
while j < 1121:
    total += j
    j += 2
print(total)
# sixth method
# easiest method
# one line of code (works now that the builtin sum() is no longer shadowed)
print(sum(range(1,1121,2)))
# sum of the values which are multiples of 3 in a range.
total=0
for steps in range(1,10000):
    if steps % 3 == 0:
        total += steps
print(total)
# sum of the values which are multiples of both 3 and 5, below 100.
total=0
for steps in range(1,100):
    if steps % 3 == 0 and steps % 5 == 0:
        total += steps
print(total)
# displaying a list's first value 1 time, second value 2 times, third value 3 times,....
a=["banana","apple","mango"]
for i in range(len(a)):
    for j in range(i+1):
        print(a[i])
# break keyword.
nums=[1,2,3,4,5]
for n in nums:
    if n == 3:
        print("found!")
        break
    print(n)
# when the condition is true, break exits the loop entirely; 3 and all later values are skipped.
# continue keyword
# what if we want to ignore a value but not break out of the loop completely?
nums=[1,2,3,4,5]
for n in nums:
    if n == 3:
        print("found!")
        continue
    print(n)
# continue skips to the next value of the loop.
turtle.done()
"ahammadshawki8@gmail.com"
] | ahammadshawki8@gmail.com |
8e342fda3a94a05c58c38e8e184d902cc8d9cd7a | c247a1979a843d03cda72229514f124a6d30f3b6 | /testproject_26580/settings.py | f3d0557da66a902ae2670ed1f3b61d54dbd0466f | [] | no_license | crowdbotics-apps/testproject-26580 | da91f9b5c5bde6ddebba9f11da0f141bbc4eb1b7 | ebc95b90a2b0621069ba4549fbc962456d9163ad | refs/heads/master | 2023-04-28T23:36:20.340815 | 2021-05-11T23:43:26 | 2021-05-11T23:43:26 | 366,539,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,784 | py | """
Django settings for testproject_26580 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
# Environment reader (django-environ): all deploy-specific values come from
# environment variables so the same settings file works everywhere.
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
# Allow any host unless HOST is set explicitly.
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
# Trust the proxy's forwarded-proto header; redirect to HTTPS only when asked.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]
LOCAL_APPS = [
    'home',
    'modules',
    'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'rest_auth',
    'rest_auth.registration',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
    'django_extensions',
    'drf_yasg',
    'storages',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'testproject_26580.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'testproject_26580.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Default to a local SQLite file; overridden below when DATABASE_URL is set.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
if env.str("DATABASE_URL", default=None):
    DATABASES = {
        'default': env.db()
    }
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# WhiteNoise serves static files directly from the app process.
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users: email-based login, username not required.
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
# Outgoing email via SendGrid SMTP (console backend fallback configured below).
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
# Truthy only when all four variables are non-empty strings (the `and`
# chain yields "" as soon as one of them is missing).
USE_S3 = (
    AWS_ACCESS_KEY_ID and
    AWS_SECRET_ACCESS_KEY and
    AWS_STORAGE_BUCKET_NAME and
    AWS_STORAGE_REGION
)
if USE_S3:
    AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
    AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
    AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
    AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
    AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
    DEFAULT_FILE_STORAGE = env.str(
        "DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
    )
# Local media storage (used when S3 is not configured).
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
f2974a6b04cff695d832393c8e12eb61902a51b5 | 9470a1124f5e83aa6c3beb317a5e1fbacae44dea | /app/engine.py | 4d4a8cc14a1689ac404a452d2632dbd5886cc589 | [] | no_license | blakewest/music_engine | 6884d7937b45e2bdefd862f6fff2ddb13f529fdc | 1ba804d95848bd59c74e437e135b32c253eedb80 | refs/heads/master | 2020-04-08T06:38:53.834880 | 2019-01-21T22:16:54 | 2019-01-21T22:16:54 | 159,104,170 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,453 | py | import random
from music21 import midi, note, converter
from app.note_predictor import NotePredictor
from copy import deepcopy
class MusicEngine():
def __init__(self):
pass
def run(self, melody=None, filename=None, melody_track=1):
assert melody or filename, "You must either provide a Music21 Stream object, or a midi filename"
if filename:
midi_file = self.open_midi_file(filename)
stream = midi.translate.midiFileToStream(midi_file)
melody = stream.parts[melody_track]
melody = deepcopy(melody)
next_note = NotePredictor().next_note(melody)
print("Predicted next note to be:", next_note.pitch)
melody.append(next_note)
player = midi.realtime.StreamPlayer(melody)
print("New full melody is...")
[print(note.pitch, ' ', end='') for note in self.all_notes(melody)]
print("Playing the new melody")
player.play()
def all_notes(self, melody):
return melody.flat.getElementsByClass('Note')
def suggest_new_melody(self, melody):
options = self.analyze_melody(snippet)
new_note = random.sample(options, 1)[0]
print("Appending", new_note)
snippet.append(new_note)
return snippet
def analyze_melody(self, melody, strategy="same_direction"):
print("Analyzing...")
last_two_notes = list(filter(lambda n: isinstance(n, note.Note), melody))[-2:]
last_pitch = last_two_notes[-1].pitch.ps
last_movement = last_two_notes[-1].pitch.ps - last_two_notes[0].pitch.ps
return [note.Note(last_pitch + last_movement, type="whole"), note.Note(last_pitch - last_movement, type="whole")]
def open_midi_file(self, filename):
print("Opening file...")
mf = midi.MidiFile()
mf.open(filename)
mf.read()
return mf
def find_rhythmic_pattern(self, durations):
# This method should try to notice, for instance,
# that a melody typically starts on the "and of 1",
# or that it uses 16th notes, etc. Like.. it should try
# to find *themes* within the rhythms
# Prob just start with trying to find any one of those.
# STEP 1 is to play around with the data in jupyter notebook
pass
# MusicEngine().run('./all_my_loving.midi')
# ascending_melody = converter.parse("tinyNotation: 4/4 c4 d4 e4")
# MusicEngine().run(melody=ascending_melody)
| [
"bwest87@gmail.com"
] | bwest87@gmail.com |
fdac87ca8f360d05930ce17b568a31129f568dd4 | 333422c3251f00eb52a97afd6344f4dc02e2e7eb | /HiggsAnalysis/VBFHiggsToZZto2l2b/python/vbfHZZllbbElectronIdSequences_cff.py | 11757c784164eb4c25a762cd0a14264afbcd7b66 | [] | no_license | mtosi/UserCode | 640c666ae7ff5654a82d4c06caf10901db36755e | 6d9d237b2c7362d43c1a1f66672ab6b7a6d59cfe | refs/heads/master | 2021-01-20T00:58:50.417565 | 2009-08-10T12:17:46 | 2009-08-10T12:17:46 | 11,195,044 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | import FWCore.ParameterSet.Config as cms
# CMSSW configuration fragment: three clones of the cut-based electron-ID
# producer (loose/medium/tight working points), all reading the
# overlap-resolved electron collection, chained into one sequence.
from RecoEgamma.ElectronIdentification.electronIdCutBasedClassesExt_cfi import *
import RecoEgamma.ElectronIdentification.electronIdCutBasedClassesExt_cfi
# Loose working point (the clone's default electronQuality).
eidClassLoose = RecoEgamma.ElectronIdentification.electronIdCutBasedClassesExt_cfi.eidCutBasedClassesExt.clone()
eidClassLoose.src = "overlapElectronResolver"
import RecoEgamma.ElectronIdentification.electronIdCutBasedClassesExt_cfi
# Medium working point of the same cut-based ID.
eidClassMedium = RecoEgamma.ElectronIdentification.electronIdCutBasedClassesExt_cfi.eidCutBasedClassesExt.clone()
eidClassMedium.electronQuality = 'medium'
eidClassMedium.src = "overlapElectronResolver"
import RecoEgamma.ElectronIdentification.electronIdCutBasedClassesExt_cfi
# Tight working point.
eidClassTight = RecoEgamma.ElectronIdentification.electronIdCutBasedClassesExt_cfi.eidCutBasedClassesExt.clone()
eidClassTight.electronQuality = 'tight'
eidClassTight.src = "overlapElectronResolver"
# Run the three ID producers one after another.
vbfHZZllbbElectronIdSequence = cms.Sequence( eidClassLoose
                                             + eidClassMedium
                                             + eidClassTight )
| [
""
] | |
41dc105eb07adb417e1c322ec9271ad8365af2c2 | 9a06c8ab42e0fbce88f06a1bd7237c4d5cae592a | /code/python_lesson/runoob/09摄氏度转华氏度.py | 783060706ad78ee49e750443b5c09375203fe90d | [
"MIT"
] | permissive | lxl0928/learning_python | 19040ca3ae92e5c07a1e813c707d625aa0ba8cb2 | ff0c6457186e7aa5b6ed9cafaea1dba616ce493a | refs/heads/master | 2023-05-13T13:59:28.815564 | 2021-03-22T02:08:53 | 2021-03-22T02:08:53 | 162,232,496 | 4 | 1 | MIT | 2023-05-01T20:15:55 | 2018-12-18T04:59:15 | Python | UTF-8 | Python | false | false | 342 | py | #! /usr/bin/python3
# -*- coding: utf-8 -*-
# Date: 2016.08.02
# Filename: 09.py
# Author: Timilong
# Read the Celsius temperature from the user (prompt is in Chinese).
celsius = float(input("请输入摄氏温度: "))
# Convert to Fahrenheit: F = C * 1.8 + 32.
fahrenheit = (celsius * 1.8) + 32
# Print both values to one decimal place.
print("%0.1f摄氏温度转化为华氏温度为%0.1f" % (celsius, fahrenheit))
| [
"lixiaolong@sensoro.com"
] | lixiaolong@sensoro.com |
0e2787ef74f44eb12fa0c18764f39d577adee6f6 | b7b303882d9cef608471378c0654f74afb404686 | /code3 - average_sentiment.py | ed09de9f12fdbfff50378708a6c3faedb8993237 | [] | no_license | Jerrin-rajans/Stock-prediction-using-Twitter-sentiment-analysis | c86cb02cccdba8de3dc0ecd469383d03b465f45f | 15180c9d787319d6c7a5e62f337fd2d6894c4960 | refs/heads/master | 2020-07-27T14:18:25.301005 | 2017-04-13T10:06:06 | 2017-04-13T10:06:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,433 | py | import datetime
import pandas as pd
import collections
# Aggregate tweet sentiments (labeled.csv: date, time, sentiment columns)
# into per-day "open" and "close" averages, mapped to market hours.
df = pd.read_csv('labeled.csv')
date_list = df.date.tolist()
dates=set(date_list)
#print(len(dates))
result=pd.DataFrame()
# Number of rows (tweets) per date.
cnt = collections.Counter(date_list)
print(cnt)
# od: per-date row counts, date-sorted; later used as the divisor for the
# "open" average. od_close starts as a copy but is re-zeroed below and
# counts only the rows inside trading hours.
od = collections.OrderedDict(sorted(cnt.items()))
od_close = collections.OrderedDict(sorted(cnt.items()))
#print(od) #prints dates and no of rows for each date
open_score = dict()
close_score = dict()
for k,v in od.items():
    print(k,v)
    open_score[k]=0
    close_score[k]=0
    od_close[k] = 0
# Sentiments posted after the close; they count toward the NEXT day's open.
next_date=[]
for i,r in df.iterrows():
    date=str(r.date)
    date1 = datetime.datetime.strptime(str(r.date),"%m/%d/%Y") #.strftime("%Y-%m-%d")
    delta = datetime.timedelta(days = 1)
    prev_date = date1-delta
    time=datetime.datetime.strptime(str(r.time),'%H:%M')
    time_open=datetime.datetime.strptime('09:00','%H:%M')
    time_close=datetime.datetime.strptime('16:00','%H:%M')
    # Before market open: contributes to today's opening sentiment,
    # together with anything buffered from after the previous close.
    if time<=time_open:
        #continue
        if len(next_date)>0:
            open_score[date] += sum(next_date)
            #open_score[date] += float(r.sentiment)
            od[date] += len(next_date)
            # NOTE(review): '%#m/%#d' (non-zero-padded) is Windows-only
            # strftime; POSIX needs '%-m/%-d'. Also assumes rows are in
            # chronological order and the previous date exists in od --
            # otherwise this raises KeyError. Confirm.
            od[prev_date.strftime("%#m/%#d/%Y")] -= len(next_date) #.lstrip("0%d/").replace(" 0", " ")
            next_date = []
        open_score[date] += float(r.sentiment)
    # After market close: buffer for the next trading day's open score.
    elif time>time_close:
        next_date.append(float(r.sentiment))
    # During trading hours (9am - 4pm slot): counts toward the close score.
    else:
        od[date] -= 1
        od_close[date] += 1
        close_score[date] += float(r.sentiment)
        continue
#print(od_close)
# Turn the accumulated totals into per-day averages.
for k,v in od.items():
    print(k,v)
    open_score[k] = open_score[k]/v
    if od_close[k] != 0:
        close_score[k] = close_score[k]/od_close[k]
    #print(k,open_score[k])
    #print(k,close_score[k])
#print(len(score))
#print(score)
##df=pd.DataFrame(open_score,close_score)
##df=df.transpose()
# Assemble one output row per date with both averages.
result = pd.DataFrame()
c = 0
for k,v in od.items():
    temp=pd.DataFrame({'open_score':[open_score[k]],'close_score':[close_score[k]],'date':[k]}) #'index':[c],
    #c += 1
    result = pd.concat([result,temp])
# NOTE(review): reset_index('date') treats 'date' as an index *level*, but
# the index here is a plain duplicated RangeIndex -- this looks wrong
# (likely intended: set_index('date') or reset_index(drop=True)). Confirm.
result = result.reset_index('date')
print(result)
#result.to_csv('Reliance_score.csv',sep=',',encoding='utf-8')
#result.to_json('Reliance_score.json')
| [
"cuthinho.crystal95@gmail.com"
] | cuthinho.crystal95@gmail.com |
f990af5381e0a8852f766dfa23da937f422cf1bd | fd3c0e1b06a7533e6b0133e59358b11ae5123e76 | /captcha_api/celery_worker.py | 4733c1f4717b4075427604194b4e4140569cbc79 | [
"Apache-2.0"
] | permissive | SonicNorg/captcha-api | 939276418b3c6244dac23412c6d7c6b993bda29c | 3be825d024fd4da96ee8f95088d451b4ffac52c0 | refs/heads/master | 2023-06-03T21:34:35.018420 | 2021-06-22T14:18:54 | 2021-06-22T14:18:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | from celery.schedules import crontab
from .app import celery, create_app
from .tasks import delete_old_captchas
# Application instance created so the Celery worker picks up the app's
# configuration (presumably a Flask app factory -- confirm in .app).
app = create_app()
@celery.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    """Register periodic jobs once Celery finishes configuring itself."""
    # Executes the delete-old-captchas cleanup at the top of every hour.
    sender.add_periodic_task(
        crontab(minute=0, hour="*/1"),
        delete_old_captchas.s(),
    )
| [
"cristian.schuszter@cern.ch"
] | cristian.schuszter@cern.ch |
1337f2878c504d9d15a39baca3d7e460d62f6bc4 | c422cfdcd0303395b62a383611dca19236ea0e15 | /core/migrations/0009_diaperchange_amount.py | 991260de5bff332950e762549154f4f031abc2fc | [
"BSD-2-Clause-Views",
"BSD-2-Clause"
] | permissive | Alan01252/babybuddy | c18d26769458fbfd60d7e5493c1fab911d624ddd | 5382527dc84530fe56a65c7452620bba41bfd668 | refs/heads/master | 2022-12-18T17:09:21.064011 | 2020-09-16T11:33:07 | 2020-09-16T11:33:07 | 291,678,434 | 1 | 0 | BSD-2-Clause | 2020-08-31T09:57:07 | 2020-08-31T09:57:06 | null | UTF-8 | Python | false | false | 419 | py | # Generated by Django 3.0.2 on 2020-01-26 21:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``amount`` field to the DiaperChange model."""

    dependencies = [
        ('core', '0008_auto_20190607_1422'),
    ]
    operations = [
        migrations.AddField(
            model_name='diaperchange',
            name='amount',
            # Nullable/blank so existing rows need no backfill.
            field=models.FloatField(blank=True, null=True, verbose_name='Amount'),
        ),
    ]
| [
"chris@chrxs.net"
] | chris@chrxs.net |
d289d25acaf78e7bb51c689c1de4b4495a3bbd9a | 244ecfc2017a48c70b74556be8c188e7a4815848 | /res/scripts/client/gui/scaleform/daapi/view/lobby/fortifications/fortdisabledefenceperiodwindow.py | d81f15c0ee94a51d408d7c2853b5cbd29a9df04e | [] | no_license | webiumsk/WOT-0.9.12 | c1e1259411ba1e6c7b02cd6408b731419d3174e5 | 5be5fd9186f335e7bae88c9761c378ff5fbf5351 | refs/heads/master | 2021-01-10T01:38:36.523788 | 2015-11-18T11:33:37 | 2015-11-18T11:33:37 | 46,414,438 | 1 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,109 | py | # 2015.11.18 11:54:00 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/fortifications/FortDisableDefencePeriodWindow.py
import BigWorld
from adisp import process
from gui import SystemMessages
from gui.Scaleform.daapi.view.lobby.fortifications.fort_utils.FortSoundController import g_fortSoundController
from gui.Scaleform.daapi.view.lobby.fortifications.fort_utils.FortViewHelper import FortViewHelper
from gui.Scaleform.daapi.view.meta.FortDisableDefencePeriodWindowMeta import FortDisableDefencePeriodWindowMeta
from gui.Scaleform.locale.FORTIFICATIONS import FORTIFICATIONS as ALIAS, FORTIFICATIONS
from gui.Scaleform.locale.SYSTEM_MESSAGES import SYSTEM_MESSAGES
from gui.shared.formatters import text_styles
from gui.shared.fortifications.context import DefencePeriodCtx
from helpers import i18n
class FortDisableDefencePeriodWindow(FortDisableDefencePeriodWindowMeta, FortViewHelper):
    """Confirmation window for disabling a fort's defence period.

    The user must re-type the fort's total defence resource count into an
    embedded input-checker component before the request is sent.
    NOTE(review): decompiled client code -- behavior notes below are limited
    to what is visible in this class.
    """
    def __init__(self, _ = None):
        super(FortDisableDefencePeriodWindow, self).__init__()
        self.__inputChecker = None
        # Control number the user must re-enter to confirm the action.
        self.__controlNumber = self.fortCtrl.getFort().getTotalDefRes()
        return
    def initInputChecker(self):
        # Push localized texts and the expected control number into the
        # registered input-checker component.
        self.__inputChecker.errorMsg = self.__makeInputCheckerError()
        self.__inputChecker.questionTitle = self.__makeInputCheckerTitle()
        self.__inputChecker.questionBody = self.__makeInputCheckerBody()
        self.__inputChecker.setControlNumbers(self.__controlNumber, BigWorld.wg_getIntegralFormat)
    def onWindowClose(self):
        # Flash callback: close button pressed.
        self.destroy()
    def onClickApplyButton(self):
        # Flash callback: user confirmed -- send the disable request.
        self.__setup()
    def onDefenceHourShutdown(self):
        # Fort event: close the window once the defence hour is shut down.
        if self.fortCtrl.getFort().isDefenceHourShutDown():
            self.destroy()
    def onShutdownDowngrade(self):
        self.destroy()
    def _onRegisterFlashComponent(self, viewPy, alias):
        # The embedded input-checker component registers itself here.
        self.__inputChecker = viewPy
        self.initInputChecker()
    def _populate(self):
        super(FortDisableDefencePeriodWindow, self)._populate()
        self.startFortListening()
        # Nothing to do if the defence hour is already shut down.
        if self.fortCtrl.getFort().isDefenceHourShutDown():
            return self.destroy()
        self.__makeMainData()
    def _dispose(self):
        # Drop the component reference and unsubscribe before base disposal.
        self.__inputChecker = None
        self.stopFortListening()
        super(FortDisableDefencePeriodWindow, self)._dispose()
        return
    def __makeInputCheckerError(self):
        # Localized error text shown when the typed number does not match.
        return text_styles.error(i18n.makeString(ALIAS.DEMOUNTBUILDING_ERRORMESSAGE))
    def __makeInputCheckerTitle(self):
        return text_styles.middleTitle(i18n.makeString(ALIAS.DISABLEDEFENCEPERIODWINDOW_INPUTCHECKER_TITLE))
    def __makeInputCheckerBody(self):
        # Embed the formatted control number into the localized question body.
        controlNumber = BigWorld.wg_getIntegralFormat(self.__controlNumber)
        controlNumber = text_styles.middleTitle(str(controlNumber))
        questionBody = text_styles.standard(i18n.makeString(ALIAS.DISABLEDEFENCEPERIODWINDOW_INPUTCHECKER_BODY, controlNumber=controlNumber))
        return questionBody
    def __makeMainData(self):
        # Build the static title/body texts and hand them to the Flash view.
        titleText = text_styles.main(i18n.makeString(FORTIFICATIONS.DISABLEDEFENCEPERIODWINDOW_MAINTEXT_TITLE))
        redText = text_styles.error(i18n.makeString(FORTIFICATIONS.DISABLEDEFENCEPERIODWINDOW_MAINTEXT_BODYREDTEXT))
        bodyText = text_styles.main(i18n.makeString(FORTIFICATIONS.DISABLEDEFENCEPERIODWINDOW_MAINTEXT_BODY, redText=redText))
        self.as_setDataS({'titleText': titleText,
         'bodyText': bodyText})
    @process
    def __setup(self):
        # Async (adisp) request to disable the defence period; on success play
        # a sound and push a system message, then close the window either way.
        result = yield self.fortProvider.sendRequest(DefencePeriodCtx(waitingID='fort/settings'))
        if result:
            g_fortSoundController.playDefencePeriodDeactivated()
            SystemMessages.g_instance.pushI18nMessage(SYSTEM_MESSAGES.FORTIFICATION_DEFENCEHOURDEACTIVATED, type=SystemMessages.SM_TYPE.Warning)
        self.destroy()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\fortifications\fortdisabledefenceperiodwindow.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:54:00 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
1d6d89dd402a1014ea003cc594770dd2a2538c49 | 6203b9132af8f78c6cb12242bd223fa17d14f31e | /leetcode/problems/556.py | 82b5c6ed99fea3e9b4e3d051b24c25cb28c78248 | [] | no_license | joshuap233/algorithms | 82c608d7493b0d21989b287a2e246ef739e60443 | dc68b883362f3ddcfb433d3d83d1bbf925bbcf02 | refs/heads/master | 2023-08-23T12:44:42.675137 | 2021-09-28T02:37:01 | 2021-09-28T02:37:01 | 230,285,450 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | # https://leetcode-cn.com/problems/next-greater-element-iii/
# 556. 下一个更大元素 III
class Solution:
    """556. Next Greater Element III -- next permutation of the decimal digits."""
    MAXI = 2 ** 31 - 1

    def nextGreaterElement(self, n: int) -> int:
        """Return the smallest integer greater than n that uses the same
        digits, or -1 if none exists or it overflows 32-bit signed range."""
        if n <= 9:
            return -1
        digits = list(str(n))
        last = len(digits) - 1
        # Scan right-to-left for the first ascent digits[pivot] < digits[pivot+1].
        pivot = last - 1
        while pivot >= 0 and digits[pivot] >= digits[pivot + 1]:
            pivot -= 1
        if pivot < 0:
            # Digits are non-increasing: n is already the largest permutation.
            return -1
        # Rightmost digit strictly greater than the pivot; swap them.
        succ = last
        while digits[succ] <= digits[pivot]:
            succ -= 1
        digits[pivot], digits[succ] = digits[succ], digits[pivot]
        # The suffix is non-increasing; reverse it to get the smallest tail.
        digits[pivot + 1:] = digits[last:pivot:-1]
        candidate = int(''.join(digits))
        return candidate if candidate <= self.MAXI else -1
| [
"shushugo233@gmail.com"
] | shushugo233@gmail.com |
39e6ef5ba97cd9e38d0837feeffdeb1c622ab405 | 81050b57d0b880b6cd4861a3b6a6c1899bb781a8 | /examples/AssB_Skeleton.py | c32e58bed8a4a1917a880f4fd653852c81c0d8bc | [] | no_license | raj0974722/energy-in-the-built-environment-assignment-b | 6c4c739133f2fb0ca71fbccedcc38eac8d141d5b | 34592a75669550723eefec88cb2671d0144f1f5f | refs/heads/main | 2023-09-06T06:13:10.978647 | 2021-10-13T10:36:16 | 2021-10-13T10:36:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,895 | py | # -*- coding: utf-8 -*-
"""
% Energy in the Built Environment
% Assignment 2: Optimal Home Energy Management
% Dr. Tarek AlSkaif
"""
import gurobipy as gp
import csv
import pandas as pd #for csv reading
import numpy as np
import matplotlib.pyplot as plt #for plotting
"""
Import your input data for the model
"""
# dynamic electricity prices vector
#household's 15-min PV generation vector
#household's 15-min demand vector
"""
Parameters value
"""
######## Time-step
Delta_t = 0.25 # 15 minute (0.25 hour) intervals
T=24*3*1/Delta_t #number of time-slots (in three days); note: float (288.0)
######## Limits on grid and max, min, and initial SOC
Pgridmax = 3 #[kW] maximum grid import/export power
Pbatmax = 4 #[kW] maximum battery (dis)charge power
SoC_min = 0.2 #[-] (battery min state of charge)
SoC_max = 1 #[-] (battery max state of charge)
SoC0 = 0.5 #[-] (initial battery state of charge at the beginning of the day)
C_bat = 13.5 #battery capacity parameter for a Tesla Powerwall rated at 13,5 [kWh]
eff_dis = 0.94 #battery discharging efficiency
eff_ch = 0.94 #battery charging efficiency
######## Plot power demand and PV generation data
f1 = plt.figure(1)
"""
Step 1: Create a model
"""
"""
Step 2: Define variables
"""
######## Define your decision variables for the time horizon using addVars
"""
Step 3: Add constraints
"""
######## Nonnegative variables
######## Power balance formula
######## Battery SoC dynamics constraint
######## SoC constraints
######## Power boundaries
"""
Step 4: Set objective function
"""
"""
Step 5: Solve model
"""
"""
Step 6: Print variables values for optimal solution
"""
######## Get the values of the decision variables
"""
Step 7: Plot optimal power output from each generator
"""
######## Plot results
f2 = plt.figure(2)
| [
"r.h.vaneldik@students.uu.nl"
] | r.h.vaneldik@students.uu.nl |
5c7543cc47d1040815f40e4fe51c0f025f62db7b | bf08aa7c8bf352c1349a5192f3a2beb4e124d67e | /modules/Commander.py | e7e155f79eaa2b763b14eea774fcf4e72bb5c893 | [] | no_license | kubovy/raspi-project | b6e6cf5fe85c416ef0c880aebdefcc4ed5851b17 | 3a5d7407a64616b72409fb715f6e6618d3151257 | refs/heads/master | 2020-04-12T06:57:21.324126 | 2019-09-07T22:53:48 | 2019-09-07T23:17:21 | 162,352,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,150 | py | #!/usr/bin/python2
# -*- coding:utf-8 -*-
#
# Author: Jan Kubovy (jan@kubovy.eu)
#
import prctl
import subprocess
import traceback
from threading import *
from lib.Module import Module
class Check(object):
def __init__(self, command, interval=10):
self.command = command
self.interval = interval
class Commander(Module):
"""Commander module"""
module_mqtt = None
__timer_map = {}
__last_values = {}
def __init__(self, checks=None, debug=False):
super(Commander, self).__init__(debug=debug)
for check in [] if checks is None else checks:
self.__enqueue(check)
def on_mqtt_message(self, path, payload):
if len(path) > 0: # {service}/control/commander
if len(path) == 1 and path[0] == "shutdown": # {service}/control/commander/shutdown
subprocess.call(["shutdown", "now"])
elif len(path) == 1 and path[0] == "restart": # {service}/control/commander/restart
subprocess.call(["reboot"])
else:
try:
result = subprocess.Popen('/usr/local/bin/mqtt-cli ' + path.join(" ") + ' ' + payload,
stdout=subprocess.PIPE,
shell=True).communicate()[0].strip()
self.__process_result(result)
except:
self.logger.error("Unexpected Error!")
traceback.print_exc()
def finalize(self):
super(Commander, self).finalize()
for key in self.__timer_map.keys():
self.logger.debug("Timer " + key + " = " + str(self.__timer_map[key]))
if self.__timer_map[key] is not None:
self.__timer_map[key].cancel()
def __enqueue(self, check):
timer = Timer(check.interval, self.__trigger, [check])
self.__timer_map[check.command] = timer
timer.daemon = True
timer.start()
def __trigger(self, check):
prctl.set_name(Commander.__name__)
try:
result = subprocess.Popen('/usr/local/bin/mqtt-cli ' + check.command,
stdout=subprocess.PIPE,
shell=True).communicate()[0].strip()
self.__process_result(result)
except:
self.logger.error("Unexpected Error!")
traceback.print_exc()
if not self.finalizing:
self.__enqueue(check)
def __process_result(self, result):
if result is not None and result != '':
for line in result.splitlines():
try:
parts = line.split(":", 1)
if parts[0] not in self.__last_values.keys() or parts[1] != self.__last_values[parts[0]]:
self.__last_values[parts[0]] = parts[1]
if self.module_mqtt is not None:
self.module_mqtt.publish(parts[0], parts[1], module=self)
except:
self.logger.error("Unexpected Error!")
traceback.print_exc()
| [
"jan@kubovy.eu"
] | jan@kubovy.eu |
13f9e87c63f89dc4365e47dafc87aa91bc474392 | 9a9b627dd5e01c8c2f83c253fcc1d9d1622b8103 | /gcn/utils.py | df2f00e0469f9d8cf0f5246ed32effbf7cc80155 | [
"MIT"
] | permissive | discovershu/GCN_1 | bc25268b78a182ccfc70552c9403c66d1982309a | d8b6206790d608999e92243249f92770f1542076 | refs/heads/master | 2020-04-11T09:03:34.226267 | 2018-12-13T16:03:59 | 2018-12-13T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,546 | py | import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
def parse_index_file(filename):
    """Parse a file of integer indices, one per line, into a list.

    Fix: the original iterated over ``open(filename)`` without ever closing
    it; a context manager guarantees the handle is released.
    """
    with open(filename) as f:
        return [int(line.strip()) for line in f]
def sample_mask(idx, l):
    """Create a boolean mask of length ``l`` that is True at positions ``idx``.

    Fix: the original converted via ``dtype=np.bool``; that alias was removed
    in NumPy 1.24, so the function crashed on modern NumPy. The mask is now
    built with a boolean dtype directly (no intermediate float array).
    """
    mask = np.zeros(l, dtype=bool)
    mask[idx] = True
    return mask
def load_data(dataset_str):
    """Load a Planetoid-style citation dataset from the gcn/data directory.

    Expects the pickled files ``ind.<dataset_str>.{x,y,tx,ty,allx,ally,graph}``
    and the plain-text ``ind.<dataset_str>.test.index``:

    * x/tx/allx -- scipy CSR feature matrices (train / test / train+unlabeled)
    * y/ty/ally -- one-hot label arrays matching x/tx/allx
    * graph     -- dict {node_index: [neighbor_indices]}
    * test.index -- positions of the test instances in the full graph

    :param dataset_str: Dataset name (e.g. 'cora', 'citeseer', 'pubmed').
    :return: (adj, features, y_train, y_val, y_test, train_mask, val_mask,
        test_mask) -- adjacency matrix, row-ordered features, zero-padded
        label arrays and the boolean masks selecting each split.
    """
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            # Python 3 needs latin1 to unpickle the Python-2-era files.
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset_str == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended
    # Reorder the stacked test features/labels to match graph node order.
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    # Split: labeled train nodes, next 500 for validation, test from index file.
    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    idx_val = range(len(y), len(y)+500)
    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])
    # Per-split label arrays: zero everywhere outside the split's mask.
    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]
    return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
    """Convert a scipy sparse matrix (or a list of them) to tuple
    representation ``(coords, values, shape)``."""
    def to_tuple(mx):
        coo = mx if sp.isspmatrix_coo(mx) else mx.tocoo()
        coords = np.vstack((coo.row, coo.col)).transpose()
        return coords, coo.data, coo.shape
    if isinstance(sparse_mx, list):
        # Convert each entry in place, mirroring the original's mutation
        # of the caller-supplied list.
        for idx, mx in enumerate(sparse_mx):
            sparse_mx[idx] = to_tuple(mx)
        return sparse_mx
    return to_tuple(sparse_mx)
def preprocess_features(features):
    """Row-normalize the feature matrix (each row sums to 1) and convert
    it to tuple representation."""
    inv_rowsum = np.power(np.array(features.sum(1)), -1).flatten()
    # Rows that sum to zero would produce inf; zero them out instead.
    inv_rowsum[np.isinf(inv_rowsum)] = 0.
    normalized = sp.diags(inv_rowsum).dot(features)
    return sparse_to_tuple(normalized)
def normalize_adj(adj):
    """Symmetrically normalize an adjacency matrix (D^-1/2 A D^-1/2 for a
    symmetric A)."""
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1))
    inv_sqrt_deg = np.power(degrees, -0.5).flatten()
    # Isolated nodes have degree 0 -> inf after the power; zero them out.
    inv_sqrt_deg[np.isinf(inv_sqrt_deg)] = 0.
    d_inv_sqrt = sp.diags(inv_sqrt_deg)
    # Same association order as the original: (A . D^-1/2)^T . D^-1/2
    return adj.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt).tocoo()
def preprocess_adj(adj):
    """Add self-loops, symmetrically normalize, and return the tuple
    representation (propagation matrix of the simple GCN model)."""
    with_self_loops = adj + sp.eye(adj.shape[0])
    return sparse_to_tuple(normalize_adj(with_self_loops))
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
    """Build the feed dictionary mapping graph placeholders to values."""
    feed_dict = {
        placeholders['labels']: labels,
        placeholders['labels_mask']: labels_mask,
        placeholders['features']: features,
        # features is a (coords, values, shape) tuple; the placeholder wants
        # the number of nonzero entries, i.e. the shape of the values array.
        placeholders['num_features_nonzero']: features[1].shape,
    }
    for i, s in enumerate(support):
        feed_dict[placeholders['support'][i]] = s
    return feed_dict
def chebyshev_polynomials(adj, k):
    """Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
    print("Calculating Chebyshev polynomials up to order {}...".format(k))
    adj_normalized = normalize_adj(adj)
    # Normalized graph Laplacian L = I - D^-1/2 A D^-1/2.
    laplacian = sp.eye(adj.shape[0]) - adj_normalized
    largest_eigval, _ = eigsh(laplacian, 1, which='LM')
    # Rescale so the spectrum lies in [-1, 1], as Chebyshev expansion requires.
    scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
    # T_0 = I, T_1 = scaled Laplacian.
    t_k = list()
    t_k.append(sp.eye(adj.shape[0]))
    t_k.append(scaled_laplacian)
    def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
        # Chebyshev recurrence: T_k = 2 * L_scaled . T_{k-1} - T_{k-2}.
        s_lap = sp.csr_matrix(scaled_lap, copy=True)
        return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
    for i in range(2, k+1):
        t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
    return sparse_to_tuple(t_k)
if __name__ == '__main__':
load_data('cora') | [
"43221966+zxj32@users.noreply.github.com"
] | 43221966+zxj32@users.noreply.github.com |
eaf7eb54279dd2c9b56de4229d8ca84d8371f765 | d4d22d272aa0b3d820eb53e74c756e1a283a1737 | /projects/01_fyyur/starter_code/migrations/versions/679779ce0d82_.py | 875f2af9730d2cff0b6aa1a41326bb05ea4821ef | [] | no_license | samadarshad/FSND | 4df5dad6ec78a0cfeb518c0f7525bfd006ff7f62 | 09fb9c5988b968fbf9b1cf9efc403768c73d5213 | refs/heads/master | 2023-02-05T09:00:09.240158 | 2020-12-28T13:17:10 | 2020-12-28T13:17:10 | 311,561,423 | 0 | 0 | null | 2020-12-18T09:23:50 | 2020-11-10T06:02:39 | Python | UTF-8 | Python | false | false | 1,887 | py | """empty message
Revision ID: 679779ce0d82
Revises: 508072839f98
Create Date: 2020-11-11 15:58:27.099438
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '679779ce0d82'
down_revision = '508072839f98'
branch_labels = None
depends_on = None
def upgrade():
    """Create the Show table and add website/seeking columns to Artist/Venue."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('Show',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('start_time', sa.DateTime(), nullable=False),
    sa.Column('artist_id', sa.Integer(), nullable=False),
    sa.Column('venue_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['artist_id'], ['Artist.id'], ),
    sa.ForeignKeyConstraint(['venue_id'], ['Venue.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # New columns are nullable so existing rows migrate without defaults.
    op.add_column('Artist', sa.Column('seeking_description', sa.String(), nullable=True))
    op.add_column('Artist', sa.Column('seeking_venue', sa.Boolean(), nullable=True))
    op.add_column('Artist', sa.Column('website', sa.String(), nullable=True))
    op.add_column('Venue', sa.Column('genres', sa.ARRAY(sa.String(length=120)), nullable=True))
    op.add_column('Venue', sa.Column('seeking_description', sa.String(), nullable=True))
    op.add_column('Venue', sa.Column('seeking_talent', sa.Boolean(), nullable=True))
    op.add_column('Venue', sa.Column('website', sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the added columns and the Show table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('Venue', 'website')
    op.drop_column('Venue', 'seeking_talent')
    op.drop_column('Venue', 'seeking_description')
    op.drop_column('Venue', 'genres')
    op.drop_column('Artist', 'website')
    op.drop_column('Artist', 'seeking_venue')
    op.drop_column('Artist', 'seeking_description')
    # Dropped last: Show holds foreign keys into Artist and Venue.
    op.drop_table('Show')
    # ### end Alembic commands ###
| [
"samadarshad@hotmail.co.uk"
] | samadarshad@hotmail.co.uk |
54cfe6d8e5e89f6a84eb6e65c60a0509c7eafed2 | a05aad4430cdc75c4dd86cd3388a984fe0689841 | /Find_lines_start_with.py | 1eba2740778f05cf48aa8028fbaa7fd08afdea86 | [] | no_license | FDSGAB/Python-Programs | e3901a486f438bfdcffdd1e621368c7353a547d6 | 322721f33ef7bb326afae6f142a54a507b602eea | refs/heads/master | 2022-12-08T02:08:54.685932 | 2020-08-19T16:01:58 | 2020-08-19T16:01:58 | 288,767,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | fname = input("Enter file name: ")
fh = open(fname)
count = 0
for line in fh:
if line.startswith('From '):
x=line.split()
print(x[1])
count=count+1
print("There were", count, "lines in the file with From as the first word") | [
"noreply@github.com"
] | FDSGAB.noreply@github.com |
22229c7fae45e05cb97e02b3869b3bd63233ea24 | d7ccacb98b2ec24753c1a6b726f1d2a0363d30e4 | /conduit/apps/authentication/views.py | f28330f21f877929629f86e20b703b8b8575ea94 | [] | no_license | srdavicho/conduit-django | 8dc625903aeac2f18ddd6fdc4badcbe8abdf37ed | f66f955eb4eb0e77196d2a37da35c7072b79727e | refs/heads/master | 2021-01-19T09:05:22.915104 | 2017-04-25T10:45:59 | 2017-04-25T10:45:59 | 87,721,346 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py |
from rest_framework import status
from rest_framework.generics import RetrieveUpdateAPIView
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from .renderers import UserJSONRenderer
from .serializers import (
LoginSerializer, RegistrationSerializer, UserSerializer,
)
class UserRetrieveUpdateAPIView(RetrieveUpdateAPIView):
    """GET/PUT endpoint for the currently authenticated user's account."""
    permission_classes = (IsAuthenticated,)
    renderer_classes = (UserJSONRenderer,)
    serializer_class = UserSerializer
    def retrieve(self, request, *args, **kwargs):
        """Return the serialized current user (``request.user``)."""
        # There is nothing to validate or save here. Instead, we just want the
        # serializer to handle turning our `User` object into something that
        # can be JSONified and sent to the client.
        serializer = self.serializer_class(request.user)
        return Response(serializer.data, status=status.HTTP_200_OK)
    def update(self, request, *args, **kwargs):
        """Partially update the current user (and nested profile).

        Reads the optional ``user`` key of the request body; any field not
        supplied keeps its current value.
        """
        user_data = request.data.get('user',{})
        serializer_data = {
            'username': user_data.get('username', request.user.username),
            'email': user_data.get('email', request.user.email),
            'profile':{
                'bio': user_data.get('bio', request.user.profile.bio),
                'image': user_data.get('image', request.user.profile.image)
            }
        }
        # Here is that serialize, validate, save pattern we talked about
        # before.
        serializer = self.serializer_class(
            request.user, data=serializer_data, partial=True
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
class LoginAPIView(APIView):
    """POST endpoint that authenticates a user from the ``user`` payload."""
    permission_classes = (AllowAny,)
    renderer_classes = (UserJSONRenderer,)
    serializer_class = LoginSerializer
    def post(self, request):
        """Validate credentials; the serializer raises 400 on failure."""
        user = request.data.get('user', {})
        # Notice here that we do not call `serializer.save()` like we did for
        # the registration endpoint. This is because we don't actually have
        # anything to save. Instead, the `validate` method on our serializer
        # handles everything we need.
        serializer = self.serializer_class(data=user)
        serializer.is_valid(raise_exception=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class RegistrationAPIView(APIView):
    """POST endpoint that creates a new user account."""
    # Allow any user (authenticated or not) to hit this endpoint.
    permission_classes = (AllowAny,)
    renderer_classes = (UserJSONRenderer,)
    serializer_class = RegistrationSerializer
    def post(self, request):
        """Create a user from the ``user`` key of the request body."""
        user = request.data.get('user', {})
        # The create serializer, validate serializer, save serializer pattern
        # below is common and you will see it a lot throughout this course and
        # your own work later on. Get familiar with it.
        serializer = self.serializer_class(data=user)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
| [
"david.gaudreault@gmail.com"
] | david.gaudreault@gmail.com |
99ccf909e1b7071804da551122f2a3d7c85bb020 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/192/usersdata/273/70732/submittedfiles/al6.py | 62617a79d4eba687c0a500c294d12922ab0a48f2 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | # -*- coding: utf-8 -*-
i= 2
c= 0
n= int(input('digite o valor de n: '))
while(i<n):
if (n%i)==0:
c=c+1
print(i)
i=i+1
if i==0
print(' primo')
if i>0
print('NAO PRIMO')) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
eab8438590ab388eb4e5179491ddaa4908016ce1 | 108ace57959804d4b7345827ccb19fb7033cb131 | /dict.py | ff3908bf4db51390753d40fd4d26846ccdb433dd | [] | no_license | RajeshReddy91/Notes | 35aa100a7817cbd260ce5d8ca532f7e1636b8320 | 14c248e8401e3c5adf29b4ec19514e931e10d8e0 | refs/heads/master | 2022-12-08T03:06:55.376107 | 2022-11-29T04:09:28 | 2022-11-29T04:09:28 | 232,319,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,425 | py | """
sorted() method sorts the given sequence either in ascending order or in descending order and always return
the a sorted list. This method does not effect the original sequence.
sort() function is very similar to sorted() but unlike sorted it returns nothing and makes changes to the
original sequence. Moreover, sort() is a method of list class and can only be used with lists.
"""
# Dup keys are not allowed, but dup values are allowed
# If we try to add key-value where this key is already present, then old value will be replaced with new value.
# order is not applicable and all elements will be inserted based on hash of keys.
# hence indexing and slicing are not applicable
# keys and values can be heterogeneous objects. ie. dict = {100:'rjesh', reddy:'lname'}
# dict is mutable
"""
1.dict()
2.len()
3.clear()
4.get()
5.pop()
6.popitem()
7.keys()
8.values()
9.items()
10.setdefault()
11.update()
12.copy()
"""
#***** Dict comprehension *************
# we can create dict in following ways
#1
d={}
print(type(d))
print(d)
#2
e = dict()
print(type(e))
print(e)
# adding key-value pairs
d={}
d['fname'] = 'rajesh'
d['lname'] = 'reddy'
d['addr'] = 'karnataka'
d['fname'] = 'raj' # if we enter dup key then value will be updated with new value
print(d)
d = {'fanme' : 'rajesh',
'lanme' : 'reddy',
'578975989' : 'phone_num'}
print (d)
# How to access data from dict? by using 'key'
d = {'fname' : 'rajesh',
'lname' : 'reddy',
'578975989' : 'phone_num'}
print(d['fname'])
# How to access data from dict? by using 'key'
d = {'fname' : 'rajesh',
'lname' : 'reddy',
578975989 : 'phone_num'}
# print (d['fname'])
key = eval(input('enter any key : ')) # if we use int for type conversion or just input then we will get error
if key in d:
print (d[key])
else:
print ('specified key is not present in dict')
# How to add key-values to empty dict?
mlist = {}
while True:
name = input('enter name os stdnt :')
marks = int(input('enter stdnt marks : '))
mlist[name] = marks
print('marks entered succesfuly')
option = input('do u want add another entery?[Yes|No]')
if option.lower()=='no':
break
print('these are the marks entered')
print(mlist)
# OR - when u enter valid option yes|no
mlist = {}
while True:
name = input('enter name os stdnt :')
marks = int(input('enter stdnt marks : '))
mlist[name] = marks
print('marks entered succesfuly')
option = input('do u want add another entery?[Yes|No]')
while True:
if option.lower()=='no':
option='no'
break
elif option.lower()=='yes':
option='yes'
break
else:
option=input('plz enter valid i/p [yes|no]:')
if option=='no':
break
print('these are the marks entered')
print(mlist)
# simlified
mlist = {}
while True:
name = input('enter name os stdnt :')
marks = int(input('enter stdnt marks : '))
mlist[name] = marks
print('marks entered succesfuly')
option = input('do u want add another entery?[Yes|No]')
while True:
if option.lower() in ('yes','no'):
break
else:
option=input('plz enter valid i/p [yes|no]:')
if option=='no':
break
print('these are the marks entered')
print(mlist)
# more simplified
mlist = {}
while True:
name = input('enter name os stdnt :')
marks = int(input('enter stdnt marks : '))
mlist[name] = marks
print('marks entered succesfuly')
option = input('do u want add another entery?[Yes|No]')
while option.lower() not in ('yes','no'):
option=input('plz enter valid i/p [yes|no]:')
if option=='no':
break
print('these are the marks entered')
for x in mlist:
print('{}\t\t{}'.format(x,mlist[x]))
# insert and update in dict
d = {100 : 'raj', 200 : 'reddy', 300 : 'navalli'}
print(d)
d[400]='xyz'
print(d)
d[100] = 'rajesh'
print(d)
# to delete
del d[100] # we can also use d.pop[key] - after executing it will display deleted key
# delete arbitrary item and display
d.popitem()
# to get ascii value of alphabets and delete
# del and pop can only be used when the key is known; popitem() removes and returns an item without needing a key
i=0
d={}
while i<26:
d[chr(65+i)] = 65+i
i+=1
print (d)
while len(d) != 0:
print('processing item:',d.popitem())
print('now d is empty',d)
# to delete whole dict
d.clear() #o/p {}
# if we use "del d" then whole dict will be deleted.
del d
############ important methods and functions related to dict
d=dict()
print(d) # will print empty dict
e=dict({100:'a', 200:'b', 300:'c'})
print(e)
# using list of tuples
f=dict([(1,'a'),(2,'b'),(3,'c')])
print(f)
# using tuples of tuples
g=dict([(1,'a'),(2,'b'),(3,'c')])
print(g)
# using list of list
h=dict([[1,'a'],[2,'b'],[3,'c']])
print(h)
# len(d) - returns number of items (key-value pairs) in the dict
# to get values associated with keys
# d[key] - will throw error if key is not present
# or d.get(key) - will return "None" if key is not present
# if the key is not available, get() returns the supplied default 'NA'
d = {1:'a', 2:'b', 3:'c', 4:'c'}
print(d.get(1, 'NA'))
print(d.get(5, 'NA')) # key 5 is not available, so the default 'NA' is returned
# interview question - get only keys and sort it
# k = d.keys() # k will hold all the keys which are in dict d, k will be of type dict_keys
# will get A-Z with their ascii values
i=0
d={}
while i<26:
d[chr(65+i)] = 65+i
i+=1
for k in d.keys(): # interview answer
print(sorted(k))
"""
The primary difference between the list sort() function and the sorted() function is that the sort() function
will modify the list it is called on. The sorted() function will create a new list containing a sorted version
of the list it is given. The sorted() function will not modify the list passed as a parameter. If you want to
sort a list but still have the original unsorted version, then you would use the sorted() function.
If maintaining the original order of the list is unimportant, then you can call the sort() function on the list.
A second important difference is that the sorted() function will return a list so you must assign the returned
data to a new variable. The sort() function modifies the list in-place and has no return value.
"""
d.keys()
d.values() # will give u all values in the dict and d will be of type dict_values
d.items() # will return key-value pairs, return type will be dict_items in the form of list of tuples
# to access both keys and values
d = {1:'a', 2:'b', 3:'c', 4:'c'}
for k, v in d.items():
print('{}:{}'.format(k,v))
# if the specified key is available, then it will display corresponding value,
# if the specified key is not available, then provided key-value pair will be added as a new item to the dict
d = {1:'a', 2:'b', 3:'c', 4:'c'}
print(d.setdefault(1,'d')) #if key=1 is there then it will display its value,
print(d)
print(d.setdefault(5,'d')) # if its not there then key=5 with value=d will be added
print(d)
# add all the items present in d2 into d1
d1 = {1:'a', 2:'b', 3:'c', 4:'c'}
d2 = {5:'d', 6:'e', 7:'f', 8:'g'}
print(d1)
d1.update(d2)
print(d1)
# if key present in d2 is already there in d1 then value of that key in d1 will be updated with key=value present in d2
# update will try to replicate whatever is present in d2 into d1
d1 = {1:'a', 2:'b', 3:'c', 4:'c'}
d2 = {4:'d', 6:'e', 7:'f', 8:'g'}
print(d1)
d1.update(d2)
print(d1)
# d2 = d1 ==> duplicate reference variable and aliasing
# d2 = d1.copy() ==> duplicate object and cloning
d1 = {1:'a', 2:'b', 3:'c', 4:'c'}
d2=d1.copy()
print(d1) # both d1 and d2 will have same values but memory allocation will be different
print(d2)
print(id(d1))
print(id(d2))
print(d1 is d2)
d2=d1
print(d1 is d2) # True
print(id(d1)) # both d1 and d2 will have same memory allocation
print(id(d2))
d1[1]='d' # updating key value in d1
print(d1)
print(d2) # same will be reflected in d2
#wap to find number of occurrence of vowels in a given word
#************* Dict comprehension *************
# {1:1, 2:4, 3:9, 4:16. 5:25}
# d = { key:value for x in range(1, 6) | if condition}
d = { x:x*x for x in range(1,6) }
print(d)
alpha = { x:chr(64+x) for x in range(1,26) } # print alphabets
print(alpha)
#######################################
# wap to enter stdnt name and marks, and display marks by taking stdnts name as input
n = int(input('plz enter number of students:'))
d={}
for i in range(n):
name = input('enter stdnt name:')
marks = int(input('enter marks: '))
d[name] = marks
print('all stdnts data entered')
while True:
name = input('enter stdnt name to get marks : ')
if name in d:
print('marks obtained by {} is : {}'.format(name, d[name]))
else:
print('invalid name, plz enter correct name')
option = input('Do you want to get marks of another student?[yes|no]')
while option.lower() not in ['yes','no']:
option = input('plz enter valid option[yes,no]: ')
if option.lower() == 'no':
break
print('thanks for using our application')
| [
"noreply@github.com"
] | RajeshReddy91.noreply@github.com |
6f0c2d03064d88a0781bb717e46c3b691dc715b3 | c3f33a6db6f6cd4bcb6a7eb799d005908954ebed | /magic_methods/tests/test_unary_operators.py | 2c79d938e3dd180d375fcdc35ff5d7aefbe6f89b | [] | no_license | technolingo/highpy | b7feffdd6ffb6be52629be751d53af75a55b208b | c3fd3a1dfeb904a05f174939c4f444288b567807 | refs/heads/master | 2022-10-14T14:51:33.389922 | 2021-01-10T17:30:51 | 2021-01-10T17:30:51 | 164,992,939 | 0 | 0 | null | 2022-09-30T19:21:42 | 2019-01-10T05:00:53 | Python | UTF-8 | Python | false | false | 926 | py | from ..unary_operators import Number as N
def test_negative():
a = -N(5)
assert isinstance(a, N)
assert a.number == -5
def test_positive():
a = +N(-5)
assert isinstance(a, N)
assert a.number == 5
def test_absolute():
a = abs(N(-5))
assert isinstance(a, N)
assert a.number == 5
def test_bitwise_invert():
a = ~N(5)
assert isinstance(a, N)
assert a.number == -6
def test_complex():
a = complex(N(5))
assert isinstance(a, complex)
assert a == (5 + 0j)
def test_int():
a = int(N(5.1))
assert isinstance(a, int)
assert a == 5
def test_float():
a = float(N(5))
assert isinstance(a, float)
assert a == 5.0
# === Only available in Python 2 ===
# def test_oct():
# a = oct(N(5))
# assert isinstance(a, oct)
# assert a == '0o5'
# def test_hex():
# a = hex(N(5))
# assert isinstance(a, hex)
# assert a == '0x5'
| [
"11096690+technolingo@users.noreply.github.com"
] | 11096690+technolingo@users.noreply.github.com |
017d0a368e8e5d10a0f7367cfc150195048389dd | ccc3ff9e6629d485825fe232a89b486654741ba9 | /unidade07/ajeita_lista/p.py | c9a599443537d2aa36a1f622105b316e226aad93 | [] | no_license | tulioac/ExerciciosTST | f5e483684c9e7bc91bc9924d0dd69a0d38e6efd1 | bd6b79355a95004a713c95df82537d0e3f4c71c4 | refs/heads/master | 2020-06-03T17:09:08.424871 | 2019-06-20T23:40:56 | 2019-06-20T23:40:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | def minimo_impar(lista, posicao):
minimo = 10000000
posicao_min = -1
for i in range (posicao, len(lista)):
if lista[i] < minimo and lista[i] % 2 != 0:
minimo = lista[i]
posicao_min = i
return posicao_min
def maximo_par(lista, posicao):
maximo = -10000000
posicao_max = -1
for i in range (posicao, len(lista)):
if lista[i] > maximo and lista[i] % 2 == 0:
maximo = lista[i]
posicao_max = i
return posicao_max
def troca_posicao(lista, atual, desejada):
if atual > desejada:
lista.insert(desejada, lista[atual])
lista.pop(atual+1)
else:
lista.insert(desejada+1, lista[atual])
lista.pop(atual)
return lista
def ajeita_lista(lista):
for i in range(len(lista)):
posicao_minimo_impar = minimo_impar(lista, i)
troca_posicao(lista, posicao_minimo_impar, i)
posicao_maximo_par = maximo_par(lista, i)
troca_posicao(lista, posicao_maximo_par, i) | [
"tutex1500@gmail.com"
] | tutex1500@gmail.com |
6ec95f89ce993de65e468f212786248298f66665 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/d79b6c84068e6f5fe995a74e39cd3f63d86bb294-<parse_lldp_intf>-bug.py | ddbc6fc1aa3322f069ebbc9cb05db83582c1618e | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | def parse_lldp_intf(self, data):
match = re.search('Interface:\\s*(\\S+)', data, re.M)
if match:
return match.group(1) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
7a63e1a4a6717671c7176bf91eda13961f655536 | 99094cc79bdbb69bb24516e473f17b385847cb3a | /72.Edit Distance/Solution.py | 11b4f12f5ee723dcde3137a39b89d6242e6e0462 | [] | no_license | simonxu14/LeetCode_Simon | 7d389bbfafd3906876a3f796195bb14db3a1aeb3 | 13f4595374f30b482c4da76e466037516ca3a420 | refs/heads/master | 2020-04-06T03:33:25.846686 | 2016-09-10T00:23:11 | 2016-09-10T00:23:11 | 40,810,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | __author__ = 'Simon'
class Solution(object):
def minDistance(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
dp = [[0 for j in range(len(word2)+1)] for i in range(len(word1)+1)]
for j in range(len(word2)+1):
dp[0][j] = j
for i in range(len(word1)+1):
dp[i][0] = i
for i in range(1,len(word1)+1):
for j in range(1,len(word2)+1):
if word1[i-1] == word2[j-1]:
dp[i][j] = dp[i-1][j-1]
else:
dp[i][j] = min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1]) + 1
return dp[len(word1)][len(word2)] | [
"simonxu14@gmail.com"
] | simonxu14@gmail.com |
b9c5ac147c500ee983edcc9fe10950a1a98fd9ce | 3d7039903da398ae128e43c7d8c9662fda77fbdf | /database/JavaScript/juejin_2514.py | eb9dae4e3fea3f75cbab068f9fcccaaa3a6b1488 | [] | no_license | ChenYongChang1/spider_study | a9aa22e6ed986193bf546bb567712876c7be5e15 | fe5fbc1a5562ff19c70351303997d3df3af690db | refs/heads/master | 2023-08-05T10:43:11.019178 | 2021-09-18T01:30:22 | 2021-09-18T01:30:22 | 406,727,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64,994 | py | {"err_no": 0, "err_msg": "success", "data": [{"article_id": "6996311879895547912", "article_info": {"article_id": "6996311879895547912", "user_id": "1565333361004279", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "记录寄生继承和ES6 Class继承", "brief_content": "寄生组合继承 这是一种接近完美(接下来会展开)的继承方式,先来看其它一些继承方式有助于记忆; 原型链继承 这就是原型链继承, 优点:继承了父类的模板和原型 缺点: 子类修改了父类的引用类型的变量,会影", "is_english": 0, "is_original": 1, "user_index": 2.038840227317261, "original_type": 0, "original_author": "", "content": "", "ctime": "1628955897", "mtime": "1628997899", "rtime": "1628997899", "draft_id": "6994787042970304519", "view_count": 59, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 3, "is_hot": 0, "rank_index": 0.00093716, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1565333361004279", "user_name": "401", "company": "摇浆部", "job_title": "前端开发", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/mosaic-legacy/3793/3131589739~300x300.image", "level": 1, "description": "", "followee_count": 36, "follower_count": 3, "post_article_count": 12, "digg_article_count": 175, "got_digg_count": 26, "got_view_count": 808, "post_shortmsg_count": 0, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 34, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": 
"0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6996311879895547912, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844904165060116487", "article_info": {"article_id": "6844904165060116487", "user_id": "1996368846268334", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844904165060116487", "cover_image": "", "is_gfw": 0, "title": 
"【JS】节点截图的最终解决方案dom-to-image与html2canvas", "brief_content": "...", "is_english": 0, "is_original": 1, "user_index": 0.33339625134645, "original_type": 0, "original_author": "", "content": "", "ctime": "1590050167", "mtime": "1598574980", "rtime": "1590050653", "draft_id": "6845076786070421511", "view_count": 3295, "collect_count": 13, "digg_count": 13, "comment_count": 2, "hot_index": 179, "is_hot": 0, "rank_index": 0.00093654, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1996368846268334", "user_name": "清一色天空", "company": "摸鱼划水公司", "job_title": "前端开发", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/9/24/16d62c7958563afa~tplv-t2oaga2asx-image.image", "level": 3, "description": "架构神聊者以及炫酷canvas、css3的好奇宝宝", "followee_count": 19, "follower_count": 283, "post_article_count": 15, "digg_article_count": 41, "got_digg_count": 2224, "got_view_count": 146734, "post_shortmsg_count": 25, "digg_shortmsg_count": 17, "isfollowed": false, "favorable_author": 0, "power": 3091, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844904165060116487, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844904194189557773", "article_info": {"article_id": "6844904194189557773", "user_id": "3966693685068510", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "纯JS打造级联选择器控件,仿ElementUi(附源码)", "brief_content": "公司之前有过Vue开发的项目,用到了ElementUi的级联选择器控件。不得了了,产品爸爸们开始作妖了,哎呦不错哦,我要用它到我这个项目里(项目以Js + Php为架构,前后端不分离)。 “这个需求很简单,怎么实现我不管。” 既然battle不过,那没办法,只能写一个纯Js插件…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1592391713", "mtime": "1605850413", "rtime": "1592449129", "draft_id": "6845076826671300616", "view_count": 2553, "collect_count": 24, "digg_count": 32, "comment_count": 7, "hot_index": 166, "is_hot": 0, "rank_index": 0.00093618, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3966693685068510", "user_name": "大王叫我来爬山", "company": "北京亿欧网盟科技有限公司", "job_title": "前端开发工程师", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/7/12/16be4e3755776605~tplv-t2oaga2asx-image.image", "level": 2, "description": "擅长前端各项技能及框架", "followee_count": 17, "follower_count": 40, "post_article_count": 5, "digg_article_count": 128, "got_digg_count": 144, "got_view_count": 11259, "post_shortmsg_count": 0, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 256, 
"study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844904194189557773, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6996480955292598302", "article_info": {"article_id": "6996480955292598302", "user_id": "2348212567683421", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "进阶教程 6. 
正则应用", "brief_content": "上文介绍了正则的入门,本文将接着说正则虽然很傲娇,但是她也是风情万种,她能帮你解决很多复杂问题,不来看看么", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628995457", "mtime": "1629011180", "rtime": "1629011180", "draft_id": "6996475762391908360", "view_count": 47, "collect_count": 0, "digg_count": 3, "comment_count": 0, "hot_index": 5, "is_hot": 0, "rank_index": 0.00093599, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2348212567683421", "user_name": "和雍", "company": "滴滴出行", "job_title": "前端开发", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/91f7199852a14f4febd3e2ced75e03c5~300x300.image", "level": 2, "description": "我的野摩托经常喝多了", "followee_count": 19, "follower_count": 9, "post_article_count": 32, "digg_article_count": 78, "got_digg_count": 181, "got_view_count": 2740, "post_shortmsg_count": 1, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 208, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 
0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}], "user_interact": {"id": 6996480955292598302, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "7000191211721981960", "article_info": {"article_id": "7000191211721981960", "user_id": "317104195124526", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/3a8e4e9c6e094b979800ee5957d7d4f7~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "深入理解 JavaScript 原型", "brief_content": "JavaScript 中有个特殊的存在:对象。每个对象还都拥有一个原型对象,并可以从中继承方法和属性。本文已参与掘金创作者训练营第三期「话题写作」赛道", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1629859072", "mtime": "1630315146", "rtime": "1629872649", "draft_id": "7000185291805818893", "view_count": 42, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 3, "is_hot": 0, "rank_index": 0.00093556, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "317104195124526", "user_name": "chengdwu", "company": "", "job_title": "web前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/c59b4e44c8ca03af03acceffc1a263a2~300x300.image", "level": 1, "description": "", "followee_count": 3, "follower_count": 1, 
"post_article_count": 6, "digg_article_count": 6, "got_digg_count": 15, "got_view_count": 1557, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 30, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 7000191211721981960, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844904160198918158", "article_info": {"article_id": "6844904160198918158", "user_id": "747323640250926", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844904160198918158", "cover_image": "", "is_gfw": 0, "title": "你可能不太了解的JSON", "brief_content": "这个知识点面试中被问的概率虽然很低,但是也有可能会问。 注意:JSON的key一定要用双引号,以及value如果是字符串也一定要用双引号。 
原生的JSON对象得到以下浏览器支持。", "is_english": 0, "is_original": 1, "user_index": 5.8380451654058, "original_type": 0, "original_author": "", "content": "", "ctime": "1589561405", "mtime": "1598961882", "rtime": "1589721783", "draft_id": "6845076777862184968", "view_count": 3002, "collect_count": 8, "digg_count": 21, "comment_count": 4, "hot_index": 175, "is_hot": 0, "rank_index": 0.00093548, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "747323640250926", "user_name": "iskeepingon", "company": "", "job_title": "", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/fc685c76669816629c70836efbce4242~300x300.image", "level": 2, "description": "", "followee_count": 0, "follower_count": 39, "post_article_count": 30, "digg_article_count": 35, "got_digg_count": 154, "got_view_count": 30555, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 459, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, 
"id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844904160198918158, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844903924172865543", "article_info": {"article_id": "6844903924172865543", "user_id": "430664257386558", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844903924172865543", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/8/25/16cc9264d8fa31b1~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "Typescript版图解Functor , Applicative 和 Monad", "brief_content": "本文是经典的Functors, Applicatives, And Monads In Pictures的Typescript翻译版本。 Functor/Applicative/Monad是函数式编程中的一些比较‘基础’的概念,反正我是不认同‘基础’这个说法的,笔者也阅读过很多…", "is_english": 0, "is_original": 1, "user_index": 11.014284655693, "original_type": 0, "original_author": "", "content": "", "ctime": "1566742852", "mtime": "1600063966", "rtime": "1566783690", "draft_id": "6845076429311311886", "view_count": 4463, "collect_count": 51, "digg_count": 77, "comment_count": 8, "hot_index": 308, "is_hot": 0, "rank_index": 0.00093545, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "430664257386558", "user_name": "荒山", "company": "惟客", "job_title": "前端", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/12/20/16f22fbb0b9c7925~tplv-t2oaga2asx-image.image", "level": 6, "description": "草根前端", "followee_count": 74, "follower_count": 11368, "post_article_count": 47, "digg_article_count": 629, "got_digg_count": 16455, "got_view_count": 731610, "post_shortmsg_count": 35, "digg_shortmsg_count": 55, "isfollowed": false, "favorable_author": 1, "power": 23771, "study_point": 0, "university": 
{"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844903924172865543, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844903700176044040", "article_info": {"article_id": "6844903700176044040", "user_id": "219558055512414", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640398105870343, 6809640528267706382, 6809641048927633415], "visible_level": 0, "link_url": "https://juejin.im/post/6844903700176044040", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/10/29/166bd55ef487de3c~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "Vue SPA 项目webpack打包优化指南", "brief_content": "最近一个小伙伴问我他们公司的Vue后台项目怎么首次加载要十多秒太慢了,有什么能优化的,于是乎我打开了他们的网站,发现主要耗时在加载vendor.js文件这个文件高达2M,于是乎我就拿来他们的代码看看,进行了一番折腾。最终还是取得了不错的效果。 
对于网页性能,如何提升加载速度、等…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1540538178", "mtime": "1599636214", "rtime": "1540547063", "draft_id": "6845075639045718023", "view_count": 7445, "collect_count": 165, "digg_count": 122, "comment_count": 2, "hot_index": 496, "is_hot": 0, "rank_index": 0.00093544, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "219558055512414", "user_name": "AKing", "company": "前端打工人", "job_title": "前端打工人", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/mirror-assets/168e086a15081d2a8d5~tplv-t2oaga2asx-image.image", "level": 3, "description": "代码搬运工", "followee_count": 18, "follower_count": 686, "post_article_count": 22, "digg_article_count": 81, "got_digg_count": 1021, "got_view_count": 80820, "post_shortmsg_count": 0, "digg_shortmsg_count": 2, "isfollowed": false, "favorable_author": 0, "power": 1816, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 
1631693194, "id_type": 9, "tag_alias": "", "post_article_count": 31257, "concern_user_count": 313520}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}, {"id": 2546614, "tag_id": "6809640528267706382", "tag_name": "Webpack", "color": "#6F94DB", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/73e856b07f83b4231c1e.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1440920866, "mtime": 1631692726, "id_type": 9, "tag_alias": "", "post_article_count": 6704, "concern_user_count": 204077}, {"id": 2546989, "tag_id": "6809641048927633415", "tag_name": "CDN", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f20d8ce529685521d23c.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489954158, "mtime": 1631638663, "id_type": 9, "tag_alias": "", "post_article_count": 651, "concern_user_count": 11068}], "user_interact": {"id": 6844903700176044040, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844904013247283214", "article_info": {"article_id": "6844904013247283214", "user_id": "4054654612943303", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844904013247283214", "cover_image": "", "is_gfw": 0, "title": "前端UI设计稿对比工具 - chrome+webpack插件", "brief_content": "(切图仔)前端开发者一大重要的职责就是将UI画稿转化为实际可用的页面,效果图的还原度在相当大的程度上决定了UI和PM的满意度一般情况下,拿到设计稿后,懒散点的可能直接看图软件打开,肉眼测距就开搞了,负", "is_english": 0, 
"is_original": 1, "user_index": 10.697075171448, "original_type": 0, "original_author": "", "content": "", "ctime": "1575431485", "mtime": "1600270546", "rtime": "1575436078", "draft_id": "6845076564409843720", "view_count": 4125, "collect_count": 49, "digg_count": 42, "comment_count": 6, "hot_index": 254, "is_hot": 0, "rank_index": 0.00093541, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4054654612943303", "user_name": "清夜", "company": "字节跳动", "job_title": "前端挖坑学院首席JS打字员", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/3edac05f29d8ed4e7ffdf499fda846bb~300x300.image", "level": 4, "description": "HTML堆砌者、 CSS 表演艺术家、高级IF-ELSE开发工程师、API调用专家、后端接口测试专员、文档制造者、bug路由器", "followee_count": 23, "follower_count": 951, "post_article_count": 47, "digg_article_count": 72, "got_digg_count": 3399, "got_view_count": 216915, "post_shortmsg_count": 9, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 1, "power": 5572, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 
1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844904013247283214, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6992959237248778254", "article_info": {"article_id": "6992959237248778254", "user_id": "1028798616438151", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/7a1086006730433f8efa797abcfea2d3~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "JavaScript从根源了解深浅拷贝问题", "brief_content": "从JavaScript的变量开始介绍,逐步了解为什么会出现深浅拷贝问题,引用值和原始值的区别,以及如何解决。", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628175238", "mtime": "1628241274", "rtime": "1628225566", "draft_id": "6992957978638827551", "view_count": 64, "collect_count": 1, "digg_count": 4, "comment_count": 0, "hot_index": 7, "is_hot": 0, "rank_index": 0.00093534, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1028798616438151", "user_name": "我家没有洗发水", "company": "", "job_title": "web前端", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2020/3/29/17126d9ca15770fc~tplv-t2oaga2asx-image.image", "level": 1, "description": "正在学习前端,在这里记录一些笔记ψ(._. 
)>", "followee_count": 6, "follower_count": 2, "post_article_count": 7, "digg_article_count": 16, "got_digg_count": 22, "got_view_count": 332, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 25, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6992959237248778254, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6992938207230820389", "article_info": {"article_id": "6992938207230820389", "user_id": "474636479897303", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "Event Loop事件循环", "brief_content": "这是我参与8月更文挑战的第5天,活动详情查看:8月更文挑战 1. 
JavaScript为什么是单线程的? JavaScript的单线程,与它的用途有关。作为浏览器脚本语言,JavaScript的主要用", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628170380", "mtime": "1628225154", "rtime": "1628225154", "draft_id": "6992937858017263623", "view_count": 75, "collect_count": 1, "digg_count": 4, "comment_count": 0, "hot_index": 7, "is_hot": 0, "rank_index": 0.00093519, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "474636479897303", "user_name": "敲代码有瘾", "company": "", "job_title": "前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/e1d7446b60cec3cceb23431f49737188~300x300.image", "level": 1, "description": "敲代码有瘾", "followee_count": 9, "follower_count": 6, "post_article_count": 7, "digg_article_count": 35, "got_digg_count": 30, "got_view_count": 665, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 36, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, 
"mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}], "user_interact": {"id": 6992938207230820389, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844904104582447118", "article_info": {"article_id": "6844904104582447118", "user_id": "4265760849141943", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844904104582447118", "cover_image": "", "is_gfw": 0, "title": "JavaScript常见笔试题(持续更新)", "brief_content": "1. Promise模拟实现 2. Promise.all实现 4. call实现 5. apply实现 6. bind实现 7. 
继承方法", "is_english": 0, "is_original": 1, "user_index": 7.0590559426532, "original_type": 0, "original_author": "", "content": "", "ctime": "1585294947", "mtime": "1598558636", "rtime": "1585295364", "draft_id": "6845076703862063111", "view_count": 3065, "collect_count": 105, "digg_count": 43, "comment_count": 3, "hot_index": 199, "is_hot": 0, "rank_index": 0.00093475, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4265760849141943", "user_name": "ren", "company": "", "job_title": "", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2020/2/19/1705dbbcee2b3c54~tplv-t2oaga2asx-image.image", "level": 2, "description": "", "followee_count": 5, "follower_count": 28, "post_article_count": 6, "digg_article_count": 1, "got_digg_count": 105, "got_view_count": 7560, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 180, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, 
"id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844904104582447118, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6879606179924803591", "article_info": {"article_id": "6879606179924803591", "user_id": "1873223546578589", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/489b4fa661ef47c0aa11853d29883ee3~tplv-k3u1fbpfcp-zoom-1.image", "is_gfw": 0, "title": "JavaScript 类型 — 重学 JavaScript", "brief_content": "JavaScript 中最小的结构,同学们已知的有什么呢?我想同学们都应该会想到一些东西,比如一些关键字,数字 123,或者 String 字符等等。这里我们从最小的单位,字面值和运行时类型开始讲起。 这里分为语法(Grammer)和运行时(Runtime)两个部分。 有一个设…", "is_english": 0, "is_original": 1, "user_index": 8.408036803013594, "original_type": 0, "original_author": "", "content": "", "ctime": "1601783395", "mtime": "1601785970", "rtime": "1601785970", "draft_id": "6877042786198257672", "view_count": 1743, "collect_count": 15, "digg_count": 21, "comment_count": 0, "hot_index": 108, "is_hot": 0, "rank_index": 0.00093492, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1873223546578589", "user_name": "三钻", "company": "微信搜:技术银河", "job_title": "前端", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/b167b7a36ca8da3225bf4ffb3257aaa8~300x300.image", "level": 3, "description": "专心、专注、专研,与行业中的同学们一起终生学习", "followee_count": 82, "follower_count": 729, "post_article_count": 51, "digg_article_count": 98, "got_digg_count": 1809, "got_view_count": 98422, "post_shortmsg_count": 8, "digg_shortmsg_count": 7, "isfollowed": false, "favorable_author": 0, "power": 2793, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": 
"0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6879606179924803591, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6941761567046238245", "article_info": {"article_id": "6941761567046238245", "user_id": "3104676565755117", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/8e5b1513771746a98eb2d0ac97c8c9d5~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "报表可视化搭建平台 - 筛选器联动优化 | 项目复盘", "brief_content": "项目目标: 报表通过可视化搭建的方式,来缩短数据报表开发周期。减少研发同学的依赖,解放生产力。支持 PC 端和移动端展示。 目标用户: BI 分析师、HR 或者效能改进部门。 本身是整个可视化搭建生态中的一员,整个可视化搭建生态底层引擎由单独一个前端小组开发和维护,然后再和业务…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1616254933", "mtime": "1616375334", 
"rtime": "1616293822", "draft_id": "6941647622163660807", "view_count": 623, "collect_count": 7, "digg_count": 17, "comment_count": 1, "hot_index": 49, "is_hot": 0, "rank_index": 0.00093475, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3104676565755117", "user_name": "CAI", "company": "xxx", "job_title": "前端", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/03f5fe02198e30db440a52d278833ede~300x300.image", "level": 2, "description": "", "followee_count": 50, "follower_count": 29, "post_article_count": 8, "digg_article_count": 272, "got_digg_count": 74, "got_view_count": 6126, "post_shortmsg_count": 7, "digg_shortmsg_count": 12, "isfollowed": false, "favorable_author": 0, "power": 135, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6941761567046238245, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": 
false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844903513613418503", "article_info": {"article_id": "6844903513613418503", "user_id": "4336129589120072", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://github.com/zuopf769/notebook/blob/master/fe/前端全(无)埋点之页面停留时长统计/README.md", "cover_image": "", "is_gfw": 0, "title": "前端全(无)埋点之页面停留时长统计", "brief_content": "本文讲解了传统的通过beforunload或者unload事件发送页面停留时长的时候丢点的问题;罗列了几种解决问题的思路。", "is_english": 0, "is_original": 0, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1511510436", "mtime": "1598438455", "rtime": "1511510436", "draft_id": "0", "view_count": 10999, "collect_count": 77, "digg_count": 153, "comment_count": 8, "hot_index": 710, "is_hot": 0, "rank_index": 0.00093465, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4336129589120072", "user_name": "zuopf769", "company": "yonyou->baidu->ofo->mtdp", "job_title": "fe", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/mosaic-legacy/3792/5112637127~300x300.image", "level": 2, "description": "前端、node", "followee_count": 334, "follower_count": 1073, "post_article_count": 36, "digg_article_count": 112, "got_digg_count": 2056, "got_view_count": 202537, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 832, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, 
"back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844903513613418503, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844903728990912525", "article_info": {"article_id": "6844903728990912525", "user_id": "3755587449653150", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844903728990912525", "cover_image": "", "is_gfw": 0, "title": "你的 JS 代码本可以更加优雅", "brief_content": "有时感觉挺有趣的是在群里聊天时的自嘲,「xx 项目在经过我一年的不断努力下,终于变得不可维护」。个人认为,维护是一件比开发更富挑战性的事情,前人的代码是否规范优雅会很直接地影响我们的工作效率和心情。 所以,我们更要时刻地去注意我们代码的质量,也许你的代码已经足够规范,但在某种程…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1543720517", "mtime": "1598481424", "rtime": "1543722532", "draft_id": "6845075777390657543", "view_count": 5352, "collect_count": 190, "digg_count": 176, "comment_count": 30, "hot_index": 473, "is_hot": 0, "rank_index": 0.00093455, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3755587449653150", "user_name": "淘淘笙悦", "company": "", "job_title": "野生小前端", "avatar_large": 
"https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/699d994be9e4e06d321c36844f33c52a~300x300.image", "level": 3, "description": "", "followee_count": 8, "follower_count": 248, "post_article_count": 21, "digg_article_count": 37, "got_digg_count": 2224, "got_view_count": 98870, "post_shortmsg_count": 1, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 0, "power": 3212, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844903728990912525, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844903711991414791", "article_info": {"article_id": "6844903711991414791", "user_id": "940837682306830", "category_id": "6809637767543259144", "tag_ids": [6809640357354012685, 6809640398105870343, 6809640407484334093, 
6809640614175604744], "visible_level": 0, "link_url": "https://juejin.im/post/6844903711991414791", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/11/12/167066041df474b9~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "[译] React 的今天和明天 I —— 现状和问题", "brief_content": "早上好。大家好,欢迎来到 React 大会。今天来到这里我感到非常激动。我非常高兴可以给你们做开场演讲。 我是 Sophie Alpert,个人主页是 sophiebits.com。我是 Facebook 的 React 核心小组的开发经理。 你们正在使用的 React 做的很…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1542000112", "mtime": "1599642818", "rtime": "1542010625", "draft_id": "6845075649419870221", "view_count": 5973, "collect_count": 143, "digg_count": 174, "comment_count": 13, "hot_index": 485, "is_hot": 0, "rank_index": 0.00093454, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "940837682306830", "user_name": "清秋", "company": "公众号:Frontend Radio", "job_title": "Web前端", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/be943b52b5091019bf92eeec817413c9~300x300.image", "level": 3, "description": "你好,我是清秋,一个有着教师梦的 Web 前端非典型程序员。业余画家、设计师、写手,PMP,后端一般,算法还可,数据挖掘背景。北邮硕士毕业后一直在某股份制银行软件开发部工作,一晃已经五年了。", "followee_count": 232, "follower_count": 2944, "post_article_count": 41, "digg_article_count": 227, "got_digg_count": 1820, "got_view_count": 135794, "post_shortmsg_count": 64, "digg_shortmsg_count": 53, "isfollowed": false, "favorable_author": 0, "power": 3177, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": 
"https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546490, "tag_id": "6809640357354012685", "tag_name": "React.js", "color": "#61DAFB", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f655215074250f10f8d4.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234367, "mtime": 1631692935, "id_type": 9, "tag_alias": "", "post_article_count": 16999, "concern_user_count": 226420}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}, {"id": 2546676, "tag_id": "6809640614175604744", "tag_name": "掘金翻译计划", "color": "#0081ff", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/95f7e8be776556ab8d82.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1454716787, "mtime": 1631689800, "id_type": 9, "tag_alias": "", "post_article_count": 2502, "concern_user_count": 42848}], "user_interact": {"id": 6844903711991414791, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": 
false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6854573221983813645", "article_info": {"article_id": "6854573221983813645", "user_id": "2594503169948727", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6854573221983813645", "cover_image": "", "is_gfw": 0, "title": "javascript 事件流和事件委托", "brief_content": "javascript事件流和事件委托DOM事件流事件对象👉event:MDN传送门事件类型事件委托👉彻底弄懂JS事件委托的概念和作用相关文章推荐:👉EventLoop事件循环机制(浏览器)👉javas", "is_english": 0, "is_original": 1, "user_index": 2.7992049380886, "original_type": 0, "original_author": "", "content": "", "ctime": "1595923488", "mtime": "1599102471", "rtime": "1595927245", "draft_id": "6854812682176954382", "view_count": 2445, "collect_count": 27, "digg_count": 19, "comment_count": 3, "hot_index": 144, "is_hot": 0, "rank_index": 0.00093428, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2594503169948727", "user_name": "zlevai", "company": "", "job_title": "@前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/ab2aa610532cdb960c465521032c3b27~300x300.image", "level": 2, "description": "知新温故事", "followee_count": 91, "follower_count": 22, "post_article_count": 40, "digg_article_count": 415, "got_digg_count": 102, "got_view_count": 20776, "post_shortmsg_count": 1, "digg_shortmsg_count": 12, "isfollowed": false, "favorable_author": 0, "power": 309, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", 
"icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6854573221983813645, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844903619582509063", "article_info": {"article_id": "6844903619582509063", "user_id": "3051900006063838", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470, 6809640398105870343], "visible_level": 0, "link_url": "https://bailinlin.github.io/2018/06/08/node-notes/", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/6/12/163f27e92cc8d28b~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "精读《深入浅出Node.js》", "brief_content": "从不同的视角介绍了 Node 内在的特点和结构。由首章Node 介绍为索引,涉及Node 的各个方面,主要内容包含模块机制的揭示、异步I/O 实现原理的展现、异步编程的探讨、内存控制的介绍、二进制数据Buffer 的细节、Node 中的网络编程...", "is_english": 0, "is_original": 0, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1528781772", "mtime": "1598456123", "rtime": "1528783263", "draft_id": "0", "view_count": 4960, "collect_count": 160, "digg_count": 310, "comment_count": 22, "hot_index": 580, "is_hot": 0, "rank_index": 0.00093416, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3051900006063838", "user_name": "程序员解决师", "company": "CBU首席程序员解决师", "job_title": "程序员解决师", 
"avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/2/1/168a798dd33ef955~tplv-t2oaga2asx-image.image", "level": 3, "description": "", "followee_count": 26, "follower_count": 5208, "post_article_count": 64, "digg_article_count": 506, "got_digg_count": 6328, "got_view_count": 189658, "post_shortmsg_count": 58, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 0, "power": 4534, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844903619582509063, "omitempty": 2, 
"user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844903728328212488", "article_info": {"article_id": "6844903728328212488", "user_id": "2770425030912094", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844903728328212488", "cover_image": "", "is_gfw": 0, "title": "JS函数节流和函数防抖", "brief_content": "1.为什么需要函数防抖和函数节流?2.什么是函数防抖和函数节流2.1函数防抖(debounce)2.2函数节流(throttle)3.应用场景类型场景函数防抖1.手机号、邮箱输入检测2.搜索框搜索输入", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1543567750", "mtime": "1598481234", "rtime": "1543569899", "draft_id": "6845075776610517005", "view_count": 7007, "collect_count": 107, "digg_count": 110, "comment_count": 14, "hot_index": 474, "is_hot": 0, "rank_index": 0.00093399, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2770425030912094", "user_name": "JarvisJie", "company": "", "job_title": "研发新菜", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/7/30/164ea7735e0f7d92~tplv-t2oaga2asx-image.image", "level": 2, "description": "Less is more.", "followee_count": 23, "follower_count": 26, "post_article_count": 8, "digg_article_count": 169, "got_digg_count": 290, "got_view_count": 21473, "post_shortmsg_count": 2, "digg_shortmsg_count": 4, "isfollowed": false, "favorable_author": 0, "power": 504, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": 
"6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844903728328212488, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}], "cursor": "eyJ2IjoiNzAwNzYxNTY2NjYwOTk3OTQwMCIsImkiOjc0ODB9", "count": 22117, "has_more": true} | [
"www.1759633997@qq.com"
] | www.1759633997@qq.com |
3deafce27a6c9cd45c663119aca5437b23d6a7bf | 3ca773b6d8f85fee67c2a53036be445710fe9213 | /conferencias/urls.py | 860be88acb2ba91b8432e310a2f8d902b177fcf4 | [] | no_license | huguito17/conferencias | 821965356906aaa120b7262229e182aadfa13bff | 607b4af0ece6bc20b115224f8d9be613b8065207 | refs/heads/main | 2023-06-06T17:07:51.428073 | 2021-06-30T04:59:36 | 2021-06-30T04:59:36 | 375,822,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,395 | py | """conferencias URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from app_registro import views
# URL routes: admin site, landing page, participant CRUD views and the
# speaker (conferencistas) listing.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.index, name='index'),
    path('participantes/', views.participantes, name="participantes"),
    path('participantes/<int:id>/eliminar/', views.eliminar_participante, name='eliminar_participante'),
    path('participantes/<int:id>/editar/', views.editar_participante, name='editar_participante'),
    path('conferencistas/', views.conferencistas, name="conferencistas"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# NOTE(review): serving static/media via static() like this only works in
# development (DEBUG) setups -- confirm deployment serves these differently.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"hmurillo464@hotmail.com"
] | hmurillo464@hotmail.com |
274db040fee93dd8379dadb0798a9f30beb0a14e | b3cbdd1e476907b76bbc7838e312aa59f622b637 | /6 kyu/Mexican_Wave.py | f913acd39c9b7a242fdd43761544c20897f782e0 | [] | no_license | elYaro/Codewars-Katas-Python | 88be0dc23ad0b9927d04867b606525d0758b35df | a3ba79f9a81db9acb86da52de615eb0e235debf5 | refs/heads/master | 2020-04-04T07:56:46.702534 | 2018-11-01T20:45:21 | 2018-11-01T20:45:21 | 155,766,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,667 | py | '''
Introduction
The wave (known as the Mexican wave in the English-speaking world outside North America) is an example of metachronal rhythm achieved in a packed
stadium when successive groups of spectators briefly stand, yell, and raise their arms. Immediately upon stretching to full height, the spectator
returns to the usual seated position. The result is a wave of standing spectators that travels through the crowd, even though individual spectators
never move away from their seats. In many large arenas the crowd is seated in a contiguous circuit all the way around the sport field, and so the
wave is able to travel continuously around the arena; in discontiguous seating arrangements, the wave can instead reflect back and forth through the crowd.
When the gap in seating is narrow, the wave can sometimes pass through it. Usually only one wave crest will be present at any given time in an arena,
although simultaneous, counter-rotating waves have been produced. (Source Wikipedia)
Task
In this simple Kata your task is to create a function that turns a string into a Mexican Wave. You will be passed a string and you must return that
string in an array where an uppercase letter is a person standing up.
Rules
1. The input string will always be lower case but maybe empty.
2. If the character in the string is whitespace then pass over it as if it was an empty seat.
Example
wave("hello") => ["Hello", "hEllo", "heLlo", "helLo", "hellO"]
Good luck and enjoy!
'''
def wave(str):
    """Return the "Mexican wave" variants of *str*.

    For each non-space position, yield a copy of the string with that one
    character upper-cased; spaces are skipped ("empty seats").

    >>> wave("hello")
    ['Hello', 'hEllo', 'heLlo', 'helLo', 'hellO']
    """
    return [
        str[:i] + ch.upper() + str[i + 1:]
        for i, ch in enumerate(str)
        if ch != " "
    ]
"nowakowski.priv@gmail.com"
] | nowakowski.priv@gmail.com |
f41bb0f627ed6d8f5fd7b2f6953ef836320c19d9 | 9b68695d6d7d05bdfdcb087db532d66188cfbcdb | /bsmsm/spiders/spider.py | 67165b7f5f3a4693d22e7d719589e6d28ffc76e2 | [] | no_license | hristo-grudev/bsmsm | 1f100180535b564cd8ca59fd62b35de4cf25b460 | e7035250b07e21e25299967eee065ea588369857 | refs/heads/main | 2023-03-13T13:13:48.075506 | 2021-03-05T08:32:08 | 2021-03-05T08:32:08 | 344,745,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | import scrapy
from scrapy.loader import ItemLoader
from ..items import BsmsmItem
from itemloaders.processors import TakeFirst
class BsmsmSpider(scrapy.Spider):
    """Spider for www.bsm.sm: follows every news link on the index page
    and scrapes title, description and date from each article."""
    name = 'bsmsm'
    start_urls = ['https://www.bsm.sm/it/news-bsm.php']
    def parse(self, response):
        # Collect every article link from the news index and crawl each one.
        post_links = response.xpath('//div[@class="titolo-news bold"]/a/@href').getall()
        yield from response.follow_all(post_links, self.parse_post)
    def parse_post(self, response):
        # Headline of the article.
        title = response.xpath('//h1//text()').get()
        # All non-blank description fragments, stripped and joined by spaces.
        description = response.xpath('//span[@itemprop="description"]//text()[normalize-space()]').getall()
        description = [p.strip() for p in description]
        description = ' '.join(description).strip()
        date = response.xpath('//div[@class="bold"]/text()').get()
        # TakeFirst keeps only the first value added for each field.
        item = ItemLoader(item=BsmsmItem(), response=response)
        item.default_output_processor = TakeFirst()
        item.add_value('title', title)
        item.add_value('description', description)
        item.add_value('date', date)
        return item.load_item()
| [
"hr.grudev@gmail.com"
] | hr.grudev@gmail.com |
5cede2d8dc7514e832642702879a7bfb5fda8dbf | cce9731855063d821ba7d76989e090a91fb577cd | /chapter-4/whatsport.py | 01055edd5b573948cd2dd316d2acf56099c198c0 | [] | no_license | lBetterManl/PyDataAnalysis | 1a10d4f4778dff6f30b664bece666511468a5794 | f7ffdcaeb5a963fe8ea0cf9b5d5bd8a53b02ecc6 | refs/heads/master | 2021-08-28T19:43:09.036375 | 2017-12-13T03:19:46 | 2017-12-13T03:19:46 | 111,801,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,606 | py | # -*- coding: UTF-8 -*-
class Classifier:
    """Nearest-neighbour classifier over a tab-separated training file.

    The first line of the file labels every column as 'num' (numeric
    feature), 'class' (the category label) or 'comment' (free text kept
    alongside the instance).  Numeric columns are normalised with the
    modified standard score: (value - median) / absolute standard deviation.
    """

    def __init__(self, filename):
        # One (median, absolute standard deviation) pair per numeric
        # column, filled in by normalizeColumn().
        self.medianAndDeviation = []
        # Read the training file.
        f = open(filename)
        lines = f.readlines()
        f.close()
        # Column roles come from the header line.
        self.format = lines[0].strip().split('\t')
        # Fixed: the original `print format` printed the builtin `format`
        # function instead of the parsed column layout.
        print(self.format)
        self.data = []
        for line in lines[1:]:
            fields = line.strip().split('\t')
            ignore = []
            vector = []
            for i in range(len(fields)):
                if self.format[i] == 'num':
                    vector.append(int(fields[i]))
                elif self.format[i] == 'comment':
                    ignore.append(fields[i])
                elif self.format[i] == 'class':
                    classification = fields[i]
            # Each instance is (label, numeric vector, comment fields).
            self.data.append((classification, vector, ignore))
        self.rawData = list(self.data)
        # Length of the numeric feature vector.
        self.vlen = len(self.data[0][1])
        # Normalise every numeric column in place.
        for i in range(self.vlen):
            self.normalizeColumn(i)

    def getMedian(self, alist):
        """Return the median of *alist* ([] for an empty list)."""
        if alist == []:
            return []
        blist = sorted(alist)
        length = len(alist)
        if length % 2 == 1:
            return blist[int(((length + 1) / 2) - 1)]
        else:
            v1 = blist[int(length / 2)]
            v2 = blist[(int(length / 2) - 1)]
            return (v1 + v2) / 2.0

    def getAbsoluteStandardDeviation(self, alist, median):
        """given alist and median return absolute standard deviation"""
        sum = 0
        for item in alist:
            sum += abs(item - median)
        return sum / len(alist)

    def normalizeColumn(self, columnNumber):
        """given a column number, normalize that column in self.data"""
        # first extract values to list
        col = [v[1][columnNumber] for v in self.data]
        median = self.getMedian(col)
        asd = self.getAbsoluteStandardDeviation(col, median)
        self.medianAndDeviation.append((median, asd))
        for v in self.data:
            v[1][columnNumber] = (v[1][columnNumber] - median) / asd

    def normalizeVector(self, v):
        """We have stored the median and asd for each column.
        We now use them to normalize vector v"""
        vector = list(v)
        for i in range(len(vector)):
            (median, asd) = self.medianAndDeviation[i]
            vector[i] = (vector[i] - median) / asd
        return vector

    def manhattan(self, vector1, vector2):
        """Computes the Manhattan distance."""
        return sum(map(lambda v1, v2: abs(v1 - v2), vector1, vector2))

    def nearestNeighbor(self, itemVector):
        """Return (distance, (class, vector, comments)) of the training
        instance nearest to the (already normalised) *itemVector*.

        Implemented: the original was a placeholder stub that always
        returned a dummy tuple, so classify()/unitTest() could never work.
        """
        return min(
            ((self.manhattan(itemVector, item[1]), item) for item in self.data),
            key=lambda pair: pair[0])

    def classify(self, itemVector):
        """Return class we think item Vector is in"""
        return (self.nearestNeighbor(self.normalizeVector(itemVector))[1][0])
def unitTest():
    """Self-test against 'athletesTrainingSet.txt' (must exist in cwd).

    Exercises normalizeVector, manhattan, nearestNeighbor and classify
    with known athletes from the training data; raises AssertionError on
    any regression.
    """
    classifier = Classifier('athletesTrainingSet.txt')
    # Known instances: (class, [height, weight], [name]).
    br = ('Basketball', [72, 162], ['Brittainey Raven'])
    nl = ('Gymnastics', [61, 76], ['Viktoria Komova'])
    cl = ("Basketball", [74, 190], ['Crystal Langhorne'])
    # first check normalize function
    brNorm = classifier.normalizeVector(br[1])
    nlNorm = classifier.normalizeVector(nl[1])
    clNorm = classifier.normalizeVector(cl[1])
    assert (brNorm == classifier.data[1][1])
    assert (nlNorm == classifier.data[-1][1])
    print('normalizeVector fn OK')
    # check distance
    assert (round(classifier.manhattan(clNorm, classifier.data[1][1]), 5) == 1.16823)
    assert (classifier.manhattan(brNorm, classifier.data[1][1]) == 0)
    assert (classifier.manhattan(nlNorm, classifier.data[-1][1]) == 0)
    print('Manhattan distance fn OK')
    # Brittainey Raven's nearest neighbor should be herself
    result = classifier.nearestNeighbor(brNorm)
    assert (result[1][2] == br[2])
    # Nastia Liukin's nearest neighbor should be herself
    result = classifier.nearestNeighbor(nlNorm)
    assert (result[1][2] == nl[2])
    # Crystal Langhorne's nearest neighbor is Jennifer Lacy"
    assert (classifier.nearestNeighbor(clNorm)[1][2][0] == "Jennifer Lacy")
    print("Nearest Neighbor fn OK")
    # Check if classify correctly identifies sports
    assert (classifier.classify(br[1]) == 'Basketball')
    assert (classifier.classify(cl[1]) == 'Basketball')
    assert (classifier.classify(nl[1]) == 'Gymnastics')
    print('Classify fn OK')

# Run the self-test on import/execution.
unitTest()
| [
"yhclove159"
] | yhclove159 |
6e02c80f59e0b644527191a90c3a158c3136c5f0 | 2abbf38befe7c1ca9552868712c5aede68174b61 | /engineCrawler/util.py | 14306878bf553d93739e4fc06be525166cca9215 | [
"MIT"
] | permissive | ehoraizon/engineCrawler | f8ea95d30a32f13c202777c9ce91b45d5c369705 | 9a69b2982a4c1e89158507b3703a9ef7575e1e85 | refs/heads/main | 2023-01-22T00:15:49.344004 | 2020-11-29T10:59:38 | 2020-11-29T10:59:38 | 306,536,043 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,105 | py | import os
import threading
import imagehash
import numpy as np
from PIL import Image, UnidentifiedImageError
def slices(lista, steps=5):
    """Split *lista* into consecutive chunks of at most *steps* items.

    Fixed: the original always appended one extra trailing slice, which
    was empty whenever len(lista) was a multiple of *steps* (and it
    returned [[], []] for an empty list).  Callers only iterate the
    chunks, so dropping the empty ones is safe.
    """
    return [lista[i:i + steps] for i in range(0, len(lista), steps)]
# Selectable perceptual-hash algorithms from the `imagehash` package,
# keyed by the name accepted in DuplInFolder(hash_method=...).
HASHING_METHODS = {
    "AHASHING" : imagehash.average_hash,
    "PHASHING" : imagehash.phash,
    "DHASHING" : imagehash.dhash,
    "WHASHING" : imagehash.whash,
    "COLORHASHING" : imagehash.colorhash
}
class DuplInFolder:
    """Detect and delete near-duplicate images in a folder via perceptual hashes."""
    def __init__(self, hash_method="AHASHING", similarity=50):
        # hashing: the imagehash function chosen from HASHING_METHODS.
        self.hashing = HASHING_METHODS[hash_method]
        # similarity starts as a percentage; getFiles() later rescales it
        # into an absolute number of matching hash bits.
        self.similarity = similarity
    def setPath(self, path):
        # Folder to scan; must be called before getFiles()/check().
        self.path = path
    def getFiles(self):
        # All regular files in the folder except .json sidecars.
        files = [
            os.path.join(self.path, x)
            for x in os.listdir(self.path)
            if os.path.isfile(self.path + os.sep + x) and \
            '.json' not in x
        ]
        _files = list(range(len(files)))
        hashings = []
        rm_ind = set()
        for sli in slices(_files):
            # NOTE(review): Thread(self._load(...)) CALLS _load immediately
            # and passes its None return as Thread's `group` argument, so the
            # hashing actually runs serially and the started threads do
            # nothing.  That accident keeps `files` and `hashings` index-
            # aligned (hashings.append order matters below); a real
            # target=/args= fix would need per-index result storage.
            ths = [
                threading.Thread(self._load(files[i], rm_ind, hashings, i))
                for i in sli
            ]
            [x.start() for x in ths]
            [x.join() for x in ths]
        # Delete unreadable files and drop them from the listing.
        self.erase([files[i] for i in rm_ind])
        files = np.array([x for i,x in enumerate(files) if i not in rm_ind])
        # Convert the percentage threshold into an absolute bit count
        # (hash height * width * pct / 100).  Assumes at least one image
        # hashed successfully -- TODO confirm behaviour on empty folders.
        self.similarity = hashings[0].shape[0]*hashings[0].shape[1]*self.similarity/100
        return files, np.array(hashings)
    def _load(self, file, rm_files, hashings, n):
        # Hash one file; on failure record its index for removal instead.
        loaded = None
        try:
            loaded = self.hashing(Image.open(file)).hash
        except UnidentifiedImageError:
            print('No image format : ', file)
            rm_files.add(n)
        except:
            rm_files.add(n)
        else:
            hashings.append(loaded)
    def erase(self, rm_files):
        # Delete the given paths, batched through slices(); here the
        # Thread(target=..., args=...) form is used correctly.
        def rm(file_path):
            os.remove(file_path)
        rm_files = slices(rm_files)
        for sli in rm_files:
            ths = [
                threading.Thread(target=rm, args=(x,))
                for x in sli
            ]
            [x.start() for x in ths]
            [x.join() for x in ths]
    def check(self):
        """Hash every image, then erase files whose hashes match more than
        one other image above the similarity threshold."""
        file_paths, hashing = self.getFiles()
        # base_dt: prefix (up to the first '-') of the first file name;
        # `ind` counts files sharing it, which are then exempt from
        # deletion below -- presumably the "current batch"; TODO confirm.
        base_dt = file_paths[0].split(os.sep)[-1].split('-')[0]+'-'
        ind = sum([1 for x in file_paths if base_dt in x])
        # Pairwise bit-agreement counts; indices of columns exceeding the
        # threshold, with multiplicities.
        res = np.unique(
            np.where(
                np.array(
                    [
                        np.sum(np.sum(x == hashing, axis=1),axis=1)
                        for x in hashing
                    ]
                ) > self.similarity
            )[1],
            return_counts=True
        )
        # Remove files (beyond the first `ind`) that matched more than
        # themselves (count > 1 includes the self-match).
        self.erase(
            list(
                file_paths[res[0][ind:][np.where(res[1][ind:] > 1)]]
            )
        )
"erich.info.work@gmail.com"
] | erich.info.work@gmail.com |
dfcc76777b82ff4e0e00f8715bf8f234f7907333 | 7a527060afabd2e0867d5dcf4b75592b43ef5005 | /Leetcode/简单+剑指offer题/面试题49. 丑数.py | d4544c4e9bfbe5eb88767af5250c4fe4854899b0 | [] | no_license | Stevenzzz1996/MLLCV | ff01a276cf40142c1b28612cb5b43e563ad3a24a | 314953b759212db5ad07dcb18854bf6d120ba172 | refs/heads/master | 2023-02-10T18:11:30.399042 | 2021-01-05T12:05:21 | 2021-01-05T12:05:21 | 267,804,954 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | #!usr/bin/env python
# -*- coding:utf-8 -*-
# author: sfhong2020 time:2020/4/1 20:38
# 堆/动态规划
| [
"2499143041@qq.com"
] | 2499143041@qq.com |
e26da968438b368d72f24961954c85edd5d09261 | 3e96d00152069d511fecdf5e53365a44b7e2e3af | /数据统计/test.py | 1612cd301b5b15e3bd8bf1c87c9fca855cd4d6fc | [] | no_license | cilaaaa/PythonTools | 23150185c2b2dbe415a6c508310c0aa78ad967a0 | 33dec2949f2666e585b381606ddfe5bfc23e2d40 | refs/heads/master | 2020-05-26T21:19:02.325156 | 2019-05-24T07:43:18 | 2019-05-24T07:43:18 | 188,376,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | __author__ = 'Cila'
import re
import urllib
import urllib.request
def getHtmlData(url):
    """Fetch *url* with a mobile User-Agent and return the body as UTF-8 text."""
    # Request headers: spoof an Android tablet browser.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19'}
    # NOTE(review): on Python 3 this requires `import urllib.request`;
    # the bare `import urllib` at the top does not expose the submodule.
    request = urllib.request.Request(url, headers=headers)
    response = urllib.request.urlopen(request)
    data = response.read()
    # Decode the response bytes as UTF-8.
    data = data.decode('utf-8')
    return data
# NOTE(review): the loop counter `i` is never interpolated into the URL, so
# every iteration fetches the same page -- presumably a page index should
# follow "page=VEI="; confirm against the target site.
for i in range(1,999999):
    url = "http://hrcxi.cn.com/?TWI=2sNAA0Bt07tAhW3eta&page=VEI="
    try:
        data = getHtmlData(url)
        print(i)
    # Bare except: any network/HTTP/decoding error is silently swallowed.
    except:
        data = ""
    # title = re.findall("<h4 class=\"title\">(.*)</h4>",data)
    # if len(title)> 0:
    #     if("蛊魂铃" in title[0]):
    #         print(title[0] + ": " +url)
"472185361@qq.com"
] | 472185361@qq.com |
1e8ef4de1607e1cc8d39eb411fda21d27e17dbb7 | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-r-kvstore/aliyunsdkr_kvstore/request/v20150101/SwitchTempInstanceRequest.py | fc14d771d9b5d1b07ba06e98344c8a776066f9dd | [
"Apache-2.0"
] | permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,737 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SwitchTempInstanceRequest(RpcRequest):
    """RPC request for the Aliyun R-kvstore (Redis) `SwitchTempInstance`
    API, version 2015-01-01 (generated-SDK style: one getter/setter pair
    per query parameter)."""
    def __init__(self):
        RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'SwitchTempInstance','redisa')
    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')
    def set_ResourceOwnerId(self,ResourceOwnerId):
        self.add_query_param('ResourceOwnerId',ResourceOwnerId)
    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')
    def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')
    def set_OwnerAccount(self,OwnerAccount):
        self.add_query_param('OwnerAccount',OwnerAccount)
    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')
    def set_OwnerId(self,OwnerId):
        self.add_query_param('OwnerId',OwnerId)
"1478458905@qq.com"
] | 1478458905@qq.com |
f145669aff7fbea06fad520e259a73a98c38acf1 | 4d37cd82449dbd11bbabd509f0014a03ccbbdbf8 | /module2-sql-for-analysis/insert_titanic.py | 1ac5eed80f06fde0cc72d05bf35735451bedb59a | [
"MIT"
] | permissive | krsmith/DS-Unit-3-Sprint-2-SQL-and-Databases | 23d764241c61e5ab5a61657a6ee318505bd6d2c1 | 9617528ad5fd23354623926b819f98f9a063d252 | refs/heads/master | 2020-04-23T17:20:33.952730 | 2019-02-22T17:43:42 | 2019-02-22T17:43:42 | 171,328,384 | 0 | 0 | MIT | 2019-02-20T18:42:33 | 2019-02-18T17:40:53 | Python | UTF-8 | Python | false | false | 887 | py | import psycopg2 as pg
import pandas as pd

# Database credentials: fill these in before running (left blank so real
# credentials are not committed).  Fixed: the original had a stray bare
# `pg.connect` expression and valueless `dbname =` assignments, which are
# syntax errors.
dbname = ''
user = ''
password = ''
host = ''

conn = pg.connect(dbname=dbname, user=user, password=password, host=host)
pg_cur = conn.cursor()

# Load the Titanic CSV; strip single quotes from names so they store cleanly.
titanic = pd.read_csv('titanic.csv')
titanic.Name = titanic.Name.replace("'", '', regex=True)
titanic_list = titanic.values.tolist()

create_titanic_table = """
    CREATE TABLE titanic (
    person_id SERIAL PRIMARY KEY,
    survived int,
    pclass int,
    name varchar(100),
    sex varchar(6),
    age int,
    siblings_spouses_aboard int,
    parents_children_aboard int,
    fare float
    );"""

pg_cur.execute(create_titanic_table)
conn.commit()

# Parameterized INSERT: psycopg2 escapes the values itself, fixing the
# quoting/SQL-injection risk of building the VALUES tuple with str().
insert_sql = """INSERT INTO titanic (
    survived, pclass, name, sex, age, siblings_spouses_aboard,
    parents_children_aboard, fare)
    VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"""
for item in titanic_list:
    pg_cur.execute(insert_sql, item)
conn.commit()
| [
"krsmith310@gmail.com"
] | krsmith310@gmail.com |
323db64056731b5279b9046caeddd533e1bd56b7 | 8cefd99cf0b7fa8de3b2fd692b47a652672ba5db | /credit_card_detection/myutils.py | 3b30dd83e27c8ae29a25625c7e6750f1f71a4f1d | [] | no_license | XXXXLHBXXXX/opencv- | 009d8778088a62fbf8f0148326e99ce9043edaee | 0f8147564afc4d9f05410774ee7bc8ace38aa8d1 | refs/heads/master | 2022-11-22T16:15:08.967217 | 2020-07-15T12:09:44 | 2020-07-15T12:09:44 | 279,856,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | import cv2
def sort_contours(cnts, method = "left-2-right"):
    """Sort OpenCV contours by bounding-box position.

    method selects the direction: left-2-right (default), right-2-left,
    top-2-bottom or bottom-2-top.  Returns (sorted_contours, boxes).
    """
    reverse = False
    # i selects the bounding-box coordinate to sort on: 0 = x, 1 = y.
    i = 0
    if method == "right-2-left" or method == "bottom-2-top":
        reverse = True
    if method == "top-2-bottom" or method == "bottom-2-top":
        i = 1
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    # Wrap each detected shape in its minimal bounding rectangle (x, y, w, h).
    (cnts,boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                    key=lambda b:b[1][i],reverse=reverse))
    return cnts,boundingBoxes
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize *image* to the given width OR height, preserving aspect ratio.

    If both are None the image is returned unchanged; if both are given,
    width takes precedence.
    """
    dim = None
    (h,w) = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        # Scale by the requested height.
        r = height / float(h)
        dim = (int(w * r),height)
    else:
        # Scale by the requested width.
        r = width / float(w)
        dim = (width,int(h * r))
    resized = cv2.resize(image, dim, interpolation=inter)
    return resized
| [
"15386697277@163.com"
] | 15386697277@163.com |
5c77958a70db3fdb38303d8bf678113803c62984 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/gos_20200614055443.py | e65771e72edd5ad03a64cf83c5b0bcf4ef404048 | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,414 | py | # # Імпорт фажливих бібліотек
# from BeautifulSoup import BeautifulSoup
# import urllib2
# import re
# # Створення функції пошуку силок
# def getLinks(url):
# # отримання та присвоєння контенту сторінки в змінну
# html_page = urllib2.urlopen(url)
# # Перетворення контенту в обєкт бібліотеки BeautifulSoup
# soup = BeautifulSoup(html_page)
# # створення пустого масиву для лінків
# links = []
# # ЗА ДОПОМОГОЮ ЧИКЛУ ПРОХЛДИМСЯ ПО ВСІХ ЕЛЕМЕНТАХ ДЕ Є СИЛКА
# for link in soup.findAll('a', attrs={'href': re.compile("^http://")}):
# # Додаємо всі силки в список
# links.append(link.get('href'))
# # повертаємо список
# return links
# -----------------------------------------------------------------------------------------------------------
# # # Імпорт фажливих бібліотек
# import subprocess
# # Створення циклу та використання функції range для генерації послідовних чисел
# for ping in range(1,10):
# # генерування IP адреси базуючись на номері ітерації
# address = "127.0.0." + str(ping)
# # виклик функції call яка робить запит на IP адрес та запис відповіді в змінну
# res = subprocess.call(['ping', '-c', '3', address])
# # За допомогою умовних операторів перевіряємо відповідь та виводимо результат
# if res == 0:
# print "ping to", address, "OK"
# elif res == 2:
# print "no response from", address
# else:
# print "ping to", address, "failed!"
# -----------------------------------------------------------------------------------------------------------
# Import the required libraries
import requests

# NOTE(review): this file is an editor-history snapshot and does not parse:
# `for pic_` below is an unfinished loop header and `pic_url` is never
# defined.  Presumably the intent was `for pic_url in <list of URLs>:`
# wrapping the streamed download below -- confirm against a later revision.
for pic_
with open('pic1.jpg', 'wb') as handle:
        response = requests.get(pic_url, stream=True)
        if not response.ok:
            print(response)
        for block in response.iter_content(1024):
            if not block:
                break
            handle.write(block)
handle.write(block) | [
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
603df67e796255ceac6f2dd0f26b276e88613a8b | d10f72595c2d25cd0a8ff6f754f0b4e7918d598d | /src/medium.py | 48bcf3151794a5b318dfddba47dbd5900aa40b83 | [
"MIT"
] | permissive | joshuajharris/alfred-medium-workflow | 47159c8cddf6c2859571687403dbcf6bafcae2b6 | 3708f53962e6dbb3102a5e1e88454b95b07f7b3e | refs/heads/master | 2021-01-25T07:41:04.824157 | 2017-06-07T21:03:33 | 2017-06-07T21:03:33 | 93,655,464 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,923 | py | #!/usr/bin/python
# encoding: utf-8
#
# Copyright © 2014 deanishe@deanishe.net
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2014-12-29
#
"""medium.py [options] <query>
Browse and search medium users, posts.
Usage:
medium.py <query>
medium.py [-p]
medium.py (-h | --help)
medium.py --version
Options:
-p, --post Open post
-h, --help Show this help text
-version Show version.
"""
import json
import os
import requests
import subprocess
import sys
from workflow import Workflow3
############### CONSTANTS ###############
# Workflow metadata: help link and self-update source for Alfred-Workflow.
HELP_URL = 'https://github.com/joshuajharris/alfred-medium-workflow'
VERSION = '0.1.0'
UPDATE_SETTINGS = {
    'github_slug': 'joshuajharris/alfred-medium-workflow',
    'version': VERSION
}
# Medium endpoints used by the script filter.
URL = 'https://medium.com'
SEARCH_URL = URL + '/search'
POST_URL = URL + '/post'
# Icon shown on the "update available" result row.
ICON_UPDATE = os.path.join(os.path.dirname(__file__), 'update-available.png')
# NOTE(review): defined but never formatted or sent in this file.
USER_AGENT = 'Alfred-Medium-Workflow/{version} ({url})'
############### HELPERS ###############
def open_url(url):
    """Launch *url* in the default browser via the macOS `open` command."""
    log.debug('Opening : %s', url)
    command = ['open', url]
    subprocess.call(command)
def sanitize_json(raw):
    """Parse Medium's JSON after stripping the anti-hijacking prefix
    (e.g. "])}while(1);</x>") that precedes the first '{'."""
    start = raw.index('{')
    return json.loads(raw[start:])
def get_posts_from_payload(j):
    """Extract the list of posts from a parsed Medium search response."""
    payload = j['payload']
    return payload['value']['posts']
############### MEDIUM API ###############
def search_posts(q):
    """Query Medium's search endpoint for *q* and return the post dicts.

    NOTE(review): implicitly returns None on any non-200 response, and the
    caller (add_posts) would then fail iterating it -- confirm intended.
    """
    payload = {'q': q, 'format': 'json'}
    r = requests.get(SEARCH_URL, params=payload)
    log.debug(r.url)
    log.debug(r.status_code)
    if r.status_code == 200:
        # Response body carries an anti-hijacking prefix; strip and parse.
        j = sanitize_json(r.text)
        posts = get_posts_from_payload(j)
        log.debug("Number of results: %d", len(posts))
        return posts;
############### WORKFLOW ###############
def show_options():
    """Show the placeholder row when the user has not typed a query yet."""
    wf.add_item(
        "Start Typing to search for posts.",
        valid=False) # makes it not actionable
    wf.send_feedback()
    return 0;
def add_posts(posts):
    """Add one Alfred result row per Medium post and send the feedback."""
    for post in posts:
        url = '{}/{}'.format(POST_URL, post['id'])
        it = wf.add_item(
            post['title'],
            "{} words".format(post['virtuals']['wordCount']),
            autocomplete= u'{}/'.format(post['title']),
            arg=url,
            uid=post['id'],
            quicklookurl=url,
            valid=True)
        # Variables consumed by the Run Script action (the --post branch).
        it.setvar('post_url', url)
        it.setvar('argv', '-p')
    wf.send_feedback()
    return 0
def main(wf):
    """Workflow entry point: either open a post (--post) or run the
    script filter for <query>."""
    from docopt import docopt

    args = docopt(__doc__, argv=wf.args, version=VERSION)
    log.debug('args : %r', args)

    # Run Script actions
    # ------------------------------------------------------------------

    done = False
    if args.get('--post'):
        # post_url was stashed as an item variable by add_posts().
        open_url(os.getenv('post_url'))
        done = True

    if done:
        return

    ####################################################################
    # Script Filter
    ####################################################################

    # Updates
    # ------------------------------------------------------------------
    if wf.update_available:
        wf.add_item('A newer version is available',
                    '↩ to install update',
                    autocomplete='workflow:update',
                    icon=ICON_UPDATE)

    query = args.get('<query>')
    log.debug('query : %r', query)

    # Empty query -> placeholder row; otherwise search and list posts.
    # ------------------------------------------------------------------
    if query == '':
        show_options()
    else:
        posts = search_posts(query)
        add_posts(posts)

    # Parse query
    # ------------------------------------------------------------------
# Entry point: Workflow3 handles logging, magic args and self-update.
if __name__ == '__main__':
    wf = Workflow3(help_url=HELP_URL,
                   update_settings=UPDATE_SETTINGS)
    log = wf.logger
    sys.exit(wf.run(main))
| [
"joshua.jharris13@gmail.com"
] | joshua.jharris13@gmail.com |
abe78bc49b85c74a1b2f4932b3ed2e0bab37eb16 | ffa21e4415ead5106f7f846bc24b0d308ace90b5 | /swagger_client/models/forecast_transaction.py | be10b2bc4700700721d9092cecf9dddd1c89aefa | [] | no_license | steini58/swagger-client | fa7b6f077e5a1b01e42c4420b214b19e1d364e4e | e5fd7bf28f8529746e18bdd799c86ad78310ffd5 | refs/heads/master | 2020-03-29T09:14:26.644065 | 2018-09-20T13:29:14 | 2018-09-20T13:29:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,665 | py | # coding: utf-8
"""
[AHOI cookbook](/ahoi/docs/cookbook/index.html) [Data Privacy](/sandboxmanager/#/privacy) [Terms of Service](/sandboxmanager/#/terms) [Imprint](https://sparkassen-hub.com/impressum/) © 2016‐2017 Starfinanz - Ein Unternehmen der Finanz Informatik # noqa: E501
OpenAPI spec version: 2.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.balance import Balance # noqa: F401,E501
from swagger_client.models.forecast import Forecast # noqa: F401,E501
from swagger_client.models.transaction import Transaction # noqa: F401,E501
class ForecastTransaction(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'forecast_balance': 'Balance',
'account_id': 'str',
'transactions': 'list[Transaction]'
}
attribute_map = {
'forecast_balance': 'forecastBalance',
'account_id': 'accountId',
'transactions': 'transactions'
}
def __init__(self, forecast_balance=None, account_id=None, transactions=None): # noqa: E501
"""ForecastTransaction - a model defined in Swagger""" # noqa: E501
self._forecast_balance = None
self._account_id = None
self._transactions = None
self.discriminator = None
self.forecast_balance = forecast_balance
self.account_id = account_id
self.transactions = transactions
@property
def forecast_balance(self):
"""Gets the forecast_balance of this ForecastTransaction. # noqa: E501
Balance forecast # noqa: E501
:return: The forecast_balance of this ForecastTransaction. # noqa: E501
:rtype: Balance
"""
return self._forecast_balance
@forecast_balance.setter
def forecast_balance(self, forecast_balance):
"""Sets the forecast_balance of this ForecastTransaction.
Balance forecast # noqa: E501
:param forecast_balance: The forecast_balance of this ForecastTransaction. # noqa: E501
:type: Balance
"""
if forecast_balance is None:
raise ValueError("Invalid value for `forecast_balance`, must not be `None`") # noqa: E501
self._forecast_balance = forecast_balance
@property
def account_id(self):
"""Gets the account_id of this ForecastTransaction. # noqa: E501
Id of account this entry belongs to # noqa: E501
:return: The account_id of this ForecastTransaction. # noqa: E501
:rtype: str
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""Sets the account_id of this ForecastTransaction.
Id of account this entry belongs to # noqa: E501
:param account_id: The account_id of this ForecastTransaction. # noqa: E501
:type: str
"""
if account_id is None:
raise ValueError("Invalid value for `account_id`, must not be `None`") # noqa: E501
self._account_id = account_id
@property
def transactions(self):
"""Gets the transactions of this ForecastTransaction. # noqa: E501
List of unappliedTransaction # noqa: E501
:return: The transactions of this ForecastTransaction. # noqa: E501
:rtype: list[Transaction]
"""
return self._transactions
@transactions.setter
def transactions(self, transactions):
"""Sets the transactions of this ForecastTransaction.
List of unappliedTransaction # noqa: E501
:param transactions: The transactions of this ForecastTransaction. # noqa: E501
:type: list[Transaction]
"""
if transactions is None:
raise ValueError("Invalid value for `transactions`, must not be `None`") # noqa: E501
self._transactions = transactions
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ForecastTransaction):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"peter.steinberg@natur-und-genuss.de"
] | peter.steinberg@natur-und-genuss.de |
587f6bb1d0835d73eb2c77ed586dc5e4739098c3 | 6f83b55feb37d3525eb07fedc264f572a09d4f27 | /iluminación/SanitarioOFF.py | 95303c70947c1f133ed343ff0945e3417e3b3658 | [] | no_license | DanB450x/Domotica | ebd94dd56b56c29a4fab0c0f0b10a0cf65ee347a | 82defb16652c4b27c3800a6662631c727406a180 | refs/heads/main | 2023-09-03T14:16:27.642454 | 2021-10-27T16:48:04 | 2021-10-27T16:48:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | import RPi.GPIO as GPIO
import time
GPIO.setwarnings(False)
ledpin = 12
GPIO.setmode(GPIO.BOARD)
GPIO.setup(ledpin, GPIO.OUT)
GPIO.output(ledpin, GPIO.HIGH)
GPIO.cleanup()
| [
"noreply@github.com"
] | DanB450x.noreply@github.com |
fd76f0ac26f62e12289104fc7897580d5c62f5a0 | 24af5357b791ee482ba62e3227dd7060aa7a195f | /classical/big_lattice.py | 65069ebda5e25cc6d12f97663d98ea349ee3e921 | [] | no_license | augustinmuster/SQGOL | f6f5118d2e91a13a4344d0a5421da47a92f83ef4 | 71bd234698f53fd87c1cc585cc05630e1e9fd181 | refs/heads/master | 2023-01-03T00:56:12.667201 | 2020-11-02T20:05:28 | 2020-11-02T20:05:28 | 309,482,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,749 | py |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.animation as animation
#definitions of the possible states for the lattice
# Cell states for the classical Game of Life lattice.
ALIVE=1
DEAD=0
states=[ALIVE,DEAD]
# Lattice side length (L x L grid).
L = 50
# Random initial grid: ~10% alive, ~90% dead.
grid = np.random.choice(states, L*L, p=[0.1,0.9]).reshape(L, L)
#-------------------------some visualisation stuff and functions------------------------------
# Two-colour map: DEAD -> black, ALIVE -> white.
cmap = mpl.colors.ListedColormap(['black','white'])
bounds = [-1,0.5,1.5]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
def update(data):
    """Animation callback: advance the module-level `grid` one Game-of-Life
    step (toroidal boundaries) and redraw `mat`."""
    #print("update")
    global grid
    newGrid = grid.copy()
    for i in range(L):
        for j in range(L):
            # Count the 8 neighbours with periodic (wrap-around) indexing.
            total = (grid[i, (j-1)%L] + grid[i, (j+1)%L] + grid[(i-1)%L, j] + grid[(i+1)%L, j] + grid[(i-1)%L, (j-1)%L] + grid[(i-1)%L, (j+1)%L] + grid[(i+1)%L, (j-1)%L] + grid[(i+1)%L, (j+1)%L])
            if grid[i, j] == ALIVE:
                # Survival: a live cell stays alive only with 2 or 3 neighbours.
                if (total < 2) or (total > 3):
                    newGrid[i, j] = DEAD
            else:
                # Birth: a dead cell with exactly 3 live neighbours revives.
                if total == 3:
                    newGrid[i, j] = ALIVE
    mat.set_data(newGrid)
    grid = newGrid
    return [mat]
# ################################################# FROM HERE SPECIFIC STUFF####################################################
# Simulate the classical Game of Life from this random lattice and record a movie.
fig, ax = plt.subplots()
mat = ax.matshow(grid,cmap=cmap, norm=norm)
# update() (defined above) computes one generation per animation frame.
ani = animation.FuncAnimation(fig, update, interval=300, save_count=200)
# Set up formatting for the movie files (requires ffmpeg to be installed).
Writer = animation.writers['ffmpeg']
writer = Writer(fps=3, metadata=dict(artist='Me'), bitrate=1800)
ani.save('big_lattice.mp4', writer=writer)
plt.show()
| [
"augustin@must-r.com"
] | augustin@must-r.com |
21a01cc49756cfc03d28892fdaa8dae0b1c06ef9 | d496cfbea8fe9d1cf9c71aceddf8cf3290cf43b7 | /Python101.py | a38a6bf3f4eabbd06dd7b4aac53d8f26ec24f73c | [] | no_license | rcragans/unit1 | b4f1c731ae03bba0d5f4fc8e824e5e6e2f3e1868 | 3ac6f98d018a09affff56c6ce934e9448ceb8d2f | refs/heads/master | 2020-04-06T11:25:59.278474 | 2018-11-20T20:33:02 | 2018-11-20T20:33:02 | 157,416,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | import random
# Number guessing game (Python 2: uses raw_input and print statements).
userName = raw_input("What is your name? ")
secret_number = random.randint(1,10)
gameOn = True
allowedGuesses = 5
userGuesses = 0
keepPlaying = True
# Outer loop: one iteration per full game; inner loop: one per guess.
while(keepPlaying):
	while(gameOn):
		# NOTE(review): input() in Python 2 eval()s whatever the user types;
		# raw_input() + int() would be safer — confirm the intended interpreter.
		userGuess = input("Guess a number between 1 and 10: ")
		userGuesses += 1
		if (int(userGuess) == secret_number):
			gameOn = False
			print ("Great job %s. Game Over" % userName)
		else:
			if (userGuesses == allowedGuesses):
				gameOn = False
				print ("You are out of guesses! The number was %i" % secret_number)
			elif (int(userGuess) > int(secret_number)):
				print ("%s, %i is too high" % (userName,userGuess))
				# NOTE: in Python 2 this parses as print((...) % (...)); it breaks on Python 3.
				print ("You have %i guesses left!") % (int(allowedGuesses)-int(userGuesses))
			else:
				print ("%s, %i is too low" % (userName, userGuess))
				print ("Guess again...")
				print ("You have %i guesses left!") % (int(allowedGuesses)-int(userGuesses))
	playAgain = raw_input("Would you like to play again? Y or N")
	if (playAgain == "N"):
		keepPlaying = False
		print ("Thanks for playing, %s" % userName)
	elif(playAgain == "Y"):
		# Reset per-game state and start over with a new secret number.
		secret_number = random.randint(1,10)
		userGuesses = 0
		gameOn = True
	else:
		print ("Huh?")
| [
"rcragans@gmail.com"
] | rcragans@gmail.com |
64711fb3acf5532d847d70ff0681440ccfa011c7 | 9bc98478292d3b3c5374d1785837f02fd0639eff | /Logistic Regression with Differential privacy/plot_results.py | c8f643bdcd5e59c3581f647a7e98d470a4476523 | [] | no_license | IngvarBjarki/master_thesis | d685f2678a6951ce91b5961d9d55dcf86d8be000 | a94ebcccc25a8a01263c814b0edb3fdbc4fc723a | refs/heads/master | 2021-07-04T11:22:54.286703 | 2020-08-13T18:56:40 | 2020-08-13T18:56:40 | 138,305,115 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,249 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 8 12:35:03 2018
@author: s161294
"""
import json
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import scipy.stats as stats
# The Set1 palette does not have enough colors, so a hand-picked list of
# RGB tuples (components in [0, 1]) is used instead — one per epsilon setting.
#colors = sns.color_palette("Set1", n_colors = 9) + [(1.0, 191/255, 0.0)] + sns.color_palette("Set2", n_colors = 3)[0:3:2]
colors = [(0.8941176470588236, 0.10196078431372549, 0.10980392156862745),
          (0.21568627450980393, 0.49411764705882355, 0.7215686274509804),
          ( 95/255, 158/255, 160/255),
          (0.596078431372549, 0.3058823529411765, 0.6392156862745098),
          (0.9686274509803922, 0.5058823529411764, 0.7490196078431373),
          (0.30196078431372547, 0.6862745098039216, 0.2901960784313726),
          (0.6, 0.6, 0.6),
          (1.0, 0.4980392156862745, 0.0),
          (0.6509803921568628, 0.33725490196078434, 0.1568627450980392),
          (1.0, 191/255, 0.0)
          ]
#colors = sns.color_palette("Set1", n_colors = 9) + [(1.0, 191/255, 0.0)] + sns.color_palette("Set2", n_colors = 3)[0:3:2]
sns.set_palette(colors)
sns.set_style('darkgrid')
# Load the simulation outputs produced by the training runs
# (absolute Windows paths — adjust when running on another machine).
with open(r"C:\Users\s161294\OneDrive - Danmarks Tekniske Universitet\Thesis\differential_privacy_logistic_regression\results.json", 'r') as f:
    results = json.load(f)
with open(r"C:\Users\s161294\OneDrive - Danmarks Tekniske Universitet\Thesis\differential_privacy_logistic_regression\standard_devations.json", 'r') as f:
    standard_devations = json.load(f)
with open(r"C:\Users\s161294\OneDrive - Danmarks Tekniske Universitet\Thesis\differential_privacy_logistic_regression\noise_and_weights.json") as f:
    noise_and_weights = json.load(f)
with open(r"C:\Users\s161294\OneDrive - Danmarks Tekniske Universitet\Thesis\differential_privacy_logistic_regression\additional_params.json") as f:
    additional_params = json.load(f)
total_amount_of_data_in_interval = additional_params['total_amount_of_data_in_interval']
epsilons = additional_params['epsilons']
num_splits = len(total_amount_of_data_in_interval)
num_simulations = 48
#%%
# JSON keys are strings; eval() turns them back into the original tuple
# keys so they compare/sort correctly in the plotting loops below.
keys = list(results.keys())
for key in keys:
    print(key)
    new_key = eval(key)
    results[new_key] = results.pop(key)
    standard_devations[new_key] = standard_devations.pop(key)
#%%
# Plot the mean error rate per setting with 95% confidence-interval bars.
fig = plt.figure(figsize=(7,4))
ax = plt.subplot(111)
all_limits = []
# we use the student t distribution as we use the sample mean and sigma
t_critical = stats.t.ppf(q = 0.95, df= num_simulations - 1)
for i, result in enumerate(sorted(results)):
    limits = []
    for j in range(len(standard_devations[result])):
        # Half-width of the confidence interval: t * s / sqrt(n).
        limit = t_critical * standard_devations[result][j] / np.sqrt(num_simulations)
        limits.append(limit)
    # result[1] is the string representation of the result
    ax.errorbar(total_amount_of_data_in_interval, results[result], yerr= limits, label = result[1], color = colors[i],\
                fmt='-o',capsize=2, markersize=5)
    all_limits.append(limits)
plt.legend(bbox_to_anchor=(1.05, 0.85), loc=2, borderaxespad=0.)
#Shrink current axis by 25% to make room for the legend
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
plt.ylabel('Error rate')
plt.xlabel('Amount of training data [N]')
plt.title('Regularized Logistic Regression with Differential privacy')
#%%
# =============================================================================
# # to make the plot look better in power point
# plt.rcParams.update({'text.color' : "white",
# 'axes.labelcolor' : "white",
# 'xtick.color':'white',
# 'ytick.color':'white',
# 'figure.facecolor':'#485d70'})
#
# =============================================================================
#plt.savefig('error_rate_log_regress.png', facecolor=fig.get_facecolor(), edgecolor='none')
plt.savefig('error_rate_log_regress.eps', format = 'eps')
plt.show()
# Closer look at the settings closest to the non-private weights: skip the
# noisiest epsilon settings and switch to a log scale.
fig = plt.figure(figsize=(7,4))
ax = plt.subplot(111)
num_worst_to_skip = 3
for i, result in enumerate(sorted(results)):
    if i > num_worst_to_skip:
        # result[1] is the string representation of the result
        ax.errorbar(total_amount_of_data_in_interval, results[result], yerr= all_limits[i], label = result[1], color = colors[i], fmt='-o',capsize=2, markersize=5)
ylim = ax.get_ylim()
plt.legend(bbox_to_anchor=(1.05, 0.75), loc=2, borderaxespad=0., fontsize = 12)
#Shrink current axis by 25% to make room for the legend
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
plt.yscale('log')
plt.ylabel('log(Error rate)', fontsize = 12)
plt.xlabel('Amount of training data [N]', fontsize = 12)
plt.savefig('error_rate_log_regress2.eps', format = 'eps')
plt.show()
# Same plot again, this time skipping only the single worst setting.
fig = plt.figure(figsize=(7,4))
ax = plt.subplot(111)
num_worst_to_skip = 1
for i, result in enumerate(sorted(results)):
    if i > num_worst_to_skip:
        ax.errorbar(total_amount_of_data_in_interval, results[result], yerr= all_limits[i], label = result[1], color = colors[i], fmt='-o',capsize=2, markersize = 5)
plt.legend(bbox_to_anchor=(1.05, 0.8), loc=2, borderaxespad=0., fontsize = 12)
#Shrink current axis by 25% to make room for the legend
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
plt.ylabel('log(Error rate)', fontsize = 12)
plt.xlabel('Amount of training data [N]', fontsize = 12)
#plt.ylim(ylim)
plt.yscale('log')
plt.savefig('error_rate_log_regress3.eps', format = 'eps')
plt.show()
#%%
# Magnitude of the confidence intervals themselves, one curve per setting.
fig = plt.figure(figsize=(7,4))
ax = plt.subplot(111)
keys = sorted(list(results.keys()))
for i, lim in enumerate(all_limits):
    ax.plot(total_amount_of_data_in_interval, lim, '-*',color = colors[i], label = keys[i][1], markersize = 5)
plt.legend(bbox_to_anchor=(1.05, 0.85), loc=2, borderaxespad=0.)
#Shrink current axis by 15% to make room for the legend
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.85, box.height])
plt.xlabel('Amount of training data [N]')
plt.ylabel('Confidance interval')
plt.savefig('LogisticConfidenceMagnitude.eps', format = 'eps')
plt.show()
#%%
# investigate the weights
#%%
# Plot the magnitude and the distribution of all the weights and noises
# generated, but only for the smallest, middle and largest training sizes.
x_labels = ['$\epsilon = {}$'.format(eps) for eps in epsilons]
x_labels.append('weights')
# we know that the first run has the greatest axis, so we capture it
biggest_axis_boxplot = None
biggest_axis_boxplot_sinh = None
biggest_axis_magnitude = None
for i, n in enumerate(noise_and_weights):
    item = noise_and_weights[n]
    if int(n) == total_amount_of_data_in_interval[0] or int(n) == total_amount_of_data_in_interval[int(num_splits / 2)] or int(n) == total_amount_of_data_in_interval[-1]:
        noise_and_weights_distribution = []
        noise_and_weights_magnitude = []
        # One column per epsilon (plus the raw weights), raw and absolute.
        for eps in item:
            noise_and_weights_distribution.append(noise_and_weights[n][eps])
            noise_and_weights_magnitude.append([abs(value) for value in noise_and_weights[n][eps]])
        num_labels = len(noise_and_weights_distribution)
        #plt.title('Distribution of noise and weights for n = {}'.format(n))
        ax = sns.boxplot(data=noise_and_weights_distribution)
        plt.xticks(range(num_labels), x_labels, rotation=45, fontsize = 12)
        #plt.savefig('distributionOfNoiseWeights_n={}.png'.format(n))
        plt.savefig('distributionOfNoiseWeights_n={}.eps'.format(n), format = 'eps', bbox_inches="tight")
        # Reuse the y-limits of the first (widest) plot on the later ones.
        if i == 0:
            biggest_axis_boxplot = ax.get_ylim()
        else:
            ax.set_ylim(biggest_axis_boxplot)
        plt.show()
        # lets do inverse hyperbolic transformation
        inv = [np.arcsinh(i) for i in noise_and_weights_distribution]
        #plt.title('Distribution of noise and weights for n = %s with $\mathrm{sinh}^{-1}$ transformation' % n)
        ax = sns.boxplot(data=inv)
        plt.xticks(range(num_labels), x_labels, rotation=45, fontsize = 12)
        if i == 0:
            biggest_axis_boxplot_sinh = ax.get_ylim()
        else:
            ax.set_ylim(biggest_axis_boxplot_sinh)
        #plt.savefig('distributionOfNoiseWeightsLog_n={}.png'.format(n))
        plt.savefig('distributionOfNoiseWeightsLog_n={}.eps'.format(n), format = 'eps', bbox_inches="tight")
        plt.show()
        #plt.title('Magnitude off noise and the weights.. n = {} with log transformation'.format(n))
        ax = sns.barplot(data=noise_and_weights_magnitude , estimator = sum)
        plt.yscale('log')
        plt.xticks(range(num_labels), x_labels, rotation=45, fontsize = 12)
        # NOTE(review): 'biggest_axes_magnitude' below does not match the
        # 'biggest_axis_magnitude' initialised at the top of this cell; it
        # still works because it is assigned on the first (i == 0) pass,
        # but the variable above is dead — confirm and unify the name.
        if i == 0:
            biggest_axes_magnitude = ax.get_ylim()
        else:
            print('third')
            ax.set_ylim(biggest_axes_magnitude)
        #plt.savefig('magnitudeOfNoiseAndWeights_n_{}.png'.format(n))
        plt.savefig('magnitudeOfNoiseAndWeights_n_{}.eps'.format(n), format = 'eps', bbox_inches="tight")
        plt.show()
#%%
# Write variances of the noise and means of the weights to a pandas frame
# in order to make an excel file to copy into latex.
x_labels = ['$\epsilon = {}$'.format(eps) for eps in epsilons]
x_labels.append('E(weights)')
x_labels.append('var(weights)')
names = ['interval'] + x_labels
statistics = []
for i, n in enumerate(noise_and_weights):
    item = noise_and_weights[n]
    # One row per training-set size: [interval, var(noise) per eps, E(w), var(w)].
    statistics.append([])
    statistics[-1].append(total_amount_of_data_in_interval[i])
    for j, eps in enumerate(item):
        name = x_labels[j]
        print(name)
        # if the name starts with $ we know it is an epsilon column
        if name[0] == '$':
            # get the variance of all the noise's
            statistics[-1].append(np.var(noise_and_weights[n][eps]))
        else:
            # get the variance and the mean of the weights
            statistics[-1].append(np.mean(noise_and_weights[n][eps]))
            statistics[-1].append(np.var(noise_and_weights[n][eps]))
statistics = pd.DataFrame(statistics, columns = names)
writer = pd.ExcelWriter('output.xlsx')
statistics.to_excel(writer, 'Sheet1')
writer.save()
print('done')
#
#
# =============================================================================
| [
"s161294@win.dtu.dk"
] | s161294@win.dtu.dk |
6f5d0f02cbaf99845a9c757d04d20e9b07a01330 | f2af6a5d41bbc08bc7b3291bbdc9d28020bf4c78 | /Practica_5/doc.py | b7e381e0ca0958e0ec55693700f2acbb5c6dca26 | [] | no_license | alejandroag/gcom | 6925db1a803492188f234692d1e40a5542a4fc1f | 0421a33bd78be32cf3eeb24cc9c7f3c9b2833dce | refs/heads/master | 2021-03-12T19:33:21.572270 | 2015-04-20T13:45:00 | 2015-04-20T13:45:00 | 34,068,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,589 | py | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
#from matplotlib.widgets import Button
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import Tkinter as Tk
import ttk as T
matplotlib.use('TkAgg')
from polynomial_curve_fitting import polynomial_curve_fitting
class DrawPoints:
    """Collects control points clicked on a matplotlib axes.

    Every press inside the axes drops a small blue circle marker and appends
    the (x, y) position to ``self.points`` (an (N, 2) numpy array).
    """
    def __init__(self, fig, ax):
        self.fig = fig
        self.ax = ax
        self.points = None          # (N, 2) array of clicked points, or None
        self.N = 0                  # number of points clicked so far
        self.patchList = []         # Circle patches drawn for each point
        self.exists_touched_circle = False
        self.cid_press = fig.canvas.mpl_connect('button_press_event', self.on_press)
        self.cid_release_button = fig.canvas.mpl_connect('button_release_event',
                                                         self.on_release)
    def on_press(self, event):
        """Mouse-press callback: register a new point and draw its marker."""
        if event.inaxes!=self.ax: return
        c = Circle((event.xdata, event.ydata), 0.2, color='b')
        patch=self.ax.add_patch(c)
        self.patchList.append(patch)
        ar = np.array([[event.xdata,event.ydata]])
        # Bug fix: comparing a numpy array against None with ``!=`` is an
        # elementwise comparison, which makes the ``if`` truth value
        # ambiguous; use an identity test instead.
        if self.points is not None:
            self.points = np.concatenate((self.points,ar),axis=0)
        else:
            self.points = ar
        self.N = self.N + 1
        self.fig.canvas.draw()
    def on_release(self, event):
        """Mouse-release callback (no drag handling implemented)."""
        self.exists_touched_circle = False
        return
    def getN(self):
        """Return the number of points, defaulting to 10 when none exist."""
        if self.N == 0:
            return 10
        else:
            return self.N
    def getPoints(self):
        """Return the clicked points, or generate and draw 10 random ones."""
        # Bug fix: identity test instead of ``!=`` (see on_press).
        if self.points is not None:
            return self.points
        else:
            x = np.random.randint(-10, 10, size=(10, 2))
            for i in range (1,10):
                c = Circle((x[i,0], x[i,1]), 0.2, color='b')
                patch=self.ax.add_patch(c)
                self.patchList.append(patch)
            return x
    def clearPoints(self):
        """Remove every drawn marker and reset the point store."""
        for c in self.patchList:
            c.remove()
        self.patchList = []
        self.points = None
        self.N = 0
class Window:
    """Tk control panel for the polynomial curve-fitting demo.

    Builds the buttons/entries, owns the DrawPoints click collector and
    redraws the fitted curve whenever one of the method buttons is pressed.
    """
    def start(self,root,fig,ax):
        """Create every widget and attach the DrawPoints collector."""
        self.degree = None   # polynomial degree parsed from the entry
        self.fig = fig
        self.ax = ax
        self.curve = None    # Line2D of the currently drawn fit, if any
        self.control = None
        self.L = 0           # value forwarded as L= to the least-squares fit
        # NOTE(review): command=quit binds the *builtin* quit, not
        # self.quit defined below — confirm which one is intended.
        button = Tk.Button(master=root, text='Quit', command=quit)
        button.pack(side=Tk.BOTTOM)
        points = Tk.Label(root, text="Points", font=("Helvetica", 14), bg = 'silver')
        points.place(x=5, y=40)
        method = Tk.Label(root, text="Method", font=("Helvetica", 14), bg = 'silver')
        method.place(x=445, y=40)
        # Entry for the polynomial degree used by the least-squares fits.
        label_D = Tk.Label(root, text="Degree", font=("Helvetica", 12), bg = 'silver')
        label_D.place(x=445, y=90)
        self.vD = Tk.StringVar()
        lD_entry = Tk.Entry(root, textvariable=self.vD, width=3)
        lD_entry.place(x=505, y=90)
        # Entry for the L parameter of the least-squares fit.
        label_L = Tk.Label(root, text="L", font=("Helvetica", 12), bg = 'silver')
        label_L.place(x=564, y=90)
        self.vL = Tk.StringVar()
        lL_entry = Tk.Entry(root, textvariable=self.vL, width=3)
        lL_entry.place(x=584, y=90)
        bnewt = Tk.Button(master=root, text='Newton', command=self.newton)
        bnewt.place(x=445, y=140)
        bnewt_lib = Tk.Button(master=root, text='Newton lib', command=self.newtonlib)
        bnewt_lib.place(x=545, y=140)
        bls = Tk.Button(master=root, text='Least sq', command=self.ls)
        bls.place(x=445, y=190)
        bls_lib = Tk.Button(master=root, text='Least sq lib', command=self.lslib)
        bls_lib.place(x=545, y=190)
        self.vChev = Tk.IntVar()
        bchev = Tk.Checkbutton(master=root, text='Chebyshev', variable=self.vChev, font=("Helvetica", 12), bg = 'silver')
        bchev.place(x=445, y=240)
        bclear = Tk.Button(master=root, text='Clear', command=self.clear)
        bclear.place(x=445, y=380)
        self.draw_points = DrawPoints(fig, ax)
        # NOTE(review): this registers the DrawPoints *object* as the
        # key-press callback (via the module-level canvas); it looks like
        # self.on_key_event was intended — confirm.
        canvas.mpl_connect('key_press_event', self.draw_points)
    def draw(self, poly, x, color):
        """Replace the currently drawn curve with ``poly`` (an (n, 2) array)."""
        self.clean_up()
        self.curve = Line2D(poly[:, 0], poly[:, 1])
        self.control= Line2D(x[:, 0], x[:, 1])
        self.curve.set_color(color)
        self.ax.add_line(self.curve)
        self.fig.canvas.draw()
    def clean_up(self):
        """Remove the previously drawn curve, if any."""
        if self.curve != None:
            self.curve.remove()
            self.curve = None
    def newton(self):
        """Fit with the in-house Newton interpolation and draw it in cyan."""
        N = self.draw_points.getN()
        if self.vChev.get():
            knots = 'chebyshev'
        else:
            knots = np.linspace(0, 1, N)
        x = self.draw_points.getPoints()
        num_points = 200
        poly = polynomial_curve_fitting(x, knots, method='newton',
                                        libraries=False, num_points=num_points)
        self.draw(poly,x,'cyan')
    def newtonlib(self):
        """Fit with the library Newton interpolation and draw it in navy."""
        N = self.draw_points.getN()
        x = self.draw_points.getPoints()
        if self.vChev.get():
            knots = 'chebyshev'
        else:
            knots = np.linspace(0, 1, N)
        num_points = 200
        poly = polynomial_curve_fitting(x, knots, method='newton',
                                        libraries=True, num_points=num_points)
        self.draw(poly,x,'navy')
    def ls(self):
        """Fit with the in-house least squares (degree/L entries) in red."""
        self.getD()
        self.getL()
        N = self.draw_points.getN()
        x = self.draw_points.getPoints()
        if self.vChev.get():
            knots = 'chebyshev'
        else:
            knots = np.linspace(0, 1, N)
        num_points = 200
        poly = polynomial_curve_fitting(x, knots, method='least_squares', L=self.L,
                                        libraries=False, num_points=num_points, degree=self.degree)
        self.draw(poly,x,'r')
    def lslib(self):
        """Fit with the library least squares and draw it in crimson."""
        self.getD()
        N = self.draw_points.getN()
        x = self.draw_points.getPoints()
        if self.vChev.get():
            knots = 'chebyshev'
        else:
            knots = np.linspace(0, 1, N)
        num_points = 200
        poly = polynomial_curve_fitting(x, knots, method='least_squares',
                                        libraries=True, num_points=num_points, degree=self.degree)
        self.draw(poly,x,'crimson')
    def clear(self):
        """Remove all control points and the fitted curve from the plot."""
        self.draw_points.clearPoints()
        self.clean_up()
        self.fig.canvas.draw()
    def on_key_event(self, event):
        """Log key presses and forward them to the toolbar handler."""
        print('you pressed %s'%event.key)
        # NOTE(review): key_press_handler is not imported in this file —
        # this call would raise NameError if the callback ever fired.
        key_press_handler(event, canvas, toolbar)
    def quit(self):
        """Stop the Tk mainloop and destroy the root window."""
        root.quit() # stops mainloop
        root.destroy() # this is necessary on Windows to prevent
        # Fatal Python Error: PyEval_RestoreThread: NULL tstate
    def getN(self, event):
        # NOTE(review): self.vN is never created in this class; this
        # callback would raise AttributeError if it were ever bound.
        self.N = self.vN.get()
    def getD(self):
        """Parse the degree entry into self.degree; None when left empty."""
        if self.vD.get() != "":
            self.degree = int(self.vD.get())
        else:
            self.degree = None
    def getL(self):
        """Parse the L entry into self.L; 0 when left empty."""
        if self.vL.get() != "":
            self.L = float(self.vL.get())
        else:
            self.L = 0
if __name__ == '__main__':
    root = Tk.Tk()
    # Square matplotlib axes spanning [-10, 10]^2 for the control points.
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect=1)
    ax.set_xlim(-10, 10)
    ax.set_ylim(-10, 10)
    plt.subplots_adjust(right=0.65)
    # Embed the matplotlib figure inside the Tk window.
    canvas = FigureCanvasTkAgg(fig, master=root)
    canvas.show()
    canvas.get_tk_widget().pack(side=Tk.TOP)
    toolbar = NavigationToolbar2TkAgg( canvas, root )
    toolbar.update()
    canvas._tkcanvas.pack()
    # NOTE(review): 'toolbar' is rebound here, discarding the navigation
    # toolbar reference created just above — confirm that is intended.
    toolbar = Tk.Frame(root)
    toolbar.pack(side=Tk.BOTTOM, fill="x")
    window = Window()
    window.start(root,fig,ax)
    Tk.mainloop()
| [
"vic.gonzalez.92@gmail.com"
] | vic.gonzalez.92@gmail.com |
0ed93b929f09cacefcc977d44076809aaee5ca4c | 8420a07441b5499dbb5516f559ab15db62e19955 | /wbdc2021-preliminary/src/inferenceLightgbm.py | f490d64efd4b30a59c6eb34e0dd2d97d1e08743b | [] | no_license | WisleyWang/2021WeChat_Big_Data_Challenge | f17e6fb241e58074d0077bae8f1d88aa4fd61b2a | 0dff566b44b0b3893bd9fb0ba30904ac9975bcb2 | refs/heads/main | 2023-07-07T17:43:51.036730 | 2021-08-17T05:39:49 | 2021-08-17T05:39:49 | 396,781,757 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,960 | py | from configLightgbm import *
import joblib
import pandas as pd
import gc
if __name__=='__main__':
cols_add=[]#['sim_graphEmbed_feedid_userid','sim_w2v_tag_maxprob_userid'] #除了重要性之外其他特征
cols_list=[]
cols_total=[]
for y in ACTION_LIST[:4]:
cols_y=[]
importanceDF=pd.read_csv(FEATURE_PATH+'importanceDF_val_{}_testab.csv'.format(y))
cols_y=list(importanceDF['column'][:300])
cols_y.extend(cols_add)
cols_list.append(cols_y)
cols_total.extend(cols_y)
cols_total=set(cols_total)
"""
划分数据集
"""
dataDF=pd.read_pickle(FEATURE_PATH+'dataDFtestab.pkl')
play_cols = ['is_finish', 'play_times','play_stay', 'play', 'stay','stay_minus_play']
KEYTAG=['manual_keyword_list', 'machine_keyword_list', 'manual_tag_list', 'machine_tag_list',
'description', 'ocr', 'asr']
# cols = [f for f in dataDF.columns if f not in ['date_'] + play_cols + ACTION_LIST+KEYTAG]
# dataDF = reduce_mem(dataDF, [f for f in dataDF.columns if f not in ['date_'] + play_cols + ACTION_LIST])
train = dataDF[~dataDF['read_comment'].isna()].reset_index(drop=True)
test = dataDF[dataDF['read_comment'].isna()].reset_index(drop=True)
# trn_x = train[train['date_'] < 14].reset_index(drop=True)
# val_x = train[train['date_'] == 14].reset_index(drop=True)
# dataDF.to_pickle(FEATURE_PATH+'dataDF{}.pkl'.format(892))
del dataDF
gc.collect()
for seed in SEED_LIST:
for idx,y in enumerate(ACTION_LIST[:4]):
print('=========', y, '=========')
# 模型加载
clf = joblib.load(MODEL_PATH+'lgb_model_testab_{}_seed{}.pkl'.format(y,seed))
test[y] = clf.predict_proba(test[cols_list[idx]])[:, 1]
test[['userid', 'feedid'] + ACTION_LIST[:4]].to_csv(
SUBMIT_PATH+'sub_lgb_testab_seed{}.csv'.format(seed),
index=False
)
| [
"903953316@qq.com"
] | 903953316@qq.com |
12af77b777f0b22e6ab1b759691c9c5f16ddbdc6 | 294c9404f5d884a9a0d1264ec133de944ae1601f | /main.py | 30385cb2f328844bbcaa478e818e97c956936feb | [] | no_license | Absherr/TornadoChat | f8fdc58baecb7d3e64cb581353fe0c9d621a7418 | 87828be9d8c776bb06987e92f167ba0966a1b5ee | refs/heads/master | 2020-04-17T09:13:10.698051 | 2013-03-14T12:42:14 | 2013-03-14T12:42:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | import json
import datetime
import tornado.ioloop
import tornado.web
import os
# In-memory chat history: list of (login, date, msg) tuples; lost on restart.
history = []
class NewMsgHandler(tornado.web.RequestHandler):
    """Accepts a chat message via POST, stores it and echoes it as JSON."""
    def post(self):
        login = self.get_argument("login", None)
        # Bug fix: the original format used %m (zero-padded month) where
        # minutes were intended; %M is the minutes directive.
        date = str(datetime.datetime.now().strftime('%d-%h-%Y, %H:%M:%S'))
        msg = self.get_argument("msg", None)
        # Append to the shared module-level history list.
        history.append((login,date,msg))
        self.write(json.dumps({"login":login,"date":date,"msg":msg}))
class GetListHandler(tornado.web.RequestHandler):
    """Returns the full chat history as a JSON object."""
    def get(self):
        # Response shape: {"length": N, "0": {...}, "1": {...}, ...} — the
        # integer indices become strings once serialised to JSON.
        d = {}
        d['length']=len(history)
        for i in range(len(history)):
            d[i]={"login":history[i][0],"date":history[i][1],"msg":history[i][2]}
        self.write(json.dumps(d))
class ChatHandler(tornado.web.RequestHandler):
    """Serves the chat page rendered from templates/chat.html."""
    def get(self):
        self.render("chat.html")
# Tornado resolves templates (chat.html) from a folder next to this file.
settings = dict(
    template_path=os.path.join(os.path.dirname(__file__), "templates"),
)
application = tornado.web.Application([
    (r"/", ChatHandler),
    (r"/getList", GetListHandler),
    (r"/new", NewMsgHandler),
    (r'/static/(.*)', tornado.web.StaticFileHandler, {'path': "static"}),
],**settings)
if __name__ == "__main__":
    # Single-process development server on port 8000.
    application.listen(8000)
    tornado.ioloop.IOLoop.instance().start()
"absherr@absherr-VirtualBox.(none)"
] | absherr@absherr-VirtualBox.(none) |
55d5457523106be301584f485d2044be5c180be7 | ed32eb1eb0a328a4ffe89e178fc4987470f333cd | /exercise/day1-4/compute.py | 7e64771516f775b9bee62dbd5f5d8fe460b8b9c5 | [] | no_license | xiaoyaojjian/py_learn | c6f5bdf31bcebf29dd914e81e6be9305a61265cc | 95e494ea823d2074a05c1c2a49595002a1576093 | refs/heads/master | 2020-12-05T23:22:11.017066 | 2016-09-08T01:13:08 | 2016-09-08T01:13:08 | 67,654,055 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | """
计算器, 用了eval() 没有任何意义了, 四则运算应该单独写一个函数处理
"""
import re
a = '1 - 2 * ( (60-30 +(-40/5) * (9-2*5/3 + 7 /3*99/4*2998 +10 * 568/14 )) - (-4*3)/ (16-3*2) )'
print(eval(a))
def get_brackets_data(formula):
    """Return the text inside every innermost (non-nested) pair of parens."""
    innermost = re.compile(r'\(([^()]+)\)')
    return innermost.findall(formula)
# Repeatedly replace every innermost "(expr)" with its evaluated value
# until no parentheses remain, printing each intermediate expression,
# then evaluate the final flat expression.
while re.search('[()]', a):
    for i in get_brackets_data(a):
        a = a.replace('(%s)' % i, str(eval(i)))
    print(a)
print(eval(a))
"q2868765@qq.com"
] | q2868765@qq.com |
406d8534dccbf16335e377bc84090dc8a7f0a43d | f7d48b2f00fdb399eb0fccb8756f7d177ce80c5f | /nationalDaySpider/spider.py | af21a0e2894a1600ab938c1cc061ad9c5a940015 | [] | no_license | Songlynn/DataAnalysisProjects | 55f2f1b3c8c2316c9daa5cf48ff670ea90ca1441 | f0c8d2f4889424e48164c830ec6f726af99c71af | refs/heads/master | 2020-03-30T07:36:48.054702 | 2018-10-08T05:23:51 | 2018-10-08T05:23:51 | 150,952,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,218 | py | import requests # 模拟请求
import re # 正则表达式
import json
class NationalDaySpider:
    """Scrapes Sogou search-heat data for a fixed list of tourist attractions.

    set_url() chunks the attraction names into groups of up to five keywords
    per request; get_data() fetches and parses the embedded JSON payloads;
    save_data() dumps the per-day and per-month rows to text files.
    """
    baseUrl = 'http://zhishu.sogou.com/index/searchHeat'
    addresses = [
        "布达拉宫", "稻城亚丁", "故宫", "张家界", "九寨沟", "丽江古城", "雅鲁藏布江大峡谷", "乐山大佛", "万里长城",
        "宏村", "鼓浪屿", "婺源", "纳木错", "外滩", "三清山","三亚", "乌镇", "凤凰古城", "峨眉山", "青海湖", "黄山",
        "洱海", "元阳梯田", "长白山天池", "周庄", "桂林", "长江三峡", "呼伦贝尔", "月牙泉", "颐和园", "黄果树瀑布",
        "华山", "阿坝", "壶口瀑布", "龙脊梯田", "维多利亚港", "香格里拉", "泸沽湖", "鸟巢", "可可西里", "秦始皇兵马俑",
        "西双版纳", "趵突泉", "大连", "中山陵", "大兴安岭", "大雁塔", "丹霞山", "都江堰", "贺兰山", "夫子庙", "龙虎山",
        "恒山", "衡山", "黄帝陵", "黄龙景区", "晋祠", "井冈山", "喀纳斯", "海口", "楼兰古城", "景德镇", "庐山", "罗平",
        "莫高窟", "帕米尔高原", "平遥古城", "普陀山", "千户苗寨", "曲阜三孔", "日月潭", "三峡大坝", "三星堆遗址",
        "沙坡头", "神农架", "瘦西湖", "苏州园林", "泰山", "避暑山庄", "太湖", "滕王阁", "五大连池", "武当山", "西湖",
        "阳朔西街", "西塘", "西夏王陵", "雁荡山", "殷墟", "玉龙雪山", "云冈石窟", "千岛湖", "朱家角", "北戴河",
        "自贡恐龙博物馆"
    ]
    urlList = []
    # Build one request URL per group of up to five attraction names.
    def set_url(self):
        """Populate self.urlList with one API URL per chunk of 5 keywords.

        The API accepts up to five comma-separated keywords per call, so the
        address list is chunked in groups of five; the final group may be
        shorter when len(addresses) is not a multiple of 5.
        """
        # Rebind as an instance attribute so repeated calls (or several
        # spider instances) do not keep appending to the shared class list.
        self.urlList = []
        for index, address in enumerate(self.addresses):
            # http://zhishu.sogou.com/index/searchHeat?kwdNamesStr={address1,address2...}&timePeriodType=MONTH&dataType=SEARCH_ALL&queryType=INPUT
            if index % 5 == 0:
                url = self.baseUrl + '?kwdNamesStr=' + address
                # Bug fix: the original bound check ("index + 4 <= len" with
                # a "len - index + 1" fallback) could index one element past
                # the end of the list when len(addresses) % 5 was 1 or 2.
                group_size = min(5, len(self.addresses) - index)
                for i in range(1, group_size):
                    url += ',' + self.addresses[index + i]
                url += '&timePeriodType=MONTH&dataType=SEARCH_ALL&queryType=INPUT'
                self.urlList.append(url)

    def get_data(self):
        """Fetch every URL and return (dayData, monthData).

        dayData rows:   [name, date, daily page views]
        monthData rows: [name, average page views, total page views]
        Returns None if any request or parsing step fails (best effort,
        matching the original behaviour).
        """
        try:
            self.set_url()
            dayData = []
            monthData = []
            for url in self.urlList:
                print('当前地址为:' + url)
                res = requests.get(url)  # fetch the page with the embedded JSON
                # The payload is embedded in the page as "root.SG.data = {...}]};"
                # so the trailing "}]}"" has to be re-appended before parsing.
                data = re.findall(r'root.SG.data = (.*)}]};', res.text)
                total_json = json.loads(data[0] + "}]}")
                print(total_json)
                info_list = total_json["infoList"]  # per-keyword aggregates
                pv_list = total_json["pvList"]      # per-keyword daily samples
                # infoList[i] carries kwdName / avgPv / kwdSumPv.sumPv for
                # keyword i; pvList[i] is a list of {date, pv, ...} records.
                for index, info in enumerate(info_list):
                    for pvDate in pv_list[index]:
                        dayData.append([info['kwdName'], pvDate['date'], pvDate['pv']])
                    monthData.append([info['kwdName'], info['avgPv'], info['kwdSumPv']['sumPv']])
            return dayData, monthData
        except Exception as exc:
            # Narrowed from a bare except; still best effort (returns None).
            print('exception')
            print(exc)

    # Write the collected rows to two text files.
    def save_data(self, dayData, monthData):
        """Write dayData.txt and monthData.txt as comma-separated rows."""
        # UTF-8 is forced so the Chinese headers cannot crash on platforms
        # whose default encoding does not cover them; the with-statements
        # also guarantee the files are closed on error.
        with open('dayData.txt', 'w', encoding='utf-8') as f1:
            f1.write('景点, 日期, 日访问量\n')
            for line in dayData:
                f1.write(", ".join('%s' % k for k in line) + '\n')
        with open('monthData.txt', 'w', encoding='utf-8') as f2:
            f2.write('景点, 平均访问量, 总访问量\n')
            for line in monthData:
                f2.write(", ".join('%s' % k for k in line) + '\n')
sp = NationalDaySpider()
dayData, monthData = sp.get_data()
sp.save_data(dayData, monthData)
| [
"slynn610131181@163.com"
] | slynn610131181@163.com |
9a349bdff55428263cf1cc45710f3ca4b16e54ac | 5de9a76cd5a587b8779d8cf5c94499fd8335e7ee | /Arquitectura con Proxy/proxy.py | b2e69d9afc1b3736cfad4e7ebe74bc0ceef888f0 | [] | no_license | Yefri97/zeromq-projects | 1396843b67c0168fc61adbb7c0db85c637031402 | 296b1a2e4fb2aee2aea53c045b10995e0b0d3dfe | refs/heads/master | 2021-01-04T22:02:48.931637 | 2020-05-05T23:06:30 | 2020-05-05T23:06:30 | 240,776,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,276 | py | # python proxy.py
import zmq
# REP socket: every recv_multipart() must be answered with send_multipart().
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")
print("Proxy Iniciado", flush = True)
iterator = 0        # round-robin index of the next storage server
nServers = 0        # number of registered servers
servers = list()    # b"ip:port" of every registered server
available = list()  # remaining capacity per server (parallel to servers)
registry = {}       # filename -> list of b"part;ip:port" placement records
while True:
    message = socket.recv_multipart()
    action = message[0].decode()
    if action == 'registrar':
        # Register a storage server: frames are [action, ip, port, capacity].
        ip, port, capacity = (message[1], message[2], message[3])
        servers.append(ip + b":" + port)
        available.append(int(capacity.decode()))
        socket.send_multipart([b"Servidor Registrado"])
        nServers = nServers + 1
        print("Servidor Registrado", flush = True)
    elif action == 'guardar':
        # Place a file part on the next server (round robin) and reply
        # with that server's address so the client uploads there.
        filename, part, weight = (message[1], message[2], message[3])
        filename = filename.decode()
        if filename not in registry:
            registry[filename] = list()
        registry[filename].append(part + b";" + servers[iterator])
        # NOTE(review): capacity is decremented but never checked, so a
        # full server keeps receiving parts — confirm that is intended.
        available[iterator] -= int(weight.decode())
        socket.send_multipart([servers[iterator]])
        iterator = (iterator + 1) % nServers
    elif action == 'obtener':
        # Return every placement record for the requested filename.
        filename = message[1]
        socket.send_multipart([data for data in registry[filename.decode()]])
    elif action == 'listar':
        # Return the names of all registered files.
        socket.send_multipart([file.encode() for file in registry])
    else:
        socket.send_multipart([b"error"])
        print("Acción no Valida")
| [
"yefri.gaitan97@gmail.com"
] | yefri.gaitan97@gmail.com |
5ca9483288faf50a5d3982a59eab82a8315ff809 | 3d6b3002dcc252eb8954ae2ea2bf1c50d9beac9a | /codebuild-signdetection.py | 94ee2998fa404a376e0fff1a854ecfe51e2d3cc9 | [] | no_license | dhirajgoyal86/AiDevOps-M3_P2 | 3d4aa6fcb91f11118b52de1cd9a18f6cf5fae349 | 804dc351c740c694ca2a99aeb2c2dc4462950cf7 | refs/heads/main | 2023-02-17T22:32:38.143135 | 2021-01-20T16:17:48 | 2021-01-20T16:17:48 | 329,303,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,561 | py | #!/usr/bin/env python
# coding: utf-8
# # Running on new images
# This notebook will walk you step by step through the process of using a pre-trained model to detect traffic signs in an image.
# # Imports
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import os
import tensorflow as tf
from matplotlib import pyplot as plt
from PIL import Image
import glob as glob
import boto3
from io import BytesIO
#get_ipython().system(' ls')
# # Environment setup
#get_ipython().run_line_magic('matplotlib', 'inline')
import sys
# Append your Tensorflow object detection and darkflow directories to your path
#sys.path.append('/home/ec2-user/SageMaker/objectdetection/object_detection') # ~/tensorflow/models/research/object_detection
#sys.path.append('/home/ec2-user/SageMaker/SignDetection/aarcos/darkflow') # ~/darkflow
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# # Tensorflow Object Detection API
# ## Model preparation
# In[4]:
# Alternative detector backbones that were benchmarked; keep one uncommented.
# MODEL_NAME = 'faster_rcnn_inception_resnet_v2_atrous'
# MODEL_NAME = 'faster_rcnn_resnet_101'
# MODEL_NAME = 'faster_rcnn_resnet50'
# MODEL_NAME = 'faster_rcnn_inception_v2'
# MODEL_NAME = 'rfcn_resnet101'
# MODEL_NAME = 'ssd_inception_v2'
MODEL_NAME = 'ssd_mobilenet_v1'
# In[5]:
# Path to frozen detection graph. This is the actual model that is used for the traffic sign detection.
MODEL_PATH = os.path.join('models_signdetection', MODEL_NAME)
PATH_TO_CKPT = os.path.join(MODEL_PATH,'inference_graph/frozen_inference_graph.pb')
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('gtsdb_data', 'gtsdb3_label_map.pbtxt')
NUM_CLASSES = 3
# In[6]:
print(MODEL_PATH)
print(PATH_TO_CKPT)
#!ls -ltr
def getImage(key_string):
    """Download an S3 object (module-level BUCKET) and return it as BytesIO."""
    s3_resource = boto3.client('s3')
    # key = 'working-storage/traffic.jpg'
    img_data = s3_resource.get_object(Bucket=BUCKET, Key=key_string)['Body'].read()
    # NOTE(review): 'image' is decoded but never used — the caller opens
    # the returned BytesIO itself; confirm this line can be dropped.
    image = Image.open(BytesIO(img_data))
    return BytesIO(img_data)
def makeS3path(key):
    """Return the s3:// URI for *key* inside the module-level BUCKET."""
    return ''.join(['s3://', BUCKET, '/', key])
def getList(bucket_name_string, prefix_string):
    """List the keys of every object ending in 'jpg' under the given prefix."""
    s3 = boto3.resource('s3')
    my_bucket = s3.Bucket(bucket_name_string)
    keys = []
    for my_bucket_object in my_bucket.objects.filter(Prefix=prefix_string):
        if my_bucket_object.key.endswith('jpg'):
            keys.append(my_bucket_object.key)
    #print(keys)
    return (keys)
def copy_to_working(bucket_name, key_read, key_write):
    """Copy an object to another key inside the same S3 bucket."""
    s3r = boto3.resource('s3')
    bucket = s3r.Bucket(bucket_name)
    copy_source = {
        'Bucket': bucket_name,
        'Key': key_read
    }
    bucket.copy(copy_source, key_write)
def numpy_to_s3(bucket_name,key_write,image_np):
    """Encode an image array as JPEG and upload it to S3.

    image_np: numpy array accepted by PIL.Image.fromarray — presumably an
    (H, W, 3) uint8 array here (see load_image_into_numpy_array); confirm.
    """
    s3 = boto3.resource('s3')
    from PIL import Image
    import io
    img = Image.fromarray(image_np).convert('RGB')
    out_img = BytesIO()
    img.save(out_img, format='JPEG')
    out_img.seek(0)  # rewind so put_object streams from the start
    s3.Bucket(bucket_name).put_object(Key=key_write,Body=out_img,ContentType='image/jpg')
# ## Load a (frozen) Tensorflow model into memory
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    # Deserialise the frozen inference graph and import it into this graph.
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `2`, we know that this corresponds to `mandatory`.
# In[8]:
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index (categories)
print(label_map)
# ## Helper code
# In[9]:
def load_image_into_numpy_array(image):
    """Convert a PIL-style image into an (height, width, 3) uint8 numpy array."""
    width, height = image.size
    flat_pixels = np.array(image.getdata())
    return flat_pixels.reshape((height, width, 3)).astype(np.uint8)
# ## Detection
# In[10]:
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = glob.glob(os.path.join(PATH_TO_TEST_IMAGES_DIR, '*.jpg'))
# Size, in inches, of the output images.
IMAGE_SIZE = (20, 20)
print(TEST_IMAGE_PATHS)
# In[11]:
#BUCKET = 'sagemaker-aidevops'
BUCKET = 'cop-group10'
prefix = 'working-storage/'
#TEST_IMAGE_PATHS = getList(BUCKET,prefix )
# NOTE: the local glob result above is overwritten with a single fixed S3 key.
TEST_IMAGE_PATHS = ['working-storage/sample.jpg']
# Run inference on each configured image inside one TF session.
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        for idx, image_path in enumerate(TEST_IMAGE_PATHS):
            print('Index is: ', idx, image_path)
            image = Image.open(getImage(image_path))
            # the array based representation of the image will be used later in order to prepare the
            # result image with boxes and labels on it.
            image_np = load_image_into_numpy_array(image)
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Each box represents a part of the image where a particular object was detected.
            boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represent how level of confidence for each of the objects.
            # Score is shown on the result image, together with the class label.
            scores = detection_graph.get_tensor_by_name('detection_scores:0')
            classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes, scores, classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            # Code in next cell
            # Collect {label: score} dicts for detections above the 0.5 threshold.
            objects = []
            for index, value in enumerate(classes[0]):
                object_dict = {}
                if scores[0, index] > 0.5:
                    object_dict[(category_index.get(value)).get('name').encode('utf8')] = scores[0, index]
                    objects.append(object_dict)
            print (objects)
            #toDB(makeS3path(image_path), result(objects, image_path))
            #copy_to_working(bucket_name, key_read, key_write)
            #numpy_to_s3('sagemaker-aidevops','sign-detection/sample.jpg',image_np)
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=6)
            # Upload the annotated frame back to S3.
            numpy_to_s3('cop-group10','sign-detection/sample.jpg',image_np)
'''
plt.figure(idx, figsize=IMAGE_SIZE)
plt.axis('off')
plt.imshow(image_np)
# # Darkflow
# ## Model preparation
# In[13]:
from darkflow.net.build import TFNet
import cv2
MODEL_NAME = 'yolo_v2'
MODEL_PATH = os.path.join('models', MODEL_NAME)
options = {'model': os.path.join(MODEL_PATH, '{}.cfg'.format(MODEL_NAME)),
'labels': os.path.join(MODEL_PATH, 'labels.txt'),
'backup': MODEL_PATH,
'load' : 50500,
'threshold': 0.5,
'gpu' : 1.0}
tfnet = TFNet(options)
# ## Helper code
# In[14]:
def plot_rectangle(bbox, ax, class_name, edgecolor, confidence=None):
xmin = bbox[0]
ymin = bbox[1]
xmax = bbox[2]
ymax = bbox[3]
left = xmin
right = xmax
top = ymin
bot = ymax
ax.add_patch(
plt.Rectangle((left, top),
right-left,
bot-top, fill=False,
edgecolor=edgecolor, linewidth=3.5)
)
label = '{:s}'.format(class_name)
label_pos_y = top-10
if confidence:
label += ' {0:.2f}'.format(confidence)
label_pos_y = bot+20
ax.text(left, label_pos_y,label,
bbox=dict(facecolor=edgecolor, alpha=0.5),
fontsize=14, color='white')
# In[15]:
def get_label_id(label_name):
for category in categories:
if category['name'] == label_name:
return category['id']
# ## Detection
# In[16]:
min_score_thresh = 0.5
for image_path in TEST_IMAGE_PATHS:
fig, ax = plt.subplots(figsize=(20, 20))
image = Image.open(image_path)
image_name = os.path.basename(image_path)
width, height = image.size
ax.imshow(image)
image_np = load_image_into_numpy_array(image)
image_np = image_np[:,:,::-1] # rgb -> bgr
pred_results = tfnet.return_predict(image_np)
for idx, det in enumerate(pred_results):
score = det['confidence']
if score > min_score_thresh:
bbox = det['topleft']['x'], det['topleft']['y'], det['bottomright']['x'], det['bottomright']['y']
label = get_label_id(det['label'])
plot_rectangle(bbox,ax,category_index[label]['name'],'red', score)
plt.draw()
fig.tight_layout()
plt.axis('off')
plt.show()
# In[ ]:
'''
| [
"70766568+dhirajgoyal86@users.noreply.github.com"
] | 70766568+dhirajgoyal86@users.noreply.github.com |
28d8d71beb62cb4c16ede93725b55071389a63f4 | a35382d76e080e2eabf63b0171f4f98e54a40970 | /58174.Enzo.Ponce/RomanNumbers/Test_roman_numbers.py | bad6aae1b1129750b6b7cbaec2cf01c04597ffa7 | [] | no_license | dqmdz/2020.01 | a92be182ce4251dac4e20a16e371699a077556a4 | 2b55c88494bce177b8d6aad5337f4df5e2cc6f95 | refs/heads/master | 2022-08-02T04:53:00.173312 | 2020-05-26T00:12:34 | 2020-05-26T00:12:34 | 255,603,230 | 0 | 1 | null | 2020-04-21T13:32:32 | 2020-04-14T12:29:22 | Python | UTF-8 | Python | false | false | 277 | py | import unittest
from Roman_numbers import roman_to_decimal
class TestRomanNumbers(unittest.TestCase):
    """Unit tests for roman_to_decimal."""

    def test_I_roman_to_decimal(self):
        # 'I' is the smallest Roman numeral and must map to 1.
        self.assertEqual(roman_to_decimal('I'), 1)
# Fixed: the guard previously compared against 'main__', so the tests never
# ran when this file was executed directly.
if __name__ == '__main__':
    unittest.main()
"noreply@github.com"
] | dqmdz.noreply@github.com |
4577eaed8369402971817fc693acae6518a09f80 | bd81142f05e57b637cc0ddd63edbc3c6b5b4a0a2 | /knowledge-driven-dialogue/generative_pt/tools/conversation_client.py | 598d22bca00ebba8dd12eac1105b2e8df08d391f | [
"MIT"
] | permissive | Chriszhangmw/ChatBots | 876d751f30d1d8ea759440fe1e7d4beb6ef94087 | 0735918e326bd6ff20b70388ae199ec11d9cbc11 | refs/heads/master | 2021-12-14T04:10:53.452552 | 2021-11-28T12:23:10 | 2021-11-28T12:23:10 | 210,681,028 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
File: conversation_client.py
"""
import sys
import socket
# Endpoint of the conversation service this client connects to.
SERVER_IP = "127.0.0.1"
SERVER_PORT = 8601
def conversation_client(text):
    """Send *text* to the conversation server and return its decoded reply.

    The socket is now closed even when sendall/recv raises; the original
    leaked the descriptor on any error path.
    """
    sock = socket.create_connection((SERVER_IP, SERVER_PORT))
    try:
        sock.sendall(text.encode())
        return sock.recv(4096).decode()
    finally:
        sock.close()
def main():
    """Read one utterance per line from argv[1] and print the server's replies."""
    if len(sys.argv) < 2:
        print("Usage: " + sys.argv[0] + " eval_file")
        exit()
    # 'with' ensures the input file is closed (the original left it open).
    with open(sys.argv[1]) as eval_file:
        for line in eval_file:
            response = conversation_client(line.strip())
            print(response)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Allow Ctrl-C to stop the client without a traceback.
        print("\nExited from the program ealier!")
| [
"zhangmw_play@163.com"
] | zhangmw_play@163.com |
ef9cf22c70cc95224a2aa0d3757046683102e3b8 | 89b2431c4188ad84717caef4031118c2befdeefb | /main.py | 7f37517de1ccecadac1b8e4016972585f7384701 | [] | no_license | phiree/lkaksjer | c4730d11c43aa3c330647c69201ba5078a2191e7 | 0be4fea326d5bc82abd09410869bf66b6d36268b | refs/heads/master | 2016-09-06T03:23:11.109998 | 2014-01-18T10:44:10 | 2014-01-18T10:44:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | #!/usr/bin/env python
#-*-coding: utf-8 -*-
import psutil,os,time,datetime
from config import *
from GetFrontWindow import *
from threading import *
from persistence import *
class GetActivityInformation(Thread):
    """Sampler thread: every `frequency_readprocess` seconds, record which
    window/process is in the foreground into the shared entryList."""
    def __init__(self):
        Thread.__init__(self)
        self.stopped=False
        self.frenquency=frequency_readprocess
        self.preProcess=''
        self.totalEntry=0
        # NOTE(review): entryList/myLock are module-level names that are only
        # created inside the __main__ block, so this class works only when the
        # file is run as a script -- confirm before importing it elsewhere.
        self.entryList=entryList
        self.myLock=myLock
    def run(self):
        while not self.stopped:
            time.sleep(self.frenquency)
            # (process name, window title) of the current foreground window.
            frontWindowInfo=wininfoFactory().GetFrontWindowInfo()
            pname=frontWindowInfo[0]
            wtext=frontWindowInfo[1]
            self.lastTime=self.frenquency
            self.preProcess=pname
            re=RecordEntry()
            re.pname=pname
            re.wtext=wtext
            re.lastTime=self.lastTime
            # Guard the shared list: the persistence thread drains it concurrently.
            self.myLock.acquire()
            self.entryList.append(re)
            self.totalEntry+=1
            self.myLock.release()
class PersistenceThread(Thread):
    '''Periodically drain the shared entryList and persist it locally.'''
    def __init__(self):
        Thread.__init__(self)
        self.t = 0  # running count of entries persisted so far
        self.entryList = entryList
        self.repeatFrenquency = frequency_savetolocal
    def run(self):
        while True:
            myLock.acquire()
            try:
                self.t = self.t + len(self.entryList)
                # Fixed: this was `PersistenceThread.SaveToLocal(self.entryList)`,
                # an unbound call that bound entryList to `self` and raised
                # TypeError for the missing argument.
                self.SaveToLocal(self.entryList)
                del self.entryList[:]
            finally:
                # Release the lock even if persistence fails, otherwise the
                # sampler thread would deadlock.
                myLock.release()
            time.sleep(self.repeatFrenquency)
    def SaveToLocal(self, entryList):
        """Persist the given entries via the sqlite persistence backend."""
        CreatePersistence("sqlite").SaveToLocal(entryList)
class RecordEntry:
    """One sample of foreground-window activity."""
    def __init__(self):
        # Timestamp of when the sample was created. (A stray `pass` that
        # followed this line in the original was removed.)
        self.time = datetime.datetime.now()
        self.pname = ''    # process name
        self.wtext = ''    # window title text
        self.lastTime = 0  # seconds this sample covers
    def __str__(self):
        return self.pname + '--' + self.wtext + '--' + str(self.lastTime) + '--' + str(self.time)
if __name__=="__main__":
    # Shared mutex and buffer handed to both worker threads.
    myLock=Lock()
    entryList=[]
    info= GetActivityInformation()
    info.start()
    # NOTE(review): this rebinds the class name to its single instance, which
    # works for a one-shot script but prevents creating a second thread.
    PersistenceThread=PersistenceThread()
    PersistenceThread.start()
| [
"phiree@gmail.com"
] | phiree@gmail.com |
37ff3f4c7cd06185e95a3f479f9839a436d7996d | d557dc177f77a5ab28ab44e1e542f3e44c537bfc | /软件工程/单元测试练习/p_queue/p_queue.py | 24435e69ffa5161505264565b76ae2d58519ec46 | [
"MIT"
] | permissive | swy20190/courses_in_THU | 6cf8f5520e875f311b516ae96c72a128edc453eb | d24b8edf7cb456918b309715a376418e7510e851 | refs/heads/master | 2020-12-13T23:06:12.755356 | 2020-02-27T03:47:38 | 2020-02-27T03:47:38 | 234,554,686 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,447 | py | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedule other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, which size is usually related to the amount of CPU memory),
followed by a merging passes for these runs, which merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace',
'heappushpop']
def heappush(heap, item):
    """Push item onto heap, maintaining the heap invariant."""
    new_pos = len(heap)          # index the item will occupy after append
    heap.append(item)
    _siftdown(heap, 0, new_pos)
def heappop(heap):
    """Pop the smallest item off the heap, maintaining the heap invariant.

    Unlike the stdlib version, returns False (instead of raising IndexError)
    when the heap is empty.
    """
    if not heap:
        return False
    tail = heap.pop()
    if not heap:
        return tail
    smallest = heap[0]
    heap[0] = tail
    _siftup(heap, 0)
    return smallest
def heapreplace(heap, item):
    """Pop and return the current smallest value, and add the new item.

    More efficient than heappop() followed by heappush() for a fixed-size
    heap. Note that the value returned may be larger than *item*. Unlike
    the stdlib version, returns False (instead of raising) on an empty heap.
    """
    if not heap:
        return False
    smallest = heap[0]
    heap[0] = item
    _siftup(heap, 0)
    return smallest
def heappushpop(heap, item):
    """Fast version of a heappush followed by a heappop."""
    # If the heap is empty or its root is not smaller, the pushed item would
    # be popped right back -- return it untouched.
    if not heap or not (heap[0] < item):
        return item
    item, heap[0] = heap[0], item
    _siftup(heap, 0)
    return item
def heapify(x):
    """Transform list into a heap, in-place, in O(len(x)) time."""
    # Sift down every internal node, from the last parent back to the root.
    # Leaves (indices >= len(x)//2) are trivially heaps already.
    for i in range(len(x) // 2 - 1, -1, -1):
        _siftup(x, i)
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
    """Sink the (possibly out-of-order) item at *pos* down to a leaf by
    repeatedly promoting its smaller child, then bubble it back up with
    _siftdown. See the long comment above for why this two-phase scheme
    minimizes comparisons."""
    end = len(heap)
    start = pos
    item = heap[pos]
    child = 2 * pos + 1  # leftmost child
    while child < end:
        sibling = child + 1
        # Promote the smaller of the two children.
        if sibling < end and not heap[child] < heap[sibling]:
            child = sibling
        heap[pos] = heap[child]
        pos, child = child, 2 * child + 1
    # pos is now an empty leaf: drop the item there and sift it up.
    heap[pos] = item
    _siftdown(heap, start, pos)
| [
"K201410109@163.com"
] | K201410109@163.com |
7c798ad338b51e878e172a3da28ff73f4c3cf298 | 9ff764a4dd29952bc6e2756341319ff3d6021843 | /packages/coinstac-common/test/models/computation/test-command-bad-syntax.py | c8cd70020142dd34ffcc1ab81ffc48d6ceacfa9a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | satra/coinstac | 9a3b5f48a4276d90888ae48090907fccacbe8aa9 | ddc96baa99324fd632398de1e5f59414c7e83a88 | refs/heads/master | 2020-04-05T23:28:57.469175 | 2016-06-24T03:40:33 | 2016-06-24T03:40:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | import json;
SYNTAX ERROR
print json.dumps({ "bar": "baz" });
| [
"creed@mrn.org"
] | creed@mrn.org |
f16cb712ee4f00988d39bc640e1410e23b8615a6 | 86a2ca4c707b8b44009c32ff132658184511feea | /src/basic_practice/duongdt_Basic_006_String.py | acb055792a7069f14c8b432d134199c71523606f | [
"MIT"
] | permissive | simplesoft-duongdt3/PythonCourse | 09702c40faf476deefcf450b8e19c846b006510b | b0209a45844bf477de57080f983cad0d36220ce5 | refs/heads/main | 2023-04-17T00:28:44.941234 | 2021-04-28T11:02:01 | 2021-04-28T11:02:01 | 323,338,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,963 | py | def main():
for i in "Doan Thanh Duong":
print(i)
myName = "dOAN thanh duong"
print(len(myName))
print(myName.capitalize())
myName = "Sáng chói lòa cả chiến mã"
print(myName) # Sáng chói lòa cả chiến mã
print(len(myName)) # 31
#for i in myName:
# print(i)
myName = "Sáng chói lòa cả chiến mã"
print(myName) # Sáng chói lòa cả chiến mã
print(len(myName)) # 25
#for i in myName:
# print(i)
myName = "Sáng chói lòa cả chiến mã"
print("ng in myName = " + str("ng" in myName))
myName = "Sáng chói lòa cả chiến mã"
print("th not in myName = " + str("th" not in myName))
myName = "Sáng chói lòa cả chiến mã"
print("myName[0:5]=" + myName[0:5])
print("myName[:5]=" + myName[:5])
print("myName[2:]=" + myName[2:])
print("myName[-5:-2]=" + myName[-5:-2])
messageTemplate = "{} đã có mặt tại lớp học vào ngày {}"
message = messageTemplate.format("Mr. Duong", "30/11/2020")
print(message)
message1 = "{0} đã có mặt tại lớp học vào ngày {1}"
message = message1.format("Mr. Duong", "30/11/2020")
print(message)
message1 = "{1} đã có mặt tại lớp học vào ngày {0}"
message = message1.format("30/11/2020", "Mr. Duong")
print(message)
message1 = "{1} đã có mặt tại lớp học vào ngày {0}, người thông báo: {1}"
message = message1.format("30/11/2020", "Mr. Duong")
print(message)
message1 = "{name} đã có mặt vào ngày {date}, người thông báo: {name}"
message = message1.format(date = "30/11/2020", name = "Mr. Duong")
print(message)
message1 = "{name} chấm {num_students:,} bài thi vào ngày {date}, điểm trung bình {avg:.2f}"
message = message1.format(date = "30/11/2020", name = "Mr. Duong", num_students = 1097, avg = 8.7)
print(message)
if __name__ == "__main__":
main()
else:
print ("duongdt_Basic_006_String.py imported") | [
"thanhduong.doan@nhnent.com"
] | thanhduong.doan@nhnent.com |
cd7536fbdfbd4277136ae6edaee967cd1d86ab18 | 60618d48e09a140926d97b01cb9b6f76fcc65703 | /data analysis/itheima/plot.py | 6a18b4bded99c6dbf7247578045daa0392a6d27a | [] | no_license | Incipe-win/Python | ca8f36cc8785eb13512f71a3cf10149d4e1b855e | 5bab36b90591c74dedb6ead3484a279b90a1bcbd | refs/heads/master | 2021-01-07T08:11:42.293541 | 2020-12-06T09:17:02 | 2020-12-06T09:17:02 | 241,629,236 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | from matplotlib import pyplot as plt
import random
from matplotlib import font_manager
my_font = font_manager.FontProperties(
fname="/usr/share/fonts/opentype/noto/NotoSansCJK-Bold.ttc")
# import matplotlib
#
# font = {"family": "Noto Sans Mono",
# "weight": "bold",
# "size": "larger"
# }
# matplotlib.rc("font", **font)
# x = range(2, 26, 2)
# y = [15, 13, 14.5, 17, 20, 25, 26, 26, 24, 22, 18, 15]
#
# plt.figure(num="hh", figsize=(20, 8), dpi=80)
# plt.tick_params(axis='x', colors="green")
# x_ticks = [i/2 for i in range(4, 49)]
# x_labels = ["h" + str(i) for i in range(1, 14)]
# plt.xticks(x_ticks[::3], x_labels)
# plt.yticks(range(min(y), max(y) + 1))
#
# plt.plot(x, y)
# plt.savefig("./test.svg")
# plt.show()
# y = [random.randint(20, 35) for i in range(120)]
# cnt = 10
# x = []
# for i in range(120):
# if i == 60:
# cnt += 1
# i %= 60
# s = str(i) if i >= 10 else "0" + str(i)
# x.append(str(cnt) + ":" + s)
# plt.figure(figsize=(100, 15), dpi=80)
# plt.tick_params(axis='both', colors="green")
# plt.xticks(list(range(120))[::3], labels=x[::3], rotation=45,
# fontproperties=my_font)
# plt.yticks(range(19, 36))
# plt.xlabel("时间", fontproperties=my_font)
# plt.ylabel("温度 单位(摄氏度)", fontproperties=my_font)
# plt.title("10~12点每分钟气温变化情况", fontproperties=my_font)
# plt.plot(x, y)
# plt.show()
# Two series of per-age counts for ages 11..30 (x labels use the CJK font).
y1 = [1, 0, 1, 1, 2, 4, 3, 2, 3, 4, 4, 5, 6, 5, 4, 3, 3, 1, 1, 1]
y2 = [1, 0, 3, 1, 2, 2, 3, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1]
x = ["{}岁".format(i) for i in range(11, 31)]
plt.figure(figsize=(20, 15), dpi=80)
plt.tick_params(axis="both", colors="green")
plt.xticks(list(range(20)), labels=x, rotation=45, fontproperties=my_font)
plt.yticks(range(min(y1), max(y1)))
plt.xlabel("xx", fontproperties=my_font)
plt.ylabel("xxxx 单位(个)", fontproperties=my_font)
plt.title("xx~xxxx", fontproperties=my_font)
plt.grid(alpha=0.4)
plt.plot(x, y1, color="green", label="xx")
plt.plot(x, y2, color="blue", label="xx")
plt.legend(prop=my_font)
# plt.show()
plt.savefig("./plot.svg")
| [
"whc_9_13@163.com"
] | whc_9_13@163.com |
40d6ad7c3c49e310f10e435aee22d2aa9b19a03c | 68eb441faf3f9415fbcbc8330f9b01ad6933bede | /ebook/machinelearningdemo/MachineLearningLessonPro/ML_1/3.0loadfile_fromdata.py | 1711ef93a3ae8eea6d78e080a3ca39a2781775f4 | [] | no_license | OrriO/jupyter_myworkspace | fb8e97865f15abe2fb3aa01985fdb4f34317f15f | a592ab92f38a1cd466c454bb36fd0002c75202a9 | refs/heads/master | 2023-06-01T02:00:36.986439 | 2021-07-08T13:44:26 | 2021-07-08T13:44:26 | 381,997,768 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | #-*- coding: utf-8 -*-
# @Time : 2018/12/6 10:08
# @Author : Z
# @Email : S
# @File : 3.0loadfile_fromdata.py
from surprise import BaselineOnly
from surprise import Dataset
from surprise import Reader
from surprise.model_selection import cross_validate
import os
# path to dataset file
file_path = os.path.expanduser('./u.data')
# As we're loading a custom dataset, we need to define a reader. In the
# movielens-100k dataset, each line has the following format:
# 'user item rating timestamp', separated by '\t' characters.
reader = Reader(line_format='user item rating timestamp', sep='\t')
data = Dataset.load_from_file(file_path, reader=reader)
# We can now use this dataset as we please, e.g. calling cross_validate
cross_validate(BaselineOnly(), data, verbose=True) | [
"guojj1@guahao.com"
] | guojj1@guahao.com |
e0316a8da231c8653503dd97ee089c353d4f5de5 | cc1345172273ee2771c5530fa8533440eb8b6ffa | /server/src/app.py | bcb6add2e4ae42280799af3b5ab18bd087b87af8 | [] | no_license | adam-weinberger/fastapi-vue-crud | d08b6a5eb205a695247bbf3fe8625fb3cfff0221 | e0645b3b26186a2f0353f1610e2d04de20b1c6fa | refs/heads/master | 2023-05-14T12:45:48.601085 | 2021-03-12T08:00:34 | 2021-03-12T08:00:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,744 | py | from typing import List
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from starlette.middleware.cors import CORSMiddleware
app = FastAPI()
#: Configure CORS
origins = [
"http://localhost:8080",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
#: Initialize list of books
class Book(BaseModel):
    """Request/response schema for a single book."""
    title: str
    author: str
    read: bool  # True once the book has been read
# In-memory "database" of books; mutated by the CRUD endpoints below.
BOOKS: List[Book] = []
@app.on_event("startup")
async def startup_event():
    """Seed the in-memory book list with three sample books."""
    seed = [
        Book(title="On the Road", author="Jack Kerouac", read=True),
        Book(
            title="Harry Potter and the Philosopher's Stone",
            author="J. K. Rowling",
            read=False,
        ),
        Book(title="Green Eggs and Ham", author="Dr. Seuss", read=True),
    ]
    BOOKS.clear()
    BOOKS.extend(seed)
def _assert_book_id_exists(book_id: int):
    """Raise a 404 HTTPException unless *book_id* is a valid index into BOOKS.

    Fixed off-by-one: the old check used `book_id > len(BOOKS)`, so an id
    equal to len(BOOKS) slipped through and the endpoints crashed with an
    IndexError (HTTP 500) instead of returning a clean 404.
    """
    if book_id < 0 or book_id >= len(BOOKS):
        raise HTTPException(status_code=404, detail="Book not found")
#: Describe all Pydantic Response classes
class ResponseBase(BaseModel):
    """Common envelope fields shared by all API responses."""
    status: str
    code: int
    messages: List[str] = []
class PongResponse(ResponseBase):
    """Response body for GET /ping; data defaults to 'Pong!'."""
    data: str = "Pong!"
class BookResponse(ResponseBase):
    """Envelope carrying a single book."""
    data: Book
class ListBooksResponse(ResponseBase):
    """Envelope carrying the full book list."""
    data: List[Book]
#: Mount routes
@app.get("/")
def index():
    """Root endpoint: direct clients to the generated API documentation."""
    return dict(
        status="ok",
        code=200,
        data="Welcome, please check /docs or /redoc",
    )
@app.get("/ping", response_model=PongResponse)
def return_pong():
    """Liveness probe; the PongResponse model supplies data='Pong!'."""
    return dict(status="ok", code=200)
@app.get("/books", response_model=ListBooksResponse)
def get_all_books():
    """Return every book currently stored."""
    return dict(status="ok", code=200, data=BOOKS)
@app.post("/books", status_code=201, response_model=BookResponse)
def create_book(book: Book):
    """Append a new book and echo it back with HTTP 201."""
    BOOKS.append(book)
    response = {
        "status": "success",
        "code": 201,
        "messages": ["Book added !"],
        "data": book,
    }
    return response
@app.put("/books/{book_id}", response_model=BookResponse)
def edit_book(book_id: int, book: Book):
    """Replace the book stored at *book_id* (404 if the id is unknown)."""
    _assert_book_id_exists(book_id)
    BOOKS[book_id] = book
    response = {
        "status": "success",
        "code": 200,
        "messages": ["Book edited !"],
        "data": book,
    }
    return response
@app.delete("/books/{book_id}", response_model=BookResponse)
def remove_book(book_id: int):
    """Delete and return the book stored at *book_id* (404 if unknown)."""
    _assert_book_id_exists(book_id)
    removed = BOOKS.pop(book_id)
    response = {
        "status": "success",
        "code": 200,
        "messages": ["Book removed !"],
        "data": removed,
    }
    return response
#: Start application
if __name__ == "__main__":
    # Local import: uvicorn is only required when running this file directly.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
| [
"fanilo.andrianasolo@worldline.com"
] | fanilo.andrianasolo@worldline.com |
848af52212c1373f3c209261294cf0e00fd3b8da | 41f7510ba7862c312ff5600337cd41e830722a98 | /homework_4_sol.py | 81212b65541d41aab367f4601151a93e870c947f | [] | no_license | aamira97/homework | d99c70cece4b03614be1ae4f305a06d9e98226fe | 77f806b36ff8c5eda58337b81c22e2e56af31691 | refs/heads/main | 2023-08-25T12:19:29.335690 | 2021-10-19T07:20:05 | 2021-10-19T07:20:05 | 405,324,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,752 | py | """
Homework 4
Deadline: 23 AUG, 20:00
"""
"""
Problem 1
You have two strings. Put one in the middle of the other one.
Example: s1 = "Environment", s2 = "Earth", result should be "EnviroEarthnment"
"""
# s1 = "Environment"
# s2 = "Earth"
# fin = s1[:6] + s2 + s1[6:]
# print(fin)
"""
Problem 2
You have five strings. Create two new strings: one containing all the beginning letters of the five strings,
and one containing all the ending letters of the five strings.
"""
# s1 = "qwerty"
# s2 = "asdfg"
# s3 = "tyu"
# s4 = "1234"
# s5 = "p"
#
# str1 = s1[0] + s2[0] + s3[0] + s4[0] + s5[0]
# str2 = s1[-1] + s2[-1] + s3[-1] + s4[-1] + s5[-1]
"""
Problem 3
Create a function that gets a name. If the length of the name is odd (կենտ) it returns the name all in upper case.
If the length of the name is even (զույգ) just return it.
"""
# def upper_func(name):
# if len(name) % 2 != 0:
# return name.upper()
#
# return name
#
# print(upper_func('Ani'))
"""
Problem 4
You have a CNN article. You want to find out how many times the words 'university', 'vaccine',
'student' (but not 'students') appear in the text.
You also want to find out how many numbers from 1 to 5 can be found in the text.
"""
article = """ (CNN)The University of Virginia has disenrolled 238 students for its fall semester on Friday for not
complying with the university's Covid-19 vaccine mandate, according to a university spokesperson.
UVA requires "all students who live, learn, or work in person at the university" to be fully vaccinated
for the upcoming 2021-2022 academic year, according to current university Covid-19 policies.
Out of the 238 incoming Fall semester students, only 49 of them were actually enrolled in classes, and the remaining
189 "may not have been planning to return to the university this fall at all," UVA spokesperson Brian Coy told CNN.
"Disenrolled means you're not eligible to take courses," Coy said.
He added that students who were enrolled at the university on Wednesday still have a week to update their status
at which point they can re-enroll.
"""
# print(article.count('university') + article.count('University'))
# print(article.count('student') - article.count('students'))
# print(article.count('vaccine'))
# print(article.count("1") + article.count("2") + article.count("3") + article.count("4") + article.count("5"))
"""
Problem 5
Find out if there is '2021-2022' string in the article and slice it.
"""
# print(article.find('2021-2022'))
# # print(article.index())
# print(article[323:332])
"""
Problem 6
Create a function that gets a string and returns the same string but the half of it UPPERCASE.
(It's okay if the string has odd number of characters and half is not the exact half)
"""
# def half_upper(str1):
#     half_length = len(str1) // 2
#     print(half_length)
#     return str1[:half_length] + str1[half_length:].upper()
#
#
# print(half_upper("amirjanyan"))
"""
Problem 7
Write a function that takes a name and a (future) profession and returns the sentence
"I am Ani Amirjanyan and I am a backend developer.".
Use .format or f" "
"""
# def form(name, profession):
# return f"I am {name} and I am a {profession}."
#
# print(form('Ani Amirjanyan', 'backend developer'))
"""
Problem 8
Create a function that takes a 3 digit number (can't take more or less digits) and returns the reverse number.
Example: take "987" return 789. (It is okay if the result starts with 0)
"""
# def reverse(num_str):
# n1 = num_str[-1]
# n2 = num_str[1]
# n3 = num_str[0]
# n_final = n1 + n2 + n3
# return int(n_final)
#
#
# print(reverse("987"))
# # ------------------------------
#
# def reverse(number):
# num = str(number)
# return num[::-1]
#
# print(reverse(876))
| [
"naynajrima@gmail.com"
] | naynajrima@gmail.com |
2db05030ee6eaf584a4d21d85a0dde25ead6c3a9 | 5d177d94f5e19b272605afb6e47da269b0fa8ba2 | /helpers.py | 7cb3acee100097bb29a79f51929dd79069ab31c9 | [] | no_license | glebysg/NLP_final_project | 47055c388130508a95dd5856b089b4ab87cb16fd | 4ebcca6b120f1682229ec05e9f776b2ebb395abb | refs/heads/master | 2021-10-27T08:10:39.684059 | 2019-04-16T22:51:21 | 2019-04-16T22:51:21 | 114,297,896 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | import os
import pickle
def save_object(obj, filename):
    """Pickle *obj* into *filename* using the highest available protocol."""
    with open(filename, 'wb') as sink:
        pickle.dump(obj, sink, protocol=pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Unpickle and return the object stored in the file *name*."""
    with open(name, 'rb') as source:
        return pickle.load(source)
def get_dataset_dict(path, file_reader, dict_name):
    """Return a histogram dict mapping (label - 1) -> occurrence count.

    Labels are read one per line from *file_reader* (1-based integers) and
    stored 0-based.  The result is cached as a pickle at path/dict_name;
    on later calls the cached dict is loaded instead of re-counting.

    Consistency fix: the reader was previously closed only on the
    cache-miss path, leaking the open handle whenever the cache was hit.
    It is now closed on both paths.
    """
    full_dict_name = os.path.join(path, dict_name)
    if os.path.isfile(full_dict_name):
        file_reader.close()
        return load_obj(full_dict_name)
    dataset_dict = {}
    for feat_out in file_reader:
        # int() tolerates the trailing newline on each line.
        key = int(feat_out) - 1
        dataset_dict[key] = dataset_dict.get(key, 0) + 1
    file_reader.close()
    save_object(dataset_dict, full_dict_name)
    return dataset_dict
| [
"glebysg@gmail.com"
] | glebysg@gmail.com |
b5c16ed602c6af63b7a315bb9b66fe8e4f9019c3 | f43539e064c6dd504df07861cd136cc9f121bc5e | /htmlparsing.py | 9ea8b134f6395268d323d0ef68c09c3eb6ceb863 | [] | no_license | mageshz/pythonscripts | 83e00ffb5735489a8f013fddfee693f0573eb908 | d8b694a5f34cc890fc37ebc7dbc53b8b2708e575 | refs/heads/master | 2021-01-23T06:58:44.954459 | 2017-03-28T04:26:50 | 2017-03-28T04:26:50 | 86,414,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | from BeautifulSoup import BeautifulSoup
import urllib2
# NOTE(review): Python 2 script (urllib2 + print statement) using BeautifulSoup 3.
wiki = "https://en.wikipedia.org/wiki/List_of_state_and_union_territory_capitals_in_India"
page = urllib2.urlopen(wiki)  # fetch the page over HTTP (network I/O)
soup = BeautifulSoup(page)  # parse the downloaded HTML
print soup.prettify()  # dump the formatted parse tree to stdout
| [
"mageshzmoorthy777@gmail.com"
] | mageshzmoorthy777@gmail.com |
895dea6e30b2a7797a662e32966deece1ba3e57b | 32efd4deab945dabb22133e2d118458e2a61d672 | /GetpageSource/urllib2Go/GetPageSourceURLLIBRandom.py | f33ce7ed8a85130cfc9baba88054c9d8d07f7965 | [] | no_license | PyZyyong/spider | 0a5a219b68a057df756977968191829c93efe538 | d3863abfe0fa3a9ba4d51172e43267da9ec2e9d6 | refs/heads/master | 2021-07-17T06:15:14.464045 | 2017-10-23T12:38:34 | 2017-10-23T12:38:34 | 107,979,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | #coding:utf-8
import urllib2
import GetpageSource.MyAgent
import GetpageSource.urllib2Go.GetPageSource
import random
class GetPageSourceURLLIBRandom(GetpageSource.urllib2Go.GetPageSource.GetPageSource):
    """Fetch a page's source while spoofing a randomly chosen User-Agent."""
    def __init__(self,url):
        GetpageSource.urllib2Go.GetPageSource.GetPageSource.__init__(self,url)
    def getsource(self):
        """Download self.url with a random agent header and return the raw body."""
        myagent = GetpageSource.MyAgent.Myagent()  # agent catalogue object
        # Collect every user-agent string from the pcUserAgent dict.
        myagentlist = []
        for key in myagent.pcUserAgent:
            myagentlist.append(myagent.pcUserAgent[key])
        randomagent = random.choice(myagentlist)
        # Bug fix: split on the FIRST colon only.  Agent values such as
        # "...; rv:2.0.1) ..." contain colons themselves, so the original
        # split(":") truncated the header value at the first inner colon.
        randomlist = randomagent.split(":", 1)
        header = {randomlist[0]: randomlist[1]}
        request = urllib2.Request(self.url, headers=header)  # request with spoofed UA
        data = urllib2.urlopen(request).read()  # network I/O
        return data
'''
testfirefox= GetPageSourceURLLIBRandom("http://www.baidu.com")
print testfirefox.getsource()
'''
| [
"31562013+PyZyyong@users.noreply.github.com"
] | 31562013+PyZyyong@users.noreply.github.com |
ed6019a55cbe49b15e4cbe71343c9ea879f3e984 | bd14c979335112b7718b0feda18ebf0e3b40fe5c | /arihon_biginners/review_of_binsearch.py | 3ae4f4958eb3c9ebc42c27e83fb5e6cc36c26f9e | [] | no_license | ababa831/atcoder_beginners | 22c57b15333d110126d1b1afadc0ff5e8784fc4f | 1a30882ce7f20f312045d5dc7bfaa5688cc8a88e | refs/heads/master | 2023-03-07T15:47:19.750682 | 2020-03-04T19:53:45 | 2020-03-04T19:53:45 | 143,360,607 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # Review of a simple bit search algorithm
# Review of a simple bit search algorithm
D = 3
print('Number of digit', D)
combinations = []
for mask in range(2 ** D):
    # Digits (1-based) whose bit is set in this mask.
    flagged = [digit + 1 for digit in range(D) if mask & (1 << digit)]
    print('Binary {} has flags at digit {}'.format(bin(mask), flagged))
    combinations.append(flagged)
print('Total number of combinations ', len(combinations))
print('Combinations: ', combinations)
| [
"flvonlineconverter@gmail.com"
] | flvonlineconverter@gmail.com |
a95d45f9f346d68a02f6e9758cc7c0d36ad359d9 | 7ce3028447d060a489dda937564dea54c027e8cb | /venv/Scripts/pip3-script.py | 4a6fbc32f077d266bfb35f66590f2b742488f907 | [] | no_license | anindya-vedant/GONN | 9a03895237ac603fc11543c7ab714d117ad182ef | 8b80e6f780c2ff9c43e853ad86d6b2f93ba09554 | refs/heads/master | 2020-07-15T20:34:02.754713 | 2019-09-01T07:24:31 | 2019-09-01T07:24:31 | 205,643,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | #!"D:\AIML lab\Project\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalize argv[0] by stripping the '-script.py' / '.exe' wrapper
    # suffix so pip sees its canonical program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Resolve pip 10.0.1's 'pip3' console-script entry point and run it,
    # propagating its return value as the process exit code.
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
| [
"anindyavedant@gmail.com"
] | anindyavedant@gmail.com |
300db1854113aa50896708a70a8a68129dbaa76e | 935c1ecc3c32077025d66e627761771ef7920f7e | /vinod2pp4.py | eb8959169634a9fe422d0f6629b589c7407f539a | [] | no_license | vinoth12599/programer | 69645ad49b20b428eb64229f088d41e7886badf2 | b869d7e06a0e50aacb256b048569cdd13867209d | refs/heads/master | 2020-07-11T22:35:02.198565 | 2019-08-27T09:08:13 | 2019-08-27T09:08:13 | 204,658,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | a="vinoth"
mylist=[2,3,4,[4,5,6,[6,8,a],4,5,6],2,1,0]  # nested list embedding the string `a`
# First top-level occurrence of 4 is at index 2; the result is discarded.
mylist.index(4)
| [
"noreply@github.com"
] | vinoth12599.noreply@github.com |
ee20ecec1b3e04391f5faaaf4adfc48effabb756 | d3f39991e5bbe455837da8d34d3f73c9d2a41eb0 | /env/Lib/site-packages/graphene/types/resolver.py | 1f395b5018c03ab4ff39ffbd1bc8ed13d32a8c07 | [
"MIT"
] | permissive | nerdyator/graphene-django-cookbook | cf6974b13b71f57248489d8e19d895e33225ba13 | e19aec7a6e910898558b16e910c7392b0b219390 | refs/heads/master | 2022-12-25T01:45:40.324096 | 2017-06-15T08:29:26 | 2017-06-15T08:29:26 | 94,413,959 | 0 | 1 | MIT | 2022-12-19T17:39:38 | 2017-06-15T07:46:21 | Python | UTF-8 | Python | false | false | 491 | py | def attr_resolver(attname, default_value, root, args, context, info):
return getattr(root, attname, default_value)
def dict_resolver(attname, default_value, root, args, context, info):
    """Resolve *attname* by mapping lookup on *root*, falling back to *default_value*."""
    value = root.get(attname, default_value)
    return value
# Module-wide default: resolve fields via attribute access (attr_resolver).
default_resolver = attr_resolver
def set_default_resolver(resolver):
    """Install *resolver* as the module-wide default resolver.

    Raises AssertionError when *resolver* is not callable.
    """
    assert callable(resolver), 'Received non-callable resolver.'
    global default_resolver
    default_resolver = resolver
def get_default_resolver():
    """Return the resolver currently installed as the module default."""
    current = default_resolver
    return current
| [
"amit.chavan90@gmail.com"
] | amit.chavan90@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.