content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""
Defines an AI which picks randomly
"""
import random
from quagen.ai import AI
class RandomAI(AI):
    """Move selector that picks an open spot uniformly at random.

    Literally picks a random spot to move. More useful for benchmarking
    other AIs than used against a human in a real game.
    """

    def get_max_strength(self):
        """Return the maximum strength / level this AI supports.

        Returns:
            (int) The max strength / level of this AI
        """
        return 0

    def choose_move(self):
        """Pick the AI's next move from the currently movable spots.

        Returns:
            (tuple) Board coordinates in the form of (x, y)
        """
        candidates = self.get_movable_spots()
        if not candidates:
            raise Exception("No available moves for AI to choose from.")
        # Shuffle in place and take the final element -- a uniform pick.
        random.shuffle(candidates)
        return candidates.pop()
| [
37811,
198,
7469,
1127,
281,
9552,
543,
11103,
15456,
198,
37811,
198,
11748,
4738,
198,
198,
6738,
627,
11286,
13,
1872,
1330,
9552,
628,
198,
4871,
14534,
20185,
7,
20185,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
48414,
... | 2.463977 | 347 |
#------------------------------------------------------------------------------
#
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 10/07/2004
#
#------------------------------------------------------------------------------
""" Defines the View class used to represent the structural content of a
Traits-based user interface.
"""
#-------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------
from __future__ import absolute_import
from pyface.ui_traits import Image
from traits.api import (
Any,
Bool,
Callable,
Enum,
Event,
Float,
Instance,
List,
Str,
Trait,
TraitPrefixList)
from .view_element import ViewElement, ViewSubElement
from .ui import UI
from .ui_traits import (
AButton,
AnObject,
Buttons,
DockStyle,
EditorStyle,
ExportType,
HelpId,
Image,
SequenceTypes,
ViewStatus)
from .handler import Handler, default_handler
from .group import Group
from .item import Item
from .include import Include
import six
#-------------------------------------------------------------------------
# Trait definitions:
#-------------------------------------------------------------------------
# Name of the view trait:
AnId = Str(desc='the name of the view')

# Contents of the view trait (i.e., a single Group object):
Content = Instance(Group, desc='the content of the view')

# An optional model/view factory for converting the model into a viewable
# 'model_view' object
AModelView = Callable(desc='the factory function for converting a model '
                      'into a model/view object')

# Reference to a Handler object trait:
AHandler = Any(desc='the handler for the view')

# Dialog window title trait:
ATitle = Str(desc='the window title for the view')

# User interface 'kind' trait. The values have the following meanings:
#
# * 'panel': An embeddable panel. This type of window is intended to be used as
#   part of a larger interface.
# * 'subpanel': An embeddable panel that does not display command buttons,
#   even if the View specifies them.
# * 'modal': A modal dialog box that operates on a clone of the object until
#   the user commits the change.
# * 'nonmodal': A nonmodal dialog box that operates on a clone of the object
#   until the user commits the change
# * 'live': A nonmodal dialog box that immediately updates the object.
# * 'livemodal': A modal dialog box that immediately updates the object.
# * 'popup': A temporary, frameless popup dialog that immediately updates the
#   object and is active only while the mouse pointer is in the dialog.
# * 'info': A temporary, frameless popup dialog that immediately updates the
#   object and is active only while the dialog is still over the invoking
#   control.
# * 'wizard': A wizard modal dialog box. A wizard contains a sequence of
#   pages, which can be accessed by clicking **Next** and **Back** buttons.
#   Changes to attribute values are applied only when the user clicks the
#   **Finish** button on the last page.
#
# NOTE(review): 'popover' is accepted by the trait below but is not described
# in the list above -- confirm its intended semantics.
AKind = Trait('live', TraitPrefixList(
    'panel', 'subpanel', 'modal', 'nonmodal', 'livemodal',
    'live', 'popup', 'popover', 'info', 'wizard'),
    desc='the kind of view window to create',
    cols=4)

# Apply changes handler:
OnApply = Callable(desc='the routine to call when modal changes are applied '
                   'or reverted')

# Is the dialog window resizable?
IsResizable = Bool(False, desc='whether dialog can be resized or not')

# Is the view scrollable?
IsScrollable = Bool(False, desc='whether view should be scrollable or not')

# The valid categories of imported elements that can be dragged into the view:
ImportTypes = List(Str, desc='the categories of elements that can be '
                   'dragged into the view')

# The view position and size traits.  -1E6 is the "unspecified" sentinel:
Width = Float(-1E6, desc='the width of the view window')
Height = Float(-1E6, desc='the height of the view window')
XCoordinate = Float(-1E6, desc='the x coordinate of the view window')
YCoordinate = Float(-1E6, desc='the y coordinate of the view window')

# The result that should be returned if the user clicks the window or dialog
# close button or icon
CloseResult = Enum(None, True, False,
                   desc='the result to return when the user clicks the '
                   'window or dialog close button or icon')

# The KeyBindings trait (declared by name to avoid an import cycle):
AKeyBindings = Instance('traitsui.key_bindings.KeyBindings',
                        desc='the global key bindings for the view')
#-------------------------------------------------------------------------
# 'View' class:
#-------------------------------------------------------------------------
class View(ViewElement):
    """ A Traits-based user interface for one or more objects.

    The attributes of the View object determine the contents and layout of
    an attribute-editing window. A View object contains a set of Group,
    Item, and Include objects. A View object can be an attribute of an
    object derived from HasTraits, or it can be a standalone object.
    """

    #-------------------------------------------------------------------------
    # Trait definitions:
    #-------------------------------------------------------------------------

    # A unique identifier for the view:
    id = AnId

    # The top-level Group object for the view:
    content = Content

    # The menu bar for the view. Usually requires a custom **handler**:
    menubar = Any  # Instance( pyface.action.MenuBarManager )

    # The toolbar for the view. Usually requires a custom **handler**:
    toolbar = Any  # Instance( pyface.action.ToolBarManager )

    # Status bar items to add to the view's status bar. The value can be:
    #
    #   - **None**: No status bar for the view (the default).
    #   - string: Same as [ StatusItem( name = string ) ].
    #   - StatusItem: Same as [ StatusItem ].
    #   - [ [StatusItem|string], ... ]: Create a status bar with one field for
    #     each StatusItem in the list (or tuple). The status bar fields are
    #     defined from left to right in the order specified. A string value is
    #     converted to: StatusItem( name = string ):
    statusbar = ViewStatus

    # List of button actions to add to the view. The **traitsui.menu**
    # module defines standard buttons, such as **OKButton**, and standard sets
    # of buttons, such as **ModalButtons**, which can be used to define a value
    # for this attribute. This value can also be a list of button name strings,
    # such as ``['OK', 'Cancel', 'Help']``. If set to the empty list, the
    # view contains a default set of buttons (equivalent to **LiveButtons**:
    # Undo/Redo, Revert, OK, Cancel, Help). To suppress buttons in the view,
    # use the **NoButtons** variable, defined in **traitsui.menu**.
    buttons = Buttons

    # The default button to activate when Enter is pressed. If not specified,
    # pressing Enter will not activate any button.
    default_button = AButton

    # The set of global key bindings for the view. Each time a key is pressed
    # while the view has keyboard focus, the key is checked to see if it is one
    # of the keys recognized by the KeyBindings object. If it is, the matching
    # KeyBinding's method name is checked to see if it is defined on any of the
    # object's in the view's context. If it is, the method is invoked. If the
    # result of the method is **False**, then the search continues with the
    # next object in the context. If any invoked method returns a non-False
    # value, processing stops and the key is marked as having been handled. If
    # all invoked methods return **False**, or no matching KeyBinding object is
    # found, the key is processed normally. If the view has a non-empty *id*
    # trait, the contents of the **KeyBindings** object will be saved as part
    # of the view's persistent data:
    key_bindings = AKeyBindings

    # The Handler object that provides GUI logic for handling events in the
    # window. Set this attribute only if you are using a custom Handler. If
    # not set, the default Traits UI Handler is used.
    handler = AHandler

    # The factory function for converting a model into a model/view object:
    model_view = AModelView

    # Title for the view, displayed in the title bar when the view appears as a
    # secondary window (i.e., dialog or wizard). If not specified, "Edit
    # properties" is used as the title.
    title = ATitle

    # The name of the icon to display in the dialog window title bar:
    icon = Image

    # The kind of user interface to create:
    kind = AKind

    # The default object being edited:
    object = AnObject

    # The default editor style of elements in the view:
    style = EditorStyle

    # The default docking style to use for sub-groups of the view. The following
    # values are possible:
    #
    # * 'fixed': No rearrangement of sub-groups is allowed.
    # * 'horizontal': Moveable elements have a visual "handle" to the left by
    #   which the element can be dragged.
    # * 'vertical': Moveable elements have a visual "handle" above them by
    #   which the element can be dragged.
    # * 'tabbed': Moveable elements appear as tabbed pages, which can be
    #   arranged within the window or "stacked" so that only one appears at
    #   at a time.
    dock = DockStyle

    # The image to display on notebook tabs:
    image = Image

    # Called when modal changes are applied or reverted:
    on_apply = OnApply

    # Can the user resize the window?
    resizable = IsResizable

    # Can the user scroll the view? If set to True, window-level scroll bars
    # appear whenever the window is too small to show all of its contents at
    # one time. If set to False, the window does not scroll, but individual
    # widgets might still contain scroll bars.
    scrollable = IsScrollable

    # The category of exported elements:
    export = ExportType

    # The valid categories of imported elements:
    imports = ImportTypes

    # External help context identifier, which can be used by a custom help
    # handler. This attribute is ignored by the default help handler.
    help_id = HelpId

    # Requested x-coordinate (horizontal position) for the view window. This
    # attribute can be specified in the following ways:
    #
    # * A positive integer: indicates the number of pixels from the left edge
    #   of the screen to the left edge of the window.
    # * A negative integer: indicates the number of pixels from the right edge
    #   of the screen to the right edge of the window.
    # * A floating point value between 0 and 1: indicates the fraction of the
    #   total screen width between the left edge of the screen and the left edge
    #   of the window.
    # * A floating point value between -1 and 0: indicates the fraction of the
    #   total screen width between the right edge of the screen and the right
    #   edge of the window.
    x = XCoordinate

    # Requested y-coordinate (vertical position) for the view window. This
    # attribute behaves exactly like the **x** attribute, except that its value
    # indicates the position of the top or bottom of the view window relative
    # to the top or bottom of the screen.
    y = YCoordinate

    # Requested width for the view window, as an (integer) number of pixels, or
    # as a (floating point) fraction of the screen width.
    width = Width

    # Requested height for the view window, as an (integer) number of pixels, or
    # as a (floating point) fraction of the screen height.
    height = Height

    # Class of dropped objects that can be added:
    drop_class = Any

    # Event when the view has been updated:
    updated = Event

    # What result should be returned if the user clicks the window or dialog
    # close button or icon?
    close_result = CloseResult

    # Note: Group objects delegate their 'object' and 'style' traits to the
    # View

    #-- Deprecated Traits (DO NOT USE) ---------------------------------------
    ok = Bool(False)
    cancel = Bool(False)
    undo = Bool(False)
    redo = Bool(False)
    apply = Bool(False)
    revert = Bool(False)
    help = Bool(False)

    #-------------------------------------------------------------------------
    # Initializes the object:
    #-------------------------------------------------------------------------

    def __init__(self, *values, **traits):
        """ Initializes the object.

        *values* are the view's content items (see set_content); *traits*
        are passed through to the ViewElement base constructor.
        """
        ViewElement.__init__(self, **traits)
        self.set_content(*values)

    #-------------------------------------------------------------------------
    # Sets the content of a view:
    #-------------------------------------------------------------------------

    def set_content(self, *values):
        """ Sets the content of a view.

        Each value may be a ViewSubElement, a sequence (wrapped in a Group),
        a string of the form '<name>' (converted to an Include), or any
        other value (wrapped in an Item).
        """
        content = []
        # NOTE(review): 'accum' is assigned but never used in this method.
        accum = []
        for value in values:
            if isinstance(value, ViewSubElement):
                content.append(value)
            elif type(value) in SequenceTypes:
                content.append(Group(*value))
            elif (isinstance(value, six.string_types) and
                    (value[:1] == '<') and (value[-1:] == '>')):
                # Convert string to an Include value:
                content.append(Include(value[1:-1].strip()))
            else:
                content.append(Item(value))

        # If there are any 'Item' objects in the content, wrap the content in a
        # Group:
        for item in content:
            if isinstance(item, Item):
                content = [Group(*content)]
                break

        # Wrap all of the content up into a Group and save it as our content:
        self.content = Group(container=self, *content)

    #-------------------------------------------------------------------------
    # Creates a UI user interface object:
    #-------------------------------------------------------------------------

    def ui(self, context, parent=None, kind=None,
           view_elements=None, handler=None,
           id='', scrollable=None,
           args=None):
        """ Creates a **UI** object, which generates the actual GUI window or
        panel from a set of view elements.

        Parameters
        ----------
        context : object or dictionary
            A single object or a dictionary of string/object pairs, whose trait
            attributes are to be edited. If not specified, the current object is
            used.
        parent : window component
            The window parent of the View object's window
        kind : string
            The kind of window to create. See the **AKind** trait for details.
            If *kind* is unspecified or None, the **kind** attribute of the
            View object is used.
        view_elements : ViewElements object
            The set of Group, Item, and Include objects contained in the view.
            Do not use this parameter when calling this method directly.
        handler : Handler object
            A handler object used for event handling in the dialog box. If
            None, the default handler for Traits UI is used.
        id : string
            A unique ID for persisting preferences about this user interface,
            such as size and position. If not specified, no user preferences
            are saved.
        scrollable : Boolean
            Indicates whether the dialog box should be scrollable. When set to
            True, scroll bars appear on the dialog box if it is not large enough
            to display all of the items in the view at one time.

        Returns the created UI object.
        """
        # Resolve the handler: explicit argument, then the view's own, then
        # the Traits UI default.  A handler *class* is instantiated here.
        handler = handler or self.handler or default_handler()
        if not isinstance(handler, Handler):
            handler = handler()
        if args is not None:
            handler.trait_set(**args)

        if not isinstance(context, dict):
            context = context.trait_context()

        # The context may already carry a handler; it takes precedence.
        context.setdefault('handler', handler)
        handler = context['handler']

        if self.model_view is not None:
            context['object'] = self.model_view(context['object'])

        # Combine the view's id with the caller-supplied id for persistence.
        self_id = self.id
        if self_id != '':
            if id != '':
                id = '%s:%s' % (self_id, id)
            else:
                id = self_id

        if scrollable is None:
            scrollable = self.scrollable

        ui = UI(view=self,
                context=context,
                handler=handler,
                view_elements=view_elements,
                title=self.title,
                id=id,
                scrollable=scrollable)

        if kind is None:
            kind = self.kind

        ui.ui(parent, kind)

        return ui

    #-------------------------------------------------------------------------
    # Replaces any items which have an 'id' with an Include object with the
    # same 'id', and puts the object with the 'id' into the specified
    # ViewElements object:
    #-------------------------------------------------------------------------

    def replace_include(self, view_elements):
        """ Replaces any items that have an ID with an Include object with
        the same ID, and puts the object with the ID into the specified
        ViewElements object.
        """
        if self.content is not None:
            self.content.replace_include(view_elements)

    #-------------------------------------------------------------------------
    # Returns a 'pretty print' version of the View:
    #-------------------------------------------------------------------------

    def __repr__(self):
        """ Returns a "pretty print" version of the View.
        """
        if self.content is None:
            return '()'
        return "( %s )" % ', '.join(
            [item.__repr__() for item in self.content.content])
| [
2,
10097,
26171,
198,
2,
198,
2,
220,
15069,
357,
66,
8,
5075,
11,
2039,
28895,
11,
3457,
13,
198,
2,
220,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,
770,
3788,
318,
2810,
1231,
18215,
739,
262,
2846,
286,
262,
347,
10305,
198,... | 3.104488 | 5,905 |
import picamera
import time
import os
import json
import pygame
import RPi.GPIO as GPIO
from PIL import Image, ImageDraw, ImageFont
import traceback
# --- Photo-booth state and configuration ---------------------------------
# NOTE(review): the helpers called below (setup_directories, import_settings,
# add_preview_overlay, on_button_down, on_button_up, clean_up) are not defined
# in this chunk -- presumably defined elsewhere in the file; confirm.
prev_button_status = False   # last sampled state of the push button
time_pressed = 0             # timestamp bookkeeping for button presses
taking_pictures = False      # True while a capture sequence is in progress
button_pin = 17              # GPIO pin number the push button is wired to
num_pictures = 1             # pictures to take per button press
first_delay = 5              # seconds before the first picture
following_delay = 5          # seconds between subsequent pictures
screen_width = 1920
screen_height = 1080
overlay_renderer = None
text_color = None
text_font = None
text_size = None
messages = []

with picamera.PiCamera() as camera:
    try:
        pygame.mixer.init()
        setup_directories()
        import_settings()
        camera.start_preview()
        # the screen is likely not the size of the display, so crop it to fit
        camera.preview.crop = (320, 420, screen_width, screen_height)
        #camera.preview.fullscreen = True
        #camera.preview.window = (320,420,screen_width,screen_height)
        add_preview_overlay('Press red button to begin!')
        # Poll the button forever and dispatch to the press/release handlers.
        # NOTE(review): no GPIO.setmode()/GPIO.setup() call is visible in this
        # chunk, and the loop busy-waits with no sleep -- confirm both are
        # handled elsewhere.
        while True:
            inputState = GPIO.input(button_pin)
            if inputState:
                on_button_down()
            else:
                on_button_up()
    except BaseException as err:
        # BaseException also catches KeyboardInterrupt, so Ctrl-C still
        # reaches the cleanup path below.
        print('Error: {}'.format(err))
    finally:
        clean_up()
| [
11748,
8301,
18144,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
12972,
6057,
198,
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
25302,
11,
7412,
23252,
198,
11748,
12854,... | 2.42623 | 488 |
# Python exercise 01: read the width and height of a wall in metres, compute
# its area, and work out how much paint is needed to cover it, given that
# each litre of paint covers 2 square metres.
wall_width = float(input('Qual a largura da parede: '))
wall_height = float(input('Qual a altura da parede: '))
area = wall_width * wall_height
paint_litres = area / 2
print(f'A parede tem uma área de {area}m² e irá precisar de {paint_litres} Litros de tinta.')
2,
1475,
2798,
8836,
66,
952,
11361,
5534,
25,
18350,
50041,
23781,
1430,
64,
8358,
443,
544,
257,
2552,
5330,
304,
257,
5988,
5330,
390,
334,
2611,
279,
1144,
68,
795,
1138,
4951,
11,
2386,
23172,
257,
424,
64,
6184,
94,
21468,
304... | 2.489011 | 182 |
"""
Convert results files.
"""
import os
import sys
import pickle
import yaml
if __name__ == "__main__":
    # Usage: convert.py OUTPUT_PATH JOINT_CONFIG_PATH RESULT_PATH [RESULT_PATH ...]
    output_path = sys.argv[1]
    joint_config_path = sys.argv[2]
    result_paths = sys.argv[3:]

    with open(joint_config_path, "r") as fp:
        # Bug fix: yaml.load() with no Loader is unsafe on untrusted input
        # and raises TypeError on PyYAML >= 6.  safe_load is the correct
        # call for plain configuration data.
        config = yaml.safe_load(fp)

    # Aggregate one entry per results file, keyed by the file's base name,
    # alongside the shared configuration.
    output = dict(config=config)
    for result_path in result_paths:
        model_name = os.path.basename(result_path).split(".")[0]
        with open(result_path, "rb") as fp:
            results = pickle.load(fp)

        output[model_name] = [
            results["results"],
            results["gp_parameters"],
            results["gp_predictions"]
        ]

    with open(output_path, "wb") as fp:
        pickle.dump(output, fp)
| [
37811,
198,
3103,
1851,
2482,
3696,
13,
198,
37811,
628,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
2298,
293,
198,
11748,
331,
43695,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
628,
220,
220,
220,
5072,
... | 2.139205 | 352 |
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CdmCreateClusterReqCluster:
    """Request-body model describing the CDM cluster to create.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'schedule_boot_time': 'str',
        'is_schedule_boot_off': 'bool',
        'instances': 'list[Instance]',
        'datastore': 'Datastore',
        'schedule_off_time': 'str',
        'vpc_id': 'str',
        'name': 'str',
        'sys_tags': 'list[SysTags]',
        'is_auto_off': 'bool'
    }

    # Maps Python attribute names to the JSON keys used on the wire.
    attribute_map = {
        'schedule_boot_time': 'scheduleBootTime',
        'is_schedule_boot_off': 'isScheduleBootOff',
        'instances': 'instances',
        'datastore': 'datastore',
        'schedule_off_time': 'scheduleOffTime',
        'vpc_id': 'vpcId',
        'name': 'name',
        'sys_tags': 'sys_tags',
        'is_auto_off': 'isAutoOff'
    }

    def __init__(self, schedule_boot_time=None, is_schedule_boot_off=None, instances=None, datastore=None, schedule_off_time=None, vpc_id=None, name=None, sys_tags=None, is_auto_off=None):
        """CdmCreateClusterReqCluster - a model defined in huaweicloud sdk.

        All parameters are optional; only the ones supplied are stored.
        """
        self._schedule_boot_time = None
        self._is_schedule_boot_off = None
        self._instances = None
        self._datastore = None
        self._schedule_off_time = None
        self._vpc_id = None
        self._name = None
        self._sys_tags = None
        self._is_auto_off = None
        self.discriminator = None

        if schedule_boot_time is not None:
            self.schedule_boot_time = schedule_boot_time
        if is_schedule_boot_off is not None:
            self.is_schedule_boot_off = is_schedule_boot_off
        if instances is not None:
            self.instances = instances
        if datastore is not None:
            self.datastore = datastore
        if schedule_off_time is not None:
            self.schedule_off_time = schedule_off_time
        if vpc_id is not None:
            self.vpc_id = vpc_id
        if name is not None:
            self.name = name
        if sys_tags is not None:
            self.sys_tags = sys_tags
        if is_auto_off is not None:
            self.is_auto_off = is_auto_off

    @property
    def schedule_boot_time(self):
        """Gets the schedule_boot_time of this CdmCreateClusterReqCluster.

        Scheduled startup time; the CDM cluster boots at this time every day.

        :return: The schedule_boot_time of this CdmCreateClusterReqCluster.
        :rtype: str
        """
        return self._schedule_boot_time

    @schedule_boot_time.setter
    def schedule_boot_time(self, schedule_boot_time):
        """Sets the schedule_boot_time of this CdmCreateClusterReqCluster.

        Scheduled startup time; the CDM cluster boots at this time every day.

        :param schedule_boot_time: The schedule_boot_time of this CdmCreateClusterReqCluster.
        :type: str
        """
        self._schedule_boot_time = schedule_boot_time

    @property
    def is_schedule_boot_off(self):
        """Gets the is_schedule_boot_off of this CdmCreateClusterReqCluster.

        Whether scheduled startup/shutdown is enabled.  Scheduled
        startup/shutdown and auto shutdown cannot both be enabled.

        :return: The is_schedule_boot_off of this CdmCreateClusterReqCluster.
        :rtype: bool
        """
        return self._is_schedule_boot_off

    @is_schedule_boot_off.setter
    def is_schedule_boot_off(self, is_schedule_boot_off):
        """Sets the is_schedule_boot_off of this CdmCreateClusterReqCluster.

        Whether scheduled startup/shutdown is enabled.  Scheduled
        startup/shutdown and auto shutdown cannot both be enabled.

        :param is_schedule_boot_off: The is_schedule_boot_off of this CdmCreateClusterReqCluster.
        :type: bool
        """
        self._is_schedule_boot_off = is_schedule_boot_off

    @property
    def instances(self):
        """Gets the instances of this CdmCreateClusterReqCluster.

        Node list; see the description of the instances parameter.

        :return: The instances of this CdmCreateClusterReqCluster.
        :rtype: list[Instance]
        """
        return self._instances

    @instances.setter
    def instances(self, instances):
        """Sets the instances of this CdmCreateClusterReqCluster.

        Node list; see the description of the instances parameter.

        :param instances: The instances of this CdmCreateClusterReqCluster.
        :type: list[Instance]
        """
        self._instances = instances

    @property
    def datastore(self):
        """Gets the datastore of this CdmCreateClusterReqCluster.

        :return: The datastore of this CdmCreateClusterReqCluster.
        :rtype: Datastore
        """
        return self._datastore

    @datastore.setter
    def datastore(self, datastore):
        """Sets the datastore of this CdmCreateClusterReqCluster.

        :param datastore: The datastore of this CdmCreateClusterReqCluster.
        :type: Datastore
        """
        self._datastore = datastore

    @property
    def schedule_off_time(self):
        """Gets the schedule_off_time of this CdmCreateClusterReqCluster.

        Scheduled shutdown time; at scheduled shutdown the system does not
        wait for unfinished jobs to complete.

        :return: The schedule_off_time of this CdmCreateClusterReqCluster.
        :rtype: str
        """
        return self._schedule_off_time

    @schedule_off_time.setter
    def schedule_off_time(self, schedule_off_time):
        """Sets the schedule_off_time of this CdmCreateClusterReqCluster.

        Scheduled shutdown time; at scheduled shutdown the system does not
        wait for unfinished jobs to complete.

        :param schedule_off_time: The schedule_off_time of this CdmCreateClusterReqCluster.
        :type: str
        """
        self._schedule_off_time = schedule_off_time

    @property
    def vpc_id(self):
        """Gets the vpc_id of this CdmCreateClusterReqCluster.

        ID of the Virtual Private Cloud used for the cluster's network
        configuration.

        :return: The vpc_id of this CdmCreateClusterReqCluster.
        :rtype: str
        """
        return self._vpc_id

    @vpc_id.setter
    def vpc_id(self, vpc_id):
        """Sets the vpc_id of this CdmCreateClusterReqCluster.

        ID of the Virtual Private Cloud used for the cluster's network
        configuration.

        :param vpc_id: The vpc_id of this CdmCreateClusterReqCluster.
        :type: str
        """
        self._vpc_id = vpc_id

    @property
    def name(self):
        """Gets the name of this CdmCreateClusterReqCluster.

        Cluster name.

        :return: The name of this CdmCreateClusterReqCluster.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this CdmCreateClusterReqCluster.

        Cluster name.

        :param name: The name of this CdmCreateClusterReqCluster.
        :type: str
        """
        self._name = name

    @property
    def sys_tags(self):
        """Gets the sys_tags of this CdmCreateClusterReqCluster.

        Enterprise-project information; see the description of the sys_tags
        parameter.

        :return: The sys_tags of this CdmCreateClusterReqCluster.
        :rtype: list[SysTags]
        """
        return self._sys_tags

    @sys_tags.setter
    def sys_tags(self, sys_tags):
        """Sets the sys_tags of this CdmCreateClusterReqCluster.

        Enterprise-project information; see the description of the sys_tags
        parameter.

        :param sys_tags: The sys_tags of this CdmCreateClusterReqCluster.
        :type: list[SysTags]
        """
        self._sys_tags = sys_tags

    @property
    def is_auto_off(self):
        """Gets the is_auto_off of this CdmCreateClusterReqCluster.

        Whether auto shutdown is enabled.  Auto shutdown and scheduled
        startup/shutdown cannot both be enabled.  With auto shutdown, when
        the cluster has no running jobs and no scheduled jobs it shuts down
        automatically after waiting 15 minutes, to help reduce cost.

        :return: The is_auto_off of this CdmCreateClusterReqCluster.
        :rtype: bool
        """
        return self._is_auto_off

    @is_auto_off.setter
    def is_auto_off(self, is_auto_off):
        """Sets the is_auto_off of this CdmCreateClusterReqCluster.

        Whether auto shutdown is enabled.  Auto shutdown and scheduled
        startup/shutdown cannot both be enabled.  With auto shutdown, when
        the cluster has no running jobs and no scheduled jobs it shuts down
        automatically after waiting 15 minutes, to help reduce cost.

        :param is_auto_off: The is_auto_off of this CdmCreateClusterReqCluster.
        :type: bool
        """
        self._is_auto_off = is_auto_off

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values that are models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes rather than exposing their values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force utf-8 as the default encoding so non-ASCII
            # field values serialize without UnicodeDecodeError.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CdmCreateClusterReqCluster):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
302,
198,
11748,
2237,
628,
198,
198,
6738,
289,
84,
707,
68,
291,
75,
2778,
21282,
74,
7295,
13,
26791,
13,
4023,
62,
26791,
1330,
5336,
270,
1096,
62,
1640,
62,
46911,
1634,
628,
... | 1.821098 | 5,299 |
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# 目标是统一使用page_size/page参数
# WebAPI: 使用config/default.py里DEFAULT_PAGINATION_CLASS默认配置的CustomLimitOffsetPagination,后续需要前端配合一起调整为page_size/page参数
# OpenAPI:
# 对于已开放接口admin.list_groups/admin.list_group_member/mgmt.list_group/mgmt.list_group_member使用CompatiblePagination兼容limit/offset和page_size/page
# 对于OpenAPI新接口,需要ViewSet需要显示配置pagination_class=CustomPageNumberPagination
from collections import OrderedDict
from rest_framework.pagination import LimitOffsetPagination, PageNumberPagination
from rest_framework.response import Response
class CustomLimitOffsetPagination(LimitOffsetPagination):
    """LimitOffsetPagination subclass used for Web API responses that only
    strips the ``previous`` and ``next`` fields from the returned data."""
class CustomPageNumberPagination(PageNumberPagination):
    """PageNumberPagination subclass used for Open API responses that only
    strips the ``previous`` and ``next`` fields from the returned data."""

    # Let clients choose the page size via the ?page_size= query parameter.
    page_size_query_param = "page_size"

    def _positive_int(self, integer_string, strict=False, cutoff=None):
        """
        Cast a string to a strictly positive integer.

        copied from https://github.com/encode/django-rest-framework/blob/master/rest_framework/pagination.py#L22
        """
        ret = int(integer_string)
        if ret < 0 or (ret == 0 and strict):
            raise ValueError()
        if cutoff:
            # Clamp to the configured upper bound.
            return min(ret, cutoff)
        return ret

    def get_page_number(self, request, paginator=None):
        """Override: drop support for page_number='last' and similar
        template-rendering shorthands; only positive integers are accepted."""
        page_number = request.query_params.get(self.page_query_param, 1)
        return self._positive_int(page_number, strict=True)

    def get_limit_offset_pair(self, request):
        """Translate page_size/page into a (limit, offset) pair.

        The public OpenAPI contract is page_size/page, but internal code
        that paginates directly still needs limit/offset."""
        limit = self.get_page_size(request)
        offset = (self.get_page_number(request) - 1) * limit
        return limit, offset
class CompatiblePagination(CustomPageNumberPagination):
    """Defaults to page_size/page pagination parameters while staying
    compatible with legacy limit/offset parameters."""

    limit_query_param = "limit"
    offset_query_param = "offset"

    def get_page_number(self, request, paginator=None):
        """
        Override: drop support for page_number='last' and similar
        template-rendering shorthands; only positive integers are accepted.
        Also derives the page number from ``offset`` when only the legacy
        limit/offset parameters were supplied.
        :param paginator: defaults to None; other values have no effect.
        """
        page_number = request.query_params.get(self.page_query_param, 1)
        # Prefer the page parameter; if it is absent but offset is present,
        # infer the page number from offset.
        # NOTE(review): self._get_offset is not defined in this chunk and is
        # not a PageNumberPagination attribute -- confirm it is provided by a
        # mixin or elsewhere in the project.
        if self.page_query_param not in request.query_params and self.offset_query_param in request.query_params:
            page_size = self.get_page_size(request)
            offset = self._get_offset(request)
            # Note: assumes offset is an exact multiple of page_size; if it
            # is not, the original limit/offset values were never valid for
            # pagination in the first place.
            page_number = (offset // page_size) + 1
        return self._positive_int(page_number, strict=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
24893,
1087,
14573,
15708,
318,
10607,
284,
1104,
262,
1280,
2723,
2055,
416,
1642,
5525,
241,
251,
165,
110,
116,
162,
247,
118,
12859,
239,
12,
30266,
225,... | 1.981546 | 1,734 |
# Generated by Django 3.0.11 on 2021-01-06 13:35
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
1157,
319,
33448,
12,
486,
12,
3312,
1511,
25,
2327,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
# %%
import json
from pathlib import Path
import requests
# %%
# Load Bing Custom Search credentials, falling back to placeholder values so
# the notebook still runs without a secrets file.
secrets_file = Path("./bing-search/secrets.json")
if secrets_file.exists():
    with open(secrets_file, "rt") as json_data:
        secrets = json.load(json_data)
else:
    secrets = {
        "config_id": "xxxxxx",
        "subscription_key1": "xxxxxx",
        "subscription_key2": "xxxxx",
        "endpoint": "https://api.cognitive.microsoft.com/bingcustomsearch/v7.0"
    }
# %%
# %%
# NOTE(review): getRequest is not defined in this chunk -- presumably a helper
# from a removed notebook cell; confirm it exists before running.
r = getRequest(secrets, 'training')
print(r.json())  # fixed: `print(r.json)` printed the bound method, not the payload
# %%
resp = json.loads(r.text)
# print(json.dumps(resp, indent=2, sort_keys=True))
# %%
# Report which optional keys each search hit carries.
webPages = resp['webPages']['value']
for w in webPages:
    print(f"id: {'id' in w.keys()}, "
          f"searchTags :{'searchTags' in w.keys()}")
# %%
for w in webPages:
    print(f"url: {w['displayUrl']}")
# %%
| [
2,
43313,
198,
11748,
33918,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
7007,
628,
198,
2,
16626,
198,
2363,
8004,
62,
7753,
796,
10644,
7,
1911,
14,
4623,
12,
12947,
14,
2363,
8004,
13,
17752,
4943,
198,
198,
361,
13141,
62,
7... | 2.334311 | 341 |
import os
import shlex
import subprocess
import threading
import select
import functools
import sublime
def _process(commands, callback=None, working_dir=None, wait_for_completion=None, **kwargs):
'''Process one or more OS commands.'''
if wait_for_completion is None:
wait_for_completion = False
# We're expecting a list of commands, so if we only have one, convert
# it to a list:
#
if isinstance(commands, str):
commands = [commands]
results = []
# Windows needs STARTF_USESHOWWINDOW in order to start the process with a
# hidden window.
#
# See:
#
# http://stackoverflow.com/questions/1016384/cross-platform-subprocess-with-hidden-window
#
startupinfo = None
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
# Now we can execute each command:
#
for command in commands:
# Split the command properly, in case it has options and
# parameters:
#
command = shlex.split(command)
try:
proc = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=working_dir,
startupinfo=startupinfo)
# We're going to keep polling the command and either:
#
# 1. we get None to tell us that the command is still running, or;
# 2. we get a return code to indicate that the command has finished.
#
return_code = None
while return_code is None:
return_code = proc.poll()
# If there's no error then see what we got from the command:
#
if return_code is None or return_code == 0:
r, _, _ = select.select([proc.stdout], [], [])
if r:
# Process whatever output we can get:
#
output = True
while output:
output = proc.stdout.readline().decode()
# If the caller wants everything in one go, or
# there is no callback function, then batch up
# the output. Otherwise pass it back to the
# caller as it becomes available:
#
if wait_for_completion is True or callback is None:
results += output
else:
sublime.set_timeout_async(functools.partial(callback, *args, **kwargs), 0)
except subprocess.CalledProcessError as e:
sublime.set_timeout_async(functools.partial(callback, *args, **kwargs), 0)
except OSError as e:
if e.errno == 2:
sublime.message_dialog('Command not found\n\nCommand is: %s' % command)
else:
raise e
# Concatenate all of the results and return the value. If we've been
# using the callback then just make one last call with 'None' to indicate
# that we're finished:
#
result = ''.join(results)
if callback is None:
return result
if wait_for_completion is True:
sublime.set_timeout_async(functools.partial(callback, *args, **kwargs), 0)
sublime.set_timeout_async(functools.partial(callback, *args, **kwargs), 0)
| [
11748,
28686,
198,
11748,
427,
2588,
198,
11748,
850,
14681,
198,
11748,
4704,
278,
198,
11748,
2922,
198,
11748,
1257,
310,
10141,
198,
198,
11748,
41674,
198,
198,
4299,
4808,
14681,
7,
9503,
1746,
11,
23838,
28,
14202,
11,
1762,
62,
... | 2.043889 | 1,800 |
from nltk import word_tokenize, WordNetLemmatizer
from plotly.graph_objs import Scatter, Bar
from wordcloud import WordCloud
def generate_plots(df):
    """
    Generate plot objects to be rendered in the dashboard:
        - Bar chart to plot distribution of genre
        - Bar chart to plot distribution of disaster category types
        - Word cloud to plot frequency of word in message content

    INPUT
        df - training set, pd.DataFrame

    OUTPUT
        graphs - list of plotly objects, List
    """
    # Graph 1 - Distribution of genres
    genre_counts = df.groupby('genre').count()['message']
    genre_names = list(genre_counts.index)

    # melt dataframe: one row per (message, category) pair
    df1 = df.melt(id_vars=['id', 'message', 'original', 'genre'], var_name='category', value_name='active')

    # Graph 2 - Distribution of category types
    category_counts = df1[df1.active == 1].groupby('category').agg({'message': 'count'}) \
        .reset_index().sort_values(by='message', ascending=True)
    category_names = category_counts['category'].values

    # Graph 3 - Wordcloud of a sample of messages.
    # Cap the sample size at len(df): df.sample(100) raises ValueError when
    # the frame holds fewer than 100 rows.
    words = df.sample(min(100, len(df)))['message'].apply(_tokenize).values
    words = [word for word_list in words for word in word_list]

    # create visuals
    graphs = [
        {
            'data': [
                Bar(
                    x=genre_names,
                    y=genre_counts
                )
            ],
            'layout': {
                'title': 'Distribution of Message Genres',
                'yaxis': {
                    'title': "Count"
                },
                'xaxis': {
                    'title': "Genre"
                }
            }
        },
        {
            'data': [
                Bar(
                    x=category_counts['message'],
                    y=category_names,
                    orientation='h'
                )
            ],
            'layout': {
                'title': 'Distribution of Disaster category types',
                'yaxis': {
                    'title': "Count"
                },
                'xaxis': {
                    'title': "Category"
                },
                'margin': dict(l=150, r=15, pad=10)
            }
        }
    ]
    wc = _plotly_wordcloud(' '.join(words))
    graphs.append(wc)
    return graphs
def _tokenize(text):
    """
    Tokenize words from input sentences

    INPUT
        text - message content, str

    OUTPUT
        cleaned tokens - cleaned tokens after tokenization phase, List
    """
    lemmatizer = WordNetLemmatizer()
    # lemmatize, lowercase and strip each token in a single pass
    return [lemmatizer.lemmatize(token).lower().strip() for token in word_tokenize(text)]
def _plotly_wordcloud(text):
    """
    Word cloud plot. Based on: https://github.com/PrashantSaikia/Wordcloud-in-Plotly

    INPUT
        text - message content, str

    OUTPUT
        chart - word cloud chart, plotly objects
    """
    wc = WordCloud(max_words=200,
                   max_font_size=40,
                   min_font_size=2,
                   min_word_length=3)
    wc.generate(text)

    # Each wc.layout_ entry is ((word, freq), fontsize, position, orientation, color).
    entries = list(wc.layout_)
    word_list = [entry[0][0] for entry in entries]
    freq_list = [entry[0][1] for entry in entries]
    fontsize_list = [entry[1] for entry in entries]
    position_list = [entry[2] for entry in entries]
    orientation_list = [entry[3] for entry in entries]
    color_list = [entry[4] for entry in entries]

    # get the positions
    x = [pos[0] for pos in position_list]
    y = [pos[1] for pos in position_list]

    # scale the (0..1) relative frequencies into text sizes
    new_freq_list = [freq * 100 for freq in freq_list]

    wc_plot_data = {
        'data': [
            Scatter(
                x=x,
                y=y,
                textfont=dict(size=new_freq_list,
                              color=color_list),
                hoverinfo='text',
                hovertext=['{0}: {1}'.format(w, f) for w, f in zip(word_list, freq_list)],
                mode='text',
                text=word_list
            )
        ],
        'layout': {
            'title': 'Message: Word cloud',
            'xaxis': {'showgrid': False,
                      'showticklabels': False,
                      'zeroline': False},
            'yaxis': {'showgrid': False,
                      'showticklabels': False,
                      'zeroline': False},
        }
    }
    return wc_plot_data
| [
6738,
299,
2528,
74,
1330,
1573,
62,
30001,
1096,
11,
9678,
7934,
43,
368,
6759,
7509,
198,
6738,
7110,
306,
13,
34960,
62,
672,
8457,
1330,
1446,
1436,
11,
2409,
198,
6738,
1573,
17721,
1330,
9678,
18839,
628,
198,
4299,
7716,
62,
... | 1.936847 | 2,391 |
#!/usr/bin/env python
"Fork child processes and observe their exit status with os.wait."
import os
# Incremented before each fork; the child uses it as its exit status code.
EXIT_STAT_INT = 0
def child():
    "Child process: bump the shared counter, report, and exit with it."
    global EXIT_STAT_INT
    EXIT_STAT_INT += 1
    print('Hello from child', os.getpid(), EXIT_STAT_INT)
    # os._exit skips interpreter cleanup; the value becomes the status byte
    # that the parent extracts from os.wait().
    os._exit(EXIT_STAT_INT)
def main():
    "Parent process: fork one child per iteration and reap its exit status."
    while True:
        new_pid_int = os.fork()
        if new_pid_int == 0:
            # In the child: child() never returns (it calls os._exit).
            child()
        else:
            # In the parent: block until the child exits.
            pid_int, status_int = os.wait()
            # wait() packs the exit code into the high byte; >> 8 extracts it.
            print('Parent got', pid_int, status_int, status_int >> 8)
        # Interactive loop: any input continues, 'q' quits.
        if input() == 'q':
            break
if __name__ == '__main__':
    # Run the fork/wait demo only when executed directly, not on import.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
1,
26344,
228,
162,
242,
107,
36310,
32573,
249,
163,
101,
233,
171,
120,
234,
18796,
101,
418,
13,
17077,
164,
100,
224,
43380,
253,
17739,
114,
34460,
222,
49035,
118,
163,
232,
35... | 1.891473 | 258 |
from abc import ABC
import numpy as np
# NOTE(review): L1 and L2 are not defined in this chunk -- presumably classes
# defined elsewhere in the file; as shown, these lines raise NameError.
l1 = L1()
l2 = L2()
| [
6738,
450,
66,
1330,
9738,
198,
198,
11748,
299,
32152,
355,
45941,
628,
628,
198,
198,
75,
16,
796,
406,
16,
3419,
198,
75,
17,
796,
406,
17,
3419,
198
] | 2.166667 | 30 |
import os
import re
import math
import tqdm
import requests
import zlib
from collections import namedtuple
# Result record for a completed download: full path, base filename, CRC32 hash.
Downloaded_file = namedtuple('Downloaded_file', 'path basename crc32')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='''
A file downloader, which returns the full filepath, basename, and CRC32 hash for the downloaded file - if successful.
''')
parser.add_argument("URL", help="URL to be downloaded", type=str)
parser.add_argument("-d", "--dirname", help="Directory to download to. [d]", type=str, default=os.getcwd())
parser.add_argument("-cs", "--chunk_size", help="Chunksize in bytes.", type=int, default=8192)
args = parser.parse_args()
try:
download = download_file(**vars(args))
except:
print("Download not successful!")
raise
else:
print("Download successsfully finished!")
print("File saved as: {}".format(download.path))
print("CRC32: {}".format(download.crc32)) | [
11748,
28686,
198,
11748,
302,
198,
11748,
10688,
198,
11748,
256,
80,
36020,
198,
11748,
7007,
198,
11748,
1976,
8019,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
10002,
276,
62,
7753,
796,
3706,
83,
29291,
10786,
10002,
276,
62... | 2.737401 | 377 |
import os
import pkgutil
import re
from collections import namedtuple
from subprocess import check_call
from typing import Dict, List, NamedTuple, Tuple
from cognite.client._api_client import APIClient, CogniteCollectionResponse, CogniteResponse
| [
11748,
28686,
198,
11748,
279,
10025,
22602,
198,
11748,
302,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
6738,
850,
14681,
1330,
2198,
62,
13345,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
34441,
51,
29291,
11,
309,
29291,
198,
... | 3.691176 | 68 |
#!/usr/bin/env python3
import discord
from discord.ext import commands
import json
import random
import asyncio
import subprocess
import operator
import configparser
import glob
import emoji
import datetime
import sys
import traceback
client = commands.Bot(command_prefix=("plzz ", "Plzz ", "plz ", "Plz "))
client.remove_command("help")
# custom imports
from itemindex import Item, ItemIndex
from database import Database
lastid = {}
config = configparser.ConfigParser()
config.read_file(open("config.conf", "r"))
token = config.get("config", "token")
channelid = config.get("config", "triviachannel")
triviaminwait = int(config.get("config", "triviaminwait"))
triviamaxwait = int(config.get("config", "triviamaxwait"))
index = ItemIndex("main")
enabletrubot = config.getboolean('trubot', 'enabled')
trutoken = config.get("trubot", "token")
if enabletrubot:
trubot = commands.Bot(command_prefix="trubot ")
trubot.remove_command("help")
trucooldown = False
truversion = config.get("trubot", "version")
truemoji = config.get("trubot", "emoji")
#read "database"
db = Database("userdata.db")
#read counters
with open("counters.json", "r") as f:
counters = json.load(f)
qdir = glob.glob("./questions/*.json")
questions = []
for i in qdir:
with open(i, "r") as f:
q = json.load(f)
questions.append(q)
trivia = {
"next": True,
"channel": None
}
triviamultiplier = 10
levelcost = 60
effectemoji = {
"dice": "<:dice:632295947552030741>",
"uno": "<:unoshield:720992427216863302>",
"vault": "<:vault:699266653791322172>"
}
#=================================== item definitions ===================================#
# "Points" -- the currency pseudo-item; presumably assigned id 0 as the first
# registration (item_lootbox special-cases id 0). High lootboxweight makes it
# the most common loot-box drop; genaliases=False suppresses auto aliases.
index.add(
    name="Points",
    emoji="<:coin:632592319245451286>",
    lootboxmax=400,
    lootboxweight=2000,
    genaliases=False
)
async def item_lootbox(ctx):
    """
    This is a loot box, in case you hadn't guessed.
    Here are some drop rates i guess:
    """
    # Roll three weighted drops and report them in one embed.
    embed = discord.Embed(title="Loot Box opened!", description="You got:", colour=discord.Colour(0x70a231))
    embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)
    # The weights never change while the box is open, so build the list once
    # instead of rebuilding it on every iteration (loop-invariant hoist).
    weights = [item.lootboxweight for item in index.items]
    for _ in range(3):
        addthis = random.choices(index.items, weights)[0]
        amount = random.randint(1, addthis.lootboxmax)
        if addthis.id == 0:
            # id 0 is the Points pseudo-item: credit the balance directly.
            embed.add_field(name="<:coin:632592319245451286>", value=f"{amount} Points", inline=True)
            db.update_bal(ctx.author.id, amount)
        else:
            embed.add_field(name=addthis.emoji, value=f"{amount}x {addthis.name}", inline=True)
            db.give_item(ctx.author.id, addthis.id, amount)
    await ctx.send(embed=embed)
    # True tells the `use` command to consume the box from the inventory.
    return True
index.add(
    use=item_lootbox,
    name="Loot Box",
    emoji="<:lootbox:632286669592199217>",
    aliases=[],
    description="Some say it's gambling, so imma add it while it's legal...",
    buy=500,
    sell=250
)
async def item_dice(ctx, amount: int):
    """
    Using this instead of gambling increases your chance of winning to 66%.
    (Don't use this at a real casino, cus i think thats illegal)
    *\* it has been reworked! wow*
    """
    # 66% win / 34% lose, delegated to the shared gambling helper.
    # NOTE(review): int_gamble is not defined in this chunk -- presumably a
    # helper defined elsewhere in the file (the `gamble` command uses it too).
    odds = [66, 34]
    return await int_gamble(ctx, amount, odds)
index.add(
    use=item_dice,
    name="Loaded Dice",
    emoji="<:dice:632295947552030741>",
    sell=100,
    useargs="i"  # "i": the `use` command passes one integer argument
)
async def item_spambot(ctx, member):
    """
    You all know that feeling when your friends are online,
    but they never respond to your messages.
    This item is the solution to that problem!
    Using this item on another user will spam the chat with their pings.
    Now available for only $19.99! *terms and conditions apply*
    *\* please dont abuse :)*
    """
    # Ping the target four times, then transfer a small random amount of
    # points from the target to the user. Note: ignores vault/uno effects.
    for i in range(4):
        await ctx.send(member.mention + ": get the fuck over here")
    amount = random.randint(10, 30)
    await ctx.send(ctx.author.mention + f": {member.mention} was so startled they dropped {amount} <:coin:632592319245451286>")
    db.update_bal(member.id, -amount)
    db.update_bal(ctx.author.id, amount) # add cooldown?
    return True
index.add(
    use=item_spambot,
    name="Spambot",
    emoji="<:spambot:632466831063646221>",
    aliases=["bot", "spam"],
    description="Spams the crap out of your target",
    lootboxmax=5,
    lootboxweight=2000,
    buy=100,
    sell=15,
    useargs="m"  # "m": the `use` command passes a Member argument
)
async def item_mask(ctx, member):
    """
    You filthy thief!
    Stealing up to 300 points from your friends using this item is not nice.
    I sure do hope they have vaults active to stop you...
    *\* Elysium Corp is not responsible for any bans as the result of robbing admins*
    """
    # Pick the stolen amount based on the target's balance:
    #   >= 300      -> 40..300
    #   <  50       -> nothing (target too poor; mask is not consumed)
    #   50..299     -> 40..balance
    memscore = db.get_bal(member.id)
    if memscore >= 300:
        amount = random.randint(40, 300)
    elif memscore < 50:
        amount = 0
    else:
        amount = random.randint(40, memscore)
    if amount:
        memeff = db.get_eff(member.id)
        # Uno shield takes priority over vaults: the rob backfires doubled.
        if "uno" in memeff:
            amount = amount * 2
            db.update_bal(member.id, amount)
            db.update_bal(ctx.author.id, -amount)
            await ctx.send(ctx.author.mention + f": You robbed {member.mention}, but they had an uno reverse card active! you lost `{amount}` points!")
            db.rem_eff(member.id, "uno")
        elif "vault" in memeff:
            # Vault blocks the rob; both the vault and the mask are consumed.
            await ctx.send(ctx.author.mention + f": You robbed {member.mention}, but they had a vault active and you lost your mask!")
            db.rem_eff(member.id, "vault")
        else:
            # Successful rob: transfer and log it so an Uno Reverse Card can
            # undo it later (see item_unocard).
            db.update_bal(member.id, -amount)
            db.update_bal(ctx.author.id, amount)
            db.log(member.id, "steal", ctx.author.id, amount)
            await ctx.send(ctx.author.mention + f": You robbed {member.mention}, you managed to get away with `{amount}` points!")
    else:
        await ctx.send(ctx.author.mention + f": You cant rob {member.mention}! They're way too poor, thats pathetic... *shakes head disapprovingly*")
        return False
    return True
index.add(
    use=item_mask,
    name="Robbers Mask",
    emoji="<:balaclava:632658938437042212>",
    aliases=[],
    description="Use this to steal some points from your buddies, i'm sure they won't hate you...",
    lootboxmax=1,
    lootboxweight=400,
    buy=400,
    sell=200,
    useargs="m"
)
async def item_bread(ctx):
    """
    Okay seriously thats gross, its almost like a new lifeform has evolved here!
    Wait you wanna eat this? Would you even survive that?!
    Actually eating it seems to just remove 10 points.
    But it doesn't stop you from eating it is you have no points left, that might be worth investigating...
    *\* this might get really useful later on, once i get around to adding the thing*
    """
    # Pure joke item: eating it simply costs 10 points (balance may go negative).
    await ctx.send(ctx.author.mention + ": You ate the Moldy Bread, why the fuck would you do that? *backs away slowly*\nU got -10 <:coin:632592319245451286> cus thats just nasty")
    db.update_bal(ctx.author.id, -10)
    return True
index.add(
    use=item_bread,
    name="Moldy Bread",
    emoji="<:moldybread:632921575649443909>",
    aliases=[],
    description="Why would you keep this?",
    lootboxmax=1,
    lootboxweight=100,
    buy=20,
    sell=5
)
async def item_fortune(ctx):
    """
    "A fortune cookie is a crisp and sugary cookie usually made from flour, sugar, vanilla, and sesame seed oil with a piece of paper inside, a "fortune", on which is an aphorism, or a vague prophecy."
    *(from https://en.wikipedia.org/wiki/Fortune_cookie)*
    Well Wikipedia isn't wrong, but there might be more to it than that...
    *\* expect more here in the future*
    """
    # NOTE(review): subprocess.check_output is a blocking call inside a
    # coroutine (stalls the event loop) and the /usr/games/fortune path is
    # platform-specific -- confirm the deployment target.
    await ctx.send(ctx.author.mention + f""": You cracked open the cookie, the little piece of paper inside says:\n```{subprocess.check_output(["/usr/games/fortune"]).decode("utf-8")}```""")
    # 1-in-10 chance of a bonus payout of 50..300 points.
    if random.randint(1, 10) == 1:
        cash = random.randint(50, 300)
        await ctx.send(ctx.author.mention + f""": There were also {cash} <:coin:632592319245451286> hidden inside!""")
        db.update_bal(ctx.author.id, cash)
    # add item drops rarely, better than lootbox?
    return True
index.add(
    use=item_fortune,
    name="Fortune Cookie",
    emoji="<:fortunecookie:633286682195525653>",
    aliases=[],
    description="Tells you your fortune i guess, sometimes has something hidden inside tho",
    lootboxmax=10,
    lootboxweight=1500,
    buy=80,
    sell=5
)
async def item_nuke(ctx, member):
    """
    Pretty cool right, having a nuke in your pocket?
    Using this is more effective than just putting on a mask, just like real life!
    This is more effective at stealing points, but there's also some collateral damage.
    *\* tbh, it is not that good, thats why it's on discount :/*
    """
    # Amount stolen scales with the target's balance, capped at 500.
    # NOTE(review): with a negative target balance `amount` becomes negative,
    # so the "steal" actually refunds the target -- confirm this is intended.
    memscore = db.get_bal(member.id)
    if memscore >= 500:
        amount = random.randint(0, 500)
    elif memscore < 0:
        amount = -random.randint(0, -memscore)
    elif memscore < 500:
        amount = random.randint(0, memscore)
    memeff = db.get_eff(member.id)
    # Uno shield reflects the nuke (doubled); half is lost in the blast.
    if "uno" in memeff:
        amount = amount * 2
        db.update_bal(member.id, int(amount / 2))
        db.update_bal(ctx.author.id, -amount)
        await ctx.send(ctx.author.mention + f": You yeeted a nuke at {member.mention}, but they had an uno reverse card active! you lost `{amount}` points, and half of them were destroyed!")
        db.rem_eff(member.id, "uno")
    elif "vault" in memeff:
        # A vault absorbs the whole nuke (and is consumed).
        await ctx.send(ctx.author.mention + f": You yeeted a nuke at {member.mention}, but they had a vault active!")
        db.rem_eff(member.id, "vault")
    else:
        # Successful nuke: the target loses `amount`, the attacker only
        # receives half (the rest is "destroyed"); logged for reversal.
        db.update_bal(member.id, -amount)
        db.update_bal(ctx.author.id, int(amount / 2))
        db.log(member.id, "steal", ctx.author.id, amount)
        await ctx.send(ctx.author.mention + f": You yeeted a nuke at {member.mention}, you stole `{amount}` points, but half of them were destroyed!")
    return True
index.add(
    use=item_nuke,
    name="Nuke",
    emoji="<:nuke:671718044078440448>",
    aliases=[],
    description="Steals points from pals but destroys half of 'em, **90% discount!**",
    lootboxmax=1,
    lootboxweight=200,
    buy=100,
    sell=50,
    useargs="m"
)
async def item_nuke2(ctx, member):
    """
    Fuck your friends in the ass today with the new NUKE 2: ELECTRIC BOOGALOO,
    as opposed to the old nuke this one is acually better than a mask! *wow*
    This is exactly the same as the normal nuke, but more destructive.
    *\* where the normal nuke destroyed one city, this one destroys about 5 at least*
    """
    # Same mechanics as item_nuke, with a 1000-point cap and a guaranteed
    # minimum of 40% of the target's balance in the mid range.
    # NOTE(review): as with item_nuke, a negative target balance yields a
    # negative `amount` (a refund) -- confirm intent.
    memscore = db.get_bal(member.id)
    if memscore >= 1000:
        amount = random.randint(0, 1000)
    elif memscore < 0:
        amount = -random.randint(0, -memscore)
    elif memscore < 1000:
        amount = random.randint(int(memscore * 0.4), memscore)
    memeff = db.get_eff(member.id)
    if "uno" in memeff:
        # Reflected (doubled) by an uno shield; half is destroyed.
        amount = amount * 2
        db.update_bal(member.id, int(amount / 2))
        db.update_bal(ctx.author.id, -amount)
        await ctx.send(ctx.author.mention + f": You yeeted a nuke 2: electric boogaloo at {member.mention}, but they had an uno reverse card active! you lost `{amount}` points, and half of them were destroyed!")
        db.rem_eff(member.id, "uno")
    elif "vault" in memeff:
        # Absorbed by a vault (vault consumed).
        await ctx.send(ctx.author.mention + f": You yeeted a nuke 2: electric boogaloo at {member.mention}, but they had a vault active!")
        db.rem_eff(member.id, "vault")
    else:
        # Successful nuke: attacker receives half; logged for reversal.
        db.update_bal(member.id, -amount)
        db.update_bal(ctx.author.id, int(amount / 2))
        db.log(member.id, "steal", ctx.author.id, amount)
        await ctx.send(ctx.author.mention + f": You yeeted a nuke 2: electric boogaloo at {member.mention}, you stole `{amount}` points, but half of them were destroyed!")
    return True
index.add(
    use=item_nuke2,
    name="Nuke 2: Electric Boogaloo",
    emoji="<:nuke2:698057397574303784>",
    aliases=["nuke2"],
    description="The cooler daniel, no discount here",
    lootboxmax=1,
    lootboxweight=80,
    buy=1000,
    sell=300,
    useargs="m"
)
async def item_unoshield(ctx):
    """
    Okay so we're playing uno now apparently,
    this acually seems pretty useful though.
    The reverse shield can reverse a single rob or nuke.
    After that it just disappears. like, \*poof\*, and its gone.
    *\* oh yeah, and it doubles the amount robbed. so thats nice...*
    """
    # Only one shield may be active per user at a time.
    if "uno" in db.get_eff(ctx.author.id):
        await ctx.send(ctx.author.mention + f""": You already have a reverse shield active""")
        return False
    await ctx.send(ctx.author.mention + f""": Uno Reverse Shield activate! you are now protected from one rob/nuke""")
    db.give_eff(ctx.author.id, "uno")
    # True -> the `use` command consumes the item from the inventory.
    return True
index.add(
    use=item_unoshield,
    name="Reverse Shield",
    emoji="<:unoshield:720992427216863302>",
    aliases=["unoshield", "reverseshield", "shield"],
    description="Use this to ward off those pesky thieves once and for all",
    lootboxmax=1,
    lootboxweight=40,
    buy=1200,
    sell=800,
    genaliases=False
)
async def item_vault(ctx):
    """
    This miraculous item seems to be able to stop an entire nuke!
    It does also break from a simple robbery though...
    Use a vault to protect your points from robberies and nukes.
    After activating a vault it will protect your balance from a single attack!
    You can have 3 vaults active at once, plus a single uno card.
    *\* It's not as useful since the introduction of the lockpick...*
    """
    # Stack up to three "vault" effects; each absorbs one rob/nuke.
    autheff = db.get_eff(ctx.author.id)
    if "vault" in autheff:
        if autheff["vault"] < 3:
            db.give_eff(ctx.author.id, "vault")
        else:
            # Already at the cap: refuse and keep the item (return False).
            await ctx.send(ctx.author.mention + f""": You already have 3 vaults active""")
            return False
    else:
        db.give_eff(ctx.author.id, "vault")
    await ctx.send(ctx.author.mention + f""": Used item""")
    return True
index.add(
    use=item_vault,
    name="Vault",
    emoji="<:vault:699266653791322172>",
    aliases=[],
    description="Protect your precious points, stops one attack each, 3 allowed active at once",
    lootboxmax=1,
    lootboxweight=400,
    buy=200,
    sell=150
)
async def item_lockpick(ctx, member):
    """
    Wasting masks and nukes on removing vaults is not very nice is it?
    This item can remove a single active vault from someone's balance.
    Once there are no vaults left it's pretty much useless...
    *\* okay so i might have made this a bit too common...*
    """
    # No vault to crack: refuse and keep the lockpick (return False).
    if "vault" not in db.get_eff(member.id):
        await ctx.send(ctx.author.mention + f""": {member.mention} has no vaults active""")
        return False
    # Consume one of the target's vault effects and the lockpick itself.
    db.rem_eff(member.id, "vault")
    # fixed: message used to end with a stray apostrophe ("...vaults!'")
    await ctx.send(ctx.author.mention + f""": You cracked one of {member.mention}'s vaults!""")
    return True
index.add(
    use=item_lockpick,
    name="Lockpick",
    emoji="<:lockpick:699275348675657788>",
    aliases=[],
    description="Removes a vault, nothing else",
    lootboxmax=2,
    lootboxweight=1000,
    buy=300,
    sell=150,
    useargs="m"
)
async def item_rulebook(ctx, member):
    """
    Wasting masks and nukes on removing ~~vaults~~ **reverse shields** is not very nice is it?
    This item can remove a single active ~~vault~~ **reverse shields** from someone's balance.
    Once there are no ~~vaults~~ **reverse shields** left it's pretty much useless...
    *\* okay so i might have made this a bit too uncommon...*
    """
    # Counter item for the Reverse Shield: removes the target's "uno" effect.
    if "uno" in db.get_eff(member.id):
        db.rem_eff(member.id, "uno")
        await ctx.send(ctx.author.mention + f""": You annihilated {member.mention}'s reverse shield!""")
    else:
        # Nothing to remove: keep the rulebook (return False).
        await ctx.send(ctx.author.mention + f""": {member.mention} has no reverse shield active""")
        return False
    return True
index.add(
    use=item_rulebook,
    name="Uno Rulebook",
    emoji="<:rulebook:718503942153044081>",
    aliases=["rulebook", "rules", "unorulebook", "book", "rule"],
    description="Another counter item",
    lootboxmax=1,
    lootboxweight=20,
    buy=1400,
    sell=1000,
    useargs="m",
    genaliases=False
)
async def item_unocard(ctx):
    """
    This uno card seems different...
    If this item is used within one hour of a rob/nuke the effect can be reversed, making the attacker lose points!
    This is *not* limited to only the most recent rob/nuke.
    *\* this seemed a bit more appropriate for the uno card*
    """
    # Fetch the most recent "steal" log entry against this user.
    # db.latest_log presumably returns (attacker_id, amount) or None; the
    # TypeError unpack below is the "nothing to reverse" path.
    dbreturn = db.latest_log(ctx.author.id, "steal")
    try:
        affected, amount = dbreturn
    except TypeError:
        await ctx.send(ctx.author.mention + f""": You have no recent robs to reverse""")
        return False
    # Reverse the rob at double strength: attacker pays back 2x.
    db.update_bal(affected, -amount * 2)
    db.update_bal(ctx.author.id, amount * 2)
    # NOTE(review): client.get_user may return None for an uncached user,
    # which would raise AttributeError on `.mention` -- confirm.
    await ctx.send(ctx.author.mention + f""": You reversed {client.get_user(affected).mention}'s rob of {amount} {index.get_by_id(0).emoji}!""")
    return True
index.add(
    use=item_unocard,
    name="Uno Reverse Card",
    emoji="<:unoreverse:699194687646597130>",
    aliases=["unoreverse"],
    description="Reverse that shit!",
    lootboxmax=1,
    lootboxweight=300,
    buy=500,
    sell=250
)
#=================================== /item definitions ==================================#
print("connecting...")
#WIP -- commented-out global error handler and member-join hook, kept for reference
# @client.event
# async def on_command_error(ctx, exception):
#     if hasattr(ctx.command, 'on_error'):
#         return
#     print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)
#     traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)
#     await ctx.send("")
# @client.event
# async def on_member_join(member):
#     db.setup_user(member.id)
# NOTE(review): a bare @client.event with no function directly under it -- the
# event handler that used to be here was likely removed; as written it also
# wraps the `points` command, which is almost certainly unintended. Confirm.
@client.event
@client.command(aliases=["bal", "money", "status"])
async def points(ctx, *args):
    """CAPITALISM BOYS"""
    # NOTE(review): getmember is not defined in this chunk -- presumably a
    # helper resolving `args` to a Member, defaulting to the author.
    member = await getmember(ctx, args)
    memeff = db.get_eff(member.id)
    # Render three vault slots (filled or empty) plus an optional uno shield.
    vaults = []
    if "vault" in memeff:
        vaultsnum = memeff["vault"]
    else:
        vaultsnum = 0
    for i in range(vaultsnum):
        vaults.append(effectemoji["vault"])
    for i in range(3 - vaultsnum):
        vaults.append("⭕")
    if "uno" in memeff:
        vaults.append("(" + effectemoji["uno"] + ")")
    vaults = " ".join(vaults)
    embed = discord.Embed(title="Status:", description=f"{db.get_bal(member.id)} <:coin:632592319245451286>\n**Active vaults:**\n{vaults}", colour=discord.Colour(0x70a231))
    embed.set_author(name=member.name, icon_url=member.avatar_url)
    await ctx.send(embed=embed)
@client.command(aliases=["payme", "daily"])
@client.command(aliases=["bet", "casino"])
async def gamble(ctx, amount=None):
"""Come on, have a try. You have a 50% chance to double your bet"""
if not amount:
await ctx.send("How much to gamble?")
return
try:
amount = int(amount)
except ValueError:
await ctx.send("Thats not number tho")
return
odds = [50, 50]
await int_gamble(ctx, amount, odds)
# NOTE(review): dangling @gamble.error -- its error-handler body appears to
# have been removed, leaving the decorator attached to `inventory`. Confirm.
@gamble.error
@client.command(aliases=["inv", "items"])
async def inventory(ctx, *args):
    """SHOW ME WHAT YOU GOT"""
    member = await getmember(ctx, args)
    embed = discord.Embed(title="Inventory:", colour=discord.Colour(0x70a231))
    embed.set_author(name=member.name, icon_url=member.avatar_url)
    # db.get_inv returns a list of (item_id, count) pairs.
    inv = db.get_inv(member.id)
    if inv == []:
        embed.title = "Inventory empty..."
    else:
        for item in inv:
            itemobj = index.get_by_id(item[0])
            embed.add_field(name=itemobj.emoji, value=f"""{item[1]}x {itemobj.name}""", inline=True)
    await ctx.send(embed=embed)
# NOTE(review): six dangling decorators -- three owner-only command bodies
# appear to have been removed, leaving their decorators stacked onto
# `iteminfo`. Confirm; as written registration is almost certainly broken.
@client.command()
@commands.is_owner()
@client.command()
@commands.is_owner()
@client.command()
@commands.is_owner()
@client.command(aliases=["info", "tellmemore"])
async def iteminfo(ctx, *, item=""):
    """U wanna know what some of this shit does?"""
    if not item:
        await ctx.send("What item do u wanna know about?")
        return
    item = index.get_by_alias(item)
    if item and item.longdesc:
        embed = discord.Embed(title=str(item), description=item.longdesc, colour=discord.Colour(0x70a231))
        embed.set_author(name=client.user.name, icon_url=client.user.avatar_url)
        # Special case the Loot Box (id 1): append the drop-rate table.
        if item.id == 1:
            maxweight = 0
            for iitem in index.items:
                maxweight += iitem.lootboxweight
            for iitem in index.items:
                if iitem.lootboxweight:
                    embed.add_field(name=str(iitem), value=f"Chance: {round(iitem.lootboxweight / maxweight * 100, 2)}%", inline=True)
        await ctx.send(embed=embed)
    else:
        await ctx.send("That item does not exist...")
# NOTE(review): dangling @iteminfo.error -- its handler body appears to have
# been removed, leaving the decorator attached to `use`. Confirm.
@iteminfo.error
@client.command(aliases=["open", "eat"])
async def use(ctx, *args):
    """Do something with your random crap"""
    authorid = ctx.author.id
    if not args:
        await ctx.send("Pls tell me item thx")
        return
    item = index.get_by_alias(args[0])
    if not item or not item.use:
        await ctx.send("Unknown item that")
        return
    if not db.has_item(authorid, item.id):
        await ctx.send("You dont own that shit man")
        return
    # Dispatch on the item's argument convention:
    #   "m" -> second CLI arg / mention is resolved to a Member
    #   "i" -> second CLI arg is parsed as an int
    #   else -> no extra argument
    if item.useargs == "m":
        if len(args) == 1:
            await ctx.send("Please tell me who to use this shit on aight?")
            return
        else:
            if ctx.message.mentions:
                member = ctx.message.mentions[0]
            else:
                member = ctx.guild.get_member_named(args[1])
            if not member:
                await ctx.send("Thats not person tho")
                return
        rmitem = await item.use(ctx, member)
    elif item.useargs == "i":
        if len(args) == 1:
            await ctx.send("How much to gamble?") # change this later
            return
        try:
            amount = int(args[1])
        except ValueError:
            await ctx.send("Thats not number tho")
            return
        rmitem = await item.use(ctx, amount)
    else:
        rmitem = await item.use(ctx)
    # Item handlers return truthy when the item was actually consumed.
    if rmitem:
        db.rem_item(authorid, item.id)
@client.command(aliases=["xp"])
async def level(ctx, *args):
"""Is this an mmorpg or somethin?"""
member = await getmember(ctx, args)
level = db.get("level", member.id)
xp = db.get("xp", member.id)
if member == ctx.author:
await ctx.send(f"""{member.mention}: Yeah boi, u r level `{level}` & ur `{xp}/{levelcost}` to the next level""")
else:
await ctx.send(f"""{member.mention}: Is level `{level}` & theyr `{xp}/{levelcost}` to the next level""")
@client.command(aliases=["richest", "leaderboard"])
async def baltop(ctx):
"""See who to rob"""
top = []
for userid in db.all_users():
top.append((userid, db.get_bal(userid)))
top = sorted(top, key=operator.itemgetter(1))[::-1]
embed = discord.Embed(title="Top 10 points:", description="━━━━━━━━━━━━━━━", colour=discord.Colour(0x70a231))
amount = 0
for user in top:
if top.index(user) < (amount + 10):
if ctx.guild.get_member(int(user[0])):
embed.add_field(name=str(top.index(user) + 1 - amount) + ". " + ctx.guild.get_member(int(user[0])).display_name, value=f"""{user[1]} <:coin:632592319245451286>""", inline=False)
else:
amount += 1
await ctx.send(embed=embed)
@client.command(aliases=["toprank"])
async def ranks(ctx):
"""See who to rob"""
top = []
for userid in db.all_users():
top.append((userid, db.get("level", userid)))
top = sorted(top, key=operator.itemgetter(1))[::-1]
embed = discord.Embed(title="Top 10 levels:", description="━━━━━━━━━━━━━━━", colour=discord.Colour(0x70a231))
amount = 0
for user in top:
if top.index(user) < (amount + 10):
if ctx.guild.get_member(int(user[0])):
embed.add_field(name=str(top.index(user) + 1 - amount) + ". " + ctx.guild.get_member(int(user[0])).display_name, value=f"""level: {user[1]}""", inline=False)
else:
amount += 1
await ctx.send(embed=embed)
@client.command(aliases=["coin"])
async def coinflip(ctx):
"""I think its pretty self-explanatory tbh"""
await ctx.send(random.choice(["Heads!", "Tails!"]))
@client.command(aliases=["choice"])
async def choose(ctx, *args):
"""I think its pretty self-explanatory tbh"""
if len(args) > 1:
await ctx.send(random.choice(args))
else:
await ctx.send("come on, gimme somethin to work with here...")
@client.command(aliases=["buy"])
async def shop(ctx, buythis=None, amount=1):
    """List the shop inventory, or buy `amount` of a named item."""
    if not buythis:
        # No item requested: show everything that is for sale.
        embed = discord.Embed(title="For sale:", colour=discord.Colour(0x70a231))
        embed.set_author(name="yo whattup come buy some stuffs", icon_url=client.user.avatar_url)
        for stock in index.items:
            if stock.buy and stock.description:
                embed.add_field(name=f"**{stock}** - {stock.buy} Points",
                                value=stock.description, inline=False)
        await ctx.send(embed=embed)
        return
    if amount < 1:
        await ctx.send("I should have seen this coming...")
        return
    item = index.get_by_alias(buythis)
    if not item or not item.buy:
        await ctx.send("I don't sell that")
        return
    cost = item.buy * amount
    if db.get_bal(ctx.author.id) < cost:
        await ctx.send("U ain't got da cash m8")
        return
    await ctx.send(f"{ctx.author.mention}: you bought {amount} {item} for "
                   f"{cost} <:coin:632592319245451286>")
    db.update_bal(ctx.author.id, -cost)
    db.give_item(ctx.author.id, item.id, amount)
@client.command(aliases=["sellitem"])
async def sell(ctx, sellthis, amount=1):
    """Sell `amount` of an owned item back to the shop."""
    if amount < 1:
        await ctx.send("I should have seen this coming...")
        return
    item = index.get_by_alias(sellthis)
    if not item or not item.sell:
        await ctx.send("I don't buy that")
        return
    if not db.has_item(ctx.author.id, item.id, amount):
        await ctx.send("You dont own that shit man")
        return
    payout = item.sell * amount
    await ctx.send(f"{ctx.author.mention}: you sold {amount} {item} for "
                   f"{payout} <:coin:632592319245451286>")
    db.update_bal(ctx.author.id, payout)
    db.rem_item(ctx.author.id, item.id, amount)
@client.command()
#================================= item quick-commands =================================#
@client.command()
@client.command()
@client.command()
#================================= /item quick-commands ================================#
#=================================== trubot section ====================================#
@trubot.event
#================================== \trubot section ====================================#
# Start the periodic background tasks only when a target channel is
# configured.  `channelid`, `background`, `background2`, `enabletrubot`,
# `token` and `trutoken` are defined earlier in this file.
if channelid:
    bgtask = client.loop.create_task(background())
    bgtask2 = client.loop.create_task(background2())
#client.run(token)
# Run both bots on one shared event loop instead of client.run(), so the
# secondary "trubot" client can be scheduled on the same loop.
loop = asyncio.get_event_loop()
loop.create_task(client.start(token))
if enabletrubot:
    loop.create_task(trubot.start(trutoken))
loop.run_forever()
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
11748,
33918,
198,
11748,
4738,
198,
11748,
30351,
952,
198,
11748,
850,
14681,
198,
11748,
10088,
198,
11748,
4566,
48610,
... | 2.441956 | 11,164 |
# Review Questions

# Algorithm Workbench

# 1. Write a statement that creates a list with the following
# strings: 'Einstein','Newton','Copernicus', and 'Kepler'.
names = ['Einstein', 'Newton', 'Copernicus', 'Kepler']
print(names)

# 2. Assume names references a list. Write a for loop
# that displays each element of the list.
names = ['Einstein', 'Newton', 'Copernicus', 'Kepler']
for name in names:
    print(name)

# 3. Assume the list 'numbers1' has 100 elements, and
# 'numbers2' is an empty list. Write code that copies
# the values in 'numbers1' to 'numbers2'.
numbers1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
numbers2 = []
# Plain assignment (numbers2 = numbers1) would only alias the same list
# object; a slice copy creates an independent list with the same values.
numbers2 = numbers1[:]
print(numbers2)

# 4. Draw a Flowchart (skip)

# 5. Write a function that accepts a list as an argument
# (assume the list contains integers) and returns the total
# of the values in the list.
def get_total(values):
    """Return the sum of the integers in `values`."""
    total = 0
    for value in values:
        total += value
    return total

# 6. Assume the 'names' variable references a list of strings.
# Wrtie code that determines whether 'Ruby' is in the names
# list. If it is, display the message 'Hello Ruby'. Otherwise,
# display the message 'No Ruby'.
names = ['Ninh', 'Andrew', 'Ruby']
if 'Ruby' in names:
    print('Hello Ruby')
else:
    print('No Ruby')

# 7. What will the following code print?
list1 = [40, 50, 60]
list2 = [10, 20, 30]
list3 = list1 + list2
print(list3)
# A. [40,50,60,10,20,30]

# 8. Assume 'list1' is a list of integers. Write a statement
# that uses a list comprehension to create a second list
# containing the squares of the elements of 'list1'.
list1 = [2, 3, 4, 5, 6]
list2 = [item ** 2 for item in list1]
print(list2)

# 9. Assume 'list1' is a list of integers. Write a statement
# that uses a list comprehension to create a second list
# containing the elements of 'list1' that are greater than 100.
list1 = [60, 90, 100, 112, 212]
list2 = [item for item in list1 if item > 100]
print(list2)

# 10. Assume 'list1' is a list of integers. Write a statement
# that uses a list comprehension to create a second list
# containing the elements of 'list1' that are even numbers.
list1 = [60, 90, 100, 112, 212]
# The filter tests each element, not the whole list (the original
# 'if list1' was always true).
list2 = [item for item in list1 if item % 2 == 0]
print(list2)

# 11. Write a statement that creates a two-dimensional list
# with 5 rows and 3 columns. Then write nested loops that get
# an integer value from the user for each element in the list.
list1 = [[1, 2, 3],
         [4, 5, 6],
         [7, 8, 9],
         [10, 12, 13],
         [14, 15, 16]]
# To fill the table from user input instead, use:
# list1 = [[int(input('Enter an integer: ')) for _ in range(3)]
#          for _ in range(5)]
# Loop variables renamed so the builtin 'list' is no longer shadowed.
for row in list1:
    for element in row:
        print(element)

# End
2,
6602,
20396,
198,
198,
2,
978,
42289,
5521,
26968,
198,
198,
2,
352,
13,
19430,
257,
2643,
326,
8075,
257,
1351,
351,
262,
1708,
198,
2,
13042,
25,
705,
36,
11962,
41707,
3791,
1122,
41707,
13379,
1142,
24552,
3256,
290,
705,
889... | 2.851852 | 864 |
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to manipulate device manufacturing information.
The "Device Data" is a set of mapping data storing manufacturing information for
DUT (device under test), including peripheral information (for example if
touchscreen should be available or not), per-device provisioned data (also known
as Vital Product Data - VPD; for example serial number or shipping region), test
status (has passed SMT, FATP, or other stations).
The Device Data is shared by tests in Chrome OS Factory Software, the test
harness "Goofy" and the "Shopfloor Service API". The values usually come from
pre-defined values, shopfloor backend, or set by tests using manual selection
or barcode scanner (especially serial numbers).
Device Data can be considered as a mapping or dictionary, with following keys:
- ``serials``: A dictionary for serial numbers of device, components, and
mainboard. All serial numbers here will be logged by `testlog`, including:
- ``serial_number``: The serial number of "device" itself (printed on device
panel).
- ``mlb_serial_number``: The serial number of main logic board (mainboard).
- ``component``: A dictionary to indicate what peripherals should exist, for
example:
- ``has_touchscreen=True``: A touch screen should be available.
- ``has_dram=2``: Two DRAM components should be available.
- ``vpd``: A dict for what VPD values need to be set, including:
- ``ro``: VPD values in RO section (RO_VPD), usually including:
- ``region``: Region code as defined in http://go/cros-regions.
- ``rw``: VPD values in RW section (RW_VPD), usually including:
- ``ubind_attribute``: User registration code.
- ``gbind_attribute``: Group registration code.
- ``hwid``: A value of probed Hardware ID.
- ``factory``: A dict for manufacturing flow control, used by shopfloor
backends. See Shopfloor Service API for more details.
For example, a typical device usually has both device serial number and
main board serial number, region VPD, registration codes, thus the device data
will be set to::
{
'serials': {
'serial_number': 'SN1234567890',
'mlb_serial_number': 'MLB1234567890'
},
'vpd': {
'ro': {
'region': 'us'
},
'rw': {
'ubind_attribute': '12345',
'gbind_attribute': '54321',
}
}
}
Using Device Data
-----------------
Device Data is internally stored as Python dict inside shelves, and provided by
`cros.factory.test.state` module via RPC calls.
To get all data as single dict,
use ``GetAllDeviceData``. To get partial data, use ``GetDeviceData`` with key
names joined using dot. For example, to fetch only the ``ro`` values inside
``vpd``::
GetDeviceData('vpd.ro')
The key names are also defined in this module. All constants starting with
``KEY_`` are complete key names for ``GetDeviceData`` to use. Constants starting
with ``NAME_`` are key names (no dot) of the single dict. For example, following
calls are equivalent if ``vpd.ro.region`` exists::
GetDeviceData('vpd.ro.region')
GetDeviceData('vpd').get('ro').get('region')
GetDeviceData(KEY_VPD).get(NAME_RO).get(NAME_REGION)
GetDeviceData(KEY_VPD_RO).get(NAME_REGION)
GetDeviceData(KEY_VPD_REGION)
If ``vpd.ro`` does not exist, ``get('ro')`` will return None so you can't invoke
another ``get('region')`` on it. So using the complete key path
(``vpd.ro.region``) provides an easier way to retrieve single value without
worrying if the intermediate dictionaries exist or not.
Using Serial Number
-------------------
There are some special helpers to access serial number. ``GetSerialNumber`` and
``SetSerialNumber`` expect names of serial numbers (``NAME_*``). But as a syntax
sugar, they will also accept keys with ``KEY_SERIALS`` prefixed. For example,
following calls are equivalent::
GetSerialNumber('serial_number')
GetSerialNumber(NAME_SERIAL_NUMBER)
GetSerialNumber(KEY_SERIAL_NUMBER)
GetDeviceData(KEY_SERIAL_NUMBER)
Note when setting serial numbers (``SetSerialNumber``), a value evaluates to
false (None, false, empty string...) will **delete** the stored serial number.
API Spec
--------
"""
import collections.abc
import logging
import os
# pylint: disable=wildcard-import,unused-wildcard-import
from cros.factory.test.device_data_constants import *
from cros.factory.test import event
from cros.factory.test.rules import privacy
from cros.factory.test import state
from cros.factory.utils import config_utils
from cros.factory.utils import shelve_utils
# Helper utility for manipulating keys.
# Joins key path components with dots, e.g. JoinKeys('vpd', 'ro') -> 'vpd.ro'.
JoinKeys = shelve_utils.DictKey.Join
def _GetInstance():
  """Returns the device-data root selector from the state data shelf."""
  shelf = state.GetInstance().data_shelf
  return shelf[state.KEY_DEVICE_DATA]
def CheckValidDeviceDataKey(key, key_prefix=None):
  """Checks that `key` is a well-formed device data key.

  Args:
    key: A string of key for device data.
    key_prefix: If given, `key` must start with this token.

  Returns:
    True when the key is valid.

  Raises:
    KeyError: if the key is not valid.
  """
  prefix, dot, rest = key.partition('.')
  if key_prefix and prefix != key_prefix:
    raise KeyError('Key %s must start with %s.' % (key, key_prefix))
  top_level_keys = [KEY_SERIALS, KEY_HWID, KEY_VPD, KEY_COMPONENT, KEY_FACTORY]
  if prefix not in top_level_keys:
    raise KeyError('Key %s must start with one of %r' % (key, top_level_keys))
  if prefix == KEY_SERIALS and '.' in rest:
    # Serial entries are exactly one level deep.
    raise KeyError('Serial number name must not contain dots: %s' % rest)
  if prefix == KEY_HWID and dot:
    # HWID is a single scalar value.
    raise KeyError('HWID must not have sub keys: %s' % rest)
  if prefix == KEY_VPD:
    vpd_sections = [NAME_RO, NAME_RW]
    section, unused_dot, name = rest.partition('.')
    if section not in vpd_sections:
      raise KeyError('VPD key [%s] must be in the sections: %s' %
                     (key, vpd_sections))
    if '.' in name:
      raise KeyError('VPD entry name must not contain dots: %s' % name)
  return True
def GetDeviceData(key, default=None):
  """Reads one entry from device data.

  Args:
    key: A dotted-path string addressing the entry.
    default: Value to return when the key does not exist.

  Returns:
    The stored value if present, otherwise `default`.

  Raises:
    KeyError: if `key` is not a string.
  """
  if not isinstance(key, str):
    raise KeyError('key must be a string')
  selector = _GetInstance()[key]
  return selector.Get(default)
def GetAllDeviceData():
  """Returns the complete device data as one (possibly nested) dict."""
  root = _GetInstance()
  return root.Get({})
def GetDeviceDataSelector():
  """Returns the data-shelf selector rooted at device data.

  Primarily used by the invocation module to resolve TestListArgs.
  """
  return _GetInstance()
def DeleteDeviceData(delete_keys, optional=False):
  """Deletes given keys from device data.

  Args:
    delete_keys: A list of keys (or a single string) to delete.
    optional: False to raise a KeyError when a key is missing.

  Returns:
    The complete device data dict after deletion.
  """
  if isinstance(delete_keys, str):
    delete_keys = [delete_keys]
  logging.info('Deleting device data: %s', delete_keys)
  # Re-root each key under the device-data section of the state shelf.
  full_keys = [shelve_utils.DictKey.Join(state.KEY_DEVICE_DATA, k)
               for k in delete_keys]
  instance = state.GetInstance()
  instance.DataShelfDeleteKeys(full_keys, optional)
  data = instance.DataShelfGetValue(state.KEY_DEVICE_DATA, True) or {}
  logging.info('Updated device data; complete device data is now %s',
               privacy.FilterDict(data))
  _PostUpdateSystemInfo()
  return data
def VerifyDeviceData(device_data):
  """Verifies that all fields in the device data dictionary are valid.

  Args:
    device_data: A dict with key/value pairs to verify.

  Raises:
    ValueError: if the device data is invalid.
  """
  component_prefix = JoinKeys(KEY_COMPONENT, 'has_')
  for key, value in device_data.items():
    if not key.startswith(component_prefix):
      continue
    # "has_*" component entries may only be None, bool or int counts.
    if value is not None and not isinstance(value, (bool, int)):
      raise ValueError('Values in the "component" domain should be None or'
                       ' in type of either `bool` or `int`.')
def UpdateDeviceData(new_device_data):
  """Merges `new_device_data` into the stored device data.

  Args:
    new_device_data: A dict with key/value pairs to update; nested dicts
        are flattened into dotted keys, and old values are overwritten.

  Returns:
    The complete device data dict after the update.
  """
  flat = FlattenData(new_device_data)
  logging.info('Updating device data: setting %s', privacy.FilterDict(flat))
  VerifyDeviceData(flat)
  instance = state.GetInstance()
  instance.DataShelfUpdateValue(state.KEY_DEVICE_DATA, flat)
  data = instance.DataShelfGetValue(state.KEY_DEVICE_DATA, True) or {}
  logging.info('Updated device data; complete device data is now %s',
               privacy.FilterDict(data))
  _PostUpdateSystemInfo()
  return data
def _GetSerialNumberKey(name):
  """Returns the full device-data key for a serial number name."""
  if '.' in name:
    # Already a dotted path; it must live under the serials section.
    CheckValidDeviceDataKey(name, KEY_SERIALS)
    return name
  return JoinKeys(KEY_SERIALS, name)
def _GetSerialNumberName(key):
  """Returns the name part (after the section prefix) of a serial key."""
  full_key = _GetSerialNumberKey(key)
  return full_key.split('.', 1)[1]
def GetAllSerialNumbers():
  """Returns every stored serial number as a name -> value dict."""
  return GetDeviceData(KEY_SERIALS, default={})
def ClearAllSerialNumbers():
  """Removes the entire serial-number section from device data."""
  DeleteDeviceData([KEY_SERIALS], optional=True)
def GetSerialNumber(name=NAME_SERIAL_NUMBER):
  """Returns the serial number stored under `name`.

  Defaults to the device serial number.
  """
  key = _GetSerialNumberKey(name)
  return GetDeviceData(key)
def SetSerialNumber(name, value):
  """Sets one serial number to the given value.

  Args:
    name: A string to indicate serial number name.
    value: A string representing the serial number; any falsy value
        deletes the stored serial number instead.
  """
  UpdateSerialNumbers({name: value})
def UpdateSerialNumbers(dict_):
  """Updates stored serial numbers from a mapping.

  Args:
    dict_: A mapping of serial number names to values. A value that
        evaluates as False deletes that serial number from device data.
  """
  assert isinstance(dict_, dict)
  to_set = {}
  to_delete = []
  for name, value in dict_.items():
    if value:
      to_set[_GetSerialNumberName(name)] = value
    else:
      to_delete.append(_GetSerialNumberKey(name))
  if dict_:
    UpdateDeviceData({KEY_SERIALS: to_set})
  if to_delete:
    DeleteDeviceData(to_delete, optional=True)
def FlattenData(data, parent=''):
  """Flattens nested dicts into a single dict with dotted keys.

  For example, {'a': {'b': 'c'}} => {'a.b': 'c'}

  Args:
    data: The dict type data to be flattened.
    parent: A string key prefix used during recursion.

  Returns:
    A flattened dict.
  """
  flat = {}
  for key, value in data.items():
    full_key = JoinKeys(parent, key) if parent else key
    if isinstance(value, collections.abc.Mapping):
      flat.update(FlattenData(value, full_key))
    else:
      flat[full_key] = value
  return flat
def LoadConfig(config_name=None):
  """Loads a JSON config representing device data.

  Args:
    config_name: A string for name passed to config_utils.LoadConfig.

  Returns:
    A flattened device data dictionary.
  """
  raw = config_utils.LoadConfig(config_name, schema_name='device_data')
  return FlattenData(raw)
def UpdateDeviceDataFromVPD(key_map, vpd_data):
  """Update device data from VPD data.

  Please see pytest `read_device_data_from_vpd` for more details.
  For both `key_map` and `vpd_data`, they should be a dictionary, with at most
  two keys: 'ro' and 'rw' (NAME_RO and NAME_RW).  key_map['ro'] and
  key_map['rw'] should follow the format of ro_key_map and rw_key_map in
  `read_device_data_from_vpd`.  If key_map is None, a default key_map will be
  used.
  """
  if key_map is None:
    key_map = {
        NAME_RO: DEFAULT_RO_VPD_KEY_MAP,
        NAME_RW: DEFAULT_RW_VPD_KEY_MAP,
    }
  assert isinstance(key_map, dict)
  assert isinstance(vpd_data, dict)
  data = {}
  for section in [NAME_RO, NAME_RW]:
    if section not in key_map:
      continue
    vpd_section = vpd_data.get(section, {})
    # Each rule is a (<VPD key or wildcard>, <device data key>) pair;
    # _MatchKey (defined elsewhere in this module) decides whether a VPD
    # entry matches the rule, including the trailing-'*' wildcard form.
    for rule in key_map[section].items():
      for vpd_key in vpd_section:
        if _MatchKey(rule, vpd_key):
          data_key = _DeriveDeviceDataKey(rule, vpd_key)
          # VPD stores booleans as the strings 'TRUE'/'FALSE' (any case);
          # convert those to real bools, keep everything else as-is.
          if vpd_section[vpd_key].upper() in ['TRUE', 'FALSE']:
            data[data_key] = (vpd_section[vpd_key].upper() == 'TRUE')
          else:
            data[data_key] = vpd_section[vpd_key]
  UpdateDeviceData(data)
def _DeriveDeviceDataKey(rule, vpd_key):
"""Derive device data key from `vpd_key` according to `rule`.
This is a helper function for UpdateDeviceDataFromVPD.
Args:
rule: a tuple (<VPD key>, <device data key>), for example:
('serial_number', 'serials.serial_number'). If VPD key ends with '*',
maps all VPD starts with the prefix to device data. For example,
('foo.*', 'bar') will maps all 'foo.*' in VPD to 'bar.*' in device data.
That is, 'foo.region' will become 'bar.region'.
vpd_key: use this VPD key to derive device key.
"""
expected_key = rule[0]
if not expected_key.endswith('*'):
return rule[1]
# Remove the prefix.
vpd_key = vpd_key[len(expected_key[:-1]):]
# Pre-pend new prefix.
return JoinKeys(rule[1], vpd_key)
| [
2,
15069,
2177,
383,
18255,
1505,
7294,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,... | 2.986894 | 4,578 |
import json
import requests
# Directory where the downloaded JSON files are written; must already exist.
data_dir = 'data/'
# Fetch the name and ISO alpha-2 code of every country.
countries = requests.get('https://restcountries.eu/rest/v2/all?fields=name;alpha2Code').json()
# download stats for each country
for country in countries:
    code = country['alpha2Code']
    print(code)
    # Per-country WiGLE region stats; keep only name, code and encryption info.
    response = requests.get('https://api.wigle.net/api/v2/stats/regions?country={}'.format(code)).json()
    with open('{}stats_regions_{}.json'.format(data_dir, code), 'w+') as new_file:
        json.dump({ 'name': country['name'], 'code': response['country'], 'encryption': response['encryption'] }, new_file)
# download wifi count for all countries
response = requests.get('https://api.wigle.net/api/v2/stats/countries').json()
with open('{}stats_countries_wifi_count.json'.format(data_dir), 'w+') as stats_countries_file:
    json.dump(response, stats_countries_file)
11748,
33918,
198,
11748,
7007,
198,
198,
7890,
62,
15908,
796,
705,
7890,
14,
6,
198,
9127,
1678,
796,
7007,
13,
1136,
10786,
5450,
1378,
2118,
9127,
1678,
13,
12496,
14,
2118,
14,
85,
17,
14,
439,
30,
25747,
28,
3672,
26,
26591,
... | 2.878049 | 287 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from aes import AESCipher
import json
if __name__ == "__main__":
    # Fixed demo key and IV (16 bytes each) for the round-trip check below.
    key = 'd4f7d2adf42c34a3'
    iv = "5c6ca7c26b1b068d"
    # Sample API responses: one with an encrypted "result" payload, one without.
    body_plaintext_1 = '{"status": 200, "message": "ok", "result": {"id": 4, "version": "1.1.2", "rsa": "1234567890", "patch": {"released": [], "deleted": [{"id": 40}, {"id": 42}, {"id": 43}, {"id": 48}, {"id": 67}, {"id": 68}, {"id": 69}, {"id": 72}, {"id": 73}, {"id": 74}, {"id": 75}]}}}'
    body_plaintext_2 = '{"status": 200, "message": "ok"}'
    body_ciphertext_1 = '{"status": 200, "message": "ok", "result": "QsTubOGmgNQrq3XMy9ALHV9umA2l8ZwKNb0HpRyzxHSZSPckMKsqcA9UeUs6P+6uQtcqZSY/Ci9ub9q0X5K6xAEb49fUIyexdbbAFkqovjn803VGL2fsreB8A4uGgrGlHcd5uooKQO1pqHh1I0xOOyhrObD80l9ixOIp84K2YJWlbu2XfyxzT5dLP9JpkgoqklLhsTEvb2vIgJoIxWs7QbVzh+frxPd/M03uhgiZtRUdrQ//Wb/H2v6q5H0df9qtwUizmF82tIjhNYRYpxMybcqHMRlxvVxVc4bcT5dHVMw="}'
    body_ciphertext_2 = '{"status": 200, "message": "ok"}'
    # NOTE(review): encrypt_result / decrypt_result are neither defined nor
    # imported in this excerpt (only AESCipher is imported) -- as written this
    # script raises NameError; confirm they exist in the original module.
    ret = encrypt_result(key, iv, body_plaintext_1)
    print(ret)
    encrypt_result(key, iv, body_plaintext_2)
    ret = decrypt_result(key, iv, body_ciphertext_1)
    decrypt_result(key, iv, body_ciphertext_2)
    print(ret)
    # The decrypted ciphertext must round-trip back to the plaintext.
    assert body_plaintext_1 == ret
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
6738,
257,
274,
1330,
34329,
34,
10803,
198,
11748,
33918,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
... | 1.915757 | 641 |
from fopid_control import fpid
from struct import pack, unpack
import socket
from select import select
'''
This is a proof-of-concept code for running real-time
FO PID control from Python using a UDP socket
'''
# All sockets are bound on localhost.
UDP_IP = "127.0.0.1"
UDP_PORT_REMOTE = 5101 # The port where the computed control law is sent (client port)
UDP_PORT_LOCAL = 5110 # Port to which the error signal must be sent (server port)
UDP_PORT_CTRL = 5201 # Control port. Allows to, e.g., set controller parameters (server port)
UDP_BUFFER_SIZE = 4096
# Set the initial FOPID parameters. The parameters of the approximation are NOT set in this example
params = {"fpid": {"Kp": 1, "Ki": 1, "Kd": 1, "lam": 0.5, "mu": 0.5}}
# This initializes and computes the controller. From now on, you can access all of the data
# of this approximation *and* can run the control algorithm.
fpid_c = fpid.fpid_2iir(params)
# Latest computed control law
last_out = 0
# Change the parameters of the FOPID controller
# Compute the control law
# NOTE(review): the two comments above suggest the handler functions
# (change_serv_params, do_control_io) were defined here; they are referenced
# in the select loop below but are missing from this excerpt -- confirm
# against the original file.
print("Starting control system server...")
print ("Server IP:", UDP_IP)
print ("Server port [receive e(t) from controlled process]:", UDP_PORT_LOCAL)
print ("Server control port [receive modified FOPID parameters]:", UDP_PORT_CTRL)
print ("Remote (client) port [send computed control law u(t)]:", UDP_PORT_REMOTE)
# Local socket: server
locsock = socket.socket(socket.AF_INET, # Internet
                     socket.SOCK_DGRAM) # UDP
# Remote socket: client
remsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Local socket for controlling the server: server
ctrlsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
locsock.bind((UDP_IP, UDP_PORT_LOCAL))
ctrlsock.bind((UDP_IP, UDP_PORT_CTRL))
inp = [locsock, ctrlsock]
print ("Server started successfully. Waiting for communications.")
# Run the select loop
# Blocks until either socket is readable, then dispatches: error-signal
# datagrams drive the controller; control-port datagrams update parameters.
while True:
    iready, _, _ = select(inp, [], [])
    for s in iready:
        if s == locsock:
            do_control_io(locsock, remsock)
        elif s == ctrlsock:
            change_serv_params(ctrlsock)
6738,
277,
404,
312,
62,
13716,
1330,
277,
35317,
198,
6738,
2878,
1330,
2353,
11,
555,
8002,
198,
11748,
17802,
198,
6738,
2922,
1330,
2922,
198,
198,
7061,
6,
198,
1212,
318,
257,
6617,
12,
1659,
12,
43169,
2438,
329,
2491,
1103,
... | 2.82296 | 723 |
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
from HTMLObfuscator import HTMLObfuscator
import optparse
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Parse -i/-o options: input HTML file and destination for the
    # obfuscated output.
    parser = optparse.OptionParser()
    parser.add_option('-i', help='path of the HTML file to obfuscate',
                      dest='html_input_file_path', action='store')
    parser.add_option('-o', help='path where the obfuscated HTML is written',
                      dest='html_output_file_path', action='store')
    (opts, args) = parser.parse_args()
    # Fail with a usage message instead of a TypeError from open(None, ...).
    if not opts.html_input_file_path or not opts.html_output_file_path:
        parser.error('both -i <input file> and -o <output file> are required')
    html_input_file_path = opts.html_input_file_path
    html_output_file_path = opts.html_output_file_path
    html_obfuscator = HTMLObfuscator()
    # Read as UTF-8 to match the UTF-8 output below; previously the input
    # encoding depended on the locale.
    with open(html_input_file_path, "r", encoding="utf-8") as f:
        html = f.read()
    html_obf = html_obfuscator.obfuscate_html(html)
    with open(html_output_file_path, "w", encoding="utf-8") as f:
        f.write(html_obf)
| [
2,
770,
318,
257,
6291,
11361,
4226,
13,
198,
198,
2,
4332,
15576,
10,
37,
940,
284,
12260,
340,
393,
6330,
340,
351,
534,
2438,
13,
198,
2,
4332,
11198,
15576,
284,
2989,
8347,
329,
6097,
11,
3696,
11,
2891,
9168,
11,
4028,
11,
... | 2.793605 | 344 |
from unittest.case import skip
from test.deploy.deploy_test_case import DeployTestCase
| [
6738,
555,
715,
395,
13,
7442,
1330,
14267,
198,
198,
6738,
1332,
13,
2934,
1420,
13,
2934,
1420,
62,
9288,
62,
7442,
1330,
34706,
14402,
20448,
628
] | 3.296296 | 27 |
from .CellState import CellState
import copy
import pyopencl as cl
import sys
import os
import pickle
import csv
import numpy
import inspect
import imp
import configparser
import importlib
class Simulator:
    """
    This class is in charge of running the simulation, creating the various models
    and stepping them forward in time. It is the control interface for the gui
    or script that is running the simulation.

    Stores a map from cell_id to CellState, which stores the current simulation
    state of each cell.

    Constructed on a user-defined python file. This file implements a
    function setup(Simulator, Gui) that constructs the required modules
    (Regulator, Signalling, Integrator), and calls Simulator.init(). It
    can also create Renderers and add them by calling
    Simulator.addRenderer(renderer) so that the simulation can be
    visualised.
    """
    # NOTE(review): only the method comments remain below -- the method
    # implementations appear to have been stripped from this excerpt.
    # Confirm against the original file before relying on this class.
    ## Construct an empty simulator object. This object will not be able to
    # do anything yet unti we use 'init' method to specify the models for
    # physical interaction, genetic circuit, diffusion and integrator.
    ## Get an id for the next cell to be created
    ## Get the index (into flat arrays) of the next cell to be created
    # Currently, the user-defined regulation module creates the
    # biophysics, regulation, and signalling objects in a function
    # setup().
    #
    # We pass in the empty simulator object, ie. setup(sim)
    # and have the user-defined func initialise the 3 modules
    ## Specify models to be used by simulator object. The four inputs are
    # 'phys' = physical model of cell iteractions
    # 'reg' = regulatory model of biochemical circuit in the cell
    # 'sig' = signaling model of intercellular chemical reaction diffusion.
    # 'integ' = integrator
    ## Set up the OpenCL contex, the configuration is set up the first time, and is saved in the config file
    ## Get the OpenCL context and queue for running kernels
    ## set cell states from a given dict
    ## Add a graphics renderer - this should not be here --> GUI
    ## Reset the simulation back to initial conditions
    # Divide a cell to two daughter cells
    ## Add a new cell to the simulator
    #---
    # Some functions to modify existing cells (e.g. from GUI)
    # Eventually prob better to have a generic editCell() that deals with this stuff
    #
    ## Proceed to the next simulation step
    # This method is where objects phys, reg, sig and integ are called
    ## Import cells to the simulator from csv file. The file contains a list of 7-coordinates {pos,dir,len} (comma delimited) of each cell - also, there should be no cells around - ie run this from an empty model instead of addcell
    ## Write current simulation state to an output file
    # Populate simulation from saved data pickle
    # Populate simulation from saved data pickle
6738,
764,
28780,
9012,
1330,
12440,
9012,
198,
11748,
4866,
198,
11748,
12972,
9654,
565,
355,
537,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
269,
21370,
198,
11748,
299,
32152,
198,
11748,
10104,
198,
1174... | 3.66624 | 782 |
## @ingroup Methods-Weights-Correlations-General_Aviation
# landing_gear.py
#
# Created: Feb 2018, M. Vegh
# Modified:
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Core import Data,Units
import numpy as np
# ----------------------------------------------------------------------
# Landing Gear
# ----------------------------------------------------------------------
## @ingroup Methods-Weights-Correlations-General_Aviation
def landing_gear(landing_weight, Nult, strut_length_main, strut_length_nose):
    """
    Calculate the weight of the landing gear

    Source: Raymer- Aircraft Design: a Conceptual Approach (pg 460 in 4th edition)

    Inputs:
        Nult              - ultimate landing load factor
        landing_weight    - landing weight of the aircraft  [kilograms]
        strut_length_main - length of the main gear strut   [meters]
        strut_length_nose - length of the nose gear strut   [meters]

    Outputs:
        output.main - weight of the main landing gear [kilograms]
        output.nose - weight of the nose landing gear [kilograms]

    Assumptions:
        calculating the landing gear weight based on the landing weight,
        load factor, and strut length
    """
    # unpack (convert to lbs / inches for the regression formulas)
    W_l = landing_weight / Units.lbs
    l_n = strut_length_nose / Units.inches
    l_m = strut_length_main / Units.inches

    main_weight = .095 * ((Nult * W_l) ** .768) * (l_m / 12.) ** .409
    nose_weight = .125 * ((Nult * W_l) ** .566) * (l_n / 12.) ** .845

    # pack outputs
    # BUG FIX: the original assigned the Data *class* (output = Data), which
    # would attach the results to the shared class object; instantiate it.
    output = Data()
    output.main = main_weight * Units.lbs
    output.nose = nose_weight * Units.lbs
    return output
2235,
2488,
278,
3233,
25458,
12,
1135,
2337,
12,
10606,
39468,
12,
12218,
62,
7355,
3920,
198,
2,
9581,
62,
31763,
13,
9078,
198,
2,
220,
198,
2,
15622,
25,
220,
3158,
2864,
11,
337,
13,
8016,
456,
198,
2,
40499,
25,
220,
198,
... | 2.830909 | 550 |
import numpy as np
from math import pi
from numpy.linalg import norm
def circle_point(p, q):
    """
    Hamilton (quaternion) product of p and q, scalar component first.
    :param p: a vector (1,4)
    :param q: a vector (1,4)
    :return: a vector with shape (1,4)
    """
    s1, v1 = p[0], p[1:]
    s2, v2 = q[0], q[1:]
    out = np.zeros([4])
    out[0] = s1 * s2 - v1 @ v2
    out[1:] = s1 * v2 + s2 * v1 + np.cross(v1, v2)
    return out
def box_x(u):
    """
    Skew-symmetric (cross-product) matrix of u: box_x(u) @ v == cross(u, v).
    :param u: a vector (1, 3)
    :return: 3 by 3 matrix
    """
    ux, uy, uz = u[0], u[1], u[2]
    return np.array([[0, -uz, uy],
                     [uz, 0, -ux],
                     [-uy, ux, 0]])
def p_L(p):
    """
    Left-multiplication matrix of quaternion p, so p_L(p) @ q == p ⊗ q.
    (Note: the result is 4 by 4, not 3 by 3.)
    :param p: a vector (1, 4)
    :return: a 4 by 4 matrix
    """
    s = p[0]
    v = p[1:]
    top_row = np.hstack((s, -v))
    lower_block = np.hstack((v.reshape([3, 1]),
                             s * np.diag([1, 1, 1]) + box_x(v)))
    return np.vstack((top_row, lower_block))
def q_R(p):
    """
    Right-multiplication matrix of quaternion p, so q_R(p) @ q == q ⊗ p.
    (Note: the result is 4 by 4, not 3 by 3.)
    :param p: a vector (1, 4)
    :return: a 4 by 4 matrix
    """
    s = p[0]
    v = p[1:]
    top_row = np.hstack((s, -v))
    lower_block = np.hstack((v.reshape([3, 1]),
                             s * np.diag([1, 1, 1]) - box_x(v)))
    return np.vstack((top_row, lower_block))
def rotation(q, x):
    """
    Rotate the quaternion-embedded vector x by unit quaternion q: q ⊗ x ⊗ q*.
    :param q: rotation expressed as a unit quaternion (4 by 1)
    :param x: the original orientation (4 by 1) with the first entry 0
    :return: rotated vector (4 by 1) expressed as quaternion
    """
    # NOTE(review): quaternion_conjugate is not defined in the visible part
    # of this module -- confirm it exists alongside these helpers.
    half = circle_point(q, x)
    return circle_point(half, quaternion_conjugate(q))
def rotation_quaternion(nv, a):
    """
    Build the quaternion for a rotation of angle `a` about unit axis `nv`.
    The vector part is -sin(a/2)*nv, i.e. the conjugate ("passive")
    convention used elsewhere in this module.
    :param nv: the unit rotation axis
    :param a: the rotation angle
    :return: a vector (4 * 1) as rotation quaternion
    """
    scalar_part = np.cos(a / 2)
    vector_part = -np.sin(a / 2) * nv
    return np.hstack((scalar_part, vector_part))
def exp_p_exact(eta):
    """
    Exact quaternion exponential of a pure-vector argument:
    exp([0, eta]) = [cos|eta|, sin|eta| * eta/|eta|].
    :param eta: rotation vector (3,)
    :return: unit quaternion (4,)
    """
    angle = norm(eta)
    exp_eta = np.zeros(4)
    exp_eta[0] = np.cos(angle)
    if angle > 0:
        exp_eta[1:] = np.sin(angle) / angle * np.asarray(eta)
    # else: exp(0) == [1, 0, 0, 0]; the original divided by zero here.
    return exp_eta
def transfer_q_to_R(quant):
    """
    Transfer the quaternion to a rotation matrix.
    :param quant: quaternion vector(s), shape (4,) or (N, 4)
    :return: a 3 by 3 rotation matrix for a single quaternion, or an
        (N, 9) array of row-flattened matrices for multiple quaternions
    """
    # NOTE(review): unit_q is not defined in the visible part of this module;
    # it presumably normalizes the quaternion(s) -- confirm.
    q = unit_q(quant).T
    # One matrix entry per row; standard quaternion-to-DCM formula.
    R = np.zeros((9, q.shape[1]))
    R[0] = q[0] ** 2 + q[1] ** 2 - q[2] ** 2 - q[3] ** 2
    R[1] = 2 * (q[1] * q[2] - q[0] * q[3])
    R[2] = 2 * (q[1] * q[3] + q[0] * q[2])
    R[3] = 2 * (q[1] * q[2] + q[0] * q[3])
    R[4] = q[0] ** 2 - q[1] ** 2 + q[2] ** 2 - q[3] ** 2
    R[5] = 2 * (q[2] * q[3] - q[0] * q[1])
    R[6] = 2 * (q[1] * q[3] - q[0] * q[2])
    R[7] = 2 * (q[2] * q[3] + q[0] * q[1])
    R[8] = q[0] ** 2 - q[1] ** 2 - q[2] ** 2 + q[3] ** 2
    # Single quaternion: reshape the 9 entries into a 3x3 matrix.
    if R.shape[1] == 1:
        return np.reshape(R, (3, 3))
    else:
        return R.T
def quaternion_representation(p):
    """
    Embed a 3-vector as a pure quaternion [0, p].
    :param p: a vector (3 * 1)
    :return: the quaternion representation of the vector
    """
    return np.hstack((0.0, p))
def transfer_matirx_to_orientation(R):
    """
    Extract (yaw, pitch, roll) in degrees from a 3 by 3 rotation matrix.
    Uses arctan, so angles are limited to (-90, 90) degrees per axis.
    :param R: the 3 by 3 rotation matrix
    :return: (yaw, pitch, roll)
    """
    rad2deg = 180 / pi
    yaw = np.arctan(R[0, 1] / R[0, 0]) * rad2deg
    pitch = -np.arcsin(R[0, 2]) * rad2deg
    roll = np.arctan(R[1, 2] / R[2, 2]) * rad2deg
    return yaw, pitch, roll
def transfer_quanternion_to_orientation(q):
"""
transfer quanternion representation to orientation (yaw, pitch and roll)
:param q: the quanternion representation
:return: (yaw, pitch, roll)
"""
R = transfer_q_to_R(q)
yaw, pitch, roll = transfer_matirx_to_orientation(R)
return [yaw, pitch, roll]
def q_inv(q):
''' Quaternion inversion
Parameters
----------
q: array_like, shape ([3,4],) or (N,[3/4])
quaternion or quaternion vectors
Returns
-------
qinv : inverse quaternion(s)
Notes
-----
.. math::
q^{-1} = \\frac{q_0 - \\vec{q}}{|q|^2}
More info under
http://en.wikipedia.org/wiki/Quaternion
Examples
--------
>>> quat.q_inv([0,0,0.1])
array([-0., -0., -0.1])
>>> quat.q_inv([[cos(0.1),0,0,sin(0.1)],
>>> [cos(0.2),0,sin(0.2),0]])
array([[ 0.99500417, -0. , -0. , -0.09983342],
[ 0.98006658, -0. , -0.19866933, -0. ]])
'''
q = np.atleast_2d(q)
if q.shape[1] == 3:
return -q
else:
qLength = np.sum(q ** 2, 1)
qConj = q * np.r_[1, -1, -1, -1]
return (qConj.T / qLength).T
def normalize(v):
''' Normalization of a given vector (with image)
Parameters
----------
v : array (N,) or (M,N)
input vector
Returns
-------
v_normalized : array (N,) or (M,N)
normalized input vector
.. image:: ../docs/Images/vector_normalize.png
:scale: 33%
Example
-------
>>> skinematics.vector.normalize([3, 0, 0])
array([[ 1., 0., 0.]])
>>> v = [[np.pi, 2, 3], [2, 0, 0]]
>>> skinematics.vector.normalize(v)
array([[ 0.6569322 , 0.41821602, 0.62732404],
[ 1. , 0. , 0. ]])
Notes
-----
.. math::
\\vec{n} = \\frac{\\vec{v}}{|\\vec{v}|}
'''
from numpy.linalg import norm
if np.array(v).ndim == 1:
vectorFlag = True
else:
vectorFlag = False
v = np.double(np.atleast_2d(v)) # otherwise I get in trouble 2 lines down, if v is integer!
length = norm(v, axis=1)
v[length != 0] = (v[length != 0].T / length[length != 0]).T
if vectorFlag:
v = v.ravel()
return v
def convert(rMat, to='quat'):
"""
Converts a rotation matrix to the corresponding quaternion.
Assumes that R has the shape (3,3), or the matrix elements in columns
Parameters
----------
rMat : array, shape (3,3) or (N,9)
single rotation matrix, or matrix with rotation-matrix elements.
to : string
Currently, only 'quat' is supported
Returns
-------
outQuat : array, shape (4,) or (N,4)
corresponding quaternion vector(s)
Notes
-----
.. math::
\\vec q = 0.5*copysign\\left( {\\begin{array}{*{20}{c}}
{\\sqrt {1 + {R_{xx}} - {R_{yy}} - {R_{zz}}} ,}\\\\
{\\sqrt {1 - {R_{xx}} + {R_{yy}} - {R_{zz}}} ,}\\\\
{\\sqrt {1 - {R_{xx}} - {R_{yy}} + {R_{zz}}} ,}
\\end{array}\\begin{array}{*{20}{c}}
{{R_{zy}} - {R_{yz}}}\\\\
{{R_{xz}} - {R_{zx}}}\\\\
{{R_{yx}} - {R_{xy}}}
\\end{array}} \\right)
More info under
http://en.wikipedia.org/wiki/Quaternion
Examples
--------
>>> rotMat = array([[cos(alpha), -sin(alpha), 0],
>>> [sin(alpha), cos(alpha), 0],
>>> [0, 0, 1]])
>>> rotmat.convert(rotMat, 'quat')
array([[ 0.99500417, 0. , 0. , 0.09983342]])
"""
if to != 'quat':
raise IOError('Only know "quat"!')
if rMat.shape == (3, 3) or rMat.shape == (9,):
rMat = np.atleast_2d(rMat.ravel()).T
else:
rMat = rMat.T
q = np.zeros((4, rMat.shape[1]))
R11 = rMat[0]
R12 = rMat[1]
R13 = rMat[2]
R21 = rMat[3]
R22 = rMat[4]
R23 = rMat[5]
R31 = rMat[6]
R32 = rMat[7]
R33 = rMat[8]
# Catch small numerical inaccuracies, but produce an error for larger problems
epsilon = 1e-10
if np.min(np.vstack((1 + R11 - R22 - R33, 1 - R11 + R22 - R33, 1 - R11 - R22 + R33))) < -epsilon:
raise ValueError('Problems with defintion of rotation matrices')
q[1] = 0.5 * np.copysign(np.sqrt(np.abs(1 + R11 - R22 - R33)), R32 - R23)
q[2] = 0.5 * np.copysign(np.sqrt(np.abs(1 - R11 + R22 - R33)), R13 - R31)
q[3] = 0.5 * np.copysign(np.sqrt(np.abs(1 - R11 - R22 + R33)), R21 - R12)
q[0] = np.sqrt(1 - (q[1] ** 2 + q[2] ** 2 + q[3] ** 2))
return q.T
def unit_q(inData):
''' Utility function, which turns a quaternion vector into a unit quaternion.
If the input is already a full quaternion, the output equals the input.
Parameters
----------
inData : array_like, shape (3,) or (N,3)
quaternions or quaternion vectors
Returns
-------
quats : array, shape (4,) or (N,4)
corresponding unit quaternions.
Notes
-----
More info under
http://en.wikipedia.org/wiki/Quaternion
Examples
--------
>>> quats = array([[0,0, sin(0.1)],[0, sin(0.2), 0]])
>>> quat.unit_q(quats)
array([[ 0.99500417, 0. , 0. , 0.09983342],
[ 0.98006658, 0. , 0.19866933, 0. ]])
'''
inData = np.atleast_2d(inData)
(m, n) = inData.shape
if (n != 3) & (n != 4):
raise ValueError('Quaternion must have 3 or 4 columns')
if n == 3:
qLength = 1 - np.sum(inData ** 2, 1)
numLimit = 1e-12
# Check for numerical problems
if np.min(qLength) < -numLimit:
raise ValueError('Quaternion is too long!')
else:
# Correct for numerical problems
qLength[qLength < 0] = 0
outData = np.hstack((np.c_[np.sqrt(qLength)], inData))
else:
outData = inData
return outData
def q_mult(p, q):
'''
Quaternion multiplication: Calculates the product of two quaternions r = p * q
If one of both of the quaterions have only three columns,
the scalar component is calculated such that the length
of the quaternion is one.
The lengths of the quaternions have to match, or one of
the two quaternions has to have the length one.
If both p and q only have 3 components, the returned quaternion
also only has 3 components (i.e. the quaternion vector)
Parameters
----------
p,q : array_like, shape ([3,4],) or (N,[3,4])
quaternions or quaternion vectors
Returns
-------
r : quaternion or quaternion vector (if both
p and q are contain quaternion vectors).
Notes
-----
.. math::
q \\circ p = \\sum\\limits_{i=0}^3 {q_i I_i} * \\sum\\limits_{j=0}^3 \\
{p_j I_j} = (q_0 p_0 - \\vec{q} \\cdot \\vec{p}) + (q_0 \\vec{p} + p_0 \\
\\vec{q} + \\vec{q} \\times \\vec{p}) \\cdot \\vec{I}
More info under
http://en.wikipedia.org/wiki/Quaternion
Examples
--------
>>> p = [cos(0.2), 0, 0, sin(0.2)]
>>> q = [[0, 0, 0.1],
>>> [0, 0.1, 0]]
>>> r = quat.q_mult(p,q)
'''
flag3D = False
p = np.atleast_2d(p)
q = np.atleast_2d(q)
if p.shape[1] == 3 & q.shape[1] == 3:
flag3D = True
if len(p) != len(q):
assert (len(p) == 1 or len(q) == 1), \
'Both arguments in the quaternion multiplication must have the same number of rows, unless one has only one row.'
p = unit_q(p).T
q = unit_q(q).T
if np.prod(np.shape(p)) > np.prod(np.shape(q)):
r = np.zeros(np.shape(p))
else:
r = np.zeros(np.shape(q))
r[0] = p[0] * q[0] - p[1] * q[1] - p[2] * q[2] - p[3] * q[3]
r[1] = p[1] * q[0] + p[0] * q[1] + p[2] * q[3] - p[3] * q[2]
r[2] = p[2] * q[0] + p[0] * q[2] + p[3] * q[1] - p[1] * q[3]
r[3] = p[3] * q[0] + p[0] * q[3] + p[1] * q[2] - p[2] * q[1]
if flag3D:
# for rotations > 180 deg
r[:, r[0] < 0] = -r[:, r[0] < 0]
r = r[1:]
r = r.T
return r
def rotate_vector(vector, q):
'''
Rotates a vector, according to the given quaternions.
Note that a single vector can be rotated into many orientations;
or a row of vectors can all be rotated by a single quaternion.
Parameters
----------
vector : array, shape (3,) or (N,3)
vector(s) to be rotated.
q : array_like, shape ([3,4],) or (N,[3,4])
quaternions or quaternion vectors.
Returns
-------
rotated : array, shape (3,) or (N,3)
rotated vector(s)
.. image:: ../docs/Images/vector_rotate_vector.png
:scale: 33%
Notes
-----
.. math::
q \\circ \\left( {\\vec x \\cdot \\vec I} \\right) \\circ {q^{ - 1}} = \\left( {{\\bf{R}} \\cdot \\vec x} \\right) \\cdot \\vec I
More info under
http://en.wikipedia.org/wiki/Quaternion
Examples
--------
>>> mymat = eye(3)
>>> myVector = r_[1,0,0]
>>> quats = array([[0,0, sin(0.1)],[0, sin(0.2), 0]])
>>> quat.rotate_vector(myVector, quats)
array([[ 0.98006658, 0.19866933, 0. ],
[ 0.92106099, 0. , -0.38941834]])
>>> quat.rotate_vector(mymat, [0, 0, sin(0.1)])
array([[ 0.98006658, 0.19866933, 0. ],
[-0.19866933, 0.98006658, 0. ],
[ 0. , 0. , 1. ]])
'''
vector = np.atleast_2d(vector)
qvector = np.hstack((np.zeros((vector.shape[0], 1)), vector))
vRotated = q_mult(q, q_mult(qvector, q_inv(q)))
vRotated = vRotated[:, 1:]
if min(vRotated.shape) == 1:
vRotated = vRotated.ravel()
return vRotated | [
11748,
299,
32152,
355,
45941,
198,
6738,
10688,
1330,
31028,
198,
6738,
299,
32152,
13,
75,
1292,
70,
1330,
2593,
628,
198,
4299,
9197,
62,
4122,
7,
79,
11,
10662,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2041,
10688,
4... | 2.032021 | 6,527 |
'''
Created on Dec 23, 2016
@author: safdar
'''
import cv2
from operations.baseoperation import Operation
import pickle
from utils.plotter import Image
from utils.plotter import Graph
| [
7061,
6,
198,
41972,
319,
4280,
2242,
11,
1584,
198,
198,
31,
9800,
25,
1932,
27455,
198,
7061,
6,
198,
11748,
269,
85,
17,
198,
6738,
4560,
13,
8692,
27184,
1330,
14680,
198,
11748,
2298,
293,
198,
6738,
3384,
4487,
13,
29487,
353,... | 3.363636 | 55 |
# Copyright 2018 Timo Nolle
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
import numpy as np
from sklearn import metrics
from april.anomalydetection.utils import anomaly_ratio
from april.enums import Heuristic
def best_heuristic(taus, theta, y_true, **kwargs):
"""h_best in the paper."""
f1s = [metrics.f1_score(y_true.compressed(), theta(tau=tau, **kwargs).compressed()) for tau in taus]
return taus[np.argmax(f1s)]
def elbow_heuristic(taus, theta, **kwargs):
"""h_elbow in the paper."""
if len(taus) < 4:
return taus[-1]
r = np.array([anomaly_ratio(theta(tau=tau, **kwargs)) for tau in taus])
step = taus[1:] - taus[:-1]
r_prime_prime = (r[2:] - 2 * r[1:-1] + r[:-2]) / (step[1:] * step[:-1])
return {
Heuristic.ELBOW_DOWN: taus[np.argmax(r_prime_prime) + 1],
Heuristic.ELBOW_UP: taus[np.argmin(r_prime_prime) + 1]
}
| [
2,
220,
15069,
2864,
5045,
78,
399,
349,
293,
198,
2,
198,
2,
220,
770,
1430,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
220,
340,
739,
262,
2846,
286,
262,
22961,
3611,
5094,
13789,
355,
3199,
... | 2.79078 | 564 |
'''
Author: Leon-Francis
Date: 2021-07-07 22:13:56
Contact: leon_francis@163.com
LastEditTime: 2021-07-09 00:46:49
LastEditors: Leon-Francis
Description: socket_server
FilePath: /Network_Security_Experiment/client.py
(C)Copyright 2020-2021, Leon-Francis
'''
import socket
from crypto import get_RSA_keys, get_random_bytes, crypto_encode, crypto_decode, get_msg_len, receive_nonce, send_nonce
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA256
import sys
HOST = socket.gethostname()
PORT = 11223
MSG_HEADER_LEN = 10
with socket.socket() as s:
s.connect((HOST, PORT))
print(f'Connected to {HOST}:{PORT}')
private_key, public_key = get_RSA_keys()
s.send(public_key)
server_pub_key = s.recv(271)
while True:
print('1, send message')
print('2, upload file')
print('3, download file')
print('4, close the connect')
command = input('Plz input the number 1-4: ')
if command == '4':
s.send(b'\04')
break
if command == '1':
message = input('Plz input the massage: ')
s.send(b'\31')
message, nonce = crypto_encode(message, private_key,
server_pub_key)
send_nonce(s, private_key, server_pub_key, nonce)
s.send(get_msg_len(message).encode('utf-8'))
s.send(message)
if command == '2':
file_name = input('Plz input the file name: ')
file_path = 'file_2/' + file_name
s.send(b'\32')
file_name, nonce = crypto_encode(file_name, private_key,
server_pub_key)
send_nonce(s, private_key, server_pub_key, nonce)
s.send(get_msg_len(file_name).encode('utf-8'))
s.send(file_name)
with open(file_path, 'r') as f:
for line in f:
s.send(b'\02')
line, nonce = crypto_encode(line, private_key,
server_pub_key)
send_nonce(s, private_key, server_pub_key, nonce)
s.send(get_msg_len(line).encode('utf-8'))
s.send(line)
s.send(b'\03')
if command == '3':
s.send(b'\33')
print('The file in server is as follows:')
while True:
more_file_name = s.recv(1)
if more_file_name == b'\03':
break
nonce = receive_nonce(s, private_key, server_pub_key)
file_name_header = s.recv(MSG_HEADER_LEN)
file_name_len = int(file_name_header.decode('utf-8'))
file_name = s.recv(file_name_len)
file_name = crypto_decode(file_name, private_key,
server_pub_key, nonce)
print(file_name)
download_file_name = input(
'Which one you want to download?(-1 to quit) ')
if download_file_name == '-1':
s.send(b'\03')
else:
s.send(b'\02')
old_file_name = download_file_name
file_path = 'file_2/' + download_file_name
download_file_name, nonce = crypto_encode(
download_file_name, private_key, server_pub_key)
send_nonce(s, private_key, server_pub_key, nonce)
s.send(get_msg_len(download_file_name).encode('utf-8'))
s.send(download_file_name)
with open(file_path, 'w') as f:
while True:
more_line = s.recv(1)
if more_line == b'\03':
break
nonce = receive_nonce(s, private_key, server_pub_key)
line_header = s.recv(MSG_HEADER_LEN)
line_len = int(line_header.decode('utf-8'))
line = s.recv(line_len)
line = crypto_decode(line, private_key, server_pub_key,
nonce)
f.writelines(line)
print(f'download {old_file_name} from {HOST}')
print('Disconnect') | [
7061,
6,
198,
13838,
25,
10592,
12,
42885,
271,
198,
10430,
25,
33448,
12,
2998,
12,
2998,
2534,
25,
1485,
25,
3980,
198,
17829,
25,
443,
261,
62,
8310,
1192,
271,
31,
24136,
13,
785,
198,
5956,
18378,
7575,
25,
33448,
12,
2998,
1... | 1.780041 | 2,455 |
import unittest
from flatten import parse_line
import extractionMap as extractionMap
# userClick: "tutorial.scr,9-13-2018,12:16:12:890,1536866172890,1,1.0,userClick:297_639;region:QnA;NA;NA,false,false,false,false,false,false"
# clickActionLabel: "tutorial.scr,9-13-2018,13:58:54:296,1536872334296,75,75.2,userClick:476_477;region:gameArea;target:actionLabel-step-75-action-AttackQ4;clickActionLabel:D2ESCAPED-COLON Attack Q4,false,false,false,false,false,false"
# clickActionLabelDenied: "tutorial.scr,9-13-2018,12:22:39:120,1536866559120,1,1.0,userClick:469_478;region:gameArea;target:actionLabel-step-75-action-AttackQ4;clickActionLabelDenied:D2ESCAPED-COLON Attack Q4,false,false,false,false,false,false"
# touchCumRewardLabel: "tutorial.scr,9-13-2018,13:48:47:455,1536871727455,1,1.0,userClick:85_80;region:gameArea;target:null;touchCumRewardLabel:total score,false,false,false,false,false,false"
# touchCumRewardValueFor: "tutorial.scr,9-13-2018,13:49:23:343,1536871763343,1,1.0,userClick:206_75;region:gameArea;target:rewardtotalscore;touchCumRewardValueFor:total score,false,false,false,false,false,false"
| [
11748,
555,
715,
395,
198,
6738,
27172,
268,
1330,
21136,
62,
1370,
198,
11748,
22236,
13912,
355,
22236,
13912,
198,
2,
2836,
8164,
25,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.164336 | 572 |
from macpie import lltools
| [
6738,
8352,
21749,
1330,
32660,
31391,
628,
628,
628,
628,
628,
628,
198
] | 3 | 13 |
__author__ = 'ershadmoi'
import re
import sys
# Small utility method to copy between two streams
# Main method that will do code generation magic
# Call main only if explicity invoked
if __name__ == "__main__":
main(sys.argv)
| [
834,
9800,
834,
796,
705,
364,
18108,
5908,
72,
6,
198,
198,
11748,
302,
198,
11748,
25064,
198,
198,
2,
10452,
10361,
2446,
284,
4866,
1022,
734,
15190,
198,
198,
2,
8774,
2446,
326,
481,
466,
2438,
5270,
5536,
198,
198,
2,
4889,
... | 3.232877 | 73 |
from django.conf import settings
from storages.backends.s3boto3 import S3Boto3Storage
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
336,
273,
1095,
13,
1891,
2412,
13,
82,
18,
65,
2069,
18,
1330,
311,
18,
33,
2069,
18,
31425,
628,
628
] | 2.966667 | 30 |
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from maxfw.model import MAXModelWrapper
import numpy as np
import re
import tensorflow as tf
from tensorflow.python.saved_model import tag_constants
import logging
from core.utils import get_processing_word, load_vocab, pad_sequences
from config import DEFAULT_MODEL_PATH, MODEL_META_DATA as model_meta
logger = logging.getLogger()
| [
2,
198,
2,
15069,
2864,
12,
23344,
19764,
11421,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
118... | 3.560606 | 264 |
'''
Problem 47
The first two consecutive numbers to have two distinct prime factors are:
14 = 2 × 7
15 = 3 × 5
The first three consecutive numbers to have three distinct prime factors are:
644 = 2² × 7 × 23
645 = 3 × 5 × 43
646 = 2 × 17 × 19.
Find the first four consecutive integers to have four distinct prime factors each. What is the first of these numbers?
ITS SUPER SLOW LOL, but works
'''
import numpy as np
from math import sqrt
from functools import reduce
#great function found in stackoverflow by agf
if __name__=='__main__':
main() | [
7061,
6,
198,
40781,
6298,
198,
464,
717,
734,
12785,
3146,
284,
423,
734,
7310,
6994,
5087,
389,
25,
198,
198,
1415,
796,
362,
13958,
767,
198,
1314,
796,
513,
13958,
642,
198,
198,
464,
717,
1115,
12785,
3146,
284,
423,
1115,
7310... | 3.423313 | 163 |
import jieba
test_sent = (
"李小福是创新办主任也是云计算方面的专家; 什么是八一双鹿\n"
"例如我输入一个带“韩玉赏鉴”的标题,在自定义词库中也增加了此词为N类\n"
"「台中」正確應該不會被切開。mac上可分出「石墨烯」;此時又可以分出來凱特琳了。"
)
words = jieba.cut(test_sent)
print('/'.join(words))
# -------------加载自定义词典----------------
# import jieba
# jieba.load_userdict("userdict.txt")
# import jieba.posseg as pseg
#
# jieba.add_word('石墨烯')
# jieba.add_word('凱特琳')
# jieba.del_word('自定义词')
#
# test_sent = (
# "李小福是创新办主任也是云计算方面的专家; 什么是八一双鹿\n"
# "例如我输入一个带“韩玉赏鉴”的标题,在自定义词库中也增加了此词为N类\n"
# "「台中」正確應該不會被切開。mac上可分出「石墨烯」;此時又可以分出來凱特琳了。"
# )
# words = jieba.cut(test_sent)
# print('/'.join(words))
# --------------加载自定义词典结束-------------
"""
<不使用自定义词典进行分割>
李小福/是/创新/办/主任/也/是/云/计算/方面/的/专家/;/ /什么/是/八/一双/鹿/
/例如/我/输入/一个/带/“/韩玉/赏鉴/”/的/标题/,/在/自定义词/库中/也/增加/了/此/词为/N/类/
/「/台/中/」/正確/應該/不會/被/切開/。/mac/上/可/分出/「/石墨/烯/」/;/此時/又/可以/分出/來凱/特琳/了/。
<使用自定义词典进行分割>
李小福/是/创新办/主任/也/是/云计算/方面/的/专家/;/ /什么/是/八一双鹿/
/例如/我/输入/一个/带/“/韩玉赏鉴/”/的/标题/,/在/自定义/词库/中/也/增加/了/此/词为/N/类/
/「/台中/」/正確/應該/不會/被/切開/。/mac/上/可/分出/「/石墨烯/」/;/此時/又/可以/分出/來/凱特琳/了/。
从上面的两个对比中可以看出来 "创新办","云计算","凯特琳","石墨烯","韩玉鉴赏","台中"等都被完整的切分出来了
""" | [
11748,
474,
494,
7012,
628,
198,
9288,
62,
34086,
796,
357,
198,
1,
30266,
236,
22887,
237,
163,
99,
237,
42468,
26344,
249,
23877,
108,
27950,
252,
10310,
119,
20015,
119,
20046,
253,
42468,
12859,
239,
164,
106,
94,
163,
106,
245,
... | 0.791636 | 1,363 |
"""
Implementation of attack methods. Running this file as a program will
apply the attack to the model specified by the config file and store
the examples in an .npy file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from scipy.spatial.distance import pdist, squareform
from matplotlib import pyplot as plt
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = '2'
DATA_DIR = './data/fashion'
import time
if __name__ == '__main__':
import json
import sys
import math
from tensorflow.examples.tutorials.mnist import input_data
from model import Model
with open('config.json') as config_file:
config = json.load(config_file)
model_file = tf.train.latest_checkpoint(config['model_dir'])
if model_file is None:
print('No model found')
sys.exit()
model = Model()
attack = LinfPGDAttack(model,
config['epsilon'],
config['k'],
config['a'],
config['random_start'],
config['loss_func'])
saver = tf.train.Saver()
fmnist = input_data.read_data_sets(DATA_DIR, one_hot=False)
with tf.Session() as sess:
# Restore the checkpoint
saver.restore(sess, model_file)
# Iterate over the samples batch-by-batch
num_eval_examples = config['num_eval_examples']
eval_batch_size = config['eval_batch_size']
num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
print('Iterating over {} batches'.format(num_batches))
x_adv_final = np.copy(fmnist.test.images)
for restart in range(10):
# Initialize permutation
permutation = np.arange(num_eval_examples)
idx = np.arange(num_eval_examples)
# Initialize data
x_test, y_test = np.copy(fmnist.test.images), np.copy(fmnist.test.labels)
x_adv = x_test + np.random.uniform(-attack.epsilon, attack.epsilon, x_test.shape)
# per round
t0 = time.time()
for ibatch in range(num_batches):
bstart = ibatch * eval_batch_size
bend = min(bstart + eval_batch_size, num_eval_examples)
x_batch = x_test[bstart:bend, :]
x_batch_adv = x_adv[bstart:bend, :]
y_batch = y_test[bstart:bend]
x_adv[bstart:bend, :] = attack.perturb(x_batch, x_batch_adv, y_batch, sess)
print('round Linf: ', np.max(np.abs(x_adv - fmnist.test.images)))
print('round adv acc: ', sess.run(attack.model.accuracy, feed_dict={attack.model.x_input: x_adv,
attack.model.y_input: fmnist.test.labels}))
prediction = sess.run(attack.model.correct_prediction, feed_dict={attack.model.x_input: x_adv,
attack.model.y_input: fmnist.test.labels})
## Replace with wrong sample
for i in range(prediction.shape[0]):
if not prediction[i]:
x_adv_final[i] = x_adv[i]
t1 = time.time()
print('restart: ', restart, ' time per batch: ', t1 - t0)
print('L2: ', np.mean(np.square(x_adv_final - fmnist.test.images)))
print('Linf: ', np.max(np.abs(x_adv_final - fmnist.test.images)))
print('adv acc: ', sess.run(attack.model.accuracy, feed_dict={attack.model.x_input: x_adv_final,
attack.model.y_input: fmnist.test.labels}))
print('Storing examples')
path = config['store_adv_path']
np.save(path, x_adv_final)
print('Examples stored in {}'.format(path))
| [
37811,
201,
198,
3546,
32851,
286,
1368,
5050,
13,
18162,
428,
2393,
355,
257,
1430,
481,
201,
198,
39014,
262,
1368,
284,
262,
2746,
7368,
416,
262,
4566,
2393,
290,
3650,
201,
198,
1169,
6096,
287,
281,
764,
77,
9078,
2393,
13,
20... | 2.04375 | 1,920 |
#!/usr/bin/python3
'''
Semelhantes aos scripts for_sem_else_v1 e for_com_else_v2
Porem usando conjunto,set
Ficando bem mais simples de entender
Explicacao das linhas 26 e 27
Chamamos o metodo intersection do conjunto PALAVRAS_PROIBIDAS
Ele precisa de outro conjunto para funcionar
Usando o set, transformandomos o variavel texto em um conjunto
Usando o lower para ficar tudo minusculo e o
split para cortar a frase em palavras pelo espacoes em branco
Caso a variavel intersecao nao tenha nenhum valor o python
interpretara como Falso caso contrário vendadeiro
'''
# Criando um conjunto
PALAVRAS_PROIBIDAS = {'futebol', 'religião', 'política'}
textos = [
'João gosta de futebol e política',
'A praia foi divertida',
]
for texto in textos:
intersecao = PALAVRAS_PROIBIDAS.intersection(set(texto.lower().split()))
if intersecao:
print('Texto possui palavras proibidas:', intersecao)
else:
print('Texto autorizado:', texto)
# Fontes:
# Curso Python 3 - Curso Completo do Básico ao Avançado Udemy Aula 84 a 86
# https://github.com/cod3rcursos/curso-python/tree/master/estruturas_controle
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
7061,
6,
198,
13900,
417,
71,
39781,
257,
418,
14750,
329,
62,
43616,
62,
17772,
62,
85,
16,
304,
329,
62,
785,
62,
17772,
62,
85,
17,
198,
47,
29625,
514,
25440,
11644,
403,
1... | 2.44586 | 471 |
"""Package to visualise and simulate a Rubik's Cube."""
class Cube:
"""
A class which represents a Rubik's Cube.
Attributes
----------
cube : list of list of list of str
3-dimentional list of size 6*N*N for an NxN cube which stores
all stickers of the cube. Each sticker must be a character in
'ULFRBD' and can be accessed by cube[s][y][x] where s is the
face (0-5 representing the U, L, F, R, B, D faces respectively)
and y, x are the row and column indexes of the sticker.
size : int
Number of cubies on each edge of the cube.
moves : list of str
List of moves applied to the cube after smoves.
smoves : list of str
List of moves applied as a scramble or as set-up moves.
show_style : dict of {str: int or list}
Stores the default or last used style for showing the cube.
colours : dict of {str: tuple of int}
Stores the RGB values for the colour of each of the 6 faces.
Raises
------
CubeError
If there is a problem which stops a cube method.
MoveError
If a move cannot be interpreted or is invalid.
"""
from ._colour import colour
from ._input import input
from ._invert import invert
from ._link import link
from ._mirror import mirror
from ._move import move
from ._movecount import movecount
from ._play import play
from ._repeat import repeat
from ._reset import reset
from ._scramble import scramble
from ._show import show, show2D, show3D
from ._simplify import simplify
from ._solve import solve
from ._speedsolve import speedsolve
from ._undo import undo
def __init__(self, size: int = 3):
"""
Initialise Cube class with size of cube.
Parameters
----------
size : int, default=3
Number of cubies on each edge of the cube.
"""
self.cube = [[[i]*size for _ in range(size)] for i in 'ULFRBD']
self.size = size
self.moves = []
self.smoves = []
self.show_style = {
'DIMENSION': 3,
'3D': [38, 28, 0.1, 180, 9],
'2D': ['\u2588\u2588', 'fg,colour', True]
}
self.colours = {
'U': (255,255,255),
'L': (255,165,0),
'F': (0,128,0),
'R': (255,0,0),
'B': (0,0,255),
'D': (255,255,0)
}
class CubeError(Exception):
"""Raise when there is a problem which stops a cube method."""
class MoveError(Exception):
"""Raise when a move cannot be interpreted or is invalid."""
| [
37811,
27813,
284,
5874,
786,
290,
29308,
257,
6256,
1134,
338,
23315,
526,
15931,
201,
198,
201,
198,
201,
198,
4871,
23315,
25,
201,
198,
220,
220,
220,
37227,
201,
198,
220,
220,
220,
317,
1398,
543,
6870,
257,
6256,
1134,
338,
2... | 2.319421 | 1,174 |
f1 = open("./part/pic-import.tex",mode="r",encoding="utf-8")
content = f1.read()
f1.close()
rep = content.replace(r' \\','')
with open("./part/pic-import.tex",mode="w",encoding="utf-8") as f2:
f2.write(rep)
| [
69,
16,
796,
1280,
7,
1911,
14,
3911,
14,
16564,
12,
11748,
13,
16886,
1600,
14171,
2625,
81,
1600,
12685,
7656,
2625,
40477,
12,
23,
4943,
198,
11299,
796,
277,
16,
13,
961,
3419,
198,
69,
16,
13,
19836,
3419,
198,
198,
7856,
796... | 2.193878 | 98 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layer which represents linear function. See class level comment.
This layer applies a linear transformation to the input tensor with an optional
bias term. It supports monotonicity, monotonic dominance and fixed-norm
constraints.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import linear_lib
from . import utils
import numpy as np
import tensorflow as tf
from tensorflow import keras
LINEAR_LAYER_KERNEL_NAME = "linear_layer_kernel"
LINEAR_LAYER_BIAS_NAME = "linear_layer_bias"
class Linear(keras.layers.Layer):
# pyformat: disable
"""Layer which represents linear function.
Monotonicity can be specified for any input dimension in which case learned
weight for that dimension is guaranteed to be either non negative for
increasing or non positive for decreasing monotonicity.
Monotonic dominance can be specified for any pair of dimensions referred to as
*dominant* and *weak* dimensions such that the effect (slope) in the direction
of the *dominant* dimension to be greater than that of the *weak* dimension
for any point. Both dominant and weak dimensions must be increasing.
Range dominance can be specified for any pair of *dominant* and *weak*
dimensions such that the range of possible outputs to be greater if one varies
the *dominant* dimension than if one varies the *weak* dimension for any
point. We require the slope of the *dominant* dimension scaled by its input
range to be greater than the slope of the *weak* dimension similarly scaled by
its input range. Both dimensions must have the same direction of monotonicity
and their input min and max must be provided.
Weights can be constrained to have a fixed norm.
Input shape:
- if `units == 1`: tensor of shape: `(batch_size, num_input_dims)`.
- if `units > 1`: tensor of shape: `(batch_size, units, num_input_dims)`
Output shape:
Rank-2 tensor with shape: (batch_size, units)
Attributes:
- All `__init__ `arguments.
kernel: layer's kernel.
bias: layer's bias. Only available if `use_bias == True`.
Example:
```python
layer = tfl.layers.Linear(
num_input_dims=8,
# Monotonicity constraints can be defined per dimension or for all dims.
monotonicities='increasing',
use_bias=True,
# You can force the L1 norm to be 1. Since this is a monotonic layer,
# the coefficients will sum to 1, making this a "weighted average".
normalization_order=1)
```
"""
# pyformat: enable
def __init__(self,
num_input_dims,
units=1,
monotonicities=None,
monotonic_dominances=None,
range_dominances=None,
input_min=None,
input_max=None,
use_bias=True,
normalization_order=None,
kernel_initializer="random_uniform",
bias_initializer="random_uniform",
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""initializes an instance of `Linear`.
Args:
num_input_dims: Number of input dimensions.
units: Output dimension of the layer.
monotonicities: None or list or tuple of length 'num_input_dims' of
{'decreasing', 'none', 'increasing', -1, 0, 1} which specifies if the
model output should be monotonic in corresponding feature, using
'increasing' or 1 to indicate increasing monotonicity, 'decreasing' or
-1 to indicate decreasing monotonicity and 'none' or 0 to indicate no
monotonicity constraints..
In case of decreasing monotonicity corresponding weight will be
constrained to be non positive, in case of increasing non-negative.
Instead of a list or tuple single value can be specified to indicate the
monotonicity constraint across all dimensions.
monotonic_dominances: None or list of two-element tuples. First element is
the index of the dominant dimension. Second element is the index of the
weak dimension.
range_dominances: None or list of two-element tuples. First element is the
index of the dominant dimension. Second element is the index of the weak
dimension. Both dominant and weak dimensions must have input_min and
input_max set.
input_min: None of list or tuple of length 'num_input_dims' of either
'none' or float which specifies the minimum value to clip by for each
dimension.
input_max: None of list or tuple of length 'num_input_dims' of either
'none' or float which specifies the maximum value to clip by for each
dimension.
use_bias: Whether linear function has bias.
normalization_order: If specified learned weights will be adjusted to have
norm 1. Norm will be computed by: `tf.norm(tensor,
ord=normalization_order)`.
kernel_initializer: Any keras initializer to be applied to kernel.
bias_initializer: Any keras initializer to be applied to bias. Only valid
if `use_bias == True`.
kernel_regularizer: None or single element or list of any Keras
regularizer objects.
bias_regularizer: None or single element or list of any Keras regularizer
objects.
**kwargs: Other args passed to `tf.keras.layers.Layer` initializer.
Raises:
ValueError: if monotonicity specified incorrectly.
"""
super(Linear, self).__init__(**kwargs)
self.num_input_dims = num_input_dims
self.units = units
if isinstance(monotonicities, list) or isinstance(monotonicities, tuple):
self.monotonicities = list(monotonicities)
elif monotonicities is not None:
self.monotonicities = [monotonicities] * self.num_input_dims
else:
self.monotonicities = [0] * self.num_input_dims
self.monotonic_dominances = monotonic_dominances
self.range_dominances = range_dominances
self.input_min = input_min
self.input_max = input_max
# Verify hyperparameters after converting monotonicities to list because
# internally everything expects monotonicites to be list or tuple rather
# than single element.
linear_lib.verify_hyperparameters(
num_input_dims=self.num_input_dims, monotonicities=self.monotonicities)
self.use_bias = use_bias
self.normalization_order = normalization_order
self.kernel_initializer = keras.initializers.get(kernel_initializer)
if use_bias:
self.bias_initializer = keras.initializers.get(bias_initializer)
self.kernel_regularizer = []
if kernel_regularizer:
if callable(kernel_regularizer):
kernel_regularizer = [kernel_regularizer]
for reg in kernel_regularizer:
self.kernel_regularizer.append(keras.regularizers.get(reg))
self.bias_regularizer = []
if bias_regularizer:
if callable(bias_regularizer):
bias_regularizer = [bias_regularizer]
for reg in bias_regularizer:
self.bias_regularizer.append(keras.regularizers.get(reg))
if units == 1:
input_shape = (None, num_input_dims)
else:
input_shape = (None, units, num_input_dims)
self.input_spec = keras.layers.InputSpec(
dtype=self.dtype, shape=input_shape)
  def build(self, input_shape):
    """Standard Keras build() method.

    Creates the kernel (and optional bias) variables, attaches the constraint
    and regularizer objects, and precomputes input-clipping bounds.

    Args:
      input_shape: Must be: (batch_size, num_input_dims) if units == 1, or
        (batch_size, units, num_input_dims) if units > 1.

    Raises:
      ValueError: If shape is invalid.
    """
    linear_lib.verify_hyperparameters(
        num_input_dims=self.num_input_dims,
        units=self.units,
        input_shape=input_shape)
    # Only build a Constraint object when at least one constraint is active.
    if (any(self.monotonicities) or self.monotonic_dominances or
        self.range_dominances or self.normalization_order):
      constraints = LinearConstraints(
          monotonicities=self.monotonicities,
          monotonic_dominances=self.monotonic_dominances,
          range_dominances=self.range_dominances,
          input_min=self.input_min,
          input_max=self.input_max,
          normalization_order=self.normalization_order)
    else:
      constraints = None
    if not self.kernel_regularizer:
      kernel_reg = None
    elif len(self.kernel_regularizer) == 1:
      kernel_reg = self.kernel_regularizer[0]
    else:
      # Keras interface assumes only one regularizer, so sum all regularization
      # losses which we have.
      kernel_reg = lambda x: tf.add_n([r(x) for r in self.kernel_regularizer])
    self.kernel = self.add_weight(
        LINEAR_LAYER_KERNEL_NAME,
        # 1 column matrix rather than vector for matrix multiplication.
        shape=[self.num_input_dims, self.units],
        initializer=self.kernel_initializer,
        regularizer=kernel_reg,
        constraint=constraints,
        dtype=self.dtype)
    if self.use_bias:
      if not self.bias_regularizer:
        bias_reg = None
      elif len(self.bias_regularizer) == 1:
        bias_reg = self.bias_regularizer[0]
      else:
        # Same trick as for the kernel: fold many regularizers into one.
        bias_reg = lambda x: tf.add_n([r(x) for r in self.bias_regularizer])
      self.bias = self.add_weight(
          LINEAR_LAYER_BIAS_NAME,
          shape=[] if self.units == 1 else [self.units],
          initializer=self.bias_initializer,
          regularizer=bias_reg,
          constraint=None,
          dtype=self.dtype)
    # Precompute clip bounds as constants when any finite input bound is given;
    # dims without a bound become +/-inf so clipping is a no-op for them.
    input_min = utils.canonicalize_input_bounds(self.input_min)
    input_max = utils.canonicalize_input_bounds(self.input_max)
    if ((input_min and input_min.count(None) < len(input_min)) or
        (input_max and input_max.count(None) < len(input_max))):
      lower_bounds = [val if val is not None else -np.inf
                      for val in input_min or [None] * self.num_input_dims]
      upper_bounds = [val if val is not None else np.inf
                      for val in input_max or [None] * self.num_input_dims]
      self.clip_value_min = tf.constant(lower_bounds, dtype=self.dtype)
      self.clip_value_max = tf.constant(upper_bounds, dtype=self.dtype)
    else:
      self.clip_value_min = None
      self.clip_value_max = None
    super(Linear, self).build(input_shape)
def call(self, inputs):
"""Standard Keras call() method."""
if self.clip_value_min is not None and self.clip_value_max is not None:
inputs = tf.clip_by_value(inputs,
clip_value_min=self.clip_value_min,
clip_value_max=self.clip_value_max)
if self.units == 1:
result = tf.matmul(inputs, self.kernel)
else:
result = tf.reduce_sum(inputs * tf.transpose(self.kernel), axis=-1)
if self.use_bias:
result += self.bias
return result
  def compute_output_shape(self, input_shape):
    """Standard Keras compute_output_shape() method."""
    del input_shape  # Unused: the output shape depends only on self.units.
    return [None, self.units]
def get_config(self):
"""Standard Keras get_config() method."""
config = {
"num_input_dims": self.num_input_dims,
"units": self.units,
"monotonicities": self.monotonicities,
"use_bias": self.use_bias,
"normalization_order": self.normalization_order,
"monotonic_dominances": self.monotonic_dominances,
"range_dominances": self.range_dominances,
"input_min": self.input_min,
"input_max": self.input_max,
"kernel_initializer":
keras.initializers.serialize(self.kernel_initializer),
"kernel_regularizer": [
keras.regularizers.serialize(r) for r in self.kernel_regularizer
],
} # pyformat: disable
if self.use_bias:
config["bias_initializer"] = keras.initializers.serialize(
self.bias_initializer)
config["bias_regularizer"] = [
keras.regularizers.serialize(r) for r in self.bias_regularizer
]
config.update(super(Linear, self).get_config())
return config
  # Default eps is bigger than one for other layers because normalization is
  # prone to numerical errors.
  def assert_constraints(self, eps=1e-4):
    """Asserts that weights satisfy all constraints.

    In graph mode builds and returns list of assertion ops.
    In eager mode directly executes assertions.

    Args:
      eps: Allowed constraints violation.

    Returns:
      List of assertion ops in graph mode or immediately asserts in eager mode.
    """
    # Delegates to linear_lib with canonicalized monotonicities and bounds so
    # the checks match exactly what projection enforces during training.
    return linear_lib.assert_constraints(
        weights=self.kernel,
        monotonicities=utils.canonicalize_monotonicities(self.monotonicities),
        monotonic_dominances=self.monotonic_dominances,
        range_dominances=self.range_dominances,
        input_min=utils.canonicalize_input_bounds(self.input_min),
        input_max=utils.canonicalize_input_bounds(self.input_max),
        normalization_order=self.normalization_order,
        eps=eps)
class LinearConstraints(keras.constraints.Constraint):
  # pyformat: disable
  """Applies monotonicity constraints and normalization to TFL Linear layer.

  Monotonicity is specified per input dimension in which case learned weight for
  those dimensions is guaranteed to be either non negative for increasing or non
  positive for decreasing monotonicity.

  Monotonic dominance can be specified for any pair of dimensions referred to as
  *dominant* and *weak* dimensions such that the effect (slope) in the direction
  of the *dominant* dimension to be greater than that of the *weak* dimension
  for any point. Both dominant and weak dimensions must be increasing.

  Range dominance can be specified for any pair of *dominant* and *weak*
  dimensions such that the range of possible outputs to be greater if one varies
  the *dominant* dimension than if one varies the *weak* dimension for any
  point. We require the slope of the *dominant* dimension scaled by its input
  range to be greater than the slope of the *weak* dimension similarly scaled by
  its input range. Both dimensions must have the same direction of monotonicity
  and their input min and max must be provided.

  Weights can be constrained to have norm 1.

  Attributes:
    - All `__init__` arguments.
  """
  # pyformat: enable

  def __init__(self, monotonicities, monotonic_dominances=None,
               range_dominances=None, input_min=None, input_max=None,
               normalization_order=None):
    """Initializes an instance of `LinearConstraints`.

    Args:
      monotonicities: Same meaning as corresponding parameter of `Linear`.
      monotonic_dominances: Same meaning as corresponding parameter of `Linear`.
      range_dominances: Same meaning as corresponding parameter of `Linear`.
      input_min: Same meaning as corresponding parameter of `Linear`.
      input_max: Same meaning as corresponding parameter of `Linear`.
      normalization_order: Same meaning as corresponding parameter of `Linear`.
    """
    linear_lib.verify_hyperparameters(monotonicities=monotonicities,
                                      monotonic_dominances=monotonic_dominances,
                                      range_dominances=range_dominances,
                                      input_min=input_min,
                                      input_max=input_max)
    self.monotonicities = monotonicities
    self.monotonic_dominances = monotonic_dominances
    self.range_dominances = range_dominances
    self.input_min = input_min
    self.input_max = input_max
    self.normalization_order = normalization_order

  def __call__(self, w):
    """Applies constraints to w.

    Args:
      w: Tensor which represents weights of TFL linear layer. Must have shape:
        `(len(self.monotonicities), 1)`.

    Raises:
      ValueError: if shape of `w` is not `(len(self.monotonicities), 1)`.

    Returns:
      Tensor `w` with monotonicity constraints and normalization applied to it.
    """
    return linear_lib.project(
        weights=w,
        monotonicities=utils.canonicalize_monotonicities(self.monotonicities),
        monotonic_dominances=self.monotonic_dominances,
        range_dominances=self.range_dominances,
        input_min=utils.canonicalize_input_bounds(self.input_min),
        input_max=utils.canonicalize_input_bounds(self.input_max),
        normalization_order=self.normalization_order)

  def get_config(self):
    """Standard Keras get_config() method."""
    return {
        "monotonicities": self.monotonicities,
        "monotonic_dominances": self.monotonic_dominances,
        # Bug fix: this key was misspelled "range_doinances", so constraints
        # serialized with get_config() could not be faithfully reconstructed
        # (from_config would not receive `range_dominances`).
        "range_dominances": self.range_dominances,
        "input_min": self.input_min,
        "input_max": self.input_max,
        "normalization_order": self.normalization_order
    }  # pyformat: disable
| [
2,
15069,
13130,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 2.673032 | 6,441 |
from django.db import models
from profiles.models import Profile
from tools.random_token import generate_random_token
from .spell import Spell
class Spellbook(models.Model):
    """A spellbook owned by a profile, holding spells and their prepared state."""

    """Spellbook name"""
    name = models.CharField(
        verbose_name="Name",
        max_length=50,
    )

    """Profile of the user that owns the spellbook"""
    profile = models.ForeignKey(Profile, on_delete=models.CASCADE,)

    """Spells present in the spellbook"""
    spells = models.ManyToManyField(Spell, through='SpellUsage')

    """Token that gives access to the spellbook, independently to the profile"""
    token = models.CharField(max_length=32, default=generate_random_token)

    @property
    def n_spells(self):
        """Number of spells contained in the spellbook."""
        return self.spells.count()

    def has_spell_prepared(self, spell):
        """Return whether `spell` is currently prepared in this spellbook."""
        usage = self.spell_usages.get(spell=spell, spellbook=self)
        return usage.prepared

    @property
    def spells_with_preparations(self):
        """Spells each annotated with `is_prepared`, prepared ones listed first."""
        annotated = []
        for spell in self.spells.all():
            spell.is_prepared = self.has_spell_prepared(spell)
            annotated.append(spell)
        # ~True == -2 sorts before ~False == -1, so prepared spells come first.
        annotated.sort(key=lambda item: ~item.is_prepared)
        return annotated

    def slot_level(self, level):
        """Return the spell slot of a given level, or None when absent."""
        try:
            return self.slots.get(level=level)
        except models.ObjectDoesNotExist:
            return None

    def reset_slots(self):
        """Restore every slot's current capacity to its maximum."""
        self.slots.all().update(current_capacity=models.F('max_capacity'))
class SpellUsage(models.Model):
    """Link between a spell and a spellbook that defines if it is prepared"""

    # Through-model of Spellbook.spells (ManyToManyField through='SpellUsage').
    # Both foreign keys share related_name='spell_usages', which is valid
    # because they live on different target models (Spell and Spellbook).

    """The spell of the connection"""
    spell = models.ForeignKey(Spell, on_delete=models.CASCADE, related_name='spell_usages')

    """The spellbook of the connection"""
    spellbook = models.ForeignKey(Spellbook, on_delete=models.CASCADE, related_name='spell_usages')

    """Whether the spell is prepared or not"""
    prepared = models.BooleanField(default=False)
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
6738,
16545,
13,
27530,
1330,
13118,
198,
6738,
4899,
13,
25120,
62,
30001,
1330,
7716,
62,
25120,
62,
30001,
198,
198,
6738,
764,
46143,
1330,
11988,
628,
198,
4871,
11988,
2070,
7,
... | 2.808841 | 837 |
from flytekitplugins.spark import Spark
from flytekitplugins.spark.task import new_spark_session
import flytekit
from flytekit import task
from flytekit.common.tasks.sdk_runnable import ExecutionParameters
from flytekit.extend import Image, ImageConfig, SerializationSettings
| [
6738,
6129,
660,
15813,
37390,
13,
2777,
668,
1330,
17732,
198,
6738,
6129,
660,
15813,
37390,
13,
2777,
668,
13,
35943,
1330,
649,
62,
2777,
668,
62,
29891,
198,
198,
11748,
6129,
660,
15813,
198,
6738,
6129,
660,
15813,
1330,
4876,
... | 3.4875 | 80 |
import os, fnmatch, importlib

# Directory containing this file, and this file's own absolute path.
BASEDIR = os.path.dirname(os.path.abspath(__file__))
SELF_FILE = os.path.abspath(__file__)
# Name of the per-package service module to discover (overridable via env var).
SERVICE_FILENAME = os.environ.get("SERVICE_FILENAME", "services.py")

# NOTE(review): `find_all` is not defined in this chunk — presumably a helper
# defined earlier in the file that walks BASEDIR for files named
# SERVICE_FILENAME; confirm its signature.
all_services = find_all(SERVICE_FILENAME, BASEDIR)
for file_path in all_services:
    # Turn a filesystem path into a dotted module path for importing.
    relative_python_path = file_path.replace("/", ".").replace(".py", "")
    print(f"Loading services in... {relative_python_path}")
    # HACK: dynamic star-import via exec(); importlib.import_module would be
    # safer if the discovered paths are not fully trusted.
    exec(f"from {relative_python_path} import *")
| [
11748,
28686,
11,
24714,
15699,
11,
1330,
8019,
198,
198,
33,
42827,
4663,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
834,
7753,
834,
4008,
198,
50,
37738,
62,
25664,
796,
28686,
13,
6978,
13,
... | 2.586592 | 179 |
import pytest
import requests
import httpretty
import json
from context import optimizely
@httpretty.activate
@httpretty.activate
| [
11748,
12972,
9288,
198,
11748,
7007,
198,
11748,
2638,
16100,
198,
11748,
33918,
198,
6738,
4732,
1330,
27183,
306,
628,
198,
31,
4023,
16100,
13,
39022,
628,
198,
31,
4023,
16100,
13,
39022,
198
] | 3.941176 | 34 |
import unittest
import datetime as dt
from src.config.appConfig import getConfig
from src.fetchers.genUnitOutagesFetcher import fetchMajorGenUnitOutages
from src.typeDefs.outage import IOutage
from typing import List
| [
11748,
555,
715,
395,
198,
11748,
4818,
8079,
355,
288,
83,
198,
6738,
12351,
13,
11250,
13,
1324,
16934,
1330,
651,
16934,
198,
6738,
12351,
13,
34045,
3533,
13,
5235,
26453,
7975,
1095,
37,
316,
2044,
1330,
21207,
24206,
13746,
26453,... | 3.460317 | 63 |
''' Global and Local Variable Scopes
Variables defined inside a function body have a local scope.
Variables defined outside a function have a global scope.
Global variables can be accessed anywhere in your python file.
Local variables can only be accessed inside the function they belong to.
I can use the global keyword inside a function definition to make the value of a local variable global.
'''

# x = 10 # global variable
# def my_number(x):
#     print(x) # 10
#     x = 7 #local variable
#     print("My fav number is ", x)

# my_number(x) # My fav number is 7
# print(x) # 10 ## x from line 11

# How to change the value of the global variable
x = 10  # global variable


def my_number():
    """Rebind the global x from inside a function using the `global` keyword."""
    # Bug fix: the original script called my_number() while every definition of
    # it was commented out, so it crashed with NameError. This definition makes
    # the demo run and produce the outputs promised by the comments below.
    global x  # without this, `x = 7` would create a new local variable
    x = 7
    print("My fav number is ", x)


my_number()  # My fav number is 7
print(x)  # 7 ## global variable x after I have changed the global variable on line 36
| [
7061,
6,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
8060,
290,
10714,
35748,
1446,
13920,
198,
23907,
2977,
5447,
2641,
257,
2163,
1767,
423,
257,
1957,
8354,
13,
198,
23907,
2977,
5447,
235... | 3.181132 | 265 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-08 13:17
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
22,
319,
2177,
12,
3312,
12,
2919,
1511,
25,
1558,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
# coding=utf-8
"""
Manages Github requests
"""
import json
import typing
import requests
from esst import LOGGER
BASE_URL = 'https://api.github.com/'
def get_latest_release(owner: str, repo: str) -> typing.Tuple[str, str, str]:
    """Fetches information about the latest release of a Github repository.

    Args:
        owner: owner of the Github repo
        repo: name of the Github repo

    Returns: latest version, asset name, asset download URL
    """
    release = _make_request(f'repos/{owner}/{repo}/releases/latest')
    first_asset = release['assets'][0]
    return release['tag_name'], first_asset['name'], first_asset['browser_download_url']
| [
2,
19617,
28,
40477,
12,
23,
198,
37811,
198,
5124,
1095,
38994,
7007,
198,
37811,
198,
198,
11748,
33918,
198,
11748,
19720,
198,
198,
11748,
7007,
198,
198,
6738,
1658,
301,
1330,
41605,
30373,
198,
198,
33,
11159,
62,
21886,
796,
7... | 2.695238 | 210 |
import os
import matplotlib.pyplot as plt
from osgeo import gdal
import numpy
import pygeoprocessing
CUR_DIR = os.path.dirname(__file__)
DATA_DIR = os.path.join(CUR_DIR, '..', '..', 'docker', 'data')

if __name__ == '__main__':
    # NOTE(review): `render` is not defined in this chunk — presumably a
    # plotting helper defined earlier in the file taking
    # (raster_path, title, output_png); confirm before running standalone.
    unproj_dem = os.path.join(DATA_DIR, 'ASTGTM2_N37W120_dem.tif')
    render(unproj_dem,
           'ASTER N37W120 (unprojected)',
           'ASTER-N37W120-unprojected.png')
    render(os.path.join(DATA_DIR, 'N37W120.tif'),
           'ASTER N37W120 (UTM zone 11N)',
           'ASTER-N37W120-UTM11N.png')

    # WKT definition of EPSG:3572 (WGS 84 / North Pole LAEA Alaska) used as
    # the target spatial reference for the reprojection below.
    alaska_srs = """PROJCS["WGS 84 / North Pole LAEA Alaska",
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563,
                AUTHORITY["EPSG","7030"]],
            AUTHORITY["EPSG","6326"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.01745329251994328,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4326"]],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    PROJECTION["Lambert_Azimuthal_Equal_Area"],
    PARAMETER["latitude_of_center",90],
    PARAMETER["longitude_of_center",-150],
    PARAMETER["false_easting",0],
    PARAMETER["false_northing",0],
    AUTHORITY["EPSG","3572"],
    AXIS["X",UNKNOWN],
    AXIS["Y",UNKNOWN]]"""

    out_filename = 'ASTER_alaska.tif'
    # Warp the unprojected DEM into the polar projection at 30 m pixels.
    pygeoprocessing.reproject_dataset_uri(
        original_dataset_uri=unproj_dem,
        pixel_spacing=30,
        output_wkt=alaska_srs,
        resampling_method='nearest',
        output_uri=out_filename)
    render(out_filename,
           'ASTER N37W120 (North Pole LAEA Alaska)',
           'ASTER-N37W120-northpole.png')
| [
11748,
28686,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
28686,
469,
78,
1330,
308,
31748,
198,
11748,
299,
32152,
198,
11748,
12972,
469,
404,
305,
919,
278,
198,
198,
34,
4261,
62,
34720,
796,
2868... | 1.954492 | 857 |
import numpy as np
from matplotlib import pyplot as plt
T = 10e-6    # signal duration (s)
Dt = 0.1e-6  # sampling interval (s)
T1 = 5e-6    # pulse "on" duration (s)
A = 1        # signal amplitude

t = np.arange(-T/2, T/2, Dt)  # time axis

# Bug fix: the amplitude was written with the Greek capital letter 'Α'
# (U+0391), an undefined name that raised NameError at runtime; the Latin 'A'
# defined above is what was intended.
x = A * (np.abs(t) <= T1/2).astype(float)  # rectangular pulse centered at t=0

plt.close('all')  # Close all figures
plt.figure(1)     # open a new figure
plt.plot(t, x)    # plot the signal
| [
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
198,
51,
796,
838,
68,
12,
21,
220,
220,
1303,
6737,
9478,
357,
82,
8,
198,
35,
83,
796,
657,
13,
16,
68,
12,
21,
1303,
19232,
1... | 1.868526 | 251 |
# -*- coding: utf-8 -*-
#
# Copyright(c) 2014 palmhold.com
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six import PY2
from tornado import escape
from tornado.web import HTTPError
# HTTP status code
HTTP_OK = 200
ERROR_BAD_REQUEST = 400
ERROR_UNAUTHORIZED = 401
ERROR_FORBIDDEN = 403
ERROR_NOT_FOUND = 404
ERROR_METHOD_NOT_ALLOWED = 405
ERROR_INTERNAL_SERVER_ERROR = 500

# Custom error code (application-specific values outside the HTTP range)
ERROR_WARNING = 1001
ERROR_DEPRECATED = 1002
ERROR_MAINTAINING = 1003
ERROR_UNKNOWN_ERROR = 9999

# default errors
# NOTE(review): "unknow_error" looks like a typo for "unknown_error", but it is
# a runtime string (likely part of the API wire format), so it is only flagged
# here rather than changed.
_unknown_error = "unknow_error"
_unknown_message = "Unknown error"

# Maps error code -> machine-readable error type identifier.
_error_types = {400: "bad_request",
                401: "unauthorized",
                403: "forbidden",
                404: "not_found",
                405: "method_not_allowed",
                500: "internal_server_error",
                1001: "warning",
                1002: "deprecated",
                1003: "maintaining",
                9999: _unknown_error}

# Maps error code -> human-readable default message.
ERROR_MESSAGES = {400: "Bad request",
                  401: "Unauthorized",
                  403: "Forbidden",
                  404: "Not found",
                  405: "Method not allowed",
                  500: "Internal server error",
                  1001: "Warning",
                  1002: "Deprecated",
                  1003: "Maintaining",
                  9999: _unknown_message}
class TemplateContextError(DjinnError):
    """Template context variable does not exist."""

    # NOTE(review): DjinnError is not defined in this chunk — presumably the
    # package's base exception class declared elsewhere in the file.
    pass
class HTTPAPIError(HTTPError):
"""API error handling exception
API server always returns formatted JSON to client even there is
an internal server error.
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
7,
66,
8,
1946,
18057,
2946,
13,
785,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
7... | 2.428409 | 880 |
import csv
# NOTE(review): readMonedas() is not defined in this chunk — presumably defined
# elsewhere in the file; the name suggests it reads currency data via csv.
readMonedas()
11748,
269,
21370,
198,
198,
961,
9069,
276,
292,
3419
] | 2.5 | 10 |
# qr_lstsq_eigs.py
"""Volume 1A: QR 2 (Least Squares and Computing Eigenvalues).
<Name>
<Class>
<Date>"""
import numpy as np
from cmath import sqrt
from scipy import linalg as la
from matplotlib import pyplot as plt
# Problem 1
def least_squares(A, b):
    """Calculate the least squares solutions to Ax = b using QR decomposition.

    Inputs:
        A ((m,n) ndarray): A matrix of rank n <= m.
        b ((m, ) ndarray): A vector of length m.

    Returns:
        x ((n, ) ndarray): The solution to the normal equation.
    """
    # Reduced QR: A = QR with Q of shape (m, n) having orthonormal columns and
    # R (n, n) upper triangular. The normal equations A^T A x = A^T b then
    # reduce to R x = Q^T b, solved by back substitution.
    Q, R = la.qr(A, mode="economic")
    return la.solve_triangular(R, Q.T @ b)
# Problem 2
def line_fit():
    """Load the data from housing.npy. Use least squares to calculate the line
    that best relates height to weight.

    Plot the original data points and the least squares line together.
    """
    # Unimplemented lab stub: intended to np.load("housing.npy"), build the
    # [x, 1] design matrix, solve with least_squares(), and plot the fit.
    raise NotImplementedError("Problem 2 Incomplete")
# Problem 3
def polynomial_fit():
    """Load the data from housing.npy. Use least squares to calculate
    the polynomials of degree 3, 6, 9, and 12 that best fit the data.

    Plot the original data points and each least squares polynomial together
    in individual subplots.
    """
    # Unimplemented lab stub: intended to use np.vander for each degree, solve
    # with least squares, and draw one subplot per polynomial.
    raise NotImplementedError("Problem 3 Incomplete")
def plot_ellipse(a, b, c, d, e):
    """Plot an ellipse of the form ax^2 + bx + cxy + dy + ey^2 = 1."""
    # Sample the ellipse in polar form: substituting x = r cos(t), y = r sin(t)
    # turns the equation into A r^2 + B r = 1, a quadratic in r per angle.
    theta = np.linspace(0, 2*np.pi, 200)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    A = a*(cos_t**2) + c*cos_t*sin_t + e*(sin_t**2)
    B = b*cos_t + d*sin_t
    # Positive root of the quadratic formula gives the radius at each angle.
    r = (-B + np.sqrt(B**2 + 4*A))/(2*A)
    plt.plot(r*cos_t, r*sin_t, lw=2)
    # Equal aspect ratio so the ellipse is not visually distorted.
    plt.gca().set_aspect("equal", "datalim")
# Problem 4
def ellipse_fit():
    """Load the data from ellipse.npy. Use least squares to calculate the
    ellipse that best fits the data.

    Plot the original data points and the least squares ellipse together.
    """
    # Unimplemented lab stub: intended to solve for the (a, b, c, d, e)
    # coefficients via least squares and then call plot_ellipse().
    raise NotImplementedError("Problem 4 Incomplete")
# Problem 5
def power_method(A, N=20, tol=1e-12):
    """Compute the dominant eigenvalue of A and a corresponding eigenvector
    via the power method.

    Inputs:
        A ((n,n) ndarray): A square matrix.
        N (int): The maximum number of iterations.
        tol (float): The stopping tolerance.

    Returns:
        (float): The dominant eigenvalue of A.
        ((n, ) ndarray): An eigenvector corresponding to the dominant
            eigenvalue of A.
    """
    n = A.shape[0]
    # Random unit-norm starting vector: a random start is almost surely not
    # orthogonal to the dominant eigenvector.
    x = np.random.random(n)
    x /= la.norm(x)
    for _ in range(N):
        x_next = A @ x
        x_next /= la.norm(x_next)
        # Stop early once successive iterates agree to within tol.
        if la.norm(x_next - x) < tol:
            x = x_next
            break
        x = x_next
    # The Rayleigh quotient of a unit vector estimates the eigenvalue.
    return x @ A @ x, x
# Problem 6
def qr_algorithm(A, N=50, tol=1e-12):
    """Compute the eigenvalues of A via the QR algorithm.

    Inputs:
        A ((n,n) ndarray): A square matrix.
        N (int): The number of iterations to run the QR algorithm.
        tol (float): The threshold value for determining if a diagonal block
            is 1x1 or 2x2.

    Returns:
        ((n, ) ndarray): The eigenvalues of A.
    """
    n = A.shape[0]
    # Reduce to upper Hessenberg form first: the QR iteration preserves this
    # structure and converges faster on it.
    S = la.hessenberg(A)
    for _ in range(N):
        Q, R = la.qr(S)
        S = R @ Q
    eigs = []
    i = 0
    while i < n:
        # A negligible subdiagonal entry marks S[i, i] as a 1x1 block.
        if i == n - 1 or abs(S[i + 1, i]) < tol:
            eigs.append(S[i, i])
            i += 1
        else:
            # 2x2 block: eigenvalues from the quadratic formula applied to the
            # characteristic polynomial; cmath's sqrt handles complex pairs.
            a, b = S[i, i], S[i, i + 1]
            c, d = S[i + 1, i], S[i + 1, i + 1]
            disc = sqrt((a + d)**2 - 4*(a*d - b*c))
            eigs.append(((a + d) + disc) / 2)
            eigs.append(((a + d) - disc) / 2)
            i += 2
    return np.array(eigs)
| [
2,
10662,
81,
62,
75,
6448,
80,
62,
68,
9235,
13,
9078,
198,
37811,
31715,
352,
32,
25,
42137,
362,
357,
3123,
459,
5056,
3565,
290,
38589,
412,
9324,
27160,
737,
198,
27,
5376,
29,
198,
27,
9487,
29,
198,
27,
10430,
29,
37811,
... | 2.575514 | 1,119 |
import pandas as pd
import duckdb
import datetime
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
22045,
9945,
198,
11748,
4818,
8079,
198
] | 3.333333 | 15 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014-2015 pocsuite developers (http://seebug.org)
See the file 'docs/COPYING' for copying permission
"""
#命令行
from pocsuite import pocsuite_cli
#验证模块
from pocsuite import pocsuite_verify
#攻击模块
from pocsuite import pocsuite_attack
#控制台模式
from pocsuite import pocsuite_console
from pocsuite.api.request import req
from pocsuite.api.poc import register
from pocsuite.api.poc import Output, POCBase
# NOTE(review): MongodbPOC is not defined in this chunk — presumably the POC
# class declared earlier in the file; register() adds it to pocsuite's registry.
register(MongodbPOC)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
15269,
357,
66,
8,
1946,
12,
4626,
279,
420,
2385,
578,
6505,
357,
4023,
1378,
325,
1765,
1018,
13,
... | 2.214612 | 219 |
"""Run CNN from NAIP data."""
import tensorflow as tf
import params
# Should import these from gather_cliff_images or from common parameters file
# TODO: (what I was doing)
# inputs are 8 x 8 x 16 tensors along with 16 x 1 tensors -- tf doesn't like this
# can we have a dense feeding into a cnn, or vice versa?
def separate_input_and_output(x):
    """Return tuple with input and output features."""
    excluded = ('mp_score', 'cliff_id')
    feature_keys = [key for key in params.NAIP_KEYS if key not in excluded]
    inputs = [x.get(key) for key in feature_keys]
    outputs = [x.get('mp_score')]
    return inputs, outputs
# Use glob here once more tfrecord files available
filenames = ['../../data/naip_shards/naip_shard_0.tfrecord.gz']
dataset = tf.data.TFRecordDataset(filenames, compression_type='GZIP')
# Keep only a small sample while experimenting.
dataset = dataset.take(10)
# Decode each serialized example using the feature spec defined in params.
dataset = dataset.map(lambda x: tf.io.parse_single_example(x, params.FEATURES_DICT))
# NOTE(review): iter(...).next() relies on TF's iterator exposing a next()
# method; the portable spelling is next(iter(...)) — verify on the TF version
# in use.
print(iter(dataset.take(1)).next())
# Split parsed features into (inputs, outputs) tuples for training.
dataset = dataset.map(separate_input_and_output)
print('#' * 80)
dataset = dataset.shuffle(params.SHUFFLE_BUFFER_SIZE, reshuffle_each_iteration=True)
dataset = dataset.batch(params.BATCH_SIZE)
dataset = dataset.repeat(params.EPOCHS)
print(iter(dataset.take(1)).next())
# from tensorflow.python.keras import layers
# from tensorflow.python.keras import losses
# from tensorflow.python.keras import models
# from tensorflow.python.keras import metrics
# from tensorflow.python.keras import optimizers
# def conv_block(input_tensor, num_filters):
# encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor)
# encoder = layers.BatchNormalization()(encoder)
# encoder = layers.Activation('relu')(encoder)
# encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(encoder)
# encoder = layers.BatchNormalization()(encoder)
# encoder = layers.Activation('relu')(encoder)
# return encoder
# def encoder_block(input_tensor, num_filters):
# encoder = conv_block(input_tensor, num_filters)
# encoder_pool = layers.MaxPooling2D((2, 2), strides=(2, 2))(encoder)
# return encoder_pool, encoder
# def decoder_block(input_tensor, concat_tensor, num_filters):
# decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(input_tensor)
# decoder = layers.concatenate([concat_tensor, decoder], axis=-1)
# decoder = layers.BatchNormalization()(decoder)
# decoder = layers.Activation('relu')(decoder)
# decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
# decoder = layers.BatchNormalization()(decoder)
# decoder = layers.Activation('relu')(decoder)
# decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
# decoder = layers.BatchNormalization()(decoder)
# decoder = layers.Activation('relu')(decoder)
# return decoder
# def get_model():
# inputs = layers.Input(shape=[None, None, len(BANDS)]) # 256
# encoder0_pool, encoder0 = encoder_block(inputs, 32) # 128
# encoder1_pool, encoder1 = encoder_block(encoder0_pool, 64) # 64
# encoder2_pool, encoder2 = encoder_block(encoder1_pool, 128) # 32
# encoder3_pool, encoder3 = encoder_block(encoder2_pool, 256) # 16
# encoder4_pool, encoder4 = encoder_block(encoder3_pool, 512) # 8
# center = conv_block(encoder4_pool, 1024) # center
# decoder4 = decoder_block(center, encoder4, 512) # 16
# decoder3 = decoder_block(decoder4, encoder3, 256) # 32
# decoder2 = decoder_block(decoder3, encoder2, 128) # 64
# decoder1 = decoder_block(decoder2, encoder1, 64) # 128
# decoder0 = decoder_block(decoder1, encoder0, 32) # 256
# outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0)
# model = models.Model(inputs=[inputs], outputs=[outputs])
# model.compile(
# optimizer=optimizers.get(OPTIMIZER),
# loss=losses.get(LOSS),
# metrics=[metrics.get(metric) for metric in METRICS])
# return model
# l = list(parsed_dataset.as_numpy_iterator())
# print(l[0])
# for i, item in enumerate(l[:50]):
# print(i, item['cliff_id'], item['height'])
# print(len(l)) | [
37811,
10987,
8100,
422,
11746,
4061,
1366,
526,
15931,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
42287,
628,
198,
2,
10358,
1330,
777,
422,
6431,
62,
565,
733,
62,
17566,
393,
422,
2219,
10007,
2393,
628,
198,
2,
16... | 2.565531 | 1,526 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Paint Factory Puzzle
# Author: mu.ammad.ud.din@gmail.com
# Last Update: 09 March 2019
# License: MIT
from support_services.utils import compareCustomers, compareChoices, analyzePop, compare_to_key
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
23467,
19239,
23966,
198,
2,
6434,
25,
38779,
13,
26035,
13,
463,
13,
25194,
31,
14816,
13,
785,
198,
2... | 2.704082 | 98 |
import urllib
import sqlite3
from datetime import datetime
from math import log
# Download stock data from Google Finance
if __name__ == "__main__":
    try:
        # Get indices tickers
        index_list = [".DJI", ".INX"]
        # Open SQLite database
        sql_conn = sqlite3.connect("/home/kirill/project/data/stocks.db")
        print("Stocks database opened successfully")
        # Load data for each ticker
        for index in index_list:
            print(index)
            # NOTE(review): get_data is not defined in this chunk — presumably
            # defined earlier in the file; 86400 looks like a one-day interval
            # in seconds.
            get_data(86400, index)
        # Close SQLite database
        sql_conn.close()
        print("Index data from Google Finance is loaded successfully in SQLite database")
        print("Stocks database closed successfully")
    except Exception as exc:
        # Bug fix: was `except BaseException: pass`, which silently swallowed
        # every failure — including KeyboardInterrupt and SystemExit — leaving
        # no trace of what went wrong. Report the error instead.
        print("Failed to load index data:", exc)
| [
11748,
2956,
297,
571,
201,
198,
11748,
44161,
578,
18,
201,
198,
6738,
4818,
8079,
1330,
4818,
8079,
201,
198,
6738,
10688,
1330,
2604,
201,
198,
201,
198,
2,
10472,
4283,
1366,
422,
3012,
15007,
201,
198,
220,
220,
220,
220,
201,
... | 2.191489 | 376 |
"""
Contains Qt-widgets to visualize measurements.
"""
import math
import time
from PyQt5.QtCore import Qt, QTimer, pyqtSlot, QPointF
from PyQt5.QtGui import QPen, QBrush, QPolygonF, QLinearGradient, QColor, QPainter
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QGraphicsScene, QGraphicsView, QGraphicsTextItem
from domain.model import Measurement
def deg2rad(degree):
    """
    :param degree: angle in degree
    :return: angle in radians
    """
    # Same operation order as before (multiply, then divide) so results are
    # bit-for-bit identical.
    scaled = degree * math.pi
    return scaled / 180.0
def rad2deg(rad):
    """
    :param rad: angle in radians
    :return: angle in degree
    """
    # Same operation order as before (divide, then multiply) so results are
    # bit-for-bit identical.
    fraction_of_pi = rad / math.pi
    return fraction_of_pi * 180.0
def scale(input_interval, output_interval, value):
    """
    For two intervals (input and output)
    Scales a specific value linear from [input_min, input_max] to [output_min, output_max].
    """
    in_lo, in_hi = input_interval
    out_lo, out_hi = output_interval
    # Fraction of the way through the input interval, re-applied to the output
    # interval (same arithmetic order as the original expression).
    fraction = (value - in_lo) / (in_hi - in_lo)
    return out_lo + (out_hi - out_lo) * fraction
class RadarWidget(QWidget):
"""
Widget to visualize Measurements in a radar like matter.
"""
    @pyqtSlot(Measurement)
    @pyqtSlot()
    def draw_radar(self):
        """
        Discards the current content and redraws all elements on the graphic scene.
        """
        # decide which color to use
        # NOTE(review): line_color is assigned but never used in this method.
        bg_color, line_color = Qt.black, Qt.green
        self.scene.clear()
        # Background rectangle covering the whole widget.
        self.scene.addRect(0, 0, self.width(), self.height(), brush=QBrush(bg_color))
        self._add_crosshair()
        self._add_circles(self.circle_count, self.add_text_labels)
        # for each measurement, draw a line
        for measurement in self.measurements:
            assert isinstance(measurement, Measurement)
            added_time = self.added_time[measurement]
            self._add_measurement(measurement.distance, measurement.angle, added_time)
        # for the latest 2 measurements, draw an angle visualizer
        for measurement in self.measurements[-2:]:
            self._add_latest_input_line(measurement.angle)
    @pyqtSlot()
    def clear_measurements(self):
        """Deletes all previous measurements"""
        # NOTE(review): self.added_time is not cleared here, so its entries for
        # removed measurements linger until purged elsewhere — verify intended.
        self.measurements = []
def _purge_old_measurements(self):
"""Removes measurements that are older than the fade_out_time"""
min_time = time.time() - self.fade_out_time
to_delete = [measurement for measurement in self.measurements if measurement not in self.added_time or self.added_time[measurement] < min_time]
self.measurements = [measurement for measurement in self.measurements if measurement not in to_delete]
    def _add_latest_input_line(self, angle):
        """Adds a line to the graphic scene that visualizes a scanned angle"""
        mx, my = self._get_middle()
        angle_rad = deg2rad(angle)
        # Edges of the wedge, half the measurement aperture to each side.
        angle_1_rad = deg2rad(angle - self.measurement_angle/2.0)
        angle_2_rad = deg2rad(angle + self.measurement_angle/2.0)
        # Long enough to always reach beyond the widget's border.
        length = max(self.width(), self.height())
        start_point = (mx, my)
        p1 = (mx + length * math.cos(angle_1_rad), my + length * math.sin(angle_1_rad))
        p2 = (mx + length * math.cos(angle_2_rad), my + length * math.sin(angle_2_rad))
        # Gradient runs from the center outward along the central angle.
        gradient_start_point, gradient_end_point = (mx, my), (mx + length * math.cos(angle_rad), my + length * math.sin(angle_rad))
        gradient = QLinearGradient(*gradient_start_point, *gradient_end_point)
        gradient.setColorAt(0, Qt.transparent)
        gradient.setColorAt(0.8, Qt.red)
        gradient.setColorAt(1, Qt.darkRed)
        # Filled triangle approximating the scan wedge.
        triangle = QPolygonF()
        triangle.append(QPointF(*start_point))
        triangle.append(QPointF(*p1))
        triangle.append(QPointF(*p2))
        triangle.append(QPointF(*start_point))
        self.scene.addPolygon(triangle, pen=QPen(Qt.transparent), brush=QBrush(gradient))
def _get_middle(self):
"""
returns a 2-tuple representing the middle coordinates from the upper left corner
:return: x,y
"""
return self.width()/2, self.height()/2
def _add_measurement(self, length, angle, added_time):
    """Draw one distance measurement (a ray plus an end dot) on the scene.

    The drawing's opacity decreases with the measurement's age: a fresh
    measurement is fully opaque, one at or beyond ``self.fade_out_time``
    seconds old is fully transparent.

    :param length: measured distance in cm
    :param angle: measurement angle in degrees
    :param added_time: wall-clock timestamp (``time.time()``) at which the
        measurement was recorded
    """
    mx, my = self._get_middle()
    angle_rad = deg2rad(angle)
    # End point of the ray, offset from the widget centre.
    ex, ey = mx + length * math.cos(angle_rad), my + length * math.sin(angle_rad)
    age = time.time() - added_time
    # Clamp so the interpolation below never leaves the valid range.
    age = age if age < self.fade_out_time else self.fade_out_time
    # Map age in [0, fade_out_time] onto alpha in [255, 0]
    # (``scale`` is a project helper -- presumably linear interpolation;
    # the assert below guards the result either way).
    alpha_channel_value = scale((0, self.fade_out_time), (255, 0), age)
    assert 0 <= alpha_channel_value <= 255
    brush_color = QColor(self.measured_distances_color)
    brush_color.setAlpha(alpha_channel_value)
    brush = QBrush(brush_color)
    tpen = QPen(brush_color)
    # Ray from the centre plus a filled dot centred on the measured point.
    self.scene.addLine(mx, my, ex, ey, pen=tpen)
    self.scene.addEllipse(ex-self.dot_width/2, ey-self.dot_width/2, self.dot_width, self.dot_width, pen=tpen, brush=brush)
def _add_crosshair(self):
    """Draw dotted horizontal, vertical and corner-to-corner crosshair lines.

    All four lines pass through the widget centre and use
    ``self.crosshair_line_color`` with a dotted pen.
    """
    pen = QPen(self.crosshair_line_color)
    pen.setWidth(self.line_width)
    pen.setStyle(Qt.DotLine)
    width, height = self.width(), self.height()
    mx, my = self._get_middle()
    # horizontal
    self.scene.addLine(0, my, width, my, pen=pen)
    # vertical
    self.scene.addLine(mx, 0, mx, height, pen=pen)
    # 45° -- NOTE(review): these run corner-to-corner, so they are exactly
    # 45 degrees only when the widget is square.
    self.scene.addLine(0, 0, width, height, pen=pen)
    self.scene.addLine(width, 0, 0, height, pen=pen)
def _add_circles(self, n, add_text_labels=True):
    """Draw *n* concentric dotted range ellipses centred on the widget.

    :param n: the number of circles
    :param add_text_labels: when True, annotate each ring with its radius
        (positive on the right side, negative on the left)
    """
    pen = QPen(self.circle_line_color)
    pen.setStyle(Qt.DotLine)
    pen.setWidth(self.line_width)
    width, height = self.width(), self.height()
    # Radial spacing between consecutive rings along each axis.
    stepw, steph = width/n, height/n
    mx, my = self._get_middle()
    for i in range(1, n+1):
        # Ring i spans the full widget minus i steps, so rings shrink
        # towards the centre as i grows.
        w, h = width - i * stepw, height - i * steph
        self.scene.addEllipse((width-w)/2, (height-h)/2, w, h, pen)
        if add_text_labels:
            # Right-hand label: the ring's half-width as a positive value.
            text = QGraphicsTextItem()
            text.setDefaultTextColor(self.text_label_color)
            text.setPlainText(str(int(w/2)))
            text.setPos(mx+w/2.0, my)
            # Mirrored left-hand label with the negated value.
            text2 = QGraphicsTextItem()
            text2.setDefaultTextColor(self.text_label_color)
            text2.setPlainText(str(int(-w / 2)))
            text2.setPos(mx - w / 2.0, my)
            self.scene.addItem(text)
            self.scene.addItem(text2)
| [
37811,
198,
4264,
1299,
33734,
12,
28029,
11407,
284,
38350,
13871,
13,
198,
37811,
198,
198,
11748,
10688,
198,
11748,
640,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
33734,
11,
1195,
48801,
11,
12972,
39568,
38963,
11,
11... | 2.28678 | 2,950 |
"""Unit tests for orbitpy.preprocess module.
"""
import unittest
import json
import numpy
import sys, os | [
37811,
26453,
5254,
329,
13066,
9078,
13,
3866,
14681,
8265,
13,
198,
37811,
198,
198,
11748,
555,
715,
395,
198,
11748,
33918,
198,
11748,
299,
32152,
198,
11748,
25064,
11,
28686
] | 3.387097 | 31 |
import numpy as np
from .base import Metric
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
764,
8692,
1330,
3395,
1173,
628
] | 3.285714 | 14 |
import logging
import pandas as pd
from mlblocks import MLPipeline
LOGGER = logging.getLogger(__name__)
| [
11748,
18931,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
25962,
27372,
1330,
10373,
47,
541,
4470,
198,
198,
25294,
30373,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
198
] | 3 | 36 |
#!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2019/5/7 23:08
# @Email : jtyoui@qq.com
# @Software: PyCharm
from .Japan import cut, analysis # 日语单词提取
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
13,
22,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
13130,
14,
20,
14,
22,
2242,
25,
2919,
198,
2,
2488,
15333,
220,
1058,
4... | 1.873563 | 87 |
import numpy as np
from scipy import stats
from scipy.signal import butter, filtfilt
import warnings
from .data import peakdet
def estimate_fundamental(dat, samplerate, highcut=3000, normalize=-1, four_search_range=(-20, 20)):
    """
    Estimates the fundamental frequency in the data.

    The signal is low-pass filtered (and optionally normalized), a first
    estimate of the fundamental is derived from the median inter-peak
    interval, and that estimate is refined by taking the strongest Fourier
    amplitude within ``four_search_range`` of it.

    :param dat: one dimensional array
    :param samplerate: sampling rate of that array in Hz
    :param highcut: highcut for the filter in Hz
    :param normalize: normalization window; values <= 0 disable normalization
    :param four_search_range: search range in the Fourier domain in Hz,
        relative to the median-interval estimate
    :return: fundamental frequency in Hz
    """
    filtered_data = butter_lowpass_filter(dat, highcut, samplerate, order=5)
    if normalize > 0:
        filtered_data = normalize_signal(filtered_data, samplerate, norm_window=normalize)

    n = len(filtered_data)
    t = np.arange(n) / samplerate

    # First estimate: reciprocal of the median time between detected peaks.
    _, eod_peak_idx, _, eod_trough_idx = peakdet(filtered_data)
    diff_eod_peak_t = np.diff(t[eod_peak_idx])
    freq_from_median = 1 / np.median(diff_eod_peak_t)

    # Refine in the Fourier domain: suppress every bin OUTSIDE the search
    # window around the first estimate, then pick the strongest remaining bin.
    # Bug fix: the original combined the two bounds with ``&`` ("below the
    # lower bound AND above the upper bound"), which can never be true, so
    # no bin was ever masked and the search range was silently ignored.
    f, w = amplitude_spec(filtered_data, samplerate)
    f[(w < freq_from_median + four_search_range[0]) | (w > freq_from_median + four_search_range[1])] = -np.Inf
    freq_from_fourier = np.argmax(f)
    return abs(w[freq_from_fourier])
| [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1330,
9756,
198,
6738,
629,
541,
88,
13,
12683,
282,
1330,
9215,
11,
1226,
27110,
2326,
198,
11748,
14601,
198,
198,
6738,
764,
7890,
1330,
9103,
15255,
628,
628,
628,
628,
198,
... | 2.597536 | 487 |
all_firewalls = {u'firewalls': [{u'budget': 10287,
u'cloud': {u'cloudId': 1},
u'customer': {u'customerId': 11111},
u'description': u'application1203',
u'firewallId': 125571,
u'name': u'application1203',
u'owningAccount': {u'accountId': 16000},
u'owningUser': {u'userId': 12345},
u'providerId': u'sg-70f89f40',
u'region': {u'regionId': 19344},
u'removable': True,
u'status': u'ACTIVE'},
{u'budget': 10287,
u'cloud': {u'cloudId': 1},
u'customer': {u'customerId': 11111},
u'description': u'database1203',
u'firewallId': 125572,
u'name': u'database1203',
u'owningAccount': {u'accountId': 16000},
u'owningUser': {u'userId': 12345},
u'providerId': u'sg-72f89f42',
u'region': {u'regionId': 19344},
u'removable': True,
u'status': u'ACTIVE'},
{u'budget': 10287,
u'cloud': {u'cloudId': 1},
u'customer': {u'customerId': 11111},
u'description': u'default group',
u'firewallId': 116387,
u'name': u'default',
u'owningAccount': {u'accountId': 16000},
u'providerId': u'sg-28891318',
u'region': {u'regionId': 19344},
u'removable': True,
u'status': u'ACTIVE'},
{u'budget': 10287,
u'cloud': {u'cloudId': 1},
u'customer': {u'customerId': 11111},
u'description': u'dpando-http',
u'firewallId': 116967,
u'name': u'dpandohttp',
u'owningAccount': {u'accountId': 16000},
u'owningUser': {u'userId': 12345},
u'providerId': u'sg-1cb02a2c',
u'region': {u'regionId': 19344},
u'removable': True,
u'status': u'ACTIVE'},
{u'budget': 10287,
u'cloud': {u'cloudId': 1},
u'customer': {u'customerId': 11111},
u'description': u'dpando-mysql',
u'firewallId': 116966,
u'name': u'dpandomysql',
u'owningAccount': {u'accountId': 16000},
u'owningUser': {u'userId': 12345},
u'providerId': u'sg-00b02a30',
u'region': {u'regionId': 19344},
u'removable': True,
u'status': u'ACTIVE'},
{u'budget': 10287,
u'cloud': {u'cloudId': 1},
u'customer': {u'customerId': 11111},
u'description': u'dpandonewfw',
u'firewallId': 125580,
u'name': u'dpandonewfw',
u'owningAccount': {u'accountId': 16000},
u'owningUser': {u'userId': 12345},
u'providerId': u'sg-8cdfb8bc',
u'region': {u'regionId': 19344},
u'removable': True,
u'status': u'ACTIVE'},
{u'budget': 10287,
u'cloud': {u'cloudId': 1},
u'customer': {u'customerId': 11111},
u'description': u'keithdb',
u'firewallId': 117163,
u'name': u'keithdb',
u'owningAccount': {u'accountId': 16000},
u'owningUser': {u'userId': 12346},
u'providerId': u'sg-4aa73d7a',
u'region': {u'regionId': 19344},
u'removable': True,
u'status': u'ACTIVE'},
{u'budget': 10287,
u'cloud': {u'cloudId': 1},
u'customer': {u'customerId': 11111},
u'description': u'keithweb',
u'firewallId': 117164,
u'name': u'keithweb',
u'owningAccount': {u'accountId': 16000},
u'owningUser': {u'userId': 12346},
u'providerId': u'sg-46a73d76',
u'region': {u'regionId': 19344},
u'removable': True,
u'status': u'ACTIVE'}]}
one_firewall = {u'firewalls': [
{u'budget': 10287,
u'cloud': {u'cloudId': 1},
u'customer': {u'customerId': 11111},
u'description': u'default group',
u'firewallId': 116387,
u'name': u'default',
u'owningAccount': {u'accountId': 16000},
u'providerId': u'sg-28891318',
u'region': {u'regionId': 19344},
u'removable': True,
u'status': u'ACTIVE'}]}
firewall_rules = {u'rules': [{u'direction': u'INGRESS',
u'firewall': {u'firewallId': 116387},
u'firewallRuleId': 3706471,
u'source': u'217.240.165.28/32',
u'sourceType': u'CIDR',
u'destination': u'406',
u'destinationType': u'GLOBAL',
u'startPort': 2003,
u'endPort': 2003,
u'precedence': 0,
u'protocol': u'TCP'},
{u'direction': u'INGRESS',
u'firewall': {u'firewallId': 116387},
u'firewallRuleId': 3706472,
u'source': u'217.250.165.28/32',
u'sourceType': u'CIDR',
u'destination': u'406',
u'destinationType': u'GLOBAL',
u'startPort': 80,
u'endPort': 80,
u'precedence': 0,
u'protocol': u'TCP'},
{u'direction': u'INGRESS',
u'firewall': {u'firewallId': 116387},
u'firewallRuleId': 3707022,
u'source': u'100.250.100.28/32',
u'sourceType': u'CIDR',
u'destination': u'406',
u'destinationType': u'GLOBAL',
u'startPort': 443,
u'endPort': 443,
u'precedence': 0,
u'protocol': u'TCP'},
{u'direction': u'INGRESS',
u'firewall': {u'firewallId': 116387},
u'firewallRuleId': 3707710,
u'source': u'100.210.200.28/32',
u'sourceType': u'CIDR',
u'destination': u'406',
u'destinationType': u'GLOBAL',
u'startPort': 8080,
u'endPort': 8080,
u'precedence': 0,
u'protocol': u'TCP'},
{u'direction': u'INGRESS',
u'firewall': {u'firewallId': 116387},
u'firewallRuleId': 3742520,
u'source': u'150.110.200.28/32',
u'sourceType': u'CIDR',
u'destination': u'406',
u'destinationType': u'GLOBAL',
u'startPort': 1020,
u'endPort': 1020,
u'precedence': 0,
u'protocol': u'UDP'}]}
one_firewall_rule = {u'rules': [{u'direction': u'INGRESS',
u'firewall': {u'firewallId': 116387},
u'firewallRuleId': 3706471,
u'source': u'217.240.165.28/32',
u'sourceType': u'CIDR',
u'destination': u'406',
u'destinationType': u'GLOBAL',
u'startPort': 2003,
u'endPort': 2003,
u'precedence': 0,
u'protocol': u'TCP'}]}
no_rules = {u'rules': []}
| [
439,
62,
6495,
86,
5691,
796,
1391,
84,
6,
6495,
86,
5691,
10354,
685,
90,
84,
6,
37315,
10354,
838,
27800,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.379774 | 7,436 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from .backbone import Basenet
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28686,
198,
198,
6738,
764,
1891,
15992,
1330,
6455,
268,
316,
198,
220,
220,
220,
220,
628,
198,
220,
220,
220,
... | 2.403509 | 57 |
import matplotlib.pyplot as plt
import matplotlib.collections as plt_collections
from matplotlib.widgets import Button
import csv
# NOTE: this script is Python 2 (print statements, Py2 ``map`` semantics).
csvFilaPath = 'log.csv'
# CAN-bus frame id -> FrameStream (per-byte history + timestamps).
# ``FrameStream`` is defined elsewhere in this file -- presumably it owns
# ``bytesStream`` (list of per-byte value lists) and ``timeStream``.
frames = dict()
with open(csvFilaPath, 'r') as file:
    rows = csv.reader(file, delimiter=',')
    for row in rows:
        # Expected row shape: hex frame id, timestamp, then the data bytes.
        if len(row) < 3 or not row[0].startswith('0x'):
            print 'skip row:', row
            continue
        id = row[0]
        try:
            time = int(row[1])
        except ValueError:
            print 'incorrect row time:', row
            continue
        try:
            bytes = map(lambda x: int(x), row[2:])
        except ValueError:
            print 'incorrect row numbers:', row
            continue
        if not id in frames:
            frames[id] = FrameStream()
        frame = frames[id]
        if len(frame.bytesStream) == 0:
            # First row for this id fixes the frame's byte count: one
            # history list per byte position.
            frame.bytesStream = [[x] for x in bytes]
        else:
            if len(bytes) != len(frame.bytesStream):
                continue # frame damaged
            for b, byte in enumerate(bytes):
                frame.bytesStream[b].append(byte)
        frame.timeStream.append(time)
        # Invariant: one timestamp per appended byte sample.
        assert len(frame.timeStream) == len(frame.bytesStream[0])
print 'CSV parsed'
errorsCount = 0
for (_, frame) in frames.items():
for byteStream in frame.bytesStream:
for i, b5 in enumerate(zip(byteStream[:-4], byteStream[1:-3], byteStream[2:-2], byteStream[3:-1], byteStream[4:])):
if b5[2] != b5[0] and b5[0] == b5[1] and b5[0] == b5[3] and b5[0] == b5[4]:
byteStream[i+2] = b5[0]
errorsCount += 1
print 'errors fixed (bytes):', errorsCount
# calculate constancy intervals (means how much time the byte keeps constant bitmask):
for (_, frame) in frames.items():
for byteStream in frame.bytesStream:
frame.constTimestamps.append(list())
if len(set(byteStream)) > 10:
continue # skip bytes with real numeric data
timeStart = 0
for i, b2 in enumerate(zip(byteStream[:-1], byteStream[1:])):
if (b2[0] != b2[1]) or (i == len(byteStream) - 2):
if timeStart < (i - 10):
frame.constTimestamps[-1].append([timeStart, i])
timeStart = i+1
for key, frame in frames.iteritems():
print key, "constant intervals: [", ', '.join([str(len(x)) for x in frame.constTimestamps]), "]"
# make 4 subplots with shared X axis:
fig, axs = plt.subplots(4, 1, sharex='all')
plt.xlim(frames.values()[0].timeStream[0] * 0.001, frames.values()[0].timeStream[-1] * 0.001)
# fig.tight_layout() - brakes buttons
plt.subplots_adjust(left=0.05, right=0.99, top=0.9, bottom=0.1)
# display graphs of parsed and calculated RPM, Torque, etc. :
axs[0].set_title('0x280 parsed')
x = map(lambda time: time * 0.001, frames['0x280'].timeStream)
y = map(lambda (b1, b2): (b1*256 + b2) / 4 / 10, zip(frames['0x280'].bytesStream[3], frames['0x280'].bytesStream[2]))
axs[0].plot(x, y, label='RPM /10')
y = map(lambda b: b * 2, frames['0x280'].bytesStream[1])
axs[0].plot(x, y, label='Torque')
y = map(lambda b: b, frames['0x280'].bytesStream[5])
axs[0].plot(x, y, label='Acceleration pedal')
axs[0].fill_between(x, 0, y, label='Acceleration pedal', facecolor='red', alpha=0.25)
# byte #0 contains bitmask of clutch and acceleration pedals, display area when the clutch is pressed:
y = map(lambda b: b, frames['0x280'].bytesStream[0])
collection = plt_collections.BrokenBarHCollection.span_where(x, ymin=0, ymax=20, where=[dy <= 1 for dy in y], facecolor='green', alpha=0.25)
axs[0].add_collection(collection)
# dispaly speed steering angle and L/R forces:
axs[1].set_title('0x1A0, 0xC2, 0x540, 0x5C0 parsed')
x = map(lambda time: time * 0.001, frames['0x1A0'].timeStream)
y = map(lambda (b1, b2): (b1*256 + b2) * 0.005, zip(frames['0x1A0'].bytesStream[3], frames['0x1A0'].bytesStream[2]))
axs[1].plot(x, y, label='Speed')
y = map(lambda b: b, frames['0x1A0'].bytesStream[1])
# byte #1 contains bitmask for brake pedal, display area when brake is pressed:
collection = plt_collections.BrokenBarHCollection.span_where(x, ymin=-10, ymax=10, where=[dy == 72 for dy in y], facecolor='red', alpha=0.25)
axs[1].add_collection(collection)
x = map(lambda time: time * 0.001, frames['0xC2'].timeStream)
y = map(lambda (b1, b2): ((128 - b1 if b1 & 128 else b1) * 256 + b2) / 256, zip(frames['0xC2'].bytesStream[1], frames['0xC2'].bytesStream[0]))
axs[1].plot(x, y, label='Steering')
axs[1].fill_between(x, 0, y, label='Steering', facecolor='green', alpha=0.25)
x = map(lambda time: time * 0.001, frames['0x540'].timeStream)
y = map(lambda b: b, frames['0x540'].bytesStream[7])
axs[1].plot(x, y, label='Gear')
x = map(lambda time: time * 0.001, frames['0x5C0'].timeStream)
y = map(lambda b: b, frames['0x5C0'].bytesStream[2])
axs[1].plot(x, y, label='L. force')
axs[1].fill_between(x, [128 for a in y], y, label='L. force', facecolor='blue', alpha=0.25)
axs[0].legend(fontsize='x-small')
axs[1].legend(fontsize='x-small')
frameIdIndex= 0
frameId = frames.keys()[frameIdIndex]
axprev = plt.axes([0.88, 0.01, 0.05, 0.05])
bprev = Button(axprev, '<')
bprev.on_clicked(prevClick)
axnext = plt.axes([0.94, 0.01, 0.05, 0.05])
bnext = Button(axnext, '>')
bnext.on_clicked(nextClick)
fig.suptitle(csvFilaPath)
displayCustomFrame(frameId)
plt.show()
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
4033,
26448,
355,
458,
83,
62,
4033,
26448,
198,
6738,
2603,
29487,
8019,
13,
28029,
11407,
1330,
20969,
198,
11748,
269,
21370,
198,
198,
40664... | 2.288611 | 2,318 |
import os
import sys
root_dir_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(root_dir_path)
| [
11748,
28686,
198,
11748,
25064,
198,
198,
15763,
62,
15908,
62,
6978,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
705,
492,
6,
4008,
198,
17597... | 2.357143 | 56 |
# file: train_cdsa_f_att.py
# author: Yue Jian
# Copyright (C) 2020. All Rights Reserved.
import pandas as pd
import numpy as np
import jieba
import copy
from keras.preprocessing.text import Tokenizer
from keras_preprocessing.sequence import pad_sequences
from keras.models import Model
from keras.layers import Input, Dense, LSTM, Dropout, Embedding,Bidirectional
from keras import backend as K
from keras.engine.topology import Layer
from gensim.models.word2vec import Word2Vec
from bert_serving.client import BertClient
w2v_path = './w2v/*.vec'
model_path = './model/cdsa_f_att/*.h5'
train = pd.read_csv('./data/*.csv')
train['sen_cut'] = train['comment'].astype(str).apply(jieba.lcut)
X_train = train['sen_cut'].apply(lambda x: ' '.join(x)).tolist()
y_train = pd.get_dummies((np.asarray(train["label"])))
text = np.array(X_train)
vocab_size = 30000
maxlen = 120
print("开始统计语料的词频信息...")
t = Tokenizer(vocab_size)
t.fit_on_texts(text)
word_index = t.word_index
print('完整的字典大小:', len(word_index))
print("开始序列化句子...")
X_train = t.texts_to_sequences(X_train)
print("开始对齐句子序列...")
X_train = pad_sequences(X_train, maxlen=maxlen, padding='post')
print("完成!")
# 移除低频词
small_word_index = copy.deepcopy(word_index) # 防止原来的字典被改变
x = list(t.word_counts.items())
s = sorted(x, key=lambda p:p[1], reverse=True)
print("移除word_index字典中的低频词...")
for item in s[20000:]:
small_word_index.pop(item[0]) # 对字典pop
print("完成!")
wv_model = Word2Vec.load(w2v_path)
bc = BertClient()
word_matrix = np.random.uniform(size=(vocab_size+1,300))
bert_matrix = np.random.uniform(size=(vocab_size+1,768))
embedding_matrix = np.random.uniform(size=(vocab_size + 1, 1068))
print("构建embedding_matrix...")
for word, index in small_word_index.items():
try:
word_vector = wv_model[word]
word_matrix[index] = word_vector
except:
print("Word: [",index,"] not in wvmodel! Use random embedding instead.")
pass
try:
bert_vector = bc.encode([word])
bert_matrix[index] = bert_vector
except:
print("Word: [",index, "] not in bertmodel! Use random embedding instead.")
pass
try:
embedding_matrix[index] = np.hstack((word_matrix[index], bert_matrix[index]))
except:
print("embedding_matrix: [",index,"] hstack error!" )
print("完成!")
print("Embedding matrix shape:\n",embedding_matrix.shape)
# Attention mechanism
wv_dim = 1068
n_timesteps = maxlen
# BiLSTM
inputs = Input(shape=(maxlen,))
embedding_sequences = Embedding(vocab_size+1, wv_dim, input_length=maxlen, weights=[embedding_matrix])(inputs)
lstm = Bidirectional(LSTM(128, return_sequences= True))(embedding_sequences)
l = AttentionLayer()(lstm)
l = Dense(128, activation="tanh")(l)
l = Dropout(0.5)(l)
l = Dense(2, activation="softmax")(l)
m = Model(inputs, l)
m.summary()
m.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
m.fit(X_train, y_train, epochs=10, batch_size=32, validation_split=0.2)
m.save(model_path)
| [
2,
2393,
25,
4512,
62,
66,
9310,
64,
62,
69,
62,
1078,
13,
9078,
198,
2,
1772,
25,
32854,
40922,
198,
2,
15069,
357,
34,
8,
12131,
13,
1439,
6923,
33876,
13,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355... | 2.222802 | 1,342 |
import setuptools
packages = setuptools.find_packages(exclude=['docs', 'tests'])
setuptools.setup(name='shnootalk_cc_server', version='testing', packages=packages)
| [
11748,
900,
37623,
10141,
198,
198,
43789,
796,
900,
37623,
10141,
13,
19796,
62,
43789,
7,
1069,
9152,
28,
17816,
31628,
3256,
705,
41989,
6,
12962,
198,
2617,
37623,
10141,
13,
40406,
7,
3672,
11639,
1477,
77,
1025,
971,
62,
535,
62... | 3.113208 | 53 |
import pytest
import securemailbox
from securemailbox.models import Message, Mailbox
@pytest.fixture
# Force fixture to be used if not explicitly imported
@pytest.fixture(autouse=True)
| [
11748,
12972,
9288,
198,
198,
11748,
5713,
4529,
3524,
198,
6738,
5713,
4529,
3524,
13,
27530,
1330,
16000,
11,
11099,
3524,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
2,
5221,
29220,
284,
307,
973,
611,
407,
11777,
17392,
198,... | 3.454545 | 55 |
import pickle
import numpy as np
if __name__ == '__main__':
model = None
cv = None
# with open('assets/cv.pkl', 'rb') as f:
# cv = pickle.load(f)
#
# with open('assets/lda15.pkl', 'rb') as f:
# model = pickle.load(f)[0]
#
# text = "My husband and I wanted to get away but didnt have time in our schedule this year to plan an extensive " \
# "vacation as we usually do. We hesitated booking an 'all inclusive' package as thats not our cup of tea. " \
# "We did stay in an EXCELLENCE room status which I would highly suggest. Now that I know what I know I " \
# "would not go back unless I could book that level of a room. Sorry we just aren't the 'carnival cruise' " \
# "rowdy couple and do not like that atmosphere. The wait staff at this resort is nothing short of Amazing. " \
# "They work so hard to please you and make your stay perfect. At the Excellence Pool by building 8, " \
# "Martha and Antonio were just THE BEST. We were there for 10 days and only went off property once to Isla " \
# "and I wouldnt even do that again. Really no reason to leave the resort. Just keep in mind there IS a " \
# "dress code for the restaurants and many of the require pants for men. They DO go by that and wont let you " \
# "in if you arent dressed appropriately. Luckily the hubs and I love to dress up so that wasnt a problem " \
# "but I overhead people complaining about it. I think most places in the states have a dress code but we " \
# "arent used to them getting followed. They really should send an email after booking stating this " \
# "information and how critical it is. Maybe they did and I didnt read it but we were fine and had enough " \
# "clothing that worked. The food was good for all inclusive food. We are kind of foodies so not the level " \
# "we would choose but again for what it was for it was really good. We also had a couple spa days. We have " \
# "been to many spas in many places but this one was by far the best my husband and I both have had. I " \
# "wouldnt hesitate booking another getaway here to relax and get pampered "
# tags = get_review_asbects(text, model, cv)
# print(tags)
| [
11748,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
2746,
796,
6045,
198,
220,
220,
220,
269,
85,
796,
6045,
198,
220,
220,
220,
130... | 2.957233 | 795 |
# -*- coding: utf-8 -*-
from atf_regparser.preprocs import USCode
from regparser.test_utils.xml_builder import XMLBuilder
def test_uscode_transform():
    """US Code issues.

    A heading whose text is a U.S. Code citation ("18 U.S.C. 922(x)")
    should be rewritten by the ``USCode`` preprocessor; the test builds a
    minimal PART/REGTEXT/SECTION tree, runs the transform, and checks that
    at least one ``USCODE`` element was produced.
    """
    with XMLBuilder("PART") as ctx:
        with ctx.REGTEXT(ID="RT1"):
            with ctx.SECTION():
                ctx.SECTNO(u"§ 478.103")
                # This heading cites the U.S. Code -- the transform target.
                ctx.HD("18 U.S.C. 922(x)", SOURCE="HD3")
                ctx.P("Some Content")
            # Unrelated heading; should be left untouched.
            ctx.HD("Whatever", SOURCE="HED")
    xml = ctx.xml
    USCode().transform(xml)

    uscode = xml.xpath("//USCODE")
    assert uscode
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
379,
69,
62,
2301,
48610,
13,
3866,
1676,
6359,
1330,
1294,
10669,
198,
6738,
842,
48610,
13,
9288,
62,
26791,
13,
19875,
62,
38272,
1330,
23735,
32875,
628,
198,
... | 1.992982 | 285 |
import numpy as np
from yt.testing import assert_array_almost_equal
from yt.utilities.math_utils import \
get_sph_r_component, \
get_sph_theta_component, \
get_sph_phi_component, \
get_cyl_r_component, \
get_cyl_z_component, \
get_cyl_theta_component, \
get_cyl_r, get_cyl_theta, \
get_cyl_z, get_sph_r, \
get_sph_theta, get_sph_phi
# Randomly generated coordinates in the domain [[-1,1],[-1,1],-1,1]]
coords = np.array([[-0.41503037, -0.22102472, -0.55774212],
[ 0.73828247, -0.17913899, 0.64076921],
[ 0.08922066, -0.94254844, -0.61774511],
[ 0.10173242, -0.95789145, 0.16294352],
[ 0.73186508, -0.3109153 , 0.75728738],
[ 0.8757989 , -0.41475119, -0.57039201],
[ 0.58040762, 0.81969082, 0.46759728],
[-0.89983356, -0.9853683 , -0.38355343]]).T
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
331,
83,
13,
33407,
1330,
6818,
62,
18747,
62,
28177,
62,
40496,
198,
6738,
331,
83,
13,
315,
2410,
13,
11018,
62,
26791,
1330,
3467,
198,
220,
220,
220,
651,
62,
82,
746,
62,
81,
62,... | 1.771593 | 521 |
from django.core.management.base import BaseCommand
from django.utils import timezone
from geo.models import County, AdminRegion, Tract
from indicators.models import CensusValue, CensusVariable, CensusSource
REGIONAL_COUNTIES = (
'Allegheny',
'Washington',
'Butler',
'Armstrong',
'Westmoreland',
'Beaver'
'Fayette',
'Greene',
'Indiana',
'Lawrence',
)
CHUNK_SIZE = 40
current_counties = County.objects.filter(name__in=REGIONAL_COUNTIES, statefp=42)
def chunks(lst, n):
    """Yield successive n-sized chunks from lst.

    The final chunk is shorter when ``len(lst)`` is not a multiple of *n*.
    """
    # Walk the sequence in strides of ``n``; slicing clamps the final,
    # possibly shorter, chunk automatically.
    for offset in range(0, len(lst), n):
        chunk = lst[offset:offset + n]
        yield chunk
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
198,
6738,
40087,
13,
27530,
1330,
3418,
11,
32053,
47371,
11,
309,
974,
198,
6738,
21337,
13,
27530,
1330,... | 2.566802 | 247 |
from typing import (
Any, Dict, List, Literal, Optional, Sequence, SupportsInt, Union
)
from ..route import Route
from ..utils import MISSING
from .base import Requester
| [
6738,
19720,
1330,
357,
198,
220,
220,
220,
4377,
11,
360,
713,
11,
7343,
11,
25659,
1691,
11,
32233,
11,
45835,
11,
45267,
5317,
11,
4479,
198,
8,
198,
198,
6738,
11485,
38629,
1330,
18956,
198,
6738,
11485,
26791,
1330,
49684,
2751,... | 3.52 | 50 |
import os
import random
import string
import pymongo
import pysam
from utils import Xpos
class SequencesClient(object):
'''Manages CRAMS for all chromosomes. Assumes one CRAM per chromosome.'''
@staticmethod
@staticmethod
| [
11748,
28686,
198,
11748,
4738,
198,
11748,
4731,
198,
198,
11748,
279,
4948,
25162,
198,
11748,
279,
893,
321,
198,
6738,
3384,
4487,
1330,
1395,
1930,
628,
198,
4871,
24604,
3007,
11792,
7,
15252,
2599,
198,
220,
220,
220,
705,
7061,
... | 3.273973 | 73 |
import folium
# Latitude, Longitude
LOCATION_DATA = [
("41.90093256", "12.48331626"),
("41.89018285", "12.49235900"),
("41.89868519", "12.47684474"),
("41.89454167", "12.48303163"),
("41.90226256", "12.45739340"),
("41.90269661", "12.46635787"),
("41.91071023", "12.47635640"),
("41.90266442", "12.49624457")
]
LOCATION_NAMES = [
"Trevi Fountain",
"Colosseum",
"Pantheon",
"Piazza Venezia",
"St. Peter’s Square",
"Mausoleum of Hadrian",
"Piazza del Popolo",
"Fountain of the Naiads"
]
if __name__ == '__main__':
    # Build a map with one marker per landmark, then fit the viewport to
    # the markers and write the result to an HTML file.
    folium_map = folium.Map()
    for cords, name in zip(LOCATION_DATA, LOCATION_NAMES):
        folium.Marker(location=[cords[0], cords[1]],
                      popup=f"Lattitude:<br>{cords[0]}<br>"
                            f"Longitude:<br>{cords[1]}<br>"
                            f"Name:<br>{name}"
                      ).add_to(folium_map)
    # NOTE(review): LOCATION_DATA holds coordinate *strings*, so min/max
    # compare tuples lexicographically, not numerically. All latitudes here
    # share the "41." prefix and equal digit counts, so this happens to
    # yield a usable bounding box -- verify before reusing with other data.
    south_west_corner = min(LOCATION_DATA)
    north_east_corner = max(LOCATION_DATA)
    folium_map.fit_bounds([south_west_corner, north_east_corner])
    folium_map.save("FoliumMap.html")
| [
11748,
5955,
1505,
198,
198,
2,
5476,
3984,
11,
5882,
3984,
198,
29701,
6234,
62,
26947,
796,
685,
198,
220,
220,
220,
5855,
3901,
13,
12865,
6052,
11645,
1600,
366,
1065,
13,
2780,
2091,
1433,
2075,
12340,
198,
220,
220,
220,
5855,
... | 1.919795 | 586 |
# -*- coding: utf-8 -*-
import threading
import socket
import select
import Queue
import re
import UserRoom
import Utils
# def find_key_in_ring(self, typeOfKeyRing, id_key, type_of_searched_key):
# if typeOfKeyRing == 'pub':
# ring = self.pub_keyring
# else:
# ring = self.priv_keyring
#
# for key in ring:
# print(key)
# if key.key_id == id_key:
# if type_of_searched_key == 'pub':
# return key.pub_key
# else:
# return key.priv_key
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
4704,
278,
198,
11748,
17802,
198,
11748,
2922,
198,
11748,
4670,
518,
198,
11748,
302,
198,
11748,
11787,
41178,
198,
11748,
7273,
4487,
628,
198,
220,
220,
220,
... | 1.896774 | 310 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2020, Anima Istanbul
#
# This module is part of anima and is released under the MIT
# License: http://www.opensource.org/licenses/MIT
def createArnoldTextureSettings():
    """The patched version of the original file.

    Rebuilds the "Textures" section of Arnold's render settings UI with the
    auto-convert-to-TX option shown but disabled and forced off (the Anima
    pipeline manages TX files itself); everything else mirrors the stock
    mtoa UI.
    """
    import pymel.core as pm
    import maya.cmds as cmds
    import pymel.versions as versions
    from mtoa.ui.globals import settings
    pm.setUITemplate('attributeEditorTemplate', pushTemplate=True)
    pm.columnLayout(adjustableColumn=True)
    # Auto-TX control is rendered greyed out (enable=False) so users can see
    # it is deliberately off rather than missing.
    pm.attrControlGrp(
        'autotx',
        cc=settings.updateAutoTxSettings,
        label="Auto-convert Textures to TX (Disabled in Anima)",
        attribute='defaultArnoldRenderOptions.autotx',
        enable=False
    )
    pm.attrControlGrp('use_existing_tiled_textures',
                      label="Use Existing TX Textures",
                      attribute='defaultArnoldRenderOptions.use_existing_tiled_textures')

    # disable autotx
    pm.setAttr('defaultArnoldRenderOptions.autotx', 0)
    settings.updateAutoTxSettings()

    cmds.separator()

    # don't create texture_automip for 2017 as autoTx is ON by default
    maya_version = versions.shortName()
    if int(float(maya_version)) < 2017:
        pm.attrControlGrp('texture_automip',
                          label="Auto-mipmap",
                          attribute='defaultArnoldRenderOptions.textureAutomip')
    pm.attrControlGrp('texture_accept_unmipped',
                      label="Accept Unmipped",
                      attribute='defaultArnoldRenderOptions.textureAcceptUnmipped')

    cmds.separator()

    # Auto-tile checkbox plus a 16-64 tile-size slider; the three
    # connectControl calls bind slider, field and label of the group to the
    # same render-option attribute so they stay in sync.
    pm.checkBoxGrp('ts_autotile',
                   cc=settings.updateAutotileSettings,
                   label='',
                   label1='Auto-tile')
    pm.connectControl('ts_autotile', 'defaultArnoldRenderOptions.autotile',
                      index=2)
    pm.intSliderGrp('ts_texture_autotile',
                    label="Tile Size",
                    minValue=16,
                    maxValue=64,
                    fieldMinValue=16,
                    fieldMaxValue=1024
                    )
    pm.connectControl('ts_texture_autotile',
                      'defaultArnoldRenderOptions.textureAutotile', index=1)
    pm.connectControl('ts_texture_autotile',
                      'defaultArnoldRenderOptions.textureAutotile', index=2)
    pm.connectControl('ts_texture_autotile',
                      'defaultArnoldRenderOptions.textureAutotile', index=3)
    '''pm.attrControlGrp('texture_autotile',
                      label="Auto-tile Size",
                      attribute='defaultArnoldRenderOptions.textureAutotile')'''
    pm.attrControlGrp('texture_accept_untiled',
                      label="Accept Untiled",
                      attribute='defaultArnoldRenderOptions.textureAcceptUntiled')
    pm.attrControlGrp('texture_max_memory_MB',
                      label="Max Cache Size (MB)",
                      attribute='defaultArnoldRenderOptions.textureMaxMemoryMB')
    pm.attrControlGrp('texture_max_open_files',
                      label="Max Open Files",
                      attribute='defaultArnoldRenderOptions.textureMaxOpenFiles')
    cmds.separator()
    cmds.attrControlGrp('texture_diffuse_blur',
                        label="Diffuse Blur",
                        attribute='defaultArnoldRenderOptions.textureDiffuseBlur')
    # cmds.attrControlGrp('texture_glossy_blur',
    #                     label="Glossy Blur",
    #                     attribute='defaultArnoldRenderOptions.textureGlossyBlur')
    pm.setParent('..')
    pm.setUITemplate(popTemplate=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
2321,
12,
42334,
11,
1052,
8083,
25299,
198,
2,
198,
2,
770,
8265,
318,
636,
286,
2355,
64,
290,
318,
2716,
739,
262,
17168,
198,
2,
13789,
2... | 2.129522 | 1,714 |
import pafy
import os
songs = """
www.youtube.com/watch?v=PT2_F-1esPk
https://www.youtube.com/watch?v=ek7cafqgYB4
https://www.youtube.com/watch?v=HQnC1UHBvWA
https://www.youtube.com/watch?v=s8XIgR5OGJc
"""
#songs="https://www.youtube.com/watch?v=otv7lJmLJtQ"
#unsure of their artist/title
unsure_songs = []
root = "./outputs/"
_verbose = True
_debug = True

#return a tuple/3 (yt object, artist, title)
# Collected (yt object, artist, title) tuples for every URL that parsed.
song_data = []
# ``songs`` is a triple-quoted block, so splitting on newlines also yields
# empty first/last entries -- presumably ``getter`` (defined elsewhere in
# this file) returns a falsy value for those; confirm before relying on it.
for i, songurl in enumerate(songs.split("\n")):
    # ``print_v`` / ``getter`` are project helpers defined outside this chunk.
    print_v("Parsing " + str(i) + " of " + str(len(songs.split("\n"))))
    metadata = getter(songurl)
    if metadata:
        song_data.append(metadata)
# Build the output directory tree from the parsed metadata (helper defined
# elsewhere in this file).
directory_constructor(song_data)
11748,
279,
1878,
88,
198,
11748,
28686,
198,
198,
82,
28079,
796,
37227,
198,
2503,
13,
11604,
13,
785,
14,
8340,
30,
85,
28,
11571,
17,
62,
37,
12,
16,
274,
47,
74,
198,
5450,
1378,
2503,
13,
11604,
13,
785,
14,
8340,
30,
85,
... | 2.275862 | 290 |
# coding=utf-8
# Copyright 2019 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test fixture for T2R models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import mock
from tensor2robot.input_generators import default_input_generator
from tensor2robot.utils import train_eval
from tensor2robot.utils import train_eval_test_utils
import tensorflow as tf
from tensorflow.python.tpu import tpu # pylint: disable=g-direct-tensorflow-import
TRAIN = tf.estimator.ModeKeys.TRAIN
_MAX_TRAIN_STEPS = 2
_BATCH_SIZE = 2
_USE_TPU_WRAPPER = True
class T2RModelFixture(object):
  """Fixture to quickly test estimator models.

  NOTE(review): ``self._get_params``, ``self._test_case`` and
  ``self._use_tpu`` are not defined in this chunk; presumably they are set
  up elsewhere on the fixture (e.g. in ``__init__``) -- confirm.
  """
  def random_train(self, module_name, model_name, **module_kwargs):
    """Trains a T2R model with random inputs."""
    # `module_name` is a module object and `model_name` the attribute to
    # instantiate from it; `module_kwargs` are forwarded to the constructor.
    tf_model = getattr(module_name, model_name)(**module_kwargs)
    params = self._get_params(
        model_dir=self._test_case.create_tempdir().full_path, **module_kwargs)
    input_generator = default_input_generator.DefaultRandomInputGenerator(
        batch_size=params['batch_size'])
    # Wrap tpu.initialize_system in a spy: side_effect delegates to the real
    # function so behaviour is unchanged, but the call can be asserted below.
    initialize_system = tpu.initialize_system
    with mock.patch.object(
        tpu, 'initialize_system', autospec=True) as mock_init:
      mock_init.side_effect = initialize_system
      train_eval.train_eval_model(
          t2r_model=tf_model,
          input_generator_train=input_generator,
          max_train_steps=params['max_train_steps'],
          model_dir=params['model_dir'],
          use_tpu_wrapper=params['use_tpu_wrapper'])
      # When running on TPU the system must have been initialised.
      if self._use_tpu:
        mock_init.assert_called()
      # Verify the expected checkpoint/event files were written.
      train_eval_test_utils.assert_output_files(
          test_case=self._test_case,
          model_dir=params['model_dir'],
          expected_output_filename_patterns=train_eval_test_utils
          .DEFAULT_TRAIN_FILENAME_PATTERNS)
  def recordio_train(self, module_name, model_name, file_patterns,
                     **module_kwargs):
    """Trains the model with a RecordIO dataset for a few steps."""
    # NOTE(review): this duplicates random_train except for the input
    # generator; a shared private helper would remove the duplication.
    tf_model = getattr(module_name, model_name)(**module_kwargs)
    params = self._get_params(
        model_dir=self._test_case.create_tempdir().full_path, **module_kwargs)
    input_generator = default_input_generator.DefaultRecordInputGenerator(
        file_patterns, batch_size=params['batch_size'])
    initialize_system = tpu.initialize_system
    with mock.patch.object(
        tpu, 'initialize_system', autospec=True) as mock_init:
      mock_init.side_effect = initialize_system
      train_eval.train_eval_model(
          t2r_model=tf_model,
          input_generator_train=input_generator,
          max_train_steps=params['max_train_steps'],
          model_dir=params['model_dir'],
          use_tpu_wrapper=params['use_tpu_wrapper'])
      if self._use_tpu:
        mock_init.assert_called()
      train_eval_test_utils.assert_output_files(
          test_case=self._test_case,
          model_dir=params['model_dir'],
          expected_output_filename_patterns=train_eval_test_utils
          .DEFAULT_TRAIN_FILENAME_PATTERNS)
  def random_predict(self, module_name, model_name, **module_kwargs):
    """Runs predictions through a model with random inputs."""
    tf_model = getattr(module_name, model_name)(**module_kwargs)
    input_generator = default_input_generator.DefaultRandomInputGenerator(
        batch_size=1)
    # predict_from_model yields predictions; return the first one only.
    for prediction in train_eval.predict_from_model(
        t2r_model=tf_model,
        input_generator_predict=input_generator,
        model_dir=self._test_case.create_tempdir().full_path):
      return prediction
    return None
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
13130,
383,
309,
22854,
17,
14350,
313,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
... | 2.589711 | 1,594 |
#2.08 microns.
import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.interpolate import interp2d
from scipy.integrate import cumtrapz
import opticstools as ot
import matplotlib.pyplot as plt
# Read the mode field (skipping the 4-line header) and define the physical
# coordinate grids it is sampled on.
mm = np.genfromtxt('2x2_4um_ex.m00', skip_header=4)
x = np.linspace(-8,8,97)
y = np.linspace(0-7.5,25.5-7.5,97)
# Common 64-point grid used to resample the field onto a square window.
xy_new = np.linspace(-7.5,7.5,64)
# NOTE(review): x_ix/y_ix (and dx below) are computed but never used.
x_ix = np.interp(xy_new, x, np.arange(97))
y_ix = np.interp(xy_new, y, np.arange(97))
# Spline-interpolate the field onto the square grid...
ee_func = RectBivariateSpline(y,x, mm)
ee_square_small = ee_func(xy_new, xy_new)
# ...and zero-pad it into the centre of a 128x128 array for the FFT.
ee_square = np.zeros( (128,128) )
ee_square[32:32+64,32:32+64] = ee_square_small
dx = xy_new[1]-xy_new[0]
# Wavelength expressed in sampling pixels -- the constants presumably encode
# the 4 micron wavelength and the grid sampling; TODO confirm units.
wave_in_pix = 4.0/2.4/0.238
# After a Fourier transform, 1 Fourier pixel corresponds to this many units
# of sin(theta).
ft_pix_scale = wave_in_pix/128
# Far-field intensity: |FFT|^2, with fftshift to centre the DC term.
far_field = np.abs(np.fft.fftshift(np.fft.fft2(np.fft.fftshift(ee_square))))**2
# Azimuthally averaged radial profile (returnradii also gives the radius axis).
xf = ot.azimuthalAverage(far_field, returnradii=True, center=[64,64], binsize=1)
plt.clf()
plt.plot(xf[0]*ft_pix_scale, xf[1]/xf[1][0], label='Azimuthally Averaged Int')
plt.xlim([0,1])
# Prepend the on-axis point so the encircled-energy integral starts at r=0;
# the outermost (noisy) sample is zeroed out.
x_for_sum = np.concatenate([[0],xf[0]*ft_pix_scale])
y_for_sum = np.concatenate([[xf[1][0]],xf[1]])
y_for_sum[-1]=0
# Encircled energy: cumulative integral of I(r)*r, normalised to 1 at the edge.
encircled = cumtrapz(y_for_sum*x_for_sum, x_for_sum)
encircled /= encircled[-1]
plt.plot(x_for_sum[1:], encircled, label='Encircled Energy')
plt.legend()
plt.xlabel('sin(theta)')
plt.ylabel('Intensity or Int Sum')
2,
17,
13,
2919,
12314,
12212,
13,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
3849,
16104,
378,
1330,
48599,
33,
42524,
26568,
500,
198,
6738,
629,
541,
88,
13,
3849,
16104,
378,
1330,
987,
79,
17,
67,
1... | 2.076687 | 652 |
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
import numpy as np
"""
决策树算法
opencv中机器学习模块的决策树算法分为两个类别,一个是随机森林,另一个强化分类,这两种算法都属于决策树算法,相关api如下
cv.ml.StatModel.predict(samples, results, flags)
- sample 输入样本
- results 预测结果
"""
if "__main__" == __name__:
main()
cv.destroyAllWindows()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
201,
198,
2,
532,
9,
12,
19617,
28,
40477,
12,
23,
532,
9,
12,
201,
198,
201,
198,
11748,
269,
85,
17,
355,
269,
85,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
... | 1.188153 | 287 |
"""
matplotlylib
============
This module converts matplotlib figure objects into JSON structures which can
be understood and visualized by Plotly.
Most of the functionality should be accessed through the parent directory's
'tools' module or 'plotly' package.
"""
from __future__ import absolute_import
from .renderer import PlotlyRenderer
from .mplexporter import Exporter
from .tools import mpl_to_plotly
| [
37811,
198,
6759,
29487,
306,
8019,
198,
25609,
198,
198,
1212,
8265,
26161,
2603,
29487,
8019,
3785,
5563,
656,
19449,
8573,
543,
460,
198,
1350,
7247,
290,
5874,
1143,
416,
28114,
306,
13,
198,
198,
6943,
286,
262,
11244,
815,
307,
... | 3.914286 | 105 |
"""
100 DAYS OF PROGRAMMING CHALLENGE
+++++++ DAY 8 ++++++++
Write a program that accepts a comma separated sequence of words as input and prints the words in a comma-separated sequence after sorting them alphabetically.
Suppose the following input is supplied to the program:
without,hello,bag,world
Then, the output should be:
bag,hello,without,world
"""
print(",".join(sorted(input("Input a list of words: ? ").split(","))))
| [
37811,
198,
3064,
24644,
50,
3963,
46805,
44,
2751,
5870,
7036,
1677,
8264,
198,
14030,
45340,
24644,
807,
1343,
14030,
45340,
198,
16594,
257,
1430,
326,
18178,
257,
39650,
11266,
8379,
286,
2456,
355,
5128,
290,
20842,
262,
2456,
287,
... | 3.666667 | 117 |
import os
import datetime
import sys
import json
import numpy as np
from elasticsearch import Elasticsearch, RequestsHttpConnection
# HTTP header names used to extract CloudEvents / Seldon request metadata
# before indexing into Elasticsearch.
TYPE_HEADER_NAME = "Ce-Type"
REQUEST_ID_HEADER_NAME = "Ce-Requestid"
CLOUD_EVENT_ID = "Ce-id"
# in seldon case modelid is node in graph as graph can have multiple models
MODELID_HEADER_NAME = "Ce-Modelid"
NAMESPACE_HEADER_NAME = "Ce-Namespace"
# endpoint distinguishes default, canary, shadow, A/B etc.
ENDPOINT_HEADER_NAME = "Ce-Endpoint"
# NOTE(review): "CE-Time" uses different capitalisation than the other
# "Ce-*" headers -- confirm this matches what the producer actually sends.
TIMESTAMP_HEADER_NAME = "CE-Time"
# inferenceservicename is k8s resource name for SeldonDeployment or InferenceService
INFERENCESERVICE_HEADER_NAME = "Ce-Inferenceservicename"
LENGTH_HEADER_NAME = "Content-Length"
# Elasticsearch document type; None means the server default is used.
DOC_TYPE_NAME = None
| [
11748,
28686,
198,
11748,
4818,
8079,
198,
11748,
25064,
198,
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
27468,
12947,
1330,
48567,
12947,
11,
9394,
3558,
43481,
32048,
198,
198,
25216,
62,
37682,
1137,
62,
20608,
796,
3... | 2.822835 | 254 |
# Generated by Django 2.1.4 on 2018-12-10 05:38
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
19,
319,
2864,
12,
1065,
12,
940,
8870,
25,
2548,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
6... | 2.818182 | 44 |
from chapter_11.name_function import get_formatted_name
print("enter q to quit")
while True:
first = input("enter first name: ")
if first == 'q':
break
last = input("enter last name: ")
if last == 'q':
break
print(get_formatted_name(first, last)) | [
6738,
6843,
62,
1157,
13,
3672,
62,
8818,
1330,
651,
62,
687,
16898,
62,
3672,
628,
198,
4798,
7203,
9255,
10662,
284,
11238,
4943,
198,
4514,
6407,
25,
198,
220,
220,
220,
717,
796,
5128,
7203,
9255,
717,
1438,
25,
366,
8,
198,
2... | 2.567568 | 111 |
# -*- coding: utf-8 -*-
# @Author : Evil0ctal
# @Time : 2021/07/09
# @Function:
# Enter the number of hours and salary rate, calculate the total salary and output to the console.
hours = input("Please input the total hours of the employee worked: ")
rate = input("Please input the hour rate of the employee pay: ")
if hours.isdigit() and rate.isdigit():
hours = int(hours)
rate = int(rate)
if int(hours) < 40:
print("Total:" + str(hours*rate))
else:
extra_pay = hours - 40
print("Total:" + str((40 * rate) + (extra_pay * rate * 1.5)))
else:
print('Please input a number only')
'''
Please input the total hours of the employee worked: 10
Please input the hour rate of the employee pay: 10
Total:100
Please input the total hours of the employee worked: 40
Please input the hour rate of the employee pay: 10
Total:400.0
Please input the total hours of the employee worked: 41
Please input the hour rate of the employee pay: 10
Total:415.0
Please input the total hours of the employee worked: aaa
Please input the hour rate of the employee pay: aaa
Please input a number only
'''
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
13838,
220,
1058,
10461,
15,
310,
282,
198,
2,
2488,
7575,
220,
220,
220,
1058,
33448,
14,
2998,
14,
2931,
198,
2,
2488,
22203,
25,
198,
2,
6062,
262,
1271,... | 3.121547 | 362 |
from pieces.queen import Queen
# Represent a rook piece
# Inherits from queen to reuse code for horizontal movement checking
# Inherited constructor
# Update the piece value
# The rook is allowed to move horizontally to any square that is
# unobstructed and does not contain a friendly piece.
# Returns a list of available moves that a piece may make, given a particular board.
# First, check every square to the right of the rook.
# Then, check every square to the left of the rook.
# Then, check every square above the rook.
# Finally, check every square below the rook.
# For each iteration, stop the iteration whenever you reach an inaccessible square.
| [
6738,
5207,
13,
4188,
268,
1330,
7542,
201,
198,
201,
198,
201,
198,
2,
10858,
257,
10929,
3704,
201,
198,
2,
47025,
896,
422,
16599,
284,
32349,
2438,
329,
16021,
3356,
10627,
201,
198,
201,
198,
220,
220,
220,
1303,
47025,
863,
23... | 3.577114 | 201 |
import numpy as np
import pandas as pd
def CIA(df:pd.DataFrame):
    """
    Chemical Index of Alteration (CIA).
    type: molecular
    ref: Nesbitt and Young (1982)
    """
    # 100 * Al2O3 over the sum of alumina plus the mobile oxides
    mobile_sum = df.Al2O3 + df.CaO + df.Na2O + df.K2O
    return 100 * df.Al2O3 / mobile_sum
def CIW(df:pd.DataFrame):
    """
    Chemical Index of Weathering (CIW).
    type: molecular
    ref: Harnois (1988)
    """
    # Like CIA but without the K2O term in the denominator.
    denominator = df.Al2O3 + df.CaO + df.Na2O
    return 100. * df.Al2O3 / denominator
def PIA(df:pd.DataFrame):
    """
    Plagioclase Index of Alteration (PIA).
    type: molecular
    ref: Fedo et al. (1995)
    """
    # K2O is subtracted from both numerator and denominator.
    numerator = df.Al2O3 - df.K2O
    denominator = df.Al2O3 + df.CaO + df.Na2O - df.K2O
    return 100. * numerator / denominator
def SAR(df:pd.DataFrame):
    """
    Silica-Alumina Ratio (SAR).
    type: molecular
    ref:
    """
    # Plain element-wise ratio of the two oxide columns.
    return df["SiO2"] / df["Al2O3"]
def SiTiIndex(df:pd.DataFrame):
    """
    Silica-Titania Index.
    Jayaverdena and Izawa (1994)
    type: molecular
    ref:
    """
    # NOTE: may need to recalculate titania from titanium ppm.
    silica_titania = df.SiO2 / df.TiO2
    silica_alumina = df.SiO2 / df.Al2O3
    alumina_titania = df.Al2O3 / df.TiO2
    return silica_titania / (silica_titania + silica_alumina + alumina_titania)
def WIP(df:pd.DataFrame):
    """
    Weathering Index of Parker (WIP).
    Parker (1970)
    type: molecular
    ref:
    """
    # Each oxide is weighted by its Parker divisor; alkalis count twice.
    sodium_term = 2 * df.Na2O / 0.35
    magnesium_term = df.MgO / 0.9
    potassium_term = 2 * df.K2O / 0.25
    calcium_term = df.CaO / 0.7
    return sodium_term + magnesium_term + potassium_term + calcium_term
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
4299,
7688,
7,
7568,
25,
30094,
13,
6601,
19778,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
24872,
12901,
286,
32770,
341,
198,
220,
220,
220,
... | 2.013846 | 650 |
from enum import Enum
import pytest
from antidote.core import DependencyContainer
from antidote.exceptions import DuplicateDependencyError, UndefinedContextError
from antidote.providers.indirect import IndirectProvider
from antidote.providers.factory import FactoryProvider
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.mark.parametrize(
'first,second',
[
((ServiceA,), (ServiceB,)),
((ServiceA,), (ServiceB, Profile.B)),
((ServiceA, Profile.A), (ServiceB,)),
((ServiceA, Profile.A), (ServiceB, Profile.A)),
]
)
| [
6738,
33829,
1330,
2039,
388,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
50131,
13,
7295,
1330,
37947,
1387,
29869,
198,
6738,
50131,
13,
1069,
11755,
1330,
49821,
5344,
35,
2690,
1387,
12331,
11,
13794,
18156,
21947,
12331,
198,
6738,... | 2.778302 | 212 |
"""
evaluates a trained Neural Network on its salient features regarding the time and feature dimension
creates a saliency heatmap
model should be trained beforehand
"""
import pandas
import pandas as pd
import torch
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import step
from statsmodels.tsa.vector_ar.var_model import forecast
import sys
import os
sys.path.append("../")
MAIN_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(MAIN_PATH)
import utils.datahandler as dh
import utils.tensorloader as dl
from utils.confighandler import read_config, write_config
from utils.cli import parse_basic, parse_with_loss, query_true_false
import json
from random import gauss
from random import seed
from pandas.plotting import autocorrelation_plot
import utils.modelhandler as mh
import utils.metrics as metrics
import itertools
import torch.nn as nn
import optuna
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from time import perf_counter
# creates saliency map for one timestep:
if __name__ == "__main__":
MAIN_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
INTERPRETATION_PATH = './oracles/interpretation/'
sys.path.append(MAIN_PATH)
MAX_BATCH_SIZE = 10
MAX_EPOCHS = 10000
N_TRIALS = 50 #hyperparameter tuning trials
MODEL_NAME = 'opsd_PSCC_few' #path relative to targets folder
SEP = ';' #seperation for csv data
criterion = metrics.rmse # loss function criterion
#time steps to interpret (beginning of history horizon) to calculate: index in csv table -2 -history horizon
timesteps = [35784]
print('model: ', MODEL_NAME)
## Data preperation
# import config and extract relevant config variables
CONFIG_PATH = './targets/' + MODEL_NAME + '/config.json'
config_file = os.path.join(MAIN_PATH, CONFIG_PATH)
CONFIG = read_config(config_path=config_file, main_path=MAIN_PATH)
target_id = CONFIG['target_id']
encoder_features = CONFIG['encoder_features']
decoder_features = CONFIG['decoder_features']
history_horizon = CONFIG['history_horizon']
forecast_horizon = CONFIG['forecast_horizon']
feature_groups = CONFIG['feature_groups']
path = os.path.join(MAIN_PATH, INTERPRETATION_PATH)
if not os.path.exists(path):
os.mkdir(path)
model_name = CONFIG["model_name"]
model_interpretation_path = os.path.join(path, model_name + '/')
if not os.path.exists(model_interpretation_path):
os.mkdir(model_interpretation_path)
interpretation_plot_path = os.path.join(model_interpretation_path, 'plots/')
if not os.path.exists(interpretation_plot_path):
os.mkdir(interpretation_plot_path)
tensor_path = os.path.join(model_interpretation_path, 'Tensors/')
if not os.path.exists(tensor_path):
os.mkdir(tensor_path)
cuda_id = CONFIG["cuda_id"]
if torch.cuda.is_available():
DEVICE = 'cuda'
if cuda_id is not None:
torch.cuda.set_device(cuda_id)
print('Device: ', DEVICE)
print('Current CUDA ID: ', torch.cuda.current_device())
else:
DEVICE = 'cpu'
print(DEVICE)
# import data as df
data_path = CONFIG["data_path"]
print('reading csv...')
df = pd.read_csv(os.path.join(MAIN_PATH, data_path), sep=SEP)
time_column = df.loc[:, "Time"]
print('done')
# scale all data
print('scaling data...')
df, scalers = dt.scale_all(df, feature_groups=feature_groups)
print('done')
# load input data into tensors
print('loading input data...')
dataloader = dl.make_dataloader(df,
target_id,
encoder_features,
decoder_features,
history_horizon=history_horizon,
forecast_horizon=forecast_horizon,
shuffle=False).to(DEVICE)
print('Done')
length = dataloader.dataset.targets.shape[0] # length of sequence per batch
num_features1 = dataloader.number_features1()
num_features2 = dataloader.number_features2()
number_of_targets = dataloader.dataset.targets.shape[2]
print('timesteps', length)
print('num_features1', num_features1)
print('num_features2', num_features2)
print('targets:', number_of_targets)
print('history_horizon', history_horizon)
print('forecast_horizon', forecast_horizon)
## load the trained NN
print('load net...')
INMODEL = os.path.join(MAIN_PATH, CONFIG["output_path"], CONFIG["model_name"])
net = torch.load(INMODEL, map_location=torch.device(DEVICE))
print('Done.')
t0_start=perf_counter()
results_df = pd.DataFrame(columns=['RMSE PERTURBATED',
'RMSE ORIGINAL',
'RMSE DIFFERENCE PERTURBATED ORIGINAL'],
index=timesteps)
for timestep in timesteps:
t1_start = perf_counter()
print('\n\ntimestep: ', timestep)
datetime = pd.to_datetime(time_column.iloc[timestep:timestep+history_horizon+forecast_horizon])
tensor_save_path = os.path.join(tensor_path, str(timestep) + '/')
if not os.path.exists(tensor_save_path):
os.mkdir(tensor_save_path)
# get original inputs and predictions
inputs1 = torch.unsqueeze(dataloader.dataset.inputs1[timestep], dim=0)
inputs2 = torch.unsqueeze(dataloader.dataset.inputs2[timestep], dim=0)
targets = torch.unsqueeze(dataloader.dataset.targets[timestep], dim=0)
with torch.no_grad():
predictions, _ = net(inputs1, inputs2)
## obtain reference input data
features1_references, features2_references = create_reference(dataloader, timestep, MAX_BATCH_SIZE)
## create saliency map
print('create saliency maps...')
study = optuna.create_study()
study.optimize(
objective,
n_trials=N_TRIALS)
print('Done')
#load best saliency map
best_trial_id = study.best_trial.number
saliency_map, perturbated_prediction = load_interpretation_tensors(tensor_save_path, best_trial_id)
#save plot for best saliency map
create_saliency_plot(timestep,
datetime,
saliency_map,
targets,
predictions,
perturbated_prediction,
inputs1,
inputs2,
interpretation_plot_path)
t1_stop = perf_counter()
print("Elapsed time: ", t1_stop-t1_start)
#calculate rmse of perturbated prediction and original prediction in respect to target value
rmse_perturbated = criterion(targets, torch.unsqueeze(torch.unsqueeze(torch.mean(perturbated_prediction[0],dim=0), dim=0),dim=0)).cpu().detach().numpy()
rmse_original = criterion(targets, predictions).cpu().detach().numpy()
rmse_diff = rmse_perturbated - rmse_original # difference in rmse scores between perturbated and original prediction
data = {
'RMSE PERTURBATED': rmse_perturbated,
'RMSE ORIGINAL': rmse_original,
'RMSE DIFFERENCE PERTURBATED ORIGINAL': rmse_diff}
results_df.loc[timestep] = data
save_path = model_interpretation_path
results_df.to_csv(save_path+'rmse.csv', sep=';', index=True)
t0_stop = perf_counter()
print("Total elapsed time: ", t0_stop-t0_start)
| [
37811,
198,
18206,
12632,
257,
8776,
47986,
7311,
319,
663,
49156,
3033,
5115,
262,
640,
290,
3895,
15793,
198,
20123,
274,
257,
3664,
6160,
4894,
8899,
198,
19849,
815,
307,
8776,
27091,
198,
37811,
198,
11748,
19798,
292,
198,
11748,
... | 2.243305 | 3,473 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# License: MIT (see LICENSE file provided)
# vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4:
"""
**polib** allows you to manipulate, create, modify gettext files (pot, po
and mo files). You can load existing files, iterate through it's entries,
add, modify entries, comments or metadata, etc... or create new po files
from scratch.
**polib** provides a simple and pythonic API, exporting only three
convenience functions (*pofile*, *mofile* and *detect_encoding*), and the
four core classes, *POFile*, *MOFile*, *POEntry* and *MOEntry* for creating
new files/entries.
**Basic example**:
>>> import polib
>>> # load an existing po file
>>> po = polib.pofile('tests/test_utf8.po')
>>> for entry in po:
... # do something with entry...
... pass
>>> # add an entry
>>> entry = polib.POEntry(msgid='Welcome', msgstr='Bienvenue')
>>> entry.occurrences = [('welcome.py', '12'), ('anotherfile.py', '34')]
>>> po.append(entry)
>>> # to save our modified po file:
>>> # po.save()
>>> # or you may want to compile the po file
>>> # po.save_as_mofile('tests/test_utf8.mo')
"""
__author__ = 'David JEAN LOUIS <izimobil@gmail.com>'
__version__ = '0.4.1'
__all__ = ['pofile', 'POFile', 'POEntry', 'mofile', 'MOFile', 'MOEntry',
'detect_encoding', 'escape', 'unescape']
import struct
import textwrap
import warnings
default_encoding = 'utf-8'
# function pofile() {{{
def pofile(fpath, **kwargs):
    """
    Convenience function that parses the po/pot file *fpath* and returns
    a POFile instance.
    **Keyword arguments**:
      - *fpath*: string, full or relative path to the po/pot file to parse
      - *wrapwidth*: integer, the wrap width, only useful when -w option was
        passed to xgettext (optional, default to 78)
      - *autodetect_encoding*: boolean, if set to False the function will
        not try to detect the po file encoding (optional, default to True)
      - *encoding*: string, an encoding, only relevant if autodetect_encoding
        is set to False
    **Example**:
    >>> import polib
    >>> po = polib.pofile('tests/test_weird_occurrences.po')
    >>> po #doctest: +ELLIPSIS
    <POFile instance at ...>
    >>> import os, tempfile
    >>> for fname in ['test_iso-8859-15.po', 'test_utf8.po']:
    ...     orig_po = polib.pofile('tests/'+fname)
    ...     tmpf = tempfile.NamedTemporaryFile().name
    ...     orig_po.save(tmpf)
    ...     try:
    ...         new_po = polib.pofile(tmpf)
    ...         for old, new in zip(orig_po, new_po):
    ...             if old.msgid != new.msgid:
    ...                 old.msgid
    ...                 new.msgid
    ...             if old.msgstr != new.msgstr:
    ...                 old.msgid
    ...                 new.msgid
    ...     finally:
    ...         os.unlink(tmpf)
    """
    # An explicit *encoding* kwarg is honoured only when auto-detection is
    # disabled; note the deliberate strict ``== True`` comparison.
    if kwargs.get('autodetect_encoding', True) == True:
        enc = detect_encoding(fpath)
    else:
        enc = kwargs.get('encoding', default_encoding)
    # Parse the file and carry the formatting options over to the result.
    parser = _POFileParser(fpath)
    instance = parser.parse()
    instance.wrapwidth = kwargs.get('wrapwidth', 78)
    instance.encoding = enc
    return instance
# }}}
# function mofile() {{{
def mofile(fpath, **kwargs):
    """
    Convenience function that parses the mo file *fpath* and returns
    a MOFile instance.
    **Keyword arguments**:
      - *fpath*: string, full or relative path to the mo file to parse
      - *wrapwidth*: integer, the wrap width, only useful when -w option was
        passed to xgettext to generate the po file that was used to format
        the mo file (optional, default to 78)
      - *autodetect_encoding*: boolean, if set to False the function will
        not try to detect the po file encoding (optional, default to True)
      - *encoding*: string, an encoding, only relevant if autodetect_encoding
        is set to False
    **Example**:
    >>> import polib
    >>> mo = polib.mofile('tests/test_utf8.mo')
    >>> mo #doctest: +ELLIPSIS
    <MOFile instance at ...>
    >>> import os, tempfile
    >>> for fname in ['test_iso-8859-15.mo', 'test_utf8.mo']:
    ...     orig_mo = polib.mofile('tests/'+fname)
    ...     tmpf = tempfile.NamedTemporaryFile().name
    ...     orig_mo.save(tmpf)
    ...     try:
    ...         new_mo = polib.mofile(tmpf)
    ...         for old, new in zip(orig_mo, new_mo):
    ...             if old.msgid != new.msgid:
    ...                 old.msgstr
    ...                 new.msgstr
    ...     finally:
    ...         os.unlink(tmpf)
    """
    # Same encoding-selection logic as pofile(); strict ``== True`` kept.
    if kwargs.get('autodetect_encoding', True) == True:
        enc = detect_encoding(fpath)
    else:
        enc = kwargs.get('encoding', default_encoding)
    # Parse the binary catalog and carry the formatting options over.
    parser = _MOFileParser(fpath)
    instance = parser.parse()
    instance.wrapwidth = kwargs.get('wrapwidth', 78)
    instance.encoding = enc
    return instance
# }}}
# function detect_encoding() {{{
def detect_encoding(fpath):
    """
    Try to detect the encoding used by the file *fpath*. The function
    returns the polib default *encoding* if it is unable to detect it.
    **Keyword argument**:
      - *fpath*: string, full or relative path to the mo file to parse.
    **Examples**:
    >>> print(detect_encoding('tests/test_noencoding.po'))
    utf-8
    >>> print(detect_encoding('tests/test_utf8.po'))
    UTF-8
    >>> print(detect_encoding('tests/test_utf8.mo'))
    UTF-8
    >>> print(detect_encoding('tests/test_iso-8859-15.po'))
    ISO_8859-15
    >>> print(detect_encoding('tests/test_iso-8859-15.mo'))
    ISO_8859-15
    """
    import re
    # Look for the charset declared in the Content-Type metadata line.
    rx = re.compile(r'"?Content-Type:.+? charset=([\w_\-:\.]+)')
    # Use a context manager so the handle is closed even if reading raises
    # (the previous implementation leaked the handle on error).
    # NOTE(review): the file is opened in text mode even for .mo files --
    # binary catalogs with bytes invalid in the locale encoding may raise;
    # confirm if binary-safe reading is needed.
    with open(fpath) as f:
        for l in f:
            match = rx.search(l)
            if match:
                return match.group(1).strip()
    # No charset declaration found: fall back to the module default.
    return default_encoding
# }}}
# function escape() {{{
def escape(st):
    """
    Escape special chars and return the given string *st*.
    **Examples**:
    >>> escape('\\t and \\n and \\r and " and \\\\')
    '\\\\t and \\\\n and \\\\r and \\\\" and \\\\\\\\'
    """
    # The backslash pair must be processed first so that backslashes
    # introduced by the later replacements are not escaped a second time.
    for plain, escaped in (('\\', r'\\'),
                           ('\t', r'\t'),
                           ('\r', r'\r'),
                           ('\n', r'\n'),
                           ('\"', r'\"')):
        st = st.replace(plain, escaped)
    return st
# }}}
# function unescape() {{{
def unescape(st):
    """
    Unescape special chars and return the given string *st*.
    **Examples**:
    >>> unescape('\\\\t and \\\\n and \\\\r and \\\\" and \\\\\\\\')
    '\\t and \\n and \\r and " and \\\\'
    """
    # Scan left to right so that an escaped backslash consumes both of its
    # characters. BUG FIX: the previous chained str.replace() implementation
    # processed '\\n' before '\\\\', so the three-character sequence
    # backslash-backslash-'n' was wrongly turned into backslash-newline
    # instead of backslash-'n'.
    mapping = {'n': '\n', 't': '\t', 'r': '\r', '\\': '\\', '"': '"'}
    chunks = []
    i = 0
    size = len(st)
    while i < size:
        char = st[i]
        if char == '\\' and i + 1 < size and st[i + 1] in mapping:
            chunks.append(mapping[st[i + 1]])
            i += 2
        else:
            # Unknown escapes (and lone trailing backslashes) pass through
            # unchanged, matching the previous behaviour.
            chunks.append(char)
            i += 1
    return ''.join(chunks)
# }}}
# class _BaseFile {{{
class _BaseFile(list):
    """
    Common parent class for POFile and MOFile classes.
    This class must **not** be instantiated directly.
    """

    def __init__(self, fpath=None, wrapwidth=78, encoding=default_encoding):
        """
        Constructor.
        **Keyword arguments**:
          - *fpath*: string, path to po or mo file
          - *wrapwidth*: integer, the wrap width, only useful when -w option
            was passed to xgettext to generate the po file that was used to
            format the mo file, default to 78 (optional).
          - *encoding*: string, the encoding used to read/write the file.
        """
        list.__init__(self)
        # the opened file handle
        self.fpath = fpath
        # the width at which lines should be wrapped
        self.wrapwidth = wrapwidth
        # the file encoding
        self.encoding = encoding
        # free-form header text serialized before the metadata entry
        self.header = ''
        # both po and mo files have metadata
        self.metadata = {}
        self.metadata_is_fuzzy = 0

    def __str__(self):
        """String representation of the file."""
        ret = []
        # The metadata entry always comes first, then every non-obsolete
        # entry, then the obsolete ones.
        # NOTE(review): obsolete_entries() is defined on subclasses (e.g.
        # POFile), not on _BaseFile itself -- another reason this base class
        # is not usable directly.
        entries = [self.metadata_as_entry()] + \
                  [e for e in self if not e.obsolete]
        for entry in entries:
            ret.append(entry.__str__(self.wrapwidth))
        for entry in self.obsolete_entries():
            ret.append(entry.__str__(self.wrapwidth))
        return '\n'.join(ret)

    def __repr__(self):
        """Return the official string representation of the object."""
        return '<%s instance at %x>' % (self.__class__.__name__, id(self))

    def metadata_as_entry(self):
        """Return the file metadata rendered as a POEntry with empty msgid."""
        e = POEntry(msgid='')
        mdata = self.ordered_metadata()
        if mdata:
            strs = []
            for name, value in mdata:
                # Strip whitespace off each line in a multi-line entry
                value = '\n'.join([v.strip() for v in value.split('\n')])
                strs.append('%s: %s' % (name, value))
            e.msgstr = '\n'.join(strs) + '\n'
        return e

    def save(self, fpath=None, repr_method='__str__'):
        """
        Save the po file to file *fpath* if no file handle exists for
        the object. If there's already an open file and no fpath is
        provided, then the existing file is rewritten with the modified
        data.
        **Keyword arguments**:
          - *fpath*: string, full or relative path to the file.
          - *repr_method*: string, the method to use for output.
        """
        if self.fpath is None and fpath is None:
            raise IOError('You must provide a file path to save() method')
        contents = getattr(self, repr_method)()
        if fpath is None:
            fpath = self.fpath
        mode = 'w'
        if repr_method == 'to_binary':
            mode += 'b'
        # Context manager guarantees the handle is closed even if write()
        # raises (the previous implementation leaked it on error).
        with open(fpath, mode) as fhandle:
            fhandle.write(contents)

    def find(self, st, by='msgid'):
        """
        Find entry which msgid (or property identified by the *by*
        attribute) matches the string *st*.
        **Keyword arguments**:
          - *st*: string, the string to search for
          - *by*: string, the comparison attribute
        **Examples**:
        >>> po = pofile('tests/test_utf8.po')
        >>> entry = po.find('Thursday')
        >>> entry.msgstr
        'Jueves'
        >>> entry = po.find('Some unexistant msgid')
        >>> entry is None
        True
        >>> entry = po.find('Jueves', 'msgstr')
        >>> entry.msgid
        'Thursday'
        """
        # Short-circuits on the first match instead of materialising the
        # full list of matches; returns None when nothing matches.
        return next((e for e in self if getattr(e, by) == st), None)

    def ordered_metadata(self):
        """
        Convenience method that returns the metadata ordered. The return
        value is a list of tuples (metadata name, metadata_value).
        """
        # copy the dict first so the well-known keys can be popped off
        metadata = self.metadata.copy()
        data_order = [
            'Project-Id-Version',
            'Report-Msgid-Bugs-To',
            'POT-Creation-Date',
            'PO-Revision-Date',
            'Last-Translator',
            'Language-Team',
            'MIME-Version',
            'Content-Type',
            'Content-Transfer-Encoding'
        ]
        ordered_data = []
        for data in data_order:
            try:
                value = metadata.pop(data)
                ordered_data.append((data, value))
            except KeyError:
                pass
        # The remaining (non-standard) metadata has no specified order, so
        # sort it alphabetically for deterministic output.
        # BUG FIX: the previous code did ``list(keys).sort()`` which sorted
        # a throwaway copy and then iterated the *unsorted* keys.
        for data in sorted(metadata.keys()):
            ordered_data.append((data, metadata[data]))
        return ordered_data

    def to_binary(self):
        """Return the mofile binary representation."""
        # NOTE(review): this method still contains Python 2 idioms --
        # ``entries.sort(cmp)`` relies on the py2 ``cmp`` builtin, and the
        # string buffer cannot be concatenated with the bytes returned by
        # struct.pack() on Python 3. Confirm the supported interpreter
        # before porting; left unchanged here to avoid a behaviour change.
        import struct
        import array
        output = ''
        offsets = []
        ids = strs = ''
        entries = self.translated_entries()
        # the keys are sorted in the .mo file
        entries.sort(cmp)
        # add metadata entry
        mentry = self.metadata_as_entry()
        mentry.msgstr = mentry.msgstr.replace('\\n', '').lstrip() + '\n'
        entries = [mentry] + entries
        entries_len = len(entries)
        for e in entries:
            # For each string, we need size and file offset. Each string is
            # NUL terminated; the NUL does not count into the size.
            msgid = e._decode(e.msgid)
            msgstr = e._decode(e.msgstr)
            offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
            ids += msgid + '\0'
            strs += msgstr + '\0'
        # The header is 7 32-bit unsigned integers.
        keystart = 7*4+16*entries_len
        # and the values start after the keys
        valuestart = keystart + len(ids)
        koffsets = []
        voffsets = []
        # The string table first has the list of keys, then the list of values.
        # Each entry has first the size of the string, then the file offset.
        for o1, l1, o2, l2 in offsets:
            koffsets += [l1, o1+keystart]
            voffsets += [l2, o2+valuestart]
        offsets = koffsets + voffsets
        output = struct.pack("IIIIIII",
                             0x950412de,        # Magic number
                             0,                 # Version
                             entries_len,       # # of entries
                             7*4,               # start of key index
                             7*4+entries_len*8, # start of value index
                             0, 0)              # size and offset of hash table
        output += array.array("I", offsets).tostring()
        output += ids
        output += strs
        return output
# }}}
# class POFile {{{
# Reader/writer for textual ``.po`` catalogs; behaves as a list of POEntry.
class POFile(_BaseFile):
    '''
    Po (or Pot) file reader/writer.
    POFile objects inherit the list objects methods.
    **Example**:
    >>> po = POFile()
    >>> entry1 = POEntry(
    ...     msgid="Some english text",
    ...     msgstr="Un texte en anglais"
    ... )
    >>> entry1.occurrences = [('testfile', 12),('another_file', 1)]
    >>> entry1.comment = "Some useful comment"
    >>> entry2 = POEntry(
    ...     msgid="Peace in some languages",
    ...     msgstr="Pace سلام שלום Hasîtî 和平"
    ... )
    >>> entry2.occurrences = [('testfile', 15),('another_file', 5)]
    >>> entry2.comment = "Another useful comment"
    >>> entry3 = POEntry(
    ...     msgid='Some entry with quotes " \\"',
    ...     msgstr='Un message unicode avec des quotes " \\"'
    ... )
    >>> entry3.comment = "Test string quoting"
    >>> po.append(entry1)
    >>> po.append(entry2)
    >>> po.append(entry3)
    >>> po.header = "Some Header"
    >>> print(po)
    # Some Header
    msgid ""
    msgstr ""
    <BLANKLINE>
    #. Some useful comment
    #: testfile:12 another_file:1
    msgid "Some english text"
    msgstr "Un texte en anglais"
    <BLANKLINE>
    #. Another useful comment
    #: testfile:15 another_file:5
    msgid "Peace in some languages"
    msgstr "Pace سلام שלום Hasîtî 和平"
    <BLANKLINE>
    #. Test string quoting
    msgid "Some entry with quotes \\" \\""
    msgstr "Un message unicode avec des quotes \\" \\""
    <BLANKLINE>
    '''

    def __str__(self):
        """Return the string representation of the po file"""
        ret, headers = '', self.header.split('\n')
        for header in headers:
            # Header lines that start with ',' or ':' are continuation
            # fragments and get no space after '#', matching gettext output.
            if header[:1] in [',', ':']:
                ret += '#%s\n' % header
            else:
                ret += '# %s\n' % header
        # _BaseFile.__str__ renders the metadata entry plus all entries.
        return ret + _BaseFile.__str__(self)

    def save_as_mofile(self, fpath):
        """
        Save the binary representation of the file to *fpath*.
        **Keyword arguments**:
        - *fpath*: string, full or relative path to the file.
        """
        # 'to_binary' selects the mo (compiled) serializer in _BaseFile.save.
        _BaseFile.save(self, fpath, 'to_binary')

    def percent_translated(self):
        """
        Convenience method that return the percentage of translated
        messages.
        **Example**:
        >>> import polib
        >>> po = polib.pofile('tests/test_pofile_helpers.po')
        >>> po.percent_translated()
        50
        >>> po = POFile()
        >>> po.percent_translated()
        100
        """
        # Obsolete entries are excluded from the statistics entirely.
        total = len([e for e in self if not e.obsolete])
        if total == 0:
            return 100
        translated = len(self.translated_entries())
        return int((100.00 / float(total)) * translated)

    def translated_entries(self):
        """
        Convenience method that return a list of translated entries.
        **Example**:
        >>> import polib
        >>> po = polib.pofile('tests/test_pofile_helpers.po')
        >>> len(po.translated_entries())
        6
        """
        return [e for e in self if e.translated() and not e.obsolete]

    def untranslated_entries(self):
        """
        Convenience method that return a list of untranslated entries.
        **Example**:
        >>> import polib
        >>> po = polib.pofile('tests/test_pofile_helpers.po')
        >>> len(po.untranslated_entries())
        6
        """
        return [e for e in self if not e.translated() and not e.obsolete]

    def fuzzy_entries(self):
        """
        Convenience method that return the list of 'fuzzy' entries.
        **Example**:
        >>> import polib
        >>> po = polib.pofile('tests/test_pofile_helpers.po')
        >>> len(po.fuzzy_entries())
        2
        """
        return [e for e in self if 'fuzzy' in e.flags]

    def obsolete_entries(self):
        """
        Convenience method that return the list of obsolete entries.
        **Example**:
        >>> import polib
        >>> po = polib.pofile('tests/test_pofile_helpers.po')
        >>> len(po.obsolete_entries())
        4
        """
        return [e for e in self if e.obsolete]

    def merge(self, refpot):
        """
        XXX this could not work if encodings are different, needs thinking
        and general refactoring of how polib handles encoding...
        Convenience method that merge the current pofile with the pot file
        provided. It behaves exactly as the gettext msgmerge utility:
        - comments of this file will be preserved, but extracted comments
          and occurrences will be discarded
        - any translations or comments in the file will be discarded,
          however dot comments and file positions will be preserved
        **Keyword argument**:
        - *refpot*: object POFile, the reference catalog.
        **Example**:
        >>> import polib
        >>> refpot = polib.pofile('tests/test_merge.pot')
        >>> po = polib.pofile('tests/test_merge_before.po')
        >>> po.merge(refpot)
        >>> expected_po = polib.pofile('tests/test_merge_after.po')
        >>> str(po) == str(expected_po)
        True
        """
        # Add or update an entry for every msgid found in the reference pot.
        for entry in refpot:
            e = self.find(entry.msgid)
            if e is None:
                e = POEntry()
                self.append(e)
            e.merge(entry)
        # ok, now we must "obsolete" entries that are not in the refpot
        # anymore
        for entry in self:
            if refpot.find(entry.msgid) is None:
                entry.obsolete = True
# }}}
# class MOFile {{{
# Reader/writer for compiled ``.mo`` catalogs; behaves as a list of entries.
class MOFile(_BaseFile):
    '''
    Mo file reader/writer.
    MOFile objects inherit the list objects methods.
    **Example**:
    >>> mo = MOFile()
    >>> entry1 = POEntry(
    ...     msgid="Some english text",
    ...     msgstr="Un texte en anglais"
    ... )
    >>> entry2 = POEntry(
    ...     msgid="I need my dirty cheese",
    ...     msgstr="Je veux mon sale fromage"
    ... )
    >>> entry3 = MOEntry(
    ...     msgid='Some entry with quotes " \\"',
    ...     msgstr='Un message unicode avec des quotes " \\"'
    ... )
    >>> mo.append(entry1)
    >>> mo.append(entry2)
    >>> mo.append(entry3)
    >>> print(mo)
    msgid ""
    msgstr ""
    <BLANKLINE>
    msgid "Some english text"
    msgstr "Un texte en anglais"
    <BLANKLINE>
    msgid "I need my dirty cheese"
    msgstr "Je veux mon sale fromage"
    <BLANKLINE>
    msgid "Some entry with quotes \\" \\""
    msgstr "Un message unicode avec des quotes \\" \\""
    <BLANKLINE>
    '''

    def __init__(self, fpath=None, wrapwidth=78):
        """
        MOFile constructor.
        See _BaseFile.__construct.
        """
        _BaseFile.__init__(self, fpath, wrapwidth)
        # Binary header fields; populated by _MOFileParser when reading
        # an existing mo file from disk.
        self.magic_number = None
        self.version = 0

    def save_as_pofile(self, fpath):
        """
        Save the string representation of the file to *fpath*.
        **Keyword argument**:
        - *fpath*: string, full or relative path to the file.
        """
        _BaseFile.save(self, fpath)

    def save(self, fpath):
        """
        Save the binary representation of the file to *fpath*.
        **Keyword argument**:
        - *fpath*: string, full or relative path to the file.
        """
        _BaseFile.save(self, fpath, 'to_binary')

    # A compiled catalog contains only translated strings, so the
    # statistics methods return fixed values; they exist solely to keep
    # the same interface as POFile.
    def percent_translated(self):
        """
        Convenience method to keep the same interface with POFile instances.
        """
        return 100

    def translated_entries(self):
        """
        Convenience method to keep the same interface with POFile instances.
        """
        return self

    def untranslated_entries(self):
        """
        Convenience method to keep the same interface with POFile instances.
        """
        return []

    def fuzzy_entries(self):
        """
        Convenience method to keep the same interface with POFile instances.
        """
        return []

    def obsolete_entries(self):
        """
        Convenience method to keep the same interface with POFile instances.
        """
        return []
# }}}
# class _BaseEntry {{{
class _BaseEntry(object):
    """
    Base class for POEntry or MOEntry objects.
    This class must *not* be instanciated directly.
    """

    def __init__(self, *args, **kwargs):
        """Base Entry constructor."""
        # All fields default to empty so a bare Entry() is valid.
        self.msgid = kwargs.get('msgid', '')
        self.msgstr = kwargs.get('msgstr', '')
        self.msgid_plural = kwargs.get('msgid_plural', '')
        self.msgstr_plural = kwargs.get('msgstr_plural', {})
        self.obsolete = kwargs.get('obsolete', False)
        # default_encoding is a module-level constant defined elsewhere.
        self.encoding = kwargs.get('encoding', default_encoding)

    def __repr__(self):
        """Return the official string representation of the object."""
        return '<%s instance at %x>' % (self.__class__.__name__, id(self))

    def __str__(self, wrapwidth=78):
        """
        Common string representation of the POEntry and MOEntry
        objects.
        """
        # Obsolete entries get the "#~ " marker prepended to every line.
        if self.obsolete:
            delflag = '#~ '
        else:
            delflag = ''
        # write the msgid
        ret = []
        # NOTE(review): self._str_field is not defined in this excerpt;
        # presumably it formats/wraps one "keyword \"value\"" field and is
        # defined elsewhere in this class -- confirm against the full file.
        ret += self._str_field("msgid", delflag, "", self.msgid)
        # write the msgid_plural if any
        if self.msgid_plural:
            ret += self._str_field("msgid_plural", delflag, "", self.msgid_plural)
        if self.msgstr_plural:
            # write the msgstr_plural if any, in sorted plural-index order
            msgstrs = self.msgstr_plural
            keys = list(msgstrs)
            keys.sort()
            for index in keys:
                msgstr = msgstrs[index]
                plural_index = '[%s]' % index
                ret += self._str_field("msgstr", delflag, plural_index, msgstr)
        else:
            # otherwise write the msgstr
            ret += self._str_field("msgstr", delflag, "", self.msgstr)
        # trailing empty element yields the blank line separating entries
        ret.append('')
        return '\n'.join(ret)
# }}}
# class POEntry {{{
class POEntry(_BaseEntry):
    """
    Represents a po file entry.
    **Examples**:
    >>> entry = POEntry(msgid='Welcome', msgstr='Bienvenue')
    >>> entry.occurrences = [('welcome.py', 12), ('anotherfile.py', 34)]
    >>> print(entry)
    #: welcome.py:12 anotherfile.py:34
    msgid "Welcome"
    msgstr "Bienvenue"
    <BLANKLINE>
    >>> entry = POEntry()
    >>> entry.occurrences = [('src/some-very-long-filename-that-should-not-be-wrapped-even-if-it-is-larger-than-the-wrap-limit.c', 32), ('src/eggs.c', 45)]
    >>> entry.comment = 'A plural translation. This is a very very very long line please do not wrap, this is just for testing comment wrapping...'
    >>> entry.tcomment = 'A plural translation. This is a very very very long line please do not wrap, this is just for testing comment wrapping...'
    >>> entry.flags.append('c-format')
    >>> entry.msgid = 'I have spam but no egg !'
    >>> entry.msgid_plural = 'I have spam and %d eggs !'
    >>> entry.msgstr_plural[0] = "J'ai du jambon mais aucun oeuf !"
    >>> entry.msgstr_plural[1] = "J'ai du jambon et %d oeufs !"
    >>> print(entry)
    #. A plural translation. This is a very very very long line please do not
    #. wrap, this is just for testing comment wrapping...
    # A plural translation. This is a very very very long line please do not wrap,
    # this is just for testing comment wrapping...
    #: src/some-very-long-filename-that-should-not-be-wrapped-even-if-it-is-larger-than-the-wrap-limit.c:32
    #: src/eggs.c:45
    #, c-format
    msgid "I have spam but no egg !"
    msgid_plural "I have spam and %d eggs !"
    msgstr[0] "J'ai du jambon mais aucun oeuf !"
    msgstr[1] "J'ai du jambon et %d oeufs !"
    <BLANKLINE>
    """

    def __init__(self, *args, **kwargs):
        """POEntry constructor."""
        _BaseEntry.__init__(self, *args, **kwargs)
        # extracted (#.) comment, translator (#) comment, source (#:)
        # occurrences and (#,) flags
        self.comment = kwargs.get('comment', '')
        self.tcomment = kwargs.get('tcomment', '')
        self.occurrences = kwargs.get('occurrences', [])
        self.flags = kwargs.get('flags', [])

    def __str__(self, wrapwidth=78):
        """
        Return the string representation of the entry.
        """
        if self.obsolete:
            # obsolete entries keep only the "#~ " prefixed fields
            return _BaseEntry.__str__(self)
        ret = []
        # comment first, if any (with text wrapping as xgettext does)
        if self.comment != '':
            comments = self._decode(self.comment).split('\n')
            for comment in comments:
                # -3 accounts for the '#. ' prefix added by textwrap
                if wrapwidth > 0 and len(comment) > wrapwidth-3:
                    ret += textwrap.wrap(comment, wrapwidth,
                                         initial_indent='#. ',
                                         subsequent_indent='#. ',
                                         break_long_words=False)
                else:
                    ret.append('#. %s' % comment)
        # translator comment, if any (with text wrapping as xgettext does)
        if self.tcomment != '':
            tcomments = self._decode(self.tcomment).split('\n')
            for tcomment in tcomments:
                # -2 accounts for the '# ' prefix added by textwrap
                if wrapwidth > 0 and len(tcomment) > wrapwidth-2:
                    ret += textwrap.wrap(tcomment, wrapwidth,
                                         initial_indent='# ',
                                         subsequent_indent='# ',
                                         break_long_words=False)
                else:
                    ret.append('# %s' % tcomment)
        # occurrences (with text wrapping as xgettext does)
        if self.occurrences:
            filelist = []
            for fpath, lineno in self.occurrences:
                if lineno:
                    filelist.append('%s:%s' % (self._decode(fpath), lineno))
                else:
                    filelist.append('%s' % (self._decode(fpath)))
            filestr = ' '.join(filelist)
            if wrapwidth > 0 and len(filestr)+3 > wrapwidth:
                # XXX textwrap split words that contain hyphen, this is not
                # what we want for filenames, so the dirty hack is to
                # temporally replace hyphens with a char that a file cannot
                # contain, like "*"
                lines = textwrap.wrap(filestr.replace('-', '*'),
                                      wrapwidth,
                                      initial_indent='#: ',
                                      subsequent_indent='#: ',
                                      break_long_words=False)
                # end of the replace hack
                for line in lines:
                    ret.append(line.replace('*', '-'))
            else:
                ret.append('#: '+filestr)
        # flags
        if self.flags:
            flags = []
            for flag in self.flags:
                flags.append(flag)
            ret.append('#, %s' % ', '.join(flags))
        # msgid/msgstr fields come from the base class
        ret.append(_BaseEntry.__str__(self))
        return '\n'.join(ret)

    def __cmp__(self, other):
        '''
        Called by comparison operations if rich comparison is not defined.
        Entries are ordered by obsolescence first, then by their sorted
        occurrences, and finally by msgid.
        **Tests**:
        >>> a = POEntry(msgid='a', occurrences=[('b.py', 1), ('b.py', 3)])
        >>> b = POEntry(msgid='b', occurrences=[('b.py', 1), ('b.py', 3)])
        >>> c1 = POEntry(msgid='c1', occurrences=[('a.py', 1), ('b.py', 1)])
        >>> c2 = POEntry(msgid='c2', occurrences=[('a.py', 1), ('a.py', 3)])
        >>> po = POFile()
        >>> po.append(a)
        >>> po.append(b)
        >>> po.append(c1)
        >>> po.append(c2)
        >>> po.sort()
        >>> print(po)
        #
        msgid ""
        msgstr ""
        <BLANKLINE>
        #: a.py:1 a.py:3
        msgid "c2"
        msgstr ""
        <BLANKLINE>
        #: a.py:1 b.py:1
        msgid "c1"
        msgstr ""
        <BLANKLINE>
        #: b.py:1 b.py:3
        msgid "a"
        msgstr ""
        <BLANKLINE>
        #: b.py:1 b.py:3
        msgid "b"
        msgstr ""
        <BLANKLINE>
        '''
        def compare_occurrences(a, b):
            """
            Compare an entry occurrence with another one, returning a
            negative, zero or positive value as required by cmp-style
            sorting.
            """
            # BUGFIX: the previous version returned the *boolean*
            # ``a[0] < b[0]`` (i.e. 0 or 1, never -1), which is not a valid
            # three-way cmp result and made list.sort() mis-order the
            # occurrences.
            if a[0] != b[0]:
                if a[0] < b[0]:
                    return -1
                return 1
            if a[1] != b[1]:
                if a[1] < b[1]:
                    return -1
                return 1
            return 0
        # First: Obsolete test
        if self.obsolete != other.obsolete:
            if self.obsolete:
                return -1
            else:
                return 1
        # Work on a copy to protect original
        occ1 = self.occurrences[:]
        occ2 = other.occurrences[:]
        # Sorting using compare method
        occ1.sort(compare_occurrences)
        occ2.sort(compare_occurrences)
        # Comparing sorted occurrences
        pos = 0
        for entry1 in occ1:
            try:
                entry2 = occ2[pos]
            except IndexError:
                # self has more occurrences than other => self is "greater"
                return 1
            pos = pos + 1
            if entry1[0] != entry2[0]:
                if entry1[0] > entry2[0]:
                    return 1
                else:
                    return -1
            if entry1[1] != entry2[1]:
                if entry1[1] > entry2[1]:
                    return 1
                else:
                    return -1
        # Finally: Compare message ID
        # BUGFIX: return 0 when the msgids are equal instead of -1, so that
        # equal entries compare as equal (a consistent cmp contract).
        if self.msgid > other.msgid:
            return 1
        elif self.msgid < other.msgid:
            return -1
        return 0

    def translated(self):
        """
        Return True if the entry has been translated or False.
        An entry is untranslated when obsolete, fuzzy, or when any of its
        (plural) msgstr fields is empty.
        """
        if self.obsolete or 'fuzzy' in self.flags:
            return False
        if self.msgstr != '':
            return True
        if self.msgstr_plural:
            for pos in self.msgstr_plural:
                if self.msgstr_plural[pos] == '':
                    return False
            return True
        return False

    def merge(self, other):
        """
        Merge the current entry with the given pot entry: take msgid,
        occurrences, extracted comment, flags and plural msgid from *other*
        while keeping any existing plural translations.
        """
        self.msgid = other.msgid
        self.occurrences = other.occurrences
        self.comment = other.comment
        self.flags = other.flags
        self.msgid_plural = other.msgid_plural
        if other.msgstr_plural:
            for pos in other.msgstr_plural:
                try:
                    # keep existing translation at pos if any
                    self.msgstr_plural[pos]
                except KeyError:
                    self.msgstr_plural[pos] = ''
# }}}
# class MOEntry {{{
class MOEntry(_BaseEntry):
    """
    Represents a mo file entry.
    **Examples**:
    >>> entry = MOEntry()
    >>> entry.msgid = 'translate me !'
    >>> entry.msgstr = 'traduisez moi !'
    >>> print(entry)
    msgid "translate me !"
    msgstr "traduisez moi !"
    <BLANKLINE>
    """

    def __str__(self, wrapwidth=78):
        """Return the string representation of the entry."""
        # Formatting is entirely shared with POEntry; delegate to the base.
        return super(MOEntry, self).__str__(wrapwidth)
# }}}
# class _POFileParser {{{
class _POFileParser(object):
    """
    A finite state machine to parse efficiently and correctly po
    file format.
    Each input line is classified into a two-letter symbol; a transition
    table maps (symbol, current state) to a handler plus the next state.
    """

    def __init__(self, fpath):
        """
        Constructor.
        **Keyword argument**:
        - *fpath*: string, path to the po file
        """
        self.fhandle = open(fpath, 'r')
        self.instance = POFile(fpath=fpath)
        # (symbol, state) -> (handler, next_state)
        self.transitions = {}
        self.current_entry = POEntry()
        self.current_state = 'ST'
        self.current_token = None
        # two memo flags used in handlers
        self.msgstr_index = 0
        self.entry_obsolete = 0
        # Configure the state machine, by adding transitions.
        # Signification of symbols:
        #     * ST: Beginning of the file (start)
        #     * HE: Header
        #     * TC: a translation comment
        #     * GC: a generated comment
        #     * OC: a file/line occurence
        #     * FL: a flags line
        #     * MI: a msgid
        #     * MP: a msgid plural
        #     * MS: a msgstr
        #     * MX: a msgstr plural
        #     * MC: a msgid or msgstr continuation line
        all_ = ['ST', 'HE', 'GC', 'OC', 'FL', 'TC', 'MS', 'MP', 'MX', 'MI']
        self.add('TC', ['ST', 'HE'], 'HE')
        self.add('TC', ['GC', 'OC', 'FL', 'TC', 'MS', 'MP', 'MX', 'MI'], 'TC')
        self.add('GC', all_, 'GC')
        self.add('OC', all_, 'OC')
        self.add('FL', all_, 'FL')
        self.add('MI', ['ST', 'HE', 'GC', 'OC', 'FL', 'TC', 'MS', 'MX'], 'MI')
        self.add('MP', ['TC', 'GC', 'MI'], 'MP')
        self.add('MS', ['MI', 'MP', 'TC'], 'MS')
        self.add('MX', ['MI', 'MX', 'MP', 'TC'], 'MX')
        self.add('MC', ['MI', 'MP', 'MS', 'MX'], 'MC')

    def parse(self):
        """
        Run the state machine, parse the file line by line and call process()
        with the current matched symbol.
        """
        # NOTE(review): lastlen is assigned but never used.
        i, lastlen = 1, 0
        for line in self.fhandle:
            line = line.strip()
            if line == '':
                i = i+1
                continue
            # Obsolete entries are prefixed with "#~ "; strip the marker and
            # remember it so handle_mi() can flag the entry.
            if line[:3] == '#~ ':
                line = line[3:]
                self.entry_obsolete = 1
            else:
                self.entry_obsolete = 0
            self.current_token = line
            if line[:2] == '#:':
                # we are on a occurrences line
                self.process('OC', i)
            elif line[:7] == 'msgid "':
                # we are on a msgid
                self.process('MI', i)
            elif line[:8] == 'msgstr "':
                # we are on a msgstr
                self.process('MS', i)
            elif line[:1] == '"':
                # we are on a continuation line or some metadata
                self.process('MC', i)
            elif line[:14] == 'msgid_plural "':
                # we are on a msgid plural
                self.process('MP', i)
            elif line[:7] == 'msgstr[':
                # we are on a msgstr plural
                self.process('MX', i)
            elif line[:3] == '#, ':
                # we are on a flags line
                self.process('FL', i)
            elif line[:2] == '# ' or line == '#':
                if line == '#': line = line + ' '
                # we are on a translator comment line
                self.process('TC', i)
            elif line[:2] == '#.':
                # we are on a generated comment line
                self.process('GC', i)
            i = i+1
        if self.current_entry:
            # since entries are added when another entry is found, we must add
            # the last entry here (only if there are lines)
            self.instance.append(self.current_entry)
        # before returning the instance, check if there's metadata and if
        # so extract it in a dict
        firstentry = self.instance[0]
        if firstentry.msgid == '': # metadata found
            # remove the entry
            firstentry = self.instance.pop(0)
            self.instance.metadata_is_fuzzy = firstentry.flags
            key = None
            for msg in firstentry.msgstr.splitlines():
                try:
                    key, val = msg.split(':', 1)
                    self.instance.metadata[key] = val.strip()
                except:
                    # continuation of the previous header value
                    if key is not None:
                        self.instance.metadata[key] += '\n'+ msg.strip()
        # close opened file
        self.fhandle.close()
        return self.instance

    def add(self, symbol, states, next_state):
        """
        Add a transition to the state machine.
        Keywords arguments:
        symbol     -- string, the matched token (two chars symbol)
        states     -- list, a list of states (two chars symbols)
        next_state -- the next state the fsm will have after the action
        """
        for state in states:
            # handler name is derived from the target state, e.g. 'MI' ->
            # handle_mi
            action = getattr(self, 'handle_%s' % next_state.lower())
            self.transitions[(symbol, state)] = (action, next_state)

    def process(self, symbol, linenum):
        """
        Process the transition corresponding to the current state and the
        symbol provided.
        Keywords arguments:
        symbol  -- string, the matched token (two chars symbol)
        linenum -- integer, the current line number of the parsed file
        """
        try:
            (action, state) = self.transitions[(symbol, self.current_state)]
            # the handler returns a true value when the state may advance
            if action():
                self.current_state = state
        except Exception, exc:
            raise IOError('Syntax error in po file (line %s)' % linenum)

    # state handlers

    def handle_he(self):
        """Handle a header comment."""
        if self.instance.header != '':
            self.instance.header += '\n'
        self.instance.header += self.current_token[2:]
        return 1

    def handle_tc(self):
        """Handle a translator comment."""
        # a comment after a completed entry starts a new entry
        if self.current_state in ['MC', 'MS', 'MX']:
            self.instance.append(self.current_entry)
            self.current_entry = POEntry()
        if self.current_entry.tcomment != '':
            self.current_entry.tcomment += '\n'
        self.current_entry.tcomment += self.current_token[2:]
        return True

    def handle_gc(self):
        """Handle a generated comment."""
        if self.current_state in ['MC', 'MS', 'MX']:
            self.instance.append(self.current_entry)
            self.current_entry = POEntry()
        if self.current_entry.comment != '':
            self.current_entry.comment += '\n'
        self.current_entry.comment += self.current_token[3:]
        return True

    def handle_oc(self):
        """Handle a file:num occurence."""
        if self.current_state in ['MC', 'MS', 'MX']:
            self.instance.append(self.current_entry)
            self.current_entry = POEntry()
        occurrences = self.current_token[3:].split()
        for occurrence in occurrences:
            if occurrence != '':
                try:
                    fil, line = occurrence.split(':')
                    if not line.isdigit():
                        # not a real line number: keep whole token as a path
                        fil = fil + line
                        line = ''
                    self.current_entry.occurrences.append((fil, line))
                except:
                    # no ':' separator at all; store with empty line number
                    self.current_entry.occurrences.append((occurrence, ''))
        return True

    def handle_fl(self):
        """Handle a flags line."""
        if self.current_state in ['MC', 'MS', 'MX']:
            self.instance.append(self.current_entry)
            self.current_entry = POEntry()
        self.current_entry.flags += self.current_token[3:].split(', ')
        return True

    def handle_mi(self):
        """Handle a msgid."""
        if self.current_state in ['MC', 'MS', 'MX']:
            self.instance.append(self.current_entry)
            self.current_entry = POEntry()
        self.current_entry.obsolete = self.entry_obsolete
        # strip the 'msgid "' prefix and the closing quote
        self.current_entry.msgid = unescape(self.current_token[7:-1])
        return True

    def handle_mp(self):
        """Handle a msgid plural."""
        self.current_entry.msgid_plural = unescape(self.current_token[14:-1])
        return True

    def handle_ms(self):
        """Handle a msgstr."""
        self.current_entry.msgstr = unescape(self.current_token[8:-1])
        return True

    def handle_mx(self):
        """Handle a msgstr plural."""
        # token looks like: msgstr[N] "..." -- index is the single char at 7
        index, value = self.current_token[7], self.current_token[11:-1]
        self.current_entry.msgstr_plural[index] = unescape(value)
        # remember which plural form continuation lines should extend
        self.msgstr_index = index
        return True

    def handle_mc(self):
        """Handle a msgid or msgstr continuation line."""
        if self.current_state == 'MI':
            self.current_entry.msgid += unescape(self.current_token[1:-1])
        elif self.current_state == 'MP':
            self.current_entry.msgid_plural += \
                unescape(self.current_token[1:-1])
        elif self.current_state == 'MS':
            self.current_entry.msgstr += unescape(self.current_token[1:-1])
        elif self.current_state == 'MX':
            msgstr = self.current_entry.msgstr_plural[self.msgstr_index] +\
                unescape(self.current_token[1:-1])
            self.current_entry.msgstr_plural[self.msgstr_index] = msgstr
        # don't change the current state
        return False
# }}}
# class _MOFileParser {{{
class _MOFileParser(object):
    """
    A class to parse binary mo files.
    """

    # Recognized values for the 4-byte magic number at the start of a
    # mo file, depending on the byte order it was written with.
    BIG_ENDIAN = 0xde120495
    LITTLE_ENDIAN = 0x950412de

    def __init__(self, fpath):
        """_MOFileParser constructor."""
        self.fhandle = open(fpath, 'rb')
        self.instance = MOFile(fpath)

    def parse_magicnumber(self):
        """
        Parse the magic number and raise an exception if not valid.
        """
        # NOTE(review): stub -- the magic number is actually validated
        # inline in parse() below.

    def parse(self):
        """
        Build the instance with the file handle provided in the
        constructor.
        """
        magic_number = self._readbinary('<I', 4)
        # the magic number determines the endianness of every later read
        if magic_number == self.LITTLE_ENDIAN:
            ii = '<II'
        elif magic_number == self.BIG_ENDIAN:
            ii = '>II'
        else:
            raise IOError('Invalid mo file, magic number is incorrect !')
        self.instance.magic_number = magic_number
        # parse the version number and the number of strings
        self.instance.version, numofstrings = self._readbinary(ii, 8)
        # original strings and translation strings hash table offset
        msgids_hash_offset, msgstrs_hash_offset = self._readbinary(ii, 8)
        # move to msgid hash table and read length and offset of msgids
        self.fhandle.seek(msgids_hash_offset)
        msgids_index = []
        for i in range(numofstrings):
            msgids_index.append(self._readbinary(ii, 8))
        # move to msgstr hash table and read length and offset of msgstrs
        self.fhandle.seek(msgstrs_hash_offset)
        msgstrs_index = []
        for i in range(numofstrings):
            msgstrs_index.append(self._readbinary(ii, 8))
        # build entries
        for i in range(numofstrings):
            # each index entry is a (length, offset) pair
            self.fhandle.seek(msgids_index[i][1])
            msgid = self.fhandle.read(msgids_index[i][0])
            self.fhandle.seek(msgstrs_index[i][1])
            msgstr = self.fhandle.read(msgstrs_index[i][0])
            if i == 0: # metadata
                # the empty-msgid entry holds "Key: value" header lines
                raw_metadata, metadata = msgstr.split('\n'), {}
                for line in raw_metadata:
                    tokens = line.split(':', 1)
                    if tokens[0] != '':
                        try:
                            metadata[tokens[0]] = tokens[1].strip()
                        except IndexError:
                            metadata[tokens[0]] = ''
                self.instance.metadata = metadata
                continue
            entry = MOEntry(msgid=msgid, msgstr=msgstr)
            self.instance.append(entry)
        # close opened file
        self.fhandle.close()
        return self.instance

    def _readbinary(self, fmt, numbytes):
        """
        Private method that unpack n bytes of data using format <fmt>.
        It returns a tuple or a mixed value if the tuple length is 1.
        """
        # NOTE(review): the local 'bytes' shadows the builtin of the same
        # name (harmless here).
        bytes = self.fhandle.read(numbytes)
        tup = struct.unpack(fmt, bytes)
        if len(tup) == 1:
            return tup[0]
        return tup
# }}}
# __main__ {{{
if __name__ == '__main__':
    """
    **Main function**::
      - to **test** the module just run: *python polib.py [-v]*
      - to **profile** the module: *python polib.py -p <some_pofile.po>*
    """
    import sys
    if len(sys.argv) > 2 and sys.argv[1] == '-p':
        import profile
        # profile the module-level test() helper against the given po file
        profile.run('test("'+sys.argv[2]+'")')
    else:
        # default: execute all doctests embedded in this module
        import doctest
        doctest.testmod()
# }}}
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
13789,
25,
17168,
357,
3826,
38559,
24290,
2393,
2810,
8,
198,
2,
43907,
25,
900,
4292,
8658,
7400,
11338,... | 2.09882 | 21,534 |
from Cells.Cell import Cell
from Feedback import Feedback
| [
6738,
39794,
13,
28780,
1330,
12440,
198,
6738,
37774,
1330,
37774,
628
] | 4.916667 | 12 |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198
] | 3.461538 | 26 |
# -*- coding: utf-8 -*-
import urlparse
from flask import current_app
def is_allowed_archive(filename):
    """Check if a filename is a allowed archive"""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in current_app.config['SUPPORTED_ARCHIVE_EXTENSIONS']
def is_meta_file(filename):
    """Check if a filename is a supported meta file"""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in current_app.config['SUPPORTED_META_EXTENSIONS']
def is_model_file(filename):
    """
    Check if a filename has an supported models extension
    This is not a surefire way to detect if the file is a model, but
    acts as basic guard and guideline.
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in current_app.config['SUPPORTED_MODEL_EXTENSIONS']
def is_allowed_file(filename):
    """Check if a filename has an allowed extension"""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in current_app.config['ALLOWED_EXTENSIONS']
def is_allowed_host(url):
    """Check if a URL download is allowed"""
    allowed_hosts = current_app.config['ALLOWED_DOWNLOAD_HOSTS']
    # a single "*" entry whitelists every host
    if "*" in allowed_hosts:
        return True
    return urlparse.urlparse(url).netloc in allowed_hosts
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
19016,
29572,
198,
198,
6738,
42903,
1330,
1459,
62,
1324,
628,
198,
4299,
318,
62,
40845,
62,
17474,
7,
34345,
2599,
198,
220,
220,
220,
37227,
6822,
611,
... | 2.746269 | 469 |
from django.contrib import admin

# Register your models here.
from .models import Board

# Expose the Board model in the Django admin interface.
admin.site.register(Board)
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
6738,
764,
27530,
1330,
5926,
198,
198,
28482,
13,
15654,
13,
30238,
7,
29828,
8
] | 3.625 | 32 |