content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# -*- coding: utf-8 -*-
from weatherScraper.items import TempData
from weatherScraper.items import InputData
import scrapy
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
6193,
3351,
38545,
13,
23814,
1330,
24189,
6601,
198,
6738,
6193,
3351,
38545,
13,
23814,
1330,
23412,
6601,
198,
11748,
15881,
88,
628
] | 3.263158 | 38 |
"""
This file contains a lambda function reacting to query update sqs event and sending notifications for large queries
Currently only slack notifications are supported
"""
import json
import logging
from typing import Sequence
from . import settings
from .model import UnknownEventException
from .notificators.notificator import Notificator
logger = logging.getLogger()
def lambda_handler(event, context):
"""
This handles events and may handle more than one event type.
If you have more events that you'd like to be notified on,
register them in settings.py under NOTIFICATORS
"""
notificators: Sequence[Notificator] = [notificator(config=settings) for notificator in settings.NOTIFICATORS]
for record in event['Records']:
for notificator in notificators:
if notificator.is_record_type_handled(record):
notificator.handle_single_event(body=record['body'])
break
else:
logging.error("ERROR! Unknown event type!")
logging.debug(json.dumps(event))
raise UnknownEventException("ERROR! Unknown event type!")
| [
37811,
198,
1212,
2393,
4909,
257,
37456,
2163,
33413,
284,
12405,
4296,
19862,
82,
1785,
290,
7216,
19605,
329,
1588,
20743,
198,
198,
21327,
691,
30740,
19605,
389,
4855,
198,
37811,
198,
198,
11748,
33918,
198,
11748,
18931,
198,
6738,... | 3.018568 | 377 |
import logging
from api.entryprocessor.wiki.en import ENWiktionaryProcessor as ENWiktionaryProcessor
from redis_wikicache import RedisPage, RedisSite
"""
Requires RedisSite and RedisPage wrappers.
All data must have been imported there.
see redis_wikicache.py for more details.
"""
logger = logging.getLogger('processor')
enwikt = RedisSite('en', 'wiktionary')
if __name__ == '__main__':
test_random()
# test_one()
| [
11748,
18931,
198,
198,
6738,
40391,
13,
13000,
41341,
13,
15466,
13,
268,
1330,
12964,
33010,
5378,
560,
18709,
273,
355,
12964,
33010,
5378,
560,
18709,
273,
198,
6738,
2266,
271,
62,
20763,
291,
4891,
1330,
2297,
271,
9876,
11,
2297,... | 3.006993 | 143 |
#!/user/bin/env python
# -*- coding: utf-8 -*-
"""
------------------------------------
@Project : nightwalker
@Time : 2020/10/13 11:18
@Auth : luozhongwen
@Email : luozw@inhand.com.cn
@File : __init__.py
@IDE : PyCharm
------------------------------------
"""
| [
2,
48443,
7220,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
3880,
650,
198,
31,
16775,
1058,
1755,
20783,
198,
31,
7575,
220,
220,
220,
1058,
12131,
14,
940,
14,... | 2.564815 | 108 |
from gym.envs.registration import register
register('NetHackChallengeBatched-v0',
entry_point='nle_batched_env.NetHackChallengeBatchedEnv')
| [
6738,
11550,
13,
268,
14259,
13,
2301,
33397,
1330,
7881,
198,
198,
30238,
10786,
7934,
32833,
41812,
3540,
33,
14265,
12,
85,
15,
3256,
220,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
5726,
62,
4122,
11639,
77,
293... | 2.610169 | 59 |
# The MIT License (MIT)
# Copyright (c) 2018 Koki Saitoh
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ================================================================================
import numpy as np
# Review needed
| [
2,
383,
17168,
13789,
357,
36393,
8,
198,
198,
2,
15069,
357,
66,
8,
2864,
509,
18228,
311,
4548,
1219,
198,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
220,
198,
2,
286,
428,
... | 3.849231 | 325 |
#!/usr/bin/python3
import pygame
from classes.wall import Wall
from classes.block import Block
from classes.player import Player
from classes.ghost import Ghost
black = (0,0,0)
white = (255,255,255)
blue = (0,0,255)
green = (0,255,0)
red = (255,0,0)
purple = (255,0,255)
yellow = (255,255,0)
# Commands
print("")
print("\033[36m📚 HOW TO PLAY?\033[0m")
print("\033[32m🟢 Move Pacman using UP KEY 🔼, DOWN KEY 🔽, LEFT KEY ◀️ and RIGHT KEY ▶️ \033[0m")
print("\033[31m🔴 Press the \"ESCAPE\" KEY on the PACMAN GAME OVER screen to end the game! \033[0m")
print("")
#Add music
pygame.mixer.init()
pygame.mixer.music.load('pacman.mp3')
pygame.mixer.music.play(-1, 0.0)
# Default locations for Pacman and Ghosts
w = 303-16 #Width
pacman_height = (7*60)+19
monster_height = (4*60)+19
blinky_height = (3*60)+19
inky_width = 303-16-32
clyde_width = 303+(32-16)
#Pinky movements
Pinky_directions = [
[0,-30,4],
[15,0,9],
[0,15,11],
[-15,0,23],
[0,15,7],
[15,0,3],
[0,-15,3],
[15,0,19],
[0,15,3],
[15,0,3],
[0,15,3],
[15,0,3],
[0,-15,15],
[-15,0,7],
[0,15,3],
[-15,0,19],
[0,-15,11],
[15,0,9]
]
#Blinky movements
Blinky_directions = [
[0,-15,4],
[15,0,9],
[0,15,11],
[15,0,3],
[0,15,7],
[-15,0,11],
[0,15,3],
[15,0,15],
[0,-15,15],
[15,0,3],
[0,-15,11],
[-15,0,3],
[0,-15,11],
[-15,0,3],
[0,-15,3],
[-15,0,7],
[0,-15,3],
[15,0,15],
[0,15,15],
[-15,0,3],
[0,15,3],
[-15,0,3],
[0,-15,7],
[-15,0,3],
[0,15,7],
[-15,0,11],
[0,-15,7],
[15,0,5]
]
#Inky movements
Inky_directions = [
[30,0,2],
[0,-15,4],
[15,0,10],
[0,15,7],
[15,0,3],
[0,-15,3],
[15,0,3],
[0,-15,15],
[-15,0,15],
[0,15,3],
[15,0,15],
[0,15,11],
[-15,0,3],
[0,-15,7],
[-15,0,11],
[0,15,3],
[-15,0,11],
[0,15,7],
[-15,0,3],
[0,-15,3],
[-15,0,3],
[0,-15,15],
[15,0,15],
[0,15,3],
[-15,0,15],
[0,15,11],
[15,0,3],
[0,-15,11],
[15,0,11],
[0,15,3],
[15,0,1],
]
#Clyde movements
Clyde_directions = [
[-30,0,2],
[0,-15,4],
[15,0,5],
[0,15,7],
[-15,0,11],
[0,-15,7],
[-15,0,3],
[0,15,7],
[-15,0,7],
[0,15,15],
[15,0,15],
[0,-15,3],
[-15,0,11],
[0,-15,7],
[15,0,3],
[0,-15,11],
[15,0,9],
]
pinky_movements_list = len(Pinky_directions)-1
blinky_movements_list = len(Blinky_directions)-1
inky_movements_list = len(Inky_directions)-1
clyde_movements_list = len(Clyde_directions)-1
pygame.init()
# Create an 606x606 sized screen
screen = pygame.display.set_mode([606, 606])
# Window Title
pygame.display.set_caption('Pacman')
# Surface Creation
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill(black)
clock = pygame.time.Clock()
pygame.font.init()
font = pygame.font.Font("freesansbold.ttf", 24)
# This creates all level 1 walls | [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
11748,
12972,
6057,
198,
6738,
6097,
13,
11930,
1330,
5007,
198,
6738,
6097,
13,
9967,
1330,
9726,
198,
6738,
6097,
13,
7829,
1330,
7853,
198,
6738,
6097,
13,
38933,
1330,
9897,
198,
198,... | 1.867093 | 1,407 |
from django.test import TestCase
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
628
] | 3.777778 | 9 |
# -*- coding: utf-8 -*-
if __name__ == "__main__":
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 1.969697 | 33 |
# -*- coding: utf-8 -*-
from amplify.agent.containers.abstract import AbstractCollector
from amplify.agent.context import context
from amplify.agent.nginx.config.config import ERROR_LOG_LEVELS
from amplify.agent.nginx.log.error import NginxErrorLogParser
from amplify.agent.util.tail import FileTail
__author__ = "Mike Belov"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__credits__ = ["Mike Belov", "Andrei Belov", "Ivan Poluyanov", "Oleg Mamontov", "Andrew Alexeev", "Grant Hulegaard"]
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "dedm@nginx.com"
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
47366,
13,
25781,
13,
3642,
50221,
13,
397,
8709,
1330,
27741,
31337,
273,
198,
6738,
47366,
13,
25781,
13,
22866,
1330,
4732,
198,
6738,
47366,
13,
25781,
13,
782... | 2.974619 | 197 |
import copy
import re
import numpy as np
from plotly import colors
from ..util import color_intervals, process_cmap
# Constants
# ---------
# Trace types that are individually positioned with their own domain.
# These are traces that don't overlay on top of each other in a shared subplot,
# so they are positioned individually. All other trace types are associated
# with a layout subplot type (xaxis/yaxis, polar, scene etc.)
#
# Each of these trace types has a `domain` property with `x`/`y` properties
_domain_trace_types = {'parcoords', 'pie', 'table', 'sankey', 'parcats'}
# Subplot types that are each individually positioned with a domain
#
# Each of these subplot types has a `domain` property with `x`/`y` properties.
# Note that this set does not contain `xaxis`/`yaxis` because these behave a
# little differently.
_subplot_types = {'scene', 'geo', 'polar', 'ternary', 'mapbox'}
# For most subplot types, a trace is associated with a particular subplot
# using a trace property with a name that matches the subplot type. For
# example, a `scatter3d.scene` property set to `'scene2'` associates a
# scatter3d trace with the second `scene` subplot in the figure.
#
# There are a few subplot types that don't follow this pattern, and instead
# the trace property is just named `subplot`. For example setting
# the `scatterpolar.subplot` property to `polar3` associates the scatterpolar
# trace with the third polar subplot in the figure
_subplot_prop_named_subplot = {'polar', 'ternary', 'mapbox'}
# Mapping from trace type to subplot type(s).
_trace_to_subplot = {
# xaxis/yaxis
'bar': ['xaxis', 'yaxis'],
'box': ['xaxis', 'yaxis'],
'candlestick': ['xaxis', 'yaxis'],
'carpet': ['xaxis', 'yaxis'],
'contour': ['xaxis', 'yaxis'],
'contourcarpet': ['xaxis', 'yaxis'],
'heatmap': ['xaxis', 'yaxis'],
'heatmapgl': ['xaxis', 'yaxis'],
'histogram': ['xaxis', 'yaxis'],
'histogram2d': ['xaxis', 'yaxis'],
'histogram2dcontour': ['xaxis', 'yaxis'],
'ohlc': ['xaxis', 'yaxis'],
'pointcloud': ['xaxis', 'yaxis'],
'scatter': ['xaxis', 'yaxis'],
'scattercarpet': ['xaxis', 'yaxis'],
'scattergl': ['xaxis', 'yaxis'],
'violin': ['xaxis', 'yaxis'],
# scene
'cone': ['scene'],
'mesh3d': ['scene'],
'scatter3d': ['scene'],
'streamtube': ['scene'],
'surface': ['scene'],
# geo
'choropleth': ['geo'],
'scattergeo': ['geo'],
# polar
'barpolar': ['polar'],
'scatterpolar': ['polar'],
'scatterpolargl': ['polar'],
# ternary
'scatterternary': ['ternary'],
# mapbox
'scattermapbox': ['mapbox']
}
# Aliases - map common style options to more common names
STYLE_ALIASES = {'line_width': 'width', 'alpha': 'opacity',
'cell_height': 'height', 'marker': 'symbol'}
# Regular expression to extract any trailing digits from a subplot-style
# string.
_subplot_re = re.compile('\D*(\d+)')
def _get_subplot_number(subplot_val):
"""
Extract the subplot number from a subplot value string.
'x3' -> 3
'polar2' -> 2
'scene' -> 1
'y' -> 1
Note: the absence of a subplot number (e.g. 'y') is treated by plotly as
a subplot number of 1
Parameters
----------
subplot_val: str
Subplot string value (e.g. 'scene4')
Returns
-------
int
"""
match = _subplot_re.match(subplot_val)
if match:
subplot_number = int(match.group(1))
else:
subplot_number = 1
return subplot_number
def _get_subplot_val_prefix(subplot_type):
"""
Get the subplot value prefix for a subplot type. For most subplot types
this is equal to the subplot type string itself. For example, a
`scatter3d.scene` value of `scene2` is used to associate the scatter3d
trace with the `layout.scene2` subplot.
However, the `xaxis`/`yaxis` subplot types are exceptions to this pattern.
For example, a `scatter.xaxis` value of `x2` is used to associate the
scatter trace with the `layout.xaxis2` subplot.
Parameters
----------
subplot_type: str
Subplot string value (e.g. 'scene4')
Returns
-------
str
"""
if subplot_type == 'xaxis':
subplot_val_prefix = 'x'
elif subplot_type == 'yaxis':
subplot_val_prefix = 'y'
else:
subplot_val_prefix = subplot_type
return subplot_val_prefix
def _get_subplot_prop_name(subplot_type):
"""
Get the name of the trace property used to associate a trace with a
particular subplot type. For most subplot types this is equal to the
subplot type string. For example, the `scatter3d.scene` property is used
to associate a `scatter3d` trace with a particular `scene` subplot.
However, for some subplot types the trace property is not named after the
subplot type. For example, the `scatterpolar.subplot` property is used
to associate a `scatterpolar` trace with a particular `polar` subplot.
Parameters
----------
subplot_type: str
Subplot string value (e.g. 'scene4')
Returns
-------
str
"""
if subplot_type in _subplot_prop_named_subplot:
subplot_prop_name = 'subplot'
else:
subplot_prop_name = subplot_type
return subplot_prop_name
def _normalize_subplot_ids(fig):
"""
Make sure a layout subplot property is initialized for every subplot that
is referenced by a trace in the figure.
For example, if a figure contains a `scatterpolar` trace with the `subplot`
property set to `polar3`, this function will make sure the figure's layout
has a `polar3` property, and will initialize it to an empty dict if it
does not
Note: This function mutates the input figure dict
Parameters
----------
fig: dict
A plotly figure dict
"""
layout = fig.setdefault('layout', {})
for trace in fig.get('data', None):
trace_type = trace.get('type', 'scatter')
subplot_types = _trace_to_subplot.get(trace_type, [])
for subplot_type in subplot_types:
subplot_prop_name = _get_subplot_prop_name(subplot_type)
subplot_val_prefix = _get_subplot_val_prefix(subplot_type)
subplot_val = trace.get(subplot_prop_name, subplot_val_prefix)
# extract trailing number (if any)
subplot_number = _get_subplot_number(subplot_val)
if subplot_number > 1:
layout_prop_name = subplot_type + str(subplot_number)
else:
layout_prop_name = subplot_type
if layout_prop_name not in layout:
layout[layout_prop_name] = {}
def _get_max_subplot_ids(fig):
"""
Given an input figure, return a dict containing the max subplot number
for each subplot type in the figure
Parameters
----------
fig: dict
A plotly figure dict
Returns
-------
dict
A dict from subplot type strings to integers indicating the largest
subplot number in the figure of that subplot type
"""
max_subplot_ids = {subplot_type: 0
for subplot_type in _subplot_types}
max_subplot_ids['xaxis'] = 0
max_subplot_ids['yaxis'] = 0
for trace in fig.get('data', []):
trace_type = trace.get('type', 'scatter')
subplot_types = _trace_to_subplot.get(trace_type, [])
for subplot_type in subplot_types:
subplot_prop_name = _get_subplot_prop_name(subplot_type)
subplot_val_prefix = _get_subplot_val_prefix(subplot_type)
subplot_val = trace.get(subplot_prop_name, subplot_val_prefix)
# extract trailing number (if any)
subplot_number = _get_subplot_number(subplot_val)
max_subplot_ids[subplot_type] = max(
max_subplot_ids[subplot_type], subplot_number)
return max_subplot_ids
def _offset_subplot_ids(fig, offsets):
"""
Apply offsets to the subplot id numbers in a figure.
Note: This function mutates the input figure dict
Note: This function assumes that the normalize_subplot_ids function has
already been run on the figure, so that all layout subplot properties in
use are explicitly present in the figure's layout.
Parameters
----------
fig: dict
A plotly figure dict
offsets: dict
A dict from subplot types to the offset to be applied for each subplot
type. This dict matches the form of the dict returned by
get_max_subplot_ids
"""
# Offset traces
for trace in fig.get('data', None):
trace_type = trace.get('type', 'scatter')
subplot_types = _trace_to_subplot.get(trace_type, [])
for subplot_type in subplot_types:
subplot_prop_name = _get_subplot_prop_name(subplot_type)
# Compute subplot value prefix
subplot_val_prefix = _get_subplot_val_prefix(subplot_type)
subplot_val = trace.get(subplot_prop_name, subplot_val_prefix)
subplot_number = _get_subplot_number(subplot_val)
offset_subplot_number = (
subplot_number + offsets.get(subplot_type, 0))
if offset_subplot_number > 1:
trace[subplot_prop_name] = (
subplot_val_prefix + str(offset_subplot_number))
else:
trace[subplot_prop_name] = subplot_val_prefix
# layout subplots
layout = fig.setdefault('layout', {})
new_subplots = {}
for subplot_type in offsets:
offset = offsets[subplot_type]
if offset < 1:
continue
for layout_prop in list(layout.keys()):
if layout_prop.startswith(subplot_type):
subplot_number = _get_subplot_number(layout_prop)
new_subplot_number = subplot_number + offset
new_layout_prop = subplot_type + str(new_subplot_number)
new_subplots[new_layout_prop] = layout.pop(layout_prop)
layout.update(new_subplots)
# xaxis/yaxis anchors
x_offset = offsets.get('xaxis', 0)
y_offset = offsets.get('yaxis', 0)
for layout_prop in list(layout.keys()):
if layout_prop.startswith('xaxis'):
xaxis = layout[layout_prop]
anchor = xaxis.get('anchor', 'y')
anchor_number = _get_subplot_number(anchor) + y_offset
if anchor_number > 1:
xaxis['anchor'] = 'y' + str(anchor_number)
else:
xaxis['anchor'] = 'y'
elif layout_prop.startswith('yaxis'):
yaxis = layout[layout_prop]
anchor = yaxis.get('anchor', 'x')
anchor_number = _get_subplot_number(anchor) + x_offset
if anchor_number > 1:
yaxis['anchor'] = 'x' + str(anchor_number)
else:
yaxis['anchor'] = 'x'
# annotations/shapes/images
for layout_prop in ['annotations', 'shapes', 'images']:
for obj in layout.get(layout_prop, []):
if x_offset:
xref = obj.get('xref', 'x')
if xref != 'paper':
xref_number = _get_subplot_number(xref)
obj['xref'] = 'x' + str(xref_number + x_offset)
if y_offset:
yref = obj.get('yref', 'y')
if yref != 'paper':
yref_number = _get_subplot_number(yref)
obj['yref'] = 'y' + str(yref_number + y_offset)
def _scale_translate(fig, scale_x, scale_y, translate_x, translate_y):
"""
Scale a figure and translate it to sub-region of the original
figure canvas.
Note: If the input figure has a title, this title is converted into an
annotation and scaled along with the rest of the figure.
Note: This function mutates the input fig dict
Note: This function assumes that the normalize_subplot_ids function has
already been run on the figure, so that all layout subplot properties in
use are explicitly present in the figure's layout.
Parameters
----------
fig: dict
A plotly figure dict
scale_x: float
Factor by which to scale the figure in the x-direction. This will
typically be a value < 1. E.g. a value of 0.5 will cause the
resulting figure to be half as wide as the original.
scale_y: float
Factor by which to scale the figure in the y-direction. This will
typically be a value < 1
translate_x: float
Factor by which to translate the scaled figure in the x-direction in
normalized coordinates.
translate_y: float
Factor by which to translate the scaled figure in the x-direction in
normalized coordinates.
"""
data = fig.setdefault('data', [])
layout = fig.setdefault('layout', {})
# Scale/translate traces
for trace in data:
trace_type = trace.get('type', 'scatter')
if trace_type in _domain_trace_types:
perform_scale_translate(trace)
# Scale/translate subplot containers
for prop in layout:
for subplot_type in _subplot_types:
if prop.startswith(subplot_type):
perform_scale_translate(layout[prop])
for prop in layout:
if prop.startswith('xaxis'):
xaxis = layout[prop]
x_domain = xaxis.get('domain', [0, 1])
xaxis['domain'] = scale_translate_x(x_domain)
elif prop.startswith('yaxis'):
yaxis = layout[prop]
y_domain = yaxis.get('domain', [0, 1])
yaxis['domain'] = scale_translate_y(y_domain)
# convert title to annotation
# This way the annotation will be scaled with the reset of the figure
annotations = layout.get('annotations', [])
title = layout.pop('title', None)
if title:
titlefont = layout.pop('titlefont', {})
title_fontsize = titlefont.get('size', 17)
min_fontsize = 12
titlefont['size'] = round(min_fontsize +
(title_fontsize - min_fontsize) * scale_x)
annotations.append({
'text': title,
'showarrow': False,
'xref': 'paper',
'yref': 'paper',
'x': 0.5,
'y': 1.01,
'xanchor': 'center',
'yanchor': 'bottom',
'font': titlefont
})
layout['annotations'] = annotations
# annotations
for obj in layout.get('annotations', []):
if obj.get('xref', None) == 'paper':
obj['x'] = obj.get('x', 0.5) * scale_x + translate_x
obj['y'] = obj.get('y', 0.5) * scale_y + translate_y
def merge_figure(fig, subfig):
"""
Merge a sub-figure into a parent figure
Note: This function mutates the input fig dict, but it does not mutate
the subfig dict
Parameters
----------
fig: dict
The plotly figure dict into which the sub figure will be merged
subfig: dict
The plotly figure dict that will be copied and then merged into `fig`
"""
# traces
data = fig.setdefault('data', [])
data.extend(copy.deepcopy(subfig.get('data', [])))
# layout
layout = fig.setdefault('layout', {})
_merge_layout_objs(layout, subfig.get('layout', {}))
def _merge_layout_objs(obj, subobj):
"""
Merge layout objects recursively
Note: This function mutates the input obj dict, but it does not mutate
the subobj dict
Parameters
----------
obj: dict
dict into which the sub-figure dict will be merged
subobj: dict
dict that sill be copied and merged into `obj`
"""
for prop, val in subobj.items():
if isinstance(val, dict) and prop in obj:
# recursion
_merge_layout_objs(obj[prop], val)
elif (isinstance(val, list) and
obj.get(prop, None) and
isinstance(obj[prop][0], dict)):
# append
obj[prop].extend(val)
else:
# init/overwrite
obj[prop] = copy.deepcopy(val)
def _compute_subplot_domains(widths, spacing):
"""
Compute normalized domain tuples for a list of widths and a subplot
spacing value
Parameters
----------
widths: list of float
List of the desired withs of each subplot. The length of this list
is also the specification of the number of desired subplots
spacing: float
Spacing between subplots in normalized coordinates
Returns
-------
list of tuple of float
"""
# normalize widths
widths_sum = float(sum(widths))
total_spacing = (len(widths) - 1) * spacing
widths = [(w / widths_sum)*(1-total_spacing) for w in widths]
domains = []
for c in range(len(widths)):
domain_start = c * spacing + sum(widths[:c])
domain_stop = min(1, domain_start + widths[c])
domains.append((domain_start, domain_stop))
return domains
def figure_grid(figures_grid,
row_heights=None,
column_widths=None,
row_spacing=0.15,
column_spacing=0.15,
share_xaxis=False,
share_yaxis=False):
"""
Construct a figure from a 2D grid of sub-figures
Parameters
----------
figures_grid: list of list of (dict or None)
2D list of plotly figure dicts that will be combined in a grid to
produce the resulting figure. None values maybe used to leave empty
grid cells
row_heights: list of float (default None)
List of the relative heights of each row in the grid (these values
will be normalized by the function)
column_widths: list of float (default None)
List of the relative widths of each column in the grid (these values
will be normalized by the function)
row_spacing: float (default 0.15)
Vertical spacing between rows in the gird in normalized coordinates
column_spacing: float (default 0.15)
Horizontal spacing between columns in the grid in normalized
coordinates
share_xaxis: bool (default False)
Share x-axis between sub-figures in the same column. This will only
work if each sub-figure has a single x-axis
share_yaxis: bool (default False)
Share y-axis between sub-figures in the same row. This will only work
if each subfigure has a single y-axis
Returns
-------
dict
A plotly figure dict
"""
# compute number of rows/cols
rows = len(figures_grid)
columns = len(figures_grid[0])
# Initialize row heights / column widths
if not row_heights:
row_heights = [1 for _ in range(rows)]
if not column_widths:
column_widths = [1 for _ in range(columns)]
# Compute domain widths/heights for subplots
column_domains = _compute_subplot_domains(column_widths, column_spacing)
row_domains = _compute_subplot_domains(row_heights, row_spacing)
output_figure = {'data': [], 'layout': {}}
for r, (fig_row, row_domain) in enumerate(zip(figures_grid, row_domains)):
for c, (fig, column_domain) in enumerate(zip(fig_row, column_domains)):
if fig:
fig = copy.deepcopy(fig)
_normalize_subplot_ids(fig)
subplot_offsets = _get_max_subplot_ids(output_figure)
if share_xaxis:
subplot_offsets['xaxis'] = c
if r != 0:
# Only use xaxes from bottom row
fig.get('layout', {}).pop('xaxis', None)
if share_yaxis:
subplot_offsets['yaxis'] = r
if c != 0:
# Only use yaxes from first column
fig.get('layout', {}).pop('yaxis', None)
_offset_subplot_ids(fig, subplot_offsets)
scale_x = column_domain[1] - column_domain[0]
scale_y = row_domain[1] - row_domain[0]
_scale_translate(fig,
scale_x, scale_y,
column_domain[0], row_domain[0])
merge_figure(output_figure, fig)
return output_figure
def get_colorscale(cmap, levels=None, cmin=None, cmax=None):
"""Converts a cmap spec to a plotly colorscale
Args:
cmap: A recognized colormap by name or list of colors
levels: A list or integer declaring the color-levels
cmin: The lower bound of the color range
cmax: The upper bound of the color range
Returns:
A valid plotly colorscale
"""
ncolors = levels if isinstance(levels, int) else None
if isinstance(levels, list):
ncolors = len(levels) - 1
if isinstance(cmap, list) and len(cmap) != ncolors:
raise ValueError('The number of colors in the colormap '
'must match the intervals defined in the '
'color_levels, expected %d colors found %d.'
% (ncolors, len(cmap)))
try:
palette = process_cmap(cmap, ncolors)
except Exception as e:
colorscale = colors.PLOTLY_SCALES.get(cmap)
if colorscale is None:
raise e
return colorscale
if isinstance(levels, int):
colorscale = []
scale = np.linspace(0, 1, levels+1)
for i in range(levels+1):
if i == 0:
colorscale.append((scale[0], palette[i]))
elif i == levels:
colorscale.append((scale[-1], palette[-1]))
else:
colorscale.append((scale[i], palette[i-1]))
colorscale.append((scale[i], palette[i]))
return colorscale
elif isinstance(levels, list):
palette, (cmin, cmax) = color_intervals(
palette, levels, clip=(cmin, cmax))
return colors.make_colorscale(palette)
| [
11748,
4866,
198,
11748,
302,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
7110,
306,
1330,
7577,
198,
198,
6738,
11485,
22602,
1330,
3124,
62,
3849,
12786,
11,
1429,
62,
66,
8899,
198,
198,
2,
4757,
1187,
198,
2,
45337,
198,
... | 2.310794 | 9,598 |
from django.db import migrations
from enumfields.fields import EnumIntegerField
from ..enums import LogEntryKind
| [
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
198,
6738,
33829,
25747,
13,
25747,
1330,
2039,
388,
46541,
15878,
198,
198,
6738,
11485,
268,
5700,
1330,
5972,
30150,
35854,
628
] | 3.833333 | 30 |
#!/usr/bin/env python3
# Author: Milos Buncic
# Date: 2020/06/10
# Description: Generate main configuration file (hooks.json) for webhook service
import os
import sys
import json
# Webhook working directory
WH_DIR = '/etc/webhook'
# Webhook configuration file
WH_CONFIG = '{}/hooks.json'.format(WH_DIR)
def writefile(filename, text):
""" Write JSON object to file """
try:
with open(filename, 'w') as f:
f.write(json.dumps(text, indent=2, ensure_ascii=False, sort_keys=True))
f.write('\n')
except IOError:
print('Error while writing to file')
def readfile(filename):
""" Read JSON from file and return dict """
try:
with open(filename, 'r') as f:
return json.load(f)
except IOError:
print('Error while reading from file')
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
6434,
25,
4460,
418,
347,
19524,
291,
198,
2,
7536,
25,
12131,
14,
3312,
14,
940,
198,
2,
12489,
25,
2980,
378,
1388,
8398,
2393,
357,
25480,
82,
13,
17752,
8,
329,
3992,
... | 2.772109 | 294 |
# define globals
subdomainsList = [] | [
2,
8160,
15095,
874,
198,
7266,
3438,
1299,
8053,
796,
17635
] | 3.272727 | 11 |
import roslib; roslib.load_manifest('rcommander_web')
import rcommander_web.rcommander_auto_server as rcs
import sys
import rospy
rospy.init_node('rcommander_web_test')
path = sys.argv[1]
robot = MyRobotClass()
rcs.run(robot, path)
| [
11748,
686,
6649,
571,
26,
686,
6649,
571,
13,
2220,
62,
805,
8409,
10786,
81,
9503,
4066,
62,
12384,
11537,
198,
11748,
374,
9503,
4066,
62,
12384,
13,
81,
9503,
4066,
62,
23736,
62,
15388,
355,
374,
6359,
198,
11748,
25064,
198,
1... | 2.452632 | 95 |
# -*- coding:utf-8 -*-
'''
有一个XxY的网格,一个机器人只能走格点且只能向右或向下走,要从左上角走到右下角。请设计一个算法,计算机器人有多少种走法。注意这次的网格中有些障碍点是不能走的。
给定一个int[][] map(C++ 中为vector >),表示网格图,若map[i][j]为1则说明该点不是障碍点,否则则为障碍。另外给定int x,int y,表示网格的大小。请返回机器人从(0,0)走到(x - 1,y - 1)的走法数,为了防止溢出,请将结果Mod 1000000007。保证x和y均小于等于50
'''
# test case
m = [[1,1,1,1],[1,1,1,1],[1,1,1,1],[1,1,1,1],[1,1,1,1],[1,1,1,1],[1,1,1,1],[1,0,1,1],[0,1,1,1],[1,1,1,1],[1,1,1,1]]
x = 11
y = 4
res = 196
print(Robot().countWays(m, x, y) == res) | [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
201,
198,
7061,
6,
201,
198,
17312,
231,
31660,
10310,
103,
55,
87,
56,
21410,
163,
121,
239,
43718,
120,
171,
120,
234,
31660,
10310,
103,
17312,
118,
161,
247,
101,
21689,
2... | 0.882353 | 544 |
from ms_deisotope.peak_set import DeconvolutedPeakSet
from ms_deisotope.spectrum_graph import (PathFinder, amino_acids, MassWrapper)
from brainpy import SimpleComposition
H2O = SimpleComposition({"H": 2, "O": 1})
NH3 = SimpleComposition({"H": 3, "N": 1})
default_losses = [MassWrapper("-H2O", -H2O.mass()), MassWrapper("-NH3", -NH3.mass())]
| [
6738,
13845,
62,
2934,
271,
313,
3008,
13,
36729,
62,
2617,
1330,
4280,
261,
10396,
7241,
6435,
461,
7248,
198,
6738,
13845,
62,
2934,
271,
313,
3008,
13,
4443,
6582,
62,
34960,
1330,
357,
15235,
37,
5540,
11,
23206,
62,
330,
2340,
... | 2.562963 | 135 |
# -*- coding: utf-8 -*-
from celery import shared_task
from django.http import HttpResponse
import fitnesse.import_data
@shared_task
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
18725,
1924,
1330,
4888,
62,
35943,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
11748,
4197,
2516,
325,
13,
11748,
62,
7890,
628,
198,
31... | 2.854167 | 48 |
import os
import time
import magic
import settings as cfg
| [
11748,
28686,
198,
11748,
640,
198,
11748,
5536,
198,
198,
11748,
6460,
355,
30218,
70,
198
] | 3.6875 | 16 |
"""
BSAM CLI version
"""
__version__ = '0.7.12'
| [
37811,
198,
4462,
2390,
43749,
2196,
198,
37811,
198,
198,
834,
9641,
834,
796,
705,
15,
13,
22,
13,
1065,
6,
198
] | 2.227273 | 22 |
# Slicable Set.
#
# @author Oktay Acikalin <oktay.acikalin@gmail.com>
# @copyright Oktay Acikalin
# @license MIT (LICENSE.txt)
# from diamond.decorators import time
| [
2,
311,
677,
540,
5345,
13,
198,
2,
198,
2,
2488,
9800,
220,
220,
220,
6762,
83,
323,
317,
979,
74,
14414,
1279,
482,
83,
323,
13,
330,
1134,
14414,
31,
14816,
13,
785,
29,
198,
2,
2488,
22163,
4766,
6762,
83,
323,
317,
979,
7... | 2.356164 | 73 |
DEBUG = True
FLASK_ENV = 'development'
| [
30531,
796,
6407,
198,
3697,
1921,
42,
62,
1677,
53,
796,
705,
31267,
6,
198
] | 2.6 | 15 |
import time
| [
11748,
640,
628,
628,
198
] | 3.2 | 5 |
from .bot import CalcBot | [
6738,
764,
13645,
1330,
2199,
66,
20630
] | 3.428571 | 7 |
import pytest
from gaphor.ui.consolewindow import ConsoleWindow
import gaphor.services.componentregistry
@pytest.fixture
@pytest.fixture
| [
11748,
12972,
9288,
198,
198,
6738,
308,
6570,
273,
13,
9019,
13,
41947,
17497,
1330,
24371,
27703,
198,
11748,
308,
6570,
273,
13,
30416,
13,
42895,
2301,
4592,
628,
198,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
... | 3.130435 | 46 |
if __name__ == '__main__':
    # letras() is a generator defined earlier in this file.
    letra = letras()
    try:
        # Print the yielded letters, stopping once 'M' has been printed.
        # range(ord('M')) only bounds the loop against a runaway generator.
        for _ in range(ord('M')):
            letter = next(letra)
            print(letter)
            # BUG FIX: the original compared the int loop index against 'M'
            # (`if i == 'M'`), which is never true; compare the yielded letter.
            if letter == 'M':
                break
    except Exception as e:
        print(e)
| [
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
201,
198,
220,
220,
220,
220,
220,
220,
220,
1309,
430,
796,
1309,
8847,
3419,
201,
198,
220,
220,
220,
220,
220,
220,
220,
1949,
25,
201,
198,
220,
220... | 1.550296 | 169 |
import sys
import os
import json
from datetime import datetime
import argparse
import pandas as pd
import numpy as np
from skimage import io, filters, measure, color, exposure, morphology, feature, img_as_float, img_as_uint
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
import re
# parse input
parser = argparse.ArgumentParser()
# path to metadata file with specific columns:
# 1. file_path = path to each separate image file (even separated by channels)
# 2. sample_name = name of sample that you want to be output
# 3. replicate = the replicate number for each set of images. The combination of output_name + replicate should be unique for that sample
# 4. channel_id = 1 or 2 (currently only supports 2 channels)
# 5. min_intensity = minimum intensity threshold for that channel. Anything below = 0.
# 6. max_intensity = maximum intensity threshold for that channel. Anything above = max_intensity
parser.add_argument("metadata_file")
parser.add_argument("--o", type=str)  # output directory name
# parser.add_argument('--no-legend', dest='legend_flag', action='store_false')
# parser.set_defaults(threshold_flag=True, fit_flag=True, legend_flag=True)
input_args = parser.parse_args()
metadata_path = input_args.metadata_file
# Fail fast before doing any work if the metadata file is missing.
if not os.path.isfile(metadata_path):
    sys.exit('ERROR: Could not find metadata file')
# read_metadata / generate_images are defined earlier in this file.
output_dirs = read_metadata(input_args)
generate_images(output_dirs, input_args)
11748,
25064,
198,
11748,
28686,
198,
11748,
33918,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
1822,
29572,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
9060,
1330,
33245,
11,
16628,... | 3.408257 | 436 |
# Generated by Django 3.1.5 on 2021-02-21 13:29
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
20,
319,
33448,
12,
2999,
12,
2481,
1511,
25,
1959,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
#!/usr/bin/env python3

import setuptools

# Use the README as the long description shown on PyPI.
with open("README.rst", "r") as readme:
    long_description = readme.read()

# Package metadata collected in one place, then handed to setup().
setup_kwargs = dict(
    name="jclib",
    version="0.0.1",
    author="Joel Cross",
    author_email="joel@kazbak.co.uk",
    description="Reusable code",
    long_description=long_description,
    url="https://github.com/ukch/jclib",
    packages=["jclib"],
    license="MIT",
    classifiers=(
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
    ),
)

setuptools.setup(**setup_kwargs)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
81,
301,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
1... | 2.390438 | 251 |
#!/usr/bin/python
# Copyright 2020 Fluid Numerics LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import os
import shlex
import socket
import subprocess
import time
import urllib
import urllib2
#END install_hip_fortran
except:
print('Startup script failed!')
copy_logs_to_bucket()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
15069,
12131,
1610,
27112,
399,
6975,
873,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
... | 3.355102 | 245 |
import numpy as np
import os
from ctapipe.core.traits import Unicode, List, Int, Bool
from ctapipe.calib.camera import CameraCalibrator
from ctapipe.calib.camera.calibrator import integration_correction
from ctapipe.image.reducer import DataVolumeReducer
from ctapipe.image.extractor import ImageExtractor
from ctapipe.io.hdf5tableio import HDF5TableReader
from ctapipe.io.containers import MonitoringContainer
from ctapipe.calib.camera import gainselection
from lstchain.calib.camera.pulse_time_correction import PulseTimeCorrection
__all__ = ['LSTCameraCalibrator','get_charge_correction']
class LSTCameraCalibrator(CameraCalibrator):
    """
    Calibrator to handle the LST camera calibration chain, in order to fill
    the DL1 data level in the event container.
    """
    extractor_product = Unicode(
        'LocalPeakWindowSum',
        help='Name of the charge extractor to be used'
    ).tag(config=True)
    reducer_product = Unicode(
        'NullDataVolumeReducer',
        help='Name of the DataVolumeReducer to use'
    ).tag(config=True)
    calibration_path = Unicode(
        '',
        help='Path to LST calibration file'
    ).tag(config=True)
    time_calibration_path = Unicode(
        '',
        help='Path to drs4 time calibration file'
    ).tag(config=True)
    allowed_tels = List(
        [1],
        help='List of telescope to be calibrated'
    ).tag(config=True)
    gain_threshold = Int(
        4094,
        allow_none=True,
        help='Threshold for the gain selection in ADC'
    ).tag(config=True)
    charge_scale = List(
        [1,1],
        help='Multiplicative correction factor for charge estimation [HG,LG]'
    ).tag(config=True)
    apply_charge_correction = Bool(
        False,
        help='Apply charge pulse shape charge correction'
    ).tag(config=True)

    def __init__(self, **kwargs):
        """
        Parameters
        ----------
        reducer_product : ctapipe.image.reducer.DataVolumeReducer
            The DataVolumeReducer to use. If None, then
            NullDataVolumeReducer will be used by default, and waveforms
            will not be reduced.
        extractor_product : ctapipe.image.extractor.ImageExtractor
            The ImageExtractor to use. If None, then LocalPeakWindowSum
            will be used by default.
        calibration_path :
            Path to LST calibration file to get the pedestal and flat-field corrections
        kwargs
        """
        super().__init__(**kwargs)
        # load the waveform charge extractor
        self.image_extractor = ImageExtractor.from_name(
            self.extractor_product,
            config=self.config
        )
        self.log.info(f"extractor {self.extractor_product}")
        # BUG FIX: leftover debug `print("EXTRACTOR", ...)` wrote to stdout from
        # library code; route it through the logger at debug level instead.
        self.log.debug(f"extractor instance {self.image_extractor}")
        self.data_volume_reducer = DataVolumeReducer.from_name(
            self.reducer_product,
            config=self.config
        )
        self.log.info(f" {self.reducer_product}")
        # declare gain selector if the threshold is defined
        if self.gain_threshold:
            self.gain_selector = gainselection.ThresholdGainSelector(
                threshold=self.gain_threshold
            )
        # declare time calibrator if correction file exist
        if os.path.exists(self.time_calibration_path):
            self.time_corrector = PulseTimeCorrection(
                calib_file_path=self.time_calibration_path
            )
        else:
            raise IOError(f"Time calibration file {self.time_calibration_path} not found!")
        # calibration data container
        self.mon_data = MonitoringContainer()
        # initialize the MonitoringContainer() for the moment it reads it from a hdf5 file
        self._initialize_correction()
        # initialize the pulse shape corrections
        if self.apply_charge_correction:
            # get the pulse shape corrections
            pulse_correction = get_charge_correction(
                self.image_extractor.window_width,
                self.image_extractor.window_shift,
            )
        else:
            # no pulse shape correction by default
            pulse_correction = np.ones(2)
        self.log.info(f"Pulse shape charge correction {pulse_correction}")
        # global charge corrections : pulse shape * scale
        self.charge_correction = pulse_correction * self.charge_scale
        self.log.info(f"Total charge correction {self.charge_correction}")

    def _initialize_correction(self):
        """
        Read the correction from hdf5 calibration file
        """
        self.mon_data.tels_with_data = self.allowed_tels
        self.log.info(f"read {self.calibration_path}")
        try:
            with HDF5TableReader(self.calibration_path) as h5_table:
                for telid in self.allowed_tels:
                    # read the calibration data
                    table = '/tel_' + str(telid) + '/calibration'
                    next(h5_table.read(table, self.mon_data.tel[telid].calibration))
                    # read pedestal data
                    table = '/tel_' + str(telid) + '/pedestal'
                    next(h5_table.read(table, self.mon_data.tel[telid].pedestal))
                    # read flat-field data
                    table = '/tel_' + str(telid) + '/flatfield'
                    next(h5_table.read(table, self.mon_data.tel[telid].flatfield))
                    # read the pixel_status container
                    table = '/tel_' + str(telid) + '/pixel_status'
                    next(h5_table.read(table, self.mon_data.tel[telid].pixel_status))
        except Exception:
            self.log.exception(
                f"Problem in reading calibration file {self.calibration_path}"
            )
            raise

    def _calibrate_dl0(self, event, telid):
        """
        create dl0 level, for the moment copy the r1
        """
        waveforms = event.r1.tel[telid].waveform
        if self._check_r1_empty(waveforms):
            return
        event.dl0.event_id = event.r1.event_id
        # if not already done, initialize the event monitoring containers
        if event.mon.tel[telid].calibration.dc_to_pe is None:
            event.mon.tel[telid].calibration = self.mon_data.tel[telid].calibration
            event.mon.tel[telid].flatfield = self.mon_data.tel[telid].flatfield
            event.mon.tel[telid].pedestal = self.mon_data.tel[telid].pedestal
            event.mon.tel[telid].pixel_status = self.mon_data.tel[telid].pixel_status
        #
        # subtract the pedestal per sample and multiply for the calibration coefficients
        #
        event.dl0.tel[telid].waveform = (
            (waveforms - self.mon_data.tel[telid].calibration.pedestal_per_sample[:, :, np.newaxis])
            * self.mon_data.tel[telid].calibration.dc_to_pe[:, :, np.newaxis])

    def _calibrate_dl1(self, event, telid):
        """
        create calibrated dl1 image and calibrate it
        """
        waveforms = event.dl0.tel[telid].waveform
        if self._check_dl0_empty(waveforms):
            return
        if self.image_extractor.requires_neighbors():
            camera = event.inst.subarray.tel[telid].camera
            self.image_extractor.neighbors = camera.neighbor_matrix_where
        charge, pulse_time = self.image_extractor(waveforms)
        # correct charge for width integration
        corrected_charge = charge * self.charge_correction[:,np.newaxis]
        # correct time with drs4 correction if available
        # (always set: __init__ raises if the time calibration file is missing)
        if self.time_corrector:
            pulse_time = self.time_corrector.get_corr_pulse(event, pulse_time)
        # add flat-fielding time correction
        pulse_time_ff_corrected = pulse_time + self.mon_data.tel[telid].calibration.time_correction
        # perform the gain selection if the threshold is defined
        if self.gain_threshold:
            waveforms, gain_mask = self.gain_selector(event.r1.tel[telid].waveform)
            event.dl1.tel[telid].image = corrected_charge[gain_mask, np.arange(charge.shape[1])]
            event.dl1.tel[telid].pulse_time = pulse_time_ff_corrected[gain_mask, np.arange(pulse_time_ff_corrected.shape[1])]
            # remember which channel has been selected
            event.r1.tel[telid].selected_gain_channel = gain_mask
        # if threshold == None
        else:
            event.dl1.tel[telid].image = corrected_charge
            event.dl1.tel[telid].pulse_time = pulse_time_ff_corrected
def get_charge_correction(window_width, window_shift):
    """
    Obtain charge correction from the reference pulse shape,
    this function is will be not necessary in ctapipe 0.8

    Parameters
    ----------
    window_width: width of the integration window
    window_shift: shift of the integration window

    Returns
    -------
    pulse_correction: pulse correction for HG and LG, np.array(2)
    """
    # read the pulse shape file (to be changed for ctapipe version 0.8)
    # read pulse shape from oversampled file shipped with the package
    pulse_ref_file = os.path.join(
        os.path.dirname(__file__),
        "../../data/oversampled_pulse_LST_8dynode_pix6_20200204.dat"
    )
    try:
        hg_pulse_shape = []
        lg_pulse_shape = []
        with open(pulse_ref_file, 'r') as file:
            # first line holds the time slice and sampling step of the shape
            pulse_time_slice, pulse_time_step = file.readline().split()
            for line in file:
                if "#" not in line:
                    columns = line.split()
                    hg_pulse_shape.append(float(columns[0]))
                    lg_pulse_shape.append(float(columns[1]))

        pulse_shape = np.array((hg_pulse_shape, lg_pulse_shape))

        pulse_correction = integration_correction(pulse_shape.shape[0],
                                                  pulse_shape,
                                                  float(pulse_time_step),
                                                  float(pulse_time_slice),
                                                  window_width,
                                                  window_shift
                                                  )
    # BUG FIX: the original bare `except:` handler referenced
    # `self.calibration_path`, but this is a module-level function with no
    # `self`, so any failure raised NameError instead of the intended message.
    # Narrow to Exception (don't swallow KeyboardInterrupt) and report the
    # file that actually failed.
    except Exception:
        print(f"Problem in reading pulse shape file {pulse_ref_file}")
        raise

    return np.array(pulse_correction)
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
6738,
269,
44335,
3757,
13,
7295,
13,
9535,
896,
1330,
34371,
11,
7343,
11,
2558,
11,
347,
970,
198,
6738,
269,
44335,
3757,
13,
9948,
571,
13,
25695,
1330,
20432,
9771,
2889,
135... | 2.196379 | 4,695 |
# Main
# Default credentials; checkPassword is defined earlier in this file.
userName = "Admin"
passWord = "Admin"

print("What is the username?")
# BUG FIX: the username response was stored into passWord and then
# immediately overwritten by the password prompt, so the entered username
# was silently discarded. Store it in userName.
userName = input()
print("what is the password?")
passWord = input()

test = checkPassword(userName, passWord)
if test:
    print("Login successful")
else:
    print("Login not successful")
| [
201,
198,
2,
8774,
201,
198,
7220,
5376,
796,
366,
46787,
1,
201,
198,
6603,
26449,
796,
366,
46787,
1,
201,
198,
4798,
7203,
2061,
318,
262,
20579,
1701,
8,
201,
198,
6603,
26449,
796,
5128,
3419,
201,
198,
4798,
7203,
10919,
318,
... | 2.841584 | 101 |
from rest_framework import serializers
from .models import NewsType, HelloNews
from pictures.serializers import GroupSerializerDetail, MemberSerializerDetail
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
764,
27530,
1330,
3000,
6030,
11,
18435,
9980,
198,
6738,
5986,
13,
46911,
11341,
1330,
4912,
32634,
7509,
11242,
603,
11,
10239,
32634,
7509,
11242,
603,
628,
628,
628
] | 4.205128 | 39 |
"""
Shared utility functions.
"""
import json
def read_local_config(config_fname):
    """Load settings from a local, non-git-committed JSON file.

    Returns the parsed JSON content (typically a dict).
    """
    with open(config_fname) as config_file:
        return json.load(config_file)
| [
37811,
198,
2484,
1144,
10361,
5499,
13,
198,
37811,
198,
11748,
33918,
628,
198,
4299,
1100,
62,
12001,
62,
11250,
7,
11250,
62,
69,
3672,
2599,
198,
220,
220,
220,
705,
7061,
4149,
4566,
82,
422,
1957,
11,
1729,
12,
18300,
725,
86... | 2.631579 | 95 |
from TelegramBotAPI.types.type import Type
from TelegramBotAPI.types.field import Field
from TelegramBotAPI.types.primitive import Integer, String, Boolean, Float, InputFile
from TelegramBotAPI.types.compound import ReplyKeyboardMarkup, ReplyKeyboardHide, ForceReply
from TelegramBotAPI.types.compound import Update, Message, User, UserProfilePhotos, File
| [
6738,
50203,
20630,
17614,
13,
19199,
13,
4906,
1330,
5994,
198,
6738,
50203,
20630,
17614,
13,
19199,
13,
3245,
1330,
7663,
198,
6738,
50203,
20630,
17614,
13,
19199,
13,
19795,
1800,
1330,
34142,
11,
10903,
11,
41146,
11,
48436,
11,
2... | 3.845361 | 97 |
from MCNPtools import calculate_materials
#
# Stainless Steel 316
#
# NOTE(review): trace values appear to be mass ppm (they sum to ~1e6);
# presumably mixture() normalizes fractions internally — confirm in MCNPtools.
ss316=calculate_materials.mixture('SS316')
ss316.mass_density=7.95
ss316.add_mixture( 'P' , 210.00, mode='mass')
ss316.add_mixture( 'Mo' , 1990.00, mode='mass')
ss316.add_mixture( 'Zn' , 12.00, mode='mass')
ss316.add_mixture( 'Pb' , 40.00, mode='mass')
ss316.add_mixture( 'Co' , 1330.00, mode='mass')
ss316.add_mixture( 'Ni' , 87600.00, mode='mass')
ss316.add_mixture( 'Si' , 4010.00, mode='mass')
ss316.add_mixture( 'Mn' , 13600.00, mode='mass')
ss316.add_mixture( 'Fe' , 715109.00, mode='mass')
ss316.add_mixture( 'Cr' , 172000.00, mode='mass')
ss316.add_mixture( 'Mg' , 3.10, mode='mass')
ss316.add_mixture( 'V' , 800.00, mode='mass')
ss316.add_mixture( 'Cu' , 1240.00, mode='mass')
ss316.add_mixture( 'Ag' , 6.00, mode='mass')
ss316.add_mixture( 'Ti' , 128.00, mode='mass')
ss316.add_mixture( 'Ca' , 4.40, mode='mass')
ss316.add_mixture( 'Al' , 35.00, mode='mass')
ss316.add_mixture( 'Sr' , 0.30, mode='mass')
ss316.add_mixture( 'K' , 2.10, mode='mass')
ss316.add_mixture( 'Cs' , 13.00, mode='mass')
ss316.add_mixture( 'Rb' , 5.00, mode='mass')
ss316.add_mixture( 'W' , 1350.00, mode='mass')
ss316.add_mixture( 'Ga' , 123.00, mode='mass')
ss316.add_mixture( 'C' , 390.00, mode='mass')
ss316.finalize()
#
# Neodymium permanent magnets (Nd2Fe14B atomic stoichiometry)
#
neomag = calculate_materials.mixture('NeoMag')
neomag.mass_density=7.4
for _element, _atoms in (('Nd', 2.00), ('Fe', 14.00), ('B', 1.00)):
    neomag.add_mixture(_element, _atoms, mode='atom')
neomag.finalize()
#
# zircaloy-2
#
zircII = calculate_materials.mixture('Zircaloy-II')
# BUG FIX: mass_density was 11.32 g/cc — that is the density of lead, almost
# certainly a copy-paste slip. Zircaloy-2 is ~6.56 g/cc (PNNL-15870
# compendium of material composition data).
zircII.mass_density=6.56
zircII.add_mixture( 'Zr',980700.0,mode='mass')
zircII.add_mixture( 'Sn', 14600.0,mode='mass')
zircII.add_mixture( 'Fe', 1500.0,mode='mass')
zircII.add_mixture( 'Cr', 2500.0,mode='mass')
zircII.add_mixture( 'Ni', 500.0,mode='mass')
zircII.add_mixture( 'Hf', 200.0,mode='mass')
zircII.finalize()
#
# T91 steel
# wt%, from http://pure.qub.ac.uk/portal/files/17599818/341_manuscript.pdf
t91 = calculate_materials.mixture('T91')
t91.mass_density=7.76
t91.add_mixture( 'Fe', 89.4, mode='mass')
t91.add_mixture( 'C' , 0.10, mode='mass')
t91.add_mixture( 'Si', 0.26, mode='mass')
t91.add_mixture( 'V' , 0.2 , mode='mass')
t91.add_mixture( 'Cr', 8.45, mode='mass')
t91.add_mixture( 'Mn', 0.46, mode='mass')
t91.add_mixture( 'Ni', 0.17, mode='mass')
t91.add_mixture( 'Mo', 0.92, mode='mass')
t91.add_mixture( 'Nb', 0.04, mode='mass')
t91.finalize()
#
# SIMP steel
# wt%, from http://pure.qub.ac.uk/portal/files/17599818/341_manuscript.pdf # couldn't find reference for density...
simp = calculate_materials.mixture('SIMP')
simp.mass_density=7.8
simp.add_mixture( 'Fe', 84.53, mode='mass')
simp.add_mixture( 'C' , 0.25 , mode='mass')
simp.add_mixture( 'Si', 1.5 , mode='mass')
simp.add_mixture( 'V', 0.2 , mode='mass')
simp.add_mixture( 'Cr', 10.8 , mode='mass')
simp.add_mixture( 'Mn', 0.5 , mode='mass')
simp.add_mixture( 'W', 1.2 , mode='mass')
simp.add_mixture( 'Mo', 0.9 , mode='mass')
simp.add_mixture( 'Nb', 0.01 , mode='mass')
simp.add_mixture( 'Ta', 0.11 , mode='mass')
simp.finalize()
#
# CLAM steel
# http://www.sciencedirect.com/science/article/pii/S0261306914004774 # couldn't find reference for density...
clam = calculate_materials.mixture('CLAM')
clam.mass_density=7.8
clam.add_mixture( 'Fe', 88.709, mode='mass')
clam.add_mixture( 'C', 0.091, mode='mass')
clam.add_mixture( 'Cr', 8.93 , mode='mass')
clam.add_mixture( 'Mn', 0.49 , mode='mass')
clam.add_mixture( 'W', 1.51 , mode='mass')
clam.add_mixture( 'V', 0.15 , mode='mass')
clam.add_mixture( 'Ta', 0.15 , mode='mass')
clam.finalize()
#
# Al6061
# http://asm.matweb.com/search/SpecificMaterial.asp?bassnum=MA6061t6
# Composition in wt% per the ASM datasheet above.
al_6061 = calculate_materials.mixture('Al6061')
al_6061.mass_density=2.7
al_6061.add_mixture( 'Al', 97.2 , mode='mass')
al_6061.add_mixture( 'Cr', 0.195, mode='mass')
al_6061.add_mixture( 'Cu', 0.275, mode='mass')
al_6061.add_mixture( 'Fe', 0.35 , mode='mass')
al_6061.add_mixture( 'Mg', 1.0 , mode='mass')
al_6061.add_mixture( 'Mn', 0.075, mode='mass')
al_6061.add_mixture( 'Si', 0.6 , mode='mass')
al_6061.add_mixture( 'Ti', 0.075, mode='mass')
al_6061.add_mixture( 'Zn', 0.125, mode='mass')
al_6061.finalize()
#
# Ti6Al4V
#
# NOTE(review): atom ratios 6:4:1 mirror the alloy's wt% designation
# (Ti-6Al-4V is 90/6/4 by MASS, not by atom) — confirm this is intended.
ti6al4v = calculate_materials.mixture('Ti6Al4V')
ti6al4v.mass_density=4.43
ti6al4v.add_mixture( 'Ti', 6.0 , mode='atom')
ti6al4v.add_mixture( 'Al', 4.0 , mode='atom')
ti6al4v.add_mixture( 'V' , 1.0 , mode='atom')
ti6al4v.finalize()
#
# Ti3AlC2
#
# MAX phase, atomic stoichiometry 3:1:2.
ti3alc2 = calculate_materials.mixture('Ti3AlC2')
ti3alc2.mass_density=4.24
ti3alc2.add_mixture( 'Ti', 3.0 , mode='atom')
ti3alc2.add_mixture( 'Al', 1.0 , mode='atom')
ti3alc2.add_mixture( 'C' , 2.0 , mode='atom')
ti3alc2.finalize()
#
# Ti3SiC2
#
# MAX phase, atomic stoichiometry 3:1:2.
ti3sic2 = calculate_materials.mixture('Ti3SiC2')
ti3sic2.mass_density=4.53
ti3sic2.add_mixture( 'Ti', 3.0 , mode='atom')
ti3sic2.add_mixture( 'Si', 1.0 , mode='atom')
ti3sic2.add_mixture( 'C' , 2.0 , mode='atom')
ti3sic2.finalize()
#
# Ti2AlC
#
ti3alc = calculate_materials.mixture('Ti2AlC')
# NOTE(review): literature density of the Ti2AlC MAX phase is ~4.11 g/cc,
# not 4.25 — confirm.
ti3alc.mass_density=4.25
# BUG FIX: Ti was 3.0 (copied from the Ti3AlC2 block above); the Ti2AlC MAX
# phase — as both the comment header and the mixture name state — has
# atomic stoichiometry Ti:Al:C = 2:1:1.
ti3alc.add_mixture( 'Ti', 2.0 , mode='atom')
ti3alc.add_mixture( 'Al', 1.0 , mode='atom')
ti3alc.add_mixture( 'C' , 1.0 , mode='atom')
ti3alc.finalize()
#
# Y2O3
#
y2o3 = calculate_materials.mixture('Y2O3')
y2o3.mass_density=5.010
y2o3.add_mixture('Y',2.0,mode='atom')
y2o3.add_mixture('O',3.0,mode='atom')
y2o3.finalize()
#
# 9Cr-ODS
#
# 'Y2O3' below refers to the mixture registered just above — presumably
# mixture names are resolvable as components in add_mixture(); confirm.
cr9_ods = calculate_materials.mixture('9Cr-ODS')
cr9_ods.mass_density=7.8
cr9_ods.add_mixture( 'Fe' , 87.85 , mode='mass')
cr9_ods.add_mixture( 'Cr' , 9.0 , mode='mass')
cr9_ods.add_mixture( 'W' , 2.5 , mode='mass')
cr9_ods.add_mixture( 'Ti' , 0.4 , mode='mass')
cr9_ods.add_mixture( 'Y2O3' , 0.25 , mode='mass')
cr9_ods.finalize()
#
# 14Cr-ODS
#
cr14_ods = calculate_materials.mixture('14Cr-ODS')
cr14_ods.mass_density=7.8
cr14_ods.add_mixture( 'Fe' , 82.85 , mode='mass')
cr14_ods.add_mixture( 'Cr' , 14.0 , mode='mass')
cr14_ods.add_mixture( 'W' , 2.5 , mode='mass')
cr14_ods.add_mixture( 'Ti' , 0.4 , mode='mass')
cr14_ods.add_mixture( 'Y2O3' , 0.25 , mode='mass')
cr14_ods.finalize()
#
# Fe-Cr alloy
#
fecr = calculate_materials.mixture('FeCr')
fecr.mass_density=7.8
fecr.add_mixture( 'Fe', 88.0 , mode='mass')
fecr.add_mixture( 'Cr', 12.0 , mode='mass')
fecr.finalize()
#
# SiC
#
sic = calculate_materials.mixture('SiC')
sic.mass_density=3.21
sic.add_mixture( 'Si', 1.0 , mode='atom')
sic.add_mixture( 'C', 1.0 , mode='atom')
sic.finalize()
#
# B4C
#
b4c = calculate_materials.mixture('B4C')
b4c.mass_density=2.52
b4c.add_mixture( 'B', 4.0 , mode='atom')
b4c.add_mixture( 'C', 1.0 , mode='atom')
b4c.finalize()
#
# high density polyethylene
#
# (C2H4)n monomer expressed as H4 C2 per formula unit.
hdpe = calculate_materials.mixture('HDPE')
hdpe.mass_density=0.95
hdpe.add_mixture( 'H', 4.0 , mode='atom')
hdpe.add_mixture( 'C', 2.0 , mode='atom')
hdpe.finalize()
#
# Borated concrete
#
# Mass fractions (sum to ~1.0); the large Ba fraction indicates a
# barite-loaded shielding concrete.
concrete_borated = calculate_materials.mixture('Borated Concrete')
concrete_borated.mass_density=3.10
concrete_borated.add_mixture( 'H ', 0.005600, mode='mass')
concrete_borated.add_mixture( 'B ', 0.010400, mode='mass')
concrete_borated.add_mixture( 'O ', 0.338000, mode='mass')
concrete_borated.add_mixture( 'F ', 0.002300, mode='mass')
concrete_borated.add_mixture( 'Na', 0.012100, mode='mass')
concrete_borated.add_mixture( 'Mg', 0.002300, mode='mass')
concrete_borated.add_mixture( 'Al', 0.006400, mode='mass')
concrete_borated.add_mixture( 'Si', 0.033100, mode='mass')
concrete_borated.add_mixture( 'S ', 0.091500, mode='mass')
concrete_borated.add_mixture( 'K ', 0.001000, mode='mass')
concrete_borated.add_mixture( 'Ca', 0.062600, mode='mass')
concrete_borated.add_mixture( 'Mn', 0.000200, mode='mass')
concrete_borated.add_mixture( 'Fe', 0.021900, mode='mass')
concrete_borated.add_mixture( 'Zn', 0.006600, mode='mass')
concrete_borated.add_mixture( 'Ba', 0.401300, mode='mass')
concrete_borated.finalize()
#
# borosilicate glass
#
glass_borosilicate = calculate_materials.mixture('Borosilicate Glass')
glass_borosilicate.mass_density=2.23
glass_borosilicate.add_mixture( 'B ', 0.040066, mode='mass')
glass_borosilicate.add_mixture( 'O ', 0.539559, mode='mass')
glass_borosilicate.add_mixture( 'Na', 0.028191, mode='mass')
glass_borosilicate.add_mixture( 'Al', 0.011644, mode='mass')
glass_borosilicate.add_mixture( 'Si', 0.377220, mode='mass')
glass_borosilicate.add_mixture( 'K ', 0.003321, mode='mass')
glass_borosilicate.finalize()
#
# borosilicate glass PSI from NAA irradiation
#
# Same base composition as 'Borosilicate Glass' plus Sc/La trace impurities
# measured by neutron activation analysis (NAA).
glass_borosilicate_PSI = calculate_materials.mixture('Borosilicate Glass PSI NAA')
glass_borosilicate_PSI.mass_density=2.23
glass_borosilicate_PSI.add_mixture( 'B ', 0.040066, mode='mass')
glass_borosilicate_PSI.add_mixture( 'O ', 0.539559, mode='mass')
glass_borosilicate_PSI.add_mixture( 'Na', 0.028191, mode='mass')
glass_borosilicate_PSI.add_mixture( 'Al', 0.011644, mode='mass')
glass_borosilicate_PSI.add_mixture( 'Si', 0.377220, mode='mass')
glass_borosilicate_PSI.add_mixture( 'K ', 0.003321, mode='mass')
glass_borosilicate_PSI.add_mixture( 'Sc', 0.58*1e-6, mode='mass')
glass_borosilicate_PSI.add_mixture( 'La', 3.87*1e-6, mode='mass')
glass_borosilicate_PSI.finalize()
#
# plate glass
#
glass_plate = calculate_materials.mixture('Plate Glass')
glass_plate.mass_density=2.40
glass_plate.add_mixture( 'O ', 0.459800, mode='mass')
glass_plate.add_mixture( 'Na', 0.096441, mode='mass')
glass_plate.add_mixture( 'Si', 0.336553, mode='mass')
glass_plate.add_mixture( 'Ca', 0.107205, mode='mass')
glass_plate.finalize()
#
# plate glass PSI from NAA irradiation
#
# Same base composition as 'Plate Glass' plus Sc/La trace impurities (NAA).
glass_plate_PSI = calculate_materials.mixture('Plate Glass PSI NAA')
glass_plate_PSI.mass_density=2.40
glass_plate_PSI.add_mixture( 'O ', 0.459800, mode='mass')
glass_plate_PSI.add_mixture( 'Na', 0.096441, mode='mass')
glass_plate_PSI.add_mixture( 'Si', 0.336553, mode='mass')
glass_plate_PSI.add_mixture( 'Ca', 0.107205, mode='mass')
glass_plate_PSI.add_mixture( 'Sc', 0.58*1e-6, mode='mass')
glass_plate_PSI.add_mixture( 'La', 3.87*1e-6, mode='mass')
glass_plate_PSI.finalize()
#
# m=2 guide layers
#
# NOTE(review): mass_density 2.40 g/cc looks like the glass substrate's
# density, not that of a Ni/Ti multilayer (pure Ni ~8.9, Ti ~4.5 g/cc) —
# possibly a deliberate homogenization over the substrate; confirm.
guide_layers = calculate_materials.mixture('Guide Layers')
guide_layers.mass_density=2.40
guide_layers.add_mixture( 'Ni', 7.07348E-01, mode='volume')
guide_layers.add_mixture( 'Ti', 2.92652E-01, mode='volume')
guide_layers.finalize()
#
# m=2 guide layers, PSI from NAA irradiation
#
# Same coating with Co/Cr trace impurities from NAA; note this variant uses
# mass fractions while the one above uses volume fractions.
guide_layers_PSI = calculate_materials.mixture('Guide Layers PSI NAA')
guide_layers_PSI.mass_density=2.40
guide_layers_PSI.add_mixture( 'Ni', 0.82573012, mode='mass')
guide_layers_PSI.add_mixture( 'Ti', 0.17426988, mode='mass')
guide_layers_PSI.add_mixture( 'Co', 5.96*1e-6, mode='mass')
guide_layers_PSI.add_mixture( 'Cr', 1800*1e-6, mode='mass')
guide_layers_PSI.finalize()
#
# Schrottbeton/heavy concrete/heavy mortar
# Assay number 10 : Representative Schrottbeton
#
# NOTE(review): values appear to be mass ppm (Fe alone is 861000, and the
# total is ~1e6); presumably normalized by mixture() — confirm.
schrottbeton = calculate_materials.mixture('Schrottbeton')
schrottbeton.mass_density=5.7
schrottbeton.add_mixture( 'H ', 3000.00, mode='mass')
schrottbeton.add_mixture( 'Li', 0.10, mode='mass')
schrottbeton.add_mixture( 'Be', 0.09, mode='mass')
schrottbeton.add_mixture( 'B ', 63.0 , mode='mass')
schrottbeton.add_mixture( 'C ', 3000.00, mode='mass')
schrottbeton.add_mixture( 'N ', 70.00, mode='mass')
schrottbeton.add_mixture( 'O ', 80000.00, mode='mass')
schrottbeton.add_mixture( 'Na', 87.00, mode='mass')
schrottbeton.add_mixture( 'Mg', 837.00, mode='mass')
schrottbeton.add_mixture( 'Al', 3270.00, mode='mass')
schrottbeton.add_mixture( 'Si', 17600.00, mode='mass')
schrottbeton.add_mixture( 'P ', 5770.00, mode='mass')
schrottbeton.add_mixture( 'S ', 2560.00, mode='mass')
schrottbeton.add_mixture( 'Cl', 30.00, mode='mass')
schrottbeton.add_mixture( 'K ', 260.00, mode='mass')
schrottbeton.add_mixture( 'Ca', 280.00, mode='mass')
schrottbeton.add_mixture( 'Ti', 555.00, mode='mass')
schrottbeton.add_mixture( 'V ', 510.00, mode='mass')
schrottbeton.add_mixture( 'Cr', 710.00, mode='mass')
schrottbeton.add_mixture( 'Mn', 4300.00, mode='mass')
schrottbeton.add_mixture( 'Fe', 861000.00, mode='mass')
schrottbeton.add_mixture( 'Co', 120.00, mode='mass')
schrottbeton.add_mixture( 'Ni', 615.00, mode='mass')
schrottbeton.add_mixture( 'Cu', 1700.00, mode='mass')
schrottbeton.add_mixture( 'Zn', 77.00, mode='mass')
schrottbeton.add_mixture( 'Ga', 10.00, mode='mass')
schrottbeton.add_mixture( 'As', 238.00, mode='mass')
schrottbeton.add_mixture( 'Rb', 3.00, mode='mass')
schrottbeton.add_mixture( 'Sr', 140.00, mode='mass')
schrottbeton.add_mixture( 'Y ', 1.00, mode='mass')
schrottbeton.add_mixture( 'Zr', 10.00, mode='mass')
schrottbeton.add_mixture( 'Nb', 1.00, mode='mass')
schrottbeton.add_mixture( 'Mo', 50.00, mode='mass')
schrottbeton.add_mixture( 'Ag', 0.70, mode='mass')
schrottbeton.add_mixture( 'Sn', 5.00, mode='mass')
schrottbeton.add_mixture( 'Sb', 70.00, mode='mass')
schrottbeton.add_mixture( 'Te', 100.00, mode='mass')
schrottbeton.add_mixture( 'Cs', 6.00, mode='mass')
schrottbeton.add_mixture( 'Ba', 55.00, mode='mass')
schrottbeton.add_mixture( 'La', 5.00, mode='mass')
schrottbeton.add_mixture( 'Ce', 1.00, mode='mass')
schrottbeton.add_mixture( 'Sm', 1.00, mode='mass')
schrottbeton.add_mixture( 'Eu', 0.10, mode='mass')
schrottbeton.add_mixture( 'Gd', 0.50, mode='mass')
schrottbeton.add_mixture( 'Dy', 0.30, mode='mass')
schrottbeton.add_mixture( 'Ho', 0.50, mode='mass')
schrottbeton.add_mixture( 'Yb', 0.10, mode='mass')
schrottbeton.add_mixture( 'Lu', 20.00, mode='mass')
schrottbeton.add_mixture( 'W ', 380.00, mode='mass')
schrottbeton.add_mixture( 'Tl', 2.00, mode='mass')
schrottbeton.add_mixture( 'Pb', 6.00, mode='mass')
schrottbeton.add_mixture( 'Bi', 1.00, mode='mass')
schrottbeton.add_mixture( 'Th', 0.50, mode='mass')
schrottbeton.add_mixture( 'U ', 1.00, mode='mass')
schrottbeton.finalize()
#
#
# schrottbeton from some old mcnp definition
#
# Mass fractions (sum to ~0.98) — simplified legacy homogenization of the
# scrap-iron concrete above, at a lower density.
schrottbeton2 = calculate_materials.mixture('Schrottbeton-PSI')
schrottbeton2.mass_density=4.783
schrottbeton2.add_mixture( 'H ', 0.004395, mode='mass')
schrottbeton2.add_mixture( 'O ', 0.184241, mode='mass')
schrottbeton2.add_mixture( 'Fe', 0.765413, mode='mass')
schrottbeton2.add_mixture( 'Ca', 0.002704, mode='mass')
schrottbeton2.add_mixture( 'Si', 0.020705, mode='mass')
schrottbeton2.add_mixture( 'Al', 0.002077, mode='mass')
schrottbeton2.finalize()
#
# Bundeseisen , calculated from drawing 0-10009.22.026
#
# NOTE(review): values appear to be mass ppm (Fe is 971278; total ~1e6) —
# confirm normalization in mixture().
bundeseisen = calculate_materials.mixture('Bundeseisen')
bundeseisen.mass_density=7.80488
bundeseisen.add_mixture( 'C' , 2689. , mode='mass')
bundeseisen.add_mixture( 'Na' , 2. , mode='mass')
bundeseisen.add_mixture( 'Mg' , 5. , mode='mass')
bundeseisen.add_mixture( 'Al' , 100. , mode='mass')
bundeseisen.add_mixture( 'Si' , 7451. , mode='mass')
bundeseisen.add_mixture( 'P' , 344. , mode='mass')
bundeseisen.add_mixture( 'S' , 303. , mode='mass')
bundeseisen.add_mixture( 'K' , 17. , mode='mass')
bundeseisen.add_mixture( 'Ca' , 13. , mode='mass')
bundeseisen.add_mixture( 'Ti' , 12. , mode='mass')
bundeseisen.add_mixture( 'V' , 310. , mode='mass')
bundeseisen.add_mixture( 'Cr' , 728. , mode='mass')
bundeseisen.add_mixture( 'Mn' , 9699. , mode='mass')
bundeseisen.add_mixture( 'Fe' , 971278. , mode='mass')
bundeseisen.add_mixture( 'Co' , 153. , mode='mass')
bundeseisen.add_mixture( 'Ni' , 1796. , mode='mass')
bundeseisen.add_mixture( 'Cu' , 3701. , mode='mass')
bundeseisen.add_mixture( 'Zn' , 70. , mode='mass')
bundeseisen.add_mixture( 'As' , 245. , mode='mass')
bundeseisen.add_mixture( 'Rb' , 19. , mode='mass')
bundeseisen.add_mixture( 'Mo' , 95. , mode='mass')
bundeseisen.add_mixture( 'Sn' , 769. , mode='mass')
bundeseisen.add_mixture( 'Cs' , 93. , mode='mass')
bundeseisen.add_mixture( 'Yb' , 1. , mode='mass')
bundeseisen.add_mixture( 'W' , 3. , mode='mass')
bundeseisen.add_mixture( 'Pb' , 84. , mode='mass')
bundeseisen.finalize()
#
# Granatsand, Australien
# 4.1 g/cc solid, 2.4 bulk, from http://www.abritec.ch/index.php?nav=5,32
granatsand1 = calculate_materials.mixture('Granatsand 1')
granatsand1.mass_density = 2.40
# (element, concentration) pairs in the original measurement order; fed one by
# one to add_mixture(mode='mass').  NOTE: 'Pr' appears twice in the source
# data and both entries are kept deliberately.  'O' is the balance.
_granatsand1_data = [
    ('P', 240.), ('S', 270.), ('As', 20.), ('Sn', 20.), ('Hg', 5.),
    ('Mo', 3.), ('Cr', 142.), ('Zn', 43.4), ('Pb', 46.), ('Co', 34.),
    ('Cd', 1.0), ('Ni', 20.), ('B', 7.), ('Si', 17.6e4), ('Mn', 0.72e4),
    ('Fe', 22.9e4), ('Mg', 3.57e4), ('V', 140.), ('Be', 0.5), ('Cu', 2.),
    ('Ag', 2.), ('Ti', 89.5), ('Zr', 33.), ('Ca', 1.03e4), ('Al', 10.6e4),
    ('Sr', 11.8), ('Ba', 64.4), ('Na', 72.), ('Li', 8.), ('K', 85.),
    ('Rb', 1.), ('Cs', 10.), ('Se', 20.), ('W', 10.), ('Te', 108.),
    ('Sb', 10.), ('Re', 5.), ('Bi', 15.), ('Ir', 10.), ('Os', 20.),
    ('In', 20.), ('Ru', 10.), ('Au', 5.), ('Ge', 10.), ('Ta', 10.),
    ('Ga', 23.), ('Pr', 50.), ('Nb', 2.), ('Hf', 10.), ('Pd', 10.),
    ('Rh', 5.), ('Tl', 26.), ('U', 10.), ('Th', 5.), ('Lu', 17.2),
    ('Yb', 46.9), ('Gd', 98.), ('Ho', 8.), ('Tm', 5.), ('Dy', 30.),
    ('Sm', 10.), ('Sc', 1333.), ('Tb', 5.), ('Y', 322.), ('La', 15.),
    ('Eu', 0.5), ('Er', 35.), ('Pr', 3.), ('Ce', 25.), ('Nd', 11.),
    ('O', 432070.8),  # balance
]
for _element, _value in _granatsand1_data:
    granatsand1.add_mixture(_element, _value, mode='mass')
granatsand1.finalize()
#
# Granatsand, "Neuer Abschirmsand"
# 4.1 g/cc solid, 2.4 bulk, from http://www.abritec.ch/index.php?nav=5,32
granatsand2 = calculate_materials.mixture('Granatsand 2')
granatsand2.mass_density = 2.40
# (element, concentration) pairs in the original order; 'O' is the balance.
_granatsand2_data = [
    ('Lu', 15.3), ('Yb', 18.4), ('Gd', 103.), ('Ho', 1.), ('Tm', 2.),
    ('Dy', 1.), ('Sm', 5.), ('Sc', 119.), ('Tb', 10.), ('Y', 133.),
    ('La', 12.), ('Eu', 1.), ('Er', 13.), ('Pr', 10.), ('Ce', 10.),
    ('Nd', 10.), ('P', 450.), ('S', 370.), ('As', 20.), ('Sn', 20.),
    ('Hg', 20.), ('Mo', 5.), ('Zn', 79.), ('Pb', 20.), ('Co', 30.),
    ('Cd', 1.), ('Ni', 3.), ('B', 10.), ('Si', 206000.), ('Mn', 5550.),
    ('Fe', 301000.), ('Cr', 49.), ('Mg', 28100.), ('V', 74.), ('Be', 0.5),
    ('Cu', 26.), ('Ag', 9.), ('Ti', 1790.), ('Zr', 84.), ('Ca', 14900.),
    ('Al', 127000.), ('Sr', 9.86), ('Ba', 225.), ('Na', 837.), ('Li', 5.),
    ('K', 1050.), ('Cs', 3.), ('Rb', 18.), ('Se', 20.), ('W', 10.),
    ('Te', 50.), ('Sb', 20.), ('Re', 5.), ('Bi', 10.), ('Ir', 10.),
    ('Os', 20.), ('In', 50.), ('Ru', 10.), ('Au', 10.), ('Ge', 10.),
    ('Ta', 10.), ('Ga', 28.), ('Pt', 50.), ('Nb', 5.), ('Hf', 10.),
    ('Pd', 10.), ('Rh', 10.), ('Tl', 50.), ('U', 10.), ('Th', 5.),
    ('O', 311364.94),  # balance
]
for _element, _value in _granatsand2_data:
    granatsand2.add_mixture(_element, _value, mode='mass')
granatsand2.finalize()
#
# soil, from PNNL-15870 Rev. 1 (four-component average, atom fractions)
soil = calculate_materials.mixture('soil')
soil.mass_density = 1.52
# Element labels keep their original trailing spaces as passed to add_mixture().
for _element, _fraction in [('H ', 0.316855), ('O ', 0.501581),
                            ('Al', 0.039951), ('Si', 0.141613)]:
    soil.add_mixture(_element, _fraction, mode='atom')
soil.finalize()
#
# asphalt/bitumen full density from PNNL-15870 Rev. 1 (atom fractions)
asphalt = calculate_materials.mixture('asphalt')
asphalt.mass_density = 1.3
for _element, _fraction in [
    ('H ', 0.586755), ('C ', 0.402588), ('N ', 0.002463),
    ('O ', 0.001443), ('S ', 0.006704), ('V ', 0.000044),
    ('Ni', 0.000003),
]:
    asphalt.add_mixture(_element, _fraction, mode='atom')
asphalt.finalize()
#
# polyurethane foam insulation, from PNNL-15870 Rev. 1, swisspor PUR premium
pur = calculate_materials.mixture('pur')
pur.mass_density = 0.03
for _element, _fraction in [('H ', 0.360023), ('C ', 0.400878),
                            ('N ', 0.076459), ('O ', 0.162639)]:
    pur.add_mixture(_element, _fraction, mode='atom')
pur.finalize()
#
# swisspor Drain WS20, 20mm polypropylene stuff, mostly air (hence the
# very low bulk density)
ws20 = calculate_materials.mixture('ws20')
ws20.mass_density = 0.0325
for _element, _fraction in [('H ', 0.666653), ('C ', 0.333347)]:
    ws20.add_mixture(_element, _fraction, mode='atom')
ws20.finalize()
#
# E24 Steel, 0.05% Co, >99% Fe+C (mass fractions)
E24 = calculate_materials.mixture('E24-insert')
E24.mass_density = 7.85
for _element, _weight in [
    ('Fe', 0.990), ('C ', 0.0044), ('Co', 0.0005),
    ('Si', 1.63200000e-04), ('Mn', 7.34400000e-04), ('S ', 6.52800000e-05),
    ('P ', 5.71200000e-05), ('Cr', 1.63200000e-03), ('Mo', 3.26400000e-04),
    ('Ni', 2.12160000e-03),
]:
    E24.add_mixture(_element, _weight, mode='mass')
E24.finalize()
#
# GRANITE, from PNNL (mass fractions)
granite = calculate_materials.mixture('granite')
granite.mass_density = 2.69
for _element, _weight in [
    ('O ', 0.484170), ('Na', 0.027328), ('Mg', 0.004274), ('Al', 0.076188),
    ('Si', 0.336169), ('K ', 0.034144), ('Ca', 0.012985), ('Ti', 0.001795),
    ('Mn', 0.000387), ('Fe', 0.021555), ('Pb', 0.001004),
]:
    granite.add_mixture(_element, _weight, mode='mass')
granite.finalize()
#
# boric acid (stoichiometric atom counts)
boric_acid = calculate_materials.mixture('boric_acid')
boric_acid.mass_density = 1.435
for _element, _count in [('O', 3.0), ('H', 3.0), ('B', 1.0)]:
    boric_acid.add_mixture(_element, _count, mode='atom')
boric_acid.finalize()
#
# borax (stoichiometric atom counts, incl. crystal water)
borax = calculate_materials.mixture('borax')
borax.mass_density = 1.73
for _element, _count in [('O ', 17.), ('H ', 20.), ('B ', 4.), ('Na', 2.)]:
    borax.add_mixture(_element, _count, mode='atom')
borax.finalize()
#
# light water, H2O
light_water = calculate_materials.mixture('light water')
# Components are added before the density is set, matching the original order.
for _element, _count in [('O', 1.0), ('H', 2.0)]:
    light_water.add_mixture(_element, _count, mode='atom')
light_water.mass_density = 1.0
light_water.finalize()
#
# 5% boron polyethylene (mass percent)
bpe5 = calculate_materials.mixture('borated polyethylene 5%')
bpe5.mass_density = 1.07
for _element, _weight in [('H', 13.57143), ('C', 81.42857), ('B', 5.00000)]:
    bpe5.add_mixture(_element, _weight, mode='mass')
bpe5.finalize()
#
# polyethylene loaded with 5 wt% boron carbide (the original comment here
# said "5% boron polyethylene", copy-pasted from the block above)
bpe_b4c5 = calculate_materials.mixture('borated polyethylene 5wt% B4C')
bpe_b4c5.mass_density = 1.07
for _component, _weight in [('HDPE', 0.95), ('B4C', 0.05)]:
    bpe_b4c5.add_mixture(_component, _weight, mode='mass')
bpe_b4c5.finalize()
#
# 30% boron polyethylene (mass percent)
bpe30 = calculate_materials.mixture('borated polyethylene 30%')
bpe30.mass_density = 1.19
for _element, _weight in [('H', 10.0), ('C', 60.0), ('B', 30.0)]:
    bpe30.add_mixture(_element, _weight, mode='mass')
bpe30.finalize()
#
# SIEMENS composition concrete (mass fractions)
concrete_seimens = calculate_materials.mixture('concrete, SIEMENS')
concrete_seimens.mass_density = 2.3
for _element, _weight in [
    ('H', 3.3E-3), ('O', 5.597E-1), ('Al', 4.86E-2),
    ('Si', 1.942E-1), ('Ca', 1.942E-1),
]:
    concrete_seimens.add_mixture(_element, _weight, mode='mass')
concrete_seimens.finalize()
#
#
# gravel, rock average of 5 types, PNNL (mass fractions)
gravel = calculate_materials.mixture('gravel')
gravel.mass_density = 2.662
for _element, _weight in [
    ('H ', 0.001657), ('C ', 0.026906), ('O ', 0.488149), ('Na', 0.012403),
    ('Mg', 0.023146), ('Al', 0.054264), ('Si', 0.246249), ('S ', 0.000577),
    ('K ', 0.018147), ('Ca', 0.089863), ('Ti', 0.003621), ('Mn', 0.000386),
    ('Fe', 0.033377), ('Pb', 0.001255),
]:
    gravel.add_mixture(_element, _weight, mode='mass')
gravel.finalize()
#
# dry air @ 20 degC (atom fractions)
dryair = calculate_materials.mixture('dry air')
dryair.mass_density = 0.00120479
for _element, _fraction in [('C', 0.000150), ('N', 0.784431),
                            ('O', 0.210748), ('Ar', 0.004671)]:
    dryair.add_mixture(_element, _fraction, mode='atom')
dryair.finalize()
#
# 45% RH air @ 24 degC, mixed by mass from the 'dry air' and 'light water'
# mixtures defined above
air_45RH_24C = calculate_materials.mixture('air 45RH 24C')
air_45RH_24C.mass_density = 0.0011935
for _component, _weight in [('dry air', 0.99172), ('light water', 0.00828)]:
    air_45RH_24C.add_mixture(_component, _weight, mode='mass')
air_45RH_24C.finalize()
#
# pure He3
#
He3=calculate_materials.mixture('He3')
# The nuclide is written straight into atom_fractions by integer id 2003;
# presumably the key encodes 1000*Z + A (He-3) — TODO confirm against the
# calculate_materials convention.
He3.atom_fractions[2003]=1.0
He3.mass_density=0.00018
He3.finalize()
#
# low efficiency He3 counter fill gas (mostly Kr, a little He-3)
low_eff_he3 = calculate_materials.mixture('low eff he3')
low_eff_he3.mass_density = 3.485e-3
for _component, _count in [('Kr', 1.2), ('He3', 0.05)]:
    low_eff_he3.add_mixture(_component, _count, mode='atom')
low_eff_he3.finalize()
#
# normal efficiency He3 counter fill gas
normal_eff_he3 = calculate_materials.mixture('normal eff he3')
normal_eff_he3.mass_density = 4.468741e-3
for _component, _count in [('Kr', 1.2), ('He3', 2.3)]:
    normal_eff_he3.add_mixture(_component, _count, mode='atom')
normal_eff_he3.finalize()
#
# ST37 steel comp from K Geismann, 1993.
# (element, concentration) pairs in the original order; NOTE: 'Mn' appears
# twice in the source data (80.0 and 10700.0) and both entries are kept.
# Element labels keep their original trailing spaces.
st37 = calculate_materials.mixture('ST37')
st37.mass_density = 7.85
_st37_data = [
    ('P ', 120.0), ('S ', 20.0), ('As', 40.0), ('Sn', 20.0), ('Hg', 30.0),
    ('Mn', 80.0), ('Cr', 226.0), ('Zn', 160.0), ('Pb', 20.0), ('Co', 34.0),
    ('Cd', 2.0), ('Ni', 477.0), ('B ', 3.0), ('Si', 1800.0), ('Mn', 10700.0),
    ('Fe', 983000.0), ('Mg', 8.8), ('V ', 64.0), ('Be', 0.5), ('Cu', 157.0),
    ('Ag', 3.0), ('Ti', 8.0), ('Zr', 2.0), ('Ca', 20.0), ('Al', 288.0),
    ('Sr', 0.3), ('Ba', 1.2), ('Na', 10.0), ('Li', 2.0), ('K ', 5.4),
    ('Rb', 5.0), ('Cs', 9.0), ('Se', 50.0), ('W ', 50.0), ('Te', 200.0),
    ('Sb', 20.0), ('Re', 5.0), ('Bi', 30.0), ('Ir', 30.0), ('Os', 3.0),
    ('In', 30.0), ('Ru', 10.0), ('Au', 20.0), ('Ge', 20.0), ('Ta', 20.0),
    ('Ga', 30.0), ('Pt', 20.0), ('Nb', 10.0), ('Hf', 20.0), ('Pd', 20.0),
    ('Rh', 10.0), ('Tl', 10.0), ('U ', 20.0), ('Th', 10.0), ('Lu', 5.0),
    ('Yb', 2.0), ('Gd', 5.0), ('Ho', 5.0), ('Tm', 5.0), ('Dy', 5.0),
    ('Sm', 10.0), ('Sc', 2.0), ('Tb', 20.0), ('Y ', 2.0), ('La', 5.0),
    ('Eu', 2.0), ('Er', 5.0), ('Pr', 20.0), ('Ce', 10.0), ('Nd', 10.0),
    ('C ', 1340.0),
]
for _element, _value in _st37_data:
    st37.add_mixture(_element, _value, mode='mass')
st37.finalize()
#
# BEAM steel comp from S. Koechli, 2017 (Cr Co Ni Mo Fe measured; the
# remaining entries adapted from ST37).  NOTE: the 'U ' and 'Th' entries were
# commented out in the original source and are intentionally omitted here too.
beam_steel_ICPOES = calculate_materials.mixture('ICPOES - beam steel')
beam_steel_ICPOES.mass_density = 7.85
_beam_icpoes_data = [
    ('Cr', 365.0), ('Co', 136.0), ('Ni', 536.0), ('Mo', 64.0),
    ('Fe', 990575.0),
    ('P ', 6.41582632e+01), ('S ', 1.06930439e+01), ('As', 2.13860877e+01),
    ('Sn', 1.06930439e+01), ('Hg', 1.60395658e+01), ('Mn', 4.27721755e+01),
    ('Zn', 8.55443510e+01), ('Pb', 1.06930439e+01), ('Si', 9.62373948e+02),
    ('Mn', 5.72077847e+03), ('V ', 3.42177404e+01), ('Cu', 8.39403944e+01),
    ('Ca', 1.06930439e+01), ('Al', 1.53979832e+02), ('Na', 5.34652193e+00),
    ('Se', 2.67326097e+01), ('W ', 2.67326097e+01), ('Te', 1.06930439e+02),
    ('Sb', 1.06930439e+01), ('Bi', 1.60395658e+01), ('Ir', 1.60395658e+01),
    ('In', 1.60395658e+01), ('Ru', 5.34652193e+00), ('Au', 1.06930439e+01),
    ('Ge', 1.06930439e+01), ('Ta', 1.06930439e+01), ('Ga', 1.60395658e+01),
    ('Pt', 1.06930439e+01), ('Nb', 5.34652193e+00), ('Hf', 1.06930439e+01),
    ('Pd', 1.06930439e+01), ('Rh', 5.34652193e+00), ('Tl', 5.34652193e+00),
    ('Sm', 5.34652193e+00), ('Tb', 1.06930439e+01), ('Pr', 1.06930439e+01),
    ('Ce', 5.34652193e+00), ('Nd', 5.34652193e+00), ('C ', 7.16433939e+02),
]
for _element, _value in _beam_icpoes_data:
    beam_steel_ICPOES.add_mixture(_element, _value, mode='mass')
beam_steel_ICPOES.finalize()
#
# SRM 361 - AISI 4340 Steel (chip form); Fe is the balance entry at the end.
srm361_steel = calculate_materials.mixture('SRM 361 - AISI 4340 steel')
srm361_steel.mass_density = 7.85
_srm361_data = [
    ('Al', 0.021), ('Nd', 0.00075), ('Sb', 0.0042), ('Ni', 2.00),
    ('As', 0.017), ('Nb', 0.022), ('Ca', 0.00010), ('P ', 0.014),
    ('C ', 0.383), ('Si', 0.222), ('Ce', 0.0040), ('Ag', 0.0004),
    ('Cr', 0.694), ('S ', 0.0143), ('Co', 0.032), ('Ta', 0.020),
    ('Cu', 0.042), ('Sn', 0.010), ('Pb', 0.000025), ('Ti', 0.020),
    ('Mg', 0.00026), ('W ', 0.017), ('Mn', 0.66), ('V ', 0.011),
    ('Mo', 0.19), ('Zr', 0.009), ('B ', 4.78e-6), ('Bi', 0.0004),
    ('O ', 0.0009), ('Au', 0.00005), ('Pr', 0.0003), ('Hf', 0.0002),
    ('Se', 0.004), ('H ', 0.0005), ('Sr', 0.0005), ('Te', 0.0006),
    ('La', 0.001), ('Zn', 0.0001), ('N ', 0.0037), ('Fe', 95.57971022),
]
for _element, _weight in _srm361_data:
    srm361_steel.add_mixture(_element, _weight, mode='mass')
srm361_steel.finalize()
#
# SRM 2165 - Low Alloy Steel; Fe is the balance entry at the end.
srm2165_steel = calculate_materials.mixture('SRM 2165 - low alloy steel')
srm2165_steel.mass_density = 7.85
_srm2165_data = [
    ('Sb', 0.0010), ('As', 0.0010), ('Cr', 0.050), ('Co', 0.0012),
    ('Cu', 0.0013), ('Pb', 0.0003), ('Mn', 0.144), ('Mo', 0.0055),
    ('Ni', 0.155), ('Nb', 0.0004), ('P ', 0.0052), ('Ag', 0.0002),
    ('S ', 0.0036), ('Sn', 0.002), ('Ti', 0.0051), ('V ', 0.0040),
    ('Al', 6.0e-5), ('Bi', 1.0e-6), ('Mg', 1.0e-6), ('Se', 3.5e-5),
    ('Si', 4.0e-5), ('Ta', 4.0e-5), ('Te', 3.0e-5), ('Fe', 99.619993),
]
for _element, _weight in _srm2165_data:
    srm2165_steel.add_mixture(_element, _weight, mode='mass')
srm2165_steel.finalize()
#
# BEAM steel for guide bunker.  The first eight entries are the MAXIMA from
# the ICPOES and NAA measurements; the remaining entries are adapted from
# ST37.  The iron content at the end was calculated from the difference.
beam_steel = calculate_materials.mixture('best guess beam steel')
beam_steel.mass_density = 7.85
_beam_steel_data = [
    # measured maxima (ICPOES / NAA)
    ('Cr', 442.0), ('Co', 148.0), ('Ni', 536.0), ('Mo', 86.0),
    ('Mn', 3777.0), ('As', 233.0), ('Sb', 26.0), ('W ', 8.0),
    # adapted from ST37
    ('P ', 6.41582632e+01), ('S ', 1.06930439e+01), ('Sn', 1.06930439e+01),
    ('Hg', 1.60395658e+01), ('Zn', 8.55443510e+01), ('Pb', 1.06930439e+01),
    ('Si', 9.62373948e+02), ('Mn', 5.72077847e+03), ('V ', 3.42177404e+01),
    ('Cu', 8.39403944e+01), ('Ca', 1.06930439e+01), ('Al', 1.53979832e+02),
    ('Na', 5.34652193e+00), ('Se', 2.67326097e+01), ('Te', 1.06930439e+02),
    ('Bi', 1.60395658e+01), ('Ir', 1.60395658e+01), ('In', 1.60395658e+01),
    ('Ru', 5.34652193e+00), ('Au', 1.06930439e+01), ('Ge', 1.06930439e+01),
    ('Ta', 1.06930439e+01), ('Ga', 1.60395658e+01), ('Pt', 1.06930439e+01),
    ('Nb', 5.34652193e+00), ('Hf', 1.06930439e+01), ('Pd', 1.06930439e+01),
    ('Rh', 5.34652193e+00), ('Tl', 5.34652193e+00), ('U ', 1.06930439e+01),
    ('Th', 5.34652193e+00), ('Sm', 5.34652193e+00), ('Tb', 1.06930439e+01),
    ('Pr', 1.06930439e+01), ('Ce', 5.34652193e+00), ('Nd', 5.34652193e+00),
    ('C ', 7.16433939e+02),
    # iron balance, calculated from difference
    ('Fe', 986521.58391623),
]
for _element, _value in _beam_steel_data:
    beam_steel.add_mixture(_element, _value, mode='mass')
beam_steel.finalize()
#
# borated aluminum matrix: B4C particles in aluminum (volume fractions)
boral = calculate_materials.mixture('boral')
for _component, _volume in [('Al', 80.0), ('B4C', 20.0)]:
    boral.add_mixture(_component, _volume, mode='volume')
boral.finalize()
#
# Simple stoichiometric oxides, used as components of compound materials
# (e.g. the stone wool below).  Each is defined by its (element, atom-count)
# pairs and its mass density in g/cc.
def _simple_oxide(name, atoms, density):
    # Components are added before the density is set, matching the
    # original definition order for every oxide in this group.
    mix = calculate_materials.mixture(name)
    for symbol, count in atoms:
        mix.add_mixture(symbol, count, mode='atom')
    mix.mass_density = density
    mix.finalize()
    return mix

silicon_dioxide      = _simple_oxide('SiO2',  (('Si', 1.), ('O', 2.)), 2.648)
aluminum3_oxide      = _simple_oxide('Al2O3', (('Al', 2.), ('O', 3.)), 3.95)
titanium_dioxide     = _simple_oxide('TiO2',  (('Ti', 1.), ('O', 2.)), 4.23)
iron2_oxide          = _simple_oxide('FeO',   (('Fe', 1.), ('O', 1.)), 5.745)
calcium_oxide        = _simple_oxide('CaO',   (('Ca', 1.), ('O', 1.)), 3.34)
magnesium_oxide      = _simple_oxide('MgO',   (('Mg', 1.), ('O', 1.)), 3.6)
sodium_oxide         = _simple_oxide('Na2O',  (('Na', 2.), ('O', 1.)), 2.27)
potassium_oxide      = _simple_oxide('K2O',   (('K', 2.), ('O', 1.)), 2.32)
boron_trioxide       = _simple_oxide('B2O3',  (('B', 2.), ('O', 3.)), 2.55)
phosphorus_pentoxide = _simple_oxide('P2O5',  (('P', 2.), ('O', 5.)), 2.39)
#
# stone wool, from https://doi.org/10.1006/rtph.2000.1418
# Mixed by mass from the oxide mixtures defined above.
stone_wool = calculate_materials.mixture('stone wool')
for _oxide, _weight in [
    ('SiO2', 46.50), ('Al2O3', 10.50), ('TiO2', 2.00), ('FeO', 5.50),
    ('CaO', 17.50), ('MgO', 11.00), ('Na2O', 2.78), ('K2O', 1.25),
    ('B2O3', 1.00), ('P2O5', 1.00),
]:
    stone_wool.add_mixture(_oxide, _weight, mode='mass')
stone_wool.mass_density = 0.1
stone_wool.finalize()
#
# inconel alloy 600 resistance wire (mass percent)
inconel600 = calculate_materials.mixture('inconel600')
for _element, _weight in [
    ('Ni', 72.00), ('Cr', 15.50), ('Fe', 8.00), ('Mn', 1.00),
    ('Cu', 0.50), ('Si', 0.50), ('C', 0.150), ('S', 0.015),
]:
    inconel600.add_mixture(_element, _weight, mode='mass')
inconel600.mass_density = 8.47005084
inconel600.finalize()
#
# YAG, Y3Al5O12 (stoichiometric atom counts).  The original comment here was
# a copy-paste of the inconel one above; corrected.
yag = calculate_materials.mixture('YAG')
for _element, _count in [('Y', 3.00), ('Al', 5.00), ('O', 12.00)]:
    yag.add_mixture(_element, _count, mode='atom')
yag.mass_density = 4.56
yag.finalize()
| [
6738,
13122,
22182,
31391,
1330,
15284,
62,
33665,
82,
198,
198,
2,
198,
2,
46540,
7851,
34131,
198,
2,
198,
824,
33400,
28,
9948,
3129,
378,
62,
33665,
82,
13,
76,
9602,
10786,
5432,
33400,
11537,
198,
824,
33400,
13,
22208,
62,
43... | 2.118438 | 23,050 |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
62,
75,
12582,
355,
4808,
628
] | 3.6 | 25 |
#
# Copyright 2018 Picovoice Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import platform
from enum import Enum
import numpy as np
from pocketsphinx import get_model_path
from pocketsphinx.pocketsphinx import Decoder
from engines import Porcupine
from engines import snowboydetect
class Engines(Enum):
    """Identifiers for the wake-word engines compared by this benchmark."""

    # Values are the human-readable engine names.
    POCKET_SPHINX = 'Pocketsphinx'    # pocketsphinx keyword-spotting decoder
    PORCUPINE = 'Porcupine'           # Porcupine, standard model
    PORCUPINE_TINY = 'PorcupineTiny'  # Porcupine, tiny model variant
    SNOWBOY = 'Snowboy'               # Snowboy hotword detector
class Engine(object):
    """Abstract base class for wake-word detection engines."""

    def process(self, pcm):
        """
        Processes a frame of audio looking for a specific wake-word.

        :param pcm: frame of audio samples.
        :return: result of detection.
        """

        raise NotImplementedError()

    def release(self):
        """Releases the resources acquired by the engine."""

        raise NotImplementedError()

    @property
    def frame_length(self):
        """Number of audio samples per frame expected by the engine."""

        return 512

    @staticmethod
    def sensitivity_range(engine_type):
        """
        Getter for sensitivity range of different engines to use in the benchmark.

        :param engine_type: type of engine (a member of 'Engines').
        :return: numpy array of sensitivity values to sweep.
        :raises ValueError: if no range is defined for the given engine type.
        """

        if engine_type is Engines.PORCUPINE:
            return np.linspace(0.0, 1.0, 10)

        if engine_type is Engines.PORCUPINE_TINY:
            return np.linspace(0.0, 1.0, 10)

        if engine_type is Engines.POCKET_SPHINX:
            # Swept on a log scale: the thresholds span many orders of magnitude.
            return np.logspace(-10, 20, 10)

        if engine_type is Engines.SNOWBOY:
            return np.linspace(0.4, 0.6, 10)

        # BUG FIX: the message was previously passed logging-style as a second
        # ValueError argument ('%s', value) and therefore never formatted.
        raise ValueError('No sensitivity range for %s' % engine_type.value)

    @staticmethod
    def create(engine_type, keyword, sensitivity):
        """
        Factory method.

        :param engine_type: type of engine.
        :param keyword: keyword to be detected.
        :param sensitivity: detection sensitivity.
        :return: engine instance.
        :raises ValueError: if the engine type is not supported.
        """

        if engine_type is Engines.POCKET_SPHINX:
            return PocketSphinxEngine(keyword, sensitivity)

        if engine_type is Engines.PORCUPINE:
            return PorcupineEngine(keyword, sensitivity)

        if engine_type is Engines.PORCUPINE_TINY:
            return PorcupineTinyEngine(keyword, sensitivity)

        if engine_type is Engines.SNOWBOY:
            return SnowboyEngine(keyword, sensitivity)

        # BUG FIX: this previously RETURNED the ValueError instance instead of
        # raising it, silently handing callers an exception object.
        raise ValueError('Cannot create engine of type %s' % engine_type.value)
class PocketSphinxEngine(Engine):
"""Pocketsphinx engine."""
def __init__(self, keyword, sensitivity):
"""
Constructor.
:param keyword: keyword to be detected.
:param sensitivity: detection sensitivity.
"""
# Set the configuration.
config = Decoder.default_config()
config.set_string('-logfn', '/dev/null')
# Set recognition model to US
config.set_string('-hmm', os.path.join(get_model_path(), 'en-us'))
config.set_string('-dict', os.path.join(get_model_path(), 'cmudict-en-us.dict'))
config.set_string('-keyphrase', keyword)
config.set_float('-kws_threshold', sensitivity)
self._decoder = Decoder(config)
self._decoder.start_utt()
class PorcupineEngineBase(Engine):
"""Base class for different variants of Porcupine engine."""
def __init__(self, sensitivity, model_file_path, keyword_file_path):
"""
Constructor.
:param sensitivity: detection sensitivity.
:param model_file_path: path to model file.
:param keyword_file_path: path to keyword file.
"""
library_path = os.path.join(
os.path.dirname(__file__),
'engines/porcupine/lib/linux/%s/libpv_porcupine.so' % platform.machine())
self._porcupine = Porcupine(
library_path=library_path,
model_file_path=model_file_path,
keyword_file_path=keyword_file_path,
sensitivity=sensitivity)
class PorcupineEngine(PorcupineEngineBase):
"""Original variant of Porcupine."""
def __init__(self, keyword, sensitivity):
"""
Constructor.
:param keyword: keyword to be detected.
:param sensitivity: detection sensitivity.
"""
model_file_path = os.path.join(
os.path.dirname(__file__),
'engines/porcupine/lib/common/porcupine_params.pv')
keyword_file_path = os.path.join(
os.path.dirname(__file__),
'engines/porcupine/resources/keyword_files/%s_linux.ppn' % keyword.lower())
super().__init__(sensitivity, model_file_path, keyword_file_path)
class PorcupineTinyEngine(PorcupineEngineBase):
"""Tiny variant of Porcupine engine."""
def __init__(self, keyword, sensitivity):
"""
Constructor.
:param keyword: keyword to be detected.
:param sensitivity: detection sensitivity.
"""
model_file_path = os.path.join(
os.path.dirname(__file__),
'engines/porcupine/lib/common/porcupine_tiny_params.pv')
keyword_file_path = os.path.join(
os.path.dirname(__file__),
'engines/porcupine/resources/keyword_files/%s_linux_tiny.ppn' % keyword.lower())
super().__init__(sensitivity, model_file_path, keyword_file_path)
class SnowboyEngine(Engine):
"""Snowboy engine."""
def __init__(self, keyword, sensitivity):
"""
Constructor.
:param keyword: keyword to be detected.
:param sensitivity: detection sensitivity.
"""
keyword = keyword.lower()
if keyword == 'alexa':
model_relative_path = 'engines/snowboy/resources/alexa/alexa-avs-sample-app/alexa.umdl'
else:
model_relative_path = 'engines/snowboy/resources/models/%s.umdl' % keyword
model_str = os.path.join(os.path.dirname(__file__), model_relative_path).encode()
resource_filename = os.path.join(os.path.dirname(__file__), 'engines/snowboy/resources/common.res').encode()
self._snowboy = snowboydetect.SnowboyDetect(resource_filename=resource_filename, model_str=model_str)
self._snowboy.SetSensitivity(str(sensitivity).encode())
| [
2,
198,
2,
15069,
2864,
15085,
709,
2942,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,... | 2.466519 | 2,718 |
from os import environ
from pymysql import connect, Error
# Test if the connection is successful; print errors if it was not
| [
6738,
28686,
1330,
551,
2268,
198,
198,
6738,
279,
4948,
893,
13976,
1330,
2018,
11,
13047,
628,
198,
220,
220,
220,
1303,
6208,
611,
262,
4637,
318,
4388,
26,
3601,
8563,
611,
340,
373,
407,
198
] | 3.666667 | 36 |
'''Functional test for deferred.py'''
import pytest
import tempfile
import uuid
from hil import config, deferred, model, api
from hil.model import db, Switch
from hil.errors import SwitchError
from hil.test_common import config_testsuite, config_merge, \
fresh_database
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
fresh_database = pytest.fixture(fresh_database)
DeferredTestSwitch = None
class RevertPortError(SwitchError):
"""An exception thrown by the switch implementation's revert_port.
This is used as part of the error handling tests.
"""
def new_db():
""" returns a new database connection"""
local_app = Flask(__name__.split('.')[0])
uri = config.cfg.get('database', 'uri')
local_app.config.update(SQLALCHEMY_TRACK_MODIFICATIONS=False)
local_app.config.update(SQLALCHEMY_DATABASE_URI=uri)
local_db = SQLAlchemy(local_app)
return local_db
@pytest.fixture
def configure():
"""Configure HIL.
The tests in this module require two separate sessions, so if the
configuration specifies an in-memory sqlite database, we use a
temporary file instead.
"""
config_testsuite()
# if we are using sqlite's in memory db, then change uri to a db on disk
uri = config.cfg.get('database', 'uri')
if uri == 'sqlite:///:memory:':
with tempfile.NamedTemporaryFile() as temp_db:
uri = 'sqlite:///' + temp_db.name
config_merge({
'database': {
'uri': uri
},
})
config.load_extensions()
yield
else:
config.load_extensions()
yield
@pytest.fixture()
@pytest.fixture()
def switch(_deferred_test_switch_class):
"""Get an instance of DeferredTestSwitch."""
return DeferredTestSwitch(
label='switch',
hostname='http://example.com',
username='admin',
password='admin',
)
def new_nic(name):
"""Create a new nic named ``name``, and an associated Node + Obm.
The new nic is attached to a new node each time, and the node is added to
the project named 'anvil-nextgen-####' """
unique_id = str(uuid.uuid4())
project = model.Project('anvil-nextgen-' + unique_id)
label = str(uuid.uuid4())
node = model.Node(
label=label,
obmd_uri='http://obmd.example.com/nodes/' + label,
obmd_admin_token='secret',
)
if node.project is None:
project.nodes.append(node)
return model.Nic(node, name, '00:11:22:33:44:55')
@pytest.fixture()
def network():
"""Create a test network (and associated project) to work with."""
project = model.Project('anvil-nextgen')
return model.Network(project, [], True, '102', 'hammernet')
pytestmark = pytest.mark.usefixtures('configure')
def test_apply_networking(switch, network, fresh_database):
'''Test to validate apply_networking commits actions incrementally
This test verifies that the apply_networking() function in hil/deferred.py
incrementally commits actions, which ensures that any error on an action
will not require a complete rerun of the prior actions (e.g. if an error
is thrown on the 3rd action, the 1st and 2nd action will have already been
committed)
The test also verifies that if a new networking action fails, then the
old networking actions in the queue were commited.
'''
nic = []
actions = []
# initialize 3 nics and networking actions
for i in range(0, 2):
interface = 'gi1/0/%d' % (i)
nic.append(new_nic(str(i)))
nic[i].port = model.Port(label=interface, switch=switch)
unique_id = str(uuid.uuid4())
actions.append(model.NetworkingAction(nic=nic[i],
new_network=network,
channel='vlan/native',
type='modify_port',
uuid=unique_id,
status='PENDING'))
# Create another aciton of type revert_port. This action is invalid for the
# test switch because the switch raises an error when the networking action
# is of type revert port.
unique_id = str(uuid.uuid4())
nic.append(new_nic('2'))
nic[2].port = model.Port(label=interface, switch=switch)
actions.append(model.NetworkingAction(nic=nic[2],
new_network=None,
uuid=unique_id,
channel='',
status='PENDING',
type='revert_port'))
# get some nic attributes before we close this db session.
nic2_label = nic[2].label
nic2_node = nic[2].owner.label
for action in actions:
db.session.add(action)
db.session.commit()
# simple check to ensure that right number of actions are added.
total_count = db.session.query(model.NetworkingAction).count()
assert total_count == 3
deferred.apply_networking()
# close the session opened by `apply_networking` when `handle_actions`
# fails; without this the tests would just stall (when using postgres)
db.session.close()
local_db = new_db()
errored_action = local_db.session \
.query(model.NetworkingAction) \
.order_by(model.NetworkingAction.id).filter_by(status='ERROR') \
.one_or_none()
# Count the number of actions with different statuses
error_count = local_db.session \
.query(model.NetworkingAction).filter_by(status='ERROR').count()
pending_count = local_db.session \
.query(model.NetworkingAction).filter_by(status='PENDING').count()
done_count = local_db.session \
.query(model.NetworkingAction).filter_by(status='DONE').count()
# test that there's only 1 action that errored out in the queue and that it
# is of type revert_port
assert error_count == 1
assert errored_action.type == 'revert_port'
assert pending_count == 0
assert done_count == 2
local_db.session.commit()
local_db.session.close()
# add another action on a nic with a previously failed action.
api.network_create('corsair', 'admin', '', '105')
api.node_connect_network(nic2_node, nic2_label, 'corsair')
# the api call should delete the errored action on that nic, and a new
# pending action should appear.
local_db = new_db()
errored_action = local_db.session \
.query(model.NetworkingAction) \
.order_by(model.NetworkingAction.id).filter_by(status='ERROR') \
.one_or_none()
pending_action = local_db.session \
.query(model.NetworkingAction) \
.order_by(model.NetworkingAction.id).filter_by(status='PENDING') \
.one_or_none()
assert errored_action is None
assert pending_action is not None
local_db.session.commit()
local_db.session.close()
| [
7061,
6,
22203,
282,
1332,
329,
28651,
13,
9078,
7061,
6,
198,
198,
11748,
12972,
9288,
198,
11748,
20218,
7753,
198,
11748,
334,
27112,
198,
198,
6738,
289,
346,
1330,
4566,
11,
28651,
11,
2746,
11,
40391,
198,
6738,
289,
346,
13,
... | 2.424502 | 2,914 |
import os
import pytest
from datadog_checks.dev.docker import docker_run
from .common import DOCKER_DIR, ENDPOINT, URL, registry_file_path
INSTANCE = {
"stats_endpoint": ENDPOINT,
"registry_file_path": registry_file_path("empty"),
}
@pytest.fixture(scope="session")
@pytest.fixture
| [
11748,
28686,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
4818,
324,
519,
62,
42116,
13,
7959,
13,
45986,
1330,
36253,
62,
5143,
198,
198,
6738,
764,
11321,
1330,
360,
11290,
1137,
62,
34720,
11,
12964,
6322,
46,
12394,
11,
10289,
1... | 2.660714 | 112 |
#!/usr/bin/env python3
import sys
import os
import math
import copy
MCELL_PATH = os.environ.get('MCELL_PATH', '')
if MCELL_PATH:
sys.path.append(os.path.join(MCELL_PATH, 'lib'))
else:
print("Error: variable MCELL_PATH that is used to find the mcell library was not set.")
sys.exit(1)
import mcell as m
from parameters import *
if len(sys.argv) == 3 and sys.argv[1] == '-seed':
# overwrite value SEED defined in module parameters
SEED = int(sys.argv[2])
model = m.Model()
# ---- configuration ----
model.config.time_step = TIME_STEP
model.config.seed = 1
model.config.total_iterations = ITERATIONS
model.config.partition_dimension = 10
model.config.subpartition_dimension = 2.5
# ---- default configuration overrides ----
# ---- add components ----
model.load_bngl('model.bngl')
model2 = copy.deepcopy(model)
model2.config.seed = 4
# ---- initialization and execution ----
model.initialize()
model2.initialize()
model.run_iterations(ITERATIONS)
model2.run_iterations(ITERATIONS)
print(model.find_count('c').get_current_value())
assert model.find_count('c').get_current_value() == 34
print(model2.find_count('c').get_current_value())
assert model2.find_count('c').get_current_value() == 25
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
10688,
198,
11748,
4866,
198,
198,
44,
5222,
3069,
62,
34219,
796,
28686,
13,
268,
2268,
13,
1136,
10786,
44,
5222,
3069,
62,
3421... | 2.816514 | 436 |
import matplotlib
matplotlib.use('MACOSX')
import matplotlib.pyplot as plt
import numpy as np
from metrics.full_face_vertex_deviation import compute_vertex_deviations_over_sequence, compute_vertex_deviations_for_actor, \
compute_vertex_deviations_for_all
# d3dfacs_path = "/Users/Akash_Sengupta/Documents/Datasets/d3dfacs_alignments"
# compute_vertex_deviations_for_all(d3dfacs_path)
actor_path = "/Users/Akash_Sengupta/Documents/Datasets/d3dfacs_alignments/Joe"
deviations_array = compute_vertex_deviations_for_actor(actor_path, save_deviation_image=True, reduce='mean',
normalise_render=False, rigid_transform=True)
# mean_deviations = np.mean(deviations_array, axis=0)
# print(mean_deviations.shape)
# plt.figure()
# plt.hist(mean_deviations)
# plt.show()
| [
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
44721,
2640,
55,
11537,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
20731,
13,
12853,
62,
2550... | 2.446429 | 336 |
#
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import glob
import logging
import os
import pprint
import HPODatasets
hpo_log = logging.getLogger("hpo_log")
class HPOConfig(object):
"""Cloud integrated RAPIDS HPO functionality with AWS SageMaker focus"""
sagemaker_directory_structure = {
"train_data": "/opt/ml/input/data/training",
"model_store": "/opt/ml/model",
"output_artifacts": "/opt/ml/output",
}
def parse_configuration(self):
"""Parse the ENV variables [ set in the dockerfile ]
to determine configuration settings"""
hpo_log.info("\nparsing configuration from environment settings...")
dataset_type = "Airline"
model_type = "RandomForest"
compute_type = "single-GPU"
cv_folds = 3
try:
# parse dataset choice
dataset_selection = os.environ["DATASET_DIRECTORY"].lower()
if dataset_selection in ["1_year", "3_year", "10_year"]:
dataset_type = "Airline"
elif dataset_selection in ["nyc_taxi"]:
dataset_type = "NYCTaxi"
else:
dataset_type = "BYOData"
# parse model type
model_selection = os.environ["ALGORITHM_CHOICE"].lower()
if model_selection in ["randomforest"]:
model_type = "RandomForest"
elif model_selection in ["xgboost"]:
model_type = "XGBoost"
# parse compute choice
compute_selection = os.environ["ML_WORKFLOW_CHOICE"].lower()
if "multigpu" in compute_selection:
compute_type = "multi-GPU"
elif "multicpu" in compute_selection:
compute_type = "multi-CPU"
elif "singlecpu" in compute_selection:
compute_type = "single-CPU"
elif "singlegpu" in compute_selection:
compute_type = "single-GPU"
# parse CV folds
cv_folds = int(os.environ["CV_FOLDS"])
except KeyError as error:
hpo_log.info(f"Configuration parser failed : {error}")
assert dataset_type in ["Airline", "NYCTaxi", "BYOData"]
assert model_type in ["RandomForest", "XGBoost"]
assert compute_type in ["single-GPU", "multi-GPU", "single-CPU", "multi-CPU"]
assert cv_folds >= 1
hpo_log.info(
f" Dataset: {dataset_type}\n"
f" Compute: {compute_type}\n"
f" Algorithm: {model_type}\n"
f" CV_folds: {cv_folds}\n"
)
return dataset_type, model_type, compute_type, cv_folds
def parse_hyper_parameter_inputs(self, input_args):
"""Parse hyperparmeters provided by the HPO orchestrator"""
hpo_log.info("parsing model hyperparameters from command line arguments...log") # noqa
parser = argparse.ArgumentParser()
if "XGBoost" in self.model_type:
# intentionally breaking PEP8 below for argument alignment
parser.add_argument("--max_depth", type=int, default=5) # noqa
parser.add_argument("--num_boost_round", type=int, default=10) # noqa
parser.add_argument("--subsample", type=float, default=0.9) # noqa
parser.add_argument("--learning_rate", type=float, default=0.3) # noqa
parser.add_argument("--reg_lambda", type=float, default=1) # noqa
parser.add_argument("--gamma", type=float, default=0.0) # noqa
parser.add_argument("--alpha", type=float, default=0.0) # noqa
parser.add_argument("--seed", type=int, default=0) # noqa
args, unknown_args = parser.parse_known_args(input_args)
model_params = {
"max_depth": args.max_depth,
"num_boost_round": args.num_boost_round,
"learning_rate": args.learning_rate,
"gamma": args.gamma,
"lambda": args.reg_lambda,
"random_state": args.seed,
"verbosity": 0,
"seed": args.seed,
"objective": "binary:logistic",
}
if "single-CPU" in self.compute_type:
model_params.update({"nthreads": os.cpu_count()})
if "GPU" in self.compute_type:
model_params.update({"tree_method": "gpu_hist"})
else:
model_params.update({"tree_method": "hist"})
elif "RandomForest" in self.model_type:
# intentionally breaking PEP8 below for argument alignment
parser.add_argument("--max_depth", type=int, default=5) # noqa
parser.add_argument("--n_estimators", type=int, default=10) # noqa
parser.add_argument("--max_features", type=float, default=1.0) # noqa
parser.add_argument("--n_bins", type=float, default=64) # noqa
parser.add_argument("--bootstrap", type=bool, default=True) # noqa
parser.add_argument("--random_state", type=int, default=0) # noqa
args, unknown_args = parser.parse_known_args(input_args)
model_params = {
"max_depth": args.max_depth,
"n_estimators": args.n_estimators,
"max_features": args.max_features,
"n_bins": args.n_bins,
"bootstrap": args.bootstrap,
"random_state": args.random_state,
}
else:
raise Exception(f"!error: unknown model type {self.model_type}")
hpo_log.info(pprint.pformat(model_params, indent=5))
return model_params
def detect_data_inputs(self, directory_structure):
"""
Scan mounted data directory to determine files to ingest.
Notes: single-CPU pandas read_parquet needs a directory input
single-GPU cudf read_parquet needs a list of files
multi-CPU/GPU can accept either a list or a directory
"""
parquet_files = glob.glob(os.path.join(directory_structure["train_data"], "*.parquet"))
csv_files = glob.glob(os.path.join(directory_structure["train_data"], "*.csv"))
if len(csv_files):
hpo_log.info("CSV input files detected")
target_files = csv_files
input_file_type = "CSV"
elif len(parquet_files):
hpo_log.info("Parquet input files detected")
if "single-CPU" in self.compute_type:
# pandas read_parquet needs a directory input
target_files = directory_structure["train_data"] + "/"
else:
target_files = parquet_files
input_file_type = "Parquet"
else:
raise Exception("! No [CSV or Parquet] input files detected")
n_datafiles = len(target_files)
assert n_datafiles > 0
pprint.pprint(target_files)
hpo_log.info(f"detected {n_datafiles} files as input")
if "Airline" in self.dataset_type:
dataset_columns = HPODatasets.airline_feature_columns
dataset_label_column = HPODatasets.airline_label_column
dataset_dtype = HPODatasets.airline_dtype
elif "NYCTaxi" in self.dataset_type:
dataset_columns = HPODatasets.nyctaxi_feature_columns
dataset_label_column = HPODatasets.nyctaxi_label_column
dataset_dtype = HPODatasets.nyctaxi_dtype
elif "BYOData" in self.dataset_type:
dataset_columns = HPODatasets.BYOD_feature_columns
dataset_label_column = HPODatasets.BYOD_label_column
dataset_dtype = HPODatasets.BYOD_dtype
return (target_files, input_file_type, dataset_columns, dataset_label_column, dataset_dtype)
| [
2,
198,
2,
15069,
357,
66,
8,
13130,
12,
42334,
11,
15127,
23929,
44680,
6234,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 2.180625 | 3,809 |
import unittest
import random
import numpy as np
from pyreinforce.memory import Memory, EpisodicMemory
_seed = 123
_random = random.Random()
_random.seed(_seed)
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
4738,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
12972,
260,
259,
3174,
13,
31673,
1330,
14059,
11,
4551,
271,
29512,
30871,
628,
198,
62,
28826,
796,
17031,
198,
198,
62,
25120,
796,
47... | 2.818182 | 77 |
import logging
from dateutil.relativedelta import relativedelta
from hdx.scraper.base_scraper import BaseScraper
from hdx.utilities.dateparse import default_date, parse_date
from hdx.utilities.dictandlist import dict_of_lists_add
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
198,
6738,
3128,
22602,
13,
2411,
265,
1572,
12514,
1330,
48993,
1572,
12514,
198,
6738,
289,
34350,
13,
1416,
38545,
13,
8692,
62,
1416,
38545,
1330,
7308,
3351,
38545,
198,
6738,
289,
34350,
13,
315,
2410,
13,
4475,... | 3.176471 | 85 |
from .discover_movies import *
from .entry_points import *
from .list_movies import *
from .search_movies import *
| [
6738,
764,
67,
29392,
62,
76,
20526,
1330,
1635,
198,
6738,
764,
13000,
62,
13033,
1330,
1635,
198,
6738,
764,
4868,
62,
76,
20526,
1330,
1635,
198,
6738,
764,
12947,
62,
76,
20526,
1330,
1635,
198
] | 3.194444 | 36 |
'''
Generates and samples the PDF from the Imagenet dataset. Data is expected to be imagefiles in 8bit png format.
The probability values at arbitrary points are interpolated from the surrounding values by linear interpolation.
Dumps a list per sample in a directory, containing [samples] or [samples, grid_samples], where each is of size [n_dim, n_samples]
Args:
data_dir: STRING, Directory with the images. data_dir is directly searched with glob.glob, thus '/*' is appended to the directory, if not already there. defaults to data/Imagenet/*
size: INT, size of the drawn sample distribution
with_grid: BOOL, if True a grid with the corresponding true probability values is also generated (for nicer plots)
grid_number: INT, number of samples in the grid
'''
import argparse
from deep_density_estimation.PDF_Generation import Prob_dist_from_2D as pdf2
parser = argparse.ArgumentParser()
parser.add_argument('--size', type=int, default=1000, help='INT, size of the drawn sample distribution')
parser.add_argument('--data_dir', default='data/Imagenet/*', help='STRING, Directory with the images. data_dir is directly searched with glob.glob, thus /* is appended to the directory, if not already there. defaults to data/Stock_Data/*/*')
parser.add_argument('--grid_number', type=int, default=200, help='INT, number of samples in the grid')
parser.add_argument('--verbose', action='store_true', help='adds some verbose information')
parser.add_argument('--with_grid', action='store_true', help='BOOL, if True a grid with the corresponding true probability values is also generated (for nicer plots)')
args = parser.parse_args()
size = args.size
data_dir = args.data_dir
grid_number = args.grid_number
verbose = args.verbose
with_grid = args.with_grid
if __name__ == "__main__":
main() | [
7061,
6,
198,
8645,
689,
290,
8405,
262,
12960,
422,
262,
1846,
11286,
316,
27039,
13,
6060,
318,
2938,
284,
307,
2939,
16624,
287,
807,
2545,
279,
782,
5794,
13,
220,
220,
198,
464,
12867,
3815,
379,
14977,
2173,
389,
39555,
515,
4... | 3.475096 | 522 |
import re
import requests
import json
if __name__ == '__main__':
trainDate = "2018-05-21"
fromStation = "西安"
toStation = "成都"
ltq = LeftTicketQuery()
staticonNameCode = ltq.queryStation()
stationCodeName = dict((v, k) for k, v in staticonNameCode.items())
leftTicketUrl = ltq.makeLeftTicketUrl(trainDate, fromStation, toStation, staticonNameCode)
if leftTicketUrl != None:
tickets = ltq.queryLeftTicket(leftTicketUrl, stationCodeName)
for ticket in tickets:
ticketLine = "车次:" + ticket["station_train_code"] + " 出发日期:" + ticket["start_train_date"] + " 出发站:" + \
ticket["from_station_name"] + " 到达站:" + ticket["to_station_name"] + " 出发时间:" + ticket["start_time"] + " 到达时间:" + ticket[
"arrive_time"] + " 历时:" + ticket["lishi"] + " 商务座特等座:" + ticket["swz_num"] + " 一等座:" + ticket["zy_num"] + " 二等座:" + \
ticket["ze_num"] + " 高级软卧:" + ticket.get("yyrw_num", "") + " 软卧:" + ticket["rw_num"] + " 动卧:" + \
ticket["srrb_num"] + " 硬卧:" + ticket["yw_num"] + " 软座:" + ticket["rz_num"] + " 硬座:" + ticket["yz_num"] + " 无座:" + ticket["wz_num"]
print(ticketLine)
else:
print("Query left ticket wrong!")
| [
11748,
302,
198,
11748,
7007,
198,
11748,
33918,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
4512,
10430,
796,
366,
7908,
12,
2713,
12,
2481,
1,
198,
220,
220,
220,
422,
12367,
796,
3... | 1.754458 | 729 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import heapq
import os
import sys
import time
try:
from time import monotonic as now
except ImportError:
from time import time as now
import argparse
import psutil
import six
from quietus import util
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
220,
220,
220,
15069,
357,
34,
8,
1853,
16551,
0,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
2... | 3.160535 | 299 |
from django.shortcuts import render
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
628,
198
] | 3.8 | 10 |
import youtube_dl
import os
import sys
directory = (os.getcwd() + '/Downloads')
ydl_opts = {}
try:
main()
except IndexError:
print('Não foi detectado nenhum URL!')
| [
11748,
35116,
62,
25404,
201,
198,
11748,
28686,
201,
198,
11748,
25064,
201,
198,
201,
198,
34945,
796,
357,
418,
13,
1136,
66,
16993,
3419,
1343,
31051,
10002,
82,
11537,
201,
198,
5173,
75,
62,
404,
912,
796,
23884,
201,
198,
28311... | 2.277108 | 83 |
#!/usr/bin/env python
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DialogFlow API Context Python sample showing how to manage session
contexts.
Examples:
python context_management.py -h
python context_management.py --project-id PROJECT_ID \
list --session-id SESSION_ID
python context_management.py --project-id PROJECT_ID \
create --session-id SESSION_ID --context-id CONTEXT_ID
python context_management.py --project-id PROJECT_ID \
delete --session-id SESSION_ID --context-id CONTEXT_ID
"""
import argparse
# [START dialogflow_list_contexts]
# [END dialogflow_list_contexts]
# [START dialogflow_create_context]
# [END dialogflow_create_context]
# [START dialogflow_delete_context]
# [END dialogflow_delete_context]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--project-id',
help='Project/agent id. Required.',
required=True)
subparsers = parser.add_subparsers(dest='command')
list_parser = subparsers.add_parser(
'list', help=list_contexts.__doc__)
list_parser.add_argument(
'--session-id',
required=True)
create_parser = subparsers.add_parser(
'create', help=create_context.__doc__)
create_parser.add_argument(
'--session-id',
required=True)
create_parser.add_argument(
'--context-id',
help='The id of the context.',
required=True)
create_parser.add_argument(
'--lifespan-count',
help='The lifespan_count of the context. Defaults to 1.',
default=1)
delete_parser = subparsers.add_parser(
'delete', help=delete_context.__doc__)
delete_parser.add_argument(
'--session-id',
required=True)
delete_parser.add_argument(
'--context-id',
help='The id of the context.',
required=True)
args = parser.parse_args()
if args.command == 'list':
list_contexts(args.project_id, args.session_id, )
elif args.command == 'create':
create_context(
args.project_id, args.session_id, args.context_id,
args.lifespan_count)
elif args.command == 'delete':
delete_context(args.project_id, args.session_id, args.context_id)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
2177,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 2.668831 | 1,078 |
from math import log
| [
6738,
10688,
1330,
2604,
628,
198
] | 3.833333 | 6 |
import apps.common.func.InitDjango
from all_models.models import TbUser
if __name__ == "__main__":
print(UserService.getUsers()[0].loginname)
print(UserService.getUserByLoginname(UserService.getUsers()[0].loginname)) | [
11748,
6725,
13,
11321,
13,
20786,
13,
31768,
35,
73,
14208,
198,
6738,
477,
62,
27530,
13,
27530,
1330,
309,
65,
12982,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
7,
12982,
16177,
1... | 2.848101 | 79 |
import numpy as np
from ms2deepscore.plotting import create_confusion_matrix_plot
| [
11748,
299,
32152,
355,
45941,
198,
6738,
13845,
17,
22089,
26675,
13,
29487,
889,
1330,
2251,
62,
10414,
4241,
62,
6759,
8609,
62,
29487,
628
] | 3.32 | 25 |
from django.db import models
from django.conf import settings
from django.core.validators import RegexValidator
from model_utils import Choices
from helpers.models import TimestampedModel
class ServiceKeyword(TimestampedModel):
"""
Model to keep track of service keywords.
"""
name = models.CharField(max_length=300)
def __unicode__(self):
"""
Show the object in a readable way.
"""
return self.name
class Service(TimestampedModel):
"""
Model to keep track of service.
"""
name = models.CharField(max_length=300)
keywords = models.ManyToManyField(ServiceKeyword,
related_name="services")
def __unicode__(self):
"""
Show the object in a readable way.
"""
return self.name
class Provider(TimestampedModel):
"""
Model to keep track of solution provider details.
"""
TYPE_CHOICES = Choices(
(1, 'individual', 'Individual'),
(2, 'company', 'Company'),
)
name = models.CharField(max_length=300)
user = models.OneToOneField(settings.AUTH_USER_MODEL,
related_name='provider',
blank=False)
type = models.PositiveIntegerField(choices=TYPE_CHOICES)
services = models.ManyToManyField(Service,
related_name="providers")
def __unicode__(self):
"""
Show the object in a readable way.
"""
return self.name
class Address(TimestampedModel):
"""
Model to keep track of provider contacts.
"""
STATE_CHOICES = Choices(
(1, 'dubai', 'Dubai'),
(2, 'abu_dhabi', 'AbuDhabi'),
)
provider = models.ForeignKey(Provider,
related_name="addresses")
address_label = models.CharField(max_length=200)
building_name = models.CharField(max_length=200)
street = models.CharField(max_length=200)
area = models.CharField(max_length=200)
state = models.PositiveIntegerField(choices=STATE_CHOICES)
latitude = models.FloatField(null=True, blank=True)
longitude = models.FloatField(null=True, blank=True)
makani = models.IntegerField(null=True, blank=True)
contact_name = models.CharField(max_length=200)
email = models.EmailField(blank=False)
telephone = models.CharField(max_length=10,
blank=True,
validators=[
RegexValidator(
regex=r'^\d+$',
message='Only digits are allowed'
)])
mobile_number = models.CharField(max_length=12,
blank=True,
validators=[
RegexValidator(
regex=r'^\d+$',
message='Only digits are allowed'
)])
def __unicode__(self):
"""
Show the object in a readable way.
"""
return self.address_label | [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
12102,
2024,
1330,
797,
25636,
47139,
1352,
198,
198,
6738,
2746,
62,
26791,
1330,
10031,
1063,
198,
198,
6... | 1.963863 | 1,688 |
from __future__ import absolute_import
from celery import shared_task
@shared_task
@shared_task
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
18725,
1924,
1330,
4888,
62,
35943,
628,
198,
31,
28710,
62,
35943,
628,
198,
31,
28710,
62,
35943,
198
] | 3.448276 | 29 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from flask.ext.mail import Mail
mail = Mail()
from flask.ext.cors import CORS
cors = CORS()
from flask.ext.security import Security, SQLAlchemyUserDatastore
from .models import User, Role, db
datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(datastore=datastore)
from flask_jwt import JWT
from flask import abort, jsonify, current_app
from datetime import datetime
jwt = JWT()
@jwt.authentication_handler
@jwt.identity_handler
@jwt.auth_response_handler
@jwt.jwt_payload_handler
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
628,
198,
198,
6738,
42903,
13,
2302,
13,
4529,
1330,
11099,
198,
4529,
796,
11099,
3419,
628,
198,
6738,
42903,
13,
2302... | 2.934343 | 198 |
from wikidata.client import Client
from urllib.parse import urlparse
import wikipedia
import requests
from requests.adapters import HTTPAdapter
from .ceimport import cache
s = requests.Session()
adapter = HTTPAdapter(max_retries=5, pool_connections=100, pool_maxsize=100)
s.mount("https://", adapter)
@cache.dict()
@cache.dict()
def load_person_from_wikipedia_wikidata(wikidata_url, language):
"""Given a wikidata url, get information from wikipedia
TODO: Check language against valid list
TODO: Image from wikipedia is different to that of wikidata"""
entity = get_entity_for_wikidata(wikidata_url)
wikipedia_url = get_url_for_wikipedia(entity, language)
description = get_description_for_wikipedia(entity, language)
label = entity.label.get(language)
# TODO: Remove html tags from the description
if label:
title = f"{label} - Wikipedia"
return {
"title": title,
"name": label,
"description": description,
"contributor": "https://wikipedia.org/",
"source": wikipedia_url,
"format_": "text/html",
"language": language
}
else:
return {}
@cache.dict()
def load_person_from_wikipedia_wikipedia(wikipedia_url, language):
"""Given a wikipedia url, get information from wikipedia
TODO: Check language against valid list
TODO: Image from wikipedia is different to that of wikidata"""
page = get_page_for_wikipedia(wikipedia_url)
label = page.title
description = page.summary
if label:
title = f"{label} - Wikipedia"
return {
"title": title,
"name": label,
"description": description,
"contributor": "https://wikipedia.org/",
"source": wikipedia_url,
"format_": "text/html",
"language": language
}
else:
return {}
def _get_normalized_query(data, query):
"""If you query with an _ in a query (e.g. from a wikipedia url, then wikipedia will "normalize" it,
so that it doesn't have the _ or other characters in it"""
normalized = data.get("query", {}).get("normalized")
if normalized:
for n in normalized:
if n["from"] == query:
return n["to"]
return query
@cache.dict()
def get_wikidata_id_from_wikipedia_url(wp_url):
"""Get the wikidata id for this URL if it exists
Returns None if the page has no wikidata id"""
if "en.wikipedia.org" not in wp_url:
raise WikipediaException("Can only use en.wikipedia.org urls")
parts = urlparse(wp_url)
# Remove /wiki/
# some titles may have / in them so we can't take the last part after splitting on /
wp_title = "/".join(parts[2:])
param_url = "https://en.wikipedia.org/w/api.php?action=query&prop=pageprops&titles={}&format=json"
full_url = param_url.format(wp_title)
r = s.get(full_url)
data = r.json()
title = _get_normalized_query(data, wp_title)
pages = data.get("query", {}).get("pages")
for pid, pdata in pages.items():
if pdata["title"] == title:
return pdata.get("pageprops", {}).get("wikibase_item")
return None
| [
6738,
47145,
312,
1045,
13,
16366,
1330,
20985,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
29572,
198,
11748,
47145,
11151,
198,
198,
11748,
7007,
198,
6738,
7007,
13,
324,
12126,
1330,
14626,
47307,
198,
198,
6738,
764,
344,
11... | 2.531348 | 1,276 |
import json
import os.path
from datetime import date
from rich import box
from rich.console import Console
from rich.table import Table
from vika.config import NOTE_DEFAULT_PATH
| [
11748,
33918,
198,
11748,
28686,
13,
6978,
198,
6738,
4818,
8079,
1330,
3128,
198,
6738,
5527,
1330,
3091,
198,
6738,
5527,
13,
41947,
1330,
24371,
198,
6738,
5527,
13,
11487,
1330,
8655,
198,
198,
6738,
410,
9232,
13,
11250,
1330,
2455... | 3.829787 | 47 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Created by Xavier Yin on 2015/2/28
from modules.conf import Config
from modules import run_as_post_receive
import os
import sys
CURRENT_FILE_PATH = os.path.abspath(__file__)
CURRENT_WORK_DIR = os.path.dirname(CURRENT_FILE_PATH)
GIT_DIR = os.path.dirname(CURRENT_WORK_DIR)
config_file = os.path.join(CURRENT_WORK_DIR, "ring.json")
config = Config(config_file)
if __name__ == "__main__":
old, new, ref = sys.stdin.read().split()
head, body, foot = ref.split("/", 2)
config.commit.new = new
config.commit.old = old
config.commit.ref = "branch" if body == "heads" else "tag"
config.commit.sha1 = foot
run_as_post_receive(config) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
2,
15622,
416,
30825,
37201,
319,
1853,
14,
17,
14,
2078,
198,
198,
6738,
13103,
13,
10414,
1330,
17056,
198,
6738,
13103,
... | 2.545126 | 277 |
import torch
import torch.nn as nn
import numpy as np
from coordinated import configs
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
22080,
1330,
4566,
82,
198
] | 3.625 | 24 |
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_EXT_histogram'
_p.unpack_constants( """GL_HISTOGRAM_EXT 0x8024
GL_PROXY_HISTOGRAM_EXT 0x8025
GL_HISTOGRAM_WIDTH_EXT 0x8026
GL_HISTOGRAM_FORMAT_EXT 0x8027
GL_HISTOGRAM_RED_SIZE_EXT 0x8028
GL_HISTOGRAM_GREEN_SIZE_EXT 0x8029
GL_HISTOGRAM_BLUE_SIZE_EXT 0x802A
GL_HISTOGRAM_ALPHA_SIZE_EXT 0x802B
GL_HISTOGRAM_LUMINANCE_SIZE_EXT 0x802C
GL_HISTOGRAM_SINK_EXT 0x802D
GL_MINMAX_EXT 0x802E
GL_MINMAX_FORMAT_EXT 0x802F
GL_MINMAX_SINK_EXT 0x8030
GL_TABLE_TOO_LARGE_EXT 0x8031""", globals())
glget.addGLGetConstant( GL_HISTOGRAM_EXT, (1,) )
glget.addGLGetConstant( GL_MINMAX_EXT, (1,) )
@_f
@_p.types(None,_cs.GLenum,_cs.GLboolean,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
@_f
@_p.types(None,_cs.GLenum,_cs.GLboolean,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLenum,_cs.GLboolean)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLboolean)
@_f
@_p.types(None,_cs.GLenum)
@_f
@_p.types(None,_cs.GLenum)
def glInitHistogramEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( EXTENSION_NAME )
| [
7061,
6,
16541,
519,
877,
515,
416,
651,
62,
4743,
62,
2302,
5736,
4226,
11,
466,
407,
4370,
0,
7061,
6,
198,
6738,
30672,
1330,
3859,
355,
4808,
79,
11,
38491,
355,
4808,
6359,
11,
26515,
198,
6738,
30672,
13,
8763,
1330,
1278,
1... | 2.112175 | 731 |
# -*- coding: UTF-8 -*-
# Copyright 2014-2018 Luc Saffre
# License: BSD (see file COPYING for details)
"""
Database models for this plugin.
"""
from __future__ import unicode_literals
from builtins import str
from django.utils.translation import ugettext_lazy as _
from lino.api import dd
from lino.modlib.uploads.models import *
from lino_xl.lib.contacts.mixins import ContactRelated
from lino_xl.lib.cal.utils import update_reminder
from lino_xl.lib.cal.choicelists import Recurrencies
from lino_xl.lib.contacts.roles import ContactsUser
# add = UploadAreas.add_item
# add('10', _("Job search uploads"), 'job_search')
# add('20', _("Medical uploads"), 'medical')
# add('30', _("Career uploads"), 'career')
class UploadType(UploadType):
"""Extends the library model by adding `warn_expiry` info.
"""
warn_expiry_unit = Recurrencies.field(
_("Expiry warning (unit)"),
default=Recurrencies.as_callable('monthly'),
blank=True) # iCal:DURATION
warn_expiry_value = models.IntegerField(
_("Expiry warning (value)"),
default=2)
# dd.update_field(
# 'uploads.UploadType', 'upload_area', default=UploadAreas.job_search.as_callable)
class Upload(Upload, mixins.ProjectRelated, ContactRelated,
mixins.DateRange):
"""Extends the library model by adding the `ContactRelated`,
`ProjectRelated` and `DateRange` mixins and two fields.
.. attribute:: remark
A remark about this document.
.. attribute:: needed
Whether this particular upload is a needed document. Default value
is `True` if the new Upload has an UploadType with a nonempty
`warn_expiry_unit`.
"""
# valid_from = models.DateField(_("Valid from"), blank=True, null=True)
# valid_until = models.DateField(_("Valid until"), blank=True, null=True)
remark = models.TextField(_("Remark"), blank=True)
needed = models.BooleanField(_("Needed"), default=True)
def update_reminders(self):
"""Overrides :meth:`lino.core.model.Model.update_reminders`.
"""
ut = self.type
if not ut or not ut.warn_expiry_unit:
return
if not self.needed:
return
update_reminder(
1, self, self.user,
self.end_date,
_("%s expires") % str(ut),
ut.warn_expiry_value,
ut.warn_expiry_unit)
dd.update_field(
Upload, 'company', verbose_name=_("Issued by (Organization)"))
dd.update_field(
Upload, 'contact_person',
verbose_name=_("Issued by (Person)"))
dd.update_field(Upload, 'start_date', verbose_name=_("Valid from"))
dd.update_field(Upload, 'end_date', verbose_name=_("Valid until"))
# dd.update_field(
# Upload, 'upload_area', default=UploadAreas.job_search.as_callable)
class UploadDetail(dd.DetailLayout):
"The Detail layout for Upload"
main = """
user project id
type description start_date end_date needed
company contact_person contact_role
file owner
remark cal.TasksByController
"""
LibraryUploads = Uploads
class MyExpiringUploads(MyUploads):
"Expiring uploads for client coached by me"
required_roles = dd.login_required((OfficeUser, OfficeOperator))
label = _("My expiring uploads")
help_text = _("Show needed uploads whose validity expires soon")
column_names = "project type description_link user \
start_date end_date needed *"
order_by = ['end_date']
@classmethod
class UploadsByClient(AreaUploads, UploadsByController):
"Uploads by Client"
master = dd.plugins.clients.client_model # 'pcsw.Client'
master_key = 'project'
column_names = "type end_date needed description_link user *"
required_roles = dd.login_required(ContactsUser, (OfficeUser, OfficeOperator))
# auto_fit_column_widths = True
# debug_sql = "20140519"
insert_layout = """
file
type end_date
description
"""
@classmethod
@classmethod
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
15069,
1946,
12,
7908,
7598,
311,
2001,
260,
198,
2,
13789,
25,
347,
10305,
357,
3826,
2393,
27975,
45761,
329,
3307,
8,
628,
198,
37811,
198,
38105,
4981,
220,
329,
4... | 2.620825 | 1,527 |
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tarfile
import tempfile
from io import BytesIO
from minio import Minio
from minio.error import NoSuchKey, NoSuchBucketPolicy, ResponseError
from pprint import pprint
from swagger_server.util import ApiError
from tarfile import TarFile
from urllib3 import Timeout
from werkzeug.datastructures import FileStorage
_namespace = os.environ.get("POD_NAMESPACE", "kubeflow")
_host = os.environ.get("MINIO_SERVICE_SERVICE_HOST", "minio-service.%s.svc.cluster.local" % _namespace)
_port = os.environ.get("MINIO_SERVICE_SERVICE_PORT", "9000")
_access_key = 'minio'
_secret_key = 'minio123'
_bucket_policy_sid = "AllowPublicReadAccess"
_bucket_policy_stmt = {
"Sid": _bucket_policy_sid,
"Action": ["s3:GetObject"],
"Effect": "Allow",
"Principal": {"AWS": ["*"]},
"Resource": []
}
_bucket_policy_template = {
"Version": "2012-10-17",
"Statement": [_bucket_policy_stmt]
}
| [
2,
15069,
33448,
19764,
10501,
198,
2,
220,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
220,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
220,
198,
... | 2.94027 | 519 |
import base64
import base58
import base91
b16table = '0123456789ABCDEF'
b32table = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
b58table = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
b64table = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
b85table = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~'
b91table = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!#$%&()*+,./:;<=>?@[]^_`{|}~"'
| [
11748,
2779,
2414,
198,
11748,
2779,
3365,
198,
11748,
2779,
6420,
628,
198,
65,
1433,
11487,
796,
705,
486,
1954,
2231,
3134,
4531,
24694,
32988,
6,
198,
65,
2624,
11487,
796,
705,
24694,
32988,
17511,
23852,
42,
31288,
45,
3185,
48,
... | 1.851145 | 262 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""manage_vms tests."""
from builtins import object
from builtins import range
from builtins import str
import copy
import functools
import mock
import unittest
from google.cloud import ndb
import six
from datastore import data_types
from google_cloud_utils import compute_engine_projects
from handlers.cron import manage_vms
from handlers.cron.helpers import bot_manager
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
INSTANCE_GROUPS = {
'oss-fuzz-linux-zone2-pre-proj2': {
'targetSize': 1,
},
'oss-fuzz-linux-zone2-pre-proj3': {
'targetSize': 499,
},
'oss-fuzz-linux-zone2-pre-proj4': {
'targetSize': 99,
},
'oss-fuzz-linux-zone2-pre-proj5': {
'targetSize': 99,
}
}
INSTANCE_TEMPLATES = {
'oss-fuzz-linux-zone2-pre-proj2': {
'description': '{"version": 1}',
'properties': {
'metadata': {
'items': [],
},
'disks': [{
'initializeParams': {
'diskSizeGb': '30',
},
}],
'serviceAccounts': [{
'email':
'email',
'scopes': [
'https://www.googleapis.com/auth/'
'devstorage.full_control',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/appengine.apis',
'https://www.googleapis.com/auth/prodxmon',
'https://www.googleapis.com/auth/bigquery',
]
}],
}
},
'oss-fuzz-linux-zone2-pre-proj3': {
'description': '{"version": 1}',
'properties': {
'metadata': {
'items': [],
},
'disks': [{
'initializeParams': {
'diskSizeGb': '30',
},
}],
'serviceAccounts': [{
'email':
'email',
'scopes': [
'https://www.googleapis.com/auth/'
'devstorage.full_control',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/appengine.apis',
'https://www.googleapis.com/auth/prodxmon',
'https://www.googleapis.com/auth/bigquery',
]
}],
}
},
'oss-fuzz-linux-zone2-pre-proj4': {
'description': '{"version": 0}',
'properties': {
'metadata': {
'items': [],
},
'disks': [{
'initializeParams': {
'diskSizeGb': '30',
},
}],
'serviceAccounts': [{
'email':
'email',
'scopes': [
'https://www.googleapis.com/auth/'
'devstorage.full_control',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/appengine.apis',
'https://www.googleapis.com/auth/prodxmon',
'https://www.googleapis.com/auth/bigquery',
]
}],
}
},
'oss-fuzz-linux-zone2-pre-proj5': {
'description': '{"version": 1}',
'properties': {
'metadata': {
'items': [],
},
'disks': [{
'initializeParams': {
'diskSizeGb': '30',
},
}],
'serviceAccounts': [{
'email':
'email',
'scopes': [
'https://www.googleapis.com/auth/'
'devstorage.full_control',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/appengine.apis',
'https://www.googleapis.com/auth/prodxmon',
'https://www.googleapis.com/auth/bigquery',
]
}],
}
}
}
INSTANCES = {
'oss-fuzz-linux-zone3-host': [{
'instance': 'https://blah/oss-fuzz-linux-zone3-host-abcd',
}, {
'instance': 'https://blah/oss-fuzz-linux-zone3-host-efgh',
}],
'oss-fuzz-linux-zone3-worker-proj1': [{
'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj1-%04d' % i
} for i in range(1, 2)],
'oss-fuzz-linux-zone3-worker-proj2': [{
'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj2-%04d' % i
} for i in range(1, 5)],
'oss-fuzz-linux-zone3-worker-proj3': [{
'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj3-%04d' % i
} for i in range(1, 10)],
'oss-fuzz-linux-zone3-worker-proj4': [{
'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj4-%04d' % i
} for i in range(1, 2)],
'oss-fuzz-linux-zone3-worker-proj5': [{
'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj5-%04d' % i
} for i in range(1, 2)],
'oss-fuzz-linux-zone3-host-high-end': [{
'instance': 'https://blah/oss-fuzz-linux-zone3-host-high-end-1'
}],
'oss-fuzz-linux-zone3-worker-high-end-proj6': [{
'instance': ('https://blah/'
'oss-fuzz-linux-zone3-worker-high-end-proj6-%04d' % i)
} for i in range(1, 3)],
}
def mock_resource(spec):
"""Mock resource."""
resource = mock.Mock(spec=spec)
resource.created = False
resource.body = None
resource.create.side_effect = create
resource.get.side_effect = get
resource.exists.side_effect = exists
resource.delete.side_effect = delete
return resource
class MockBotManager(object):
"""Mock BotManager."""
def _get_resource(self, name, cache, values, spec):
"""Get resource."""
if name in cache:
return cache[name]
resource = mock_resource(spec=spec)
if name in values:
resource.created = True
resource.body = values[name]
cache[name] = resource
return resource
def instance_group(self, name):
"""Get an InstanceGroup resource with the given name."""
resource = self._get_resource(name, self.instance_groups, INSTANCE_GROUPS,
bot_manager.InstanceGroup)
if name in INSTANCES:
resource.list_managed_instances.return_value = INSTANCES[name]
return resource
def instance_template(self, name):
"""Get an InstanceTemplate resource with the given name."""
return self._get_resource(name, self.instance_templates, INSTANCE_TEMPLATES,
bot_manager.InstanceTemplate)
def expected_instance_template(gce_project_name,
name,
project_name,
disk_size_gb=None,
service_account=None,
tls_cert=False):
"""Get the expected instance template for a project."""
gce_project = compute_engine_projects.load_project(gce_project_name)
expected = copy.deepcopy(gce_project.get_instance_template(name))
expected['properties']['metadata']['items'].append({
'key': 'task-tag',
'value': project_name,
})
if disk_size_gb:
disk = expected['properties']['disks'][0]
disk['initializeParams']['diskSizeGb'] = disk_size_gb
if service_account:
expected['properties']['serviceAccounts'][0]['email'] = service_account
if tls_cert:
expected['properties']['metadata']['items'].extend([{
'key': 'tls-cert',
'value': project_name + '_cert',
}, {
'key': 'tls-key',
'value': project_name + '_key',
}])
return expected
def expected_host_instance_template(gce_project_name, name):
"""Get the expected instance template for a project."""
gce_project = compute_engine_projects.load_project(gce_project_name)
return copy.deepcopy(gce_project.get_instance_template(name))
@test_utils.with_cloud_emulators('datastore')
class CronTest(unittest.TestCase):
"""Test manage_vms cron."""
def test_update_cpus(self):
"""Tests CPU distribution cron."""
self.maxDiff = None # pylint: disable=invalid-name
manager = manage_vms.OssFuzzClustersManager('clusterfuzz-external')
manager.update_clusters()
proj1 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj1').get()
self.assertIsNotNone(proj1)
self.assertDictEqual({
'name':
'proj1',
'clusters': [{
'cluster': 'oss-fuzz-linux-zone2-pre',
'cpu_count': 100,
'gce_zone': 'us-east2-a',
}, {
'cluster': 'oss-fuzz-linux-zone3-worker',
'cpu_count': 1,
'gce_zone': 'us-central1-d',
}],
}, proj1.to_dict())
proj2 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj2').get()
self.assertIsNotNone(proj2)
self.assertDictEqual({
'name':
'proj2',
'clusters': [{
'cluster': 'oss-fuzz-linux-zone2-pre',
'cpu_count': 200,
'gce_zone': 'us-east2-a',
}, {
'cluster': 'oss-fuzz-linux-zone3-worker',
'cpu_count': 4,
'gce_zone': 'us-central1-d',
}],
}, proj2.to_dict())
proj3 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj3').get()
self.assertIsNotNone(proj3)
self.assertDictEqual({
'name':
'proj3',
'clusters': [{
'cluster': 'oss-fuzz-linux-zone2-pre',
'cpu_count': 499,
'gce_zone': 'us-east2-a',
}, {
'cluster': 'oss-fuzz-linux-zone3-worker',
'cpu_count': 9,
'gce_zone': 'us-central1-d',
}],
}, proj3.to_dict())
proj4 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj4').get()
self.assertIsNotNone(proj4)
self.assertDictEqual({
'name':
'proj4',
'clusters': [{
'cluster': 'oss-fuzz-linux-zone2-pre',
'cpu_count': 99,
'gce_zone': 'us-east2-a',
}, {
'cluster': 'oss-fuzz-linux-zone3-worker',
'cpu_count': 1,
'gce_zone': 'us-central1-d',
}],
}, proj4.to_dict())
proj5 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj5').get()
self.assertIsNotNone(proj5)
self.assertDictEqual({
'name':
'proj5',
'clusters': [{
'cluster': 'oss-fuzz-linux-zone2-pre',
'cpu_count': 99,
'gce_zone': 'us-east2-a',
}, {
'cluster': 'oss-fuzz-linux-zone3-worker',
'cpu_count': 1,
'gce_zone': 'us-central1-d',
}],
}, proj5.to_dict())
proj6 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj6').get()
self.assertIsNotNone(proj6)
self.assertDictEqual({
'name':
'proj6',
'clusters': [{
'cluster': 'oss-fuzz-linux-zone3-worker-high-end',
'cpu_count': 2,
'gce_zone': 'us-central1-d',
}],
}, proj6.to_dict())
old_proj = ndb.Key(data_types.OssFuzzProjectInfo, 'old_proj').get()
self.assertIsNone(old_proj)
mock_bot_manager = self.mock.BotManager('clusterfuzz-external',
'us-east2-a')
# proj1: new project.
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj1').create.assert_called_with(
expected_instance_template('clusterfuzz-external',
'external-pre-zone2', 'proj1'))
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj1').create.assert_called_with(
'oss-fuzz-linux-zone2-pre-proj1',
'oss-fuzz-linux-zone2-pre-proj1',
size=100,
wait_for_instances=False)
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj1').resize.assert_not_called()
# proj2: already exists. needs a resize. old cluster should be deleted.
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj2').create.assert_not_called()
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj2').delete.assert_not_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj2').create.assert_not_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj2').delete.assert_not_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj2').resize.assert_called_with(
200, wait_for_instances=False)
mock_bot_manager.instance_template(
'old-cluster-proj2').delete.assert_called()
mock_bot_manager.instance_group('old-cluster-proj2').delete.assert_called()
# proj3: already exists. no changes needed.
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj3').delete.assert_not_called()
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj3').create.assert_not_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj3').create.assert_not_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj3').resize.assert_not_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj3').delete.assert_not_called()
# proj4: needs a template update (version change).
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj4').delete.assert_called()
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj4').create.assert_called_with(
expected_instance_template('clusterfuzz-external',
'external-pre-zone2', 'proj4'))
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj4').delete.assert_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj4').create.assert_called_with(
'oss-fuzz-linux-zone2-pre-proj4',
'oss-fuzz-linux-zone2-pre-proj4',
size=99,
wait_for_instances=False)
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj4').resize.assert_not_called()
# proj5: needs a template update (disk size change).
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj5').delete.assert_called()
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-proj5').create.assert_called_with(
expected_instance_template(
'clusterfuzz-external',
'external-pre-zone2',
'proj5',
disk_size_gb=10))
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj5').delete.assert_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj5').create.assert_called_with(
'oss-fuzz-linux-zone2-pre-proj5',
'oss-fuzz-linux-zone2-pre-proj5',
size=99,
wait_for_instances=False)
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-proj5').resize.assert_not_called()
# proj6: high end project.
for j in range(1, 6):
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone3-worker-high-end-proj' +
str(j)).create.assert_not_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone3-worker-high-end-proj6').create.assert_called()
# old_proj: deleted.
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-old-proj').create.assert_not_called()
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone2-pre-old-proj').delete.assert_called()
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone2-pre-old-proj').delete.assert_called()
# host instances: created.
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone3-host').create.assert_called_with(
expected_host_instance_template('clusterfuzz-external',
'host-zone3'))
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone3-host').create.assert_called_with(
'oss-fuzz-linux-zone3-host',
'oss-fuzz-linux-zone3-host',
size=2,
wait_for_instances=False)
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone3-host-high-end').create.assert_called_with(
'oss-fuzz-linux-zone3-host-high-end',
'oss-fuzz-linux-zone3-host-high-end',
size=1,
wait_for_instances=False)
# Worker instances: created.
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone3-worker-proj1').create.assert_called_with(
expected_instance_template(
'clusterfuzz-external',
'worker-zone3',
'proj1',
service_account='proj1@serviceaccount.com',
tls_cert=True))
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone3-worker-proj1').create.assert_called_with(
'oss-fuzz-linux-zone3-worker-proj1',
'oss-fuzz-linux-zone3-worker-proj1',
size=1,
wait_for_instances=False)
mock_bot_manager.instance_template(
'oss-fuzz-linux-zone3-worker-proj2').create.assert_called_with(
expected_instance_template(
'clusterfuzz-external',
'worker-zone3',
'proj2',
service_account='proj2@serviceaccount.com',
tls_cert=True))
mock_bot_manager.instance_group(
'oss-fuzz-linux-zone3-worker-proj2').create.assert_called_with(
'oss-fuzz-linux-zone3-worker-proj2',
'oss-fuzz-linux-zone3-worker-proj2',
size=4,
wait_for_instances=False)
six.assertCountEqual(self, [{
'instance_num': 0,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj1-0001',
'project_name': u'proj1',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 1,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj2-0001',
'project_name': u'proj2',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 2,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj2-0002',
'project_name': u'proj2',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 3,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj2-0003',
'project_name': u'proj2',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 4,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj2-0004',
'project_name': u'proj2',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 5,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0001',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 6,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0002',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 7,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0003',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-abcd'
}, {
'instance_num': 0,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0004',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 1,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0005',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 2,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0006',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 3,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0007',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 4,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0008',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 5,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0009',
'project_name': u'proj3',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 6,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj4-0001',
'project_name': u'proj4',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 7,
'worker_name': u'oss-fuzz-linux-zone3-worker-proj5-0001',
'project_name': u'proj5',
'host_name': u'oss-fuzz-linux-zone3-host-efgh'
}, {
'instance_num': 0,
'worker_name': u'oss-fuzz-linux-zone3-worker-high-end-proj6-0001',
'project_name': u'proj6',
'host_name': u'oss-fuzz-linux-zone3-host-high-end-1'
}, {
'instance_num': 1,
'worker_name': u'oss-fuzz-linux-zone3-worker-high-end-proj6-0002',
'project_name': u'proj6',
'host_name': u'oss-fuzz-linux-zone3-host-high-end-1'
}], [
assignment.to_dict()
for assignment in data_types.HostWorkerAssignment.query()
])
class OssFuzzDistributeCpusTest(unittest.TestCase):
"""Tests OSS-Fuzz CPU distribution."""
def test_equal(self):
"""Tests for each project receiving equal share."""
projects = [
data_types.OssFuzzProject(name='proj1', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj2', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj3', cpu_weight=1.0),
]
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 30)
self.assertListEqual([10, 10, 10], result)
def test_equal_uneven(self):
"""Tests for each project receiving equal share with an uneven division."""
projects = [
data_types.OssFuzzProject(name='proj1', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj2', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj3', cpu_weight=1.0),
]
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 31)
self.assertListEqual([11, 10, 10], result)
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 32)
self.assertListEqual([11, 11, 10], result)
def test_weight_preference(self):
"""Tests that remainders are given to projects with higher weights
first.
"""
projects = [
data_types.OssFuzzProject(name='proj1', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj2', cpu_weight=1.01),
data_types.OssFuzzProject(name='proj3', cpu_weight=1.1),
]
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 4)
self.assertListEqual([1, 1, 2], result)
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 5)
self.assertListEqual([1, 2, 2], result)
def test_not_enough(self):
"""Tests allocation with not enough CPUs."""
projects = [
data_types.OssFuzzProject(name='proj1', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj2', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj3', cpu_weight=1.0),
]
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 1)
self.assertListEqual([1, 0, 0], result)
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 2)
self.assertListEqual([1, 1, 0], result)
def test_minimum(self):
"""Tests that projects are given a minimum share."""
projects = [
data_types.OssFuzzProject(name='proj1', cpu_weight=0.0),
data_types.OssFuzzProject(name='proj2', cpu_weight=0.0),
data_types.OssFuzzProject(name='proj3', cpu_weight=0.0),
]
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 3)
self.assertListEqual([1, 1, 1], result)
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 10)
self.assertListEqual([4, 3, 3], result)
def test_maximum(self):
"""Tests that projects are capped at the maximum share."""
projects = [
data_types.OssFuzzProject(name='proj1', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj2', cpu_weight=1.0),
data_types.OssFuzzProject(name='proj3', cpu_weight=1.0),
]
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 10000)
self.assertListEqual([1000, 1000, 1000], result)
def test_primes(self):
"""Test a bunch of different distributions."""
projects = [
data_types.OssFuzzProject(name='proj1', cpu_weight=2.0),
data_types.OssFuzzProject(name='proj2', cpu_weight=3.0),
data_types.OssFuzzProject(name='proj3', cpu_weight=5.0),
data_types.OssFuzzProject(name='proj4', cpu_weight=7.0),
data_types.OssFuzzProject(name='proj5', cpu_weight=11.0),
]
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 101)
self.assertListEqual([7, 10, 18, 26, 40], result)
self.assertEqual(101, sum(result))
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 887)
self.assertListEqual([63, 95, 158, 222, 349], result)
self.assertEqual(887, sum(result))
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 2741)
self.assertListEqual([214, 313, 509, 705, 1000], result)
self.assertEqual(2741, sum(result))
result = manage_vms.OssFuzzClustersManager(
'clusterfuzz-external').distribute_cpus(projects, 3571)
self.assertListEqual([356, 483, 738, 994, 1000], result)
self.assertEqual(3571, sum(result))
@test_utils.with_cloud_emulators('datastore')
class AssignHostWorkerTest(unittest.TestCase):
"""Tests host -> worker assignment."""
def test_assign_keep_existing(self):
"""Test that assignment keeps existing assignments."""
host_names = ['host']
worker_instances = [
manage_vms.WorkerInstance(name='worker-proj-0', project='proj'),
manage_vms.WorkerInstance(name='worker-proj-1', project='proj'),
manage_vms.WorkerInstance(name='worker-proj-2', project='proj'),
manage_vms.WorkerInstance(name='worker-proj-3', project='proj'),
manage_vms.WorkerInstance(name='worker-proj-4', project='proj'),
manage_vms.WorkerInstance(name='worker-proj-5', project='proj'),
manage_vms.WorkerInstance(name='worker-proj-6', project='proj'),
manage_vms.WorkerInstance(name='worker-proj-7', project='proj'),
]
data_types.HostWorkerAssignment(
host_name='host',
instance_num=2,
worker_name='worker-proj-6',
project_name='proj',
id='host-2').put()
data_types.HostWorkerAssignment(
host_name='host',
instance_num=3,
worker_name='worker-proj-1',
project_name='proj',
id='host-3').put()
data_types.HostWorkerAssignment(
host_name='host',
instance_num=0,
worker_name='worker-nonexistent-1',
project_name='nonexistent',
id='host-0').put()
manager = manage_vms.OssFuzzClustersManager('clusterfuzz-external')
new_assignments = manager.do_assign_hosts_to_workers(
host_names, worker_instances, 8)
self.assertListEqual([
{
'host_name': u'host',
'instance_num': 0,
'project_name': 'proj',
'worker_name': 'worker-proj-0'
},
{
'host_name': u'host',
'instance_num': 1,
'project_name': 'proj',
'worker_name': 'worker-proj-2'
},
{
'host_name': u'host',
'instance_num': 4,
'project_name': 'proj',
'worker_name': 'worker-proj-3'
},
{
'host_name': u'host',
'instance_num': 5,
'project_name': 'proj',
'worker_name': 'worker-proj-4'
},
{
'host_name': u'host',
'instance_num': 6,
'project_name': 'proj',
'worker_name': 'worker-proj-5'
},
{
'host_name': u'host',
'instance_num': 7,
'project_name': 'proj',
'worker_name': 'worker-proj-7'
},
], [assignment.to_dict() for assignment in new_assignments])
| [
2,
15069,
13130,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 2.01275 | 14,824 |
# https://github.com/jeongukjae/terminal-palette/blob/master/README.md
from terminal_palette import Palette
pal = Palette()
class Symbol:
"""
# Symbol
An object that encapsulates properties of a symbol with color and decorator.
"""
def get(self, char=None, fg=None, bg=None, *decos):
"""
Returns the symbol as a formatted string. Any properties passed as arguments will override the ones from this symbol.
"""
if char is None:
char = self.char
if fg is None:
fg = self.fg
if bg is None:
bg = self.bg
if len(decos) == 0:
decos = self.decos
# reset formattings before it
symbol = pal.reset(char)
# foreground coloring
foreground = {
'default': pal.default(symbol),
'red': pal.red(symbol),
'black': pal.black(symbol),
'blue': pal.blue(symbol),
'cyan': pal.cyan(symbol),
'green': pal.green(symbol),
'magenta': pal.magenta(symbol),
'white': pal.white(symbol),
'yellow': pal.yellow(symbol),
'bright_black': pal.bright_black(symbol),
'bright_blue': pal.bright_blue(symbol),
'bright_cyan': pal.bright_cyan(symbol),
'bright_green': pal.bright_green(symbol),
'bright_magenta': pal.bright_magenta(symbol),
'bright_red': pal.bright_red(symbol),
'bright_white': pal.bright_white(symbol),
'bright_yellow': pal.bright_yellow(symbol)
}
# print(foreground[fg])
symbol = foreground[fg]
# background coloring
background = {
'default': pal.bg_default(symbol),
'red': pal.bg_red(symbol),
'black': pal.bg_black(symbol),
'blue': pal.bg_blue(symbol),
'cyan': pal.bg_cyan(symbol),
'green': pal.bg_green(symbol),
'magenta': pal.bg_magenta(symbol),
'white': pal.bg_white(symbol),
'yellow': pal.bg_yellow(symbol),
'bright_black': pal.bg_bright_black(symbol),
'bright_blue': pal.bg_bright_blue(symbol),
'bright_cyan': pal.bg_bright_cyan(symbol),
'bright_green': pal.bg_bright_green(symbol),
'bright_magenta': pal.bg_bright_magenta(symbol),
'bright_red': pal.bg_bright_red(symbol),
'bright_white': pal.bg_bright_white(symbol),
'bright_yellow': pal.bg_bright_yellow(symbol),
}
# print(background[bg])
symbol = background[bg]
# decorations
if 'bold' in decos:
pal.bold(symbol)
if 'underline' in decos:
pal.underline(symbol)
if 'reversed' in decos:
pal.reversed(symbol)
return symbol
| [
2,
3740,
1378,
12567,
13,
785,
14,
18015,
506,
2724,
73,
3609,
14,
23705,
282,
12,
18596,
5857,
14,
2436,
672,
14,
9866,
14,
15675,
11682,
13,
9132,
198,
198,
6738,
12094,
62,
18596,
5857,
1330,
3175,
5857,
198,
198,
18596,
796,
317... | 1.997909 | 1,435 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2020 anqi.huang@outlook.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import requests
import logging
import time
import sys
import os
import base64
import hashlib
from threading import Thread
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
DEBUG = sys.flags.debug or 'pydevd' in sys.modules
TEST = 'PYTEST_CURRENT_TEST' in os.environ
# https://work.weixin.qq.com/help?person_id=1&doc_id=13376
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
66,
8,
12131,
281,
4060... | 3.087613 | 331 |
import unittest
import dolphindb as ddb
import numpy as np
import pandas as pd
from setup import HOST, PORT, WORK_DIR, DATA_DIR
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pandas.testing import assert_series_equal
from pandas.testing import assert_frame_equal
if __name__ == '__main__':
unittest.main() | [
201,
198,
11748,
555,
715,
395,
201,
198,
11748,
288,
10196,
521,
65,
355,
288,
9945,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
6738,
9058,
1330,
367,
10892,
11,
350,
9863,
11,
309... | 2.848 | 125 |
import jsonpickle
from typing import Dict, Union, Optional, List, Type
from Domain.entity import Entity
from Repository.repository import Repository
| [
11748,
33918,
27729,
293,
198,
6738,
19720,
1330,
360,
713,
11,
4479,
11,
32233,
11,
7343,
11,
5994,
198,
198,
6738,
20021,
13,
26858,
1330,
20885,
198,
6738,
1432,
13264,
13,
260,
1930,
37765,
1330,
1432,
13264,
628
] | 3.973684 | 38 |
import sys
import qtvscodestyle as qtvsc
from qtvscodestyle.examples.widget_gallery.ui.main_ui import UI
from qtvscodestyle.qtpy.QtCore import Qt, Slot
from qtvscodestyle.qtpy.QtWidgets import QApplication, QColorDialog, QFileDialog, QMainWindow
if __name__ == "__main__":
app = QApplication(sys.argv)
# Fix the svg icon display becoming low quality in Qt5.
# PyQt6 doesn't have attribute AA_UseHighDpiPixmaps.
if hasattr(Qt.ApplicationAttribute, "AA_UseHighDpiPixmaps"):
app.setAttribute(Qt.ApplicationAttribute.AA_UseHighDpiPixmaps) # type: ignore
win = WidgetGallery()
win.menuBar().setNativeMenuBar(False)
app.setStyleSheet(qtvsc.load_stylesheet(qtvsc.Theme.DARK_VS))
win.show()
app.exec()
| [
11748,
25064,
198,
198,
11748,
10662,
14981,
1416,
375,
10992,
355,
10662,
14981,
1416,
198,
6738,
10662,
14981,
1416,
375,
10992,
13,
1069,
12629,
13,
42655,
62,
24460,
13,
9019,
13,
12417,
62,
9019,
1330,
12454,
198,
6738,
10662,
14981,... | 2.58885 | 287 |
import sys
import numpy as np
from PyQt5.QtWidgets import QWidget, QApplication, QPushButton, QColorDialog, QInputDialog, QMenu
from PyQt5.QtGui import QPainter, QColor, QPen, QBrush
from PyQt5.QtCore import Qt, QTimer
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = GameOfLife(70, 40, 15)
sys.exit(app.exec_())
| [
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
38300,
11,
1195,
23416,
11,
1195,
49222,
21864,
11,
1195,
10258,
44204,
11,
1195,
20560,
44204,
11,
1195,
2338... | 2.47482 | 139 |
"""
Created on 17 Jun 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
eth0: connected to Wired connection 1
"TP-LINK USB 10/100/1000 LAN"
ethernet (r8152), 98:DE:D0:04:9B:CC, hw, mtu 1500
ip4 default
inet4 192.168.1.88/24
inet6 fe80::131d:325a:f7bd:e3e/64
wlan0: connected to TP-Link_0F04
"Broadcom "
wifi (device), B8:27:EB:56:50:8F, hw, mtu 1500
inet4 192.168.1.122/24
inet6 fe80::212a:9d31:4b3e:59c/64
"""
import re
import sys
import time
from collections import OrderedDict
from subprocess import Popen, TimeoutExpired, PIPE
from scs_core.data.json import JSONable
# --------------------------------------------------------------------------------------------------------------------
class NMCLi(JSONable):
"""
classdocs
"""
TIMEOUT = 20.0 # seconds (no longer, or NMCLI will timeout itself)
RESTART_WAIT = 60.0 # seconds
# ----------------------------------------------------------------------------------------------------------------
@classmethod
@classmethod
# ----------------------------------------------------------------------------------------------------------------
@classmethod
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, connections):
"""
Constructor
"""
self.__connections = connections # dictionary
# ----------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------
@property
# ----------------------------------------------------------------------------------------------------------------
| [
37811,
198,
41972,
319,
1596,
7653,
13130,
198,
198,
31,
9800,
25,
31045,
3944,
2364,
357,
1671,
36909,
13,
6667,
2364,
31,
35782,
1073,
5773,
4234,
13,
785,
8,
198,
198,
2788,
15,
25,
5884,
284,
39721,
4637,
352,
198,
220,
220,
220... | 3.43705 | 556 |
# -*- coding: utf-8 -*-
if __name__ == "__main__":
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 1.969697 | 33 |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:annorxiver]
# language: python
# name: conda-env-annorxiver-py
# ---
# # Exploratory Data Analysis-BioRxiv
# This notebook is designed to generate descriptive statistics for a snapshot of the BioRxiv repository. The following information is obtained:
# 1. if the article is a research article
# 2. if the article is a new, contradictory, or confirmatory analysis
# 3. the category assigned to each research article (pi self assigns)
# 4. the type of section headers contain in each research article
# ## Load the environment to parse BioRxiv
# +
from pathlib import Path
import re
from itertools import product
import lxml.etree as ET
import pandas as pd
import plotnine as p9
from tqdm import tqdm_notebook
# -
biorxiv_files = Path("../biorxiv_articles").rglob("*.xml")
# ## Parse BioRxiv
if (
not Path("output/biorxiv_article_metadata.tsv").exists()
and not Path("output/biorxiv_article_sections.tsv").exists()
):
article_metadata = []
article_sections = []
type_mapper = {
"author-type": "author_type",
"heading": "heading",
"hwp-journal-coll": "category",
}
xml_parser = ET.XMLParser(encoding="UTF-8", recover=True)
for file in tqdm_notebook(biorxiv_files):
article = file.with_suffix("").name
root = ET.parse(open(file, "rb"), parser=xml_parser).getroot()
# Grab the subject category
metadata = {
type_mapper[x.attrib["subj-group-type"]]: x.getchildren()[0].text.lower()
for x in root.xpath("//subj-group")
}
metadata.update(
{"document": f"{article}.xml", "doi": root.xpath("//article-id")[0].text}
)
article_metadata.append(metadata)
# Grab the section titles
section_objs = list(
filter(
lambda x: "id" in x.attrib
and re.search(r"s[\d]+$", x.attrib["id"]) is not None,
root.xpath("//sec"),
)
)
title_objs = list(map(lambda x: x.xpath("title//text()"), section_objs))
title_objs = list(filter(lambda x: len(x) > 0, title_objs))
# edge case in the xml where
# a tag contains the following: <title>A<sc>bstract</sc></title>
# why is there a <sc> tag?
if any(list(map(lambda x: len(x) > 1, title_objs))):
# filter out weird characters ⓘ
# cant think of a better way to handle these types of edge cases
title_objs = list(
map(
lambda headers: list(filter(lambda token: token != "ⓘ", headers)),
title_objs,
)
)
title_objs = list(
map(lambda x: x[0] + x[1] if len(x) > 1 else x, title_objs)
)
abstract_section = root.xpath("//abstract/title//text()")
if len(abstract_section) > 0:
# in case of a parse error that splits A from bstract
if len(abstract_section) > 1:
abstract_section = ["".join(abstract_section)]
title_objs = title_objs + [abstract_section]
title_objs = list(map(lambda x: x[0].rstrip().lower(), title_objs))
article_sections += list(
map(
lambda x: {"section": header_group_mapper(x[0]), "document": x[1]},
product(title_objs, [article]),
)
)
# +
if not Path("output/biorxiv_article_metadata.tsv").exists():
metadata_df = (
pd.DataFrame.from_records(article_metadata)
.fillna({"category": "none", "author_type": "none", "heading": "none"})
.assign(
category=lambda x: x.category.apply(
lambda x: " ".join(x.split("_")) if "_" in x else x
)
)
.replace(
{
"heading": {
"bioinformatics": "none",
"genomics": "none",
"zoology": "none",
"evolutionary biology": "none",
"animal behavior and cognition": "none",
"ecology": "none",
"genetics": "none",
}
}
)
)
metadata_df.to_csv("output/biorxiv_article_metadata.tsv", sep="\t", index=False)
else:
metadata_df = pd.read_csv("output/biorxiv_article_metadata.tsv", sep="\t")
metadata_df.head()
# +
if not Path("output/biorxiv_article_sections.tsv").exists():
sections_df = pd.DataFrame.from_records(article_sections)
sections_df.to_csv("output/biorxiv_article_sections.tsv", sep="\t", index=False)
else:
sections_df = pd.read_csv("output/biorxiv_article_sections.tsv", sep="\t")
sections_df.head()
# -
# # Regular Research Articles?
# BioRxiv claims that each article should be a research article. The plot below mainly confirms that statement.
g = (
p9.ggplot(metadata_df, p9.aes(x="author_type"))
+ p9.geom_bar(
size=10,
fill="#253494",
)
+ p9.theme_seaborn(context="paper", style="ticks", font="Arial", font_scale=1)
)
print(g)
metadata_df["author_type"].value_counts()
# # BioRxiv Research Article Categories
# Categories assigned to each research article. Neuroscience dominates majority of the articles as expected.
# +
category_list = metadata_df.category.value_counts().index.tolist()[::-1]
# plot nine doesn't implement reverse keyword for scale x discrete
# ugh...
g = (
p9.ggplot(metadata_df, p9.aes(x="category"))
+ p9.geom_bar(size=10, fill="#253494", position=p9.position_dodge(width=3))
+ p9.scale_x_discrete(limits=category_list)
+ p9.coord_flip()
+ p9.theme_seaborn(context="paper", style="ticks", font="Arial", font_scale=1)
+ p9.theme(text=p9.element_text(size=12))
)
g.save("output/figures/preprint_category.svg")
g.save("output/figures/preprint_category.png", dpi=300)
print(g)
# -
metadata_df["category"].value_counts()
# # New, Confirmatory, Contradictory Results?
# +
heading_list = metadata_df.heading.value_counts().index.tolist()[::-1]
g = (
p9.ggplot(metadata_df, p9.aes(x="heading"))
+ p9.geom_bar(size=10, fill="#253494")
+ p9.scale_x_discrete(limits=heading_list)
+ p9.coord_flip()
+ p9.theme_seaborn(context="paper", style="ticks", font="Arial", font_scale=1)
)
g.save("output/figures/preprint_headings.png", dpi=500)
print(g)
# -
metadata_df["heading"].value_counts()
# # BioRxiv Section Articles
# +
section_list = sections_df.section.value_counts()
section_list = section_list[section_list > 800].index.to_list()[::-1]
g = (
p9.ggplot(sections_df[sections_df.section.isin(section_list)])
+ p9.aes(x="section")
+ p9.geom_bar(position="dodge", fill="#253494")
+ p9.scale_x_discrete(limits=section_list)
+ p9.coord_flip()
+ p9.theme_seaborn(context="paper", style="ticks", font="Arial", font_scale=1)
)
g.save("output/figures/preprint_sections.png", dpi=500)
print(g)
# -
section_list = sections_df.section.value_counts()
section_list[section_list > 800]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
11420,
198,
2,
474,
929,
88,
353,
25,
198,
2,
220,
220,
474,
929,
88,
5239,
25,
198,
2,
220,
220,
220,
220,
17519,
25,
20966,
2047,
65,
11,
9078,
198,
2,
220,... | 2.222425 | 3,282 |
from django.urls import path, include
urlpatterns = [
path('blog/', include('BlogManage.blog.urls')),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
14036,
14,
3256,
2291,
10786,
42383,
5124,
496,
13,
14036,
13,
6371,
82,
11537,
828,
198,
60,
198
] | 2.658537 | 41 |
import math
if __name__ == "__main__":
main()
| [
11748,
10688,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.409091 | 22 |
import logging
import subprocess
from collections import namedtuple
import enum
import functools
from ssg.constants import MULTI_PLATFORM_MAPPING
from ssg.constants import PRODUCT_TO_CPE_MAPPING
from ssg.constants import FULL_NAME_TO_PRODUCT_MAPPING
Scenario_run = namedtuple(
"Scenario_run",
("rule_id", "script"))
Scenario_conditions = namedtuple(
"Scenario_conditions",
("backend", "scanning_mode", "remediated_by", "datastream"))
IGNORE_KNOWN_HOSTS_OPTIONS = (
"-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=/dev/null",
)
@functools.total_ordering
| [
11748,
18931,
198,
11748,
850,
14681,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
11748,
33829,
198,
11748,
1257,
310,
10141,
198,
6738,
37786,
70,
13,
9979,
1187,
1330,
337,
16724,
40,
62,
6489,
1404,
21389,
62,
44,
24805,
2751,
198,... | 2.666667 | 225 |
# from flask import Flask
import random
from marshmallow import Schema, fields, pre_load, validate
from flask_marshmallow import Marshmallow
from datetime import datetime
# from app import db
from flask_sqlalchemy import SQLAlchemy
from ..misc import funcs as funcs
ma = Marshmallow()
db = SQLAlchemy()
# as a challenge contains multiple exercises and an exercise can be part of many challenges, we have a many-to-many relationship here.
# so we implement like suggeste here: https://flask-sqlalchemy.palletsprojects.com/en/2.x/models/
challenge_exercises = db.Table('challenge_exercises',
db.Column('challenge_id', db.String(), db.ForeignKey(
'challenges.id'), primary_key=True),
db.Column('exercise_id', db.String(), db.ForeignKey(
'exercises.id'), primary_key=True)
)
# in this case we implement like described in the documentation for SQLAlchemy v1.3 (which is also valid for 1.4): https://docs.sqlalchemy.org/en/13/orm/basic_relationships.html#association-pattern
| [
2,
422,
42903,
1330,
46947,
198,
11748,
4738,
198,
6738,
22397,
42725,
1330,
10011,
2611,
11,
7032,
11,
662,
62,
2220,
11,
26571,
198,
6738,
42903,
62,
76,
5406,
42725,
1330,
9786,
42725,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
... | 2.538803 | 451 |
#!/usr/bin/python
#
# Copyright (c) 2013 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import errno
import itertools
import logging
import os
import time
import coloredlogs
_LOG_LEVELS = {
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"debug": logging.DEBUG,
}
_LOG_FORMAT = "%(asctime)s %(name)s %(levelname)s %(message)s"
_LOG_ENABLED = False
def add_argument_group(parser, default="info"):
"""Adds an option-group to an OptionParser object, with options
pertaining to logging. Note that 'initialize' expects the config
object to have these options."""
group = parser.add_argument_group("Logging")
group.add_argument(
"--log-file", default=None, help="Write log-messages to this file.",
)
group.add_argument(
"--log-level",
default=default,
choices=("info", "warning", "error", "debug"),
help="Log messages at the specified level [%(default)s]",
)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
15069,
357,
66,
8,
2211,
17722,
7750,
3059,
84,
4835,
1279,
44,
1134,
7750,
14874,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
... | 3.172144 | 639 |
from typing import Optional, Callable, Mapping, Any, List
import abc
import torch as tc
from drl.agents.architectures.abstract import Architecture
class StatelessArchitecture(Architecture, metaclass=abc.ABCMeta):
"""
Abstract class for stateless (i.e., memoryless) architectures.
"""
def __init__(
self,
w_init: Optional[Callable[[tc.Tensor], None]],
b_init: Optional[Callable[[tc.Tensor], None]],
**kwargs: Mapping[str, Any]):
"""
Args:
w_init (Optional[Callable[[torch.Tensor], None]]): Weight initializer.
b_init (Optional[Callable[[torch.Tensor], None]]): Bias initializer.
**kwargs (Mapping[str, Any]): Keyword arguments.
"""
super().__init__()
self._w_init = w_init
self._b_init = b_init
@property
@abc.abstractmethod
def input_shape(self) -> List[int]:
"""
Returns:
Input shape without batch dimension.
"""
@property
@abc.abstractmethod
def output_dim(self) -> int:
"""
Returns:
Dimensionality of output features.
"""
@abc.abstractmethod
def forward(self, x, **kwargs):
"""
Forward method.
"""
class HeadEligibleArchitecture(StatelessArchitecture, metaclass=abc.ABCMeta):
"""
Abstract class for StatelessArchitecture classes
that can be used as prediction heads.
"""
def __init__(
self,
input_dim: int,
output_dim: int,
w_init: Callable[[tc.Tensor], None],
b_init: Callable[[tc.Tensor], None],
**kwargs: Mapping[str, Any]):
"""
Args:
input_dim: Input dimensionality.
Note that for HeadEligibleArchitectures, the input is assumed
to be one-dimensional.
output_dim: Output dimensionality.
w_init: Weight initializer.
b_init: Bias initializer.
**kwargs: Keyword arguments.
"""
super().__init__(w_init, b_init)
self._input_dim = input_dim
self._output_dim = output_dim
@property
@property
| [
6738,
19720,
1330,
32233,
11,
4889,
540,
11,
337,
5912,
11,
4377,
11,
7343,
198,
11748,
450,
66,
198,
198,
11748,
28034,
355,
37096,
198,
198,
6738,
1553,
75,
13,
49638,
13,
998,
5712,
942,
13,
397,
8709,
1330,
29778,
628,
198,
4871... | 2.171906 | 1,018 |
"""A fast channel state information parser for Intel, Atheros and Nexmon."""
from . import _csiread
class Intel(_csiread.Intel):
"""Parse CSI obtained using 'Linux 802.11n CSI Tool'.
Args:
file (str or None): CSI data file. If ``str``, ``read`` and ``readstp``
methods are allowed. If ``None``, ``seek`` and ``pmsg`` methods are
allowed.
nrxnum (int, optional): Number of receive antennas. Default: 3
ntxnum (int, optional): Number of transmit antennas. Default: 2
pl_size (int, optional): The size of payload to be used. Default: 0
if_report (bool, optional): Report the parsed result. Default: ``True``
bufsize (int, optional): The maximum amount of packets to be parsed.
If ``0`` and file is ``str``, all packets will be parsed. If ``0``
and file is ``None``, this parameter is ignored by `pmsg` method.
Default: 0
Attributes:
file (str, readonly): CSI data file
count (int, readonly): Count of 0xbb packets parsed
timestamp_low (ndarray): The low 32 bits of the NIC's 1 MHz clock. It
wraps about every 4300 seconds, or 72 minutes.
bfee_count (ndarray): The count of the total number of beamforming
measurements that have been recorded by the driver and sent to
userspace. The netlink channel between the kernel and userspace is
lossy, so these can be used to detect measurements that were
dropped in this pipe.
Nrx (ndarray): The number of antennas used to receive the packet.
Ntx (ndarray): The number of space/time streams transmitted.
rssi_a (ndarray): RSSI measured by the receiving NIC at the input to
antenna port A. This measurement is made during the packet preamble.
This value is in dB relative to an internal reference.
rssi_b (ndarray): See ``rssi_a``
rssi_c (ndarray): See ``rssi_a``
noise (ndarray): Noise
agc (ndarray): Automatic Gain Control (AGC) setting in dB
perm (ndarray): Tell us how the NIC permuted the signals from the 3
receive antennas into the 3 RF chains that process the measurements.
rate (ndarray): The rate at which the packet was sent, in the same
format as the ``rate_n_flags``.
csi (ndarray): The CSI itself, normalized to an internal reference.
It is a Count×30×Nrx×Ntx 4-D matrix where the second dimension is
across 30 subcarriers in the OFDM channel. For a 20 MHz-wide
channel, these correspond to about half the OFDM subcarriers, and
for a 40 MHz-wide channel, this is about one in every 4 subcarriers.
stp (ndarray): World timestamp recorded by the modified ``log_to_file``.
fc (ndarray): Frame control
dur (ndarray): Duration
addr_des (ndarray): Destination MAC address
addr_src (ndarray): Source MAC address
addr_bssid (ndarray): BSSID MAC address
seq (ndarray): Serial number of packet
payload (ndarray): MAC frame to be used
Examples:
>>> csifile = "../material/5300/dataset/sample_0x1_ap.dat"
>>> csidata = csiread.Intel(csifile, nrxnum=3, ntxnum=2, pl_size=10)
>>> csidata.read()
>>> csi = csidata.get_scaled_csi()
>>> print(csidata.csi.shape)
References:
1. `Linux 802.11n CSI Tool <https://dhalperi.github.io/linux-80211n-csitool/>`_
2. `linux-80211n-csitool-supplementary <https://github.com/dhalperi/linux-80211n-csitool-supplementary>`_
3. `Linux 802.11n CSI Tool-FAQ <https://dhalperi.github.io/linux-80211n-csitool/faq.html>`_
"""
def read(self):
"""Parse data if 0xbb and 0xc1 packets
Examples:
>>> csifile = "../material/5300/dataset/sample_0x1_ap.dat"
>>> csidata = csiread.Intel(csifile)
>>> csidata.read()
"""
super().read()
def seek(self, file, pos, num):
"""Read packets from a specific position
This method allows us to read different parts of different files
randomly. It could be useful in Machine Learning. However, it could be
very slow when reading files in HDD for the first time. For this case,
it is better to do a pre-read with ``read()`` first.
Args:
file (str): CSI data file.
pos (int): Position of file descriptor corresponding to the packet.
Currently, it must be returned by the function in
``example/csiseek.py``.
num (int): Number of packets to be read. ``num <= bufsize`` must be
true. If ``0``, all packets after ``pos`` will be read.
Examples:
>>> csifile = "../material/5300/dataset/sample_0x1_ap.dat"
>>> csidata = csiread.Intel(None, bufsize=16)
>>> for i in range(10):
>>> csidata.seek(csifile, 0, i+1)
>>> print(csidata.csi.shape)
"""
super().seek(file, pos, num)
def pmsg(self, data):
"""Parse message in real time
Args:
data (bytes): A bytes object representing the data received by udp
socket
Returns:
int: The status code. If ``0xbb`` and ``0xc1``, parse message
successfully. Otherwise, the ``data`` is not a CSI packet.
Examples:
>>> import socket
>>> import csiread
>>>
>>> csidata = csiread.Intel(None)
>>> with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
>>> s.bind(('127.0.0.1', 10011))
>>> while True:
>>> data, address_src = s.recvfrom(4096)
>>> code = csidata.pmsg(data)
>>> if code == 0xbb:
>>> print(csidata.csi.shape)
"""
return super().pmsg(data)
def readstp(self, endian='little'):
"""Parse timestamp recorded by the modified ``log_to_file``
``file.dat`` and ``file.datstp`` must be in the same directory.
Args:
endian (str): The byte order of ``file.datstp``, it can be
``little`` and ``big``. Default: ``little``
Returns:
int: Timestamp of the first packet.
Examples:
>>> csifile = "../material/5300/dataset/sample_0x1_ap.dat"
>>> csidata = csiread.Intel(csifile)
>>> first_stp = csidata.readstp()
>>> print(first_stp)
"""
return super().readstp(endian)
def get_total_rss(self):
"""Calculate the Received Signal Strength[RSS] in dBm from CSI
Examples:
>>> csifile = "../material/5300/dataset/sample_0x1_ap.dat"
>>> csidata = csiread.Intel(csifile)
>>> csidata.read()
>>> rssi = csidata.get_total_rss()
>>> print(rssi.shape)
"""
return super().get_total_rss()
def get_scaled_csi(self, inplace=False):
"""Convert CSI to channel matrix H
Args:
inplace (bool): Optionally do the operation in-place. Default: False
Returns:
ndarray: Channel matrix H
Examples:
>>> csifile = "../material/5300/dataset/sample_0x1_ap.dat"
>>> csidata = csiread.Intel(csifile)
>>> csidata.read()
>>> scaled_csi = csidata.get_scaled_csi(False)
>>> print(scaled_csi.shape)
>>> print("scaled_csi is csidata.csi: ", scaled_csi is csidata.csi)
"""
return super().get_scaled_csi(inplace)
def get_scaled_csi_sm(self, inplace=False):
"""Convert CSI to pure channel matrix H
This version undoes Intel's spatial mapping to return the pure MIMO
channel matrix H.
Args:
inplace (bool): Optionally do the operation in-place. Default: False
Returns:
ndarray: The pure MIMO channel matrix H.
Examples:
>>> csifile = "../material/5300/dataset/sample_0x1_ap.dat"
>>> csidata = csiread.Intel(csifile)
>>> csidata.read()
>>> scaled_csi_sm = csidata.get_scaled_csi_sm(False)
>>> print(scaled_csi.shape)
>>> print("scaled_csi_sm is csidata.csi: ", scaled_csi_sm is csidata.csi)
"""
return super().get_scaled_csi_sm(inplace)
def apply_sm(self, scaled_csi):
"""Undo the input spatial mapping
Args:
scaled_csi (ndarray): Channel matrix H.
Returns:
ndarray: The pure MIMO channel matrix H.
Examples:
>>> csifile = "../material/5300/dataset/sample_0x1_ap.dat"
>>> csidata = csiread.Intel(csifile)
>>> csidata.read()
>>> scaled_csi = csidata.get_scaled_csi()
>>> scaled_csi_sm = csidata.apply_sm(scaled_csi)
>>> print(scaled_csi_sm.shape)
"""
return super().apply_sm(scaled_csi)
class Atheros(_csiread.Atheros):
"""Parse CSI obtained using 'Atheros CSI Tool'.
Args:
file (str or None): CSI data file. If ``str``, ``read`` and ``readstp``
methods are allowed. If ``None``, ``seek`` and ``pmsg`` methods are
allowed.
nrxnum (int, optional): Number of receive antennas. Default: 3
ntxnum (int, optional): Number of transmit antennas. Default: 2
pl_size (int, optional): The size of payload to be used. Default: 0
tones (int, optional): The number of subcarrier. It can be 56 and 114.
Default: 56
if_report (bool, optional): Report the parsed result. Default: ``True``
bufsize (int, optional): The maximum amount of packets to be parsed.
If ``0`` and file is ``str``, all packets will be parsed. If ``0``
and file is ``None``, this parameter is ignored by ``pmsg`` method.
Default: 0
Attributes:
file (str, readonly): CSI data file
count (int, readonly): Count of CSI packets parsed
timestamp (ndarray): The time when packet is received, expressed in μs
csi_len (ndarray): The csi data length in the received data buffer,
expressed in bytes
tx_channel (ndarray): The center frequency of the wireless channel,
expressed in MHz
err_info (ndarray): The phy error code, set to 0 if correctly received
noise_floor (ndarray): The noise floor, expressed in dB. But it needs
to be update and is set to 0 in current version.
Rate (ndarray): The data rate of the received packet. Its value is a
unsigned 8 bit integer number and the mapping between this value
and the rate choice of 802.11 protocol
bandWidth (ndarray): The channel bandwidth. It is 20MHz if set to 0 and
40MHz if set to 1
num_tones (ndarray): The number of subcarrier that used for data
transmission.
nr (ndarray): Number of receiving antenna
nc (ndarray): Number of transmitting antenna
rsssi (ndarray): The rssi of combination of all active chains
rssi_1 (ndarray): The rssi of active chain 0
rssi_2 (ndarray): The rssi of active chain 1
rssi_3 (ndarray): The rssi of active chain 2
payload_len (ndarray): The payload length of received packet, expressed
in bytes.
csi (ndarray): CSI
payload (ndarray): MAC frame(MPDU) to be used
Examples:
>>> csifile = "../material/atheros/dataset/ath_csi_1.dat"
>>> csidata = csiread.Atheros(csifile, nrxnum=3, ntxnum=2, pl_size=10, tones=56)
>>> csidata.read(endian='little')
>>> print(csidata.csi.shape)
References:
1. `Atheros CSI Tool <https://wands.sg/research/wifi/AtherosCSI/>`_
2. `Atheros-CSI-Tool-UserSpace-APP <https://github.com/xieyaxiongfly/Atheros-CSI-Tool-UserSpace-APP>`_
3. `Atheros CSI Tool User Guide <https://wands.sg/research/wifi/AtherosCSI/document/Atheros-CSI-Tool-User-Guide.pdf>`_
"""
def read(self, endian='little'):
"""Parse data
Args:
endian (str): The byte order of ``file.dat``, it can be ``little``
and ``big``. Default: ``little``
Examples:
>>> csifile = "../material/atheros/dataset/ath_csi_1.dat"
>>> csidata = csiread.Atheros(csifile)
>>> csidata.read()
"""
super().read(endian)
def seek(self, file, pos, num, endian='little'):
"""Read packets from a specific position
This method allows us to read different parts of different files
randomly. It could be useful in Machine Learning. However, it could be
very slow when reading files in HDD for the first time. For this case,
it is better to do a pre-read with ``read()`` first.
Args:
file (str): CSI data file.
pos (int): Position of file descriptor corresponding to the packet.
Currently, it must be returned by the function in
`example/csiseek.py`.
num (int): Number of packets to be read. ``num <= bufsize`` must be
true. If ``0``, all packets after ``pos`` will be read.
endian (str): The byte order of ``file.dat``, it can be ``little``
and ``big``. Default: ``little``
Examples:
>>> csifile = "../material/atheros/dataset/ath_csi_1.dat"
>>> csidata = csiread.Atheros(None, bufsize=16)
>>> for i in range(10):
>>> csidata.seek(csifile, 0, i+1)
>>> print(csidata.csi.shape)
"""
super().seek(file, pos, num, endian)
def pmsg(self, data, endian='little'):
"""Parse message in real time
Args:
data (bytes): A bytes object representing the data received by udp
socket
endian (str): The byte order of ``file.dat``, it can be ``little``
and ``big``. Default: ``little``
Returns:
int: The status code. If ``0xff00``, parse message successfully.
Otherwise, the ``data`` is not a CSI packet.
Examples:
>>> import socket
>>> import csiread
>>>
>>> csidata = csiread.Atheros(None)
>>> with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
>>> s.bind(('127.0.0.1', 10011))
>>> while True:
>>> data, address_src = s.recvfrom(4096)
>>> code = csidata.pmsg(data)
>>> if code == 0xff00:
>>> print(csidata.csi.shape)
"""
return super().pmsg(data, endian)
def readstp(self, endian='little'):
"""Parse timestamp recorded by the modified ``recv_csi``
``file.dat`` and ``file.datstp`` must be in the same directory.
Args:
endian (str): The byte order of ``file.datstp``, it can be
``little`` and ``big``. Default: ``little``
Examples:
>>> csifile = "../material/atheros/dataset/ath_csi_1.dat"
>>> csidata = csiread.Atheros(csifile)
>>> first_stp = csidata.readstp()
>>> print(first_stp)
"""
return super().readstp(endian)
class Nexmon(_csiread.Nexmon):
"""Parse CSI obtained using 'nexmon_csi'.
Args:
file (str or None): CSI data file ``.pcap``. If ``str``, ``read``
methods is allowed. If ``None``, ``seek`` and ``pmsg`` methods are
allowed.
chip (str): WiFi Chip, it can be '4339', '43455c0', '4358' and '4366c0'.
bw (int): bandwidth, it can be 20, 40 and 80.
if_report (bool, optional): Report the parsed result. Default: `True`
bufsize (int, optional): The maximum amount of packets to be parsed. If
``0`` and file is ``str``, all packets will be parsed. If ``0`` and
file is ``None``, this parameter is ignored by `pmsg` method.
Default: 0
Attributes:
file (str, readonly): CSI data file
count (int, readonly): Count of csi packets parsed
chip (str, readonly): Chip type we set
bw (int, readonly): Bandwidth we set
nano (bool, readonly): nanosecond-resolution or not
sec (ndarray): Time the packet was captured
usec (ndarray): The microseconds when this packet was captured, as an
offset to ``sec`` if ``nano`` is True. The nanoseconds when the
packet was captured, as an offset to ``sec`` if ``nano`` is False.
caplen (ndarray): The number of bytes of packet data actually captured
and saved in the file
wirelen (ndarray): The length of the packet as it appeared on the
network when it was captured
magic (ndarray): Four magic bytes ``0x11111111``
src_addr (ndarray): Source MAC address
seq (ndarray): Sequence number of the Wi-Fi frame that triggered the
collection of the CSI contained in packets
core (ndarray): Core
spatial (ndarray): Spatial stream
chan_spec (ndarray): (unknown)
chip_version (ndarray): The chip version
csi (ndarray): CSI
Examples:
>>> csifile = "../material/nexmon/dataset/example.pcap"
>>> csidata = csiread.Nexmon(csifile, chip='4358', bw=80)
>>> csidata.read()
>>> print(csidata.csi.shape)
References:
1. `nexmon_csi <https://github.com/seemoo-lab/nexmon_csi>`_
2. `rdpcap <https://github.com/secdev/scapy/blob/master/scapy/utils.py>`_
3. `Libpcap File Format <https://wiki.wireshark.org/Development/LibpcapFileFormat>`_
"""
def read(self):
"""Parse data
Examples:
>>> csifile = "../material/nexmon/dataset/example.pcap"
>>> csidata = csiread.Nexmon(csifile, chip='4358', bw=80)
>>> csidata.read()
>>> print(csidata.csi.shape)
"""
super().read()
def seek(self, file, pos, num):
"""Read packets from specific position
This method allows us to read different parts of different files
randomly. It could be useful in Machine Learning. However, it could be
very slow when reading files in HDD for the first time. For this case,
it is better to use `read()` for a pre-read first.
Args:
file (str): CSI data file ``.pcap``.
pos (int): Position of file descriptor corresponding to the packet.
Currently, it must be returned by the function in
``example/csiseek.py``.
num (int): Number of packets to be read. ``num <= bufsize`` must be
true. If ``0``, all packets after ``pos`` will be read.
Examples:
>>> csifile = "../material/nexmon/dataset/example.pcap"
>>> csidata = csiread.Nexmon(None, chip='4358', bw=80, bufsize=4)
>>> for i in range(4):
>>> csidata.seek(csifile, 0, i+1)
>>> print(csidata.csi.shape)
"""
super().seek(file, pos, num)
def pmsg(self, data, endian='little'):
"""Parse message in real time
Args:
data (bytes): A bytes object representing the data received by raw
socket
endian (str): The byte order of ``file.dat``, it can be ``little``
and ``big``. Default: ``little``
Returns:
int: The status code. If ``0xf100``, parse message successfully.
Otherwise, the ``data`` is not a CSI packet.
Examples:
>>> import socket
>>> import csiread
>>>
>>> csidata = csiread.Nexmon(None, chip='4358', bw=80)
>>> with socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.htons(0x3)) as s:
>>> while True:
>>> data, address_src = s.recvfrom(4096)
>>> code = csidata.pmsg(data)
>>> if code == 0xf100:
>>> print(csidata.csi.shape)
"""
return super().pmsg(data, endian)
class AtherosPull10(Atheros):
"""Parse CSI obtained using 'Atheros CSI Tool' pull 10.
The same as Atheros
References:
1. `Atheros-CSI-Tool-UserSpace-APP pull 10 <https://github.com/xieyaxiongfly/Atheros-CSI-Tool-UserSpace-APP/pull/10>`_
"""
def read(self):
"""Parse data
Examples:
>>> csifile = "../material/atheros/dataset/ath_csi_1.dat"
>>> csidata = csiread.Atheros(csifile)
>>> csidata.read()
"""
with open(self.file, 'rb') as f:
endian = 'big' if f.read(1) == b'\xff' else 'little'
self.seek(self.file, 1, 0, endian)
class NexmonPull46(Nexmon):
"""Parse CSI obtained using 'nexmon_csi' pull 46.
Args:
See ``Nexmon``
Attributes:
rssi (ndarray): rssi
fc (ndarray): frame control
others: see ``Nexmon``
References:
1. `nexmon_csi pull 46 <https://github.com/seemoo-lab/nexmon_csi/pull/46>`_
"""
def seek(self, file, pos, num):
"""Read packets from specific position, see ``Nexmon.seek``"""
super().seek(file, pos, num)
self.__pull46()
def pmsg(self, data, endian='little'):
"""Parse message in real time
Args:
data (bytes): A bytes object representing the data received by raw
socket
endian (str): The byte order of ``file.dat``, it can be ``little``
and ``big``. Default: ``little``
Returns:
int: The status code. If ``0xf101``, parse message successfully.
Otherwise, the ``data`` is not a CSI packet.
"""
super().pmsg(data, endian)
self.__pull46()
return 0xf101
| [
37811,
32,
3049,
6518,
1181,
1321,
30751,
329,
8180,
11,
27751,
4951,
290,
14786,
2144,
526,
15931,
198,
198,
6738,
764,
1330,
4808,
6359,
557,
324,
628,
198,
4871,
8180,
28264,
6359,
557,
324,
13,
24123,
2599,
198,
220,
220,
220,
372... | 2.223987 | 9,947 |
import conftest # Add root path to sys.path
import os
import matplotlib.pyplot as plt
from PathPlanning.WavefrontCPP import wavefront_coverage_path_planner
wavefront_coverage_path_planner.do_animation = False
if __name__ == '__main__':
conftest.run_this_test(__file__)
| [
11748,
369,
701,
395,
220,
1303,
3060,
6808,
3108,
284,
25064,
13,
6978,
198,
11748,
28686,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
10644,
20854,
768,
13,
39709,
8534,
8697,
47,
1330,
6769,
8534,
62,
1... | 2.81 | 100 |
# Markdown builder
#
# Starts the .R script for Markdown generation
#
# Inputs:
# - Coverage_Track.tab
# - regions.bed
# - Non_synonymous_variants_summary.tab
#
# Output:
# - Run_report.html
#
# Parameters:
# None
rule make_markdown :
input:
"Non_synonymous_variants_summary.tab",
"Coverage_Track.tab"
output:
Rmd = "Run_report.html"
params:
bind = config["BIND"],
cont = config["CONT"]
shell:
"""
singularity exec -B {params.bind} {params.cont}\
R -e \
"rmarkdown::render('./modules/markdown_gen.Rmd', output_file='../{output.Rmd}')"
rm -rf Run_report_files
"""
| [
2,
2940,
2902,
27098,
198,
2,
198,
2,
220,
220,
50181,
262,
764,
49,
4226,
329,
2940,
2902,
5270,
198,
2,
198,
2,
220,
220,
23412,
82,
25,
198,
2,
220,
220,
220,
220,
220,
220,
532,
33998,
62,
24802,
13,
8658,
198,
2,
220,
220... | 2.082596 | 339 |
import pygame as pg
from screen import resource_path | [
11748,
12972,
6057,
355,
23241,
198,
6738,
3159,
1330,
8271,
62,
6978
] | 4.333333 | 12 |
import datetime
import enum
import hashlib
import pandas as pd
from faker import Faker
from sqlalchemy import BigInteger
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import Date
from sqlalchemy import DateTime
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import JSON
from sqlalchemy import String
from sqlalchemy import text
from sqlalchemy import TIMESTAMP
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
Base = declarative_base()
username = "etna"
password = "etna"
host = "localhost"
# port = 3306
DB_NAME = "mybookingservices"
engine = create_engine(
f"mysql+pymysql://{username}:{password}@{host}/",
)
with engine.connect() as conn:
conn.execute(f"CREATE DATABASE IF NOT EXISTS {DB_NAME}")
db_engine = create_engine(
f"mysql+pymysql://{username}:{password}@{host}/{DB_NAME}",
)
# Place de garage (25$)
# Ajout d'un lit bébé (sans frais additionnels)
# Pack romance (50$), doit être réservé avec deux jours d'avance
# Petit déjeuner (30$)
Hotels.addresses = relationship(
"Addresses",
order_by=Addresses.id,
back_populates="hotels",
uselist=False,
)
Hotels.rooms = relationship(
"Rooms",
order_by=Rooms.id,
back_populates="hotels",
)
Rooms.booking = relationship(
"Booking",
order_by=Booking.id,
back_populates="rooms",
)
Users.booking = relationship(
"Booking",
order_by=Booking.id,
back_populates="users",
)
PricePolicies.rooms = relationship(
"Rooms",
order_by=Rooms.id,
back_populates="price_policies",
)
Base.metadata.create_all(db_engine)
fake = Faker(["fr_FR"])
fake_us = Faker(["en_US"])
hotel_names = ['Carlton', 'Lutetia']
# Generate fake data for Hotel
hotel_data = []
for i in range(2):
name = hotel_names[i]
phone = fake.phone_number()
website = fake.uri()
description = fake.catch_phrase()
owner = fake.name()
row = (name, phone, website, description, owner)
hotel_data.append(row)
try:
print("[+] inserting data into hotels table")
query = "INSERT INTO `hotels` (`name`, `telephone`, `website`, `description`, \
`owner`) VALUES( % s, % s, % s, % s, % s)"
id = db_engine.execute(query, hotel_data)
except SQLAlchemyError as e:
error = str(e.__dict__["orig"])
print(error)
# Generate fake data for Address
address_data = []
for i in range(1, 3):
hotel_id = i
number = fake.building_number()
street = fake.street_name()
town = fake.city()
postal_code = fake.postcode()
row = (hotel_id, number, street, town, postal_code)
address_data.append(row)
try:
print("[+] inserting data into addresses table")
query = "INSERT INTO `addresses` (`hotel_id`, `number`, `street`, \
`town`, `postal_code`) VALUES(%s,%s,%s,%s,%s)"
id = db_engine.execute(query, address_data)
except SQLAlchemyError as e:
print(query)
error = str(e.__dict__["orig"])
print(error)
# Generate fake data for Rooms
rooms_data = [
(1, "S", 3, 720),
(1, "JS", 2, 500),
(1, "CD", 2, 300),
(1, "CS", 2, 150),
(1, "CS", 2, 150),
(2, "SR", 5, 1000),
(2, "SR", 5, 1000),
]
try:
print("[+] inserting data into rooms table")
query = "INSERT INTO `rooms` (`hotel_id`, `room`, `capacity`, `price`) \
VALUES(%s,%s,%s,%s)"
id = db_engine.execute(query, rooms_data)
except SQLAlchemyError as e:
error = str(e.__dict__["orig"])
print(error)
s = hashlib.sha3_224()
# Generate fake data for user
user_data = []
for i in range(2):
fake_pass = fake_us.password(length=12)
print(f"admin{i} password : {fake_pass}")
sha3_hashed_pass = s.update(fake_pass.encode())
first_name = fake.first_name()
last_name = fake.last_name()
role = 'ADMIN'
email = f"admin{i}@mybooking.services"
telephone = fake.phone_number()
password = s.hexdigest()
row = (first_name, last_name, role, email, telephone, password)
user_data.append(row)
for _ in range(498):
sha3_hashed_pass = s.update(fake_us.password(length=12).encode())
first_name = fake.first_name()
last_name = fake.last_name()
role = 'USER'
email = fake.ascii_free_email()
telephone = fake.phone_number()
password = s.hexdigest()
row = (first_name, last_name, role, email, telephone, password)
user_data.append(row)
try:
print("[+] inserting data into users table")
query = "INSERT INTO `users` (`first_name`, `last_name`, `role`, `email`, \
`telephone`, `password`) VALUES(%s,%s,%s,%s,%s,%s)"
id = db_engine.execute(query, user_data)
except SQLAlchemyError as e:
error = str(e.__dict__["orig"])
print(error)
# Generate fake data for Booking
odb = '{"parking": 1, "baby_cot": 1, "romance_pack": 1, "breakfast": 1}'
booking_data = [
(3, 54, 2, odb, 1095, '2021-05-21', '2021-05-24'),
(7, 138, 5, odb, 3055, '2021-05-14', '2021-05-17'),
(6, 417, 1, odb, 5900, '2021-07-05', '2021-07-11'),
(1, 342, 2, odb, 933, '2021-07-16', '2021-07-17'),
(4, 94, 2, odb, 255, '2021-07-19', '2021-07-20'),
(2, 224, 2, odb, 555, '2021-07-21', '2021-07-22'),
(1, 78, 3, odb, 2265, '2021-09-18', '2021-09-20'),
(4, 19, 2, odb, 240, '2021-09-29', '2021-09-30'),
(6, 318, 2, odb, 2255, '2021-10-08', '2021-10-10'),
(3, 241, 3, odb, 375, '2021-10-27', '2021-10-28'),
]
try:
print("[+] inserting data into users table")
query = "INSERT INTO `booking` (`room_id`, `user_id`, `capacity_book`, \
`option`, `order_price`, `booking_start_date`, `booking_end_date`) \
VALUES(%s,%s,%s,%s,%s,%s,%s)"
id = db_engine.execute(query, booking_data)
except SQLAlchemyError as e:
error = str(e.__dict__["orig"])
print(error)
# Pour les nuits de vendredi et samedi le prix des chambres est majoré de 15%
# Les nuits du mercredi et jeudi sont minoré de 10%
# Si une seule personne occupe la chambre le prix est minoré de 5%
# Generate fake data for PricePolicy
price_policy_data = [
(1, "Wednesday Minoration", 1, -10, 3, True),
(1, "Thursday Minoration", 1, -10, 4, True),
(1, "Friday Majoration", 1, 15, 5, True),
(1, "Saturday Majoration", 1, 15, 6, True),
(2, "Wednesday Minoration", 1, -10, 3, True),
(2, "Thursday Minoration", 1, -10, 4, True),
(2, "Friday Majoration", 1, 15, 5, True),
(2, "Saturday Majoration", 1, 15, 6, True),
(3, "Wednesday Minoration", 1, -10, 3, True),
(3, "Thursday Minoration", 1, -10, 4, True),
(3, "Friday Majoration", 1, 15, 5, True),
(3, "Saturday Majoration", 1, 15, 6, True),
(4, "Wednesday Minoration", 1, -10, 3, True),
(4, "Thursday Minoration", 1, -10, 4, True),
(4, "Friday Majoration", 1, 15, 5, True),
(4, "Saturday Majoration", 1, 15, 6, True),
(5, "Wednesday Minoration", 1, -10, 3, True),
(5, "Thursday Minoration", 1, -10, 4, True),
(5, "Friday Majoration", 1, 15, 5, True),
(5, "Saturday Majoration", 1, 15, 6, True),
(6, "Wednesday Minoration", 1, -10, 3, True),
(6, "Thursday Minoration", 1, -10, 4, True),
(6, "Friday Majoration", 1, 15, 5, True),
(6, "Saturday Majoration", 1, 15, 6, True),
(7, "Wednesday Minoration", 1, -10, 3, True),
(7, "Thursday Minoration", 1, -10, 4, True),
(7, "Friday Majoration", 1, 15, 5, True),
(7, "Saturday Majoration", 1, 15, 6, True),
]
print("[+] inserting data into price_policies")
try:
query = "INSERT INTO `price_policies` (`room_id`, `name`, \
`price_policy_type`, `room_majoration`, `day_number`, \
`is_default`) VALUES(%s,%s,%s,%s,%s,%s)"
id = db_engine.execute(query, price_policy_data)
except SQLAlchemyError as e:
error = str(e.__dict__["orig"])
print(error)
price_policy_data_v2 = [
(1, "Capacity Minoration", 2, -5, 1, True),
(2, "Capacity Minoration", 2, -5, 1, True),
(3, "Capacity Minoration", 2, -5, 1, True),
(4, "Capacity Minoration", 2, -5, 1, True),
(5, "Capacity Minoration", 2, -5, 1, True),
(6, "Capacity Minoration", 2, -5, 1, True),
(7, "Capacity Minoration", 2, -5, 1, True),
]
print("[+] inserting data into price_policies v2")
try:
query = "INSERT INTO `price_policies` (`room_id`, `name`, \
`price_policy_type`, `room_majoration`, `capacity_limit`, \
`is_default`) VALUES(%s,%s,%s,%s,%s,%s)"
id = db_engine.execute(query, price_policy_data_v2)
except SQLAlchemyError as e:
error = str(e.__dict__["orig"])
print(error)
# Generate fake data for Options
options_data = [
("Parking", 25),
("Baby cot", 0),
("Romance pack", 50),
("Breakfast", 30),
]
try:
print("[+] inserting data into options table")
query = "INSERT INTO `options` (`name`, `price`) \
VALUES(%s,%s)"
id = db_engine.execute(query, options_data)
except SQLAlchemyError as e:
error = str(e.__dict__["orig"])
print(error)
# Generate Calendar
date_data = []
sdate = datetime.date(2021, 1, 1)
edate = datetime.date(2022, 12, 31)
date_range = pd.date_range(sdate, edate - datetime.timedelta(days=1), freq='d')
for e in date_range:
date = e.strftime("%Y-%m-%d")
day = e.strftime("%d")
day_name = e.strftime("%A")
day_week = e.strftime("%w")
month_name = e.strftime("%B")
month = e.strftime("%m")
year = e.strftime("%Y")
row = (date, day, day_name, day_week, month_name, month, year)
date_data.append(row)
try:
print("[+] inserting data into calendar table")
query = "INSERT INTO `calendar` (`date`, `day`, `day_name`, \
`day_week`, `month_name`, `month`, `year`) \
VALUES(%s,%s,%s,%s,%s,%s,%s)"
id = db_engine.execute(query, date_data)
except SQLAlchemyError as e:
print(query)
error = str(e.__dict__["orig"])
print(error)
| [
11748,
4818,
8079,
198,
11748,
33829,
198,
11748,
12234,
8019,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
277,
3110,
1330,
376,
3110,
198,
6738,
44161,
282,
26599,
1330,
4403,
46541,
198,
6738,
44161,
282,
26599,
1330,
41146,
... | 2.334744 | 4,254 |
from jinja2 import Environment, FileSystemLoader
import jinja2_stringcase
import stringcase
import csv
import openpyxl
from gencode.internal import default
import gencode.text_helpers as text_helpers
import os
# Config Loading
# helper
| [
6738,
474,
259,
6592,
17,
1330,
9344,
11,
9220,
11964,
17401,
198,
11748,
474,
259,
6592,
17,
62,
8841,
7442,
198,
11748,
4731,
7442,
198,
11748,
269,
21370,
198,
11748,
1280,
9078,
87,
75,
198,
6738,
2429,
8189,
13,
32538,
1330,
4277... | 3.402778 | 72 |
import tensorflow as tf
from stagedml.imports.tf import ( Model, Layer, Tensor, Adam, LayerNormalization )
from stagedml.models.transformer.imports import (
transformer_loss, get_padding_bias, get_padding,
get_position_encoding, get_decoder_self_attention_bias,
sequence_beam_search, EOS_ID, LearningRateSchedule, train_input_fn,
map_data_for_transformer_fn, compute_bleu, bleu_tokenize )
from stagedml.models.transformer.metrics import Metrics
from stagedml.models.transformer.attention import Attention, SelfAttention
from stagedml.models.transformer.ffn import FeedForwardNetwork
from stagedml.models.transformer.embedding import EmbeddingSharedWeights
from typing import Any, List, Tuple
def create_train_model(params:dict)->Model:
"""Creates transformer model for training."""
with tf.name_scope("model"):
inputs = tf.keras.layers.Input((None,), dtype="int64", name="inputs")
targets = tf.keras.layers.Input((None,), dtype="int64", name="targets")
internal_model = TransformerLayer(params, name='transformerv2')
logits = internal_model([inputs, targets], training=True)
vocab_size = params["vocab_size"]
label_smoothing = params["label_smoothing"]
if params["enable_metrics_in_training"]:
logits = Metrics(vocab_size)(logits, targets)
logits = tf.keras.layers.Lambda(lambda x: x, name="logits", dtype=tf.float32)(logits)
model = Model([inputs, targets], logits)
# TODO(reedwm): Can we do this loss in float16 instead of float32?
loss = transformer_loss(logits, targets, label_smoothing, vocab_size)
model.add_loss(loss)
return model
def create_optimizer(params:dict)->Adam:
"""Creates optimizer."""
# TODO(b/139414679): Explore the difference between using
# LearningRateSchedule and callback for GPU runs, and try to merge them.
lr_schedule = LearningRateSchedule(
params["learning_rate"],
params["hidden_size"],
params["learning_rate_warmup_steps"])
opt = Adam(params["learning_rate"],
params["optimizer_adam_beta1"],
params["optimizer_adam_beta2"],
epsilon=params["optimizer_adam_epsilon"])
assert not (params["dtype"] == tf.float16)
return opt
class PrePostProcessingWrapper(Layer):
"""Wrapper class that applies layer pre-processing and post-processing."""
def call(self, x:Tensor, *args, **kwargs)->Tensor:
"""Calls wrapped layer with same parameters."""
# Preprocessing: apply layer normalization
training = kwargs["training"]
# print(type(x), x.dtype)
# print(x.shape)
y = self.layer_norm(x)
# Get layer output
y = self.layer(y, *args, **kwargs)
# Postprocessing: apply dropout and residual connection
if training:
y = tf.nn.dropout(y, rate=self.postprocess_dropout)
return x + y
| [
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
23393,
4029,
13,
320,
3742,
13,
27110,
1330,
357,
9104,
11,
34398,
11,
309,
22854,
11,
7244,
11,
34398,
26447,
1634,
1267,
198,
198,
6738,
23393,
4029,
13,
27530,
13,
7645,
16354,
13,
3... | 2.804175 | 1,006 |
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ros2action.api import action_type_completer
from ros2action.api import get_action_path
from ros2action.verb import VerbExtension
class ShowVerb(VerbExtension):
"""Output the action definition."""
| [
2,
15069,
13130,
4946,
8090,
47061,
5693,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.694064 | 219 |
import sys
def main(args):
"""main() will be run if you run this script directly"""
parse_server_status(
"protopy/vassal_engine/data/2021-03-vassal-players-month.txt",
"server_status.csv",
)
def run():
"""Entry point for the runnable script."""
sys.exit(main(sys.argv[1:]))
if __name__ == "__main__":
"""main calls run()."""
run()
| [
11748,
25064,
628,
198,
198,
4299,
1388,
7,
22046,
2599,
198,
220,
220,
220,
37227,
12417,
3419,
481,
307,
1057,
611,
345,
1057,
428,
4226,
3264,
37811,
198,
220,
220,
220,
21136,
62,
15388,
62,
13376,
7,
198,
220,
220,
220,
220,
22... | 2.372671 | 161 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @first_date 20140829
# @date
# @version 0.0
# @brief stop dokuwiki Apache server
import os
import time
import signal
import subprocess
os.chdir(r'server')
print("Stop Apache Server")
p = subprocess.Popen(["ApacheKill.exe"])
### Replace "ApacheKill.exe" ###
# is_success_killed = False
# with open('apache_log.txt', 'r') as io_r:
# pid = io_r.readline()
# print(pid)
# pid = int(pid)
# if pid > 100:
# os.kill(pid, signal.CTRL_C_EVENT) # signal.SIGTERM
# is_success_killed = True
# time.sleep(3)
# if is_success_killed:
# os.remove("apache_log.txt")
print("It's Stopped")
time.sleep(3)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
220,
2488,
11085,
62,
4475,
220,
220,
220,
580,
26200,
1959,
198,
2,
220,
2488,
4475,
198,
2,
220,
2488,
9641,
2... | 2.227564 | 312 |
#!/usr/bin/env python3
from collections import*
n = int(input())
a = list(map(int, input().split()))
l = []
r = []
for i, j in enumerate(a):
l += [i + j]
r += [i - j]
c = 0
l = Counter(l)
for r in r:
c += l.get(r, 0)
print(c) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
17268,
1330,
9,
198,
198,
77,
796,
493,
7,
15414,
28955,
198,
64,
796,
1351,
7,
8899,
7,
600,
11,
5128,
22446,
35312,
3419,
4008,
198,
198,
75,
796,
17635,
198,
81,
796,
... | 2.086957 | 115 |
# -*- coding: utf-8 -*-
import requests
import os, sys
import pandas as pd
import urllib.parse
import logging
#import asyncio
import concurrent.futures
from pathlib import Path
'''
Script to download the precipitation dataset from
https://climateknowledgeportal.worldbank.org/api/data/get-download-data

URL layout (segments after .../get-download-data):
  / projection|historical            (nature of the data)
  / mavg                             (mean; projection only)
  / pr                               (climate variable: precipitation)
  / rcp85                            (scenario; projection only)
  / period                           (e.g. 1901-2016 or 2080_2099)
  / country code / country name      (e.g. FRA/France)

Examples:
  .../get-download-data/historical/pr/1901-2016/FRA/France
  .../get-download-data/projection/mavg/pr/rcp85/2060_2079/FRA/France
'''
# Root of the shared datasets directory, relative to this script.
DATASET_FOLDER = '../../datasets/'
# Destination directory for the downloaded CSV files.
PATH = os.path.join(DATASET_FOLDER, 'precipitation/')

# Each country is fetched both as future projections and as historical data.
nature_of_data = ['projection', 'historical']

# Read the World Bank countries list (expects 'code' and 'name' columns).
df = pd.read_csv('../../datasets/worldbank_countries.csv')
countries_code = df.code.to_list()
countries_name = df.name.to_list()

# Climate variables of interest.
# NOTE(review): not referenced below - 'pr' is hard-coded in the URL;
# confirm before removing.
variables = ['pr']

# Periods: one long series for historical data, four 20-year windows for
# projections (note the different separators: '-' vs '_').
past_time_series = ["1901-2016"]
futu_time_series = ["2020_2039", "2040_2059", "2060_2079", "2080_2099"]

# Dedicated logger writing DEBUG-and-above to download.log.
logger = logging.getLogger("download")
formatter = logging.Formatter("%(asctime)s - %(name)-12s %(levelname)-8s %(message)s")
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler("download.log")
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)

logger.info('Starting...')

nb_iter = 0
# Downloads are I/O-bound, so fan the requests out over a thread pool.
with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:
    futures = []
    for country_code, country_name in zip(countries_code, countries_name):
        for nature in nature_of_data:
            # Historical data has a single period and no /mavg or /rcp85
            # URL segments.
            time_series = past_time_series if nature == 'historical' else futu_time_series
            data_type = '' if nature == 'historical' else '/mavg'
            projection = '' if nature == 'historical' else '/rcp85'
            for period in time_series:
                nb_iter += 1
                # Build the download URL; the country name must be URL-quoted.
                url = 'https://climateknowledgeportal.worldbank.org/api/data/get-download-data/' \
                    + f'{nature}{data_type}/pr{projection}/{period}/{country_code}/{urllib.parse.quote_plus(country_name)}'
                # Destination name, e.g. historical_1901-2016_FRA.csv
                filename = '_'.join([nature, period, country_code]) + '.csv'
                destination = os.path.join(PATH, filename)
                futures.append(executor.submit(get_url, url=url, destination=destination))
    for future in concurrent.futures.as_completed(futures):
        # NOTE: future.result() re-raises any exception raised by get_url.
        logger.debug('Done %s', future.result())

logger.info('Done after %d iterations.', nb_iter)
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
7007,
198,
11748,
28686,
11,
25064,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2956,
297,
571,
13,
29572,
198,
11748,
18931,
198,
2,
11748,
30351,
952,
198... | 2.48223 | 1,435 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AlertRuleTemplatePropertiesBase(Model):
    """Common property bag shared by all alert rule templates.

    Server-populated variables are read-only and will be ignored when sent
    in a request.

    :param alert_rules_created_by_template_count: number of alert rules that
     were created from this template
    :type alert_rules_created_by_template_count: int
    :ivar created_date_utc: time at which this alert rule template was added
    :vartype created_date_utc: datetime
    :param description: description of the alert rule template
    :type description: str
    :param display_name: display name for the alert rule template
    :type display_name: str
    :param required_data_connectors: data sources this template requires
    :type required_data_connectors:
     list[~securityinsights.models.AlertRuleTemplateDataSource]
    :param status: template status. Possible values include: 'Installed',
     'Available', 'NotAvailable'
    :type status: str or ~securityinsights.models.TemplateStatus
    """

    # created_date_utc is set by the service and must never be sent back.
    _validation = {'created_date_utc': {'readonly': True}}

    # Maps Python attribute names to their wire (JSON) keys and msrest types.
    _attribute_map = {
        'alert_rules_created_by_template_count': {
            'key': 'alertRulesCreatedByTemplateCount', 'type': 'int'},
        'created_date_utc': {'key': 'createdDateUTC', 'type': 'iso-8601'},
        'description': {'key': 'description', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'required_data_connectors': {
            'key': 'requiredDataConnectors',
            'type': '[AlertRuleTemplateDataSource]'},
        'status': {'key': 'status', 'type': 'str'},
    }
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
6127,
7560,
416,
5413,
357,
49,
8,
11160,
19452,
6127,
35986,
13,
198,
2,
19179,
743,
2728,
11491,
4069,
290,
481,
307,
2626,
611,
262,
2438,
318,
198,
2,
16935,
515,
13,
... | 3.207729 | 621 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
class CC1(object):
    """First, unrelated base class; provides cc1_method."""

    def __init__(self):
        """TODO: to be defined1. """

    def cc1_method(self):
        # Reconstructed from the expected output documented at the bottom
        # of this file; the original definition was missing, so calling
        # CC3().cc3_method() raised AttributeError.
        print('cc1_method')


# This class calls methods defined on CC1; CC1 and CC2 are otherwise
# unrelated to each other.
class CC2(object):
    """Second, unrelated base class; provides cc2_method."""

    def __init__(self):
        """TODO: to be defined1. """

    def cc2_method(self):
        print('cc2_method')


class CC3(CC1, CC2):
    """Multiple-inheritance example combining CC1 and CC2."""

    def __init__(self):
        """TODO: to be defined1. """

    def cc3_method(self):
        # Both methods are inherited: cc1_method from CC1, cc2_method
        # from CC2 (MRO: CC3 -> CC1 -> CC2 -> object).
        self.cc1_method()
        self.cc2_method()


c = CC3()
c.cc3_method()

# result:
# cc1_method
# cc2_method
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
4871,
12624,
16,
7,
15252,
2599,
628,
220,
220,
220,
37227,
23579,
8841,
329,
12624,
16,
13,
37227,
628,
220,
220,
... | 1.891051 | 257 |
# External Libraries
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker

# Shared declarative base class: all ORM models in this package inherit
# from Base so they register on a single metadata object.
# NOTE(review): create_engine / scoped_session / sessionmaker are imported
# but unused here - presumably consumed by sibling modules; verify before
# removing.
Base = declarative_base()
| [
2,
34579,
46267,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
2377,
283,
876,
62,
8692,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
629,
19458,
62,
29891,
11,... | 3.571429 | 56 |
from django.conf.urls.static import static
from django.urls import include, path

# NOTE(review): settings is imported from the local package rather than
# django.conf - confirm this module actually defines STATIC_URL/STATIC_ROOT.
from . import settings

# Root URLconf: delegate everything to the frontend app, and serve static
# assets via Django's static() helper (which is a no-op unless DEBUG).
urlpatterns = [
    path('', include('frontend.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12708,
1330,
9037,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
2291,
11,
3108,
198,
6738,
764,
1330,
6460,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
... | 3.026316 | 76 |
from __future__ import print_function, unicode_literals
import six, os, sys, time, bz2
# The signal module is unavailable on some platforms/runtimes; fall back to
# None so callers can feature-test with "if signal:" instead of catching
# ImportError themselves.
signal = None
try:
    import signal
except ImportError:
    pass
from zope.interface import implementer
from twisted.internet import reactor, utils, defer
from twisted.python import usage, procutils, filepath, log as tw_log
from twisted.application import service, internet
from foolscap.api import Tub, Referenceable
from foolscap.logging.interfaces import RILogGatherer, RILogObserver
from foolscap.logging.incident import IncidentClassifierBase, TIME_FORMAT
from foolscap.logging import flogfile
from foolscap.util import move_into_place
class CreateGatherOptions(usage.Options):
    """flogtool create-gatherer GATHERER_DIRECTORY"""
    # NOTE: the docstring above doubles as user-visible usage text in
    # twisted.python.usage - keep it in "flogtool <command> ARGS" form.

    stdout = sys.stdout
    stderr = sys.stderr
    # optFlags entries: (long option, short option, help text)
    optFlags = [
        ("bzip", "b", "Compress each output file with bzip2"),
        ("quiet", "q", "Don't print instructions to stdout"),
        ]
    # optParameters entries: (long option, short option, default, help text)
    optParameters = [
        ("port", "p", "tcp:3117", "TCP port to listen on (strports string)"),
        ("location", "l", None, "(required) Tub location hints to use in generated FURLs. e.g. 'tcp:example.org:3117'"),
        ("rotate", "r", None,
         "Rotate the output file every N seconds."),
        ]
@implementer(RILogObserver)
@implementer(RILogGatherer)
class GathererService(GatheringBase):
    # create this with 'flogtool create-gatherer BASEDIR'
    # run this as 'cd BASEDIR && twistd -y gatherer.tac'
    """Run a service that gathers logs from multiple applications.

    The LogGatherer sits in a corner and receives log events from many
    applications at once. At startup, it runs a Tub and emits the gatherer's
    long-term FURL. You can then configure your applications to connect to
    this FURL when they start and pass it a reference to their LogPublisher.
    The gatherer will subscribe to the publisher and save all the resulting
    messages in a serialized flogfile.

    Applications can use code like the following to create a LogPublisher and
    pass it to the gatherer::

     def tub_ready(self):
         # called when the Tub is available for registerReference
         lp = LogPublisher('logport.furl')
         lp.setServiceParent(self.tub)
         log_gatherer_furl = self.get_config('log_gatherer.furl')
         if log_gatherer_furl:
             self.tub.connectTo(log_gatherer_furl,
                                self._log_gatherer_connected, lp)

     def _log_gatherer_connected(self, rref, lp):
         rref.callRemote('logport', self.nodeid, lp)

    This LogGatherer class is meant to be run by twistd from a .tac file, but
    applications that want to provide the same functionality can just
    instantiate it with a distinct basedir= and call startService.
    """

    # Emit progress messages while gathering.
    verbose = True
    # File (inside basedir) holding the gatherer's long-term FURL.
    furlFile = "log_gatherer.furl"
    # Name of the twistd .tac file written by 'flogtool create-gatherer'.
    tacFile = "gatherer.tac"
LOG_GATHERER_TACFILE = """\
# -*- python -*-
# we record the path when 'flogtool create-gatherer' is run, in case flogtool
# was run out of a source tree. This is somewhat fragile, of course.
stashed_path = [
%(path)s]
import sys
needed = [p for p in stashed_path if p not in sys.path]
sys.path = needed + sys.path
from foolscap.logging import gatherer
from twisted.application import service
rotate = %(rotate)s
use_bzip = %(use_bzip)s
gs = gatherer.GathererService(rotate, use_bzip)
application = service.Application('log_gatherer')
gs.setServiceParent(application)
"""
###################
# Incident Gatherer
class CreateIncidentGatherOptions(usage.Options):
    """flogtool create-incident-gatherer BASEDIR"""
    # NOTE: the docstring above doubles as user-visible usage text in
    # twisted.python.usage - keep it in "flogtool <command> ARGS" form.

    stdout = sys.stdout
    stderr = sys.stderr
    # optFlags entries: (long option, short option, help text)
    optFlags = [
        ("quiet", "q", "Don't print instructions to stdout"),
        ]
    # optParameters entries: (long option, short option, default, help text)
    optParameters = [
        ("port", "p", "tcp:3118", "TCP port to listen on (strports string)"),
        ("location", "l", None, "(required) Tub location hints to use in generated FURLs. e.g. 'tcp:example.org:3118'"),
        ]
@implementer(RILogObserver)
@implementer(RILogGatherer)
class IncidentGathererService(GatheringBase, IncidentClassifierBase):
    # create this with 'flogtool create-incident-gatherer BASEDIR'
    # run this as 'cd BASEDIR && twistd -y gatherer.tac'
    """Run a service that gathers Incidents from multiple applications.

    The IncidentGatherer sits in a corner and receives incidents from many
    applications at once. At startup, it runs a Tub and emits the gatherer's
    long-term FURL. You can then configure your applications to connect to
    this FURL when they start and pass it a reference to their LogPublisher.
    The gatherer will subscribe to the publisher and save all the resulting
    incidents in the incidents/ directory, organized by the publisher's
    tubid. The gatherer will also run a set of user-supplied classifier
    functions on the incidents and put the filenames (one line per incident)
    into files in the categories/ directory.

    This IncidentGatherer class is meant to be run as a standalone service
    from bin/flogtool, but by careful subclassing and setup it could be run
    as part of some other application.
    """

    # Emit progress messages while gathering.
    verbose = True
    # File (inside basedir) holding the gatherer's long-term FURL.
    furlFile = "log_gatherer.furl"
    # Name of the twistd .tac file written by
    # 'flogtool create-incident-gatherer'.
    tacFile = "gatherer.tac"
INCIDENT_GATHERER_TACFILE = """\
# -*- python -*-
# we record the path when 'flogtool create-incident-gatherer' is run, in case
# flogtool was run out of a source tree. This is somewhat fragile, of course.
stashed_path = [
%(path)s]
import sys
needed = [p for p in stashed_path if p not in sys.path]
sys.path = needed + sys.path
from foolscap.logging import gatherer
from twisted.application import service
gs = gatherer.IncidentGathererService()
# To add a classifier function, store it in a neighboring file named
# classify_*.py, in a function named classify_incident(). All such files will
# be loaded at startup:
#
# %% cat classify_foolscap.py
# import re
# TUBCON_RE = re.compile(r'^Tub.connectorFinished: WEIRD, <foolscap.connection.TubConnector instance at \w+> is not in \[')
# def classify_incident(trigger):
# # match some foolscap messages
# m = trigger.get('message', '')
# if TUBCON_RE.search(m):
# return 'foolscap-tubconnector'
# %%
application = service.Application('incident_gatherer')
gs.setServiceParent(application)
"""
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
11748,
2237,
11,
28686,
11,
25064,
11,
640,
11,
275,
89,
17,
198,
12683,
282,
796,
6045,
198,
28311,
25,
198,
220,
220,
220,
1330,
6737,
198,
16341... | 2.887201 | 2,172 |