text string | size int64 | token_count int64 |
|---|---|---|
# This file is part of Pynguin.
#
# Pynguin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pynguin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pynguin. If not, see <https://www.gnu.org/licenses/>.
"""An executor that executes a test under the inspection of the MonkeyType tool."""
import contextlib
import logging
import os
import sys
from typing import Any, Dict, Iterable, List, Optional
import astor
from monkeytype.config import DefaultConfig
from monkeytype.db.base import CallTraceStore, CallTraceThunk
from monkeytype.encoding import CallTraceRow, serialize_traces
from monkeytype.tracing import CallTrace, CallTraceLogger, CallTracer
import pynguin.configuration as config
import pynguin.testcase.execution.executioncontext as ctx
import pynguin.testcase.testcase as tc
class _MonkeyTypeCallTraceStore(CallTraceStore):
    """In-memory implementation of MonkeyType's ``CallTraceStore``.

    Stores one serialised trace row per module name.
    """

    def __init__(self):
        # Maps module name -> (qualname, arg_types, return_type, yield_type).
        self._values: Dict[str, Any] = {}

    def add(self, traces: Iterable[CallTrace]) -> None:
        """Serialise the given traces and store them keyed by module.

        NOTE(review): keyed by ``row.module``, so a later trace for the same
        module overwrites the earlier one — confirm that keeping only the
        last trace per module is intended.
        """
        for row in serialize_traces(traces):
            self._values[row.module] = (
                row.qualname,
                row.arg_types,
                row.return_type,
                row.yield_type,
            )

    def filter(
        self, module: str, qualname_prefix: Optional[str] = None, limit: int = 2000
    ) -> List[CallTraceThunk]:
        """Return stored rows matching the module name or qualname prefix.

        At most ``limit`` rows are returned.
        NOTE(review): rows matched only by qualname prefix are re-labelled
        with the *requested* ``module`` (not their stored module) — verify
        this relabelling is intended.
        """
        result: List[CallTraceThunk] = []
        for stored_module, row in self._values.items():
            is_qualname = qualname_prefix is not None and qualname_prefix in row[0]
            if stored_module == module or is_qualname:
                result.append(
                    CallTraceRow(
                        module=module,
                        qualname=row[0],
                        arg_types=row[1],
                        return_type=row[2],
                        yield_type=row[3],
                    )
                )
        return result if len(result) < limit else result[:limit]

    @classmethod
    def make_store(cls, connection_string: str) -> "CallTraceStore":
        """Create a new store; the connection string is ignored (in-memory)."""
        return cls()

    def list_modules(self) -> List[str]:
        """Return the names of all modules that have a stored trace row."""
        return [k for k, _ in self._values.items()]
class _MonkeyTypeCallTraceLogger(CallTraceLogger):
    """Trace logger that simply accumulates every logged trace in a list."""

    def __init__(self) -> None:
        # All traces logged so far, in logging order.
        self._traces: List[CallTrace] = []

    def log(self, trace: CallTrace) -> None:
        """Record a single call trace."""
        self._traces.append(trace)

    @property
    def traces(self) -> List[CallTrace]:
        """Provides the collected traces."""
        return self._traces
class _MonkeyTypeConfig(DefaultConfig):
    """MonkeyType configuration wiring in the in-memory store and logger."""

    def trace_store(self) -> CallTraceStore:
        """Return a fresh in-memory call-trace store."""
        return _MonkeyTypeCallTraceStore()

    def trace_logger(self) -> CallTraceLogger:
        """Return a fresh list-backed call-trace logger."""
        return _MonkeyTypeCallTraceLogger()
# pylint:disable=too-few-public-methods
class MonkeyTypeExecutor:
    """An executor that executes a test under the inspection of the MonkeyType tool."""

    _logger = logging.getLogger(__name__)

    def __init__(self):
        """Create the executor with a fresh MonkeyType configuration and tracer."""
        self._config = _MonkeyTypeConfig()
        # The tracer is installed as a sys profile hook while statements run.
        self._tracer = CallTracer(
            logger=self._config.trace_logger(),
            code_filter=self._config.code_filter(),
            sample_rate=self._config.sample_rate(),
        )
        self._call_traces: List[CallTrace] = []

    def execute(self, test_cases: List[tc.TestCase]) -> List[CallTrace]:
        """Execute the given test cases."""
        # Stdout is redirected to os.devnull so the executed statements
        # cannot pollute Pynguin's own output.
        with open(os.devnull, mode="w") as null_file:
            with contextlib.redirect_stdout(null_file):
                for test_case in test_cases:
                    exec_ctx = ctx.ExecutionContext(test_case)
                    self._execute_ast_nodes(exec_ctx)
                self._filter_and_append_call_traces()
        return self._call_traces

    def _execute_ast_nodes(self, exec_ctx: ctx.ExecutionContext):
        """Compile and exec each statement of the test case under the tracer.

        Execution of the test case stops at the first failing statement
        (``break``); the profile hook is always removed afterwards.
        """
        for node in exec_ctx.executable_nodes():
            try:
                if self._logger.isEnabledFor(logging.DEBUG):
                    self._logger.debug("Executing %s", astor.to_source(node))
                code = compile(node, "<ast>", "exec")
                # Install the MonkeyType tracer only around the exec call.
                sys.setprofile(self._tracer)
                # pylint: disable=exec-used
                exec(code, exec_ctx.global_namespace, exec_ctx.local_namespace)  # nosec
            except BaseException as err:  # pylint: disable=broad-except
                failed_stmt = astor.to_source(node)
                self._logger.info(
                    "Fatal! Failed to execute statement with MonkeyType\n%s%s",
                    failed_stmt,
                    err.args,
                )
                break
            finally:
                # Always uninstall the profile hook, even on failure.
                sys.setprofile(None)

    def _filter_and_append_call_traces(self) -> None:
        """Keep only traces that belong to the module under test.

        NOTE(review): the trace's ``funcname`` is compared against the
        configured module name as a string prefix — confirm funcname is
        module-qualified here.
        """
        assert isinstance(self._tracer.logger, _MonkeyTypeCallTraceLogger)
        module_name = config.INSTANCE.module_name
        for trace in self._tracer.logger.traces:
            func_name = trace.funcname
            if func_name.startswith(module_name):
                self._call_traces.append(trace)
| 5,434 | 1,564 |
"Test history entries for migrated, obsolete fields"
from datetime import (
time,
timedelta,
)
from decimal import Decimal
from typing import (
Any,
Dict,
)
from django.contrib.auth.models import User
from django.db import models
from wicked_historian.usersmuggler import usersmuggler
from wicked_historian.utils import FieldDescription
from testapp.factories import BookFactory
from testapp.models import (
Author,
Book,
BookEditHistory,
Language,
OBSOLETE_BOOK_FIELD_CHOICES,
)
from .base import FreezeTimeTestCase
class GettingHistoryEntriesForChangedFieldsTestCase(FreezeTimeTestCase):
    """Tests for reading history entries of migrated and obsolete fields.

    Each test fabricates a raw history row (``create_fake_history_entry``)
    and checks how ``BookEditHistory.get_for`` renders it back.
    """

    # Field id that is neither a current nor an obsolete field description.
    UNKNOWN_FIELD_ID = 'unknown_field_id'

    def setUp(self):
        """Create fixture languages, authors, one user and one book."""
        super().setUp()
        # test languages
        self.languages = {
            'english': Language.objects.create(name='english'),
            'polish': Language.objects.create(name='polish'),
        }
        # test authors
        self.authors = {
            'william_shakespeare': Author.objects.create(name='William Shakespeare'),
            'john_paul_ii': Author.objects.create(name='John Paul II'),
            'nostradamus': Author.objects.create(name='Nostradamus'),
        }
        self.user = User.objects.create(username='john.smith')
        # History rows record the acting user, smuggled in via usersmuggler.
        with usersmuggler.set_user(self.user):
            self.book = BookFactory(  # type: Book
                title='Macbeth',
                issue_year=1603,
                language=self.languages['english'],
                has_pictures=False,
                literary_period=2,
                date_of_publication=(self.frozen_time + timedelta(days=1)).date(),
                moment_of_appearance_on_torrents=self.frozen_time + timedelta(hours=1),
                ebook_length=timedelta(days=1, hours=3, minutes=12, seconds=7),
                number_of_downloads_on_torrents=1223372036854775808,
                encrypted_book=b'some_data',
                cash_lost_because_of_piracy=Decimal('666666666.66'),
                plain_text='foo',
                first_download_hour=time(hour=1),
            )
            self.book.authors.set([self.authors['william_shakespeare']])
        self.book = Book.objects.get(pk=self.book.pk)  # just to reset any instance attributes used for creating history
        # Lookup tables: field name -> FieldDescription, for current and obsolete fields.
        self.field_choices_by_name = {description.name: description for description in BookEditHistory.FIELDS_DESCRIPTIONS}
        self.obsolete_field_by_name = {description.name: description for description in OBSOLETE_BOOK_FIELD_CHOICES}
        # Drop the history created as a side effect of the fixtures above.
        BookEditHistory.objects.all().delete()

    def test_unknown_field(self):
        """A history row with an unrecognised field id raises UnknownFieldException."""
        self.create_fake_history_entry(
            self.UNKNOWN_FIELD_ID,
            old_value=1603,
            new_value=2018,
        )
        with self.assertRaises(BookEditHistory.UnknownFieldException):
            BookEditHistory.get_for(self.book)

    def test_deleted_field_with_choices(self):
        """Obsolete choice fields render the stored choice labels, not raw values."""
        self.create_fake_history_entry(
            self.obsolete_field_by_name['age'].id,
            old_value=1,
            new_value=2,
        )
        history_entry = self.get_last_history_entry(self.book)
        self.assertDictEqual(history_entry, {
            'change_date': self.frozen_time,
            'user': self.user,
            'field_verbose_name': 'age',
            'old_value': 'XV',
            'new_value': 'XIX',
        })

    def test_deleted_char_field(self):
        """Obsolete char fields render their stored values verbatim."""
        self.create_fake_history_entry(
            self.obsolete_field_by_name['description'].id,
            old_value='abc',
            new_value='xyz',
        )
        history_entry = self.get_last_history_entry(self.book)
        self.assertDictEqual(history_entry, {
            'change_date': self.frozen_time,
            'user': self.user,
            'field_verbose_name': 'description',
            'old_value': 'abc',
            'new_value': 'xyz',
        })

    def test_deleted_foreign_key_field(self):
        """Obsolete FK fields keep their {'pk': ..., 'str': ...} representation."""
        william_shakespeare = {'pk': self.authors['william_shakespeare'].pk, 'str': str(self.authors['william_shakespeare'])}
        john_paul_ii = {'pk': self.authors['john_paul_ii'].pk, 'str': str(self.authors['john_paul_ii'])}
        self.create_fake_history_entry(
            self.obsolete_field_by_name['author'].id,
            old_value=william_shakespeare,
            new_value=john_paul_ii,
        )
        history_entry = self.get_last_history_entry(self.book)
        self.assertDictEqual(history_entry, {
            'change_date': self.frozen_time,
            'user': self.user,
            'field_verbose_name': 'author',
            'old_value': william_shakespeare,
            'new_value': john_paul_ii,
        })

    def test_deleted_many_to_many_field(self):
        """Obsolete M2M fields keep lists of {'pk': ..., 'str': ...} dicts."""
        english = {'pk': self.languages['english'].pk, 'str': str(self.languages['english'])}
        polish = {'pk': self.languages['polish'].pk, 'str': str(self.languages['polish'])}
        self.create_fake_history_entry(
            self.obsolete_field_by_name['languages'].id,
            old_value=[english],
            new_value=[english, polish]
        )
        history_entry = self.get_last_history_entry(self.book)
        self.assertDictEqual(history_entry, {
            'change_date': self.frozen_time,
            'user': self.user,
            'field_verbose_name': 'languages',
            'old_value': [english],
            'new_value': [english, polish]
        })

    def test_different_id_for_different_type_with_the_same_name(self):
        """Field ids depend on the field type, but not on type parameters."""
        first = FieldDescription('description', models.TextField())
        second = FieldDescription('description', models.CharField())
        third = FieldDescription('description', models.CharField(max_length=50))
        self.assertNotEqual(first.id, second.id)
        self.assertEqual(second.id, third.id)

    def test_changed_from_string_to_int(self):
        """Old string values survive on a field that is now an integer field."""
        self.create_fake_history_entry(
            self.field_choices_by_name['issue_year'].id,
            old_value='MDCIII',
            new_value='MMXVIII'
        )
        history_entry = self.get_last_history_entry(self.book)
        self.assertDictEqual(history_entry, {
            'change_date': self.frozen_time,
            'user': self.user,
            'field_verbose_name': 'issue year',
            'old_value': 'MDCIII',
            'new_value': 'MMXVIII'
        })

    def test_presence_of_field_names_on_fields_descriptions_list(self):
        """FIELDS_DESCRIPTIONS covers exactly the expected set of field names."""
        field_names = {description.name for description in BookEditHistory.FIELDS_DESCRIPTIONS}
        self.assertEqual(field_names, {
            'age',
            'author',
            'authors',
            'book_shelf_slot',
            'cash_lost_because_of_piracy',
            'date_of_publication',
            'description',
            'ebook_length',
            'encrypted_book',
            'first_download_hour',
            'has_pictures',
            'id',
            'issue_number',
            'issue_year',
            'language',
            'languages',
            'literary_period',
            'moment_of_appearance_on_torrents',
            'number_of_downloads_on_torrents',
            'number_of_pages',
            'plain_text',
            'text_as_pdf',
            'title',
            'pirates',
            'printers',
            'chapter_set',
        })

    @staticmethod
    def get_last_history_entry(book: Book) -> Dict[str, Any]:
        """Return the most recent rendered history entry for *book*."""
        return BookEditHistory.get_for(book)[0]

    def create_fake_history_entry(self, field: str, old_value: Any, new_value: Any) -> BookEditHistory:
        """Insert a raw history row for *field* directly, bypassing the model signals."""
        return BookEditHistory.objects.create(**{
            'model': self.book,
            'user': self.user,
            'change_date': self.frozen_time,
            'field': field,
            'old_value': old_value,
            'new_value': new_value
        })
| 7,778 | 2,428 |
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class HideFlags:
    """Generated stub mirroring UnityEngine.HideFlags for UdonPie type checking."""

    def __new__(cls, arg1=None):
        '''
        Stub constructor; the real object is provided by the Udon runtime.

        :returns: HideFlags
        :rtype: UnityEngine.HideFlags
        '''
        pass
| 219 | 71 |
# Generated by Django 2.2.5 on 2019-11-01 06:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Solicitud.tipo_ejecucion with a many-to-many relation.

    NOTE(review): the field is removed and re-added as a ManyToManyField,
    so any values stored in the previous column are discarded — confirm
    no data migration is needed here.
    """

    dependencies = [
        ('solicitudes', '0005_auto_20191101_0115'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='solicitud',
            name='tipo_ejecucion',
        ),
        migrations.AddField(
            model_name='solicitud',
            name='tipo_ejecucion',
            field=models.ManyToManyField(to='solicitudes.TipoEjecucion'),
        ),
    ]
| 536 | 190 |
"""
####################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : tp_head.py
# Abstract : Text Perceptron head structure, mainly including losses for segmentation part and regression part
# Current Version: 1.0.0
# Author : Liang Qiao
# Date : 2020-05-31
# Modified Date : 2020-11-26
# Modified by : inusheng
# Comments : Code and comment standardized
######################################################################################################
"""
import numpy as np
import torch
import torch.nn as nn
from mmdet.models.builder import build_loss
from mmdet.models.registry import HEADS
from mmdet.ops import ConvModule
from mmdet.core import force_fp32, auto_fp16
def make_one_hot(input_tensor, num_classes):
    """Expand a class-index map into its one-hot encoding.

    Args:
        input_tensor: integer tensor of shape [N, *] holding class indices
        num_classes : number of classes C of the feature maps

    Returns:
        one-hot encoding of the input of shape [N, C, *], placed on the
        same device as ``input_tensor``
    """
    # Insert the channel axis so scatter_ can address it.
    expanded = input_tensor.unsqueeze(1)
    target_shape = list(expanded.shape)
    target_shape[1] = num_classes
    # Scatter runs on CPU (indices moved there), then the result is moved
    # back to the input's device.
    encoded = torch.zeros(target_shape)
    encoded = encoded.scatter_(1, expanded.cpu(), 1)
    return encoded.to(expanded.device)
@HEADS.register_module
class TPHead(nn.Module):
    """Text Perceptron head structure.

    Performs further feature extraction on the 4x feature map and produces
    four segmentation maps (center / head / tail / top-bottom boundary) plus
    three corner-point regression maps, together with their losses w.r.t.
    ground-truth labels.

    Arguments:
        in_channels      : number of channels of the input feature maps
        conv_out_channels: number of channels of the output feature maps
        conv_cfg         : configuration of conv filters
        norm_cfg         : configuration of normalization
        loss_seg         : segmentation loss (required)
        loss_reg_head    : regression loss of the head area (optional)
        loss_reg_tail    : regression loss of the tail area (optional)
        loss_reg_bond    : regression loss of the boundary/center area (optional)
    """

    def __init__(self,
                 in_channels=256,
                 conv_out_channels=256,
                 conv_cfg=None,
                 norm_cfg=None,
                 loss_seg=None,
                 loss_reg_head=None,
                 loss_reg_bond=None,
                 loss_reg_tail=None,
                 ):
        super().__init__()
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Segmentation loss is mandatory; regression losses stay None when
        # not configured and are then skipped in loss().
        assert loss_seg is not None
        self.loss_seg = build_loss(loss_seg)
        self.loss_reg_head = build_loss(loss_reg_head) if loss_reg_head is not None else None
        self.loss_reg_tail = build_loss(loss_reg_tail) if loss_reg_tail is not None else None
        self.loss_reg_bond = build_loss(loss_reg_bond) if loss_reg_bond is not None else None
        # Extra conv filters for long-text feature extraction: a 3x3 path,
        # its 1x7 refinement, and a parallel 1x7 path on the raw input.
        self.P4_conv = ConvModule(self.in_channels, self.conv_out_channels,
                                  kernel_size=3, stride=1, padding=1,
                                  conv_cfg=self.conv_cfg,
                                  norm_cfg=self.norm_cfg)
        self.P4_1x7_conv = ConvModule(self.conv_out_channels,
                                      self.conv_out_channels,
                                      kernel_size=(1, 7), stride=(1, 1),
                                      padding=(0, 3), conv_cfg=self.conv_cfg,
                                      norm_cfg=self.norm_cfg)
        self.channel4_1x7_conv = ConvModule(self.in_channels,
                                            self.conv_out_channels,
                                            kernel_size=(1, 7), stride=(1, 1),
                                            padding=(0, 3),
                                            conv_cfg=self.conv_cfg,
                                            norm_cfg=self.norm_cfg)
        self.rpn4 = ConvModule(self.conv_out_channels, self.conv_out_channels,
                               3, padding=1, conv_cfg=self.conv_cfg,
                               norm_cfg=self.norm_cfg)
        # Separate branches for segmentation and regression heads.
        self.seg_branch_conv = ConvModule(self.conv_out_channels,
                                          self.conv_out_channels, 3, padding=1,
                                          conv_cfg=self.conv_cfg,
                                          norm_cfg=self.norm_cfg)
        self.reg_branch_conv = ConvModule(self.conv_out_channels,
                                          self.conv_out_channels, 3, padding=1,
                                          conv_cfg=self.conv_cfg,
                                          norm_cfg=self.norm_cfg)
        self.conv_logits_text = nn.Conv2d(self.conv_out_channels, 1, 1)
        self.conv_logits_head = nn.Conv2d(self.conv_out_channels, 1, 1)
        self.conv_logits_tail = nn.Conv2d(self.conv_out_channels, 1, 1)
        self.conv_logits_bond = nn.Conv2d(self.conv_out_channels, 1, 1)
        self.conv_regress_head = nn.Conv2d(self.conv_out_channels, 4, 1)
        self.conv_regress_tail = nn.Conv2d(self.conv_out_channels, 4, 1)
        self.conv_regress_bond = nn.Conv2d(self.conv_out_channels, 4, 1)
        self.relu = nn.ReLU(inplace=True)

    def init_weights(self):
        """Initialize the 1x1 prediction convs (Xavier weights, zero bias)."""
        for module in [self.conv_logits_text, self.conv_logits_head,
                       self.conv_logits_tail, self.conv_logits_bond,
                       self.conv_regress_bond, self.conv_regress_tail,
                       self.conv_regress_head]:
            if module is None:
                continue
            nn.init.xavier_normal_(module.weight)
            nn.init.constant_(module.bias, 0)

    @auto_fp16()
    def forward(self, x):
        """Network forward pass.

        Only the 4x feature map (x[0]) is used; additional supervision on
        other levels can be added given more compute resources.

        Returns:
            tuple of 7 tensors: four segmentation logits [N, 1, H, W]
            (text / head / tail / boundary) and three regression maps
            [N, 4, H, W] (head / tail / boundary).
        """
        x_4 = x[0]
        # extract long text feature
        x_p4 = self.P4_conv(x_4)
        x_4_1x7 = self.channel4_1x7_conv(x_4)
        x_p4_1x7 = self.P4_1x7_conv(x_p4)
        x_4 = x_p4_1x7 + x_p4 + x_4_1x7
        x_4 = self.rpn4(x_4)
        # generate predicted segmentation maps
        x_4_seg = self.seg_branch_conv(x_4)
        score_text_pred = self.conv_logits_text(x_4_seg)  # center area [N, 1, H, W]
        score_head_pred = self.conv_logits_head(x_4_seg)  # head area [N, 1, H, W]
        score_tail_pred = self.conv_logits_tail(x_4_seg)  # tail area [N, 1, H, W]
        score_bond_pred = self.conv_logits_bond(x_4_seg)  # top/bottom boundaries [N, 1, H, W]
        # generate predicted regression maps
        # Bug fix: the regression branch previously reused seg_branch_conv,
        # leaving reg_branch_conv constructed but unused; route through the
        # dedicated regression conv instead.
        x4_reg = self.reg_branch_conv(x_4)
        reg_head_pred = self.conv_regress_head(x4_reg)  # head corner points [N, 4, H, W]
        reg_tail_pred = self.conv_regress_tail(x4_reg)  # tail corner points [N, 4, H, W]
        reg_bond_pred = self.conv_regress_bond(x4_reg)  # center area [N, 4, H, W]
        return score_text_pred, score_head_pred, score_tail_pred, score_bond_pred, reg_head_pred, reg_tail_pred, reg_bond_pred

    def get_target(self, gt_masks):
        """Split the packed ground-truth tensor into per-task targets.

        Arguments:
            gt_masks: packed labels with 26 channels:
                [:, 0]     gt_score_map (class indices 0..4)
                [:, 1]     gt_score_map_mask, 1 Care / 0 Not Care
                [:, 2:6]   gt_geo_map_head
                [:, 6:10]  gt_geo_map_head_weight
                [:, 10:14] gt_geo_map_tail
                [:, 14:18] gt_geo_map_tail_weight
                [:, 18:22] gt_geo_map_bond
                [:, 22:26] gt_geo_map_bond_weight

        Returns:
            11-tuple: one-hot segmentation targets for text / head / tail /
            boundary ([N, 1, H, W] each), the segmentation mask, and the
            three regression targets with their weight maps ([N, 4, H, W]).
        """
        assert len(gt_masks[0]) == 26
        score_map_target = gt_masks[:, 0, :, :].long()
        score_map_masks_target = gt_masks[:, 1, :, :].float()
        geo_head_target = gt_masks[:, 2:6, :, :]
        geo_head_weights_target = gt_masks[:, 6:10, :, :]
        geo_tail_target = gt_masks[:, 10:14, :, :]
        geo_tail_weights_target = gt_masks[:, 14:18, :, :]
        geo_bond_target = gt_masks[:, 18:22, :, :]
        geo_bond_weights_target = gt_masks[:, 22:, :, :]
        # Convert the 5-class index map into one-hot and pick out the four
        # foreground channels (channel 0 is background).
        score_map_one_hot = make_one_hot(score_map_target, 5).float()
        score_text_target = score_map_one_hot[:, 1: 2, :, :]
        score_head_target = score_map_one_hot[:, 2: 3, :, :]
        score_tail_target = score_map_one_hot[:, 3: 4, :, :]
        score_bond_target = score_map_one_hot[:, 4: 5, :, :]
        return score_text_target, score_head_target, score_tail_target, score_bond_target, score_map_masks_target,\
            geo_head_target, geo_head_weights_target, geo_tail_target, geo_tail_weights_target, geo_bond_target,\
            geo_bond_weights_target

    @force_fp32(apply_to=('mask_pred',))
    def loss(self, mask_pred, mask_targets):
        """Compute segmentation losses and the configured regression losses.

        Arguments:
            mask_pred   : 7-tuple returned by forward()
            mask_targets: 11-tuple returned by get_target()

        Returns:
            dict mapping loss names to loss tensors
        """
        score_text_pred, score_head_pred, score_tail_pred, score_bond_pred, reg_head_pred, reg_tail_pred, reg_bond_pred = mask_pred
        score_text_target, score_head_target, score_tail_target, score_bond_target, score_map_masks_target, \
            geo_head_target, geo_head_weights_target, geo_tail_target, geo_tail_weights_target, geo_bond_target, \
            geo_bond_weights_target = mask_targets
        loss = dict()
        # compute segmentation losses, masked by the Care/Not-Care map
        loss["loss_seg_text"] = self.loss_seg(score_text_pred, score_text_target, weight=score_map_masks_target)
        loss["loss_seg_head"] = self.loss_seg(score_head_pred, score_head_target, weight=score_map_masks_target)
        loss["loss_seg_tail"] = self.loss_seg(score_tail_pred, score_tail_target, weight=score_map_masks_target)
        loss["loss_seg_bond"] = self.loss_seg(score_bond_pred, score_bond_target, weight=score_map_masks_target)
        # compute regression losses only for configured branches
        if self.loss_reg_head is not None:
            loss["loss_reg_head"] = self.loss_reg_head(reg_head_pred, geo_head_target,
                                                       weight=geo_head_weights_target)
        if self.loss_reg_tail is not None:
            loss["loss_reg_tail"] = self.loss_reg_tail(reg_tail_pred, geo_tail_target,
                                                       weight=geo_tail_weights_target)
        if self.loss_reg_bond is not None:
            loss["loss_reg_bond"] = self.loss_reg_bond(reg_bond_pred, geo_bond_target,
                                                       weight=geo_bond_weights_target)
        return loss
| 12,639 | 4,206 |
import wx, wx.lib.newevent
import wx.lib.ogl as ogl
from myhdl import Signal, always, intbv
from MyHDLSim.sequential import ClkDriver
# OGL object to draw a signal
# OGL object to draw a signal
class SignalOGLShape(ogl.CompositeShape):
    """ This shape is used exclusively to contruct the SIGNAL main shape.
    The shape is initially based within an 80x80 square, centered """

    def __init__(self, canvas, label):
        """Build the composite: outer box, inner value box, and a label box."""
        ogl.CompositeShape.__init__(self)
        self.SetCanvas(canvas)
        # Adds the 3 layed boxes to the Signal shape
        outterBox = ogl.RectangleShape(80, 80)
        self._innerBox = ogl.RectangleShape(60, 60)
        labelBox = ogl.RectangleShape(20, 30)
        # Sets inital color brushes for boxes
        brush = wx.Brush("WHITE", wx.SOLID)
        outterBox.SetBrush(brush)
        self._innerBox.SetBrush(brush)
        labelBox.SetBrush(wx.Brush("MEDIUM TURQUOISE", wx.SOLID))
        labelBox.AddText(label)
        self.AddChild(outterBox)
        self.AddChild(self._innerBox)
        self.AddChild(labelBox)
        # Label sits centered on top; inner box is centered in the outer box.
        constraint = ogl.Constraint(ogl.CONSTRAINT_MIDALIGNED_TOP, outterBox, [labelBox])
        constraint2 = ogl.Constraint(ogl.CONSTRAINT_CENTRED_BOTH, outterBox, [self._innerBox])
        self.AddConstraint(constraint)
        self.AddConstraint(constraint2)
        self.Recompute()
        # If we don't do this, the shapes will be able to move on their
        # own, instead of moving the composite
        outterBox.SetDraggable(False)
        self._innerBox.SetDraggable(False)
        labelBox.SetDraggable(False)
        # If we don't do this the shape will take all left-clicks for itself
        outterBox.SetSensitivityFilter(0)

    # Manual override for normal function: Allows us to easily change the inner box color
    def SetBrush(self, brush):
        """Set the brush of the inner value box only (not the whole composite)."""
        self._innerBox.SetBrush(brush)
class SignalWrapper:
    """ This class wraps a MyHDL.Signal object
    Handles wx event listening, toggling, and getting an object to draw
    """

    def __init__(self, canvas, signal = None, width = 1, label = ''):
        """
        Create the wrapped signal: a plain Signal for width 1, an intbv
        signal otherwise, with a small placeholder rectangle as its shape.

        @todo assert that width >= 1
        """
        self._label = label
        if (width == 1):
            self._signal = Signal(signal)
        else:
            self._signal = Signal(intbv(signal)[width:])
        # Default drawing: a small 10x10 rectangle showing the value.
        self._shape = ogl.RectangleShape(10,10)
        self._shape.AddText(str(self._signal.val))

    def SetSwitch(self, canvas, label):
        """ Setting a label, need to recreate shape
        """
        self._label = label
        self._shape = SignalOGLShape(canvas, label)
        self._shape.AddText(str(self._signal.val))

    def SetProbe(self, canvas, a, label):
        """ Sets signal as a probe, need to recreate shape
        """
        self._label = label
        # The probe shares the probed wrapper's underlying Signal object.
        self._signal = a._signal
        self._shape = SignalOGLShape(canvas, label)
        self._shape.AddText(str(self._signal.val))
        canvas.ConnectWires(self._shape, a.GetShape())

    def SetInputProbe(self, canvas, a, label):
        """ Sets signal as a probe, need to recreate shape
        @todo May be an output probe as well, name could change

        NOTE(review): unlike SetProbe, no new SignalOGLShape is created here,
        so the wire is connected to the original 10x10 rectangle — confirm
        this asymmetry is intentional.
        """
        self._label = label
        self._signal = a._signal
        canvas.ConnectWires(self._shape, a.GetShape())

    def SetClockDriver(self, canvas, label, period = 20):
        """ Setting as a clock, need to recreate shape
        """
        self._label = label
        self._shape = SignalOGLShape(canvas, label)
        self._shape.AddText("Clock")
        # ClkDriver toggles the signal with the given period during simulation.
        self._inst = ClkDriver(self._signal, period)

    def GetSignal(self):
        """ Get the underlying object
        """
        return self._signal

    def Toggle(self):
        """ Toggle the signal value
        If it was unitialized, just assert it

        Multi-bit signals increment and wrap at max; 1-bit signals invert.
        """
        if (len(self._signal) > 1):
            # intbv: wrap to 0 when the next value would reach max (exclusive bound).
            if (self._signal + 1 == self._signal.max):
                self._signal.next = 0
            else:
                self._signal.next = self._signal + 1
        elif (self._signal == None):
            self._signal.next = True
        else:
            self._signal.next = not self._signal

    def Update(self):
        """ This visually refreshes a Signal
        Caller must know when Signal has changed
        """
        self._shape.ClearText()
        if self._shape.GetBrush() != wx.TRANSPARENT_BRUSH:
            # For now we are printing TEXT "True", "False", or "None"
            # Also setting inner box color (White: False; Black: True; Grey: None)
            if (self._signal.val == None):
                self._shape.AddText(str(self._signal.val))
                self._shape.SetBrush(wx.Brush("GREY", wx.SOLID))
                self._shape.SetTextColour("BLACK")
            elif (len(self._signal) > 1):
                # Multi-bit signals are rendered in binary.
                self._shape.AddText(bin(self._signal.val))
                self._shape.SetBrush(wx.Brush("WHITE", wx.SOLID))
                self._shape.SetTextColour("BLACK")
            elif (bool(self._signal.val) == False):
                self._shape.AddText(str(bool(self._signal.val)))
                self._shape.SetBrush(wx.Brush("WHITE", wx.SOLID))
                self._shape.SetTextColour("BLACK")
            else:
                self._shape.AddText(str(bool(self._signal.val)))
                self._shape.SetBrush(wx.Brush("BLACK", wx.SOLID))
                self._shape.SetTextColour("WHITE")

    def SetX(self, x):
        # Stored position hint; drawing code reads these attributes directly.
        self._x = x

    def SetY(self, y):
        self._y = y

    def GetShape(self):
        """Return the OGL shape currently representing this signal."""
        return self._shape

    def GetInstance(self):
        """ Get instance for simulator

        NOTE(review): _inst only exists after SetClockDriver() — calling this
        on a non-clock signal raises AttributeError.
        """
        return self._inst
| 5,757 | 1,740 |
import json
import sys
from frankenbot.bot import Bot
from frankenbot.console_chat_interface import ConsoleChatInterface
from frankenbot.persistence.json.json_unserializer import JSONUnserializer
# Path of the JSON definition that configures the restaurant-search bot.
bot_def = "bot_def/restaurantsearch.json"
unserializer = JSONUnserializer()
#simpleResponseGenerator object
# Build the bot from its JSON definition and run an interactive console chat.
bot = unserializer.load_from_file(bot_def)
chat = ConsoleChatInterface(bot)
chat.start_chat()
# 'show me chines restaurants in the north'
# ________________________________
# from rasa_nlu.training_data import load_data
# from rasa_nlu.config import RasaNLUModelConfig
# from rasa_nlu.model import Trainer, Interpreter
# from rasa_nlu import config
#
# train_data = load_data('../intent_categorization_example_rasa/rasa_dataset.json')
# trainer = Trainer(config.load("../intent_categorization_example_rasa/config_spacy.yaml"))
#
# # train the model and save it to a folder
# trainer.train(train_data)
# model_directory = trainer.persist('../intent_categorization_example_rasa/projects/')
#
# # load the model in the interpreter
# interpreter = Interpreter.load(model_directory)
#
#
# # run two utterances through the interpreter
# def nlu(utterance):
# nlu_result = interpreter.parse(utterance)
# print("utterance: " + utterance)
# print("nlu resutl: " + str(nlu_result["intent"]))
#
#
# nlu(u"i'm looking for a place in the north of town")
# nlu(u"Good Morning World")
# nlu(u"vegg") | 1,403 | 460 |
#!/usr/bin/env python
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ipaddress
def set_bit_at_position(position, data):
    """Return *data* with the bit at index *position* set to 1."""
    return data | (1 << position)
def change_format_input_string(original_input):
    """Convert an iterable of '0x..'-style hex strings to a ':'-joined byte string.

    Each entry has its two-character '0x' prefix stripped and single hex
    digits are left-padded to a full byte, e.g. ['0x5', '0x1f'] -> '05:1f'.

    Args:
        original_input: iterable of hex-literal strings ('0x5', '0xff', ...)

    Returns:
        The colon-separated hex byte string ('' for an empty input).
    """
    # (Fixed: removed an unused str() copy of the input present in the
    # original implementation.)
    parts = []
    for value in original_input:
        digits = value[2:]
        # if only one digit is present, add another 0
        if len(digits) == 1:
            digits = '0' + digits
        parts.append(digits)
    return ':'.join(parts)
def format_display_string(line):
    """Insert ':' between every pair of characters, e.g. '0a1b' -> '0a:1b'."""
    # Slice the string into two-character groups and join them with colons;
    # an odd trailing character forms its own final group.
    pairs = [line[i:i + 2] for i in range(0, len(line), 2)]
    return ':'.join(pairs)
def convert_to_chan_num_list(line):
    """Convert a ':'-separated hex bitmask into a channel-range display string.

    Each byte of *line* (e.g. '03:80') is read LSB-first: bit b of byte i
    selects channel i*8 + b.  Consecutive channels are collapsed into
    'start-end' ranges and ranges are joined with ':' (e.g. '03' -> '0-1').
    Only channels 0..128 are considered.

    Args:
        line: colon-separated hex byte string; a byte may carry a trailing
              annotation after a space, which is ignored.

    Returns:
        Display string such as '0-1:5:8-12'; '' when no bit is set (the
        previous implementation raised IndexError in that case).
    """
    total_num_channels = 129
    # Step 1: build the LSB-first bit string for all bytes.
    binary_string = ''
    for value in line.split(':'):
        if ' ' in value:
            value = value.split(' ')[0]
        # Step 2: parse the hex byte and reverse its bits so bit 0 comes first.
        byte_value = int(value, 16)
        binary_string += '{0:08b}'.format(byte_value)[::-1]
    # Step 3: collect the set channels, capped at total_num_channels.
    channel_list = []
    for channel_num, bit in enumerate(binary_string):
        if channel_num == total_num_channels:
            break
        if bit == '1':
            channel_list.append(channel_num)
    if not channel_list:
        return ''
    # Step 4: collapse consecutive channels into 'start-end' ranges.
    result = str(channel_list[0])
    end = None
    for index, num in enumerate(channel_list[1:]):
        if num - 1 == channel_list[index]:  # the slice shifts the index by 1 for us
            end = str(num)
        else:
            if end:
                result += '-' + end
                end = None
            result += ':' + str(num)
    # Catch the last open range
    if end:
        result += '-' + str(num)
    return result
def convert_to_bitmask(input_line='0-128'):
    """Convert a channel-range string into a 17-byte little-endian bitmask.

    *input_line* is a ':'-separated list of ranges ('0-10:15:20-30'); an
    entry without '-' selects a single channel.  Bit b of byte i of the
    returned mask corresponds to channel i*8 + b.  At most the first 129
    listed channels are processed.

    Args:
        input_line: channel-range string, defaults to all channels '0-128'.

    Returns:
        Tuple (hex_string, byte_array): the same 17-byte little-endian mask
        as a lowercase hex string and as a bytearray.
    """
    # Step 1: expand '0-10:15' into the explicit channel list.
    channels = []
    for entry in input_line.split(':'):
        bounds = entry.split('-')
        start_channel = int(bounds[0])
        # No end channel given means exactly one channel is selected.
        end_channel = int(bounds[1]) if len(bounds) > 1 else start_channel
        channels.extend(range(start_channel, end_channel + 1))
    # Step 2: set one bit per channel; cap at 129 channels (0-128).
    # (Dead code removed: an unused bit-inversion loop and byte-boundary
    # bookkeeping that never affected the result.)
    mask = 0
    for channel in channels[:129]:
        mask |= 1 << channel
    # Step 3: render as exactly 17 byte pairs (34 hex digits).
    value = hex(mask)[2:].strip()
    value = value.zfill(34)
    if len(value) > 34:
        # if longer than 34 digits, keep only the first 17 bytes
        value = value[0:34]
    # Step 4: reverse the byte order to little-endian.
    mask_bytes = bytearray.fromhex(value)
    mask_bytes.reverse()
    return mask_bytes.hex(), mask_bytes
# Helper util function to parse received PROP_ROUTING_TABLE_UPDATE property info
def parse_routingtable_property(propRoutingTableAddrInfo):
    """
    Internal utility function to convert Routing Table Addr Info into structure
    Returns changed_type and dictionary entry
    """
    entry = {}
    update_type = -1
    dst_ipv6_addr = ""
    try:
        # Wire layout: 2-byte struct length, 1-byte change type, 16-byte
        # destination IPv6 address, 1-byte prefix length, 16-byte next-hop
        # IPv6 address, 4-byte lifetime (integers little-endian).
        raw = propRoutingTableAddrInfo[0:len(propRoutingTableAddrInfo)]
        update_type = int.from_bytes(raw[2:3], "little", signed=False)
        dst_ipv6_addr = ipaddress.IPv6Address(raw[3:19])
        entry["prefixLen"] = int.from_bytes(raw[19:20], "little", signed=False)
        entry["nextHopAddr"] = ipaddress.IPv6Address(raw[20:36])
        entry["lifetime"] = int.from_bytes(raw[36:40], "little", signed=False)
    except Exception as exc:
        print("Exception raised during Parsing Routing Table")
        print(exc)
    return (update_type, dst_ipv6_addr, entry)
| 6,801 | 2,183 |
from typing import TYPE_CHECKING, Optional
from ModularChess.movements.Movement import Movement, MovementData
if TYPE_CHECKING:
from ModularChess.pieces.Piece import Piece
from ModularChess.utils.Position import Position
class EnPassant(Movement):
    """Movement representing an en passant capture.

    The capture removes ``captured_piece`` from its square and relocates
    ``piece`` to ``new_position`` (which is not the captured piece's square).
    """

    def __init__(self, piece: "Piece", new_position: "Position", captured_piece: "Piece",
                 is_valid_move: Optional[bool] = None):
        # First remove the captured pawn, then relocate the capturing pawn.
        steps = [MovementData(captured_piece, captured_piece.position, None),
                 MovementData(piece, piece.position, new_position)]
        super().__init__(steps, piece=piece, destination=new_position, is_valid_move=is_valid_move)

    def __str__(self) -> str:
        # Algebraic notation is only defined for classic 2-D boards.
        if self.piece.board.dimensions != 2:
            return super().__str__()
        notation = self.piece.abbreviation()
        candidates = self.piece.board.pieces[self.player][type(self.piece)]
        destination = self.movements[-1].destination_position
        if destination is not None:
            # Disambiguate with the origin square when several identical
            # pieces could reach the same destination.
            reachable = [p for p in candidates if p.check_piece_valid_move(destination)]
            if len(reachable) > 1:
                notation += str(self.movements[-1].initial_position)
        if len(self) == 2:  # Capture
            notation += "x"
        return notation + str(self.movements[-1].destination_position) + ("+" if self.is_check else "")
| 1,383 | 422 |
# -*- coding: utf-8 -*-
"""
julabo.py
Contains Julabo temperature control
see documentation http://www.julabo.com/sites/default/files/downloads/manuals/french/19524837-V2.pdf at section 10.2.
:copyright: (c) 2015 by Maxime DAUPHIN
:license: MIT, see LICENSE for details
"""
import serial
import time
from .pytemperaturectrl import TemperatureControl
class Julabo(TemperatureControl):
    """Julabo Temperature control implementation.

    Speaks the Julabo ASCII serial protocol (manufacturer manual, section
    10.2): each command is a CR/LF-terminated line, and consecutive commands
    must be separated by at least ``MIN_TIME_INTERVAL`` seconds.
    """

    # Minimum delay between two serial commands, in seconds (see Julabo doc).
    MIN_TIME_INTERVAL = 0.250

    def __init__(self, *args, **kwargs):
        # BUGFIX: was super(TemperatureControl, self).__init__(), which skips
        # TemperatureControl's own initializer — super()'s first argument
        # must be the *current* class.
        super(Julabo, self).__init__()
        self.serial = None

    def checkIfOpen(self):
        """Check if serial port is open, raising otherwise."""
        if self.serial is None:  # identity test against None, not ==
            raise Exception("Please call open function before all communication")

    def open(self, com_port, baudrate=4800):
        """Open serial communication."""
        self.serial = serial.Serial(com_port,
                                    baudrate=baudrate,
                                    bytesize=serial.SEVENBITS,
                                    parity=serial.PARITY_EVEN,
                                    stopbits=serial.STOPBITS_ONE,
                                    timeout=1,
                                    xonxoff=False,
                                    rtscts=True,
                                    dsrdtr=False)

    def close(self):
        """Close serial communication."""
        self.checkIfOpen()
        if self.serial is not None:
            self.serial.close()

    def power(self, on):
        """Set power to on (True) or off (False)."""
        self.checkIfOpen()
        time.sleep(self.MIN_TIME_INTERVAL)
        value = 1 if on else 0
        # BUGFIX: the command was written as b'f"out_mode_05 {value}\r\n"' —
        # an f-string *inside* a bytes literal — so the device received the
        # literal text instead of the formatted command.
        self.serial.write('out_mode_05 {0}\r\n'.format(value).encode('ascii'))

    def getVersion(self):
        """Retrieve engine version string."""
        self.checkIfOpen()
        time.sleep(self.MIN_TIME_INTERVAL)
        self.serial.write(b'version\r\n')
        return self.serial.readline()

    def getStatus(self):
        """Retrieve engine status string."""
        self.checkIfOpen()
        time.sleep(self.MIN_TIME_INTERVAL)
        self.serial.write(b'status\r\n')
        return self.serial.readline()

    def setWorkTemperature(self, temperature_in_degree):
        """Set setpoint temperature (degrees Celsius)."""
        self.checkIfOpen()
        time.sleep(self.MIN_TIME_INTERVAL)
        # BUGFIX: same bytes/f-string mixup as in power().
        self.serial.write('out_sp_00 {0}\r\n'.format(temperature_in_degree).encode('ascii'))

    def getWorkTemperature(self):
        """Get setpoint temperature."""
        self.checkIfOpen()
        time.sleep(self.MIN_TIME_INTERVAL)
        self.serial.write(b'in_sp_00\r\n')
        return float(self.serial.readline())

    def getCurrentTemperature(self):
        """Get current tank temperature."""
        self.checkIfOpen()
        time.sleep(self.MIN_TIME_INTERVAL)
        self.serial.write(b'in_pv_00\r\n')
        return float(self.serial.readline())
| 2,387 | 978 |
"""
Some basic matrix-related functionality.
"""
def cumulative2d(grid):
    """
    Return the 2-D prefix-sum ("summed-area") table of *grid*.

    The result has one extra leading row and column of zeros, so entry
    ``[r][c]`` is the sum of the sub-grid ``grid[:r]`` restricted to the
    first ``c`` columns.

    >>> cumulative2d([[2, 5, 4], [3, 8, 1]])
    [[0, 0, 0, 0], [0, 2, 7, 11], [0, 5, 18, 23]]
    >>> cumulative2d([])
    [[0]]
    """
    from itertools import accumulate

    # An empty grid degenerates to the single zero corner entry
    # (previously this raised IndexError).
    if not grid:
        return [[0]]
    # Prefix-sum each row (with a leading 0), then prefix-sum down the rows.
    row_sums = [[0] + list(accumulate(row)) for row in grid]
    width = len(row_sums[0])
    table = [[0] * width]
    running = table[0]
    for row in row_sums:
        running = [a + b for a, b in zip(running, row)]
        table.append(running)
    return table
def transpose(grid):
    """
    Switches rows and columns.

    >>> transpose([[1, 2, 3], [4, 5, 6]])
    [[1, 4], [2, 5], [3, 6]]
    >>> transpose([])
    []
    """
    # zip(*grid) pairs the r-th element of every row; an empty grid now
    # yields [] instead of raising IndexError on grid[0].
    return [list(column) for column in zip(*grid)]
def moment(array):
    """
    Weight each element of *array* by its index.

    >>> moment([5, 6, 7, 2, 4])
    [0, 6, 14, 6, 16]
    """
    return [index * value for index, value in enumerate(array)]


def moment2d(grid):
    """
    Apply :func:`moment` to every row of *grid*.

    >>> moment2d([[5, 6, 7, 2, 4]])
    [[0, 6, 14, 6, 16]]
    """
    return [moment(row) for row in grid]
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from lxml import etree
import json
class DouyuSpider:
    """Selenium crawler for the Douyu LOL live-stream category.

    Walks the category page by page, scraping each room's title, popularity,
    URL and streamer name, and appends the records as JSON lines to
    ``douyu.json``.
    """

    def __init__(self):
        """Start a Chrome browser and load the category start page."""
        start_url = 'https://www.douyu.com/g_LOL'
        self.browser = webdriver.Chrome()
        self.browser.get(start_url)

    def get_one_page(self):
        """Scrape the current page via lxml and return a list of room dicts."""
        self.browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
        # Wait for the popularity badge of the 8th card — a proxy for "the
        # lazily loaded room list has rendered".
        path = '//*[@id="listAll"]/div[2]/ul/li[8]/div/a[1]/div[2]/div[2]/span'
        method = EC.presence_of_element_located((By.XPATH, path))
        wait = WebDriverWait(self.browser, 10)
        wait.until(method, message='加载超时')
        self.browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
        html = etree.HTML(self.browser.page_source)
        li_list = html.xpath('//ul[@class="layout-Cover-list"]/li')
        li_num = len(li_list)
        title_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/div[2]/div[1]/h3/@title'
        hot_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/div[2]/div[2]/span/text()'
        room_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/@href'
        user_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/div[2]/div[2]/h2/text()'
        items = []
        for num in range(li_num):
            # BUGFIX: create a fresh dict per room. Previously a single dict
            # was created before the loop and appended repeatedly, so every
            # element of `items` aliased the same object and the file ended
            # up full of copies of the last room (cf. fetch_one_page, which
            # already did this correctly).
            item = {}
            item['title'] = html.xpath(title_path.format(num+1))
            item['hot'] = html.xpath(hot_path.format(num+1))
            item['room_url'] = html.xpath(room_path.format(num+1))
            item['user'] = html.xpath(user_path.format(num+1))
            if num % 20 == 0:
                print(f'完成第{num+1}条数据')
                print(item)
            items.append(item)
        return items

    def fetch_one_page(self):
        """Alternative scraper using Selenium element lookups instead of lxml."""
        path = '//*[@id="listAll"]/div[2]/ul/li[8]/div/a[1]/div[2]/div[2]/span'
        method = EC.presence_of_element_located((By.XPATH, path))
        wait = WebDriverWait(self.browser, 10)
        wait.until(method, message='加载超时')
        self.browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
        self.browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
        li_list = self.browser.find_elements_by_xpath('//ul[@class="layout-Cover-list"]/li')
        li_num = len(li_list)
        title_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/div[2]/div[1]/h3'
        hot_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/div[2]/div[2]/span'
        room_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]'
        user_path = '//*[@id="listAll"]/div[2]/ul/li[{}]/div/a[1]/div[2]/div[2]/h2'
        items = []
        for num in range(li_num):
            item = {}
            item['title'] = self.browser.find_element_by_xpath(title_path.format(num+1)).get_attribute('title')
            item['hot'] = self.browser.find_element_by_xpath(hot_path.format(num+1)).text
            item['room_url'] = self.browser.find_element_by_xpath(room_path.format(num+1)).get_attribute('href')
            item['user'] = self.browser.find_element_by_xpath(user_path.format(num+1)).text
            if num % 20 == 0:
                print(f'完成第{num+1}条数据')
                print(item)
            items.append(item)
        return items

    def save_content(self, items):
        """Append each item as one JSON line to douyu.json."""
        with open('douyu.json', 'a+', encoding='utf-8') as f:
            for item in items:
                json.dump(item, f, ensure_ascii=False)
                f.write('\n')

    def get_next_url(self, num):
        """Jump to page ``num + 1``; return (has_next_flag, last_page_number)."""
        self.browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
        # The second-to-last pager item holds the highest page number.
        max_num = self.browser.find_element_by_xpath('//*[@id="listAll"]/div[2]/div/ul/li[last()-1]/a').text
        if num < int(max_num):
            self.browser.find_element_by_xpath('//*[@id="listAll"]/div[2]/div/span/span/input').clear()
            self.browser.find_element_by_xpath('//*[@id="listAll"]/div[2]/div/span/span/input').send_keys(num+1)
            self.browser.find_element_by_xpath('//*[@id="listAll"]/div[2]/div/span/span/span').click()
            next_flag = True
        if num >= int(max_num):
            next_flag = False
        return next_flag, max_num

    def run(self):
        """Crawl every page of the category, persisting results as we go."""
        next_flag = True
        num = 0
        while next_flag:
            items = self.get_one_page()
            self.save_content(items)
            if num % 2 == 0:
                self.browser.implicitly_wait(5)
            num += 1
            print('*'*10 + f'完成第{num}页' + '*'*10)
            next_flag, max_num = self.get_next_url(num)
            print(max_num)
if __name__ == '__main__':
    # Entry point: crawl the Douyu LOL category until the last page.
    spider = DouyuSpider()
    spider.run()
| 5,361 | 1,959 |
#!/usr/bin/env python
"""Packaging script for the ``bitcoinscript`` distribution."""

from setuptools import setup, find_packages
from codecs import open
from os import path

project_root = path.abspath(path.dirname(__file__))

# The README doubles as the long description shown on PyPI.
with open(path.join(project_root, 'README.rst'), encoding='utf-8') as readme:
    long_description = readme.read()

# Execute bitcoinscript/version.py in an isolated namespace to pick up the
# version string without importing the package itself.
version_namespace = {}
with open("bitcoinscript/version.py") as version_file:
    exec(version_file.read(), version_namespace)
version_string = version_namespace['__version_string__']

setup(
    name='bitcoinscript',
    description='Bitcoin Script Debugger and Interactive Shell',
    long_description=long_description,
    version=version_string,
    author='fungibit',
    author_email='fungibit@yandex.com',
    url='https://github.com/fungibit/bitcoinscript',
    license='MIT',
    packages=find_packages(exclude=['tests*', 'bin']),
    platforms=["POSIX", "Windows"],
    keywords='bitcoin, script, bitcoin-script, blockchain',
)
| 960 | 314 |
# ___ ___ ___ ___ ___ ___
# /\ \ /\ \ /\ \ /\ \ /\ \ /\ \
# /::\ \ /::\ \ \:\ \ /::\ \ /::\ \ /::\ \
# /:/\:\ \ /:/\:\ \ \:\ \ /:/\:\ \ /:/\:\ \ /:/\:\ \
# /:/ \:\ \ /:/ \:\ \ /::\ \ /::\~\:\ \ /::\~\:\ \ /::\~\:\ \
# /:/__/ \:\__\/:/__/ \:\__\ /:/\:\__\/:/\:\ \:\__\/:/\:\ \:\__\/:/\:\ \:\__\
# \:\ \ /:/ /\:\ \ \/__//:/ \/__/\/_|::\/:/ /\:\~\:\ \/__/\:\~\:\ \/__/
# \:\ /:/ / \:\ \ /:/ / |:|::/ / \:\ \:\__\ \:\ \:\__\
# \:\/:/ / \:\ \ \/__/ |:|\/__/ \:\ \/__/ \:\ \/__/
# \::/ / \:\__\ |:| | \:\__\ \:\__\
# \/__/ \/__/ \|__| \/__/ \/__/
#
#
#
# .----------------.----------------.
# /| /| /|
# / | / | / |
# / | 6 / | 7 / |
# / | / | / |
# .----------------.----+-----------. |
# /| . ---------/|----.----------/|----.
# / | /| / | /| / | /|
# / | / | 4 / | / | 5 / | / |
# / | / | / | / | / | / |
# . -------------- .----------------. |/ |
# | . ---+------|----.----+------|----. |
# | /| .______|___/|____.______|___/|____.
# | / | / 2 | / | / 3 | / | /
# | / | / | / | / | / | /
# . ---+---------- . ---+---------- . | /
# | |/ | |/ | |/ z
# | . ----------|----.-----------|----. ^ y
# | / 0 | / 1 | / | /
# | / | / | / | /
# | / | / | / o----> x
# . -------------- . -------------- .
#
#
# Face Refinement:
#
# 2_______________3 _______________
# | | | | |
# ^ | | | 2 | 3 |
# | | | | | |
# | | x | ---> |-------+-------|
# t1 | | | | |
# | | | 0 | 1 |
# |_______________| |_______|_______|
# 0 t0--> 1
#
#
# Face and Edge naming conventions:
#
# fZp
# |
# 6 ------eX3------ 7
# /| | / |
# /eZ2 . / eZ3
# eY2 | fYp eY3 |
# / | / fXp|
# 4 ------eX2----- 5 |
# |fXm 2 -----eX1--|---- 3 z
# eZ0 / | eY1 ^ y
# | eY0 . fYm eZ1 / | /
# | / | | / | /
# 0 ------eX0------1 o----> x
# |
# fZm
#
#
# fX fY
# 2___________3 2___________3
# | e1 | | e1 |
# | | | |
# e0 | x | e2 z e0 | x | e2 z
# | | ^ | | ^
# |___________| |___> y |___________| |___> x
# 0 e3 1 0 e3 1
# fZ
# 2___________3
# | e1 |
# | |
# e0 | x | e2 y
# | | ^
# |___________| |___> x
# 0 e3 1
from discretize.base import BaseTensorMesh
from discretize.operators import InnerProducts, DiffOperators
from discretize.mixins import InterfaceMixins, TreeMeshIO
from discretize.utils import as_array_n_by_dim
from discretize._extensions.tree_ext import _TreeMesh, TreeCell
import numpy as np
import scipy.sparse as sp
import warnings
from discretize.utils.code_utils import deprecate_property
class TreeMesh(
    _TreeMesh, BaseTensorMesh, InnerProducts, DiffOperators, TreeMeshIO, InterfaceMixins
):
    """Class for QuadTree (2D) and OcTree (3D) meshes.

    Tree meshes are numerical grids where the dimensions of each cell are powers of 2
    larger than some base cell dimension. Unlike the :class:`~discretize.TensorMesh`
    class, gridded locations and numerical operators for instances of ``TreeMesh``
    cannot be simply constructed using tensor products. Furthermore, each cell
    is an instance of ``TreeMesh`` is an instance of the
    :class:`~discretize.tree_mesh.TreeCell` .

    Parameters
    ----------
    h : (dim) iterable of int, numpy.ndarray, or tuple
        Defines the cell widths of the *underlying tensor mesh* along each axis. The
        length of the iterable object is equal to the dimension of the mesh (2 or 3).
        For a 3D mesh, the list would have the form *[hx, hy, hz]*. The number of cells
        along each axis **must be a power of 2** .
        Along each axis, the user has 3 choices for defining the cells widths for the
        underlying tensor mesh:

        - :class:`int` -> A unit interval is equally discretized into `N` cells.
        - :class:`numpy.ndarray` -> The widths are explicity given for each cell
        - the widths are defined as a :class:`list` of :class:`tuple` of the form *(dh, nc, [npad])*
          where *dh* is the cell width, *nc* is the number of cells, and *npad* (optional)
          is a padding factor denoting exponential increase/decrease in the cell width
          for each cell; e.g. *[(2., 10, -1.3), (2., 50), (2., 10, 1.3)]*
    origin : (dim) iterable, default: 0
        Define the origin or 'anchor point' of the mesh; i.e. the bottom-left-frontmost
        corner. By default, the mesh is anchored such that its origin is at [0, 0, 0].
        For each dimension (x, y or z), The user may set the origin 2 ways:

        - a ``scalar`` which explicitly defines origin along that dimension.
        - **{'0', 'C', 'N'}** a :class:`str` specifying whether the zero coordinate along
          each axis is the first node location ('0'), in the center ('C') or the last
          node location ('N') (see Examples).

    Examples
    --------
    Here we generate a basic 2D tree mesh.

    >>> from discretize import TreeMesh
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt

    Define base mesh (domain and finest discretization),

    >>> dh = 5    # minimum cell width (base mesh cell width)
    >>> nbc = 64  # number of base mesh cells
    >>> h = dh * np.ones(nbc)
    >>> mesh = TreeMesh([h, h])

    Define corner points for a rectangular box, and subdived the mesh within the box
    to the maximum refinement level.

    >>> x0s = [120.0, 80.0]
    >>> x1s = [240.0, 160.0]
    >>> levels = [mesh.max_level]
    >>> mesh.refine_box(x0s, x1s, levels)
    >>> mesh.plot_grid()
    >>> plt.show()
    """

    _meshType = "TREE"
    # Legacy short attribute names mapped onto their descriptive properties,
    # e.g. ``mesh.ntN`` resolves to ``mesh.n_total_nodes``. Merged with the
    # alias tables of the parent mixins.
    _aliases = {
        **BaseTensorMesh._aliases,
        **DiffOperators._aliases,
        **{
            "ntN": "n_total_nodes",
            "ntEx": "n_total_edges_x",
            "ntEy": "n_total_edges_y",
            "ntEz": "n_total_edges_z",
            "ntE": "n_total_edges",
            "ntFx": "n_total_faces_x",
            "ntFy": "n_total_faces_y",
            "ntFz": "n_total_faces_z",
            "ntF": "n_total_faces",
            "nhN": "n_hanging_nodes",
            "nhEx": "n_hanging_edges_x",
            "nhEy": "n_hanging_edges_y",
            "nhEz": "n_hanging_edges_z",
            "nhE": "n_hanging_edges",
            "nhFx": "n_hanging_faces_x",
            "nhFy": "n_hanging_faces_y",
            "nhFz": "n_hanging_faces_z",
            "nhF": "n_hanging_faces",
            "gridhN": "hanging_nodes",
            "gridhFx": "hanging_faces_x",
            "gridhFy": "hanging_faces_y",
            "gridhFz": "hanging_faces_z",
            "gridhEx": "hanging_edges_x",
            "gridhEy": "hanging_edges_y",
            "gridhEz": "hanging_edges_z",
        },
    }
    # Attributes serialized when saving/loading the mesh.
    _items = {"h", "origin", "cell_state"}

    # inheriting stuff from BaseTensorMesh that isn't defined in _QuadTree
def __init__(self, h=None, origin=None, **kwargs):
if "x0" in kwargs:
origin = kwargs.pop("x0")
super().__init__(h=h, origin=origin)
cell_state = kwargs.pop("cell_state", None)
cell_indexes = kwargs.pop("cell_indexes", None)
cell_levels = kwargs.pop("cell_levels", None)
if cell_state is None:
if cell_indexes is not None and cell_levels is not None:
cell_state = {}
cell_state["indexes"] = cell_indexes
cell_state["levels"] = cell_levels
if cell_state is not None:
indexes = cell_state["indexes"]
levels = cell_state["levels"]
self.__setstate__((indexes, levels))
    def __repr__(self):
        """Plain text representation."""
        mesh_name = "{0!s}TreeMesh".format(("Oc" if self.dim == 3 else "Quad"))
        top = "\n" + mesh_name + ": {0:2.2f}% filled\n\n".format(self.fill * 100)
        # Number of cells per level
        level_count = self._count_cells_per_index()
        non_zero_levels = np.nonzero(level_count)[0]
        cell_display = ["Level : Number of cells"]
        cell_display.append("-----------------------")
        for level in non_zero_levels:
            cell_display.append("{:^5} : {:^15}".format(level, level_count[level]))
        cell_display.append("-----------------------")
        cell_display.append("Total : {:^15}".format(self.nC))
        extent_display = [" Mesh Extent "]
        extent_display.append(" min , max ")
        extent_display.append(" ---------------------------")
        dim_label = {0: "x", 1: "y", 2: "z"}
        for dim in range(self.dim):
            # nodes_x / nodes_y / nodes_z hold the node coordinates per axis.
            n_vector = getattr(self, "nodes_" + dim_label[dim])
            extent_display.append(
                "{}: {:^13},{:^13}".format(dim_label[dim], n_vector[0], n_vector[-1])
            )
        # Paste the extent column to the right of the cell-count column,
        # growing the left column with blank padding when it is shorter.
        for i, line in enumerate(extent_display):
            if i == len(cell_display):
                cell_display.append(" " * (len(cell_display[0]) - 3 - len(line)))
            cell_display[i] += 3 * " " + line
        h_display = [" Cell Widths "]
        h_display.append(" min , max ")
        h_display.append("-" * (len(h_display[0])))
        h_gridded = self.h_gridded
        mins = np.min(h_gridded, axis=0)
        maxs = np.max(h_gridded, axis=0)
        for dim in range(self.dim):
            h_display.append("{:^10}, {:^10}".format(mins[dim], maxs[dim]))
        # Paste the cell-width column to the right as well.
        for i, line in enumerate(h_display):
            if i == len(cell_display):
                cell_display.append(" " * len(cell_display[0]))
            cell_display[i] += 3 * " " + line
        return top + "\n".join(cell_display)
    def _repr_html_(self):
        """html representation (rendered by Jupyter notebooks)."""
        mesh_name = "{0!s}TreeMesh".format(("Oc" if self.dim == 3 else "Quad"))
        level_count = self._count_cells_per_index()
        non_zero_levels = np.nonzero(level_count)[0]
        dim_label = {0: "x", 1: "y", 2: "z"}
        h_gridded = self.h_gridded
        mins = np.min(h_gridded, axis=0)
        maxs = np.max(h_gridded, axis=0)
        style = " style='padding: 5px 20px 5px 20px;'"
        # Cell level table:
        cel_tbl = "<table>\n"
        cel_tbl += "<tr>\n"
        cel_tbl += "<th" + style + ">Level</th>\n"
        cel_tbl += "<th" + style + ">Number of cells</th>\n"
        cel_tbl += "</tr>\n"
        for level in non_zero_levels:
            cel_tbl += "<tr>\n"
            cel_tbl += "<td" + style + ">{}</td>\n".format(level)
            cel_tbl += "<td" + style + ">{}</td>\n".format(level_count[level])
            cel_tbl += "</tr>\n"
        cel_tbl += "<tr>\n"
        cel_tbl += (
            "<td style='font-weight: bold; padding: 5px 20px 5px 20px;'> Total </td>\n"
        )
        cel_tbl += "<td" + style + "> {} </td>\n".format(self.nC)
        cel_tbl += "</tr>\n"
        cel_tbl += "</table>\n"
        # Mesh extent / cell widths detail table (one row per axis):
        det_tbl = "<table>\n"
        det_tbl += "<tr>\n"
        det_tbl += "<th></th>\n"
        det_tbl += "<th" + style + " colspan='2'>Mesh extent</th>\n"
        det_tbl += "<th" + style + " colspan='2'>Cell widths</th>\n"
        det_tbl += "</tr>\n"
        det_tbl += "<tr>\n"
        det_tbl += "<th></th>\n"
        det_tbl += "<th" + style + ">min</th>\n"
        det_tbl += "<th" + style + ">max</th>\n"
        det_tbl += "<th" + style + ">min</th>\n"
        det_tbl += "<th" + style + ">max</th>\n"
        det_tbl += "</tr>\n"
        for dim in range(self.dim):
            # nodes_x / nodes_y / nodes_z hold the node coordinates per axis.
            n_vector = getattr(self, "nodes_" + dim_label[dim])
            det_tbl += "<tr>\n"
            det_tbl += "<td" + style + ">{}</td>\n".format(dim_label[dim])
            det_tbl += "<td" + style + ">{}</td>\n".format(n_vector[0])
            det_tbl += "<td" + style + ">{}</td>\n".format(n_vector[-1])
            det_tbl += "<td" + style + ">{}</td>\n".format(mins[dim])
            det_tbl += "<td" + style + ">{}</td>\n".format(maxs[dim])
            det_tbl += "</tr>\n"
        det_tbl += "</table>\n"
        # Outer table: header row plus the two sub-tables side by side.
        full_tbl = "<table>\n"
        full_tbl += "<tr>\n"
        full_tbl += "<td style='font-weight: bold; font-size: 1.2em; text-align: center;'>{}</td>\n".format(
            mesh_name
        )
        full_tbl += "<td style='font-size: 1.2em; text-align: center;' colspan='2'>{0:2.2f}% filled</td>\n".format(
            100 * self.fill
        )
        full_tbl += "</tr>\n"
        full_tbl += "<tr>\n"
        full_tbl += "<td>\n"
        full_tbl += cel_tbl
        full_tbl += "</td>\n"
        full_tbl += "<td>\n"
        full_tbl += det_tbl
        full_tbl += "</td>\n"
        full_tbl += "</tr>\n"
        full_tbl += "</table>\n"
        return full_tbl
    @BaseTensorMesh.origin.setter
    def origin(self, value):
        # first use the BaseTensorMesh to set the origin to handle "0, C, N"
        BaseTensorMesh.origin.fset(self, value)
        # then update the TreeMesh with the hidden value
        self._set_origin(self._origin)
@property
def vntF(self):
"""
Vector number of total faces along each axis
This property returns the total number of hanging and
non-hanging faces along each axis direction. The returned
quantity is a list of integers of the form [nFx,nFy,nFz].
Returns
-------
list of int
Vector number of total faces along each axis
"""
return [self.ntFx, self.ntFy] + ([] if self.dim == 2 else [self.ntFz])
@property
def vntE(self):
"""
Vector number of total edges along each axis
This property returns the total number of hanging and
non-hanging edges along each axis direction. The returned
quantity is a list of integers of the form [nEx,nEy,nEz].
Returns
-------
list of int
Vector number of total edges along each axis
"""
return [self.ntEx, self.ntEy] + ([] if self.dim == 2 else [self.ntEz])
@property
def stencil_cell_gradient(self):
if getattr(self, "_stencil_cell_gradient", None) is None:
self._stencil_cell_gradient = sp.vstack(
[self.stencil_cell_gradient_x, self.stencil_cell_gradient_y]
)
if self.dim == 3:
self._stencil_cell_gradient = sp.vstack(
[self._stencil_cell_gradient, self.stencil_cell_gradient_z]
)
return self._stencil_cell_gradient
@property
def cell_gradient(self):
if getattr(self, "_cell_gradient", None) is None:
i_s = self.face_boundary_indices
ix = np.ones(self.nFx)
ix[i_s[0]] = 0.0
ix[i_s[1]] = 0.0
Pafx = sp.diags(ix)
iy = np.ones(self.nFy)
iy[i_s[2]] = 0.0
iy[i_s[3]] = 0.0
Pafy = sp.diags(iy)
MfI = self.get_face_inner_product(invMat=True)
if self.dim == 2:
Pi = sp.block_diag([Pafx, Pafy])
elif self.dim == 3:
iz = np.ones(self.nFz)
iz[i_s[4]] = 0.0
iz[i_s[5]] = 0.0
Pafz = sp.diags(iz)
Pi = sp.block_diag([Pafx, Pafy, Pafz])
self._cell_gradient = (
-Pi * MfI * self.face_divergence.T * sp.diags(self.cell_volumes)
)
return self._cell_gradient
@property
def cell_gradient_x(self):
if getattr(self, "_cell_gradient_x", None) is None:
nFx = self.nFx
i_s = self.face_boundary_indices
ix = np.ones(self.nFx)
ix[i_s[0]] = 0.0
ix[i_s[1]] = 0.0
Pafx = sp.diags(ix)
MfI = self.get_face_inner_product(invMat=True)
MfIx = sp.diags(MfI.diagonal()[:nFx])
self._cell_gradient_x = (
-Pafx * MfIx * self.face_x_divergence.T * sp.diags(self.cell_volumes)
)
return self._cell_gradient_x
@property
def cell_gradient_y(self):
if getattr(self, "_cell_gradient_y", None) is None:
nFx = self.nFx
nFy = self.nFy
i_s = self.face_boundary_indices
iy = np.ones(self.nFy)
iy[i_s[2]] = 0.0
iy[i_s[3]] = 0.0
Pafy = sp.diags(iy)
MfI = self.get_face_inner_product(invMat=True)
MfIy = sp.diags(MfI.diagonal()[nFx : nFx + nFy])
self._cell_gradient_y = (
-Pafy * MfIy * self.face_y_divergence.T * sp.diags(self.cell_volumes)
)
return self._cell_gradient_y
@property
def cell_gradient_z(self):
if self.dim == 2:
raise TypeError("z derivative not defined in 2D")
if getattr(self, "_cell_gradient_z", None) is None:
nFx = self.nFx
nFy = self.nFy
i_s = self.face_boundary_indices
iz = np.ones(self.nFz)
iz[i_s[4]] = 0.0
iz[i_s[5]] = 0.0
Pafz = sp.diags(iz)
MfI = self.get_face_inner_product(invMat=True)
MfIz = sp.diags(MfI.diagonal()[nFx + nFy :])
self._cell_gradient_z = (
-Pafz * MfIz * self.face_z_divergence.T * sp.diags(self.cell_volumes)
)
return self._cell_gradient_z
@property
def face_x_divergence(self):
if getattr(self, "_face_x_divergence", None) is None:
self._face_x_divergence = self.face_divergence[:, : self.nFx]
return self._face_x_divergence
@property
def face_y_divergence(self):
if getattr(self, "_face_y_divergence", None) is None:
self._face_y_divergence = self.face_divergence[
:, self.nFx : self.nFx + self.nFy
]
return self._face_y_divergence
@property
def face_z_divergence(self):
if getattr(self, "_face_z_divergence", None) is None:
self._face_z_divergence = self.face_divergence[:, self.nFx + self.nFy :]
return self._face_z_divergence
def point2index(self, locs):
"""Finds cells that contain the given points.
Returns an array of index values of the cells that contain the given
points
Parameters
----------
locs: (N, dim) array_like
points to search for the location of
Returns
-------
(N) array_like of int
Cell indices that contain the points
"""
locs = as_array_n_by_dim(locs, self.dim)
inds = self._get_containing_cell_indexes(locs)
return inds
def cell_levels_by_index(self, indices):
"""Fast function to return a list of levels for the given cell indices
Parameters
----------
index: (N) array_like
Cell indexes to query
Returns
-------
(N) numpy.ndarray of int
Levels for the cells.
"""
return self._cell_levels_by_indexes(indices)
def get_interpolation_matrix(
self, locs, location_type="CC", zeros_outside=False, **kwargs
):
"""Produces interpolation matrix
Parameters
----------
loc : (N, dim) array_like
Location of points to interpolate to
location_type: str, optional
What to interpolate
location_type can be:
- 'CC' -> scalar field defined on cell centers
- 'Ex' -> x-component of field defined on edges
- 'Ey' -> y-component of field defined on edges
- 'Ez' -> z-component of field defined on edges
- 'Fx' -> x-component of field defined on faces
- 'Fy' -> y-component of field defined on faces
- 'Fz' -> z-component of field defined on faces
- 'N' -> scalar field defined on nodes
Returns
-------
(N, n_loc_type) scipy.sparse.csr_matrix
the interpolation matrix
"""
if "locType" in kwargs:
warnings.warn(
"The locType keyword argument has been deprecated, please use location_type. "
"This will be removed in discretize 1.0.0",
DeprecationWarning,
)
location_type = kwargs["locType"]
if "zerosOutside" in kwargs:
warnings.warn(
"The zerosOutside keyword argument has been deprecated, please use zeros_outside. "
"This will be removed in discretize 1.0.0",
DeprecationWarning,
)
zeros_outside = kwargs["zerosOutside"]
locs = as_array_n_by_dim(locs, self.dim)
if location_type not in ["N", "CC", "Ex", "Ey", "Ez", "Fx", "Fy", "Fz"]:
raise Exception(
"location_type must be one of N, CC, Ex, Ey, Ez, Fx, Fy, or Fz"
)
if self.dim == 2 and location_type in ["Ez", "Fz"]:
raise Exception("Unable to interpolate from Z edges/face in 2D")
locs = np.require(np.atleast_2d(locs), dtype=np.float64, requirements="C")
if location_type == "N":
Av = self._getNodeIntMat(locs, zeros_outside)
elif location_type in ["Ex", "Ey", "Ez"]:
Av = self._getEdgeIntMat(locs, zeros_outside, location_type[1])
elif location_type in ["Fx", "Fy", "Fz"]:
Av = self._getFaceIntMat(locs, zeros_outside, location_type[1])
elif location_type in ["CC"]:
Av = self._getCellIntMat(locs, zeros_outside)
return Av
@property
def permute_cells(self):
"""Permutation matrix re-ordering of cells sorted by x, then y, then z
Returns
-------
(n_cells, n_cells) scipy.sparse.csr_matrix
"""
# TODO: cache these?
P = np.lexsort(self.gridCC.T) # sort by x, then y, then z
return sp.identity(self.nC).tocsr()[P]
@property
def permute_faces(self):
"""Permutation matrix re-ordering of faces sorted by x, then y, then z
Returns
-------
(n_faces, n_faces) scipy.sparse.csr_matrix
"""
# TODO: cache these?
Px = np.lexsort(self.gridFx.T)
Py = np.lexsort(self.gridFy.T) + self.nFx
if self.dim == 2:
P = np.r_[Px, Py]
else:
Pz = np.lexsort(self.gridFz.T) + (self.nFx + self.nFy)
P = np.r_[Px, Py, Pz]
return sp.identity(self.nF).tocsr()[P]
@property
def permute_edges(self):
"""Permutation matrix re-ordering of edges sorted by x, then y, then z
Returns
-------
(n_edges, n_edges) scipy.sparse.csr_matrix
"""
# TODO: cache these?
Px = np.lexsort(self.gridEx.T)
Py = np.lexsort(self.gridEy.T) + self.nEx
if self.dim == 2:
P = np.r_[Px, Py]
if self.dim == 3:
Pz = np.lexsort(self.gridEz.T) + (self.nEx + self.nEy)
P = np.r_[Px, Py, Pz]
return sp.identity(self.nE).tocsr()[P]
@property
def cell_state(self):
""" The current state of the cells on the mesh.
This represents the x, y, z indices of the cells in the base tensor mesh, as
well as their levels. It can be used to reconstruct the mesh.
Returns
-------
dict
dictionary with two entries:
- ``"indexes"``: the indexes of the cells
- ``"levels"``: the levels of the cells
"""
indexes, levels = self.__getstate__()
return {"indexes": indexes.tolist(), "levels": levels.tolist()}
    def validate(self):
        """Return whether the mesh has been finalized."""
        return self.finalized

    def equals(self, other):
        """Compare with another mesh; only two finalized meshes can be equal."""
        try:
            if self.finalized and other.finalized:
                return super().equals(other)
        except AttributeError:
            # `other` is not a tree mesh (no `finalized`/`equals`).
            pass
        return False

    def __reduce__(self):
        # Pickle support: rebuild from (h, origin), then restore cell state.
        return TreeMesh, (self.h, self.origin), self.__getstate__()
cellGrad = deprecate_property("cell_gradient", "cellGrad", removal_version="1.0.0", future_warn=False)
cellGradx = deprecate_property(
"cell_gradient_x", "cellGradx", removal_version="1.0.0", future_warn=False
)
cellGrady = deprecate_property(
"cell_gradient_y", "cellGrady", removal_version="1.0.0", future_warn=False
)
cellGradz = deprecate_property(
"cell_gradient_z", "cellGradz", removal_version="1.0.0", future_warn=False
)
cellGradStencil = deprecate_property(
"cell_gradient_stencil", "cellGradStencil", removal_version="1.0.0", future_warn=False
)
nodalGrad = deprecate_property(
"nodal_gradient", "nodalGrad", removal_version="1.0.0", future_warn=False
)
nodalLaplacian = deprecate_property(
"nodal_laplacian", "nodalLaplacian", removal_version="1.0.0", future_warn=False
)
faceDiv = deprecate_property("face_divergence", "faceDiv", removal_version="1.0.0", future_warn=False)
faceDivx = deprecate_property(
"face_x_divergence", "faceDivx", removal_version="1.0.0", future_warn=False
)
faceDivy = deprecate_property(
"face_y_divergence", "faceDivy", removal_version="1.0.0", future_warn=False
)
faceDivz = deprecate_property(
"face_z_divergence", "faceDivz", removal_version="1.0.0", future_warn=False
)
edgeCurl = deprecate_property("edge_curl", "edgeCurl", removal_version="1.0.0", future_warn=False)
maxLevel = deprecate_property("max_used_level", "maxLevel", removal_version="1.0.0", future_warn=False)
vol = deprecate_property("cell_volumes", "vol", removal_version="1.0.0", future_warn=False)
areaFx = deprecate_property("face_x_areas", "areaFx", removal_version="1.0.0", future_warn=False)
areaFy = deprecate_property("face_y_areas", "areaFy", removal_version="1.0.0", future_warn=False)
areaFz = deprecate_property("face_z_areas", "areaFz", removal_version="1.0.0", future_warn=False)
area = deprecate_property("face_areas", "area", removal_version="1.0.0", future_warn=False)
edgeEx = deprecate_property("edge_x_lengths", "edgeEx", removal_version="1.0.0", future_warn=False)
edgeEy = deprecate_property("edge_y_lengths", "edgeEy", removal_version="1.0.0", future_warn=False)
edgeEz = deprecate_property("edge_z_lengths", "edgeEz", removal_version="1.0.0", future_warn=False)
edge = deprecate_property("edge_lengths", "edge", removal_version="1.0.0", future_warn=False)
permuteCC = deprecate_property(
"permute_cells", "permuteCC", removal_version="1.0.0", future_warn=False
)
permuteF = deprecate_property("permute_faces", "permuteF", removal_version="1.0.0", future_warn=False)
permuteE = deprecate_property("permute_edges", "permuteE", removal_version="1.0.0", future_warn=False)
faceBoundaryInd = deprecate_property(
"face_boundary_indices", "faceBoundaryInd", removal_version="1.0.0", future_warn=False
)
cellBoundaryInd = deprecate_property(
"cell_boundary_indices", "cellBoundaryInd", removal_version="1.0.0", future_warn=False
)
_aveCC2FxStencil = deprecate_property(
"average_cell_to_total_face_x", "_aveCC2FxStencil", removal_version="1.0.0", future_warn=False
)
_aveCC2FyStencil = deprecate_property(
"average_cell_to_total_face_y", "_aveCC2FyStencil", removal_version="1.0.0", future_warn=False
)
_aveCC2FzStencil = deprecate_property(
"average_cell_to_total_face_z", "_aveCC2FzStencil", removal_version="1.0.0", future_warn=False
)
_cellGradStencil = deprecate_property(
"stencil_cell_gradient", "_cellGradStencil", removal_version="1.0.0", future_warn=False
)
_cellGradxStencil = deprecate_property(
"stencil_cell_gradient_x", "_cellGradxStencil", removal_version="1.0.0", future_warn=False
)
_cellGradyStencil = deprecate_property(
"stencil_cell_gradient_y", "_cellGradyStencil", removal_version="1.0.0", future_warn=False
)
_cellGradzStencil = deprecate_property(
"stencil_cell_gradient_z", "_cellGradzStencil", removal_version="1.0.0", future_warn=False
)
| 29,850 | 10,122 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019, Linear Labs Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from yahoo_finance_api2 import share
import torch.tensor as tt
import torch,random
def get_targets(rows):
    """Build regression targets from OHLCV rows.

    For this tutorial the target for step ``i`` is simply that day's high
    price wrapped as a 1x1x1 tensor.  One target is produced for every row
    except the first (index 0 has no preceding input step), i.e. the list
    is aligned one step ahead of :func:`get_inputs`.

    ``rows`` maps the keys ``timestamp`` / ``open`` / ``high`` / ``low`` /
    ``close`` / ``volume`` to equal-length sequences (see the raw payload
    returned by yahoo_finance_api2).
    """
    highs = rows['high']
    total = len(rows['open'])
    targets = []
    for step in range(1, total):
        targets.append(tt([[[highs[step]]]]))
    return targets
def get_inputs(rows):
    """Build one 1x1x5 feature tensor per step: open, close, volume, low, high.

    The last row is skipped so every input lines up with a target from
    :func:`get_targets` (which starts at index 1).
    """
    features = []
    for step in range(len(rows['open']) - 1):
        snapshot = [rows['open'][step], rows['close'][step],
                    rows['volume'][step], rows['low'][step],
                    rows['high'][step]]
        features.append(tt([[snapshot]]))
    return features
def main(args):
    """Fetch OHLCV data and save an (inputs, targets) training dataset.

    Data either comes from a local CSV (``args.csv``) or is downloaded from
    Yahoo Finance for ``args.ticker`` over the last ``args.start`` days at
    ``60 / args.frequency``-minute resolution.  The resulting tensors are
    written with ``torch.save`` to ``args.output_file``.

    Fix: removed the unused ``today = datetime.date.today()`` local and the
    now-unneeded ``datetime`` import (dead code — never referenced).
    """
    if args.csv:
        import pandas as pd
        data = pd.read_csv(args.csv)
    else:
        ticker = share.Share(args.ticker)
        # period type = days, period = args.start days back,
        # frequency expressed in minutes per sample
        data = ticker.get_historical(share.PERIOD_TYPE_DAY, args.start,
                                     share.FREQUENCY_TYPE_MINUTE,
                                     int(60 / args.frequency))
    torch.save({
        'inputs': get_inputs(data),
        'targets': get_targets(data)
    }, args.output_file)
if __name__ == '__main__':
    # Command-line entry point: build the dataset for one ticker (or a CSV).
    parser = argparse.ArgumentParser()
    parser.add_argument('-t','--ticker',help="enter the stock ticker symbol",required=True)
    # NOTE(review): despite the help text, this is a *number of days*, not a
    # date — it is forwarded as the period argument of get_historical.
    parser.add_argument('-s','--start',help="start date of data to grab. default is 75 days ago",default=75,type=int)
    parser.add_argument('-o','--output_file',help="name of the output file to save the dataset",default='trader.ds')
    # Samples per hour: 1 -> 60-minute bars, 2 -> 30-minute bars, etc.
    parser.add_argument('-f','--frequency',help='how frequent to sample each day of trading (in hourly fractions)',type=int,default=1)
    parser.add_argument('--csv',help='the csv file to load instead of downloading fresh data',default=None)
    main( parser.parse_args() )
import os
from project import app, db
class BaseConfig:
    """Base configuration shared by every environment.

    Fix: removed the leftover debug ``print('Running through config')``
    statement, which executed (and polluted stdout) every time this module
    was imported.
    """
    TESTING = False
    SQLALCHEMY_TRACK_MODIFICATIONS = False
class DevelopmentConfig(BaseConfig):
    """Development configuration"""
    # Connection/identity settings come from the environment; each value is
    # None when the corresponding variable is unset.
    SQLALCHEMY_DATABASE_URI = os.environ.get('POSTGRES_URL')
    MASTER_STATION = os.environ.get('MASTER_STATION')
    MASTER_ELEVATOR = os.environ.get('MASTER_ELEVATOR')
    MONGO_URI = os.environ.get('MONGO_URI')
    # Fixed Mongo database name for the dev environment.
    MONGO_DBNAME = 'eva_dev'
class TestingConfig(BaseConfig):
    """Testing configuration"""
    # Enables Flask/extension test behaviour (overrides BaseConfig).
    TESTING = True
    # Separate database URL so tests never touch dev/prod data.
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_TEST_URL')
class ProductionConfig(BaseConfig):
    """Production configuration"""
    # Production database URL; None if DATABASE_URL is not set.
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
from sqlalchemy import Column,Integer,String, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from config.envvar import *
Base = declarative_base()
class Steps(Base):
    """ORM model for a single step of a recipe."""
    __tablename__ = "steps"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Recipe this step belongs to; indexed for lookups by recipe.
    recipe_id = Column(String(128),index=True)
    # Ordering of the step inside its recipe.
    position = Column(Integer)
    # Step payload — presumably a serialized list of items; verify format
    # against the writer of this table.
    items = Column(String(256))
# Build the MySQL connection URL from the environment-backed ``db_config``
# mapping and create the schema at import time.
# NOTE(review): credentials are interpolated directly into the URL — this
# assumes the db_config values are already URL-safe; verify.
engine = create_engine('mysql+pymysql://'+db_config["DB_USER"]+':'+db_config["DB_PASSWORD"]+'@'+db_config["DB_HOST"]+'/'
                       + db_config["DB_NAME"])
Base.metadata.create_all(engine)
| 625 | 210 |
class ChoiceGenerator:
    '''
    Generates (nonrecursively) all of the combinations of a choose b, where a, b
    are nonnegative integers and a >= b. The values of a and b are given in the
    constructor, and the sequence of choices is obtained by repeatedly calling
    the next() method. When the sequence is finished, None is returned.

    A valid combination for the sequence of combinations for a choose b
    generated by this class is an array x[] of b integers i, 0 <= i < a, such
    that x[j] < x[j + 1] for each j from 0 to b - 2.
    '''

    def __init__(self, a, b):
        '''
        Constructs a new choice generator for a choose b. Once this
        initialization has been performed, successive calls to next() will
        produce the series of combinations. To begin a new series at any time,
        call this init method again with new values for a and b.

        Parameters
        ----------
        a: the number of objects being selected from.
        b: the number of objects in the desired selection.

        Returns
        -------
        ChoiceGenerator : ChoiceGenerator instance
        '''
        self.a = a
        self.b = b
        # Largest value the first (index-0) element may take.
        self.diff = a - b
        # Working combination, primed one step *before* the first combination
        # ([0, 1, ..., b-2, b-2]) so that the first call to next() advances
        # the last slot and yields [0, 1, ..., b-1].
        self.choiceLocal = []
        for i in range(b - 1):
            self.choiceLocal.append(i)
        if b > 0:
            self.choiceLocal.append(b - 2)
        # Buffer handed back to callers; reused (and overwritten) on every
        # call to next(), so the internal state is never exposed directly.
        self.choiceReturned = [0 for i in range(b)]
        self.begun = False

    def fill(self, index):
        # Advance position ``index`` by one, then reset every later position
        # to the consecutive values that follow it (keeps x[j] < x[j + 1]).
        self.choiceLocal[index] += 1
        for i in range(index + 1, self.b):
            self.choiceLocal[i] = self.choiceLocal[i - 1] + 1

    def next(self):
        '''
        Return the next combination (a list of b increasing indices < a), or
        None when the sequence is exhausted.  The returned list is reused
        between calls — copy it if you need to keep it.
        '''
        # Scan from the rightmost position for one that can still advance;
        # position i may grow up to i + diff.
        i = self.b
        while i > 0:
            i -= 1
            if self.choiceLocal[i] < (i + self.diff):
                self.fill(i)
                self.begun = True
                for j in range(self.b):
                    self.choiceReturned[j] = self.choiceLocal[j]
                return self.choiceReturned
        if self.begun:
            # No position can advance: the sequence is exhausted.
            return None
        else:
            # Reached only when no advance is possible before the series has
            # started (e.g. b == 0): emit the single initial combination once.
            self.begun = True
            for j in range(self.b):
                self.choiceReturned[j] = self.choiceLocal[j]
            return self.choiceReturned
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 11:17:21 2020
@author: eilxaix
"""
import pandas as pd
import re
def remove_hashtag(t):
    """Replace every dash with a space and collapse runs of whitespace.

    E.g. ``'multi-word  term'`` -> ``'multi word term'``.
    """
    without_dashes = re.sub('-', ' ', t)
    return ' '.join(without_dashes.split())
def read_csv_data(df):
    """Extract text fields and lower-cased INSPEC keyword lists from an
    IEEE Xplore CSV export.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain the columns 'Document Title', 'Abstract',
        'INSPEC Controlled Terms' and 'INSPEC Non-Controlled Terms'.

    Returns
    -------
    dict with keys 'title', 'abstract', 'title+abs' (title + '. ' +
    abstract), 'inspec_controlled' and 'inspec_uncontrolled' (each a list of
    lists of lower-cased, ';'-separated keywords).

    Improvement: the original built the keyword lists by mutating them in
    place with index loops; both are now single comprehensions.
    """
    title = [remove_hashtag(i) for i in df['Document Title']]
    abstract = [remove_hashtag(i) for i in df['Abstract']]
    doc = [t + '. ' + a for t, a in zip(title, abstract)]
    inspec_controlled = [
        [k.lower() for k in remove_hashtag(i).split(';')]
        for i in df['INSPEC Controlled Terms']
    ]
    inspec_uncontrolled = [
        [k.lower() for k in remove_hashtag(i).split(';')]
        for i in df['INSPEC Non-Controlled Terms']
    ]
    data = {'title': title, 'abstract': abstract, 'title+abs': doc,
            'inspec_controlled': inspec_controlled,
            'inspec_uncontrolled': inspec_uncontrolled}
    return data
# =============================================================================
# data = read_csv_data(pd.read_csv('../../dataset/ieee_xai/ieee_xai.csv'))
# =============================================================================
| 1,259 | 450 |
# Generated from Scicopia.g4 by ANTLR 4.9.2
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
    """Return the serialized ATN (augmented transition network) of the
    Scicopia grammar as a single string.

    GENERATED CODE — emitted by ANTLR 4.9.2 from Scicopia.g4.  The escape
    sequences encode the ATN byte stream; do not edit by hand, regenerate
    from the grammar instead.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\23")
        buf.write("<\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\3\2")
        buf.write("\6\2\20\n\2\r\2\16\2\21\3\3\3\3\3\3\3\3\5\3\30\n\3\3\4")
        buf.write("\3\4\3\4\3\4\3\4\3\4\5\4 \n\4\3\5\3\5\7\5$\n\5\f\5\16")
        buf.write("\5\'\13\5\3\5\3\5\3\5\7\5,\n\5\f\5\16\5/\13\5\3\5\5\5")
        buf.write("\62\n\5\3\6\3\6\3\6\3\6\5\68\n\6\3\7\3\7\3\7\4%-\2\b\2")
        buf.write("\4\6\b\n\f\2\3\4\2\6\t\13\20\2?\2\17\3\2\2\2\4\27\3\2")
        buf.write("\2\2\6\37\3\2\2\2\b\61\3\2\2\2\n\63\3\2\2\2\f9\3\2\2\2")
        buf.write("\16\20\5\4\3\2\17\16\3\2\2\2\20\21\3\2\2\2\21\17\3\2\2")
        buf.write("\2\21\22\3\2\2\2\22\3\3\2\2\2\23\30\5\6\4\2\24\30\5\b")
        buf.write("\5\2\25\30\5\f\7\2\26\30\5\n\6\2\27\23\3\2\2\2\27\24\3")
        buf.write("\2\2\2\27\25\3\2\2\2\27\26\3\2\2\2\30\5\3\2\2\2\31\32")
        buf.write("\7\n\2\2\32 \5\b\5\2\33\34\7\n\2\2\34 \5\n\6\2\35\36\7")
        buf.write("\n\2\2\36 \5\f\7\2\37\31\3\2\2\2\37\33\3\2\2\2\37\35\3")
        buf.write("\2\2\2 \7\3\2\2\2!%\7\3\2\2\"$\13\2\2\2#\"\3\2\2\2$\'")
        buf.write("\3\2\2\2%&\3\2\2\2%#\3\2\2\2&(\3\2\2\2\'%\3\2\2\2(\62")
        buf.write("\7\3\2\2)-\7\4\2\2*,\13\2\2\2+*\3\2\2\2,/\3\2\2\2-.\3")
        buf.write("\2\2\2-+\3\2\2\2.\60\3\2\2\2/-\3\2\2\2\60\62\7\4\2\2\61")
        buf.write("!\3\2\2\2\61)\3\2\2\2\62\t\3\2\2\2\63\64\7\13\2\2\64\67")
        buf.write("\7\5\2\2\658\5\b\5\2\668\5\f\7\2\67\65\3\2\2\2\67\66\3")
        buf.write("\2\2\28\13\3\2\2\29:\t\2\2\2:\r\3\2\2\2\t\21\27\37%-\61")
        buf.write("\67")
        return buf.getvalue()
class ScicopiaParser ( Parser ):
    """Parser for the Scicopia search-query grammar.

    GENERATED CODE — emitted by ANTLR 4.9.2 from Scicopia.g4; do not edit
    by hand, regenerate from the grammar instead.  A query is one or more
    parts; a part is an exclusion (NOT ...), a quoted phrase, a prefixed
    term (``field:value``) or a plain term.
    """

    grammarFileName = "Scicopia.g4"

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    sharedContextCache = PredictionContextCache()

    literalNames = [ "<INVALID>", "'\"'", "'''", "':'", "<INVALID>", "<INVALID>",
                     "<INVALID>", "<INVALID>", "'-'", "<INVALID>", "<INVALID>",
                     "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                     "'('", "')'" ]

    symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                      "DASH", "NUM", "COMPOUND", "APOSTROPHE", "NOT", "ALPHA",
                      "DIGITS", "ABBREV", "CHARGED", "ALPHANUM", "STRING",
                      "LPAR", "RPAR", "WHITESPACE" ]

    # Rule indices as declared in the grammar.
    RULE_query = 0
    RULE_part = 1
    RULE_exclude = 2
    RULE_quotes = 3
    RULE_prefixed = 4
    RULE_term = 5

    ruleNames = [ "query", "part", "exclude", "quotes", "prefixed", "term" ]

    # Token type constants.
    EOF = Token.EOF
    T__0=1
    T__1=2
    T__2=3
    DASH=4
    NUM=5
    COMPOUND=6
    APOSTROPHE=7
    NOT=8
    ALPHA=9
    DIGITS=10
    ABBREV=11
    CHARGED=12
    ALPHANUM=13
    STRING=14
    LPAR=15
    RPAR=16
    WHITESPACE=17

    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
        super().__init__(input, output)
        self.checkVersion("4.9.2")
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None


    class QueryContext(ParserRuleContext):
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def part(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(ScicopiaParser.PartContext)
            else:
                return self.getTypedRuleContext(ScicopiaParser.PartContext,i)


        def getRuleIndex(self):
            return ScicopiaParser.RULE_query

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterQuery" ):
                listener.enterQuery(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitQuery" ):
                listener.exitQuery(self)



    def query(self):
        # query : part+ ;

        localctx = ScicopiaParser.QueryContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_query)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 13
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while True:
                self.state = 12
                self.part()
                self.state = 15
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # Loop while the lookahead token can start another part.
                if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ScicopiaParser.T__0) | (1 << ScicopiaParser.T__1) | (1 << ScicopiaParser.DASH) | (1 << ScicopiaParser.NUM) | (1 << ScicopiaParser.COMPOUND) | (1 << ScicopiaParser.APOSTROPHE) | (1 << ScicopiaParser.NOT) | (1 << ScicopiaParser.ALPHA) | (1 << ScicopiaParser.DIGITS) | (1 << ScicopiaParser.ABBREV) | (1 << ScicopiaParser.CHARGED) | (1 << ScicopiaParser.ALPHANUM) | (1 << ScicopiaParser.STRING))) != 0)):
                    break

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class PartContext(ParserRuleContext):
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def exclude(self):
            return self.getTypedRuleContext(ScicopiaParser.ExcludeContext,0)


        def quotes(self):
            return self.getTypedRuleContext(ScicopiaParser.QuotesContext,0)


        def term(self):
            return self.getTypedRuleContext(ScicopiaParser.TermContext,0)


        def prefixed(self):
            return self.getTypedRuleContext(ScicopiaParser.PrefixedContext,0)


        def getRuleIndex(self):
            return ScicopiaParser.RULE_part

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPart" ):
                listener.enterPart(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPart" ):
                listener.exitPart(self)



    def part(self):
        # part : exclude | quotes | term | prefixed ;

        localctx = ScicopiaParser.PartContext(self, self._ctx, self.state)
        self.enterRule(localctx, 2, self.RULE_part)
        try:
            self.state = 21
            self._errHandler.sync(self)
            # Adaptive prediction chooses among the four alternatives.
            la_ = self._interp.adaptivePredict(self._input,1,self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 17
                self.exclude()
                pass

            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 18
                self.quotes()
                pass

            elif la_ == 3:
                self.enterOuterAlt(localctx, 3)
                self.state = 19
                self.term()
                pass

            elif la_ == 4:
                self.enterOuterAlt(localctx, 4)
                self.state = 20
                self.prefixed()
                pass


        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class ExcludeContext(ParserRuleContext):
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def NOT(self):
            return self.getToken(ScicopiaParser.NOT, 0)

        def quotes(self):
            return self.getTypedRuleContext(ScicopiaParser.QuotesContext,0)


        def prefixed(self):
            return self.getTypedRuleContext(ScicopiaParser.PrefixedContext,0)


        def term(self):
            return self.getTypedRuleContext(ScicopiaParser.TermContext,0)


        def getRuleIndex(self):
            return ScicopiaParser.RULE_exclude

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExclude" ):
                listener.enterExclude(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExclude" ):
                listener.exitExclude(self)



    def exclude(self):
        # exclude : NOT quotes | NOT prefixed | NOT term ;

        localctx = ScicopiaParser.ExcludeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 4, self.RULE_exclude)
        try:
            self.state = 29
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,2,self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 23
                self.match(ScicopiaParser.NOT)
                self.state = 24
                self.quotes()
                pass

            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 25
                self.match(ScicopiaParser.NOT)
                self.state = 26
                self.prefixed()
                pass

            elif la_ == 3:
                self.enterOuterAlt(localctx, 3)
                self.state = 27
                self.match(ScicopiaParser.NOT)
                self.state = 28
                self.term()
                pass


        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class QuotesContext(ParserRuleContext):
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser


        def getRuleIndex(self):
            return ScicopiaParser.RULE_quotes

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterQuotes" ):
                listener.enterQuotes(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitQuotes" ):
                listener.exitQuotes(self)



    def quotes(self):
        # quotes : '"' .*? '"' | ''' .*? ''' ;  (non-greedy wildcard loops)

        localctx = ScicopiaParser.QuotesContext(self, self._ctx, self.state)
        self.enterRule(localctx, 6, self.RULE_quotes)
        try:
            self.state = 47
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [ScicopiaParser.T__0]:
                self.enterOuterAlt(localctx, 1)
                self.state = 31
                self.match(ScicopiaParser.T__0)
                self.state = 35
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,3,self._ctx)
                while _alt!=1 and _alt!=ATN.INVALID_ALT_NUMBER:
                    # ANTLR numbers the loop-body alternative 2 (hence 1+1)
                    # for non-greedy subrules.
                    if _alt==1+1:
                        self.state = 32
                        self.matchWildcard()
                    self.state = 37
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(self._input,3,self._ctx)

                self.state = 38
                self.match(ScicopiaParser.T__0)
                pass
            elif token in [ScicopiaParser.T__1]:
                self.enterOuterAlt(localctx, 2)
                self.state = 39
                self.match(ScicopiaParser.T__1)
                self.state = 43
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,4,self._ctx)
                while _alt!=1 and _alt!=ATN.INVALID_ALT_NUMBER:
                    if _alt==1+1:
                        self.state = 40
                        self.matchWildcard()
                    self.state = 45
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(self._input,4,self._ctx)

                self.state = 46
                self.match(ScicopiaParser.T__1)
                pass
            else:
                raise NoViableAltException(self)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class PrefixedContext(ParserRuleContext):
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def ALPHA(self):
            return self.getToken(ScicopiaParser.ALPHA, 0)

        def quotes(self):
            return self.getTypedRuleContext(ScicopiaParser.QuotesContext,0)


        def term(self):
            return self.getTypedRuleContext(ScicopiaParser.TermContext,0)


        def getRuleIndex(self):
            return ScicopiaParser.RULE_prefixed

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPrefixed" ):
                listener.enterPrefixed(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPrefixed" ):
                listener.exitPrefixed(self)



    def prefixed(self):
        # prefixed : ALPHA ':' (quotes | term) ;

        localctx = ScicopiaParser.PrefixedContext(self, self._ctx, self.state)
        self.enterRule(localctx, 8, self.RULE_prefixed)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 49
            self.match(ScicopiaParser.ALPHA)
            self.state = 50
            self.match(ScicopiaParser.T__2)
            self.state = 53
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [ScicopiaParser.T__0, ScicopiaParser.T__1]:
                self.state = 51
                self.quotes()
                pass
            elif token in [ScicopiaParser.DASH, ScicopiaParser.NUM, ScicopiaParser.COMPOUND, ScicopiaParser.APOSTROPHE, ScicopiaParser.ALPHA, ScicopiaParser.DIGITS, ScicopiaParser.ABBREV, ScicopiaParser.CHARGED, ScicopiaParser.ALPHANUM, ScicopiaParser.STRING]:
                self.state = 52
                self.term()
                pass
            else:
                raise NoViableAltException(self)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx


    class TermContext(ParserRuleContext):
        __slots__ = 'parser'

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def CHARGED(self):
            return self.getToken(ScicopiaParser.CHARGED, 0)

        def DASH(self):
            return self.getToken(ScicopiaParser.DASH, 0)

        def NUM(self):
            return self.getToken(ScicopiaParser.NUM, 0)

        def COMPOUND(self):
            return self.getToken(ScicopiaParser.COMPOUND, 0)

        def ALPHA(self):
            return self.getToken(ScicopiaParser.ALPHA, 0)

        def ABBREV(self):
            return self.getToken(ScicopiaParser.ABBREV, 0)

        def ALPHANUM(self):
            return self.getToken(ScicopiaParser.ALPHANUM, 0)

        def APOSTROPHE(self):
            return self.getToken(ScicopiaParser.APOSTROPHE, 0)

        def DIGITS(self):
            return self.getToken(ScicopiaParser.DIGITS, 0)

        def STRING(self):
            return self.getToken(ScicopiaParser.STRING, 0)

        def getRuleIndex(self):
            return ScicopiaParser.RULE_term

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterTerm" ):
                listener.enterTerm(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitTerm" ):
                listener.exitTerm(self)



    def term(self):
        # term : one token out of the word-like token set ;

        localctx = ScicopiaParser.TermContext(self, self._ctx, self.state)
        self.enterRule(localctx, 10, self.RULE_term)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 55
            _la = self._input.LA(1)
            if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ScicopiaParser.DASH) | (1 << ScicopiaParser.NUM) | (1 << ScicopiaParser.COMPOUND) | (1 << ScicopiaParser.APOSTROPHE) | (1 << ScicopiaParser.ALPHA) | (1 << ScicopiaParser.DIGITS) | (1 << ScicopiaParser.ABBREV) | (1 << ScicopiaParser.CHARGED) | (1 << ScicopiaParser.ALPHANUM) | (1 << ScicopiaParser.STRING))) != 0)):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
| 17,705 | 6,291 |
# Recreate the werewolf database schema from scratch.
# NOTE(review): this drops ALL existing data — intended for dev/test setup.
from pathlib import Path
import sys

# Make the repository root importable when this script is run directly.
sys.path.append(str(Path(__file__).resolve().parents[1]))
from werewolf import create_app
from werewolf.database import db

# Build an app with the 'db' configuration and rebuild every table inside
# its application context.
app = create_app('db')
with app.app_context():
    db.drop_all()
    db.create_all()
import numpy as np
import pytest
from sklego.common import flatten
from sklego.linear_model import ProbWeightRegression
from tests.conftest import nonmeta_checks, regressor_checks, general_checks
@pytest.mark.parametrize("test_fn", flatten([
    nonmeta_checks,
    general_checks,
    regressor_checks
]))
def test_estimator_checks(test_fn):
    """Run each sklearn-style estimator check against ProbWeightRegression
    with both settings of ``non_negative``.

    Fix: the check id for ``non_negative=False`` previously read
    '_min_zero_true_false' (copy-paste typo); renamed to '_min_zero_false'
    so failures report the configuration actually under test.
    """
    regr_min_zero = ProbWeightRegression(non_negative=True)
    test_fn(ProbWeightRegression.__name__ + '_min_zero_true', regr_min_zero)

    regr_not_min_zero = ProbWeightRegression(non_negative=False)
    test_fn(ProbWeightRegression.__name__ + '_min_zero_false', regr_not_min_zero)
def test_shape_trained_model(random_xy_dataset_regr):
    """Fitted coefficients have one entry per feature and sum to 1 (the
    model constrains weights to a probability simplex)."""
    X, y = random_xy_dataset_regr
    mod_no_intercept = ProbWeightRegression()
    assert mod_no_intercept.fit(X, y).coefs_.shape == (X.shape[1], )
    # Weights must form a (near-)convex combination; tolerance 4 significant
    # digits because the solver is numerical.
    np.testing.assert_approx_equal(mod_no_intercept.fit(X, y).coefs_.sum(), 1.0, significant=4)
| 936 | 349 |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 29 19:44:28 2019
@author: Administrator
"""
class Solution:
    """Flip Game II (LeetCode 294): starting from a string of '+'/'-', two
    players alternately flip one adjacent "++" into "--"; the player who
    cannot move loses."""

    def canWin(self, s: str) -> bool:
        """Return True iff the player to move can force a win from ``s``.

        Fix: the previous implementation only looked one ply ahead (it
        declared a loss whenever *every* child state still contained "++"),
        which is wrong — e.g. it returned False for "++++++", a first-player
        win.  This version does a full memoized minimax: a state is winning
        iff some move leads to a losing state for the opponent.
        """
        memo = {}

        def wins(state):
            if state not in memo:
                memo[state] = any(
                    not wins(nxt)
                    for nxt in self.generatePossibleNextMoves(state)
                )
            return memo[state]

        return wins(s)

    def generatePossibleNextMoves(self, s):
        """Return every state reachable by flipping one adjacent "++" to "--"
        (uses string slicing/concatenation to build each successor)."""
        res = []
        for i in range(len(s) - 1):
            if s[i:i+2] == "++":
                res.append(s[:i] + "--" + s[i+2:])
        return res
# Demo entry point: report whether the first player can win on a board of
# six '+'.  Fix: the two earlier assignments to ``s`` ("++++", "+++++")
# were dead stores overwritten before use and have been removed.
solu = Solution()
s = "++++++"
print(solu.canWin(s))
"""The CSDMS Web Modeling Tool (WMT) execution server."""
__version__ = '0.3'
| 79 | 30 |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
save_results = False
def plot_stored_GEM_reults(interval_x=None, interval_y=None):
    """Plot stored GEM benchmark results (SEC-DDPG vs. plain DDPG vs. PI).

    Loads pickled time series from ``GEM/data`` and draws a three-row figure
    (i_q, i_d, v_dq) plus a separate reward-comparison plot.

    Parameters
    ----------
    interval_x : list of two floats, optional
        Time window in seconds shown on the x-axis; defaults to [0, 1].
    interval_y : list of two floats, optional
        y-range; currently stored in ``interval_list_y`` but not applied to
        any axis.

    NOTE(review): the function name misspells "results"; kept as-is because
    the module-level call uses this exact name.
    """
    if interval_x is None:
        #interval_list_x = [0.499, 0.506] # 1
        interval_list_x = [0, 1]
        #interval_list_x = [0.299, 0.305] # 2
        #interval_list_x = [0.949, 0.953] # 3
        #interval_list_x = [0.049, 0.052]
    else:
        interval_list_x = interval_x
    if interval_y is None:
        interval_list_y = [80, 345]
    else:
        interval_list_y = interval_y

    folder_name = 'GEM/data'

    # Stored runs: plain DDPG, DDPG with the SEC integrator, and the PI
    # baseline controller.
    df_DDPG = pd.read_pickle('GEM/data/DDPG_data')
    df_DDPG_I = pd.read_pickle('GEM/data/SEC_DDPG_data')
    df_PI = pd.read_pickle('GEM/data/GEM_PI_a4.pkl')

    # Sample time of the stored traces; the PI and voltage traces are
    # shifted by one/two samples to align with the agent measurements.
    ts = 1e-4
    t_test = np.arange(0, len(df_DDPG['i_d_mess'][0]) * ts, ts).tolist()
    t_PI_2 = np.arange(-ts, len(df_PI['i_d_mess']) * ts-ts, ts).tolist()
    t_reward = np.arange(-ts-ts, round((len(df_DDPG['v_d_mess'][0])) * ts - ts -ts, 4), ts).tolist()

    reward_sec = df_DDPG_I['Reward_test'].tolist()[0]
    reward = df_DDPG['Reward_test'].tolist()[0]
    reward_PI = df_PI['Reward'].tolist()

    if save_results:
        # LaTeX/pgf rendering parameters used for the paper figures.
        params = {'backend': 'ps',
                  'text.latex.preamble': [r'\usepackage{gensymb}'
                                          r'\usepackage{amsmath,amssymb,mathtools}'
                                          r'\newcommand{\mlutil}{\ensuremath{\operatorname{ml-util}}}'
                                          r'\newcommand{\mlacc}{\ensuremath{\operatorname{ml-acc}}}'],
                  'axes.labelsize': 12.5,  # fontsize for x and y labels (was 10)
                  'axes.titlesize': 12.5,
                  'font.size': 12.5,  # was 10
                  'legend.fontsize': 12.5,  # was 10
                  'xtick.labelsize': 12,
                  'ytick.labelsize': 12,
                  'text.usetex': True,
                  'figure.figsize': [5.2, 5.625],#[4.5, 7.5],
                  'font.family': 'serif',
                  'lines.linewidth': 1.2
                  }
        matplotlib.rcParams.update(params)

    fig, axs = plt.subplots(3, 1)

    # Row 1 (axs[1]): q-axis currents; measurements are stored per-unit and
    # scaled by 160 * 1.41 to amperes (peak).
    axs[1].plot(t_test, [i * 160 * 1.41 for i in df_DDPG_I['i_q_mess'].tolist()[0]], 'r', label='$\mathrm{SEC}$')
    axs[1].plot(t_test, [i * 160 * 1.41 for i in df_DDPG['i_q_mess'].tolist()[0]], '-.r',
                label='$\mathrm{DDPG}_\mathrm{}$')
    axs[1].plot(t_test, [i * 160 * 1.41 for i in df_PI['i_q_mess'].tolist()], '--r',
                label='$\mathrm{PI}_\mathrm{}$')
    axs[1].plot(t_test, [i * 160 * 1.41 for i in df_DDPG_I['i_q_ref'].tolist()[0]], ':', color='gray',
                label='$\mathrm{i}_\mathrm{q}^*$', linewidth=2)
    axs[1].plot(t_test, [i * 160 * 1.41 for i in df_PI['i_q_ref'].tolist()], ':', color='gray',
                label='$\mathrm{i}_\mathrm{q}^*$', linewidth=2)
    axs[1].grid()
    # axs[1].legend()
    axs[1].set_xlim(interval_list_x)
    axs[1].set_ylim([-0.5 * 160 * 1.41, 0.55 * 160 * 1.41])  # 1
    #axs[1].set_ylim([-0 * 160 * 1.41, 0.4 * 160 * 1.41]) # 2
    #axs[1].set_ylim([0.37 * 160 * 1.41, 0.52 * 160 * 1.41]) # 3
    # axs[0].set_xlabel(r'$t\,/\,\mathrm{s}$')
    axs[1].tick_params(axis='x', colors='w')
    axs[1].set_ylabel("$i_{\mathrm{q}}\,/\,{\mathrm{A}}$")
    axs[1].tick_params(direction='in')

    # Row 0 (axs[0]): d-axis currents, same scaling.
    axs[0].plot(t_test, [i * 160 * 1.41 for i in df_DDPG_I['i_d_mess'].tolist()[0]], 'b',
                label='$\mathrm{SEC}_\mathrm{}$')
    axs[0].plot(t_test, [i * 160 * 1.41 for i in df_DDPG['i_d_mess'].tolist()[0]], '-.b',
                label='$\mathrm{DDPG}_\mathrm{}$')
    axs[0].plot(t_test, [i * 160 * 1.41 for i in df_PI['i_d_mess'].tolist()], '--b',
                label='$\mathrm{PI}_\mathrm{}$')
    axs[0].plot(t_test, [i * 160 * 1.41 for i in df_DDPG_I['i_d_ref'].tolist()[0]], ':', color='gray',
                label='$i_\mathrm{}^*$', linewidth=2)
    axs[0].grid()
    axs[0].legend(bbox_to_anchor = (0, 1.02, 1, 0.2), loc="lower left",mode="expand", borderaxespad=0, ncol=4)
    axs[0].set_xlim(interval_list_x)
    # NOTE(review): duplicated set_ylim call — the second (identical) call
    # simply overrides the first.
    axs[0].set_ylim([-0.78 * 160 * 1.41, 0.05 * 160 * 1.41])  #
    axs[0].set_ylim([-0.78 * 160 * 1.41, 0.05 * 160 * 1.41])  # 1
    #axs[0].set_ylim([-0.9 * 160 * 1.41, 0.005 * 160 * 1.41]) # 2
    #axs[0].set_ylim([-1 * 160 * 1.41, -0.2 * 160 * 1.41]) # 3
    axs[0].tick_params(axis='x', colors='w')
    axs[0].set_ylabel("$i_{\mathrm{d}}\,/\,{\mathrm{A}}$")
    axs[0].tick_params(direction='in')
    fig.subplots_adjust(wspace=0, hspace=0.05)

    # Row 2 (axs[2]): d/q voltage actions, scaled by 200 V.
    axs[2].plot(t_reward, [i * 200 for i in df_DDPG_I['v_q_mess'].tolist()[0]], 'r', label='$\mathrm{SEC}$')
    axs[2].plot(t_reward, [i * 200 for i in df_DDPG['v_q_mess'].tolist()[0]], '-.r',
                label='$\mathrm{DDPG}_\mathrm{}$')
    axs[2].plot(t_PI_2, [i * 200 for i in df_PI['v_q_mess'].tolist()], '--r',
                label='$\mathrm{PI}_\mathrm{}$')
    axs[2].plot(t_reward, [i * 200 for i in df_DDPG_I['v_d_mess'].tolist()[0]], 'b', label='$\mathrm{SEC}$')
    axs[2].plot(t_reward, [i * 200 for i in df_DDPG['v_d_mess'].tolist()[0]], '-.b',
                label='$\mathrm{DDPG}_\mathrm{}$')
    axs[2].plot(t_PI_2, [i * 200 for i in df_PI['v_d_mess'].tolist()], '--b',
                label='$\mathrm{PI}_\mathrm{}$')
    #axs[2].plot(t_reward, df_DDPG_I['v_q_mess'].tolist()[0], 'r', label='$\mathrm{SEC}$')
    #axs[2].plot(t_reward, df_DDPG['v_q_mess'].tolist()[0], '-.r',
    #            label='$\mathrm{DDPG}_\mathrm{}$')
    #axs[2].plot(t_reward, df_PI['v_q_mess'].tolist(), '--r',
    #            label='$\mathrm{PI}_\mathrm{}$')
    # axs[2].plot(t_reward, df_DDPG_I['v_d_mess'].tolist()[0], 'b', label='$\mathrm{SEC}$')
    # axs[2].plot(t_reward, df_DDPG['v_d_mess'].tolist()[0], '--b', label='$\mathrm{DDPG}_\mathrm{}$')
    # axs[2].plot(t_PI_3, df_PI['v_d_mess'].tolist(), '--b', label='$\mathrm{PI}_\mathrm{}$')
    axs[2].grid()
    # axs[1].legend()
    axs[2].set_xlim(interval_list_x)
    #axs[2].set_ylim([-100, 100])
    # axs[0].set_xlabel(r'$t\,/\,\mathrm{s}$')
    #axs[2].set_xlabel(r'$t\,/\,\mathrm{s}$')
    #axs[2].tick_params(axis='x', colors='w')
    axs[2].set_xlabel(r'$t\,/\,\mathrm{s}$')
    axs[2].set_ylabel("$v_{\mathrm{dq}}\,/\,{\mathrm{V}}$")
    #axs[2].set_ylabel("$u_{\mathrm{dq}}\,/\, v_\mathrm{DC}\,/\,2$")
    axs[2].tick_params(direction='in')
    # Dead code kept for reference: a fourth reward subplot that was part of
    # an earlier figure layout.
    """
    axs[3].plot(t_test, reward_sec, 'b', label=f' SEC-DDPG: '
    f'{round(sum(reward_sec[int(interval_list_x[0] / ts):int(interval_list_x[1] / ts)]) / ((interval_list_x[1] - interval_list_x[0]) / ts), 4)}')
    axs[3].plot(t_test, reward, 'r', label=f'DDPG: '
    f'{round(sum(reward[int(interval_list_x[0] / ts):int(interval_list_x[1] / ts)]) / ((interval_list_x[1] - interval_list_x[0]) / ts), 4)}')
    axs[3].plot(t_PI_2, reward_PI, '--r', label=f'PI: '
    f'{round(sum(reward_PI[int(interval_list_x[0] / ts):int(interval_list_x[1] / ts)]) / ((interval_list_x[1] - interval_list_x[0]) / ts), 4)}')
    axs[3].grid()
    axs[3].set_xlim(interval_list_x)
    #axs[3].legend()
    axs[3].set_ylabel("Reward")
    plt.show()
    """
    plt.show()

    if save_results:
        fig.savefig(f'{folder_name}/GEM_DDPG_I_noI_idq1.pgf')
        fig.savefig(f'{folder_name}/GEM_DDPG_I_noI_idq1.png')
        fig.savefig(f'{folder_name}/GEM_DDPG_I_noI_idq1.pdf')

    # Second figure: mean reward per controller over the selected window
    # (the mean is embedded in each legend label).
    plt.plot(t_test, reward_sec, 'b', label=f' SEC-DDPG: '
    f'{round(sum(reward_sec[int(interval_list_x[0] / ts):int(interval_list_x[1] / ts)]) / ((interval_list_x[1] - interval_list_x[0]) / ts), 4)}')
    plt.plot(t_test, reward, 'r', label=f'DDPG: '
    f'{round(sum(reward[int(interval_list_x[0] / ts):int(interval_list_x[1] / ts)]) / ((interval_list_x[1] - interval_list_x[0]) / ts), 4)}')
    plt.plot(t_test, reward_PI, '--r', label=f'PI: '
    f'{round(sum(reward_PI[int(interval_list_x[0] / ts):int(interval_list_x[1] / ts)]) / ((interval_list_x[1] - interval_list_x[0]) / ts), 4)}')
    plt.grid()
    plt.xlim(interval_list_x)
    plt.legend()
    plt.ylabel("Reward")
    plt.show()
plot_stored_GEM_reults() | 8,284 | 3,730 |
#
# Copyright 2020 Antoine Sanner
# 2020 Lars Pastewka
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
from NuMPI import MPI
from Adhesion.Interactions import Potential, SoftWall
class PowerLaw(Potential):
    r""" Polynomial interaction whose value, first and second derivatives are
    0 at the cutoff radius :math:`r_c`

    .. math ::

        (r < r_c) \ (1 - r / r_c)^p

    With the exponent :math:`p >= 3`
    """

    name = "PowerLaw"

    def __init__(self, work_of_adhesion, cutoff_radius, exponent=3,
                 communicator=MPI.COMM_WORLD):
        """
        Parameters:
        -----------
        work_of_adhesion: float or ndarray
            surface energy at perfect contact
        cutoff_radius: float or ndarray
            distance :math:`r_c` at which the potential has decayed to 0
        exponent: int, optional
            polynomial power :math:`p` (default 3)
        communicator: MPI communicator, optional
            forwarded to the SoftWall base initialiser
        """
        # ``rho`` is kept as an alias of ``cutoff_radius``; both names are
        # used below and in (de)serialisation.
        self.cutoff_radius = self.rho = cutoff_radius
        self.work_of_adhesion = work_of_adhesion
        self.exponent = exponent
        # NOTE(review): initialises SoftWall directly rather than via
        # super(); presumably Potential adds no extra state — confirm.
        SoftWall.__init__(self, communicator=communicator)

    def __repr__(self, ):
        return (
            "Potential '{0.name}': "
            "work_of_adhesion = {0.work_of_adhesion},"
            "cutoff_radius = {0.cutoff_radius}, exponent = {0.exponent}"
        ).format(self)

    def __getstate__(self):
        # Pickle support: bundle base-class state with our parameters.
        state = super().__getstate__(), \
            self.exponent, self.rho, self.work_of_adhesion
        return state

    def __setstate__(self, state):
        superstate, self.exponent, self.rho, self.work_of_adhesion = state
        super().__setstate__(superstate)

    @property
    def has_cutoff(self):
        # The interaction is identically zero beyond cutoff_radius.
        return True

    @property
    def r_min(self):
        # No energy minimum at finite gap for this purely adhesive form.
        return None

    @property
    def r_infl(self):
        # No inflection point is reported for this potential.
        return None

    @property
    def max_tensile(self):
        # Most negative traction: the gradient p*w/rc is largest at r = 0.
        return - self.work_of_adhesion / self.rho * self.exponent

    def evaluate(self, gap, potential=True, gradient=False, curvature=False,
                 mask=None):
        """Evaluate the potential and/or its first two derivatives at the
        given gaps; entries beyond the cutoff evaluate to 0."""
        r = np.asarray(gap)
        if mask is None:
            # Default mask selects everything with matching dimensionality.
            mask = (slice(None), ) * len(r.shape)
        # work_of_adhesion / cutoff may be fields; restrict them to ``mask``.
        w = self.work_of_adhesion if np.isscalar(self.work_of_adhesion) \
            else self.work_of_adhesion[mask]
        rc = self.rho if np.isscalar(self.rho) else self.rho[mask]
        p = self.exponent
        g = (1 - r / rc)  # positive inside the cutoff, <= 0 beyond it
        V = dV = ddV = None
        # Share g^(p-2) so g^(p-1) costs one extra multiply.
        gpm2 = g ** (p - 2)
        gpm1 = gpm2 * g
        if potential:
            # V(r) = -w * (1 - r/rc)^p inside the cutoff.
            V = np.where(g > 0, - w * gpm1 * g, 0)
        if gradient:
            # dV/dr = p*w/rc * (1 - r/rc)^(p-1)
            dV = np.where(g > 0, p * w / rc * gpm1, 0)
        if curvature:
            # d2V/dr2 = -p*(p-1)*w/rc**2 * (1 - r/rc)^(p-2)
            ddV = np.where(g > 0, - p * (p - 1) * w / rc ** 2 * gpm2, 0)
        return V, dV, ddV
| 3,725 | 1,261 |
import os
import glob
from ast import literal_eval
import numpy as np
import sympy
from sympy import pi, sin, cos, var
from sympy.printing import ccode
from compmech.conecyl.sympytools import mprint_as_sparse, pow2mult
# Declare the sympy symbols that occur inside the Mathematica-generated
# expressions evaluated below: boundary coefficients for edges 1 and 2
# (x/y directions, 't'ranslation and 'r'otation), the xi1/xi2 limits and
# the c0/c1 constants.
var('x1t, x1r, x2t, x2r')
var('y1t, y1r, y2t, y2r')
var('xi1, xi2')
var('c0, c1')
# Substitution dictionary (currently empty / unused).
subs = {
}
def List(*e):
    """Mathematica-style ``List``: collect the arguments into a Python list.

    Needed so that ``eval`` of Mathematica output (which contains
    ``List(...)`` calls) produces Python lists.
    """
    return [*e]
# Convert Mathematica-generated integral tables into C sources.  Each input
# file holds an (i, j)-indexed matrix of expressions that is rendered as a
# nested switch(i)/switch(j) C function returning the integral value.
header_c = """
#include <stdlib.h>
#include <math.h>
#if defined(_WIN32) || defined(__WIN32__)
#define EXPORTIT __declspec(dllexport)
#else
#define EXPORTIT
#endif
"""
printstr_full = header_c
printstr_12 = header_c
printstr_c0c1 = header_c
header_h = """
#if defined(_WIN32) || defined(__WIN32__)
#define IMPORTIT __declspec(dllimport)
#else
#define IMPORTIT
#endif
"""
printstr_full_h = header_h
printstr_12_h = header_h
printstr_c0c1_h = header_h
# NOTE: the previous loop used "for i, filepath in enumerate(...)" but the
# index was never used and was clobbered by the inner row loop below.
for filepath in glob.glob(r'.\bardell_integrals_mathematica\fortran_*.txt'):
    print(filepath)
    with open(filepath) as f:
        filename = os.path.basename(filepath)
        # Strip ".txt" and split the name into its underscore components.
        names = filename[:-4].split('_')
        lines = [line.strip() for line in f.readlines()]
        string = ''.join(lines)
        string = string.replace('\\','')
        # SECURITY NOTE: eval() of Mathematica output.  The input contains
        # List(...) calls and sympy symbols, so ast.literal_eval cannot be
        # used here; only run this script on trusted input files.
        tmp = eval(string)
        # BUG FIX: this was a Python 2 print statement ("print '...'"),
        # a syntax error under Python 3; the call form works in both.
        print('\tfinished eval')
        # Build the C function signature; the variants differ only in the
        # leading (xi1, xi2) or (c0, c1) parameters.
        printstr = ''
        if '_12' in filepath:
            name = '_'.join(names[1:3])
            printstr += 'EXPORTIT double integral_%s(double xi1, double xi2, int i, int j,\n' % name
            printstr += '                           double x1t, double x1r, double x2t, double x2r,\n'
            printstr += '                           double y1t, double y1r, double y2t, double y2r) {\n'
        elif '_c0c1' in filepath:
            name = '_'.join(names[1:3])
            printstr += 'EXPORTIT double integral_%s(double c0, double c1, int i, int j,\n' % name
            printstr += '                           double x1t, double x1r, double x2t, double x2r,\n'
            printstr += '                           double y1t, double y1r, double y2t, double y2r) {\n'
        else:
            name = names[1]
            printstr += 'EXPORTIT double integral_%s(int i, int j,\n' % name
            printstr += '                           double x1t, double x1r, double x2t, double x2r,\n'
            printstr += '                           double y1t, double y1r, double y2t, double y2r) {\n'
        # Matching header declaration, guarded against double inclusion.
        printstr_h = '\n'
        printstr_h += '#ifndef BARDELL_%s_H\n' % name.upper()
        printstr_h += '#define BARDELL_%s_H\n' % name.upper()
        printstr_h += printstr.replace(' {', ';').replace('EXPORTIT', 'IMPORTIT')
        printstr_h += '#endif /** BARDELL_%s_H */\n' % name.upper()
        printstr_h += '\n'
        matrix = sympy.Matrix(np.atleast_2d(tmp))
        # Emit nested switch statements; rows/columns that are entirely zero
        # fall through to the default "return 0." branches.
        # NOTE(review): if row 0 has no nonzero entry, "switch(i) {" is never
        # opened before a "default:" is emitted — presumably row 0 is always
        # populated in these inputs; confirm before adding new input files.
        for i in range(matrix.shape[0]):
            activerow = False
            for j in range(matrix.shape[1]):
                if matrix[i, j] == 0:
                    continue
                if not activerow:
                    activerow = True
                    if i == 0:
                        printstr += '    switch(i) {\n'
                    else:
                        printstr += '        default:\n'
                        printstr += '            return 0.;\n'
                        printstr += '        }\n'
                    printstr += '    case %d:\n' % i
                    printstr += '        switch(j) {\n'
                printstr += '        case %d:\n' % j
                printstr += '            return %s;\n' % ccode(matrix[i, j].evalf())
        printstr += '        default:\n'
        printstr += '            return 0.;\n'
        printstr += '        }\n'
        printstr += '    default:\n'
        printstr += '        return 0.;\n'
        printstr += '    }\n'
        printstr += '}\n'
        # Route the generated source/header to the right output file set.
        if '_12' in filepath:
            printstr_12_h += printstr_h
            filepath = r'..\..\..\compmech\lib\src\bardell_integral_%s_12.c' % name[:-3]
            with open(filepath, 'w') as g:
                g.write(printstr_12 + printstr)
        elif '_c0c1' in filepath:
            printstr_c0c1_h += printstr_h
            filepath = r'..\..\..\compmech\lib\src\bardell_integral_%s_c0c1.c' % name[:-5]
            with open(filepath, 'w') as g:
                g.write(printstr_c0c1 + printstr)
        else:
            printstr_full += printstr
            printstr_full_h += printstr_h
# Aggregate outputs: one combined source plus the three header files.
with open(r'..\..\..\compmech\include\bardell.h', 'w') as g:
    g.write(printstr_full_h)
with open(r'..\..\..\compmech\lib\src\bardell.c', 'w') as g:
    g.write(printstr_full)
with open(r'..\..\..\compmech\include\bardell_12.h', 'w') as g:
    g.write(printstr_12_h)
with open(r'..\..\..\compmech\include\bardell_c0c1.h', 'w') as g:
    g.write(printstr_c0c1_h)
import time
import numpy as np
from sparsecoding import sparse_encode_omp
from sparsecoding import sparse_encode_nnmp
from distributedpowermethod import distri_powermethod
def cloud_ksvd(X, AtomN, dict_init, s, NodeN, networkGraph, vec_init, max_iter, powerIterations, consensusIterations):
    """Distributed (cloud) K-SVD dictionary learning.

    Alternates a per-node OMP sparse-coding stage with an atom-by-atom
    dictionary update in which the new atom is the principal eigenvector
    of the pooled residual covariance, estimated by a distributed power
    method over the node network.

    Parameters
    ----------
    X : list of ndarray
        Per-node data matrices; X[n] has shape (sigdim, n_samples_n).
    AtomN : int
        Number of dictionary atoms.
    dict_init : ndarray
        Initial dictionary shared by every node, shape (sigdim, AtomN).
    s : int
        Sparsity level passed to ``sparse_encode_omp``.
    NodeN : int
        Number of network nodes.
    networkGraph : ndarray
        Consensus / mixing matrix of the network (presumably row
        stochastic — confirm against ``distri_powermethod``).
    vec_init : ndarray
        Initial vector for the distributed power method.
    max_iter : int
        Number of outer (coding + update) iterations.
    powerIterations, consensusIterations : int
        Iteration counts forwarded to ``distri_powermethod``.

    Returns
    -------
    ndarray
        D of shape (sigdim, AtomN, max_iter + 1, NodeN): every node's
        dictionary after each iteration (slice 0 holds ``dict_init``).
    """
    sigdim = X[0].shape[0]
    D = np.zeros([sigdim, AtomN, max_iter+1, NodeN])
    for i in range(NodeN):
        D[:,:,0,i] = dict_init
    # theta[n] holds node n's sparse coefficient matrix for this iteration.
    theta = [[] for i in range(NodeN)]
    for iters in range(max_iter):
        t0 = time.time()
        # Sparse-coding stage: each node encodes its own data with OMP
        # against its current dictionary copy.
        for nodes in range(NodeN):
            D[:,:,iters+1,nodes] = np.copy(D[:,:,iters,nodes])
            theta[nodes] = sparse_encode_omp(X[nodes], D[:,:,iters+1,nodes], s)
        # Dictionary-update stage: refresh one atom at a time.
        for k in range(AtomN):
            multipliedMat = np.zeros([sigdim,sigdim,NodeN])
            index_temp = [[] for i in range(NodeN)]
            ER_temp = [[] for i in range(NodeN)]
            for nodes in range(NodeN):
                # Samples on this node that actually use atom k.
                indexes = np.nonzero(theta[nodes][k,:]!=0)[0]
                index_temp[nodes] = indexes
                if len(indexes)>0:
                    # Residual without atom k's contribution.  Fancy
                    # indexing copies theta, so zeroing row k is safe.
                    tempcoef = theta[nodes][:,indexes]
                    tempcoef[k,:] = 0
                    ER = X[nodes][:,indexes] - np.dot(D[:,:,iters+1,nodes], tempcoef)
                    ER_temp[nodes] = ER
                    # Local residual covariance contribution.
                    multipliedMat[:,:,nodes] = np.dot(ER, ER.T)
            if np.max(np.abs(multipliedMat))>0:
                # Jointly estimate the dominant eigenvector of the pooled
                # residual covariance; newatom[n, :] is node n's estimate.
                newatom = distri_powermethod(vec_init, multipliedMat, NodeN, networkGraph, powerIterations, consensusIterations)
                for nodes in range(NodeN):
                    D[:,k,iters+1,nodes] = newatom[nodes,:]
                    if len(index_temp[nodes])>0:
                        # Re-fit the coefficients of atom k by projection.
                        theta[nodes][k,index_temp[nodes]] = np.dot(ER_temp[nodes].T, newatom[nodes,:])
        dt = time.time() - t0
        print('the %dth iteration takes %f seconds' %(iters,dt))
    return D
def cloud_nnksvd(X, AtomN, dict_init, s, NodeN, networkGraph, vec_init, max_iter, updatec_iter, powerIterations, consensusIterations):
    """Distributed non-negative K-SVD dictionary learning.

    Same structure as ``cloud_ksvd`` (per-node sparse coding, then
    atom-by-atom updates via a distributed power method), but enforces
    non-negativity on atoms and coefficients through sign correction,
    clipping, and an alternating refinement loop of ``alteritern`` steps.

    Extra parameter compared to ``cloud_ksvd``:
    updatec_iter : int
        Iteration count forwarded to ``sparse_encode_nnmp``.

    Returns D of shape (sigdim, AtomN, max_iter + 1, NodeN), as in
    ``cloud_ksvd``.
    """
    # Number of alternating atom/coefficient refinement steps per atom.
    alteritern = 10
    sigdim = X[0].shape[0]
    D = np.zeros([sigdim, AtomN, max_iter+1, NodeN])
    for i in range(NodeN):
        D[:,:,0,i] = dict_init
    theta = [[] for i in range(NodeN)]
    for iters in range(max_iter):
        t0 = time.time()
        # Sparse-coding stage with the non-negative matching pursuit.
        for nodes in range(NodeN):
            D[:,:,iters+1,nodes] = np.copy(D[:,:,iters,nodes])
            theta[nodes] = sparse_encode_nnmp(X[nodes], D[:,:,iters+1,nodes], s, updatec_iter)
        for k in range(AtomN):
            multipliedMat = np.zeros([sigdim,sigdim,NodeN])
            index_temp = [[] for i in range(NodeN)]
            ER_temp = [[] for i in range(NodeN)]
            v_temp = [[] for i in range(NodeN)]
            for nodes in range(NodeN):
                # Samples on this node that use atom k.
                indexes = np.nonzero(theta[nodes][k,:]!=0)[0]
                index_temp[nodes] = indexes
                if len(indexes)>0:
                    # Residual without atom k (fancy indexing copies theta).
                    tempcoef = theta[nodes][:,indexes]
                    tempcoef[k,:] = 0
                    ER = X[nodes][:,indexes] - np.dot(D[:,:,iters+1,nodes], tempcoef)
                    ER_temp[nodes] = ER
                    multipliedMat[:,:,nodes] = np.dot(ER, ER.T)
            if np.max(np.abs(multipliedMat))>0:
                # Dominant eigenvector estimate per node, as in cloud_ksvd.
                newatom = distri_powermethod(vec_init, multipliedMat, NodeN, networkGraph, powerIterations, consensusIterations)
                # Sign correction: if any node's projected coefficients are
                # all negative, flip the atom so coefficients can be >= 0.
                flag = 0
                for nodes in range(NodeN):
                    if len(index_temp[nodes])>0:
                        v_temp[nodes] = np.dot(ER_temp[nodes].T, newatom[nodes,:])
                        if np.all(v_temp[nodes]<0)==True:
                            flag = 1
                            break
                if flag==1:
                    newatom = -newatom
                    for nodes in range(NodeN):
                        if len(index_temp[nodes])>0:
                            v_temp[nodes] = np.dot(ER_temp[nodes].T, newatom[nodes,:])
                # Project atom and coefficients onto the non-negative orthant.
                newatom = (newatom>0)*newatom
                for nodes in range(NodeN):
                    if len(index_temp[nodes])>0:
                        v_temp[nodes] = (v_temp[nodes]>0)*v_temp[nodes]
                # Alternating refinement: update the atom from the current
                # coefficients via consensus averaging, then the coefficients
                # from the atom, clipping to non-negative each time.
                for subiters in range(alteritern):
                    sumUpper = np.zeros([NodeN,sigdim])
                    sumLower = np.zeros(NodeN)
                    for nodes in range(NodeN):
                        sumUpper[nodes,:] = np.dot(ER_temp[nodes], v_temp[nodes])
                        sumLower[nodes] = np.dot(v_temp[nodes], v_temp[nodes])
                    # Consensus averaging of numerator and denominator.
                    for consiter in range(consensusIterations):
                        sumUpper = np.dot(networkGraph, sumUpper)
                        sumLower = np.dot(networkGraph, sumLower)
                    division = np.tile(sumLower, (sigdim, 1)).T
                    newu = sumUpper/division
                    newu = (newu>0)*newu
                    for nodes in range(NodeN):
                        if len(index_temp[nodes])>0:
                            # Least-squares coefficient refit, then clip.
                            v_temp[nodes] = np.dot(newu[nodes,:], ER_temp[nodes])/np.dot(newu[nodes,:], newu[nodes,:])
                            v_temp[nodes] = (v_temp[nodes]>0)*v_temp[nodes]
                del newatom
                newatom = np.copy(newu)
                # Normalize the atom; rescale coefficients to compensate.
                for nodes in range(NodeN):
                    if len(index_temp[nodes])>0:
                        normu = np.linalg.norm(newatom[nodes,:])
                        D[:,k,iters+1,nodes] = newatom[nodes,:]/normu
                        theta[nodes][k,index_temp[nodes]] = v_temp[nodes]*normu
        dt = time.time() - t0
        print('the %dth iteration takes %f seconds' %(iters,dt))
    return D
# Simple Game
# Demonstrates importing modules
import games, random
print("Welcome to the world's simplest game!\n")
# Play rounds until the user answers "n" to the replay prompt.
while True:
    roster = []
    player_count = games.ask_number(question="How many players? (2 - 5): ",
                                    low=2, high=5)
    for _ in range(player_count):
        # Ask for the name first, then draw a random score in 1..100.
        contestant_name = input("Player name: ")
        contestant = games.Player(contestant_name, random.randrange(100) + 1)
        roster.append(contestant)
    print("\nHere are the game results:")
    for contestant in roster:
        print(contestant)
    if games.ask_yes_no("\nDo you want to play again? (y/n): ") == "n":
        break
input("\n\nPress the enter key to exit.")
| 677 | 214 |
import engine
class Player:
    """Player entity that reports through the game engine's logger."""
    def talk(self, message: str) -> None:
        """Log the current engine version via the engine's logger.

        NOTE(review): the ``message`` argument is currently unused — the
        call below logs only the engine version string. Confirm whether
        ``message`` was meant to appear in the log output.
        """
        engine_version = engine.get_version()
        engine.print_log(message=f"Engine version = {engine_version}")
| 189 | 56 |
import argparse
import yomikatawa as yomi
def create_parser():
    """Build the argument parser for the yomikatawa CLI.

    Returns
    -------
    argparse.ArgumentParser
        Parser accepting one positional ``input_word`` plus options for
        the search category, romaji output and same-reading words.
    """
    parser = argparse.ArgumentParser(description="A command line interface for https://yomikatawa.com.")
    # BUG FIX: the old help text ("print possible choices on error") was
    # copy-pasted from an unrelated option and did not describe --category.
    parser.add_argument("-c", "--category", type=str, default="kanji",
                        help="word category to search in (default: kanji)")
    parser.add_argument("-r", "--romaji", action="store_true", dest="print_romaji", help="output romaji")
    parser.add_argument("-s", "--same-reading", action="store_true", dest="print_same_reading_words", help="output words with same reading")
    parser.add_argument("input_word", type=str)
    return parser
def main():
    """CLI entry point: parse arguments, query yomikatawa, print readings."""
    options = create_parser().parse_args()
    lookup = yomi.search(options.input_word, category=options.category)
    # The hiragana reading is always shown; the rest is opt-in.
    print("Hiragana: " + lookup.hiragana)
    if options.print_romaji:
        print("Romaji: " + lookup.romaji)
    if options.print_same_reading_words:
        print("Same reading: " + str(lookup.same_reading_words))
# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| 964 | 311 |
""" A module hosting all algorithms devised by Izzo """
import time
import numpy as np
from numpy import cross, pi
from numpy.linalg import norm
from scipy.special import hyp2f1
def izzo2015(
    mu,
    r1,
    r2,
    tof,
    M=0,
    prograde=True,
    low_path=True,
    maxiter=35,
    atol=1e-5,
    rtol=1e-7,
    full_output=False,
):
    r"""
    Solves Lambert problem using Izzo's devised algorithm.
    Parameters
    ----------
    mu: float
        Gravitational parameter, equivalent to :math:`GM` of attractor body.
    r1: numpy.array
        Initial position vector.
    r2: numpy.array
        Final position vector.
    tof: float
        Time of flight between the two position vectors.
    M: int
        Number of revolutions. Must be equal or greater than 0 value.
    prograde: bool
        If `True`, specifies prograde motion. Otherwise, retrograde motion is imposed.
    low_path: bool
        If two solutions are available, it selects between high or low path.
    maxiter: int
        Maximum number of iterations.
    atol: float
        Absolute tolerance.
    rtol: float
        Relative tolerance.
    full_output: bool
        If True, the number of iterations and time per iteration are also returned.
    Returns
    -------
    v1: numpy.array
        Initial velocity vector.
    v2: numpy.array
        Final velocity vector.
    numiter: int
        Number of iterations (only if ``full_output`` is True).
    tpi: float
        Time per iteration in seconds (only if ``full_output`` is True).
    Notes
    -----
    This is the algorithm devised by Dario Izzo[1] in 2015. It inherits from
    the one developed by Lancaster[2] during the 60s, following the universal
    formulae approach. It is one of the most modern solvers, being a complete
    Lambert's problem solver (zero and Multiple-revolution solutions). It shows
    high performance and robustness while requiring no more than four iterations
    to reach a solution.
    All credits of the implementation go to Juan Luis Cano Rodríguez and the
    poliastro development team, from which this routine inherits. Some changes
    were made to adapt it to `lamberthub` API. In addition, the hypergeometric
    function is the one from SciPy.
    Copyright (c) 2012-2021 Juan Luis Cano Rodríguez and the poliastro development team
    References
    ----------
    [1] Izzo, D. (2015). Revisiting Lambert’s problem. Celestial Mechanics
    and Dynamical Astronomy, 121(1), 1-15.
    [2] Lancaster, E. R., & Blanchard, R. C. (1969). A unified form of
    Lambert's theorem (Vol. 5368). National Aeronautics and Space
    Administration.
    """
    # Check that input parameters are safe
    #assert_parameters_are_valid(mu, r1, r2, tof, M)
    # Chord
    c = r2 - r1
    c_norm, r1_norm, r2_norm = norm(c), norm(r1), norm(r2)
    # Semiperimeter
    s = (r1_norm + r2_norm + c_norm) * 0.5
    # Versors
    i_r1, i_r2 = r1 / r1_norm, r2 / r2_norm
    i_h = cross(i_r1, i_r2)
    i_h = i_h / norm(i_h)
    # Geometry of the problem
    ll = np.sqrt(1 - min(1.0, c_norm / s))
    # Compute the fundamental tangential directions
    if i_h[2] < 0:
        ll = -ll
        i_t1, i_t2 = cross(i_r1, i_h), cross(i_r2, i_h)
    else:
        i_t1, i_t2 = cross(i_h, i_r1), cross(i_h, i_r2)
    # Correct transfer angle parameter and tangential vectors regarding orbit's
    # inclination
    ll, i_t1, i_t2 = (-ll, -i_t1, -i_t2) if prograde is False else (ll, i_t1, i_t2)
    # Non dimensional time of flight
    T = np.sqrt(2 * mu / s ** 3) * tof
    # Find solutions and filter them
    x, y, numiter, tpi = _find_xy(ll, T, M, maxiter, atol, rtol, low_path)
    # Reconstruct
    gamma = np.sqrt(mu * s / 2)
    rho = (r1_norm - r2_norm) / c_norm
    sigma = np.sqrt(1 - rho ** 2)
    # Compute the radial and tangential components at initial and final
    # position vectors
    V_r1, V_r2, V_t1, V_t2 = _reconstruct(x, y, r1_norm, r2_norm, ll, gamma, rho, sigma)
    # Solve for the initial and final velocity
    v1 = V_r1 * (r1 / r1_norm) + V_t1 * i_t1
    v2 = V_r2 * (r2 / r2_norm) + V_t2 * i_t2
    return (v1, v2, numiter, tpi) if full_output is True else (v1, v2)
def _reconstruct(x, y, r1, r2, ll, gamma, rho, sigma):
"""Reconstruct solution velocity vectors."""
V_r1 = gamma * ((ll * y - x) - rho * (ll * y + x)) / r1
V_r2 = -gamma * ((ll * y - x) + rho * (ll * y + x)) / r2
V_t1 = gamma * sigma * (y + ll * x) / r1
V_t2 = gamma * sigma * (y + ll * x) / r2
return [V_r1, V_r2, V_t1, V_t2]
def _find_xy(ll, T, M, maxiter, atol, rtol, low_path):
    """Computes all x, y for given number of revolutions."""
    # For abs(ll) == 1 the derivative is not continuous.
    assert abs(ll) < 1
    M_max = np.floor(T / pi)
    T_00 = np.arccos(ll) + ll * np.sqrt(1 - ll ** 2)  # T_xM
    # Refine the maximum number of revolutions if necessary.
    if M_max > 0 and T < T_00 + M_max * pi:
        _, T_min = _compute_T_min(ll, M_max, maxiter, atol, rtol)
        if T < T_min:
            M_max -= 1
    # A feasible solution only exists for M <= M_max.  This departs from the
    # original paper in that not all solutions are computed.
    if M > M_max:
        raise ValueError("No feasible solution, try lower M!")
    # Householder iterations from the initial guess give x; y follows.
    x_guess = _initial_guess(T, ll, M, low_path)
    x, numiter, tpi = _householder(x_guess, T, ll, M, atol, rtol, maxiter)
    return x, _compute_y(x, ll), numiter, tpi
def _compute_y(x, ll):
"""Computes y."""
return np.sqrt(1 - ll ** 2 * (1 - x ** 2))
def _compute_psi(x, y, ll):
"""Computes psi.
"The auxiliary angle psi is computed using Eq.(17) by the appropriate
inverse function"
"""
if -1 <= x < 1:
# Elliptic motion
# Use arc cosine to avoid numerical errors
return np.arccos(x * y + ll * (1 - x ** 2))
elif x > 1:
# Hyperbolic motion
# The hyperbolic sine is bijective
return np.arcsinh((y - x * ll) * np.sqrt(x ** 2 - 1))
else:
# Parabolic motion
return 0.0
def _tof_equation(x, T0, ll, M):
    """Time of flight equation."""
    # Convenience wrapper: derive y from x, then delegate.
    y = _compute_y(x, ll)
    return _tof_equation_y(x, y, T0, ll, M)
def _tof_equation_y(x, y, T0, ll, M):
"""Time of flight equation with externally computated y."""
if M == 0 and np.sqrt(0.6) < x < np.sqrt(1.4):
eta = y - ll * x
S_1 = (1 - ll - x * eta) * 0.5
Q = 4 / 3 * hyp2f1(3, 1, 5 / 2, S_1)
T_ = (eta ** 3 * Q + 4 * ll * eta) * 0.5
else:
psi = _compute_psi(x, y, ll)
T_ = np.divide(
np.divide(psi + M * pi, np.sqrt(np.abs(1 - x ** 2))) - x + ll * y,
(1 - x ** 2),
)
return T_ - T0
def _tof_equation_p(x, y, T, ll):
# TODO: What about derivatives when x approaches 1?
return (3 * T * x - 2 + 2 * ll ** 3 * x / y) / (1 - x ** 2)
def _tof_equation_p2(x, y, T, dT, ll):
return (3 * T + 5 * x * dT + 2 * (1 - ll ** 2) * ll ** 3 / y ** 3) / (1 - x ** 2)
def _tof_equation_p3(x, y, _, dT, ddT, ll):
return (7 * x * ddT + 8 * dT - 6 * (1 - ll ** 2) * ll ** 5 * x / y ** 5) / (
1 - x ** 2
)
def _compute_T_min(ll, M, maxiter, atol, rtol):
    """Compute minimum T."""
    if ll == 1:
        x_T_min = 0.0
        T_min = _tof_equation(x_T_min, 0.0, ll, M)
    elif M == 0:
        # Zero revolutions: T can be made arbitrarily small.
        x_T_min = np.inf
        T_min = 0.0
    else:
        # Start the Halley iteration from x_i > 0 to avoid problems at
        # ll = -1.
        x_i = 0.1
        T_i = _tof_equation(x_i, 0.0, ll, M)
        x_T_min = _halley(x_i, T_i, ll, atol, rtol, maxiter)
        T_min = _tof_equation(x_T_min, 0.0, ll, M)
    return [x_T_min, T_min]
def _initial_guess(T, ll, M, low_path):
"""Initial guess."""
if M == 0:
# Single revolution
T_0 = np.arccos(ll) + ll * np.sqrt(1 - ll ** 2) + M * pi # Equation 19
T_1 = 2 * (1 - ll ** 3) / 3 # Equation 21
if T >= T_0:
x_0 = (T_0 / T) ** (2 / 3) - 1
elif T < T_1:
x_0 = 5 / 2 * T_1 / T * (T_1 - T) / (1 - ll ** 5) + 1
else:
# This is the real condition, which is not exactly equivalent
# elif T_1 < T < T_0
x_0 = (T_0 / T) ** (np.log2(T_1 / T_0)) - 1
return x_0
else:
# Multiple revolution
x_0l = (((M * pi + pi) / (8 * T)) ** (2 / 3) - 1) / (
((M * pi + pi) / (8 * T)) ** (2 / 3) + 1
)
x_0r = (((8 * T) / (M * pi)) ** (2 / 3) - 1) / (
((8 * T) / (M * pi)) ** (2 / 3) + 1
)
# Filter out the solution
x_0 = np.max([x_0l, x_0r]) if low_path is True else np.min([x_0l, x_0r])
return x_0
def _halley(p0, T0, ll, atol, rtol, maxiter):
    """Find a minimum of time of flight equation using the Halley method.
    Note
    ----
    This function is private because it assumes a calling convention specific to
    this module and is not really reusable.
    """
    for _ in range(maxiter):
        y = _compute_y(p0, ll)
        fder = _tof_equation_p(p0, y, T0, ll)
        fder2 = _tof_equation_p2(p0, y, T0, fder, ll)
        if fder2 == 0:
            raise RuntimeError("Derivative was zero")
        fder3 = _tof_equation_p3(p0, y, T0, fder, fder2, ll)
        # Halley step (cubic convergence).
        p = p0 - 2 * fder * fder2 / (2 * fder2 ** 2 - fder * fder3)
        if abs(p - p0) < rtol * np.abs(p0) + atol:
            return p
        p0 = p
    raise RuntimeError("Failed to converge")
def _householder(p0, T0, ll, M, atol, rtol, maxiter):
    """Find a zero of time of flight equation using the Householder method.
    Note
    ----
    This function is private because it assumes a calling convention specific to
    this module and is not really reusable.
    """
    # The clock starts together with the iteration.
    tic = time.perf_counter()
    for numiter in range(1, maxiter + 1):
        y = _compute_y(p0, ll)
        fval = _tof_equation_y(p0, y, T0, ll, M)
        T = fval + T0
        fder = _tof_equation_p(p0, y, T, ll)
        fder2 = _tof_equation_p2(p0, y, T, fder, ll)
        fder3 = _tof_equation_p3(p0, y, T, fder, fder2, ll)
        # Householder step (quartic convergence).
        numerator = fder ** 2 - fval * fder2 / 2
        denominator = fder * (fder ** 2 - fval * fder2) + fder3 * fval ** 2 / 6
        p = p0 - fval * (numerator / denominator)
        if abs(p - p0) < rtol * np.abs(p0) + atol:
            # Stop the clock and compute the mean time per iteration.
            tpi = (time.perf_counter() - tic) / numiter
            return p, numiter, tpi
        p0 = p
    raise RuntimeError("Failed to converge")
'''
Test deleting SG with 2 attached NICs.
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.zstack_test.zstack_test_security_group as test_sg_header
import zstackwoodpecker.zstack_test.zstack_test_sg_vm as test_sg_vm_header
import apibinding.inventory as inventory
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
Port = test_state.Port
def test():
    '''
    Test deleting a security group while two VM NICs are attached to it.

    Test image requirements:
    1. have nc to check the network port
    2. have "nc" to open any port
    3. it doesn't include a default firewall
    VR image is a good candidate to be the guest image.
    '''
    # Stage 1: create two VMs on the VR L3 network and verify them.
    test_util.test_dsc("Create 3 VMs with vlan VR L3 network and using VR image.")
    vm1 = test_stub.create_sg_vm()
    test_obj_dict.add_vm(vm1)
    vm2 = test_stub.create_sg_vm()
    test_obj_dict.add_vm(vm2)
    vm1.check()
    vm2.check()
    # Stage 2: create a security group with three TCP ingress rules that
    # allow traffic from vm2's IP.
    test_util.test_dsc("Create security groups.")
    sg1 = test_stub.create_sg()
    sg_vm = test_sg_vm_header.ZstackTestSgVm()
    test_obj_dict.set_sg_vm(sg_vm)
    l3_uuid = vm1.vm.vmNics[0].l3NetworkUuid
    vr_vm = test_lib.lib_find_vr_by_vm(vm1.vm)[0]
    vm2_ip = test_lib.lib_get_vm_nic_by_l3(vm2.vm, l3_uuid).ip
    rule1 = test_lib.lib_gen_sg_rule(Port.rule1_ports, inventory.TCP, inventory.INGRESS, vm2_ip)
    rule2 = test_lib.lib_gen_sg_rule(Port.rule2_ports, inventory.TCP, inventory.INGRESS, vm2_ip)
    rule3 = test_lib.lib_gen_sg_rule(Port.rule3_ports, inventory.TCP, inventory.INGRESS, vm2_ip)
    sg1.add_rule([rule1])
    sg1.add_rule([rule2])
    sg1.add_rule([rule3])
    sg_vm.check()
    # Stage 3: attach both VMs' first NICs to the security group.
    nic_uuid1 = vm1.vm.vmNics[0].uuid
    nic_uuid2 = vm2.vm.vmNics[0].uuid
    # nic_uuid3 = vm2.vm.vmNics[0].uuid
    vm1_nics = (nic_uuid1, vm1)
    vm2_nics = (nic_uuid2, vm2)
    # vm3_nics = (nic_uuid3, vm3)
    #test_stub.lib_add_sg_rules(sg1.uuid, [rule0, rule1])
    test_util.test_dsc("Add nic to security group 1.")
    test_util.test_dsc("Allowed ingress ports: %s" % test_stub.rule1_ports)
    #sg_vm.attach(sg1, [vm1_nics, vm2_nics, vm3_nics])
    sg_vm.attach(sg1, [vm1_nics, vm2_nics])
    sg_vm.check()
    # Stage 4: delete the SG with both NICs still attached (the behavior
    # under test), then re-check the expected state.
    sg_vm.delete_sg(sg1)
    sg_vm.check()
    # Stage 5: clean up the VMs and report success.
    vm1.destroy()
    test_obj_dict.rm_vm(vm1)
    vm2.destroy()
    test_obj_dict.rm_vm(vm2)
    test_util.test_pass('Delete Security Group with 2 attached NICs Success')
# Will be called only if an exception happens in test().
def error_cleanup():
    """Best-effort cleanup of every resource tracked in test_obj_dict."""
    test_lib.lib_error_cleanup(test_obj_dict)
| 2,688 | 1,128 |
# Figure 5 in https://arxiv.org/pdf/1909.13404.pdf (towards modular and programmable architecture search)
import sane_tikz.core as stz
import sane_tikz.formatting as fmt
# --- Figure geometry (lengths in sane_tikz drawing units) ---
frame_height = 9.5
frame_width = 10.0
frame_spacing = 0.2
frame_roundness = 0.6
frame_line_width = 4.5 * fmt.standard_line_width
# Module boxes and their input/output trapezoids.
module_height = 1.6
module_width = 2.8
io_height = 0.40
io_long_side = 0.9
io_short_side = 1.0 * io_long_side
io_spacing = 0.12
# Property (p_*) and hyperparameter (h_*) ellipses.
p_height = 1.2 * io_height
p_width = 1.2
p_spacing = io_spacing / 2.0
h_width = 1 * p_width
h_height = 1.3 * p_height
h_spacing = io_spacing / 2.0
io_corner_roundness = 0.0
module_roundness = 0.0
line_width = 2.0 * fmt.standard_line_width
# Spacings used while laying out modules and hyperparameters.
module_inner_vertical_spacing = 0.1
delta_increment = 0.0
horizontal_module_spacing = 0.2
vertical_module_spacing = 0.2
spacing_between_module_and_hyperp = 0.8
spacing_between_hyperp_and_hyperp = 0.4
arrow_length = vertical_module_spacing
# --- TikZ formatting strings shared by the drawing helpers below ---
name2color = fmt.google_slides_named_colors()
connect_s_fmt = fmt.combine_tikz_strs(
    [fmt.arrow_heads("end"), fmt.line_width(line_width)])
input_s_fmt = fmt.combine_tikz_strs([
    fmt.line_width(line_width),
])
output_s_fmt = fmt.combine_tikz_strs([
    fmt.line_width(line_width),
])
property_s_fmt = fmt.combine_tikz_strs([
    fmt.line_width(line_width),
])
module_s_fmt = fmt.combine_tikz_strs([
    fmt.line_width(line_width),
])
hyperp_s_fmt = fmt.combine_tikz_strs([
    fmt.line_width(line_width),
])
frame_s_fmt = fmt.combine_tikz_strs([
    fmt.rounded_corners(frame_roundness),
    fmt.line_width(frame_line_width),
])
unassigned_h_s_fmt = fmt.combine_tikz_strs([
    fmt.anchor("left_center"),
])
assigned_h_s_fmt = fmt.combine_tikz_strs([
    fmt.anchor("left_center"),
])
def input(name):
    """Draw an input port: a trapezoid with *name* centred inside.

    NOTE: shadows the ``input`` builtin, which is not used in this module.
    """
    half_short = io_short_side / 2.0
    half_long = io_long_side / 2.0
    shape = stz.closed_path(
        [[-half_short, io_height], [half_short, io_height],
         [half_long, 0], [-half_long, 0]], input_s_fmt)
    label = stz.latex(stz.center_coords(shape), name)
    return [shape, label]
def output(name):
    """Draw an output port: a trapezoid with *name* centred inside.

    Mirror image of :func:`input` (long side on top instead of bottom).
    """
    half_long = io_long_side / 2.0
    half_short = io_short_side / 2.0
    shape = stz.closed_path(
        [[-half_long, io_height], [half_long, io_height],
         [half_short, 0], [-half_short, 0]], output_s_fmt)
    label = stz.latex(stz.center_coords(shape), name)
    return [shape, label]
def property(name, width_scale=1.0, height_scale=1.0):
    """Draw a property bubble: a scaled ellipse labelled with *name*.

    NOTE: shadows the ``property`` builtin (not used in this module).
    """
    bubble = stz.ellipse(
        [0, 0],
        width_scale * p_width / 2.0,
        height_scale * p_height / 2.0,
        property_s_fmt,
    )
    label = stz.latex(stz.center_coords(bubble), name)
    return [bubble, label]
def module(module_name,
           input_names,
           output_names,
           hyperp_names,
           p_width_scale=1.0):
    """Draw a module: a titled box with input ports along the top edge,
    output ports along the bottom edge and optional property bubbles
    stacked against the right edge.

    Returns [[box, title], inputs, outputs] and appends the property list
    as a fourth element when ``hyperp_names`` is non-empty.
    """
    inputs = [input(s) for s in input_names]
    outputs = [output(s) for s in output_names]
    box = stz.rectangle([0, 0], [module_width, -module_height], module_s_fmt)
    title = stz.latex(stz.center_coords(box), "\\textbf{%s}" % module_name)
    # Inputs along the top-left corner of the box.
    stz.distribute_horizontally_with_spacing(inputs, io_spacing)
    stz.translate_bbox_top_left_to_coords(
        inputs,
        [module_inner_vertical_spacing, -module_inner_vertical_spacing])
    # Outputs along the bottom-left corner of the box.
    stz.distribute_horizontally_with_spacing(outputs, io_spacing)
    stz.translate_bbox_bottom_left_to_coords(outputs, [
        module_inner_vertical_spacing,
        -module_height + module_inner_vertical_spacing
    ])
    if not hyperp_names:
        return [[box, title], inputs, outputs]
    # Property bubbles stacked against the top-right corner.
    props = [property(s, p_width_scale) for s in hyperp_names]
    stz.distribute_vertically_with_spacing(props, p_spacing)
    stz.translate_bbox_top_right_to_coords(props, [
        module_width - module_inner_vertical_spacing,
        -module_inner_vertical_spacing - delta_increment
    ])
    return [[box, title], inputs, outputs, props]
def independent_hyperparameter(name, values_expr, value=None):
    """Draw an independent hyperparameter ellipse.

    When ``value`` is None the bracketed candidate list ``values_expr`` is
    rendered to the right; otherwise the assigned ``value`` is shown there.
    """
    bubble = stz.ellipse([0, 0], h_width / 2.0, h_height / 2.0, hyperp_s_fmt)
    label = stz.latex(stz.center_coords(bubble), "\\textbf{%s}" % name)
    anchor_cs = stz.coords_from_bbox_with_fn(bubble, stz.right_center_coords)
    if value is not None:
        value_label = stz.latex(anchor_cs, "\\textbf{%s}" % value,
                                assigned_h_s_fmt)
        return [bubble, label, value_label]
    values_label = stz.latex(anchor_cs, "\\textbf{[%s]}" % (values_expr,),
                             unassigned_h_s_fmt)
    return [bubble, label, values_label]
def dependent_hyperparameter(name, hyperp_names, fn_expr, value=None):
    """Draw a dependent hyperparameter ellipse.

    Unassigned (value is None): the ellipse is widened, the function
    expression ``fn_expr`` is shown to the right and a small "x" property
    bubble is attached on the left.  Assigned: only the value is shown.

    NOTE(review): the ``hyperp_names`` argument is currently unused.
    """
    e = stz.ellipse([0, 0], h_width / 2.0, h_height / 2.0, hyperp_s_fmt)
    if value is None:
        # NOTE(review): this scales the radius by (2.1 * radius), i.e.
        # quadratically; "e['horizontal_radius'] *= 2.1" may have been the
        # intent — confirm against the rendered figure before changing.
        e["horizontal_radius"] *= 2.1 * e["horizontal_radius"]
    l_cs = stz.center_coords(e)
    if value is None:
        # Nudge the label right to make room for the attached "x" bubble.
        l_cs = stz.translate_coords_horizontally(l_cs, 0.1)
    l = stz.latex(l_cs, "\\textbf{%s}" % name)
    if value is None:
        fn_cs = stz.coords_from_bbox_with_fn(e, stz.right_center_coords)
        l_fn = stz.latex(fn_cs, "\\textbf{fn: %s}" % (fn_expr,),
                         unassigned_h_s_fmt)
        # Small "x" input-property bubble on the left of the ellipse.
        p = property("x", 0.25, 0.7)
        p_cs = stz.translate_coords_horizontally(
            stz.coords_from_bbox_with_fn(e, stz.left_center_coords), 0.1)
        stz.translate_bbox_left_center_to_coords(p, p_cs)
        return [e, l, l_fn, p]
    else:
        v_cs = stz.coords_from_bbox_with_fn(e, stz.right_center_coords)
        l_v = stz.latex(v_cs, "\\textbf{%s}" % value, assigned_h_s_fmt)
        return [e, l, l_v]
# Factory helpers: each draws a standard module whose title carries the
# given index; all have one hyperparameter bubble except Concat.
def dense(idx):
    """Dense module with a "units" hyperparameter bubble."""
    return module("Dense-%d" % idx, ["in"], ["out"], ["units"])
def conv2d(idx):
    """Conv2D module with a "filters" hyperparameter bubble."""
    return module("Conv2D-%d" % idx, ["in"], ["out"], ["filters"], 1.1)
def dropout(idx):
    """Dropout module with a "prob" hyperparameter bubble."""
    return module("Dropout-%d" % idx, ["in"], ["out"], ["prob"], 0.9)
def optional(idx):
    """Optional module with an "opt" hyperparameter bubble."""
    return module("Optional-%d" % idx, ["in"], ["out"], ["opt"])
def concat(idx):
    """Concat module with two inputs and no hyperparameter bubble."""
    return module("Concat-%d" % idx, ["in0", "in1"], ["out"], [])
def repeat(idx):
    """Repeat module with a "k" hyperparameter bubble."""
    return module("Repeat-%d" % idx, ["in"], ["out"], ["k"], 0.5)
def connect_modules(m_from, m_to, output_idx, input_idx):
    """Arrow from an output port of one module to an input port of another."""
    return stz.line_segment(
        stz.coords_from_bbox_with_fn(m_from[2][output_idx],
                                     stz.bottom_center_coords),
        stz.coords_from_bbox_with_fn(m_to[1][input_idx], stz.top_center_coords),
        connect_s_fmt)
def connect_hyperp_to_module(h, m, property_idx):
    """Arrow from a hyperparameter ellipse to a module's property bubble."""
    return stz.line_segment(
        stz.coords_from_bbox_with_fn(h[:2], stz.left_center_coords),
        stz.coords_from_bbox_with_fn(m[3][property_idx],
                                     stz.right_center_coords), connect_s_fmt)
def connect_hyperp_to_hyperp(h_from, h_to):
    """Arrow from one hyperparameter to a dependent hyperparameter's "x"."""
    return stz.line_segment(
        stz.coords_from_bbox_with_fn(h_from[:2], stz.right_center_coords),
        stz.coords_from_bbox_with_fn(h_to[3], stz.top_center_coords),
        connect_s_fmt)
def frame(frame_idx):
    """Draw one of the four sub-figures (search-space transition states).

    frame_idx 0 shows the initial space (Repeat modules unexpanded);
    1 shows it after expanding the Repeats into Conv2D chains; 2 and 3
    show progressively more hyperparameters assigned.  Returns the list
    of drawn elements plus the surrounding frame and its corner label.
    """
    assert frame_idx >= 0 and frame_idx <= 3
    # Instantiate every module used by any of the four frames; unused
    # ones are simply left out of `modules` below.
    c1 = conv2d(1)
    o = optional(1)
    r1 = repeat(1)
    r2 = repeat(2)
    cc = concat(1)
    c2 = conv2d(2)
    c3 = conv2d(3)
    c4 = conv2d(4)
    d = dropout(1)
    stz.distribute_horizontally_with_spacing([r1, r2],
                                             horizontal_module_spacing)
    stz.distribute_horizontally_with_spacing([c2, [c3, c4]],
                                             horizontal_module_spacing)
    # Lay the modules out in a vertical column (drawn bottom-up).
    modules = []
    if frame_idx == 0:
        stz.distribute_vertically_with_spacing([cc, [r1, r2], o, c1],
                                               vertical_module_spacing)
        stz.align_centers_horizontally([cc, [r1, r2], o, c1], 0)
        modules.extend([c1, o, r1, r2, cc])
    else:
        stz.distribute_vertically_with_spacing([c4, c3],
                                               vertical_module_spacing)
        stz.distribute_horizontally_with_spacing([c2, [c3, c4]],
                                                 horizontal_module_spacing)
        stz.align_centers_vertically([[c3, c4], c2], 0)
        if frame_idx == 1:
            stz.distribute_vertically_with_spacing([cc, [c2, c3, c4], o, c1],
                                                   vertical_module_spacing)
            stz.align_centers_horizontally([cc, [c2, c3, c4], o, c1], 0)
            modules.extend([c1, o, c2, c3, c4, cc])
        else:
            # Frames 2 and 3: the Optional has been resolved to Dropout.
            stz.distribute_vertically_with_spacing([cc, [c2, c3, c4], d, c1],
                                                   vertical_module_spacing)
            stz.align_centers_horizontally([cc, [c2, c3, c4], d, c1], 0)
            modules.extend([c1, d, c2, c3, c4, cc])
    # Dataflow arrows between the modules of this frame.
    module_connections = []
    if frame_idx == 0:
        module_connections.extend([
            connect_modules(c1, o, 0, 0),
            connect_modules(o, r1, 0, 0),
            connect_modules(o, r2, 0, 0),
            connect_modules(r1, cc, 0, 0),
            connect_modules(r2, cc, 0, 1),
        ])
    else:
        if frame_idx == 1:
            module_connections.extend([
                connect_modules(c1, o, 0, 0),
                connect_modules(o, c2, 0, 0),
                connect_modules(o, c3, 0, 0),
            ])
        else:
            module_connections.extend([
                connect_modules(c1, d, 0, 0),
                connect_modules(d, c2, 0, 0),
                connect_modules(d, c3, 0, 0),
            ])
        module_connections.extend([
            connect_modules(c3, c4, 0, 0),
            connect_modules(c2, cc, 0, 0),
            connect_modules(c4, cc, 0, 1),
        ])
    # # hyperparameters
    # Assigned values appear progressively as frame_idx grows.
    if frame_idx <= 1:
        h_o = independent_hyperparameter("IH-2", "0, 1")
    else:
        h_o = independent_hyperparameter("IH-2", "0, 1", "1")
    if frame_idx <= 0:
        h_r1 = dependent_hyperparameter("DH-1", ["x"], "2*x")
        h_r2 = independent_hyperparameter("IH-3", "1, 2, 4")
    else:
        h_r1 = dependent_hyperparameter("DH-1", ["x"], "2*x", "2")
        h_r2 = independent_hyperparameter("IH-3", "1, 2, 4", "1")
    if frame_idx <= 2:
        h_c1 = independent_hyperparameter("IH-1", "64, 128")
        h_c2 = independent_hyperparameter("IH-4", "64, 128")
        h_c3 = independent_hyperparameter("IH-5", "64, 128")
        h_c4 = independent_hyperparameter("IH-6", "64, 128")
        h_d = independent_hyperparameter("IH-7", "0.25, 0.5")
    else:
        h_c1 = independent_hyperparameter("IH-1", "64, 128", "64")
        h_c2 = independent_hyperparameter("IH-4", "64, 128", "128")
        h_c3 = independent_hyperparameter("IH-5", "64, 128", "128")
        h_c4 = independent_hyperparameter("IH-6", "64, 128", "64")
        h_d = independent_hyperparameter("IH-7", "0.25, 0.5", "0.5")
    def place_hyperp_right_of(h, m):
        # Vertically align hyperparameter h with module m's property
        # bubbles, then push it to the right of the module.
        y_p = stz.center_coords(m[3])[1]
        stz.align_centers_vertically([h], y_p)
        stz.place_to_the_right(h, m, spacing_between_module_and_hyperp)
    hyperparameters = []
    place_hyperp_right_of(h_c1, c1)
    if frame_idx in [0, 1]:
        place_hyperp_right_of(h_o, o)
        hyperparameters.append(h_o)
    if frame_idx == 0:
        place_hyperp_right_of(h_r1, r2)
        stz.place_above_and_align_to_the_right(h_r2, h_r1, 0.8)
        hyperparameters.extend([h_r1, h_r2, h_c1])
    else:
        place_hyperp_right_of(h_c1, c1)
        place_hyperp_right_of(h_c3, c3)
        place_hyperp_right_of(h_c4, c4)
        stz.place_below(h_c2, h_c1, 3.0)
        hyperparameters.extend([h_c1, h_c2, h_c3, h_c4])
        if frame_idx in [2, 3]:
            place_hyperp_right_of(h_d, d)
            hyperparameters.extend([h_d])
    # Hyperparameters no longer connected to any module in this frame are
    # still drawn, stacked in the bottom-right corner (see below).
    unreachable_hyperps = []
    if frame_idx == 1:
        stz.distribute_vertically_with_spacing([h_r1, h_r2], 0.2)
        unreachable_hyperps.extend([h_r1, h_r2])
    if frame_idx >= 2:
        stz.distribute_vertically_with_spacing([h_o, h_r1, h_r2], 0.2)
        unreachable_hyperps.extend([h_r1, h_r2, h_o])
    hyperparameters.extend(unreachable_hyperps)
    cs_fn = lambda e: stz.coords_from_bbox_with_fn(e, stz.left_center_coords)
    if frame_idx == 0:
        stz.translate_bbox_left_center_to_coords(h_r2, cs_fn([h_o, h_r1]))
    elif frame_idx == 1:
        stz.translate_bbox_left_center_to_coords(h_c2, cs_fn([h_o, h_c3]))
    else:
        stz.translate_bbox_left_center_to_coords(h_c2, cs_fn([h_d, h_c3]))
    # Arrows from hyperparameters to the property bubbles they control.
    hyperp_connections = [
        connect_hyperp_to_module(h_c1, c1, 0),
    ]
    if frame_idx in [0, 1]:
        hyperp_connections.extend([connect_hyperp_to_module(h_o, o, 0)])
    if frame_idx == 0:
        hyperp_connections.extend([
            connect_hyperp_to_module(h_r1, r2, 0),
            connect_hyperp_to_module(h_r2, r1, 0),
            connect_hyperp_to_hyperp(h_r2, h_r1)
        ])
    else:
        hyperp_connections.extend([
            connect_hyperp_to_module(h_c2, c2, 0),
            connect_hyperp_to_module(h_c3, c3, 0),
            connect_hyperp_to_module(h_c4, c4, 0),
        ])
        if frame_idx in [2, 3]:
            hyperp_connections.append(connect_hyperp_to_module(h_d, d, 0))
    # Surrounding rounded frame, centred (with a slight rightward bias)
    # around everything drawn so far.
    f = stz.rectangle_from_width_and_height([0, 0], frame_height, frame_width,
                                            frame_s_fmt)
    e = [modules, module_connections, hyperparameters, hyperp_connections]
    stz.translate_bbox_center_to_coords(
        f, stz.translate_coords_horizontally(stz.center_coords(e), 0.8))
    if len(unreachable_hyperps) > 0:
        stz.translate_bbox_bottom_right_to_coords(unreachable_hyperps,
                                                  stz.bbox(e)[1])
    # frame id
    s = ["a", "b", "c", "d"][frame_idx]
    label = [stz.latex([0, 0], "\\Huge \\textbf %s" % s)]
    stz.translate_bbox_top_left_to_coords(
        label,
        stz.translate_coords_antidiagonally(
            stz.coords_from_bbox_with_fn(f, stz.top_left_coords), 0.6))
    return e + [f, label]
def search_space_transition():
    """Render four frames of the search-space-transition figure and write
    them as a single TikZ standalone file (deep_architect.tex).

    Relies on module-level helpers `frame`, `stz`, `fmt`, `frame_spacing`
    and `name2color` defined elsewhere in this file.
    """
    # One rendered frame per transition step (a..d).
    e0 = frame(0)
    e1 = frame(1)
    e2 = frame(2)
    e3 = frame(3)
    e = [e0, e1, e2, e3]
    def get_idx(e_frame, indices):
        # Walk a nested element structure by successive indexing.
        e = e_frame
        for idx in indices:
            e = e[idx]
        return e
    def highlight(e_frame, indices, idx, color):
        # Recolor the element at `indices` by appending a fill to its TikZ
        # format string. NOTE(review): the `idx` parameter is never used.
        e = get_idx(e_frame, indices)
        s_fmt = fmt.combine_tikz_strs([e["tikz_str"], fmt.fill_color(color)])
        e['tikz_str'] = s_fmt
    # highlight new modules
    highlight(e1, [0, 2, 0, 0], 0, "light_green_2")
    highlight(e1, [0, 3, 0, 0], 0, "light_green_2")
    highlight(e1, [0, 4, 0, 0], 0, "light_green_2")
    highlight(e2, [0, 1, 0, 0], 0, "light_green_2")
    # highlight new hyperparameters
    highlight(e1, [2, 2, 0], 0, "light_green_2")
    highlight(e1, [2, 3, 0], 0, "light_green_2")
    highlight(e1, [2, 4, 0], 0, "light_green_2")
    highlight(e2, [2, 4, 0], 0, "light_green_2")
    # highlight assigned hyperparameters
    highlight(e1, [2, 5, 0], 0, "light_red_2")
    highlight(e1, [2, 6, 0], 0, "light_red_2")
    highlight(e2, [2, 7, 0], 0, "light_red_2")
    highlight(e3, [2, 0, 0], 0, "light_red_2")
    highlight(e3, [2, 1, 0], 0, "light_red_2")
    highlight(e3, [2, 2, 0], 0, "light_red_2")
    highlight(e3, [2, 3, 0], 0, "light_red_2")
    highlight(e3, [2, 4, 0], 0, "light_red_2")
    # arrange the four frames in a 2x2 grid (e0/e1 on top, e2/e3 below)
    stz.align_tops(e, 0.0)
    stz.distribute_horizontally_with_spacing([e0, e1], frame_spacing)
    stz.distribute_horizontally_with_spacing([e2, e3], frame_spacing)
    stz.distribute_vertically_with_spacing([[e2, e3], [e0, e1]], frame_spacing)
    stz.draw_to_tikz_standalone(e, "deep_architect.tex", name2color)
# Module entry point: generate the figure on import/run.
search_space_transition()
| 15,365 | 6,307 |
import time
import json
from basetestcase import BaseTestCase
from couchbase_helper.documentgenerator import doc_generator
from couchbase_helper.durability_helper import DurabilityHelper, \
DurableExceptions
from couchbase_helper.tuq_generators import JsonGenerator
from membase.api.rest_client import RestConnection
from mc_bin_client import MemcachedClient, MemcachedError
from remote.remote_util import RemoteMachineShellConnection
from table_view import TableView
"""
Capture basic get, set operations, also the meta operations.
This is based on some 4.1.1 test which had separate
bugs with incr and delete with meta and I didn't see an obvious home for them.
This is small now but we will reactively add things
These may be parameterized by:
- full and value eviction
- DGM and non-DGM
"""
class basic_ops(BaseTestCase):
    """Basic KV CRUD and meta-operation tests against a Couchbase cluster.

    Each test assumes the cluster/bucket created in setUp(); tests talk to
    the cluster through the framework task manager and raw memcached
    clients on port 11210.
    """

    def setUp(self):
        """Rebalance in the configured nodes, create the default bucket and
        the durability helper used by the tests."""
        super(basic_ops, self).setUp()
        # Key prefix padded to self.key_size with leading zeros.
        self.key = 'test_docs'.rjust(self.key_size, '0')
        nodes_init = self.cluster.servers[1:self.nodes_init] \
            if self.nodes_init != 1 else []
        self.task.rebalance([self.cluster.master], nodes_init, [])
        self.cluster.nodes_in_cluster.extend([self.cluster.master]+nodes_init)
        self.bucket_util.create_default_bucket(
            replica=self.num_replicas, compression_mode=self.compression_mode,
            bucket_type=self.bucket_type)
        self.bucket_util.add_rbac_user()
        self.src_bucket = self.bucket_util.get_all_buckets()
        self.durability_helper = DurabilityHelper(
            self.log, len(self.cluster.nodes_in_cluster),
            durability=self.durability_level,
            replicate_to=self.replicate_to,
            persist_to=self.persist_to)
        # Reset active_resident_threshold to avoid further data load as DGM
        self.active_resident_threshold = 0
        self.cluster_util.print_cluster_stats()
        self.bucket_util.print_bucket_stats()
        self.log.info("==========Finished Basic_ops base setup========")

    def tearDown(self):
        super(basic_ops, self).tearDown()

    def do_basic_ops(self):
        """Exercise incr/set/observe/evict and del_with_meta operations.

        NOTE(review): `VBucketAwareMemcached` is not imported in this file,
        and `mcd` used below is never defined (the line that created it is
        commented out), so this test raises NameError as written — confirm
        against the original 4.1.1 test this was ported from.
        """
        KEY_NAME = 'key1'
        KEY_NAME2 = 'key2'
        self.log.info('Starting basic ops')
        rest = RestConnection(self.cluster.master)
        default_bucket = self.bucket_util.get_all_buckets()[0]
        smart_client = VBucketAwareMemcached(rest, default_bucket)
        sdk_client = smart_client.get_client()
        # mcd = client.memcached(KEY_NAME)
        # MB-17231 - incr with full eviction
        rc = sdk_client.incr(KEY_NAME, delta=1)
        self.log.info('rc for incr: {0}'.format(rc))
        # MB-17289 del with meta
        rc = sdk_client.set(KEY_NAME, 0, 0,
                            json.dumps({'value': 'value2'}))
        self.log.info('set is: {0}'.format(rc))
        # cas = rc[1]
        # wait for it to persist
        persisted = 0
        while persisted == 0:
            opaque, rep_time, persist_time, persisted, cas = sdk_client.observe(KEY_NAME)
        try:
            rc = sdk_client.evict_key(KEY_NAME)
        except MemcachedError as exp:
            self.fail("Exception with evict meta - {0}".format(exp))
        CAS = 0xabcd
        try:
            # key, exp, flags, seqno, cas
            rc = mcd.del_with_meta(KEY_NAME2, 0, 0, 2, CAS)
        except MemcachedError as exp:
            self.fail("Exception with del_with meta - {0}".format(exp))

    # Reproduce test case for MB-28078
    def do_setWithMeta_twice(self):
        """Call setWithMeta twice with the same CAS and verify the second
        call does not leave a temp item behind (MB-28078)."""
        mc = MemcachedClient(self.cluster.master.ip, 11210)
        mc.sasl_auth_plain(self.cluster.master.rest_username,
                           self.cluster.master.rest_password)
        mc.bucket_select('default')
        try:
            mc.setWithMeta('1', '{"Hello":"World"}', 3600, 0, 1,
                           0x1512a3186faa0000)
        except MemcachedError as error:
            self.log.info("<MemcachedError #%d ``%s''>"
                          % (error.status, error.message))
            self.fail("Error on First setWithMeta()")
        stats = mc.stats()
        self.log.info('curr_items: {0} and curr_temp_items:{1}'
                      .format(stats['curr_items'], stats['curr_temp_items']))
        self.log.info("Sleeping for 5 and checking stats again")
        time.sleep(5)
        stats = mc.stats()
        self.log.info('curr_items: {0} and curr_temp_items:{1}'
                      .format(stats['curr_items'], stats['curr_temp_items']))
        try:
            mc.setWithMeta('1', '{"Hello":"World"}', 3600, 0, 1,
                           0x1512a3186faa0000)
        except MemcachedError as error:
            # The second identical setWithMeta is expected to fail; the bug
            # was that it left curr_temp_items at 1.
            stats = mc.stats()
            self.log.info('After 2nd setWithMeta(), curr_items: {} and curr_temp_items:{}'
                          .format(stats['curr_items'],
                                  stats['curr_temp_items']))
            if int(stats['curr_temp_items']) == 1:
                self.fail("Error on second setWithMeta(), expected curr_temp_items to be 0")
            else:
                self.log.info("<MemcachedError #%d ``%s''>"
                              % (error.status, error.message))

    def generate_docs_bigdata(self, docs_per_day, start=0,
                              document_size=1024000):
        """Return a big-data document generator of `docs_per_day` docs of
        roughly `document_size` bytes each."""
        json_generator = JsonGenerator()
        return json_generator.generate_docs_bigdata(
            start=start, end=docs_per_day, value_size=document_size)

    def test_doc_size(self):
        """CRUD (create, then update or delete per `doc_op` param) with
        vbucket-stat and doc-count validation."""
        def check_durability_failures():
            # Fail the test if any durability-acked op actually failed or
            # any op that raised an exception actually succeeded.
            self.log.error(task.sdk_acked_curd_failed.keys())
            self.log.error(task.sdk_exception_crud_succeed.keys())
            self.assertTrue(
                len(task.sdk_acked_curd_failed) == 0,
                "Durability failed for docs: %s" % task.sdk_acked_curd_failed.keys())
            self.assertTrue(
                len(task.sdk_exception_crud_succeed) == 0,
                "Durability failed for docs: %s" % task.sdk_acked_curd_failed.keys())
        """
        Basic tests for document CRUD operations using JSON docs
        """
        doc_op = self.input.param("doc_op", None)
        def_bucket = self.bucket_util.buckets[0]
        ignore_exceptions = list()
        retry_exceptions = list()
        # Stat validation reference variables
        verification_dict = dict()
        ref_val = dict()
        ref_val["ops_create"] = 0
        ref_val["ops_update"] = 0
        ref_val["ops_delete"] = 0
        ref_val["rollback_item_count"] = 0
        ref_val["sync_write_aborted_count"] = 0
        ref_val["sync_write_committed_count"] = 0
        one_less_node = self.nodes_init == self.num_replicas
        if self.durability_level:
            pass
            #ignore_exceptions.append(
            #    "com.couchbase.client.core.error.RequestTimeoutException")
        if self.target_vbucket and type(self.target_vbucket) is not list:
            self.target_vbucket = [self.target_vbucket]
        self.log.info("Creating doc_generator..")
        # Load basic docs into bucket
        doc_create = doc_generator(
            self.key, 0, self.num_items, doc_size=self.doc_size,
            doc_type=self.doc_type, target_vbucket=self.target_vbucket,
            vbuckets=self.vbuckets)
        self.log.info("Loading {0} docs into the bucket: {1}"
                      .format(self.num_items, def_bucket))
        task = self.task.async_load_gen_docs(
            self.cluster, def_bucket, doc_create, "create", 0,
            batch_size=self.batch_size, process_concurrency=self.process_concurrency,
            replicate_to=self.replicate_to, persist_to=self.persist_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout,
            ryow=self.ryow,
            check_persistence=self.check_persistence)
        self.task.jython_task_manager.get_task_result(task)
        if self.ryow:
            check_durability_failures()
        # Retry doc_exception code
        self.log.info("Validating failed doc's (if any) exceptions")
        doc_op_info_dict = dict()
        doc_op_info_dict[task] = self.bucket_util.get_doc_op_info_dict(
            def_bucket, "create", exp=0, replicate_to=self.replicate_to,
            persist_to=self.persist_to, durability=self.durability_level,
            timeout=self.sdk_timeout, time_unit="seconds",
            ignore_exceptions=ignore_exceptions,
            retry_exceptions=retry_exceptions)
        self.bucket_util.verify_doc_op_task_exceptions(doc_op_info_dict,
                                                       self.cluster)
        if len(doc_op_info_dict[task]["unwanted"]["fail"].keys()) != 0:
            self.fail("Failures in retry doc CRUDs: {0}"
                      .format(doc_op_info_dict[task]["unwanted"]["fail"]))
        self.log.info("Wait for ep_all_items_remaining to become '0'")
        self.bucket_util._wait_for_stats_all_buckets()
        # Update ref_val
        ref_val["ops_create"] = self.num_items + len(task.fail.keys())
        ref_val["sync_write_committed_count"] = self.num_items
        # Validate vbucket stats
        verification_dict["ops_create"] = ref_val["ops_create"]
        verification_dict["rollback_item_count"] = \
            ref_val["rollback_item_count"]
        if self.durability_level:
            verification_dict["sync_write_aborted_count"] = \
                ref_val["sync_write_aborted_count"]
            verification_dict["sync_write_committed_count"] = \
                ref_val["sync_write_committed_count"]
        failed = self.durability_helper.verify_vbucket_details_stats(
            def_bucket, self.cluster_util.get_kv_nodes(),
            vbuckets=self.vbuckets, expected_val=verification_dict,
            one_less_node=one_less_node)
        if failed:
            self.fail("Cbstat vbucket-details verification failed")
        # Verify initial doc load count
        self.log.info("Validating doc_count in buckets")
        self.bucket_util.verify_stats_all_buckets(self.num_items)
        self.log.info("Creating doc_generator for doc_op")
        # Mutations target only the first half of the loaded key range.
        num_item_start_for_crud = int(self.num_items / 2)
        doc_update = doc_generator(
            self.key, 0, num_item_start_for_crud,
            doc_size=self.doc_size, doc_type=self.doc_type,
            target_vbucket=self.target_vbucket, vbuckets=self.vbuckets)
        expected_num_items = self.num_items
        num_of_mutations = 1
        if doc_op == "update":
            self.log.info("Performing 'update' mutation over the docs")
            task = self.task.async_load_gen_docs(
                self.cluster, def_bucket, doc_update, "update", 0,
                batch_size=self.batch_size, process_concurrency=self.process_concurrency,
                replicate_to=self.replicate_to, persist_to=self.persist_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                ryow=self.ryow,
                check_persistence=self.check_persistence)
            self.task.jython_task_manager.get_task_result(task)
            ref_val["ops_update"] = (doc_update.end - doc_update.start
                                     + len(task.fail.keys()))
            if self.durability_level:
                ref_val["sync_write_committed_count"] += \
                    (doc_update.end - doc_update.start)
            if self.ryow:
                check_durability_failures()
            # Read all the values to validate update operation
            task = self.task.async_load_gen_docs(
                self.cluster, def_bucket, doc_update, "read", 0,
                batch_size=self.batch_size, process_concurrency=self.process_concurrency,
                timeout_secs=self.sdk_timeout)
            self.task.jython_task_manager.get_task_result(task)
            op_failed_tbl = TableView(self.log.error)
            op_failed_tbl.set_headers(["Update failed key", "CAS", "Value"])
            for key, value in task.success.items():
                # A successfully updated doc must carry mutated == 1.
                if json.loads(str(value["value"]))["mutated"] != 1:
                    op_failed_tbl.add_row([key, value["cas"], value["value"]])
            op_failed_tbl.display("Update failed for keys:")
            if len(op_failed_tbl.rows) != 0:
                self.fail("Update failed for few keys")
        elif doc_op == "delete":
            self.log.info("Performing 'delete' mutation over the docs")
            task = self.task.async_load_gen_docs(
                self.cluster, def_bucket, doc_update, "delete", 0,
                batch_size=self.batch_size, process_concurrency=self.process_concurrency,
                replicate_to=self.replicate_to, persist_to=self.persist_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                ryow=self.ryow, check_persistence=self.check_persistence)
            self.task.jython_task_manager.get_task_result(task)
            expected_num_items = self.num_items \
                - (self.num_items - num_item_start_for_crud)
            ref_val["ops_delete"] = (doc_update.end - doc_update.start
                                     + len(task.fail.keys()))
            if self.durability_level:
                ref_val["sync_write_committed_count"] += \
                    (doc_update.end - doc_update.start)
            if self.ryow:
                check_durability_failures()
            # Read all the values to validate update operation
            task = self.task.async_load_gen_docs(
                self.cluster, def_bucket, doc_update, "read", 0,
                batch_size=10, process_concurrency=8,
                timeout_secs=self.sdk_timeout)
            self.task.jython_task_manager.get_task_result(task)
            op_failed_tbl = TableView(self.log.error)
            op_failed_tbl.set_headers(["Delete failed key", "CAS", "Value"])
            # Any key still readable after the delete is a failure.
            for key, value in task.success.items():
                op_failed_tbl.add_row([key, value["cas"], value["value"]])
            op_failed_tbl.display("Delete failed for keys:")
            if len(op_failed_tbl.rows) != 0:
                self.fail("Delete failed for few keys")
        else:
            self.log.warning("Unsupported doc_operation")
        self.log.info("Wait for ep_all_items_remaining to become '0'")
        self.bucket_util._wait_for_stats_all_buckets()
        # Validate vbucket stats
        verification_dict["ops_create"] = ref_val["ops_create"]
        verification_dict["ops_update"] = ref_val["ops_update"]
        verification_dict["ops_delete"] = ref_val["ops_delete"]
        verification_dict["rollback_item_count"] = \
            ref_val["rollback_item_count"]
        if self.durability_level:
            verification_dict["sync_write_aborted_count"] = \
                ref_val["sync_write_aborted_count"]
            verification_dict["sync_write_committed_count"] = \
                ref_val["sync_write_committed_count"]
        failed = self.durability_helper.verify_vbucket_details_stats(
            def_bucket, self.cluster_util.get_kv_nodes(),
            vbuckets=self.vbuckets, expected_val=verification_dict,
            one_less_node=one_less_node)
        if failed:
            self.fail("Cbstat vbucket-details verification failed")
        self.log.info("Validating doc_count")
        self.bucket_util.verify_stats_all_buckets(expected_num_items)

    def test_large_doc_size(self):
        # bucket size=256MB, when Bucket gets filled 236MB then test starts failing
        # document size=2MB, No of docs = 221 , load 250 docs
        # generate docs with size >= 1MB , See MB-29333
        self.doc_size *= 1024000
        gens_load = self.generate_docs_bigdata(
            docs_per_day=self.num_items, document_size=self.doc_size)
        for bucket in self.bucket_util.buckets:
            task = self.task.async_load_gen_docs(
                self.cluster, bucket, gens_load, "create", 0,
                batch_size=10, process_concurrency=8,
                replicate_to=self.replicate_to, persist_to=self.persist_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout)
            self.task.jython_task_manager.get_task_result(task)
        # check if all the documents(250) are loaded with default timeout
        self.bucket_util.verify_stats_all_buckets(self.num_items)

    def test_large_doc_20MB(self):
        # test reproducer for MB-29258,
        # Load a doc which is greater than 20MB
        # with compression enabled and check if it fails
        # check with compression_mode as active, passive and off
        val_error = DurableExceptions.ValueTooLargeException
        gens_load = self.generate_docs_bigdata(
            docs_per_day=1, document_size=(self.doc_size * 1024000))
        for bucket in self.bucket_util.buckets:
            task = self.task.async_load_gen_docs(
                self.cluster, bucket, gens_load, "create", 0,
                batch_size=10, process_concurrency=8,
                replicate_to=self.replicate_to, persist_to=self.persist_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout)
            self.task.jython_task_manager.get_task_result(task)
            if self.doc_size > 20:
                # Docs above the 20MB limit must be rejected.
                if len(task.fail.keys()) == 0:
                    self.log_failure("No failures during large doc insert")
                for doc_id, doc_result in task.fail.items():
                    if val_error not in str(doc_result["error"]):
                        self.log_failure("Invalid exception for key %s: %s"
                                         % (doc_id, doc_result))
            else:
                if len(task.success.keys()) == 0:
                    self.log_failure("Failures during large doc insert")
        for bucket in self.bucket_util.buckets:
            if self.doc_size > 20:
                # failed with error "Data Too Big" when document size > 20MB
                self.bucket_util.verify_stats_all_buckets(0)
            else:
                self.bucket_util.verify_stats_all_buckets(1)
                # Attempt to replace the doc with an oversized (21MB) value;
                # this must fail and leave the original doc intact.
                gens_update = self.generate_docs_bigdata(
                    docs_per_day=1, document_size=(21 * 1024000))
                task = self.task.async_load_gen_docs(
                    self.cluster, bucket, gens_update, "create", 0,
                    batch_size=10,
                    process_concurrency=8,
                    replicate_to=self.replicate_to,
                    persist_to=self.persist_to,
                    durability=self.durability_level,
                    timeout_secs=self.sdk_timeout)
                self.task.jython_task_manager.get_task_result(task)
                if len(task.success.keys()) != 0:
                    self.log_failure("Large docs inserted for keys: %s"
                                     % task.success.keys())
                if len(task.fail.keys()) == 0:
                    self.log_failure("No failures during large doc insert")
                for doc_id, doc_result in task.fail.items():
                    if val_error not in str(doc_result["error"]):
                        self.log_failure("Invalid exception for key %s: %s"
                                         % (doc_id, doc_result))
                self.bucket_util.verify_stats_all_buckets(1)
        self.validate_test_failure()

    def test_diag_eval_curl(self):
        # Check if diag/eval can be done only by local host
        self.disable_diag_eval_on_non_local_host = \
            self.input.param("disable_diag_eval_non_local", False)
        port = self.cluster.master.port
        # check if local host can work fine
        cmd = []
        cmd_base = 'curl http://{0}:{1}@localhost:{2}/diag/eval ' \
            .format(self.cluster.master.rest_username,
                    self.cluster.master.rest_password, port)
        command = cmd_base + '-X POST -d \'os:cmd("env")\''
        cmd.append(command)
        command = cmd_base + '-X POST -d \'case file:read_file("/etc/passwd") of {ok, B} -> io:format("~p~n", [binary_to_term(B)]) end.\''
        cmd.append(command)
        shell = RemoteMachineShellConnection(self.cluster.master)
        for command in cmd:
            output, error = shell.execute_command(command)
            self.assertNotEquals("API is accessible from localhost only", output[0])
        # Disable allow_nonlocal_eval
        if not self.disable_diag_eval_on_non_local_host:
            command = cmd_base + '-X POST -d \'ns_config:set(allow_nonlocal_eval, true).\''
            _, _ = shell.execute_command(command)
        # Check ip address on diag/eval will not work fine when allow_nonlocal_eval is disabled
        cmd = []
        cmd_base = 'curl http://{0}:{1}@{2}:{3}/diag/eval ' \
            .format(self.cluster.master.rest_username,
                    self.cluster.master.rest_password,
                    self.cluster.master.ip, port)
        command = cmd_base + '-X POST -d \'os:cmd("env")\''
        cmd.append(command)
        command = cmd_base + '-X POST -d \'case file:read_file("/etc/passwd") of {ok, B} -> io:format("~p~n", [binary_to_term(B)]) end.\''
        cmd.append(command)
        for command in cmd:
            output, error = shell.execute_command(command)
            if self.disable_diag_eval_on_non_local_host:
                self.assertEquals("API is accessible from localhost only",
                                  output[0])
            else:
                self.assertNotEquals("API is accessible from localhost only",
                                     output[0])

    def verify_stat(self, items, value="active"):
        """Assert compression stats on the default bucket: the compression
        mode, the number of compressed items, and that compressed and
        uncompressed active-item memory differ."""
        mc = MemcachedClient(self.cluster.master.ip, 11210)
        mc.sasl_auth_plain(self.cluster.master.rest_username,
                           self.cluster.master.rest_password)
        mc.bucket_select('default')
        stats = mc.stats()
        self.assertEquals(stats['ep_compression_mode'], value)
        self.assertEquals(int(stats['ep_item_compressor_num_compressed']),
                          items)
        self.assertNotEquals(int(stats['vb_active_itm_memory']),
                             int(stats['vb_active_itm_memory_uncompressed']))

    def test_compression_active_and_off(self):
        """
        test reproducer for MB-29272,
        Load some documents with compression mode set to active
        get the cbstats
        change compression mode to off and wait for minimum 250ms
        Load some more documents and check the compression is not done
        epengine.basic_ops.basic_ops.test_compression_active_and_off,items=10000,compression_mode=active
        :return:
        """
        # Load some documents with compression mode as active
        gen_create = doc_generator("eviction1_",
                                   start=0,
                                   end=self.num_items,
                                   doc_size=self.doc_size)
        gen_create2 = doc_generator("eviction2_",
                                    start=0,
                                    end=self.num_items,
                                    doc_size=self.doc_size)
        def_bucket = self.bucket_util.get_all_buckets()[0]
        task = self.task.async_load_gen_docs(
            self.cluster, def_bucket, gen_create, "create", 0,
            batch_size=10, process_concurrency=8,
            replicate_to=self.replicate_to, persist_to=self.persist_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout)
        self.task.jython_task_manager.get_task_result(task)
        self.bucket_util._wait_for_stats_all_buckets()
        self.bucket_util.verify_stats_all_buckets(self.num_items)
        remote = RemoteMachineShellConnection(self.cluster.master)
        for bucket in self.bucket_util.buckets:
            # change compression mode to off
            output, _ = remote.execute_couchbase_cli(
                cli_command='bucket-edit', cluster_host="localhost:8091",
                user=self.cluster.master.rest_username,
                password=self.cluster.master.rest_password,
                options='--bucket=%s --compression-mode off' % bucket.name)
            self.assertTrue(' '.join(output).find('SUCCESS') != -1,
                            'compression mode set to off')
            # sleep for 10 sec (minimum 250sec)
            time.sleep(10)
        # Load data and check stats to see compression
        # is not done for newly added data
        task = self.task.async_load_gen_docs(
            self.cluster, def_bucket, gen_create2, "create", 0,
            batch_size=10, process_concurrency=8,
            replicate_to=self.replicate_to, persist_to=self.persist_to,
            durability=self.durability_level,
            timeout_secs=self.sdk_timeout)
        self.task.jython_task_manager.get_task_result(task)
        self.bucket_util._wait_for_stats_all_buckets()
        self.bucket_util.verify_stats_all_buckets(self.num_items*2)

    def do_get_random_key(self):
        # MB-31548, get_Random key gets hung sometimes.
        mc = MemcachedClient(self.cluster.master.ip, 11210)
        mc.sasl_auth_plain(self.cluster.master.rest_username,
                           self.cluster.master.rest_password)
        mc.bucket_select('default')
        count = 0
        # Hammer get_random_key a million times to catch the hang.
        while count < 1000000:
            count += 1
            try:
                mc.get_random_key()
            except MemcachedError as error:
                self.fail("<MemcachedError #%d ``%s''>"
                          % (error.status, error.message))
            if count % 1000 == 0:
                self.log.info('The number of iteration is {}'.format(count))
| 25,801 | 7,836 |
import matplotlib.pyplot as plt
import yaml
import os
# Plot FID vs classifier scale for three classes, read from per-scale YAML
# result files produced by the sampling run.
workspace = "/workspace/mnt/storage/guangcongzheng/zju_zgc/guided-diffusion"
num_samples = 192
log = os.path.join(workspace, 'log/imagenet1000_classifier256x256_channel128_upperbound/predict/model500000_imagenet1000_stepsddim25_sample{}_selectedClass'.format(num_samples))
legends = []
plt.figure()
for class_id in range(3):
    fid = []
    for scale in range(1, 21):
        result_name = 'result_scale{}.0_class{}_stepsddim25_sample{}.yaml'.format(scale, class_id, num_samples)
        result_path = os.path.join(log, result_name)
        with open(result_path, "r") as stream:
            try:
                result_dict = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                print(exc)
                # BUG FIX: previously fell through and appended a stale (or
                # undefined) result_dict; skip unreadable results instead.
                continue
        fid.append(result_dict['fid'])
        print(result_dict)
    plt.plot(fid)
    plt.xlabel('classifier scale')
    # BUG FIX: was plt.ylabel(fid), which passed the data list as the axis
    # label instead of the label string.
    plt.ylabel('fid')
    legends.append('sample{}_class{}'.format(num_samples, class_id))
plt.legend(legends)
plt.show()
| 1,025 | 386 |
from bs4 import BeautifulSoup
import requests
import urllib.request
from datetime import datetime
import time
from PIL import Image, ImageDraw, ImageFont
import ctypes
import os
import shutil
import socket
import sys
def is_connected(hostname):
    """Return True if *hostname* resolves and accepts a TCP connection on
    port 80 within 2 seconds, else False.

    Fixes over the original: the probe socket is closed instead of leaked,
    and the bare ``except`` (which swallowed even KeyboardInterrupt) is
    narrowed to OSError — the base of all socket errors, including
    socket.gaierror from a failed DNS lookup and socket.timeout.
    """
    try:
        # see if we can resolve the host name -- tells us if there is
        # a DNS listening
        host = socket.gethostbyname(hostname)
        # connect to the host -- tells us if the host is actually
        # reachable
        s = socket.create_connection((host, 80), 2)
        s.close()  # don't leak the probe socket's file descriptor
        return True
    except OSError:
        return False
if __name__ == "__main__":
    # Windows-only script: downloads the daily Bing wallpaper, stamps a
    # credit line on it, and sets it as the desktop background via
    # ctypes.windll (SystemParametersInfoW).
    # check internet connection
    while True:
        #
        if not is_connected("www.google.com"):
            print("@author: Swapnil Mali \nPlease check your internet connection, will try again after 30 seconds..")
            time.sleep(30)
            continue
        # move shortcut to main.exe to startup folder
        try:
            # get user name
            user = os.getlogin()
            path = r'C:\Users\{}\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup\main - Shortcut.lnk'.format(user)
            # print(path)
            shutil.move(r'main - Shortcut.lnk', path)
        except FileNotFoundError:
            # Shortcut already moved (or never shipped) -- nothing to do.
            pass
        # just credit and copyright stuff
        print('@author: Swapnil Mali \n\n(Note: New wallpaper is available everyday after 2.00 pm)')
        print("Downloading Today's Wallpaper...please wait!!")
        # get image link from the website page
        res = requests.get('https://bing.wallpaper.pics/')
        soup = BeautifulSoup(res.text, 'lxml')
        image_box = soup.find('a', {'class': 'cursor_zoom'})
        image = image_box.find('img')
        link = image['src']
        # download and save the image, named after today's date
        filename = datetime.now().strftime('%d-%m-%y')
        urllib.request.urlretrieve(link, '{}.jpg'.format(filename))
        # for copyright overlaying text over the image
        image = Image.open('{}.jpg'.format(filename))
        font_type = ImageFont.truetype('fonts/Quicksand-Bold.otf', 44)
        draw = ImageDraw.Draw(image)
        draw.text(xy=(800, 1000), text='© Swapnil Mali', fill=(0, 0, 0), font=font_type)
        # image.show()
        image.save('{}.jpg'.format(filename))
        print("\n\n-------------------------------------------\nDone..New wallpaper saved as '{}.jpg'\n-------------------------------------------".format(filename))
        time.sleep(1)
        # set new image as desktop background
        directory = os.getcwd()
        # NOTE(review): '\{' is an invalid escape sequence (kept literally
        # by CPython, with a DeprecationWarning); os.path.join would be the
        # robust form -- confirm before changing behavior.
        image_path = '{}\{}.jpg'.format(directory, filename)
        print("\nSetting new Wallpaper..".format(filename))
        # SPI_SETDESKWALLPAPER (20), SPIF_UPDATEINIFILE | SPIF_SENDCHANGE (3)
        ctypes.windll.user32.SystemParametersInfoW(20, 0, image_path, 3)
        time.sleep(2)
        print("Done..Closing this window")
        time.sleep(2)
        # The loop body always exits here on success; the `while True` only
        # repeats while waiting for connectivity.
        sys.exit()
| 2,882 | 846 |
# coding=utf-8
# @Author : zhzhx2008
# @Time : 18-10-9
import os
import warnings
import jieba
import numpy as np
from keras import Input
from keras import Model
from keras import backend as K
from keras import initializers, regularizers, constraints
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.engine.topology import Layer
from keras.layers import Dropout, Bidirectional
from keras.layers import Embedding, Dense
from keras.layers import LSTM, SpatialDropout1D
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
# Silence library warnings and fix the NumPy RNG for reproducible runs.
warnings.filterwarnings("ignore")
seed = 2019
np.random.seed(seed)
def get_labels_datas(input_dir):
    """Read the first line of every text file under *input_dir*.

    Each immediate sub-directory name serves as the label for the files it
    contains. Returns three parallel lists:
    (labels, word-segmented texts, character-segmented texts),
    where word segmentation uses jieba and both segmentations join tokens
    with single spaces.
    """
    labels = []
    datas_word = []
    datas_char = []
    for category in os.listdir(input_dir):
        category_dir = os.path.join(input_dir, category)
        for txt_name in os.listdir(category_dir):
            with open(os.path.join(category_dir, txt_name), 'r') as fin:
                # Only the first line of each document is used.
                text = fin.readline().strip().replace(' ', '')
            datas_word.append(' '.join(jieba.cut(text)))
            datas_char.append(' '.join(text))
            labels.append(category)
    return labels, datas_word, datas_char
def get_label_id_map(labels):
    """Build id->label and label->id lookup tables.

    The unique labels are sorted before ids are assigned, so the mapping is
    deterministic across runs. The original iterated a plain ``set``, whose
    order varies with the string hash seed, making the label ids (and any
    model trained against them) irreproducible despite the seeded RNG at
    the top of this file.

    :param labels: iterable of label strings (duplicates allowed)
    :return: tuple ``(id_label_map, label_id_map)``
    """
    id_label_map = {}
    label_id_map = {}
    for index, label in enumerate(sorted(set(labels))):
        id_label_map[index] = label
        label_id_map[label] = index
    return id_label_map, label_id_map
# 《Feed-Forward Networks with Attention Can Solve Some Long-Term Memory Problems》
# [https://arxiv.org/abs/1512.08756]
# https://www.kaggle.com/qqgeogor/keras-lstm-attention-glove840b-lb-0-043
class Attention(Layer):
    """Additive attention over the time axis, collapsing (samples, steps,
    features) to (samples, features) as in Raffel & Ellis (2015)."""

    def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        """
        Keras Layer that implements an Attention mechanism for temporal data.
        Supports Masking.
        Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
        # Input shape
            3D tensor with shape: `(samples, steps, features)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.
        :param kwargs:
        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
        The dimensions are inferred based on the output shape of the RNN.
        Example:
            model.add(LSTM(64, return_sequences=True))
            model.add(Attention())
        """
        self.supports_masking = True
        # self.init = initializations.get('glorot_uniform')
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.step_dim = step_dim
        # Set for real in build() once the input shape is known.
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        # Expect (samples, steps, features).
        assert len(input_shape) == 3
        # One scoring weight per feature.
        self.W = self.add_weight((input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.features_dim = input_shape[-1]
        if self.bias:
            # One bias per timestep.
            self.b = self.add_weight((input_shape[1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None
        self.built = True

    def compute_mask(self, input, input_mask=None):
        # do not pass the mask to the next layers
        return None

    def call(self, x, mask=None):
        # eij = K.dot(x, self.W) TF backend doesn't support it
        # features_dim = self.W.shape[0]
        # step_dim = x._keras_shape[1]
        features_dim = self.features_dim
        step_dim = self.step_dim
        # Per-timestep score: x . W, reshaped to (samples, steps).
        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))), (-1, step_dim))
        if self.bias:
            eij += self.b
        eij = K.tanh(eij)
        # Softmax over timesteps, computed manually so the mask can be
        # applied between exp and normalization.
        a = K.exp(eij)
        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())
        # in some cases especially in the early stages of training the sum may be almost zero
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        # Weighted sum of timesteps -> (samples, features).
        weighted_input = x * a
        # print(weighted_input.shape)
        # return weighted_input
        return K.sum(weighted_input, axis=1)

    def compute_output_shape(self, input_shape):
        # return input_shape[0], input_shape[1], self.features_dim
        return input_shape[0], self.features_dim
# Train and evaluate a BiLSTM + attention text classifier on THUCNews.
input_dir = './data/THUCNews'
labels, datas_word, datas_char = get_labels_datas(input_dir)
id_label_map, label_id_map = get_label_id_map(labels)
# First split: 30% held out as the test set. Note the train portions are
# assigned back to `labels`/`datas_word`/`datas_char`, which the next split
# then divides into train/dev.
labels, labels_test, datas_word, datas_word_test, datas_char, datas_char_test = train_test_split(labels, datas_word, datas_char, test_size=0.3, shuffle=True, stratify=labels)
labels_train, labels_dev, datas_word_train, datas_word_dev, datas_char_train, datas_char_dev = train_test_split(labels, datas_word, datas_char, test_size=0.1, shuffle=True, stratify=labels)
y_train = [label_id_map.get(x) for x in labels_train]
y_dev = [label_id_map.get(x) for x in labels_dev]
y_test = [label_id_map.get(x) for x in labels_test]
num_classes = len(set(y_train))
y_train_index = to_categorical(y_train, num_classes)
y_dev_index = to_categorical(y_dev, num_classes)
y_test_index = to_categorical(y_test, num_classes)
# keras extract feature
# Vocabulary is fit on the training split only.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(datas_word_train)
# feature5: word index for deep learning
x_train_word_index = tokenizer.texts_to_sequences(datas_word_train)
x_dev_word_index = tokenizer.texts_to_sequences(datas_word_dev)
x_test_word_index = tokenizer.texts_to_sequences(datas_word_test)
# All sequences padded to the longest training sequence.
max_word_length = max([len(x) for x in x_train_word_index])
x_train_word_index = pad_sequences(x_train_word_index, maxlen=max_word_length)
x_dev_word_index = pad_sequences(x_dev_word_index, maxlen=max_word_length)
x_test_word_index = pad_sequences(x_test_word_index, maxlen=max_word_length)
# NOTE(review): `input` shadows the builtin of the same name.
input = Input(shape=(max_word_length,))
embedding = Embedding(len(tokenizer.word_index) + 1, 128)(input)
embedding = SpatialDropout1D(0.2)(embedding)
# The commented variants below are alternative encoders/poolings that were
# benchmarked; the accuracies noted inline are from those experiments.
# rnn = SimpleRNN(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = Bidirectional(SimpleRNN(100, return_sequences=True))(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = GRU(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = Bidirectional(GRU(100, return_sequences=True))(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = CuDNNGRU(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = Bidirectional(CuDNNGRU(100, return_sequences=True))(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = LSTM(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
rnn = Bidirectional(LSTM(100, return_sequences=True))(embedding)
rnn = Attention(max_word_length)(rnn)  # metrics value=0.38647342980771826
# rnn = GlobalMaxPool1D()(rnn)# 0.33816425149567464
# rnn = GlobalAvgPool1D()(rnn)# 0.20772946881499268
# rnn = Flatten()(rnn) # 0.3140096618357488
# rnn = concatenate([GlobalMaxPool1D()(rnn), GlobalAvgPool1D()(rnn)])# 0.24396135280097742
# rnn = CuDNNLSTM(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = Bidirectional(CuDNNLSTM(100, return_sequences=True))(embedding)
# rnn = Attention(max_word_length)(rnn)
drop = Dropout(0.2)(rnn)
output = Dense(num_classes, activation='softmax')(drop)
model = Model(inputs=input, outputs=output)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model_weight_file = './model_rnn_attention.h5'
model_file = './model_rnn_attention.model'
# Stop after 5 epochs without val_loss improvement; keep only the best
# weights on disk.
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_checkpoint = ModelCheckpoint(model_weight_file, save_best_only=True, save_weights_only=True)
model.fit(x_train_word_index,
          y_train_index,
          batch_size=32,
          epochs=1000,
          verbose=2,
          callbacks=[early_stopping, model_checkpoint],
          validation_data=(x_dev_word_index, y_dev_index),
          shuffle=True)
# Evaluate with the best checkpointed weights, not the last epoch's.
model.load_weights(model_weight_file)
model.save(model_file)
evaluate = model.evaluate(x_test_word_index, y_test_index, batch_size=32, verbose=2)
print('loss value=' + str(evaluate[0]))
print('metrics value=' + str(evaluate[1]))
# loss value=1.562715420647273
# metrics value=0.2936507960160573
# coding: utf8
# Address plan constants.
# CIDR range covering the whole VPC.
vpc_cidr = "192.168.0.0/16"
# /24 subnet carved from the VPC range; presumably for HTTP-facing hosts
# -- confirm against the consuming templates.
http_cidr = "192.168.1.0/24"
| 73 | 53 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from fastfold.model.fastnn.kernel import scale_mask_softmax, scale_mask_bias_softmax
from fastfold.model.fastnn.kernel import LayerNorm
from .initializer import glorot_uniform_af
from fastfold.model.fastnn.kernel import bias_sigmod_ele
from fastfold.distributed import gather, scatter
from fastfold.distributed.comm_async import gather_async, gather_async_opp
class DropoutRowwise(nn.Module):
    """Structured dropout that zeroes whole rows (dim 1) of a 4-D tensor.

    A Bernoulli mask is sampled once per row slice and broadcast across
    dim 1, so an entire row is either kept (and rescaled) or dropped.
    """

    def __init__(self, p):
        super().__init__()
        self.p = p
        self.dropout = nn.Dropout(p=p)

    def forward(self, x):
        # Sample the mask on a single-row slice, then broadcast over dim 1.
        row_mask = self.dropout(torch.ones_like(x[:, 0:1, :, :]))
        return x * row_mask
class DropoutColumnwise(nn.Module):
    """Structured dropout that zeroes whole columns (dim 2) of a 4-D tensor.

    A Bernoulli mask is sampled once per column slice and broadcast across
    dim 2, so an entire column is either kept (and rescaled) or dropped.
    """

    def __init__(self, p):
        super().__init__()
        self.p = p
        self.dropout = nn.Dropout(p=p)

    def forward(self, x):
        # Sample the mask on a single-column slice, then broadcast over dim 2.
        col_mask = self.dropout(torch.ones_like(x[:, :, 0:1, :]))
        return x * col_mask
class Transition(nn.Module):
    """Position-wise feed-forward transition with a residual connection.

    LayerNorm -> expand by factor `n` -> ReLU -> project back -> add input.
    The final projection is zero-initialized so the block starts as identity.
    """

    def __init__(self, d, n=4):
        super().__init__()
        self.norm = LayerNorm(d)
        self.linear1 = Linear(d, n * d, initializer='relu')
        self.linear2 = Linear(n * d, d, initializer='zeros')

    def forward(self, src):
        hidden = F.relu(self.linear1(self.norm(src)))
        return src + self.linear2(hidden)
class OutProductMean(nn.Module):
    """Outer-product-mean block: turns MSA activations M into a pairwise update Z.

    NOTE(review): the gather/scatter calls imply the sequence axis (dim 2) is
    sharded across ranks; the async gather of the right factor is overlapped
    with computing the left factor — confirm against fastfold.distributed.
    """

    def __init__(self, n_feat=64, n_feat_out=128, n_feat_proj=32):
        super(OutProductMean, self).__init__()
        self.layernormM = LayerNorm(n_feat)
        self.linear_a = Linear(n_feat, n_feat_proj)
        self.linear_b = Linear(n_feat, n_feat_proj)
        # Final projection flattens the (d, e) outer-product dims.
        self.o_linear = Linear(n_feat_proj * n_feat_proj,
                               n_feat_out,
                               initializer='zero',
                               use_bias=True)

    def forward(self, M, M_mask):
        M = self.layernormM(M)
        right_act = self.linear_b(M)
        # Kick off the async gather; its result is claimed further below.
        right_act_all, work = gather_async(right_act, dim=2)
        # right_act_all = gather(right_act, dim=2)
        left_act = self.linear_a(M)
        M_mask = M_mask.unsqueeze(-1)
        M_mask_col = scatter(M_mask, dim=2)
        left_act = M_mask_col * left_act
        # Pairwise count of valid mask entries, used to average over sequences.
        norm = torch.einsum('bsid,bsjd->bijd', M_mask_col, M_mask)
        # Wait for / finalize the gather started above.
        right_act_all = gather_async_opp(right_act_all, work, dim=2)
        right_act_all = M_mask * right_act_all
        # Outer product of the two projected factors, summed over sequences s.
        O = torch.einsum('bsid,bsje->bijde', left_act, right_act_all)
        O = rearrange(O, 'b i j d e -> b i j (d e)')
        Z = self.o_linear(O)
        # Divide by the (regularized) valid-entry count -> mean over sequences.
        Z /= (1e-3 + norm)
        return Z
class Linear(nn.Linear):
    """
    A Linear layer with built-in nonstandard initializations. Called just
    like torch.nn.Linear.

    Implements the initializers in 1.11.4, plus some additional ones found
    in the code.

    :param feature_in: input feature dimension
    :param feature_out: output feature dimension
    :param initializer: 'linear' (glorot, gain 1.0), 'relu' (glorot, gain 2.0),
        or 'zero'/'zeros' (zero weights, used for residual-final layers);
        any other value keeps the default nn.Linear initialization
    :param use_bias: whether to allocate a bias vector
    :param bias_init: constant value the bias is filled with (when present)
    """

    def __init__(
        self,
        feature_in: int,
        feature_out: int,
        initializer: str = 'linear',
        use_bias: bool = True,
        bias_init: float = 0.,
    ):
        super(Linear, self).__init__(feature_in, feature_out, bias=use_bias)
        self.use_bias = use_bias
        if initializer == 'linear':
            glorot_uniform_af(self.weight, gain=1.0)
        elif initializer == 'relu':
            glorot_uniform_af(self.weight, gain=2.0)
        elif initializer in ('zero', 'zeros'):
            # Bug fix: callers in this file pass both 'zero' and 'zeros';
            # previously 'zero' silently fell through and kept the default
            # (non-zero) initialization, defeating the intended zero init.
            nn.init.zeros_(self.weight)
        if self.use_bias:
            with torch.no_grad():
                self.bias.fill_(bias_init)
class SelfAttention(nn.Module):
    """
    Multi-Head SelfAttention dealing with [batch_size1, batch_size2, len, dim] tensors

    :param qkv_dim: input feature dimension projected to Q/K/V
    :param c: per-head channel dimension
    :param n_head: number of attention heads
    :param out_dim: output feature dimension
    :param gating: if True, gate the attention output with a sigmoid of the input
    :param last_bias_fuse: if True, omit the output bias (fused downstream)
    """

    def __init__(self, qkv_dim, c, n_head, out_dim, gating=True, last_bias_fuse=False):
        super(SelfAttention, self).__init__()
        self.qkv_dim = qkv_dim
        self.c = c
        self.n_head = n_head
        self.out_dim = out_dim
        self.gating = gating
        self.last_bias_fuse = last_bias_fuse
        # 1/sqrt(c) attention scaling, passed into the fused softmax kernels.
        self.scaling = self.c**(-0.5)
        # Single fused projection for Q, K and V (split in forward()).
        self.to_qkv = Linear(qkv_dim, 3 * n_head * c, initializer='linear', use_bias=False)
        # self.to_q = Linear(qkv_dim, n_head * c, initializer='linear', use_bias=False)
        # self.to_k = Linear(qkv_dim, n_head * c, initializer='linear', use_bias=False)
        # self.to_v = Linear(qkv_dim, n_head * c, initializer='linear', use_bias=False)
        if gating:
            self.gating_bias = nn.parameter.Parameter(data=torch.ones((n_head * c,)))
            self.gating_linear = Linear(qkv_dim, n_head * c, initializer='zero', use_bias=False)
        self.o_linear = Linear(n_head * c,
                               out_dim,
                               initializer='zero',
                               use_bias=(not last_bias_fuse))

    def forward(self, in_data, mask, nonbatched_bias=None):
        """
        :param in_data: [batch_size1, batch_size2, len_qkv, qkv_dim]
        :param bias: None or [batch_size1, batch_size2, n_head, len_q, len_kv]
        :param nonbatched_bias: None or [batch_size1, n_head, len_q, len_kv]
        """
        # Fused QKV projection, split into three, then reshape per head.
        qkv = self.to_qkv(in_data).chunk(3, dim=-1)
        q, k, v = map(lambda t: rearrange(t, 'b1 b2 n (h d) -> b1 b2 h n d', h=self.n_head), qkv)
        # q = self.to_q(in_data)
        # k = self.to_k(in_data)
        # v = self.to_k(in_data)
        # q, k, v = map(lambda t: rearrange(t, 'b1 b2 n (h d) -> b1 b2 h n d', h=self.n_head), [q, k, v])
        # q = q * self.scaling
        logits = torch.matmul(q, k.transpose(-1, -2))
        # logits += mask
        if nonbatched_bias is not None:
            # logits += nonbatched_bias.unsqueeze(1)
            # NOTE(review): nonbatched_bias is unpacked with *, so it is
            # expected to be the (tensor, work) pair returned by
            # gather_async — confirm at the call sites.
            bias = gather_async_opp(*nonbatched_bias, dim=1)
            bias = rearrange(bias, 'b q k h -> b h q k')
            # Fused kernel: scaling, mask and bias applied in one pass.
            weights = scale_mask_bias_softmax(logits, mask, bias.unsqueeze(1), self.scaling)
        else:
            weights = scale_mask_softmax(logits, mask, self.scaling)
        # weights = torch.softmax(logits, dim=-1)
        # weights = softmax(logits)
        weighted_avg = torch.matmul(weights, v)
        weighted_avg = rearrange(weighted_avg, 'b1 b2 h n d -> b1 b2 n (h d)')
        if self.gating:
            # Fused sigmoid(gate + bias) * value gating.
            gate_values = self.gating_linear(in_data)
            weighted_avg = bias_sigmod_ele(gate_values, self.gating_bias, weighted_avg)
        output = self.o_linear(weighted_avg)
        return output
| 6,731 | 2,518 |
# -*- coding:utf-8 -*-
"""
__init__.py
~~~~~~~~

Data-collection plugin package (input).

:author: Fufu, 2021/6/7
"""
from abc import abstractmethod
from asyncio import create_task, sleep
from typing import Any
from loguru import logger
from ..libs.plugin import BasePlugin
class InputPlugin(BasePlugin):
    """Base class for data-collection (input) plugins."""

    # Plugin category name, used for registration and log messages.
    module = 'input'

    async def run(self):
        """Main loop: spawn gather() on a fixed interval until closed."""
        logger.debug(f'{self.module}.{self.name} is working')
        while not self.is_closed():
            # NOTE(review): the task reference is not stored; asyncio may
            # garbage-collect fire-and-forget tasks — consider keeping a ref.
            create_task(self.gather())
            await sleep(self.get_interval(60))
        logger.debug(f'{self.module}.{self.name} is closed')

    @abstractmethod
    async def gather(self) -> Any:
        """Collect one round of data (implemented by subclasses)."""
        pass

    def is_closed(self):
        """Return True when this plugin should shut down, i.e. its name is no
        longer listed in the enabled plugins."""
        if self.name in self.conf.plugins_open:
            return False
        # Emit a close signal downstream (a special Metric) before stopping.
        self.out_queue.put_nowait(self.metric(None, tag='__CLOSE_SIGNAL__'))
        self.conf.plugins_working.discard(self.name)
        logger.info(f'Plugin {self.name} is closed')
        return True
| 1,093 | 426 |
import yaml
import re
from .SummonableBot import SummonableBot
class Librarian(SummonableBot):
    """Bot that replies to issue comments with canned responses loaded from
    ./responses.yml, when summoned as "@<bot_name> <response name|help>"."""

    def __init__(self, bot_name, event):
        self.help_command = 'help'
        self.help_preamble = "Here are my available responses"
        self.event = event
        # Build the summoning regex from the response names known at startup.
        with open('./responses.yml') as file:
            response_list = yaml.load(file, Loader=yaml.FullLoader)
        available_responses = response_list.get('responses').keys()
        regex_for_responses = "\\s*|".join(available_responses)
        self.summoning_regex = r'(@' + bot_name + r')\s*' + f'({regex_for_responses}\\s*|{self.help_command})'

    def __prepare_new_issue_text(self, top_message, links):
        """Render a message followed by a markdown bullet list of links."""
        s = top_message + """\n\n- """
        s += "\n- ".join('['+ l.get('title') + '](' + l.get('url') +')' for l in links)
        return s

    def __prepare_help_response(self, top_message, responses):
        """Render the help preamble followed by a bullet list of response names."""
        s = top_message + """:\n\n- """
        s += "\n- ".join(response for response in responses)
        return s

    def has_been_summoned(self, comment_body):
        """Return the regex match when the comment summons this bot, else None."""
        return re.search(self.summoning_regex, comment_body, re.MULTILINE)

    async def check_library(self, user_help_match):
        """Answer a summons: post the help list or the requested response."""
        message = None
        # Re-read the library on every request so edits take effect live.
        with open('./responses.yml') as file:
            response_list = yaml.load(file, Loader=yaml.FullLoader)
        response_to_fetch = user_help_match.group(2).strip()
        if response_to_fetch == self.help_command:
            message = self.__prepare_help_response(
                self.help_preamble, response_list.get('responses').keys())
        else:
            # Bug fix: default to an empty dict (not ''), so an unknown
            # response name no longer raises AttributeError on str.get below.
            requested_response = response_list.get('responses').get(response_to_fetch, {})
            message = self.__prepare_new_issue_text(
                requested_response.get('message', ''), requested_response.get('helpful_links', []))
        if message:
            await self.event.add_comment(message)
| 1,933 | 598 |
import pprint
import statistics
from contextlib import suppress
from dataclasses import dataclass
from enum import Enum
from typing import Optional
@dataclass
class ValidCharacter:
    """Positional constraints accumulated for one in-the-word character."""
    # 0-based positions where this character is confirmed to appear.
    definite_locations: set[int]
    # 0-based positions where this character is confirmed NOT to appear.
    definite_not_locations: set[int]
class CharacterStatus(Enum):
    """Wordle-style feedback color for one guessed character."""
    GRAY = "gray"      # not in the word (or no further occurrence here)
    GREEN = "green"    # in the word, at this position
    YELLOW = "yellow"  # in the word, but not at this position
@dataclass
class CharacterGuess:
    """One guessed character together with the feedback it received."""
    character: str
    status: CharacterStatus
@dataclass
class GroupStats:
    """How a candidate guess partitions the remaining common words."""
    answer: str
    # True when the guess itself is still a viable answer.
    is_potential_solution: bool
    number_of_groups: int
    average_group_size: float
    largest_group: int
class WordGameHelper:
    """Tracks Wordle-style guess feedback and narrows down candidate answers.

    Two pools are maintained: `possible_words` (every legal word) and
    `possible_common_words` (a smaller, more likely subset used for
    suggestions and for scoring follow-up guesses).
    """

    _eliminated_characters: set[str]                  # letters known absent
    _included_characters: dict[str, ValidCharacter]   # letters known present
    _original_possible_common_words: set[str]         # unfiltered pool, for scoring
    possible_words: set[str]
    possible_common_words: set[str]

    def __init__(
        self,
        possible_words: Optional[set[str]],
        possible_common_words: Optional[set[str]],
        used_words: Optional[set[str]],
    ):
        """Set up the candidate pools; `used_words` are removed from both.

        All three arguments may be None (treated as empty sets).
        """
        self._eliminated_characters = set()
        self._included_characters = {}
        self.possible_words = possible_words or set()
        self.possible_common_words = possible_common_words or set()
        # Bug fix: copy the normalized set instead of calling .copy() on the
        # raw argument, which the signature allows to be None.
        self._original_possible_common_words = self.possible_common_words.copy()
        if used_words:
            self.possible_words = self.possible_words - used_words
            self.possible_common_words = self.possible_common_words - used_words

    def make_guess(self, guess: list[CharacterGuess]):
        """Record the feedback for one full guess and re-filter the pools."""
        for index, character_guess in enumerate(guess):
            self._update_characters(index, character_guess)
        self._update_possible_words()

    def print_possible_answers(self):
        """Print remaining candidates; suggest best next guesses when many remain."""
        if len(self.possible_words) == 1:
            print(f"The answer is {self.possible_words.pop().upper()}.")
            return
        if len(self.possible_common_words) == 1:
            print(f"The answer is probably {self.possible_common_words.pop().upper()}.")
            return
        possible_common_answers: list[str] = sorted(self.possible_common_words)
        print(f"There are {len(possible_common_answers)} common possible answers.")
        if len(possible_common_answers) < 5:
            print("\n".join(possible_common_answers))
        if len(possible_common_answers) > 2:
            self._get_best_guess()

    def _get_best_guess(self):
        """Score every word in the original pool by how evenly it would split
        the remaining candidates across all 3^5 feedback patterns, then print
        the statistically best guesses (smaller average group = more info)."""
        answer_groups = {}
        statuses = [CharacterStatus.GRAY, CharacterStatus.GREEN, CharacterStatus.YELLOW]
        stats: list[GroupStats] = []
        for answer in self._original_possible_common_words:
            answer_groups[answer] = []
            group_lengths = []
            # Enumerate every possible 5-character feedback pattern.
            for status1 in statuses:
                for status2 in statuses:
                    for status3 in statuses:
                        for status4 in statuses:
                            for status5 in statuses:
                                # Simulate the guess against the current pool.
                                helper = WordGameHelper(
                                    self.possible_common_words,
                                    self.possible_common_words,
                                    set(),
                                )
                                helper.make_guess(
                                    [
                                        CharacterGuess(answer[0], status1),
                                        CharacterGuess(answer[1], status2),
                                        CharacterGuess(answer[2], status3),
                                        CharacterGuess(answer[3], status4),
                                        CharacterGuess(answer[4], status5),
                                    ]
                                )
                                if len(helper.possible_words) > 0:
                                    group = helper.possible_common_words
                                    answer_groups[answer].append(group)
                                    group_lengths.append(len(group))
            average_length = statistics.mean(group_lengths)
            group_stats = GroupStats(
                answer=answer,
                is_potential_solution=answer in self.possible_common_words,
                number_of_groups=len(group_lengths),
                average_group_size=average_length,
                largest_group=max(group_lengths),
            )
            stats.append(group_stats)
        stats.sort(key=lambda x: x.average_group_size)
        print(" The best guesses statistically are:")
        count: int = 0
        for stat in stats:
            # Only report guesses tied for the best average group size.
            if stat.average_group_size > stats[0].average_group_size:
                continue
            if count > 10:
                break
            print(
                f"  {stat.answer}, "
                f"is_potential_solution = {stat.is_potential_solution}, "
                f"number_of_groups = {stat.number_of_groups}, "
                f"average_group_size = {stat.average_group_size}, "
                f"largest_group = {stat.largest_group}"
            )
            count += 1
        print(" The best, possibly-correct guesses statistically are:")
        potential_solution_stats = [
            stat for stat in stats if stat.is_potential_solution
        ]
        for stat in potential_solution_stats[:10]:
            print(
                f"  {stat.answer}, "
                f"is_potential_solution = {stat.is_potential_solution}, "
                f"number_of_groups = {stat.number_of_groups}, "
                f"average_group_size = {stat.average_group_size}, "
                f"largest_group = {stat.largest_group}"
            )

    def _update_characters(self, position: int, guess: CharacterGuess):
        """Fold one character's feedback into the constraint tables."""
        value = self._included_characters.get(
            guess.character, ValidCharacter(set(), set())
        )
        if (
            guess.status == CharacterStatus.GRAY
            and guess.character not in self._included_characters
        ):
            # Gray on a letter with no other known occurrence: eliminate it.
            value.definite_not_locations.add(position)
            self._eliminated_characters.add(guess.character)
            return
        # The letter is known to be present somewhere; un-eliminate if needed.
        with suppress(KeyError):
            self._eliminated_characters.remove(guess.character)
        if guess.status in (CharacterStatus.YELLOW, CharacterStatus.GRAY):
            # Gray here means "no additional occurrence at this position".
            value.definite_not_locations.add(position)
        else:
            value.definite_locations.add(position)
        self._included_characters[guess.character] = value

    def _update_possible_words(self):
        """Re-filter both pools against the accumulated constraints."""
        updated_possible_words: set[str] = set()
        updated_possible_common_words: set[str] = set()
        for word in self.possible_words:
            if len(set(word).intersection(self._eliminated_characters)) > 0:
                continue
            is_valid: bool = True
            for character, valid_character in self._included_characters.items():
                if not is_valid:
                    break
                if character not in word:
                    is_valid = False
                    break
                for invalid_location in valid_character.definite_not_locations:
                    if word[invalid_location] == character:
                        is_valid = False
                        break
                for valid_location in valid_character.definite_locations:
                    if word[valid_location] != character:
                        is_valid = False
                        break
            if not is_valid:
                continue
            updated_possible_words.add(word)
            if word in self.possible_common_words:
                updated_possible_common_words.add(word)
        self.possible_words = updated_possible_words
        self.possible_common_words = updated_possible_common_words
| 8,201 | 2,195 |
# coding=utf-8
#
# @lc app=leetcode id=876 lang=python
#
# [876] Middle of the Linked List
#
# https://leetcode.com/problems/middle-of-the-linked-list/description/
#
# algorithms
# Easy (64.97%)
# Likes: 593
# Dislikes: 42
# Total Accepted: 76.4K
# Total Submissions: 117.5K
# Testcase Example: '[1,2,3,4,5]'
#
# Given a non-empty, singly linked list with head node head, return a middle
# node of linked list.
#
# If there are two middle nodes, return the second middle node.
#
#
#
#
# Example 1:
#
#
# Input: [1,2,3,4,5]
# Output: Node 3 from this list (Serialization: [3,4,5])
# The returned node has value 3. (The judge's serialization of this node is
# [3,4,5]).
# Note that we returned a ListNode object ans, such that:
# ans.val = 3, ans.next.val = 4, ans.next.next.val = 5, and ans.next.next.next
# = NULL.
#
#
#
# Example 2:
#
#
# Input: [1,2,3,4,5,6]
# Output: Node 4 from this list (Serialization: [4,5,6])
# Since the list has two middle nodes with values 3 and 4, we return the second
# one.
#
#
#
#
# Note:
#
#
# The number of nodes in the given list will be between 1 and 100.
#
#
#
#
#
# Definition for singly-linked list.
class ListNode(object):
    """A node of a singly linked list."""

    def __init__(self, x):
        self.val = x      # payload stored in this node
        self.next = None  # successor node; None at the tail

    def __str__(self):
        # Recursive rendering, e.g. "<ListNode 1 -> <ListNode 2 -> None>>".
        return f"<ListNode {self.val!s} -> {self.next!s}>"
class Solution(object):

    def middleNode(self, head):
        """
        Return the middle node of a singly linked list — the second of the
        two middles when the length is even — or None for an empty list.

        :type head: ListNode
        :rtype: ListNode
        """
        slow = fast = head
        # Fast advances two nodes per step, slow one; when fast runs off the
        # end, slow sits on the middle (second middle for even lengths).
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
        return slow
# if __name__ == '__main__':
# s = Solution()
# print s.middleNode(None)
# head = ListNode(1)
# print s.middleNode(head)
# head.next = ListNode(2)
# print s.middleNode(head)
# head.next.next = ListNode(3)
# print s.middleNode(head)
# head.next.next.next = ListNode(4)
# print s.middleNode(head)
# head.next.next.next.next = ListNode(5)
# print s.middleNode(head)
| 2,249 | 847 |
# New BSD License
#
# Copyright (c) 2007-2019 The scikit-learn developers.
# All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# a. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# b. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# c. Neither the name of the Scikit-learn Developers nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues, dpi=70):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    :param y_true: ground-truth labels
    :param y_pred: predicted labels
    :param classes: display names for the tick labels (index-aligned with the
        label codes appearing in y_true/y_pred)
    :param normalize: if True, show per-row (true-class) fractions
    :param title: plot title; a default is picked based on `normalize`
    :param cmap: matplotlib colormap for the matrix image
    :param dpi: figure resolution
    :return: the matplotlib Axes containing the plot
    """
    np.set_printoptions(precision=2)
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    # classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        # NOTE(review): a row with zero true samples divides by zero here,
        # producing nan/inf cells — confirm inputs always cover every class.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    fig, ax = plt.subplots()
    fig.set_dpi(dpi)
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # Choose a text color that contrasts with the cell's fill.
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
| 3,442 | 1,141 |
import pytest
import sys
sys.path.append('.')
from turingmachine import Transition, Direction, State
def test_create_transition():
    """A transition registered on a state is retrievable by its input letter."""
    start = State()
    target = State()
    # In `start`, upon reading 'a': move to `target`, write 'b', move tape right.
    start.create_transition('a', target, 'b', Direction.RIGHT)
    stored = start.transitions['a']
    assert stored.new_state == target
    assert stored.output_letter == 'b'
    assert stored.movement_direction == Direction.RIGHT
def test_create_multiple_transitions():
    """Transitions chain across states; unknown input letters raise KeyError."""
    first = State()
    second = State()
    third = State()
    first.create_transition('a', second, 'b', Direction.RIGHT)
    second.create_transition('c', third, 'd', Direction.LEFT)
    with pytest.raises(KeyError):
        first.transitions['b'] is None
    # The second transition is reachable both directly and via the first.
    via_first = first.transitions['a'].new_state.transitions['c']
    assert via_first.new_state == third
    assert second.transitions['c'].new_state == third
    assert via_first.output_letter == 'd'
    assert second.transitions['c'].output_letter == 'd'
    assert via_first.movement_direction == Direction.LEFT
    assert second.transitions['c'].movement_direction == Direction.LEFT
def test_add_transition():
    """A pre-built Transition object can be attached under an input letter."""
    source = State()
    destination = State()
    transition = Transition(destination, 'b', Direction.RIGHT)
    source.add_transition('a', transition)
    assert source.transitions['a'] == transition
def test_create_with_transitions():
    """A State accepts a prepared transition table in its constructor."""
    target = State()
    going_left = Transition(target, 'c', Direction.LEFT)
    going_right = Transition(target, 'd', Direction.RIGHT)
    state = State({'a': going_left, 'b': going_right})
    assert state.transitions['a'] == going_left
    assert state.transitions['b'] == going_right
def test_calc():
    """calc() returns the transition registered for the given input letter."""
    target = State()
    transition = Transition(target, 'a', Direction.RIGHT)
    state = State()
    state.add_transition('b', transition)
    assert state.calc('b') == transition
| 1,757 | 646 |
class Solution:

    def firstMissingPositive(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        O(1) extra space: the answer must lie in [1, len(nums) + 1], so each
        in-range value v marks slot v-1 by adding a sentinel offset in place;
        the first unmarked slot gives the missing positive.
        """
        count = len(nums)
        # Values outside [1, count] can never be the answer; neutralize them.
        for idx, value in enumerate(nums):
            if value < 0 or value > count:
                nums[idx] = 0
        sentinel = count + 1  # strictly larger than any remaining value
        # Mark slot (v - 1) for every original value v; `% sentinel` recovers
        # the original value even after the slot itself has been marked.
        for idx, value in enumerate(nums):
            slot = value % sentinel
            if slot:
                nums[slot - 1] += sentinel
        # The first slot never marked corresponds to the missing positive.
        for position, value in enumerate(nums, 1):
            if value // sentinel == 0:
                return position
        return sentinel
import os
def returnText():
cwd = os.getcwd().replace(os.environ['HOME'],'~')
lstCwd = str.split(cwd, '/')
if len(lstCwd) > 3:
lstCwd.reverse()
lstCwd = lstCwd[0:3]
lstCwd.append('+')
lstCwd.reverse()
strCwd = '/'.join(lstCwd)
return strCwd
| 294 | 120 |
from unittest import mock
from django.contrib.auth import get_user_model
from django.core import mail
from django.template import TemplateDoesNotExist
from django.test import TestCase
from django.urls import reverse
from ..accounts.adapter import EmailAdapter
from .utils import TestOrganizationMixin
User = get_user_model()
class TestEmailAdapter(TestOrganizationMixin, TestCase):
    """Tests for the customized allauth EmailAdapter."""

    def test_template_not_present(self):
        """send_mail with an unknown template prefix raises TemplateDoesNotExist."""
        email = "test@tester.com"
        template_prefix = "some_random_name"
        with self.assertRaises(TemplateDoesNotExist):
            EmailAdapter.send_mail(self, template_prefix, email, {})

    @mock.patch('openwisp_users.accounts.adapter.send_email')
    def test_assertion_not_raised_when_html_template_missing(self, mail_func):
        """A password-reset email still goes out when the HTML template is missing."""
        self._create_user()
        queryset = User.objects.filter(username='tester')
        self.assertEqual(queryset.count(), 1)
        params = {'email': 'test@tester.com'}
        self.client.post(reverse('account_reset_password'), params, follow=True)
        # Inspect the positional args of the mocked send_email call.
        send_mail_calls = mail_func.call_args_list
        send_mail_arguments = send_mail_calls[0][0]
        self.assertEqual(send_mail_arguments[0], '[example.com] Password Reset E-mail')
        # NOTE(review): index 2 is asserted empty — presumably the missing
        # HTML body; confirm against send_email's signature.
        self.assertEqual(send_mail_arguments[2], '')

    def test_password_reset_email_sent(self):
        """A password reset sends one plain-text email (no HTML alternative)."""
        self._create_user()
        queryset = User.objects.filter(username='tester')
        self.assertEqual(queryset.count(), 1)
        params = {'email': 'test@tester.com'}
        self.client.post(reverse('account_reset_password'), params, follow=True)
        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox.pop()
        self.assertFalse(email.alternatives)
        self.assertIn('Password Reset E-mail', email.subject)
        self.assertIn('Click the link below to reset your password', email.body)
| 1,850 | 555 |
from setuptools import setup
# Packaging metadata for the vulnerability-detection transfer-learning toolkit.
setup(
    name='vuln_toolkit',
    version='0.1',
    description='Transfer Learning Toolkit',
    url='https://para.cs.umd.edu/purtilo/vulnerability-detection-tool-set/tree/master',
    author='Ashton Webster',
    author_email='ashton.webster@gmail.com',
    license='MIT',
    packages=['vuln_toolkit', 'vuln_toolkit.common'],
    zip_safe=False  # install unzipped so package data stays accessible on disk
)
| 382 | 141 |
#
# Copyright (c) European Synchrotron Radiation Facility (ESRF)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__authors__ = ["O. Svensson"]
__license__ = "MIT"
__date__ = "10/05/2019"
import json
import shutil
import pprint
from edna2.tasks.AbstractTask import AbstractTask
from edna2.tasks.ISPyBTasks import GetListAutoprocessingResults
class FindHklAsciiForMerge(AbstractTask):
    """
    This task receives a list of data collection IDs and returns a
    json schema for EXI2
    """

    def getInDataSchema(self):
        """JSON schema of the expected input payload (ISPyB token, proposal
        and the data collection IDs to inspect)."""
        return {
            "type": "object",
            "properties": {
                "token": {"type": "string"},
                "proposal": {"type": "string"},
                "dataCollectionId": {
                    "type": "array",
                    "items": {
                        "type": "integer",
                    }
                }
            }
        }

    # def getOutDataSchema(self):
    #     return {
    #         "type": "object",
    #         "required": ["dataForMerge"],
    #         "properties": {
    #             "dataForMerge": {
    #                 "type": "object",
    #                 "items": {
    #                     "type": "object",
    #                     "properties": {
    #                         "spaceGroup": {"type": "string"}
    #                     }
    #                 }
    #             }
    #         }
    #     }

    def run(self, inData):
        """Query ISPyB for autoprocessing results and build an EXI2 form
        schema: per data collection, a dropdown of XDS_ASCII attachments and
        a free-text minimum I/SIGMA field."""
        urlError = None
        token = inData['token']
        proposal = inData['proposal']
        listDataCollectionId = inData['dataCollectionId']
        inDataGetListAutoprocessingResults = {
            'token': token,
            'proposal': proposal,
            'dataCollectionId': listDataCollectionId
        }
        getListAutoprocessingResults = GetListAutoprocessingResults(
            inData=inDataGetListAutoprocessingResults
        )
        getListAutoprocessingResults.execute()
        outDataAutoprocessing = getListAutoprocessingResults.outData
        if 'error' in outDataAutoprocessing:
            urlError = outDataAutoprocessing['error']
        else:
            index = 1
            properties = {}
            listOrder = []
            for dataCollection in outDataAutoprocessing['dataCollection']:
                dataCollectionId = dataCollection['dataCollectionId']
                dictEntry = {}
                listEnumNames = []
                listEnumValues = []
                proteinAcronym = None
                blSampleName = None
                if 'error' in dataCollection['autoprocIntegration']:
                    urlError = dataCollection['autoprocIntegration']['error']
                else:
                    # Collect every XDS_ASCII attachment as a dropdown choice:
                    # the label shows the processing program + file name, the
                    # value is the attachment ID.
                    for autoProcResult in dataCollection['autoprocIntegration']:
                        if proteinAcronym is None:
                            proteinAcronym = autoProcResult['Protein_acronym']
                            blSampleName = autoProcResult['BLSample_name']
                        for autoProcAttachment in autoProcResult['autoprocAttachment']:
                            if 'XDS_ASCII' in autoProcAttachment['fileName']:
                                fileName = autoProcAttachment['fileName']
                                program = autoProcResult['v_datacollection_processingPrograms']
                                attachmentId = autoProcAttachment['autoProcProgramAttachmentId']
                                enumName = '{0:30s} {1}'.format(program, fileName)
                                listEnumNames.append(enumName)
                                enumValue = attachmentId
                                listEnumValues.append(enumValue)
                if urlError is None:
                    entryKey = 'hkl_' + str(dataCollectionId)
                    if entryKey not in properties:
                        # NOTE(review): '{2}' appears twice and '{1}' once, so
                        # this renders "#<i> <sample> <protein>-<sample>" —
                        # confirm the placeholder order is intended.
                        dictEntry['title'] = 'Select HKL for data Collection #{0} {2} {1}-{2}'.format(
                            index,
                            proteinAcronym,
                            blSampleName
                        )
                        dictEntry['enum'] = listEnumValues
                        dictEntry['enumNames'] = listEnumNames
                        properties[entryKey] = dictEntry
                        listOrder.append(entryKey)
                    entryKey = 'minimum_I/SIGMA_' + str(dataCollectionId)
                    if entryKey not in properties:
                        # Minimum sigma
                        dictEntry = {
                            'integer': 'string',
                            'type': 'string',
                            'title': 'minimum_I/SIGMA for data Collection #{0} {2} {1}-{2}'.format(
                                index,
                                proteinAcronym,
                                blSampleName
                            )
                        }
                        properties[entryKey] = dictEntry
                        listOrder.append(entryKey)
                    index += 1
        if urlError is None:
            schema = {
                'properties': properties,
                'type': 'object',
                'title': 'User input needed'
            }
            uiSchema = {
                'ui:order': listOrder
            }
            outData = {
                "schema": schema,
                "uiSchema": uiSchema
            }
        else:
            outData = {
                'error': urlError
            }
        return outData
class FindPipelineForMerge(AbstractTask):
    """
    This task receives a list of data collection IDs and returns a
    json schema for EXI2
    """

    def getInDataSchema(self):
        """JSON schema of the expected input payload (ISPyB token, proposal
        and the data collection IDs to inspect)."""
        return {
            "type": "object",
            "properties": {
                "token": {"type": "string"},
                "proposal": {"type": "string"},
                "dataCollectionId": {
                    "type": "array",
                    "items": {
                        "type": "integer",
                    }
                }
            }
        }

    # def getOutDataSchema(self):
    #     return {
    #         "type": "object",
    #         "required": ["dataForMerge"],
    #         "properties": {
    #             "dataForMerge": {
    #                 "type": "object",
    #                 "items": {
    #                     "type": "object",
    #                     "properties": {
    #                         "spaceGroup": {"type": "string"}
    #                     }
    #                 }
    #             }
    #         }
    #     }

    def run(self, inData):
        """Query ISPyB and build an EXI2 form offering only the processing
        pipelines (program + anomalous flag) that produced an XDS_ASCII file
        for EVERY requested data collection, plus a minimum I/SIGMA field."""
        urlError = None
        token = inData['token']
        proposal = inData['proposal']
        listDataCollectionId = inData['dataCollectionId']
        inDataGetListAutoprocessingResults = {
            'token': token,
            'proposal': proposal,
            'dataCollectionId': listDataCollectionId
        }
        getListAutoprocessingResults = GetListAutoprocessingResults(
            inData=inDataGetListAutoprocessingResults
        )
        getListAutoprocessingResults.execute()
        outDataAutoprocessing = getListAutoprocessingResults.outData
        if 'error' in outDataAutoprocessing:
            urlError = outDataAutoprocessing['error']
        else:
            index = 1
            properties = {}
            listOrder = []
            # dictEntry maps "<program>_(no)anom" -> attachments across all
            # collections; it deliberately accumulates over the whole loop.
            dictEntry = {}
            for dataCollection in outDataAutoprocessing['dataCollection']:
                dataCollectionId = dataCollection['dataCollectionId']
                listEnumValues = []
                proteinAcronym = None
                blSampleName = None
                if 'error' in dataCollection['autoprocIntegration']:
                    urlError = dataCollection['autoprocIntegration']['error']
                else:
                    for autoProcResult in dataCollection['autoprocIntegration']:
                        if len(autoProcResult['autoprocAttachment']) > 0:
                            if proteinAcronym is None:
                                proteinAcronym = autoProcResult['Protein_acronym']
                                blSampleName = autoProcResult['BLSample_name']
                            if '1' in autoProcResult['anomalous']:
                                anom = True
                            else:
                                anom = False
                            # Group XDS_ASCII attachments by pipeline key.
                            for autoProcAttachment in autoProcResult['autoprocAttachment']:
                                if 'XDS_ASCII' in autoProcAttachment['fileName']:
                                    fileName = autoProcAttachment['fileName']
                                    program = autoProcResult['v_datacollection_processingPrograms']
                                    attachmentId = autoProcAttachment['autoProcProgramAttachmentId']
                                    if anom:
                                        entryKey = program + '_anom'
                                    else:
                                        entryKey = program + '_noanom'
                                    if entryKey not in dictEntry:
                                        dictEntry[entryKey] = []
                                    dictEntry[entryKey].append({'id': attachmentId, 'fileName': fileName})
                if urlError is None:
                    # Keep only pipelines that covered every data collection.
                    listEnumNames = []
                    dictInput = {}
                    for entryKey, listAttachment in dictEntry.items():
                        if len(listAttachment) == len(outDataAutoprocessing['dataCollection']):
                            listEnumNames.append(entryKey)
                            dictInput[entryKey] = listAttachment
                    index += 1
                    if len(listEnumNames) > 0:
                        dictSchema = {
                            'title': 'Select processing pipeline for data Collection {0}-{1}'.format(
                                proteinAcronym,
                                blSampleName
                            ),
                            'type': 'string',
                            'enum': listEnumNames,
                            'enumNames': listEnumNames
                        }
                        key = "pipeline"
                        properties[key] = dictSchema
                        listOrder.append(key)
                        # Minimum sigma
                        dictSchema = {
                            'integer': 'string',
                            'type': 'string',
                            'title': 'minimum_I/SIGMA for data Collection {0}-{1}'.format(
                                proteinAcronym,
                                blSampleName
                            )
                        }
                        key = 'minimum_I/SIGMA'
                        properties[key] = dictSchema
                        listOrder.append(key)
        if urlError is None:
            # NOTE(review): dictInput is only bound inside the per-collection
            # loop — an empty 'dataCollection' list would raise NameError
            # here; confirm upstream guarantees at least one collection.
            schema = {
                'properties': properties,
                'type': 'object',
                'title': 'User input needed'
            }
            uiSchema = {
                'ui:order': listOrder
            }
            outData = {
                'schema': {
                    "schema": schema,
                    "uiSchema": uiSchema
                },
                'input': dictInput
            }
        else:
            outData = {
                'error': urlError
            }
        return outData
class MergeUtls(AbstractTask):
    """
    This task will run the Merge_utls.py program written by Shibom Basu
    """

    def run(self, inData):
        """Stage the input HKL files, run Merge_utls.py and collect its JSON
        results.

        :param inData: dict with 'listHklLp', a list of {'hkl': <path>} entries
        :return: {'mergeResult': <parsed JSON>} when the result file exists,
            otherwise an empty dict
        """
        workingDir = self.getWorkingDirectory()
        # Merge_utls.py expects one data<N>/XDS_ASCII.HKL directory per input.
        for index, hklLp in enumerate(inData['listHklLp'], start=1):
            dataDir = workingDir / "data{0}".format(index)
            dataDir.mkdir(exist_ok=True)
            shutil.copy(hklLp['hkl'], str(dataDir / 'XDS_ASCII.HKL'))
        commandLine = 'Merge_utls.py --root {0} --expt serial-xtal'.format(str(workingDir))
        self.runCommandLine(commandLine, logPath=None)
        # Find Mergeing_results.json (spelling kept — must match what the
        # external tool writes).
        resultPath = self.getWorkingDirectory() / 'adm_serial-xtal' / 'adm_3' / 'Mergeing_results.json'
        # Bug fix: initialize outData so a missing result file returns {}
        # instead of raising UnboundLocalError at the return below.
        outData = {}
        if resultPath.exists():
            with open(str(resultPath)) as f:
                mergeResult = json.loads(f.read())
            outData = {'mergeResult': mergeResult}
        return outData
| 13,255 | 3,303 |
import numpy as np
from tqdm import *
from utils import DataLoaderX
from dataset import collate
from math import *
def prediction(data, model, batch_size, cuda):
    """Run `model` over `data` in batches and collect per-sample outputs.

    When `cuda` is truthy the model outputs are moved back to the CPU
    before being accumulated.
    """
    loader = DataLoaderX(data, batch_size=batch_size, collate_fn=collate, num_workers=0)
    model.training = False
    outputs = []
    for batch in tqdm(loader):
        batch['data'] = batch['data'].float()
        result = model(batch['data'])
        if cuda:
            result = result.cpu()
        outputs += result
    return outputs
def recovery(ori_shape, output, size):
    """Reassemble network output tiles into a full-size (C, H, W) array.

    ori_shape: (channels, height, width) of the original image.
    output: sequence of torch tensors produced by the model.
    size: (tile_height, tile_width) used when the image was tiled/padded.

    If the tile is at least as large as the image (single padded tile),
    the padding is cropped off; otherwise the tiles are stitched row-major
    and the canvas is trimmed back to the original size.
    """
    if size[0] >= ori_shape[1] or size[1] >= ori_shape[2]:
        # Single padded tile: crop the symmetric padding off.
        out = output[0].detach().numpy()
        diff_x = size[0] - ori_shape[1]
        diff_y = size[1] - ori_shape[2]
        # Bug fix: when diff == 0 the old end index -(diff - diff // 2)
        # evaluated to -0 == 0, producing an EMPTY slice for exact-size
        # inputs; use None so the slice runs to the end of the axis.
        x_end = -(diff_x - diff_x // 2) or None
        y_end = -(diff_y - diff_y // 2) or None
        # NOTE(review): with the `or` in the condition one diff may be
        # negative here; that mirrors the original behaviour.
        return out[:, diff_x // 2:x_end, diff_y // 2:y_end]
    h, w = size[0], size[1]
    cols = ceil(ori_shape[2] / w)
    rows = ceil(ori_shape[1] / h)
    assert rows * cols == len(output)
    results = np.zeros((ori_shape[0], rows * size[0], cols * size[1]))
    for i, tile in enumerate(output):
        tile = tile.detach().numpy()
        # Tiles carry an 8-pixel overlap border that is discarded;
        # presumably `size` equals the cropped tile size -- TODO confirm.
        tile = tile[:, 8:-8, 8:-8]
        end_col = (i + 1) % cols * size[1] if (i + 1) % cols > 0 else cols * size[1]
        results[:, i // cols * size[0]:(i // cols + 1) * size[0],
                i % cols * size[1]:end_col] = tile
    return results[:, 0:ori_shape[1], 0:ori_shape[2]]
if __name__ == '__main__':
    # Quick sanity check of negative-index slicing on a dummy array.
    demo = np.zeros((4, 3, 3))
    print(demo[:, :-1, :-1].shape)
| 1,556 | 586 |
import torch
from torch import nn
from tqdm import tqdm
from entmax import entmax_bisect
import torch.nn.functional as F
# helper function
def eval_decorator(fn):
    """Decorator: run `fn` with the model in eval mode, then restore the
    model's previous training/eval state before returning the result."""
    def wrapper(model, *args, **kwargs):
        previous_mode = model.training
        model.eval()
        result = fn(model, *args, **kwargs)
        model.train(previous_mode)
        return result
    return wrapper
# top k filtering
def top_p(logits, thres = 0.9):
    """Nucleus (top-p) filtering: keep the smallest prefix of the sorted
    distribution whose cumulative probability stays within the threshold;
    every other logit becomes -inf. Returns logits in the original order."""
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cumulative = F.softmax(sorted_logits, dim=-1).cumsum(dim=-1)
    drop = cumulative > (1 - thres)
    # Shift right so the token that first crosses the threshold is kept.
    drop[:, 1:] = drop[:, :-1].clone()
    drop[:, 0] = 0
    sorted_logits[drop] = float('-inf')
    # Scatter the sorted values back to their original positions.
    return sorted_logits.scatter(1, sorted_indices, sorted_logits)
# topk
def top_k(logits, thres = 0.9):
    """Keep the top ceil((1 - thres) * vocab) logits per row; the rest
    become -inf so softmax assigns them zero probability."""
    # Bug fix: `ceil` was referenced but never imported in this module,
    # so every call raised NameError; import it locally to stay
    # self-contained.
    from math import ceil
    k = ceil((1 - thres) * logits.shape[-1])
    val, ind = torch.topk(logits, k)
    probs = torch.full_like(logits, float('-inf'))
    probs.scatter_(1, ind, val)
    return probs
# top_a
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
    """Top-a filtering: mask out tokens whose probability falls below a
    threshold derived from the peak probability; survivors get logit 1."""
    probs = F.softmax(logits, dim=-1)
    threshold = probs.max().pow(min_p_pow) * min_p_ratio
    below = probs < threshold
    logits[below] = -float("Inf")
    logits[~below] = 1
    return logits
# Alpha for entmax sampling (1.0 = softmax, 2.0 = sparsemax; 1.3 interpolates).
ENTMAX_ALPHA = 1.3
# Alias for the bisection-based entmax implementation used by generate().
entmax = entmax_bisect
class AutoregressiveWrapper(nn.Module):
    """Adds autoregressive sampling and a language-model loss to `net`.

    `net` must expose a `seq_len` attribute and map (batch, seq) token ids
    to (batch, seq, vocab) logits.
    """
    def __init__(self, net, ignore_index = -100, pad_value = 0):
        super().__init__()
        self.pad_value = pad_value  # token id reserved for padding
        self.ignore_index = ignore_index  # label id skipped by the loss
        self.net = net
        self.max_seq_len = net.seq_len  # context window of the wrapped net
    @torch.no_grad()
    @eval_decorator
    def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, min_p_pow=2.0, min_p_ratio=0.02, **kwargs):
        """Sample up to `seq_len` new tokens after `start_tokens`.

        Dispatches on the identity of `filter_logits_fn` (top_k/top_p/top_a
        or the entmax alias); extra `kwargs` are forwarded to the net.
        Returns only the newly generated tokens.
        """
        device = start_tokens.device
        num_dims = len(start_tokens.shape)
        # Promote a single sequence to a batch of one.
        if num_dims == 1:
            start_tokens = start_tokens[None, :]
        b, t = start_tokens.shape
        out = start_tokens
        for _ in tqdm(range(seq_len)):
            # Condition on at most the last max_seq_len tokens.
            x = out[:, -self.max_seq_len:]
            logits = self.net(x, **kwargs)[:, -1, :]
            if filter_logits_fn in {top_k, top_p}:
                filtered_logits = filter_logits_fn(logits, thres = filter_thres)
                probs = F.softmax(filtered_logits / temperature, dim=-1)
            elif filter_logits_fn is top_a:
                filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio)
                probs = F.softmax(filtered_logits / temperature, dim=-1)
            elif filter_logits_fn is entmax:
                probs = entmax(logits / temperature, alpha = ENTMAX_ALPHA, dim=-1)
            # NOTE(review): an unrecognised filter_logits_fn leaves `probs`
            # unbound and raises here -- confirm callers only pass the four
            # supported functions.
            sample = torch.multinomial(probs, 1)
            out = torch.cat((out, sample), dim=-1)
            # Stop early once every sequence in the batch emitted EOS.
            if eos_token is not None and (sample == eos_token).all():
                break
        # Drop the prompt, keep only generated tokens.
        out = out[:, t:]
        if num_dims == 1:
            out = out.squeeze(0)
        return out
    def forward(self, x, **kwargs):
        """Teacher-forced LM loss: predict x[:, 1:] from x[:, :-1]."""
        xi, xo = x[:, :-1], x[:, 1:]
        out = self.net(xi, **kwargs)
        loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
        return loss
| 3,318 | 1,243 |
# 1478. Allocate Mailboxes
# User Accepted:342
# User Tried:594
# Total Accepted:364
# Total Submissions:1061
# Difficulty:Hard
# Given the array houses and an integer k. where houses[i] is the location of the ith house along a street, your task is to allocate k mailboxes in the street.
# Return the minimum total distance between each house and its nearest mailbox.
# The answer is guaranteed to fit in a 32-bit signed integer.
# Example 1:
# Input: houses = [1,4,8,10,20], k = 3
# Output: 5
# Explanation: Allocate mailboxes in position 3, 9 and 20.
# Minimum total distance from each houses to nearest mailboxes is |3-1| + |4-3| + |9-8| + |10-9| + |20-20| = 5
# Example 2:
# Input: houses = [2,3,5,12,18], k = 2
# Output: 9
# Explanation: Allocate mailboxes in position 3 and 14.
# Minimum total distance from each houses to nearest mailboxes is |2-3| + |3-3| + |5-3| + |12-14| + |18-14| = 9.
# Example 3:
# Input: houses = [7,4,6,1], k = 1
# Output: 8
# Example 4:
# Input: houses = [3,6,14,10], k = 4
# Output: 0
# Constraints:
# n == houses.length
# 1 <= n <= 100
# 1 <= houses[i] <= 10^4
# 1 <= k <= n
# Array houses contain unique integers.
class Solution:
    """LeetCode 1478 -- Allocate Mailboxes."""

    def minDistance(self, houses: List[int], k: int) -> int:
        """Minimum total distance from each house to its nearest mailbox.

        Classic interval DP: a single mailbox serving a sorted contiguous
        group of houses is optimally placed at the group's median.  The
        original submission was an unimplemented stub (returned None).
        """
        houses.sort()
        n = len(houses)
        # With a mailbox per house every distance is zero.
        if k >= n:
            return 0
        # cost[i][j]: cost of serving houses[i..j] with one mailbox
        # placed at the median house.
        cost = [[0] * n for _ in range(n)]
        for i in range(n):
            for j in range(i + 1, n):
                median = houses[(i + j) // 2]
                cost[i][j] = sum(abs(h - median) for h in houses[i:j + 1])
        INF = float('inf')
        # dp[i][m]: min cost to cover the first i houses with m mailboxes.
        dp = [[INF] * (k + 1) for _ in range(n + 1)]
        dp[0][0] = 0
        for i in range(1, n + 1):
            for m in range(1, k + 1):
                for p in range(i):
                    if dp[p][m - 1] < INF:
                        candidate = dp[p][m - 1] + cost[p][i - 1]
                        if candidate < dp[i][m]:
                            dp[i][m] = candidate
        return int(dp[n][k])
| 1,686 | 661 |
"""
Operadores lógicos
Para agrupar operações com lógica booleana, utilizaremos operadores lógicos.
Python suporta três operadores básicos: not (não), and (e), or (ou). Esses operadores
podem ser traduzidos como não (¬ negação), e (Λ conjunção) e ou (V disjunção).
"""
# Operador not
"""
>>> not True
False
>>> not False
True
"""
# Operador and
"""
O operador and (e) tem sua tabela verdade representada na tabela 3.5. O operador
and (e) resulta verdadeiro apenas quando seus dois operandos forem verdadeiros.
>>> True and True
True
>>> True and False
False
>>> False and True
False
>>> False and False
False
"""
# Operador or
"""
O operador or (ou) resulta em falso apenas se seus dois operandos forem falsos. Se apenas um de seus operandos for verdadeiro, ou
se os dois forem, o resultado da operação será verdadeiro.
>>> True or True
True
>>> True or False
True
>>> False or True
True
>>> False or False
False
"""
"""
isso é um comentário
"""
"""
comen
"""
| 990 | 353 |
#
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from nfv_common.alarm import * # noqa: F401,F403
from nfv_vim.alarm._general import clear_general_alarm # noqa: F401
from nfv_vim.alarm._general import raise_general_alarm # noqa: F401
from nfv_vim.alarm._host import host_clear_alarm # noqa: F401
from nfv_vim.alarm._host import host_raise_alarm # noqa: F401
from nfv_vim.alarm._instance import instance_clear_alarm # noqa: F401
from nfv_vim.alarm._instance import instance_manage_alarms # noqa: F401
from nfv_vim.alarm._instance import instance_raise_alarm # noqa: F401
from nfv_vim.alarm._instance_group import clear_instance_group_alarm # noqa: F401
from nfv_vim.alarm._instance_group import raise_instance_group_policy_alarm # noqa: F401
from nfv_vim.alarm._sw_update import clear_sw_update_alarm # noqa: F401
from nfv_vim.alarm._sw_update import raise_sw_update_alarm # noqa: F401
| 944 | 402 |
"""~#SHORTDESCRIPTION#~"""
# NOTE: the ~#...#~ markers are template placeholders substituted at
# project-generation time; do not hand-edit them.
__version__ = "1.0.0"
__release__ = "1"
__program_name__ = "~#PROJECT#~"
| 101 | 51 |
from typing import Optional
from torch import nn
from rl_multi_agent.experiments.furnmove_grid_marginal_nocl_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN
class FurnMoveGridExperimentConfig(FurnMoveExperimentConfig):
    """Marginal (no-CL) grid FurnMove experiment with a widened final CNN
    so its parameter count matches the mixture variant."""

    final_cnn_channels = 288

    @classmethod
    def get_init_train_params(cls):
        """Base training params, forcing a minimum starting agent separation."""
        params = FurnMoveExperimentConfig.get_init_train_params()
        params["environment_args"] = {"min_steps_between_agents": 2}
        return params

    @property
    def saved_model_path(self) -> Optional[str]:
        """No pretrained checkpoint is loaded for this experiment."""
        return None

    @classmethod
    def create_model(cls, **kwargs) -> nn.Module:
        """Build the egocentric-grid A3C model; `kwargs` override defaults."""
        settings = dict(
            num_inputs=9,
            action_groups=cls.episode_class.class_available_action_groups(
                include_move_obj_actions=cls.include_move_obj_actions
            ),
            num_agents=cls.num_agents,
            state_repr_length=cls.state_repr_length,
            occupancy_embed_length=8,
            talk_embed_length=cls.talk_embed_length,
            agent_num_embed_length=cls.agent_num_embed_length,
            reply_embed_length=cls.reply_embed_length,
            turn_off_communication=cls.turn_off_communication,
            coordinate_actions=cls.coordinate_actions,
            coordinate_actions_dim=13 if cls.coordinate_actions else None,
            separate_actor_weights=False,
            num_talk_symbols=cls.num_talk_symbols,
            num_reply_symbols=cls.num_reply_symbols,
            final_cnn_channels=cls.final_cnn_channels,
        )
        settings.update(kwargs)
        return A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN(**settings)
def get_experiment():
    """Factory used by the training harness to obtain this experiment."""
    experiment = FurnMoveGridExperimentConfig()
    return experiment
| 2,225 | 643 |
"""
Request controllers for the external links service.
These may be used handle requests originating from the :mod:`.routes.api`
and/or the :mod:`.routes.ui`.
If the behavior of these controllers diverges along the UI/API lines, then we
can split this into ``controllers/api.py`` and ``controllers/ui.py``.
"""
from typing import Tuple, Any, Dict
from http import HTTPStatus
from werkzeug.datastructures import MultiDict
Response = Tuple[Dict[str, Any], HTTPStatus, Dict[str, str]]
def service_status(params: MultiDict) -> Response:
    """
    Handle requests for the service status endpoint.

    Returns ``200 OK`` if the service is up and ready to handle requests.
    ``params`` is accepted for interface uniformity and is not consulted.
    """
    body = {'iam': 'ok'}
    return body, HTTPStatus.OK, {}
from __future__ import division
from __future__ import unicode_literals
import re
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template import RequestContext
from django.template import loader
from models import *
from apps.core.views import get_bg_color
def list_students(request, classroom_slug):
    """Render the roster page for a classroom (staff only)."""
    if not request.user.is_staff:
        return redirect('show_page', classroom_slug)
    try:
        classroom = Classroom.objects.get(slug=classroom_slug)
    # Bug fix: a bare `except:` swallowed every exception (including DB
    # errors and KeyboardInterrupt); only a missing classroom should
    # redirect to the index.
    except Classroom.DoesNotExist:
        return redirect('core_index')
    context = {
        'classroom': classroom,
        'bg_color': get_bg_color(request),
    }
    template = 'roster/list_students.html'
    c = RequestContext(request, context)
    t = loader.get_template(template)
    return HttpResponse(t.render(c))
def edit_student_list(request, classroom_slug):
    """Show the CSV-editing form pre-filled with the classroom roster.

    Staff only; each roster line is 'last_name,first_name,' (password blank).
    """
    if not request.user.is_staff:
        return redirect('show_page', classroom_slug)
    try:
        classroom = Classroom.objects.get(slug=classroom_slug)
    # Bug fix: bare `except:` replaced with the specific lookup failure.
    except Classroom.DoesNotExist:
        return redirect('core_index')
    students = Student.objects.filter(classroom=classroom)
    # Join once instead of quadratic string concatenation.
    student_list_csv = ''.join(
        ','.join([student.last_name, student.first_name, '']) + '\n'
        for student in students
    )
    context = {
        'student_list_csv': student_list_csv,
        'classroom': classroom,
        'bg_color': get_bg_color(request),
    }
    template = 'roster/edit_student_list.html'
    c = RequestContext(request, context)
    t = loader.get_template(template)
    return HttpResponse(t.render(c))
def post_student_list(request, classroom_slug):
    """Replace a classroom's roster from the posted CSV (staff only).

    POST field ``student_list_csv`` holds one ``last,first,password`` line
    per student.  Usernames are derived as the first initial plus up to
    seven alphabetic characters of the last name.
    """
    if not request.user.is_staff:
        return redirect('show_page', classroom_slug)
    try:
        classroom = Classroom.objects.get(slug=classroom_slug)
    # Bug fix: bare `except:` narrowed to the specific lookup failure.
    except Classroom.DoesNotExist:
        return redirect('core_index')
    students = Student.objects.filter(classroom=classroom)
    if 'submit' in request.POST:
        for student in students:  # really should only delete those not in POST...
            student.delete()
        student_list = request.POST['student_list_csv'].splitlines()
        for line in student_list:
            # Robustness: tolerate blank lines in the pasted CSV, which
            # previously crashed the three-way unpack below.
            if not line.strip():
                continue
            [last_name, first_name, password] = [x.strip() for x in line.split(',')]
            username = first_name[0].lower()
            username += re.sub(r'[^a-z]', '', last_name.lower())[:7]
            try:
                student_user = User.objects.get(username=username)
            except User.DoesNotExist:
                student_user = User()
                student_user.username = username
            student_user.last_name = last_name
            student_user.first_name = first_name
            student_user.set_password(password)
            student_user.save()
            student = Student()
            student.classroom = classroom
            student.user = student_user
            student.save()
            # (Removed a redundant second assignment+save of the name
            # fields; they were already set before the first save.)
    return redirect('list_students', classroom_slug)
| 3,101 | 896 |
from __future__ import unicode_literals
from subprocess import call
from re import search
from random import sample, choice
from csv import reader
from os import popen
from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter
'''
The strings, input and output of this program is in lowercase. => case-insensitive
List of standard OUI:
http://standards-oui.ieee.org/oui/oui.txt
http://standards-oui.ieee.org/oui/oui.csv
'''
# Validating mac address
def mac_validation(mac):
    """Exit the program unless `mac` looks like aa:bb:cc:dd:ee:ff (lowercase)."""
    pattern = r"^([0-9a-f]{2}:){5}[0-9a-f]{2}$"
    if not search(string=mac, pattern=pattern):
        print("Invalid mac. Check it and try again")
        quit()
    return "Valid mac"
# Validating Interface
def interface_validation(interface):
    """Exit the program unless `interface` is eth<digit> or wlan<digit>."""
    pattern = r"^(eth|wlan)\d{1}$"
    if not search(string=interface, pattern=pattern):
        print("Invalid Interface. Check it and try again")
        quit()
    return "Valid interface"
hex_characters = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"]
# Ask how the new mac should be chosen: random, manual, OUI-based, or restore.
random_or_not = prompt("Do you want your mac address to change randomly? [(Y)es or (N)o]\nOr\nDo you want to choose first part of your mac address based on other manufacturers mac address? [(O)UI]\nOr\nDo you want your mac address back to original one? [(R)everse]\nYour answer: ").lower()
interface = prompt("Please insert name of the interface you want to change its mac: [wlan* or eth*] ").lower()
interface_validation(interface)
if random_or_not == "y" or random_or_not == "yes":
    # Fully random mac: six two-hex-digit groups.
    random_mac = []
    for i in range(6):
        random_mac.append("".join(sample(hex_characters, 2)))
    random_mac = ":".join(random_mac)
    # Bug fix: `mac` must be assigned here as well -- the commands at the
    # bottom of the script use `mac`, so the random option previously
    # raised NameError before ever changing the address.
    mac = random_mac
    print("Your new mac address will be {0}".format(random_mac))
elif random_or_not == "n" or random_or_not == "no":
    # user's new mac
    mac = prompt("Please insert your new mac: ").lower()
    mac_validation(mac)
elif random_or_not == "r" or random_or_not == "reverse":
    # Restore the address stashed by a previous run (see bottom of script).
    if search(string=interface, pattern=r"^eth\d{1}$"):
        with open(file="/tmp/eth-old-mac.txt", mode="r", encoding="utf-8") as old_mac:
            mac = old_mac.readline()
    elif search(string=interface, pattern=r"^wlan\d{1}$"):
        with open(file="/tmp/wlan-old-mac.txt", mode="r", encoding="utf-8") as old_mac:
            mac = old_mac.readline()
elif random_or_not == "o" or random_or_not == "oui":
    oui = {}
    # Creating Template of our dictionary (OUI)
    with open(file="oui.csv", mode="r", encoding="utf-8") as csvfile:
        csvreader = reader(csvfile)
        next(csvreader) # ignore first row of csv which is header
        for row in csvreader:
            # NOTE(review): this replace() swaps a space for what renders as
            # the same space; it likely targeted a non-breaking or double
            # space in the IEEE csv -- confirm against the source data.
            oui[str(row[2]).replace(" ", " ")] = []
    # Fill values of dictionary (OUI)
    with open(file="oui.csv", mode="r", encoding="utf-8") as csvfile:
        csvreader = reader(csvfile)
        next(csvreader) # ignore first row of csv which is header
        for row in csvreader:
            value = oui[str(row[2]).replace(" ", " ")]
            if len(str(row[1])) > 6:
                continue
            else:
                value.append(str(row[1]))
            oui[str(row[2])] = value
    # Deleting keys with empty values []
    # 273 keys were deleted from list.
    for key, value in list(oui.items()):
        if value == []:
            del oui[key]
    random_organization = prompt("Do you want to choose your mac address from specific manufacturer? [(Y)es or (N)o] ").lower()
    if random_organization == "y" or random_organization == "yes":
        organizations = WordCompleter(list(oui.keys()), ignore_case=True)
        organization = prompt("Please select an organization name: ", completer=organizations)
        print("You will be using mac address of '{0}' organization.".format(organization))
        random_oui = choice(oui.get("{0}".format(organization)))
        character_need = 12 - len(random_oui)
        mac_without_colon = random_oui + str("".join(sample(hex_characters, character_need)))
        mac = mac_without_colon[0:2] + ":" + mac_without_colon[2:4] + ":" + mac_without_colon[4:6] + ":" + mac_without_colon[6:8] + ":" + mac_without_colon[8:10] + ":" + mac_without_colon[10:12]
        mac = mac.lower()
        print("Your new mac address will be {0}".format(mac))
    elif random_organization == "n" or random_organization == "no":
        organization = choice(list(oui.keys()))
        print("You will be using mac address of '{0}' organization.".format(organization))
        random_oui = choice(oui.get("{0}".format(organization)))
        character_need = 12 - len(random_oui)
        mac_without_colon = random_oui + str("".join(sample(hex_characters, character_need)))
        mac = mac_without_colon[0:2] + ":" + mac_without_colon[2:4] + ":" + mac_without_colon[4:6] + ":" + mac_without_colon[6:8] + ":" + mac_without_colon[8:10] + ":" + mac_without_colon[10:12]
        mac = mac.lower()
        print("Your new mac address will be {0}".format(mac))
    else:
        print("Please choose your answer correctly!")
        quit()
else:
    print("Please check your answer!")
    quit()
# Saving old mac addresses | delete text files in reverse mode
if random_or_not == "r" or random_or_not == "reverse":
    delete = prompt("Do you want to delete files related to your old mac address? [(Y)es or (N)o] ").lower()
    if delete == "y" or delete =="yes":
        call("rm /tmp/eth-old-mac.txt /tmp/wlan-old-mac.txt", shell=True)
    elif delete == "n" or delete =="no":
        pass
    else:
        print("Please check your answer! What do you want to do with old mac address text files?!")
        quit()
else:
    # Stash the current addresses so a later "reverse" run can restore them.
    call("ip addr | grep -E 'ether' | cut --delimiter=' ' -f 6 | sed -n '1p' > /tmp/eth-old-mac.txt", shell=True)
    call("ip addr | grep -E 'ether' | cut --delimiter=' ' -f 6 | sed -n '2p' > /tmp/wlan-old-mac.txt", shell=True)
# Checking kernel version to call different commands
kernel_version = popen("uname -r").read()
# Compare "major.minor" numerically: ifconfig for older kernels, ip for newer.
if float(".".join(kernel_version.split(".")[:2])) < 4.15:
    # Start changing mac address for kernel versions lower than 4.15
    call("ifconfig {0} down".format(interface), shell=True)
    call("ifconfig {0} hw ether {1}".format(interface, mac), shell=True)
    call("ifconfig {0} up".format(interface), shell=True)
else:
    # Start changing mac address for kernel versions higher than 4.15
    call("ip link set {0} down".format(interface), shell=True)
    call("ip link set {0} address {1}".format(interface, mac), shell=True)
    call("ip link set {0} up".format(interface), shell=True)
print("Done :)")
| 6,635 | 2,171 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position
# pylint: disable=protected-access
import concurrent.futures
import io
import os
import sys
import tempfile
import threading
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from ratarmountcore import StenciledFile # noqa: E402
# Small deterministic fixture shared by the byte-exact tests below.
testData = b"1234567890"
tmpFile = tempfile.TemporaryFile()
tmpFile.write(testData)
# Larger random fixture for the sequential and concurrent read tests.
randomTestData = os.urandom(128 * 1024)
randomTmpFile = tempfile.TemporaryFile()
randomTmpFile.write(randomTestData)
class TestStenciledFile:
    """Tests for StenciledFile, a file view assembled from (offset, size)
    stencils over an underlying file object."""
    @staticmethod
    def test_empty_file():
        # A zero-length stencil yields an empty view.
        assert StenciledFile(tmpFile, [(0, 0)]).read() == b""
    @staticmethod
    def test_findStencil():
        # Each logical offset must map to the index of the stencil
        # containing it.
        stenciledFile = StenciledFile(tmpFile, [(1, 2), (2, 2), (0, 2), (4, 4), (1, 8), (0, 1)])
        expectedResults = [0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5]
        for offset, iExpectedStencil in enumerate(expectedResults):
            assert stenciledFile._findStencil(offset) == iExpectedStencil
    @staticmethod
    def test_single_stencil():
        # One stencil behaves like a plain slice of the file.
        assert StenciledFile(tmpFile, [(0, 1)]).read() == b"1"
        assert StenciledFile(tmpFile, [(0, 2)]).read() == b"12"
        assert StenciledFile(tmpFile, [(0, 3)]).read() == b"123"
        assert StenciledFile(tmpFile, [(0, len(testData))]).read() == testData
    @staticmethod
    def test_1B_stencils():
        # Single-byte stencils concatenate in stencil order.
        assert StenciledFile(tmpFile, [(0, 1), (1, 1)]).read() == b"12"
        assert StenciledFile(tmpFile, [(0, 1), (2, 1)]).read() == b"13"
        assert StenciledFile(tmpFile, [(1, 1), (0, 1)]).read() == b"21"
        assert StenciledFile(tmpFile, [(0, 1), (1, 1), (2, 1)]).read() == b"123"
        assert StenciledFile(tmpFile, [(1, 1), (2, 1), (0, 1)]).read() == b"231"
    @staticmethod
    def test_2B_stencils():
        # Overlapping two-byte stencils may repeat underlying bytes.
        assert StenciledFile(tmpFile, [(0, 2), (1, 2)]).read() == b"1223"
        assert StenciledFile(tmpFile, [(0, 2), (2, 2)]).read() == b"1234"
        assert StenciledFile(tmpFile, [(1, 2), (0, 2)]).read() == b"2312"
        assert StenciledFile(tmpFile, [(0, 2), (1, 2), (2, 2)]).read() == b"122334"
        assert StenciledFile(tmpFile, [(1, 2), (2, 2), (0, 2)]).read() == b"233412"
    @staticmethod
    def test_read_with_size():
        # read(n) must honour n and clamp past the end of the view.
        assert StenciledFile(tmpFile, [(1, 2), (2, 2), (0, 2)]).read(0) == b""
        assert StenciledFile(tmpFile, [(1, 2), (2, 2), (0, 2)]).read(1) == b"2"
        assert StenciledFile(tmpFile, [(1, 2), (2, 2), (0, 2)]).read(2) == b"23"
        assert StenciledFile(tmpFile, [(1, 2), (2, 2), (0, 2)]).read(3) == b"233"
        assert StenciledFile(tmpFile, [(1, 2), (2, 2), (0, 2)]).read(4) == b"2334"
        assert StenciledFile(tmpFile, [(1, 2), (2, 2), (0, 2)]).read(5) == b"23341"
        assert StenciledFile(tmpFile, [(1, 2), (2, 2), (0, 2)]).read(6) == b"233412"
        assert StenciledFile(tmpFile, [(1, 2), (2, 2), (0, 2)]).read(7) == b"233412"
    @staticmethod
    def test_seek_and_tell():
        # Verify SEEK_CUR/SEEK_END arithmetic and seeking past the end.
        stenciledFile = StenciledFile(tmpFile, [(1, 2), (2, 2), (0, 2)])
        for i in range(7):
            assert stenciledFile.tell() == i
            stenciledFile.read(1)
        for i in reversed(range(6)):
            assert stenciledFile.seek(-1, io.SEEK_CUR) == i
            assert stenciledFile.tell() == i
        assert stenciledFile.seek(0, io.SEEK_END) == 6
        assert stenciledFile.tell() == 6
        assert stenciledFile.seek(20, io.SEEK_END) == 26
        assert stenciledFile.tell() == 26
        assert stenciledFile.read(1) == b""
        assert stenciledFile.seek(-6, io.SEEK_END) == 0
        assert stenciledFile.read(1) == b"2"
    @staticmethod
    def test_reading_from_shared_file():
        # Two views over the same handle must not disturb each other.
        stenciledFile1 = StenciledFile(tmpFile, [(0, len(testData))])
        stenciledFile2 = StenciledFile(tmpFile, [(0, len(testData))])
        for i in range(len(testData)):
            assert stenciledFile1.read(1) == testData[i : i + 1]
            assert stenciledFile2.read(1) == testData[i : i + 1]
    @staticmethod
    def test_successive_reads(lock=None):
        # Sequential chunked reads must reproduce the data byte-for-byte;
        # also reused as the worker for the multithreaded test below.
        stenciledFile = StenciledFile(randomTmpFile, [(0, len(randomTestData))], lock)
        batchSize = 1024
        for i in range(len(randomTestData) // batchSize):
            assert stenciledFile.read(batchSize) == randomTestData[i * batchSize : (i + 1) * batchSize]
    @staticmethod
    def test_multithreaded_reading():
        # NOTE(review): the pool size is hard-coded to 24 instead of
        # reusing `parallelism` -- should probably be the same value.
        parallelism = 24
        with concurrent.futures.ThreadPoolExecutor(24) as pool:
            lock = threading.Lock()
            results = []
            for _ in range(parallelism):
                results.append(pool.submit(TestStenciledFile.test_successive_reads, lock))
            for result in results:
                result.result()
| 4,775 | 1,911 |
import datetime
import json
import os
import requests
import smtplib
import ssl
def check_status(config):
    """Compare current sensor states with the stored ones and email diffs."""
    current = get_current_state()
    previous = get_last_known_state()
    newly_active = get_activated(current, previous)
    newly_inactive = get_deactivated(current, previous)
    save_state(current)
    if not newly_active and not newly_inactive:
        print("No change in the state, will not send any email.")
        return
    send_email(config, create_msg(newly_active, newly_inactive))
def send_email(config, msg):
    """Send `msg` through the SMTP-over-SSL server described by `config`.

    `config` must provide host, port, sender_email, password and recipients.
    """
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(config["host"], config["port"], context=context) as server:
        server.login(config["sender_email"], config["password"])
        server.sendmail(config["sender_email"], config["recipients"], msg)
        # Removed a redundant server.quit(): the context manager already
        # issues QUIT (and closes the socket) on exit.
    print("Email sent.")
def create_msg(activated_sensors, deactivated_sensors):
    """Build the notification email body, including the Subject header.

    Lines are joined with CRLF as required for SMTP message bodies.
    """
    msg = ["Subject: Sensors have changed state", ""]
    if len(deactivated_sensors) > 0:
        msg.append(
            "I am sorry to inform you that one or more sensors might not be"
            " active anymore. I have failed to receive status from:"
        )
        # Plain loops instead of side-effect-only list comprehensions.
        for sensor in deactivated_sensors:
            msg.append("* " + sensor)
        msg.append("")
    if len(activated_sensors) > 0:
        msg.append("Some sensors have been activated again:")
        for sensor in activated_sensors:
            msg.append("* " + sensor)
        msg.append("")
    msg.append("This message was generated {}".format(datetime.datetime.utcnow()))
    msg.append("")
    msg.append("Yours sincerely,")
    msg.append("Walter")
    return "\r\n".join(msg)
def get_activated(new_state, old_state):
    """Names of sensors now active that were explicitly inactive before."""
    return [
        name
        for name, active in new_state.items()
        if active is True and old_state.get(name) is False
    ]
def get_deactivated(new_state, old_state):
    """Names of sensors now inactive that were explicitly active before."""
    return [
        name
        for name, active in new_state.items()
        if active is False and old_state.get(name) is True
    ]
def get_last_known_state():
    """Load the previously saved sensor states; {} when absent or empty."""
    path = os.path.join("scripts", "check_status_state.json")
    if not os.path.exists(path):
        return {}
    with open(path, "r") as handle:
        raw = handle.read()
    return json.loads(raw) if raw else {}
def save_state(state):
    """Persist sensor states as pretty-printed JSON for the next run."""
    path = os.path.join("scripts", "check_status_state.json")
    with open(path, "w+") as handle:
        json.dump(state, handle, ensure_ascii=False, indent=4)
def get_current_state():
    """Map each sensor name to its is_active flag from the sensors API."""
    state = {}
    for sensor in get_sensors():
        state[sensor["name"]] = sensor["is_active"]
    return state
def get_sensors():
    """Fetch the sensor list from the local HTTP API as parsed JSON."""
    response = requests.get("http://localhost:5000/api/sensors")
    return response.json()
if __name__ == "__main__":
    # Load the SMTP/recipient configuration and run one check cycle.
    with open(os.path.join("scripts", "check_status_config.json"), "r") as f:
        data = f.read()
    config = json.loads(data)
    check_status(config)
| 3,029 | 972 |
# Settings Manager
# guidanoli
# Factory defaults written when no cfg file exists yet.
DEFAULT_STGS = {
    "commas": "true",
    "comments": "true",
    "tknlistpath": "tknlist.tk",
    "tokenpath": "token.tk"
}
# Location of the settings file, relative to the working directory.
SETTINGS_PATH = "fibonacci.cfg"
# Type objects used by the runtime type assertions below.
TYPE_STR = type("")
TYPE_LIST = type([])
def _validateFilename( filename , extension = "" ):
from re import match
return match("[^<>:\"\\/|?*]+"+extension,filename).groups() != None
def _validateEdit( label , new_value ):
if label == "commas":
return new_value in ["true","false"]
elif label == "comment":
return new_value in ["true", "false"]
elif label == "tknlistpath":
return _validateFilename(new_value,".tk")
elif label == "tokenpath":
return _validateFilename(new_value,".tk")
def _validateString( s ):
    """True when `s` contains neither '=' nor a newline (cfg-file safe)."""
    assert(type(s)==TYPE_STR)
    for ch in s:
        if ch == '=' or ch == '\n':
            return False
    return True
def _writeSettings( settings_list ):
    """Serialize [label, value] pairs as 'label=value' lines to the cfg file.

    Returns True on success, False when the file cannot be written.
    """
    assert(type(settings_list)==TYPE_LIST)
    try:
        # `with` guarantees the handle is closed even if write() fails;
        # the original leaked the handle on a write error.
        with open(SETTINGS_PATH, "w") as f:
            f.write("\n".join("=".join(s) for s in settings_list))
    except IOError:
        print("Could not write cfg file.")
        return False
    return True
def _getSettingsList():
    """Read the cfg file and return a list of [label, value] lists.

    Returns None on I/O failure.  On a missing file, writes the defaults,
    asks the user to re-run, and returns None.  (The original docstring
    claimed a dict was returned, which was wrong.)
    """
    try:
        # `with` closes the handle even if a line fails to parse.
        with open(SETTINGS_PATH, "r") as f:
            settings = [line.strip().split('=') for line in f]
    except FileNotFoundError:
        print("Could not find cfg file. Creating default cfg file...")
        if _generateSettingsFile():
            print("The default cfg file was created successfully. Re-run me.")
        return None
    except IOError:
        print("Could not read cfg file.")
        return None
    return settings
def _generateSettingsFile():
    """Write DEFAULT_STGS to disk; True on success, False on I/O error."""
    pairs = [[label, value] for label, value in DEFAULT_STGS.items()]
    return _writeSettings(pairs)
def _validateSettingFormat( s ):
    """Check that `s` is a two-element [label, value] pair of cfg-safe
    strings, printing a diagnostic for the first failed check."""
    if type(s) != TYPE_LIST:
        print("Setting isn't table.")
        return False
    if len(s) != 2:
        print("Setting table size is wrong.")
        return False
    for item in s:
        if type(item) != TYPE_STR:
            print("Settings variables aren't string.")
            return False
    for item in s:
        if not _validateString(item):
            print("Settings variables are invalid.")
            return False
    return True
def _getSettingLabel( s ):
    """First element (the label) of a validated [label, value] pair."""
    assert(_validateSettingFormat(s))
    label, _ = s
    return label
def _getSettingValue( s ):
    """Second element (the value) of a validated [label, value] pair."""
    assert(_validateSettingFormat(s))
    _, value = s
    return value
def _formatSetting( label, new_value ):
return [label,new_value]
def _getSettingValueFromLabel( settings_list , label ):
    """Value stored under `label`, or None when the label is absent."""
    assert(type(settings_list)==TYPE_LIST)
    assert(type(label)==TYPE_STR)
    for setting in settings_list:
        if _getSettingLabel(setting) == label:
            return _getSettingValue(setting)
    return None
def _printSettings( settings_list ):
    """Dump settings as a two-column table; abort at the first bad entry."""
    assert(type(settings_list)==TYPE_LIST)
    print("{:<20}{:<20}".format("Label","Value"))
    print("-" * 40)
    for setting in settings_list:
        if not _validateSettingFormat(setting):
            return
        print("{:<20}{:<20}".format(_getSettingLabel(setting), _getSettingValue(setting)))
    if not settings_list:
        print("No settings found.")
def _editSetting( settings_list , label , new_value ):
    """Validate `new_value` for `label`, update the list, persist to disk.

    Returns True when the cfg file was rewritten successfully.
    """
    assert(type(settings_list)==TYPE_LIST)
    assert(type(label)==TYPE_STR)
    assert(type(new_value)==TYPE_STR)
    if len(new_value) == 0 or not _validateString(new_value):
        print("\nInvalid string for new value.")
        return False
    lbl_list = [ _getSettingLabel(s) for s in settings_list ]
    # Idiom fix: `label not in` instead of `not label in`.
    if label not in lbl_list:
        print("\nUnexpected error occurred. Label not in list.")
        return False
    if not _validateEdit(label,new_value):
        # Typo fixed in the user-facing message: "requirementes".
        print("\nNew value does not meet label requirements. Check README.")
        return False
    idx = lbl_list.index(label)
    settings_list[idx] = _formatSetting(label,new_value)
    return _writeSettings(settings_list)
def getSetting( label ):
    """Public accessor: the value stored for `label`, or None on failure."""
    assert(type(label)==TYPE_STR)
    settings = _getSettingsList()
    if settings is None:
        return None
    return _getSettingValueFromLabel(settings, label)
def launch( cmd ):
    """Dispatch a settings command: 'sd' (reset to defaults), 'sv' (view),
    'se' (view then interactively edit one setting)."""
    assert(type(cmd)==TYPE_STR)
    if cmd == 'sd':
        # Reset settings to the factory defaults.
        if _generateSettingsFile():
            print("Settings were set to default.")
    elif cmd in ['se','sv']:
        # Print the settings list; 'se' then prompts for an edit.
        slist = _getSettingsList()
        if slist == None:
            print("Could not print settings list.\n")
            return
        _printSettings(slist)
        if cmd == 'se':
            print()
            lbl = input("Label: ")
            curr_value = _getSettingValueFromLabel(slist,lbl)
            if curr_value == None:
                print("Label not recognized.\n")
                return
            print("Current value for '"+lbl+"': "+curr_value)
            new_value = input("Setting new value: ")
            if _editSetting(slist,lbl,new_value):
                print("New value set successfully.")
    else:
        print("Command '"+cmd+"' not recognized.")
    print()
| 5,340 | 1,627 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright SquirrelNetwork
from core import decorators
from telegram.utils.helpers import mention_markdown
@decorators.public.init
@decorators.delete.init
def init(update,context):
    """Reply with the group's administrator list, MarkdownV2-formatted."""
    bot = context.bot
    chat_id = update.effective_chat.id
    admins = update.effective_chat.get_administrators()
    staff_lines = "".join(
        "👮 {}\n".format(mention_markdown(admin.user.id, admin.user.first_name, version=2))
        for admin in admins
    )
    bot.send_message(chat_id, "Group Staff:\n" + staff_lines, parse_mode='MarkdownV2')
from ..utils import Object
class Users(Object):
    """
    Represents a list of users

    Attributes:
        ID (:obj:`str`): ``Users``

    Args:
        total_count (:obj:`int`):
            Approximate total count of users found

        user_ids (List of :obj:`int`):
            A list of user identifiers

    Returns:
        Users

    Raises:
        :class:`telegram.Error`
    """
    ID = "users"

    def __init__(self, total_count, user_ids, **kwargs):
        # Any extra payload keys are intentionally ignored.
        self.total_count = total_count  # int
        self.user_ids = user_ids  # list of int

    @staticmethod
    def read(q: dict, *args) -> "Users":
        """Build a Users instance from a raw dict payload."""
        return Users(q.get('total_count'), q.get('user_ids'))
| 762 | 249 |
# -*- coding: utf-8 -*-
#######################################################
'''
input
路徑
圖片數量
MOD值
嵌密率
處理內容
輸入一張圖片的資料,包含:
1.資料夾名稱
2.檔案名稱(圖片),單純用來記錄在xlsx檔案中
3.輸出路徑-xlsx
4.嵌密mod值
5.嵌密率
output
產生輸入圖片的xlsx檔(依序將所有圖片的資料寫入xlsx檔中)
包含執行時間
'''
#######################################################
from skimage import io
from openpyxl import Workbook
import openpyxl
import os
import math
import time
def cal_capacity(in_dir,
                 num_image,
                 num_mod,
                 embed_ratio):
    """Tally per-image embedding capacity and save an .xlsx summary.

    For each image i, reads <in_dir>/output%08d/output%08d_code.txt, converts
    its character count to bits via log2(num_mod), and records capacity and
    bpp (relative to a 256x256 image), followed by overall averages.

    Args:
        in_dir: folder holding the per-image output subfolders; the workbook
            is written there as well.
        num_image: number of images to process.
        num_mod: embedding modulus; each symbol carries log2(num_mod) bits.
        embed_ratio: embedding rate in percent (used in labels and filename).
    """
    wb = Workbook()
    ws = wb.active
    ws.append(["無LM", "mod=" + str(num_mod), str(embed_ratio) + "%", "256*256"])
    ws.append(["檔名", "嵌密量", "bpp"])
    totals = [0, 0]  # running sums: [capacity in bits, bpp]
    bits_per_symbol = math.log(num_mod, 2)  # hoisted loop-invariant
    for i in range(num_image):
        code_path = in_dir + "/output{:08d}".format(i) + "/output{:08d}_code.txt".format(i)
        # 'with' guarantees the handle is closed; the original leaked it.
        with open(code_path, 'r') as f_code:
            words = f_code.read()
        num_words = len(words) * bits_per_symbol  # capacity in bits
        bpp = num_words / (256 * 256)             # embedding rate per pixel
        ws.append(["output{:08d}".format(i),
                   float('%.2f' % round(num_words, 2)),  # round to 2 decimals
                   float('%.2f' % round(bpp, 2))])
        totals[0] += num_words
        totals[1] += bpp
        if i % 250 == 0:
            print(i)  # progress heartbeat
    averages = [t / num_image for t in totals]
    ws.append(["檔名", "嵌密量", "bpp"])
    ws.append([
        "",
        float('%.2f' % round(averages[0], 2)),
        float('%.2f' % round(averages[1], 2)),
    ])
    # Save after all rows are written.
    wb.save(in_dir + "/NLM-mod{:d}_capacity".format(num_mod) + "({:d}%).xlsx".format(embed_ratio))
# --------------------------------------------------------------------------- settings
in_dir="D:\\108resercher\\====######RESEARCH######====\\GAN-research\\12.8\\無LM嵌密結果\\100%MOD3"
num_image = 5000
num_mod = 3
embed_ratio= 100
# --------------------------------------------------------------------------- settings
start = time.time()
cal_capacity(in_dir, num_image, num_mod, embed_ratio)
elapsed = time.time() - start  # total wall-clock runtime
# Re-open the freshly written workbook and append the total runtime.
out_path = in_dir + "/NLM-mod{:d}_capacity".format(num_mod) + "({:d}%).xlsx".format(embed_ratio)
wb = openpyxl.load_workbook(out_path)
ws = wb['Sheet']
ws.append(["total time", str(round(elapsed, 2)) + " s"])
wb.save(out_path)
| 2,937 | 1,156 |
from collections import OrderedDict
my_dict = OrderedDict()
def populate(data: dict):
    """Insert every key/value pair of *data* into the shared registry."""
    for key, value in data.items():
        my_dict[key] = value
    return True
if __name__ == "__main__":
    print(populate({}))
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Brian Scholer (@briantist)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pytest
from ansible_collections.community.hashi_vault.tests.unit.compat import mock
from ansible_collections.community.hashi_vault.plugins.module_utils._hashi_vault_common import (
HashiVaultOptionGroupBase,
HashiVaultOptionAdapter,
)
# Option state as it exists before env processing: None values are candidates
# for late-binding env resolution; 'opt5' is deliberately absent.
PREREAD_OPTIONS = {
    'opt1': 'val1',
    'opt2': None,
    'opt3': 'val3',
    'opt4': None,
    # no opt5
    'opt6': None,
}
# Low-preference definitions: env var names tried in order, plus optional
# defaults applied when no env var is set.
LOW_PREF_DEF = {
    'opt1': dict(env=['_ENV_1A'], default='never'),
    'opt2': dict(env=['_ENV_2A', '_ENV_2B']),
    'opt4': dict(env=['_ENV_4A', '_ENV_4B', '_ENV_4C']),
    'opt5': dict(env=['_ENV_5A']),
    'opt6': dict(env=['_ENV_6A'], default='mosdefault'),
}
@pytest.fixture
def preread_options():
    """Fresh copy of the canonical pre-read option state."""
    return dict(PREREAD_OPTIONS)
@pytest.fixture
def adapter(preread_options):
    """Dict-backed option adapter wrapping the preread options."""
    options = preread_options
    return HashiVaultOptionAdapter.from_dict(options)
@pytest.fixture
def option_group_base(adapter):
    """Option group base under test, bound to the dict adapter."""
    base = HashiVaultOptionGroupBase(adapter)
    return base
@pytest.fixture(params=[
    # first dict is used to patch the environment vars
    # second dict is used to patch the current options to get them to the expected state
    #
    # envpatch, expatch
    ({}, {'opt6': 'mosdefault'}),
    ({'_ENV_1A': 'alt1a'}, {'opt6': 'mosdefault'}),
    ({'_ENV_3X': 'noop3x'}, {'opt6': 'mosdefault'}),
    ({'_ENV_2B': 'alt2b'}, {'opt2': 'alt2b', 'opt6': 'mosdefault'}),
    ({'_ENV_2A': 'alt2a', '_ENV_2B': 'alt2b'}, {'opt2': 'alt2a', 'opt6': 'mosdefault'}),
    ({'_ENV_4B': 'alt4b', '_ENV_6A': 'defnot', '_ENV_4C': 'alt4c'}, {'opt4': 'alt4b', 'opt6': 'defnot'}),
    ({'_ENV_1A': 'alt1a', '_ENV_4A': 'alt4a', '_ENV_1B': 'noop1b', '_ENV_4C': 'alt4c'}, {'opt4': 'alt4a', 'opt6': 'mosdefault'}),
    ({'_ENV_5A': 'noop5a', '_ENV_4C': 'alt4c', '_ENV_2A': 'alt2a'}, {'opt2': 'alt2a', 'opt4': 'alt4c', 'opt6': 'mosdefault'}),
])
def with_env(request, preread_options):
    """Patch os.environ per the param's first dict and yield the option state
    expected after late-binding env processing (second dict applied)."""
    env_overrides, expected_overrides = request.param
    expected = dict(preread_options)
    expected.update(expected_overrides)
    with mock.patch.dict(os.environ, env_overrides):
        yield expected
class TestHashiVaultOptionGroupBase(object):
    """Tests for HashiVaultOptionGroupBase.process_late_binding_env_vars."""
    def test_process_late_binding_env_vars(self, option_group_base, with_env, preread_options):
        # with_env already patched os.environ and yields the expected final state.
        option_group_base.process_late_binding_env_vars(LOW_PREF_DEF)
        assert preread_options == with_env, "Expected: %r\nGot: %r" % (with_env, preread_options)
| 2,592 | 1,038 |
def crescente_e_decrescente():
    """Read number pairs until an equal pair arrives; report the ordering."""
    while True:
        primeiro, segundo = entrada()
        if primeiro == segundo:
            break  # equal pair ends the session
        print('Decrescente' if primeiro > segundo else 'Crescente')
def entrada():
    """Read a line with two whitespace-separated integers; return (n1, n2).

    Uses ``split()`` with no argument, which also tolerates repeated spaces
    and tabs — ``split(' ')`` crashed on double spaces. Extra tokens beyond
    the first two are ignored, as before.
    """
    partes = input().split()
    return int(partes[0]), int(partes[1])
# Entry point: loops until the user enters two equal numbers.
crescente_e_decrescente()
| 384 | 134 |
from kivy.lang import Builder
import array
import scipy
import os
import syft as sy
import tensorflow as tf
import numpy
import time
import scipy
import sys
from dataset import get_dataset
from cluster import get_cluster
from PIL import Image
import leargist
from skimage import transform
from imageio import imsave
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.core.window import Window
from kivy.uix.label import Label
# from kivy.uix.label import Label
from kivy.uix.screenmanager import CardTransition
# Classification cutoff: mean model score above this => flagged as malware.
THRESHOLD = 0
# Maximum number of path characters shown in the result title before "...".
MAX_PATH_SIZE = 22
def read_file(filepath):
    """Convert an arbitrary binary file into a 32x32x1 GIST feature array.

    The raw bytes are reshaped into a width-256 grayscale image (trailing
    partial row dropped), resized to 64x64, and summarized with a color GIST
    descriptor; the first 1024 descriptor values are returned as (32, 32, 1).
    """
    with open(filepath, 'rb') as f:
        ln = os.path.getsize(filepath)
        width = 256
        rem = ln % width  # bytes that do not fill a complete 256-wide row
        a = array.array("B")
        a.fromfile(f, ln-rem)
        g = numpy.reshape(a, (int(len(a) / width), width))
        g = numpy.uint8(g)
        print(g)
        # NOTE(review): fixed temp path — concurrent runs would clash;
        # consider tempfile.NamedTemporaryFile instead.
        imsave('/tmp/tmp.png', g)
        pilimg = Image.open('/tmp/tmp.png')
        img_resized = pilimg.resize((64, 64))
        desc = leargist.color_gist(img_resized)
        data = desc[0:1024]
        data = numpy.resize(data, 1024)
        data = data.reshape(32, 32, 1)
        return data
def run(filepath):
    """Query the remote TFE model with the file's GIST features.

    Returns True when the mean model output exceeds THRESHOLD (i.e. the
    file is classified as malware).
    """
    # NOTE(review): hook and the dataset split below appear unused — confirm
    # whether KerasHook/get_dataset have required side effects.
    hook = sy.KerasHook(tf.keras)
    client = sy.TFEWorker()
    cluster = get_cluster()
    client.connect_to_model((1, 32, 32, 1), ((1, 25)), cluster)
    _, test_X, _, test_Y = get_dataset()
    # time.sleep(5)
    data = read_file(filepath)
    result = client.query_model(numpy.array([data]))
    result = numpy.mean(result)
    print("result:", result)
    return result > THRESHOLD
class MainScreen(Screen):
    """Landing screen; layout presumably defined in an external .kv file."""
    pass
class SubScreen(Screen):
    """Result screen showing a verdict title and an illustration image."""
    def __init__(self, title, img, **kwargs):
        # Attributes are set before super().__init__ so kv bindings can see them.
        self.img = img
        self.title = title
        super(SubScreen, self).__init__(**kwargs)
class AntivirusApp(App):
    """Kivy app: drop a file onto the window to classify it as malware."""
    def build(self):
        """Create the screen manager and register the file-drop handler."""
        self.main = MainScreen(name='main')
        self.sm = ScreenManager()
        self.sm.switch_to(self.main)
        # self.sm.add_widget()
        # self.sm.add_widget()
        Window.bind(on_dropfile=self._on_file_drop)
        return self.sm
    def _on_file_drop(self, window, file_path):
        """Classify the dropped file and switch to the result screen."""
        result = run(file_path)
        file_path = file_path.decode()  # Kivy delivers the dropped path as bytes
        if len(file_path) > MAX_PATH_SIZE:
            file_path = file_path[:MAX_PATH_SIZE] + "..."  # truncate for display
        if result:
            title = f"Danger! \"{file_path}\" is malware :("
            img = "malware"
        else:
            title = f"Safe! \"{file_path}\" is not malware :)"
            img = "doc2"
        self.sub = SubScreen(title, f"assets/img/{img}.png", name='sub')
        self.sm.switch_to(self.sub)
# Builder.load_file('assets/main.kv')
# Start the Kivy event loop (blocks until the window closes).
AntivirusApp().run()
| 2,695 | 969 |
from network.network import Network
import tensorflow as tf
import numpy as np
class VGG16(Network):
    """VGG-16 classifier built on TF1 graph ops.

    When ``train`` is True the graph also contains the softmax cross-entropy
    loss, an SGD train step with exponential LR decay, and an accuracy node.
    NOTE(review): ``self.keep_prob`` is only assigned when train=True, yet the
    dropout layers below reference it unconditionally — confirm the
    train=False construction path, and note that test() uses it as a
    feed_dict key even though it is a plain float, not a placeholder.
    """
    def __init__(self, input_shape, class_number, x, y, train=False, learning_rate=0.001):
        super().__init__()
        self.loss = None
        self.accuracy = None
        self._build_network(input_shape, class_number, train, learning_rate, x, y)
    def _build_network(self, network_input_shape, class_number, train, starter_learning_rate, x, y):
        """Assemble conv1..conv5, fc6..fc8, and (when training) the loss/optimizer."""
        self.x = x
        if train:
            self.keep_prob = 0.5
        self.y_ = y
        # One-hot encode integer labels for the cross-entropy loss.
        self.y = tf.one_hot(self.y_, class_number, 1.0, 0.0)
        self.conv1_1 = self.conv_layer('conv1_1', layer_input=self.x, shape=[3, 3, self.x.get_shape()[3].value,
                                                                             64])
        self.conv1_2 = self.conv_layer('conv1_2', layer_input=self.conv1_1, shape=[3, 3, 64, 64])
        self.max_pool1 = self.max_pool(self.conv1_2)
        self.conv2_1 = self.conv_layer('conv2_1', layer_input=self.max_pool1, shape=[3, 3, 64, 128])
        self.conv2_2 = self.conv_layer('conv2_2', layer_input=self.conv2_1, shape=[3, 3, 128, 128])
        self.max_pool2 = self.max_pool(self.conv2_2)
        self.conv3_1 = self.conv_layer('conv3_1', layer_input=self.max_pool2, shape=[3, 3, 128, 256])
        self.conv3_2 = self.conv_layer('conv3_2', layer_input=self.conv3_1, shape=[3, 3, 256, 256])
        self.conv3_3 = self.conv_layer('conv3_3', layer_input=self.conv3_2, shape=[3, 3, 256, 256])
        self.max_pool3 = self.max_pool(self.conv3_3)
        self.conv4_1 = self.conv_layer('conv4_1', layer_input=self.max_pool3, shape=[3, 3, 256, 512])
        self.conv4_2 = self.conv_layer('conv4_2', layer_input=self.conv4_1, shape=[3, 3, 512, 512])
        self.conv4_3 = self.conv_layer('conv4_3', layer_input=self.conv4_2, shape=[3, 3, 512, 512])
        self.max_pool4 = self.max_pool(self.conv4_3)
        self.conv5_1 = self.conv_layer('conv5_1', layer_input=self.max_pool4, shape=[3, 3, 512, 512])
        self.conv5_2 = self.conv_layer('conv5_2', layer_input=self.conv5_1, shape=[3, 3, 512, 512])
        self.conv5_3 = self.conv_layer('conv5_3', layer_input=self.conv5_2, shape=[3, 3, 512, 512])
        self.max_pool5 = self.max_pool(self.conv5_3)
        # Flatten assumes a 224x224 input downsampled 5x to 7x7x512.
        self.flat_max_pool5 = tf.reshape(self.max_pool5, shape=[-1, 7*7*512])
        self.fc6 = self.fully_connected('fc6', self.flat_max_pool5, 4096)
        self.fc6 = tf.nn.relu(self.fc6)
        self.fc6 = tf.nn.dropout(self.fc6, keep_prob=self.keep_prob)
        self.fc7 = self.fully_connected('fc7', self.fc6, 4096)
        self.fc7 = tf.nn.relu(self.fc7)
        self.fc7 = tf.nn.dropout(self.fc7, keep_prob=self.keep_prob)
        # fc8 outputs raw logits (softmax applied inside the loss op).
        self.fc8 = self.fully_connected('fc8', self.fc7, class_number)
        if train:
            self.global_step = tf.Variable(0, trainable=False)
            learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step,
                                                       decay_steps=100000, decay_rate=0.1, staircase=True)
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.fc8))
            self.train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(self.loss)
            correct_prediction = tf.equal(tf.argmax(self.fc8,1), tf.argmax(self.y,1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    def train(self, session):
        """Run one optimization step and print loss/accuracy (training graphs only)."""
        if self.loss is None:
            raise RuntimeError('Training a testing network!!')
        _, loss_value, accuracy_value = session.run([self.train_step, self.loss, self.accuracy])
        print('Loss {:.2f} Accuracy {:.2f}'.format(loss_value, accuracy_value))
    def test(self, session, batch, labels):
        """Print mean per-sample accuracy over *batch* (fed one sample at a time)."""
        if self.accuracy is None:
            raise RuntimeError('Cannot compute accuracy!!')
        accuracy = np.mean([session.run(self.accuracy, feed_dict={self.x: [batch[i]],
                                                                  self.y_: [labels[i]],
                                                                  self.keep_prob: 1.0})
                            for i in range(len(batch))])
        print('Accuracy: {:.2f}'.format(accuracy))
    def _restore_state(self, session):
        """Rebuild conv/fc layers from variables restored into *session*."""
        self.conv1_1 = self._restore_conv(session, 'conv1_1', layer_input=self.x)
        self.conv1_2 = self._restore_conv(session, 'conv1_2', layer_input=self.conv1_1)
        self.conv2_1 = self._restore_conv(session, 'conv2_1', layer_input=self.max_pool1)
        self.conv2_2 = self._restore_conv(session, 'conv2_2', layer_input=self.conv2_1)
        self.conv3_1 = self._restore_conv(session, 'conv3_1', layer_input=self.max_pool2)
        self.conv3_2 = self._restore_conv(session, 'conv3_2', layer_input=self.conv3_1)
        self.conv3_3 = self._restore_conv(session, 'conv3_3', layer_input=self.conv3_2)
        self.conv4_1 = self._restore_conv(session, 'conv4_1', layer_input=self.max_pool3)
        self.conv4_2 = self._restore_conv(session, 'conv4_2', layer_input=self.conv4_1)
        self.conv4_3 = self._restore_conv(session, 'conv4_3', layer_input=self.conv4_2)
        self.conv5_1 = self._restore_conv(session, 'conv5_1', layer_input=self.max_pool4)
        self.conv5_2 = self._restore_conv(session, 'conv5_2', layer_input=self.conv5_1)
        self.conv5_3 = self._restore_conv(session, 'conv5_3', layer_input=self.conv5_2)
        self.fc6 = self._restore_fully_connected(session, 'fc6', self.flat_max_pool5)
        self.fc6 = tf.nn.relu(self.fc6)
        self.fc6 = tf.nn.dropout(self.fc6, keep_prob=self.keep_prob)
        self.fc7 = self._restore_fully_connected(session,'fc7', self.fc6)
        self.fc7 = tf.nn.relu(self.fc7)
        self.fc7 = tf.nn.dropout(self.fc7, keep_prob=self.keep_prob)
        self.fc8 = self._restore_fully_connected(session,'fc8', self.fc7)
| 5,910 | 2,270 |
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
DESCRIPTOR = descriptor.FileDescriptor(name='ResourceKey.proto', package='EA.Sims4.Network', serialized_pb='\n\x11ResourceKey.proto\x12\x10EA.Sims4.Network"<\n\x0bResourceKey\x12\x0c\n\x04type\x18\x01 \x02(\r\x12\r\n\x05group\x18\x02 \x02(\r\x12\x10\n\x08instance\x18\x03 \x02(\x04"G\n\x0fResourceKeyList\x124\n\rresource_keys\x18\x01 \x03(\x0b2\x1d.EA.Sims4.Network.ResourceKeyB\x0eB\x0cResourceKeys')
_RESOURCEKEY = descriptor.Descriptor(name='ResourceKey', full_name='EA.Sims4.Network.ResourceKey', filename=None, file=DESCRIPTOR, containing_type=None, fields=[descriptor.FieldDescriptor(name='type', full_name='EA.Sims4.Network.ResourceKey.type', index=0, number=1, type=13, cpp_type=3, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='group', full_name='EA.Sims4.Network.ResourceKey.group', index=1, number=2, type=13, cpp_type=3, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='instance', full_name='EA.Sims4.Network.ResourceKey.instance', index=2, number=3, type=4, cpp_type=4, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=39, serialized_end=99)
_RESOURCEKEYLIST = descriptor.Descriptor(name='ResourceKeyList', full_name='EA.Sims4.Network.ResourceKeyList', filename=None, file=DESCRIPTOR, containing_type=None, fields=[descriptor.FieldDescriptor(name='resource_keys', full_name='EA.Sims4.Network.ResourceKeyList.resource_keys', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=101, serialized_end=172)
_RESOURCEKEYLIST.fields_by_name['resource_keys'].message_type = _RESOURCEKEY
DESCRIPTOR.message_types_by_name['ResourceKey'] = _RESOURCEKEY
DESCRIPTOR.message_types_by_name['ResourceKeyList'] = _RESOURCEKEYLIST
class ResourceKey(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
    # Generated protobuf message: required uint32 type/group, uint64 instance.
    DESCRIPTOR = _RESOURCEKEY
class ResourceKeyList(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
    # Generated protobuf message: repeated ResourceKey resource_keys.
    DESCRIPTOR = _RESOURCEKEYLIST
| 2,840 | 1,014 |
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
# from matplotlib.ticker import MaxNLocator
# Rafael Redondo (c) Eurecat 2020
# Per-metric bubble palettes: one row per metric, four shades from light to
# dark (RGB 0-255, normalized to 0-1 below).
colors = np.array([
    [[247, 197, 188],[242, 151, 136],[242, 120, 99],[237, 85, 59]],
    [[255, 242, 196], [247, 232, 176], [250, 225, 135], [246, 213, 93]],
    [[180, 224, 220],[123, 209, 201],[83, 194, 183],[60, 174, 163]],
    [[159, 189, 214],[118, 160, 194],[72, 125, 168],[32, 99, 155]]
]) / 255.0
# Per-class palettes (six classes, four shades each), same normalization.
class_colors = np.array([
    [[184, 184, 184],[125, 125, 125],[71, 71, 71], [000, 000, 000]],
    [[196, 255, 196],[178, 255, 178],[126, 255, 126],[000, 255, 000]],
    [[252, 189, 189],[255, 133, 133],[255, 77, 77], [255, 000, 000]],
    [[207, 255, 255],[176, 255, 245],[144, 255, 255],[000, 255, 255]],
    [[212, 212, 255],[149, 149, 255],[94, 94, 255],[000, 000, 255]],
    [[255, 209, 255],[255, 156, 255],[255, 101, 255],[255, 000, 255]]
]) / 255.0
def draw(row, axis, data, method, labels, colors, xlim, hsep, radius):
    """Draw one subplot row of bubble charts showing augmentation gains.

    data: (augment, columns) score array where column t = metric m of panel c;
    bubbles are plotted at x = score - baseline (column's first row), one
    bubble per augmentation level, larger/darker for stronger augmentation.
    Relies on module globals fontsizeLarge/fontsizeMedium (defined later in
    the file, before this is called).
    """
    augment = data.shape[0]
    labels = list(labels)
    num_elements = len(labels)
    # labels.insert(0,'')
    titles = ['Sunglasses Augmentation', 'Hands Augmentation', 'Sunglasses+Hands Aug.']
    for c in range(len(axis[row])):
        axis[row,c].set_aspect(1)
        axis[row,c].set_xlim(-1, xlim)
        # axis[row,c].set_ylim(-0.4, num_elements*0.5-0.1)
        axis[row, c].set_ylim(-0.4, num_elements * hsep - 0.1)
        axis[row, c].set_yticklabels(labels)
        axis[row, c].set_yticks(np.arange(num_elements) * hsep)
        # axis[row, c].yaxis.set_major_locator(MaxNLocator(integer=True))
        if c == 0:
            axis[row, c].set_ylabel(method, fontsize=fontsizeLarge)
        else:
            axis[row, c].get_yaxis().set_visible(False)
        if row == 0:
            axis[row, c].set_title(titles[c], fontsize=fontsizeMedium)
        if row == axis.shape[0] - 1:
            axis[row, c].set_xlabel('Gain', fontsize=fontsizeMedium)
        else:
            axis[row, c].get_xaxis().set_visible(False)
        # Shaded band marking the negative-gain region (x < 0).
        axis[row, c].add_patch(Polygon([[-1, -1], [0, -1], [0, 10], [-1, 10]], closed=True, fill=True, facecolor=[0.92,0.9,0.9]))
        for m in range(num_elements):
            for s in reversed(range(augment)):
                # Radius grows with augmentation strength sigma in [0, 1].
                sigma = s / float(augment-1)
                r = math.sqrt(1 + sigma) * radius
                t = m + c * num_elements
                circle = plt.Circle((data[s,t] - data[0,t], m * hsep), r, color=colors[m,augment-s-1], edgecolor=None)
                axis[row,c].add_artist(circle)
# ----------------------------------------------------------------------------------------------
fcn = np.array([
[94.86221, 89.94708, 78.54365, 90.62491, 94.86221, 89.94708, 78.54365, 90.62491, 94.86221, 89.94708, 78.54365, 90.62491],
[94.87768, 89.34935, 78.5738, 90.65072, 94.91543, 90.04007, 78.89132, 90.71592, 94.87198, 90.01351, 79.12107, 90.64796],
[94.82212, 89.07311, 78.57936, 90.55048, 95.01015, 89.30039, 79.2808, 90.85555, 94.92132, 89.81723, 79.51949, 90.71459],
[94.91106, 88.96342, 79.1459, 90.67938, 94.94046, 89.36422, 79.07776, 90.75119, 94.9023, 90.05563, 79.41762, 90.69509]
])
fcn_classes = np.array([
[94.75722, 86.38252, 71.85863, 61.34205, 72.44731, 84.47418, 94.75722, 86.38252, 71.85863, 61.34205, 72.44731, 84.47418, 94.75722, 86.38252, 71.85863, 61.34205, 72.44731, 84.47418],
[94.74529, 86.52661, 71.92953, 60.08587, 73.81507, 84.34044, 94.78213, 86.77009, 71.78261, 61.24385, 74.51008, 84.25919, 94.74461, 86.72296, 71.41357, 63.01671, 74.86192, 83.96667],
[94.70561, 86.4855, 71.23186, 60.26997, 74.76319, 84.02004, 94.91729, 86.89263, 72.04419, 62.55301, 75.33231, 83.94535, 94.789, 86.70316, 71.52878, 63.19029, 75.79578, 85.10994],
[94.74795, 86.68139, 71.68878, 62.33461, 74.78171, 84.64096, 94.79637, 86.70211, 71.9773, 61.76077, 73.98104, 85.249, 94.72738, 86.72993, 71.767, 62.76649, 75.47534, 85.03957]
])
deeplab = np.array([
[94.6848, 89.71417, 77.94909, 90.37054, 94.6848, 89.71417, 77.94909, 90.37054, 94.6848, 89.71417, 77.94909, 90.37054],
[94.78537, 89.59541, 78.56921, 90.51187, 94.86725, 89.7494, 78.62243, 90.63049, 94.81017, 89.91979, 78.41131, 90.55676],
[94.82899, 90.35099, 79.05202, 90.57593, 94.86047, 90.18027, 78.72145, 90.63608, 94.90303, 90.12334, 79.14438, 90.70572],
[94.89329, 90.06537, 79.38813, 90.67735, 94.9435, 90.07861, 78.87746, 90.75484, 94.89794, 90.37945, 79.37854, 90.70328]
])
deeplab_classes = np.array([
[94.51156, 86.31067, 71.33108, 60.57315, 71.34837, 83.61973, 94.51156, 86.31067, 71.33108, 60.57315, 71.34837, 83.61973, 94.51156, 86.31067, 71.33108, 60.57315, 71.34837, 83.61973],
[94.63511, 86.41761, 71.65853, 61.14348, 74.20494, 83.35558, 94.77132, 86.44033, 71.84831, 62.07319, 72.95129, 83.65015, 94.66345, 86.50839, 71.66187, 59.54201, 74.23029, 83.86187],
[94.66677, 86.3675, 71.90078, 61.5994, 75.30071, 84.47695, 94.73285, 86.59962, 71.78432, 62.6076, 72.75062, 83.8537, 94.75528, 86.66354, 71.85329, 61.44883, 74.97043, 85.17492],
[94.75281, 86.6263, 71.72533, 63.26911, 75.43251, 84.52274, 94.81422, 86.6394, 72.28983, 61.98376, 73.2231, 84.31447, 94.72211, 86.83926, 71.83235, 63.31498, 75.25782, 84.30474]
])
fontsizeSmall = 12
fontsizeMedium = 16
fontsizeLarge = 18
# NOTE(review): 'normal' is not a real font family name — matplotlib will
# warn and fall back; confirm whether a specific family was intended.
font = {'family':'normal', 'weight':'normal', 'size': fontsizeSmall}
plt.rc('font', **font)
metrics_fig, metrics_axis = plt.subplots(2, 3, sharey=True, sharex=True)
# One subplot row per model, comparing the four segmentation metrics.
draw(0, metrics_axis, fcn, 'FCN', ('Pixel Acc.', 'Mean Acc.', 'Mean IU', 'Freq.W. IU'), colors, 1.8, 0.4, 0.1)
draw(1, metrics_axis, deeplab, 'DeepLabV3', ('Pixel Acc.', 'Mean Acc.', 'Mean IU', 'Freq.W. IU'), colors, 1.8, 0.4, 0.1)
# class_fig, class_axis = plt.subplots(2, 3, sharey=True)
# draw(0, class_axis, fcn_classes, 'FCN', ('Bkgnd', 'Skin', 'Hair', 'Beard', 'Snglss', 'Wear'), class_colors, 4.5, 0.5, 0.15)
# draw(1, class_axis, deeplab_classes,'DeepLabV3',('Bkgnd', 'Skin', 'Hair', 'Beard', 'Snglss', 'Wear'), class_colors, 4.5, 0.5, 0.15)
plt.show()
import yaml
import os
import sys
import yaml
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from utils import load_poses, load_calib, load_files, load_vertex
from preprocessing.utils import *
from example.laserscan import *
from PC_cluster.ScanLineRun_cluster.build import ScanLineRun_Cluster
# data_path = '../data/sequences/08/velodyne/000030.bin'
# label_path = '../data/sequences/08/labels/000030.label'
# NOTE(review): both open() calls below leak their file handles — consider
# 'with open(...)'.
CFG = yaml.safe_load(open('../config/semantic-kitti-mos.yaml', 'r'))
config_filename = '../config/mask_preparing.yaml'
if len(sys.argv) > 1:
    config_filename = sys.argv[1]
# NOTE(review): lexicographic string comparison — '10.0' < '5.1' here;
# confirm against packaging.version or a tuple compare.
if yaml.__version__ >= '5.1':
    config = yaml.load(open(config_filename), Loader=yaml.FullLoader)
else:
    config = yaml.load(open(config_filename))
# ground truth info
color_dict = CFG["color_map"]
label_transfer_dict = CFG["learning_map"]
nclasses = len(color_dict)
# mask config
data_folder = config['data_folder']
debug = config['debug']
visualize = config['visualize']
range_image_params = config['range_image']
sequences = config['sequences']
# Spherical-projection scan container shared across all sequences/frames.
sem_scan = LaserScan(project=True,
                     flip_sign=False,
                     H=range_image_params['height'],
                     W=range_image_params['width'],
                     fov_up=range_image_params['fov_up'],
                     fov_down=range_image_params['fov_down'])
cluster=ScanLineRun_Cluster.ScanLineRun_Cluster(0.5, 1)
# create mask folder
for sequence in sequences:
    sequence_folder = os.path.join(data_folder, sequence)
    visualization_folder = config['visualization_folder']
    scan_folder = config['scan_folder']
    label_folder = config['label_folder']
    mask_image_folder = config['mask_image_folder']
    # Resolve the per-sequence absolute folders.
    visualization_folder = os.path.join(sequence_folder, visualization_folder)
    scan_folder = os.path.join(sequence_folder, scan_folder)
    label_folder = os.path.join(sequence_folder, label_folder)
    mask_image_folder = os.path.join(sequence_folder, mask_image_folder)
    # if not os.path.exists(mask_image_folder):
    #     os.makedirs(mask_image_folder)
    #
    # # create mask image visualization folder
    # if visualize:
    #     if not os.path.exists(visualization_folder):
    #         os.makedirs(visualization_folder)
    # load labels
    scan_paths = load_files(scan_folder)
    # label_paths = load_files(label_folder)
    # create scan object
    # index_range = list(range(0,len(scan_paths)))
    print('Clustering:', sequence, 'Frames: ', str(len(scan_paths)))
    for frame_idx in tqdm(range(len(scan_paths))):
        # NOTE(review): cluster_file_name and instance_label are computed but
        # never used/saved below (the save block is commented out) — confirm.
        cluster_file_name = os.path.join(mask_image_folder, str(frame_idx).zfill(6))
        sem_scan.open_scan(scan_paths[frame_idx])
        # x_img = sem_scan.proj_xyz[:,:,0]*sem_scan.proj_mask
        # y_img = sem_scan.proj_xyz[:,:,0]*sem_scan.proj_mask
        # z_img = sem_scan.proj_xyz[:,:,0]*sem_scan.proj_mask
        instance_label = cluster.ScanLineRun_cluster(sem_scan.proj_xyz[:,:,0],
                                                     sem_scan.proj_xyz[:,:,1],
                                                     sem_scan.proj_xyz[:,:,2],
                                                     sem_scan.proj_mask,
                                                     range_image_params['height'],
                                                     range_image_params['width']
                                                     )
        instance_label = np.array(instance_label)
        # ground removal
        # clustering
        # if visualize:
        #     fig = plt.figure(frameon=False, figsize=(16, 10))
        #     fig.set_size_inches(20.48, 0.64)
        #     ax = plt.Axes(fig, [0., 0., 1., 1.])
        #     ax.set_axis_off()
        #     fig.add_axes(ax)
        #     img = label_new.copy()
        #     img[img<2]=0
        #     ax.imshow(img, vmin=0, vmax=1)
        #     image_name = os.path.join(visualization_folder, str(frame_idx).zfill(6))
        #     plt.savefig(image_name)
        #     plt.close()
        #
        #     # save to npy file
        #     label_new_one_hot = depth_onehot(matrix=label_new, category=[0, 1, 2], on_value=1, off_value=0, channel_first=True)
        #
        #     np.save(mask_file_name, [label_new, label_new_one_hot, sem_scan.proj_idx])
| 4,299 | 1,435 |
class Solution:
    def nextGreatestLetter(self, letters, target):
        """
        Return the first letter strictly greater than target; wrap to the
        first element when none exists.
        :type letters: List[str]
        :type target: str
        :rtype: str
        """
        return next((ch for ch in letters if ch > target), letters[0])
if __name__ == '__main__':
    # Demo calls; the result wraps to the first letter when none is greater.
    # (The original trailing "else: pass" was dead code and is removed.)
    solution = Solution()
    print(solution.nextGreatestLetter(["c", "f", "j"], "a"))
    print(solution.nextGreatestLetter(["c", "f", "j"], "c"))
    print(solution.nextGreatestLetter(["c", "f", "j"], "k"))
| 529 | 182 |
from math import sqrt
def factorial(n):
    """Return n! computed recursively (n == 0 is the base case)."""
    return 1 if n == 0 else n * factorial(n - 1)
def estimate_pi():
    """Estimate pi with Ramanujan's series, summing terms until < 1e-15."""
    factor = (sqrt(2) * 2) / 9801
    total = 0
    k = 0
    while True:
        # Exact integer numerator/denominator, scaled by the float factor.
        num = factorial(4*k) * (1103 + 26390*k)
        den = (factorial(k)**4) * (396**(4*k))
        term = factor * num / den
        total += term
        if abs(term) < 1e-15:
            break
        k += 1
    return 1 / total
pi = estimate_pi()
# The call form is valid on both Python 2 (parenthesized expression) and 3;
# the bare "print pi" statement was a SyntaxError under Python 3.
print(pi)
| 592 | 247 |
# WARNING: Please don't edit this file. It was generated by Python/WinRT v1.0.0-beta.4
import enum
import winsdk
_ns_module = winsdk._import_ns_module("Windows.Graphics.Printing.PrintTicket")
try:
import winsdk.windows.data.xml.dom
except Exception:
pass
try:
import winsdk.windows.foundation
except Exception:
pass
try:
import winsdk.windows.foundation.collections
except Exception:
pass
class PrintTicketFeatureSelectionType(enum.IntEnum):
    """Selection mode of a print-ticket feature (generated WinRT binding)."""
    PICK_ONE = 0
    PICK_MANY = 1
class PrintTicketParameterDataType(enum.IntEnum):
    """Data type of a print-ticket parameter (generated WinRT binding)."""
    INTEGER = 0
    NUMERIC_STRING = 1
    STRING = 2
class PrintTicketValueType(enum.IntEnum):
    """Value type of a print-ticket value (generated WinRT binding)."""
    INTEGER = 0
    STRING = 1
    UNKNOWN = 2
# Re-export the runtime classes from the generated namespace module.
PrintTicketCapabilities = _ns_module.PrintTicketCapabilities
PrintTicketFeature = _ns_module.PrintTicketFeature
PrintTicketOption = _ns_module.PrintTicketOption
PrintTicketParameterDefinition = _ns_module.PrintTicketParameterDefinition
PrintTicketParameterInitializer = _ns_module.PrintTicketParameterInitializer
PrintTicketValue = _ns_module.PrintTicketValue
WorkflowPrintTicket = _ns_module.WorkflowPrintTicket
WorkflowPrintTicketValidationResult = _ns_module.WorkflowPrintTicketValidationResult
| 1,202 | 387 |
import threading
import sys
class ReturnValueThread(threading.Thread):
    """Thread subclass that captures the target's return value.

    ``join()`` returns the captured value; it stays ``None`` when there is
    no target or the target raised (the exception is reported to stderr).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.result = None

    def run(self):
        target = self._target
        if target is None:
            return  # could alternatively raise an exception, depends on the use case
        try:
            self.result = target(*self._args, **self._kwargs)
        except Exception as exc:
            print(f'{type(exc).__name__}: {exc}', file=sys.stderr)  # properly handle the exception

    def join(self, *args, **kwargs):
        super().join(*args, **kwargs)
        return self.result
class Other:
    """Wrapper around a single required string field ``name21``."""

    def __init__(self, name21):
        self.name21 = name21

    def get_name21(self):
        """Return the stored name21 value."""
        return self.name21

    @staticmethod
    def decode(data):
        """Build an Other from a dict; value must be a (Python 2) unicode string."""
        value = data["name21"]
        if not isinstance(value, unicode):
            raise Exception("not a string")
        return Other(value)

    def encode(self):
        """Serialize to a dict; name21 is required."""
        if self.name21 is None:
            raise Exception("name21: is a required field")
        return {"name21": self.name21}

    def __repr__(self):
        return "<Other name21:{!r}>".format(self.name21)
| 540 | 203 |
import asyncio
class EchoClient(asyncio.Protocol):
    """One-shot echo client: sends *message*, prints the reply, stops *loop*."""

    def __init__(self, message, loop):
        self.message = message  # fixed typo: attribute was misspelled 'mesage'
        self.loop = loop

    def connection_made(self, transport):
        # Send the payload as soon as the connection is established.
        transport.write(self.message.encode("utf-8"))
        self.transport = transport

    def data_received(self, data):
        # Print the echoed reply and close; closing triggers connection_lost.
        print(data.decode("utf-8"))
        self.transport.close()

    def connection_lost(self, exc):
        # Stop the event loop so the script can exit.
        self.loop.stop()
# NOTE(review): get_event_loop() outside a running loop is deprecated on
# modern Python — consider asyncio.run-based startup.
loop = asyncio.get_event_loop()
coro = loop.create_connection(lambda: EchoClient("こんにちわ", loop), "127.0.0.1", 8192)
loop.run_until_complete(coro)
# Keep serving callbacks until connection_lost stops the loop.
loop.run_forever()
loop.close()
| 623 | 217 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class ResultMeta(object):
    """Thin wrapper exposing typed properties over a result-metadata mapping."""
    def __init__(self, meta):
        # meta: mapping expected to provide 'Image-Width', 'Image-Height', 'Location'.
        self._meta = meta
    @property
    def width(self):
        """Image width in pixels (parsed from 'Image-Width')."""
        return int(self._meta['Image-Width'])
    @property
    def height(self):
        """Image height in pixels (parsed from 'Image-Height')."""
        return int(self._meta['Image-Height'])
    @property
    def location(self):
        """The raw 'Location' metadata entry."""
        return self._meta['Location']
    def __len__(self):
        # NOTE(review): no `size` attribute or property is defined anywhere in
        # this class, so len(instance) raises AttributeError — confirm whether
        # a `size` property was dropped or __len__ should be removed.
        return self.size
| 482 | 146 |
# -*- coding: UTF-8 -*-
from spider import *
def main_view():
    """Print the Python-2 CLI menu (list/search/update/quit, labels in Chinese)."""
    print '-------------------------------------------------------'
    print 'l.列出项目\ts.查找项目\tc.更新项目\tq.退出'
    print '|---0.顺序 \t|---0.关键字'
    print '|---1.收藏↑\t|---1.收藏>='
    print '|---2.收藏↓\t|---2.收藏<'
    print '|---3.评论↑\t|---3.评论>='
    print '|---4.评论↓\t|---4.评论<'
    print '|---5.评分↑\t|---5.评分>='
    print '|---6.评分↓\t|---6.评分<'
    print '-------------------------------------------------------'
app = OSCPspider()
# REPL: show the menu, dispatch on the first character of the command.
while True:
    main_view()
    op = raw_input("请输入:")
    if not op:
        continue  # empty input: redisplay the menu
    elif op[0] == 'l':
        flag = app.list_p(op)
    elif op[0] == 's':
        flag = app.search_p(op)
    elif op == 'c':
        flag = app.catch_p()
    elif op == 'q':
        exit()
    else:
        flag = 'no_op'
    if flag == 'no_op':
        print "没有该操作!"
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
import math
from matplotlib import pyplot as plt
from sklearn.preprocessing import LabelEncoder
# Map feature index 0..3 to its human-readable column name.
feature_dict = {i:label for i,label in zip(
                range(4),
                  ('sepal length in cm',
                  'sepal width in cm',
                  'petal length in cm',
                  'petal width in cm', ))}
# Iris dataset fetched straight from the UCI repository (no header row).
df = pd.io.parsers.read_csv(
    filepath_or_buffer='https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
    header=None,
    sep=',',
    )
print (feature_dict.items())
df.columns = [l for i,l in sorted(feature_dict.items())] + ['class label']
df.dropna(how="all", inplace=True) # to drop the empty line at file-end
df.tail()
# Features X (150x4) and integer class labels y in {1, 2, 3}.
X = df[['sepal length in cm','sepal width in cm', 'petal length in cm', 'petal width in cm']].values
y = df['class label'].values
enc = LabelEncoder()
label_encoder = enc.fit(y)
y = label_encoder.transform(y) + 1
label_dict = {1: 'Setosa', 2: 'Versicolor', 3:'Virginica'}
# One histogram panel per feature, classes overlaid in three colors.
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12,6))
for ax,cnt in zip(axes.ravel(), range(4)):
    # set bin sizes
    min_b = math.floor(np.min(X[:,cnt]))
    max_b = math.ceil(np.max(X[:,cnt]))
    bins = np.linspace(min_b, max_b, 25)
    # plottling the histograms
    for lab,col in zip(range(1,4), ('blue', 'red', 'green')):
        ax.hist(X[y==lab, cnt],
                   color=col,
                   label='class %s' %label_dict[lab],
                   bins=bins,
                   alpha=0.5,)
    ylims = ax.get_ylim()
    # plot annotation
    leg = ax.legend(loc='upper right', fancybox=True, fontsize=8)
    leg.get_frame().set_alpha(0.5)
    ax.set_ylim([0, max(ylims)+2])
    ax.set_xlabel(feature_dict[cnt])
    ax.set_title('Iris histogram #%s' %str(cnt+1))
    # hide axis ticks
    # NOTE(review): "off"/"on" strings here date from old matplotlib;
    # current releases expect booleans for these tick_params arguments.
    ax.tick_params(axis="both", which="both", bottom="off", top="off",
            labelbottom="on", left="off", right="off", labelleft="on")
    # remove axis spines
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.spines["left"].set_visible(False)
axes[0][0].set_ylabel('count')
axes[1][0].set_ylabel('count')
fig.tight_layout()
plt.show()
np.set_printoptions(precision=4)
# Per-class mean vectors (classes 1..3).
mean_vectors = []
for cl in range(1,4):
    mean_vectors.append(np.mean(X[y==cl], axis=0))
    print('Mean Vector class %s: %s\n' %(cl, mean_vectors[cl-1]))
# Within-class scatter matrix S_W = sum of per-class scatter matrices.
S_W = np.zeros((4,4))
for cl,mv in zip(range(1,4), mean_vectors):
    class_sc_mat = np.zeros((4,4)) # scatter matrix for every class
    for row in X[y == cl]:
        row, mv = row.reshape(4,1), mv.reshape(4,1) # make column vectors
        class_sc_mat += (row-mv).dot((row-mv).T)
    S_W += class_sc_mat # sum class scatter matrices
print('within-class Scatter Matrix:\n', S_W)
# Between-class scatter matrix S_B, weighted by class sizes.
overall_mean = np.mean(X, axis=0)
S_B = np.zeros((4,4))
for i,mean_vec in enumerate(mean_vectors):
    n = X[y==i+1,:].shape[0]
    mean_vec = mean_vec.reshape(4,1) # make column vector
    overall_mean = overall_mean.reshape(4,1) # make column vector
    S_B += n * (mean_vec - overall_mean).dot((mean_vec - overall_mean).T)
print('between-class Scatter Matrix:\n', S_B)
# Solve the generalized eigenproblem inv(S_W) S_B v = lambda v.
eig_vals, eig_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
for i in range(len(eig_vals)):
    eigvec_sc = eig_vecs[:,i].reshape(4,1)
    print('\nEigenvector {}: \n{}'.format(i+1, eigvec_sc.real))
    print('Eigenvalue {:}: {:.2e}'.format(i+1, eig_vals[i].real))
# Sanity-check each eigenpair satisfies the eigen equation.
for i in range(len(eig_vals)):
    eigv = eig_vecs[:,i].reshape(4,1)
    np.testing.assert_array_almost_equal(np.linalg.inv(S_W).dot(S_B).dot(eigv),
                                         eig_vals[i] * eigv,
                                         decimal=6, err_msg='', verbose=True)
print('ok')
# Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:,i]) for i in range(len(eig_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eig_pairs = sorted(eig_pairs, key=lambda k: k[0], reverse=True)
# Visually confirm that the list is correctly sorted by decreasing eigenvalues
print('Eigenvalues in decreasing order:\n')
for i in eig_pairs:
    print(i[0])
print('Variance explained:\n')
eigv_sum = sum(eig_vals)
for i,j in enumerate(eig_pairs):
    print('eigenvalue {0:}: {1:.2%}'.format(i+1, (j[0]/eigv_sum).real))
# Keep the top-2 discriminants as the projection matrix W (4x2).
W = np.hstack((eig_pairs[0][1].reshape(4,1), eig_pairs[1][1].reshape(4,1)))
print('Matrix W:\n', W.real)
X_lda = X.dot(W)
assert X_lda.shape == (150,2), "The matrix is not 150x2 dimensional."
def plot_step_lda():
    """Scatter-plot the LDA projection (module-level ``X_lda``) in the plane
    of the first two linear discriminants, one marker/color per class.

    Reads module-level ``X_lda``, ``y`` and ``label_dict``; shows the figure
    as a side effect and returns nothing.
    """
    ax = plt.subplot(111)
    for label, marker, color in zip(
            range(1, 4), ('^', 's', 'o'), ('blue', 'red', 'green')):
        plt.scatter(x=X_lda[:, 0].real[y == label],
                    y=X_lda[:, 1].real[y == label],
                    marker=marker,
                    color=color,
                    alpha=0.5,
                    label=label_dict[label]
                    )
    plt.xlabel('LD1')
    plt.ylabel('LD2')
    leg = plt.legend(loc='upper right', fancybox=True)
    leg.get_frame().set_alpha(0.5)
    plt.title('LDA: Iris projection onto the first 2 linear discriminants')
    # hide axis ticks -- fix: real booleans instead of the string values
    # "off"/"on", which modern matplotlib rejects
    plt.tick_params(axis="both", which="both", bottom=False, top=False,
                    labelbottom=True, left=False, right=False, labelleft=True)
    # remove all four axis spines
    for side in ("top", "right", "bottom", "left"):
        ax.spines[side].set_visible(False)
    plt.grid()
    plt.tight_layout()  # fix: was `plt.tight_layout` (attribute access, never called)
    plt.show()
plot_step_lda()
| 5,653 | 2,175 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-07-09 23:15
from __future__ import unicode_literals
from django.db import migrations, models
import js_color_picker.fields
class Migration(migrations.Migration):
    # Auto-generated Django migration (Django 1.11): adds four visibility/SEO
    # boolean flags to the ``service`` model and changes the related-services
    # plugin background colour to a fixed choice palette. Do not edit the
    # operations by hand unless the migration has not yet been applied anywhere.

    dependencies = [
        ('js_services', '0012_auto_20190430_0804'),
    ]

    operations = [
        # Robots directives; default False so existing rows keep current behaviour.
        migrations.AddField(
            model_name='service',
            name='nofollow',
            field=models.BooleanField(default=False, verbose_name='nofollow'),
        ),
        migrations.AddField(
            model_name='service',
            name='noindex',
            field=models.BooleanField(default=False, verbose_name='noindex'),
        ),
        # Sitemap visibility flags; default True so existing services stay listed.
        migrations.AddField(
            model_name='service',
            name='show_on_sitemap',
            field=models.BooleanField(default=True, verbose_name='Show on sitemap'),
        ),
        migrations.AddField(
            model_name='service',
            name='show_on_xml_sitemap',
            field=models.BooleanField(default=True, verbose_name='Show on xml sitemap'),
        ),
        # Background colour now restricted to the project's named palette
        # (mode='choice'); keys are hex colours, values are display names.
        migrations.AlterField(
            model_name='relatedservicesplugin',
            name='background_color',
            field=js_color_picker.fields.RGBColorField(blank=True, colors={'#2B3B46': 'dark', '#323B50': 'grey', '#919FA6': 'cool grey', '#CBCBCB': 'medium-grey', '#FBDB4C': 'maize', '#f5f5f5': 'light-grey', '#ffffff': 'white'}, mode='choice', null=True, verbose_name='Background Color'),
        ),
    ]
| 1,509 | 484 |
""" gentex.texmeas package
"""
import numpy as np
class Texmeas:
    """Class texmeas for generating texture measures from a co-occurrence matrix.

    Parameters
    ----------
    comat: ndarray
        Non-normalized co-occurrence matrix - chi-squared conditional
        distribution comparisons require the actual number of counts so don't
        normalize this before sending in
    measure: string
        Texture measure (default = 'Statistical Complexity'). Choice of:

        * 'CM Entropy'
        * 'EM Entropy'
        * 'Statistical Complexity'
        * 'Energy Uniformity'
        * 'Maximum Probability'
        * 'Contrast'
        * 'Inverse Difference Moment'
        * 'Correlation'
        * 'Probability of Run Length'
        * 'Epsilon Machine Run Length'
        * 'Run Length Asymmetry'
        * 'Homogeneity'
        * 'Cluster Tendency'
        * 'Multifractal Spectrum Energy Range'
        * 'Multifractal Spectrum Entropy Range'
    coordmom: int
        Moment of coordinate differences in the co-occurrence matrix,
        needed for calculating 'Contrast' and 'Inverse Difference Moment'
        (default=0)
    probmom: int
        Moment of individual co-occurrence probabilities, needed for
        calculating 'Contrast' and 'Inverse Difference Moment' (default=0)
    rllen: int
        Length of run length used for generating the probability of a run
        length (the higher this probability the larger the constant patches
        on the scale used for generating the co-occurrence matrix) or the
        epsilon machine run length (default=0)
    clusmom: int
        Moment used for generating co-occurrence cluster tendency (default=0)
    clusp: float
        Chi-squared p value to use when clustering epsilon machine rows
        (default=0.001)
    samelev: bool
        Whether to treat the rows and columns in the co-occurrence matrix as
        identical 'states' (the methods are very general so this needn't be
        the case, e.g. different template shapes from different images with
        different quantization levels could be used to generate a
        co-occurrence matrix of arbitrary shape).
        default = True assumes the co-occurrence matrix is square and the
        rows and columns correspond to the same 'state'
    betas: sequence of 3 numbers
        The lower limit, the upper limit and the number of steps to use as
        the 'inverse temperature' range for estimating the multifractal
        spectrum from an epsilon machine - getting the range right for an
        'arbitrary' epsilon machine is tricky and is expected to be reset
        over a number of trials before getting a full spectrum estimate.
        For details on the rationale and algorithm see:
        K. Young and J. P. Crutchfield, 'Fluctuation Spectroscopy',
        Chaos, Solitons, and Fractals 4 (1993) 5-39.

    Attributes
    ----------
    emclus: int
        Number of clusters ('states') found when estimating an epsilon
        machine from the co-occurrence matrix.
    emest: bool
        Whether or not an epsilon machine has been estimated yet
    emmat: ndarray
        The estimated epsilon machine as a standard Markov process
        transition matrix.
    condo: 2d-array
        Co-occurrence matrix renormalized as a row-wise matrix of conditional
        probabilities - built as part of epsilon machine estimation
    emclasses: ndarray
        Which of the emclus classes each row in condo (and hence the
        co-occurrence matrix) belongs to
    clusp: float
        Chi-squared p value to use for clustering epsilon machine rows
    val: float
        Value of most recently calculated texture measure
    mfsspec: array
        Array containing the multifractal spectral estimates obtained
        over the range of 'inverse temperatures' provided in betas
    currval: string
        Name of the measure that produced the current value in val
    """

    def __init__(self, comat, measure="Statistical Complexity", coordmom=0, probmom=0, rllen=0, clusmom=0, clusp=0.001,
                 samelev=True, betas=(-20, 20, 40)):
        # fix: betas default is now an immutable tuple (the original used a
        # mutable list default, shared across calls); lists still accepted.
        self.comat = comat
        self.totcount = np.sum(comat)  # raw count total, to recover the
        # histogram after normalization
        self.measure = measure
        self.coordmom = coordmom
        self.probmom = probmom
        self.rllen = rllen
        self.clusmom = clusmom
        self.clusp = clusp  # chi-squared p value for conditional
        # distribution similarity
        self.emclus = 0  # actual number of clusters found for the
        # epsilon machine
        self.emest = False  # whether the epsilon machine has been estimated
        self.mfsest = False  # whether the multifractal spectrum has been
        # estimated
        self.emmat = np.array([])  # epsilon machine pre-array
        self.condo = np.array([])  # raw em transition matrix (i.e. array of
        # conditional distributions)
        self.emclasses = np.array([])  # class index for each row of self.emmat
        self.samelev = samelev  # whether pre and post epsilon machine states
        # should be treated as the same
        if self.comat.shape[0] != self.comat.shape[1]:
            self.samelev = False  # forced False when #rows != #cols in the
            # co-occurrence matrix
        self.betas = betas  # "inverse temperature" range and step for
        # estimating the multifractal spectrum
        self.val = 0.0
        self.currval = ""
        # Per-measure caches; NaN means "not yet computed".
        self.cme = np.nan  # CM Entropy
        self.eme = np.nan  # EM Entropy
        self.stc = np.nan  # Statistical Complexity
        self.enu = np.nan  # Energy Uniformity
        self.map = np.nan  # Maximum Probability
        self.con = np.nan  # Contrast
        self.idm = np.nan  # Inverse Difference Moment
        self.cor = np.nan  # Correlation
        self.prl = np.nan  # Probability of Run Length
        self.erl = np.nan  # Epsilon Machine Run Length
        self.rla = np.nan  # Run Length Asymmetry
        self.hom = np.nan  # Homogeneity
        self.clt = np.nan  # Cluster Tendency
        self.mfu = np.nan  # Multifractal max,min energy diff.
        self.mfs = np.nan  # Multifractal max,min entropy diff.
        # Initially empty array for the multifractal spectrum, filled with
        # one [u, s(u)] pair per step specified in self.betas.
        self.mfsspec = np.array([])
        # Normalize the co-occurrence matrix in case it's not normalized.
        if np.sum(self.comat) != 1.0:
            self.comat = np.float_(self.comat) / np.sum(self.comat)
        # Calculate an initial texture measure.
        self.calc_measure(self.measure)

    def calc_measure(self, measure='Statistical Complexity', coordmom=0, probmom=0, rllen=0, clusmom=0, samelev=True):
        """Calculates the appropriate texture measure, puts the value in the
        class variable val and updates the class variable currval with the
        passed string.

        For a discussion of Haralick co-occurrence style texture measures see:
        R. M. Haralick, 'Statistical and structural approaches to texture'.
        Proceedings of the IEEE May 1979, 67(5). 786-804.

        Parameters
        ----------
        measure: string
            One of the following measure methods
            (default = 'Statistical Complexity'):

            - 'CM Entropy'
            - 'EM Entropy'
            - 'Statistical Complexity'
            - 'Energy Uniformity'
            - 'Maximum Probability'
            - 'Contrast'
            - 'Inverse Difference Moment'
            - 'Correlation'
            - 'Probability of Run Length'
            - 'Epsilon Machine Run Length'
            - 'Run Length Asymmetry'
            - 'Homogeneity'
            - 'Cluster Tendency'
            - 'Multifractal Spectrum Energy Range'
            - 'Multifractal Spectrum Entropy Range'
        """
        self.measure = measure
        # Allow changed values of the following class variables to be passed
        # in (0 / True means "keep the current setting").
        if coordmom != 0:
            self.coordmom = coordmom
        if probmom != 0:
            self.probmom = probmom
        if rllen != 0:
            self.rllen = rllen
        if clusmom != 0:
            self.clusmom = clusmom
        if not samelev:
            self.samelev = False
        if self.measure == "CM Entropy":
            if np.isnan(self.cme):
                # Shannon entropy of the co-occurrence distribution; the
                # np.where guards replace p=0 terms with 0 instead of NaN.
                self.cme = np.sum(
                    -np.where(self.comat > 0.0, self.comat, 1.0) * np.where(self.comat > 0.0, np.log2(self.comat), 0.0))
            self.val = self.cme
            self.currval = "CM Entropy"
        elif self.measure == "EM Entropy":
            if np.isnan(self.eme):
                import scipy.linalg as L
                if not self.emest:
                    self.est_em()
                # Get the left eigenvector associated with lambda = 1
                # (largest eigenvalue).
                [e, v] = L.eig(np.nan_to_num(self.emmat), left=True, right=False)
                # Node probabilities are elements of the normalized left
                # eigenvector associated with eigenvalue 1. We do NOT assume
                # the eigenvalues come back sorted - explicitly pick the
                # eigenvector whose eigenvalue has the greatest real part.
                maxind = np.where(np.real(e) == np.max(np.real(e)))[0][0]
                nodep = v[:, maxind] / sum(v[:, maxind])
                self.eme = -np.sum(
                    np.transpose(nodep * np.ones(self.emmat.shape)) * (self.emmat * np.nan_to_num(np.log2(self.emmat))))
            self.val = self.eme
            self.currval = "EM Entropy"
        elif self.measure == "Statistical Complexity":
            if np.isnan(self.stc):
                import scipy.linalg as L
                # Estimate the epsilon machine if it hasn't been made.
                if not self.emest:
                    self.est_em()
                # Get the left eigenvector associated with lambda = 1
                # (largest eigenvalue) - see EM Entropy for details.
                [e, v] = L.eig(np.nan_to_num(self.emmat), left=True, right=False)
                maxind = np.where(np.real(e) == np.max(np.real(e)))[0][0]
                nodep = v[:, maxind] / sum(v[:, maxind])
                # Statistical complexity = entropy of the node distribution.
                self.stc = -np.sum(nodep * np.log2(nodep))
            self.val = self.stc
            self.currval = "Statistical Complexity"
        elif self.measure == "Energy Uniformity":
            if np.isnan(self.enu):
                # Sum of squared probabilities.
                self.enu = np.sum(np.where(self.comat > 0.0, self.comat * self.comat, 0.0))
            self.val = self.enu
            self.currval = "Energy Uniformity"
        elif self.measure == "Maximum Probability":
            # fix: was `self.map is np.nan` - works for the NaN singleton but
            # np.isnan is the robust/consistent check used everywhere else.
            if np.isnan(self.map):
                self.map = np.max(self.comat)
            self.val = self.map
            self.currval = "Maximum Probability"
        elif self.measure == "Contrast":
            if np.isnan(self.con):
                if self.coordmom == 0 or self.probmom == 0:
                    if self.coordmom == 0:
                        print("Nonzero coordinate moment is required for calculating Contrast")
                    if self.probmom == 0:
                        print("Nonzero probability moment is required for calculating Contrast")
                else:
                    # crows/ccols hold each cell's row/column index.
                    crows = np.zeros(self.comat.shape)
                    ccols = np.zeros(self.comat.shape)
                    for i in range(self.comat.shape[0]):
                        crows[i, :] = i
                        ccols[:, i] = i
                    self.con = np.sum((np.abs(crows - ccols) ** self.coordmom) * (self.comat ** self.probmom))
            self.val = self.con
            self.currval = "Contrast"
        elif self.measure == "Inverse Difference Moment":
            if np.isnan(self.idm):
                if self.coordmom == 0 or self.probmom == 0:
                    if self.coordmom == 0:
                        print("Nonzero coordinate moment is required for calculating Inverse Difference Moment")
                    if self.probmom == 0:
                        print("Nonzero probability moment is required for calculating Inverse Difference Moment")
                else:
                    crows = np.zeros(self.comat.shape)
                    ccols = np.zeros(self.comat.shape)
                    for i in range(self.comat.shape[0]):
                        crows[i, :] = i
                        ccols[:, i] = i
                    codiffs = np.abs(crows - ccols) ** self.coordmom
                    # Minimum coordinate difference for which the ratio is
                    # calculated; guards the division below so the masked-out
                    # cells don't blow up and generate a warning.
                    codiff_eps = 0.0000001
                    codiffs_ok = np.where(codiffs > codiff_eps, codiffs, 1.0)
                    self.idm = np.sum(np.where(codiffs > codiff_eps, (self.comat ** self.probmom) / codiffs_ok, 0.0))
            self.val = self.idm
            self.currval = "Inverse Difference Moment"
        elif self.measure == "Correlation":
            if np.isnan(self.cor):
                import scipy.stats as ss
                crows = np.zeros(self.comat.shape)
                ccols = np.zeros(self.comat.shape)
                for i in range(self.comat.shape[0]):
                    crows[i, :] = i + 1  # indices start at 1 for Correlation calcs.
                    ccols[:, i] = i + 1
                rowmom = np.sum(crows * self.comat)
                colmom = np.sum(ccols * self.comat)
                comatvar = np.var(np.ravel(self.comat * crows))
                self.cor = np.sum((crows - rowmom) * (ccols - colmom) * self.comat) / comatvar
            self.val = self.cor
            self.currval = "Correlation"
        elif self.measure == "Probability of Run Length":
            if np.isnan(self.prl):
                if self.rllen == 0:
                    print("Nonzero run length is required for calculating Probability of Run Length")
                else:
                    colprobs = np.zeros(self.comat.shape[0])
                    for i in range(self.comat.shape[0]):
                        colprobs[i] = np.sum(self.comat[i, :])
                    self.prl = 0.0
                    for i in range(self.comat.shape[0]):
                        if colprobs[i] != 0.0:
                            self.prl += ((colprobs[i] - self.comat[i, i]) ** 2 * (
                                    self.comat[i, i] ** (self.rllen - 1))) / (colprobs[i] ** self.rllen)
            self.val = self.prl
            self.currval = "Probability of Run Length"
        elif self.measure == "Epsilon Machine Run Length":
            if np.isnan(self.erl):
                if self.rllen == 0:
                    print("Nonzero run length is required for calculating Epsilon Machine Run Length")
                else:
                    if not self.emest:
                        self.est_em()
                    self.erl = 0.0
                    colprobs = np.zeros(self.emmat.shape[0])
                    for i in range(self.emmat.shape[0]):
                        colprobs[i] = np.sum(self.emmat[i, :])
                    # NOTE(review): unlike the Probability of Run Length
                    # branch there is no zero-row guard here - a zero row in
                    # emmat would divide by zero; preserved as-is.
                    for i in range(self.emmat.shape[0]):
                        self.erl += ((colprobs[i] - self.emmat[i, i]) ** 2 * (self.emmat[i, i] ** (self.rllen - 1))) / (
                                colprobs[i] ** self.rllen)
            self.val = self.erl
            self.currval = "Epsilon Machine Run Length"
        elif self.measure == "Run Length Asymmetry":
            if np.isnan(self.rla):
                if self.rllen == 0:
                    print("Nonzero run length is required for calculating Run Length Asymmetry")
                else:
                    colprobs = np.zeros(self.comat.shape[0])
                    rowprobs = np.zeros(self.comat.shape[0])
                    for i in range(self.comat.shape[0]):
                        colprobs[i] = np.sum(self.comat[i, :])
                        rowprobs[i] = np.sum(self.comat[:, i])
                    colval = 0.0
                    rowval = 0.0
                    for i in range(self.comat.shape[0]):
                        if colprobs[i] != 0.0:
                            colval += ((colprobs[i] - self.comat[i, i]) ** 2 * (
                                    self.comat[i, i] ** (self.rllen - 1))) / (colprobs[i] ** self.rllen)
                        if rowprobs[i] != 0.0:
                            rowval += ((rowprobs[i] - self.comat[i, i]) ** 2 * (
                                    self.comat[i, i] ** (self.rllen - 1))) / (rowprobs[i] ** self.rllen)
                    self.rla = np.abs(colval - rowval)
            self.val = self.rla
            self.currval = "Run Length Asymmetry"
        elif self.measure == "Homogeneity":
            if np.isnan(self.hom):
                crows = np.zeros(self.comat.shape)
                ccols = np.zeros(self.comat.shape)
                for i in range(self.comat.shape[0]):
                    crows[i, :] = i
                    ccols[:, i] = i
                self.hom = np.sum((self.comat) / (1 + np.abs(crows - ccols)))
            self.val = self.hom
            self.currval = "Homogeneity"
        elif self.measure == "Cluster Tendency":
            if np.isnan(self.clt):
                if self.clusmom == 0:
                    print("Nonzero cluster moment is required for calculating Cluster Tendency")
                else:
                    crows = np.zeros(self.comat.shape)
                    ccols = np.zeros(self.comat.shape)
                    for i in range(self.comat.shape[0]):
                        crows[i, :] = i + 1  # indices start at 1, as for Correlation.
                        ccols[:, i] = i + 1
                    rowmom = np.sum(crows * self.comat)
                    colmom = np.sum(ccols * self.comat)
                    self.clt = np.sum(((crows + ccols - rowmom - colmom) ** self.clusmom) * self.comat)
            self.val = self.clt
            self.currval = "Cluster Tendency"
        elif self.measure == "Multifractal Spectrum Energy Range":
            if not self.emest:  # estimate epsilon machine
                self.est_em()
            if not self.mfsest:  # estimate multifractal spectrum
                self.est_multi_frac_spec()
            if self.mfsspec.size != 0:
                self.mfu = np.max(self.mfsspec[:, 0]) - np.min(self.mfsspec[:, 0])
            else:
                self.mfu = 0.0
            self.val = self.mfu
            self.currval = "Multifractal Spectrum Energy Range"
        elif self.measure == "Multifractal Spectrum Entropy Range":
            if not self.emest:  # estimate epsilon machine
                self.est_em()
            if not self.mfsest:  # estimate multifractal spectrum
                self.est_multi_frac_spec()
            if self.mfsspec.size != 0:
                self.mfs = np.max(self.mfsspec[:, 1]) - np.min(self.mfsspec[:, 1])
            else:
                self.mfs = 0.0
            self.val = self.mfs
            self.currval = "Multifractal Spectrum Entropy Range"
        else:
            # fix: this was previously a bare tuple expression (a no-op);
            # actually report the unknown measure name.
            print("Sorry don't know about texture measure ", self.measure)

    def est_multi_frac_spec(self):
        """Estimate the multifractal spectrum [u, s(u)] over the 'inverse
        temperature' range self.betas, storing the result in self.mfsspec.

        See K. Young and J. P. Crutchfield, 'Fluctuation Spectroscopy',
        Chaos, Solitons, and Fractals 4 (1993) 5-39.
        """
        import scipy.linalg as L
        self.mfsspec = []
        if not self.emest:
            self.est_em()
        if self.betas[2] == 1:
            print(
                "Only 1 step asked for re. calculating multifractal spectrum, using lower limit specified, i.e. betas[0]")
            step = 0
        else:
            # fix: np.float was removed in NumPy 1.24; use the builtin float.
            step = (float(self.betas[1]) - float(self.betas[0])) / (float(self.betas[2]) - 1)
        for i in range(self.betas[2]):
            if i == 0:  # in case self.betas[2] = 1 => step = 0
                cb = float(self.betas[0])
            else:
                cb = float(self.betas[0] + i * step)
            if cb == 1.0:
                # In this case just do the standard metric entropy calculation
                # (e.g. see the EM Entropy branch of calc_measure for
                # comments) as both u and s(u) equal the metric entropy.
                [e, v] = L.eig(np.nan_to_num(self.emmat), left=True, right=False)
                maxind = np.where(np.real(e) == np.max(np.real(e)))[0][0]
                nodep = v[:, maxind] / sum(v[:, maxind])
                su = -np.sum(
                    np.transpose(nodep * np.ones(self.emmat.shape)) * (self.emmat * np.nan_to_num(np.log2(self.emmat))))
                self.mfsspec.append([su, su])
            elif cb == 0.0:
                # Skip it for now - need to re-figure out the beta -> 0 limit;
                # placeholder assignment keeps the branch explicit.
                splat = 0
            else:  # cb != 0, 1
                # Get the "betafied" epsilon machine.
                a = np.where(self.emmat > 0.0, np.exp(cb * np.log(self.emmat)), 0.0)
                # Get the maximum eigenvalue and take the log
                # ("inv. temp." times "free energy").
                [eb, vb] = L.eig(np.nan_to_num(a), left=False, right=True)
                maxind = np.where(np.real(eb) == np.max(np.real(eb)))[0][0]
                fe = np.log2(np.real(eb[maxind]))
                # Stochasticize the betafied epsilon machine.
                b = np.dot((1 / eb[maxind]) * np.diag((1 / vb[:, maxind])), np.dot(a, (np.diag(vb[:, maxind]))))
                # Get the metric entropy of the stochasticized machine -
                # the same as "entropy" s(u) as a function of "energy" u,
                # i.e. the multifractal spectrum is the analogue of the
                # thermodynamic spectrum s(u) vs. u.
                [e, v] = L.eig(np.nan_to_num(b), left=True, right=False)
                maxind = np.where(np.real(e) == np.max(np.real(e)))[0][0]
                nodep = v[:, maxind] / sum(v[:, maxind])
                # Make sure values are real - linalg sometimes returns complex
                # values with zero imaginary part.
                su = abs(-np.sum(np.transpose(nodep * np.ones(b.shape)) * (b * np.nan_to_num(np.log2(b)))))
                # Then get the energy - i.e. the "temperature"-normalized
                # difference between "entropy" and "free energy".
                u = abs((su - fe) / cb)
                self.mfsspec.append([u, su])
        self.mfsspec = np.array(np.real(self.mfsspec))
        # Drop NaN rows - e.g. when the beta range wasn't quite right.
        self.mfsspec = np.delete(self.mfsspec, np.where(np.isnan(self.mfsspec))[0], 0)
        self.mfsest = True

    def est_em(self):
        """Estimate an epsilon machine from a co-occurrence matrix with
        #rows = #cols, done implicitly whenever one of the related
        complexity/entropy measures (EM Entropy, Statistical Complexity,
        Epsilon Machine Run Length) is calculated.

        For info on epsilon machines and the related measures see:

        - K. Young, Y. Chen, J. Kornak, G. B. Matson, N. Schuff, 'Summarizing complexity in high dimensions', \
        Phys Rev Lett. (2005) Mar 11;94(9):098701.
        - C. R. Shalizi and J. P. Crutchfield, 'Computational Mechanics: Pattern and Prediction, Structure and \
        Simplicity', Journal of Statistical Physics 104 (2001) 819--881.
        - K. Young and J. P. Crutchfield, 'Fluctuation Spectroscopy', Chaos, Solitons, and Fractals 4 (1993) 5-39.
        - J. P. Crutchfield and K. Young, 'Computation at the Onset of Chaos', in Entropy, Complexity, and Physics \
        of Information, W. Zurek, editor, SFI Studies in the Sciences of Complexity, VIII, Addison-Wesley, Reading,\
        Massachusetts (1990) 223-269.
        """
        import scipy.stats as ss
        # Make the conditional distribution matrix, i.e. the epsilon machine
        # (row probabilities).
        self.condo = np.transpose(np.transpose(self.comat) / np.sum(self.comat, axis=1))
        # The following clustering is O(n^2) - need to figure a better way.
        found = []
        self.emclasses = np.zeros(self.condo.shape[0], int)
        onclass = 0
        for i in range(self.condo.shape[0]):
            if i not in found:
                found.append(i)
                # If the row is negligible just tack it on to class 0;
                # the code below will just combine it in.
                if np.sum(self.condo[i, :]) < 0.00000001:
                    self.emclasses[i] = 0
                else:
                    # It's a new class.
                    self.emclasses[i] = onclass
                    for j in range(i + 1, self.condo.shape[0]):
                        if j not in found:
                            # Check if rows ("distributions") are "close",
                            # i.e. p value in chi-squared test < self.clusp.
                            tester = ss.chisquare(self.totcount * self.condo[i, :], self.totcount * self.condo[j, :])[1]
                            if tester < self.clusp:  # they're different
                                found.append(j)
                                onclass += 1
                                self.emclasses[j] = onclass
                            else:  # they're not
                                found.append(j)
                                self.emclasses[j] = onclass
        self.emclus = onclass + 1
        for i in range(self.emclus):
            rowinds = tuple(np.where(self.emclasses == i)[0])
            if i == 0:
                a = np.add.reduce(self.comat[rowinds, :], axis=0)
            else:
                a = np.vstack((a, np.add.reduce(self.comat[rowinds, :], axis=0)))
        # If initial/final states are the same we need to also combine columns.
        if self.samelev:
            if len(a.shape) > 1:
                for i in range(self.emclus):
                    colinds = tuple(np.where(self.emclasses == i)[0])
                    # Seems like it has to be done row-wise first...
                    if i == 0:
                        b = np.add.reduce(a[:, colinds], axis=1)
                    else:
                        b = np.vstack((b, np.add.reduce(a[:, colinds], axis=1)))
                # ... then transposed.
            else:
                for i in range(a.shape[0]):
                    if i == 0:
                        b = a
                    else:
                        b = np.vstack([b, a])
            self.emmat = np.transpose(b)
        else:  # do it all over again for columns
            found = []
            self.emclasses = np.zeros(self.condo.shape[1], int)
            onclass = 0
            for i in range(self.condo.shape[1]):
                if i not in found:
                    found.append(i)
                    # If the column is negligible just tack it on to class 0.
                    if np.sum(self.condo[:, i]) < 0.00000001:
                        self.emclasses[i] = 0
                    else:
                        # It's a new class.
                        self.emclasses[i] = onclass
                        # fix: range endpoints were reversed
                        # (range(shape[1], i+1) is empty), so the column
                        # clustering loop never executed.
                        for j in range(i + 1, self.condo.shape[1]):
                            if j not in found:
                                # Check if columns ("distributions") are
                                # "close", i.e. chi-squared p < self.clusp.
                                tester = \
                                    ss.chisquare(self.totcount * self.condo[:, i], self.totcount * self.condo[:, j])[1]
                                if tester < self.clusp:  # they're different
                                    found.append(j)
                                    onclass += 1
                                    self.emclasses[j] = onclass
                                else:  # they're not
                                    found.append(j)
                                    self.emclasses[j] = onclass
            self.emclus = onclass + 1
            for i in range(self.emclus):
                # fix: np.where on a 1-D array returns a 1-tuple, so [1]
                # raised IndexError; [0] is the index array.
                colinds = tuple(np.where(self.emclasses == i)[0])
                if i == 0:
                    a = np.add.reduce(self.comat[:, colinds], axis=1)
                else:
                    a = np.vstack((a, np.add.reduce(self.comat[:, colinds], axis=1)))
            self.emmat = np.transpose(a)
        # ... and finally turn the combined counts into a Markov matrix.
        self.emmat = np.transpose(np.transpose(self.emmat) / np.sum(self.emmat, axis=1))
        self.emest = True
| 29,968 | 8,946 |
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zk
@contact: kun.zhang@nuance.com
@file: dataio.py
@time: 8/27/2019 4:31 PM
@desc:
"""
import os
def load_txt_data(path, mode='utf-8-sig', origin=False):
    """
    This func is used to reading txt file
    :param origin: when True keep every line (including blanks) unstripped-empty
    :param path: path where file stored
    :param mode: text encoding used to decode the raw bytes
    :type path: str
    :return: string lines in file in a list
    :rtype: list
    """
    if not isinstance(path, str):
        raise TypeError("path must be a str, got %s" % type(path).__name__)
    res = []
    # `with` guarantees the file is closed even if decoding fails
    # (the original opened/closed manually and leaked on error).
    with open(path, 'rb') as file:
        lines = file.read().decode(mode, 'ignore')
    for line in lines.split('\n'):
        line = line.strip()
        if origin:
            res.append(line)
        elif line:
            # default mode: skip lines that are empty after stripping
            res.append(line)
    return res
def load_excel_data(path):
    """
    This func is used to reading excel file
    :param path: path where file stored
    :type path: str
    :return: data saved in a pandas DataFrame
    :rtype: pandas.DataFrame
    """
    if not isinstance(path, str):
        raise TypeError("path must be a str, got %s" % type(path).__name__)
    import pandas as pd
    # The original returned `.loc[:]` (a shallow copy) - returning the frame
    # directly is equivalent for callers.
    return pd.read_excel(path)
def load_variable(path):
    """
    Load a pickled Python object from disk.
    NOTE: pickle is unsafe on untrusted input - only load files you wrote.
    :param path: path of the pickle file
    :type path: str
    :return: the unpickled object
    """
    import pickle
    # `with` closes the file handle (the original left it open).
    with open(path, 'rb') as f:
        return pickle.load(f)
def save_txt_file(data, path, end='\n'):
    """
    This func is used to saving data to txt file
    support data type:
        list: Fully support
        dict: Only save dict key
        str: will save single char to each line
        tuple: Fully support
        set: Fully support
    :param data: data
    :param path: path to save
    :type path: str
    :param end: terminator appended after every item
    :type end: str
    :return: None
    """
    if not isinstance(data, (list, dict, str, tuple, set)) or not isinstance(path, str):
        raise TypeError
    # Open in 'w' mode: truncating replaces the original remove-then-append
    # dance in a single, race-free step.
    with open(path, 'w', encoding='utf-8') as f:
        for item in data:
            f.write(str(item) + end)
def save_variable(variable, path):
    """
    Pickle a Python object to disk.
    :param variable: any picklable object
    :param path: destination file path
    :type path: str
    :return: None
    """
    import pickle
    # `with` flushes and closes the handle (the original left the file
    # object unclosed, risking a truncated pickle on interpreter exit).
    with open(path, 'wb') as f:
        pickle.dump(variable, f)
def load_file_name(path):
    """
    Return the (root, subdirs, file_names) triple for the top level of
    ``path``; returns None when ``path`` yields nothing (e.g. nonexistent).
    :param path: directory to inspect
    :type path: str
    :return: (root, dirs, files) tuple or None
    """
    # Only the first os.walk entry (the top level) is wanted.
    return next(os.walk(path), None)
def load_all_file_name(path, list_name, suffix='', not_include='.py'):
    """
    Recursively collect file paths under ``path`` whose extension equals
    ``suffix``, appending them to ``list_name`` in place.
    :param path: directory to scan
    :param list_name: output list, mutated in place
    :param suffix: required file extension (e.g. '.txt')
    :param not_include: substring that prunes a subdirectory from recursion
    :return: None (results accumulate in ``list_name``)
    """
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        is_wanted_dir = os.path.isdir(entry_path) and not_include not in entry_path
        if is_wanted_dir:
            # descend into the subdirectory
            load_all_file_name(entry_path, list_name, suffix, not_include)
        elif os.path.splitext(entry_path)[1] == suffix:
            list_name.append(entry_path)
def check_dir(path):
    """
    Report whether anything (file or directory) exists at ``path``.
    :param path: filesystem path to test
    :type path: str
    :return: True when the path exists
    :rtype: bool
    """
    return os.path.exists(path)
def mkdir(path):
    """
    Create the directory ``path`` (with parents) if nothing exists there.
    :param path: directory path; surrounding whitespace is stripped
    :type path: str
    :return: None
    """
    path = path.strip()
    if not os.path.exists(path):
        # exist_ok=True closes the check-then-create race: if another
        # process creates the directory between the exists() check and
        # here, makedirs no longer raises.
        os.makedirs(path, exist_ok=True)
def remove_old_file(path):
    """
    Delete the file at ``path`` if it exists; silently do nothing otherwise.
    :param path: file path
    :type path: str
    :return: None
    """
    # EAFP: attempting the remove and ignoring FileNotFoundError avoids the
    # original check-then-remove race (and the dependency on check_dir).
    try:
        os.remove(path)
    except FileNotFoundError:
        pass
def delete_file(path):
    """Remove the file at ``path`` (raises OSError when it does not exist)."""
    os.unlink(path)  # os.unlink is the POSIX-named alias of os.remove
# Import-only module: no CLI behavior; the guard is a deliberate placeholder.
if __name__ == '__main__':
    pass
| 3,373 | 1,147 |
from constants import *
import pygame as pg
from time import sleep
from metronome import *
import math
import numpy as np
from copy import deepcopy
from audio import *
from instructions_panel import *
from loop import *
class MusicMaker:
    def __init__(self, screen):
        """Initialize mixer state and start the audio player.

        :param screen: pygame display surface used for drawing.
        NOTE(review): constructing AudioPlayer and calling run() appears to
        start audio processing as a side effect of __init__ - confirm before
        instantiating this class headless/in tests.
        """
        self.pitch = 0
        self.screen = screen
        self.pitch_range = PITCH_RANGE
        # Mouse button states (left/middle/right) remembered between steps.
        self.b_left = 0
        self.b_middle = 0
        self.b_right = 0
        self.saved = None  # purpose not evident from this chunk - TODO confirm
        self.events = set()  # scheduled (action, when, phase) tuples consumed in do_step
        self.metronome = Metronome(BUFFERS_PER_MEASURE)
        self.is_measure = False
        # Scales 1..5 stacked vertically; scale 4 (index 3) starts selected.
        self.using_scales = list(range(1,6))
        self.scale = self.using_scales[3]
        self.scale_height = SCREEN_DIM[1] / len(self.using_scales)
        self.background = None
        self.background_needs_update = True  # triggers background redraw on next frame
        self.instructions = InstructionsPanel()
        # Assign None first: do_step busy-waits on `audio_player == None` to
        # avoid a race while the player is being constructed.
        self.audio_player = None
        self.audio_player = AudioPlayer(self)
        self.audio_player.run()
def do_step(self):
## Avoid the race condition
while self.audio_player == None:
sleep(.1)
## Gather information from metronome, mouse, and keyboard
is_beat = self.metronome.is_beat(self.audio_player.loop_buffer_index)
self.is_measure = self.metronome.is_measure(self.audio_player.loop_buffer_index)
(m_x, m_y) = pygame.mouse.get_pos()
(last_b_left, last_b_middle, last_b_right) = (self.b_left, self.b_middle, self.b_right)
(self.b_left, self.b_middle, self.b_right) = pygame.mouse.get_pressed()
last_keys = keys[:]
keys.clear()
keys.extend(pygame.key.get_pressed())
## Center scales around mouse
if self.b_middle and not last_b_middle:
self.background_needs_update = True
m_x, m_y = self.center_scales_around(m_x, m_y)
## Run events scheduled for the beginning of the step
for e in sorted(list(self.events), key=lambda e: e[0]):
if e[2] == BEGIN_STEP:
if e[1] == NEXT_BUFFER or ( is_beat and e[1] == NEXT_BEAT ) or ( self.is_measure and e[1] == NEXT_MEASURE ):
self.audio_player.do_action(e[0])
self.events.remove(e)
###########################
## Keyboard and mouse input
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
## These events aren't caught by the pygame.mouse methods
elif event.type == pygame.MOUSEBUTTONDOWN:
## Scroll down
if event.button == 5:
self.audio_player.decrease_volume()
## Scroll up
if event.button == 4:
self.audio_player.increase_volume()
## Window resize
elif event.type == pygame.VIDEORESIZE:
w,h = event.size
min_w, min_h = MIN_DIM
w = max(min_w, w)
h = max(min_h, h)
update_screen_size((w,h))
self.background_needs_update = True
self.scale_height = SCREEN_DIM[1] / len(self.using_scales)
self.screen = pygame.display.set_mode(SCREEN_DIM, pygame.RESIZABLE)
## Get the exact pitch from the mouse x coordinate
self.mouse_pitch = self.coord_to_pitch(m_x, coord=0, reverse=False)
## Close the application
if is_key_mod(ESCAPE, None):
self.audio_player.stop_stream()
print("Ending stream...")
## Start and stop recording
if not keys[SPACE] and self.audio_player.loop_recording:
self.events.add(EVENT_STOP_LOOP_REC)
if keys[SPACE] and not self.audio_player.loop_recording:
self.events.add(EVENT_START_LOOP_REC)
## Start and stop playing of all loops
if is_key_mod(K_P, None) and not last_keys[K_P]:
if self.audio_player.loop_playing:
self.events.add(EVENT_STOP_LOOP_PLAY)
else:
self.events.add(EVENT_START_LOOP_PLAY)
## If a loop is selected:
if self.audio_player.active_loops[0] >= 0 and not self.audio_player.loop_recording:
## Move the active loops left/right by one beat (with wrapping)
if is_key_mod(LEFT, None) and not last_keys[LEFT]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].horizontal_shift(-1*self.metronome.beat_len)
if is_key_mod(RIGHT, None) and not last_keys[RIGHT]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].horizontal_shift(self.metronome.beat_len)
## Move the active loops left/right by one buffer (with wrapping)
if is_key_mod(LEFT, SHIFT) and not last_keys[LEFT]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].horizontal_shift(-1)
if is_key_mod(RIGHT, SHIFT) and not last_keys[RIGHT]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].horizontal_shift(1)
## Toggle mute on the active loops
if is_key_mod(K_M, None) and not last_keys[K_M]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].toggle_mute()
## Increase and decrease volume of the active loops
if keys[EQUALS] or keys[PLUS] or keys[KP_PLUS]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].adjust_volume(.02)
if keys[MINUS] or keys[KP_MINUS]:
for i in self.audio_player.active_loops:
self.audio_player.loops[i].adjust_volume(-.02)
## Copy the active loops below them as a group, and mute the copies
if is_key_mod(K_C, CTRL) and not last_keys[K_C]:
loop_copies = [self.audio_player.loops[i].get_copy() for i in self.audio_player.active_loops]
for i,loop in enumerate(loop_copies):
loop.set_mute(True)
self.audio_player.loops.insert(self.audio_player.active_loops[-1]+1+i, loop)
self.audio_player.active_loops = [x+len(loop_copies) for x in self.audio_player.active_loops]
## Move the active loops up and down in the lineup
other_index = -1
loops = self.audio_player.loops
if is_key_mod(UP, ALT) and not last_keys[UP] and self.audio_player.active_loops[0] > 0:
for index in self.audio_player.active_loops:
other_index = (index-1)%len(self.audio_player.loops)
loops[index], loops[other_index] = loops[other_index], loops[index]
self.audio_player.active_loops = [x-1 for x in self.audio_player.active_loops]
elif is_key_mod(DOWN, ALT) and not last_keys[DOWN] and self.audio_player.active_loops[-1] < len(loops)-1:
for index in self.audio_player.active_loops[::-1]:
other_index = (index+1)%len(self.audio_player.loops)
loops[index], loops[other_index] = loops[other_index], loops[index]
self.audio_player.active_loops = [x+1 for x in self.audio_player.active_loops]
## Add the selected loops
if is_key_mod(K_A, None) and not last_keys[K_A]:
while len(self.audio_player.active_loops) > 1:
i = self.audio_player.active_loops[0]
other = self.audio_player.active_loops.pop()
self.audio_player.loops[i].combine(self.audio_player.loops[other])
del self.audio_player.loops[other]
## Pitch shift the selected loops UP/DOWN
if is_key_mod(UP, CTRL) and is_key_mod(UP, SHIFT) and not last_keys[UP]:
for index in self.audio_player.active_loops:
#Shift up one eighth of a tone
self.audio_player.loops[index].pitch_shift(.25)
elif is_key_mod(UP, CTRL) and not last_keys[UP]:
for index in self.audio_player.active_loops:
#Shift up one semitone
self.audio_player.loops[index].pitch_shift(1)
elif is_key_mod(DOWN, CTRL) and is_key_mod(DOWN, SHIFT) and not last_keys[DOWN]:
for index in self.audio_player.active_loops:
#Shift up one eighth of a tone
self.audio_player.loops[index].pitch_shift(-.25)
elif is_key_mod(DOWN, CTRL) and not last_keys[DOWN]:
for index in self.audio_player.active_loops:
#Shift up one semitone
self.audio_player.loops[index].pitch_shift(-1)
## Delete the current loop with backspace or delete
if (is_key_mod(BACKSPACE, None) and not last_keys[BACKSPACE]) or (is_key_mod(DELETE, None) and not last_keys[DELETE]):
for i in self.audio_player.active_loops[::-1]:
del self.audio_player.loops[i]
self.audio_player.active_loops = [self.audio_player.active_loops[0]]
if self.audio_player.active_loops[0] >= len(self.audio_player.loops):
self.audio_player.active_loops[0] -= 1
else: ## Metronome selected (index -1)
##Only allow changes to the metronome when there are no loops:
if len(self.audio_player.loops) == 0:
## Add or subtract from the metronome length
if is_key_mod(LEFT, None) and not last_keys[LEFT]:
self.metronome.change_measure_length(-self.metronome.beats)
if is_key_mod(RIGHT, None) and not last_keys[RIGHT]:
self.metronome.change_measure_length(self.metronome.beats)
## Add or subtract from the metronome beat count
if is_key_mod(LEFT, SHIFT) and not last_keys[LEFT]:
self.metronome.change_beat_count(-1)
if is_key_mod(RIGHT, SHIFT) and not last_keys[RIGHT]:
self.metronome.change_beat_count(1)
## Toggle justify pitch
if is_key_mod(K_J, None) and not last_keys[K_J]:
self.audio_player.justify_pitch = not self.audio_player.justify_pitch
self.background_needs_update = True
for loop in self.audio_player.loops:
loop.recalculate_buffers()
if not self.audio_player.loop_recording:
## Move the active loop indicator up and down
if is_key_mod(UP, None) and not last_keys[UP]:
self.audio_player.active_loops = [ self.audio_player.active_loops[0] % (len(self.audio_player.loops)+1) - 1 ]
if is_key_mod(DOWN, None) and not last_keys[DOWN]:
self.audio_player.active_loops = [ (self.audio_player.active_loops[-1]+2) % (len(self.audio_player.loops)+1) - 1 ]
## Select a range of loops
if is_key_mod(UP, SHIFT) and not is_key_mod(UP, CTRL) and not last_keys[UP] and self.audio_player.active_loops[0] > 0:
self.audio_player.active_loops.insert(0, self.audio_player.active_loops[0]-1)
if is_key_mod(DOWN, SHIFT) and not is_key_mod(DOWN, CTRL) and not last_keys[DOWN] and self.audio_player.active_loops[0] >= 0 and self.audio_player.active_loops[-1] < len(self.audio_player.loops) - 1:
self.audio_player.active_loops.append(self.audio_player.active_loops[-1]+1)
## Multiply metronome and loops a given number of times
for num in range(0,10):
if is_key_mod(NUMS[num], None) and not last_keys[NUMS[num]]:
self.audio_player.multiply_tracks(num)
## Articulating and continuing a note playing
if self.b_left:
if not self.audio_player.playing:
self.audio_player.articulate()
else:
self.audio_player.settle_to_volume()
## Allowing a note to fade away when not left clicking
if not self.b_left:
self.audio_player.volume_decay()
## Identify the current scale by mouse position
self.scale_index = (self.using_scales[0] + int(m_y / SCREEN_DIM[1] * len(self.using_scales))) %12
self.scale = SCALES[self.scale_index]
## Temporarily align to the chromatic scale on the current scale
if (self.b_right):
self.scale = CHROMATIC_SCALE
## Show and hide the instructions (really for QUESTION_MARK, but SLASH is more accepting)
if (keys[SLASH] and not last_keys[SLASH]):
self.instructions.minimized = not self.instructions.minimized
#######################
## Pitch decisionmaking
## Get scale degree of closest pitch
self.closest_pitch = sorted(self.scale, key=lambda x: min(abs((self.mouse_pitch%12)-x), 12 - abs((self.mouse_pitch%12)-x))) [0]
## Put closest pitch in correct octave
self.closest_pitch += math.floor(self.mouse_pitch / 12) * 12
## Correct an error by rounding up if self.mouse_pitch > 11.5
if abs(self.mouse_pitch - self.closest_pitch) > 10:
self.closest_pitch += 12
## In case we switched scales for the chromatic scale, switch back now that we decided on a closest pitch
self.scale = SCALES[self.scale_index]
## Decide whether to align to the closest pitch, or use the mouse pitch
#if not last_b_middle:
if self.b_left or self.audio_player.volume == 0:
if is_key_mod(K_S, None):
self.pitch = self.mouse_pitch
else:
self.pitch = self.closest_pitch
## Run events scheduled for the end of the step
for e in sorted(list(self.events), key=lambda e: e[0]):
if e[2] == END_STEP:
if e[1] == NEXT_BUFFER or ( is_beat and e[1] == NEXT_BEAT ) or ( self.is_measure and e[1] == NEXT_MEASURE ):
self.audio_player.do_action(e[0])
self.events.remove(e)
self.paint_screen()
def center_scales_around(self, m_x, m_y):
range_width = self.pitch_range[1] - self.pitch_range[0]
range_middle = self.pitch_range[1] - range_width // 2
diff = self.closest_pitch - range_middle
self.pitch_range = (self.pitch_range[0]+diff, self.pitch_range[1]+diff)
y_diff = self.scale_index - self.using_scales[len(self.using_scales)//2]
self.using_scales = [(i+y_diff)%12 for i in self.using_scales]
new_m_x = self.pitch_to_coord(self.mouse_pitch)
new_m_y = m_y-y_diff*self.scale_height
pygame.mouse.set_pos(new_m_x, new_m_y)
return new_m_x, new_m_y
    def paint_screen(self):
        """Render one full frame: buffered background, note activity per
        scale row, metronome, loop lineup and instruction panel, then flip
        the display."""
        ## Draw the mostly unchanging buffered background
        if self.background == None or self.background_needs_update:
            self.background = self.redraw_background()
        self.screen.blit(self.background, (0,0))
        ## Draw the active notes
        y=0
        # Recorded notes of every audible loop at the current buffer position.
        notes = [l.recorded_notes[self.audio_player.loop_buffer_index] for l in self.audio_player.loops if not l.muted]
        self.recorded_notes_to_draw = [rn for sublist in notes for rn in sublist]
        for i in self.using_scales:
            s = SCALES[i]
            # Identity check: the row whose scale object IS the current one
            # gets drawn highlighted.
            self.draw_scale_activity(s, y, self.scale is s)
            y += self.scale_height
        ## Draw metronome
        self.metronome.paint_self(self.screen, self.audio_player.loop_buffer_index, -1 in self.audio_player.active_loops)
        ## Draw the loops
        y = 60
        x = 10
        w = self.metronome.measure_len * self.metronome.visual_buffer_width
        h = 30
        v_margin = 10
        for i in range(len(self.audio_player.loops)):
            loop = self.audio_player.loops[i]
            loop.paint_self(self.screen, (x,y,w,h), i in self.audio_player.active_loops, self.audio_player.loop_recording)
            y += h + v_margin
        ## Draw the instruction panel
        self.instructions.paint_self(self.screen)
        pygame.display.flip()
'''
Draws the active elements of a scale (row of notes) on the screen.
'''
def draw_scale_activity(self, scale, y, is_active):
notes_to_draw = [rn for rn in self.recorded_notes_to_draw if rn.scale==scale]
if self.scale == scale:
notes_to_draw.append(RecordedNote(-1, self.pitch, self.audio_player.volume, None, self.scale, None, None))
for p in range(self.pitch_range[0], self.pitch_range[1]+1):
p_i = p % 12
if p_i in scale:
x = self.pitch_to_coord(p, coord=0, reverse=False, scale=scale[0])
color = ACTIVE_COLORS[p_i] if is_active and self.closest_pitch == p else INACTIVE_COLORS[p_i]
##Determine line width based on notes_to_draw:
on_this_pitch = [rn for rn in notes_to_draw if rn.pitch == p]
notes_to_draw = [rn for rn in notes_to_draw if not rn in on_this_pitch]
if len(on_this_pitch) > 0:
sum_volume = sum(map(lambda rn: rn.get_loudness(), on_this_pitch))
line_width = max(INACTIVE_NOTE_WIDTH, int(sum_volume*ACTIVE_NOTE_STRETCH))
pygame.draw.line(self.screen, color, (x,y), (x,y+self.scale_height), line_width)
if get_font() and p_i == scale[0]:
l1 = get_font().render(NOTE_NAMES[p_i], 1, color)
self.screen.blit(l1, (x+10, y+self.scale_height-30))
if is_active:
color = INACTIVE_COLORS[scale[0]]
pygame.draw.line(self.screen, color, (0,y), (SCREEN_DIM[0],y), 4)
pygame.draw.line(self.screen, color, (0,y+self.scale_height), (SCREEN_DIM[0],y+self.scale_height), 4)
## The remaining pitches in notes_to_draw are not on a bar
for rn in notes_to_draw:
line_width = max(INACTIVE_NOTE_WIDTH, int(rn.get_loudness() * ACTIVE_NOTE_STRETCH))
x = self.pitch_to_coord(rn.pitch)
pygame.draw.line(self.screen, FREE_NOTE_COLOR, (x, y), (x,y+self.scale_height), line_width)
'''
Draws the inactive scale elements into a buffer image
'''
def redraw_background(self):
self.background_needs_update = False
screen = pygame.Surface(SCREEN_DIM)
screen.fill(BACK_COLOR)
y=0
for i in self.using_scales:
self.draw_scale_background(screen, SCALES[i], y)
y += self.scale_height
return screen
'''
Draws the inactive elements of one scale onto an image
'''
def draw_scale_background(self, screen, scale, y):
pygame.draw.rect(screen, DARK_COLORS[scale[0]], (0,y,SCREEN_DIM[0],self.scale_height))
pygame.draw.line(screen, SCALE_INACTIVE_SEPARATOR_COLOR, (0,y), (SCREEN_DIM[0],y), 1)
pygame.draw.line(screen, SCALE_INACTIVE_SEPARATOR_COLOR, (0,y+self.scale_height), (SCREEN_DIM[0],y+self.scale_height), 1)
for p in range(self.pitch_range[0], self.pitch_range[1]+1):
p_i = p % 12
if p_i in scale:
x = self.pitch_to_coord(p, coord=0, reverse=False, scale=scale[0])
pygame.draw.line(screen, INACTIVE_COLORS[p_i], (x,y), (x,y+self.scale_height), INACTIVE_NOTE_WIDTH)
if get_font() and p_i == scale[0]:
l1 = get_font().render(NOTE_NAMES[p_i], 1, INACTIVE_COLORS[p_i])
screen.blit(l1, (x+10, y+self.scale_height-30))
def coord_to_pitch(self, y, coord=0, reverse=False):
if reverse:
return (self.pitch_range[1] - self.pitch_range[0]) / SCREEN_DIM[coord] * (SCREEN_DIM[coord] - y) + self.pitch_range[0]
else:
return (self.pitch_range[1] - self.pitch_range[0]) / SCREEN_DIM[coord] * y + self.pitch_range[0]
def pitch_to_coord(self, p, coord=0, reverse=False, scale=None):
if scale != None and self.audio_player.justify_pitch:
p = pitch_to_just_pitch(p, scale)
if reverse:
return SCREEN_DIM[coord] - (p - self.pitch_range[0]) / (self.pitch_range[1] - self.pitch_range[0]) * SCREEN_DIM[coord]
else:
return (p - self.pitch_range[0]) / (self.pitch_range[1] - self.pitch_range[0]) * SCREEN_DIM[coord]
| 20,511 | 6,771 |
import aiohttp
import struct
import json
import re
class eGame:
    """Helpers for egame.qq.com live rooms: resolve the barrage websocket
    endpoint/registration frames and decode pushed messages."""

    # Pre-built keep-alive frame: an 18-byte header with an empty body.
    heartbeat = b'\x00\x00\x00\x12\x00\x12\x00\x01\x00\x07\x00\x00\x00\x01\x00\x00\x00\x00'
    # Seconds between heartbeat frames.
    heartbeatInterval = 60

    @staticmethod
    async def get_ws_info(url):
        """Fetch the room's websocket token and build the frames to send.

        url: live-room URL whose last path component is the room id.
        Returns (websocket_url, [register_frame, heartbeat_frame]).
        """
        rid = url.split('/')[-1]
        page_id = aid = rid
        headers = {
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1'
        }
        async with aiohttp.ClientSession() as session:
            # BUG FIX: the query string was missing '=' ('?anchorid<rid>'),
            # which produced a malformed query parameter.
            async with session.get('https://m.egame.qq.com/live?anchorid=' + rid, headers=headers) as resp:
                res = await resp.text()
                res_ = re.findall(r'"videoInfo":(.*),"h5Url"', res)[0]
                str_id = json.loads(res_)['pid']
            params = {
                'param': json.dumps({"0":{"module":"pgg.ws_token_go_svr.DefObj","method":"get_token","param":{"scene_flag":16,"subinfo":{"page":{"scene":1,"page_id":int(page_id),"str_id":str(str_id),"msg_type_list":[1,2]}},"version":1,"message_seq":-1,"dc_param":{"params":{"info":{"aid":aid}},"position":{"page_id":"QG_HEARTBEAT_PAGE_LIVE_ROOM"},"refer":{}},"other_uid":0}}})
            }
            async with session.post('https://share.egame.qq.com/cgi-bin/pgg_async_fcgi', data=params, headers=headers) as resp:
                res = json.loads(await resp.text())
                token = res['data']['0']['retBody']['data']['token']
        # Assemble the registration frames: a token frame plus a heartbeat.
        reg_datas = []
        tokenbuf = token.encode('ascii')
        # Body: field tag 7 + 4-byte big-endian length + token bytes.
        bodybuf = struct.pack('!Bi', 7, len(tokenbuf)) + tokenbuf
        # Header: total length, header length (18), then protocol fields.
        headerbuf = struct.pack('!ihhhihh', 18 + len(bodybuf), 18, 1, 1, 0, 0, 0)
        data = headerbuf + bodybuf
        reg_datas.append(data)
        reg_datas.append(eGame.heartbeat)
        return 'wss://barragepush.egame.qq.com/sub', reg_datas

    @staticmethod
    def decode_msg(data):
        """Decode one raw packet into a list of message dicts.

        type codes: 0/3/9 user chat; 7/33 gift; 29/35 welcome;
        24/31 system notice; 23 follow.
        Each dict has 'name', 'content' and 'msg_type'
        ('danmaku', 'other' or 'None').
        """
        msgs = []
        msg = {}
        s = MessageDecode(data)
        body = s.v()['body']
        if body:
            bin_datas = body['bin_data']
            for bin_data in bin_datas:
                # Some items lack a 'type' key, hence .get() with a dummy
                # default that never matches the chat codes.
                if bin_data.get('type', '') in (0, 3, 9):
                    msg['name'] = bin_data['nick']
                    msg['content'] = bin_data['content']
                    msg['msg_type'] = 'danmaku'
                else:
                    msg = {'name': '', 'content': '', 'msg_type': 'other'}
                msgs.append(msg.copy())
            return msgs
        else:
            msg = {'name': '', 'content': '', 'msg_type': 'None'}
            msgs.append(msg.copy())
            return msgs
class MessageDecode:
    """
    Unpacks a binary barrage packet, reproducing the decoding steps of the
    site's JavaScript client.  Packets are a fixed 18-byte header followed
    by a TLV-encoded body (high nibble = tag, low nibble = wire type).
    """
    def __init__(self, data):
        # Raw packet bytes as received from the websocket.
        self.data = data
        # Tag -> field-name template for the outer event structure.
        self.ie = {
            'event_id': 0,
            'msg_type': 1,
            'bin_data': 2,
            'params': 3,
            'start_tm': 4,
            'data_list': 6,
            'end_tm': 5,
            'message_seq': 7,
        }
        # Tag template for chat-message payloads (used when msg_type == 1).
        self.ne = {
            'uid': 0,
            'msgid': 1,
            'nick': 2,
            'content': 3,
            'tm': 4,
            'type': 5,
            'scenes_flag': 6,
            'ext': 7,
            'send_scenes': 8
        }
        # Tag template for event payloads (used when msg_type == 2).
        self.oe = {
            'event_id': 0,
            'event_name': 1,
            'info': 2,
            'params': 3,
            'bin_data': 4
        }
    def v(self):
        """Parse the fixed header, validate the length, decode the body.

        Returns {'seq', 'operation', 'body'}; body is None for operations
        other than 3.
        """
        data = self.data
        startPosition = 18
        # First 4 bytes hold the total packet length (validated below).
        endPosition, = struct.unpack_from('!i', data, 0)
        seq, = struct.unpack_from('!i', data, 10)
        operation, = struct.unpack_from('!h', data, 8)
        if endPosition != len(data):
            raise Exception('The received packet length is abnormal')
        return {
            'seq': seq,
            'operation': operation,
            'body': self.w(operation, startPosition, endPosition, data)
        }
    def w(self, operation, startPosition, endPosition, data):
        """Dispatch on the operation code; only operation 3 has a payload."""
        if operation == 3:
            return self.x(startPosition, endPosition, data)
        else:
            return None
    def x(self, startPosition, endPosition, data):
        """Decode the length-prefixed payload following the header, or
        return None if the declared length exceeds the available bytes."""
        # First 4 bytes of the body give the payload length.
        i, = struct.unpack_from('!i', data, startPosition)
        n = data[startPosition: endPosition]
        if len(n) >= (4 + i):
            o = n[4:(4 + i)]
            a = self.S(o)
            y = self.ye(a)
            return y
        else:
            return None
    def ye(self, e):
        """Map the decoded tag/value items through the outer event template,
        expanding nested payloads (afterChange=1)."""
        return self.T({
            'resultObj': e,
            'template': self.ie,
            'afterChange': 1,
        })
    def afterChange(self, e, t, i, n, o):
        """Post-process a decoded field.

        For 'bin_data', re-decode each nested item from its 'ext' offset in
        the raw buffer *e*, using the template selected by the already
        decoded msg_type in *o*.  Other fields pass through unchanged.
        """
        if t == 'bin_data':
            v = []
            ve = {}
            for m in n:
                a = self.S(e, m['ext'])
                b = o['msg_type']
                if b == 1:
                    ve = self.T({
                        'resultObj': a,
                        'template': self.ne
                    })
                elif b == 2:
                    ve = self.T({
                        'resultObj': a,
                        'template': self.oe
                    })
                v.append(ve.copy())
            return v
        else:
            return n
    def T(self, e):
        """Convert a decoded (items, raw_buffer) pair into a dict keyed by
        the field names of the given tag template."""
        i = e['resultObj']
        n = e['template']
        # NOTE(review): 'beforeChange' is read but never used below.
        o = e.get('beforeChange', '')
        r = e.get('afterChange', '')
        a = {}
        for s in n.keys():
            # Find the first decoded item whose tag matches this field.
            for t in i[0]:
                if t['tag'] == n[s]:
                    q = t
                    p = q['value']
                    c = q['ext']
                    if r:
                        a[s] = self.afterChange(i[1], s, c, p, a)
                    else:
                        a[s] = p
                    break
        return a
    def S(self, e, t=0):
        """Scan buffer *e* from offset *t*, collecting all TLV items.

        Returns (items, e) so callers can re-decode nested spans later.
        """
        # Callers sometimes pass '' for "start at 0" (see m()'s defaults).
        if t == '':
            t = 0
        i = []
        n = len(e)
        while t < n:
            o = self.m(e, t)
            dict_ = {
                'value': o['value'],
                'lastPosition': o['position'],
                'ext': o['ext'],
                'tag': o['tag']
            }
            i.append(dict_.copy())
            t = o['position']
        return i, e
    def m(self, e, t):
        """Decode one TLV element at offset *t*.

        The first byte packs the tag (high nibble) and the wire type (low
        nibble); the wire type selects one of the f* decoders below.
        """
        value = position = ext = ''
        i = e
        a, = struct.unpack_from('!B', i, t)
        tag = (240 & a) >> 4
        # NOTE: shadows the builtin 'type' locally (kept to mirror the JS).
        type = 15 & a
        s_position = t + 1
        if type == 0:
            value, position = self.f0(i, s_position)
        elif type == 1:
            value, position = self.f1(i, s_position)
        elif type == 2:
            value, position = self.f2(i, s_position)
        elif type == 3:
            value, position = self.f3(i, s_position)
        elif type == 6:
            value, position, ext = self.f6(i, s_position)
        elif type == 7:
            value, position, ext = self.f7(i, s_position)
        elif type == 8:
            value, position = self.f8(i, s_position)
        elif type == 9:
            value, position = self.f9(i, s_position)
        elif type == 12:
            value, position = self.f12(i, s_position)
        elif type == 13:
            value, position = self.f13(i, s_position)
        i = ''
        return {
            'i': i,
            'tag': tag,
            'type': type,
            'value': value,
            'position': position,
            'ext': ext
        }
    def f0(self, e, t):
        """Wire type 0: unsigned 8-bit integer."""
        o = 1
        try:
            n, = struct.unpack_from('!B', e, t)
        except:
            n = ''
        return n, t + o
    def f1(self, e, t):
        """Wire type 1: big-endian unsigned 16-bit integer."""
        o = 2
        try:
            n, = struct.unpack_from('!H', e, t)
        except:
            n = ''
        return n, t + o
    def f2(self, e, t):
        """Wire type 2: big-endian unsigned 32-bit integer."""
        o = 4
        try:
            n, = struct.unpack_from('!I', e, t)
        except:
            n = ''
        return n, t + o
    def f3(self, e, t):
        """Wire type 3: 64-bit integer assembled from two 32-bit halves
        (mirrors the JS client, which has no native 64-bit ints)."""
        e = struct.unpack('!8B', e[t:t + 8])
        i = (e[0] << 24) + (e[1] << 16) + (e[2] << 8) + e[3]
        o = (e[4] << 24) + (e[5] << 16) + (e[6] << 8) + e[7]
        value = (i << 32) + o
        position = t + 8
        return value, position
    def f4(self, e, t):
        """Wire type 4: big-endian 32-bit float (currently unused by m())."""
        o = 4
        try:
            n, = struct.unpack_from('!f', e, t)
        except:
            n = ''
        return n, t + o
    def f5(self, e, t):
        """Wire type 5: big-endian 64-bit float (currently unused by m())."""
        o = 8
        try:
            n, = struct.unpack_from('!d', e, t)
        except:
            n = ''
        return n, t + o
    def f6(self, e, t):
        """Wire type 6: short string, 1-byte length prefix.

        Returns (text, end_offset, payload_start) — payload_start is kept
        as 'ext' so nested payloads can be re-decoded in place.
        """
        n, = struct.unpack_from('!B', e, t)
        r = t + 1
        s = r + n
        value = (e[r:s]).decode('utf8', errors='ignore')
        return value, s, r
    def f7(self, e, t):
        """Wire type 7: long string, 4-byte length prefix (see f6)."""
        n, = struct.unpack_from('!I', e, t)
        r = t + 4
        s = r + n
        value = (e[r:s]).decode('utf8', errors='ignore')
        return value, s, r
    def f8(self, e, t):
        """Wire type 8: map — a count followed by tag-0/tag-1 key/value
        element pairs."""
        i = {}
        b = self.m(e, t)
        o = b['value']
        r = b['position']
        while o > 0:
            a = self.m(e, r)
            s = self.m(e, a['position'])
            if a['tag'] == 0 and s['tag'] == 1:
                i[a['value']] = s['value']
            r = s['position']
            o -= 1
        return i, r
    def f9(self, e, t):
        """Wire type 9: list — a count followed by that many elements."""
        i = self.m(e, t)
        n = i['value']
        o = i['position']
        r = []
        while n > 0:
            a = self.m(e, o)
            r.append(a.copy())
            o = a['position']
            n -= 1
        return r, o
    def f10(self, e, t):
        """Wire type 10: open-ended list terminated by a type-11 element
        (currently unused by m())."""
        i = []
        while True:
            n = self.m(e, t)
            t = n['position']
            if n['type'] == 11:
                return i, t
            i.append(n['value'].copy())
    def f11(self, e, t):
        """Wire type 11: list terminator — empty value, no bytes consumed."""
        return '', t
    def f12(self, e, t):
        """Wire type 12: constant zero, no bytes consumed."""
        return 0, t
    def f13(self, e, t):
        """Wire type 13: raw byte slice addressed by a nested element."""
        i = self.m(e, t)
        return e[(t + i['position']):i['value']], t + i['position'] + i['value']
| 10,084 | 3,503 |
from django.views.generic import DetailView, TemplateView
from star_ratings.models import Rating
from .models import Beer, Snack
class BeerRateView(TemplateView):
    """Render the shared article-ratings page for all beers, oldest first."""
    model = Beer
    template_name = 'minimal_rating_system/article_ratings.html'

    def get_context_data(self, **kwargs):
        """Expose the article class label and the beer queryset to the template."""
        kwargs['article_class'] = "Beer"
        kwargs['articles'] = self.model.objects.all().order_by('date_created')
        # Python 3 zero-argument super() instead of the legacy two-arg form.
        return super().get_context_data(**kwargs)
class SnackRateView(TemplateView):
    """Render the shared article-ratings page for all snacks, oldest first."""
    model = Snack
    template_name = 'minimal_rating_system/article_ratings.html'

    def get_context_data(self, **kwargs):
        """Expose the article class label and the snack queryset to the template."""
        kwargs['article_class'] = "Snack"
        kwargs['articles'] = self.model.objects.all().order_by('date_created')
        # Python 3 zero-argument super() instead of the legacy two-arg form.
        return super().get_context_data(**kwargs)
| 832 | 265 |
# Generated by Django 3.2.5 on 2021-07-20 15:33
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the (now unused) ``bio`` field
    # from ``UserProfile``.  Do not edit by hand once applied.

    dependencies = [
        ('AwardsApp', '0004_alter_userprofile_bio'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='userprofile',
            name='bio',
        ),
    ]
| 335 | 117 |
# Imports
from flask import Flask, render_template, session, redirect, request, flash, url_for, abort
from flask_session import Session
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from cs50 import SQL
import os
import markdown
from time import sleep
from new import login_required
# App Config
app = Flask(__name__)
db = SQL("sqlite:///school.db")
# NOTE(review): debug mode and a hard-coded secret key must not ship to
# production -- load both from the environment instead.
app.debug = True
app.secret_key = b'\xb3\xaaS\xdf\xc0\x1fBc\x9b\x84\x9a\xfaCd\xc3\xd9'
app.static_folder = 'static'
app.config["TEMPLATES_AUTO_RELOAD"] = True
@app.after_request
def after_request(response):
    """Attach no-cache headers to every outgoing response."""
    headers = response.headers
    headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    headers["Expires"] = 0
    headers["Pragma"] = "no-cache"
    return response
# Session Config
# Store sessions on the local filesystem instead of in signed cookies.
app.config['SESSION_TYPE'] = 'filesystem'
Session(app)
# NOTE(review): from_object(__name__) re-reads this module's upper-case
# attributes after Session(app) was initialised -- verify the ordering is
# intentional.
app.config.from_object(__name__)
# Routes
@app.route('/')
def index():
    """Landing page; logged-in users are sent straight to their home."""
    if not session.get("id"):
        return render_template("index.html")
    return redirect(url_for('home'))
@app.route('/account')
@login_required
def account():
    """Show the logged-in user's account details."""
    rows = db.execute("SELECT * FROM users WHERE id = :id", id=session['id'])
    return render_template('account.html', user=rows[0])
@app.route('/changemail', methods=['GET', 'POST'])
@login_required
def mailc():
    """Let the logged-in user change their e-mail address.

    GET renders the form; POST validates the new address and applies it.
    """
    if request.method != 'POST':
        return render_template('mail.html')
    if not request.form.get('mail'):
        flash(u'Please fill every credentials.', 'sorry')
        return redirect(request.url)
    # Mails are stored lowercased (see register()), so compare lowercased.
    rows2 = db.execute("SELECT * FROM users WHERE mail = :schoolname", schoolname=request.form.get('mail').lower())
    if len(rows2) != 0:
        flash(u'This mail is already registered.', 'sorry')
        return redirect(request.url)
    # BUG FIX: store the address lowercased, matching register() and the
    # duplicate check above (mixed case previously evaded future checks).
    db.execute('UPDATE users SET mail = :mail WHERE id = :id', mail=request.form.get('mail').lower(), id=session['id'])
    return redirect(url_for('home'))
@app.route('/changepass', methods=['GET', 'POST'])
@login_required
def passc():
    """Let the logged-in user change their password.

    GET renders the form; POST validates and stores the new hash.
    """
    if request.method != 'POST':
        return render_template('pass.html')
    password = request.form.get('password')
    if not password:
        flash(u'Please fill every credentials.', 'sorry')
        return redirect(request.url)
    if password != request.form.get("confirmation"):
        flash(u'Passwords do not match.', 'sorry')
        return redirect(request.url)
    db.execute('UPDATE users SET hash = :passw WHERE id = :id', passw=generate_password_hash(password), id=session['id'])
    return redirect(url_for('home'))
@app.route('/changename', methods=['GET', 'POST'])
@login_required
def namec():
    """Let the logged-in user change their school name.

    GET renders the form; POST validates uniqueness and applies the change.
    """
    if request.method != 'POST':
        return render_template('name.html')
    if not request.form.get('username'):
        flash(u'Please fill every credentials.', 'sorry')
        return redirect(request.url)
    rows = db.execute("SELECT * FROM users WHERE schoolname = :schoolname", schoolname=request.form.get('username').lower())
    if len(rows) != 0:
        flash(u'This school name is already registered.', 'sorry')
        return redirect(request.url)
    # BUG FIX: store the name lowercased -- login() looks names up
    # lowercased, so a mixed-case name would lock the user out.
    db.execute('UPDATE users SET schoolname = :name WHERE id = :id', name=request.form.get('username').lower(), id=session['id'])
    return redirect(url_for('home'))
@app.route('/home')
@login_required
def home():
    """Dashboard listing the logged-in user's pages."""
    uid = session['id']
    sites = db.execute("SELECT * FROM sites WHERE user_id = :id", id=uid)
    user = db.execute("SELECT * FROM users WHERE id = :id", id=uid)[0]
    return render_template('home.html', sites=sites, user=user)
@app.route('/page/<urlheader>')
def pages(urlheader):
    """Render a user-created page identified by its URL header (404 if none)."""
    # Single lookup (the original queried the same row twice).
    sites = db.execute("SELECT * FROM sites WHERE header = :header", header=urlheader)
    if len(sites) == 0:
        abort(404)
    # Context manager guarantees the write is flushed and closed before
    # rendering (replaces the open()/sleep(0.1)/close() workaround).
    # NOTE(review): writing a single shared template file per request is
    # racy under concurrent views -- consider rendering the stored content
    # directly instead.
    with open('templates/temp.html', 'w') as file:
        file.write(sites[0]['content'])
    return render_template('pages.html')
@app.route('/pages', methods=['GET', 'POST'])
@login_required
def page():
    """List the logged-in user's pages."""
    user_sites = db.execute("SELECT * FROM sites WHERE user_id = :id", id=session['id'])
    return render_template('page.html', sites=user_sites)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and start a session.

    GET renders the form; POST checks credentials against the users table.
    """
    if session.get("id"):
        return redirect(url_for('home'))
    if request.method != 'POST':
        return render_template('login.html')
    username = request.form.get("username")
    password = request.form.get("password")
    if not username or not password:
        flash(u'Please fill every credentials.', 'sorry')
        return redirect(request.url)
    # School names are stored lowercased, so look the name up lowercased.
    rows = db.execute("SELECT * FROM users WHERE schoolname = :schoolname", schoolname=username.lower())
    if len(rows) != 1 or not check_password_hash(rows[0]["hash"], password):
        flash(u'Invalid username and/or password.', 'sorry')
        return redirect(request.url)
    session['id'] = rows[0]['id']
    flash(u'Logged In!', 'okay')
    return redirect(url_for('home'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new school account plus its data directory, then redirect
    to the login page.

    GET renders the form; POST validates, inserts the user (name and mail
    stored lowercased) and creates ``dirs/<user id>``.
    """
    if session.get("id"):
        return redirect(url_for('home'))
    if request.method != 'POST':
        return render_template("register.html")
    # Ensure all credentials were submitted
    if not request.form.get("username") or not request.form.get("password") or not request.form.get("mail"):
        flash(u'Please fill every credentials.', 'sorry')
        return redirect(request.url)
    if request.form.get("password") != request.form.get("confirmation"):
        flash(u'Passwords do not match.', 'sorry')
        return redirect(request.url)
    rows = db.execute("SELECT * FROM users WHERE schoolname = :schoolname", schoolname=request.form.get('username').lower())
    rows2 = db.execute("SELECT * FROM users WHERE mail = :schoolname", schoolname=request.form.get('mail').lower())
    if len(rows) != 0:
        flash(u'This school name is already taken.', 'sorry')
        return redirect(request.url)
    if len(rows2) != 0:
        flash(u'This mail is already registered.', 'sorry')
        return redirect(request.url)
    db.execute("INSERT INTO users (schoolname, mail, hash) VALUES (:name, :mail, :hash)", name=request.form.get("username").lower(), mail=request.form.get("mail").lower() , hash=generate_password_hash(request.form.get("password")))
    rows = db.execute("SELECT * FROM users WHERE schoolname = :schoolname", schoolname=request.form.get('username').lower())
    # NOTE(review): the rest of the app keys the session on 'id' (see
    # login()); 'user_id' looks like a leftover -- verify nothing reads it.
    session["user_id"] = rows[0]["id"]
    # BUG FIX: os.path.join builds the path portably (the old 'dirs\\'
    # prefix only worked on Windows).
    os.mkdir(os.path.join('dirs', str(session["user_id"])))
    flash(u"Registered!", 'okay')
    return redirect(url_for('login'))
@app.route('/logout')
@login_required
def logout():
    """Drop the whole session and send the user back to the login page."""
    session.clear()
    return redirect(url_for('login'))
@app.route('/learnmore')
def learnmore():
    """Static informational page about the site."""
    return render_template('learn.html')
@app.route('/new', methods=['GET', 'POST'])
@login_required
def new():
    """Create a new page from markdown content submitted by the user.

    GET renders the form; POST validates the fields, rejects duplicate
    headers, and stores the rendered HTML.
    """
    if request.method != 'POST':
        return render_template('new.html')
    header = request.form.get('header')
    desc = request.form.get('desc')
    content = request.form.get('content')
    if not header or not desc or not content:
        flash('Please fill everything.', 'sorry')
        return redirect(url_for('new'))
    existing = db.execute("SELECT * FROM sites WHERE header = :header", header=header.lower())
    if len(existing) != 0:
        flash('Header already exists.', 'sorry')
        return redirect(url_for('new'))
    db.execute("INSERT INTO sites (header, desc, content, user_id) VALUES (:header, :desc, :content, :id)", header=header, desc=desc, content=markdown.markdown(content), id=session['id'])
    flash(u'Created!', 'okay')
    return redirect(url_for('home'))
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page with the proper status code."""
    # BUG FIX: actually set the 404 status -- the old comment claimed it was
    # set explicitly, but the handler returned 200.
    return render_template('404.html'), 404
# Run the development server when executed directly.
if __name__ == "__main__":
    app.run(debug=True)
| 8,530 | 2,677 |
from pycipher import Rot13
import unittest
class TestRot13(unittest.TestCase):
    """ROT13 is its own inverse, so both directions map a..z onto n..m."""

    ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    EXPECTED = 'nopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklm'

    def test_decipher(self):
        deciphered = Rot13().decipher(self.ALPHABET)
        self.assertEqual(deciphered.upper(), self.EXPECTED.upper())

    def test_encipher(self):
        enciphered = Rot13().encipher(self.ALPHABET)
        self.assertEqual(enciphered.upper(), self.EXPECTED.upper())
| 621 | 257 |
from typing import Any
from hypothesis import given
from lz.functional import identity
from tests import strategies
@given(strategies.scalars)
def test_basic(object_: Any) -> None:
    """identity must return the very same object it was given."""
    assert identity(object_) is object_
| 246 | 72 |
from datetime import datetime, timedelta
from discord import Embed
from discord.ext.commands import Cog
from discord.ext.commands import command
import logging
class Reactionpolls(Cog):
    """Cog that runs timed, reaction-based polls in Discord channels."""
    # One numbered emoji per answer option (hence the 10-option limit).
    NUMBERS = [
        "1️⃣", "2️⃣", "3️⃣", "4️⃣", "5️⃣", "6️⃣", "7️⃣", "8️⃣", "9️⃣", "🔟"
    ]
    def __init__(self, bot):
        self.bot = bot
        self.log = logging.getLogger("gerfroniabot.reactionpolls")
        # Message ids of currently running polls; used to gate the
        # one-vote-per-member enforcement in on_raw_reaction_add.
        self.polls = []
    @Cog.listener()
    async def on_ready(self):
        # Report this cog as ready exactly once per bot start.
        if not self.bot.ready:
            self.bot.cogs_ready.ready_up("reactionpolls")
        self.log.info("Reactionpolls cog ready")
    @command(name="umfrage", aliases=["umf"], brief="Erstelle eine offene Umfrage")
    async def make_poll(self, ctx, minutes: int, question: str, *options):
        # The docstring below is the user-facing help text shown by the
        # bot's (German) help command, so it is deliberately left in German.
        """
        Erstelle eine offene Umfrage, auf die alle anderen Mitglieder mit Emojis reagieren können, um abzustimmen.
        Der erste Parameter ist die Dauer in Minuten, nach der der Bot das Ergebnis bekanntgeben wird. Der zweite
        Parameter, der gegebenenfalls in "Anführungszeichen" gesetzt werden muss, wenn er Leerzeichen enthält, ist
        die Frage, die du den Mitgliedern stellen möchtest. Alle weiteren Parameter (durch Leerzeichen getrennt)
        werden als Antwortmöglichkeiten hinzugefügt. Du kannst höchstens zehn Optionen angeben.
        """
        # Validate duration (1..120 minutes) and option count (<= 10 emojis).
        if minutes < 1 or minutes > 120:
            await ctx.send(":ballot_box_with_check: Die Umfragedauer muss zwischen 1 und 120 Minuten liegen.")
            return
        if len(options) > 10:
            await ctx.send(":ballot_box_with_check: Du kannst nicht mehr als 10 Antwortmöglichkeiten festlegen.")
            return
        embed = Embed(
            title=f":ballot_box_with_check: {question}",
            description=f"Umfrage von {ctx.author.display_name}",
            timestamp=datetime.utcnow(),
            colour=ctx.author.colour
        )
        run_until = datetime.now() + timedelta(minutes=minutes)
        fields= [("Antwortmöglichkeiten", "\n".join([f"{self.NUMBERS[idx]} {option}" for idx, option in enumerate(options)]), False),
                ("Hilfe", f"Reagiere mit der entsprechenden Zahl auf diese Nachricht, um abzustimmen. "
                          f"Die Umfrage läuft bis {run_until.strftime('%H:%M')} Uhr.", False)]
        for name, value, inline in fields:
            embed.add_field(name=name, value=value, inline=inline)
        message = await ctx.send(embed=embed)
        # Seed one reaction per option so members can vote with one click.
        for emoji in self.NUMBERS[:len(options)]:
            await message.add_reaction(emoji)
        self.polls.append(message.id)
        # Schedule the result announcement for when the poll expires.
        self.bot.scheduler.add_job(self.complete_poll, "date", run_date=run_until, args=[message.channel.id, message.id])
    async def complete_poll(self, channel_id, message_id):
        """Announce the winning option once the poll duration has elapsed."""
        message = await self.bot.get_channel(channel_id).fetch_message(message_id)
        most_voted = max(message.reactions, key=lambda r: r.count)
        # count-1 excludes the bot's own seed reaction from the tally.
        await message.channel.send(f":ballot_box_with_check: Die Abstimmung ist beendet. Option {most_voted.emoji} hat mit {most_voted.count-1} Stimmen gewonnen.")
    @Cog.listener()
    async def on_raw_reaction_add(self, payload):
        """Enforce one vote per member by removing any earlier reaction they
        left on the same poll message."""
        if payload.message_id in self.polls:
            message = await self.bot.get_channel(payload.channel_id).fetch_message(payload.message_id)
            for reaction in message.reactions:
                if (not payload.member.bot
                    and payload.member in await reaction.users().flatten()
                    and reaction.emoji != payload.emoji.name):
                    await message.remove_reaction(reaction.emoji, payload.member)
def setup(bot):
    # Standard discord.py extension entry point: register the cog.
    bot.add_cog(Reactionpolls(bot))
| 3,703 | 1,193 |
"""
Copyright (C) 2020 ETH Zurich. All rights reserved.
Author: Sergei Vostrikov, ETH Zurich
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Basic libraries
import argparse
import numpy as np
import sys
from os.path import dirname, abspath
from pybf.pybf.io_interfaces import ImageLoader
from pybf.pybf.visualization import plot_image
def visualize_image_dataset(path_to_img_dataset,
                            save_path=None,
                            save_visualized_images=False,
                            show_images=True,
                            frames_to_plot=None,
                            low_res_img_to_plot=None,
                            db_range=None):
    """Plot low- and high-resolution beamformed images from a dataset.

    path_to_img_dataset: path to the image dataset file.
    save_path: directory for saved figures; defaults to the dataset folder.
    save_visualized_images: write each figure to disk when True.
    show_images: display each figure when True.
    frames_to_plot: frame indices to plot; [] means all frames, None none.
    low_res_img_to_plot: LRI indices to plot; [] means all, None means none.
    db_range: decibel range for log compression, or None.
    """
    # Load beamformed images
    imgLoader = ImageLoader(path_to_img_dataset)
    # Check path to save images
    if save_path is None:
        # Construct save path (save to dataset folder)
        len_to_cut = len(path_to_img_dataset.split('/')[-1])
        save_path = path_to_img_dataset[:-1 - len_to_cut]
    # Scatterer coordinates are only available for simulated datasets.
    if imgLoader._simulation_flag:
        scs_coords_xz = imgLoader.get_scatters_coords()[[0,1],:]
    else:
        scs_coords_xz = None
    # Get the coordinates of transducer elements
    elements_coord = imgLoader.get_elements_coords()
    # Calculate image sizes
    pixels_coords = imgLoader.get_pixels_coords()
    image_size_x_0 = pixels_coords[0, :].min()
    image_size_x_1 = pixels_coords[0, :].max()
    image_size_z_0 = pixels_coords[1, :].min()
    image_size_z_1 = pixels_coords[1, :].max()
    # An empty list means "plot everything"; None means "plot nothing".
    # (BUG FIX: these length checks used "is 0", which relies on CPython's
    # small-int caching and raises SyntaxWarning on Python >= 3.8.)
    if frames_to_plot is not None:
        if len(frames_to_plot) == 0:
            frames_to_plot = imgLoader.frame_indices
    else:
        frames_to_plot = []
    if low_res_img_to_plot is not None:
        if len(low_res_img_to_plot) == 0:
            low_res_img_to_plot = imgLoader.lri_indices
    else:
        low_res_img_to_plot = []
    # Iterate over frames and low resolution images
    for n_frame in frames_to_plot:
        # Plot Low Resolution Images
        for n_lri in low_res_img_to_plot:
            img_data = imgLoader.get_low_res_image(n_frame, n_lri)
            # Extract envelope
            img_data = np.abs(img_data)
            plot_image(img_data,
                       elements_coords_xz=elements_coord,
                       title='Frame ' + str(n_frame) +' LRI ' + str(n_lri),
                       image_x_range=[image_size_x_0, image_size_x_1],
                       image_z_range=[image_size_z_0, image_size_z_1],
                       db_range=db_range,
                       scatters_coords_xz=scs_coords_xz,
                       framework='plotly',
                       save_fig=save_visualized_images,
                       show=show_images,
                       path_to_save=save_path)
        # Plot High Resolution Image
        img_data = imgLoader.get_high_res_image(n_frame)
        # Extract envelope
        img_data = np.abs(img_data)
        plot_image(img_data,
                   elements_coords_xz=elements_coord,
                   title='Frame ' + str(n_frame) +' HRI',
                   image_x_range=[image_size_x_0, image_size_x_1],
                   image_z_range=[image_size_z_0, image_size_z_1],
                   db_range=db_range,
                   scatters_coords_xz=scs_coords_xz,
                   framework='plotly',
                   save_fig=save_visualized_images,
                   show=show_images,
                   path_to_save=save_path)
    # Close the file with beamformed images
    imgLoader.close_file()
    return
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--path_to_img_dataset',
        type=str,
        default='',
        help='Path to the image dataset file.')

    def str2bool(v):
        """Parse a human-friendly boolean command-line value."""
        if isinstance(v, bool):
            return v
        # Values are lowercased first, so only lowercase spellings matter
        # (the old 'True'/'False' entries could never match).
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        elif v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        else:
            raise argparse.ArgumentTypeError('Boolean value expected.')

    # Parameters for visualization
    parser.add_argument(
        '--save_visualized_images',
        type=str2bool,
        nargs='?',
        const=True,
        default=False,
        help='Flag to save visualized images.')
    parser.add_argument(
        '--frames_to_plot',
        type=int,
        nargs="+",
        default=None,
        help='Space separated list of frames to plot.\
              "[]" - plot all frames. "None" - plot none.')
    parser.add_argument(
        '--low_res_img_to_plot',
        type=int,
        nargs="+",
        default=None,
        help='Space separated list of low resolution images to plot.\
              "[]" - plot all frames. "None" - plot none.')
    parser.add_argument(
        '--db_range',
        type=float,
        default=None,
        help='Decibels range for log compression of images ')

    FLAGS, unparsed = parser.parse_known_args()

    # BUG FIX: pass flags by keyword.  Positionally, save_visualized_images
    # landed in the save_path parameter and every later flag shifted one
    # slot (frames_to_plot became save_visualized_images, and so on).
    visualize_image_dataset(FLAGS.path_to_img_dataset,
                            save_visualized_images=FLAGS.save_visualized_images,
                            frames_to_plot=FLAGS.frames_to_plot,
                            low_res_img_to_plot=FLAGS.low_res_img_to_plot,
                            db_range=FLAGS.db_range)
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import mindspore.dataset as ds
# Test fixture locations used by the get_dataset_size tests below.
# Raw ImageNet-style image folder (read by ImageFolderDatasetV2).
IMAGENET_RAWDATA_DIR = "../data/dataset/testImageNetData2/train"
# TFRecord data files read together by TFRecordDataset.
IMAGENET_TFFILE_DIR = ["../data/dataset/test_tf_file_3_images2/train-0000-of-0001.data",
                       "../data/dataset/test_tf_file_3_images2/train-0000-of-0002.data",
                       "../data/dataset/test_tf_file_3_images2/train-0000-of-0003.data",
                       "../data/dataset/test_tf_file_3_images2/train-0000-of-0004.data"]
# MNIST binary data directory.
MNIST_DATA_DIR = "../data/dataset/testMnistData"
# Manifest file listing samples for ManifestDataset.
MANIFEST_DATA_FILE = "../data/dataset/testManifestData/test.manifest"
# CIFAR-10 / CIFAR-100 binary data directories.
CIFAR10_DATA_DIR = "../data/dataset/testCifar10Data"
CIFAR100_DATA_DIR = "../data/dataset/testCifar100Data"
def test_imagenet_rawdata_dataset_size():
    """Check get_dataset_size on the raw ImageNet folder, whole and sharded."""
    # The full dataset holds 6 images.
    assert ds.ImageFolderDatasetV2(IMAGENET_RAWDATA_DIR).get_dataset_size() == 6
    # Each shard reports ceil(total / num_shards) samples.
    for num_shards, expected_size in ((1, 6), (2, 3), (3, 2)):
        shard = ds.ImageFolderDatasetV2(
            IMAGENET_RAWDATA_DIR, num_shards=num_shards, shard_id=0)
        assert shard.get_dataset_size() == expected_size
def test_imagenet_tf_file_dataset_size():
    """Check get_dataset_size on the TFRecord files, whole and sharded."""
    # All TFRecord files together hold 12 records.
    assert ds.TFRecordDataset(IMAGENET_TFFILE_DIR).get_dataset_size() == 12
    # Each shard reports total / num_shards records.
    for num_shards, expected_size in ((1, 12), (2, 6), (3, 4)):
        shard = ds.TFRecordDataset(
            IMAGENET_TFFILE_DIR, num_shards=num_shards, shard_id=0)
        assert shard.get_dataset_size() == expected_size
def test_mnist_dataset_size():
    """Check get_dataset_size on the MNIST test data, whole and sharded."""
    # The full dataset holds 10000 samples.
    assert ds.MnistDataset(MNIST_DATA_DIR).get_dataset_size() == 10000
    # Each shard reports ceil(total / num_shards) samples.
    for num_shards, expected_size in ((1, 10000), (2, 5000), (3, 3334)):
        shard = ds.MnistDataset(
            MNIST_DATA_DIR, num_shards=num_shards, shard_id=0)
        assert shard.get_dataset_size() == expected_size
def test_manifest_dataset_size():
    """Check get_dataset_size on the manifest data, whole and sharded."""
    # The manifest lists 4 samples.
    assert ds.ManifestDataset(MANIFEST_DATA_FILE).get_dataset_size() == 4
    # Each shard reports ceil(total / num_shards) samples.
    for num_shards, expected_size in ((1, 4), (2, 2), (3, 2)):
        shard = ds.ManifestDataset(
            MANIFEST_DATA_FILE, num_shards=num_shards, shard_id=0)
        assert shard.get_dataset_size() == expected_size
def test_cifar10_dataset_size():
    """Check get_dataset_size on the CIFAR-10 data, whole and sharded."""
    # The full dataset holds 10000 samples.
    assert ds.Cifar10Dataset(CIFAR10_DATA_DIR).get_dataset_size() == 10000
    # Each shard reports ceil(total / num_shards) samples.
    for num_shards, expected_size in ((1, 10000), (2, 5000), (3, 3334),
                                      (7, 1429)):
        shard = ds.Cifar10Dataset(
            CIFAR10_DATA_DIR, num_shards=num_shards, shard_id=0)
        assert shard.get_dataset_size() == expected_size
def test_cifar100_dataset_size():
    """Check get_dataset_size on the CIFAR-100 data, whole and sharded."""
    # The full dataset holds 10000 samples.
    assert ds.Cifar100Dataset(CIFAR100_DATA_DIR).get_dataset_size() == 10000
    # Each shard reports ceil(total / num_shards) samples.
    for num_shards, expected_size in ((1, 10000), (2, 5000), (3, 3334)):
        shard = ds.Cifar100Dataset(
            CIFAR100_DATA_DIR, num_shards=num_shards, shard_id=0)
        assert shard.get_dataset_size() == expected_size
| 4,725 | 2,087 |
# Copyright (c) 2021 Red Hat, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from oslo_log import log
import tobiko
from tobiko.shell import sh
LOG = log.getLogger(__name__)
def get_iperf_command(parameters, ssh_client):
    """Build the iperf3 command for the interface assigned to *ssh_client*."""
    iperf_interface = get_iperf_interface(ssh_client=ssh_client)
    return iperf_interface.get_iperf_command(parameters)
def get_iperf_interface(ssh_client):
    """Look up the iperf interface registered for *ssh_client*.

    Uses the shared IperfInterfaceManager fixture and validates the
    returned object type before handing it back.
    """
    fixture = tobiko.setup_fixture(IperfInterfaceManager)
    iperf_interface = fixture.get_iperf_interface(ssh_client=ssh_client)
    tobiko.check_valid_type(iperf_interface, IperfInterface)
    return iperf_interface
class IperfInterfaceManager(tobiko.SharedFixture):
    """Shared fixture tracking which iperf interface each SSH client uses."""

    def __init__(self):
        super(IperfInterfaceManager, self).__init__()
        # Per-SSH-client interface assignments.
        self.client_interfaces = {}
        # Explicitly registered interfaces.
        self.interfaces = []
        # Fallback used when a client has no registered interface.
        self.default_interface = IperfInterface()

    def add_iperf_interface(self, interface):
        """Register an iperf interface implementation."""
        LOG.debug('Register iperf interface %r', interface)
        self.interfaces.append(interface)

    def get_iperf_interface(self, ssh_client):
        """Return the interface for *ssh_client*, assigning the default once."""
        if ssh_client not in self.client_interfaces:
            LOG.debug('Assign default iperf interface to SSH client %r',
                      ssh_client)
            self.client_interfaces[ssh_client] = self.default_interface
        return self.client_interfaces[ssh_client]
class IperfInterface(object):
    """Translates iperf parameter objects into iperf3 command lines."""

    def get_iperf_command(self, parameters):
        """Return the full iperf3 shell command for *parameters*."""
        options = self.get_iperf_options(parameters)
        command = sh.shell_command(['iperf3'] + options)
        LOG.debug(f'Got iperf command: {command}')
        return command

    def get_iperf_options(self, parameters):
        """Assemble iperf3 options; client-only options are skipped in server mode."""
        is_client = parameters.mode == 'client'
        options = []
        if parameters.port:
            options += self.get_port_option(parameters.port)
        if parameters.timeout and is_client:
            options += self.get_timeout_option(parameters.timeout)
        if parameters.output_format:
            options += self.get_output_format_option(parameters.output_format)
        if parameters.bitrate and is_client:
            options += self.get_bitrate_option(parameters.bitrate)
        if parameters.download and is_client:
            options += self.get_download_option(parameters.download)
        if parameters.protocol and is_client:
            options += self.get_protocol_option(parameters.protocol)
        return options + self.get_mode_option(parameters)

    @staticmethod
    def get_mode_option(parameters):
        """Return the client/server mode arguments, validating the mode."""
        mode = parameters.mode
        if mode not in ('client', 'server'):
            raise ValueError('iperf mode values allowed: [client|server]')
        if mode == 'server':
            return ['-s', '-D']  # server mode is executed with daemon mode
        if not parameters.ip:
            raise ValueError('iperf client mode requires a destination '
                             'IP address')
        return ['-c', parameters.ip]

    @staticmethod
    def get_download_option(download):
        """'-R' reverses the transfer direction (server sends)."""
        return ['-R'] if download else []

    @staticmethod
    def get_protocol_option(protocol):
        """TCP is the iperf3 default; UDP needs '-u'."""
        if protocol == 'tcp':
            return []
        if protocol == 'udp':
            return ['-u']
        raise ValueError('iperf protocol values allowed: [tcp|udp]')

    @staticmethod
    def get_timeout_option(timeout):
        return ['-t', timeout]

    @staticmethod
    def get_output_format_option(output_format):
        """Only JSON output is supported ('-J')."""
        if output_format == 'json':
            return ['-J']
        raise ValueError('iperf output format values allowed: '
                         '[json]')

    @staticmethod
    def get_port_option(port):
        return ['-p', port]

    @staticmethod
    def get_bitrate_option(bitrate):
        return ['-b', bitrate]
| 4,651 | 1,308 |
import tensorflow as tf
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
#print(np.version.version)
#np.set_printoptions(precision=4)
# Scratch examples: building TF datasets and converting NumPy data to tensors.
slice_dataset = tf.data.Dataset.from_tensor_slices([8, 3, 0, 8, 2, 1])
int_tensor = tf.convert_to_tensor(np.arange(5))
float_tensor = tf.cast(int_tensor, dtype=tf.float32)
print(int_tensor)
print(float_tensor)
print(slice_dataset)
# A 3x3 float64 zero matrix as a tensor.
zero_matrix = tf.convert_to_tensor(np.zeros([3, 3]))
print(zero_matrix)
# A plain Python list converted with an explicit dtype.
float64_vector = tf.convert_to_tensor([1, 2, 3], dtype=tf.float64)
print(float64_vector)
print(np.random.randint(0, 5))
#LeetCode problem 378: Kth Smallest Element in a Sorted Matrix
class Solution:
    """Kth smallest element in a row- and column-sorted matrix."""

    def kthSmallest(self, matrix: list[list[int]], k: int) -> int:
        """Return the k-th smallest value (1-indexed) among all entries.

        Fixes: the original annotated parameters with ``List[List[int]]``
        without importing ``typing.List``, so executing the class body
        raised NameError; builtin generics (``list[list[int]]``) need no
        import. The manual double loop + append is replaced by a single
        sorted generator expression (O(n*m log(n*m)) time).
        """
        ordered = sorted(value for row in matrix for value in row)
        return ordered[k - 1]