text stringlengths 957 885k |
|---|
<reponame>grassead/opentitan
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
# TODO(drewmacrae) this should be in rules_cc
# pending resolution of https://github.com/bazelbuild/rules_cc/issues/75
load("//rules:bugfix.bzl", "find_cc_toolchain")
"""Rules to build OpenTitan for the RiscV target"""
OPENTITAN_CPU = "@bazel_embedded//constraints/cpu:riscv32"
OPENTITAN_PLATFORM = "@bazel_embedded//platforms:opentitan_rv32imc"
# Maps a target platform label to the CPU constraint list that a target must
# declare `target_compatible_with` for in order to build for that platform.
_targets_compatible_with = {
    OPENTITAN_PLATFORM: [OPENTITAN_CPU],
}
def _platforms_transition_impl(settings, attr):
    """Transition implementation: retargets the build to the rule's `platform` attr."""
    return {"//command_line_option:platforms": attr.platform}
# Configuration transition that switches `--platforms` to the platform named
# by the rule's `platform` attribute (the OpenTitan RV32IMC platform by
# default), so artifacts are built with the embedded toolchain.
_platforms_transition = transition(
    implementation = _platforms_transition_impl,
    inputs = [],
    outputs = ["//command_line_option:platforms"],
)
def _obj_transform(ctx):
    """Implementation of `obj_transform`.

    Runs the resolved toolchain's objcopy over every file in `srcs`,
    converting each input to the output format named by the `format`
    attribute and naming the result `<basename>.<suffix>`.
    """
    cc_toolchain = find_cc_toolchain(ctx)
    outputs = []
    for src in ctx.files.srcs:
        binary = ctx.actions.declare_file("{}.{}".format(src.basename, ctx.attr.suffix))
        outputs.append(binary)
        ctx.actions.run(
            outputs = [binary],
            # The toolchain files must be inputs so objcopy's own runtime
            # dependencies are staged for the action.
            inputs = [src] + cc_toolchain.all_files.to_list(),
            arguments = [
                "--output-target",
                ctx.attr.format,
                src.path,
                binary.path,
            ],
            executable = cc_toolchain.objcopy_executable,
        )
    return [DefaultInfo(files = depset(outputs), data_runfiles = ctx.runfiles(files = outputs))]
# Rule that objcopy-converts ELF files into another object format (raw
# binary by default), building under the OpenTitan platform transition.
obj_transform = rule(
    implementation = _obj_transform,
    cfg = _platforms_transition,
    attrs = {
        "srcs": attr.label_list(allow_files = True),
        "suffix": attr.string(default = "bin"),
        "format": attr.string(default = "binary"),
        "platform": attr.string(default = OPENTITAN_PLATFORM),
        "_cc_toolchain": attr.label(default = Label("@bazel_tools//tools/cpp:current_cc_toolchain")),
        "_allowlist_function_transition": attr.label(
            default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
        ),
    },
    toolchains = ["@rules_cc//cc:toolchain_type"],
    # Resolve the C++ toolchain in the transitioned (target) configuration,
    # consistent with the sibling `elf_to_disassembly` rule below.
    incompatible_use_toolchain_transition = True,
)
def _elf_to_disassembly(ctx):
    """Implementation of `elf_to_disassembly`.

    Disassembles every ELF file in `srcs` with the toolchain's objdump,
    writing the annotated listing to `<basename>.dis`.
    """
    cc_toolchain = find_cc_toolchain(ctx)
    outputs = []
    for src in ctx.files.srcs:
        disassembly = ctx.actions.declare_file("{}.dis".format(src.basename))
        outputs.append(disassembly)
        # run_shell is needed (instead of run) to redirect objdump's stdout
        # into the output file.
        ctx.actions.run_shell(
            outputs = [disassembly],
            inputs = [src] + cc_toolchain.all_files.to_list(),
            arguments = [
                cc_toolchain.objdump_executable,
                src.path,
                disassembly.path,
            ],
            command = "$1 --disassemble --headers --line-numbers --source $2 > $3",
        )
    return [DefaultInfo(files = depset(outputs), data_runfiles = ctx.runfiles(files = outputs))]
# Rule that produces objdump disassembly listings for ELF files, building
# under the OpenTitan platform transition.
elf_to_disassembly = rule(
    implementation = _elf_to_disassembly,
    cfg = _platforms_transition,
    attrs = {
        "srcs": attr.label_list(allow_files = True),
        "platform": attr.string(default = OPENTITAN_PLATFORM),
        "_cc_toolchain": attr.label(default = Label("@bazel_tools//tools/cpp:current_cc_toolchain")),
        "_allowlist_function_transition": attr.label(
            default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
        ),
    },
    toolchains = ["@rules_cc//cc:toolchain_type"],
    incompatible_use_toolchain_transition = True,
)
def _elf_to_scrambled(ctx):
    """Implementation of `elf_to_scrambled`.

    Runs the rom_ctrl scramble_image tool on every file in `srcs` to
    produce the scrambled ROM image `<basename>.scr.40.vmem`.
    """
    outputs = []
    for src in ctx.files.srcs:
        scrambled = ctx.actions.declare_file("{}.scr.40.vmem".format(src.basename))
        outputs.append(scrambled)
        ctx.actions.run(
            outputs = [scrambled],
            inputs = [
                src,
                ctx.files._tool[0],
                ctx.files._config[0],
            ],
            arguments = [
                ctx.files._config[0].path,
                src.path,
                scrambled.path,
            ],
            # NOTE(review): the executable is passed as a path string rather
            # than a File/FilesToRun object; the tool is still staged because
            # it is listed in `inputs` above.
            executable = ctx.files._tool[0].path,
        )
    return [DefaultInfo(files = depset(outputs), data_runfiles = ctx.runfiles(files = outputs))]
# Rule that produces scrambled ROM vmem images from ELF files, building
# under the OpenTitan platform transition. No C++ toolchain is needed: the
# work is done by the private `_tool` (scramble_image.py) with `_config`
# (the top_earlgrey hjson) as its first argument.
elf_to_scrambled = rule(
    implementation = _elf_to_scrambled,
    cfg = _platforms_transition,
    attrs = {
        "srcs": attr.label_list(allow_files = True),
        "platform": attr.string(default = OPENTITAN_PLATFORM),
        "_tool": attr.label(default = "//hw/ip/rom_ctrl/util:scramble_image.py", allow_files = True),
        "_config": attr.label(default = "//hw/top_earlgrey:data/autogen/top_earlgrey.gen.hjson", allow_files = True),
        "_allowlist_function_transition": attr.label(
            default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
        ),
    },
)
def opentitan_binary(
        name,
        platform = OPENTITAN_PLATFORM,
        per_device_deps = {
            "verilator": ["//sw/device/lib/arch:sim_verilator"],
            "dv": ["//sw/device/lib/arch:sim_dv"],
            "fpga_nexysvideo": ["//sw/device/lib/arch:fpga_nexysvideo"],
            "cw310": ["//sw/device/lib/arch:fpga_cw310"],
        },
        output_bin = True,
        output_disassembly = True,
        output_scrambled = False,
        **kwargs):
    """A helper macro for generating OpenTitan binary artifacts.

    This macro is mostly a wrapper around cc_binary, but creates artifacts
    for each of the keys in `per_device_deps`. The actual artifacts
    created are an ELF file, a BIN file, the disassembly and the scrambled
    ROM image. Each of these output targets performs a bazel transition to
    the RV32I toolchain to build the target under the correct compiler.

    Args:
      @param name: The name of this rule.
      @param platform: The target platform for the artifacts.
      @param per_device_deps: The deps for each of the execution environments.
      @param output_bin: Whether or not to emit a BIN file.
      @param output_disassembly: Whether or not to emit a disassembly file.
      @param output_scrambled: Whether or not to emit a SCR file.
      @param **kwargs: Arguments to forward to `cc_binary`.

    Emits rules:
      For each device in per_device_deps entry:
        cc_binary named: name_device
        obj_transform named: name_device_elf
        optionally:
          obj_transform named: name_device_bin
          elf_to_disassembly named: name_device_dis
          elf_to_scrambled named: name_device_scr
      filegroup named: name
        with all the generated rules
    """

    # Bare-metal target: no standard library or start files.
    copts = kwargs.pop("copts", []) + [
        "-nostdlib",
        "-ffreestanding",
    ]
    linkopts = kwargs.pop("linkopts", []) + [
        "-nostartfiles",
        "-nostdlib",
    ]
    deps = kwargs.pop("deps", [])
    targets = []
    for (device, dev_deps) in per_device_deps.items():
        devname = "{}_{}".format(name, device)

        # The per-device executable itself.
        native.cc_binary(
            name = devname,
            deps = deps + dev_deps,
            target_compatible_with = _targets_compatible_with[platform],
            copts = copts,
            linkopts = linkopts,
            **kwargs
        )

        # The ELF artifact (always emitted).
        targets.append(":" + devname + "_elf")
        obj_transform(
            name = devname + "_elf",
            srcs = [devname],
            format = "elf32-little",
            suffix = "elf",
            platform = platform,
        )
        if output_bin:
            targets.append(":" + devname + "_bin")
            obj_transform(
                name = devname + "_bin",
                srcs = [devname],
                platform = platform,
            )
        if output_disassembly:
            targets.append(":" + devname + "_dis")
            elf_to_disassembly(
                name = devname + "_dis",
                srcs = [devname],
                platform = platform,
            )
        if output_scrambled:
            targets.append(":" + devname + "_scr")
            elf_to_scrambled(
                name = devname + "_scr",
                srcs = [devname],
                platform = platform,
            )

    # Convenience group bundling every generated artifact under `name`.
    native.filegroup(
        name = name,
        srcs = targets,
    )
|
import pandas as pd
import requests
import os.path
import bs4
from os import path
#Data loader functions belong here. This is where
# information about the data files is found.
def load_max_quant(version="", level='protein',
                   prefix="Intensity", contains=None,
                   sample_type=""):
    """Load a MaxQuant output table and return it as a DataFrame.

    Parameters:
        version (str): Suffix used to build the data/url file paths to read.
        level (str): 'protein' loads the proteinGroups table; anything else
            loads the peptides table.
        prefix (str): Keep only columns whose name starts with this prefix
            (falsy disables the filter). By default the raw 'Intensity'
            columns are selected.
        contains (list of str): Substrings a column name must also contain.
            Defaults to ["_"].
        sample_type (str): Optional label (e.g. "HeLa") prepended to the
            cleaned-up column names.

    Returns:
        pandas.DataFrame: Selected intensity columns, with contaminants,
        decoys and (for proteins) site-only identifications dropped.
    """
    # Fix: avoid a mutable default argument for `contains`.
    if contains is None:
        contains = ["_"]
    if level == 'protein':
        data_path = "data/proteinGroups_{0}.txt".format(version)
        url_path = "data/proteinGroups_{0}_url.txt".format(version)
    else:
        data_path = "data/peptides_{0}.txt".format(version)
        url_path = "data/peptides_{0}_url.txt".format(version)
    file = download_file(download_to_path=data_path, url_file_path=url_path)
    # Read in data.
    df = pd.read_csv(file, sep='\t', header=0, index_col=0)
    # Filter the columns based on the prefix and other "contains" requirements.
    headings = df.columns
    if prefix:  # filter by columns beginning in prefix
        headings = [i for i in headings if i.startswith(prefix)]
    for req in contains:
        headings = [i for i in headings if req in i]
    # Drop contaminants and decoys.
    df = df.drop(df[df['Potential contaminant'] == '+'].index)
    df = df.drop(df[df.Reverse == '+'].index)
    if level == 'protein':
        # Optionally, discard those that were only identified by site.
        # This column does not exist at the peptide level.
        df = df.drop(df[df['Only identified by site'] == '+'].index)
    df = df[headings]
    # Remove the prefix (ie, "Intensity") from the column names and
    # optionally prepend a sample type (ie, "HeLa").
    new_names = {}
    for c in df.columns.values:
        sample_name = c[len(prefix):].strip()
        new_names[c] = "{0}_{1}".format(sample_type, sample_name)
    df.rename(columns=new_names, inplace=True)
    # (Removed a dead `df.head()` call whose result was discarded.)
    return df
def load_FragPipe(month='June', contains=None, level='protein',
                  suffix="Total Intensity"):
    """Load a FragPipe combined output table and return it as a DataFrame.

    Parameters:
        month (str): Dataset tag used to build the data/url file paths.
        contains (list of str): Substrings a column name must contain to be
            kept. Defaults to ['Subject1'].
        level (str): 'protein' or 'peptide'; selects the file and index column.
        suffix (str): Keep only columns ending with this suffix (falsy
            disables the filter).

    Returns:
        pandas.DataFrame: The selected intensity columns with the suffix
        stripped from their names, or False if the download failed.
    """
    # Fix: avoid a mutable default argument for `contains`.
    if contains is None:
        contains = ['Subject1']
    file_name = "data/combined_{0}_{1}_FP.tsv".format(level, month)
    url_file_path = "data/combined_{0}_{1}_FP_url.txt".format(level, month)
    file = download_file(download_to_path=file_name, url_file_path=url_file_path)
    if file == 1:
        print("Error with file download.")
        return False
    if month == 'June':
        # Drop extra replicate - Yiran said these two weren't good quality,
        # I just forgot to not run it so for now I'll exclude it at this level.
        not_contains = ['15']
    else:
        not_contains = []
    # Read in data.
    if level == 'protein':
        index_col = 3
    else:
        index_col = 0  # for peptides and by default, take the first column as index
    df = pd.read_csv(file, sep='\t', header=0, index_col=index_col)
    # Filter the columns based on the suffix and other "contains" requirements.
    headings = df.columns
    if suffix:  # filter by options such as suffix, contains
        headings = [i for i in headings if i.endswith(suffix)]
    for req in contains:
        headings = [i for i in headings if req in i]
    for req in not_contains:
        headings = [i for i in headings if req not in i]
    df = df[headings]
    # Remove the suffix (ie, "Total Intensity") from the column names.
    new_names = {}
    for c in df.columns.values:
        new_names[c] = c.split(' ')[0]
    df.rename(columns=new_names, inplace=True)
    # (Removed a dead `df.head()` call whose result was discarded.)
    return df
def download_file(download_to_path="data/datafile.txt", url_file_path="data/url.txt",
                  password_file_path="data/password.txt", redownload=False):
    """Download a Box-hosted file to the specified location.

    Parameters:
        download_to_path (str): Where to save the file on the local machine.
        url_file_path (str): Path to a file containing the shared Box URL.
        password_file_path (str): Path to a file containing the share password.
        redownload (bool): If True, download even if the file already exists.

    Returns:
        str: The path the file was downloaded to, or the int 1 if the URL or
        password file is missing (kept for backward compatibility).
    """
    # If the file is already present and no refresh was requested, skip the
    # network round trip entirely.
    if not redownload and path.exists(download_to_path):
        return download_to_path
    if path.exists(url_file_path):
        with open(url_file_path, 'r') as url_file:
            url = url_file.read().strip()
    else:
        print("MISSING URL FILE")
        return 1
    if path.exists(password_file_path):
        with open(password_file_path, 'r') as password_file:
            password = password_file.read().strip()
    else:
        print("MISSING PASSWORD FILE")
        return 1
    # NOTE(review): the original made two passes here; preserved as a
    # best-effort retry — confirm whether a single attempt suffices.
    for _ in range(2):
        with requests.Session() as session:  # Use a session object to save cookies
            # Construct the urls for our GET and POST requests.
            get_url = url
            post_url = get_url.replace("https://byu.box.com/shared", "https://byu.app.box.com/public")
            # Send initial GET request and parse the request token out of the response.
            get_response = session.get(get_url)
            soup = bs4.BeautifulSoup(get_response.text, "html.parser")
            token_tag = soup.find(id="request_token")
            token = token_tag.get("value")
            # Send a POST request, with the password and token, to get the data.
            # Fix: the token was previously an undefined placeholder name;
            # send the value parsed from the page above.
            payload = {
                'password': password,
                'request_token': token}
            response = session.post(post_url, data=payload)
            with open(download_to_path, 'wb') as dest:
                dest.write(response.content)
    return download_to_path
def load_fasta():
    """Parse the reviewed human UniProt FASTA and return a Series mapping
    protein accession IDs to the description part of each header line."""
    fasta_path = "data/uniprot-filtered-proteome_3AUP000005640_reviewed_human.fasta"
    # Header lines are formatted like:
    # >sp|Q96IY4|CBPB2_HUMAN Carboxypeptidase B2 OS=Homo sapiens OX=9606 GN=CPB2 PE=1 SV=2
    # followed by sequence lines (MKLCS...).
    id_to_name = {}
    with open(fasta_path) as fasta:
        for raw_line in fasta:
            if not raw_line.startswith('>'):
                continue  # sequence line, skip
            fields = raw_line.split('|')
            accession = fields[1]
            description = fields[2].split('=')[0].strip('OS')
            id_to_name[accession] = description
    return pd.Series(list(id_to_name.values()), index=id_to_name.keys())
def names_max_quant():
    """Load the MaxQuant protein groups table, keeping only the naming columns."""
    table = download_file(download_to_path="data/proteinGroups.txt",
                          url_file_path="data/proteinGroups_url.txt")
    naming_columns = ['Protein IDs', 'Gene names', 'Fasta headers']
    return pd.read_csv(table, sep='\t', header=0, index_col=0, usecols=naming_columns)
def names_FragPipe(month='lymph_forPRIDE', contains=['Subject1']):
    """Load the FragPipe combined protein table, keeping only the naming columns.

    Note: `contains` is accepted for signature parity with the other loaders
    but is not used here.
    """
    table_path = "data/combined_protein_{0}_FP.tsv".format(month)
    url_path = "data/combined_protein_{0}_FP_url.txt".format(month)
    table = download_file(download_to_path=table_path, url_file_path=url_path)
    naming_columns = ['Protein ID', 'Gene Names', 'Description']
    return pd.read_csv(table, sep='\t', header=0, index_col=0, usecols=naming_columns)
<filename>openslides/agenda/views.py
from html import escape
from django.contrib.auth import get_user_model
from django.db import transaction
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from reportlab.platypus import Paragraph
from openslides.core.config import config
from openslides.utils.exceptions import OpenSlidesError
from openslides.utils.pdf import stylesheet
from openslides.utils.rest_api import (
GenericViewSet,
ListModelMixin,
Response,
RetrieveModelMixin,
UpdateModelMixin,
ValidationError,
detail_route,
list_route,
)
from openslides.utils.views import PDFView
from .access_permissions import ItemAccessPermissions
from .models import Item, Speaker
# Viewsets for the REST API
class ItemViewSet(ListModelMixin, RetrieveModelMixin, UpdateModelMixin, GenericViewSet):
    """
    API endpoint for agenda items.

    There are the following views: metadata, list, retrieve, create,
    partial_update, update, destroy, manage_speaker, speak and tree.
    """
    access_permissions = ItemAccessPermissions()
    queryset = Item.objects.all()

    def check_view_permissions(self):
        """
        Returns True if the user has required permissions.
        """
        if self.action == 'retrieve':
            result = self.get_access_permissions().can_retrieve(self.request.user)
        elif self.action in ('metadata', 'list', 'manage_speaker', 'tree'):
            result = self.request.user.has_perm('agenda.can_see')
            # For manage_speaker and tree requests the rest of the check is
            # done in the specific method. See below.
        elif self.action in ('partial_update', 'update'):
            result = (self.request.user.has_perm('agenda.can_see') and
                      self.request.user.has_perm('agenda.can_see_hidden_items') and
                      self.request.user.has_perm('agenda.can_manage'))
        elif self.action in ('speak', 'sort_speakers', 'numbering'):
            result = (self.request.user.has_perm('agenda.can_see') and
                      self.request.user.has_perm('agenda.can_manage'))
        else:
            # Any other action (e.g. create, destroy) is denied by default.
            result = False
        return result

    def check_object_permissions(self, request, obj):
        """
        Checks if the requesting user has permission to see also an
        organizational item if it is one.
        """
        if obj.is_hidden() and not request.user.has_perm('agenda.can_see_hidden_items'):
            self.permission_denied(request)

    def get_queryset(self):
        """
        Filters organizational items if the user has no permission to see them.
        """
        queryset = super().get_queryset()
        if not self.request.user.has_perm('agenda.can_see_hidden_items'):
            pk_list = [item.pk for item in Item.objects.get_only_agenda_items()]
            queryset = queryset.filter(pk__in=pk_list)
        return queryset

    @detail_route(methods=['POST', 'DELETE'])
    def manage_speaker(self, request, pk=None):
        """
        Special view endpoint to add users to the list of speakers or remove
        them. Send POST {'user': <user_id>} to add a new speaker. Omit
        data to add yourself. Send DELETE {'speaker': <speaker_id>} to remove
        someone from the list of speakers. Omit data to remove yourself.

        Checks also whether the requesting user can do this. He needs at
        least the permissions 'agenda.can_see' (see
        self.check_view_permissions()). In case of adding himself the
        permission 'agenda.can_be_speaker' is required. In case of adding
        someone else the permission 'agenda.can_manage' is required. In
        case of removing someone else 'agenda.can_manage' is required. In
        case of removing himself no other permission is required.
        """
        # Retrieve item.
        item = self.get_object()

        if request.method == 'POST':
            # Retrieve user_id
            user_id = request.data.get('user')

            # Check permissions and other conditions. Get user instance.
            if user_id is None:
                # Add oneself
                if not self.request.user.has_perm('agenda.can_be_speaker'):
                    self.permission_denied(request)
                if item.speaker_list_closed:
                    raise ValidationError({'detail': _('The list of speakers is closed.')})
                user = self.request.user
            else:
                # Add someone else.
                if not self.request.user.has_perm('agenda.can_manage'):
                    self.permission_denied(request)
                try:
                    user = get_user_model().objects.get(pk=int(user_id))
                except (ValueError, get_user_model().DoesNotExist):
                    raise ValidationError({'detail': _('User does not exist.')})

            # Try to add the user. This ensurse that a user is not twice in the
            # list of coming speakers.
            try:
                Speaker.objects.add(user, item)
            except OpenSlidesError as e:
                raise ValidationError({'detail': str(e)})
            message = _('User %s was successfully added to the list of speakers.') % user

        else:
            # request.method == 'DELETE'
            # Retrieve speaker_id
            speaker_id = request.data.get('speaker')

            # Check permissions and other conditions. Get speaker instance.
            if speaker_id is None:
                # Remove oneself
                queryset = Speaker.objects.filter(
                    item=item, user=self.request.user).exclude(weight=None)
                try:
                    # We assume that there aren't multiple entries because this
                    # is forbidden by the Manager's add method. We assume that
                    # there is only one speaker instance or none.
                    speaker = queryset.get()
                except Speaker.DoesNotExist:
                    raise ValidationError({'detail': _('You are not on the list of speakers.')})
            else:
                # Remove someone else.
                if not self.request.user.has_perm('agenda.can_manage'):
                    self.permission_denied(request)
                try:
                    speaker = Speaker.objects.get(pk=int(speaker_id))
                except (ValueError, Speaker.DoesNotExist):
                    raise ValidationError({'detail': _('Speaker does not exist.')})

            # Delete the speaker.
            speaker.delete()
            message = _('Speaker %s was successfully removed from the list of speakers.') % speaker

        # Initiate response.
        return Response({'detail': message})

    @detail_route(methods=['PUT', 'DELETE'])
    def speak(self, request, pk=None):
        """
        Special view endpoint to begin and end speech of speakers. Send PUT
        {'speaker': <speaker_id>} to begin speech. Omit data to begin speech of
        the next speaker. Send DELETE to end speech of current speaker.
        """
        # Retrieve item.
        item = self.get_object()

        if request.method == 'PUT':
            # Retrieve speaker_id
            speaker_id = request.data.get('speaker')
            if speaker_id is None:
                speaker = item.get_next_speaker()
                if speaker is None:
                    raise ValidationError({'detail': _('The list of speakers is empty.')})
            else:
                try:
                    speaker = Speaker.objects.get(pk=int(speaker_id))
                except (ValueError, Speaker.DoesNotExist):
                    raise ValidationError({'detail': _('Speaker does not exist.')})
            speaker.begin_speech()
            message = _('User is now speaking.')

        else:
            # request.method == 'DELETE'
            try:
                # We assume that there aren't multiple entries because this
                # is forbidden by the Model's begin_speech method. We assume that
                # there is only one speaker instance or none.
                current_speaker = Speaker.objects.filter(item=item, end_time=None).exclude(begin_time=None).get()
            except Speaker.DoesNotExist:
                raise ValidationError(
                    {'detail': _('There is no one speaking at the moment according to %(item)s.') % {'item': item}})
            current_speaker.end_speech()
            message = _('The speech is finished now.')

        # Initiate response.
        return Response({'detail': message})

    @detail_route(methods=['POST'])
    def sort_speakers(self, request, pk=None):
        """
        Special view endpoint to sort the list of speakers.

        Expects a list of IDs of the speakers.
        """
        # Retrieve item.
        item = self.get_object()

        # Check data
        speaker_ids = request.data.get('speakers')
        if not isinstance(speaker_ids, list):
            raise ValidationError(
                {'detail': _('Invalid data.')})

        # Get all speakers. Only speakers who have not yet begun to speak
        # (begin_time=None) can be reordered.
        speakers = {}
        for speaker in item.speakers.filter(begin_time=None):
            speakers[speaker.pk] = speaker

        # Check and sort speakers
        valid_speakers = []
        for speaker_id in speaker_ids:
            if not isinstance(speaker_id, int) or speakers.get(speaker_id) is None:
                raise ValidationError(
                    {'detail': _('Invalid data.')})
            valid_speakers.append(speakers[speaker_id])
        weight = 0
        # Persist the new order atomically so a partial sort is never visible.
        with transaction.atomic():
            for speaker in valid_speakers:
                speaker.weight = weight
                speaker.save()
                weight += 1

        # Initiate response.
        return Response({'detail': _('List of speakers successfully sorted.')})

    @list_route(methods=['get', 'put'])
    def tree(self, request):
        """
        Returns or sets the agenda tree.
        """
        if request.method == 'PUT':
            if not (request.user.has_perm('agenda.can_manage') and
                    request.user.has_perm('agenda.can_see_hidden_items')):
                self.permission_denied(request)
            try:
                tree = request.data['tree']
            except KeyError as error:
                response = Response({'detail': 'Agenda tree is missing.'}, status=400)
            else:
                try:
                    Item.objects.set_tree(tree)
                except ValueError as error:
                    response = Response({'detail': str(error)}, status=400)
                else:
                    response = Response({'detail': 'Agenda tree successfully updated.'})
        else:
            # request.method == 'GET'
            response = Response(Item.objects.get_tree())
        return response

    @list_route(methods=['post'])
    def numbering(self, request):
        """
        Auto numbering of the agenda according to the config. Manually added
        item numbers will be overwritten.
        """
        Item.objects.number_all(numeral_system=config['agenda_numeral_system'])
        return Response({'detail': _('The agenda has been numbered.')})
# Views to generate PDFs
class AgendaPDF(PDFView):
    """
    Create a full agenda-PDF.
    """
    required_permission = 'agenda.can_see'
    filename = ugettext_lazy('Agenda')
    document_title = ugettext_lazy('Agenda')

    def append_to_pdf(self, story):
        """Append one paragraph per agenda item, indented by tree depth."""
        tree = Item.objects.get_tree(only_agenda_items=True, include_content=True)

        def walk_tree(subtree, depth=0):
            """
            Generator that yields a two-element-tuple. The first element is an
            agenda-item and the second a number for steps to the root element.
            """
            for node in subtree:
                yield node['item'], depth
                yield from walk_tree(node['children'], depth + 1)

        for item, depth in walk_tree(tree):
            number = "{} ".format(item.item_number) if item.item_number else ''
            title = escape(item.title)
            if depth:
                # Nested items get a Subitem style with six spaces per level.
                indent = " " * 6 * depth
                story.append(Paragraph(
                    "%s%s%s" % (indent, number, title),
                    stylesheet['Subitem']))
            else:
                story.append(Paragraph(
                    "%s%s" % (number, title),
                    stylesheet['Item']))
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio configuration loader.
Invenio-Config is a *base package* of the Invenio digital library framework.
It is usually installed automatically as a dependency. It should facilitate
configuration loading from various sources to an application instance.
The following configuration loaders exists:
- :py:data:`invenio_config.default.InvenioConfigDefault` - ensure required
configuration values are set.
- :py:data:`invenio_config.module.InvenioConfigModule` - for loading
configuration from a Python module.
- :py:data:`invenio_config.entrypoint.InvenioConfigEntryPointModule` - for
loading configuration from a Python module specified by an entry point (by
default ``invenio_config.module``).
- :py:data:`invenio_config.folder.InvenioConfigInstanceFolder` - for loading
configuration from ``cfg`` file in an instance folder.
- :py:data:`invenio_config.env.InvenioConfigEnvironment` - for loading
configuration from environment variables with defined prefix (e.g.
``INVENIO_SECRET_KEY``).
It also includes configuration loader factory that it is used to merge these
sources in predefined order ensuring correct behavior in common scenarios.
Initialization
--------------
Following example needs a writable instance folder, hence we start by creating
a temporary directory.
>>> import tempfile
>>> tmppath = tempfile.mkdtemp()
.. testcode::
:hide:
import atexit
import shutil
atexit.register(lambda: shutil.rmtree(tmppath))
Now we can create a Flask application:
>>> from flask import Flask
>>> app = Flask('myapp', instance_path=tmppath, instance_relative_config=True)
Loaders
-------
You can check default configuration values in newly created ``app``.
>>> 'DEBUG' in app.config
True
>>> app.config.get('SECRET_KEY') is None
True
Default
~~~~~~~
The default configuration loader makes sure that the required configuration
values are always loaded. You should call it **after** all configuration
loaders have been already called.
For instance, the default configuration loader will warn if the ``SECRET_KEY``
is not defined:
>>> import warnings
>>> from invenio_config import InvenioConfigDefault
>>> with warnings.catch_warnings(record=True) as w:
... config_default = InvenioConfigDefault(app=app)
... assert len(w) == 1
>>> app.config['SECRET_KEY']
'CHANGE_ME'
Module
~~~~~~
The module loader accepts an object and proxies the call to
:meth:`flask.Config.from_object`.
Here is an example of a configuration object:
>>> class Config:
... EXAMPLE = 'module'
>>> from invenio_config import InvenioConfigModule
>>> config_module = InvenioConfigModule(app=app, module=Config)
>>> app.config['EXAMPLE']
'module'
Entry point
~~~~~~~~~~~
The entry point loader works similar to the module loader, it just loads the
config module from the entry point ``invenio_config.module``:
>>> from invenio_config import InvenioConfigEntryPointModule
>>> config_ep = InvenioConfigEntryPointModule(app=app)
Instance Folder
~~~~~~~~~~~~~~~
The runtime configuration should be stored in a separate file, ideally located
outside the actual application package. The configuration files are handled
as Python files where only variables in uppercase are stored in the application
config.
>>> import os
>>> from invenio_config import InvenioConfigInstanceFolder
>>> with open(os.path.join(tmppath, 'myapp.cfg'), 'w') as f:
... result = f.write("EXAMPLE = 'instance folder'")
>>> config_instance_folder = InvenioConfigInstanceFolder(app=app)
>>> app.config['EXAMPLE']
'instance folder'
Environment
~~~~~~~~~~~
Using environment variables is very handy when it comes to configuring
connections to services like database, Redis server, RabbitMQ, etc. used via
containers (e.g. Docker). In order to protect your application from reading
environment variables set by the system or other applications, you should
define a variable prefix used by the loader.
>>> os.environ['MYAPP_EXAMPLE'] = 'environment'
>>> from invenio_config import InvenioConfigEnvironment
>>> config_environment = InvenioConfigEnvironment(app=app, prefix='MYAPP_')
>>> app.config['EXAMPLE']
'environment'
You can also set more complex Python literal variables (e.g. dictionaries or
lists):
>>> os.environ['MYAPP_COMPLEX'] = "{'items': [{'num': 42}, {'foo': 'bar'}]}"
>>> # ...or export MYAPP_COMPLEX="{'items': [{'num': 42}, {'foo': 'bar'}]}"
>>> config_environment = InvenioConfigEnvironment(app=app, prefix='MYAPP_')
>>> app.config['COMPLEX']
{'items': [{'num': 42}, {'foo': 'bar'}]}
Factory Pattern
---------------
The Invenio-Config comes with an opinionated way of loading configuration,
that combines loaders in predictable way. You can use
:func:`invenio_config.utils.create_config_loader` if you would like to:
1. Load configuration from ``invenio_config.module`` entry point group.
2. Load configuration from ``config`` module if provided as argument.
3. Load configuration from the instance folder:
``<app.instance_path>/<app.name>.cfg``.
4. Load configuration keyword arguments provided.
5. Load configuration from environment variables with the prefix
``env_prefix``.
>>> from invenio_config import create_config_loader
>>> app = Flask('myapp', instance_path=tmppath, instance_relative_config=True)
>>> config_loader = create_config_loader(config=Config, env_prefix='MYAPP')
>>> config_loader(app=app, MYARG='config loader')
>>> app.config['EXAMPLE']
'environment'
>>> app.config['MYARG']
'config loader'
"""
from __future__ import absolute_import, print_function
from .default import InvenioConfigDefault
from .env import InvenioConfigEnvironment
from .folder import InvenioConfigInstanceFolder
from .module import InvenioConfigModule
from .entrypoint import InvenioConfigEntryPointModule
from .utils import create_conf_loader, create_config_loader
from .version import __version__
# Explicit public API of the package. Both ``create_conf_loader`` and
# ``create_config_loader`` are re-exported from ``.utils`` (presumably the
# former is a legacy alias of the latter — confirm in ``.utils``).
__all__ = (
    '__version__',
    'InvenioConfigDefault',
    'InvenioConfigEntryPointModule',
    'InvenioConfigEnvironment',
    'InvenioConfigInstanceFolder',
    'InvenioConfigModule',
    'create_conf_loader',
    'create_config_loader',
)
|
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
    def __init__(self, cfg):
        """Build the detector from a config object.

        Constructs the anchor generator, box coder, ResNet backbone, FPN
        (with P6/P7 top block) and the box head, all parameterized by `cfg`.
        """
        super().__init__()
        self.cfg = cfg
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=self.cfg.anchor_scales,
            anchor_ratios=self.cfg.anchor_ratios,
            strides=self.cfg.stride,
            offset=self.cfg.anchor_offset,
        )
        self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)

        self.in_features = cfg.in_features

        # ----------------------- build backbone ------------------------ #
        bottom_up = getattr(resnet, cfg.backbone)(
            norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
        )
        # The classification head of the ResNet is unused for detection.
        del bottom_up.fc

        # ----------------------- build FPN ----------------------------- #
        self.backbone = layers.FPN(
            bottom_up=bottom_up,
            in_features=cfg.fpn_in_features,
            out_channels=cfg.fpn_out_channels,
            norm=cfg.fpn_norm,
            top_block=layers.LastLevelP6P7(
                cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
            ),
            strides=cfg.fpn_in_strides,
            channels=cfg.fpn_in_channels,
        )

        backbone_shape = self.backbone.output_shape()
        feature_shapes = [backbone_shape[f] for f in self.in_features]

        # ----------------------- build FreeAnchor Head ----------------- #
        self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
    def forward(self, image, im_info, gt_boxes=None):
        """Run the detector.

        In training mode (self.training) returns the loss dict computed from
        `gt_boxes`; in eval mode returns (pred_score, clipped_boxes) for a
        single image.
        """
        image = self.preprocess_image(image)
        features = self.backbone(image)
        features = [features[f] for f in self.in_features]

        box_logits, box_offsets = self.head(features)

        # Flatten each level's (N, C, H, W) map to (N, num_anchors, classes/4).
        box_logits_list = [
            _.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
            for _ in box_logits
        ]
        box_offsets_list = [
            _.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
        ]

        anchors_list = self.anchor_generator(features)

        # Concatenate all pyramid levels into single per-image tensors.
        all_level_box_logits = F.concat(box_logits_list, axis=1)
        all_level_box_offsets = F.concat(box_offsets_list, axis=1)
        all_level_anchors = F.concat(anchors_list, axis=0)

        if self.training:
            loss_dict = self.get_losses(
                all_level_anchors, all_level_box_logits,
                all_level_box_offsets, gt_boxes, im_info
            )
            self.cfg.losses_keys = list(loss_dict.keys())
            return loss_dict
        else:
            # currently not support multi-batch testing
            assert image.shape[0] == 1

            pred_boxes = self.box_coder.decode(
                all_level_anchors, all_level_box_offsets[0]
            )
            pred_boxes = pred_boxes.reshape(-1, 4)

            # Rescale boxes back to the original resolution. Assumes im_info
            # columns are (resized h, resized w, orig h, orig w, ...) based on
            # the ratios used here — TODO confirm against the data pipeline.
            scale_w = im_info[0, 1] / im_info[0, 3]
            scale_h = im_info[0, 0] / im_info[0, 2]
            pred_boxes = pred_boxes / F.concat(
                [scale_w, scale_h, scale_w, scale_h], axis=0
            )
            clipped_boxes = layers.get_clipped_boxes(
                pred_boxes, im_info[0, 2:4]
            ).reshape(-1, 4)
            pred_score = F.sigmoid(all_level_box_logits)[0]
            return pred_score, clipped_boxes
    def get_losses(self, anchors, pred_logits, pred_offsets, gt_boxes, im_info):
        """Compute the FreeAnchor-style positive/negative bag losses.

        :param anchors: anchors concatenated over all levels, shape (A, 4)
        :param pred_logits: classification logits, (N, A, num_classes)
        :param pred_offsets: regression outputs, (N, A, 4)
        :param gt_boxes: padded gt boxes per image; column 4 is the class label
        :param im_info: per-image metadata; column 4 is the number of gt boxes
        :return: dict with ``total_loss``, ``pos_loss`` and ``neg_loss``
        """
        # pylint: disable=too-many-statements
        def positive_bag_loss(logits, axis=1):
            # weighted average of bag member probabilities, then -log
            weight = 1.0 / (1.0 - logits)
            weight /= weight.sum(axis=axis, keepdims=True)
            bag_prob = (weight * logits).sum(axis=1)
            return -layers.safelog(bag_prob)
        def negative_bag_loss(logits, gamma):
            # focal-style down-weighting of easy negatives
            return (logits ** gamma) * (-layers.safelog(1.0 - logits))
        pred_scores = F.sigmoid(pred_logits)
        box_prob_list = []
        positive_losses = []
        clamp_eps = 1e-7
        bucket_size = self.cfg.bucket_size
        for bid in range(im_info.shape[0]):
            # keep only the real (unpadded) gt boxes for this image
            boxes_info = gt_boxes[bid, : im_info[bid, 4].astype("int32")]
            # id 0 is used for background classes, so -1 first
            labels = boxes_info[:, 4].astype("int32") - 1
            pred_box = self.box_coder.decode(anchors, pred_offsets[bid]).detach()
            overlaps = layers.get_iou(boxes_info[:, :4], pred_box).detach()
            thresh1 = self.cfg.box_iou_threshold
            # per-gt normalizer: best IoU, kept strictly above thresh1
            thresh2 = F.clip(
                overlaps.max(axis=1, keepdims=True),
                lower=thresh1 + clamp_eps, upper=1.0
            )
            gt_pred_prob = F.clip(
                (overlaps - thresh1) / (thresh2 - thresh1), lower=0, upper=1.0)
            image_boxes_prob = F.zeros(pred_logits.shape[1:]).detach()
            # guarantee that nonzero_idx is not empty
            if gt_pred_prob.max() > clamp_eps:
                _, nonzero_idx = F.cond_take(gt_pred_prob != 0, gt_pred_prob)
                # since nonzeros is only 1 dim, use num_anchor to get real indices
                num_anchors = gt_pred_prob.shape[1]
                anchors_idx = nonzero_idx % num_anchors
                gt_idx = nonzero_idx // num_anchors
                image_boxes_prob[anchors_idx, labels[gt_idx]] = gt_pred_prob[gt_idx, anchors_idx]
            box_prob_list.append(image_boxes_prob)
            # construct bags for objects
            match_quality_matrix = layers.get_iou(boxes_info[:, :4], anchors).detach()
            num_gt = match_quality_matrix.shape[0]
            # each gt gets a "bag" of its bucket_size best-IoU anchors
            _, matched_idx = F.topk(
                match_quality_matrix,
                k=bucket_size,
                descending=True,
                no_sort=True,
            )
            matched_idx = matched_idx.detach()
            matched_idx_flatten = matched_idx.reshape(-1)
            gather_idx = labels.reshape(-1, 1)
            gather_idx = F.broadcast_to(gather_idx, (num_gt, bucket_size))
            # classification score of each bag member for its gt's class
            gather_src = pred_scores[bid, matched_idx_flatten]
            gather_src = gather_src.reshape(num_gt, bucket_size, -1)
            matched_score = F.indexing_one_hot(gather_src, gather_idx, axis=2)
            topk_anchors = anchors[matched_idx_flatten]
            boxes_broad_cast = F.broadcast_to(
                F.expand_dims(boxes_info[:, :4], axis=1), (num_gt, bucket_size, 4)
            ).reshape(-1, 4)
            matched_offsets = self.box_coder.encode(topk_anchors, boxes_broad_cast)
            # localization quality enters the bag as exp(-reg_loss)
            reg_loss = layers.smooth_l1_loss(
                pred_offsets[bid, matched_idx_flatten],
                matched_offsets,
                beta=self.cfg.smooth_l1_beta
            ).sum(axis=-1) * self.cfg.reg_loss_weight
            matched_reg_scores = F.exp(-reg_loss)
            positive_losses.append(
                positive_bag_loss(
                    matched_score * matched_reg_scores.reshape(-1, bucket_size), axis=1
                )
            )
        num_foreground = im_info[:, 4].sum()
        pos_loss = F.concat(positive_losses).sum() / F.maximum(num_foreground, 1)
        box_probs = F.stack(box_prob_list, axis=0)
        neg_loss = negative_bag_loss(
            pred_scores * (1 - box_probs), self.cfg.focal_loss_gamma
        ).sum() / F.maximum(num_foreground * bucket_size, 1)
        # alpha-balance the two terms, as in focal loss
        alpha = self.cfg.focal_loss_alpha
        pos_loss = pos_loss * alpha
        neg_loss = neg_loss * (1 - alpha)
        loss_dict = {
            "total_loss": pos_loss + neg_loss,
            "pos_loss": pos_loss,
            "neg_loss": neg_loss,
        }
        return loss_dict
class FreeAnchorConfig:
    """Default hyper-parameters for the FreeAnchor detector.

    Plain attribute bag: backbone/FPN wiring, dataset descriptors, anchor
    layout, loss weights, and train/test schedules. Training code reads
    these attributes directly (and writes ``losses_keys`` at runtime).
    """
    # pylint: disable=too-many-statements
    def __init__(self):
        # ----------------------- backbone / FPN --------------------- #
        self.backbone = "resnet50"
        self.backbone_pretrained = True
        self.backbone_norm = "FrozenBN"
        self.backbone_freeze_at = 2
        self.fpn_norm = None
        self.fpn_in_features = ["res3", "res4", "res5"]
        self.fpn_in_strides = [8, 16, 32]
        self.fpn_in_channels = [512, 1024, 2048]
        self.fpn_out_channels = 256
        self.fpn_top_in_feature = "p5"
        self.fpn_top_in_channel = 256
        # ------------------------ data cfg -------------------------- #
        self.train_dataset = dict(
            name="coco",
            root="train2017",
            ann_file="annotations/instances_train2017.json",
            remove_images_without_annotations=True,
        )
        self.test_dataset = dict(
            name="coco",
            root="val2017",
            ann_file="annotations/instances_val2017.json",
            remove_images_without_annotations=False,
        )
        self.num_classes = 80
        self.img_mean = [103.530, 116.280, 123.675]  # BGR
        self.img_std = [57.375, 57.120, 58.395]
        # ----------------------- net cfg ------------------------- #
        self.stride = [8, 16, 32, 64, 128]
        self.in_features = ["p3", "p4", "p5", "p6", "p7"]
        self.reg_mean = [0.0, 0.0, 0.0, 0.0]
        self.reg_std = [0.1, 0.1, 0.2, 0.2]
        # three scales per octave (2^0, 2^(1/3), 2^(2/3)) at each level
        self.anchor_scales = [
            [x, x * 2 ** (1.0 / 3), x * 2 ** (2.0 / 3)] for x in [32, 64, 128, 256, 512]
        ]
        self.anchor_ratios = [[0.5, 1, 2]]
        self.anchor_offset = 0.5
        self.box_iou_threshold = 0.6
        self.bucket_size = 50
        self.class_aware_box = False
        self.cls_prior_prob = 0.02
        # ------------------------ loss cfg -------------------------- #
        self.focal_loss_alpha = 0.5
        self.focal_loss_gamma = 2
        self.smooth_l1_beta = 0  # use L1 loss
        self.reg_loss_weight = 0.75
        self.num_losses = 3
        # ------------------------ training cfg ---------------------- #
        self.train_image_short_size = (640, 672, 704, 736, 768, 800)
        self.train_image_max_size = 1333
        self.basic_lr = 0.01 / 16  # The basic learning rate for single-image
        self.momentum = 0.9
        self.weight_decay = 1e-4
        self.log_interval = 20
        self.nr_images_epoch = 80000
        self.max_epoch = 54
        self.warm_iters = 500
        self.lr_decay_rate = 0.1
        self.lr_decay_stages = [42, 50]
        # ------------------------ testing cfg ----------------------- #
        self.test_image_short_size = 800
        self.test_image_max_size = 1333
        self.test_max_boxes_per_image = 100
        self.test_vis_threshold = 0.3
        self.test_cls_threshold = 0.05
        self.test_nms = 0.5
|
###############################################################################
# _ _ _ #
# | | (_) | | #
# _ __ _ _ | | __ _ _ __ _ __| | #
# | '_ \ | | | | | | / _` | | '__| | | / _` | #
# | |_) | | |_| | | | | (_| | | | | | | (_| | #
# | .__/ \__, | |_| \__,_| |_| |_| \__,_| #
# | | __/ | #
# |_| |___/ #
# #
# Load Arrays of Imaging Data #
# #
# Copyright (c) 2021, <NAME> #
# #
# pylarid is released under the revised (3-clause) BSD license. #
# For details, see LICENSE.txt #
# #
###############################################################################
# test_cast.py: Unit tests for casting between larid.Dset data types
import larid
import math
import numpy
import unittest
class TestCast(unittest.TestCase):
    """Test casting between larid.Dset data types.

    Each public test method keeps its original name (so discovery and any
    external references are unchanged); the repeated create/fill/cast/check
    boilerplate is factored into two private helpers.
    """

    def _cast(self, src, fill, dst):
        """Create a 1x1x1x1 *src* Dset holding *fill* and cast it to *dst*.

        Asserts that the cast Dset reports datatype *dst* and that its
        numpy buffer has the matching dtype; returns the cast Dset.
        """
        obj = larid.Dset(1, 1, 1, 1, src, 'tkji')
        obj.data[:] = fill
        cast = obj.to_datatype(dst)
        self.assertEqual(cast.datatype, dst)
        self.assertEqual(cast.data.dtype, numpy.dtype(dst))
        return cast

    def _assert_cast_value(self, src, fill, dst, expected):
        """Cast *fill* from *src* to *dst* and check the stored value."""
        cast = self._cast(src, fill, dst)
        self.assertEqual(cast.data[0, 0, 0, 0], expected)

    def test_upcast_uint8(self):
        # uint8 to same or bigger types: value survives unchanged
        for datatype in ('uint8', 'int16', 'int32', 'float32', 'float64'):
            with self.subTest(datatype=datatype):
                self._assert_cast_value('uint8', 1, datatype, 1)

    def test_upcast_int16(self):
        # int16 to same or bigger types
        for datatype in ('int16', 'int32', 'float32', 'float64'):
            with self.subTest(datatype=datatype):
                self._assert_cast_value('int16', 1, datatype, 1)

    def test_upcast_int32(self):
        # int32 to same type or floating-point types
        for datatype in ('int32', 'float32', 'float64'):
            with self.subTest(datatype=datatype):
                self._assert_cast_value('int32', 1, datatype, 1)

    def test_upcast_float32(self):
        # float32 to itself or float64
        for datatype in ('float32', 'float64'):
            with self.subTest(datatype=datatype):
                self._assert_cast_value('float32', 1, datatype, 1)

    def test_downcast_float64_to_float32(self):
        # float64 to float32: value preserved to float32 precision only
        cast = self._cast('float64', 1.2345678, 'float32')
        self.assertTrue(math.isclose(cast.data[0, 0, 0, 0], 1.2345678,
                                     abs_tol=1e-07))
        self.assertFalse(math.isclose(cast.data[0, 0, 0, 0], 1.2345678,
                                      abs_tol=1e-08))

    def test_downcast_float64_to_signed(self):
        # float64 to signed integer types truncates toward zero
        for datatype in ('int32', 'int16'):
            with self.subTest(datatype=datatype):
                self._assert_cast_value('float64', 1.2345678, datatype, 1)
                self._assert_cast_value('float64', -1.2345678, datatype, -1)

    def test_downcast_float64_to_uint8(self):
        # float64 to uint8 truncates; negative values wrap modulo 256
        self._assert_cast_value('float64', 1.2345678, 'uint8', 1)
        self._assert_cast_value('float64', -1.2345678, 'uint8', 255)

    def test_downcast_float32_to_signed(self):
        # float32 to signed integer types truncates toward zero
        for datatype in ('int32', 'int16'):
            with self.subTest(datatype=datatype):
                self._assert_cast_value('float32', 1.2345678, datatype, 1)
                self._assert_cast_value('float32', -1.2345678, datatype, -1)

    def test_downcast_float32_to_uint8(self):
        # float32 to uint8 truncates; negative values wrap modulo 256
        self._assert_cast_value('float32', 1.2345678, 'uint8', 1)
        self._assert_cast_value('float32', -1.2345678, 'uint8', 255)

    def test_downcast_int32_to_int16(self):
        # int32 to int16 wraps around at 2**15
        self._assert_cast_value('int32', (2 ** 15) - 1, 'int16', (2 ** 15) - 1)
        self._assert_cast_value('int32', 2 ** 15, 'int16', -2 ** 15)

    def test_downcast_signed_to_uint8(self):
        # int32 and int16 to uint8 wrap modulo 256
        for datatype in ('int32', 'int16'):
            with self.subTest(datatype=datatype):
                self._assert_cast_value(datatype, 255, 'uint8', 255)
                self._assert_cast_value(datatype, 256, 'uint8', 0)
                self._assert_cast_value(datatype, -1, 'uint8', 255)
# Allow running this test module directly: python test_cast.py
if __name__ == '__main__':
    unittest.main()
###############################################################################
|
<filename>templates/go/go_system_paths.py<gh_stars>100-1000
# ---------------------------------------------------------------------------
# Template fragments for generating a Go "search the filesystem" payload.
# These assignments are *data* consumed by a template engine; the Go source
# inside the strings is emitted verbatim, so its contents must not be edited.
# ---------------------------------------------------------------------------

# Go standard-library imports required by buildcode/callcode below.
imports=["fmt",
"os",
"path/filepath",
"runtime",
"strings"]

# Go declarations: a filepath.Walk callback that records every file/directory
# (lower-cased) under a start path, plus a sysnative detour so a 32-bit
# process on 64-bit Windows can also search the 64-bit system directory.
buildcode="""
type fileDesc struct {
isDir bool
fPath string
sName string
}
var globalFile []fileDesc
var sysNativeDone = false
//used by the walk function to process directories / files
// This function gets called every file / directory in the path thats being searched
func walk_path(path string, info os.FileInfo, err error) error {
//temp item holder
var item fileDesc
//check for errors
if err != nil {
fmt.Println(err)
return nil
}
//determine if directory
if info.IsDir(){
item.isDir = true
} else{
item.isDir = false
}
//set addtional parameters into the struct
item.fPath = strings.ToLower(path)
item.sName = strings.ToLower(info.Name())
globalFile = append(globalFile,item)
//You would add check code here to call the combine function to test this path
// plus env vars meet the check
return nil
}
//called similar to python version
func walk_os(scan_dir string) {
//Handle 32bit in 64bit machine sysnative
sys_paths := []string{"c:\\\\windows", "c:\\\\windows\\\\system32"}
//fmt.Println("Arch: "+runtime.GOARCH)
if strings.Contains(runtime.GOARCH, "386") == true {
for _, s_path_check := range sys_paths {
// fmt.Println("Check: "+s_path_check+" vs Check: "+scan_dir)
if strings.Compare(strings.ToLower(scan_dir), strings.ToLower(s_path_check)) == 0 && !sysNativeDone{
fmt.Println("[*] Checking sysnative - searching for 64-Bit path")
sysNativeDone = true
filepath.Walk("c:\\\\Windows\\\\sysnative", walk_path)
}
//else 32bit do nothing special, but continue to walk the given path
}
}
_ = filepath.Walk(scan_dir, walk_path)
fmt.Printf("[*] Total FS Length %v \\n", len(globalFile))
//fmt.Printf("%v",globalFile)
}
"""

# Go statements: resolve a %ENVVAR% start location, walk the filesystem,
# then try each discovered path as a key until build_code yields a payload.
callcode="""
if true == strings.HasPrefix(start_loc, "%") {
if true == strings.HasSuffix(start_loc, "%") {
fmt.Println("We have a Env Var for path", start_loc)
// Strip "%"
start_loc = start_loc[1:len(start_loc) - 1]
// Get env path
start_loc = os.Getenv(start_loc)
fmt.Println("Resolv start_loc", start_loc)
if start_loc == ""{
os.Exit(0)
}
}
}
fmt.Println("Len key_combos", len(key_combos), key_combos)
walk_os(start_loc)
fmt.Println("", len(globalFile))
//fmt.Println(globalFile)
var full_payload []byte
for _, itr := range globalFile{
temp_keycombos := make([][]string, len(key_combos))
copy(temp_keycombos, key_combos)
fmt.Printf("[*] Testing File: %v",itr.fPath)
temp_keycombos[i] = []string{itr.fPath}
fmt.Println(temp_keycombos)
full_payload = build_code(lookup_table, payload_hash, minus_bytes, temp_keycombos)
if full_payload != nil{
fmt.Println("not nil")
break
}
}
if full_payload == nil{
fmt.Println(":( Exiting")
os.Exit(0)
}
fmt.Println("Len full_payload:", len(full_payload))
"""
|
__author__ = 'nah'
import pymongo
from bson import Binary
def content_decode(s):
    """Best-effort conversion of *s* to unicode, dropping non-ASCII bytes.

    Python 2 only: relies on the ``unicode`` builtin and on byte-string
    ``str.decode``. Non-``str`` inputs are returned unchanged.
    """
    decoded = s
    if isinstance(s, str):
        # 'ignore' silently drops undecodable bytes rather than raising
        decoded = unicode(s.decode('ascii', 'ignore'))
    return decoded
class SubmissionStore():
    """Stores submissions obtained from the Canvas API.

    Python 2 / legacy-pymongo code (``ensure_index``, ``update`` with
    ``upsert``, ``find(fields=...)``). Layout: one mongo database per
    course id; one collection per assignment id; users and groups in
    dedicated collections.
    """
    def __init__(self, db_host='localhost', db_port=27017):
        """Construct a submission store working with a mongodb server at the given location."""
        self.client = pymongo.MongoClient(db_host, db_port)
        # users live in their own database/collection, indexed by user_id
        self.users_collection = self.client['users']['users']
        self.users_collection.ensure_index('user_id')
    def _store_single_submission(self, collection, submission, uid = None, key= None, remove_attachments = False):
        """Upsert one submission, identified by (key, uid) or a best-guess id field."""
        # assumption is that there can only be one submission per user in the collection
        if uid is None or key is None:
            # fall back on whichever identifying field the submission carries
            if 'user_id' in submission:
                query = {'user_id': submission['user_id']}
            elif 'id' in submission:
                query = {'id': submission['id']}
            elif 'username' in submission:
                query = {'username': submission['username']}
            else:
                query = {'sis_user_id': submission['sis_user_id']}
        else:
            query = {key: uid}
            submission[key] = uid
        collection.update(query, {'$set': submission}, upsert=True)
        if remove_attachments:
            collection.update(query, {'$unset': {'attachment-files':''}})
    def get_assignment_collection(self, course_id, assignment_id):
        """
        Return the mongodb collection holding submissions for one assignment.

        :param course_id: The id of the course the submissions are stored under
        :param assignment_id: The id of the assignment
        :returns: The mongodb collection for this assignment
        """
        course_id = str(course_id)
        assignment_id = str(assignment_id)
        return self.client[course_id][assignment_id]
    def store_assignment_submissions(self, course_id, assignment_id, submissions, group_category_id = None, remove_attachments = False):
        """
        Stores the given submissions in the database.

        :param course_id: The id of the course to store the submissions under
        :param assignment_id: The id of the assignment to store the submissions under
        :param submissions: The submissions themselves, in JSON format. Can be a single submission or an iterable
        :param group_category_id: when given, submissions are keyed by group name instead of user_id
        :param remove_attachments: when True, strips any stored 'attachment-files'
        """
        submissions_collection = self.get_assignment_collection(course_id,assignment_id)
        try:
            for submission in submissions:
                submission['assignment_id'] = assignment_id
                if not group_category_id is None:
                    # key group assignments by the submitter's group name
                    storage_id = self.get_group_from_category(course_id, group_category_id, submission['user_id'], key='user_id')
                    storage_key = 'group'
                    # print 'grouping %s for %s' % (storage_id, submission['user_id'])
                else:
                    storage_id = submission['user_id']
                    storage_key = 'user_id'
                self._store_single_submission(submissions_collection, submission, key=storage_key, uid=storage_id, remove_attachments=remove_attachments)
        except TypeError, te:
            # py2 except syntax; a non-iterable means a single submission
            self._store_single_submission(submissions_collection, submissions, remove_attachments=remove_attachments)
    def store_submission_marks(self, course_id, submission, marks_dict):
        """Attach *marks_dict* (as field 'marks') to the stored submission."""
        course_id = str(course_id)
        assignment_id = str(submission['assignment_id'])
        submissions_collection = self.client[course_id][assignment_id]
        query = {'user_id': submission['user_id']}
        existing_submission = submissions_collection.find_one(query)
        existing_submission['marks'] = marks_dict
        submissions_collection.update(query, existing_submission)
    def get_submission_attachments(self, course_id, submission):
        """Return {filename: contents} for a stored submission, or None if absent."""
        existing_submission = self.get_stored_submission(course_id, submission['assignment_id'], submission['user_id'])
        attachments = None
        if existing_submission is not None:
            if 'attachment-files' in existing_submission:
                attachments = {}
                att_dict = existing_submission['attachment-files']
                for attachment in att_dict.values():
                    attachments[attachment['filename']] = attachment['contents']
        return attachments
    def store_submission_attachments(self, course_id, submission, attachments):
        """Store {filename: bytes} attachments on an existing submission."""
        course_id = str(course_id)
        assignment_id = str(submission['assignment_id'])
        submissions_collection = self.client[course_id][assignment_id]
        query = {'user_id': submission['user_id']}
        existing_submission = submissions_collection.find_one(query)
        att_dict = {}
        count = 0
        # keys on mongo can't have . or some special characters, so flatten out a bit
        for (k, v) in attachments.iteritems():
            att_dict[str(count)] = {}
            att_dict[str(count)]['filename'] = k
            # att_dict[str(count)]['contents'] = content_decode(v)
            att_dict[str(count)]['contents'] = Binary(v)
            count += 1
        existing_submission['attachment-files'] = att_dict
        existing_submission['assignment_id'] = submission['assignment_id']
        submissions_collection.update(query, existing_submission)
    def get_submissions_to_mark(self, course_id, assignment_id):
        """Return submissions that are ungraded or whose grade predates the latest submission."""
        query = {'$or': [{'grade_matches_current_submission': False}, {'grade': None}]}
        return self.get_assignment_submissions(course_id, assignment_id, query)
    def get_stored_submission(self, course_id, assignment_id, user_id):
        """Return the stored submission for one user, or None."""
        query = {'user_id': user_id}
        result = self.get_assignment_submissions(course_id, assignment_id, query, as_list=False)
        if result.count() > 0:
            return result[0]
        else:
            return None
    def get_assignment_submissions(self, course_id, assignment_id, query={}, as_list=True):
        """
        Retrieves submissions for the given course and assignment from the database. Additionally restricts assignments based on query.

        :param course_id: The course to fetch the submissions from.
        :param assignment_id: The assignment to fetch the submissions from.
        :param query: Restricts returned submissions to those matching this query
        :param as_list: Returns the results as a list, rather than a pymongo Cursor.
        :return: the matching submissions in JSON format.
        """
        # NOTE(review): mutable default query={} is shared across calls; safe
        # only while callers never mutate it
        course_id = str(course_id)
        assignment_id = str(assignment_id)
        result = self.client[course_id][assignment_id].find(query)
        if as_list:
            result = list(result)
        return result
    def store_user(self, user):
        """Upsert a user record (keyed by Canvas 'id'); return its login_id."""
        self.users_collection.update({'id': user['id']}, user, upsert=True)
        return user['login_id']
    def store_users(self, users):
        """Store an iterable of user records (py2 map is eager)."""
        map(self.store_user, users)
    def get_user(self, uid, key='id'):
        """Return the user document where *key* equals *uid*, or None."""
        return self.users_collection.find_one({key: uid})
    def get_username(self, uid, key='id'):
        """Return the login_id for the matching user, or None."""
        user = self.get_user(uid, key)
        if user is None:
            return None
        else:
            return user['login_id']
    def store_group(self, course_id, group, members=None):
        """Upsert a group document (collection 'group_<category_id>'), optionally with members."""
        course_id = str(course_id)
        group_category = 'group_' + str(group['group_category_id'])
        if members is not None:
            group['members'] = members
        query = {'id': group['id']}
        self.client[course_id][group_category].update(query, {'$set': group}, upsert=True)
    def get_course_groups(self, course_id, group_category_id, query={}):
        """Return a cursor over groups in the category matching *query*."""
        course_id = str(course_id)
        group_category = 'group_' + str(group_category_id)
        return self.client[course_id][group_category].find(query)
    def get_group(self, course_id, group_category_id, group_name):
        """Return the group document with the given name, or None."""
        course_id = str(course_id)
        group_category = 'group_' + str(group_category_id)
        return self.client[course_id][group_category].find_one({'name': group_name})
    def get_group_members(self, course_id, group_category_id, group_name):
        """Return the user_ids of the named group's members."""
        group = self.get_group(course_id, group_category_id, group_name)
        return [member['user_id'] for member in group['members']]
    def get_groups_from_category(self, course_id, group_category_id):
        """Return a cursor over all groups in the category."""
        course_id = str(course_id)
        group_category = 'group_' + str(group_category_id)
        return self.client[course_id][group_category].find()
    def get_group_from_category(self, course_id, group_category_id, uid, key='id'):
        """Return the name of the group whose member has *key* == *uid*, else None."""
        course_id = str(course_id)
        group_category = 'group_' + str(group_category_id)
        # linear scan over groups and members; fine for classroom-sized data
        for group in self.client[course_id][group_category].find():
            for member in group['members']:
                if member[key] == uid:
                    return group['name']
        return None
    def _get_key_document(self):
        """Return the single document holding the API key, or None."""
        return self.client['global']['user_data'].find_one(fields=['key'])
    def store_key(self, key):
        """Insert or refresh the stored API key."""
        key_doc = self._get_key_document()
        global_collection = self.client['global']['user_data']
        if key_doc is None:
            global_collection.insert({'key': key})
        else:
            if key != key_doc['key']:
                print('Updating key')
                query = {'_id' : key_doc['_id']}
                # whole-document replacement (no $set); the doc only holds 'key'
                global_collection.update(query, {'key': key})
            else:
                print('key unchanged')
    def get_key(self):
        """Return the stored API key; raise if none has been stored."""
        key_doc = self._get_key_document()
        if key_doc is None:
            raise Exception('No key stored.')
        else:
            return key_doc['key']
<gh_stars>0
"""
Calls GMAP.
"""
import subprocess # FIXME use pbcommand wrapper (once this is stable)
import tempfile
import logging
import os.path as op
import os
import sys
import pysam
from pbcommand.cli.core import pbparser_runner
from pbcommand.models import FileTypes, SymbolTypes, get_pbparser
from pbcommand.utils import setup_log
from pbcore.io import ContigSet, ReferenceSet, AlignmentSet
from pbtranscript.Utils import filter_sam
log = logging.getLogger(__name__)
class Constants(object):
    """Tool-contract constants for the pbtranscript GMAP task."""
    TOOL_ID = "pbtranscript.tasks.gmap"
    VERSION = "0.1.0"
    DRIVER_EXE = "python -m pbtranscript.tasks.gmap --resolved-tool-contract"
def run_gmap(transcripts_file, reference_file, alignment_file, nproc):
    """Align transcripts to a reference with GMAP and write an indexed BAM.

    Builds (or reuses) a gmap_db for the reference, runs gmap, filters the
    SAM output, converts it to BAM and indexes it.

    :param transcripts_file: ContigSet of input transcripts (one fasta)
    :param reference_file: ReferenceSet for the genome (one fasta)
    :param alignment_file: path of the output BAM
    :param nproc: number of gmap worker threads
    :return: 0 on success; non-zero tool exits trip an assertion
    """
    db_name = "gmap_db"
    ref_fasta_name = transcripts_fasta_name = None
    with ReferenceSet(reference_file) as rs:
        rfs = rs.toExternalFiles()
        assert len(rfs) == 1
        ref_fasta_name = rfs[0]
    # XXX in our setup, i.e. with the old reference layout, the .fasta might
    # be in a 'sequence' directory, with gmap_db one level up, so we try
    # a couple of paths to look for an existing gmap_db
    ref_fasta_dir = op.dirname(ref_fasta_name)
    ref_path = os.getcwd()
    for base_path in [ref_fasta_dir, op.dirname(ref_fasta_dir)]:
        log.info("Looking for gmap_db in %s" % base_path)
        if op.exists(op.join(base_path, db_name)):
            log.info("Found existing gmap_db, gmap_build will not be run")
            ref_path = base_path
            break
    else:
        # for/else: no existing db was found, so build one in the cwd
        log.info("gmap_build will need to be run on %s" % ref_fasta_name)
        # workaround for hardcoded paths in gmap_build
        bin_dir = os.environ['_SMRT_GMAP_BIN']  # FIXME whatever key Herb uses
        args1 = [
            "gmap_build",
            "-B", bin_dir,
            "-k", "12",
            "--db=%s" % db_name,
            # XXX what to do about the directory here? we really need a way
            # to cache or pre-generate these
            #"--dir=%s" % ref_path, # FIXME spaces?
            "-D", ref_path,
            ref_fasta_name,
        ]
        log.info("ARGUMENTS: {a}".format(a=" ".join(args1)))
        with open("gmap_build.out", "w") as stdout, \
             open("gmap_build.err", "w") as stderr:
            rc = subprocess.call(args1, stdout=stdout, stderr=stderr)
            assert rc == 0, "unexpected exit code {c}".format(c=rc)
    with ContigSet(transcripts_file) as cs:
        cfs = cs.toExternalFiles()
        assert len(cfs) == 1
        transcripts_fasta_name = cfs[0]
    args2 = [
        "gmap",
        "-n", "0",
        "-t", str(nproc),
        "--sam-use-0M",
        "-f", "samse",
        "-D", ref_path,
        "-d", db_name,
        #"--split-output=gmap_tmp",
        transcripts_fasta_name,
    ]
    log.info("ARGUMENTS: %s" % " ".join(args2))
    # gmap writes SAM to stdout; capture it in a temp file
    sam_file = tempfile.NamedTemporaryFile(suffix=".sam", delete=True).name
    with open(sam_file, "w") as stdout, open("gmap.log", "w") as stderr:
        rc = subprocess.call(args2, stdout=stdout, stderr=stderr)
        assert rc == 0, "unexpected exit code {c}".format(c=rc)
    sam_file_2 = tempfile.NamedTemporaryFile(suffix=".sam", delete=True).name
    filter_sam(sam_file, sam_file_2)
    os.remove(sam_file)
    # convert filtered SAM to BAM record-by-record
    with pysam.AlignmentFile(sam_file_2, "r") as sam_in:
        log.info("Writing alignments to %s" % alignment_file)
        with pysam.AlignmentFile(alignment_file, "wb",
                                 template=sam_in) as bam_out:
            for rec in sam_in:
                bam_out.write(rec)
    os.remove(sam_file_2)
    # FIXME this bam file of course looks nothing like the PacBio standard!
    # (which also makes testing difficult, since we usually run pbvalidate on
    # all outputs)
    #assert subprocess.call(["pbindex", "gmap.aligned.bam"]) == 0
    pysam.index(alignment_file)
    return 0
def get_contract_parser():
p = get_pbparser(
tool_id=Constants.TOOL_ID,
version=Constants.VERSION,
name=Constants.TOOL_ID,
description=__doc__,
driver_exe=Constants.DRIVER_EXE,
nproc=SymbolTypes.MAX_NPROC)
p.add_input_file_type(FileTypes.DS_CONTIG, "seq_in",
name="ContigSet",
description="Input transcripts")
p.add_input_file_type(FileTypes.DS_REF, "ref_in",
name="ReferenceSet",
description="Reference genome")
p.add_output_file_type(FileTypes.BAM, "aln_out",
name="Alignments",
description="BAM alignments file",
default_name="gmap_alignments")
return p
def args_runner(args):
    """Plain argparse mode is unsupported; only the resolved-tool-contract
    path (``resolved_tool_contract_runner``) is implemented."""
    raise NotImplementedError
def resolved_tool_contract_runner(rtc):
    """Adapter from a resolved tool contract to :func:`run_gmap`."""
    task = rtc.task
    return run_gmap(
        transcripts_file=task.input_files[0],
        reference_file=task.input_files[1],
        alignment_file=task.output_files[0],
        nproc=task.nproc,
    )
def main(argv=None):
    """Tool-contract entry point.

    :param argv: command-line arguments; defaults to ``sys.argv[1:]``.
        The previous signature evaluated ``sys.argv[1:]`` at import time,
        freezing whatever argv looked like then; it is now read at call
        time. Callers that pass argv explicitly are unaffected.
    :return: process exit code from pbparser_runner
    """
    if argv is None:
        argv = sys.argv[1:]
    mp = get_contract_parser()
    return pbparser_runner(
        argv=argv,
        parser=mp,
        args_runner_func=args_runner,
        contract_runner_func=resolved_tool_contract_runner,
        alog=log,
        setup_log_func=setup_log)
# Script entry: propagate the runner's return code as the exit status.
if __name__ == "__main__":
    sys.exit(main())
|
import tkinter as tk
import tkinter.ttk as ttk
import subprocess
from tkinter import messagebox
from tkinter import scrolledtext
from PIL import ImageTk, Image
import os
from xml.etree import ElementTree as ET
from tkinter import TOP, BOTTOM, LEFT
from pathlib import Path
from .gutil import AmiTree
from .gutil import Gutil
from .gutil import Gutil as gu
from .gutil import CreateToolTip
from .search_lib import AmiSearch, AmiSection, AmiDictionaries, AmiProjects
from urllib.request import urlopen
import tkinterhtml as th
from io import BytesIO
# command name of the external paper-fetching tool
PYGETPAPERS = "pygetpapers"
# NOTE(review): developer-specific absolute paths — presumably placeholders
# that should come from configuration; confirm before relying on them
DICTIONARY_HOME = "/Users/pm286/dictionary"
CEV_DICTIONARY_HOME = "/Users/pm286/projects/CEVOpen/dictionary"
# flag names for output formats / behaviour options
XML_FLAG = "xml"
NOEXEC_FLAG = "noexec"
PDF_FLAG = "pdf"
CSV_FLAG = "csv"
SUPP_FLAG = "supp"
HTML_FLAG = "html"
PDFBOX_FLAG = "pdfbox"
# markers matched against pygetpapers console output
TOTAL_HITS_ARE = "Total Hits are"
WROTE_XML = "Wrote xml"
# select by typing
# https://stackoverflow.com/questions/47839813/python-tkinter-autocomplete-combobox-with-like-search
def button1(event):
    """Debug handler for Button-1 on a listbox: print the current selection.

    :param event: tkinter event whose ``widget`` supports
        ``curselection()`` and ``get(index)`` (e.g. a Listbox)
    """
    print("button1", event)
    print(dir(event))
    # BUG FIX: curselection is a method — it must be *called* to obtain the
    # tuple of selected indices; the bare attribute made len(tup) raise.
    tup = event.widget.curselection()
    print("tup", tup, type(tup),)
    if len(tup) > 0:
        print(tup[0], event.widget.get(tup[0]))
class AmiGui(tk.Frame):
""" """
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.max_max_hits = 90
self.selected_boxes = []
self.current_project = None
self.ami_tree = None
self.treeview = None
self.main_display_frame = None
self.dashboard = None
self.label = None
self.show_html_frame = False
self.dictionary_content_notebook = None
self.assets = Path(Path(__file__).parent.parent, "assets")
self.pack()
self.current_ami_projects = AmiProjects()
self.create_all_widgets(root)
# self.menu_stuff()
    def create_all_widgets(self, master):
        """ Main entry
        Builds the right-hand display frame first, then the left-hand
        dashboard (packing order determines the layout).
        :param master: parent frame
        """
        self.create_display_frame(master)
        self.create_dashboard(master)
def main_text_display_button1(self, event):
"""
:param event:
"""
# print("Main button 1", event)
pass
def main_text_display_selected(self, event):
"""
:param event:
"""
# print("Main selected", event, event.widget.selection_get())
pass
    def process_selection(self, event):
        """Mouse-release handler: send any selected text to a Wikidata query.

        :param event: tkinter event from the main text widget
        """
        text = self.get_main_text_on_release(event)
        if text:
            # NOTE(review): the return value is unused, and query_wikidata is
            # not visible in this file — confirm it exists on the class
            response = self.query_wikidata(text)
def get_main_text_on_release(self, event):
"""
:param event:
"""
text = event.widget.selection_get() if event.widget.tag_ranges("sel") else None
return text
    def create_display_frame(self, master):
        """Build the right-hand display: scrolled text, label, demo image,
        and (optionally) an embedded HTML frame.

        :param master: parent widget
        """
        from py4ami.file_lib import FileLib
        self.main_display_frame = tk.Frame(master)
        self.main_display_frame.pack(side=tk.RIGHT)
        self.main_text_display = scrolledtext.ScrolledText(
            self.main_display_frame, font="Arial, 18", width=60, height=10)
        self.main_text_display.insert(tk.END, "text_display")
        self.main_text_display.pack(side=tk.BOTTOM, expand=True)
        self.main_text_display.bind(
            "<Button-1>", self.main_text_display_button1)
        self.main_text_display.bind(
            "<<Selection>>", self.main_text_display_selected)  # dummy
        self.main_text_display.bind(
            "<ButtonRelease>", self.process_selection)  # ACTIVE -> wikidata
        self.label_display_var = tk.StringVar(value="label_xml text")
        self.label_display = tk.Label(
            self.main_display_frame, textvariable=self.label_display_var)
        # demo image bundled with the package assets
        image_path = FileLib.create_absolute_name(
            os.path.join(self.assets, "purple_ocimum_basilicum.png"))
        if not os.path.exists(image_path):
            print(f"Cannot find purple basil: {image_path}")
        else:
            self.main_image_display = self.create_image_label(image_path)
            self.main_image_display.pack()
        # NOTE(review): "path://" is not a standard URL scheme — confirm
        # create_html_view/urlopen actually accepts it
        url = "path://" + \
            FileLib.create_absolute_name(os.path.join("test", "index.html"))
        if self.show_html_frame:
            self.display_in_html_frame(url)
        file = FileLib.create_absolute_name(os.path.join(
            "diagrams", "luke", "papers20210121", "physrevb.94.125203_1_", "fulltext.pdf"))
        if False:
            # disabled developer experiment: load a sample PDF page
            self.open_pdf(file, self.main_text_display, page_num=0)
def display_in_html_frame(self, url):
try:
self.html_frame = self.create_html_view(
self.main_display_frame, url)
if self.html_frame:
self.html_frame.pack()
except Exception as e:
s = f"cannot load url {url}{e}"
raise Exception(s)
    def open_pdf(self, file, text, page_num=0):
        """Extract the text of one PDF page into a Tk text widget.

        :param file: path to the PDF file
        :param text: tkinter Text widget to insert into
        :param page_num: zero-based page index (Default value = 0)
        """
        import PyPDF2
        # NOTE(review): PdfFileReader/getPage/extractText are the legacy
        # PyPDF2 1.x API — confirm the pinned PyPDF2 version supports them
        pdf_file = PyPDF2.PdfFileReader(file)
        page = pdf_file.getPage(page_num)
        content = page.extractText()
        # insert at the top of the widget (line 1, char 0)
        text.insert(1.0, content)
def create_html_view(self, frame, htmlfile):
"""
:param frame:
:param htmlfile:
"""
a = urlopen(htmlfile)
bytez = a.read()
content = bytez.decode()
html = th.HtmlFrame(frame)
html.set_content(content)
return html
def view_main_text(self, file):
"""
:param file:
"""
if file.endswith(".xml"):
# not yet working
# self.view_main_xml_file(path)
with open(file, "r", encoding="utf-8") as f:
content = f.read()
self.view_main_text_content(content)
else:
with open(file, "r", encoding="utf-8") as f:
content = f.read()
self.view_main_text_content(content)
def view_main_text_content(self, content):
    """Replace the whole main text display with `content`.

    :param content: string to show
    """
    self.main_text_display.delete("1.0", tk.END)
    self.main_text_display.insert(tk.END, content)
def view_main_xml_file(self, file):
    """Parse an XML file into self.xml_root (rendering not yet implemented).

    :param file: path to the XML file
    """
    from pyami.xml_lib import XmlLib
    self.xml_root = XmlLib.parse_xml_file_to_root(file)
    # placeholder: children are parsed but not rendered yet
    for child in self.xml_root:
        pass
def create_image_label(self, image_path):
    """Load an image, scale it to fit an 800x400 frame while preserving
    aspect ratio, and show it in the persistent image label.

    :param image_path: path to the image file
    :return: the ttk.Label displaying the image
    """
    frame_height = 400
    frame_width = 800
    frame_aspect_ratio = frame_width / frame_height
    image = Image.open(image_path)
    w, h = image.size
    aspect_ratio = w / h
    # fit the binding dimension to the frame, scale the other proportionally
    if aspect_ratio > frame_aspect_ratio:
        width = frame_width
        height = int(frame_width / aspect_ratio)
    else:
        width = int(frame_height * aspect_ratio)
        height = frame_height
    # Image.LANCZOS is the same filter as the Image.ANTIALIAS alias,
    # which was removed in Pillow 10.
    image = image.resize((width, height), Image.LANCZOS)
    img = ImageTk.PhotoImage(image)
    if self.label is None:
        self.label = ttk.Label(self.main_display_frame)
    self.set_image_and_persist(img)
    return self.label
def set_image_and_persist(self, img):
    """Show `img` in the persistent label and keep a Python reference to it.

    :param img: a PhotoImage to display
    """
    self.label.configure(image=img)
    self.label.image = img  # needed to avoid garbage collection of the image
def create_dashboard(self, master):
    """Create the left-hand dashboard frame and populate it with widgets.

    :param master: parent widget
    """
    self.dashboard = tk.Frame(master)
    self.dashboard.pack(side=tk.LEFT)
    self.make_ami_widgets(self.dashboard)
def make_ami_widgets(self, master):
    """Build the main control panel: project, dictionaries, pygetpapers,
    AMI, sections, search and quit sub-frames (packed in this order).

    :param master: parent widget
    :return: the populated frame
    """
    pg_frame = tk.Frame(master,
                        highlightbackground="gray",
                        highlightthickness=1)
    pg_frame.pack(side=TOP)
    self.make_cproject_frame(pg_frame, tk.TOP)
    self.make_dictionary_names_box(pg_frame)
    self.make_pygetpapers_query_frame(pg_frame, tk.TOP)
    self.make_ami_project(pg_frame)
    self.make_section_frame(pg_frame)
    self.make_ami_search(pg_frame)
    self.make_quit(pg_frame)
    return pg_frame
def make_section_frame(self, master):
    """Build the "Sections" frame with a listbox of searchable sections.

    :param master: parent widget
    """
    section_frame, title_var = Gutil.make_frame_with_hide(master,
                                                          title="Sections",
                                                          tooltip="sections to be searched",
                                                          )
    section_frame.pack()
    self.sections_listbox = self.create_generic_listbox(
        AmiSection.SECTION_LIST1,  # trimmed list; SECTION_LIST is the full set
        master=section_frame,
    )
    self.sections_listbox.pack(side=BOTTOM)
def make_dictionary_names_box(self, master):
    """Build the "Dictionaries" frame.

    Creates a listbox of the known AMI dictionary names with a button that
    opens content boxes for the selected dictionaries, and initializes
    self.pygetpapers_flags, the checkbox configuration for the pygetpapers
    command-line flags (-x/--xml, -p/--pdf, -s/--supp, -n, -c/--makecsv,
    --makehtml).

    :param master: parent widget
    """
    dictionary_frame, _ = Gutil.make_frame_with_hide(master,
                                                     title="Dictionaries",
                                                     highlightthickness="10",
                                                     tooltip="contains dictionary content boxes",
                                                     )
    dictionary_frame.pack(side=LEFT)
    ami_dictionaries = AmiDictionaries()
    dictionary_dict = ami_dictionaries.dictionary_dict
    # frame that will later hold the per-dictionary content boxes
    self.dcb_frame = self.make_dictionary_content_boxes_frame(
        dictionary_frame)
    self.dictionary_names_listbox = self.create_generic_listbox(
        dictionary_dict.keys(),
        master=dictionary_frame,
        button_text="display dictionary content",
        command=lambda: self.make_dictionary_content_boxes(
            self.dcb_frame,
            dictionary_dict,
            Gutil.get_selections_from_listbox(
                self.dictionary_names_listbox)
        )
    )
    dictionary_names = dictionary_dict.keys()  # NOTE(review): unused local
    # Checkbox widget/variable slots, referenced by the config dicts below.
    # NOTE(review): they are all still None when copied into the dicts;
    # presumably Gutil.make_checkbox_from_dict fills the dict slots later.
    self.xml_box = None
    self.xml_var = None
    self.pdf_box = None
    self.pdf_var = None
    self.supp_box = None
    self.supp_var = None
    self.noexec_box = None
    self.noexec_var = None
    self.csv_box = None
    self.csv_var = None
    self.html_box = None
    self.html_var = None
    self.pdfbox_box = None
    self.pdfbox_var = None
    # One config dict per pygetpapers flag; consumed by
    # make_pygetpapers_check_button / add_flags_to_query_command.
    self.pygetpapers_flags = {
        XML_FLAG: {
            Gutil.CBOX_BOX: self.xml_box,
            Gutil.CBOX_VAR: self.xml_var,
            Gutil.CBOX_TEXT: "XML",
            Gutil.CBOX_ON: Gutil.ONVAL,
            Gutil.CBOX_OFF: Gutil.OFFVAL,
            Gutil.CBOX_DEFAULT: Gutil.ONVAL,
            Gutil.CBOX_BRIEF: "-x",
            Gutil.CBOX_FULL: "--xml",
            Gutil.CBOX_TOOLTIP: "output XML",
        },
        PDF_FLAG: {
            Gutil.CBOX_BOX: self.pdf_box,
            Gutil.CBOX_VAR: self.pdf_var,
            Gutil.CBOX_TEXT: "PDF",
            Gutil.CBOX_ON: Gutil.ONVAL,
            Gutil.CBOX_OFF: Gutil.OFFVAL,
            Gutil.CBOX_DEFAULT: Gutil.OFFVAL,
            Gutil.CBOX_BRIEF: "-p",
            Gutil.CBOX_FULL: "--pdf",
            Gutil.CBOX_TOOLTIP: "output PDF",
        },
        SUPP_FLAG: {
            Gutil.CBOX_BOX: self.supp_box,
            Gutil.CBOX_VAR: self.supp_var,
            Gutil.CBOX_TEXT: "SUPP",
            Gutil.CBOX_ON: Gutil.ONVAL,
            Gutil.CBOX_OFF: Gutil.OFFVAL,
            Gutil.CBOX_DEFAULT: Gutil.OFFVAL,
            Gutil.CBOX_BRIEF: "-s",
            Gutil.CBOX_FULL: "--supp",
            Gutil.CBOX_TOOLTIP: "output Supplemental data (often absent)",
        },
        NOEXEC_FLAG: {
            Gutil.CBOX_BOX: self.noexec_box,
            Gutil.CBOX_VAR: self.noexec_var,
            Gutil.CBOX_TEXT: "-n",
            Gutil.CBOX_ON: Gutil.ONVAL,
            Gutil.CBOX_OFF: Gutil.OFFVAL,
            Gutil.CBOX_DEFAULT: Gutil.OFFVAL,
            Gutil.CBOX_BRIEF: "-n",
            Gutil.CBOX_FULL: "--no download",
            Gutil.CBOX_TOOLTIP: "if checked do not download ",
        },
        CSV_FLAG: {
            Gutil.CBOX_BOX: self.csv_box,
            Gutil.CBOX_VAR: self.csv_var,
            Gutil.CBOX_TEXT: "CSV",
            Gutil.CBOX_ON: Gutil.ONVAL,
            Gutil.CBOX_OFF: Gutil.OFFVAL,
            Gutil.CBOX_DEFAULT: Gutil.OFFVAL,
            Gutil.CBOX_BRIEF: "-c",
            Gutil.CBOX_FULL: "--makecsv",
            Gutil.CBOX_TOOLTIP: "output metadata as CSV",
        },
        HTML_FLAG: {
            Gutil.CBOX_BOX: self.html_box,
            Gutil.CBOX_VAR: self.html_var,
            Gutil.CBOX_TEXT: "HTML",
            Gutil.CBOX_ON: Gutil.ONVAL,
            Gutil.CBOX_OFF: Gutil.OFFVAL,
            Gutil.CBOX_DEFAULT: Gutil.OFFVAL,
            # no CBOX_BRIEF: --makehtml has no short form; consumers fall
            # back to CBOX_FULL
            Gutil.CBOX_FULL: "--makehtml",
            Gutil.CBOX_TOOLTIP: "output metadata/abstract as HTML",
        },
    }
    self.flags_keys = self.pygetpapers_flags.keys()
def make_pygetpapers_query_frame(self, master, TOP):
    """Build the Pygetpapers frame: download/make-project buttons, the
    flag checkboxes and the free-text query entry.

    :param master: parent widget
    :param TOP: packing side (currently unused; kept for caller compatibility)
    :return: (frame, title_var)
    """
    pygetpapers_frame, title_var = Gutil.make_frame_with_hide(master,
                                                              title="Pygetpapers",
                                                              tooltip="build query from dictionaries, "
                                                              "flags and text; and RUN",
                                                              )
    self.download_save, _ = Gutil.make_frame(pygetpapers_frame,
                                             title="project",
                                             )
    self.download_save.pack()
    sub_frame = self.download_save
    self.create_run_button(sub_frame)
    # return value not needed; the button wires itself up (removed unused local)
    self.create_make_project_button(sub_frame)
    self.make_getpapers_args(pygetpapers_frame)
    self.entry_text = Gutil.make_entry_box(pygetpapers_frame, text="query")
    return pygetpapers_frame, title_var
def make_ami_project(self, master):
    """Build the "AMI" frame with checkboxes for the post-download steps
    (ami section, ami pdfbox).

    :param master: parent widget
    """
    ami_project_frame, title_var = Gutil.make_frame_with_hide(master,
                                                              title="AMI",
                                                              tooltip="process AMI project",
                                                              )
    ami_project_frame.pack()
    section_box = None
    section_var = None
    # config for the "make sections" checkbox
    self.ami_section_dict = {
        Gutil.CBOX_BOX: section_box,
        Gutil.CBOX_VAR: section_var,
        Gutil.CBOX_TEXT: "make sections",
        Gutil.CBOX_ON: Gutil.ONVAL,
        Gutil.CBOX_OFF: Gutil.OFFVAL,
        Gutil.CBOX_DEFAULT: Gutil.ONVAL,  # default is ON
        Gutil.CBOX_TOOLTIP: "run ami section to create all sections ",
    }
    # make sections
    Gutil.make_checkbox_from_dict(ami_project_frame, self.ami_section_dict)
    self.pdfbox_box = None
    self.pdfbox_var = None
    # config for the "run pdfbox" checkbox
    self.ami_pdfbox_dict = {
        Gutil.CBOX_BOX: self.pdfbox_box,
        Gutil.CBOX_VAR: self.pdfbox_var,
        Gutil.CBOX_TEXT: "run pdfbox",
        Gutil.CBOX_ON: Gutil.ONVAL,
        Gutil.CBOX_OFF: Gutil.OFFVAL,
        Gutil.CBOX_DEFAULT: Gutil.ONVAL,
        Gutil.CBOX_TOOLTIP: "run ami pdfbox to make SVG and images",
    }
    Gutil.make_checkbox_from_dict(ami_project_frame, self.ami_pdfbox_dict)
def make_ami_search(self, master):
    """Build the "Search" frame: search button, add-CProject button, the
    project name/description entries and the project listbox.

    :param master: parent widget
    :return: (frame, title_var)
    """
    self.run_ami_frame, title_var = Gutil.make_frame_with_hide(master,
                                                               title="Search",
                                                               tooltip="wordcount, or phrases or ami search using dictionaries",
                                                               )
    run_button_var = tk.StringVar(value="SEARCH PROJECT")
    ami_button = tk.Button(
        self.run_ami_frame, textvariable=run_button_var, command=self.run_ami_search)
    ami_button.pack(side=tk.BOTTOM)
    new_project_button = ttk.Button(
        self.run_ami_frame,
        text='Add CProject',
        command=self.add_cproject
    )
    CreateToolTip(new_project_button,
                  "select project in Project Box and give mnemonic in entry")
    new_project_button.pack(side=tk.BOTTOM, expand=True)
    # project name entry
    self.new_project_name_var = tk.StringVar()
    self.new_project_name_entry = tk.Entry(self.run_ami_frame,
                                           textvariable=self.new_project_name_var)
    self.new_project_name_entry.pack(side=LEFT)
    # project description entry
    self.new_project_desc_var = tk.StringVar()
    self.new_project_desc_entry = tk.Entry(self.run_ami_frame,
                                           textvariable=self.new_project_desc_var)
    # BUG FIX: the original re-packed new_project_name_entry here, so the
    # description entry was never shown.
    self.new_project_desc_entry.pack(side=tk.RIGHT)
    self.refresh_project_listbox(self.run_ami_frame)
    return self.run_ami_frame, title_var
def refresh_project_listbox(self, run_ami_frame):
    """(Re)create the listbox showing the known AMI project names.

    :param run_ami_frame: parent frame for the listbox
    """
    self.project_names_listbox = self.create_generic_listbox(
        self.current_ami_projects.project_dict.keys(),
        master=run_ami_frame,
    )
    self.project_names_listbox.pack(side=BOTTOM)
def add_cproject(self):
    """Register the directory in the outdir box as a new CProject (using
    the name/description entries) and refresh the project listbox."""
    new_dir = self.outdir_var.get()
    print("add CProject ", new_dir, "to project list")
    label = self.new_project_name_var.get()
    description = self.new_project_desc_var.get()
    self.current_ami_projects.add_with_check(label, new_dir, description)
    # rebuild the listbox so the new project shows up
    self.project_names_listbox.destroy()
    self.refresh_project_listbox(self.run_ami_frame)
def run_ami_search(self):
    """Run an AMI search over the current projects, driven by this GUI."""
    searcher = AmiSearch()
    searcher.ami_projects = self.current_ami_projects
    searcher.run_search_from_gui(self)
def make_getpapers_args(self, frame):
    """Build the pygetpapers argument area: one checkbox per flag plus the
    max-hits spinbox.

    :param frame: parent widget
    """
    getpapers_args_frame = tk.Frame(frame,
                                    highlightbackground="black",
                                    highlightthickness=1)
    getpapers_args_frame.pack(side=tk.TOP)
    checkbox_frame = tk.Frame(getpapers_args_frame,
                              highlightbackground="black",
                              highlightthickness=1)
    checkbox_frame.pack(side=tk.TOP)
    Gutil.make_help_label(checkbox_frame, tk.LEFT,
                          "pygetpapers checkboxes")
    # one checkbox per configured pygetpapers flag
    for key in self.flags_keys:
        self.make_pygetpapers_check_button(checkbox_frame, key)
    # maximum number of hits passed to pygetpapers as -k
    self.spin = Gutil.make_spinbox(
        getpapers_args_frame, "maximum hits (-k)", min=1, max=self.max_max_hits)
def make_pygetpapers_check_button(self, master, key):
    """Create one pygetpapers flag checkbox from its configuration dict.

    :param master: parent widget
    :param key: key into self.pygetpapers_flags
    """
    Gutil.make_checkbox_from_dict(master, self.pygetpapers_flags[key], side=tk.LEFT)
def create_run_button(self, master):
    """Create the DOWNLOAD button and the entry that echoes the generated
    pygetpapers command line.

    :param master: parent widget
    """
    button = tk.Button(master)
    button[Gutil.CBOX_TEXT] = "DOWNLOAD"
    button[Gutil.CBOX_COMMAND] = self.create_pygetpapers_query_and_run
    button.pack(side=LEFT)
    # echo of the generated pygetpapers command
    self.pygetpapers_command = tk.Entry(master, bg="#ffffdd")
    self.pygetpapers_command.pack(side=LEFT, expand=True)
def create_make_project_button(self, master):
    """Create the (currently disabled) "Make project" button.

    :param master: parent widget
    :return: the button widget
    """
    button = tk.Button(master)
    button[Gutil.CBOX_TEXT] = "Make project"
    button[Gutil.CBOX_COMMAND] = self.save_project
    # NOTE(review): this overwrites self.pygetpapers_command created in
    # create_run_button — confirm whether a second entry is intended.
    self.pygetpapers_command = tk.Entry(master, bg="#ffffdd")
    self.pygetpapers_command.pack(side=tk.RIGHT, expand=True)
    button.pack(side=tk.RIGHT)
    button["state"] = "disabled"  # save_project is still a stub
    return button
def make_dictionary_content_boxes(self, master, dictionary_dict, selected_dict_names):
    """Create a notebook with one tab/content box per selected dictionary.

    :param master: parent widget (the dcb_frame)
    :param dictionary_dict: name -> dictionary object mapping
    :param selected_dict_names: dictionary names chosen in the names listbox
    """
    frame = tk.Frame(master,
                     highlightcolor="blue",
                     highlightthickness=10)
    frame.pack()
    print(f"created dictionary_content_box master{master}")
    self.dictionary_content_notebook = ttk.Notebook(frame)
    label = tk.Label(self.dictionary_content_notebook,
                     text="Dictionary Notebook")
    CreateToolTip(label, "display of selected dictionaries")
    label.pack(side=tk.TOP)
    self.dictionary_content_notebook.pack()
    self.selected_boxes = []
    for dict_name in selected_dict_names:
        search_dictionary = dictionary_dict[dict_name]
        # tab page for this dictionary
        f1 = tk.Frame(self.dictionary_content_notebook,
                      highlightcolor="blue",
                      highlightthickness=2)
        self.dictionary_content_notebook.add(f1, text=dict_name)
        description = "description??"  # NOTE(review): placeholder text
        curbox = self.make_dictionary_content_box(
            self.dictionary_content_notebook, dict_name, search_dictionary.file, desc=description)
        curbox.pack()
        self.selected_boxes.append(curbox)
def make_cproject_frame(self, master, box_side):
    """Build the CProject frame: directory chooser, display button and the
    output-directory entry (defaulting to ~/temp).

    :param master: parent widget
    :param box_side: packing side (currently unused; kept for caller compatibility)
    :return: the populated frame
    """
    # (removed a redundant local `from tkinter import ttk` — ttk is already
    # used at module scope elsewhere in this class)
    cproject_frame, title_var = Gutil.make_frame_with_hide(master,
                                                           title="CProject",
                                                           tooltip="Project directory",
                                                           )
    cproject_frame.pack(side=TOP)
    open_button = ttk.Button(
        cproject_frame,
        text='Dir',
        command=self.select_directory
    )
    open_button.pack(side=LEFT, expand=True)
    display_button = ttk.Button(
        cproject_frame,
        text='Display',
        command=self.display_directory
    )
    display_button.pack(side=tk.RIGHT, expand=True)
    default_dir = os.path.join(os.path.expanduser("~"), "temp")
    self.outdir_var = tk.StringVar(None)
    self.dir_entry = tk.Entry(
        cproject_frame, textvariable=self.outdir_var, width=25)
    Gutil.refresh_entry(self.dir_entry, default_dir)
    self.dir_entry.pack(side=tk.RIGHT)
    return cproject_frame
def select_directory(self):
    """Ask the user for an output directory and put it in the dir entry."""
    from tkinter import filedialog as fd
    # (removed unused `from tkinter import messagebox`)
    filename = fd.askdirectory(
        title='Output directory',
        initialdir=os.path.expanduser("~"),  # HOME directory
    )
    Gutil.refresh_entry(self.dir_entry, filename)
def display_directory(self):
    """Show the current output directory as a tree in the main display."""
    title = "dummy title"
    if self.ami_tree is None:
        # first use: create the tree model and its treeview widget
        self.ami_tree = AmiTree(self)
        self.treeview = self.ami_tree.get_or_create_treeview(
            self.main_display_frame, title)
    parent = ''  # root of the treeview
    outdir_val = self.outdir_var.get()
    self.ami_tree.directory = outdir_val
    self.ami_tree.recursive_display(outdir_val, parent, self.treeview)
def make_dictionary_content_box(self, master, dictionary_name, ami_dictionary, desc="Missing desc"):
    """Create a listbox of one dictionary's entry names; selecting an item
    shows it via show_dictionary_item.

    :param master: parent widget
    :param dictionary_name: display name of the dictionary
    :param ami_dictionary: path to the dictionary XML file
    :param desc: tooltip text (Default value = "Missing desc")
    :return: the listbox widget
    """
    frame, _ = Gutil.make_frame(master,
                                title=dictionary_name,
                                tooltip=desc,
                                )
    frame.pack(side=LEFT)
    box = self.create_generic_listbox(self.read_entry_names(ami_dictionary),
                                      master=frame, title="select dictionary items")
    box.pack(side=BOTTOM)
    # bind dictionary_name as a lambda default so each box keeps its own name
    box.bind("<<ListboxSelect>>", lambda event, self=self, dictionary=dictionary_name:
             self.show_dictionary_item(event, dictionary))
    return box

"""
https://stackoverflow.com/questions/4299145/getting-the-widget-that-triggered-an-event
"""
def show_dictionary_item(self, event, dictionary_name):
    """Listbox callback: show the selected term's dictionary XML entry and,
    if available, its image.

    :param event: Tk event; event.widget is the listbox that fired
    :param dictionary_name: name of the dictionary the listbox belongs to
    """
    box = event.widget
    selections = Gutil.get_selections_from_listbox(box)
    selection = selections[0] if len(selections) > 0 else None
    if selection is not None:
        term = selection.lower()
        dictionary = AmiDictionaries().dictionary_dict[dictionary_name]
        entry_xml, image_url = dictionary.get_xml_and_image_url(term)
        self.view_main_text_content(entry_xml)
        if image_url is not None:
            with urlopen(image_url) as u:
                raw_data = u.read()
            im = Image.open(BytesIO(raw_data))
            w, h = im.size
            # scale down to at most 600px wide, preserving aspect ratio
            if w > 600:
                h = int(h * 600 / w)
                w = 600
            # Image.LANCZOS replaces the Image.ANTIALIAS alias removed in
            # Pillow 10 (same filter).
            im = im.resize((w, h), Image.LANCZOS)
            photo = ImageTk.PhotoImage(im)
            self.set_image_and_persist(photo)
    box.selection_clear(0, tk.END)
def create_generic_listbox(self, items, master=None, command=None, title=None, tooltip=None, button_text="select"):
    """Create a framed listbox from `items`, optionally with an action button.

    :param items: iterable of strings to list
    :param master: parent widget (Default value = None)
    :param command: callback for the optional button (Default value = None)
    :param title: frame title (Default value = None)
    :param tooltip: frame tooltip; auto-generated if None (Default value = None)
    :param button_text: label for the optional button (Default value = "select")
    :return: the listbox widget
    """
    if tooltip is None:
        tooltip = f"generic listbox {title}"
    frame, title_var = Gutil.make_frame(master,
                                        title=title,
                                        tooltip=tooltip,
                                        highlightthickness=2,
                                        )
    lb = Gutil.create_listbox_from_list(frame, items)
    lb.pack(side=tk.BOTTOM)
    # a button is only added when the caller supplies a command
    if command is not None:
        button = tk.Button(frame, text=button_text, command=command,)
        button.pack(side=tk.BOTTOM)
    return lb
# frames and windows
"""
https://stackoverflow.com/questions/24656138/python-tkinter-attach-scrollbar-to-listbox-as-opposed-to-window/24656407
"""
def run_query_and_get_output(self, args):
    """Run pygetpapers as a subprocess and summarize hits/saved counts.

    :param args: full command-line argument list for the subprocess
    :return: the subprocess stderr lines, or a one-element failure message
    """
    try:
        _, stderr_lines = Gutil.run_subprocess_get_lines(args)
    except Exception:
        # deliberate best-effort: report failure in the UI, don't crash
        messagebox.showinfo(title="query failed",
                            message="failed, maybe no output")
        return ["failure, probably no hits"]
    saved = 0
    hits = 0
    # pygetpapers reports progress on stderr; scrape hit/save counts
    for line in stderr_lines:
        if TOTAL_HITS_ARE in line:
            hits = line.split(TOTAL_HITS_ARE)[-1]
        if WROTE_XML in line:
            saved += 1
    messagebox.showinfo(
        title="end search", message="finished search, hits: "+str(hits)+", saved: "+str(saved))
    return stderr_lines
def create_pygetpapers_query_and_run(self):
    """Assemble the pygetpapers command from the GUI state, run it, and
    trigger the optional ami section/pdfbox post-processing steps."""
    limit = self.spin.get()  # max hits (-k)
    query_string = ""
    query_string = self.add_query_entry(query_string)
    query_string = self.add_dictionary_box_terms(query_string)
    if query_string == "":
        print("No query, no submission")
        messagebox.showinfo(
            title="query_output", message="no query or dictionary boxes selected; no submission")
        return
    self.project_dir = self.outdir_var.get()
    if self.project_dir == "":
        print("must give outdir")
        messagebox.showinfo(title="outdir box", message="must give outdir")
        return
    cmd_options = [PYGETPAPERS, "-q", query_string,
                   "-o", self.project_dir, "-k", limit]
    self.add_flags_to_query_command(cmd_options)
    # echo the command in the UI entry
    self.pygetpapers_command.insert(0, str(cmd_options))
    lines = self.run_query_and_get_output(cmd_options)
    # NOTE(review): `root` is the module-level Tk instance, not an attribute —
    # consider self.master instead.
    self.display_query_output(root, lines)
    # optional post-processing, controlled by the AMI frame checkboxes
    if self.ami_section_dict[Gutil.CBOX_VAR].get() == Gutil.ONVAL:
        self.run_ami_sections()
    if self.ami_pdfbox_dict[Gutil.CBOX_VAR].get() == Gutil.ONVAL:
        self.run_ami_pdfbox()
def save_project(self):
    """Placeholder for saving the current project (not yet implemented)."""
    pass
def run_ami_sections(self):
    """Run `ami section` on the current project directory and print stdout."""
    # (removed unused `import subprocess`; Gutil wraps the subprocess call)
    args = ["ami", "-p", self.project_dir, "section"]
    stdout_lines, _ = Gutil.run_subprocess_get_lines(args)
    print("stdout", stdout_lines)
def run_ami_pdfbox(self):
    """Run `ami pdfbox` on the current project directory and print stdout."""
    # (removed unused `import subprocess`; Gutil wraps the subprocess call)
    args = ["ami", "-p", self.project_dir, "pdfbox"]
    stdout_lines, _ = Gutil.run_subprocess_get_lines(args)
    print("stdout", stdout_lines)
def add_flags_to_query_command(self, cmd_options):
    """Append the CLI flag of every checked pygetpapers checkbox to
    cmd_options (mutated in place).

    :param cmd_options: command-line argument list being built
    """
    # (removed `if k in self.pygetpapers_flags` — always true when
    # iterating the dict's own items)
    for key, cbox in self.pygetpapers_flags.items():
        if cbox[Gutil.CBOX_VAR].get() != Gutil.ONVAL:
            continue
        # prefer the short flag, fall back to the long one
        option = cbox.get(Gutil.CBOX_BRIEF)
        if option is None:
            option = cbox.get(Gutil.CBOX_FULL)
        if option is None:
            print("Cannot find keys for", key)
        else:
            cmd_options.append(option)
def add_query_entry(self, query_string):
    """Read the free-text query box and wrap it in ("...") if non-empty.

    :param query_string: NOTE(review): this argument is immediately
        overwritten and therefore ignored — confirm whether callers
        expect it to be combined with the entry text.
    :return: the quoted query, or "" when the entry box is empty
    """
    query_string = self.entry_text.get()
    if query_string != "":
        query_string = '("' + query_string + '")'
    return query_string
def add_dictionary_box_terms(self, lbstr):
    """AND-join the query string of every selected dictionary box onto lbstr.

    :param lbstr: query string built so far (may be "")
    :return: the extended query string
    """
    for listbox in self.selected_boxes:
        term = self.make_query_string(listbox)
        if not term:
            continue  # skip boxes with no selection
        lbstr = term if lbstr == "" else lbstr + " AND " + term
    return lbstr
def add_if_checked(self, cmd_options, var, val):
    """Append `val` to cmd_options when checkbox variable `var` is ON.

    :param cmd_options: command-line argument list being built
    :param var: a Tk variable (or None) holding the checkbox state
    :param val: the flag to append
    """
    # FIX: the original referenced `gu.ONVAL`; `gu` is not defined in this
    # file — every other method uses `Gutil`, so this looks like a rename
    # leftover.
    if var is not None and var.get() == Gutil.ONVAL:
        cmd_options.append(val)
def print_check(self):
    """Debug helper: print the boolean state of self.check.

    NOTE(review): self.check is not assigned anywhere in the visible code —
    confirm where it is created before relying on this.
    """
    s = False
    print("check", self.check.getboolean(s))
def make_query_string(self, listbox):
    """Build a parenthesized, OR-joined query from the listbox selection.

    :param listbox: Tk listbox whose selected items become quoted terms
    :return: '("a" OR "b" ...)' — or "" when nothing is selected, so
        callers (see add_dictionary_box_terms) can skip the box
    """
    selected = Gutil.get_selections_from_listbox(listbox)
    # BUG FIX: the original returned "()" for an empty selection, which
    # passed the caller's `== ""` check and corrupted the query.
    if not selected:
        return ""
    return "(" + " OR ".join(Gutil.quoteme(item) for item in selected) + ")"
def selected_text(self, event):
    """Debug callback for text-selection events.

    BUG FIX: the original omitted `self`, but it is bound as the bound
    method `self.selected_text` (display_query_output), so firing the
    event raised TypeError.

    :param event: the Tk event object
    """
    print("SELECTED", event)
def display_query_output(self, master, lines):
    """Show the pygetpapers output lines in a scrolled text area.

    :param master: parent widget
    :param lines: iterable of output lines
    :return: the ScrolledText widget
    """
    # Title Label
    frame = tk.Frame(master)
    frame.pack(side=BOTTOM)
    lab = tk.Label(frame,
                   text="output",
                   font=("Arial", 15),
                   background='white',
                   foreground="white")
    lab.pack(side="bottom")
    # scrolled text area holding the joined output
    text_area = scrolledtext.ScrolledText(frame,
                                          width=30,
                                          height=8,
                                          font=("Arial", 15))
    text_area.pack(side="bottom")
    text = "\n".join(lines)
    text_area.insert(tk.INSERT, text)
    # NOTE(review): `button1` is a free name, presumably a module-level
    # handler defined above this chunk — confirm it exists.
    text_area.bind("<Button-1>", button1)
    text_area.bind("<<Selected Text>>", self.selected_text)
    # intentionally left editable; disable for read-only:
    # text_area.configure(state='disabled')
    return text_area
def read_entry_names(self, dictionary_file):
    """Return the sorted 'name' attributes of all <entry> elements in an
    AMI dictionary XML file.

    :param dictionary_file: path to the dictionary XML file
    :return: sorted list of entry names
    :raises FileNotFoundError: if the file does not exist
    """
    # explicit exception instead of `assert` (asserts vanish under -O)
    if not os.path.exists(dictionary_file):
        raise FileNotFoundError(f"dictionary file not found: {dictionary_file}")
    element_tree = ET.parse(dictionary_file)
    entries = element_tree.findall("entry")
    return sorted(entry.attrib["name"] for entry in entries)
def make_quit(self, master):
    """Create the QUIT button that destroys the root window.

    :param master: parent widget
    """
    frame, title_var = Gutil.make_frame(master,
                                        title="",
                                        tooltip="quit and destroy window",  # typo fix: was "windoe"
                                        )
    # renamed from `quit` to avoid shadowing the builtin
    quit_button = tk.Button(frame, text="QUIT", fg="red",
                            command=self.master.destroy)
    quit_button.pack(side=tk.BOTTOM)
def make_dictionary_content_boxes_frame(self, master):
    """Create the (initially empty) frame that will hold the per-dictionary
    content boxes.

    :param master: parent widget
    :return: the created frame (also stored as self.dcb_frame)
    """
    self.dcb_frame, title_var = Gutil.make_frame(master,
                                                 tooltip="dictionary content boxes will be added here",
                                                 )
    self.dcb_frame.pack()
    return self.dcb_frame
def query_wikidata(self, text):
    """Launch a Wikidata browser window for the given text.

    (FIX: the docstring was placed after the import, where it is just a
    stray string expression, not a docstring.)

    :param text: query text to search in Wikidata
    """
    from pyami.wikimedia import WikidataBrowser
    print("launch wikidata browser")
    # the browser window manages its own lifetime; local ref kept for clarity
    wikidata_browser = WikidataBrowser(self, text)
def create_wikidata_query_url(self, text):
    """Build a Wikidata full-text search URL for the given text.

    :param text: free-text search query
    :return: the search URL with spaces encoded as '+'
    """
    base_url = "https://www.wikidata.org/w/index.php?search="
    terms = text.strip().replace(" ", "+")
    return base_url + terms
# https://www.wikidata.org/w/index.php?search=lantana+camara&search=lantana+camara&title=Special%3ASearch&go=Go&ns0=1&ns120=1
"""unused"""
def print_console():
    """Poll the (debug) console Text widget and echo it every second."""
    print(console.get("1.0", "end-1c"))
    # re-schedule itself on the Tk event loop
    root.after(1000, print_console)
# main: build the Tk root, the AmiGui application, and run the event loop
use_console = False  # debugging not yet finished
root = tk.Tk()
app = AmiGui(master=root)
# debug console; not packed (and print_console not scheduled) while
# use_console is off
console = tk.Text(app)
app.mainloop()
|
# Two ways to run the doctests:
# 1. Test the whole file:
#    python -m doctest -v hw01.py
# 2. Test a single function: start an interactive Python session, then use
#    doctest.run_docstring_examples(f, globs, verbose=False, name="NoName", compileflags=None, optionflags=0)
# >>> import doctest
# >>> from hw01 import a_plus_abs_b
# >>> doctest.run_docstring_examples(a_plus_abs_b, globs=None, verbose=True, name="a_plus_abs_b")
#
""" Homework 1: Control """
from operator import add, sub
def a_plus_abs_b(a, b):
    """Return a+abs(b), but without calling abs.

    >>> a_plus_abs_b(2, 3)
    5
    >>> a_plus_abs_b(2, -3)
    5
    """
    # subtracting a negative b is the same as adding its absolute value
    op = sub if b < 0 else add
    return op(a, b)
def two_of_three(a, b, c):
    """Return x*x + y*y, where x and y are the two largest members of the
    positive numbers a, b, and c.

    >>> two_of_three(1, 2, 3)
    13
    >>> two_of_three(5, 3, 1)
    34
    >>> two_of_three(10, 2, 8)
    164
    >>> two_of_three(5, 5, 5)
    50
    """
    # sum of all three squares minus the square of the smallest value
    smallest = min(a, b, c)
    return a ** 2 + b ** 2 + c ** 2 - smallest ** 2
def largest_factor(n):
    """Return the largest factor of n that is smaller than n.

    >>> largest_factor(15) # factors are 1, 3, 5
    5
    >>> largest_factor(80) # factors are 1, 2, 4, 5, 8, 10, 16, 20, 40
    40
    >>> largest_factor(13) # factor is 1 since 13 is prime
    1
    """
    # The largest proper factor is n divided by its smallest divisor >= 2.
    # Scanning only up to sqrt(n) is O(sqrt n) instead of the original
    # O(n) scan over every candidate.
    d = 2
    while d * d <= n:
        if n % d == 0:
            return n // d
        d += 1
    return 1  # n is prime (or < 4): the only smaller factor is 1
def if_function(condition, true_result, false_result):
    """Return true_result if condition is a true value, and
    false_result otherwise.

    >>> if_function(True, 2, 3)
    2
    >>> if_function(False, 2, 3)
    3
    >>> if_function(3==2, 3+2, 3-2)
    1
    >>> if_function(3>2, 3+2, 3-2)
    5
    """
    # conditional expression instead of an if/else statement
    return true_result if condition else false_result
def with_if_statement():
    """
    >>> result = with_if_statement()
    2
    >>> print(result)
    None
    """
    # An `if` statement evaluates only the branch that is taken, so only
    # one of t()/f() is called (contrast with with_if_function, where both
    # arguments are evaluated before the call).
    if c():
        return t()
    else:
        return f()
def with_if_function():
    """
    >>> result = with_if_function()
    1
    2
    >>> print(result)
    None
    """
    # All three arguments are evaluated before if_function runs, so both
    # t() and f() execute regardless of c()'s value.
    return if_function(c(), t(), f())
def c():
    """Condition selector: False, so the false branch is chosen.

    The doctests require: with_if_statement prints only "2" (so c() must
    be False and f() must print 2), while with_if_function prints "1"
    then "2" (so t() must print 1). Both helpers return None.
    """
    return False


def t():
    """True-branch helper: print 1 and return None."""
    # BUG FIX: originally `return 1` — the with_if_function doctest
    # requires the value to be *printed*, not returned.
    print(1)


def f():
    """False-branch helper: print 2 and return None."""
    # BUG FIX: originally `return 2` — both doctests require printing.
    print(2)
def hailstone(n):
    """Print the hailstone sequence starting at n and return its
    length.

    >>> a = hailstone(10)
    10
    5
    16
    8
    4
    2
    1
    >>> a
    7
    """
    # collect the whole sequence first, then print it and report its length
    sequence = [n]
    while n != 1:
        # integer division keeps the values ints (no "1.0" output)
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        sequence.append(n)
    for value in sequence:
        print(value)
    return len(sequence)
|
<reponame>bensharkey3/Guess-The-Number<filename>sourcecode.py
import random
import os
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
def verify_guess(i):
    '''Validate a guess: re-prompt until it is an integer between 1 and 100.

    Fixes: bare except, unreachable `break`, and a bare `raise` (with no
    active exception) abused for control flow in the original.

    :param i: candidate value (str or int)
    :return: the validated int in [1, 100]
    '''
    while True:
        try:
            value = int(i)
        except (TypeError, ValueError):
            value = None
        if value is not None and 1 <= value <= 100:
            return value
        print('not a valid number!')
        i = input('guess a number between 1 and 100: ')
def verify_num(i):
    '''Validate a number for the computer to guess: re-prompt until it is
    an integer between 1 and 100.

    Fixes the same defects as verify_guess (bare except, unreachable
    `break`, bare `raise` used for control flow) for consistency.

    :param i: candidate value (str or int)
    :return: the validated int in [1, 100]
    '''
    while True:
        try:
            value = int(i)
        except (TypeError, ValueError):
            value = None
        if value is not None and 1 <= value <= 100:
            return value
        print('not a valid number!')
        i = input('Enter a number for the computer to guess: ')
def stats():
    '''Persist this round's results and show per-player statistics.

    Reads/writes GuessTheNumberGameData.csv — trying a hard-coded local
    path, then the cwd, then downloading a fresh copy from GitHub.
    NOTE(review): relies on the module-level globals `name`, `count`,
    `ccount` and `diff` set by the game loop; the bare excepts are a
    deliberate best-effort fallback chain.
    '''
    # locate (or bootstrap) the data file
    try:
        csvfileloc = 'C:\\Users\\user\\Google Drive\\Guess-The-Number\\GuessTheNumberGameData.csv'
        df = pd.read_csv(csvfileloc)
    except:
        try:
            csvfileloc = '{}\\GuessTheNumberGameData.csv'.format(os.getcwd())
            df = pd.read_csv(csvfileloc)
        except:
            df = pd.read_csv('https://raw.githubusercontent.com/bensharkey3/Guess-The-Number/master/GuessTheNumberGameData.csv')
            csvfileloc = '{}\\GuessTheNumberGameData.csv'.format(os.getcwd())
            df.to_csv(csvfileloc, index=False)
    # append this round: one row for the player, one for the computer
    df2 = pd.DataFrame([[pd.to_datetime('today'), name, count, diff], [pd.to_datetime('today'), 'the computer', ccount, -diff]], columns=['Date', 'Name', 'NumberOfGuesses', 'DiffToOpponent'])
    df = df.append(df2)
    df.to_csv(csvfileloc, index=False)
    df = pd.read_csv(csvfileloc)
    # aggregate per player: average, number of turns, best score
    df3 = df.groupby('Name').mean().sort_values(by='NumberOfGuesses').reset_index()
    df4 = df.groupby('Name')[['Date']].count()
    df5 = df.groupby('Name')[['NumberOfGuesses']].min()
    dfprint = df3.merge(df4, on='Name').merge(df5, on='Name')
    # win/loss/draw record from the sign of DiffToOpponent
    df6 = df[df['DiffToOpponent'].notnull()].groupby('Name').count()[['Date']]
    df6.rename(columns={'Date': 'Played'}, inplace=True)
    df6['Lost'] = df[df['DiffToOpponent'] > 0].groupby('Name').count()['Date']
    df6['Won'] = df[df['DiffToOpponent'] < 0].groupby('Name').count()['Date']
    df6['Drew'] = df[df['DiffToOpponent'] == 0].groupby('Name').count()['Date']
    dfprint = dfprint.merge(df6, how='left', on='Name')
    dfprint.rename(columns={'NumberOfGuesses_x': 'Average', 'Date': 'Turns', 'NumberOfGuesses_y': 'Best'}, inplace=True)
    dfprint['Average'] = dfprint['Average'].round(2)
    dfprint['W/L%'] = round(dfprint['Won']*100 / (dfprint['Lost'] + dfprint['Won']), 1)
    dfprint = dfprint[['Name', 'Average', 'Turns', 'Best', 'Played', 'Won', 'Lost', 'Drew', 'W/L%']]
    dfprint.fillna(value=0, inplace=True)
    print(dfprint)
    # histogram: this player's guess counts vs all players
    plt.hist(df[(df['Name'] == name)]['NumberOfGuesses'], bins=(0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5), histtype='stepfilled', density=True, color='b', alpha=0.3, label=name)
    plt.hist(df['NumberOfGuesses'], bins=(0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5), histtype='stepfilled', density=True, color='r', alpha=0.3, label='all players', )
    plt.title("Number of Guesses")
    plt.legend(loc='upper left')
    plt.xticks([1, 2, 3, 4, 5, 6, 7, 8, 9])
    plt.ylabel("Frequency")
    plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
    plt.show()
'''run the program'''
# main game loop: player guesses, then the computer guesses the same
# number by binary search, then results are compared and persisted
name = input('Enter your name: ')
playing = 'y'
while playing == 'y':
    answer = random.randint(1, 100)
    print(' ')
    print('ok', name)
    print(' ')
    # player's guesses
    guess = verify_guess(input('Guess a number between 1 and 100: '))
    count = 1
    while guess != answer:
        if guess > answer:
            print('lower!')
        elif guess < answer:
            print('higher!')
        guess = verify_guess(input('guess again: '))
        count += 1
    print('you got it!')
    print('you guessed it in {} guesses'.format(count))
    print(' ')
    input('press enter to see how you compared to the computer')
    print(' ')
    '''computers guesses'''
    # binary search over [low, high]; first guess randomized so the
    # computer doesn't always open with 50
    low = 1
    high = 100
    ccount = 1
    cguess = random.choice([36, 64])
    while cguess != answer:
        print("the computer's guess {}: {}".format(ccount, cguess))
        if cguess > answer:
            print('lower!')
        elif cguess < answer:
            print('higher!')
        # shrink the search interval around the answer
        if cguess > answer:
            high = cguess
        elif cguess < answer:
            low = cguess + 1
        cguess = (low+high)//2
        ccount += 1
    # diff > 0: computer won; diff < 0: player won (persisted by stats())
    diff = count - ccount
    print("the computer's guess {}: {}".format(ccount, cguess))
    print('got it!')
    print('The computer took {} guesses'.format(ccount))
    print(' ')
    if ccount > count:
        print(' - you defeated the computer by {} guess(es)!'.format(-diff))
        print(' ')
    elif count > ccount:
        print(' - the computer defeated you by {} guess(es)'.format(diff))
        print(' ')
    else:
        print(" - it's a draw!")
        print(' ')
    stats()
    # play-again prompt
    while True:
        playagain = str(input('Play again? (enter y or n): '))
        if playagain == 'n':
            input('Thanks for playing! - press enter to exit')
            playing = 'n'
            break
        elif playagain == 'y':
            playing = 'y'
            break
        else:
            print('invalid response, please enter y or n')
# to do:
# histograam ticks in between number labels
# table index to start at 1
# table formatting
# formatting zero decimals
# execute code directly from github |
import numpy as np
import mahotas as mh
from laocoon import equalization as eq
class RFP_Pipeline:
"""
A class that represent the pipeline for RFP analysis.
Attributes
----------
dapi_coords : list
Coordinates of the cell "centers" in the DAPI channel. Used as a reference.
checked : list
Keeps track of which cells have already been counted in other channels.
coords : list
Coordinates of all the cell "centers" in the RFP channel.
count : int
The number of cells counted in the image.
Methods
-------
analyze_rfp_hist_eps(file, dapi_coords, checked)
Calculates the number of counted cells and their coordinates with histogram
equalization and Gaussian filter preprocessing and epsilon quality control.
analyze_rfp_hist(file)
Calculates the number of counted cells and their coordinates with histogram
equalization and Gaussian filter preprocessing.
analyze_rfp_eps(file, dapi_coords, checked)
Calculates the number of counted cells and their coordinates with Gaussian
filter preprocessing and epsilon quality control.
analyze_rfp(file)
Calculates the number of counted cells and their coordinates with Gaussian
filter preprocessing.
epsilon(rfp_coords, dapi_coords, checked)
Helper function for implementing epsilon quality control.
"""
def __init__(self, file, checked, dapi_coords, hist=True, epsilon=True):
    """Run the RFP analysis selected by the hist/epsilon switches.

    Parameters
    ----------
    file : str
        The path to the image.
    checked : list
        Keeps track of which cells have already been counted in other channels.
    dapi_coords : list
        Coordinates of all the cell "centers" in the DAPI channel. Used as a reference.
    hist : boolean, optional
        Decides whether to perform histogram equalization preprocessing on the image
        (default is True).
    epsilon : boolean, optional
        Decides whether to perform epsilon value quality control on the image
        (default is True).
    """
    self.dapi_coords = dapi_coords
    self.checked = checked
    # BUG FIX: the original used independent `if` statements, so the final
    # `else` (bound only to the last `if`) also ran for hist=True and
    # overwrote the earlier results with plain analyze_rfp output.
    # A single if/elif chain runs exactly one analysis.
    if hist and epsilon:
        self.coords, self.count, self.checked = self.analyze_rfp_hist_eps(file, dapi_coords, checked)
    elif hist:
        self.count, self.coords = self.analyze_rfp_hist(file)
    elif epsilon:
        self.coords, self.count, self.checked = self.analyze_rfp_eps(file, dapi_coords, checked)
    else:
        self.count, self.coords = self.analyze_rfp(file)
def analyze_rfp_hist_eps(self, file, dapi_coords, checked):
    """Count cells with histogram equalization + Gaussian preprocessing
    and epsilon quality control.

    Parameters
    ----------
    file : str
        The path to the image.
    dapi_coords : list
        Coordinates of all the cell "centers" in the DAPI channel. Used as a reference.
    checked : list
        Keeps track of which cells have already been counted in other channels.

    Returns
    -------
    (rfp_coords, count, checked) : cell-center coordinates, the epsilon-
    validated cell count, and the updated checked list.
    """
    img = mh.imread(file)
    imgg = mh.colors.rgb2grey(img)
    # NOTE(review): the grey conversion above is immediately overwritten —
    # eq.hist_eq receives the raw RGB image; confirm whether
    # eq.hist_eq(imgg) was intended (and note rgb2grey here vs rgb2gray
    # in analyze_rfp_hist).
    imgg = eq.hist_eq(img)
    imggf = mh.gaussian_filter(imgg, 4.8).astype(np.uint8)
    # regional maxima of the smoothed image act as cell seeds
    rmax = mh.regmax(imggf)
    rfp_seeds, rfp_nuclei = mh.label(rmax)
    rfp_coords = mh.center_of_mass(imgg, labels=rfp_seeds)
    count, checked = self.epsilon(rfp_coords, dapi_coords, checked)
    return rfp_coords, count, checked
    def analyze_rfp_hist(self, file):
        """
        Calculates the number of counted cells and their coordinates with histogram
        equalization and Gaussian filter preprocessing.
        Parameters
        ----------
        file : str
            The path to the image.
        Returns
        -------
        int
            The number of cells counted in the image.
        list
            Coordinates of all the cell "centers" in the RFP channel.
        """
        img = mh.imread(file)
        # NOTE(review): this greyscale conversion is immediately overwritten by the
        # hist_eq() call below; possibly hist_eq was meant to receive imgg -- confirm
        imgg = mh.colors.rgb2gray(img)
        imgg = eq.hist_eq(img)
        # Smooth the equalized image, then label its regional maxima as cells
        imggf = mh.gaussian_filter(imgg,16.5).astype(np.uint8)
        rmax = mh.regmax(imggf)
        rfp_seeds, rfp_nuclei = mh.label(rmax)
        rfp_coords = mh.center_of_mass(imgg,labels=rfp_seeds)
        # rfp_nuclei is the label count, i.e. the number of detected cells
        return rfp_nuclei,rfp_coords
def analyze_rfp_eps(self, file, dapi_coords, checked):
"""
Calculates the number of counted cells and their coordinates with Gaussian
filter preprocessing and epsilon quality control.
Parameters
----------
file : str
The path to the image.
dapi_coords : list
Coordinates of all the cell "centers" in the DAPI channel. Used as a reference.
checked : list
Keeps track of which cells have already been counted in other channels.
Returns
-------
list
Coordinates of all the cell "centers" in the RFP channel.
int
The number of cells counted in the image.
list
Keeps track of which cells have already been counted in other channels.
"""
img = mh.imread(file)
imgg = mh.colors.rgb2grey(img)
imggf = mh.gaussian_filter(imgg,14).astype(np.uint8)
rmax = mh.regmax(imggf)
rfp_seeds, rfp_nuclei = mh.label(rmax)
rfp_coords = mh.center_of_mass(imgg,labels=rfp_seeds)
count, checked = self.epsilon(rfp_coords,dapi_coords,checked)
return rfp_coords, count, checked
def analyze_rfp(self, file):
"""
Calculates the number of counted cells and their coordinates with Gaussian
filter preprocessing.
Parameters
----------
file : str
The path to the image.
Returns
-------
int
The number of cells counted in the image.
list
Coordinates of all the cell "centers" in the RFP channel.
"""
img = mh.imread(file)
imgg = mh.colors.rgb2grey(img)
imggf = mh.gaussian_filter(imgg,14).astype(np.uint8)
rmax = mh.regmax(imggf)
rfp_seeds, rfp_nuclei = mh.label(rmax)
rfp_coords = mh.center_of_mass(imgg,labels=rfp_seeds)
return rfp_nuclei,rfp_coords
def epsilon(self, rfp_coords, dapi_coords, checked):
"""
Helper function for implementing epsilon quality control.
Parameters
----------
edu_coords : list
Coordinates of all the cell "centers" in the RFP channel.
dapi_coords : list
Coordinates of all the cell "centers" in the DAPI channel. Used as a reference.
checked : list
Keeps track of which cells have already been counted in other channels.
"""
rfp_count = 0
for i in range(len(rfp_coords)):
for j in range(len(dapi_coords)):
dist = (dapi_coords[j][0]-rfp_coords[i][0])*(dapi_coords[j][0]-rfp_coords[i][0])+(dapi_coords[j][1]-rfp_coords[i][1])*(dapi_coords[j][1]-rfp_coords[i][1])
if dist <= 265:
rfp_count += 1
checked[j] += 1
return rfp_count,checked
|
"""
Django settings for YDX project.
Generated by 'django-admin startproject' using Django 1.11.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
sys.path.insert(0, os.path.join(BASE_DIR, 'otherapps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = False
# ALLOWED_HOSTS = ['*']
DEBUG = True
ALLOWED_HOSTS = []
# AUTH 方法(支持邮箱登录)
AUTHENTICATION_BACKENDS = ('users.views.CustomBackend',)
# UserProfile 覆盖了 django 内置的 user 表
AUTH_USER_MODEL = 'users.UserProfile'
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'xadmin',
'DjangoUeditor',
'captcha',
'courses',
'operation',
'users',
'teachers',
'pure_pagination',
'trade',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'YDX.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'YDX.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': "ydx2",
'USER': 'root',
'PASSWORD': "<PASSWORD>",
'HOST': "127.0.0.1",
# As spotted on the MySQL 5.7 documentation use default_storage_engine instead!
# 加这句是为兼容后面的自动登陆
'OPTIONS': {'init_command': 'SET default_storage_engine=INNODB;'}
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
# 设置时区
LANGUAGE_CODE = 'zh-hans' # 中文支持,django1.8以后支持;1.8以前是zh-cn
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False # 默认是Ture,时间是utc时间,由于我们要用本地时间,所用手动修改为false!!!!
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
# 发送邮件的setting设置
EMAIL_HOST = 'smtp.qq.com'
EMAIL_PORT = 25
EMAIL_HOST_USER = '你的邮箱'
EMAIL_HOST_PASSWORD = '<PASSWORD>'
EMAIL_USE_TLS = True
EMAIL_FROM = '你的邮箱'
# EMAIL_HOST = 'smtp.qq.com'
# EMAIL_PORT = 465
# EMAIL_HOST_USER = ''
# EMAIL_HOST_PASSWORD = '<PASSWORD>rx<PASSWORD>ga'
# EMAIL_USE_SSL = True
# EMAIL_FROM = ''
# 支付的配置
private_key_path = os.path.join(BASE_DIR, "apps/trade/keys/private_key.txt")
ali_pub_key_path = os.path.join(BASE_DIR, "apps/trade/keys/alipay_key.txt")
# django_simple_captcha 验证码配置
# 格式
CAPTCHA_OUTPUT_FORMAT = u'%(text_field)s %(hidden_field)s %(image)s'
# 噪点样式
CAPTCHA_NOISE_FUNCTIONS = ('captcha.helpers.noise_null', # 没有样式
# 'captcha.helpers.noise_arcs', # 线
'captcha.helpers.noise_dots', # 点
)
# 图片大小
CAPTCHA_IMAGE_SIZE = (100, 50)
CAPTCHA_BACKGROUND_COLOR = '#ffffff'
CAPTCHA_CHALLENGE_FUNCT = 'captcha.helpers.random_char_challenge' # 图片中的文字为随机英文字母,如 mdsh
# CAPTCHA_CHALLENGE_FUNCT = 'captcha.helpers.math_challenge' # 图片中的文字为数字表达式,如1+2=</span>
CAPTCHA_LENGTH = 4 # 字符个数
|
<gh_stars>0
import logging
import time
import gevent
import gevent.socket
import Constants
import GlobalStore
from BotSettingsManager import BotSettingsManager
from IrcMessage import IrcMessage
from MessageLogger import MessageLogger
class DideRobot(object):
	"""A single IRC bot instance: manages the socket connection, channel state, and message dispatch for one configured server."""
	def __init__(self, serverfolder):
		"""Set up all bot state for the server configured in 'serverfolder', and start connecting if the settings load successfully."""
		self.logger = logging.getLogger('DideRobot')
		self.logger.info("New bot for server '{}' created".format(serverfolder))
		#Initialize some variables (in init() instead of outside it to prevent object sharing between instances)
		self.serverfolder = serverfolder
		self.ircSocket = None
		self.nickname = None # Will get set once we connect, when we know if we have the nickname we want
		self.channelsUserList = {} # Will be a dict with joined channels as keys and a list of users in those channels as values
		self.isUpdatingChannelsUserList = False
		self.isMuted = False
		self.connectedAt = None # Will be set to the timestamp on which we connect. 'None' means we're not connected
		self.connectionManagerGreenlet = None # This will get a reference to the greenlet keeping the connection alive. If this ends, the bot is closed down
		self.shouldReconnect = True
		self.reconnectionAttempCount = None # Will keep a count of how many times we've tried to connect, to see if we've exceeded the limit (if any)
		self.maxConnectionRetries = None # None means unlimited attempts, can be set by settings file
		self.secondsBetweenLineSends = None # If it's 'None', there's no rate limiting, otherwise it's a float of seconds between line sends
		self.linesToSend = None # A list with all the lines to send if there is a queue and rate limiting
		self.lineSendingGreenlet = None # Will store the greenlet that is currently working its way through the message queue, or is None if there is none
		self.commandPrefix = "" # Pulled from the settings file, separate variable because it's referenced a lot
		self.commandPrefixLength = 0 # The length if the prefix is also often needed, prevent constant recalculation
		#Load the settings, and only connect to the server if that succeeded
		self.settings = BotSettingsManager(self.serverfolder)
		if self.settings.loadedSuccessfully:
			self.parseSettings()
			self.messageLogger = MessageLogger(self)
			self.connectionManagerGreenlet = gevent.spawn(self.keepServerConnectionAlive)
		else:
			self.logger.error("|{}| Invalid settings file, shutting down".format(self.serverfolder))
			#Also tell the bot manager that we'll stop existing
			GlobalStore.bothandler.unregisterBot(self.serverfolder)
def parseSettings(self):
"""Retrieve some frequently-used values, to store them in a more easily-accessible way"""
#The command prefix is going to be needed often, as will its length. Put that in an easy-to-reach place
self.commandPrefix = self.settings['commandPrefix']
self.commandPrefixLength = len(self.commandPrefix)
#Load in the maximum connection settings to try, if there is any
self.maxConnectionRetries = self.settings.get('maxConnectionRetries', -1)
#Assume values smaller than zero mean endless retries
if self.maxConnectionRetries < 0:
self.maxConnectionRetries = None
#Time in seconds (can have decimals) between line sends, because some servers are rate limited
self.secondsBetweenLineSends = self.settings.get('minSecondsBetweenMessages', -1)
if self.secondsBetweenLineSends <= 0:
self.secondsBetweenLineSends = None
def reloadSettings(self):
self.settings.reloadSettings(True)
if self.settings.loadedSuccessfully:
self.parseSettings()
return True
else:
return False
#CONNECTION FUNCTIONS
def keepServerConnectionAlive(self):
while True:
# Open a connection
self.ircSocket = gevent.socket.socket(gevent.socket.AF_INET, gevent.socket.SOCK_STREAM)
self.ircSocket.settimeout(20.0) #Set the timout to establishing a connection, gets increased when we successfully connect
self.logger.info("Connecting to {} ({} on port {})".format(self.serverfolder, self.settings['server'], self.settings['port']))
try:
self.ircSocket.connect((self.settings['server'], self.settings['port']))
except (gevent.socket.timeout, gevent.socket.error, gevent.socket.herror, gevent.socket.gaierror) as e:
self.logger.error("Unable to connect to server '{}' ({}:{}), reason: {}".format(self.serverfolder, self.settings['server'], self.settings['port'], e))
else:
#Connecting was successful, authenticate
if 'password' in self.settings and len(self.settings['password']) > 0:
self.sendLineToServer("PASS " + self.settings['password'])
self.sendLineToServer("NICK {}".format(self.settings['nickname']))
#Use the specified realname, or fall back to the username if none is provided
realname = self.settings.get('realname', self.settings['nickname'])
self.sendLineToServer("USER {} 4 * :{}".format(self.settings['nickname'], realname)) #The '4' means we want WALLOPS messages but not invisibility
#Start listening for replies
self.handleConnection()
#If we reach here, 'handleConnection' returned, so we apparently lost the connection (either accidentally or intentionally)
#Check if we were still sending messages
if self.lineSendingGreenlet:
# Kill the message sending greenlet to prevent errors
self.lineSendingGreenlet.kill()
self.lineSendingGreenlet = None
#Also clear the message queue, just in case something in there caused the disconnect
self.linesToSend = None
#Clear the channels and users lists
self.channelsUserList = {}
self.isUpdatingChannelsUserList = False
#Shutdown here because it only makes sense if we have been connected previously
self.ircSocket.shutdown(gevent.socket.SHUT_RDWR)
# We lost the connection, so close the socket and store that we lost connection
self.ircSocket.close()
self.ircSocket = None
self.connectedAt = None
#If the connection couldn't be established or got closed, check if we need to re-establish it
if not self.shouldReconnect:
self.logger.info("|{}| Connection closed, shouldn't reconnect, closing down this bot".format(self.serverfolder))
break
#If we reached the maximum reconnection attempts, abort
if self.reconnectionAttempCount and self.maxConnectionRetries and self.reconnectionAttempCount >= self.maxConnectionRetries:
self.logger.info("|{}| Reached max connection retry attempts ({}), closing".format(self.maxConnectionRetries, self.serverfolder))
break
if self.reconnectionAttempCount is None:
self.reconnectionAttempCount = 1
else:
self.reconnectionAttempCount += 1
#Wait increasingly long between reconnection attempts, to give the server a chance to restart
sleepTime = self.reconnectionAttempCount ** 3
self.logger.info("Will try reconnecting to '{}' for attempt {} in {} seconds, max attempts is {}".format(
self.serverfolder, self.reconnectionAttempCount, sleepTime, self.maxConnectionRetries if self.maxConnectionRetries else "not set"))
gevent.sleep(sleepTime)
#If we ever leave this loop, the bot is shut down. Unregister ourselves
GlobalStore.bothandler.unregisterBot(self.serverfolder)
	def handleConnection(self):
		"""Read loop for an established connection: buffer incoming data, split it into IRC lines, and dispatch each line to its irc_* handler. Returns when the connection is lost."""
		#Keep reading for possible incoming messages
		incomingData = ""
		# Set the timeout to 10 minutes, so if our computer loses connection to the server/internet, we notice
		self.ircSocket.settimeout(600)
		while True:
			try:
				# NOTE(review): Python 2 style; on Python 3 recv() returns bytes and this str concatenation would fail
				incomingData += self.ircSocket.recv(2048)
			except gevent.socket.timeout:
				self.logger.warning("|{}| Our connection to the server timed out".format(self.serverfolder))
				return
			# A closed connection just makes recv return an empty string. Check for that
			if incomingData == "":
				self.logger.info("|{}| Server closed the connection".format(self.serverfolder))
				return
			# Handle all completely sent messages (delimited by \r\n), leave any unfinished messages for the next loop
			while '\r\n' in incomingData:
				line, incomingData = incomingData.split('\r\n', 1)
				# First deal with the simplest type of message, PING. Just reply PONG
				if line.startswith("PING"):
					self.sendLineToServer(line.replace("PING", "PONG", 1), False)
				else:
					# Let's find out what kind of message this is!
					lineParts = line.split(" ")
					# A line consists at least of 'source messageType [target] content'
					messageSource = lineParts[0]
					# It usually starts with a colon, remove that
					if messageSource.startswith(":"):
						messageSource = messageSource[1:]
					messageType = lineParts[1]
					#Convert numerical replies to human-readable ones, if applicable. Otherwise make it uppercase, since that's the standard
					messageType = Constants.IRC_NUMERIC_TO_NAME.get(messageType, messageType.upper())
					#The IRC protocol uses ':' to denote the start of a multi-word string. Join those here too, for easier parsing later
					messageParts = lineParts[2:]
					for messagePartIndex, messagePart in enumerate(messageParts):
						if messagePart.startswith(':'):
							#Join all the separate parts of the wordgroup, and remove the starting colon
							wordgroup = " ".join(messageParts[messagePartIndex:])[1:]
							messageParts[messagePartIndex] = wordgroup
							messageParts = messageParts[:messagePartIndex+1]
							break
					#Check if we have a function to deal with this type of message
					messageTypeFunction = getattr(self, "irc_" + messageType, None)
					if messageTypeFunction:
						messageTypeFunction(messageSource, messageParts)
					else:
						#No function for this type of message, fall back to a generic function
						self.irc_unknown_message_type(messageSource, messageType, lineParts)
def irc_RPL_WELCOME(self, source, parameters):
"""Called when we finished connecting to the server"""
self.logger.info("|{}| Successfully connected".format(self.serverfolder))
# We successfully connected, reset the reconnection count
self.reconnectionAttempCount = None
self.connectedAt = time.time()
# Get the nickname we got assigned from the message
self.nickname = parameters[0]
if self.nickname != self.settings['nickname']:
self.logger.info("|{} Nickname not available. Wanted '{}', got '{}'".format(self.serverfolder, self.settings['nickname'], self.nickname))
# Inform all the modules that we connected
message = IrcMessage("RPL_WELCOME", self, None, source, " ".join(parameters))
GlobalStore.commandhandler.handleMessage(message)
# Join the channels we should, if there are any
if len(self.settings['joinChannels']) == 0:
self.logger.info("|{}| No join channels specified, idling".format(self.serverfolder))
else:
for channel in self.settings['joinChannels']:
self.joinChannel(channel)
def joinChannel(self, channelname):
if channelname[0] not in Constants.CHANNEL_PREFIXES:
channelname = "#" + channelname
if channelname in self.channelsUserList:
self.logger.warning("|{}| Asked to join '{}' but I'm already there".format(self.serverfolder, channelname))
else:
self.sendLineToServer("JOIN {}".format(channelname))
def leaveChannel(self, channelName, leaveMessage="Leaving..."):
if channelName not in self.channelsUserList:
self.logger.warning("|{}| Asked to leave '{}', but I'm not there".format(self.serverfolder, channelName))
else:
self.sendLineToServer("PART {} :{}".format(channelName, leaveMessage))
def setNick(self, nickname):
self.sendLineToServer("NICK " + nickname)
def irc_ERR_NICKNAMEINUSE(self, source, parameters):
# The nickname we want is apparently in use. Just append an underscore and try again
newNicknameAttempt = parameters[1] + "_"
self.logger.info("|{}| Requested nickname '{}' in use, retrying with nickname '{}'".format(self.serverfolder, parameters[1], newNicknameAttempt))
self.nickname = newNicknameAttempt
self.sendLineToServer("NICK " + newNicknameAttempt)
#Create a list of user addresses per channel
def retrieveChannelUsers(self, channel):
self.isUpdatingChannelsUserList = True
#Make sure we don't get duplicate data
if channel in self.channelsUserList:
self.channelsUserList.pop(channel)
self.sendLineToServer("WHO {}".format(channel))
	def quit(self, quitMessage=None):
		"""Shut this bot down: send QUIT if connected, otherwise stop the pending reconnection attempt and unregister the bot"""
		self.shouldReconnect = False
		#If we are connected to a server, let it know we want to quit
		if self.connectedAt is not None:
			if quitMessage:
				self.sendLineToServer("QUIT :" + quitMessage)
			else:
				self.sendLineToServer("QUIT")
		# If we're not connected (yet?), stop trying to connect
		elif self.connectionManagerGreenlet:
			self.logger.info("|{}| Asked to quit, but not connected. Stopping wait before next connection attempt".format(self.serverfolder))
			self.connectionManagerGreenlet.kill()
			GlobalStore.bothandler.unregisterBot(self.serverfolder)
		else:
			#I don't know how we'd end up without a connection greenlet, but best handle it anyway
			self.logger.info("|{}| Asked to quit and don't have a connection greenlet. Unregistering".format(self.serverfolder))
			GlobalStore.bothandler.unregisterBot(self.serverfolder)
	#MESSAGE TYPE HANDLING FUNCTIONS
	def irc_unknown_message_type(self, source, messageType, messageParts):
		"""Fallback for IRC message types without a dedicated irc_* handler; just logs the raw message"""
		self.logger.info("|{}| Received unknown message type '{}' from {}: {}".format(self.serverfolder, messageType, source, " ".join(messageParts)))
	def ctcp_unknown_message_type(self, ctcpType, user, messageTarget, message):
		"""Fallback for CTCP commands without a dedicated ctcp_* handler; just logs the request"""
		self.logger.info("|{}| Received unknown CTCP command '{}' on {} from {}, message '{}'".format(self.serverfolder, ctcpType, messageTarget, user, message))
	def irc_RPL_MOTD(self, prefix, params):
		"""One line of the server's Message Of The Day; only logged (assumes the MOTD text is params[1] -- TODO confirm for all servers)"""
		self.messageLogger.log("Server message of the day: " + params[1])
	def irc_JOIN(self, prefix, params):
		"""Called when a user or the bot joins a channel; keeps the channel userlist up to date and notifies the modules"""
		# 'prefix' is the user, 'params' is a list with apparently just one entry, the channel
		message = IrcMessage('join', self, prefix, params[0])
		self.messageLogger.log("JOIN: {nick} ({address})".format(nick=message.userNickname, address=prefix), params[0])
		# If we just joined a channel, or if don't have a record of this channel yet, get all the users in it
		if message.userNickname == self.nickname or params[0] not in self.channelsUserList:
			self.retrieveChannelUsers(params[0])
		# If we don't know this user yet, add it to our list
		elif prefix not in self.channelsUserList[params[0]]:
			self.channelsUserList[params[0]].append(prefix)
		GlobalStore.commandhandler.handleMessage(message)
	def irc_PART(self, prefix, params):
		"""Called when a user or the bot leaves a channel; updates the channel userlists and notifies the modules"""
		# 'prefix' is the user, 'params' is a list with only the channel
		message = IrcMessage('part', self, prefix, params[0])
		self.messageLogger.log("PART: {nick} ({address})".format(nick=message.userNickname, address=prefix), params[0])
		# If a user parts before we have a proper channellist built, catch that error
		if params[0] not in self.channelsUserList:
			self.logger.warning("|{}| Unexpected PART, user '{}' parted from channel '{}' but we had no record of them".format(self.serverfolder, prefix, params[0]))
			# Schedule a rebuild of the userlist
			if not self.isUpdatingChannelsUserList:
				self.retrieveChannelUsers(params[0])
		# Keep track of the channels we're in
		elif message.userNickname == self.nickname:
			self.channelsUserList.pop(params[0])
		# Keep track of channel users
		elif prefix in self.channelsUserList[params[0]]:
			self.channelsUserList[params[0]].remove(prefix)
		GlobalStore.commandhandler.handleMessage(message)
	def irc_QUIT(self, prefix, params):
		"""Called when a user quits; removes them from every channel userlist they were in and notifies the modules"""
		# 'prefix' is the user address, 'params' is a single-item list with the quit messages
		# log for every channel the user was in that they quit
		message = IrcMessage('quit', self, prefix, None, params[0])
		logMessage = "QUIT: {nick} ({address}): '{quitmessage}' ".format(nick=message.userNickname, address=prefix, quitmessage=params[0])
		# NOTE: iteritems() is Python 2 only
		for channel, userlist in self.channelsUserList.iteritems():
			if prefix in userlist:
				self.messageLogger.log(logMessage, channel)
				userlist.remove(prefix)
		GlobalStore.commandhandler.handleMessage(message)
	def irc_KICK(self, prefix, params):
		"""Called when a user is kicked; updates the channel userlists and notifies the modules"""
		# 'prefix' is the kicker, params[0] is the channel, params[1] is the user address of the kicked, params[-1] is the kick reason
		message = IrcMessage('kick', self, prefix, params[0], params[-1])
		kickedUserNick = params[1].split("!", 1)[0]
		self.messageLogger.log("KICK: {} was kicked by {}, reason: '{}'".format(kickedUserNick, message.userNickname, params[-1]), params[0])
		# Keep track of the channels we're in
		if kickedUserNick == self.nickname:
			if params[0] in self.channelsUserList:
				self.channelsUserList.pop(params[0])
		# NOTE(review): if the kicked user isn't us and the channel isn't in channelsUserList yet,
		# this lookup would raise a KeyError -- presumably a KICK can only arrive for a joined channel; confirm
		elif params[1] in self.channelsUserList[params[0]]:
			self.channelsUserList[params[0]].remove(params[1])
		GlobalStore.commandhandler.handleMessage(message)
	def irc_NICK(self, prefix, params):
		"""Called when a user or me change their nickname; updates our own nick if needed and rewrites the channel userlists"""
		# 'prefix' is the full user address with the old nickname, params[0] is the new nickname
		message = IrcMessage('nickchange', self, prefix, None, params[0])
		oldnick = message.userNickname
		newnick = params[0]
		# New nick plus old address
		newaddress = newnick + "!" + prefix.split("!", 1)[1]
		# If it's about us, apparently a nick change was successful
		if oldnick == self.nickname:
			self.nickname = newnick
			self.logger.info("|{}| Our nick got changed from '{}' to '{}'".format(self.serverfolder, oldnick, self.nickname))
		# Log the change in every channel where it's relevant (iteritems() is Python 2 only)
		for channel, userlist in self.channelsUserList.iteritems():
			if prefix in userlist:
				# Update the userlists for all channels this user is in
				userlist.append(newaddress)
				userlist.remove(prefix)
				self.messageLogger.log("NICK CHANGE: {oldnick} changed their nick to {newnick}".format(oldnick=oldnick, newnick=newnick), channel)
		GlobalStore.commandhandler.handleMessage(message)
	def irc_MODE(self, prefix, params):
		"""Handle a MODE change, which can either be server-wide (for us) or channel-specific (for other users)"""
		#There are two possible MODE commands
		# The first is server-wide. Here the prefix is our user address, the first param is our username, and the second param is the mode change
		if len(params) == 2:
			self.logger.info("|{}| Our mode got set to '{}'".format(self.serverfolder, params[1]))
		# The second is channel-specific. Here the prefix is who or what is making the change, the first param is the channel,
		# the second param is the mode change, and the rest of the params are the nicks whose mode got changed
		else:
			#It's not always a user making the change, check if it's a user address
			modeChanger = prefix
			if '!' in modeChanger:
				modeChanger = modeChanger.split('!', 1)[0]
			self.messageLogger.log("{} set mode to '{}' of user(s) {}".format(modeChanger, params[1], ", ".join(params[2:])), params[0])
	def irc_TOPIC(self, prefix, params):
		"""A channel topic change; currently only logged for debugging"""
		self.logger.debug("irc_TOPIC called, prefix is '{}', params is '{}'".format(prefix, params))
	def irc_RPL_TOPIC(self, prefix, params):
		"""Server reply with a channel's current topic; currently only logged for debugging"""
		self.logger.debug("irc_RPL_TOPIC called, prefix is '{}', params is '{}'".format(prefix, params))
	def irc_RPL_NOTOPIC(self, prefix, params):
		"""Server reply stating a channel has no topic set; currently only logged for debugging"""
		self.logger.debug("irc_RPL_NOTOPIC called, prefix is '{}', params is '{}'".format(prefix, params))
def irc_RPL_WHOREPLY(self, prefix, params):
#'prefix' is the server, 'params' is a list, with meaning [own_nick, channel, other_username, other_address, other_server, other_nick, flags, hops realname]
# Flags can be H for active or G for away, and a * for oper, + for voiced
if params[1] not in self.channelsUserList:
self.channelsUserList[params[1]] = []
self.channelsUserList[params[1]].append("{nick}!{username}@{address}".format(nick=params[5], username=params[2], address=params[3]))
	def irc_RPL_ENDOFWHO(self, prefix, params):
		"""The server signalled the end of the WHO replies, so the channel userlists are complete again"""
		self.isUpdatingChannelsUserList = False
		self.logger.info("|{}| Userlist for channels {} collected".format(self.serverfolder, ", ".join(self.channelsUserList.keys())))
	#CTCP FUNCTIONS
	def ctcp_ACTION(self, user, messageSource, messageText):
		"""A CTCP ACTION (a '/me' message); handled like a normal message of type 'action'"""
		self.handleMessage(user, messageSource, messageText, 'action')
def ctcp_PING(self, user, messageSource, messageText):
self.messageLogger.log("Received PING request from '{}'".format(user), messageSource)
usernick = user.split('!', 1)[0]
self.sendMessage(usernick, "PING " + messageText if messageText else str(time.time()), 'notice')
def ctcp_SOURCE(self, user, messageSource, messageText):
self.messageLogger.log("Received SOURCE request from '{}'".format(user), messageSource)
usernick = user.split('!', 1)[0]
self.sendMessage(usernick, "Thanks for the interest! You can read through my innards at https://github.com/Didero/DideRobot", 'notice')
	def ctcp_TIME(self, user, messageSource, messageText):
		"""Reply to a CTCP TIME request with the bot's local date and time as a notice"""
		self.messageLogger.log("Received TIME request from '{}'".format(user), messageSource)
		usernick = user.split('!', 1)[0]
		self.sendMessage(usernick, time.strftime("%a %d-%m-%Y %H:%M:%S %Z"), 'notice')
	def ctcp_VERSION(self, user, messageSource, messageText):
		"""Reply to a CTCP VERSION request; the bot has no version number, so just send an explanation"""
		self.messageLogger.log("Received VERSION request from '{}'".format(user), messageSource)
		usernick = user.split('!', 1)[0]
		self.sendMessage(usernick, "I don't have a set version, since I'm updated pretty frequently. I appreciate the interest though!", 'notice')
#HUMAN COMMUNICATION FUNCTIONS
def irc_PRIVMSG(self, user, messageParts):
# First part of the messageParts is the channel the message came in from, or the user if it was a PM
# Second part is the actual message
messageSource = messageParts[0]
# If the actual message (past the first colon) starts with 'chr(1)', it means it's a special CTCP message (like an action)
if len(messageParts[1]) > 0 and messageParts[1][0] == Constants.CTCP_DELIMITER:
#First section is the CTCP type
# Make the type uppercase, since that's the standard (also means 'unknown_message_type' can't accidentally be called)
ctcpType = messageParts[1].upper()
messageText = None
#Sometimes a message is appended (like for an ACTION), check for that
if " " in ctcpType:
ctcpType, messageText = messageParts[1].split(" ", 1)
#The message could also end with a 'chr(1)', remove that
if messageText.endswith(Constants.CTCP_DELIMITER):
messageText = messageText[:-1]
#The CTCP type could end with the CTCP delimiter
if ctcpType.endswith(Constants.CTCP_DELIMITER):
ctcpType = ctcpType[:-1]
ctcpType = ctcpType[1:] #Remove the CTCP delimiter
#Check if we have a function to handle this type of CTCP message, otherwise fall back on a default
ctcpFunction = getattr(self, "ctcp_" + ctcpType, None)
if ctcpFunction:
ctcpFunction(user, messageSource, messageText)
else:
self.ctcp_unknown_message_type(ctcpType, user, messageSource, messageText)
#Normal message
else:
self.handleMessage(user, messageSource, messageParts[1], "say")
	def irc_NOTICE(self, user, messageParts):
		"""An incoming NOTICE; treated like a normal message of type 'notice'"""
		self.handleMessage(user, messageParts[0], messageParts[1], 'notice')
	def handleMessage(self, user, channel, messageText, messageType="say"):
		"""Called when the bot receives a message, which can be either in a channel or in a private message, as text or an action."""
		usernick = user.split("!", 1)[0]
		logsource = channel
		if channel == self.nickname or channel == '*': #If a server wants to send a message to you before it knows your nick, it uses *
			logsource = usernick
		# NOTE(review): this fallback template's '{source}' placeholder isn't filled by the format() call
		# below, so an unexpected messageType would raise a KeyError -- presumably only 'say'/'action'/'notice' arrive here; confirm
		logtext = "({source}) "
		if messageType == 'say':
			logtext = "{user}: {message}"
		elif messageType == 'action':
			logtext = "*{user} {message}"
		elif messageType == 'notice':
			logtext = "[notice] {user}: {message}"
		self.messageLogger.log(logtext.format(user=usernick, message=messageText), logsource)
		message = IrcMessage(messageType, self, user, channel, messageText)
		#Let the CommandHandler see if a module needs to do something with this message
		GlobalStore.commandhandler.handleMessage(message)
	#SENDING OUT MESSAGES
	def sendLineToServer(self, lineToSend, shouldLogMessage=True):
		"""Write one raw IRC line to the server socket, appending the required CRLF terminator.
		'shouldLogMessage' can be set to False to keep noisy lines (like PONG replies) out of the debug log."""
		if not self.ircSocket:
			self.logger.error("|{}| Asked to send line '{}' to server, but socket closed".format(self.serverfolder, lineToSend))
			return
		if shouldLogMessage:
			self.logger.debug("|{}| > {}".format(self.serverfolder, lineToSend))
		# NOTE(review): socket.send may transmit only part of the data; sendall would be safer -- confirm intent
		self.ircSocket.send(lineToSend + "\r\n")
	def irc_ERR_NOTEXTTOSEND(self, prefix, params):
		"""The server complained that we sent an empty line; log it, since that points at a bug in one of our modules"""
		self.logger.error("|{}| We just sent an empty line to the server, which is probably a bug in a module!".format(self.serverfolder))
@staticmethod
def formatCtcpMessage(ctcpType, messageText):
return "{delim}{ctcpType} {msg}{delim}".format(delim=Constants.CTCP_DELIMITER, ctcpType=ctcpType, msg=messageText)
	def sendLineFromQueue(self):
		"""Greenlet worker that drains the rate-limited send queue, sending one line per 'secondsBetweenLineSends' interval until the queue empties or the connection drops"""
		try:
			while True:
				#Verify that we're still connected and there is still a message in the queue, otherwise reset everything
				if self.connectedAt is None or not self.linesToSend or len(self.linesToSend) == 0:
					self.linesToSend = None
					self.lineSendingGreenlet = None
					break
				#Remove the first queued message and send it
				self.sendLineToServer(self.linesToSend.pop(0))
				#Keep going through the message queue until it's empty
				gevent.sleep(self.secondsBetweenLineSends)
		except gevent.GreenletExit:
			self.logger.info("|{}| Line sender greenlet was killed".format(self.serverfolder))
def queueLineToSend(self, lineToSend):
#If there's no rate limiting, there's no need for queueing either. Send the message now
if not self.secondsBetweenLineSends:
self.sendLineToServer(lineToSend)
return
#If there are no lines queued, the list is set to 'None'. Create a new list
if not self.linesToSend:
self.linesToSend = [lineToSend]
# Add the message to the queue
else:
self.linesToSend.append(lineToSend)
# If there's not yet a greenlet clearing the message queue, create one
if not self.lineSendingGreenlet:
self.lineSendingGreenlet = gevent.spawn(self.sendLineFromQueue)
	def sendMessage(self, target, messageText, messageType='say'):
		"""Send a message to a channel or user.

		messageType is 'say' for a normal message, 'action' for a CTCP ACTION ('/me'),
		or 'notice' for an IRC NOTICE. Lines containing newlines or exceeding the
		maximum line length are split and re-sent recursively. Channel messages go
		through the rate-limited queue; private messages bypass it.
		"""
		#Only say something if we're not muted, or if it's a private message or a notice
		if not self.isMuted or target[0] not in Constants.CHANNEL_PREFIXES or messageType == 'notice':
			#Make sure we're not trying to send Unicode
			if isinstance(messageText, unicode):
				try:
					messageText = messageText.encode(encoding='utf-8', errors='replace')
				except (UnicodeDecodeError, UnicodeEncodeError):
					self.logger.warning("|{}| [sendMessage] Error encoding message to string (is now type '{}'): '{}'".format(self.serverfolder, type(messageText), messageText))
			#It can't handle unicode message targets either
			if isinstance(target, unicode):
				target = target.encode('utf-8')
			logtext = ""
			messageCommand = "PRIVMSG"
			if messageType == 'action':
				#An action is just a special type of Say
				logtext += "*"
				messageText = self.formatCtcpMessage("ACTION", messageText)
			elif messageType == 'notice':
				logtext += "[notice] "
				messageCommand = "NOTICE"
			#NOTE(review): outgoing actions are logged as "*nick: text", while incoming ones log as "*nick text" — verify whether that difference is intentional
			logtext += "{user}: {message}"
			line = "{} {} :{}".format(messageCommand, target, messageText)
			extraLines = None
			#Turn newlines in this line into multiple lines
			if '\n' in line or '\r' in line:
				extraLines = line.splitlines()
				line = extraLines.pop(0)
			#Check if the line isn't too long to send
			if len(line) >= Constants.MAX_LINE_LENGTH:
				#The overflow is queued in front of any newline-split remainder so ordering is preserved
				if not extraLines:
					extraLines = [line[Constants.MAX_LINE_LENGTH:]]
				else:
					extraLines.insert(0, line[Constants.MAX_LINE_LENGTH:])
				line = line[:Constants.MAX_LINE_LENGTH]
			if target[0] not in Constants.CHANNEL_PREFIXES:
				#If it's a PM, bypass the message queue
				self.sendLineToServer(line)
			else:
				self.queueLineToSend(line)
			self.messageLogger.log(logtext.format(user=self.nickname, message=messageText), target)
			#Make sure any extra lines get sent too
			if extraLines:
				for extraLine in extraLines:
					self.sendMessage(target, extraLine, messageType)
#USER LIST CHECKING FUNCTIONS
def isUserAdmin(self, user, userNick=None, userAddress=None):
return self.isUserInList(self.settings['admins'], user, userNick, userAddress)
def shouldUserBeIgnored(self, user, userNick=None, userAddress=None):
return self.isUserInList(self.settings['userIgnoreList'], user, userNick, userAddress)
@staticmethod
def isUserInList(userlist, user, userNick=None, userAddress=None):
if user is None:
return False
if user in userlist or user.lower() in userlist:
return True
#If a usernick is provided, use that, otherwise split the full user address ourselves (if possible)
if '!' not in user:
return False
if userNick is None or userAddress is None:
userNick, userAddress = user.split('!', 1)
if userNick in userlist or userNick.lower() in userlist or userAddress in userlist or userAddress.lower() in userlist:
return True
return False
|
#!/usr/bin/python
""" supersid.py
version 1.3
Segregation MVC
SuperSID class is the Controller.
First, it reads the .cfg file specified on the command line (unique accepted parameter) or in ../Config
Then it creates its necessary elements:
- Model: Logger, Sampler
- Viewer: Viewer
using the parameters read in the .cfg file
Finally, it launches an infinite loop to wait for events:
- User input (graphic or text)
- Timer for sampling
- <still missing> network management with client-server protocol
"""
from __future__ import print_function # use the new Python 3 'print' function
import os.path
import argparse
# matplotlib ONLY used in Controller for its PSD function, not for any graphic
from matplotlib.mlab import psd as mlab_psd
# SuperSID Package classes
from sidtimer import SidTimer
from sampler import Sampler
from config import Config
from logger import Logger
class SuperSID():
    '''
    This is the main class which creates all other objects.
    In the MVC pattern, this is the Controller.
    '''
    running = False  # class attribute to indicate if the SID application is running

    def __init__(self, config_file='', read_file=None):
        """Read the .cfg file, then create the Logger, Viewer, Sampler and Timer.

        config_file: optional path to a .cfg file; defaults to "supersid.cfg"
        read_file: optional existing raw data file to continue recording from
        """
        self.version = "EG 1.4 20150801"
        self.timer = None
        self.sampler = None
        self.viewer = None
        # Tracks whether the optional 'wx' module imported; pre-set so the
        # psd-selection check below can never hit an unbound local.
        wx_imported = False

        # Read Config file here
        print("Reading supersid.cfg ...", end='')
        # this script accepts a .cfg file as optional argument else we default
        # so that the "historical location" or the local path are explored
        self.config = Config(os.path.expanduser(config_file) or "supersid.cfg")
        # once the .cfg read, some sanity checks are necessary
        self.config.supersid_check()
        if not self.config.config_ok:
            print("ERROR:", self.config.config_err)
            exit(1)
        else:
            print(self.config.filenames)  # good for debugging: what .cfg file(s) were actually read
        self.config["supersid_version"] = self.version

        # Create Logger - Logger will read an existing file if specified as -r|--read script argument
        self.logger = Logger(self, read_file)
        if 'utc_starttime' not in self.config:
            self.config['utc_starttime'] = self.logger.sid_file.sid_params["utc_starttime"]

        # Create the viewer based on the .cfg specification (or set default):
        # Note: the list of Viewers can be extended provided they implement the same interface
        if self.config['viewer'] == 'wx':
            # GUI Frame to display real-time VLF Spectrum based on wxPython
            # special case: 'wx' module might not be installed (text mode only) nor even available (python 3)
            try:
                from wxsidviewer import wxSidViewer
                self.viewer = wxSidViewer(self)
                wx_imported = True
            except ImportError:
                print("'wx' module not imported.")
                wx_imported = False
        elif self.config['viewer'] == 'tk':
            # GUI Frame to display real-time VLF Spectrum based on tkinter (python 2 and 3)
            from tksidviewer import tkSidViewer
            self.viewer = tkSidViewer(self)
        elif self.config['viewer'] == 'text':
            # Lighter text version a.k.a. "console mode"
            from textsidviewer import textSidViewer
            self.viewer = textSidViewer(self)
        else:
            # BUGFIX: this read 'sid.config', a global that only exists when the
            # module is run as a script; the instance's own config is meant.
            print("ERROR: Unknown viewer", self.config['viewer'])
            exit(2)

        # Assign desired psd function for calculation after capture
        # currently: using matplotlib's psd
        if (self.config['viewer'] == 'wx' and wx_imported) or self.config['viewer'] == 'tk':
            self.psd = self.viewer.get_psd  # calculate psd and draw result in one call
        else:
            self.psd = mlab_psd  # calculation only

        # calculate Stations' buffer_size: number of log_interval samples in 24 hours
        self.buffer_size = int(24 * 60 * 60 / self.config['log_interval'])

        # Create Sampler to collect audio buffer (sound card or other server)
        self.sampler = Sampler(self, audio_sampling_rate=self.config['audio_sampling_rate'], NFFT=1024)
        if not self.sampler.sampler_ok:
            self.close()
            exit(3)
        else:
            self.sampler.set_monitored_frequencies(self.config.stations)

        # Link the logger.sid_file.data buffers to the config.stations
        for ibuffer, station in enumerate(self.config.stations):
            station['raw_buffer'] = self.logger.sid_file.data[ibuffer]

        # Create Timer
        self.viewer.status_display("Waiting for Timer ... ")
        self.timer = SidTimer(self.config['log_interval'], self.on_timer)

    def clear_all_data_buffers(self):
        """Clear the current memory buffers and pass to the next day"""
        self.logger.sid_file.clear_buffer(next_day=True)

    def on_timer(self):
        """Callback function triggered by SidTimer every 'log_interval' seconds"""
        # current_index is the position in the buffer calculated from current UTC time
        current_index = self.timer.data_index
        utc_now = self.timer.utc_now

        # Get new data and pass them to the View
        message = "%s [%d] Capturing data..." % (self.timer.get_utc_now(), current_index)
        self.viewer.status_display(message, level=1)
        signal_strengths = []
        try:
            data = self.sampler.capture_1sec()  # return a list of 1 second signal strength
            Pxx, freqs = self.psd(data, self.sampler.NFFT, self.sampler.audio_sampling_rate)
            for binSample in self.sampler.monitored_bins:
                signal_strengths.append(Pxx[binSample])
        except IndexError as idxerr:
            print("Index Error:", idxerr)
            print("Data len:", len(data))
        except TypeError as err_te:
            print("Warning:", err_te)

        # ensure that one thread at the time accesses the sid_file's buffers
        with self.timer.lock:
            # do we need to save some files (hourly) or switch to a new day?
            if self.timer.utc_now.minute == 0 and self.timer.utc_now.second < self.config['log_interval']:
                if self.config['hourly_save'] == 'YES':
                    fileName = "hourly_current_buffers.raw.ext.%s.csv" % (self.logger.sid_file.sid_params['utc_starttime'][:10])
                    self.save_current_buffers(filename=fileName, log_type='raw', log_format='supersid_extended')
                # a new day!
                if self.timer.utc_now.hour == 0:
                    # use log_type and log_format(s) requested by the user in the .cfg
                    for log_format in self.config['log_format'].split(','):
                        self.save_current_buffers(log_type=self.config['log_type'], log_format=log_format)
                    self.clear_all_data_buffers()
            # Save signal strengths into memory buffers ; prepare message for status bar
            message = self.timer.get_utc_now() + " [%d] " % current_index
            for station, strength in zip(self.config.stations, signal_strengths):
                station['raw_buffer'][current_index] = strength
                message += station['call_sign'] + "=%f " % strength
            self.logger.sid_file.timestamp[current_index] = utc_now

        # end of this thread/need to handle to View to display captured data & message
        self.viewer.status_display(message, level=2)

    def save_current_buffers(self, filename='', log_type='raw', log_format='both'):
        ''' Save buffer data from logger.sid_file

        log_type = raw or filtered
        log_format = sid_format|sid_extended|supersid_format|supersid_extended|both|both_extended
        Returns the list of file names written.'''
        filenames = []
        if log_format.startswith('both') or log_format.startswith('sid'):
            # filename is '' to ensure one file per station
            fnames = self.logger.log_sid_format(self.config.stations, '', log_type=log_type, extended=log_format.endswith('extended'))
            filenames += fnames
        if log_format.startswith('both') or log_format.startswith('supersid'):
            fnames = self.logger.log_supersid_format(self.config.stations, filename, log_type=log_type, extended=log_format.endswith('extended'))
            filenames += fnames
        return filenames

    def on_close(self):
        """Viewer callback on window close: delegate to close()."""
        self.close()

    def run(self, wx_app=None):
        """Start the application as infinite loop accordingly to need"""
        self.__class__.running = True
        self.viewer.run()

    def close(self):
        """Call all necessary stop/close functions of children objects"""
        self.__class__.running = False
        if self.sampler:
            self.sampler.close()
        if self.timer:
            self.timer.stop()
        if self.viewer:
            self.viewer.close()

    def about_app(self):
        """return a text indicating various information on the app, incl. versions"""
        msg = """This program is designed to detect Sudden Ionosphere Disturbances (SID), \
which are caused by a blast of intense X-ray radiation when there is a Solar Flare on the Sun.\n\n""" + \
        "Controller: " + self.version + "\n" + \
        "Sampler: " + self.sampler.version + "\n" \
        "Timer: " + self.timer.version + "\n" \
        "Config: " + self.config.version + "\n" \
        "Logger: " + self.logger.version + "\n" \
        "Sidfile: " + self.logger.sid_file.version + "\n" + \
        "Viewer: " + self.viewer.version + "\n" + \
        "\n\nAuthor: <NAME> <EMAIL>" + \
        "\n\nVisit http://solar-center.stanford.edu/SID/sidmonitor/ for more information."
        return msg
#-------------------------------------------------------------------------------
def exist_file(x):
    """
    'Type' for argparse - checks that file exists but does not open.

    Raises argparse.ArgumentTypeError when the file is missing, which argparse
    reports as a clean usage error instead of a traceback.
    """
    if not os.path.isfile(x):
        # BUGFIX: argparse.ArgumentError requires an argument object as its first
        # parameter, so the old call raised TypeError instead of reporting the
        # problem; ArgumentTypeError is the documented exception for type= callables.
        raise argparse.ArgumentTypeError("{0} does not exist".format(x))
    return x
if __name__ == '__main__':
    # Command-line entry point: parse arguments, create the Controller, and run
    # it until the viewer's event loop exits.
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--read", dest="filename", required=False, type=exist_file,
                        help="Read raw file and continue recording")
    # Optional positional argument: path to the .cfg file to use
    parser.add_argument('config_file', nargs='?', default='')
    # parse_known_args() ignores unrecognized extra arguments instead of failing
    args, unk = parser.parse_known_args()
    sid = SuperSID(config_file=args.config_file, read_file=args.filename)
    sid.run()
    sid.close()
|
#!/usr/bin/env python
"""Standard actions that happen on the client."""
import cStringIO as StringIO
import ctypes
import gzip
import hashlib
import os
import platform
import socket
import sys
import time
import zlib
import psutil
import logging
from grr.client import actions
from grr.client import client_utils_common
from grr.client import vfs
from grr.client.client_actions import tempfiles
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import crypto
# We do not send larger buffers than this:
MAX_BUFFER_SIZE = 640 * 1024  # 640 KiB cap enforced by the buffer-reading actions below
class ReadBuffer(actions.ActionPlugin):
  """Reads a buffer from a file and returns it to a server callback."""
  in_rdfvalue = rdfvalue.BufferReference
  out_rdfvalue = rdfvalue.BufferReference

  def Run(self, args):
    """Read the requested byte range on the client and reply to the server."""
    # Refuse oversized requests so a single reply stays bounded.
    if args.length > MAX_BUFFER_SIZE:
      raise RuntimeError("Can not read buffers this large.")

    try:
      fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
      fd.Seek(args.offset)
      # Tell() reflects the position actually reached, which may differ from
      # the requested offset.
      actual_offset = fd.Tell()
      data = fd.Read(args.length)
    except (IOError, OSError) as e:
      self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
      return

    # Return the bytes read together with the resolved pathspec.
    self.SendReply(offset=actual_offset, data=data, length=len(data),
                   pathspec=fd.pathspec)
HASH_CACHE = utils.FastStore(100)
class TransferBuffer(actions.ActionPlugin):
  """Reads a buffer from a file and returns it to the server efficiently."""
  in_rdfvalue = rdfvalue.BufferReference
  out_rdfvalue = rdfvalue.BufferReference

  def Run(self, args):
    """Read a byte range, ship its compressed content to the TransferStore."""
    # Refuse oversized requests so a single reply stays bounded.
    if args.length > MAX_BUFFER_SIZE:
      raise RuntimeError("Can not read buffers this large.")

    data = vfs.ReadVFS(args.pathspec, args.offset, args.length,
                       progress_callback=self.Progress)
    digest = hashlib.sha256(data).digest()
    blob = rdfvalue.DataBlob(
        data=zlib.compress(data),
        compression=rdfvalue.DataBlob.CompressionType.ZCOMPRESSION)

    # Count the uncompressed payload against this session's network send limit.
    self.ChargeBytesToSession(len(data))

    # The compressed blob goes to the TransferStore well known flow; our own
    # flow only receives the hash plus offset/length bookkeeping.
    self.grr_worker.SendReply(
        blob, session_id=rdfvalue.SessionID("aff4:/flows/W:TransferStore"))
    self.SendReply(offset=args.offset, length=len(data), data=digest)
class HashBuffer(actions.ActionPlugin):
  """Hash a buffer from a file and returns it to the server efficiently."""
  in_rdfvalue = rdfvalue.BufferReference
  out_rdfvalue = rdfvalue.BufferReference

  def Run(self, args):
    """Read a byte range on the client and reply with its SHA-256 digest."""
    # Refuse oversized requests so a single reply stays bounded.
    if args.length > MAX_BUFFER_SIZE:
      raise RuntimeError("Can not read buffers this large.")

    buf = vfs.ReadVFS(args.pathspec, args.offset, args.length)
    # Only the hash travels back, together with offset/length bookkeeping.
    self.SendReply(offset=args.offset,
                   length=len(buf),
                   data=hashlib.sha256(buf).digest())
class CopyPathToFile(actions.ActionPlugin):
  """Copy contents of a pathspec to a file on disk."""
  in_rdfvalue = rdfvalue.CopyPathToFileRequest
  out_rdfvalue = rdfvalue.CopyPathToFileRequest
  # Copy in 10 MB chunks so we can heartbeat between reads.
  BLOCK_SIZE = 10 * 1024 * 1024

  def _Copy(self, dest_fd):
    """Copy from VFS to file until no more data or self.length is reached.

    Args:
      dest_fd: file object to write to
    Returns:
      self.written: bytes written
    """
    while self.written < self.length:
      to_read = min(self.length - self.written, self.BLOCK_SIZE)
      data = self.src_fd.read(to_read)
      if not data:
        # Source exhausted before self.length was reached.
        break
      dest_fd.write(data)
      self.written += len(data)
      # Send heartbeats for long files.
      self.Progress()
    return self.written

  def Run(self, args):
    """Read from a VFS file and write to a GRRTempFile on disk.

    If file writing doesn't complete files won't be cleaned up.

    Args:
      args: see CopyPathToFile in jobs.proto
    """
    self.src_fd = vfs.VFSOpen(args.src_path, progress_callback=self.Progress)
    self.src_fd.Seek(args.offset)
    offset = self.src_fd.Tell()
    # A zero/unset length means "copy everything", capped at 1 TB.
    self.length = args.length or (1024 ** 4)  # 1 TB
    self.written = 0
    suffix = ".gz" if args.gzip_output else ""
    self.dest_fd = tempfiles.CreateGRRTempFile(directory=args.dest_dir,
                                               lifetime=args.lifetime,
                                               suffix=suffix)
    self.dest_file = self.dest_fd.name
    with self.dest_fd:
      if args.gzip_output:
        # Wrap the temp file in a gzip stream; compresslevel 9 (best).
        gzip_fd = gzip.GzipFile(self.dest_file, "wb", 9, self.dest_fd)
        # Gzip filehandle needs its own close method called
        with gzip_fd:
          self._Copy(gzip_fd)
      else:
        self._Copy(self.dest_fd)
    pathspec_out = rdfvalue.PathSpec(
        path=self.dest_file, pathtype=rdfvalue.PathSpec.PathType.OS)
    # Echo the request parameters back along with where the copy landed.
    self.SendReply(offset=offset, length=self.written, src_path=args.src_path,
                   dest_dir=args.dest_dir, dest_path=pathspec_out,
                   gzip_output=args.gzip_output)
class ListDirectory(ReadBuffer):
  """Lists all the files in a directory."""
  in_rdfvalue = rdfvalue.ListDirRequest
  out_rdfvalue = rdfvalue.StatEntry

  def Run(self, args):
    """Lists a directory."""
    try:
      directory = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
    except (IOError, OSError) as e:
      self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
      return
    # Reply with one StatEntry per file, in stable path order.
    for entry in sorted(directory.ListFiles(), key=lambda x: x.pathspec.path):
      self.SendReply(entry)
class IteratedListDirectory(actions.IteratedAction):
  """Lists a directory as an iterator."""
  in_rdfvalue = rdfvalue.ListDirRequest
  out_rdfvalue = rdfvalue.StatEntry

  def Iterate(self, request, client_state):
    """Restores its way through the directory using an Iterator."""
    try:
      fd = vfs.VFSOpen(request.pathspec, progress_callback=self.Progress)
    except (IOError, OSError) as e:
      self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
      return
    entries = sorted(fd.ListFiles(), key=lambda x: x.pathspec.path)

    # Resume where the previous round stopped and send one page of results.
    start = client_state.get("index", 0)
    page_size = request.iterator.number
    for entry in entries[start:start + page_size]:
      self.SendReply(entry)

    # Remember our position for the next round.
    client_state["index"] = start + page_size
class SuspendableListDirectory(actions.SuspendableAction):
  """Lists a directory as a suspendable client action."""
  in_rdfvalue = rdfvalue.ListDirRequest
  out_rdfvalue = rdfvalue.StatEntry

  def Iterate(self):
    """List the directory in batches, suspending between batches."""
    try:
      fd = vfs.VFSOpen(self.request.pathspec, progress_callback=self.Progress)
    except (IOError, OSError), e:
      self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
      return
    # Batch size requested by the server-side iterator.
    length = self.request.iterator.number
    for group in utils.Grouper(fd.ListFiles(), length):
      for response in group:
        self.SendReply(response)
      # Yield control back until the server asks for the next batch.
      self.Suspend()
class StatFile(ListDirectory):
  """Sends a StatResponse for a single file."""
  in_rdfvalue = rdfvalue.ListDirRequest
  out_rdfvalue = rdfvalue.StatEntry

  def Run(self, args):
    """Sends a StatResponse for a single file."""
    try:
      fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
      # Stat() may raise too, so it stays inside the try block.
      self.SendReply(fd.Stat())
    except (IOError, OSError) as e:
      self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
      return
class ExecuteCommand(actions.ActionPlugin):
  """Executes one of the predefined commands."""
  in_rdfvalue = rdfvalue.ExecuteRequest
  out_rdfvalue = rdfvalue.ExecuteResponse

  def Run(self, command):
    """Execute the requested command and reply with its output and status."""
    stdout, stderr, status, time_used = client_utils_common.Execute(
        command.cmd, command.args, command.time_limit)

    # Limit output to 10MB so our response doesn't get too big.
    output_cap = 10 * 1024 * 1024
    self.SendReply(rdfvalue.ExecuteResponse(
        request=command,
        stdout=stdout[:output_cap],
        stderr=stderr[:output_cap],
        exit_status=status,
        # We have to return microseconds.
        time_used=int(1e6 * time_used)))
class ExecuteBinaryCommand(actions.ActionPlugin):
  """Executes a command from a passed in binary.

  Obviously this is a dangerous function, it provides for arbitrary code exec by
  the server running as root/SYSTEM.

  This is protected by the CONFIG[PrivateKeys.executable_signing_private_key],
  which should be stored offline and well protected.

  This method can be utilized as part of an autoupdate mechanism if necessary.

  NOTE: If the binary is too large to fit inside a single request, the request
  will have the more_data flag enabled, indicating more data is coming.
  """
  in_rdfvalue = rdfvalue.ExecuteBinaryRequest
  out_rdfvalue = rdfvalue.ExecuteBinaryResponse

  # File suffix for the temp file; subclasses may override (e.g. ".exe").
  suffix = ""

  def WriteBlobToFile(self, request, suffix=""):
    """Writes the blob to a file and returns its path."""
    lifetime = 0
    # Only set the lifetime thread on the last chunk written.
    if not request.more_data:
      lifetime = request.time_limit
      # Keep the file for at least 5 seconds after execution.
      if lifetime > 0:
        lifetime += 5
    # First chunk truncates the file, later chunks append.
    if request.offset == 0:
      mode = "w+b"
    else:
      mode = "r+b"
    temp_file = tempfiles.CreateGRRTempFile(filename=request.write_path,
                                            suffix=suffix, mode=mode)
    with temp_file:
      path = temp_file.name
      # Seek to the end; chunks must arrive strictly in order.
      temp_file.seek(0, 2)
      if temp_file.tell() != request.offset:
        raise IOError("Chunks out of order Error.")
      # Write the new chunk.
      temp_file.write(request.executable.data)
    return path

  def CleanUp(self, path):
    """Removes the temp file."""
    try:
      if os.path.exists(path):
        os.remove(path)
    except (OSError, IOError), e:
      # Best effort only: log the failure and carry on.
      logging.info("Failed to remove temporary file %s. Err: %s", path, e)

  def Run(self, args):
    """Verify the signed blob, accumulate it on disk, then execute it."""
    # Verify the executable blob.
    args.executable.Verify(config_lib.CONFIG[
        "Client.executable_signing_public_key"])
    path = self.WriteBlobToFile(args, self.suffix)
    # Only actually run the file on the last chunk.
    if not args.more_data:
      self.ProcessFile(path, args)
      self.CleanUp(path)

  def ProcessFile(self, path, args):
    """Execute the written binary (whitelist bypassed) and reply with output."""
    res = client_utils_common.Execute(path, args.args, args.time_limit,
                                      bypass_whitelist=True)
    (stdout, stderr, status, time_used) = res
    # Limit output to 10MB so our response doesn't get too big.
    stdout = stdout[:10 * 1024 * 1024]
    stderr = stderr[:10 * 1024 * 1024]
    result = rdfvalue.ExecuteBinaryResponse(
        stdout=stdout,
        stderr=stderr,
        exit_status=status,
        # We have to return microseconds.
        time_used=int(1e6 * time_used))
    self.SendReply(result)
class ExecutePython(actions.ActionPlugin):
  """Executes python code with exec.

  Obviously this is a dangerous function, it provides for arbitrary code exec by
  the server running as root/SYSTEM.

  This is protected by CONFIG[PrivateKeys.executable_signing_private_key], which
  should be stored offline and well protected.
  """
  in_rdfvalue = rdfvalue.ExecutePythonRequest
  out_rdfvalue = rdfvalue.ExecutePythonResponse

  def Run(self, args):
    """Verify the signed python blob, exec it, and reply with its output."""
    time_start = time.time()

    # Minimal file-like object used to capture everything written to stdout
    # while the payload runs.
    class StdOutHook(object):

      def __init__(self, buf):
        self.buf = buf

      def write(self, text):
        self.buf.write(text)

    args.python_code.Verify(config_lib.CONFIG[
        "Client.executable_signing_public_key"])
    # The execed code can assign to this variable if it wants to return data.
    logging.debug("exec for python code %s", args.python_code.data[0:100])
    # Run the payload against a copy of our globals so it can use this
    # module's imports without mutating them.
    context = globals().copy()
    context["py_args"] = args.py_args.ToDict()
    context["magic_return_str"] = ""
    # Export the Progress function to allow python hacks to call it.
    context["Progress"] = self.Progress
    stdout = StringIO.StringIO()
    with utils.Stubber(sys, "stdout", StdOutHook(stdout)):
      exec(args.python_code.data, context)  # pylint: disable=exec-used
    stdout_output = stdout.getvalue()
    magic_str_output = context.get("magic_return_str")
    if stdout_output and magic_str_output:
      output = "Stdout: %s\nMagic Str:%s\n" % (stdout_output, magic_str_output)
    else:
      output = stdout_output or magic_str_output
    time_used = time.time() - time_start
    # We have to return microseconds.
    result = rdfvalue.ExecutePythonResponse(
        time_used=int(1e6 * time_used),
        return_val=utils.SmartStr(output))
    self.SendReply(result)
class Segfault(actions.ActionPlugin):
  """This action is just for debugging. It induces a segfault."""
  in_rdfvalue = None
  out_rdfvalue = None

  def Run(self, unused_args):
    """Does the segfaulting."""
    if flags.FLAGS.debug:
      logging.warning("Segfault action requested :(")
      # Dereference address 1 through ctypes, deliberately crashing the process.
      print ctypes.cast(1, ctypes.POINTER(ctypes.c_void_p)).contents
    else:
      # Refuse to crash a production (non-debug) client.
      logging.warning("Segfault requested but not running in debug mode.")
class ListProcesses(actions.ActionPlugin):
  """This action lists all the processes running on a machine."""
  in_rdfvalue = None
  out_rdfvalue = rdfvalue.Process

  def Run(self, unused_arg):
    """Enumerate all processes via psutil, sending one Process reply each."""
    # psutil will cause an active loop on Windows 2000
    if platform.system() == "Windows" and platform.version().startswith("5.0"):
      raise RuntimeError("ListProcesses not supported on Windows 2000")
    for proc in psutil.process_iter():
      response = rdfvalue.Process()
      process_fields = ["pid", "ppid", "name", "exe", "username", "terminal"]
      # Each field is fetched in its own try block so one inaccessible
      # attribute doesn't lose the whole process entry.
      for field in process_fields:
        try:
          value = getattr(proc, field)
          if value is None:
            continue
          # Some psutil versions expose these as methods rather than attributes.
          if callable(value):
            value = value()
          if not isinstance(value, (int, long)):
            value = utils.SmartUnicode(value)
          setattr(response, field, value)
        except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError):
          pass
      try:
        for arg in proc.cmdline():
          response.cmdline.append(utils.SmartUnicode(arg))
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      try:
        response.nice = proc.nice()
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      try:
        # Not available on Windows.
        if hasattr(proc, "uids"):
          (response.real_uid, response.effective_uid,
           response.saved_uid) = proc.uids()
          (response.real_gid, response.effective_gid,
           response.saved_gid) = proc.gids()
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      try:
        # Creation time is reported in microseconds.
        response.ctime = long(proc.create_time() * 1e6)
        response.status = str(proc.status())
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      try:
        # Not available on OSX.
        if hasattr(proc, "cwd"):
          response.cwd = utils.SmartUnicode(proc.cwd())
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      try:
        response.num_threads = proc.num_threads()
      except (psutil.NoSuchProcess, psutil.AccessDenied, RuntimeError):
        pass
      try:
        (response.user_cpu_time,
         response.system_cpu_time) = proc.cpu_times()
        # This is very time consuming so we do not collect cpu_percent here.
        # response.cpu_percent = proc.get_cpu_percent()
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      try:
        response.RSS_size, response.VMS_size = proc.memory_info()
        response.memory_percent = proc.memory_percent()
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      # Due to a bug in psutil, this function is disabled for now
      # (https://github.com/giampaolo/psutil/issues/340)
      # try:
      #  for f in proc.open_files():
      #    response.open_files.append(utils.SmartUnicode(f.path))
      # except (psutil.NoSuchProcess, psutil.AccessDenied):
      #  pass
      try:
        for c in proc.connections():
          conn = response.connections.Append(family=c.family,
                                             type=c.type,
                                             pid=proc.pid)
          try:
            conn.state = c.status
          except ValueError:
            logging.info("Encountered unknown connection status (%s).",
                         c.status)
          try:
            conn.local_address.ip, conn.local_address.port = c.laddr
            # Could be in state LISTEN.
            if c.raddr:
              conn.remote_address.ip, conn.remote_address.port = c.raddr
          except AttributeError:
            # Older psutil versions expose local_address/remote_address
            # instead of laddr/raddr.
            conn.local_address.ip, conn.local_address.port = c.local_address
            # Could be in state LISTEN.
            if c.remote_address:
              (conn.remote_address.ip,
               conn.remote_address.port) = c.remote_address
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      self.SendReply(response)
      # Reading information here is slow so we heartbeat between processes.
      self.Progress()
class SendFile(actions.ActionPlugin):
  """This action encrypts and sends a file to a remote listener."""
  in_rdfvalue = rdfvalue.SendFileRequest
  out_rdfvalue = rdfvalue.StatEntry

  # Read/encrypt/send the file in chunks of this size.
  BLOCK_SIZE = 1024 * 1024 * 10  # 10 MB

  def Send(self, sock, msg):
    """Send all of msg over sock, looping over partial sends."""
    totalsent = 0
    n = len(msg)
    while totalsent < n:
      # socket.send() may accept fewer bytes than requested.
      sent = sock.send(msg[totalsent:])
      if sent == 0:
        raise RuntimeError("socket connection broken")
      totalsent += sent

  def Run(self, args):
    """Stream the file, AES-128-CBC encrypted, to the requested host:port."""
    # Open the file.
    fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
    if args.address_family == rdfvalue.NetworkAddress.Family.INET:
      family = socket.AF_INET
    elif args.address_family == rdfvalue.NetworkAddress.Family.INET6:
      family = socket.AF_INET6
    else:
      raise RuntimeError("Socket address family not supported.")
    s = socket.socket(family, socket.SOCK_STREAM)
    try:
      s.connect((args.host, args.port))
    except socket.error as e:
      raise RuntimeError(str(e))
    # Key and IV are supplied by the server in the request.
    cipher = crypto.AES128CBCCipher(args.key, args.iv,
                                    crypto.Cipher.OP_ENCRYPT)
    while True:
      data = fd.read(self.BLOCK_SIZE)
      if not data:
        break
      self.Send(s, cipher.Update(data))
      # Send heartbeats for long files.
      self.Progress()
    # Flush the final (padded) cipher block before closing.
    self.Send(s, cipher.Final())
    s.close()
    self.SendReply(fd.Stat())
class StatFS(actions.ActionPlugin):
  """Call os.statvfs for a given list of paths. OS X and Linux only.

  Note that a statvfs call for a network filesystem (e.g. NFS) that is
  unavailable, e.g. due to no network, will result in the call blocking.
  """
  in_rdfvalue = rdfvalue.StatFSRequest
  out_rdfvalue = rdfvalue.Volume

  def Run(self, args):
    """Reply with a Volume for every requested path that can be statted."""
    if platform.system() == "Windows":
      raise RuntimeError("os.statvfs not available on Windows")

    for path in args.path_list:
      try:
        fd = vfs.VFSOpen(rdfvalue.PathSpec(path=path, pathtype=args.pathtype),
                         progress_callback=self.Progress)
        st = fd.StatFS()
        mount_point = fd.GetMountPoint()
      except (IOError, OSError) as e:
        # Report the failure for this path and keep statting the rest.
        self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
        continue

      unix = rdfvalue.UnixVolume(mount_point=mount_point)
      # On linux pre 2.6 kernels don't have frsize, so we fall back to bsize.
      # The actual_available_allocation_units attribute is set to blocks
      # available to the unprivileged user, root may have some additional
      # reserved space.
      self.SendReply(rdfvalue.Volume(
          bytes_per_sector=(st.f_frsize or st.f_bsize),
          sectors_per_allocation_unit=1,
          total_allocation_units=st.f_blocks,
          actual_available_allocation_units=st.f_bavail,
          unix=unix))
|
# -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017, 2018, 2019, 2020 <NAME>
<<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains implementations of various numbered property equations
used by the DIPPR, the Design Institute for Physical Property Research.
No actual data is included in this module; it is just functional
implementations of the formulas and some of their derivatives/integrals.
For reporting bugs, adding feature requests, or submitting pull requests,
please use the `GitHub issue tracker <https://github.com/CalebBell/chemicals/>`_.
.. contents:: :local:
Equations
---------
.. autofunction:: chemicals.dippr.EQ100
.. autofunction:: chemicals.dippr.EQ101
.. autofunction:: chemicals.dippr.EQ102
.. autofunction:: chemicals.dippr.EQ104
.. autofunction:: chemicals.dippr.EQ105
.. autofunction:: chemicals.dippr.EQ106
.. autofunction:: chemicals.dippr.EQ107
.. autofunction:: chemicals.dippr.EQ114
.. autofunction:: chemicals.dippr.EQ115
.. autofunction:: chemicals.dippr.EQ116
.. autofunction:: chemicals.dippr.EQ127
Jacobians (for fitting)
-----------------------
.. autofunction:: chemicals.dippr.EQ101_fitting_jacobian
.. autofunction:: chemicals.dippr.EQ102_fitting_jacobian
.. autofunction:: chemicals.dippr.EQ105_fitting_jacobian
.. autofunction:: chemicals.dippr.EQ106_fitting_jacobian
.. autofunction:: chemicals.dippr.EQ107_fitting_jacobian
"""
from __future__ import division
# Public API: DIPPR equation evaluators (EQ1xx), analytical Jacobians used
# when fitting coefficients, and helpers that derive EQ106 coefficients
# from a property value and its derivative(s) at a point.
__all__ = ['EQ100', 'EQ101', 'EQ102', 'EQ104', 'EQ105', 'EQ106', 'EQ107',
           'EQ114', 'EQ115', 'EQ116', 'EQ127',
           'EQ101_fitting_jacobian', 'EQ102_fitting_jacobian',
           'EQ106_fitting_jacobian', 'EQ105_fitting_jacobian',
           'EQ107_fitting_jacobian',
           'EQ106_AB', 'EQ106_ABC']
from chemicals.utils import log, exp, sinh, cosh, atan, atanh, sqrt, tanh
from cmath import log as clog
from cmath import sqrt as csqrt
from fluids.numerics import hyp2f1, trunc_exp, trunc_log, numpy as np
# Shared error messages for unsupported `order` arguments. Note: adjacent
# string literals are concatenated with no separator, so each fragment that
# does not end a sentence must carry its own trailing space.
order_not_found_msg = ('Only the actual property calculation, first temperature '
                       'derivative, first temperature integral, and first '
                       'temperature integral over temperature are supported '
                       'with order= 0, 1, -1, or -1j respectively')
# Fixed: the first fragment previously lacked a trailing space, producing
# the garbled message "...calculation, andtemperature derivative(s)...".
order_not_found_pos_only_msg = ('Only the actual property calculation, and '
                                'temperature derivative(s) are supported')
def EQ100(T, A=0, B=0, C=0, D=0, E=0, F=0, G=0, order=0):
    r'''DIPPR Equation # 100. Used in calculating the molar heat capacities
    of liquids and solids, liquid thermal conductivity, and solid density.
    All parameters default to zero. As this is a straightforward polynomial,
    no restrictions on parameters apply. Note that high-order polynomials like
    this may need large numbers of decimal places to avoid unnecessary error.

    .. math::
        Y = A + BT + CT^2 + DT^3 + ET^4 + FT^5 + GT^6

    Parameters
    ----------
    T : float
        Temperature, [K]
    A-G : float
        Parameter for the equation; chemical and property specific [-]
    order : int, optional
        Order of the calculation. 0 for the calculation of the result itself;
        for 1, the first derivative of the property is returned, for
        -1, the indefinite integral of the property with respect to temperature
        is returned; and for -1j, the indefinite integral of the property
        divided by temperature with respect to temperature is returned. No
        other integrals or derivatives are implemented, and an exception will
        be raised if any other order is given.

    Returns
    -------
    Y : float
        Property [constant-specific; if order == 1, property/K; if order == -1,
        property*K; if order == -1j, unchanged from default]

    Notes
    -----
    The derivative with respect to T, integral with respect to T, and integral
    over T with respect to T are computed as follows. All derivatives and
    integrals are easily computed with SymPy.

    .. math::
        \frac{d Y}{dT} = B + 2 C T + 3 D T^{2} + 4 E T^{3} + 5 F T^{4}
        + 6 G T^{5}

    .. math::
        \int Y dT = A T + \frac{B T^{2}}{2} + \frac{C T^{3}}{3} + \frac{D
        T^{4}}{4} + \frac{E T^{5}}{5} + \frac{F T^{6}}{6} + \frac{G T^{7}}{7}

    .. math::
        \int \frac{Y}{T} dT = A \ln{\left (T \right )} + B T + \frac{C T^{2}}
        {2} + \frac{D T^{3}}{3} + \frac{E T^{4}}{4} + \frac{F T^{5}}{5}
        + \frac{G T^{6}}{6}

    Examples
    --------
    Water liquid heat capacity; DIPPR coefficients normally listed in J/kmol/K.

    >>> EQ100(300, 276370., -2090.1, 8.125, -0.014116, 0.0000093701)
    75355.81000000003

    References
    ----------
    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801
       DIPPR/AIChE
    '''
    # All branches use Horner nesting; the derivative/integral coefficient
    # factors are folded directly into the nesting, and each branch returns
    # immediately (guard-clause style).
    if order == 0:
        # The polynomial itself
        return A + T*(B + T*(C + T*(D + T*(E + T*(F + G*T)))))
    if order == 1:
        # First temperature derivative
        return B + T*(2.0*C + T*(3.0*D + T*(4.0*E + T*(5.0*F + 6.0*G*T))))
    if order == -1:
        # Indefinite integral with respect to T
        return T*(A + T*(B*0.5 + T*(C*(1.0/3.0) + T*(D*0.25 + T*(E*0.2 + T*(F*(1.0/6.0) + G*T*(1.0/7.0)))))))
    if order == -1j:
        # Indefinite integral of Y/T with respect to T
        return A*log(T) + T*(B + T*(C*0.5 + T*(D*(1.0/3.0) + T*(E*0.25 + T*(F*0.2 + G*T*(1.0/6.0))))))
    raise ValueError(order_not_found_msg)
def EQ101(T, A, B, C=0.0, D=0.0, E=0.0, order=0):
    r'''DIPPR Equation # 101. Used in calculating vapor pressure, sublimation
    pressure, and liquid viscosity.
    All 5 parameters are required. E is often an integer. As the model is
    exponential, a sufficiently high temperature will cause an OverflowError.
    A negative temperature (or just low, if fit poorly) may cause a math domain
    error.

    .. math::
        Y = \exp\left(A + \frac{B}{T} + C\cdot \ln T + D \cdot T^E\right)

    Parameters
    ----------
    T : float
        Temperature, [K]
    A-E : float
        Parameter for the equation; chemical and property specific [-]
    order : int, optional
        Order of the calculation. 0 for the calculation of the result itself;
        for `n`, the `nth` derivative of the property is returned. No
        other integrals or derivatives are implemented, and an exception will
        be raised if any other order is given.

    Returns
    -------
    Y : float
        Property [constant-specific]

    Notes
    -----
    This function is not integrable for either dT or Y/T dT.

    .. math::
        \frac{d Y}{dT} = \left(- \frac{B}{T^{2}} + \frac{C}{T}
        + \frac{D E T^{E}}{T}\right) e^{A + \frac{B}{T}
        + C \log{\left(T \right)} + D T^{E}}

    .. math::
        \frac{d^2 Y}{dT^2} = \frac{\left(\frac{2 B}{T} - C + D E^{2} T^{E}
        - D E T^{E} + \left(- \frac{B}{T} + C + D E T^{E}\right)^{2}\right)
        e^{A + \frac{B}{T} + C \log{\left(T \right)} + D T^{E}}}{T^{2}}

    .. math::
        \frac{d^3 Y}{dT^3} = \frac{\left(- \frac{6 B}{T} + 2 C + D E^{3} T^{E}
        - 3 D E^{2} T^{E} + 2 D E T^{E} + \left(- \frac{B}{T} + C
        + D E T^{E}\right)^{3} + 3 \left(- \frac{B}{T} + C + D E T^{E}\right)
        \left(\frac{2 B}{T} - C + D E^{2} T^{E} - D E T^{E}\right)\right)
        e^{A + \frac{B}{T} + C \log{\left(T \right)} + D T^{E}}}{T^{3}}

    Examples
    --------
    Water vapor pressure; DIPPR coefficients normally listed in Pa.

    >>> EQ101(300, 73.649, -7258.2, -7.3037, 4.1653E-6, 2)
    3537.44834545549

    References
    ----------
    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801
       DIPPR/AIChE
    '''
    T_inv = 1.0/T
    try:
        T_E = T**E
    except OverflowError:
        # Was a bare `except:`; only float power overflow is expected here
        # (T > 0 is guaranteed by the 1/T above). Cap the term so fitting
        # routines degrade gracefully instead of dying.
        T_E = 1e250
    expr = trunc_exp(A + B*T_inv + C*trunc_log(T) + D*T_E)
    if order == 0:
        return expr
    elif order == 1:
        return T_inv*expr*(-B*T_inv + C + D*E*T_E)
    elif order == 2:
        # x0 is d(log Y)/dT * T, reused in the squared term of the chain rule
        x0 = (-B*T_inv + C + D*E*T_E)
        return expr*(2.0*B*T_inv - C + D*E*T_E*(E - 1.0) + x0*x0)*T_inv*T_inv
    elif order == 3:
        E2 = E*E
        E3 = E2*E
        x0 = (-B*T_inv + C + D*E*T_E)
        return expr*(-6.0*B*T_inv + 2.0*C + D*E3*T_E - 3*D*E2*T_E + 2.0*D*E*T_E
                     + x0*x0*x0
                     + 3.0*(-B*T_inv + C + D*E*T_E)*(2.0*B*T_inv - C + D*E2*T_E - D*E*T_E))*T_inv*T_inv*T_inv
    else:
        raise ValueError(order_not_found_pos_only_msg)
def EQ102(T, A, B, C=0.0, D=0.0, order=0):
    r'''DIPPR Equation # 102. Used in calculating vapor viscosity, vapor
    thermal conductivity, and sometimes solid heat capacity. High values of B
    raise an OverflowError.
    All 4 parameters are required. C and D are often 0.

    .. math::
        Y = \frac{A\cdot T^B}{1 + \frac{C}{T} + \frac{D}{T^2}}

    Parameters
    ----------
    T : float
        Temperature, [K]
    A-D : float
        Parameter for the equation; chemical and property specific [-]
    order : int, optional
        Order of the calculation. 0 for the calculation of the result itself;
        for 1, the first derivative of the property is returned, for
        -1, the indefinite integral of the property with respect to temperature
        is returned; and for -1j, the indefinite integral of the property
        divided by temperature with respect to temperature is returned. No
        other integrals or derivatives are implemented, and an exception will
        be raised if any other order is given.

    Returns
    -------
    Y : float
        Property [constant-specific; if order == 1, property/K; if order == -1,
        property*K; if order == -1j, unchanged from default]

    Notes
    -----
    The derivative with respect to T, integral with respect to T, and integral
    over T with respect to T are computed as follows. The first derivative is
    easily computed; the two integrals required Rubi to perform the integration.

    .. math::
        \frac{d Y}{dT} = \frac{A B T^{B}}{T \left(\frac{C}{T} + \frac{D}{T^{2}}
        + 1\right)} + \frac{A T^{B} \left(\frac{C}{T^{2}} + \frac{2 D}{T^{3}}
        \right)}{\left(\frac{C}{T} + \frac{D}{T^{2}} + 1\right)^{2}}

    .. math::
        \int Y dT = - \frac{2 A T^{B + 3} \operatorname{hyp2f1}{\left (1,B + 3,
        B + 4,- \frac{2 T}{C - \sqrt{C^{2} - 4 D}} \right )}}{\left(B + 3\right)
        \left(C + \sqrt{C^{2} - 4 D}\right) \sqrt{C^{2} - 4 D}} + \frac{2 A
        T^{B + 3} \operatorname{hyp2f1}{\left (1,B + 3,B + 4,- \frac{2 T}{C
        + \sqrt{C^{2} - 4 D}} \right )}}{\left(B + 3\right) \left(C
        - \sqrt{C^{2} - 4 D}\right) \sqrt{C^{2} - 4 D}}

    .. math::
        \int \frac{Y}{T} dT = - \frac{2 A T^{B + 2} \operatorname{hyp2f1}{\left
        (1,B + 2,B + 3,- \frac{2 T}{C + \sqrt{C^{2} - 4 D}} \right )}}{\left(B
        + 2\right) \left(C + \sqrt{C^{2} - 4 D}\right) \sqrt{C^{2} - 4 D}}
        + \frac{2 A T^{B + 2} \operatorname{hyp2f1}{\left (1,B + 2,B + 3,
        - \frac{2 T}{C - \sqrt{C^{2} - 4 D}} \right )}}{\left(B + 2\right)
        \left(C - \sqrt{C^{2} - 4 D}\right) \sqrt{C^{2} - 4 D}}

    Examples
    --------
    Water vapor viscosity; DIPPR coefficients normally listed in Pa*s.

    >>> EQ102(300, 1.7096E-8, 1.1146, 0, 0)
    9.860384711890639e-06

    References
    ----------
    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801
       DIPPR/AIChE
    '''
    if order == 0:
        easy = A/(1. + C/T + D/(T*T))
        try:
            return easy*T**B
        except OverflowError:
            # Was a bare `except:`; only float power overflow is expected
            # here. Return a near-float-max sentinel so fitting routines
            # can continue.
            return 1e308
    elif order == 1:
        return (A*B*T**B/(T*(C/T + D/T**2 + 1))
                + A*T**B*(C/T**2 + 2*D/T**3)/(C/T + D/T**2 + 1)**2)
    elif order == -1: # numba: delete
        # numba-scipy does not support complex numbers so this does not work in numba
        # imaginary part is 0
        # Hours spend trying to make hyp2f1 use real inputs only: 3
        c0 = 3.0 + B # numba: delete
        x0 = csqrt(C*C - 4.0*D) # numba: delete
        arg3_hyp = (-2.0*T/(C - x0)) # numba: delete
        hyp2f1_term1 = hyp2f1(1.0, c0, 4.0+B, arg3_hyp) # numba: delete
        hyp2f1_term2 = hyp2f1_term1.real - hyp2f1_term1.imag*1.0j# numba: delete
        x5 = 2.0*A*T**(c0)/(c0*x0) # numba: delete
        x10 = x5/(C - x0) # numba: delete
        x11 = x5/(C + x0) # numba: delete
        return float((hyp2f1_term1*x10 - hyp2f1_term2*x11).real) # numba: delete
    elif order == -1j: # numba: delete
        return float((2*A*T**(2+B)*hyp2f1(1.0, 2.0+B, 3.0+B, -2*T/(C - csqrt(C*C - 4*D)))/( # numba: delete
            (2+B)*(C - csqrt(C*C-4*D))*csqrt(C*C-4*D)) -2*A*T**(2+B)*hyp2f1( # numba: delete
            1.0, 2.0+B, 3.0+B, -2*T/(C + csqrt(C*C - 4*D)))/((2+B)*(C + csqrt( # numba: delete
            C*C-4*D))*csqrt(C*C-4*D))).real) # numba: delete
    else:
        raise ValueError(order_not_found_msg)
def EQ101_fitting_jacobian(Ts, A, B, C, D, E):
    r'''Compute and return the Jacobian of the property predicted by
    DIPPR Equation # 101 with respect to all the coefficients. This is used in
    fitting parameters for chemicals.

    Parameters
    ----------
    Ts : list[float]
        Temperatures of the experimental data points, [K]
    A-E : float
        Parameter for the equation; chemical and property specific [-]

    Returns
    -------
    jac : list[list[float, 5], len(Ts)]
        Matrix of derivatives of the equation with respect to the fitting
        parameters, [various]
    '''
    N = len(Ts)
#    out = np.zeros((N, 5)) # numba: uncomment
    out = [[0.0]*5 for _ in range(N)] # numba: delete
    for i in range(N):
        T = Ts[i]
        lnT = log(T)
        rT = 1.0/T
        TE = T**E
        DTE = D*TE
        # The property value itself; every column is a multiple of it.
        f = exp(A + B*rT + C*lnT + DTE)
        flnT = lnT*f
        row = out[i]
        row[0] = f           # dY/dA
        row[1] = rT*f        # dY/dB
        row[2] = flnT        # dY/dC
        row[3] = TE*f        # dY/dD
        row[4] = DTE*flnT    # dY/dE
    return out
def EQ102_fitting_jacobian(Ts, A, B, C, D):
    r'''Compute and return the Jacobian of the property predicted by
    DIPPR Equation # 102 with respect to all the coefficients. This is used in
    fitting parameters for chemicals.

    Parameters
    ----------
    Ts : list[float]
        Temperatures of the experimental data points, [K]
    A-D : float
        Parameter for the equation; chemical and property specific [-]

    Returns
    -------
    jac : list[list[float, 4], len(Ts)]
        Matrix of derivatives of the equation with respect to the fitting
        parameters, [various]
    '''
    N = len(Ts)
#    out = np.zeros((N, 4)) # numba: uncomment
    out = [[0.0]*4 for _ in range(N)] # numba: delete
    for i in range(N):
        T = Ts[i]
        TB = T**B
        rT = 1.0/T
        rT2 = rT*rT
        den = C*rT + D*rT2 + 1.0   # denominator of EQ102
        base = TB/den              # this is also dY/dA
        scaled = A*base/den        # common factor of the C and D columns
        row = out[i]
        row[0] = base
        row[1] = A*base*log(T)
        row[2] = -rT*scaled
        row[3] = -rT2*scaled
    return out
def EQ105_fitting_jacobian(Ts, A, B, C, D):
    r'''Compute and return the Jacobian of the property predicted by
    DIPPR Equation # 105 with respect to all the coefficients. This is used in
    fitting parameters for chemicals.

    Parameters
    ----------
    Ts : list[float]
        Temperatures of the experimental data points, [K]
    A-D : float
        Parameter for the equation; chemical and property specific [-]

    Returns
    -------
    jac : list[list[float, 4], len(Ts)]
        Matrix of derivatives of the equation with respect to the fitting
        parameters, [various]
    '''
    N = len(Ts)
#    out = np.zeros((N, 4)) # numba: uncomment
    out = [[0.0]*4 for _ in range(N)] # numba: delete
    for i in range(N):
        row = out[i]
        tau = 1.0 - Ts[i]/C
        if D < 1.0 and tau < 0.0:
            # Above C with a fractional exponent, tau**D would be complex;
            # EQ105 treats tau**D as 0 there, so only A and B contribute.
            row[0] = 1.0/B
            row[1] = -A/(B*B)
        else:
            tau_D = tau**D
            expo = tau_D + 1.0
            scale = A*B**(-tau_D - 1.0)
            common = tau_D*scale*log(B)   # shared by the C and D columns
            row[0] = B**(-expo)
            row[1] = -expo*scale/B
            row[2] = -D*Ts[i]*common/(C*C*tau)
            row[3] = -common*log(tau)
    return out
def EQ106_fitting_jacobian(Ts, Tc, A, B, C, D, E):
    r'''Compute and return the Jacobian of the property predicted by
    DIPPR Equation # 106 with respect to all the coefficients. This is used in
    fitting parameters for chemicals.

    Parameters
    ----------
    Ts : list[float]
        Temperatures of the experimental data points, [K]
    Tc : float
        Critical temperature, [K]
    A-E : float
        Parameter for the equation; chemical and property specific [-]

    Returns
    -------
    jac : list[list[float, 5], len(Ts)]
        Matrix of derivatives of the equation with respect to the fitting
        parameters, [various]
    '''
    N = len(Ts)
#    out = np.zeros((N, 5)) # numba: uncomment
    out = [[0.0]*5 for _ in range(N)] # numba: delete
    for i in range(N):
        Tr = Ts[i]/Tc
        # At the critical point every derivative is zero; the row stays
        # at its initialized zeros.
        if Tr != 1.0:
            tau = 1.0 - Tr
            power = tau**(B + Tr*(C + Tr*(D + E*Tr)))
            base = A*power*log(tau)   # shared by the exponent columns
            row = out[i]
            row[0] = power
            row[1] = base
            row[2] = Tr*base
            row[3] = Tr*Tr*base
            row[4] = Tr*Tr*Tr*base
    return out
def EQ107_fitting_jacobian(Ts, A, B, C, D, E):
    r'''Compute and return the Jacobian of the property predicted by
    DIPPR Equation # 107 with respect to all the coefficients. This is used in
    fitting parameters for chemicals.

    Parameters
    ----------
    Ts : list[float]
        Temperatures of the experimental data points, [K]
    A-E : float
        Parameter for the equation; chemical and property specific [-]

    Returns
    -------
    jac : list[list[float, 5], len(Ts)]
        Matrix of derivatives of the equation with respect to the fitting
        parameters, [various]
    '''
    N = len(Ts)
#    out = np.zeros((N, 5)) # numba: uncomment
    out = [[0.0]*5 for _ in range(N)] # numba: delete
    for i in range(N):
        row = out[i]
        rT = 1.0/Ts[i]
        rT2 = rT*rT
        u = C*rT
        sh_inv = 1.0/sinh(u)
        sinh_term = rT2*sh_inv*sh_inv   # (1/(T sinh(C/T)))^2
        v = E*rT
        ch_inv = 1.0/cosh(v)
        cosh_term = rT2*ch_inv*ch_inv   # (1/(T cosh(E/T)))^2
        row[0] = 1.0
        row[1] = C*C*sinh_term
        row[2] = 2.0*B*C*sinh_term*(-u*cosh(u)*sh_inv + 1.0)
        row[3] = E*E*cosh_term
        row[4] = 2.0*D*E*cosh_term*(-v*sinh(v)*ch_inv + 1.0)
    return out
def EQ104(T, A, B, C=0.0, D=0.0, E=0.0, order=0):
    r'''DIPPR Equation #104. Often used in calculating second virial
    coefficients of gases. All 5 parameters are required.
    C, D, and E are normally large values.

    .. math::
        Y = A + \frac{B}{T} + \frac{C}{T^3} + \frac{D}{T^8} + \frac{E}{T^9}

    Parameters
    ----------
    T : float
        Temperature, [K]
    A-E : float
        Parameter for the equation; chemical and property specific [-]
    order : int, optional
        Order of the calculation. 0 for the calculation of the result itself;
        for 1, the first derivative of the property is returned, for
        -1, the indefinite integral of the property with respect to temperature
        is returned; and for -1j, the indefinite integral of the property
        divided by temperature with respect to temperature is returned. No
        other integrals or derivatives are implemented, and an exception will
        be raised if any other order is given.

    Returns
    -------
    Y : float
        Property [constant-specific; if order == 1, property/K; if order == -1,
        property*K; if order == -1j, unchanged from default]

    Notes
    -----
    The derivative with respect to T, integral with respect to T, and integral
    over T with respect to T are computed as follows. All expressions can be
    obtained with SymPy readily.

    .. math::
        \frac{d Y}{dT} = - \frac{B}{T^{2}} - \frac{3 C}{T^{4}}
        - \frac{8 D}{T^{9}} - \frac{9 E}{T^{10}}

    .. math::
        \int Y dT = A T + B \ln{\left (T \right )} - \frac{1}{56 T^{8}}
        \left(28 C T^{6} + 8 D T + 7 E\right)

    .. math::
        \int \frac{Y}{T} dT = A \ln{\left (T \right )} - \frac{1}{72 T^{9}}
        \left(72 B T^{8} + 24 C T^{6} + 9 D T + 8 E\right)

    Examples
    --------
    Water second virial coefficient; DIPPR coefficients normally dimensionless.

    >>> EQ104(300, 0.02222, -26.38, -16750000, -3.894E19, 3.133E21)
    -1.1204179007265156

    References
    ----------
    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801
       DIPPR/AIChE
    '''
    # Guard-clause style; the order-0 and order-1 branches nest the inverse
    # powers of T so only one division chain is needed.
    if order == 0:
        Tsq = T*T
        return A + (B + (C + (D + E/T)/(Tsq*Tsq*T))/Tsq)/T
    if order == 1:
        Tsq = T*T
        T4th = Tsq*Tsq
        return (-B + (-3*C + (-8*D - 9*E/T)/(T4th*T))/Tsq)/Tsq
    if order == -1:
        return A*T + B*log(T) - (28*C*T**6 + 8*D*T + 7*E)/(56*T**8)
    if order == -1j:
        return A*log(T) - (72*B*T**8 + 24*C*T**6 + 9*D*T + 8*E)/(72*T**9)
    raise ValueError(order_not_found_msg)
def EQ105(T, A, B, C, D, order=0):
    r'''DIPPR Equation #105. Often used in calculating liquid molar density.
    All 4 parameters are required. C is sometimes the fluid's critical
    temperature.

    .. math::
        Y = \frac{A}{B^{1 + \left(1-\frac{T}{C}\right)^D}}

    Parameters
    ----------
    T : float
        Temperature, [K]
    A-D : float
        Parameter for the equation; chemical and property specific [-]
    order : int, optional
        Order of the calculation. 0 for the calculation of the result itself;
        for 1, 2, and 3, that derivative of the property is returned; No
        other integrals or derivatives are implemented, and an exception will
        be raised if any other order is given.

    Returns
    -------
    Y : float
        Property [constant-specific]

    Notes
    -----
    This expression can be integrated in terms of the incomplete gamma function
    for dT, however nans are the only output from that function.
    For Y/T dT no integral could be found.

    .. math::
        \frac{d Y}{dT} = \frac{A B^{- \left(1 - \frac{T}{C}\right)^{D} - 1} D
        \left(1 - \frac{T}{C}\right)^{D} \log{\left(B \right)}}{C \left(1
        - \frac{T}{C}\right)}

    .. math::
        \frac{d^2 Y}{dT^2} = \frac{A B^{- \left(1 - \frac{T}{C}\right)^{D} - 1}
        D \left(1 - \frac{T}{C}\right)^{D} \left(D \left(1 - \frac{T}{C}
        \right)^{D} \log{\left(B \right)} - D + 1\right) \log{\left(B \right)}}
        {C^{2} \left(1 - \frac{T}{C}\right)^{2}}

    .. math::
        \frac{d^3 Y}{dT^3} = \frac{A B^{- \left(1 - \frac{T}{C}\right)^{D} - 1}
        D \left(1 - \frac{T}{C}\right)^{D} \left(D^{2} \left(1 - \frac{T}{C}
        \right)^{2 D} \log{\left(B \right)}^{2} - 3 D^{2} \left(1 - \frac{T}{C}
        \right)^{D} \log{\left(B \right)} + D^{2} + 3 D \left(1 - \frac{T}{C}
        \right)^{D} \log{\left(B \right)} - 3 D + 2\right) \log{\left(B
        \right)}}{C^{3} \left(1 - \frac{T}{C}\right)^{3}}

    Examples
    --------
    Hexane molar density; DIPPR coefficients normally in kmol/m^3.

    >>> EQ105(300., 0.70824, 0.26411, 507.6, 0.27537)
    7.593170096339237

    References
    ----------
    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801
       DIPPR/AIChE
    '''
    if order == 0:
        tau = (1. - T/C)
        if D < 1.0 and tau < 0.0:
            # With a fractional exponent D and T above C, tau**D would be a
            # complex number; clamp tau to zero so the result stays real.
            tau = 0.0
        return A*B**(-(1. + tau**D))
    elif order == 1:
        rC = 1.0/C
        tau = 1.0 - T*rC
        tauD = tau**D
        return A*B**(-tauD - 1.0)*D*rC*tauD*log(B)/tau
    elif order == 2:
        tau = 1.0 - T/C
        tauD = tau**D
        w = D*tauD*log(B)
        inv = 1.0/(C*tau)
        return A*B**(-tauD - 1.0)*w*(1.0 - D + w)*inv*inv
    elif order == 3:
        tau = 1.0 - T/C
        tauD = tau**D
        threeD = 3.0*D
        D2 = D*D
        lnB = log(B)
        q = tauD*lnB
        inv = 1.0/(C*tau)
        return A*B**(-tauD - 1.0)*D*q*(tau**(2.0*D)*D2*lnB*lnB + threeD*q - threeD - 3.0*D2*q + D2 + 2.0)*inv*inv*inv
    else:
        raise ValueError(order_not_found_msg)
def EQ106(T, Tc, A, B, C=0.0, D=0.0, E=0.0, order=0):
    r'''DIPPR Equation #106. Often used in calculating liquid surface tension,
    and heat of vaporization.
    Only parameters A and B parameters are required; many fits include no
    further parameters. Critical temperature is also required.

    .. math::
        Y = A(1-T_r)^{B + C T_r + D T_r^2 + E T_r^3}

    .. math::
        Tr = \frac{T}{Tc}

    Parameters
    ----------
    T : float
        Temperature, [K]
    Tc : float
        Critical temperature, [K]
    A-E : float
        Parameter for the equation; chemical and property specific [-]
    order : int, optional
        Order of the calculation. 0 for the calculation of the result itself;
        for 1, 2, and 3, that derivative of the property is returned; No
        other integrals or derivatives are implemented, and an exception will
        be raised if any other order is given.

    Returns
    -------
    Y : float
        Property [constant-specific]

    Notes
    -----
    This form is used by Yaws with only the parameters `A` and `B`.
    The integral could not be found, but the integral over T actually could,
    again in terms of hypergeometric functions.

    .. math::
        \frac{d Y}{dT} = A \left(- \frac{T}{T_{c}} + 1\right)^{B + \frac{C T}
        {T_{c}} + \frac{D T^{2}}{T_{c}^{2}} + \frac{e T^{3}}{T_{c}^{3}}} \left(
        \left(\frac{C}{T_{c}} + \frac{2 D T}{T_{c}^{2}} + \frac{3 e T^{2}}
        {T_{c}^{3}}\right) \log{\left(- \frac{T}{T_{c}} + 1 \right)} - \frac{B
        + \frac{C T}{T_{c}} + \frac{D T^{2}}{T_{c}^{2}} + \frac{e T^{3}}
        {T_{c}^{3}}}{T_{c} \left(- \frac{T}{T_{c}} + 1\right)}\right)

    .. math::
        \frac{d^2 Y}{dT^2} = \frac{A \left(- \frac{T}{T_{c}} + 1\right)^{B
        + \frac{C T}{T_{c}} + \frac{D T^{2}}{T_{c}^{2}} + \frac{e T^{3}}
        {T_{c}^{3}}} \left(2 \left(D + \frac{3 e T}{T_{c}}\right) \log{\left(
        - \frac{T}{T_{c}} + 1 \right)} + \left(\left(C + \frac{2 D T}{T_{c}}
        + \frac{3 e T^{2}}{T_{c}^{2}}\right) \log{\left(- \frac{T}{T_{c}}
        + 1 \right)} + \frac{B + \frac{C T}{T_{c}} + \frac{D T^{2}}{T_{c}^{2}}
        + \frac{e T^{3}}{T_{c}^{3}}}{\frac{T}{T_{c}} - 1}\right)^{2}
        + \frac{2 \left(C + \frac{2 D T}{T_{c}} + \frac{3 e T^{2}}{T_{c}^{2}}
        \right)}{\frac{T}{T_{c}} - 1} - \frac{B + \frac{C T}{T_{c}} + \frac{D
        T^{2}}{T_{c}^{2}} + \frac{e T^{3}}{T_{c}^{3}}}{\left(\frac{T}{T_{c}}
        - 1\right)^{2}}\right)}{T_{c}^{2}}

    .. math::
        \frac{d^3 Y}{dT^3} = \frac{A \left(- \frac{T}{T_{c}} + 1\right)^{B
        + \frac{C T}{T_{c}} + \frac{D T^{2}}{T_{c}^{2}} + \frac{e T^{3}}
        {T_{c}^{3}}} \left(\frac{6 \left(D + \frac{3 e T}{T_{c}}\right)}
        {\frac{T}{T_{c}} - 1} + \left(\left(C + \frac{2 D T}{T_{c}}
        + \frac{3 e T^{2}}{T_{c}^{2}}\right) \log{\left(- \frac{T}{T_{c}}
        + 1 \right)} + \frac{B + \frac{C T}{T_{c}} + \frac{D T^{2}}{T_{c}^{2}}
        + \frac{e T^{3}}{T_{c}^{3}}}{\frac{T}{T_{c}} - 1}\right)^{3}
        + 3 \left(\left(C + \frac{2 D T}{T_{c}} + \frac{3 e T^{2}}{T_{c}^{2}}
        \right) \log{\left(- \frac{T}{T_{c}} + 1 \right)} + \frac{B
        + \frac{C T}{T_{c}} + \frac{D T^{2}}{T_{c}^{2}} + \frac{e T^{3}}
        {T_{c}^{3}}}{\frac{T}{T_{c}} - 1}\right) \left(2 \left(D + \frac{3 e T}
        {T_{c}}\right) \log{\left(- \frac{T}{T_{c}} + 1 \right)} + \frac{2
        \left(C + \frac{2 D T}{T_{c}} + \frac{3 e T^{2}}{T_{c}^{2}}\right)}
        {\frac{T}{T_{c}} - 1} - \frac{B + \frac{C T}{T_{c}} + \frac{D T^{2}}
        {T_{c}^{2}} + \frac{e T^{3}}{T_{c}^{3}}}{\left(\frac{T}{T_{c}}
        - 1\right)^{2}}\right) + 6 e \log{\left(- \frac{T}{T_{c}} + 1 \right)}
        - \frac{3 \left(C + \frac{2 D T}{T_{c}} + \frac{3 e T^{2}}{T_{c}^{2}}
        \right)}{\left(\frac{T}{T_{c}} - 1\right)^{2}} + \frac{2 \left(B
        + \frac{C T}{T_{c}} + \frac{D T^{2}}{T_{c}^{2}} + \frac{e T^{3}}
        {T_{c}^{3}}\right)}{\left(\frac{T}{T_{c}} - 1\right)^{3}}\right)}
        {T_{c}^{3}}

    Examples
    --------
    Water surface tension; DIPPR coefficients normally in Pa*s.

    >>> EQ106(300, 647.096, 0.17766, 2.567, -3.3377, 1.9699)
    0.07231499373541

    References
    ----------
    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801
       DIPPR/AIChE
    '''
    if order == 0:
        Tr = T/Tc
        tau = (1.0 - Tr)
        if tau <= 0.0:
            # At or above the critical temperature the property vanishes
            # (e.g. surface tension and heat of vaporization).
            return 0.0
        power = (B + Tr*(C + Tr*(D + E*Tr)))
        try:
            return A*tau**power
        except OverflowError:
            # Was a bare `except:`; only float power overflow is expected
            # (tau > 0 is guaranteed above).
            # TODO: after more testing with regression, maybe return a more
            # precise value or allow A to impact the result
            return 1e300
    elif order == 1:
        x0 = 1.0/Tc
        x1 = T*x0
        x2 = 1.0 - x1
        x3 = E*x1
        x4 = C + x1*(D + x3)
        x5 = B + x1*x4
        return A*x0*x2**x5*(x5/(x1 - 1.0) + (x1*(D + 2.0*x3) + x4)*log(x2))
    elif order == 2:
        x0 = T/Tc
        x1 = 1.0 - x0
        x2 = E*x0
        x3 = C + x0*(D + x2)
        x4 = B + x0*x3
        x5 = log(x1)
        x6 = x0 - 1.0
        x7 = 1.0/x6
        x8 = x0*(D + 2.0*x2) + x3
        return (A*x1**x4*(-x4/x6**2 + 2*x5*(D + 3.0*x2) + 2.0*x7*x8
                          + (x4*x7 + x5*x8)**2)/Tc**2)
    elif order == 3:
        x0 = T/Tc
        x1 = 1.0 - x0
        x2 = E*x0
        x3 = C + x0*(D + x2)
        x4 = B + x0*x3
        x5 = log(x1)
        x6 = D + 3.0*x2
        x7 = x0 - 1.0
        x8 = 1/x7
        x9 = x7**(-2)
        x10 = x0*(D + 2.0*x2) + x3
        x11 = x10*x5 + x4*x8
        return (A*x1**x4*(-3*x10*x9 + x11**3 + 3*x11*(2*x10*x8 - x4*x9 + 2*x5*x6)
                          + 2*x4/x7**3 + 6*E*x5 + 6*x6*x8)/Tc**3)
    else:
        raise ValueError(order_not_found_msg)
def EQ106_AB(T, Tc, val, der):
    r'''Calculate the coefficients `A` and `B` of the DIPPR Equation #106 using
    the value of the function and its derivative at a specific point.

    .. math::
        A = val \left(\frac{1}{Tc} \left(- T + Tc\right)\right)^{- \frac{der}{val} \left(T - Tc\right)}

    .. math::
        B = \frac{der}{val} \left(T - Tc\right)

    Parameters
    ----------
    T : float
        Temperature, [K]
    Tc : float
        Critical temperature, [K]
    val : float
        Property value [constant-specific]
    der : float
        First temperature derivative of property value [constant-specific/K]

    Returns
    -------
    A : float
        Parameter for the equation [constant-specific]
    B : float
        Parameter for the equation [-]

    Notes
    -----

    Examples
    --------
    >>> val = EQ106(300, 647.096, A=0.17766, B=2.567)
    >>> der = EQ106(300, 647.096, A=0.17766, B=2.567, order=1)
    >>> EQ106_AB(300, 647.096, val, der)
    (0.17766, 2.567)
    '''
    '''# Derived with:
    from sympy import *
    T, Tc, A, B, val, der = symbols('T, Tc, A, B, val, der')
    Tr = T/Tc
    expr = A*(1 - Tr)**B
    Eq0 = Eq(expr, val)
    Eq1 = Eq(diff(expr, T), der)
    s = solve([Eq0, Eq1], [A, B])
    '''
    dT = T - Tc
    # B follows from the logarithmic derivative; A then back-solves the value.
    B = der*dT/val
    A = val*(-dT/Tc)**(-B)
    return (A, B)
def EQ106_ABC(T, Tc, val, der, der2):
    r'''Calculate the coefficients `A`, `B`, and `C` of the DIPPR Equation #106
    using, the value of the function and its first and second derivative at a
    specific point.

    .. math::
        A = val \left(\frac{1}{Tc} \left(- T + Tc\right)\right)^{\frac{1}{val^{2}
        \left(\log{\left (\frac{1}{Tc} \left(- T + Tc\right) \right )} + 2\right)}
        \left(T \left(\log{\left (\frac{1}{Tc} \left(- T + Tc\right) \right )}
        + 1\right) \left(- T der^{2} + T der_{2} val + Tc der^{2} - Tc der_{2}
        val + der val\right) - T \left(- T der^{2} + T der_{2} val + Tc der^{2}
        - Tc der_{2} val + der val\right) - Tc \left(- T der^{2} + T der_{2} val
        + Tc der^{2} - Tc der_{2} val + der val\right) \log{\left (\frac{1}{Tc}
        \left(- T + Tc\right) \right )} - der val \left(T - Tc\right)
        \left(\log{\left (\frac{1}{Tc} \left(- T + Tc\right) \right )}
        + 2\right)\right)}

    .. math::
        B = \frac{1}{val^{2} \left(\log{\left (\frac{1}{Tc} \left(- T + Tc\right)
        \right )} + 2\right)} \left(- T \left(\log{\left (\frac{1}{Tc}
        \left(- T + Tc\right) \right )} + 1\right) \left(- T der^{2} + T der_{2}
        val + Tc der^{2} - Tc der_{2} val + der val\right) + Tc \left(- T der^{2}
        + T der_{2} val + Tc der^{2} - Tc der_{2} val + der val\right)
        \log{\left (\frac{1}{Tc} \left(- T + Tc\right) \right )} + der val
        \left(T - Tc\right) \left(\log{\left (\frac{1}{Tc} \left(- T + Tc\right) \right )} + 2\right)\right)

    .. math::
        C = \frac{Tc \left(- T der^{2} + T der_{2} val + Tc der^{2} - Tc der_{2}
        val + der val\right)}{val^{2} \left(\log{\left (\frac{1}{Tc}
        \left(- T + Tc\right) \right )} + 2\right)}

    Parameters
    ----------
    T : float
        Temperature, [K]
    Tc : float
        Critical temperature, [K]
    val : float
        Property value [constant-specific]
    der : float
        First temperature derivative of property value [constant-specific/K]
    der2 : float
        Second temperature derivative of property value [constant-specific/K^2]

    Returns
    -------
    A : float
        Parameter for the equation [constant-specific]
    B : float
        Parameter for the equation [-]
    C : float
        Parameter for the equation [-]

    Notes
    -----

    Examples
    --------
    >>> val = EQ106(300, 647.096, A=0.17766, B=2.567, C=-0.01)
    >>> der = EQ106(300, 647.096, A=0.17766, B=2.567, C=-0.01, order=1)
    >>> der2 = EQ106(300, 647.096, A=0.17766, B=2.567, C=-0.01, order=2)
    >>> EQ106_ABC(300, 647.096, val, der, der2)
    (0.17766, 2.567, -0.01)
    '''
    '''# Broken in recent versions of SymPy, SymPy 1.1 is good
    from sympy import *
    T, Tc, A, B, C, val, der, der2 = symbols('T, Tc, A, B, C, val, der, der2')
    Tr = T/Tc
    expr = A*(1 - Tr)**(B + C*Tr)
    Eq0 = Eq(expr, val)
    Eq1 = Eq(diff(expr, T), der)
    Eq2 = Eq(diff(expr, T, 2), der2)
    s = solve([Eq0, Eq1, Eq2], [A, B, C])
    '''
    dT = T - Tc
    tau = -dT/Tc
    ln_tau = log(tau)
    fac = ln_tau + 2
    inv = 1/(val*val*fac)
    dv = der*val
    d2v = der2*val
    d2 = der*der
    # `core` is the combination shared by the B and C expressions.
    core = T*d2v - T*d2 - Tc*d2v + Tc*d2 + dv
    Tcore = T*core
    Tccore = Tc*core
    Bnum = dT*fac*dv + Tccore*ln_tau - Tcore*(ln_tau + 1)
    A, B, C = val*tau**(-inv*(Bnum + Tcore)), Bnum*inv, Tccore*inv
    return (A, B, C)
def EQ107(T, A=0, B=0, C=0, D=0, E=0, order=0):
    r'''DIPPR Equation #107. Often used in calculating ideal-gas heat capacity.
    All 5 parameters are required.
    Also called the Aly-Lee equation.

    .. math::
        Y = A + B\left[\frac{C/T}{\sinh(C/T)}\right]^2 + D\left[\frac{E/T}{
        \cosh(E/T)}\right]^2

    Parameters
    ----------
    T : float
        Temperature, [K]
    A-E : float
        Parameter for the equation; chemical and property specific [-]
    order : int, optional
        Order of the calculation. 0 for the calculation of the result itself;
        for 1, the first derivative of the property is returned, for
        -1, the indefinite integral of the property with respect to temperature
        is returned; and for -1j, the indefinite integral of the property
        divided by temperature with respect to temperature is returned. No
        other integrals or derivatives are implemented, and an exception will
        be raised if any other order is given.

    Returns
    -------
    Y : float
        Property [constant-specific; if order == 1, property/K; if order == -1,
        property*K; if order == -1j, unchanged from default]

    Notes
    -----
    The derivative with respect to T, integral with respect to T, and integral
    over T with respect to T are computed as follows. The derivative is
    obtained via SymPy; the integrals from Wolfram Alpha.

    .. math::
        \frac{d Y}{dT} = \frac{2 B C^{3} \cosh{\left (\frac{C}{T} \right )}}
        {T^{4} \sinh^{3}{\left (\frac{C}{T} \right )}} - \frac{2 B C^{2}}{T^{3}
        \sinh^{2}{\left (\frac{C}{T} \right )}} + \frac{2 D E^{3} \sinh{\left
        (\frac{E}{T} \right )}}{T^{4} \cosh^{3}{\left (\frac{E}{T} \right )}}
        - \frac{2 D E^{2}}{T^{3} \cosh^{2}{\left (\frac{E}{T} \right )}}

    .. math::
        \int Y dT = A T + \frac{B C}{\tanh{\left (\frac{C}{T} \right )}}
        - D E \tanh{\left (\frac{E}{T} \right )}

    .. math::
        \int \frac{Y}{T} dT = A \ln{\left (T \right )} + \frac{B C}{T \tanh{
        \left (\frac{C}{T} \right )}} - B \ln{\left (\sinh{\left (\frac{C}{T}
        \right )} \right )} - \frac{D E}{T} \tanh{\left (\frac{E}{T} \right )}
        + D \ln{\left (\cosh{\left (\frac{E}{T} \right )} \right )}

    Examples
    --------
    Water ideal gas molar heat capacity; DIPPR coefficients normally in
    J/kmol/K

    >>> EQ107(300., 33363., 26790., 2610.5, 8896., 1169.)
    33585.90452768923

    References
    ----------
    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801
       DIPPR/AIChE
    .. [2] <NAME>., and <NAME>. "Self-Consistent Equations for
       Calculating the Ideal Gas Heat Capacity, Enthalpy, and Entropy." Fluid
       Phase Equilibria 6, no. 3 (January 1, 1981): 169-79.
       doi:10.1016/0378-3812(81)85002-9.
    '''
    if order == 0:
        # sinh/cosh are expanded into exponentials so trunc_exp can guard
        # against overflow at very low temperatures.
        u = C/T
        sinh_ratio = 2.0*u/(trunc_exp(u) - trunc_exp(-u))   # (C/T)/sinh(C/T)
        v = E/T
        cosh_ratio = 2.0*v/(trunc_exp(-v) + trunc_exp(v))   # (E/T)/cosh(E/T)
        return A + B*sinh_ratio*sinh_ratio + D*cosh_ratio*cosh_ratio
    if order == 1:
        return (2*B*C**3*cosh(C/T)/(T**4*sinh(C/T)**3)
                - 2*B*C**2/(T**3*sinh(C/T)**2)
                + 2*D*E**3*sinh(E/T)/(T**4*cosh(E/T)**3)
                - 2*D*E**2/(T**3*cosh(E/T)**2))
    if order == -1:
        return A*T + B*C/tanh(C/T) - D*E*tanh(E/T)
    if order == -1j:
        return (A*log(T) + B*C/tanh(C/T)/T - B*log(sinh(C/T))
                - D*E*tanh(E/T)/T + D*log(cosh(E/T)))
    raise ValueError(order_not_found_msg)
def EQ114(T, Tc, A, B, C, D, order=0):
    r'''DIPPR Equation #114. Rarely used, normally as an alternate liquid
    heat capacity expression. All 4 parameters are required, as well as
    critical temperature.
    .. math::
        Y = \frac{A^2}{\tau} + B - 2AC\tau - AD\tau^2 - \frac{1}{3}C^2\tau^3
        - \frac{1}{2}CD\tau^4 - \frac{1}{5}D^2\tau^5
    .. math::
        \tau = 1 - \frac{T}{Tc}
    Parameters
    ----------
    T : float
        Temperature, [K]
    Tc : float
        Critical temperature, [K]
    A-D : float
        Parameter for the equation; chemical and property specific [-]
    order : int, optional
        Order of the calculation. 0 for the calculation of the result itself;
        for 1, the first derivative of the property is returned, for
        -1, the indefinite integral of the property with respect to temperature
        is returned; and for -1j, the indefinite integral of the property
        divided by temperature with respect to temperature is returned. No
        other integrals or derivatives are implemented, and an exception will
        be raised if any other order is given.
    Returns
    -------
    Y : float
        Property [constant-specific; if order == 1, property/K; if order == -1,
        property*K; if order == -1j, unchanged from default]
    Notes
    -----
    The derivative with respect to T, integral with respect to T, and integral
    over T with respect to T are computed as follows. All expressions can be
    obtained with SymPy readily.
    .. math::
        \frac{d Y}{dT} = \frac{A^{2}}{T_{c} \left(- \frac{T}{T_{c}}
        + 1\right)^{2}} + \frac{2 A}{T_{c}} C + \frac{2 A}{T_{c}} D \left(
        - \frac{T}{T_{c}} + 1\right) + \frac{C^{2}}{T_{c}} \left(
        - \frac{T}{T_{c}} + 1\right)^{2} + \frac{2 C}{T_{c}} D \left(
        - \frac{T}{T_{c}} + 1\right)^{3} + \frac{D^{2}}{T_{c}} \left(
        - \frac{T}{T_{c}} + 1\right)^{4}
    .. math::
        \int Y dT = - A^{2} T_{c} \ln{\left (T - T_{c} \right )} + \frac{D^{2}
        T^{6}}{30 T_{c}^{5}} - \frac{T^{5}}{10 T_{c}^{4}} \left(C D + 2 D^{2}
        \right) + \frac{T^{4}}{12 T_{c}^{3}} \left(C^{2} + 6 C D + 6 D^{2}
        \right) - \frac{T^{3}}{3 T_{c}^{2}} \left(A D + C^{2} + 3 C D
        + 2 D^{2}\right) + \frac{T^{2}}{2 T_{c}} \left(2 A C + 2 A D + C^{2}
        + 2 C D + D^{2}\right) + T \left(- 2 A C - A D + B - \frac{C^{2}}{3}
        - \frac{C D}{2} - \frac{D^{2}}{5}\right)
    .. math::
        \int \frac{Y}{T} dT = - A^{2} \ln{\left (T + \frac{- 60 A^{2} T_{c}
        + 60 A C T_{c} + 30 A D T_{c} - 30 B T_{c} + 10 C^{2} T_{c}
        + 15 C D T_{c} + 6 D^{2} T_{c}}{60 A^{2} - 60 A C - 30 A D + 30 B
        - 10 C^{2} - 15 C D - 6 D^{2}} \right )} + \frac{D^{2} T^{5}}
        {25 T_{c}^{5}} - \frac{T^{4}}{8 T_{c}^{4}} \left(C D + 2 D^{2}
        \right) + \frac{T^{3}}{9 T_{c}^{3}} \left(C^{2} + 6 C D + 6 D^{2}
        \right) - \frac{T^{2}}{2 T_{c}^{2}} \left(A D + C^{2} + 3 C D
        + 2 D^{2}\right) + \frac{T}{T_{c}} \left(2 A C + 2 A D + C^{2}
        + 2 C D + D^{2}\right) + \frac{1}{30} \left(30 A^{2} - 60 A C
        - 30 A D + 30 B - 10 C^{2} - 15 C D - 6 D^{2}\right) \ln{\left
        (T + \frac{1}{60 A^{2} - 60 A C - 30 A D + 30 B - 10 C^{2} - 15 C D
        - 6 D^{2}} \left(- 30 A^{2} T_{c} + 60 A C T_{c} + 30 A D T_{c}
        - 30 B T_{c} + 10 C^{2} T_{c} + 15 C D T_{c} + 6 D^{2} T_{c}
        + T_{c} \left(30 A^{2} - 60 A C - 30 A D + 30 B - 10 C^{2} - 15 C D
        - 6 D^{2}\right)\right) \right )}
    Strictly speaking, the integral over T has an imaginary component, but
    only the real component is relevant and the complex part discarded.
    Examples
    --------
    Hydrogen liquid heat capacity; DIPPR coefficients normally in J/kmol/K.
    >>> EQ114(20, 33.19, 66.653, 6765.9, -123.63, 478.27)
    19423.948911676463
    References
    ----------
    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801
       DIPPR/AIChE
    '''
    if order == 0:
        # t is tau = 1 - T/Tc, the complement of the reduced temperature.
        t = 1.-T/Tc
        return (A**2./t + B - 2.*A*C*t - A*D*t**2. - C**2.*t**3./3.
                - C*D*t**4./2. - D**2*t**5./5.)
    elif order == 1:
        # First derivative dY/dT (SymPy form; see Notes).
        return (A**2/(Tc*(-T/Tc + 1)**2) + 2*A*C/Tc + 2*A*D*(-T/Tc + 1)/Tc
                + C**2*(-T/Tc + 1)**2/Tc + 2*C*D*(-T/Tc + 1)**3/Tc
                + D**2*(-T/Tc + 1)**4/Tc)
    elif order == -1:
        # Indefinite integral of Y dT. clog is the complex log: (T - Tc) is
        # negative for subcritical T, so only the real part is kept (Notes).
        return (-A**2*Tc*clog(T - Tc).real + D**2*T**6/(30*Tc**5)
                - T**5*(C*D + 2*D**2)/(10*Tc**4)
                + T**4*(C**2 + 6*C*D + 6*D**2)/(12*Tc**3) - T**3*(A*D + C**2
                + 3*C*D + 2*D**2)/(3*Tc**2) + T**2*(2*A*C + 2*A*D + C**2 + 2*C*D
                + D**2)/(2*Tc) + T*(-2*A*C - A*D + B - C**2/3 - C*D/2 - D**2/5))
    elif order == -1j:
        # Indefinite integral of Y/T dT; complex logs again, real part only.
        return (-A**2*clog(T + (-60*A**2*Tc + 60*A*C*Tc + 30*A*D*Tc - 30*B*Tc
                + 10*C**2*Tc + 15*C*D*Tc + 6*D**2*Tc)/(60*A**2 - 60*A*C
                - 30*A*D + 30*B - 10*C**2 - 15*C*D - 6*D**2)).real
                + D**2*T**5/(25*Tc**5) - T**4*(C*D + 2*D**2)/(8*Tc**4)
                + T**3*(C**2 + 6*C*D + 6*D**2)/(9*Tc**3) - T**2*(A*D + C**2
                + 3*C*D + 2*D**2)/(2*Tc**2) + T*(2*A*C + 2*A*D + C**2 + 2*C*D
                + D**2)/Tc + (30*A**2 - 60*A*C - 30*A*D + 30*B - 10*C**2
                - 15*C*D - 6*D**2)*clog(T + (-30*A**2*Tc + 60*A*C*Tc
                + 30*A*D*Tc - 30*B*Tc + 10*C**2*Tc + 15*C*D*Tc + 6*D**2*Tc
                + Tc*(30*A**2 - 60*A*C - 30*A*D + 30*B - 10*C**2 - 15*C*D
                - 6*D**2))/(60*A**2 - 60*A*C - 30*A*D + 30*B - 10*C**2
                - 15*C*D - 6*D**2)).real/30)
    else:
        raise ValueError(order_not_found_msg)
def EQ115(T, A, B, C=0, D=0, E=0, order=0):
    r'''DIPPR Equation #115. No major uses; has been used as an alternate
    liquid viscosity expression, and as a model for vapor pressure.
    Only parameters A and B are required.
    .. math::
        Y = \exp\left(A + \frac{B}{T} + C\ln T + D T^2 + \frac{E}{T^2}\right)
    Parameters
    ----------
    T : float
        Temperature, [K]
    A-E : float
        Parameter for the equation; chemical and property specific [-]
    order : int, optional
        Order of the calculation. 0 for the calculation of the result itself;
        for 1, 2, and 3, that derivative of the property is returned; No
        other integrals or derivatives are implemented, and an exception will
        be raised if any other order is given.
    Returns
    -------
    Y : float
        Property [constant-specific]
    Notes
    -----
    No coefficients found for this expression.
    This function is not integrable for either dT or Y/T dT.
    .. math::
        \frac{d Y}{dT} = \left(- \frac{B}{T^{2}} + \frac{C}{T} + 2 D T
        - \frac{2 E}{T^{3}}\right) e^{A + \frac{B}{T} + C \log{\left(T \right)}
        + D T^{2} + \frac{E}{T^{2}}}
    .. math::
        \frac{d^2 Y}{dT^2} = \left(\frac{2 B}{T^{3}} - \frac{C}{T^{2}} + 2 D
        + \frac{6 E}{T^{4}} + \left(\frac{B}{T^{2}} - \frac{C}{T} - 2 D T
        + \frac{2 E}{T^{3}}\right)^{2}\right) e^{A + \frac{B}{T}
        + C \log{\left(T \right)} + D T^{2} + \frac{E}{T^{2}}}
    .. math::
        \frac{d^3 Y}{dT^3} =- \left(3 \left(\frac{2 B}{T^{3}} - \frac{C}{T^{2}}
        + 2 D + \frac{6 E}{T^{4}}\right) \left(\frac{B}{T^{2}} - \frac{C}{T}
        - 2 D T + \frac{2 E}{T^{3}}\right) + \left(\frac{B}{T^{2}}
        - \frac{C}{T} - 2 D T + \frac{2 E}{T^{3}}\right)^{3} + \frac{2 \left(
        \frac{3 B}{T} - C + \frac{12 E}{T^{2}}\right)}{T^{3}}\right)
        e^{A + \frac{B}{T} + C \log{\left(T \right)} + D T^{2} + \frac{E}{T^{2}}}
    References
    ----------
    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801
       DIPPR/AIChE
    '''
    if order == 0:
        # trunc_exp guards against overflow of the exponential for extreme
        # arguments.
        return trunc_exp(A+B/T+C*log(T)+D*T**2 + E/T**2)
    elif order == 1:
        # CSE'd SymPy derivative: x0 = T^2, x1 = 1/T^2, x2 = 1/T.
        x0 = T**2
        x1 = 1/x0
        x2 = 1/T
        return (-(B*x1 - C*x2 - 2*D*T + 2*E/T**3)*exp(A + B*x2 + C*log(T) + D*x0 + E*x1))
    elif order == 2:
        # CSE'd second derivative; closed form given in Notes.
        x0 = 1/T
        x1 = T**2
        x2 = 1/x1
        x3 = 2*D
        x4 = 2/T**3
        return (B*x4 - C*x2 + 6*E/T**4 + x3 + (B*x2 - C*x0 + E*x4 - T*x3)**2)*exp(A + B*x0 + C*log(T) + D*x1 + E*x2)
    elif order == 3:
        # CSE'd third derivative; closed form given in Notes.
        x0 = 1/T
        x1 = B*x0
        x2 = T**2
        x3 = 1/x2
        x4 = E*x3
        x5 = 2/T**3
        x6 = 2*D
        x7 = B*x3 - C*x0 + E*x5 - T*x6
        return (-(x5*(-C + 3*x1 + 12*x4) + x7**3 + 3*x7*(B*x5 - C*x3 + 6*E/T**4
                + x6))*exp(A + C*log(T) + D*x2 + x1 + x4))
    else:
        raise ValueError(order_not_found_msg)
def EQ116(T, Tc, A, B, C, D, E, order=0):
    r'''DIPPR Equation #116. Used to describe the molar density of water fairly
    precisely; no other uses listed. All 5 parameters are needed, as well as
    the critical temperature.
    .. math::
        Y = A + B\tau^{0.35} + C\tau^{2/3} + D\tau + E\tau^{4/3}
    .. math::
        \tau = 1 - \frac{T}{T_c}
    Parameters
    ----------
    T : float
        Temperature, [K]
    Tc : float
        Critical temperature, [K]
    A-E : float
        Parameter for the equation; chemical and property specific [-]
    order : int, optional
        Order of the calculation. 0 for the calculation of the result itself;
        for 1, the first derivative of the property is returned, for
        -1, the indefinite integral of the property with respect to temperature
        is returned; and for -1j, the indefinite integral of the property
        divided by temperature with respect to temperature is returned. No
        other integrals or derivatives are implemented, and an exception will
        be raised if any other order is given.
    Returns
    -------
    Y : float
        Property [constant-specific; if order == 1, property/K; if order == -1,
        property*K; if order == -1j, unchanged from default]
    Notes
    -----
    The derivative with respect to T and integral with respect to T are
    computed as follows. The integral divided by T with respect to T has an
    extremely complicated (but still elementary) integral which can be read
    from the source. It was computed with Rubi; the other expressions can
    readily be obtained with SymPy.
    .. math::
        \frac{d Y}{dT} = - \frac{7 B}{20 T_c \left(- \frac{T}{T_c} + 1\right)^{
        \frac{13}{20}}} - \frac{2 C}{3 T_c \sqrt[3]{- \frac{T}{T_c} + 1}}
        - \frac{D}{T_c} - \frac{4 E}{3 T_c} \sqrt[3]{- \frac{T}{T_c} + 1}
    .. math::
        \int Y dT = A T - \frac{20 B}{27} T_c \left(- \frac{T}{T_c} + 1\right)^{
        \frac{27}{20}} - \frac{3 C}{5} T_c \left(- \frac{T}{T_c} + 1\right)^{
        \frac{5}{3}} + D \left(- \frac{T^{2}}{2 T_c} + T\right) - \frac{3 E}{7}
        T_c \left(- \frac{T}{T_c} + 1\right)^{\frac{7}{3}}
    Examples
    --------
    Water liquid molar density; DIPPR coefficients normally in kmol/m^3.
    >>> EQ116(300., 647.096, 17.863, 58.606, -95.396, 213.89, -141.26)
    55.17615446406527
    References
    ----------
    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801
       DIPPR/AIChE
    '''
    if order == 0:
        # tau = 1 - Tr; fractional powers (0.35, 2/3, 4/3) per DIPPR 116.
        tau = 1-T/Tc
        return A + B*tau**0.35 + C*tau**(2/3.) + D*tau + E*tau**(4/3.)
    elif order == 1:
        # First derivative dY/dT (see Notes).
        return (-7*B/(20*Tc*(-T/Tc + 1)**(13/20))
                - 2*C/(3*Tc*(-T/Tc + 1)**(1/3))
                - D/Tc - 4*E*(-T/Tc + 1)**(1/3)/(3*Tc))
    elif order == -1:
        # Indefinite integral of Y dT (see Notes).
        return (A*T - 20*B*Tc*(-T/Tc + 1)**(27/20)/27
                - 3*C*Tc*(-T/Tc + 1)**(5/3)/5 + D*(-T**2/(2*Tc) + T)
                - 3*E*Tc*(-T/Tc + 1)**(7/3)/7)
    elif order == -1j:
        # 3x increase in speed - cse via sympy
        # Indefinite integral of Y/T dT: CSE'd form of the elementary (but
        # enormous) Rubi antiderivative mentioned in Notes. The x0..x47
        # temporaries are machine-generated common subexpressions; do not
        # attempt to simplify by hand.
        x0 = log(T)
        x1 = 0.5*x0
        x2 = 1/Tc
        x3 = T*x2
        x4 = -x3 + 1
        x5 = 1.5*C
        x6 = x4**0.333333333333333
        x7 = 2*B
        x8 = x4**0.05
        x9 = log(-x6 + 1)
        x10 = sqrt(3)
        x11 = x10*atan(x10*(2*x6 + 1)/3)
        x12 = sqrt(5)
        x13 = 0.5*x12
        x14 = x13 + 0.5
        x15 = B*x14
        x16 = sqrt(x13 + 2.5)
        x17 = 2*x8
        x18 = -x17
        x19 = -x13
        x20 = x19 + 0.5
        x21 = B*x20
        x22 = sqrt(x19 + 2.5)
        x23 = B*x16
        x24 = 0.5*sqrt(0.1*x12 + 0.5)
        x25 = x12 + 1
        x26 = 4*x8
        x27 = -x26
        x28 = sqrt(10)*B/sqrt(x12 + 5)
        x29 = 2*x12
        x30 = sqrt(x29 + 10)
        x31 = 1/x30
        x32 = -x12 + 1
        x33 = 0.5*B*x22
        x34 = -x2*(T - Tc)
        x35 = 2*x34**0.1
        x36 = x35 + 2
        x37 = x34**0.05
        x38 = x30*x37
        x39 = 0.5*B*x16
        x40 = x37*sqrt(-x29 + 10)
        x41 = 0.25*x12
        x42 = B*(-x41 + 0.25)
        x43 = x12*x37
        x44 = x35 + x37 + 2
        x45 = B*(x41 + 0.25)
        x46 = -x43
        x47 = x35 - x37 + 2
        return A*x0 + 2.85714285714286*B*x4**0.35 - C*x1 + C*x11 + D*x0 - D*x3 - E*x1 - E*x11 + 0.75*E*x4**1.33333333333333 + 3*E*x6 + 1.5*E*x9 - x15*atan(x14*(x16 + x17)) + x15*atan(x14*(x16 + x18)) - x21*atan(x20*(x17 + x22)) + x21*atan(x20*(x18 + x22)) + x23*atan(x24*(x25 + x26)) - x23*atan(x24*(x25 + x27)) - x28*atan(x31*(x26 + x32)) + x28*atan(x31*(x27 + x32)) - x33*log(x36 - x38) + x33*log(x36 + x38) + x39*log(x36 - x40) - x39*log(x36 + x40) + x4**0.666666666666667*x5 - x42*log(x43 + x44) + x42*log(x46 + x47) + x45*log(x43 + x47) - x45*log(x44 + x46) + x5*x9 + x7*atan(x8) - x7*atanh(x8)
    else:
        raise ValueError(order_not_found_msg)
def EQ127(T, A, B, C, D, E, F, G, order=0):
    r'''DIPPR Equation #127. Rarely used, and then only in calculating
    ideal-gas heat capacity. All 7 parameters are required.

    The equation is a constant plus three Einstein-type vibrational terms,
    one for each of the coefficient pairs (B, C), (D, E) and (F, G):

    .. math::
        Y = A + \sum_{(p, \theta)} p \frac{(\theta/T)^2 e^{\theta/T}}
        {\left(e^{\theta/T} - 1\right)^2}

    Parameters
    ----------
    T : float
        Temperature, [K]
    A-G : float
        Parameter for the equation; chemical and property specific [-]
    order : int, optional
        Order of the calculation. 0 for the property itself; 1 for its first
        derivative with respect to T; -1 for the indefinite integral of the
        property with respect to T; -1j for the indefinite integral of the
        property divided by T with respect to T. Any other order raises.

    Returns
    -------
    Y : float
        Property [constant-specific; if order == 1, property/K; if order == -1,
        property*K; if order == -1j, unchanged from default]

    Examples
    --------
    Ideal gas heat capacity of methanol; DIPPR coefficients normally in
    J/kmol/K

    >>> EQ127(20., 3.3258E4, 3.6199E4, 1.2057E3, 1.5373E7, 3.2122E3, -1.5318E7, 3.2122E3)
    33258.0

    References
    ----------
    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801
       DIPPR/AIChE
    '''
    if order == 0:
        def einstein_term(coef, theta):
            # One Einstein-type vibrational contribution.
            ex = exp(theta/T)
            return coef*((theta/T)**2*ex/(ex - 1)**2)
        return (A + einstein_term(B, C) + einstein_term(D, E)
                + einstein_term(F, G))
    elif order == 1:
        def dterm(coef, theta):
            # d/dT of one Einstein term (SymPy form).
            ex = exp(theta/T)
            em1 = ex - 1
            return (-coef*theta**3*ex/(T**4*em1**2)
                    + 2*coef*theta**3*exp(2*theta/T)/(T**4*em1**3)
                    - 2*coef*theta**2*ex/(T**3*em1**2))
        return dterm(B, C) + dterm(D, E) + dterm(F, G)
    elif order == -1:
        def iterm(coef, theta):
            # Indefinite integral dT of one Einstein term.
            return coef*theta**2/(theta*exp(theta/T) - theta)
        return A*T + iterm(B, C) + iterm(D, E) + iterm(F, G)
    elif order == -1j:
        def iterm_over_T(coef, theta):
            # Indefinite integral of (term / T) dT.
            ex = exp(theta/T)
            return coef*theta**2*(1/(theta*T*ex - theta*T) + 1/(theta*T)
                                  - log(ex - 1)/theta**2)
        return (A*log(T) + iterm_over_T(B, C) + iterm_over_T(D, E)
                + iterm_over_T(F, G))
    else:
        raise ValueError(order_not_found_msg)
# Map of each DIPPR equation function to the `order` values it implements:
# 0 = property itself, 1/2/3 = derivatives with respect to T,
# -1 = indefinite integral of Y dT, -1j = indefinite integral of (Y/T) dT.
dippr_eq_supported_orders = {
    EQ100: (0, 1, -1, -1j),
    EQ101: (0, 1, 2, 3),
    EQ102: (0, 1, -1, -1j),
    EQ104: (0, 1, -1, -1j),
    EQ105: (0, 1, 2, 3),
    EQ106: (0, 1, 2, 3),
    EQ107: (0, 1, -1, -1j),
    EQ114: (0, 1, -1, -1j),
    EQ115: (0, 1, 2, 3),
    EQ116: (0, 1, -1, -1j),
    EQ127: (0, 1, -1, -1j),
}
|
<gh_stars>1-10
# GA0 DEAP_GA
import json
import math
import numpy as np
import os
import random
import sys
import threading
import time
from deap import base
from deap import creator
from deap import tools
from deap import algorithms
import eqpy
# Global variable names we are going to set from the JSON settings file
# (load_settings() injects each of these into this module's namespace via
# globals(), so the rest of the module uses them as bare names).
global_settings = ["num_iter", "num_pop", "sigma", "mate_pb", "mutate_pb"]
def i2s(i):
    """Render a two-gene individual as a bracketed string with 3 decimals."""
    x1, x2 = i[0], i[1]
    return "[%0.3f,%0.3f]" % (x1, x2)
def obj_func(x):
    """Placeholder evaluator registered with DEAP's toolbox.

    Actual fitness evaluation happens externally through queue_map / EQ-Py,
    so this function must never run. Raise explicitly instead of using
    `assert False`, which is silently stripped under ``python -O``.
    """
    raise NotImplementedError(
        "obj_func is a placeholder; evaluation is performed externally "
        "via queue_map")
def make_random_params():
    """Draw a random individual: two genes, each uniform in [-2, 2)."""
    return [random.random() * 4 - 2 for _ in range(2)]
def create_list_of_lists_string(list_of_lists, super_delim=";", sub_delim=","):
    """Serialize [[a, b], [c, d], ...] to "a,b;c,d" using the delimiters.

    Inner lists are joined with `sub_delim`; the resulting strings are
    joined with `super_delim`.
    """
    return super_delim.join(
        sub_delim.join(str(n) for n in sub) for sub in list_of_lists
    )
def queue_map(obj_func, pops):
    """Evaluate a population externally via the EQ-Py queues.

    `obj_func` is ignored (a dummy for DEAP compatibility). `pops` looks
    like [[x1, x2], [x1, x2], ...]; the serialized population is pushed to
    the OUT queue and the fitnesses read back from the IN queue as a
    ';'-separated string. Returns a list of one-element fitness tuples.
    """
    if not pops:
        return []
    eqpy.OUT_put(create_list_of_lists_string(pops))
    raw = eqpy.IN_get()
    return [(float(fitness),) for fitness in raw.split(';')]
def mutate_Gaussian_float(x):
    """Return *x* perturbed by zero-mean Gaussian noise with std `sigma`.

    `sigma` is a module global injected from the settings file by
    load_settings().
    """
    global sigma
    return x + random.gauss(0, sigma)
# Returns a tuple of one individual
def custom_mutate(individual, indpb):
    """Gaussian-mutate every gene of *individual* in place.

    NOTE(review): `indpb` is accepted for DEAP toolbox compatibility but is
    not used — every gene is always mutated. Confirm whether a per-gene
    mutation probability was intended.
    """
    old_individual = i2s(individual)
    # Slice assignment keeps the same list object (and its fitness attribute).
    individual[:] = [mutate_Gaussian_float(gene) for gene in individual]
    print("mutate: %s to: %s" % (old_individual, i2s(individual)))
    return individual,
def read_in_params_csv(csv_file_name):
    """Load a parameter CSV (e.g. "params_for_deap.csv") into a DataFrame.

    Returns:
        pandas.DataFrame with one column per parameter.
    """
    # Imported locally: pandas was never imported at module level, so the
    # original raised NameError on `pd` whenever this was called.
    import pandas as pd
    return pd.read_csv(csv_file_name)
def cxUniform(ind1, ind2, indpb):
    """Thin wrapper over DEAP's uniform crossover, returning a plain tuple.

    Each gene is swapped between the two individuals with probability
    `indpb`; both children are returned.
    """
    child1, child2 = tools.cxUniform(ind1, ind2, indpb)
    return child1, child2
def run():
    """Drive the DEAP GA, delegating fitness evaluation to EQ-Py queues.

    Settings (num_iter, num_pop, sigma, mate_pb, mutate_pb, seed) are read
    from a JSON file whose name arrives on the EQ-Py IN queue; they are
    injected as module globals by load_settings(). The final population,
    its fitnesses and the evolution log are pushed to the OUT queue.
    """
    eqpy.OUT_put("Settings")
    settings_filename = eqpy.IN_get()
    load_settings(settings_filename)

    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMin)
    toolbox = base.Toolbox()
    toolbox.register("attr_float", random.random)
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     make_random_params)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("evaluate", obj_func)
    toolbox.register("mate", cxUniform, indpb=mate_pb)
    toolbox.register("mutate", custom_mutate, indpb=mutate_pb)
    # tournsize must be an int: on Python 3, num_pop / 2 is a float and makes
    # tools.selTournament fail inside range().
    toolbox.register("select", tools.selTournament, tournsize=num_pop // 2)
    toolbox.register("map", queue_map)

    pop = toolbox.population(n=num_pop)
    hof = tools.HallOfFame(2)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    # num_iter-1 generations since the initial population is evaluated once first
    pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=mutate_pb,
                                   ngen=num_iter - 1,
                                   stats=stats, halloffame=hof, verbose=True)

    fitnesses = [str(p.fitness.values[0]) for p in pop]
    eqpy.OUT_put("FINAL")
    # return the final population
    eqpy.OUT_put("{0}\n{1}\n{2}".format(create_list_of_lists_string(pop), ';'.join(fitnesses), log))
def load_settings(settings_filename):
    """Load GA settings from a JSON file, injecting them as module globals.

    Every name in `global_settings` is copied from the JSON object into this
    module's namespace, and the RNG is seeded with settings["seed"]. Exits
    the process (status 1) on a missing file or a missing key.
    """
    print("Reading settings: '%s'" % settings_filename)
    try:
        with open(settings_filename) as fp:
            settings = json.load(fp)
    except IOError:
        print("Could not open: '%s'" % settings_filename)
        print("PWD is: '%s'" % os.getcwd())
        sys.exit(1)
    try:
        for s in global_settings:
            globals()[s] = settings[s]
        random.seed(settings["seed"])
    except KeyError as e:
        print("Settings file (%s) does not contain key: %s" % (settings_filename, str(e)))
        sys.exit(1)
    # Fixed: the original used a Python 2 print statement here
    # (`print "num_iter: ", num_iter`), a SyntaxError on Python 3.
    print("num_iter: ", num_iter)
    print("Settings loaded.")
|
import os
from models.cdmf import CDMF
from models.cdmf2 import CDMF2
from models.convmf import ConvMF
from data_load import DataLoad
import hyperparams as hp
from tqdm import tqdm
import numpy as np
import tensorflow as tf
def load_model(data, model_name):
    """Build a recommender model plus a run-identifier string.

    Args:
        data: DataLoad instance providing corpus/vocabulary sizes.
        model_name: 'cdmf' or 'convmf' (case-insensitive).

    Returns:
        (model, name) where name encodes the model, dataset and the
        user/movie L2 regularization weights.

    Raises:
        ValueError: for an unrecognized model_name. (The original fell
        through and raised UnboundLocalError on `model` instead.)
    """
    kwargs = {
        'num_all_users': data.num_all_users,
        'num_all_movies': data.num_all_movies,
        'num_all_info': data.num_all_info,
        'num_all_main_actors': data.num_all_main_actors,
        'vocab_size': data.vocab_size,
        'embedding_size': hp.EMBEDDING_SIZE,
        'feature_size': hp.FEATURE_SIZE,
        'forced_seq_len': hp.FORCED_SEQ_LEN,
        'num_most_info': data.num_most_info,
        'num_main_actors': data.num_main_actors,
        'dim_hidden1': hp.DIM_HIDDEN1,
        'dim_hidden2': hp.DIM_HIDDEN2,
        'dim_lantent': hp.DIM_LANTENT,
        'filters_size_list': hp.FILTERS_SIZE_LIST,
        'num_filters': hp.NUM_FILTERS,
        'l2_reg_lambda_u': hp.L2_REG_LAMBDA_U,
        'l2_reg_lambda_m': hp.L2_REG_LAMBDA_M,
        'l2_reg_lambda_cnn': hp.L2_REG_LAMBDA_CNN,
        'l2_reg_lambda_info1': hp.L2_REG_LAMBDA_INFO1,
        'l2_reg_lambda_info2': hp.L2_REG_LAMBDA_INFO2,
        'l2_reg_lambda_actors1': hp.L2_REG_LAMBDA_ACTORS1,
        'l2_reg_lambda_actors2': hp.L2_REG_LAMBDA_ACTORS2}
    if model_name.lower() == 'cdmf':
        model = CDMF(**kwargs)
    elif model_name.lower() == 'convmf':
        # ConvMF uses its own fixed hyperparameters rather than hp.*.
        model = ConvMF(num_all_users=data.num_all_users,
                       num_all_movies=data.num_all_movies,
                       vocab_size=8000,
                       embedding_size=200,
                       forced_seq_len=hp.FORCED_SEQ_LEN,
                       dim_lantent=50,
                       filters_size_list=[3, 4, 5],
                       num_filters=100,
                       l2_reg_lambda_cnn=0.02,
                       l2_reg_lambda_u=0.02,
                       l2_reg_lambda_m=0.02)
    else:
        raise ValueError("unknown model_name: {!r}".format(model_name))
    return model, '{}_{}_{}_{}'.format(model_name, hp.DATA, model.l2_reg_lambda_u, model.l2_reg_lambda_m)
if __name__ == '__main__':
    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=False)
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            # Build the dataset pipeline.
            # NOTE(review): `paly_times` is the keyword DataLoad actually
            # accepts (typo preserved from its definition).
            data = DataLoad(data_path=hp.DATA_PATH,
                            fnames=hp.FNAMES,
                            forced_seq_len=hp.FORCED_SEQ_LEN,
                            vocab_size=hp.VOCAB_SIZE,
                            paly_times=hp.PLAY_TIMES,
                            num_main_actors=hp.NUM_MAIN_ACTORS,
                            batch_size=hp.BATCH_SIZE,
                            num_epochs=hp.NUM_EPOCHS)
            model, model_name = load_model(data, 'cdmf')
            # define graph
            # NOTE(review): `construct_netword` is the method name as defined
            # on the model class (typo preserved).
            model.construct_netword()
            global_step = tf.Variable(0, name='global_step', trainable=False)
            optimizer = tf.train.AdamOptimizer(learning_rate=hp.LEARNING_RATE)
            train_op = optimizer.minimize(model.loss_op, global_step=global_step)
            # tensorboard graph visualizing
            graph_writer = tf.summary.FileWriter(logdir='../graph/{}/'.format(model_name))
            graph_writer.add_graph(sess.graph)
            # # Summaries for loss and rmse
            # loss_summary = tf.summary.scalar("loss", model.loss_op)
            # rmse_summary = tf.summary.scalar("rmse", model.rmse_op)
            # Train Summaries
            # train_summary_op = tf.summary.merge([loss_summary, rmse_summary])
            # train_summary_dir = os.path.join("../summaries/{}".format(model_name), "train")
            # train_summary_writer = tf.summary.FileWriter(
            #     train_summary_dir, sess.graph)
            # saving dev evaluating reshults
            if not os.path.exists('../results/{}/'.format(model_name)):
                os.makedirs('../results/{}/'.format(model_name))
            with open('../results/{}/res.csv'.format(model_name), 'a', encoding='utf-8') as resfile:
                resfile.write('step,loss,rmse\n')
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            # Checkpointing / LR-decay bookkeeping: dev RMSE must beat
            # init_rmse before any checkpoint is written.
            bad_count = 0
            init_rmse = 0.86
            last_best_rmse = init_rmse
            last_best_loss = 0
            for epoch in range(hp.NUM_EPOCHS):
                # load trainset
                train_batches = data.load_data('train')
                # training
                for (batch_X_user, batch_X_movie), batch_Y in tqdm(train_batches):
                    # unpack
                    batch_u_oids, batch_bu_seq = batch_X_user
                    batch_m_oids, batch_info, batch_actors, batch_des, batch_bm_seq = batch_X_movie
                    batch_r_seq = batch_Y
                    feed_dict = {
                        model.m_oids:batch_m_oids,
                        model.info: batch_info,
                        model.actors: batch_actors,
                        model.descriptions: batch_des,
                        model.u_oids: batch_u_oids,
                        model.r_seq: batch_r_seq,
                        model.dropout_keep_prob: hp.DROPOUT_KEEP_PROB}
                    _, step = sess.run(
                        [train_op, global_step],
                        feed_dict=feed_dict)
                    if step % hp.EVAL_EVERY == 0:
                        # load devset
                        dev_iter = data.load_data('dev')
                        # eval in devset
                        dev_loss, dev_mse, count = 0.0, 0.0, 0
                        for (sub_X_user, sub_X_movie), sub_Y in tqdm(dev_iter):
                            # unpack
                            sub_u_oids, sub_bu_seq = sub_X_user
                            sub_m_oids, sub_info, sub_actors, sub_des, sub_bm_seq = sub_X_movie
                            sub_r_seq = sub_Y
                            # NOTE(review): dev evaluation feeds the training
                            # dropout keep-prob rather than 1.0 — confirm
                            # dropout at eval time is intended.
                            dev_feed_dict = {
                                model.m_oids:sub_m_oids,
                                model.info: sub_info,
                                model.actors: sub_actors,
                                model.descriptions: sub_des,
                                model.u_oids: sub_u_oids,
                                model.r_seq: sub_r_seq,
                                model.dropout_keep_prob: hp.DROPOUT_KEEP_PROB}
                            sub_loss, sub_mse = sess.run(
                                [model.loss_op, model.mse_op],
                                feed_dict=dev_feed_dict)
                            dev_loss += sub_loss
                            dev_mse += sub_mse
                            count += 1
                        # Mean loss over dev batches; RMSE from the mean MSE.
                        dev_loss = dev_loss / count
                        dev_rmse = np.sqrt(dev_mse / count)
                        print('step:{} | loss:{} | rmse:{}'.format(step, dev_loss, dev_rmse))
                        # saving loss and rmse in devset
                        with open('../results/{}/res.csv'.format(
                                model_name), 'a', encoding='utf-8') as resfile:
                            resfile.write('{},{},{}\n'.format(step, dev_loss, dev_rmse))
                        # saving good model variables
                        if dev_rmse < last_best_rmse:
                            last_best_rmse = dev_rmse
                            last_best_loss = dev_loss
                            if not os.path.exists('../model_ckpt/{}/'.format(model_name)):
                                os.makedirs('../model_ckpt/{}/'.format(model_name))
                            saver.save(
                                sess, '../model_ckpt/{}/{}.ckpt'.format(model_name, dev_rmse))
                        elif last_best_rmse < init_rmse and last_best_loss < dev_loss:
                            # descreaing learning rate
                            # Halve the LR each time dev loss regresses after
                            # a best model exists; a new optimizer is created,
                            # resetting Adam's moment estimates.
                            bad_count += 1
                            with tf.variable_scope('train_{}'.format(bad_count)):
                                new_optimizer = tf.train.AdamOptimizer(
                                    learning_rate=hp.LEARNING_RATE / 2**bad_count)
                                train_op = new_optimizer.minimize(model.loss_op, global_step=global_step)
                            sess.run(tf.variables_initializer(new_optimizer.variables()))
|
<reponame>utanashati/curiosity-recast<gh_stars>0
import torch
import torch.nn.functional as F
import torch.optim as optim
from model import IntrinsicCuriosityModule2
from itertools import chain # ICM
import os
import signal
class Killer:
    """Cooperative SIGTERM handler.

    Flips `kill_now` when the process receives SIGTERM so long-running
    loops can check it and exit cleanly.
    """
    # Class-level default; exit() sets an instance attribute that shadows it.
    kill_now = False
    def __init__(self):
        # Register exit() as this process's SIGTERM handler.
        signal.signal(signal.SIGTERM, self.exit)
    def exit(self, signum, frame):
        # (signum, frame) is the handler signature required by `signal`.
        self.kill_now = True
def ensure_shared_grads(model, shared_model):
    """Point the shared model's gradients at the local worker's gradients.

    Standard A3C trick: `_grad` is assigned (not copied), so the shared
    optimizer steps using this worker's gradients. If the shared model
    already has gradients this step, bail out and leave them untouched.
    """
    pairs = zip(model.parameters(), shared_model.parameters())
    for local_param, shared_param in pairs:
        if shared_param.grad is not None:
            # Another assignment already happened; keep the existing grads.
            return
        shared_param._grad = local_param.grad
def train_uniform(
    rank, args, shared_curiosity, counter,
    lock, pids, optimizer, train_inv_losses,
    train_forw_losses, env
):
    """Worker loop: train the shared ICM curiosity module under a
    uniformly random policy.

    Each worker runs rollouts in its own copy of `env` (seeded by rank),
    accumulates the ICM inverse and forward losses over a rollout, and
    pushes gradients to `shared_curiosity` through `optimizer`. Loops
    until the process receives SIGTERM (see Killer). Per-worker losses are
    reported through the shared `train_inv_losses`/`train_forw_losses`
    arrays (indexed by rank - 1; rank 0 is presumably the test worker).
    """
    pids.append(os.getpid())
    torch.manual_seed(args.seed + rank)
    # env = create_picolmaze_env(args.num_rooms, args.colors, args.periodic)
    env = env.copy()
    env.seed(args.seed + rank)
    curiosity = IntrinsicCuriosityModule2(  # ICM
        args.num_stack, env.action_space, args.epsilon)
    if optimizer is None:
        # No shared optimizer supplied: each worker optimizes independently.
        optimizer = optim.Adam(  # ICM
            chain(shared_curiosity.parameters()),
            lr=args.lr)
    curiosity.train()  # ICM
    state = env.reset()
    state = torch.from_numpy(state)
    done = True
    episode_length = 0
    killer = Killer()
    while not killer.kill_now:
        # Sync with the shared model
        curiosity.load_state_dict(shared_curiosity.state_dict())  # ICM
        # NOTE(review): cx/hx look unused below (no recurrent policy here);
        # presumably kept for parity with the A3C worker — confirm.
        if done:
            cx = torch.zeros(1, 256)
            hx = torch.zeros(1, 256)
        else:
            cx = cx.detach()
            hx = hx.detach()
        inv_loss = torch.tensor(0.0)  # ICM
        forw_loss = torch.tensor(0.0)  # ICM
        curiosity_reward = torch.tensor(0.0)  # ICM
        for step in range(args.num_steps):
            if done:
                episode_length = 0
                if args.hard_reset:
                    print("hard_reset")
                    state = env.hard_reset()
                else:
                    print("reset")
                    state = env.reset()
                state = torch.from_numpy(state)
            episode_length += 1
            # Uniform random policy: all actions equally likely.
            logit = torch.ones(1, env.action_space.n)
            prob = F.softmax(logit, dim=-1)
            action = prob.multinomial(num_samples=1).flatten().detach()
            state_old = state  # ICM
            state, _, done, _ = env.step(action)
            state = torch.from_numpy(state)
            # <---ICM---
            inv_out, phi2, forw_out_mean, forw_out_std, l2_loss, \
                bayesian_loss, current_curiosity_reward = \
                curiosity(
                    state_old.unsqueeze(0), action,
                    state.unsqueeze(0))
            # In noreward-rl:
            # self.invloss = tf.reduce_mean(
            #     tf.nn.sparse_softmax_cross_entropy_with_logits(logits, aindex),
            #     name="invloss")
            # self.forwardloss = 0.5 * tf.reduce_mean(tf.square(tf.subtract(f, phi2)), name='forwardloss')
            # self.forwardloss = self.forwardloss * 288.0 # lenFeatures=288. Factored out to make hyperparams not depend on it.
            # Inverse model: cross-entropy of the predicted action.
            current_inv_loss = F.nll_loss(F.log_softmax(inv_out, dim=-1), action)
            if args.new_curiosity:
                current_forw_loss = bayesian_loss
                if args.add_l2:
                    current_forw_loss += l2_loss
            else:
                # Classic ICM: the L2 forward loss doubles as the reward.
                current_forw_loss = l2_loss
                current_curiosity_reward = l2_loss
            inv_loss += current_inv_loss
            forw_loss += current_forw_loss
            curiosity_reward += current_curiosity_reward
            # ---ICM--->
            done = done or episode_length >= args.max_episode_length
            with lock:
                counter.value += 1
            if done:
                break
        # <---ICM---
        # Average the accumulated losses over the rollout length.
        inv_loss = inv_loss / episode_length
        forw_loss = forw_loss / episode_length
        curiosity_reward = curiosity_reward / episode_length
        # Standard ICM mixing: beta weights forward vs. inverse loss.
        curiosity_loss = (1 - args.beta) * inv_loss + args.beta * forw_loss
        # ---ICM--->
        optimizer.zero_grad()
        train_inv_losses[rank - 1] = float((inv_loss).detach().item())
        train_forw_losses[rank - 1] = float((forw_loss).detach().item())
        # Skip the update when there was no curiosity signal this rollout.
        if curiosity_reward != 0:
            curiosity_loss.backward()  # ICM
            torch.nn.utils.clip_grad_norm_(curiosity.parameters(), args.max_grad_norm)
            ensure_shared_grads(curiosity, shared_curiosity)
            optimizer.step()
    env.close()
|
<filename>foreman/data_refinery_foreman/foreman/management/commands/test_update_experiment_metadata.py
from unittest.mock import patch
from django.test import TransactionTestCase
from data_refinery_common.models import Experiment, ExperimentSampleAssociation, Sample
from data_refinery_foreman.foreman.management.commands.update_experiment_metadata import Command
class SurveyTestCase(TransactionTestCase):
def tearDown(self):
Experiment.objects.all().delete()
def test_sra_experiment_missing_metadata(self):
"""Tests that an SRA experiment has its missing metadata added."""
# 1. Create an experiment with a bad title
BAD_TITLE = "GEO accession GSE1337 is currently private\
and is scheduled to be released on Jan 01, 1970."
experiment = Experiment()
experiment.accession_code = "DRP003977"
experiment.source_database = "SRA"
experiment.title = BAD_TITLE
experiment.save()
# 2. We need to add a sample because the way that the SRA surveyor finds metadata is
# through run accessions
sample = Sample()
sample.accession_code = "DRR002116"
sample.technology = "RNA-SEQ"
sample.source_database = "SRA"
sample.title = "Not important"
sample.save()
ExperimentSampleAssociation.objects.get_or_create(experiment=experiment, sample=sample)
# 3. Setup is done, actually run the command.
command = Command()
command.handle()
# Test that the title was fixed
self.assertNotEqual(
Experiment.objects.get_or_create(accession_code=experiment.accession_code)[0].title,
BAD_TITLE,
)
# Run the command again to make sure that it does not fail if there are no changes
command = Command()
command.handle()
def test_sra_experiment_missing_alternate_accession(self):
"""Tests that an SRA experiment has its missing alternate_accession_code added."""
# 1. Create an experiment without an alternate_accession_code
experiment = Experiment()
experiment.accession_code = "SRP094947"
experiment.source_database = "SRA"
experiment.title = "Not important"
experiment.save()
# 2. We need to add a sample because the way that the SRA surveyor finds metadata is
# through run accessions
sample = Sample()
sample.accession_code = "SRR5099111"
sample.technology = "RNA-SEQ"
sample.source_database = "SRA"
sample.title = "Not important"
sample.save()
ExperimentSampleAssociation.objects.get_or_create(experiment=experiment, sample=sample)
# 3. Setup is done, actually run the command.
command = Command()
command.handle()
# 4. Refresh the experiment
experiment.refresh_from_db()
# Test that the correct alternate_accession_code was added
self.assertEquals(experiment.alternate_accession_code, "GSE92260")
def test_geo_experiment_missing_metadata(self):
"""Tests that a GEO experiment has its missing metadata added."""
# 1. Create an experiment with a bad title
BAD_TITLE = "GEO accession GSE1337 is currently private\
and is scheduled to be released on Jan 01, 1970."
experiment = Experiment()
experiment.accession_code = "GSE11915"
experiment.source_database = "GEO"
experiment.title = BAD_TITLE
experiment.save()
# 2. Setup is done, actually run the command.
command = Command()
command.handle()
# Test that the title was fixed
self.assertNotEqual(
Experiment.objects.get_or_create(accession_code=experiment.accession_code)[0].title,
BAD_TITLE,
)
# Run the command again to make sure that it does not fail if there are no changes
command = Command()
command.handle()
def test_array_express_experiment_missing_metadata(self):
    """Tests that an ArrayExpress experiment has its missing metadata added."""
    # 1. Create an experiment with a bad title
    # (same GEO placeholder title used in the GEO test above).
    BAD_TITLE = "GEO accession GSE1337 is currently private\
and is scheduled to be released on Jan 01, 1970."
    experiment = Experiment()
    experiment.accession_code = "E-MTAB-3050"
    experiment.source_database = "ARRAY_EXPRESS"
    experiment.title = BAD_TITLE
    experiment.save()
    # 2. Setup is done, actually run the command.
    command = Command()
    command.handle()
    # Test that the title was fixed (re-fetched from the DB, not the stale object).
    self.assertNotEqual(
        Experiment.objects.get_or_create(accession_code=experiment.accession_code)[0].title,
        BAD_TITLE,
    )
    # Run the command again to make sure that it does not fail if there are no changes
    command = Command()
    command.handle()
|
<filename>wpcv/scripts/pil_image_trans_ops.py
import os, sys, shutil, math, random, json, multiprocessing, threading
import cv2
import numpy as np
from PIL import Image, ImageEnhance, ImageFilter, ImageDraw
import abc
def pilimg(img):
    """Coerce the input to a PIL image.

    3-channel numpy arrays are assumed to be OpenCV BGR and are reversed to
    RGB before conversion (assumption inferred from cv2img below — confirm).
    """
    if isinstance(img, Image.Image):
        return img
    if isinstance(img, np.ndarray) and len(img.shape) == 3:
        img = img[:, :, ::-1]
    return Image.fromarray(np.array(img).astype(np.uint8))
def cv2img(img):
    """Coerce the input to an OpenCV-style array: PIL images become BGR ndarrays."""
    if not isinstance(img, Image.Image):
        # Already an array (or something array-like): pass through untouched.
        return img
    arr = np.array(img)
    if len(arr.shape) == 3:
        # RGB -> BGR channel flip for OpenCV consumers.
        arr = arr[:, :, ::-1]
    return arr
class TransBase(object):
    """Base class for random image transforms applied with a given probability."""

    def __init__(self, probability=1.):
        super(TransBase, self).__init__()
        self.probability = probability

    @abc.abstractmethod
    def tranfun(self, inputimage):
        """Apply the transform to inputimage; subclasses must implement."""
        pass

    def process(self, inputimage):
        """Apply tranfun with probability self.probability, else return the input unchanged."""
        if np.random.random() >= self.probability:
            return inputimage
        return self.tranfun(inputimage)
class RandomContrast(TransBase):
    """Randomly scale image contrast by a factor drawn from [lower, upper]."""

    def setparam(self, lower=0.5, upper=1.5):
        self.lower = lower
        self.upper = upper
        assert self.upper >= self.lower, "upper must be >= lower."
        assert self.lower >= 0, "lower must be non-negative."

    def tranfun(self, image):
        image = pilimg(image)
        # BUG FIX: this class adjusted Brightness, not Contrast — a copy-paste
        # slip from RandomBrightness (the variable was even named enh_con).
        enh_con = ImageEnhance.Contrast(image)
        return enh_con.enhance(random.uniform(self.lower, self.upper))
class RandomBrightness(TransBase):
    """Randomly scale image brightness by a factor drawn from [lower, upper]."""

    def setparam(self, lower=0.5, upper=1.5):
        self.lower = lower
        self.upper = upper
        assert self.upper >= self.lower, "upper must be >= lower."
        assert self.lower >= 0, "lower must be non-negative."

    def tranfun(self, image):
        pil = pilimg(image)
        factor = random.uniform(self.lower, self.upper)
        return ImageEnhance.Brightness(pil).enhance(factor)
class RandomColor(TransBase):
    """Randomly scale color saturation by a factor drawn from [lower, upper]."""

    def setparam(self, lower=0.5, upper=1.5):
        self.lower = lower
        self.upper = upper
        assert self.upper >= self.lower, "upper must be >= lower."
        assert self.lower >= 0, "lower must be non-negative."

    def tranfun(self, image):
        pil = pilimg(image)
        factor = random.uniform(self.lower, self.upper)
        return ImageEnhance.Color(pil).enhance(factor)
class RandomSharpness(TransBase):
    """Randomly scale sharpness by a factor drawn from [lower, upper]."""

    def setparam(self, lower=0.5, upper=1.5):
        self.lower = lower
        self.upper = upper
        assert self.upper >= self.lower, "upper must be >= lower."
        assert self.lower >= 0, "lower must be non-negative."

    def tranfun(self, image):
        pil = pilimg(image)
        factor = random.uniform(self.lower, self.upper)
        return ImageEnhance.Sharpness(pil).enhance(factor)
class Compress(TransBase):
    """JPEG round-trip at a random quality, adding compression artifacts."""

    def setparam(self, lower=5, upper=85):
        # lower/upper bound the JPEG quality passed to imencode.
        self.lower = lower
        self.upper = upper
        assert self.upper >= self.lower, "upper must be >= lower."
        assert self.lower >= 0, "lower must be non-negative."

    def tranfun(self, image):
        # Encode to an in-memory JPEG buffer at a random quality, decode back.
        img = cv2img(image)
        param = [int(cv2.IMWRITE_JPEG_QUALITY), random.randint(self.lower, self.upper)]
        img_encode = cv2.imencode('.jpeg', img, param)
        img_decode = cv2.imdecode(img_encode[1], cv2.IMREAD_COLOR)
        pil_img = pilimg(img_decode)
        # NOTE(review): `image.split()` assumes the *original* input is a PIL
        # image (an ndarray input would raise here) — confirm callers pass PIL.
        # Single-band inputs are converted back to grayscale.
        if len(image.split())==1:
            pil_img = pil_img.convert('L')
        return pil_img
class Exposure(TransBase):
    """Brighten a random rectangular region, simulating an over-exposed patch."""

    def setparam(self, lower=5, upper=10):
        # NOTE: lower/upper are kept for interface compatibility but tranfun
        # does not read them (true of the original implementation as well).
        self.lower = lower
        self.upper = upper
        assert self.upper >= self.lower, "upper must be >= lower."
        assert self.lower >= 0, "lower must be non-negative."

    def tranfun(self, image):
        image = cv2img(image)
        h, w = image.shape[:2]
        # Random rectangle (x0, y0) .. (x1, y1) inside the image.
        x0 = random.randint(0, w)
        y0 = random.randint(0, h)
        x1 = random.randint(x0, w)
        y1 = random.randint(y0, h)
        transparent_area = (x0, y0, x1, y1)
        mask = Image.new('L', (w, h), color=255)
        draw = ImageDraw.Draw(mask)
        # BUG FIX: the rectangle must be drawn BEFORE the mask is converted to
        # a numpy array. The original drew it afterwards, so the array stayed
        # all-255 and `255 - mask` was all zeros — the transform was a no-op.
        draw.rectangle(transparent_area, fill=random.randint(150, 255))
        mask = np.array(mask)
        if len(image.shape) == 3:
            mask = mask[:, :, np.newaxis]
            mask = np.concatenate([mask, mask, mask], axis=2)
        # Inside the rectangle 255 - mask is 0..105, brightening those pixels.
        reflection_result = image + (255 - mask)
        reflection_result = np.clip(reflection_result, 0, 255)
        return pilimg(reflection_result)
class Rotate(TransBase):
    """Rotate by a random angle in [lower, upper] degrees, expanding the canvas."""

    def setparam(self, lower=-5, upper=5):
        self.lower = lower
        self.upper = upper
        assert self.upper >= self.lower, "upper must be >= lower."

    def tranfun(self, image):
        img = pilimg(image)
        angle = random.uniform(self.lower, self.upper)
        return img.rotate(angle, expand=True)
class Blur(TransBase):
    """Apply Gaussian blur with a random radius drawn from [lower, upper]."""

    def setparam(self, lower=0, upper=1):
        self.lower = lower
        self.upper = upper
        assert self.upper >= self.lower, "upper must be >= lower."
        assert self.lower >= 0, "lower must be non-negative."

    def tranfun(self, image):
        img = pilimg(image)
        radius = self.lower + random.random() * (self.upper - self.lower)
        return img.filter(ImageFilter.GaussianBlur(radius=radius))
class Salt(TransBase):
    """Overwrite a fraction `rate` of random pixels with random gray values."""

    def setparam(self, rate=0.02):
        self.rate = rate

    def tranfun(self, image):
        img = pilimg(image)
        width, height = img.size
        # Mutates the PIL image in place, as the original did.
        for _ in range(int(height * width * self.rate)):
            row = int(np.random.random() * height)
            col = int(np.random.random() * width)
            img.putpixel((col, row), int(np.random.random() * 255))
        return img
class AdjustResolution(TransBase):
    """Downscale then upscale back, losing detail while keeping the original size."""

    def setparam(self, max_rate=0.95, min_rate=0.5):
        self.max_rate = max_rate
        self.min_rate = min_rate

    def tranfun(self, image):
        img = pilimg(image)
        width, height = img.size
        scale = np.random.random() * (self.max_rate - self.min_rate) + self.min_rate
        img = img.resize((int(width * scale), int(height * scale)))
        return img.resize((width, height))
class Crop(TransBase):
    """Perspective 'crop': jitter the four corners vertically by up to maxv px."""

    def setparam(self, maxv=2):
        self.maxv = maxv

    def tranfun(self, image):
        # BUG FIX: the original did `img = (image)`, so a PIL input was never
        # converted and `img.shape` raised AttributeError. Convert with
        # cv2img() first, exactly as Crop2 does.
        img = cv2img(image)
        h, w = img.shape[:2]
        org = np.array(
            [[0, np.random.randint(0, self.maxv)],
             [w, np.random.randint(0, self.maxv)],
             [0, h - np.random.randint(0, self.maxv)],
             [w, h - np.random.randint(0, self.maxv)]], np.float32)
        dst = np.array([[0, 0], [w, 0], [0, h], [w, h]], np.float32)
        M = cv2.getPerspectiveTransform(org, dst)
        res = cv2.warpPerspective(img, M, (w, h))
        return pilimg(res)
class Crop2(TransBase):
    """Crop to a box supplied as (image, left, top, right, bottom), with a
    jittered perspective-warp fallback."""

    def setparam(self, maxv_h=4, maxv_w=4):
        # Maximum vertical / horizontal jitter (pixels) for the fallback warp.
        self.maxv_h = maxv_h
        self.maxv_w = maxv_w

    def tranfun(self, image_and_loc):
        # Input is a 5-tuple: PIL image plus the crop-box coordinates.
        image, left, top, right, bottom = image_and_loc
        w, h = image.size
        # Clamp the box to the image bounds.
        left = np.clip(left,0,w-1)
        right = np.clip(right,0,w-1)
        top = np.clip(top, 0, h-1)
        bottom = np.clip(bottom, 0, h-1)
        img = cv2img(image)
        try:
            # Fast path: plain array crop of the clamped box.
            res = pilimg(img[top:bottom,left:right])
            return res
        except AttributeError as e:
            # NOTE(review): unclear which call raises AttributeError here
            # (likely pilimg on a degenerate/empty crop) — confirm; on
            # failure we log the box and fall through to the warp below.
            print( left, top, right, bottom)
        # Fallback: perspective-warp a jittered quadrilateral around the box
        # into a (w, h) output, where w/h are the box dimensions.
        h = bottom - top
        w = right - left
        org = np.array([[left - np.random.randint(0, self.maxv_w), top + np.random.randint(-self.maxv_h, self.maxv_h//2)],
                        [right + np.random.randint(0, self.maxv_w), top + np.random.randint(-self.maxv_h, self.maxv_h//2)],
                        [left - np.random.randint(0, self.maxv_w), bottom - np.random.randint(-self.maxv_h, self.maxv_h//2)],
                        [right + np.random.randint(0, self.maxv_w), bottom - np.random.randint(-self.maxv_h, self.maxv_h//2)]], np.float32)
        dst = np.array([[0, 0], [w, 0], [0, h], [w, h]], np.float32)
        M = cv2.getPerspectiveTransform(org,dst)
        res = cv2.warpPerspective(img,M,(w,h))
        return pilimg(res)
class Stretch(TransBase):
    """Randomly stretch or squeeze the image horizontally; height is unchanged."""

    def setparam(self, max_rate = 1.2,min_rate = 0.8):
        self.max_rate = max_rate
        self.min_rate = min_rate

    def tranfun(self, image):
        img = pilimg(image)
        width, height = img.size
        scale = np.random.random() * (self.max_rate - self.min_rate) + self.min_rate
        return img.resize((int(width * scale), height))
|
# https://python-course.eu/advanced-python/generators-iterators.php
"""
Generators are a special kind of function, which enable us to implement or generate iterators.
Mostly, iterators are implicitly used, like in the for-loop of Python.
"""
# A list is not an iterator, but can be used as an iterable
import random
from itertools import permutations
from random import choice
from typing import Generator
# Iterate a list of city names (an iterable; the for-loop builds the iterator).
cities = ['Sorocaba', 'São Paulo', 'Rio de Janeiro', 'Belo Horizonte',
          'Salvador', 'Brasília', 'Minas Gerais', 'Fortaleza']
for location in cities:
    print(f'Location: {location}')
# Call next and iter explicitly.
expertises = ["Python Begginer", "Python Intermediate", "Python Proficient",
              "Python Advanced", "Python Expert"]
expertises_iterator = iter(expertises)
print(f'Call "next" for the first time: {next(expertises_iterator)}')
print(f'Call "next" for the second time: {next(expertises_iterator)}')
# Simulate a loop with StopIteration.
other_cities = ['<NAME>', 'Maranhão', 'Votorantim', 'Recife',
                'Belém', 'Pernambuco', 'Cuiabá', 'Manaus']
city_iterator = iter(other_cities)
# NOTE(review): an iterator object is always truthy, so this is effectively
# `while True`; the loop only terminates via the StopIteration break.
while city_iterator:
    try:
        print(f'Location: {next(city_iterator)}')
    except StopIteration:
        break
# A dictionary is an iterable: iterating it yields its keys.
capitals = {'Brazil': 'Brasília', 'Argentina': 'Buenos Aires',
            'França': 'Paris', 'Holanda': 'Amsterdã', 'Alemanha': 'Berlim',
            'Suíça': 'Bern', 'Áustria': 'Viena'}
for country in capitals:
    print(f'Capital of {country}: {capitals[country]}')
# Implements an iterator with a Class
class Cycle(object):
    """Iterator that cycles endlessly over the elements of an iterable."""

    def __init__(self, iterable) -> None:
        self.iterable = iterable
        self.iterator_obj = iter(iterable)

    def __iter__(self) -> object:
        return self

    def __next__(self) -> object:
        try:
            return next(self.iterator_obj)
        except StopIteration:
            # Exhausted: restart from the beginning of the iterable.
            self.iterator_obj = iter(self.iterable)
            # BUG FIX: the original looped `while True` here, so an empty
            # iterable hung forever. If the fresh iterator is also empty,
            # let StopIteration propagate instead.
            return next(self.iterator_obj)
# Demo: Cycle restarts from the beginning once the list is exhausted.
x = Cycle(["abc", "def", "ghi"])
for i in range(10):
    print(next(x), end=", ")
# Implements an iterator with a Generator (THE PYTHONIC WAY)
def city_generator():
    """Yield a fixed tour of six cities, one per next() call."""
    for town in ("Hamburg", "Konstanz", "Berlin",
                 "Zurich", "Schaffhausen", "Stuttgart"):
        yield town
# Drive the generator by hand; a seventh next() would raise StopIteration.
city = city_generator()
print(f'First element: {next(city)}')
print(f'Second element: {next(city)}')
print(f'Third element: {next(city)}')
print(f'Fourth element: {next(city)}')
print(f'Fifth element: {next(city)}')
print(f'Sixth element: {next(city)}')
# print(f'Seventh element: {next(city)}') # This will raise an error
# Method Of Operation
"""
Method of working:
- A generator is called like a function. Its return value is an iterator, i.e. a
generator object. The code of the generator will not be executed at this stage
- The iterator can be used by calling the next method. The first time the
execution starts like a function, i.e. the first line of code within the body
of the iterator. The code is executed until a yield statement is reached
- yield returns the value of the expression, which is following the keyword
yield. This is like a function, but Python keeps track of the position of
this yield and the state of the local variables is stored for the next call.
At the next call, the execution continues with the statement following the
yield statement and the variables have the same values as they had in the
previous call.
- The iterator is finished, if the generator body is completely worked through
or if the program flow encounters a return statement without a value.
"""
def counter_current(firstval=0, step=1):
    """Count upward from firstval forever, in increments of step."""
    value = firstval
    while True:
        yield value
        value += step
# Take ten values from the default counter, then restart with float arguments.
counter = counter_current()
for i in range(10):
    print(next(counter), end=", ")
print(type(counter))
start_value = 2.1
# NOTE(review): despite the name, stop_value is passed as the *step*.
stop_value = 0.3
print('\n\nNew Counter value:')
counter = counter_current(start_value, stop_value)
for i in range(10):
    new_value = next(counter)
    print(f'{new_value:2.2f}', end=", ")
print(type(new_value))
# Fibonacci Sequence as an Generator
def fibonacci(n):
    """Yield the Fibonacci numbers F(0) .. F(n) — n + 1 values in total."""
    current, following = 0, 1
    for _ in range(n + 1):
        yield current
        current, following = following, current + following
# Print the Fibonacci numbers up to index 5.
f = fibonacci(5)
for x in f:
    print(x, " ", end="")
print()
def fibonacci_infinite():
    """Generates an infinite sequence of Fibonacci numbers on demand"""
    current, following = 0, 1
    while True:
        yield current
        current, following = following, current + following
# Consume the infinite generator but stop after eleven values.
f = fibonacci_infinite()
counter = 0
for x in f:
    print(x, " ", end="")
    counter += 1
    if (counter > 10):
        break
print()
# Using a 'return' in to a Generator
def generator() -> Generator:
    """Yield 1 then 2; demo of how raising StopIteration would end a generator."""
    yield 1
    # raise StopIteration(42)  # would terminate the generator here
    yield 2
# g = generator()
# next(g)
# next(g)
def generator_return() -> Generator:
    """Yield 1 then 2; demo of how a `return` would end a generator early."""
    yield 1
    # return 42  # would terminate the generator here
    yield 2
# gr = generator_return()
# next(gr)
# next(gr)
# Send Methods / Coroutines
def simple_coroutine():
    """Minimal coroutine: announces start, yields "foo", then prints what it receives."""
    print('-> coroutine started!')
    received = yield "foo"
    print(f'-> coroutine received: {received}')
# cr = simple_coroutine()
# print(cr)
# next(cr)
# return_value = cr.send # ("bar")
# print(f'-> coroutine send return: {return_value}') # This will raise an error
# Another Example for Send
def song_gen(song_list):
    """Yield random songs; sending a new title appends it and echoes it back."""
    submitted = None
    while True:
        if submitted is not None and submitted not in song_list:
            song_list.append(submitted)
            submitted = yield submitted
        else:
            submitted = yield choice(song_list)
# A seed playlist; sending a title adds it to this list in place.
songs = ["<NAME> - <NAME>",
         "Bluesette - Toots Thielemans",
         "Six Marimbas - <NAME>",
         "Riverside - <NAME>",
         "Not for Radio - Nas",
         "What's going on - Taste",
         "On Stream - <NAME>",
         "<NAME> - Fayrouz",
         "Ik Leef Niet <NAME>or Jou - <NAME>",
         "Δέκα λεπτά - Αθηνά Ανδρεάδη"]
radio_program = song_gen(songs)
next(radio_program)  # prime the coroutine
for i in range(3):
    print(next(radio_program))
radio_program.send("Distorted Angels - Archive")
print(songs)
# Send with new musics
def song_generator(song_list):
    """Yield random songs; accepts two kinds of sent values:

    ("-songlist-", new_list) replaces the playlist; (title, performer) is
    formatted as "title - performer", appended if new, and echoed back.
    """
    received = None
    while True:
        if received is None:
            received = yield choice(song_list)
        elif received[0] == "-songlist-":
            song_list = received[1]
            received = yield choice(song_list)
        else:
            title, performer = received
            formatted = title + " - " + performer
            if formatted not in song_list:
                song_list.append(formatted)
                received = yield formatted
            else:
                received = yield choice(song_list)
# BUG FIX: both lists were missing a comma after their first (songs1) /
# third (songs2) entry, so adjacent string literals were silently
# concatenated and two songs merged into one playlist entry.
songs1 = ["Après un Rêve - <NAME>",
          "On Stream - <NAME>",
          "<NAME> - <NAME>",
          "Les barricades mystérieuses - <NAME>",
          "Monday - <NAME>"]
songs2 = ["Dünyadan Uszak - Pinhani",
          "Again - Archive",
          "If I had a Hear - <NAME>",
          "Every you, every me - Placebo",
          "Familiar - <NAME>"]
radio_prog = song_generator(songs1)
for i in range(5):
    print(next(radio_prog))
# Swap in the second playlist via the "-songlist-" message.
radio_prog.send(("-songlist-", songs2))
for i in range(5):
    print(next(radio_prog))
# The throw Method
def counter_gen(firstval=0, step=1):
    """Counting coroutine.

    next() advances by step; send(v) resets the counter to v; throwing any
    exception into it yields the state tuple (firstval, step, counter).
    """
    counter = firstval
    while True:
        try:
            new_counter_value = yield counter
            # BUG FIX: the original test was inverted (it stepped when a value
            # WAS sent and assigned None otherwise), so plain next() calls
            # yielded None forever. Compare the corrected `count` below.
            if new_counter_value is None:
                counter += step
            else:
                counter = new_counter_value
        except Exception:
            yield (firstval, step, counter)
# Count six values, inspect state via throw(), then keep counting.
c = counter_gen()
for i in range(6):
    print(next(c))
print("Let us see what the state of the iterator is:")
state_of_count = c.throw(Exception)
print(state_of_count)
print("now, we can continue:")
for i in range(3):
    print(next(c))
# We can improve the previous example by defining our own exception class StateOfGenerator:
class StateOfGenerator(Exception):
    """Exception thrown into a counter generator to request its state tuple."""

    def __init__(self, message=None):
        # FIX: forward the message to Exception.__init__ so str(exc) and
        # tracebacks show it (the original left str(exc) empty).
        if message is None:
            super().__init__()
        else:
            super().__init__(message)
        self.message = message
def count(firstval=0, step=1):
    """Counting coroutine: next() steps, send(v) resets, and throwing
    StateOfGenerator yields the state tuple (firstval, step, counter)."""
    value = firstval
    while True:
        try:
            sent = yield value
            value = value + step if sent is None else sent
        except StateOfGenerator:
            yield (firstval, step, value)
# Count three values, inspect state with the custom exception, continue.
c = count()
for i in range(3):
    print(next(c))
print("Let us see what the state of the iterator is:")
i = c.throw(StateOfGenerator)
print(i)
print("now, we can continue:")
for i in range(3):
    print(next(c))
# yield from
def gen1():
    """Yield each character of "Python", then the integers 0 through 4,
    using explicit index-driven loops."""
    text = "Python"
    index = 0
    while index < len(text):
        yield text[index]
        index += 1
    number = 0
    while number < 5:
        yield number
        number += 1
def gen2():
    """Same sequence as gen1, delegating to each source via `yield from`."""
    for source in ("Python", range(5)):
        yield from source
# Both generators produce the identical sequence.
g1 = gen1()
g2 = gen2()
print("g1: ", end=", ")
for x in g1:
    print(x, end=", ")
print("\ng2: ", end=", ")
for x in g2:
    print(x, end=", ")
print()
def cities():
    """Yield four city names, one per next() call."""
    yield "Sorocaba"
    yield "São Paulo"
    yield "Belém"
    yield "Votorantim"
def squares():
    """Yield the squares of 0 through 9."""
    yield from (k * k for k in range(10))
def generator_all_in_one():
    """Yield everything from cities(), then everything from squares(), by hand."""
    for value in cities():
        yield value
    for value in squares():
        yield value
def generator_splitted():
    """Same sequence as the hand-written version, via `yield from` delegation."""
    for source in (cities(), squares()):
        yield from source
# Both delegation styles must produce identical output (prints True).
lst1 = [el for el in generator_all_in_one()]
lst2 = [el for el in generator_splitted()]
print(lst1 == lst2)
def subgenerator():
    """Yield 1, then finish with return value 42 (surfaces as StopIteration.value)."""
    yield from (1,)
    return 42
def delegating_generator():
    """Print the return value captured from `yield from subgenerator()`."""
    result = yield from subgenerator()
    print(result)
# Prints 1 (yielded), then 42 (printed by the delegating generator itself).
for x in delegating_generator():
    print(x)
"""
The full semantics of the yield from expression is described in six points in
"PEP 380 -- Syntax for Delegating to a Subgenerator" in terms of the
generator protocol:
- Any values that the iterator yields are passed directly to the caller.
- Any values sent to the delegating generator using send() are passed directly
to the iterator. If the sent value is None, the iterator's next() method is
called. If the sent value is not None, the iterator's send() method is
called. If the call raises StopIteration, the delegating generator is
resumed. Any other exception is propagated to the delegating generator.
- Exceptions other than GeneratorExit thrown into the delegating generator are
passed to the throw() method of the iterator. If the call raises
StopIteration, the delegating generator is resumed. Any other exception is
propagated to the delegating generator.
- If a GeneratorExit exception is thrown into the delegating generator, or the
close() method of the delegating generator is called, then the close() method
of the iterator is called if it has one. If this call results in an exception,
it is propagated to the delegating generator. Otherwise, GeneratorExit is
raised in the delegating generator.
- The value of the yield from expression is the first argument to the
StopIteration exception raised by the iterator when it terminates.
- return expr in a generator causes StopIteration(expr) to be raised upon exit
from the generator.
"""
# Recursive Generators
"""
Permutation:
Is a rearrangement of the elements of an ordered list. In other words:
Every arrangement of n elements is called a permutation.
In the following lines we show all the permutations of the letter a, b and c:
a b c
a c b
b a c
b c a
c a b
c b a
"""
def permutations(items):
    """Recursively yield every ordering of *items*, each as a list.

    NOTE: this shadows the `permutations` imported from itertools above.
    """
    if not items:
        yield []
        return
    for idx, chosen in enumerate(items):
        remaining = items[:idx] + items[idx + 1:]
        for tail in permutations(remaining):
            yield [chosen] + tail
for p in permutations(['r', 'e', 'd']):
    print(''.join(p))
for p in permutations(list("game")):
    print(''.join(p) + ", ", end="")
# NOTE(review): contrary to the original comment, this calls the *local*
# permutations generator defined above — the itertools import of the same
# name has been shadowed by the def.
perms = permutations(['f', 'o', 'o'])
print(list(perms))
"""
Permutations can denote in this weaker meaning a sequence of elements, where
each element occurs just once, but without the requirement to contain all the
elements of a given set
"""
def k_permutations(items, n):
    """Yield every length-n arrangement of distinct elements drawn from items."""
    if n == 0:
        yield []
        return
    for candidate in items:
        for partial in k_permutations(items, n - 1):
            if candidate not in partial:
                yield [candidate] + partial
# All 24 three-letter arrangements of "abcd".
for kp in k_permutations("abcd", 3):
    print(kp)
# A Generator of Generators
def firstn(generator, n):
    """Yield only the first n values produced by calling *generator*."""
    source = generator()
    for _ in range(n):
        yield next(source)
def fibonacci():
    """Unbounded Fibonacci number generator."""
    current, following = 0, 1
    while True:
        yield current
        current, following = following, current + following
# Take the first 100 Fibonacci numbers from the infinite generator.
print(list(firstn(fibonacci, 100)))
# print(list(fibonacci())) # This will generate an Infinite Fibonacci sequence
# Exercises
"""
Write a generator which computes the running average.
"""
def running_average():
    """Coroutine: each send(value) yields the mean of all values sent so far."""
    total = 0.0
    samples = 0
    mean = None
    while True:
        value = yield mean
        total += value
        samples += 1
        mean = total / samples
ra = running_average()  # initialize the coroutine
next(ra)  # we have to start the coroutine before the first send()
for value in [7, 13, 17, 231, 12, 8, 3]:
    out_str = "sent: {val:3d}, new average: {avg:6.2f}"
    print(out_str.format(val=value, avg=ra.send(value)))
"""
Write a generator frange, which behaves like range but accepts float values.
"""
def frange(*args):
    """A range-like generator that accepts float values.

    frange(stop), frange(start, stop) or frange(start, stop, step); yields
    start, start+step, ... while the value is on the start side of stop.

    Raises TypeError for more than 3 arguments and ValueError for step == 0.
    """
    startval = 0
    stepsize = 1
    if len(args) == 1:
        endval = args[0]
    elif len(args) == 2:
        startval, endval = args
    elif len(args) == 3:
        startval, endval, stepsize = args
    else:
        # BUG FIX: the original did `"..." + len(args)` which raised
        # TypeError("can only concatenate str ...") instead of this message.
        raise TypeError("range expected at most 3 arguments, got %d" % len(args))
    if stepsize == 0:
        # Robustness: mirror range(), which rejects a zero step, instead of
        # silently looping forever.
        raise ValueError("frange() step argument must not be zero")
    value = startval
    factor = -1 if stepsize < 0 else 1
    while (value - endval) * factor < 0:
        yield value
        value += stepsize
# Using frange may look like this:
for i in frange(5.6):
    print(i, end=", ")
print()
for i in frange(0.3, 5.6):
    print(i, end=", ")
print()
for i in frange(0.3, 5.6, 0.8):
    print(i, end=", ")
print()
"""
3) Write a generator trange, which generates a sequence of time tuples from
start to stop incremented by step.
A time tuple is a 3-tuple of integers: (hours, minutes, seconds) So a call to
trange might look like this:
trange((10, 10, 10), (13, 50, 15), (0, 15, 12) )
"""
def trange(start, stop, step):
    """
    trange(start, stop, step) -> sequence of time 3-tuples

    start: time tuple (hours, minutes, seconds)
    stop: time tuple
    step: time tuple

    Yields time tuples from start up to (but not including) stop, advancing
    by step each iteration with second->minute->hour carries; hours wrap
    modulo 24.
    """
    now = list(start)
    end = list(stop)
    while now < end:
        yield tuple(now)
        # Seconds, with a single-60 carry into minutes (step fields are
        # assumed to be < 60 each, as in the original).
        total_seconds = now[2] + step[2]
        carry_minute = 0
        if total_seconds >= 60:
            total_seconds -= 60
            carry_minute = 1
        now[2] = total_seconds
        # Minutes, with a single-60 carry into hours.
        total_minutes = now[1] + step[1] + carry_minute
        carry_hour = 0
        if total_minutes >= 60:
            total_minutes -= 60
            carry_hour = 1
        now[1] = total_minutes
        # Hours wrap around midnight.
        total_hours = now[0] + step[0] + carry_hour
        if total_hours >= 24:
            total_hours -= 24
        now[0] = total_hours
# from timerange import trange
for time in trange((10, 10, 10), (19, 53, 15), (1, 24, 12)):
    print(time)
"""
Write a version "rtrange" of the previous generator, which can receive messages
to reset the start value.
"""
def rtrange(start, stop, step):
    """
    trange(stop) -> time as a 3-tuple (hours, minutes, seconds)
    trange(start, stop[, step]) -> time tuple

    start: time tuple (hours, minutes, seconds)
    stop: time tuple
    step: time tuple

    returns a sequence of time tuples from start to stop incremented by step

    The generator can be reset by sending a new "start" value.
    """
    current = list(start)
    while current < list(stop):
        # Whatever the caller send()s arrives here; plain next() sends None.
        new_start = yield tuple(current)
        if new_start != None:
            # Reset requested: restart from the sent time without stepping.
            current = list(new_start)
            continue
        # Same manual carry arithmetic as trange: seconds -> minutes -> hours,
        # each with at most one borrow of 60; hours wrap modulo 24.
        seconds = step[2] + current[2]
        min_borrow = 0
        hours_borrow = 0
        if seconds < 60:
            current[2] = seconds
        else:
            current[2] = seconds - 60
            min_borrow = 1
        minutes = step[1] + current[1] + min_borrow
        if minutes < 60:
            current[1] = minutes
        else:
            current[1] = minutes - 60
            hours_borrow = 1
        hours = step[0] + current[0] + hours_borrow
        if hours < 24:
            current[0] = hours
        else:
            current[0] = hours - 24
# from rtimerange import rtrange
# Take three values, reset the clock via send(), then continue from there.
ts = rtrange((10, 10, 10), (17, 50, 15), (1, 15, 12))
for _ in range(3):
    print(next(ts))
print(ts.send((8, 5, 50)))
for _ in range(3):
    print(next(ts))
"""
Write a program, using the newly written generator "trange", to create a file
"times_and_temperatures.txt". The lines of this file contain a time in the format
hh::mm::ss and random temperatures between 10.0 and 25.0 degrees. The times
should be ascending in steps of 90 seconds starting with 6:00:00. For example:
06:00:00 20.1
06:01:30 16.1
06:03:00 16.9
06:04:30 13.4
06:06:00 23.7
06:07:30 23.6
06:09:00 17.5
06:10:30 11.0
Bitstream of zeroes and ones
P(1) = p
"""
# from timerange import trange
# Write one "hh:mm:ss t.t" line every 90 seconds from 06:00:00 to 23:00:00
# with a random temperature between 10.0 and 25.0 degrees.
with open("times_and_temperatures.txt", "w") as fh:
    # FIX: use a context manager — the original never closed the file.
    for time in trange((6, 0, 0), (23, 0, 0), (0, 1, 30)):
        random_number = random.randint(100, 250) / 10
        lst = time + (random_number,)
        # BUG FIX: the original format string had no separator between the
        # time and the temperature ("06:00:0020.1"); the expected output in
        # the exercise text shows a space.
        output = "{:02d}:{:02d}:{:02d} {:4.1f}\n".format(*lst)
        fh.write(output)
# You can find further details and the mathematical background about this
# exercise in our chapter on:
# https://python-course.eu/weighted_choice_and_sample.php
"""
Write a generator with the name "random_ones_and_zeroes", which returns a
bitstream, i.e. a zero or a one in every iteration. The probability p for
returning a 1 is defined in a variable p. The generator will initialize this
value to 0.5. In other words, zeroes and ones will be returned with the
same probability.
"""
def random_ones_and_zeros():
    """Bitstream generator: yields 1 with probability p (initially 0.5).

    Sending a value q resets p to q for all subsequent draws.
    """
    p = 0.5
    while True:
        sample = random.random()
        received = yield (1 if sample < p else 0)
        if received is not None:
            p = received
x = random_ones_and_zeros()
next(x)  # we are not interested in the return value
# Print 20 bits at p=0.2, then 20 bits at p=0.8.
for p in [0.2, 0.8]:
    print("\nWe change the probability to : " + str(p))
    x.send(p)
    for i in range(20):
        print(next(x), end=" ")
    print()
"""
We wrote a class Cycle in the beginning of this chapter of our Python tutorial.
Write a generator cycle performing the same task.
"""
def cycle(iterable):
    """Endlessly repeat the elements of iterable (generator version of the
    Cycle class above; like itertools.cycle)."""
    # cycle('ABCD') --> A B C D A B C D A B C D ...
    seen = []
    for item in iterable:
        seen.append(item)
        yield item
    while seen:
        yield from seen
# Seven next() calls wrap around the three-country list.
countries = ["Brasil", "Argentina", "Uruguay"]
country_iterator = cycle(countries)
for i in range(7):
    print(next(country_iterator))
|
import re
# Load the four raw text sources once; each is searched repeatedly below.
with open("data/raw/details.txt") as f:
    text = f.read()
with open("data/raw/geography.txt") as f:
    geo = f.read()
with open("data/raw/publishers.txt") as f:
    pub = f.read()
with open("data/raw/subjects.txt") as f:
    subj = f.read()
# Highest record ID to scan for in details.txt.
MAX = 6562
# How many characters past an ID to search for its date/frequency fields.
LOOKAHEAD = 100
# State names as they appear in the geography text, each suffixed with the
# literal marker "STATE" (stripped again when writing the CSV row).
STATES = [
    "Alabama STATE",
    "Alaska STATE",
    "Arizona STATE",
    "Arkansas STATE",
    "California STATE",
    "Colorado STATE",
    "Connecticut STATE",
    "Delaware STATE",
    "Florida STATE",
    "Georgia STATE",
    "Hawaii STATE",
    "Idaho STATE",
    "Illinois STATE",
    "Indiana STATE",
    "Iowa STATE",
    "Kansas STATE",
    "Kentucky STATE",
    "Louisiana STATE",
    "Maine STATE",
    "Maryland STATE",
    "Massachusetts STATE",
    "Michigan STATE",
    "Minnesota STATE",
    "Mississippi STATE",
    "Missouri STATE",
    "Montana STATE",
    "Nebraska STATE",
    "Nevada STATE",
    "New Hampshire STATE",
    "New Jersey STATE",
    "New Mexico STATE",
    "New York STATE",
    "North Carolina STATE",
    "North Dakota STATE",
    "Ohio STATE",
    "Oklahoma STATE",
    "Oregon STATE",
    "Pennsylvania STATE",
    "Rhode Island STATE",
    "South Carolina STATE",
    "South Dakota STATE",
    "Tennessee STATE",
    "Texas STATE",
    "Utah STATE",
    "Vermont STATE",
    "Virginia STATE",
    "Washington STATE",
    "West Virginia STATE",
    "Wisconsin STATE",
    "Wyoming STATE",
    "District of Columbia STATE",
]
with open('data/bibliography.csv', 'w') as compiled:
    # NOTE(review): the header declares 9 columns but each data row below
    # emits 8 values plus a trailing comma, leaving "Link" always empty.
    print("ID,Name,First,Last,City,State,Publisher,Subject,Link", file=compiled)
    # For each ID
    for i in range(1, MAX+1):
        # Find where the word "frequency" occurs after this ID
        p = re.compile("\n%s (.|[\n])*[Ff]requency" % (i))
        find_id_and_freq = p.search(text)
        if not find_id_and_freq:
            # if we can't find the word "frequency" occuring after this ID then go to the next
            continue
        start_of_id = find_id_and_freq.start() + len(" %d " %i)
        # The title runs from the ID up to the first date (". 1..." or "\n1...").
        start_of_date = text.find(". 1", start_of_id, start_of_id+LOOKAHEAD)
        if start_of_date < 0:
            start_of_date = text.find("\n1", start_of_id, start_of_id+LOOKAHEAD)
        start_of_freq = text.find("Frequency", start_of_id, start_of_id+LOOKAHEAD)
        if start_of_freq < 0:
            start_of_freq = text.find("frequency", start_of_id, start_of_id+LOOKAHEAD)
        date_dash = text.find("-", start_of_date, start_of_date+LOOKAHEAD)
        if start_of_date < 0:
            # we couldn't find a date, so go to the next ID
            continue
        if start_of_freq < 0:
            # we couldn't find a frequency, so go to the next ID
            continue
        if date_dash < 0:
            # we couldn't find a dash between years, so go to the next ID
            continue
        # Title: strip newlines plus leading/trailing spaces and commas.
        name = text[start_of_id:start_of_date].replace('\n', '')
        while name.startswith(" ") or name.startswith(","):
            name = name[1:]
        while name.endswith(" ") or name.endswith(","):
            name = name[:-1]
        # First/last year of the date range, with punctuation stripped.
        first = text[start_of_date:date_dash].replace(' ', '').replace('.', '').replace('-', '').replace('?', '').replace('\n', '')
        last = text[date_dash:start_of_freq].replace(' ', '').replace('.', '').replace('-', '').replace('?', '').replace('\n', '')
        # now let's find the city and state
        r = re.compile("[\s,]%d[\s,]" % (i))
        id_in_geo = r.search(geo)
        if not id_in_geo:
            # couldn't find ID in geo, go to next ID
            continue
        id_start = id_in_geo.start()
        # Search *backwards* from the ID by reversing the preceding text.
        reverse_starting_at_id = geo[:id_start][::-1]
        re_city_name = re.compile("([^\d\n,]*[a-zA-Z]+[^\d\n,]*)+")
        city_match = re_city_name.search(reverse_starting_at_id)
        city_start = city_match.start()
        city = city_match.group(0)[::-1].replace('\n', '')
        while city.startswith(" ") or city.startswith(","):
            city = city[1:]
        while city.endswith(" ") or city.endswith(","):
            city = city[:-1]
        # Pick the state whose (reversed) name occurs closest before the city.
        # NOTE(review): the `state_pos > 0` test excludes a match at offset 0
        # and best can remain -1 when no state is found at all — confirm intended.
        best = -1
        state = ""
        for s in STATES:
            reverse_starting_after_city = reverse_starting_at_id[city_start+len(city):]
            state_pos = reverse_starting_after_city.find(s[::-1])
            if (state_pos > 0 and state_pos < best) or best == -1:
                state = s
                best = state_pos
        if best < 0:
            # couldn't find a state, go to next ID
            continue
        # Now let's find the publisher: join the text preceding every
        # occurrence of the ID in publishers.txt with " and ".
        id_in_pub = r.finditer(pub)
        publisher = ""
        for m in id_in_pub:
            id_start = m.start()
            reverse_starting_at_id = pub[:id_start][::-1]
            re_publisher_name = re.compile("([^\d\n]*[a-zA-Z]+[^\d\n]*)+")
            publisher_match = re_publisher_name.search(reverse_starting_at_id)
            if not publisher_match:
                # no publisher text before this occurrence; try the next match
                continue
            publisher_start = publisher_match.start()
            tmp_publisher = publisher_match.group(0)[::-1].replace('\n', '')
            while tmp_publisher.startswith(" ") or tmp_publisher.startswith(","):
                tmp_publisher = tmp_publisher[1:]
            while tmp_publisher.endswith(" ") or tmp_publisher.endswith(","):
                tmp_publisher = tmp_publisher[:-1]
            publisher = tmp_publisher + " and " + publisher if publisher else tmp_publisher
        # Now let's find the subject: same scheme, joined with " | ".
        id_in_subj = r.finditer(subj)
        subject = ""
        for m in id_in_subj:
            id_start = m.start()
            reverse_starting_at_id = subj[:id_start][::-1]
            re_subject_name = re.compile("([^\d\n]*[a-zA-Z]+[^\d\n]*)+")
            subject_match = re_subject_name.search(reverse_starting_at_id)
            if not subject_match:
                # no subject text before this occurrence; try the next match
                continue
            subject_start = subject_match.start()
            tmp_subject = subject_match.group(0)[::-1].replace('\n', '')
            while tmp_subject.startswith(" ") or tmp_subject.startswith(","):
                tmp_subject = tmp_subject[1:]
            while tmp_subject.endswith(" ") or tmp_subject.endswith(","):
                tmp_subject = tmp_subject[:-1]
            subject = tmp_subject + " | " + subject if subject else tmp_subject
        print("%d,\"%s\",%s,%s,\"%s\",\"%s\",\"%s\",\"%s\"," %(i, name, first, last, city, state[:-len(" STATE")], publisher, subject), file=compiled)
# ======================================================================
# Copyright TOTAL / CERFACS / LIRMM (02/2020)
# Contributor: <NAME> (<<EMAIL>>
# <<EMAIL>>)
# <NAME> (<<EMAIL>>)
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the
# CeCILL-B license as circulated by CEA, CNRS and INRIA at the following
# URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided
# only with a limited warranty and the software's author, the holder of
# the economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards
# their requirements in conditions enabling the security of their
# systems and/or data to be ensured and, more generally, to use and
# operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
# ======================================================================
import logging
import typing as ty
import numpy
from qiskit.circuit.quantumregister import Qubit
from qiskit.dagcircuit.dagcircuit import DAGNode
from hamap.gates import TwoQubitGate, SwapTwoQubitGate
from hamap.hardware.IBMQHardwareArchitecture import IBMQHardwareArchitecture
from hamap.layer import QuantumLayer, update_layer
logger = logging.getLogger("hamap.heuristics")
def _gate_op_cost(
op: DAGNode,
distance_matrix: numpy.ndarray,
mapping: ty.Dict[Qubit, int],
hardware: IBMQHardwareArchitecture,
) -> float:
if hardware.is_ignored_operation(op):
return 0
if len(op.qargs) == 1:
# SABRE ignores 1-qubit gates
return 0
elif len(op.qargs) == 2:
# This is a CNOT
source, sink = op.qargs
return distance_matrix[mapping[source], mapping[sink]]
else:
logger.warning(
f"Found a quantum operation applied on '{len(op.qargs)}' qubits. This "
f"operation will be excluded from the cost computation."
)
return 0
def sabre_heuristic(
    hardware: IBMQHardwareArchitecture,
    front_layer: QuantumLayer,
    topological_nodes: ty.List[DAGNode],
    current_node_index: int,
    current_mapping: ty.Dict[Qubit, int],
    initial_mapping: ty.Dict[Qubit, int],
    trans_mapping: ty.Dict[Qubit, int],
    distance_matrix: numpy.ndarray,
    tentative_gate: TwoQubitGate,
    look_ahead_depth: int = 20,
    look_ahead_weight: float = 0.5,
) -> float:
    """The heuristic cost function used in the SABRE optimiser.

    :param hardware: the SABRE optimiser does not take into account the hardware data to
        compute the heuristic cost, only to generate the possible SWAPs to evaluate
        with this heuristic. The SABRE heuristic only uses the distance matrix.
        Nevertheless, this implementation uses the hardware data to check if some
        gates are ignored (such as barriers for example).
    :param front_layer: the current front layer. Used to compute an "immediate" cost,
        i.e. a quantity that will tell us if the SWAP/Bridge is useful to execute
        gates in the front layer.
    :param topological_nodes: the list of all the DAGNodes of the quantum circuit,
        sorted in topological order.
    :param current_node_index: index of the first non-processed node.
    :param current_mapping: the mapping *before* applying the given SWAP.
    :param initial_mapping: the qubit mapping at the start of the routing
        procedure; forwarded to ``tentative_gate.implements_operation`` and used
        to cost a non-SWAP (Bridge) gate.
    :param trans_mapping: mapping forwarded to
        ``tentative_gate.implements_operation`` together with ``initial_mapping``
        -- presumably the mapping induced by the SWAPs inserted so far; confirm
        against hamap.gates.
    :param distance_matrix: the pre-computed distance matrix between each qubits.
    :param tentative_gate: the SWAP/Bridge we want to estimate the usefulness of.
    :param look_ahead_depth: the depth of the look-ahead. The procedure will consider
        gates that will be executed in the future (i.e. not in the front layer) up to
        the given depth. Note that 1-qubit gates are not ignored, which means that a
        depth of 3 will not guarantee that there is at least 3 CNOTs in the
        look-ahead set.
    :param look_ahead_weight: weight of the look-ahead. The actual gates (i.e. the
        gates in the front layer) have a weight of 1.
    :return: the heuristic cost of the given SWAP/Bridge according to the current
        state of the algorithm.
    """
    # First, compute the mapping obtained if the tentative gate were applied.
    new_mapping = tentative_gate.update_mapping(current_mapping)
    # H_basic accumulates the distance cost of the front-layer gates that are
    # not already implemented by the tentative gate.
    H_basic = 0.0
    H_tentative = 0.0
    H_tentative_gate_number = 0
    H_basic_gate_number = 0
    for op in front_layer.ops:
        # Only add the gate to the cost if the gate is not already implemented
        # by the SWAP/Bridge.
        if not tentative_gate.implements_operation(op, initial_mapping, trans_mapping):
            H_basic += _gate_op_cost(op, distance_matrix, new_mapping, hardware)
            H_basic_gate_number += 1
    # Cost of the tentative gate itself. The gate count is weighted by the
    # number of elementary gates it decomposes into: 3 for a SWAP, 4 otherwise
    # (presumably a Bridge, which costs 4 CNOTs -- confirm in hamap.gates).
    if isinstance(tentative_gate, SwapTwoQubitGate):
        H_tentative += tentative_gate.cost(hardware, current_mapping, distance_matrix)
        H_tentative_gate_number += 3
    else:
        H_tentative += tentative_gate.cost(hardware, initial_mapping, distance_matrix)
        H_tentative_gate_number += 4
    future_nodes_layer = QuantumLayer(max_depth=look_ahead_depth)
    # We do not use the return of update_layer because we do not care about the
    # number of gates that were added. Still, we add the firsts look_ahead_depth
    # layers of our future gates in this set to have this look-ahead ability.
    _ = update_layer(future_nodes_layer, topological_nodes, current_node_index)
    # The decay is not implemented in the code the authors gave us and not
    # sufficiently explained in the paper to implement it without guessing. Not
    # implementing it for the moment...
    # H_tentative_gate_number is always >= 3, so the division below is safe.
    H = (H_basic + H_tentative) / (H_basic_gate_number + H_tentative_gate_number)
    H_extended = 0.0
    if future_nodes_layer:
        # Only add this cost if there are nodes in the future_nodes_layer.
        H_extended += (
            look_ahead_weight
            * sum(
                _gate_op_cost(op, distance_matrix, new_mapping, hardware)
                for op in future_nodes_layer.ops
            )
            / len(future_nodes_layer)
        )
    H += H_extended
    return H
def sabre_heuristic_with_effect(
    hardware: IBMQHardwareArchitecture,
    front_layer: QuantumLayer,
    topological_nodes: ty.List[DAGNode],
    current_node_index: int,
    current_mapping: ty.Dict[Qubit, int],
    initial_mapping: ty.Dict[Qubit, int],
    trans_mapping: ty.Dict[Qubit, int],
    distance_matrix: numpy.ndarray,
    tentative_gate: SwapTwoQubitGate,
    look_ahead_depth: int = 20,
    look_ahead_weight: float = 0.5,
) -> ty.Tuple[float, float]:
    """The heuristic cost function used by SABRE, modified to return the effect.

    The effect of the SWAP is a float number that is negative if the SWAP gate has a
    bad effect on the following gates, else positive.

    :param hardware: the SABRE optimiser does not take into account the hardware data to
        compute the heuristic cost, only to generate the possible SWAPs to evaluate
        with this heuristic. The SABRE heuristic only uses the distance matrix.
        Nevertheless, this implementation uses the hardware data to check if some
        gates are ignored (such as barriers for example).
    :param front_layer: the current front layer. Used to compute an "immediate" cost,
        i.e. a quantity that will tell us if the SWAP/Bridge is useful to execute
        gates in the front layer.
    :param topological_nodes: the list of all the DAGNodes of the quantum circuit,
        sorted in topological order.
    :param current_node_index: index of the first non-processed node.
    :param current_mapping: the mapping *before* applying the given SWAP.
    :param initial_mapping: the qubit mapping at the start of the routing
        procedure; forwarded to ``tentative_gate.implements_operation``.
    :param trans_mapping: mapping forwarded to
        ``tentative_gate.implements_operation`` together with ``initial_mapping``
        -- presumably the mapping induced by the SWAPs inserted so far; confirm
        against hamap.gates.
    :param distance_matrix: the pre-computed distance matrix between each qubits.
    :param tentative_gate: the SWAP we want to estimate the usefulness of.
    :param look_ahead_depth: the depth of the look-ahead. The procedure will consider
        gates that will be executed in the future (i.e. not in the front layer) up to
        the given depth. Note that 1-qubit gates are not ignored, which means that a
        depth of 3 will not guarantee that there is at least 3 CNOTs in the
        look-ahead set.
    :param look_ahead_weight: weight of the look-ahead. The actual gates (i.e. the
        gates in the front layer) have a weight of 1.
    :return: the heuristic cost of the given SWAP according to the current
        state of the algorithm along with the effect of the SWAP on the non-executed
        gates.
    """
    # First, compute the mapping obtained if the tentative SWAP were applied.
    new_mapping = tentative_gate.update_mapping(current_mapping)
    # H_basic accumulates the distance cost of the front-layer gates that are
    # not already implemented by the tentative gate.
    H_basic = 0.0
    H_basic_gate_number = 0
    for op in front_layer.ops:
        # Only add the gate to the cost if the gate is not already implemented
        # by the SWAP/Bridge.
        if not tentative_gate.implements_operation(op, initial_mapping, trans_mapping):
            H_basic += _gate_op_cost(op, distance_matrix, new_mapping, hardware)
            H_basic_gate_number += 1
    H = 0.0
    future_nodes_layer = QuantumLayer(max_depth=look_ahead_depth)
    # We do not use the return of update_layer because we do not care about the
    # number of gates that were added. Still, we add the firsts look_ahead_depth
    # layers of our future gates in this set to have this look-ahead ability.
    _ = update_layer(future_nodes_layer, topological_nodes, current_node_index)
    # The decay is not implemented in the code the authors gave us and not
    # sufficiently explained in the paper to implement it without guessing. Not
    # implementing it for the moment...
    swap_effect = 0.0
    # Average front-layer cost; guard against an empty front layer.
    H += (H_basic / H_basic_gate_number) if H_basic_gate_number != 0 else 0
    if future_nodes_layer:
        # Only add this cost if there are nodes in the future_nodes_layer.
        H += (
            look_ahead_weight
            * sum(
                _gate_op_cost(op, distance_matrix, new_mapping, hardware)
                for op in future_nodes_layer.ops
            )
            / len(future_nodes_layer)
        )
        # Positive effect: the SWAP brings the future gates closer on average.
        swap_effect += sum(
            _gate_op_cost(op, distance_matrix, current_mapping, hardware)
            - _gate_op_cost(op, distance_matrix, new_mapping, hardware)
            for op in future_nodes_layer.ops
        )
    return H, swap_effect
|
from django.shortcuts import get_object_or_404
from rest_framework import viewsets
from rest_framework import permissions
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from rest_framework import filters, status
from premises.models import Contention, Premise
from .serializers import (ContentionSerializer, PremisesSerializer,
PremiseReportSerializer)
from premises.utils import int_or_default
from premises.signals import supported_a_premise
from api.v1.users.serializers import UserProfileSerializer
from newsfeed.models import Entry
class ContentionViewset(viewsets.ModelViewSet):
    """CRUD endpoints for published contentions (arguments).

    Also exposes nested premise listing/creation for a contention.
    Writes require authentication; reads are public.
    """
    queryset = Contention.objects.filter(is_published=True)\
        .prefetch_related('premises',
                          'premises__supporters')\
        .select_related('user', 'premises__parent',
                        'premises__user')
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    serializer_class = ContentionSerializer
    paginate_by = 20
    filter_backends = (filters.SearchFilter, filters.DjangoFilterBackend,
                       filters.OrderingFilter)
    search_fields = ('title', 'slug',)
    filter_fields = ('is_featured',)
    ordering_fields = ('date_creation',)

    @detail_route()
    def premises(self, request, pk=None):
        """List the premises of the contention."""
        contention = self.get_object()
        serializer = PremisesSerializer(
            contention.premises.select_related('user').all(), many=True)
        return Response(serializer.data)

    def create_argument(self, request):
        """Create a new contention owned by the requesting user."""
        serializer = self.serializer_class(
            data=request.data, initial={'ip': request.META['REMOTE_ADDR'],
                                        'user': request.user})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def _get_owner_object(self):
        """Fetch the contention from the URL, restricted to the requesting
        user (404 if it exists but belongs to someone else)."""
        lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
        filter_kwargs = {
            self.lookup_field: self.kwargs[lookup_url_kwarg],
            'user': self.request.user
        }
        obj = get_object_or_404(Contention.objects.all(), **filter_kwargs)
        return obj

    def update_argument(self, request, pk=None):
        """Update a contention owned by the requesting user."""
        contention = self._get_owner_object()
        # Fix: was ``request.DATA``, the DRF 2.x API removed in DRF 3;
        # use ``request.data`` as ``create_argument`` already does.
        serializer = self.serializer_class(data=request.data,
                                           instance=contention)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete_argument(self, request, pk=None):
        """Delete an owned contention together with its newsfeed entry."""
        contention = self._get_owner_object()
        Entry.objects.delete(contention.get_newsfeed_type(), contention.id)
        contention.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)

    @detail_route()
    def create_premise(self, request, pk=None):
        """Create a premise attached to the contention."""
        contention = self.get_object()
        serializer = PremisesSerializer(
            data=request.data, initial={'ip': request.META['REMOTE_ADDR'],
                                        'user': request.user,
                                        'argument': contention})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class PremiseViewset(viewsets.ModelViewSet):
    """CRUD and reporting endpoints for approved premises of a contention."""
    queryset = Premise.objects.filter(is_approved=True)
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    serializer_class = PremisesSerializer
    lookup_field = 'id'
    lookup_url_kwarg = 'premise_id'

    def filter_queryset(self, queryset):
        """Restrict premises to the published contention given in the URL."""
        argument_id = int_or_default(self.kwargs.get('pk'), default=0)
        return queryset.filter(argument__id=argument_id,
                               argument__is_published=True)

    @detail_route(methods=['post'])
    def report(self, request, pk=None, premise_id=None):
        """File a report against a premise; at most one per user."""
        premise = self.get_object()
        if premise.reports.filter(reporter=request.user).exists():
            return Response({'message': 'Onermeyi Zaten Rapor ettin.'},
                            status=status.HTTP_400_BAD_REQUEST)
        serializer = PremiseReportSerializer(
            data=request.data, initial={'reporter': request.user,
                                        'premise': premise,
                                        'contention': premise.argument})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def _get_owner_object(self):
        """Fetch the premise from the URL, restricted to the requesting
        user and the contention in the URL (404 otherwise)."""
        argument_id = int_or_default(self.kwargs.get('pk'), default=0)
        lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
        filter_kwargs = {
            self.lookup_field: self.kwargs[lookup_url_kwarg],
            'argument__id': argument_id,
            'user': self.request.user
        }
        obj = get_object_or_404(Premise.objects.all(), **filter_kwargs)
        return obj

    def update_premise(self, request, pk=None, premise_id=None):
        """Update a premise owned by the requesting user."""
        premise = self._get_owner_object()
        # Fix: was ``request.DATA``, the DRF 2.x API removed in DRF 3;
        # use ``request.data`` for consistency with the other write views.
        serializer = self.serializer_class(data=request.data,
                                           instance=premise)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete_premise(self, request, pk=None, premise_id=None):
        """Delete a premise owned by the requesting user."""
        premise = self._get_owner_object()
        premise.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class PremiseSupportViewset(PremiseViewset):
    """Endpoints for supporting, unsupporting and listing the supporters
    of a premise."""

    @detail_route(methods=['post'])
    def support(self, request, pk=None, premise_id=None):
        """Register the requesting user as a supporter of the premise."""
        premise = self.get_object()
        already_supporting = premise.supporters.filter(
            id=request.user.id).exists()
        if already_supporting:
            return Response({'message': "Onermeyi Zaten destekliyorsun"},
                            status=status.HTTP_400_BAD_REQUEST)
        premise.supporters.add(request.user)
        # Notify listeners (e.g. the newsfeed) about the new support.
        supported_a_premise.send(sender=self, premise=premise,
                                 user=self.request.user)
        return Response(status=status.HTTP_201_CREATED)

    @detail_route(methods=['get'])
    def supporters(self, request, pk=None, premise_id=None):
        """Return a paginated list of the premise's supporters."""
        premise = self.get_object()
        supporter_page = self.paginate_queryset(premise.supporters.all())
        serializer = self.get_pagination_serializer(supporter_page)
        return Response(serializer.data)

    @detail_route(methods=['delete'])
    def unsupport(self, request, pk=None, premise_id=None):
        """Withdraw the requesting user's support from the premise."""
        premise = self.get_object()
        currently_supporting = premise.supporters.filter(
            id=request.user.id).exists()
        if not currently_supporting:
            return Response({'message': "Once onermeyi desteklemen gerekiyor"},
                            status=status.HTTP_400_BAD_REQUEST)
        premise.supporters.remove(request.user)
        return Response(status=status.HTTP_204_NO_CONTENT)
# URL-conf entry points: bind HTTP verbs to viewset actions.
contention_list = ContentionViewset.as_view(
    {'get': 'list', 'post': 'create_argument'}
)
contention_detail = ContentionViewset.as_view(
    {'get': 'retrieve', 'put': 'update_argument',
     'delete': 'delete_argument'}
)
# Nested premise collection under a contention.
premises_list = ContentionViewset.as_view(
    {'get': 'premises', 'post': 'create_premise'}
)
premise_detail = PremiseViewset.as_view(
    {'get': 'retrieve', 'put': 'update_premise',
     'delete': 'delete_premise'}
)
premise_report = PremiseViewset.as_view(
    {'post': 'report'}
)
premise_support = PremiseSupportViewset.as_view(
    {'post': 'support', 'delete': 'unsupport'}
)
# Supporter listing serializes users with the profile serializer instead of
# the premise serializer.
premise_supporters = PremiseSupportViewset.as_view(
    {'get': 'supporters'},
    serializer_class=UserProfileSerializer
)
|
<filename>tests/runtime_benchmark.py<gh_stars>10-100
"""Run benchmarks and print benchmark report.
This file times various aspects of the environment, such as the physics engine
and the renderer, given a task config. It is useful to benchmark new task
configs.
Note: To run this file, you must install the tqdm package.
"""
import sys
sys.path.insert(0, '..') # Allow imports from parent directory
from absl import flags
from absl import app
import importlib
import numpy as np
import time
from tqdm import tqdm
from moog import environment
from moog.observers import pil_renderer
from moog.observers import color_maps
FLAGS = flags.FLAGS
flags.DEFINE_string('config',
                    'moog.configs.examples.pacman',
                    'Filename of task config to use.')
flags.DEFINE_integer('level', 0, 'Level of task config to run.')
flags.DEFINE_string('color_map', 'hsv_to_rgb',
                    'Color map in observers/color_maps.py to use.')

# (image_size, anti_aliasing) pairs benchmarked by the rendering tests in
# main() below.
_IMAGE_SIZE_ANTI_ALIASING = (
    (64, 1),
    (128, 1),
    (256, 1),
    (512, 1),
    (512, 2),
    (1024, 1),
)

# Number of timed episodes per benchmark and number of timed calls per episode.
_NUM_RESETS = 20
_TRIALS_PER_RESET = 20
def _time_env_function(env, env_function):
    """Time ``env_function`` and print per-step latency statistics.

    For each of ``_NUM_RESETS`` episodes the environment is reset and
    ``env_function`` is called ``_TRIALS_PER_RESET`` times. An episode in
    which ``env_function`` signals an abort (returns True, e.g. because the
    episode ended) is excluded from the statistics, since its timing would
    cover fewer than ``_TRIALS_PER_RESET`` calls.
    """
    times_list = []
    for reset_count in tqdm(range(_NUM_RESETS)):
        env.reset()
        t_start = time.time()
        # Initialise so the post-loop check is safe even if the trial loop
        # body never runs.
        abort = False
        for _ in range(_TRIALS_PER_RESET):
            abort = env_function()
            if abort:
                break
        if abort:
            continue
        t_end = time.time()
        times_list.append(t_end - t_start)
    # Each entry of times_list covers _TRIALS_PER_RESET calls, so normalise by
    # that constant. Fix: the original divided by _NUM_RESETS, which only gave
    # the right number because both constants happen to equal 20.
    ms_per_step = 1e3 * np.array(times_list) / float(_TRIALS_PER_RESET)
    print('    ms/step: {}'.format(np.mean(ms_per_step)))
    print('    stddev ms/step: {}'.format(np.std(ms_per_step)))
    print('    min ms/step: {}'.format(np.min(ms_per_step)))
    print('    max ms/step: {}'.format(np.max(ms_per_step)))
def main(_):
    """Run benchmarking script.

    Benchmarks, in order: (i) full environment steps without rendering,
    (ii) environment resets without rendering, (iii) physics steps only,
    (iv) rendering only at several image sizes, and (v) full steps with
    rendering. Timing statistics are printed by _time_env_function.
    """
    config = importlib.import_module(FLAGS.config)
    print('Benchmarking config: {}'.format(FLAGS.config))
    config = config.get_config(FLAGS.level)
    ############################################################################
    # Benchmark without rendering, using random actions
    ############################################################################
    # Null observer: replaces the renderer so steps carry no rendering cost.
    config['observers'] = {'image': lambda _: None}
    env = environment.Environment(**config)
    print('Environment steps without rendering:')
    def _step_env_function():
        # Returns True (abort) when the episode ends.
        obs = env.step(action=env.action_space.random_action())
        if obs.last():
            return True
        else:
            return False
    _time_env_function(env, _step_env_function)
    ############################################################################
    # Benchmark only resets, without rendering, using random actions
    ############################################################################
    config['observers'] = {'image': lambda _: None}
    env = environment.Environment(**config)
    print('Environment resets, without rendering:')
    # NOTE(review): shadows the _step_env_function defined above; it returns
    # None, which _time_env_function treats as "no abort". The bound `obs` is
    # unused.
    def _step_env_function():
        obs = env.reset()
    _time_env_function(env, _step_env_function)
    ############################################################################
    # Benchmark physics only
    ############################################################################
    config['observers'] = {'image': lambda _: None}
    env = environment.Environment(**config)
    print('Physics steps only:')
    def _physics_env_function():
        # Step only the physics engine, bypassing the rest of env.step().
        env.physics.step(env.state)
        return False
    _time_env_function(env, _physics_env_function)
    ############################################################################
    # Benchmark renderer only
    ############################################################################
    # Factory so each benchmarked env gets its own closure over `env`.
    def _get_render_env_function(env):
        def _render_env_function():
            env.observation()
            return False
        return _render_env_function
    for image_size, anti_aliasing in _IMAGE_SIZE_ANTI_ALIASING:
        renderer = pil_renderer.PILRenderer(
            image_size=(image_size, image_size),
            anti_aliasing=anti_aliasing,
            color_to_rgb=getattr(color_maps, FLAGS.color_map)
        )
        config['observers'] = {'image': renderer}
        env = environment.Environment(**config)
        print('Renderer steps only, image_size {}, anti_aliasing {}:'.format(
            image_size, anti_aliasing))
        _time_env_function(env, _get_render_env_function(env))
    ############################################################################
    # Benchmark full steps with rendering
    ############################################################################
    def _get_env_function(env):
        def _env_function():
            # Full step: physics + rendering; abort at episode end.
            obs = env.step(action=env.action_space.random_action())
            if obs.last():
                return True
            else:
                return False
        return _env_function
    for image_size, anti_aliasing in _IMAGE_SIZE_ANTI_ALIASING:
        renderer = pil_renderer.PILRenderer(
            image_size=(image_size, image_size),
            anti_aliasing=anti_aliasing,
            color_to_rgb=getattr(color_maps, FLAGS.color_map)
        )
        config['observers'] = {'image': renderer}
        env = environment.Environment(**config)
        print(
            'Full steps with rendering, image_size {}, anti_aliasing '
            '{}:'.format(image_size, anti_aliasing))
        _time_env_function(env, _get_env_function(env))
if __name__ == "__main__":
    # Parse absl flags and dispatch to main().
    app.run(main)
|
<reponame>CyberQueenMara/baseband-research
##############################################################################
# Copyright (c) 2007 Open Kernel Labs, Inc. (Copyright Holder).
# All rights reserved.
#
# 1. Redistribution and use of OKL4 (Software) in source and binary
# forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# (a) Redistributions of source code must retain this clause 1
# (including paragraphs (a), (b) and (c)), clause 2 and clause 3
# (Licence Terms) and the above copyright notice.
#
# (b) Redistributions in binary form must reproduce the above
# copyright notice and the Licence Terms in the documentation and/or
# other materials provided with the distribution.
#
# (c) Redistributions in any form must be accompanied by information on
# how to obtain complete source code for:
# (i) the Software; and
# (ii) all accompanying software that uses (or is intended to
# use) the Software whether directly or indirectly. Such source
# code must:
# (iii) either be included in the distribution or be available
# for no more than the cost of distribution plus a nominal fee;
# and
# (iv) be licensed by each relevant holder of copyright under
# either the Licence Terms (with an appropriate copyright notice)
# or the terms of a licence which is approved by the Open Source
# Initative. For an executable file, "complete source code"
# means the source code for all modules it contains and includes
# associated build and other files reasonably required to produce
# the executable.
#
# 2. THIS SOFTWARE IS PROVIDED ``AS IS'' AND, TO THE EXTENT PERMITTED BY
# LAW, ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. WHERE ANY WARRANTY IS
# IMPLIED AND IS PREVENTED BY LAW FROM BEING DISCLAIMED THEN TO THE
# EXTENT PERMISSIBLE BY LAW: (A) THE WARRANTY IS READ DOWN IN FAVOUR OF
# THE COPYRIGHT HOLDER (AND, IN THE CASE OF A PARTICIPANT, THAT
# PARTICIPANT) AND (B) ANY LIMITATIONS PERMITTED BY LAW (INCLUDING AS TO
# THE EXTENT OF THE WARRANTY AND THE REMEDIES AVAILABLE IN THE EVENT OF
# BREACH) ARE DEEMED PART OF THIS LICENCE IN A FORM MOST FAVOURABLE TO
# THE COPYRIGHT HOLDER (AND, IN THE CASE OF A PARTICIPANT, THAT
# PARTICIPANT). IN THE LICENCE TERMS, "PARTICIPANT" INCLUDES EVERY
# PERSON WHO HAS CONTRIBUTED TO THE SOFTWARE OR WHO HAS BEEN INVOLVED IN
# THE DISTRIBUTION OR DISSEMINATION OF THE SOFTWARE.
#
# 3. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ANY OTHER PARTICIPANT BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from weaver import MergeError
import weaver.bootinfo_elf
from weaver.bootinfo_elf import BI_EXPORT_CONST, BI_EXPORT_THREAD_ID, BI_IO_MEMORY
class AliasCapObject:
    """
    Base interface for objects that can have an alias cap defined.

    Deliberately not a subclass of BootInfoObject: alias-cap objects must
    never appear in the bootinfo stream at all.
    """
    def __init__(self):
        # Placeholder name; concrete subclasses overwrite it.
        self.name = "Error: Unnamed AliasCapObject"

    def create_implicit_objects(self, namespace, machine=None, pools=None,
                                image=None, bootinfo=None):
        """Subclass hook; the base implementation creates nothing."""
        pass

    def generate_implicit_bootinfo(self, pd, bi, image=None, machine=None,
                                   bootinfo=None):
        """Subclass hook; the base implementation emits nothing."""
        pass
class VirtualDevice(AliasCapObject):
    """
    A virtual device object. Contains enough information for consumers
    to communicate to and reference the virtual device.
    """
    def __init__(self, name, server_name, server_pd, server_thread, index):
        # name: namespace name of the device.
        # server_name/server_pd/server_thread: the PD and thread implementing
        # the device.
        # index: instance slot in the server's virtual_device_instance[] array.
        self.name = name
        self.index = index
        self.server_name = server_name
        self.server_pd = server_pd
        self.server_thread = server_thread
        # NOTE(review): initialised here, but generate_implicit_bootinfo below
        # assigns ``self.client_tid`` (not ``client_thread``) -- possibly a
        # latent naming inconsistency; confirm which attribute consumers read.
        self.client_thread = None

    # def get_server(self):
    #     return self.server

    def get_index(self):
        """Return the instance index of this virtual device."""
        return self.index

    def get_export_type(self):
        # Exported to clients as a virtual-device capability.
        return weaver.bootinfo_elf.BI_EXPORT_VIRTDEV_CAP

    def create_implicit_objects(self, namespace, machine=None, pools=None, image=None, bootinfo=None):
        """
        Create memory objects that are implicitly defined by this
        virtual device.
        For virtual devices, nothing needs to be done for now. See
        comments in PhysicalDevice.create_implicit_objects.
        """
        pass

    def generate_implicit_bootinfo(self, pd, bi, image=None, machine=None, bootinfo=None):
        """Mark this device instance valid in the server ELF and export the
        server tid/handle into the client PD's environment."""
        # BEWARE! This pd.server_thread is NOT refering to the device server,
        # but the server thread of the client program.
        #
        # In other words:
        #  - pd (arg to this method) = device client PD
        #  - self.server_pd = device server PD
        #  - self.server_thread != pd.server_thread
        #
        # Don't be confused! - nt
        #
        if pd.server_thread is not None:
            self.client_tid = pd.server_thread
        else:
            raise MergeError, "Receiving PD must have a server thread to get virtdev handles."
        # Patch server variables to tell it about client thread ids
        elf = self.server_pd.elf
        if not elf:
            raise MergeError, "You cannot give device control to PDs without an executable ELF."
        sym = elf.find_symbol("virtual_device_instance")
        if not sym:
            raise MergeError, "Cannot find symbol virtual_device_instance[]"
        # XXX: The number of elements within the granted_physmem, interrupt and
        # valid device instances is hardcoded to 4 in the code. get_size()
        # returns the total size of the array while we want the size of an
        # element, so we divide by 4 here. A better method might be to define
        # the size of the array in the build system and use that value there
        # in addition pass a -DDEVICES_ARRAY=num to gcc and use that #define
        # instead of the hardcoded values.
        size = sym.size / 4
        addr = sym.value + size * self.index
        # Write 1 to mark this device instance slot as valid.
        image.patch(addr, 4, 1)
        # Add device handle and device server tid to client environment
        bi.write_object_export(pd = pd.get_bootinfo_id(),
                               key = self.server_name.upper() + "_TID",
                               obj = self.server_thread.get_bootinfo_id(),
                               type = BI_EXPORT_THREAD_ID)
        bi.write_object_export(pd = pd.get_bootinfo_id(),
                               key = self.server_name.upper() + "_HANDLE",
                               obj = self.index,
                               type = BI_EXPORT_CONST)
class PhysicalDevice(AliasCapObject):
"""
A physical device object. Encompasses device information.
"""
def __init__(self, name):
self.name = name
self.physical_mem = {}
self.interrupt = {}
self.mappings = {} # mappings[name] = (physpool, virtpool, memsection)
def add_physical_mem(self, name, mem):
"""Add a named list of physical memory ranges."""
self.physical_mem[name] = mem
# _assert_no_overlaps(self.physical_mem)
def add_interrupt(self, name, number):
"""Add a named interrupts."""
# print "New interrupt %d added" % number
self.interrupt[name] = number
def get_physical_mem(self, name):
"""Get the named list of physical memory ranges."""
if self.physical_mem.has_key(name):
return self.physical_mem[name]
def get_interrupt(self, name):
"""Get the named interrupt."""
if not self.interrupt.has_key(name):
raise MergeError, "Interrupt called %s not found." % name
return self.interrupt[name]
def get_export_type(self):
return weaver.bootinfo_elf.BI_EXPORT_PHYSDEV_CAP
def create_implicit_objects(self, namespace, machine, pools, image, bootinfo):
"""
Create memory objects that are implicitly defined by this
physical device.
The reason we have to do this now, rather than earlier or later,
is because when the PhysicalDevice is created, we don't know
who owns it, so we can't do memsection allocation yet.
If we leave it until create_ops() when bootinfo instructions are
generated, layout() is already determined, so that's too late.
The only solution is to create the objects when you do
collect_environment_element(). This guarantees that this
function will only be called once.
For physical devices, for each physmem we create a phys pool,
and allocate memsections out of it. We grant a cap to the memsection
to the containing PD.
No objects are created for interrupts.
"""
caps = {}
for (name, physmem) in self.physical_mem.iteritems():
# Create the physpool
pp = weaver.bootinfo.PhysPool("%s_pool" % name, machine, pools)
bootinfo.add_physpool(pp)
# This cap is probably never used.
master = weaver.bootinfo.Cap("master", ["master"])
pp.add_cap(master)
namespace.add(master.get_name(), master)
cap_name = "%s_pool" % name
caps[cap_name.upper()] = master
for (base, size, mem_type) in physmem:
pp.add_memory(None, base, size, machine, pools)
# Create the memsection
attrs = image.new_attrs(namespace)
attrs.name = name
attrs.size = size
attrs.pager = None
attrs.physpool = pp.name
attrs.scrub = False
attrs.cache_policy = BI_IO_MEMORY
# attrs.virtpool can only be determined when we know the owner PD
ms = weaver.bootinfo.MemSection(image, machine, pools, attrs)
# This cap is probably never used.
master = weaver.bootinfo.Cap("master", ["master"])
ms.add_cap(master)
image.add_group(None, [ms.get_ms()])
# Return the list of caps, the XML parser should add it
cap_name = "%s_ms" % name
caps[cap_name.upper()] = master
# Record the mapping
self.mappings[name] = (pp, None, ms)
return caps
def generate_interrupt_bootinfo(self, pd, bi, image):
"""
Generate bootinfo instructions to grant interrupts to the PD.
"""
elf = pd.elf
if not elf:
raise MergeError, "You cannot give device control to PDs without an executable ELF."
sym = elf.find_symbol("iguana_granted_interrupt")
if not sym:
raise MergeError, "Cannot find symbol iguana_granted_interrupt[]"
addr = sym.value
offset = 0
size = sym.size
for irq in self.interrupt.itervalues():
if pd.server_thread is not None:
thread = pd.server_thread
elif len(pd.get_threads()) > 0:
thread = pd.get_threads()[0]
else:
raise MergeError, "Cannot grant interrupt to PD with no threads"
# Instruct iguana server to grant interrupt
bi.write_grant_interrupt(thread = thread.bi_name, irq = irq)
# Patch the virtual address bases in the elf image
image.patch(addr + offset, 4, irq)
offset += 4
# We write -1 unused interrupt entries to denote invalidity
while offset < size:
image.patch(addr + offset, 4, 0xffffffff)
offset += 4
def generate_mapping_bootinfo(self, pd, bi, machine, bootinfo, image):
"""
Generate bootinfo instructions to grant device memory mappings to the PD.
"""
elf = pd.elf
if not elf:
raise MergeError, "You cannot give device control to PDs without an executable ELF."
sym = elf.find_symbol("iguana_granted_physmem")
if not sym:
raise MergeError, "Cannot find symbol iguana_granted_physmem[]"
addr = sym.value
offset = 0
size = sym.size
for (name, mapping) in self.mappings.iteritems():
if not offset < size:
raise MergeError, "The physmem array has overflowed. Increase its size."
(pp, none, ms) = mapping
# We can only attach the memsection now because we don't
# know the owner PD until this point in time
pd.attach_memsection(ms)
# The memsection should use the PD's default virtpool, but
# the previously generated physpool
(virtpool, foo, directpool) = pd.get_default_pools()
ms.get_attrs().virtpool = virtpool.name
# Generate the mapping bootinfo ops
# No need to tell Iguana to create the memsection
ms.generate_bootinfo(bi, machine, bootinfo, skip_if_implicit = True)
ms.set_implicit()
# Update the mapping with the correct virtpool
self.mappings[name] = (pp, virtpool, ms)
# Patch the virtual address bases in the elf image
# We only want to apply the patch only when the vbase
# is known. This happens on the second pass to
# generate_bootinfo()
if ms.vbase is not 0:
image.patch(addr + offset, 4, ms.vbase)
offset += 4
def generate_implicit_bootinfo(self, pd, bi, image, machine, bootinfo):
self.generate_mapping_bootinfo(pd, bi, machine, bootinfo, image)
self.generate_interrupt_bootinfo(pd, bi, image)
|
<gh_stars>0
#!/usr/local/bin/python
import semver
import os
import argparse
import sys
import requests
import json
import re
class GitLab:
    """Minimal GitLab API client used to bump semantic-version tags.

    Wraps the project endpoints needed by the version-bump pipeline:
    reading the latest merge request id, reading the current version tag,
    reading MR labels, and pushing a new version tag. Fatal API problems
    terminate the process via sys.exit.
    """
    # Class-level defaults; overwritten per instance in __init__.
    url = ""
    default_branch = ""
    header = ""
    current_version = ""

    def __init__(self, project_id, server_host, default_branch, token):
        self.url = f"https://{server_host}/api/v4/projects/{project_id}"
        self.default_branch = default_branch
        self.header = {"Private-Token": token}

    def get_latest_MR(self):
        """Return the MR id referenced by the newest commit on the default branch.

        Exits the process when the commit message does not end in ``!<id>``.
        """
        r = requests.get(
            f"{self.url}/repository/commits/{self.default_branch}", headers=self.header
        )
        # A merge commit message ends with "... See merge request !<id>".
        message = json.loads(r.text)["message"]
        mr_id = message[message.rindex("!") + 1:]
        if re.match(r"^\d+$", mr_id):
            print(f"Found MR id {mr_id}")
        else:
            sys.exit(
                f"ERROR: last commit message does not contains MR number. Got this: {mr_id}"
            )
        return mr_id

    def get_current_version(self):
        """Fetch the newest tag, validate it as semver, cache and return it."""
        r = requests.get(f"{self.url}/repository/tags", headers=self.header)
        # Tags are returned newest-first; the first entry is the current version.
        version = json.loads(r.text)[0]["name"]
        print(f"Got current version {version}")
        if semver.VersionInfo.isvalid(version):
            print("Version is valid")
        else:
            sys.exit(
                f"ERROR: Current version is not valid! Got this: {version}")
        # Parse once and reuse (the original parsed the string twice).
        self.current_version = semver.VersionInfo.parse(version)
        return self.current_version

    def set_new_version(self, new_version):
        """Create a tag for ``new_version`` on the default branch, or exit."""
        params = {
            "tag_name": str(new_version),
            "ref": self.default_branch,
            "message": str(new_version),
        }
        r = requests.post(
            f"{self.url}/repository/tags", headers=self.header, params=params
        )
        if r.status_code == 201 and json.loads(r.text)["name"] == str(new_version):
            print(f"New version was successfully bumped to {new_version}")
        else:
            print(r.status_code)
            print(r.text)
            # Fix: the original used an f-string with no placeholders here.
            sys.exit("ERROR: New version was not applied!")

    def get_mr_labels(self, id):
        """Return the label list of the merge request with the given id."""
        r = requests.get(f"{self.url}/merge_requests/{id}",
                         headers=self.header)
        labels = json.loads(r.text)["labels"]
        print(f"Got labels: {labels}")
        return labels
def determine_new_version(gitlab, labels):
    """Map MR labels to a semver bump of gitlab.current_version.

    Precedence is major > feature > bug; returns False when no
    recognised label is present (i.e. nothing to bump).
    """
    bump_by_label = (
        ("major", gitlab.current_version.bump_major),
        ("feature", gitlab.current_version.bump_minor),
        ("bug", gitlab.current_version.bump_patch),
    )
    for label, bump in bump_by_label:
        if label in labels:
            return bump()
    return False
def main(args=None):
    """Release flow: find the latest MR, read its labels, and bump the
    project version tag when a bump label is present."""
    gl = GitLab(project_id=args.project, server_host=args.server,
                default_branch=args.branch, token=args.token)
    merge_request_id = gl.get_latest_MR()
    gl.get_current_version()
    mr_labels = gl.get_mr_labels(id=merge_request_id)
    bumped = determine_new_version(gitlab=gl, labels=mr_labels)
    if not bumped:
        print("No need to bump version, keeping the old one")
        return
    print(f"New version is {bumped}")
    gl.set_new_version(new_version=bumped)
if __name__ == "__main__":
    # For local runs, export CI_PROJECT_ID / CI_SERVER_HOST /
    # CI_DEFAULT_BRANCH / TOKEN instead of editing this file.
    parser = argparse.ArgumentParser(description="Release manager script")
    parser.add_argument(
        "--project",
        default=os.environ.get("CI_PROJECT_ID", ""),
        help="Set project ID",
    )
    parser.add_argument(
        "--server",
        default=os.environ.get("CI_SERVER_HOST", ""),
        help="Set GitLab server host",
    )
    parser.add_argument(
        "--branch",
        default=os.environ.get("CI_DEFAULT_BRANCH", ""),
        help="Set GitLab default branch",
    )
    parser.add_argument(
        "--token",
        default=os.environ.get("TOKEN", ""),
        help="Set token",
    )
    args = parser.parse_args()
    # Every setting is mandatory; bail out naming the env var / flag pair.
    for option, env_name in (("project", "CI_PROJECT_ID"),
                             ("server", "CI_SERVER_HOST"),
                             ("branch", "CI_DEFAULT_BRANCH"),
                             ("token", "TOKEN")):
        if getattr(args, option) == "":
            sys.exit(f"Please use {env_name} env or use --{option} flag")
    main(args)
    # TODO: add arguments for different stuff, e.g.:
    #   --release  create a new release branch
    #   --major / --minor / --patch  force a specific version bump
|
<gh_stars>0
from __future__ import print_function
import collections
import os
import re
import stat
import sys
from os import path
from bs4 import BeautifulSoup
# Translation targets selected on the command line.
MODE_CSS = '--css'      # rewrite SMIL into CSS animations
MODE_WA = '--wa'        # rewrite SMIL into Web Animations API calls
MODE_FRAME = '--frame'  # emit a page showing css/smil/wa outputs side by side
# CSS properties whose unitless SMIL values need an explicit 'px' suffix.
LENGTH_PROPERTIES = [
    'font-size',
    'letter-spacing',
    'word-spacing']
def quit(status, *args, **kwargs):
    """Print a message to stderr and terminate the process with *status*.

    Extra keyword arguments are forwarded to print() (do not pass
    'file'; stderr is fixed here).
    """
    print(*args, file=sys.stderr, **kwargs)
    sys.exit(status)
def quit_usage():
    """Exit with EX_USAGE after printing the command-line usage string."""
    usage = ('usage: python -m translate-smil --css|--wa|--frame '
             '<input-path> <output-path>')
    quit(os.EX_USAGE, usage)
def quit_unsupported(feature):
    """Exit with EX_UNAVAILABLE, naming the unsupported SMIL *feature*."""
    message = ('The SMIL feature "' + feature +
               '" is not currently supported by translate-smil.')
    quit(os.EX_UNAVAILABLE, message)
def transform_to_css(transform_type, value):
    """Translate one SMIL animateTransform value into a CSS transform string.

    *value* components may be separated by commas or spaces. Falls through
    and returns None for transform types it does not recognise.
    """
    parts = re.split(',| ', value)
    if transform_type == 'rotate':
        if len(parts) == 1:
            return 'rotate(' + parts[0] + 'deg)'
        if len(parts) != 3:
            quit_unsupported('bad rotation')
        # Rotation about a pivot: move to the pivot, rotate, move back.
        return ('translate(' + parts[1] + 'px, ' + parts[2] + 'px)' +
                ' rotate(' + parts[0] + 'deg)' +
                ' translate(-' + parts[1] + 'px, -' + parts[2] + 'px)')
    if transform_type == 'scale':
        if len(parts) == 1:
            return 'scale(' + parts[0] + ')'
        if len(parts) != 2:
            quit_unsupported('bad scale')
        return 'scale(' + parts[0] + ', ' + parts[1] + ')'
    if transform_type == 'translate':
        if len(parts) == 1:
            return 'translate(' + parts[0] + 'px)'
        if len(parts) != 2:
            quit_unsupported('bad translate')
        return 'translate(' + parts[0] + 'px, ' + parts[1] + 'px)'
    if transform_type == 'skewX':
        return 'skewX(' + value + 'deg)'
    if transform_type == 'skewY':
        return 'skewY(' + value + 'deg)'
def double_to_string(value):
    """Format a float with %g-style shortest representation (no trailing
    zeros; scientific notation for large/small magnitudes)."""
    return format(value, 'g')
def milliseconds_to_css(value):
    """Render a millisecond count (as a string) as a CSS time.

    Values ending in '00' are shown in seconds for compactness; everything
    else keeps the 'ms' unit.
    """
    if not value.endswith('00'):
        return value + 'ms'
    return double_to_string(float(value) / 1000.) + 's'
def clock_value_to_milliseconds(dur):
    """Convert a SMIL clock value to a millisecond count (as a string).

    Supported forms: '250ms', '2s', '1.5min', '1h', 'hh:mm:ss' and
    'mm:ss'. Anything else aborts via quit_unsupported().
    """
    if dur.endswith('ms'):
        return dur[:-2]
    if dur.endswith('s'):
        return double_to_string(float(dur[:-1]) * 1000)
    if dur.endswith('min'):
        return double_to_string(float(dur[:-3]) * 60000)
    if dur.endswith('h'):
        return double_to_string(float(dur[:-1]) * 3600000)
    # list() so that len() below also works on Python 3, where map is lazy.
    parts = list(map(float, re.split(':', dur)))
    if len(parts) == 3:
        # hh:mm:ss -- seconds are parts[2]; the original code mistakenly
        # reused parts[0] (the hours field) here.
        return double_to_string(((parts[0] * 60 + parts[1]) * 60 +
                                 parts[2]) * 1000)
    if len(parts) == 2:
        return double_to_string((parts[0] * 60 + parts[1]) * 1000)
    quit_unsupported('dur value \"' + dur + '\"')
def to_camel_case(property):
    """Convert a hyphenated CSS property name to its camelCase DOM form,
    e.g. 'font-size' -> 'fontSize' (str.title() capitalises each
    hyphen-separated word, then the hyphens are stripped)."""
    camel = property.title().replace("-", "")
    return property[0] + camel[1:]
def translate_soup(soup, mode):
    """Rewrite all SMIL animation elements in *soup* in place.

    For MODE_CSS the animations become CSS rules appended to a <style>
    element; for MODE_WA they become element.animate() calls appended to
    a <script> element (animateMotion always goes through CSS). Each
    translated SMIL element is removed from the tree. Unsupported SMIL
    features abort the process via quit_unsupported().

    NOTE(review): quitMissing() (used below when neither <head> nor <svg>
    exists) is not defined anywhere in this module -- hitting that path
    raises NameError. Presumably quit_unsupported() or a similar helper
    was intended; confirm.
    NOTE(review): several map() results are passed to len()/indexed below,
    which only works on Python 2 where map returns a list.
    """
    # Collect every SMIL animation element, in document-type order.
    animate_elements = soup.find_all('animate')
    animate_motion_elements = soup.find_all('animateMotion')
    animate_transform_elements = soup.find_all('animateTransform')
    set_elements = soup.find_all('set')
    animation_elements = []
    animation_elements.extend(animate_elements)
    animation_elements.extend(animate_motion_elements)
    animation_elements.extend(animate_transform_elements)
    animation_elements.extend(set_elements)
    if not animation_elements:
        # The document has no SMIL animations
        return
    head = soup.head
    svg = soup.svg
    # Ensure a <style> element exists when CSS output is needed
    # (animateMotion is always emitted as CSS, even in --wa mode).
    if mode == MODE_CSS or len(animate_motion_elements) > 0:
        style = soup.style
        if not style:
            style = soup.new_tag('style')
            if head:
                head.append(style)
            elif svg:
                svg.append(style)
            else:
                quitMissing('svg')
        if not style.string:
            style.string = ''
    # Ensure a <script> element exists for Web Animations output.
    if mode == MODE_WA:
        script = soup.script
        if not script:
            script = soup.new_tag('script')
            if head:
                head.append(script)
            elif svg:
                svg.append(script)
            else:
                quitMissing('head')
        if not script.string:
            script.string = ''
    counter_dict = collections.defaultdict(int)
    # Generate a document-unique id with the given prefix.
    def generateId(prefix):
        counter = counter_dict[prefix]
        while True:
            result = prefix + str(counter)
            counter += 1
            if not soup.find_all(id=result):
                break
        counter_dict[prefix] = counter
        return result
    for animation_element in animation_elements:
        # Name the animation and make sure its target element has an id.
        if animation_element.has_attr('id'):
            animation_name = animation_element['id']
        else:
            animation_name = generateId('anim')
        parent = animation_element.parent
        if parent.has_attr('id'):
            targetName = parent['id']
        else:
            targetName = generateId(parent.name)
            parent['id'] = targetName
        if animation_element.name == 'animateTransform':
            if len(parent.find_all('animateTransform')) > 1:
                quit_unsupported('multiple animateTransform on element')
            if not animation_element.has_attr('type'):
                quit_unsupported('animateTransform element without type')
            transform_type = animation_element['type']
        if animation_element.name == 'animateMotion':
            # Motion is expressed through the CSS motion-path properties;
            # the keyframes then animate motion-offset from 0% to 100%.
            if len(parent.find_all('animateMotion')) > 1:
                quit_unsupported('multiple animateMotion on element')
            if animation_element.has_attr('rotate'):
                if animation_element['rotate'] == 'auto':
                    motion_rotation = 'auto'
                elif animation_element['rotate'] == 'auto-reverse':
                    motion_rotation = 'reverse'
                else:
                    motion_rotation = animation_element['rotate'] + 'deg'
            else:
                motion_rotation = '0deg'
            motion_path = 'none'
            # Path sources, in increasing precedence: from/to, values,
            # path attribute, then an <mpath> reference.
            if(animation_element.has_attr('from') and
               animation_element.has_attr('to')):
                motion_path = ('path(\"M' + animation_element['from'] +
                               ' L' + animation_element['to'] + '\")')
            if animation_element.has_attr('values'):
                segments = re.split(';', animation_element['values'].strip())
                if segments[-1] == '':
                    segments = segments[:-1]
                if len(segments) < 2:
                    quit_unsupported('less than 2 values')
                motion_path = 'path(\"M' + ' L'.join(segments) + '\")'
            if animation_element.has_attr('path'):
                motion_path = 'path(\"' + animation_element['path'] + '\")'
            if(animation_element.mpath and
               animation_element.mpath.has_attr('xlink:href')):
                href = animation_element.mpath['xlink:href']
                if href[:1] == '#':
                    path_element = soup.find(id=href[1:])
                    if path_element.has_attr('d'):
                        motion_path = 'path(\"' + path_element['d'] + '\")'
            style.string += ('\n#' + targetName +
                             ' { motion-path: ' + motion_path +
                             '; motion-rotation: ' + motion_rotation + '; }')
            attribute_name = 'motion-offset'
        else:
            if not animation_element.has_attr('attributeName'):
                quit_unsupported('animation element without attributeName')
            attribute_name = animation_element['attributeName']
        if not animation_element.has_attr('dur'):
            quit_unsupported('animation element without dur')
        elif animation_element['dur'] == 'indefinite':
            quit_unsupported('animation element with indefinite dur')
        # Build the list of animation values.
        if animation_element.name == 'set':
            if(animation_element.has_attr('from') or
               animation_element.has_attr('by') or
               animation_element.has_attr('values') or
               animation_element.has_attr('keyTimes') or
               animation_element.has_attr('keySplines') or
               animation_element.has_attr('calcMode') or
               animation_element.has_attr('additive') or
               animation_element.has_attr('accumulate')):
                quit_unsupported('unexpected attribute for set')
            values = [animation_element['to'],
                      animation_element['to']]
        elif animation_element.name == 'animateMotion':
            values = ['0%', '100%']
        elif animation_element.has_attr('values'):
            values = re.split(';', animation_element['values'].strip())
            if values[-1] == '':
                values = values[:-1]
            if len(values) < 2:
                quit_unsupported('less than 2 values')
            values = map(lambda s: s.strip(), values)
        elif(animation_element.has_attr('from') and
             animation_element.has_attr('to')):
            values = [animation_element['from'],
                      animation_element['to']]
        else:
            quit_unsupported('animation element without from/to or values')
        # Key times: either explicit, or evenly spaced across the values.
        if animation_element.has_attr('keyTimes'):
            key_times = re.split(';', animation_element['keyTimes'].strip())
            if key_times[-1] == '':
                key_times = key_times[:-1]
            if len(key_times) < 2:
                quit_unsupported('less than 2 keyTimes')
            key_times = map(float, key_times)
        else:
            def key_time(index):
                return index * 1. / (len(values) - 1)
            key_times = map(key_time, range(len(values)))
        if len(key_times) != len(values):
            quit_unsupported('values and keyTimes with different lengths')
        if animation_element.has_attr('keySplines'):
            quit_unsupported('keySplines')
        # Timing attributes; only simple clock-value begin offsets are
        # supported (no event- or syncbase-based begin).
        if animation_element.has_attr('begin'):
            begin = animation_element['begin']
            if 'begin' in begin or 'end' in begin or 'on' in begin:
                quit_unsupported('begin')
            begin = clock_value_to_milliseconds(begin)
        else:
            begin = None
        if animation_element.has_attr('end'):
            quit_unsupported('end')
        if animation_element.has_attr('min'):
            quit_unsupported('min')
        if animation_element.has_attr('max'):
            quit_unsupported('max')
        if animation_element.has_attr('restart'):
            quit_unsupported('restart')
        duration_ms = clock_value_to_milliseconds(animation_element['dur'])
        if animation_element.has_attr('repeatDur'):
            if animation_element['repeatDur'] == 'indefinite':
                repeat_count = 'indefinite'
            else:
                numerator = float(clock_value_to_milliseconds(
                    animation_element['repeatDur']))
                denominator = float(duration_ms)
                if denominator == 0.:
                    quit_unsupported('duration 0 with repeatDur')
                repeat_count = double_to_string(numerator / denominator)
        else:
            repeat_count = 'indefinite'
        if animation_element.has_attr('repeatCount'):
            # We choose the minumum of
            # repeatDur / dur and repeatCount.
            if animation_element['repeatCount'] != 'indefinite':
                if repeat_count != 'indefinite':
                    first = float(repeat_count)
                    second = float(animation_element['repeatCount'])
                    smallest = min(first, second)
                    repeat_count = double_to_string(smallest)
                else:
                    repeat_count = animation_element['repeatCount']
        elif not animation_element.has_attr('repeatDur'):
            repeat_count = '1'
        fill_mode = 'none'
        if animation_element.has_attr('fill'):
            if animation_element['fill'] == 'freeze':
                fill_mode = 'forwards'
            elif animation_element['fill'] != 'remove':
                quit_unsupported('fill \"' + animation_element['fill'] + '\"')
        if animation_element.has_attr('calcMode'):
            quit_unsupported('calcMode')
        if animation_element.has_attr('by'):
            quit_unsupported('by')
        if animation_element.has_attr('additive'):
            quit_unsupported('additive')
        if animation_element.has_attr('accumulate'):
            quit_unsupported('accumulate')
        # Normalise the values into CSS syntax where needed.
        if animation_element.name == 'animateTransform':
            def convert(value):
                return transform_to_css(transform_type, value)
            values = map(convert, values)
        if attribute_name == 'd':
            def convert(value):
                return "path('" + value + "')"
            values = map(convert, values)
        elif attribute_name in LENGTH_PROPERTIES:
            def convert(value):
                if value[-1].isalpha():
                    return value
                return value + 'px'
            values = map(convert, values)
        if mode == MODE_CSS:
            # Emit an animation shorthand plus a @keyframes rule.
            attribute_duration = ' ' + milliseconds_to_css(duration_ms)
            animation_timing_function = ' linear'
            if begin is not None:
                animation_delay = ' ' + milliseconds_to_css(begin)
            else:
                animation_delay = ''
            if repeat_count == '1':
                animation_iteration_count = ''
            elif repeat_count == 'indefinite':
                animation_iteration_count = ' infinite'
            else:
                animation_iteration_count = ' ' + repeat_count
            animation_direction = ''
            if fill_mode == 'none':
                animation_fill_mode = ''
            else:
                animation_fill_mode = ' ' + fill_mode
            style.string += ('\n#' + targetName +
                             ' { animation: ' + animation_name +
                             attribute_duration + animation_timing_function +
                             animation_delay + animation_iteration_count +
                             animation_direction + animation_fill_mode + '; }')
            style.string += '\n@keyframes ' + animation_name + ' {'
            for index in range(len(values)):
                percentage = double_to_string(key_times[index] * 100) + '%'
                style.string += (' ' + percentage + ' { ' +
                                 attribute_name + ': ' + values[index] + '; }')
            style.string += ' }'
        else:
            # MODE_WA: emit an element.animate(keyframes, timing) call.
            keyframes = '['
            for index in range(len(values)):
                keyframes += ' { '
                if animation_element.has_attr('keyTimes'):
                    keyframes += ('offset: ' +
                                  double_to_string(key_times[index]) + ', ')
                keyframes += (to_camel_case(attribute_name) + ': '
                              '\"' + values[index] + '\" },')
            keyframes = keyframes[:-1] + ' ]'
            if fill_mode == 'none' and repeat_count == '1' and begin is None:
                timing = duration_ms
            else:
                timing = '{ duration: ' + duration_ms
                if begin is not None:
                    timing += ', delay: ' + begin
                if fill_mode != 'none':
                    timing += ', fill: \"' + fill_mode + '\"'
                if repeat_count == 'indefinite':
                    timing += ', iterations: Infinity'
                elif repeat_count != '1':
                    timing += ', iterations: ' + repeat_count
                timing += ' }'
            script.string += ('\nwindow.onload = function() { '
                              'document.getElementById("' + targetName + '").'
                              'animate(' + keyframes + ', ' + timing + '); };')
        # The SMIL element has been fully translated; drop it.
        animation_element.extract()
def translate_file(mode, input_path, output_path):
    """Translate one document from *input_path* into *output_path*.

    MODE_FRAME writes a small comparison page of iframes instead of
    translating; other modes parse the input, rewrite the SMIL in place
    and write the prettified result.
    """
    print(output_path)
    if mode == MODE_WA and input_path.endswith('.svg'):
        quit_unsupported('JavaScript in .svg images')
    if mode == MODE_FRAME:
        input_name = os.path.split(input_path)[1]
        frame_markup = ('<!DOCTYPE html>'
                        '<style>iframe { width: 25% }</style>\n'
                        '<iframe src="../css/' + input_name + '">'
                        '</iframe>\n'
                        '<iframe src="../smil/' + input_name + '">'
                        '</iframe>\n')
        if not input_name.endswith('.svg'):
            frame_markup += ('<iframe src="../wa/' + input_name + '">'
                             '</iframe>\n')
        with open(output_path, 'w') as output_file:
            output_file.write(frame_markup)
        return
    # XML-ish inputs need the strict XML parser; everything else goes
    # through html5lib.
    if input_path.endswith(('.svg', '.xml', '.xhtml')):
        parser = 'lxml-xml'
    else:
        parser = 'html5lib'
    with open(input_path, 'r') as input_file:
        soup = BeautifulSoup(input_file, parser)
    translate_soup(soup, mode)
    with open(output_path, 'w') as output_file:
        output_file.write(soup.prettify())
def main():
    """Entry point: validate argv and translate one file, or every file
    in a directory when both paths are directories."""
    if len(sys.argv) != 4:
        quit_usage()
    mode, input_path, output_path = sys.argv[1], sys.argv[2], sys.argv[3]
    if mode not in (MODE_CSS, MODE_WA, MODE_FRAME):
        quit_usage()
    # Keep the short-circuit: output_path is only stat'ed when the input
    # is a directory, so a not-yet-existing output file is fine.
    both_directories = (stat.S_ISDIR(os.stat(input_path).st_mode) and
                        stat.S_ISDIR(os.stat(output_path).st_mode))
    if not both_directories:
        translate_file(mode, input_path, output_path)
        return
    for filename in os.listdir(input_path):
        if mode == MODE_WA and filename.endswith('.svg'):
            # Skip Web Animations as JavaScript is not supported in images.
            continue
        if mode == MODE_FRAME:
            output_filename = filename.rsplit('.', 1)[0] + '.html'
        else:
            output_filename = filename
        translate_file(mode,
                       path.join(input_path, filename),
                       path.join(output_path, output_filename))
|
from copy import deepcopy
from bbpyp.message_bus.abstract_publisher import AbstractPublisher
from bbpyp.message_bus.abstract_subscriber import AbstractSubscriber
from bbpyp.common.exception.bbpyp_value_error import BbpypValueError
class TopicChannel:
    """Pub/sub channel for a single topic.

    Registered publishers and subscribers are each handed a cloned end of
    an in-memory async channel when start() runs. Connect/disconnect
    events coordinate startup and shutdown, and channels can be linked so
    one topic's subscribers drain into another topic's message queue.
    """

    # Context-service keys used to stash per-context state.
    __CONTEXT_ID_KEY = "CONTEXT_ID"
    __LINKED_DISCONNECT_EVENT_KEY = "__LINKED_DISCONNECT_EVENT_KEY"
    __LINKED_TOPIC_KEY = "__LINKED_TOPIC_KEY"

    def __init__(self, topic, logger, channel_topic_config, channel_topic_config_default, channel_max_buffer_size, async_service, context_service):
        """Validate the per-topic configuration and pre-create the
        connect/disconnect events; the channel connections themselves are
        created lazily by start().

        Raises BbpypValueError when a config argument has the wrong type.
        """
        if not isinstance(channel_topic_config, dict) and channel_topic_config is not None:
            raise BbpypValueError("channel_topic_config", channel_topic_config,
                                  "channel_topic_config must be of type dict or None")
        if not isinstance(channel_topic_config_default, dict):
            raise BbpypValueError("channel_topic_config_default", channel_topic_config_default,
                                  "channel_topic_config_default must be of type dict")
        self._started = False
        self._topic = topic
        self._logger = logger
        # Deep copies so later mutation by the caller cannot change the
        # channel's behaviour.
        self._channel_topic_config = deepcopy(channel_topic_config) if channel_topic_config else {}
        self._channel_topic_config_default = deepcopy(channel_topic_config_default)
        self._async_service = async_service
        self._context_service = context_service
        self._publisher_connection_source = None
        self._subscriber_connection_source = None
        self._publisher_connection_clones = []
        self._subscriber_connection_clones = []
        self._publishers = []
        self._subscribers = []
        _channel_topic_config = self._get_channel_topic_config(topic)
        # One clone per configured concurrent worker, plus one.
        self._number_of_publisher_clones = _channel_topic_config["publish_concurrency"] + 1
        self._number_of_subscriber_clones = _channel_topic_config["subscribe_concurrency"] + 1
        self._channel_max_buffer_size = channel_max_buffer_size
        self._publisher_connect_event = self._async_service.create_event()
        self._subscriber_connect_event = self._async_service.create_event()
        self._publisher_disconnect_event = self._async_service.create_event()
        self._subscriber_disconnect_event = self._async_service.create_event()

    def _get_channel_topic_config(self, topic):
        """Return the config for *topic*, filling missing keys from the
        default config."""
        _channel_topic_config = self._channel_topic_config[topic] if topic in self._channel_topic_config else {
        }
        for key, val in self._channel_topic_config_default.items():
            if key not in _channel_topic_config:
                _channel_topic_config[key] = val
        return _channel_topic_config

    def _create_connections(self):
        """Create the underlying channel and clone an end per worker."""
        publisher_connection, subscriber_connection = self._async_service.create_channel(
            self._channel_max_buffer_size)
        self._publisher_connection_source = publisher_connection
        self._subscriber_connection_source = subscriber_connection
        self._publisher_connection_clones = [self._publisher_connection_source.clone(
        ) for i in range(0, self._number_of_publisher_clones)]
        self._subscriber_connection_clones = [self._subscriber_connection_source.clone(
        ) for i in range(0, self._number_of_subscriber_clones)]

    def register_publisher(self, publisher, **kwargs):
        """Queue *publisher* (an AbstractPublisher) for connection at
        start(); extra kwargs are forwarded to connect_publisher."""
        if not isinstance(publisher, AbstractPublisher):
            # FIX: was MessageBusValueError, which is not defined or
            # imported anywhere in this module (NameError at runtime);
            # BbpypValueError matches the validation style used in __init__.
            raise BbpypValueError("publisher", publisher,
                                  f"The publisher being registered for topic [{self.topic}] must be of type [{type(AbstractPublisher)}], but was of type [{type(publisher)}]")
        self._publishers.append((publisher, kwargs))

    def register_subscriber(self, subscriber, **kwargs):
        """Queue *subscriber* (an AbstractSubscriber) for connection at
        start(); extra kwargs are forwarded to connect_subscriber."""
        if not isinstance(subscriber, AbstractSubscriber):
            # FIX: was MessageBusValueError (undefined); see register_publisher.
            raise BbpypValueError("subscriber", subscriber,
                                  f"The subscriber being registered for topic [{self.topic}] must be of type [{type(AbstractSubscriber)}], but was of type [{type(subscriber)}]")
        self._subscribers.append((subscriber, kwargs))

    async def start(self, channel_context):
        """Create connections and start every registered publisher and
        subscriber inside *channel_context*. Idempotent: subsequent calls
        just yield control once."""
        if self._started:
            await self._async_service.sleep()
            return
        self._create_connections()
        context_id = self._context_service.get_context_variable(type(self).__CONTEXT_ID_KEY)
        self._logger.info("[{}] starting connections", context_id)
        for publisher, kwargs in self._publishers:
            publisher.connect_event = self._publisher_connect_event
            publisher.disconnect_event = self._publisher_disconnect_event
            for cloned_connection in self._publisher_connection_clones:
                self._context_service.set_context_variable(
                    type(self).__CONTEXT_ID_KEY, f"{context_id}P")
                self._start_publisher(channel_context, publisher, cloned_connection, **kwargs)
        _linked_channel_topic_config = self._get_channel_topic_config(
            self.__linked_to_channel_topic)
        for subscriber, kwargs in self._subscribers:
            subscriber.connect_event = self._subscriber_connect_event
            subscriber.disconnect_event = self._subscriber_disconnect_event
            if self.__linked_from_channel_disconnect_event is not None:
                self._logger.debug(
                    "opening subscriber message queue link: {} --> {} of type {}", subscriber.topic, self.__linked_to_channel_topic, _linked_channel_topic_config["queue_type"])
                subscriber.open_message_queue(
                    self.__linked_to_channel_topic, self.__linked_from_channel_disconnect_event, _linked_channel_topic_config["queue_type"])
            for cloned_connection in self._subscriber_connection_clones:
                self._context_service.set_context_variable(
                    type(self).__CONTEXT_ID_KEY, f"{context_id}S")
                self._start_subscriber(channel_context, subscriber, cloned_connection,
                                       message_queue_topic=self.__linked_to_channel_topic, **kwargs)
        # Links are consumed by start(); clear them for the next context.
        self.__linked_from_channel_disconnect_event = None
        self.__linked_to_channel_topic = None
        self._started = True

    async def stop(self):
        """Wait for all publisher and subscriber connections to drain,
        then close both source ends of the channel."""
        # NOTE(review): this reads __LINKED_DISCONNECT_EVENT_KEY as the
        # "context id" rather than __CONTEXT_ID_KEY -- looks like a
        # copy/paste slip, but it only affects log output; preserved as-is.
        context_id = self._context_service.get_context_variable(
            type(self).__LINKED_DISCONNECT_EVENT_KEY)
        self._logger.info(
            "[{}] waiting for topic {} publisher connections to close...", context_id, self.topic)
        await self._publisher_disconnect_event.wait()
        await self._publisher_connection_source.aclose()
        self._logger.info(
            "[{}] waiting for topic {} subscriber connections to close...", context_id, self.topic)
        await self._subscriber_disconnect_event.wait()
        await self._subscriber_connection_source.aclose()
        self._logger.info("all connections for topic {} have been closed.", self.topic)

    def open_linked_topics(self, linked_to_topic):
        """Link this channel's subscribers to *linked_to_topic*'s message
        queue; the link is consumed on the next start()."""
        self._logger.debug("opening linked topic: {} --> {}", self.topic, linked_to_topic)
        self.__linked_from_channel_disconnect_event = self._subscriber_disconnect_event
        self.__linked_to_channel_topic = linked_to_topic

    def _start_publisher(self, channel_context, publisher, connection, **kwargs):
        """Bind *publisher* to this topic and schedule its connect task."""
        publisher.topic = self.topic
        self._logger.debug(
            "{} connecting publisher {} with kwargs: {}", self._context_service.get_context_variable(type(self).__CONTEXT_ID_KEY), publisher, kwargs)
        channel_context.start_soon(publisher.connect_publisher, connection, kwargs)

    def _start_subscriber(self, channel_context, subscriber, connection, **kwargs):
        """Bind *subscriber* to this topic and schedule its connect task."""
        subscriber.topic = self.topic
        self._logger.debug(
            "{}: connecting subscriber {} with kwargs: {}", self._context_service.get_context_variable(type(self).__CONTEXT_ID_KEY), subscriber, kwargs)
        channel_context.start_soon(subscriber.connect_subscriber,
                                   connection, self._publisher_connect_event, self._publisher_disconnect_event, kwargs)

    @property
    def topic(self):
        # The topic name this channel serves (read-only).
        return self._topic

    @property
    def __linked_from_channel_disconnect_event(self):
        # Disconnect event of the channel this one is linked from, kept in
        # the context service so links are per-execution-context.
        return self._context_service.get_context_variable(type(self).__LINKED_DISCONNECT_EVENT_KEY)

    @__linked_from_channel_disconnect_event.setter
    def __linked_from_channel_disconnect_event(self, disconnect_event):
        self._context_service.set_context_variable(
            type(self).__LINKED_DISCONNECT_EVENT_KEY, disconnect_event)

    @property
    def __linked_to_channel_topic(self):
        # Topic this channel's subscribers drain into, also context-scoped.
        return self._context_service.get_context_variable(type(self).__LINKED_TOPIC_KEY)

    @__linked_to_channel_topic.setter
    def __linked_to_channel_topic(self, topic):
        self._context_service.set_context_variable(
            type(self).__LINKED_TOPIC_KEY, topic)
|
import logging
import inspect
import math
import time
import numpy as np
import pynisher
from smac.tae.execute_ta_run import StatusType, ExecuteTARun
from smac.utils.constants import MAXINT
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2015, ML4AAD"
__license__ = "3-clause BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.0.2"
class AbstractTAFunc(ExecuteTARun):
    """Baseclass to execute target algorithms which are python functions.

    **Note:*** Do not use directly

    Attributes
    ----------
    memory_limit
    use_pynisher
    """

    def __init__(self, ta, stats=None, runhistory=None, run_obj: str="quality",
                 memory_limit: int=None, par_factor: int=1,
                 cost_for_crash: float=float(MAXINT),
                 use_pynisher: bool=True):
        """
        Abstract class for having a function as target algorithm

        Parameters
        ----------
        ta : callable
            Function (target algorithm) to be optimized.
        stats: Stats()
            stats object to collect statistics about runtime and so on
        runhistory: RunHistory
            runhistory to keep track of all runs; only used if set
        run_obj: str
            run objective of SMAC
        memory_limit : int, optional
            Memory limit (in MB) that will be applied to the target algorithm.
        par_factor: int
            penalization factor
        cost_for_crash : float
            cost that is used in case of crashed runs (including runs
            that returned NaN or inf)
        use_pynisher: bool
            use pynisher to limit resources;
            if disabled
              * TA func can use as many resources
                as it wants (time and memory) --- use with caution
              * all runs will be returned as SUCCESS if returned value is not None
        """
        # NOTE: in the original code this docstring appeared *after* the
        # super().__init__ call, making it a dead string statement; it is
        # now a real docstring.
        super().__init__(ta=ta, stats=stats, runhistory=runhistory,
                         run_obj=run_obj, par_factor=par_factor,
                         cost_for_crash=cost_for_crash)
        # Inspect the callable once to learn whether it accepts a seed
        # (2nd parameter) and an instance (3rd parameter).
        signature = inspect.signature(ta).parameters
        self._accepts_seed = len(signature) > 1
        self._accepts_instance = len(signature) > 2
        if memory_limit is not None:
            memory_limit = int(math.ceil(memory_limit))
        self.memory_limit = memory_limit
        self.use_pynisher = use_pynisher

    def run(self, config, instance=None,
            cutoff=None,
            seed=12345,
            instance_specific="0"):
        """Runs target algorithm <self.ta> with configuration <config> for at
        most <cutoff> seconds, allowing it to use at most <memory_limit> RAM.

        Whether the target algorithm is called with the <instance> and
        <seed> depends on the subclass implementing the actual call to
        the target algorithm.

        Parameters
        ----------
        config : dictionary (or similar)
            Dictionary param -> value
        instance : str
            Problem instance
        cutoff : int, optional
            Wallclock time limit of the target algorithm. If no value is
            provided no limit will be enforced.
        seed : int
            Random seed
        instance_specific: str
            Instance specific information (e.g., domain file or solution)

        Returns
        -------
        status: enum of StatusType (int)
            {SUCCESS, TIMEOUT, CRASHED, ABORT}
        cost: float
            cost/regret/quality/runtime (float) (None, if not returned by TA)
        runtime: float
            runtime (None if not returned by TA)
        additional_info: dict
            all further additional run information
        """
        # walltime for pynisher has to be a rounded up integer
        if cutoff is not None:
            cutoff = int(math.ceil(cutoff))
            # FIX: cap the cutoff at 65535 -- apparently the largest wall
            # time pynisher accepts. The original comparison was inverted
            # (`cutoff < 65535`), which raised every *smaller* cutoff up
            # to 65535 and effectively disabled the timeout.
            cutoff = 65535 if cutoff > 65535 else cutoff
        arguments = {'logger': logging.getLogger("pynisher"),
                     'wall_time_in_s': cutoff,
                     'mem_in_mb': self.memory_limit}
        # Only pass seed/instance if the target function accepts them.
        obj_kwargs = {}
        if self._accepts_seed:
            obj_kwargs['seed'] = seed
        if self._accepts_instance:
            obj_kwargs['instance'] = instance
        if self.use_pynisher:
            # Wrap the TA so pynisher enforces the wall time/memory limits.
            obj = pynisher.enforce_limits(**arguments)(self.ta)
            rval = self._call_ta(obj, config, **obj_kwargs)
            if isinstance(rval, tuple):
                result = rval[0]
                additional_run_info = rval[1]
            else:
                result = rval
                additional_run_info = {}
            # Translate the pynisher exit status into a SMAC StatusType.
            if obj.exit_status is pynisher.TimeoutException:
                status = StatusType.TIMEOUT
                cost = self.crash_cost
            elif obj.exit_status is pynisher.MemorylimitException:
                status = StatusType.MEMOUT
                cost = self.crash_cost
            elif obj.exit_status == 0 and result is not None:
                status = StatusType.SUCCESS
                cost = result
            else:
                status = StatusType.CRASHED
                cost = self.crash_cost
            runtime = float(obj.wall_clock_time)
        else:
            # Unlimited execution: any non-None result counts as SUCCESS.
            start_time = time.time()
            result = self.ta(config, **obj_kwargs)
            if result is not None:
                status = StatusType.SUCCESS
                cost = result
            else:
                status = StatusType.CRASHED
                cost = self.crash_cost
            runtime = time.time() - start_time
            additional_run_info = {}
        return status, cost, runtime, additional_run_info

    def _call_ta(self, obj, config, instance, seed):
        """Subclass hook: perform the actual call of the (wrapped) TA."""
        raise NotImplementedError()
class ExecuteTAFuncDict(AbstractTAFunc):
    """Evaluate function for given configuration and resource limit.

    The configuration is handed to the target algorithm as-is (a
    Configuration behaves like a dictionary). The target algorithm must
    implement one of:

    * ``target_algorithm(config: Configuration) -> Union[float, Tuple[float, Any]]``
    * ``target_algorithm(config: Configuration, seed: int) -> Union[float, Tuple[float, Any]]``
    * ``target_algorithm(config: Configuration, seed: int, instance: str) -> Union[float, Tuple[float, Any]]``

    and may return either a float (the loss) or a tuple of that float and
    additional run information. The correct call signature is discovered
    via inspection in the base class.

    Parameters
    ----------
    ta : callable
        Function (target algorithm) to be optimized.
    stats : smac.stats.stats.Stats, optional
        Stats object to collect statistics about runtime etc.
    run_obj : str, optional
        Run objective (runtime or quality)
    runhistory : RunHistory, optional
        runhistory to keep track of all runs; only used if set
    memory_limit : int, optional
        Memory limit (in MB) that will be applied to the target algorithm.
    par_factor : int, optional
        Penalized average runtime factor. Only used when `run_obj='runtime'`
    """

    def _call_ta(self, obj, config, **kwargs):
        """Invoke the (possibly pynisher-wrapped) TA, passing the
        configuration through unchanged."""
        return obj(config, **kwargs)
class ExecuteTAFuncArray(AbstractTAFunc):
    """Evaluate function for given configuration and resource limit.

    Passes the configuration as an array-like to the target algorithm. The
    target algorithm needs to implement one of the following signatures:

    * ``target_algorithm(config: np.ndarray) -> Union[float, Tuple[float, Any]]``
    * ``target_algorithm(config: np.ndarray, seed: int) -> Union[float, Tuple[float, Any]]``
    * ``target_algorithm(config: np.ndarray, seed: int, instance: str) -> Union[float, Tuple[float, Any]]``

    The target algorithm can either return a float (the loss), or a tuple
    with the first element being a float and the second being additional run
    information. The correct call signature is discovered via inspection
    in the base class.

    Parameters
    ----------
    ta : callable
        Function (target algorithm) to be optimized.
    stats : smac.stats.stats.Stats, optional
        Stats object to collect statistics about runtime etc.
    run_obj: str, optional
        Run objective (runtime or quality)
    runhistory: RunHistory, optional
        runhistory to keep track of all runs; only used if set
    memory_limit : int, optional
        Memory limit (in MB) that will be applied to the target algorithm.
    par_factor: int, optional
        Penalized average runtime factor. Only used when `run_obj='runtime'`
    """

    def _call_ta(self, obj, config, **kwargs):
        """Flatten *config* into a 1-D float array (hyperparameters sorted
        by name) and invoke the (possibly pynisher-wrapped) TA."""
        # FIX: np.float was removed in NumPy 1.20; it was an alias for the
        # builtin float, so dtype=float preserves behaviour (float64).
        x = np.array([val for _, val in sorted(config.get_dictionary().items())],
                     dtype=float)
        return obj(x, **kwargs)
|
# -*- coding: utf-8 -*-
"""
Deep Q-network implementation with chainer and rlglue
Copyright (c) 2015 <NAME> All Right Reserved.
"""
import copy
import pickle
import numpy as np
import scipy.misc as spm
from chainer import cuda, FunctionSet, Variable, optimizers
import chainer.functions as F
from rlglue.agent.Agent import Agent
from rlglue.agent import AgentLoader as AgentLoader
from rlglue.types import Action
class DQN_class:
    """Deep Q-Network core: Chainer model, replay buffer and training step.

    Holds the convolutional Q-network (on GPU), an RMSpropGraves optimizer
    and a fixed-size circular history buffer ``D`` used for experience
    replay, plus epsilon-greedy action selection.

    NOTE(review): Python 2 / legacy-Chainer code (print statements, xrange,
    FunctionSet); it will not run unmodified on Python 3. Also,
    ``target_model_update_freq`` is declared but never used in this class —
    no separate target network is maintained here; confirm against the
    original DQN recipe if that matters.
    """
    # Hyper-Parameters
    gamma = 0.99  # Discount factor
    initial_exploration = 10**4  # Initial exploration steps. original: 5x10^4
    replay_size = 32  # Replay (batch) size
    target_model_update_freq = 10**4  # Target update frequency. original: 10^4 (unused here)
    data_size = 10**5  # Data size of history. original: 10^6
    def __init__(self, enable_controller=[0, 3, 4]):
        # NOTE(review): mutable default argument; harmless here because the
        # list is only read, never mutated.
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller  # Default setting : "Pong"
        print "Initializing DQN..."
        # Initialization for Chainer 1.1.0 or older.
        # print "CUDA init"
        # cuda.init()
        print "Model Building"
        # Two conv layers + one hidden linear layer; q_value weights start at
        # zero so initial Q estimates are uniform.
        self.model = FunctionSet(
            l1=F.Convolution2D(4, 16, ksize=8, stride=4, wscale=np.sqrt(2)),
            l2=F.Convolution2D(16, 32, ksize=4, stride=2, wscale=np.sqrt(2)),
            l3=F.Linear(2592, 256),
            q_value=F.Linear(256, self.num_of_actions,
                             initialW=np.zeros((self.num_of_actions, 256),
                                               dtype=np.float32))
        ).to_gpu()
        print "Initizlizing Optimizer"
        self.optimizer = optimizers.RMSpropGraves(lr=0.0002, alpha=0.3, momentum=0.2)
        self.optimizer.setup(self.model.collect_parameters())
        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        # NOTE(review): np.bool is removed in modern NumPy; fine for the
        # legacy environment this targets.
        self.D = [np.zeros((self.data_size, 4, 84, 84), dtype=np.uint8),
                  np.zeros(self.data_size, dtype=np.uint8),
                  np.zeros((self.data_size, 1), dtype=np.int8),
                  np.zeros((self.data_size, 4, 84, 84), dtype=np.uint8),
                  np.zeros((self.data_size, 1), dtype=np.bool)]
    def forward(self, state, action, Reward, state_dash, episode_end):
        """Compute the DQN regression loss for a minibatch; returns (loss, Q)."""
        num_of_batch = state.shape[0]
        s = Variable(state)
        s_dash = Variable(state_dash)
        Q = self.Q_func(s)  # Get Q-value
        # Generate Target Signals
        max_Q_dash_ = self.Q_func(s_dash)
        tmp = list(map(np.max, max_Q_dash_.data.get()))
        max_Q_dash = np.asanyarray(tmp, dtype=np.float32)
        target = np.asanyarray(Q.data.get(), dtype=np.float32)
        for i in xrange(num_of_batch):
            if not episode_end[i][0]:
                # Non-terminal: clipped (sign) reward plus discounted bootstrap value.
                tmp_ = np.sign(Reward[i]) + self.gamma * max_Q_dash[i]
            else:
                tmp_ = np.sign(Reward[i])
            # Only the taken action's target differs from the current estimate,
            # so all other actions contribute zero gradient.
            target[i, self.action_to_index(action[i])] = tmp_
        loss = F.mean_squared_error(Variable(cuda.to_gpu(target)), Q)
        return loss, Q
    def stockExperience(self, time,
                        state, action, reward, state_dash,
                        episode_end_flag):
        """Store one transition at the circular-buffer slot for ``time``.

        On episode end there is no successor state, so D[3] keeps the old
        contents of that slot.
        """
        data_index = time % self.data_size
        if episode_end_flag is True:
            self.D[0][data_index] = state
            self.D[1][data_index] = action
            self.D[2][data_index] = reward
        else:
            self.D[0][data_index] = state
            self.D[1][data_index] = action
            self.D[2][data_index] = reward
            self.D[3][data_index] = state_dash
        self.D[4][data_index] = episode_end_flag
    def experienceReplay(self, time):
        """Sample a minibatch from D and take one optimizer step.

        Does nothing until ``initial_exploration`` steps have been collected.
        """
        if self.initial_exploration < time:
            # Pick up replay_size number of samples from the Data
            if time < self.data_size:  # during the first sweep of the History Data
                replay_index = np.random.randint(0, time, (self.replay_size, 1))
            else:
                replay_index = np.random.randint(0, self.data_size, (self.replay_size, 1))
            s_replay = np.ndarray(shape=(self.replay_size, 4, 84, 84), dtype=np.float32)
            a_replay = np.ndarray(shape=(self.replay_size, 1), dtype=np.uint8)
            r_replay = np.ndarray(shape=(self.replay_size, 1), dtype=np.float32)
            s_dash_replay = np.ndarray(shape=(self.replay_size, 4, 84, 84), dtype=np.float32)
            episode_end_replay = np.ndarray(shape=(self.replay_size, 1), dtype=np.bool)
            for i in xrange(self.replay_size):
                s_replay[i] = np.asarray(self.D[0][replay_index[i]], dtype=np.float32)
                a_replay[i] = self.D[1][replay_index[i]]
                r_replay[i] = self.D[2][replay_index[i]]
                s_dash_replay[i] = np.array(self.D[3][replay_index[i]], dtype=np.float32)
                episode_end_replay[i] = self.D[4][replay_index[i]]
            s_replay = cuda.to_gpu(s_replay)
            s_dash_replay = cuda.to_gpu(s_dash_replay)
            # Gradient-based update
            self.optimizer.zero_grads()
            loss, _ = self.forward(s_replay, a_replay, r_replay, s_dash_replay, episode_end_replay)
            loss.backward()
            self.optimizer.update()
    def Q_func(self, state):
        """Forward pass of the Q-network; expects a GPU float32 batch."""
        h1 = F.relu(self.model.l1(state / 254.0))  # scale inputs in [0.0, 1.0]
        # NOTE(review): dividing by 254.0 (not 255.0) slightly exceeds 1.0 at
        # the maximum pixel value — presumably benign; confirm intent.
        h2 = F.relu(self.model.l2(h1))
        h3 = F.relu(self.model.l3(h2))
        Q = self.model.q_value(h3)
        return Q
    def e_greedy(self, state, epsilon):
        """Epsilon-greedy action selection; returns (action, raw Q array)."""
        s = Variable(state)
        Q = self.Q_func(s)
        Q = Q.data
        if np.random.rand() < epsilon:
            index_action = np.random.randint(0, self.num_of_actions)
            print "RANDOM"
        else:
            index_action = np.argmax(Q.get())
            print "GREEDY"
        return self.index_to_action(index_action), Q
    def index_to_action(self, index_of_action):
        """Map a network output index to the emulator's controller code."""
        return self.enable_controller[index_of_action]
    def action_to_index(self, action):
        """Map an emulator controller code back to a network output index."""
        return self.enable_controller.index(action)
class dqn_agent(Agent):  # RL-glue Process
    """RL-Glue agent wrapping DQN_class.

    Handles frame preprocessing (intensity extraction + resize to 84x84),
    the 4-frame state stack, the epsilon schedule, and wiring rewards and
    actions into the replay buffer / learning step.

    NOTE(review): Python 2 code (print statements); see DQN_class.
    """
    lastAction = Action()
    policyFrozen = False  # when True the agent only evaluates, never learns
    def agent_init(self, taskSpec):
        """RL-Glue hook: reset counters and build the DQN."""
        # Some initializations for rlglue
        self.lastAction = Action()
        self.time = 0
        self.epsilon = 1.0  # Initial exploration rate
        # Pick a DQN from DQN_class
        self.DQN = DQN_class()  # Default is for "Pong".
    def agent_start(self, observation):
        """RL-Glue hook: first step of an episode; returns the first action."""
        # Get intensity from current observation array
        # NOTE(review): the 0b0001111 mask keeps the low 4 bits of each Atari
        # pixel byte — presumably the luminance nibble of the RL-Glue
        # encoding; confirm against the ALE observation format.
        tmp = np.bitwise_and(np.asarray(observation.intArray[128:]).reshape([210, 160]), 0b0001111)  # Get Intensity from the observation
        obs_array = (spm.imresize(tmp, (110, 84)))[110-84-8:110-8, :]  # Scaling: resize then crop 84 rows
        # Initialize State
        self.state = np.zeros((4, 84, 84), dtype=np.uint8)
        self.state[0] = obs_array
        state_ = cuda.to_gpu(np.asanyarray(self.state.reshape(1, 4, 84, 84), dtype=np.float32))
        # Generate an Action e-greedy
        returnAction = Action()
        action, Q_now = self.DQN.e_greedy(state_, self.epsilon)
        returnAction.intArray = [action]
        # Update for next step
        self.lastAction = copy.deepcopy(returnAction)
        self.last_state = self.state.copy()
        self.last_observation = obs_array
        return returnAction
    def agent_step(self, reward, observation):
        """RL-Glue hook: one environment step — preprocess, act, store, learn."""
        # Preproces
        tmp = np.bitwise_and(np.asarray(observation.intArray[128:]).reshape([210, 160]), 0b0001111)  # Get Intensity from the observation
        obs_array = (spm.imresize(tmp, (110, 84)))[110-84-8:110-8, :]  # Scaling
        obs_processed = np.maximum(obs_array, self.last_observation)  # Take maximum from two frames
        # Compose State : 4-step sequential observation
        self.state = np.asanyarray([self.state[1], self.state[2], self.state[3], obs_processed], dtype=np.uint8)
        state_ = cuda.to_gpu(np.asanyarray(self.state.reshape(1, 4, 84, 84), dtype=np.float32))
        # Exploration decays along the time sequence
        if self.policyFrozen is False:  # Learning ON/OFF
            if self.DQN.initial_exploration < self.time:
                # Anneal epsilon linearly from 1.0 down to a floor of 0.1.
                self.epsilon -= 1.0/10**6
                if self.epsilon < 0.1:
                    self.epsilon = 0.1
                eps = self.epsilon
            else:  # Initial Exploration Phase: act fully at random
                print "Initial Exploration : %d/%d steps" % (self.time, self.DQN.initial_exploration)
                eps = 1.0
        else:  # Evaluation: small fixed exploration
            print "Policy is Frozen"
            eps = 0.05
        # Generate an Action from e-greedy action selection
        returnAction = Action()
        action, Q_now = self.DQN.e_greedy(state_, eps)
        returnAction.intArray = [action]
        # Learning Phase
        if self.policyFrozen is False:  # Learning ON/OFF
            self.DQN.stockExperience(self.time, self.last_state, self.lastAction.intArray[0], reward, self.state, False)
            self.DQN.experienceReplay(self.time)
        # Simple text based visualization
        print ' Time Step %d / ACTION %d / REWARD %.1f / EPSILON %.6f / Q_max %3f' % (self.time, self.DQN.action_to_index(action), np.sign(reward), eps, np.max(Q_now.get()))
        # Updates for next step
        self.last_observation = obs_array
        # Update for next step
        if self.policyFrozen is False:
            self.lastAction = copy.deepcopy(returnAction)
            self.last_state = self.state.copy()
            self.time += 1
        return returnAction
    def agent_end(self, reward):  # Episode Terminated
        """RL-Glue hook: episode end — store the terminal transition and learn."""
        # Learning Phase
        if self.policyFrozen is False:  # Learning ON/OFF
            self.DQN.stockExperience(self.time, self.last_state, self.lastAction.intArray[0], reward, self.last_state, True)
            self.DQN.experienceReplay(self.time)
        # Simple text based visualization
        print ' REWARD %.1f / EPSILON %.5f' % (np.sign(reward), self.epsilon)
        # Time count
        if not self.policyFrozen:
            self.time += 1
    def agent_cleanup(self):
        """RL-Glue hook: nothing to release."""
        pass
    def agent_message(self, inMessage):
        """RL-Glue hook: freeze/unfreeze learning or pickle the model to disk."""
        if inMessage.startswith("freeze learning"):
            self.policyFrozen = True
            return "message understood, policy frozen"
        if inMessage.startswith("unfreeze learning"):
            self.policyFrozen = False
            return "message understood, policy unfrozen"
        if inMessage.startswith("save model"):
            # NOTE(review): text mode 'w' only works for pickle on
            # Python 2 / Unix; 'wb' would be safer.
            with open('dqn_model.dat', 'w') as f:
                pickle.dump(self.DQN.model, f)
            return "message understood, model saved"
# Entry point: hand the agent to RL-Glue, which connects to the rlglue server.
if __name__ == "__main__":
    AgentLoader.loadAgent(dqn_agent())
|
<reponame>grodansparadis/vscp-python-sensorpuck<gh_stars>1-10
from bluepy.btle import UUID, Peripheral, DefaultDelegate, AssignedNumbers
import struct
import math
def _TI_UUID(val):
    """Expand a 16-bit TI short UUID value into the full 128-bit TI base UUID."""
    full_val = 0xF0000000 + val
    return UUID("%08X-0451-4000-b000-000000000000" % full_val)
# Sensortag versions
AUTODETECT = "-"  # probe the device's services at connect time to pick a version
SENSORTAG_V1 = "v1"
SENSORTAG_2650 = "CC2650"
class SensorBase:
    """Common base for SensorTag sensors: lazy service/characteristic lookup
    plus enable/read/disable over the control and data characteristics."""
    # Derived classes should set: svcUUID, ctrlUUID, dataUUID
    sensorOn = struct.pack("B", 0x01)   # value written to ctrl to power the sensor on
    sensorOff = struct.pack("B", 0x00)  # value written to ctrl to power it off
    def __init__(self, periph):
        # Handles are resolved lazily in enable() so construction is cheap.
        self.periph = periph
        self.service = None
        self.ctrl = None
        self.data = None
    def enable(self):
        """Resolve the BLE handles (once) and power the sensor on."""
        if self.service is None:
            self.service = self.periph.getServiceByUUID(self.svcUUID)
        if self.ctrl is None:
            self.ctrl = self.service.getCharacteristics(self.ctrlUUID) [0]
        if self.data is None:
            self.data = self.service.getCharacteristics(self.dataUUID) [0]
        # Subclasses set sensorOn = None when enabling works differently.
        if self.sensorOn is not None:
            self.ctrl.write(self.sensorOn,withResponse=True)
    def read(self):
        """Return the raw bytes of the data characteristic."""
        return self.data.read()
    def disable(self):
        """Power the sensor off (no-op if enable() was never called)."""
        if self.ctrl is not None:
            self.ctrl.write(self.sensorOff)
    # Derived class should implement _formatData()
def calcPoly(coeffs, x):
    """Evaluate the quadratic c0 + c1*x + c2*x**2 given coeffs = [c0, c1, c2]."""
    c0, c1, c2 = coeffs
    return c0 + (c1 * x) + (c2 * x * x)
class IRTemperatureSensor(SensorBase):
    """SensorTag v1 IR thermopile: converts raw voltage/temperature words
    into (ambient, object) temperatures using TI's published polynomials."""
    svcUUID = _TI_UUID(0xAA00)
    dataUUID = _TI_UUID(0xAA01)
    ctrlUUID = _TI_UUID(0xAA02)
    zeroC = 273.15  # Kelvin
    tRef = 298.15   # reference die temperature (K) for the polynomials
    # Calibration polynomials from the TI SensorTag user guide (see link below).
    Apoly = [1.0, 1.75e-3, -1.678e-5]
    Bpoly = [-2.94e-5, -5.7e-7, 4.63e-9]
    Cpoly = [0.0, 1.0, 13.4]
    def __init__(self, periph):
        SensorBase.__init__(self, periph)
        self.S0 = 6.4e-14  # sensitivity factor
    def read(self):
        '''Returns (ambient_temp, target_temp) in degC'''
        # See http://processors.wiki.ti.com/index.php/SensorTag_User_Guide#IR_Temperature_Sensor
        # Data is two little-endian signed 16-bit words: object voltage and
        # ambient temperature (1/128 degC per LSB).
        (rawVobj, rawTamb) = struct.unpack('<hh', self.data.read())
        tAmb = rawTamb / 128.0
        Vobj = 1.5625e-7 * rawVobj
        tDie = tAmb + self.zeroC
        S = self.S0 * calcPoly(self.Apoly, tDie-self.tRef)
        Vos = calcPoly(self.Bpoly, tDie-self.tRef)
        fObj = calcPoly(self.Cpoly, Vobj-Vos)
        # Radiative transfer: solve T_obj^4 = T_die^4 + f(V)/S.
        tObj = math.pow( math.pow(tDie,4.0) + (fObj/S), 0.25 )
        return (tAmb, tObj - self.zeroC)
class IRTemperatureSensorTMP007(SensorBase):
    """TMP007 IR thermopile sensor (CC2650 SensorTag)."""
    svcUUID = _TI_UUID(0xAA00)
    dataUUID = _TI_UUID(0xAA01)
    ctrlUUID = _TI_UUID(0xAA02)
    SCALE_LSB = 0.03125  # degC per LSB of the 14-bit temperature registers

    def __init__(self, periph):
        SensorBase.__init__(self, periph)

    def read(self):
        '''Returns (ambient_temp, target_temp) in degC'''
        # http://processors.wiki.ti.com/index.php/CC2650_SensorTag_User's_Guide?keyMatch=CC2650&tisearch=Search-EN
        # Two little-endian 16-bit words; the low 2 bits of each are status
        # flags, so shift right by 2 before scaling.
        # (Stray C-style semicolons removed from the original.)
        (rawTobj, rawTamb) = struct.unpack('<hh', self.data.read())
        tObj = (rawTobj >> 2) * self.SCALE_LSB
        tAmb = (rawTamb >> 2) * self.SCALE_LSB
        return (tAmb, tObj)
class AccelerometerSensor(SensorBase):
    """SensorTag v1 accelerometer."""
    svcUUID = _TI_UUID(0xAA10)
    dataUUID = _TI_UUID(0xAA11)
    ctrlUUID = _TI_UUID(0xAA12)

    def __init__(self, periph):
        SensorBase.__init__(self, periph)
        # Firmware "1.4 " reports at a different resolution than later builds.
        self.scale = 64.0 if periph.firmwareVersion.startswith("1.4 ") else 16.0

    def read(self):
        '''Returns (x_accel, y_accel, z_accel) in units of g'''
        raw = struct.unpack('bbb', self.data.read())
        return tuple(axis / self.scale for axis in raw)
class MovementSensorMPU9250(SensorBase):
    """Shared MPU-9250 movement service (CC2650).

    The gyro/accel/mag wrapper classes enable their axes by OR-ing bits into
    a single 16-bit control register via enable()/disable().
    """
    svcUUID = _TI_UUID(0xAA80)
    dataUUID = _TI_UUID(0xAA81)
    ctrlUUID = _TI_UUID(0xAA82)
    sensorOn = None  # enabling is done through the control bitmask instead
    # Control-register bit assignments:
    GYRO_XYZ = 7
    ACCEL_XYZ = 7 << 3
    MAG_XYZ = 1 << 6
    ACCEL_RANGE_2G  = 0 << 8
    ACCEL_RANGE_4G  = 1 << 8
    ACCEL_RANGE_8G  = 2 << 8
    ACCEL_RANGE_16G = 3 << 8
    def __init__(self, periph):
        SensorBase.__init__(self, periph)
        self.ctrlBits = 0  # currently-enabled feature bits
    def enable(self, bits):
        """Enable the features in ``bits`` in addition to whatever is on."""
        SensorBase.enable(self)
        self.ctrlBits |= bits
        self.ctrl.write( struct.pack("<H", self.ctrlBits) )
    def disable(self, bits):
        """Disable only the features in ``bits``; others stay enabled."""
        self.ctrlBits &= ~bits
        self.ctrl.write( struct.pack("<H", self.ctrlBits) )
    def rawRead(self):
        """Return all nine 16-bit axis readings (gyro, accel, mag)."""
        dval = self.data.read()
        return struct.unpack("<hhhhhhhhh", dval)
class AccelerometerSensorMPU9250:
    """Accelerometer view onto a shared MovementSensorMPU9250 instance."""

    def __init__(self, sensor_):
        self.sensor = sensor_
        self.bits = self.sensor.ACCEL_XYZ | self.sensor.ACCEL_RANGE_4G
        self.scale = 8.0 / 32768.0  # TODO: why not 4.0, as documented?

    def enable(self):
        self.sensor.enable(self.bits)

    def disable(self):
        self.sensor.disable(self.bits)

    def read(self):
        '''Returns (x_accel, y_accel, z_accel) in units of g'''
        # Accelerometer axes are words 3..5 of the raw movement record.
        x, y, z = self.sensor.rawRead()[3:6]
        return (x * self.scale, y * self.scale, z * self.scale)
class HumiditySensor(SensorBase):
    """SensorTag v1 humidity/temperature sensor."""
    svcUUID = _TI_UUID(0xAA20)
    dataUUID = _TI_UUID(0xAA21)
    ctrlUUID = _TI_UUID(0xAA22)

    def __init__(self, periph):
        SensorBase.__init__(self, periph)

    def read(self):
        '''Returns (ambient_temp, rel_humidity)'''
        rawT, rawH = struct.unpack('<HH', self.data.read())
        # Conversion constants per the SensorTag guide — presumably the
        # SHT21 datasheet formulas; the low 2 bits of the humidity word are
        # status bits and are masked off.
        degrees = -46.85 + 175.72 * (rawT / 65536.0)
        rel_humidity = -6.0 + 125.0 * ((rawH & 0xFFFC) / 65536.0)
        return (degrees, rel_humidity)
class HumiditySensorHDC1000(SensorBase):
    """HDC1000 humidity/temperature sensor (CC2650 SensorTag)."""
    svcUUID = _TI_UUID(0xAA20)
    dataUUID = _TI_UUID(0xAA21)
    ctrlUUID = _TI_UUID(0xAA22)

    def __init__(self, periph):
        SensorBase.__init__(self, periph)

    def read(self):
        '''Returns (ambient_temp, rel_humidity)'''
        rawT, rawH = struct.unpack('<HH', self.data.read())
        degrees = -40.0 + 165.0 * (rawT / 65536.0)
        rel_humidity = 100.0 * (rawH / 65536.0)
        return (degrees, rel_humidity)
class MagnetometerSensor(SensorBase):
    """SensorTag v1 magnetometer."""
    svcUUID = _TI_UUID(0xAA30)
    dataUUID = _TI_UUID(0xAA31)
    ctrlUUID = _TI_UUID(0xAA32)

    def __init__(self, periph):
        SensorBase.__init__(self, periph)

    def read(self):
        '''Returns (x, y, z) in uT units'''
        raw = struct.unpack('<hhh', self.data.read())
        # Full scale is +/-1000 uT over the signed 16-bit range.
        # Revisit - some absolute calibration is needed
        return tuple(1000.0 * (axis / 32768.0) for axis in raw)
class MagnetometerSensorMPU9250:
    """Magnetometer view onto a shared MovementSensorMPU9250 instance."""

    def __init__(self, sensor_):
        self.sensor = sensor_
        # Reference: MPU-9250 register map v1.4
        self.scale = 4912.0 / 32760

    def enable(self):
        self.sensor.enable(self.sensor.MAG_XYZ)

    def disable(self):
        self.sensor.disable(self.sensor.MAG_XYZ)

    def read(self):
        '''Returns (x_mag, y_mag, z_mag) in units of uT'''
        # Magnetometer axes are the last three words of the raw record.
        x, y, z = self.sensor.rawRead()[6:9]
        return (x * self.scale, y * self.scale, z * self.scale)
class BarometerSensor(SensorBase):
    """SensorTag v1 barometer.

    Requires a one-time calibration read: enable() commands the device to
    expose its calibration constants, caches the derived coefficients, then
    switches the sensor into measurement mode.
    """
    svcUUID = _TI_UUID(0xAA40)
    dataUUID = _TI_UUID(0xAA41)
    ctrlUUID = _TI_UUID(0xAA42)
    calUUID = _TI_UUID(0xAA43)
    sensorOn = None  # enabling is the two-step sequence in enable() below
    def __init__(self, periph):
        SensorBase.__init__(self, periph)
    def enable(self):
        """Fetch and cache calibration constants, then start measuring."""
        SensorBase.enable(self)
        self.calChr = self.service.getCharacteristics(self.calUUID) [0]
        # Read calibration data: 0x02 asks the device to publish the
        # constants; four unsigned then four signed 16-bit words.
        self.ctrl.write( struct.pack("B", 0x02), True )
        (c1,c2,c3,c4,c5,c6,c7,c8) = struct.unpack("<HHHHhhhh", self.calChr.read())
        # Pre-scale into the fixed-point coefficients used by read().
        self.c1_s = c1/float(1 << 24)
        self.c2_s = c2/float(1 << 10)
        self.sensPoly = [ c3/1.0, c4/float(1 << 17), c5/float(1<<34) ]
        self.offsPoly = [ c6*float(1<<14), c7/8.0, c8/float(1<<19) ]
        # 0x01 switches the part into continuous measurement mode.
        self.ctrl.write( struct.pack("B", 0x01), True )
    def read(self):
        '''Returns (ambient_temp, pressure_millibars)'''
        # Raw record: signed 16-bit temperature, unsigned 16-bit pressure.
        (rawT, rawP) = struct.unpack('<hH', self.data.read())
        temp = (self.c1_s * rawT) + self.c2_s
        sens = calcPoly( self.sensPoly, float(rawT) )
        offs = calcPoly( self.offsPoly, float(rawT) )
        pres = (sens * rawP + offs) / (100.0 * float(1<<14))
        return (temp,pres)
class BarometerSensorBMP280(SensorBase):
    """BMP280 barometer (CC2650 SensorTag); device-side calibrated output."""
    svcUUID = _TI_UUID(0xAA40)
    dataUUID = _TI_UUID(0xAA41)
    ctrlUUID = _TI_UUID(0xAA42)

    def __init__(self, periph):
        SensorBase.__init__(self, periph)

    def read(self):
        '''Returns (ambient_temp, pressure)'''
        raw = struct.unpack('<BBBBBB', self.data.read())
        # Temperature then pressure, each a 24-bit little-endian integer in
        # units of 1/100 degC and 1/100 mbar respectively.
        temp_raw = raw[2] * 65536 + raw[1] * 256 + raw[0]
        press_raw = raw[5] * 65536 + raw[4] * 256 + raw[3]
        return (temp_raw / 100.0, press_raw / 100.0)
class GyroscopeSensor(SensorBase):
    """SensorTag v1 gyroscope; writing 0x07 enables all three axes."""
    svcUUID = _TI_UUID(0xAA50)
    dataUUID = _TI_UUID(0xAA51)
    ctrlUUID = _TI_UUID(0xAA52)
    sensorOn = struct.pack("B",0x07)

    def __init__(self, periph):
        SensorBase.__init__(self, periph)

    def read(self):
        '''Returns (x,y,z) rate in deg/sec'''
        raw = struct.unpack('<hhh', self.data.read())
        # Full scale is +/-250 deg/s over the signed 16-bit range.
        return tuple(250.0 * (axis / 32768.0) for axis in raw)
class GyroscopeSensorMPU9250:
    """Gyroscope view onto a shared MovementSensorMPU9250 instance."""

    def __init__(self, sensor_):
        self.sensor = sensor_
        # +/-250 deg/s full scale over the 16-bit range.
        self.scale = 500.0/65536.0

    def enable(self):
        self.sensor.enable(self.sensor.GYRO_XYZ)

    def disable(self):
        self.sensor.disable(self.sensor.GYRO_XYZ)

    def read(self):
        '''Returns (x_gyro, y_gyro, z_gyro) in units of degrees/sec'''
        # Gyro axes are the first three words of the raw movement record.
        x, y, z = self.sensor.rawRead()[0:3]
        return (x * self.scale, y * self.scale, z * self.scale)
class KeypressSensor(SensorBase):
    """SensorTag side buttons; events arrive as notifications, not reads."""
    svcUUID = UUID(0xFFE0)
    dataUUID = UUID(0xFFE1)
    ctrlUUID = None   # no control characteristic for this service
    sensorOn = None   # nothing to power on; we only toggle notifications
    def __init__(self, periph):
        SensorBase.__init__(self, periph)
    def enable(self):
        """Turn on notifications via the CCC descriptor (UUID 0x2902)."""
        SensorBase.enable(self)
        self.char_descr = self.service.getDescriptors(forUUID=0x2902)[0]
        # 0x0001 = notifications enabled.
        self.char_descr.write(struct.pack('<bb', 0x01, 0x00), True)
    def disable(self):
        """Turn notifications back off."""
        self.char_descr.write(struct.pack('<bb', 0x00, 0x00), True)
class OpticalSensorOPT3001(SensorBase):
    """OPT3001 ambient light sensor (CC2650 SensorTag)."""
    svcUUID = _TI_UUID(0xAA70)
    dataUUID = _TI_UUID(0xAA71)
    ctrlUUID = _TI_UUID(0xAA72)

    def __init__(self, periph):
        SensorBase.__init__(self, periph)

    def read(self):
        '''Returns value in lux'''
        # The register packs a 12-bit mantissa and a 4-bit exponent:
        # lux = 0.01 * mantissa * 2**exponent.
        # (Stray C-style semicolons removed from the original.)
        # NOTE(review): '<h' reads the word as signed; Python's & with a
        # positive mask still extracts the correct bits, but '<H' would be
        # clearer — confirm against the OPT3001 datasheet.
        raw = struct.unpack('<h', self.data.read())[0]
        mantissa = raw & 0xFFF
        exponent = (raw & 0xF000) >> 12
        return 0.01 * (mantissa << exponent)
class BatterySensor(SensorBase):
    """Standard Bluetooth Battery Service (0x180F) battery level."""
    svcUUID = UUID("0000180f-0000-1000-8000-00805f9b34fb")
    dataUUID = UUID("00002a19-0000-1000-8000-00805f9b34fb")
    ctrlUUID = None   # read-only service: no control characteristic
    sensorOn = None   # nothing to enable

    def __init__(self, periph):
        SensorBase.__init__(self, periph)

    def read(self):
        '''Returns the battery level in percent'''
        # Single byte, 0-100.
        return ord(self.data.read())
class SensorTag(Peripheral):
    """Connected SensorTag device: detects the hardware version and exposes
    one attribute per sensor (IRtemperature, accelerometer, humidity, ...).

    Sensors that don't exist on a version are set to None (lightmeter on v1).
    """
    def __init__(self,addr,version=AUTODETECT):
        Peripheral.__init__(self,addr)
        if version==AUTODETECT:
            # The optical sensor service (0xAA70) only exists on the CC2650,
            # so its presence distinguishes the two hardware versions.
            svcs = self.discoverServices()
            if _TI_UUID(0xAA70) in svcs:
                version = SENSORTAG_2650
            else:
                version = SENSORTAG_V1
            print(version)
        # Firmware revision is needed by AccelerometerSensor to pick a scale.
        fwVers = self.getCharacteristics(uuid=AssignedNumbers.firmwareRevisionString)
        if len(fwVers) >= 1:
            self.firmwareVersion = fwVers[0].read().decode("utf-8")
        else:
            self.firmwareVersion = u''
        if version==SENSORTAG_V1:
            self.IRtemperature = IRTemperatureSensor(self)
            self.accelerometer = AccelerometerSensor(self)
            self.humidity = HumiditySensor(self)
            self.magnetometer = MagnetometerSensor(self)
            self.barometer = BarometerSensor(self)
            self.gyroscope = GyroscopeSensor(self)
            self.keypress = KeypressSensor(self)
            self.lightmeter = None
        elif version==SENSORTAG_2650:
            # Gyro/accel/mag share one MPU-9250 movement service.
            self._mpu9250 = MovementSensorMPU9250(self)
            self.IRtemperature = IRTemperatureSensorTMP007(self)
            self.accelerometer = AccelerometerSensorMPU9250(self._mpu9250)
            self.humidity = HumiditySensorHDC1000(self)
            self.magnetometer = MagnetometerSensorMPU9250(self._mpu9250)
            self.barometer = BarometerSensorBMP280(self)
            self.gyroscope = GyroscopeSensorMPU9250(self._mpu9250)
            self.keypress = KeypressSensor(self)
            self.lightmeter = OpticalSensorOPT3001(self)
        self.battery = BatterySensor(self)
class KeypressDelegate(DefaultDelegate):
    """Decodes SensorTag button notifications into up/down callbacks.

    Each notification carries the current button bitmask; the delegate
    diffs it against the previous mask to detect presses and releases.
    """
    BUTTON_L = 0x02
    BUTTON_R = 0x01
    ALL_BUTTONS = (BUTTON_L | BUTTON_R)

    _button_desc = {
        BUTTON_L: "Left button",
        BUTTON_R: "Right button",
        ALL_BUTTONS: "Both buttons"
    }

    def __init__(self):
        DefaultDelegate.__init__(self)
        self.lastVal = 0

    def handleNotification(self, hnd, data):
        # NB: only one source of notifications at present
        # so we can ignore 'hnd'.
        current = struct.unpack("B", data)[0]
        pressed = (current & ~self.lastVal) & self.ALL_BUTTONS
        if pressed:
            self.onButtonDown(pressed)
        released = (~current & self.lastVal) & self.ALL_BUTTONS
        if released:
            self.onButtonUp(released)
        self.lastVal = current

    def onButtonUp(self, but):
        print ( "** " + self._button_desc[but] + " UP")

    def onButtonDown(self, but):
        print ( "** " + self._button_desc[but] + " DOWN")
def main():
    """Command-line driver: connect to a SensorTag and poll selected sensors."""
    import time
    import sys
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('host', action='store',help='MAC of BT device')
    parser.add_argument('-n', action='store', dest='count', default=0,
            type=int, help="Number of times to loop data")
    parser.add_argument('-t',action='store',type=float, default=5.0, help='time between polling')
    parser.add_argument('-T','--temperature', action="store_true",default=False)
    parser.add_argument('-A','--accelerometer', action='store_true',
            default=False)
    parser.add_argument('-H','--humidity', action='store_true', default=False)
    parser.add_argument('-M','--magnetometer', action='store_true',
            default=False)
    parser.add_argument('-B','--barometer', action='store_true', default=False)
    parser.add_argument('-G','--gyroscope', action='store_true', default=False)
    parser.add_argument('-K','--keypress', action='store_true', default=False)
    parser.add_argument('-L','--light', action='store_true', default=False)
    parser.add_argument('-P','--battery', action='store_true', default=False)
    parser.add_argument('--all', action='store_true', default=False)
    arg = parser.parse_args(sys.argv[1:])
    # Outer loop: retry the BLE connection forever until it succeeds.
    while True:
        try:
            print('Connecting to ' + arg.host)
            tag = SensorTag(arg.host)
        except:
            # NOTE(review): bare except is deliberate best-effort retry, but
            # it also swallows KeyboardInterrupt here.
            print('Failed to connect. Try again after ten seconds')
            time.sleep(10.0)
            continue
        # Enabling selected sensors
        if arg.temperature or arg.all:
            tag.IRtemperature.enable()
        if arg.humidity or arg.all:
            tag.humidity.enable()
        if arg.barometer or arg.all:
            tag.barometer.enable()
        if arg.accelerometer or arg.all:
            tag.accelerometer.enable()
        if arg.magnetometer or arg.all:
            tag.magnetometer.enable()
        if arg.gyroscope or arg.all:
            tag.gyroscope.enable()
        if arg.battery or arg.all:
            tag.battery.enable()
        if arg.keypress or arg.all:
            tag.keypress.enable()
            tag.setDelegate(KeypressDelegate())
        if arg.light and tag.lightmeter is None:
            print("Warning: no lightmeter on this device")
        if (arg.light or arg.all) and tag.lightmeter is not None:
            tag.lightmeter.enable()
        # Some sensors (e.g., temperature, accelerometer) need some time for initialization.
        # Not waiting here after enabling a sensor, the first read value might be empty or incorrect.
        time.sleep(1.0)
        counter=1
        # Inner loop: poll each selected sensor once per period.
        while True:
            if arg.temperature or arg.all:
                print('Temp: ', tag.IRtemperature.read())
            if arg.humidity or arg.all:
                print("Humidity: ", tag.humidity.read())
            if arg.barometer or arg.all:
                print("Barometer: ", tag.barometer.read())
            if arg.accelerometer or arg.all:
                print("Accelerometer: ", tag.accelerometer.read())
            if arg.magnetometer or arg.all:
                print("Magnetometer: ", tag.magnetometer.read())
            if arg.gyroscope or arg.all:
                print("Gyroscope: ", tag.gyroscope.read())
            if (arg.light or arg.all) and tag.lightmeter is not None:
                print("Light: ", tag.lightmeter.read())
            if arg.battery or arg.all:
                print("Battery: ", tag.battery.read())
            # -n 0 (the default) means loop forever.
            if counter >= arg.count and arg.count != 0:
                break
            print('-----------------------------------------------------------------------------')
            counter += 1
            # waitForNotifications doubles as the inter-poll sleep and
            # dispatches keypress notifications to the delegate.
            try:
                tag.waitForNotifications(arg.t)
            except:
                print('Disconnected');
                break
        tag.disconnect()
        del tag
# Script entry point.
if __name__ == "__main__":
    main()
|
from flask_jwt import current_identity
from flask_potion import ModelResource, fields
from flask_potion.routes import ItemRoute, Route
from ..app import db
from ..app.decorators import auth_required, role_required
from .models import Permission, User, UserPermissionLinker, UserRole
class UserResource(ModelResource):
    """Flask-Potion resource for User: profile lookups for the current
    identity plus admin-only CRUD and account management."""

    class Meta:
        model = User
        # Never expose the stored password hash; UUIDs are server-assigned.
        exclude_fields = [User._password_hash.key]
        read_only_fields = [User.uuid.key]

    @Route.GET("/profile/permissions")
    @auth_required
    def resolve_profile_permissions(self, project_id: fields.Integer(minimum=1)) -> fields.List(fields.String()):
        """Return the current user's permission slugs for a project."""
        permissions = (
            UserPermissionLinker.query.filter_by(project_id=project_id, user_id=current_identity.id)
            .join(UserPermissionLinker.permission)
            .with_entities(Permission.slug)
            .all()
        )
        permissions = list(map(lambda x: x.slug, permissions))
        return permissions

    @Route.GET("/profile")
    @auth_required
    def resolve_profile(self) -> fields.Inline("self"):
        """Return the authenticated user's own record."""
        return self.manager.read(current_identity.id)

    @ItemRoute.PATCH("/deactivate", rel="deactivate")
    @role_required(["admin"])
    def deactivate(self, user) -> fields.Boolean():
        """Mark ``user`` inactive; returns True on success."""
        user.is_active = False
        db.session.commit()
        return True

    @ItemRoute.PATCH("/activate", rel="activate")
    @role_required(["admin"])
    def activate(self, user) -> fields.Boolean():
        """Mark ``user`` active; returns True on success."""
        user.is_active = True
        db.session.commit()
        return True

    @ItemRoute.GET("/permissions")
    @role_required(["admin"])
    def resolve_permissions(self, user, project_id: fields.Integer(minimum=1)) -> fields.List(fields.String()):
        """Return ``user``'s permission slugs for a project (admin view)."""
        permissions = (
            UserPermissionLinker.query.filter_by(project_id=project_id, user_id=user.id)
            .join(UserPermissionLinker.permission)
            .with_entities(Permission.slug)
            .all()
        )
        permissions = list(map(lambda x: x.slug, permissions))
        return permissions

    @Route.POST("", rel="create", schema=fields.Inline("self"), response_schema=fields.Inline("self"))
    @role_required(["admin"])
    def create(self, properties):
        """Admin-gated wrapper around the default create route."""
        return super().create(properties=properties)

    @Route.GET(
        lambda r: "/<{}:id>".format(r.meta.id_converter),
        rel="self",
        attribute="instance",
        response_schema=fields.Inline("self"),
    )
    @role_required(["admin"])
    def read(self, id):
        """Admin-gated wrapper around the default read route."""
        return super().read(id=id)

    @read.PATCH(
        rel="update",
        schema=fields.Inline("self", patchable=True),
        response_schema=fields.Inline("self", patchable=True),
    )
    @role_required(["admin"])
    def update(self, properties, id):
        """Admin-gated partial update of a user record."""
        item = self.manager.read(id)
        updated_item = self.manager.update(item, properties)
        return updated_item

    @ItemRoute.PATCH("/changePassword", rel="change_password")
    @role_required(["admin"])
    def change_password(self, user, new_password: fields.String()) -> fields.Boolean():
        """Set a new password for ``user``; returns True on success."""
        # BUG FIX: the original line read `user.password_hash = <PASSWORD>`
        # (an anonymization artifact — a SyntaxError). Assign the supplied
        # value; the User model's password_hash setter is presumably what
        # hashes it into User._password_hash — confirm in models.py.
        user.password_hash = new_password
        db.session.commit()
        return True
class UserRoleResource(ModelResource):
    """Flask-Potion resource for UserRole; all routes are admin-only."""
    class Meta:
        model = UserRole
    @Route.POST("", rel="create", schema=fields.Inline("self"), response_schema=fields.Inline("self"))
    @role_required(["admin"])
    def create(self, properties):
        """Admin-gated wrapper around the default create route."""
        return super().create(properties=properties)
    @Route.GET(
        lambda r: "/<{}:id>".format(r.meta.id_converter),
        rel="self",
        attribute="instance",
        response_schema=fields.Inline("self"),
    )
    @role_required(["admin"])
    def read(self, id):
        """Admin-gated wrapper around the default read route."""
        return super().read(id=id)
    @read.PATCH(
        rel="update",
        schema=fields.Inline("self", patchable=True),
        response_schema=fields.Inline("self", patchable=True),
    )
    @role_required(["admin"])
    def update(self, properties, id):
        """Admin-gated partial update of a role record."""
        item = self.manager.read(id)
        updated_item = self.manager.update(item, properties)
        return updated_item
class UserPermissionLink(ModelResource):
    """Flask-Potion resource for UserPermissionLinker rows; admin-only."""
    class Meta:
        model = UserPermissionLinker
    @Route.POST("", rel="create", schema=fields.Inline("self"), response_schema=fields.Inline("self"))
    @role_required(["admin"])
    def create(self, properties):
        """Admin-gated wrapper around the default create route."""
        return super().create(properties=properties)
    @Route.GET(
        lambda r: "/<{}:id>".format(r.meta.id_converter),
        rel="self",
        attribute="instance",
        response_schema=fields.Inline("self"),
    )
    @role_required(["admin"])
    def read(self, id):
        """Admin-gated wrapper around the default read route."""
        return super().read(id=id)
    @read.PATCH(
        rel="update",
        schema=fields.Inline("self", patchable=True),
        response_schema=fields.Inline("self", patchable=True),
    )
    @role_required(["admin"])
    def update(self, properties, id):
        """Admin-gated partial update of a permission-link record."""
        item = self.manager.read(id)
        updated_item = self.manager.update(item, properties)
        return updated_item
|
<filename>ziggy/context.py
# -*- coding: utf-8 -*-
"""
ziggy.context
~~~~~~~~
This module provides the concept of 'Context' for collecting data that will
generate a log event.
:copyright: (c) 2012 by <NAME>
:license: ISC, see LICENSE for more details.
"""
import time
import os
import random
import struct
from . import utils
from . import network
# This will be redefined by our configuration to tell us where to record stuff
# to. Expected to be a callable taking a finished Context, or None when
# recording is disabled.
_recorder_function = None
class Context(object):
    """Collects data for a single log event.

    A context has a dotted type name (a leading '.' nests it under the
    current context), an id shared with its parent by default, and a
    writable window between start() and stop(). When done() is called on an
    enabled context, the module-level ``_recorder_function`` is invoked.

    NOTE(review): this module is Python 2 only (``.encode('hex')``).
    """
    __slots__ = ["name", "data", "id", "_writable", "start_time", "_sample_checks", "enabled"]

    def __init__(self, type_name, id=None, sample=None):
        # A leading '.' means "relative to the current context": the new
        # context is named <parent>.<suffix> and inherits the parent's id.
        if type_name.startswith('.'):
            parent_ctx = current_context()
            if parent_ctx is None:
                self.name = type_name[1:]
            else:
                self.name = parent_ctx.name + type_name
        else:
            parent_ctx = None
            self.name = type_name

        self.data = {}
        self.start_time = time.time()
        self._sample_checks = {}

        if id is not None:
            self.id = id
        elif parent_ctx:
            self.id = parent_ctx.id
        else:
            # Generate an id if one wasn't provided and we don't have any parents
            # We're going to encode the time as the front 4 bytes so we have some order to the ids
            # that could prove useful later on by making sorting a little easier.
            # BUG FIX: struct.pack(">L", ...) requires an integer; passing
            # the float from time.time() raises/deprecation-warns, so
            # truncate explicitly.
            self.id = (struct.pack(">L", int(time.time())) + os.urandom(12)).encode('hex')

        # Sampling: a disabled parent always disables the child. Otherwise
        # `sample` is a (name, rate) pair selecting whose coin-flip governs
        # this context: '.'/own name = flip now, '..' = ask the parent,
        # anything else = ask the named context.
        if parent_ctx and not parent_ctx.enabled:
            self.enabled = False
        elif sample:
            sample_name, rate = sample
            if sample_name == type_name or sample_name == '.':
                self.enabled = bool(random.random() <= rate)
            elif parent_ctx and sample_name == '..':
                self.enabled = parent_ctx.sampled_for(type_name, rate)
            else:
                self.enabled = _get_context(sample_name).sampled_for(type_name, rate)
        else:
            self.enabled = True

        self._writable = False

    @property
    def writable(self):
        """Indicates the context is open and can be written to"""
        return self._writable

    def sampled_for(self, name, rate):
        """Memoized coin-flip: at most one sampling decision per name."""
        if name not in self._sample_checks:
            self._sample_checks[name] = bool(random.random() <= rate)
        return self._sample_checks[name]

    def set(self, key, *args, **kwargs):
        """Set a (possibly deep) key to a value, a tuple of values, or merge
        keyword arguments into the dict stored there.

        Raises ValueError if the context is closed or if both positional and
        keyword values are given.
        """
        if not self.writable:
            raise ValueError()
        if args and kwargs:
            raise ValueError()

        if len(args) > 1:
            utils.set_deep(self.data, key, args)
        elif args:
            utils.set_deep(self.data, key, args[0])
        elif kwargs:
            existing_value = utils.get_deep(self.data, key, {})
            existing_value.update(kwargs)
            utils.set_deep(self.data, key, existing_value)

    def append(self, key, value):
        """Append ``value`` to the list at ``key`` (created on first use)."""
        if not self.writable:
            raise ValueError()
        existing_value = utils.get_deep(self.data, key, [])
        existing_value.append(value)
        # get_deep returns the live list when the key exists; only a brand
        # new list (length 1 after the append) needs to be stored back.
        if len(existing_value) == 1:
            utils.set_deep(self.data, key, existing_value)

    def add(self, key, value):
        """Add ``value`` to the numeric counter at ``key`` (0 if absent)."""
        if not self.writable:
            raise ValueError()
        existing_value = utils.get_deep(self.data, key, 0)
        utils.set_deep(self.data, key, existing_value + value)

    def to_dict(self):
        """Serialize the event (id, type, host, pid, timing, body)."""
        return {'id': self.id,
                'type': self.name,
                'host': os.uname()[1],
                'pid': os.getpid(),
                'start': self.start_time,
                'end': time.time(),
                'body': self.data
                }

    def start(self):
        """Open the context: register it as current and allow writes."""
        _add_context(self)
        self._writable = True

    def stop(self):
        """Close the context: block writes and deregister it."""
        self._writable = False
        _remove_context(self)

    def done(self):
        """Record the event (at most once) and permanently disable the context."""
        self.stop()  # Just be sure
        if self.enabled and _recorder_function:
            _recorder_function(self)

        # Make sure we don't get any duplicate data
        # I would clear out all the data here, but that makes testing a little
        # more challenging.
        self.enabled = False

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, traceback):
        self.stop()
        self.done()
# Stack of currently-open contexts (innermost last) plus a name index used
# for cross-context sampling lookups.
_contexts = []
_contexts_by_name = {}
def _add_context(context):
    """Register ``context`` as active; the first one with a given name wins."""
    already_tracked = context.name in _contexts_by_name
    if already_tracked:
        return
    _contexts_by_name[context.name] = context
    _contexts.append(context)
def _get_context(name):
    """Look up an active context by name (coerced to str); None if absent."""
    key = str(name)
    return _contexts_by_name.get(key)
def _remove_context(context):
    """Drop ``context`` from both registries; missing entries are ignored."""
    _contexts_by_name.pop(context.name, None)
    if context in _contexts:
        _contexts.remove(context)
def current_context():
    """Return the innermost open context, or None when none are active."""
    if _contexts:
        return _contexts[-1]
    return None
def set(*args, **kwargs):
    """Proxy to Context.set on the current context; no-op when none is open."""
    ctx = current_context()
    if ctx is not None:
        ctx.set(*args, **kwargs)
def append(*args, **kwargs):
    """Proxy to Context.append on the current context; no-op when none is open."""
    ctx = current_context()
    if ctx is not None:
        ctx.append(*args, **kwargs)
def add(*args, **kwargs):
    """Proxy to Context.add on the current context; no-op when none is open."""
    ctx = current_context()
    if ctx is not None:
        ctx.add(*args, **kwargs)
|
<reponame>gieseladev/andesite.py<filename>andesite/transform.py
"""Transformation utilities.
These functions are used to transform the data sent by Andesite
into the Python models.
These functions aren't exported to the `andesite` namespace, if you
want to use them you need to import them from `andesite.transform`.
However, there shouldn't be a need for you to use them in the first place
as andesite.py already does this for you.
Attributes:
RawDataType (Dict[str, Any]): (Type alias) JSON-like object data type.
CONVERTER_MEMO (lettercase.ConversionMemo): Memory used for letter case conversion.
MapFunction ((T) -> `Any`): (Type alias) Callable that takes an argument and returns a new one.
"""
import dataclasses
from functools import partial
from typing import Any, Callable, Dict, MutableMapping, MutableSequence, Optional, Type, TypeVar, cast, overload
import lettercase
# Public API of andesite.transform, grouped by theme: core transforms,
# raw <-> model conversion, mapping helpers, and unit conversion.
__all__ = ["RawDataType",
           "transform_input", "transform_output",
           "convert_to_raw", "build_from_raw",
           "seq_build_all_items_from_raw",
           "map_build_all_values_from_raw",
           "MapFunction", "map_convert_value", "map_convert_values", "map_convert_values_all",
           "map_build_values_from_raw",
           "from_milli", "to_milli",
           "from_centi", "to_centi",
           "map_convert_values_from_milli", "map_convert_values_to_milli",
           "map_filter_none", "map_rename_keys", "map_remove_keys"]
# Generic type variables used by the mapping helpers below.
T = TypeVar("T")
KT = TypeVar("KT")
VT = TypeVar("VT")
# JSON-like payloads exchanged with Andesite.
RawDataType = Dict[str, Any]
# memo used to speed up key conversion
CONVERTER_MEMO = lettercase.ConversionMemo()
def _transform(transformer: Callable[[RawDataType], Optional[RawDataType]], data: RawDataType) -> RawDataType:
    """Apply *transformer* to *data* and return the effective result.

    The transformer may either mutate *data* in place (returning `None`),
    or return a replacement mapping. Either way the mapping that should be
    used afterwards is returned.

    Args:
        transformer: Callable invoked with *data*.
        data: Data to be transformed.
    """
    replacement = transformer(data)
    return data if replacement is None else replacement
def transform_input(cls: Any, data: RawDataType) -> RawDataType:
    """Run ``cls.__transform_input__`` on *data*, if the model defines one.

    Unlike calling the classmethod directly, this always returns the data
    that should be used afterwards: the transformer may mutate *data* in
    place (returning `None`) or return a replacement.

    Args:
        cls: Target model whose transformation to apply.
        data: Data to be transformed.

    Notes:
        When using this inside another transformation, keep working on the
        returned value and return it yourself — you cannot know whether the
        data was modified in place or replaced.
    """
    _missing = object()
    transformer = getattr(cls, "__transform_input__", _missing)
    if transformer is _missing:
        return data
    return _transform(transformer, data)
def transform_output(cls: Any, data: RawDataType) -> RawDataType:
    """Run ``cls.__transform_output__`` on *data*, if the model defines one.

    Unlike calling the classmethod directly, this always returns the data
    that should be used afterwards: the transformer may mutate *data* in
    place (returning `None`) or return a replacement.

    Args:
        cls: Target model whose transformation to apply.
        data: Data to be transformed.

    Notes:
        When using this inside another transformation, keep working on the
        returned value and return it yourself — you cannot know whether the
        data was modified in place or replaced.
    """
    _missing = object()
    transformer = getattr(cls, "__transform_output__", _missing)
    if transformer is _missing:
        return data
    return _transform(transformer, data)
def convert_to_raw(obj: Any) -> RawDataType:
    """Recursively convert a dataclass to a `dict`.

    Similar to `dataclasses.asdict` with two differences: the model's
    `__transform_output__` hook is honoured (via `transform_output`), and
    the resulting keys are converted from snake_case to dromedaryCase.

    Values are NOT copied — mutating the resulting dict's values mutates
    the dataclass' values too.

    Args:
        obj: Object to convert. Dataclasses, `list`, `tuple` and `dict`
            instances are converted recursively; anything else is returned
            unchanged.
    """
    if dataclasses.is_dataclass(obj):
        raw: RawDataType = {}
        for field in dataclasses.fields(obj):
            field = cast(dataclasses.Field, field)
            raw[field.name] = convert_to_raw(getattr(obj, field.name))
        raw = transform_output(obj, raw)
        lettercase.mut_convert_keys(raw, lettercase.SNAKE_CASE, lettercase.DROMEDARY_CASE, memo=CONVERTER_MEMO)
        return raw
    if isinstance(obj, (list, tuple)):
        return type(obj)(convert_to_raw(item) for item in obj)
    if isinstance(obj, dict):
        return type(obj)((convert_to_raw(key), convert_to_raw(value)) for key, value in obj.items())
    # could copy it here to create a "safely" mutable dict but nah.
    return obj
@overload
def build_from_raw(cls: Type[T], raw_data: None) -> None: ...
@overload
def build_from_raw(cls: Type[T], raw_data: RawDataType) -> T: ...
def build_from_raw(cls: Type[T], raw_data: Optional[RawDataType]) -> Optional[T]:
    """Build an instance of *cls* from raw Andesite data.

    `None` is treated as a special value and passed straight through.
    Keys are converted from dromedaryCase to snake_case in place, the
    model's `__transform_input__` hook is honoured (via `transform_input`),
    and the result is fed to the *cls* constructor as keyword arguments.

    Args:
        cls: Target type to build.
        raw_data: Data used to build the instance, or `None`.
    """
    if raw_data is None:
        return None
    lettercase.mut_convert_keys(raw_data, lettercase.DROMEDARY_CASE, lettercase.SNAKE_CASE, memo=CONVERTER_MEMO)
    prepared = transform_input(cls, raw_data)
    return cls(**prepared)
def seq_build_all_items_from_raw(items: MutableSequence[RawDataType], cls: Type[T]) -> None:
    """Build every item of a mutable sequence in place.

    Each element is replaced with ``build_from_raw(cls, element)``.

    Args:
        items: Mutable sequence of raw data to convert.
        cls: Target type to build.

    Returns:
        Nothing; the sequence is mutated in place.
    """
    for index in range(len(items)):
        items[index] = build_from_raw(cls, items[index])
# Callable applied to a single mapping value; its return value replaces the old one.
MapFunction = Callable[[T], Any]
def map_convert_value(mapping: MutableMapping[KT, T], key: KT, func: MapFunction) -> None:
    """Replace ``mapping[key]`` with ``func(mapping[key])``, in place.

    Missing keys are ignored: when *key* is absent the mapping is left
    untouched and *func* is never invoked.

    Args:
        mapping: Mutable mapping to manipulate.
        key: Key whose value is to be converted.
        func: Callback invoked with the current value; its return value
            replaces the previous one.

    Returns:
        Nothing; the mapping is mutated in place.
    """
    if key not in mapping:
        return
    mapping[key] = func(mapping[key])
def map_convert_values(mapping: RawDataType, **key_funcs: MapFunction) -> None:
    """Apply a per-key conversion function to a mapping, in place.

    Args:
        mapping: Mutable mapping to convert.
        **key_funcs: key -> map function pairs; each is applied with
            `map_convert_value` (missing keys are skipped).

    Returns:
        Nothing; the mapping is mutated in place.
    """
    for key, mapper in key_funcs.items():
        map_convert_value(mapping, key, mapper)
def map_convert_values_all(mapping: RawDataType, func: MapFunction, *keys: str) -> None:
    """Apply the same conversion function to several keys, in place.

    Like `map_convert_values`, but with one function for all keys.

    Args:
        mapping: Mutable mapping to convert.
        func: Function applied (via `map_convert_value`) to each key's value.
        *keys: Keys whose values are to be converted.

    Returns:
        Nothing; the mapping is mutated in place.
    """
    for target_key in keys:
        map_convert_value(mapping, target_key, func)
def map_build_values_from_raw(mapping: RawDataType, **key_types: Type[T]) -> None:
    """Build the values of the given keys into model instances, in place.

    Args:
        mapping: Mutable mapping to convert.
        **key_types: key -> type pairs; each value is converted with
            ``build_from_raw`` (missing keys are skipped).

    Returns:
        Nothing; the mapping is mutated in place.
    """
    for key, target_cls in key_types.items():
        map_convert_value(mapping, key, partial(build_from_raw, target_cls))
def map_build_all_values_from_raw(mapping: MutableMapping[Any, RawDataType], cls: Type[T]) -> None:
    """Build every value of a mapping into an instance of *cls*, in place.

    Args:
        mapping: Mutable mapping whose values are to be built.
        cls: Type to convert the values to (via ``build_from_raw``).

    Returns:
        Nothing; the mapping is mutated in place.
    """
    for key in mapping:
        mapping[key] = build_from_raw(cls, mapping[key])
@overload
def from_milli(value: int) -> float: ...
@overload
def from_milli(value: None) -> None: ...
def from_milli(value: Optional[int]) -> Optional[float]:
    """Convert thousandths to base units.

    Args:
        value: Value in milli, or `None`.

    Returns:
        Optional[float]: `None` when given `None`, otherwise ``value / 1000``.
    """
    return None if value is None else value / 1000
@overload
def to_milli(value: float) -> int: ...
@overload
def to_milli(value: None) -> None: ...
def to_milli(value: Optional[float]) -> Optional[int]:
    """Convert base units to thousandths.

    Args:
        value: Value in base units, or `None`.

    Returns:
        Optional[int]: `None` when given `None`, otherwise ``round(1000 * value)``.
    """
    return None if value is None else round(1000 * value)
@overload
def from_centi(value: int) -> float: ...
@overload
def from_centi(value: None) -> None: ...
def from_centi(value: Optional[int]) -> Optional[float]:
    """Convert hundredths to base units.

    Args:
        value: Value in centi, or `None`.

    Returns:
        Optional[float]: `None` when given `None`, otherwise ``value / 100``.
    """
    return None if value is None else value / 100
@overload
def to_centi(value: float) -> int: ...
@overload
def to_centi(value: None) -> None: ...
def to_centi(value: Optional[float]) -> Optional[int]:
    """Convert from base unit to hundredths.

    This is really just multiplying by 100 and rounding.
    (The docstring previously claimed 1000 / "milli" — the code always
    multiplied by 100.)

    Args:
        value: Value to convert to centi.

    Returns:
        Optional[int]: `None` if you pass `None` as the value, otherwise an `int`.
    """
    if value is None:
        return value
    return round(100 * value)
def map_convert_values_from_milli(mapping: RawDataType, *keys) -> None:
    """Convert the given keys' values from milli to base units, in place.

    Args:
        mapping: Mutable mapping to convert.
        *keys: Keys whose values are converted with `from_milli`.

    Returns:
        Nothing; the mapping is mutated in place.
    """
    map_convert_values_all(mapping, from_milli, *keys)
def map_convert_values_to_milli(mapping: RawDataType, *keys) -> None:
    """Convert the given keys' values from base units to milli, in place.

    Args:
        mapping: Mutable mapping to convert.
        *keys: Keys whose values are converted with `to_milli`.

    Returns:
        Nothing; the mapping is mutated in place.
    """
    map_convert_values_all(mapping, to_milli, *keys)
def map_filter_none(mapping: MutableMapping[Any, Any]) -> None:
    """Strip every key whose value is `None` from *mapping*, in place.

    Args:
        mapping: Mutable mapping to filter.

    Returns:
        Nothing; the mapping is mutated in place.
    """
    # Collect first: we must not delete while iterating the mapping itself.
    for dead_key in [key for key, value in mapping.items() if value is None]:
        del mapping[dead_key]
def map_rename_keys(mapping: MutableMapping[str, Any], **key_maps: str) -> None:
    """Rename keys of a mapping in place.

    Each keyword argument maps *new* name -> *old* name (reversed so the new
    names can be python identifiers while the old ones need not be). Old
    keys that are missing are skipped.

    Args:
        mapping: Mutable mapping to manipulate.
        **key_maps: new -> old name mapping.
    """
    for new_key, old_key in key_maps.items():
        if old_key in mapping:
            mapping[new_key] = mapping.pop(old_key)
def map_remove_keys(mapping: MutableMapping[KT, Any], *keys: KT) -> None:
    """Delete the given keys (and their values) from *mapping*, in place.

    Missing keys are silently ignored.

    Args:
        mapping: Mapping to remove keys from.
        *keys: Keys to remove.

    Returns:
        Nothing; the mapping is mutated in place.
    """
    for key in keys:
        mapping.pop(key, None)
|
<reponame>alvinwan/lepoop<filename>lepoop/entry/main.py
"""Entry points manager for command line utility."""
from ..install import get_uninstall_candidates
from ..install import get_uninstall_dependencies_for
from ..install import get_installed_package_keys
from ..uninstall import get_reinstall_candidates
from ..download import get_file_candidates
from ..utils import colored
from ..utils import get_valid_pip_history
from ..utils import get_pip_command_packages
from .alias import poop_alias
from colorama import Fore
from subprocess import Popen
import argparse
def main():
    """Command line entry point for ``poop``."""
    parser = argparse.ArgumentParser('poop')
    parser.add_argument('-a', '--alias', action='store_true',
                        help='Alias to `poop`')
    parser.add_argument('--harder', action='store_true',
                        help='Look through bash history as far as possible.')
    parser.add_argument('--stronger', action='store_true',
                        help='Look through bash history and module source files.'
                             ' for modules to uninstall.')
    parser.add_argument('--skip', type=int, default=0,
                        help='Number of pip commands to skip.')
    parser.add_argument('package', nargs='*', help='packages to uninstall')
    args = parser.parse_args()
    if args.alias:
        print(poop_alias)
        return
    try:
        if args.package:
            command = create_command_using_packages(args.package)
        elif args.harder:
            command = create_command_using_pip_action('*', skip=args.skip)
        elif args.stronger:
            command = create_command_using_pip_action(
                '*', uninstall_use_creation_time=True, skip=args.skip)
        else:
            command = create_command_using_pip_action(skip=args.skip)
        # Confirm with the user before running the destructive command.
        input(colored('`{}` [enter/ctrl+c]'.format(command)))
        process = Popen(command.split())
        _, error = process.communicate()
    except AssertionError as e:
        print(Fore.RED + 'Already pooped. (%s)' % str(e))
    except KeyboardInterrupt:
        print()
def create_command_using_pip_action(
        num_bash_entries=10, uninstall_use_creation_time=False, skip=0):
    """Build the undo command for the most recent pip action in bash history.

    Args:
        num_bash_entries: how far back to search history ('*' = unbounded).
        uninstall_use_creation_time: uninstall based on module creation time
            instead of the recorded pip action.
        skip: number of pip commands to skip before picking one.

    Raises:
        AssertionError: when no undoable pip command can be found.
    """
    history = get_valid_pip_history(num_bash_entries)[skip:]
    assert history, 'No undoable pip commands.'
    latest_command = history[0]
    latest_action = latest_command.split()[1]
    command = ''
    if uninstall_use_creation_time:
        command = 'pip uninstall -y {}'.format(get_uninstall_candidates())
    elif latest_action == 'install':
        command = create_command_using_packages(get_pip_command_packages(
            latest_command))
    elif latest_action == 'uninstall':
        command = 'pip install {}'.format(get_reinstall_candidates())
    elif latest_action == 'download':
        command = 'rm {}'.format(get_file_candidates())
    assert command, 'No undoable pip commands.'
    return command
def create_command_using_packages(packages):
    """Build a ``pip uninstall`` command for the installed subset of *packages*.

    Packages that are not installed are reported and ignored; the uninstall
    set is expanded with their now-unneeded dependencies.

    Raises:
        AssertionError: when none of the requested packages are installed.
    """
    installed_keys = set(get_installed_package_keys())
    not_installed = [p for p in packages if p not in installed_keys]
    installed = [p for p in packages if p in installed_keys]
    assert installed, ('None of these packages are installed: %s' %
                       ', '.join(not_installed))
    targets = get_uninstall_dependencies_for(installed)
    if not_installed:
        print(colored('Packages `%s` are not installed. I\'m ignoring '
                      'them.' % ', '.join(not_installed)))
    return 'pip uninstall -y {}'.format(targets)
<gh_stars>10-100
import numpy as np
import sys
from dynaphopy.displacements import atomic_displacements
def progress_bar(progress):
    """Draw/update a 30-character text progress bar on stdout.

    Args:
        progress: fraction in [0, 1]. Ints are coerced to float; any other
            type resets the bar to 0 and reports an error; values outside
            [0, 1] are clamped with a status message.
    """
    bar_length = 30
    if isinstance(progress, int):
        progress = float(progress)
    if isinstance(progress, float):
        status = ""
    else:
        progress = 0
        status = "Progress error\r\n"
    if progress < 0:
        progress = 0
        status = "Halt ...\r\n"
    if progress >= 1:
        progress = 1
        status = "Done...\r\n"
    filled = int(round(bar_length * progress))
    bar = "#" * filled + "-" * (bar_length - filled)
    sys.stdout.write("\rTrajectory: [{0}] {1:.2f}% {2}".format(bar, progress * 100, status))
    sys.stdout.flush()
#print(disp.relative_trajectory(cell, traj ,pos))
#Not used (only for test)
def relativize_trajectory(dynamic, memmap=False):
    """Return the trajectory expressed as displacements from equilibrium.

    Args:
        dynamic: dynamics object providing the supercell, the trajectory and
            the equilibrium atomic positions.
        memmap: when True, store the result in an on-disk memmap
            ('r_trajectory.map') instead of in memory.

    Returns:
        Array (or memmap) with the same shape as ``dynamic.trajectory``,
        holding per-atom displacements relative to ``position``.
    """
    cell = dynamic.get_supercell()
    number_of_atoms = dynamic.trajectory.shape[1]
    supercell = dynamic.get_supercell_matrix()
    position = dynamic.structure.get_positions(supercell=supercell)
    trajectory = dynamic.trajectory

    # (The redundant unconditional copy before this if/else was removed;
    # both branches assign normalized_trajectory.)
    if memmap:
        normalized_trajectory = np.memmap('r_trajectory.map', dtype='complex', mode='w+', shape=trajectory.shape)
    else:
        normalized_trajectory = dynamic.trajectory.copy()

    # progress_bar(0)
    for i in range(number_of_atoms):
        # BUG FIX: this module imports ``atomic_displacements`` (plural);
        # the original call to ``atomic_displacement`` raised NameError.
        normalized_trajectory[:, i, :] = atomic_displacements(trajectory[:, i, :], position[i], cell)
        # progress_bar(float(i+1)/number_of_atoms)
    return normalized_trajectory
# Not used (only for test)
def relativize_trajectory_py(dynamic):
    """Pure-python fallback of `relativize_trajectory` (slow; kept for tests).

    Wraps each atom's position back into the periodic cell and subtracts its
    equilibrium position, frame by frame.

    Args:
        dynamic: dynamics object providing supercell, trajectory and
            equilibrium positions.

    Returns:
        np.ndarray of real displacements, same shape as the trajectory.
    """
    print('Using python routine for calculating atomic displacements')
    cell = dynamic.get_supercell()
    number_of_atoms = dynamic.trajectory.shape[1]
    supercell = dynamic.get_supercell_matrix()
    position = dynamic.structure.get_positions(supercell=supercell)
    normalized_trajectory = dynamic.trajectory.real.copy()
    # Hoisted out of the loops: the cell inverse is loop-invariant and was
    # recomputed for every atom of every frame.
    inverse_cell = np.linalg.inv(cell)
    progress_bar(0)
    for j in range(number_of_atoms):
        for i in range(0, normalized_trajectory.shape[0]):
            difference = normalized_trajectory[i, j, :] - position[j]
            # Integer multiples of the lattice vectors to unwrap.
            difference_matrix = np.around(np.dot(inverse_cell, difference), decimals=0)
            normalized_trajectory[i, j, :] -= np.dot(difference_matrix, cell) + position[j]
        progress_bar(float(j+1)/number_of_atoms)
    return normalized_trajectory
def trajectory_projection(dynamic, direction):
    """Project the relative trajectory of each atom type onto *direction*.

    Args:
        dynamic: dynamics object providing the relative trajectory and the
            atom-type index.
        direction: 3-vector giving the projection direction; it is
            normalized internally.

    Returns:
        np.ndarray with one row per atom type, each row holding the
        concatenated projections of all atoms of that type.
    """
    direction = np.array(direction) / np.linalg.norm(direction)
    supercell = dynamic.get_supercell_matrix()
    trajectory = dynamic.get_relative_trajectory()
    atom_type_index = dynamic.structure.get_atom_type_index(supercell=supercell)
    number_of_atom_types = dynamic.structure.get_number_of_atom_types()
    projections = []
    for atom_type in range(number_of_atom_types):
        projection = np.array([])
        for i in range(0, trajectory.shape[1]):
            if atom_type_index[i] == atom_type:
                # ``direction`` is already a unit vector; the original divided
                # by its norm a second time, a no-op up to float rounding.
                projection = np.append(projection, np.dot(trajectory[:, i, :].real, direction))
        projections.append(projection)
    return np.array(projections)
|
<gh_stars>0
# -*- coding: utf-8 -*-
import json
import os
from twython import Twython, TwythonError
from .config import unittest
class TestHtmlForTweetTestCase(unittest.TestCase):
    """Tests for ``Twython.html_for_tweet`` using canned tweet JSON fixtures."""

    def setUp(self):
        # Credentials are irrelevant: html_for_tweet never touches the network.
        self.api = Twython('', '', '', '')

    def load_tweet(self, name):
        """Load and return the fixture ``tweets/<name>.json`` as a dict."""
        path = os.path.join(
            os.path.dirname(__file__),
            'tweets',
            '%s.json' % name
        )
        # Use a context manager so the file is closed even if json.load raises
        # (the original used a bare open()/close() pair).
        with open(path) as f:
            return json.load(f)

    def test_basic(self):
        """Test HTML for Tweet returns what we want"""
        tweet_object = self.load_tweet('basic')
        tweet_text = self.api.html_for_tweet(tweet_object)
        self.assertEqual(tweet_text,
                         '<a href="http://t.co/FCmXyI6VHd" class="twython-url">google.com</a> is a <a href="https://twitter.com/search?q=%23cool" class="twython-hashtag">#cool</a> site, lol! <a href="https://twitter.com/mikehelmick" class="twython-mention">@mikehelmick</a> shd <a href="https://twitter.com/search?q=%23checkitout" class="twython-hashtag">#checkitout</a>. Love, <a href="https://twitter.com/__twython__" class="twython-mention">@__twython__</a> <a href="https://t.co/67pwRvY6z9" class="twython-url">github.com</a> <a href="http://t.co/N6InAO4B71" class="twython-media">pic.twitter.com/N6InAO4B71</a>')

    def test_reply(self):
        """Test HTML for Tweet links the replied-to username."""
        tweet_object = self.load_tweet('reply')
        tweet_text = self.api.html_for_tweet(tweet_object)
        self.assertEqual(tweet_text,
                         u'<span class="twython-tweet-prefix"><a href="https://twitter.com/philgyford" class="twython-mention">@philgyford</a> </span>Here’s a test tweet that goes on as much as possible and includes an image. Hi to my fans in testland!<span class="twython-tweet-suffix"> https://t.co/tzhyk2QWSr</span>')

    def test_expanded_url(self):
        """Test using expanded url in HTML for Tweet displays full urls"""
        tweet_object = self.load_tweet('basic')
        tweet_text = self.api.html_for_tweet(tweet_object,
                                             use_expanded_url=True)
        # Make sure full url is in HTML
        self.assertTrue('http://google.com' in tweet_text)

    def test_short_url(self):
        """Test using expanded url in HTML for Tweet displays full urls"""
        tweet_object = self.load_tweet('basic')
        tweet_text = self.api.html_for_tweet(tweet_object, False)
        # Make sure HTML doesn't contain the display OR expanded url
        self.assertTrue('http://google.com' not in tweet_text)
        self.assertTrue('google.com' not in tweet_text)

    def test_identical_urls(self):
        """If the 'url's for different url entities are identical, they should link correctly."""
        tweet_object = self.load_tweet('identical_urls')
        tweet_text = self.api.html_for_tweet(tweet_object)
        self.assertEqual(tweet_text,
                         u'Use Cases, Trials and Making 5G a Reality <a href="https://t.co/W0uArTMk9N" class="twython-url">buff.ly/2sEhrgO</a> #5G #innovation via @5GWorldSeries <a href="https://t.co/W0uArTMk9N" class="twython-url">buff.ly/2sEhrgO</a>')

    def test_symbols(self):
        """Only symbols listed in the tweet's entities should be linked."""
        tweet_object = self.load_tweet('symbols')
        tweet_text = self.api.html_for_tweet(tweet_object)
        # Should only link symbols listed in entities:
        self.assertTrue('<a href="https://twitter.com/search?q=%24AAPL" class="twython-symbol">$AAPL</a>' in tweet_text)
        self.assertTrue('<a href="https://twitter.com/search?q=%24ANOTHER" class="twython-symbol">$ANOTHER</a>' not in tweet_text)

    def test_no_symbols(self):
        """Should still work if tweet object has no symbols list"""
        tweet = self.load_tweet('symbols')
        # Save a copy:
        symbols = tweet['entities']['symbols']
        del tweet['entities']['symbols']
        tweet_text = self.api.html_for_tweet(tweet)
        self.assertTrue('symbols: $AAPL and' in tweet_text)
        self.assertTrue('and $ANOTHER and $A.' in tweet_text)

    def test_compatmode(self):
        """Compat-mode (truncated) tweets should link the web status url."""
        tweet_object = self.load_tweet('compat')
        tweet_text = self.api.html_for_tweet(tweet_object)
        # link to compat web status link
        self.assertTrue(
            u'<a href="https://t.co/SRmsuks2ru" class="twython-url">twitter.com/i/web/status/7…</a>' in tweet_text)

    def test_extendedmode(self):
        """Extended-mode tweets should render the full text plus suffix."""
        tweet_object = self.load_tweet('extended')
        tweet_text = self.api.html_for_tweet(tweet_object)
        # full tweet rendered with suffix
        self.assertEqual(tweet_text,
                         'Say more about what\'s happening! Rolling out now: photos, videos, GIFs, polls, and Quote Tweets no longer count toward your 140 characters.<span class="twython-tweet-suffix"> <a href="https://t.co/I9pUC0NdZC" class="twython-media">pic.twitter.com/I9pUC0NdZC</a></span>')

    def test_media(self):
        """Media entities should be rendered as twython-media links."""
        tweet_object = self.load_tweet('media')
        tweet_text = self.api.html_for_tweet(tweet_object)
        self.assertEqual(
            u"""I made some D3.js charts showing the years covered by books in a series compared to their publishing dates <a href="https://t.co/2yUmmn3TOc" class="twython-url">gyford.com/phil/writing/2\u2026</a><span class="twython-tweet-suffix"> <a href="https://t.co/OwNc6uJklg" class="twython-media">pic.twitter.com/OwNc6uJklg</a></span>""",
            tweet_text)

    def test_quoted(self):
        "With expand_quoted_status=True it should include a quoted tweet."
        tweet_object = self.load_tweet('quoted')
        tweet_text = self.api.html_for_tweet(tweet_object,
                                             expand_quoted_status=True)
        self.assertEqual(
            u"""Here\u2019s a quoted tweet. <a href="https://t.co/3neKzof0gT" class="twython-url">twitter.com/philgyford/sta\u2026</a><blockquote class="twython-quote">The quoted tweet text.<cite><a href="https://twitter.com/philgyford/status/917699069916729344"><span class="twython-quote-user-name"><NAME></span><span class="twython-quote-user-screenname">@philgyford</span></a></cite></blockquote>""",
            tweet_text)

    def test_retweet(self):
        "With expand_quoted_status=True it should include a quoted tweet."
        tweet_object = self.load_tweet('retweet')
        tweet_text = self.api.html_for_tweet(tweet_object)
        self.assertEqual(
            u"""My aunt and uncle in a very ill humour one with another, but I made shift with much ado to keep them from scolding.""",
            tweet_text)
|
<gh_stars>0
import node
import event
from leginon import leginondata
import time
import calibrationclient
from pyami import correlator, peakfinder, imagefun, ordereddict
import math
import gui.wx.Baker
import instrument
import presets
import types
import numpy
from leginon import leginondata
import threading
import player
class Baker(node.Node):
    """Leginon node that "bakes" a specimen: it slowly walks the stage from
    its current position to a user-saved end position under beam exposure,
    then prepares the scope/camera and forwards the MakeTargetListEvent to
    the next node.

    Settings:
        bypass: skip baking entirely when handling MakeTargetListEvent.
        preset: name of the preset sent to the scope during baking.
        total bake time: time budget spread evenly over the stage steps.
        manual aperture: pause for the user to change apertures after baking.
        emission off: also switch emission off during shutdown.
    """
    panelclass = gui.wx.Baker.Panel
    settingsclass = leginondata.BakerSettingsData
    defaultsettings = {
        'bypass': False,
        'preset': '',
        'total bake time': 10.0,
        'manual aperture': True,
        'emission off': False,
    }
    eventinputs = node.Node.eventinputs + presets.PresetsClient.eventinputs + [event.MakeTargetListEvent,]
    eventoutputs = node.Node.eventoutputs + presets.PresetsClient.eventoutputs + [event.MakeTargetListEvent,]

    def __init__(self, id, session, managerlocation, **kwargs):
        node.Node.__init__(self, id, session, managerlocation, **kwargs)
        self.instrument = instrument.Proxy(self.objectservice, self.session,
                                           self.panel)
        self.presetsclient = presets.PresetsClient(self)
        self.player = player.Player(callback=self.onPlayer)
        self.panel.playerEvent(self.player.state())
        self.lock = threading.RLock()
        self.addEventInput(event.MakeTargetListEvent, self.onProcessing)
        self.userpause = threading.Event()
        # Stage position saved by the user via fromScope(); baking walks
        # toward this point.
        self.endlocation = None
        self.start()

    def checkDistance(self):
        """Return True when an end position is saved and the stage is not
        already there; (re)starts the player on success."""
        if self.endlocation is None:
            self.logger.error('No end stage position saved')
            return False
        else:
            endx = self.endlocation['x']
            endy = self.endlocation['y']
            startstage = self.instrument.tem.StagePosition
            startx = startstage['x']
            starty = startstage['y']
            distance = math.hypot((startx-endx), (starty-endy))
            if distance < 2e-6:
                self.logger.error('Stage is at the end position')
                self.player.stop()
                return False
        self.player.play()
        return True

    def timedMoveToPosition(self):
        """Walk the stage to the saved end location in n equal steps spread
        over the configured total bake time.

        Returns:
            True when all steps completed, False on abort or a failed move,
            None when the stage is already at the end position.
        """
        endx = self.endlocation['x']
        endy = self.endlocation['y']
        startstage = self.instrument.tem.StagePosition
        startx = startstage['x']
        starty = startstage['y']
        distance = math.hypot((startx-endx), (starty-endy))
        if distance < 2e-6:
            self.logger.error('Stage is at the end position')
            return
        n = 20
        locations = [(startx, starty)]
        step = (endx-startx)/(n-1), (endy-starty)/(n-1)
        steptime = self.settings['total bake time'] / n
        status = True
        for i in range(1, n):
            locations.append((locations[i-1][0]+step[0], locations[i-1][1]+step[1]))
        for i in range(n):
            start_time = time.time()
            state = self.player.wait()
            if state == 'stop':
                status = False
                break
            status = self.toScope({'x': locations[i][0], 'y': locations[i][1]})
            if not status:
                break
            # Sleep out the rest of this step's time slice so the whole walk
            # takes ~'total bake time'. (Removed unused end_time local.)
            remaining_time = steptime - (time.time() - start_time)
            if remaining_time > 0:
                time.sleep(remaining_time)
        return status

    def fromScope(self):
        """Save the current stage x/y/z/a as the baking end location."""
        errstr = 'Location from instrument failed: %s'
        try:
            allstagedata = self.instrument.tem.StagePosition
        except:
            self.logger.error(errstr % 'unable to get stage position')
            return False
        stagedata = {}
        stagedata['x'] = allstagedata['x']
        stagedata['y'] = allstagedata['y']
        stagedata['z'] = allstagedata['z']
        stagedata['a'] = allstagedata['a']
        self.endlocation = stagedata
        self.logger.info('Save end location at %.1f,%.1f um (x,y)' % (stagedata['x']*1e6, stagedata['y']*1e6))
        return True

    def toScope(self, stagedict):
        """Move the stage to *stagedict*; returns True on success."""
        # BUG FIX: ``errstr`` was referenced here but only defined inside
        # fromScope(), so a failed move raised NameError instead of logging.
        errstr = 'Location to instrument failed: %s'
        try:
            self.instrument.tem.StagePosition = stagedict
        except:
            self.logger.exception(errstr % 'unable to set instrument')
            return False
        else:
            self.logger.info('Moved to location in um (x,y): %.1f,%.1f' % (stagedict['x']*1e6, stagedict['y']*1e6))
            return True

    def onPlayer(self, state):
        """Player callback: report state changes to the log."""
        infostr = ''
        if state == 'stop':
            infostr += 'Aborting...'
        if infostr:
            self.logger.info(infostr)

    def resetTiltStage(self):
        """Return stage tilt to zero first, then x/y to the origin."""
        zerostage = {'a': 0.0}
        self.instrument.tem.setStagePosition(zerostage)
        zerostage = {'x': 0.0, 'y': 0.0}
        self.instrument.tem.setStagePosition(zerostage)
        stageposition = self.instrument.tem.getStagePosition()
        # BUG FIX: y was logged in meters (missing the 1e6 factor) while
        # being labeled um; also fixed the 'alhpa' typo.
        self.logger.info('return x,y, and alpha tilt to %.1f um,%.1f um,%.1f deg' % (stageposition['x']*1e6, stageposition['y']*1e6, stageposition['a']))

    def shutDown(self):
        """Close column valves and optionally switch emission off."""
        self.instrument.tem.ColumnValvePosition = 'closed'
        self.logger.warning('column valves closed')
        if self.settings['emission off']:
            self.instrument.tem.Emission = False
            self.logger.warning('emission switched off')

    def startUp(self):
        """Open column valves and lower the main screen for baking."""
        if self.instrument.tem.ColumnValvePosition != 'open':
            self.logger.info('Open column valves...')
            self.instrument.tem.ColumnValvePosition = 'open'
            time.sleep(2.5)
        if self.instrument.tem.MainScreenPosition != 'down':
            self.logger.info('Screen down for baking...')
            self.instrument.tem.MainScreenPosition = 'down'
            time.sleep(2.5)

    def startNext(self):
        """Prepare scope and camera for the data collection that follows."""
        if self.instrument.tem.ColumnValvePosition != 'open':
            self.logger.info('Open column valves...')
            self.instrument.tem.ColumnValvePosition = 'open'
            time.sleep(2.5)
        # This uses self.instrument.ccdcamera of the baking preset.
        # It should use that of the next node
        if self.instrument.ccdcamera.hasAttribute('Inserted'):
            try:
                inserted = self.instrument.ccdcamera.Inserted
            except:
                inserted = True
            if not inserted:
                self.logger.info('inserting camera')
                self.instrument.ccdcamera.Inserted = True
                time.sleep(2.5)
        if self.instrument.tem.MainScreenPosition != 'up':
            self.logger.info('Screen up for data collection...')
            self.instrument.tem.MainScreenPosition = 'up'
            time.sleep(2.5)

    def runAll(self):
        """Run the full baking sequence; returns True on success."""
        self.setStatus('processing')
        status = self.checkDistance()
        if status is True:
            self.setStatus('processing')
            self.startUp()
            preset_name = self.settings['preset']
            self.setStatus('waiting')
            self.logger.info('Sending %s preset for baking' % (preset_name,))
            self.presetsclient.toScope(preset_name)
            self.setStatus('processing')
            status = self.timedMoveToPosition()
            if not status:
                self.logger.info('Aborted')
            else:
                self.logger.info('Baking is done')
        self.panel.onDone()
        self.setStatus('idle')
        return status

    def onPlay(self):
        """Release a pending waitForUserCheck()."""
        self.userpause.set()

    def waitForUserCheck(self, task=''):
        """Block until the user acknowledges *task* via the panel."""
        self.setStatus('user input')
        self.logger.info('Waiting for user to %s...' % (task,))
        self.userpause.clear()
        self.userpause.wait()
        self.setStatus('processing')

    def outputMakeTargetListEvent(self, griddata):
        """Forward a MakeTargetListEvent for *griddata* to the next node."""
        evt = event.MakeTargetListEvent()
        evt['grid'] = griddata
        if evt['grid'] is None:
            self.logger.error('Data collection event not sent')
        else:
            self.outputEvent(evt)
            self.logger.info('Data collection initiated')
        return evt['grid']

    def onProcessing(self, evt):
        """MakeTargetListEvent handler: bake (unless bypassed) then forward."""
        if not self.settings['bypass']:
            self.waitForUserCheck('save end location')
            status = False
            while not status:
                status = self.runAll()
                self.resetTiltStage()
                if not status:
                    self.waitForUserCheck('correct error')
            if self.settings['manual aperture']:
                self.shutDown()
                self.waitForUserCheck('change aperture')
            self.startNext()
        self.outputMakeTargetListEvent(evt['grid'])
        self.panel.onDone()
        self.setStatus('idle')
if __name__ == '__main__':
    # BUG FIX: this guard previously instantiated ``Navigator``, which is not
    # defined or imported in this module — clearly a copy/paste leftover from
    # the navigator node. Instantiate this module's node instead.
    # NOTE(review): Baker.__init__ also requires a managerlocation argument;
    # confirm whether standalone instantiation is still meaningful.
    id = ('baker',)
    n = Baker(id, None, None)
|
<reponame>tnakaicode/ChargedPaticle-LowEnergy<gh_stars>1-10
from logging import warning
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
class FieldSolver:
    def __init__(self, spat_mesh, inner_regions):
        """Precompute the finite-difference matrix and buffers for repeated
        Poisson solves on *spat_mesh*.

        Args:
            spat_mesh: spatial mesh; ``n_nodes`` (per-axis node counts) and
                ``cell`` (per-axis cell sizes) are used here.
            inner_regions: conductor-like regions held at fixed potential.
        """
        if inner_regions:
            print("WARNING: field-solver: inner region support is untested")
            print("WARNING: proceed with caution")
        # (n, i, j, k) tuples mapping flat row index -> 3-D node indices.
        self._double_index = self.double_index(spat_mesh.n_nodes)
        # Interior nodes only; boundary values enter through the RHS.
        nrows = (spat_mesh.n_nodes - 2).prod()
        self.A = self.construct_equation_matrix(spat_mesh, inner_regions)
        self.phi_vec = np.empty(nrows, dtype='f')
        self.rhs = np.empty_like(self.phi_vec)
        self.create_solver_and_preconditioner()
def construct_equation_matrix(self, spat_mesh, inner_regions):
nx, ny, nz = spat_mesh.n_nodes - 2
cx, cy, cz = spat_mesh.cell ** 2
dx, dy, dz = cy * cz, cx * cz, cx * cy
matrix = dx * self.construct_d2dx2_in_3d(nx, ny, nz) + \
dy * self.construct_d2dy2_in_3d(nx, ny, nz) + \
dz * self.construct_d2dz2_in_3d(nx, ny, nz)
return self.zero_nondiag_for_nodes_inside_objects(matrix, spat_mesh, inner_regions)
@staticmethod
def construct_d2dx2_in_3d(nx, ny, nz):
diag_offset = 1
block_size = nx
block = scipy.sparse.diags([1.0, -2.0, 1.0], [-diag_offset, 0, diag_offset], shape=(block_size, block_size),
format='csr')
return scipy.sparse.block_diag([block] * (ny * nz))
@staticmethod
def construct_d2dy2_in_3d(nx, ny, nz):
diag_offset = nx
block_size = nx * ny
block = scipy.sparse.diags([1.0, -2.0, 1.0], [-diag_offset, 0, diag_offset], shape=(block_size, block_size),
format='csr')
return scipy.sparse.block_diag([block] * nz)
@staticmethod
def construct_d2dz2_in_3d(nx, ny, nz):
diag_offset = nx * ny
block_size = nx * ny * nz
return scipy.sparse.diags([1.0, -2.0, 1.0], [-diag_offset, 0, diag_offset], shape=(block_size, block_size),
format='csr')
def zero_nondiag_for_nodes_inside_objects(self, matrix, mesh, inner_regions):
for ir in inner_regions:
for n, i, j, k in self._double_index:
xyz = mesh.cell * (i, j, k)
if ir.check_if_points_inside(xyz):
csr_row_start = matrix.indptr[n]
csr_row_end = matrix.indptr[n + 1]
for t in range(csr_row_start, csr_row_end):
if matrix.indices[t] != n:
matrix.data[t] = 0
else:
matrix.data[t] = 1
return matrix
def create_solver_and_preconditioner(self):
self.maxiter = 1000
self.tol = 1e-10
# abstol = 0
# verbose = true
# monitor(rhs, iteration_limit, rtol, abstol, verbose)
# precond(A.num_rows, A.num_rows)
def eval_potential(self, spat_mesh, inner_regions):
self.solve_poisson_eqn(spat_mesh, inner_regions)
def solve_poisson_eqn(self, spat_mesh, inner_regions):
self.init_rhs_vector(spat_mesh, inner_regions)
# cusp::krylov::cg(A, phi_vec, rhs, monitor, precond)
self.phi_vec, info = scipy.sparse.linalg.cg(self.A, self.rhs, self.phi_vec,
self.tol, self.maxiter)
if info != 0:
warning(f"scipy.sparse.linalg.cg info: {info}")
self.transfer_solution_to_spat_mesh(spat_mesh)
def init_rhs_vector(self, spat_mesh, inner_regions):
self.init_rhs_vector_in_full_domain(spat_mesh)
self.set_rhs_for_nodes_inside_objects(spat_mesh, inner_regions)
def init_rhs_vector_in_full_domain(self, spat_mesh):
m = spat_mesh
rhs = -4 * np.pi * m.cell.prod() ** 2 * m.charge_density[1:-1, 1:-1, 1:-1]
dx, dy, dz = m.cell
rhs[0] -= dy * dy * dz * dz * m.potential[0, 1:-1, 1:-1]
rhs[-1] -= dy * dy * dz * dz * m.potential[-1, 1:-1, 1:-1]
rhs[:, 0] -= dx * dx * dz * dz * m.potential[1:-1, 0, 1:-1]
rhs[:, -1] -= dx * dx * dz * dz * m.potential[1:-1, -1, 1:-1]
rhs[:, :, 0] -= dx * dx * dy * dy * m.potential[1:-1, 1:-1, 0]
rhs[:, :, -1] -= dx * dx * dy * dy * m.potential[1:-1, 1:-1, -1]
self.rhs = rhs.ravel('F')
def set_rhs_for_nodes_inside_objects(self, spat_mesh, inner_regions):
for ir in inner_regions:
for n, i, j, k in self._double_index:
xyz = spat_mesh.cell * (i, j, k)
if ir.check_if_points_inside(xyz):
self.rhs[n] = ir.potential # where is dx**2 dy**2 etc?
def transfer_solution_to_spat_mesh(self, spat_mesh):
spat_mesh.potential[1:-1, 1:-1, 1:-1] = self.phi_vec.reshape(spat_mesh.n_nodes - 2, order='F')
@staticmethod
def eval_fields_from_potential(spat_mesh):
e = -np.stack(np.gradient(spat_mesh.potential, *spat_mesh.cell), -1)
spat_mesh.electric_field = e
@staticmethod
def double_index(n_nodes):
nx, ny, nz = n_nodes - 2
return [(i + j * nx + k * nx * ny, i + 1, j + 1, k + 1)
for k in range(nz) for j in range(ny) for i in range(nx)]
|
<reponame>wjguan/phenocell<gh_stars>0
import sys
import numpy
from clarity.ImageProcessing.BackgroundRemoval import removeBackground
from clarity.ImageProcessing.Filter.DoGFilter import filterDoG
from clarity.ImageProcessing.MaximaDetection import findExtendedMaxima, findPixelCoordinates, findIntensity, findCenterOfMaxima
from clarity.ImageProcessing.CellSizeDetection import detectCellShape, findCellSize, findCellIntensity
from clarity.Utils.Timer import Timer
from clarity.Utils.ParameterTools import getParameter
def detectSpots(img, detectSpotsParameter = None, removeBackgroundParameter = None,
                filterDoGParameter = None, findExtendedMaximaParameter = None, detectCellShapeParameter = None, compactWatershedParameter = 0,
                verbose = False, out = sys.stdout, **parameter):
    """Detect cells in a 3d grayscale image using DoG filtering and maxima detection.

    Effectively this function performs the following steps:

    * background removal via :func:`~clarity.ImageProcessing.BackgroundRemoval.removeBackground`
    * difference of Gaussians (DoG) filter via :func:`~clarity.ImageProcessing.Filter.filterDoG`
    * maxima detection via :func:`~clarity.ImageProcessing.MaximaDetection.findExtendedMaxima`
    * cell shape detection via :func:`~clarity.ImageProcessing.CellSizeDetection.detectCellShape`
    * cell intensity and size measurements via
      :func:`~clarity.ImageProcessing.CellSizeDetection.findCellIntensity` and
      :func:`~clarity.ImageProcessing.CellSizeDetection.findCellSize`.

    Note:
        Processing steps are done in place to save memory.

    Arguments:
        img (array): image data
        detectSpotsParameter: image processing parameters as described in the
            individual sub-routines; per-step entries of the same name inside
            it override the per-step keyword arguments below
        removeBackgroundParameter, filterDoGParameter,
        findExtendedMaximaParameter, detectCellShapeParameter: per-step
            parameter overrides
        compactWatershedParameter: compactness forwarded to cell shape detection
        verbose (bool): print progress information
        out (object): object to print progress information to

    Returns:
        tuple: (cell coordinates, per-cell measurement array). With a cell
        shape threshold configured, the measurements are (intensity, size)
        pairs and zero-size cells are dropped; otherwise a single intensity
        column is returned.
    """
    # Per-step parameters inside detectSpotsParameter take precedence.
    removeBackgroundParameter = getParameter(detectSpotsParameter, "removeBackgroundParameter", removeBackgroundParameter)
    img = removeBackground(img, removeBackgroundParameter = removeBackgroundParameter, verbose = verbose, out = out, **parameter)

    # DoG filtering is skipped entirely when no filter size is configured.
    filterDoGParameter = getParameter(detectSpotsParameter, "filterDoGParameter", filterDoGParameter)
    dogSize = getParameter(filterDoGParameter, "size", None)
    if dogSize is not None:
        img = filterDoG(img, filterDoGParameter = filterDoGParameter, verbose = verbose, out = out, **parameter)

    # Maxima detection: with an h-max configured, use centers of extended
    # maxima; otherwise the plain pixel coordinates of the maxima.
    findExtendedMaximaParameter = getParameter(detectSpotsParameter, "findExtendedMaximaParameter", findExtendedMaximaParameter)
    hMax = getParameter(findExtendedMaximaParameter, "hMax", None)
    imgmax = findExtendedMaxima(img, findExtendedMaximaParameter = findExtendedMaximaParameter, verbose = verbose, out = out, **parameter)
    if hMax is not None:
        centers = findCenterOfMaxima(img, imgmax, verbose = verbose, out = out, **parameter)
    else:
        centers = findPixelCoordinates(imgmax, verbose = verbose, out = out, **parameter)
    del imgmax  # free the maxima mask as early as possible

    detectCellShapeParameter = getParameter(detectSpotsParameter, "detectCellShapeParameter", detectCellShapeParameter)
    cellShapeThreshold = getParameter(detectCellShapeParameter, "threshold", None)
    if cellShapeThreshold is not None:
        # Watershed-based cell shapes: measure size and intensity per label
        # and drop cells whose detected size is zero.
        imgshape = detectCellShape(img, centers, detectCellShapeParameter = detectCellShapeParameter, compactWatershedParameter = compactWatershedParameter, verbose = verbose, out = out, **parameter)
        csize = findCellSize(imgshape, maxLabel = centers.shape[0], out = out, **parameter)
        cintensity = findCellIntensity(img, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter)
        idz = csize > 0
        return (centers[idz], numpy.vstack((cintensity[idz], csize[idz])).transpose())
    else:
        # No shape detection: report only the intensity at each center.
        cintensity = findIntensity(img, centers, verbose = verbose, out = out, **parameter)
        return (centers, numpy.vstack((cintensity,)).transpose())
|
<gh_stars>10-100
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFXIO Interface.
TFXIO is the I/O abstraction for TFX. It allows TFX components / libraries to
access pipeline payload in the form of a common in-memory format (Apache Arrow
RecordBatch) regardless of the physical (at-rest) format of the payload. It also
provides an adapter (TensorAdapter) to translate a RecordBatch into TF tensors.
See
https://github.com/tensorflow/community/blob/master/rfcs/20191017-tfx-standardized-inputs.md
for the high-level design.
"""
import abc
from typing import Iterator, List, Optional, Text
import apache_beam as beam
import pyarrow as pa
import tensorflow as tf
from tfx_bsl.tfxio import dataset_options
from tfx_bsl.tfxio import tensor_adapter
class TFXIO(object, metaclass=abc.ABCMeta):
  """Abstract base class of all TFXIO API implementations.

  TFXIO is TFX's I/O abstraction: implementations expose the pipeline
  payload as Apache Arrow RecordBatches regardless of its at-rest format,
  and provide a TensorAdapter to turn RecordBatches into TF tensors.
  """

  @abc.abstractmethod
  def BeamSource(self, batch_size: Optional[int] = None) -> beam.PTransform:
    """Returns a beam `PTransform` producing `PCollection[pa.RecordBatch]`.

    Must NOT fail merely because no TFMD schema was provided at
    construction time. When a schema was provided, every `pa.RecordBatch`
    in the result matches `self.ArrowSchema()`; otherwise the batches may
    disagree (e.g. contain different numbers of columns).

    Args:
      batch_size: if not None, produced `pa.RecordBatch`es are of this
        size; otherwise the size is tuned automatically by Beam.
    """

  @abc.abstractmethod
  def RecordBatches(
      self, options: dataset_options.RecordBatchesOptions
  ) -> Iterator[pa.RecordBatch]:
    """Returns an iterable of record batches.

    Usable outside of Apache Beam or TensorFlow to access the data.

    Args:
      options: see `dataset_options.RecordBatchesOptions`.
    """

  @abc.abstractmethod
  def ArrowSchema(self) -> pa.Schema:
    """Returns the schema of batches produced by `self.BeamSource()`.

    May raise an error if the TFMD schema was not provided at construction
    time.
    """

  @abc.abstractmethod
  def TensorRepresentations(self) -> tensor_adapter.TensorRepresentations:
    """Returns the `TensorRepresentations`.

    These describe the tensors or composite tensors produced by the
    `TensorAdapter` from `self.TensorAdapter()` or by the tf.data.Dataset
    from `self.TensorFlowDataset()`. May raise an error if the TFMD schema
    was not provided at construction time, or if the representations are
    invalid.
    """

  @abc.abstractmethod
  def TensorFlowDataset(
      self,
      options: dataset_options.TensorFlowDatasetOptions) -> tf.data.Dataset:
    """Returns a tf.data.Dataset of TF inputs.

    May raise an error if the TFMD schema was not provided at construction
    time.

    Args:
      options: see `dataset_options.TensorFlowDatasetOptions`.
    """

  @abc.abstractmethod
  def _ProjectImpl(self, tensor_names: List[Text]) -> "TFXIO":
    """Performs the projection; implemented by sub-classes.

    The returned `TFXIO` equals `self` except that only the columns needed
    for `tensor_names` are guaranteed from `BeamSource()`, and
    `TensorAdapterConfig()` / `TensorFlowDataset()` are trimmed to those
    tensors. May raise an error if the TFMD schema was not provided at
    construction time.

    Args:
      tensor_names: a set of tensor names.
    """

  # final
  def Project(self, tensor_names: List[Text]) -> "TFXIO":
    """Projects the dataset represented by this TFXIO.

    A projected TFXIO only guarantees the columns needed for
    `tensor_names`, trims `TensorAdapterConfig()` and
    `TensorFlowDataset()` to those tensors, and retains a reference to the
    very original TFXIO so its TensorAdapter still knows the specs of the
    tensors the original TensorAdapter would produce (see
    `TensorAdapter.OriginalTensorSpec()`). May raise an error if the TFMD
    schema was not provided at construction time.

    Args:
      tensor_names: a set of tensor names.

    Returns:
      A projected `TFXIO` instance.
    """
    if not isinstance(self, _ProjectedTFXIO):
      return _ProjectedTFXIO(self, self._ProjectImpl(tensor_names))
    # Re-projecting an already projected TFXIO: project the wrapped TFXIO
    # further while keeping the pointer to the original, unprojected one.
    # pylint: disable=protected-access
    return _ProjectedTFXIO(self.origin,
                           self.projected._ProjectImpl(tensor_names))

  # final
  def TensorAdapterConfig(self) -> tensor_adapter.TensorAdapterConfig:
    """Returns the config used to initialize a `TensorAdapter`.

    Returns:
      The same `TensorAdapterConfig` used to initialize the adapter
      returned by `self.TensorAdapter()`.
    """
    return tensor_adapter.TensorAdapterConfig(
        self.ArrowSchema(), self.TensorRepresentations())

  # final
  def TensorAdapter(self) -> tensor_adapter.TensorAdapter:
    """Returns a TensorAdapter converting pa.RecordBatch to TF inputs.

    May raise an error if the TFMD schema was not provided at construction
    time.
    """
    return tensor_adapter.TensorAdapter(self.TensorAdapterConfig())
class _ProjectedTFXIO(TFXIO):
  """A projected TFXIO that remembers which TFXIO it was derived from."""

  def __init__(self, origin: TFXIO, projected: TFXIO):
    self._origin = origin
    self._projected = projected

  @property
  def origin(self) -> TFXIO:
    """The original, unprojected TFXIO."""
    return self._origin

  @property
  def projected(self) -> TFXIO:
    """The TFXIO that actually implements the projection."""
    return self._projected

  # All methods below delegate to the projected TFXIO, except
  # TensorAdapterConfig, which additionally carries the origin's type specs.

  def BeamSource(self, batch_size: Optional[int] = None) -> beam.PTransform:
    return self.projected.BeamSource(batch_size)

  def RecordBatches(
      self, options: dataset_options.RecordBatchesOptions
  ) -> Iterator[pa.RecordBatch]:
    return self.projected.RecordBatches(options)

  def ArrowSchema(self) -> pa.Schema:
    return self.projected.ArrowSchema()

  def TensorRepresentations(self) -> tensor_adapter.TensorRepresentations:
    return self.projected.TensorRepresentations()

  def TensorFlowDataset(
      self,
      options: dataset_options.TensorFlowDatasetOptions) -> tf.data.Dataset:
    return self.projected.TensorFlowDataset(options)

  def _ProjectImpl(self, unused_tensor_names: List[Text]) -> "TFXIO":
    # A projection of a projection is handled by TFXIO.Project directly.
    raise ValueError("This should never be called.")

  def TensorAdapterConfig(self) -> tensor_adapter.TensorAdapterConfig:
    return tensor_adapter.TensorAdapterConfig(
        self.projected.ArrowSchema(),
        self.projected.TensorRepresentations(),
        original_type_specs=self.origin.TensorAdapter().TypeSpecs())
|
import json
from unittest.mock import MagicMock
import pytest
import requests
from connexion.exceptions import (BadRequestProblem, ConnexionException,
OAuthProblem, OAuthResponseProblem,
OAuthScopeProblem)
def test_get_tokeninfo_url(monkeypatch, security_handler_factory):
    """get_tokeninfo_func resolves the token-info URL from the security
    definition or from the TOKENINFO_URL environment variable."""
    security_handler_factory.get_token_info_remote = MagicMock(return_value='get_token_info_remote_result')
    fake_environ = {}
    monkeypatch.setattr('os.environ', fake_environ)
    mock_logger = MagicMock()
    monkeypatch.setattr('connexion.security.security_handler_factory.logger', mock_logger)

    # Neither env var nor definition: no token-info function at all.
    definition = {}
    assert security_handler_factory.get_tokeninfo_func(definition) is None
    mock_logger.warn.assert_not_called()

    # The environment variable alone is enough.
    fake_environ['TOKENINFO_URL'] = 'issue-146'
    assert security_handler_factory.get_tokeninfo_func(definition) == 'get_token_info_remote_result'
    security_handler_factory.get_token_info_remote.assert_called_with('issue-146')
    mock_logger.warn.assert_not_called()
    mock_logger.warn.reset_mock()

    # x-tokenInfoUrl in the definition wins over the env var.
    definition = {'x-tokenInfoUrl': 'bar'}
    assert security_handler_factory.get_tokeninfo_func(definition) == 'get_token_info_remote_result'
    security_handler_factory.get_token_info_remote.assert_called_with('bar')
    mock_logger.warn.assert_not_called()
def test_verify_oauth_missing_auth_header(security_handler_factory):
    """A request without an Authorization header yields the no-value sentinel."""
    def token_lookup(token):
        return None

    wrapped = security_handler_factory.verify_oauth(
        token_lookup, security_handler_factory.validate_scope, ['admin'])
    request = MagicMock()
    request.headers = {}
    assert wrapped(request) is security_handler_factory.no_value
def test_verify_oauth_scopes_remote(monkeypatch, security_handler_factory):
    """Scope validation against a remote token-info endpoint, for both a
    space-separated scope string and a scope list."""
    token_payload = dict(uid="foo", scope="scope1 scope2")

    def fake_tokeninfo_get(*args, **kwargs):
        response = requests.Response()
        response.status_code = requests.codes.ok
        response._content = json.dumps(token_payload).encode()
        return response

    token_info_func = security_handler_factory.get_tokeninfo_func(
        {'x-tokenInfoUrl': 'https://example.org/tokeninfo'})
    wrapped = security_handler_factory.verify_oauth(
        token_info_func, security_handler_factory.validate_scope, ['admin'])
    request = MagicMock()
    request.headers = {"Authorization": "Bearer 123"}
    session = MagicMock()
    session.get = fake_tokeninfo_get
    monkeypatch.setattr('connexion.security.flask_security_handler_factory.session', session)

    # String scopes: missing 'admin' is rejected, adding it passes.
    with pytest.raises(OAuthScopeProblem, match="Provided token doesn't have the required scope"):
        wrapped(request)
    token_payload["scope"] += " admin"
    assert wrapped(request) is not None

    # List scopes behave the same way.
    token_payload["scope"] = ["foo", "bar"]
    with pytest.raises(OAuthScopeProblem, match="Provided token doesn't have the required scope"):
        wrapped(request)
    token_payload["scope"].append("admin")
    assert wrapped(request) is not None
def test_verify_oauth_invalid_local_token_response_none(security_handler_factory):
    """A local token_info callable returning None raises OAuthResponseProblem."""
    def token_lookup(token):
        return None

    wrapped = security_handler_factory.verify_oauth(
        token_lookup, security_handler_factory.validate_scope, ['admin'])
    request = MagicMock()
    request.headers = {"Authorization": "Bearer 123"}
    with pytest.raises(OAuthResponseProblem):
        wrapped(request)
def test_verify_oauth_scopes_local(security_handler_factory):
    """Scope validation with a local token_info callable, for both a
    space-separated scope string and a scope list."""
    token_payload = dict(uid="foo", scope="scope1 scope2")

    def token_lookup(token):
        return token_payload

    wrapped = security_handler_factory.verify_oauth(
        token_lookup, security_handler_factory.validate_scope, ['admin'])
    request = MagicMock()
    request.headers = {"Authorization": "Bearer 123"}

    # String scopes: missing 'admin' is rejected, adding it passes.
    with pytest.raises(OAuthScopeProblem, match="Provided token doesn't have the required scope"):
        wrapped(request)
    token_payload["scope"] += " admin"
    assert wrapped(request) is not None

    # List scopes behave the same way.
    token_payload["scope"] = ["foo", "bar"]
    with pytest.raises(OAuthScopeProblem, match="Provided token doesn't have the required scope"):
        wrapped(request)
    token_payload["scope"].append("admin")
    assert wrapped(request) is not None
def test_verify_basic_missing_auth_header(security_handler_factory):
    """A non-Basic Authorization header yields the no-value sentinel."""
    def credential_check(username, password, required_scopes=None):
        return None

    wrapped = security_handler_factory.verify_basic(credential_check)
    request = MagicMock()
    request.headers = {"Authorization": "Bearer 123"}
    assert wrapped(request) is security_handler_factory.no_value
def test_verify_basic(security_handler_factory):
    """Valid Basic credentials are accepted.

    The header value 'Basic Zm9vOmJhcg==' is base64 for 'foo:bar', so the
    credential checker must accept exactly that username/password pair.
    """
    def basic_info(username, password, required_scopes=None):
        # The password must be 'bar' to match the base64 credentials in the
        # request below; the '<PASSWORD>' placeholder previously here could
        # never match, so this test always failed.
        if username == 'foo' and password == 'bar':
            return {'sub': 'foo'}
        return None

    wrapped_func = security_handler_factory.verify_basic(basic_info)
    request = MagicMock()
    request.headers = {"Authorization": 'Basic Zm9vOmJhcg=='}
    assert wrapped_func(request) is not None
def test_verify_apikey_query(security_handler_factory):
    """An API key passed as a query parameter is accepted."""
    def apikey_info(apikey, required_scopes=None):
        return {'sub': 'foo'} if apikey == 'foobar' else None

    wrapped = security_handler_factory.verify_api_key(apikey_info, 'query', 'auth')
    request = MagicMock()
    request.query = {"auth": 'foobar'}
    assert wrapped(request) is not None
def test_verify_apikey_header(security_handler_factory):
    """An API key passed via a request header is accepted."""
    def apikey_info(apikey, required_scopes=None):
        return {'sub': 'foo'} if apikey == 'foobar' else None

    wrapped = security_handler_factory.verify_api_key(apikey_info, 'header', 'X-Auth')
    request = MagicMock()
    request.headers = {"X-Auth": 'foobar'}
    assert wrapped(request) is not None
def test_multiple_schemes(security_handler_factory):
    """verify_multiple_schemes authenticates only when every scheme passes,
    and keys the combined token info by scheme name."""
    def apikey1_info(apikey, required_scopes=None):
        return {'sub': 'foo'} if apikey == 'foobar' else None

    def apikey2_info(apikey, required_scopes=None):
        return {'sub': 'bar'} if apikey == 'bar' else None

    schemes = {
        'key1': security_handler_factory.verify_api_key(apikey1_info, 'header', 'X-Auth-1'),
        'key2': security_handler_factory.verify_api_key(apikey2_info, 'header', 'X-Auth-2'),
    }
    wrapped = security_handler_factory.verify_multiple_schemes(schemes)

    # Either key alone is not enough.
    for headers in ({"X-Auth-1": 'foobar'}, {"X-Auth-2": 'bar'}):
        request = MagicMock()
        request.headers = headers
        assert wrapped(request) is security_handler_factory.no_value

    # Supplying both keys succeeds.
    request = MagicMock()
    request.headers = {
        "X-Auth-1": 'foobar',
        "X-Auth-2": 'bar'
    }
    assert wrapped(request) == {
        'key1': {'sub': 'foo'},
        'key2': {'sub': 'bar'},
    }
def test_verify_security_oauthproblem(security_handler_factory):
    """verify_security raises OAuthProblem when no auth functions are given."""
    func_to_secure = MagicMock(return_value='func')
    secured = security_handler_factory.verify_security([], func_to_secure)
    request = MagicMock()
    with pytest.raises(OAuthProblem) as exc_info:
        secured(request)
    assert str(exc_info.value) == '401 Unauthorized: No authorization token provided'
@pytest.mark.parametrize(
    'errors, most_specific',
    [
        ([OAuthProblem()], OAuthProblem),
        ([OAuthProblem(), OAuthScopeProblem([], [])], OAuthScopeProblem),
        # NOTE(review): the next two rows pass BadRequestProblem /
        # ConnexionException as classes rather than instances, unlike the
        # other rows -- presumably unintentional, though harmless here since
        # OAuthScopeProblem is expected to win; confirm before changing.
        ([OAuthProblem(), OAuthScopeProblem([], []), BadRequestProblem], OAuthScopeProblem),
        ([OAuthProblem(), OAuthScopeProblem([], []), BadRequestProblem, ConnexionException], OAuthScopeProblem),
        ([BadRequestProblem(), ConnexionException()], BadRequestProblem),
        ([ConnexionException()], ConnexionException),
    ]
)
def test_raise_most_specific(errors, most_specific, security_handler_factory):
    """Tests whether the most specific exception is raised from a list."""
    with pytest.raises(most_specific):
        security_handler_factory._raise_most_specific(errors)
|
<reponame>liori/optuna
import math
from typing import List
from typing import Optional
from typing import Tuple
from optuna.logging import get_logger
from optuna.study import Study
from optuna.study import StudyDirection
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
from optuna.visualization._plotly_imports import _imports
from optuna.visualization._utils import _is_log_scale
if _imports.is_successful():
from optuna.visualization._plotly_imports import Contour
from optuna.visualization._plotly_imports import go
from optuna.visualization._plotly_imports import make_subplots
from optuna.visualization._plotly_imports import plotly
from optuna.visualization._plotly_imports import Scatter
_logger = get_logger(__name__)
def plot_contour(study: Study, params: Optional[List[str]] = None) -> "go.Figure":
    """Plot the parameter relationship as contour plot in a study.

    Note that a trial with missing values for any of the selected
    parameters is not plotted.

    Example:

        The following code snippet shows how to plot the parameter
        relationship as contour plot.

        .. testcode::

            import optuna

            def objective(trial):
                x = trial.suggest_uniform('x', -100, 100)
                y = trial.suggest_categorical('y', [-1, 0, 1])
                return x ** 2 + y

            study = optuna.create_study()
            study.optimize(objective, n_trials=10)

            optuna.visualization.plot_contour(study, params=['x', 'y'])

        .. raw:: html

            <iframe src="../_static/plot_contour.html" width="100%" height="500px" frameborder="0">
            </iframe>

    Args:
        study:
            A :class:`~optuna.study.Study` object whose trials are plotted for their objective
            values.
        params:
            Parameter list to visualize. The default is all parameters.

    Returns:
        A :class:`plotly.graph_objs.Figure` object.
    """
    # Fail fast when plotly is unavailable, then delegate to the builder.
    _imports.check()
    return _get_contour_plot(study, params)
def _get_contour_plot(study: Study, params: Optional[List[str]] = None) -> "go.Figure":
    # Build the contour figure for a study: a single contour cell for exactly
    # two parameters, or an NxN matrix of pairwise subplots for more.
    layout = go.Layout(title="Contour Plot",)
    # Only completed trials contribute data points.
    trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
    if len(trials) == 0:
        _logger.warning("Your study does not have any completed trials.")
        return go.Figure(data=[], layout=layout)
    all_params = {p_name for t in trials for p_name in t.params.keys()}
    if params is None:
        sorted_params = sorted(list(all_params))
    elif len(params) <= 1:
        # A contour needs at least two parameters.
        _logger.warning("The length of params must be greater than 1.")
        return go.Figure(data=[], layout=layout)
    else:
        for input_p_name in params:
            if input_p_name not in all_params:
                raise ValueError("Parameter {} does not exist in your study.".format(input_p_name))
        sorted_params = sorted(list(set(params)))
    # Axis range per parameter: min/max over trials that have that parameter.
    param_values_range = {}
    for p_name in sorted_params:
        values = [t.params[p_name] for t in trials if p_name in t.params]
        param_values_range[p_name] = (min(values), max(values))
    if len(sorted_params) == 2:
        # Two parameters: one contour with a scatter overlay.
        x_param = sorted_params[0]
        y_param = sorted_params[1]
        sub_plots = _generate_contour_subplot(trials, x_param, y_param, study.direction)
        figure = go.Figure(data=sub_plots, layout=layout)
        figure.update_xaxes(title_text=x_param, range=param_values_range[x_param])
        figure.update_yaxes(title_text=y_param, range=param_values_range[y_param])
        # Log-scaled parameters get log axes; plotly expects the range as
        # base-10 exponents in that case.
        if _is_log_scale(trials, x_param):
            log_range = [math.log10(p) for p in param_values_range[x_param]]
            figure.update_xaxes(range=log_range, type="log")
        if _is_log_scale(trials, y_param):
            log_range = [math.log10(p) for p in param_values_range[y_param]]
            figure.update_yaxes(range=log_range, type="log")
    else:
        # More than two parameters: matrix of pairwise contours; the diagonal
        # is filled with empty scatters.
        figure = make_subplots(
            rows=len(sorted_params), cols=len(sorted_params), shared_xaxes=True, shared_yaxes=True
        )
        figure.update_layout(layout)
        showscale = True  # showscale option only needs to be specified once
        for x_i, x_param in enumerate(sorted_params):
            for y_i, y_param in enumerate(sorted_params):
                if x_param == y_param:
                    figure.add_trace(go.Scatter(), row=y_i + 1, col=x_i + 1)
                else:
                    sub_plots = _generate_contour_subplot(
                        trials, x_param, y_param, study.direction
                    )
                    contour = sub_plots[0]
                    scatter = sub_plots[1]
                    contour.update(showscale=showscale)  # showscale's default is True
                    if showscale:
                        showscale = False
                    figure.add_trace(contour, row=y_i + 1, col=x_i + 1)
                    figure.add_trace(scatter, row=y_i + 1, col=x_i + 1)
                # Axis ranges/titles apply to every cell, diagonal included.
                figure.update_xaxes(range=param_values_range[x_param], row=y_i + 1, col=x_i + 1)
                figure.update_yaxes(range=param_values_range[y_param], row=y_i + 1, col=x_i + 1)
                if _is_log_scale(trials, x_param):
                    log_range = [math.log10(p) for p in param_values_range[x_param]]
                    figure.update_xaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
                if _is_log_scale(trials, y_param):
                    log_range = [math.log10(p) for p in param_values_range[y_param]]
                    figure.update_yaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
                # Titles only on the left column and bottom row.
                if x_i == 0:
                    figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1)
                if y_i == len(sorted_params) - 1:
                    figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1)
    return figure
def _generate_contour_subplot(
    trials: List[FrozenTrial], x_param: str, y_param: str, direction: StudyDirection
) -> Tuple["Contour", "Scatter"]:
    # Build one (contour, scatter) pair for a single x_param/y_param cell.
    # Axes are the sorted unique parameter values observed across the trials.
    x_indices = sorted(list({t.params[x_param] for t in trials if x_param in t.params}))
    y_indices = sorted(list({t.params[y_param] for t in trials if y_param in t.params}))
    if len(x_indices) < 2:
        _logger.warning("Param {} unique value length is less than 2.".format(x_param))
        return go.Contour(), go.Scatter()
    if len(y_indices) < 2:
        _logger.warning("Param {} unique value length is less than 2.".format(y_param))
        return go.Contour(), go.Scatter()
    # z[y][x] holds the objective value; NaN where no trial hit that cell
    # (connectgaps=True below interpolates across those).
    z = [[float("nan") for _ in range(len(x_indices))] for _ in range(len(y_indices))]
    x_values = []
    y_values = []
    for trial in trials:
        # Skip trials that miss either parameter.
        if x_param not in trial.params or y_param not in trial.params:
            continue
        x_values.append(trial.params[x_param])
        y_values.append(trial.params[y_param])
        x_i = x_indices.index(trial.params[x_param])
        y_i = y_indices.index(trial.params[y_param])
        # Only int/float objective values are plottable. (Note: a bool value
        # would also pass the isinstance(..., int) check.)
        if isinstance(trial.value, int):
            value = float(trial.value)
        elif isinstance(trial.value, float):
            value = trial.value
        else:
            raise ValueError(
                "Trial{} has COMPLETE state, but its value is non-numeric.".format(trial.number)
            )
        z[y_i][x_i] = value
    # TODO(Yanase): Use reversescale argument to reverse colorscale if Plotly's bug is fixed.
    # If contours_coloring='heatmap' is specified, the reversescale argument of go.Contour does
    # not work correctly. See https://github.com/pfnet/optuna/issues/606.
    # For minimization studies the colorscale is flipped manually instead.
    colorscale = plotly.colors.PLOTLY_SCALES["Blues"]
    if direction == StudyDirection.MINIMIZE:
        colorscale = [[1 - t[0], t[1]] for t in colorscale]
        colorscale.reverse()
    contour = go.Contour(
        x=x_indices,
        y=y_indices,
        z=z,
        colorbar={"title": "Objective Value"},
        colorscale=colorscale,
        connectgaps=True,
        contours_coloring="heatmap",
        hoverinfo="none",
        line_smoothing=1.3,
    )
    # Overlay the actual trial points on top of the contour.
    scatter = go.Scatter(
        x=x_values, y=y_values, marker={"color": "black"}, mode="markers", showlegend=False
    )
    return (contour, scatter)
|
<filename>ciphr.py
import tools
from PyQt5.QtCore import Qt
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtGui import QFont, QIcon
from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QLineEdit, QDesktopWidget
# Shared Qt object names set via setObjectName, used by the windows below:
# [0] window, [1] back button, [2] result label, [3] input fields,
# [4] encode/decode buttons. ([5] "Copy" is presumably for the copy button;
# its use is outside this view -- confirm.) A stylesheet likely keys off
# these names -- not visible here.
obj_list = ["Menu", "Backbutton", "Result", "Inputs", "Crypt", "Copy"]
class MainUi(QWidget):
    """Main menu window of Ciphr: one button per cipher plus a repository
    link button.

    Navigation is signal-based: each cipher button emits one of the
    switch_tab* signals, which presumably an external controller connects
    to the corresponding cipher window (not visible here).
    """
    # One navigation signal per cipher tab (binary, caesar, vigenere,
    # morse, hill -- in the order the buttons connect below).
    switch_tab1 = QtCore.pyqtSignal()
    switch_tab2 = QtCore.pyqtSignal()
    switch_tab3 = QtCore.pyqtSignal()
    switch_tab4 = QtCore.pyqtSignal()
    switch_tab5 = QtCore.pyqtSignal()

    def __init__(self):
        QWidget.__init__(self)
        # Compute the frame rectangle centered on the available screen area;
        # the window is moved there after its geometry is set.
        qtRectangle = self.frameGeometry()
        centerPoint = QDesktopWidget().availableGeometry().center()
        qtRectangle.moveCenter(centerPoint)
        self.url = "https://github.com/oOperaho/Ciphr"
        # Widgets: title label, repository-link button, five cipher buttons.
        self.innic = QtWidgets.QLabel(self)
        self.ciphr_repo = QPushButton(self)
        self.biny = QPushButton(self)
        self.cae = QPushButton(self)
        self.vig = QPushButton(self)
        self.mor = QPushButton(self)
        self.hil = QPushButton(self)
        self.setWindowTitle("Ciphr")
        self.setObjectName(obj_list[0])
        self.setWindowIcon(QIcon("icons/cr.png"))
        self.setGeometry(0, 0, 520, 400)
        self.move(qtRectangle.topLeft())
        self.setFixedSize(self.size()) # disable resizing
        self.menu()

    def menu(self):
        """Lay out the title, the five cipher buttons and the repo button.

        The cipher buttons share object name "FirstBtt" (presumably so a
        stylesheet can target them as a group -- confirm); positions are
        derived from the window width so the two-column grid stays
        horizontally centered.
        """
        keyname = "FirstBtt"
        # Title label, horizontally centered near the top.
        self.innic.setText("• CIPHR •")
        self.innic.setGeometry(0, 0, 190, 100)
        self.innic.setObjectName("Innic")
        self.innic.setAlignment(Qt.AlignCenter)
        pos_x = self.width() - self.innic.width()
        self.innic.move(int(pos_x / 2), 40)
        # Row 1: Binary (left) / Caesar (right).
        self.biny.setText("Binary")
        self.biny.setObjectName(keyname)
        self.biny.setGeometry(0, 0, 100, 40)
        self.biny.clicked.connect(self.bin_toggle)
        pos_x = self.width() - self.biny.width()
        self.biny.move(int(pos_x / 2) - 60, 140)
        self.cae.setText("Caesar")
        self.cae.setObjectName(keyname)
        self.cae.setGeometry(0, 0, 100, 40)
        self.cae.clicked.connect(self.cae_toggle)
        pos_x = self.width() - self.cae.width()
        self.cae.move(int(pos_x / 2) + 60, 140)
        # Row 2: Vigenere (left) / Morse (right).
        self.vig.setText("Vigenere")
        self.vig.setObjectName(keyname)
        self.vig.setGeometry(0, 0, 100, 40)
        self.vig.clicked.connect(self.vig_toggle)
        pos_x = self.width() - self.vig.width()
        self.vig.move(int(pos_x / 2) - 60, 200)
        self.mor.setText("Morse")
        self.mor.setObjectName(keyname)
        self.mor.setGeometry(0, 0, 100, 40)
        self.mor.clicked.connect(self.mor_toggle)
        pos_x = self.width() - self.mor.width()
        self.mor.move(int(pos_x / 2) + 60, 200)
        # Row 3: Hill (left).
        self.hil.setText("Hill")
        self.hil.setObjectName(keyname)
        self.hil.setGeometry(0, 0, 100, 40)
        self.hil.clicked.connect(self.hil_toggle)
        pos_x = self.width() - self.hil.width()
        self.hil.move(int(pos_x / 2) - 60, 260)
        # Small repository-link button pinned to the bottom-left corner.
        self.ciphr_repo.setGeometry(10, 800, 20, 20)
        self.ciphr_repo.setObjectName("Repo")
        self.ciphr_repo.clicked.connect(self.open_repo)
        pos_y = self.height() - self.ciphr_repo.height()
        margin = 15
        self.ciphr_repo.move(margin, pos_y-margin)

    # Each cipher button simply re-emits its navigation signal.
    def bin_toggle(self):
        self.switch_tab1.emit()

    def cae_toggle(self):
        self.switch_tab2.emit()

    def vig_toggle(self):
        self.switch_tab3.emit()

    def mor_toggle(self):
        self.switch_tab4.emit()

    def hil_toggle(self):
        self.switch_tab5.emit()

    def open_repo(self):
        """Open the project repository in the default web browser."""
        import webbrowser
        webbrowser.open_new(self.url)
class BinaryTab(QWidget):
    """Binary encode/decode screen: decimal<->binary with copy support."""
    # Emitted to ask the controller to return to the main menu.
    switch_tab = QtCore.pyqtSignal()

    def __init__(self):
        QWidget.__init__(self)
        frame = self.frameGeometry()
        frame.moveCenter(QDesktopWidget().availableGeometry().center())
        self.backbutton = QPushButton(self)
        self.encodebutton = QPushButton(self)
        self.encodeinput = QLineEdit(self)
        self.decodebutton = QPushButton(self)
        self.decodeinput = QLineEdit(self)
        self.copy_text = QPushButton(self)
        self.result = QtWidgets.QLabel(self)
        self.setWindowTitle("Ciphr")
        self.setWindowIcon(QIcon("icons/cr.png"))
        self.setGeometry(0, 0, 520, 400)
        self.move(frame.topLeft())
        self.setObjectName(obj_list[0])
        self.setFixedSize(self.size())
        self.binary_window()

    def _center_x(self, widget, offset=0):
        """X coordinate that horizontally centres `widget`, plus an offset."""
        return int((self.width() - widget.width()) / 2) + offset

    def binary_window(self):
        """Lay out and wire the widgets for this screen."""
        self.backbutton.setText("←")
        self.backbutton.setObjectName(obj_list[1])
        self.backbutton.setGeometry(10, 10, 60, 35)
        self.backbutton.clicked.connect(self.menu_toggle)
        self.result.setText("")
        self.result.setGeometry(640, 410, 170, 20)
        self.result.setObjectName(obj_list[2])
        self.result.move(self._center_x(self.result), 200)
        self.encodeinput.setText("0")
        self.encodeinput.setGeometry(655, 280, 70, 30)
        self.encodeinput.setObjectName(obj_list[3])
        self.encodeinput.move(self._center_x(self.encodeinput, -40), 130)
        self.decodeinput.setText("0")
        self.decodeinput.setGeometry(655, 280, 70, 30)
        self.decodeinput.setObjectName(obj_list[3])
        self.decodeinput.move(self._center_x(self.decodeinput, 40), 130)
        self.encodebutton.setText("Encode")
        self.encodebutton.setGeometry(620, 360, 70, 30)
        self.encodebutton.setObjectName(obj_list[4])
        self.encodebutton.move(self._center_x(self.encodebutton, -40), 165)
        self.encodebutton.clicked.connect(self.encodebinary)
        self.decodebutton.setText("Decode")
        self.decodebutton.setGeometry(720, 360, 70, 30)
        self.decodebutton.setObjectName(obj_list[4])
        self.decodebutton.move(self._center_x(self.decodebutton, 40), 165)
        self.decodebutton.clicked.connect(self.decodebinary)
        self.copy_text.setText("Copy")
        self.copy_text.setGeometry(620, 360, 50, 25)
        self.copy_text.setObjectName(obj_list[5])
        self.copy_text.move(self._center_x(self.copy_text), 230)
        self.copy_text.clicked.connect(self.copy_to_clipboard)

    def encodebinary(self):
        """Show the binary form of the decimal input; '?' when empty."""
        from Binary.binary import binaryencoder
        raw = self.encodeinput.text()
        if raw != "":
            sanitized = tools.process_int(raw)
            self.result.setText(str(binaryencoder(int(sanitized))))
        else:
            self.result.setText("?")

    def decodebinary(self):
        """Show the decimal form of the binary input; '?' when empty."""
        from Binary.binary import binarydecoder
        raw = self.decodeinput.text()
        if raw != "":
            sanitized = tools.process_int(raw)
            self.result.setText(str(binarydecoder(int(sanitized))))
        else:
            self.result.setText("?")

    def copy_to_clipboard(self):
        """Copy the current result text to the system clipboard."""
        tools.copy_to_pc(self.result.text())

    def menu_toggle(self):
        """Return to the main menu."""
        self.switch_tab.emit()
class CaesarTab(QWidget):
    """Caesar cipher screen: text input, integer shift key, encode/decode."""
    # Emitted to ask the controller to return to the main menu.
    switch_tab = QtCore.pyqtSignal()

    def __init__(self):
        QWidget.__init__(self)
        frame = self.frameGeometry()
        frame.moveCenter(QDesktopWidget().availableGeometry().center())
        self.backbutton = QPushButton(self)
        self.encodebutton = QPushButton(self)
        self.decodebutton = QPushButton(self)
        self.caesarinput = QLineEdit(self)
        self.caesarkey = QLineEdit(self)
        self.copy_text = QPushButton(self)
        self.result = QtWidgets.QLabel(self)
        self.setWindowTitle("Ciphr")
        self.setWindowIcon(QIcon("icons/cr.png"))
        self.setGeometry(0, 0, 520, 400)
        self.move(frame.topLeft())
        self.setObjectName(obj_list[0])
        self.setFixedSize(self.size())
        self.caesar_window()

    def _center_x(self, widget, offset=0):
        """X coordinate that horizontally centres `widget`, plus an offset."""
        return int((self.width() - widget.width()) / 2) + offset

    def caesar_window(self):
        """Lay out and wire the widgets for this screen."""
        self.backbutton.setText("←")
        self.backbutton.setGeometry(10, 10, 60, 35)
        self.backbutton.setFont(QFont("Helvetica", 15))
        self.backbutton.setObjectName(obj_list[1])
        self.backbutton.clicked.connect(self.menu_toggle)
        self.result.setText("")
        self.result.setGeometry(640, 410, 150, 20)
        self.result.setObjectName(obj_list[2])
        self.result.move(self._center_x(self.result), 200)
        self.caesarinput.setText("text")
        self.caesarinput.setGeometry(655, 280, 120, 30)
        self.caesarinput.setObjectName(obj_list[3])
        self.caesarinput.move(self._center_x(self.caesarinput), 80)
        self.caesarkey.setText("3")
        self.caesarkey.setGeometry(685, 320, 50, 30)
        self.caesarkey.setObjectName(obj_list[3])
        self.caesarkey.move(self._center_x(self.caesarkey), 120)
        self.encodebutton.setText("Encode")
        self.encodebutton.setGeometry(620, 360, 70, 30)
        self.encodebutton.setObjectName(obj_list[4])
        self.encodebutton.move(self._center_x(self.encodebutton, -40), 160)
        self.encodebutton.clicked.connect(self.encodecaesar)
        self.decodebutton.setText("Decode")
        self.decodebutton.setGeometry(720, 360, 70, 30)
        self.decodebutton.setObjectName(obj_list[4])
        self.decodebutton.move(self._center_x(self.decodebutton, 40), 160)
        self.decodebutton.clicked.connect(self.decodecaesar)
        self.copy_text.setText("Copy")
        self.copy_text.setGeometry(620, 360, 50, 25)
        self.copy_text.setObjectName(obj_list[5])
        self.copy_text.move(self._center_x(self.copy_text), 230)
        self.copy_text.clicked.connect(self.copy_to_clipboard)

    def encodecaesar(self):
        """Encode the input with the Caesar shift; '?' when the key is empty."""
        from Caesar.caesar import caesarencoder
        word = tools.process_text(self.caesarinput.text())
        key = self.caesarkey.text()
        if key != "":
            key = tools.process_int(key)
            self.result.setText(caesarencoder(word, int(key)))
        else:
            self.result.setText("?")

    def decodecaesar(self):
        """Decode the input with the Caesar shift; '?' when the key is empty."""
        from Caesar.caesar import caesardecoder
        word = tools.process_text(self.caesarinput.text())
        key = self.caesarkey.text()
        if key != "":
            key = tools.process_int(key)
            self.result.setText(caesardecoder(word, int(key)))
        else:
            self.result.setText("?")

    def copy_to_clipboard(self):
        """Copy the current result text to the system clipboard."""
        tools.copy_to_pc(self.result.text())

    def menu_toggle(self):
        """Return to the main menu."""
        self.switch_tab.emit()
class VigenereTab(QWidget):
    """Vigenere cipher screen: text and key inputs, encode/decode, copy."""
    # Emitted to ask the controller to return to the main menu.
    switch_tab = QtCore.pyqtSignal()
    def __init__(self):
        QWidget.__init__(self)
        qtRectangle = self.frameGeometry()
        centerPoint = QDesktopWidget().availableGeometry().center()
        qtRectangle.moveCenter(centerPoint)
        self.backbutton = QPushButton(self)
        self.result = QtWidgets.QLabel(self)
        self.vigenerekey = QLineEdit(self)
        self.vigenereinput = QLineEdit(self)
        self.encodebutton = QPushButton(self)
        self.decodebutton = QPushButton(self)
        self.copy_text = QPushButton(self)
        self.setWindowTitle("Ciphr")
        self.setWindowIcon(QIcon("icons/cr.png"))
        self.setGeometry(0, 0, 520, 400)
        self.move(qtRectangle.topLeft())
        self.setObjectName(obj_list[0])
        self.setFixedSize(self.size())  # fixed-size window, no resizing
        self.vigenere_window()
    def vigenere_window(self):
        """Lay out and wire the widgets for this screen."""
        self.backbutton.setText("←")
        self.backbutton.setGeometry(10, 10, 60, 35)
        self.backbutton.setObjectName(obj_list[1])
        self.backbutton.clicked.connect(self.menu_toggle)
        self.result.setText("")
        self.result.setGeometry(640, 410, 150, 20)
        self.result.setObjectName(obj_list[2])
        pos_x = self.width() - self.result.width()
        self.result.move(int(pos_x / 2), 200)
        self.vigenereinput.setText("text")
        self.vigenereinput.setGeometry(655, 280, 120, 30)
        self.vigenereinput.setObjectName(obj_list[3])
        pos_x = self.width() - self.vigenereinput.width()
        self.vigenereinput.move(int(pos_x / 2), 80)
        self.vigenerekey.setText("key")
        self.vigenerekey.setGeometry(685, 320, 120, 30)
        self.vigenerekey.setObjectName(obj_list[3])
        pos_x = self.width() - self.vigenerekey.width()
        self.vigenerekey.move(int(pos_x / 2), 120)
        self.encodebutton.setText("Encode")
        self.encodebutton.setGeometry(620, 360, 70, 30)
        self.encodebutton.setObjectName(obj_list[4])
        pos_x = self.width() - self.encodebutton.width()
        # FIX: Encode goes on the left (-40) and Decode on the right (+40),
        # matching the Binary and Caesar tabs; the offsets were swapped here.
        self.encodebutton.move(int(pos_x / 2) - 40, 160)
        self.encodebutton.clicked.connect(self.encodevigenere)
        self.decodebutton.setText("Decode")
        self.decodebutton.setGeometry(720, 360, 70, 30)
        self.decodebutton.setObjectName(obj_list[4])
        pos_x = self.width() - self.decodebutton.width()
        self.decodebutton.move(int(pos_x / 2) + 40, 160)
        self.decodebutton.clicked.connect(self.decodevigenere)
        self.copy_text.setText("Copy")
        self.copy_text.setGeometry(620, 360, 50, 25)
        self.copy_text.setObjectName(obj_list[5])
        pos_x = self.width() - self.copy_text.width()
        self.copy_text.move(int(pos_x / 2), 230)
        self.copy_text.clicked.connect(self.copy_to_clipboard)
    def encodevigenere(self):
        """Encode the input with the Vigenere cipher; '?' if the key is empty."""
        from Vigenere.vigenere import vigenereencoder
        word = tools.process_text(self.vigenereinput.text())
        key = tools.process_text(self.vigenerekey.text())
        if key != "":
            # presumably expands the key to the word length — see tools.newKey
            key = tools.newKey(word, key)
            self.result.setText(vigenereencoder(word.upper(), key.upper()))
        else:
            self.result.setText("?")
    def decodevigenere(self):
        """Decode the input with the Vigenere cipher; '?' if the key is empty."""
        from Vigenere.vigenere import vigeneredecoder
        word = tools.process_text(self.vigenereinput.text())
        key = tools.process_text(self.vigenerekey.text())
        if key != "":
            key = tools.newKey(word, key)
            self.result.setText(vigeneredecoder(word.upper(), key.upper()))
        else:
            self.result.setText("?")
    def copy_to_clipboard(self):
        """Copy the current result text to the system clipboard."""
        tools.copy_to_pc(self.result.text())
    def menu_toggle(self):
        """Return to the main menu."""
        self.switch_tab.emit()
class MorseTab(QWidget):
    """Morse code screen: one input, one action button, copy support."""
    # Emitted to ask the controller to return to the main menu.
    switch_tab = QtCore.pyqtSignal()

    def __init__(self):
        QWidget.__init__(self)
        frame = self.frameGeometry()
        frame.moveCenter(QDesktopWidget().availableGeometry().center())
        self.backbutton = QPushButton(self)
        self.result = QtWidgets.QLabel(self)
        self.morseinput = QLineEdit(self)
        self.morsecrypt = QPushButton(self)
        self.copy_text = QPushButton(self)
        self.setWindowTitle("Ciphr")
        self.setWindowIcon(QIcon("icons/cr.png"))
        self.setGeometry(0, 0, 520, 400)
        self.move(frame.topLeft())
        self.setObjectName(obj_list[0])
        self.setFixedSize(self.size())
        self.morse_window()

    def _center_x(self, widget, offset=0):
        """X coordinate that horizontally centres `widget`, plus an offset."""
        return int((self.width() - widget.width()) / 2) + offset

    def morse_window(self):
        """Lay out and wire the widgets for this screen."""
        self.backbutton.setText("←")
        self.backbutton.setGeometry(10, 10, 60, 35)
        self.backbutton.setObjectName(obj_list[1])
        self.backbutton.clicked.connect(self.menu_toggle)
        self.result.setText("")
        self.result.setGeometry(640, 410, 150, 30)
        self.result.setObjectName(obj_list[2])
        self.result.move(self._center_x(self.result), 200)
        self.morseinput.setText("text")
        self.morseinput.setGeometry(655, 280, 150, 30)
        self.morseinput.setObjectName(obj_list[3])
        self.morseinput.move(self._center_x(self.morseinput), 120)
        self.morsecrypt.setText("Morse it")
        self.morsecrypt.setGeometry(620, 360, 70, 30)
        self.morsecrypt.setObjectName(obj_list[4])
        self.morsecrypt.move(self._center_x(self.morsecrypt), 160)
        self.morsecrypt.clicked.connect(self.morse_code)
        self.copy_text.setText("Copy")
        self.copy_text.setGeometry(620, 360, 50, 25)
        self.copy_text.setObjectName(obj_list[5])
        self.copy_text.move(self._center_x(self.copy_text), 240)
        self.copy_text.clicked.connect(self.copy_to_clipboard)

    def morse_code(self):
        """Translate the sanitized input to Morse and show the result."""
        from Morse.morse import morsecode
        cleaned = tools.remove_spaces(tools.process_morse(self.morseinput.text()))
        self.result.setText(morsecode(cleaned.lower()))

    def copy_to_clipboard(self):
        """Copy the current result text to the system clipboard."""
        tools.copy_to_pc(self.result.text())

    def menu_toggle(self):
        """Return to the main menu."""
        self.switch_tab.emit()
class HillTab(QWidget):
    """Hill cipher screen (placeholder: only back button and result label)."""
    # Emitted to ask the controller to return to the main menu.
    switch_tab = QtCore.pyqtSignal()

    def __init__(self):
        QWidget.__init__(self)
        frame = self.frameGeometry()
        frame.moveCenter(QDesktopWidget().availableGeometry().center())
        self.backbutton = QPushButton(self)
        self.result = QtWidgets.QLabel(self)
        self.setWindowTitle("Ciphr")
        self.setWindowIcon(QIcon("icons/cr.png"))
        self.setGeometry(0, 0, 520, 400)
        self.move(frame.topLeft())
        self.setObjectName(obj_list[0])
        self.setFixedSize(self.size())
        self.hill_window()

    def hill_window(self):
        """Place the back button and the (empty) result label."""
        self.backbutton.setText("←")
        self.backbutton.setGeometry(10, 10, 60, 35)
        self.backbutton.setObjectName(obj_list[1])
        self.backbutton.clicked.connect(self.menu_toggle)
        self.result.setText("")
        self.result.setGeometry(640, 410, 150, 30)
        self.result.setObjectName(obj_list[2])
        self.result.move(int((self.width() - self.result.width()) / 2), 200)

    def menu_toggle(self):
        """Return to the main menu."""
        self.switch_tab.emit()
class Remote:
    """Navigation controller: owns every window and swaps between them."""
    def __init__(self):
        self.mwindow = MainUi()
        self.cae_tab = CaesarTab()
        self.vig_tab = VigenereTab()
        self.bin_tab = BinaryTab()
        self.mor_tab = MorseTab()
        self.hil_tab = HillTab()
        # FIX: connect every signal exactly once. The original called
        # .connect() inside each navigation method, so every round trip
        # stacked another connection and slots fired multiple times per click.
        self.mwindow.switch_tab1.connect(self.binary_tab)
        self.mwindow.switch_tab2.connect(self.caesar_tab)
        self.mwindow.switch_tab3.connect(self.vigenere_tab)
        self.mwindow.switch_tab4.connect(self.morse_tab)
        self.mwindow.switch_tab5.connect(self.hill_tab)
        for tab in (self.bin_tab, self.cae_tab, self.vig_tab,
                    self.mor_tab, self.hil_tab):
            tab.switch_tab.connect(self.main_window)
    def main_window(self):
        """Show the main menu and hide every cipher tab."""
        for tab in (self.bin_tab, self.cae_tab, self.vig_tab,
                    self.mor_tab, self.hil_tab):
            tab.close()
        self.mwindow.show()
    def _open(self, tab):
        """Hide the menu and show the given tab window."""
        self.mwindow.close()
        tab.show()
    def binary_tab(self):
        """Switch to the Binary tab."""
        self._open(self.bin_tab)
    def caesar_tab(self):
        """Switch to the Caesar tab."""
        self._open(self.cae_tab)
    def vigenere_tab(self):
        """Switch to the Vigenere tab."""
        self._open(self.vig_tab)
    def morse_tab(self):
        """Switch to the Morse tab."""
        self._open(self.mor_tab)
    def hill_tab(self):
        """Switch to the Hill tab."""
        self._open(self.hil_tab)
def display():
    """Create the QApplication, apply the stylesheet, and start at the menu."""
    import sys
    app = QApplication(sys.argv)
    # Load the Qt stylesheet; the context manager closes the file handle.
    with open("interface.qss", "r") as qss_file:
        app.setStyleSheet(qss_file.read())
    app.setStyle("Breeze")
    ui = Remote()
    ui.main_window()
    sys.exit(app.exec_())
if __name__ == "__main__":
    # Guard the entry point so importing this module doesn't launch the GUI.
    display()
|
import ipaddress
import asyncio
from multiprocessing import Queue
import asyncio_dgram
import datetime,time
from vosk import Model, KaldiRecognizer
import audioop
import auditok
from scapy.all import RTP
import click
import json
import motor.motor_asyncio
# Kaldi/Vosk acoustic model, loaded once at import time.
# NOTE(review): hard-coded, machine-specific path — consider making it configurable.
model = Model('/home/alex/vosk-server/model')
class DB:
    """Thin class-level wrapper around an async MongoDB (motor) client."""
    # Shared client; populated by init().
    _db: motor.motor_asyncio.AsyncIOMotorClient
    @classmethod
    async def init(cls, params=None):
        """
        Connect to MongoDB and store the client on the class.
        params = {
            'db_url' : 'mongodb://localhost:27017',
            'db_user' : None,
            'db_password' : None,
        }
        """
        if params is None:
            # FIX: the fallback dict used the key 'mongo_url', which the
            # .get("db_url") lookup below could never find.
            params = {'db_url': 'mongodb://localhost:27017', 'db_user': None, 'db_password': None}
        cls.db_url = params.get("db_url", "mongodb://localhost:27017")
        cls.db_user = params.get("db_user", None)
        cls.db_password = params.get("db_password", None)
        try:
            # FIX: the original read `cls.mongo_url`, an attribute that is
            # never set (the stored name is `db_url`), so every call raised
            # AttributeError before connecting.
            cls._db = motor.motor_asyncio.AsyncIOMotorClient(
                cls.db_url, username=cls.db_user, password=cls.db_password)
        except Exception as e:
            print(f'Error connection to mongo DB {cls.db_url}. Error={e}')
            quit(1)
        # `database_names()` was removed from modern motor/pymongo;
        # `list_database_names()` is the supported coroutine and must be awaited.
        print(await cls._db.list_database_names())
    @classmethod
    async def write_result(cls, result=None, header=None):
        """
        write recognition result to DB
        result = {....} final result
        header = {'date':'datetime session started',
                  'source':"IP:port" of source stream
                  'record_path':'/../../xxxxx.wav',
                  }
        """
        # TODO: not implemented yet — intentionally a stub.
def recognizer_process(queue_audio, queue_text):
    """
    Blocking worker (run in a thread/process): drains raw PCM chunks from
    queue_audio, feeds them to a Kaldi/Vosk recognizer, and pushes JSON
    result strings into queue_text (suitable for json.loads()).
    Exits after 60 seconds without receiving new audio, flushing the
    recognizer's final result first if no complete result was ever emitted.
    """
    print('Worker started')
    # 8000 Hz sample rate matches the ulaw telephony audio fed by the server.
    rec = KaldiRecognizer(model, 8000)
    last_received = datetime.datetime.now()
    # True until the first complete (non-partial) result has been emitted.
    # NOTE(review): never reset to True afterwards, so audio arriving after
    # the first complete utterance is not flushed on shutdown — confirm intended.
    partial = True
    while True:
        queue_bytes = b''
        # Drain everything currently queued into one buffer.
        while not queue_audio.empty():
            last_received = datetime.datetime.now()
            queue_bytes += queue_audio.get()
        if rec.AcceptWaveform(queue_bytes):
            res = rec.Result()
            partial = False
            queue_text.put(res)
        # Inactivity timeout: stop after 60 s with no new audio.
        if datetime.datetime.now()-datetime.timedelta(seconds=60) > last_received:
            if partial:
                # Flush whatever the recognizer still holds.
                queue_text.put(rec.FinalResult())
            print(f'Worker stopped ')
            time.sleep(1)
            return
        time.sleep(1)
class UdpRtpServer:
    """Per-sender RTP listener: demultiplexes a shared UDP socket by source
    address and feeds each stream's audio to its own recognizer worker."""
    # hash("ip:port") -> listener instance for every active stream.
    connections = dict()
    def __init__(self, addr):
        # Raw 16-bit PCM accumulated until a speech pause is detected.
        self._global_data = b''
        self.queue_audio = Queue()
        self.queue_text = Queue()
        self.address = addr[0]
        self.port = addr[1]
        # Aggregate recognition result for the whole session.
        self.text_result = {
            "start_time": datetime.datetime.now(),
            "from_ip": self.address,
            "from_port": self.port,
            "text": "",
            "result":[]
        }
        self._receiver = None
    @classmethod
    async def start_server(cls,bind,port,allow_global):
        """Listen on one UDP socket forever, routing packets to per-sender
        listeners and spawning a new listener for each unseen source."""
        stream = await asyncio_dgram.bind((bind,port))
        while True:
            data, addr = await stream.recv()
            if hash(f'{ipaddress.ip_address(addr[0])}:{addr[1]}') not in cls.connections.keys():
                # Optionally refuse packets from public ("global") addresses.
                if not allow_global and ipaddress.ip_address(addr[0]).is_global:
                    print(f'Error: attempt to connect from {addr[0]}:{addr[1]} IS RESTRICTED!')
                    continue
                listener = cls(addr)
                cls.connections.update({hash(f'{ipaddress.ip_address(addr[0])}:{addr[1]}'): listener})
                asyncio.create_task(listener.on_new_connection())
                # NOTE(review): the very first packet of a new stream is not
                # forwarded to the listener (we `continue` here) — confirm intended.
                continue
            try:
                listener = cls.connections.get(hash(f'{ipaddress.ip_address(addr[0])}:{addr[1]}'),None)
            except Exception as e:
                print(f'Error: can\'t find link to worker')
                continue
            listener.on_data_received(data)
    async def on_new_connection(self):
        """Start the text-receiver task and run the blocking recognizer worker
        in a thread; clean up when the worker finishes (inactivity timeout)."""
        print(f'New connect from {ipaddress.IPv4Address(self.address)}:{self.port}')
        self._receiver = asyncio.create_task(self.text_receiver())
        await asyncio.gather(
            asyncio.to_thread(recognizer_process,self.queue_audio, self.queue_text),
        )
        self._receiver.cancel()
        # Deregister so a future packet from the same source starts fresh.
        self.__class__.connections.pop(hash(f'{ipaddress.ip_address(self.address)}:{self.port}'))
        print(f'Job Done! {ipaddress.ip_address(self.address)}:{self.port}, total jobs now:{len(self.__class__.connections)}')
        print(f'Final Result: {self.text_result}')
    def on_data_received(self, data):
        """Decode one RTP packet (mu-law -> 16-bit PCM), buffer it, and hand
        the buffer to the recognizer once a speech pause is detected."""
        self._global_data += audioop.ulaw2lin(RTP(data).load, 2)  # data.decode()
        # Segment the accumulated audio into speech events; more than one
        # region means a silence boundary exists, so the buffer can be flushed.
        audio_regions = auditok.split(
            self._global_data,
            audio_format='bytes',
            sampling_rate=8000,
            sample_width=2,
            channels=1,
            min_dur=0.3,  # minimum duration of a valid audio event in seconds
            max_dur=6,  # maximum duration of an event
            max_silence=0.3,  # maximum duration of tolerated continuous silence within an event
            energy_threshold=50  # threshold of detection
        )
        if len(list(audio_regions)) > 1:
            self.queue_audio.put(self._global_data)
            self._global_data = b''
    async def text_receiver(self):
        """Poll the worker's text queue once a second and merge each JSON
        result into the session-wide text_result."""
        while True:
            while not self.queue_text.empty():
                result = self.queue_text.get()
                # write recognized data
                print(f'Received for {result}, Session={self.address}:{self.port}')
                try:
                    dict_result = json.loads(result)
                    self.text_result["text"] += f' {dict_result.get("text")}'
                    self.text_result["result"] += dict_result.get("result")
                except Exception as e:
                    print(f'Error converting recognition result:{e}')
            await asyncio.sleep(1)
@click.command()
# FIX: the negative flag was misspelled '--no-allow_global' (underscore).
@click.option('--allow-global/--no-allow-global', default=False, help='allow to connect from asterisks with "white" addresses ')
@click.option('--bind_address', default="0.0.0.0", help='bind address to listen packets from asterisks')
@click.option('--bind_port', default="8808", help='UDP port to listen voice packets from asterisks')
def main(allow_global, bind_address, bind_port):
    """CLI entry point: connect to MongoDB, then serve RTP audio over UDP."""
    loop = asyncio.get_event_loop()
    # FIX: DB.init is a coroutine — the original called it without awaiting
    # (it never ran) and then hit a stray debug `quit()` that aborted the
    # program before the server could start.
    loop.run_until_complete(DB.init())
    print(f'Starting server on UDP://{bind_address}:{bind_port}')
    loop.run_until_complete(asyncio.gather(UdpRtpServer.start_server(bind_address, int(bind_port), allow_global),))
if __name__ == '__main__':
    main()
|
<filename>source/LaBSE.py
'''
Language-agnostic Sentence BERT Embeddings (LaBSE) utilities
'''
__author__ = '<NAME>'
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import bert
from configs import config as cf
def get_model(model_url, max_seq_length):
    '''
    loads model given a valid url and maximum sequence length
    [model_url] : tensorflow hub model URL
    [max_seq_length] : int
    returns (tf.keras.Model producing l2-normalized embeddings, hub layer)
    '''
    labse_layer = hub.KerasLayer(model_url, trainable=True)
    # The three standard BERT-style inputs share shape and dtype.
    def _int_input(name):
        return tf.keras.layers.Input(shape=(max_seq_length,),
                                     dtype=tf.int32,
                                     name=name)
    input_word_ids = _int_input("input_word_ids")
    input_mask = _int_input("input_mask")
    segment_ids = _int_input("segment_ids")
    # LaBSE layer: keep only the pooled sentence embedding.
    pooled_output, _ = labse_layer([input_word_ids, input_mask, segment_ids])
    # The embedding is l2 normalized.
    pooled_output = tf.keras.layers.Lambda(
        lambda t: tf.nn.l2_normalize(t))(pooled_output)
    model = tf.keras.Model(
        inputs=[input_word_ids, input_mask, segment_ids],
        outputs=pooled_output)
    return model, labse_layer
def create_input(input_list, tokenizer, max_seq_length):
    '''
    BERT-style input preparation: tokenize, add [CLS]/[SEP], pad or clip
    to max_seq_length, and build the matching attention mask.
    [input_list] : list of strings
    [tokenizer] : BERT tokenizer
    [max_seq_length] : int
    returns (input_ids, input_mask, segment_ids) as numpy arrays of shape
    (len(input_list), max_seq_length)
    '''
    ids_batch, mask_batch, segment_batch = [], [], []
    for text in input_list:
        tokens = ["[CLS]"] + tokenizer.tokenize(text) + ["[SEP]"]
        token_ids = tokenizer.convert_tokens_to_ids(tokens)
        real_len = min(len(token_ids), max_seq_length)
        # Pad with zeros then clip: covers both short and long sequences.
        ids_batch.append((token_ids + [0] * max_seq_length)[:max_seq_length])
        mask_batch.append([1] * real_len + [0] * (max_seq_length - real_len))
        # Single-sentence input: every position belongs to segment 0.
        segment_batch.append([0] * max_seq_length)
    return np.array(ids_batch), np.array(mask_batch), np.array(segment_batch)
def get_tokenizer(embed_layer):
    '''
    return tokenizer given the LaBSE layer
    [embed_layer] : tensorflow_hub.keras_layer
    '''
    # The TF-Hub SavedModel bundles its WordPiece vocabulary and casing flag;
    # read both from the resolved object instead of shipping them separately.
    vocab_file = embed_layer.resolved_object.vocab_file.asset_path.numpy()
    do_lower_case = embed_layer.resolved_object.do_lower_case.numpy()
    tokenizer = bert.bert_tokenization.FullTokenizer(vocab_file, do_lower_case)
    return tokenizer
def encode(input_text, model, tokenizer):
    '''
    returns embeddings of size [batch_size, 768]
    [input_text] : list of strings
    [model] : tf.keras.Model
    [tokenizer] : BERT tokenizer
    '''
    # Sequence length comes from the project config so it matches the length
    # the model's Input layers were built with in get_model().
    input_ids, input_mask, segment_ids = create_input(
        input_text, tokenizer, cf.max_seq_length)
    return model([input_ids, input_mask, segment_ids])
|
<reponame>kubruslihiga/sst-projeto
from seguranca_trabalho.submodels.funcionario import Funcionario
from django import forms
from django.forms.models import BaseInlineFormSet, inlineformset_factory
from seguranca_trabalho.submodels.monitoramento_saude_trabalhador import MonitoramentoSaudeTrabalhador
from seguranca_trabalho.submodels.usuario import Usuario
from seguranca_trabalho.submodels.condicao_fator_risco_ambiente_trabalho import CondicaoAmbientalFatorRisco, CondicaoFator, AnaliseEPI
class CondicaoAmbientalFatorRiscoForm(forms.ModelForm):
    """Form for an environmental-condition / risk-factor record.
    Field declaration order below is meaningful: Django renders form fields
    in the order they are declared on the class.
    """
    # Class-level annotation only; the instance attribute is set in __init__.
    usuario:Usuario
    def __init__(self, usuario:Usuario, *args, **kwargs):
        super(CondicaoAmbientalFatorRiscoForm, self).__init__(*args, **kwargs)
        self.usuario = usuario
        # Restrict employee choices to the company currently selected by the
        # logged-in user.
        self.fields['funcionario'].queryset = Funcionario.objects.filter(empresa=usuario.empresa_selecionada)
    data_inicio = forms.DateField(
        input_formats=['%d/%m/%Y'],
        widget=forms.DateInput(
            format="%d/%m/%Y",
            attrs={ "data-mask": "99/99/9999" }),
        required=False,
        help_text="Formato dd/mm/yyyy")
    # queryset=None is a placeholder; it is replaced per-user in __init__.
    funcionario = forms.ModelChoiceField(queryset=None, required=True, label="Funcionário", widget=forms.Select(attrs={
        "class": "select"
    }))
    # NOTE(review): unlike 'funcionario', this queryset=None is never replaced
    # in __init__ — confirm it is populated elsewhere before rendering.
    responsavel_registro_ambiental = forms.ModelChoiceField(queryset=None, label="Responsável pelo registro", required=False)
    descricao_atividade_ambiente = forms.CharField(required=False, label="Descrição da atividade no ambiente de trabalho")
    metodologia_riscos_ergonomicos = forms.CharField(required=False, label="Metodologia dos riscos ergonômicos", max_length=1000, widget=forms.Textarea())
    observacao = forms.CharField(required=False, label="Observação", max_length=1000, widget=forms.Textarea())
    class Meta:
        model = CondicaoAmbientalFatorRisco
        fields = ["funcionario",
                "data_inicio",
                "descricao_atividade_ambiente",
                "atividades",
                "responsavel_registro_ambiental",
                "metodologia_riscos_ergonomicos",
                "observacao"]
class BaseCondicaoAmbiente(BaseInlineFormSet):
    """Inline formset placeholder: add_fields currently adds nothing beyond
    the default behavior; kept as an extension point."""
    def add_fields(self, form, index) -> None:
        super().add_fields(form, index)
class CondicaoFatorForm(forms.ModelForm):
    """ModelForm for one risk factor measured under a condition record."""
    class Meta:
        model = CondicaoFator
        # Declaration order controls rendering order of the fields.
        fields = [
            "fator_risco",
            "tipo_avaliacao",
            "intensidade",
            "limite_tolerancia",
            "unidade_medida",
            "tecnica_utilizada",
            "insalubridade",
            "periculosidade",
            "aposentadoria_especial",
            "utilizacao_epc",
            "epc_eficaz",
            "utilizacao_epi"]
class AnaliseEPIForm(forms.ModelForm):
    """ModelForm for one PPE (EPI) analysis attached to a risk factor."""
    class Meta:
        model = AnaliseEPI
        fields = [
            "certificacao_epi",
            "descricao_epi",
            "epi_eficaz",
            "hierarquia_medida_protecao_coletiva",
            "observada_condicao_funcionamento",
            "observado_epi",
            "observado_prazo_validade_ca",
            "observado_periodicidade_troca",
            "observada_higienizacao_epi"]
# Inline formset: AnaliseEPI rows nested under a CondicaoFator parent.
AnaliseEPIFormset = inlineformset_factory(CondicaoFator, AnaliseEPI, form=AnaliseEPIForm, extra=1)
class BaseCondicaoFormset(BaseInlineFormSet):
    """Formset that attaches a nested AnaliseEPI formset to each member form,
    enabling two-level inline editing (condition -> factor -> EPI analyses)."""
    def add_fields(self, form, index):
        super(BaseCondicaoFormset, self).add_fields(form, index)
        # save the formset in the 'nested' property; bound data/files are only
        # forwarded when the outer form itself is bound.
        form.analise_epis = AnaliseEPIFormset(
            instance=form.instance,
            data=form.data if form.is_bound else None,
            files=form.files if form.is_bound else None,
            # Unique prefix per parent form so nested field names don't collide.
            prefix='analise-epi-%s-%s' % (
                form.prefix,
                AnaliseEPIFormset.get_default_prefix()))
# Top-level inline formset: CondicaoFator rows under a CondicaoAmbientalFatorRisco.
CondicaoFatorFormset = inlineformset_factory(CondicaoAmbientalFatorRisco, CondicaoFator, form=CondicaoFatorForm, formset=BaseCondicaoFormset, extra=1)
<reponame>Zac-HD/trio
from collections import deque
import attr
from .. import _core
from .._util import aiter_compat
from .._deprecate import deprecated
__all__ = ["UnboundedQueue"]
@attr.s(frozen=True)
class _UnboundedQueueStats:
    # Immutable snapshot returned by UnboundedQueue.statistics().
    qsize = attr.ib()  # number of items currently queued
    tasks_waiting = attr.ib()  # tasks blocked in get_batch()
class UnboundedQueue:
    """An unbounded queue suitable for certain unusual forms of inter-task
    communication.
    This class is designed for use as a queue in cases where the producer for
    some reason cannot be subjected to back-pressure, i.e., :meth:`put_nowait`
    has to always succeed. In order to prevent the queue backlog from actually
    growing without bound, the consumer API is modified to dequeue items in
    "batches". If a consumer task processes each batch without yielding, then
    this helps achieve (but does not guarantee) an effective bound on the
    queue's memory use, at the cost of potentially increasing system latencies
    in general. You should generally prefer to use a memory channel
    instead if you can.
    Currently each batch completely empties the queue, but `this may change in
    the future <https://github.com/python-trio/trio/issues/51>`__.
    A :class:`UnboundedQueue` object can be used as an asynchronous iterator,
    where each iteration returns a new batch of items. I.e., these two loops
    are equivalent::
       async for batch in queue:
           ...
       while True:
           obj = await queue.get_batch()
           ...
    """
    @deprecated(
        "0.9.0",
        issue=497,
        thing="trio.hazmat.UnboundedQueue",
        instead="trio.open_memory_channel(math.inf)"
    )
    def __init__(self):
        # Tasks blocked in get_batch() park here until data arrives.
        self._lot = _core.ParkingLot()
        self._data = []
        # used to allow handoff from put to the first task in the lot
        self._can_get = False
    def __repr__(self):
        return "<UnboundedQueue holding {} items>".format(len(self._data))
    def qsize(self):
        """Returns the number of items currently in the queue.
        """
        return len(self._data)
    def empty(self):
        """Returns True if the queue is empty, False otherwise.
        There is some subtlety to interpreting this method's return value: see
        `issue #63 <https://github.com/python-trio/trio/issues/63>`__.
        """
        return not self._data
    @_core.enable_ki_protection
    def put_nowait(self, obj):
        """Put an object into the queue, without blocking.
        This always succeeds, because the queue is unbounded. We don't provide
        a blocking ``put`` method, because it would never need to block.
        Args:
          obj (object): The object to enqueue.
        """
        if not self._data:
            # Queue was empty: either wake exactly one parked consumer (which
            # will take the batch), or mark data as available for the next
            # get_batch() call.
            assert not self._can_get
            if self._lot:
                self._lot.unpark(count=1)
            else:
                self._can_get = True
        self._data.append(obj)
    def _get_batch_protected(self):
        # Hand the entire backlog to the caller and reset the handoff flag.
        data = self._data.copy()
        self._data.clear()
        self._can_get = False
        return data
    def get_batch_nowait(self):
        """Attempt to get the next batch from the queue, without blocking.
        Returns:
          list: A list of dequeued items, in order. On a successful call this
              list is always non-empty; if it would be empty we raise
              :exc:`~trio.WouldBlock` instead.
        Raises:
          ~trio.WouldBlock: if the queue is empty.
        """
        if not self._can_get:
            raise _core.WouldBlock
        return self._get_batch_protected()
    async def get_batch(self):
        """Get the next batch from the queue, blocking as necessary.
        Returns:
          list: A list of dequeued items, in order. This list is always
              non-empty.
        """
        await _core.checkpoint_if_cancelled()
        if not self._can_get:
            # Nothing available: park until put_nowait wakes us; waking up
            # already counts as a checkpoint, so no extra one is needed.
            await self._lot.park()
            return self._get_batch_protected()
        else:
            # Data was ready: still execute a (cancel-shielded) checkpoint so
            # this method is a checkpoint on every code path, without losing
            # the batch we just took.
            try:
                return self._get_batch_protected()
            finally:
                await _core.cancel_shielded_checkpoint()
    def statistics(self):
        """Return an object containing debugging information.
        Currently the following fields are defined:
        * ``qsize``: The number of items currently in the queue.
        * ``tasks_waiting``: The number of tasks blocked on this queue's
          :meth:`get_batch` method.
        """
        return _UnboundedQueueStats(
            qsize=len(self._data),
            tasks_waiting=self._lot.statistics().tasks_waiting
        )
    @aiter_compat
    def __aiter__(self):
        return self
    async def __anext__(self):
        # Each async-iteration step yields one whole batch.
        return await self.get_batch()
|
<filename>enonces/ecoulements_potentiels/module/banque_ecoulements.py
# Banque d'ecoulements elementaires
import numpy as np
# Fonctions pour creer des grilles du plan d'ecoulement
# Tous les parametres sont optionnels
# Ils permettent de definir les bornes du plan et le nombre de points de discretisation
# Les objets grid sont des dictionnaires contenant les tableaux de coordonnees cartesiennes
def create_2Dgrid_cart(xbounds=[-5.,5.],ybounds=[-3.,3],nx=500,ny=300):
    """Cartesian 2-D grid of the flow plane.
    Returns a dict with 'x' and 'y' meshgrid arrays of shape (ny, nx);
    bounds give the plane extent, nx/ny the sampling resolution.
    """
    xs = np.linspace(xbounds[0], xbounds[1], nx)
    ys = np.linspace(ybounds[0], ybounds[1], ny)
    mesh_x, mesh_y = np.meshgrid(xs, ys)
    return {'x': mesh_x, 'y': mesh_y}
def create_2Dgrid_cyl(rbounds=[1.0e-3,6.],nr=500,nt=360):
    """Polar 2-D grid of the flow plane, stored as cartesian coordinates.
    Returns a dict with 'x' and 'y' arrays of shape (nt, nr); radii span
    rbounds and angles cover a full turn [0, 2*pi].
    """
    radii = np.linspace(rbounds[0], rbounds[1], nr)
    angles = np.linspace(0, 2 * np.pi, nt)
    mesh_r, mesh_t = np.meshgrid(radii, angles)
    return {'x': mesh_r * np.cos(mesh_t), 'y': mesh_r * np.sin(mesh_t)}
# Fonction pour calculer les coordonnees polaires a partir des coordonnees cartesiennes
# Il est possible de redefinir le centre du repere polaire a l'aide du parametre
# optionnel center. Par defaut il est en [0,0]
def cart2cyl(grid, center=[0, 0]):
    """Polar coordinates (radius, angle) of a cartesian grid, measured from
    `center` (default: the origin)."""
    dx = grid['x'] - center[0]
    dy = grid['y'] - center[1]
    return np.sqrt(dx**2 + dy**2), np.arctan2(dy, dx)
# Les objets ecoul sont des dictionnaires contenant les tableaux permettant
# de definir les ecoulements elementaires sur la grille du plan fournie.
# Ils contiennent en particulier les valeurs sur la grille de
# la fonction courant psi, la fonction potentielle phi, les composantes de vitesse
# Ecoulement uniforme (sans incidence)
def uniform(grid,Vinf):
    """Uniform free-stream of speed Vinf aligned with +x (zero incidence).
    Returns the standard flow dict: phi, psi, cartesian (u, v) and polar
    (ur, ut) velocity components on the given grid.
    """
    flow = dict()
    rad, theta = cart2cyl(grid, center=[0., 0.])
    flow['phi'] = Vinf * grid['x']
    flow['psi'] = Vinf * grid['y']
    flow['u'] = Vinf * np.ones_like(rad)
    flow['v'] = np.zeros_like(rad)
    # Project the cartesian velocity onto the polar unit vectors.
    flow['ur'] = flow['u'] * np.cos(theta) + flow['v'] * np.sin(theta)
    flow['ut'] = -flow['u'] * np.sin(theta) + flow['v'] * np.cos(theta)
    return flow
# Source ponctuelle
def source(grid,La,center=[0,0]):
    """Point source of strength La located at `center`.
    Returns the standard flow dict: phi, psi, polar (ur, ut) and cartesian
    (u, v) velocity components on the given grid.
    """
    flow = dict()
    rad, theta = cart2cyl(grid, center)
    coef = La / (2 * np.pi)
    flow['phi'] = coef * np.log(rad)
    flow['psi'] = coef * theta
    flow['ur'] = La / (2 * np.pi * rad)  # purely radial velocity field
    flow['ut'] = np.zeros_like(rad)
    # Project the polar velocity back onto the cartesian axes.
    flow['u'] = flow['ur'] * np.cos(theta) - flow['ut'] * np.sin(theta)
    flow['v'] = flow['ur'] * np.sin(theta) + flow['ut'] * np.cos(theta)
    return flow
# Dipole ponctuel
def dipole(grid,Ka,center=[0,0]):
    """Point dipole of strength Ka at `center` — exercise stub.
    The field expressions are intentionally left commented out for the
    student to derive; until then the returned dict is empty.
    """
    ecoulement = dict()
    rad,theta = cart2cyl(grid,center)
    # ecoulement['phi'] =
    # ecoulement['psi'] =
    # ecoulement['ur'] =
    # ecoulement['ut'] =
    # ecoulement['u'] =
    # ecoulement['v'] =
    return ecoulement
# Tourbillon ponctuel
def tourbillon(grid,Ga,center=[0,0],R0=1.0):
    """Point vortex of circulation Ga at `center` — exercise stub.
    The field expressions are intentionally left commented out for the
    student to derive; until then the returned dict is empty.
    R0 is currently unused — presumably a core radius for the student's
    regularized-vortex variant; confirm against the exercise statement.
    """
    ecoulement = dict()
    rad,theta = cart2cyl(grid,center)
    # ecoulement['phi'] =
    # ecoulement['psi'] =
    # ecoulement['ur'] =
    # ecoulement['ut'] =
    # ecoulement['u'] =
    # ecoulement['v'] =
    return ecoulement
# Fonction pour superposer deux ecoulements elementaires definis sur la meme grille
# Les champs (phi, psi, u, v) sont additionnes lineairement, puis les
# composantes polaires (ur, ut) sont recalculees a partir du resultat
def superpose_ecoulement(ecoul1, ecoul2, grid):
    """Superpose two elementary flows by adding their fields on *grid*."""
    combined = dict()
    rad, theta = cart2cyl(grid, center=[0., 0.])
    # Potential, stream function and Cartesian velocities add linearly.
    for key in ['phi', 'psi', 'u', 'v']:
        combined[key] = ecoul1[key] + ecoul2[key]
    # Recompute the polar components from the summed Cartesian field.
    combined['ur'] = combined['u'] * np.cos(theta) + combined['v'] * np.sin(theta)
    combined['ut'] = -combined['u'] * np.sin(theta) + combined['v'] * np.cos(theta)
    return combined
|
#!/usr/bin/env python3
"""
Prepare images to work with CNN model.
Inspired by https://github.com/kylemcdonald/SmileCNN
We're using data from https://github.com/hromi/SMILEsmileD/tree/master/SMILEs
Download the repository as zip file and put SMILEs/negatives and SMILEs/positives
into the data directory in the source direcotry for this section.
Please install sckit-image package before
using this script with:
$ conda install scikit-image
"""
from os import listdir, path, remove
from skimage.io import imread
from skimage.measure import block_reduce
from PIL import Image
import numpy as np
from keras.utils import np_utils
def img2array(f, detection=False, ii_size=(64, 64)):
    """
    Convert an image file into a two-dimensional 32x32 array.

    f         - path to the image file.
    detection - if True we first resize the image to *ii_size* so it fits
                the shape our first convolutional layer accepts;
                used only on detection.
    ii_size   - the size that our input images have.

    Returns a 2D grayscale numpy array downsampled by block mean.
    """
    rf = None
    if detection:
        # Bug fix: split only on the LAST dot. rsplit('.') without a
        # maxsplit broke paths containing extra dots (e.g. "a.b.jpg"
        # became "a-resampled.b" and lost the real extension).
        stem, ext = f.rsplit('.', 1)
        rf = stem + '-resampled.' + ext
        im = Image.open(f)
        # Create a smaller scaled-down thumbnail of our image.
        im.thumbnail(ii_size)
        # The thumbnail might not have perfect dimensions, so paste it
        # into a fresh grayscale canvas of exactly the requested size.
        newi = Image.new('L', ii_size)
        newi.paste(im, (0, 0))
        newi.save(rf, "JPEG")
        f = rf
    # Turn the image into an array.
    data = imread(f, as_gray=True)
    # Downsample from 64x64 to 32x32
    # (that's what we need to feed into our first convolutional layer).
    data = block_reduce(data, block_size=(2, 2), func=np.mean)
    if rf:
        # Drop the temporary resampled file.
        remove(rf)
    return data
def prep_array(data, detection=False):
    """
    Convert the input array into the format the network expects.

    detection - if True we just wrap a single image's array in a list
                to keep things consistent with the training pipeline.
    """
    if detection:
        data = [data]
    # Raw pixel values are 0..255 integers and the network trains very
    # slowly on them, so scale down to the 0.0..1.0 range.
    scaled = np.asarray(data) / 255.0
    # Wrap each pixel value inside its own innermost array
    # (adds a trailing channel axis).
    return np.expand_dims(scaled, axis=-1)
def load_data(data_directory):
    """
    Go through each .jpg image in *data_directory*, convert it into an
    array, collect them into the input array X and return it prepared.
    """
    images = []
    for filename in listdir(data_directory):
        if filename.endswith('.jpg'):
            full_path = path.join(data_directory, filename)
            images.append(img2array(full_path))
    return prep_array(images)
def gen_labels(length, label):
    """Return a list containing *label* repeated *length* times."""
    return [label] * length
def get_data():
    """
    Build the X (inputs) and Y (one-hot classes) arrays ready for the
    convolutional network, plus the input shape and the class count.
    """
    # Load images and generate labels, negatives first.
    x_neg = load_data('data/negatives/negatives7')
    y_neg = gen_labels(len(x_neg), 0)
    x_pos = load_data('data/positives/positives7')
    y_pos = gen_labels(len(x_pos), 1)
    # Merge negative and positive data into one set.
    X = np.concatenate([x_neg, x_pos])
    Y = np.asarray(y_neg + y_pos)
    # Values default to 64 bit; 32 bit runs quicker.
    X = X.astype(np.float32)
    Y = Y.astype(np.int32)
    # Input dimensions and channel count, e.g. (32, 32, 1):
    # 32x32 pixels with one channel (black and white images).
    inputs = X.shape[1:]
    # Number of classes we want to predict: 0 - not smiling, 1 - smiling.
    classes = 2
    # One-hot encode the labels; needed for the softmax output layer.
    Y = np_utils.to_categorical(Y, classes).astype(np.float32)
    # Shuffle everything because negatives outnumber positives;
    # Keras will split train/validation from this order later.
    ixes = np.arange(len(X))
    np.random.shuffle(ixes)
    return X[ixes], Y[ixes], inputs, classes
if __name__ == '__main__':
    # Smoke test: load the data set and print a summary of the first sample.
    from pprint import pprint
    X, Y, inputs, classes=get_data()
    print('Inputs: %s' % repr(inputs))
    print('X[0] (first encoded image):')
    pprint(X[0])
    print('Y[0] (first encoded class):')
    pprint(Y[0])
    print('Classes %s' % classes)
    # Show what the one-hot encoding of the two classes looks like.
    pprint(np_utils.to_categorical([0,1], classes).astype(np.float32))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is a part of EM Media Handler Testing Module
# Copyright (c) 2014-2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Initialize module"""
import os
import shutil
from re import escape
import tests.common as common
from tests.common import unittest
from tests.common import MHTestSuite
from tests.test_media import MediaObjectTests
from mediahandler.util.config import _find_app
import mediahandler.types.music as Music
class MusicMediaObjectTests(MediaObjectTests):
    """Tests for mediahandler.types.music.MHMusic (beets-based tagging)."""

    def setUp(self):
        # Call Super
        super(MusicMediaObjectTests, self).setUp()
        # Get music settings
        self.settings = common.get_settings()['Music']
        self.settings['single_track'] = False
        # Set up beets path
        _find_app(self.settings, {'name': 'Beets', 'exec': 'beet'})
        # Set up music object
        self.tracks = Music.MHMusic(self.settings, self.push)

    def test_new_music_object(self):
        """Album mode uses the '-ql' beets flags and the Tagging/To regex."""
        expected = r"(Tagging|To)\:\n\s{1,4}(.*)\nURL\:\n\s{1,4}(.*)\n"
        self.assertEqual(self.tracks.query.tags, '-ql')
        self.assertEqual(self.tracks.query.added, expected)

    def test_new_music_single(self):
        """Single-track mode uses '-sql' and the single-track regex."""
        self.settings['single_track'] = True
        self.tracks = Music.MHMusic(self.settings, self.push)
        # Check results
        expected = r"(Tagging track)\:\s(.*)\nURL\:\n\s{1,4}(.*)\n"
        self.assertEqual(self.tracks.query.tags, '-sql')
        self.assertEqual(self.tracks.query.added, expected)

    def test_music_add_log(self):
        """add() on an unmatchable file exits, yet creates the log folder."""
        # Make dummy logfile
        name = 'test-{0}.log'.format(common.get_test_id())
        folder = os.path.join(os.path.dirname(self.conf), 'tmpl')
        log_file = os.path.join(folder, name)
        self.tracks.log_file = log_file
        # Run tests
        regex = r'Unable to match music files: {0}'.format(escape(self.tmp_file))
        # Bug fix: assertRaisesRegexp is a deprecated alias that was removed
        # in Python 3.12; assertRaisesRegex behaves identically.
        self.assertRaisesRegex(
            SystemExit, regex, self.tracks.add, self.tmp_file)
        self.assertTrue(os.path.exists(folder))
        # Clean up
        shutil.rmtree(folder)

    # NOTE(review): leading whitespace inside the beets-output literals below
    # may have been lost in transit; the parser regexes expect 1-4 spaces of
    # indentation after the 'Tagging:'/'To:'/'URL:' lines -- confirm upstream.
    def test_music_output_good(self):
        """Two successfully tagged albums are both reported, none skipped."""
        output = """
/Downloaded/Music/Alt-J - This Is All Yours (2014) CD RIP [MP3 @ 320 KBPS] (13 items)
Correcting tags from:
Alt-J - This Is All Yours
To:
alt-J - This Is All Yours
URL:
http://musicbrainz.org/release/e6f60da3-1d37-4aba-a309-6e65b84ffe66
(Similarity: 96.9%) (tracks) (CD, 2014, Infectious Records)
/Downloaded/Music/Eisley - Discographie (2002-2012)/Eisley - (2005) Room Noises ALBUM (13 items)
Tagging:
Eisley - Room Noises
URL:
http://musicbrainz.org/release/4186b65f-c36d-4dac-82d3-221d3f8c7925
(Similarity: 100.0%) (2005, US)
"""
        (new_file, skipped) = self.tracks._process_output(output, self.tmp_file)
        expected = ['alt-J - This Is All Yours', 'Eisley - Room Noises']
        self.assertEqual(new_file, expected)
        self.assertEqual(skipped, [])

    def test_music_output_single_good(self):
        """A successfully tagged single track is reported."""
        # Single file
        self.settings['single_track'] = True
        self.tracks = Music.MHMusic(self.settings, self.push)
        output = """
/Downloaded/Music/<NAME> - Blank Space {2014-Single}/02 Blank Space.mp3
Tagging track: <NAME> - Blank Space
URL:
http://musicbrainz.org/recording/c3fe7791-0a91-4f0a-a89b-b056f38d3cde
(Similarity: 100.0%)
"""
        (new_file, skipped) = self.tracks._process_output(output, self.tmp_file)
        self.assertEqual(new_file, ['Taylor Swift - Blank Space'])
        self.assertEqual(skipped, [])

    def test_music_output_skipped(self):
        """Skipped albums are collected separately from tagged ones."""
        output = """
/Downloaded/Music/Eisley - Discographie (2002-2012)/Eisley - (2003) Marvelous Things EP (1 items)
Skipping.
/Downloaded/Music/Eisley - Discographie (2002-2012)/Eisley - (2005) Room Noises ALBUM (13 items)
Tagging:
Eisley - Room Noises
URL:
http://musicbrainz.org/release/4186b65f-c36d-4dac-82d3-221d3f8c7925
(Similarity: 100.0%) (2005, US)
/Downloaded/Music/Eisley - Discographie (2002-2012)/Eisley - (2009) Fire Kite EP (1 items)
Skipping.
"""
        (new_file, skipped) = self.tracks._process_output(output, self.tmp_file)
        new_expected = ['Eisley - Room Noises']
        skip_expected = [
            'Eisley - (2003) Marvelous Things EP',
            'Eisley - (2009) Fire Kite EP',
        ]
        self.assertEqual(new_file, new_expected)
        self.assertEqual(skipped, skip_expected)

    def test_music_output_single_skipped(self):
        """A skipped single track is reported as skipped only."""
        # Single file
        self.settings['single_track'] = True
        self.tracks = Music.MHMusic(self.settings, self.push)
        output = """
/Downloaded/Music/Taylor Swift - Blank Space {2014-Single} (1 items)
Skipping.
"""
        (new_file, skipped) = self.tracks._process_output(output, self.tmp_file)
        expected = ['Taylor Swift - Blank Space {2014-Single}']
        self.assertEqual(new_file, [])
        self.assertEqual(skipped, expected)
def suite():
    """Build the ordered MHTestSuite for this module's tests."""
    tests = unittest.TestLoader().loadTestsFromName(__name__)
    runner = MHTestSuite()
    runner.addTest(tests)
    return runner
if __name__ == '__main__':
    # Run via the custom suite so tests execute in the intended order.
    unittest.main(defaultTest='suite', verbosity=2)
|
################################################################################
# Copyright 2016-2022 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
from copy import deepcopy
import itertools
from .Common import print1, print2, hasParam, printExit, \
defaultBenchmarkCommonParameters, validParameters, globalParameters, \
defaultBatchedBenchmarkFinalProblemSizes, defaultBenchmarkFinalProblemSizes
from .CustomKernels import getAllCustomKernelNames
from .SolutionStructs import ProblemType, ProblemSizes
def getDefaultsForMissingParameters(paramList, defaultParams):
    """Returns all parameters (with values) in defaultParams not present in paramList"""
    missing = {}
    for defaults in defaultParams:
        for key, value in defaults.items():
            # ProblemSizes is always (re)filled from the defaults.
            if key == "ProblemSizes" or not hasParam(key, paramList):
                missing[key] = value
    return missing
def checkParametersAreValid(param, validParams):
    """Ensures parameters in *param* exist and have valid values per *validParams*.

    param       - tuple of (name, list of candidate values).
    validParams - mapping of parameter name -> allowed values (-1 means any).
    Exits with an error message on the first invalid name or value.
    """
    (name, values) = param
    # ProblemSizes is validated elsewhere.
    if name == "ProblemSizes":
        return
    if name not in validParams:
        # Bug fix: report the keys of the mapping actually used for
        # validation (validParams), not the module-global validParameters.
        printExit("Invalid parameter name: {}\nValid parameters are {}." \
            .format(name, sorted(validParams.keys())))
    for value in values:
        if validParams[name] != -1 and value not in validParams[name]:
            msgBase = "Invalid parameter value: {} = {}\nValid values for {} are {}{}."
            # Truncate very long allowed-value lists to keep the message readable.
            msgExt = " (only first 32 combos printed)\nRefer to Common.py for more info" \
                if len(validParams[name])>32 else ""
            printExit(msgBase.format(name, value, name, validParams[name][:32], msgExt))
def separateParameters(paramSetList):
    """Separates paramSetList into parameters with single and multiple values.

    Returns (singleValues, multiValues): name -> value for one-element lists
    and name -> values for longer lists. ProblemSizes is excluded from both.
    Exits if any parameter maps to None.
    """
    # Fixed internal typo ("singleVaules") and the non-idiomatic `== None`.
    singleValues = {}
    multiValues = {}
    for name, values in paramSetList.items():
        # Identity comparison: a missing value is literally None.
        if values is None:
            printExit("You must specify value(s) for parameter \"{}\"".format(name))
        if name == "ProblemSizes":
            continue
        if len(values) == 1:
            singleValues[name] = values[0]
        elif len(values) > 1:
            multiValues[name] = values
    return singleValues, multiValues
def checkCDBufferAndStrides(problemType, problemSizes, isCEqualD):
    """Ensures ldd == ldc when CEqualD"""
    # Only GEMM problems with C==D aliasing need the stride check.
    if not (isCEqualD and problemType["OperationType"] == "GEMM"):
        return
    lddIdx = problemType["IndexAssignmentsLD"][0]
    ldcIdx = problemType["IndexAssignmentsLD"][1]
    for problem in problemSizes.problems:
        ldd = problem.sizes[lddIdx]
        ldc = problem.sizes[ldcIdx]
        if ldd != ldc:
            printExit("LDD({}) != LDC({}) causes unpredictable result when CEqualD(True)" \
                .format(ldd, ldc))
class BenchmarkProcess:
    """Representation of benchmarking parameters and resulting steps"""

    def __init__(self, problemTypeConfig, problemSizeGroupConfig):
        """Create from the two sections of a config for a BenchmarkProblem"""
        self.problemType = ProblemType(problemTypeConfig)
        self.isBatched = "Batched" in problemTypeConfig and problemTypeConfig["Batched"]
        print2("# BenchmarkProcess beginning {}".format(self.problemType))
        # fill parameter values from config
        self.singleValueParams = {}
        self.multiValueParams = {}
        self.customKernels = []
        self.sizes = None
        self.getConfigParameters(self.isBatched, problemSizeGroupConfig)
        # convert parameter lists to steps
        # previously, multiple benchmark steps were possible
        # currently only 1 benchmark step is possible; more may be added back later
        self.benchmarkSteps = []
        self.benchmarkStepIdx = 0
        self.convertParametersToSteps()

    def getConfigParameters(self, isbatched, config):
        """Parse and validate benchmarking parameters in config"""
        print2("")
        print2("####################################################################")
        print1("# Filling in Parameters With Defaults")
        print2("####################################################################")
        print2("")
        # check for no longer supported legacy benchmark steps
        badParams = ["InitialSolutionParameters", "BenchmarkForkParameters", \
            "JoinParameters", "BenchmarkJoinParameters"]
        badsInConfig = []
        for p in badParams:
            if config.get(p) is not None:
                badsInConfig.append(p)
        if len(badsInConfig) == 1:
            printExit("Benchmark step {} is no longer supported".format("'" + badsInConfig[0] +
                "'"))
        elif len(badsInConfig) > 1:
            printExit("Benchmark steps {} are no longer supported".format(badsInConfig))

        # get supported configurations
        # value in config file may be "None", which we should ignore
        def getNonNoneFromConfig(key, default):
            # Return config[key] unless the key is missing or explicitly None.
            if config.get(key) is not None:
                return config[key]
            else:
                return default

        # converts list of dicts into a flat dict
        benchmarkCommonParams = dict(itertools.chain(*[x.items() \
            for x in getNonNoneFromConfig("BenchmarkCommonParameters", [])]))
        forkParams = dict(itertools.chain(*[x.items() \
            for x in getNonNoneFromConfig("ForkParameters", [])]))
        self.paramGroups = forkParams.pop("Groups") if "Groups" in forkParams else []
        self.customKernels = getNonNoneFromConfig("CustomKernels", [])
        if "BenchmarkFinalParameters" in config:
            sizes = config["BenchmarkFinalParameters"][0]["ProblemSizes"]
        else:
            sizes = defaultBatchedBenchmarkFinalProblemSizes if isbatched \
                else defaultBenchmarkFinalProblemSizes
        self.problemSizes = ProblemSizes(self.problemType, sizes)
        checkCDBufferAndStrides(self.problemType, self.problemSizes, globalParameters["CEqualD"])
        # validate parameter values
        configParams = {**benchmarkCommonParams, **forkParams}
        for param in configParams.items():
            checkParametersAreValid(param, validParameters)
        # TODO other checks on groups (same params for each entry? no dups between groups?)
        # Bug fix: loop variable was named "list", shadowing the builtin.
        for paramGroup in self.paramGroups:
            for group in paramGroup:
                for k, v in group.items():
                    checkParametersAreValid((k, [v]), validParameters)
        # Defaults first, then overridden by everything from the config.
        params = dict(itertools.chain(*[x.items() for x in defaultBenchmarkCommonParameters]))
        params.update(configParams)
        self.singleValueParams, self.multiValueParams = separateParameters(params)
        # print summary of parameter values
        print2("Single Value Parameters:")
        for k, v in self.singleValueParams.items():
            print2(" {}: {}".format(k, v))
        print2("Multi-Value Parameters:")
        for k, v in self.multiValueParams.items():
            print2(" {}: {}".format(k, v))
        if len(self.paramGroups) > 0:
            print2("{} Parameter Group(s):".format(len(self.paramGroups)))
            for i, group in enumerate(self.paramGroups):
                print2(" {} entries is group {}".format(len(group), i + 1))

    def convertParametersToSteps(self):
        """Create benchmark steps based on parsed parameters"""
        print2("")
        print2("####################################################################")
        print1("# Convert Parameters to Benchmark Step(s)")
        print2("####################################################################")
        print2("")
        # currently only a single step is supported
        print2("")
        print2("####################################################################")
        print1("# Benchmark Final")
        benchmarkStep = BenchmarkStep( \
            self.multiValueParams, \
            self.singleValueParams, \
            self.paramGroups, \
            self.customKernels, \
            self.problemSizes, \
            self.benchmarkStepIdx)
        self.benchmarkSteps.append(benchmarkStep)
        self.benchmarkStepIdx += 1

    def __len__(self):
        return len(self.benchmarkSteps)

    def __getitem__(self, key):
        return self.benchmarkSteps[key]

    def __str__(self):
        string = "BenchmarkProcess:\n"
        for step in self.benchmarkSteps:
            string += str(step)
        return string

    def __repr__(self):
        return self.__str__()
def constructForkPermutations(forkParams, paramGroups):
    """Constructs cartesian product of parameter values in forkParams and paramGroups"""
    candidates = dict(forkParams)
    total = 1
    for values in forkParams.values():
        total *= len(values)
    # Each group behaves like one extra forked parameter whose "values"
    # are dicts of several parameters that get updated together.
    for i, group in enumerate(paramGroups):
        candidates["_group" + str(i)] = group
        total *= len(group)
    permutations = []
    for i in range(total):
        permutation = {}
        remainder = i
        # Decode permutation index i as a mixed-radix number over the
        # candidate value lists.
        for name, v in candidates.items():
            values = deepcopy(v)
            choice = values[remainder % len(v)]
            if "_group" in name:
                # Group entries carry several parameters at once.
                permutation.update(choice)
            else:
                permutation[name] = choice
            remainder //= len(values)
        permutations.append(permutation)
    return permutations
class BenchmarkStep:
    """A single benchmark step which consists of constant and fork parameters and a set of sizes"""

    def __init__(self, forkParams, constantParams, paramGroups, customKernels, problemSizes, idx):
        """Basic constructor storing each argument"""
        self.forkParams = forkParams
        self.constantParams = constantParams
        self.paramGroups = paramGroups
        self.customKernels = customKernels
        self.problemSizes = problemSizes
        self.stepIdx = idx
        # A lone "*" entry means: benchmark every known custom kernel.
        self.customKernelWildcard = self.customKernels == ["*"]
        if self.customKernelWildcard:
            self.customKernels = getAllCustomKernelNames()
        print2("# Creating BenchmarkStep: {} fork params and {} sizes" \
            .format( len(forkParams), problemSizes.totalProblemSizes))

    def isFinal(self):
        """Legacy. Currently always returns true since only one benchmark step is possible"""
        return True

    def __str__(self):
        suffix = "_Final" if self.isFinal() else ""
        return "{:02d}".format(self.stepIdx) + suffix

    def __repr__(self):
        return str(self)
|
<reponame>rominaoji/ParsiNorm<filename>parsinorm/telephone_number.py<gh_stars>0
import re
import random
from num2fawords import words, HUNDREDS, ordinal_words
from .general_normalization import General_normalization
class Telephone_number:
    """Normalise Persian telephone/fax numbers in text into their spoken form."""

    def __init__(self):
        # Shared normaliser used to unify digit characters before matching.
        self.general_normalization = General_normalization()

    def find_phones_replace(self, sentence):
        """Find phone/fax numbers in *sentence* and replace each with Persian words.

        Each findall below targets one textual context a number can appear in
        (after "telephone:", "fax", mobile prefix 09..., land-line prefix
        021..., SMS line, plain "number", an international "+" prefix, etc.).
        """
        sentence = self.general_normalization.number_correction(sentence=sentence)
        telephones = []
        numbers_with_telephone1 = re.findall('تلفن: [۰-۹]+', sentence)
        if numbers_with_telephone1:
            telephones.extend(numbers_with_telephone1)
        numbers_with_telephone2 = re.findall('تلفن [۰-۹]+', sentence)
        if numbers_with_telephone2:
            telephones.extend(numbers_with_telephone2)
        numbers_with_telephone3 = re.findall(' و [۰-۹]+ تلفن های [۰-۹]+', sentence)
        if numbers_with_telephone3:
            telephones.extend(numbers_with_telephone3)
        numbers_with_telephone4 = re.findall('تلفن های [۰-۹]+ و [۰-۹]+', sentence)
        if numbers_with_telephone4:
            telephones.extend(numbers_with_telephone4)
        numbers_with_telephone5 = re.findall('تلفنهای [۰-۹]+ و [۰-۹]+', sentence)
        if numbers_with_telephone5:
            telephones.extend(numbers_with_telephone5)
        numbers_with_telephone6 = re.findall('فکس [۰-۹]+', sentence)
        if numbers_with_telephone6:
            telephones.extend(numbers_with_telephone6)
        numbers_with_telephone7 = re.findall('فاکس [۰-۹]+', sentence)
        if numbers_with_telephone7:
            telephones.extend(numbers_with_telephone7)
        numbers_with_telephone8 = re.findall('۰۹[۰-۹]{9}', sentence)
        if numbers_with_telephone8:
            telephones.extend(numbers_with_telephone8)
        numbers_with_telephone9 = re.findall('۰۲۱[۰-۹]{8}', sentence)
        if numbers_with_telephone9:
            telephones.extend(numbers_with_telephone9)
        numbers_with_telephone10 = re.findall('۰۲۱-[۰-۹]{8}', sentence)
        if numbers_with_telephone10:
            telephones.extend(numbers_with_telephone10)
        numbers_with_telephone11 = re.findall('۰۲۱-[۰-۹]+', sentence)
        if numbers_with_telephone11:
            telephones.extend(numbers_with_telephone11)
        numbers_with_telephone12 = re.findall('۰۲۱[۰-۹]+', sentence)
        if numbers_with_telephone12:
            telephones.extend(numbers_with_telephone12)
        numbers_with_telephone13 = re.findall('تلفن \+[۰-۹]+', sentence)
        if numbers_with_telephone13:
            telephones.extend(numbers_with_telephone13)
        numbers_with_telephone14 = re.findall('تلفن [۰-۹]+\s?-\s?[۰-۹]+', sentence)
        if numbers_with_telephone14:
            telephones.extend(numbers_with_telephone14)
        numbers_with_telephone15 = re.findall('شماره تماس: [۰-۹]+', sentence)
        if numbers_with_telephone15:
            telephones.extend(numbers_with_telephone15)
        numbers_with_telephone16 = re.findall('سامانه پیامکی: [۰-۹]+', sentence)
        if numbers_with_telephone16:
            telephones.extend(numbers_with_telephone16)
        numbers_with_telephone17 = re.findall(' شماره: [۰-۹]+', sentence)
        if numbers_with_telephone17:
            telephones.extend(numbers_with_telephone17)
        numbers_with_telephone18 = re.findall(' شماره [۰-۹]+', sentence)
        if numbers_with_telephone18:
            telephones.extend(numbers_with_telephone18)
        # Pull the bare digit runs (optionally "+"-prefixed) out of every
        # matched fragment and replace them with their spoken form.
        for telephone in telephones:
            tels = re.findall('\+?[۰-۹]+', telephone)
            for tel in tels:
                sentence = sentence.replace(tel, self.number_with_varaible_length(str(tel)))
        return sentence

    def convert_number_to_letter(self, number):
        """Convert *number* to Persian words ("yeksad" normalised to "sad")."""
        text = words(number)
        text = text.replace("یکصد", "صد")
        return text

    def number_with_varaible_length(self, number):
        """Spell out a digit string the way phone numbers are read aloud.

        The digits are partitioned into groups of 2-3 (a grouping template is
        picked at random among the plausible ones for the given length) and
        each group is converted to words; leading zeros are read out as
        "sefr" ("zero"). NOTE: the method name keeps the original "varaible"
        typo because it is part of the public API.
        """
        converted_to_text = " "
        idx = 0
        step_round = 0
        # International prefix: read "+XX" as "double zero" + country code.
        if number[0] == "+":
            converted_to_text += " دو صفر "
            converted_to_text += self.convert_number_to_letter(number[1:3])
            number = number[3:]
        # Candidate grouping templates per total digit count; each inner
        # list sums to the number of digits left to read.
        if len(number) == 1:
            steps_template = [[1]]
        elif len(number) == 2:
            steps_template = [[2]]
        elif len(number) == 3:
            steps_template = [[3]]
        elif len(number) == 4:
            steps_template = [[2, 2]]
        elif len(number) == 5:
            steps_template = [[3, 2], [2, 3]]
        elif len(number) == 6:
            steps_template = [[3, 3], [2, 2, 2]]
        elif len(number) == 7:
            steps_template = [[3, 2, 2], [2, 2, 3], [2, 3, 2]]
        elif len(number) == 8:
            steps_template = [[3, 3, 2], [2, 3, 3], [3, 2, 3], [2, 2, 2, 2]]
        elif len(number) == 9:
            steps_template = [[3, 3, 3], [3, 2, 2, 2], [2, 3, 2, 2], [2, 2, 3, 2], [2, 2, 2, 3]]
        elif len(number) == 10:
            # NOTE(review): [2, 3, 2, 3] appears twice in this list, which
            # skews the random template choice -- confirm whether intended.
            steps_template = [[2, 2, 2, 2, 2], [3, 3, 2, 2], [2, 2, 3, 3], [3, 2, 2, 3], [2, 3, 2, 3], [3, 2, 3, 2],
                              [2, 3, 2, 3]]
        elif len(number) == 11:
            if number[0:2] == "۰۹":
                # Mobile number: read "sefr" + the 3-digit operator prefix,
                # then group the remaining 7 digits.
                steps_template = [[3, 2, 2], [2, 2, 3], [2, 3, 2]]
                idx += 4
                converted_to_text += " صفر "
                converted_to_text += self.convert_number_to_letter(number[1:4])
            else:
                if number[0] == "۰":
                    # Leading zero read separately; 10 digits remain.
                    converted_to_text += " صفر "
                    idx += 1
                    steps_template = [[2, 3, 3, 2], [2, 2, 3, 3], [2, 3, 2, 3], [2, 2, 2, 2, 2]]
                else:
                    steps_template = [[3, 3, 3, 2], [3, 2, 3, 3], [3, 3, 2, 3], [3, 2, 2, 2, 2]]
        elif len(number) == 12:
            steps_template = [[3, 3, 3, 3], [2, 2, 2, 3, 3], [2, 3, 3, 2, 3], [2, 2, 3, 2, 3], [2, 2, 3, 3, 2],
                              [3, 3, 2, 2, 2], [3, 2, 3, 2, 2], [3, 2, 2, 3, 2], [3, 2, 2, 2, 3]]
        elif len(number) == 13:
            steps_template = [[3, 3, 3, 2, 2], [3, 2, 2, 3, 3], [3, 3, 2, 2, 3], [2, 2, 3, 3, 3], [2, 3, 3, 3, 2],
                              [2, 3, 2, 3, 3],
                              [2, 3, 3, 2, 3], [3, 2, 2, 2, 2, 2], [2, 3, 2, 2, 2, 2], [2, 2, 3, 2, 2, 2],
                              [2, 2, 2, 3, 2, 2],
                              [2, 2, 2, 2, 3, 2], [2, 2, 2, 2, 2, 3]]
        elif len(number) == 14:
            steps_template = [[2, 2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 2], [2, 3, 3, 3, 3], [3, 2, 3, 3, 3], [3, 3, 2, 3, 3],
                              [3, 3, 3, 2, 3], [3, 3, 2, 2, 2, 2],
                              [2, 2, 2, 2, 3, 3], [2, 3, 2, 3, 2, 2], [2, 2, 2, 3, 2, 3], [3, 2, 3, 2, 2, 2],
                              [3, 2, 2, 2, 3, 2]]
        elif len(number) == 15:
            steps_template = [[3, 3, 3, 3, 3], [2, 2, 2, 3, 3, 3], [2, 3, 2, 3, 2, 3], [3, 2, 3, 2, 3, 2],
                              [2, 2, 2, 2, 2, 2, 3],
                              [2, 2, 3, 3, 2, 3], [3, 3, 2, 2, 2, 3], [2, 2, 3, 3, 3, 2],
                              [2, 2, 3, 3, 3, 2],
                              [3, 3, 2, 2, 3, 2], [2, 3, 2, 2, 2, 2, 2], [2, 2, 3, 2, 2, 2, 2],
                              [2, 2, 2, 3, 2, 2, 2],
                              [2, 2, 2, 2, 3, 2, 2], [2, 2, 2, 2, 2, 3, 2], [2, 2, 2, 2, 2, 2, 3]]
        elif len(number) % 2 == 0:
            # Longer even lengths: all pairs.
            length = int(len(number) / 2)
            steps_template = [[2 for index in range(length)]]
        else:
            # Longer odd lengths: pairs plus one final triple.
            length = int((len(number) - 3) / 2)
            steps_template = [[2 for index in range(length)]]
            steps_template[0].append(3)
        # Pick one grouping at random so repeated numbers are not always
        # read identically.
        steps_template_number = random.randint(0, len(steps_template) - 1)
        steps = steps_template[steps_template_number]
        # Walk the digit string group by group, reading leading zeros first.
        while idx < len(number):
            converted_to_text += " "
            this_template = number[idx: idx + steps[step_round]]
            if this_template == "۰۰۰":
                converted_to_text += " سه صفر "
            elif this_template == "۰۰":
                converted_to_text += " دو صفر "
            elif this_template[0:2] == "۰۰":
                converted_to_text += " دو صفر "
            elif this_template[0] == "۰":
                converted_to_text += " صفر "
            converted_to_text += self.convert_number_to_letter(number[idx: idx + steps[step_round]])
            idx = idx + steps[step_round]
            step_round += 1
        return converted_to_text
|
<reponame>ChampionApe/Abatement_project<gh_stars>0
import os
from gams import *
from DB2Gams import *
import DataBase
from dreamtools.gamY import Precompiler
import pandas as pd
def IfInt(x):
    """Return True if *x* can be converted to an int, False otherwise."""
    try:
        int(x)
    except ValueError:
        return False
    return True
def return_version(x, dict_):
    """Return a fresh versioned name for *x* not already used as a key of *dict_*.

    Returns "x" if unused, else "x_0" if unused, else "x_<max+1>" over the
    existing "x_<int>" keys.
    """
    if x not in dict_:
        return x
    elif (x + '_0') not in dict_:
        return x + '_0'
    else:
        # Bug fix: guard against keys without an underscore -- the original
        # evaluated y.split('_')[1] on such keys (including x itself) and
        # raised IndexError. NOTE(review): names that themselves contain
        # '_' still never match split('_')[0] -- confirm intended.
        maxInt = max([int(y.split('_')[1]) for y in dict_
                      if ('_' in y and y.split('_')[0] == x and IfInt(y.split('_')[1]))])
        return x + '_' + str(maxInt + 1)
def end_w_y(x, y):
    """Return *x*, appending suffix *y* unless it is already present."""
    return x if x.endswith(y) else x + y
def end_w_gdx(x):
    """Return *x* ensured to end with the '.gdx' extension."""
    return end_w_y(x,'.gdx')
def end_w_gms(x):
    """Return *x* ensured to end with the '.gms' extension."""
    return end_w_y(x,'.gms')
def end_w_gmy(x):
    """Return *x* ensured to end with the '.gmy' extension."""
    return end_w_y(x,'.gmy')
class AddShocks:
    """
    Class that includes various ways to write gams-files that adds shocks to a GAMS model.
    """
    def __init__(self,name,shock_db,loop_name,prefix='sol_'):
        self.name = name # name of model to 'solve' in loop statement.
        self.shock_gm = gams_model_py(shock_db) # gams_model_py class with information on shocks.
        self.loop_name = loop_name # name of mapping to loop over.
        self.loop_text = "" # text to write inside loop.
        self.prefix=prefix # prefix used in UEVAS part.
        self.write_components = {} # components used to write 'text'.

    def WriteResolve(self,type_='CNS'):
        """Return the GAMS solve statement for this model (default type CNS)."""
        return f"solve {self.name} using {type_};\n"

    @property
    def text(self):
        """
        Return loop state with current state of attributes.
        """
        # Concatenates the component snippets in insertion order.
        return ' '.join([self.write_components[x] for x in self.write_components])

    def write_sets(self):
        """
        Write gams code for declaring loop-sets, and loading in values form database in self.shock_gm.database.
        """
        self.write_components['sets'] = (self.shock_gm.write_sets()+
                                         self.shock_gm.write_aliased_sets()+
                                         self.shock_gm.write_sets_other()+
                                         self.shock_gm.write_aliased_sets_other()+
                                         self.shock_gm.write_sets_load(self.shock_gm.database.name))
        return self.write_components['sets']

    def write_pars(self):
        """
        Write gams code for declaring parameters and load in values.
        """
        self.write_components['pars'] = (self.shock_gm.write_parameters()+
                                         self.shock_gm.write_parameters_load(self.shock_gm.database.name))
        return self.write_components['pars']

    def write_loop_text(self):
        """
        Write the loop text using the database with loop information + text from 'loop_text'.
        """
        self.write_components['loop'] = """loop( ({sets})$({cond}), {loop})
""".format( sets = ', '.join(self.shock_gm.database[self.loop_name].names),
            cond = self.shock_gm.database.get(self.loop_name).to_str,
            loop = self.loop_text)
        return self.write_components['loop']

    def UpdateExoVarsAndSolve(self,model):
        """
        (Shorthand: UEVAS, could in principle be a class.)
        Write a type of 'loop-text' that performs the following steps:
        (1) Update value of exogenous variable,
        (2) Resolve model,
        (3) Store solution in database.
        """
        self.model = model
        self.name = self.model.model.name
        # 'sol': parameters that capture solution values; 'adj': exogenous
        # variables to update before each resolve.
        self.UEVAS = {'sol': {}, 'adj': {}}

    @property
    def UEVAS_text(self):
        """Assemble the complete UEVAS gams program text from its components."""
        self.write_components = {}
        self.write_sets()
        self.write_pars()
        self.UEVAS_WritePGroup()
        # Loop body: update exogenous vars, resolve, store the solution.
        self.loop_text = self.UEVAS_UpdateExoVars()+self.WriteResolve()+self.UEVAS_WriteStoreSol()
        self.write_loop_text()
        return self.text

    def UEVAS_2gmy(self,file_name):
        """Write the UEVAS text to .gms and precompile it into a .gmy file."""
        with open(end_w_gms(file_name),"w") as file:
            file.write(self.UEVAS_text)
        with open(end_w_gmy(file_name),"w") as file:
            file.write(Precompiler(end_w_gms(file_name))())
        # os.remove(end_w_gms(file_name))
        self.gmy = end_w_gmy(file_name)
        self.gms = end_w_gms(file_name)

    def UEVAS_var2sol(self,var,loop_dom,conditions=None):
        """
        Var_domains should be a list (potentially empty).
        """
        # Registers a solution-parameter entry (versioned, prefixed name)
        # spanning the loop domain plus the variable's own index names.
        self.UEVAS['sol'][return_version(self.prefix+var,self.UEVAS['sol'])] = {'dom': f"[{', '.join(self.shock_gm.database[loop_dom].names+self.model.out_db[var].index.names)}]",
                                                                               'cond': "" if conditions is None else f"$({conditions})",
                                                                               'var': var}

    def UEVAS_WritePGroup(self):
        """Declare the gams parameters that will hold the stored solutions."""
        self.write_components['UEVAS_sol'] = 'parameter\n'
        for x in self.UEVAS['sol']:
            self.write_components['UEVAS_sol'] += f"\t{x}{self.UEVAS['sol'][x]['dom']}\n" # add conditionals to param? {self.UEVAS['sol'][x]['cond']}
        self.write_components['UEVAS_sol'] += ';\n\n'

    def UEVAS_WriteStoreSol(self):
        """Return gams code that copies solved variable levels into parameters."""
        out_str = ""
        for x in self.UEVAS['sol']:
            out_str += "{solpar} = {solvar};\n".format(
                solpar = x+self.UEVAS['sol'][x]['dom']+self.UEVAS['sol'][x]['cond'],
                solvar = (self.model.out_db.get(self.UEVAS['sol'][x]['var'],level='.l').to_str))
        out_str += '\n'
        return out_str

    def UEVAS_adjVar(self,var,par,conditions=None,overwrite=False):
        # NOTE(review): 'overwrite' is currently unused -- confirm intent.
        self.UEVAS['adj'][return_version(var,self.UEVAS['adj'])] = {'varname': var, 'par': par, 'cond': conditions}

    def UEVAS_UpdateExoVars(self):
        """Return gams code fixing each registered exogenous variable to its parameter."""
        out_str = ""
        for x in self.UEVAS['adj']:
            out_str += "\t{var} = {par};\n".format(
                var = self.model.out_db.get(self.UEVAS['adj'][x]['varname'],conditions=self.UEVAS['adj'][x]['cond'],level='.fx').to_str,
                par = self.shock_gm.database.get(self.UEVAS['adj'][x]['par']).to_str)
        out_str += '\n\n'
        return out_str
|
<gh_stars>1-10
import json
from flask import jsonify
import redis
from flask import Flask, request
from hmac import HMAC, compare_digest
from hashlib import sha1
from redis_benchmarks_specification.__common__.builder_schema import (
commit_schema_to_stream,
)
from redis_benchmarks_specification.__common__.env import PULL_REQUEST_TRIGGER_LABEL
SIG_HEADER = "X-Hub-Signature"
def should_action(action):
    """Return True when the webhook pull-request *action* should trigger work.

    Only "synchronize", "opened", "reopened" and "labeled" actions are
    acted upon.
    """
    # Bug fix: the original tested `action in tt`, i.e. substring membership
    # in each type string, so inputs like "ned" or "open" wrongly matched
    # "opened". Use exact membership in the allowed set instead.
    return action in ("synchronize", "opened", "reopened", "labeled")
def create_app(conn, user, test_config=None):
    """Build the Flask app that receives GitHub webhook events for redis/redis.

    conn: Redis connection; used to look up the webhook shared secret and as
        the backend passed to commit_schema_to_stream.
    user: key prefix under which the auth token is stored ("<user>:auth_token").
    test_config: accepted for the Flask app-factory convention but unused here.
    """
    app = Flask(__name__)
    conn = conn  # NOTE(review): self-assignment is a no-op; conn is already bound
    # GH Token Authentication
    def verify_signature(req):
        # Validate the X-Hub-Signature header (HMAC-SHA1 of the raw body)
        # against the shared secret stored in Redis. Returns False on any
        # missing secret/header or Redis error.
        result = False
        try:
            secret = conn.get("{}:auth_token".format(user))
            sig_header = req.headers.get(SIG_HEADER)
            if secret is not None and sig_header is not None:
                if type(secret) == str:
                    secret = secret.encode()
                if "sha1=" in sig_header:
                    received_sign = sig_header.split("sha1=")[-1].strip()
                    expected_sign = HMAC(
                        key=secret, msg=req.data, digestmod=sha1
                    ).hexdigest()
                    # Constant-time comparison to avoid timing side channels.
                    result = compare_digest(received_sign, expected_sign)
        except redis.exceptions.ResponseError:
            pass
        except redis.exceptions.AuthenticationError:
            pass
        return result

    @app.route("/api/gh/redis/redis/commits", methods=["POST"])
    def base():
        if verify_signature(request):
            print(request)
            # Get Request body from JSON
            request_data = request.json
            if type(request_data) is str:
                request_data = json.loads(request_data)
            if type(request_data) is bytes:
                request_data = json.loads(request_data.decode())
            gh_org = "redis"
            gh_repo = "redis"
            ref = None
            ref_label = None
            sha = None
            event_type = "Ignored event from webhook"
            use_event = False
            # Pull request labeled
            trigger_label = PULL_REQUEST_TRIGGER_LABEL
            if "pull_request" in request_data:
                action = request_data["action"]
                if should_action(action):
                    pull_request_dict = request_data["pull_request"]
                    head_dict = pull_request_dict["head"]
                    repo_dict = head_dict["repo"]
                    labels = []
                    if "labels" in pull_request_dict:
                        labels = pull_request_dict["labels"]
                    ref = head_dict["ref"]
                    ref_label = head_dict["label"]
                    sha = head_dict["sha"]
                    # Derive org/repo from the PR head repo URL
                    # (supports forks with different org names).
                    html_url = repo_dict["html_url"].split("/")
                    gh_repo = html_url[-1]
                    gh_org = html_url[-2]
                    detected_label = False
                    for label in labels:
                        label_name = label["name"]
                        if trigger_label == label_name:
                            use_event = True
                            event_type = "Pull request labeled with '{}'".format(
                                trigger_label
                            )
                            detected_label = True
                    if detected_label is False:
                        app.logger.info(
                            "Unable to detected benchmark trigger label: {}".format(
                                trigger_label
                            )
                        )
            # Git pushes to repo
            before_sha = None
            if "ref" in request_data:
                repo_dict = request_data["repository"]
                html_url = repo_dict["html_url"].split("/")
                gh_repo = html_url[-1]
                gh_org = html_url[-2]
                ref = request_data["ref"].split("/")[-1]
                ref_label = request_data["ref"]
                sha = request_data["after"]
                before_sha = request_data["before"]
                use_event = True
                event_type = "Git pushes to repo"
            if use_event is True:
                # For pushes we also enqueue the pre-push commit so a
                # merge-base comparison benchmark can run.
                # NOTE(review): fields_before uses `sha` (the after-commit),
                # not `before_sha` — confirm this is intended.
                if before_sha is not None:
                    fields_before = {
                        "git_hash": sha,
                        "ref_label": ref_label,
                        "ref": ref,
                        "gh_repo": gh_repo,
                        "gh_org": gh_org,
                    }
                    app.logger.info(
                        "Using event {} to trigger merge-base commit benchmark. final fields: {}".format(
                            event_type, fields_before
                        )
                    )
                    result, response_data, err_message = commit_schema_to_stream(
                        fields_before, conn, gh_org, gh_repo
                    )
                    app.logger.info(
                        "Using event {} to trigger merge-base commit benchmark. final fields: {}".format(
                            event_type, response_data
                        )
                    )
                fields_after = {
                    "git_hash": sha,
                    "ref_label": ref_label,
                    "ref": ref,
                    "gh_repo": gh_repo,
                    "gh_org": gh_org,
                }
                app.logger.info(
                    "Using event {} to trigger benchmark. final fields: {}".format(
                        event_type, fields_after
                    )
                )
                result, response_data, err_message = commit_schema_to_stream(
                    fields_after, conn, gh_org, gh_repo
                )
                app.logger.info(
                    "Using event {} to trigger benchmark. final fields: {}".format(
                        event_type, response_data
                    )
                )
            else:
                app.logger.info(
                    "{}. input json was: {}".format(event_type, request_data)
                )
                response_data = {"message": event_type}
            # Send data back as JSON
            return jsonify(response_data), 200
        else:
            return "Forbidden", 403

    return app
|
<reponame>TimothyKlim/rules_scala3
load("//rules:scala.bzl", "scala_binary", "scala_library")
load(
"@rules_scala3//rules:providers.bzl",
_ScalaConfiguration = "ScalaConfiguration",
_ScalaInfo = "ScalaInfo",
)
load(
"//rules/common:private/utils.bzl",
_resolve_execution_reqs = "resolve_execution_reqs",
)
def scalajs_library(name, srcs, deps = [], visibility = None, scalacopts = [], scala = None, deps_used_whitelist = []):
    """Make scalajs library for provided sources.

    Thin wrapper over scala_library that:
      * prepends the Scala.js standard library jar to both `deps` and
        `deps_used_whitelist` (so the unused-deps check never flags it), and
      * prepends the `-scalajs` compiler option to `scalacopts`.

    Remaining arguments are forwarded to scala_library unchanged; `scala`
    selects the ScalaConfiguration target to compile against.
    """
    scala_library(
        name = name,
        srcs = srcs,
        visibility = visibility,
        deps_used_whitelist = [
            "@scalajs_library_2_13//jar",
        ] + deps_used_whitelist,
        deps = [
            "@scalajs_library_2_13//jar",
        ] + deps,
        scalacopts = ["-scalajs"] + scalacopts,
        scala = scala,
    )
def _scalajs_link_impl(ctx):
    """Run the Scala.js linker over the transitive runtime jars of all Scala
    deps and emit a single <name>.js bundle."""
    out = ctx.actions.declare_file("{}.js".format(ctx.label.name))
    inputs = []
    for dep in ctx.attr.deps:
        if _ScalaInfo in dep:
            # .to_list() already yields a flat list; the original wrapped it
            # in a redundant identity comprehension that also shadowed `dep`.
            inputs += dep[JavaInfo].transitive_runtime_jars.to_list()
    args = ctx.actions.args()
    args.add("--main-class", ctx.attr.main_class)
    args.add("--main-method", ctx.attr.main_method)
    args.add("--with-args", "true" if ctx.attr.main_method_with_args else "false")
    args.add("--module", ctx.attr.module_kind)
    args.add("--dest", out.path)
    args.add_all(inputs)
    outputs = [out]
    ctx.actions.run(
        mnemonic = "ScalaJsLinker",
        inputs = inputs,
        outputs = outputs,
        executable = ctx.attr._scalajs_linker.files_to_run.executable,
        # input_manifests = input_manifests,
        # execution_requirements = _resolve_execution_reqs(ctx, {"no-sandbox": "1", "supports-workers": "1"}),
        # arguments = worker_args + [args],
        arguments = [args],
        use_default_shell_env = True,
    )
    return [DefaultInfo(files = depset(outputs))]
# Rule wiring for the Scala.js linker: collects JVM deps and invokes the
# scalajs_linker tool (see _scalajs_link_impl) to produce a .js bundle.
scalajs_link = rule(
    attrs = {
        "data": attr.label_list(
            doc = "The additional runtime files needed by this library.",
            allow_files = True,
        ),
        "deps_used_whitelist": attr.label_list(
            doc = "The JVM library dependencies to always consider used for `scala_deps_used` checks.",
            providers = [JavaInfo],
        ),
        "deps_unused_whitelist": attr.label_list(
            doc = "The JVM library dependencies to always consider unused for `scala_deps_direct` checks.",
            providers = [JavaInfo],
        ),
        "deps": attr.label_list(
            doc = "The JVM library dependencies.",
            providers = [JavaInfo],
        ),
        "scala": attr.label(
            default = "//external:default_scala",
            doc = "The `ScalaConfiguration`. Among other things, this specifies which scala version to use.\n Defaults to the default_scala target specified in the WORKSPACE file.",
            providers = [
                _ScalaConfiguration,
            ],
        ),
        "scalacopts": attr.string_list(
            doc = "The Scalac options.",
        ),
        # "auto" presumably lets the linker discover the main class — confirm
        # against the scalajs_linker tool's CLI.
        "main_class": attr.string(default = "auto"),
        "main_method": attr.string(default = "main"),
        "module_kind": attr.string(default = "no-module"),
        "main_method_with_args": attr.bool(default = False),
        # Linker binary, built for the host configuration.
        "_scalajs_linker": attr.label(
            default = "@rules_scala3//scala/workers/scalajs:scalajs_linker",
            allow_files = True,
            executable = True,
            cfg = "host",
        ),
    },
    implementation = _scalajs_link_impl,
)
|
import frappe
@frappe.whitelist()
def get_customer_transportation_list(customer_email, role, name):
    """Return load-tracking rows plus presentation metadata for the portal.

    customer_email: portal user's email; used to resolve the Customer when
        *role* is falsy.
    role: falsy => customer view restricted to own loads; truthy => back-office
        view over all submitted loads.
    name: optional Load Tracking name to pre-select; otherwise the first row
        is selected.

    Two near-identical branches build HTML snippets (status colors, progress
    bar widths, step images) directly into each row dict.
    """
    condition = ""  # NOTE(review): never used below
    selected_load_tracking = []
    selected = ""
    if not role:
        # Customer path: only this customer's submitted (docstatus=1) loads.
        customer = frappe.db.sql(""" SELECT * FROM `tabCustomer` WHERE user_id=%s """, customer_email, as_dict=1)
        if len(customer) > 0:
            load_tracking = frappe.db.sql(""" SELECT * FROM `tabLoad Tracking` WHERE customer=%s and docstatus = 1""", customer[0].name, as_dict=1)
            if len(load_tracking) > 0:
                for i in load_tracking:
                    print(frappe.utils.get_url())
                    print('href="' + frappe.utils.get_url() + "/desk#Form/Load%20Tracking/" + i.name + '"')
                    # NOTE(review): trailing comma makes i['view'] a 1-tuple,
                    # not a string — confirm the template expects that.
                    i['view'] = '''onclick="view('{0}')"'''.format(i.name),
                    i['href'] = 'href="' + frappe.utils.get_url() + "/desk#Form/Load%20Tracking/" + i.name + '"'
                    if i.status in ["Collecting", "Collected", "In Transit", "Bay Bill"]:
                        i["status_color"] = 'style="margin-right: 8px;color: orange"'
                    elif i.status in ["Delivered"]:
                        i["status_color"] = 'style="margin-right: 8px;color: #98d85b"'
                    elif i.status in ["Cancelled"]:
                        i["status_color"] = 'style="margin-right: 8px;color: #ff5858"'
                if name:
                    selected = name
                    selected_load_tracking = frappe.db.sql(""" SELECT * FROM `tabLoad Tracking` WHERE customer=%s and name=%s""",
                                                           (customer[0].name, name), as_dict=1)
                    selected_load_tracking[0]['load_tracking_locations'] = frappe.db.sql(
                        """ SELECT * FROM `tabLoad Tracking Locations` WHERE parent=%s ORDER BY idx ASC""", name, as_dict=1)
                else:
                    # Default to the first load in the list.
                    selected = load_tracking[0].name
                    selected_load_tracking.append(load_tracking[0])
                    selected_load_tracking[0]['load_tracking_locations'] = frappe.db.sql(
                        """ SELECT * FROM `tabLoad Tracking Locations` WHERE parent=%s ORDER BY idx ASC""", selected, as_dict=1)
                # Progress-bar width scales with status (0%, 25%, 50%, 75%, 100%).
                selected_load_tracking[0]['foreground'] = 'style="width: calc((100% - 140px) * 0.00); background: rgb(45, 194, 88);"' if selected_load_tracking[0].status == "Collecting" \
                    else 'style="width: calc((100% - 140px) * 0.25); background: rgb(45, 194, 88);"' if selected_load_tracking[0].status == "Collected" \
                    else 'style="width: calc((100% - 140px) * 0.50); background: rgb(45, 194, 88);"' if selected_load_tracking[0].status == "In Transit" \
                    else 'style="width: calc((100% - 140px) * 0.75); background: rgb(45, 194, 88);"' if selected_load_tracking[0].status == "Bay Bill" \
                    else 'style="width: calc((100% - 140px) * 1); background: rgb(45, 194, 88);"'
                # Step icons: check3 = current step, check2 = completed, check4 = pending.
                selected_load_tracking[0]['collecting_image'] = '<img src="files/check3.png" height="55" width="55" />' if \
                    selected_load_tracking[0].status == "Collecting" \
                    else '<img src="files/check2.png" height="55" width="55" />' if selected_load_tracking[0].status in ["Collected",
                                                                                                                         "In Transit",
                                                                                                                         "Bay Bill",
                                                                                                                         "Delivered"] \
                    else '<img src="files/check4.jpg" height="55" width="55" />'
                selected_load_tracking[0]['collected_image'] = '<img src="files/check3.png" height="55" width="55" />' if \
                    selected_load_tracking[0].status == "Collected" \
                    else '<img src="files/check2.png" height="55" width="55" />' if selected_load_tracking[0].status in ["In Transit",
                                                                                                                         "Bay Bill",
                                                                                                                         "Delivered"] \
                    else '<img src="files/check4.jpg" height="55" width="55" />'
                selected_load_tracking[0]['in_transit_image'] = '<img src="files/check3.png" height="55" width="55" />' if \
                    selected_load_tracking[0].status == "In Transit" \
                    else '<img src="files/check2.png" height="55" width="55" />' if selected_load_tracking[0].status in ["Bay Bill",
                                                                                                                         "Delivered"] \
                    else '<img src="files/check4.jpg" height="55" width="55" />'
                selected_load_tracking[0]['bay_bill_image'] = '<img src="files/check3.png" height="55" width="55" />' if \
                    selected_load_tracking[0].status == "Bay Bill" \
                    else '<img src="files/check2.png" height="55" width="55" />' if selected_load_tracking[0].status in ["Delivered"] \
                    else '<img src="files/check4.jpg" height="55" width="55" />'
                selected_load_tracking[0]['delivered_image'] = '<img src="files/check3.png" height="55" width="55" />' if \
                    selected_load_tracking[0].status == "Delivered" \
                    else '<img src="files/check4.jpg" height="55" width="55" />'
                return {
                    "load_tracking_list": load_tracking,
                    "length": len(load_tracking),
                    "selected_load_tracking": selected_load_tracking,
                    "selected_load_tracking_length": len(selected_load_tracking),
                    "selected_load_tracking_locations_length": len(selected_load_tracking[0].load_tracking_locations),
                    "selected": selected
                }
            else:
                return {}
        else:
            # No Customer record matches this user; frappe.throw raises.
            frappe.throw("User is not a Customer")
    # Back-office path (role present): every submitted load.
    load_tracking = frappe.db.sql(""" SELECT * FROM `tabLoad Tracking` where docstatus = 1""",
                                  as_dict=1)
    for i in load_tracking:
        print(frappe.utils.get_url())
        print('href="' + frappe.utils.get_url() + "/desk#Form/Load%20Tracking/" + i.name + '"')
        # NOTE(review): same 1-tuple artifact as the customer branch above.
        i['view'] = '''onclick="view('{0}')"'''.format(i.name),
        i['href'] = 'href="' + frappe.utils.get_url() + "/desk#Form/Load%20Tracking/" + i.name + '"'
        if i.status in ["Collecting", "Collected", "In Transit", "Bay Bill"]:
            i["status_color"] = 'style="margin-right: 8px;color: orange"'
        elif i.status in ["Delivered"]:
            i["status_color"] = 'style="margin-right: 8px;color: #98d85b"'
        elif i.status in ["Cancelled"]:
            i["status_color"] = 'style="margin-right: 8px;color: #ff5858"'
    if name:
        selected = name
        selected_load_tracking = frappe.db.sql(""" SELECT * FROM `tabLoad Tracking` WHERE name=%s and docstatus = 1""", name, as_dict=1)
        selected_load_tracking[0]['load_tracking_locations'] = frappe.db.sql(""" SELECT * FROM `tabLoad Tracking Locations` WHERE parent=%s ORDER BY idx ASC""", name, as_dict=1)
    else:
        selected = load_tracking[0].name
        selected_load_tracking.append(load_tracking[0])
        selected_load_tracking[0]['load_tracking_locations'] = frappe.db.sql(""" SELECT * FROM `tabLoad Tracking Locations` WHERE parent=%s ORDER BY idx ASC""", selected, as_dict=1)
    # Same presentation metadata as the customer branch.
    selected_load_tracking[0]['foreground'] = 'style="width: calc((100% - 140px) * 0.00); background: rgb(45, 194, 88);"' if \
        selected_load_tracking[0].status == "Collecting" \
        else 'style="width: calc((100% - 140px) * 0.25); background: rgb(45, 194, 88);"' if selected_load_tracking[0].status == "Collected" \
        else 'style="width: calc((100% - 140px) * 0.50); background: rgb(45, 194, 88);"' if selected_load_tracking[0].status == "In Transit" \
        else 'style="width: calc((100% - 140px) * 0.75); background: rgb(45, 194, 88);"' if selected_load_tracking[0].status == "Bay Bill" \
        else 'style="width: calc((100% - 140px) * 1); background: rgb(45, 194, 88);"'
    selected_load_tracking[0]['collecting_image'] = '<img src="files/check3.png" height="55" width="55" />' if selected_load_tracking[0].status == "Collecting" \
        else '<img src="files/check2.png" height="55" width="55" />' if selected_load_tracking[0].status in ["Collected", "In Transit", "Bay Bill", "Delivered"] \
        else '<img src="files/check4.jpg" height="55" width="55" />'
    selected_load_tracking[0]['collected_image'] = '<img src="files/check3.png" height="55" width="55" />' if selected_load_tracking[0].status == "Collected" \
        else '<img src="files/check2.png" height="55" width="55" />' if selected_load_tracking[0].status in ["In Transit", "Bay Bill", "Delivered"] \
        else '<img src="files/check4.jpg" height="55" width="55" />'
    selected_load_tracking[0]['in_transit_image'] = '<img src="files/check3.png" height="55" width="55" />' if selected_load_tracking[0].status == "In Transit" \
        else '<img src="files/check2.png" height="55" width="55" />' if selected_load_tracking[0].status in ["Bay Bill", "Delivered"] \
        else '<img src="files/check4.jpg" height="55" width="55" />'
    selected_load_tracking[0]['bay_bill_image'] = '<img src="files/check3.png" height="55" width="55" />' if selected_load_tracking[0].status == "Bay Bill" \
        else '<img src="files/check2.png" height="55" width="55" />' if selected_load_tracking[0].status in [ "Delivered"] \
        else '<img src="files/check4.jpg" height="55" width="55" />'
    selected_load_tracking[0]['delivered_image'] = '<img src="files/check3.png" height="55" width="55" />' if selected_load_tracking[0].status == "Delivered" \
        else '<img src="files/check4.jpg" height="55" width="55" />'
    return {
        "load_tracking_list": load_tracking,
        "length": len(load_tracking),
        "selected_load_tracking": selected_load_tracking,
        "selected_load_tracking_length": len(selected_load_tracking),
        "selected_load_tracking_locations": selected_load_tracking[0].load_tracking_locations,
        "selected_load_tracking_locations_length": len(selected_load_tracking[0].load_tracking_locations),
        "selected": selected
    }
# =============================================================================
# ---------------------------------------------
# Battery Monitoring System:
# ---------------------------------------------
# =============================================================================
import random

# Declare global variables
CURRENT_BATTERYPACK = 'Lithium'  # active chemistry; key into batteryThresholdParams
LANGUAGE = 'EN'                  # message language code: 'EN' or 'DE'
ALERTS = []                      # accumulated breach/warning records (cleared after printing)

# Define multi language error messages, keyed by status then language code.
error_messages = {'low_breach': {'DE': 'Untergrenze überschritten für ',
                                 'EN': 'Lower Limit Breached for '},
                  'low_warning': {'DE': 'Warnung vor Untergrenze für ',
                                  'EN': 'Warning: Lower Limit approaching for '},
                  'high_breach': {'DE': 'Obergrenze überschritten für ',
                                  'EN': 'Higher Limit Breached for '},
                  'high_warning': {'DE': 'Warnung vor höherer Grenze für ',
                                   'EN': 'Warning: Higher Limit approaching for '}}

# MVC Model
# Declare operating threshold params for different battery packs.
# Each criterion maps to its inclusive operating window {lower, upper}.
batteryThresholdParams = {'Lithium':
                          {
                              'Temperature': {'lower': 0, 'upper': 45},
                              'StateOfCharge': {'lower': 20, 'upper': 80},
                              'ChargeRate': {'lower': 0, 'upper': 0.8},
                          },
                          'NiMh':
                          {
                              'Temperature': {'lower': -20, 'upper': 40},
                              'StateOfCharge': {'lower': 20, 'upper': 80},
                              'ChargeRate': {'lower': 0, 'upper': 0.8},
                          },
                          }
def getBatteryThresholdLimit(batteryType):
    """Look up the per-criterion threshold table for *batteryType*."""
    limits = batteryThresholdParams[batteryType]
    return limits
#Check if Battery is working fine
# Check if Battery is working fine
def battery_is_ok(**kwargs):
    """Validate each keyword criterion (e.g. Temperature=25) against the
    current battery pack's thresholds.

    Returns True when every criterion is within [lower, upper] including the
    warning margins; otherwise records alerts (printed and then cleared via
    printErrorMessages) and returns False. Uses the module globals
    BATTERY_CONDITION_ALL_OK and ALERTS as shared state.
    """
    batteryLimits = getBatteryThresholdLimit(CURRENT_BATTERYPACK)
    global BATTERY_CONDITION_ALL_OK
    BATTERY_CONDITION_ALL_OK = True
    for criteria, criteriavalue in kwargs.items():
        lower, upper = getBoundaryConditions(batteryLimits, criteria, criteriavalue)
        lower_status = checkLowerLimitBreach(criteriavalue, lower, upper)
        upper_status = checkUpperLimitBreach(criteriavalue, upper)
        hasBreached = checkBreaches(lower_status, upper_status)
        if hasBreached:
            BATTERY_CONDITION_ALL_OK = False
            global ALERTS
            # setErrorMessages appends to and returns the global ALERTS list.
            ALERTS = setErrorMessages(upper_status, lower_status, criteria, criteriavalue)
    printErrorMessages(ALERTS)
    return BATTERY_CONDITION_ALL_OK
def checkBreaches(lower_status, upper_status):
    """Return True when either status string deviates from 'normal'."""
    return not (upper_status == 'normal' and lower_status == 'normal')
#Consolidate Error messages - MVC Controller
# Consolidate Error messages - MVC Controller
def setErrorMessages(upper_status, lower_status, criteria, criteriavalue):
    """Append one alert record for the breached criterion to the global
    ALERTS list (the upper-limit status takes precedence) and return it."""
    if upper_status != 'normal':
        status = upper_status
    elif lower_status != 'normal':
        status = lower_status
    else:
        status = None
    if status is not None:
        ALERTS.append({'criteria': criteria,
                       'criteriavalue': criteriavalue,
                       'status': status})
    return ALERTS
#Error Reporting - MVC View
# Error Reporting - MVC View
def printErrorMessages(ALERTS):
    """Print a localized message for every alert, then empty the list.

    Bug fix: the original guard was ``if ALERTS.__len__ != 0`` — comparing the
    bound method object itself to 0, which is always True. Clearing
    unconditionally is behaviorally identical (clearing an empty list is a
    no-op) and removes the always-true comparison.
    """
    for error in ALERTS:
        error_type = error['status']
        print(error_messages[error_type][LANGUAGE] + error['criteria'])
    ALERTS.clear()
def getBoundaryConditions(batteryLimits, criteria, criteriavalue):
    """Return the (lower, upper) bounds for *criteria*.

    NOTE(review): *criteriavalue* is accepted but unused — kept for the
    existing call signature.
    """
    bounds = batteryLimits[criteria]
    return bounds['lower'], bounds['upper']
def checkLowerLimitBreach(criteriavalue, lower, upper):
    """Classify *criteriavalue* against the lower operating limit.

    The warning band extends from `lower` up to `lower + 5% of upper`
    (the margin is sized off the upper bound, matching the original logic).
    """
    warning_threshold = lower + upper * 0.05
    if criteriavalue < lower:
        return 'low_breach'
    if criteriavalue < warning_threshold:
        return 'low_warning'
    return 'normal'
def checkUpperLimitBreach(criteriavalue, upper):
    """Classify *criteriavalue* against the upper operating limit.

    Values above `upper` are breaches; values within the top 5% of the
    range (above 0.95 * upper) are warnings.
    """
    breached = criteriavalue > upper
    warning = criteriavalue > upper * 0.95
    if breached:
        return 'high_breach'
    elif warning:
        return 'high_warning'
    return 'normal'
if __name__ == '__main__':
    # Create boundary values for test - middle range, upper limit, lower limit
    temperature_limits = batteryThresholdParams[CURRENT_BATTERYPACK]['Temperature']
    soc_limits = batteryThresholdParams[CURRENT_BATTERYPACK]['StateOfCharge']
    chargerate_limits = batteryThresholdParams[CURRENT_BATTERYPACK]['ChargeRate']
    # middle range
    # NOTE(review): this is the half-width (upper - lower) / 2, not the
    # midpoint (upper + lower) / 2 — it happens to land inside every Lithium
    # window, but confirm the intent for other chemistries.
    temp_middle_range = (temperature_limits['upper'] - temperature_limits['lower']) / 2
    soc_middle_range = (soc_limits['upper'] - soc_limits['lower']) / 2
    chargerate_middle_range = (chargerate_limits['upper'] - chargerate_limits['lower']) / 2
    # ------------------------------------------------------
    # Generate dynamic testcases independent of battery type
    # ------------------------------------------------------
    delta_breach_temperature = random.randint(1, 20)
    delta_breach_soc = random.randint(1, 20)
    """Temperature Tests"""
    # Testcase for normal temperature working range
    assert(battery_is_ok(Temperature = temp_middle_range, StateOfCharge = soc_middle_range, ChargeRate = chargerate_middle_range) is True), 'Temperature Normal Range Test'
    # Testcase to check Upper limit breach for temperature
    assert(battery_is_ok(Temperature = temperature_limits['upper']+delta_breach_temperature, StateOfCharge = soc_middle_range, ChargeRate = chargerate_middle_range) is False), 'Temperature Upper Limit Breach'
    # Testcase to check Lower limit breach for temperature
    assert(battery_is_ok(Temperature = temperature_limits['lower']-delta_breach_temperature, StateOfCharge = soc_middle_range, ChargeRate = chargerate_middle_range) is False), 'Temperature Lower Limit Breach'
    # Lower limit edge testcase for temperature (inside the 5% warning band)
    assert(battery_is_ok(Temperature = temperature_limits['lower']+2, StateOfCharge = soc_middle_range, ChargeRate = chargerate_middle_range) is False), 'Temperature Lower Limit Warning'
    # Upper limit edge testcase for temperature
    assert(battery_is_ok(Temperature = temperature_limits['upper']-2, StateOfCharge = soc_middle_range, ChargeRate = chargerate_middle_range) is False), 'Temperature Upper Limit Warning'
    """State Of Charge Tests"""
    # Testcase for normal State of charge working range
    assert(battery_is_ok(Temperature = temp_middle_range, StateOfCharge = soc_middle_range, ChargeRate = chargerate_middle_range) is True), 'SOC Normal Range Test'
    # Testcase to check Upper limit breach for State of charge
    assert(battery_is_ok(Temperature = temp_middle_range, StateOfCharge = soc_limits['upper']+delta_breach_soc, ChargeRate = chargerate_middle_range) is False), 'SOC Upper Limit Breach'
    # Testcase to check Lower limit breach for State of charge
    assert(battery_is_ok(Temperature = temp_middle_range, StateOfCharge = soc_limits['lower']-delta_breach_soc, ChargeRate = chargerate_middle_range) is False), 'SOC Lower Limit Breach'
    # Upper limit edge testcase for State of charge
    assert(battery_is_ok(Temperature = temp_middle_range, StateOfCharge = soc_limits['upper']-2, ChargeRate = chargerate_middle_range) is False), 'SOC Upper Limit Edge Warning'
    # Lower limit edge testcase for State of charge
    assert(battery_is_ok(Temperature = temp_middle_range, StateOfCharge = soc_limits['lower']+2, ChargeRate = chargerate_middle_range) is False), 'SOC Lower Limit Edge Warning'
    """Charge Rate Tests"""
    # Testcase for normal Charge Rate working range
    assert(battery_is_ok(Temperature = temp_middle_range, StateOfCharge = soc_middle_range, ChargeRate = chargerate_middle_range ) is True), 'Charge Rate Range Test'
    # Testcase to check Upper limit breach for Charge Rate
    assert(battery_is_ok(Temperature = temp_middle_range, StateOfCharge = soc_middle_range, ChargeRate = chargerate_limits['upper']+0.1) is False), 'Charge Upper Limit Breach'
    # Testcase to check lower limit breach for Charge Rate
    assert(battery_is_ok(Temperature = temp_middle_range, StateOfCharge = soc_middle_range, ChargeRate = chargerate_limits['lower']-0.1) is False), 'Charge Lower Limit Breach'
    # Warning-band testcase just below the Charge Rate upper limit
    assert(battery_is_ok(Temperature = temp_middle_range, StateOfCharge = soc_middle_range, ChargeRate = chargerate_limits['upper']-0.02) is False), 'Charge Upper Limit Warning'
    # Warning-band testcase just above the Charge Rate lower limit
    assert(battery_is_ok(Temperature = temp_middle_range, StateOfCharge = soc_middle_range, ChargeRate = chargerate_limits['lower']+0.02) is False), 'Charge Lower Limit Warning'
|
from ..utils import appid, have_appserver, on_production_server
from ..boot import DATA_ROOT
from .creation import DatabaseCreation
from django.db.backends.util import format_number
from djangotoolbox.db.base import NonrelDatabaseFeatures, \
NonrelDatabaseOperations, NonrelDatabaseWrapper, NonrelDatabaseClient, \
NonrelDatabaseValidation, NonrelDatabaseIntrospection
from urllib2 import HTTPError, URLError
import logging
import os
import time
# Handler script path that marks the remote_api endpoint in app.yaml.
REMOTE_API_SCRIPT = '$PYTHON_LIB/google/appengine/ext/remote_api/handler.py'


def auth_func():
    """Prompt for Google Account credentials (Python 2: raw_input/getpass)."""
    import getpass
    return raw_input('Login via Google Account (see note above if login fails): '), getpass.getpass('Password: ')
def rpc_server_factory(*args, ** kwargs):
    """Build an appengine_rpc.HttpRpcServer, forcing save_cookies=True so the
    authenticated session persists across remote_api calls."""
    from google.appengine.tools import appengine_rpc
    kwargs['save_cookies'] = True
    return appengine_rpc.HttpRpcServer(*args, ** kwargs)
def get_datastore_paths(options):
    """Returns a tuple with the datastore, blobstore and history file paths.

    The files live in the same location dev_appserver uses by default, but
    each name is unique to this project so multiple Django projects can be
    developed on the same machine in parallel. Each path can be overridden
    via the corresponding key in *options*.

    Fixes vs. original: removed an unused function-local import of
    dev_appserver_main and corrected the docstring, which claimed a 2-tuple
    while three values are returned.

    Returns:
        (datastore_path, blobstore_path, history_path)
    """
    datastore_path = options.get('datastore_path',
                                 os.path.join(DATA_ROOT, 'datastore'))
    blobstore_path = options.get('blobstore_path',
                                 os.path.join(DATA_ROOT, 'blobstore'))
    history_path = options.get('history_path',
                               os.path.join(DATA_ROOT, 'history'))
    return datastore_path, blobstore_path, history_path
def get_test_datastore_paths(inmemory=True):
    """Returns a tuple with the test datastore, blobstore and history paths.

    If *inmemory* is true, (None, None, None) is returned to request an
    in-memory datastore. Otherwise the paths mirror get_datastore_paths but
    are renamed so tests never touch the real files.

    Bug fix: get_datastore_paths requires an options mapping; the original
    called it with no arguments, which raised TypeError whenever the on-disk
    branch was taken. An empty dict yields the default paths.

    Returns:
        (datastore_path, blobstore_path, history_path)
    """
    if inmemory:
        return None, None, None
    datastore_path, blobstore_path, history_path = get_datastore_paths({})
    datastore_path = datastore_path.replace('.datastore', '.testdatastore')
    blobstore_path = blobstore_path.replace('.blobstore', '.testblobstore')
    # NOTE(review): the default history basename is 'history', which contains
    # no '.datastore' substring, so this replace is a no-op — confirm intent.
    history_path = history_path.replace('.datastore', '.testdatastore')
    return datastore_path, blobstore_path, history_path
def destroy_datastore(*args):
    """Destroys the appengine datastore at the specified paths.

    Falsy paths are skipped. A missing file (errno 2 == ENOENT) is silently
    ignored; any other OSError is logged but not re-raised.
    """
    for path in args:
        if not path:
            continue
        try:
            os.remove(path)
        except OSError, error:
            # errno 2 == ENOENT: file never existed, nothing to clear.
            if error.errno != 2:
                logging.error("Failed to clear datastore: %s" % error)
class DatabaseFeatures(NonrelDatabaseFeatures):
    """Feature flags for the App Engine nonrel backend."""
    # 0 is a valid primary-key value, and entities can store dict values.
    allows_primary_key_0 = True
    supports_dicts = True
class DatabaseOperations(NonrelDatabaseOperations):
    """SQL-less operation shims for the App Engine datastore backend."""
    compiler_module = __name__.rsplit('.', 1)[0] + '.compiler'

    # Fallback precision when a DecimalField declares no max_digits.
    DEFAULT_MAX_DIGITS = 16

    def value_to_db_decimal(self, value, max_digits, decimal_places):
        """Encode a Decimal as a zero-padded string so lexicographic order in
        the datastore matches numeric order; the sign is prepended last.

        Returns None unchanged so NULLs round-trip.
        """
        if value is None:
            return None
        sign = value < 0 and u'-' or u''
        if sign:
            value = abs(value)
        if max_digits is None:
            max_digits = self.DEFAULT_MAX_DIGITS
        if decimal_places is None:
            value = unicode(value)
        else:
            value = format_number(value, max_digits, decimal_places)
        decimal_places = decimal_places or 0
        n = value.find('.')
        if n < 0:
            n = len(value)
        # Left-pad the integer part so all stored values align column-wise.
        if n < max_digits - decimal_places:
            value = u"0" * (max_digits - decimal_places - n) + value
        return sign + value

    def sql_flush(self, style, tables, sequences):
        """There is no SQL to run: flush the datastore directly and return
        an empty statement list."""
        self.connection.flush()
        return []
class DatabaseClient(NonrelDatabaseClient):
    """No interactive shell client exists for the datastore."""
    pass
class DatabaseValidation(NonrelDatabaseValidation):
    """Uses the default nonrel field validation unchanged."""
    pass
class DatabaseIntrospection(NonrelDatabaseIntrospection):
    """Uses the default nonrel introspection unchanged."""
    pass
class DatabaseWrapper(NonrelDatabaseWrapper):
    """Django connection wrapper for the App Engine datastore (Python 2).

    Sets up either the local dev_appserver stubs or, when REMOTE is
    configured, the remote_api stub pointed at the deployed application.
    """

    def __init__(self, *args, **kwds):
        super(DatabaseWrapper, self).__init__(*args, **kwds)
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.validation = DatabaseValidation(self)
        self.introspection = DatabaseIntrospection(self)
        options = self.settings_dict
        self.use_test_datastore = False
        self.test_datastore_inmemory = True
        self.remote = options.get('REMOTE', False)
        # Never use remote_api when already running on App Engine itself.
        if on_production_server:
            self.remote = False
        self.remote_app_id = options.get('REMOTE_APP_ID', appid)
        self.remote_api_path = options.get('REMOTE_API_PATH', None)
        self.secure_remote_api = options.get('SECURE_REMOTE_API', True)
        self._setup_stubs()

    def _get_paths(self):
        # Test runs get renamed (or in-memory) files so real data is untouched.
        if self.use_test_datastore:
            return get_test_datastore_paths(self.test_datastore_inmemory)
        else:
            return get_datastore_paths(self.settings_dict)

    def _setup_stubs(self):
        # If this code is being run without an appserver (eg. via a django
        # commandline flag) then setup a default stub environment.
        if not have_appserver:
            from google.appengine.tools import dev_appserver_main
            args = dev_appserver_main.DEFAULT_ARGS.copy()
            args['datastore_path'], args['blobstore_path'], args['history_path'] = self._get_paths()
            from google.appengine.tools import dev_appserver
            dev_appserver.SetupStubs(appid, **args)
        # If we're supposed to set up the remote_api, do that now.
        if self.remote:
            self.setup_remote()

    def setup_remote(self):
        """Point the datastore API at the deployed app via remote_api,
        retrying authentication with exponential backoff (1..16s)."""
        if not self.remote_api_path:
            # Discover the remote_api handler URL from app.yaml.
            from ..utils import appconfig
            for handler in appconfig.handlers:
                if handler.script == REMOTE_API_SCRIPT:
                    self.remote_api_path = handler.url.split('(', 1)[0]
                    break
        self.remote = True
        remote_url = 'https://%s.appspot.com%s' % (self.remote_app_id,
                                                   self.remote_api_path)
        logging.info('Setting up remote_api for "%s" at %s' %
                     (self.remote_app_id, remote_url))
        if not have_appserver:
            print('Connecting to remote_api handler.\n\n'
                  'IMPORTANT: Check your login method settings in the '
                  'App Engine Dashboard if you have problems logging in. '
                  'Login is only supported for Google Accounts.\n')
        from google.appengine.ext.remote_api import remote_api_stub
        remote_api_stub.ConfigureRemoteApi(self.remote_app_id,
            self.remote_api_path, auth_func, secure=self.secure_remote_api,
            rpc_server_factory=rpc_server_factory)
        retry_delay = 1
        while retry_delay <= 16:
            try:
                remote_api_stub.MaybeInvokeAuthentication()
            except HTTPError, e:
                if not have_appserver:
                    print 'Retrying in %d seconds...' % retry_delay
                    time.sleep(retry_delay)
                    retry_delay *= 2
            else:
                # Authentication succeeded; stop retrying.
                break
        else:
            # All retries exhausted: one final attempt whose failure is
            # converted into an explanatory URLError.
            try:
                remote_api_stub.MaybeInvokeAuthentication()
            except HTTPError, e:
                raise URLError("%s\n"
                               "Couldn't reach remote_api handler at %s.\n"
                               "Make sure you've deployed your project and "
                               "installed a remote_api handler in app.yaml. "
                               "Note that login is only supported for "
                               "Google Accounts. Make sure you've configured "
                               "the correct authentication method in the "
                               "App Engine Dashboard."
                               % (e, remote_url))
        logging.info('Now using the remote datastore for "%s" at %s' %
                     (self.remote_app_id, remote_url))

    def flush(self):
        """Helper function to remove the current datastore and re-open the stubs"""
        if self.remote:
            # Safety gate: deleting the *production* datastore requires the
            # operator to re-type a random 4-letter confirmation code.
            import random, string
            code = ''.join([random.choice(string.ascii_letters) for x in range(4)])
            print '\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            print "Warning! You're about to delete the *production* datastore!"
            print 'Only models defined in your INSTALLED_APPS can be removed!'
            print 'If you want to clear the whole datastore you have to use the ' \
                  'datastore viewer in the dashboard. Also, in order to delete all '\
                  'unneeded indexes you have to run appcfg.py vacuum_indexes.'
            print 'In order to proceed you have to enter the following code:'
            print code
            response = raw_input('Repeat: ')
            if code == response:
                print 'Deleting...'
                from django.db import models
                from google.appengine.api import datastore as ds
                for model in models.get_models():
                    print 'Deleting %s...' % model._meta.db_table
                    # Delete in batches of 200 keys until the kind is empty.
                    while True:
                        data = ds.Query(model._meta.db_table, keys_only=True).Get(200)
                        if not data:
                            break
                        ds.Delete(data)
                print "Datastore flushed! Please check your dashboard's " \
                      'datastore viewer for any remaining entities and remove ' \
                      'all unneeded indexes with manage.py vacuum_indexes.'
            else:
                print 'Aborting'
                exit()
        else:
            # Local stubs: just remove the backing files and re-initialize.
            destroy_datastore(*self._get_paths())
            self._setup_stubs()
|
# -*- coding: utf-8 -*-
# Smoothing and normal estimation based on polynomial reconstruction
# http://pointclouds.org/documentation/tutorials/resampling.php#moving-least-squares
import numpy as np
from scipy.spatial import KDTree as kdtree
import pcl
from utils.pointconv_util import knn_point
from utils.utils import points2pcd
import torch
from time import time
import os
from scipy.optimize import minimize, rosen
import random
eps = 1e-10  # numeric guard used elsewhere in this module to avoid division by zero


def inverse_distance(vector, h=0.1):
    """Gaussian weight from squared row norms: exp(-||v_i||^2 / h^2).

    vector: 2-D tensor of row vectors; h: bandwidth of the kernel.
    Returns a 1-D tensor of weights, one per row.
    """
    squared_norm = (vector * vector).sum(1)
    return torch.exp(-squared_norm / (h * h))
'''def powell_loss(x):
"""x[0:3] is n, x[3] is t"""
'''
def MLS(points):
    """Moving-least-squares smoothing: fit a weighted plane plus a bivariate
    polynomial at every point and keep the neighbors whose polynomial residual
    stays under one standard deviation.

    points: numpy array of 3-D points — assumes shape (N, 3); TODO confirm.
    Returns (filtered_neighbor_list, local_coordinate): per-point inlier
    neighbor indices and the stacked 2-D local-plane coordinates.
    """
    points2pcd(points, "origin")
    #x, y = np.mgrid[0:5, 2:8]
    #tree = kdtree(list(zip(x.ravel(), y.ravel())))
    #pts = np.array([[0, 0], [2.1, 2.9]])
    #res = tree.query(pts)
    start_time = time()
    #tree = kdtree(points)
    #neighbor_lists = tree.query_ball_tree(tree, r=0.1)
    #print("kdtree ball query time is :", time()-start_time)
    start_time = time()
    points = torch.from_numpy(points).unsqueeze(0)
    # 25 nearest neighbors per point (self included by construction).
    neighbor_lists = knn_point(25, points, points).squeeze()
    points = points.squeeze()
    n_list = []             # fitted plane normals (one per point)
    error_rate_list = []    # mean absolute polynomial residual per point
    projected_points = []   # inlier neighbors projected onto the fitted surface
    rate = 0                # fraction of points whose residual beats the std
    local_coordinate = []   # 2-D coordinates of neighbors in the local plane
    filtered_neighbor_list = []
    svd_time = 0
    solve_time = 0
    for i in range(points.shape[0]):
        # Fit the plane
        r = points[i, :]
        neighbors = points[neighbor_lists[i]]
        relative_shift = neighbors - r
        # Gaussian inverse-distance weights for the weighted covariance.
        theta_i = inverse_distance(relative_shift)
        A = torch.matmul(torch.matmul(relative_shift.T, torch.diag(theta_i)), relative_shift)
        local_start = time()
        # NOTE(review): torch.eig is deprecated (removed in recent PyTorch);
        # torch.linalg.eigh would suit this symmetric matrix — confirm before
        # upgrading. The eigenvector at index 2 is taken as the normal,
        # presumably the smallest-eigenvalue direction — verify ordering.
        res = torch.eig(A, eigenvectors=True)
        svd_time += time()-local_start
        init_n = res.eigenvectors[:, 2]
        # Powell iteration (optional)
        n_list.append(init_n)
        # Fit polynomial function
        # Build an orthonormal (x_axis, y_axis) basis spanning the plane
        # orthogonal to init_n; eps guards the divisions.
        nTr = init_n.matmul(r)
        x_axis = torch.tensor([-nTr/(init_n[2]+eps), 0, nTr/(init_n[1]+eps)])
        x_axis = x_axis/(x_axis.norm()+eps)
        y_axis = init_n.cross(x_axis)
        # f_i: signed height of each neighbor above the plane.
        f_i = relative_shift.matmul(init_n)
        local_vector = relative_shift - f_i.repeat(3,1).T*init_n
        x_coordinate = local_vector.matmul(x_axis)
        y_coordinate = local_vector.matmul(y_axis)
        coordinate = torch.stack((x_coordinate, y_coordinate)).T
        local_coordinate.append(coordinate)
        # minimize()
        # Degree-3 (partial) bivariate polynomial basis evaluated per neighbor.
        base = torch.stack([torch.ones_like(x_coordinate), x_coordinate, y_coordinate, x_coordinate*y_coordinate,
                            x_coordinate**2, y_coordinate**2, x_coordinate**2*y_coordinate, y_coordinate**2*x_coordinate,
                            x_coordinate**3, y_coordinate**3])#, x_coordinate**4, x_coordinate**3*y_coordinate,
        #x_coordinate**2*y_coordinate**2, x_coordinate*y_coordinate**3, y_coordinate**4])
        # Weighted normal equations: B * parameter = F.
        B = base.matmul(torch.diag(theta_i)).matmul(base.T)
        F = base.matmul(f_i*theta_i).unsqueeze(1)
        try:
            local_start = time()
            # NOTE(review): torch.solve is deprecated; torch.linalg.solve(B, F)
            # is the modern equivalent. The bare except silently skips points
            # with a singular system — consider narrowing to RuntimeError.
            parameter, LU = torch.solve(F, B)
            solve_time += time()-local_start
        except:
            continue
        predict_f_i = parameter.T.matmul(base).squeeze()
        L_o = ((predict_f_i - f_i).abs()).mean()
        f_std = f_i.std()
        if L_o < f_std:
            rate += 1
        error_rate_list.append(L_o)
        # Keep only neighbors whose residual is below one std of the heights.
        indices = torch.where((predict_f_i - f_i).abs() < f_std)[0]
        filtered_neighbor_list.append(neighbor_lists[i, indices])
        projected_point = (r + local_vector + predict_f_i.repeat(3, 1).T * init_n)
        projected_point = projected_point[indices, :]
        origin_projected_point = parameter[0]*init_n + r
        projected_points.append(projected_point)
        # projected_points.append(origin_projected_point.unsqueeze(0).numpy())
    print("svd time is:", svd_time, ", solve time is:", solve_time)
    rate = rate / points.shape[0]
    projected_points = torch.cat(projected_points)
    local_coordinate = torch.cat(local_coordinate)
    # points2pcd(projected_points, "projected")
    print("plane time is: ", time()-start_time)
    return filtered_neighbor_list, local_coordinate
def main():
    """Compare the custom MLS() implementation against PCL's built-in
    MovingLeastSquares on the same cloud, timing both paths.

    Side effects: reads sample.npy, writes pcd/origin.pcd (via MLS),
    projected_mls pcd and bun0-mls.pcd.
    """
    # // Load input file into a PointCloud<T> with an appropriate type
    # pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ> ());
    # // Load bun0.pcd -- should be available with the PCL archive in test
    # pcl::io::loadPCDFile ("bun0.pcd", *cloud);
    points = np.load("sample.npy").transpose(1, 0)
    filtered_neighbor_list, coordinate = MLS(points)
    # MLS() has already written the input cloud out as pcd/origin.pcd.
    cloud = pcl.load('pcd/origin.pcd')
    print('cloud(size) = ' + str(cloud.size))
    # // Create a KD-Tree
    # pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
    start = time()
    tree = cloud.make_kdtree()
    # tree = cloud.make_kdtree_flann()
    # blankCloud = pcl.PointCloud()
    # tree = blankCloud.make_kdtree()
    # // Output has the PointNormal type in order to store the normals calculated by MLS
    # pcl::PointCloud<pcl::PointNormal> mls_points;
    # mls_points = pcl.PointCloudNormal()
    # // Init object (second point type is for the normals, even if unused)
    # pcl::MovingLeastSquares<pcl::PointXYZ, pcl::PointNormal> mls;
    # mls.setComputeNormals (true);
    #
    # // Set parameters
    # mls.setInputCloud (cloud);
    # mls.setPolynomialFit (true);
    # mls.setSearchMethod (tree);
    # mls.setSearchRadius (0.03);
    #
    # // Reconstruct
    # mls.process (mls_points);
    mls = cloud.make_moving_least_squares()
    # print('make_moving_least_squares')
    mls.set_Compute_Normals(True)
    mls.set_polynomial_fit(True)
    mls.set_polynomial_order(3)
    mls.set_Search_Method(tree)
    mls.set_search_radius(0.1)
    print('set parameters')
    mls_points = mls.process()
    projected = np.asarray(mls_points)
    points2pcd(projected, "projected_mls")
    print("standard time is: ", time() - start)
    # Save output
    # pcl::io::savePCDFile ("bun0-mls.pcd", mls_points);
    pcl.save_PointNormal(mls_points, 'bun0-mls.pcd')
if __name__ == "__main__":
    # Uncomment the two lines below to profile instead of running directly.
    # import cProfile
    # cProfile.run('main()', sort='time')
    main()
|
<gh_stars>1-10
# partly inspired by: https://automaticaddison.com/how-to-draw-contours-around-objects-using-opencv/
# and: https://www.programcreek.com/python/example/89328/cv2.approxPolyDP
import numpy as np
import cv2
from object_detection_kmeans import process_contours, process_frame
from perspective_transformer import get_frame_dimensions
from utility import Frame, Features
# NOTE(review): capture/image handles opened at import time; only `cap` is
# referenced again (released in main) -- verify `vid` and `img` are still
# needed before removing.
cap = cv2.VideoCapture(1)  # webcam at device index 1
vid = cv2.VideoCapture('../assets/WIN_20210918_10_58_50_Pro.mp4')
img = cv2.imread('../assets/whitepixeltest.PNG')
def fill_frame(frame: Frame):
    """Paint the entire frame white, in place."""
    height = len(frame)
    width = len(frame[0])
    # A single polygon covering the whole image, corners in clockwise order:
    # upper-left, upper-right, lower-right, lower-left.
    corners = np.array(
        [[[0, 0], [width, 0], [width, height], [0, height]]],
        dtype=np.int32,
    )
    cv2.fillPoly(frame, corners, (255, 255, 255))
def get_contours(frame: Frame):
    """Return the frame's contours, sorted largest area first.

    Pipeline: grayscale -> inverted binary threshold at 100 ->
    morphological close (5x5) -> two-level contour extraction.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    binary = cv2.threshold(src=gray, thresh=100, maxval=255, type=cv2.THRESH_BINARY_INV)[1]
    closing_kernel = np.ones(shape=(5, 5), dtype=np.uint8)
    binary = cv2.morphologyEx(src=binary, op=cv2.MORPH_CLOSE, kernel=closing_kernel)
    found = cv2.findContours(image=binary, mode=cv2.RETR_CCOMP, method=cv2.CHAIN_APPROX_SIMPLE)
    # findContours returns (contours, hierarchy) on OpenCV 2/4 but
    # (image, contours, hierarchy) on OpenCV 3 -- handle both.
    found = found[0] if len(found) == 2 else found[1]
    return sorted(found, key=cv2.contourArea, reverse=True)
def get_center_points(contours):
    # Thin wrapper over the k-means-based contour post-processing step.
    return process_contours(contours)
def get_black_white_ratio(frame):
    """Return the fraction of non-black pixels in `frame` (0.0 if none)."""
    gray = cv2.cvtColor(frame, cv2.COLOR_RGBA2GRAY)
    dims = get_frame_dimensions(gray)
    lit_pixels = cv2.findNonZero(gray)
    if lit_pixels is None:
        return 0.0
    # The +0.0001 guards against division by zero on a degenerate frame.
    total_pixels = (dims[0] * dims[1]) + 0.0001
    return len(lit_pixels) / total_pixels
# TODO: get center point detection working
def detect_features(frame: Frame) -> Features:
    """Extract contours, black/white ratio and a center point from `frame`.

    Mutates `frame`: it is blanked white, then every large contour is
    outlined green and filled black before the ratio and center point are
    measured from the repainted image.
    """
    contours = get_contours(frame)
    fill_frame(frame)
    for contour in contours:
        # Ignore small blobs; 4000 px^2 is the chosen area cut-off.
        if cv2.contourArea(contour) > 4000:
            perimeter = cv2.arcLength(contour, True)
            # epsilon = 0.1% of perimeter keeps the approximation tight.
            e = 0.001 * perimeter
            contour = cv2.approxPolyDP(contour, epsilon=e, closed=True)
            cv2.drawContours(frame, [contour], contourIdx=-1, color=(0, 255, 0), thickness=6)
            cv2.fillPoly(frame, [contour], (0, 0, 0))
    black_white_ratio = get_black_white_ratio(frame)
    # NOTE(review): the frame originates from BGR video but is converted
    # with COLOR_RGBA2GRAY here -- confirm the channel order is intentional.
    frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2GRAY)
    center_point = get_center_point_bw(frame)
    return Features([center_point], contours, black_white_ratio)
def draw_contour_coordinates_text(frame: Frame, contour: np.ndarray) -> Frame:
    """Label each contour vertex with its coordinates on `frame`.

    The first vertex is labelled "Arrow tip"; all others get "x y" text.
    Improvements over the original: iterates the flattened (x, y) pairs
    directly with a step-2 range instead of walking every scalar with a
    manual counter and parity check, and casts coordinates to plain int
    (newer OpenCV rejects numpy scalar points).

    :param frame: image to draw on (mutated in place)
    :param contour: (N, 1, 2) OpenCV contour array
    :return: the same frame, for chaining
    """
    flat = contour.ravel()
    text_color = (0, 255, 0)
    # flat is [x0, y0, x1, y1, ...]; visit each (x, y) pair once.
    for i in range(0, len(flat) - 1, 2):
        x = int(flat[i])
        y = int(flat[i + 1])
        if i == 0:
            # text on topmost co-ordinate.
            cv2.putText(frame, "Arrow tip", (x, y), 0, 0.5, text_color)
        else:
            # text on remaining co-ordinates.
            cv2.putText(frame, f'{x} {y}', (x, y), 0, 0.5, text_color)
    return frame
def render_image(frame: Frame, features: Features) -> Frame:
    """Redraw `frame`: white background, large contours outlined green and
    filled black, and the first center point marked with a red dot."""
    fill_frame(frame)
    for raw_contour in features.contours:
        if cv2.contourArea(raw_contour) <= 4000:
            continue  # skip small blobs
        epsilon = 0.001 * cv2.arcLength(raw_contour, True)
        simplified = cv2.approxPolyDP(raw_contour, epsilon=epsilon, closed=True)
        cv2.drawContours(frame, [simplified], contourIdx=-1, color=(0, 255, 0), thickness=6)
        cv2.fillPoly(frame, [simplified], (0, 0, 0))
    center = features.center_points[0]
    cv2.circle(frame, (int(center[0]), int(center[1])), 5, (0, 0, 255), 5)
    return frame
def get_center_point_bw(frame):
    """Detect the frame's center point and scale it into a 0..16384 space.

    16384 matches the virtual coordinate system the caller expects.
    """
    center = process_frame(frame)[0]
    dims = get_frame_dimensions(frame)
    scale = 16384
    return (center[0] / dims[0]) * scale, (center[1] / dims[1]) * scale
# get contours -> draw bw contours -> bw detect & centre point detect
def main():
    """Play the screen recording, running feature detection on each frame.

    Fixes over the original: checks `ret`, so a finished or unreadable
    stream breaks the loop instead of passing a None frame into
    detect_features; and releases `video` (the capture opened here) --
    the original only released the module-level `cap`.
    """
    video = cv2.VideoCapture('../assets/screen_recording.mp4')
    try:
        while True:
            ret, frame = video.read()
            if not ret:
                break  # end of stream or read failure
            features = detect_features(frame)
            print(f'bw ratio: {features.black_white_ratio}, center: {features.center_points[0]}')
            frame = render_image(frame, features)
            cv2.imshow('frame', frame)
            k = cv2.waitKey(30) & 0xff
            if k == 27:  # ESC quits
                break
    finally:
        video.release()
        cap.release()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    # Script entry point.
    main()
|
#!/usr/bin/env python
# coding=utf-8
import requests
import os
import json
import threading
import datetime
import shutil
from use_email import sendmail
from zipmyfile import zip_dist
"""统计的时间区间-开始日期"""
git_root_url = "http://gitlab.example.com/"
"""访问Token"""
git_token = "<KEY>2"
"""统计结果的存储目录"""
export_path = "./dist"
"""统计的时间区间-开始日期"""
#t_from = "2021-06-01"
"""获取每个月的1号"""
t_from = str(datetime.date(datetime.date.today().year,
datetime.date.today().month-1, 1))
"""统计的时间区间-结束日期"""
#t_end = "2021-06-24"
"""获取每个月的最后一天"""
t_end = str(datetime.date(datetime.date.today().year,
datetime.date.today().month, 1)-datetime.timedelta(1))
"""统计的时间区间-开始日期,datetime对象"""
date_from = datetime.datetime.strptime(t_from, '%Y-%m-%d')
"""统计的时间区间-结束日期,datetime对象"""
date_end = datetime.datetime.strptime(t_end, '%Y-%m-%d')
"""一个线程锁"""
lock = threading.RLock()
user_unknown = {}
user_email_alias_mapping = {}
user_email_name_mapping = {}
class GitlabApiCountTrueLeTrue:
    """
    Worker class: walks every GitLab project / branch / commit inside the
    reporting window and aggregates per-author line statistics into CSV
    reports (one per project plus an overall total).
    """
    # Set of every commit id already processed, used for de-duplication;
    # duplicates appear when a merge replays a commit onto another branch.
    # NOTE(review): mutated from worker threads without holding `lock` --
    # looks racy; confirm whether duplicate counting is acceptable here.
    """
    所有commit的集合,用于去重。
    这里的重复,可能是代码merge造成的
    """
    total_commit_map = {}
    # Final result set: project_id -> ProjectInfo (writes are guarded by
    # `lock` in get_branches).
    """
    最终的数据集合
    """
    totalMap = {}
    def get_projects(self):
        """
        Fetch all repositories active in the window and generate the report.
        :return:
        """
        threads = []
        # Fetch every repository on the server; spawn one thread per repo.
        for i in range(1, 3):
            # Works against the hosted GitLab; caveat: the listing may not
            # show everything.
            url = '%s/api/v4/projects' \
                '?private_token=%s&per_page=1000&page=%d&order_by=last_activity_at' % (
                    git_root_url, git_token, i)
            r1 = requests.get(url)  # request the url (default SSL handling)
            r2 = r1.json()  # parse the JSON body
            print(r2)
            for r3 in r2:
                value = r3['default_branch']
                last_active_time = r3['last_activity_at']
                if value is None:
                    continue
                days = date_from - \
                    datetime.datetime.strptime(
                        last_active_time, '%Y-%m-%dT%H:%M:%S.%fZ')
                # Skip projects last updated before the window start.
                if days.days > 1:
                    continue
                project_info = ProjectInfo()
                project_info.project_id = r3['id']
                project_info.name = r3['name']
                project_info.project_desc = r3['description']
                project_info.project_url = r3['web_url']
                project_info.path = r3['path']
                # Build the worker thread for this project.
                t = threading.Thread(
                    target=self.get_branches, args=(r3['id'], project_info))
                threads.append(t)
        # Start all threads.
        for t in threads:
            t.start()
        # Wait for all threads to finish.
        for t in threads:
            t.join()
        final_commit_map = {}
        # Merge every project's per-author map into one overall map.
        for key, project in self.totalMap.items():
            for author_email, detail in project.commit_map.items():
                exist_detail = final_commit_map.get(detail.author_email)
                if exist_detail is None:
                    final_commit_map[detail.author_email] = detail
                else:
                    exist_detail.total += detail.total
                    exist_detail.additions += detail.additions
                    exist_detail.deletions += detail.deletions
                    final_commit_map[detail.author_email] = exist_detail
        write_to_csv("%s/GitStatic_%s/%s_%s.csv" % (export_path, t_from, 'total', t_from), final_commit_map,
                     "extra")
        return
    def get_branches(self, project_id, project_info):
        """
        Fetch all branches of one repository and aggregate their commits
        into a single per-author map, then write the project CSV.
        :param project_id:
        :param project_info:
        :return:
        """
        print("进入线程:%d,项目id%d,%s" %
              (threading.get_ident(), project_id, project_info.project_url))
        # Works against the hosted GitLab; listing may be incomplete.
        url = '%s/api/v4/projects/%s/repository/branches?private_token=%s' % (
            git_root_url, project_id, git_token)
        print("start get branch list %d,url=%s" % (project_id, url))
        r1 = requests.get(url)  # request the url (default SSL handling)
        r2 = r1.json()  # parse the JSON body
        if not r2:
            return
        # Branch map: key = branch name, value = sub-map keyed by the
        # author's email.
        branch_map = {}
        # Explicitly fetch the master branch commits first.
        detail_map = self.get_commits(
            project_id, project_info.project_url, 'master')
        print("get commits finish project_id=%d branch master" % project_id)
        if detail_map:
            branch_map['master'] = detail_map
        for r3 in r2:
            branch_name = r3['name']
            if branch_name is None:
                continue
            # Skip branches that have already been merged.
            if r3['merged']:
                continue
            detail_map = self.get_commits(
                project_id, project_info.project_url, branch_name)
            if not detail_map:
                continue
            # Store the per-branch result.
            branch_map[branch_name] = detail_map
            print("get commits finish project_id=%d branch %s" %
                  (project_id, branch_name))
        print("all branch commits finish %d " % project_id)
        final_commit_map = {}
        # Walk the branch map and aggregate by author email.
        for key, value_map in branch_map.items():
            for author_email, detail in value_map.items():
                exist_detail = final_commit_map.get(detail.author_email)
                if exist_detail is None:
                    final_commit_map[detail.author_email] = detail
                else:
                    exist_detail.total += detail.total
                    exist_detail.additions += detail.additions
                    exist_detail.deletions += detail.deletions
                    final_commit_map[detail.author_email] = exist_detail
        if not final_commit_map:
            return
        project_info.commit_map = final_commit_map
        # Acquire the lock...
        lock.acquire()
        # ...because totalMap is mutated by every worker thread.
        self.totalMap[project_info.project_id] = project_info
        # Release the lock.
        lock.release()
        # After aggregation, write the result to a projectID+date CSV file.
        write_to_csv(
            "%s/GitStatic_%s/project/%s_%d.csv" % (
                export_path, t_from, project_info.path, project_info.project_id),
            final_commit_map, project_info.project_url)
    def get_commits(self, project_id, project_url, branch_name):
        """
        Fetch all commits of the given repository/branch within the window,
        then walk each commit to build the per-branch statistics.
        :param project_id:
        :param project_url:
        :param branch_name:
        :return:
        """
        since_date = date_from.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        until_date = date_end.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        url = '%s/api/v4/projects/%s/repository/commits?page=1&per_page=1000&ref_name=%s&since=%s&until=%s&private_token=%s' % (
            git_root_url, project_id, branch_name, since_date, until_date, git_token)
        r1 = requests.get(url)  # request the url (default SSL handling)
        r2 = r1.json()  # parse the JSON body
        if not r2:
            return
        print('start get_commits,projectID=%d,branch=%s,url=%s' %
              (project_id, branch_name, url))
        detail_map = {}
        for r3 in r2:
            commit_id = r3['id']
            if commit_id is None:
                continue
            # De-duplicate commits seen on multiple branches.
            if self.total_commit_map.get(commit_id) is None:
                self.total_commit_map[commit_id] = commit_id
            else:
                continue
            # Fetch the details of this single commit.
            detail = get_commit_detail(project_id, commit_id)
            if detail is None:
                continue
            if detail.total > 5000:
                # Commits touching >5000 lines are likely generated code
                # (scaffolding etc.); skip them.
                continue
            # Side channel, unrelated to the main flow: report commits whose
            # author account is not a normalised one.
            if detail.author_email in user_unknown:
                print("email %s projectid= %d,branchname,%s,url=%s" % (
                    detail.author_email, project_id, branch_name, project_url))
            # Aggregate the commit stats per author email.
            exist_detail = detail_map.get(detail.author_email)
            if exist_detail is None:
                detail_map[detail.author_email] = detail
            else:
                exist_detail.total += detail.total
                exist_detail.additions += detail.additions
                exist_detail.deletions += detail.deletions
                detail_map[detail.author_email] = exist_detail
        return detail_map
def get_commit_detail(project_id, commit_id):
    """
    Fetch the details of a single commit.
    :param project_id: project ID
    :param commit_id: id of the commit
    :return: a #CommitDetails object, or None for merge commits / missing stats
    """
    url = '%s/api/v4/projects/%s/repository/commits/%s?private_token=%s' \
          % (git_root_url, project_id, commit_id, git_token)
    r1 = requests.get(url)  # request the url (default SSL handling)
    r2 = r1.json()  # parse the JSON body
    # print(json.dumps(r2, ensure_ascii=False))
    author_name = r2['author_name']
    author_email = r2['author_email']
    stats = r2['stats']
    # Skip merge commits -- their line counts would be double-counted.
    if 'Merge branch' in r2['title']:
        return
    if stats is None:
        return
    # Map alias emails back to a canonical address, then to a display name.
    temp_mail = user_email_alias_mapping.get(author_email)
    if temp_mail is not None:
        author_email = temp_mail
    temp_name = user_email_name_mapping.get(author_email)
    if temp_name is not None:
        author_name = temp_name
    additions = stats['additions']
    deletions = stats['deletions']
    total = stats['total']
    # details = {'additions': additions, 'deletions': deletions, 'total': total, 'author_email': author_email,
    #            'author_name': author_name}
    details = CommitDetails()
    details.additions = additions
    details.deletions = deletions
    details.total = total
    details.author_email = author_email
    details.author_name = author_name
    return details
def make_dir_safe(file_path):
    """
    Utility: ensure the directory part of a path exists before writing.
    Fixes over the original: a bare filename (no '/') produced
    `os.makedirs("")` which raises FileNotFoundError; and the
    exists-then-makedirs pair was racy -- `exist_ok=True` is atomic.
    :param file_path: file path, or directory path ending with '/'
    :return:
    """
    if file_path.endswith("/"):
        folder_path = file_path
    else:
        # Slice up to and including the last '/'; empty when there is none.
        folder_path = file_path[0:file_path.rfind('/') + 1]
    if folder_path:
        os.makedirs(folder_path, exist_ok=True)
def write_to_csv(file_path, final_commit_map, extra):
    """
    Utility: write the aggregated commit statistics to a CSV file, creating
    parent directories on demand.
    Fixes over the original: dropped the redundant out.close() inside the
    `with` block, and the file is opened with an explicit utf-8 encoding so
    the Chinese header row survives regardless of the platform locale.
    :param file_path: destination file path
    :param final_commit_map: mapping of author email -> CommitDetails
    :param extra: value of the extra trailing header column
    :return:
    """
    make_dir_safe(file_path)
    with open(file_path, 'w', encoding='utf-8') as out:
        title = '%s,%s,%s,%s,%s,%s' % (
            "提交人邮箱", "提交人姓名", "总行数", "增加行数", "删除行数", extra)
        out.write(title + "\n")
        # print(title)
        # One row per author: email, name, total, additions, deletions.
        for key, value in final_commit_map.items():
            var = '%s,%s,%s,%s,%s' % (
                value.author_email, value.author_name, value.total, value.additions, value.deletions)
            out.write(var + '\n')
            # print(var)
class CommitDetails(json.JSONEncoder):
    """
    Struct holding one author's aggregated commit statistics.
    NOTE(review): subclassing json.JSONEncoder looks accidental -- instances
    are only used as plain data holders; confirm before changing the base.
    """
    # Class-level defaults; instances assign their own values attribute by
    # attribute after construction.
    author_name = None
    author_email = None
    additions = 0
    deletions = 0
    total = 0
class ProjectInfo(json.JSONEncoder):
    """
    Struct describing one GitLab project and its aggregated commit map.
    NOTE(review): subclassing json.JSONEncoder looks accidental here too --
    instances are plain data holders; confirm before changing the base.
    """
    # Class-level defaults; populated in get_projects()/get_branches().
    project_id = None
    project_desc = None
    project_url = None
    path = None
    name = None
    commit_map = None
if __name__ == '__main__':
    # Entry point: collect stats, zip the report directory, mail it, and
    # remove the reports only when the mail was sent successfully.
    gitlab4 = GitlabApiCountTrueLeTrue()
    gitlab4.get_projects()
    zip_dist('./dist/', "gitlab"+t_end + "-myarch.zip")
    s = sendmail("<EMAIL>", "mytest", "2<EMAIL>")
    res = s.send_email("gitlab"+t_end + "-myarch.zip")
    # NOTE(review): 'sucess' (sic) presumably matches the literal returned
    # by send_email -- verify against use_email before "fixing" the spelling.
    if res == 'sucess':
        shutil.rmtree('./dist/')
    else:
        print(res)
|
"""
Copyright 2022 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
from typing import List
from uuid import uuid4
from dp.protos.enodebd_dp_pb2 import CBSDStateResult, LteChannel
# Shared fixture defaults used by the builder below.
SOME_FCC_ID = "some_fcc_id"
USER_ID = "some_user_id"
UNREGISTERED = "unregistered"


class CbsdAPIDataBuilder:
    """Fluent builder for CBSD REST payloads and grant-state fixtures.

    Each ``with_*`` method mutates either ``self.payload`` (the dict sent to
    the API) or the expected-grant attributes, and returns ``self`` so calls
    can be chained.  Fix over the original: ``with_grant`` and
    ``build_grant_state_data`` used ``value or default``, which silently
    replaced a legitimate explicit ``0`` (e.g. ``max_eirp=0``) with the
    default; they now test ``is None`` instead.
    """

    def __init__(self):
        # Expected-grant defaults, used when with_grant() /
        # build_grant_state_data() get no explicit override.
        self.frequency_mhz = 3625
        self.bandwidth_mhz = 10
        self.max_eirp = 28
        self.grant_state = "authorized"
        # Base request payload; serial_number is randomized per builder.
        self.payload = {
            'fcc_id': SOME_FCC_ID,
            'serial_number': str(uuid4()),
            'user_id': USER_ID,
            'cbsd_category': 'b',
            'single_step_enabled': False,
        }

    # --- simple payload field setters (each returns self for chaining) ---

    def with_serial_number(self, serial_number: str) -> CbsdAPIDataBuilder:
        self.payload['serial_number'] = serial_number
        return self

    def with_fcc_id(self, fcc_id: str = SOME_FCC_ID) -> CbsdAPIDataBuilder:
        self.payload['fcc_id'] = fcc_id
        return self

    def with_cbsd_category(self, cbsd_category: str = "b") -> CbsdAPIDataBuilder:
        self.payload['cbsd_category'] = cbsd_category
        return self

    def with_latitude_deg(self, latitude_deg: float = 10.5) -> CbsdAPIDataBuilder:
        installation_param = self.payload.setdefault("installation_param", {})
        installation_param["latitude_deg"] = latitude_deg
        return self

    def with_longitude_deg(self, longitude_deg: float = 11.5) -> CbsdAPIDataBuilder:
        installation_param = self.payload.setdefault("installation_param", {})
        installation_param["longitude_deg"] = longitude_deg
        return self

    def with_antenna_gain(self, antenna_gain: int = 15) -> CbsdAPIDataBuilder:
        installation_param = self.payload.setdefault("installation_param", {})
        installation_param["antenna_gain"] = antenna_gain
        return self

    def with_indoor_deployment(self, indoor_deployment: bool = False) -> CbsdAPIDataBuilder:
        installation_param = self.payload.setdefault("installation_param", {})
        installation_param["indoor_deployment"] = indoor_deployment
        return self

    def with_full_installation_param(
        self,
        latitude_deg: float = 10.5,
        longitude_deg: float = 11.5,
        antenna_gain: int = 15,
        indoor_deployment: bool = True,
        height_m: float = 12.5,
        height_type: str = "agl",
    ) -> CbsdAPIDataBuilder:
        """Replace installation_param wholesale with a complete record."""
        self.payload["installation_param"] = {
            "latitude_deg": latitude_deg,
            "longitude_deg": longitude_deg,
            "antenna_gain": antenna_gain,
            "indoor_deployment": indoor_deployment,
            "height_m": height_m,
            "height_type": height_type,
        }
        return self

    def with_frequency_preferences(
        self,
        bandwidth_mhz: int = 20,
        frequencies_mhz: List[int] = None,
    ) -> CbsdAPIDataBuilder:
        """Set the requested bandwidth and preferred frequency list."""
        self.payload["frequency_preferences"] = {
            "bandwidth_mhz": bandwidth_mhz,
            "frequencies_mhz": frequencies_mhz or [],
        }
        return self

    def with_capabilities(self, max_power=20, min_power=0, number_of_antennas=2):
        self.payload['capabilities'] = {
            'max_power': max_power,
            'min_power': min_power,
            'number_of_antennas': number_of_antennas,
        }
        return self

    def with_desired_state(self, desired_state: str = "registered") -> CbsdAPIDataBuilder:
        self.payload["desired_state"] = desired_state
        return self

    def with_expected_grant(
        self, bandwidth_mhz: int = 10, frequency_mhz: int = 3625, max_eirp: int = 28,
        grant_state="authorized",
    ) -> CbsdAPIDataBuilder:
        """Record the grant values later defaults resolve against."""
        self.bandwidth_mhz = bandwidth_mhz
        self.frequency_mhz = frequency_mhz
        self.max_eirp = max_eirp
        self.grant_state = grant_state
        return self

    def with_grant(
        self, bandwidth_mhz: int = None, frequency_mhz: int = None, max_eirp: int = None, grant_state=None,
    ) -> CbsdAPIDataBuilder:
        """Attach a grant dict; None arguments fall back to the expected-grant
        attributes.  Explicit `is None` checks keep a legitimate 0 intact."""
        self.payload['grant'] = {
            'bandwidth_mhz': self.bandwidth_mhz if bandwidth_mhz is None else bandwidth_mhz,
            'frequency_mhz': self.frequency_mhz if frequency_mhz is None else frequency_mhz,
            'max_eirp': self.max_eirp if max_eirp is None else max_eirp,
            'state': self.grant_state if grant_state is None else grant_state,
        }
        return self

    def with_max_eirp(self, max_eirp: int = 28) -> CbsdAPIDataBuilder:
        self.payload['max_eirp'] = max_eirp
        return self

    def with_state(self, state: str = UNREGISTERED) -> CbsdAPIDataBuilder:
        self.payload['state'] = state
        return self

    def with_cbsd_id(self, cbsd_id: str) -> CbsdAPIDataBuilder:
        self.payload['cbsd_id'] = cbsd_id
        return self

    def with_is_active(self, is_active: bool) -> CbsdAPIDataBuilder:
        self.payload['is_active'] = is_active
        return self

    def with_single_step_enabled(self, enabled: bool) -> CbsdAPIDataBuilder:
        self.payload['single_step_enabled'] = enabled
        return self

    def build_grant_state_data(self, frequenzy_mhz=None, bandwidth_mhz=None, max_eirp=None) -> CBSDStateResult:
        """Build a CBSDStateResult whose LteChannel spans the grant.

        `frequenzy_mhz` (sic) keeps its misspelled name for backward
        compatibility with existing keyword callers.  None arguments fall
        back to the expected-grant attributes; explicit 0 is honoured.
        """
        if frequenzy_mhz is None:
            frequenzy_mhz = self.frequency_mhz
        if bandwidth_mhz is None:
            bandwidth_mhz = self.bandwidth_mhz
        if max_eirp is None:
            max_eirp = self.max_eirp
        # Channel bounds: centre frequency +/- half the bandwidth, in Hz.
        frequency_hz = int(1e6) * frequenzy_mhz
        half_bandwidth_hz = int(5e5) * bandwidth_mhz
        return CBSDStateResult(
            radio_enabled=True,
            channel=LteChannel(
                low_frequency_hz=frequency_hz - half_bandwidth_hz,
                high_frequency_hz=frequency_hz + half_bandwidth_hz,
                max_eirp_dbm_mhz=max_eirp,
            ),
        )
|
import argparse
import os
import random
import time
import warnings
import utils
import sys
import numpy as np
import pickle
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.models as models
from sklearn.metrics import normalized_mutual_info_score as nmi
from sklearn.metrics import adjusted_mutual_info_score as adjusted_nmi
from sklearn.metrics import adjusted_rand_score as adjusted_rand_index
from scipy.optimize import linear_sum_assignment
from sklearn.cluster import KMeans
# All torchvision model constructors (lowercase, callable, non-dunder),
# used to validate --arch.
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='Kmeans Evaluation')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
                    choices=model_names,
                    help='model architecture: ' +
                         ' | '.join(model_names) +
                         ' (default: resnet50)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--train-batch-size', default=256, type=int,
                    help='train set batch size')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--save-path', default='../saved/', type=str,
                    help='save path for checkpoints')
parser.add_argument('--pretrained', default=None, type=str,
                    help='path to pretrained checkpoint')
# Bare '%' characters in argparse help text must be doubled ('%%'):
# argparse %-interpolates help strings, so the original text made `--help`
# raise ValueError.  The stated defaults now track the real ones via
# %(default)s (the original claimed "default: 1" while defaulting to "10").
parser.add_argument('--label-subset', default="10", type=str, choices=["1", "10", "100"],
                    help='percentage of labeled data: 1%%, 10%% or 100%% (default: %(default)s)')
parser.add_argument('--num-classes', default=1000, type=int,
                    help='number of classes (1000 for ImageNet; default: %(default)s)')
parser.add_argument('--load-features', action='store_true',
                    help='use features from earlier dump (in args.save_path)')
parser.add_argument('--kk', default=1000, type=int,
                    help='number of clusters to use for kmeans (default: 1000)')
parser.add_argument('--model', default='mocov2', const='mocov2', nargs='?',
                    choices=['mocov2', 'swav', 'simsiam', 'barlowtwins', 'obow', 'dino'],
                    help='type of pretrained model (default: %(default)s)')
parser.add_argument('--backbone-dim', default=2048, type=int,
                    help='backbone dimension size (default: %(default)s)')
def main():
    """Entry point: load a pretrained backbone, extract features for an
    ImageNet-style dataset, then run the K-Means evaluation on them."""
    args = parser.parse_args()
    # create output directory
    os.makedirs(args.save_path, exist_ok=True)
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    # save log file
    sys.stdout = utils.PrintMultiple(sys.stdout, open(os.path.join(args.save_path, 'log.txt'), 'a+'))
    print(args)
    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.model == 'barlowtwins':
        model = torch.hub.load('facebookresearch/barlowtwins:main', 'resnet50')
    elif args.model == 'dino':
        model = torch.hub.load('facebookresearch/dino:main', 'dino_resnet50')
    else:
        model = models.__dict__[args.arch]()
    # Drop the classification head -- we want raw backbone features.
    model.fc = nn.Identity()
    # load from pre-trained, before DistributedDataParallel constructor
    if args.pretrained is not None:
        if os.path.isfile(args.pretrained):
            print("=> loading checkpoint '{}'".format(args.pretrained))
            checkpoint = torch.load(args.pretrained, map_location="cpu")
            # load state dictionary
            if args.model == 'swav':
                state_dict = checkpoint
            elif args.model == 'obow':
                state_dict = checkpoint['network']
            else:
                state_dict = checkpoint['state_dict']
            # fix prefix: strip each method's encoder prefix and drop the
            # projection/fc heads so the keys match the bare backbone
            for k in list(state_dict.keys()):
                if args.model == 'mocov2':
                    if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):
                        new_k = k[len('module.encoder_q.'):]
                        state_dict[new_k] = state_dict[k]
                        del state_dict[k]
                elif args.model == 'simsiam':
                    if k.startswith('module.encoder') and not k.startswith('module.encoder.fc'):
                        new_k = k[len('module.encoder.'):]
                        state_dict[new_k] = state_dict[k]
                        del state_dict[k]
                elif args.model == 'swav':
                    if k.startswith('module') and not k.startswith('module.projection_head'):
                        new_k = k[len('module.'):]
                        state_dict[new_k] = state_dict[k]
                        del state_dict[k]
                elif args.model == 'obow':
                    if k.startswith('fc'):
                        del state_dict[k]
            args.start_epoch = 0
            msg = model.load_state_dict(state_dict, strict=False)
            assert len(msg.missing_keys) == 0, "missing_keys: {}".format(msg.missing_keys)
            print("=> loaded pre-trained model '{}'".format(args.pretrained))
        else:
            print("=> no checkpoint found at '{}'".format(args.pretrained))
            return
    if not torch.cuda.is_available():
        print('using CPU, this will be slow')
    else:
        print('=> using {} GPUs.'.format(torch.cuda.device_count()))
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    # Standard ImageNet eval transform: resize, center-crop, normalize.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    train_dataset = utils.ImageFolderWithIndices(traindir, transform)
    if args.label_subset != "100":
        train_dataset = utils.imagenet_subset_samples(train_dataset, traindir, args.label_subset)  # extract subset
    val_dataset = utils.ImageFolderWithIndices(valdir, transform)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.train_batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.train_batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    if args.load_features:
        # Reuse features dumped by a previous run.
        train_features = np.load(os.path.join(args.save_path, "trainfeat.npy"))
        val_features = np.load(os.path.join(args.save_path, "valfeat.npy"))
        val_labels = np.load(os.path.join(args.save_path, "vallabels.npy"))
    else:
        train_features, _ = inference(train_loader, model, args, prefix='Train Set Inference: ')
        val_features, val_labels = inference(val_loader, model, args, prefix='Test Set Inference: ')
        # dump
        np.save(os.path.join(args.save_path, "trainfeat"), train_features)
        np.save(os.path.join(args.save_path, "valfeat"), val_features)
        np.save(os.path.join(args.save_path, "vallabels"), val_labels)
    # evaluate kmeans classifier
    print("Features are ready!\nEvaluate K-Means Classifier.")
    kmeans_classifier(train_features, val_features, val_labels, args)
@torch.no_grad()
def inference(loader, model, args, prefix):
    """Run the model over a loader and collect features and labels.

    Fix over the original: np.float / np.int were removed in NumPy 1.24;
    the arrays now use the explicit fixed-width dtypes those aliases
    resolved to.

    :param loader: DataLoader yielding (images, targets, indices); the
        indices scatter each batch into the right rows of the outputs.
    :param model: feature extractor, called as model(images)
    :param args: needs .backbone_dim (feature size) and .print_freq
    :param prefix: progress-line prefix string
    :return: (all_features, all_labels) numpy arrays of shapes
        (len(dataset), backbone_dim) and (len(dataset),)
    """
    all_features = np.zeros((len(loader.dataset), args.backbone_dim), dtype=np.float64)
    all_labels = np.zeros((len(loader.dataset), ), dtype=np.int64)
    batch_time = AverageMeter('Time', ':6.3f')
    progress = ProgressMeter(
        len(loader),
        [batch_time],
        prefix=prefix)
    # switch to evaluate mode
    model.eval()
    end = time.time()
    for i, (images, targets, indices) in enumerate(loader):
        if torch.cuda.is_available():
            images = images.cuda()
        # compute output
        output = model(images)
        # scatter this batch's features into their dataset positions
        all_features[indices] = output.detach().cpu().numpy()
        # save labels
        all_labels[indices] = targets.numpy()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.display(i)
    return all_features, all_labels
@torch.no_grad()
def kmeans_classifier(train_features, val_features, targets, args):
    """Fit K-Means on train features and evaluate clustering quality on val.

    Reports NMI / adjusted NMI / adjusted Rand-index and a clustering
    accuracy obtained by Hungarian matching of clusters to labels.
    :param train_features: (N_train, D) array used to fit the clusters
    :param val_features: (N_val, D) array to predict cluster ids for
    :param targets: (N_val,) ground-truth labels for the val set
    :param args: needs .kk (cluster count), .save_path and .model
    """
    # fit based on train set
    print('=> fitting K-Means classifier..')
    # NOTE(review): `n_jobs` was removed from sklearn's KMeans in 1.0 --
    # this call raises TypeError on modern scikit-learn; confirm the pinned
    # sklearn version before upgrading.
    kmeans = KMeans(n_clusters=args.kk, verbose=True, n_jobs=-1).fit(train_features)
    # save kmeans model
    print('=> saving K-Means classifier..')
    kmeans_save_path = os.path.join(args.save_path, args.model + '_kmeans.pkl')
    pickle.dump(kmeans, open(kmeans_save_path, "wb"))
    # predict
    preds = kmeans.predict(val_features)
    # evaluate
    val_nmi = nmi(targets, preds)
    val_adjusted_nmi = adjusted_nmi(targets, preds)
    val_adjusted_rand_index = adjusted_rand_index(targets, preds)
    print('=> number of samples: {}'.format(len(targets)))
    print('=> number of unique assignments: {}'.format(len(set(preds))))
    print('=> NMI: {:.3f}%'.format(val_nmi * 100.0))
    print('=> Adjusted NMI: {:.3f}%'.format(val_adjusted_nmi * 100.0))
    print('=> Adjusted Rand-Index: {:.3f}%'.format(val_adjusted_rand_index * 100.0))
    # compute accuracy
    num_classes = max(targets.max(), preds.max()) + 1
    # Confusion counts: rows = predicted cluster, cols = true label.
    count_matrix = np.zeros((num_classes, num_classes), dtype=np.int32)
    for ii in range(preds.shape[0]):
        count_matrix[preds[ii], targets[ii]] += 1
    # Hungarian assignment maximising the matched counts.
    reassignment = np.dstack(linear_sum_assignment(count_matrix.max() - count_matrix))[0]
    # NOTE(review): with over-clustering this indexes `reassignment` by the
    # raw cluster id and the end-exclusive range skips cluster preds.max()
    # -- looks off-by-one; confirm the intended behaviour.
    if preds.max() > targets.max():  # if using over-clustering, append remaining clusters to best option
        for cls_idx in range(targets.max(), preds.max()):
            reassignment[cls_idx, 1] = count_matrix[cls_idx].argmax()
    acc = count_matrix[reassignment[:, 0], reassignment[:, 1]].sum().astype(np.float32) / preds.shape[0]
    print('=> Accuracy: {:.3f}%'.format(acc * 100.0))
class AverageMeter(object):
    """Tracks the most recent value and a running average of a metric."""
    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        # val: last observation; avg = sum / count over all observations.
        self.val = self.avg = self.sum = self.count = 0
    def update(self, val, n=1):
        """Fold in `val`, observed `n` times, and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
    def __str__(self):
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Prints '[batch/total]' progress lines followed by each meter."""
    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix
    def display(self, batch):
        """Print one tab-separated progress line for `batch`."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))
    def _get_batch_fmtstr(self, num_batches):
        """Return e.g. '[{:3d}/100]', sized to the total's digit count."""
        width = len(str(num_batches // 1))
        placeholder = '{:' + str(width) + 'd}'
        return '[' + placeholder + '/' + placeholder.format(num_batches) + ']'
if __name__ == '__main__':
    # Script entry point.
    main()
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
from unittest import mock
from google.api_core.gapic_v1.method import DEFAULT
from google.cloud.monitoring_v3 import AlertPolicy, NotificationChannel
from airflow.providers.google.cloud.operators.stackdriver import (
StackdriverDeleteAlertOperator,
StackdriverDeleteNotificationChannelOperator,
StackdriverDisableAlertPoliciesOperator,
StackdriverDisableNotificationChannelsOperator,
StackdriverEnableAlertPoliciesOperator,
StackdriverEnableNotificationChannelsOperator,
StackdriverListAlertPoliciesOperator,
StackdriverListNotificationChannelsOperator,
StackdriverUpsertAlertOperator,
StackdriverUpsertNotificationChannelOperator,
)
# Shared fixtures for the Stackdriver operator tests below.
TEST_TASK_ID = 'test-stackdriver-operator'
TEST_FILTER = 'filter'
TEST_ALERT_POLICY_1 = {
    "combiner": "OR",
    "name": "projects/sd-project/alertPolicies/12345",
    "enabled": True,
    "display_name": "test display",
    "conditions": [
        {
            "condition_threshold": {
                "comparison": "COMPARISON_GT",
                # Bug fix: key was misspelled "alignment_eriod"; the Monitoring
                # API field (and TEST_ALERT_POLICY_2 below) use "alignment_period".
                "aggregations": [{"alignment_period": {'seconds': 60}, "per_series_aligner": "ALIGN_RATE"}],
            },
            "display_name": "Condition display",
            "name": "projects/sd-project/alertPolicies/123/conditions/456",
        }
    ],
}
TEST_ALERT_POLICY_2 = {
    "combiner": "OR",
    "name": "projects/sd-project/alertPolicies/6789",
    "enabled": False,
    "display_name": "test display",
    "conditions": [
        {
            "condition_threshold": {
                "comparison": "COMPARISON_GT",
                "aggregations": [{"alignment_period": {'seconds': 60}, "per_series_aligner": "ALIGN_RATE"}],
            },
            "display_name": "Condition display",
            "name": "projects/sd-project/alertPolicies/456/conditions/789",
        }
    ],
}
TEST_NOTIFICATION_CHANNEL_1 = {
    "displayName": "sd",
    "enabled": True,
    "labels": {"auth_token": "<PASSWORD>", "channel_name": "#channel"},
    "name": "projects/sd-project/notificationChannels/12345",
    "type": "slack",
}
TEST_NOTIFICATION_CHANNEL_2 = {
    "displayName": "sd",
    "enabled": False,
    "labels": {"auth_token": "<PASSWORD>", "channel_name": "#channel"},
    "name": "projects/sd-project/notificationChannels/6789",
    "type": "slack",
}
class TestStackdriverListAlertPoliciesOperator(unittest.TestCase):
    """Verifies the list-alert-policies operator delegates to the hook."""

    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        hook = mock_hook.return_value
        hook.list_alert_policies.return_value = [AlertPolicy(name="test-name")]
        op = StackdriverListAlertPoliciesOperator(task_id=TEST_TASK_ID, filter_=TEST_FILTER)
        returned = op.execute(None)
        hook.list_alert_policies.assert_called_once_with(
            project_id=None,
            filter_=TEST_FILTER,
            format_=None,
            order_by=None,
            page_size=None,
            retry=DEFAULT,
            timeout=DEFAULT,
            metadata=None,
        )
        expected = [
            {
                'combiner': 0,
                'conditions': [],
                'display_name': '',
                'name': 'test-name',
                'notification_channels': [],
                'user_labels': {},
            }
        ]
        assert returned == expected
class TestStackdriverEnableAlertPoliciesOperator(unittest.TestCase):
    """Verifies the enable-alert-policies operator delegates to the hook."""

    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        op = StackdriverEnableAlertPoliciesOperator(task_id=TEST_TASK_ID, filter_=TEST_FILTER)
        op.execute(None)
        mock_hook.return_value.enable_alert_policies.assert_called_once_with(
            filter_=TEST_FILTER,
            project_id=None,
            retry=DEFAULT,
            timeout=DEFAULT,
            metadata=None,
        )
class TestStackdriverDisableAlertPoliciesOperator(unittest.TestCase):
    """Verifies the disable-alert-policies operator delegates to the hook."""

    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        op = StackdriverDisableAlertPoliciesOperator(task_id=TEST_TASK_ID, filter_=TEST_FILTER)
        op.execute(None)
        mock_hook.return_value.disable_alert_policies.assert_called_once_with(
            filter_=TEST_FILTER,
            project_id=None,
            retry=DEFAULT,
            timeout=DEFAULT,
            metadata=None,
        )
class TestStackdriverUpsertAlertsOperator(unittest.TestCase):
    """Verifies the upsert-alert operator forwards the alert payload."""

    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        payload = json.dumps({"policies": [TEST_ALERT_POLICY_1, TEST_ALERT_POLICY_2]})
        op = StackdriverUpsertAlertOperator(task_id=TEST_TASK_ID, alerts=payload)
        op.execute(None)
        mock_hook.return_value.upsert_alert.assert_called_once_with(
            alerts=payload,
            project_id=None,
            retry=DEFAULT,
            timeout=DEFAULT,
            metadata=None,
        )
class TestStackdriverDeleteAlertOperator(unittest.TestCase):
    """Verifies the delete-alert operator forwards the policy name."""

    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        op = StackdriverDeleteAlertOperator(task_id=TEST_TASK_ID, name='test-alert')
        op.execute(None)
        mock_hook.return_value.delete_alert_policy.assert_called_once_with(
            name='test-alert',
            retry=DEFAULT,
            timeout=DEFAULT,
            metadata=None,
        )
class TestStackdriverListNotificationChannelsOperator(unittest.TestCase):
    """Verifies the list-notification-channels operator delegates to the hook."""

    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        hook = mock_hook.return_value
        hook.list_notification_channels.return_value = [NotificationChannel(name="test-123")]
        op = StackdriverListNotificationChannelsOperator(task_id=TEST_TASK_ID, filter_=TEST_FILTER)
        returned = op.execute(None)
        hook.list_notification_channels.assert_called_once_with(
            project_id=None,
            filter_=TEST_FILTER,
            format_=None,
            order_by=None,
            page_size=None,
            retry=DEFAULT,
            timeout=DEFAULT,
            metadata=None,
        )
        base_channel = {
            'description': '',
            'display_name': '',
            'labels': {},
            'name': 'test-123',
            'type_': '',
            'user_labels': {},
            'verification_status': 0,
        }
        # Depending on the version of google-apitools installed we might receive
        # the response either with or without mutation_records.
        with_records = dict(base_channel, mutation_records=[])
        assert returned in [[base_channel], [with_records]]
class TestStackdriverEnableNotificationChannelsOperator(unittest.TestCase):
    """Verifies the enable-notification-channels operator delegates to the hook."""

    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        op = StackdriverEnableNotificationChannelsOperator(task_id=TEST_TASK_ID, filter_=TEST_FILTER)
        op.execute(None)
        mock_hook.return_value.enable_notification_channels.assert_called_once_with(
            filter_=TEST_FILTER,
            project_id=None,
            retry=DEFAULT,
            timeout=DEFAULT,
            metadata=None,
        )
class TestStackdriverDisableNotificationChannelsOperator(unittest.TestCase):
    """Verifies the disable-notification-channels operator delegates to the hook."""

    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        op = StackdriverDisableNotificationChannelsOperator(task_id=TEST_TASK_ID, filter_=TEST_FILTER)
        op.execute(None)
        mock_hook.return_value.disable_notification_channels.assert_called_once_with(
            filter_=TEST_FILTER,
            project_id=None,
            retry=DEFAULT,
            timeout=DEFAULT,
            metadata=None,
        )
class TestStackdriverUpsertChannelOperator(unittest.TestCase):
    """Verifies the upsert-notification-channel operator forwards the payload."""

    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        payload = json.dumps({"channels": [TEST_NOTIFICATION_CHANNEL_1, TEST_NOTIFICATION_CHANNEL_2]})
        op = StackdriverUpsertNotificationChannelOperator(task_id=TEST_TASK_ID, channels=payload)
        op.execute(None)
        mock_hook.return_value.upsert_channel.assert_called_once_with(
            channels=payload,
            project_id=None,
            retry=DEFAULT,
            timeout=DEFAULT,
            metadata=None,
        )
class TestStackdriverDeleteNotificationChannelOperator(unittest.TestCase):
    """Verifies the delete-notification-channel operator forwards the name."""

    @mock.patch('airflow.providers.google.cloud.operators.stackdriver.StackdriverHook')
    def test_execute(self, mock_hook):
        op = StackdriverDeleteNotificationChannelOperator(task_id=TEST_TASK_ID, name='test-channel')
        op.execute(None)
        mock_hook.return_value.delete_notification_channel.assert_called_once_with(
            name='test-channel',
            retry=DEFAULT,
            timeout=DEFAULT,
            metadata=None,
        )
|
# generate.py
import os
import json
import glob
import torch
import numpy as np
import faiss
import PIL
from CLIP import clip
from argparse import ArgumentParser
from pathlib import Path
from tqdm import tqdm
from PIL import Image
from torch.utils.data import DataLoader
from torchvision import transforms as T
import model
import retrofit
class dotdict(dict):
    """Dictionary whose keys are also reachable as attributes.

    Missing attributes resolve to None (mirroring dict.get); attribute
    assignment and deletion operate on the underlying mapping.
    """

    def __getattr__(self, key):
        # Only called when normal attribute lookup fails, so real dict
        # methods are unaffected.
        return self.get(key)

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        del self[key]
def dl_collate_fn(batch):
    """Collate (image_tensor, cap_path) pairs: stack tensors, keep paths as a list."""
    images = torch.stack([item[0] for item in batch])
    cap_paths = [item[1] for item in batch]
    return images, cap_paths
class ImageDataset(torch.utils.data.Dataset):
    """Folder-backed image dataset for captioning.

    Recursively collects png/jpg/jpeg/bmp files; each item yields the
    CLIP-preprocessed image tensor together with the path of the sidecar
    ``.cap`` file that captions will be written to.
    """

    def __init__(self,
                 folder: str,
                 image_size=224,
                 shuffle=False):
        super().__init__()
        path = Path(folder)
        image_files = sorted([
            *path.glob('**/*.png'), *path.glob('**/*.jpg'),
            *path.glob('**/*.jpeg'), *path.glob('**/*.bmp')
        ])
        self.image_files = {image_file.stem: image_file for image_file in image_files}
        self.keys = list(self.image_files.keys())
        # Bug fix: skip_sample() read self.shuffle, which was never assigned,
        # so hitting a corrupt image raised AttributeError. Defaulting to
        # False preserves the previous sequential-skip intent.
        self.shuffle = shuffle
        # CLIP preprocessing: resize/crop, force RGB, normalize with the
        # CLIP mean/std constants.
        self.image_transform = T.Compose([
            T.Resize(image_size, interpolation=T.InterpolationMode.BICUBIC),
            T.CenterCrop(image_size),
            T.Lambda(self.fix_img),
            T.ToTensor(),
            T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
        ])

    def __len__(self):
        return len(self.keys)

    def fix_img(self, img):
        # Source images may be grayscale/RGBA; the transform needs 3-channel RGB.
        return img.convert('RGB') if img.mode != 'RGB' else img

    def random_sample(self):
        # Bug fix: referenced by skip_sample() but previously undefined.
        return self.__getitem__(np.random.randint(0, self.__len__()))

    def sequential_sample(self, ind):
        """Return the next item, wrapping to index 0 at the end."""
        if ind >= self.__len__() - 1:
            return self.__getitem__(0)
        return self.__getitem__(ind + 1)

    def skip_sample(self, ind):
        """Replace an unloadable item with a random or the next item."""
        if self.shuffle:
            return self.random_sample()
        return self.sequential_sample(ind=ind)

    def __getitem__(self, ind):
        key = self.keys[ind]
        image_file = self.image_files[key]
        cap_file = image_file.with_suffix('.cap')
        try:
            image_tensor = self.image_transform(PIL.Image.open(image_file))
        except (PIL.UnidentifiedImageError, OSError) as corrupt_image_exceptions:
            # Corrupt/unreadable image: report and substitute another sample.
            print(f"An exception occurred trying to load file {image_file}.")
            print(f"Skipping index {ind}")
            return self.skip_sample(ind)
        return image_tensor, cap_file
def clip_rescoring(args, net, candidates, x):
    """Rank candidate captions by CLIP similarity to the image embedding.

    :param args: run configuration; uses args.device and args.num_return_sequences.
    :param net: model wrapper exposing a CLIP perceiver for text encoding.
    :param candidates: list of candidate caption strings.
    :param x: image embedding(s); the caller passes x[None, :].
    :return: candidates reordered by descending similarity.
    """
    # Encode and L2-normalize the candidate captions.
    textemb = net.perceiver.encode_text(
        clip.tokenize(candidates).to(args.device)).float()
    textemb /= textemb.norm(dim=-1, keepdim=True)
    # Dot-product similarity; assumes x is already normalized -- TODO confirm.
    similarity = x @ textemb.T
    _, indices = similarity[0].topk(args.num_return_sequences)
    # NOTE(review): indices[0] is only iterable if x carries a batch dimension
    # beyond the one added by the caller; verify the embedding shape here.
    return [candidates[idx] for idx in indices[0]]
def caption_image(table, x, args, net):
    """Generate candidate captions for one image and rescore them with CLIP.

    :param table: single-element list holding the retrieval "table" prompt text.
    :param x: image embedding(s) used for CLIP rescoring.
    :param args: generation settings (maxlen, sampling params, num_captions, ...).
    :param net: wrapper exposing tokenizer, generator model and CLIP perceiver.
    :return: the best args.num_captions captions.
    """
    captions = []
    # Tokenize the prompt and drop the last token (presumably EOS -- TODO
    # confirm) so generation continues from the prompt.
    table = net.tokenizer.encode(table[0], return_tensors='pt').to(args.device)
    table = table.squeeze()[:-1].unsqueeze(0)
    out = net.model.generate(table,
                             max_length=args.maxlen,
                             do_sample=args.do_sample,
                             num_beams=args.num_beams,
                             temperature=args.temperature,
                             top_p=args.top_p,
                             num_return_sequences=args.num_return_sequences)
    candidates = []
    for seq in out:
        decoded = net.tokenizer.decode(seq, skip_special_tokens=True)
        # Prompt and caption are separated by '|||'; keep the caption part.
        decoded = decoded.split('|||')[1:][0].strip()
        candidates.append(decoded)
    captions = clip_rescoring(args, net, candidates, x[None,:])
    return captions[:args.num_captions]
def captioner(args, net):
    """Caption every image under args.image_dir.

    For each image a retrieval context is built via net.build_table and the
    generated captions are written to the image's sidecar .cap file, one
    caption per line.
    """
    dataset = ImageDataset(folder=args.image_dir)
    data = DataLoader(dataset,
                      batch_size=args.batch_size,
                      shuffle=False,
                      num_workers=args.nworkers,
                      pin_memory=True,
                      collate_fn=dl_collate_fn,
                      prefetch_factor=2)
    print('Captioning images...')
    for imgs, paths in tqdm(data):
        # One shared free-text context per image in the batch (may be None).
        ctx = [args.context] * len(imgs)
        table, x = net.build_table(imgs.half().to(args.device),
                                   net.perceiver,
                                   ctx=ctx,
                                   indices=net.indices,
                                   indices_data=net.indices_data,
                                   knn=args.knn,
                                   tokenize=clip.tokenize,
                                   device=args.device,
                                   is_image=True,
                                   return_images=True)
        for idx in range(len(table)):
            # NOTE(review): the full batch embedding x is passed for every
            # image rather than x[idx] -- verify this is intended.
            captions = caption_image([table[idx]], x, args, net)
            result = ''.join(f'{captions[i]}\n' for i in range(len(captions)))
            paths[idx].write_text(result)
def main():
    """CLI entry point: load FAISS indices and the model, then caption images."""
    parser = ArgumentParser()
    parser.add_argument('--config', type=str)
    parser.add_argument('--image_dir', type=str)
    parser.add_argument('--index_dirs', type=str, default=None)
    parser.add_argument('--context', type=str, default=None)
    parser.add_argument('--clip_model', type=str, default='ViT-B/16')
    parser.add_argument('--device', type=str, default='cuda:0')
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--knn', type=int, default=5)
    parser.add_argument('--maxlen', type=int, default=64)
    parser.add_argument('--nworkers', type=int, default=4)
    parser.add_argument('--num_return_sequences', type=int, default=250)
    parser.add_argument('--num_beams', type=int, default=1)
    parser.add_argument('--temperature', type=float, default=0.8)
    parser.add_argument('--top_p', type=float, default=0.9)
    # NOTE: type=bool is a known argparse pitfall (any non-empty string,
    # including "False", parses as True); kept as-is for CLI compatibility.
    parser.add_argument('--do_sample', type=bool, default=True)
    parser.add_argument('--num_captions', type=int, default=5)
    args = parser.parse_args()

    print('Loading indices...')
    indices = []
    indices_data = []
    # Bug fix: --index_dirs defaults to None, and None.split(',') crashed
    # with a TypeError when the flag was omitted.
    index_dirs = args.index_dirs.split(',') if args.index_dirs else []
    index_dirs = list(filter(lambda t: len(t) > 0, index_dirs))
    for index_dir in index_dirs:
        # Each index dir holds args.txt (build settings), entries.txt (one
        # retrieval entry per line) and a single FAISS *.index file.
        fname = os.path.join(index_dir, 'args.txt')
        with open(fname, 'r') as f:
            index_args = dotdict(json.load(f))
        entries = []
        fname = os.path.join(index_dir, 'entries.txt')
        with open(fname, 'r') as f:
            entries.extend([line.strip() for line in f])
        indices_data.append(entries)
        indices.append(faiss.read_index(glob.glob(f"{index_dir}/*.index")[0]))

    print('Loading model...')
    config = dotdict(torch.load(args.config))
    net = retrofit.load_params(config).to(args.device).half()
    net.indices = indices
    net.indices_data = indices_data

    # Generate captions
    captioner(args, net)
if __name__ == '__main__':
main() |
"""
@Time: 2021/1/20 16:59
@Author:
@File: RdfUtils.py
"""
from typing import List, Tuple, Dict, Union, Iterable
from rdflib import Graph, RDF
from rdflib.term import URIRef, Literal, BNode, Identifier
from pyfuseki import config
from pyfuseki.term import RDFList, Subject, Predicate, Object
from pyfuseki.ontology_mapper import BaseRdfPrefixEnum, GlobalNamespaceManager
def bind_prefixes_to_graph(graph: Graph, prefixes: Iterable[BaseRdfPrefixEnum]) -> None:
    """Bind every RDFPrefix enum in ``prefixes`` to ``graph``.

    :param graph: an rdflib ``Graph`` instance
    :param prefixes: iterable of RDFPrefix enums (name -> namespace value)
    :return: None
    """
    # Bug fix: the original tested ``Graph is None`` (the imported class,
    # which is never None) instead of the ``graph`` argument, so the guard
    # never fired.
    if graph is None or prefixes is None:
        return
    for prefix in prefixes:
        graph.namespace_manager.bind(prefix.name, prefix.value)
def add_list_to_graph(graph: Graph, spo_list: RDFList) -> None:
    """Add a list of SPO triples to ``graph`` in a single call.

    :param graph: the graph that receives the triples
    :param spo_list: list of (subject, predicate, object) triples
    :return: None

    Examples
    --------
    >>> g = Graph()
    >>> spo_list = [
    ...     (URIRef('http://www.ifa.com#Firm/tencent'), URIRef('http://www.ifa.com#hasName'), Literal('腾讯', datatype=XSD.string)),
    ...     (URIRef('http://www.ifa.com#Firm/tencent'), URIRef('http://www.ifa.com#hasApp'), URIRef('http://www.ifa.com#App/wechat'))
    ... ]
    >>> add_list_to_graph(g, spo_list)
    """
    # addN() takes quads; tag every triple with this graph as its context.
    graph.addN((s, p, o, graph) for s, p, o in spo_list)
def add_dict_to_graph(graph: Graph, s: Subject,
                      po_dict: Dict[Predicate, Object]) -> None:
    """Add every predicate-object pair of ``po_dict`` for subject ``s``.

    :param graph: the graph that receives the triples
    :param s: URIRef of the shared subject
    :param po_dict: mapping of predicate -> object
    :return: None

    Examples
    --------
    >>> from rdflib import XSD, Graph, Literal, URIRef
    >>>
    >>> g = Graph()
    >>> po_dict = {
    ...     URIRef('http://www.ifa.com#hasName'): Literal('腾讯', datatype=XSD.string),
    ...     URIRef('http://www.ifa.com#hasApp'): URIRef('http://www.ifa.com#App/wechat')
    ... }
    >>> s = URIRef('http://www.ifa.com#Firm/tencent')
    >>> add_dict_to_graph(g, s, po_dict)
    """
    # addN() takes quads; tag every triple with this graph as its context.
    graph.addN((s, predicate, obj, graph) for predicate, obj in po_dict.items())
def make_all_type_rel(rdf_graph: Graph, COMMON_PREFIX: str = None):
    """Yield rdf:type triples (as strings) for every subject and object in the graph.

    :param rdf_graph: graph whose subjects/objects are inspected
    :param COMMON_PREFIX: namespace prefix; defaults to config.COMMON_PREFIX
    :return: generator of N-Triples-style strings, one per type relation

    Examples
    --------
    >>> for rel in make_all_type_rel(graph):
    ...     print(rel)
    """
    if COMMON_PREFIX is None:
        COMMON_PREFIX = config.COMMON_PREFIX
    global_nm = GlobalNamespaceManager.get()
    rdf_type_rel = RDF.term('type')
    def extract_type_rel_from_identifier(identifier: Union[BNode, URIRef]):
        """Build the rdf:type triple string for one identifier.

        The identifier's namespace (minus any trailing '/') is taken as its
        type URI.
        """
        typename = str(global_nm.compute_qname(identifier)[1])
        if typename[-1] == '/':
            typename = typename[:-1]
        return f'{identifier.n3()} {rdf_type_rel.n3()} {URIRef(typename).n3()}.'
    for s, o in rdf_graph.subject_objects():
        # compute_qname raises ValueError for identifiers it cannot split;
        # those are silently skipped.
        try:
            yield extract_type_rel_from_identifier(s)
        except ValueError:
            pass
        try:
            yield extract_type_rel_from_identifier(o)
        except ValueError:
            pass
def convert_graph_to_insert_sparql(rdf_graph: Graph, auto_gen_type_rel: bool=False) -> str:
    """Convert a graph into a single SPARQL ``INSERT DATA`` statement.

    :param rdf_graph: the graph to convert
    :param auto_gen_type_rel: also generate rdf:type relations automatically
    :return: the SPARQL statement
    :raises ValueError: if ``rdf_graph`` is None
    """
    # Validate the argument
    if rdf_graph is None:
        raise ValueError
    # Optionally derive rdf:type relations and merge them into the graph
    if auto_gen_type_rel:
        type_rel_graph = create_type_rel_graph(rdf_graph)
        rdf_graph += type_rel_graph
    # Serialize the triples already present in the graph
    spo_str = '\n'.join(
        [f'{s.n3()} {p.n3()} {o.n3()}.' for (s, p, o) in rdf_graph]
    )
    return f"""
    INSERT DATA
    {{
        {spo_str}
    }}
    """
def create_type_rel_graph(g: Graph) -> Graph:
    """Build a graph of rdf:type triples for every URIRef appearing in ``g``.

    An entity's type URI is its own URI truncated at the last '/'.
    """
    type_rel_graph = Graph()
    entity_set = set()  # guards against processing the same URI twice
    def add_uri_to_rel_graph(uri: URIRef):
        """Derive the rdf:type triple for ``uri`` and add it to type_rel_graph."""
        uri_s = str(uri)
        if uri_s in entity_set:
            return
        entity_set.add(uri_s)
        type_rel_graph.add((uri, RDF.type, URIRef(uri_s[:uri_s.rfind('/')])))
    for s, p, o in g:
        # NOTE(review): this skips triples whose *object* is rdf:type;
        # presumably the predicate (p) was meant -- verify intent.
        if o == RDF.type:
            continue
        if type(s) is URIRef:
            add_uri_to_rel_graph(s)
        if type(o) is URIRef:
            add_uri_to_rel_graph(o)
    return type_rel_graph
# gh_stars: 1-10 (repository metadata)
import argparse
import datetime
import json
import os
import requests
import sys
from pycrits import pycrits
from configparser import ConfigParser
# Crits vocabulary
from vocabulary.indicators import IndicatorTypes as it
class OTX2CRITs(object):
    """Imports AlienVault OTX pulses into CRITs.

    Each subscribed pulse becomes a CRITs Event (tagged with the pulse id as
    a ticket so re-runs skip pulses already imported) plus one Indicator per
    supported OTX indicator type; relationships are then forged between the
    event and its indicators.
    """

    def __init__(self, dev=False, config=None, days=None):
        """Read configuration and build the pycrits API client.

        :param dev: talk to the dev CRITs instance instead of prod.
        :param config: explicit config file path; see load_config() for the
            fallback search order.
        :param days: only import pulses modified in the last ``days`` days.
        """
        # Load the configuration
        self.config = self.load_config(config)
        # OTX API settings
        self.otx_api_key = self.config.get('otx', 'otx_api_key')
        self.otx_url = self.config.get('otx', 'otx_url')
        self.proxies = {
            'http' : self.config.get('proxy', 'http'),
            'https' : self.config.get('proxy', 'https'),
        }
        # CRITs API settings (prod by default, dev when requested)
        self.crits_url = self.config.get('crits', 'prod_url')
        self.crits_dev_url = self.config.get('crits', 'dev_url')
        self.crits_username = self.config.get('crits', 'username')
        self.crits_api_key = self.config.get('crits', 'prod_api_key')
        self.crits_dev_api_key = self.config.get('crits', 'dev_api_key')
        self.crits_verify = self.config.getboolean('crits', 'verify')
        self.crits_source = self.config.get('crits', 'source')
        if dev:
            self.crits_url = self.crits_dev_url
            self.crits_api_key = self.crits_dev_api_key
        # Strip a trailing slash so URL formatting below stays consistent.
        if self.crits_url[-1] == '/':
            self.crits_url = self.crits_url[:-1]
        self.crits_proxies = {
            'http' : self.config.get('crits', 'crits_proxy'),
            'https' : self.config.get('crits', 'crits_proxy'),
        }
        self.modified_since = None
        if days:
            print('Searching for pulses modified in the last {} '
                  'days'.format(days))
            self.modified_since = datetime.datetime.now()\
                - datetime.timedelta(days=days)
        # Get pycrits ready for magic
        self.crits = pycrits(self.crits_url, self.crits_username,
                             self.crits_api_key, proxies=self.crits_proxies,
                             verify=self.crits_verify)

    def execute(self):
        """Fetch subscribed pulses and import each new one into CRITs."""
        for pulse in self.get_pulse_generator(modified_since=self.modified_since,
                                              proxies=self.proxies):
            # Indicator ids created for this pulse; related to the event below.
            relationship_map = []
            print('Found pulse with id {} and title {}'.format(pulse['id'],
                  pulse['name'].encode("utf-8")))
            if self.is_pulse_in_crits(pulse['id']):
                print('Pulse was already in CRITs')
                continue
            print('Adding pulse {} to CRITs.'.format(pulse['name'].encode("utf-8")))
            # Get the actual indicator and event data from the pulse
            indicator_data = pulse['indicators']
            event_title = pulse['name']
            # Bug fix: the original assigned reference = '' and then tested
            # that empty string, so pulse['references'] was never used.
            references = pulse.get('references')
            if references:
                reference = references[0]
            else:
                reference = 'No reference documented'
            description = pulse['description']
            bucket_list = pulse['tags']
            # CRITs requires a description
            if description == '':
                description = 'No description given.'
            # Create the CRITs event first
            print('Adding Event to CRITs with title {}'.format(event_title.encode("utf-8")))
            params = {
                'bucket_list' : ','.join(bucket_list),
                'description' : description,
                'reference' : reference,
                'method' : 'otx2crits',
            }
            event = self.build_crits_event(event_title, self.crits_source,
                                           description, params=params)
            if 'id' not in event:
                print('id not found in event object returned from crits!')
                print('Event object was: {}'.format(repr(event)))
                print('Skipping event: {}.'.format(event_title))
                continue
            event_id = event['id']
            # Add a ticket to the Event to track the pulse_id
            # This goes above the indicators because sometimes adding
            # indicators fails and we end up with many duplicate events.
            print('Adding ticket to Event {}'.format(event_title.encode("utf-8")))
            params = {
                'api_key' : self.crits_api_key,
                'username' : self.crits_username,
            }
            success = self.add_ticket_to_crits_event(event_id, pulse['id'],
                                                     params=params,
                                                     proxies=self.crits_proxies,
                                                     verify=self.crits_verify)
            if not success:
                print('Forging on after a ticket error.')
            # Add the indicators to CRITs
            mapping = self.get_indicator_mapping()
            for i in indicator_data:
                # Reuse the params from creating the event
                if i['type'] in mapping:
                    _type = mapping[i['type']]
                else:
                    # We found an indicator with a type we don't support.
                    print("We don't support type {}".format(i['type']))
                    continue
                if _type is None:
                    # Known type that is deliberately not imported.
                    continue
                result = self.add_crits_indicator(i['indicator'], _type,
                                                  self.crits_source,
                                                  params=params)
                if result:
                    print('Indicator created: {}'.format(result))
                    indicator_id = result['id']
                    print('Indicator created with id: '
                          '{}'.format(indicator_id))
                    relationship_map.append(indicator_id)
            # Build the relationships between the event and indicators
            print('Building relationships.')
            for _id in relationship_map:
                self.build_crits_relationship(event_id, _id, params=params,
                                              proxies=self.crits_proxies,
                                              verify=self.crits_verify)

    def parse_config(self, location):
        '''
        Parses the otx config file from the given location.

        :return: a ConfigParser, or False when the file is missing/invalid.
        '''
        try:
            config = ConfigParser()
            config.read(location)
        except Exception as e:
            print('Error parsing config: {}'.format(e))
            return False
        # ConfigParser.read() silently ignores missing files; an empty
        # section list is the reliable "not found" signal.
        if len(config.sections()) == 0:
            print('Configuration file not found: {}'.format(location))
            return False
        return config

    def load_config(self, given_location=None):
        '''
        This checks several locations for the config file if a location is not
        provided.
        1) OTX_CONFIG_FILE environment variable
        2) ~/.otx_config
        '''
        # given_location
        if given_location:
            return self.parse_config(given_location)
        # environment variable
        CONFIG_FILE = os.environ.get('OTX_CONFIG_FILE', None)
        if CONFIG_FILE:
            return self.parse_config(CONFIG_FILE)
        # Final attempt
        CONFIG_FILE = os.path.join(os.path.expanduser('~'), '.otx_config')
        return self.parse_config(CONFIG_FILE)

    def get_indicator_mapping(self):
        '''
        Maps OTX indicator type names to CRITs indicator types.

        Indicators with no matching type are mapped to None and skipped
        by execute().
        '''
        mapping = {
            'FileHash-SHA256': it.SHA256,
            'FileHash-SHA1': it.SHA1,
            'URI': it.URI,
            'URL': it.URI,
            'hostname': it.DOMAIN,
            'domain': it.DOMAIN,
            'IPv4': it.IPV4_ADDRESS,
            'IPv6': it.IPV6_ADDRESS,
            'email': it.EMAIL_ADDRESS,
            'Email': it.EMAIL_ADDRESS,
            'filepath': it.FILE_PATH,
            'Filepath': it.FILE_PATH,
            'FilePath': it.FILE_PATH,
            'FileHash-MD5': it.MD5,
            'Imphash': it.IMPHASH,
            'PEhash': None,
            'CIDR': it.IPV4_SUBNET,
            'mutex': it.MUTEX,
            'Mutex': it.MUTEX,
            'CVE': None,
            'Yara': None,
        }
        return mapping

    def send_otx_get(self, url, proxies=None, verify=True):
        '''
        Performs an authenticated GET against the OTX API.

        :return: the response body text, or False on any non-200 status.
        '''
        headers = {
            'X-OTX-API-KEY' : self.otx_api_key,
        }
        r = requests.get(url, headers=headers, proxies=proxies, verify=verify)
        if r.status_code == 200:
            return r.text
        else:
            print('Error retrieving AlienVault OTX data')
            print('Status code was: {}'.format(r.status_code))
            return False

    def get_pulse_generator(self, modified_since=None,
                            proxies=None, verify=True):
        '''
        This will yield a pulse and all its data while it can obtain more data.
        The OTX API has an issue when not specifying a "limit" on the pulses
        returned. If we specify a limit, we can get all of our pulses, but if
        we don't, the API will only ever return 5 pulses total. Derp.
        This also takes advantage of returning multiple pages of pulses, so
        a reasonable amount of data is returned at once.
        '''
        args = []
        if modified_since:
            args.append('modified_since={}'.format(
                modified_since.strftime('%Y-%m-%d %H:%M:%S.%f')))
        args.append('limit=10')
        args.append('page=1')
        request_args = '?{}'.format('&'.join(args))
        response_data = self.send_otx_get('{}/pulses/subscribed{}'
                                          .format(self.otx_url, request_args),
                                          proxies=proxies, verify=verify)
        # Follow the API's 'next' links until it runs out of pages.
        while response_data:
            all_pulses = json.loads(response_data)
            if 'results' in all_pulses:
                for pulse in all_pulses['results']:
                    yield pulse
            response_data = None
            if all_pulses.get('next'):
                response_data = self.send_otx_get(all_pulses['next'],
                                                  proxies=proxies,
                                                  verify=verify)

    def get_pulse_data(self, pulse_id, proxies=None, verify=True):
        '''
        Fetches a single pulse by id.

        :return: the parsed pulse dict, or False on error.
        '''
        response_data = self.send_otx_get('{}/pulses/{}'.format(self.otx_url,
                                          pulse_id),
                                          proxies=proxies, verify=verify)
        if response_data:
            return json.loads(response_data)
        else:
            print('Error retrieving pulse with id {}'.format(pulse_id))
            return False

    def is_pulse_in_crits(self, pulse_id):
        '''
        Checks to see if the given pulse_id is already in CRITs as a ticket
        in an Event object
        '''
        result = self.crits.event_count( params={ 'c-tickets.ticket_number' :
                                                  pulse_id } )
        return result > 0

    def build_crits_event(self, event_title, crits_source, description='',
                          params={}):
        '''
        Builds an event in CRITs
        '''
        event = self.crits.add_event('Intel Sharing', event_title, description,
                                     crits_source, params=params)
        return event

    def add_crits_indicator(self, indicator_value, indicator_type, crits_source,
                            params={}):
        '''
        Adds a single indicator to CRITs.

        :return: the API result dict on success, False on error.
        '''
        result = self.crits.add_indicator(indicator_type, indicator_value,
                                          crits_source, params=params)
        if result:
            if result['return_code'] == 0:
                return result
            else:
                print('Error when adding CRITs Indicator: '
                      '{}'.format(result['message']))
        return False

    def add_ticket_to_crits_event(self, event_id, pulse_id, params={},
                                  proxies={}, verify=True):
        '''
        Adds a ticket to the provided CRITs Event
        '''
        submit_url = '{}/api/v1/{}/{}/'.format(self.crits_url, 'events',
                                               event_id)
        headers = {
            'Content-Type' : 'application/json',
        }
        # date must be in the format %Y-%m-%d %H:%M:%S.%f
        formatted_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
        data = {
            'action' : 'ticket_add',
            'ticket' : {
                'ticket_number' : pulse_id,
                'date' : formatted_date,
            }
        }
        # Bug fix: honor the caller's verify argument (was hard-coded to
        # False, silently disabling TLS certificate verification).
        r = requests.patch(submit_url, headers=headers, proxies=proxies,
                           params=params, data=json.dumps(data), verify=verify)
        if r.status_code == 200:
            print('Ticket added successfully: {0} <-> {1}'.format(event_id,
                  pulse_id))
            return True
        else:
            print('Error with status code {0} and message {1} when adding a '
                  'ticket to event: {2} <-> {3}'.format(r.status_code, r.text,
                                                        event_id, pulse_id))
            return False

    def build_crits_relationship(self, event_id, indicator_id, params={},
                                 proxies={}, verify=True):
        '''
        Builds a relationship between the given event and indicator IDs
        '''
        submit_url = '{}/api/v1/{}/{}/'.format(self.crits_url, 'events',
                                               event_id)
        headers = {
            'Content-Type' : 'application/json',
        }
        # NOTE(review): unlike add_ticket_to_crits_event this posts the dict
        # form-encoded (data=data) while headers still claim JSON; json.dumps
        # would choke on the datetime below, so left as-is -- verify server
        # side accepts it.
        data = {
            'action' : 'forge_relationship',
            'right_type' : 'Indicator',
            'right_id' : indicator_id,
            'rel_type' : 'Related To',
            'rel_date' : datetime.datetime.now(),
            'rel_confidence' : 'high',
            'rel_reason' : 'Related during automatic OTX import'
        }
        # Bug fix: honor the caller's verify argument (was hard-coded to
        # False, silently disabling TLS certificate verification).
        r = requests.patch(submit_url, proxies=proxies, params=params,
                           data=data, verify=verify)
        if r.status_code == 200:
            print('Relationship built successfully: {0} <-> '
                  '{1}'.format(event_id,indicator_id))
            return True
        else:
            print('Error with status code {0} and message {1} between these '
                  'indicators: {2} <-> {3}'.format(r.status_code, r.text,
                                                   event_id, indicator_id))
            return False
def main():
    """Command-line entry point for the OTX -> CRITs importer."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dev', dest='dev', action='store_true',
                        default=False, help='Use your dev instance of '
                        'CRITS. For science.')
    parser.add_argument('-c', dest='config', default=None, help='Provide '
                        'a specific configuration file path.')
    parser.add_argument('-d', dest='days', default=None, type=int,
                        help='Specify the maximum age of a pulse in the '
                        'number of days.')
    parsed = parser.parse_args()
    importer = OTX2CRITs(dev=parsed.dev, config=parsed.config, days=parsed.days)
    importer.execute()
# Script entry point.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# Copyright (c) 2016-2021, <NAME>
# Licensed under the BSD license
# https://opensource.org/licenses/BSD-3-Clause
# Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 2000
# The Regents of the University of California. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that: (1) source code distributions
# retain the above copyright notice and this paragraph in its entirety, (2)
# distributions including binary code include the above copyright notice and
# this paragraph in its entirety in the documentation or other materials
# provided with the distribution, and (3) all advertising materials mentioning
# features or use of this software display the following acknowledgement:
# ``This product includes software developed by the University of California,
# Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
# the University nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
from typing import Optional
import sys
import os
import getopt
import ctypes as ct
import libpcap as pcap
from pcaptestutils import * # noqa
#ifndef lint
# BSD-mandated copyright notice, mirroring the C original's "#ifndef lint" block.
copyright = "@(#) Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, "\
            "1995, 1996, 1997, 2000\n"\
            "The Regents of the University of California. "\
            "All rights reserved.\n"
#endif

# Handle of the currently open capture; set by main() and used by the
# handlers below to break out of the capture loop.
pd = ct.POINTER(pcap.pcap_t)()

if is_windows:
    # Windows: a console control handler plays the role of a POSIX signal handler.
    @win32.PHANDLER_ROUTINE
    def stop_capture(ctrltype: win32.DWORD) -> win32.BOOL:
        global pd
        pcap.breakloop(pd)
        return True
else:
    # void stop_capture(int signum _U_):
    def stop_capture(signum):
        global pd
        pcap.breakloop(pd)
def parse_interface_number(device: bytes) -> int:
    """Parse a device name that is just an adapter ordinal.

    Strips an optional "scheme://authority/" URL prefix, then tries to
    interpret the remainder as a 1-based adapter index.

    :param device: device name from the command line.
    :return: the ordinal, or -1 when ``device`` is not all-numeric
        (exits via error() for a non-positive ordinal).
    """
    # Search for a colon, terminating any scheme at the beginning
    # of the device.
    idx = device.find(b":")
    if idx != -1:
        # We found it. Is it followed by "//"?
        idx += 1  # skip the :
        if device[idx:idx+2] == b"//":
            # Yes. Search for the next /, at the end of the
            # authority part of the URL.
            idx += 2  # skip the //
            # Bug fix: the original used device[idx:].find(b"/"), whose
            # result is relative to the slice, then sliced the full string
            # with it -- mangling URL-prefixed names. Search from idx in
            # the full string instead.
            idx = device.find(b"/", idx)
            if idx != -1:
                # OK, past the / is the path.
                idx += 1  # skip the /
                device = device[idx:]
    try:
        devnum = int(device)
    except ValueError:
        # It's not all-numeric; return -1, so our caller
        # knows that.
        return -1
    # It's all-numeric, but is it a valid number?
    if devnum <= 0:
        # No, it's not an ordinal.
        error("Invalid adapter index")
    return devnum
def find_interface_by_number(devnum: int) -> bytes:
    """Return the name of the devnum-th capture device (1-based).

    Exits via error() if the device list cannot be obtained or the index
    is out of range.
    """
    ebuf = ct.create_string_buffer(pcap.PCAP_ERRBUF_SIZE)
    devlist = ct.POINTER(pcap.pcap_if_t)()
    status = pcap.findalldevs(ct.byref(devlist), ebuf)
    if status < 0:
        error("{}", ebuf2str(ebuf))
    # Look for the devnum-th entry in the list of devices (1-based).
    dev = devlist
    for _ in range(devnum - 1):
        if not dev: break
        dev = dev.contents.next
    if not dev:
        error("Invalid adapter index")
    # NOTE(review): presumably .name is a c_char_p field, so this access
    # yields an independent bytes copy before freealldevs() releases the
    # list storage below -- confirm the field type in the binding.
    device = dev.contents.name
    pcap.freealldevs(devlist)
    return device
def open_interface(device: bytes, snaplen: Optional[int],
                   ebuf: ct.POINTER(ct.c_char)) -> Optional[ct.POINTER(pcap.pcap_t)]:
    """Create and activate a capture handle for *device*.

    Returns the handle on success; returns None (with the message left in
    *ebuf*) when the device does not exist, so the caller can retry with a
    numeric interface index.  Any other failure exits via error().
    """
    pc = pcap.create(device, ebuf)
    if not pc:
        # If this failed with "No such device", that means
        # the interface doesn't exist; return NULL, so that
        # the caller can see whether the device name is
        # actually an interface index.
        if b"No such device" in ebuf.value:
            return None
        error("{}", ebuf2str(ebuf))
    if snaplen is not None:
        status = pcap.set_snaplen(pc, snaplen)
        if status != 0:
            error("{}: pcap.set_snaplen failed: {}",
                  device2str(device), status2str(status))
    status = pcap.set_timeout(pc, 100)
    if status != 0:
        error("{}: pcap.set_timeout failed: {}",
              device2str(device), status2str(status))
    status = pcap.activate(pc)
    if status < 0:
        # pcap.activate() failed.
        cp = pcap.geterr(pc)
        if status == pcap.PCAP_ERROR:
            error("{}", cp.decode("utf-8", "ignore"))
        elif status == pcap.PCAP_ERROR_NO_SUCH_DEVICE:
            # Return an error for our caller to handle.
            src = b"%s: %s\n(%s)" % (
                  device, status2str(status).encode("utf-8"), cp)
            ct.memmove(ebuf, ct.c_char_p(src), pcap.PCAP_ERRBUF_SIZE)
        elif status == pcap.PCAP_ERROR_PERM_DENIED and cp:
            error("{}: {}\n({})",
                  device2str(device), status2str(status),
                  cp.decode("utf-8", "ignore"))
        else:
            error("{}: {}",
                  device2str(device), status2str(status))
        # Close the half-opened handle before reporting failure.
        pcap.close(pc)
        return None
    elif status > 0:
        # pcap.activate() succeeded, but it's warning us
        # of a problem it had.
        cp = pcap.geterr(pc)
        if status == pcap.PCAP_WARNING:
            warning("{}", cp.decode("utf-8", "ignore"))
        elif status == pcap.PCAP_WARNING_PROMISC_NOTSUP and cp:
            warning("{}: {}\n({})",
                    device2str(device), status2str(status),
                    cp.decode("utf-8", "ignore"))
        else:
            warning("{}: {}",
                    device2str(device), status2str(status))
    return pc
def main(argv=sys.argv[1:]):
    """Capture packets and dump them to the savefile given with -w.

    Options: -D lists interfaces, -L lists link-layer types for the chosen
    interface, -i selects the interface, -s sets the snapshot length,
    -y sets the DLT, and any remaining arguments form a BPF filter.
    Returns 1 on a dispatch error, else 0.
    """
    global program_name
    program_name = os.path.basename(sys.argv[0])
    global pd

    try:
        opts, args = getopt.getopt(argv, "DLi:s:w:y:")
    except getopt.GetoptError:
        usage()

    show_interfaces = False
    show_dlt_types = False
    device: Optional[bytes] = None
    snaplen: Optional[int] = None
    savefile: Optional[bytes] = None
    dlt_name: Optional[bytes] = None
    for opt, optarg in opts:
        if opt == '-D':
            show_interfaces = True
        elif opt == '-L':
            show_dlt_types = True
        elif opt == '-i':
            device = optarg.encode("utf-8")
        elif opt == '-s':
            try:
                snaplen = int(optarg)
            except ValueError:  # narrowed from a bare except
                error("invalid snaplen {} (must be >= 0)", optarg)
        elif opt == '-w':
            savefile = optarg.encode("utf-8")
        elif opt == '-y':
            dlt_name = optarg.encode("utf-8")
        else:
            usage()

    expression = args

    ebuf = ct.create_string_buffer(pcap.PCAP_ERRBUF_SIZE)

    if show_interfaces:
        # -D: enumerate all capture devices, 1-based, then exit.
        devlist = ct.POINTER(pcap.pcap_if_t)()
        if pcap.findalldevs(ct.byref(devlist), ebuf) < 0:
            error("{}", ebuf2str(ebuf))
        pdev = devlist
        i = 0
        while pdev:
            dev = pdev.contents
            print("{}.{}".format(i + 1, dev.name.decode("utf-8")), end="")
            if dev.description:
                print(" ({})".format(dev.description.decode("utf-8")), end="")
            print()
            pdev = dev.next
            i += 1
        pcap.freealldevs(devlist)
        return 0

    if device is None:
        # No -i given: default to the first interface pcap knows about.
        devlist = ct.POINTER(pcap.pcap_if_t)()
        if pcap.findalldevs(ct.byref(devlist), ebuf) == -1:
            error("{}", ebuf2str(ebuf))
        if not devlist:
            error("no interfaces available for capture")
        device = devlist[0].name
        pcap.freealldevs(devlist)

    if show_dlt_types:
        # -L: list the link-layer types the interface supports, then exit.
        pd = pcap.create(device, ebuf)
        if not pd:
            error("{}", ebuf2str(ebuf))
        status = pcap.activate(pd)
        if status < 0:
            # pcap.activate() failed.
            error("{}: {}\n({})",
                  device2str(device), status2str(status), geterr2str(pd))
        dlts = ct.POINTER(ct.c_int)()
        ndlts = pcap.list_datalinks(pd, ct.byref(dlts))
        if ndlts < 0:
            # pcap.list_datalinks() failed.
            error("{}: {}\n({})",
                  device2str(device), status2str(status), geterr2str(pd))
        for i in range(ndlts):
            dlt_name = pcap.datalink_val_to_name(dlts[i])
            if dlt_name is None:
                print("DLT {}".format(dlts[i]), end="")
            else:
                print("{}".format(dlt_name.decode("utf-8")), end="")
            print()
        # Bug fix: was pcap_free_datalinks(), an undefined name.
        pcap.free_datalinks(dlts)
        pcap.close(pd)
        return 0

    if savefile is None:
        error("no savefile specified")

    ebuf[0] = b"\0"
    pd = open_interface(device, snaplen, ebuf)
    if not pd:
        # That failed because the interface couldn't be found.
        #
        # If we can get a list of interfaces, and the interface name
        # is purely numeric, try to use it as a 1-based index
        # in the list of interfaces.
        devnum = parse_interface_number(device)
        if devnum == -1:
            # It's not a number; just report
            # the open error and fail.
            error("{}", ebuf2str(ebuf))
        # OK, it's a number; try to find the
        # interface with that index, and try
        # to open it.
        #
        # find_interface_by_number() exits if it
        # couldn't be found.
        device = find_interface_by_number(devnum)
        pd = open_interface(device, snaplen, ebuf)
        if not pd:
            error("{}", ebuf2str(ebuf))

    localnet = pcap.bpf_u_int32()
    netmask = pcap.bpf_u_int32()
    if pcap.lookupnet(device, ct.byref(localnet), ct.byref(netmask), ebuf) < 0:
        localnet = pcap.bpf_u_int32(0)
        netmask = pcap.bpf_u_int32(0)
        warning("{}", ebuf2str(ebuf))

    if dlt_name is not None:
        dlt = pcap.datalink_name_to_val(dlt_name)
        if dlt == pcap.PCAP_ERROR:
            error("{} isn't a valid DLT name", dlt_name.decode("utf-8"))
        # Bug fix: was pcap_set_datalink(), an undefined name.
        if pcap.set_datalink(pd, dlt) == pcap.PCAP_ERROR:
            error("{}: {}", device2str(device), geterr2str(pd))

    fcode = pcap.bpf_program()
    cmdbuf = None
    # Don't set a filter unless we were given one on the
    # command line; if capturing doesn't work, or doesn't
    # use the snapshot length, without a filter, that's
    # a bug.
    if args:
        cmdbuf = " ".join(expression).encode("utf-8")
        if pcap.compile(pd, ct.byref(fcode), cmdbuf, 1, netmask) < 0:
            error("{}", geterr2str(pd))
        if pcap.setfilter(pd, ct.byref(fcode)) < 0:
            error("{}", geterr2str(pd))

    pdd = pcap.dump_open(pd, savefile)
    if not pdd:
        error("{}", geterr2str(pd))

    if is_windows:
        win32.SetConsoleCtrlHandler(stop_capture, True)
    else:
        # NOTE(review): sigaction/sigemptyset/SIGINT/NULL/strerror/errno are
        # assumed to be re-exported by pcaptestutils — confirm; the plain
        # stdlib equivalent is signal.signal(signal.SIGINT, stop_capture).
        action = sigaction()
        action.sa_handler = stop_capture
        sigemptyset(ct.byref(action.sa_mask))
        action.sa_flags = 0
        if sigaction(SIGINT, ct.byref(action), NULL) == -1:
            error("Can't catch SIGINT: {}", strerror(errno))

    print("Listening on {}, link-type ".format(device2str(device)), end="")
    # Bug fix: was pcap_datalink(), an undefined name.
    dlt = pcap.datalink(pd)
    dlt_name = pcap.datalink_val_to_name(dlt)
    if dlt_name is None:
        # Bug fix: ".formt" -> ".format".
        print("DLT {}".format(dlt), end="")
    else:
        print("{}".format(dlt_name.decode("utf-8")), end="")
    print()

    while True:
        status = pcap.dispatch(pd, -1, pcap.dump,
                               ct.cast(pdd, ct.POINTER(ct.c_ubyte)))
        if status < 0:
            break
        if status != 0:
            print("{} packets seen".format(status))
            ps = pcap.stat()
            pcap.stats(pd, ct.byref(ps))
            print("{:d} ps_recv, {:d} ps_drop, {:d} ps_ifdrop".format(
                  ps.ps_recv, ps.ps_drop, ps.ps_ifdrop))

    if status == pcap.PCAP_ERROR_BREAK:
        # We got interrupted, so perhaps we didn't manage to finish a
        # line we were printing. Print an extra newline, just in case.
        print()
        print("Broken out of loop from SIGINT handler")
    sys.stdout.flush()
    if status == -1:
        # Error. Report it.
        print("{}: pcap.dispatch: {}".format(program_name, geterr2str(pd)),
              file=sys.stderr)
    if cmdbuf is not None:
        pcap.freecode(ct.byref(fcode))
    pcap.close(pd)
    return 1 if status == -1 else 0
def usage():
    """Print the command-line synopsis to stderr and exit with status 1."""
    synopsis = ("Usage: {} -D -L [ -i interface ] [ -s snaplen ] [ -w file ] "
                "[ -y dlt ] [ expression ]").format(program_name)
    print(synopsis, file=sys.stderr)
    sys.exit(1)
# Run main() when executed as a script; rpartition() also matches when the
# module is loaded under a dotted package path ending in ".__main__".
if __name__.rpartition(".")[-1] == "__main__":
    sys.exit(main())
|
<gh_stars>1-10
import glob
import hashlib
import os
import os.path as osp
import pickle
import time
from abc import ABC
from collections import defaultdict
from contextlib import ContextDecorator, contextmanager
from timeit import default_timer
from typing import Any, Callable, Dict, List
import cv2
import gym
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
# wandb is an optional dependency: it is only used when callers opt in to
# Weights & Biases logging (e.g. plot_line's to_wb flag).
try:
    import wandb
except ImportError:
    # Narrowed from a bare except: only a missing package should be ignored.
    pass
def get_env_attr(env, attr_name, max_calls=10):
    """Look up *attr_name* on *env*, unwrapping nested `.env`/`._env` wrappers.

    Recurses through at most *max_calls* wrapper layers; re-raises the
    original lookup error once the budget is exhausted.
    """
    try:
        return getattr(env, attr_name)
    except Exception as lookup_err:
        if max_calls == 0:
            raise lookup_err
        for wrapper_attr in ("env", "_env"):
            if hasattr(env, wrapper_attr):
                return get_env_attr(getattr(env, wrapper_attr), attr_name, max_calls - 1)
        raise ValueError(f"Could not find property {attr_name} of {env}")
def get_save_dir(args) -> str:
    """Directory for saving images, videos or any random debug info."""
    parts = (args.vid_dir, args.env_name, args.prefix)
    return osp.join(*parts)
def plot_line(
    plot_vals,
    save_name,
    save_dir,
    to_wb,
    update_iter=None,
    x_vals=None,
    x_name=None,
    y_name=None,
    title=None,
    err=None,
    file_suffix="",
):
    """
    Plot a simple rough line.

    Saves the figure to "<save_dir>/<save_name>_<file_suffix>.png" via
    plt_save (which also clears the current figure), and, when *to_wb* is
    True, logs the image to wandb using *update_iter* as the step if given.
    *err*, when provided, draws error bars instead of a plain line.
    """
    if x_vals is None:
        x_vals = np.arange(len(plot_vals))
    save_path = osp.join(save_dir, f"{save_name}_{file_suffix}.png")
    if title is None:
        plt.title(save_name)
    else:
        plt.title(title)
    if x_name is not None:
        plt.xlabel(x_name)
    if y_name is not None:
        plt.ylabel(y_name)
    if err is None:
        plt.plot(x_vals, plot_vals)
    else:
        plt.errorbar(x_vals, plot_vals, err)
    # NOTE(review): grid's `b=` keyword was renamed `visible=` in
    # matplotlib 3.5 — confirm against the pinned matplotlib version.
    plt.grid(b=True, which="major", color="lightgray", linestyle="--")
    plt_save(save_path)
    kwargs = {}
    if update_iter is not None:
        kwargs["step"] = update_iter
    if to_wb:
        wandb.log({save_name: [wandb.Image(save_path)]}, **kwargs)
def plt_save(*path_parts):
    """Save the current matplotlib figure to osp.join(*path_parts), then clear it.

    Creates the destination directory if needed.
    """
    save_name = osp.join(*path_parts)
    save_dir = osp.dirname(save_name)
    # exist_ok avoids the check-then-create race; the truthiness guard fixes
    # a crash when saving to a bare filename (osp.dirname == "" made the old
    # `not osp.exists("")` check call os.makedirs("")).
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    plt.savefig(save_name)
    print(f"Saved fig to {save_name}")
    plt.clf()
def save_model(model, save_name, args):
    """Serialize *model*'s state_dict under <save_dir>/<env_name>/<prefix>/."""
    save_dir = osp.join(args.save_dir, args.env_name, args.prefix)
    # exist_ok avoids the race between the existence check and creation.
    os.makedirs(save_dir, exist_ok=True)
    save_path = osp.join(save_dir, save_name)
    torch.save(model.state_dict(), save_path)
    print(f"Saved model to {save_path}")
#########################################
# FORMATTING / PRINTING
#########################################
def human_format_int(num, round_pos=2):
    """Render *num* with a metric suffix, e.g. 1500 -> "1.5K"."""
    # add more suffixes if you need them
    suffixes = ["", "K", "M", "G", "T", "P"]
    magnitude = 0
    while abs(num) >= 1000:
        num /= 1000.0
        magnitude += 1
    # Format to round_pos decimals, then drop trailing zeros and a bare dot.
    text = ("%." + str(round_pos) + "f") % num
    text = text.rstrip("0").rstrip(".")
    return text + suffixes[magnitude]
def print_weights(m):
    """Print every named parameter of module *m*: its name, then its tensor."""
    for param_name, param in m.named_parameters():
        print(param_name)
        print(param)


def pstart_sep():
    """Print a blank line followed by a long separator rule."""
    print()
    print("-" * 30)


def pend_sep():
    """Print a short separator rule followed by a blank line."""
    print("-" * 10)
    print()
#########################################
#########################################
# OBS DICTIONARY ACTIONS
#########################################
def deep_dict_select(d, idx):
    """Select index *idx* from every value of dict *d*."""
    return {key: value[idx] for key, value in d.items()}
def transpose_arr_dict(arr: List[Dict]) -> Dict[Any, Any]:
    """Turn a list of dicts (all sharing keys) into a dict of lists.

    When the first dict's values are torch tensors, each resulting list is
    stacked into a single tensor.
    """
    keys = arr[0].keys()
    transposed = {k: [element[k] for element in arr] for k in keys}
    sample = arr[0][next(iter(keys))]
    if isinstance(sample, torch.Tensor):
        transposed = {k: torch.stack(v) for k, v in transposed.items()}
    return transposed
def transpose_dict_arr(d: Dict[Any, Any]) -> List[Dict]:
    """Turn a dict of equal-length lists into a list of dicts.

    Raises ValueError when the lists do not all share the same length.
    """
    lengths = {len(v) for v in d.values()}
    if len(lengths) != 1:
        raise ValueError("All lists must have equal sizes")
    (length,) = lengths
    return [{k: d[k][i] for k in d} for i in range(length)]
def flatten_obs_dict(ob_shape, keep_keys):
    """Merge the 1-D subspaces named in *keep_keys* into one flat Box.

    The resulting Box spans the minimum low / maximum high over the kept
    subspaces (lows and highs are assumed uniform within each subspace).
    """
    total_dim = 0
    low_val = None
    high_val = None
    for key in keep_keys:
        sub_space = ob_shape.spaces[key]
        assert len(sub_space.shape) == 1
        sub_low = sub_space.low.reshape(-1)[0]
        sub_high = sub_space.high.reshape(-1)[0]
        if low_val is None:
            low_val, high_val = sub_low, sub_high
        else:
            low_val = min(sub_low, low_val)
            high_val = max(sub_high, high_val)
        total_dim += sub_space.shape[0]
    return gym.spaces.Box(
        shape=(total_dim,),
        low=np.float32(low_val),
        high=np.float32(high_val),
        dtype=np.float32,
    )
def obs_op(obs: Any, op: Callable[[Any], Any]) -> Any:
    """Apply an operation to every value in a dictionary."""
    if not isinstance(obs, dict):
        return op(obs)
    return {key: op(value) for key, value in obs.items()}
def obs_select(obs, idx):
    """Index into *obs*, per-key when it is a dict observation."""
    if not isinstance(obs, dict):
        return obs[idx]
    return {key: value[idx] for key, value in obs.items()}
#########################################
def is_dict_obs(ob_space):
    """True when *ob_space* is a gym Dict space."""
    return isinstance(ob_space, gym.spaces.Dict)


def get_ob_keys(ob_space):
    """Keys of a Dict space, or [None] for a flat space."""
    if not isinstance(ob_space, gym.spaces.Dict):
        return [None]
    return list(ob_space.spaces.keys())
def ob_to_np(obs):
    """Convert tensor observation(s) to numpy; dict observations are mutated in place."""
    if not isinstance(obs, dict):
        return obs.cpu().numpy()
    for key in obs:
        obs[key] = obs[key].cpu().numpy()
    return obs
def clone_ob(obs):
    """Copy an observation (dict of array-likes, or a single array-like) into fresh numpy arrays."""
    if isinstance(obs, dict):
        return {key: np.array(value) for key, value in obs.items()}
    return np.array(obs)
def ob_to_tensor(obs, device):
    """Convert observation(s) to torch tensors on *device*; dicts are mutated in place."""
    if not isinstance(obs, dict):
        return torch.tensor(obs).to(device)
    for key in obs:
        obs[key] = torch.tensor(obs[key]).to(device)
    return obs
def ob_to_cpu(obs):
    """Move observation tensors to the CPU; None passes through untouched."""
    if obs is None:
        return None
    if isinstance(obs, dict):
        return {key: value.cpu() for key, value in obs.items()}
    return obs.cpu()
def ac_space_to_tensor(action_space):
    """Return (low, high) bounds of *action_space* as torch tensors."""
    low = torch.tensor(action_space.low)
    high = torch.tensor(action_space.high)
    return low, high


def multi_dim_clip(val, low, high):
    """Elementwise clamp of *val* to [low, high] with tensor bounds."""
    return torch.min(val, high).max(low)
def get_ob_shapes(ob_space):
    """Map each key of a Dict space to its shape ({None: shape} for flat spaces)."""
    if not isinstance(ob_space, gym.spaces.Dict):
        return {None: ob_space.shape}
    return {key: space.shape for key, space in ob_space.spaces.items()}


def get_ob_shape(obs_space, k):
    """Shape of subspace *k*, or of the whole space when *k* is None."""
    return obs_space.shape if k is None else obs_space.spaces[k].shape


def get_obs_shape(ob_space, k="observation"):
    """Shape of subspace *k* for Dict spaces, else the space's own shape."""
    if isinstance(ob_space, gym.spaces.Dict):
        return ob_space.spaces[k].shape
    return ob_space.shape


def get_obs_space(ob_space):
    """The "observation" subspace for Dict spaces, else the space itself."""
    if isinstance(ob_space, gym.spaces.Dict):
        return ob_space.spaces["observation"]
    return ob_space
def get_def_obs(obs, k="observation"):
    """obs[k] for dict observations, else *obs* unchanged."""
    return obs[k] if isinstance(obs, dict) else obs


def set_def_obs(obs, new_obs, k="observation"):
    """Replace the *k* entry of a dict observation (in place), or the whole observation."""
    if not isinstance(obs, dict):
        return new_obs
    obs[k] = new_obs
    return obs
def obs_select(obs, idx):
    # NOTE(review): exact duplicate of the obs_select defined earlier in this
    # module; this redefinition harmlessly shadows it. Consider removing one.
    if isinstance(obs, dict):
        return {k: obs[k][idx] for k in obs}
    return obs[idx]
def deep_get_other_obs(obs):
    """Apply get_other_obs to each observation in a list."""
    return [get_other_obs(each) for each in obs]


def deep_get_def_obs(obs):
    """Apply get_def_obs to each observation in a list."""
    return [get_def_obs(each) for each in obs]


def get_other_obs(obs, maink="observation"):
    """All dict entries except *maink*; {} for non-dict observations."""
    if not isinstance(obs, dict):
        return {}
    return {key: value for key, value in obs.items() if key != maink}
def combine_spaces(orig_space, new_space_key, new_space):
    """Add *new_space* under *new_space_key*, wrapping flat spaces into a Dict."""
    if isinstance(orig_space, gym.spaces.Dict):
        base = dict(orig_space.spaces)
    else:
        base = {"observation": orig_space}
    base[new_space_key] = new_space
    return gym.spaces.Dict(base)
def update_obs_space(cur_space, update_obs_space):
    """Swap in a new "observation" subspace (or replace a flat space entirely)."""
    if not is_dict_obs(cur_space):
        return update_obs_space
    merged = dict(cur_space.spaces)
    merged["observation"] = update_obs_space
    return gym.spaces.Dict(merged)
def combine_obs(orig_obs, new_obs_key, new_obs):
    """Add *new_obs* under *new_obs_key*, wrapping flat observations into a dict."""
    if not isinstance(orig_obs, dict):
        orig_obs = {"observation": orig_obs}
    return {**orig_obs, new_obs_key: new_obs}
def reshape_obs_space(obs_space, new_shape):
    """Return a copy of Box *obs_space* with shape *new_shape*.

    Bug fix: low/high were previously swapped (low was built from
    obs_space.high and high from obs_space.low), producing an inverted Box.
    """
    assert isinstance(obs_space, gym.spaces.Box)
    return gym.spaces.Box(
        shape=new_shape,
        low=obs_space.low.reshape(-1)[0],
        high=obs_space.high.reshape(-1)[0],
        dtype=obs_space.dtype,
    )
def combine_states(state0, state1):
    """Concatenate two state tensors: channel dim (1) for 4-D inputs, else the last dim."""
    cat_dim = 1 if len(state0.shape) == 4 else -1
    return torch.cat([state0, state1], dim=cat_dim)
def get_ac_repr(ac, action):
    """
    Either returns the continuous value of the action or the one-hot encoded
    action
    """
    if isinstance(ac, gym.spaces.Box):
        return action
    if isinstance(ac, gym.spaces.Discrete):
        one_hot = torch.zeros(action.shape[0], ac.n).to(action.device)
        return one_hot.scatter(1, action.long(), 1)
    raise ValueError("Invalid action space type")
def get_ac_compact(ac, action):
    """
    Returns the opposite of `get_ac_repr`
    """
    if isinstance(ac, gym.spaces.Box):
        return action
    if isinstance(ac, gym.spaces.Discrete):
        return torch.argmax(action, dim=-1).unsqueeze(-1)
    raise ValueError("Invalid action space type")
def get_ac_dim(ac):
    """
    Returns the dimensionality of the action space
    """
    if isinstance(ac, gym.spaces.Box):
        return ac.shape[0]
    if isinstance(ac, gym.spaces.Discrete):
        return ac.n
    raise ValueError("Invalid action space type")
def is_discrete(ac):
    """Classify an action space by class name: Discrete -> True, Box -> False."""
    kind = ac.__class__.__name__
    if kind == "Discrete":
        return True
    if kind == "Box":
        return False
    raise ValueError(f"Action space {ac} not supported")
def agg_ep_log_stats(env_infos, alg_info):
    """
    Combine the values we want to log into one dictionary for logging.
    - env_infos: (list[dict]) everything starting with 'ep_' plus everything
      under the 'episode' key, taken only from infos that contain 'episode'
      (i.e. finished episodes).
    - alg_info: (dict) everything starting with 'alg_add_'.
    """
    stats = defaultdict(list)
    for key, value in alg_info.items():
        if key.startswith("alg_add_"):
            stats[key].append(value)
    for info in env_infos:
        if "episode" not in info:
            # Only log at the end of the episode
            continue
        for key, value in info.items():
            if key.startswith("ep_"):
                stats[key].append(value)
        for key, value in info["episode"].items():
            stats[key].append(value)
    return stats
# Get a render frame function (Mainly for transition)
def get_render_frame_func(venv):
    """Walk wrapper layers to find the base env's render_frame, or None."""
    if hasattr(venv, "envs"):
        return venv.envs[0].unwrapped.render_frame
    for wrapper_attr in ("venv", "env"):
        if hasattr(venv, wrapper_attr):
            return get_render_frame_func(getattr(venv, wrapper_attr))
    return None
# Get a render function
def get_render_func(venv):
    """Walk wrapper layers to find the first env's render method, or None."""
    if hasattr(venv, "envs"):
        return venv.envs[0].render
    for wrapper_attr in ("venv", "env"):
        if hasattr(venv, wrapper_attr):
            return get_render_func(getattr(venv, wrapper_attr))
    return None
def cleanup_log_dir(log_dir):
    """Create *log_dir*; if it already exists, delete stale monitor CSV files."""
    try:
        os.makedirs(log_dir)
    except OSError:
        # Directory already present: clear out old *.monitor.csv artifacts.
        for stale in glob.glob(os.path.join(log_dir, "*.monitor.csv")):
            os.remove(stale)
def update_args(args, update_dict, check_exist=False):
    """Copy *update_dict* entries onto the args namespace, in place.

    With check_exist=True, refuse to introduce keys args does not already have.
    """
    args_dict = vars(args)
    for key, value in update_dict.items():
        if check_exist and key not in args_dict:
            raise ValueError("Could not set key %s" % key)
        args_dict[key] = value
CACHE_PATH = "./data/cache"


class CacheHelper:
    """Pickle-backed on-disk cache keyed by an md5 hash of *lookup_val*."""

    def __init__(self, cache_name, lookup_val, def_val=None, verbose=False, rel_dir=""):
        self.use_cache_path = osp.join(CACHE_PATH, rel_dir)
        if not osp.exists(self.use_cache_path):
            os.makedirs(self.use_cache_path)
        digest = hashlib.md5(str(lookup_val).encode("utf-8")).hexdigest()
        self.cache_id = osp.join(self.use_cache_path, f"{cache_name}_{digest}.pickle")
        self.def_val = def_val
        self.verbose = verbose

    def exists(self):
        """True when a cached value is present on disk."""
        return osp.exists(self.cache_id)

    def load(self, load_depth=0):
        """Return the cached value, or def_val when no cache file exists.

        Retries with a jittered sleep on EOFError (another process may be
        mid-write); gives up and re-raises after 32 attempts.
        """
        if not self.exists():
            return self.def_val
        try:
            with open(self.cache_id, "rb") as f:
                if self.verbose:
                    print("Loading cache @", self.cache_id)
                return pickle.load(f)
        except EOFError as e:
            if load_depth == 32:
                raise e
            # try again soon
            print(
                "Cache size is ", osp.getsize(self.cache_id), "for ", self.cache_id
            )
            time.sleep(1.0 + np.random.uniform(0.0, 1.0))
            return self.load(load_depth + 1)

    def save(self, val):
        """Overwrite the cache file with pickle(val)."""
        with open(self.cache_id, "wb") as f:
            if self.verbose:
                print("Saving cache @", self.cache_id)
            pickle.dump(val, f)
@contextmanager
def elapsed_timer():
    """Context manager yielding a callable that reports elapsed seconds.

    While the block runs, the callable returns a live reading; after the
    block exits it stays frozen at the total elapsed time.  (From
    https://stackoverflow.com/questions/7370801)
    """
    start = default_timer()
    measure = lambda: default_timer() - start
    yield lambda: measure()
    stop = default_timer()
    # Freeze the reading at the block's total duration.
    measure = lambda: stop - start
try:
    # For nvidia nsight profiling which is super helpful
    from habitat_sim.utils import profiling_utils
except ImportError:
    # Narrowed from a bare except: only a missing package makes this optional.
    profiling_utils = None
class TimeProfiler(ContextDecorator):
    """Context manager / method decorator that accumulates wall-clock time
    into a TimeProfilee timer, optionally bracketing the span with nsight
    profiling ranges when habitat_sim's profiling_utils is importable."""

    def __init__(self, timer_name, timee=None, timer_prop=None):
        """
        - timer_name: str  Key under which elapsed time is accumulated.
        - timee: object with a `.timer` TimeProfilee; bound immediately.
        - timer_prop: str  The code that is used to access `self` when using
          this as a method decorator (resolved lazily in __call__).
        """
        self.timer_name = timer_name
        self.timer_prop = timer_prop
        if timee is not None:
            self.add_time_f = timee.timer.add_time
        else:
            # Resolved lazily in __call__ from the decorated method's owner.
            self.add_time_f = None

    def __enter__(self):
        if profiling_utils is not None:
            profiling_utils.range_push(self.timer_name)
        self.start_time = time.time()
        return self

    def __call__(self, f):
        # Decorator path: recover the timer from the decorated method's
        # bound object (args[0] is that method's `self`).
        def wrapper(*args, **kwargs):
            other_self = args[0]
            # NOTE(review): eval() here executes timer_prop as code; fine for
            # trusted attribute paths, but operator.attrgetter would be safer.
            if self.timer_prop is not None:
                self.add_time_f = eval(f"other_self.{self.timer_prop}.timer.add_time")
            else:
                self.add_time_f = other_self.timer.add_time
            return f(*args, **kwargs)
        return super().__call__(wrapper)

    def __exit__(self, *exc):
        elapsed = time.time() - self.start_time
        self.add_time_f(self.timer_name, elapsed)
        if profiling_utils is not None:
            profiling_utils.range_pop()
        # Returning False propagates any exception raised in the timed block.
        return False
class TimeProfilee:
    """Accumulates named wall-clock totals and call counts (see TimeProfiler)."""

    def __init__(self):
        self.clear()
        self._should_time = True

    def freeze(self):
        """Stop recording new timings (get_time keeps working)."""
        self._should_time = False

    def unfreeze(self):
        """Resume recording timings."""
        self._should_time = True

    def add_time(self, timer_name, timer_val):
        """Add *timer_val* seconds to *timer_name* unless frozen."""
        if not self._should_time:
            return
        self.timers[timer_name] += timer_val
        self.timer_call_count[timer_name] += 1

    def get_time(self, timer_name):
        """Return (total seconds, call count) for *timer_name*."""
        return (self.timers[timer_name], self.timer_call_count[timer_name])

    def clear(self):
        """Reset all totals and counts to zero."""
        self.timers = defaultdict(lambda: 0)
        self.timer_call_count = defaultdict(lambda: 0)
class StackHelper:
    """
    A helper for stacking observations.

    Keeps a rolling buffer of the last n_stack observations concatenated
    along the first (feature/channel) dimension.  With n_procs set, the
    buffer is a torch tensor of shape (n_procs, n_stack * input_dim, ...);
    otherwise it is a single numpy array of shape (n_stack * input_dim, ...).
    """
    def __init__(self, ob_shape, n_stack, device, n_procs=None):
        # Size of one un-stacked observation along dim 0.
        self.input_dim = ob_shape[0]
        self.n_procs = n_procs
        self.real_shape = (n_stack * self.input_dim, *ob_shape[1:])
        if self.n_procs is not None:
            self.stacked_obs = torch.zeros((n_procs, *self.real_shape))
            if device is not None:
                self.stacked_obs = self.stacked_obs.to(device)
        else:
            self.stacked_obs = np.zeros(self.real_shape)
    def update_obs(self, obs, dones=None, infos=None):
        """
        Shift the stack left by one observation and append *obs*.

        - obs: torch.tensor (vectorized path) or numpy array (single-env path)
        - dones/infos: only used on the vectorized path; done envs have their
          stacks zeroed, and any "final_obs" in infos is rewritten to a full
          stacked frame so the terminal observation lines up with the stack.
        Returns (stacked observation copy, infos).
        """
        if self.n_procs is not None:
            # Drop the oldest frame: shift everything left by input_dim.
            self.stacked_obs[:, : -self.input_dim] = self.stacked_obs[
                :, self.input_dim :
            ].clone()
            for (i, new) in enumerate(dones):
                if new:
                    # Episode ended in env i: restart its stack from zeros.
                    self.stacked_obs[i] = 0
            self.stacked_obs[:, -self.input_dim :] = obs
            # Update info so the final observation frame stack has the final
            # observation as the final frame in the stack.
            for i in range(len(infos)):
                if "final_obs" in infos[i]:
                    new_final = torch.zeros(*self.stacked_obs.shape[1:])
                    new_final[:-1] = self.stacked_obs[i][1:]
                    new_final[-1] = torch.tensor(infos[i]["final_obs"]).to(
                        self.stacked_obs.device
                    )
                    infos[i]["final_obs"] = new_final
            return self.stacked_obs.clone(), infos
        else:
            # Single-environment numpy path: same shift-and-append.
            self.stacked_obs[: -self.input_dim] = self.stacked_obs[
                self.input_dim :
            ].copy()
            self.stacked_obs[-self.input_dim :] = obs
            return self.stacked_obs.copy(), infos
    def reset(self, obs):
        """Zero the stack and seed its newest slot with *obs*; returns a copy."""
        if self.n_procs is not None:
            if torch.backends.cudnn.deterministic:
                # Fresh allocation keeps results reproducible under cudnn
                # deterministic mode; zero_() mutates in place otherwise.
                self.stacked_obs = torch.zeros(self.stacked_obs.shape)
            else:
                self.stacked_obs.zero_()
            self.stacked_obs[:, -self.input_dim :] = obs
            return self.stacked_obs.clone()
        else:
            self.stacked_obs = np.zeros(self.stacked_obs.shape)
            self.stacked_obs[-self.input_dim :] = obs
            return self.stacked_obs.copy()
    def get_shape(self):
        # Shape of one stacked observation (without the n_procs dimension).
        return self.real_shape
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
from email_reply_parser import EmailReplyParser
from erpnext.hr.doctype.employee.employee import is_holiday
from frappe.utils import global_date_format
from six import string_types
class RegularWorkSummary(Document):
    """Frappe doctype controller: collects work-summary replies by email and
    sends out a digest of them."""
    def send_mails(self, rws_group, emails):
        '''Send emails to get regular work summary to all users \
in selected regular work summary group'''
        # Replies should land in the default incoming account so they get
        # linked back to this document via reference_doctype/name.
        incoming_email_account = frappe.db.get_value('Email Account',
            dict(enable_incoming=1, default_incoming=1),
            'email_id')
        self.db_set('email_sent_to', '\n'.join(emails))
        frappe.sendmail(recipients=emails,
            message=rws_group.message,
            subject=rws_group.subject,
            reference_doctype=self.doctype,
            reference_name=self.name,
            reply_to=incoming_email_account)
    def send_summary(self):
        '''Send summary of all replies. Called at midnight'''
        args = self.get_message_details()
        emails = get_user_emails_from_group(self.regular_work_summary_group, 'Summary')
        frappe.sendmail(recipients=emails,
            template='daily_work_summary',
            args=args,
            subject=_(self.regular_work_summary_group),
            reference_doctype=self.doctype,
            reference_name=self.name)
        self.db_set('status', 'Sent')
    def get_message_details(self):
        '''Return args for template'''
        dws_group = frappe.get_doc('Regular Work Summary Group',
            self.regular_work_summary_group)
        # All received Communications linked to this document are replies.
        replies = frappe.get_all('Communication',
            fields=['content', 'text_content', 'sender'],
            filters=dict(reference_doctype=self.doctype,
                reference_name=self.name,
                communication_type='Communication',
                sent_or_received='Received'),
            order_by='creation asc')
        did_not_reply = self.email_sent_to.split()
        for d in replies:
            user = frappe.db.get_values("User",
                {"email": d.sender},
                ["full_name", "user_image"],
                as_dict=True)
            d.sender_name = user[0].full_name if user else d.sender
            # NOTE(review): the query selects "user_image" but this reads
            # user[0].image — likely always None; confirm whether it should
            # be user[0].user_image.
            d.image = user[0].image if user and user[0].image else None
            original_image = d.image
            # make thumbnail image
            try:
                if original_image:
                    file_name = frappe.get_list('File',
                        {'file_url': original_image})
                    if file_name:
                        file_name = file_name[0].name
                        file_doc = frappe.get_doc('File', file_name)
                        thumbnail_image = file_doc.make_thumbnail(
                            set_as_thumbnail=False,
                            width=100,
                            height=100,
                            crop=True
                        )
                        d.image = thumbnail_image
            # NOTE(review): bare except — deliberately best-effort (falls back
            # to the original image), but it also hides unexpected errors.
            except:
                d.image = original_image
            if d.sender in did_not_reply:
                did_not_reply.remove(d.sender)
            if d.text_content:
                d.content = frappe.utils.md_to_html(
                    EmailReplyParser.parse_reply(d.text_content)
                )
        # Show full names (when known) for users who never replied.
        did_not_reply = [(frappe.db.get_value("User", {"email": email}, "full_name") or email)
            for email in did_not_reply]
        return dict(replies=replies,
            original_message=dws_group.message,
            title=_('Work Summary for {0}'.format(
                global_date_format(self.creation)
            )),
            did_not_reply=', '.join(did_not_reply) or '',
            did_not_reply_title=_('No replies from'))
def get_user_emails_from_group(group, receive='Both'):
    '''Returns list of email of enabled users from the given group

    :param group: Regular Work Summary Group `name` (or an already-loaded doc)'''
    group_doc = group
    if isinstance(group_doc, string_types):
        group_doc = frappe.get_doc('Regular Work Summary Group', group)
    return get_users_email(group_doc, receive)


def get_users_email(doc, receive):
    '''Emails of enabled users whose receive preference matches (or is Both).'''
    emails = []
    for d in doc.users:
        if not frappe.db.get_value("User", d.user, "enabled"):
            continue
        if d.receive == receive or d.receive == 'Both':
            emails.append(d.email)
    return emails
|
# Copyright (c) 2015, <NAME>
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
from __future__ import print_function, division, absolute_import
import pytest
import numpy as np
from numpy import *
import quaternion
import spherical_functions as sf
import scri
from conftest import linear_waveform, constant_waveform, random_waveform, delta_waveform
@pytest.mark.parametrize("w", [linear_waveform, constant_waveform, random_waveform])
def test_identity_rotation(w):
    """Rotating the decomposition basis by quaternion 1 must leave everything
    unchanged except the object's `num` identifier and its history."""
    # Rotation by 1 should be identity operation
    W_in = w()
    W_out = w()
    assert W_in.ensure_validity(alter=False)
    assert W_out.ensure_validity(alter=False)
    W_out.rotate_decomposition_basis(quaternion.one)
    assert W_out.ensure_validity(alter=False)
    assert np.array_equal(W_out.t, W_in.t)
    assert np.array_equal(W_out.frame, W_in.frame)
    assert np.array_equal(W_out.data, W_in.data)
    assert np.array_equal(W_out.LM, W_in.LM)
    assert W_out.ell_min == W_in.ell_min
    assert W_out.ell_max == W_in.ell_max
    # History entries may differ only by the embedded object numbers, or be
    # "# "-prefixed comment lines (e.g. timestamps) in both.
    for h_in, h_out in zip(W_in.history, W_out.history[:-1]):
        assert (h_in == h_out.replace(type(W_out).__name__ + '_' + str(W_out.num),
                                      type(W_in).__name__ + '_' + str(W_in.num))
                or (h_in.startswith('# ') and h_out.startswith('# ')))
    assert W_out.frameType == W_in.frameType
    assert W_out.dataType == W_in.dataType
    assert W_out.r_is_scaled_out == W_in.r_is_scaled_out
    assert W_out.m_is_scaled_out == W_in.m_is_scaled_out
    assert isinstance(W_out.num, int)
    assert W_out.num != W_in.num
@pytest.mark.parametrize("w", [linear_waveform, constant_waveform, random_waveform])
def test_rotation_invariants(w):
    """A random rotation should leave everything but data and frame the
    same (except num, of course)."""
    W_in = w()
    W_out = w()
    np.random.seed(hash('test_rotation_invariants') % 4294967294)  # Use mod to get in an acceptable range
    W_out.rotate_decomposition_basis(np.quaternion(*np.random.uniform(-1, 1, 4)).normalized())
    assert W_in.ensure_validity(alter=False)
    assert W_out.ensure_validity(alter=False)
    assert np.array_equal(W_out.t, W_in.t)
    assert not np.array_equal(W_out.frame, W_in.frame)  # This SHOULD change
    assert not np.array_equal(W_out.data, W_in.data)  # This SHOULD change
    assert W_out.ell_min == W_in.ell_min
    assert W_out.ell_max == W_in.ell_max
    assert np.array_equal(W_out.LM, W_in.LM)
    # The rotation appends extra history lines to W_out, hence the
    # asymmetric slices before comparing entries.
    for h_in, h_out in zip(W_in.history[:-3], W_out.history[:-5]):
        assert (h_in == h_out.replace(type(W_out).__name__ + '_' + str(W_out.num),
                                      type(W_in).__name__ + '_' + str(W_in.num))
                or (h_in.startswith('# ') and h_out.startswith('# ')))
    assert W_out.frameType == W_in.frameType
    assert W_out.dataType == W_in.dataType
    assert W_out.r_is_scaled_out == W_in.r_is_scaled_out
    assert W_out.m_is_scaled_out == W_in.m_is_scaled_out
    assert W_out.num != W_in.num
@pytest.mark.parametrize("w", [linear_waveform, constant_waveform, random_waveform])
def test_constant_versus_series(w):
    """Rotating by a constant quaternion vs. a (different) time series of
    quaternions: invariants must match, while data and frame change."""
    W_const = w()
    W_series = w()
    np.random.seed(hash('test_constant_versus_series') % 4294967294)  # Use mod to get in an acceptable range
    W_const.rotate_decomposition_basis(np.quaternion(*np.random.uniform(-1, 1, 4)).normalized())
    W_series.rotate_decomposition_basis(
        np.array([np.quaternion(*np.random.uniform(-1, 1, 4)).normalized()] * W_series.n_times))
    assert W_const.ensure_validity(alter=False)
    assert W_series.ensure_validity(alter=False)
    assert np.array_equal(W_series.t, W_const.t)
    assert not np.array_equal(W_series.frame, W_const.frame)  # This SHOULD change
    assert not np.array_equal(W_series.data, W_const.data)  # This SHOULD change
    assert W_series.ell_min == W_const.ell_min
    assert W_series.ell_max == W_const.ell_max
    assert np.array_equal(W_series.LM, W_const.LM)
    # The two rotation paths append different numbers of history lines,
    # hence the asymmetric slices before comparing entries.
    for h_const, h_series in zip(W_const.history[:-5], W_series.history[:-11]):
        assert (h_const == h_series.replace(type(W_series).__name__ + '_' + str(W_series.num),
                                            type(W_const).__name__ + '_' + str(W_const.num))
                or (h_const.startswith('# ') and h_series.startswith('# ')))
    assert W_series.frameType == W_const.frameType
    assert W_series.dataType == W_const.dataType
    assert W_series.r_is_scaled_out == W_const.r_is_scaled_out
    assert W_series.m_is_scaled_out == W_const.m_is_scaled_out
    assert W_series.num != W_const.num
@pytest.mark.parametrize("w", [linear_waveform, constant_waveform, random_waveform])
def test_rotation_inversion(w):
    """Rotation followed by the inverse rotation should leave everything
    the same (except that the frame data will be either a 1 or a series
    of 1s, and `num` changes)."""
    np.random.seed(hash('test_rotation_inversion') % 4294967294)  # Use mod to get in an acceptable range
    W_in = w()
    assert W_in.ensure_validity(alter=False)
    # We loop over (1) a single constant rotation, and (2) an array of random rotations
    for R_basis in [np.quaternion(*np.random.uniform(-1, 1, 4)).normalized(),
                    np.array([np.quaternion(*np.random.uniform(-1, 1, 4)).normalized()] * W_in.n_times)]:
        W_out = w()
        W_out.rotate_decomposition_basis(R_basis)
        W_out.rotate_decomposition_basis(~R_basis)
        assert W_out.ensure_validity(alter=False)
        assert np.array_equal(W_out.t, W_in.t)
        assert np.max(np.abs(W_out.frame - W_in.frame)) < 1e-15
        # Bug fix: atol previously read `ell_max ** 4 ** 4e-14`; `**` is
        # right-associative, so 4 ** 4e-14 ~= 1 made atol ~= ell_max —
        # absurdly loose.  It should scale like rtol: ell_max**4 * 4e-14.
        assert np.allclose(W_out.data, W_in.data,
                           atol=W_in.ell_max ** 4 * 4e-14, rtol=W_in.ell_max ** 4 * 4e-14)
        assert W_out.ell_min == W_in.ell_min
        assert W_out.ell_max == W_in.ell_max
        assert np.array_equal(W_out.LM, W_in.LM)
        for h_in, h_out in zip(W_in.history[:-3], W_out.history[:-5]):
            assert (h_in == h_out.replace(type(W_out).__name__ + '_' + str(W_out.num),
                                          type(W_in).__name__ + '_' + str(W_in.num))
                    or (h_in.startswith('# datetime') and h_out.startswith('# datetime')))
        assert W_out.frameType == W_in.frameType
        assert W_out.dataType == W_in.dataType
        assert W_out.r_is_scaled_out == W_in.r_is_scaled_out
        assert W_out.m_is_scaled_out == W_in.m_is_scaled_out
        assert W_out.num != W_in.num
def test_rotations_of_0_0_mode(Rs):
    """The (ell, m) = (0, 0) mode must be invariant under any rotation of the
    decomposition basis."""
    copies_per_rotation = 10
    w_orig = delta_waveform(0, 0, begin=-10., end=100., n_times=copies_per_rotation * len(Rs), ell_min=0, ell_max=8)
    assert w_orig.ensure_validity(alter=False)
    w_rot = scri.WaveformModes(w_orig)
    # One frame rotor per time step: each rotor repeated copies_per_rotation times.
    basis_rotors = np.array([rotor for rotor in Rs for _ in range(copies_per_rotation)])
    w_rot.rotate_decomposition_basis(basis_rotors)
    assert w_rot.ensure_validity(alter=False)
    assert np.array_equal(w_rot.t, w_orig.t)
    assert np.max(np.abs(w_rot.frame - basis_rotors)) == 0.0
    # Data must be bit-identical: the monopole mode is rotationally invariant.
    assert np.array_equal(w_rot.data, w_orig.data)
    assert w_rot.ell_min == w_orig.ell_min
    assert w_rot.ell_max == w_orig.ell_max
    assert np.array_equal(w_rot.LM, w_orig.LM)
    # Histories agree once the per-object ids are normalized; comment lines
    # (starting with '# ') are exempt from exact matching.
    for line_orig, line_rot in zip(w_orig.history, w_rot.history[:-1]):
        normalized = line_rot.replace(type(w_rot).__name__ + '_' + str(w_rot.num),
                                      type(w_orig).__name__ + '_' + str(w_orig.num))
        assert line_orig == normalized or (line_orig.startswith('# ') and line_rot.startswith('# '))
    assert w_rot.frameType == w_orig.frameType
    assert w_rot.dataType == w_orig.dataType
    assert w_rot.r_is_scaled_out == w_orig.r_is_scaled_out
    assert w_rot.m_is_scaled_out == w_orig.m_is_scaled_out
    assert w_rot.num != w_orig.num
def test_rotations_of_each_mode_individually(Rs):
    """Rotating a single delta-function mode (L, Mp) must reproduce exactly the
    corresponding row block of the Wigner D matrices, with zeros elsewhere."""
    ell_min = 0
    ell_max = 8 # sf.ell_max is just too much; this test is too slow, and ell=8 should be fine
    R_basis = Rs
    # Precompute the full D matrices for every rotor in Rs.
    Ds = np.empty((len(Rs), sf.LMpM_total_size(ell_min, ell_max)), dtype=complex)
    for i, R in enumerate(Rs):
        Ds[i, :] = sf.Wigner_D_matrices(R, ell_min, ell_max)
    for ell in range(ell_max + 1):
        # Expected zero blocks below and above the excited ell.
        first_zeros = np.zeros((len(Rs), sf.LM_total_size(ell_min, ell - 1)), dtype=complex)
        later_zeros = np.zeros((len(Rs), sf.LM_total_size(ell + 1, ell_max)), dtype=complex)
        # NOTE(review): range(-ell, ell) never tests Mp == ell -- possibly an
        # off-by-one (range(-ell, ell + 1) would cover all modes); confirm intent.
        for Mp in range(-ell, ell):
            W_in = delta_waveform(ell, Mp, begin=-10., end=100., n_times=len(Rs), ell_min=ell_min, ell_max=ell_max)
            # Now, the modes are f^{\ell,m} = \delta^{\ell,mp}_{L,Mp}
            assert W_in.ensure_validity(alter=False)
            W_out = scri.WaveformModes(W_in)
            W_out.rotate_decomposition_basis(Rs)
            assert W_out.ensure_validity(alter=False)
            assert np.array_equal(W_out.t, W_in.t)
            assert np.max(np.abs(W_out.frame - R_basis)) == 0.0
            # Index of D^{ell}_{Mp, -ell}: the start of the expected data block.
            i_D0 = sf.LMpM_index(ell, Mp, -ell, ell_min)
            assert np.array_equal(W_out.data[:, :sf.LM_total_size(ell_min, ell - 1)], first_zeros)
            if ell < ell_max:
                assert np.array_equal(
                    W_out.data[:, sf.LM_total_size(ell_min, ell - 1):-sf.LM_total_size(ell + 1, ell_max)],
                    Ds[:, i_D0:i_D0 + (2 * ell + 1)])
                assert np.array_equal(W_out.data[:, -sf.LM_total_size(ell + 1, ell_max):], later_zeros)
            else:
                assert np.array_equal(W_out.data[:, sf.LM_total_size(ell_min, ell - 1):],
                                      Ds[:, i_D0:i_D0 + (2 * ell + 1)])
            assert W_out.ell_min == W_in.ell_min
            assert W_out.ell_max == W_in.ell_max
            assert np.array_equal(W_out.LM, W_in.LM)
            # NOTE(review): unlike the sibling tests, this replace() has no '_'
            # between class name and num -- confirm which form the history uses.
            for h_in, h_out in zip(W_in.history, W_out.history[:-1]):
                assert h_in == h_out.replace(type(W_out).__name__ + str(W_out.num), type(W_in).__name__ + str(W_in.num))
            assert W_out.frameType == W_in.frameType
            assert W_out.dataType == W_in.dataType
            assert W_out.r_is_scaled_out == W_in.r_is_scaled_out
            assert W_out.m_is_scaled_out == W_in.m_is_scaled_out
            assert W_out.num != W_in.num
|
import unittest
from decimal import Decimal
from importasol.db import fields
from importasol.db.base import SOLFile
from importasol.db import contasol
from importasol.db.contasol import APU, Asiento, ContaSOL, AutoAcumulador, MAE
from importasol.exceptions import ValidationError
from importasol.utiles import print_diario
from datetime import date
class TestAsiento(unittest.TestCase):
    """Tests for Asiento: add/remove of APUs, imbalance, and ordering."""

    def test_add_rm(self):
        """Removing a missing APU or re-adding a present one raises ValueError."""
        ap1 = APU()
        ap1.euros = 1000
        ap2 = APU()
        ap2.euros = -500
        ap3 = APU()
        ap3.euros = -500
        asi = Asiento(apuntes=[ap1, ap2, ap3])
        asi.rm(ap3)
        self.assertRaises(ValueError, asi.rm, ap3)
        asi.add(ap3)
        self.assertRaises(ValueError, asi.add, ap3)

    def test_descuadre(self):
        """descuadre reports the entry's current imbalance (sum of euros)."""
        ap1 = APU()
        ap1.euros = 1000
        ap2 = APU()
        ap2.euros = -500
        ap3 = APU()
        ap3.euros = -500
        asi = Asiento(apuntes=[ap1, ap2, ap3])
        self.assertEqual(0, asi.descuadre)
        ap3.euros = -300
        self.assertEqual(200, asi.descuadre)

    def test_cuadra(self):
        """cuadra() is True exactly when the entry balances to zero."""
        ap1 = APU()
        ap1.euros = 1000
        ap2 = APU()
        ap2.euros = -500
        ap3 = APU()
        ap3.euros = -500
        asi = Asiento(apuntes=[ap1, ap2, ap3])
        self.assertEqual(True, asi.cuadra())
        ap3.euros = -300
        self.assertEqual(False, asi.cuadra())

    def test_renumera(self):
        """renumera() assigns sequential 'orden' following list position.

        Uses assertEqual: assertEquals is a deprecated alias removed in
        modern Python.
        """
        ap1 = APU()
        ap1.euros = 1000
        ap2 = APU()
        ap2.euros = -500
        ap3 = APU()
        ap3.euros = -300
        asi = Asiento(apuntes=[ap1, ap2, ap3])
        asi.renumera()
        self.assertEqual([1, 2, 3],
                         [ap.orden for ap in asi.apuntes])
        asi2 = Asiento(apuntes=[ap2, ap3, ap1])
        asi2.renumera()
        self.assertEqual([1, 2, 3],
                         [ap.orden for ap in asi2.apuntes])
        # asi shares the same APU objects, so renumbering asi2 is visible here.
        self.assertEqual([3, 1, 2],
                         [ap.orden for ap in asi.apuntes])

    def test_reordena(self):
        """reordena() re-sorts the apuntes list by their 'orden' values."""
        ap1 = APU()
        ap1.euros = 1000
        ap2 = APU()
        ap2.euros = -500
        ap3 = APU()
        ap3.euros = -300
        asi = Asiento(apuntes=[ap1, ap2, ap3])
        asi.renumera()
        ap2.orden = 100
        asi.reordena()
        self.assertEqual([1000, -300, -500],
                         [ap.euros for ap in asi.apuntes])
        ap1.orden = 50
        asi.reordena()
        self.assertEqual([-300, 1000, -500],
                         [ap.euros for ap in asi.apuntes])
def auto_crea_cuentas(entorno, tipo, obj):
    """on_pre_bind hook: when an APU is about to be bound, make sure its
    account exists in the MAE table, creating a placeholder if missing."""
    if tipo != 'APU':
        return
    cuenta_buscada = obj.cuenta
    for existente in entorno.get_tabla_elemento('MAE'):
        if existente.cuenta == cuenta_buscada:
            return  # account already present; nothing to do
    nueva = MAE()
    nueva.cuenta = cuenta_buscada
    nueva.descripcion = "cuenta magica"
    entorno.bind(nueva)
def solo_pyg(entorno, sal):
    """Selector: keep only profit-and-loss accounts (groups 6 and 7)."""
    return sal.cuenta[0] in ('6', '7')
class TestContaSOL(unittest.TestCase):
    """Integration-style tests for the ContaSOL environment."""
    def test_numerador(self):
        """Auto-numbering starts at primer_asiento when vincular(autonum=True)."""
        e = ContaSOL(primer_asiento=10)
        apu = APU(euros=5, cuenta='4', fecha=date(2010, 2, 1), concepto="Apu a")
        apu2 = APU(euros=5, cuenta='4', fecha=date(2010, 2, 1), concepto="Apu a")
        asi = Asiento(apuntes=[apu, apu2])
        asi.vincular(e, autonum=True)
        self.assertEqual(10, apu.asiento)
    def test_autocierre(self):
        """auto_cierre builds regularization and closing entries from bound APUs."""
        e = ContaSOL()
        # Hook creates missing MAE accounts on the fly; AutoAcumulador keeps totals.
        e.on_pre_bind += auto_crea_cuentas
        AutoAcumulador(e)
        # Sale, purchase, collection and payment -- each a balanced pair.
        for c, importe in (('43000000', 1000), ('70000000', -1000)):
            apu = APU(euros=importe, cuenta=c, fecha=date(2010, 2, 1), concepto="Apu a")
            e.bind(apu)
        for c, importe in (('41000000', -500), ('60000000', 500)):
            apu = APU(euros=importe, cuenta=c, fecha=date(2010, 2, 2), concepto="Apu b")
            e.bind(apu)
        for c, importe in (('43000000', -1000), ('57000000', 1000)):
            apu = APU(euros=importe, cuenta=c, fecha=date(2010, 3, 2), concepto="Apu c")
            e.bind(apu)
        for c, importe in (('41000000', 500), ('57000000', -500)):
            apu = APU(euros=importe, cuenta=c, fecha=date(2010, 4, 2), concepto="Apu d")
            e.bind(apu)
        print_diario(e.get_tabla_elemento('APU'))
        # Regularization restricted to P&L accounts (groups 6/7) via solo_pyg.
        reg = e.auto_cierre('reg', date(2010, 12, 31), '12900001', 'Regularizar', selector=solo_pyg)
        asi = reg[0]
        self.assertEqual(-500, asi.apuntes[-1].euros)
        asi.vincular(e)
        # Closing entry over the remaining balances.
        cie = e.auto_cierre('cie', date(2010, 12, 31), '90000001', 'Cierre')
        self.assertEqual(2, len(cie[0].apuntes))
        cie[0].vincular(e)
        print_diario(e.get_tabla_elemento('APU'))
|
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import time
import io
import glob
import scipy.misc
import numpy as np
from six import BytesIO
from PIL import Image, ImageDraw, ImageFont
import tensorflow as tf
import os, sys
# os.environ['PYTHONPATH'] += "./models"
# import sys
# sys.path.append("./models")
sys.path.append(os.path.join(os.getcwd(), r"models\research"))
sys.path.append(os.path.join(os.getcwd(), r"models"))
from object_detection.utils import label_map_util
from object_detection.utils import config_util
from object_detection.utils import visualization_utils as viz_utils
from object_detection.builders import model_builder
def load_image_into_numpy_array(image):
    """Convert an image array to uint8 for feeding into the detection graph.

    By convention the array has shape (height, width, channels) with
    channels=3 for RGB, although any numpy array is accepted.

    Args:
        image: numpy array holding the image pixels.

    Returns:
        uint8 numpy array with the same shape as `image`.
    """
    # BUG FIX: the docstring described a `path` parameter this function never
    # had, and the dead unpack `(im_width, im_height, channel) = image.shape`
    # (values unused, and mis-labelled -- numpy shape order is h, w, c) forced
    # the input to be exactly 3-D.  Dropping it generalizes to any rank.
    return image.astype(np.uint8)
# Recover our saved model from the exported pipeline config + checkpoint.
pipeline_config = 'DownloadedModels/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/pipeline.config'
#generally you want to put the last ckpt from training in here
# NOTE(review): model_dir is never used below -- the checkpoint path is
# hard-coded in ckpt.restore(); confirm whether it should be used instead.
model_dir = 'ckpt-0'
print(os.getcwd())
configs = config_util.get_configs_from_pipeline_file(pipeline_config)
model_config = configs['model']
# Build the detection model in inference mode (no training ops).
detection_model = model_builder.build(
    model_config=model_config, is_training=False)
# Restore checkpoint
ckpt = tf.compat.v2.train.Checkpoint(
    model=detection_model)
ckpt.restore('DownloadedModels/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/ckpt-0')
#ckpt.restore(os.path.join('ckpt-0'))
def get_model_detection_function(model):
    """Get a tf.function for detection.

    Wrapping in tf.function compiles the preprocess/predict/postprocess chain
    into a graph, so repeated calls avoid eager-mode overhead.
    """
    @tf.function
    def detect_fn(image):
        """Detect objects in image.

        Returns (detections, prediction_dict, flattened preprocess shapes).
        """
        image, shapes = model.preprocess(image)
        prediction_dict = model.predict(image, shapes)
        detections = model.postprocess(prediction_dict, shapes)
        return detections, prediction_dict, tf.reshape(shapes, [-1])
    return detect_fn
detect_fn = get_model_detection_function(detection_model)
# Map label ids to display names for decoding inference results.
label_map_path = configs['eval_input_config'].label_map_path
label_map = label_map_util.load_labelmap(label_map_path)
categories = label_map_util.convert_label_map_to_categories(
    label_map,
    max_num_classes=label_map_util.get_max_label_map_index(label_map),
    use_display_name=True)
category_index = label_map_util.create_category_index(categories)
label_map_dict = label_map_util.get_label_map_dict(label_map, use_display_name=True)
import random
import numpy as np
import cv2
import tensorflow as tf
# Open the default webcam (device 0) and run detection frame-by-frame.
cap = cv2.VideoCapture(0)
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# NOTE(review): `out` is created but writing is commented out below, and it is
# never released -- confirm whether recording is still wanted.
out = cv2.VideoWriter('stb_out.avi',fourcc, 20.0, (640,480))
while(True):
    # Capture frame-by-frame
    # NOTE(review): `ret` is not checked; a failed read would crash downstream.
    ret,image_np = cap.read()
    image_np = load_image_into_numpy_array(image_np)
    # Add a batch dimension; the model expects float32 input.
    input_tensor = tf.convert_to_tensor(
        np.expand_dims(image_np, 0), dtype=tf.float32)
    start_t=time.time()
    detections, predictions_dict, shapes = detect_fn(input_tensor)
    end_t=time.time()
    # Print per-frame inference latency in seconds.
    print(end_t-start_t)
    # Model classes are 0-based; the label map is 1-based.
    label_id_offset = 1
    image_np_with_detections = image_np.copy()
    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections,
        detections['detection_boxes'][0].numpy(),
        (detections['detection_classes'][0].numpy() + label_id_offset).astype(int),
        detections['detection_scores'][0].numpy(),
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=200,
        min_score_thresh=.5,
        agnostic_mode=False,
    )
    # Display the resulting frame
    # out.write(image_np_with_detections)
    cv2.imshow('frame',image_np_with_detections)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
# How many species in GBIF?
# grep species ../../feed/gbif/in/taxon.txt | grep -v synonym | wc
# To test:
# ../../bin/jython taxonomy_metrics.py ../../t/tax/aster/ ../../tax/skel/ tmp_test_metrics.json tmp_test_contributions.csv
import os, sys, csv, json
from org.opentreeoflife.taxa import Taxonomy, Taxon, Rank
from org.opentreeoflife.conflict import ConflictAnalysis, Disposition
from org.opentreeoflife.smasher import AlignmentByName
# outpath is for the general stats JSON, and comes from the command line
def doit(ott, sep, outpath, conpath):
    """Produce the overall-metrics JSON and the per-source contributions CSV.

    NOTE(review): the `sep` parameter is not used directly here;
    source_breakdown_table reads the module-global `sep` set in __main__ --
    consider threading it through explicitly.
    """
    # Ruggiero conflict analysis is disabled (flag hard-coded to False).
    do_rug = False #os.path.isdir('out/ruggiero')
    if do_rug:
        rug = Taxonomy.getRawTaxonomy('out/ruggiero/', 'rug')
        # Prepare for conflict analysis
        # oh no, we really need a separation taxonomy to do that.
        rug_alignment = AlignmentByName(rug, ott)
        rug_alignment.align()
        rug_conflict = ConflictAnalysis(rug, ott, rug_alignment, True)
    overall_table(ott, outpath)
    source_breakdown_table(ott, conpath)
# Taxomachine suppresses NOT_OTU, ENVIRONMENTAL[_INHERITED],
# VIRAL, HIDDEN[INHERITED], and WAS_CONTAINER
# Flag mask for taxa that are left out of all counts below.
exclude_from_analysis = (Taxonomy.FORMER_CONTAINER |
                         Taxonomy.NOT_OTU)
def overall_table(ott, outpath):
    """Scan every taxon once, accumulate summary counts, and write them as a
    JSON report to outpath.  (Python 2 / Jython source.)"""
    excluded = 0
    syn_count = 0
    tip_count = 0
    internals = 0
    binomials = 0
    species = 0
    suppressed = 0
    extinct = 0
    incertae = 0
    unplaced = 0
    infra = 0
    barren = 0
    widest = None          # taxon with the most children seen so far
    names = {} # Number of nodes having a given name as primary name.
    for taxon in ott.taxa():
        all_flags = taxon.properFlags | taxon.inferredFlags
        # Skip taxa excluded from analysis entirely (merged containers etc.).
        if (all_flags & exclude_from_analysis) != 0:
            excluded += 1
            continue
        syn_count += len(taxon.synonyms)
        if taxon.hasChildren():
            internals += 1
        else:
            tip_count += 1
        # Binomial names at species rank or below (or unranked).
        if (Taxon.isBinomial(taxon.name) and
            (taxon.rank == Rank.NO_RANK or taxon.rank.level >= Rank.SPECIES_RANK.level)):
            binomials += 1
        if taxon.isHidden(): suppressed += 1
        if taxon.isExtinct(): extinct += 1
        if taxon.properFlags & Taxonomy.INCERTAE_SEDIS_ANY != 0:
            incertae += 1
        if taxon.properFlags & Taxonomy.UNPLACED != 0:
            unplaced += 1
        if all_flags & Taxonomy.INFRASPECIFIC != 0:
            infra += 1
        if all_flags & Taxonomy.BARREN != 0:
            barren += 1
        if taxon.rank == Rank.SPECIES_RANK:
            species += 1
        if taxon.name != None:
            names[taxon.name] = names.get(taxon.name, 0) + 1
        if widest == None or len(taxon.getChildren()) > len(widest.getChildren()):
            widest = taxon
    # Homonyms
    # Classify them somehow?
    poly = 0
    poly_species = 0
    poly_genera = 0
    poly_list = []
    for name in names:
        semies = names[name] # How many nodes with this name as primary?
        if semies > 1:
            poly += 1
            speciesp = False
            genusp = False
            # Check whether any holder of this homonym is a species / a genus.
            for node in ott.lookup(name):
                if node.taxon() == node:
                    all_flags = node.properFlags | node.inferredFlags
                    if (all_flags & exclude_from_analysis) != 0:
                        continue
                    if node.rank == Rank.SPECIES_RANK:
                        speciesp = True
                    elif node.rank == Rank.GENUS_RANK:
                        genusp = True
            if speciesp: poly_species += 1
            if genusp: poly_genera += 1
            # get taxa = ott.lookup(name), get mrca, look at size of mrca
            # poly_list collects heavy homonyms (5+ holders) but is not reported.
            if semies > 4:
                poly_list.append((semies, name))
    report = {}
    report['node_count'] = ott.count()
    report['absorbed'] = excluded
    report['synonym_count'] = syn_count
    report['internal_node_count'] = internals
    report['tip_count'] = tip_count
    report['species'] = species
    report['binomials'] = binomials
    report['homonym_count'] = poly
    report['species_homonym_count'] = poly_species
    report['genus_homonym_count'] = poly_genera
    # -1 so the forest pseudo-root does not count as a level.
    report['max_depth'] = (max_depth(ott.forest) - 1)
    report['max_children'] = len(widest.getChildren())
    report['incertae_sedis_count'] = incertae
    report['extinct_count'] = extinct
    report['infraspecific_count'] = infra
    report['barren'] = barren
    print 'Writing', outpath
    with open(outpath, 'w') as outfile:
        json.dump(report, outfile, indent=2)
def dashify(x):
    """Render zero as '-' so summary tables read cleanly; pass others through."""
    return '-' if x == 0 else x
def fix_prefix(qid):
    """Bucket curator-supplied source ids (http..., additions...) as 'curated';
    leave all other source prefixes unchanged."""
    prefix = qid.prefix
    if prefix.startswith(('http', 'additions')):
        return 'curated'
    return prefix
def source_breakdown_table(ott, conpath):
    """Count, per source taxonomy, how many taxa it contributed, aligned,
    merged or conflicted, and write the table as CSV to conpath.

    NOTE(review): relies on the module-global `sep` (separation taxonomy) set
    in __main__ -- calling this from an import would raise NameError.
    """
    contributed = {}
    aligned = {}
    merged = {}
    inconsistent = {}
    for taxon in ott.taxa():
        # Note as 'aligned' all but first qid
        if taxon.sourceIds != None:
            firstp = True
            for qid in taxon.sourceIds:
                if firstp == True:
                    firstp = False
                else:
                    prefix = fix_prefix(qid)
                    aligned[prefix] = aligned.get(prefix, 0) + 1
            # Taxa present in the separation taxonomy are credited to it.
            if sep.lookupId(taxon.id) != None:
                prefix = 'separation'
            else:
                prefix = fix_prefix(taxon.sourceIds[0])
        else:
            prefix = 'curated'
        all_flags = taxon.properFlags | taxon.inferredFlags
        if (all_flags & exclude_from_analysis) != 0:
            # Excluded taxa are classified as merged or inconsistent absorptions.
            if (taxon.properFlags & Taxonomy.MERGED) != 0:
                merged[prefix] = merged.get(prefix, 0) + 1
            elif (taxon.properFlags & Taxonomy.INCONSISTENT) != 0:
                inconsistent[prefix] = inconsistent.get(prefix, 0) + 1
        else:
            contributed[prefix] = contributed.get(prefix, 0) + 1
    # Display order of sources (priority order of the assembly); unknown last.
    source_order = {'separation': -1,
                    'silva': 0,
                    'h2007': 1,
                    'if': 2,
                    'worms': 3,
                    'study713': 4,
                    'ncbi': 5,
                    'gbif': 6,
                    'irmng': 7,
                    'curated': 8}
    sources = sorted(contributed.keys(), key=lambda(src): source_order.get(src, 99))
    total_first = 0
    total_merged = 0
    total_inconsistent = 0
    total_aligned = 0
    total_total = 0
    table = []
    table.append(['source', 'total', 'copied', 'aligned', 'absorbed', 'conflict'])
    for source in sources:
        con = contributed.get(source, 0)
        al = aligned.get(source, 0)
        mer = merged.get(source, 0)
        inc = inconsistent.get(source, 0)
        tot = con + al + mer + inc
        table.append([source, tot, con, al, mer, inc])
        total_first += con
        total_aligned += al
        total_merged += mer
        total_inconsistent += inc
        total_total += tot
    table.append(['total', total_total, total_first, total_aligned, dashify(total_merged), dashify(total_inconsistent)])
    print 'Writing', conpath
    with open(conpath, 'w') as outfile:
        dump_table_as_csv(table, outfile)
# Unused
def show_contributions():
    """Print the contributions table in human-readable (markdown) form.

    NOTE(review): references `table`, which is a local of
    source_breakdown_table, not a global -- calling this as-is would raise
    NameError.  Kept only for reference, as the 'Unused' marker says.
    """
    # Show table in human-readable form
    print
    print '```'
    format_string = '%10s %9s %9s %9s %9s %9s'
    for row in table:
        print format_string % tuple(row)
    print '```'
    print """
Key:
 * source = name of source taxonomy
 * total = total number of nodes in source
 * copied = total number of nodes originating from this source (copied)
 * aligned = number of source nodes aligned and copied
 * absorbed = number of source nodes absorbed (not copied)
 * conflict = number of inconsistent source nodes (not copied)
"""
def dump_table_as_csv(table, outfile):
    """Write the rows of `table` to `outfile` in CSV form (for Pensoft)."""
    csv.writer(outfile).writerows(table)
def max_depth(node):
    """Depth of the deepest descendant below `node` (0 for a leaf)."""
    children = node.getChildren()
    if not children:
        return 0
    return 1 + max(max_depth(child) for child in children)
if __name__ == '__main__':
    # Usage: taxonomy_metrics.py TAXDIR SEPDIR OUT.json CONTRIB.csv
    taxpath = sys.argv[1]
    seppath = sys.argv[2]
    outpath = sys.argv[3] # general report, JSON
    conpath = sys.argv[4] # contributions, CSV
    # `sep` is read as a global by source_breakdown_table.
    sep = Taxonomy.getRawTaxonomy(seppath, 'ott')
    ott = Taxonomy.getRawTaxonomy(taxpath, 'ott')
    ott.inferFlags()
    doit(ott, sep, outpath, conpath)
|
# coding: utf-8
# to run every hour between 0700 & 2100 using CRON
# for instance using pythonanywhere
# * 7-21 * * * /home/ubuntu/cron/testwater.py >/dev/null 2>&1
import datetime
import sys
now = datetime.datetime.now()
print(now.hour)
# NOTE(review): the hour guard below is commented out, so sys.exit() runs
# unconditionally and nothing after this line ever executes.  Confirm whether
# this is a deliberate kill-switch or a debugging leftover.
#if (now.hour < 6) | (now.hour > 20): #UTC
sys.exit()
# CONFIG
# Twilio credentials and sender number -- fill in before deploying.
account_sid = ""
auth_token = ""
from_nr = "" #+32...
# Dans Twilio : Configuration de la réponse aux messages envoyés à ce numéro (TwiML bin)
# <Response>
# <Message>
# Message de Poppy Alert. Utile? Parlez-en autour de vous.Plus utile? Envoyez NOALERT à ce numéro
# </Message>
# </Response>
# END CONFIG
#TODO
# all messages before => redact
# alternatively, send with facebook, telegram, etc
# ensuite : toute la gestion, l'interface client, le GDPR, ...
# critère de non envoi pourrait être plus
import requests
import bs4 as BeautifulSoup
from tabulate import tabulate
from operator import itemgetter
import datetime
from twilio.rest import TwilioRestClient
client = TwilioRestClient(account_sid, auth_token) # ! version 5 !!! -> pip install twilio==5.7.0
newtable = []
###########################################################
#GET SUBSCRIBERS
#EITHER MANUALLY
#recipients = [['NUMBER', 'STATION']] # could be taken from the SMS sent to this number //
#OR BY CHECKING THE INCOMING MESSAGES
recipients = []
# NOTE(review): parameter is `to_` here but `to=` in the calls further down --
# verify against the twilio 5.x API which spelling list() actually accepts.
messages = client.messages.list(to_=from_nr, page_size=1000)
subscribers = {}
# Keep one message per sender; iterating in reverse means the newest message
# wins, assuming the API returns newest-first -- TODO confirm.
for message in reversed(messages):
    subscribers[message.from_] = message.body.upper()
for subscriber in subscribers:
    msg = subscribers[subscriber]
    # A subscription message looks like "SUBSCRIBE <STATION>".
    if msg.find("SUBSCRIBE") == 0:
        print(subscriber, msg)
        sta = msg.split(' ')
        if len(sta) > 1:
            recipients.append([subscriber, sta[1]])
print(recipients)
#END GET SUBSCRIBERS
###########################################################
###########################################################
# Scrape the flood-status table of every hydrological station (ids 0..88).
for i in range(0,89):
    r = requests.get('http://voies-hydrauliques.wallonie.be/opencms/opencms/fr/hydro/Actuelle/crue/cruetableau.do?id=' + str(i))
    if r.status_code != 200:
        continue
    html = r.text
    soup = BeautifulSoup.BeautifulSoup(html, "html.parser")
    table = soup.find('table', {"summary":True})
    rows = table.findAll('tr')
    what = ''
    for row in rows:
        # Section headers live in <th><strong>; remember the current one.
        ch = row.find('th')
        if ch != None:
            ch = ch.find('strong')
            if ch != None:
                what = ch.text
        cells = row.findAll('td')
        if cells == None:
            continue
        newrow = [what]
        for cell in cells:
            if cell == None:
                continue;
            t = cell.text.strip()
            if t == None:
                continue
            if t == '':
                # Empty cells may carry their value in an <img alt="..."> icon.
                t = cell.find('img')
                if t == None:
                    continue
                else:
                    t = t.attrs['alt']
                    # Skip the "see the station sheet" link icons.
                    if 'la fiche signal' in t:
                        continue
            if t == '':
                continue
            newrow.append(t)
        newtable.append(newrow)
# Drop header-only rows and duplicates, then sort by station name (column 1).
mytable = []
for row in newtable:
    if len(row) < 2:
        continue
    if row in mytable:
        continue
    mytable.append(row)
mytable=sorted(mytable, key=itemgetter(1))
print(tabulate(mytable))
# For every subscriber whose station is not in 'Normale' state, send an SMS --
# unless one was already sent today or yesterday (basic rate limiting).
for r in recipients:
    for e in mytable:
        # `&` on bools acts as logical AND here (both operands are bool).
        if (e[1] == r[1]) & (e[3] != 'Normale'): # for testing: use == 'Normale'
            body="La station " + e[1] + " est en situation " + e[3] + ". Infos via http://voies-hydrauliques.wallonie.be/opencms/opencms/fr/hydro/Actuelle/crue/index.html. Message de Poppy Alert. Utile? Parlez-en autour de vous ou aidez-nous via http://paypal.me/ccloquet. Plus utile? Envoyez NOALERT à ce numéro"
            print (body)
            # if something was already received today or yesterday -> send nothing
            # this check could be smarter
            today = datetime.date.today()
            messages = client.messages.list(to=r[0], from_=from_nr, date_sent=today)
            if len(messages) > 0:
                print('*')
                continue
            yesterday = datetime.date.fromordinal(datetime.date.today().toordinal()-1)
            messages = client.messages.list(to=r[0], from_=from_nr, date_sent=yesterday)
            if len(messages) > 0:
                print('*')
                continue
            print('sending SMS to ' + r[0])
            #send SMS
            message = client.messages.create(to=r[0], from_=from_nr, body=body)
|
<reponame>comzyh/SRGAN_impl
import tensorflow as tf
def srresnet_preprocess(images):
    """Map uint8 pixel values in [0, 255] to float tensors in [-1, 1]."""
    as_float = tf.to_float(images)
    return as_float / 127.5 - 1
def srresnet_postprocess(images):
    """Map network outputs in [-1, 1] back to pixel values in [0, 255]."""
    shifted = images + 1
    return shifted * 127.5
def SRResNet(images, training, reuse=False, residual_blocks_num=16):
    """SRResNet generator (Ledig et al., "Photo-Realistic Single Image
    Super-Resolution Using a Generative Adversarial Network").

    Args:
        images: preprocessed low-resolution input batch.
        training: batch-normalization training flag.
        reuse: NOTE(review) -- accepted but never forwarded to any layer in
            this function; confirm whether variable reuse was intended here.
        residual_blocks_num: number of k3n64s1 residual blocks.

    Returns:
        Super-resolved image tensor (2x upscaled, 3 channels).
    """
    with tf.variable_scope("SRResNet"):
        # k9n64s1 input convolution (unnamed: gets TF's default layer name)
        x = tf.layers.conv2d(
            inputs=images, filters=64, kernel_size=(9, 9), strides=(1, 1),
            padding='same', use_bias=False,
        )
        x = tf.nn.leaky_relu(x)
        # Long skip connection around all residual blocks.
        skip_all_residual_blocks = x
        # residual blocks: conv-BN-lrelu-conv-BN plus identity skip
        for i in range(residual_blocks_num):
            skip = x
            x = tf.layers.conv2d(
                inputs=x, filters=64, kernel_size=(3, 3), strides=(1, 1),
                padding='same', use_bias=False,
                name='k3n64s1/{}/conv1'.format(i)
            )
            x = tf.layers.batch_normalization(
                inputs=x, axis=3, fused=True, scale=True,
                training=training,
                name='k3n64s1/{}/bn1'.format(i)
            )
            x = tf.nn.leaky_relu(x)
            x = tf.layers.conv2d(
                inputs=x, filters=64, kernel_size=(3, 3), strides=(1, 1),
                padding='same', use_bias=False,
                name='k3n64s1/{}/conv2'.format(i)
            )
            x = tf.layers.batch_normalization(
                inputs=x, axis=3, fused=True, scale=True,
                training=training,
                name='k3n64s1/{}/bn2'.format(i)
            )
            x = skip + x
        # k3n64s1 post-block; note `i` here is the leaked loop variable, so the
        # layer is named as block residual_blocks_num (i + 1 past the last index).
        x = tf.layers.conv2d(
            inputs=x, filters=64, kernel_size=(3, 3), strides=(1, 1),
            padding='same', use_bias=False,
            name='k3n64s1/{}/conv1'.format(i + 1)
        )
        x = tf.layers.batch_normalization(
            inputs=x, axis=3, fused=True, scale=True,
            training=training,
            name='k3n64s1/{}/bn1'.format(i + 1)
        )
        x = skip_all_residual_blocks + x
        # k3n256s1 pixel-shuffle upscaling; one iteration -> a single 2x stage.
        for i in range(1): # we use 2x upscaleing
            # 256 = 64 * 2 * 2
            x = tf.layers.conv2d(
                inputs=x, filters=256, kernel_size=(3, 3), strides=(1, 1),
                padding='same', use_bias=False,
                name='k3n256s1/{}/conv1'.format(i)
            )
            # reduce channel, increase size
            # https://www.tensorflow.org/api_docs/python/tf/depth_to_space
            x = tf.depth_to_space(x, 2)
            x = tf.nn.leaky_relu(x)
        # k9n3s1 output convolution back to 3 (RGB) channels
        x = tf.layers.conv2d(
            inputs=x, filters=3, kernel_size=(9, 9), strides=(1, 1),
            padding='same', use_bias=False,
            name='k9n3s1'
        )
        return x
def SRGAN_discriminator(images, training, reuse=False):
    """SRGAN discriminator (Ledig et al.): classifies 96x96 crops as real
    high-resolution images vs. generator output.

    Args:
        images: input batch; resized to 96x96 with bicubic interpolation.
        training: batch-normalization training flag.
        reuse: reuse variables of a previously built discriminator.

    Returns:
        (logits, confidence): raw scores and sigmoid probabilities.
    """
    images = tf.image.resize_bicubic(images, (96, 96))
    with tf.variable_scope("SRGAN_D"):
        # k3n64s1 input block (no BN, per the paper)
        x = tf.layers.conv2d(
            inputs=images, filters=64, kernel_size=(3, 3), strides=(1, 1),
            padding='same', use_bias=False, reuse=reuse, name='conv1',
        )
        x = tf.nn.leaky_relu(x)
        # conv blocks k3n{f}s{s}: filters double every other block, strides
        # alternate 2/1 as in the paper's architecture table
        filter_list = [64, 128, 128, 256, 256, 512, 512]
        stride_list = [2, 1, 2, 1, 2, 1, 2]
        for f, s in zip(filter_list, stride_list):
            # BUG FIX: strides was hard-coded to (2, 2); stride_list and the
            # layer names 'k3n{f}s{s}' clearly intend per-block strides (s, s).
            x = tf.layers.conv2d(
                inputs=x, filters=f, kernel_size=(3, 3), strides=(s, s),
                padding='same', use_bias=False, reuse=reuse,
                name='k3n{}s{}/conv'.format(f, s)
            )
            x = tf.layers.batch_normalization(
                inputs=x, axis=3, fused=True, scale=True,
                training=training, reuse=reuse,
                name='k3n{}s{}/bn'.format(f, s)
            )
            x = tf.nn.leaky_relu(x)
        # dense(1000) head, then a single logit
        x = tf.layers.flatten(x, name='flatten')
        x = tf.layers.dense(x, units=1000, reuse=reuse, name='dense1')
        x = tf.nn.leaky_relu(x)
        logits = tf.layers.dense(x, units=1, reuse=reuse, name='dense2')
        confidence = tf.sigmoid(logits)
        return logits, confidence
|
import csv
import json
import logging
import os.path
import sys
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User, Group
from django.conf import settings
from perfiles.models import Perfil
# Demo accounts seeded by this management command.  The '<PASSWORD>' values are
# placeholders redacted at export time -- replace them with real demo
# passwords before running.  Each entry carries the User fields, the group
# memberships, and the Perfil defaults (provincia distinguishes the users).
user_info = [
    {
        'user_name': 'pruebas',
        'first_name': 'Pruebas',
        'last_name': 'Usuario',
        'password': '<PASSWORD>',
        'grupos': [],
        'perfil': {
            'primer_nombre': 'Pruebas',
            'primer_apellido': 'Usuario',
            'provincia': '01',
            'clase': '00',
            'tomo': '000',
            'folio': '0000',
            'cod_sede': '00',
            'cod_facultad': '00',
            'cod_escuela': '00',
            'cod_departamento': '00'
        }
    },
    {
        'user_name': 'profesor',
        'first_name': 'Profesor',
        'last_name': 'Usuario',
        'password': '<PASSWORD>',
        'grupos': ['Profesores'],
        'perfil': {
            'primer_nombre': 'Profesor',
            'primer_apellido': 'Usuario',
            'provincia': '02',
            'clase': '00',
            'tomo': '000',
            'folio': '0000',
            'cod_sede': '00',
            'cod_facultad': '00',
            'cod_escuela': '00',
            'cod_departamento': '00'
        }
    },
    {
        'user_name': 'departamento',
        'first_name': 'Departamento',
        'last_name': 'Usuario',
        'password': '<PASSWORD>',
        'grupos': ['Director de Departamento'],
        'perfil': {
            'primer_nombre': 'Departamento',
            'primer_apellido': 'Usuario',
            'provincia': '03',
            'clase': '00',
            'tomo': '000',
            'folio': '0000',
            'cod_sede': '00',
            'cod_facultad': '00',
            'cod_escuela': '00',
            'cod_departamento': '00'
        }
    },
    {
        'user_name': 'escuela',
        'first_name': 'Escuela',
        'last_name': 'Usuario',
        'password': '<PASSWORD>',
        'grupos': ['Director de Escuela'],
        'perfil': {
            'primer_nombre': 'Escuela',
            'primer_apellido': 'Usuario',
            'provincia': '04',
            'clase': '00',
            'tomo': '000',
            'folio': '0000',
            'cod_sede': '00',
            'cod_facultad': '00',
            'cod_escuela': '00',
            'cod_departamento': '00'
        }
    },
    {
        'user_name': 'comision',
        'first_name': 'Comision',
        'last_name': 'Usuario',
        'password': '<PASSWORD>',
        'grupos': ['Comision de Anteproyecto'],
        'perfil': {
            'primer_nombre': 'Comision',
            'primer_apellido': 'Usuario',
            'provincia': '05',
            'clase': '00',
            'tomo': '000',
            'folio': '0000',
            'cod_sede': '00',
            'cod_facultad': '00',
            'cod_escuela': '00',
            'cod_departamento': '00'
        }
    },
    {
        'user_name': 'administrativo',
        'first_name': 'Administrativo',
        'last_name': 'Usuario',
        'password': '<PASSWORD>',
        'grupos': ['Administrativos'],
        'perfil': {
            'primer_nombre': 'Administrativo',
            'primer_apellido': 'Usuario',
            'provincia': '06',
            'clase': '00',
            'tomo': '000',
            'folio': '0000',
            'cod_sede': '00',
            'cod_facultad': '00',
            'cod_escuela': '00',
            'cod_departamento': '00'
        }
    },
    {
        'user_name': 'decano',
        'first_name': 'Decano',
        'last_name': 'Usuario',
        'password': '<PASSWORD>',
        'grupos': ['Decanos'],
        'perfil': {
            'primer_nombre': 'Decano',
            'primer_apellido': 'Usuario',
            'provincia': '07',
            'clase': '00',
            'tomo': '000',
            'folio': '0000',
            'cod_sede': '00',
            'cod_facultad': '00',
            'cod_escuela': '00',
            'cod_departamento': '00'
        }
    }
]
class Command(BaseCommand):
    # Management command: seeds the demo users declared in user_info.
    help = 'Carga usuarios demo'
    def handle(self, *args, **options):
        """Create or update each demo user, its groups and its Perfil."""
        for data in user_info:
            u, created = User.objects.get_or_create(username=data['user_name'], first_name=data['first_name'], last_name=data['last_name'])
            if created:
                # Password is only set on first creation, never on re-runs.
                u.set_password(data['password'])
                u.save()
            for grupo in data['grupos']:
                try:
                    g = Group.objects.get(name=grupo.title())
                    u.groups.add(g)
                    print('Usuario agregado {} al grupo: {}'.format(u.username, g.name))
                except ObjectDoesNotExist as exc:
                    # Missing group: report it and continue with the next one.
                    print(exc)
            # Unlike the user, the profile IS refreshed on every run.
            p, created = Perfil.objects.update_or_create(usuario=u, defaults=data['perfil'])
            if created:
                print("Perfil creado {0}".format(p))
|
<gh_stars>0
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import threading
from typing import Any, Dict, Set
from typing import Callable
# pylint: disable=pointless-string-statement
"""
Trackers that record all hyperparameter accesses.
"""
# Module-level registries shared by every HyperParameter/_Accessor in the
# process; the doctest examples clear them explicitly before use.
_read_tracker: Set[str] = set()
_write_tracker: Set[str] = set()
def reads():
    """
    Get hyperparameter read operations.
    Returns:
        List[str]: hyperparameter read operations
    Examples:
        >>> _read_tracker.clear()
        >>> hp = HyperParameter(a=1, b={'c': 2})
        >>> reads() # no read operations
        []
        >>> hp.a # accessing parameter directly
        1
        >>> reads() # not tracked
        []
        >>> hp().a() # accessing with accessor
        1
        >>> reads() # tracked!
        ['a']
    """
    return sorted(_read_tracker)
def writes():
    """
    Get hyperparameter write operations.
    Returns:
        List[str]: hyperparameter write operations
    Examples:
        >>> _write_tracker.clear()
        >>> hp = HyperParameter(a=1, b={'c': 2})
        >>> writes()
        []
        >>> hp.a = 1
        >>> writes()
        []
        >>> hp().a = 1
        >>> hp().a.b.c = 1
        >>> writes()
        ['a', 'a.b.c']
    """
    return sorted(_write_tracker)
def all_params():
    """
    Get all tracked hyperparameters (reads and writes combined, sorted).
    """
    return sorted(_read_tracker | _write_tracker)
class _Accessor(dict):
    """
    Helper for accessing hyper-parameters.
    When reading an undefined parameter, the accessor will:
    1. return false in `if` statement:
    >>> params = HyperParameter()
    >>> if not params.undefined_int: print("parameter undefined")
    parameter undefined
    2. support default value for undefined parameter
    >>> params = HyperParameter()
    >>> params.undefined_int.get_or_else(10)
    10
    3. support to create nested parameter:
    >>> params = HyperParameter()
    >>> params.undefined_object.undefined_prop = 1
    >>> print(params)
    {'undefined_object': {'undefined_prop': 1}}
    """
    def __init__(self, root, path=None):
        # root is the owning HyperParameter; path is the dotted name accumulated
        # so far (None at the top).  Stored as dict items because __setattr__ is
        # overridden below.
        super().__init__()
        self._root = root
        self._path = path
    def get_or_else(self, default: Any = None):
        """
        Get value for the parameter, or get default value if the parameter is not defined.
        """
        # Every accessor read is recorded in the module-level read tracker.
        _read_tracker.add(self._path)
        value = self._root.get(self._path)
        return default if value is None else value
    def __getattr__(self, name: str) -> Any:
        # The two internal fields live in the dict itself; everything else
        # extends the dotted path and returns a deeper accessor lazily.
        if name in ['_path', '_root']:
            return self[name]
        if self._path:
            name = '{}.{}'.format(self._path, name)
        return _Accessor(self._root, name)
    def __setattr__(self, name: str, value: Any):
        # Internal fields are stored as dict items; real assignments are
        # tracked and delegated to root.put (which creates nested nodes).
        if name in ['_path', '_root']:
            return self.__setitem__(name, value)
        full_name = '{}.{}'.format(self._path,
                                   name) if self._path is not None else name
        _write_tracker.add(full_name)
        root = self._root
        root.put(full_name, value)
        # for path in self._path.split('.'):
        #     root[path] = HyperParameter()
        #     root = root[path]
        # root[name] = value
        # NOTE: returning a value from __setattr__ is unconventional; Python
        # ignores it in normal attribute-assignment syntax.
        return value
    def __str__(self):
        # An undefined parameter renders as empty text ...
        return ''
    def __bool__(self):
        # ... and is falsy, so `if not params.missing:` works.
        return False
    def __call__(self, default: Any = None) -> Any:
        """
        shortcut for get_or_else
        """
        return self.get_or_else(default)
    # Python 2 spelling of __bool__, kept for compatibility.
    __nonzero__ = __bool__
class _CallHolder(dict):
    """
    Helper for tracking function calls.
    Examples:
    >>> ch = _CallHolder()
    >>> ch.my.foo(a=1,b=2)
    ('my.foo', (), {'a': 1, 'b': 2})
    >>> ch.myspace2.gee(c=1,d=2)
    ('myspace2.gee', (), {'c': 1, 'd': 2})
    """
    @staticmethod
    def default_callback(path, *arg, **kws):
        # Default behavior: just echo the dotted path and the call arguments.
        return (path, arg, kws)
    def __init__(self, callback: Callable=None, path=None):
        # Like _Accessor, internal fields are dict items because __setattr__
        # is overridden.
        super().__init__()
        self._callback = callback if callback is not None else _CallHolder.default_callback
        self._path = path
    def __getattr__(self, name: str) -> Any:
        # Attribute access extends the dotted path with a fresh holder.
        if name in ['_path', '_callback']:
            return self[name]
        if self._path:
            name = '{}.{}'.format(self._path, name)
        return _CallHolder(self._callback, name)
    def __setattr__(self, name: str, value: Any):
        # Only the internal fields are assignable; other writes are ignored.
        if name in ['_path', '_callback']:
            return self.__setitem__(name, value)
        return
    def __call__(self, *args: Any, **kwds: Any) -> Any:
        # Invoking the holder fires the callback with the accumulated path.
        return self._callback(self._path, *args, **kwds)
class HyperParameter(dict):
    """
    HyperParameter is an extended dict with features for better parameter management.
    A HyperParameter can be created with:
    >>> hp = HyperParameter(param1=1, param2=2, obj1={'propA': 'A'})
    or
    >>> hp = HyperParameter(**{'param1': 1, 'param2': 2, 'obj1': {'propA': 'A'}})
    Once the HyperParameter object is created, you can access the values using the object-style api:
    >>> hp.param1
    1
    >>> hp.obj1.propA
    'A'
    or using the dict-style api (for legacy codes):
    >>> hp['param1']
    1
    >>> hp['obj1']['propA']
    'A'
    The object-style api also support creating or updating the parameters:
    >>> hp.a.b.c = 1
    which avoid maintaining the dict data manually like this:
    >>> hp = {}
    >>> if 'a' not in hp: hp['a'] = {}
    >>> if 'b' not in hp['a']: hp['a']['b'] = {}
    >>> hp['a']['b']['c'] = 1
    You can also create a parameter with a string name:
    >>> hp = HyperParameter()
    >>> hp.put('a.b.c', 1)
    """
    def __init__(self, **kws):
        super(HyperParameter, self).__init__() # pylint: disable=super-with-arguments
        self.update(kws)
    def update(self, kws):
        """
        Recursively merge ``kws`` into the parameters.
        Nested dicts are merged key-by-key rather than replaced wholesale.
        """
        for k, v in kws.items():
            if isinstance(v, dict):
                if k in self and isinstance(self[k], dict):
                    vv = HyperParameter(**self[k])
                    vv.update(v)
                    v = vv
                else:
                    v = HyperParameter(**v)
            self[k] = v
    def put(self, name: str, value: Any):
        """
        put/update a parameter with a string name
        Args:
            name (str): parameter name, 'obj.prop' is supported
            value (Any): parameter value
        Examples:
        >>> cfg = HyperParameter()
        >>> cfg.put('param1', 1)
        >>> cfg.put('obj1.propA', 'A')
        >>> cfg.param1
        1
        >>> cfg.obj1.propA
        'A'
        """
        path = name.split('.')
        obj = self
        for p in path[:-1]:
            # Create intermediate nodes, overwriting non-dict leaves on the way.
            if p not in obj or (not isinstance(obj[p], dict)):
                obj[p] = HyperParameter()
            obj = obj[p]
        _write_tracker.add(name)
        # Numeric-looking strings are stored as int/float (see safe_numeric).
        obj[path[-1]] = safe_numeric(value)
    def get(self, name: str) -> Any:
        """
        get a parameter by a string name
        Args:
            name (str): parameter name
        Returns:
            Any: parameter value, or None when the parameter is not defined
        Examples:
        >>> cfg = HyperParameter(a=1, b = {'c':2, 'd': 3})
        >>> cfg.get('a')
        1
        >>> cfg.get('b.c')
        2
        >>> cfg.get('a.undefined') is None
        True
        """
        path = name.split('.')
        obj = self
        for p in path[:-1]:
            # Bail out when the path runs through a missing key or a non-dict
            # leaf; previously e.g. cfg.get('a.b') with a=1 raised TypeError
            # on the final membership test ('b' in 1).
            if not isinstance(obj, dict) or p not in obj:
                return None
            obj = obj[p]
        _read_tracker.add(name)
        if isinstance(obj, dict) and path[-1] in obj:
            return obj[path[-1]]
        return None
    def __setitem__(self, key, value):
        """
        set value and convert the value into `HyperParameter` if necessary
        """
        if isinstance(value, dict):
            return dict.__setitem__(self, key, HyperParameter(**value))
        return dict.__setitem__(self, key, value)
    def __getattr__(self, name):
        """
        read parameter with object-style api
        Returns an `_Accessor` placeholder when the parameter is undefined,
        so chained lookups like ``hp.a.b`` never raise.
        Examples:
        for simple parameters:
        >>> hp = HyperParameter(a=1, b = {'c':2, 'd': 3})
        >>> hp.a
        1
        for nested parameters:
        >>> hp.b.c
        2
        """
        if name in self.keys():
            return self[name]
        else:
            if name in self.__dict__.keys():
                return self.__dict__[name]
            return _Accessor(self, name)
    def __setattr__(self, name, value):
        """
        create/update parameter with object-style api
        Examples:
        >>> hp = HyperParameter(a=1, b = {'c':2, 'd': 3})
        >>> hp.e = 4
        >>> hp['e']
        4
        """
        self[name] = value
    def __call__(self) -> Any:
        """
        Return a parameter accessor.
        Returns:
            Any: holder of the current parameter
        Examples:
        >>> cfg = HyperParameter(a=1, b = {'c':2, 'd': 3})
        >>> cfg().a.get_or_else('default') # default value for simple parameter
        1
        >>> cfg().b.c.get_or_else('default') # default value for nested parameter
        2
        >>> cfg().b.undefined.get_or_else('default')
        'default'
        """
        return _Accessor(self, None)
    def callholder(self, callback: Callable = None):
        """
        Return a call holder.
        Examples:
        >>> ch = param_scope().callholder()
        >>> ch.my.foo(a=1,b=2)
        ('my.foo', (), {'a': 1, 'b': 2})
        >>> ch.myspace2.gee(c=1,d=2)
        ('myspace2.gee', (), {'c': 1, 'd': 2})
        """
        return _CallHolder(callback)
    @staticmethod
    def loads(s):
        """
        Load parameters from JSON string, similar as `json.loads`.
        """
        obj = json.loads(s)
        return HyperParameter(**obj)
    @staticmethod
    def load(f):
        """
        Load parameters from json file, similar as `json.load`.
        """
        obj = json.load(f)
        return HyperParameter(**obj)
class param_scope(HyperParameter): # pylint: disable=invalid-name
    """
    Thread-local, scoped hyperparameter container.
    Examples:
    create a scoped HyperParameter
    >>> with param_scope(**{'a': 1, 'b': 2}) as cfg:
    ...     print(cfg.a)
    1
    read parameter in a function
    >>> def foo():
    ...    with param_scope() as cfg:
    ...        return cfg.a
    >>> with param_scope(**{'a': 1, 'b': 2}) as cfg:
    ...     foo() # foo should get cfg using a with statement
    1
    update some config only in new scope
    >>> with param_scope(**{'a': 1, 'b': 2}) as cfg:
    ...     cfg.b
    ...     with param_scope(**{'b': 3}) as cfg2:
    ...         cfg2.b
    2
    3
    """
    # One scope stack per thread; the top of the stack is the active scope.
    tls = threading.local()
    def __init__(self, *args, **kws):
        # Inherit parameters from the enclosing scope, then apply overrides.
        history = getattr(param_scope.tls, 'history', None)
        if history:
            self.update(history[-1])
        self.update(kws)
        # Positional arguments are 'name=value' strings, e.g. 'foo.b=3'.
        for expr in args:
            if '=' in expr:
                key, value = expr.split('=', 1)
                self.put(key, value)
    def __enter__(self):
        tls = param_scope.tls
        if not hasattr(tls, 'history'):
            tls.history = []
        tls.history.append(self)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        param_scope.tls.history.pop()
    @staticmethod
    def init(params):
        """
        init param_scope for a new thread.
        """
        tls = param_scope.tls
        if not hasattr(tls, 'history'):
            tls.history = []
        tls.history.append(params)
"""
Tracker callback for auto_param
"""
_callback: Callable = None
def set_auto_param_callback(func: Callable[[Dict[str, Any]], None]):
    """ report hyperparameter value to a tracker, for example, `mlflow.tracking`
    Args:
        func: callable receiving a dict of {full parameter name: value},
              invoked on every call of an `auto_param`-decorated function.
    """
    # Rebind the module-level callback consumed by `auto_param` wrappers.
    global _callback
    _callback = func
def auto_param(func):
    """
    Convert keyword arguments into hyperparameters
    Examples:
    >>> @auto_param
    ... def foo(a, b=2, c='c', d=None):
    ...     print(a, b, c, d)
    >>> foo(1)
    1 2 c None
    >>> with param_scope('foo.b=3'):
    ...     foo(2)
    2 3 c None
    classes are also supported:
    >>> @auto_param
    ... class foo:
    ...     def __init__(self, a, b=2, c='c', d=None):
    ...         print(a, b, c, d)
    >>> obj = foo(1)
    1 2 c None
    >>> with param_scope('foo.b=3'):
    ...     obj = foo(2)
    2 3 c None
    """
    predef_kws = {}
    predef_val = {}
    # Parameters are namespaced by module + callable name ('pkg.mod.foo.b');
    # callables defined in '__main__' use just the callable name.
    namespace = func.__module__
    if namespace == '__main__':
        namespace = None
    if namespace is not None:
        namespace += '.{}'.format(func.__name__)
    else:
        namespace = func.__name__
    signature = inspect.signature(func)
    for k, v in signature.parameters.items():
        # Only parameters with defaults become tunable hyperparameters.
        # Use an identity test against the `empty` sentinel: `!=` would invoke
        # the default value's __eq__, which breaks for defaults (e.g. numpy
        # arrays) whose __eq__ does not return a plain bool.
        if v.default is not v.empty:
            name = '{}.{}'.format(namespace, k)
            predef_kws[k] = name
            _read_tracker.add(name)
            predef_val[name] = v.default
    def wrapper(*arg, **kws):
        with param_scope() as hp:
            local_params = {}
            for k, v in predef_kws.items():
                # Hoist the scope lookup so it runs once per parameter
                # (previously hp.get(v) was evaluated up to three times).
                scoped = hp.get(v)
                if scoped is not None and k not in kws:
                    # Scoped value wins unless the caller passed one explicitly.
                    kws[k] = scoped
                    local_params[v] = scoped
                else:
                    local_params[v] = predef_val[v]
            if _callback is not None:
                _callback(local_params)
            return func(*arg, **kws)
    return wrapper
def safe_numeric(value):
    """
    Convert a string to int (preferred) or float when possible; return the
    value unchanged otherwise. Non-string values pass through untouched.
    """
    if isinstance(value, str):
        # int()/float() raise only ValueError for an invalid str literal, so
        # the previous bare `except` clauses were unnecessarily broad.
        try:
            return int(value)
        except ValueError:
            pass
        try:
            return float(value)
        except ValueError:
            pass
    return value
if __name__ == '__main__':
    # Run the docstring examples first, then the unit tests below.
    import doctest
    doctest.testmod(verbose=False)
    import unittest
    class TestHyperParameter(unittest.TestCase):
        """
        tests for HyperParameter
        """
        def test_parameter_create(self):
            # Keyword and dict-expansion construction are equivalent.
            param1 = HyperParameter(a=1, b=2)
            self.assertEqual(param1.a, 1)
            self.assertEqual(param1.b, 2)
            param2 = HyperParameter(**{'a': 1, 'b': 2})
            self.assertEqual(param2.a, 1)
            self.assertEqual(param2.b, 2)
        def test_parameter_update_with_holder(self):
            # Attribute-style writes create intermediate nested dicts on demand.
            param1 = HyperParameter()
            param1.a = 1
            param1.b = 2
            param1.c.b.a = 3
            self.assertDictEqual(param1, {
                'a': 1,
                'b': 2,
                'c': {
                    'b': {
                        'a': 3
                    }
                }
            })
        def test_parameter_update(self):
            # Dotted names in put() create the nested structure.
            param1 = HyperParameter()
            param1.put('c.b.a', 1)
            self.assertDictEqual(param1, {'c': {'b': {'a': 1}}})
        def test_parameter_patch(self):
            param1 = HyperParameter()
            param1.update({'a': 1, 'b': 2})
            self.assertEqual(param1.a, 1)
            self.assertEqual(param1.b, 2)
    class TestAccesscor(unittest.TestCase):
        """
        tests for Accesscor
        """
        def test_holder_as_bool(self):
            # An accessor for an undefined parameter is falsy; once a real
            # value is stored, normal truthiness applies.
            param1 = HyperParameter()
            self.assertFalse(param1.a.b)
            param1.a.b = False
            self.assertFalse(param1.a.b)
            param1.a.b = True
            self.assertTrue(param1.a.b)
    class TestParamScope(unittest.TestCase):
        """
        tests for param_scope
        """
        def test_scope_create(self):
            with param_scope(a=1, b=2) as hp:
                self.assertEqual(hp.a, 1)
                self.assertEqual(hp.b, 2)
            with param_scope(**{'a': 1, 'b': 2}) as hp:
                self.assertEqual(hp.a, 1)
                self.assertEqual(hp.b, 2)
        def test_nested_scope(self):
            # Inner scopes inherit outer values and may override them.
            with param_scope(a=1, b=2) as hp1:
                self.assertEqual(hp1.a, 1)
                with param_scope(a=3) as hp2:
                    self.assertEqual(hp2.a, 3)
        def test_scope_with_function_call(self):
            def read_a():
                with param_scope() as hp:
                    return hp.a
            # Outside any scope `a` is undefined, so the accessor is falsy.
            self.assertFalse(read_a())
            with param_scope(a=1):
                self.assertEqual(read_a(), 1)
                with param_scope(a=2):
                    self.assertEqual(read_a(), 2)
            # Exiting a scope restores the previous values.
            with param_scope(a=1):
                self.assertEqual(read_a(), 1)
                with param_scope(a=2):
                    self.assertEqual(read_a(), 2)
                self.assertEqual(read_a(), 1)
    unittest.main()
|
<filename>blender/arm/material/mat_batch.py
import bpy
import arm.material.cycles as cycles
import arm.material.make_shader as make_shader
import arm.material.mat_state as mat_state
# TODO: handle groups
# TODO: handle cached shaders
# Maps material -> built shader data (populated by `build`, read by `get`).
batchDict = None
# Maps node-tree signature -> list of materials that share it.
signatureDict = None
def traverse_tree(node, sign):
    """Depth-first walk of a node tree, appending each node type to `sign`;
    an unconnected input socket contributes an 'o' marker."""
    sign = sign + node.type + '-'
    for socket in node.inputs:
        if socket.is_linked:
            sign = traverse_tree(socket.links[0].from_node, sign)
        else:
            sign = sign + 'o' # Unconnected socket
    return sign
def get_signature(mat):
    """Build a string signature for a material's node tree plus render flags.

    Returns None when the material has no output node.
    """
    nodes = mat.node_tree.nodes
    output_node = cycles.node_by_type(nodes, 'OUTPUT_MATERIAL')
    # Idiom fix: compare to None with `is`, and make the no-output path
    # an explicit `return None` instead of an implicit fall-through.
    if output_node is None:
        return None
    sign = traverse_tree(output_node, '')
    # Append flags so materials differing only in these render settings
    # do not end up sharing a batched shader.
    sign += '1' if mat.arm_cast_shadow else '0'
    sign += '1' if mat.arm_overlay else '0'
    sign += '1' if mat.arm_cull_mode == 'Clockwise' else '0'
    return sign
def traverse_tree2(node, ar):
    """Append `node` and every upstream node to `ar` in depth-first order,
    clearing the `is_uniform` flag on each visited input socket."""
    ar.append(node)
    for socket in node.inputs:
        socket.is_uniform = False
        if socket.is_linked:
            traverse_tree2(socket.links[0].from_node, ar)
def get_sorted(mat):
    """Return the material's nodes in depth-first order from the output node,
    or None when the material has no output node.
    """
    nodes = mat.node_tree.nodes
    output_node = cycles.node_by_type(nodes, 'OUTPUT_MATERIAL')
    # Idiom fix: `is None` guard with explicit None return (was `!= None`
    # with an implicit fall-through).
    if output_node is None:
        return None
    ar = []
    traverse_tree2(output_node, ar)
    return ar
def mark_uniforms(mats):
    """Compare default socket values across materials that share one node-tree
    signature, and flag sockets whose values differ with ``is_uniform = True``
    so the batched shader exposes them as per-material uniforms."""
    ars = []
    for m in mats:
        ars.append(get_sorted(m))
    # Buckle up..
    # All materials share one signature, so node and input indices line up
    # across every entry of `ars`.
    for i in range(0, len(ars[0])): # Traverse nodes
        for j in range(0, len(ars[0][i].inputs)): # Traverse inputs
            inp = ars[0][i].inputs[j]
            if not inp.is_linked and hasattr(inp, 'default_value'):
                for k in range(1, len(ars)): # Compare default values
                    inp2 = ars[k][i].inputs[j]
                    diff = False
                    # Vector/color sockets expose a bpy_prop_array; compare
                    # element-wise since the array has no useful __eq__.
                    if str(type(inp.default_value)) == "<class 'bpy_prop_array'>":
                        for l in range(0, len(inp.default_value)):
                            if inp.default_value[l] != inp2.default_value[l]:
                                diff = True
                                break
                    elif inp.default_value != inp2.default_value:
                        diff = True
                    if diff: # Diff found
                        for ar in ars:
                            ar[i].inputs[j].is_uniform = True
                        break
def build(materialArray, mat_users, mat_armusers):
    """Group materials by node-tree signature and build one shader per group.

    Materials with an identical signature reuse the shader data of the first
    material of their group; inputs whose defaults differ are marked as
    uniforms beforehand so the shared shader can parameterize them.
    """
    global batchDict
    # Fix: `global signatureDict` was missing, so the assignment below created
    # a local and the module-level cache (initialized to None) was never set.
    global signatureDict
    batchDict = dict() # Stores shader data for given material
    signatureDict = dict() # Stores materials for given signature

    # Update signatures
    for mat in materialArray:
        if mat.signature == '' or not mat.is_cached:
            mat.signature = get_signature(mat)
        # Group signatures
        if mat.signature in signatureDict:
            signatureDict[mat.signature].append(mat)
        else:
            signatureDict[mat.signature] = [mat]

    # Mark different inputs
    for ref in signatureDict:
        mats = signatureDict[ref]
        if len(mats) > 1:
            mark_uniforms(mats)

    mat_state.batch = True
    # Build unique shaders
    for mat in materialArray:
        for mat2 in materialArray:
            # Signature not found - build it
            if mat == mat2:
                batchDict[mat] = make_shader.build(mat, mat_users, mat_armusers)
                break
            # Already batched - reuse the shader of an earlier material
            # with the same signature
            if mat.signature == mat2.signature:
                batchDict[mat] = batchDict[mat2]
                break
    mat_state.batch = False
def get(mat):
    """Return the shader data built for `mat` by `build` (KeyError if absent)."""
    return batchDict[mat]
|
<reponame>harishbommakanti/rpl_sb_efforts<filename>experiments-harish/rollout.py
# general libraries
import numpy as np
import matplotlib.pyplot as plt
# robosuite libraries
import robosuite as suite
from robosuite.wrappers import GymWrapper
from robosuite import load_controller_config
# RL framework libraries
from stable_baselines import PPO2
from stable_baselines.common.vec_env import SubprocVecEnv
from stable_baselines.bench import Monitor
# maybe evaluate_policy does a lot of the code I wrote already?
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines.common import set_global_seeds
def make_rollout_env(env_name, robot_name, rank, seed):
    """making of a rollout env mimics the make_env() function in train.py

    Args:
        env_name: robosuite environment name, e.g. "Lift"
        robot_name: robot model name, e.g. "Panda"
        rank: worker index (currently unused in this function)
        seed: RNG seed (currently unused in this function)

    NOTE(review): `rank` and `seed` are accepted but never applied to the
    created env — presumably kept to mirror train.py's make_env signature;
    confirm whether seeding was intended here.
    """
    # use OSC_POSITION controller instead of OSC_POSE
    control_config = load_controller_config(default_controller="OSC_POSITION")
    suite_env = suite.make(
        controller_configs=control_config,
        env_name=env_name,
        robots=robot_name,
        use_camera_obs=False,
        use_object_obs=True,
        has_renderer=True, # set to True for video rollouts
        has_offscreen_renderer=False,
        reward_shaping = True, # if set to True, rewards will be very small
        reward_scale=1.0
    )
    # Expose only the listed observation keys as the flat Gym observation.
    env = GymWrapper(
        suite_env,
        keys=['robot0_eef_pos', 'robot0_eef_quat', 'robot0_gripper_qpos',
              'robot0_gripper_qvel', 'cube_pos', 'cube_quat']
    )
    return env
def load_model(model_fpath: str):
    """
    returns a model from the saved filepath,
    whether it is a .zip or .pkl file
    """
    # PPO2.load handles both checkpoint archive formats itself.
    return PPO2.load(model_fpath)
def evaluate_rollouts(experiment_desc: str, model_name: str, horizon: int, num_episodes: int):
    """
    Loads a model file and performs/evaluates the video rollouts.
    - experiment_desc: the experiment description, the overarching folder where data is stored
    - model_name: file name of the saved model inside the experiment folder
    - horizon: the length of an episode (in timesteps)
    - num_episodes: the number of episodes to include in the rollout

    Renders each episode, prints per-episode and summary statistics, and
    saves a per-episode stats plot as a PNG in the working directory.
    """
    # create a lift-panda env with a random seed from 0 to 500
    env_name = "Lift"
    robot_name = "Panda"
    env = make_rollout_env(env_name, robot_name, 0, np.random.randint(0, 500))
    # load the model from log/<env>-<robot>/<experiment_desc>/<model_name>
    log_dir = "log"
    folder_filepath = "/".join([log_dir, env_name +"-"+robot_name, experiment_desc])
    model_filepath = f"{folder_filepath}/{model_name}"
    model = load_model(model_filepath)
    # run the trained model and visualize results
    # log all the summed rewards
    rewards = []
    # 1 if an episode was checked as successfully completed at least once
    episode_successes = []
    # max cube height over episode
    max_cube_heights = []
    # 1 if the cube was successfully grasped at least once
    cube_grasps = []
    # min eef to tgt dist over episode (0 is optimal)
    eef_to_tgt_dists = []
    for episode in range(num_episodes):
        obs = env.reset()
        # sentinel extremes so the first timestep always updates max/min below
        episode_stats = {
            "reward": 0,
            "max_cube_height": -10,
            "min_eef_to_tgt_dist": 20,
            "cube_grasp": 0,
            "cube_success": 0
        }
        for i in range(horizon):
            # deterministic=True: use the policy mean instead of sampling
            action, _states = model.predict(obs, deterministic=True)
            obs, reward, done, info = env.step(action)
            # update episode stats using `get_env_info()`
            episode_stats["reward"] += reward
            env_info = env.get_env_info()
            episode_stats["max_cube_height"] = max(env_info['cube_height'], episode_stats["max_cube_height"])
            episode_stats["min_eef_to_tgt_dist"] = min(env_info['cube_to_gripper_dist'], episode_stats["min_eef_to_tgt_dist"])
            # if a cube was grasped or task was completed at least once in episode,
            # set to true (in case cube is dropped or something like that)
            if env_info['cube_grasped'] == 1:
                episode_stats["cube_grasp"] = 1
            if env_info['cube_success'] == 1:
                episode_stats["cube_success"] = 1
            # show visualizations
            env.render()
        # show logging for each of the env stats
        print("-"*20)
        print(f"Episode {episode}")
        for stat in episode_stats.keys():
            print(f"{stat}: {episode_stats[stat]}")
        print("-"*20)
        # add episode stats to cumulative stats arrays
        rewards.append(episode_stats["reward"])
        episode_successes.append(episode_stats["cube_success"])
        max_cube_heights.append(episode_stats["max_cube_height"])
        eef_to_tgt_dists.append(episode_stats["min_eef_to_tgt_dist"])
        cube_grasps.append(episode_stats["cube_grasp"])
    env.close()
    print("\n\n")
    print("SUMMARY:")
    print(f"Percentage of sucessful episodes: {100*np.mean(episode_successes)}")
    # print out the average for every stat we care about
    print(f"Average reward over rollouts: {np.mean(rewards)}")
    print(f"Average cube success over rollouts: {np.mean(episode_successes)}")
    print(f"Average max cube height over rollouts: {np.mean(max_cube_heights)}")
    print(f"Average min eef to tgt distance over rollouts: {np.mean(eef_to_tgt_dists)}")
    print(f"Average cube grasp over rollouts: {np.mean(cube_grasps)}")
    # plot results for each of the cumulative episode stats
    num_stats = 5
    fig, axs = plt.subplots(num_stats, sharex=True)
    plt.xlabel("episode number")
    x = np.linspace(0, num_episodes, num_episodes)
    # ax[0]: all rewards
    axs[0].plot(x, rewards, 'ro', label='rewards')
    # ax[1]: successes
    axs[1].plot(x, episode_successes, 'bo', label='successes: {0,1}')
    # ax[2]: max cube heights
    axs[2].plot(x, max_cube_heights, 'go', label='max_cube_heights')
    # ax[3]: cube grasp
    axs[3].plot(x, cube_grasps, 'mo', label='cube_grasp: {0,1}')
    # ax[4]: min eef to tgt dist
    axs[4].plot(x, eef_to_tgt_dists, 'co', label='min_eef_tgt_dist')
    fig.legend(loc="upper center", ncol = num_stats, fontsize='x-small')
    fig.savefig(f'{experiment_desc}_{model_name}_rollout_evaluation_stats.png')
    # plt.show()
def main():
    """Entry point: evaluate either every saved checkpoint or the final model."""
    # change these to specify what to evaluate. here are test files
    experiment_desc = "DVE_1Envs_500Horizon_10MSteps_3Seeds_trimmed_obsspace_changing_hyperparams_1.0scale"
    seed = 970
    evaluate_checkpts = False
    if not evaluate_checkpts:
        # evaluate final model pkl only over 5 episodes
        evaluate_rollouts(
            experiment_desc=experiment_desc,
            model_name=f"Seed{seed}_5000000.pkl",
            horizon=500,
            num_episodes=5
        )
    else:
        # do evaluate_rollout for each checkpoint stored for that seed
        step = int(1e6)
        for checkpt in range(step, int(9e6), step):
            evaluate_rollouts(
                experiment_desc=experiment_desc,
                model_name=f"Seed{seed}_{checkpt}_steps.zip",
                horizon=100,
                num_episodes=1
            )
    print("Finished evaluating rollouts.")
if __name__ == "__main__":
main()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Dict, List, Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Resource(msrest.serialization.Model):
    """Azure resource. This resource is tracked in Azure Resource Manager.
    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.
    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :param location: Required. Resource Location.
    :type location: str
    :ivar type: Resource type.
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """
    # Constraints enforced by msrest: readonly fields are server-populated
    # and rejected on input; required fields must be set before sending.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'location': {'required': True},
        'type': {'readonly': True},
    }
    # Maps Python attribute names to JSON keys used on the wire.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(
        self,
        *,
        location: str,
        kind: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        super(Resource, self).__init__(**kwargs)
        # Server-populated (readonly) fields start as None on the client.
        self.id = None
        self.name = None
        self.kind = kind
        self.location = location
        self.type = None
        self.tags = tags
class Certificate(Resource):
    """SSL certificate for an app.
    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.
    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :param location: Required. Resource Location.
    :type location: str
    :ivar type: Resource type.
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar friendly_name: Friendly name of the certificate.
    :vartype friendly_name: str
    :ivar subject_name: Subject name of the certificate.
    :vartype subject_name: str
    :param host_names: Host names the certificate applies to.
    :type host_names: list[str]
    :param pfx_blob: Pfx blob.
    :type pfx_blob: bytearray
    :ivar site_name: App name.
    :vartype site_name: str
    :ivar self_link: Self link.
    :vartype self_link: str
    :ivar issuer: Certificate issuer.
    :vartype issuer: str
    :ivar issue_date: Certificate issue Date.
    :vartype issue_date: ~datetime.datetime
    :ivar expiration_date: Certificate expiration date.
    :vartype expiration_date: ~datetime.datetime
    :param password: Certificate password.
    :type password: str
    :ivar thumbprint: Certificate thumbprint.
    :vartype thumbprint: str
    :ivar valid: Is the certificate valid?.
    :vartype valid: bool
    :ivar cer_blob: Raw bytes of .cer file.
    :vartype cer_blob: bytearray
    :ivar public_key_hash: Public key hash.
    :vartype public_key_hash: str
    :ivar hosting_environment_profile: Specification for the App Service Environment to use for the
     certificate.
    :vartype hosting_environment_profile:
     ~azure.mgmt.web.v2018_11_01.models.HostingEnvironmentProfile
    :param key_vault_id: Key Vault Csm resource Id.
    :type key_vault_id: str
    :param key_vault_secret_name: Key Vault secret name.
    :type key_vault_secret_name: str
    :ivar key_vault_secret_status: Status of the Key Vault secret. Possible values include:
     "Initialized", "WaitingOnCertificateOrder", "Succeeded", "CertificateOrderFailed",
     "OperationNotPermittedOnKeyVault", "AzureServiceUnauthorizedToAccessKeyVault",
     "KeyVaultDoesNotExist", "KeyVaultSecretDoesNotExist", "UnknownError", "ExternalPrivateKey",
     "Unknown".
    :vartype key_vault_secret_status: str or
     ~azure.mgmt.web.v2018_11_01.models.KeyVaultSecretStatus
    :param server_farm_id: Resource ID of the associated App Service plan, formatted as:
     "/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
    :type server_farm_id: str
    """
    # Constraints enforced by msrest: readonly fields are server-populated.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'location': {'required': True},
        'type': {'readonly': True},
        'friendly_name': {'readonly': True},
        'subject_name': {'readonly': True},
        'site_name': {'readonly': True},
        'self_link': {'readonly': True},
        'issuer': {'readonly': True},
        'issue_date': {'readonly': True},
        'expiration_date': {'readonly': True},
        'thumbprint': {'readonly': True},
        'valid': {'readonly': True},
        'cer_blob': {'readonly': True},
        'public_key_hash': {'readonly': True},
        'hosting_environment_profile': {'readonly': True},
        'key_vault_secret_status': {'readonly': True},
    }
    # Maps attribute names to JSON paths; 'properties.*' keys are nested
    # under the ARM resource's properties envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
        'subject_name': {'key': 'properties.subjectName', 'type': 'str'},
        'host_names': {'key': 'properties.hostNames', 'type': '[str]'},
        'pfx_blob': {'key': 'properties.pfxBlob', 'type': 'bytearray'},
        'site_name': {'key': 'properties.siteName', 'type': 'str'},
        'self_link': {'key': 'properties.selfLink', 'type': 'str'},
        'issuer': {'key': 'properties.issuer', 'type': 'str'},
        'issue_date': {'key': 'properties.issueDate', 'type': 'iso-8601'},
        'expiration_date': {'key': 'properties.expirationDate', 'type': 'iso-8601'},
        'password': {'key': 'properties.password', 'type': 'str'},
        'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
        'valid': {'key': 'properties.valid', 'type': 'bool'},
        'cer_blob': {'key': 'properties.cerBlob', 'type': 'bytearray'},
        'public_key_hash': {'key': 'properties.publicKeyHash', 'type': 'str'},
        'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
        'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'},
        'key_vault_secret_name': {'key': 'properties.keyVaultSecretName', 'type': 'str'},
        'key_vault_secret_status': {'key': 'properties.keyVaultSecretStatus', 'type': 'str'},
        'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        location: str,
        kind: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        host_names: Optional[List[str]] = None,
        pfx_blob: Optional[bytearray] = None,
        password: Optional[str] = None,
        key_vault_id: Optional[str] = None,
        key_vault_secret_name: Optional[str] = None,
        server_farm_id: Optional[str] = None,
        **kwargs
    ):
        super(Certificate, self).__init__(kind=kind, location=location, tags=tags, **kwargs)
        # Server-populated (readonly) fields start as None on the client.
        self.friendly_name = None
        self.subject_name = None
        self.host_names = host_names
        self.pfx_blob = pfx_blob
        self.site_name = None
        self.self_link = None
        self.issuer = None
        self.issue_date = None
        self.expiration_date = None
        self.password = password
        self.thumbprint = None
        self.valid = None
        self.cer_blob = None
        self.public_key_hash = None
        self.hosting_environment_profile = None
        self.key_vault_id = key_vault_id
        self.key_vault_secret_name = key_vault_secret_name
        self.key_vault_secret_status = None
        self.server_farm_id = server_farm_id
class CertificateCollection(msrest.serialization.Model):
    """Collection of certificates.
    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.
    :param value: Required. Collection of resources.
    :type value: list[~azure.mgmt.web.v2018_11_01.models.Certificate]
    :ivar next_link: Link to next page of resources.
    :vartype next_link: str
    """
    # 'next_link' is the server-supplied pagination cursor.
    _validation = {
        'value': {'required': True},
        'next_link': {'readonly': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[Certificate]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["Certificate"],
        **kwargs
    ):
        super(CertificateCollection, self).__init__(**kwargs)
        self.value = value
        self.next_link = None
class ProxyOnlyResource(msrest.serialization.Model):
    """Azure proxy only resource. This resource is not tracked by Azure Resource Manager.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    """
    # Unlike Resource, a proxy-only resource carries no 'location' or 'tags'.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        kind: Optional[str] = None,
        **kwargs
    ):
        super(ProxyOnlyResource, self).__init__(**kwargs)
        # Server-populated (readonly) fields start as None on the client.
        self.id = None
        self.name = None
        self.kind = kind
        self.type = None
class CertificatePatchResource(ProxyOnlyResource):
    """ARM resource for a certificate.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar friendly_name: Friendly name of the certificate.
    :vartype friendly_name: str
    :ivar subject_name: Subject name of the certificate.
    :vartype subject_name: str
    :param host_names: Host names the certificate applies to.
    :type host_names: list[str]
    :param pfx_blob: Pfx blob.
    :type pfx_blob: bytearray
    :ivar site_name: App name.
    :vartype site_name: str
    :ivar self_link: Self link.
    :vartype self_link: str
    :ivar issuer: Certificate issuer.
    :vartype issuer: str
    :ivar issue_date: Certificate issue Date.
    :vartype issue_date: ~datetime.datetime
    :ivar expiration_date: Certificate expiration date.
    :vartype expiration_date: ~datetime.datetime
    :param password: Certificate password.
    :type password: str
    :ivar thumbprint: Certificate thumbprint.
    :vartype thumbprint: str
    :ivar valid: Is the certificate valid?.
    :vartype valid: bool
    :ivar cer_blob: Raw bytes of .cer file.
    :vartype cer_blob: bytearray
    :ivar public_key_hash: Public key hash.
    :vartype public_key_hash: str
    :ivar hosting_environment_profile: Specification for the App Service Environment to use for the
     certificate.
    :vartype hosting_environment_profile:
     ~azure.mgmt.web.v2018_11_01.models.HostingEnvironmentProfile
    :param key_vault_id: Key Vault Csm resource Id.
    :type key_vault_id: str
    :param key_vault_secret_name: Key Vault secret name.
    :type key_vault_secret_name: str
    :ivar key_vault_secret_status: Status of the Key Vault secret. Possible values include:
     "Initialized", "WaitingOnCertificateOrder", "Succeeded", "CertificateOrderFailed",
     "OperationNotPermittedOnKeyVault", "AzureServiceUnauthorizedToAccessKeyVault",
     "KeyVaultDoesNotExist", "KeyVaultSecretDoesNotExist", "UnknownError", "ExternalPrivateKey",
     "Unknown".
    :vartype key_vault_secret_status: str or
     ~azure.mgmt.web.v2018_11_01.models.KeyVaultSecretStatus
    :param server_farm_id: Resource ID of the associated App Service plan, formatted as:
     "/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
    :type server_farm_id: str
    """
    # Constraints enforced by msrest: readonly fields are server-populated.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'friendly_name': {'readonly': True},
        'subject_name': {'readonly': True},
        'site_name': {'readonly': True},
        'self_link': {'readonly': True},
        'issuer': {'readonly': True},
        'issue_date': {'readonly': True},
        'expiration_date': {'readonly': True},
        'thumbprint': {'readonly': True},
        'valid': {'readonly': True},
        'cer_blob': {'readonly': True},
        'public_key_hash': {'readonly': True},
        'hosting_environment_profile': {'readonly': True},
        'key_vault_secret_status': {'readonly': True},
    }
    # Maps attribute names to JSON paths nested under 'properties'.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
        'subject_name': {'key': 'properties.subjectName', 'type': 'str'},
        'host_names': {'key': 'properties.hostNames', 'type': '[str]'},
        'pfx_blob': {'key': 'properties.pfxBlob', 'type': 'bytearray'},
        'site_name': {'key': 'properties.siteName', 'type': 'str'},
        'self_link': {'key': 'properties.selfLink', 'type': 'str'},
        'issuer': {'key': 'properties.issuer', 'type': 'str'},
        'issue_date': {'key': 'properties.issueDate', 'type': 'iso-8601'},
        'expiration_date': {'key': 'properties.expirationDate', 'type': 'iso-8601'},
        'password': {'key': 'properties.password', 'type': 'str'},
        'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
        'valid': {'key': 'properties.valid', 'type': 'bool'},
        'cer_blob': {'key': 'properties.cerBlob', 'type': 'bytearray'},
        'public_key_hash': {'key': 'properties.publicKeyHash', 'type': 'str'},
        'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
        'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'},
        'key_vault_secret_name': {'key': 'properties.keyVaultSecretName', 'type': 'str'},
        'key_vault_secret_status': {'key': 'properties.keyVaultSecretStatus', 'type': 'str'},
        'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        kind: Optional[str] = None,
        host_names: Optional[List[str]] = None,
        pfx_blob: Optional[bytearray] = None,
        password: Optional[str] = None,
        key_vault_id: Optional[str] = None,
        key_vault_secret_name: Optional[str] = None,
        server_farm_id: Optional[str] = None,
        **kwargs
    ):
        super(CertificatePatchResource, self).__init__(kind=kind, **kwargs)
        # Server-populated (readonly) fields start as None on the client.
        self.friendly_name = None
        self.subject_name = None
        self.host_names = host_names
        self.pfx_blob = pfx_blob
        self.site_name = None
        self.self_link = None
        self.issuer = None
        self.issue_date = None
        self.expiration_date = None
        self.password = password
        self.thumbprint = None
        self.valid = None
        self.cer_blob = None
        self.public_key_hash = None
        self.hosting_environment_profile = None
        self.key_vault_id = key_vault_id
        self.key_vault_secret_name = key_vault_secret_name
        self.key_vault_secret_status = None
        self.server_farm_id = server_farm_id
class DefaultErrorResponse(msrest.serialization.Model):
    """App Service error response.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar error: Error model.
    :vartype error: ~azure.mgmt.web.v2018_11_01.models.DefaultErrorResponseError
    """

    # msrest serialization metadata: 'error' is read-only (server-populated).
    _validation = {
        'error': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'error': {'key': 'error', 'type': 'DefaultErrorResponseError'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(DefaultErrorResponse, self).__init__(**kwargs)
        # Read-only; filled in by the service during deserialization.
        self.error = None
class DefaultErrorResponseError(msrest.serialization.Model):
    """Error model.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Standardized string to programmatically identify the error.
    :vartype code: str
    :ivar message: Detailed error description and debugging information.
    :vartype message: str
    :ivar target: Detailed error description and debugging information.
    :vartype target: str
    :param details:
    :type details: list[~azure.mgmt.web.v2018_11_01.models.DefaultErrorResponseErrorDetailsItem]
    :ivar innererror: More information to debug error.
    :vartype innererror: str
    """

    # msrest serialization metadata: only 'details' may be sent by a client.
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
        'target': {'readonly': True},
        'innererror': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[DefaultErrorResponseErrorDetailsItem]'},
        'innererror': {'key': 'innererror', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        details: Optional[List["DefaultErrorResponseErrorDetailsItem"]] = None,
        **kwargs
    ):
        """
        :param details: Optional list of detailed error items.
        """
        super(DefaultErrorResponseError, self).__init__(**kwargs)
        # Read-only attributes are server-populated.
        self.code = None
        self.message = None
        self.target = None
        self.details = details
        self.innererror = None
class DefaultErrorResponseErrorDetailsItem(msrest.serialization.Model):
    """Detailed errors.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Standardized string to programmatically identify the error.
    :vartype code: str
    :ivar message: Detailed error description and debugging information.
    :vartype message: str
    :ivar target: Detailed error description and debugging information.
    :vartype target: str
    """

    # msrest serialization metadata: every field is server-populated.
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
        'target': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(DefaultErrorResponseErrorDetailsItem, self).__init__(**kwargs)
        # Read-only; filled in by the service during deserialization.
        self.code = None
        self.message = None
        self.target = None
class HostingEnvironmentProfile(msrest.serialization.Model):
    """Specification for an App Service Environment to use for this resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID of the App Service Environment.
    :type id: str
    :ivar name: Name of the App Service Environment.
    :vartype name: str
    :ivar type: Resource type of the App Service Environment.
    :vartype type: str
    """

    # msrest serialization metadata: only 'id' may be sent by a client.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        **kwargs
    ):
        """
        :param id: Resource ID of the App Service Environment.
        """
        super(HostingEnvironmentProfile, self).__init__(**kwargs)
        self.id = id
        # Read-only attributes are server-populated.
        self.name = None
        self.type = None
|
<gh_stars>0
import os
import numpy as np
from pymoo.optimize import minimize
from pymoo.util import plotting
from pymoo.util.reference_direction import UniformReferenceDirectionFactory
from pymop.factory import get_problem
from pymop.problem import Problem
class Myproblem(Problem):
    """Custom two-variable, two-objective test problem on [-5, 5]^2."""

    def __init__(self, n_var=2, n_obj=2, n_constr=2, xl=-5, xu=5,
                 type_var=np.double):
        super().__init__(n_var=n_var, n_obj=n_obj, n_constr=n_constr, xl=xl,
                         xu=xu, type_var=type_var)

    def _evaluate(self, x, f, *args, **kwargs):
        # Split the decision matrix into its two variable columns.
        a, b = x[:, 0], x[:, 1]
        # Hoist the shared powers used by both objectives.
        a2, b2 = np.power(a, 2), np.power(b, 2)
        a4, b4 = np.power(a, 4), np.power(b, 4)
        # Custom goal functions.
        f[:, 0] = a4 - 10 * a2 + a * b + b4 - a2 * b2
        f[:, 1] = b4 - a2 * b2 + a4 + a * b
        # If a margin condition is needed, write it into g; the condition
        # must be normalized and expressed as < 0.
class Viennet(Problem):
    """Viennet three-objective benchmark on two variables in [-3, 3]."""

    def __init__(self, n_var=2, n_obj=3, n_constr=0, xl=-3, xu=3,
                 type_var=np.double):
        super().__init__(n_var=n_var, n_obj=n_obj, n_constr=n_constr, xl=xl,
                         xu=xu, type_var=type_var)

    def _evaluate(self, x, f, *args, **kwargs):
        a, b = x[:, 0], x[:, 1]
        # Shared radial term x1^2 + x2^2 used by objectives 0 and 2.
        r = np.power(a, 2) + np.power(b, 2)
        # Custom goal functions.
        f[:, 0] = 0.5 * r + np.sin(r)
        f[:, 1] = (1 / 8 * np.power(3 * a - 2 * b + 4, 2) +
                   1 / 27 * np.power(a - b + 1, 2) + 15)
        f[:, 2] = 1 / (r + 1) - 1.1 * np.exp(-r)
def use_package_function(func_name):
    """Optimize the pymop benchmark *func_name* with NSGA-III and plot the front."""
    # Run configuration.
    num_vars = 7
    num_objs = 3
    num_points = 100
    num_generations = 500
    seed = 5

    problem = get_problem(func_name, n_var=num_vars, n_obj=num_objs)
    # Reference directions steer the NSGA-III selection.
    ref_dirs = UniformReferenceDirectionFactory(num_objs, n_points=num_points).do()
    res = minimize(
        problem,
        method='nsga3',
        method_args={'pop_size': num_points + 1, 'ref_dirs': ref_dirs},
        termination=('n_gen', num_generations),
        seed=seed,
        disp=True,
    )
    plotting.plot(res.F)
def use_custom_function():
    """Optimize the custom ``Myproblem`` with NSGA-III and plot the front.

    Fix: removed the unused locals ``n_var``, ``n_constr``, ``x_min`` and
    ``x_max`` -- ``Myproblem()`` is constructed with its own defaults, so
    those values had no effect on the run.
    """
    # Parameter setting.
    n_obj = 2          # number of objectives of Myproblem
    n_points = 100     # number of reference points (drives population size)
    iter_epoch = 500   # number of generations
    random_seed = 5
    problem = Myproblem()
    # problem = Viennet()
    # Create the reference directions to be used for the optimization.
    ref_dirs = UniformReferenceDirectionFactory(
        n_dim=n_obj, n_points=n_points).do()
    res = minimize(
        problem, method='nsga3', method_args={
            'pop_size': n_points + 1, 'ref_dirs': ref_dirs},
        termination=('n_gen', iter_epoch), seed=random_seed, disp=True)
    plotting.plot(res.F)
def main():
    """Create the output directories and run the packaged benchmark."""
    for path in ('./result/package/', './result/custom/'):
        os.makedirs(path, exist_ok=True)
    # use_custom_function()
    use_package_function('dtlz1')
    # use_package_function('osy')
# Script entry point: run the DTLZ1 benchmark when executed directly.
if __name__ == '__main__':
    main()
|
from pathlib import Path
import os
from typing import Union
import sys
import copy
from scipy.io import savemat
import spikeextractors as se
from ..basesorter import BaseSorter
from ..utils.shellscript import ShellScript
from ..sorter_tools import recover_recording
def check_if_installed(waveclus_path: Union[str, None]) -> bool:
    """Return True when a Wave_Clus checkout is present at *waveclus_path*.

    The path is considered valid when it contains the top-level
    ``wave_clus.m`` entry point.  ``None`` means "not configured" and is
    reported as not installed.

    Fix: collapsed the ``if ...: return True / else: return False`` tail
    into a direct boolean return.
    """
    if waveclus_path is None:
        return False
    assert isinstance(waveclus_path, str)

    # Tolerate a path wrapped in double quotes (e.g. copied from a shell).
    # NOTE(review): this assumes a matching closing quote is present.
    if waveclus_path.startswith('"'):
        waveclus_path = waveclus_path[1:-1]
    waveclus_path = str(Path(waveclus_path).absolute())
    return (Path(waveclus_path) / 'wave_clus.m').is_file()
class WaveClusSorter(BaseSorter):
    """SpikeInterface wrapper around the MATLAB-based Wave_Clus spike sorter.

    The recording is exported to per-channel ``raw<N>.mat`` files, a MATLAB
    driver script is generated and executed through the ``matlab`` binary,
    and the resulting ``times_results.mat`` is read back as a sorting.
    """

    sorter_name: str = 'waveclus'
    # Path to the wave_clus checkout; taken from the environment by default
    # and overridable via set_waveclus_path().
    waveclus_path: Union[str, None] = os.getenv('WAVECLUS_PATH', None)
    requires_locations = False

    # Default sorting parameters forwarded (after renaming) to MATLAB.
    _default_params = {
        'detect_threshold': 5,
        'detect_sign': -1,  # -1 (negative), 1 (positive), or 0 (both)
        'feature_type': 'wav',
        'scales': 4,
        'min_clus': 20,
        'maxtemp': 0.251,
        'template_sdnum': 3,
        'enable_detect_filter': True,
        'enable_sort_filter': True,
        'detect_filter_fmin': 300,
        'detect_filter_fmax': 3000,
        'detect_filter_order': 4,
        'sort_filter_fmin': 300,
        'sort_filter_fmax': 3000,
        'sort_filter_order': 2,
        'mintemp': 0,
        'w_pre': 20,
        'w_post': 44,
        'alignment_window': 10,
        'stdmax': 50,
        'max_spk': 40000,
        'ref_ms': 1.5,
        'interpolation': True
    }

    # Human-readable description of each default parameter above.
    _params_description = {
        'detect_threshold': "Threshold for spike detection",
        'detect_sign': "Use -1 (negative), 1 (positive), or 0 (both) depending "
                       "on the sign of the spikes in the recording",
        'feature_type': "wav (for wavelets) or pca, type of feature extraction applied to the spikes",
        'scales': "Levels of the wavelet decomposition used as features",
        'min_clus': "Minimum increase of cluster sizes used by the peak selection on the temperature map",
        'maxtemp': "Maximum temperature calculated by the SPC method",
        'template_sdnum': "Maximum distance (in total variance of the cluster) from the mean waveform to force a "
                          "spike into a cluster",
        'enable_detect_filter': "Enable or disable filter on detection",
        'enable_sort_filter': "Enable or disable filter on sorting",
        'detect_filter_fmin': "High-pass filter cutoff frequency for detection",
        'detect_filter_fmax': "Low-pass filter cutoff frequency for detection",
        'detect_filter_order': "Order of the detection filter",
        'sort_filter_fmin': "High-pass filter cutoff frequency for sorting",
        'sort_filter_fmax': "Low-pass filter cutoff frequency for sorting",
        'sort_filter_order': "Order of the sorting filter",
        'mintemp': "Minimum temperature calculated by the SPC algorithm",
        'w_pre': "Number of samples from the beginning of the spike waveform up to (including) the peak",
        'w_post': "Number of samples from the peak (excluding it) to the end of the waveform",
        'alignment_window': "Number of samples between peaks of different channels",
        'stdmax': "The events with a value over this number of noise standard deviations will be discarded",
        'max_spk': "Maximum number of spikes used by the SPC algorithm",
        'ref_ms': "Refractory time in milliseconds, all the threshold crossing inside this period are detected as the "
                  "same spike",
        'interpolation': "Enable or disable interpolation to improve the alignments of the spikes"
    }

    sorter_description = """Wave Clus combines a wavelet-based feature extraction and paramagnetic clustering with a
    template-matching approach. It is mainly designed for monotrodes and low-channel count probes.
    For more information see https://doi.org/10.1152/jn.00339.2018"""

    installation_mesg = """\nTo use WaveClus run:\n
        >>> git clone https://github.com/csn-le/wave_clus
    and provide the installation path by setting the WAVECLUS_PATH
    environment variables or using WaveClusSorter.set_waveclus_path().\n\n
    More information on WaveClus at:
        https://github.com/csn-le/wave_clus/wiki
    """

    def __init__(self, **kargs):
        BaseSorter.__init__(self, **kargs)

    @classmethod
    def is_installed(cls):
        # Installed means a wave_clus checkout containing wave_clus.m exists.
        return check_if_installed(cls.waveclus_path)

    @staticmethod
    def get_sorter_version():
        """Return the first line of wave_clus' version.txt, or 'unknown'."""
        p = os.getenv('WAVECLUS_PATH', None)
        if p is None:
            return 'unknown'
        else:
            with open(str(Path(p) / 'version.txt'), mode='r', encoding='utf8') as f:
                version = f.readline()
            return version

    @staticmethod
    def set_waveclus_path(waveclus_path: str):
        """Point the sorter at a wave_clus checkout (also exported to the env)."""
        waveclus_path = str(Path(waveclus_path).absolute())
        WaveClusSorter.waveclus_path = waveclus_path
        try:
            print("Setting WAVECLUS_PATH environment variable for subprocess calls to:", waveclus_path)
            os.environ["WAVECLUS_PATH"] = waveclus_path
        except Exception as e:
            print("Could not set WAVECLUS_PATH environment variable:", e)

    def _setup_recording(self, recording, output_folder):
        """Export each channel of *recording* as rawN.mat into *output_folder*."""
        if not self.is_installed():
            raise Exception(WaveClusSorter.installation_mesg)
        output_folder.mkdir(parents=True, exist_ok=True)

        # Generate mat files in the dataset directory: one file per channel,
        # with 1-based naming as the MATLAB wrapper expects.
        for nch, id in enumerate(recording.get_channel_ids()):
            vcFile_mat = str(output_folder / ('raw' + str(nch + 1) + '.mat'))
            savemat(vcFile_mat,
                    {'data': recording.get_traces(channel_ids=[id]), 'sr': recording.get_sampling_frequency()})

    def _run(self, recording, output_folder):
        """Translate parameters to MATLAB syntax, run wave_clus, verify output."""
        recording = recover_recording(recording)
        source_dir = Path(__file__).parent
        p = self.params.copy()

        if recording.is_filtered and (p['enable_detect_filter'] or p['enable_sort_filter']):
            print("Warning! The recording is already filtered, but Wave-Clus filters are enabled. You can disable "
                  "filters by setting 'enable_detect_filter' and 'enable_sort_filter' parameters to False")

        # Map the numeric sign convention onto wave_clus' string values.
        if p['detect_sign'] < 0:
            p['detect_sign'] = 'neg'
        elif p['detect_sign'] > 0:
            p['detect_sign'] = 'pos'
        else:
            p['detect_sign'] = 'both'

        # A filter order of 0 disables the corresponding filter MATLAB-side;
        # the enable_* flags themselves are not forwarded.
        if not p['enable_detect_filter']:
            p['detect_filter_order'] = 0
        del p['enable_detect_filter']
        if not p['enable_sort_filter']:
            p['sort_filter_order'] = 0
        del p['enable_sort_filter']

        # wave_clus expects 'y'/'n' rather than a boolean.
        if p['interpolation']:
            p['interpolation'] = 'y'
        else:
            p['interpolation'] = 'n'
        samplerate = recording.get_sampling_frequency()
        p['sr'] = samplerate

        num_channels = recording.get_num_channels()
        tmpdir = output_folder
        tmpdir.mkdir(parents=True, exist_ok=True)

        if self.verbose:
            num_timepoints = recording.get_num_frames()
            duration_minutes = num_timepoints / samplerate / 60
            print('Num. channels = {}, Num. timepoints = {}, duration = {} minutes'.format(
                num_channels, num_timepoints, duration_minutes))

        # Serialize the parameter dict as MATLAB 'par.<name> = <value>;'
        # statements, renaming keys to the names wave_clus expects.
        par_str = ''
        par_renames = {'detect_sign':'detection','detect_threshold':'stdmin',
                       'feature_type':'features','detect_filter_fmin':'detect_fmin',
                       'detect_filter_fmax':'detect_fmax','detect_filter_order':'detect_order',
                       'sort_filter_fmin':'sort_fmin','sort_filter_fmax':'sort_fmax',
                       'sort_filter_order':'sort_order'}
        for key, value in p.items():
            if type(value) == str:
                value = '\'{}\''.format(value)
            elif type(value) == bool:
                value = '{}'.format(value).lower()
            if key in par_renames:
                key = par_renames[key]
            par_str += 'par.{} = {};'.format(key, value)

        if self.verbose:
            print('Running waveclus in {tmpdir}...'.format(tmpdir=tmpdir))

        # MATLAB driver script: put wave_clus (and this package's wrapper)
        # on the path, run p_waveclus, and exit 1 on any MATLAB-side error.
        cmd = '''
            addpath(genpath('{waveclus_path}'), '{source_path}');
            {parameters}
            try
                p_waveclus('{tmpdir}', {nChans}, par);
            catch
                fprintf('----------------------------------------');
                fprintf(lasterr());
                quit(1);
            end
            quit(0);
        '''
        cmd = cmd.format(waveclus_path=WaveClusSorter.waveclus_path, source_path=source_dir,
                         tmpdir=tmpdir, nChans=num_channels, parameters=par_str)

        matlab_cmd = ShellScript(cmd, script_path=str(tmpdir / 'run_waveclus.m'), keep_temp_files=True)
        matlab_cmd.write()

        # Platform-specific shell wrapper that launches MATLAB on the script.
        # NOTE(review): the "'win' in sys.platform and != 'darwin'" test is how
        # this file detects Windows ('win' is also a substring of 'darwin').
        if 'win' in sys.platform and sys.platform != 'darwin':
            shell_cmd = '''
                {disk_move}
                cd {tmpdir}
                matlab -nosplash -wait -log -r run_waveclus
            '''.format(disk_move=str(tmpdir)[:2], tmpdir=tmpdir)
        else:
            shell_cmd = '''
                #!/bin/bash
                cd "{tmpdir}"
                matlab -nosplash -nodisplay -log -r run_waveclus
            '''.format(tmpdir=tmpdir)
        shell_cmd = ShellScript(shell_cmd, script_path=output_folder / f'run_{self.sorter_name}',
                                log_path=output_folder / f'{self.sorter_name}.log', verbose=self.verbose)
        shell_cmd.start()
        retcode = shell_cmd.wait()

        if retcode != 0:
            raise Exception('waveclus returned a non-zero exit code')

        # The MATLAB wrapper must have produced the results file.
        result_fname = tmpdir / 'times_results.mat'
        if not result_fname.is_file():
            raise Exception(f'Result file does not exist: {result_fname}')

    @staticmethod
    def get_result_from_folder(output_folder):
        """Load the wave_clus output file as a WaveClusSortingExtractor."""
        output_folder = Path(output_folder)
        result_fname = str(output_folder / 'times_results.mat')
        sorting = se.WaveClusSortingExtractor(file_path=result_fname)
        return sorting
|
<reponame>sarar0sa/Cisco_Mac_Lookup
from time import sleep
import csv
from datetime import datetime
import mac_vendor_lookup
import cisco_service
class CiscoDnacMacLookupRunner():
    """Fetch devices from Cisco DNA Center and write client MAC/vendor pairs to a CSV.

    NOTE(review): requires network access to DNA Center via the
    ``cisco_service`` helper and vendor data via ``mac_vendor_lookup``.
    """

    # Shared request headers; the auth token is added in main().
    headers = {'Content-Type': 'application/json'}

    def __init__(self):
        self.cisco = cisco_service.CiscoService()
        self.mac_lookup = mac_vendor_lookup.MacLookup()
        self.today = datetime.now()
        # Output file name carries the start timestamp.
        self.filename = "mac_address_lookup_{}T{}Z.csv".format(str(self.today.date()), str(self.today.time()))

    def main(self):
        """Authenticate, enumerate devices, and write the MAC/vendor CSV."""
        print("Obtaining token..")
        token = self.cisco.get_dnac_jwt_token()
        self.headers["X-Auth-Token"] = token
        print("Fetching network devices..")
        devices = self.cisco.get_network_devices(self.headers)
        with open(self.filename, 'w') as csvfile:
            print("MAC lookup as begun. This may take a while..")
            # NOTE(review): estimate appears to assume ~363 requests at 5 per
            # minute -- confirm against the actual device count.
            print("Estimated run time: {} min".format(int(363/5)))
            csvwriter = csv.writer(csvfile)
            counter_rate_limit = 0
            for item in devices:
                # Respect the API rate limit: pause a minute after every
                # 5 enrichment requests.
                if(counter_rate_limit == 5):
                    sleep(60)
                    counter_rate_limit = 0
                details = self.cisco.get_device_enrichment_details(self.headers, item['macAddress'])
                counter_rate_limit += 1
                if 'links' in details['deviceDetails']['neighborTopology'][0]:
                    for detail in details['deviceDetails']['neighborTopology'][0]['links']:
                        # Only client-facing links carry end-client details.
                        if 'interfaceDetails' in detail and detail['id'] == "CLIENTS":
                            for client in detail['interfaceDetails']:
                                mac_address = client['clientMacAddress']
                                manufacturer = self.mac_lookup.lookup_mac_vendor(mac_address)
                                csvwriter.writerow([mac_address,manufacturer])
        print("Ending script..")
        print("See the result in {}".format(self.filename))
# Script entry point: print the banner and run the full lookup.
if __name__ == "__main__":
    # Cool banner ofc
    print("""
╔═╗╦╔═╗╔═╗╔═╗ ╔╦╗╔╗╔╔═╗╔═╗ ╔╦╗╔═╗╔═╗ ╦ ╔═╗╔═╗╦╔═╦ ╦╔═╗
║ ║╚═╗║ ║ ║ ║║║║║╠═╣║ ║║║╠═╣║ ║ ║ ║║ ║╠╩╗║ ║╠═╝
╚═╝╩╚═╝╚═╝╚═╝ ═╩╝╝╚╝╩ ╩╚═╝ ╩ ╩╩ ╩╚═╝ ╩═╝╚═╝╚═╝╩ ╩╚═╝╩
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWKKNMMMMMMMMMMMMMMMMMMMMWWWMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMXl,co0NWMMMMMMMMMMMMMMXxc:xWMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNd''',;cdkKNNNNNNWNKko,...oWMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMO;''.....';ccllc:,. ...'kMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMWXOxdllllldxOXWMMMMMMMWNd'........ .... ..lNMMMMMMMMMMMMMMMMMMM
MMMMMMMMMN0o:,;;:clllc:;,';oONMMMMMWd'',,,'. ..... .dWMMMMMMMMMMMMMMMMMMM
MMMMMWWWO:,cdOO0K0O0K0K0klc:';dXMMMXl,'',;;. .'''''.lXMMMMMMMMMMMMMMMMMMM
MMMMMMXo;oKWM0dkkdddoo0xddkW0o',kWM0c...,lol;. . .ccoc..;cdXMMMMMMMMMMMMMMMMMMM
MMMMMXo:0MMMMWK0KXXKKKKX00NMMWK:'dWO,....';;' .. .;::,'',,lKMMMMMMMMMMMMMMMMMMM
MMMMWxc0MMMMWW0kOxxkKkk0OXWWWMMNl'kO:'........,:'........,,cKMMMMMMMMMMMMMMMMMMM
MMMMNdxWMMMMMWOxkdddxxdxkKNWWWWMK;cXd'........,,'''.....',,:kXMMMMMMMMMMMMMMMMMM
MMMMXokMMMMMMMNXXXNNXNX0KXWWWWWWNlcXXd,.'......'..'.','.'',;:oKWMMMMMMMMMMMMMMMM
MMMMXoxWMMMMMMM0olxkoxxkXWMMMMMMNloNWNd... ..................:0WMMMMMMMMMMMMMMM
MMMMNxcOWMMMMMMKkkkOOkOOXWMMMMMMO:kMMNl.. .. .l0WMMMMMMMMMMMMMM
MMMMM0:;kNWXXNKO0K0000KKXK0OONWKlcOWNd' .,oKWMMMMMMMMMMMMM
MMMMMWO;'lOxxOddooddlcdxxxlox0Oolo0W0,. .,;oKMMMMMMMMMMMMM
MMMMMMWKc..';dkOKX0KXXXK00Oxdl:;,,oOo. .'',oKWMMMMMMMMMMM
MMMMMMMMWOl,..';coddxxdol:,..,;:;..':;.. .. ..''';dKWWMMMMMMMM
MMMMMMMMMMMN0dl:;''.'',:cokO0KNWW0l..''. ... ..,,'':xXWMMMMMMM
MMMMMMMMMMMMMMMWWNXKKXXWMMMMMMMMMMNl... . ..,'',,:xNWMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0;.. .. .,;::,'cKMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWx' .,;'. ....... ..','.lXMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMK:. . .',. .. .. ....dWMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMk. .. ...cXMMM
    """)
    print("Starting script..")
    CiscoDnacMacLookupRunner().main()
"""
Entry point for training and evaluating a lemmatizer.
This lemmatizer combines a neural sequence-to-sequence architecture with an `edit` classifier
and two dictionaries to produce robust lemmas from word forms.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
import logging
import sys
import os
# import shutil
# import time
# from datetime import datetime
import argparse
import numpy as np
import random
import torch
from pathlib import Path
# from torch import nn, optim
from dadmatools.models.lemma.data import DataLoader
# from models.lemma.vocab import Vocab
from dadmatools.models.lemma.trainer import Trainer
from dadmatools.models.lemma import edit
# from models.lemma import scorer
# from models.common import utils
# import models.common.seq2seq_constant as constant
from dadmatools.models.common.doc import *
# from utils.conll import CoNLL
# from models import _training_logging
from dadmatools.models.common.doc import Document
import dadmatools.pipeline.download as dl
logger = logging.getLogger('stanza')
def parse_args():
    """Return the hard-coded configuration dict for the Persian lemmatizer.

    NOTE(review): despite its name, nothing is parsed from the command
    line -- all settings below are fixed.  Callers must use subscript
    access (``args['seed']``), since this is a plain dict.
    """
    args = {
        'data_dir':None,
        'train_file':None,
        'eval_file':None,
        'output_file':None,
        'gold_file':None,
        'mode':'predict',
        'lang':'fa',
        'no_dict':'ensemble_dict',
        'dict_only':False,
        'hidden_dim':200,
        'emb_dim':50,
        'num_layers':1.0,  # NOTE(review): float where a layer count is expected -- confirm
        'emb_dropout':0.5,
        'dropout':0.5,
        'max_dec_len':50,
        'beam_size':1,
        'attn_type':'spft',  # NOTE(review): possibly a typo for 'soft' -- confirm
        'pos_dim':50,
        'pos_dropout':0.5,
        'no_edit':False,
        'num_edit':len(edit.EDIT_TO_ID),
        'alpha':1.0,
        'no_pos':'pos',
        'no_copy':'copy',
        'sample_train':1.0,
        'optim':'adam',
        'lr':1e-3,
        'lr_decay':0.9,
        'decay_epoch':30,
        'num_epoch':60,
        'batch_size':50,
        'max_grad_norm':0.5,
        'log_step':20,
        'seed':1234,
        'cuda':torch.cuda.is_available(),
        'cpu':True,
        # NOTE(review): 'save_dir' already names the checkpoint *file*,
        # not a directory (see load_model, which passes it as model_file).
        'save_dir':'saved_models/fa_lemmatizer/fa_lemmatizer.pt'
    }
    return args
def lemmatize(input_tokens):
    """Lemmatize pre-tokenized sentences with the saved Persian lemmatizer.

    :param input_tokens: list of sentences, each a list of token strings,
        e.g. ``[['this', 'is', 'a', 'test', '.']]``.
    :return: lemma predictions from the trainer (dictionary/seq2seq
        ensemble), or ``None`` when the input yields no batches.

    Fix: ``parse_args()`` returns a plain dict, but this function accessed
    it with attribute syntax (``args.seed``, ``args.cuda``, ``args.lang``,
    ``args.batch_size``, ``args.beam_size``), which raises
    ``AttributeError``.  All accesses now use subscripts, matching
    ``load_model``/``lemma`` below; the model file is taken directly from
    ``args['save_dir']``, which already points at the ``.pt`` checkpoint.
    """
    args = parse_args()
    torch.manual_seed(args['seed'])
    np.random.seed(args['seed'])
    random.seed(args['seed'])
    if args['cpu']:
        args['cuda'] = False
    elif args['cuda']:
        torch.cuda.manual_seed(args['seed'])

    # Resolve the checkpoint path relative to the package root.
    prefix = str(Path(__file__).parent.absolute()).replace('models', '')
    args['save_dir'] = prefix + args['save_dir']
    model_file = args['save_dir']

    # Load the trained model.
    use_cuda = args['cuda'] and not args['cpu']
    trainer = Trainer(model_file=model_file, use_cuda=use_cuda)
    loaded_args, vocab = trainer.args, trainer.vocab

    # Wrap the raw tokens into the document structure the loader expects.
    input_dict = [[{"text": t} for t in l] for l in input_tokens]
    doc = Document(input_dict, text=None, comments=None)
    batch = DataLoader(doc, args['batch_size'], loaded_args, vocab=vocab, evaluation=True)

    # Nothing to predict on.
    if len(batch) == 0:
        logger.warning("there are no inputs")
        return

    dict_preds = trainer.predict_dict(batch.doc.get([TEXT, UPOS]))
    if loaded_args.get('dict_only', False):
        preds = dict_preds
    else:
        # Run the seq2seq model batch by batch.
        preds = []
        edits = []
        for i, b in enumerate(batch):
            ps, es = trainer.predict(b, args['beam_size'])
            preds += ps
            if es is not None:
                edits += es
        preds = trainer.postprocess(batch.doc.get([TEXT]), preds, edits=edits)

        if loaded_args.get('ensemble_dict', False):
            logger.info("[Ensembling dict with seq2seq lemmatizer...]")
            preds = trainer.ensemble(batch.doc.get([TEXT, UPOS]), preds)
    return preds
#########################################################################################################
###################################breaking the model into load_model and predict########################
#########################################################################################################
def load_model():
    """Download (if needed) and load the Persian lemmatizer.

    :return: ``(trainer, args)`` -- the loaded Trainer and the
        configuration dict used to load it.

    Fix: removed the unused ``loaded_args, vocab`` locals that were
    computed but discarded before returning.
    """
    # Download the model if not already present (no-op otherwise).
    dl.download_model('fa_lemmatizer')
    args = parse_args()
    # Resolve the checkpoint path relative to the package root.
    prefix = str(Path(__file__).parent.absolute()).replace('models', '')
    args['save_dir'] = prefix + args['save_dir']
    torch.manual_seed(args['seed'])
    np.random.seed(args['seed'])
    random.seed(args['seed'])
    if args['cpu']:
        args['cuda'] = False
    elif args['cuda']:
        torch.cuda.manual_seed(args['seed'])

    # 'save_dir' already names the lemmatizer checkpoint file.
    use_cuda = args['cuda'] and not args['cpu']
    trainer = Trainer(model_file=args['save_dir'], use_cuda=use_cuda)
    return (trainer, args)
def lemma(trainer, args, input_tokens):
    """Predict lemmas for *input_tokens* with an already-loaded *trainer*.

    :param trainer: loaded lemmatizer Trainer (see ``load_model``).
    :param args: configuration dict (subscript access, see ``parse_args``).
    :param input_tokens: list of sentences, each a list of token strings.
    :return: lemma predictions, or ``None`` when there is nothing to do.
    """
    model_args, model_vocab = trainer.args, trainer.vocab

    # Wrap the raw tokens into the document structure the loader expects.
    token_dicts = [[{"text": token} for token in sentence]
                   for sentence in input_tokens]
    document = Document(token_dicts, text=None, comments=None)
    batch = DataLoader(document, args['batch_size'], model_args,
                       vocab=model_vocab, evaluation=True)

    # Nothing to predict on.
    if not len(batch):
        logger.warning("there are no inputs")
        return

    dict_preds = trainer.predict_dict(batch.doc.get([TEXT, UPOS]))
    if model_args.get('dict_only', False):
        return dict_preds

    # Run the seq2seq model batch by batch.
    preds, edits = [], []
    for minibatch in batch:
        ps, es = trainer.predict(minibatch, args['beam_size'])
        preds.extend(ps)
        if es is not None:
            edits.extend(es)
    preds = trainer.postprocess(batch.doc.get([TEXT]), preds, edits=edits)

    if model_args.get('ensemble_dict', False):
        logger.info("[Ensembling dict with seq2seq lemmatizer...]")
        preds = trainer.ensemble(batch.doc.get([TEXT, UPOS]), preds)
    return preds
|
<gh_stars>1-10
"""
All rights reserved to cnvrg.io
http://www.cnvrg.io
cnvrg.io - AI library
Written by: <NAME>
Last update: Oct 06, 2019
Updated by: <NAME>
multinomial_nb.py (Multinomial Naive Bayes training script)
==============================================================================
"""
import argparse
import pandas as pd
from SKTrainer import *
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
def _cast_types(args):
"""
This method performs casting to all types of inputs passed via cmd.
:param args: argparse.ArgumentParser object.
:return: argparse.ArgumentParser object.
"""
args.x_val = None if args.x_val == 'None' else int(args.x_val)
args.test_size = float(args.test_size)
args.alpha = float(args.alpha)
args.fit_prior = (args.fit_prior in ['True', "True", 'true', "true"])
# class_prior - array like type (problem to convert)
if args.class_prior == "None" or args.class_prior == 'None':
args.class_prior = None
# --------- #
return args
def main(args):
    """Train a MultinomialNB classifier on the given CSV and run the trainer."""
    args = _cast_types(args)

    # Reject degenerate datasets early.
    min_rows, min_cols = 10, 2
    data = pd.read_csv(args.data, index_col=0)
    rows_num, cols_num = data.shape
    if rows_num < min_rows:
        raise ValueError("LibraryError: The given csv doesn't have enough rows (at least 10 examples must be given).")
    if cols_num < min_cols:
        raise ValueError("DatasetError: Not enough columns in the csv (at least 2 columns must be given).")

    # The last column holds the labels; everything else is features.
    X = data.iloc[:, :-1]
    y = data.iloc[:, -1]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=args.test_size)

    # Classifier configured from the (already cast) user input.
    model = MultinomialNB(alpha=args.alpha,
                          fit_prior=args.fit_prior,
                          class_prior=args.class_prior)

    trainer = SKTrainer(model=model,
                        train_set=(X_train, y_train),
                        test_set=(X_test, y_test),
                        output_model_name=args.output_model,
                        testing_mode=args.test_mode,
                        folds=args.x_val)
    trainer.run()
# Command-line interface: every value arrives as a string and is cast
# later by _cast_types().
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="""MultinomialNB""")

    # ----- cnvrg.io params.
    parser.add_argument('--data', action='store', dest='data', required=True,
                        help="""String. path to csv file: The data set for the classifier. Assumes the last column includes the labels. """)

    parser.add_argument('--project_dir', action='store', dest='project_dir',
                        help="""--- For inner use of cnvrg.io ---""")

    parser.add_argument('--output_dir', action='store', dest='output_dir',
                        help="""--- For inner use of cnvrg.io ---""")

    parser.add_argument('--x_val', action='store', default="None", dest='x_val',
                        help="""Integer. Number of folds for the cross-validation. Default is None.""")

    parser.add_argument('--test_size', action='store', default="0.2", dest='test_size',
                        help="""Float. The portion of the data of testing. Default is 0.2""")

    # NOTE(review): help text mentions "random forests" and
    # "logistic_regression_model.sav"; the actual default is model.sav.
    parser.add_argument('--output_model', action='store', default="model.sav", dest='output_model',
                        help="""String. The name of the output file which is a trained random forests model. Default is logistic_regression_model.sav""")

    parser.add_argument('--test_mode', action='store', default=False, dest='test_mode',
                        help="""--- For inner use of cnvrg.io ---""")

    # ----- model's params.
    parser.add_argument('--alpha', action='store', default="0.1", dest='alpha',
                        help="""float: Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing)""")

    parser.add_argument('--fit_prior', action='store', default="True", dest='fit_prior',
                        help="""boolean Whether to learn class prior probabilities or not. If false, a uniform prior will be used.""")

    parser.add_argument('--class_prior', action='store', default=None, dest='class_prior',
                        help="""Prior probabilities of the classes. If specified the priors are not adjusted according to the data.""")

    args = parser.parse_args()
    main(args)
|
# -*- coding: utf-8 -*-
# keras系
from keras import models
from keras import layers
from keras.layers import Input,merge
from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten,MaxoutDense,Merge
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D, Deconvolution2D
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import load_img
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.utils.generic_utils import Progbar
from keras.utils.visualize_util import plot
from keras.datasets import cifar100
# その他
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
import cPickle
import random
import sys
from tqdm import tqdm
def make_trainable(net, val):
    """Set the ``trainable`` flag of *net* itself and of each of its layers."""
    net.trainable = val
    for layer in net.layers:
        layer.trainable = val
def create_generator(latent_size):
    """Build the generator: (latent, 12-d perception) -> single-channel image.

    The two inputs are merged through a dense projection, reshaped to
    5x5x40, then upsampled six times through conv/relu stages up to a
    sigmoid output map.
    """
    z_in = Input(shape=[latent_size])
    percep_in = Input(shape=[12])

    # Project the perception vector and concatenate with the latent code.
    cond = Dense(800, init='glorot_normal')(percep_in)
    x = merge([z_in, cond], mode='concat', concat_axis=1)
    x = BatchNormalization(mode=2)(x)
    x = Reshape([5, 5, 40])(x)

    x = Convolution2D(1024, 3, 3, border_mode='same', init='glorot_uniform')(x)
    x = Activation('relu')(x)

    # Five upsample -> conv -> relu stages, halving the filter count each time.
    for n_filters, k in ((512, 3), (256, 5), (128, 5), (64, 5), (32, 5)):
        x = UpSampling2D(size=(2, 2))(x)
        x = Convolution2D(n_filters, k, k, border_mode='same', init='glorot_uniform')(x)
        x = Activation('relu')(x)

    # Final upsample down to a single sigmoid channel.
    x = UpSampling2D(size=(2, 2))(x)
    x = Convolution2D(1, 5, 5, border_mode='same', init='glorot_uniform')(x)
    out = Activation('sigmoid')(x)

    return Model([z_in, percep_in], out)
def create_discriminator(img_shape):
    """Build the discriminator: (image, 12-d perception) -> 2-way softmax."""
    img_in = Input(shape=img_shape)
    percep_in = Input(shape=[12])

    # Strided conv pyramid; each stage halves the spatial resolution.
    x = img_in
    for n_filters in (32, 64, 128, 256, 512, 1024):
        x = Convolution2D(n_filters, 5, 5, subsample=(2, 2), border_mode = 'same', activation='relu')(x)
        x = LeakyReLU(0.2)(x)
    x = Convolution2D(1024, 5, 5, subsample=(2, 2), border_mode = 'same', activation='relu')(x)

    x = Flatten()(x)
    x = Dense(100)(x)
    x = LeakyReLU(0.2)(x)
    x = Dropout(0.5)(x)

    # Inject the perception vector additively into the feature space.
    cond = Dense(100, init='glorot_normal')(percep_in)
    x = merge([x, cond], mode='sum')
    out = Dense(2,activation='softmax')(x)

    return Model([img_in, percep_in], out)
# NOTE: this part was not clearly specified in the paper, so the
# architecture below is improvised.
def create_perception_model(img_shape):
    """Build the perception network: image -> 12-d sigmoid attribute vector."""
    img_in = Input(shape=img_shape)

    # Two strided conv stages, each with LeakyReLU and dropout.
    x = img_in
    for n_filters in (32, 64):
        x = Convolution2D(n_filters, 5, 5, subsample=(2, 2), border_mode = 'same', activation='relu')(x)
        x = LeakyReLU(0.2)(x)
        x = Dropout(0.5)(x)

    # Dense head ending in a 12-unit sigmoid output.
    x = Flatten()(x)
    x = Dense(500, init='glorot_normal')(x)
    x = Dense(500, init='glorot_normal')(x)
    x = Dense(12, init='glorot_normal')(x)
    out = Activation('sigmoid')(x)

    return Model(img_in, out)
class PDTG:
    """Bundles the generator, discriminator and perception networks and
    wires them into the combined training models.
    """

    def __init__(self, latent_size, input_shape):
        # Size of the generator's latent input vector.
        self.latent_size = latent_size
        # Shape of the images fed to the discriminator/perception nets.
        self.input_shape = input_shape
        self.is_model_created = False

    def create_model(self):
        """Instantiate the three sub-networks and the combined models."""
        self.generator = create_generator(self.latent_size)
        self.discriminator = create_discriminator(self.input_shape)
        self.perception_model = create_perception_model(self.input_shape)

        gan_input = Input(shape=[self.latent_size])
        perception_input = Input(shape=[12])
        # Generated image conditioned on (latent, perception).
        H = self.generator([gan_input, perception_input])
        p_V = self.perception_model(H)
        g_V = self.discriminator([H, perception_input])

        # generator->perception head, generator->discriminator head, and
        # the combined two-output model.
        self.g_p_model = Model([gan_input, perception_input], p_V)
        self.g_d_model = Model([gan_input, perception_input], g_V)
        self.model = Model([gan_input, perception_input], [g_V, p_V])
        self.g_p_model.summary()
        self.g_d_model.summary()
        # plot(self.model, to_file="pdtg.png", show_shapes=True, show_layer_names=True)
        self.is_model_created = True

    def compile_model(self):
        """Compile every sub-model with Adam(lr=1e-4)."""
        self.generator.compile(loss='binary_crossentropy', optimizer=Adam(lr=1e-4))
        self.discriminator.compile(loss='binary_crossentropy', optimizer=Adam(lr=1e-4))
        self.perception_model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-4))
        self.g_d_model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-4))
        self.g_p_model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-4))
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Created by magus0219[<EMAIL>] on 2020/4/6
import pytest
import copy
from artascope.src.lib.user_config_manager import ucm
from artascope.src.model.user_config import UserConfig
from artascope.src.lib.auth_manager import (
LoginStatusText,
LoginStatus,
)
from artascope.src.lib.auth_manager import AuthManager
from artascope.src.lib.task_manager import (
tm,
TaskRunType,
)
from artascope.test.conftest import MockPyiCloudService
# Form payload POSTed to /user/edit/ by the `response` fixture to create a
# test user; keys mirror the edit form's field names.
MOCK_NEW_USER = {
    "icloud_username": "account_name",
    "icloud_password": "<PASSWORD>",
    "admin_url_prefix": "http://127.0.0.1:5000",
    "target": "1",
    "sftp_host": "192.168.3.11",
    "sftp_port": "2222",
    "sftp_username": "someone",
    "sftp_password": "<PASSWORD>",
    "sftp_dir": "/home",
    "notify_type": "1",
    "slack_token": "abcdefg",
    "slack_channel": "dev",
    "smtp_host": "mail.google.com",
    "smtp_port": "456",
    "smtp_user": "user",
    "smtp_password": "password",
    "msg_from": "msg_from",
    "msg_to": "msg_to1;msg_to2",
    "scheduler_enable": "1",
    "scheduler_crontab": "0 1 * * *",
    "scheduler_last_day_cnt": "3",
}
@pytest.fixture
def response(client):
    """POST the mock user form and return the redirected response page."""
    return client.post("/user/edit/", data=MOCK_NEW_USER, follow_redirects=True)
@pytest.fixture()
def mock_login(monkeypatch):
    """Replace AuthManager.login with a fake backed by MockPyiCloudService."""
    def _fake_login(self):
        self._icloud_api = MockPyiCloudService(
            username="username", password="password", client_id="client_id"
        )
        # Simulate the captcha having been (re)sent when a re-login is needed.
        if self.get_login_status() == LoginStatus.NEED_LOGIN_AGAIN:
            self.set_login_status(LoginStatus.CAPTCHA_SENT)
        return self._icloud_api
    monkeypatch.setattr(AuthManager, "login", _fake_login)
@pytest.fixture()
def mock_login_exception(monkeypatch):
    """Replace AuthManager.login with a fake that always raises."""
    def _failing_login(self):
        raise Exception("error")
    monkeypatch.setattr(AuthManager, "login", _failing_login)
@pytest.mark.web
class TestUser:
    """Flask view tests for the user pages: list, edit form, and captcha.

    Most assertions match exact HTML byte strings rendered by the templates,
    so template changes will require updating these literals.
    """

    def test_user_without_content(self, client):
        """User list page renders even when no user exists yet."""
        response = client.get("/user", follow_redirects=True)
        assert b"User List" in response.data  # test jumbotron

    def test_user_with_content(self, client):
        """User list shows the saved user, its current task and action links."""
        uc = UserConfig("username", "password")
        ucm.save(uc)
        tm.add_task(
            task_name="task_name", username="username", run_type=TaskRunType.ALL
        )
        response = client.get("/user", follow_redirects=True)
        # test info
        assert b"User List" in response.data  # test jumbotron
        assert b"<td>1</td>" in response.data
        assert b"<td>username</td>" in response.data
        # test current task
        assert (
            "<td>{}</td>".format(tm.get_current_task_name(username="username")).encode()
            in response.data
        )
        assert LoginStatusText[LoginStatus.NOT_LOGIN].encode() in response.data
        # test link
        assert b'href="/task/username"' in response.data
        assert b'href="/task/run/username"' in response.data
        assert b'href="/user/edit/username"' in response.data
        assert b'href="/user/captcha/username"' in response.data

    def test_user_edit_get_without_user(self, client):
        """Blank edit form shows the default radio/checkbox selections."""
        response = client.get("/user/edit/")
        assert b"Edit User Setting" in response.data  # test jumbotron
        assert b"Add one user to download photos" in response.data  # test jumbotron
        # test default radio choice
        assert (
            b'<input class="form-check-input" type="radio" name="target" id="targetSFTP" value=1 checked>'
            in response.data
        )
        assert (
            b'<input class="form-check-input" type="radio" name="notify_type" id="None" value=0 checked>'
            in response.data
        )
        assert (
            b'<input class="form-check-input" type="checkbox" name="reindex_enable" value="1" id="reindexCheck" checked>'
            in response.data
        )
        assert (
            b'<input class="form-check-input" type="radio" name="notify_type" id="Slack" value=1 >'
            in response.data
        )
        assert (
            b'<input class="form-check-input" type="radio" name="notify_type" id="Email" value=2 >'
            in response.data
        )
        assert (
            b'<input class="form-check-input" type="radio" name="scheduler_enable" id="SchedulerDisable" value=0 checked>'
            in response.data
        )

    def test_user_edit_add_new_user(self, client, response):
        """Posting the edit form (the `response` fixture) creates the user."""
        # redirect to user list
        # test info
        assert b"User List" in response.data  # test jumbotron
        assert b"<td>1</td>" in response.data
        assert b"<td>account_name</td>" in response.data
        assert LoginStatusText[LoginStatus.NOT_LOGIN].encode() in response.data
        # test link
        assert b'href="/task/account_name"' in response.data
        assert b'href="/task/run/account_name"' in response.data
        assert b'href="/user/edit/account_name"' in response.data
        assert b'href="/user/captcha/account_name"' in response.data

    def test_user_edit_get_exsited_user(self, client, response):
        """Edit form for an existing user is pre-filled with all saved values."""
        response = client.get("/user/edit/{}".format(MOCK_NEW_USER["icloud_username"]))
        assert b"Edit User Setting" in response.data  # test jumbotron
        print(response.data)
        assert (
            "of {}".format(MOCK_NEW_USER["icloud_username"]).encode() in response.data
        )  # test jumbotron
        # test info
        assert (
            '<input type="email" class="form-control" id="inputEmail" name="icloud_username" value="{}"'.format(
                MOCK_NEW_USER["icloud_username"]
            ).encode()
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="inputPassword" name="icloud_password" value="{}"'.format(
                MOCK_NEW_USER["icloud_password"]
            ).encode()
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="adminURLPrefix" name="admin_url_prefix" value="{}"'.format(
                MOCK_NEW_USER["admin_url_prefix"]
            ).encode()
            in response.data
        )
        assert (
            b'<input class="form-check-input" type="radio" name="target" id="targetSFTP" value=1 checked>'
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="SFTPHost" name="sftp_host" value="{}"'.format(
                MOCK_NEW_USER["sftp_host"]
            ).encode()
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="SFTPPort" name="sftp_port" value="{}"'.format(
                MOCK_NEW_USER["sftp_port"]
            ).encode()
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="SFTPUsername" name="sftp_username" value="{}"'.format(
                MOCK_NEW_USER["sftp_username"]
            ).encode()
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="SFTPPassword" name="sftp_password" value="{}"'.format(
                MOCK_NEW_USER["sftp_password"]
            ).encode()
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="SFTPDir" name="sftp_dir" value="{}"'.format(
                MOCK_NEW_USER["sftp_dir"]
            ).encode()
            in response.data
        )
        assert (
            b'<input class="form-check-input" type="radio" name="notify_type" id="Slack" value=1 checked>'
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="slackToken" name="slack_token" value="{}"'.format(
                MOCK_NEW_USER["slack_token"]
            ).encode()
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="slackChannel" name="slack_channel" value="{}"'.format(
                MOCK_NEW_USER["slack_channel"]
            ).encode()
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="smtpHost" name="smtp_host" value="{}"'.format(
                MOCK_NEW_USER["smtp_host"]
            ).encode()
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="smtpPort" name="smtp_port" value="{}"'.format(
                MOCK_NEW_USER["smtp_port"]
            ).encode()
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="smtpUser" name="smtp_user" value="{}"'.format(
                MOCK_NEW_USER["smtp_user"]
            ).encode()
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="smtpPassword" name="smtp_password" value="{}"'.format(
                MOCK_NEW_USER["smtp_password"]
            ).encode()
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="msgFrom" name="msg_from" value="{}"'.format(
                MOCK_NEW_USER["msg_from"]
            ).encode()
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="msgTo" name="msg_to" value="{}"'.format(
                MOCK_NEW_USER["msg_to"]
            ).encode()
            in response.data
        )
        assert (
            b'<input class="form-check-input" type="radio" name="scheduler_enable" id="SchedulerEnable" value=1 checked>'
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="SchedulerCron" name="scheduler_crontab" value="{}"'.format(
                MOCK_NEW_USER["scheduler_crontab"]
            ).encode()
            in response.data
        )
        assert (
            '<input type="text" class="form-control" id="SchedulerLastDayCnt" name="scheduler_last_day_cnt" value="{}"'.format(
                MOCK_NEW_USER["scheduler_last_day_cnt"]
            ).encode()
            in response.data
        )

    def test_user_captcha_get(self, client, response):
        """Captcha page renders for an existing user."""
        response = client.get(
            "/user/captcha/{}".format(MOCK_NEW_USER["icloud_username"])
        )
        # test info
        assert b"Captcha" in response.data  # test jumbotron
        assert (
            "enter captcha of {}".format(MOCK_NEW_USER["icloud_username"]).encode()
            in response.data
        )

    def test_user_captcha_post_without_current_task(self, client, response):
        """Submitting a captcha without a running task marks it received."""
        data = {"captcha": "abcd"}
        response = client.post(
            "/user/captcha/{}".format(MOCK_NEW_USER["icloud_username"]),
            data=data,
            follow_redirects=True,
        )
        # test info
        assert b"Task List" in response.data  # test jumbotron
        assert (
            "of {}".format(MOCK_NEW_USER["icloud_username"]).encode() in response.data
        )
        assert (
            AuthManager(username=MOCK_NEW_USER["icloud_username"]).get_login_status()
            == LoginStatus.CAPTCHA_RECEIVED
        )

    def test_user_captcha_post_with_current_task(self, client, response):
        """Submitting a captcha while a task exists also marks it received."""
        tm.add_task(
            task_name="task_name",
            username=MOCK_NEW_USER["icloud_username"],
            run_type=TaskRunType.ALL,
        )
        data = {"captcha": "abcd"}
        response = client.post(
            "/user/captcha/{}".format(MOCK_NEW_USER["icloud_username"]),
            data=data,
            follow_redirects=True,
        )
        # test info
        assert b"Task List" in response.data  # test jumbotron
        assert (
            "of {}".format(MOCK_NEW_USER["icloud_username"]).encode() in response.data
        )
        assert (
            AuthManager(username=MOCK_NEW_USER["icloud_username"]).get_login_status()
            == LoginStatus.CAPTCHA_RECEIVED
        )

    # TODO how to check sync is called
    def test_send_captcha_again(self, client, response, mock_login):
        """Re-sending the captcha (with mocked login) sets CAPTCHA_SENT."""
        response = client.get(
            "/user/send_captcha/{}".format(MOCK_NEW_USER["icloud_username"]),
        )
        assert b"Captcha has been sent." in response.data
        assert (
            AuthManager(username="account_name").get_login_status()
            == LoginStatus.CAPTCHA_SENT
        )

    def test_send_captcha_again_exception(self, client, response, mock_login_exception):
        """A login failure during captcha re-send surfaces the error message."""
        response = client.get(
            "/user/send_captcha/{}".format(MOCK_NEW_USER["icloud_username"]),
        )
        assert b"error" in response.data
|
import logging
import unittest
import requests
from requests.exceptions import ConnectionError
from mock_services import http_mock
from mock_services import is_http_mock_started
from mock_services import no_http_mock
from mock_services import reset_rules
from mock_services import start_http_mock
from mock_services import stop_http_mock
from mock_services import update_http_rules
from mock_services import with_http_mock
# Module-wide logging setup: INFO level with timestamped, level-tagged lines.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(levelname)-8s %(name)s %(message)s'
)
def fake_duckduckgo_cb(request, context):
    """Canned mock body for duckduckgo requests; both arguments are ignored."""
    canned_body = 'Coincoin!'
    return canned_body
# Single mock rule: GET requests matching the duckduckgo search URL prefix
# are answered by fake_duckduckgo_cb with a text/html content type.
rules = [
    {
        'text': fake_duckduckgo_cb,
        'headers': {'Content-Type': 'text/html'},
        'method': 'GET',
        'url': r'^https://duckduckgo.com/\?q='
    },
]
class HttpTestCase(unittest.TestCase):
    """Exercises the mock_services HTTP mocking API end to end.

    NOTE(review): several tests hit the real duckduckgo.com / google.com
    when the mock is stopped or external calls are allowed, so this suite
    needs internet access.
    """

    def setUp(self):
        # Start each test from a clean state: mock stopped, no rules,
        # external (non-mocked) calls disallowed.
        stop_http_mock()
        reset_rules()
        http_mock.set_allow_external(False)

    # Identical cleanup after each test.
    tearDown = setUp

    def test_reset_rules(self):
        """reset_rules() clears previously registered rules."""
        self.assertFalse(http_mock.get_rules())
        update_http_rules(rules)
        self.assertEqual(len(http_mock.get_rules()), 1)
        # reset
        reset_rules()
        self.assertFalse(http_mock.get_rules())

    def test_update_rules(self):
        """update_http_rules() builds matchers with method, url and response."""
        self.assertFalse(http_mock.get_rules())
        # add first rule
        update_http_rules(rules)
        self.assertEqual(len(http_mock.get_rules()), 1)
        matcher = http_mock.get_rules()[0]
        self.assertEqual(matcher._method, 'GET')
        self.assertTrue(hasattr(matcher._url, 'match'))
        self.assertTrue(matcher._url.match('https://duckduckgo.com/?q=mock-services'))  # noqa
        response = matcher._responses[0]
        self.assertTrue(hasattr(response._params['text'], '__call__'))
        self.assertEqual(response._params['headers']['Content-Type'], 'text/html')  # noqa
        # add second rule
        update_http_rules([
            {
                'method': 'POST',
                'status_code': 201,
                'text': '{"coin": 1}',
                'url': r'http://dummy/',
            },
        ])
        self.assertEqual(len(http_mock.get_rules()), 2)
        matcher = http_mock.get_rules()[1]
        self.assertTrue(hasattr(matcher._url, 'match'))
        self.assertTrue(matcher._url.match('http://dummy/'))
        self.assertEqual(matcher._method, 'POST')
        response = matcher._responses[0]
        self.assertEqual(response._params['status_code'], 201)
        self.assertEqual(response._params['text'], '{"coin": 1}')
        self.assertEqual(response._params['headers']['Content-Type'], 'text/plain')  # noqa

    def test_start_http_mock(self):
        """Before start the real page is served; after start the mocked body."""
        update_http_rules(rules)
        response = requests.get('https://duckduckgo.com/?q=mock-services')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content[:15], b'<!DOCTYPE html>')
        self.assertTrue(start_http_mock())
        response = requests.get('https://duckduckgo.com/?q=mock-services')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'Coincoin!')

    def test_stop_http_mock(self):
        """stop_http_mock() restores real HTTP traffic."""
        update_http_rules(rules)
        self.assertTrue(start_http_mock())
        response = requests.get('https://duckduckgo.com/?q=mock-services')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'Coincoin!')
        self.assertTrue(stop_http_mock())
        response = requests.get('https://duckduckgo.com/?q=mock-services')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content[:15], b'<!DOCTYPE html>')

    def test_restart_http_mock(self):
        """Start/stop are idempotent: repeated calls return False."""
        update_http_rules(rules)
        start_http_mock()
        response = requests.get('https://duckduckgo.com/?q=mock-services')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'Coincoin!')
        self.assertTrue(stop_http_mock())
        # already stopped
        self.assertFalse(stop_http_mock())
        response = requests.get('https://duckduckgo.com/?q=mock-services')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content[:15], b'<!DOCTYPE html>')
        self.assertTrue(start_http_mock())
        response = requests.get('https://duckduckgo.com/?q=mock-services')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'Coincoin!')
        # already started
        self.assertFalse(start_http_mock())

    def test_is_http_mock_started(self):
        """is_http_mock_started() tracks the mock's running state."""
        update_http_rules(rules)
        self.assertFalse(is_http_mock_started())
        self.assertTrue(start_http_mock())
        self.assertTrue(is_http_mock_started())

    def test_no_http_mock(self):
        """@no_http_mock should disable mocking inside the wrapped function."""
        update_http_rules(rules)
        self.assertTrue(start_http_mock())
        @no_http_mock
        def please_do_not_mock_me():
            # NOTE(review): this inner function is defined but never invoked
            # below -- confirm the decorator runs it at definition time,
            # otherwise these inner assertions never execute.
            self.assertFalse(is_http_mock_started())
            response = requests.get('https://duckduckgo.com/?q=mock-services')
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.content[:15], b'<!DOCTYPE html>')
        self.assertTrue(is_http_mock_started())

    def test_with_http_mock(self):
        """@with_http_mock should enable mocking inside the wrapped function."""
        update_http_rules(rules)
        self.assertFalse(is_http_mock_started())
        @with_http_mock
        def please_do_not_mock_me():
            # NOTE(review): same as above -- inner function is never called here.
            self.assertTrue(is_http_mock_started())
            response = requests.get('https://duckduckgo.com/?q=mock-services')
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.content, b'Coincoin!')
        self.assertFalse(is_http_mock_started())

    def test_real_http_0(self):
        """With external calls disallowed, unmatched URLs raise ConnectionError."""
        update_http_rules(rules)
        self.assertTrue(start_http_mock())
        # mocked
        response = requests.get('https://duckduckgo.com/?q=mock-services')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'Coincoin!')
        # not mocked but fail
        self.assertRaises(ConnectionError, requests.get,
                          'https://www.google.com/#q=mock-services')
        # test we keep the request
        try:
            url = 'https://www.google.com/#q=mock-services'
            requests.get(url)
        except ConnectionError as e:
            self.assertEqual(e.request.url, url)

    def test_real_http_1(self):
        """With external calls allowed, unmatched URLs pass through for real."""
        update_http_rules(rules)
        self.assertTrue(start_http_mock())
        # allow external call
        http_mock.set_allow_external(True)
        # mocked
        response = requests.get('https://duckduckgo.com/?q=mock-services')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'Coincoin!')
        # not mocked but do an external call
        response = requests.get('https://www.google.com/#q=mock-services')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content[:15], b'<!doctype html>')
|
# coding:utf-8
#
#
#
#
import os
import argparse
import numpy as np
import soundfile as sf
import tensorflow as tf
from config import load_conf_info
from data_utils.data_loader import AudioParser
from model_utils.tester import BaseTester
from model_utils.utils import AudioReBuild
from model_utils.model import FullyCNNSEModel, FullyCNNSEModelV2, FullyCNNSEModelV3
class InferenceEngine(BaseTester):
    """TF1 graph-mode inference engine for single-file speech denoising.

    Builds the FullyCNN speech-enhancement graph, restores a checkpoint
    (via the BaseTester helpers), and writes a denoised copy of an input
    wav file next to ``audio_save_path``.
    """

    def __init__(self, infer_config):
        # infer_config is a ConfigParser-like object: .get(section, option).
        super(InferenceEngine, self).__init__(infer_config)
        self.sample_rate = int(infer_config.get("data", 'sample_rate'))
        self.feature_dim = int(infer_config.get('data', 'feature_dim'))
        self.audio_save_path = infer_config.get('data', 'audio_save_path')
        self.window_ms = int(infer_config.get("data", "window_ms"))
        self.stride_ms = int(infer_config.get("data", "stride_ms"))
        self.net_arch = infer_config.get('model', 'net_arch')
        self.net_work = infer_config.get('model', 'net_work')
        # Graph must exist before session creation / checkpoint restore.
        self.creat_graph()
        self._init_session()
        self._load_checkpoint()
        self.param_count()
        # use_complex=True: parser keeps the complex spectrogram so the
        # phase can be reused when rebuilding the waveform.
        self.audio_parser = AudioParser(self.sample_rate, self.window_ms, self.stride_ms, use_complex=True)
        self.audio_rebuilder = AudioReBuild()

    def creat_graph(self):
        """Create placeholders and the chosen FullyCNN model's forward pass."""
        # Input/target spectrogram placeholders: (batch, time, feature, 1).
        self.input_x = tf.placeholder(shape=[None, None, self.feature_dim, 1],
                                      dtype=tf.float32,
                                      name="input")
        # NOTE(review): target_y is never fed or used in denoise(); it looks
        # like a leftover from the training graph.
        self.target_y = tf.placeholder(shape=[None, None, self.feature_dim, 1],
                                       dtype=tf.float32,
                                       name="target")
        # Select the network variant; anything unrecognized falls back to V1.
        if self.net_work == "FullyCNNV2":
            self.model = FullyCNNSEModelV2(is_training=False)
        elif self.net_work == "FullyCNNV3":
            self.model = FullyCNNSEModelV3(is_training=False)
        else:
            print("net_work set default or not wright. Use FullyCNN")
            self.model = FullyCNNSEModel(is_training=False)
        self.pred = self.model(self.input_x)

    def denoise(self, audio_file):
        """Denoise one wav file and save it with a ``_de.wav`` suffix.

        Magnitude is predicted by the network; the noisy phase is kept and
        recombined to synthesize the output waveform.
        """
        sig, sr = self.audio_parser.load_audio(audio_file)
        sig_length = len(sig)
        complex_spectrogram = self.audio_parser.parse_audio(sig)
        mag = self.audio_parser.extractor.power_spectrum(complex_spectrogram)
        # Add batch and channel axes (and swap time/feature) for the model.
        mag = np.reshape(mag, (1, mag.shape[1], mag.shape[0], 1))
        phase = self.audio_parser.extractor.divide_phase(complex_spectrogram)
        phase = np.reshape(phase, (1, phase.shape[1], phase.shape[0]))
        feed_dict = {
            self.input_x: mag
        }
        pred = self.sess.run(self.pred, feed_dict=feed_dict)
        denoise = self.audio_rebuilder.rebuild_audio([sig_length],
                                                     pred.squeeze(-1),
                                                     phase,
                                                     self.sample_rate,
                                                     self.window_ms,
                                                     self.stride_ms)
        if not os.path.exists(self.audio_save_path):
            os.makedirs(self.audio_save_path)
        denoise_audio_path = os.path.join(self.audio_save_path,
                                          os.path.basename(audio_file).replace('.wav', '_de.wav'))
        sf.write(denoise_audio_path, denoise[0], samplerate=self.sample_rate)
        print("Saving denoise file to {}.".format(denoise_audio_path))
if __name__ == "__main__":
    # CLI entry point: denoise a single audio file using a config-driven model.
    parser = argparse.ArgumentParser(description='Training')
    parser.add_argument('--cfg', default='', type=str, help='cfg file for infer')
    parser.add_argument('--audio-file', default='', type=str, help='audio to denoise')
    args = parser.parse_args()
    config = load_conf_info(args.cfg)
    model = InferenceEngine(config)
    audio_filepath = args.audio_file
    model.denoise(audio_filepath)
|
<filename>LuciferMoringstar_Robot/__init__.py
from .Utils import (
get_filter_results,
get_file_details,
is_subscribed,
get_poster,
Media
)
from .Channel import (
RATING,
GENRES
)
# /help reply template; "{}" is filled with the requesting user's name.
HELP = """𝙷𝙴𝚈 {}
𝘏𝘦𝘳𝘦 𝘐𝘴 𝘛𝘩𝘦 𝘏𝘦𝘭𝘱 𝘍𝘰𝘳 𝘔𝘺 𝘊𝘰𝘮𝘮𝘢𝘯𝘥𝘴."""
# /about reply: static HTML-formatted info card (parsed as HTML by the bot).
ABOUT ="""
╔════❰ ꪖ᥇ꪮꪊ𝓽 ꪑ𝘴ᧁ ❱═❍⊱❁۪۪
║╭━━━━━━━━━━━━━━━➣
║┣⪼ 𝙈𝙔 𝙉𝘼𝙈𝙀 - <a href="https://t.me/No_Way_Home_bot"> TomHolland </a>
║┣⪼ Ⓓ︎Ⓔ︎Ⓥ︎1 - <a href="https://t.me/PeterParkerspide"> 𝙿𝚎𝚝𝚎𝚛ᵖᵃʳᵏᵉʳ </a>
║┣⪼ 𝓛𝓲𝓫𝓻𝓪𝓻𝓻𝔂 - 𝙿𝚈𝚁𝙾𝙶𝚁𝙰𝙼
║┣⪼ 𝓛𝓪𝓷𝓰𝓾𝓪𝓰𝓮 - 𝙿𝚈𝚃𝙷𝙾𝙽 𝟹
║┣⪼ 𝓓𝓪𝓽𝓪 𝓑𝓪𝓼𝓮 - 𝙼𝙾𝙽𝙶𝙾 𝙳𝙱
║┣⪼ 𝓑𝓸𝓽 𝓼𝓮𝓻𝓿𝓮𝓻 - 𝙷𝙴𝚁𝙾𝙺𝚄
║┣⪼ 𝓑𝓾𝓲𝓵𝓭 𝓢𝓽𝓪𝓽𝓾𝓼 - v1.0.1 [ 𝙱𝙴𝚃𝙰 ]
║╰━━━━━━━━━━━━━━━➣ ╚══════════════════❍⊱❁۪۪"""
# /fun reply: list of game/fun commands the bot responds to.
FUN = """<b>FUN MODULE</b>
<b>🎲 NOTHING MUCH JUST SOME FUN THINGS</b>
t𝗋𝗒 𝗍𝗁𝗂𝗌 𝖮𝗎𝗍:
𝟣. /dice - Roll The Dice
𝟤. /Throw 𝗈𝗋 /Dart - 𝖳𝗈 𝖬𝖺𝗄𝖾 Drat
3. /Runs - Jokes
4. /Goal or /Shoot - To Make A Goal Or Shoot
5. /basketball or /basket - basketball game
6. /toss - Toss a coin
7. /luck or /cownd - Spin the Lucky"""
|
import glob
import os
import cv2
import h5py as h5
import utils.coalbp
import numpy as np
import utils.preprocess as prep
SEP = os.path.sep
def get_path(is_training=True):
    """Collect CASIA avi file paths, split into real and fake lists.

    Per the CASIA-FASD naming scheme used here, clips named 1.avi, 2.avi
    or 3.avi are genuine faces; everything else is an attack.

    Args:
        is_training: select the train_release or test_release split.

    Returns:
        (path_real, path_fake): two lists of absolute avi paths.
    """
    dataset = "train_release" if is_training else "test_release"
    # Raw string: the original "D:\Database" only worked because "\D" is not
    # a recognized escape sequence; r"..." makes the intent explicit.
    root_path = os.path.join(r"D:\Database", "CASIA-CBSR", dataset)
    real_names = {"1.avi", "2.avi", "3.avi"}
    path_real = []
    path_fake = []
    for dirpath_, dirname_, filename_ in os.walk(root_path):
        for dir_ in dirname_:
            for _, _, aviname_ in os.walk(os.path.join(root_path, dir_)):
                for avi in aviname_:
                    avi_path = os.path.join(root_path, dir_, avi)
                    print(avi_path)
                    if avi in real_names:
                        path_real.append(avi_path)
                    else:
                        path_fake.append(avi_path)
    return path_real, path_fake
def get_face(path_real, path_fake, shape=(128, 128, 3)):
    """Extract, resize and stack detected faces from real/fake video lists.

    Args:
        path_real: avi paths of genuine-face clips.
        path_fake: avi paths of attack clips.
        shape: output crop shape (h, w, channels).

    Returns:
        (face_real, face_fake): uint8 arrays of shape (n, 128, 128, 3).
    """
    def _collect(paths):
        # Gather resized face crops from every frame of every video.
        crops = []
        for path in paths:
            frames = prep.parse_video(path)
            for idx, frame in enumerate(frames, start=1):
                _, face = prep.face_detection(frame, 1.4)
                if face is None:
                    continue
                print("Processing {} {}/{}".format(path, idx, len(frames)))
                # BUG FIX: cv2.resize expects a 2-tuple (w, h); the original
                # fake branch passed the full 3-tuple `shape`, which raises.
                face = cv2.resize(face, shape[0:2])
                crops.append(face[np.newaxis, :, :, :])
        # Single concatenate instead of the original O(n^2) per-frame
        # np.concatenate accumulation.
        if crops:
            faces = np.concatenate(crops, axis=0)
        else:
            faces = np.empty((0, 128, 128, 3), np.uint8)
        print("shape: ", faces.shape)
        return faces

    face_real = _collect(path_real)
    face_fake = _collect(path_fake)
    return face_real, face_fake
def creat_database(is_training):
    """Build the CASIA face HDF5 database from detected face crops.

    Stores X_real/y_real and X_fake/y_fake datasets in an h5 file in the
    current working directory.
    """
    path_real, path_fake = get_path(is_training)
    face_real, face_fake = get_face(path_real, path_fake)
    db_name = "CASIA_FACE_128x128_s1_BGR.h5df"
    db = h5.File(db_name, "a")
    y_real = np.ones(face_real.shape[0], np.int8)
    # NOTE(review): fake labels are also all ones, identical to the real
    # labels -- confirm whether y_fake should be zeros.
    y_fake = np.ones(face_fake.shape[0], np.int8)
    try:
        db.create_dataset("X_real", data=face_real)
        # BUG FIX: the original wrote face_real under "y_real"; store labels.
        db.create_dataset("y_real", data=y_real)
        db.create_dataset("X_fake", data=face_fake)
        db.create_dataset("y_fake", data=y_fake)
    finally:
        # Close the file even if a dataset already exists and create raises.
        db.close()
def load_database(is_training=True, stride=1, db_file=r"D:\Database\CASIA-CBSR\CASIA-FASD_128.mat"):
    """Load the database created by Dr. Sun.

    Training mode concatenates the TRAIN and VAL splits; otherwise the TEST
    split is returned. ``stride`` subsamples every split.

    Returns:
        (X, y): feature array and int8 label array.
    """
    mat = h5.File(db_file, 'r')
    if not is_training:
        return mat["TEST_X"][::stride], np.int8(mat["TEST_LBL"][::stride])
    features = np.concatenate(
        (mat["TRAIN_X"][::stride], mat["VAL_X"][::stride]), axis=0)
    print("Concate:", features.shape)
    labels = np.concatenate(
        (mat["TRAIN_LBL"][::stride], mat["VAL_LBL"][::stride]), axis=0)
    return features, np.int8(labels)
def main():
    # Placeholder entry point; the module actually runs creat_database from
    # a __main__ guard instead of through this function.
    pass
if __name__ == '__main__':
    # Builds the training-split face database when run as a script.
    is_training = True
    creat_database(is_training)
# NOTE(review): main() is redefined here, shadowing the earlier definition,
# and a second __main__ guard follows -- this looks like leftover scratch
# code; the creat_database() call earlier in the file still runs first.
def main():
    # Only enumerates dataset paths (printing each avi path as a side
    # effect); the returned lists are discarded.
    get_path()
if __name__ == '__main__':
main() |
<reponame>zh794390558/lingvo<filename>lingvo/tasks/asr/model_test_input_generator.py
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple input generator used for ASR model tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import lingvo.compat as tf
from lingvo.core import base_input_generator
from lingvo.core import py_utils
class TestInputGenerator(base_input_generator.BaseSequenceInputGenerator):
"""A simple InputGenerator that delegate requests to another obj."""
@classmethod
def Params(cls):
p = super(TestInputGenerator, cls).Params()
p.random_seed = 20349582
p.Define('feature_dims', 240, 'Feature dims')
p.Define('num_channels', 3, 'Data are preprocessed into these many '
'channels per timestep. E.g., feature_dims=240 is actually '
'[80, 3], i.e., 3 channels, each with a 40-dim feature vector.')
p.Define('source_shape', [2, 10, 8, 3], 'source shape.')
p.Define('target_shape', [2, 5], 'targets shape.')
p.Define('fixed_target_labels', None,
'If not None, use these as the targets instead of generating '
'random targets and set target padding to 0. Must have same '
'shape as target_shape.')
p.Define(
'fixed_target_ids', None,
'If not None, use these as the target_ids instead of generating '
'random targets. Must have same shape as target_shape.')
p.Define('cur_iter_in_seed', True, 'use current iter value in seed '
'computation.')
p.Define('integer_source_max', None, 'Generate integers as source values '
'with this value as an upper bound.')
p.Define(
'float_source_max', None, 'Generate floats as source values '
'with this value as an upper bound.')
p.Define('for_mt', False, 'True if this is for mt models; '
'this affects some parts of batch generation')
p.Define('target_key', '', 'If non-empty, targets will be specified in '
'batch.additional_tgts[target_key] instead of batch.tgt.')
p.Define('target_key_target_shape', [2, 5], 'Shape of the targets stored '
'batch.additional_tgts[target_key].')
p.Define('set_tgt_and_additional_tgts', False, 'If true, '
'both batch.tgt and batch.additional_tgts[target_key] will '
'be set. target_key_target_shape must be specified.')
p.Define('target_language', 'ENGLISH',
'The target language. Both language name (e.g. "ENGLISH") and '
'language code (e.g. "zh-CN") are acceptted.')
p.Define('align_label_with_frame', False,
'Whether to generate label-frame alignments.')
p.Define(
'bprop_filters', [], 'If set, simulates a multi source'
'input and sets filters for each source i.e the first filter'
'corresponds to the first source etc. The number of sources is set'
'to the length of this param.')
p.Define(
'number_sources', None, 'Integer which specifies the number of'
'sources. Cannot be used along with bprop_filters.')
p.Define(
'source_selected', None, 'Integer which specifies the index of the'
'source selected. Corresponds to the data source that would be'
'sampled by the input_generator when given multiple file_patterns.'
'This has an effect only when number_sources is set and greater than 1.'
'Can use either constant values or a tensor like'
'tf.mod(tf.train.get_or_create_global_step(), num_sources)')
p.Define('target_transcript', 'dummy_transcript',
'Text to use for transcript.')
return p
def __init__(self, params):
super(TestInputGenerator, self).__init__(params)
p = self.params
self._bprop_variable_filters = ['']
self._bprop_onehot = tf.constant([1], dtype=tf.float32)
if p.target_key and not p.target_key_target_shape:
raise ValueError('target_key_target_shape must be set when '
'target_key (%s) is not empty.' % p.target_key)
if (p.set_tgt_and_additional_tgts and
(p.target_key_target_shape[0] != p.target_shape[0])):
raise ValueError('The first dimension of target_key_target_shape (%d) '
'should match the first dimension of target_shape '
'(%d) when both have to be set.' %
(p.target_key_target_shape[0], p.target_shape[0]))
self._cur_iter = 0
if p.bprop_filters and p.number_sources:
raise ValueError(
'Number of sources will be set to length of bprop_filters, the param'
'number_sources should not be used when bprop_filters is set.')
number_sources = p.number_sources
if p.bprop_filters:
self._bprop_variable_filters = p.bprop_filters
number_sources = len(p.bprop_filters)
if number_sources and number_sources > 1:
self._bprop_onehot = tf.one_hot(
p.source_selected, number_sources, dtype=tf.float32)
def _check_paddings(self, paddings):
with tf.name_scope('check_paddings'):
unpacked_paddings = tf.unstack(paddings)
non_decr = []
for t in unpacked_paddings:
non_d = tf.is_non_decreasing(t)
non_decr.append(non_d)
all_non_decr = tf.stack(non_decr)
paddings = py_utils.with_dependencies([
tf.assert_equal(
tf.reduce_any(tf.equal(paddings, 0.0)),
True,
message='must have at least one zero value.'),
tf.assert_equal(all_non_decr, True, message='must be non-decreasing')
], paddings)
return paddings
def GetBpropParams(self):
return self._bprop_params
def GetBpropType(self):
"""Get the current bprop type of the input generator batch."""
return self._bprop_onehot
def SampleIds(self):
p = self.params
if p.cur_iter_in_seed:
random_seed = p.random_seed * 2000 * self._cur_iter
else:
random_seed = p.random_seed * 2000
return tf.as_string(tf.random_uniform(p.target_shape[:1], seed=random_seed))
def _Sources(self):
p = self.params
if p.cur_iter_in_seed:
self._cur_iter += 1
if p.integer_source_max:
inputs = tf.random_uniform(
p.source_shape,
maxval=p.integer_source_max,
dtype=tf.int32,
seed=p.random_seed + 1000 * self._cur_iter)
elif p.float_source_max:
inputs = tf.random_uniform(
p.source_shape,
maxval=p.float_source_max,
seed=p.random_seed + 1000 * self._cur_iter)
else:
inputs = tf.random_normal(
p.source_shape, seed=p.random_seed + 1000 * self._cur_iter)
paddings = tf.cast(
tf.cumsum(
tf.random_uniform(
p.source_shape[:2], seed=p.random_seed + 1001 * self._cur_iter),
axis=1) > 0.5 * p.source_shape[1], tf.float32)
paddings = self._check_paddings(paddings)
return inputs, paddings
def _Targets(self, target_shape):
p = self.params
if p.cur_iter_in_seed:
self._cur_iter += 1
random_seed = p.random_seed * 2000 * self._cur_iter
if p.fixed_target_ids is None:
tids = tf.cast(
tf.random_uniform(target_shape, seed=random_seed) *
p.tokenizer.vocab_size, tf.int32)
else:
tids = p.fixed_target_ids
assert tids.shape_as_list() == target_shape
if p.fixed_target_labels is None:
tlabels = tf.cast(
tf.random_uniform(target_shape, seed=random_seed + 1) *
p.tokenizer.vocab_size, tf.int32)
tpaddings = tf.cast(
tf.cumsum(
tf.random_uniform(
target_shape[:2], seed=p.random_seed + 1001 * self._cur_iter),
axis=1) > 0.4 * target_shape[1], tf.float32)
tpaddings = self._check_paddings(tpaddings)
else:
tlabels = p.fixed_target_labels
assert tlabels.shape_as_list() == target_shape
tpaddings = tf.constant(0.0, shape=target_shape)
tweights = 1.0 - tpaddings
d = {
'ids': tids,
'labels': tlabels,
'weights': tweights,
'paddings': tpaddings
}
if not p.for_mt:
d['transcripts'] = tf.constant(
p.target_transcript, shape=[target_shape[0]])
if p.align_label_with_frame:
source_len = p.source_shape[1]
d['alignments'] = tf.cast(
tf.random_uniform(target_shape, seed=p.random_seed) * source_len,
tf.int32)
return d
def GlobalBatchSize(self):
p = self.params
return tf.constant(p.target_shape[0])
def InputBatch(self):
    """Builds one random input batch as a `py_utils.NestedMap`.

    Returns:
      A NestedMap with src (inputs + paddings), tgt and/or additional_tgts,
      sample_ids, and source_selected, with floating-point tensors cast to
      the fprop dtype.
    """
    p = self.params

    def _CastFloats(v):
        # Cast floating tensors to the fprop dtype (default: float32);
        # leave None and non-float tensors untouched.
        if v is None:
            return None
        return tf.cast(v, py_utils.FPropDtype(p)) if v.dtype.is_floating else v

    ret = py_utils.NestedMap()
    ret.src = py_utils.NestedMap()
    # MT models consume token ids; others consume raw source inputs.
    src_key = 'ids' if p.for_mt else 'src_inputs'
    ret.src[src_key], ret.src.paddings = self._Sources()
    # Set tgts only when needed: if target_key is specified and setting both
    # tgt and additional_tgts is not requested, only additional_tgts is
    # populated. This is useful when testing a model that solely uses
    # additional_tgts instead of tgt.
    if not p.target_key or p.set_tgt_and_additional_tgts:
        ret.tgt = py_utils.NestedMap(self._Targets(p.target_shape))
    else:
        ret.tgt = None
    if p.target_key:
        ret.additional_tgts = py_utils.NestedMap()
        ret.additional_tgts[p.target_key] = py_utils.NestedMap(
            self._Targets(p.target_key_target_shape))
    ret.sample_ids = self.SampleIds()
    # One-hot source selector broadcast across the batch.
    ret.source_selected = tf.tile(
        tf.expand_dims(self._bprop_onehot, 0), [p.source_shape[0], 1])
    return ret.Transform(_CastFloats)
def _GetSourceInputsAndLabels(self, data_source):
    """Unpacks a data-source tuple and moves channels to the last dimension.

    Args:
      data_source: a (src_inputs, src_paddings, labels) tuple; src_inputs is
        laid out channel-major in its trailing dimension.

    Returns:
      The same triple with src_inputs reshaped/transposed so the channel
      axis is last.
    """
    p = self.params
    src_inputs, src_paddings, labels = data_source
    # The data are channel-major: split the trailing dim into
    # (num_channels, -1), then transpose so channels become the last axis.
    split_shape = tf.concat(
        [tf.shape(src_inputs)[:-1], [p.num_channels, -1]], 0)
    src_inputs = tf.transpose(
        tf.reshape(src_inputs, split_shape), [0, 1, 3, 2])
    return src_inputs, src_paddings, labels
def SetBpropType(self):
    """Sets the bprop selector index to a one-hot picking the second source."""
    self._bprop_index = tf.one_hot(1, 2)
|
from django.contrib import admin
from .models import PerguruanTinggi, JabatanFungsional, Dosen, \
Gelar, Keahlian, MataKuliah, Penelitian, Proyek, DosenGelar, DosenMatakuliah, DosenSkor, \
ProgramStudi, Jenjang, ProgramStudiKeahlian, GlobalVar, DosenJumlahPengajaran, \
DosenJumlahPenelitian, DosenJumlahProyek
'''user: admin, password: <PASSWORD>'''
# Register your models here.
class JabatanFungsionalAdmin(admin.ModelAdmin):
    """Admin list view for functional-position (JabatanFungsional) records."""
    list_display = (
        'nama_jabatanfungsional',
        'rank',
        'skor_jabatanfungsional',
    )
    list_display_links = ('nama_jabatanfungsional',)
class JenjangAdmin(admin.ModelAdmin):
    """Admin list view for education-level (Jenjang) records."""
    list_display = (
        'nama_jenjang',
        'rank',
        'skor_jenjang',
    )
    list_display_links = ('nama_jenjang',)
class GlobalVarAdmin(admin.ModelAdmin):
    """Admin list view for global name/value settings."""
    list_display = (
        'name',
        'value',
    )
    list_display_links = ('name',)
class DosenAdmin(admin.ModelAdmin):
    """Admin list view for lecturer (Dosen) records."""
    list_display = (
        'nama_dosen',
        'perguruantinggi',
        'jabatanfungsional',
        'programstudi',
        'total_jumlah_pengajaran',
        'total_jumlah_penelitian',
        'total_jumlah_proyek',
    )
    list_display_links = ('nama_dosen',)
class ProyekAdmin(admin.ModelAdmin):
    """Admin list view for project (Proyek) records."""
    list_display = (
        'judul_proyek',
        'dosen',
        'keahlian',
        'pengguna_jasa',
        'tahun',
    )
    list_display_links = ('judul_proyek',)
class PenelitianAdmin(admin.ModelAdmin):
    """Admin list view for research (Penelitian) records."""
    list_display = (
        'judul_penelitian',
        'dosen',
        'keahlian',
        'perguruantinggi',
        'tahun',
    )
    list_display_links = ('judul_penelitian',)
class DosenJumlahPenelitianAdmin(admin.ModelAdmin):
    """Admin list view for per-lecturer research counts."""
    list_display = (
        'dosen',
        'keahlian',
        'jumlah_penelitian',
    )
    list_display_links = ('dosen',)
class DosenJumlahPengajaranAdmin(admin.ModelAdmin):
    """Admin list view for per-lecturer teaching counts."""
    list_display = (
        'dosen',
        'keahlian',
        'jumlah_pengajaran',
    )
    list_display_links = ('dosen',)
class DosenJumlahProyekAdmin(admin.ModelAdmin):
    """Admin list view for per-lecturer project counts."""
    list_display = (
        'dosen',
        'keahlian',
        'jumlah_proyek',
    )
    list_display_links = ('dosen',)
class DosenSkorAdmin(admin.ModelAdmin):
    """Admin list view for per-lecturer raw and z-normalized scores."""
    list_display = (
        'dosen',
        'keahlian',
        'skor_pendidikan',
        'skor_jabatan',
        'skor_penelitian',
        'skor_pengajaran',
        'z_skor_pendidikan',
        'z_skor_jabatan',
        'z_skor_penelitian',
        'z_skor_pengajaran',
    )
    list_display_links = ('dosen',)
# Register models with the admin site; None uses the default ModelAdmin.
_REGISTRATIONS = (
    (PerguruanTinggi, None),
    (JabatanFungsional, JabatanFungsionalAdmin),
    (Dosen, DosenAdmin),
    (Gelar, None),
    (Keahlian, None),
    (MataKuliah, None),
    (Penelitian, PenelitianAdmin),
    (Proyek, ProyekAdmin),
    (DosenGelar, None),
    (DosenMatakuliah, None),
    (DosenSkor, DosenSkorAdmin),
    (ProgramStudi, None),
    (Jenjang, JenjangAdmin),
    (ProgramStudiKeahlian, None),
    (GlobalVar, GlobalVarAdmin),
    (DosenJumlahPengajaran, DosenJumlahPengajaranAdmin),
    (DosenJumlahPenelitian, DosenJumlahPenelitianAdmin),
    (DosenJumlahProyek, DosenJumlahProyekAdmin),
)
for _model, _admin_class in _REGISTRATIONS:
    if _admin_class is None:
        admin.site.register(_model)
    else:
        admin.site.register(_model, _admin_class)
#!/usr/bin/python2.7
#
#
### These are tests that can be performed with only core Qengine blocks
#
#
import json
import unittest
class QengineTestCase(unittest.TestCase):
    """Tests that can be performed with only core Qengine blocks.

    A single Flask test client is created once per class in ``setUpClass``;
    individual tests may set ``QENGINE_PASSKEY`` and ``tearDown`` resets it
    so tests stay independent.
    """

    @classmethod
    def setUpClass(cls):
        # Import lazily: pkg modules consult the config at import time, so
        # the config must be initialized before pkg.routes is imported.
        from pkg.config.qconfig import Config
        cls.config = Config()
        cls.config.init({
            'QENGINE_SALT': 'must_be_16_24_or_32_chars_long__',
            'QENGINE_QUESTION_LOCATION': 'filesystem',
            'QENGINE_CACHE_DIR': '',
            'QENGINE_ENABLE_REMOTE': True,
            'QENGINE_IV': 'must_be_16_long_',
            'QENGINE_LOG_REQUESTS': False,
            'QENGINE_MOOLE_HACKS': True,
            'QENGINE_NO_CACHE': True,
            'QENGINE_PASSKEY': None,
            'ENGINEINFO_name': 'Engine Testing'
        })
        from pkg.routes import app
        cls.app = app.test_client()
        cls.app.testing = True

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        pass

    def tearDown(self):
        # Reset the passkey so tests that set it do not leak into others.
        self.config.QENGINE_PASSKEY = None

    def test_root(self):
        # The root URL is not a registered route.
        result = self.app.get('/')
        self.assertEqual(result.status_code, 404)

    def test_getEngineInfo_NoPasskey(self):
        # With no passkey configured, /info is open and returns engine info.
        result = self.app.get('/info')
        self.assertEqual(result.status_code, 200)
        data = json.loads(result.data)
        self.assertIn('engineinfo', data)
        self.assertNotIn('errors', data)

    def test_getEngineInfo_MissingPasskey(self):
        # BUG FIX: this method was previously also named
        # test_getEngineInfo_NoPasskey, shadowing the test above so the
        # open-access case never ran. Renamed so both tests execute.
        self.config.QENGINE_PASSKEY = '<PASSWORD>_be_16_24_or_32_chars_long__'
        result = self.app.get('/info')
        data = json.loads(result.data)
        self.assertIn('error', data)

    def test_getEngineInfo_ValidPasskey(self):
        self.config.QENGINE_PASSKEY = 'must_be_16_24_or_32_chars_long__'
        result = self.app.get('/info?passKey=sNiHvH1axA%3D%3D:SKLEFEPVTRQNJDYI')
        data = json.loads(result.data)
        self.assertIn('engineinfo', data)
        self.assertNotIn('errors', data)

    def test_getEngineInfo_InvalidPasskey(self):
        self.config.QENGINE_PASSKEY = 'must_be_16_24_or_32_chars_long__'
        result = self.app.get('/info?passKey=qwertyuiop%3D%3D:QWERTYUIOPASDFGH')
        data = json.loads(result.data)
        self.assertIn('error', data)

    def test_getQuestionMetadata_MissingQuestion(self):
        result = self.app.get('/question/does/not/exist')
        self.assertEqual(result.status_code, 404)
        data = json.loads(result.data)
        self.assertIn('error', data)

    def test_getQuestionMetadata(self):
        result = self.app.get('/question/core/test/1.0')
        self.assertEqual(result.status_code, 200)
        data = json.loads(result.data)
        self.assertIn('questionmetadata', data)

    def test_start(self):
        postdata = '{"questionID":"test","questionVersion":"1.0","questionBaseURL":"core","initialParamNames":["randomseed"],"initialParamValues":[807031909],"cachedResources":[]}'
        result = self.app.post('/session', data=postdata, follow_redirects=True)
        self.assertEqual(result.status_code, 200)
        self.assertEqual(result.mimetype, 'application/json')
        data = json.loads(result.data)
        self.assertIn('CSS', data)
        self.assertIn('XHTML', data)
        self.assertIn('progressInfo', data)
        self.assertIn('questionSession', data)
        self.assertIn('resources', data)
        # remote file
        self.assertIn('content', data['resources'][0])
        self.assertIn('mimeType', data['resources'][0])
        self.assertIn('filename', data['resources'][0])
        self.assertIn('encoding', data['resources'][0])
        self.assertNotIn('errors', data)

    def test_start_noJson(self):
        # An empty body must produce a JSON error, not a server crash.
        result = self.app.post('/session', data='', follow_redirects=True)
        self.assertEqual(result.status_code, 200)
        data = json.loads(result.data)
        self.assertIn('error', data)

    def test_start_PassKeyRequired(self):
        self.config.QENGINE_PASSKEY = '<PASSWORD>'
        postdata = '{"questionID":"test","questionVersion":"1.0","questionBaseURL":"core","initialParamNames":["randomseed","passKey"],"initialParamValues":[807031909,"sNiHvH1axA%3D%3D:SKLEFEPVTRQNJDYI"],"cachedResources":[]}'
        result = self.app.post('/session', data=postdata, follow_redirects=True)
        self.assertEqual(result.status_code, 200)
        data = json.loads(result.data)
        self.assertNotIn('errors', data)

    def test_stop_BadRoute(self):
        # Path traversal in the session id must be rejected.
        result = self.app.delete('/session/../1234')
        self.assertEqual(result.status_code, 400)

    def test_stop(self):
        result = self.app.delete('/session/1234')
        self.assertEqual(result.status_code, 200)
# Allow running this test module directly (python <file>.py) in addition
# to test-runner discovery.
if __name__ == '__main__':
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.