id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1736004 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
from .._generated import AzureBlobStorage

# REST API version string, read from the generated client's default
# configuration so it always matches the code-generator output.
X_MS_VERSION = AzureBlobStorage(url="get_api_version")._config.version # pylint: disable=protected-access

# Socket timeout in seconds
CONNECTION_TIMEOUT = 20
READ_TIMEOUT = 20

# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned)
# The socket timeout is now the maximum total duration to send all data.
if sys.version_info >= (3, 5):
    # the timeout to connect is 20 seconds, and the read timeout is 80000 seconds
    # the 80000 seconds was calculated with:
    # 4000MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed)
    READ_TIMEOUT = 80000

# OAuth scopes used when authenticating with Azure Active Directory.
DEFAULT_OAUTH_SCOPE = "/.default"
STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default"

# DNS suffix of the public Azure cloud storage endpoints.
SERVICE_HOST_BASE = 'core.windows.net'
| StarcoderdataPython |
1971030 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Deployment toolkit
'''
import os,re
from datetime import datetime
from fabric.api import *
# Fabric connection settings: deploy as root on a single target host.
env.user = 'root'
env.sudo_user = 'root'
env.hosts = ['10.104.128.190']

# Name of the distribution tarball produced by build().
_TAR_FILE = 'dist-xilingxue.tar.gz'
# Remote location the tarball is uploaded to before unpacking.
_REMOTE_TMP_TAR = '/tmp/%s' % _TAR_FILE
# Root directory holding all deployed versions on the remote host.
_REMOTE_BASE_DIR = '/srv/xilingxue/'
def _current_path():
return os.path.abspath('.')
def _now():
return datetime.now().strftime('%y-%m-%d_%H.%M.%S')
def build():
    '''
    Build distribute package.
    '''
    # Files/directories packed into the tarball, and patterns left out of it.
    includes = ['static', 'templates', 'transwarp', '*.py']
    excludes = ['test', '.*', '*.pyc', '*.pyo']
    # Remove any previously built tarball first.
    local('rm -f dist/%s' % _TAR_FILE)
    # Run tar from inside the 'www' source directory so archive paths are relative.
    with lcd(os.path.join(_current_path(), 'www')):
        cmd = ['tar', '--dereference', '-czvf', '../dist/%s' % _TAR_FILE]
        cmd.extend(['--exclude=\'%s\'' % ex for ex in excludes])
        cmd.extend(includes)
        local(' '.join(cmd))
def deploy():
    '''
    Upload the built tarball and activate it as the new 'www' version.
    '''
    # Each deployment lives in its own timestamped directory under _REMOTE_BASE_DIR.
    newdir = 'www-%s' % _now()
    # Remove any stale tarball left on the remote host
    run('rm -f %s' % _REMOTE_TMP_TAR)
    # Upload the freshly built tarball
    put('dist/%s' % _TAR_FILE, _REMOTE_TMP_TAR)
    # Create the new version directory
    with cd(_REMOTE_BASE_DIR):
        sudo('mkdir %s' % newdir)
    # Unpack the tarball into the new directory
    with cd('%s%s' % (_REMOTE_BASE_DIR, newdir)):
        sudo('tar -xzvf %s' % _REMOTE_TMP_TAR)
    # Re-point the 'www' symlink at the new version
    with cd(_REMOTE_BASE_DIR):
        sudo('rm -f www')
        sudo('ln -s %s www' % newdir)
        sudo('chown root:root www')
        sudo('chown -R root:root %s' % newdir)
    # Restart the Python service and nginx; warn_only so a failed restart
    # does not abort the deployment mid-way.
    with settings(warn_only=True):
        sudo('supervisorctl stop xilingxue')
        sudo('supervisorctl start xilingxue')
        sudo('service nginx restart')
# Split on line breaks (optionally preceded by a carriage return).
# Raw string so '\r' and '\n' are explicit regex escapes rather than
# literal control characters embedded in the pattern source.
RE_FILES = re.compile(r'\r?\n')
def rollback():
'''
回滚到旧版本
'''
with cd(_REMOTE_BASE_DIR):
r = run('ls -p -')
files = [s[:-1] for s in RE_FILES.split(r) if s.startswith('www-') and s.endswith('/')]
files.sort(cmp=lambda s1, s2: 1 if s1 < s2 else -1)
r = run('ls -l www')
ss = r.split(' -> ')
if len(ss) != 2:
print ('ERROR: \'www\' is not a symbol link.')
return
current = ss[1]
print ('Found current symbol link points to: %s\n' % current)
try:
index = files.index(current)
except ValueError, e:
print ('ERROR: symbol link is invalid.')
return
if len(files) == index + 1:
print ('ERROR:already the oldest version')
old = files[index + 1]
print ('==============================================')
for f in files:
if f == current:
print (' Current ---> %s' % current)
elif f == old:
print ('Rollback to ---> %s' % old)
else:
print (' %s' % f)
print ('==============================================')
print ('')
yn = raw_input ('continue? y/N ')
if yn != 'y' and yn != 'Y':
print ('Rollback cancelled.')
return
print ('Start rollback...')
sudo('rm -f www')
sudo('ln -s %s www' % old)
sudo('chown www-data:www-data www')
with settings(warn_only=True):
sudo('supervisorctl stop xilingxue')
sudo('supervisorctl start xilingxue')
sudo('/etc/init.d/nginx reload')
print ('ROLLBACKED OK')
| StarcoderdataPython |
386767 | <filename>fruit/api/serializers.py
from fruit.models import Fruit
from rest_framework import serializers
class FruitDefaultSerializer(serializers.HyperlinkedModelSerializer):
    """Base serializer exposing the core Fruit fields."""
    class Meta:
        model = Fruit
        fields = ["id", "name", "description"]
class FruitDetailSerializer(FruitDefaultSerializer):
    """Detail serializer: extends the default field list.

    Reuses the parent's Meta attributes so model and base fields
    cannot drift out of sync with FruitDefaultSerializer.
    """
    class Meta:
        model = FruitDefaultSerializer.Meta.model
        fields = FruitDefaultSerializer.Meta.fields + ["detail", "detail_link"]
| StarcoderdataPython |
6462401 | import os
import sys
import random
import logging
import pkgutil
import importlib
import click
import numpy as np
from qtpy.uic import loadUi
from qtpy.QtGui import QIcon
from qtpy.QtCore import QTimer, Qt, Signal
from qtpy.QtWidgets import QApplication, QDialog, QMainWindow
import astropy.units as u
from astropy.modeling.models import Gaussian1D
from specutils import Spectrum1D
from specutils import __version__ as specutils_version
from . import __version__, plugins
from .widgets.workspace import Workspace
class Application(QApplication):
    """
    Primary application object for specviz.
    """
    # Emitted with the newly focused main window when the active workspace changes.
    current_workspace_changed = Signal(QMainWindow)
    # Emitted when a workspace is added to the application.
    workspace_added = Signal(Workspace)

    def __init__(self, *args, file_path=None, file_loader=None, embedded=False,
                 dev=False, skip_splash=False, load_all=False, **kwargs):
        """
        Set up the application: load plugins, optionally show the splash
        screen, create the first workspace, and optionally load data.

        Parameters
        ----------
        file_path : str, optional
            Path of a data file to load on startup.
        file_loader : str, optional
            Name of the loader used for ``file_path``.
        embedded : bool
            When True, skip setting the application icon.
        dev : bool
            When True, populate the workspace with synthetic example spectra.
        skip_splash : bool
            When True, do not show the startup splash dialog.
        load_all : bool
            When True, load every spectrum in the file without showing the
            selection dialog.
        """
        super(Application, self).__init__(*args, **kwargs)

        # Store references to workspace instances
        self._workspaces = []

        # Set application icon
        if not embedded:
            self.setWindowIcon(QIcon(":/icons/specviz.icns"))

        # Load local plugins
        self.load_local_plugins()

        # Show splash
        if not skip_splash:
            self._splash_dialog = SplashDialog(2000)
            self._splash_dialog.exec()

        # Cache a reference to the currently active window
        self.current_workspace = self.add_workspace()

        # Add an initially empty plot
        self.current_workspace.add_plot_window()

        if dev:
            # Developer mode: one Gaussian-plus-noise spectrum and two
            # pure-noise spectra, in different spectral units.
            y = Gaussian1D(mean=50, stddev=10)(np.arange(100)) + np.random.sample(100) * 0.1
            spec1 = Spectrum1D(flux=y * u.Jy,
                               spectral_axis=np.arange(100) * u.AA)
            spec2 = Spectrum1D(flux=np.random.sample(100) * u.erg,
                               spectral_axis=np.arange(100) * u.Hz)
            spec3 = Spectrum1D(flux=np.random.sample(100) * u.erg,
                               spectral_axis=np.arange(100) * u.Hz)
            data_item = self.current_workspace.model.add_data(spec1, "Spectrum 1")
            self.current_workspace.model.add_data(spec2, "Spectrum 2")
            self.current_workspace.model.add_data(spec3, "Spectrum 3")

            # Set the first item as selected
            self.current_workspace.force_plot(data_item)

        # If a file path has been given, automatically add data
        if file_path is not None:
            try:
                self.current_workspace.load_data_from_file(
                    file_path, file_loader=file_loader,
                    multi_select=not load_all)
            except Exception as e:
                # Report load failures through the workspace UI instead of crashing.
                self.current_workspace.display_load_data_error(e)

    def add_workspace(self):
        """
        Create a new main window instance with a new workspace embedded within.
        """
        # Initialize with a single main window
        workspace = Workspace()
        workspace.show()

        self._workspaces.append(workspace)

        # Connect the window focus event to the current workspace reference
        workspace.window_activated.connect(self._on_window_activated)

        return workspace

    @staticmethod
    def load_local_plugins():
        """
        Import and parse any defined plugins in the `specviz.plugins`
        namespace. These are then added to the plugin registry for future
        initialization (e.g. when new workspaces are added to the application).
        """
        # Load plugins
        def iter_namespace(ns_pkg):
            """
            Iterate over a given namespace to provide a list of importable
            modules.

            Parameters
            ----------
            ns_pkg : module
                The module whose namespace will be explored plugin definitions.

            Returns
            -------
            : list
                The list of `ModuleInfo`s pertaining to plugin definitions.
            """
            # Specifying the second argument (prefix) to iter_modules makes the
            # returned name an absolute name instead of a relative one. This
            # allows import_module to work without having to do additional
            # modification to the name.
            return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".")

        # Import plugins modules into current namespace; importing is what
        # registers each plugin (the dict itself is not used afterwards).
        loaded_plugins = {name: importlib.import_module(name)
                          for finder, name, ispkg
                          in iter_namespace(plugins)}

    def remove_workspace(self):
        """
        Explicitly removes a workspace in this SpecViz application instance.
        """
        # NOTE(review): not implemented yet.
        pass

    @property
    def current_workspace(self):
        """
        Returns the active current window.
        """
        return self._current_workspace

    @current_workspace.setter
    def current_workspace(self, value):
        # Update the cached reference and notify listeners.
        self._current_workspace = value
        self.current_workspace_changed.emit(self.current_workspace)

    def _on_window_activated(self, window):
        """Track window focus so `current_workspace` follows the active window."""
        self.current_workspace = window
        self.current_workspace_changed.emit(self.current_workspace)

        logging.info("Setting active workspace to '%s'", window.name)
class SplashDialog(QDialog):
    """
    Provides a splash screen when loading SpecViz providing basic information
    of the current version of relevant packages, and waits a set amount of
    time to ensure that initialization of the application is complete.

    Parameters
    ----------
    wait_time : float
        The time in milliseconds to wait for application start-up.
    """
    def __init__(self, wait_time, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self._wait_time = wait_time
        self._total_time = 0

        # Frameless, translucent window so only the splash artwork shows.
        self.setWindowFlags(Qt.FramelessWindowHint)
        self.setAutoFillBackground(True)
        self.setAttribute(Qt.WA_NoSystemBackground, True)
        self.setAttribute(Qt.WA_TranslucentBackground, True)

        # Load the splash UI definition from the packaged .ui file.
        loadUi(os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                         "widgets", "ui", "splash_non_modal.ui")),
            self)

        # Set the version number
        self.specviz_version_label.setText("Version {}".format(__version__))
        self.specutils_version_label.setText("Using specutils {}".format(specutils_version))

        # Advance the (cosmetic) progress bar every 300 ms.
        self._timer = QTimer()
        self._timer.timeout.connect(self.calculate_progress)
        self._timer.start(300)

    def calculate_progress(self):
        """
        Calculates a random amount of progress to show in the progress bar.
        The progress bar currently is not attached to any load operation, so
        it's a simple visual representation for the time defined in
        `wait_time`.
        """
        rand = random.randrange(100, 500)
        self._total_time += rand

        # NOTE(review): under Python 3 this expression is a float, while
        # QProgressBar.setValue expects an int — presumably coerced by the
        # Qt binding; confirm.
        self.progress_bar.setValue(self._total_time/self._wait_time*100)

        # Close the splash once the configured wait time has elapsed.
        if self._total_time > self._wait_time:
            self._timer.stop()
            self.close()
@click.command()
@click.option('--hide_splash', '-H', is_flag=True, help="Hide the startup splash screen.")
@click.option('--file_path', '-F', type=click.Path(exists=True), help="Load the file at the given path on startup.")
@click.option('--loader', '-L', type=str, help="Use specified loader when opening the provided file.")
@click.option('--embed', '-E', is_flag=True, help="Only display a single plot window. Useful when embedding in other applications.")
@click.option('--dev', '-D', is_flag=True, help="Open SpecViz in developer mode. This mode auto-loads example spectral data.")
@click.option('--load_all', is_flag=True, help="Automatically load all spectra in file instead of displaying spectrum selection dialog")
@click.option('--version', '-V', is_flag=True, help="Print version information", is_eager=True)
def start(version=False, file_path=None, loader=None, embed=None, dev=None,
          hide_splash=False, load_all=None):
    """
    The function called when accessed through the command line. Parses any
    command line arguments and provides them to the application instance, or
    returns the requested information to the user.

    Parameters
    ----------
    version : str
        Prints the version number of SpecViz.
    file_path : str
        Path to a data file to load directly into SpecViz.
    loader : str
        Loader definition for specifying how to load the given data.
    embed : bool
        Whether or not this application is embed within another.
    dev : bool
        Auto-generates sample data for easy developer testing.
    hide_splash : bool
        Hides the splash screen on startup.
    load_all : bool
        Loads every spectrum in the file without the selection dialog.
    """
    # --version short-circuits: print and exit without starting Qt.
    if version:
        print(__version__)
        return

    # Start the application, passing in arguments
    app = Application(sys.argv, file_path=file_path, file_loader=loader,
                      embedded=embed, dev=dev, skip_splash=hide_splash,
                      load_all=load_all)

    # Enable hidpi icons
    QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)

    # Run the Qt event loop; propagate its exit status to the shell.
    sys.exit(app.exec_())


if __name__ == '__main__':
    start()
| StarcoderdataPython |
365598 | from django import forms
from django.forms import formset_factory
from . import models
class ThermalSourceForm(forms.ModelForm):
    # Optional upload; the raw bytes are copied onto the model instance.
    scheme_image = forms.FileField(required=False)

    def save(self, commit=True):
        """Copy the uploaded image bytes onto the model instance.

        NOTE(review): unlike a normal ModelForm.save(), this never calls
        instance.save() regardless of `commit` — confirm that callers
        (e.g. ModelAdmin) persist the instance themselves.
        """
        if self.cleaned_data.get('scheme_image') is not None \
                and hasattr(self.cleaned_data['scheme_image'], 'file'):
            data = self.cleaned_data['scheme_image'].file.read()
            self.instance.scheme_image = data
        return self.instance

    def save_m2m(self):
        # FIXME: this function is required by ModelAdmin, otherwise save process will fail
        pass

    class Meta:
        # NOTE(review): model is `Word` despite the form's "ThermalSource"
        # name — looks inconsistent; confirm which model is intended.
        model = models.Word
        fields = ['name', 'scheme_image']
class InsereIdioma(forms.ModelForm):
    """Form for adding an Idioma; `usuario` is excluded (set elsewhere)."""
    class Meta:
        model = models.Idioma
        fields = '__all__'
        exclude = ['usuario']


# Formset allowing multiple Idioma entries to be submitted at once.
InsereIdiomaFormset = formset_factory(InsereIdioma, extra=1)
class InsereTecnologia(forms.ModelForm):
    """Form for adding a Tecnologia; `usuario` is excluded (set elsewhere)."""
    class Meta:
        model = models.Tecnologia
        fields = '__all__'
        exclude = ['usuario']


# Formset allowing multiple Tecnologia entries to be submitted at once.
InsereTecnologiaFormset = formset_factory(InsereTecnologia, extra=1)
class K8Points_ClassroomForm(forms.ModelForm):
    def clean(self):
        """Reject duplicate entries for the same student, day and timeframe."""
        cleaned_data = super(K8Points_ClassroomForm, self).clean()
        day = cleaned_data.get("day")
        time_frame = cleaned_data.get("time_frame")
        student_name = cleaned_data.get("student_name")
        # Enforce uniqueness of (day, time_frame, student) at form level.
        if models.K8Points.objects.filter(day=day, time_frame=time_frame, student_name=student_name).exists():
            raise forms.ValidationError(
                'This timeframe {} was already logged to the student {} on {}'.format(time_frame, student_name.student_name, day)
            )

    class Meta:
        model = models.K8Points
        fields = ('student_name', 'behavior','academic', 'time_frame','date','day','week_of','class_name')
class K8PointsForm(forms.ModelForm):
    """Plain K8Points form, without the duplicate-entry validation of
    K8Points_ClassroomForm."""
    class Meta:
        model = models.K8Points
        fields = ('student_name', 'behavior','academic', 'time_frame','date','day','week_of','class_name')
def cities_as_choices():
    """Build grouped choices for a ChoiceField.

    Returns a list of [country, [[pk, city], ...]] groups, one per
    distinct country in the Cities table.
    """
    grouped = []
    for row in models.Cities.objects.values('country').distinct():
        country_name = row.get('country')
        city_pairs = [[city.pk, city.city]
                      for city in models.Cities.objects.filter(country=country_name)]
        grouped.append([country_name, city_pairs])
    return grouped
class CityNameForm(forms.Form):
    """
    CityNameForm Class
    """
    def __init__(self, *args, **kwargs):
        super(CityNameForm, self).__init__(*args, **kwargs)
        # Refresh the choices on every instantiation so newly added
        # cities appear without a restart.
        self.fields['cityName'].choices = cities_as_choices()

    # NOTE(review): calling cities_as_choices() here runs a DB query at
    # class-definition (import) time; the per-instance refresh in
    # __init__ overrides it anyway — confirm this is intentional.
    cityName = forms.ChoiceField(
        choices=cities_as_choices(),
        help_text="",
        required=False,
        label='Cities',
    )
class QuoteForm(forms.ModelForm):
    """Form for a Quote; required fields get the 'required' CSS class."""
    required_css_class = 'required'

    class Meta:
        model = models.Quote
        fields = ['name',]
| StarcoderdataPython |
8089321 | <reponame>wlmwng/urlExpander
"""
This module extends the package's core functions to collect information related to news articles.
The NewsContent class acts as a template which is filled out by the fetching functions.
The output of every fetching function is a stringified JSON object, which can be
written to a .jsonl file or passed to a database.
"""
__all__ = [
"request_active_url",
"request_archived_url",
"fetch_url",
"fetch_urls",
"fetch_urls_to_file",
"load_fetched_from_file",
]
import datetime
import inspect
import json
import logging
import os
import re
import time
from random import randint
import newspaper
import waybackpy
from newsplease import NewsPlease
from urlexpander.core import api, constants, url_utils
from waybackpy.exceptions import URLError, WaybackError
LOGGER = logging.getLogger(__name__)
class NewsContent:
    """The fetching functions hydrate instances of this class.

    :param original_url: URL
    :type original_url: str
    :param **kwargs:
        - each kwarg is added as an attribute along with its provided value.
        - both the argument and the value must be JSON serializable: see to_json().
        - e.g., if "outlet=CNN" is a kwarg, "self.outlet" is added with a value of "CNN".
    """

    def __init__(
        self,
        original_url,
        **kwargs,
    ):
        # Attach caller-supplied metadata as attributes; they are passed
        # through unchanged into the JSON output.
        for key, value in kwargs.items():
            setattr(self, key, value)

        self.article_maintext = ""
        self.original_url = original_url
        # self.response_url = ""
        self.resolved_url = ""
        self.resolved_domain = ""
        self.resolved_netloc = ""
        self.standardized_url = ""
        self.is_generic_url = ""

        # for troubleshooting
        self.response_code = ""
        self.response_reason = ""
        self.fetch_error = ""

        # processed response text (HTML):
        # backup option which can be parsed if `article_maintext` returns None
        self.resolved_text = ""

        # Record which fetching function created this instance (the caller's
        # name, via the frame stack) and when (UTC).
        # https://stackoverflow.com/a/5067654
        self.FETCH_FUNCTION = inspect.currentframe().f_back.f_code.co_name
        self.FETCH_AT = datetime.datetime.now(datetime.timezone.utc)

    def set_article_maintext(self):
        """Extract the article text from the HTML with NewsPlease"""
        try:
            article = NewsPlease.from_html(
                html=self.resolved_text, url=self.resolved_url
            )
            self.article_maintext = article.maintext
        except newspaper.article.ArticleException as exc:
            LOGGER.info(
                f"Failed to extract article's maintext due to ArticleException, {str(exc)}",
            )
            self.article_maintext = ""
        except Exception as exc:
            # Best-effort extraction: any other failure falls back to "".
            LOGGER.info(
                f"Failed to extract article's maintext due to unknown exception, {str(exc)}",
            )
            self.article_maintext = ""

    def set_fetch_error_ind(self):
        """Set indicator for whether a fetch attempt resulted in an error"""
        is_err = False
        if self.FETCH_FUNCTION == "request_active_url":
            # presumably api.expand_with_content marks failures by embedding
            # "ERROR" in the resolved URL — confirm against that function.
            if "ERROR" in self.resolved_url:
                is_err = True
        elif self.FETCH_FUNCTION == "request_archived_url":
            # request_archived_url sets response_reason to "OK" only on success.
            if self.response_reason != "OK":
                is_err = True
        self.fetch_error = is_err

    def set_url_versions(self):
        """Set URL versions"""
        url = self.resolved_url
        # Netloc-only form: no scheme, path, query or fragment.
        self.resolved_netloc = url_utils.standardize_url(
            url=url,
            remove_scheme=True,
            replace_netloc_with_domain=False,
            remove_path=True,
            remove_query=True,
            remove_fragment=True,
            to_lowercase=True,
        )
        # Standardized comparison form: keeps path and query, drops
        # scheme and fragment, lower-cased.
        self.standardized_url = url_utils.standardize_url(
            url=url,
            remove_scheme=True,
            replace_netloc_with_domain=False,
            remove_path=False,
            remove_query=False,
            remove_fragment=True,
            to_lowercase=True,
        )

    def set_generic_url_ind(self):
        """Set indicator for whether a URL is generic"""
        self.is_generic_url = url_utils.is_generic_url(self.resolved_url)

    def to_json(self):
        """Convert NewsContent instance into a JSON string"""
        # https://stackoverflow.com/a/27058505
        class CustomEncoder(json.JSONEncoder):
            """
            Default JSON serializable types are bool, int, float, and str.
            CustomEncoder is a subclass of json.JSONEncoder.
            It modifies datetime and dictionary types to make them JSON serializable.
            Modify this subclass to address other non-primitive types.
            """
            def default(self, o):
                if isinstance(o, datetime.datetime):
                    # https://docs.python.org/3/library/datetime.html#datetime.datetime.isoformat
                    # Return a string representing the date and time in ISO 8601 format
                    # e.g., '2019-05-18T15:17:00+00:00'
                    return o.isoformat()
                elif isinstance(o, dict):
                    return json.dumps(o)
                return json.JSONEncoder.default(self, o)

        # Serialize the whole instance namespace (core fields + kwargs).
        fetched = self.__dict__
        fetched_json = json.dumps(fetched, cls=CustomEncoder)
        return fetched_json
def load_fetched_from_file(path, filename):
    """Load fetched content from .jsonl file.

    :param path: path to the directory
    :type path: str
    :param filename: name of the file
    :type filename: str
    :returns data: fetched content as stringified JSON object
    :rtype data: Generator[str]
    """
    target = os.path.join(path, filename)
    # Silently yield nothing when the file does not exist.
    if target and os.path.exists(target):
        with open(file=target, mode="r", encoding="utf-8") as handle:
            yield from handle
def fetch_urls_to_file(
    urls,
    fetch_function,
    path="",
    filename="fetched.jsonl",
    write_mode="a",
    verbose=1,
):
    """Fetch the webpage contents for one URL or multiple URLs.

    Outputs file where each line contains a URL's fetched content
    (stringified JSON object).

    :param urls: URL(s) to fetch
        - required: 'url' key
        - optional: additional key-value pairs are passed along to the output
    :param fetch_function: request_active_url, request_archived_url, fetch_url
    :type fetch_function: function
    :param path: output directory path (Default value = "")
    :type path: str
    :param filename: (Default value = "fetched.jsonl")
    :type filename: str
    :param write_mode: "a" to append (Default value = "a")
    :type write_mode: str
    :param verbose: 0 - don't print progress, 1 - print progress (Default value = 1)
    :type verbose: bool
    :returns: None
    """
    # Allow a single dict as shorthand for a one-element list.
    if isinstance(urls, dict):
        urls = [urls]

    for n, url_dict in enumerate(urls):
        # dictionaries are mutable so work off a copy to avoid modifying the input
        d = url_dict.copy()
        try:
            # Collect the value of the 'url' key if it exists and
            # remove it from the dictionary before calling fetch_function.
            # the remaining key-values are passed as optional kwargs.
            url = d.pop("url")
            # BUG FIX: the progress message was previously logged twice
            # per URL; log it once and optionally echo to stdout.
            msg = f"url {n}, {fetch_function.__name__}: {url}"
            LOGGER.info(msg)
            if verbose:
                print(msg)
            data = fetch_function(url=url, **d)
            with open(
                file=os.path.join(path, filename), mode=write_mode, encoding="utf-8"
            ) as file:
                # https://stackoverflow.com/a/12451465
                file.write(data + "\n")
        except KeyError:
            LOGGER.error(
                "Fetch failed to start, please provide a 'url' key-value in the input dictionary."
            )
def fetch_urls(urls, fetch_function, verbose=1):
    """Fetch the webpage contents for one URL or multiple URLs.

    :param urls: URL(s) to fetch
        - required: 'url' key
        - optional: additional key-value pairs are passed along to the output
    :param fetch_function: request_active_url, request_archived_url, fetch_url
    :type fetch_function: function
    :param verbose: 0 - don't print progress, 1 - print progress (Default value = 1)
    :type verbose: bool
    :returns data: fetched content as stringified JSON object
    :rtype data: Generator[str]
    """
    # Allow a single dict as shorthand for a one-element list.
    if isinstance(urls, dict):
        urls = [urls]

    for index, entry in enumerate(urls):
        # Work on a copy: dicts are mutable and 'url' is popped below.
        remaining = entry.copy()
        try:
            # 'url' is required; every other key is forwarded as a kwarg.
            target = remaining.pop("url")
            progress = f"url {index}, {fetch_function.__name__}: {target}"
            LOGGER.info(progress)
            if verbose:
                print(progress)
            yield fetch_function(url=target, **remaining)
        except KeyError:
            LOGGER.error(
                "Fetch failed to start, please provide a 'url' key-value in the input dictionary."
            )
def fetch_url(url, timeout=10, **kwargs):
    """Fetch the URL directly or from an archive.

    First try to fetch the content directly from the URL domain's servers.
    If it fails, then try to fetch the content from an archived version of the URL.

    :param url: URL
    :type url: str
    :param timeout: seconds to wait for the direct request (Default value = 10)
    :param **kwargs: extra metadata passed through to the output JSON
    :returns: fetched-> fetched content as stringified JSON object
    :rtype: str
    """
    LOGGER.info(f"fetching URL: {url}")

    active_json = request_active_url(
        url=url,
        timeout=timeout,
        **kwargs,
    )
    # Inspect the direct-fetch result to decide whether to fall back.
    active_data = json.loads(active_json)

    if active_data["fetch_error"]:
        # Direct fetch failed: fall back to the Wayback Machine.
        archived_json = request_archived_url(
            url=url,
            **kwargs,
        )
        fetched = archived_json
        LOGGER.info(
            "Failed with request_active_url, returning fetched content from request_archived_url."
        )
    else:
        fetched = active_json
        LOGGER.info("Succeeded with request_active_url, returning fetched content.")

    return fetched
def request_active_url(url, timeout=10, **kwargs):
    """Request the webpage directly from the URL domain

    :param url: URL
    :type url: str
    :param timeout: how many seconds to wait for a response (Default value = 10)
    :type timeout: int
    :param **kwargs: extra metadata passed through to the output JSON
    :returns: fetched-> as stringified JSON object
    :rtype: str
    """
    fetched = NewsContent(
        original_url=url,
        **kwargs,
    )

    # urlExpander.expand_with_content already includes a time delay
    # send the request
    LOGGER.info(f"request_active_url: {url}")
    r = api.expand_with_content(url=url, timeout=timeout)

    # hydrate the instance with the response info
    fetched.resolved_url = r["resolved_url"]
    fetched.resolved_domain = r["resolved_domain"]
    fetched.resolved_text = r["resolved_text"]
    # fetched.response_url = r["response_url"]
    fetched.response_code = r["response_code"]
    fetched.response_reason = r["response_reason"]

    # Compute the derived fields (error flag, article text, URL variants).
    fetched.set_fetch_error_ind()
    fetched.set_article_maintext()
    fetched.set_url_versions()
    fetched.set_generic_url_ind()
    fetched_json = fetched.to_json()
    return fetched_json
def request_archived_url(url, **kwargs):
    """Request the oldest version of the webpage from the Internet Archive's Wayback Machine

    :param url: URL
    :type url: str
    :param **kwargs: extra metadata passed through to the output JSON
    :returns: fetched-> as stringified JSON object
    :rtype: str
    """
    fetched = NewsContent(original_url=url, **kwargs)

    # canonicalize, remove common ad analytics query params, remove fragment
    # this step may help improve the archive hit rate
    url = url_utils.standardize_url(
        url=url,
        remove_scheme=False,
        replace_netloc_with_domain=False,
        remove_path=False,
        remove_query=False,
        remove_fragment=True,
        to_lowercase=False,
    )

    # Randomized delay to rate-limit requests to the archive.
    time.sleep(randint(constants.MIN_DELAY, constants.MAX_DELAY))

    try:
        # send the request
        LOGGER.info(f"request_archived_url: {url}")
        wayback = waybackpy.Url(url, constants.headers["User-Agent"])
        archive = wayback.oldest()
        wbm_url = archive.archive_url
        wbm_html = archive.get()
        # fetched.response_url = wbm_url

        # Strip the Wayback Machine prefix (e.g.
        # "https://web.archive.org/web/1234/") to recover the original URL.
        # FIX: use a raw string so "\." and "\d" are genuine regex escapes;
        # the previous non-raw literal relied on deprecated invalid string
        # escapes ("\/", "\.", "\d") that will become errors in future Python.
        fetched.resolved_url = re.sub(
            r"^http(s)?://web\.archive\.org/web/\d+/", "", wbm_url
        )
        fetched.resolved_domain = url_utils.get_domain(fetched.resolved_url)
        fetched.resolved_text = wbm_html
        fetched.response_code = 200
        fetched.response_reason = "OK"

    # https://github.com/akamhy/waybackpy/blob/6c71dfbe41ce8791ebd352817e6cfc0833f38140/waybackpy/exceptions.py
    except WaybackError:  # 'Can not find archive for ___'
        msg = "WaybackError, API down or invalid arguments"
        LOGGER.warning(msg)
        fetched.response_code = float("nan")
        fetched.response_reason = msg
    except URLError:
        msg = "URLError, malformed URL"
        LOGGER.warning(msg)
        fetched.response_code = float("nan")
        fetched.response_reason = msg
    except Exception as exc:
        msg = f"Unknown error, {str(exc)}"
        LOGGER.warning(msg)
        fetched.response_code = float("nan")
        fetched.response_reason = msg

    # Compute the derived fields (error flag, article text, URL variants).
    fetched.set_fetch_error_ind()
    fetched.set_article_maintext()
    fetched.set_url_versions()
    fetched.set_generic_url_ind()
    fetched_json = fetched.to_json()
    return fetched_json
| StarcoderdataPython |
5002922 | <filename>{{ cookiecutter.project_slug }}/{{ cookiecutter.pkg_name }}/{{ cookiecutter.first_app_name }}/apps.py<gh_stars>1-10
from django.apps import AppConfig


# Django AppConfig for the project's first app. The class name, dotted
# app path and verbose name are all rendered from the cookiecutter
# template variables at project-generation time.
class {{ cookiecutter.first_app_name.split('_')|map('capitalize')|join('') }}Config(AppConfig):
    name = '{{ cookiecutter.pkg_name }}.{{ cookiecutter.first_app_name }}'
    verbose_name = '{{ cookiecutter.project_name }}: {{ cookiecutter.first_app_name.split('_')|map('capitalize')|join(' ') }}'
| StarcoderdataPython |
1720032 | import phonenumbers
from authy.api import AuthyApiClient
from flask import current_app as app
from flask import flash, g, request, session
def parse_phone_number(full_phone):
    """
    Parses the phone number from E.164 format

    :param full_phone: phone number in E.164 format
    :returns: tuple (country_code, phone)
    """
    parsed = phonenumbers.parse(full_phone)
    return parsed.country_code, parsed.national_number
def get_authy_client():
    """Return an Authy API client configured from the Flask app settings."""
    return AuthyApiClient(app.config['AUTHY_API_KEY'])
def start_verification(country_code, phone, channel='sms'):
    """
    Sends a verification code to the user's phone number
    via the specified channel

    :param country_code: country code for the phone number
    :param phone: national format phone number
    :param channel: either 'sms' or 'call'
    """
    api = get_authy_client()
    try:
        verification = api.phones.verification_start(
            phone, country_code, via=channel)
        # Surface the API outcome to the user via Flask message flashing.
        if verification.ok():
            flash(verification.content['message'])
        else:
            flash(verification.errors()['message'])
    except Exception as e:
        # Broad catch: report any transport/API failure instead of raising.
        flash("Error sending code: {}".format(e))
def check_verification(country_code, phone, code):
    """
    Validates a verification code

    :param country_code: country code for the phone number
    :param phone: national format phone number
    :param code: verification code from user input
    :returns: True if the code was accepted, False otherwise
    """
    api = get_authy_client()
    try:
        verification = api.phones.verification_check(
            phone, country_code, code)
        if verification.ok():
            flash(verification.content['message'])
            return True
        else:
            flash(verification.errors()['message'])
    except Exception as e:
        # Broad catch: report any transport/API failure instead of raising.
        flash("Error validating code: {}".format(e))

    # Any non-OK or failed attempt falls through to False.
    return False
def create_authy_user(email, country_code, phone):
    """
    Creates a user with the Authy API

    :param email: email to be associated with the user.
        Used by the API for account recovery
    :param country_code: country code for the phone number
    :param phone: national format phone number
    :returns: the generated Authy ID, or None when creation failed
    """
    api = get_authy_client()
    authy_user = api.users.create(email, phone, country_code)
    if authy_user.ok():
        return authy_user.id
    else:
        # Report the failure to the user; caller must handle the None.
        flash("Error creating Authy user: {}".format(authy_user.errors()))
        return None
| StarcoderdataPython |
11235319 | <gh_stars>0
import numpy as onp
import casadi as cas
def clip(
        x,
        min,
        max
):
    """
    Clip a value to a range.

    Args:
        x: Value to clip.
        min: Minimum value to clip to.
        max: Maximum value to clip to.

    Returns:
        The value bounded to [min, max], elementwise for array inputs.
    """
    # fmax first (lower bound), then fmin (upper bound) — same evaluation
    # order as the original nested expression, so NaN handling is unchanged.
    lower_bounded = onp.fmax(x, min)
    return onp.fmin(lower_bounded, max)
| StarcoderdataPython |
6627301 | <filename>lib/sensor.py
import pigpio
class Sensor:
    # Latest accelerometer (ax, ay, az) and magnetometer (mx, my, mz)
    # readings; raw single-byte register values refreshed by update().
    ax=0
    ay=0
    az=0
    mx=0
    my=0
    mz=0

    def __init__(self, pi):
        """Open I2C handles for both devices and write their setup registers.

        :param pi: a connected pigpio.pi() instance
        """
        self.pi = pi
        # Accelerometer device at I2C address 0x19 on bus 1.
        self.acc = self.pi.i2c_open(1, 0x19, 0)
        # NOTE(review): register/value pairs below are presumably control
        # register setup (enable axes, data rate) — confirm against the
        # device datasheet.
        self.pi.i2c_write_byte_data(self.acc,0x20,0x37)
        # Magnetometer device at I2C address 0x1e on bus 1.
        self.mag = self.pi.i2c_open(1, 0x1e, 0)
        self.pi.i2c_write_byte_data(self.mag,0x00,0x14)
        self.pi.i2c_write_byte_data(self.mag,0x01,0x20)
        self.pi.i2c_write_byte_data(self.mag,0x02,0x01)

    def update(self):
        """Refresh all six axis readings from the devices."""
        self.ax = self.read(self.acc,0x29)
        self.ay = self.read(self.acc,0x2B)
        self.az = self.read(self.acc,0x2D)
        self.mx = self.read(self.mag,0x03)
        self.my = self.read(self.mag,0x05)
        self.mz = self.read(self.mag,0x07)

    def debug(self):
        """Print the current readings to stdout."""
        print (self.ax,self.ay,self.az,"***",self.mx,self.my,self.mz)

    def read(self, dev,addr):
        """Read one byte from register `addr` of the open I2C handle `dev`."""
        return self.pi.i2c_read_byte_data(dev, addr)
if __name__ == "__main__":
    # Demo: print sensor readings twice per second until interrupted.
    import time
    pi = pigpio.pi()
    s = Sensor(pi)
    while True:
        s.update()
        s.debug()
        time.sleep(0.5)
| StarcoderdataPython |
159582 | from onelang_core import *
from enum import Enum
import OneLang.One.Ast.AstTypes as astTypes
import OneLang.One.Ast.Types as types
class DETECTION_MODE(Enum):
    """Which dependency edges to follow when looking for file-level cycles."""
    ALL_IMPORTS = 1        # follow every import between files
    ALL_INHERITENCE = 2    # follow class and interface inheritance
    BASE_CLASSES_ONLY = 3  # follow only class base-class links
class GraphCycleDetector:
    """DFS-based cycle detector; node expansion is delegated to `visitor`,
    which calls back into visit_node() for each neighbour."""

    def __init__(self, visitor):
        # Map of node -> bool: True while the node is on the current DFS
        # path, False once it has been fully processed.
        self.node_is_in_path = None
        self.visitor = visitor

    def find_cycles(self, nodes):
        """Run detection over every node, resetting previous state."""
        self.node_is_in_path = Map()
        for node in nodes:
            self.visit_node(node)

    def visit_node(self, node):
        """Visit `node`; returns True iff it lies on the current DFS path (a cycle)."""
        if not self.node_is_in_path.has(node):
            # untouched node
            self.node_is_in_path.set(node, True)
            self.visitor.process_node(node)
            self.node_is_in_path.set(node, False)
            return False
        else:
            # true = node used in current path = cycle
            # false = node was already scanned previously (not a cycle)
            return self.node_is_in_path.get(node)
class CircularDependencyDetector:
    """Reports circular dependencies between source files, using the edge
    set selected by `detection_mode` (imports vs. inheritance)."""

    def __init__(self, detection_mode):
        self.detector = GraphCycleDetector(self)
        self.detection_mode = detection_mode

    def process_intfs(self, file, type, intfs):
        """Check every base interface of `intfs` for a cross-file cycle."""
        for intf in intfs:
            for base_intf in intf.get_all_base_interfaces():
                # Only cross-file inheritance can create a file-level cycle.
                if base_intf.parent_file != file and self.detector.visit_node(base_intf.parent_file):
                    console.error(f'''Circular dependency found in file \'{file.export_scope.get_id()}\': {type} \'{intf.name}\' inherited from \'{base_intf.name}\' (from \'{base_intf.parent_file.export_scope.get_id()}\')''')

    def process_node(self, file):
        """GraphCycleDetector callback: expand `file`'s outgoing edges."""
        if self.detection_mode == DETECTION_MODE.ALL_IMPORTS:
            for imp in file.imports:
                for imp_sym in imp.imports:
                    imp_file = (imp_sym).parent_file
                    if self.detector.visit_node(imp_file):
                        console.error(f'''Circular dependency found in file \'{file.export_scope.get_id()}\' via the import \'{imp_sym.name}\' imported from \'{imp_file.export_scope.get_id()}\'''')
        elif self.detection_mode == DETECTION_MODE.ALL_INHERITENCE:
            self.process_intfs(file, "class", file.classes)
            self.process_intfs(file, "interface", file.interfaces)
        elif self.detection_mode == DETECTION_MODE.BASE_CLASSES_ONLY:
            for cls_ in file.classes:
                base_class = (cls_.base_class).decl
                if base_class.parent_file != file and self.detector.visit_node(base_class.parent_file):
                    console.error(f'''Circular dependency found in file \'{file.export_scope.get_id()}\': class \'{cls_.name}\' inherited from \'{base_class.name}\' (from \'{base_class.parent_file.export_scope.get_id()}\')''')

    def process_package(self, pkg):
        """Run cycle detection over every file in `pkg`."""
        self.detector.find_cycles(pkg.files.values())

    def process_workspace(self, ws):
        """Run cycle detection over every package in `ws`."""
        for pkg in ws.packages.values():
            self.process_package(pkg)
11289911 | <reponame>antonelloceravola/ToolBOSCore
# -*- coding: utf-8 -*-
#
# Custom package settings
#
# Copyright (C)
# Honda Research Institute Europe GmbH
# Carl-Legien-Str. 30
# 63073 Offenbach/Main
# Germany
#
# UNPUBLISHED PROPRIETARY MATERIAL.
# ALL RIGHTS RESERVED.
#
#
# Presumably selects the clang toolchain for ToolBOS BST builds of this
# package — confirm against the ToolBOS BST documentation.
BST_useClang = True
# EOF
| StarcoderdataPython |
5085539 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-28 03:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: relaxes `Applicant.visa_exp_date` to
    # be optional (blank/null allowed) and sets its help/verbose text.

    dependencies = [
        ('conference', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='applicant',
            name='visa_exp_date',
            field=models.DateField(blank=True, help_text='Format: DD/MM/YYYY', null=True, verbose_name='Visa Expiry Date'),
        ),
    ]
| StarcoderdataPython |
12800707 | #!/usr/bin/env python
from __future__ import division, absolute_import, print_function
from future.builtins import super
from iris_sdk.models.base_resource import BaseResource
from iris_sdk.models.data.totals import TotalsData
XML_NAME_TOTALS = "Quantity"
XPATH_TOTALS = "/totals"
class Totals(BaseResource, TotalsData):
    """Aggregate telephone-number totals for a resource.

    Maps the ``<Quantity>`` XML element served at the ``/totals`` endpoint.
    """

    _node_name = XML_NAME_TOTALS
    _xpath = XPATH_TOTALS

    def get(self):
        """Fetch the totals from the API and return the parsed data."""
        return self._get_data()
1801371 | <reponame>pablogs98/lithops
#
# (C) Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import time
import logging
from requests.exceptions import SSLError as TooManyConnectionsError
from io import BytesIO
from google.api_core import exceptions as google_exceptions
from google.cloud import storage
from google.cloud.exceptions import NotFound
from lithops.constants import STORAGE_CLI_MSG
from lithops.storage.utils import StorageNoSuchKeyError
logging.getLogger('urllib3').setLevel(logging.CRITICAL)
logger = logging.getLogger(__name__)
class GCPStorageBackend:
    """Lithops storage backend for Google Cloud Storage (GCS).

    Implements the Lithops storage interface (put/get/head/delete/list) on
    top of the ``google-cloud-storage`` client library.
    """

    def __init__(self, gcp_storage_config):
        logger.debug("Creating GCP Storage client")
        self.credentials_path = gcp_storage_config['credentials_path']
        try:
            # Prefer explicit service-account credentials from the JSON file.
            self.client = storage.Client.from_service_account_json(self.credentials_path)
        except Exception:
            # Fall back to application-default credentials (e.g. when running
            # inside a GCP Cloud Functions environment).
            self.client = storage.Client()
        msg = STORAGE_CLI_MSG.format('GCP')
        logger.info(msg)

    def get_client(self):
        """
        Get the underlying google.cloud.storage client.

        :return: storage.Client instance
        """
        return self.client

    def put_object(self, bucket_name, key, data):
        """
        Put an object in GCS. Overrides the object if the key already exists.

        :param bucket_name: name of the bucket
        :param key: key of the object
        :param data: data of the object
        :type data: str/bytes
        :return: None
        """
        done = False
        while not done:
            try:
                bucket = self.client.get_bucket(bucket_name)
                blob = bucket.blob(blob_name=key)
                blob.upload_from_string(data=data)
                done = True
            except TooManyConnectionsError:
                # Transient SSL/connection exhaustion: back off and retry.
                time.sleep(0.1)
            except google_exceptions.NotFound:
                raise StorageNoSuchKeyError(bucket=bucket_name, key=key)

    def get_object(self, bucket_name, key, stream=False, extra_get_args=None):
        """
        Get an object from GCS. Throws StorageNoSuchKeyError if the given
        key does not exist.

        :param bucket_name: name of the bucket
        :param key: key of the object
        :param stream: if True, return a seekable BytesIO instead of bytes
        :param extra_get_args: optional dict; supports a 'Range' entry of the
            form 'bytes=<start>-<end>' for partial reads
        :return: data of the object
        :rtype: str/bytes or BytesIO
        """
        # Default changed from a shared mutable ``{}`` to ``None`` (mutable
        # default argument pitfall); the falsy check below covers both.
        try:
            bucket = self.client.get_bucket(bucket_name)
            blob = bucket.blob(blob_name=key)
        except google_exceptions.NotFound:
            raise StorageNoSuchKeyError(bucket_name, key)

        if not blob.exists():
            raise StorageNoSuchKeyError(bucket_name, key)

        if extra_get_args and 'Range' in extra_get_args:
            start, end = re.findall(r'\d+', extra_get_args['Range'])
            start = int(start)
            end = int(end)
        else:
            start, end = None, None

        if stream:
            stream = BytesIO()
            # Download object to bytes buffer
            blob.download_to_file(stream, start=start, end=end)
            stream.seek(0)  # Return to the initial buffer position
            return stream
        else:
            return blob.download_as_string(start=start, end=end)

    def head_object(self, bucket_name, key):
        """
        Head an object from GCS. Throws StorageNoSuchKeyError if the given
        key does not exist.

        :param bucket_name: name of the bucket
        :param key: key of the object
        :return: metadata of the object (last modified, etag, type, length)
        :rtype: dict
        """
        try:
            bucket = self.client.get_bucket(bucket_name)
            blob = bucket.get_blob(blob_name=key)
        except google_exceptions.NotFound:
            raise StorageNoSuchKeyError(bucket_name, key)

        if blob is None:
            raise StorageNoSuchKeyError(bucket_name, key)

        response = {
            'LastModified': blob.updated,
            'ETag': blob.etag,
            'content-type': blob.content_type,
            'content-length': blob.size
        }
        return response

    def delete_object(self, bucket_name, key):
        """
        Delete an object from storage.

        :param bucket_name: bucket name
        :param key: data key
        """
        try:
            bucket = self.client.get_bucket(bucket_name)
        except google_exceptions.NotFound:
            raise StorageNoSuchKeyError(bucket_name, key)
        blob = bucket.get_blob(blob_name=key)
        if blob is None:
            raise StorageNoSuchKeyError(bucket_name, key)
        blob.delete()

    def delete_objects(self, bucket_name, key_list):
        """
        Delete a list of objects from storage. Missing keys are ignored.

        :param bucket_name: bucket name
        :param key_list: list of keys
        """
        bucket = self.client.get_bucket(bucket_name)
        try:
            bucket.delete_blobs(blobs=key_list)
        except google_exceptions.NotFound:
            pass

    def head_bucket(self, bucket_name):
        # Not implemented for this backend; deliberately a no-op.
        pass

    def list_objects(self, bucket_name, prefix=None):
        """
        Return a list of objects for the given bucket and prefix.

        :param bucket_name: Name of the bucket.
        :param prefix: Prefix to filter object names.
        :return: List of {'Key', 'Size'} dicts for objects matching the prefix.
        :rtype: list of dict
        """
        try:
            page = self.client.get_bucket(bucket_name).list_blobs(prefix=prefix)
        except google_exceptions.ClientError:
            raise StorageNoSuchKeyError(bucket_name, '')
        return [{'Key': blob.name, 'Size': blob.size} for blob in page]

    def list_keys(self, bucket_name, prefix=None):
        """
        Return a list of keys for the given prefix.

        :param bucket_name: Name of the bucket.
        :param prefix: Prefix to filter object names.
        :return: List of keys in bucket that match the given prefix.
        :rtype: list of str
        """
        try:
            page = list(self.client.get_bucket(bucket_name).list_blobs(prefix=prefix))
        except google_exceptions.ClientError:
            raise StorageNoSuchKeyError(bucket_name, '')
        return [blob.name for blob in page]
| StarcoderdataPython |
# Read a comma-separated list of integers, print the largest one, then build
# a mapping {divisor: [numbers divisible by it]}; numbers for which no
# divisor was found are collected under the key "None".
# NOTE(review): the divisor range is bounded by half of the *maximum* input
# value, not half of each number — confirm whether range(2, elm // 2) was
# intended instead.
word = input("Enter a string: ")
word = word.split(", ")
hnum = 0  # running maximum of the parsed numbers
dic = {}
for elm in word:
    elm = int(elm)
    if hnum == 0:
        hnum = elm
    elif hnum < elm:
        hnum = elm
print(hnum)
for elm in word:
    flag = True  # stays True when no divisor is found -> "None" bucket
    for i in range(2,hnum//2):
        elm = int(elm)
        if elm%i==0:
            flag = False
            if i not in dic.keys():
                dic[i] = [elm]
            else:
                temp = dic[i]
                temp.append(elm)
                dic[i] = temp
    if flag:
        if "None"not in dic.keys():
            dic["None"] = [elm]
        else:
            temp = dic["None"]
            temp.append(elm)
            dic["None"] = temp
print(dic)
3365040 | <reponame>feloundou/safe-experts<filename>algos/make_experts.py<gh_stars>0
from agent_utils import Expert
from adabelief_pytorch import AdaBelief
import gym
import safety_gym
from ppo_algos import MLPActorCritic
from utils import setup_logger_kwargs, mpi_fork

ENV_NAME = 'Safexp-PointGoal1-v0'

# NOTE(review): this file contained unresolved git merge-conflict markers
# (<<<<<<< HEAD / ======= / >>>>>>>), which are a syntax error in Python.
# Both sides of the conflict consisted only of commented-out variants of the
# training loop below, so the markers and the dead commented-out code were
# removed; the live code path is unchanged.

# Load the experiment configurations (defines CONFIG_LIST) into this
# module's global namespace.
exec(open('nn_config.py').read(), globals())

mpi_fork(2)  # run parallel code with mpi

CONFIG_LIST2 = CONFIG_LIST

# Train one PPO expert per configuration on the safety-gym PointGoal1 task.
for configuration in CONFIG_LIST2:
    print(configuration)

    logger_kwargs_BIG = setup_logger_kwargs(configuration['name'], configuration['seed'])

    BIG_EXPERT = Expert(config_name=configuration['name'],
                        record_samples=True,
                        actor_critic=MLPActorCritic,
                        ac_kwargs=dict(hidden_sizes=[configuration['hid']] * configuration['l']),
                        seed=configuration['seed'],
                        penalty_init=5e-3)

    BIG_EXPERT.ppo_train(env_fn=lambda: gym.make(ENV_NAME),
                         epochs=1000,
                         gamma=configuration['gamma'],
                         lam=configuration['lam'],
                         steps_per_epoch=configuration['steps'],
                         train_pi_iters=100,
                         pi_lr=3e-4,
                         train_vf_iters=100,
                         vf_lr=1e-3,
                         penalty_lr=configuration['penalty_lr'],
                         cost_lim=configuration['cost_lim'],
                         clip_ratio=0.2,
                         max_ep_len=1000,
                         save_every=10,
                         wandb_write=False,
                         logger_kwargs=logger_kwargs_BIG)

    print("just finished!")
| StarcoderdataPython |
9730692 | # Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Aggregation factory for adding custom measurements."""
import inspect
from typing import Any, Callable, Dict, Optional
from tensorflow_federated.python.aggregators import factory
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.templates import aggregation_process
from tensorflow_federated.python.core.templates import measured_process
def add_measurements(
    inner_agg_factory: factory.AggregationFactory,
    *,
    client_measurement_fn: Optional[Callable[..., Dict[str, Any]]] = None,
    server_measurement_fn: Optional[Callable[..., Dict[str, Any]]] = None,
) -> factory.AggregationFactory:
  """Wraps `AggregationFactory` to report additional measurements.

  The function `client_measurement_fn` should be a Python callable that will be
  called as `client_measurement_fn(value)` or `client_measurement_fn(value,
  weight)` depending on whether `inner_agg_factory` is weighted or unweighted.
  It must be traceable by TFF and expect `tff.Value` objects placed at `CLIENTS`
  as inputs, and return a `collections.OrderedDict` mapping string names to
  tensor values placed at `SERVER`, which will be added to the measurement dict
  produced by the `inner_agg_factory`.

  Similarly, `server_measurement_fn` should be a Python callable that will be
  called as `server_measurement_fn(result)` where `result` is the result (on
  server) of the inner aggregation.

  One or both of `client_measurement_fn` and `server_measurement_fn` must be
  specified.

  Args:
    inner_agg_factory: The factory to wrap and add measurements.
    client_measurement_fn: A Python callable that will be called on `value`
      (and/or `weight`) provided to the `next` function to compute additional
      measurements of the client values/weights.
    server_measurement_fn: A Python callable that will be called on the `result`
      of aggregation at server to compute additional measurements of the result.

  Returns:
    An `AggregationFactory` that reports additional measurements.

  Raises:
    ValueError: If neither measurement fn is given, or an fn's arity does not
      match the (un)weightedness of `inner_agg_factory`.
  """
  py_typecheck.check_type(inner_agg_factory,
                          factory.AggregationFactory.__args__)

  if not (client_measurement_fn or server_measurement_fn):
    raise ValueError('Must specify one or both of `client_measurement_fn` or '
                     '`server_measurement_fn`.')

  # Validate arity up front: unweighted factories call fn(value), weighted
  # factories call fn(value, weight).
  if client_measurement_fn:
    py_typecheck.check_callable(client_measurement_fn)
    if isinstance(inner_agg_factory, factory.UnweightedAggregationFactory):
      if len(inspect.signature(client_measurement_fn).parameters) != 1:
        raise ValueError(
            '`client_measurement_fn` must take a single parameter if '
            '`inner_agg_factory` is unweighted.')
    elif isinstance(inner_agg_factory, factory.WeightedAggregationFactory):
      if len(inspect.signature(client_measurement_fn).parameters) != 2:
        raise ValueError(
            '`client_measurement_fn` must take a two parameters if '
            '`inner_agg_factory` is weighted.')

  if server_measurement_fn:
    py_typecheck.check_callable(server_measurement_fn)
    if len(inspect.signature(server_measurement_fn).parameters) != 1:
      raise ValueError('`server_measurement_fn` must take a single parameter.')

  # Merges the extra measurements into the inner factory's measurement dict;
  # if the inner factory reported no measurements, just use the new ones.
  @computations.tf_computation()
  def dict_update(orig_dict, new_values):
    if not orig_dict:
      return new_values
    orig_dict.update(new_values)
    return orig_dict

  if isinstance(inner_agg_factory, factory.WeightedAggregationFactory):

    class WeightedWrappedFactory(factory.WeightedAggregationFactory):
      """Wrapper for `WeightedAggregationFactory` that adds new measurements."""

      def create(
          self, value_type: factory.ValueType, weight_type: factory.ValueType
      ) -> aggregation_process.AggregationProcess:
        py_typecheck.check_type(value_type, factory.ValueType.__args__)
        py_typecheck.check_type(weight_type, factory.ValueType.__args__)

        inner_agg_process = inner_agg_factory.create(value_type, weight_type)
        init_fn = inner_agg_process.initialize

        @computations.federated_computation(
            init_fn.type_signature.result,
            computation_types.at_clients(value_type),
            computation_types.at_clients(weight_type))
        def next_fn(state, value, weight):
          # Run the inner aggregation, then fold in any client- and
          # server-side measurements on top of its measurement dict.
          inner_agg_output = inner_agg_process.next(state, value, weight)
          measurements = inner_agg_output.measurements
          if client_measurement_fn:
            client_measurements = client_measurement_fn(value, weight)
            measurements = intrinsics.federated_map(
                dict_update, (measurements, client_measurements))
          if server_measurement_fn:
            server_measurements = server_measurement_fn(inner_agg_output.result)
            measurements = intrinsics.federated_map(
                dict_update, (measurements, server_measurements))
          return measured_process.MeasuredProcessOutput(
              state=inner_agg_output.state,
              result=inner_agg_output.result,
              measurements=measurements)

        return aggregation_process.AggregationProcess(init_fn, next_fn)

    return WeightedWrappedFactory()
  else:

    class UnweightedWrappedFactory(factory.UnweightedAggregationFactory):
      """Wrapper for `UnweightedAggregationFactory` that adds new measurements."""

      def create(
          self, value_type: factory.ValueType
      ) -> aggregation_process.AggregationProcess:
        py_typecheck.check_type(value_type, factory.ValueType.__args__)

        inner_agg_process = inner_agg_factory.create(value_type)
        init_fn = inner_agg_process.initialize

        @computations.federated_computation(
            init_fn.type_signature.result,
            computation_types.at_clients(value_type))
        def next_fn(state, value):
          # Same post-processing as the weighted variant, minus the weight.
          inner_agg_output = inner_agg_process.next(state, value)
          measurements = inner_agg_output.measurements
          if client_measurement_fn:
            client_measurements = client_measurement_fn(value)
            measurements = intrinsics.federated_map(
                dict_update, (measurements, client_measurements))
          if server_measurement_fn:
            server_measurements = server_measurement_fn(inner_agg_output.result)
            measurements = intrinsics.federated_map(
                dict_update, (measurements, server_measurements))
          return measured_process.MeasuredProcessOutput(
              state=inner_agg_output.state,
              result=inner_agg_output.result,
              measurements=measurements)

        return aggregation_process.AggregationProcess(init_fn, next_fn)

    return UnweightedWrappedFactory()
| StarcoderdataPython |
11240824 | <gh_stars>0
import tensorflow as tf
import os
import numpy as np
from fetcher import *
def load_ellipsoid_as_tensor():
    # Load the pickled ellipsoid mesh info file and derive per-vertex
    # neighbor lists for the three mesh resolutions (156 / 618 / 2466
    # vertices). Most of the tensor-conversion code below is commented-out
    # exploration and is kept as-is.
    # NOTE(review): `pickle` is not imported in this file's visible imports —
    # presumably re-exported by `from fetcher import *`; confirm.
    pkl = pickle.load(open('utils/ellipsoid/info_ellipsoid.dat', 'rb'))
    coord = pkl[0]        # initial vertex coordinates
    pool_idx = pkl[4]     # graph-pooling indices between resolutions
    faces = pkl[5]
    lape_idx = pkl[7]
    edges = []
    for i in range(1, 4):
        # pkl[1..3] hold per-resolution adjacency/support data; adj[0] is the
        # edge index list for that resolution.
        adj = pkl[i][1]
        edges.append(adj[0])
    neighbors = [[], [], []]
    neighbors[0] = [[] for i in range(156)]
    neighbors[1] = [[] for i in range(618)]
    neighbors[2] = [[] for i in range(2466)]
    i = 0
    for e in edges:
        for p in e:
            # Skip self-loops; otherwise record p[1] as a neighbor of p[0].
            if p[0] == p[1]:
                continue
            neighbors[i][p[0]].append(p[1])
        i = i + 1
    # print neighbors
    # print neighbors
    # print lape_idx[0][0]
    # Coord
    #coord_ts = tf.convert_to_tensor(coord, dtype = tf.float32)
    # Support 1 - 3
    # print pkl[1][1][2]
    # print tf.convert_to_tensor_or_sparse_tensor(pkl[1][1])
    # s_1 = tf.convert_to_tensor(pkl[1][1][0], dtype=tf.int64)
    # s_2 = tf.convert_to_tensor(pkl[1][1][1], dtype=tf.float32)
    # s_3 = tf.convert_to_tensor(pkl[1][1][2], dtype=tf.int64)
    # sparse_support = tf.SparseTensor(indices=s_1, values=s_2, dense_shape=s_3)
    # sparse_support2 = tf.SparseTensor(indices=pkl[1][1][0], values=pkl[1][1][1], dense_shape=pkl[1][1][2])
    #support_x = [tf.convert_to_tensor(s[0],dtype=tf.float32) for s in pkl[1]]
    # print support_x
    # faces
    #faces_ts = [tf.convert_to_tensor(f) for f in faces]
    # edges
    #edges_ts = [tf.convert_to_tensor(t) for t in edges]
    # lape idx
    #lape_ts = [tf.convert_to_tensor(l) for l in lape_idx]
    # pool idx
    #pool_ts = [tf.convert_to_tensor(p) for p in pool_idx]
    # print pool_ts
    #feat = tf.gather(tf.convert_to_tensor(coord), pool_idx[0])
    #sess = tf.Session()
    # print sess.run(feat)

# Executed at import time; acts as a smoke test of the data file.
load_ellipsoid_as_tensor()
| StarcoderdataPython |
9650033 | # Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kuryr_kubernetes.objects import base as kuryr_base
from kuryr_kubernetes.tests import base as test_base
from oslo_versionedobjects import base
from oslo_versionedobjects import fixture
# NOTE(danms): The hashes in this list should only be changed if
# they come with a corresponding version bump in the affected
# objects
object_data = {
'LBaaSListener': '1.0-a9e2d5c73687f5edc66fdb2f48650e15',
'LBaaSLoadBalancer': '1.3-8bc0a9bdbd160da67572aa38784378d1',
'LBaaSMember': '1.0-a770c6884c27d6d8c21186b27d0e2ccb',
'LBaaSPool': '1.1-6e77370d7632a902445444249eb77b01',
'LBaaSPortSpec': '1.1-1b307f34630617086c7af70f2cb8b215',
'LBaaSPubIp': '1.0-83992edec2c60fb4ab8998ea42a4ff74',
'LBaaSServiceSpec': '1.0-d430ecd443f2b1999196bfe531e56f7e',
'LBaaSState': '1.0-a0ff7dce2d3f6ce1ffab4ff95a344361',
}
def get_kuryr_objects():
    """Get Kuryr versioned objects

    This returns a dict of versioned objects which are
    in the Kuryr project namespace only (excludes objects
    from os-vif and other 3rd party modules)

    :return: a dict mapping class names to lists of versioned objects
    """
    kuryr_namespace = kuryr_base.KuryrK8sObjectBase.OBJ_PROJECT_NAMESPACE
    registry = base.VersionedObjectRegistry.obj_classes()
    # Keep only the entries whose first registered class belongs to the
    # Kuryr project namespace.
    return {name: classes
            for name, classes in registry.items()
            if classes[0].OBJ_PROJECT_NAMESPACE == kuryr_namespace}
class TestObjectVersions(test_base.TestCase):
    # Guards against silent changes to versioned-object schemas: the pinned
    # hashes in `object_data` must be updated (together with a version bump)
    # whenever an object's fields change.

    def test_versions(self):
        """Test Versions

        Ensures that modified objects had their versions bumped
        """
        checker = fixture.ObjectVersionChecker(
            get_kuryr_objects())
        # Compare the freshly computed fingerprints against the pinned map.
        expected, actual = checker.test_hashes(object_data)
        self.assertEqual(expected, actual,
                         """Some objects have changed; please make sure the
                         versions have been bumped and backporting
                         compatibility code has been added to
                         obj_make_compatible if necessary, and then update
                         their hashes in the object_data map in this test
                         module. If we don't need to add backporting code then
                         it means we also don't need the version bump and we
                         just have to change the hash in this module.""")
| StarcoderdataPython |
6493741 | import torch
import numpy as np
from data.agent_vocab import AgentVocab
from .metadata_helper import get_shapes_metadata, get_metadata_properties
from .dataloader_helper import get_shapes_features, get_shapes_dataloader
from enums.dataset_type import DatasetType
from models.shapes_receiver import ShapesReceiver
from models.shapes_sender import ShapesSender
from models.shapes_trainer import ShapesTrainer
from models.shapes_single_model import ShapesSingleModel
from models.shapes_meta_visual_module import ShapesMetaVisualModule
from models.messages_receiver import MessagesReceiver
def get_sender_receiver(device, args) -> (ShapesSender, ShapesReceiver, MessagesReceiver):
    # Build the sender/receiver agent pair for the shapes task according to
    # `args`, optionally loading pre-trained weights from disk.
    # NOTE(review): the annotated return type mentions MessagesReceiver, but
    # `diagnostic_receiver` is never constructed here and is always returned
    # as None — confirm whether that third slot is still used by callers.
    # Load Vocab
    vocab = AgentVocab(args.vocab_size)

    baseline_receiver = None
    diagnostic_receiver = None

    # Architecture search is disabled: cell type is fixed and the genotype
    # dict stays empty.
    cell_type = "lstm"
    genotype = {}
    if args.single_model:
        sender = ShapesSingleModel(
            args.vocab_size,
            args.max_length,
            vocab.bound_idx,
            embedding_size=args.embedding_size,
            hidden_size=args.hidden_size,
            greedy=args.greedy,
            cell_type=cell_type,
            genotype=genotype,
            dataset_type=args.dataset_type,
        )
        baseline_receiver = ShapesSingleModel(
            args.vocab_size,
            args.max_length,
            vocab.bound_idx,
            embedding_size=args.embedding_size,
            hidden_size=args.hidden_size,
            greedy=args.greedy,
            cell_type=cell_type,
            genotype=genotype,
            dataset_type=args.dataset_type,
        )
    else:
        sender = ShapesSender(
            args.vocab_size,
            args.max_length,
            vocab.bound_idx,
            device,
            embedding_size=args.embedding_size,
            hidden_size=args.hidden_size,
            greedy=args.greedy,
            cell_type=cell_type,
            genotype=genotype,
            dataset_type=args.dataset_type,
            inference_step=args.inference_step
        )
        baseline_receiver = ShapesReceiver(
            args.vocab_size,
            device,
            embedding_size=args.embedding_size,
            hidden_size=args.hidden_size
        )

    # Optionally replace either agent with a fully serialized checkpoint.
    if args.sender_path:
        sender = torch.load(args.sender_path)
    if args.receiver_path:
        baseline_receiver = torch.load(args.receiver_path)

    return sender, baseline_receiver, diagnostic_receiver
def get_trainer(
        sender,
        device,
        inference_step,
        multi_task,
        multi_task_lambda,
        dataset_type,
        step3,
        hidden_size,
        baseline_receiver=None,
        diagnostic_receiver=None,
        disabled_properties=None):
    """Assemble a ShapesTrainer around the given sender/receiver agents.

    Raw datasets require on-the-fly feature extraction, hence the
    ``extract_features`` flag derived from ``dataset_type``.
    """
    return ShapesTrainer(
        sender,
        device,
        inference_step,
        multi_task,
        multi_task_lambda,
        step3,
        num_classes_by_model=[3, 3, 2, 3, 3],
        num_hidden=hidden_size,
        baseline_receiver=baseline_receiver,
        diagnostic_receiver=diagnostic_receiver,
        extract_features=(dataset_type == "raw"),
        disabled_properties=disabled_properties)
def get_meta_data():
    """Return metadata properties for the train, valid and test splits."""
    splits = (DatasetType.Train, DatasetType.Valid, DatasetType.Test)
    return tuple(get_metadata_properties(dataset=split) for split in splits)
def get_training_data(device, batch_size, k, debugging, dataset_type, step3, zero_shot, property_one, property_two):
    """Load dataloaders plus validation metadata/features for the shapes task.

    Returns
    -------
    tuple
        (train_data, valid_data, test_data, valid_meta_data, valid_features)
    """
    valid_meta_data = get_shapes_metadata(dataset=DatasetType.Valid)
    # Load the validation visual features once. (The original code computed
    # this twice, discarding the first result.)
    valid_features = get_shapes_features(device=device, dataset=DatasetType.Valid)

    # Load data
    train_data, valid_data, test_data = get_shapes_dataloader(
        device=device,
        batch_size=batch_size,
        k=k,
        debug=debugging,
        dataset_type=dataset_type,
        step3=step3,
        zero_shot=zero_shot,
        property_one=property_one,
        property_two=property_two,
        valid_meta_data=valid_meta_data)

    return (train_data, valid_data, test_data, valid_meta_data, valid_features)
def get_raw_data(args, dataset=DatasetType.Valid):
    """Return raw (unprocessed) features for the given dataset split.

    Raises
    ------
    ValueError
        If ``args.task`` is not a supported task type.
    """
    if args.task == "shapes":
        return get_shapes_features(dataset=dataset, mode="raw")
    # Bug fix: this previously called the non-existent str method `.formate`,
    # which raised AttributeError instead of the intended ValueError.
    raise ValueError(
        "Unsupported task type for raw : {}".format(args.task))
def save_example_images(args, filename):
    """Dump the first ten raw validation samples to ``<filename>/example_batch.npy``."""
    if not args.save_example_batch:
        return
    example_batch = get_raw_data(args)[:10]
    np.save(filename + "/example_batch.npy", example_batch)
| StarcoderdataPython |
388285 | '''
GSLCodeGenerators for code that uses the ODE solver provided by the GNU Scientific Library (GSL)
'''
import os
import re
import sys
import numpy as np
from brian2.units.fundamentalunits import fail_for_dimension_mismatch
from brian2.core.variables import AuxiliaryVariable, ArrayVariable, Constant
from brian2.core.functions import Function
from brian2.codegen.translation import make_statements
from brian2.codegen.permutation_analysis import (check_for_order_independence,
OrderDependenceError)
from brian2.core.preferences import prefs, BrianPreference
from brian2.utils.stringtools import get_identifiers, word_substitute
from brian2.parsing.statements import parse_statement
from brian2.codegen.generators import c_data_type
from brian2.core.preferences import PreferenceError
__all__ = ['GSLCodeGenerator', 'GSLWeaveCodeGenerator', 'GSLCythonCodeGenerator']
def valid_gsl_dir(val):
    '''
    Validate given string to be path containing required GSL files.
    '''
    # None means "use Python's default include path" and is always accepted.
    if val is None:
        return True
    if not isinstance(val, basestring):
        raise PreferenceError(('Illegal value for GSL directory: %s, '
                               'has to be str' % (str(val))))
    if not os.path.isdir(val):
        raise PreferenceError(('Illegal value for GSL directory: %s, '
                               'has to be existing directory' % (val)))
    # The directory must ship all three GSL headers under a 'gsl' subdir.
    required_headers = ('gsl_odeiv2.h', 'gsl_errno.h', 'gsl_matrix.h')
    if not all(os.path.isfile(os.path.join(val, 'gsl', header))
               for header in required_headers):
        raise PreferenceError(('Illegal value for GSL directory: %s, '
                               'has to contain gsl_odeiv2.h, gsl_errno.h '
                               'and gsl_matrix.h' % (val)))
    return True
# Register the GSL.directory preference with Brian's global preference
# system; user-supplied paths are checked by `valid_gsl_dir` above.
prefs.register_preferences(
    'GSL',
    'Directory containing GSL code',
    directory=BrianPreference(
        validator=valid_gsl_dir,
        docs=("Set path to directory containing GSL header files (gsl_odeiv2.h etc.)"
              "\nIf this directory is already in Python's include (e.g. because of "
              "conda installation), this path can be set to None."),
        default=None
    )
)
class GSLCodeGenerator(object):
'''
GSL code generator.
Notes
-----
Approach is to first let the already existing code generator for a target
language do the bulk of the translating from abstract_code to actual code.
This generated code is slightly adapted to render it GSL compatible.
The most critical part here is that the vector_code that is normally
contained in a loop in the ```main()``` is moved to the function ```_GSL_func```
that is sent to the GSL integrator. The variables used in the vector_code are
added to a struct named ```dataholder``` and their values are set from the
Brian namespace just before the scalar code block.
'''
    def __init__(self, variables, variable_indices, owner, iterate_all,
                 codeobj_class, name, template_name,
                 override_conditional_write=None,
                 allows_scalar_write=False):
        # Delegate the bulk of code generation to the target language's
        # original generator; GSL-specific rewriting is layered on top of
        # its output.
        self.generator = codeobj_class.original_generator_class(variables,
                                                                variable_indices,
                                                                owner, iterate_all,
                                                                codeobj_class, name,
                                                                template_name,
                                                                override_conditional_write,
                                                                allows_scalar_write)
        # Copy so later mutations (e.g. dt_start below) don't leak back into
        # the state updater's own options dict.
        self.method_options = dict(owner.state_updater.method_options)
        self.integrator = owner.state_updater.integrator
        # default timestep to start with is the timestep of the NeuronGroup itself
        self.method_options['dt_start'] = owner.dt.variable.get_value()[0]
        self.variable_flags = owner.state_updater._gsl_variable_flags
    def __getattr__(self, item):
        # Fall back to the wrapped language-specific generator for any
        # attribute not defined on this GSL wrapper itself.
        return getattr(self.generator, item)
# A series of functions that should be overridden by child class:
def c_data_type(self, dtype):
'''
Get string version of object dtype that is attached to Brian variables. c
pp_generator already has this function, but the Cython generator does not,
but we need it for GSL code generation.
'''
return NotImplementedError
    def initialize_array(self, varname, values):
        '''
        Initialize a static array with given floating point values. E.g. in C++,
        when called with arguments ``array`` and ``[1.0, 3.0, 2.0]``, this
        method should return ``double array[] = {1.0, 3.0, 2.0}``.

        Parameters
        ----------
        varname : str
            The name of the array variable that should be initialized
        values : list of float
            The values that should be assigned to the array

        Returns
        -------
        code : str
            One or more lines of array initialization code.
        '''
        # Abstract: implemented by the language-specific subclass.
        raise NotImplementedError
    def var_init_lhs(self, var, type):
        '''
        Get string version of the left hand side of an initializing expression

        Parameters
        ----------
        var : str
        type : str

        Returns
        -------
        code : str
            For cpp returns type + var, while for cython just var
        '''
        # Abstract: implemented by the language-specific subclass.
        raise NotImplementedError
    def unpack_namespace_single(self, var_obj, in_vector, in_scalar):
        '''
        Writes the code necessary to pull single variable out of the Brian
        namespace into the generated code.

        The code created is significantly different between cpp and cython,
        so I decided to not make this function general
        over all target languages (i.e. in contrast to most other functions
        that only have syntactical differences)
        '''
        # Abstract: implemented by the language-specific subclass.
        raise NotImplementedError
# GSL functions that are the same for all target languages:
def find_function_names(self):
'''
Return a list of used function names in the self.variables dictionary
Functions need to be ignored in the GSL translation process, because the
brian generator already sufficiently
dealt with them. However, the brian generator also removes them from the
variables dict, so there is no
way to check whether an identifier is a function after the brian
translation process. This function is called
before this translation process and the list of function names is stored
to be used in the GSL translation.
Returns
-------
function_names : list
list of strings that are function names used in the code
'''
variables = self.variables
return [var for var, var_obj in variables.iteritems()
if isinstance(var_obj, Function)]
    def is_cpp_standalone(self):
        '''
        Check whether we're running with cpp_standalone.

        Test if `get_device()` is instance `CPPStandaloneDevice`.

        Returns
        -------
        is_cpp_standalone : bool
            whether currently using cpp_standalone device

        See Also
        --------
        is_constant_and_cpp_standalone : uses the returned value
        '''
        # imports are local to avoid circular imports with brian2.devices
        from brian2.devices.device import get_device
        from brian2.devices.cpp_standalone.device import CPPStandaloneDevice
        device = get_device()
        return isinstance(device, CPPStandaloneDevice)
def is_constant_and_cpp_standalone(self, var_obj):
    """
    Tell whether GSL variable conversion must be skipped for ``var_obj``.

    With the cpp_standalone device, `Constant` variables are 'frozen'
    (replaced by their literal value) during code generation, so rewriting
    them to ``_GSL_dataholder.var`` would yield invalid code such as
    ``_GSL_dataholder.1.2``. The device check is cached on the instance so
    that ``get_device()`` is only consulted once.

    Parameters
    ----------
    var_obj : `Variable`
        Brian variable object to inspect.

    Returns
    -------
    bool
        True only when the device is cpp_standalone AND ``var_obj`` is a
        `Constant`.
    """
    try:
        standalone = self.cpp_standalone
    except AttributeError:
        # First call: determine and cache the device kind.
        standalone = self.cpp_standalone = self.is_cpp_standalone()
    return standalone and isinstance(var_obj, Constant)
def find_differential_variables(self, code):
    '''
    Find the variables that were tagged ``_gsl_{var}_f{ind}`` and return
    var, ind pairs.

    `GSLStateUpdater` tagged differential variables and here we extract the
    information given in these tags.

    Parameters
    ----------
    code : list of strings
        A list of strings containing gsl tagged variables

    Returns
    -------
    diff_vars : dict
        A dictionary with variable names as keys and differential equation
        index as value
    '''
    diff_vars = {}
    for expr_set in code:
        for expr in expr_set.split('\n'):
            expr = expr.strip(' ')
            try:
                lhs, op, rhs, comment = parse_statement(expr)
            except ValueError:
                # Not a parseable statement: skip this line. (The previous
                # `pass` fell through to the re.search below with an
                # undefined or stale `lhs`, causing a NameError on the first
                # unparsable line or matching the wrong variable later.)
                continue
            m = re.search(r'_gsl_(.+?)_f([0-9]*)$', lhs)
            if m:
                diff_vars[m.group(1)] = m.group(2)
    return diff_vars
def diff_var_to_replace(self, diff_vars):
    '''
    Collect the string substitutions needed for differential variables when
    going from normal Brian code to GSL-compatible code.

    The code emitted by Brian's regular generators reads state values
    straight from the state arrays; inside ``_GSL_func`` they must instead
    come from the ``_GSL_y`` vector handed over by GSL. This method gathers
    the corresponding text replacements.

    Parameters
    ----------
    diff_vars : dict
        Maps each differential variable name to its equation index.

    Returns
    -------
    dict
        Maps original code fragments (keys) to their GSL replacements
        (values).
    '''
    replacements = {}
    for name, eq_index in diff_vars.items():
        # Left-hand-side replacements are target-language specific.
        replacements.update(self.var_replace_diff_var_lhs(name, eq_index))
        pointer = self.generator.get_array_name(self.variables[name],
                                                access_data=True)
        # TODO: index could be dynamic instead of hard-coded '_idx'?
        original_read = '%s = %s[_idx]' % (name, pointer)
        replacements[original_read] = '%s = _GSL_y[%s]' % (name, eq_index)
    return replacements
def get_dimension_code(self, diff_num):
    '''
    Build the target-language function that reports the ODE dimension.

    GSL needs to know how many differential variables the ODE system has.
    Since the vector-loop code is kept identical across simulations, this
    number is communicated through a small generated ``set_dimension``
    function; only minor syntactical differences exist between targets.

    Parameters
    ----------
    diff_num : int
        Number of differential variables that describe the ODE system.

    Returns
    -------
    set_dimension_code : str
        Target-language source of ``set_dimension`` as a single string.
    '''
    template = ('\n{start_declare}int set_dimension(size_t * dimension)'
                '{open_function}'
                '\n\tdimension[0] = %d{end_statement}'
                '\n\treturn GSL_SUCCESS{end_statement}{end_function}')
    # Substitute the dimension first, then apply the target syntax.
    return (template % diff_num).format(**self.syntax)
def yvector_code(self, diff_vars):
    '''
    Build the two functions that move state between Brian arrays and GSL's
    y vector.

    ``_fill_y_vector`` copies the current differential-variable values from
    Brian's arrays into ``_GSL_y`` before integration; ``_empty_y_vector``
    writes the integrated values back afterwards.

    Parameters
    ----------
    diff_vars : dict
        Maps differential variable names (str) to their equation index.

    Returns
    -------
    yvector_code : str
        Source of both functions as a single string, with the target
        language's syntax applied.
    '''
    fill_lines = [("\n{start_declare}int _fill_y_vector(_dataholder *"
                   "_GSL_dataholder, double * _GSL_y, int _idx){open_function}")]
    empty_lines = [("\n{start_declare}int _empty_y_vector(_dataholder * "
                    "_GSL_dataholder, double * _GSL_y, int _idx){"
                    "open_function}")]
    for name, index in diff_vars.items():
        index = int(index)
        array_name = self.generator.get_array_name(self.variables[name],
                                                   access_data=True)
        fill_lines.append("\t_GSL_y[%d] = _GSL_dataholder{access_pointer}%s["
                          "_idx]{end_statement}" % (index, array_name))
        empty_lines.append("\t_GSL_dataholder{access_pointer}%s[_idx] = _GSL_y["
                           "%d]{end_statement}" % (array_name, index))
    footer = '\treturn GSL_SUCCESS{end_statement}{end_function}'
    fill_lines.append(footer)
    empty_lines.append(footer)
    return '\n'.join(fill_lines + empty_lines).format(**self.syntax)
def make_function_code(self, lines):
    '''
    Wrap GSL-translated vector code in the fixed ``_GSL_func`` scaffold.

    The non-changing parts of the ``_GSL_func`` definition (signature,
    cast of ``params`` to the ``_dataholder`` struct, ``_idx`` lookup and
    the final return) are added around the already translated equation
    code, with target-language syntax applied at the end.

    Parameters
    ----------
    lines : str
        Code containing the GSL version of the equations.

    Returns
    -------
    function_code : str
        Complete ``_GSL_func`` definition handed to the GSL integrator.
    '''
    header = ("\n{start_declare}int _GSL_func(double t, const double "
              "_GSL_y[], double f[], void * params){open_function}"
              "\n\t{start_declare}_dataholder * _GSL_dataholder = {open_cast}"
              "_dataholder *{close_cast} params{end_statement}"
              "\n\t{start_declare}int _idx = _GSL_dataholder{access_pointer}_idx"
              "{end_statement}")
    footer = "\treturn GSL_SUCCESS{end_statement}{end_function}"
    return '\n'.join([header, lines, footer]).format(**self.syntax)
def write_dataholder_single(self, var_obj):
    '''
    Render one member declaration for the ``_dataholder`` struct.

    Parameters
    ----------
    var_obj : `Variable`
        Brian variable to declare.

    Returns
    -------
    code : str
        Declaration string for the struct, e.g.
        ``double* _array_neurongroup_v`` — arrays become pointers, plain
        variables become value members.
    '''
    dtype = self.c_data_type(var_obj.dtype)
    if not isinstance(var_obj, ArrayVariable):
        return '%s %s{end_statement}' % (dtype, var_obj.name)
    pointer_name = self.get_array_name(var_obj, access_data=True)
    # Not every wrapped generator defines a restrict keyword, and scalar
    # array variables never get one.
    restrict = getattr(self.generator, 'restrict', '')
    if var_obj.scalar:
        restrict = ''
    return '%s* %s %s{end_statement}' % (dtype, restrict, pointer_name)
def write_dataholder(self, variables_in_vector):
    '''
    Render the complete ``_dataholder`` struct definition.

    Parameters
    ----------
    variables_in_vector : dict
        Maps variable names (str) to `Variable` objects used in vector code.

    Returns
    -------
    code : str
        Struct definition in the target language.
    '''
    lines = ['\n{start_declare}struct _dataholder{open_struct}',
             '\tint _idx{end_statement}']
    for name, var_obj in variables_in_vector.items():
        # 't' is supplied by GSL itself, '_gsl' tags are internal, and
        # cpp_standalone constants are frozen to literals elsewhere.
        if (name == 't' or '_gsl' in name
                or self.is_constant_and_cpp_standalone(var_obj)):
            continue
        lines.append('\t' + self.write_dataholder_single(var_obj))
    lines.append('{end_struct}')
    return '\n'.join(lines).format(**self.syntax)
def scale_array_code(self, diff_vars, method_options):
    '''
    Return code for definition of ``_GSL_scale_array`` in generated code.

    Parameters
    ----------
    diff_vars : dict
        dictionary with variable name (str) as key and differential variable
        index (int) as value
    method_options : dict
        dictionary containing integrator settings

    Returns
    -------
    code : str
        full code describing a function returning an array containing
        doubles with the absolute errors for each differential variable
        (according to their assigned index in the GSL StateUpdater)
    '''
    # get scale values per variable from method_options
    abs_per_var = method_options['absolute_error_per_variable']
    abs_default = method_options['absolute_error']

    if not isinstance(abs_default, float):
        raise TypeError(("The absolute_error key in method_options should be "
                         "a float. Was type %s" % (str(type(abs_default)))))

    if abs_per_var is None:
        # single default error for every differential variable
        diff_scale = {var: float(abs_default) for var in diff_vars.keys()}
    elif isinstance(abs_per_var, dict):
        diff_scale = {}
        for var, error in abs_per_var.items():
            # first do some checks on input
            if var not in diff_vars:
                if var not in self.variables:
                    raise KeyError("absolute_error specified for variable "
                                   "that does not exist: %s" % var)
                else:
                    raise KeyError("absolute_error specified for variable "
                                   "that is not being integrated: %s" % var)
            # fixed typo in user-facing message ("varialbe" -> "variable")
            fail_for_dimension_mismatch(error, self.variables[var],
                                        ("Unit of absolute_error_per_variable "
                                         "for variable %s does not match "
                                         "unit of variable itself" % var))
            # if all these pass we can add the value for error in base units
            diff_scale[var] = float(error)
        # set the variables that are not mentioned to the default value
        for var in diff_vars.keys():
            if var not in abs_per_var:
                diff_scale[var] = float(abs_default)
    else:
        raise TypeError(("The absolute_error_per_variable key in method_options "
                         "should either be None or a dictionary "
                         "containing the error for each individual state variable. "
                         "Was type %s" % (str(type(abs_per_var)))))

    # write code; ordering follows the sorted variable names, matching the
    # index assignment of the GSL StateUpdater
    return self.initialize_array('_GSL_scale_array',
                                 [diff_scale[var] for var in sorted(diff_vars)])
def find_undefined_variables(self, statements):
    '''
    Collect identifiers that are assigned in the code but absent from
    ``self.variables``.

    Brian does not keep its ``_lio_`` temporaries anywhere, yet the GSL
    implementation stores them in the ``_dataholder`` struct and therefore
    needs their dtypes. Every unknown left-hand-side name is wrapped in an
    `AuxiliaryVariable` (only its dtype is used later).

    Parameters
    ----------
    statements : list
        Statement objects (must expose a ``dtype`` attribute).

    Returns
    -------
    other_variables : dict
        Maps each undefined identifier to its `AuxiliaryVariable`.

    Notes
    -----
    Kept separate from ``self.variables`` so that variables from the Brian
    namespace stay distinguishable from ones defined in the code itself.
    '''
    known = self.variables
    discovered = {}
    for statement in statements:
        name = statement.var
        if name not in known:
            discovered[name] = AuxiliaryVariable(name, dtype=statement.dtype)
    return discovered
def find_used_variables(self, statements, other_variables):
    '''
    Collect every variable read in the right-hand sides of the given
    statements.

    Parameters
    ----------
    statements : list
        Statement objects to scan.
    other_variables : dict
        Variables defined inside the code itself (see
        `find_undefined_variables`).

    Returns
    -------
    used_variables : dict
        Maps each used identifier to its `Variable` object (which carries
        all needed info: dtype, name, isarray). Function names are skipped.
    '''
    used = {}
    for statement in statements:
        rhs = statement.expr
        for name in get_identifiers(rhs):
            # Functions are handled by the wrapped generator, not by GSL.
            if name in self.function_names:
                continue
            if name in self.variables:
                used[name] = self.variables[name]
            else:
                used[name] = other_variables[name]
    # get_identifiers misses pure write targets (e.g. not_refractory), so
    # also add every array-level read/write reported for the statements.
    read, write, _ = self.array_read_write(statements)
    for name in read | write:
        if name not in used:
            # always an array, so guaranteed to exist in self.variables
            used[name] = self.variables[name]
    return used
def to_replace_vector_vars(self, variables_in_vector, ignore=frozenset()):
    '''
    Create dictionary containing key, value pairs with to be replaced text
    to translate from conventional Brian to GSL.

    Parameters
    ----------
    variables_in_vector : dict
        dictionary with variable name (str), `Variable` pairs of variables
        occurring in vector code
    ignore : set, optional
        set of strings with variable names that should be ignored

    Returns
    -------
    to_replace : dict
        dictionary with strings that need to be replaced i.e. _lio_1 will be
        _GSL_dataholder._lio_1 (in cython) or _GSL_dataholder->_lio_1 (cpp)

    Notes
    -----
    t will always be added because GSL defines its own t.
    i.e. for cpp: {'const t = _ptr_array_defaultclock_t[0];' : ''}

    Side effect: every variable handled here (including 't' and frozen
    cpp_standalone constants) is removed from
    ``self.variables_to_be_processed``.
    '''
    access_pointer = self.syntax['access_pointer']
    to_replace = {}
    t_in_code = None
    for var, var_obj in variables_in_vector.items():
        # 't' is only remembered here; GSL supplies its own t, so its
        # declaration is removed (not rewritten) further below.
        if var_obj.name == 't':
            t_in_code = var_obj
            continue
        # internal '_gsl' tags and explicitly ignored names stay untouched
        if '_gsl' in var or var in ignore:
            continue
        if self.is_constant_and_cpp_standalone(var_obj):
            # does not have to be processed by GSL generator
            # (cpp_standalone freezes constants to their literal values)
            self.variables_to_be_processed.remove(var_obj.name)
            continue
        if isinstance(var_obj, ArrayVariable):
            pointer_name = self.get_array_name(var_obj, access_data=True)
            to_replace[pointer_name] = '_GSL_dataholder' + access_pointer + pointer_name
        else:
            to_replace[var] = '_GSL_dataholder' + access_pointer + var

    # also make sure t declaration is replaced if in code
    if t_in_code is not None:
        t_declare = self.var_init_lhs('t', 'const double ')
        array_name = self.get_array_name(t_in_code, access_data=True)
        end_statement = self.syntax['end_statement']
        replace_what = ("{t_declare} = {array_name}[0]"
                        "{end_statement}".format(t_declare=t_declare,
                                                 array_name=array_name,
                                                 end_statement=end_statement))
        # mapping to '' deletes the declaration from the generated code
        to_replace[replace_what] = ''
        self.variables_to_be_processed.remove('t')

    return to_replace
def unpack_namespace(self, variables_in_vector, variables_in_scalar,
                     ignore=frozenset()):
    '''
    Generate the code that moves variables from the Brian namespace into the
    generated cython/cpp namespace.

    Vector-code variables end up in the ``_dataholder`` struct
    (``_GSL_dataholder->var`` or ``_GSL_dataholder.var = ...``); a variable
    may legitimately occur in both scalar and vector code.

    Parameters
    ----------
    variables_in_vector : dict
        Maps names to `Variable` objects used in vector code.
    variables_in_scalar : dict
        Maps names to `Variable` objects used in scalar code.
    ignore : set, optional
        Variable names to skip entirely.

    Returns
    -------
    unpack_namespace_code : str
        Code fragment performing the namespace unpacking.
    '''
    lines = []
    for name, var_obj in self.variables.items():
        if name in ignore or self.is_constant_and_cpp_standalone(var_obj):
            continue
        in_vector = name in variables_in_vector
        in_scalar = name in variables_in_scalar
        if in_vector:
            # bookkeeping: this variable now ends up in _GSL_dataholder
            self.variables_to_be_processed.remove(name)
        lines.append(self.unpack_namespace_single(var_obj, in_vector,
                                                  in_scalar))
    return '\n'.join(lines)
def translate_vector_code(self, code_lines, to_replace):
    '''
    Translate vector code to GSL compatible code by substituting fragments
    of code.

    Parameters
    ----------
    code_lines : list
        list of strings describing the vector_code
    to_replace : dict
        dictionary with to be replaced strings (see to_replace_vector_vars
        and to_replace_diff_vars)

    Returns
    -------
    vector_code : str
        New code that is now to be added to the function that is sent to the
        GSL integrator
    '''
    code = []
    for expr_set in code_lines:
        # every line separate to make tabbing correct
        for line in expr_set.split('\n'):
            code += ['\t' + line]
    code = '\n'.join(code)
    code = word_substitute(code, to_replace)

    # word_substitute is limited by regex word boundaries for patterns like
    # `variable[_idx]`, so handle those replacements literally here.
    for from_sub, to_sub in to_replace.items():
        m = re.search(r'\[(\w+)\];?$', from_sub)
        if m:
            # fixed: the old code built a regex by escaping only '[' via a
            # replacement string '\[' (a bad escape that raises re.error on
            # Python 3.7+ and left other metacharacters unescaped). A plain
            # literal replacement is equivalent and robust.
            code = code.replace(from_sub, to_sub)

    if '_gsl' in code:
        raise AssertionError(('Translation failed, _gsl still in code (should only '
                              'be tag, and should be replaced.\n'
                              'Code:\n%s' % code))
    return code
def translate_scalar_code(self, code_lines, variables_in_scalar,
                          variables_in_vector):
    '''
    Translate scalar code: if calculated variables are used in the
    vector_code their value is added to the variable in the _dataholder.

    Parameters
    ----------
    code_lines : list
        list of strings containing scalar code
    variables_in_vector : dict
        dictionary with variable name (str), `Variable` pairs of variables
        occurring in vector code
    variables_in_scalar : dict
        dictionary with variable name (str), `Variable` pairs of variables
        occurring in scalar code

    Returns
    -------
    scalar_code : str
        code fragment that should be injected in the main before the loop
    '''
    code = []
    for line in code_lines:
        # isolate a 'var = expr' assignment; anything else is kept verbatim
        m = re.search('(\w+ = .*)', line)
        try:
            new_line = m.group(1)
            var, op, expr, comment = parse_statement(new_line)
        except (ValueError, AttributeError):
            # no match (m is None -> AttributeError) or unparsable statement
            code += [line]
            continue
        if var in variables_in_scalar.keys():
            # purely scalar variable: keep the assignment as-is
            code += [line]
        elif var in variables_in_vector.keys():
            if var == 't':
                # GSL defines its own t, so the assignment is dropped
                continue
            try:
                self.variables_to_be_processed.remove(var)
            except KeyError:
                # NOTE(review): if variables_to_be_processed is a list,
                # .remove raises ValueError, not KeyError, so this guard
                # would not fire — confirm the container type set in
                # translate().
                raise AssertionError(("Trying to process variable named %s by "
                                      "putting its value in the _GSL_dataholder "
                                      "based on scalar code, but the variable "
                                      "has been processed already." % var))
            # route the scalar result into the struct used by _GSL_func
            code += ['_GSL_dataholder.{var} {op} {expr} {comment}'.format(
                var=var, op=op, expr=expr, comment=comment)]
    return '\n'.join(code)
def add_gsl_variables_as_non_scalar(self, diff_vars):
    '''
    Register the ``_gsl`` tag variables as non-scalar.

    `GSLStateUpdater` substitutes differential variables with tagged
    placeholders carrying the information needed for the GSL translation.
    Brian would render those placeholders as scalar whenever the right-hand
    side contains no vector variable; declaring them here as non-scalar
    `AuxiliaryVariable` objects forces vector treatment.

    Parameters
    ----------
    diff_vars : dict
        Maps differential variable names to their equation index.
    '''
    for name, index in diff_vars.items():
        tag = '_gsl_%s_f%s' % (name, index)
        self.variables[tag] = AuxiliaryVariable(name, scalar=False)
def add_meta_variables(self, options):
    '''
    Optionally create per-neuron bookkeeping arrays for the GSL integrator.

    Depending on the ``options`` flags, this adds owner arrays for the last
    successful timestep, the number of failed steps and the total step
    count, registers them in ``self.variables`` and returns the generated
    per-index pointer expressions (or None for disabled options).

    Parameters
    ----------
    options : dict
        Method options; keys used here: 'use_last_timestep', 'dt_start',
        'save_failed_steps', 'save_step_count'.

    Returns
    -------
    dict
        {'pointer_last_timestep', 'pointer_failed_steps',
         'pointer_step_count'} mapping to pointer strings or None.
    '''
    if options['use_last_timestep']:
        try:
            # NOTE(review): here the 'N' lookup sits INSIDE the try, so a
            # missing 'N' is swallowed like a duplicate-array KeyError; the
            # two branches below look 'N' up outside the try — confirm this
            # asymmetry is intended.
            N = int(self.variables['N'].get_value())
            self.owner.variables.add_array('_last_timestep', size=N,
                                           values=np.ones(N)*options['dt_start'],
                                           dtype=np.float64)
        except KeyError:
            # has already been run
            pass
        self.variables['_last_timestep'] = self.owner.variables.get('_last_timestep')
        pointer_last_timestep = self.get_array_name(self.variables['_last_timestep']) + '[_idx]'
    else:
        pointer_last_timestep = None

    if options['save_failed_steps']:
        N = int(self.variables['N'].get_value())
        try:
            self.owner.variables.add_array('_failed_steps', size=N, dtype=np.int32)
        except KeyError:
            # has already been run
            pass
        self.variables['_failed_steps'] = self.owner.variables.get('_failed_steps')
        pointer_failed_steps = self.get_array_name(self.variables['_failed_steps']) + '[_idx]'
    else:
        pointer_failed_steps = None

    if options['save_step_count']:
        N = int(self.variables['N'].get_value())
        try:
            self.owner.variables.add_array('_step_count', size=N, dtype=np.int32)
        except KeyError:
            # has already been run
            pass
        self.variables['_step_count'] = self.owner.variables.get('_step_count')
        pointer_step_count = self.get_array_name(self.variables['_step_count']) + '[_idx]'
    else:
        pointer_step_count = None

    return {'pointer_last_timestep' : pointer_last_timestep,
            'pointer_failed_steps' : pointer_failed_steps,
            'pointer_step_count' : pointer_step_count}
def translate(self, code, dtype):
    # TODO: it's not so nice we have to copy the contents of this function...
    '''
    Translate an abstract code block into the target language.

    GSL-specific override: runs the wrapped generator's normal translation
    and then rewrites the result so that the state update happens inside a
    generated ``_GSL_func`` handed to the GSL integrator.

    Parameters
    ----------
    code : dict
        Abstract code blocks (block name -> code string).
    dtype : dtype
        Default data type for newly created statements.

    Returns
    -------
    (scalar_code, vector_code, kwds) : (dict, dict, dict)
        Translated scalar and vector code plus template keyword arguments.
    '''
    # first check if user code is not using variables that are also used by GSL
    reserved_variables = ['_dataholder', '_fill_y_vector', '_empty_y_vector',
                          '_GSL_dataholder', '_GSL_y', '_GSL_func']
    if any([var in self.variables for var in reserved_variables]):
        raise ValueError(("The variables %s are reserved for the GSL "
                          "internal code." % (str(reserved_variables))))

    # if the following statements are not added, Brian translates the
    # differential expressions in the abstract code for GSL to scalar
    # statements in the case no non-scalar variables are used in the expression
    diff_vars = self.find_differential_variables(list(code.values()))
    self.add_gsl_variables_as_non_scalar(diff_vars)

    # add arrays we want to use in generated code before
    # self.generator.translate() so brian does namespace unpacking for us
    pointer_names = self.add_meta_variables(self.method_options)

    scalar_statements = {}
    vector_statements = {}
    # .items()/.values() instead of Py2-only iteritems()/itervalues():
    # identical behavior on Python 2, also valid on Python 3
    for ac_name, ac_code in code.items():
        statements = make_statements(ac_code,
                                     self.variables,
                                     dtype,
                                     optimise=True,
                                     blockname=ac_name)
        scalar_statements[ac_name], vector_statements[ac_name] = statements
    for vs in vector_statements.values():
        # Check that the statements are meaningful independent of the order
        # of execution (e.g. for synapses)
        try:
            # only do order dependence if there are repeated indices
            if self.has_repeated_indices(vs):
                check_for_order_independence(vs,
                                             self.generator.variables,
                                             self.generator.variable_indices)
        except OrderDependenceError:
            # If the abstract code is only one line, display it in full
            if len(vs) <= 1:
                error_msg = 'Abstract code: "%s"\n' % vs[0]
            else:
                # fixed: this used a corrupted '%_GSL_driver' conversion
                # specifier that raised ValueError at %-format time
                error_msg = ('%d lines of abstract code, first line is: '
                             '"%s"\n') % (len(vs), vs[0])
            # fixed: error_msg used to be built and then silently dropped;
            # surface the problem instead (upstream Brian logs a warning
            # here via its logger — confirm preferred channel)
            import warnings
            warnings.warn('Came across an abstract code block that may not '
                          'be well-defined: the outcome may depend on the '
                          'order of execution. ' + error_msg)

    # save function names because self.generator.translate_statement_sequence
    # deletes these from self.variables but we need to know which identifiers
    # we can safely ignore (i.e. we can ignore the functions because they are
    # handled by the original generator)
    self.function_names = self.find_function_names()

    scalar_code, vector_code, kwds = self.generator.translate_statement_sequence(
        scalar_statements, vector_statements)

    ############ translate code for GSL

    # first check if any indexing other than '_idx' is used (currently not
    # supported); list() + list() so the concatenation also works with
    # Python 3 dict views
    for code_list in list(scalar_code.values()) + list(vector_code.values()):
        for code_string in code_list:
            m = re.search(r'\[(\w+)\]', code_string)
            if m is not None:
                if m.group(1) != '0' and m.group(1) != '_idx':
                    # import here to avoid circular import
                    from brian2.stateupdaters.base import UnsupportedEquationsException
                    raise UnsupportedEquationsException(("Equations result in state "
                                                         "updater code with indexing "
                                                         "other than '_idx', which "
                                                         "is currently not supported "
                                                         "in combination with the "
                                                         "GSL stateupdater."))

    # differential variable specific operations
    to_replace = self.diff_var_to_replace(diff_vars)
    GSL_support_code = self.get_dimension_code(len(diff_vars))
    GSL_support_code += self.yvector_code(diff_vars)

    # analyze all needed variables; if not in self.variables: put in a
    # separate dict. Also keep track of variables needed for scalar
    # statements and vector statements
    other_variables = self.find_undefined_variables(scalar_statements[None] +
                                                    vector_statements[None])
    variables_in_scalar = self.find_used_variables(scalar_statements[None],
                                                   other_variables)
    variables_in_vector = self.find_used_variables(vector_statements[None],
                                                   other_variables)
    # so that _dataholder holds diff_vars as well, even if they don't occur
    # in the actual statements
    for var in diff_vars.keys():
        if var not in variables_in_vector:
            variables_in_vector[var] = self.variables[var]
    # keep track of the variables that eventually need to be added to the
    # _GSL_dataholder somehow. A set, so that .remove() raises KeyError (as
    # translate_scalar_code's except clause expects — list.remove raises
    # ValueError) and so this works on Python 3 where dict.keys() is a view
    self.variables_to_be_processed = set(variables_in_vector.keys())

    # add code for _dataholder struct
    GSL_support_code = self.write_dataholder(variables_in_vector) + GSL_support_code
    # add e.g. _lio_1 --> _GSL_dataholder._lio_1 to replacer
    to_replace.update(self.to_replace_vector_vars(variables_in_vector,
                                                  ignore=set(diff_vars.keys())))
    # write statements that unpack (python) namespace to _dataholder struct
    # or local namespace
    GSL_main_code = self.unpack_namespace(variables_in_vector,
                                          variables_in_scalar, ['t'])

    # rewrite actual calculations described by vector_code and put them in
    # _GSL_func
    func_code = self.translate_one_statement_sequence(vector_statements[None],
                                                      scalar=False)
    GSL_support_code += self.make_function_code(
        self.translate_vector_code(func_code, to_replace))
    scalar_func_code = self.translate_one_statement_sequence(scalar_statements[None],
                                                             scalar=True)
    # rewrite scalar code, keep variables that are needed in scalar code
    # normal and add variables to _dataholder for vector_code
    GSL_main_code += '\n' + self.translate_scalar_code(scalar_func_code,
                                                       variables_in_scalar,
                                                       variables_in_vector)
    if len(self.variables_to_be_processed) > 0:
        raise AssertionError(("Not all variables that will be used in the vector "
                              "code have been added to the _GSL_dataholder. This "
                              "might mean that the _GSL_func is using uninitialized "
                              "variables."
                              "\nThe unprocessed variables "
                              "are: %s" % (str(self.variables_to_be_processed))))

    scalar_code['GSL'] = GSL_main_code
    kwds['define_GSL_scale_array'] = self.scale_array_code(diff_vars,
                                                           self.method_options)
    kwds['n_diff_vars'] = len(diff_vars)
    kwds['GSL_settings'] = dict(self.method_options)
    kwds['GSL_settings']['integrator'] = self.integrator
    kwds['support_code_lines'] += GSL_support_code.split('\n')
    kwds['t_array'] = self.get_array_name(self.variables['t']) + '[0]'
    kwds['dt_array'] = self.get_array_name(self.variables['dt']) + '[0]'
    kwds['define_dt'] = 'dt' not in variables_in_scalar
    kwds['cpp_standalone'] = self.is_cpp_standalone()
    for key, value in pointer_names.items():
        kwds[key] = value
    return scalar_code, vector_code, kwds
class GSLCythonCodeGenerator(GSLCodeGenerator):
    """GSL code generator targeting Cython."""

    # Target-language specific syntax fragments used by the shared templates.
    syntax = {'end_statement': '',
              'access_pointer': '.',
              'start_declare': 'cdef ',
              'open_function': ':',
              'open_struct': ':',
              'end_function': '',
              'end_struct': '',
              'open_cast': '<',
              'close_cast': '>',
              'diff_var_declaration': ''}

    def c_data_type(self, dtype):
        # Delegate to the module-level helper.
        return c_data_type(dtype)

    def initialize_array(self, varname, values):
        """Emit a cdef'd C double array initialized with the given values."""
        value_list = ', '.join(repr(value) for value in values)
        template = ('cdef double {varname}[{n_values}]\n'
                    '{varname}[:] = [{value_list}]')
        return template.format(varname=varname, value_list=value_list,
                               n_values=len(values))

    def var_replace_diff_var_lhs(self, var, ind):
        """Map the '_gsl_{var}_f{ind}' tag to GSL's f[ind] output slot."""
        tag = '_gsl_{var}_f{ind}'.format(var=var, ind=ind)
        return {tag: 'f[{ind}]'.format(ind=ind)}

    def var_init_lhs(self, var, type):
        # Cython needs no type on the left-hand side of an initialization.
        return var

    def unpack_namespace_single(self, var_obj, in_vector, in_scalar):
        lines = []
        if isinstance(var_obj, ArrayVariable):
            array_name = self.generator.get_array_name(var_obj)
            dtype = self.c_data_type(var_obj.dtype)
            if in_vector:
                lines.append(('_GSL_dataholder.{array} = <{dtype} *> '
                              '_buf_{array}.data').format(array=array_name,
                                                          dtype=dtype))
            if in_scalar:
                lines.append(('{array} = <{dtype} *> '
                              '_buf_{array}.data').format(array=array_name,
                                                          dtype=dtype))
        else:
            if in_vector:
                lines.append('_GSL_dataholder.{var} = _namespace["{var}"]'
                             .format(var=var_obj.name))
            if in_scalar:
                lines.append('{var} = _namespace["{var}"]'
                             .format(var=var_obj.name))
        return '\n'.join(lines)

    @staticmethod
    def get_array_name(var, access_data=True):
        # Imported here to avoid circular import dependencies.
        from brian2.codegen.generators.cython_generator import CythonCodeGenerator
        return CythonCodeGenerator.get_array_name(var, access_data)
class GSLWeaveCodeGenerator(GSLCodeGenerator):
    """GSL code generator targeting weave (C++)."""

    def __getattr__(self, item):
        # Fall back to the wrapped base generator for anything not defined here.
        return getattr(self.generator, item)

    # Target-language specific syntax fragments used by the shared templates.
    syntax = {'end_statement': ';',
              'access_pointer': '->',
              'start_declare': '',
              'open_function': '\n{',
              'open_struct': '\n{',
              'end_function': '\n}',
              'end_struct': '\n};',
              'open_cast': '(',
              'close_cast': ')',
              'diff_var_declaration': 'const scalar '}

    def c_data_type(self, dtype):
        return self.generator.c_data_type(dtype)

    def initialize_array(self, varname, values):
        """Emit a const C double array literal initialized with the values."""
        value_list = ', '.join(repr(value) for value in values)
        return 'double const %s[] = {%s};' % (varname, value_list)

    def var_replace_diff_var_lhs(self, var, ind):
        """Map the tagged differential variable to GSL's f[ind] slot.

        For 'unless refractory' variables the declaration lines are mapped
        to '' as well, covering both substitution orders.
        """
        scalar_dtype = self.c_data_type(prefs.core.default_float_dtype)
        f = 'f[{ind}]'.format(ind=ind)
        try:
            flags = self.variable_flags[var]
        except KeyError:
            flags = ()
        if 'unless refractory' in flags:
            return {'_gsl_{var}_f{ind}'.format(var=var, ind=ind): f,
                    '{scalar_dtype} _gsl_{var}_f{ind};'.format(
                        var=var, ind=ind, scalar_dtype=scalar_dtype): '',
                    # in case the replacement of _gsl_var_f{ind} with f[ind]
                    # happens first
                    '{scalar_dtype} {f};'.format(f=f,
                                                 scalar_dtype=scalar_dtype): ''}
        return {'const {scalar_dtype} _gsl_{var}_f{ind}'.format(
                    scalar_dtype=scalar_dtype, var=var, ind=ind): f}

    def var_init_lhs(self, var, type):
        # C++ declarations carry the type on the left-hand side.
        return type + var

    def unpack_namespace_single(self, var_obj, in_vector, in_scalar):
        if isinstance(var_obj, ArrayVariable):
            pointer_name = self.get_array_name(var_obj, access_data=True)
            array_name = self.get_array_name(var_obj)
            if in_vector:
                return '_GSL_dataholder.{ptr} = {array};'.format(
                    ptr=pointer_name, array=array_name)
            return ''
        if in_vector:
            return '_GSL_dataholder.{var} = {var};'.format(var=var_obj.name)
        return ''

    @staticmethod
    def get_array_name(var, access_data=True):
        # Imported here to avoid circular import dependencies.
        from brian2.codegen.runtime.weave_rt import WeaveCodeGenerator
        return WeaveCodeGenerator.get_array_name(var, access_data)
| StarcoderdataPython |
11253887 | <filename>002_merge_json.py
import sys
import urllib
import json
import argparse
import urllib.request
import time
import os
import requests
import glob
import hashlib
# Merge every per-item JSON file under data/tmp/ into a single data/data.json
# containing a list of all parsed objects.
files = glob.glob("data/tmp/*.json")

arr = []
for i, path in enumerate(files):
    # progress indicator every 100 files
    if i % 100 == 0:
        print(i + 1, len(files))
    try:
        with open(path, 'r') as f:
            arr.append(json.load(f))
    except Exception as e:
        # skip unreadable or invalid JSON files, but report them
        print(path, e)

# fixed: the output handle was previously never closed/flushed; 'with'
# guarantees the merged file is written completely
with open("data/data.json", 'w') as fw2:
    json.dump(arr, fw2, ensure_ascii=False, indent=4, sort_keys=True,
              separators=(',', ': '))
| StarcoderdataPython |
9796753 | <filename>tests/test_gravity_ingestor.py
# coding: utf-8
import os
import unittest
import pandas as pd
import numpy as np
import datetime
from dgp.lib import gravity_ingestor as gi
class TestGravityIngestor(unittest.TestCase):
def test_read_bitfield_default(self):
    """Default _extract_bits call: 32 uint8 columns, LSB-first bit order."""
    status = pd.Series(data=[21061] * 5)
    unpacked = gi._extract_bits(status)
    # LSB-first expansion of 21061; bits 16..31 are all zero.
    row = [1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0] + [0] * 16
    expect = pd.DataFrame(data=np.array([row] * 5, dtype=np.uint8))
    self.assertTrue(unpacked.equals(expect))
def test_read_bitfield_options(self):
    """Exercise _extract_bits with explicit column names and as_bool=True."""
    status = pd.Series(data=[21061]*5)

    # test num columns specified less than num bits:
    # only the first len(columns) bits should be returned
    columns = ['test1', 'test2', 'test3', 'test4']
    unpacked = gi._extract_bits(status, columns=columns, as_bool=True)
    array = np.array([[1, 0, 1, 0],] * 5)
    expect = pd.DataFrame(data=array, columns=columns).astype(np.bool_)
    self.assertTrue(unpacked.equals(expect))

    # test num columns specified greater than num bits:
    # extra names beyond the 32 available bits should be dropped
    columns = ['test' + str(i) for i in range(1,35)]
    unpacked = gi._extract_bits(status, columns=columns, as_bool=True)
    array = np.array([[1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0,
                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],]*5,
                     dtype=np.uint8)
    expect_cols = ['test' + str(i) for i in range(1, 33)]
    expect = pd.DataFrame(data=array, columns=expect_cols).astype(np.bool_)
    self.assertTrue(unpacked.equals(expect))
    np.testing.assert_array_equal(unpacked.columns, expect.columns)

    # test num columns specified equal to num bits
    columns = ['test' + str(i) for i in range(1,33)]
    unpacked = gi._extract_bits(status, columns=columns, as_bool=True)
    array = np.array([[1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0,
                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],]*5,
                     dtype=np.uint8)
    expect_cols = ['test' + str(i) for i in range(1, 33)]
    expect = pd.DataFrame(data=array, columns=expect_cols).astype(np.bool_)
    self.assertTrue(unpacked.equals(expect))
    np.testing.assert_array_equal(unpacked.columns, expect.columns)
def test_import_at1a_no_fill_nans(self):
    """Without NaN-filling the incomplete row is dropped (9 rows remain)."""
    df = gi.read_at1a(os.path.abspath('tests/sample_gravity.csv'),
                      fill_with_nans=False)
    self.assertEqual(df.shape, (9, 26))
    # Spot-check an arbitrary line of the file against the parsed frame.
    sample_line = {
        'gravity': 10061.171360, 'long_accel': -0.026226,
        'cross_accel': -0.094891, 'beam': -0.093803, 'temp': 62.253987,
        'status': 21061, 'pressure': 39.690004, 'Etemp': 52.263138,
        'GPSweek': 1959, 'GPSweekseconds': 219697.800,
    }
    self.assertEqual(df.gravity[4], sample_line['gravity'])
    self.assertEqual(df.long_accel[4], sample_line['long_accel'])
    self.assertFalse(df.gps_sync[8])
def test_import_at1a_fill_nans(self):
df = gi.read_at1a(os.path.abspath('tests/sample_gravity.csv'))
self.assertEqual(df.shape, (10, 26))
fields = ['gravity', 'long_accel', 'cross', 'beam', 'temp', 'status', 'pressure', 'Etemp', 'GPSweek', 'GPSweekseconds']
# Test and verify an arbitrary line of data against the same line in the pandas DataFrame
line5 = [10061.171360, -0.026226, -0.094891, -0.093803, 62.253987, 21061, 39.690004, 52.263138, 1959, 219697.800]
sample_line = dict(zip(fields, line5))
self.assertEqual(df.gravity[5], sample_line['gravity'])
self.assertEqual(df.long_accel[5], sample_line['long_accel'])
self.assertTrue(df.iloc[[2]].isnull().values.all())
def test_import_at1a_interp(self):
df = gi.read_at1a(os.path.abspath('tests/sample_gravity.csv'), interp=True)
self.assertEqual(df.shape, (10, 26))
# check whether NaNs were interpolated for numeric type fields
self.assertTrue(df.iloc[[2]].notnull().values.any())
    def test_import_zls(self):
        """read_zls parses the whole 3-hour sample (10800 one-second records, 16 fields)."""
        df = gi.read_zls(os.path.abspath('tests/sample_zls'))
        self.assertEqual(df.shape, (10800, 16))
        # Spot-check one full record against the raw file contents.
        line21 = ['FLIGHT3', 12754.71, 12747.7, 0.3, -375.8, -1.0, 0.0, -14.0, 5.0, -2.0, 57.0, 4.0, 128.0, -15.0, 'FFFFFF', 34.0]
        self.assertEqual(df.iloc[[20]].values.tolist()[0], line21)

    def test_import_zls_times(self):
        """begin_time/end_time trim the frame; out-of-bounds times raise ValueError."""
        ok_begin_time = datetime.datetime(2015, 11, 12, hour=0, minute=30, second=0)
        ok_end_time = datetime.datetime(2015, 11, 12, hour=2, minute=30, second=0)
        df = gi.read_zls(os.path.abspath('tests/sample_zls'),
                         begin_time=ok_begin_time,
                         end_time=ok_end_time)
        # The trimmed frame starts and ends exactly at the requested bounds.
        self.assertTrue(df.index[0] == ok_begin_time)
        self.assertTrue(df.index[-1] == ok_end_time)
        # Begin time before the data coverage must be rejected.
        oob_begin_time = datetime.datetime(2015, 11, 11, hour=23, minute=0, second=0)
        with self.assertRaises(ValueError):
            df = gi.read_zls(os.path.abspath('tests/sample_zls'), begin_time=oob_begin_time)
        # End time after the data coverage must be rejected.
        oob_end_time = datetime.datetime(2015, 11, 12, hour=3, minute=0, second=0)
        with self.assertRaises(ValueError):
            df = gi.read_zls(os.path.abspath('tests/sample_zls'), end_time=oob_end_time)
        # End time earlier than begin time must be rejected.
        with self.assertRaises(ValueError):
            df = gi.read_zls(os.path.abspath('tests/sample_zls'),
                             begin_time=ok_begin_time,
                             end_time=oob_begin_time)
| StarcoderdataPython |
from math import ceil, floor, trunc

# Demonstrate the three stdlib float-to-int conversions on positive and
# negative values: floor rounds toward -inf, ceil toward +inf, trunc toward 0.
x = 1.4
y = 2.6
for rounding in (floor, ceil, trunc):
    print(rounding(x), rounding(y))
    print(rounding(-x), rounding(-y))
| StarcoderdataPython |
8040624 | '''
Copyright 2019 Secure Shed Project Dev Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# JSON Schema (draft-07) for the keypad/central-controller configuration file.
#
# Fix: "additionalProperties" is an *object-level* keyword.  The original
# schema also listed ``"additionalProperties": False`` inside each
# "properties" map, which instead declared a property literally named
# "additionalProperties" whose schema is the always-failing ``False``.
# Those misplaced entries were redundant (the correct object-level
# ``"additionalProperties": False`` already rejects undeclared keys) and
# have been removed; validation behavior is unchanged.
CONFIGURATIONJSONSCHEMA = \
{
    "$schema": "http://json-schema.org/draft-07/schema#",
    "definitions":
    {
        # One response action (e.g. disable keypad, trigger alarm).
        "action":
        {
            "type": "object",
            "additionalProperties": False,
            "properties":
            {
                "actionType":
                {
                    "type": "string",
                    "enum": ["disableKeyPad", "triggerAlarm", "resetAttemptAccount"]
                },
                "parameters":
                {
                    "type": "array",
                    "items": {"$ref": "#/definitions/actionParameter"},
                    "default": []
                }
            },
            "required": ["actionType"]
        },
        # Free-form key/value argument passed to an action.
        "actionParameter":
        {
            "type": "object",
            "additionalProperties": False,
            "properties":
            {
                "key": {"type": "string"},
                "value": {"type": "string"}
            },
            "required": ["key", "value"]
        },
        # Actions to execute once a given failed-attempt count is reached.
        "failedAttemptResponse":
        {
            "type": "object",
            "additionalProperties": False,
            "properties":
            {
                "attemptNo":
                {
                    "type": "integer",
                    "minimum": 1,
                    "maximum": 100
                },
                "actions":
                {
                    "type": "array",
                    "items": {"$ref": "#/definitions/action"},
                    "default": []
                }
            },
            "required": ["attemptNo", "actions"]
        }
    },
    "type": "object",
    "additionalProperties": False,
    "properties":
    {
        "failedAttemptResponses":
        {
            "type": "array",
            "items": {"$ref": "#/definitions/failedAttemptResponse"},
            "default": []
        },
        # Listening port and shared secret of the central controller API.
        "centralControllerApi":
        {
            "additionalProperties": False,
            "properties":
            {
                "networkPort":
                {
                    "type": "integer",
                    "minimum": 1
                },
                "authKey":
                {
                    "type": "string"
                }
            },
            "required": ["authKey", "networkPort"]
        },
        # Endpoint and shared secret for talking to the keypad controller.
        "keypadController":
        {
            "additionalProperties": False,
            "properties":
            {
                "endpoint":
                {
                    "type": "string"
                },
                "authKey":
                {
                    "type": "string"
                }
            },
            "required": ["authKey", "endpoint"]
        },
        # Paths to the secondary configuration files.
        "generalSettings":
        {
            "additionalProperties": False,
            "properties":
            {
                "devicesConfigFile":
                {
                    "type": "string"
                },
                "deviceTypesConfigFile":
                {
                    "type": "string"
                }
            },
            "required": ["devicesConfigFile", "deviceTypesConfigFile"]
        }
    },
    "required": ["centralControllerApi", "failedAttemptResponses",
                 "generalSettings", "keypadController"]
}
| StarcoderdataPython |
3377036 | <filename>tools/find_missing.py
# -*- coding: utf-8 -*-
# find missing glyphs needed to render given txt's
import json
def find_missing(pths, compiled_path="./dist/min-trad-compiled.json"):
    """Count CJK glyphs used by the given text files but absent from the compiled glyph set.

    Fixes over the original: the compiled JSON is loaded once instead of once
    per input file, files are closed via context managers, UTF-8 is used
    explicitly, and an empty result no longer raises ZeroDivisionError.

    Args:
        pths: iterable of text-file paths to scan.
        compiled_path: path to the compiled glyph JSON (keys are glyph chars).

    Returns:
        dict mapping each missing glyph (within the CJK Unified Ideographs
        range U+4E00..U+9FEF) to its occurrence count.
    """
    # Loop-invariant: load the compiled glyph table a single time.
    with open(compiled_path, encoding="utf-8") as infile:
        compiled = json.load(infile)
    missing = {}
    for p in pths:
        with open(p, encoding="utf-8") as txtfile:
            txt = txtfile.read()
        for c in txt:
            if c not in compiled and 0x4e00 <= ord(c) <= 0x9fef:
                missing[c] = missing.get(c, 0) + 1
    # Glyphs that occur often enough to be worth adding, most frequent first.
    frequent = sorted(((k, v) for k, v in missing.items() if v > 10),
                      key=lambda kv: -kv[1])
    print(frequent)
    total = sum(missing.values())
    coverage = float(sum(v for _, v in frequent)) / total if total else 0.0
    print(len(frequent), len(missing), coverage)
    return missing
find_missing([
u"../txt/彷徨朝花夕拾故事新编.txt",
u"../txt/唐诗宋词三百首.txt",
u"../txt/史记.txt",
u"../txt/古文观止.txt",
u"../txt/红楼梦.txt",
u"../txt/雅舍小品.txt",
u"../txt/子不语.txt",
u"../txt/闲情偶寄.txt",
u"../txt/六十种曲/還魂記.txt",
]) | StarcoderdataPython |
9783480 | import logging
from gym import envs
from gym.envs.registration import register
logger = logging.getLogger(__name__)

# Register openai's environments as multi agent
# This should be done before registering new environments
env_specs = [env_spec for env_spec in envs.registry.all() if 'gym.envs' in env_spec.entry_point]
for spec in env_specs:
    # Wrap each built-in gym environment under an "ma_"-prefixed id.
    # NOTE(review): relies on the private spec._kwargs attribute of gym's
    # EnvSpec — may break across gym versions; confirm against the pinned gym.
    register(
        id='ma_' + spec.id,
        entry_point='ma_gym.envs.openai:MultiAgentWrapper',
        kwargs={'name': spec.id, **spec._kwargs}
    )

# Native multi-agent environment shipped with ma_gym.
register(
    id='Combat-v0',
    entry_point='ma_gym.envs.combat:Combat',
)
| StarcoderdataPython |
1780968 | <filename>nicos_sinq/amor/setups/virtual_source.py
description = 'Virtual source for the Selene guide'

devices = dict(
    # Vertical virtual-source diaphragm: maps slot numbers 1-8 onto preset
    # motor positions (values presumably in degrees — confirm with instrument
    # documentation) and drives dvv_motor to the selected one.
    dvv = device('nicos_sinq.amor.devices.virtual_source.NumberSwitcher',
        description = 'Diaphragm virtual Source Vertical',
        mapping = {
            1: 0.,
            2: 45.,
            3: 90.,
            4: 135.,
            5: 180.,
            6: 225,
            7: 270,
            8: -45.
        },
        precision=0.1,
        moveable = 'dvv_motor',
        fmtstr = '%d',
    ),
    # EPICS motor backing dvv.
    dvv_motor = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Diaphragm virtual Source Vertical motor',
        motorpv = 'SQ:AMOR:motd:dvv',
        errormsgpv = 'SQ:AMOR:motd:dvv-MsgTxt',
        precision=0.1,
    ),
    # Horizontal virtual-source diaphragm, same 8-slot mapping as dvv.
    dvh = device('nicos_sinq.amor.devices.virtual_source.NumberSwitcher',
        description = 'Diaphragm virtual Source Horiziontal',
        mapping = {
            1: 0.,
            2: 45.,
            3: 90.,
            4: 135.,
            5: 180.,
            6: 225,
            7: 270,
            8: -45.
        },
        fmtstr = '%d',
        moveable = 'dvh_motor',
        precision=0.1,
    ),
    # EPICS motor backing dvh.
    dvh_motor = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Diaphragm virtual Source Horiziontal motor',
        motorpv = 'SQ:AMOR:motd:dvh',
        errormsgpv = 'SQ:AMOR:motd:dvh-MsgTxt',
        precision = 0.1,
    ),
    # Five-position switcher for the dmf axis.
    # NOTE(review): description says "Vertical" like dvv — looks like a
    # copy/paste; confirm the intended wording for dmf.
    dmf = device('nicos_sinq.amor.devices.virtual_source.NumberSwitcher',
        description = 'Diaphragm virtual Source Vertical',
        mapping = {
            1: 288.,
            2: 0.,
            3: 72,
            4: 144,
            5: 216.
        },
        fmtstr = '%d',
        precision=0.1,
        moveable = 'dmf_motor',
    ),
    # EPICS motor backing dmf.
    dmf_motor = device('nicos_ess.devices.epics.motor.EpicsMotor',
        epicstimeout = 3.0,
        description = 'Diaphragm virtual Source Vertical motor',
        motorpv = 'SQ:AMOR:motd:dmf',
        errormsgpv = 'SQ:AMOR:motd:dmf-MsgTxt',
        precision=0.1,
    ),
)
| StarcoderdataPython |
3549520 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 3 17:35:32 2020
@author: lutra
"""
import sqlite3

# Select the data root depending on which machine the script runs on.
PC_lab = False
if PC_lab:
    main = '/home/shiri/plasmid_project/Phage_like_plasmids/PLP_final/'
else:
    main = '/data/Current_work/Phage_like_plasmids/PLP_final/'
print(main)

# Project to plot ('SSU5', 'P1' or 'D6') and the SQLite table holding the hits.
proj = 'SSU5'
table = 'Phage_like_plasmids_SSU5_P1_D6_12Nov20'
database = main + table + '.sqlite3'
conn = sqlite3.connect(database)
cur = conn.cursor()

# Column carrying the replicon call differs for the D6 project.
if proj != 'D6':
    rep_column = 'enterobacteriaceae'
else:
    rep_column = 'D6_putative_replicon_orf42'

# Reference-coverage column prefix and the replicon names to query, per project.
ref_cov = {'D6': 'MF356679_D6_ref', 'P1': 'AF234172_P1_ref', 'SSU5': 'JQ965645_SSU5_ref'}
replicons = {'D6': ['D6_putative_replicon_orf42_c45699_46596'], 'P1':['IncY_1__K02380', 'p0111_1__AP010962'], 'SSU5': ['IncFIB_pHCM2_1__AL513384', 'IncFIB_pKPHS1_1__CP003223', 'IncFIB_H89-PhagePlasmid_1__HG530657', 'IncFIB_pLF82-PhagePlasmid_1__CU638872']}

# One "<replicon>__<classification>" label per sequence, across all replicons.
collect_all = []
for rep in replicons[proj]:
    # NOTE(review): SQL is built by f-string interpolation — acceptable for
    # this trusted, hard-coded analysis script, but not parameterized.
    task = f'SELECT nucleotide, completeness, slen, PLP_status, {rep_column}, {ref_cov[proj]}_cov, {ref_cov[proj]}_CDS_N FROM {table}'
    task += f" WHERE {rep_column} LIKE '%{rep}%'"
    qcovs_ranges = []
    CDSs_ranges = []
    rep_collect = []
    for row in cur.execute(task):
        gid, complete, slen, status, reps, cov, cds_n = [str(r) for r in row]
        cov = float(cov)
        if complete == 'complete' and int(slen) < 2000000 and cov:
            # NOTE(review): 'label' is never read afterwards — dead assignment?
            label = 'grouped'
        # Classify the sequence: grouped PLP, not complete, chromosome-sized,
        # or complete-but-low-coverage (the latter feeds the range summary).
        reason = ''
        if complete == 'complete' and int(slen) < 2000000 and cov >= 40:
            reason = 'grouped'
        elif complete != 'complete':
            reason = 'not complete'
        elif int(slen) >= 2000000:
            reason = 'chromosome'
        elif cov < 40:
            reason = 'lower_coverage'
            qcovs_ranges.append(cov)
            CDSs_ranges.append(int(cds_n))
        if not reason:
            print('REASON PROBLEM', gid)
        # Short replicon name used in the treemap labels.
        upd_rep = ''
        if proj == 'D6':
            upd_rep = 'D6_orf42'
        elif proj == 'P1':
            upd_rep = rep.split('_')[0]
        else:
            upd_rep = '_'.join(rep.split('_')[:2])
        if reason == 'grouped':
            reason = f'{proj}-PLP group'
        rep_collect.append(f'{upd_rep}__{reason}')
    # Replace the 'lower_coverage' placeholder with the observed ranges.
    db_range = ''
    if qcovs_ranges:
        if len(qcovs_ranges) > 1:
            db_range = f'qcovs: {int(min(qcovs_ranges))}-{int(max(qcovs_ranges))}, CDSs: {min(CDSs_ranges)}-{max(CDSs_ranges)}, n={len(CDSs_ranges)}'
        else:
            db_range = f'qcovs: {int(qcovs_ranges[0])}, CDSs: {CDSs_ranges[0]}, n=1'
    rep_collect = [r.replace('lower_coverage', db_range) for r in rep_collect]
    collect_all += rep_collect

print('collect_all', len(collect_all))
# Unique cluster labels, sorted alphabetically, then grouped by classification.
collect_all_clust = list(set(collect_all))
collect_all_clust.sort()
collect_all_clust.sort(key = lambda x: (x.split('__')[-1]))

# Build the treemap inputs: one tile per cluster.
names = []
colors = []
sizes = []
color_lib = {'PLP group': 'green', 'chromosome': 'blue', 'not complete': 'grey', 'CDSs': 'orange'}
print('N of clusters', len(collect_all_clust))
for clust in collect_all_clust:
    print(clust, )
    size = collect_all.count(clust)
    # Pick the tile color from the first matching classification keyword.
    color = ''
    for col in color_lib:
        if col in clust:
            if not color:
                color = color_lib[col]
            else:
                print('COLOR EXISTS', color, col, clust)
    if not color:
        print('COLOR LOST', clust)
    # Tile label: replicon name plus member count, multi-line for big tiles.
    if size > 1:
        name = clust.split('__')[0] + '\nn=' + str(size)
    else:
        name = clust.split('__')[0] + ', ' + str(size)
    if 'CDSs' in clust:
        # Low-coverage clusters embed qcovs/CDS ranges; reflow onto 3 lines.
        name = clust.replace('__', '\n')
        name = name.replace(', CDSs', '%\nCDSs')
        # name = name.replace(', CDSs', '%, CDSs')
        name = name.replace(', n=', '\nn=')
        name, qcovs, cdss, n = name.split('\n')
        name = name + ', ' + n + '\n' + qcovs + '\n' + cdss
    # elif proj == 'SSU5':
    #     name = name.replace(', n=', '\nn=')
    names.append(name)
    colors.append(color)
    sizes.append(size)

import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import squarify # pip install squarify (algorithm for treemap)
from pylab import rcParams

# Figure geometry/fonts (P1-specific values kept for reference).
# if proj == 'P1':
#     rcParams['figure.figsize'] = 8,6
#     SMALL_SIZE = 14
# elif proj == 'SSU5':
rcParams['figure.figsize'] = 6,7
SMALL_SIZE = 14
plt.rcParams["font.family"] = "Arial"
MEDIUM_SIZE = SMALL_SIZE + 0
BIGGER_SIZE = SMALL_SIZE + 1
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title

# Create tree plot
alpha_level = 0.75
squarify.plot(sizes=sizes, label=names, color=colors, alpha=alpha_level, pad=True)
plt.axis('off')

#add user legend
legend_dict = { 'PLP_group': 'green', 'Chromosome': 'blue', 'Complete, lower coverage': 'orange', 'Not complete sequences': 'grey' }
patchList = []
for key in legend_dict:
    data_key = mpatches.Patch(color=legend_dict[key], label=key)
    patchList.append(data_key)
leg = plt.legend(handles=patchList, bbox_to_anchor=(1, 0.24), loc='upper left', frameon=False)

#add opacity to colors in the legend
for lh in leg.legendHandles:
    lh.set_alpha(alpha_level)

# Title and SVG export.
fig_name = {'SSU5': 'SSU5 four FIB replicons*', 'P1': 'P1-group IncY and p0111 replicons', 'D6': 'D6_orf42'}
plt.title(f'{fig_name[proj]}, n = {len(collect_all)}', y=-0.08)
plt.savefig(f'{main}Charts/{proj}_replicons.svg', format='svg', bbox_inches='tight')
plt.show()
| StarcoderdataPython |
1884165 | # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from deeppavlov.deprecated.agents.default_agent import DefaultAgent
from deeppavlov.deprecated.agents.processors import HighestConfidenceSelector
from deeppavlov.deprecated.skills.pattern_matching_skill import PatternMatchingSkill
def make_hello_bot_agent() -> DefaultAgent:
    """Build a toy greeting agent.

    Combines three PatternMatchingSkill instances — greeting, farewell and a
    catch-all fallback — under a DefaultAgent that answers with the most
    confident skill for each utterance.

    Returns:
        agent: Agent capable of handling several simple greetings.
    """
    greeting = PatternMatchingSkill(['Hello world'], patterns=['hi', 'hello', 'good day'])
    farewell = PatternMatchingSkill(['Goodbye world', 'See you around'], patterns=['bye', 'chao', 'see you'])
    fallback = PatternMatchingSkill(['I don\'t understand, sorry', 'I can say "Hello world"'])
    return DefaultAgent([greeting, farewell, fallback], skills_processor=HighestConfidenceSelector())
if __name__ == '__main__':
    # Smoke test: greet, say goodbye, and trigger the fallback skill.
    hello_bot_agent = make_hello_bot_agent()
    response = hello_bot_agent(['Hello', 'Bye', 'Or not'])
    print(response)
| StarcoderdataPython |
11267193 | <reponame>openedx/edx-celeryutils
"""
Testing persistent tasks.
A task built with the LoggedTask base class is imported from test_utils.tasks.
* simple_logged_task -
Emits an "INFO" level log statement as soon as the task returns.
Note that this will occur after the task runs if the task is run under
always_eager, but it will not be emitted if task.apply() is called directly.
"""
from unittest import mock
from billiard.einfo import ExceptionInfo
import pytest
from celery_utils.logged_task import LoggedTask
from test_utils import tasks
def test_no_failure():
    """A successful LoggedTask logs an INFO submission line and no errors."""
    with mock.patch('celery_utils.logged_task.log') as mocklog:
        result = tasks.simple_logged_task.apply_async(args=(3, 4), kwargs={'c': 5}, task_id='papers-please')
        result.wait()
        stringc = 'c' # Handle different string repr for python 2 and 3
        logmessage = "Task test_utils.tasks.simple_logged_task[papers-please] submitted with arguments (3, 4), {%r: 5}"
        mocklog.info.assert_called_with(logmessage % stringc)
        assert not mocklog.error.called

def test_failure():
    """A failing LoggedTask logs an ERROR containing the task id and traceback."""
    with mock.patch('celery_utils.logged_task.log') as mocklog:
        result = tasks.failed_logged_task.delay()
        with pytest.raises(ValueError):
            result.wait(retry=False)
        assert result.status == 'FAILURE'
        assert mocklog.error.called
        log_message = mocklog.error.call_args[0][0]
        assert f'[{result.task_id}] failed due to Traceback' in log_message

def test_retry():
    # With celery running in eager mode, the on_retry handler doesn't actually
    # get called when a retry happens. Here we just try to show that when it does
    # get called, the log message is formatted correctly.
    task = LoggedTask()
    task_id = 'my-id'
    args = (1, 2)
    kwargs = {'c': 3}
    try:
        raise ValueError()
    except ValueError as exc:
        # Capture real exception info so the traceback appears in the log.
        einfo = ExceptionInfo()
        with mock.patch('celery_utils.logged_task.log') as mocklog:
            task.on_retry(exc, task_id, args, kwargs, einfo)
        logmessage = mocklog.warning.call_args[0][0]
        assert f'[{task_id}]' in logmessage
        assert einfo.traceback in logmessage
| StarcoderdataPython |
3456767 | <gh_stars>0
import re
import random
import itertools
import math
from collections import defaultdict
import botconfig
import src.settings as var
from src.utilities import *
from src import channels, users, debuglog, errlog, plog
from src.functions import get_players, get_all_players, get_main_role
from src.decorators import cmd, event_listener
from src.messages import messages
from src.events import Event
# Mutable module state for the succubus role.
ENTRANCED = set()        # nicks currently entranced (pulled onto the succubi's side)
ENTRANCED_DYING = set()  # entranced nicks marked to die for not following the succubi's vote
VISITED = {}             # succubus nick -> nick visited tonight (None when they passed)
ALL_SUCC_IDLE = True     # True only while every succubus death so far was an idle-out

@cmd("visit", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("succubus",))
def hvisit(cli, nick, chan, rest):
    """Entrance a player, converting them to your team."""
    if VISITED.get(nick):
        # One visit per night.
        pm(cli, nick, messages["succubus_already_visited"].format(VISITED[nick]))
        return
    victim = get_victim(cli, nick, re.split(" +",rest)[0], False, True)
    if not victim:
        return
    if nick == victim:
        pm(cli, nick, messages["succubus_not_self"])
        return
    # Allow other roles (e.g. misdirection) to redirect or veto the visit.
    evt = Event("targeted_command", {"target": victim, "misdirection": True, "exchange": False})
    evt.dispatch(cli, var, "visit", nick, victim, frozenset({"detrimental", "immediate"}))
    if evt.prevent_default:
        return
    victim = evt.data["target"]
    VISITED[nick] = victim
    # Visiting a fellow succubus does not entrance them.
    if victim not in var.ROLES["succubus"]:
        ENTRANCED.add(victim)
        pm(cli, nick, messages["succubus_target_success"].format(victim))
    else:
        pm(cli, nick, messages["harlot_success"].format(victim))
    if nick != victim:
        if victim not in var.ROLES["succubus"]:
            pm(cli, victim, messages["notify_succubus_target"].format(nick))
        else:
            pm(cli, victim, messages["harlot_success"].format(nick))
        revt = Event("succubus_visit", {})
        revt.dispatch(cli, var, nick, victim)
    # TODO: split these into assassin, hag, and alpha wolf when they are split off
    # Retract any pending action the new thrall had aimed at a succubus.
    if var.TARGETED.get(victim) in var.ROLES["succubus"]:
        msg = messages["no_target_succubus"].format(var.TARGETED[victim])
        del var.TARGETED[victim]
        if victim in var.ROLES["village drunk"]:
            # Village drunk gets re-targeted at random instead.
            target = random.choice(list(set(list_players()) - var.ROLES["succubus"] - {victim}))
            msg += messages["drunk_target"].format(target)
            var.TARGETED[victim] = target
        pm(cli, victim, nick)
    if victim in var.HEXED and var.LASTHEXED[victim] in var.ROLES["succubus"]:
        # Undo a hex the victim had placed on a succubus.
        pm(cli, victim, messages["retract_hex_succubus"].format(var.LASTHEXED[victim]))
        var.TOBESILENCED.remove(nick)
        var.HEXED.remove(victim)
        del var.LASTHEXED[victim]
    if var.BITE_PREFERENCES.get(victim) in var.ROLES["succubus"]:
        # Undo an alpha-wolf bite aimed at a succubus.
        pm(cli, victim, messages["no_kill_succubus"].format(var.BITE_PREFERENCES[victim]))
        del var.BITE_PREFERENCES[victim]
    debuglog("{0} (succubus) VISIT: {1} ({2})".format(nick, victim, get_role(victim)))
    chk_nightdone(cli)

@cmd("pass", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("succubus",))
def pass_cmd(cli, nick, chan, rest):
    """Do not entrance someone tonight."""
    if VISITED.get(nick):
        pm(cli, nick, messages["succubus_already_visited"].format(VISITED[nick]))
        return
    # None marks an explicit pass so chk_nightdone counts this succubus as acted.
    VISITED[nick] = None
    pm(cli, nick, messages["succubus_pass"])
    debuglog("{0} (succubus) PASS".format(nick))
    chk_nightdone(cli)
@event_listener("harlot_visit")
def on_harlot_visit(evt, cli, var, nick, victim):
    # A harlot who visits a succubus becomes entranced.
    if victim in var.ROLES["succubus"]:
        pm(cli, nick, messages["notify_succubus_target"].format(victim))
        pm(cli, victim, messages["succubus_harlot_success"].format(nick))
        ENTRANCED.add(nick)

@event_listener("get_random_totem_targets")
def on_get_random_totem_targets(evt, var, shaman):
    # An entranced shaman may never randomly target a succubus.
    if shaman.nick in ENTRANCED:
        for succubus in get_all_players(("succubus",)):
            if succubus in evt.data["targets"]:
                evt.data["targets"].remove(succubus)

@event_listener("chk_decision", priority=0)
def on_chk_decision(evt, cli, var, force):
    # Entranced players cannot contribute votes to lynch a succubus.
    for votee, voters in evt.data["votelist"].items():
        if votee in var.ROLES["succubus"]:
            for vtr in ENTRANCED:
                if vtr in voters:
                    voters.remove(vtr)

def _kill_entranced_voters(var, votelist, not_lynching, votee):
    """Mark for death every entranced player who failed to follow a succubus's vote/abstain.

    ``votee`` is the person actually being lynched (None for an abstain).
    """
    if not var.ROLES["succubus"] & (set(itertools.chain(*votelist.values())) | not_lynching):
        # none of the succubi voted (or there aren't any succubi), so short-circuit
        return
    # kill off everyone entranced that did not follow one of the succubi's votes or abstain
    # unless a succubus successfully voted the target, then people that didn't follow are spared
    ENTRANCED_DYING.update(ENTRANCED - var.DEAD)
    for other_votee, other_voters in votelist.items():
        if var.ROLES["succubus"] & set(other_voters):
            if votee == other_votee:
                # A succubus's choice won the vote: everyone is spared.
                ENTRANCED_DYING.clear()
                return
            ENTRANCED_DYING.difference_update(other_voters)
    if var.ROLES["succubus"] & not_lynching:
        if votee is None:
            # A succubus abstained and the day ended in an abstain: spared.
            ENTRANCED_DYING.clear()
            return
        ENTRANCED_DYING.difference_update(not_lynching)

@event_listener("chk_decision_lynch", priority=5)
def on_chk_decision_lynch(evt, cli, var, voters):
    # a different event may override the original votee, but people voting along with succubus
    # won't necessarily know that, so base whether or not they risk death on the person originally voted
    _kill_entranced_voters(var, evt.params.votelist, evt.params.not_lynching, evt.params.original_votee)

@event_listener("chk_decision_abstain")
def on_chk_decision_abstain(evt, cli, var, not_lynching):
    _kill_entranced_voters(var, evt.params.votelist, not_lynching, None)
# entranced logic should run after team wins have already been determined (aka run last)
# we do not want to override the win conditions for neutral roles should they win while entranced
# For example, entranced monsters should win with other monsters should monsters win, and be
# properly credited with a team win in that event.
@event_listener("player_win", priority=6)
def on_player_win(evt, var, user, role, winner, survived):
    nick = user.nick
    if nick in ENTRANCED:
        evt.data["special"].append("entranced")
        # Entranced players only win with the succubi (true neutrals keep their own win).
        if winner != "succubi" and role not in var.TRUE_NEUTRAL_ROLES:
            evt.data["won"] = False
        else:
            evt.data["iwon"] = True
    if role == "succubus" and winner == "succubi":
        evt.data["won"] = True

@event_listener("chk_win", priority=2)
def on_chk_win(evt, cli, var, rolemap, mainroles, lpl, lwolves, lrealwolves):
    # Succubi win when every other living player is entranced.
    lsuccubi = len(rolemap.get("succubus", ()))
    lentranced = len(ENTRANCED - var.DEAD)
    if lsuccubi and var.PHASE == "day" and lpl - lsuccubi == lentranced:
        evt.data["winner"] = "succubi"
        evt.data["message"] = messages["succubus_win"].format(plural("succubus", lsuccubi), plural("has", lsuccubi), plural("master's", lsuccubi))

@event_listener("can_exchange")
def on_can_exchange(evt, var, user, target):
    # Succubi can never be part of an exchange-totem swap.
    if user.nick in var.ROLES["succubus"] or target.nick in var.ROLES["succubus"]:
        evt.prevent_default = True
        evt.stop_processing = True
@event_listener("del_player")
def on_del_player(evt, var, user, mainrole, allroles, death_triggers):
    """Handle a succubus dying: release (or kill) their entranced thralls."""
    global ALL_SUCC_IDLE
    if "succubus" not in allroles:
        return
    if user.nick in VISITED:
        # if it's night, also unentrance the person they visited
        if var.PHASE == "night" and var.GAMEPHASE == "night":
            if VISITED[user.nick] in ENTRANCED:
                ENTRANCED.discard(VISITED[user.nick])
                ENTRANCED_DYING.discard(VISITED[user.nick])
                pm(user.client, VISITED[user.nick], messages["entranced_revert_win"])
        del VISITED[user.nick]
    # if all succubi are dead, one of two things happen:
    # 1. if all succubi idled out (every last one of them), un-entrance people
    # 2. otherwise, kill all entranced people immediately, they still remain entranced (and therefore lose)
    # death_triggers is False for an idle-out, so we use that to determine which it is
    if death_triggers:
        ALL_SUCC_IDLE = False
    if len(var.ROLES["succubus"]) == 0:
        entranced_alive = {users._get(x) for x in ENTRANCED}.difference(evt.params.deadlist) # FIXME
        if ALL_SUCC_IDLE:
            while ENTRANCED:
                e = ENTRANCED.pop()
                pm(user.client, e, messages["entranced_revert_win"])
        elif entranced_alive:
            msg = []
            # Run in two loops so we can play the message for everyone dying at once before we actually
            # kill any of them off (if we killed off first, the message order would be wrong wrt death chains)
            comma = ""
            if var.ROLE_REVEAL in ("on", "team"):
                comma = ","
            for e in entranced_alive:
                if var.ROLE_REVEAL in ("on", "team"):
                    role = get_reveal_role(e.nick)
                    an = "n" if role.startswith(("a", "e", "i", "o", "u")) else ""
                    msg.append("\u0002{0}\u0002, a{1} \u0002{2}\u0002".format(e, an, role))
                else:
                    msg.append("\u0002{0}\u0002".format(e))
            # Natural-language list join for 1, 2 or many names.
            if len(msg) == 1:
                channels.Main.send(messages["succubus_die_kill"].format(msg[0] + comma))
            elif len(msg) == 2:
                channels.Main.send(messages["succubus_die_kill"].format(msg[0] + comma + " and " + msg[1] + comma))
            else:
                channels.Main.send(messages["succubus_die_kill"].format(", ".join(msg[:-1]) + ", and " + msg[-1] + comma))
            for e in entranced_alive:
                # to ensure we do not double-kill someone, notify all child deaths that we'll be
                # killing off everyone else that is entranced so they don't need to bother
                dlc = list(evt.params.deadlist)
                dlc.extend(entranced_alive - {e})
                debuglog("{0} (succubus) SUCCUBUS DEATH KILL: {1} ({2})".format(user, e, get_main_role(e)))
                evt.params.del_player(e, end_game=False, killer_role="succubus",
                    deadlist=dlc, original=evt.params.original, ismain=False)
            evt.data["pl"] = evt.params.refresh_pl(evt.data["pl"])
        ENTRANCED_DYING.clear()
@event_listener("transition_day_resolve", priority=1)
def on_transition_day_resolve(evt, var, victim):
    # A succubus who was out visiting is "not home" when wolves attack her.
    if victim.nick in var.ROLES["succubus"] and VISITED.get(victim.nick) and victim not in evt.data["dead"] and victim in evt.data["onlybywolves"]:
        # TODO: check if this is necessary for succubus, it's to prevent a message playing if alpha bites
        # a harlot that is visiting a wolf, since the bite succeeds in that case.
        if victim not in evt.data["bitten"]:
            evt.data["message"].append(messages["target_not_home"])
            evt.data["novictmsg"] = False
        evt.stop_processing = True
        evt.prevent_default = True

@event_listener("transition_day_resolve_end", priority=1)
def on_transition_day_resolve_end(evt, var, victims):
    # A succubus visiting someone the wolves killed dies alongside them.
    for victim in victims + evt.data["bitten"]:
        if victim in evt.data["dead"] and victim.nick in VISITED.values() and (victim in evt.data["bywolves"] or victim in evt.data["bitten"]):
            for succ in VISITED:
                user = users._get(succ) # FIXME
                if VISITED[succ] == victim.nick and user not in evt.data["bitten"] and user not in evt.data["dead"]:
                    if var.ROLE_REVEAL in ("on", "team"):
                        evt.data["message"].append(messages["visited_victim"].format(succ, get_reveal_role(succ)))
                    else:
                        evt.data["message"].append(messages["visited_victim_noreveal"].format(succ))
                    evt.data["bywolves"].add(user)
                    evt.data["onlybywolves"].add(user)
                    evt.data["dead"].append(user)

@event_listener("night_acted")
def on_night_acted(evt, var, user, actor):
    if VISITED.get(user.nick):
        evt.data["acted"] = True

@event_listener("chk_nightdone")
def on_chk_nightdone(evt, var):
    # Night ends once every succubus has visited or passed.
    evt.data["actedcount"] += len(VISITED)
    evt.data["nightroles"].extend(get_all_players(("succubus",)))

@event_listener("targeted_command")
def on_targeted_command(evt, cli, var, cmd, actor, orig_target, tags):
    # Entranced players cannot take detrimental actions against a succubus.
    if "beneficial" not in tags and actor in ENTRANCED and evt.data["target"] in var.ROLES["succubus"]:
        try:
            what = evt.params.action
        except AttributeError:
            what = cmd
        pm(cli, actor, messages["no_acting_on_succubus"].format(what))
        evt.stop_processing = True
        evt.prevent_default = True
@event_listener("transition_night_end", priority=2)
def on_transition_night_end(evt, var):
    # Send each succubus their night prompt plus a shuffled player list
    # (fellow succubi annotated so they know not to waste a visit).
    succubi = get_all_players(("succubus",))
    for succubus in succubi:
        pl = get_players()
        random.shuffle(pl)
        pl.remove(succubus)
        to_send = "succubus_notify"
        if succubus.prefers_simple():
            to_send = "succubus_simple"
        succ = []
        for p in pl:
            if p in succubi:
                succ.append("{0} (succubus)".format(p))
            else:
                succ.append(p.nick)
        succubus.send(messages[to_send], "Players: " + ", ".join(succ), sep="\n")

@event_listener("begin_day")
def on_begin_day(evt, var):
    # Night actions reset each day.
    VISITED.clear()
    ENTRANCED_DYING.clear()

@event_listener("transition_day", priority=2)
def on_transition_day(evt, var):
    # Kill everyone marked by _kill_entranced_voters, bypassing protections.
    for v in ENTRANCED_DYING:
        user = users._get(v) # FIXME
        var.DYING.add(user) # indicate that the death bypasses protections
        evt.data["victims"].append(user)
        evt.data["onlybywolves"].discard(user)
        # we do not add to killers as retribution totem should not work on entranced not following succubus

@event_listener("get_special")
def on_get_special(evt, var):
    evt.data["special"].update(get_players(("succubus",)))

@event_listener("vg_kill")
def on_vg_kill(evt, var, ghost, target):
    # An entranced vengeful ghost cannot target succubi.
    if ghost.nick in ENTRANCED:
        evt.data["pl"] -= var.ROLES["succubus"]
@event_listener("rename_player")
def on_rename(evt, cli, var, prefix, nick):
    """Propagate a nick change (prefix -> nick) through all succubus state."""
    for tracker in (ENTRANCED, ENTRANCED_DYING):
        if prefix in tracker:
            tracker.remove(prefix)
            tracker.add(nick)
    # Rebuild the visit map with the renamed nick on either side of each entry.
    remapped = {
        (nick if visitor == prefix else visitor): (nick if visited == prefix else visited)
        for visitor, visited in VISITED.items()
    }
    VISITED.clear()
    VISITED.update(remapped)
@event_listener("reset")
def on_reset(evt, var):
    # Clear all succubus state between games.
    global ALL_SUCC_IDLE
    ALL_SUCC_IDLE = True
    ENTRANCED.clear()
    ENTRANCED_DYING.clear()
    VISITED.clear()

@event_listener("revealroles")
def on_revealroles(evt, var, wrapper):
    # Admin debug output; \u0002 is IRC bold.
    if ENTRANCED:
        evt.data["output"].append("\u0002entranced players\u0002: {0}".format(", ".join(ENTRANCED)))
    if ENTRANCED_DYING:
        evt.data["output"].append("\u0002dying entranced players\u0002: {0}".format(", ".join(ENTRANCED_DYING)))

# vim: set sw=4 expandtab:
| StarcoderdataPython |
4825437 | <reponame>ducluongtran9121/Web-Application-Project
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.db import models
# Create your models here.
class MemberManager(BaseUserManager):
    """Manager creating Member accounts keyed by email with a member code."""

    def create_user(self, code, email, name, password=None):
        """Create and persist a regular member; email is required."""
        if not email:
            raise ValueError("Users must have an email")
        email = self.normalize_email(email)
        user = self.model(code = code,email=email, name=name)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, code, email, name, password):
        """Create a member with Django admin rights (staff + superuser)."""
        user = self.create_user(code, email, name, password)
        user.is_superuser = True
        user.is_staff = True
        user.save(using=self._db)
        return user
class Member(AbstractBaseUser, PermissionsMixin):
    """Custom user model: login by email, identified by a unique member code."""

    code = models.CharField(max_length=20, unique=True, blank=False, null=False)
    name = models.CharField(max_length=200)
    GENDER_CHOICES = (
        ('M', 'Male'),
        ('F', 'Female')
    )
    gender = models.CharField(
        max_length=1, choices=GENDER_CHOICES, default='M')
    email = models.EmailField(max_length=255, unique=True)
    # Avatar image; nullable, so it may be absent.
    image = models.ImageField(upload_to="img/", null=True)
    is_lecturer = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)

    objects = MemberManager()

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['code', 'name']

    def __str__(self):
        return "%s - %s" % (self.name, self.code)

    def delete(self, using=None, keep_parents=False):
        """Delete the row, removing the uploaded image file first (if any).

        Guards against an empty image field (it is nullable) and forwards
        ``using``/``keep_parents`` to ``Model.delete`` instead of dropping them.
        """
        if self.image:
            self.image.storage.delete(self.image.name)
        super().delete(using=using, keep_parents=keep_parents)

    class Meta:
        ordering = ['code']
| StarcoderdataPython |
4970914 | <filename>examples/idioms/programs/094.1101-print-type-of-variable.py
"""Print type of variable.
Print the name of the type of _x. Explain if it is a static type or dynamic type.
This may not make sense in all languages.
Source: programming-idioms.org
"""
# Implementation author: nickname
# Created on 2016-02-18T16:58:02.394021Z
# Last modified on 2016-10-26T12:12:01.566764Z
# Version 2
# type(x) yields the *runtime* (dynamic) type of the object bound to x;
# Python names carry no static type, so there is nothing else to print.
print(type(x))
| StarcoderdataPython |
48038 | #-*- coding:utf-8 -*-
from __future__ import print_function
import os,sys,sip,time
from datetime import datetime,timedelta
from qtpy.QtWidgets import QTreeWidgetItem,QMenu,QApplication,QAction,QMainWindow
from qtpy import QtGui,QtWidgets
from qtpy.QtCore import Qt,QUrl,QDate
from Graph import graphpage
from layout import Ui_MainWindow
from pandas import DataFrame as df
import pandas as pd
import tushare as ts
import pickle
import numpy as np
# Module-level list; appears unused -- MyUi.__init__ and graphmerge shadow it
# with locals of the same name. Presumably a leftover -- TODO confirm before removing.
list1 = []
class MyUi(QMainWindow):
    """Main tuchart window.

    Left tree (treeWidget): stock/industry browser built from tushare data
    cached in the local "class"/"time" pickle files. Query basket
    (treeWidget_2): user-selected (stock, series) pairs. The arrow button
    renders the selected queries into "render.html" via graphpage() and
    reloads the embedded web view.
    """
    def __init__(self):
        """Load (or refresh) the cached industry table, then wire all widgets."""
        super(MyUi, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        cwd = os.getcwd()
        cwd = str(cwd)
        # Cache policy: refresh the tushare industry table at most every 12h.
        if os.path.isfile(cwd+"/time"):
            with open("time","rb") as outfile:#reads the timestamp of the last refresh
                history = pickle.load(outfile)
            if (datetime.now()-history).total_seconds()<43200: #measures if time elapse>12 hours
                print("Less than 12 hours. Loading previously saved Pickle...")
            else:
                print("More than 12 hours. Updating Pickle...")
                data = ts.get_industry_classified()
                with open("class","wb+") as outfile:
                    pickle.dump(data,outfile)
                now = datetime.now()
                with open("time", "wb+") as outfile: #update time
                    pickle.dump(now, outfile)
        else:
            print("No Pickle found!") #If this is first time using tuchart in this directory
            data = df()
            data = ts.get_industry_classified()
            with open('class', 'wb+') as outfile: #records pickle
                pickle.dump(data, outfile)
            now = datetime.now()
            with open("time", "wb+") as outfile:
                pickle.dump(now,outfile)
        with open("class", "rb") as infile: # load the cached industry table
            series = pickle.load(infile)
        #series = pd.read_json(cwd + "\\class.json")
        #series = ts.get_industry_classified()
        series = pd.DataFrame(series)
        curdate = time.strftime("%Y/%m/%d") # gets current time to put into dateedit
        curdateQ = QDate.fromString(curdate,"yyyy/MM/dd")
        dateobj = datetime.strptime(curdate, "%Y/%m/%d")#converts to datetime object
        past = dateobj - timedelta(days = 7) #minus a week to start date
        pasttime = datetime.strftime(past, "%Y/%m/%d")
        pastQ = QDate.fromString(pasttime,"yyyy/MM/dd") #convert to qtime so that widget accepts the values
        pastL = dateobj - timedelta(days=30) # minus a month to start date
        pasttimeL = datetime.strftime(pastL, "%Y/%m/%d")
        pastQL = QDate.fromString(pasttimeL, "yyyy/MM/dd")
        # Prepend the broad market indexes so they show up as a category.
        np_indexes = np.array([['sh', '上证指数', '大盘指数'],
                               ['sz', '深证成指', '大盘指数'],
                               ['hs300', '沪深300指数', '大盘指数'],
                               ['sz50', '上证50', '大盘指数'],
                               ['zxb', '中小板', '大盘指数'],
                               ['cyb', '创业板', '大盘指数']])
        indexes = df(data=np_indexes,
                     index=range(5000, 5006),
                     columns=["code", "name", "c_name"])
        series = indexes.append(series)
        list1_bfr = series["c_name"].tolist() #Get industry categories. Filters out redundant ones
        # De-duplicate while preserving first-seen order.
        list1 = list(set(list1_bfr))
        list1.sort(key=list1_bfr.index)
        #w = database()
        #zsparent = QTreeWidgetItem(self.ui.treeWidget)
        #zsparent.setText(0,"股票指数")
        #zsnames =["上证指数-sh","深圳成指-sz","沪深300指数-hs300","上证50-"]
        self.init_treeWidget(list1,series)
        self.ui.treeWidget.setContextMenuPolicy(Qt.CustomContextMenu)
        self.ui.treeWidget.customContextMenuRequested.connect(self.openMenu)
        #self.ui.webView.setGeometry(QtCore.QRect(0, 30,1550, 861))
        file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "render.html")) #path to read html file
        local_url = QUrl.fromLocalFile(file_path)
        self.ui.webView.load(local_url)
        #self.ui.commandLinkButton.setFixedSize(50, 50)
        self.ui.search_btn.clicked.connect(lambda: self.search_comp(series))
        self.ui.init_code_btn.clicked.connect(lambda: self.code_sort_tree(series))
        self.ui.init_category_btn.clicked.connect(lambda: self.init_treeWidget(list1, series))
        self.ui.commandLinkButton.clicked.connect(self.classify) #when the arrow button is clicked, trigger events
        #self.ui.commandLinkButton.clicked.connect(lambda action: self.classify(action, self.ui.treewidget))
        # QSizePolicy: keep hidden widgets' space reserved (PyQt5-only API).
        try:
            retain_size = self.ui.dateEdit_2.sizePolicy()
            retain_size.setRetainSizeWhenHidden(True)
            self.ui.dateEdit_2.setSizePolicy(retain_size)
            retain_size = self.ui.comboBox.sizePolicy()
            retain_size.setRetainSizeWhenHidden(True)
            self.ui.comboBox.setSizePolicy(retain_size)
            retain_size = self.ui.label_2.sizePolicy()
            retain_size.setRetainSizeWhenHidden(True)
            self.ui.label_2.setSizePolicy(retain_size)
        except AttributeError:
            print("No PYQT5 Binding! Widgets might be deformed")
        self.ui.dateEdit.setDate(pastQL)
        self.ui.dateEdit_2.setDate(curdateQ)#populate widgets
        self.ui.dateEdit.setCalendarPopup(True)
        self.ui.dateEdit_2.setCalendarPopup(True)
        self.ui.comboBox.addItems(["D", "W", "M", "5", "15", "30", "60"])
        self.ui.treeWidget_2.setDragDropMode(self.ui.treeWidget_2.InternalMove)
        self.ui.treeWidget_2.setContextMenuPolicy(Qt.CustomContextMenu)
        self.ui.treeWidget_2.customContextMenuRequested.connect(self.openWidgetMenu)
        #self.ui.toolbutton.clicked.connect(lambda action: self.graphmerge(action, CombineKeyword))
        self.ui.combobox.currentIndexChanged.connect(lambda: self.modifycombo(pastQL,pastQ))
    def init_treeWidget(self, list1, series):
        """Rebuild the left tree grouped by industry category (list1)."""
        self.ui.treeWidget.clear()
        for j in list1:
            parent = QTreeWidgetItem(self.ui.treeWidget) #populate treewidget with names
            parent.setText(0,j)
            var = series.loc[series["c_name"] == j]
            list2 = var["code"].tolist()
            name = var["name"].tolist()
            #var = showcollection(i) #Display database items
            for idx,val in enumerate(list2):
                child = QTreeWidgetItem(parent)
                child.setText(0, name[idx]+"-"+val)
                #for i in Drag:
                    #grandson = QTreeWidgetItem(child) #Commented out because increases program response time
                    #grandson.setText(0, i)
        #self.ui.treeWidget.itemDoubleClicked.connect(self.onClickItem) #Display Collection items
    def code_sort_tree(self, companies):
        """Rebuild the left tree as one flat list sorted by stock code."""
        self.ui.treeWidget.clear()
        sorted_comps = companies.sort_values(["code"])
        code_list = sorted_comps["code"].tolist()
        name_list = sorted_comps["name"].tolist()
        shares_parent = QTreeWidgetItem(self.ui.treeWidget)
        shares_parent.setText(0, "个股行情")
        for idx, val in enumerate(code_list):
            child = QTreeWidgetItem(shares_parent)
            child.setText(0, name_list[idx] + "-" + str(val))
        self.ui.treeWidget.expandToDepth(0)
    def search_comp(self, companies):
        """Filter the left tree by substring match on code or name."""
        self.ui.treeWidget.clear()
        text = self.ui.search_lineEdit.text()
        # Matches in both columns are concatenated; a row matching both
        # appears twice -- NOTE(review): dedupe may be intended.
        filtered_codes = companies[companies['code'].str.contains(text)]
        filtered_names = companies[companies['name'].str.contains(text)]
        filtered_comps = filtered_codes.append(filtered_names)
        code_list = filtered_comps["code"].tolist()
        name_list = filtered_comps["name"].tolist()
        parent = QTreeWidgetItem(self.ui.treeWidget)
        parent.setText(0, "搜索结果")
        for idx, val in enumerate(code_list):
            child = QTreeWidgetItem(parent)
            child.setText(0, name_list[idx] + "-" + str(val))
        self.ui.treeWidget.expandToDepth(0)
    def modifycombo(self,pastQL,pastQ):
        """Reconfigure date/interval widgets when the chart-type combo changes.

        Each branch also clears the query basket to avoid mixing queries that
        expect different option values.
        """
        if self.ui.combobox.currentText()=="复权": #if 复权 is selected, clear all existing queries to avoid value conflict
            self.ui.label_2.show()
            self.ui.dateEdit_2.show()
            self.ui.dateEdit.setDate(pastQL)
            self.ui.interval_label.show()
            self.ui.comboBox.show()
            self.ui.comboBox.clear()
            self.ui.comboBox.addItems(["hfq", "qfq"])
            self.ui.treeWidget_2.clear()
        if self.ui.combobox.currentText()=="K线":
            self.ui.label_2.show()
            self.ui.dateEdit_2.show()
            self.ui.dateEdit.setDate(pastQL)
            self.ui.interval_label.show()
            self.ui.comboBox.show()
            self.ui.comboBox.clear()
            self.ui.comboBox.addItems(["D", "W", "M", "5", "15", "30", "60"])#same as above
            self.ui.treeWidget_2.clear()
        if self.ui.combobox.currentText()=="分笔数据":
            self.ui.interval_label.hide()
            self.ui.comboBox.hide()
            self.ui.label_2.hide()
            self.ui.dateEdit_2.hide()
            self.ui.dateEdit.setDate(pastQ)
            self.ui.treeWidget_2.clear()
        if self.ui.combobox.currentText()=="历史分钟":
            self.ui.interval_label.hide()
            self.ui.comboBox.show()
            self.ui.comboBox.clear()
            self.ui.comboBox.addItems(["1min","5min","15min","30min","60min"])
            self.ui.label_2.hide()
            self.ui.dateEdit_2.hide()
            self.ui.dateEdit.setDate(pastQ)
            self.ui.treeWidget_2.clear()
        if self.ui.combobox.currentText()==u"十大股东":
            self.ui.interval_label.hide()
            self.ui.comboBox.hide()
            self.ui.label_2.hide()
            self.ui.dateEdit_2.hide()
            self.ui.treeWidget_2.clear()
    def openMenu(self,position):
        """Right-click on a stock: offer the series valid for the chart type."""
        indexes = self.ui.treeWidget.selectedIndexes()
        item = self.ui.treeWidget.itemAt(position)
        db_origin = ""
        #if item.parent():
        #    db_origin = item.parent().text(0)
        collec = item.text(0)
        if len(indexes) > 0:
            # level 0 = category header (no menu); deeper = an actual stock.
            level = 0
            index = indexes[0]
            while index.parent().isValid():
                index = index.parent()
                level = level + 1
            menu = QMenu()
            #print((collec, db_origin))
            if level ==0:
                pass
            else:
                #keyarray = GetKeys(collec, db_origin)
                #if "Open" in keyarray:
                if self.ui.combobox.currentText()==u"K线":
                    menu.addAction(QAction("Kline", menu, checkable=True))
                    menu.addAction(QAction("Open", menu, checkable=True))
                    menu.addAction(QAction("Close", menu, checkable=True))#open up different menu with different kind of graphs
                    menu.addAction(QAction("High", menu, checkable=True))
                    menu.addAction(QAction("Low", menu, checkable=True))
                    menu.addAction(QAction("Volume", menu, checkable=True))
                    #menu.addAction(QAction("P_change", menu, checkable=True))
                    #menu.addAction(QAction("Turnover",menu,checkable=True))
                if self.ui.combobox.currentText()==u"复权":
                    menu.addAction(QAction("Kline", menu, checkable=True))
                    menu.addAction(QAction("Open", menu, checkable=True))
                    menu.addAction(QAction("Close", menu, checkable=True))
                    menu.addAction(QAction("High", menu, checkable=True))
                    menu.addAction(QAction("Low", menu, checkable=True))
                    menu.addAction(QAction("Volume", menu, checkable=True))
                    menu.addAction(QAction("Amount", menu, checkable=True))
                if self.ui.combobox.currentText()==u"分笔数据":
                    menu.addAction(QAction("分笔", menu, checkable=True))
                if self.ui.combobox.currentText()==u"历史分钟":
                    menu.addAction(QAction("Kline", menu, checkable=True))
                    menu.addAction(QAction("Open", menu, checkable=True))
                    menu.addAction(QAction("Close", menu, checkable=True))
                    menu.addAction(QAction("High", menu, checkable=True))
                    menu.addAction(QAction("Low", menu, checkable=True))
                    menu.addAction(QAction("Volume", menu, checkable=True))
                    menu.addAction(QAction("Amount", menu, checkable=True))
                if self.ui.combobox.currentText()==u"十大股东":
                    menu.addAction(QAction("季度饼图", menu, checkable=True))
                    #menu.addAction(QAction("持股比例", menu, checkable=True))
                #for g in keyarray:
                    #menu.addAction(QAction(g, menu, checkable=True))
                menu.triggered.connect(lambda action: self.methodSelected(action, collec))
            menu.exec_(self.ui.treeWidget.viewport().mapToGlobal(position))
    def methodSelected(self, action, collec):
        """Append the chosen (stock, series) pair to the query basket."""
        # print(action.text()) #Choice
        # if (self.ui.treewidget.count() == 5):
        #    self.ui.label.setText("Maximum number of queries")
        #    return
        # self.ui.label.setText("")
        Choice = action.text()
        Stock = collec
        # print(collec) #Stock Name
        # print(db_origin) #DataBase name
        # list1 = [self.tr(Stock+"-"+Choice+"-"+db_origin)]
        # self.ui.treewidget.addItems(list1)
        parent = QTreeWidgetItem(self.ui.treeWidget_2)
        parent.setText(0, Stock+ "-" + Choice)
    def openWidgetMenu(self,position):
        """Right-click on a basket entry: offer deletion."""
        indexes = self.ui.treeWidget_2.selectedIndexes()
        item = self.ui.treeWidget_2.itemAt(position)
        if item == None:
            return
        #item = self.ui.listWidget.itemAt(position)
        if len(indexes) > 0:
            menu = QMenu()
            menu.addAction(QAction("Delete", menu,checkable = True))#This function is perhaps useless
            #menu.triggered.connect(self.eraseItem)
            item = self.ui.treeWidget_2.itemAt(position)
            #collec = str(item.text())
            menu.triggered.connect(lambda action: self.ListMethodSelected(action, item))
            menu.exec_(self.ui.treeWidget_2.viewport().mapToGlobal(position))
    def ListMethodSelected(self, action, item):
        """Dispatch basket context-menu actions (only "Delete" is offered)."""
        if action.text() == "Delete":
            self.eraseItem()
        # NOTE(review): "Combine" is never added to the menu, so this branch
        # is currently dead; it also reads a global CombineKeyword.
        if action.text() == "Combine":
            global CombineKeyword
            collec = str(item.text())
            CombineKeyword.append(collec)#Useless function(maybe?)
            list1 = [self.tr(collec)]
            self.ui.listwidget.addItems(list1)
            self.eraseItem()
    def eraseItem(self):
        """Delete the selected basket entries."""
        for x in self.ui.treeWidget_2.selectedItems():#delete with write click menu
            #item = self.ui.treewidget.takeItem(self.ui.treewidget.currentRow())
            sip.delete(x)
            #item.delete
    def classify(self, folder):
        """Render all basket queries into the web view.

        `folder` is the clicked-signal payload (unused) -- TODO confirm.
        """
        startdate = self.ui.dateEdit.date()
        startdate = startdate.toPyDate()
        startdate = startdate.strftime("%Y/%m/%d")#converts date from dateedit to tushare readable date
        enddate = self.ui.dateEdit_2.date()
        enddate = enddate.toPyDate()
        enddate = enddate.strftime("%Y/%m/%d")
        option = self.ui.comboBox.currentText()
        option = str(option)
        #if (self.ui.treewidget) == 0:
            #self.ui.label.setText("Need to select at least one query")
            #return
        root = self.ui.treeWidget_2.invisibleRootItem()# This is for iterating child items
        child_count = root.childCount()
        texts = []
        if child_count==0:
            return
        for i in range(child_count):
            item = root.child(i)
            text = item.text(0)#with 3 part'stock_name'+'-'+'code'+'-'+action
            texts.append(text)
        labels = [k for k in texts]
        #items = ([x.encode("utf-8") for x in labels])
        width = self.ui.webView.width()#give width and height of user's screen so that graphs can be generated with dynamic size
        height = self.ui.webView.height()
        mode_combo = self.ui.combobox.currentText()
        graphpage(labels,mode_combo, startdate,enddate,option,width, height)#labels: chart mode (复权/K线/分笔); option: hfq, qfq or 15, 30, D, etc
        self.ui.webView.reload()#refreshes webengine
        self.ui.webView.repaint()
        self.ui.webView.update()
    def graphmerge(self, combineKeyword):
        """Join keywords into a single "&-"-separated spec string."""
        sth = ""
        for i in combineKeyword:
            if sth == "":
                sth = sth + i
            else :
                sth = sth + "\n" + "&"+ "-"+i
        list1 = sth
        return sth
        # NOTE(review): everything below the return is unreachable dead code.
        global CombineKeyword
        CombineKeyword = []
        self.ui.listwidget.clear() #combine stuff so that different graphs can be drawn together
# Application entry point: build the Qt app, show the main window, and run
# the event loop until the window closes.
app = QApplication(sys.argv)
w = MyUi()
w.show()
sys.exit(app.exec_())
| StarcoderdataPython |
6595633 | import cupy as np
def soft_py(x, tau):
    """Element-wise soft-thresholding: sign(x) * max(|x| - tau, 0)."""
    return np.sign(x) * np.maximum(np.abs(x) - tau, 0)
def ht3(x, ax, shift, thresh):
    """One level of a soft-thresholded Haar transform along axis *ax* of a 3-D array.

    Splits x into even/odd samples along `ax` and returns:
      w1 -- approximation coefficients C*(odd + even)
      w2 -- detail coefficients soft_py(C*(odd - even), thresh)
    When *shift* is True the array is circularly shifted by one sample first
    (the caller averages both alignments).
    NOTE(review): the ax == 1 slices were corrupted in the original source
    (mangled into address-like tokens); restored here to the even/odd
    pattern used by the ax == 0 and ax == 2 branches.
    """
    C = 1./np.sqrt(2.)
    if shift == True:
        x = np.roll(x, -1, axis = ax)
    if ax == 0:
        w1 = C*(x[1::2,:,:] + x[0::2, :, :])
        w2 = soft_py(C*(x[1::2,:,:] - x[0::2, :, :]), thresh)
    elif ax == 1:
        w1 = C*(x[:, 1::2, :] + x[:, 0::2, :])
        w2 = soft_py(C*(x[:, 1::2, :] - x[:, 0::2, :]), thresh)
    elif ax == 2:
        w1 = C*(x[:,:,1::2] + x[:,:, 0::2])
        w2 = soft_py(C*(x[:,:,1::2] - x[:,:,0::2]), thresh)
    return w1, w2
def iht3(w1, w2, ax, shift, shape):
    """Invert one Haar level produced by ht3 along axis *ax*.

    Interleaves even samples C*(w1 - w2) and odd samples C*(w1 + w2) back
    into an array of *shape*, and undoes ht3's circular shift when *shift*
    is True.
    NOTE(review): the ax == 1 assignments were corrupted in the original
    source; restored to match the ax == 0 / ax == 2 even-odd pattern.
    """
    C = 1./np.sqrt(2.)
    y = np.zeros(shape)
    x1 = C*(w1 - w2); x2 = C*(w1 + w2);
    if ax == 0:
        y[0::2, :, :] = x1
        y[1::2, :, :] = x2
    if ax == 1:
        y[:, 0::2, :] = x1
        y[:, 1::2, :] = x2
    if ax == 2:
        y[:, :, 0::2] = x1
        y[:, :, 1::2] = x2
    if shift == True:
        y = np.roll(y, 1, axis = ax)
    return y
def iht3_py2(w1, w2, ax, shift, shape):
    """Batched variant of iht3 for arrays with a leading batch dimension.

    Interleaves C*(w1 - w2) and C*(w1 + w2) along axis ax + 1 by stacking on
    a fresh axis and reshaping back to *shape*; undoes the shift if requested.
    """
    scale = 1. / np.sqrt(2.)
    even = scale * (w1 - w2)
    odd = scale * (w1 + w2)
    stack_axis = ax + 2
    parts = [np.expand_dims(even, stack_axis), np.expand_dims(odd, stack_axis)]
    y = np.reshape(np.concatenate(parts, axis=stack_axis), shape)
    if shift == True:
        y = np.roll(y, 1, axis=ax + 1)
    return y
def tv3dApproxHaar(x, tau, alpha):
    """Approximate 3-D total-variation denoising of *x*.

    Averages shift-invariant single-level Haar soft-thresholding over both
    sample alignments along each of the three axes; the threshold on axis 2
    is scaled by *alpha*.
    """
    ndim = 3
    base_thresh = ndim * tau * (np.sqrt(2) * 2)
    acc = np.zeros_like(x)
    for axis in range(len(x.shape)):
        # Axis 2 (e.g. depth/time) gets its own threshold weighting.
        axis_thresh = base_thresh * (alpha if axis == 2 else 1)
        lo0, hi0 = ht3(x, axis, False, axis_thresh)
        lo1, hi1 = ht3(x, axis, True, axis_thresh)
        acc = acc + iht3(lo0, hi0, axis, False, x.shape)
        acc = acc + iht3(lo1, hi1, axis, True, x.shape)
    # Average over 2 shifts x 3 axes.
    return acc / (2 * ndim)
| StarcoderdataPython |
1825996 | <reponame>NoListen/RL-forest<gh_stars>1-10
# https://github.com/openai/baselines/baselines/ddpg/main.py
# the TensorBoardOutputFormat of logger of baselines is perfect.
import argparse
import time
import os
from tempfile import mkdtemp
import sys
import json
# import gym_starcraft.envs.war_map_battle_env as sc
# import gym_starcraft.envs.dynamic_battle_env as dsc
import gym_starcraft.envs.compound_battle_env as dsc
from RL_forest.ddpg_plant.common.misc_util import (
set_global_seeds,
boolean_flag )
import dynamic_training
from models import Conv_Actor, Conv_Critic, Dynamic_Conv_Actor, Dynamic_Conv_Critic, Dynamic_Actor, Dynamic_Critic
from memory import Memory, CompoundMemory
from RL_forest.ddpg_plant.common.noise import *
import gym
import tensorflow as tf
import os
def run(env_id, seed, noise_type, layer_norm, logdir, evaluation, nb_units, ip, port, dynamic, simple, frame_skip, **kwargs):
    """Build the environment, noise, actor/critic and replay memory, then train.

    Remaining CLI options arrive in **kwargs and are forwarded verbatim to
    dynamic_training.train().
    """
    kwargs['logdir'] = logdir
    print("Well I am going to print the ip", ip)
    # remove evaluation environment.
    if env_id == "StarCraft":
        #if not dynamic:
        #    env = sc.WarMapBattleEnv(ip, port, frame_skip = frame_skip)
        #else:
        env = dsc.CompoundBattleEnv(ip, port, frame_skip = frame_skip, map_types_table=("unit_data",))
    else:
        env = gym.make(env_id)
    # Parse noise_type, e.g. "ou_0.1" or "normal_0.2,none".
    action_noise = None
    nb_actions = env.action_space.shape
    nb_unit_actions = env.nb_unit_actions
    for current_noise_type in noise_type.split(','):
        current_noise_type = current_noise_type.strip()
        if current_noise_type == 'none':
            pass
        elif 'normal' in current_noise_type:
            _, stddev = current_noise_type.split('_')
            action_noise = NormalActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions))
        elif 'ou' in current_noise_type:
            _, stddev = current_noise_type.split('_')
            action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions),
                                                        sigma=float(stddev) * np.ones(nb_actions))
        else:
            raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))
    # Configure components.
    reward_shape = env.reward_shape
    # simple means no convolution
    if simple:
        actor = Dynamic_Actor(nb_unit_actions, layer_norm=layer_norm, time_step=nb_units)
        critic = Dynamic_Critic(layer_norm=layer_norm, time_step=nb_units)
    else:
        critic = Dynamic_Conv_Critic(layer_norm=layer_norm, time_step=nb_units)
        actor = Dynamic_Conv_Actor(nb_unit_actions, layer_norm=layer_norm, time_step=nb_units)
    memory = CompoundMemory(limit=int(1e6), action_shape=env.action_space.shape, observation_shape=env.observation_shape,
                            observation_dtype=env.observation_dtype, reward_shape=reward_shape)
    # Seed everything to make things reproducible.
    tf.reset_default_graph()
    set_global_seeds(seed)
    env.seed(seed)
    # Disable logging for rank != 0 to avoid noise.
    start_time = time.time()
    dynamic_training.train(env=env, action_noise=action_noise, actor=actor, critic=critic, memory=memory,
                           evaluation=evaluation, reward_shape=reward_shape, **kwargs)
    env.close()
    print('total runtime: {}s'.format(time.time() - start_time))
def parse_args():
    """Define and parse the training CLI; returns the options as a dict."""
    p = argparse.ArgumentParser()
    # Environment / connection options.
    p.add_argument('--env-id', type=str, default='StarCraft')
    p.add_argument('--ip', help="server ip")
    p.add_argument('--port', help="server port", type=int, default=11111)
    p.add_argument('--save-epoch-interval', type=int, default=5)
    p.add_argument('--dynamic', default=True)
    p.add_argument('--simple', default=True)
    p.add_argument('--nb-units', type=int, default=3)
    boolean_flag(p, 'render-eval', default=False)
    boolean_flag(p, 'layer-norm', default=True)
    boolean_flag(p, 'render', default=False)
    # Optimisation hyper-parameters.
    p.add_argument('--seed', type=int, default=123457)
    p.add_argument('--critic-l2-reg', type=float, default=1e-3)
    p.add_argument('--batch-size', type=int, default=32)  # per MPI worker
    p.add_argument('--actor-lr', type=float, default=2e-4)
    p.add_argument('--critic-lr', type=float, default=2e-4)
    p.add_argument('--gamma', type=float, default=0.99)
    p.add_argument('--reward-scale', type=float, default=1.)
    p.add_argument('--clip-norm', type=float, default=None)
    # Training-schedule options.
    p.add_argument('--nb-epochs', type=int, default=20)  # with default settings, perform 1M steps total
    p.add_argument('--nb-epoch-cycles', type=int, default=20)
    p.add_argument('--nb-train-steps', type=int, default=32)  # per epoch cycle and MPI worker
    p.add_argument('--nb-eval-cycles', type=int, default=10)  # per epoch cycle and MPI worker
    p.add_argument('--frame-skip', type=int, default=2)
    # parser.add_argument('--nb-rollout-steps', type=int, default=300) # per epoch cycle and MPI worker
    p.add_argument('--noise-type', type=str,
                   default='ou_0.1')  # choices are adaptive-param_xx, ou_xx, normal_xx, none
    p.add_argument('--logdir', type=str, default='checkpoints')
    boolean_flag(p, 'evaluation', default=True)
    return vars(p.parse_args())
if __name__ == '__main__':
    args = parse_args()
    # Figure out what logdir to use.
    if args['logdir'] is None:
        args['logdir'] = os.getenv('OPENAI_LOGDIR')
    # Print and save arguments (sorted for stable, readable output).
    print('Arguments:')
    for key in sorted(args.keys()):
        print('{}: {}'.format(key, args[key]))
    print('')
    if not os.path.exists(args['logdir']):
        os.mkdir(args['logdir'])
    # Persist the run configuration next to the checkpoints for reproducibility.
    if args['logdir']:
        with open(os.path.join(args['logdir'], 'args.json'), 'w') as f:
            json.dump(args, f)
    # Run actual script.
    run(**args)
| StarcoderdataPython |
5019354 | <reponame>DistributedML/Biscotti
import pdb
import pandas as pd
import os
import numpy as np
# input_file_directory = "FedSys_Azure"
# output_file_directory = input_file_directory + "_parsedResults/"
# total_nodes = 100
# numRuns=1
def parse_logs(fname="pingValues"):
    """Read one latency sample (float) per line from *fname* and report stats.

    Prints average, minimum and maximum latency (in that order, matching the
    original output) and returns them as a (avg, min, max) tuple; returns
    None for an empty file instead of crashing.

    Fixes over the original: the minimum was seeded with 2000 (wrong result
    if every sample exceeded 2000 -- same for a 0-seeded maximum with
    negative data), an empty file raised ZeroDivisionError, the file handle
    was never closed, and the hard-coded filename is now a defaulted
    parameter. Unused accepted/rejected counters were removed.
    """
    with open(fname) as infile:
        latencies = [float(line.rstrip('\n')) for line in infile if line.strip()]
    if not latencies:
        print("no latency samples in " + fname)
        return None
    avgLatency = sum(latencies) / len(latencies)
    minLatency = min(latencies)
    maxLatency = max(latencies)
    print(avgLatency)
    print(minLatency)
    print(maxLatency)
    return (avgLatency, minLatency, maxLatency)
# Script entry point: parse the default "pingValues" file in the working directory.
if __name__ == '__main__':
    parse_logs()
| StarcoderdataPython |
55980 | <filename>aplpy/tests/setup_package.py
def get_package_data():
    """Return the package_data mapping for the test subpackage.

    `_ASTROPY_PACKAGE_NAME_` is injected by the astropy build machinery at
    build time; it is not defined in this module.
    """
    patterns = ['coveragerc', 'data/*/*.hdr', 'baseline_images/*.png']
    return {_ASTROPY_PACKAGE_NAME_ + '.tests': patterns}
| StarcoderdataPython |
6468105 | from typing import _Final
from typing import _GenericAlias
from typing import _Immutable
from typing import _SpecialForm
from typing import _tp_cache
from typing import _type_check
__all__ = ["Constant"]
class SubscriptableAlias(_Final, _Immutable, _root=True):
__slots__ = ("_name", "_doc")
def __new__(cls, *args, **kwds):
return super().__new__(cls)
def __init__(self, name, doc):
self._name = name
self._doc = doc
def __eq__(self, other):
if not isinstance(other, _SpecialForm):
return NotImplemented
return self._name == other._name
def __hash__(self):
return hash((self._name,))
def __repr__(self):
return "nibbler." + self._name
def __reduce__(self):
return self._name
def __call__(self, *args, **kwds):
raise TypeError(f"Cannot instantiate {self!r}")
def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance()")
def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass()")
@_tp_cache
def __getitem__(self, parameters):
item = _type_check(parameters, "Static accepts only single type.")
return _GenericAlias(self, (item,))
Constant = SubscriptableAlias("Constant", "Constant marker used by nibbler.")
| StarcoderdataPython |
60344 | from sklearn.base import BaseEstimator
import yake
from ._prep import TextPrep
class YakeTextPrep(TextPrep, BaseEstimator):
    """
    Reduce each document to its meaningful key-phrases using
    [yake](https://github.com/LIAAD/yake).

    Arguments:
        top_n: number of key-phrases to extract per document
        unique: if True, return only the unique words contained in the
            extracted key-phrases (order not guaranteed)
    """

    def __init__(self, top_n: int = 5, unique: bool = False):
        self.top_n = top_n
        self.unique = unique
        self.extractor = yake.KeywordExtractor(top=self.top_n)

    def encode_single(self, text):
        """Return the space-joined key-phrases extracted from *text*."""
        phrases = " ".join(kw for kw, _score in self.extractor.extract_keywords(text))
        if self.unique:
            return " ".join(set(phrases.split(" ")))
        return phrases
| StarcoderdataPython |
5151682 | #!/usr/bin/env python3
from typing import List, Optional
class ListNode:
    """A singly linked list node holding a value and a successor pointer."""
    def __init__(self, val=0, next=None):
        self.val, self.next = val, next
class Solution:
    def deleteMiddle(self, head: "Optional[ListNode]") -> "Optional[ListNode]":
        """Delete the middle node (index len // 2, 0-based) of a linked list.

        Returns the (possibly new) head; a list with zero or one node
        becomes None.

        Fixes over the original: the parameter annotation `[ListNode]` was a
        list literal rather than a type (and forced ListNode to exist at
        definition time) -- replaced with string forward references; the
        count-then-walk double traversal is replaced by a single fast/slow
        pointer pass.
        """
        # Removing the middle of a 0/1-node list empties it.
        if head is None or head.next is None:
            return None
        # `fast` advances two nodes per step, `slow` one; starting `fast`
        # two ahead leaves `slow` just BEFORE the middle when `fast` runs out.
        slow, fast = head, head.next.next
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
        # Unlink the middle node.
        slow.next = slow.next.next
        return head
# Ad-hoc manual driver: builds a one-node list, deletes its middle, and
# prints what remains (nothing, for a single node).
a = Solution()
l1 =ListNode(1)
#l1.next = ListNode(2)
#l1.next.next = ListNode(3)
#l1.next.next.next = ListNode(4)
# NOTE(review): calls the method unbound with `a` passed explicitly as self;
# `a.deleteMiddle(l1)` would be the conventional spelling.
result = Solution.deleteMiddle(a, l1)
while result:
    print(result.val)
    result = result.next
1766346 | #!/usr/bin/python
import time
import sys
import subprocess
import psycopg2
import argparse
# Start time for processing file
def start_time():
    """Capture, print and return the human-readable processing start time."""
    stamp = time.asctime(time.localtime(time.time()))
    print(stamp)
    return stamp
# First Line
def first_line(filename):
    """Validate and parse the header (first) line of *filename*.

    The header must be exactly 50 characters after stripping surrounding
    whitespace; otherwise a message is printed and the process exits.
    On success the 3-character period field (columns 9-12) is printed and
    returned.

    Fix: reads the line directly instead of shelling out to `head -1` via
    subprocess, removing the dependency on a Unix binary (output unchanged).
    """
    with open(filename) as infile:
        firstline = infile.readline().strip()
    if len(firstline) != 50:
        print("Length of first Line is not equal to 50")
        sys.exit()
    else:
        print("Period : " + firstline[9:12])
        return firstline[9:12]
# Last Line
def last_line(filename):
    """Validate and parse the trailer (last) line of *filename*.

    The trailer must be exactly 50 characters after stripping surrounding
    whitespace; otherwise a message is printed and the process exits.
    On success the 11-character record-count field (the final 11 columns)
    is printed and returned.

    Fix: reads the file directly instead of shelling out to `tail -1` via
    subprocess, removing the dependency on a Unix binary (output unchanged).
    """
    with open(filename) as infile:
        lines = infile.readlines()
    lastline = lines[-1].strip() if lines else ""
    if len(lastline) != 50:
        print("Length of Last Line is not equal to 50")
        sys.exit()
    else:
        print("Record Count :  " + lastline[-11:])
        return lastline[-11:]
#Process remaining lines of file
# NOTE(review): Python 2 print statements throughout; this function reads the
# module-global `filename` (set in the __main__ block) rather than taking a
# parameter, and is not called from the __main__ flow -- confirm it is still used.
def remainder_lines():
    """Print the parsed fixed-width fields of every line after the header.

    Lines that are not exactly 50 characters are flagged; for valid lines
    each positional field (vatref, period, turnover, ...) is printed.
    """
    with open(filename) as lines:
        # Skip the header line (handled by first_line()).
        next(lines)
        count = 1
        for line in lines:
            line = line.strip()
            if len(line) != 50:
                count = count + 1
                print count , " : Line not exactly 50"
            else:
                count = count + 1
                print "\n", count
                # Fixed-width field layout of a 50-character record.
                print "Vatref :", line[0:7]
                print "Checkdigits :", line[7:9]
                print "Periods : ", line[9:12]
                print "Record Type :", line[12:14]
                print "Stagger :", line[17:19]
                print "Sic2007 :", line[19:24]
                print "Return Type :", line[24:25]
                print "Turnover :", line[25:36]
                print "Expenditure :", line[36:44]
                print "Date :", line[44:]
# End time for processing file
def end_time():
    """Capture, print and return the human-readable processing end time."""
    stamp = time.asctime(time.localtime(time.time()))
    print(stamp)
    return stamp
#Sending Data to Database
# NOTE(review): connection credentials are hard-coded (password placeholder
# included) -- these should come from configuration/environment, not source.
def db_entry(starttime, endtime, filename):
    """Record one pre-check run (start/end timestamps and filename) in Postgres.

    Creates the SSETS_PRECHECK table on first use and inserts a row via a
    parameterized INSERT (values are not string-interpolated into the SQL).
    """
    connection = psycopg2.connect(database="nsdc", user="postgres", password="<PASSWORD>", host="localhost", port="5432")
    print "Opened database successfully"
    create_db = connection.cursor()
    create_db.execute('''CREATE TABLE IF NOT EXISTS SSETS_PRECHECK
        (id SERIAL PRIMARY KEY NOT NULL,
        start_timestamp VARCHAR(1000) NOT NULL,
        end_timestamp VARCHAR(1000) NOT NULL,
        file VARCHAR(1000)
        )''')
    connection.commit()
    insert_db = connection.cursor()
    insert_db.execute('''INSERT INTO SSETS_PRECHECK (start_timestamp, end_timestamp, file) \
        VALUES (%s, %s, %s )''', (starttime, endtime, filename))
    connection.commit()
    print "Records entered successfully : (StartTime: %s, EndTime: %s, FileName: %s ) " % (starttime, endtime, filename)
    connection.close()
if __name__ == "__main__":
if len(sys.argv[1:]) > 0:
filename = sys.argv[-1]
starttime = start_time()
firstline = first_line(filename)
lastline = last_line(filename)
endtime = end_time()
db_entry(starttime, endtime, filename)
else:
print '''
==> Usage : python before_streamsets.py <filename>
filename - where filename is the file which need to be processed
before its passed for further processing to Streamsets.
'''
sys.exit(2)
| StarcoderdataPython |
12834921 | # Generated by Django 2.2.12 on 2020-05-14 22:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the UserProfile table.

    Generated by Django 2.2.12; generated migrations are normally not
    hand-edited -- create a new migration to change the schema.
    """
    initial = True
    dependencies = [
        ('products', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('email', models.EmailField(max_length=60, unique=True, verbose_name='email')),
                ('username', models.CharField(max_length=15, unique=True, verbose_name='pseudonyme')),
                ('date_joined', models.DateTimeField(auto_now_add=True, verbose_name="date d'inscription")),
                ('last_login', models.DateTimeField(auto_now=True, verbose_name='dernière connexion')),
                ('is_admin', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('is_superuser', models.BooleanField(default=False)),
                ('prenom', models.CharField(default='', max_length=30)),
                ('nom', models.CharField(default='', max_length=50)),
                ('code_postal', models.IntegerField(default=0)),
                ('ville', models.CharField(default='', max_length=80)),
                ('num_rue', models.IntegerField(default=0, verbose_name='numéro')),
                ('nom_rue', models.CharField(default='', max_length=100)),
                ('tel', models.CharField(default='', max_length=10, verbose_name='numéro de téléphone')),
                ('foyer', models.IntegerField(default=0, verbose_name='nombre de personne dans le foyer')),
                ('bebes', models.IntegerField(default=0, verbose_name="nombre d'enfants de moin de 4 ans")),
                ('stripe_id', models.CharField(blank=True, max_length=200, null=True)),
                ('ebooks', models.ManyToManyField(blank=True, to='products.Product')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| StarcoderdataPython |
11295983 | import sublime
import sublime_plugin
from .qpython.qtype import QException
from socket import error as socket_error
import numpy
import datetime
from . import q_chain
from . import QCon as Q
from . import util
#for testing in console
#from qpython import qconnection
#q = qconnection.QConnection(host = 'localhost', port = 5555)
#q.open()
#d = q('.Q.s `a`b`c!1 2 3')
#d = d.decode('utf-8')
#view.show_popup(d)
class QSendRawCommand(q_chain.QChainCommand):
    """Sublime Text command: send a raw query string to the q process bound
    to the current view and surface the result/status in the status bar."""
    def do(self, edit=None, input=None):
        """Resolve (or prompt for) the view's q connection, then send."""
        con = Q.QCon.loadFromViewWithPrompt(self.view)
        if con:
            return self.send(con, input)
    #keep this because it is overwritten in QUpdateCompletionsCommand
    def send(self, con, input):
        return QSendRawCommand.sendAndUpdateStatus(self.view, con, input)
    @staticmethod
    def sendAndUpdateStatus(view, con, input):
        """Execute *input* on *con*, mirroring progress and outcome into the
        view's 'result' and 'q' status fields; re-raises on failure."""
        view.set_status('result', 'executing...')
        try:
            d = QSendRawCommand.executeRaw(con, input)
            view.set_status('result', d['status'])
            view.set_status('q', con.status())
            return d['result']
        except Exception as e:
            sublime.error_message('Error in QSendRawCommand.sendAndUpdateStatus:\n' + str(e))
            view.set_status('result', 'ERROR')
            view.set_status('q', con.status())
            raise e
    @staticmethod
    def executeRaw(con, input):
        """Run *input* on the q connection and measure its cost.

        Protocol: a pre-query samples used memory and (best-effort) arms
        `.st.tmp` capture; the user query is rewritten to assign its value
        into `.st.tmp` when capture is available; a post-query reads memory
        again plus the result's dimensions. Returns
        {'result': str, 'status': str}; q errors and socket errors are
        reported in-band rather than raised. The socket is always closed.
        """
        try:
            q = con.q
            q.open()
            # The caller prefixes input with a 4-char formatter + space
            # (".Q.s " or ".j.j ") -- split it off so the capture assignment
            # can be spliced between formatter and query body.
            input_prefix = input[:5] # probably .Q.s or .j.j -- make more generic later if necessary
            input_body = input[5:]
            mem_query = '@[{.Q.w[][`used]};();0]'
            write_query = '@[{`.st.tmp set x;`sublimeq};();0b]'
            initial_query = '(' + mem_query + ';' + write_query + ')'
            pre_res = q(initial_query)
            try:
                # Capture works only if `.st.tmp set` succeeded (returns `sublimeq).
                write_flag = util.decode(pre_res[1]) == 'sublimeq'
                start_mem = int(util.decode(pre_res[0]))
            except:
                write_flag = False
                start_mem = 0
            input = input_prefix + ('.st.tmp:' if write_flag else '') + input_body
            start_time = datetime.datetime.now()
            res = util.decode(q(input))
            end_time = datetime.datetime.now()
            # Drop hours and sub-millisecond digits from the elapsed time string.
            time = str(end_time - start_time)[2:-3]
            dims_query = '$[@[{`tmp in key x};`.st;0b];" x " sv string (count @[{$[0<=type x; cols x;()]};.st.tmp;()]),count .st.tmp;0]'
            post_query = '(' + mem_query + ';' + dims_query + ')'
            post_res = q(post_query)
            try:
                end_mem = int(util.decode(post_res[0]))
                mem = end_mem - start_mem
                sign = '+' if mem>0 else '-'
                mem = util.format_mem(abs(mem))
                dims = util.decode(post_res[1])
            except:
                sign = ''
                mem = '0'
                dims = '0'
            # Return input itself if no results return (e.g. a query that defines a variable or function)
            # if res in [None, 'None']:
            #    res = input_body
            status = 'Result: ' + dims + ', ' + time + ', ' + sign + mem
            #self.view.set_status('result', 'Result: ' + count + ', ' + time + ', ' + sign + mem)
        except QException as e:
            # q-side evaluation error: report as a q error symbol.
            res = "error: `" + util.decode(e)
            status = "error: `" + util.decode(e)
        except socket_error as serr:
            msg = 'Sublime-q cannot to connect to \n"' + con.h() + '"\n\nError message: ' + str(serr)
            sublime.error_message(msg)
            res = ""
            status = "error: " + str(serr)
        finally:
            q.close()
            #self.view.set_status('q', con.status())
        return {'result': res, 'status': status}
class QSendCommand(QSendRawCommand):
    """Send a query whose result is pretty-printed by q's .Q.s formatter."""
    def do(self, edit=None, input=None):
        """Wrap system commands ("\\x") in value"..." and prefix with .Q.s,
        then delegate to the raw sender."""
        query = input
        if query[0] == "\\":
            query = "value\"\\" + query + "\""
        return super().do(input=".Q.s " + query)
class QSendJsonCommand(QSendRawCommand):
    """Send a query whose result is serialized to JSON by q's .j.j."""
    def do(self, edit=None, input=None):
        """Wrap system commands ("\\x") in value"..." and prefix with .j.j,
        then delegate to the raw sender (which stores the result in .st.tmp
        so dimensions can be read afterwards)."""
        query = input
        if query[0] == "\\":
            query = "value\"\\" + query + "\""
        return super().do(input=".j.j " + query)
| StarcoderdataPython |
3449779 | <gh_stars>0
from setuptools import setup
if __name__ == "__main__":
    # Build/install entry point; all package configuration lives in
    # setup.cfg / pyproject.toml, so setup() takes no arguments.
    try:
        setup()
    except Exception as e:
        # Bug fix: the hint strings were implicitly concatenated with no
        # separators, printing one run-together line. Join them explicitly.
        print(
            f"Exception occurred: {e}\n"
            "Please ensure you have the right version of Python and the most "
            "updated versions of setuptools and wheel:\n"
            "  python --version\n"
            "  pip install wheel setuptools"
        )
        raise
| StarcoderdataPython |
3534716 | import json
import plotly
from plotly.graph_objs import Bar
from flask import Flask
from flask import render_template, request, jsonify
from sklearn.externals import joblib
from sqlalchemy import create_engine
import sys, os
# import libraries
import nltk
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger', 'stopwords'])
import pickle
import pandas as pd
from sqlalchemy import create_engine
import re
import numpy as np
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
from sklearn.multioutput import MultiOutputClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.svm import SVC
lemmatizer = WordNetLemmatizer()
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from sklearn.model_selection import GridSearchCV
app = Flask(__name__)
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
    """Sklearn transformer flagging texts whose sentences open with a verb.

    A text is flagged True when any of its sentences starts with a
    VB/VBP-tagged token or with the retweet marker 'RT'.
    """

    def starting_verb(self, text):
        """Return True if some sentence in *text* starts with a verb or 'RT'."""
        for sentence in nltk.sent_tokenize(text):
            tagged = nltk.pos_tag(tokenize(sentence))
            try:
                first_word, first_tag = tagged[0]
            except IndexError:
                # Empty token list: mirror the original behaviour and bail out.
                return False
            if first_tag in ('VB', 'VBP') or first_word == 'RT':
                return True
        return False

    def fit(self, x, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        flags = pd.Series(X).apply(self.starting_verb)
        return pd.DataFrame(flags)
def tokenize(text):
    """
    Process the text and clean it to prepare it for training.

    Steps: replace URLs with a placeholder, expand English contractions,
    strip non-alphanumeric characters, tokenize, drop stop words and
    lemmatize.

    Inputs:
        text: text to process
    Outputs:
        clean_tokens: list of cleaned, lower-cased, lemmatized tokens
    """
    # this list is copied from source https://stackoverflow.com/questions/43018030/replace-apostrophe-short-words-in-python
    contractions = {
    "aren't": "are not",
    "can't": "cannot",
    "can't've": "cannot have",
    "'cause": "because",
    "could've": "could have",
    "couldn't": "could not",
    "couldn't've": "could not have",
    "didn't": "did not",
    "doesn't": "does not",
    "don't": "do not",
    "hadn't": "had not",
    "hadn't've": "had not have",
    "hasn't": "has not",
    "haven't": "have not",
    "he'd": "he would",
    "he'd've": "he would have",
    "he'll": "he will",
    "he'll've": "he will have",
    "he's": "he is",
    "how'd": "how did",
    "how'd'y": "how do you",
    "how'll": "how will",
    "how's": "how is",
    "i'd": "I would",
    "i'd've": "I would have",
    "i'll": "I will",
    "i'll've": "I will have",
    "i'm": "I am",
    "i've": "I have",
    "isn't": "is not",
    "it'd": "it would",
    "it'd've": "it would have",
    "it'll": "it will",
    "it'll've": "it will have",
    "it's": "it is",
    "let's": "let us",
    "ma'am": "madam",
    "mayn't": "may not",
    "might've": "might have",
    "mightn't": "might not",
    "mightn't've": "might not have",
    "must've": "must have",
    "mustn't": "must not",
    "mustn't've": "must not have",
    "needn't": "need not",
    "needn't've": "need not have",
    "o'clock": "of the clock",
    "oughtn't": "ought not",
    "oughtn't've": "ought not have",
    "shan't": "shall not",
    "sha'n't": "shall not",
    "shan't've": "shall not have",
    "she'd": "she would",
    "she'd've": "she would have",
    "she'll": "she will",
    "she'll've": "she will have",
    "she's": "she has / she is",
    "should've": "should have",
    "shouldn't": "should not",
    "shouldn't've": "should not have",
    "so've": "so have",
    "so's": "so is",
    "that'd": "that would",
    "that'd've": "that would have",
    "that's": "that is",
    "there'd": "there would",
    "there'd've": "there would have",
    "there's": "there is",
    "they'd": "they would",
    "they'd've": "they would have",
    "they'll": "they will",
    "they'll've": "they will have",
    "they're": "they are",
    "they've": "they have",
    "to've": "to have",
    "wasn't": "was not",
    "we'd": "we would",
    "we'd've": "we would have",
    "we'll": "we will",
    "we'll've": "we will have",
    "we're": "we are",
    "we've": "we have",
    "weren't": "were not",
    "what'll": "what will",
    "what'll've": "what will have",
    "what're": "what are",
    "what's": "what is",
    "what've": "what have",
    "when's": "when is",
    "when've": "when have",
    "where'd": "where did",
    "where's": "where is",
    "where've": "where have",
    "who'll": "who will",
    "who'll've": "who will have",
    "who's": "who is",
    "who've": "who have",
    "why's": "why is",
    "why've": "why have",
    "will've": "will have",
    "won't": "will not",
    "won't've": "will not have",
    "would've": "would have",
    "wouldn't": "would not",
    "wouldn't've": "would not have",
    "y'all": "you all",
    "y'all'd": "you all would",
    "y'all'd've": "you all would have",
    "y'all're": "you all are",
    "y'all've": "you all have",
    "you'd": "you would",
    "you'd've": "you would have",
    "you'll": "you will",
    "you'll've": "you will have",
    "you're": "you are",
    "you've": "you have"
    }

    # Replace URLs with a neutral placeholder token.
    url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    detected_urls = re.findall(url_regex, text)
    for url in detected_urls:
        text = text.replace(url, "urlplaceholder")

    # NOTE(review): the pattern only matches the lower-case keys above, so
    # mixed-case contractions (e.g. "Can't") pass through unexpanded.
    pat = re.compile(r"\b(%s)\b" % "|".join(contractions))
    text = pat.sub(lambda m: contractions.get(m.group()), text)
    # Bug fix: the class was '[^ \a-zA-Z0-9]' -- the stray "\a" (BEL) formed
    # the range \x07-z, so punctuation was never actually removed.
    text = re.sub('[^ a-zA-Z0-9]', ' ', text)
    text = re.sub(' +', ' ', text)
    tokens = word_tokenize(text)
    # Bug fix: the stop-word check previously used the raw-cased token, so
    # capitalised stop words ("The", "IS") slipped through; compare the
    # lower-cased form against the lower-case stop-word set.
    clean_tokens = [lemmatizer.lemmatize(tok.lower()) for tok in tokens if tok.lower() not in stop_words]
    return clean_tokens
# load data
# Cleaned messages table produced by the ETL pipeline (process_data.py).
engine = create_engine('sqlite:///../data/DisasterResponse.db')
df = pd.read_sql_table('DisasterResponse', engine)
# load model
# Trained multi-output classification pipeline (train_classifier.py output).
model = joblib.load("../models/classifier.pkl.z")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
    """Landing page: renders two Plotly bar charts summarising the dataset."""
    # extract data needed for visuals
    # Message counts per genre, and per output category (columns 4+ hold the
    # one-hot category labels).
    genre_counts = df.groupby('genre').count()['message']
    genre_names = list(genre_counts.index)
    cat_counts = df[df.columns[4:]].sum()
    cat_names = df.columns[4:]
    # create visuals
    graphs = [
        {
            'data': [
                Bar(
                    x=genre_names,
                    y=genre_counts
                )
            ],
            'layout': {
                'title': 'Distribution of Message Genres',
                'yaxis': {
                    'title': "Count"
                },
                'xaxis': {
                    'title': "Genre"
                }
            }
        },
        {
            'data': [
                Bar(
                    x=cat_names,
                    y=cat_counts
                )
            ],
            'layout': {
                'title': 'Distribution of Message Categories',
                'yaxis': {
                    'title': "Count"
                },
                'xaxis': {
                    'title': "Category"
                }
            }
        }
    ]
    # encode plotly graphs in JSON
    # One DOM id per figure; the template pairs ids with the JSON payload.
    ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
    # render web page with plotly graphs
    return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
    """Classify the submitted message and render the per-category results."""
    # Text typed into the search box (empty string when absent).
    query = request.args.get('query', '')

    # Predict all category labels at once and pair them with column names
    # (columns 4+ of the dataframe are the category columns).
    predicted = model.predict([query])[0]
    results = dict(zip(df.columns[4:], predicted))

    # This will render the go.html Please see that file.
    return render_template(
        'go.html',
        query=query,
        classification_result=results
    )
def main():
    # Run the Flask development server on all interfaces, port 3000.
    app.run(host='0.0.0.0', port=3000, debug=True)
if __name__ == '__main__':
    main()
177356 | <reponame>bird-house/malleefowl<filename>malleefowl/processes/wps_workflow.py
import os
import yaml
from pywps import Process
from pywps import ComplexInput
from pywps import ComplexOutput
from pywps import Format, FORMATS
from pywps.app.Common import Metadata
from malleefowl.workflow import run
import logging
LOGGER = logging.getLogger("PYWPS")
class DispelWorkflow(Process):
    """
    The workflow process is usually called by the `Phoenix`_ WPS web client to
    run WPS process for climate data (like cfchecker, climate indices with ocgis, ...)
    with a given selection of input data (currently NetCDF files from ESGF data nodes).
    Currently the `Dispel4Py <https://github.com/dispel4py/dispel4py>`_ workflow engine is used.
    The Workflow for ESGF input data is as follows::
    Search ESGF files -> Download ESGF files -> Run choosen process on local (downloaded) ESGF files.
    """
    def __init__(self):
        # One required input: the workflow description document (YAML).
        inputs = [
            ComplexInput('workflow', 'Workflow description',
                         abstract='Workflow description in YAML.',
                         min_occurs=1,
                         max_occurs=1,
                         supported_formats=[Format('text/yaml')]),
        ]
        # Outputs: the workflow result document and a plain-text log file,
        # both returned by reference (URL) rather than inline.
        outputs = [
            ComplexOutput('output', 'Workflow result',
                          abstract="Workflow result document in YAML.",
                          as_reference=True,
                          supported_formats=[Format('text/yaml')]),
            ComplexOutput('logfile', 'Workflow log file',
                          abstract="Workflow log file.",
                          as_reference=True,
                          supported_formats=[FORMATS.TEXT]),
        ]
        super(DispelWorkflow, self).__init__(
            self._handler,
            identifier="workflow",
            title="Workflow",
            version="0.7",
            abstract="Runs Workflow with dispel4py.",
            metadata=[
                Metadata('Birdhouse', 'http://bird-house.github.io/'),
                Metadata('User Guide', 'http://malleefowl.readthedocs.io/en/latest/'),
            ],
            inputs=inputs,
            outputs=outputs,
            status_supported=True,
            store_supported=True,
        )
    def _handler(self, request, response):
        # Progress callback handed to the workflow engine: forwards
        # (message, percent) pairs to the WPS status document.
        def monitor(message, progress):
            response.update_status(message, progress)
        response.update_status("starting workflow ...", 0)
        # SECURITY NOTE(review): yaml.load without an explicit Loader can
        # instantiate arbitrary Python objects from the submitted document;
        # consider yaml.safe_load for untrusted client input.
        workflow = yaml.load(request.inputs['workflow'][0].stream)
        workflow_name = workflow.get('name', 'unknown')
        response.update_status("workflow {0} prepared.".format(workflow_name), 0)
        # Run the workflow; HTTP headers are forwarded (e.g. for auth tokens).
        result = run(workflow, monitor=monitor, headers=request.http_request.headers)
        with open(os.path.join(self.workdir, 'output.txt'), 'w') as fp:
            yaml.dump(result, stream=fp)
            response.outputs['output'].file = fp.name
        with open(os.path.join(self.workdir, 'logfile.txt'), 'w') as fp:
            # Placeholder content only; no real log is collected here yet.
            fp.write("workflow log file")
            response.outputs['logfile'].file = fp.name
        response.update_status("workflow {0} done.".format(workflow_name), 100)
        return response
| StarcoderdataPython |
6545535 | <reponame>AngelBrisco/SubPixelConvolution-in-Keras
import numpy as np
from tensorflow.keras.layers import *
from tensorflow.keras import backend as K
import tensorflow as tf
__all__ =["SubpixelLayer2D","conv_up","SubpixelLayer2D_log"]
class SubpixelLayer2D(Layer):
    # Sub-pixel upsampling layer (TF1-era Keras API): same-padded conv ->
    # ReLU -> depth_to_space(scale) -> stride-1 average pool to smooth the
    # rearranged pixels. Output is scale x larger in both spatial dims.
    def __init__(self,filters=None,ksz=1, scale=2, **kwargs):
        # filters: output channels after depth_to_space (default: cin/scale^2)
        # ksz: convolution kernel size; scale: spatial upsampling factor
        self.scale=scale
        self.out_channels=filters
        self.ksz=ksz
        super(SubpixelLayer2D, self).__init__(**kwargs)
    def kinit(self,shape,dtype=None,partition_info=None):
        h,w,cin,cout=shape
        # Tile one variance-scaled kernel across all scale^2 sub-pixel
        # positions so they start identical, reducing checkerboard artifacts.
        # (Translated from the original Spanish comment.)
        # NOTE(review): the returned variable has cout*scale^2 output
        # channels, more than the shape declared in build() -- confirm this
        # is intended on the TF version in use.
        y=tf.initializers.variance_scaling()(shape=(h,w,cin,cout))
        y=tf.tile(y,[1,1,1,self.scale**2])
        sp_weights=tf.Variable(y,
                        dtype=dtype,
                        name="kernel")
        return sp_weights
    def build(self, input_shape):
        b,h,w,cin=input_shape
        if self.out_channels==None:
            # Default: channels divide evenly into the depth_to_space groups.
            self.out_channels=(cin.value)//(self.scale**2)
        self.kernel = self.add_weight(shape=(self.ksz,self.ksz,cin.value,self.out_channels),
                                initializer=self.kinit,
                                name='kernel')
        super(SubpixelLayer2D, self).build(input_shape)
    def call(self,input):
        y = K.conv2d(input, self.kernel, strides=(1, 1), padding='same', data_format="channels_last",
            dilation_rate=(1, 1))
        y = K.relu(y)
        # Rearrange channel blocks into a scale x scale spatial grid.
        y = tf.depth_to_space(y, self.scale)
        # Stride-1 averaging pass over the upsampled map (shape-preserving).
        y = K.pool2d(y, pool_size=(self.scale,self.scale), strides=(1, 1), padding='same', data_format="channels_last", pool_mode='avg')
        return y
    def compute_output_shape(self,input_shape):
        shape=input_shape
        return(shape[0],
               shape[1] * self.scale,
               shape[2] * self.scale,
               self.out_channels)
    def get_config(self):
        # Serialise constructor arguments so the layer can be re-created.
        base_config = super(SubpixelLayer2D, self).get_config()
        base_config['filters'] = self.out_channels
        base_config['scale'] = self.scale
        base_config['ksz'] = self.ksz
        return base_config
class conv_up(Layer):
    # Baseline upsampling layer: same-padded conv -> ReLU -> nearest-neighbour
    # resize by *scale* in both spatial dimensions.
    def __init__(self,filters=None,ksz=1, scale=2, **kwargs):
        # filters: output channels (default: same as input)
        # ksz: convolution kernel size; scale: spatial upsampling factor
        self.scale=scale
        self.out_channels=filters
        self.ksz=ksz
        super(conv_up, self).__init__(**kwargs)
    def build(self, input_shape):
        b,h,w,cin=input_shape
        if self.out_channels==None:
            self.out_channels=(cin.value)
        # NOTE(review): the initializer class (not an instance) is passed
        # here, unlike the sibling layers -- confirm Keras instantiates it.
        self.kernel = self.add_weight(shape=(self.ksz,self.ksz,cin.value,self.out_channels),
                                initializer=tf.initializers.variance_scaling,
                                name='kernel')
        super(conv_up, self).build(input_shape)
    def call(self,input):
        y = tf.keras.backend.conv2d(input, self.kernel, strides=(1, 1), padding='same', data_format="channels_last",
            dilation_rate=(1, 1))
        y = tf.keras.backend.relu(y)
        y = tf.keras.backend.resize_images(y, height_factor=self.scale, width_factor=self.scale, data_format="channels_last",
                      interpolation='nearest')
        return y
    def compute_output_shape(self,input_shape):
        shape=input_shape
        return(shape[0],
               shape[1] * self.scale,
               shape[2] * self.scale,
               self.out_channels)
    def get_config(self):
        # Serialise constructor arguments so the layer can be re-created.
        base_config = super(conv_up, self).get_config()
        base_config['filters'] = self.out_channels
        base_config['scale'] = self.scale
        base_config['ksz'] = self.ksz
        return base_config
class SubpixelLayer2D_log(Layer):
    # Sub-pixel upsampling applied in log2(scale) stages of x2 each
    # (conv -> ReLU -> depth_to_space(2) -> avg pool), instead of one
    # big x scale rearrangement.
    def __init__(self,filters=None,ksz=1, scale=2, **kwargs):
        # Number of extra x2 stages beyond the first; assumes *scale* is a
        # power of two -- TODO confirm callers never pass other values.
        self.loop=int(np.log2(scale))-1
        self.scale=scale
        self.prime_scale=2
        self.out_channels=filters
        self.ksz=ksz
        self.loop_kernel={}
        super(SubpixelLayer2D_log, self).__init__(**kwargs)
    def kinit(self,shape,dtype=None,partition_info=None):
        h,w,cin,cout=shape
        # Tile one variance-scaled kernel across all sub-pixel positions so
        # they start identical, reducing checkerboard artifacts.
        # (Translated from the original Spanish comment.)
        y=tf.initializers.variance_scaling()(shape=(h,w,cin,cout))
        y=tf.tile(y,[1,1,1,self.prime_scale**2])
        sp_weights=tf.Variable(y,
                        dtype=dtype,
                        name="kernel")
        return sp_weights
    def build(self, input_shape):
        b,h,w,cin=input_shape
        if self.out_channels==None:
            self.out_channels=(cin.value)//(self.prime_scale**2)
        # First-stage kernel maps cin -> out_channels; later stages map
        # out_channels -> out_channels.
        self.kernel = self.add_weight(shape=(self.ksz,self.ksz,cin.value,self.out_channels),
                                initializer=self.kinit,
                                name='kernel')
        for i in range(self.loop):
            self.loop_kernel[i] = self.add_weight(shape=(self.ksz,self.ksz,self.out_channels,self.out_channels),
                                initializer=self.kinit,
                                name='loop_kernel%d'%i)
        super(SubpixelLayer2D_log, self).build(input_shape)
    def call(self,input):
        # loop+1 stages of x2 upsampling; stage 0 consumes the raw input.
        for i in range(self.loop+1):
            kernel=self.kernel if i==0 else self.loop_kernel[i-1]
            x= input if i==0 else y
            y = tf.keras.backend.conv2d(x, kernel, strides=(1, 1), padding='same', data_format="channels_last",
                dilation_rate=(1, 1))
            y = tf.keras.backend.relu(y)
            y = tf.depth_to_space(y, self.prime_scale)
            y = tf.keras.backend.pool2d(y, pool_size=(self.prime_scale,self.prime_scale), strides=(1, 1), padding='same', data_format="channels_last", pool_mode='avg')
        return y
    def compute_output_shape(self,input_shape):
        shape=input_shape
        return(shape[0],
               shape[1] * self.scale,
               shape[2] * self.scale,
               self.out_channels)
    def get_config(self):
        # Serialise constructor arguments so the layer can be re-created.
        base_config = super(SubpixelLayer2D_log, self).get_config()
        base_config['filters'] = self.out_channels
        base_config['scale'] = self.scale
        base_config['ksz'] = self.ksz
        return base_config
| StarcoderdataPython |
9690116 | <reponame>aluoch-sheila/NEIGHBOURHOOD<gh_stars>0
from django.test import TestCase
from .models import Neighbourhood, Profile,Business,Join,Posts
from django.contrib.auth.models import User
# TESTING NEIGHBOURHOOD CLASS
class NeighbourhoodTestClass(TestCase):
    '''
    Test Neighbourhood class and its methods and functions
    '''
    def setUp(self):
        self.user = User.objects.create(id=1, username='test')
        self.neighbourhood = Neighbourhood(name='testhood', location='testlocation', user=self.user)
    def test_instance(self):
        self.assertTrue(isinstance(self.neighbourhood, Neighbourhood))
    def test_save_method(self):
        '''
        Function that tests whether a neighbourhood is being saved
        '''
        self.neighbourhood.save_neighbourhood()
        neighbourhoods = Neighbourhood.objects.all()
        self.assertTrue(len(neighbourhoods) > 0)
    def test_delete_method(self):
        '''
        Function that tests whether a neighbourhood can be deleted
        '''
        self.neighbourhood.save_neighbourhood()
        # Bug fix: the method was referenced without parentheses, so the
        # delete path was never exercised; also assert it actually deleted.
        self.neighbourhood.delete_neighbourhood()
        self.assertEqual(Neighbourhood.objects.count(), 0)
    def test_update_method(self):
        '''
        Function that tests whether a neighbourhood's details can be updated
        '''
        self.neighbourhood.save_neighbourhood()
        Neighbourhood.objects.filter(name='testhood').update(name='hoodtest')
        hoods = Neighbourhood.objects.get(name='hoodtest')
        # Bug fix: assertTrue(x, msg) treats the 2nd arg as a message and
        # always passes for a truthy name; assertEqual verifies the rename.
        self.assertEqual(hoods.name, 'hoodtest')
    def test_find_neighbourhood_by_id(self):
        '''
        Function that tests whether one can get a neighbourhood by its id
        '''
        self.neighbourhood.save_neighbourhood()
        this_hood = self.neighbourhood.find_neighbourhood_by_id(self.neighbourhood.id)
        hood = Neighbourhood.objects.get(id=self.neighbourhood.id)
        # Bug fix: was assertTrue(a, b), which never compared the two; if
        # find_neighbourhood_by_id returns a queryset, compare its first row.
        self.assertEqual(this_hood, hood)
class ProfileTestClass(TestCase):
    '''
    Test Profile class and its methods and functions
    '''
    def setUp(self):
        # A profile needs a user and a saved neighbourhood to attach to.
        self.user = User.objects.create(id =1,username='testname')
        self.neighbourhood = Neighbourhood(name='kejani', location='pale', user=self.user)
        self.neighbourhood.save_neighbourhood()
        self.profile = Profile(user=self.user, hood = self.neighbourhood)
    def test_instance(self):
        self.assertTrue(isinstance(self.profile, Profile))
    def test_save_method(self):
        '''
        Function that tests whether a user's profile is being saved
        '''
        self.profile.save_profile()
        profiles = Profile.objects.all()
        self.assertTrue(len(profiles) > 0)
    def test_delete_method(self):
        '''
        Function that tests whether a user's profile can be deleted
        '''
        # NOTE(review): no assertion follows the delete -- this only checks
        # that delete_profile() does not raise.
        self.profile.save_profile()
        self.profile.delete_profile()
class BusinessTestClass(TestCase):
    '''
    Test Business class and its methods and functions
    '''
    def setUp(self):
        self.user = User.objects.create(id=1, username='test')
        self.neighbourhood = Neighbourhood(name='kejani', location='pale', user=self.user)
        self.neighbourhood.save_neighbourhood()
        self.business = Business(name="kazi", email="<EMAIL>", user=self.user, neighbourhood=self.neighbourhood)
    def test_instance(self):
        self.assertTrue(isinstance(self.business, Business))
    def test_save_method(self):
        '''
        Function that tests whether a business is being saved
        '''
        self.business.save_business()
        businesses = Business.objects.all()
        # Re-enabled: this assertion was commented out, leaving a test that
        # could never fail. If save_business is known-broken, mark the test
        # with expectedFailure instead of silencing the assert.
        self.assertTrue(len(businesses) > 0)
    def test_delete_method(self):
        '''
        Function that tests whether a business can be deleted
        '''
        self.business.save_business()
        self.business.delete_business()
        self.assertEqual(Business.objects.count(), 0)
    def test_update_method(self):
        '''
        Function that tests whether a business's details can be updated
        '''
        self.business.save_business()
        Business.objects.filter(name='kazi').update(name='kazini')
        # Previously no assertion followed the update.
        self.assertTrue(Business.objects.filter(name='kazini').exists())
class PostsTestClass(TestCase):
    '''
    Test Posts class and its methods and functions
    '''
    def setUp(self):
        # A post needs a user and a saved neighbourhood to attach to.
        self.user = User.objects.create(id =1, username='test')
        self.neighbourhood = Neighbourhood(name='kejani', location='pale', user=self.user)
        self.neighbourhood.save_neighbourhood()
        self.post = Posts(post="hii post", user=self.user, hood=self.neighbourhood)
    def test_instance(self):
        self.assertTrue(isinstance(self.post, Posts))
    def test_save_method(self):
        '''
        Function that tests whether a post is being saved
        '''
        self.post.save_posts()
        posts = Posts.objects.all()
        self.assertTrue(len(posts) > 0)
    def test_delete_method(self):
        '''
        Function that tests whether a post can be deleted
        '''
        # NOTE(review): no assertion follows the delete -- this only checks
        # that delete_posts() does not raise.
        self.post.save_posts()
        self.post.delete_posts()
| StarcoderdataPython |
4947373 | <gh_stars>1-10
from typing import Dict, cast
from django.core import signing
from django.core.files.storage import default_storage
from django.urls import reverse
import pytest
import requests
from rest_framework.test import APIClient
from s3_file_field._sizes import mb
from .fuzzy import URL_RE, UUID_RE, Re
def test_prepare(api_client):
    """Initializing an upload for a tiny (10 B) file yields a single part."""
    resp = api_client.post(
        reverse('s3_file_field:upload-initialize'),
        {'field_id': 'test_app.Resource.blob', 'file_name': 'test.txt', 'file_size': 10},
        format='json',
    )

    assert resp.status_code == 200
    assert resp.data == {
        'object_key': Re(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/test.txt'),
        'upload_id': UUID_RE,
        'parts': [{'part_number': 1, 'size': 10, 'upload_url': URL_RE}],
        'upload_signature': Re(r'.*:.*'),
    }
    # The signature must round-trip through Django signing back to the
    # object key and field id.
    assert signing.loads(resp.data['upload_signature']) == {
        'object_key': Re(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/test.txt'),
        'field_id': 'test_app.Resource.blob',
    }
def test_prepare_two_parts(api_client):
    """A 10 MB file is split into two equal 5 MB parts."""
    resp = api_client.post(
        reverse('s3_file_field:upload-initialize'),
        {'field_id': 'test_app.Resource.blob', 'file_name': 'test.txt', 'file_size': mb(10)},
        format='json',
    )

    assert resp.status_code == 200
    assert resp.data == {
        'object_key': Re(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/test.txt'),
        'upload_id': UUID_RE,
        'parts': [
            # 5 MB size
            {'part_number': 1, 'size': mb(5), 'upload_url': URL_RE},
            {'part_number': 2, 'size': mb(5), 'upload_url': URL_RE},
        ],
        'upload_signature': Re(r'.*:.*'),
    }
def test_prepare_three_parts(api_client):
    """A 12 MB file yields two full 5 MB parts plus a 2 MB remainder."""
    resp = api_client.post(
        reverse('s3_file_field:upload-initialize'),
        {'field_id': 'test_app.Resource.blob', 'file_name': 'test.txt', 'file_size': mb(12)},
        format='json',
    )

    assert resp.status_code == 200
    assert resp.data == {
        'object_key': Re(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/test.txt'),
        'upload_id': UUID_RE,
        'parts': [
            {'part_number': 1, 'size': mb(5), 'upload_url': URL_RE},
            {'part_number': 2, 'size': mb(5), 'upload_url': URL_RE},
            {'part_number': 3, 'size': mb(2), 'upload_url': URL_RE},
        ],
        'upload_signature': Re(r'.*:.*'),
    }
@pytest.mark.parametrize('file_size', [10, mb(10), mb(12)], ids=['10B', '10MB', '12MB'])
@pytest.mark.parametrize(
    'content_type',
    [None, 'image/png', 'application/dicom'],
    ids=['none', 'png', 'dicom'],
)
def test_full_upload_flow(
    api_client: APIClient,
    file_size: int,
    content_type: str,
):
    """End-to-end multipart flow: initialize, upload parts, complete,
    finalize, then verify the stored object and its Content-Type."""
    request_body = {
        'field_id': 'test_app.Resource.blob',
        'file_name': 'test.txt',
        'file_size': file_size,
    }
    # Only include Content headers if non-null
    if content_type is not None:
        request_body['content_type'] = content_type

    # Initialize the multipart upload
    resp = api_client.post(reverse('s3_file_field:upload-initialize'), request_body, format='json')
    assert resp.status_code == 200
    initialization = resp.data
    assert isinstance(initialization, dict)
    upload_signature = initialization['upload_signature']

    # Perform the upload
    for part in initialization['parts']:
        part_resp = requests.put(part['upload_url'], data=b'a' * part['size'])
        part_resp.raise_for_status()
        # Modify the part to transform it from an initialization to a finalization
        del part['upload_url']
        part['etag'] = part_resp.headers['ETag']
    initialization['field_id'] = 'test_app.Resource.blob'

    # Presign the complete request
    resp = api_client.post(
        reverse('s3_file_field:upload-complete'),
        {
            'upload_id': initialization['upload_id'],
            'parts': initialization['parts'],
            'upload_signature': upload_signature,
        },
        format='json',
    )
    assert resp.status_code == 200
    assert resp.data == {
        'complete_url': Re(r'.*'),
        'body': Re(r'.*'),
    }
    completion_data = cast(Dict, resp.data)

    # Complete the upload
    complete_resp = requests.post(
        completion_data['complete_url'],
        data=completion_data['body'],
    )
    complete_resp.raise_for_status()

    # Verify the object is present in the store
    assert default_storage.exists(initialization['object_key'])

    # Finalize the upload
    resp = api_client.post(
        reverse('s3_file_field:finalize'),
        {
            'upload_signature': upload_signature,
        },
        format='json',
    )
    assert resp.status_code == 200
    assert resp.data == {
        'field_value': Re(r'.*:.*'),
    }

    # Verify that the Content headers were stored correctly on the object
    object_resp = requests.get(default_storage.url(initialization['object_key']))
    # Bug fix: this previously re-asserted `resp` (the finalize response,
    # already checked above) instead of the object fetch.
    assert object_resp.status_code == 200
    if content_type is not None:
        assert object_resp.headers['Content-Type'] == content_type

    default_storage.delete(initialization['object_key'])
| StarcoderdataPython |
8036245 | #!/usr/bin/env python
#####################
# Calibrates a given filter
#####################
import sys, os, optparse, re, unittest, inspect
import numpy, astropy, astropy.io.fits as pyfits
if __name__ == '__main__':
    # When run as a script there may be no display; select the
    # non-interactive PostScript backend before pylab is imported.
    import matplotlib
    matplotlib.use('PS')
    from matplotlib import pylab
    pylab.ioff()
else:
    import pylab
import ldac, utilities, leastsq, photometry_db
#####################
__cvs_id__ = "$Id: fit_phot.py,v 1.25 2010-10-05 22:36:03 dapple Exp $"
##################################################
### Photometry Global Database
##################################################
class Phot_db(object):
    '''Lazy proxy for the photometry database of choice.

    The wrapped *db* class is only instantiated on first attribute access,
    so importing this module does not open a database connection.
    '''

    def __init__(self, db, *args, **keywords):
        self.db = db
        self.instance = None
        self.args = args
        self.keywords = keywords

    def __getattr__(self, name):
        # First touch: construct the real database object, then delegate.
        instance = self.instance
        if instance is None:
            instance = self.db(*self.args, **self.keywords)
            self.instance = instance
        return getattr(instance, name)
__default_photometry_db__ = Phot_db(photometry_db.Photometry_db)
#########################################
### DEFAULTS
#########################################
__default_fluxtype__ = 'APER1'
##########################################
### DEFINED FILTER INFORMATION
##########################################
__3sec_zp__ = 25
filter_info = {
'B':{'sdss_filter':'g','color1':'gmr','color2':'umg','EXTCOEFF':-0.2104,'COLCOEFF':0.0, 'color1cut' : lambda x : x < .8},
'U':{'sdss_filter':'g', 'color1':'gmr','color2':'umg','EXTCOEFF':-0.2104,'COLCOEFF':0.0, 'color1cut': lambda x : numpy.logical_and(x > 0.25, x < 1.0)},
'W-J-B':{'sdss_filter':'g','color1':'gmr','color2':'umg','EXTCOEFF':-0.2104,'COLCOEFF':0.0, 'color1cut' : lambda x : numpy.logical_and(x < .75,x>0.2)},
'W-J-V':{'sdss_filter':'g','color1':'gmr','color2':'rmi','EXTCOEFF':-0.1202,'COLCOEFF':0.0, 'color1cut' : lambda x : numpy.logical_and(x < 1.0, x >0.35)},
'W-C-RC':{'sdss_filter':'r','color1':'rmi','color2':'gmr','EXTCOEFF':-0.0925,'COLCOEFF':0.0, 'color1cut' : lambda x : x < 0.44},
'W-C-IC':{'sdss_filter':'i','color1':'rmi','color2':'imz','EXTCOEFF':-0.02728,'COLCOEFF':0.0, \
'color1cut' : lambda x : numpy.logical_and( x < 1,x>0)},
'W-S-I+':{'sdss_filter':'i','color1':'rmi','color2':'imz','EXTCOEFF':-0.02728,'COLCOEFF':0.0, 'color1cut' : lambda x : x < 0.6},
'I':{'sdss_filter':'i','color1':'imz','color2':'rmi','EXTCOEFF':-0.02728,'COLCOEFF':0.0, 'color1cut' : lambda x : x < 0.6},
'W-S-Z+':{'sdss_filter':'z','color1':'imz','color2':'rmi','EXTCOEFF':0.0,'COLCOEFF':0.0, 'color1cut' : lambda x : numpy.logical_and(x < 0.8, x > 0.2)},
'u':{'sdss_filter':'u','color1':'umg','color2':'umg','EXTCOEFF':-0.2104,'COLCOEFF':0.0, 'color1cut' : lambda x : numpy.isfinite(x)},
'g':{'sdss_filter':'g','color1':'gmr','color2':'umg','EXTCOEFF':-0.2104,'COLCOEFF':0.0, 'color1cut' : lambda x : numpy.isfinite(x)},
'r':{'sdss_filter':'r','color1':'rmi','color2':'umg','EXTCOEFF':-0.0925,'COLCOEFF':0.0, 'color1cut' : lambda x : numpy.isfinite(x)},
'i':{'sdss_filter':'i','color1':'imz','color2':'rmi','EXTCOEFF':-0.02728,'COLCOEFF':0.0, 'color1cut' : lambda x : numpy.isfinite(x)},
'z':{'sdss_filter':'z','color1':'imz','color2':'rmi','EXTCOEFF':0.0,'COLCOEFF':0.0, 'color1cut' : lambda x : numpy.isfinite(x)}
}
#I_SUBARU = i_SDSS (-0.276)*(r_SDSS - i_SDSS) + 0.020
#Fit to Pickles Stellar Template Library
colorterms = {#'SUBARU-10_1-1-W-C-IC': (0.40280557662545879, 0.44855074031464925),
'SUBARU-10_1-1-W-C-IC': (0.276, 0.020),
'SUBARU-10_1-1-W-C-RC': (0.30587358225815486, 0.42517900392093094),
'SUBARU-10_1-1-W-J-B': (-0.24078726421577926, 0.26136672075848993),
'SUBARU-10_1-1-W-J-V': (0.59960441249635976, 0.2194019839308905),
'SUBARU-10_1-1-W-S-I+': (0.074485, 0.328308),
'SUBARU-10_1-1-W-S-Z+': (0.044574463739716047, 0.37674513156137274),
# 'SUBARU-10_2-1-W-C-IC': (0.40280557662545879, 0.44855074031464925),
'SUBARU-10_2-1-W-C-IC': (0.276, 0.020),
'SUBARU-10_2-1-W-C-RC': (0.30587358225815486, 0.42517900392093094),
'SUBARU-10_2-1-W-J-B': (-0.24078726421577926, 0.26136672075848993),
'SUBARU-10_2-1-W-J-V': (0.59960441249635976, 0.2194019839308905),
'SUBARU-10_2-1-W-S-I+': (0.074485, 0.328308),
'SUBARU-10_2-1-W-S-Z+': (0.044574463739716047, 0.37674513156137274),
'SUBARU-10_1-2-W-C-RC': (0.30587358225815486, 0.42517900392093094),
'SUBARU-10_1-2-W-J-B': (-0.24078726421577926, 0.26136672075848993),
'SUBARU-10_1-2-W-J-V': (0.59960441249635976, 0.2194019839308905),
'SUBARU-10_1-2-W-S-I+': (0.074485, 0.328308),
'SUBARU-10_1-2-W-S-Z+': (0.044574463739716047, 0.37674513156137274),
'SUBARU-10_2-2-W-C-IC': (0.40280557662545879, 0.44855074031464925),
'SUBARU-10_2-2-W-C-RC': (0.30587358225815486, 0.42517900392093094),
'SUBARU-10_2-2-W-J-B': (-0.24078726421577926, 0.26136672075848993),
'SUBARU-10_2-2-W-J-V': (0.59960441249635976, 0.2194019839308905),
'SUBARU-10_2-2-W-S-I+': (0.074485, 0.328308),
'SUBARU-10_2-2-W-S-Z+': (0.044574463739716047, 0.37674513156137274),
'SUBARU-10_3-1-W-C-IC': (0.279459, 0.145487),
'SUBARU-10_3-1-W-C-RC': (0.301447, 0.425181),
'SUBARU-10_3-1-W-J-B': (-0.211061, 0.136693),
'SUBARU-10_3-1-W-J-V': (0.593218, 0.136693),
'SUBARU-10_3-1-W-S-Z+': (0.076176, 0.341251),
'SUBARU-8-1-W-C-IC': (0.264017, 0.145487),
'SUBARU-8-1-W-C-RC': (0.29517895168989122, 0.42517900813547471),
'SUBARU-8-1-W-J-B': (-0.23759822513479578, 0.26136672003092487),
'SUBARU-8-1-W-J-V': (0.59222754271066269, 0.21940198721395668),
'SUBARU-8-1-W-S-Z+': (0.039680091339973099, 0.37674513161790629),
'SUBARU-8-2-W-C-IC': (0.258385, 0.145487),
'SUBARU-8-2-W-C-RC': (0.29716045031934651, 0.42517900572453288),
'SUBARU-8-2-W-J-B': (-0.091763, 0.339650),
'SUBARU-8-2-W-J-V': (0.6116458875238111, 0.21940198507480779),
'SUBARU-8-2-W-S-Z+': (0.014494538164775472, 0.3767451323829949),
'SUBARU-8-4-W-C-IC': (0.260918, 0.145487),
'SUBARU-8-4-W-C-RC': (0.29956356433016157, 0.42517900741658754),
'SUBARU-8-4-W-J-B': (-0.27545711721961763, 0.26136672054131793),
'SUBARU-8-4-W-J-V': (0.60061067325937401, 0.21940198472543423),
'SUBARU-8-4-W-S-Z+': (0.04000067443408889, 0.37674513335575216),
'SUBARU-9-1-W-C-IC': (0.264017, 0.145487),
'SUBARU-9-1-W-C-RC': (0.29517895168989122, 0.42517900813547471),
'SUBARU-9-1-W-J-B': (-0.23759822513479578, 0.26136672003092487),
'SUBARU-9-1-W-J-V': (0.59222754271066269, 0.21940198721395668),
'SUBARU-9-1-W-S-Z+': (0.039680091339973099, 0.37674513161790629),
'SUBARU-9-2-W-C-IC': (0.258385, 0.145487),
'SUBARU-9-2-W-C-RC': (0.29716045031934651, 0.42517900572453288),
'SUBARU-9-2-W-J-B': (-0.091763, 0.339650),
'SUBARU-9-2-W-J-V': (0.6116458875238111, 0.21940198507480779),
'SUBARU-9-2-W-S-Z+': (0.014494538164775472, 0.3767451323829949),
'SUBARU-9-4-W-C-IC': (0.260918, 0.145487),
'SUBARU-9-4-W-C-RC': (0.29956356433016157, 0.42517900741658754),
'SUBARU-9-4-W-J-B': (-0.27545711721961763, 0.26136672054131793),
'SUBARU-9-4-W-J-V': (0.60061067325937401, 0.21940198472543423),
'SUBARU-9-4-W-S-Z+': (0.04000067443408889, 0.37674513335575216),
'SUBARU-9-8-W-C-IC': (0.260918, 0.145487),
'SUBARU-9-8-W-C-RC': (0.29956356433016157, 0.42517900741658754),
'SUBARU-9-8-W-J-B': (-0.27545711721961763, 0.26136672054131793),
'SUBARU-9-8-W-J-V': (0.60061067325937401, 0.21940198472543423),
'SUBARU-9-8-W-S-Z+': (0.04000067443408889, 0.37674513335575216),
'WHT-0-1-B': (0.34601698954778792, 0.080284105865396194),
'WHT-0-1-U': (1.7302077394178275, 0.42729437531924919)}
#subaru-9-8 faked, assuming like 9-4, assuming 10_x-2 is like 10_x-1
##################################
### UTILITIES
##################################
def stdCalibrationCuts(cat, mag, sdss_names, colorcut_function):
peakvals = cat['SEx_BackGr'] + cat['SEx_MaxVal']
colorcut = colorcut_function(cat[sdss_names.sdss_color])
detected = numpy.logical_and(
numpy.logical_and( numpy.abs(mag) < 90,
numpy.abs(cat[sdss_names.sdss_mag]) < 90),
numpy.abs(cat[sdss_names.sdss_color]) < 90)
# detected = numpy.logical_and(detected,cat[sdss_names.sdss_mag]<18)
# print 'detected = '
# print detected
# if 0:
if 1. in cat['Clean']:
print "using clean set"
# good_detection = numpy.logical_and(
# numpy.logical_and(peakvals < 20000,
# cat['SEx_Flag']<2),
# cat['Clean'] == 1)
good_detection = cat['Clean'] == 1
else:
print "using unclean set"
# good_detection = numpy.logical_and(
# numpy.logical_and(peakvals < 20000,
# cat['SEx_Flag']==0),
good_detection = cat['Clean'] != 1
# good_detection = numpy.logical_and( cat['SEx_Flag']==0, \
# cat['Clean'] != 1)
print len(cat)
# print cat['SEx_Flag']
# print cat['Clean']
return numpy.logical_and( numpy.logical_and( detected,
good_detection),
colorcut)
#######################
def basicCalCuts(cat, mag, sdss_names, colorcut_function):
    """Minimal calibration cuts: keep objects whose instrumental magnitude
    and SDSS reference magnitude/color are all measured (within sentinel
    bounds).  `colorcut_function` is accepted for signature parity with the
    other cut functions but is not applied here.
    """
    finite_mag = (mag > -90) & (mag < 90)
    ref_color = cat[sdss_names.sdss_color]
    ref_mag = cat[sdss_names.sdss_mag]
    finite_ref = (ref_color > -50) & (ref_color < 50) \
        & (ref_mag > -90) & (ref_mag < 90)
    return finite_mag & finite_ref
#######################
def looseCalibrationCuts(cat, mag, sdss_names, colorcut_function):
    """Calibration cuts for the 'unclean' sample.

    Requires measured instrumental/reference magnitudes and color, both
    magnitudes brighter than 23, an unsaturated peak, a zero SExtractor
    flag, objects NOT flagged clean, and the filter-specific color cut.
    Returns a boolean mask over `cat`.
    """
    peak_fluxes = cat['SEx_BackGr'] + cat['SEx_MaxVal']
    passes_color = colorcut_function(cat[sdss_names.sdss_color])
    refmag = cat[sdss_names.sdss_mag]
    measured = (numpy.abs(mag) < 90) \
        & (numpy.abs(refmag) < 90) \
        & (numpy.abs(cat[sdss_names.sdss_color]) < 90)
    # Restrict to reasonably bright objects in both systems.
    measured = measured & (numpy.abs(refmag) < 23) & (numpy.abs(mag) < 23)
    print("using unclean set")
    well_detected = (peak_fluxes < 20000) \
        & (cat['SEx_Flag'] == 0) \
        & (cat['Clean'] != 1)
    print(len(cat))
    return measured & well_detected & passes_color
#######################
class SDSSNames(object):
    """Derives the SDSS reference-catalog column names for one filter.

    Given a `filter_info` entry, exposes the magnitude, magnitude-error,
    color, and color-error column names as attributes.
    """
    def __init__(self, filterInfo):
        band = filterInfo['sdss_filter']
        color = filterInfo['color1']
        self.sdss_filter = band
        # Column-name conventions of the SDSS reference catalog.
        self.sdss_mag = '%smag' % band
        self.sdss_magerr = '%serr' % band
        self.sdss_color = color
        self.sdss_color_err = '%serr' % color
#######################
class FitResults(object):
    """Result of a zeropoint fit: zp +- zperr, plus the color term.

    `fixedcolor` records whether the color term was held fixed (True) or
    fitted (False); `colorterm_err` is None for fixed-color fits.
    """
    def __init__(self, zp, zperr, colorterm, colorterm_err, fixedcolor):
        self.zp = float(zp)
        self.zperr = float(zperr)
        self.colorterm = float(colorterm)
        # A fixed color term carries no fit uncertainty.
        self.colorterm_err = None if colorterm_err is None else float(colorterm_err)
        self.fixedcolor = fixedcolor
    def __str__(self):
        if not self.fixedcolor:
            return 'zp = %5.3f +- %3.3f\tcolorterm = %5.3f +- %3.3f' % (self.zp, self.zperr,
                                                                        self.colorterm, self.colorterm_err)
        return 'zp = %5.3f +- %3.3f' % (self.zp, self.zperr)
######################
class CalibrationData(object):
    """Matched instrumental & reference photometry for one filter.

    All array arguments are parallel 1-D numpy arrays.  If `colorterm` is
    given, it is held fixed during calibration; otherwise it is fitted.
    """
    def __init__(self, mag, mag_err, refmag, refmag_err, color, colorerr, colorterm = None):
        self.mag = mag
        self.mag_err = mag_err
        self.refmag = refmag
        self.refmag_err = refmag_err
        self.color = color
        self.colorerr = colorerr
        self.colorterm = colorterm
    ################
    def vals(self):
        """Per-object zeropoint estimates: reference minus instrumental
        magnitude, with the fixed color term removed when present."""
        offsets = self.refmag - self.mag
        if self.colorterm is None:
            return offsets
        return offsets - self.colorterm*self.color
    ################
    def errs(self):
        """1-sigma uncertainties on vals(), added in quadrature (the color
        error is propagated only when a fixed color term is applied)."""
        if self.colorterm is None:
            return numpy.sqrt(self.mag_err**2 + \
                                  self.refmag_err**2)
        return numpy.sqrt(self.refmag_err**2 + \
                              self.mag_err**2 + \
                              (self.colorterm**2)*self.colorerr**2)
    #################
    def calibrate(self):
        """Fit the zeropoint; returns a FitResults, or None when there are
        fewer than 4 usable objects (too few to fit)."""
        if len(self.mag) < 4:
            sys.stderr.write("Not Enough Data to do Photometry\n")
            return
        if self.colorterm is not None:
            return self.calibrate_fixedcolor()
        return self.calibrate_freecolor()
    ##################
    def calibrate_fixedcolor(self):
        """Inverse-variance-weighted mean zeropoint with the color term
        held fixed."""
        offsets = self.vals()
        weights = 1./self.errs()**2
        total_weight = weights.sum()
        zp = (weights*offsets).sum(dtype=numpy.float64)/total_weight
        zperr = numpy.sqrt(1./total_weight)
        return FitResults(zp, zperr, self.colorterm, colorterm_err = None, fixedcolor=True)
    ########################
    def calibrate_freecolor(self):
        """Linear least-squares fit of zeropoint and color term."""
        offsets = self.vals()
        uncertainties = self.errs()
        params, chisq, covar, isCon = leastsq.linear_leastsq(self.color, offsets, uncertainties, fullOutput=True)
        # params = [slope (color term), intercept (zeropoint)]
        return FitResults(params[1], numpy.sqrt(covar[1][1]),
                          params[0], numpy.sqrt(covar[0][0]), fixedcolor=False)
#######################
########################
def saveCalibration(cluster, filter, fitResults, photometry_db = __default_photometry_db__, specification = {}):
    """Persist a photometric fit and mark it as the active calibration.

    Registers `fitResults` in `photometry_db` and then points the
    cluster/filter calibration record at the new entry.  `specification`
    carries extra key/value pairs that scope the calibration's uniqueness.
    """
    entry = photometry_db.registerPhotometricCalibration(cluster = cluster,
                                                         filter = filter,
                                                         fitresults = fitResults,
                                                         **specification)
    photometry_db.updateCalibration(cluster, filter = filter, calibration = entry, **specification)
##################################
### USER-CALLABLE FUNCTIONS
##################################
def standardCalibration(cluster, filter, cat, fluxtype = __default_fluxtype__, plotdir=None, freecolor=False, photometry_db = __default_photometry_db__, specification = {}, cuts = stdCalibrationCuts):
    """Calibrate one filter of `cat` against matched SDSS photometry.

    Selects calibration objects with `cuts`, fits a zeropoint (and a color
    term when freecolor=True, otherwise uses the tabulated fixed color
    term), optionally stores the result in `photometry_db`, and optionally
    writes diagnostic plots into `plotdir`.

    Returns a one-element list of FitResults, or None on failure.
    """
    instrum, config, chipid, stdfilter = utilities.parseFilter(filter)
    mag_name = 'SEx_MAG_%s-%s' % (fluxtype, filter)
    magerr_name = 'SEx_MAGERR_%s-%s' % (fluxtype, filter)
    filterInfo = filter_info[stdfilter]
    sdss_names = SDSSNames(filterInfo)
    allfits = []
    goodObjs = cat.filter(cuts(cat, cat[mag_name], sdss_names, filterInfo['color1cut']))
    print(' we have '+str(len(goodObjs))+' good Objects')
    if freecolor:
        # BUGFIX: colorterm was previously undefined in this branch, which
        # raised NameError at the makeCutPlots call below when plotdir was set.
        colorterm = None
        calibrationData = CalibrationData(mag = goodObjs[mag_name],
                                          mag_err = goodObjs[magerr_name],
                                          refmag = goodObjs[sdss_names.sdss_mag],
                                          refmag_err = goodObjs[sdss_names.sdss_magerr],
                                          color = goodObjs[sdss_names.sdss_color],
                                          colorerr = goodObjs[sdss_names.sdss_color_err])
    else:
        if filter not in colorterms:
            sys.stderr.write('Unknown Filter, Skipping: %s\n' % filter)
            return
        colorterm = colorterms[filter]
        calibrationData = CalibrationData(mag = goodObjs[mag_name],
                                          mag_err = goodObjs[magerr_name],
                                          refmag = goodObjs[sdss_names.sdss_mag],
                                          refmag_err = goodObjs[sdss_names.sdss_magerr],
                                          color = goodObjs[sdss_names.sdss_color],
                                          colorerr = goodObjs[sdss_names.sdss_color_err],
                                          colorterm = colorterm[0])
    fitresults = calibrationData.calibrate()
    if fitresults is None:
        # BUGFIX: message previously lacked a trailing newline.
        sys.stderr.write('Error in Calibration of %s %s\n' % (cluster, filter))
        return
    allfits.append(fitresults)
    aperture_filter = filter
    print('%s %s: %s' % (cluster, aperture_filter, str(fitresults)))
    if photometry_db:
        saveCalibration(cluster, filter=aperture_filter, fitResults = fitresults, photometry_db = photometry_db, specification = specification)
    if plotdir is not None:
        if not os.path.exists(plotdir):
            os.mkdir(plotdir)
        if freecolor:
            title = 'Calibration with Free Color'
        else:
            title = 'Calibration with Fixed Pickles Color'
        print('making residual')
        plotCalibrationResiduals(calibrationData, fitresults,
                                 title = 'Sloan - Sub vs. Sloan',
                                 color_label=sdss_names.sdss_color,
                                 residual_label='%s - %s - %3.2f - %3.3f*%s' % (sdss_names.sdss_mag,
                                                                                aperture_filter,
                                                                                fitresults.zp,
                                                                                fitresults.colorterm,
                                                                                sdss_names.sdss_color))
        pylab.show()
        pylab.savefig('%s/%s.ps' % (plotdir, aperture_filter))
        pylab.clf()
        print('making plot ')
        plotCalibrationPull(calibrationData, fitresults,
                            title = title,
                            color_label=sdss_names.sdss_color,
                            residual_label='%s - %s - %3.2f - %3.3f*%s' % (sdss_names.sdss_mag,
                                                                           aperture_filter,
                                                                           fitresults.zp,
                                                                           fitresults.colorterm,
                                                                           sdss_names.sdss_color))
        pylab.show()
        pylab.savefig('%s/%s_%s.ps' % (plotdir, aperture_filter,'pull'))
        pylab.clf()
        plotCalibrationMag(calibrationData, fitresults,
                           title = title,
                           color_label=sdss_names.sdss_mag,
                           residual_label='%s - %s - %3.2f - %3.3f*%s' % (sdss_names.sdss_mag,
                                                                          aperture_filter,
                                                                          fitresults.zp,
                                                                          fitresults.colorterm,
                                                                          sdss_names.sdss_color))
        pylab.show()
        pylab.savefig('%s/%s_%s.ps' % (plotdir, aperture_filter,'mag'))
        pylab.clf()
        if colorterm is not None:
            # makeCutPlots requires a fixed color term; skip it for free-color fits.
            makeCutPlots(cat, fitresults, sdss_names, mag_name, magerr_name, filterInfo['color1cut'],colorterm)
            pylab.savefig('%s/%s_%s.ps' % (plotdir, aperture_filter,'cuts'))
            pylab.clf()
        xcat=goodObjs['SEx_Xpos']
        ycat=goodObjs['SEx_Ypos']
        plotMagPosition(calibrationData, fitresults, xcat, ycat,
                        title = 'Sloan - Subaru vs. Sloan mag',
                        color_label=sdss_names.sdss_mag)
        pylab.savefig('%s/%s_%s.ps' % (plotdir, aperture_filter,'position'))
        pylab.clf()
    return allfits
##########################
def threeSecondCalibration(cluster, stdfilter, filterPrefix, cat, plotdir = None, freecolor=False, photometry_db = __default_photometry_db__, specification = {}, cuts = stdCalibrationCuts):
    """Calibrate a short (3-second) exposure against SDSS.

    Like standardCalibration, but uses the AUTO magnitudes, subtracts the
    bootstrap zeropoint __3sec_zp__ before fitting, and records the result
    under '<filterPrefix>-<stdfilter>_3sec'.

    Returns the FitResults, or None on failure.
    """
    filter='%s-%s' % (filterPrefix, stdfilter)
    filterInfo = filter_info[stdfilter]
    sdss_names = SDSSNames(filterInfo)
    mag_name = 'SEx_MAG_AUTO'
    magerr_name ='SEx_MAGERR_AUTO'
    goodObjs = cat.filter(cuts(cat, cat[mag_name], sdss_names, filterInfo['color1cut']))
    print('goodobjs = %d' % len(goodObjs))
    if freecolor:
        # BUGFIX: colorterm was previously undefined in this branch, which
        # raised NameError at the makeCutPlots call below when plotdir was set.
        colorterm = None
        calibrationData = CalibrationData(mag = goodObjs[mag_name] - __3sec_zp__,
                                          mag_err = goodObjs[magerr_name],
                                          refmag = goodObjs[sdss_names.sdss_mag],
                                          refmag_err = goodObjs[sdss_names.sdss_magerr],
                                          color = goodObjs[sdss_names.sdss_color],
                                          colorerr = goodObjs[sdss_names.sdss_color_err])
    else:
        if filter not in colorterms:
            sys.stderr.write('Unknown Filter, Skipping: %s\n' % filter)
            return
        colorterm = colorterms[filter]
        calibrationData = CalibrationData(mag = goodObjs[mag_name] - __3sec_zp__,
                                          mag_err = goodObjs[magerr_name],
                                          refmag = goodObjs[sdss_names.sdss_mag],
                                          refmag_err = goodObjs[sdss_names.sdss_magerr],
                                          color = goodObjs[sdss_names.sdss_color],
                                          colorerr = goodObjs[sdss_names.sdss_color_err],
                                          colorterm = colorterm[0])
    fitresults = calibrationData.calibrate()
    if fitresults is None:
        # BUGFIX: was "... % cluster, filter", which passed `filter` as a
        # second argument to write() and raised TypeError instead of logging.
        sys.stderr.write('Error in Calibration of %s %s\n' % (cluster, filter))
        return
    aperture_filter = '%s_3sec' % filter
    print('%s %s: %s' % (cluster, aperture_filter, str(fitresults)))
    if photometry_db:
        saveCalibration(cluster, filter=aperture_filter, fitResults =fitresults, photometry_db = photometry_db, specification = specification)
    if plotdir is not None:
        if not os.path.exists(plotdir):
            os.mkdir(plotdir)
        if freecolor:
            title = 'Calibration with Free Color'
        else:
            title = 'Calibration with Fixed Pickles Color'
        plotCalibrationResiduals(calibrationData, fitresults,
                                 title = title,
                                 color_label=sdss_names.sdss_color,
                                 residual_label='%s - %s - %3.2f - %3.3f*%s' % (sdss_names.sdss_mag,
                                                                                aperture_filter,
                                                                                fitresults.zp,
                                                                                fitresults.colorterm,
                                                                                sdss_names.sdss_color))
        pylab.show()
        pylab.savefig('%s/%s.ps' % (plotdir, aperture_filter))
        pylab.clf()
        plotCalibrationPull(calibrationData, fitresults,
                            title = title,
                            color_label=sdss_names.sdss_color,
                            residual_label='%s - %s - %3.2f - %3.3f*%s' % (sdss_names.sdss_mag,
                                                                           aperture_filter,
                                                                           fitresults.zp,
                                                                           fitresults.colorterm,
                                                                           sdss_names.sdss_color))
        pylab.show()
        pylab.savefig('%s/%s_%s.ps' % (plotdir, aperture_filter,'pull'))
        pylab.clf()
        plotCalibrationMag(calibrationData, fitresults,
                           title = 'Sloan - Subaru vs. Sloan mag',
                           color_label=sdss_names.sdss_mag,
                           residual_label='%s - %s - %3.2f' % (sdss_names.sdss_mag,
                                                               aperture_filter,
                                                               fitresults.zp))
        pylab.show()
        pylab.savefig('%s/%s_%s.ps' % (plotdir, aperture_filter,'mag'))
        pylab.clf()
        if colorterm is not None:
            # makeCutPlots requires a fixed color term; skip it for free-color fits.
            makeCutPlots(cat, fitresults, sdss_names, mag_name, magerr_name, filterInfo['color1cut'],colorterm )
            pylab.savefig('%s/%s_%s.ps' % (plotdir, aperture_filter,'cuts'))
            pylab.clf()
        xcat=goodObjs['SEx_Xpos']
        ycat=goodObjs['SEx_Ypos']
        plotMagPosition(calibrationData, fitresults, xcat, ycat,
                        title = 'Sloan - Subaru vs. Sloan mag',
                        color_label=sdss_names.sdss_mag)
        pylab.savefig('%s/%s_%s.ps' % (plotdir, aperture_filter,'position'))
        pylab.clf()
    return fitresults
##########################
def specialCalibration(maindir, cluster, filter, photometry_db = __default_photometry_db__, specification = {}):
    """Take the zeropoint for a special filter directly from the coadd
    image's MAGZP header keyword (no SDSS fit), register it in the
    database, and return it.
    """
    instrum, config, chipid, stdfilter = utilities.parseFilter(filter)
    # NOTE: the path uses the standard filter name, not the full filter string.
    pathinfo = {'maindir' : maindir,
                'cluster' : cluster,
                'filter' : stdfilter}
    imagefile = '%(maindir)s/%(cluster)s/%(filter)s/SCIENCE/coadd_%(cluster)s_all/coadd.fits' % pathinfo
    zp = pyfits.getval(imagefile, 'MAGZP')
    print(zp)
    if photometry_db:
        entry = photometry_db.registerSpecialFiltersCalibration(cluster = cluster, filter = filter, file=imagefile, zp=zp, **specification)
        photometry_db.updateCalibration(cluster, filter = filter, calibration = entry, **specification)
    return zp
###################################################
### MAIN
###################################################
def main(argv = sys.argv,
         standardCalibration = standardCalibration,
         threeSecondCalibration = threeSecondCalibration,
         specialCalibration = specialCalibration,
         photometry_db = __default_photometry_db__):
    """Command-line driver: parse options and dispatch to the special,
    3-second, or standard calibration path.

    The calibration functions are injectable keyword arguments so tests can
    substitute stubs.
    """
    ###
    def parse_spec(option, opt, value, parser):
        # optparse callback: accumulate repeated --spec key=val pairs into
        # the options.specification dict.
        key, val = value.split('=')
        if not hasattr(parser.values, 'specification'):
            # NOTE(review): with default={} set on the option below, this
            # branch appears unreachable -- confirm before removing.
            setattr(parser.values, 'specification', {})
        parser.values.specification[key] = val
    ###
    parser = optparse.OptionParser()
    parser.add_option('-c', '--cluster', dest='cluster', help='Cluster name')
    parser.add_option('-i', '--inputcat',
                      dest='catfile',
                      help='catalog for use in calibration')
    parser.add_option('-f', '--filtername',
                      dest='filter',
                      help='Filter to calibrate')
    parser.add_option('-p', '--plotdir',
                      dest='plotdir',
                      help='Directory to save plots')
    parser.add_option('-t', '--chipid',
                      dest='chipid',
                      help='Chip id used in measurement')
    parser.add_option('-3', '--threesec',
                      dest='threesec',
                      action='store_true',
                      help='Treat as a 3second exposure',
                      default=False)
    parser.add_option('-s', '--special',
                      dest='special',
                      action='store_true',
                      help='Treat as a special exposure',
                      default=False)
    parser.add_option('-m', '--maindir',
                      dest='maindir',
                      help='subaru directory')
    parser.add_option('--free-color',
                      dest='freecolor',
                      action='store_true',
                      help='Allow color term to be free!',
                      default=False)
    parser.add_option('--no-save',
                      dest='saveCalib',
                      action='store_false',
                      help='Do not save fits to database',
                      default = True)
    parser.add_option('--spec', dest='specification',
                      action='callback',
                      type= 'string',
                      help='key=val set determines the uniqueness of this calibration',
                      default = {},
                      callback = parse_spec)
    parser.add_option('-n', '--fluxtype',
                      dest='fluxtype',
                      help='Type of flux/mag to calibrate, ie. FLUX_(XXXX)',
                      default=__default_fluxtype__)
    options, args = parser.parse_args(argv)
    print "Called with:"
    print options
    # Option consistency checks; parser.error() exits the process.
    if not options.special and options.catfile is None:
        parser.error('Need to specify catalog file!')
    if options.cluster is None:
        parser.error('Need to specify cluster!')
    if options.filter is None:
        parser.error('Need to specify filter')
    if options.threesec and options.special:
        parser.error('Cannot treat as 3sec and special')
    if options.threesec and not options.chipid:
        parser.error('Need a config type for this obs')
    if options.special and options.maindir is None:
        parser.error('Need to specify main directory')
    if not options.saveCalib:
        # Disable database writes for a dry run.
        photometry_db = None
    if options.special:
        specialCalibration(options.maindir,
                           options.cluster, options.filter,
                           photometry_db = photometry_db, specification = options.specification)
    else:
        cat = ldac.openObjectFile(options.catfile, 'PSSC')
        if options.threesec:
            # NOTE(review): options.filter is passed as stdfilter and
            # options.chipid as filterPrefix -- confirm this ordering
            # matches threeSecondCalibration's signature intent.
            threeSecondCalibration(options.cluster,
                                   options.filter,
                                   options.chipid,
                                   cat,
                                   plotdir = options.plotdir,
                                   freecolor=options.freecolor,
                                   photometry_db = photometry_db,
                                   specification = options.specification)
        else:
            standardCalibration(options.cluster, options.filter, cat,
                                fluxtype = options.fluxtype,
                                plotdir=options.plotdir,freecolor=options.freecolor,
                                photometry_db = photometry_db,
                                specification = options.specification,
                                cuts = basicCalCuts)
############
# Plotting
###########
def plotCalibrationResiduals(calibrationData, fitResults,
                             color_label = None,
                             residual_label = None,
                             title = None):
    """Scatter-plot the post-fit calibration residuals against SDSS color,
    with error bars and a zero reference line.  Labels/title are applied
    only when given.
    """
    if fitResults.fixedcolor:
        residuals = calibrationData.vals() - fitResults.zp
    else:
        # Remove the fitted color term as well as the zeropoint.
        residuals = calibrationData.vals() - fitResults.colorterm*calibrationData.color - fitResults.zp
    pylab.errorbar(calibrationData.color, residuals, calibrationData.errs(), fmt='b.')
    pylab.axhline(0, color='r')
    if color_label:
        pylab.xlabel(color_label)
    if residual_label:
        pylab.ylabel(residual_label)
    if title:
        pylab.title(title)
#######################
def plotCalibrationPull(calibrationData, fitResults,
                        color_label = None,
                        residual_label = None,
                        title = None):
    """Histogram the calibration pulls (residual / error) over [-8, 8]."""
    color = calibrationData.color
    if fitResults.fixedcolor:
        residuals = calibrationData.vals() - fitResults.zp
    else:
        residuals = calibrationData.vals() - fitResults.colorterm*calibrationData.color - fitResults.zp
    errs = calibrationData.errs()
    pulls = residuals / errs
    pylab.hist(pulls,bins=100,range=(-8,8))
#    pylab.errorbar(color, residuals, errs, fmt='b.')
#    pylab.axhline(0, color='r')
    # NOTE(review): the x-axis label is set from residual_label but guarded
    # on color_label -- looks like a copy/paste slip; confirm intent.
    if color_label:
        pylab.xlabel(residual_label)
    if title:
        pylab.title(title)
    print 'made pull plot'
    pylab.show()
########################
def plotCalibrationMag(calibrationData, fitResults,
                       color_label = None,
                       residual_label = None,
                       title = None):
    """Scatter-plot post-fit calibration residuals against the SDSS
    reference magnitude, with error bars and a zero reference line."""
    if fitResults.fixedcolor:
        residuals = calibrationData.vals() - fitResults.zp
    else:
        residuals = calibrationData.vals() - fitResults.colorterm*calibrationData.color - fitResults.zp
    refmags = calibrationData.refmag
    pylab.errorbar(refmags, residuals, calibrationData.errs(), fmt='b.')
    pylab.axhline(0, color='r')
    if color_label:
        pylab.xlabel(color_label)
    if residual_label:
        pylab.ylabel(residual_label)
    if title:
        pylab.title(title)
    print('made mag plot')
    pylab.show()
########################
def plotMagPosition(calibrationData, fitResults, xcat, ycat,
                    color_label = None,
                    residual_label = None,
                    title = None):
    """Plot calibration residuals against detector x position (top panel)
    and y position (bottom panel)."""
    if fitResults.fixedcolor:
        residuals = calibrationData.vals() - fitResults.zp
    else:
        residuals = calibrationData.vals() - fitResults.colorterm*calibrationData.color - fitResults.zp
    errs = calibrationData.errs()
    # (panel index, positions, axis label, y half-range, y-axis label)
    panels = [(1, xcat, 'x position', 0.4, ''),
              (2, ycat, 'y position', 0.8, residual_label)]
    for panel, positions, axis_name, halfspan, ylab in panels:
        pylab.subplot(2,1,panel)
        pylab.errorbar(positions, residuals, errs, fmt='b.')
        pylab.axis([0, 10000, -halfspan, halfspan])
        pylab.axhline(0, color='r')
        if color_label:
            pylab.xlabel(axis_name)
        if residual_label:
            pylab.ylabel(ylab)
        if title:
            pylab.title('')
    print('made mag plot')
    pylab.show()
########################
def makeCutPlots(cat, results, names, mag_name, magerr_name ,color_function , colorterm, iaper = -1):
    """Diagnostic panels showing, for each selection cut, the objects that
    FAILED that cut (residual vs. reference magnitude).

    iaper >= 0 selects one aperture column of a 2-D magnitude array;
    iaper < 0 uses the scalar magnitude column and subtracts the 3-second
    bootstrap zeropoint __3sec_zp__.
    """
    cuts=[]
    pylab.figure(1)
    # Each entry is the COMPLEMENT of a cut: True where the object fails it.
    cuts.append( (numpy.logical_not(color_function(cat[names.sdss_color])))) # color cut
    peakvals = cat['SEx_BackGr'] + cat['SEx_MaxVal']
    cuts.append(numpy.logical_not(peakvals < 20000)) # Saturation Cut
    cuts.append(numpy.logical_not(cat['SEx_Flag']==0)) # Flag
    cuts.append(numpy.logical_not(cat['Clean'] == 1)) # Clean
    titles=[]
    titles.append('colorcut')
    titles.append('Saturation Cut')
    titles.append('Flag')
    titles.append('Clean')
    for i in range(len(cuts)):
        print 'iaper is', iaper
        if iaper>=0:
            # Aperture photometry: take column iaper of the 2-D mag arrays.
            theseobjs = cat.filter(numpy.logical_and(cuts[i],numpy.abs(cat[mag_name][:,iaper])<80))
            cutData = CalibrationData(mag = theseobjs[mag_name][:,iaper],
                                      mag_err = theseobjs[magerr_name][:,iaper],
                                      refmag = theseobjs[names.sdss_mag],
                                      refmag_err = theseobjs[names.sdss_magerr],
                                      color = theseobjs[names.sdss_color],
                                      colorerr = theseobjs[names.sdss_color_err],
                                      colorterm = colorterm[0])
        else:
            # Scalar magnitudes; remove the 3-second bootstrap zeropoint.
            theseobjs = cat.filter(numpy.logical_and(cuts[i],numpy.abs(cat[mag_name])<80))
            cutData = CalibrationData(mag = theseobjs[mag_name] - __3sec_zp__,
                                      mag_err = theseobjs[magerr_name],
                                      refmag = theseobjs[names.sdss_mag],
                                      refmag_err = theseobjs[names.sdss_magerr],
                                      color = theseobjs[names.sdss_color],
                                      colorerr = theseobjs[names.sdss_color_err],
                                      colorterm = colorterm[0])
        smag = cutData.refmag
        sub_m_sln = cutData.mag - (cutData.refmag -results.zp )
        errs = cutData.errs()
#        print titles[i]
        print 'smag = ',smag
        print 'sub_m_sln = ', sub_m_sln
        print 'err = ', errs
        # Keep only objects with a positive reference magnitude.
        smag2=[]
        sub2=[]
        err2=[]
        for j in range(len(smag)):
            if smag[j]>0:
                smag2.append(smag[j])
                sub2.append(sub_m_sln[j])
                err2.append(errs[j])
        smag=smag2
        sub_m_sln=sub2
        errs = err2
        if len(smag):
            # 2x3 grid; only the first len(cuts) panels are populated.
            pylab.subplot(2,3,i+1)
            pylab.errorbar(smag, sub_m_sln, errs, fmt='b.')
            pylab.axhline(0, color='r')
            pylab.xlabel(names.sdss_mag,fontsize='small')
            pylab.ylabel(titles[i])
    pylab.show()
################################
### TESTING
################################
class TestingDBEntry(object):
    """Lightweight fake database record: an id plus arbitrary keyword
    fields that are readable as attributes."""
    def __init__(self, id, **fields):
        self.id = id
        self.fields = fields
    def __getattr__(self, name):
        # Invoked only for names not found through normal lookup; expose the
        # keyword fields as if they were attributes.
        try:
            return self.fields[name]
        except KeyError:
            raise AttributeError
###
class TestingDatabase(object):
    """In-memory stand-in for the photometry database used by the tests.

    Records every call in one of three logs so tests can assert on what
    was registered and which calibration was made active.
    """
    def __init__(self):
        self.reset()
    def reset(self):
        # One log per API entry point.
        self.photoentries = []
        self.calibrations = []
        self.specialentries = []
    def registerPhotometricCalibration(self, cluster, fitresults, **specification):
        entry = TestingDBEntry(len(self.photoentries), cluster = cluster, fitresults = fitresults, **specification)
        self.photoentries.append(entry)
        return entry
    def registerSpecialFiltersCalibration(self, cluster, file, zp, **specification):
        entry = TestingDBEntry(len(self.specialentries), cluster = cluster, file = file, zp = zp, **specification)
        self.specialentries.append(entry)
        return entry
    def updateCalibration(self, cluster, calibration, **specification):
        self.calibrations.append(TestingDBEntry(len(self.calibrations), cluster = cluster, calibration = calibration, **specification))
####################
class TestRegularCalib(unittest.TestCase):
    def setUp(self):
        """Create a fresh in-memory fake database for each test."""
        self.db = TestingDatabase()
####
    def testStdZP(self):
        """standardCalibration recovers the zeropoint injected into a
        synthetic catalog built from the Pickles stellar library."""
        filterName = 'SUBARU-10_2-1-W-J-V'
        pickles = ldac.openObjectFile('Pickles.cat', 'PICKLES')
        pickles_sdss = ldac.openObjectFile('Pickles.cat', 'SDSS')
        sample = numpy.random.randint(0, len(pickles), 100)
        targetZP = 27.15
        # NOTE(review): seqnr is built but not included in the catalog below.
        seqnr = pyfits.Column(name = 'SeqNr', format = 'K', array = numpy.arange(100))
        mags = pyfits.Column(name = 'SEx_MAG_APER1-%s' % filterName,
                             format = 'E',
                             array = pickles[filterName][sample] + targetZP)
        magerrs = pyfits.Column(name = 'SEx_MAGERR_APER1-%s' % filterName,
                                format = 'E',
                                array = 0.05 * numpy.ones(100))
        sdss = pyfits.Column(name = 'gmag', format = 'E', array = pickles_sdss['gp'][sample])
        sdsserr = pyfits.Column(name = 'gerr', format = 'E', array = 0.1 * numpy.ones(100))
        sdsscolor = pyfits.Column(name = 'gmr', format = 'E', array = pickles_sdss['gp'][sample] - pickles_sdss['rp'][sample])
        sdsscolorerr = pyfits.Column(name = 'gmrerr',
                                     format = 'E',
                                     array = numpy.sqrt(2)*0.1*numpy.ones(100))
        clean = pyfits.Column(name = 'Clean', format = 'K', array = numpy.ones(100))
        backgr = pyfits.Column(name = 'SEx_BackGr', format = 'E', array = numpy.ones(100))
        maxval = pyfits.Column(name = 'SEx_MaxVal', format = 'E', array = numpy.ones(100))
        flag = pyfits.Column(name = 'SEx_Flag', format = 'K', array = numpy.zeros(100))
        cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs([mags, magerrs, clean,
                                                                           backgr, maxval, flag,
                                                                           sdss, sdsserr, sdsscolor,
                                                                           sdsscolorerr])))
        allfits = standardCalibration('TestCluster', filterName, cat, photometry_db = None, plotdir = None)
        self.assertEquals(len(allfits), 1)
        fitresult = allfits[0]
        self.assertTrue(fitresult.fixedcolor)
        # The fitted zp is -(injected offset), so zp + targetZP ~ 0.
        self.assertTrue(numpy.abs(fitresult.zp + targetZP) < 0.25)
###
    def testStdDatabase(self):
        """standardCalibration stores exactly one photo entry and one active
        calibration in the database, with the expected fields."""
        clustername = 'TestCluster'
        filtername = 'SUBARU-10_2-1-W-J-V'
        pickles = ldac.openObjectFile('Pickles.cat', 'PICKLES')
        pickles_sdss = ldac.openObjectFile('Pickles.cat', 'SDSS')
        sample = numpy.random.randint(0, len(pickles), 100)
        targetZP = 27.15
        # NOTE(review): seqnr is built but not included in the catalog below.
        seqnr = pyfits.Column(name = 'SeqNr', format = 'K', array = numpy.arange(100))
        mags = pyfits.Column(name = 'SEx_MAG_APER1-%s' % filtername,
                             format = 'E',
                             array = pickles[filtername][sample] + targetZP)
        magerrs = pyfits.Column(name = 'SEx_MAGERR_APER1-%s' % filtername,
                                format = 'E',
                                array = 0.05 * numpy.ones(100))
        sdss = pyfits.Column(name = 'gmag', format = 'E', array = pickles_sdss['gp'][sample])
        sdsserr = pyfits.Column(name = 'gerr', format = 'E', array = 0.1 * numpy.ones(100))
        sdsscolor = pyfits.Column(name = 'gmr', format = 'E', array = pickles_sdss['gp'][sample] - pickles_sdss['rp'][sample])
        sdsscolorerr = pyfits.Column(name = 'gmrerr',
                                     format = 'E',
                                     array = numpy.sqrt(2)*0.1*numpy.ones(100))
        clean = pyfits.Column(name = 'Clean', format = 'K', array = numpy.ones(100))
        backgr = pyfits.Column(name = 'SEx_BackGr', format = 'E', array = numpy.ones(100))
        maxval = pyfits.Column(name = 'SEx_MaxVal', format = 'E', array = numpy.ones(100))
        flag = pyfits.Column(name = 'SEx_Flag', format = 'K', array = numpy.zeros(100))
        cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs([mags, magerrs, clean,
                                                                           backgr, maxval, flag,
                                                                           sdss, sdsserr, sdsscolor,
                                                                           sdsscolorerr])))
        allfits = standardCalibration(clustername, filtername, cat, photometry_db = self.db, plotdir = None)
        # Registered fit entry.
        self.assertEquals(len(self.db.photoentries), 1)
        photocalib = self.db.photoentries[0]
        self.assertEquals(sorted('cluster fitresults filter'.split()), sorted(photocalib.fields.keys()))
        self.assertEquals(photocalib.cluster, clustername)
        self.assertEquals(photocalib.filter, filtername)
        self.assertEquals(photocalib.fitresults, allfits[0])
        # Active calibration points at the registered entry.
        self.assertEquals(len(self.db.calibrations), 1)
        calib = self.db.calibrations[0]
        self.assertEquals(sorted('cluster calibration filter'.split()), sorted(calib.fields.keys()))
        self.assertEquals(calib.cluster, clustername)
        self.assertEquals(calib.calibration, photocalib)
        self.assertEquals(calib.filter, filtername)
####
    def testAltMagDatabase(self):
        """Same as testStdDatabase but exercising a non-default fluxtype
        ('ISO' magnitude columns)."""
        clustername = 'TestCluster'
        filtername = 'SUBARU-10_2-1-W-J-V'
        pickles = ldac.openObjectFile('Pickles.cat', 'PICKLES')
        pickles_sdss = ldac.openObjectFile('Pickles.cat', 'SDSS')
        sample = numpy.random.randint(0, len(pickles), 100)
        targetZP = 27.15
        # NOTE(review): seqnr is built but not included in the catalog below.
        seqnr = pyfits.Column(name = 'SeqNr', format = 'K', array = numpy.arange(100))
        mags = pyfits.Column(name = 'SEx_MAG_ISO-%s' % filtername,
                             format = 'E',
                             array = pickles[filtername][sample] + targetZP)
        magerrs = pyfits.Column(name = 'SEx_MAGERR_ISO-%s' % filtername,
                                format = 'E',
                                array = 0.05 * numpy.ones(100))
        sdss = pyfits.Column(name = 'gmag', format = 'E', array = pickles_sdss['gp'][sample])
        sdsserr = pyfits.Column(name = 'gerr', format = 'E', array = 0.1 * numpy.ones(100))
        sdsscolor = pyfits.Column(name = 'gmr', format = 'E', array = pickles_sdss['gp'][sample] - pickles_sdss['rp'][sample])
        sdsscolorerr = pyfits.Column(name = 'gmrerr',
                                     format = 'E',
                                     array = numpy.sqrt(2)*0.1*numpy.ones(100))
        clean = pyfits.Column(name = 'Clean', format = 'K', array = numpy.ones(100))
        backgr = pyfits.Column(name = 'SEx_BackGr', format = 'E', array = numpy.ones(100))
        maxval = pyfits.Column(name = 'SEx_MaxVal', format = 'E', array = numpy.ones(100))
        flag = pyfits.Column(name = 'SEx_Flag', format = 'K', array = numpy.zeros(100))
        cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs([mags, magerrs, clean,
                                                                           backgr, maxval, flag,
                                                                           sdss, sdsserr, sdsscolor,
                                                                           sdsscolorerr])))
        allfits = standardCalibration(clustername, filtername, cat, fluxtype = 'ISO', photometry_db = self.db, plotdir = None)
        self.assertEquals(len(self.db.photoentries), 1)
        photocalib = self.db.photoentries[0]
        self.assertEquals(sorted('cluster fitresults filter'.split()), sorted(photocalib.fields.keys()))
        self.assertEquals(photocalib.cluster, clustername)
        self.assertEquals(photocalib.filter, filtername)
        self.assertEquals(photocalib.fitresults, allfits[0])
        self.assertEquals(len(self.db.calibrations), 1)
        calib = self.db.calibrations[0]
        self.assertEquals(sorted('cluster calibration filter'.split()), sorted(calib.fields.keys()))
        self.assertEquals(calib.cluster, clustername)
        self.assertEquals(calib.calibration, photocalib)
        self.assertEquals(calib.filter, filtername)
#####
    def testOtherSpecificationsDatabase(self):
        """Extra specification key/value pairs are stored on both the photo
        entry and the active calibration record."""
        clustername = 'TestCluster'
        filtername = 'SUBARU-10_2-1-W-J-V'
        pickles = ldac.openObjectFile('Pickles.cat', 'PICKLES')
        pickles_sdss = ldac.openObjectFile('Pickles.cat', 'SDSS')
        sample = numpy.random.randint(0, len(pickles), 100)
        targetZP = 27.15
        # NOTE(review): seqnr is built but not included in the catalog below.
        seqnr = pyfits.Column(name = 'SeqNr', format = 'K', array = numpy.arange(100))
        mags = pyfits.Column(name = 'SEx_MAG_APER1-%s' % filtername,
                             format = 'E',
                             array = pickles[filtername][sample] + targetZP)
        magerrs = pyfits.Column(name = 'SEx_MAGERR_APER1-%s' % filtername,
                                format = 'E',
                                array = 0.05 * numpy.ones(100))
        sdss = pyfits.Column(name = 'gmag', format = 'E', array = pickles_sdss['gp'][sample])
        sdsserr = pyfits.Column(name = 'gerr', format = 'E', array = 0.1 * numpy.ones(100))
        sdsscolor = pyfits.Column(name = 'gmr', format = 'E', array = pickles_sdss['gp'][sample] - pickles_sdss['rp'][sample])
        sdsscolorerr = pyfits.Column(name = 'gmrerr',
                                     format = 'E',
                                     array = numpy.sqrt(2)*0.1*numpy.ones(100))
        clean = pyfits.Column(name = 'Clean', format = 'K', array = numpy.ones(100))
        backgr = pyfits.Column(name = 'SEx_BackGr', format = 'E', array = numpy.ones(100))
        maxval = pyfits.Column(name = 'SEx_MaxVal', format = 'E', array = numpy.ones(100))
        flag = pyfits.Column(name = 'SEx_Flag', format = 'K', array = numpy.zeros(100))
        cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs([mags, magerrs, clean,
                                                                           backgr, maxval, flag,
                                                                           sdss, sdsserr, sdsscolor,
                                                                           sdsscolorerr])))
        allfits = standardCalibration(clustername, filtername, cat, photometry_db = self.db, plotdir = None,
                                      specification = {'myspec' :'custom'})
        self.assertEquals(len(self.db.photoentries), 1)
        photocalib = self.db.photoentries[0]
        self.assertEquals(sorted('cluster fitresults filter myspec'.split()), sorted(photocalib.fields.keys()))
        self.assertEquals(photocalib.cluster, clustername)
        self.assertEquals(photocalib.filter, filtername)
        self.assertEquals(photocalib.fitresults, allfits[0])
        self.assertEquals(photocalib.myspec, 'custom')
        self.assertEquals(len(self.db.calibrations), 1)
        calib = self.db.calibrations[0]
        self.assertEquals(sorted('cluster calibration filter myspec'.split()), sorted(calib.fields.keys()))
        self.assertEquals(calib.cluster, clustername)
        self.assertEquals(calib.calibration, photocalib)
        self.assertEquals(calib.filter, filtername)
        self.assertEquals(calib.myspec, 'custom')
#####
    def testThreeSec(self):
        """threeSecondCalibration registers its result under the
        '<filter>_3sec' name.

        NOTE(review): a second method with this same name is defined later
        in the class, so this definition is shadowed and never runs under
        unittest -- one of the two should be renamed.
        """
        clustername = 'TestCluster'
        filtername = 'SUBARU-10_2-1-W-J-V'
        pickles = ldac.openObjectFile('Pickles.cat', 'PICKLES')
        pickles_sdss = ldac.openObjectFile('Pickles.cat', 'SDSS')
        sample = numpy.random.randint(0, len(pickles), 100)
        targetZP = 27.15
        # NOTE(review): seqnr is built but not included in the catalog below.
        seqnr = pyfits.Column(name = 'SeqNr', format = 'K', array = numpy.arange(100))
        mags = pyfits.Column(name = 'SEx_MAG_AUTO',
                             format = 'E',
                             array = pickles[filtername][sample] + targetZP)
        magerrs = pyfits.Column(name = 'SEx_MAGERR_AUTO',
                                format = 'E',
                                array = 0.05 * numpy.ones(100))
        sdss = pyfits.Column(name = 'gmag', format = 'E', array = pickles_sdss['gp'][sample])
        sdsserr = pyfits.Column(name = 'gerr', format = 'E', array = 0.1 * numpy.ones(100))
        sdsscolor = pyfits.Column(name = 'gmr', format = 'E', array = pickles_sdss['gp'][sample] - pickles_sdss['rp'][sample])
        sdsscolorerr = pyfits.Column(name = 'gmrerr',
                                     format = 'E',
                                     array = numpy.sqrt(2)*0.1*numpy.ones(100))
        clean = pyfits.Column(name = 'Clean', format = 'K', array = numpy.ones(100))
        backgr = pyfits.Column(name = 'SEx_BackGr', format = 'E', array = numpy.ones(100))
        maxval = pyfits.Column(name = 'SEx_MaxVal', format = 'E', array = numpy.ones(100))
        flag = pyfits.Column(name = 'SEx_Flag', format = 'K', array = numpy.zeros(100))
        cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs([mags, magerrs, clean,
                                                                           backgr, maxval, flag,
                                                                           sdss, sdsserr, sdsscolor,
                                                                           sdsscolorerr])))
        print self.db
        fit = threeSecondCalibration(clustername, 'W-J-V', 'SUBARU-10_2-1', cat, photometry_db = self.db, plotdir = None)
        self.assertEquals(len(self.db.photoentries), 1)
        photocalib = self.db.photoentries[0]
        self.assertEquals(sorted('cluster fitresults filter'.split()), sorted(photocalib.fields.keys()))
        self.assertEquals(photocalib.cluster, clustername)
        self.assertEquals(photocalib.filter, '%s_3sec' % filtername)
        self.assertEquals(photocalib.fitresults, fit)
        self.assertEquals(len(self.db.calibrations), 1)
        calib = self.db.calibrations[0]
        self.assertEquals(sorted('cluster calibration filter'.split()), sorted(calib.fields.keys()))
        self.assertEquals(calib.cluster, clustername)
        self.assertEquals(calib.calibration, photocalib)
        self.assertEquals(calib.filter, '%s_3sec' % filtername)
#####
def testThreeSec(self):
clustername = 'TestCluster'
filtername = 'SUBARU-10_2-1-W-J-V'
pickles = ldac.openObjectFile('Pickles.cat', 'PICKLES')
pickles_sdss = ldac.openObjectFile('Pickles.cat', 'SDSS')
sample = numpy.random.randint(0, len(pickles), 100)
targetZP = 27.15
seqnr = pyfits.Column(name = 'SeqNr', format = 'K', array = numpy.arange(100))
mags = pyfits.Column(name = 'SEx_MAG_AUTO',
format = 'E',
array = pickles[filtername][sample] + targetZP)
magerrs = pyfits.Column(name = 'SEx_MAGERR_AUTO',
format = 'E',
array = 0.05 * numpy.ones(100))
sdss = pyfits.Column(name = 'gmag', format = 'E', array = pickles_sdss['gp'][sample])
sdsserr = pyfits.Column(name = 'gerr', format = 'E', array = 0.1 * numpy.ones(100))
sdsscolor = pyfits.Column(name = 'gmr', format = 'E', array = pickles_sdss['gp'][sample] - pickles_sdss['rp'][sample])
sdsscolorerr = pyfits.Column(name = 'gmrerr',
format = 'E',
array = numpy.sqrt(2)*0.1*numpy.ones(100))
clean = pyfits.Column(name = 'Clean', format = 'K', array = numpy.ones(100))
backgr = pyfits.Column(name = 'SEx_BackGr', format = 'E', array = numpy.ones(100))
maxval = pyfits.Column(name = 'SEx_MaxVal', format = 'E', array = numpy.ones(100))
flag = pyfits.Column(name = 'SEx_Flag', format = 'K', array = numpy.zeros(100))
cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs([mags, magerrs, clean,
backgr, maxval, flag,
sdss, sdsserr, sdsscolor,
sdsscolorerr])))
print self.db
fit = threeSecondCalibration(clustername, 'W-J-V', 'SUBARU-10_2-1', cat, photometry_db = self.db, plotdir = None,
specification = {'myspec2' : 'custom'})
self.assertEquals(len(self.db.photoentries), 1)
photocalib = self.db.photoentries[0]
self.assertEquals(sorted('cluster fitresults filter myspec2'.split()), sorted(photocalib.fields.keys()))
self.assertEquals(photocalib.cluster, clustername)
self.assertEquals(photocalib.filter, '%s_3sec' % filtername)
self.assertEquals(photocalib.fitresults, fit)
self.assertEquals(photocalib.myspec2, 'custom')
self.assertEquals(len(self.db.calibrations), 1)
calib = self.db.calibrations[0]
self.assertEquals(sorted('cluster calibration filter myspec2'.split()), sorted(calib.fields.keys()))
self.assertEquals(calib.cluster, clustername)
self.assertEquals(calib.calibration, photocalib)
self.assertEquals(calib.filter, '%s_3sec' % filtername)
self.assertEquals(calib.myspec2, 'custom')
##############
class TestSpecialCalib(unittest.TestCase):
    """Exercises specialCalibration against a synthetic coadd image on disk.

    setUp writes a small FITS image carrying a MAGZP header keyword into the
    directory layout the pipeline expects; tearDown removes it again.
    """
    def setUp(self):
        self.db = TestingDatabase()
        self.maindir = '/tmp'
        self.cluster = 'testcluster'
        self.stdfilter = 'K'
        self.filter = 'SPECIAL-0-1-%s' % self.stdfilter
        self.zp = 27.11
        pathparts = {'maindir' : self.maindir,
                     'cluster' : self.cluster,
                     'filter' : self.stdfilter}
        self.imagedir = '%(maindir)s/%(cluster)s/%(filter)s/SCIENCE/coadd_%(cluster)s_all' % pathparts
        self.imagefile = '%s/coadd.fits' % self.imagedir
        if not os.path.exists(self.imagedir):
            os.makedirs(self.imagedir)
        if not os.path.exists(self.imagefile):
            # A tiny all-ones image is enough; only the MAGZP header matters.
            primary = pyfits.PrimaryHDU(numpy.ones((100,100)))
            primary.header['MAGZP']= self.zp
            primary.writeto(self.imagefile, overwrite=True)
    #####
    def tearDown(self):
        # Remove the file first, then prune the (now empty) directory chain.
        for cleanup, path in ((os.remove, self.imagefile),
                              (os.removedirs, self.imagedir)):
            if os.path.exists(path):
                cleanup(path)
    ######
    def testSpecialCalib(self):
        zp = specialCalibration(self.maindir, self.cluster, self.filter, photometry_db = self.db)
        self.assertEquals(zp, self.zp)
        # Special calibrations bypass the photometry table entirely.
        self.assertEquals(len(self.db.photoentries), 0)
        self.assertEquals(len(self.db.specialentries), 1)
        entry = self.db.specialentries[0]
        self.assertEquals(sorted('cluster filter file zp'.split()), sorted(entry.fields.keys()))
        self.assertEquals(entry.cluster, self.cluster)
        self.assertEquals(entry.filter, self.filter)
        self.assertEquals(entry.zp, self.zp)
        self.assertEquals(entry.file, self.imagefile)
        self.assertEquals(len(self.db.calibrations), 1)
        calib = self.db.calibrations[0]
        self.assertEquals(sorted('cluster calibration filter'.split()), sorted(calib.fields.keys()))
        self.assertEquals(calib.cluster, self.cluster)
        self.assertEquals(calib.calibration, entry)
        self.assertEquals(calib.filter, self.filter)
###############
class FunctionTrapper(object):
    """Stand-in callable that records each invocation instead of acting.

    Every call appends an (args, kwargs) tuple to ``self.calls`` so tests
    can inspect exactly how the trapped function was invoked.
    """
    def __init__(self):
        self.calls = []
    def __call__(self, *args, **kw):
        record = (args, kw)
        self.calls.append(record)
####
class TestMain(unittest.TestCase):
    """Tests for the fit_phot.py command-line driver (main).

    Each test replaces one of the calibration entry points with a
    FunctionTrapper and verifies that main() forwards the expected
    arguments, including --spec key=value specification pairs.
    """
    def setUp(self):
        self.db = TestingDatabase()
        self.catfile = 'fitphot.testcat.cat'
        if os.path.exists(self.catfile):
            # Refuse to clobber a pre-existing file; this fixture owns it.
            raise IOError
        hdu = pyfits.BinTableHDU.from_columns(pyfits.ColDefs([pyfits.Column(name = 'SeqNr', format='K', array = numpy.arange(100))]))
        hdu.header['EXTNAME']= 'PSSC'
        hdu.writeto(self.catfile)
        self.cat = ldac.LDACCat(hdu)
    ###
    def tearDown(self):
        if os.path.exists(self.catfile):
            os.remove(self.catfile)
    ###
    def _boundCallArgs(self, reference, trapper):
        """Reconstruct the effective keyword mapping of the trapped call.

        reference -- the real calibration function whose signature supplies
                     parameter names and default values
        trapper   -- the FunctionTrapper that stood in for it

        Asserts exactly one call was made, then layers defaults, positional
        arguments, and keyword arguments (in that order) into one dict.
        """
        self.assertEquals(len(trapper.calls), 1)
        args, kw = trapper.calls[0]
        expectedArgs, varargname, varkwname, defaults = inspect.getargspec(reference)
        compiledArgs = {}
        # Defaults align with the *last* len(defaults) parameters.
        for key, val in zip(reversed(expectedArgs), reversed(defaults)):
            compiledArgs[key] = val
        for key, val in zip(expectedArgs, args):
            compiledArgs[key] = val
        compiledArgs.update(kw)
        return compiledArgs
    ###
    def testStdCalib(self):
        stdCal = FunctionTrapper()
        commandline = ('./fit_phot.py -c testcluster -i %s -f SUBARU-10_2-1-W-J-V --fluxtype ISO --spec fluxtype=iso --spec myspec=custom' % self.catfile).split()
        main(argv = commandline,
             standardCalibration = stdCal,
             threeSecondCalibration = None,
             specialCalibration = None,
             photometry_db = self.db)
        compiledArgs = self._boundCallArgs(standardCalibration, stdCal)
        self.assertEquals(compiledArgs['cluster'], 'testcluster')
        self.assertEquals(compiledArgs['filter'], 'SUBARU-10_2-1-W-J-V')
        self.assertEquals(compiledArgs['fluxtype'], 'ISO')
        self.assertEquals(compiledArgs['plotdir'], None)
        self.assertEquals(compiledArgs['freecolor'], False)
        self.assertEquals(compiledArgs['photometry_db'], self.db)
        self.assertEquals(compiledArgs['specification'], {'myspec' : 'custom', 'fluxtype' : 'iso'})
    #############
    def test3Sec(self):
        threeSecCal = FunctionTrapper()
        commandline = ('./fit_phot.py -c testcluster -i %s -t SUBARU-10_2-1 -f W-J-V --spec fluxtype=iso --spec myspec=custom -3' % self.catfile).split()
        main(argv = commandline,
             standardCalibration = None,
             threeSecondCalibration = threeSecCal,
             specialCalibration = None,
             photometry_db = self.db)
        compiledArgs = self._boundCallArgs(threeSecondCalibration, threeSecCal)
        self.assertEquals(compiledArgs['cluster'], 'testcluster')
        self.assertEquals(compiledArgs['stdfilter'], 'W-J-V')
        self.assertEquals(compiledArgs['filterPrefix'], 'SUBARU-10_2-1')
        self.assertEquals(compiledArgs['plotdir'], None)
        self.assertEquals(compiledArgs['freecolor'], False)
        self.assertEquals(compiledArgs['photometry_db'], self.db)
        self.assertEquals(compiledArgs['specification'], {'fluxtype':'iso', 'myspec':'custom'})
    #############
    def testSpecial(self):
        specialCal = FunctionTrapper()
        commandline = './fit_phot.py -c testcluster -m ./ -f K --spec fluxtype=ISO --spec myspec=custom -s'.split()
        main(argv = commandline,
             standardCalibration = None,
             threeSecondCalibration = None,
             specialCalibration = specialCal,
             photometry_db = self.db)
        compiledArgs = self._boundCallArgs(specialCalibration, specialCal)
        self.assertEquals(compiledArgs['maindir'], './')
        self.assertEquals(compiledArgs['cluster'], 'testcluster')
        self.assertEquals(compiledArgs['filter'], 'K')
        self.assertEquals(compiledArgs['photometry_db'], self.db)
        self.assertEquals(compiledArgs['specification'], {'myspec':'custom', 'fluxtype':'ISO'})
#####################
def test():
    """Run this module's unit-test suite with verbose output."""
    loader = unittest.TestLoader()
    cases = [TestRegularCalib, TestSpecialCalib, TestMain]
    suite = unittest.TestSuite([loader.loadTestsFromTestCase(case)
                                for case in cases])
    unittest.TextTestRunner(verbosity=2).run(suite)
################################
### COMMAND LINE EXECUTABLE
################################
if __name__ == '__main__':
    # A single 'test' argument runs the unit-test suite; any other
    # invocation dispatches to the normal calibration driver.
    if len(sys.argv) == 2 and sys.argv[1] == 'test':
        test()
    else:
        main()
| StarcoderdataPython |
11285039 | import fnmatch
import re
from validator.constants import BUGZILLA_BUG
from validator.compat import (FX4_DEFINITION, FX5_DEFINITION, FX6_DEFINITION,
FX7_DEFINITION, FX8_DEFINITION, FX9_DEFINITION,
FX11_DEFINITION, FX12_DEFINITION, TB7_DEFINITION,
TB10_DEFINITION, TB11_DEFINITION, TB12_DEFINITION)
from validator.contextgenerator import ContextGenerator
# User-facing warning strings and documentation links reused by several
# checks in run_regex_tests() below.
NP_WARNING = "Network preferences may not be modified."
EUP_WARNING = "Extension update settings may not be modified."
NSINHS_LINK = ("https://developer.mozilla.org/en/XPCOM_Interface_Reference"
               "/nsINavHistoryService")
TB7_LINK = "https://developer.mozilla.org/en/Thunderbird_7_for_developers"
# Patterns that are always disallowed, mapped to the warning emitted on match.
GENERIC_PATTERNS = {
    r"globalStorage\[.*\].password":
        "Global Storage may not be used to store passwords.",
    r"launch\(\)":
        "Use of 'launch()' is disallowed because of restrictions on "
        "nsILocalFile. If the code does not use nsILocalFile, consider a "
        "different function name."}
# JS category hunting; bug 635423
# Generate regexes for all of them. Note that they all begin with
# "JavaScript". Capitalization matters, bro.
# Each category name is matched quoted ('...' or "...") or hyphenated.
CATEGORY_REGEXES = (
        map(re.compile,
            map(lambda r: '''"%s"|'%s'|%s''' % (r, r, r.replace(' ', '-')),
                map(lambda r: "%s%s" % ("JavaScript ", r),
                    ("global constructor",
                     "global constructor prototype alias",
                     "global property",
                     "global privileged property",
                     "global static nameset",
                     "global dynamic nameset",
                     "DOM class",
                     "DOM interface")))))
PASSWORD_REGEX = re.compile("password", re.I)
# Assignments onto built-in prototypes (String.prototype.x = ... etc.).
PROTOTYPE_REGEX = re.compile(r"(String|Object|Number|Date|RegExp|Function|"
                             r"Boolean|Array|Iterator)\.prototype"
                             r"(\.[a-zA-Z0-9]+|\[.+\]) =", re.I)
# (pattern, warning title, description) triples for SDK chrome misuse.
CHROME_PATTERNS = (
    (r"(?<![\'\"])require\s*\(\s*[\'\"]"
     r"(chrome|window-utils|observer-service)"
     r"[\'\"]\s*\)",
     'Usage of non-SDK interface',
     "This SDK-based add-on uses interfaces that aren't part of the SDK."),
)
# DOM mutation events; bug 642153
DOM_MUTATION_REGEXES = map(re.compile,
    ("DOMAttrModified", "DOMAttributeNameChanged",
     "DOMCharacterDataModified", "DOMElementNameChanged",
     "DOMNodeInserted", "DOMNodeInsertedIntoDocument", "DOMNodeRemoved",
     "DOMNodeRemovedFromDocument", "DOMSubtreeModified"))
# Interfaces removed per Gecko release, mapped to the tracking bug number
# (or, for FX12, optionally a (bug, message) tuple).
FX6_INTERFACES = {"nsIDOMDocumentTraversal": 655514,
                  "nsIDOMDocumentRange": 655513,
                  "IWeaveCrypto": 651596}
FX7_INTERFACES = {"nsIDOMDocumentStyle": 658904,
                  "nsIDOMNSDocument": 658906,
                  "nsIDOM3TypeInfo": 660539,
                  "nsIDOM3Node": 659053}
FX8_INTERFACES = {"nsISelection2": 672536,
                  "nsISelection3": 672536}
FX11_INTERFACES = {"nsICharsetResolver": 700490}
FX12_INTERFACES = {"nsIProxyObjectManager":
                       (675221,
                        "This add-on uses nsIProxyObjectManager, which was "
                        "removed in Gecko 12."),
                   "documentCharsetInfo": 713825,
                   "nsIJetpack(Service)?":
                       (711838,
                        "This add-on uses the Jetpack service, which was "
                        "deprecated long ago and is no longer included in "
                        "Gecko 12. Please update your add-on to use a more "
                        "recent version of the Add-ons SDK.")}
# Removed/renamed Thunderbird strings and JS symbols, mapped to bug numbers.
TB11_STRINGS = {"newToolbarCmd\.(label|tooltip)": 694027,
                "openToolbarCmd\.(label|tooltip)": 694027,
                "saveToolbarCmd\.(label|tooltip)": 694027,
                "publishToolbarCmd\.(label|tooltip)": 694027,
                "messengerWindow\.title": 701671,
                "folderContextSearchMessages\.(label|accesskey)": 652555,
                "importFromSeamonkey2\.(label|accesskey)": 689437,
                "comm4xMailImportMsgs\.properties": 689437,
                "specialFolderDeletionErr": 39121,
                "sourceNameSeamonkey": 689437,
                "sourceNameOExpress": 689437,
                "sourceNameOutlook": 689437,
                "failedDuplicateAccount": 709020,}
TB12_STRINGS = {"editImageMapButton\.(label|tooltip)": 717240,
                "haveSmtp[1-3]\.suffix2": 346306,
                "EditorImage(Map|MapHotSpot)\.dtd": 717240,
                "subscribe-errorInvalidOPMLFile": 307629,}
TB11_JS = {"onViewToolbarCommand": 644169,
           "nsContextMenu": 680192,
           "MailMigrator\.migrateMail": 712395,
           "AddUrlAttachment": 708982,
           "makeFeedObject": 705504,
           "deleteFeed": 705504,}
TB12_JS = {"TextEditorOnLoad": 695842,
           "Editor(OnLoad|Startup|Shutdown|CanClose)": 695842,
           "gInsertNewIMap": 717240,
           "editImageMap": 717240,
           "(SelectAll|MessageHas)Attachments": 526998,}
def run_regex_tests(document, err, filename, context=None, is_js=False):
    """Run all of the regex-based JS tests.

    document -- raw text of the file being scanned
    err      -- error bundler used to record warnings/notices/metadata
    filename -- path of the file within the add-on package
    context  -- optional pre-built ContextGenerator; built lazily if omitted
    is_js    -- True when the document is a JavaScript source file
    """
    if context is None:
        context = ContextGenerator(document)

    def _generic_test(pattern, title, message, metadata=None):
        """Run a single regex test; optionally merge metadata into err."""
        # metadata defaults to None (not {}) to avoid a shared mutable default.
        match = pattern.search(document)
        if match:
            line = context.get_line(match.start())
            err.warning(
                err_id=("testcases_javascript_regex", "generic",
                        "_generic_test"),
                warning=title,
                description=message,
                filename=filename,
                line=line,
                context=context)
            if metadata:
                err.metadata.update(metadata)

    def _substring_test(pattern, title, message):
        """Run a single substring test."""
        match = re.compile(pattern).search(document)
        if match:
            line = context.get_line(match.start())
            # NOTE(review): err_id reuses "_generic_test"; kept as-is so
            # existing consumers matching on the id are unaffected.
            err.warning(
                err_id=("testcases_javascript_regex", "generic",
                        "_generic_test"),
                warning=title,
                description=message,
                filename=filename,
                line=line,
                context=context)

    def _compat_test(pattern, title, message, compatibility_type,
                     appversions=None, logFunc=err.notice):
        """Run a single regex test and return a compatibility message."""
        match = pattern.search(document)
        if match:
            line = context.get_line(match.start())
            logFunc(
                ("testcases_javascript_regex", "generic", "_compat_test"),
                title,
                description=message,
                filename=filename,
                line=line,
                context=context,
                compatibility_type=compatibility_type,
                for_appversions=appversions,
                tier=5)

    # Preference-branch checks do not apply to default-preference files,
    # which legitimately set preferences.
    if not filename.startswith("defaults/preferences/"):
        from javascript.predefinedentities import (BANNED_PREF_BRANCHES,
                                                   BANNED_PREF_REGEXPS)
        for pattern in BANNED_PREF_REGEXPS:
            _generic_test(
                re.compile("[\"']" + pattern),
                "Potentially unsafe preference branch referenced",
                "Extensions should not alter preferences matching /%s/"
                    % pattern)

        for branch in BANNED_PREF_BRANCHES:
            _substring_test(
                branch.replace(r".", r"\."),
                "Potentially unsafe preference branch referenced",
                "Extensions should not alter preferences in the '%s' "
                "preference branch" % branch)

    for pattern, message in GENERIC_PATTERNS.items():
        _generic_test(
            re.compile(pattern),
            "Potentially unsafe JS in use.",
            message)

    for pattern, title, message in CHROME_PATTERNS:
        _generic_test(re.compile(pattern), title, message,
                      {'requires_chrome': True})

    if is_js:
        for pattern in CATEGORY_REGEXES:
            _generic_test(
                pattern,
                "Potential JavaScript category registration",
                "Add-ons should not register JavaScript categories. It "
                "appears that a JavaScript category was registered via a "
                "script to attach properties to JavaScript globals. This "
                "is not allowed.")

        if fnmatch.fnmatch(filename, "defaults/preferences/*.js"):
            _generic_test(
                PASSWORD_REGEX,
                "Passwords may be stored in /defaults/preferences JS files.",
                "Storing passwords in the preferences is insecure and the "
                "Login Manager should be used instead.")

        # JS modules may legitimately extend prototypes for their own scope.
        is_jsm = filename.endswith(".jsm") or "EXPORTED_SYMBOLS" in document

        if not is_jsm:
            # Have a non-static/dynamic test for prototype extension.
            _generic_test(
                PROTOTYPE_REGEX,
                "JS Prototype extension",
                "It appears that an extension of a built-in JS type was "
                "made. This is not allowed for security and compatibility "
                "reasons.")

        for pattern in DOM_MUTATION_REGEXES:
            _generic_test(
                pattern,
                "DOM Mutation Events Prohibited",
                "DOM mutation events are flagged because of their "
                "deprecated status, as well as their extreme "
                "inefficiency. Consider using a different event.")

    # Firefox 5 Compatibility
    if err.supports_version(FX5_DEFINITION):
        _compat_test(
            re.compile(r"navigator\.language"),
            "navigator.language may not behave as expected",
            ("JavaScript code was found that references "
             "navigator.language, which will no longer indicate "
             "the language of Firefox's UI. To maintain existing "
             "functionality, general.useragent.locale should be "
             "used in place of `navigator.language`."),
            compatibility_type="error",
            appversions=FX5_DEFINITION)

    # Firefox 6 Compatibility
    if err.supports_version(FX6_DEFINITION):
        for pattern, bug in FX6_INTERFACES.items():
            _compat_test(
                re.compile(pattern),
                "Unsupported interface in use",
                ("Your add-on uses interface %s, which has been removed "
                 "from Firefox 6. Please refer to %s for possible "
                 "alternatives.") % (pattern, BUGZILLA_BUG % bug),
                compatibility_type="error",
                appversions=FX6_DEFINITION,
                logFunc=err.warning)

        # app.update.timer
        _compat_test(
            re.compile(r"app\.update\.timer"),
            "app.update.timer is incompatible with Firefox 6",
            ("The 'app.update.timer' preference is being replaced by the "
             "'app.update.timerMinimumDelay' preference in Firefox 6. "
             "Please refer to %s for more details.") %
                (BUGZILLA_BUG % 614181),
            compatibility_type="error",
            appversions=FX6_DEFINITION)
        if is_js:
            # javascript/data: URI usage in the address bar
            _compat_test(
                re.compile(r"['\"](javascript|data):"),
                "javascript:/data: URIs may be incompatible with Firefox "
                "6.",
                ("Loading 'javascript:' and 'data:' URIs through the "
                 "location bar may no longer work as expected in Firefox "
                 "6. If you load these types of URIs, please test your "
                 "add-on on the latest Firefox 6 builds, or refer to %s "
                 "for more information.") %
                    (BUGZILLA_BUG % 656433),
                compatibility_type="warning",
                appversions=FX6_DEFINITION)

    # Firefox 7 Compatibility
    if err.supports_version(FX7_DEFINITION):
        for pattern, bug in FX7_INTERFACES.items():
            _compat_test(
                re.compile(pattern),
                "Unsupported interface in use",
                ("Your add-on uses interface %s, which has been removed "
                 "from Firefox 7. Please refer to %s for possible "
                 "alternatives.") % (pattern, BUGZILLA_BUG % bug),
                compatibility_type="error",
                appversions=FX7_DEFINITION,
                logFunc=err.warning)

        # nsINavHistoryObserver
        _compat_test(
            re.compile(r"nsINavHistoryObserver"),
            "nsINavHistoryObserver interface has changed in Firefox 7",
            ("The nsINavHistoryObserver interface has changed in Firefox "
             "7. Most function calls now required a GUID parameter, "
             "please refer to %s and %s for more information.") %
                (NSINHS_LINK, BUGZILLA_BUG % 633266),
            compatibility_type="error",
            appversions=FX7_DEFINITION)
        # nsIMarkupDocumentViewer_MOZILLA_2_0_BRANCH
        _compat_test(
            re.compile(r"nsIMarkupDocumentViewer_MOZILLA_2_0_BRANCH"),
            "MOZILLA_2_0 Namespace has been merged in Firefox 7",
            ("The '_MOZILLA_2_0_BRANCH' interfaces have been merged out. "
             "You should now use the namespace without the "
             "'_MOZILLA_2_0_BRANCH' suffix. Please refer to %s for more "
             "details.") %
                (BUGZILLA_BUG % 617539),
            compatibility_type="warning",
            appversions=FX7_DEFINITION)

    # Firefox 8 Compatibility
    if err.supports_version(FX8_DEFINITION):
        for pattern, bug in FX8_INTERFACES.items():
            _compat_test(
                re.compile(pattern),
                "Removed, deprecated, or unsupported interface in use.",
                ("The nsISelection2 and nsISelection3 interfaces have "
                 "been removed in Firefox 8. You can use the nsISelection "
                 "interface instead. See %s for more details.") %
                    (BUGZILLA_BUG % bug),
                compatibility_type="error",
                appversions=FX8_DEFINITION,
                logFunc=err.warning)

        # nsIDOMWindowInternal
        NSIDWI_MDN = ("https://developer.mozilla.org/en/"
                      "XPCOM_Interface_Reference/nsIDOMWindow")
        _compat_test(
            re.compile(r"nsIDOMWindowInternal"),
            "nsIDOMWindowInternal has been deprecated in Firefox 8.",
            ("The nsIDOMWindowInternal interface has been deprecated in "
             "Firefox 8. You can use the nsIDOMWindow interface instead. "
             "See %s for more information.") % NSIDWI_MDN,
            compatibility_type="warning",
            appversions=FX8_DEFINITION)

        # ISO8601DateUtils
        # TODO(basta): Make this a string test instead once they're invented.
        ISO8601_MDC = ("https://developer.mozilla.org/en/JavaScript/Reference/"
                       "Global_Objects/Date")
        _compat_test(
            re.compile(r"ISO8601DateUtils"),
            "ISO8601DateUtils.jsm was removed in Firefox 8.",
            ("The ISO8601DateUtils object is no longer available in "
             "Firefox 8. You can use the normal Date object instead. See "
             "%s for more information.") % ISO8601_MDC,
            compatibility_type="error",
            appversions=FX8_DEFINITION,
            logFunc=err.warning)

    # Firefox 9 Compatibility
    if err.supports_version(FX9_DEFINITION):
        TAINTENABLED_BUG = BUGZILLA_BUG % 679971
        _compat_test(
            re.compile(r"navigator\.taintEnabled"),
            "navigator.taintEnabled was removed in Firefox 9.",
            ("The taintEnabled function is no longer available in"
             " Firefox 9. Since this function was only used for "
             "browser detection and this doesn't belong in extension"
             " code, you should remove it if possible. For more "
             "information, please see %s.") % TAINTENABLED_BUG,
            compatibility_type="warning",
            appversions=FX9_DEFINITION,
            logFunc=err.warning)
        XRAYPROPS_BUG = BUGZILLA_BUG % 660233
        _compat_test(
            re.compile(r"\.nodePrincipal"),
            ("nodePrincipal only available in chrome context"),
            ("The nodePrincipal property is no longer accessible from "
             "untrusted scripts. For more information, please see %s."
             ) % XRAYPROPS_BUG,
            compatibility_type="warning",
            appversions=FX9_DEFINITION)
        _compat_test(
            re.compile(r"\.documentURIObject"),
            ("documentURIObject only available in chrome context"),
            ("The documentURIObject property is no longer accessible from "
             "untrusted scripts. For more information, please see %s."
             ) % XRAYPROPS_BUG,
            compatibility_type="warning",
            appversions=FX9_DEFINITION)
        _compat_test(
            re.compile(r"\.baseURIObject"),
            ("baseURIObject only available in chrome context"),
            ("The baseURIObject property is no longer accessible from "
             "untrusted scripts. For more information, please see %s."
             ) % XRAYPROPS_BUG,
            compatibility_type="warning",
            appversions=FX9_DEFINITION)
        _compat_test(
            re.compile(r"nsIGlobalHistory3"),
            "nsIGlobalHistory3 was removed in Firefox 9",
            ("The nsIGlobalHistory3 interface has been removed from Firefox."
             " For more information, please see %s."
             ) % (BUGZILLA_BUG % 568971),
            compatibility_type="warning",
            appversions=FX9_DEFINITION,
            logFunc=err.warning)

        # geo.wifi.* warnings
        # Chained %: the first fill inserts the BUGZILLA_BUG template (which
        # itself contains '%s'), the second fills in the bug number.
        geo_wifi_description = (
            "The geo.wifi.* preferences are no longer created by default "
            "in Gecko 9. Reading them without testing for their presence "
            "can result in unexpected errors. See %s for more "
            "information." % BUGZILLA_BUG % 689252)
        _compat_test(
            re.compile(r"geo\.wifi\.uri"),
            "The preference 'geo.wifi.uri' was removed in Firefox 9",
            geo_wifi_description,
            compatibility_type="error",
            appversions=FX9_DEFINITION,
            logFunc=err.warning)
        _compat_test(
            re.compile(r"geo\.wifi\.protocol"),
            "The preference 'geo.wifi.protocol' was removed in Firefox 9",
            geo_wifi_description,
            compatibility_type="error",
            appversions=FX9_DEFINITION,
            logFunc=err.warning)

    # Firefox 11 Compatibility
    if err.supports_version(FX11_DEFINITION):
        for pattern, bug in FX11_INTERFACES.items():
            _compat_test(
                re.compile(pattern),
                "Unsupported interface in use",
                "Your add-on uses interface %s, which has been removed "
                "from Firefox 11. Please refer to %s for possible "
                "alternatives." % (pattern, BUGZILLA_BUG % bug),
                compatibility_type="error",
                appversions=FX11_DEFINITION,
                logFunc=err.warning)

        # omni.jar renamed
        for instance in re.finditer(r"omni\.jar", document):
            err.warning(
                err_id=("testcases_regex", "regex_regex_tests", "omni.jar"),
                warning="'omni.jar' renamed to 'omni.ja'",
                description="This add-on references omni.jar, which was "
                            "renamed to omni.ja. You should avoid referencing "
                            "this file directly, and at least update this "
                            "reference for any versions that support Firefox "
                            "11 and above. See %s for more information." %
                                BUGZILLA_BUG % 701875,
                filename=filename,
                line=context.get_line(instance.start()),
                context=context,
                for_appversions=FX11_DEFINITION,
                compatibility_type="error",
                tier=5)

    # Firefox 12 Compatibility
    if err.supports_version(FX12_DEFINITION):
        for pattern, bug in FX12_INTERFACES.items():
            # Entries are either a bare bug number or a (bug, message) tuple.
            if isinstance(bug, tuple):
                bug, message = bug
            else:
                message = ("Your add-on uses interface %s, which has been "
                           "removed from Gecko 12.") % pattern
            message = "%s See %s for more information." % (message,
                                                           BUGZILLA_BUG % bug)
            _compat_test(
                re.compile(pattern),
                "Unsupported interface in use",
                message,
                compatibility_type="error",
                appversions=FX12_DEFINITION,
                logFunc=err.warning)

        # Test for `chromemargin` (bug 735876)
        for instance in re.finditer(r"chromemargin", document):
            err.notice(
                err_id=("testcases_regex", "regex_regex_tests", "chromemargin"),
                notice="`chromemargin` attribute changed in Gecko 12",
                description="This add-on uses the chromemargin attribute, "
                            "which after Gecko 12 will not work in the same "
                            "way with values other than 0 or -1. Please see "
                            "%s for more information." % BUGZILLA_BUG % 735876,
                filename=filename,
                line=context.get_line(instance.start()),
                context=context,
                for_appversions=FX12_DEFINITION,
                compatibility_type="error",
                tier=5)

    # Thunderbird 7 Compatibility rdf:addressdirectory
    if err.supports_version(TB7_DEFINITION):
        # dictUtils.js removal
        _compat_test(
            re.compile(r"resource:///modules/dictUtils.js"),
            "dictUtils.js was removed in Thunderbird 7.",
            "The dictUtils.js file is no longer available in "
            "Thunderbird 7. You can use Dict.jsm instead. See "
            "%s for more information." % BUGZILLA_BUG % 621213,
            compatibility_type="error",
            appversions=TB7_DEFINITION,
            logFunc=err.warning)
        # de-RDF the addressbook
        _compat_test(
            re.compile(r"rdf:addressdirectory"),
            "The address book does not use RDF in Thunderbird 7.",
            "The address book was changed to use a look up table in "
            "Thunderbird 7. See %s and %s for more information." %
                (TB7_LINK, BUGZILLA_BUG % 621213),
            compatibility_type="error",
            appversions=TB7_DEFINITION)
        # Second test for de-RDFing the addressbook
        # r"GetResource(.*?)\s*\.\s*QueryInterface(.*?nsIAbDirectory);"
        _compat_test(
            re.compile(r"GetResource\(.*?\)\s*\.\s*"
                       r"QueryInterface\(.*?nsIAbDirectory\)"),
            "The address book does not use RDF in Thunderbird 7.",
            "The address book was changed to use a look up table in "
            "Thunderbird 7. See %s and %s for more information." %
                (TB7_LINK, BUGZILLA_BUG % 621213),
            compatibility_type="error",
            appversions=TB7_DEFINITION)

    # Thunderbird 10 Compatibility
    if err.supports_version(TB10_DEFINITION):
        # gDownloadManagerStrings removal
        _compat_test(
            re.compile(r"gDownloadManagerStrings"),
            "gDownloadManagerStrings was removed in Thunderbird 10.",
            "This global is no longer available in "
            "Thunderbird 10. See %s for more information." %
                BUGZILLA_BUG % 700220,
            compatibility_type="error",
            appversions=TB10_DEFINITION,
            logFunc=err.warning)
        # nsTryToClose.js removal
        _compat_test(
            re.compile(r"nsTryToClose.js"),
            "nsTryToClose.js was removed in Thunderbird 10.",
            "The nsTryToClose.js file is no longer available in "
            "Thunderbird 10. See %s for more information." %
                BUGZILLA_BUG % 539997,
            compatibility_type="error",
            appversions=TB10_DEFINITION,
            logFunc=err.warning)

    # Thunderbird 11 Compatibility
    if err.supports_version(TB11_DEFINITION):
        # specialFoldersDeletionAllowed removal
        _compat_test(
            re.compile(r"specialFoldersDeletionAllowed"),
            "specialFoldersDeletionAllowed was removed in Thunderbird 11.",
            "This global is no longer available in "
            "Thunderbird 11. See %s for more information." %
                BUGZILLA_BUG % 39121,
            compatibility_type="error",
            appversions=TB11_DEFINITION,
            logFunc=err.notice)

        for pattern, bug in TB11_STRINGS.items():
            _compat_test(
                re.compile(pattern),
                "Removed, renamed, or changed strings in use",
                "Your add-on uses string %s, which has been changed or "
                "removed from Thunderbird 11. Please refer to %s for "
                "possible alternatives." % (pattern, BUGZILLA_BUG % bug),
                compatibility_type="error",
                appversions=TB11_DEFINITION,
                logFunc=err.warning)

        for pattern, bug in TB11_JS.items():
            _compat_test(
                re.compile(pattern),
                "Removed, renamed, or changed javascript in use",
                "Your add-on uses the javascript method or class %s, which "
                "has been changed or removed from Thunderbird 11. Please "
                "refer to %s for possible alternatives." %
                    (pattern, BUGZILLA_BUG % bug),
                compatibility_type="error",
                appversions=TB11_DEFINITION,
                logFunc=err.notice)

    # Thunderbird 12 Compatibility
    if err.supports_version(TB12_DEFINITION):
        _compat_test(
            re.compile(r"EdImage(Map|MapHotSpot|MapShapes|Overlay)\.js"),
            "Removed javascript file EdImage*.js in use ",
            "EdImageMap.js, EdImageMapHotSpot.js, "
            "EdImageMapShapes.js, and EdImageMapOverlay.js "
            "were removed in Thunderbird 12. "
            "See %s for more information." % BUGZILLA_BUG % 717240,
            compatibility_type="error",
            appversions=TB12_DEFINITION,
            logFunc=err.notice)

        for pattern, bug in TB12_STRINGS.items():
            _compat_test(
                re.compile(pattern),
                "Removed, renamed, or changed strings in use",
                "Your add-on uses string %s, which has been changed or "
                "removed from Thunderbird 12. Please refer to %s for "
                "possible alternatives." % (pattern, BUGZILLA_BUG % bug),
                compatibility_type="error",
                appversions=TB12_DEFINITION,
                logFunc=err.warning)

        for pattern, bug in TB12_JS.items():
            _compat_test(
                re.compile(pattern),
                "Removed, renamed, or changed javascript in use",
                "Your add-on uses the javascript method or class %s, which "
                "has been changed or removed from Thunderbird 12. Please "
                "refer to %s for possible alternatives." %
                    (pattern, BUGZILLA_BUG % bug),
                compatibility_type="error",
                appversions=TB12_DEFINITION,
                logFunc=err.notice)
| StarcoderdataPython |
3219258 | <reponame>andrewgailey/robogen
# Gir by InvaderZim
# http://robotgame.org/viewrobot/5740
from random import choice
import rg
class defaultdict(dict):
    """Minimal backport of collections.defaultdict for the game sandbox.

    A missing key is populated on first access with default_factory();
    when no factory is set, lookups of absent keys raise KeyError as for
    a plain dict.
    """
    def __init__(self, default_factory=None, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        self.default_factory = default_factory

    def __getitem__(self, key):
        if key in self:
            return dict.__getitem__(self, key)
        return self.__missing__(key)

    def __missing__(self, key):
        factory = self.default_factory
        if factory is None:
            raise KeyError(key)
        value = factory()
        self[key] = value
        return value

    def copy(self):
        return self.__copy__()

    def __copy__(self):
        # Shallow copy preserving the factory.
        return type(self)(self.default_factory, self)

    def __repr__(self):
        return 'defaultdict(%s, %s)' % (self.default_factory, dict.__repr__(self))
class Robot:
# Constants
MIN_ATTACK_DAMAGE, MAX_ATTACK_DAMAGE = rg.settings.attack_range
SUICIDE_DAMAGE = rg.settings.suicide_damage
TURNS, TURNS_TO_KEEP = rg.settings.max_turns, 2
# Locations
ALL_LOCATIONS = set()
SPAWN_LOCATIONS = set()
VALID_LOCATIONS = set()
INVALID_LOCATIONS = set()
# State
COMMANDS = defaultdict(dict) # {turn => {robot_id => command}}
ATTACKS = defaultdict(lambda: defaultdict(int)) # {turn => {location => damage}}
def act(self, game):
turn = game['turn']
if not self.ALL_LOCATIONS:
self.setup()
if turn not in self.COMMANDS:
self.compute_commands(game['robots'], turn)
self.cleanup(turn)
return self.COMMANDS[turn][self.robot_id]
@classmethod
def setup(cls):
cls.SPAWN_LOCATIONS.update(rg.settings.spawn_coords)
cls.INVALID_LOCATIONS.update(rg.settings.obstacles)
for x in xrange(rg.settings.board_size):
for y in xrange(rg.settings.board_size):
location = (x, y)
if location not in cls.SPAWN_LOCATIONS and location not in cls.INVALID_LOCATIONS:
cls.VALID_LOCATIONS.add(location)
cls.ALL_LOCATIONS.update(cls.SPAWN_LOCATIONS, cls.VALID_LOCATIONS)
def compute_commands(self, robots, turn):
my_bots = dict((b.location, b) for b in robots.itervalues() if self.is_my_bot(b))
enemy_bots = dict((b.location, b) for b in robots.itervalues() if self.is_enemy_bot(b))
self.perimeter_scan(my_bots, enemy_bots)
self.perimeter_scan(enemy_bots, my_bots)
for bot in my_bots.itervalues():
self.compute_command(turn, bot, enemy_bots)
@classmethod
def cleanup(cls, turn):
cleanup_turn = turn - cls.TURNS_TO_KEEP
for state in (cls.COMMANDS, cls.ATTACKS):
if cleanup_turn in state:
del state[cleanup_turn]
def compute_command(self, turn, bot, enemy_bots):
if bot.location in self.SPAWN_LOCATIONS:
self.move(turn, bot, choice(bot.movements) if bot.movements else rg.toward(bot.location, rg.CENTER_POINT))
return
# if there are enemies around, attack them
if bot.enemies:
enemies = [enemy_bots[loc] for loc in bot.enemies]
weak_enemies = [b for b in enemies if self.health(b) <= self.MIN_ATTACK_DAMAGE]
if weak_enemies:
self.attack(turn, bot, choice(weak_enemies).location)
return
if self.health(bot) < len(enemies) * self.MIN_ATTACK_DAMAGE:
self.suicide(turn, bot)
return
target = min(enemies, key=self.health)
self.attack(turn, bot, target.location)
return
# if we're in the center, stay put
if bot.location == rg.CENTER_POINT:
self.guard(turn, bot)
return
# move toward the center
self.move(turn, bot, rg.toward(bot.location, rg.CENTER_POINT))
def perimeter_scan(self, bots, opposing_bots):
for bot in bots.itervalues():
bot.enemies, bot.movements = [], []
for loc in self.around(bot, include_spawn=True):
if loc in opposing_bots:
bot.enemies.append(loc)
else:
bot.movements.append(loc)
# Helpers for issuing commands
    def attack(self, turn, bot, location):
        # Record the attack command and credit the guaranteed minimum damage
        # to the target square for this turn's bookkeeping.
        self.COMMANDS[turn][bot.robot_id] = ['attack', location]
        self.ATTACKS[turn][location] += self.MIN_ATTACK_DAMAGE
    def guard(self, turn, bot):
        # Record a guard (stay put, reduced damage taken) command for this bot.
        self.COMMANDS[turn][bot.robot_id] = ['guard']
    def move(self, turn, bot, location):
        # Record a move command toward the given adjacent location.
        self.COMMANDS[turn][bot.robot_id] = ['move', location]
def suicide(self, turn, bot):
self.COMMANDS[turn][bot.robot_id] = ['suicide']
for loc in self.around(bot):
self.ATTACKS[turn][loc] += self.SUICIDE_DAMAGE
# Helpers for filtering
def health(self, bot=None):
return (bot or self).hp
def is_my_bot(self, bot):
return bot.player_id == self.player_id
def is_enemy_bot(self, bot):
return not self.is_my_bot(bot)
# Helpers for locations
def around(self, bot=None, distance=1, include_spawn=False):
bot = bot or self
valid_locations = self.ALL_LOCATIONS if include_spawn else self.VALID_LOCATIONS
if distance == 1:
return [l for l in (self.north(bot), self.east(bot), self.south(bot), self.west(bot)) if l in valid_locations]
return filter(lambda l: rg.wdist(bot.location, l) <= distance, valid_locations)
def north(self, bot=None):
bot = bot or self
return (bot.location[0], bot.location[1] - 1)
def east(self, bot=None):
bot = bot or self
return (bot.location[0] + 1, bot.location[1])
def south(self, bot=None):
bot = bot or self
return (bot.location[0], bot.location[1] + 1)
def west(self, bot=None):
bot = bot or self
return (bot.location[0] - 1, bot.location[1])
| StarcoderdataPython |
8142989 | #!/usr/bin/env python
import brawlbracket
import argparse
# Parse the command line and launch the BrawlBracket web server.
arg_parser = argparse.ArgumentParser(description='Run the BrawlBracket server.')
arg_parser.add_argument('-d', '--debug', dest='debugMode', action='store_true',
                        default=False, help='run in debug mode')
cli_args = arg_parser.parse_args()
brawlbracket.runWebServer(cli_args.debugMode)
| StarcoderdataPython |
1750470 | <gh_stars>0
class Solution:
    def maxArea(self, height: list) -> int:
        """Return the largest container area (LeetCode 11).

        Two-pointer scan: the area between the pointers is bounded by the
        shorter line, so advancing the shorter side is the only way a
        larger area can appear.
        """
        best = 0
        left, right = 0, len(height) - 1
        while left < right:
            area = (right - left) * min(height[left], height[right])
            if area > best:
                best = area
            if height[left] <= height[right]:
                left += 1
            else:
                right -= 1
        return best
| StarcoderdataPython |
3474290 |
from sympy import Symbol, log, sqrt, series, exp

K = Symbol("K")
S = Symbol("S")
softness = Symbol("softness")
x = Symbol("x")

def f(x):
    """Level of K*(1+x) in decibels: 10*log10(K*(1+x))."""
    return 10 * log(K * (1 + x)) / log(10)

def g(x):
    """Soft-knee compression exponent for ratio S and knee width softness."""
    return -(S - 1) / (20 * S) * (sqrt(f(x) ** 2 + softness ** 2) + f(x))

def h(x):
    """Compression gain as a linear factor: 10**g(x)."""
    return exp(g(x) * log(10))

poly = series(h(x), x, n=2)
# Fix: the original used bare `print poly` statements, which are
# Python-2-only syntax (SyntaxError on Python 3); print() behaves
# identically on both interpreters here.
print(poly)
print(poly.coeff(1))
print(poly.coeff(x))
| StarcoderdataPython |
# Locations of the pretrained models and helper files hosted on the
# unibz server, plus the paths used when publishing/logging over SFTP.
CONVERTERS_PREFIX = '_converters'
LIST_PREFIX = '_list'
SERVER_URL = 'https://www.inf.unibz.it'
MODEL_DIR_URL = '{}/~hbabii/pretrained_models'.format(SERVER_URL)
CONVERTERS_URL = '{}{}'.format(MODEL_DIR_URL, CONVERTERS_PREFIX)
MODEL_LIST_URL = '{}{}'.format(MODEL_DIR_URL, LIST_PREFIX)
PATH_TO_MODELS_ON_SERVER = '/home/students/hbabii/public_html/pretrained_models'
SERVER_HOST_NAME = 'actarus.inf.unibz.it'
PATH_TO_LOGS_ON_SERVER = '.pysftp.log'
9618682 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import globalFunctions
import re
import os
import logging
class ReadComicOnlineTo(object):
    """Downloader for readcomiconline.to.

    Dispatches on the URL shape: a 5-segment URL is a whole-series page
    (download every chapter honoring sorting/range options), anything else
    is a single chapter page.
    """

    def __init__(self, manga_url, download_directory, chapter_range, **kwargs):
        conversion = kwargs.get("conversion")
        keep_files = kwargs.get("keep_files")
        self.logging = kwargs.get("log_flag")
        self.sorting = kwargs.get("sorting_order")
        self.image_quality = kwargs.get("image_quality")
        self.comic_name = self.name_cleaner(manga_url)
        self.print_index = kwargs.get("print_index")
        url_split = str(manga_url).split("/")
        if len(url_split) in [5]:  # Sometimes, this value came out to be 6, instead of 5. Hmmmmmmmm weird.
            # Removing "6" from here, because it caused #47
            self.full_series(comic_url=manga_url.replace("&readType=1", ""), comic_name=self.comic_name,
                             sorting=self.sorting, download_directory=download_directory, chapter_range=chapter_range,
                             conversion=conversion, keep_files=keep_files)
        else:
            if "&readType=0" in manga_url:
                manga_url = str(manga_url).replace("&readType=0", "&readType=1")  # All Images in one page!
                # disabled to fix #132 and #145
                # elif "&readType=1" not in manga_url:
                #     manga_url = str(manga_url) + "&readType=1"  # All Images in one page!
            self.single_chapter(manga_url, self.comic_name, download_directory, conversion=conversion,
                                keep_files=keep_files)

    def single_chapter(self, comic_url, comic_name, download_directory, conversion, keep_files):
        """Download every page image of one chapter, then run conversion."""
        # print("Received Comic Url : {0}".format(comic_url))
        print("Fooling CloudFlare...Please Wait...")
        chapter_number = str(comic_url).split("/")[5].split("?")[0].replace("-", " - ")
        source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=comic_url, scrapper_delay=10)
        # Page URLs are pushed into a JS array on the reader page.
        img_list = re.findall(r"lstImages.push\(\"(.*?)\"\);", str(source))
        file_directory = globalFunctions.GlobalFunctions().create_file_directory(chapter_number, comic_name)
        directory_path = os.path.realpath(str(download_directory) + "/" + str(file_directory))
        if not os.path.exists(directory_path):
            os.makedirs(directory_path)
        if str(self.image_quality).lower().strip() in ["low", "worst", "bad", "cancer", "mobile"]:
            print("Downloading In Low Quality...")
        links = []
        file_names = []
        for current_chapter, image_link in enumerate(img_list):
            image_link = image_link.replace("\\", "")
            logging.debug("Image Link : %s" % image_link)
            # "=s0"/"/s0" is the full-resolution variant; swap back to s1600
            # when the user explicitly asked for low quality.
            image_link = image_link.replace("=s1600", "=s0").replace("/s1600", "/s0")  # Change low quality to best.
            if str(self.image_quality).lower().strip() in ["low", "worst", "bad", "cancer", "mobile"]:
                image_link = image_link.replace("=s0", "=s1600").replace("/s0", "/s1600")
            current_chapter += 1
            file_name = str(globalFunctions.GlobalFunctions().prepend_zeroes(current_chapter, len(img_list))) + ".jpg"
            file_names.append(file_name)
            links.append(image_link)
        globalFunctions.GlobalFunctions().multithread_download(chapter_number, comic_name, comic_url, directory_path,
                                                               file_names, links, self.logging)
        globalFunctions.GlobalFunctions().conversion(directory_path, conversion, keep_files, comic_name,
                                                     chapter_number)
        return 0

    def name_cleaner(self, url):
        """Derive a filesystem-friendly comic title from the URL path."""
        initial_name = str(url).split("/")[4].strip()
        safe_name = re.sub(r"[0-9][a-z][A-Z]\ ", "", str(initial_name))
        manga_name = str(safe_name.title()).replace("-", " ")
        return manga_name

    def full_series(self, comic_url, comic_name, sorting, download_directory, chapter_range, conversion, keep_files):
        """Download all (or a range of) chapters of a series listing page."""
        print("Fooling CloudFlare...Please Wait...")
        source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=comic_url, scrapper_delay=10)
        all_links = []
        listing_table = source.find_all("table", {"class": "listing"})
        for elements in listing_table:
            x = elements.findAll('a')
            for a in x:
                all_links.append(str(a['href']).strip())
        """Readcomiconline.to shows the chapters in the Descending order. The 1st chapter is at the bottom, hence, at
        the end of the list. So, we'll reverse the list, to perform the ranging functionality properly.
        This is a fix for issues like #74.
        """
        all_links.reverse()
        logging.debug("All Links : %s" % all_links)
        # Uh, so the logic is that remove all the unnecessary chapters beforehand
        # and then pass the list for further operations.
        if chapter_range != "All":
            # -1 to shift the episode number accordingly to the INDEX of it. List starts from 0 xD!
            starting = int(str(chapter_range).split("-")[0]) - 1
            if str(chapter_range).split("-")[1].isdigit():
                ending = int(str(chapter_range).split("-")[1])
            else:
                ending = len(all_links)
            indexes = [x for x in range(starting, ending)]
            all_links = [all_links[x] for x in indexes][::-1]
        else:
            all_links = all_links
        if self.print_index:
            # Fix: the original used a Python-2-only bare print statement here
            # (`print str(idx) + ...`), a SyntaxError on Python 3 even though
            # the rest of the file uses print() calls. Output is unchanged.
            for idx, chap_link in enumerate(all_links, start=1):
                print(str(idx) + ": " + chap_link)
            return
        if str(sorting).lower() in ['new', 'desc', 'descending', 'latest']:
            for chap_link in all_links:
                chap_link = "http://readcomiconline.to" + chap_link
                self.single_chapter(comic_url=chap_link, comic_name=comic_name, download_directory=download_directory,
                                    conversion=conversion, keep_files=keep_files)
                # if chapter range contains "__EnD__" write new value to config.json
                if chapter_range != "All" and chapter_range.split("-")[1] == "__EnD__":
                    globalFunctions.GlobalFunctions().addOne(comic_url)
        elif str(sorting).lower() in ['old', 'asc', 'ascending', 'oldest', 'a']:
            for chap_link in all_links[::-1]:
                chap_link = "http://readcomiconline.to" + chap_link
                self.single_chapter(comic_url=chap_link, comic_name=comic_name, download_directory=download_directory,
                                    conversion=conversion, keep_files=keep_files)
                # if chapter range contains "__EnD__" write new value to config.json
                if chapter_range != "All" and chapter_range.split("-")[1] == "__EnD__":
                    globalFunctions.GlobalFunctions().addOne(comic_url)
        return 0
| StarcoderdataPython |
8161723 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Exaplanation: github_listmyrepos.py. enumerate all of your repositories
Usage:
$ python github_listmyrepos.py [ options ]
Style:
Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
@name github_listmyrepos.py
@version 1.00
@author-name <NAME>
@author-email <EMAIL>
@license-name Apache 2.0
@license-url https://www.apache.org/licenses/LICENSE-2.0
"""
__version__ = 1.00
__author__ = "<NAME> (<EMAIL>)"
### import os
import sys
import datetime
import argparse
import requests
# Avoid writing .pyc files when the script is imported/executed.
sys.dont_write_bytecode = 1
# NOTE(review): MY_CFG and REPOLIST appear unused in this part of the file.
MY_CFG = 'undefined'
PARSER = argparse.ArgumentParser(description="""
github_listmyrepos.py displays info on all Github repositories you have.
""")
PARSER.add_argument("-u", metavar='<username>', required=True, dest='USERNAME', help="set username")
PARSER.add_argument("-r", metavar='<resource>', default="html_url", dest='RESOURCE', \
                    help="specify resource (default: html_url )")
ARGS = PARSER.parse_args()
RESOURCE = ARGS.RESOURCE
# NOTE(review): ARGS.USERNAME is an attribute access, which would raise
# AttributeError rather than KeyError — and -u is required, so this except
# branch looks unreachable; confirm the intended failure mode.
try:
    USERNAME = ARGS.USERNAME
except KeyError as myerror:
    print(f'Environment Variable Not Set :: {myerror.args[0]}')
    sys.exit()
REPOLIST = {}
# Timestamps for this run (currently only computed, not written anywhere here).
RIGHTNOW = datetime.datetime.now()
DATESTAMP = RIGHTNOW.strftime('%Y%m%d')
TIMESTAMP = RIGHTNOW.strftime('%H%M%S')
### beginning ###
def main():
    """Fetch the user's GitHub repositories and print one markdown table
    row per repository, using the resource field selected with -r."""
    endpoint = f'https://api.github.com/users/{USERNAME}/repos?type=all&per_page=10000'
    for repo in requests.get(endpoint).json():
        repo_url = repo[ARGS.RESOURCE]
        repo_name = repo_url.split('/')[-1]
        print(f'| [{repo_name}]({repo_url}) |')

if __name__ == '__main__':
    main()
| StarcoderdataPython |
1823189 | #%%
'''
Used to create diagrams for Descriptive Stat worksheet
'''
import csv, seaborn as sns, pandas as pd

# Tally how many rows exist per ice-cream flavor.
flavor_counts = {}
with open('../../Datasets/icecream.csv', 'r') as csv_file:
    for row in csv.DictReader(csv_file):
        flavor = row['flavor']
        flavor_counts[flavor] = flavor_counts.get(flavor, 0) + 1

# One-row frame: one column per flavor holding its count.
flavor_df = pd.DataFrame([flavor_counts], columns=flavor_counts.keys())
print(flavor_counts)
print(flavor_df)
chart = sns.barplot(data=flavor_df, palette="crest")
# %%
| StarcoderdataPython |
1990041 | <filename>Python/Algorithm/LoopEscope.py
import random
def main():
    """Typing game: show a random letter and keep score while the player
    echoes it back; any wrong answer ends the game."""
    score = 0
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    while True:
        target = alphabet[random.randint(0, 25)]
        print(target)
        print('Sua pontuação é:', score)
        if input('') != target:
            print('errado')
            break
        score += 1
        print('Sua pontuação é:', score)

main()
#https://pt.stackoverflow.com/q/361805/101
| StarcoderdataPython |
6408549 | print("""
/$$$$$ /$$$$$$$ /$$ /$$
|__ $$ | $$__ $$ | $$ | $$
| $$ | $$ \ $$ /$$$$$$$ /$$$$$$ /$$$$$$ | $$ /$$
| $$ /$$$$$$| $$$$$$$ /$$__ $$ /$$__ $$ /$$__ $$| $$ /$$/
/$$ | $$|______/| $$__ $$| $$ | $$| $$ \ $$| $$ \__/| $$$$$$/
| $$ | $$ | $$ \ $$| $$ | $$| $$ | $$| $$ | $$_ $$
| $$$$$$/ | $$$$$$$/| $$$$$$$| $$$$$$/| $$ | $$ \ $$
\______/ |_______/ \_______/ \______/ |__/ |__/ \__/
====================================================================
[*] <NAME> - R&D ICWR
====================================================================
""")
import socket
from requests import get
from re import findall
from time import sleep as delay
from threading import Thread
from datetime import datetime
from os.path import isfile
from os.path import isdir
from os import mkdir
from random import randint
class jancok_dorker:
    """Bing-dork scanner: searches Bing for the given dork(s), extracts
    result links, and probes each host on port 80, logging reachable
    sites under result-site/."""

    def useragent(self):
        # Return one of a fixed pool of browser User-Agent strings at random,
        # to vary the search requests.
        arr=["Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.0.12) Gecko/2009070611 Firefox/3.0.12 (.NET CLR 3.5.30729)","Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.464.0 Safari/534.3","Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_5_8; ja-jp) AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16","Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0","Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.1 (KHTML, like Gecko) Chrome/6.0.427.0 Safari/534.1"]
        return arr[randint(0, len(arr) - 1)]

    def check_site(self, site):
        # Probe the site's host on port 80 with a raw HTTP GET; on success,
        # print it and append it to today's result file. Any failure
        # (DNS, timeout, refused) is reported as unreachable.
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(5)
            s.connect((site.split("/")[2], 80))
            s.send("GET / HTTP/1.1\r\nHost: {}\r\n\r\n".format(site.split("/")[2]).encode())
            s.close()
            print("[+] {}".format(site))
            open("result-site/result-" + str(datetime.now().strftime("%Y-%m-%d")) + ".txt", "a").write("{}\n".format(site))
        except:
            print("[-] {}".format(site))

    def extract_link(self, dork, page):
        # Fetch one Bing results page for the dork and check every external
        # http(s) link (skipping bing/google/microsoft hosts) in a thread.
        try:
            resp = get(url="https://www.bing.com/search?q={}&first={}".format(dork, str(page)), headers={ "User-Agent": self.useragent() }, timeout=5)
            link = findall("href=\"(.+?)\"", resp.text)
            for x in link:
                xurl = x.split("/")
                if xurl[0] == "http:" or xurl[0] == "https:":
                    if all(not xxx in xurl[2] for xxx in [".bing.", ".google.", ".microsoft."]):
                        Thread(target=self.check_site, args=(x, )).start()
                        delay(0.1)
        except:
            pass

    def __init__(self):
        # Prompt for a dork (or a file containing one dork per line) and a
        # page count, then spawn one extract_link thread per dork/page pair.
        if not isdir("result-site"):
            mkdir("result-site")
        input_dork = input("[*] Dork / List Dork : ")
        page = input("[*] Page : ")
        print("\n")
        if input_dork != '' and page != '':
            if isfile(input_dork):
                for dork in open(input_dork, errors='ignore').read().split("\n"):
                    for i in range(0, int(page)):
                        if dork != '':
                            Thread(target=self.extract_link, args=(dork, "{}0".format(str(i)))).start()
                            delay(0.1)
            else:
                for i in range(0, int(page)):
                    Thread(target=self.extract_link, args=(input_dork, "{}0".format(str(i)))).start()
                    delay(0.1)
        else:
            print("[-] Invalid Option")

if __name__ == "__main__":
    jancok_dorker()
| StarcoderdataPython |
9656464 | <reponame>augastinklazar8673/Password-strength-prediction-model
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Notebook export: load the password-strength dataset, drop missing rows,
# and split it into password strings (x) and strength labels (y).
import pandas as pds
import numpy as npy
import seaborn as sb
import warnings
warnings.filterwarnings('ignore')
# In[4]:
# NOTE(review): the path uses unescaped backslashes ('\p', '\d' happen to be
# literal here) and error_bad_lines, which is deprecated in newer pandas
# (use on_bad_lines='skip') — confirm against the pandas version in use.
data = pds.read_csv('D:\password-strength\data.csv', error_bad_lines = False)
data.head()
# In[6]:
data['strength'].unique()
# In[ ]:
# 0 - poor
# 2 - strong
# 1 - normal
# In[7]:
data.isna().sum() #how many has NaN value
# In[8]:
data[data['password'].isnull()] # what's inside first '[]' is the filter
# In[9]:
# drop it
data.dropna(inplace = True)
# In[10]:
data.isnull().sum()
# In[11]:
sb.countplot(data['strength'])
# In[ ]:
# 1 has the highest count, imbalanced dataset
#strength is a dependent feature. Separate them
# In[12]:
password_tuple = npy.array(data)
# In[13]:
password_tuple
# In[ ]:
# shuffle the data so it provides robustness to the model
# In[14]:
import random
random.shuffle(password_tuple)
# In[15]:
#separation
# Column 0 is the password text, column 1 the strength label.
x = [labels[0] for labels in password_tuple]
y = [labels[1] for labels in password_tuple]
# In[16]:
x #all passwords
# In[ ]:
# TF-IDF
# In[28]:
def word_divide_char(inputs):
    """Tokenizer for TfidfVectorizer: split a password into single characters.

    :param inputs: a password string (any iterable of characters works)
    :return: list of one-character strings
    """
    # list() performs the manual append-loop of the original in one call.
    return list(inputs)
# In[29]:
# Vectorize passwords with character-level TF-IDF, then train and evaluate
# a multinomial logistic-regression strength classifier.
from sklearn.feature_extraction.text import TfidfVectorizer
# In[30]:
vectorizer = TfidfVectorizer(tokenizer = word_divide_char)
# In[31]:
X = vectorizer.fit_transform(x)
# In[32]:
X.shape
# In[33]:
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2
# (use get_feature_names_out()) — confirm the sklearn version in use.
vectorizer.get_feature_names()
# In[34]:
first_doc_vector = X[0]
first_doc_vector
# In[35]:
first_doc_vector.T.todense()
# In[ ]:
#prepare data for model
# In[38]:
df = pds.DataFrame(first_doc_vector.T.todense(), index = vectorizer.get_feature_names(), columns = ['TF-IDF'])
df.sort_values(by = ['TF-IDF'], ascending = False)
# In[ ]:
# pass data
# In[ ]:
# apply logistic Regression (ML Algo)
# In[39]:
from sklearn.model_selection import train_test_split
# In[41]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2) #returns 4 param
# test size - here-20% of data is for testing and 80% for training.
# In[42]:
X_train.shape
# In[ ]:
# 53571 is somewhere close to 80 percent.
# In[43]:
from sklearn.linear_model import LogisticRegression
# In[44]:
clf = LogisticRegression(random_state = 0, multi_class = 'multinomial')
# multinomial class is considered because we have 3 categories (0, 1, 2)
# In[45]:
clf.fit(X_train, y_train)
# In[ ]:
# now it's time for predictions....!
# In[46]:
# Sanity-check the model on a single hand-written password.
dt = npy.array(['@#123abcd'])
predc = vectorizer.transform(dt)
clf.predict(predc)
# In[ ]:
# In[47]:
y_predc = clf.predict(X_test)
y_predc # all predictions in the form of array.
# In[ ]:
# In[48]:
from sklearn.metrics import confusion_matrix, accuracy_score
# In[49]:
cm = confusion_matrix(y_test, y_predc)
print(cm)
print(accuracy_score(y_test, y_predc))
# In[ ]:
# 5271 92798 11529 are true predictions,
#0.8183352248969595 shows that model has an accuracy of approx 82 percent
# In[50]:
from sklearn.metrics import classification_report
print(classification_report(y_test, y_predc))
# In[ ]:
| StarcoderdataPython |
3539472 | <filename>scripts/examples/tools/autoruns.py
# Tool Imports
from bph.tools.windows.autoruns import BphAutoruns as Autoruns
# Core Imports
from bph.core.server.template import BphTemplateServer as TemplateServer
from bph.core.session import BphSession as Session
from bph.core.sample import BphLabFile as LabFile
# Open a BPH lab session and start the template server that serves tool
# configuration to the analysis VM.
session = Session(project_name='blackhat_arsenal_2019')
session.start()
templateserver = TemplateServer()
templateserver.start()
# Run Sysinternals Autoruns in basic-analysis mode and collect its output.
autoruns = Autoruns()
autoruns.analysis_basic()
autoruns.execute(delay=5)
autoruns.files()
| StarcoderdataPython |
11228363 | # coding: utf-8
from __future__ import absolute_import
import datetime
import re
import importlib
import six
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
class AntiDDoSAsyncClient(Client):
"""
:param configuration: .Configuration object for this client
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long,
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
    def __init__(self):
        # Extend the generated base Client, register this service's model
        # package for (de)serialization, and set the default SDK User-Agent.
        super(AntiDDoSAsyncClient, self).__init__()
        self.model_package = importlib.import_module("huaweicloudsdkantiddos.v1.model")
        self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'}
    @classmethod
    def new_builder(cls, clazz=None):
        """Return a ClientBuilder for this client.

        When *clazz* is given it must be the AntiDDoSClient type; anything
        else is rejected to prevent building an unrelated client here.
        """
        if clazz is None:
            return ClientBuilder(cls)
        if clazz.__name__ != "AntiDDoSClient":
            raise TypeError("client type error, support client type is AntiDDoSClient")
        return ClientBuilder(clazz)
    def create_default_config_async(self, request):
        """Configure the account's default Anti-DDoS protection policy.

        Newly purchased resources that enable protection automatically are
        configured according to this default policy.

        :param CreateDefaultConfigRequest request
        :return: CreateDefaultConfigResponse
        """
        return self.create_default_config_with_http_info(request)
    def create_default_config_with_http_info(self, request):
        """Configure the account's default Anti-DDoS protection policy.

        Generated request-marshalling boilerplate: collects the request's
        set attributes, builds the parameter dictionaries, and issues
        POST /v1/{project_id}/antiddos/default-config.

        :param CreateDefaultConfigRequest request
        :return: CreateDefaultConfigResponse
        """
        all_params = ['create_default_config_request_body']
        local_var_params = {}
        # Copy only the attributes actually present on the request object.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # Streaming requests send the raw file stream as the body instead.
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v1/{project_id}/antiddos/default-config',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='CreateDefaultConfigResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def delete_default_config_async(self, request):
        """Delete the account's configured default Anti-DDoS protection policy.

        :param DeleteDefaultConfigRequest request
        :return: DeleteDefaultConfigResponse
        """
        return self.delete_default_config_with_http_info(request)
def delete_default_config_with_http_info(self, request):
"""删除Ani-DDoS默认防护策略
删除用户配置的默认防护策略。
:param DeleteDefaultConfigRequest request
:return: DeleteDefaultConfigResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/antiddos/default-config',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteDefaultConfigResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def show_alert_config_async(self, request):
        """Query the alarm configuration.

        Returns whether each alarm category is received and whether alarms
        are delivered by SMS or by email.

        :param ShowAlertConfigRequest request
        :return: ShowAlertConfigResponse
        """
        return self.show_alert_config_with_http_info(request)
def show_alert_config_with_http_info(self, request):
"""查询告警配置信息
查询用户配置信息,用户可以通过此接口查询是否接收某类告警,同时可以配置是手机短信还是电子邮件接收告警信息。
:param ShowAlertConfigRequest request
:return: ShowAlertConfigResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/warnalert/alertconfig/query',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowAlertConfigResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def show_default_config_async(self, request):
        """Query the account's configured default Anti-DDoS protection policy.

        :param ShowDefaultConfigRequest request
        :return: ShowDefaultConfigResponse
        """
        return self.show_default_config_with_http_info(request)
def show_default_config_with_http_info(self, request):
"""查询Ani-DDoS默认防护策略
查询用户配置的默认防护策略。
:param ShowDefaultConfigRequest request
:return: ShowDefaultConfigResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/antiddos/default-config',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowDefaultConfigResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def update_alert_config_async(self, request):
        """Update the alarm configuration.

        Controls whether each alarm category is received and whether alarms
        are delivered by SMS or by email.

        :param UpdateAlertConfigRequest request
        :return: UpdateAlertConfigResponse
        """
        return self.update_alert_config_with_http_info(request)
def update_alert_config_with_http_info(self, request):
"""更新告警配置信息
更新用户配置信息,用户可以通过此接口更新是否接收某类告警,同时可以配置是手机短信还是电子邮件接收告警信息。
:param UpdateAlertConfigRequest request
:return: UpdateAlertConfigResponse
"""
all_params = ['update_alert_config_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/warnalert/alertconfig/update',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateAlertConfigResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_d_dos_status_async(self, request):
        """List the Anti-DDoS protection status of all the user's EIPs.

        EIPs can be queried whether or not they are bound to a cloud server.

        :param ListDDosStatusRequest request
        :return: ListDDosStatusResponse
        """
        return self.list_d_dos_status_with_http_info(request)
def list_d_dos_status_with_http_info(self, request):
"""查询EIP防护状态列表
查询用户所有EIP的Anti-DDoS防护状态信息,用户的EIP无论是否绑定到云服务器,都可以进行查询。
:param ListDDosStatusRequest request
:return: ListDDosStatusResponse
"""
all_params = ['status', 'limit', 'offset', 'ip']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'ip' in local_var_params:
query_params.append(('ip', local_var_params['ip']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/antiddos',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListDDosStatusResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_daily_log_async(self, request):
        """Query abnormal events for one EIP over the past 24 hours.

        Abnormal events cover traffic-scrubbing and black-hole events;
        query latency is within five minutes.

        :param ListDailyLogRequest request
        :return: ListDailyLogResponse
        """
        return self.list_daily_log_with_http_info(request)
def list_daily_log_with_http_info(self, request):
"""查询指定EIP异常事件
查询指定EIP在过去24小时之内的异常事件信息,异常事件包括清洗事件和黑洞事件,查询延迟在5分钟之内。
:param ListDailyLogRequest request
:return: ListDailyLogResponse
"""
all_params = ['floating_ip_id', 'sort_dir', 'limit', 'offset', 'ip']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'floating_ip_id' in local_var_params:
path_params['floating_ip_id'] = local_var_params['floating_ip_id']
query_params = []
if 'sort_dir' in local_var_params:
query_params.append(('sort_dir', local_var_params['sort_dir']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'ip' in local_var_params:
query_params.append(('ip', local_var_params['ip']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/antiddos/{floating_ip_id}/logs',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListDailyLogResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_daily_report_async(self, request):
        """Query protection traffic for one EIP over the past 24 hours.

        Traffic samples are reported at five-minute intervals.

        :param ListDailyReportRequest request
        :return: ListDailyReportResponse
        """
        return self.list_daily_report_with_http_info(request)
def list_daily_report_with_http_info(self, request):
"""查询指定EIP防护流量
查询指定EIP在过去24小时之内的防护流量信息,流量的间隔时间单位为5分钟。
:param ListDailyReportRequest request
:return: ListDailyReportResponse
"""
all_params = ['floating_ip_id', 'ip']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'floating_ip_id' in local_var_params:
path_params['floating_ip_id'] = local_var_params['floating_ip_id']
query_params = []
if 'ip' in local_var_params:
query_params.append(('ip', local_var_params['ip']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/antiddos/{floating_ip_id}/daily',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListDailyReportResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_new_configs_async(self, request):
        """Query the configurable value ranges for Anti-DDoS policies.

        Users pick a protection policy suitable for their service from the
        returned range list for traffic scrubbing.

        :param ListNewConfigsRequest request
        :return: ListNewConfigsResponse
        """
        return self.list_new_configs_with_http_info(request)
    def list_new_configs_with_http_info(self, request):
        """Query the configurable ranges of Anti-DDoS protection policies.

        Returns the value ranges supported for Anti-DDoS policy settings so
        that a policy suitable for the caller's workload can be selected.

        :param ListNewConfigsRequest request
        :return: ListNewConfigsResponse
        """
        all_params = []
        local_var_params = {}
        # Copy only the attributes that were actually set on the request.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)
        collection_formats = {}
        # This endpoint takes no path or query parameters.
        path_params = {}
        query_params = []
        header_params = {}
        form_params = {}
        body_params = None
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v2/{project_id}/antiddos/query-config-list',
            method='GET',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='ListNewConfigsResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def list_weekly_reports_async(self, request):
        """Query weekly protection statistics.

        Returns the account-wide weekly Anti-DDoS statistics: the number of
        blocked attacks and total attacks within one week, plus rankings by
        attack count. Only statistics for the four weeks preceding the
        current time can be queried.

        :param ListWeeklyReportsRequest request
        :return: ListWeeklyReportsResponse
        """
        # Dispatched asynchronously via call_api(async_request=True).
        return self.list_weekly_reports_with_http_info(request)
    def list_weekly_reports_with_http_info(self, request):
        """Query weekly protection statistics.

        Returns the account-wide weekly Anti-DDoS statistics: the number of
        blocked attacks and total attacks within one week, plus rankings by
        attack count. Only statistics for the four weeks preceding the
        current time can be queried.

        :param ListWeeklyReportsRequest request
        :return: ListWeeklyReportsResponse
        """
        all_params = ['period_start_date']
        local_var_params = {}
        # Copy only the attributes that were actually set on the request.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)
        collection_formats = {}
        path_params = {}
        # Start of the reporting period, passed as a query parameter.
        query_params = []
        if 'period_start_date' in local_var_params:
            query_params.append(('period_start_date', local_var_params['period_start_date']))
        header_params = {}
        form_params = {}
        body_params = None
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v1/{project_id}/antiddos/weekly',
            method='GET',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='ListWeeklyReportsResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def show_d_dos_async(self, request):
        """Query the Anti-DDoS service configuration.

        Returns the Anti-DDoS protection policy configured for the
        specified EIP.

        :param ShowDDosRequest request
        :return: ShowDDosResponse
        """
        # Dispatched asynchronously via call_api(async_request=True).
        return self.show_d_dos_with_http_info(request)
    def show_d_dos_with_http_info(self, request):
        """Query the Anti-DDoS service configuration.

        Returns the Anti-DDoS protection policy configured for the
        specified EIP.

        :param ShowDDosRequest request
        :return: ShowDDosResponse
        """
        all_params = ['floating_ip_id', 'ip']
        local_var_params = {}
        # Copy only the attributes that were actually set on the request.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)
        collection_formats = {}
        # 'floating_ip_id' is interpolated into the URL path.
        path_params = {}
        if 'floating_ip_id' in local_var_params:
            path_params['floating_ip_id'] = local_var_params['floating_ip_id']
        # 'ip' travels as a query-string parameter.
        query_params = []
        if 'ip' in local_var_params:
            query_params.append(('ip', local_var_params['ip']))
        header_params = {}
        form_params = {}
        body_params = None
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v1/{project_id}/antiddos/{floating_ip_id}',
            method='GET',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='ShowDDosResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def show_d_dos_status_async(self, request):
        """Query the protection status of a specified EIP.

        :param ShowDDosStatusRequest request
        :return: ShowDDosStatusResponse
        """
        # Dispatched asynchronously via call_api(async_request=True).
        return self.show_d_dos_status_with_http_info(request)
    def show_d_dos_status_with_http_info(self, request):
        """Query the protection status of a specified EIP.

        :param ShowDDosStatusRequest request
        :return: ShowDDosStatusResponse
        """
        all_params = ['floating_ip_id', 'ip']
        local_var_params = {}
        # Copy only the attributes that were actually set on the request.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)
        collection_formats = {}
        # 'floating_ip_id' is interpolated into the URL path.
        path_params = {}
        if 'floating_ip_id' in local_var_params:
            path_params['floating_ip_id'] = local_var_params['floating_ip_id']
        # 'ip' travels as a query-string parameter.
        query_params = []
        if 'ip' in local_var_params:
            query_params.append(('ip', local_var_params['ip']))
        header_params = {}
        form_params = {}
        body_params = None
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v1/{project_id}/antiddos/{floating_ip_id}/status',
            method='GET',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='ShowDDosStatusResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def show_new_task_status_async(self, request):
        """Query an Anti-DDoS task.

        Returns the current execution status of the specified Anti-DDoS
        configuration task.

        :param ShowNewTaskStatusRequest request
        :return: ShowNewTaskStatusResponse
        """
        # Dispatched asynchronously via call_api(async_request=True).
        return self.show_new_task_status_with_http_info(request)
    def show_new_task_status_with_http_info(self, request):
        """Query an Anti-DDoS task.

        Returns the current execution status of the specified Anti-DDoS
        configuration task.

        :param ShowNewTaskStatusRequest request
        :return: ShowNewTaskStatusResponse
        """
        all_params = ['task_id']
        local_var_params = {}
        # Copy only the attributes that were actually set on the request.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)
        collection_formats = {}
        path_params = {}
        # The task identifier travels as a query-string parameter.
        query_params = []
        if 'task_id' in local_var_params:
            query_params.append(('task_id', local_var_params['task_id']))
        header_params = {}
        form_params = {}
        body_params = None
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v2/{project_id}/query-task-status',
            method='GET',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='ShowNewTaskStatusResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def update_d_dos_async(self, request):
        """Update the Anti-DDoS service configuration.

        Updates the Anti-DDoS protection policy of the specified EIP. A
        successful call only means the service node accepted the request;
        poll the task-status API (see show_new_task_status) to learn whether
        the reconfiguration actually succeeded.

        :param UpdateDDosRequest request
        :return: UpdateDDosResponse
        """
        # Dispatched asynchronously via call_api(async_request=True).
        return self.update_d_dos_with_http_info(request)
    def update_d_dos_with_http_info(self, request):
        """Update the Anti-DDoS service configuration.

        Updates the Anti-DDoS protection policy of the specified EIP. A
        successful call only means the service node accepted the request;
        poll the task-status API (see show_new_task_status) to learn whether
        the reconfiguration actually succeeded.

        :param UpdateDDosRequest request
        :return: UpdateDDosResponse
        """
        all_params = ['floating_ip_id', 'update_d_dos_request_body', 'ip']
        local_var_params = {}
        # Copy only the attributes that were actually set on the request.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)
        collection_formats = {}
        # 'floating_ip_id' is interpolated into the URL path.
        path_params = {}
        if 'floating_ip_id' in local_var_params:
            path_params['floating_ip_id'] = local_var_params['floating_ip_id']
        # 'ip' travels as a query-string parameter.
        query_params = []
        if 'ip' in local_var_params:
            query_params.append(('ip', local_var_params['ip']))
        header_params = {}
        form_params = {}
        # NOTE(review): the request attribute is presumably mapped to 'body'
        # via attribute_map, although all_params lists it under
        # 'update_d_dos_request_body' -- confirm against the generated model.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v1/{project_id}/antiddos/{floating_ip_id}',
            method='PUT',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='UpdateDDosResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None,
                 post_params=None, response_type=None, response_headers=None, auth_settings=None,
                 collection_formats=None, request_type=None):
        """Makes the HTTP request and returns deserialized data.

        The request is always dispatched asynchronously
        (``async_request=True``) by this client.

        :param resource_path: Path to method endpoint.
        :param method: Method to call.
        :param path_params: Path parameters in the url.
        :param query_params: Query parameters in the url.
        :param header_params: Header parameters to be
            placed in the request header.
        :param body: Request body.
        :param post_params dict: Request post form parameters,
            for `application/x-www-form-urlencoded`, `multipart/form-data`.
        :param auth_settings list: Auth Settings names for the request.
        :param response_type: Response data type.
        :param response_headers: Header should be added to response data.
        :param collection_formats: dict of collection formats for path, query,
            header, and post parameters.
        :param request_type: Request data type.
        :return:
            Return the response directly.
        """
        return self.do_http_request(
            method=method,
            resource_path=resource_path,
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body,
            post_params=post_params,
            response_type=response_type,
            response_headers=response_headers,
            collection_formats=collection_formats,
            request_type=request_type,
            async_request=True)
| StarcoderdataPython |
5080422 | <gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# # Introduction to Cufflinks
# This library binds the power of plotly with the flexibility of pandas for easy plotting.
# ## Import Libraries
# In[1]:
# NOTE(review): pandas, numpy and chart_studio.plotly are imported but never
# referenced below -- presumably kept for interactive notebook use; confirm
# before removing.
import pandas as pd
import numpy as np
import cufflinks as cf
import chart_studio.plotly as py
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
# ## Config
# In[2]:
# ggplot theme, publicly shared charts, offline (no plotly cloud) rendering.
cf.set_config_file(theme='ggplot',sharing='public',offline=True)
# ## Enable Notebook Mode
# In[3]:
init_notebook_mode(connected=True)
# ## Let's a Create a Simple Plot
# In[4]:
# Random-walk demo data rendered as an interactive line chart.
cf.datagen.lines().iplot(kind='scatter',xTitle='Dates',yTitle='Returns',title='Cufflinks - Line Chart')
1696448 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""High-level interface generation
"""
__docformat__ = 'restructuredtext'
import logging
lgr = logging.getLogger('datalad.interface.base')
from abc import (
ABC,
abstractmethod,
)
import os
import re
import textwrap
from importlib import import_module
from collections import (
OrderedDict,
)
import warnings
import datalad
from datalad.interface.common_opts import eval_params
from datalad.distribution.dataset import Dataset
from datalad.distribution.dataset import resolve_path
from datalad.support.exceptions import CapturedException
# Default log channel (level name) used when reporting a result record of a
# given status; the empty-string key is the fallback for unlisted statuses.
default_logchannels = {
    '': 'debug',
    'ok': 'debug',
    'notneeded': 'debug',
    'impossible': 'warning',
    'error': 'error',
}
def get_api_name(intfspec):
    """Given an interface specification return an API name for it

    The explicit fourth item of the spec wins when present; otherwise the
    name is the last component of the dotted module path in the first item.
    """
    if len(intfspec) > 3:
        return intfspec[3]
    return intfspec[0].rsplit('.', 1)[-1]
def get_interface_groups(include_plugins=False):
    """Return a list of command groups.

    Returns
    -------
    A list of tuples with the form (GROUP_NAME, GROUP_DESCRIPTION, COMMANDS).
    """
    if include_plugins:
        warnings.warn("Plugins are no longer supported.", DeprecationWarning)
    from .. import interface as _interfaces
    # Every attribute named '_group_<name>' in datalad.interface describes one
    # command group; strip the prefix and prepend the name to the group spec.
    prefix = '_group_'
    return [
        (attr[len(prefix):],) + getattr(_interfaces, attr)
        for attr in _interfaces.__dict__
        if attr.startswith(prefix)
    ]
def get_cmd_summaries(descriptions, groups, width=79):
    """Return summaries for the commands in `groups`.
    Parameters
    ----------
    descriptions : dict
        A map of group names to summaries.
    groups : list of tuples
        A list of groups and commands in the form described by
        `get_interface_groups`.
    width : int, optional
        The maximum width of each line in the summary text.
    Returns
    -------
    A list with a formatted entry for each command. The first command of each
    group is preceded by an entry describing the group.
    """
    cmd_summary = []
    # Groups are emitted alphabetically by their name.
    for grp in sorted(groups, key=lambda x: x[0]):
        grp_descr = grp[1]
        grp_cmds = descriptions[grp[0]]
        cmd_summary.append('\n*%s*\n' % (grp_descr,))
        for cd in grp_cmds:
            # cd is (command name, summary); the trailing period is stripped
            # and the summary re-wrapped with a 6-column hanging indent.
            cmd_summary.append('  %s\n%s'
                               % ((cd[0],
                                   textwrap.fill(
                                       cd[1].rstrip(' .'),
                                       width - 5,
                                       initial_indent=' ' * 6,
                                       subsequent_indent=' ' * 6))))
    return cmd_summary
def load_interface(spec):
    """Load and return the class for `spec`.

    Parameters
    ----------
    spec : tuple
        For a standard interface, the first item is the datalad source module
        and the second object name for the interface.

    Returns
    -------
    The interface class or, if importing the module fails, None.
    """
    module_name = spec[0]
    lgr.log(5, "Importing module %s ", module_name)
    try:
        mod = import_module(module_name, package='datalad')
    except Exception as e:
        # Record the failure but keep going -- one broken interface module
        # must not take down the whole API assembly.
        ce = CapturedException(e)
        lgr.error("Internal error, cannot import interface '%s': %s",
                  module_name, ce)
        return None
    return getattr(mod, spec[1])
def get_cmd_doc(interface):
    """Return the documentation for the command defined by `interface`.

    Parameters
    ----------
    interface : subclass of Interface

    Returns
    -------
    str
        The stripped class docstring, with any ``_docs_`` placeholders
        expanded via ``str.format``.
    """
    raw = interface.__doc__
    cmd_doc = raw.strip() if raw is not None else ''
    if hasattr(interface, '_docs_'):
        # expand docs
        cmd_doc = cmd_doc.format(**interface._docs_)
    return cmd_doc
def dedent_docstring(text):
    """Remove uniform indentation from a multiline docstring"""
    if text is None:
        return None
    if text.startswith(' '):
        # Uniformly indented, including the first line: plain dedent works.
        return textwrap.dedent(text)
    # The first line typically carries no indentation in a docstring, which
    # would defeat textwrap.dedent(); dedent only the remaining lines.
    lines = text.split('\n')
    if len(lines) == 1:
        # single line, no indentation, nothing to do
        return text
    return lines[0] + "\n" + textwrap.dedent('\n'.join(lines[1:]))
def alter_interface_docs_for_api(docs):
    """Apply modifications to interface docstrings for Python API use.

    Strips cmdline-only markup, keeps the Python variant of any
    Python/cmdline alternatives, and reflows specially marked sections.
    The substitutions are order-dependent: section removal must happen
    before in-line marker expansion.
    """
    # central place to alter the impression of docstrings,
    # like removing cmdline specific sections
    if not docs:
        return docs
    docs = dedent_docstring(docs)
    # clean cmdline sections
    docs = re.sub(
        r'\|\| CMDLINE \>\>.*?\<\< CMDLINE \|\|',
        '',
        docs,
        flags=re.MULTILINE | re.DOTALL)
    # clean cmdline in-line bits
    docs = re.sub(
        r'\[CMD:\s[^\[\]]*\sCMD\]',
        '',
        docs,
        flags=re.MULTILINE | re.DOTALL)
    # keep Python-only in-line bits, dropping the [PY: ... PY] markers
    docs = re.sub(
        r'\[PY:\s([^\[\]]*)\sPY\]',
        lambda match: match.group(1),
        docs,
        flags=re.MULTILINE)
    # select only the python alternative from argument specifications
    docs = re.sub(
        r'``([a-zA-Z0-9_,.]+)\|\|([a-zA-Z0-9-,.]+)``',
        lambda match: f'``{match.group(1)}``',
        docs)
    # keep PYTHON-only sections, dropping the section markers
    docs = re.sub(
        r'\|\| PYTHON \>\>(.*?)\<\< PYTHON \|\|',
        lambda match: match.group(1),
        docs,
        flags=re.MULTILINE | re.DOTALL)
    # Sphinx consumes the RST roles itself, so keep them only when building
    # the documentation.
    if 'DATALAD_SPHINX_RUN' not in os.environ:
        # remove :role:`...` RST markup for cmdline docs
        docs = re.sub(
            r':\S+:`[^`]*`[\\]*',
            lambda match: ':'.join(match.group(0).split(':')[2:]).strip('`\\'),
            docs,
            flags=re.MULTILINE | re.DOTALL)
        # make the handbook doc references more accessible
        # the URL is a redirect configured at readthedocs
        docs = re.sub(
            r'(handbook:[0-9]-[0-9]*)',
            '\\1 (http://handbook.datalad.org/symbols)',
            docs)
    # reflow marked paragraphs, preserving their leading indentation
    docs = re.sub(
        r'^([ ]*)\|\| REFLOW \>\>\n(.*?)\<\< REFLOW \|\|',
        lambda match: textwrap.fill(match.group(2), subsequent_indent=match.group(1)),
        docs,
        flags=re.MULTILINE | re.DOTALL)
    return docs
def is_api_arg(arg):
    """Tell whether `arg` names a public API argument.

    ``self`` and names with a leading underscore are reserved for internal
    purposes and therefore excluded.
    """
    if arg == 'self':
        return False
    return not arg.startswith('_')
def update_docstring_with_parameters(func, params, prefix=None, suffix=None,
                                     add_args=None):
    """Generate a useful docstring from a parameter spec

    Amends any existing docstring of a callable with a textual
    description of its parameters. The Parameter spec needs to match
    the number and names of the callables arguments.

    Parameters
    ----------
    func : callable
        The callable whose ``__doc__`` gets replaced (in place).
    params : dict
        Maps argument name to a Parameter specification.
    prefix : str, optional
        Text to place before the generated parameter listing.
    suffix : str, optional
        Text to place after the generated parameter listing.
    add_args : dict, optional
        Additional name -> default pairs to document as if they were part
        of the signature.
    """
    from datalad.utils import getargspec
    # get the signature
    args, varargs, varkw, defaults = getargspec(func, include_kwonlyargs=True)
    defaults = defaults or tuple()
    if add_args:
        # append extra arguments (sorted for deterministic docs) together
        # with their defaults
        add_argnames = sorted(add_args.keys())
        args.extend(add_argnames)
        defaults = defaults + tuple(add_args[k] for k in add_argnames)
    ndefaults = len(defaults)
    # start documentation with what the callable brings with it
    doc = prefix if prefix else u''
    if len(args) > 1:
        if len(doc):
            if not doc.endswith('\n'):
                doc += '\n'
            doc += '\n'
        doc += "Parameters\n----------\n"
        for i, arg in enumerate(args):
            if not is_api_arg(arg):
                continue
            # we need a parameter spec for each argument
            if not arg in params:
                raise ValueError("function has argument '%s' not described as a parameter" % arg)
            param = params[arg]
            # validate the default -- to make sure that the parameter description is
            # somewhat OK
            # defaults align with the *tail* of the argument list, hence the
            # offset; a negative index means the argument has no default
            defaults_idx = ndefaults - len(args) + i
            if defaults_idx >= 0:
                if param.constraints is not None:
                    param.constraints(defaults[defaults_idx])
            # temporarily swap in the API-tuned doc text for rendering, then
            # restore the original so the Parameter object stays pristine
            orig_docs = param._doc
            param._doc = alter_interface_docs_for_api(param._doc)
            doc += param.get_autodoc(
                arg,
                default=defaults[defaults_idx] if defaults_idx >= 0 else None,
                has_default=defaults_idx >= 0)
            param._doc = orig_docs
            doc += '\n'
    doc += suffix if suffix else u""
    # assign the amended docs
    func.__doc__ = doc
    return func
# TODO should export code_field and indicator, rather than have modes
# TODO this should be a doc helper
def build_example(example, api='python'):
    """Build a code example.

    Take a dict from a classes _example_ specification (list of dicts) and
    build a string with an api or cmd example (for use in cmd help or
    docstring).

    Parameters
    ----------
    example : dict
        Single example specification; ``text`` holds the description while
        ``code_py``/``code_cmd`` hold the Python/cmdline code snippet.
    api : {'python', 'cmdline'}
        If 'python', build Python example for docstring. If 'cmdline', build
        cmd example.

    Returns
    -------
    ex : str
        Formatted example, or an empty string when the example defines no
        code for the requested API.

    Raises
    ------
    ValueError
        If `api` is neither ``'python'`` nor ``'cmdline'``.
    """
    if api == 'python':
        code_field = 'code_py'
        indicator = '>'
    elif api == 'cmdline':
        code_field = 'code_cmd'
        indicator = '%'
    else:
        raise ValueError("unknown API selection: {}".format(api))
    if code_field not in example:
        # only show an example if it exist for the API
        return ''
    # Robustness fix: a missing 'text' key used to crash textwrap.fill()
    # with a TypeError; treat it as an empty description instead.
    description = textwrap.fill(example.get('text', ''))
    # this indent the code snippet to get it properly rendered as code
    # we are not using textwrap.fill(), because it would not acknowledge
    # any meaningful structure/formatting of code snippets. Instead, we
    # maintain line content as is.
    code = dedent_docstring(example.get(code_field))
    needs_indicator = not code.startswith(indicator)
    code = textwrap.indent(code, ' ' * (5 if needs_indicator else 3)).lstrip()
    ex = """{}::\n\n   {}{}\n\n""".format(
        description,
        # disable automatic prefixing, if the example already has one
        # this enables providing more complex examples without having
        # to infer its inner structure
        '{} '.format(indicator)
        if needs_indicator
        # maintain spacing to avoid undesired relative indentation
        else '',
        code)
    return ex
def update_docstring_with_examples(cls_doc, ex):
    """Update a commands docstring with examples.

    Take _examples_ of a command, build the Python examples, and append
    them to the docstring.

    Parameters
    ----------
    cls_doc: str
        docstring
    ex: list
        list of dicts with examples

    Returns
    -------
    str
        The docstring with an "Examples" section appended.
    """
    from textwrap import indent
    if len(cls_doc):
        cls_doc += "\n"
    # Section header is indented to sit inside the class docstring body.
    cls_doc += "    Examples\n    --------\n"
    # loop though provided examples
    for example in ex:
        cls_doc += indent(build_example(example, api='python'), ' '*4)
    return cls_doc
def build_doc(cls, **kwargs):
    """Decorator to build docstrings for datalad commands

    It's intended to decorate the class, the __call__-method of which is the
    actual command. It expects that __call__-method to be decorated by
    eval_results.

    Note that values for any `eval_params` keys in `cls._params_` are
    ignored. This means one class may extend another's `_params_`
    without worrying about filtering out `eval_params`.

    Parameters
    ----------
    cls: Interface
        DataLad command implementation
    """
    if datalad.in_librarymode():
        # NOTE(review): "libary" typo in this runtime log message is left
        # untouched here; fixing it would change runtime output.
        lgr.debug("Not assembling DataLad API docs in libary-mode")
        return cls
    # Note, that this is a class decorator, which is executed only once when the
    # class is imported. It builds the docstring for the class' __call__ method
    # and returns the original class.
    #
    # This is because a decorator for the actual function would not be able to
    # behave like this. To build the docstring we need to access the attribute
    # _params of the class. From within a function decorator we cannot do this
    # during import time, since the class is being built in this very moment and
    # is not yet available in the module. And if we do it from within the part
    # of a function decorator, that is executed when the function is called, we
    # would need to actually call the command once in order to build this
    # docstring.
    lgr.debug("Building doc for {}".format(cls))
    cls_doc = cls.__doc__
    if hasattr(cls, '_docs_'):
        # expand docs
        cls_doc = cls_doc.format(**cls._docs_)
    # get examples
    ex = getattr(cls, '_examples_', [])
    if ex:
        cls_doc = update_docstring_with_examples(cls_doc, ex)
    call_doc = None
    # suffix for update_docstring_with_parameters:
    if cls.__call__.__doc__:
        call_doc = cls.__call__.__doc__
    # build standard doc and insert eval_doc
    spec = getattr(cls, '_params_', dict())
    # update class attributes that may override defaults
    if not _has_eval_results_call(cls):
        add_args = None
    else:
        # defaults for all common parameters are guaranteed to be available
        # from the class
        add_args = {k: getattr(cls, k) for k in eval_params}
        # ATTN: An important consequence of this update() call is that it
        # fulfills the docstring's promise of overriding any existing
        # values for eval_params keys in _params_.
        #
        # get docs for eval_results parameters:
        spec.update(eval_params)
    update_docstring_with_parameters(
        cls.__call__, spec,
        prefix=alter_interface_docs_for_api(cls_doc),
        suffix=alter_interface_docs_for_api(call_doc),
        add_args=add_args
    )
    # keep the dataset-bound method's docstring in sync with the command's
    if hasattr(cls.__call__, '_dataset_method'):
        cls.__call__._dataset_method.__doc__ = cls.__call__.__doc__
    # return original
    return cls
class Interface(ABC):
    '''Abstract base class for DataLad command implementations

    Any DataLad command implementation must be derived from this class. The
    code snippet below shows a complete sketch of a Python class with such an
    implementation.

    Importantly, no instances of command classes will created. Instead the main
    entry point is a static ``__call__()`` method, which must be implemented
    for any command. It is incorporated as a function in :mod:`datalad.api`, by
    default under the name of the file the implementation resides (e.g.,
    ``command`` for a ``command.py`` file). Therefore the file should have a
    name that is a syntax-compliant function name. The default naming rule can
    be overwritten with an explicit alternative name (see
    :func:`datalad.interface.base.get_api_name`).

    For commands implementing functionality that is operating on DataLad
    datasets, a command can be also be bound to the
    :class:`~datalad.distribution.dataset.Dataset` class as a method using
    the ``@datasetmethod`` decorator, under the specified name.

    Any ``__call__()`` implementation should be decorated with
    :func:`datalad.interface.utils.eval_results`. This adds support for
    standard result processing, and a range of common command parameters that
    do not need to be manually added to the signature of ``__call__()``. Any
    implementation decorated in this way should be implemented as a generator,
    and ``yield`` :ref:`result records <chap_design_result_records>`.

    Any argument or keyword argument that appears in the signature of
    ``__call__()`` must have a matching item in :attr:`Interface._params_`.
    The dictionary maps argument names to
    :class:`datalad.support.param.Parameter` specifications. The specification
    contain CLI argument declarations, value constraint and data type
    conversation specifications, documentation, and optional
    ``argparse``-specific arguments for CLI parser construction.

    The class decorator :func:`datalad.interface.base.build_doc` inspects an
    :class:`Interface` implementation, and builds a standard docstring from
    various sources of structured information within the class (also see
    below). The documentation is automatically tuned differently, depending on
    the target API (Python vs CLI).

    .. code:: python

      @build_doc
      class ExampleCommand(Interface):
          """SHORT DESCRIPTION

          LONG DESCRIPTION
          ...
          """

          # COMMAND PARAMETER DEFINITIONS
          _params_ = dict(
              example=Parameter(
                  args=("--example",),
                  doc="""Parameter description....""",
                  constraints=...),
              ...
          )
          )

          # RESULT PARAMETER OVERRIDES
          return_type= 'list'
          ...

          # USAGE EXAMPLES
          _examples_ = [
              dict(text="Example description...",
                   code_py="Example Python code...",
                   code_cmd="Example shell code ..."),
              ...
          ]

          @staticmethod
          @datasetmethod(name='example_command')
          @eval_results
          def __call__(example=None, ...):
              ...
              yield dict(...)

    The basic implementation setup described above can be customized for
    individual commands in various way that alter the behavior and
    presentation of a specific command. The following overview uses
    the code comment markers in the above snippet to illustrate where
    in the class implementation these adjustments can be made.

    (SHORT/LONG) DESCRIPTION
      ``Interface.short_description`` can be defined to provide an
      explicit short description to be used in documentation and help output,
      replacing the auto-generated extract from the first line of the full
      description.

    COMMAND PARAMETER DEFINITIONS
      When a parameter specification declares ``Parameter(args=tuple(), ...)``,
      i.e. no arguments specified, it will be ignored by the CLI. Likewise, any
      ``Parameter`` specification for which :func:`is_api_arg` returns ``False``
      will also be ignored by the CLI. Additionally, any such parameter will
      not be added to the parameter description list in the Python docstring.

    RESULT PARAMETER OVERRIDES
      The :func:`datalad.interface.utils.eval_results` decorator automatically
      add a range of additional arguments to a command, which are defined in
      :py:data:`datalad.interface.common_opts.eval_params`. For any such
      parameter an Interface implementation can define an interface-specific
      default value, by declaring a class member with the respective parameter
      name and the desired default as its assigned value. This feature can be
      used to tune the default command behavior, for example, with respect to the
      default result rendering style, or its error behavior.

      In addition to the common parameters of the Python API, an additional
      ``Interface.result_renderer_cmdline`` can be defined, in order to
      instruct the CLI to prefer the specified alternative result renderer
      over an ``Interface.result_renderer`` specification.

    USAGE EXAMPLES
      Any number of usage examples can be described in an ``_examples_`` list
      class attribute. Such an example contains a description, and code examples
      for Python and CLI.
    '''
    # Maps argument name -> Parameter specification; overridden by subclasses.
    _params_ = {}
    @abstractmethod
    def __call__():
        """Must be implemented by any command"""
    # https://github.com/datalad/datalad/issues/6376
    @classmethod
    def get_refds_path(cls, dataset):
        """Return a resolved reference dataset path from a `dataset` argument

        .. deprecated:: 0.16
           Use ``require_dataset()`` instead.
        """
        # theoretically a dataset could come in as a relative path -> resolve
        if dataset is None:
            return dataset
        refds_path = dataset.path if isinstance(dataset, Dataset) \
            else Dataset(dataset).path
        if refds_path:
            refds_path = str(resolve_path(refds_path))
        return refds_path
# pull all defaults from all eval_results() related parameters and assign them
# as attributes to the class, which then becomes the one place to query for
# default and potential overrides (see RESULT PARAMETER OVERRIDES in the
# Interface docstring); executed once at import time
for k, p in eval_params.items():
    setattr(Interface,
            # name is always given
            k,
            # but there may be no default (rather unlikely, though)
            p.cmd_kwargs.get('default', None))
def get_allargs_as_kwargs(call, args, kwargs):
    """Generate a kwargs dict from a call signature and ``*args``, ``**kwargs``

    Basically resolving the argnames for all positional arguments, and
    resolving the defaults for all kwargs that are not given in a kwargs
    dict
    """
    from datalad.utils import getargspec
    argspec = getargspec(call, include_kwonlyargs=True)
    defaults = argspec.defaults
    nargs = len(argspec.args)
    defaults = defaults or []  # ensure it is a list and not None
    assert (nargs >= len(defaults))
    # map any args to their name
    argmap = list(zip(argspec.args[:len(args)], args))
    kwargs_ = OrderedDict(argmap)
    # map defaults of kwargs to their names (update below)
    # NOTE: with no defaults, args[-0:] slices the *whole* argument list,
    # but zipping it against the empty defaults sequence yields nothing,
    # so the loop body never runs in that case.
    for k, v in zip(argspec.args[-len(defaults):], defaults):
        if k not in kwargs_:
            kwargs_[k] = v
    # update with provided kwarg args
    kwargs_.update(kwargs)
    # XXX we cannot assert the following, because our own highlevel
    # API commands support more kwargs than what is discoverable
    # from their signature...
    #assert (nargs == len(kwargs_))
    return kwargs_
# Only needed to support command implementations before the introduction
# of @eval_results
def _has_eval_results_call(cls):
"""Return True if cls has a __call__ decorated with @eval_results
"""
return getattr(getattr(cls, '__call__', None), '_eval_results', False)
| StarcoderdataPython |
1705753 | <filename>src/attrbench/metrics/sensitivity_n/result.py<gh_stars>0
from typing import List
import h5py
import numpy as np
from attrbench.metrics import MaskerActivationMetricResult
from attrbench.lib import NDArrayTree
class SensitivityNResult(MaskerActivationMetricResult):
    # Higher Sensitivity-N values indicate better attributions, so the
    # metric ordering is not inverted.
    inverted = False
    def __init__(self, method_names: List[str], maskers: List[str], activation_fns: List[str], index: np.ndarray):
        super().__init__(method_names, maskers, activation_fns)
        # Subset sizes (values of N) at which the metric was evaluated.
        self.index = index
    def add_to_hdf(self, group: h5py.Group):
        """Serialize this result into `group`, storing `index` as a group attribute."""
        group.attrs["index"] = self.index
        super().add_to_hdf(group)
    @classmethod
    def load_from_hdf(cls, group: h5py.Group) -> MaskerActivationMetricResult:
        """Reconstruct a result object from an HDF5 `group` written by add_to_hdf()."""
        # File layout nests masker / activation_fn / method, in that order.
        maskers = list(group.keys())
        activation_fns = list(group[maskers[0]].keys())
        method_names = list(group[maskers[0]][activation_fns[0]].keys())
        result = cls(method_names, maskers, activation_fns, group.attrs["index"])
        result._tree = NDArrayTree.load_from_hdf(["masker", "activation_fn", "method"], group)
        return result
    def get_df(self, mode="raw", include_baseline=False, masker="constant",
               activation_fn="linear", columns=None):
        """Return a dataframe of scores, optionally restricted to the given N `columns`."""
        def _postproc_fn(x):
            # Select the requested N columns (if any), then average over them.
            if columns is not None:
                x = x[..., columns]
            return np.mean(x, axis=-1)
        return super().get_df(mode, include_baseline, masker, activation_fn,
                              postproc_fn=_postproc_fn)
class SegSensitivityNResult(SensitivityNResult):
    # Behaviourally identical to SensitivityNResult; the distinct class only
    # tags results of the segmented variant of the metric (presumably
    # segment/superpixel-based masking -- confirm against the metric code).
    pass
| StarcoderdataPython |
6537952 | ## 搜索旋转排序数组
class Solution:
    """Search a target value in a rotated, ascending, duplicate-free array."""

    def search(self, nums: List[int], target: int) -> int:
        """Return the index of `target` in `nums`, or -1 if absent.

        Binary search on the half-open interval [left, right). Each probe is
        first classified as lying in the same rotated segment as the target
        (both before or both after the rotation point, judged relative to
        nums[0]); only then is a plain ordered comparison meaningful.
        """
        left, right = 0, len(nums)
        while left < right:
            middle = (left + right) // 2
            probe = nums[middle]
            if (probe < nums[0]) == (target < nums[0]):
                # Same segment: ordinary binary-search step.
                if probe == target:
                    return middle
                if probe < target:
                    left = middle + 1
                else:
                    right = middle
            elif target < nums[0]:
                # Target lives in the lower (rotated-in) segment, probe does
                # not -- move right.
                left = middle + 1
            else:
                right = middle
        return -1
322685 | <reponame>wiboticalex/pyuavcan
# Copyright (c) 2019 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: <NAME> <<EMAIL>>
import os
import sys
import time
import gzip
import typing
import pickle
import base64
import pathlib
import logging
import itertools
import dataclasses
import pydsdl
import nunavut
import nunavut.jinja
import nunavut.postprocessors
_AnyPath = typing.Union[str, pathlib.Path]
_TEMPLATE_DIRECTORY: pathlib.Path = pathlib.Path(__file__).absolute().parent / pathlib.Path("_templates")
_OUTPUT_FILE_PERMISSIONS = 0o444
"""
Read-only for all because the files are autogenerated and should not be edited manually.
"""
_logger = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class GeneratedPackageInfo:
    """Immutable description of a Python package generated from a DSDL root namespace."""

    path: pathlib.Path
    """
    Path to the directory that contains the top-level ``__init__.py``.
    """

    models: typing.Sequence[pydsdl.CompositeType]
    """
    List of PyDSDL objects describing the source DSDL definitions.
    This can be used for arbitrarily complex introspection and reflection.
    """

    name: str
    """
    The name of the generated package, which is the same as the name of the DSDL root namespace unless
    the name had to be stropped. See ``nunavut.lang.py.PYTHON_RESERVED_IDENTIFIERS``.
    """
def generate_package(
    root_namespace_directory: _AnyPath,
    lookup_directories: typing.Optional[typing.List[_AnyPath]] = None,
    output_directory: typing.Optional[_AnyPath] = None,
    allow_unregulated_fixed_port_id: bool = False,
) -> typing.Optional[GeneratedPackageInfo]:
    """
    This function runs the DSDL compiler, converting a specified DSDL root namespace into a Python package.

    In the generated package, nested DSDL namespaces are represented as Python subpackages,
    DSDL types as Python classes, type version numbers as class name suffixes separated via underscores
    (like ``Type_1_0``), constants as class attributes, fields as properties.
    For a more detailed information on how to use generated types, just generate them and read the resulting
    code -- it is made to be human-readable and contains docstrings.

    Generated packages can be freely moved around the file system or even deployed on other systems --
    they are fully location-invariant.

    Generated packages do not automatically import their nested subpackages. For example, if the application
    needs to use ``uavcan.node.Heartbeat.1.0``, it has to ``import uavcan.node`` explicitly; doing just
    ``import uavcan`` is not sufficient.

    If the source definition contains identifiers, type names, namespace components, or other entities whose
    names are listed in ``nunavut.lang.py.PYTHON_RESERVED_IDENTIFIERS``,
    the compiler applies stropping by suffixing such entities with an underscore ``_``.
    A small subset of applications may require access to a generated entity without knowing in advance whether
    its name is a reserved identifier or not (i.e., whether it's stropped or not). To simplify usage,
    this submodule provides helper functions
    :func:`pyuavcan.dsdl.get_attribute` and :func:`pyuavcan.dsdl.set_attribute` that provide access to generated
    class/object attributes using their original names before stropping.
    Likewise, the function :func:`pyuavcan.dsdl.get_model` can find a generated type even if any of its name
    components are stropped; e.g., a DSDL type ``str.Type.1.0`` would be imported as ``str_.Type_1_0``.
    None of it, however, is relevant for an application that does not require genericity (vast majority of
    applications don't), so a much easier approach in that case is just to look at the generated code and see
    if there are any stropped identifiers in it, and then just use appropriate names statically.

    The recommended usage pattern for this function is lazy generation.
    First, add the ``output_directory`` (if not specified it defaults to the current working directory)
    to :data:`sys.path` or to the ``PYTHONPATH`` environment variable to make the generated package(s) importable.
    Then try importing the target DSDL-generated package. If the attempt is successful, our job here is done.
    Otherwise, the package(s) need(s) to be generated by invoking this function,
    and then another import attempt will have to be made.
    Beware that before retrying the import it's necessary to invoke :func:`importlib.invalidate_caches`.

    A package generated for a particular version of PyUAVCAN may be incompatible with any other version of the
    library. If your application relies on lazy generation, consider including the library version string
    :data:`pyuavcan.__version__` in ``output_directory``, so that the generated package cache is
    invalidated automatically when a different version of the library is used.

    Having generated a package, consider updating the include path set of your Python IDE to take advantage
    of code completion and static type checking.

    When using PyUAVCAN from an interactive session (e.g., REPL or Jupyter), it is usually more convenient
    to generate packages using the command-line tool rather than invoking this function manually.
    Please refer to the command-line tool documentation for details.

    :param root_namespace_directory: The source DSDL root namespace directory path. The last component of the path
        is the name of the root namespace. For example, to generate package for the root namespace ``uavcan``,
        the path would be like ``foo/bar/uavcan``.

    :param lookup_directories: An iterable of DSDL root namespace directory paths where to search for referred DSDL
        definitions. The format of each path is the same as for the previous parameter; i.e., the last component
        of each path is a DSDL root namespace name. If you are generating code for a vendor-specific DSDL root
        namespace, make sure to provide at least the path to the standard ``uavcan`` namespace directory here.

    :param output_directory: The generated Python package directory will be placed into this directory.
        If not specified or None, the current working directory is used.
        For example, if this argument equals ``foo/bar``, and the DSDL root namespace name is ``uavcan``,
        the top-level ``__init__.py`` of the generated package will end up in ``foo/bar/uavcan/__init__.py``.
        The directory tree will be created automatically if it does not exist (like ``mkdir -p``).
        If the destination exists, it will be silently written over.
        In production, applications are recommended to shard the output directory by the library version number
        to avoid compatibility issues with code generated by older versions of the library.
        Don't forget to add the output directory to ``PYTHONPATH``, even if it's the current working directory.

    :param allow_unregulated_fixed_port_id: If True, the DSDL processing front-end will not reject unregulated
        data types with fixed port-ID. If you are not sure what it means, do not use it, and read the UAVCAN
        specification first. The default is False.

    :return: An instance of :class:`GeneratedPackageInfo` describing the generated package,
        unless the root namespace is empty, in which case it's None.

    :raises: :class:`OSError` if required operations on the file system could not be performed;
        :class:`pydsdl.InvalidDefinitionError` if the source DSDL definitions are invalid;
        :class:`pydsdl.InternalError` if there is a bug in the DSDL processing front-end;
        :class:`ValueError` if any of the arguments are otherwise invalid.

    The following table is an excerpt from the UAVCAN specification. Observe that *unregulated fixed port identifiers*
    are prohibited by default, but it can be overridden.

    +-------+---------------------------------------------------+----------------------------------------------+
    |Scope  | Regulated                                         | Unregulated                                  |
    +=======+===================================================+==============================================+
    |Public |Standard and contributed (e.g., vendor-specific)   |Definitions distributed separately from the   |
    |       |definitions. Fixed port identifiers are allowed;   |UAVCAN specification. Fixed port identifiers  |
    |       |they are called *"regulated port-IDs"*.            |are *not allowed*.                            |
    +-------+---------------------------------------------------+----------------------------------------------+
    |Private|Nonexistent category.                              |Definitions that are not available to anyone  |
    |       |                                                   |except their authors. Fixed port identifiers  |
    |       |                                                   |are permitted (although not recommended); they|
    |       |                                                   |are called *"unregulated fixed port-IDs"*.    |
    +-------+---------------------------------------------------+----------------------------------------------+

    Here is a brief usage example:

    >>> import sys
    >>> import pathlib
    >>> import tempfile
    >>> import importlib
    >>> import pyuavcan
    >>> dsdl_generated_dir = pathlib.Path(tempfile.gettempdir(), 'dsdl-for-my-program', pyuavcan.__version__)
    >>> dsdl_generated_dir.mkdir(parents=True, exist_ok=True)
    >>> sys.path.insert(0, str(dsdl_generated_dir))
    >>> try:
    ...     import sirius_cyber_corp
    ...     import uavcan.si.sample.volumetric_flow_rate
    ... except (ImportError, AttributeError):
    ...     _ = pyuavcan.dsdl.generate_package(root_namespace_directory='tests/dsdl/namespaces/sirius_cyber_corp',
    ...                                        lookup_directories=['tests/public_regulated_data_types/uavcan'],
    ...                                        output_directory=dsdl_generated_dir)
    ...     _ = pyuavcan.dsdl.generate_package(root_namespace_directory='tests/public_regulated_data_types/uavcan',
    ...                                        output_directory=dsdl_generated_dir)
    ...     importlib.invalidate_caches()
    ...     import sirius_cyber_corp
    ...     import uavcan.si.sample.volumetric_flow_rate
    """
    started_at = time.monotonic()
    if isinstance(lookup_directories, (str, bytes, pathlib.Path)):
        # A single path here is a common user error -- fail loudly rather than
        # iterating over the characters of the string.
        # https://forum.uavcan.org/t/nestedrootnamespaceerror-in-basic-usage-demo/794
        raise TypeError(f"Lookup directories shall be an iterable of paths, not {type(lookup_directories).__name__}")
    output_directory = pathlib.Path(pathlib.Path.cwd() if output_directory is None else output_directory).resolve()
    root_namespace_directory = pathlib.Path(root_namespace_directory).resolve()
    if root_namespace_directory.parent == output_directory:
        # https://github.com/UAVCAN/pyuavcan/issues/133 and https://github.com/UAVCAN/pyuavcan/issues/127
        raise ValueError(
            "The specified destination may overwrite the DSDL root namespace directory. "
            "Consider specifying a different output directory instead."
        )

    # Read the DSDL definitions
    composite_types = pydsdl.read_namespace(
        root_namespace_directory=str(root_namespace_directory),
        lookup_directories=list(map(str, lookup_directories or [])),
        allow_unregulated_fixed_port_id=allow_unregulated_fixed_port_id,
    )
    if not composite_types:
        _logger.info("Root namespace directory %r does not contain DSDL definitions", root_namespace_directory)
        return None
    # All definitions share one root namespace, so the set has exactly one element.
    (root_namespace_name,) = set(map(lambda x: x.root_namespace, composite_types))  # type: str,
    _logger.info("Read %d definitions from root namespace %r", len(composite_types), root_namespace_name)

    # Template primitives: extra Jinja filters made available to the code templates.
    filters = {
        "pickle": _pickle_object,
        "numpy_scalar_type": _numpy_scalar_type,
    }

    # Generate code
    assert isinstance(output_directory, pathlib.Path)
    language_context = nunavut.lang.LanguageContext("py", namespace_output_stem="__init__")
    root_ns = nunavut.build_namespace_tree(
        types=composite_types,
        root_namespace_dir=str(root_namespace_directory),
        output_dir=str(output_directory),
        language_context=language_context,
    )
    generator = nunavut.jinja.DSDLCodeGenerator(
        namespace=root_ns,
        generate_namespace_types=nunavut.YesNoDefault.YES,
        templates_dir=_TEMPLATE_DIRECTORY,
        followlinks=True,
        additional_filters=filters,
        post_processors=[
            # Output is read-only because the files are autogenerated.
            nunavut.postprocessors.SetFileMode(_OUTPUT_FILE_PERMISSIONS),
            nunavut.postprocessors.LimitEmptyLines(2),
            nunavut.postprocessors.TrimTrailingWhitespace(),
        ],
    )
    generator.generate_all()
    _logger.info(
        "Generated %d types from the root namespace %r in %.1f seconds",
        len(composite_types),
        root_namespace_name,
        time.monotonic() - started_at,
    )

    # A minor UX improvement; see https://github.com/UAVCAN/pyuavcan/issues/115
    # Warn when the output directory is not importable so the user is not
    # surprised by an ImportError later.
    for p in sys.path:
        if pathlib.Path(p).resolve() == pathlib.Path(output_directory):
            break
    else:
        if os.name == "nt":
            quick_fix = f'Quick fix: `$env:PYTHONPATH += ";{output_directory.resolve()}"`'
        elif os.name == "posix":
            quick_fix = f'Quick fix: `export PYTHONPATH="{output_directory.resolve()}"`'
        else:
            quick_fix = "Quick fix is not available for this OS."
        _logger.info(
            "Generated package is stored in %r, which is not in Python module search path list. "
            "The package will fail to import unless you add the destination directory to sys.path or PYTHONPATH. %s",
            str(output_directory),
            quick_fix,
        )
    return GeneratedPackageInfo(
        path=pathlib.Path(output_directory) / pathlib.Path(root_namespace_name),
        models=composite_types,
        name=root_namespace_name,
    )
def _pickle_object(x: typing.Any) -> str:
pck: str = base64.b85encode(gzip.compress(pickle.dumps(x, protocol=4))).decode().strip()
segment_gen = map("".join, itertools.zip_longest(*([iter(pck)] * 100), fillvalue=""))
return "\n".join(repr(x) for x in segment_gen)
def _numpy_scalar_type(t: pydsdl.Any) -> str:
    """Map a PyDSDL type to the NumPy scalar type expression used by the templates.

    The returned string references NumPy through the ``_np_`` alias imported by
    the generated modules. Non-primitive (composite) types are represented as
    ``_np_.object_``.
    """
    def pick_width(w: int) -> int:
        # Round the DSDL bit length up to the nearest standard machine width.
        for o in [8, 16, 32, 64]:
            if w <= o:
                return o
        raise ValueError(f"Invalid bit width: {w}")  # pragma: no cover

    # Fix: removed the pointless f-string prefixes from the constant strings below.
    if isinstance(t, pydsdl.BooleanType):
        # NOTE(review): ``numpy.bool`` is deprecated/removed in NumPy >= 1.24;
        # ``_np_.bool_`` may be required here -- confirm against the templates.
        return "_np_.bool"
    if isinstance(t, pydsdl.SignedIntegerType):
        return f"_np_.int{pick_width(t.bit_length)}"
    if isinstance(t, pydsdl.UnsignedIntegerType):
        return f"_np_.uint{pick_width(t.bit_length)}"
    if isinstance(t, pydsdl.FloatType):
        return f"_np_.float{pick_width(t.bit_length)}"
    assert not isinstance(t, pydsdl.PrimitiveType), "Forgot to handle some primitive types"
    return "_np_.object_"
| StarcoderdataPython |
11383525 | <gh_stars>1-10
from rest_framework import serializers
from transformation.models import TransformationXput, TransformationInput, \
TransformationOutput, XputStructure
class XputStructureSerializer(serializers.ModelSerializer):
    # Serializes the structure attached to a transformation input/output:
    # its compound datatype plus the min/max row constraints.
    class Meta:
        model = XputStructure
        fields = (
            "compounddatatype",
            "min_row",
            "max_row"
        )
class TransformationXputSerializer(serializers.ModelSerializer):
    # Base serializer for transformation inputs/outputs: exposes the (x, y)
    # coordinates plus an optional nested structure.
    structure = XputStructureSerializer(allow_null=True, required=False)
    class Meta:
        model = TransformationXput
        fields = ("x", "y", "structure")
# It's recommended in the documentation to explicitly declare the Meta classes for
# these classes.
class TransformationInputSerializer(TransformationXputSerializer):
    # Input-specific serializer: adds the dataset name and index fields on top
    # of the base xput fields.
    class Meta:
        model = TransformationInput
        fields = ("dataset_name", "dataset_idx", "x", "y", "structure")
class TransformationOutputSerializer(TransformationXputSerializer):
    # Output-specific serializer: same shape as the input serializer but bound
    # to the TransformationOutput model.
    class Meta:
        model = TransformationOutput
        fields = ("dataset_name", "dataset_idx", "x", "y", "structure")
| StarcoderdataPython |
3233868 | import pytest
from grep_func import grep_func
import sys
if sys.version_info[0] == 2:
from io import BytesIO as StringIO
else:
from io import StringIO
import re
from colorama import Fore
def test_grep():
    # Verify that grep_func highlights the matched substring in ANSI red when
    # searching the attributes of the ``logging`` module for "basic".
    import logging
    pattern = "basic"
    sys.stdout = StringIO()  # capture everything grep_func prints
    grep_func.grep_func(logging, pattern)
    out_val = str(sys.stdout.getvalue())
    sys.stdout = sys.__stdout__  # restores original stdout
    # Expected output: "basicConfig" with the match wrapped in Fore.RED/RESET.
    val = (
        re.sub(r"({})".format(pattern), Fore.RED + r"\1" + Fore.RESET, "basicConfig")
        + "\n"
    )
    assert out_val == val
| StarcoderdataPython |
11361535 | #%%
import pandas as pd
import datetime as dt
from datetime import date, timedelta, datetime
import requests
import urllib.request, json, os, itertools, threading, time, sys
#%%
def call_api():
    """Fetch one page of ZIP-code location metadata from the NOAA CDO v2 API.

    Reads the API token from ``noaa_token.txt`` in the working directory.
    Returns the decoded JSON payload as a dict, or None on a non-200 response.
    """
    api_url = 'https://www.ncdc.noaa.gov/cdo-web/api/v2/locations?datasetid=GHCND&locationcategoryid=ZIP' + '&units=standard&limit=1000&offset=3000'
    # Fix: use a context manager so the token file handle is always closed
    # (the original opened the file and never closed it).
    with open('noaa_token.txt', 'r') as f:
        token = f.read()
    headers = {'token': token}
    response = requests.get(api_url, headers=headers)
    if response.status_code == 200:
        return json.loads(response.content.decode('utf-8'))
    return None
# %%
# Fetch one page of locations and flatten its 'results' list into a dataframe.
data = call_api()
df = pd.DataFrame(data['results'])
# %%
# Make a class to search in NCDC database?
class NCDC:
    """Thin client for querying daily (GHCND) weather data for one ZIP code
    from the NOAA Climate Data Online v2 API.
    """

    def __init__(self, zip_code, token=None):
        """Create a client for ``zip_code``.

        If ``token`` is None, the API token is read from ``noaa_token.txt``.
        """
        if token is None:  # was ``== None``; identity comparison is the idiom
            # Fix: context manager ensures the token file is closed
            # (the original leaked the file handle).
            with open('noaa_token.txt', 'r') as f:
                self.token = f.read()
        else:
            self.token = token
        self.zip_code = zip_code
        self.base_url = ('https://www.ncdc.noaa.gov/cdo-web/api/v2/data?datasetid=GHCND&limit=1000&locationid=ZIP:'
                         + str(self.zip_code) + '&units=standard')

    def call_api(self, url):
        """GET ``url`` with the token header; return the decoded JSON dict.

        Raises Exception on any non-200 response (status code included in the
        message for easier diagnosis).
        """
        headers = {'token': self.token}
        response = requests.get(url, headers=headers)
        if response.status_code != 200:
            raise Exception('Error: HTTP %d from NOAA API' % response.status_code)
        return json.loads(response.content.decode('utf-8'))

    def get_day(self, day):
        """Return average-temperature (TAVG) data for one day ('yyyy-mm-dd')."""
        return self.call_api(self.base_url + '&startdate=' + day + '&enddate=' + day + '&datatypeid=TAVG')

    def get_range(self, startdate, enddate):
        """Return all datatypes between two dates (inclusive, 'yyyy-mm-dd')."""
        return self.call_api(self.base_url + '&startdate=' + startdate + '&enddate=' + enddate)

    def get_year(self, year):
        """Return all datatypes for one calendar year (e.g. '2020')."""
        return self.call_api(self.base_url + '&startdate=' + year + '-01-01&enddate=' + year + '-12-31')
| StarcoderdataPython |
1853549 | <gh_stars>0
"""Generate (if needed) an ed25519 SSH key and register it with GitLab.

Prompts for the user's GitLab password, obtains a temporary OAuth token via
the resource-owner password grant, and publishes the public key under the
current hostname.
"""
import getpass
import json
import pathlib
import socket
import subprocess
import urllib.request

GITLAB_BASE_URL = "https://gitlab.YOURCOMPANY.com"

user_name = getpass.getuser()
user_password = getpass.getpass()

ssh_key_path = pathlib.Path.home().joinpath(".ssh", "id_ed25519")
if ssh_key_path.is_file():
    print("Using existing SSH key:", ssh_key_path)
else:
    print("Creating new SSH key...", ssh_key_path)
    ssh_key_path.parent.mkdir(exist_ok=True)
    # -q: quiet; -N "": empty passphrase so the key can be used unattended.
    subprocess.check_call(["ssh-keygen", "-q", "-t", "ed25519", "-f", str(ssh_key_path), "-N", ""])

pub_key_file = ssh_key_path.with_suffix(".pub")
with pub_key_file.open("r") as file:
    ssh_key = file.read()

print("Creating temporary Gitlab access token...")
request = urllib.request.Request(
    url=GITLAB_BASE_URL + "/oauth/token",
    headers={"Content-Type": "application/json; charset=utf-8"},
    # Fix: the password payload referenced an undefined placeholder, which was
    # a syntax error; send the password collected above.
    data=json.dumps({"grant_type": "password", "username": user_name, "password": user_password}).encode()
)
with urllib.request.urlopen(request) as response:
    access_token = json.loads(response.read())["access_token"]

print("Publishing SSH key to Gitlab....")
urllib.request.urlopen(urllib.request.Request(
    url=GITLAB_BASE_URL + "/api/v4/user/keys",
    headers={
        "Content-Type": "application/json; charset=utf-8",
        "Authorization": f"Bearer {access_token}",
    },
    data=json.dumps({"title": socket.gethostname(), "key": ssh_key}).encode()
))
3440155 | from enum import Enum
class Colors(Enum):
    """DFS vertex state: WHITE = unvisited, GRAY = on the current DFS path,
    BLACK = fully explored."""
    BLACK = 0
    GRAY = 1
    WHITE = 2


def dfs(v, graph, colors):
    """Depth-first search from ``v``; return True iff a cycle is reachable.

    A back edge to a GRAY (on-stack) vertex indicates a cycle.
    """
    colors[v] = Colors.GRAY
    for neighbor in graph[v]:
        if (colors[neighbor] == Colors.GRAY) or (colors[neighbor] == Colors.WHITE and dfs(neighbor, graph, colors)):
            return True
    colors[v] = Colors.BLACK
    # Fix: previously fell off the end and returned None implicitly; return an
    # explicit boolean for a consistent contract (truthiness is unchanged).
    return False


def graph_color(graph):
    """Return True iff the directed ``graph`` (adjacency dict) contains a cycle."""
    colors = {v: Colors.WHITE for v in graph}
    for v in graph:
        if colors[v] == Colors.WHITE and dfs(v, graph, colors):
            return True
    return False
98944 | <gh_stars>0
from abc import abstractmethod, abstractproperty
import csv
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union
if TYPE_CHECKING:
from xvfb import Xvfb # type: ignore
SubProc = Any # subprocess.Popen[AnyStr]
class TokenLocation(Enum):
    """Where a token was observed within an HTTP request."""
    COOKIE = 1
    PATH = 2
    QUERY_PARAM = 3
    BODY = 4

    @staticmethod
    def from_int(num: int) -> "TokenLocation":
        """Convert an integer code (1-4) to the corresponding member.

        Raises ValueError for any other value.
        """
        # The member values already map 1:1 to the integer codes, so use the
        # built-in Enum value lookup instead of the previous if-chain; the
        # original error message is preserved for callers that inspect it.
        try:
            return TokenLocation(num)
        except ValueError:
            raise ValueError(f"Unknown enum value: {num}.") from None
# Type aliases used throughout this module for readability.
RequestRec = Dict[Union["TokenLocation", str], Any]  # per-request record keyed by token location or string
Url = str
Domain = str
ThirdPartyDomain = Domain
FirstPartyDomain = Domain
ISOTimestamp = str  # ISO-8601 formatted timestamp string
RequestTimestamp = Tuple[Url, ISOTimestamp]
TokenKey = str
TokenValue = str
KeyValueList = List[Tuple[TokenKey, TokenValue]]
Token = Tuple[TokenLocation, TokenKey, TokenValue]  # (location, key, value)
class RecordingHandles:
    """Bundle of process handles for one recording session: the browser
    subprocess and, optionally, the virtual display (Xvfb) it runs in."""

    def __init__(self, browser: Optional[SubProc] = None,
                 xvfb: Optional["Xvfb"] = None) -> None:
        # Keep both references so callers can shut the processes down later.
        self.browser, self.xvfb = browser, xvfb
6420334 |
#********************************************************************
# File: blocks_series2.py
# Author: <NAME>
#
# Description:
# Series 2 blocks
#
# Copyright (c) 2017 by Cisco Systems, Inc.
#
# ALL RIGHTS RESERVED. THESE SOURCE FILES ARE THE SOLE PROPERTY
# OF CISCO SYSTEMS, Inc. AND CONTAIN CONFIDENTIAL AND PROPRIETARY
# INFORMATION. REPRODUCTION OR DUPLICATION BY ANY MEANS OF ANY
# PORTION OF THIS SOFTWARE WITHOUT PRIOR WRITTEN CONSENT OF
# CISCO SYSTEMS, Inc. IS STRICTLY PROHIBITED.
#
#*********************************************************************/
from estreamer.definitions.blocks_series1 import BLOCK_STRING
from estreamer.definitions.blocks_series1 import BLOCK_BLOB
from estreamer.definitions.core import TYPE_BYTE
from estreamer.definitions.core import TYPE_UINT16
from estreamer.definitions.core import TYPE_UINT32
from estreamer.definitions.core import TYPE_UINT64
from estreamer.definitions.core import TYPE_UINT128
from estreamer.definitions.core import TYPE_UINT160
from estreamer.definitions.core import TYPE_UINT256
from estreamer.definitions.core import TYPE_UUID
from estreamer.definitions.core import TYPE_IPV6
# Without this the series 1 and 2 types collide. There is probably
# another nicer way to do this but right now this will have to do
# (the shim is a high bit ORed into every series-2 identifier).
BLOCK_SERIES_2_SHIM = 0x00010000
# Series 2 data blocks.
# NOTE(review): the numeric suffixes (_52, _53, _54, _60) appear to denote the
# product version that introduced or last changed the record -- confirm.
BLOCK_EVENT_EXTRA_DATA = 4 | BLOCK_SERIES_2_SHIM
BLOCK_EVENT_EXTRA_DATA_METADATA = 5 | BLOCK_SERIES_2_SHIM
BLOCK_UUID_STRING = 14 | BLOCK_SERIES_2_SHIM
BLOCK_ACCESS_CONTROL_RULE = 15 | BLOCK_SERIES_2_SHIM
BLOCK_ICMP_TYPE_DATA = 19 | BLOCK_SERIES_2_SHIM
BLOCK_ICMP_CODE_DATA = 20 | BLOCK_SERIES_2_SHIM
BLOCK_IP_REPUTATION_CATEGORY = 22 | BLOCK_SERIES_2_SHIM
BLOCK_RULE_DOCUMENTATION_DATA_52 = 27 | BLOCK_SERIES_2_SHIM
BLOCK_GEOLOCATION_52 = 28 | BLOCK_SERIES_2_SHIM
BLOCK_IOC_NAME_53 = 39 | BLOCK_SERIES_2_SHIM
BLOCK_FILE_EVENT_SHA_HASH_53 = 40 | BLOCK_SERIES_2_SHIM
BLOCK_INTRUSION_EVENT_53 = 41 | BLOCK_SERIES_2_SHIM
BLOCK_SSL_CERTIFICATION_DETAILS_54 = 50 | BLOCK_SERIES_2_SHIM
BLOCK_FILE_EVENT_60 = 56 | BLOCK_SERIES_2_SHIM
BLOCK_USER_60 = 57 | BLOCK_SERIES_2_SHIM
BLOCK_ENDPOINT_PROFILE_60 = 58 | BLOCK_SERIES_2_SHIM
BLOCK_ACCESS_CONTROL_POLICY_RULE_REASON_60 = 59 | BLOCK_SERIES_2_SHIM
BLOCK_INTRUSION_EVENT_60 = 60 | BLOCK_SERIES_2_SHIM
BLOCK_ID_NAME_DESCRIPTION = 61 | BLOCK_SERIES_2_SHIM
BLOCK_MALWARE_EVENT_60 = 62 | BLOCK_SERIES_2_SHIM
BLOCK_ACCESS_CONTROL_POLICY_METADATA = 64 | BLOCK_SERIES_2_SHIM
BLOCKS_SERIES_2 = {
# 4 Series 2
BLOCK_EVENT_EXTRA_DATA: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'deviceId' },
{ 'type': TYPE_UINT32, 'name': 'eventId' },
{ 'type': TYPE_UINT32, 'name': 'eventSecond' },
{ 'type': TYPE_UINT32, 'name': 'type' },
{ 'block': BLOCK_BLOB, 'name': 'blob' }],
# 5
BLOCK_EVENT_EXTRA_DATA_METADATA: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'type' },
{ 'block': BLOCK_STRING, 'name': 'name' },
{ 'block': BLOCK_STRING, 'name': 'encoding' }],
# 14
BLOCK_UUID_STRING: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UUID, 'name': 'uuid' },
{ 'block': BLOCK_STRING, 'name': 'name' }],
# 15
BLOCK_ACCESS_CONTROL_RULE: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UUID, 'name': 'uuid' },
{ 'type': TYPE_UINT32, 'name': 'id' },
{ 'block': BLOCK_STRING, 'name': 'name' }],
# 19
BLOCK_ICMP_TYPE_DATA: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT16, 'name': 'type' },
{ 'type': TYPE_UINT16, 'name': 'protocol' },
{ 'block': BLOCK_STRING, 'name': 'description' }],
# 20
BLOCK_ICMP_CODE_DATA: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT16, 'name': 'code' },
{ 'type': TYPE_UINT16, 'name': 'type' },
{ 'type': TYPE_UINT16, 'name': 'protocol' },
{ 'block': BLOCK_STRING, 'name': 'description' }],
# 22
BLOCK_IP_REPUTATION_CATEGORY: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'id' },
{ 'type': TYPE_UUID, 'name': 'accessControlPolicyUuid' },
{ 'block': BLOCK_STRING, 'name': 'name' }],
# 27
BLOCK_RULE_DOCUMENTATION_DATA_52: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'signatureId' },
{ 'type': TYPE_UINT32, 'name': 'generatorId' },
{ 'type': TYPE_UINT32, 'name': 'revision' },
{ 'block': BLOCK_STRING, 'name': 'summary' },
{ 'block': BLOCK_STRING, 'name': 'impact' },
{ 'block': BLOCK_STRING, 'name': 'detail' },
{ 'block': BLOCK_STRING, 'name': 'affectedSystems' },
{ 'block': BLOCK_STRING, 'name': 'attackScenarios' },
{ 'block': BLOCK_STRING, 'name': 'easeOfAttack' },
{ 'block': BLOCK_STRING, 'name': 'falsePositives' },
{ 'block': BLOCK_STRING, 'name': 'falseNegatives' },
{ 'block': BLOCK_STRING, 'name': 'correctiveAction' },
{ 'block': BLOCK_STRING, 'name': 'contributors' },
{ 'block': BLOCK_STRING, 'name': 'additionalReferences' } ],
# 28
BLOCK_GEOLOCATION_52: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT16, 'name': 'countryCode' },
{ 'block': BLOCK_STRING, 'name': 'country' }],
# 39
BLOCK_IOC_NAME_53: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'id' },
{ 'block': BLOCK_STRING, 'name': 'category' },
{ 'block': BLOCK_STRING, 'name': 'eventType' }],
# 40
BLOCK_FILE_EVENT_SHA_HASH_53: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT256, 'name': 'shaHash' },
{ 'block': BLOCK_STRING, 'name': 'fileName' },
{ 'type': TYPE_BYTE, 'name': 'disposition' },
{ 'type': TYPE_BYTE, 'name': 'userDefined'}],
# 41 - LEGACY
BLOCK_INTRUSION_EVENT_53: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'deviceId' },
{ 'type': TYPE_UINT32, 'name': 'eventId' },
{ 'type': TYPE_UINT32, 'name': 'eventSecond' },
{ 'type': TYPE_UINT32, 'name': 'eventMicrosecond' },
{ 'type': TYPE_UINT32, 'name': 'ruleId' },
{ 'type': TYPE_UINT32, 'name': 'generatorId' },
{ 'type': TYPE_UINT32, 'name': 'ruleRevision' },
{ 'type': TYPE_UINT32, 'name': 'classificationId' },
{ 'type': TYPE_UINT32, 'name': 'priorityId' },
{ 'type': TYPE_IPV6, 'name': 'sourceIpAddress' },
{ 'type': TYPE_IPV6, 'name': 'destinationIpAddress' },
{ 'type': TYPE_UINT16, 'name': 'sourcePortOrIcmpType' },
{ 'type': TYPE_UINT16, 'name': 'destinationPortOrIcmpType' },
{ 'type': TYPE_BYTE, 'name': 'ipProtocolId' },
{ 'type': TYPE_BYTE, 'name': 'impactFlags' },
{ 'type': TYPE_BYTE, 'name': 'impact' },
{ 'type': TYPE_BYTE, 'name': 'blocked' },
{ 'type': TYPE_UINT32, 'name': 'mplsLabel' },
{ 'type': TYPE_UINT16, 'name': 'vlanId' },
{ 'type': TYPE_UINT16, 'name': 'pad' },
{ 'type': TYPE_UUID, 'name': 'policyUuid' },
{ 'type': TYPE_UINT32, 'name': 'userId' },
{ 'type': TYPE_UINT32, 'name': 'webApplicationId' },
{ 'type': TYPE_UINT32, 'name': 'clientApplicationId' },
{ 'type': TYPE_UINT32, 'name': 'applicationId' },
{ 'type': TYPE_UINT32, 'name': 'accessControlRuleId' },
{ 'type': TYPE_UUID, 'name': 'accessControlPolicyUuid' },
{ 'type': TYPE_UUID, 'name': 'interfaceIngressUuid' },
{ 'type': TYPE_UUID, 'name': 'interfaceEgressUuid' },
{ 'type': TYPE_UUID, 'name': 'securityZoneIngressUuid' },
{ 'type': TYPE_UUID, 'name': 'securityZoneEgressUuid' },
{ 'type': TYPE_UINT32, 'name': 'connectionTimestamp' },
{ 'type': TYPE_UINT16, 'name': 'connectionInstanceId' },
{ 'type': TYPE_UINT16, 'name': 'connectionCounter' },
{ 'type': TYPE_UINT16, 'name': 'sourceCountry' },
{ 'type': TYPE_UINT16, 'name': 'destinationCountry' },
{ 'type': TYPE_UINT16, 'name': 'iocNumber' }],
# 50
BLOCK_SSL_CERTIFICATION_DETAILS_54: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT160, 'name': 'fingerprintShaHash' },
{ 'type': TYPE_UINT160, 'name': 'publicKeyShaHash' },
{ 'type': TYPE_UINT160, 'name': 'serialNumber' },
{ 'type': TYPE_UINT32, 'name': 'serialNumberLength' },
{ 'block': BLOCK_STRING, 'name': 'subjectCn' },
{ 'block': BLOCK_STRING, 'name': 'subjectOrganisation' },
{ 'block': BLOCK_STRING, 'name': 'subjectOU' },
{ 'block': BLOCK_STRING, 'name': 'subjectCountry' },
{ 'block': BLOCK_STRING, 'name': 'issuerCn' },
{ 'block': BLOCK_STRING, 'name': 'issuerOrganisation' },
{ 'block': BLOCK_STRING, 'name': 'issuerOU' },
{ 'block': BLOCK_STRING, 'name': 'issuerCountry' },
{ 'type': TYPE_UINT32, 'name': 'validStartDate' },
{ 'type': TYPE_UINT32, 'name': 'validFinishDate' } ],
# 56
BLOCK_FILE_EVENT_60: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'deviceId' },
{ 'type': TYPE_UINT16, 'name': 'connectionInstance' },
{ 'type': TYPE_UINT16, 'name': 'connectionCounter' },
{ 'type': TYPE_UINT32, 'name': 'connectionTimestamp' },
{ 'type': TYPE_UINT32, 'name': 'fileEventTimestamp' },
{ 'type': TYPE_IPV6, 'name': 'sourceIpAddress' },
{ 'type': TYPE_IPV6, 'name': 'destinationIpAddress' },
{ 'type': TYPE_BYTE, 'name': 'disposition' },
{ 'type': TYPE_BYTE, 'name': 'speroDisposition' },
{ 'type': TYPE_BYTE, 'name': 'fileStorageStatus' },
{ 'type': TYPE_BYTE, 'name': 'fileAnalysisStatus' },
{ 'type': TYPE_BYTE, 'name': 'localMalwareAnalysisStatus' },
{ 'type': TYPE_BYTE, 'name': 'archiveFileStatus' },
{ 'type': TYPE_BYTE, 'name': 'threatScore' },
{ 'type': TYPE_BYTE, 'name': 'action' },
{ 'type': TYPE_UINT256, 'name': 'shaHash' },
{ 'type': TYPE_UINT32, 'name': 'fileTypeId' },
{ 'block': BLOCK_STRING, 'name': 'fileName' },
{ 'type': TYPE_UINT64, 'name': 'fileSize' },
{ 'type': TYPE_BYTE, 'name': 'direction' },
{ 'type': TYPE_UINT32, 'name': 'applicationId' },
{ 'type': TYPE_UINT32, 'name': 'userId' },
{ 'block': BLOCK_STRING, 'name': 'uri' },
{ 'block': BLOCK_STRING, 'name': 'signature' },
{ 'type': TYPE_UINT16, 'name': 'sourcePort' },
{ 'type': TYPE_UINT16, 'name': 'destinationPort' },
{ 'type': TYPE_BYTE, 'name': 'protocol' },
{ 'type': TYPE_UUID, 'name': 'accessControlPolicyUuid' },
{ 'type': TYPE_UINT16, 'name': 'sourceCountry' },
{ 'type': TYPE_UINT16, 'name': 'destinationCountry' },
{ 'type': TYPE_UINT32, 'name': 'webApplicationId' },
{ 'type': TYPE_UINT32, 'name': 'clientApplicationId' },
{ 'type': TYPE_UINT128, 'name': 'securityContext' },
{ 'type': TYPE_UINT160, 'name': 'sslCertificateFingerprint' },
{ 'type': TYPE_UINT16, 'name': 'sslActualAction' },
{ 'type': TYPE_UINT16, 'name': 'sslFlowStatus' },
{ 'block': BLOCK_STRING, 'name': 'archiveSha' },
{ 'block': BLOCK_STRING, 'name': 'archiveName' },
{ 'type': TYPE_BYTE, 'name': 'archiveDepth'},
{ 'type': TYPE_UINT32, 'name': 'httpResponse'}],
# 57
BLOCK_USER_60: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'id' },
{ 'type': TYPE_UINT32, 'name': 'protocol' },
{ 'block': BLOCK_STRING, 'name': 'name' }],
# 58
BLOCK_ENDPOINT_PROFILE_60: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'id' },
{ 'block': BLOCK_STRING, 'name': 'profileName' },
{ 'block': BLOCK_STRING, 'name': 'fullName' }],
# 59
BLOCK_ACCESS_CONTROL_POLICY_RULE_REASON_60: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'id' },
{ 'block': BLOCK_STRING, 'name': 'description' }],
# 60
BLOCK_INTRUSION_EVENT_60: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'deviceId' },
{ 'type': TYPE_UINT32, 'name': 'eventId' },
{ 'type': TYPE_UINT32, 'name': 'eventSecond' },
{ 'type': TYPE_UINT32, 'name': 'eventMicrosecond' },
{ 'type': TYPE_UINT32, 'name': 'ruleId' },
{ 'type': TYPE_UINT32, 'name': 'generatorId' },
{ 'type': TYPE_UINT32, 'name': 'ruleRevision' },
{ 'type': TYPE_UINT32, 'name': 'classificationId' },
{ 'type': TYPE_UINT32, 'name': 'priorityId' },
{ 'type': TYPE_IPV6, 'name': 'sourceIpAddress' },
{ 'type': TYPE_IPV6, 'name': 'destinationIpAddress' },
{ 'type': TYPE_UINT16, 'name': 'sourcePortOrIcmpType' },
{ 'type': TYPE_UINT16, 'name': 'destinationPortOrIcmpType' },
{ 'type': TYPE_BYTE, 'name': 'ipProtocolId' },
{ 'type': TYPE_BYTE, 'name': 'impactFlags' },
{ 'type': TYPE_BYTE, 'name': 'impact' },
{ 'type': TYPE_BYTE, 'name': 'blocked' },
{ 'type': TYPE_UINT32, 'name': 'mplsLabel' },
{ 'type': TYPE_UINT16, 'name': 'vlanId' },
{ 'type': TYPE_UINT16, 'name': 'pad' },
{ 'type': TYPE_UUID, 'name': 'policyUuid' },
{ 'type': TYPE_UINT32, 'name': 'userId' },
{ 'type': TYPE_UINT32, 'name': 'webApplicationId' },
{ 'type': TYPE_UINT32, 'name': 'clientApplicationId' },
{ 'type': TYPE_UINT32, 'name': 'applicationId' },
{ 'type': TYPE_UINT32, 'name': 'accessControlRuleId' },
{ 'type': TYPE_UUID, 'name': 'accessControlPolicyUuid' },
{ 'type': TYPE_UUID, 'name': 'interfaceIngressUuid' },
{ 'type': TYPE_UUID, 'name': 'interfaceEgressUuid' },
{ 'type': TYPE_UUID, 'name': 'securityZoneIngressUuid' },
{ 'type': TYPE_UUID, 'name': 'securityZoneEgressUuid' },
{ 'type': TYPE_UINT32, 'name': 'connectionTimestamp' },
{ 'type': TYPE_UINT16, 'name': 'connectionInstanceId' },
{ 'type': TYPE_UINT16, 'name': 'connectionCounter' },
{ 'type': TYPE_UINT16, 'name': 'sourceCountry' },
{ 'type': TYPE_UINT16, 'name': 'destinationCountry' },
{ 'type': TYPE_UINT16, 'name': 'iocNumber' },
{ 'type': TYPE_UINT128, 'name': 'securityContext' },
{ 'type': TYPE_UINT160, 'name': 'sslCertificateFingerprint' },
{ 'type': TYPE_UINT16, 'name': 'sslActualAction' },
{ 'type': TYPE_UINT16, 'name': 'sslFlowStatus' },
{ 'type': TYPE_UUID, 'name': 'networkAnalysisPolicyUuid' },
{ 'type': TYPE_UINT32, 'name': 'httpResponse'}],
# 61
BLOCK_ID_NAME_DESCRIPTION: [
{ 'type': TYPE_UINT32, 'name': 'blockType'},
{ 'type': TYPE_UINT32, 'name': 'blockLength'},
{ 'type': TYPE_UINT32, 'name': 'id'},
{ 'block': BLOCK_STRING, 'name': 'name' },
{ 'block': BLOCK_STRING, 'name': 'description' }],
# 62
BLOCK_MALWARE_EVENT_60: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UUID, 'name': 'agentUuid' },
{ 'type': TYPE_UUID, 'name': 'cloudUuid' },
{ 'type': TYPE_UINT32, 'name': 'malwareEventTimestamp' },
{ 'type': TYPE_UINT32, 'name': 'eventTypeId' },
{ 'type': TYPE_UINT32, 'name': 'eventSubtypeId' },
{ 'type': TYPE_BYTE, 'name': 'detectorId' },
{ 'block': BLOCK_STRING, 'name': 'detectionName' },
{ 'block': BLOCK_STRING, 'name': 'user' },
{ 'block': BLOCK_STRING, 'name': 'fileName' },
{ 'block': BLOCK_STRING, 'name': 'filePath' },
{ 'block': BLOCK_STRING, 'name': 'fileShaHash' },
{ 'type': TYPE_UINT32, 'name': 'fileSize' },
{ 'type': TYPE_UINT32, 'name': 'fileType' },
{ 'type': TYPE_UINT32, 'name': 'fileTimestamp' },
{ 'block': BLOCK_STRING, 'name': 'parentFileName' },
{ 'block': BLOCK_STRING, 'name': 'parentShaHash' },
{ 'block': BLOCK_STRING, 'name': 'eventDescription' },
{ 'type': TYPE_UINT32, 'name': 'deviceId' },
{ 'type': TYPE_UINT16, 'name': 'connectionInstance' },
{ 'type': TYPE_UINT16, 'name': 'connectionCounter' },
{ 'type': TYPE_UINT32, 'name': 'connectionEventTimestamp' },
{ 'type': TYPE_BYTE, 'name': 'direction' },
{ 'type': TYPE_IPV6, 'name': 'sourceIpAddress' },
{ 'type': TYPE_IPV6, 'name': 'destinationIpAddress' },
{ 'type': TYPE_UINT32, 'name': 'applicationId' },
{ 'type': TYPE_UINT32, 'name': 'userId' },
{ 'type': TYPE_UUID, 'name': 'accessControlPolicyUuid' },
{ 'type': TYPE_BYTE, 'name': 'disposition' },
{ 'type': TYPE_BYTE, 'name': 'retroDisposition' },
{ 'block': BLOCK_STRING, 'name': 'uri' },
{ 'type': TYPE_UINT16, 'name': 'sourcePort' },
{ 'type': TYPE_UINT16, 'name': 'destinationPort' },
{ 'type': TYPE_UINT16, 'name': 'sourceCountry' },
{ 'type': TYPE_UINT16, 'name': 'destinationCountry' },
{ 'type': TYPE_UINT32, 'name': 'webApplicationId' },
{ 'type': TYPE_UINT32, 'name': 'clientApplicationId' },
{ 'type': TYPE_BYTE, 'name': 'action' },
{ 'type': TYPE_BYTE, 'name': 'protocol' },
{ 'type': TYPE_BYTE, 'name': 'threatScore' },
{ 'type': TYPE_UINT16, 'name': 'iocNumber' },
{ 'type': TYPE_UINT128, 'name': 'securityContext' },
{ 'type': TYPE_UINT160, 'name': 'sslCertificateFingerprint' },
{ 'type': TYPE_UINT16, 'name': 'sslActualAction' },
{ 'type': TYPE_UINT16, 'name': 'sslFlowStatus' },
{ 'block': BLOCK_STRING, 'name': 'archiveSha' },
{ 'block': BLOCK_STRING, 'name': 'archiveName' },
{ 'type': TYPE_BYTE, 'name': 'archiveDepth' },
{ 'type': TYPE_UINT32, 'name': 'httpResponse'}],
# 64
BLOCK_ACCESS_CONTROL_POLICY_METADATA: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UUID, 'name': 'uuid' },
{ 'type': TYPE_UINT32, 'name': 'sensorId' },
{ 'block': BLOCK_STRING, 'name': 'name' } ],
}
| StarcoderdataPython |
5048466 | from spaceone.api.sample.v1 import helloworld_pb2, helloworld_pb2_grpc
from spaceone.core.pygrpc import BaseAPI
class HelloWorld(BaseAPI, helloworld_pb2_grpc.HelloWorldServicer):
    """gRPC servicer for the sample HelloWorld API."""
    # Protobuf modules BaseAPI uses to (de)serialize request/response messages.
    pb2 = helloworld_pb2
    pb2_grpc = helloworld_pb2_grpc
    def say_hello(self, request, context):
        """Handle the SayHello RPC.

        Parses the raw gRPC request into (params, metadata), delegates the call
        to the HelloWorldService service layer, and wraps its result in a
        HelloWorldInfo response message.
        """
        params, metadata = self.parse_request(request, context)
        # The service is used as a context manager so its resources are
        # released once the response has been built.
        with self.locator.get_service('HelloWorldService', metadata) as helloworld_svc:
            return self.locator.get_info('HelloWorldInfo', helloworld_svc.say_hello(params))
| StarcoderdataPython |
9750385 | <filename>hw3/language_model.py<gh_stars>0
import numpy as np
from segtok import tokenizer
import torch as th
from torch import nn
# Using a basic RNN/LSTM for Language modeling
class LanguageModel(nn.Module):
    """Word-level language model: embedding -> LSTM -> dropout -> vocab logits.

    Expects integer token ids of shape [batch, seq_len] and returns
    unnormalized logits of shape [batch, seq_len, vocab_size].
    """

    def __init__(self, vocab_size, rnn_size, num_layers=1, dropout=0):
        super().__init__()
        # Map each token id of the vocabulary to a dense vector of rnn_size.
        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=rnn_size)
        # LSTM over the embedded sequence.  batch_first=True so inputs/outputs
        # use the [batch, seq, feature] convention.  The original comment
        # promised batch-first but never set the flag, which made the LSTM
        # silently treat dimension 0 as the time axis.
        self.lstm = nn.LSTM(input_size=rnn_size, hidden_size=rnn_size,
                            num_layers=num_layers, dropout=dropout,
                            batch_first=True)
        # nn.LSTM does not apply dropout after the last layer's output, so
        # add it explicitly here.
        self.dropout = nn.Dropout(p=dropout)
        # Project hidden states to per-token logits over the vocabulary.
        self.output = nn.Linear(in_features=rnn_size, out_features=vocab_size)

    def forward(self, x):
        """Return logits of shape [batch, seq_len, vocab_size] for token ids x."""
        embeds = self.embedding(x)
        lstm_out, _ = self.lstm(embeds)
        lstm_out = self.dropout(lstm_out)
        logits = self.output(lstm_out)
        return logits
| StarcoderdataPython |
3446386 | <gh_stars>1-10
"""This is SWAMP: Solving structures With Alpha Membrane Pairs
This module implements useful classes and methods used across all modules in SWAMP
"""
__author__ = "<NAME>"
__credits__ = "<NAME>, & <NAME>"
__email__ = "<EMAIL>"
import sys
import os
import gzip
import shutil
import tempfile
import logging
from swamp import version
__version__ = version.__version__
# Unless explicitly disabled (e.g. when building documentation), fail fast when
# the CCP4 suite is unavailable and import the heavy third-party dependencies.
if 'DISABLE_DEPENDENCY_CHECKS' not in os.environ:
    if "CCP4" not in os.environ:
        raise RuntimeError("Cannot find CCP4 root directory")
    import gemmi
    import conkit.io
    from conkit.core import Contact, ContactMap, Sequence
    # NOTE(review): _parse_pdb_header_list is a private Bio.PDB helper and may
    # break on biopython upgrades -- confirm before bumping the dependency.
    from Bio.PDB.parse_pdb_header import _parse_pdb_header_list
def SwampLibrary(*args, **kwargs):
    """Lazily import and construct a :py:obj:`~swamp.utils.swamplibrary.SwampLibrary`."""
    from swamp.utils.swamplibrary import SwampLibrary as _SwampLibrary
    return _SwampLibrary(*args, **kwargs)
def ThreadResults(*args, **kwargs):
    """Lazily import and construct a :py:obj:`~swamp.utils.threadresults.ThreadResults`."""
    from swamp.utils.threadresults import ThreadResults as _ThreadResults
    return _ThreadResults(*args, **kwargs)
def TargetSplit(*args, **kwargs):
    """Lazily import and construct a :py:obj:`~swamp.utils.targetsplit.TargetSplit`."""
    from swamp.utils.targetsplit import TargetSplit as _TargetSplit
    return _TargetSplit(*args, **kwargs)
def compress(fname, out=None):
    """Compress a file into gzip format.

    :param str fname: the file name to be compressed
    :param str out: optional output file name; defaults to ``fname`` + ``.gz``
    :returns: compressed file name (str)
    """
    if out is None:
        out = '%s.gz' % fname
    # Stream the payload instead of slurping the whole file into memory; this
    # also removes the Python 2/3 bytes/bytearray branch the original needed.
    with open(fname, 'rb') as f_in, gzip.open(out, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
    return out
def decompress(fname, out=None):
    """Decompress a .gz file into a plain file.

    :param str fname: the file name to be decompressed
    :param str out: optional output file name; defaults to ``fname`` without ``.gz``
    :returns: the decompressed file name (str)
    """
    if out is None:
        out = fname.replace('.gz', '')
    # Stream the decompressed payload instead of materialising it in memory.
    with gzip.open(fname, "rb") as f_in, open(out, "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)
    return out
def touch(fname, content='', mode='w'):
    """Create (or append to) a file with the specified contents.

    :param str fname: file name to be created
    :param str content: content to write into the file (default '')
    :param str mode: mode to open the file handler (default: 'w')
    """
    # The with-statement closes the handle; the original's explicit close()
    # inside the block was redundant.
    with open(fname, mode) as fhandle:
        fhandle.write(content)
fhandle.close()
def get_tempfile():
    """Return a candidate temporary PDB file name under ``$CCP4_SCR``.

    :returns: temporary file name (str)
    """
    # NOTE(review): relies on the private tempfile._get_candidate_names API,
    # exactly as the original did -- confirm before upgrading Python.
    candidate = next(tempfile._get_candidate_names())
    return os.path.join(os.environ['CCP4_SCR'], candidate + '.pdb')
def remove(path):
    """Delete *path* if it exists: directories recursively, files directly."""
    if not os.path.exists(path):
        return
    if os.path.isdir(path):
        shutil.rmtree(path)
    else:
        os.remove(path)
def create_tempfile(content, mode="w"):
    """Write *content* to a fresh temporary file and return its path.

    :param str content: content to dump into the temporary file
    :param str mode: mode used to open the file handle (default: 'w')
    :returns: the path of the new temporary file (str)
    """
    path = get_tempfile()
    touch(path, content, mode)
    return path
def invert_contactmap(cmap):
    """Invert a contact map so residue numbering runs in the opposite direction.

    :param :py:obj:`~conkit.core.ContactMap` cmap: the contact map of interest
    :returns: the contact map corresponding with the inverted sequence (1-res_seq) \
    (:py:obj:`~conkit.core.ContactMap`)
    """
    result = ContactMap('inverted')
    # Mirror every contact around the highest residue number present.
    offset = max(max(contact.id) for contact in cmap) + 1
    for contact in cmap:
        result.add(Contact(offset - contact.res1_seq,
                           offset - contact.res2_seq,
                           contact.raw_score))
    result.sequence = cmap.sequence
    return result
def extract_interhelical_cmap(cmap, helices, residues, new_id, score_threshold=0.0):
    """Method to extract the interhelical contacts between a pair of helices

    :param :py:obj:`~conkit.core.ContactMap` cmap: the contact map of interest
    :param tuple helices: a nested list with the listed residue numbers of the two helices of interest
    :param list residues: flat list of all residue numbers covered by the helices; a contact's new \
    position is its index in this list plus one
    :param str new_id: the new identifier given to the resulting contact map
    :param float score_threshold: the raw score threshold at which contacts will be included (default 0.0)
    :returns: result_cmap: the contact map containing only the inter-helical contacts between the pair of helices \
    (:py:obj:`~conkit.core.ContactMap`)
    """
    result_cmap = ContactMap(new_id)
    dummy_sequence = Sequence('id', 'A' * len(residues))
    for contact in cmap:
        if contact.raw_score >= score_threshold:
            # Boolean masks marking which helix (if any) contains each endpoint.
            helix_1 = [(contact.res1_seq in x) for x in helices]
            helix_2 = [(contact.res2_seq in x) for x in helices]
            # Keep only contacts whose endpoints fall in two *different* helices.
            if helix_1 != helix_2 and any(helix_1) and any(helix_2):
                # Create a new contact (IMPORTANT: Renumber the contact position within the new map)
                new_contact = Contact(residues.index(contact.res1_seq) + 1,
                                      residues.index(contact.res2_seq) + 1, contact.raw_score)
                result_cmap.add(new_contact)
    result_cmap.sequence = dummy_sequence
    return result_cmap
def extract_fragment_cmap(pdb_hierarchy, helices):
    """Method to extract the interhelical contact map of a given pdb file

    :param :py:obj:`~gemmi.Structure` pdb_hierarchy: the pdb hierarchy of the fragment
    :param tuple helices: a nested list with the listed residues numbers of the helices of interest
    :returns: the contact map with the interhelical contacts (:py:obj:`~conkit.core.ContactMap`), \
    or None if no contact map could be computed from the structure
    """
    # Flatten the per-helix residue lists into one list of residue numbers.
    residues = [i for sub in helices for i in sub]
    # conkit reads from disk, so round-trip the hierarchy through a temp pdb.
    temp_pdb = get_tempfile()
    pdb_hierarchy.write_pdb(temp_pdb)
    fragment_cmap = conkit.io.read(temp_pdb, "pdb").top_map
    os.remove(temp_pdb)
    if fragment_cmap is None:
        return None
    return extract_interhelical_cmap(fragment_cmap, helices, residues, "InterhelicalContacts")
def merge_hierarchies(hiearchies, new_chain_id="A", new_model_id="1", renumber=False):
    """Merge the given hierarchies into one (single model, single chain).

    :param tuple hiearchies: a list with the pdb hierarchies to be merged
    :param str new_chain_id: the new chain id for the result hierarchy
    :param str new_model_id: the new model name for the result hierarchy
    :param bool renumber: if True the residues of the resulting hierarchy will be renumbered starting at 1
    :returns: a new :py:obj:`~gemmi.Structure` hierarchy corresponding to the merged input hierarchies
    :raises ValueError: if fewer than two hierarchies are provided, or they are not given as a list/tuple
    """
    if not isinstance(hiearchies, list) and not isinstance(hiearchies, tuple):
        raise ValueError("Please provide hierarchies to be merged as lists!")
    if len(hiearchies) < 2:
        raise ValueError("Please provide at least two hierarchies to merge!")
    new_model = gemmi.Model(new_model_id)
    new_chain = gemmi.Chain(new_chain_id)
    new_hierarchy = gemmi.Structure()
    for hierarchy in hiearchies:
        for res in hierarchy[0][0]:
            new_chain.add_residue(res)
    # Attach the merged chain exactly once, after all residues are collected.
    # (Previously add_chain ran inside the loop, attaching a copy of the
    # partially-built chain to the model once per input hierarchy.)
    new_model.add_chain(new_chain)
    new_hierarchy.add_model(new_model)
    if renumber:
        renumber_hierarchy(new_hierarchy)
    return new_hierarchy
def renumber_hierarchy(hierarchy, start=1):
    """Renumber *hierarchy* in place so residues start at *start*.

    Residue sequence numbers restart at *start* within each chain, while atom
    serial numbers run consecutively from 1 across the whole hierarchy.

    :param :py:obj:`~gemmi.Structure` hierarchy: pdb hierarchy to be renumbered
    :param int start: first residue number to use for each chain
    """
    serial = 1
    for model in hierarchy:
        for chain in model:
            for new_num, residue in enumerate(chain, start):
                residue.seqid.num = new_num
                for atom in residue:
                    atom.serial = serial
                    serial += 1
def extract_hierarchy(full_hierarchy, to_extract, chainID=None):
    """Extract a given set of residues from a pdb file as a gemmi structure hierarchy.

    Only the first model is considered; if the structure has several chains and
    no ``chainID`` is given, only the first chain is kept.

    :param :py:obj:`~gemmi.Structure` full_hierarchy: pdb hierarchy of interest
    :param tuple to_extract: list with the residue numbers to be extracted
    :param str chainID: the chain id where the residues to be extracted are located (default None)
    :returns: a new :py:obj:`~gemmi.Structure` hierarchy containing the extracted residues
    :raises ValueError: if ``chainID`` is given but not present in the structure
    """
    new_hierarchy = gemmi.Structure()
    new_chain = gemmi.Chain("A")
    new_model = gemmi.Model("1")
    # Check model number
    if len(full_hierarchy) > 1:
        # Typo fixed: the original logged "> 1r model".
        logging.debug("pdb {0} has > 1 model - only first model will be kept".format(full_hierarchy.name))
    # Check chain number
    if len(full_hierarchy[0]) > 1:
        if chainID is None:
            logging.debug(
                "pdb {0} has > 1 chain - only first chain will be kept".format(full_hierarchy.name))
            old_chain = full_hierarchy[0][0]
        else:
            try:
                old_chain = full_hierarchy[0][chainID]
            except ValueError:
                raise ValueError("Chain %s not found in %s!" % (chainID, full_hierarchy.name))
    else:
        old_chain = full_hierarchy[0][0]
    # Extract region
    for residue in old_chain:
        if residue.seqid.num in to_extract:
            new_chain.add_residue(residue)
    # Append the model to the new hierarchy and exit
    new_model.add_chain(new_chain)
    new_hierarchy.add_model(new_model)
    return new_hierarchy
def invert_hiearchy(hierarchy):
    """Return a copy of *hierarchy* with the residue order reversed (1-res_seq).

    :param :py:obj:`~gemmi.Structure` hierarchy: pdb hierarchy to be inverted
    :returns: the :py:obj:`~gemmi.Structure` hierarchy corresponding with the inverted sequence
    """
    flipped_model = gemmi.Model("1")
    flipped_chain = gemmi.Chain("A")
    flipped_hierarchy = gemmi.Structure()
    original_residues = [residue for residue in hierarchy[0][0]]
    for new_num, residue in enumerate(reversed(original_residues), start=1):
        flipped_chain.add_residue(residue)
        flipped_chain[-1].seqid.num = new_num
    flipped_model.add_chain(flipped_chain)
    flipped_hierarchy.add_model(flipped_model)
    # Normalise residue and atom numbering of the result.
    renumber_hierarchy(flipped_hierarchy)
    return flipped_hierarchy
def get_missing_residues(header_list):
    """Collect the missing residues described in the REMARK section of a pdb header.

    :param tuple header_list: a list with the lines of the header section of the pdb file
    :returns: a dictionary mapping each chain id to the list of missing residue numbers
    """
    head = _parse_pdb_header_list(header_list)
    rslt = {}
    for residue in head['missing_residues']:
        # setdefault replaces the original membership-test-then-append dance.
        rslt.setdefault(residue['chain'], []).append(residue['ssseq'])
    return rslt
def extract_hierarchy_seqnumber(hierarchy, seq_numbers, chain_id='A'):
    """ Extract the hierarchy corresponding with a given set of residue sequence numbers. Considers missing residues.

    NOTE(review): this function mutates the input hierarchy -- it rewrites
    ``residue.seqid.num`` for every residue of the selected chain while iterating.

    :argument :py:obj:`~gemmi.Structure` hierarchy: original hierarchy to trim
    :argument tuple seq_numbers: residue sequence number to extract
    :argument str chain_id: chain where the residues should be extracted from
    :returns: a new :py:obj:`~gemmi.Structure` with the residues of interest
    :example
    >>> import gemmi
    >>> from swamp.utils import extract_hierarchy_seqnumber
    >>> hierarchy = gemmi.read_structure('/mnt/sda1/MR_edge_cases/3txt_MR/3txt.pdb')
    >>> subtrgt_1 = extract_hierarchy_seqnumber(hierarchy, [x for x in range(104 ,125)] + [x for x in range(158, 179)])
    >>> subtrgt_1.write_minimal_pdb('/home/filo/test.pdb')
    """
    # Missing residues are read from the pdb REMARK header; chains with no
    # missing residues simply get an empty list.
    header = hierarchy.make_pdb_headers().split('\n')
    try:
        missing_res = get_missing_residues(header)[chain_id]
    except KeyError:
        missing_res = []
    new_hierarchy = gemmi.Structure()
    new_model = gemmi.Model("1")
    new_chain = gemmi.Chain(chain_id)
    idx = 1
    for residue in hierarchy[0][chain_id]:
        # Count the missing residues that precede this residue's (current)
        # sequence number, and shift the renumbering by that amount.
        # NOTE(review): the comparison uses residue.seqid.num *before* it is
        # overwritten on the next line -- presumably intentional, but confirm.
        factor = sum([1 for x in missing_res if x < residue.seqid.num])
        residue.seqid.num = idx + factor
        idx += 1
        if residue.seqid.num in seq_numbers:
            new_chain.add_residue(residue)
    new_model.add_chain(new_chain)
    new_hierarchy.add_model(new_model)
    return new_hierarchy
def merge_into_ensemble(hierarchies):
    """Combine several single-model hierarchies into one ensemble.

    Each input hierarchy becomes one numbered model of the resulting ensemble.

    :argument tuple hierarchies: the hierarchies that will be merged into an ensemble
    :returns: a new :py:obj:`~gemmi.Structure` hierarchy containing the ensemble
    """
    ensemble = gemmi.Structure()
    for model_number, hierarchy in enumerate(hierarchies, start=1):
        member = gemmi.Model(str(model_number))
        member.add_chain(hierarchy[0][0])
        ensemble.add_model(member)
    return ensemble
def split_ensemble_into_models(hierarchy):
    """Split an ensemble into one single-model hierarchy per model.

    :argument :py:obj:`~gemmi.Structure` hierarchy: the input ensemble to be split
    :returns: a tuple of :py:obj:`~gemmi.Structure`, one per model of the input ensemble
    """
    members = []
    for model in hierarchy:
        member = gemmi.Structure()
        member.add_model(model)
        members.append(member)
    return tuple(members)
| StarcoderdataPython |
1623923 | import numpy as np
import pickle
import os
import matplotlib.pylab as plt
plt.close('all')
def save_obj(obj, name):
    """Pickle *obj* to '<name>.pkl' using the highest protocol."""
    with open('{}.pkl'.format(name), 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Unpickle and return the object stored in '<name>.pkl'."""
    with open(name + '.pkl', 'rb') as handle:
        data = pickle.load(handle)
    return data
# ---------------------------------------------------------------------------
# Post-processing: load the tensile-test simulation outputs, compute the strand
# axial strain, and plot strain vs time and strain vs axial reaction force.
# (Dead 'nstep'/'arr' scratch variables and the commented-out per-file
# reaction-force accumulation loop were removed.)
# ---------------------------------------------------------------------------
d = load_obj('./Tensile/infoSimu')

fileExt = r".txt"
fileDir = './Tensile/'
# Target axial strain the simulation was asked to reach.
epsilonTarget = 0.01

# All raw text output files written by the simulation.
L = [_ for _ in os.listdir(fileDir) if _.endswith(fileExt)]

# Imposed displacement of the Dirichlet boundary and the reaction forces.
posDirichlet = np.loadtxt(fileDir + 'CentralBeamDisplacementEnd_x.txt')
Freac = np.loadtxt(fileDir + "ReactionForces.txt")
posDirichlet = posDirichlet[:, 1:4]  # the first column stores the time
assert posDirichlet[0].shape == (3,)

# Time axis (first column of any output file) and axial (z) reaction force.
t = np.loadtxt(fileDir + L[0])[:, 0]
RFz = Freac[1:, 2]

# Displacement relative to the initial position of the Dirichlet node.
uDirichlet = posDirichlet - posDirichlet[0]
# length of the strand
h = d['lengthStrand']
# compute the strand axial strain
epsilon = uDirichlet[:, 2] / h
# Warn when the final strain is NOT within 1% of the target.  The original
# test was inverted: it printed the warning exactly when the target HAD been
# reached.
if not (0.99 < epsilon[-1] / epsilonTarget < 1.01):
    print("The target strain has not been reached")

# Only plot the loading phase (t <= 1).  'mask' replaces the original variable
# name 'filter', which shadowed the builtin.
mask = t <= 1

plt.figure()
plt.plot(t[mask], epsilon[mask])
ax = plt.gca()
ax.set_xlabel('time')
ax.set_ylabel(r'$\epsilon$ strand $\frac{l}{L}$')
plt.savefig(fileDir + 'timeVSepsilon')

plt.figure()
plt.plot(epsilon[mask], RFz[mask], label='Sofa model with BFE')
# Reference curve from Costello's theory, as reported by Jiang (1999);
# the slope is taken from two points read off the curve of Fig. 5.
slopeCostello = (150E3) / 0.011
plt.plot(epsilon[mask], slopeCostello * epsilon[mask], label='Costello', linestyle='--')
ax = plt.gca()
ax.set_xlabel(r'$\epsilon$ strand $\frac{l}{L}$')
ax.set_ylabel(r'$F_{z}$')
ax.legend()
plt.savefig(fileDir + 'epsilonVSAxialLoad')

plt.pause(0.1)
238601 | import sqlite3
def get_users():
    """Return all non-deleted users ordered by userid.

    :returns: list of dbuser rows (tuples)
    """
    conn = sqlite3.connect("data/manage_RP")
    try:
        sql = '''
            select * from dbuser where delete_flg = 'false' order by userid
        '''
        data = conn.execute(sql).fetchall()
    finally:
        # Close the connection even if the query raises.
        conn.close()
    return data
def get_user_by_id(user_id):
    """Return the dbuser row for *user_id*, or None if absent or soft-deleted.

    :param user_id: primary key of the user to look up
    :returns: a row tuple, or None
    """
    conn = sqlite3.connect("data/manage_RP")
    try:
        sql = '''
            select * from dbuser where userid = ? and delete_flg = 'false'
        '''
        data = conn.execute(sql, (user_id,)).fetchone()
    finally:
        # Close the connection even if the query raises.
        conn.close()
    return data
def get_user_by_username(username):
    """Return the stored password for *username*, or None when no such user.

    The original indexed ``fetchone()[0]`` directly, which raised a TypeError
    for unknown or soft-deleted usernames.

    :param str username: user name to look up
    :returns: the password string, or None
    """
    conn = sqlite3.connect("data/manage_RP")
    try:
        sql = '''
            select password from dbuser where username = ? and delete_flg = 'false'
        '''
        row = conn.execute(sql, (username,)).fetchone()
    finally:
        # Close the connection even if the query raises.
        conn.close()
    return row[0] if row is not None else None
def delete_user_by_id(userid):
    """Hard-delete the dbuser row with primary key *userid*.

    :param userid: primary key of the row to delete
    :returns: the userid that was passed in
    """
    conn = sqlite3.connect("data/manage_RP")
    try:
        sql = '''
            delete from dbuser where userid = ?
        '''
        conn.execute(sql, (userid,))
        conn.commit()
    finally:
        # Close the connection even if the statement raises.
        conn.close()
    return userid
def insert_user_by_id(username, password, email, admin_flg):
    """Insert a new (non-deleted) user row.

    :param str username: user name
    :param str password: password to store
    :param str email: e-mail address
    :param admin_flg: admin flag value stored verbatim
    :returns: True on success
    """
    conn = sqlite3.connect("data/manage_RP")
    try:
        sql_dbuser = '''
            insert into dbuser (username, password, email, admin_flg, delete_flg) VALUES (?, ?, ?, ?, 'false')
        '''
        conn.execute(sql_dbuser, (username, password, email, admin_flg,))
        conn.commit()
    finally:
        # Close the connection even if the statement raises.
        conn.close()
    return True
def update_user_by_id(user_id, password, admin_flg):
    """Update the password and admin flag of the user with *user_id*.

    :param user_id: primary key of the row to update
    :param str password: new password value
    :param admin_flg: new admin flag value
    :returns: True on success
    """
    conn = sqlite3.connect("data/manage_RP")
    try:
        sql_dbuser = '''
            update dbuser set password = ?, admin_flg = ? where userid = ?
        '''
        conn.execute(sql_dbuser, (password, admin_flg, user_id,))
        conn.commit()
    finally:
        # Close the connection even if the statement raises.
        conn.close()
    return True
| StarcoderdataPython |
368433 | from django.shortcuts import render, redirect
from django.urls import reverse
from django.http import HttpResponse
from c4.celery import app
from movies.models import Movie
from movies.tasks import search_and_save
import urllib.parse
from celery.exceptions import TimeoutError
def search(request):
    """Kick off an async movie search and redirect to results or a wait page."""
    # Retrieves the search term from the query string, then dispatches the
    # Celery task with .delay.  It waits two seconds for a result; if none is
    # received, it redirects to the waiting view keyed by the task id.
    search_term = request.GET["search_term"]
    res = search_and_save.delay(search_term)
    try:
        res.get(timeout=2)
    except TimeoutError:
        return redirect(
            reverse("search_wait", args=(res.id,)) + "?search_term=" + urllib.parse.quote_plus(search_term)
        )
    return redirect(
        reverse("search_results") + "?search_term=" + urllib.parse.quote_plus(search_term),
        permanent=False,
    )
def search_wait(request, result_uuid):
    """Poll a pending search task; redirect to results once it has finished."""
    # Accepts the task UUID as argument, then fetches its AsyncResult.
    search_term = request.GET["search_term"]
    res = app.AsyncResult(result_uuid)
    # Tries to get the result but will return immediately if no result (Timeout of -1)
    try:
        res.get(timeout=-1)
    except TimeoutError:
        return HttpResponse("Task pending, please refresh.",
                            status=200)
    return redirect(
        reverse("search_results") + "?search_term=" + urllib.parse.quote_plus(search_term)
    )
def search_results(request):
    """Render all movies whose title contains the search term, one per line."""
    term = request.GET["search_term"]
    matches = Movie.objects.filter(title__icontains=term)
    # Plain-text response: one matching title per line.
    body = "\n".join(movie.title for movie in matches)
    return HttpResponse(body, content_type="text/plain")
| StarcoderdataPython |
3318188 | from django.views.generic.base import TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse
from django.conf import settings
from rest_framework.permissions import IsAdminUser, IsAuthenticated
from rest_framework.viewsets import ViewSet
from rest_framework.decorators import action
from rest_framework import status
from config.export.tasks import (
async_export_full_data,
async_export_statistics,
async_export_yearly_compare_statistics,
)
class Index(LoginRequiredMixin, TemplateView):
    """Landing page; anonymous visitors are redirected to the login view."""
    # Where unauthenticated users are sent, and the query parameter that
    # carries the originally requested URL.
    login_url = "/users/login/"
    redirect_field_name = "redirect_to"
    template_name = "index.html"
class SessionTimeout(TemplateView):
    """Static page shown when the user's session has expired."""
    template_name = "session-timeout.html"
class SessionViewSet(ViewSet):
    """Read-only endpoints for managing the authenticated user's session."""
    http_method_names = ['get']
    permission_classes = [IsAuthenticated]
    @action(methods=['GET'], detail=False)
    def keep_alive(self, request):
        """Extend the session by resetting its max age to SESSION_COOKIE_AGE."""
        request.session.set_expiry(settings.SESSION_COOKIE_AGE)
        return HttpResponse(status=status.HTTP_200_OK)
class ExportViewSet(ViewSet):
    """Admin-only endpoints that launch async export tasks.

    Each action enqueues a Celery task that e-mails the export to the
    requesting admin, and immediately returns 204 No Content.
    """
    http_method_names = ['get']
    permission_classes = [IsAdminUser]
    @action(methods=['GET'], detail=False)
    def full_data(self, request):
        """Export the full data set for the given ?year=."""
        year = int(request.query_params.get('year'))
        async_export_full_data.delay(year, request.user.email)
        return HttpResponse(status=status.HTTP_204_NO_CONTENT)
    @action(methods=['GET'], detail=False)
    def statistic(self, request):
        """Export the statistics for the given ?year=."""
        year = int(request.query_params.get('year'))
        async_export_statistics.delay(year, request.user.email)
        return HttpResponse(status=status.HTTP_204_NO_CONTENT)
    @action(methods=['GET'], detail=False)
    def statistic_compare(self, request):
        """Export a comparison of the statistics of years ?y1= and ?y2=."""
        y1 = int(request.query_params.get('y1'))
        y2 = int(request.query_params.get('y2'))
        async_export_yearly_compare_statistics.delay(y1, y2, request.user.email)
        return HttpResponse(status=status.HTTP_204_NO_CONTENT)
| StarcoderdataPython |
5183378 | <reponame>xiaoluo91/whatshap
"""
Print the square of a number
This is an example subcommand. The first line above is a short summary description
of what the command does. It is shown when running whatshap --help. Make it short!
To add a new subcommand:
* Copy this module to a new file and adjust it to your needs. The name of the
module is identical to the subcommand name.
* Add the name of this module to the ``COMMANDS`` list in ``whatshap/__main__.py``.
The module must have add_arguments() and main() methods. The validate() method is optional and
can be used to validate the command-line options (use parser.error to raise an
error).
"""
import logging
logger = logging.getLogger(__name__)
def add_arguments(parser):
    """Register this subcommand's command-line arguments on *parser*."""
    parser.add_argument("number", type=int, help="The number to square (at least 10)")
def validate(args, parser):
    """Reject numbers of 10 or below via parser.error."""
    if args.number > 10:
        return
    parser.error("Sorry, this is too simple!")
def main(args):
    """Print the square of args.number as 'n * n = n**2'."""
    n = args.number
    print(f"{n} * {n} = {n * n}")
| StarcoderdataPython |
342150 | <reponame>FranciscoAT/adventOfCode2017<gh_stars>0
def main():
    """Solve AoC 2017 day 16 part 2: state after one billion full dances.

    Reads the comma-separated dance moves from stdin.  Instead of simulating
    all 10**9 dances, it records every program ordering seen after a full
    dance; once an ordering repeats, the dance is periodic and the final
    state is read off the cycle with a modulo.
    """
    sequence = input().split(',')
    programs = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']
    program_string = ''.join(programs)
    # History of orderings, starting with the initial state.
    seen = [program_string]
    for index in range(1000000000):
        # One full dance: apply every move once.
        for command in sequence:
            programs = run_command(programs, command)
        program_string = ''.join(programs)
        if program_string not in seen:
            seen.append(program_string)
        else:
            # Cycle found: keep only the periodic part and jump ahead to the
            # state the remaining (1e9 - index - 1) dances would produce.
            index_of_seen = seen.index(program_string)
            seen = seen[index_of_seen:]
            program_string = seen[(1000000000-index-1)%len(seen)]
            break
    print(program_string)
def run_command(programs, command):
    """Apply one dance move to *programs* and return the resulting list.

    Moves: 'sN' spins the last N programs to the front, 'xA/B' swaps the
    programs at positions A and B, 'pA/B' swaps the programs named A and B.
    """
    move, spec = command[0], command[1:]
    if move == 's':
        shift = -int(spec)
        programs = programs[shift:] + programs[:shift]
    elif move == 'x':
        left, right = (int(pos) for pos in spec.split('/'))
        programs[left], programs[right] = programs[right], programs[left]
    elif move == 'p':
        name_a, name_b = spec.split('/')
        pos_a, pos_b = programs.index(name_a), programs.index(name_b)
        programs[pos_a], programs[pos_b] = programs[pos_b], programs[pos_a]
    return programs

if __name__ == '__main__':
    main()
5055880 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import struct
import array
import string
import re
from google.pyglib.gexcept import AbstractMethod
import httplib
# Public API of this module.
__all__ = ['ProtocolMessage', 'Encoder', 'Decoder',
           'ProtocolBufferDecodeError',
           'ProtocolBufferEncodeError',
           'ProtocolBufferReturnError']

# Splits an absolute URL into (scheme, host, path); used to follow redirects.
URL_RE = re.compile('^(https?)://([^/]+)(/.*)$')
class ProtocolMessage:
def __init__(self, contents=None):
raise AbstractMethod
def Clear(self):
raise AbstractMethod
def IsInitialized(self, debug_strs=None):
raise AbstractMethod
def Encode(self):
try:
return self._CEncode()
except AbstractMethod:
e = Encoder()
self.Output(e)
return e.buffer().tostring()
def _CEncode(self):
raise AbstractMethod
def ParseFromString(self, s):
self.Clear()
self.MergeFromString(s)
return
def MergeFromString(self, s):
try:
self._CMergeFromString(s)
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferDecodeError, '\n\t'.join(dbg)
except AbstractMethod:
a = array.array('B')
a.fromstring(s)
d = Decoder(a, 0, len(a))
self.Merge(d)
return
def _CMergeFromString(self, s):
raise AbstractMethod
def __getstate__(self):
return self.Encode()
def __setstate__(self, contents_):
self.__init__(contents=contents_)
def sendCommand(self, server, url, response, follow_redirects=1,
secure=0, keyfile=None, certfile=None):
data = self.Encode()
if secure:
if keyfile and certfile:
conn = httplib.HTTPSConnection(server, key_file=keyfile,
cert_file=certfile)
else:
conn = httplib.HTTPSConnection(server)
else:
conn = httplib.HTTPConnection(server)
conn.putrequest("POST", url)
conn.putheader("Content-Length", "%d" %len(data))
conn.endheaders()
conn.send(data)
resp = conn.getresponse()
if follow_redirects > 0 and resp.status == 302:
m = URL_RE.match(resp.getheader('Location'))
if m:
protocol, server, url = m.groups()
return self.sendCommand(server, url, response,
follow_redirects=follow_redirects - 1,
secure=(protocol == 'https'),
keyfile=keyfile,
certfile=certfile)
if resp.status != 200:
raise ProtocolBufferReturnError(resp.status)
if response is not None:
response.ParseFromString(resp.read())
return response
def sendSecureCommand(self, server, keyfile, certfile, url, response,
follow_redirects=1):
return self.sendCommand(server, url, response,
follow_redirects=follow_redirects,
secure=1, keyfile=keyfile, certfile=certfile)
def __str__(self, prefix="", printElemNumber=0):
raise AbstractMethod
def ToASCII(self):
return self._CToASCII(ProtocolMessage._SYMBOLIC_FULL_ASCII)
def ToCompactASCII(self):
return self._CToASCII(ProtocolMessage._NUMERIC_ASCII)
def ToShortASCII(self):
return self._CToASCII(ProtocolMessage._SYMBOLIC_SHORT_ASCII)
_NUMERIC_ASCII = 0
_SYMBOLIC_SHORT_ASCII = 1
_SYMBOLIC_FULL_ASCII = 2
def _CToASCII(self, output_format):
raise AbstractMethod
def ParseASCII(self, ascii_string):
raise AbstractMethod
def Output(self, e):
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferEncodeError, '\n\t'.join(dbg)
self.OutputUnchecked(e)
return
def OutputUnchecked(self, e):
raise AbstractMethod
def Parse(self, d):
self.Clear()
self.Merge(d)
return
def Merge(self, d):
self.TryMerge(d)
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferDecodeError, '\n\t'.join(dbg)
return
def TryMerge(self, d):
raise AbstractMethod
def CopyFrom(self, pb):
if (pb == self): return
self.Clear()
self.MergeFrom(pb)
  def MergeFrom(self, pb):
    # Abstract: field-wise merge is generated per message class.
    raise AbstractMethod
  def lengthVarInt32(self, n):
    # 32-bit varints use the same wire encoding as 64-bit ones.
    return self.lengthVarInt64(n)
def lengthVarInt64(self, n):
if n < 0:
return 10
result = 0
while 1:
result += 1
n >>= 7
if n == 0:
break
return result
  def lengthString(self, n):
    # Length-delimited field: varint length prefix plus n payload bytes.
    return self.lengthVarInt32(n) + n
  def DebugFormat(self, value):
    # Default debug rendering: plain string conversion.
    return "%s" % value
def DebugFormatInt32(self, value):
if (value <= -2000000000 or value >= 2000000000):
return self.DebugFormatFixed32(value)
return "%d" % value
def DebugFormatInt64(self, value):
if (value <= -2000000000 or value >= 2000000000):
return self.DebugFormatFixed64(value)
return "%d" % value
def DebugFormatString(self, value):
def escape(c):
o = ord(c)
if o == 10: return r"\n"
if o == 39: return r"\'"
if o == 34: return r'\"'
if o == 92: return r"\\"
if o >= 127 or o < 32: return "\\%03o" % o
return c
return '"' + "".join([escape(c) for c in value]) + '"'
def DebugFormatFloat(self, value):
return "%ff" % value
  def DebugFormatFixed32(self, value):
    # Render as unsigned 32-bit hex; negatives are two's-complemented first.
    if (value < 0): value += (1L<<32)
    return "0x%x" % value
  def DebugFormatFixed64(self, value):
    # Render as unsigned 64-bit hex; negatives are two's-complemented first.
    if (value < 0): value += (1L<<64)
    return "0x%x" % value
def DebugFormatBool(self, value):
if value:
return "true"
else:
return "false"
class Encoder:
  """Accumulates protocol-buffer wire-format bytes in an array('B') buffer.

  Multi-byte fixed-width values are written little-endian; varints use the
  standard 7-bits-per-byte, MSB-continuation encoding.
  """
  # Wire types (stored in the low 3 bits of a field tag).
  NUMERIC = 0
  DOUBLE = 1
  STRING = 2
  STARTGROUP = 3
  ENDGROUP = 4
  FLOAT = 5
  MAX_TYPE = 6
  def __init__(self):
    # 'B' array of raw unsigned bytes holding the encoded output.
    self.buf = array.array('B')
    return
  def buffer(self):
    # Expose the underlying byte buffer.
    return self.buf
  def put8(self, v):
    # Append one unsigned byte (0..255).
    if v < 0 or v >= (1<<8): raise ProtocolBufferEncodeError, "u8 too big"
    self.buf.append(v & 255)
    return
  def put16(self, v):
    # Append an unsigned 16-bit value, little-endian.
    if v < 0 or v >= (1<<16): raise ProtocolBufferEncodeError, "u16 too big"
    self.buf.append((v >> 0) & 255)
    self.buf.append((v >> 8) & 255)
    return
  def put32(self, v):
    # Append an unsigned 32-bit value, little-endian.
    if v < 0 or v >= (1L<<32): raise ProtocolBufferEncodeError, "u32 too big"
    self.buf.append((v >> 0) & 255)
    self.buf.append((v >> 8) & 255)
    self.buf.append((v >> 16) & 255)
    self.buf.append((v >> 24) & 255)
    return
  def put64(self, v):
    # Append an unsigned 64-bit value, little-endian.
    if v < 0 or v >= (1L<<64): raise ProtocolBufferEncodeError, "u64 too big"
    self.buf.append((v >> 0) & 255)
    self.buf.append((v >> 8) & 255)
    self.buf.append((v >> 16) & 255)
    self.buf.append((v >> 24) & 255)
    self.buf.append((v >> 32) & 255)
    self.buf.append((v >> 40) & 255)
    self.buf.append((v >> 48) & 255)
    self.buf.append((v >> 56) & 255)
    return
  def putVarInt32(self, v):
    # Range-check as signed 32-bit, then encode like a 64-bit varint.
    if v >= (1L << 31) or v < -(1L << 31):
      raise ProtocolBufferEncodeError, "int32 too big"
    self.putVarInt64(v)
    return
  def putVarInt64(self, v):
    # Signed varint: negatives are two's-complemented to unsigned 64-bit.
    if v >= (1L << 63) or v < -(1L << 63):
      raise ProtocolBufferEncodeError, "int64 too big"
    if v < 0:
      v += (1L << 64)
    self.putVarUint64(v)
    return
  def putVarUint64(self, v):
    # Core varint loop: 7 payload bits per byte, high bit = continuation.
    if v < 0 or v >= (1L << 64):
      raise ProtocolBufferEncodeError, "uint64 too big"
    while 1:
      bits = v & 127
      v >>= 7
      if (v != 0):
        bits |= 128
      self.buf.append(bits)
      if v == 0:
        break
    return
  def putFloat(self, v):
    # Append the 4 bytes of a native single-precision float.
    a = array.array('B')
    a.fromstring(struct.pack("f", v))
    self.buf.extend(a)
    return
  def putDouble(self, v):
    # Append the 8 bytes of a native double-precision float.
    a = array.array('B')
    a.fromstring(struct.pack("d", v))
    self.buf.extend(a)
    return
  def putBoolean(self, v):
    # Booleans are encoded as a single 0/1 byte.
    if v:
      self.buf.append(1)
    else:
      self.buf.append(0)
    return
  def putPrefixedString(self, v):
    # Length-delimited string: varint length prefix, then raw bytes.
    self.putVarInt32(len(v))
    a = array.array('B')
    a.fromstring(v)
    self.buf.extend(a)
    return
  def putRawString(self, v):
    # Raw bytes with no length prefix.
    a = array.array('B')
    a.fromstring(v)
    self.buf.extend(a)
class Decoder:
  """Reads protocol-buffer wire-format values from buf[idx:limit].

  The buffer is expected to behave like array('B'); multi-byte fixed-width
  values are read little-endian, mirroring Encoder.
  """
  def __init__(self, buf, idx, limit):
    self.buf = buf      # input byte buffer
    self.idx = idx      # current read position
    self.limit = limit  # one past the last readable byte
    return
  def avail(self):
    # Number of unread bytes remaining.
    return self.limit - self.idx
  def buffer(self):
    return self.buf
  def pos(self):
    return self.idx
  def skip(self, n):
    # Advance n bytes; fail if that would pass the limit.
    if self.idx + n > self.limit: raise ProtocolBufferDecodeError, "truncated"
    self.idx += n
    return
  def skipData(self, tag):
    """Skip one field's payload; the wire type is the low 3 bits of ``tag``."""
    t = tag & 7
    if t == Encoder.NUMERIC:
      self.getVarInt64()
    elif t == Encoder.DOUBLE:
      self.skip(8)
    elif t == Encoder.STRING:
      n = self.getVarInt32()
      self.skip(n)
    elif t == Encoder.STARTGROUP:
      # Recursively skip nested fields until the matching ENDGROUP tag.
      while 1:
        t = self.getVarInt32()
        if (t & 7) == Encoder.ENDGROUP:
          break
        else:
          self.skipData(t)
      # The ENDGROUP tag must carry the same field number as STARTGROUP.
      if (t - Encoder.ENDGROUP) != (tag - Encoder.STARTGROUP):
        raise ProtocolBufferDecodeError, "corrupted"
    elif t == Encoder.ENDGROUP:
      # A bare ENDGROUP without a preceding STARTGROUP is invalid.
      raise ProtocolBufferDecodeError, "corrupted"
    elif t == Encoder.FLOAT:
      self.skip(4)
    else:
      raise ProtocolBufferDecodeError, "corrupted"
  def get8(self):
    # Read one unsigned byte.
    if self.idx >= self.limit: raise ProtocolBufferDecodeError, "truncated"
    c = self.buf[self.idx]
    self.idx += 1
    return c
  def get16(self):
    # Read an unsigned 16-bit value, little-endian.
    if self.idx + 2 > self.limit: raise ProtocolBufferDecodeError, "truncated"
    c = self.buf[self.idx]
    d = self.buf[self.idx + 1]
    self.idx += 2
    return (d << 8) | c
  def get32(self):
    # Read an unsigned 32-bit value, little-endian.
    if self.idx + 4 > self.limit: raise ProtocolBufferDecodeError, "truncated"
    c = self.buf[self.idx]
    d = self.buf[self.idx + 1]
    e = self.buf[self.idx + 2]
    f = long(self.buf[self.idx + 3])
    self.idx += 4
    return (f << 24) | (e << 16) | (d << 8) | c
  def get64(self):
    # Read an unsigned 64-bit value, little-endian.
    if self.idx + 8 > self.limit: raise ProtocolBufferDecodeError, "truncated"
    c = self.buf[self.idx]
    d = self.buf[self.idx + 1]
    e = self.buf[self.idx + 2]
    f = long(self.buf[self.idx + 3])
    g = long(self.buf[self.idx + 4])
    h = long(self.buf[self.idx + 5])
    i = long(self.buf[self.idx + 6])
    j = long(self.buf[self.idx + 7])
    self.idx += 8
    return ((j << 56) | (i << 48) | (h << 40) | (g << 32) | (f << 24)
            | (e << 16) | (d << 8) | c)
  def getVarInt32(self):
    # Decode a varint and range-check it as signed 32-bit.
    v = self.getVarInt64()
    if v >= (1L << 31) or v < -(1L << 31):
      raise ProtocolBufferDecodeError, "corrupted"
    return v
  def getVarInt64(self):
    # Decode an unsigned varint, then reinterpret as signed 64-bit.
    result = self.getVarUint64()
    if result >= (1L << 63):
      result -= (1L << 64)
    return result
  def getVarUint64(self):
    # Core varint loop: accumulate 7 bits per byte until the MSB is clear.
    result = long(0)
    shift = 0
    while 1:
      if shift >= 64: raise ProtocolBufferDecodeError, "corrupted"
      b = self.get8()
      result |= (long(b & 127) << shift)
      shift += 7
      if (b & 128) == 0:
        if result >= (1L << 64): raise ProtocolBufferDecodeError, "corrupted"
        return result
    # Unreachable: the loop always returns or raises.
    return result
  def getFloat(self):
    # Read 4 bytes and unpack as a native single-precision float.
    if self.idx + 4 > self.limit: raise ProtocolBufferDecodeError, "truncated"
    a = self.buf[self.idx:self.idx+4]
    self.idx += 4
    return struct.unpack("f", a)[0]
  def getDouble(self):
    # Read 8 bytes and unpack as a native double-precision float.
    if self.idx + 8 > self.limit: raise ProtocolBufferDecodeError, "truncated"
    a = self.buf[self.idx:self.idx+8]
    self.idx += 8
    return struct.unpack("d", a)[0]
  def getBoolean(self):
    # Booleans must be exactly one 0/1 byte.
    b = self.get8()
    if b != 0 and b != 1: raise ProtocolBufferDecodeError, "corrupted"
    return b
  def getPrefixedString(self):
    # Length-delimited string: varint length prefix, then that many bytes.
    length = self.getVarInt32()
    if self.idx + length > self.limit:
      raise ProtocolBufferDecodeError, "truncated"
    r = self.buf[self.idx : self.idx + length]
    self.idx += length
    return r.tostring()
  def getRawString(self):
    # Consume and return everything up to the limit.
    r = self.buf[self.idx:self.limit]
    self.idx = self.limit
    return r.tostring()
class ProtocolBufferDecodeError(Exception): pass  # malformed or truncated wire data
class ProtocolBufferEncodeError(Exception): pass  # value out of range for its wire type
class ProtocolBufferReturnError(Exception): pass  # non-200 HTTP status from sendCommand
| StarcoderdataPython |
4937262 | <filename>filter.py
import argparse
import cv2
import os
import re
class imgProcess:
    """Builds a photomosaic: each tile of the target image is replaced by the
    sample image whose RGB histogram matches it best."""

    def __init__(self, targetDir, sampleDir, fileType, height, width):
        '''
        targetDir: path to the target image
        sampleDir: path to the directory containing sample images
        fileType: extension pattern used to select sample files (e.g. ".jpg")
        height & width: size of the sample image,
        also used as the size of the tile in the mosaic painting
        '''
        self.targetDir = targetDir
        self.sampleDir = sampleDir
        self.fileType = fileType
        self.height = height
        self.width = width

    def load_samples(self):
        '''
        Loads sample images and resizes them to the tile size.
        Returns the list of resized BGR images.
        '''
        sampleSet = []
        for filename in os.listdir(self.sampleDir):
            # validate the file type (case-insensitive substring match)
            if not re.search(self.fileType, filename, re.I):
                continue
            filepath = os.path.join(self.sampleDir, filename)
            # open and resize the sample
            img = cv2.imread(filepath)
            resized = cv2.resize(img, (self.width, self.height),
                                 interpolation=cv2.INTER_AREA)
            sampleSet.append(resized)
        return sampleSet

    def mosaic(self, outputDir, bins, option):
        '''
        Splits the target image into tiles and replaces each tile with the
        sample whose RGB histogram is closest to it, then writes the result.

        outputDir: path to the output image
        bins: # of bins per channel when calculating histograms
        option: histogram-difference method, one of
                "correlation", "chi-squared", "intersection", "hellinger"
        '''
        img = cv2.imread(self.targetDir)
        H, W = img.shape[:2]
        if H < self.height or W < self.width:
            print("Failed to crop " + self.targetDir + ": out of the boundary.")
            # NOTE(review): exits with status 0 on failure; a nonzero status
            # would be more conventional -- confirm before changing callers.
            exit(0)
        # trim so the image is an exact multiple of the tile size
        H = H - H % self.height
        W = W - W % self.width
        croppedImg = img[0:H, 0:W].copy()
        sampleSet = self.load_samples()
        # PERF FIX: build the comparison object and every sample histogram
        # once.  The original rebuilt all sample histograms for every single
        # tile, turning the loop into O(tiles * samples) histogram work.
        compare = comparison(bins, option)
        sampleHists = compare.hist_set(sampleSet)
        for row in range(0, H, self.height):
            for col in range(0, W, self.width):
                # split the target image into tiles
                tile = croppedImg[row:row + self.height, col:col + self.width]
                # find the best-matching sample for this tile
                targetHist = compare.hist_set([tile])[0]
                match = compare.compare_hist(targetHist, sampleHists)
                # replace the tile with the best-fit sample
                croppedImg[row:row + self.height,
                           col:col + self.width] = sampleSet[match]
        # generate the filtered image
        cv2.imwrite(outputDir, croppedImg)
class comparison:
    '''
    Compares the color difference between two images based on their
    3D RGB histograms.
    '''

    def __init__(self, bins, option):
        # bins: bins per channel; option: comparison method name (lowercase)
        self.bins = bins
        self.option = option

    def generate_hist(self, img):
        '''
        Generates a normalized 3D RGB histogram, flattened to 1D.
        '''
        hist = cv2.calcHist([img], [0, 1, 2], None,
                            [self.bins, self.bins, self.bins],
                            [0, 256, 0, 256, 0, 256])
        cv2.normalize(hist, hist)
        return hist.flatten()

    def hist_set(self, sampleSet):
        '''
        Generates a list of histograms, one per image.
        '''
        return [self.generate_hist(sample) for sample in sampleSet]

    def compare_hist(self, tarHist, sampleHistSet):
        '''
        Computes the differences between the target histogram and each sample
        histogram, and returns the index of the best match.
        '''
        methods = {
            "correlation" : cv2.HISTCMP_CORREL,         # Correlation
            "chi-squared" : cv2.HISTCMP_CHISQR,         # Chi-Squared
            "intersection" : cv2.HISTCMP_INTERSECT,     # Intersection
            "hellinger" : cv2.HISTCMP_BHATTACHARYYA}    # Hellinger
        # BUG FIX: correlation and intersection scores are
        # higher-is-better, so they must be sorted in reverse order.
        # The old check tested "Hellinger" (which is lower-is-better, and
        # capitalized so it never matched the lowercase options anyway)
        # instead of "intersection".
        rev = self.option in ("correlation", "intersection")
        results = {}
        for i in range(0, len(sampleHistSet)):
            diff = cv2.compareHist(tarHist, sampleHistSet[i],
                                   methods[self.option])
            results[i] = diff
        results = sorted([(v, k) for (k, v) in results.items()], reverse=rev)
        # retrieve the index of the best-matched sample
        return results[0][1]
# main() function
def main():
    """Command-line entry point: parse arguments and run the mosaic filter."""
    descStr = "This program applies mosaic filter onto the image specified."
    parser = argparse.ArgumentParser(description=descStr)
    # expected arguments; only the target image is mandatory
    parser.add_argument('--i', dest='targetFile', required=True)
    parser.add_argument('--sample', dest='sampleFile', required=False)
    parser.add_argument('--type', dest='fileType', required=False)
    parser.add_argument('--height', dest='height', required=False)
    parser.add_argument('--width', dest='width', required=False)
    parser.add_argument('--o', dest='outFile', required=False)
    parser.add_argument('--bins', dest='bins', required=False)
    parser.add_argument('--opt', dest='option', required=False)
    args = parser.parse_args()

    # fall back to defaults for anything the user did not supply
    targetFile = args.targetFile
    sampleFile = args.sampleFile or "./color_set"    # sample set directory
    fileType = args.fileType or ".jpg"               # sample image extension
    height = int(args.height) if args.height else 16  # tile height
    width = int(args.width) if args.width else 16     # tile width
    outFile = args.outFile or "./out.jpg"            # output path
    bins = int(args.bins) if args.bins else 8        # histogram bins/channel
    option = args.option or "correlation"            # comparison method

    targetImg = imgProcess(targetFile, sampleFile, fileType, height, width)
    targetImg.mosaic(outFile, bins, option)
# call main
# Run the CLI entry point only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1755143 |
# from rhombus.models.core import *
from rhombus.models.core import BaseMixIn, Base, Column, relationship, types, deferred, ForeignKey, backref, UniqueConstraint, object_session, Sequence
from rhombus.models.ek import EK
from rhombus.models.user import User, Group
# from rhombus.lib.roles import *
from rhombus.lib import roles as r
from rhombus.lib.utils import cerr, cout, cexit, get_dbhandler, get_userid
from sqlalchemy.sql import func
from sqlalchemy.ext.orderinglist import ordering_list
import posixpath, time, datetime, difflib, yaml
from sqlalchemy_utils.types.uuid import UUIDType
from sqlalchemy_utils.types.json import JSONType
import os
import uuid
from collections import deque
# the models employed Rhombus' BaseMixIn to provide id, lastuser_id and stamp
class Site(BaseMixIn, Base):
    """ this class manages sites: maps a fully-qualified domain name to an
    owning group.  ``fqdn`` defaults to '*' (catch-all site).
    """

    __tablename__ = 'sites'
    fqdn = Column(types.String(128), nullable=False, index=True, server_default='*')

    # group that owns/administers this site
    group_id = Column(types.Integer, ForeignKey('groups.id'), nullable=False)
    group = relationship(Group, uselist=False, foreign_keys=group_id)
class Node(BaseMixIn, Base):
    """Base class for all objects that have a path and permission.

    A Node carries a site-unique ``path``, ownership (user/group), creation
    and publication timestamps, a workflow ``state``, a ``flags`` bitfield
    and a JSON side-channel (``json_code``).  Concrete content types
    subclass Node using polymorphism on ``polymorphic_type``.
    """

    __tablename__ = 'nodes'
    site_id = Column(types.Integer, ForeignKey('sites.id'), nullable=False)
    site = relationship('Site', uselist=False)

    uuid = Column(UUIDType, nullable=False, unique=True)
    slug = Column(types.String(128), nullable=False, index=True)
    path = Column(types.String(1024), nullable=False, server_default='')
    level = Column(types.Integer, nullable=False, server_default='-1')
    parent_id = Column(types.Integer, ForeignKey('nodes.id'), nullable=True, index=True)
    ordering = Column(types.Integer, nullable=False)
    children = relationship(
        'Node',
        cascade="all, delete-orphan",
        # many to one + adjacency list - remote_side
        # is required to reference the 'remote'
        # column in the join condition.
        backref=backref("parent", remote_side='Node.id'),
        order_by="Node.ordering",
        lazy='dynamic',
        collection_class=ordering_list('ordering')
    )

    user_id = Column(types.Integer, ForeignKey('users.id'), nullable=False)
    user = relationship(User, uselist=False, foreign_keys=user_id)
    group_id = Column(types.Integer, ForeignKey('groups.id'), nullable=False)
    group = relationship(Group, uselist=False, foreign_keys=group_id)

    create_time = Column(types.DateTime(timezone=True), nullable=False,
                         server_default=func.now())
    publish_time = Column(types.DateTime(timezone=True), nullable=False,
                          server_default=func.now())
    expire_time = Column(types.DateTime(timezone=True), nullable=True)

    # state represents the workflow status, with global value of 0 being public;
    # the meaning of the other values depends on the workflow
    state = Column(types.Integer, nullable=False, server_default='0')

    # flags can be used to indicate special meta-information for the node;
    # the lowest 16 bits may be interpreted freely by any viewer of each
    # Node subclass, the highest 16 bits are reserved for the system
    flags = Column(types.Integer, nullable=False, server_default='0')

    # boolean to indicate whether this node will appear in the Content tab
    listed = Column(types.Boolean, nullable=False, server_default='1')

    mimetype_id = Column(types.Integer, ForeignKey('eks.id'), nullable=False)
    mimetype = EK.proxy('mimetype_id', '@MIMETYPE')

    json_code = deferred(Column(JSONType, nullable=False, server_default='{}'))

    polymorphic_type = Column(types.Integer, nullable=False, server_default='0', index=True)

    __mapper_args__ = {'polymorphic_on': polymorphic_type, 'polymorphic_identity': 0}
    __table_args__ = (UniqueConstraint('path', 'site_id'),
                      UniqueConstraint('parent_id', 'ordering'), )

    __strict_container__ = None
    __mimetypes__ = None

    # flag bit masks (see is_commentable / is_inmenu below)
    f_commentable = (1 << 15)
    f_inmenu = (1 << 14)

    def __init__(self, UUID=None, **kwargs):
        # Assign a fresh time-based uuid unless one is supplied (e.g. when
        # re-importing a dumped node).
        if not UUID:
            self.uuid = uuid.uuid1()
        else:
            self.uuid = UUID
        self._versioning = None
        self.flags = 0
        super().__init__(**kwargs)

    def update(self, obj):
        """Update fields from a dict-like ``obj``; only supplied keys change."""
        if 'site_id' in obj:
            self.site_id = obj['site_id']
        if 'slug' in obj and obj['slug']:
            self.slug = obj['slug']
        if 'path' in obj and obj['path']:
            self.path = obj['path']
        if 'user_id' in obj and type(obj['user_id']) == int:
            self.user_id = obj['user_id']
        if 'lastuser_id' in obj and type(obj['lastuser_id']) == int:
            self.lastuser_id = obj['lastuser_id']
        if 'stamp' in obj:
            self.stamp = obj['stamp']
        if 'group_id' in obj and type(obj['group_id']) == int:
            self.group_id = obj['group_id']
        if 'mimetype_id' in obj and type(obj['mimetype_id']) == int:
            self.mimetype_id = obj['mimetype_id']
        if 'listed' in obj:
            self.listed = bool(obj['listed'])
        if 'level' in obj:
            self.level = int(obj['level'])
        if 'create_time' in obj:
            self.create_time = obj['create_time']
        if 'publish_time' in obj:
            self.publish_time = obj['publish_time']
        if 'expire_time' in obj:
            self.expire_time = obj['expire_time']
        if 'ordering' in obj:
            self.ordering = int(obj['ordering'])
        if 'json_code' in obj:
            self.json_code = obj['json_code']

        # tags
        if 'tags' in obj:
            if not self.id:
                # this is a new object, so we can just attach tags using the
                # SqlAlchemy relationship mechanism
                user_id = get_userid()
                session = get_dbhandler().session()
                with session.no_autoflush:
                    for tag_id in obj['tags']:
                        # ':<id>' denotes an existing tag EK id
                        if type(tag_id) == str and tag_id.startswith(':'):
                            tag_id = int(tag_id[1:])
                        Tag.add_tag(self, tag_id, user_id, session)
            else:
                Tag.sync_tags(self.id, obj['tags'], session=object_session(self))

        # flags
        if 'flags-on' in obj:
            self.flags |= obj['flags-on']
        if 'flags-off' in obj:
            self.flags &= ~ obj['flags-off']

        # state
        if 'state' in obj:
            self.state = obj['state']

    def clear(self):
        """ this clears all child nodes and performs necessary cleanup """
        session = object_session(self)
        for child in self.children:
            child.clear()
            session.delete(child)

    def generate_slug(self):
        """ generate random slug based on time """
        self.slug = str(time.time())

    def generate_path(self):
        """Derive ``path`` from the parent's path and this node's slug."""
        if not self.slug:
            raise RuntimeError('Node slug needs to be initialized first!')
        if self.parent.path == '/':
            self.path = posixpath.normpath('/%s' % self.slug)
        else:
            self.path = posixpath.normpath('%s/%s' % (self.parent.path, self.slug))
        return self.path

    def render_title(self):
        # NOTE(review): ``title`` is defined by subclasses, not by Node itself.
        return self.title

    def is_manageable(self, user):
        """Return True when ``user`` may manage this node."""
        # check if user has an admin role or is the owner of this page
        if not user:
            return False
        if self.user == user or user.has_roles(r.SYSADM, r.DATAADM):
            return True
        # check if user is a member of the owning group
        if self.group.has_member(user):
            return True
        return False

    # Flags related functions

    def is_commentable(self):
        # use the named mask rather than a duplicated magic constant
        return self.flags & self.f_commentable

    def set_commentable(self, val=True):
        # BUG FIX: the previous ``flags |= (0 << 15)`` form was a no-op when
        # val was False, so the bit could never be cleared.
        self.set_flags(self.f_commentable, bool(val))

    def is_inmenu(self):
        return self.flags & self.f_inmenu

    def set_inmenu(self, val=True):
        # BUG FIX: same clear-bit bug as set_commentable(); route through
        # set_flags() so False actually clears the bit.
        self.set_flags(self.f_inmenu, bool(val))

    def check_flags(self, flag):
        return self.flags & flag

    def set_flags(self, flag, val):
        self.flags = (self.flags | flag) if val is True else (self.flags & ~flag)

    def add(self, n):
        """Attach ``n`` as the last child of this node and return it."""
        with object_session(self).no_autoflush:
            if not n.slug:
                n.generate_slug()
            n.site_id = self.site_id
            n.level = self.level + 1
            self.children.append(n)
            n.generate_path()
            # flush once to obtain n.id, then space orderings apart
            n.ordering = -1
            object_session(n).flush()
            # NOTE(review): spacing of 19 presumably leaves gaps for manual
            # reordering -- confirm before changing.
            n.ordering = 19 * n.id
        return n

    @property
    def url(self):
        """ remove the leading slash (/) for use with request.route_url """
        return self.path[1:]

    def get_descendants(self):
        """ perform preorder iterative traversal of self and all children """
        stack = deque([])
        preorder = [self]
        stack.append(self)
        if self.children.count() == 0:
            return preorder
        while len(stack) > 0:
            flag = 0
            if (stack[len(stack) - 1]).children.count() == 0:
                stack.pop()
            else:
                par = stack[len(stack) - 1]
                for i in range(0, par.children.count()):
                    child = par.children[i]
                    if child not in preorder:
                        flag = 1
                        stack.append(child)
                        preorder.append(child)
                        break
                if flag == 0:
                    stack.pop()
        return preorder

    @classmethod
    def container(cls, item_cls):
        """Class decorator: register ``item_cls`` as allowed inside ``cls``."""
        global _containers_
        register_nodeclass(item_cls)
        try:
            _containers_[cls].append(item_cls)
        except KeyError:
            _containers_[cls] = [item_cls]
        return item_cls

    @classmethod
    def explicit_container(cls, item_cls):
        """Register ``item_cls`` as allowed in ``cls`` only when explicitly listed."""
        global _explicit_containers_
        register_nodeclass(item_cls)
        try:
            _explicit_containers_[cls].append(item_cls)
        except KeyError:
            _explicit_containers_[cls] = [item_cls]
        return item_cls

    @classmethod
    def inherited_container(cls, item_cls):
        """Register ``item_cls`` as allowed in ``cls`` and all its subclasses."""
        global _inherited_containers_
        register_nodeclass(item_cls)
        try:
            _inherited_containers_[cls].append(item_cls)
        except KeyError:
            _inherited_containers_[cls] = [item_cls]
        return item_cls

    def get_item_classes(self):
        """Return the list of node classes this node may contain."""
        global _containers_, _inherited_containers_, _explicit_containers_
        if hasattr(self, '__strict_container__') and self.__strict_container__ is not None:
            return self.__strict_container__
        # NOTE(review): __NOTE__ at module bottom spells this key
        # 'strict_containers' (plural) -- confirm which is correct.
        if 'strict_container' in self.json_code:
            classnames = self.json_code['strict_container']
            classitems = (_containers_.get(self.__class__, [])
                          + self.get_inherited_item_classes()
                          + _explicit_containers_.get(self.__class__, [])
                          )
            classitems_d = {}
            for classitem in classitems:
                classitems_d[classitem.__name__] = classitem
            return [classitems_d[n] for n in classnames if n in classitems_d]
        cls_set = _containers_.get(self.__class__, [])
        for c, l in _inherited_containers_.items():
            if issubclass(self.__class__, c):
                cls_set = cls_set + l
        return cls_set

    def get_inherited_item_classes(self):
        """Return item classes contributed by inherited_container registrations."""
        cls_set = []
        for c, l in _inherited_containers_.items():
            if issubclass(self.__class__, c):
                cls_set = cls_set + l
        return cls_set

    @classmethod
    def search(cls, text, site_id):
        # Abstract: subclasses supply their own full-text search.
        raise NotImplementedError

    @classmethod
    def get_label(cls):
        """Human-readable label; __label__ if defined, else the class name."""
        return getattr(cls, '__label__', cls.__name__)

    def as_dict(self):
        """Return a plain-dict representation suitable for YAML dumping."""
        return dict(
            _type_=type(self).__name__,
            id=self.id,
            site=self.site.fqdn,
            uuid=str(self.uuid),
            slug=self.slug,
            path=self.path,
            level=self.level,
            parent_url=self.parent.url if self.parent else '',
            ordering=self.ordering,
            user=self.user.get_login(),
            lastuser=self.lastuser.get_login() if self.lastuser else self.user.get_login(),
            stamp=self.stamp,
            group=self.group.name,
            create_time=self.create_time,
            publish_time=self.publish_time,
            expire_time=self.expire_time,
            state=self.state,
            flags=self.flags,
            listed=self.listed,
            mimetype=self.mimetype,
            json_code=self.json_code,
            tags=[t.tag.key for t in self.tags],
        )

    def as_yaml(self):
        return yaml.safe_dump(self.as_dict(), default_flow_style=False)

    @classmethod
    def from_dict(cls, d, obj=None):
        """Create (or update) an instance from a dict produced by as_dict()."""
        if not obj:
            obj = cls(UUID=uuid.UUID(d['uuid']))
            cerr('Created instance of [%s]' % obj.__class__.__name__)
        obj.update(d)
        return obj

    # export/import

    def dump(self, target_dir):
        """Write this node's metadata as ``_c.yaml`` under ``target_dir``."""
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        with open(target_dir + '/_c.yaml', 'w') as f:
            f.write(self.as_yaml())

    @classmethod
    def _load(cls, d, source_dir):
        """Resolve names in ``d`` to database ids and recreate the node."""
        dbh = get_dbhandler()
        d['site_id'] = dbh.get_site(d['site']).id
        user = dbh.get_user(d['user'])
        if not user:
            cexit('ERR: user %s does not exist!' % d['user'])
        d['user_id'] = user.id
        d['lastuser'] = d.get('lastuser', d['user'])
        lastuser = dbh.get_user(d['lastuser'])
        if not lastuser:
            cexit('ERR: user %s does not exist!' % d['lastuser'])
        d['lastuser_id'] = lastuser.id
        group = dbh.get_group(d['group'])
        if not group:
            cexit('ERR: group %s does not exist!' % d['group'])
        d['group_id'] = group.id
        mimetype = dbh.get_ekey(d['mimetype'])
        d['mimetype_id'] = mimetype.id
        # modify tags to ids
        if 'tags' in d:
            d['tags'] = [dbh.get_ekey(t).id for t in d['tags']]
        # recreate node
        n = cls.from_dict(d)
        dbh.session().add(n)
        print(n)
        return n

    @staticmethod
    def load(source_dir):
        """Recreate a node from a directory written by dump()."""
        with open(source_dir + '/_c.yaml') as f:
            # SECURITY FIX: use safe_load -- the dump side uses safe_dump,
            # and plain yaml.load can instantiate arbitrary python objects.
            d = yaml.safe_load(f.read())
        nodeclass = _nodeclasses_[d['_type_']]
        print('NodeClass:', nodeclass)
        return nodeclass._load(d, source_dir)

    def ascendant(self, node):
        """ check whether ``node`` is self or one of self's ancestors """
        if self.level < node.level:
            return False
        if self.level == node.level:
            return True if self == node else False
        # BUG FIX: walk *up* the tree from self; the old code re-read
        # ``self.parent`` on every iteration, so it never advanced beyond the
        # immediate parent and could loop forever.  Also guard against a
        # missing parent at the root.
        parent_node = self.parent
        while parent_node is not None and parent_node.level >= node.level:
            if parent_node == node:
                return True
            parent_node = parent_node.parent
        return False

    def versioning(self):
        """Snapshot the current YAML representation for later diff()."""
        self._versioning = self.as_yaml().splitlines()

    def diff(self):
        """Return a context diff between the versioning() snapshot and now."""
        curr_yaml = self.as_yaml().splitlines()
        return difflib.context_diff(self._versioning, curr_yaml, n=1)

    def difflog(self):
        """Persist the diff since the last versioning() call as a DiffLog."""
        diff = ''.join(self.diff())
        difflog_item = DiffLog()
        difflog_item.node = self
        difflog_item.diff = diff
        session = object_session(self)
        # BUG FIX: Session.flush() takes a *collection* of objects, and the
        # new row must be part of the session before flushing.
        session.add(difflog_item)
        session.flush([difflog_item])
        return difflog_item

    def search_text(self):
        # Default: no searchable body text; subclasses override.
        return ''

    def search_keywords(self):
        # Default: no searchable keywords; subclasses override.
        return ''

    def __repr__(self):
        # NOTE(review): relies on ``title`` which subclasses define.
        return '<%s|%s|%s|%s>' % (self.__class__.__name__, self.id, self.path, self.title)
class DiffLog(BaseMixIn, Base):
    """Stores one textual diff of a node's YAML representation over time."""

    __tablename__ = 'difflogs'
    node_id = Column(types.Integer, ForeignKey('nodes.id'), nullable=False)
    node = relationship(Node, uselist=False,
                        backref=backref('difflog', cascade='all, delete-orphan'))
    # context-diff text produced by Node.diff()
    diff = Column(types.Text, nullable=False, server_default='')

    def __repr__(self):
        return '<DiffLog|%d|%s>' % (self.node_id, self.stamp)
class Workflow(BaseMixIn, Base):
    """Records a node's position within a publication workflow."""

    __tablename__ = 'workflows'
    node_id = Column(types.Integer, ForeignKey('nodes.id'), nullable=False)
    node = relationship(Node, uselist=False)
    # state indicates the position in the workflow step;
    # 0 is the final step, ie. published
    state = Column(types.Integer, nullable=False, server_default='0')
    log = Column(types.String(256), nullable=False, server_default='')
    __table_args__ = (UniqueConstraint('node_id', 'state'), )
class ACL(BaseMixIn, Base):
    """Per-node access-control entry for either a user or a group."""

    __tablename__ = 'xacls'
    node_id = Column(types.Integer, ForeignKey('nodes.id'), nullable=False, index=True)
    node = relationship(Node, uselist=False)
    # exactly one of user_id / group_id is expected per entry (both nullable)
    user_id = Column(types.Integer, ForeignKey('users.id'), nullable=True)
    group_id = Column(types.Integer, ForeignKey('groups.id'), nullable=True)
    # access mode bits; semantics defined by the consumer of this table
    mode = Column(types.Integer, nullable=False, server_default='0')
    __table_args__ = (UniqueConstraint('node_id', 'user_id'),
                      UniqueConstraint('node_id', 'group_id'),
                      )
class Tag(Base):
    """Associates a node with an EK tag key, recording who tagged it."""

    __tablename__ = 'tags'
    id = Column(types.Integer, primary_key=True)
    node_id = Column(types.Integer, ForeignKey('nodes.id'), nullable=False, index=True)
    node = relationship(Node, uselist=False,
                        backref=backref('tags', cascade='delete, delete-orphan'))
    tag_id = Column(types.Integer, ForeignKey('eks.id'), nullable=False, index=True)
    tag = relationship(EK, uselist=False, foreign_keys=tag_id)
    user_id = Column(types.Integer, ForeignKey('users.id'))
    __table_args__ = (UniqueConstraint('node_id', 'tag_id'), )

    @classmethod
    def sync_tags(cls, node_id, tag_keys, user_id=None, session=None):
        """Synchronize node_id's tags with ``tag_keys``.

        tag_keys are either actual (possibly new) tag strings or ':<id>'
        notation for existing EK ids, eg. [ 'DNA', 'RNA', ':65', ':24' ].
        Tags not listed are removed; missing ones are added.
        """
        # check sanity
        assert type(node_id) == int

        tag_ids = []
        for t in tag_keys:
            if t[0] == ':':
                tag_ids.append(int(t[1:]))
            else:
                tag_ids.append(t)

        # resolve defaults
        if not user_id:
            user_id = get_userid()
        if not session:
            session = get_dbhandler().session()

        tags = cls.query(session).filter(cls.node_id == node_id)
        in_sync = []
        for tag in tags:
            if tag.tag_id in tag_ids:
                in_sync.append(tag.tag_id)
            else:
                # remove tags that are no longer listed
                session.delete(tag)
        for tag_id in tag_ids:
            if tag_id in in_sync:
                continue
            cls.add_tag(node_id, tag_id, user_id, session)

    @classmethod
    def add_tag(cls, node_id, tag_id, user_id, session):
        """Attach ``tag_id`` (EK id or new tag string) to ``node_id``."""
        # BUG FIX: resolve the default session *before* it is used below;
        # the old code only checked ``if not session`` after already using
        # it in the string-tag branch.
        if not session:
            session = get_dbhandler().session()

        if type(tag_id) == str:
            # check if we already have an identical tag; create it otherwise
            login = User.query(session).filter(User.id == user_id).one().login
            ek_tag = EK.search('@TAG', None, session)
            ekey = EK.search(tag_id, ek_tag, session)
            if ekey is None:
                # create new tag under @TAG, owned by this user
                ekey = EK(key=tag_id, desc=login, parent=ek_tag)
                session.add(ekey)
                session.flush([ekey])
            tag_id = ekey.id

        assert type(tag_id) == int
        if type(node_id) == int:
            tag = cls(node_id=node_id, tag_id=tag_id, user_id=user_id)
        else:
            # node_id may also be a (possibly transient) Node instance
            tag = cls(node=node_id, tag_id=tag_id, user_id=user_id)
        session.add(tag)

    @classmethod
    def remove_tag(cls, node_id, tag_id, user_id, session):
        """Detach ``tag_id`` from ``node_id``."""
        # CONSISTENCY FIX: pass the session to query(), as every other
        # query in this class does.
        tag = cls.query(session).filter(cls.node_id == node_id, cls.tag_id == tag_id).one()
        session.delete(tag)
class NodeRelationship(Base):
    """Ordered, labelled many-to-many link between two nodes."""

    __tablename__ = 'noderelationships'
    id = Column(types.Integer, Sequence('noderelationship_id', optional=True),
                primary_key=True)
    node1_id = Column(types.Integer, ForeignKey('nodes.id'), nullable=False)
    node2_id = Column(types.Integer, ForeignKey('nodes.id'), nullable=False)
    # free-text label describing the relationship
    text = Column(types.String(64), nullable=False, server_default='')
    ordering = Column(types.Integer, nullable=False, server_default='0')
    __table_args__ = (UniqueConstraint('node1_id', 'node2_id'), {})
    node1 = relationship(Node, uselist=False, foreign_keys=[node1_id],
                         backref=backref('noderel1', cascade='all,delete,delete-orphan'))
    node2 = relationship(Node, uselist=False, foreign_keys=[node2_id],
                         backref=backref('noderel2', cascade='all,delete,delete-orphan'))

    @classmethod
    def create(cls, node1, node2, text=''):
        """Create, flush and return a relationship between two nodes."""
        r = cls(node1_id=node1.id, node2_id=node2.id, text=text)
        s = object_session(node1)
        s.add(r)
        s.flush([r])
        # spaced ordering, matching the convention used in Node.add()
        r.ordering = r.id * 19
        return r

    @classmethod
    def gets(cls, ids, session):
        """Return a query for the relationships with the given ids."""
        return cls.query(session).filter(cls.id.in_(ids))

    @classmethod
    def node_relationship(cls, params):
        # NOTE(review): returns a bare relationship() with no arguments and
        # ignores ``params`` -- looks unfinished; confirm intended usage.
        return relationship()
# container related
# Registries mapping a container class to the list of item classes it may
# hold; populated by the Node.*container decorators and self_container().
# The structure for the variables below is:
#   d[cls] = [ cls1, cls2, ... ]
_containers_ = {}
_inherited_containers_ = {}
_explicit_containers_ = {}
def self_container(item_cls):
    """Class decorator: register ``item_cls`` as a valid child of itself."""
    global _containers_
    register_nodeclass(item_cls)
    _containers_.setdefault(item_cls, []).append(item_cls)
    return item_cls
# Global registry: node class name -> class object (see register_nodeclass).
_nodeclasses_ = {}
def register_nodeclass(cls):
    """Record ``cls`` in the global node-class registry, by class name.

    Raises RuntimeError if a *different* class was already registered
    under the same name; re-registering the same class is a no-op.
    """
    global _nodeclasses_
    cerr('Registering [%s]' % cls.__name__)
    existing = _nodeclasses_.get(cls.__name__)
    if existing is None:
        _nodeclasses_[cls.__name__] = cls
    elif existing != cls:
        raise RuntimeError('inconsistent class %s' % cls.__name__)
# Developer note kept as a module-level string so it stays introspectable.
__NOTE__ = '''
json_code can be used to further control a node.
Below are options used in json_code:
strict_containers: [ ]
'''

# EOF
| StarcoderdataPython |
8197755 | import mne
import pandas as pd
import numpy as np
from scipy.signal import spectrogram
from sklearn.svm import LinearSVC, SVC
import matplotlib.pyplot as plt
import matplotlib
class ssvep:
def __init__(self, set_fontsize=True, fontsize=22):
"""
Parameters
----------
set_fontsize : boolean
Whether or not to set matplotlib font size for plots.
fontsize : int
Font size for matplotlib plots.
"""
if set_fontsize:
font = {'family' : 'normal',
'size' : fontsize}
matplotlib.rc('font', **font)
def load_data(self,
datapath='/home/nate/github/ssvep_test/win_exp1_ssvep/bluetooth/10_20/',
filename='exp_1_1020hz_data.csv',
sample_rate=125,
frequencies=[10, 20]):
"""Loads EEG ssvep data.
Notes
-----
16-channel Cyton+Daisy over bluetooth is 125Hz sampling rate.
16-channel Cyton+Daisy over WiFi is 1000Hz sampling rate.
The timestamp is not exact -- it can be delayed by system issues
(e.g. loading into memory on the computer, etc).
Apparantly the frequency sampling on the board is pretty accurate, so
other than the large gaps in the data, one can assume the sampling rate
is 125, 250, or 1000 Hz.
Paramaters
----------
datapath : str
path to data
filename : str
filename of data (csv file)
sample_rate : int
sample rate in Hz
"""
self.frequencies = frequencies
self.sample_rate = sample_rate
df = pd.read_csv(datapath + filename)
unique_freqs = set(df['frequency'].unique())
if set([0, 'alpha', 'beta'] + frequencies) != unique_freqs:
print('frequencies from data do not match from function args;')
# print('setting frequencies equal to those from the data')
# unique_freqs.remove(0)
print(f'frequencies in data: {unique_freqs}')
# self.frequencies = sorted(unique_freqs)
df_clean = df.copy()
# trim the first 2s off
df_clean = df_clean.iloc[sample_rate * 2:]
for channel in range(1, 17):
bandpass_ch = mne.filter.filter_data(df_clean[str(channel)], sample_rate, 5, 50)
bp = pd.Series(bandpass_ch)
if bp.shape[0] != df_clean[str(channel)].shape[0]:
print(f"bp and df shape mismatch channel {channel}: {bp.shape[0]} and {df_clean[str(channel)].shape[0]}")
df_clean[str(channel)] = pd.Series(bandpass_ch)
# store copies of data for any analysis
df_clean.dropna(inplace=True)
# for some reason the last second of bandpassed wifi data has issues
if sample_rate == 1000:
df_clean = df_clean.iloc[:-1000]
self.df_clean = df_clean
self.df = df
def get_alpha_beta(self,
                   nperseg=None,
                   noverlap=None,
                   channels=[7, 8, 15, 16]):
    """Extract channel-averaged spectrogram features for the 'alpha' and
    'beta' sections of the cleaned data.

    Splits ``self.df_clean`` into contiguous runs labelled 'alpha' or
    'beta', computes a spectrogram per run for each selected channel, and
    averages the spectrograms across channels.  Results are stored on
    ``self`` (``alpha_specs``/``beta_specs`` plus matching ``*_fs``/``*_ts``
    frequency and time axes) for plotting and feature building.

    Parameters
    ----------
    nperseg : int or None
        Points per FFT window; defaults to the sample rate (1 Hz bins).
    noverlap : int or None
        Overlapping points per window; defaults to ``nperseg - 10``.
    channels : list of int
        Channel numbers (1-16) whose spectrograms are averaged.
    """
    if nperseg is None:
        nperseg = self.sample_rate
    if noverlap is None:
        noverlap = nperseg - 10
    df_clean = self.df_clean
    # breaks up contiguous chunks of ssvep sections into groups and lists
    groups = list(df_clean.groupby((df_clean['frequency'] != df_clean['frequency'].shift()).cumsum()))
    alpha_dfs = [d[1] for d in groups if d[1]['frequency'].unique()[0] == 'alpha']
    beta_dfs = [d[1] for d in groups if d[1]['frequency'].unique()[0] == 'beta']
    alpha_specs = []
    alpha_fs = []
    alpha_ts = []
    for d in alpha_dfs:
        specs = []
        for c in channels:
            # frequency, time, intensity (shape fxt)
            alpha_f, alpha_t, c_spec = spectrogram(d[str(c)],
                                                   fs=self.sample_rate,
                                                   nperseg=nperseg,
                                                   noverlap=noverlap)
            specs.append(c_spec)
        # average the per-channel spectrograms into one feature matrix
        alpha_spec = np.mean(np.array(specs), axis=0)
        alpha_specs.append(alpha_spec)
        alpha_fs.append(alpha_f)
        alpha_ts.append(alpha_t)
    beta_specs = []
    beta_fs = []
    beta_ts = []
    for d in beta_dfs:
        specs = []
        for c in channels:
            # frequency, time, intensity (shape fxt)
            beta_f, beta_t, c_spec = spectrogram(d[str(c)],
                                                 fs=self.sample_rate,
                                                 nperseg=nperseg,
                                                 noverlap=noverlap)
            specs.append(c_spec)
        beta_spec = np.mean(np.array(specs), axis=0)
        beta_specs.append(beta_spec)
        beta_fs.append(beta_f)
        beta_ts.append(beta_t)
    # for plotting
    self.alpha_fs = alpha_fs
    self.alpha_ts = alpha_ts
    self.beta_fs = beta_fs
    self.beta_ts = beta_ts
    self.alpha_specs = alpha_specs
    self.beta_specs = beta_specs
def engineer_features(self,
                      nperseg=None,
                      noverlap=None,
                      channels=[7, 8, 15, 16]):
    """Create channel-averaged spectrogram features from the cleaned data.

    Splits ``self.df_clean`` into contiguous runs labelled with each of the
    two stimulus frequencies, computes a spectrogram per run for each
    selected channel, and averages across channels.  Results are stored on
    ``self`` as ``f1_specs``/``f2_specs`` with matching ``f1_fs``/``f1_ts``
    etc. axes for plotting.

    Notes
    -----
    Higher nperseg gives higher frequency resolution: nperseg equal to the
    sampling rate gives 1 Hz bins.  Higher noverlap gives higher time
    resolution.

    Parameters
    ----------
    nperseg : int or None
        Points per FFT window; defaults to the sample rate.
    noverlap : int or None
        Overlapping points per window; defaults to ``nperseg - 10``.
    channels : list of int
        Channel numbers (1-16) whose spectrograms are averaged.
    """
    if nperseg is None:
        nperseg = self.sample_rate
    if noverlap is None:
        noverlap = nperseg - 10
    df_clean = self.df_clean
    f1, f2 = self.frequencies
    # breaks up contiguous chunks of ssvep sections into groups and lists
    groups = list(df_clean.groupby((df_clean['frequency'] != df_clean['frequency'].shift()).cumsum()))

    def spectrograms_for(label):
        # one channel-averaged spectrogram per contiguous run with this label;
        # shared helper replaces the two previously duplicated loops
        specs_out, fs_out, ts_out = [], [], []
        run_dfs = [g[1] for g in groups if g[1]['frequency'].unique()[0] == label]
        for d in run_dfs:
            specs = []
            for c in channels:
                # frequency, time, intensity (shape fxt)
                f_axis, t_axis, c_spec = spectrogram(d[str(c)],
                                                     fs=self.sample_rate,
                                                     nperseg=nperseg,
                                                     noverlap=noverlap)
                specs.append(c_spec)
            specs_out.append(np.mean(np.array(specs), axis=0))
            fs_out.append(f_axis)
            ts_out.append(t_axis)
        return specs_out, fs_out, ts_out

    # for plotting / later train-test splitting
    self.f1_specs, self.f1_fs, self.f1_ts = spectrograms_for(str(f1))
    self.f2_specs, self.f2_fs, self.f2_ts = spectrograms_for(str(f2))
def create_train_test_frequencies(self,
                                  train_fraction=0.8,
                                  alpha_waves=False):
    """Create train/test features and targets from spectrogram features.

    Whole spectrogram chunks are assigned to either the train or the test
    split (so FFT windows from the same recording segment never appear in
    both); individual time slices then become samples of shape
    (timesteps, frequencies).

    Parameters
    ----------
    train_fraction : float
        Fraction (0-1) of spectrogram chunks to use as the training set.
    alpha_waves : boolean
        If True, use the alpha/beta spectrograms (labelled 10/20);
        otherwise use the stimulus-frequency spectrograms.
    """
    if alpha_waves:
        specs1 = self.alpha_specs
        specs2 = self.beta_specs
        f1, f2 = 10, 20
    else:
        specs1 = self.f1_specs
        specs2 = self.f2_specs
        f1, f2 = self.frequencies
    np.random.seed(42)  # reproducible split
    num_train_samples = int(train_fraction * len(specs1))
    idxs = list(range(len(specs1)))
    train_idxs = np.random.choice(idxs, num_train_samples, replace=False)
    test_idxs = list(set(idxs).difference(set(train_idxs)))
    train_f1s = np.concatenate([specs1[i] for i in train_idxs], axis=1)
    train_f2s = np.concatenate([specs2[i] for i in train_idxs], axis=1)
    test_f1s = np.concatenate([specs1[i] for i in test_idxs], axis=1)
    test_f2s = np.concatenate([specs2[i] for i in test_idxs], axis=1)
    train_features = np.concatenate((train_f1s, train_f2s), axis=-1)
    # BUG FIX: label the train targets with f1/f2 (as the test targets
    # already do) -- previously self.frequencies was used here regardless of
    # alpha_waves, so alpha/beta training labels were wrong.
    train_targets = np.array([f1] * train_f1s.shape[1] + \
                             [f2] * train_f2s.shape[1])
    # randomly mix train samples
    train_idxs = np.array(range(train_features.shape[1]))
    np.random.shuffle(train_idxs)
    # transpose features to be (timesteps, frequencies)
    train_features = train_features[:, train_idxs].T
    train_targets = train_targets[train_idxs]
    self.train_features = train_features
    self.train_targets = train_targets
    test_features = np.concatenate((test_f1s, test_f2s), axis=-1)
    test_targets = np.array([f1] * test_f1s.shape[1] + \
                            [f2] * test_f2s.shape[1])
    self.test_features = test_features.T
    self.test_targets = test_targets
def fit_svm(self, C=0.01):
    """Fit an SVM on the train split and print train/test accuracy.

    Parameters
    ----------
    C : float
        Regularization strength passed to ``SVC``.
    """
    classifier = SVC(C=C)
    classifier.fit(self.train_features, self.train_targets)
    train_accuracy = classifier.score(self.train_features, self.train_targets)
    test_accuracy = classifier.score(self.test_features, self.test_targets)
    print('training accuracy:', train_accuracy)
    print('testing accuracy:', test_accuracy)
def plot_spectrogram(self, ts, fs, spec, savefig=False, filename=None):
    """Plot a spectrogram of FFT magnitudes, optionally saving it to disk.

    Parameters
    ----------
    ts : np.array
        Timestamps in seconds.
    fs : np.array
        Frequencies in Hz.
    spec : np.array
        Spectrogram (FFT magnitudes) -- assumed shape (len(fs), len(ts)).
    savefig : boolean
        Whether to save the figure to disk.
    filename : str
        File name of the saved image (defaults to 'saved_plot.png').
    """
    plt.figure(figsize=(12, 12))
    plt.pcolormesh(ts, fs, spec, shading='gouraud')
    plt.ylabel('Frequency [Hz]')
    plt.xlabel('Time [sec]')
    plt.ylim([5, 50])
    plt.colorbar()
    plt.tight_layout()
    # BUG FIX: save before show() -- plt.show() clears/closes the current
    # figure on non-interactive backends, so saving afterwards (as the old
    # code did) wrote a blank image.
    if savefig:
        if filename is None:
            filename = 'saved_plot.png'
        plt.savefig(filename)
    plt.show()
| StarcoderdataPython |
9701986 | <gh_stars>10-100
import os
from graphviz import Digraph
from yattag import Doc, indent
class HTML:
    """Helper functions for creating HTML files with yattag.

    Parameters
    ----------
    outFile: str
        Output file path.
    """

    def __init__(self, outFile):
        """Initialize the yattag document and remember the output path.

        Parameters
        ----------
        outFile: str
            Output file path.
        """
        self._outFile = outFile
        self.doc, self.tag, self.text = Doc().tagtext()
        # reusable CSS helper class for horizontally centred blocks
        self.centerClass = (
            ".center {\n\tdisplay: block;\n\tmargin-left: auto;\n\tmargin-right: auto;\n}\n"
        )

    def GenerateHtml(self):
        """Generate and return the formatted HTML document text."""
        html = indent(self.doc.getvalue(), indentation="", newline="\n")
        return html

    def WriteFile(self):
        """Write the HTML document text to ``self._outFile``."""
        # FIX: guard against an empty dirname (file in the current
        # directory), where makedirs('') would raise; exist_ok avoids the
        # check-then-create race of the previous exists()/makedirs() pair
        out_dir = os.path.dirname(self._outFile)
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)
        # FIX: context manager ensures the handle is closed even on error
        with open(self._outFile, "w") as out:
            out.write(self.GenerateHtml())

    def ReadFile(self, inFile):
        """Read the file specified by "inFile" and return its contents.

        Parameters
        ----------
        inFile : str
            File to read.

        Returns
        -------
        str
            Contents of inFile as a string.
        """
        # FIX: context manager instead of manual open/close
        with open(inFile, "r") as tmpFile:
            return tmpFile.read()
class Dot:
    """Helper class used to create Graphviz dot graphs.

    Parameters
    ----------
    outFile : str
        Filename under which the rendered dot graph should be saved.
    name: str
        Name given to the Digraph.
    """

    def __init__(self, outFile, name):
        """Initialize the class and create the digraph."""
        self._outFile = outFile
        self._name = name
        self.dot = Digraph(name=self._name)

    def Render(self, formats=None):
        """Render the dot graph in each requested format.

        Parameters
        ----------
        formats : list of str, optional
            Output formats to render.  Defaults to ["cmapx", "svg"].
        """
        # FIX: avoid a mutable default argument (shared across calls)
        if formats is None:
            formats = ["cmapx", "svg"]
        for f in formats:
            self.dot.render(self._outFile, format=f, cleanup=True, view=False)
| StarcoderdataPython |
4874204 | import unittest
import riemann
from riemann import simple
from riemann import tx as txn
from riemann.tests import helpers
class TestSimple(unittest.TestCase):
    """Tests for riemann.simple transaction-building helpers against the
    known serialized byte vectors in the test helpers."""

    def setUp(self):
        pass

    def tearDown(self):
        # restore the default network so later tests see a known state
        riemann.select_network('bitcoin_main')

    def test_output(self):
        # every human-readable out must serialize to its known byte vector
        for i, human_out in enumerate(helpers.P2WSH['human']['outs']):
            self.assertEqual(
                simple.output(value=human_out['value'],
                              address=human_out['addr']),
                helpers.P2WSH['ser']['outs'][i]['output'])

    def test_outpoint(self):
        human_in = helpers.P2PKH['human']['ins'][0]
        self.assertEqual(
            simple.outpoint(tx_id=human_in['hash'], index=human_in['index']),
            helpers.P2PKH['ser']['ins'][0]['outpoint'])

    def test_unsigned_input(self):
        human_in = helpers.P2PKH['human']['ins'][0]
        outpoint = simple.outpoint(tx_id=human_in['hash'],
                                   index=human_in['index'])
        # default sequence serializes little-endian as FE FF FF FF
        self.assertEqual(
            simple.unsigned_input(outpoint=outpoint),
            outpoint.to_bytes() + b'\x00' + b'\xFE\xFF\xFF\xFF')
        # an explicit sequence is serialized little-endian as well
        self.assertEqual(
            simple.unsigned_input(outpoint=outpoint, sequence=0x1234abcd),
            outpoint.to_bytes() + b'\x00' + b'\xcd\xab\x34\x12')

    def test_unsigned_legacy_tx(self):
        human = helpers.P2PKH['human']
        outpoint = simple.outpoint(tx_id=human['ins'][0]['hash'],
                                   index=human['ins'][0]['index'])
        tx_in = simple.unsigned_input(outpoint=outpoint,
                                      sequence=human['ins'][0]['sequence'])
        tx_out = simple.output(human['outs'][0]['value'],
                               human['outs'][0]['addr'])
        tx_return_output = txn.make_op_return_output(human['outs'][1]['memo'])
        tx = simple.unsigned_legacy_tx(tx_ins=[tx_in],
                                       tx_outs=[tx_out, tx_return_output])
        self.assertEqual(tx, helpers.P2PKH['ser']['tx']['unsigned'])

    def test_unsigned_witness_tx(self):
        human = helpers.P2WPKH['human']
        outpoint = simple.outpoint(tx_id=human['ins'][0]['hash'],
                                   index=human['ins'][0]['index'])
        tx_in = simple.unsigned_input(outpoint=outpoint,
                                      sequence=human['ins'][0]['sequence'])
        tx_out = simple.output(human['outs'][0]['value'],
                               human['outs'][0]['addr'])
        tx = simple.unsigned_witness_tx(tx_ins=[tx_in],
                                        tx_outs=[tx_out],
                                        lock_time=human['locktime'])
        self.assertEqual(tx, helpers.P2WPKH['ser']['tx']['unsigned'])
| StarcoderdataPython |
8005861 | <filename>src/pyspiffe/workloadapi/jwt_source.py
"""
This module defines the source for JWT Bundles and SVIDs.
"""
from abc import ABC, abstractmethod
from typing import Optional, Set
from pyspiffe.bundle.jwt_bundle.jwt_bundle import JwtBundle
from pyspiffe.spiffe_id.spiffe_id import TrustDomain
from pyspiffe.svid.jwt_svid import JwtSvid
from pyspiffe.spiffe_id.spiffe_id import SpiffeId
class JwtSource(ABC):
    """Source of JWT-SVIDs and JWT bundles maintained via the Workload API.

    Implementations provide JWT-SVIDs and per-trust-domain JWT bundles and
    release any underlying resources when closed.
    """

    @abstractmethod
    def get_jwt_svid(self, audiences: Set[str], subject: Optional[SpiffeId]) -> JwtSvid:
        """Return a JWT-SVID from the source for the given audiences and
        optional subject SPIFFE ID."""
        pass

    @abstractmethod
    def get_jwt_bundle(self, trust_domain: TrustDomain) -> Optional[JwtBundle]:
        """Return the JWT bundle for the given trust domain (or None --
        the return type allows for no bundle being available)."""
        pass

    @abstractmethod
    def close(self) -> None:
        """Close this JwtSource."""
        pass

    @abstractmethod
    def is_closed(self) -> bool:
        """Return True if the connection to the Workload API is closed."""
        pass
| StarcoderdataPython |
11378770 | <reponame>KaustubhKekre/homeland
from django.urls import path
from .views import register_view, login_view, logout_view, dashboard_view
# URL namespace for reversing, e.g. 'accounts:login'
app_name = 'accounts'

urlpatterns = [
    # user dashboard
    path('dashboard', dashboard_view, name='dashboard'),
    # account registration
    path('signup', register_view, name='register'),
    # session login / logout
    path('login', login_view, name='login'),
    path('logout', logout_view, name='logout'),
]
| StarcoderdataPython |
3539125 | <reponame>cowboycow4/cms
"""
Unit and regression test for the hey package.
"""
# Import package, test suite, and other packages as needed
import hey
import pytest
import sys
def test_hey_imported():
    """Sample test: passes as long as the ``import hey`` statement worked."""
    loaded_modules = sys.modules
    assert "hey" in loaded_modules
| StarcoderdataPython |
395154 | from flask import Flask
import json
import os
from flask_sqlalchemy import SQLAlchemy
def open_settings():
    """Read and return the application settings from ``settings.json``."""
    with open("settings.json", "r") as settings_file:
        return json.load(settings_file)
# shared SQLAlchemy handle; bound to the app inside create_app()
db = SQLAlchemy()
# configuration loaded once at import time from settings.json
settings = open_settings()
DB_NAME = settings["db_name"]  # SQLite database filename
host = settings["host"]        # server host used for SERVER_NAME
port = settings["port"]        # server port used for SERVER_NAME
def create_app():
    """Application factory: build and configure the Flask app.

    Sets the secret key, SQLite database URI and server name from the
    loaded settings, registers all blueprints, and makes sure the database
    exists before returning the app.
    """
    app = Flask(__name__)
    app.config.update(
        SECRET_KEY=settings["secret_key"],
        SQLALCHEMY_DATABASE_URI=f"sqlite:///{DB_NAME}",
        SERVER_NAME=f"{host}:{port}",
    )
    db.init_app(app)

    from .views import views
    from .auth import auth
    from .dashboard import dashboard

    for blueprint in (views, auth, dashboard):
        app.register_blueprint(blueprint, url_prefix="/")

    # imported for its side effect: registers the model with SQLAlchemy
    # so create_all() can see it
    from .models import Post

    create_database(app)
    return app
def create_database(app):
    """Create all database tables if the SQLite file does not exist yet.

    Parameters
    ----------
    app : Flask
        The configured application whose tables should be created.
    """
    # BUG FIX: the previous check concatenated "website" and DB_NAME without
    # a path separator ("website<name>"), so the existence test looked at
    # the wrong path and the database was re-created on every start.
    if not os.path.exists(os.path.join("website", DB_NAME)):
        db.create_all(app=app)
| StarcoderdataPython |
372217 | from PySide2.QtCore import QMetaObject, QObject, QRect, QSize
from PySide2.QtGui import QKeySequence
from PySide2.QtWidgets import (
QAction,
QGridLayout,
QLayout,
QMenu,
QMenuBar,
QSplitter,
QStatusBar,
QTabWidget,
QWidget,
)
from nexus_constructor.instrument_view.instrument_view import InstrumentView
from ui.treeview_tab import ComponentTreeViewTab
class Ui_MainWindow(object):
    """Static Qt UI of the NeXus Constructor main window: a splitter
    holding the component-tree tab widget and the 3D instrument view,
    plus the menu bar, actions and status bar."""

    def setupUi(self, MainWindow):
        """Create and lay out all widgets, menus and actions on MainWindow."""
        MainWindow.resize(1280, 720)
        self.central_widget = QWidget(MainWindow)
        self.splitter = QSplitter(self.central_widget)
        self.splitter.setChildrenCollapsible(False)
        self.splitter.setOpaqueResize(True)
        self.main_grid_layout = QGridLayout(self.central_widget)
        self.main_grid_layout.addWidget(self.splitter)
        self.main_grid_layout.setSizeConstraint(QLayout.SetDefaultConstraint)
        self.tab_widget = QTabWidget(self.central_widget)
        self.tab_widget.setMinimumSize(QSize(500, 0))
        # creates self.sceneWidget as well (the tree tab needs it)
        self._set_up_component_tree_view()
        self.splitter.addWidget(self.tab_widget)
        self._set_up_3d_view()
        MainWindow.setCentralWidget(self.central_widget)
        self._set_up_menus(MainWindow)
        self.tab_widget.setCurrentIndex(0)
        QMetaObject.connectSlotsByName(MainWindow)
        # give all extra horizontal space to the 3D view (index 1)
        self.splitter.setStretchFactor(0, 0)
        self.splitter.setStretchFactor(1, 1)

    def _set_up_3d_view(self):
        """Add the 3D instrument view to the right side of the splitter."""
        self.sceneWidget.setMinimumSize(QSize(600, 0))
        self.splitter.addWidget(self.sceneWidget)

    def _set_up_component_tree_view(self):
        """Create the instrument view and the component tree tab.

        The InstrumentView is created here (before _set_up_3d_view) because
        ComponentTreeViewTab takes it as its scene_widget."""
        self.sceneWidget = InstrumentView(self.splitter)
        self.component_tree_view_tab = ComponentTreeViewTab(
            scene_widget=self.sceneWidget, parent=self
        )
        self.tab_widget.addTab(self.component_tree_view_tab, "")

    def _set_up_menus(self, MainWindow: QObject):
        """Create the menu bar, File/View menus, actions and the status bar."""
        self.menu_bar = QMenuBar()
        self.menu_bar.setGeometry(QRect(0, 0, 1280, 720))
        self.file_menu = QMenu(self.menu_bar)
        MainWindow.setMenuBar(self.menu_bar)
        self.status_bar = QStatusBar(MainWindow)
        MainWindow.setStatusBar(self.status_bar)
        self.open_json_file_action = QAction(MainWindow)
        self.open_json_file_action.setShortcut(QKeySequence("Ctrl+O"))
        self.export_to_filewriter_JSON_action = QAction(MainWindow)
        self.export_to_filewriter_JSON_action.setShortcut(QKeySequence("Ctrl+S"))
        self.file_menu.addAction(self.open_json_file_action)
        self.file_menu.addAction(self.export_to_filewriter_JSON_action)
        self.view_menu = QMenu(self.menu_bar)
        self.show_action_labels = QAction(MainWindow)
        self.show_action_labels.setCheckable(True)
        self.simple_tree_view = QAction(MainWindow)
        self.simple_tree_view.setCheckable(True)
        self.about_window = QAction(MainWindow)
        self.view_menu.addAction(self.about_window)
        self.view_menu.addAction(self.show_action_labels)
        self.view_menu.addAction(self.simple_tree_view)
        self.menu_bar.addAction(self.file_menu.menuAction())
        self.menu_bar.addAction(self.view_menu.menuAction())
        self._set_up_titles(MainWindow)

    def _set_up_titles(self, MainWindow):
        """Set the user-visible text of the window, tabs, menus and actions."""
        MainWindow.setWindowTitle("NeXus Constructor")
        self.tab_widget.setTabText(
            self.tab_widget.indexOf(self.component_tree_view_tab), "Nexus Structure"
        )
        self.file_menu.setTitle("File")
        self.open_json_file_action.setText("Open File writer JSON file")
        self.export_to_filewriter_JSON_action.setText("Export to File writer JSON")
        self.view_menu.setTitle("View")
        self.show_action_labels.setText("Show Button Labels")
        self.simple_tree_view.setText("Use Simple Tree Model View")
        self.about_window.setText("About")
| StarcoderdataPython |
11338389 | <gh_stars>0
"""\
Python code generator
@copyright: <NAME>
@copyright: 2002-2007 <NAME>
@copyright: 2012-2016 <NAME>
@copyright: 2016-2021 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import os, os.path, random, re
from codegen import BaseLangCodeWriter, BaseSourceFileContent
import wcodegen
import compat
class SourceFileContent(BaseSourceFileContent):
    """Keeps the hand-written content of an existing generated Python file,
    replacing wxGlade-marked blocks with placeholder tags that are filled
    in again when the file is re-generated."""

    # matches a "# begin wxGlade: Class.block" marker line
    rec_block_start = re.compile(
        r'^(?P<spaces>\s*)'                   # leading spaces
        r'#\s*'                               # comment sign
        r'begin\s+wxGlade:\s*'                # "begin wxGlade:" statement and trailing spaces
        r'(?P<classname>[a-zA-Z_]+\w*)??'     # class or function name (non-greedy)
        r'[.]?'                               # separator between class and function / block (non-greedy)
        r'(?P<block>\w+)'                     # function / block name
        r'\s*$' )                             # trailing spaces

    # matches an "# end wxGlade" marker line
    rec_block_end = re.compile(
        r'^\s*'                               # leading spaces
        r'#\s*'                               # comment sign
        r'end\s+wxGlade'                      # "end wxGlade" statement
        r'\s*$' )                             # trailing spaces

    # Less precise regex, but matches definitions with base classes having module qualified names.
    rec_class_decl = re.compile(
        r'^class\s+([a-zA-Z_]\w*)\s*(\([\s\w.,]*\))?:'  # starting with "class <name>" statement
        r'\s*$' )                             # trailing spaces

    # matches "def OnFoo(self, event):  # wxGlade: Class.<event_handler>"
    rec_event_handler = re.compile(
        r'^\s+'                                             # leading spaces (mandatory)
        r'def\s+(?P<handler>[A-Za-z_]+\w*)'                 # event handler name
        r'\s*'                                              # optional spaces
        r'\(.*\)(?:\s*->\s*None)*:'                         # function parameters; optional PEP 3107 function annotations
        r'\s*'                                              # optional spaces
        r'#\s*wxGlade:\s*(?P<class>\w+)\.<event_handler>'   # wxGlade event handler statement with class name
        r'\s*$' )                                           # trailing spaces

    def build_untouched_content(self):
        """Parse the existing source, replacing wxGlade blocks with tags and
        recording the classes and event handlers that were found.

        Triple-quoted strings are tracked so that markers inside docstrings
        are not treated as real wxGlade markers."""
        BaseSourceFileContent.build_untouched_content(self)
        inside_block = False
        inside_triple_quote = False
        triple_quote_str = None
        tmp_in = self._load_file(self.name)
        out_lines = []
        check_old_methods = []  # list of indices with __set_properties or __do_layout
        for line in tmp_in:
            if line.endswith("\r\n"):  # normalize line ending for files on Windows
                line = "%s\n" % line[:-2]
            quote_index = -1
            # find the earliest triple quote on the line (either style)
            if not inside_triple_quote:
                triple_dquote_index = line.find('"""')
                triple_squote_index = line.find("'''")
                if triple_squote_index == -1:
                    quote_index = triple_dquote_index
                    tmp_quote_str = '"""'
                elif triple_dquote_index == -1:
                    quote_index = triple_squote_index
                    tmp_quote_str = "'''"
                else:
                    quote_index, tmp_quote_str = min( (triple_squote_index, "'''"),
                                                      (triple_dquote_index, '"""') )
            if not inside_triple_quote and quote_index != -1:
                inside_triple_quote = True
                triple_quote_str = tmp_quote_str
            if inside_triple_quote:
                # a matching closing quote later on the same line ends it
                end_index = line.rfind(triple_quote_str)
                if quote_index < end_index and end_index != -1:
                    inside_triple_quote = False

            result = self.rec_class_decl.match(line)
            if not inside_triple_quote and not inside_block and result:
                if not self.class_name:
                    # this is the first class declared in the file: insert the new ones before this
                    out_lines.append('<%swxGlade insert new_classes>' % self.nonce)
                    self.new_classes_inserted = True
                self.class_name = result.group(1)
                self.class_name = self.format_classname(self.class_name)
                self.classes.add( self.class_name )  # add the found class to the list of classes of this module
                out_lines.append(line)
            elif not inside_block:
                result = self.rec_block_start.match(line)
                if not inside_triple_quote and result:
                    # replace the lines inside a wxGlade block with a tag that will be used later by add_class
                    spaces = result.group('spaces')
                    which_class = result.group('classname')
                    which_block = result.group('block')
                    if not which_class:
                        which_class = self.class_name
                    else:
                        which_class = self.format_classname(which_class)
                    self.spaces[which_class] = spaces
                    inside_block = True
                    if not self.class_name:
                        out_lines.append('<%swxGlade replace %s>' % (self.nonce, which_block))
                    else:
                        if which_block in ("__do_layout", "__set_properties"):
                            # probably to be removed
                            check_old_methods.append( len(out_lines) )
                        out_lines.append('<%swxGlade replace %s %s>' % (self.nonce, which_class, which_block))
                else:
                    result = self.rec_event_handler.match(line)
                    if not inside_triple_quote and result:
                        which_handler = result.group('handler')
                        which_class = self.format_classname(result.group('class'))
                        self.event_handlers.setdefault( which_class, set() ).add( which_handler )
                    if self.class_name and self.is_end_of_class(line):
                        # add extra event handlers here...
                        out_lines.append('<%swxGlade event_handlers %s>' % (self.nonce, self.class_name))
                    out_lines.append(line)
            else:
                # ignore all the lines inside a wxGlade block
                if self.rec_block_end.match(line):
                    inside_block = False
        if not self.new_classes_inserted:
            # if we are here, the previous ``version'' of the file did not contain any class,
            # so we must add the new_classes tag at the end of the file
            out_lines.append('<%swxGlade insert new_classes>' % self.nonce)
        # when moving from 0.9 to 1.0: remove empty methods "__do_layout" and "__set_properties"
        while check_old_methods:
            i = check_old_methods.pop(-1)
            if len(out_lines)==i+1 or not out_lines[i+1].strip() or out_lines[i+1].lstrip().startswith("def"):
                self._remove_method(out_lines, i-1, i)
        # set the ``persistent'' content of the file
        self.content = out_lines  # "".join(out_lines)

    def format_classname(self, class_name):
        """Format class name read from existing source file.
        If we're in a subpackage, we should include the package name in the class name."""
        if not self.multiple_files:
            return class_name
        name = self.name
        if self.out_dir:
            name = name.replace(self.out_dir, '')
        # turn the file's directory into a dotted package prefix
        pkg = os.path.dirname(name).replace(os.sep, '.')
        if pkg.startswith('.'):
            pkg = pkg[1:]
        if pkg:
            return pkg + '.' + class_name
        else:
            return class_name
class PythonCodeWriter(BaseLangCodeWriter, wcodegen.PythonMixin):
    "Code writer class for writing Python code out of the designed GUI elements"

    # templates for single property statements; filled via %-formatting
    # with objname/value placeholders when the property is generated
    _code_statements = {'backgroundcolour': "%(objname)s.SetBackgroundColour(%(value)s)\n",
                        'contentnotfound':  "pass",
                        'disabled':         "%(objname)s.Enable(False)\n",
                        'extraproperties':  "%(objname)s.Set%(propname_cap)s(%(value)s)\n",
                        'focused':          "%(objname)s.SetFocus()\n",
                        'foregroundcolour': "%(objname)s.SetForegroundColour(%(value)s)\n",
                        'hidden':           "%(objname)s.Hide()\n",
                        'setfont':          "%(objname)s.SetFont(%(cnfont)s(%(size)s, %(family)s, "
                                            "%(style)s, %(weight)s, %(underlined)s, %(face)s))\n",
                        'tooltip':          "%(objname)s.SetToolTipString(%(tooltip)s)\n",
                        'wxcolour':         "wxColour(%(value)s)",
                        'wxnullcolour':     "wxNullColour"}
    # wx Classic and Phoenix differ in some API calls
    if compat.IS_CLASSIC:
        _code_statements['wxsystemcolour'] = "wxSystemSettings_GetColour(%(value)s)"
        _code_statements['tooltip_3' ] = "%(objname)s.SetToolTip(wx.ToolTip(%(tooltip)s))\n"
    else:
        _code_statements['wxsystemcolour'] = "wxSystemSettings.GetColour(%(value)s)"
        _code_statements['tooltip_3' ] = "%(objname)s.SetToolTip(%(tooltip)s)\n"

    class_separator = '.'        # separator between class and attribute/method in generated code
    indent_level_func_body = 2   # method bodies are indented two levels (class + def)
    name_ctor = '__init__'       # name of the generated constructor

    if compat.PYTHON2:
        shebang = '#!/usr/bin/env python\n'
    else:
        shebang = '#!/usr/bin/env python3\n'

    # class used to parse and keep existing (hand-edited) sources
    SourceFileContent = SourceFileContent

    # templates for file/class framing and sizer items
    tmpl_encoding = "# -*- coding: %s -*-\n#\n"
    tmpl_class_end = '\n%(comment)s end of class %(klass)s\n'
    tmpl_class_end_nomarker = '\n'
    tmpl_func_empty = '%(tab)spass\n'
    tmpl_sizeritem = '%s.Add(%s, %s, %s, %s)\n'
    tmpl_sizeritem_button = '%s.AddButton(%s)\n'
    tmpl_gridbagsizeritem = '%s.Add(%s, %s, %s, %s, %s)\n'
    if compat.IS_CLASSIC:
        tmpl_gridbagsizerspacer = '%s.Add((%s, %s), %s, %s, %s, %s)\n'
    else:
        tmpl_gridbagsizerspacer = '%s.Add(%s, %s, %s, %s, %s, %s)\n'
    # style keyword handling for (top-level) window constructors
    tmpl_style = '%(tab)skwds["style"] = %(style)s\n'
    tmpl_toplevel_style = '%(tab)skwds["style"] = kwds.get("style", 0) | %(style)s\n'
    tmpl_style0 = '%(tab)skwds["style"] = 0\n'
    tmpl_toplevel_style0 = '%(tab)skwds["style"] = kwds.get("style", 0)\n'

    tmpl_appfile = """\
%(overwrite)s\
%(header_lines)s\
%(import_gettext)s\
from %(top_win_module)s import %(top_win_class)s\n\n"""
def _get_app_template(self, app, top_win):
    """Build the template string for the application startup code.

    Returns None when neither an application name nor an application class
    is set; otherwise a '\\n'-joined template with %(...)s placeholders."""
    klass = app.klass
    if not self.app_name and not app.klass: return None
    ret = ['']
    if klass:
        # create application class
        if top_win and top_win.WX_CLASS=="wxDialog":
            # use ShowModal()/Destroy for dialogs
            show_code = ['%(tab)s%(tab)sself.%(top_win)s.ShowModal()',
                         '%(tab)s%(tab)sself.%(top_win)s.Destroy()']
        else:
            # use Show() for other toplevel windows
            show_code = ['%(tab)s%(tab)sself.%(top_win)s.Show()']
        ret += ['class %(klass)s(%(cn_wxApp)s):',
                '%(tab)sdef OnInit(self):',
                '%(tab)s%(tab)sself.%(top_win)s = %(top_win_class)s(None, %(cn_wxIDANY)s, "")',
                '%(tab)s%(tab)sself.SetTopWindow(self.%(top_win)s)'
                ] + show_code + [
                '%(tab)s%(tab)sreturn True',
                '']
        if self._mark_blocks:
            ret.append( '# end of class %(klass)s\n' )

    if self.app_name:
        # instantiate application class or PySimpleApp
        ret.append( 'if __name__ == "__main__":' )
        if self._use_gettext:
            ret.append( '%(tab)sgettext.install("%(textdomain)s") # replace with the appropriate catalog name\n' )
        if klass:
            ret.append( '%(tab)s%(name)s = %(klass)s(0)' )
            ret.append( '%(tab)s%(name)s.MainLoop()' )
        else:
            # use PySimpleApp
            if top_win and top_win.WX_CLASS=="wxDialog":
                show_code = ['%(tab)s%(top_win)s.ShowModal()',
                             '%(tab)s%(top_win)s.Destroy()']
            else:
                show_code = ['%(tab)s%(top_win)s.Show()']
            ret += ['%(tab)s%(name)s = wx.PySimpleApp()',
                    '%(tab)s%(top_win)s = %(top_win_class)s(None, %(cn_wxIDANY)s, "")',
                    '%(tab)s%(name)s.SetTopWindow(%(top_win)s)'
                    ] + show_code + [
                    '%(tab)s%(name)s.MainLoop()']
    ret.append('')
    return '\n'.join(ret)
def init_lang(self, app):
    """Add the Python-specific header lines for the generated file."""
    header = self.header_lines
    # previews on Python 2 need the print function for generated print calls
    if self.preview and compat.PYTHON2:
        header.append('from __future__ import print_function\n')
    header.append('import wx\n')
def add_app(self, app, top_win):
    """Generate the application startup code, adding the Python-specific
    template mappings before delegating to the base implementation."""
    mapping = {
        'cn_wxApp': self.cn('wxApp'),
        'cn_wxIDANY': self.cn('wxID_ANY'),
        'import_gettext': '',
    }
    self.lang_mapping = mapping
    if self._use_gettext:
        # multi-file output imports gettext per file; single-file output
        # records it as a shared dependency instead
        if self.multiple_files:
            mapping['import_gettext'] = 'import gettext\n'
        else:
            self.dependencies.add('import gettext\n')
    BaseLangCodeWriter.add_app(self, app, top_win)
def generate_code_ctor(self, code_obj, is_new, tab):
    """Generate the constructor code lines for a toplevel class, including
    the code of all contained children.

    code_obj: the toplevel widget object; is_new: whether a brand-new class
    declaration must be written; tab: indentation string for the body.
    Returns the list of generated code lines."""
    # generate code for the class constructor, including all children
    code_lines = []
    # empty/whitespace-only lines are collapsed to a bare newline
    write = lambda s: code_lines.append(s if s.strip() else '\n')

    builder = self.obj_builders[code_obj.WX_CLASS]
    mycn = getattr(builder, 'cn', self.cn)
    mycn_f = getattr(builder, 'cn_f', self.cn_f)
    fmt_klass = self.cn_class( code_obj.get_prop_value("class", code_obj.WX_CLASS) )

    # custom base classes support (disabled in preview mode)
    custom_base = None
    if code_obj.check_prop_nodefault('custom_base') and not self.preview:
        custom_base = code_obj.custom_base.strip() or None

    # generate constructor code
    if is_new:
        base = mycn(code_obj.WX_CLASS)
        if custom_base: base = ", ".join([b.strip() for b in custom_base.split(',')])
        write('\nclass %s(%s):\n' % (self.get_class(fmt_klass), base))
        write(self.tabs(1) + 'def __init__(self, *args, **kwds):\n')
    elif custom_base:
        # custom base classes set, but "overwrite existing sources" not set. Issue a warning about this
        self.warning( '%s has custom base classes, but you are not overwriting '
                      'existing sources: please check that the resulting code is correct!' % code_obj.name )

    if self._mark_blocks:
        # __init__ begin tag
        write(self.tmpl_block_begin % {'class_separator': self.class_separator, 'comment_sign': self.comment_sign,
                                       'function': self.name_ctor, 'klass': fmt_klass, 'tab': tab} )

    # the optional initial code from the code properties
    if self._check_code_prop(code_obj, "extracode_pre"):
        for l in code_obj.properties["extracode_pre"].get_lines():
            write(tab + l)

    # set the style keyword argument before calling the base constructor
    style_p = code_obj.properties.get("style")
    if style_p:
        style = style_p.get_string_value()
        m_style = mycn_f( style )
        stmt_style = self._format_style(style, code_obj)
        if stmt_style:
            write(stmt_style % {'style': m_style, 'tab': tab} )

    # initialise custom base class
    if custom_base:
        bases = [b.strip() for b in custom_base.split(',')]
        for i, b in enumerate(bases):
            if not i:
                # only the first base gets the constructor arguments
                write(tab + '%s.__init__(self, *args, **kwds)\n' % b)
            else:
                write(tab + '%s.__init__(self)\n' % b)
    else:
        write(tab + '%s.__init__(self, *args, **kwds)\n' % mycn(code_obj.WX_CLASS))

    # set size here to avoid problems with splitter windows
    if code_obj.check_prop('size'):
        write( tab + self.generate_code_size(code_obj) )
    if code_obj.check_prop('min_size'):
        write( tab + self.generate_code_size(code_obj, code_obj.min_size, "SetMinSize") )

    for l in builder.get_properties_code(code_obj):
        write(tab + l)

    if self._check_code_prop(code_obj, "extraproperties"):
        for l in self.generate_code_extraproperties(code_obj):
            write(tab + l)

    # the initial and final code for the contained elements
    for l in self.classes[code_obj].init:
        write(tab + l)
    if self.classes[code_obj].final:
        write(tab + "\n")
    for l in self.classes[code_obj].final:
        write(tab + l)

    # now check if there is initial and final code for the element itself
    for l in builder.get_init_code(code_obj):
        write(tab + l)

    for l in builder.get_layout_code(code_obj):
        write(tab + l)

    # the optional final code from the code properties
    if self._check_code_prop(code_obj, "extracode_post"):
        for l in code_obj.properties["extracode_post"].get_lines():
            write(tab + l)

    return code_lines
def generate_code_event_bind(self, code_obj, tab, event_handlers):
    """Return the code lines that Bind() the given event handlers.

    event_handlers is a list of (obj, event, handler, unused) tuples; an
    entry with obj None only requests a method stub, no Bind call."""
    code_lines = []
    if event_handlers: code_lines.append('\n')

    for obj, event, handler, unused in event_handlers:
        if obj is None: continue  # bound already, the entry is just for creation of the method stub
        if isinstance(obj, str):
            obj_name_id = "id=%s" % obj  # mainly for toolbar
        else:
            obj_name_id = self.format_generic_access(obj)  # e.g. 'self.button_1' or 'self' for toplevels

        # lambda handlers are emitted verbatim; named ones are methods on self
        if not handler.startswith("lambda "):
            handler = 'self.%s' % handler

        # EVT_NAVIGATION_KEY must be bound on the window itself
        if 'EVT_NAVIGATION_KEY' in event:
            tmpl = '%(tab)sself.Bind(%(event)s, %(handler)s)\n'
        else:
            tmpl = '%(tab)s%(obj_name_id)s.Bind(%(event)s, %(handler)s)\n'
        lines = [tmpl % {'tab': tab, 'event': self.cn(event), 'handler': handler, 'obj_name_id': obj_name_id}]

        if self.preview:
            # for preview we add an exception handler as the event may be unknown for this wx version or at all
            indent = self.tabs(1)  # one additional level
            lines.insert(0, '%stry:\n' % tab)
            lines[1] = indent + lines[1]  # indent by one level
            lines.append( '%sexcept:\n' % tab )
            lines.append( '%s%sprint("could not bind event %s - ignoring error for preview")\n' % (indent, tab, event) )
        code_lines += lines

    return code_lines
def generate_code_event_handler(self, code_obj, is_new, tab, prev_src, event_handlers):
    """Generate default event handler stubs that call event.Skip().

    Builds self.tmpl_func_event_stub and delegates to the base class.
    Python stubs have two indentation levels:
    1st) for the function declaration
    2nd) for the function body"""
    stub = [self.tabs(1), "def %(handler)s(self, event):"]
    if self._mark_blocks: stub.append(" # wxGlade: %(klass)s.<event_handler>")
    stub.append( """\n%(tab)sprint("Event handler '%(handler)s' not implemented!")\n""" )
    stub.append( '%(tab)sevent.Skip()\n' )
    self.tmpl_func_event_stub = "".join(stub)
    # lambda handlers are bound inline and do not need a stub method
    event_handlers = [handler for handler in event_handlers if not handler[2].startswith("lambda ")]
    return BaseLangCodeWriter.generate_code_event_handler( self, code_obj, is_new, tab, prev_src, event_handlers )
def generate_code_id(self, obj, id=None):
    """Generate the (declaration, expression) pair for a window id.

    Returns a tuple: a possibly-empty declaration statement for the id
    variable, and the expression to use for the id value."""
    if obj and self.preview:
        return '', '-1'  # never generate ids for preview code
    if id is None:
        id = obj.window_id
    if not id:
        # no explicit id: use the stock item id if set, else wxID_ANY
        if obj is not None and obj.check_prop_truth("stockitem"):
            return '', self.cn("wxID_" + obj.stockitem)
        return '', self.cn('wxID_ANY')
    id = str(id)
    # an id of the form "NAME=VALUE" declares a variable; a bare name is
    # assumed to be declared elsewhere
    tokens = id.split('=', 1)
    if len(tokens) != 2:
        return '', self.cn(tokens[0])  # we assume name is declared elsewhere
    name, val = tokens
    if not name:
        return '', self.cn(val)
    name = name.strip()
    val = val.strip()
    if val == '?':
        # "NAME=?" requests a freshly generated id
        val = self.cn('wxNewId()')
    else:
        val = self.cn(val)
    # check to see if we have to make the var global or not...
    if '.' in name:
        return '%s = %s\n' % (name, val), name
    return 'global %s; %s = %s\n' % (name, name, val), name
def generate_code_size(self, obj, size=None, method=None):
objname = self.format_generic_access(obj)
if size is None:
size = obj.properties["size"].get_string_value()
use_dialog_units = (size[-1] == 'd')
if method is None:
method = 'SetMinSize' if obj.parent_window else 'SetSize'
if use_dialog_units:
if compat.IS_CLASSIC:
return '%s.%s(%s(%s, (%s)))\n' % ( objname, method, self.cn('wxDLG_SZE'), objname, size[:-1] )
else:
return '%s.%s(%s(%s, %s(%s)))\n' % ( objname, method, self.cn('wxDLG_UNIT'), objname, self.cn("wxSize"), size[:-1] )
else:
return '%s.%s((%s))\n' % (objname, method, size)
    def _quote_str(self, s):
        """Return 's' as a Python string literal suitable for the generated source.

        Non-ASCII characters are escaped to their unicode code points in the
        form \\uxxxx, so the returned literal is pure ASCII.  Ordinary escape
        sequences such as \\n or \\t are left untouched.  When gettext support
        is enabled, the literal is wrapped in a call to _().

        note: wxGlade doesn't handle file encodings well currently, so all
        unicode characters are escaped unconditionally.
        note: the string 's' is already encoded with self.app_encoding.
        see: BaseLangCodeWriter._quote_str and _recode_x80_xff() for details."""
        # work on unicode internally (Python 2 byte strings are decoded first)
        if not isinstance(s, compat.unicode):
            s = s.decode(self.app_encoding)
        # fast path: a pure-ASCII string needs no 'u' prefix and no escaping
        try:
            dummy = s.encode('ascii')
            if self._use_gettext:
                return '_("%s")' % s
            else:
                return '"%s"' % s
        except UnicodeError:
            pass
        # if the target encoding cannot represent the string, escape the
        # offending characters; "raw-unicode-escape" escapes only the unicode
        # characters, not standard escape sequences like \n
        try:
            s.encode(self.app_encoding)
        except UnicodeEncodeError:
            s = s.encode('raw-unicode-escape')
            s = self._recode_x80_xff(s)
            s = s.decode("ASCII")
        if self._use_gettext:
            return '_(u"%s")' % s  # XXX omit u for Python 3
        else:
            return 'u"%s"' % s  # XXX omit u for Python 3
def add_object_format_name(self, name):
return '#self.%s' % name
def _format_classattr(self, obj):
res = BaseLangCodeWriter._format_classattr(self, obj)
if not res:
return res
elif obj.name.startswith('self.'):
return obj.name
# spacer.name is "<width>, <height>" already, but wxPython expect a tuple instead of two single values
elif obj.WX_CLASS in ('spacer','sizerslot'):
return '(%s)' % obj.name
elif self.store_as_attr(obj):
return 'self.%s' % obj.name
return obj.name
def _format_import(self, klass):
return 'from %s import %s\n' % (klass, self.get_class(klass))
def _get_class_filename(self, klass):
return os.path.join( self.out_dir, klass.replace('.', os.sep) + '.py' )
def format_generic_access(self, obj):
ret = self.get_cached(obj, 'attribute_access')
if ret is not None: return ret
ret = obj.IS_CLASS and 'self' or self._format_classattr(obj)
return self.cache(obj, 'attribute_access', ret)
# module-level singleton: the rest of the application accesses the Python
# code generator through this shared instance
writer = PythonCodeWriter()  # The code writer is an instance of PythonCodeWriter.
language = writer.language  # Language generated by this code generator