id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3246889 | input = """
a(1) v a(2).
b(1) v b(2).
okay :- not #count{X:a(X),b(X)}>1, #count{V:a(V),b(V)}>0.
"""
output = """
{a(1), b(1), okay}
{a(1), b(2)}
{a(2), b(1)}
{a(2), b(2), okay}
"""
| StarcoderdataPython |
4806428 | # -*- coding: utf-8 -*-
"""
Created on Thu May 4 15:17:30 2017
@author: nberliner
"""
import numpy as np
import pandas as pd
from features.seaIce import get_seaIce
from features.krillbase import KrillBase
from features.temperature import Temperature
from utils.NestDistance import NestDistance
from utils.utils import get_ts_steps
class Features():
    """Aggregate all feature providers and attach their columns to a
    time-series feature DataFrame.

    The heavy lifting (distance index, sea-ice grid, krill database,
    temperature model) is built once in __init__ and reused by
    add_features().
    """
    def __init__(self, krill_radius, nestCount_radius, padding):
        # Radii control how far around each site the krill / nest-count
        # features look; padding is forwarded to the sea-ice extraction.
        self.krill_radius = krill_radius
        self.nestCount_radius = nestCount_radius
        self.padding = padding
        self.nest_distance = NestDistance()
        self.seaIce = get_seaIce(padding)
        self.krillbase = KrillBase()
        # KrillBase needs an explicit build step before it can be queried.
        self.krillbase.create(krill_radius)
        self.temperature = Temperature()
    def add_features(self, df_features):
        """
        Top-level function to add all desired features to the feature DataFrame
        containing the time-series information.
        """
        # Add the species information
        df_features = add_species(df_features)
        # Add the proximity nest count data
        df_features = add_proximity_nestCount(df_features, self.nestCount_radius, self.nest_distance)
        # Add the sea ice data
        df_features = add_seaIce(df_features, self.seaIce)
        # Add the krill data
        df_features = add_krill(df_features, self.krillbase)
        # Add the temperature data
        df_features = add_temperature(df_features, self.temperature)
        return(df_features)
def add_species(df_features):
    """One-hot encode the penguin species into three 0.0/1.0 columns.

    Adds the columns 'adelie penguin', 'chinstrap penguin' and
    'gentoo penguin' to ``df_features`` and returns the widened frame.
    Assumes 'species' is one level of the (Multi)Index.
    """
    species_col = df_features.reset_index()['species']
    labels = ['adelie penguin', 'chinstrap penguin', 'gentoo penguin']
    # Float matrix of indicator flags, one column per known species.
    flags = np.zeros((species_col.shape[0], len(labels)))
    for position, label in enumerate(labels):
        flags[:, position] = species_col == label
    one_hot = pd.DataFrame(flags, index=df_features.index, columns=labels)
    return pd.concat([df_features, one_hot], axis=1)
def add_proximity_nestCount(df_features, radius, nest_distance):
    """
    This will add the median change of all nests found within radius of each
    location per species. Note that only nests of the same species are considered.

    Adds two columns: 'proximityNestCountChange' (median of the last-year
    values of neighbouring sites, 0 when none are available) and
    'siteCount' (how many neighbour rows were actually found).
    Assumes the index levels are (site_id, species, year) -- TODO confirm.
    """
    # Need to make sure the DataFrame is sorted
    df_features.sort_index(inplace=True)
    # Extract only the time stop column names
    #ts_step = [ item for item in df_features.columns if len(item)==2 and item[0] == 't' ]
    ts_step = get_ts_steps(df_features)[-1] # only take the values of the last year
    #ts_step = ts_step[-1] # only take the values of the last year
    values = list()
    siteCount = list()
    # Iterate over every site_id, species and year
    for site_id, species, year in df_features.index:
        # nest_distance.query returns the site_ids within `radius`
        # (semantics live in utils.NestDistance -- not visible here).
        neighbour_sites = nest_distance.query(site_id, radius)
        val = list()
        count = 0 # it may happen that at a given site
        for nn in neighbour_sites:
            try:
                # Only rows with the same species and year exist under this
                # key; missing combinations are skipped via KeyError.
                val.append(df_features.loc[(nn, species, year),ts_step])
                count += 1
            except KeyError:
                pass
        # Compute the val to store
        val = np.array(val)
        # Drop NaN/inf neighbour values before taking the median.
        val = val[np.isfinite(val)]
        if val.shape == (0,):
            # No usable neighbours: fall back to a neutral 0 change.
            val = [0, ]
        val = np.median(val)
        values.append(val)
        siteCount.append(count)
    df_features = df_features.assign(proximityNestCountChange = values)
    df_features = df_features.assign(siteCount = siteCount)
    return(df_features)
#def add_seaIce(df_features, agg_type, padding=1):
# # Obtain the sea ice values
# seaIce = get_seaIce(agg_type, padding=padding)
#
# # Assemble a DataFrame with the sea ice
# tmp = df_features.reset_index()
# vals = np.array([ seaIce[key] for key in zip(tmp['site_id'], tmp['year']) ])
#
# seaIceCol = [ 'sea_ice_px_%i'%i for i in range(vals.shape[1]) ]
# df_seaIce = pd.DataFrame(vals, index=df_features.index, columns=seaIceCol)
#
# df_features = pd.concat([df_features, df_seaIce], axis=1)
# return(df_features)
def add_seaIce(df_features, seaIce):
    """Append one 'sea_ice_month_i' column per sea-ice value.

    ``seaIce`` is a mapping keyed by (site_id, year) tuples; each value is a
    sequence of per-month readings.  Returns the widened DataFrame.
    """
    keys = df_features.reset_index()
    rows = np.array([seaIce[site_year] for site_year in zip(keys['site_id'], keys['year'])])
    month_columns = ['sea_ice_month_%i' % month for month in range(rows.shape[1])]
    df_ice = pd.DataFrame(rows, index=df_features.index, columns=month_columns)
    return pd.concat([df_features, df_ice], axis=1)
def add_krill(df_features, krillbase):
    """Append a 'krill' column by querying ``krillbase`` per (site, year).

    The index is assumed to carry (site_id, species, year); the species
    level is ignored for the lookup.
    """
    lookups = []
    for site_id, _, year in df_features.index:
        lookups.append(krillbase.query(site_id, int(year)))
    return df_features.assign(krill=lookups)
def add_temperature(df_features, temperature):
    """Append one 'temp_month_i' column per value returned by the
    temperature model for each (site_id, year) pair in the index."""
    # Get the lat, lon positions of the locations
    keys = df_features.reset_index()
    readings = np.array([
        temperature.query(site_id, int(year))
        for site_id, year in zip(keys['site_id'], keys['year'])
    ])
    month_columns = ['temp_month_%i' % month for month in range(readings.shape[1])]
    df_temp = pd.DataFrame(readings, index=df_features.index, columns=month_columns)
    return pd.concat([df_features, df_temp], axis=1)
1770831 | <filename>PiGPIO/views/views.py
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from PiGPIO.models import Program, Dashboard
from PiGPIO.helper import raspi
@login_required
def index(request):
    """Render the dashboard page listing all active Dashboard buttons."""
    buttons = Dashboard.objects.filter(active=True).all()
    return render(request, 'index.html', {'buttons': buttons})
@login_required
def program(request, pk):
    """Render the Blockly editor for one Program.

    NOTE(review): Program.objects.get raises Program.DoesNotExist for an
    unknown pk, which surfaces as a 500 -- confirm whether
    get_object_or_404 was intended.
    """
    return render(request, 'program_blockly.html', {'program': Program.objects.get(pk=pk)})
@login_required
def settings(request):
    """Render the settings page (currently just the language chooser)."""
    return render(request, 'generic/language_chooser.html', )
def docs(request, page=''):
    """Render a documentation page; the docs index when no page is given.

    NOTE(review): unlike the other views this one has no @login_required --
    presumably the docs are deliberately public; confirm.  ``page`` is
    interpolated into a template name; verify the URL pattern constrains it.
    """
    if page == '':
        return render(request, 'docs/docs.html')
    return render(request, 'docs/' + page + '.html')
@login_required
def remote(request):
    """Render the GPIO remote-control page.

    Builds the physical 40-pin header layout as 20 rows of
    {'r': odd-pin, 'l': even-pin} (zero-padded two-digit strings, matching
    what the template and the raspi helper expect), configures every pin as
    an output driven low, and flags pins that could not be configured with
    'error_r'/'error_l' entries.
    """
    pins = []
    for odd in range(1, 41, 2):
        # Zero-pad to two digits; replaces the original manual '0' + str(i)
        # branching with the equivalent printf-style format.
        pins.append({'r': '%02d' % odd, 'l': '%02d' % (odd + 1)})
    raspi.set_mode(0)
    for pair in pins:
        for side in ('r', 'l'):
            try:
                raspi.setup_pin(int(pair[side]), 1)
                raspi.set_output(int(pair[side]), 0)
            except Exception:
                # Bug fix: was a bare ``except:``, which also swallowed
                # SystemExit/KeyboardInterrupt.  Mark the pin as failed and
                # keep going, as before.
                pair['error_' + side] = 'error'
    return render(request, 'remote.html', {'pins': pins})
@login_required
def test(request):
    """Render the static test page (login required)."""
    return render(request, 'test.html', )
| StarcoderdataPython |
43312 | #!/usr/bin/env python
#encoding=utf-8
# Copyright (c) 2012 Baidu, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A utility wraps Python built-in loggings
"""
import logging
import logging.handlers
import os
import platform
import sys
# Python 2 type aliases (mirrors Tornado's compatibility shims).
# NOTE(review): ``basestring_type = str`` excludes ``unicode`` values on
# Python 2 -- Tornado uses ``basestring`` here; confirm intent.  The alias
# is only referenced from a commented-out assert below, so it is inert.
unicode_type = unicode
bytes_type = str
basestring_type = str
try:
    import curses
except ImportError:
    # curses is optional; without it colored terminal output is disabled.
    curses = None
# Module-wide logger for the Bigflow client library.
logger = logging.getLogger("com.baidu.bigflow")
def _safe_unicode(obj, encoding='utf-8'):
    """
    Converts any given object to a unicode string, never raising.
    (Python 2 only: relies on the ``unicode`` builtin.)
    >>> _safe_unicode('hello')
    u'hello'
    >>> _safe_unicode(2)
    u'2'
    >>> _safe_unicode('\xe1\x88\xb4')
    u'\u1234'
    """
    t = type(obj)
    if t is unicode:
        return obj
    elif t is str:
        # Byte string: decode, silently dropping undecodable bytes.
        return obj.decode(encoding, 'ignore')
    elif t in [int, float, bool]:
        return unicode(obj)
    elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
        try:
            return unicode(obj)
        except Exception as e:
            # A failing custom __unicode__ degrades to an empty string;
            # the caught exception is deliberately discarded.
            return u""
    else:
        # Fall back to str() conversion of arbitrary objects, then decode.
        return str(obj).decode(encoding, 'ignore')
def _stderr_supports_color():
import sys
color = False
if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except Exception:
pass
return color
class LogFormatter(logging.Formatter):
    """Log formatter used in Tornado.
    Key features of this formatter are:
    * Color support when logging to a terminal that supports it.
    * Timestamps on every log line.
    * Robust against str/bytes encoding problems.
    This formatter is enabled automatically by
    `tornado.options.parse_command_line` (unless ``--logging=none`` is
    used).

    NOTE(review): this is Python-2 code -- it uses the module-level
    ``unicode_type`` alias and curses byte-string handling from Tornado.
    """
    DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
    DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
    # Map logging level -> terminfo color index.
    DEFAULT_COLORS = {
        logging.DEBUG: 4, # Blue
        logging.INFO: 2, # Green
        logging.WARNING: 3, # Yellow
        logging.ERROR: 1, # Red
    }
    def __init__(self, color=True, fmt=DEFAULT_FORMAT,
                 datefmt=DEFAULT_DATE_FORMAT, colors=None):
        r"""
        :arg bool color: Enables color support.
        :arg string fmt: Log message format.
        It will be applied to the attributes dict of log records. The
        text between ``%(color)s`` and ``%(end_color)s`` will be colored
        depending on the level if color support is on.
        :arg dict colors: color mappings from logging level to terminal color
        code
        :arg string datefmt: Datetime format.
        Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
        .. versionchanged:: 3.2
        Added ``fmt`` and ``datefmt`` arguments.
        """
        logging.Formatter.__init__(self, datefmt=datefmt)
        self._fmt = fmt
        if colors is None:
            colors = LogFormatter.DEFAULT_COLORS
        self._colors = {}
        if color and _stderr_supports_color():
            # The curses module has some str/bytes confusion in
            # python3. Until version 3.2.3, most methods return
            # bytes, but only accept strings. In addition, we want to
            # output these strings with the logging module, which
            # works with unicode strings. The explicit calls to
            # unicode() below are harmless in python2 but will do the
            # right conversion in python 3.
            fg_color = (curses.tigetstr("setaf") or
                        curses.tigetstr("setf") or "")
            if (3, 0) < sys.version_info < (3, 2, 3):
                fg_color = unicode_type(fg_color, "ascii")
            for levelno, code in colors.items():
                self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii")
            # Escape sequence that resets all attributes, appended after
            # each colored prefix.
            self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
        else:
            self._normal = ''
    def format(self, record):
        """Format ``record`` into a single (possibly colored) string,
        folding any traceback text into indented continuation lines."""
        try:
            message = record.getMessage()
            # assert isinstance(message, basestring_type) # guaranteed by logging
            # Encoding notes: The logging module prefers to work with character
            # strings, but only enforces that log messages are instances of
            # basestring. In python 2, non-ascii bytestrings will make
            # their way through the logging framework until they blow up with
            # an unhelpful decoding error (with this formatter it happens
            # when we attach the prefix, but there are other opportunities for
            # exceptions further along in the framework).
            #
            # If a byte string makes it this far, convert it to unicode to
            # ensure it will make it out to the logs. Use repr() as a fallback
            # to ensure that all byte strings can be converted successfully,
            # but don't do it by default so we don't add extra quotes to ascii
            # bytestrings. This is a bit of a hacky place to do this, but
            # it's worth it since the encoding errors that would otherwise
            # result are so useless (and tornado is fond of using utf8-encoded
            # byte strings whereever possible).
            record.message = _safe_unicode(message)
        except Exception as e:
            record.message = "Bad message (%r): %r" % (e, record.__dict__)
        record.asctime = self.formatTime(record, self.datefmt)
        if record.levelno in self._colors:
            record.color = self._colors[record.levelno]
            record.end_color = self._normal
        else:
            record.color = record.end_color = ''
        formatted = self._fmt % record.__dict__
        if record.exc_info:
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            # exc_text contains multiple lines. We need to _safe_unicode
            # each line separately so that non-utf8 bytes don't cause
            # all the newlines to turn into '\n'.
            lines = [formatted.rstrip()]
            lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
            formatted = '\n'.join(lines)
        return formatted.replace("\n", "\n ")
def enable_pretty_logging(
        logger,
        level,
        log_file="",
        backupCount=10,
        maxBytes=10000000):
    """Turns on formatted logging output as configured.

    :arg logger: the Logger to configure (must not be None).
    :arg level: logging level applied to the logger.
    :arg log_file: when non-empty, attach a RotatingFileHandler to this path.
    :arg backupCount: number of rotated files to keep.
    :arg maxBytes: rotation threshold in bytes.
    :raises ValueError: if ``logger`` is None.
    """
    if logger is None:
        # Bug fix: the original raised ``error.BigflowPlanningException`` but
        # no ``error`` module is imported anywhere in this file, so the guard
        # itself died with a NameError.  Raise a stdlib exception instead.
        raise ValueError("logger cannot be None")
    if "__PYTHON_IN_REMOTE_SIDE" in os.environ:
        # Do not do logging at runtime
        logger.addHandler(logging.NullHandler())
    else:
        logger.setLevel(level)
        if log_file:
            channel = logging.handlers.RotatingFileHandler(
                filename=log_file,
                maxBytes=maxBytes,
                backupCount=backupCount)
            channel.setFormatter(LogFormatter(color=False))
            logger.addHandler(channel)
        if not logger.handlers:
            # Set up color if we are in a tty and curses is installed
            channel = logging.StreamHandler()
            channel.setFormatter(LogFormatter())
            logger.addHandler(channel)
def enable_pretty_logging_at_debug(
        logger,
        level,
        log_file="",
        backupCount=10,
        maxBytes=10000000):
    """Turns on formatted logging output only at DEBUG level.

    At any other level the logger just gets a NullHandler so that
    "no handlers found" warnings are suppressed.
    """
    if level != logging.DEBUG:
        logger.addHandler(logging.NullHandler())
        return
    enable_pretty_logging(logger, level, log_file, backupCount, maxBytes)
def init_log(level=logging.INFO):
    """ init_log - initialize log module
    Args:
        level (str): msg above the level will be displayed
            DEBUG < INFO < WARNING < ERROR < CRITICAL \n
            ``the default value is logging.INFO``
    Raises:
        OSError: fail to create log directories
        IOError: fail to open log file
    """
    # The log file path (without extension) comes from the environment;
    # empty means "log to stderr only".
    log_file = os.environ.get("BIGFLOW_LOG_FILE", "")
    if log_file:
        log_file = os.path.abspath(log_file + ".log")
        # NOTE(review): Python 2 print statement -- this whole module is
        # Python-2-only (see the ``unicode`` aliases at the top).
        print >> sys.stderr, "Bigflow Log file is written to [%s]" % log_file
    enable_pretty_logging(logger, level, log_file=log_file)
    #enable_pretty_logging_at_debug(
    # logging.getLogger("pbrpc"),
    # level,
    # log_file=log_file)
    #enable_pretty_logging_at_debug(
    # logging.getLogger("pbrpcrpc_client"),
    # level,
    # log_file=log_file)
init_log(logging.INFO)
| StarcoderdataPython |
from setuptools import setup

# Minimal packaging manifest for the 'tudir' project.
# NOTE(review): packages are listed by hand; find_packages() would pick up
# any newly added subpackages automatically -- confirm before changing.
setup(
    name='tudir',
    version='0.0.1',
    packages=['networks', 'networks.task_heads', 'networks.transformers', 'dataset'],
)
3327220 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.pkg.builtin.libflame import LibflameBase
class Amdlibflame(LibflameBase):
    """libFLAME is a portable library for dense matrix computations,
    providing much of the functionality present in Linear Algebra
    Package (LAPACK). It includes a compatibility layer, FLAPACK,
    which includes complete LAPACK implementation. The library
    provides scientific and numerical computing communities with a
    modern, high-performance dense linear algebra library that is
    extensible, easy to use, and available under an open source
    license. libFLAME is a C-only implementation and does not
    depend on any external FORTRAN libraries including LAPACK.
    There is an optional backward compatibility layer, lapack2flame
    that maps LAPACK routine invocations to their corresponding
    native C implementations in libFLAME. This allows legacy
    applications to start taking advantage of libFLAME with
    virtually no changes to their source code.
    In combination with BLIS library which includes optimizations
    for the AMD EPYC processor family, libFLAME enables running
    high performing LAPACK functionalities on AMD platform."""
    _name = 'amdlibflame'
    homepage = "http://developer.amd.com/amd-cpu-libraries/blas-library/#libflame"
    url = "https://github.com/amd/libflame/archive/2.2.tar.gz"
    git = "https://github.com/amd/libflame.git"
    version('2.2', sha256='12b9c1f92d2c2fa637305aaa15cf706652406f210eaa5cbc17aaea9fcfa576dc')
    version('2.1', sha256='dc2dcaabd4a90ecb328bee3863db0908e412bf7ce5fb8f5e93377fdbca9abb65')
    version('2.0', sha256='c80517b455df6763341f67654a6bda909f256a4927ffe9b4f0a2daed487d3739')
    provides('flame@5.2', when='@2:')
    def configure_args(self):
        """Translate the spec's variants into ./configure flags."""
        # Libflame has a secondary dependency on BLAS,
        # but doesn't know which library name to expect:
        # https://github.com/flame/libflame/issues/24
        config_args = ['LIBS=' + self.spec['blas'].libs.link_flags]
        if '+lapack2flame' in self.spec:
            config_args.append("--enable-lapack2flame")
        else:
            config_args.append("--disable-lapack2flame")
        if '+static' in self.spec:
            config_args.append("--enable-static-build")
        else:
            config_args.append("--disable-static-build")
        if '+shared' in self.spec:
            config_args.append("--enable-dynamic-build")
        else:
            config_args.append("--disable-dynamic-build")
        if '+debug' in self.spec:
            config_args.append("--enable-debug")
        else:
            config_args.append("--disable-debug")
        config_args.extend(self.enable_or_disable('threads'))
        # SuperMatrix (task parallelism) only makes sense with threading on.
        if 'none' != self.spec.variants['threads'].value:
            config_args.append("--enable-supermatrix")
        else:
            config_args.append("--disable-supermatrix")
        # https://github.com/flame/libflame/issues/21
        config_args.append("--enable-max-arg-list-hack")
        config_args.append("--enable-external-lapack-interfaces")
        return config_args
    def install(self, spec, prefix):
        """Build and install; install must be serial (see comment)."""
        make()
        # make install in parallel fails with message 'File already exists'
        make("install", parallel=False)
| StarcoderdataPython |
135404 | <filename>python/send-to-eventhub.py
import json
import logging
import os
import random
import time
import string
from dotenv import load_dotenv
from azure.eventhub import EventHubProducerClient, EventData
def random_text(n=3):
    """Return a random lowercase ASCII string of length ``n`` (default 3)."""
    letters = string.ascii_lowercase
    return ''.join(letters[random.randint(0, 25)] for _ in range(n))
if __name__ == "__main__":
    # Load EHUB_* settings from a local .env file.
    load_dotenv()
    # NOTE(review): this logger is created but never used below -- presumably
    # intended to configure/quiet the Azure SDK's logging; confirm before
    # removing.
    logger = logging.getLogger("azure")
    connection_str = os.environ.get("EHUB_SEND_CONN_STR")
    eventhub_name = os.environ.get("EHUB_SEND_NAME")
    # Endless producer loop: send a batch of random messages, then sleep a
    # random interval.  A fresh client is created per iteration.
    while True:
        client = EventHubProducerClient.from_connection_string(connection_str, eventhub_name=eventhub_name)
        event_data_batch = client.create_batch()
        try:
            terms = random.randint(1,50)
            tokens = random.randint(0,5)
            for i in range(terms):
                message = ' '.join([random_text() for i in range(tokens)])
                event_data_batch.add(EventData(message))
        except ValueError:
            # EventDataBatch.add raises ValueError once the batch is full;
            # any remaining messages for this round are simply dropped.
            pass
        with client:
            client.send_batch(event_data_batch)
        print("Sent {} messages.".format(len(event_data_batch)))
        time_to_sleep = random.randint(0,25)
        print("Sleeping for {}".format(time_to_sleep))
        time.sleep(time_to_sleep)
3291648 | import pymongo
from flask import Flask, render_template, request, jsonify, make_response
from utils.converters import RegexConverter
app = Flask(__name__)
app.secret_key = 'movidesk'
app.url_map.converters['regex'] = RegexConverter
from views import *
# testing some things
# testing again
# teste
if __name__ == '__main__':
app.run()
| StarcoderdataPython |
3355970 | <gh_stars>10-100
from locale import windows_locale
from PyQt5.QtWidgets import QLineEdit, QDialog, QTabWidget, QLabel, QPushButton
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt
from PyQt5 import uic
import numpy as np
from data.user_input.project.printMessageInput import PrintMessageInput
window_title = "ERROR"
class AnalysisSetupInput(QDialog):
    """Modal dialog that collects the frequency range and (where relevant)
    proportional damping settings for an analysis run.

    On a successful "confirm" it stores ``frequencies``, ``f_min``,
    ``f_max``, ``f_step`` and ``global_damping`` on the instance and sets
    ``complete`` (and ``flag_run`` for run-and-close).
    """
    def __init__(self, project, f_min = 0, f_max = 0, f_step = 0):
        super().__init__()
        # |--------------------------------------------------------------------|
        # | Analysis ID codification                                           |
        # |--------------------------------------------------------------------|
        # | 0 - Structural - Harmonic analysis through direct method           |
        # | 1 - Structural - Harmonic analysis through mode superposition      |
        # | 2 - Structural - Modal analysis                                    |
        # | 3 - Acoustic - Harmonic analysis through direct method             |
        # | 4 - Acoustic - Modal analysis (convetional FE 1D)                  |
        # | 5 - Coupled - Harmonic analysis through direct method              |
        # | 6 - Coupled - Harmonic analysis through mode superposition         |
        # |--------------------------------------------------------------------|
        self.analysis_ID = project.analysis_ID
        # Pick the .ui layout that matches the analysis type; modal analyses
        # (IDs 2 and 4) have no setup dialog and bail out immediately.
        if self.analysis_ID in [1,6]:
            uic.loadUi('data/user_input/ui/Analysis/Structural/analysisSetupInput_HarmonicAnalysisModeSuperpositionMethod.ui', self)
        elif self.analysis_ID in [0,5]:
            uic.loadUi('data/user_input/ui/Analysis/Structural/analysisSetupInput_HarmonicAnalysisDirectMethod.ui', self)
        elif self.analysis_ID in [3]:
            uic.loadUi('data/user_input/ui/Analysis/Acoustic/analysisSetupInput_HarmonicAnalysisDirectMethod.ui', self)
        else:
            return
        icons_path = 'data\\icons\\'
        self.icon = QIcon(icons_path + 'pulse.png')
        self.setWindowIcon(self.icon)
        title = project.analysis_type_label
        subtitle = project.analysis_method_label
        self.complete = False
        self.flag_run = False
        self.frequencies = []
        self.f_min = f_min
        self.f_max = f_max
        self.f_step = f_step
        self.global_damping = project.global_damping
        self.modes = 0
        self.label_title = self.findChild(QLabel, 'label_title')
        self.label_subtitle = self.findChild(QLabel, 'label_subtitle')
        # The modes field only exists in the mode-superposition layout.
        if self.analysis_ID == 1:
            self.lineEdit_modes = self.findChild(QLineEdit, 'lineEdit_modes')
        # Damping fields (may be None for layouts without them; only read
        # for analysis IDs 0/1/5/6 in check_exit).
        self.lineEdit_av = self.findChild(QLineEdit, 'lineEdit_av')
        self.lineEdit_bv = self.findChild(QLineEdit, 'lineEdit_bv')
        self.lineEdit_ah = self.findChild(QLineEdit, 'lineEdit_ah')
        self.lineEdit_bh = self.findChild(QLineEdit, 'lineEdit_bh')
        self.lineEdit_fmin = self.findChild(QLineEdit, 'lineEdit_min')
        self.lineEdit_fmax = self.findChild(QLineEdit, 'lineEdit_max')
        self.lineEdit_fstep = self.findChild(QLineEdit, 'lineEdit_step')
        self.pushButton_confirm_close = self.findChild(QPushButton, 'pushButton_confirm_close')
        self.pushButton_confirm_close.clicked.connect(self.check_exit)
        self.pushButton_confirm_run_analysis = self.findChild(QPushButton, 'pushButton_confirm_run_analysis')
        self.pushButton_confirm_run_analysis.clicked.connect(self.check_run)
        self.tabWidget = self.findChild(QTabWidget, 'tabWidget')
        self.tabWidget.currentChanged.connect(self.tabEvent)
        self.currentTab = self.tabWidget.currentIndex()
        self.label_title.setText(title)
        self.label_subtitle.setText(subtitle)
        self.update_frequency_setup_input_texts()
        self.update_damping_input_texts()
        self.exec_()
    def keyPressEvent(self, event):
        """Enter/Return validates-and-runs; Escape closes the dialog."""
        if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:
            self.check_run()
        elif event.key() == Qt.Key_Escape:
            self.close()
    def tabEvent(self):
        """Track the currently selected tab index."""
        self.currentTab = self.tabWidget.currentIndex()
    def update_damping_input_texts(self):
        """Pre-fill the damping fields from the project's current values."""
        if self.analysis_ID not in [2,3,4]:
            if self.global_damping != [0,0,0,0]:
                self.lineEdit_av.setText(str(self.global_damping[0]))
                self.lineEdit_bv.setText(str(self.global_damping[1]))
                self.lineEdit_ah.setText(str(self.global_damping[2]))
                self.lineEdit_bh.setText(str(self.global_damping[3]))
    def update_frequency_setup_input_texts(self):
        """Pre-fill the frequency fields when a previous setup exists."""
        if self.f_step != 0:
            self.lineEdit_fmin.setText(str(self.f_min))
            self.lineEdit_fmax.setText(str(self.f_max))
            self.lineEdit_fstep.setText(str(self.f_step))
    def check_exit(self):
        """Validate every input and store the results on the instance.

        Returns True when validation failed (dialog stays open), False when
        all inputs were accepted (dialog is closed, ``complete`` set).
        """
        input_fmin = input_fmax = input_fstep = 0
        if self.analysis_ID not in [2,4]:
            if self.analysis_ID == 1:
                self.modes = self.check_inputs(self.lineEdit_modes, "'number of modes'")
                if self.stop:
                    self.lineEdit_modes.setFocus()
                    return True
            input_fmin = self.check_inputs(self.lineEdit_fmin, "'minimum frequency'", zero_included=True, _float=True)
            if self.stop:
                self.lineEdit_fmin.setFocus()
                return True
            input_fmax = self.check_inputs(self.lineEdit_fmax, "'maximum frequency'", _float=True)
            if self.stop:
                self.lineEdit_fmax.setFocus()
                return True
            input_fstep = self.check_inputs(self.lineEdit_fstep, "'frequency resolution (df)'", _float=True)
            if self.stop:
                self.lineEdit_fstep.setFocus()
                return True
            if input_fmax < input_fmin + input_fstep:
                title = "Invalid frequency setup"
                message = "The maximum frequency (fmax) must be greater than \n"
                message += "the sum between minimum frequency (fmin) and \n"
                message += "frequency resolution (df)."
                PrintMessageInput([title, message, window_title])
                return True
        alpha_v = beta_v = alpha_h = beta_h = 0.0
        if self.analysis_ID in [0, 1, 5, 6]:
            alpha_v = self.check_inputs(self.lineEdit_av, "'proportional viscous damping (alpha_v)'", zero_included=True, _float=True)
            if self.stop:
                self.lineEdit_av.setFocus()
                return True
            # Bug fix: beta_v and beta_h were validated but their parsed
            # values were never captured, so global_damping always carried
            # zeros for both beta coefficients.
            beta_v = self.check_inputs(self.lineEdit_bv, "'proportional viscous damping (beta_v)'", zero_included=True, _float=True)
            if self.stop:
                self.lineEdit_bv.setFocus()
                return True
            alpha_h = self.check_inputs(self.lineEdit_ah, "'proportional hysteretic damping (alpha_h)'", zero_included=True, _float=True)
            if self.stop:
                self.lineEdit_ah.setFocus()
                return True
            beta_h = self.check_inputs(self.lineEdit_bh, "'proportional hysteretic damping (beta_h)'", zero_included=True, _float=True)
            if self.stop:
                self.lineEdit_bh.setFocus()
                return True
        self.global_damping = [alpha_v, beta_v, alpha_h, beta_h]
        self.f_min = input_fmin
        self.f_max = input_fmax
        self.f_step = input_fstep
        self.frequencies = np.arange(input_fmin, input_fmax+input_fstep, input_fstep)
        self.complete = True
        self.close()
        return False
    def check_inputs(self, lineEdit, label, only_positive=True, zero_included=False, _float=False):
        """Parse and validate a single line-edit field.

        Sets ``self.stop = True`` and returns None on invalid input (after
        showing an error popup); otherwise returns the parsed number
        (float when ``_float`` is set).  An empty field is accepted as 0
        only when ``zero_included`` is True.
        """
        self.stop = False
        message = ""
        title = "Invalid input to the analysis setup"
        if lineEdit.text() != "":
            try:
                if _float:
                    out = float(lineEdit.text())
                else:
                    out = int(lineEdit.text())
                if only_positive:
                    if zero_included:
                        if out < 0:
                            message = f"Insert a positive value to the {label}."
                            message += "\n\nNote: zero value is allowed."
                    else:
                        if out <= 0:
                            message = f"Insert a positive value to the {label}."
                            message += "\n\nNote: zero value is not allowed."
            except Exception as _err:
                message = "Dear user, you have typed and invalid value at the \n"
                message += f"{label} input field.\n\n"
                message += str(_err)
        else:
            if zero_included:
                return float(0)
            else:
                message = f"Insert some value at the {label} input field."
        if message != "":
            PrintMessageInput([title, message, window_title])
            self.stop = True
            return None
        return out
    def check_run(self):
        """Validate inputs and, when valid, flag the analysis to run."""
        if self.check_exit():
            return
        self.flag_run = True
| StarcoderdataPython |
3365825 | from django import forms
from django.contrib.auth.models import User
from Easynote.models import Notes
from Easynote.lib import const
class AuthenticationForm(forms.Form):
    """
    Login form. Inherits from Form.
    :field username: User username. Must be a str.
    :field password: User password. Must be a str.
    Field lengths are bounded by the shared ``const.AUTH`` limits.
    """
    username = forms.CharField(max_length=const.AUTH["username"], widget=forms.TextInput(attrs={"class":"form-control","placeholder":"Username"}))
    password = forms.CharField(max_length=const.AUTH["password"], widget=forms.PasswordInput(attrs={"class":"form-control","placeholder":"Password"}))
class RegisterForm(forms.ModelForm):
    """
    Registration form. Inherits from ModelForm (model: User).
    :field username: User username. Must be a str.
    :field password: User password. Must be a str.
    :field confirm_password: Password confirmation. Must be a str.
    """
    username = forms.CharField(max_length=const.AUTH["username"], widget=forms.TextInput(attrs={"class":"form-control","placeholder":"Username"}))
    password = forms.CharField(max_length=const.AUTH["password"], widget=forms.PasswordInput(attrs={"class":"form-control","placeholder":"Password"}))
    confirm_password = forms.CharField(max_length=const.AUTH["password"], widget=forms.PasswordInput(attrs={"class":"form-control","placeholder":"Confirm password"}))
    class Meta:
        """
        Binds the form to the User model; save() creates the new User row.
        NOTE(review): confirm_password is listed in ``fields`` but is not a
        User model field -- presumably stripped in the view before save;
        confirm.
        """
        model = User
        fields = ("username", "password", "confirm_password")
class NewNoteForm(forms.ModelForm):
    """
    Note-creation form. Inherits from ModelForm (model: Notes).
    :field name: Notes name. Must be a str.
    :field summary: Notes summary. Must be a str.
    """
    name = forms.CharField(max_length=const.NOTES["name"], widget=forms.TextInput(attrs={"class":"form-control", "placeholder":"Name"}))
    summary = forms.CharField(widget=forms.Textarea(attrs={"class":"form-control", "cols":"80", "rows":"10", "placeholder":"Type your text here"}))
    class Meta:
        """
        Binds the form to the Notes model; save() creates the new Notes row.
        """
        model = Notes
        fields = ("name", "summary")
class EditNoteForm(forms.Form):
    """
    Note-editing form. Inherits from Form (not ModelForm: the view applies
    the changes itself).  The name field is rendered read-only.
    :field name: Notes name. Must be a str.
    :field summary: Notes summary. Must be a str.
    """
    name = forms.CharField(max_length=const.NOTES["name"], widget=forms.TextInput(attrs={"class":"form-control", "placeholder":"Name", "readonly":"" }))
    summary = forms.CharField(widget=forms.Textarea(attrs={"class":"form-control", "cols":"80", "rows":"10", "placeholder":"Type your text here"}))
class ChangePasswordForm(forms.Form):
    """
    Password-change form. Inherits from Form.
    :field username: Hidden field carrying the account name. Must be a str.
    :field current_password: Must be a str.
    :field new_password: Must be a str.
    :field confirm_password: Must be a str.
    """
    username = forms.CharField(max_length=const.AUTH["username"], widget=forms.TextInput(attrs={"hidden":""}))
    current_password = forms.CharField(max_length=const.AUTH["password"], widget=forms.PasswordInput(attrs={"class":"form-control","placeholder":"Current password"}))
    # NOTE(review): the "<PASSWORD>" placeholder below looks like a dataset
    # anonymization artifact -- probably meant "New password"; confirm
    # against the original repository before changing the literal.
    new_password = forms.CharField(max_length=const.AUTH["password"], widget=forms.PasswordInput(attrs={"class":"form-control","placeholder":"<PASSWORD>"}))
    confirm_password = forms.CharField(max_length=const.AUTH["password"], widget=forms.PasswordInput(attrs={"class":"form-control","placeholder":"Confirm password"}))
| StarcoderdataPython |
3385262 | <gh_stars>0
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.base.base_entity import BaseEntity
import logging
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
class HostPatchManager(BaseEntity):
'''This managed object is the interface for scanning and patching an ESX server.
VMware publishes updates through its external website. A patch update is
synonymous with a bulletin. An update may contain many individual patch
binaries, but its installation and uninstallation are atomic.'''
def __init__(self, core, name=None, ref=None, type=ManagedObjectTypes.HostPatchManager):
super(HostPatchManager, self).__init__(core, name=name, ref=ref, type=type)
def CheckHostPatch_Task(self, metaUrls=None, bundleUrls=None, spec=None):
'''Check the list of metadata and returns the dependency, obsolete and conflict
information The operation is cancelable through the returned Task object. No
integrity checks are performed on the metadata.
:param metaUrls: a list of urls pointing to metadata.zip.
:param bundleUrls: a list of urls pointing to an "offline" bundle.
:param spec:
'''
return self.delegate("CheckHostPatch_Task")(metaUrls, bundleUrls, spec)
def InstallHostPatch_Task(self, repository, updateID, force=None):
'''<b>Deprecated.</b> <i>Method is deprecated, use InstallHostPatchV2_Task
instead.</i> Patch the host. The operation is not cancelable. If the patch
installation failed, an atomic rollback of the installation will be attempted.
Manual rollback is required if the atomic rollback failed, see
PatchInstallFailed for details.
:param repository: Location of the repository that contains the bulletin depot. The depot must be organized as a flat collection of bulletins with each one being a folder named after the bulletin ID. Each folder must contain both update metadata and required binaries.
:param updateID: The update to be installed on the host.
:param force: Specify whether to force reinstall an update. By default, installing an already-installed update would fail with the PatchAlreadyInstalled fault. If force is set to true, the update will be forcifully reinstalled, thus overwriting the already installed update.
'''
return self.delegate("InstallHostPatch_Task")(repository, updateID, force)
def InstallHostPatchV2_Task(self, metaUrls=None, bundleUrls=None, vibUrls=None, spec=None):
'''Patch the host. The operation is not cancelable. If the patch installation
failed, an atomic rollback of the installation will be attempted. Manual
rollback is required if the atomic rollback failed, see PatchInstallFailed for
details.
:param metaUrls: A list of urls pointing to metadata.zip.
:param bundleUrls: a list of urls pointing to an "offline" bundle.
:param vibUrls: The urls of update binary files to be installed.
:param spec:
'''
return self.delegate("InstallHostPatchV2_Task")(metaUrls, bundleUrls, vibUrls, spec)
def QueryHostPatch_Task(self, spec=None):
'''Query the host for installed bulletins.
:param spec:
'''
return self.delegate("QueryHostPatch_Task")(spec)
def ScanHostPatch_Task(self, repository, updateID=None):
'''<b>Deprecated.</b> <i>As of VI API 4.0, use ScanHostPatchV2_Task.</i> Scan the
host for the patch status. The operation is cancelable through the returned
Task object. Integrity checks are performed on the metadata only during the
scan operation.
:param repository: Location of the repository that contains the bulletin depot. The depot must be organized as a flat collection of bulletins with each one being a folder named after the bulletin ID. Each folder must contain the full update metadata.
:param updateID: The updates to scan. Wildcards can be used to specify the update IDs. The wildcards will be expanded to include all updates whose IDs match the specified wildcard and whose metadata is available in the repository. Specifying no update is equivalent to a wildcard "*". In this case all updates available in the repository will be scanned.
'''
return self.delegate("ScanHostPatch_Task")(repository, updateID)
def ScanHostPatchV2_Task(self, metaUrls=None, bundleUrls=None, spec=None):
'''Scan the host for the patch status. The operation is cancelable through the
returned Task object. Integrity checks are performed on the metadata only
during the scan operation.
:param metaUrls: a list of urls pointing to metadata.zip.
:param bundleUrls: a list of urls pointing to an "offline" bundle.
:param spec:
'''
return self.delegate("ScanHostPatchV2_Task")(metaUrls, bundleUrls, spec)
def StageHostPatch_Task(self, metaUrls=None, bundleUrls=None, vibUrls=None, spec=None):
'''Stage the vib files to esx local location and possibly do some run time check.
:param metaUrls: A list of urls pointing to metadata.zip.
:param bundleUrls: a list of urls pointing to an "offline" bundle.
:param vibUrls: The urls of update binary files to be staged.
:param spec:
'''
return self.delegate("StageHostPatch_Task")(metaUrls, bundleUrls, vibUrls, spec)
def UninstallHostPatch_Task(self, bulletinIds=None, spec=None):
'''Uninstall patch from the host. The operation is not cancelable.
:param bulletinIds: A list of bulletin IDs to be removed.
:param spec:
'''
return self.delegate("UninstallHostPatch_Task")(bulletinIds, spec) | StarcoderdataPython |
1659004 | #!/usr/bin/env python3
# testcases.py
import json
import math
import os.path
import sys
import traceback
from argparse import ArgumentParser
from typing import NamedTuple, List, Dict, Any, Optional
import logging
import hwsuite
_log = logging.getLogger(__name__)

# NOTE(review): appears unused in this module -- ParameterSource.precision()
# derives the width itself; confirm before removing.
_DEFAULT_CASE_ID_PRECISION = 2
# Basename of the per-directory test case definitions file searched for.
_DEFAULT_DEFINITIONS_FILENAME = "test-cases.json"
# NOTE(review): appears unused -- to_pathname() and the CLI default both
# hard-code "test-cases" instead of referencing this constant.
_DEFAULT_TEST_CASES_DIRNAME = "test-cases"
def to_pathname(filename, disable_mkdir=False, dirname='test-cases'):
    """Return the path of *filename* inside this module's test case directory.

    The parent directory is created (if needed) unless *disable_mkdir* is
    true. ``dirname`` generalizes the previously hard-coded "test-cases"
    directory name while keeping the default behaviour unchanged.
    """
    pathname = os.path.join(os.path.dirname(__file__), dirname, filename)
    if not disable_mkdir:
        os.makedirs(os.path.dirname(pathname), exist_ok=True)
    return pathname
def _read_file_text(pathname) -> str:
with open(pathname, 'r') as ifile:
return ifile.read()
class ParameterSource(NamedTuple):
    """Input/expected templates plus the parameter sets used to render them.

    Each entry of ``test_cases`` maps template placeholder names to values;
    the ``render_*`` methods expand the templates with ``str.format``.
    """

    input_text_template: str
    expected_text_template: str
    test_cases: List[Dict[str, Any]]
    case_id_precision: Optional[int]

    def __str__(self):
        return f"ParameterSource<num_test_cases={len(self.test_cases)}>"

    def precision(self) -> int:
        """Return the zero-padding width used for case ids.

        An explicit ``case_id_precision`` wins; otherwise the width is derived
        from the number of test cases (e.g. 10 cases -> 2 digits).
        """
        if self.case_id_precision is not None:
            return self.case_id_precision
        if not self.test_cases:
            # bug fix: math.log10(0) raises ValueError; one digit suffices
            # when there are no test cases at all
            return 1
        return 1 + int(math.log10(len(self.test_cases)))

    def render_input_text(self, test_case):
        """Render the input-file contents for one test case dict."""
        return self.input_text_template.format(**test_case)

    def render_expected_text(self, test_case):
        """Render the expected-output contents for one test case dict."""
        return self.expected_text_template.format(**test_case)

    @staticmethod
    def _template_from_model(model: Dict, root_dir: str, key: str) -> str:
        """Return the template for *key*: either inline (``model[key]``) or
        read from the file named by ``model[key + '_file']`` (resolved against
        *root_dir* when relative). Raises ValueError when neither yields text.
        """
        file_key = key + '_file'
        if key in model and file_key in model:
            _log.warning("model defines both '%s' and '%s'; using '%s'", key, file_key, key)
        template = None
        if key in model:
            template = model[key]
        elif file_key in model:
            path = model[file_key]
            if not os.path.isabs(path):
                path = os.path.join(root_dir, path)
            template = _read_file_text(path)
        if template is None:
            raise ValueError(f"model must define '{key}' or '{file_key}'")
        return template

    @staticmethod
    def load(model: Dict, root_dir: str) -> 'ParameterSource':
        """Build a ParameterSource from a parsed definitions-file model.

        Array-style test cases are converted to dicts via ``param_names``;
        a missing ``test_cases`` key is tolerated (logged as a warning).
        """
        input_text_template = ParameterSource._template_from_model(model, root_dir, 'input')
        expected_text_template = ParameterSource._template_from_model(model, root_dir, 'expected')
        test_cases = []
        try:
            param_names = None
            for test_case in model['test_cases']:
                if isinstance(test_case, dict):
                    test_cases.append(test_case)
                else:
                    param_names = param_names or model.get('param_names', None)
                    if param_names is None:
                        raise ValueError("'param_names' must be defined if array test cases are defined")
                    # index explicitly so a too-short row still raises IndexError
                    test_cases.append({param_names[i]: test_case[i] for i in range(len(param_names))})
        except KeyError:
            _log.warning("test cases not defined")
        precision = model.get('case_id_precision', None)
        return ParameterSource(input_text_template, expected_text_template, test_cases, precision)
def write_cases(param_source: ParameterSource, dest_dir: str, suffix=".txt", onerror='continue'):
    """Render every test case of *param_source* into ``<dest_dir>/<id>-input<suffix>``
    and ``<dest_dir>/<id>-expected<suffix>`` files.

    With ``onerror='continue'`` (default) failures are logged and skipped;
    with ``onerror='raise'`` the first failure propagates.
    """
    written = 0
    for index, case in enumerate(param_source.test_cases):
        try:
            rendered_input = param_source.render_input_text(case)
            case_id = f"{index + 1:0{param_source.precision()}d}"
            input_path = os.path.join(dest_dir, f"{case_id}-input{suffix}")
            os.makedirs(os.path.dirname(input_path), exist_ok=True)
            with open(input_path, 'w') as ofile:
                ofile.write(rendered_input)
            expected_path = os.path.join(dest_dir, f"{case_id}-expected{suffix}")
            os.makedirs(os.path.dirname(expected_path), exist_ok=True)
            with open(expected_path, 'w') as ofile:
                ofile.write(param_source.render_expected_text(case))
            written += 1
        except Exception:
            if onerror == 'raise':
                raise
            exc_info = sys.exc_info()
            info = traceback.format_exception(*exc_info)
            _log.debug("writing cases: exception traceback:\n%s", "".join(info).strip())
            e = exc_info[1]
            _log.warning("failed to write cases to %s: %s, %s", dest_dir, type(e), e)
            continue
    _log.debug("%s of %s test cases generated in %s", written, len(param_source.test_cases), dest_dir)
def is_skel_file(pathname, proj_dir):
    """Return True when *pathname* lives under ``<proj_dir>/skel``.

    NOTE(review): this is a plain prefix match, so a sibling directory such
    as ``skel-old`` would also match -- confirm that is intended.
    """
    normalized = os.path.normpath(os.path.abspath(pathname))
    skel_root = os.path.normpath(os.path.join(os.path.abspath(proj_dir), 'skel'))
    return normalized.startswith(skel_root)
def find_all_definitions_files(top_dir: str, filename: str) -> List[str]:
    """Recursively collect files named *filename* under *top_dir*, excluding
    anything inside the project's ``skel`` directory."""
    matches = []
    for root, _dirs, files in os.walk(top_dir):
        for candidate in files:
            if candidate != filename:
                continue
            full_path = os.path.join(root, candidate)
            if not is_skel_file(full_path, top_dir):
                matches.append(full_path)
    return matches
def produce_from_defs(defs_file: str, dest_dirname: str = 'test-cases', onerror='continue') -> ParameterSource:
    """Load *defs_file* (JSON), write its rendered case files into a sibling
    *dest_dirname* directory, and return the loaded ParameterSource."""
    with open(defs_file, 'r') as ifile:
        model = json.load(ifile)
    defs_dir = os.path.dirname(defs_file)
    param_source = ParameterSource.load(model, defs_dir)
    write_cases(param_source, os.path.join(defs_dir, dest_dirname), onerror=onerror)
    return param_source
def produce_files(subdirs: Optional[List[str]], definitions_filename: str, dest_dirname: str, proj_dir: str=None):
    """Generate case files for every definitions file found.

    When *subdirs* is empty the whole project tree is searched; otherwise only
    existing ``<subdir>/<definitions_filename>`` paths are used. Returns the
    number of definitions files processed successfully.
    """
    proj_dir = os.path.abspath(proj_dir or hwsuite.find_proj_root())
    if subdirs:
        candidates = (os.path.join(d, definitions_filename) for d in subdirs)
        # only keep paths that actually exist on disk
        defs_files = [p for p in candidates if os.path.exists(p)]
    else:
        defs_files = find_all_definitions_files(proj_dir, definitions_filename)
    nsuccesses = 0
    for defs_file in defs_files:
        try:
            produce_from_defs(defs_file, dest_dirname)
            nsuccesses += 1
        except Exception:
            exc_info = sys.exc_info()
            info = traceback.format_exception(*exc_info)
            _log.debug("exception info:\n%s", "".join(info).strip())
            e = exc_info[1]
            _log.warning("failure to load model and write cases from %s: %s, %s", defs_file, type(e), e)
    _log.debug("test cases generated from %s of %s definitions files", nsuccesses, len(defs_files))
    if nsuccesses == 0:
        _log.error("test case generation did not succeed for any of %s definitions files", len(defs_files))
    return nsuccesses
def main():
    """Command-line entry point; returns a process exit code (0/1/2)."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument("subdirs", nargs='*', metavar="DIR", help="subdirectory containing 'test-cases.json` file")
    arg_parser.add_argument("--definitions-filename", metavar="BASENAME", default=_DEFAULT_DEFINITIONS_FILENAME, help="test cases definitions filename to search for, if not 'test-cases.json'")
    arg_parser.add_argument("--dest-dirname", default="test-cases", metavar="BASENAME", help="destination directory name (relative to definitions file location)")
    hwsuite.add_logging_options(arg_parser)
    args = arg_parser.parse_args()
    hwsuite.configure_logging(args)
    try:
        num_ok = produce_files(args.subdirs, args.definitions_filename, args.dest_dirname)
    except hwsuite.MessageworthyException as ex:
        print(f"{__name__}: {type(ex).__name__}: {ex}", file=sys.stderr)
        return 1
    return 0 if num_ok > 0 else 2
| StarcoderdataPython |
3247371 | from django.shortcuts import get_object_or_404, render
# Create your views here.
from .models import BlogAuthor, Blog, BlogComment
def index(request):
    """View function for home page of site."""
    # Static landing page; no context data is needed.
    return render(request, 'index.html',)
from django.views import generic
class BlogListView(generic.ListView):
    """Paginated list view of Blog objects, five per page."""
    model = Blog
    paginate_by = 5
class BlogDetailView(generic.DetailView):
    """Detail view for a single Blog object (looked up by URL pk)."""
    model = Blog
class BlogAuthorListView(generic.ListView):
    """List view of all BlogAuthor objects (no pagination configured)."""
    model = BlogAuthor
class BlogAuthorDetailView(generic.DetailView):
    """Detail view for a single BlogAuthor object."""
    model = BlogAuthor
from django.views.generic.edit import CreateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
class BlogCommentCreate(LoginRequiredMixin, CreateView):
    """Form view letting a logged-in user comment on a blog post.

    The blog's primary key comes from the URL (``pk``); the author is the
    logged-in user, so only the comment text is exposed on the form.
    """
    model = BlogComment
    fields = ['description',]

    def get_context_data(self, **kwargs):
        """
        Add associated blog to form template so can display its title in HTML.
        """
        # Call the base implementation first to get a context.
        # (consistency fix: zero-argument super(), matching form_valid below)
        context = super().get_context_data(**kwargs)
        # Get the blog form id and add it to the context
        context['blog'] = get_object_or_404(Blog, pk=self.kwargs['pk'])
        return context

    def form_valid(self, form):
        """
        Add author and associated blog to form data before setting it as valid
        (so it is saved to the model).
        """
        # Add logged-in user as author of comment
        form.instance.author = self.request.user
        # Associate comment with blog based on passed id
        form.instance.blog = get_object_or_404(Blog, pk=self.kwargs['pk'])
        # Call super-class form validation behaviour
        return super().form_valid(form)

    def get_success_url(self):
        """
        After posting comment return to associated blog.
        """
        return reverse('blog-detail', kwargs={'pk': self.kwargs['pk'],})
class A(object):
    # Shadows object.__sizeof__ with a plain int on this class.
    __sizeof__ = 17
    # NOTE(review): this looks like an IDE reference-resolution test fixture
    # (see the <ref> marker). The print executes while the class body is
    # evaluated and resolves to the class-scope name above.
    print(__sizeof__)
    # <ref>
1778078 | import matplotlib.pyplot as plt
import base64
from io import BytesIO
import numpy as np
def get_graph():
    """Encode the current matplotlib figure as a base64 PNG string."""
    png_buffer = BytesIO()
    plt.savefig(png_buffer, format='png')
    png_buffer.seek(0)
    encoded = base64.b64encode(png_buffer.getvalue()).decode('utf-8')
    png_buffer.close()
    return encoded
def get_pie_plot(data,labels,title):
    """Return a base64-encoded PNG of a pie chart of *data* with *labels*."""
    plt.switch_backend('AGG')  # headless backend: render without a display
    # Creating explode data
    # explode = (0.1, 0.0)
    # Creating color parameters
    colors = ("orange", "cyan", "brown",
              "grey", "indigo", "beige")
    # Wedge properties
    wp = {'linewidth': 1, 'edgecolor': "green"}
    # Creating autocpt arguments
    def func(pct, allvalues):
        # slice annotation: percentage plus the reconstructed absolute value
        absolute = float(pct / 100. * np.sum(allvalues))
        return "{:.1f}%\n({:.2f})".format(pct, absolute)
    # Creating plot
    fig, ax = plt.subplots(figsize=(8, 4.5))
    wedges, texts, autotexts = ax.pie(data,
                                      autopct=lambda pct: func(pct, data),
                                      labels=labels,
                                      shadow=True,
                                      colors=colors,
                                      startangle=90,
                                      wedgeprops=wp,
                                      textprops=dict(color="black"))
    # Adding legend
    ax.legend(wedges, labels,
              # title="Cars",
              loc="best",
              bbox_to_anchor=(1, 0, 0.5, 1))
    plt.setp(autotexts, size=8, weight="bold")
    ax.set_title(title)
    # show plot
    # plt.show()
    # plt.figure(figsize=(10,5))
    # plt.title("Teacher Count VS Student Count")
    # plt.pie(data,labels=labels)
    # plt.show()
    plt.tight_layout()
    graph=get_graph()
    return graph
def courses_bar_chart(courses, values, person):
    """Return a base64-encoded PNG horizontal bar chart of *values* per course.

    *person* names the population being counted ("Students", "Teachers", ...)
    and is interpolated into the chart title.
    """
    fig, ax = plt.subplots(figsize=(10, 5))
    # Horizontal bar plot, one colour per course
    ax.barh(courses, values, color=['red', 'blue', 'purple', 'green', 'lavender', 'skyblue'])
    # Remove the frame around the axes
    for s in ['top', 'bottom', 'left', 'right']:
        ax.spines[s].set_visible(False)
    # Remove x, y ticks and pad the labels
    ax.xaxis.set_ticks_position('none')
    ax.yaxis.set_ticks_position('none')
    ax.xaxis.set_tick_params(pad=5)
    ax.yaxis.set_tick_params(pad=10)
    # Light dashed gridlines
    # NOTE(review): the 'b' kwarg was removed in matplotlib 3.5+; use
    # visible=True if the project upgrades matplotlib.
    ax.grid(b=True, color='grey',
            linestyle='-.', linewidth=0.5,
            alpha=0.2)
    # Show the largest value at the top
    ax.invert_yaxis()
    # Annotate each bar with its value
    for i in ax.patches:
        plt.text(i.get_width() + 0.2, i.get_y() + 0.5,
                 str(round((i.get_width()), 2)),
                 fontsize=10, fontweight='bold',
                 color='grey')
    # Add Plot Title
    ax.set_title(person+' Registered in each Course',
                 loc='left', )
    plt.tight_layout()
    graph = get_graph()
    # bug fix: close the figure so repeated calls do not accumulate open
    # pyplot figures (memory leak / "more than 20 figures" warning).
    plt.close(fig)
    return graph
def attendance_pie_chart(labels, values):
    """Return a base64-encoded PNG line chart of attendance over time.

    NOTE(review): despite the name this draws a *line* chart (dates on x,
    present count on y), not a pie chart; the name is kept for callers.
    """
    plt.figure(figsize=(10, 5))
    plt.title("Attendance Performance Chart")
    plt.xlabel('Dates')
    plt.ylabel('Number of Students Present')
    plt.plot(labels, values)
    plt.tight_layout()
    graph = get_graph()
    # bug fix: close the figure so repeated calls do not accumulate open
    # pyplot figures (memory leak).
    plt.close()
    return graph
def quiz_analytics_bar(lables,values,title):
    """Return a base64-encoded PNG horizontal bar chart for quiz analytics.

    NOTE(review): dead code -- this definition is shadowed at import time by
    the 5-argument quiz_analytics_bar defined immediately below, which
    rebinds the same module-level name.
    """
    fig, ax = plt.subplots(figsize=(10, 2))
    # Horizontal Bar Plot
    ax.barh(lables, values,color=['green'])
    # Remove axes splines
    for s in ['top', 'bottom', 'left', 'right']:
        ax.spines[s].set_visible(False)
    # Remove x, y Ticks
    ax.xaxis.set_ticks_position('none')
    ax.yaxis.set_ticks_position('none')
    # Add padding between axes and labels
    ax.xaxis.set_tick_params(pad=5)
    ax.yaxis.set_tick_params(pad=10)
    # Add x, y gridlines
    ax.grid(b=True, color='grey',
            linestyle='-.', linewidth=0.5,
            alpha=0.2)
    # Show top values
    ax.invert_yaxis()
    # Add annotation to bars
    for i in ax.patches:
        plt.text(i.get_width() + 0.2, i.get_y() + 0.5,
                 str(round((i.get_width()), 2)),
                 fontsize=10, fontweight='bold',
                 color='grey')
    # Add Plot Title
    ax.set_title(title,
                 loc='left', )
    plt.tight_layout()
    graph = get_graph()
    return graph
def quiz_analytics_bar(lables, values, title, xlabel, ylabel):
    """Return a base64-encoded PNG horizontal bar chart with axis labels.

    This 5-argument version shadows the 3-argument definition above; callers
    must also supply *xlabel* and *ylabel*.
    """
    fig, ax = plt.subplots(figsize=(15, 4))
    # Horizontal Bar Plot
    ax.barh(lables, values, color=['green'])
    # Hide the frame around the axes
    for s in ['top', 'bottom', 'left', 'right']:
        ax.spines[s].set_visible(False)
    # Remove x, y ticks and pad the labels
    ax.xaxis.set_ticks_position('none')
    ax.yaxis.set_ticks_position('none')
    ax.xaxis.set_tick_params(pad=5)
    ax.yaxis.set_tick_params(pad=10)
    # Light dashed gridlines
    ax.grid(b=True, color='grey',
            linestyle='-.', linewidth=0.5,
            alpha=0.2)
    # Show the largest value at the top
    ax.invert_yaxis()
    # Annotate each bar with its value
    for i in ax.patches:
        plt.text(i.get_width() + 0.2, i.get_y() + 0.5,
                 str(round((i.get_width()), 2)),
                 fontsize=10, fontweight='bold',
                 color='grey')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title(title,
                 loc='left', )
    # consistency fix: the sibling chart helpers call tight_layout() before
    # rendering; without it the added axis labels can be clipped.
    plt.tight_layout()
    graph = get_graph()
    # bug fix: close the figure so repeated calls do not leak pyplot figures.
    plt.close(fig)
    return graph
def quiz_analytics_filter_bar(lables,values,title,xlabel,ylabel):
    """Return a base64-encoded PNG horizontal bar chart for filtered analytics.

    Near-duplicate of the 5-argument quiz_analytics_bar: only the figure size
    (12x4 vs 15x4) and the bar annotation x offset (0.05 vs 0.2) differ.
    """
    fig, ax = plt.subplots(figsize=(12, 4))
    # Horizontal Bar Plot
    ax.barh(lables, values,color=['green'])
    # Remove axes splines
    for s in ['top', 'bottom', 'left', 'right']:
        ax.spines[s].set_visible(False)
    # Remove x, y Ticks
    ax.xaxis.set_ticks_position('none')
    ax.yaxis.set_ticks_position('none')
    # Add padding between axes and labels
    ax.xaxis.set_tick_params(pad=5)
    ax.yaxis.set_tick_params(pad=10)
    # Add x, y gridlines
    ax.grid(b=True, color='grey',
            linestyle='-.', linewidth=0.5,
            alpha=0.2)
    # Show top values
    ax.invert_yaxis()
    # Add annotation to bars
    for i in ax.patches:
        plt.text(i.get_width() + 0.05, i.get_y() + 0.5,
                 str(round((i.get_width()), 2)),
                 fontsize=10, fontweight='bold',
                 color='grey')
    # Add Plot Title
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title(title,
                 loc='left', )
    graph = get_graph()
    return graph
3398658 | <reponame>excalibur1987/team-management
from functools import wraps
from typing import Callable, List, Union
from flask_restx import Model, OrderedModel, fields
from flask_restx.namespace import Namespace
from app.database import BaseModel
from .parsers import offset_parser
class ExtendedNameSpace(Namespace):
    """flask-restx Namespace with a helper for paginated list endpoints."""

    def serialize_multi(
        self,
        restx_model: Union[Model, OrderedModel],
        db_model: BaseModel,
        description="",
    ):
        """Return a decorator for resource methods that return object lists.

        Registers an envelope model named "<restx_model.name>s" holding
        count/data/limit/offset and marshals the wrapped function's result
        into it; limit/offset come from the shared offset_parser.
        """
        extended_model = self.model(
            f"{restx_model.name}s",
            {
                "count": fields.Integer(),
                "data": fields.Nested(restx_model, as_list=True),
                "limit": fields.Integer(),
                "offset": fields.Integer(),
            },
        )

        def wrapper(fn: Callable):
            @wraps(fn)
            @self.marshal_with(extended_model)
            @self.response(200, description, model=extended_model)
            def wrapped(*args, **kwargs):
                args_ = offset_parser.parse_args()
                result: List[BaseModel] = fn(*args, **kwargs)
                return {
                    # NOTE(review): counts the whole table, not the filtered
                    # result set returned by fn -- confirm that is intended.
                    "count": db_model.query.count(),
                    "limit": args_.get("limit", 10) or 10,
                    "offset": args_.get("offset", 0) or 0,
                    "data": result,
                }

            return wrapped

        return wrapper
class Nested(fields.Nested):
    """fields.Nested variant that can project the nested model onto a subset
    of its keys via *only* (None keeps every field)."""

    def __init__(
        self,
        model,
        allow_null=False,
        skip_none=False,
        as_list=False,
        only: List[str] = None,
        **kwargs,
    ):
        # Remember the requested projection for introspection.
        self.only = only
        super().__init__(
            (
                model
                if only is None
                # keep dunder entries (model metadata) plus requested fields
                else dict(
                    (k, v)
                    for (k, v) in model.items()
                    if k.startswith("__") or k in only
                )
            ),
            allow_null=allow_null,
            skip_none=skip_none,
            as_list=as_list,
            **kwargs,
        )
class IndexedAttribute:
    """A named value that also remembers its position within its container."""

    def __init__(self, name, index) -> None:
        self.name, self.index = name, index

    def __repr__(self) -> str:
        # repr is deliberately just the bare name
        return self.name

    def default(self):
        """Return the plain name (used as a serialization default)."""
        return self.name
class SubscriptableEnum:
    """Expose a list of labels both as UPPER_SNAKE attributes and by index."""

    __list: List

    def __init__(self, list_: List[str]) -> None:
        entries = []
        for position, label in enumerate(list_):
            entry = IndexedAttribute(label, position)
            entries.append(entry)
            # e.g. "foo bar" becomes the attribute FOO_BAR
            setattr(self, label.upper().replace(" ", "_"), entry)
        self.__list = entries

    def __getitem__(self, i):
        if isinstance(i, int):
            return self.__list[i]
        # non-int keys are located by equality search in the entry list
        return self.__list[self.__list.index(i)]

    def get_items(self):
        """Return the underlying list of IndexedAttribute entries."""
        return self.__list

    items = property(get_items)
| StarcoderdataPython |
1793949 | <gh_stars>0
"""All datastore models live in this module"""
import datetime
from google.appengine.ext import ndb
class Torrent(ndb.Model):
    """A main model for representing an individual Torrent entry."""
    title = ndb.StringProperty(indexed=False, required=True)
    btih = ndb.StringProperty(indexed=False, required=True)  # Infohash
    dt = ndb.DateTimeProperty(required=True)  # Create/update time, as reported by tracker
    nbytes = ndb.IntegerProperty(indexed=False, required=True)  # Torrent data size, bytes
    description = ndb.TextProperty(required=True)
class Account(ndb.Model):
    """Represents tracker user account along with its session"""
    # Credentials for the remote tracker account.
    username = ndb.StringProperty(indexed=False, required=True)
    password = ndb.StringProperty(indexed=False, required=True)
    # Tracker-side numeric user id.
    userid = ndb.IntegerProperty(indexed=False, required=True)
    # Session cookies as a JSON mapping; may be unset (repr handles None).
    cookies = ndb.JsonProperty()

    def __repr__(self):
        return "<Account username='{}' userid='{}' cookies=[{}]>".format(
            self.username, self.userid, self.cookies and self.cookies.keys())
class Category(ndb.Model):
    """Represents category entry"""
    title = ndb.StringProperty(indexed=False, required=True)
    # Denormalized count of torrents belonging to this category.
    num_torrents = ndb.IntegerProperty(indexed=True, required=True, default=0)
    dirty = ndb.BooleanProperty(indexed=True, default=True)  # This flag is set when torrent is added to category
| StarcoderdataPython |
4835231 | <gh_stars>0
#!/usr/bin/env python3
from dataclasses import dataclass
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import Section, SymbolTableSection
from typing import List, Tuple, Dict, Generator, Union, Set
from collections import defaultdict
import os, sys
import json
## Configuration:
# sector size of the img file in bytes
SECTOR_SIZE = 512
# start address
MEM_START = 0x100
# address where the userspace binaries should be located (-1 to start directly after the kernel)
USR_BIN_START = -1
## end of config

# A set of sections that we want to include in the image
INCLUDE_THESE_SECTIONS = set((
    '.text', '.stack', '.bss', '.sdata', '.rdata', '.rodata',
    # bug fix: a missing comma after '.rodata' above used to merge it with
    # '.sbss' into a bogus '.rodata.sbss' entry via implicit string
    # concatenation, silently excluding .sbss sections from the image
    '.sbss', '.data', '.stack', '.init',
    '.fini', '.preinit_array', '.init_array',
    '.fini_array', '.rodata', '.thread_fini'
))

# these sections are empty, so we don't want to read the elf here
EMPTY_SECTIONS = set((
    '.bss', '.sbss', '.stack'
))

# this is the name of the global variable holding the list of loaded binaries
KERNEL_BINARY_TABLE = 'binary_table'

# loaded_binary struct size (4 integers)
KERNEL_BINARY_TABLE_ENTRY_SIZE = 4 * 4
# overwrite this function to generate the entries for the loaded binary list
def create_loaded_bin_struct(binid: int, entrypoint: int, start: int, end: int):
    """
    Creates the binary data to populate the KERNEL_BINARY_TABLE structs:
    four little-endian unsigned 32-bit integers, 16 bytes total.
    """
    packed = bytearray()
    for field in (binid, entrypoint, start, end):
        packed += field.to_bytes(4, 'little')
    return bytes(packed)
def overlaps(p1, l1, p2, l2) -> bool:
    """
    check if the intervals (p1, p1+l1) and (p2, p2+l2) overlap
    """
    if p1 <= p2 and p1 + l1 > p2:
        return True
    return p2 <= p1 and p2 + l2 > p1
class MemoryImageDebugInfos:
    """
    This defines the riscemu debug information format.
    See the riscemu project for more detail.
    """

    VERSION = '1'
    """
    Schema version
    """

    base: int = 0
    """
    The base address where the image starts. Defaults to zero.
    """

    sections: Dict[str, Dict[str, Tuple[int, int]]]
    """
    This dictionary maps a program and section to (start address, section length)
    """

    symbols: Dict[str, Dict[str, int]]
    """
    This dictionary maps a program and a symbol to a value
    """

    globals: Dict[str, Set[str]]
    """
    This dictionary contains the list of all global symbols of a given program
    """

    def __init__(self,
                 sections: Dict[str, Dict[str, Tuple[int, int]]],
                 symbols: Dict[str, Dict[str, int]],
                 globals: Dict[str, Set[str]],
                 base: int = 0
                 ):
        self.sections = sections
        self.symbols = symbols
        self.globals = globals
        self.base = base

    def serialize(self) -> str:
        """Serialize to a JSON string; sets/tuples become lists and
        defaultdicts become plain dicts."""
        def serialize(obj: any) -> str:
            if isinstance(obj, defaultdict):
                return dict(obj)
            if isinstance(obj, (set, tuple)):
                return list(obj)
            return "<<unserializable {}>>".format(getattr(obj, '__qualname__', '{unknown}'))

        return json.dumps(
            dict(
                sections=self.sections,
                symbols=self.symbols,
                globals=self.globals,
                base=self.base,
                VERSION=self.VERSION
            ),
            default=serialize,
            indent=2
        )

    def add_section(self, program: str, name: str, start: int, length: int):
        """Record that *program*'s section *name* spans (start, length)."""
        # bug fix: was "self.sectionss" (typo), which raised AttributeError;
        # also the length parameter was mis-annotated as str.
        self.sections[program][name] = (start, length)

    def add_symbol(self, program: str, name: str, val: int):
        """Record symbol *name* = *val* for *program*."""
        self.symbols[program][name] = val

    @classmethod
    def load(cls, serialized_str: str) -> 'MemoryImageDebugInfos':
        """Parse a string produced by serialize(), validating the version."""
        json_obj: dict = json.loads(serialized_str)

        if 'VERSION' not in json_obj:
            raise RuntimeError("Unknown MemoryImageDebugInfo version!")

        version: str = json_obj.pop('VERSION')

        # compare major version
        if version != cls.VERSION or version.split('.')[0] != cls.VERSION.split('.')[0]:
            raise RuntimeError(
                "Unknown MemoryImageDebugInfo version! This emulator expects version {}, debug info version {}".format(
                    cls.VERSION, version
                )
            )

        return MemoryImageDebugInfos(**json_obj)

    @classmethod
    def builder(cls) -> 'MemoryImageDebugInfos':
        """Return an empty instance whose members auto-create nested entries."""
        return MemoryImageDebugInfos(
            defaultdict(dict), defaultdict(dict), defaultdict(set)
        )
class Section:
    """A retained ELF section: its name, load address and raw payload bytes."""

    name: str
    start: int
    size: int
    data: bytes

    def __init__(self, sec):
        header = sec.header
        self.name = sec.name
        self.start = header.sh_addr
        self.size = header.sh_size
        if sec.name in EMPTY_SECTIONS:
            # sections listed as empty carry no file data; substitute zeroes
            self.data = bytes(header.sh_size)
        else:
            self.data = sec.data()
        assert self.size == len(self.data)

    def __repr__(self) -> str:
        return "Section[{}]:{}:{}\n".format(self.name, self.start, self.size)

    def __len__(self):
        return self.size
class Bin:
    """A RISC-V ELF binary: retained sections, symbol table and entry point."""

    name: str  # path of the elf file this was parsed from
    secs: List[Section]  # retained sections, sorted by load address
    symtab: Dict[str, int]  # symbol name -> st_value
    global_symbols: List[str]  # names of STB_GLOBAL symbols
    entry: int  # e_entry from the elf header
    start: int  # lowest retained section load address

    def __init__(self, name):
        self.name = name
        self.secs = list()
        self.symtab = dict()
        self.global_symbols = list()

        with open(self.name, 'rb') as f:
            elf = ELFFile(f)
            if not elf.header.e_machine == 'EM_RISCV':
                raise Exception("Not a RISC-V elf file!")

            self.entry = elf.header.e_entry

            for sec in elf.iter_sections():
                # keep only sections destined for the image
                if sec.name in INCLUDE_THESE_SECTIONS:
                    self.secs.append(Section(sec) )
                # harvest the symbol table(s) for debug info
                if isinstance(sec, SymbolTableSection):
                    for sym in sec.iter_symbols():
                        if not sym.name:
                            continue
                        self.symtab[sym.name] = sym.entry.st_value
                        if sym.entry.st_info.bind == 'STB_GLOBAL':
                            self.global_symbols.append(sym.name)

        self.secs = sorted(self.secs, key=lambda sec: sec.start)
        # NOTE(review): assumes at least one retained section; an elf without
        # any INCLUDE_THESE_SECTIONS entries would raise IndexError here.
        self.start = self.secs[0].start

    def __iter__(self):
        for x in self.secs:
            yield x

    def size(self):
        # total payload bytes over all retained sections
        return sum(sec.size for sec in self)
class MemImageCreator:
    """
    Interface for writing the img file
    """
    data: bytes  # the image assembled so far
    patches: List[Tuple[int, bytes]]  # queued (position, bytes) overwrites
    dbg_nfo: MemoryImageDebugInfos  # debug info collected while assembling

    def __init__(self):
        self.data = b''
        self.patches = list()
        self.dbg_nfo = MemoryImageDebugInfos.builder()

    def seek(self, pos):
        """Zero-pad the image forward until it is exactly *pos* bytes long."""
        if len(self.data) > pos:
            raise Exception("seeking already passed position!")
        if len(self.data) == pos:
            return
        print(f" - zeros {pos-len(self.data):8x} {len(self.data):x}:{pos:x}")
        # empty parent name -> padding is not recorded in the debug info
        self.put(bytes(pos - len(self.data)), '', '.empty')
        assert len(self.data) == pos

    def align(self, bound):
        """Zero-pad so the current image length is a multiple of *bound*."""
        if len(self.data) % bound != 0:
            self.put(bytes(bound - (len(self.data) % bound)), '', '.empty')
        assert len(self.data) % bound == 0

    def put(self, stuff: bytes, parent: str, name: str) -> int:
        """Append *stuff*, recording it as section *name* of program *parent*
        (skipped when *parent* is empty). Returns the write position."""
        pos = len(self.data)
        self.data += stuff
        if parent:
            self.dbg_nfo.sections[parent][name] = (pos, len(stuff))
        return pos

    def putBin(self, bin: Bin) -> int:
        """Append all sections of *bin* preserving their relative addresses,
        recording section/symbol debug info. Returns the load offset."""
        bin_start = len(self.data)
        for sec in bin:
            # place each section at its address relative to the binary start
            img_pos = bin_start + sec.start - bin.start
            self.seek(img_pos)
            print(f" - section {sec.name:<6} {img_pos:x}:{img_pos + sec.size:x}")
            self.put(sec.data, bin.name, sec.name)
        # relocate non-zero symbols to their image addresses
        self.dbg_nfo.symbols[bin.name] = {
            name: bin_start + val - bin.start
            for name, val in sorted(bin.symtab.items(), key=lambda x:x[1])
            if val != 0
        }
        self.dbg_nfo.globals[bin.name] = set(bin.global_symbols)
        return bin_start

    def patch(self, pos, bytes):
        """Queue an overwrite of the image at *pos*; applied during write().

        NOTE(review): the parameter name shadows the builtin ``bytes`` inside
        this method.
        """
        for ppos, pbytes in self.patches:
            if overlaps(ppos, len(pbytes), pos, len(bytes)):
                raise Exception("cant patch same area twice!")
        self.patches.append((pos, bytes))

    def write(self, fname):
        """
        write to a file
        """
        pos = 0
        print(f"writing binary image to {fname}")
        with open(fname, 'wb') as f:
            # interleave raw image data with queued patches in address order
            for patch_start, patch_data in sorted(self.patches, key=lambda e: e[0]):
                if pos < patch_start:
                    filler = patch_start - pos
                    f.write(self.data[pos : pos + filler])
                    print(f" - data {pos:x}:{pos+filler:x}")
                    pos += filler
                assert pos == patch_start
                f.write(patch_data)
                print(f" - patch {pos:x}:{pos+len(patch_data):x}")
                pos += len(patch_data)
            # flush any remaining data after the last patch
            if pos < len(self.data):
                print(f" - data {pos:x}:{len(self.data):x}")
                f.write(self.data[pos : len(self.data)])
            # pad the image out to a whole number of sectors
            if len(self.data) % SECTOR_SIZE != 0:
                print(f" - zeros {len(self.data):x}:{(SECTOR_SIZE - (len(self.data) % SECTOR_SIZE))+len(self.data):x}")
                f.write(bytes(SECTOR_SIZE - (len(self.data) % SECTOR_SIZE)))
        # done!
        print(f"writing debug info to {fname}.dbg")
        with open(fname + '.dbg', 'w') as f:
            f.write(self.dbg_nfo.serialize())
def package(kernel: str, binaries: List[str], out: str):
    """
    Main logic for creating the image file
    """
    img = MemImageCreator()
    # process kernel
    img.seek(MEM_START)
    kernel = Bin(kernel)
    kernel.name = 'kernel' # make sure kernel is marked kernel in debug symbols
    # image address of the kernel's loaded-binaries table (symbol address
    # relocated from the kernel's link base to MEM_START)
    bin_table_addr = kernel.symtab.get(KERNEL_BINARY_TABLE, 0) - kernel.start + MEM_START
    print(f"kernel binary loaded, binary table located at: {bin_table_addr:x} (symtab addr {kernel.symtab.get(KERNEL_BINARY_TABLE, '??'):x})")
    img.putBin(kernel)

    if USR_BIN_START > 0:
        img.seek(USR_BIN_START)

    binid = 0
    for bin_name in binaries:
        img.align(8) # align to eight bytes
        bin = Bin(bin_name)
        print(f"adding binary \"{bin.name}\"")
        start = img.putBin(bin)
        # patch this binary's descriptor into the kernel's binary table
        addr = bin_table_addr + (binid * KERNEL_BINARY_TABLE_ENTRY_SIZE)
        img.patch(addr, create_loaded_bin_struct(binid+1, bin.entry - bin.start + start, start, start + bin.size()))
        binid += 1
        print(f"  binary image")
        print(f"    entry:  {bin.entry:>6x} {bin.entry - bin.start + start:>6x}")
        print(f"    start:  {bin.start:>6x} {start:>6x}")

    img.write(out)
if __name__ == '__main__':
    # CLI: first argument is the kernel elf, last is the output image path,
    # everything in between is a userspace binary.
    if '--help' in sys.argv or len(sys.argv) == 1:
        print("package.py <kernel path> <user path> [<user path>...] <output path>\n\
\n\
Generate a memory image with the given kernel and userspace binaries.")
    else:
        print(f"creating image {sys.argv[-1]}")
        package(sys.argv[1], sys.argv[2:-1], sys.argv[-1])
| StarcoderdataPython |
4035 | import numpy as np
import scipy
import scipy.io
import pylab
import numpy
import glob
import pyfits
def mklc(t, nspot=200, incl=np.pi * 5. / 12., amp=1., tau=30.5, p=10.0):
    """Generate a light curve for dark, point-like star spots with no
    limb-darkening (simplified version of the class-based routines in
    spot_model.py).

    Parameters:
        t: array of observation times (unit of time: equatorial rotation
           period)
        nspot: desired number of spots present on the star at any one time
        incl: stellar inclination in radians
        amp: desired light curve amplitude
        tau: characteristic spot life-time
        p: equatorial rotation period

    Returns:
        res0: array [mean effective spot number, per-spot filling factor,
              achieved amplitude]
        res1: (4, len(t)) array with rows [time, total spot area,
              flux deficit (differential rotation), flux deficit (rigid)]

    Note: the previously used ``scipy.*`` numeric aliases (scipy.rand,
    scipy.pi, scipy.arcsin, ...) were removed from modern SciPy, which made
    this module fail at import (the inclination default used scipy.pi);
    NumPy equivalents are used instead, with identical semantics.
    """
    # fractional difference between equatorial and polar rotation period
    diffrot = 0.

    dur = (max(t) - min(t))

    # (crude estimate of) total number of spots needed during entire
    # time-series
    nspot_tot = int(nspot * dur / 2 / tau)

    # uniform distribution of spot longitudes
    lon = np.random.rand(nspot_tot) * 2 * np.pi

    # distribution of spot latitudes uniform in sin(latitude)
    lat = np.arcsin(np.random.rand(nspot_tot))

    # spot rotation rate optionally depends on latitude
    period = ((np.sin(lat) - 0.5) * diffrot + 1.0) * p
    period0 = np.ones(nspot_tot) * p

    # all spots have the same maximum area;
    # (crude estimate of) filling factor needed per spot
    ff = amp / np.sqrt(nspot)
    scale_fac = 1
    amax = np.ones(nspot_tot) * ff * scale_fac

    # all spots have the same evolution timescale
    decay = np.ones(nspot_tot) * tau

    # uniform distribution of spot peak times; start well before and end
    # well after the time-series limits (to avoid edge effects).
    # robustness fix: use tau directly (equals decay.max() whenever spots
    # exist) so an empty spot list does not crash on max() of an empty array.
    extra = 3 * tau
    pk = np.random.rand(nspot_tot) * (dur + 2 * extra) - extra

    # COMPUTE THE LIGHT CURVE
    time = np.array(t - min(t))
    area_tot = np.zeros_like(time)
    dF_tot = np.zeros_like(time)
    dF_tot0 = np.zeros_like(time)

    # add up the contributions of individual spots
    for i in range(nspot_tot):
        # Spot area: constant if it never peaks/decays, else Gaussian in time
        if (pk[i] == 0) + (decay[i] == 0):
            area = np.ones_like(time) * amax[i]
        else:
            area = amax[i] * \
                np.exp(-(time - pk[i])**2 / 2. / decay[i]**2)
        area_tot += area
        # Fore-shortening (with and without differential rotation)
        phase = 2 * np.pi * time / period[i] + lon[i]
        phase0 = 2 * np.pi * time / period0[i] + lon[i]
        mu = np.cos(incl) * np.sin(lat[i]) + \
            np.sin(incl) * np.cos(lat[i]) * np.cos(phase)
        mu0 = np.cos(incl) * np.sin(lat[i]) + \
            np.sin(incl) * np.cos(lat[i]) * np.cos(phase0)
        # spots on the far hemisphere contribute nothing
        mu[mu < 0] = 0.0
        mu0[mu0 < 0] = 0.0
        # Flux deficit
        dF_tot -= area * mu
        dF_tot0 -= area * mu0

    amp_eff = dF_tot.max() - dF_tot.min()
    nspot_eff = area_tot / scale_fac / ff

    res0 = np.array([nspot_eff.mean(), ff, amp_eff])
    res1 = np.zeros((4, len(time)))
    res1[0, :] = time
    res1[1, :] = area_tot
    res1[2, :] = dF_tot
    res1[3, :] = dF_tot0

    return res0, res1
| StarcoderdataPython |
186032 | import os
# Port the tornado server listens on.
TORNADO_PORT = 8888

# PostgreSQL connection settings.
POSTGRES = {
    'host': '127.0.0.1',
    'port': 5432,
    'user': 'admin',
    'password': '<PASSWORD>',  # NOTE(review): placeholder -- supply the real secret at deploy time
    'database': 'open_graph_links',
}

# presumably used when building absolute URLs -- confirm against callers
PROTOCOL = 'http'
HOST = '127.0.0.1'

# Absolute path of the directory containing this settings module.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Directory where downloaded media files are stored.
DOWNLOAD_DIR_NAME = 'media'
DOWNLOAD_DIR = os.path.join(BASE_DIR, DOWNLOAD_DIR_NAME)
| StarcoderdataPython |
"""Demo: build a custom ttk Notebook theme ("yummy") and show two tabs."""
import tkinter as tk
from tkinter import ttk

root = tk.Tk()

# Tab colours: pale green for idle tabs, red for the selected one.
mygreen = "#d2ffd2"
myred = "#dd0202"

# Derive a new theme from the built-in "alt" theme, restyling only the
# Notebook widget and its tabs.
style = ttk.Style()
style.theme_create("yummy", parent="alt", settings={
    "TNotebook": {"configure": {"tabmargins": [2, 5, 2, 0]}},
    "TNotebook.Tab": {
        "configure": {"padding": [5, 1], "background": mygreen},
        "map": {"background": [("selected", myred)],
                "expand": [("selected", [1, 1, 1, 0])]}}})
style.theme_use("yummy")

# Two empty tabs demonstrate the themed notebook.
note = ttk.Notebook(root)
f1 = ttk.Frame(note, width=300, height=200)
note.add(f1, text='First')
f2 = ttk.Frame(note, width=300, height=200)
note.add(f2, text='Second')
note.pack(expand=1, fill='both', padx=5, pady=5)

tk.Button(root, text='yummy!').pack(fill='x')
# BUG FIX: the last line was fused with a dataset artifact
# ("root.mainloop() | StarcoderdataPython"), a NameError at runtime.
root.mainloop()
# Discord ticket-bot bootstrap: create the client and the in-memory
# per-guild ticket configuration cache.
# BUG FIX: the first import line was fused with a dataset artifact
# ("25172 | import discord"), a syntax error.
import discord
import asyncio
import aiofiles
from discord.ext import commands

# All intents are required so the bot can read reactions and member data.
intents = discord.Intents.all()
# The bot answers to '!' or to being @mentioned.
client = commands.Bot(command_prefix=commands.when_mentioned_or('!'), intents=intents)
# guild_id -> [message_id, channel_id, category_id]; populated by !config.
# NOTE(review): this cache is not reloaded from ticket_configs.txt on
# startup, so configurations are lost on restart -- confirm intended.
client.ticket_configs = {}
@client.command()
async def ping(ctx):
    """Reply with the bot's websocket latency, rounded to milliseconds."""
    embed=discord.Embed(title="Bot Ping",description=f"My ping is {round(client.latency * 1000)}ms ",color=discord.Colour.gold())
    await ctx.reply(embed=embed)
@client.event
async def on_ready():
    """Log a startup message once the gateway connection is ready."""
    print("Bot is online")
@client.event
async def on_raw_reaction_add(payload):
    """Create a ticket channel when a user reacts with the ticket emoji.

    This event fires for every reaction in every guild, so bail out early
    unless it is the ticket emoji on this guild's configured ticket message.
    """
    # Ignore the bot's own reactions and any emoji other than the ticket.
    if payload.member.id == client.user.id or str(payload.emoji) != u"\U0001F3AB":
        return
    # BUG FIX: the old code indexed ticket_configs directly and raised
    # KeyError for any guild that never ran !config.
    config = client.ticket_configs.get(payload.guild_id)
    if config is None:
        return
    msg_id, channel_id, category_id = config
    if payload.message_id != msg_id:
        return
    guild = client.get_guild(payload.guild_id)
    # BUG FIX: the old for/break search left `category` bound to the LAST
    # category when the configured id was missing, creating the ticket in
    # the wrong place; now a missing category aborts instead.
    category = discord.utils.get(guild.categories, id=category_id)
    if category is None:
        return
    channel = guild.get_channel(channel_id)
    # NOTE(review): 'permission_synced' is not a documented kwarg of
    # create_text_channel -- confirm against the discord.py version in use.
    ticket_channel = await category.create_text_channel(
        f"ticket-{payload.member.display_name}",
        topic=f"Ticket for {payload.member.display_name}.",
        permission_synced=True)
    # Start an empty transcript file named after the channel id (used by
    # on_message and !close).  BUG FIX: use a context manager so the handle
    # is closed even on error.
    with open(f"tickets/{ticket_channel.id}.txt", "w"):
        pass
    # Let the creator see and use the new ticket channel.
    await ticket_channel.set_permissions(payload.member, read_messages=True, send_messages=True)
    mention_member = f"{payload.member.mention}"
    # Remove the user's reaction so the ticket message stays clean.
    message = await channel.fetch_message(msg_id)
    await message.remove_reaction(payload.emoji, payload.member)
    creation_embed=discord.Embed(title="Ticket Created",description="Thank you for creating a ticket and make sure that the ticket follows our ticket guidelines and explain the ticket creation reason in detail so our staff can help you.",color=discord.Colour.blurple())
    await ticket_channel.send(mention_member,embed=creation_embed)
@client.command()
async def close(ctx):
    """Close a ticket channel after confirmation and post its transcript."""
    channel = ctx.channel
    if not channel.name.startswith("ticket"):
        return  # only ticket channels can be closed
    await ctx.reply("Are you sure you want to close the ticket? Reply with ``confirm`` to close the ticket.")
    try:
        # Wait up to 10s for the command author to reply "confirm" here.
        await client.wait_for(
            "message",
            check=lambda m: m.channel == ctx.channel and m.author == ctx.author and m.content == "confirm",
            timeout=10)
    except asyncio.TimeoutError:
        # BUG FIX: the old code let the TimeoutError escape, surfacing as a
        # command error; now the close is simply aborted.
        return
    await channel.delete()
    closer = ctx.author.mention
    # Post the transcript (written by on_message) to the log channel.
    transcript_chan = client.get_channel(803399751487717396)
    await transcript_chan.send(closer, file=discord.File(f"tickets/{channel.id}.txt"))
@client.command()
@commands.has_permissions(administrator=True)
async def config(ctx, msg: discord.Message=None, category: discord.CategoryChannel=None): #Usage = !config "message_id category_id"; enable developer mode, right-click the ticket message for its id; the category id is where tickets are created.
    """Admin-only: register the ticket message and ticket category for this guild.

    Persists one "guild_id msg_id channel_id category_id" line per guild in
    ticket_configs.txt (replacing any previous line for this guild) and adds
    the ticket reaction to the message.
    """
    if msg is None or category is None: #If a message id or category id is not provided.
        error_embed=discord.Embed(title="Ticket Configuration Failed",description="Failed to configure. Either an argument is missing or an invalid argument was passed.",color=discord.Colour.red())
        await ctx.channel.send(embed=error_embed)
        return
    client.ticket_configs[ctx.guild.id] = [msg.id, msg.channel.id, category.id] #Resets the in-memory configuration for this guild
    # Rewrite ticket_configs.txt: this guild's new line first, then every
    # other guild's existing line.
    async with aiofiles.open("ticket_configs.txt", mode="r") as file:
        data = await file.readlines()
    async with aiofiles.open("ticket_configs.txt", mode="w") as file:
        await file.write(f"{ctx.guild.id} {msg.id} {msg.channel.id} {category.id}\n")
        for line in data:
            if int(line.split(" ")[0]) != ctx.guild.id:
                await file.write(line)
    await msg.add_reaction(u"\U0001F3AB") # Seed the ticket reaction; reacting with it creates a ticket.
    await ctx.channel.send("Successfully configured the ticket system.") # Confirmation that configuration succeeded.
@client.event
async def on_message(message):
    """Dispatch commands and append ticket-channel messages to transcripts."""
    await client.process_commands(message)
    # BUG FIX: DM channels have no .name attribute, so the old code raised
    # AttributeError on every direct message; getattr defaults safely.
    channel_name = getattr(message.channel, "name", "")
    if channel_name.startswith("ticket"):
        # Append "author : content" to the channel's transcript file.
        # BUG FIX: context manager closes the file even if write() raises.
        with open(f"tickets/{message.channel.id}.txt", "a") as transcript:
            transcript.write(f"{message.author} : {message.content}\n")
# Placeholder token -- supply the real bot token (preferably via env/config).
client.run("your_bot_token_here")
| StarcoderdataPython |
144798 | <filename>ca_fighter.py
#! /usr/bin/python
import copy
import curses
import pprint
import ca_equipment
import ca_timers
class ThingsInFight(object):
    '''
    Base class to manage timers, equipment, and notes for Fighters and Venues.
    '''
    def __init__(self,
                 name,           # string, name of the thing
                 group,          # string to index into world['fights']
                 details,        # world.details['fights'][name] (world is
                                 # a World object)
                 ruleset,        # Ruleset object
                 window_manager  # GmWindowManager object for reporting errors
                 ):
        self.name = name
        self.detailed_name = self.name
        self.group = group
        self.details = details
        self._ruleset = ruleset
        self._window_manager = window_manager

        # Equipment -- backed by the in-place 'stuff' list in the Game File.
        if 'stuff' not in self.details:  # Public to facilitate testing
            self.details['stuff'] = []
        self.equipment = ca_equipment.Equipment(self.name,
                                                self.details['stuff'])

        # Timers -- backed by the in-place 'timers' list in the Game File.
        if 'timers' not in details:
            details['timers'] = []
        self.timers = ca_timers.Timers(self.details['timers'],
                                       self,
                                       self._window_manager)

    #
    # Equipment related methods
    #

    def add_equipment(self,
                      new_item,             # dict describing new equipment
                      source=None,          # string: from where did equipment come
                      identified=None,      # ignored, here
                      container_stack=None  # list of container indexes
                      ):
        '''
        Accept a new item of equipment and put it in the list.

        Returns the new index of the equipment (for testing).
        '''
        # BUG FIX: '[]' as a default argument is built once and shared by
        # every call; use a None sentinel and a fresh list per call.
        if container_stack is None:
            container_stack = []
        return self.equipment.add(new_item, source, container_stack)

    def ask_how_many(self,
                     item_index,           # index into fighter's stuff list
                     count=None,           # number to remove (None if 'ask')
                     container_stack=None  # list of container indexes
                     ):
        '''
        Determine how many items to remove, prompting the user with an input
        box when |count| is None and more than one is available.
        '''
        # BUG FIX: mutable default argument replaced by a None sentinel.
        if container_stack is None:
            container_stack = []
        item = self.equipment.get_item_by_index(item_index, container_stack)
        if item is None:
            return 0
        current_item_count = 1 if 'count' not in item else item['count']
        if current_item_count < 1:
            return 0
        if current_item_count == 1:
            return current_item_count
        if count is not None:
            return count

        # Ask how many to remove

        title = 'How Many Items (%d Available)?' % current_item_count
        height = 1
        width = len(title)
        item_count = self._window_manager.input_box_number(height,
                                                           width,
                                                           title)
        if item_count is None:
            return 0
        if item_count > current_item_count:
            item_count = current_item_count
        return item_count

    def get_ruleset(self):
        '''Return the Ruleset object this creature was built with.'''
        return self._ruleset

    def remove_equipment(self,
                         item_index,           # index into fighter's stuff list
                         count=None,           # number to remove (None if 'ask')
                         container_stack=None  # list of container indexes
                         ):
        '''
        Discards an item from the Fighter's/Venue's equipment list.

        Returns: the discarded item
        '''
        # BUG FIX: mutable default argument replaced by a None sentinel.
        if container_stack is None:
            container_stack = []
        count = self.ask_how_many(item_index, count, container_stack)
        return self.equipment.remove(item_index, count, container_stack)

    #
    # Notes methods
    #

    def get_defenses_notes(self,
                           opponent  # Throw away Fighter object
                           ):
        '''
        Returns a tuple of strings describing:
            1) the current (based on weapons held, armor worn, etc) defensive
               capability of the Fighter, and
            2) the pieces that went into the above calculations

        Or, in the case of the base class, None.
        '''
        return None, None

    def get_long_summary_string(self):
        '''
        Returns a string that contains a short (but not the shortest)
        description of the state of the Fighter or Venue.
        '''
        return '%s' % self.name

    def get_notes(self):
        '''
        Returns a list of strings describing the current fighting state of
        the fighter (rounds available, aiming, posture, etc.).  The base
        class has no such state, so it returns None.
        '''
        return None

    def get_short_summary_string(self,
                                 fight_handler  # FightHandler, ignored
                                 ):
        '''
        Returns a string that contains the shortest description of the Fighter.
        '''
        return '%s' % self.name

    def get_to_hit_damage_notes(self,
                                opponent  # Throw away Fighter object
                                ):
        '''
        Returns a list of strings describing the current (using the current
        weapon, in the current posture, etc.) fighting capability (to-hit and
        damage) of a fighter.  It does nothing in the base class.
        '''
        return None

    #
    # Miscellaneous methods
    #

    def end_fight(self,
                  fight_handler  # FightHandler object
                  ):
        '''
        Perform the post-fight cleanup.  Remove all the timers, that sort of
        thing.

        Returns nothing.
        '''
        self.timers.clear_all()

    def start_fight(self):
        '''
        Configures the Fighter or Venue to start a fight.

        Returns nothing.
        '''
        pass

    #
    # Protected
    #

    def _explain_numbers(self,
                         fight_handler  # FightHandler object, ignored
                         ):
        '''
        Explains how the stuff in the descriptions were calculated.

        Returns [[{'text':x, 'mode':x}, {...}], [], [], ...]
            where the outer array contains lines
            each line is an array that contains a line-segment
            each line segment has its own mode so, for example, only SOME of
            the line is shown in bold
        '''
        return [[{'text': '(Nothing to explain)', 'mode': curses.A_NORMAL}]]
class Venue(ThingsInFight):
    '''
    Incorporates all of the information for a room (or whatever). This is the
    mechanism whereby items can be distributed in a room. The data
    is just a pointer to the Game File data so that it's edited in-place.
    '''
    name = '<< ROOM >>' # Describes the FIGHT object
    detailed_name = '<< ROOM: %s >>' # insert the group into the '%s' slot
    # Template record for a brand-new venue.
    # NOTE(review): this is a shared class-level dict -- callers must copy
    # it rather than mutate it; verify at call sites.
    empty_venue = {
        'stuff': [],
        'notes': [],
        'timers': []
    }
    def __init__(self,
                 group,          # string to index into world['fights']
                 details,        # world.details['fights'][name] (world is
                                 # a World object)
                 ruleset,        # Ruleset object
                 window_manager  # GmWindowManager object for error reporting
                 ):
        super(Venue, self).__init__(Venue.name,
                                    group,
                                    details,
                                    ruleset,
                                    window_manager)
        self.detailed_name = Venue.detailed_name % group
    def get_description(self,
                        char_detail,       # recepticle for character detail.
                                           # [[{'text','mode'},...],  # line 0
                                           #  [...],               ]  # line 1..
                        expand_containers  # Bool, ignored in the base class
                        ):
        '''
        Provides a text description of all of the components of a Venue:
        equipment, timers, then notes (each section bold-titled, with
        "(None)" when empty).

        Returns: nothing -- output is written to the |char_detail| variable
        '''
        # stuff -- equipment list, sorted by item name
        mode = curses.A_NORMAL
        char_detail.append([{'text': 'Equipment',
                             'mode': mode | curses.A_BOLD}])
        found_one = False
        if 'stuff' in self.details:
            for item in sorted(self.details['stuff'], key=lambda x: x['name']):
                found_one = True
                ca_equipment.EquipmentManager.get_description(
                        item, '', [], False, char_detail)
        if not found_one:
            char_detail.append([{'text': '  (None)', 'mode': mode}])
        # Timers -- first line of each timer indented with '  ', the rest '    '
        mode = curses.A_NORMAL
        char_detail.append([{'text': 'Timers', 'mode': mode | curses.A_BOLD}])
        found_one = False
        timers = self.timers.get_all()   # objects
        for timer in timers:
            found_one = True
            text = timer.get_description()
            leader = '  '
            for line in text:
                char_detail.append([{'text': '%s%s' % (leader, line),
                                     'mode': mode}])
                leader = '    '
        if not found_one:
            char_detail.append([{'text': '  (None)', 'mode': mode}])
        # Notes -- free-form strings from the Game File
        mode = curses.A_NORMAL
        char_detail.append([{'text': 'Notes', 'mode': mode | curses.A_BOLD}])
        found_one = False
        if 'notes' in self.details:
            for note in self.details['notes']:
                found_one = True
                char_detail.append([{'text': '  %s' % note, 'mode': mode}])
        if not found_one:
            char_detail.append([{'text': '  (None)', 'mode': mode}])
    def get_short_summary_string(self,
                                 fight_handler  # FightHandler, ignored
                                 ):
        '''
        Returns a string that contains the shortest description of the venue:
        its name plus flags for any equipment, timers, or notes present.
        '''
        fighter_string = '%s' % self.name
        if 'stuff' in self.details and len(self.details['stuff']) > 0:
            fighter_string += ' - EQUIP'
        if 'timers' in self.details and len(self.details['timers']) > 0:
            fighter_string += ' - TIMERS'
        if 'notes' in self.details and len(self.details['notes']) > 0:
            fighter_string += ' - NOTES'
        return fighter_string
    def get_state(self):
        '''Venues are always in the FIGHT pseudo-state.'''
        return Fighter.FIGHT
class Fighter(ThingsInFight):
    '''
    Incorporates all of the information for a PC, NPC, or monster. The data
    is just a pointer to the Game File data so that it's edited in-place.
    '''
    # Consciousness / presence state constants.  INJURED is never stored in
    # the Game File; it is derived from HP in get_fighter_state().
    (ALIVE,
     UNCONSCIOUS,
     DEAD,
     INJURED,   # Injured is separate since it's tracked by HP
     ABSENT,
     FIGHT) = range(6)
    MAX_WEAPONS = 2 # because we only have 2 arms
    # When adding a new weapon, make it preferred?
    NOT_PREFERRED, ADD_PREFERRED, REPLACE_PREFERRED = range(3)
    # Maps the 'state' string stored in the Game File to a state constant.
    conscious_map = {
        'alive': ALIVE,
        'unconscious': UNCONSCIOUS,
        'dead': DEAD,
        'Absent': ABSENT,   # Capitalized for menus
        'fight': FIGHT,
    }
    # Class-level empty-creature template; built lazily in __init__.
    strawman = None
    def __init__(self,
                 name,              # string
                 group,             # string = 'PCs' or some monster group
                 fighter_details,   # dict as in the Game File
                 ruleset,           # a Ruleset object
                 window_manager     # a GmWindowManager object for display
                                    #  windows
                 ):
        '''Wrap one creature's in-place Game File record.'''
        super(Fighter, self).__init__(name,
                                      group,
                                      fighter_details,
                                      ruleset,
                                      window_manager)
        # Build the class-level empty-creature template exactly once.
        if Fighter.strawman is None:
            Fighter.strawman = ruleset.make_empty_creature()
@staticmethod
def get_fighter_state(details):
'''
Returns the fighter state number. Note that Fighter.INJURED needs to
be calculated -- we don't store that in the Fighter as a separate
state.
'''
conscious_number = Fighter.conscious_map[details['state']]
if (conscious_number == Fighter.ALIVE and
details['current']['hp'] < details['permanent']['hp']):
return Fighter.INJURED
return conscious_number
@staticmethod
def get_name_from_state_number(number):
for name, value in Fighter.conscious_map.iteritems():
if value == number:
return name
return '<unknown>'
#
# Equipment related methods
#
def add_equipment(self,
new_item, # dict describing new equipment
source=None, # string: from where did equipment
# come (None for no change)
identified=None, # Bool: no change if |None|
container_stack=[]
):
'''
Accept a new item of equipment and put it in the list.
Returns the new index of the equipment.
'''
# if 'owners' doesn't exist or is None, then it's a mundane item and
# is indistinguishable from any similar item -- you don't need to know
# its provenance and you don't need to identify it.
if (identified is not None and 'owners' in new_item and
new_item['owners'] is not None):
new_item['identified'] = identified
# If we're adding a weapon or a piece of armor, is it the first of
# its kind?
is_weapon = True if ca_equipment.Weapon.is_weapon(new_item) else False
is_armor = True if 'armor' in new_item['type'] else False
before_item_count = self.equipment.get_item_count()
# Add the item
new_item_index = self.equipment.add(new_item,
source,
container_stack)
# if we're adding something to a container, then it can't be preferred.
if len(container_stack) > 0:
return new_item_index
# If we're adding the creature's first weapon or armor, make it the
# preferred weapon or armor. If it's not the creature's first,
# ask the user if it should be the creature's preferred.
if new_item_index is not None:
after_item_count = self.equipment.get_item_count()
if is_weapon:
if len(self.details['preferred-weapon-index']) > 0:
if before_item_count != after_item_count:
# Only ask if we've added a new weapon and not just
# bumped-up the count on a previous weapon
replace_preferred_menu = [
('no', Fighter.NOT_PREFERRED),
('replace existing',
Fighter.REPLACE_PREFERRED)]
if (len(self.details['preferred-weapon-index']) <
Fighter.MAX_WEAPONS):
replace_preferred_menu.append(
('add to existing list',
Fighter.ADD_PREFERRED))
replace_preferred, ignore = self._window_manager.menu(
'Make %s the preferred weapon?' %
new_item['name'],
replace_preferred_menu)
# If we're replacing an item, which one do we replace?
if replace_preferred == Fighter.REPLACE_PREFERRED:
remove_which_menu = []
for index in self.details['preferred-weapon-index']:
weapon = self.equipment.get_item_by_index(index)
remove_which_menu.append((weapon['name'], index))
remove_index, ignore = self._window_manager.menu(
'Replace which weapon?',
remove_which_menu)
if remove_index is None:
# I guess we're not replacing anything
replace_preferred = Fighter.NOT_PREFERRED
else:
self.details['preferred-weapon-index'].remove(
remove_index)
if replace_preferred != Fighter.NOT_PREFERRED:
self.details['preferred-weapon-index'].append(
new_item_index)
else:
self.details['preferred-weapon-index'].append(new_item_index)
if is_armor:
if len(self.details['preferred-armor-index']) > 0:
if before_item_count != after_item_count:
# Only ask if we've added a new item and not just
# bumped-up the count on a previous item
replace_preferred_menu = [
('no', Fighter.NOT_PREFERRED),
('replace existing',
Fighter.REPLACE_PREFERRED)]
replace_preferred_menu.append(
('add to existing list',
Fighter.ADD_PREFERRED))
replace_preferred, ignore = self._window_manager.menu(
'Make %s the preferred armor?' %
new_item['name'],
replace_preferred_menu)
# If we're replacing an item, which one do we replace?
if replace_preferred == Fighter.REPLACE_PREFERRED:
remove_which_menu = []
for index in self.details['preferred-armor-index']:
item = self.equipment.get_item_by_index(index)
remove_which_menu.append((item['name'], index))
remove_index, ignore = self._window_manager.menu(
'Replace which piece of armor?',
remove_which_menu)
if remove_index is None:
# I guess we're not replacing anything
replace_preferred = Fighter.NOT_PREFERRED
else:
self.details['preferred-armor-index'].remove(
remove_index)
if replace_preferred != Fighter.NOT_PREFERRED:
self.details['preferred-armor-index'].append(
new_item_index)
else:
self.details['preferred-armor-index'].append(new_item_index)
return new_item_index
def doff_armor_by_index(self,
index # Index of armor in fighter's 'stuff'
# list. 'None' removes current armor.
):
'''Removes armor.'''
if index not in self.details['armor-index']:
return # Not wearing the armor we want to taking off
self.details['armor-index'].remove(index)
# if we're doffing armor and there's natural armor, pick that
# one up.
for item_index, item in enumerate(self.details['stuff']):
if (item_index not in self.details['armor-index'] and
'natural-armor' in item and item['natural-armor']):
self.details['armor-index'].append(item_index)
def don_armor_by_index(self,
index # Index of armor in fighter's 'stuff'
# list. 'None' removes current armor.
):
'''Puts on armor.'''
if index not in self.details['armor-index']:
self.details['armor-index'].append(index)
def draw_weapon_by_index(self,
weapon_index # Index of weapon in fighter's 'stuff'
# list.
):
'''Draws weapon.'''
# NOTE: ONLY CALLED BY ACTIONS
if weapon_index is None:
self._window_manager.error(
['Trying to draw weapon with index <None>'])
return
weapon_indexes = self.get_current_weapon_indexes()
if len(weapon_indexes) < Fighter.MAX_WEAPONS: # [], [x]
self.details['weapon-index'].append(weapon_index)
elif weapon_indexes[0] is None: # [0, x]
weapon_indexes[0] = weapon_index
#else:
# error
def draw_weapon_by_name(self, # Public to support testing
name
):
# NOTE: ONLY USED FOR TESTING
'''
Draw weapon from sheath or holster.
Just used in testing.
Returns index, Weapon object
'''
index, item = self.equipment.get_item_by_name(name)
if index is not None:
self.details['weapon-index'].append(index)
return index, ca_equipment.Weapon(item)
    def end_fight(self,
                  fight_handler     # FightHandler object (for do_action)
                  ):
        '''
        Perform the post-fight cleanup.  Remove all the timers (base class),
        reload every ranged weapon if the 'reload-after-fight' option is set
        (PCs only), and restore whatever was in-hand beforehand.

        Returns nothing.
        '''
        super(Fighter, self).end_fight(fight_handler)
        reload_after_fight = self._ruleset.get_option('reload-after-fight')
        if (reload_after_fight is not None and reload_after_fight and
                self.group == 'PCs'):
            # Reload EACH weapon the person has.
            # Snapshot what's in-hand now so it can be re-drawn at the end.
            before_weapon_indexes = copy.deepcopy(self.get_current_weapon_indexes())
            item_count = self.equipment.get_item_count()
            for item_index in range(item_count):
                # Look at the next item owned by the fighter
                weapon = self.equipment.get_item_by_index(item_index)
                if 'ranged weapon' not in weapon['type']:
                    continue
                # Dump whatever the person is carrying
                weapon_indexes = copy.deepcopy(self.get_current_weapon_indexes())
                for weapon_index in weapon_indexes:
                    self._ruleset.do_action(
                            self,
                            {'action-name': 'holster-weapon',
                             'weapon-index': weapon_index,
                             'notimer': True},
                            fight_handler,
                            logit=False)
                # Now, draw the next ranged weapon and reload it
                self._ruleset.do_action(self,
                                        {'action-name': 'draw-weapon',
                                         'weapon-index': item_index,
                                         'comment': 'Reloading after fight',
                                         'notimer': True,
                                         'quiet': True},
                                        fight_handler,
                                        logit=False)
                self._ruleset.do_action(self,
                                        {'action-name': 'reload',
                                         'comment': 'Reloading after fight',
                                         'notimer': True,
                                         'quiet': True},
                                        fight_handler,
                                        logit=False)
            # Holster whatever the person is carrying and, then, re-draw
            # "current" weapon
            after_weapon_indexes = copy.deepcopy(self.get_current_weapon_indexes())
            for weapon_index in after_weapon_indexes:
                self._ruleset.do_action(
                        self,
                        {'action-name': 'holster-weapon',
                         'weapon-index': weapon_index,
                         'notimer': True},
                        fight_handler,
                        logit=False)
            for weapon_index in before_weapon_indexes:
                self._ruleset.do_action(self,
                                        {'action-name': 'draw-weapon',
                                         'weapon-index': weapon_index,
                                         'comment': 'Reloading after fight',
                                         'notimer': True,
                                         'quiet': True},
                                        fight_handler,
                                        logit=False)
def get_current_armor_indexes(self):
'''
Gets the armor the Fighter is wearing.
Returns a tuple:
1) a dict (from the Game File)
2) index of the armor
'''
if 'armor-index' not in self.details:
return []
return self.details['armor-index']
def get_current_weapon_indexes(self):
if 'weapon-index' not in self.details:
return []
return self.details['weapon-index']
def get_current_weapons(self):
weapon_indexes = self.get_current_weapon_indexes()
weapon_list = []
for weapon_index in weapon_indexes:
if weapon_index is None:
weapon_list.append(None)
else:
item = self.equipment.get_item_by_index(weapon_index)
weapon = ca_equipment.Weapon(item)
weapon_list.append(weapon)
return weapon_list
def get_items_from_indexes(self,
indexes # list of indexes in self.details.stuff
):
'''
Gets the items corresponding to indexes into the Fighter's stuff
Returns a list of a dict (from the Game File)
'''
result = []
for index in indexes:
item = self.equipment.get_item_by_index(index)
result.append(item)
return result
def get_preferred_item_indexes(self):
'''
Returns a list of indexes of preferred weapons and armor.
'''
result = []
if len(self.details['preferred-armor-index']) > 0:
result.extend(self.details['preferred-armor-index'])
if len(self.details['preferred-weapon-index']) > 0:
for item in self.details['preferred-weapon-index']:
if item not in result:
result.append(item)
return result
def holster_weapon_by_index(self,
weapon_index # Index of weapon in fighter's 'stuff'
# list.
):
'''Holsters weapon.'''
# NOTE: ONLY CALLED FROM AN ACTION
# Make sure we have the weapon in question
try:
index = self.details['weapon-index'].index(weapon_index)
except ValueError:
item = self.equipment.get_item_by_index(weapon_index)
self._window_manager.error(
['Trying to holster non-drawn weapon: %s' % item['name']])
return
# Actually remove the weapon
# NOTE: if you're removing the primary hand weapon, the off-hand weapon
# is moved to the primary location. This makes sense unless the
# primary hand is injured. This is a weird enough situation and hard
# enough to handle that I'm not supporting it just now. If I did, it
# would look something like this:
# if index == len(self.details['weapon-index']) - 1:
# self.details['weapon-index'].pop()
# else:
# self.details['weapon-index'][index] = None
self.details['weapon-index'].pop(index)
# If there're no weapons left, add natural weapons (if applicable)
if len(self.details['weapon-index']) == 0:
for item_index, item in enumerate(self.details['stuff']):
if 'natural-weapon' in item and item['natural-weapon']:
self.details['weapon-index'].append(item_index)
#def print_me(self):
# print '-- Fighter (%s, %s) --' % (self.name, self.group)
# PP.pprint(self.details)
def remove_equipment(self,
index_to_remove, # <int> index into Equipment list
count=None, # number to remove (None if 'ask')
container_stack=[]
):
'''
Discards an item from the Fighter's equipment list.
Returns: the discarded item
'''
item = self.equipment.get_item_by_index(index_to_remove,
container_stack)
if 'natural-weapon' in item and item['natural-weapon']:
return None # can't remove a natural weapon
if 'natural-armor' in item and item['natural-armor']:
return None # can't remove a natural armor
before_item_count = self.equipment.get_item_count(container_stack)
count = self.ask_how_many(index_to_remove, count, container_stack)
item = self.equipment.remove(index_to_remove, count, container_stack)
after_item_count = self.equipment.get_item_count(container_stack)
# Adjust indexes into the list if the list changed.
if len(container_stack) == 0 and before_item_count != after_item_count:
# Remove weapon from current weapon list
for index_in_weapons, index_in_stuff in enumerate(
self.details['weapon-index']):
if index_to_remove == index_in_stuff:
self.details['weapon-index'].remove(index_in_stuff)
elif index_to_remove < index_in_stuff:
self.details['weapon-index'][index_in_weapons] -= 1
# Remove weapon from preferred weapons list
for index_in_weapons, index_in_stuff in enumerate(
self.details['preferred-weapon-index']):
if index_to_remove == index_in_stuff:
self.details['preferred-weapon-index'].remove(index_in_stuff)
elif index_to_remove < index_in_stuff:
self.details['preferred-weapon-index'][index_in_weapons] -= 1
# Remove armor from current armor list
for index_in_armor, index_in_stuff in enumerate(
self.details['armor-index']):
if index_to_remove == index_in_stuff:
self.details['armor-index'].remove(index_in_stuff)
elif index_to_remove < index_in_stuff:
self.details['armor-index'][index_in_armor] -= 1
# Remove armor from preferred armor list
for index_in_armor, index_in_stuff in enumerate(
self.details['preferred-armor-index']):
if index_to_remove == index_in_stuff:
self.details['preferred-armor-index'].remove(index_in_stuff)
elif index_to_remove < index_in_stuff:
self.details['preferred-armor-index'][index_in_armor] -= 1
return item
#
# Notes related methods
#
def get_defenses_notes(self,
opponent # Fighter object
):
'''
Returns a tuple of strings describing:
1) the current (based on weapons held, armor worn, etc) defensive
capability of the Fighter, and
2) the pieces that went into the above calculations
'''
defense_notes, defense_why = self._ruleset.get_fighter_defenses_notes(
self,
opponent)
return defense_notes, defense_why
def get_description(self,
output, # recepticle for character detail.
# [[{'text','mode'},...], # line 0
# [...], ] # line 1...
expand_containers # Bool
):
'''
Provides a text description of a Fighter including all of the
attributes (current and permanent), equipment, etc.
Returns: nothing. The output is written to the |output| variable.
'''
self._ruleset.get_character_description(self,
output,
expand_containers)
def get_long_summary_string(self):
'''
Returns a string that contains a short (but not the shortest)
description of the state of the Fighter.
'''
# TODO (eventually): this is ruleset-based
fighter_string = '%s HP: %d/%d FP: %d/%d' % (
self.name,
self.details['current']['hp'],
self.details['permanent']['hp'],
self.details['current']['fp'],
self.details['permanent']['fp'])
return fighter_string
def get_notes(self):
'''
Returns a list of strings describing the current fighting state of the
fighter (rounds available, whether s/he's aiming, current posture,
etc.)
'''
notes = self._ruleset.get_fighter_notes(self)
return notes
def get_short_summary_string(self,
fight_handler # FightHandler, ignored
):
'''
Returns a string that contains the shortest description of the Fighter.
'''
# TODO (eventually): this is ruleset based
fighter_string = '%s HP:%d/%d' % (self.name,
self.details['current']['hp'],
self.details['permanent']['hp'])
if 'label' in self.details and self.details['label'] is not None:
fighter_string += ' - %s' % self.details['label']
if self.is_dead():
fighter_string += ' - DEAD'
elif 'stunned' in self.details and self.details['stunned']:
fighter_string += ' - STUNNED'
else:
if self.timers.is_busy():
fighter_string += ' - BUSY'
if fight_handler.is_fighter_holding_init(self.name, self.group):
fighter_string += ' - HOLDING INIT'
return fighter_string
def get_to_hit_damage_notes(self,
opponent # Fighter object
):
'''
Returns a list of strings describing the current (using the current
weapon, in the current posture, etc.) fighting capability (to-hit and
damage) of the fighter.
'''
notes = self._ruleset.get_fighter_to_hit_damage_notes(self, opponent)
return notes
#
# Miscellaneous methods
#
def can_finish_turn(self,
fight_handler # FightHandler object
):
'''
If a Fighter has done something this turn, we can move to the next
Fighter. Otherwise, the Fighter should do something before we go to
the next Fighter.
Returns: <bool> telling the caller whether this Fighter needs to do
something before we move on.
'''
return self._ruleset.can_finish_turn(self, fight_handler)
def end_turn(self,
fight_handler # FightHandler object
):
'''
Performs all of the stuff required for a Fighter to end his/her
turn.
Returns: nothing
'''
self._ruleset.end_turn(self, fight_handler)
self.timers.remove_expired_kill_dying()
def get_state(self):
return Fighter.get_fighter_state(self.details)
def is_absent(self):
return True if self.details['state'] == 'Absent' else False
def is_conscious(self):
# NOTE: 'injured' is not stored in self.details['state']
return True if self.details['state'] == 'alive' else False
def is_dead(self):
return True if self.details['state'] == 'dead' else False
def set_consciousness(self,
conscious_number, # <int> See Fighter.conscious_map
fight_handler
):
'''
Sets the state (alive, unconscious, dead, absent, etc.) of the
Fighter.
Returns nothing.
'''
# NOTE: ONLY CALLED FROM ACTION OR TESTING
for state_name, state_num in Fighter.conscious_map.iteritems():
if state_num == conscious_number:
self.details['state'] = state_name
break
if not self.is_conscious():
self.details['opponent'] = None # unconscious men fight nobody
weapon_indexes = self.get_current_weapon_indexes()
for index in weapon_indexes:
# unconscious men don't hold stuff
self._ruleset.do_action(
self,
{'action-name': 'holster-weapon',
'weapon-index': index,
'notimer': True},
fight_handler,
logit=False)
def start_fight(self):
'''
Configures the Fighter to start a fight.
Returns nothing.
'''
# NOTE: we're allowing health to still be messed-up, here
# NOTE: person may go around wearing armor -- no need to reset
self.details['opponent'] = None
if self.group == 'PCs':
if ('fight-notes' in self.details and
self.details['fight-notes'] is not None):
self.details['fight-notes'] = []
if ('label' in self.details and self.details['label'] is not None):
self.details['label'] = None
self._ruleset.start_fight(self)
    def start_turn(self,
                   fight_handler # FightHandler object
                   ):
        '''
        Performs all of the stuff required for a Fighter to start his/her
        turn. Handles timer stuff: timers are decremented, expired ones
        removed, and any 'busy' timer is reported to the user.

        Returns: nothing
        '''
        self._ruleset.start_turn(self, fight_handler)
        self.timers.decrement_all()
        self.timers.remove_expired_keep_dying()
        # If a remaining timer marks this Fighter as busy, show its
        # description and record a 'busy' action so the turn can pass
        # without further input.
        for timer in self.timers.get_all():
            if 'busy' in timer.details and timer.details['busy']:
                window_text = []
                lines = timer.get_description()
                for line in lines:
                    window_text.append([{'text': line,
                                         'mode': curses.A_NORMAL}])
                self._window_manager.display_window(
                    ('%s is busy' % self.name),
                    window_text)
                # Allow the fighter to continue without doing anything since
                # s/he's already busy
                self.details['actions_this_turn'].append('busy')
                fight_handler.add_to_history(
                    {'comment': '(%s) is busy this round' % self.name})
def toggle_absent(self):
'''
Toggles the consciousness state between absent and alive.
Returns nothing.
'''
if self.details['state'] == 'Absent':
self.details['state'] = 'alive'
else:
self.details['state'] = 'Absent'
#
# Protected and Private Methods
#
    def _explain_numbers(self,
                         fight_handler # FightHandler object
                         ):
        '''
        Explains how the stuff in the descriptions were calculated.

        Returns [[{'text':x, 'mode':x}, {...}], [], [], ...]
            where the outer array contains lines
            each line is an array that contains a line-segment
            each line segment has its own mode so, for example, only SOME of
            the line is shown in bold
        '''
        weapons = self.get_current_weapons()
        why_opponent = fight_handler.get_opponent_for(self)
        all_lines = []
        # One explanation section per currently wielded weapon.
        for weapon in weapons:
            if weapon is None:
                continue
            lines = self._explain_one_weapon_numbers(weapon, why_opponent)
            all_lines.extend(lines)
        # Append the explanation of the defensive numbers, if there is one.
        ignore, defense_why = self.get_defenses_notes(why_opponent)
        if defense_why is not None:
            lines = [[{'text': x,
                       'mode': curses.A_NORMAL}] for x in defense_why]
            all_lines.extend(lines)
        return all_lines
    def _explain_one_weapon_numbers(self,
                                    weapon, # Weapon object
                                    why_opponent # Fighter object
                                    ):
        '''
        Explains how the stuff in the descriptions (for only one weapon) were
        calculated.

        Returns [[{'text':x, 'mode':x}, {...}], [], [], ...]
            where the outer array contains lines
            each line is an array that contains a line-segment
            each line segment has its own mode so, for example, only SOME of
            the line is shown in bold
        '''
        lines = []
        if self._ruleset.does_weapon_use_unarmed_skills(weapon):
            # Unarmed combat: the ruleset supplies the full explanation.
            unarmed_info = self._ruleset.get_unarmed_info(self,
                                                          why_opponent,
                                                          weapon)
            lines = [[{'text': x,
                       'mode': curses.A_NORMAL}] for x in unarmed_info['why']]
        else:
            #lines.extend([[{'text': 'Weapon: "%s"' % weapon.name,
            #                'mode': curses.A_NORMAL}]])
            # Armed combat: only explain numbers when the fighter actually
            # has a usable skill for this weapon.
            if self.get_best_skill_for_weapon(weapon.details) is not None:
                # To-Hit
                ignore, to_hit_why = self._ruleset.get_to_hit(self,
                                                              why_opponent,
                                                              weapon)
                lines.extend([[{'text': x,
                                'mode': curses.A_NORMAL}] for x in to_hit_why])
                # Damage
                ignore, damage_why = self._ruleset.get_damage(self, weapon)
                lines.extend([[{'text': x,
                                'mode': curses.A_NORMAL}] for x in damage_why])
        # Any free-form notes attached to the weapon go last, indented.
        if weapon.notes() is not None:
            lines.extend([[{'text': '  %s' % weapon.notes(),
                            'mode': curses.A_NORMAL}]])
        return lines
def get_best_skill_for_weapon(self,
weapon # dict
):
# skills = [{'name': name, 'modifier': number}, ...]
#if weapon['skill'] in self.details['skills']:
# best_skill = weapon['skill']
# best_value = self.details['skills'][best_skill]
#else:
# return None
'''
Finds the best skill for this fighter and this weapon.
Returns None if no skill matching the given weapon was found, else
dict: {'name': best_skill, 'value': best_value}
'''
best_skill = None
best_value = None
for skill_camel, value in weapon['skill'].iteritems():
skill_lower = skill_camel.lower()
found_skill = False
if skill_camel in self.details['skills']:
value += self.details['skills'][skill_camel]
found_skill = True
elif skill_lower in self.details['current']:
value += self.details['current'][skill_lower]
found_skill = True
if found_skill and (best_value is None or value > best_value):
best_value = value
best_skill = skill_camel
if best_skill is None or best_value is None:
return None
return {'name': best_skill, 'value': best_value}
| StarcoderdataPython |
176341 | #!/usr/bin/env python3
import sys
import subprocess
from .kast import *
from .kastManip import *
from .kast import _notif, _warning, _fatal
def ruleHasId(sentence, ruleIds):
    """Accept *sentence* unless it is a rule whose UNIQUE_ID attribute is
    missing or not listed in *ruleIds* (non-rule sentences always pass)."""
    if not isKRule(sentence):
        return True
    uid = getAttribute(sentence, 'UNIQUE_ID')
    return uid is not None and uid in ruleIds
def syntaxHasKLabel(sentence, klabels):
    """Accept *sentence* unless it is a production whose klabel is not
    listed in *klabels* (anything without a klabel always passes)."""
    if not isKProduction(sentence):
        return True
    if 'klabel' not in sentence:
        return True
    return sentence['klabel'] in klabels
def keepSentences(kOuter, filter):
    """Recursively rebuild *kOuter*, keeping only the local sentences for
    which *filter* returns True; any other node is returned unchanged."""
    att = kOuter['att'] if 'att' in kOuter else None
    if isKDefinition(kOuter):
        newModules = [keepSentences(mod, filter) for mod in kOuter['modules']]
        requires = kOuter['requires'] if 'requires' in kOuter else None
        return KDefinition(kOuter['mainModule'], newModules, requires = requires, att = att)
    if isKFlatModule(kOuter):
        kept = [sent for sent in kOuter['localSentences'] if filter(sent)]
        return KFlatModule(kOuter['name'], kOuter['imports'], kept, att = att)
    return kOuter
def collectKLabels(kast):
    """Return the labels of every KApply node in *kast* (bottom-up
    traversal order, duplicates preserved)."""
    labels = []
    def _collectKLabels(_kast):
        # Visitor: record the label of each application node.
        if isKApply(_kast):
            labels.append(_kast['label'])
    collectBottomUp(kast, _collectKLabels)
    return labels
def collectKLabelsFromRules(definition):
    """Gather every klabel used in any rule body of any module of
    *definition* (duplicates preserved, in traversal order)."""
    used_labels = []
    for module in definition['modules']:
        rules = (sent for sent in module['localSentences'] if isKRule(sent))
        for rule in rules:
            used_labels.extend(collectKLabels(rule['body']))
    return used_labels
def minimizeDefinition(definition, rulesList):
    """Strip *definition* down to just the rules whose UNIQUE_ID is in
    *rulesList*, plus only the syntax productions whose klabels those
    rules actually use."""
    new_definition = keepDefinitionModulesOnly(definition)
    new_definition = keepSentences(new_definition, lambda sent: ruleHasId(sent, rulesList))
    # Two passes: first drop unwanted rules, then drop now-unused syntax.
    used_labels = collectKLabelsFromRules(new_definition)
    new_definition = keepSentences(new_definition, lambda sent: syntaxHasKLabel(sent, used_labels))
    return new_definition
| StarcoderdataPython |
4821101 | import json
import logging
import os
# Get environment variables
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper()  # e.g. DEBUG/INFO/WARNING

# Configure logging on the root logger (shared by the whole module).
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
def remove_policy(input_path: str, output_path: str):
    """Remove all IAM policies from a CloudFormation json template

    CDK implementation of CodePipelines does not respect the CF option to leave a role blank
    to automatically default to the execution role.
    for reference, check https://github.com/aws/aws-cdk/issues/14887

    :param input_path: path of the template to read
    :param output_path: path the cleaned template is written to
    """
    log = logging.getLogger()  # same root logger the module configures

    with open(input_path, "r") as f:
        t = json.load(f)

    # Remove every AWS::IAM::Policy resource.
    policy_list = [
        k for k in t["Resources"] if t["Resources"][k]["Type"] == "AWS::IAM::Policy"
    ]
    for p in policy_list:
        log.debug(f"Removing Policy {p}")
        del t["Resources"][p]

    # Drop references to the removed policies from DependsOn lists.
    depends_on = [k for k in t["Resources"] if "DependsOn" in t["Resources"][k]]
    for d in depends_on:
        deps = t["Resources"][d]["DependsOn"]
        for p in policy_list:
            if p in deps:
                log.debug(f"Removing DependsOn {p}")
                deps.remove(p)
        # BUG FIX: the emptiness check used to run *inside* the policy loop,
        # so deleting an emptied DependsOn entry and then testing the next
        # policy raised KeyError.  Check once, after all removals.
        if len(deps) == 0:
            del t["Resources"][d]["DependsOn"]

    # Save file back
    log.info(f"Writing template to: {output_path}")
    with open(output_path, "w") as f:
        json.dump(t, f, indent=2)
if __name__ == "__main__":
remove_policy(
"cdk.out/drift-pipeline.template.json",
"cdk.out/drift-pipeline-clean.template.json",
)
| StarcoderdataPython |
3210967 | #-*- codeing: utf-8 -*-
import sys
"""
tip
ord()
chr()
input
Kingdom
output
ASCII code for 'K' is 75
ASCII code for 'i' is 105
ASCII code for 'n' is 110
ASCII code for 'g' is 103
ASCII code for 'd' is 100
ASCII code for 'o' is 111
ASCII code for 'm' is 109
713
"""
if __name__ == '__main__':
    # Read one line from stdin, report the ASCII code of each character,
    # then print the sum of all the codes.
    line = input()
    total = 0
    for ch in line:
        code = ord(ch)
        print("ASCII code for '%s' is %d" % (ch, code))
        total += code
    print(total)
| StarcoderdataPython |
3258083 | from scse.controller import miniscot as miniSCOT
from scse.default_run_parameters.national_grid_default_run_parameters import DEFAULT_RUN_PARAMETERS
class miniSCOTnotebook():
    """Notebook-friendly driver for a miniSCOT SupplyChainEnvironment.

    Constructing an instance immediately initialises (and resets) the
    environment via start(); the simulation is then advanced with next()
    (one step) or run() (until a break-point or the time horizon).
    """

    def __init__(
        self,
        simulation_seed=DEFAULT_RUN_PARAMETERS.simulation_seed,
        start_date=DEFAULT_RUN_PARAMETERS.start_date,
        time_increment=DEFAULT_RUN_PARAMETERS.time_increment,
        time_horizon=DEFAULT_RUN_PARAMETERS.time_horizon,
        num_batteries=DEFAULT_RUN_PARAMETERS.num_batteries,
        max_battery_capacity=DEFAULT_RUN_PARAMETERS.max_battery_capacity,
        battery_penalty=DEFAULT_RUN_PARAMETERS.battery_penalty,
        discharge_discount=DEFAULT_RUN_PARAMETERS.discharge_discount,
        charging_discount=DEFAULT_RUN_PARAMETERS.charging_discount,
        surge_modulator=DEFAULT_RUN_PARAMETERS.surge_modulator,
        solar_surge_modulator=DEFAULT_RUN_PARAMETERS.solar_surge_modulator,
        surge_scenario=DEFAULT_RUN_PARAMETERS.surge_scenario
    ):
        # Profile and ASIN selection always come from the defaults.
        self.profile = DEFAULT_RUN_PARAMETERS.run_profile
        self.asin_selection = DEFAULT_RUN_PARAMETERS.asin_selection
        # Keep every run parameter on the instance for later inspection.
        self.simulation_seed = simulation_seed
        self.start_date = start_date
        self.time_increment = time_increment
        self.time_horizon = time_horizon
        self.num_batteries = num_batteries
        self.max_battery_capacity = max_battery_capacity
        self.battery_penalty = battery_penalty
        self.discharge_discount = discharge_discount
        self.charging_discount = charging_discount
        self.surge_modulator = surge_modulator
        self.solar_surge_modulator = solar_surge_modulator
        self.surge_scenario = surge_scenario
        # Build and reset the environment immediately.
        self.start(simulation_seed=self.simulation_seed,
                   start_date=self.start_date,
                   time_increment=self.time_increment,
                   time_horizon=self.time_horizon,
                   num_batteries=self.num_batteries,
                   asin_selection=self.asin_selection,
                   profile=self.profile,
                   max_battery_capacity=self.max_battery_capacity,
                   battery_penalty=self.battery_penalty,
                   discharge_discount=self.discharge_discount,
                   charging_discount=self.charging_discount,
                   surge_modulator=self.surge_modulator,
                   solar_surge_modulator=self.solar_surge_modulator,
                   surge_scenario=self.surge_scenario
                   )
        # The cumulative reward at each time step i.e. the episode reward for each time-step
        self.cum_reward = []

    def start(self, **run_parameters):
        """(Re)build the environment from *run_parameters* and reset the agents."""
        self.horizon = run_parameters['time_horizon']
        self.actions = []
        self.breakpoints = []
        self.env = miniSCOT.SupplyChainEnvironment(**run_parameters)
        self.context, self.state = self.env.get_initial_env_values()
        self.env.reset_agents(self.context, self.state)

    def next(self):
        """Execute a single time unit."""
        self.state, self.actions, self.reward = self.env.step(
            self.state, self.actions)

    def run(self):
        """Run simulation until the first break-point or, if none are enabled, until the end of time (the specified horizon)."""
        for t in range(self.state['clock'], self.horizon):
            if t in self.breakpoints:
                break
            else:
                self.state, self.actions, self.reward = self.env.step(
                    self.state, self.actions)
                # Track the running episode reward for plotting/inspection.
                self.cum_reward.append(self.reward.get(
                    'episode_reward').get('total'))
        return self.cum_reward
# Module-level instance created on import, so a notebook can use `m` directly.
m = miniSCOTnotebook()

# Can use the following line to step through simulation
# m.next()

# Example below of injecting an action into the simulation. Note this will raise an error since CHA1 doesn't have 5 units onhand!
# action={"schedule": 0, "type": "inbound_shipment", "asin": "9780465024759", "origin": "Manufacturer", "destination": "Newsvendor", "quantity": 5}
# actions = [action]
# m.env.step(m.state, actions)
| StarcoderdataPython |
3303210 | <reponame>ahnitz/pegasus
#!/usr/bin/env python3
from Pegasus.api import *
# --- Workflow -----------------------------------------------------------------
# Two 2-second sleep jobs, forced to run strictly one after the other.
wf = Workflow("sleep-wf")

sleep_1 = Job("sleep").add_args(2)
sleep_2 = Job("sleep").add_args(2)
wf.add_jobs(sleep_1, sleep_2)

# sleep_2 only starts once sleep_1 has finished.
wf.add_dependency(job=sleep_1, children=[sleep_2])
wf.write(file="inner_sleep_workflow.yml") | StarcoderdataPython |
1653187 | <reponame>mwregan2/MiriTE
#!/usr/bin/env python
#
# Script 'convert_droop'
#
# :History:
#
# 20 Feb 2013: Created
# 26 Feb 2013: Removed "inputtype" input parameter. Added SUBARRAY as a
# header keyword to copy over.
# 25 Jun 2013: Added astropy.io.ascii as an alternative to asciitable.
# 22 Aug 2013: columnnames renamed to fieldnames.
# 03 Sep 2013: DETECTOR field removed from table in anticipation of CDP-2
# delivery. Ensure there is a DETECTOR identification in the
# header.
# 11 Sep 2013: Modified to no longer set keywords BAND and CHANNEL (not
# relevant for Droop); now skipping first line of inputfile,
# presumed to hold column names.
# 24 Feb 2014: Instrument name (INSTRUME) changed from meta.instrument.type to
# meta.instrument.name.
# 03 Jun 2014: Using astropy.io.ascii instead of asciitable.
# 15 Oct 2014: Minor update to setting the required keywords
#
# @author: <NAME> (DIAS), <NAME> (UKATC)
#
"""
Script `convert_droop` creates a FITS file in standard MIRI format
from an input ASCII table. The script relies on the MiriDroopModel
data product, which is based on the STScI data model.
Optionally, it can read in meta data from a separate ASCII table. Keywords,
taken from a pre-determined list of required keywords, that are present in
the metadata are inserted into the output product.
The following command arguments are defined by position:
inputfile[0]
The path+name of the file to be read. Compulsory.
outputfile[1]
The path+name of the file to be written.
Optional. Defaults to the same name as inputfile with "_out" appended.
The command also takes the following options:
--verbose or -v:
Generate more output.
--plot or -p:
Plot the bad pixel mask.
--overwrite or -o:
Overwrite any existing FITS file.
--meta or -m:
The path+name of an ASCII file containing the meta data to
insert into the product.
"""
import optparse
import os, sys, time
import astropy.io.ascii as ascii
import numpy as np
from miri.datamodels.miri_droop_model import MiriDroopModel
def load_droop_file( filename, metafile ):
    """
    Reads droop data from an ascii file (the first line, assumed to hold
    column names, is skipped).

    :param filename: ascii table containing the droop data
    :param metafile: optional '|'-separated ascii table of key/value
        metadata pairs, or None
    :return: (metadata dict or None, droop data as a numpy record array)
    """
    # Read the droop data from file.  NOTE: raw string for the regex-style
    # whitespace delimiter, avoiding an invalid-escape-sequence warning.
    droopdata = np.array(ascii.read(filename, data_start=1, delimiter=r'\s',
                                    names=MiriDroopModel.fieldnames))

    # If available, load meta data from file
    if metafile is not None:    # idiomatic form of 'not metafile is None'
        metatable = ascii.read(metafile, data_start=1, delimiter='|')
        # dict() accepts the zip iterator directly; no list() needed.
        metadata = dict(zip(metatable['col1'].tolist(),
                            metatable['col2'].tolist()))
    else:
        metadata = None
    return metadata, droopdata
def set_meta_key( product, metadata, keyword ):
    """Return metadata[keyword] when present; otherwise fall back to the
    current *product* value (used to optionally override CDP keywords)."""
    if keyword in metadata:
        return metadata[keyword]
    return product
if __name__ == "__main__":
# Parse arguments
help_text = __doc__
usage = "%prog [opt] inputtype inputfile outputfile\n"
usage += "Converts a droop table file into "
usage += "standard MIRI CDP format."
parser = optparse.OptionParser(usage)
parser.add_option("-v", "--verbose", dest="verb", action="store_true",
help="Verbose mode"
)
parser.add_option("-p", "--plot", dest="makeplot", action="store_true",
help="Plot bad pixel mask"
)
parser.add_option("-o", "--overwrite", dest="overwrite", action="store_true",
help="Overwrite the FITS file if it already exists"
)
parser.add_option("-m", "--metafile", dest="metafile", action="store",
help="Filename for meta data."
)
(options, args) = parser.parse_args()
try:
inputfile = args[0]
if len(args) > 1:
outputfile = args[1]
else:
outputfile = inputfile + "_out.fits"
except IndexError:
print(help_text)
time.sleep(1) # Ensure help text appears before error messages.
parser.error("Not enough arguments provided")
sys.exit(1)
verb = options.verb
makeplot = options.makeplot
overwrite = options.overwrite
metafile = options.metafile
# Read the given file.
if verb:
print("Reading %s" % inputfile)
metadata, inputdata = load_droop_file( inputfile, metafile )
# The DETECTOR keyword is compulsory.
detector = metadata['DETECTOR']
with MiriDroopModel( droop_table=inputdata, detector=detector ) as droopproduct:
# default modifications for CDP specifications
droopproduct.meta.filename_original = os.path.basename(inputfile)
droopproduct.meta.filename = os.path.basename(outputfile)
# Set required keywords, if metadata was provided
if not metadata is None:
droopproduct.meta.instrument.model = set_meta_key( droopproduct.meta.instrument.model, metadata, 'MODELNAM' )
droopproduct.meta.instrument.detector_settings = set_meta_key( droopproduct.meta.instrument.detector_settings, metadata, 'DETSETNG' )
droopproduct.meta.exposure.readpatt = set_meta_key( droopproduct.meta.exposure.readpatt, metadata, 'READPATT' )
droopproduct.meta.subarray.name = set_meta_key( droopproduct.meta.subarray.name, metadata, 'SUBARRAY' )
if detector == 'MIRIMAGE':
droopproduct.meta.instrument.filter = set_meta_key( droopproduct.meta.instrument.filter, metadata, 'FILTER' )
droopproduct.meta.version = set_meta_key( droopproduct.meta.version, metadata, 'VERSION' )
droopproduct.meta.author = set_meta_key( droopproduct.meta.author, metadata, 'AUTHOR' )
droopproduct.meta.origin = set_meta_key( droopproduct.meta.origin, metadata, 'ORIGIN' )
droopproduct.meta.description = set_meta_key( droopproduct.meta.description, metadata, 'DESCRIP' )
if verb:
print(droopproduct)
if makeplot:
droopproduct.plot()
droopproduct.save( outputfile, overwrite=overwrite)
if verb:
print("Data saved to %s\n" % outputfile)
del droopproduct
| StarcoderdataPython |
4831492 | <reponame>biud436/font-parser
class NameRecord:
    """Plain data holder for one record of a font's 'name' table."""

    def __init__(self):
        # Numeric identifiers of the record.
        self.platform_id = self.encoding_id = 0
        self.language_id = self.name_id = 0
        # Where the string lives inside the table, and how long it is.
        self.string_length = self.string_offset = 0
        # Decoded string value (filled in by the parser).
        self.name = ""
self.hex_offset = "" | StarcoderdataPython |
3391916 | # coding=UTF-8
"""Data previewer functions
Functions and data structures that are needed for the ckan data preview.
"""
import urlparse
import pylons.config as config
import ckan.plugins as p
DEFAULT_DIRECT_EMBED = ['png', 'jpg', 'gif']
DEFAULT_LOADABLE_IFRAME = ['html', 'htm', 'rdf+xml', 'owl+xml', 'xml', 'n3', 'n-triples', 'turtle', 'plain', 'atom', 'rss', 'txt']
def compare_domains(urls):
    ''' Return True if the domains (scheme, hostname, port) of the provided
    urls are all the same.
    '''
    reference = None
    for url in urls:
        # all urls are interpreted as absolute urls,
        # except for urls that start with a /
        if not urlparse.urlparse(url).scheme and not url.startswith('/'):
            url = '//' + url
        parsed = urlparse.urlparse(url.lower(), 'http')
        current = (parsed.scheme, parsed.hostname, parsed.port)
        if reference is None:
            reference = current
        elif current != reference:
            return False
    return True
def resource_is_on_same_domain(data_dict):
    '''Return True if the resource URL is on the same domain as this CKAN
    site (per the ckan.site_url setting).'''
    # compare CKAN domain and resource URL
    ckan_url = config.get('ckan.site_url', '//localhost:5000')
    resource_url = data_dict['resource']['url']
    return compare_domains([ckan_url, resource_url])
def can_be_previewed(data_dict):
    '''
    Determines whether there is an extension that can preview the resource.

    :param data_dict: contains a resource and package dict.
        The resource dict has to have a value for ``on_same_domain``
    :type data_dict: dictionary
    '''
    # Preview plugins may need to know whether the data is served from our
    # own domain, so compute that flag before asking them.
    data_dict['resource']['on_same_domain'] = resource_is_on_same_domain(data_dict)
    plugins = p.PluginImplementations(p.IResourcePreview)
    return any(plugin.can_preview(data_dict) for plugin in plugins)
| StarcoderdataPython |
1616376 | <reponame>rajeshr188/dea
from django.db import models
from mptt.models import MPTTModel,TreeForeignKey
import datetime
from django.db.models import Sum
from django.db.models.functions import Coalesce
# Create your models here.
# cr credit,dr debit
class TransactionType_DE(models.Model):
    # Two-letter double-entry code: 'cr' (credit) or 'dr' (debit).
    XactTypeCode = models.CharField(max_length=2,primary_key=True)
    name = models.CharField(max_length=10)

    def __str__(self):
        return self.name
# sundry_debtor[dr],sundry_creditor[cr],let desc be unique
class AccountType_Ext(models.Model):
    # Whether this external account type is a debit or credit account
    # (e.g. sundry debtor -> 'dr', sundry creditor -> 'cr').
    XactTypeCode = models.ForeignKey(TransactionType_DE ,
                        on_delete=models.CASCADE)
    description = models.CharField(max_length=100,unique = True)

    def __str__(self):
        return self.description
# person or organisation
class EntityType(models.Model):
    # Kind of external party, e.g. person or organisation.
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name
# rameshbi[sundry debtor],ramlalji,narsa,mjk[sundry creditor]
# add accountno
class Account(models.Model):
    """External (sundry debtor/creditor) account identified by name."""
    entity = models.ForeignKey(EntityType,
                null = True,
                on_delete = models.SET_NULL)
    AccountType_Ext = models.ForeignKey(AccountType_Ext,
                        on_delete = models.CASCADE)
    name = models.CharField(max_length=50)

    def __str__(self):
        return self.name

    def set_opening_bal(self, amount, tc=0, tb=0):
        """Record *amount* as this account's opening-balance statement.

        Callers must ensure there are no prior transactions; otherwise
        audit and adjust first.
        """
        # BUG FIX: Model.objects.create() only accepts keyword arguments
        # (the old positional call raised a TypeError), and the tc/tb
        # parameters were silently ignored.
        return AccountStatement.objects.create(AccountNo=self,
                                               ClosingBalance=amount,
                                               TotalCredit=tc,
                                               TotalDebit=tb)

    def adjust(self, amount, xacttypecode):
        # TODO: implement balance adjustment.
        pass

    def audit(self):
        """Compute the balance since the last statement and persist it as a
        new AccountStatement (the next opening balance)."""
        try:
            # BUG FIX: 'objects' (not 'object') is the manager, and the
            # latest statement should belong to *this* account.
            latest_stmt = self.accountstatement_set.latest()
        except AccountStatement.DoesNotExist:
            latest_stmt = None
        if latest_stmt:
            cb = self.current_balance(since=latest_stmt.created)
        else:
            cb = self.current_balance()
        # BUG FIX: kwargs for create(), and the totals must be *called*
        # (the old code passed the bound methods themselves).
        return AccountStatement.objects.create(AccountNo=self,
                                               ClosingBalance=cb,
                                               TotalCredit=self.total_credit(),
                                               TotalDebit=self.total_debit())

    def total_credit(self):
        """Sum of credit ('cr') transaction amounts since the last statement
        (or over all time if no statement exists)."""
        qs = self.accounttransaction_set.filter(
                XactTypeCode__XactTypeCode='cr')
        try:
            # BUG FIX: manager is 'objects', the lookup is 'created__gte'
            # (not 'created_ge'), and aggregate() returns a dict, so the
            # result is result['t'] rather than result.t.
            ls = self.accountstatement_set.latest()
            qs = qs.filter(created__gte=ls.created)
        except AccountStatement.DoesNotExist:
            pass
        return qs.aggregate(t=Coalesce(Sum('Amount'), 0))['t']

    def total_debit(self):
        """Sum of debit ('dr') transaction amounts since the last statement
        (or over all time if no statement exists)."""
        qs = self.accounttransaction_set.filter(
                XactTypeCode__XactTypeCode='dr')
        try:
            ls = self.accountstatement_set.latest()
            qs = qs.filter(created__gte=ls.created)
        except AccountStatement.DoesNotExist:
            pass
        return qs.aggregate(t=Coalesce(Sum('Amount'), 0))['t']

    def current_balance(self, since=None):
        """Closing balance of the latest statement plus loan credits ('LG')
        minus loan payments ('LP').

        NOTE(review): the 'since' parameter is currently ignored -- the
        credit/debit sums always span all transactions; confirm intent.
        """
        latest_acc_stmt = None
        try:
            latest_acc_stmt = self.accountstatement_set.latest()
        except AccountStatement.DoesNotExist:
            # No statement yet: start from a zero closing balance.
            pass
        closing_balance = latest_acc_stmt.ClosingBalance if latest_acc_stmt else 0
        credit = self.accounttransaction_set\
                        .filter(XactTypeCode_ext__in = ['LG'])\
                        .aggregate(
                            t = Coalesce(Sum('Amount'),0))
        debit = self.accounttransaction_set\
                        .filter(XactTypeCode_ext__in = ['LP'])\
                        .aggregate(
                            t=Coalesce(Sum('Amount'),0))
        return closing_balance + (credit['t'] - debit['t'])
# account statement for ext account
class AccountStatement(models.Model):
    # The external account this statement snapshot belongs to.
    AccountNo = models.ForeignKey(Account,
                    on_delete = models.CASCADE)
    created = models.DateTimeField(unique = True,auto_now_add = True)
    # Balance and per-side totals captured at statement time.
    ClosingBalance = models.DecimalField(max_digits=13, decimal_places=3)
    TotalCredit = models.DecimalField(max_digits=13, decimal_places=3)
    TotalDebit = models.DecimalField(max_digits=13, decimal_places=3)

    class Meta:
        get_latest_by = 'created'

    def __str__(self):
        return f"{self.id}"
# sales,purchase,receipt,payment
class TransactionType_Ext(models.Model):
    # Business-level transaction code, e.g. sales, purchase, receipt,
    # payment, loan given (LG), loan paid (LP), loan taken (LT).
    XactTypeCode_ext = models.CharField(max_length=3,primary_key= True)
    description = models.CharField(max_length=100)

    def __str__(self):
        return self.XactTypeCode_ext
# ledger account type for COA ,asset,liability,revenue,expense,gain,loss
class AccountType(models.Model):
    # Ledger account category for the chart of accounts:
    # asset, liability, revenue, expense, gain or loss.
    AccountType = models.CharField(max_length=50)
    description = models.CharField(max_length=100)

    def __str__(self):
        return self.AccountType
# ledger is chart of accounts
# add ledgerno
class Ledger(MPTTModel):
    """Chart-of-accounts node.  Ledgers form a tree (via mptt) and balances
    roll up from descendant ledgers."""
    AccountType = models.ForeignKey(AccountType,
                        on_delete = models.CASCADE)
    name = models.CharField(max_length=100, unique = True)
    parent = TreeForeignKey('self',
                null = True,
                blank = True,
                on_delete = models.CASCADE,
                related_name = 'children')

    class MPTTMeta:
        order_insertion_by = ['name']

    def __str__(self):
        return self.name

    def ledger_txn(self, dr_ledger, amount):
        """Move *amount* by crediting this ledger and debiting *dr_ledger*."""
        # check if ledger exists
        return LedgerTransaction.objects.create(ledgerno=self,
                                                ledgerno_dr=dr_ledger,
                                                amount=amount)

    def acc_txn(self, ledger, xacttypecode, xacttypecode_ext, amount):
        """Record an account transaction of *amount* against *ledger*.

        BUG FIX: the model field is 'Amount'; the old 'amount=' keyword
        raised a TypeError.  NOTE(review): the required Account FK is still
        not supplied here -- confirm how the account should be passed.
        """
        # check if acc exists
        return AccountTransaction.objects.create(ledgerno=ledger,
                                                 XactTypeCode=xacttypecode,
                                                 XactTypeCode_ext=xacttypecode_ext,
                                                 Amount=amount)

    def set_opening_bal(self, amount):
        """Persist *amount* as this ledger's opening-balance statement.

        Callers must ensure there are no prior transactions; otherwise
        audit and adjust first.
        """
        # BUG FIX: Model.objects.create() only accepts keyword arguments.
        return LedgerStatement.objects.create(ledgerno=self,
                                              ClosingBalance=amount)

    def adjust(self, xacttypcode, amount):
        # TODO: implement ledger adjustment.
        pass

    def audit(self):
        """Crunch total credit minus total debit and persist the closing
        balance as a new LedgerStatement (which then serves as this
        ledger's opening balance).

        BUG FIXES: a second, empty audit() definition used to shadow this
        one (so audit() silently did nothing); aggregate() returns a dict,
        not a number; and create() needs keyword arguments.
        TODO: restrict the sums to transactions after the latest statement.
        """
        credit = self.credit_txns.aggregate(t=Coalesce(Sum('amount'), 0))['t']
        debit = self.debit_txns.aggregate(t=Coalesce(Sum('amount'), 0))['t']
        return LedgerStatement.objects.create(ledgerno=self,
                                              ClosingBalance=credit - debit)

    def current_balance(self):
        """Latest statement's closing balance plus the net credit-minus-debit
        of this ledger and all of its descendants."""
        try:
            closing_balance = self.ledgerstatement_set.latest().ClosingBalance
        except LedgerStatement.DoesNotExist:
            # No statement yet: start from zero.
            closing_balance = 0
        decendants = self.get_descendants(include_self = True)
        bal = [ acc.credit_txns.aggregate(t = Coalesce(Sum('amount'),0))['t']
                -
                acc.debit_txns.aggregate(t = Coalesce(Sum('amount'), 0))['t']
                for acc in decendants]
        return closing_balance + sum(bal)
class LedgerTransaction(models.Model):
    # Credit side of the double entry (reverse accessor: credit_txns).
    ledgerno = models.ForeignKey(Ledger,on_delete =models.CASCADE ,related_name='credit_txns')
    created = models.DateTimeField(unique = True, auto_now_add = True)
    # Debit side of the double entry (reverse accessor: debit_txns).
    ledgerno_dr = models.ForeignKey(Ledger ,on_delete =models.CASCADE, related_name= 'debit_txns')
    amount = models.DecimalField(max_digits=13, decimal_places=3)

    def __str__(self):
        return self.ledgerno.name
class LedgerStatement(models.Model):
    # Snapshot of a ledger's closing balance; acts as the opening balance
    # for all subsequent activity.
    ledgerno = models.ForeignKey(Ledger,on_delete = models.CASCADE)
    created = models.DateTimeField(unique = True,auto_now_add=True)
    ClosingBalance = models.DecimalField(max_digits=13, decimal_places=3)

    class Meta:
        get_latest_by = 'created'

    def __str__(self):
        return f"{self.created} - {self.ClosingBalance}"
class AccountTransaction(models.Model):
    # The ledger this account-level transaction posts against.
    ledgerno = models.ForeignKey(Ledger,on_delete = models.CASCADE)
    created = models.DateTimeField(auto_now_add= True,unique = True)
    # Double-entry side (cr/dr) and business-level code (e.g. LG/LP/LT).
    XactTypeCode = models.ForeignKey(TransactionType_DE,
                        on_delete = models.CASCADE)
    XactTypeCode_ext = models.ForeignKey(TransactionType_Ext,
                            on_delete=models.CASCADE)
    Account = models.ForeignKey(Account,on_delete=models.CASCADE)
    Amount = models.DecimalField(max_digits=13,decimal_places=3)

    def __str__(self):
        return f"{self.XactTypeCode_ext}"
# alex deposit 50
# accountxact+
from django.db import transaction
@transaction.atomic()
def pledge_loan(self,amount,interest=0):
    """Record a pledge loan of *amount* for the account *self*, atomically.

    For creditor accounts the money is treated as a loan received
    (liability); for every other account type it is a loan given (asset).

    NOTE(review): written as a free function whose first parameter is
    ``self`` -- it appears intended to be attached to, or called with, an
    Account instance; confirm.  The *interest* parameter is currently
    unused (the interest transactions below are commented out).
    """
    # if acc is creditor ledger is liabilility-loans
    # move money from LiabilityLoan to cash [debit:Ll,credit:cash]
    # ledger to ledger txn
    # money came from creditor to cash [debit:creditor-acc,credit:Liabilityloans]
    # ledger to Acc txn
    # elif acc is debtor ledger is asset-loans
    # move money from cash to AssetLoan [debit:cash,credit:AL]
    # ledger to ledger txn
    # then [debit:AL , credit:debtor-acc]
    # ledger to acc txn
    lt = TransactionType_Ext.objects.get(XactTypeCode_ext = 'LT')
    lg = TransactionType_Ext.objects.get(XactTypeCode_ext='LG')
    cr = TransactionType_DE.objects.get(XactTypeCode='cr')
    # ip = TransactionType_Ext.objects.get(XactTypeCode_ext = 'LP')
    # int_paid = Ledger.objects.get(name='Interest Paid')
    cash_ledger_Acc = Ledger.objects.get(name="Cash_in_drawer")
    if self.AccountType_Ext.description == "creditors":
        liability_loan = Ledger.objects.get(name="Loans Received")
        # txns for receiving loan
        LedgerTransaction.objects.create(
            ledgerno = cash_ledger_Acc,ledgerno_dr = liability_loan,amount = amount)
        AccountTransaction.objects.create(
            ledgerno = liability_loan,XactTypeCode = cr,
            XactTypeCode_ext = lt,Account = self,Amount = amount
            )
        # txns for corresponding loan interest
        # LedgerTransaction.objects.create(
        #     ledgerno = int_paid,ledgerno_dr = cash_ledger_Acc,amount = interest
        # )
        # AccountTransaction.objects.create(
        #     ledgerno = cash_ledger_Acc,XactTypeCode = 'dr',
        #     XacTtypeCode_ext = ip,Account = self,Amount = interest
        # )
    else:
        # tsns for alloting loan
        asset_loan = Ledger.objects.get(name="Loans")
        # int_received = Ledger.objects.get(name = "Interest Received")
        # ir = TransactionType_Ext.objects.get(XactTypeCode_ext = 'IR')
        LedgerTransaction.objects.create(
            ledgerno=asset_loan,ledgerno_dr=cash_ledger_Acc,amount=amount
        )
        AccountTransaction.objects.create(
            ledgerno=asset_loan, XactTypeCode=cr,
            XactTypeCode_ext = lg,Account = self,Amount = amount
        )
        # txns for corresponding loan interest
        # LedgerTransaction.objects.create(
        #     ledgerno = cash_ledger_Acc,ledgerno_dr = int_received,amount = interest
        # )
        # AccountTransaction.objects.create(
        #     ledgerno = int_received,XactTypeCode = 'cr',
        #     XacTtypeCode_ext = ir,Account = self,Amount = interest
        # )
@transaction.atomic()
def repay_loan(self,amount):
    """Record a loan repayment of *amount* for the account *self*,
    atomically -- the reverse postings of pledge_loan().

    NOTE(review): like pledge_loan(), this is a free function taking
    ``self`` and appears intended for an Account instance; confirm.
    """
    # opposite of pledge_loan
    lp = TransactionType_Ext.objects.get(XactTypeCode_ext = 'LP')
    dr = TransactionType_DE.objects.get(XactTypeCode='dr')
    cash_ledger_acc = Ledger.objects.get(name="Cash_in_drawer")
    if self.AccountType_Ext.description == "creditors":
        # Repaying a loan we received: reduce the liability.
        liability_loan = Ledger.objects.get(name="Loans Received")
        LedgerTransaction.objects.create(
            ledgerno = liability_loan,ledgerno_dr = cash_ledger_acc,amount = amount)
        AccountTransaction.objects.create(
            ledgerno=liability_loan, XactTypeCode=dr,
            XactTypeCode_ext = lp, Account = self,Amount = amount)
    else:
        # A loan we gave is being repaid: reduce the asset.
        asset_loan = Ledger.objects.get(name="Loans")
        LedgerTransaction.objects.create(
            ledgerno = cash_ledger_acc,ledgerno_dr = asset_loan,amount = amount
        )
        AccountTransaction.objects.create(
            ledgerno=asset_loan, XactTypeCode = dr,
            XactTypeCode_ext = lp, Account = self,Amount = amount
        )
def sale():
    # TODO: not implemented yet.
    pass

def receipt():
    # TODO: not implemented yet.
    pass

def sale_return():
    # TODO: not implemented yet.
    pass

def purchase():
    # TODO: not implemented yet.
    pass

def payment():
    # TODO: not implemented yet.
    pass

def purchase_return():
    # TODO: not implemented yet.
    pass
# add a way to import ledger and account initial balances,
# i.e. import each balance into its corresponding account by creating the
# corresponding statement with that closing balance
# add a way to initiate an audit
# add a view to show the current balance since the last audit
# statements are only created when the user selects the audit report;
# otherwise statements are created manually for inputting an opening balance
# add a common function audit_all() to audit ledgers and accounts, i.e. a daybook-style report
# for accounts and ledgers, fetch the transactions recorded after the latest statement, if any
# write a manager method for both accounts and ledgers that gets txns after self.statement.latest.created
| StarcoderdataPython |
166159 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AnttechBlockchainDefinSaasPaymentCancelResponse(AlipayResponse):
    """Response model for the anttech.blockchain.defin.saas.payment.cancel
    API: exposes the response fields as properties and fills them in from
    the parsed response content."""

    def __init__(self):
        super(AnttechBlockchainDefinSaasPaymentCancelResponse, self).__init__()
        # Backing fields for the response attributes (None until parsed).
        self._available_amount = None
        self._available_currency = None
        self._fund_mode = None
        self._order_state = None
        self._out_order_id = None
        self._out_request_id = None

    @property
    def available_amount(self):
        return self._available_amount

    @available_amount.setter
    def available_amount(self, value):
        self._available_amount = value

    @property
    def available_currency(self):
        return self._available_currency

    @available_currency.setter
    def available_currency(self, value):
        self._available_currency = value

    @property
    def fund_mode(self):
        return self._fund_mode

    @fund_mode.setter
    def fund_mode(self, value):
        self._fund_mode = value

    @property
    def order_state(self):
        return self._order_state

    @order_state.setter
    def order_state(self, value):
        self._order_state = value

    @property
    def out_order_id(self):
        return self._out_order_id

    @out_order_id.setter
    def out_order_id(self, value):
        self._out_order_id = value

    @property
    def out_request_id(self):
        return self._out_request_id

    @out_request_id.setter
    def out_request_id(self, value):
        self._out_request_id = value

    def parse_response_content(self, response_content):
        # Let the base class parse the payload, then copy over whichever
        # of our fields are present in the response.
        response = super(AnttechBlockchainDefinSaasPaymentCancelResponse, self).parse_response_content(response_content)
        if 'available_amount' in response:
            self.available_amount = response['available_amount']
        if 'available_currency' in response:
            self.available_currency = response['available_currency']
        if 'fund_mode' in response:
            self.fund_mode = response['fund_mode']
        if 'order_state' in response:
            self.order_state = response['order_state']
        if 'out_order_id' in response:
            self.out_order_id = response['out_order_id']
        if 'out_request_id' in response:
            self.out_request_id = response['out_request_id']
| StarcoderdataPython |
1712018 |
import bpy
import yerface_blender.SceneUtilities
import yerface_blender.WebsocketReader
# Module-level state shared between the preview start/stop operators.
isPreviewRunning = False   # True while the preview modal operator is active
myPreviewTimer = None      # window-manager timer driving the modal updates
myReader = None            # YerFaceWebsocketReader while previewing
myUpdater = None           # YerFaceSceneUpdater while previewing
class YerFacePreviewStartOperator(bpy.types.Operator):
    """Modal operator: opens a websocket to the YerFace capture tool and
    applies incoming data to the scene on a timer until ESC or stop."""
    bl_idname = "wm.yerface_preview_start"
    bl_label = "YerFace Preview Start"
    bl_description = "Start previewing data from the Yer Face performance capture tool."
    bl_options = {'REGISTER'}
    def modal(self, context, event):
        """Handle modal events: ESC (or external stop) cancels; each timer
        tick pulls websocket data into the scene via the updater."""
        global isPreviewRunning
        global myUpdater
        # The stop operator flips the module-level flag, so check it here too.
        if event.type == 'ESC' or not isPreviewRunning:
            return self.cancel(context)
        if event.type == 'TIMER':
            myUpdater.runUpdate()
        return {'PASS_THROUGH'}
    def execute(self, context):
        """Open the websocket, set up the scene updater, fire the optional
        tick callback once with resetState=True, and start the modal timer."""
        global isPreviewRunning
        global myPreviewTimer
        global myReader
        global myUpdater
        props = context.scene.yerFaceBlenderProperties
        # Effective scene frame rate (fps_base accounts for e.g. 29.97).
        fps = context.scene.render.fps / context.scene.render.fps_base
        time_step = 1/fps
        isPreviewRunning = True
        myReader = yerface_blender.WebsocketReader.YerFaceWebsocketReader(props.websocketURI)
        myReader.openWebsocket()
        myUpdater = yerface_blender.SceneUtilities.YerFaceSceneUpdater(context, myReader, fps)
        if props.tickCallback != "":
            # Initial "reset" tick so user callbacks can clear their state.
            tickProps = {
                'userData': props.tickUserData,
                'resetState': True,
                'perfcapPacket': {},
                'insertKeyframes': False,
                'currentFrameNumber': None,
                'flushLastFrame': False,
                'discardLastFrameData': False,
                'samplingMode': None,
                'framesPerSecond': fps
            }
            bpy.app.driver_namespace[props.tickCallback](tickProps)
        context.window_manager.modal_handler_add(self)
        # NOTE(review): positional (time_step, window) matches the pre-2.80
        # event_timer_add signature; newer Blender expects window=... -- confirm
        # the targeted Blender version.
        myPreviewTimer = context.window_manager.event_timer_add(time_step, context.window)
        print("STARTED TIMER w/Time Step: ", time_step)
        return {'RUNNING_MODAL'}
    def cancel(self, context):
        """Tear down the timer and websocket; safe to call when not running.
        Also invoked statically by the stop operator with self=None."""
        global isPreviewRunning
        global myPreviewTimer
        global myReader
        if isPreviewRunning:
            isPreviewRunning = False
            context.window_manager.event_timer_remove(myPreviewTimer)
            myReader.closeWebsocket()
            myReader = None
            print("CANCELLED TIMER")
        return {'CANCELLED'}
    # NOTE(review): this method shadows the module-level isPreviewRunning flag
    # name inside the class namespace; it returns the global flag's value.
    def isPreviewRunning(self):
        global isPreviewRunning
        return isPreviewRunning
class YerFacePreviewStopOperator(bpy.types.Operator):
    """Operator that stops a running preview by delegating to the start
    operator's cancel() logic (which resets the shared module state)."""
    bl_idname = "wm.yerface_preview_stop"
    bl_label = "YerFace Preview Stop"
    bl_description = "Stop previewing data from the Yer Face performance capture tool."
    bl_options = {'REGISTER'}
    def execute(self, context):
        # cancel() never reads self, so passing None as the instance is safe.
        YerFacePreviewStartOperator.cancel(None, context)
        return {'FINISHED'}
| StarcoderdataPython |
185553 | """
# Custom colormap
This example shows how to create and use a custom colormap.
"""
import numpy as np
import numpy.random as nr
from datoviz import app, canvas, run, colormap
# Create the canvas, panel, and visual.
c = canvas(show_fps=True)
ctx = c.gpu().context()
panel = c.scene().panel(controller='panzoom')
visual = panel.visual('path', transform=None)
# Uniform parameters for the visual.
visual.data('linewidth', np.array([50]))
visual.data('cap_type', np.array([0]))
# Create a horizontal thick line.
n = 256
x = np.linspace(-1, 1, n)
y = np.zeros(n)
z = np.zeros(n)
pos = np.c_[x, y, z] # an (N, 3) array with the coordinates of the path vertices.
pos[:, 1] -= .25
# Create a first custom color map, ranging from red to green.
cmap = np.c_[np.arange(256), np.arange(256)[::-1], np.zeros(256), 255 * np.ones(256)]
ctx.colormap('mycmap0', cmap.astype(np.uint8))
# Add a first line.
visual.data('pos', pos)
visual.data('color', colormap(np.linspace(0, 1, n), cmap='mycmap0'))
# Create a second custom color map, ranging from green to blue.
cmap = np.c_[np.zeros(256), np.arange(256), np.arange(256)[::-1], 255 * np.ones(256)]
ctx.colormap('mycmap1', cmap.astype(np.uint8))
# Add a second line.
pos[:, 1] += .5
# NOTE: note the use of the .append() method here, to concatenate the array to the existing data.
visual.append('pos', pos)
visual.append('color', colormap(np.linspace(0, 1, n), cmap='mycmap1'))
# Set the length of each path.
visual.data('length', np.array([n, n]))
# Start the event loop.
run()
| StarcoderdataPython |
1669485 | <filename>tests/unit/compute/test_ebs_nuke.py
# -*- coding: utf-8 -*-
"""Tests for the ebs nuke class."""
import boto3
import time
from moto import mock_ec2
from package.nuke.compute.ebs import NukeEbs
from .utils import create_ebs
import pytest
@pytest.mark.parametrize(
    "aws_region, older_than_seconds, result_count",
    [
        # Cutoff 12h in the future: every volume is "older" and gets nuked.
        ("eu-west-1", time.time() + 43200, 0),
        ("eu-west-2", time.time() + 43200, 0),
        # Cutoff far in the past (~20 years): nothing qualifies, 1 survives.
        ("eu-west-2", 630720000, 1),
    ],
)
@mock_ec2
def test_ebs_nuke(aws_region, older_than_seconds, result_count):
    """NukeEbs removes exactly the volumes older than the given cutoff."""
    ec2 = boto3.client("ec2", region_name=aws_region)
    create_ebs(region_name=aws_region)
    nuker = NukeEbs(aws_region)
    nuker.nuke(older_than_seconds=older_than_seconds)
    remaining = ec2.describe_volumes()["Volumes"]
    assert len(remaining) == result_count
| StarcoderdataPython |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Packaging metadata for the single-module "arpy" library.
# NOTE(review): distutils is deprecated and removed from the stdlib in
# Python 3.12; consider migrating to setuptools.setup().
from distutils.core import setup
setup(name='arpy',
      version='0.1.1',
      description='Library for accessing "ar" files',
      author=u'<NAME>',
      author_email='<EMAIL>',
      url='http://bitbucket.org/viraptor/arpy',
      py_modules=['arpy'],
      license="Simplified BSD",
     )
| StarcoderdataPython |
3305958 | <filename>meta_mb/workers/metrpo/worker_data.py
import time, pickle
from meta_mb.logger import logger
from meta_mb.workers.base import Worker
class WorkerData(Worker):
    """Worker process that samples trajectories from the real environment and
    forwards processed samples to the next worker over a queue."""
    def __init__(self, simulation_sleep):
        # simulation_sleep: minimum wall-clock duration of one step();
        # step() sleeps for any remainder to pace the pipeline.
        super().__init__()
        self.simulation_sleep = simulation_sleep
        self.env = None
        self.env_sampler = None
        self.dynamics_sample_processor = None
        # Accumulates processed sample batches until push() flushes them.
        self.samples_data_arr = []
    def construct_from_feed_dict(
            self,
            policy_pickle,
            env_pickle,
            baseline_pickle,
            dynamics_model_pickle,
            feed_dict
    ):
        """Build the env sampler and sample processor from pickled components.

        Imports are local because this runs in a spawned worker process.
        Note: dynamics_model_pickle is accepted for interface symmetry but
        not used by the data worker.
        """
        from meta_mb.samplers.sampler import Sampler
        from meta_mb.samplers.mb_sample_processor import ModelSampleProcessor
        env = pickle.loads(env_pickle)
        policy = pickle.loads(policy_pickle)
        baseline = pickle.loads(baseline_pickle)
        self.env = env
        self.env_sampler = Sampler(env=env, policy=policy, **feed_dict['env_sampler'])
        self.dynamics_sample_processor = ModelSampleProcessor(
            baseline=baseline,
            **feed_dict['dynamics_sample_processor']
        )
    def prepare_start(self):
        """Collect the first batch (flag read from the queue decides whether
        sampling is random) and push it downstream."""
        initial_random_samples = self.queue.get()
        self.step(initial_random_samples)
        self.push()
    def step(self, random=False):
        """Sample one batch of environment trajectories, process it, and pace
        the loop so a step takes at least self.simulation_sleep seconds."""
        time_step = time.time()
        '''------------- Obtaining samples from the environment -----------'''
        if self.verbose:
            logger.log("Data is obtaining samples...")
        env_paths = self.env_sampler.obtain_samples(
            log=True,
            random=random,
            log_prefix='Data-EnvSampler-',
        )
        '''-------------- Processing environment samples -------------------'''
        if self.verbose:
            logger.log("Data is processing environment samples...")
        samples_data = self.dynamics_sample_processor.process_samples(
            env_paths,
            log=True,
            log_prefix='Data-EnvTrajs-',
        )
        self.samples_data_arr.append(samples_data)
        time_step = time.time() - time_step
        # Sleep off the remainder so data collection doesn't outpace training.
        time_sleep = max(self.simulation_sleep - time_step, 0)
        time.sleep(time_sleep)
        logger.logkv('Data-TimeStep', time_step)
        logger.logkv('Data-TimeSleep', time_sleep)
    def _synch(self, policy_state_pickle):
        """Refresh the sampler's policy parameters from a pickled state dict."""
        time_synch = time.time()
        policy_state = pickle.loads(policy_state_pickle)
        assert isinstance(policy_state, dict)
        self.env_sampler.policy.set_shared_params(policy_state)
        time_synch = time.time() - time_synch
        logger.logkv('Data-TimeSynch', time_synch)
    def push(self):
        """Send all accumulated sample batches to the next worker and reset
        the local buffer."""
        time_push = time.time()
        self.queue_next.put(pickle.dumps(self.samples_data_arr))
        self.samples_data_arr = []
        time_push = time.time() - time_push
        logger.logkv('Data-TimePush', time_push)
    def set_stop_cond(self):
        """Signal shutdown once the configured number of iterations is done."""
        if self.itr_counter >= self.n_itr:
            self.stop_cond.set()
| StarcoderdataPython |
4826912 | <reponame>vijayRT/inkbot
#tweepy1.py - To test trend obtaining
import sys
sys.path.append('/home/vijay/.local/lib/python2.7/site-packages')
import tweepy
import woeid
import yweather
import time
import os
import sys
reload(sys)
# Configure the Tweepy API client.
# SECURITY NOTE(review): API credentials are hardcoded in source; move them
# to environment variables or a config file before committing/sharing.
t0 = time.time()  # start of wall-clock timing, printed at the end of the script
consumer_key = 'Osyy0PSrhMRpnIWxjBLzLJeKR'
consumer_secret = '<KEY>'
access_token = '<KEY>'
access_token_secret = '<KEY>'
# OAuth process, using the keys and tokens
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
tweapi = tweepy.API(auth)
#Configure Yahoo WOEID API
def get_trends(cou):
    """Fetch the top 10 Twitter trends for country *cou* and write one trend
    name per line to trends/<cou>.

    Fixes over the original: on a lookup/API failure the country is now
    skipped with a message instead of falling through to use undefined
    names ('trends'/'thefile'), and the output file is closed via a
    context manager instead of being leaked.
    """
    filepath = os.path.join('trends', cou)
    try:
        client = yweather.Client()
        place_woeid = client.fetch_woeid(cou)
        trends1 = tweapi.trends_place(place_woeid, exclude='hashtags')
        trends = trends1[0]['trends']
    except Exception as e:
        # Previously this printed '' and execution continued into the write
        # loop, raising NameError; report and bail out instead.
        print('Failed to fetch trends for %s: %s' % (cou, e))
        return
    with open(filepath, 'w') as thefile:
        for trend in trends[:10]:
            # .encode kept for the Python 2 runtime this script targets
            # (see reload(sys) above).
            thefile.write(("%s\n" % trend['name']).encode("utf8"))
# Fetch trends for each country of interest, then print the elapsed time.
country_list = ['USA', 'UK', 'India', 'Canada', 'Australia']
for country in country_list:
    get_trends(country)
    print(country)
    print("\n")
t1 = time.time()
print(t1 - t0)
| StarcoderdataPython |
3269500 | <reponame>Douwe-Spaanderman/ChessVideoAI<gh_stars>0
import setuptools
def readme():
    """Return the contents of README.md (decoded as UTF-8)."""
    with open("README.md", "r", encoding="utf-8") as handle:
        contents = handle.read()
    return contents
# Package metadata; the long description is pulled from README.md so PyPI
# renders the project page from the same source as the repo.
setuptools.setup(
    name="ChessVideoAI",
    version="0.0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Deep learning project to analyse chess games from image or video",
    long_description=readme(),
    long_description_content_type="text/markdown",
    url="https://github.com/Douwe-Spaanderman/ChessVideoAI",
    project_urls={
        "Bug Tracker": "https://github.com/Douwe-Spaanderman/ChessVideoAI/issues"
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    packages=setuptools.find_packages(),
    python_requires='>=3.7.3',
)
1612138 | <reponame>profesormig/quimica3a
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import core.models.quota
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds four count fields to the quota
    # model (each with a callable default from core.models.quota) and drops
    # the previous unique_together constraint. Edit with care -- migrations
    # must stay consistent with their recorded state.
    dependencies = [
        ('core', '0051_non_null_key_instance_action'),
    ]
    operations = [
        migrations.AddField(
            model_name='quota',
            name='floating_ip_count',
            field=models.IntegerField(default=core.models.quota._get_default_floating_ip_count, null=True, blank=True),
        ),
        migrations.AddField(
            model_name='quota',
            name='instance_count',
            field=models.IntegerField(default=core.models.quota._get_default_instance_count, null=True, blank=True),
        ),
        migrations.AddField(
            model_name='quota',
            name='port_count',
            field=models.IntegerField(default=core.models.quota._get_default_port_count, null=True, blank=True),
        ),
        migrations.AddField(
            model_name='quota',
            name='snapshot_count',
            field=models.IntegerField(default=core.models.quota._get_default_snapshot_count, null=True, blank=True),
        ),
        migrations.AlterUniqueTogether(
            name='quota',
            unique_together=set([]),
        ),
    ]
| StarcoderdataPython |
1628583 | #!/usr/bin/env python3
"""Tests for cve_scan."""
from collections import defaultdict
import datetime as dt
import unittest
import cve_scan
class CveScanTest(unittest.TestCase):
  """Unit tests for cve_scan: NVD JSON parsing, CPE/CVE matching, scanning."""
  def test_parse_cve_json(self):
    """ParseCveJson flattens NVD entries (including nested 'children' CPE
    nodes) into Cve objects and a vendor-level CPE reverse map."""
    cve_json = {
        'CVE_Items': [
            {
                'cve': {
                    'CVE_data_meta': {
                        'ID': 'CVE-2020-1234'
                    },
                    'description': {
                        'description_data': [{
                            'value': 'foo'
                        }]
                    }
                },
                'configurations': {
                    'nodes': [{
                        'cpe_match': [{
                            'cpe23Uri': 'cpe:2.3:a:foo:bar:1.2.3'
                        }],
                    }],
                },
                'impact': {
                    'baseMetricV3': {
                        'cvssV3': {
                            'baseScore': 3.4,
                            'baseSeverity': 'LOW'
                        }
                    }
                },
                'publishedDate': '2020-03-17T00:59Z',
                'lastModifiedDate': '2020-04-17T00:59Z'
            },
            {
                'cve': {
                    'CVE_data_meta': {
                        'ID': 'CVE-2020-1235'
                    },
                    'description': {
                        'description_data': [{
                            'value': 'bar'
                        }]
                    }
                },
                'configurations': {
                    'nodes': [{
                        'cpe_match': [{
                            'cpe23Uri': 'cpe:2.3:a:foo:bar:1.2.3'
                        }],
                        'children': [
                            {
                                'cpe_match': [{
                                    'cpe23Uri': 'cpe:2.3:a:foo:baz:3.2.3'
                                }]
                            },
                            {
                                'cpe_match': [{
                                    'cpe23Uri': 'cpe:2.3:a:foo:*:*'
                                }, {
                                    'cpe23Uri': 'cpe:2.3:a:wat:bar:1.2.3'
                                }]
                            },
                        ],
                    }],
                },
                'impact': {
                    'baseMetricV3': {
                        'cvssV3': {
                            'baseScore': 9.9,
                            'baseSeverity': 'HIGH'
                        }
                    }
                },
                'publishedDate': '2020-03-18T00:59Z',
                'lastModifiedDate': '2020-04-18T00:59Z'
            },
        ]
    }
    cves = {}
    cpe_revmap = defaultdict(set)
    cve_scan.ParseCveJson(cve_json, cves, cpe_revmap)
    self.maxDiff = None
    self.assertDictEqual(
        cves, {
            'CVE-2020-1234':
                cve_scan.Cve(id='CVE-2020-1234',
                             description='foo',
                             cpes=set([self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3')]),
                             score=3.4,
                             severity='LOW',
                             published_date=dt.date(2020, 3, 17),
                             last_modified_date=dt.date(2020, 4, 17)),
            'CVE-2020-1235':
                cve_scan.Cve(id='CVE-2020-1235',
                             description='bar',
                             cpes=set(
                                 map(self.BuildCpe, [
                                     'cpe:2.3:a:foo:bar:1.2.3', 'cpe:2.3:a:foo:baz:3.2.3',
                                     'cpe:2.3:a:foo:*:*', 'cpe:2.3:a:wat:bar:1.2.3'
                                 ])),
                             score=9.9,
                             severity='HIGH',
                             published_date=dt.date(2020, 3, 18),
                             last_modified_date=dt.date(2020, 4, 18))
        })
    # Reverse map is keyed on wildcarded (vendor-level) CPEs.
    self.assertDictEqual(cpe_revmap, {
        'cpe:2.3:a:foo:*:*': {'CVE-2020-1234', 'CVE-2020-1235'},
        'cpe:2.3:a:wat:*:*': {'CVE-2020-1235'}
    })
  def BuildCpe(self, cpe_str):
    """Helper: parse a CPE 2.3 string into a cve_scan.Cpe."""
    return cve_scan.Cpe.FromString(cpe_str)
  def BuildDep(self, cpe_str, version=None, release_date=None):
    """Helper: minimal repository-location dependency dict."""
    return {'cpe': cpe_str, 'version': version, 'release_date': release_date}
  def CpeMatch(self, cpe_str, dep_cpe_str, version=None, release_date=None):
    """Helper: run cve_scan.CpeMatch on a CPE vs. a dependency dict."""
    return cve_scan.CpeMatch(self.BuildCpe(cpe_str),
                             self.BuildDep(dep_cpe_str, version=version, release_date=release_date))
  def test_cpe_match(self):
    """CpeMatch honors part/vendor/product fields, wildcards, and both exact
    and fuzzy (separator-insensitive) version/date matching."""
    # Mismatched part
    self.assertFalse(self.CpeMatch('cpe:2.3:o:foo:bar:*', 'cpe:2.3:a:foo:bar:*'))
    # Mismatched vendor
    self.assertFalse(self.CpeMatch('cpe:2.3:a:foo:bar:*', 'cpe:2.3:a:foz:bar:*'))
    # Mismatched product
    self.assertFalse(self.CpeMatch('cpe:2.3:a:foo:bar:*', 'cpe:2.3:a:foo:baz:*'))
    # Wildcard product
    self.assertTrue(self.CpeMatch('cpe:2.3:a:foo:bar:*', 'cpe:2.3:a:foo:*:*'))
    # Wildcard version match
    self.assertTrue(self.CpeMatch('cpe:2.3:a:foo:bar:*', 'cpe:2.3:a:foo:bar:*'))
    # Exact version match
    self.assertTrue(self.CpeMatch('cpe:2.3:a:foo:bar:1.2.3', 'cpe:2.3:a:foo:bar:*',
                                  version='1.2.3'))
    # Date version match
    self.assertTrue(
        self.CpeMatch('cpe:2.3:a:foo:bar:2020-03-05',
                      'cpe:2.3:a:foo:bar:*',
                      release_date='2020-03-05'))
    # (cpe_version, dep_version) pairs that should fuzzily match.
    fuzzy_version_matches = [
        ('2020-03-05', '2020-03-05'),
        ('2020-03-05', '20200305'),
        ('2020-03-05', 'foo-20200305-bar'),
        ('2020-03-05', 'foo-2020_03_05-bar'),
        ('2020-03-05', 'foo-2020-03-05-bar'),
        ('1.2.3', '1.2.3'),
        ('1.2.3', '1-2-3'),
        ('1.2.3', '1_2_3'),
        ('1.2.3', '1:2:3'),
        ('1.2.3', 'foo-1-2-3-bar'),
    ]
    for cpe_version, dep_version in fuzzy_version_matches:
      self.assertTrue(
          self.CpeMatch(f'cpe:2.3:a:foo:bar:{cpe_version}',
                        'cpe:2.3:a:foo:bar:*',
                        version=dep_version))
    # Pairs that must NOT match despite superficial similarity.
    fuzzy_version_no_matches = [
        ('2020-03-05', '2020-3.5'),
        ('2020-03-05', '2020--03-05'),
        ('1.2.3', '1@2@3'),
        ('1.2.3', '1..2.3'),
    ]
    for cpe_version, dep_version in fuzzy_version_no_matches:
      self.assertFalse(
          self.CpeMatch(f'cpe:2.3:a:foo:bar:{cpe_version}',
                        'cpe:2.3:a:foo:bar:*',
                        version=dep_version))
  def BuildCve(self, cve_id, cpes, published_date):
    """Helper: minimal Cve with only id/cpes/published_date populated."""
    return cve_scan.Cve(cve_id,
                        description=None,
                        cpes=cpes,
                        score=None,
                        severity=None,
                        published_date=dt.date.fromisoformat(published_date),
                        last_modified_date=None)
  def CveMatch(self, cve_id, cpes, published_date, dep_cpe_str, version=None, release_date=None):
    """Helper: run cve_scan.CveMatch on a synthetic CVE vs. a dependency."""
    return cve_scan.CveMatch(self.BuildCve(cve_id, cpes=cpes, published_date=published_date),
                             self.BuildDep(dep_cpe_str, version=version, release_date=release_date))
  def test_cve_match(self):
    """CveMatch combines CPE matching with a staleness check: wildcard-version
    CVEs only apply to dependencies released on/before the CVE date."""
    # Empty CPEs, no match
    self.assertFalse(self.CveMatch('CVE-2020-123', set(), '2020-05-03', 'cpe:2.3:a:foo:bar:*'))
    # Wildcard version, stale dependency match
    self.assertTrue(
        self.CveMatch('CVE-2020-123',
                      set([self.BuildCpe('cpe:2.3:a:foo:bar:*')]),
                      '2020-05-03',
                      'cpe:2.3:a:foo:bar:*',
                      release_date='2020-05-02'))
    self.assertTrue(
        self.CveMatch('CVE-2020-123',
                      set([self.BuildCpe('cpe:2.3:a:foo:bar:*')]),
                      '2020-05-03',
                      'cpe:2.3:a:foo:bar:*',
                      release_date='2020-05-03'))
    # Wildcard version, recently updated
    self.assertFalse(
        self.CveMatch('CVE-2020-123',
                      set([self.BuildCpe('cpe:2.3:a:foo:bar:*')]),
                      '2020-05-03',
                      'cpe:2.3:a:foo:bar:*',
                      release_date='2020-05-04'))
    # Version match
    self.assertTrue(
        self.CveMatch('CVE-2020-123',
                      set([self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3')]),
                      '2020-05-03',
                      'cpe:2.3:a:foo:bar:*',
                      version='1.2.3'))
    # Version mismatch
    self.assertFalse(
        self.CveMatch('CVE-2020-123',
                      set([self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3')]),
                      '2020-05-03',
                      'cpe:2.3:a:foo:bar:*',
                      version='1.2.4',
                      release_date='2020-05-02'))
    # Multiple CPEs, match first, don't match later.
    self.assertTrue(
        self.CveMatch('CVE-2020-123',
                      set([
                          self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3'),
                          self.BuildCpe('cpe:2.3:a:foo:baz:3.2.1')
                      ]),
                      '2020-05-03',
                      'cpe:2.3:a:foo:bar:*',
                      version='1.2.3'))
  def test_cve_scan(self):
    """End-to-end CveScan: honors the allowlist, skips 'N/A' deps, and maps
    each surviving CVE to the dependencies it affects."""
    cves = {
        'CVE-2020-1234':
            self.BuildCve(
                'CVE-2020-1234',
                set([
                    self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3'),
                    self.BuildCpe('cpe:2.3:a:foo:baz:3.2.1')
                ]), '2020-05-03'),
        'CVE-2020-1235':
            self.BuildCve(
                'CVE-2020-1235',
                set([
                    self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3'),
                    self.BuildCpe('cpe:2.3:a:foo:baz:3.2.1')
                ]), '2020-05-03'),
        'CVE-2020-1236':
            self.BuildCve('CVE-2020-1236', set([
                self.BuildCpe('cpe:2.3:a:foo:wat:1.2.3'),
            ]), '2020-05-03'),
    }
    cpe_revmap = {
        'cpe:2.3:a:foo:*:*': ['CVE-2020-1234', 'CVE-2020-1235', 'CVE-2020-1236'],
    }
    # CVE-2020-1235 is allowlisted and must be excluded from the results.
    cve_allowlist = ['CVE-2020-1235']
    repository_locations = {
        'bar': self.BuildDep('cpe:2.3:a:foo:bar:*', version='1.2.3'),
        'baz': self.BuildDep('cpe:2.3:a:foo:baz:*', version='3.2.1'),
        'foo': self.BuildDep('cpe:2.3:a:foo:*:*', version='1.2.3'),
        'blah': self.BuildDep('N/A'),
    }
    possible_cves, cve_deps = cve_scan.CveScan(cves, cpe_revmap, cve_allowlist,
                                               repository_locations)
    self.assertListEqual(sorted(possible_cves.keys()), ['CVE-2020-1234', 'CVE-2020-1236'])
    self.assertDictEqual(cve_deps, {
        'CVE-2020-1234': ['bar', 'baz', 'foo'],
        'CVE-2020-1236': ['foo']
    })
})
# Allow running this test module directly (python <module>.py).
if __name__ == '__main__':
  unittest.main()
| StarcoderdataPython |
1679226 | """
This module provide the defence method for THERMOMETER ENCODING's implement.
THERMOMETER ENCODING: ONE HOT WAY TO RESIST ADVERSARIAL EXAMPLES
"""
from builtins import range
import logging
logger=logging.getLogger(__name__)
import numpy as np
from keras.utils import to_categorical
__all__ = [
'ThermometerEncodingDefence'
]
def _perchannel(x,num_space):
    """Thermometer-encode a single channel.

    Each pixel value in [0, 1] is first quantized into one of *num_space*
    buckets, then one-hot encoded, and finally turned into a "thermometer"
    code by the cumulative sum (all positions at or below the bucket are 1).
    Returns an array of shape x.shape + [num_space].
    """
    # pos[i] = number of thresholds k/num_space that x[i] exceeds (bucket index).
    pos = np.zeros(shape=x.shape)
    for i in range(1, num_space):
        pos[x > float(i) / num_space] += 1
    onehot_rep = to_categorical(pos.reshape(-1), num_space)
    # Convert one-hot into thermometer code: each column accumulates all
    # lower columns, so column i becomes 1 whenever the bucket index >= i... 
    # (iterating from high to low keeps the partial sums untouched).
    for i in reversed(list(range(1, num_space))):
        onehot_rep[:, i] += np.sum(onehot_rep[:, :i], axis=1)
    result = onehot_rep.reshape(list(x.shape) + [num_space])
    return result
# num_space: number of discretization levels; typically 10.
# clip_values: output value range after processing; may include negative
# values, commonly [0, 1] or [-1, 1].
# Supported input format per image: [28, 28, 1].
def ThermometerEncodingDefence(x, y=None, num_space=10, clip_values=(0.0, 1.0)):
    """Apply thermometer encoding to a batch of images as an adversarial
    defence (see "Thermometer Encoding: One Hot Way To Resist Adversarial
    Examples").

    x: 4-D batch of images; each channel is encoded independently with
       *num_space* levels and the encoded channels are concatenated.
    y: unused; kept for a (preprocessor-style) interface.
    NOTE(review): the loop iterates x.shape[1] (channels-first layout) but
    the earlier comment advertises [28, 28, 1] (channels-last) and the
    concatenation uses axis=3 -- confirm the intended data layout.
    """
    result = []
    #for c in range(x.shape[-1]):
    #    result.append(_perchannel(x[:, :, :, c],num_space))
    for c in range(x.shape[1]):
        result.append(_perchannel(x[:, c, :, :],num_space))
    result = np.concatenate(result, axis=3)
    result = np.clip(result, clip_values[0], clip_values[1])
    return result
| StarcoderdataPython |
1742272 | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loads dataset for the tagging (e.g., NER/POS) task."""
from typing import Mapping, Optional
import dataclasses
import tensorflow as tf
from official.core import input_reader
from official.modeling.hyperparams import config_definitions as cfg
from official.nlp.data import data_loader_factory
@dataclasses.dataclass
class TaggingDataConfig(cfg.DataConfig):
  """Data config for tagging (tasks/tagging)."""
  # Whether this config describes the training split (consumed by the reader).
  is_training: bool = True
  # Fixed token sequence length of the serialized features.
  seq_length: int = 128
  # If True, each example also carries a scalar 'sentence_id' feature.
  include_sentence_id: bool = False
@data_loader_factory.register_data_loader_cls(TaggingDataConfig)
class TaggingDataLoader:
  """A class to load dataset for tagging (e.g., NER and POS) task."""
  def __init__(self, params: TaggingDataConfig):
    self._params = params
    self._seq_length = params.seq_length
    self._include_sentence_id = params.include_sentence_id
  def _decode(self, record: tf.Tensor):
    """Decodes a serialized tf.Example into a dict of fixed-length tensors."""
    name_to_features = {
        'input_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
        'input_mask': tf.io.FixedLenFeature([self._seq_length], tf.int64),
        'segment_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
        'label_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
    }
    if self._include_sentence_id:
      name_to_features['sentence_id'] = tf.io.FixedLenFeature([], tf.int64)
    example = tf.io.parse_single_example(record, name_to_features)
    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in example:
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.cast(t, tf.int32)
      example[name] = t
    return example
  def _parse(self, record: Mapping[str, tf.Tensor]):
    """Parses raw tensors into a (features, labels) pair for the model."""
    # Rename serialized feature keys to the model's expected input names.
    x = {
        'input_word_ids': record['input_ids'],
        'input_mask': record['input_mask'],
        'input_type_ids': record['segment_ids']
    }
    if self._include_sentence_id:
      x['sentence_id'] = record['sentence_id']
    y = record['label_ids']
    return (x, y)
  def load(self, input_context: Optional[tf.distribute.InputContext] = None):
    """Returns a tf.dataset.Dataset built from the configured files."""
    reader = input_reader.InputReader(
        params=self._params, decoder_fn=self._decode, parser_fn=self._parse)
    return reader.read(input_context)
| StarcoderdataPython |
3282479 | <reponame>samyuyagati/Pequin<gh_stars>0
'''
Copyright 2021 <NAME> <<EMAIL>>
<NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import utils
import sys
import concurrent
from utils.experiment_util import *
def main():
    """CLI entry point: expects one config-file argument and runs the
    configured experiments on a shared 16-worker thread pool.

    Fix: the module-level ``import concurrent`` does not guarantee that the
    ``concurrent.futures`` submodule is loaded, so accessing
    ``concurrent.futures.ThreadPoolExecutor`` could raise AttributeError.
    Import the submodule explicitly here.
    """
    import concurrent.futures
    if len(sys.argv) != 2:
        sys.stderr.write('Usage: python3 %s <config_file>\n' % sys.argv[0])
        sys.exit(1)
    with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
        run_multiple_experiments(sys.argv[1], executor)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
161013 | import os
# Generate a shell script of STAR alignment commands for paired-end pombe
# RNA-seq libraries. Paired R1/R2 fastq files are matched by sort order and
# one STAR invocation is emitted per sample into commands_fname.
base_dir = '/rds/general/user/bheineik/home/'
genomeDir = base_dir + 'genomes/pombe_20201008_star'
fastqbase = base_dir + 'rna_seq_data/20210315_pombe_ox_bulk/Unaligned/'
outfilebase = base_dir + 'rna_seq_data/20210315_pombe_ox_bulk/mapped/'
readFilesCommand = 'zcat'  # Use to decompress fastq.gz files
outReadsUnmapped = 'Fastx'
outMultimapperOrder = 'Random'  # Random order of alignments for multimappers. Default for future releases.
# outSJfilterIntronMaxVsReadN 1200 - I don't think this is needed if we use align intron max
alignIntronMax = '1200'  # Requires introns to be less than 1200bp
# Max mated-pair gap: max intron size + 500, which should be the max size of
# any read in the library.
alignMatesGapMax = '1700'
quantMode = 'GeneCounts'
runThreadN = '8'
# Fix: list the fastq directory via the fastqbase variable instead of
# duplicating the absolute path literal.
files = os.listdir(fastqbase)
files.sort()
reads = []
read_files = []
commands_fname = ('/rds/general/user/bheineik/home/github/rna_seq_processing/star/20210421_pombe_commands.txt')
with open(commands_fname,'w') as f:
    for jj,file in enumerate(files):
        # Filename layout: <prefix>_<strain>_<time>_<cond>_<sample>_<R1|R2>_...
        strain_time_cond = '_'.join(file.split('_')[1:4])
        read= file.split('_')[5]
        reads.append(read)
        read_files.append(file)
        # Once an R1/R2 pair has accumulated, emit one STAR command for it.
        if reads == ['R1', 'R2']:
            outfiledir = outfilebase + strain_time_cond
            f.write('mkdir ' + outfiledir + '\n')
            outFileNamePrefix = outfiledir + '/' + strain_time_cond + '_'
            star_command = ['STAR', '--runThreadN', runThreadN,
                        '--genomeDir', genomeDir,
                        '--readFilesIn', fastqbase + read_files[0], fastqbase + read_files[1],
                        # Fix: use the readFilesCommand variable defined above
                        # instead of a duplicated 'zcat' literal.
                        '--readFilesCommand', readFilesCommand,
                        '--outFileNamePrefix', outFileNamePrefix,
                        '--outReadsUnmapped', outReadsUnmapped,
                        '--outMultimapperOrder', outMultimapperOrder,
                        '--alignIntronMax', alignIntronMax,
                        '--alignMatesGapMax', alignMatesGapMax,
                        '--quantMode', quantMode
                        ]
            f.write(' '.join(star_command) + '\n')
            f.write("echo '" + strain_time_cond + " complete '\n" )
            reads = []
            read_files = []
# genomeDir=/rds/general/user/bheineik/home/genomes/pombe_20201008_star
# genomeFastaFiles=/rds/general/user/bheineik/home/genomes/pombe_20201008/Schizosaccharomyces_pombe_all_chromosomes_20201008_bare.fa
# sjdbGTFfile=/rds/general/user/bheineik/home/genomes/pombe_20201008/Schizosaccharomyces_pombe_all_chromosomes_20201008_cellranger_exon.gtf
# genomeDir=/rds/general/user/bheineik/home/genomes/pombe_20201008_star
# fastqIn_R1=/rds/general/user/bheineik/home/rna_seq_data/20210315_pombe_ox_bulk/Unaligned/BMH_WT_20_cont_S18_R1_001.fastq.gz
# fastqIn_R2=/rds/general/user/bheineik/home/rna_seq_data/20210315_pombe_ox_bulk/Unaligned/BMH_WT_20_cont_S18_R1_001.fastq.gz
# mkdir WT_20_cont
# outFileNamePrefix=/rds/general/user/bheineik/home/rna_seq_data/20210315_pombe_ox_bulk/mapped/WT_20_cont/WT_20_cont_
# #readFilesCommand zcat - to uncompress .gz files.
# #outMultimapperOrder Random: Random order of alignments for multimappers. Default for future releases.
| StarcoderdataPython |
11465 | from app import db, login
from flask_login import UserMixin
from datetime import datetime
from flask import url_for, redirect
from werkzeug.security import generate_password_hash, check_password_hash
class users(UserMixin, db.Model):
    """Application user account with auth helpers and quiz high scores."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String(100), nullable=False, unique=True)
    password = db.Column(db.String(96), nullable=False)  # stores the werkzeug hash
    email = db.Column(db.String(128), nullable=False, unique=True)
    firstname = db.Column(db.String(130), nullable=False)
    lastname = db.Column(db.String(130), nullable=False)
    lastLogin = db.Column(db.DateTime)
    isActive = db.Column(db.Boolean)
    isAdmin = db.Column(db.Boolean)
    noteHighScore = db.Column(db.Integer)
    KeyHighScore = db.Column(db.Integer)
    # One-to-many: submissions created by this user (see submission.creater_id).
    submit = db.relationship("submission", backref="submitter")
    ###################################################
    def __init__(self):
        # New accounts start active, non-admin, never logged in, zero scores.
        self.isActive = True
        self.isAdmin = False
        self.noteHighScore = 0
        self.lastLogin = None
        self.KeyHighScore = 0
    def set_password(self, pwd):
        # Hash and store the password; never keep the plaintext.
        # NOTE(review): method="<PASSWORD>" looks like a redacted placeholder;
        # werkzeug expects a real method string such as "pbkdf2:sha256" -- confirm.
        self.password = generate_password_hash(pwd, method="<PASSWORD>")
    def check_password(self, pwd):
        """Return True if *pwd* matches the stored hash."""
        return check_password_hash(self.password, pwd)
    def is_active(self):
        # Overrides Flask-Login's UserMixin.is_active (a property there) with
        # the DB-backed flag.
        return self.isActive
    def validate(self):
        """Return True when all required profile fields are present."""
        if self.username and self.email and self.firstname and self.lastname:
            return True
        else:
            return False
    def getSubmissions(self):
        """Return all submissions created by this user."""
        res = submission.query.filter_by(creater_id=self.id).all()
        return res
    def __repr__(self):
        return '<user %r>' % self.username
class submission(db.Model):
    """A user's quiz/assessment submission; owns a set of answer rows."""
    __tablename__ = 'submission'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    createdAt = db.Column(db.DateTime, nullable=False)
    markedAt = db.Column(db.DateTime)      # when marking was completed, if ever
    feedback = db.Column(db.Boolean)       # whether feedback has been given
    totalmark = db.Column(db.Integer)      # total mark awarded, None until marked
    difficulty = db.Column(db.String(30), nullable=False)
    passed = db.Column(db.Boolean)
    creater_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
    # One-to-many: the per-question answers belonging to this submission.
    answers = db.relationship("answer", backref="submission")
    def __init__(self):
        # New submissions are timestamped now and start unmarked/unpassed.
        self.createdAt = datetime.utcnow()
        self.markedAt = None
        self.feedback = False
        self.totalmark = None
        # NOTE(review): 'marked' has no Column defined above, so this is a
        # transient (non-persisted) attribute -- confirm whether a column
        # is missing from the schema.
        self.marked = False
        self.passed = False
    def validate(self):
        """Return True when all required fields are present.

        Fixed to return False explicitly instead of implicitly returning
        None on the failure path (consistent with users.validate()).
        """
        if self.difficulty and self.creater_id and self.createdAt:
            return True
        return False
    def __repr__(self):
        return '<submission %r>' % self.id
class answer(db.Model):
    """A single answer within a submission, with optional marker feedback."""
    __tablename__ = 'answer'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    answerSeq = db.Column(db.Integer)            # question position within the submission
    submittedAnswer = db.Column(db.String(400))
    feedback = db.Column(db.String(400))         # marker's textual feedback, if any
    markreceived = db.Column(db.Boolean)
    submissionId = db.Column(db.Integer, db.ForeignKey("submission.id"))
    def __init__(self):
        # New answers start without feedback or a mark.
        self.feedback = None
        self.markreceived = False
    def validate(self):
        """Return True when all required fields are present; logs otherwise."""
        if self.answerSeq and self.submittedAnswer and self.submissionId:
            return True
        else:
            # NOTE(review): debug print left in; consider proper logging.
            print("missingfield")
            return False
    def __repr__(self):
        return '<ans>'
@login.user_loader
def load_user(usr_id):
    """Flask-Login hook: load a user by the (string) id stored in the session."""
    return users.query.get(int(usr_id))
@login.unauthorized_handler
def unauthorized():
    """Flask-Login hook: redirect anonymous users to the login page."""
    return redirect(url_for("auth.login"))
| StarcoderdataPython |
# Count and locate occurrences of the letter 'A' (case-insensitive) in a
# user-supplied phrase; positions are reported 1-based.
frase = str(input('Digite uma Frase: ')).strip() # strip leading/trailing spaces
print('A Letra A aperece {} vezes'.format(frase.upper().count('A')))
print('A posição que ela aparece 1° vez é {}'.format(frase.upper().find('A')+1)) # +1 converts the 0-based index to a 1-based position
print('A posição que ela aparece por último é {}'.format(frase.upper().rfind('A')+1)) # +1 converts the 0-based index to a 1-based position
| StarcoderdataPython |
3220446 | <reponame>sjamgade/python-socks<gh_stars>0
import curio.io
import curio.socket
from ... import _abc as abc
from ..._errors import ProxyError
DEFAULT_RECEIVE_SIZE = 65536
class CurioSocketStream(abc.AsyncSocketStream):
    """AsyncSocketStream adapter over a curio socket."""
    # Wrapped curio socket; set in __init__.
    _socket: curio.io.Socket = None
    def __init__(self, sock: curio.io.Socket):
        self._socket = sock
    async def write_all(self, data):
        """Send all of *data*, blocking (async) until fully written."""
        await self._socket.sendall(data)
    async def read(self, max_bytes=DEFAULT_RECEIVE_SIZE):
        """Receive up to *max_bytes*; may return fewer bytes."""
        return await self._socket.recv(max_bytes)
    async def read_exact(self, n):
        """Receive exactly *n* bytes, raising ProxyError if the peer closes
        early. Note: returns a bytearray (not bytes)."""
        data = bytearray()
        while len(data) < n:
            packet = await self._socket.recv(n - len(data))
            if not packet: # pragma: no cover
                raise ProxyError('Connection closed unexpectedly')
            data += packet
        return data
    async def close(self):
        """Close the underlying socket."""
        await self._socket.close()
| StarcoderdataPython |
1798075 | from django.contrib import sitemaps
from django.core.urlresolvers import reverse
class SupportPageSitemap(sitemaps.Sitemap):
    """Sitemap entries for the static marketing/support pages."""
    priority = 0.5
    changefreq = 'daily'
    def items(self):
        # URL pattern names; resolved to paths in location().
        return ['console', 'lp-designers', 'lp-creatives', 'lp-founders',
                'lp-startupweekend', 'lp-learning-to-code', 'lp-developers',
                'login', 'support', ]
    def location(self, item):
        # Each item is a named URL pattern, so reverse() yields its path.
        return reverse(item)
| StarcoderdataPython |
3399623 | <filename>python_script/extract_vertex_group.py
import bpy
import sys
import os
def find_max_group(weights):
    """Return the index of the largest value in *weights*.

    Ties keep the earliest index. An empty sequence -- or one whose values
    never exceed the implicit 0 baseline -- yields index 0.
    """
    best_index = 0
    best_weight = 0
    for index, weight in enumerate(weights):
        # Strict '>' preserves first-occurrence-wins on ties and keeps the
        # 0 baseline of the original implementation.
        if weight > best_weight:
            best_weight = weight
            best_index = index
    return best_index
def extractVertexGroup(from_path, to_path, hier_path):
    """Import an FBX, export it as an OBJ with vertex groups, and write the
    armature's bone hierarchy to *hier_path*.

    from_path: input .fbx file.
    to_path:   output .obj file (parent dirs created as needed).
    hier_path: output text file, one line per bone:
               "<bone> <parent|None> <flag>" where flag 0 marks root-level
               bones (no parent or parent named *root*) and 1 everything else.
    """
    # Derive parent directories from the '/'-separated paths and create them.
    to_dir = "/".join(to_path.split("/")[:-1])
    res_dir = "/".join(hier_path.split("/")[:-1])
    if not os.path.exists(to_dir):
        os.makedirs(to_dir)
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)
    # Clear the default scene objects before importing.
    # NOTE(review): assumes Blender's default startup scene (Cube/Camera/Light
    # present) -- raises KeyError otherwise; confirm intended usage.
    objs = bpy.data.objects
    objs.remove(objs["Cube"], do_unlink=True)
    objs.remove(objs["Camera"], do_unlink=True)
    objs.remove(objs["Light"], do_unlink=True)
    bpy.ops.import_scene.fbx(filepath=from_path)
    # NOTE(review): assumes the imported FBX contains an armature named
    # exactly 'Armature' -- confirm for all input files.
    real_armature = bpy.data.armatures['Armature']
    print(real_armature.bones.keys())
    with open(hier_path, "w") as f:
        for key in real_armature.bones.keys():
            if real_armature.bones[key].parent is None:
                f.write(key+" None 0\n")
            elif real_armature.bones[key].parent.name.find("root")!=-1:
                f.write(key+" "+real_armature.bones[key].parent.name+" 0\n")
            else:
                f.write(key+" "+real_armature.bones[key].parent.name+" 1\n")
    # keep_vertex_order preserves indices so vertex-group data stays aligned.
    bpy.ops.export_scene.obj(filepath=to_path, keep_vertex_order=True,use_vertex_groups=True)
# Blender passes script arguments after a "--" separator; take everything
# following it as (fbx_in, obj_out, hierarchy_out).
argv = sys.argv
argv = argv[argv.index("--") + 1:]
extractVertexGroup(argv[0], argv[1], argv[2])
3309354 | <reponame>WaveBlocks/WaveBlocks
"""The WaveBlocks Project
@author: <NAME>
@copyright: Copyright (C) 2010, 2011 <NAME>
@license: Modified BSD License
"""
from legend import legend
from color_map import color_map
from plotcf import plotcf
from stemcf import stemcf
from plotcm import plotcm
#try:
# from surfcf import surfcf
#except ImportError:
# pass
| StarcoderdataPython |
3335443 | <reponame>philipp01wagner/gym-pybullet-drones<filename>examples/test_straight_flight.py
import time
import gym
import numpy as np
import argparse
from stable_baselines3 import A2C, PPO, DDPG, SAC, TD3
from stable_baselines3.common.env_checker import check_env
import pybullet as p
from gym_pybullet_drones.envs.single_agent_rl.StraightFlightAviary import StraightFlightAviary
from gym_pybullet_drones.envs.BaseAviary import DroneModel, Physics
from gym_pybullet_drones.utils.utils import sync
from gym.envs.registration import register
from gym_pybullet_drones.utils.utils import sync, str2bool#
import matplotlib.pyplot as plt
def find_alg_name(alg):
    """Return the bare class name of a stable-baselines3 algorithm class.

    ``str(alg)`` renders as ``"<class 'pkg.mod.Name'>"``; the name is the
    text after the last dot with the trailing ``'>`` stripped.  ``str.rfind``
    replaces the original reversed-string index arithmetic and yields the
    same result whenever a dot is present (always true for a nested class
    repr).
    """
    s = str(alg)
    return s[s.rfind(".") + 1:-2]
if __name__ == "__main__":
    # CLI flags mirror the other gym-pybullet-drones example scripts; several
    # (trajectory, wind, record_video, gui) are parsed but unused below.
    parser = argparse.ArgumentParser(description='Helix flight script using CtrlAviary or VisionAviary and DSLPIDControl')
    parser.add_argument('--drone', default="ha", type=DroneModel, help='Drone model (default: CF2X)', metavar='', choices=DroneModel)
    parser.add_argument('--physics', default="pyb", type=Physics, help='Physics updates (default: PYB)', metavar='', choices=Physics)
    parser.add_argument('--gui', default=False, type=str2bool, help='Whether to use PyBullet GUI (default: True)', metavar='')
    parser.add_argument('--aggregate', default=True, type=str2bool, help='Whether to aggregate physics steps (default: True)', metavar='')
    parser.add_argument('--simulation_freq_hz', default=240, type=int, help='Simulation frequency in Hz (default: 240)', metavar='')
    parser.add_argument('--control_freq_hz', default=24, type=int, help='Control frequency in Hz (default: 48)', metavar='')
    parser.add_argument('--duration_sec', default=24, type=int, help='Duration of the simulation in seconds (default: 5)', metavar='')
    parser.add_argument('--trajectory', default=1, type=int, help='Trajectory type (default: 1)', metavar='')
    parser.add_argument('--wind', default=False, type=str2bool, help='Whether to enable wind (default: False)', metavar='')
    parser.add_argument('--record_video', default=False, type=str2bool, help='Whether to record a video (default: False)', metavar='')
    parser.add_argument('--policy_path', default="", type=str, help='path to the policy zip file', metavar='')
    ARGS = parser.parse_args()
    policy_name = ARGS.policy_path
    # Pick the algorithm class whose name appears in the policy filename;
    # raises IndexError if none matches.
    algs = [A2C, PPO, SAC, DDPG, TD3]
    algorithm = [a for a in algs if find_alg_name(a).lower() in policy_name][0]
    H = 1.0
    R = .3
    # Single drone spawned at height H with zero roll/pitch/yaw.
    INIT_XYZS = np.array([[0, 0, H]])
    INIT_RPYS = np.array([[0, 0, 0]])
    # Number of physics sub-steps executed per control step.
    AGGR_PHY_STEPS = int(ARGS.simulation_freq_hz/ARGS.control_freq_hz) if ARGS.aggregate else 1
    env = StraightFlightAviary(gui=True,
                               record=False,
                               initial_xyzs=INIT_XYZS,
                               initial_rpys=INIT_RPYS,
                               physics=ARGS.physics,
                               freq=ARGS.simulation_freq_hz,
                               aggregate_phy_steps=AGGR_PHY_STEPS,
                               duration_sec=ARGS.duration_sec
                               )
    PYB_CLIENT = env.getPyBulletClient()
    # Zero gravity plus a decorative obstacle in the flight path.
    p.setGravity(0,0,0)
    p.loadURDF("duck_vhacd.urdf", [1.0, 0, 0.2], p.getQuaternionFromEuler([0,0,0]), physicsClientId=PYB_CLIENT)
    model = algorithm.load(policy_name)
    # NOTE(review): env.reset() is commented out, so the first step() runs
    # with a zero action before any observation exists — confirm intended.
    #obs = env.reset()
    action = np.array([0,0])
    start = time.time()
    rew = []
    for i in range(ARGS.duration_sec*ARGS.control_freq_hz):
        obs, reward, done, info = env.step(action)
        if i%env.SIM_FREQ == 0:
            env.render()
            print(done)
        # Keep wall-clock time in sync with simulated time.
        sync(i, start, env.TIMESTEP)
        if done:
            print("DONE")
            obs = env.reset()
        action, _states = model.predict(obs,
                                        deterministic=True
                                        )
        print("X: ", obs[0])
        print("Reward: ", reward)
        rew.append(reward)
    env.close()
    # Plot the per-step reward trace of the rollout.
    plt.plot(list(range(len(rew))), rew)
    plt.show()
| StarcoderdataPython |
110965 | <reponame>anna-ka/segmentation.evaluation<filename>src/python/main/segeval/window/Pk.py
'''
Implementation of the Pk segmentation evaluation metric described in
[BeefermanBerger1999]_
@author: <NAME>
@contact: <EMAIL>
'''
#===============================================================================
# Copyright (c) 2011-2012, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
from decimal import Decimal
from . import compute_window_size, parser_one_minus_support
from .. import SegmentationMetricError, compute_pairwise, \
convert_masses_to_positions, compute_pairwise_values, create_tsv_rows
from ..data import load_file
from ..data.TSV import write_tsv
from ..data.Display import render_mean_values
def pk(hypothesis_positions, reference_positions, window_size=None,
       one_minus=False, convert_from_masses=False):
    '''
    Calculates the Pk segmentation evaluation metric score for a
    hypothetical segmentation against a reference segmentation for a given
    window size. The standard method of calculating the window size
    is performed if a window size is not specified.
    :param hypothesis_positions: Hypothesis segmentation section labels
                                    sequence.
    :param reference_positions: Reference segmentation section labels sequence.
    :param window_size: The size of the window that is slid over the \
                                    two segmentations used to count mismatches \
                                    (default is None and will use the average \
                                    window size)
    :param one_minus: Return 1-Pk to make it no longer a \
                                    penalty-metric.
    :param convert_from_masses: Convert the segmentations provided from \
                                    masses into positions.
    :type hypothesis_positions: list
    :type reference_positions: list
    :type window_size: int
    :type one_minus: bool
    :type convert_from_masses: bool
    :returns: Pk (or 1-Pk) as a :class:`decimal.Decimal` in [0, 1].
    :raises SegmentationMetricError: if the segmentations differ in length.
    .. note:: See :func:`segeval.convert_masses_to_positions` for an example of
        the input format.
    '''
    # pylint: disable=C0103
    # Convert from masses into positions
    if convert_from_masses:
        reference_positions = convert_masses_to_positions(reference_positions)
        hypothesis_positions = convert_masses_to_positions(hypothesis_positions)
    # Check for input errors
    if len(reference_positions) != len(hypothesis_positions):
        raise SegmentationMetricError(
            'Reference and hypothesis segmentations differ in length.')
    # Compute window size to use if unspecified
    if window_size is None:
        window_size = compute_window_size(reference_positions)
    sum_differences = 0
    measurements = 0
    # Slide a probe window of window_size + 1 units over both segmentations
    # and count windows where they disagree on whether a boundary falls
    # inside.  NOTE: the original used the Python-2-only xrange(); range()
    # is equivalent and also works on Python 3.
    for i in range(0, len(reference_positions) - window_size):
        # Create probe windows with k boundaries inside
        window_ref = reference_positions[i:i + window_size + 1]
        window_hyp = hypothesis_positions[i:i + window_size + 1]
        # A window "agrees" when both ends carry the same section label,
        # i.e. no boundary occurs within the window.
        agree_ref = window_ref[0] == window_ref[-1]
        agree_hyp = window_hyp[0] == window_hyp[-1]
        # Penalize windows where reference and hypothesis disagree
        if agree_ref != agree_hyp:
            sum_differences += 1
        measurements += 1
    # Perform final division
    p_k = Decimal(sum_differences) / measurements
    if not one_minus:
        return p_k
    return Decimal('1.0') - p_k
def pairwise_pk(dataset_masses, one_minus=False, convert_from_masses=True):
    '''
    Calculate the mean pairwise Pk over all coder pairs in a dataset.
    .. seealso:: :func:`pk`
    .. seealso:: :func:`segeval.compute_pairwise`
    :param dataset_masses: Segmentation mass dataset (including multiple \
                           codings).
    :type dataset_masses: dict
    :returns: Mean, standard deviation, variance, and standard error of a \
              segmentation metric.
    :rtype: :class:`decimal.Decimal`, :class:`decimal.Decimal`, \
        :class:`decimal.Decimal`, :class:`decimal.Decimal`
    '''
    def metric(hypothesis_masses, reference_masses):
        '''Bind the fixed keyword parameters for :func:`pk`.'''
        return pk(hypothesis_masses, reference_masses,
                  one_minus=one_minus,
                  convert_from_masses=convert_from_masses)
    return compute_pairwise(dataset_masses, metric, permuted=True)
# Human-readable metric labels used for CLI help text and TSV headers.
OUTPUT_NAME = 'Mean Pk'
SHORT_NAME = 'Pk'
def values_pk(dataset_masses, name, one_minus):
    '''
    Produce the header and rows of a TSV for this metric.
    :param dataset_masses: segmentation mass dataset (multiple codings).
    :param name: column label for the metric value.
    :param one_minus: whether to report 1-Pk instead of Pk.
    '''
    def metric(hypothesis_masses, reference_masses):
        '''Bind the fixed keyword parameters for :func:`pk`.'''
        return pk(hypothesis_masses, reference_masses,
                  one_minus=one_minus, convert_from_masses=True)
    pairwise_values = compute_pairwise_values(dataset_masses, metric,
                                              permuted=True)
    return create_tsv_rows(['coder1', 'coder2', name], pairwise_values)
def parse(args):
    '''
    Parse this module's metric arguments and perform requested actions.
    Writes a TSV when an output file is given; otherwise returns a rendered
    summary string of the mean pairwise metric.
    :param args: parsed command-line arguments as a dict-like mapping.
    :returns: rendered summary string, or None when a TSV was written.
    '''
    output = None
    values = load_file(args)[0]
    one_minus = args['oneminus']
    name = SHORT_NAME
    if one_minus:
        name = '1 - %s' % name
    # Is a TSV requested?  (PEP 8: identity comparison with None, not "!=".)
    if args['output'] is not None:
        # Create a TSV
        output_file = args['output'][0]
        header, rows = values_pk(values, name, one_minus)
        write_tsv(output_file, header, rows)
    else:
        # Create a string to output
        mean, std, var, stderr = pairwise_pk(values, one_minus)
        output = render_mean_values(name, mean, std, var, stderr)
    return output
def create_parser(subparsers):
    '''
    Setup a command line parser for this module's metric.
    Registers the "pk" subcommand with file-input and one-minus options and
    wires its handler to :func:`parse`.
    '''
    # Function-level import — presumably to avoid an import cycle with
    # ..data; TODO confirm before hoisting to module level.
    from ..data import parser_add_file_support
    parser = subparsers.add_parser('pk',
                                   help=OUTPUT_NAME)
    parser_add_file_support(parser)
    parser_one_minus_support(parser)
    parser.set_defaults(func=parse)
| StarcoderdataPython |
131050 | <reponame>ninarina12/e3nn<gh_stars>100-1000
from typing import Tuple
import torch
def direct_sum(*matrices):
    r"""Direct sum of matrices, put them in the diagonal

    Builds a block-diagonal tensor from the trailing two dimensions of the
    inputs; any shared leading (batch) dimensions are taken from the first
    matrix.  Off-diagonal blocks are zero.
    """
    batch_shape = matrices[0].shape[:-2]
    rows = sum(mat.size(-2) for mat in matrices)
    cols = sum(mat.size(-1) for mat in matrices)
    out = matrices[0].new_zeros(list(batch_shape) + [rows, cols])
    row = 0
    col = 0
    for mat in matrices:
        r, c = mat.shape[-2:]
        out[..., row: row + r, col: col + c] = mat
        row += r
        col += c
    return out
@torch.jit.script
def orthonormalize(
    original: torch.Tensor,
    eps: float = 1e-9
) -> Tuple[torch.Tensor, torch.Tensor]:
    r"""orthonomalize vectors

    Gram-Schmidt orthonormalization of the rows of ``original``.  Rows that
    become numerically zero after projection are dropped, so ``final`` may
    have fewer rows than ``original``.

    Parameters
    ----------
    original : `torch.Tensor`
        list of the original vectors :math:`x`
    eps : float
        a small number
    Returns
    -------
    final : `torch.Tensor`
        list of orthonomalized vectors :math:`y`
    matrix : `torch.Tensor`
        the matrix :math:`A` such that :math:`y = A x`
    """
    assert original.dim() == 2
    dim = original.shape[1]
    final = []
    matrix = []
    for i, x in enumerate(original):
        # cx tracks the coefficients expressing x in terms of the original rows.
        cx = x.new_zeros(len(original))
        cx[i] = 1
        # Subtract the projection of x onto every vector accepted so far,
        # mirroring the same update on the coefficient vector.
        for j, y in enumerate(final):
            c = torch.dot(x, y)
            x = x - c * y
            cx = cx - c * matrix[j]
        # Keep the vector only if it is not numerically linearly dependent.
        if x.norm() > 2 * eps:
            c = 1 / x.norm()
            x = c * x
            cx = c * cx
            # Snap near-zero components to exactly zero.
            x[x.abs() < eps] = 0
            cx[cx.abs() < eps] = 0
            # Fix the sign so the first nonzero component is positive.
            c = x[x.nonzero()[0, 0]].sign()
            x = c * x
            cx = c * cx
            final += [x]
            matrix += [cx]
    final = torch.stack(final) if len(final) > 0 else original.new_zeros((0, dim))
    matrix = torch.stack(matrix) if len(matrix) > 0 else original.new_zeros((0, len(original)))
    return final, matrix
@torch.jit.script
def complete_basis(
    vecs: torch.Tensor,
    eps: float = 1e-9
) -> torch.Tensor:
    """Complete the rows of ``vecs`` to a basis of the full space.

    Returns only the *added* vectors: unit vectors obtained by projecting the
    canonical basis vectors orthogonally to the rows of ``vecs`` (and to each
    other).  Returns an empty ``(0, dim)`` tensor when ``vecs`` already spans
    the space.
    """
    assert vecs.dim() == 2
    dim = vecs.shape[1]
    # Normalize the given rows once; they are only used for projections.
    base = [x / x.norm() for x in vecs]
    expand = []
    # Try each canonical basis vector; keep the component orthogonal to
    # everything collected so far.
    for x in torch.eye(dim, device=vecs.device, dtype=vecs.dtype):
        for y in base + expand:
            x -= torch.dot(x, y) * y
        # Accept only vectors that are not numerically dependent.
        if x.norm() > 2 * eps:
            x /= x.norm()
            # Snap near-zero entries to zero and fix the overall sign.
            x[x.abs() < eps] = x.new_zeros(())
            x *= x[x.nonzero()[0, 0]].sign()
            expand += [x]
    expand = torch.stack(expand) if len(expand) > 0 else vecs.new_zeros(0, dim)
    return expand
| StarcoderdataPython |
3350423 | #!/usr/bin/env python
from setuptools import setup
from pip.req import parse_requirements
def local_requirements(path='./requirements.txt'):
    """Return the requirement specifiers listed in *path*.

    Replaces the original use of the private ``pip.req.parse_requirements``
    API, which grew a mandatory ``session`` argument and was removed
    entirely in pip 10, breaking installation.  Blank lines and ``#``
    comment lines are skipped.

    :param path: requirements file to read (default: ./requirements.txt).
                 Parameter added for testability; the default preserves the
                 original zero-argument call.
    """
    with open(path) as req_file:
        lines = (line.strip() for line in req_file)
        return [line for line in lines if line and not line.startswith('#')]
# Package metadata; install_requires is resolved at build time from
# requirements.txt via local_requirements().
setup(name='steamapi',
      version='0.1',
      description='An object-oriented Python 2.7+ library for accessing the Steam Web API',
      url='https://github.com/smiley/steamapi',
      author='Smiley',
      author_email='',
      license='MIT',
      packages=['steamapi'],
      install_requires=local_requirements(),
      zip_safe=False)
| StarcoderdataPython |
1722489 | <reponame>BerlinRDT/roaddetection<filename>src/data/download_raw.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from google.cloud import storage
from google.cloud.exceptions import NotFound
import os.path
local_images_dir = 'data/raw/images/'
def download(blob):
    """Download a GCS blob into local_images_dir, skipping files already present.

    The local filename is the last path component of the blob name.
    """
    remote_name = blob.name
    local_path = local_images_dir + remote_name.rsplit('/', 1)[1]
    # Skip blobs that were already fetched in a previous run.
    if os.path.isfile(local_path):
        print('Image already {} exists. Skipping download'.
              format(local_path))
        return
    blob.download_to_filename(local_path)
    print('Image {} downloaded to {}.'.format(
        remote_name,
        local_path))
def main():
    """Download every '*Visual.tif' blob from the 'satellite_images' GCS bucket."""
    client = storage.Client()
    # Ensure the local destination directory exists before downloading.
    if not os.path.exists(local_images_dir):
        os.makedirs(local_images_dir)
    try:
        bucket = client.get_bucket('satellite_images')
        blobs = bucket.list_blobs()
        # List comprehension used purely for its download side effect.
        [download(blob) for blob in blobs if "Visual.tif" in blob.name]
    except NotFound:
        print('Sorry, that bucket does not exist!')
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1685917 | import sys
import glob
import unittest
def create_test_suite():
    """Collect every tests/test_*.py module into a single unittest suite.

    File paths are mapped to dotted module names
    ('tests/test_foo.py' -> 'tests.test_foo') and loaded via the default
    test loader.  The original loop variable shadowed the builtin ``str``;
    renamed here.
    """
    test_file_strings = glob.glob('tests/test_*.py')
    module_strings = ['tests.' + path[6:-3] for path in test_file_strings]
    suites = [unittest.defaultTestLoader.loadTestsFromName(name)
              for name in module_strings]
    return unittest.TestSuite(suites)
# Run the collected suite; TextTestRunner prints its report to stderr.
testSuite = create_test_suite()
test_runner = unittest.TextTestRunner().run(testSuite)
# Mirror the result in the process exit status so CI can detect failures.
if len(test_runner.failures) == 0 and len(test_runner.errors) == 0:
    sys.exit(0)
else:
    sys.exit(1)
| StarcoderdataPython |
3337947 | <reponame>software-mansion/protostar
from collections import OrderedDict
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional, cast
import flatdict
import tomli
import tomli_w
from protostar.commands.test.expected_event import collect_immediate_subdirectories
from protostar.protostar_exception import ProtostarException
from protostar.utils.protostar_directory import VersionManager
class NoProtostarProjectFoundException(ProtostarException):
    """Raised when no protostar.toml exists in the working directory."""
    pass
class VersionNotSupportedException(ProtostarException):
    """Raised when protostar.toml requires a newer Protostar than this build."""
    pass
@dataclass
class ProtostarConfig:
    """Contents of the [protostar.config] section of protostar.toml."""
    # Version of Protostar that wrote the configuration file.
    protostar_version: str = field(default="0.1.0")
@dataclass
class ProjectConfig:
    """Contents of the [protostar.project] and [protostar.contracts] sections."""
    # Directory for third-party dependencies, relative to the project root.
    libs_path: str = field(default="./lib")
    # Contract name -> list of its source files.
    contracts: Dict[str, List[str]] = field(
        default_factory=lambda: {"main": ["./src/main.cairo"]}
    )
class Project:
    """Reads and writes a Protostar project's protostar.toml configuration.

    The TOML file holds three sections: protostar.config (tool metadata),
    protostar.project (general settings) and protostar.contracts
    (contract -> sources map).  Parsed sections are cached on the instance.
    """
    def __init__(
        self, version_manager: VersionManager, project_root: Optional[Path] = None
    ):
        """Create a project rooted at *project_root* (defaults to the CWD)."""
        self.project_root = project_root or Path()
        self.shared_command_configs_section_name = "shared_command_configs"
        # Lazily-populated caches for the parsed configuration.
        self._project_config = None
        self._protostar_config = None
        self._version_manager = version_manager
        self._config_dict: Optional[Dict[str, Any]] = None
    @property
    def libs_path(self) -> Path:
        """Absolute path to the project's dependency directory."""
        return self.project_root.resolve() / self.config.libs_path
    @property
    def config(self) -> ProjectConfig:
        """Project configuration, loading protostar.toml on first access."""
        if not self._project_config:
            self.load_config()
        return cast(ProjectConfig, self._project_config)
    @property
    def config_path(self) -> Path:
        """Path to the project's protostar.toml file."""
        return self.project_root / "protostar.toml"
    @property
    def ordered_dict(self):
        """Configuration as an OrderedDict keyed by TOML section names.

        NOTE(review): assumes ``_protostar_config`` is already populated
        (write_config/load_config ran first) — confirm before other use.
        """
        general = OrderedDict(**self.config.__dict__)
        # Contracts are emitted as their own section, not under "project".
        general.pop("contracts")
        result = OrderedDict()
        result["protostar.config"] = OrderedDict(self._protostar_config.__dict__)
        result["protostar.project"] = general
        result["protostar.contracts"] = self.config.contracts
        return result
    def get_include_paths(self) -> List[str]:
        """Return search paths: project root, libs dir, and its immediate subdirs."""
        libs_path = Path(self.project_root, self.config.libs_path)
        return [
            str(self.project_root),
            str(libs_path),
            *collect_immediate_subdirectories(libs_path),
        ]
    def write_config(self, project_config: ProjectConfig):
        """Persist *project_config* to protostar.toml, stamping the tool version."""
        self._project_config = project_config
        self._protostar_config = ProtostarConfig(
            protostar_version=str(self._version_manager.protostar_version)
        )
        # tomli_w writes binary TOML output.
        with open(self.config_path, "wb") as file:
            tomli_w.dump(self.ordered_dict, file)
    def load_argument(
        self, section_name: str, attribute_name: str, profile_name: Optional[str] = None
    ) -> Optional[Any]:
        """Look up a single attribute from protostar.toml.

        *section_name* is given without the "protostar." prefix; when
        *profile_name* is set, the lookup targets the
        "profile.<name>.protostar.<section>" section instead.  Attribute
        names are retried with dashes/underscores swapped.  Returns None
        when the file, section, or attribute is missing.
        """
        assert not section_name.startswith("protostar.")
        # Parse and cache the raw TOML on first use.
        if not self._config_dict:
            try:
                with open(self.config_path, "rb") as config_file:
                    self._config_dict = tomli.load(config_file)
            except FileNotFoundError:
                return None
        flat_config = flatdict.FlatDict(self._config_dict, delimiter=".")
        section_name = f"protostar.{section_name}"
        if profile_name:
            section_name = f"profile.{profile_name}.{section_name}"
        if section_name not in flat_config:
            return None
        section_config = flat_config[section_name]
        # Accept either dash- or underscore-separated attribute spellings.
        if attribute_name not in section_config:
            attribute_name = attribute_name.replace("-", "_")
        if attribute_name not in section_config:
            attribute_name = attribute_name.replace("_", "-")
        if attribute_name not in section_config:
            return None
        return section_config[attribute_name]
    def load_config(self) -> "ProjectConfig":
        """Parse protostar.toml into the cached config objects.

        :raises NoProtostarProjectFoundException: when protostar.toml is absent.
        :raises VersionNotSupportedException: when the file was written by a
            newer Protostar than the running build.
        """
        if not self.config_path.is_file():
            raise NoProtostarProjectFoundException(
                "No protostar.toml found in the working directory"
            )
        with open(self.config_path, "rb") as config_file:
            parsed_config = tomli.load(config_file)
        # Merge the project section with the contracts section into one
        # flat kwargs dict for ProjectConfig.
        flat_config = {
            **parsed_config["protostar.project"],
            "contracts": parsed_config["protostar.contracts"],
        }
        self._project_config = ProjectConfig(**flat_config)
        self._protostar_config = ProtostarConfig(
            **parsed_config["protostar.config"],
        )
        config_protostar_version = self._version_manager.parse(
            self._protostar_config.protostar_version
        )
        # An unknown build version falls back to 99.99.99, i.e. "supports
        # everything", so the check below never fires in that case.
        if (
            self._version_manager.protostar_version
            or VersionManager.parse("99.99.99")
        ) < config_protostar_version:
            raise VersionNotSupportedException(
                (
                    # pylint: disable=line-too-long
                    f"Current Protostar build ({self._version_manager.protostar_version}) doesn't support protostar_version {config_protostar_version}\n"
                    "Try upgrading protostar by running: protostar upgrade"
                )
            )
        return self._project_config
    def load_protostar_config(self) -> ProtostarConfig:
        """Parse only the [protostar.config] section of protostar.toml.

        :raises NoProtostarProjectFoundException: when protostar.toml is absent.
        """
        if not self.config_path.is_file():
            raise NoProtostarProjectFoundException(
                "No protostar.toml found in the working directory"
            )
        with open(self.config_path, "rb") as config_file:
            parsed_config = tomli.load(config_file)
        self._protostar_config = ProtostarConfig(
            **parsed_config["protostar.config"]
        )
        return self._protostar_config
| StarcoderdataPython |
181781 | import pickle
import torch
import trimesh
from .util import set_module, create_quads
@set_module('deep_surfel')
def export_mesh(file, deep_surfel_scene, only_filled=False, features_as_colors=False, surfel_transformation=None):
    """Export a DeepSurfel scene as a quad mesh (one quad per surfel) via trimesh.

    :param file: destination path/handle accepted by trimesh's mesh.export.
    :param deep_surfel_scene: scene providing locations, orientations,
        features, counts and surfel_size.
    :param only_filled: additionally drop surfels with a zero sample count.
    :param features_as_colors: use the first three feature channels as vertex
        colors (presumably RGB — TODO confirm feature layout); otherwise a
        constant gray (127) is used.
    :param surfel_transformation: optional callable applied to the color
        tensor before export.
    """
    # Surfels with an infinite coordinate are placeholders; keep the rest.
    inside_inds = ~torch.isinf(deep_surfel_scene.locations).any(-1)
    if only_filled:
        inside_inds = inside_inds & (deep_surfel_scene.counts > 0)
    surfel_loc = deep_surfel_scene.locations[inside_inds]
    if features_as_colors:
        s_colors = deep_surfel_scene.features[inside_inds][..., :3]
        if surfel_transformation is not None:
            s_colors = surfel_transformation(s_colors)
    else:
        s_colors = torch.ones_like(surfel_loc) * 127
    surfel_orientations = deep_surfel_scene.orientations[inside_inds]
    s_vertices, s_faces = create_quads(surfel_loc, surfel_orientations, deep_surfel_scene.surfel_size)
    # Each surfel contributes 4 vertices, so per-surfel attributes are
    # repeated 4x.  NOTE(review): vertex_normals is fed vertex *positions*,
    # not the surfel orientations — confirm whether this is intentional.
    mesh = trimesh.Trimesh(
        vertices=s_vertices.cpu().numpy(),
        faces=s_faces.cpu().numpy(),
        vertex_normals=s_vertices.repeat_interleave(4, dim=0).cpu().numpy(),
        vertex_colors=s_colors.repeat_interleave(4, dim=0).cpu().numpy()
    )
    mesh.export(file)
@set_module('deep_surfel')
def save(file, scene):
    """Serialize *scene* with pickle, appending a '.dsurf' extension if missing."""
    target = file if file.endswith('.dsurf') else f'{file}.dsurf'
    with open(target, 'wb') as handle:
        pickle.dump(scene, handle, protocol=pickle.HIGHEST_PROTOCOL)
@set_module('deep_surfel')
def load(file):
    """Deserialize and return a scene written by :func:`save`."""
    target = file if file.endswith('.dsurf') else f'{file}.dsurf'
    with open(target, 'rb') as handle:
        return pickle.load(handle)
@set_module('deep_surfel')
def save_sdf(dst_file, sdf, scale, translation):
    """Pickle an (sdf, scale, translation) triple, appending '.sdf' if missing."""
    target = dst_file if dst_file.endswith('.sdf') else f'{dst_file}.sdf'
    with open(target, 'wb') as handle:
        pickle.dump((sdf, scale, translation), handle, protocol=pickle.HIGHEST_PROTOCOL)
@set_module('deep_surfel')
def load_sdf(src_file):
    """Load and return the (sdf, scale, translation) triple written by :func:`save_sdf`."""
    target = src_file if src_file.endswith('.sdf') else f'{src_file}.sdf'
    with open(target, 'rb') as handle:
        return pickle.load(handle)
| StarcoderdataPython |
1793097 | <reponame>WilliamMayor/scytale.xyz<gh_stars>1-10
from scytale.ciphers.base import Cipher
from scytale.exceptions import ScytaleError
class RailFence(Cipher):
name = "RailFence"
default = 5
def __init__(self, key=None):
self.key = self.validate(key)
def validate(self, key):
if key is None:
key = self.default
try:
return int(key)
except:
raise ScytaleError("The Rail Fence key should be a number")
def fence(self, text):
fence = [[None] * len(text) for n in range(self.key)]
rails = list(range(self.key - 1)) + list(range(self.key - 1, 0, -1))
for n, x in enumerate(text):
fence[rails[n % len(rails)]][n] = x
return [c for rail in fence for c in rail if c is not None]
def encrypt(self, plaintext):
plaintext = self.clean(plaintext.upper())
return "".join(self.fence(plaintext))
def decrypt(self, ciphertext):
rng = range(len(ciphertext))
pos = self.fence(rng)
return "".join(ciphertext[pos.index(n)] for n in rng)
| StarcoderdataPython |
4826267 | import requests
import json
import csv
import time
from html.parser import HTMLParser
timeout = 10
class ThesisHTMLParser(HTMLParser):
def __init__(self, url, query = None) :
super().__init__()
self.fields = {}
self.fields['url'] = url
self.fields['query'] = query
self.fields['keywords'] = []
trying = True
while trying :
try :
self.resp = requests.get(url)
trying = False
except :
print(f'Failed to connect... retrying in {timeout}s...')
time.sleep(timeout)
timeout+=10
self.feed(self.resp.text)
def handle_starttag(self, tag, attrs):
if tag == 'meta':
if attrs[0] == ('name', 'DC.title') and attrs[2] == ('xml:lang', 'pt-br'):
self.fields['title'] = attrs[1][1]
if attrs[0] == ('name', 'DCTERMS.issued') and attrs[2] == ('xml:lang', 'pt-br'):
self.fields['date'] = attrs[1][1]
if attrs[0] == ('name', 'DC.creator') and attrs[2] == ('xml:lang', 'pt-br'):
self.fields['author'] = attrs[1][1]
if attrs[0] == ('name', 'DC.contributor') and attrs[2] == ('xml:lang', 'pt-br'):
self.fields['advisor'] = attrs[1][1]
if attrs[0] == ('name', 'DCTERMS.abstract') and attrs[2] == ('xml:lang', 'pt-br'):
self.fields['abstract'] = attrs[1][1]
if attrs[0] == ('name', 'DC.subject') and attrs[2] == ('xml:lang', 'pt-br'):
cur_keys = [s.strip().lower() for s in attrs[1][1].split(';')]
for k in cur_keys :
self.fields['keywords'].append(k)
if attrs[0] == ('name','citation_pdf_url') :
self.fields['pdf_url'] = attrs[1][1]
if attrs[0] == ('name', 'citation_doi') :
self.fields['doi'] = attrs[1][1]
#def handle_endtag(self, tag):
# print("End tag :", tag)
#def handle_data(self, data):
# print("Data :", data)
#def handle_comment(self, data):
# print("Comment :", data)
#def handle_entityref(self, name):
# c = chr(name2codepoint[name])
# print("Named ent:", c)
#def handle_charref(self, name):
# if name.startswith('x'):
# c = chr(int(name[1:], 16))
# else:
# c = chr(int(name))
# print("Num ent :", c)
#def handle_decl(self, data):
# print("Decl :", data)
@property
def match_query(self) :
return self.fields['query'] in self.fields['keywords']
@property
def return_fields(self):
return self.fields.keys()
def return_fields_as_str_list(self, list_of_fields = None, clear_newline = True) :
ret_list = []
if list_of_fields is None :
list_of_fields = self.fields.keys()
for f in list_of_fields :
s = self.fields[f]
if f == 'keywords' :
s = ','.join(self.fields[f])
if clear_newline :
s = s.replace('\n', ' ')
ret_list.append(s)
return ret_list
def __str__(self) :
return json.dumps(self.fields, indent = 4, ensure_ascii=False).encode('utf-8').decode()
class Crawler :
def __init__(self) :
self.entries = []
def query_by_keyword(self, keyword, n = 10) :
_keyword = keyword.replace(' ', '%20')
page = 1
ret_list = []
last_page = 10
while len(ret_list) < n and last_page == 10:
key_query = f"https://www.teses.usp.br/index.php?option=com_jumi&fileid=19&Itemid=87&lang=pt-br&g=1&b0={_keyword}&c0=p&o0=AND&pagina={page}"
trying = True
while trying :
try :
resp = requests.get(key_query)
trying = False
except :
print(f'Failed to connect... retrying in {timeout}s...')
time.sleep(timeout)
timeout+=10
lines = resp.text.split('\n')
last_page = 0
for line in lines :
if line.find('<div class="dadosDocNome"><a href=') == 0:
last_page += 1
url = line.split('"')[3]
cur_thesis = ThesisHTMLParser(url, keyword)
if cur_thesis.match_query :
ret_list.append(cur_thesis)
if len(ret_list) >= n :
break
print(f" Added {len(ret_list)}/{n}")
page += 1
return ret_list
def run(self, keyword_list, entries_per_keyword = 20) :
for key in keyword_list :
print(f"Trying {key}")
cur_list = self.query_by_keyword(key, n = entries_per_keyword)
self.entries.extend(cur_list)
def save_as_csv(self, output, field_list, delimiter = ';', quotechar = '"', clear_newline = True) :
with open(output, 'a') as csv_file :
csv_writer = csv.writer(csv_file, delimiter = delimiter, quotechar = quotechar, quoting = csv.QUOTE_MINIMAL)
for item in self.entries :
cur_list = item.return_fields_as_str_list(field_list, clear_newline)
csv_writer.writerow(cur_list)
| StarcoderdataPython |
1687976 | <gh_stars>0
#!/usr/bin/env python3
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://sam.zoy.org/wtfpl/COPYING for more details.
import traceback, os
try:
#### IMPORTING ####
from lib.Hydrus import Py2To3, Exceptions, Constants as HC,\
Data, Paths, Globals as HG, Logger
import locale, sys, time, threading, argparser, wx
try:
from twisted.internet import reactor
except:
HG.twisted_is_broke = True
#### DEBUG ####
try: locale.setlocale( locale.LC_ALL, '' )
except: pass
#### EXECUTION ####
result = SetupArgParser()
db_dir = SetupDirectories()
HG.no_daemons = result.no_daemons
HG.no_wal = result.no_wal
HG.no_db_temp_files = result.no_db_temp_files
run(result.program)
except ( Exceptions.InsufficientCredentialsException, Exceptions.ShutdownException ) as e:
Data.Print( e )
except Exception as e:
print( traceback.format_exc() )
if 'db_dir' in locals() and os.path.exists( db_dir ):
dest_path = os.path.join( db_dir, 'crash.log' )
with open( dest_path, 'w', encoding = 'utf-8' ) as f:
f.write( traceback.format_exc() )
print( 'Critical error occurred! Details written to crash.log!' )
#### FUNCTIONS ####
def run( program: str ):
if program == 'server':
Py2To3.do_2to3_test()
from lib.Server import Controller
action = result.action
action = Controller.ProcessStartingAction( db_dir, action )
if program == 'client':
Py2To3.do_2to3_test( wx_error_display_callable = wx.SafeShowMessage )
from lib.Client import Controller
with Logger.Logger( db_dir, program ) as logger:
try:
if program == 'server' && action in ( 'stop', 'restart' ):
Controller.ShutdownSiblingInstance( db_dir )
if program == 'client' || program == 'server' && action in ( 'start', 'restart'):
Data.Print( f'hydrus {program} started' )
if not HG.twisted_is_broke: threading.Thread( target = reactor.run, name = 'twisted', kwargs = { 'installSignalHandlers' : 0 } ).start()
controller = Controller.Controller( db_dir )
controller.run()
except:
Data.Print( f'hydrus {program} failed' )
Data.Print( traceback.format_exc() )
finally:
HG.view_shutdown = True
HG.model_shutdown = True
try:
controller.pubimmediate( 'wake_daemons' )
except:
Data.Print( traceback.format_exc() )
reactor.callFromThread( reactor.stop )
Data.Print( f'hydrus {program} shut down' )
HG.shutdown_complete = True
if HG.restart:
Data.RestartProcess()
def SetupDirectories():
db_dir = HC.DEFAULT_DB_DIR if result.db_dir is None else db_dir = result.db_dir
if not Paths.DirectoryIsWritable( db_dir ) or HC.RUNNING_FROM_OSX_APP:
db_dir = HC.USERPATH_DB_DIR
db_dir = Paths.ConvertPortablePathToAbsPath( db_dir, HC.BASE_DIR )
try:
Paths.MakeSureDirectoryExists( db_dir )
except:
raise Exception( 'Could not ensure db path ' + db_dir + ' exists! Check the location is correct and that you have permission to write to it!' )
if result.temp_dir is not None:
if not os.path.exists( result.temp_dir ):
raise Exception( 'The given temp directory, "{}", does not exist!'.format( result.temp_dir ) )
if HC.PLATFORM_WINDOWS:
os.environ[ 'TEMP' ] = result.temp_dir
os.environ[ 'TMP' ] = result.temp_dir
else:
os.environ[ 'TMPDIR' ] = result.temp_dir
return db_dir
def SetupArgParser():
argparser = argparse.ArgumentParser( description = 'hydrus network launcher', usage = '%(prog)s client|server [OPTIONS]')
argsubparsers = argparser.add_subparsers( dest = 'program' )
clientparser = argsubparsers.add_parser( 'client', help = 'client help' )
serverparser = argsubparsers.add_parser( 'server', help = 'server help' )
serverparser.add_argument( 'action', default = 'start', nargs = '?', choices = [ 'start', 'stop', 'restart' ], help = 'either start this server (default), or stop an existing server, or both' )
argparser.add_argument( '-d', '--db_dir', help = 'set an external db location' )
argparser.add_argument( '--no_daemons', action='store_true', help = 'run without background daemons' )
argparser.add_argument( '--no_wal', action='store_true', help = 'run without WAL db journalling' )
argparser.add_argument( '--no_db_temp_files', action='store_true', help = 'run the db entirely in memory' )
argparser.add_argument( '--temp_dir', help = 'override the program\'s temporary directory' )
return argparser.parse_args() | StarcoderdataPython |
3463 | <gh_stars>0
# Copyright (c) 2018-2021 <NAME>
# License: MIT License
# source: http://www.lee-mac.com/bulgeconversion.html
# source: http://www.afralisp.net/archive/lisp/Bulges1.htm
from typing import Any, TYPE_CHECKING, Tuple
import math
from ezdxf.math import Vec2
if TYPE_CHECKING:
from ezdxf.eztypes import Vertex
__all__ = [
"bulge_to_arc", "bulge_3_points", "bulge_center", "bulge_radius",
"arc_to_bulge"
]
def polar(p: Any, angle: float, distance: float) -> Vec2:
""" Returns the point at a specified `angle` and `distance` from point `p`.
Args:
p: point as :class:`Vec2` compatible object
angle: angle in radians
distance: distance
"""
return Vec2(p) + Vec2.from_angle(angle, distance)
def angle(p1: Any, p2: Any) -> float:
""" Returns angle a line defined by two endpoints and x-axis in radians.
Args:
p1: start point as :class:`Vec2` compatible object
p2: end point as :class:`Vec2` compatible object
"""
return (Vec2(p2) - Vec2(p1)).angle
def arc_to_bulge(center: 'Vertex', start_angle: float, end_angle: float,
radius: float) -> Tuple['Vec2', 'Vec2', float]:
"""
Returns bulge parameters from arc parameters.
Args:
center: circle center point as :class:`Vec2` compatible object
start_angle: start angle in radians
end_angle: end angle in radians
radius: circle radius
Returns:
tuple: (start_point, end_point, bulge)
"""
start_point = polar(center, start_angle, radius)
end_point = polar(center, end_angle, radius)
pi2 = math.pi * 2
a = math.fmod((pi2 + (end_angle - start_angle)), pi2) / 4.
bulge = math.sin(a) / math.cos(a)
return start_point, end_point, bulge
def bulge_3_points(start_point: 'Vertex', end_point: 'Vertex',
                   point: 'Vertex') -> float:
    """ Returns bulge value defined by three points.

    Based on 3-Points to Bulge by `Lee Mac`_.

    Args:
        start_point: start point as :class:`Vec2` compatible object
        end_point: end point as :class:`Vec2` compatible object
        point: arbitrary point as :class:`Vec2` compatible object

    """
    # half of the inscribed angle at `point` spanning the chord
    alpha = (math.pi - angle(point, start_point) + angle(point, end_point)) / 2
    return math.sin(alpha) / math.cos(alpha)
def bulge_to_arc(start_point: 'Vertex',
                 end_point: 'Vertex',
                 bulge: float) -> Tuple['Vec2', float, float, float]:
    """ Returns arc parameters from bulge parameters.

    The arcs defined by bulge values of :class:`~ezdxf.entities.LWPolyline`
    and 2D :class:`~ezdxf.entities.Polyline` entities start at the vertex which
    includes the bulge value and ends at the following vertex.

    Based on Bulge to Arc by `Lee Mac`_.

    Args:
        start_point: start vertex as :class:`Vec2` compatible object
        end_point: end vertex as :class:`Vec2` compatible object
        bulge: bulge value

    Returns:
        Tuple: (center, start_angle, end_angle, radius)

    """
    radius = signed_bulge_radius(start_point, end_point, bulge)
    center_dir = angle(start_point, end_point) + (math.pi / 2 - math.atan(bulge) * 2)
    center = polar(start_point, center_dir, radius)
    start_ang = angle(center, start_point)
    end_ang = angle(center, end_point)
    if bulge < 0:
        # negative bulge: clockwise arc, the two angles swap roles
        start_ang, end_ang = end_ang, start_ang
    return center, start_ang, end_ang, abs(radius)
def bulge_center(start_point: 'Vertex', end_point: 'Vertex',
                 bulge: float) -> 'Vec2':
    """ Returns center of arc described by the given bulge parameters.

    Based on Bulge Center by `Lee Mac`_.

    Args:
        start_point: start point as :class:`Vec2` compatible object
        end_point: end point as :class:`Vec2` compatible object
        bulge: bulge value as float

    """
    start_point = Vec2(start_point)
    # direction from the start point towards the arc center
    direction = angle(start_point, end_point) + (math.pi / 2. - math.atan(bulge) * 2.)
    offset = Vec2.from_angle(direction, signed_bulge_radius(start_point,
                                                            end_point,
                                                            bulge))
    return start_point + offset
def signed_bulge_radius(start_point: 'Vertex', end_point: 'Vertex',
                        bulge: float) -> float:
    """ Returns the arc radius with the sign of the bulge value. """
    chord_length = Vec2(start_point).distance(Vec2(end_point))
    return chord_length * (1. + (bulge * bulge)) / 4. / bulge
def bulge_radius(start_point: 'Vertex', end_point: 'Vertex',
                 bulge: float) -> float:
    """ Returns radius of arc defined by the given bulge parameters.

    Based on Bulge Radius by `Lee Mac`_.

    Args:
        start_point: start point as :class:`Vec2` compatible object
        end_point: end point as :class:`Vec2` compatible object
        bulge: bulge value

    """
    signed = signed_bulge_radius(start_point, end_point, bulge)
    return abs(signed)
| StarcoderdataPython |
1711289 | <gh_stars>0
from __future__ import print_function
from __future__ import absolute_import
from .ControlCommon import *
import sys
import re
import fnmatch
from itertools import chain
try:
from .CommunityRTE import CommunityRTEControl
except ImportError:
CommunityRTEControl = None
def complete_rte_name(prefix, parsed_args, **kwargs):
    """Argcomplete helper: return candidate RTE names for the parsed CLI action."""
    arcconf = get_parsed_arcconf(parsed_args.config)
    # map every action that takes an RTE name to the matching completer method
    completer_methods = {
        'enable': 'complete_enable',
        'default': 'complete_default',
        'disable': 'complete_disable',
        'undefault': 'complete_undefault',
        'cat': 'complete_all',
        'params-get': 'complete_all',
        'params-set': 'complete_all',
        # BUGFIX: 'params-unset' registers this completer too (see
        # register_parser) but previously fell through and returned None
        'params-unset': 'complete_all',
    }
    method_name = completer_methods.get(parsed_args.action)
    if method_name is None:
        return None
    # construct the controller lazily, only when the action is known
    return getattr(RTEControl(arcconf), method_name)()
def complete_rte_params(prefix, parsed_args, **kwargs):
    """Argcomplete helper: list configurable parameter names for the chosen RTE."""
    config = get_parsed_arcconf(parsed_args.config)
    return RTEControl(config).complete_params(parsed_args.rte)
def complete_rte_params_values(prefix, parsed_args, **kwargs):
    """Argcomplete helper: list allowed values for the chosen RTE parameter."""
    config = get_parsed_arcconf(parsed_args.config)
    return RTEControl(config).complete_params_values(parsed_args.rte, parsed_args.parameter)
class RTEControl(ComponentControl):
    def __init__(self, arcconfig):
        """Initialize RTE controller state from parsed arc.conf.

        Exits with code 1 when no arc.conf is available, since the
        controldir location is mandatory for RTE management.
        """
        self.logger = logging.getLogger('ARCCTL.RunTimeEnvironment')
        if arcconfig is None:
            self.logger.critical('Controlling RunTime Environments is not possible without arc.conf defined controldir')
            sys.exit(1)
        self.arcconfig = arcconfig
        # define directories
        self.control_rte_dir = arcconfig.get_value('controldir', 'arex').rstrip('/') + '/rte'
        self.system_rte_dir = ARC_DATA_DIR.rstrip('/') + '/rte'
        self.user_rte_dirs = arcconfig.get_value('runtimedir', 'arex', force_list=True)
        if self.user_rte_dirs is None:
            self.user_rte_dirs = []
        # define internal structures to hold RTEs (name -> path maps,
        # filled lazily by __fetch_rtes on first use)
        self.all_rtes = {}
        self.system_rtes = {}
        self.user_rtes = {}
        self.community_rtes = {}
        self.enabled_rtes = {}
        self.default_rtes = {}
        self.dummy_rtes = {}
        self.broken_rtes = {}
@staticmethod
def get_dir_rtes(rtedir):
rtes = {}
for path, _, files in os.walk(rtedir):
rtebase = path.lstrip(rtedir + '/')
for f in files:
rtename = rtebase + '/' + f if rtebase else f
rtepath = path + '/' + f
if os.path.islink(rtepath):
rtepath = os.readlink(rtepath)
rtes[rtename] = rtepath
return rtes
@staticmethod
def get_rte_description(rte_path):
if rte_path == '/dev/null':
return 'Dummy RTE for information publishing'
with open(rte_path) as rte_f:
max_lines = 10
description = 'RTE description is Not Available'
for line in rte_f:
descr_re = re.match(r'^#+\s*description:\s*(.*)\s*$', line, flags=re.IGNORECASE)
if descr_re:
description = descr_re.group(1)
max_lines -= 1
if not max_lines:
break
return description
@staticmethod
def __list_rte(rte_dict, long_list, prefix='', suffix='', broken_list=None):
if broken_list is None:
broken_list = []
for rte in sorted(rte_dict):
if rte in broken_list:
suffix += ' (broken)'
if long_list:
print('{0}{1:32} -> {2}{3}'.format(prefix, rte, rte_dict[rte], suffix))
else:
print('{0}{1}'.format(rte, suffix))
    def __fetch_rtes(self):
        """Look for RTEs on the filesystem and fill the object structures.

        Idempotent: returns immediately once self.all_rtes is populated, so it
        runs the filesystem scan at most once per tool invocation.
        """
        # run once per tool invocation
        if self.all_rtes:
            return
        # available pre-installed RTEs
        self.logger.debug('Indexing ARC defined RTEs from %s', self.system_rte_dir)
        self.system_rtes = self.get_dir_rtes(self.system_rte_dir)
        if not self.system_rtes:
            self.logger.info('There are no RTEs found in ARC defined location %s', self.system_rte_dir)
        # RTEs in user-defined locations
        for urte in self.user_rte_dirs:
            self.logger.debug('Indexing user-defined RTEs from %s', urte)
            rtes = self.get_dir_rtes(urte)
            if not rtes:
                self.logger.info('There are no RTEs found in user-defined location %s', urte)
            self.user_rtes.update(rtes)
        # all available RTEs; user-defined names shadow system ones
        self.all_rtes.update(self.system_rtes)
        self.all_rtes.update(self.user_rtes)
        # Community-defined RTEs (optional feature, import may have failed)
        if CommunityRTEControl is not None:
            self.community_rtes = CommunityRTEControl(self.arcconfig).get_deployed_rtes()
            self.all_rtes.update(self.community_rtes)
        # enabled RTEs (linked to controldir)
        self.logger.debug('Indexing enabled RTEs in %s', self.control_rte_dir + '/enabled')
        self.enabled_rtes = self.get_dir_rtes(self.control_rte_dir + '/enabled')
        for rte, rtepath in self.enabled_rtes.items():
            # handle dummy enabled RTEs
            if rtepath == '/dev/null':
                self.dummy_rtes[rte] = '/dev/null'
            # detect broken RTEs (symlink target no longer exists)
            if not os.path.exists(rtepath):
                self.broken_rtes[rte] = rtepath
                self.logger.warning('RunTimeEnvironment %s is enabled but the link to %s is broken.', rte, rtepath)
        # default RTEs (linked to default)
        self.logger.debug('Indexing default RTEs in %s', self.control_rte_dir + '/default')
        self.default_rtes = self.get_dir_rtes(self.control_rte_dir + '/default')
        for rte, rtepath in self.default_rtes.items():
            # detect broken RTEs
            if not os.path.exists(rtepath):
                self.broken_rtes[rte] = rtepath
                self.logger.warning('RunTimeEnvironment %s is set default but the link to %s is broken.', rte, rtepath)
def __get_rte_file(self, rte):
self.__fetch_rtes()
if rte not in self.all_rtes:
if rte not in self.dummy_rtes:
self.logger.error('There is no %s RunTimeEnvironment found', rte)
sys.exit(1)
else:
return '/dev/null'
return self.all_rtes[rte]
def __get_rte_params_file(self, rte):
rte_params_path = self.control_rte_dir + '/params/'
rte_params_file = rte_params_path + rte
if os.path.exists(rte_params_file):
return rte_params_file
return None
    def __get_rte_list(self, rtes, check_dict=None):
        """Resolve user-supplied RTE names, absolute paths or glob patterns
        into a list of {'name': ..., 'path': ...} dicts.

        Matching is done against *check_dict* (defaults to all known RTEs).
        Exits with code 1 when nothing matches at all.
        """
        rte_list = []
        if check_dict is None:
            check_dict = self.all_rtes
        for r in rtes:
            if r.startswith('/'):
                # path instead of name (comes from filesystem paths in user and system RTE dirs)
                rte_found = False
                for rname, rpath in chain(iter(self.user_rtes.items()),
                                          iter(self.system_rtes.items()),
                                          iter(self.community_rtes.items())):
                    if rpath == r:
                        self.logger.debug('RTE path %s match %s RTE name, adding to the list.', rpath, rname)
                        rte_list.append({'name': rname, 'path': rpath})
                        rte_found = True
                        break
                if not rte_found:
                    self.logger.error('There is no RTE defined by %s path.', r)
            elif r in check_dict:
                # filename match goes directly to list
                rte_list.append({'name': r, 'path': check_dict[r]})
            else:
                # check for glob wildcards in rte name (may add several matches)
                rte_found = False
                for irte in check_dict:
                    if fnmatch.fnmatch(irte, r):
                        self.logger.debug('Glob wildcard match for %s RTE, adding to the list.', irte)
                        rte_list.append({'name': irte, 'path': check_dict[irte]})
                        rte_found = True
                if not rte_found:
                    self.logger.error('There are no RTEs matched to %s found', r)
        if not rte_list:
            self.logger.error('Failed to find requested RunTimeEnvironment(s). '
                              'No RTEs that matches \'%s\' are available.', ' '.join(rtes))
            sys.exit(1)
        return rte_list
    def __list_brief(self):
        """Print one line per RTE showing its origin (system/user/community/dummy/broken)
        and state (masked, enabled, default, disabled)."""
        for rte_type, rte_dict in [('system', self.system_rtes),
                                   ('user', self.user_rtes),
                                   ('community', self.community_rtes),
                                   ('dummy', self.dummy_rtes),
                                   ('broken', self.broken_rtes)]:
            for rte in sorted(rte_dict):
                link = rte_dict[rte]
                kind = [rte_type]
                show_disabled = True
                if rte_type == 'system':
                    # a user-defined or dummy RTE with the same name shadows the system one
                    if rte in self.user_rtes or rte in self.dummy_rtes:
                        kind.append('masked')
                if rte in self.enabled_rtes:
                    # only enabled when the link points to this very file
                    if link == self.enabled_rtes[rte]:
                        kind.append('enabled')
                        show_disabled = False
                if rte in self.default_rtes:
                    if link == self.default_rtes[rte]:
                        kind.append('default')
                        show_disabled = False
                if rte in self.broken_rtes:
                    show_disabled = False
                if show_disabled:
                    kind.append('disabled')
                print('{0:32} ({1})'.format(rte, ', '.join(kind)))
def __list_long(self):
# system
if not self.system_rtes:
print('There are no system pre-defined RTEs in {0}'.format(self.system_rte_dir))
else:
print('System pre-defined RTEs in {0}:'.format(self.system_rte_dir))
for rte in sorted(self.system_rtes):
print('\t{0:32} # {1}'.format(rte, self.get_rte_description(self.system_rtes[rte])))
# user-defined
if not self.user_rte_dirs:
print('User-defined RTEs are not configured in arc.conf')
elif not self.user_rtes:
print('There are no user-defined RTEs in {0}'.format(', '.join(self.user_rte_dirs)))
else:
print('User-defined RTEs in {0}:'.format(', '.join(self.user_rte_dirs)))
for rte in sorted(self.user_rtes):
print('\t{0:32} # {1}'.format(rte, self.get_rte_description(self.user_rtes[rte])))
# community
if CommunityRTEControl is not None:
if not self.system_rtes:
print('There are no deployed community-defined RTEs')
else:
print('Deployed community-defined RTEs:')
for rte in sorted(self.community_rtes):
rte_location = self.community_rtes[rte]
rte_base_location = CommunityRTEControl(self.arcconfig).get_rtes_dir()
c = rte_location.replace(rte_base_location, '').replace('/rte/' + rte, '').strip('/')
print('\t{0:32} # {1} ({2} community)'.format(rte, self.get_rte_description(rte_location), c))
# enabled
if not self.enabled_rtes:
print('There are no enabled RTEs')
else:
print('Enabled RTEs:')
self.__list_rte(self.enabled_rtes, True, prefix='\t', broken_list=self.broken_rtes.keys())
# default
if not self.default_rtes:
print('There are no default RTEs')
else:
print('Default RTEs:')
rte_dict = {}
for rte, rtepath in self.default_rtes.items():
if rte in self.broken_rtes:
rte += ' (broken)'
rte_dict[rte] = rtepath
self.__list_rte(self.default_rtes, True, prefix='\t', broken_list=self.broken_rtes.keys())
    def list(self, args):
        """Dispatch 'arcctl rte list' to the listing flavour selected by CLI flags."""
        self.__fetch_rtes()
        if args.enabled:
            self.__list_rte(self.enabled_rtes, args.long, broken_list=self.broken_rtes.keys())
        elif args.default:
            self.__list_rte(self.default_rtes, args.long, broken_list=self.broken_rtes.keys())
        elif args.system:
            self.__list_rte(self.system_rtes, args.long)
        elif args.user:
            self.__list_rte(self.user_rtes, args.long)
        elif hasattr(args, 'community') and args.community:
            # the --community flag only exists when CommunityRTEControl imported
            self.__list_rte(self.community_rtes, args.long)
        elif args.available:
            # merge user and community RTEs over system ones (same precedence as usage)
            # NOTE(review): this mutates self.system_rtes in place — confirm intended
            self.system_rtes.update(self.user_rtes)
            self.system_rtes.update(self.community_rtes)
            self.__list_rte(self.system_rtes, args.long)
        elif args.dummy:
            self.__list_rte(self.dummy_rtes, args.long)
        elif args.long:
            self.__list_long()
        else:
            self.__list_brief()
def __params_parse(self, rte):
rte_file = self.__get_rte_file(rte)
param_str = re.compile(r'#\s*param:([^:]+):([^:]+):([^:]*):(.*)$')
params = {}
with open(rte_file) as rte_f:
max_lines = 20
for line in rte_f:
param_re = param_str.match(line)
if param_re:
pname = param_re.group(1)
params[pname] = {
'name': pname,
'allowed_string': param_re.group(2),
'allowed_values': param_re.group(2).split(','),
'default_value': param_re.group(3),
'value': param_re.group(3),
'description': param_re.group(4)
}
params_defined = self.__params_read(rte)
if pname in params_defined:
params[pname]['value'] = params_defined[pname]
max_lines -= 1
if not max_lines:
break
return params
def __params_read(self, rte, suffix=''):
self.__fetch_rtes()
rte_params_file = self.__get_rte_params_file(rte + suffix)
params = {}
if rte_params_file:
kv_re = re.compile(r'^([^ =]+)="(.*)"\s*$')
with open(rte_params_file) as rte_parm_f:
for line in rte_parm_f:
kv = kv_re.match(line)
if kv:
params[kv.group(1)] = kv.group(2)
return params
    def __params_write(self, rte, params):
        """Write parameter values for *rte* to its controldir params file.

        *params* is the dict produced by __params_parse; every entry is written
        as NAME="value". Intermediate directories are created as needed.
        Exits with code 1 on write failure.
        """
        rte_params_path = self.control_rte_dir + '/params/'
        if not os.path.exists(rte_params_path):
            self.logger.debug('Making control directory %s for RunTimeEnvironments parameters', rte_params_path)
            os.makedirs(rte_params_path, mode=0o755)
        # mirror the RTE's own directory structure (e.g. ENV/PROXY) below params/
        rte_dir_path = rte_params_path + '/'.join(rte.split('/')[:-1])
        if not os.path.exists(rte_dir_path):
            self.logger.debug('Making RunTimeEnvironment directory structure inside controldir %s', rte_dir_path)
            os.makedirs(rte_dir_path, mode=0o755)
        rte_params_file = rte_params_path + rte
        self.logger.debug('Writing data to RTE parameters file %s', rte_params_file)
        try:
            with open(rte_params_file, 'w') as rte_parm_f:
                for p in params.values():
                    rte_parm_f.write('{name}="{value}"\n'.format(**p))
        except EnvironmentError as err:
            self.logger.error('Failed to write RTE parameters file %s. Error: %s', rte_params_file, str(err))
            sys.exit(1)
    def params_get(self, rte, is_long=False):
        """Print configurable parameters of *rte*, either as NAME=value lines
        or, with is_long, as a detailed table including defaults and allowed values."""
        params = self.__params_parse(rte)
        for pdescr in params.values():
            # output
            if is_long:
                # set strings for undefined values output
                if pdescr['value'] == '':
                    pdescr['value'] = 'undefined'
                if pdescr['default_value'] == '':
                    pdescr['default_value'] = 'undefined'
                print('{name:>16} = {value:10} {description} (default is {default_value}) '
                      '(allowed values are: {allowed_string})'.format(**pdescr))
            else:
                print('{name}={value}'.format(**pdescr))
        # community software deployment (read-only) params
        cparams = self.__params_read(rte, '.community')
        if cparams:
            print('# Community software deployment parameters (read-only):')
            for k in sorted(cparams.keys()):
                fstring = '{0}={1}'
                if is_long:
                    fstring = '{0:>16} = {1:10}'
                print(fstring.format(k, cparams[k]))
    def params_unset(self, rte, parameter):
        """Reset *parameter* of *rte* back to its declared default value."""
        self.params_set(rte, parameter, None, use_default=True)
def params_set(self, rte, parameter, value, use_default=False):
params = self.__params_parse(rte)
if parameter not in params:
self.logger.error('There is no such parameter %s for RunTimeEnvironment %s', parameter, rte)
sys.exit(1)
# use default value if requested
if use_default:
value = params[parameter]['default_value']
# check type and allowed values
if params[parameter]['allowed_string'] == 'string':
pass
elif params[parameter]['allowed_string'] == 'int':
if not re.match(r'[-0-9]+'):
self.logger.error('Parameter %s for RunTimeEnvironment %s should be integer', parameter, rte)
sys.exit(1)
elif value not in params[parameter]['allowed_values']:
self.logger.error('Parameter %s for RunTimeEnvironment %s should be one of %s',
parameter, rte, params[parameter]['allowed_string'])
sys.exit(1)
# assign new value
params[parameter]['value'] = value
self.__params_write(rte, params)
    def cat_rte(self, rte):
        """Dump the RTE script to stdout, prepending configured and community
        parameter files when they exist."""
        rte_file = self.__get_rte_file(rte)
        self.logger.info('Printing the content of %s RunTimeEnvironment from %s', rte, rte_file)
        # parameters first, so sourcing the combined output applies them before the script
        for prte in [rte, rte + '.community']:
            rte_params_file = self.__get_rte_params_file(prte)
            if rte_params_file:
                self.logger.info('Including the content of RunTimeEnvironment parameters file from %s', rte_params_file)
                with open(rte_params_file) as rte_parm_f:
                    for line in rte_parm_f:
                        sys.stdout.write(line)
        with open(rte_file, 'r') as rte_fd:
            for line in rte_fd:
                sys.stdout.write(line)
        sys.stdout.flush()
    def enable(self, rtes_def, force=False, rtetype='enabled', dummy=False):
        """
        Entry point for enable operation.
        RTE definition can be glob wildcarded RTE name.

        With dummy=True the given names are linked to /dev/null (published in
        the information system but doing nothing).
        """
        if dummy:
            # enable dummy rtes (linked to /dev/null) for provided names
            for rte in rtes_def:
                self.enable_rte({'name': rte, 'path': '/dev/null'}, force, rtetype)
        else:
            # find RTEs by name (including wildcards substitutions) and enable
            self.__fetch_rtes()
            for rte in self.__get_rte_list(rtes_def):
                self.enable_rte(rte, force, rtetype)
def enable_rte(self, rteinfo, force=False, rtetype='enabled'):
"""
Enables single RTE
"""
rtename = rteinfo['name']
rtepath = rteinfo['path']
rte_enable_path = self.control_rte_dir + '/' + rtetype + '/'
if not os.path.exists(rte_enable_path):
self.logger.debug('Making control directory %s for %s RunTimeEnvironments', rte_enable_path, rtetype)
os.makedirs(rte_enable_path, mode=0o755)
rte_dir_path = rte_enable_path + '/'.join(rtename.split('/')[:-1])
if not os.path.exists(rte_dir_path):
self.logger.debug('Making RunTimeEnvironment directory structure inside controldir %s', rte_dir_path)
os.makedirs(rte_dir_path, mode=0o755)
rte_path = rte_enable_path + rtename
if os.path.exists(rte_path):
# handle case for already enabled RTEs
if os.path.islink(rte_path):
linked_to = os.readlink(rte_path)
if linked_to != rtepath:
log_msg_base = 'RunTimeEnvironment %s is already enabled but linked to different location (%s).'
if not force:
self.logger.error(log_msg_base + 'Use \'--force\' to relink', rtename, linked_to)
sys.exit(1)
else:
self.logger.debug(log_msg_base + 'Removing previous link.', rtename, linked_to)
os.unlink(rte_path)
else:
self.logger.warning('RunTimeEnvironment %s is already %s. Nothing to do.', rtename, rtetype)
return
else:
self.logger.error('RunTimeEnvironment file %s is already exists but it is not symlink as expected. '
'Have you manually perform modifications of controldir content?', rte_path)
sys.exit(1)
elif os.path.islink(rte_path):
# handle broken symlink case
log_msg_base = 'RunTimeEnvironment %s is already enabled but points to not-existing location (%s).'
if not force:
self.logger.error(log_msg_base + 'Use \'--force\' to relink.', rtename, os.readlink(rte_path))
sys.exit(1)
else:
self.logger.debug(log_msg_base + 'Removing broken link.', rtename, os.readlink(rte_path))
os.unlink(rte_path)
# create link to controldir
try:
self.logger.debug('Linking RunTimeEnvironment file %s to %s', rtepath, rte_enable_path)
os.symlink(rtepath, rte_enable_path + rtename)
except OSError as e:
self.logger.error('Filed to link RunTimeEnvironment file %s to %s. Error: %s', rtepath,
rte_enable_path, e.strerror)
sys.exit(1)
    def disable(self, rte_def, rtetype='enabled'):
        """
        Entry point for disable operation.
        RTE definition can be glob wildcarded RTE name.
        """
        self.__fetch_rtes()
        # match the requested names only against already enabled/default RTEs
        check_dict = getattr(self, rtetype + '_rtes', None)
        for rte in self.__get_rte_list(rte_def, check_dict):
            self.disable_rte(rte, rtetype)
def disable_rte(self, rteinfo, rtetype='enabled'):
"""
Disables single RTE
"""
rtename = rteinfo['name']
rtepath = rteinfo['path']
enabled_rtes = getattr(self, rtetype + '_rtes', {})
if rtename not in enabled_rtes:
self.logger.error('RunTimeEnvironment \'%s\' is not %s.', rtename, rtetype)
return
if enabled_rtes[rtename] != rtepath:
self.logger.error('RunTimeEnvironment \'%s\' is %s but linked to the %s instead of %s. '
'Please use either RTE name or correct path.',
rtename, rtetype, enabled_rtes[rtename], rtepath)
return
rte_enable_path = self.control_rte_dir + '/' + rtetype + '/'
self.logger.debug('Removing RunTimeEnvironment link %s', rte_enable_path + rtename)
os.unlink(rte_enable_path + rtename)
rte_split = rtename.split('/')[:-1]
while rte_split:
rte_dir = rte_enable_path + '/'.join(rte_split)
if not os.listdir(rte_dir):
self.logger.debug('Removing empty RunTimeEnvironment directory %s', rte_dir)
os.rmdir(rte_dir)
del rte_split[-1]
def check_enabled(self, rte):
"""Check RTE is enabled. Return path to RTE file or None"""
self.__fetch_rtes()
if rte in self.enabled_rtes:
return self.enabled_rtes[rte]
return None
def check_default(self, rte):
"""Check RTE is default. Return path to RTE file or None"""
self.__fetch_rtes()
if rte in self.default_rtes:
return self.default_rtes[rte]
return None
    def control(self, args):
        """Dispatch parsed 'arcctl rte' CLI arguments to the matching action handler.

        Exits with code 1 on an unsupported action.
        """
        if args.action == 'list':
            self.list(args)
        elif args.action == 'enable':
            self.enable(args.rte, args.force, dummy=args.dummy)
        elif args.action == 'default':
            self.enable(args.rte, args.force, 'default')
        elif args.action == 'disable':
            self.disable(args.rte)
        elif args.action == 'undefault':
            self.disable(args.rte, 'default')
        elif args.action == 'cat':
            self.cat_rte(args.rte)
        elif args.action == 'params-get':
            self.params_get(args.rte, args.long)
        elif args.action == 'params-set':
            self.params_set(args.rte, args.parameter, args.value)
        elif args.action == 'params-unset':
            self.params_unset(args.rte, args.parameter)
        elif args.action == 'community' and CommunityRTEControl is not None:
            # community sub-actions are delegated to the optional controller
            CommunityRTEControl(self.arcconfig).control(args)
        else:
            self.logger.critical('Unsupported RunTimeEnvironment control action %s', args.action)
            sys.exit(1)
def complete_enable(self):
self.__fetch_rtes()
return list(set(list(self.system_rtes.keys()) + list(self.user_rtes.keys()) + list(self.community_rtes.keys()))
- set(self.enabled_rtes.keys()))
def complete_default(self):
self.__fetch_rtes()
return list(set(list(self.system_rtes.keys()) + list(self.user_rtes.keys()) + list(self.community_rtes.keys()))
- set(self.default_rtes.keys()))
def complete_disable(self):
self.__fetch_rtes()
return list(self.enabled_rtes.keys())
def complete_undefault(self):
self.__fetch_rtes()
return list(self.default_rtes.keys())
def complete_all(self):
self.__fetch_rtes()
return list(self.all_rtes.keys())
    def complete_params(self, rte):
        """Return configurable parameter names of *rte* (tab-completion)."""
        self.__fetch_rtes()
        return list(self.__params_parse(rte).keys())
def complete_params_values(self, rte, param):
self.__fetch_rtes()
param_options = self.__params_parse(rte)[param]
if param_options['allowed_string'] == 'string' or param_options['allowed_string'] == 'int':
return []
return param_options['allowed_values']
    @staticmethod
    def register_parser(root_parser):
        """Register the 'arcctl rte' sub-parser, its actions and argcomplete completers."""
        rte_ctl = root_parser.add_parser('rte', help='RunTime Environments')
        rte_ctl.set_defaults(handler_class=RTEControl)
        rte_actions = rte_ctl.add_subparsers(title='RunTime Environments Actions', dest='action',
                                             metavar='ACTION', help='DESCRIPTION')
        rte_actions.required = True
        rte_enable = rte_actions.add_parser('enable', help='Enable RTE to be used by A-REX')
        rte_enable.add_argument('rte', nargs='+', help='RTE name').completer = complete_rte_name
        rte_enable.add_argument('-f', '--force', help='Force RTE enabling', action='store_true')
        rte_enable.add_argument('-d', '--dummy', action='store_true',
                                help='Enable dummy RTE that do nothing but published in the infosys')
        rte_disable = rte_actions.add_parser('disable', help='Disable RTE to be used by A-REX')
        rte_disable.add_argument('rte', nargs='+', help='RTE name').completer = complete_rte_name
        rte_list = rte_actions.add_parser('list', help='List RunTime Environments')
        rte_list.add_argument('-l', '--long', help='Detailed listing of RTEs', action='store_true')
        # listing filters are mutually exclusive
        rte_list_types = rte_list.add_mutually_exclusive_group()
        rte_list_types.add_argument('-e', '--enabled', help='List enabled RTEs', action='store_true')
        rte_list_types.add_argument('-d', '--default', help='List default RTEs', action='store_true')
        rte_list_types.add_argument('-a', '--available', help='List available RTEs', action='store_true')
        rte_list_types.add_argument('-s', '--system', help='List available system RTEs', action='store_true')
        rte_list_types.add_argument('-u', '--user', help='List available user-defined RTEs', action='store_true')
        rte_list_types.add_argument('-n', '--dummy', help='List dummy enabled RTEs', action='store_true')
        # community RTE options exist only when the optional module imported
        if CommunityRTEControl is not None:
            rte_list_types.add_argument('-c', '--community', help='List deployed community RTEs', action='store_true')
        rte_default = rte_actions.add_parser('default', help='Transparently use RTE for every A-REX job')
        rte_default.add_argument('rte', nargs='+', help='RTE name').completer = complete_rte_name
        rte_default.add_argument('-f', '--force', help='Force RTE enabling', action='store_true')
        rte_undefault = rte_actions.add_parser('undefault', help='Remove RTE from transparent A-REX usage')
        rte_undefault.add_argument('rte', nargs='+', help='RTE name').completer = complete_rte_name
        rte_cat = rte_actions.add_parser('cat', help='Print the content of RTE file')
        rte_cat.add_argument('rte', help='RTE name').completer = complete_rte_name
        rte_params_get = rte_actions.add_parser('params-get', help='List configurable RTE parameters')
        rte_params_get.add_argument('rte', help='RTE name').completer = complete_rte_name
        rte_params_get.add_argument('-l', '--long', help='Detailed listing of parameters', action='store_true')
        rte_params_set = rte_actions.add_parser('params-set', help='Set configurable RTE parameter')
        rte_params_set.add_argument('rte', help='RTE name').completer = complete_rte_name
        rte_params_set.add_argument('parameter', help='RTE parameter to configure').completer = complete_rte_params
        rte_params_set.add_argument('value', help='RTE parameter value to set').completer = complete_rte_params_values
        rte_params_unset = rte_actions.add_parser('params-unset', help='Use default value for RTE parameter')
        rte_params_unset.add_argument('rte', help='RTE name').completer = complete_rte_name
        rte_params_unset.add_argument('parameter', help='RTE parameter to unset').completer = complete_rte_params
        # add community RTE controller
        if CommunityRTEControl is not None:
            CommunityRTEControl.register_parser(rte_actions)
3322351 | <reponame>dslab-epfl/svshi<filename>src/generator/tests/parser_test.py
from ..parsing.device import Device
from ..parsing.parser import Parser, ParserException
import pytest
DEVICES_FOLDER_PATH = "tests/devices"
def test_parser_devices_equal():
    """Two Device objects with identical fields compare equal."""
    first = Device("binary_sensor_instance_name", "BinarySensor", "binary")
    second = Device("binary_sensor_instance_name", "BinarySensor", "binary")
    assert first == second
def test_parser_devices_not_equal():
    """Device.__eq__ returns False when compared against a non-Device object."""
    device = Device("binary_sensor_instance_name", "BinarySensor", "binary")
    not_a_device = "a string"
    assert device.__eq__(not_a_device) == False
def test_parser_reads_devices():
    """Parser.read_devices returns the devices declared in devices.json, in order."""
    parser = Parser(f"{DEVICES_FOLDER_PATH}/devices.json")
    devices = parser.read_devices()
    expected = [
        Device("binary_sensor_instance_name", "BinarySensor", "binary"),
        Device("switch_instance_name", "Switch", "switch"),
        Device("temperature_sensor_instance_name", "TemperatureSensor", "temperature"),
        Device("humidity_sensor_instance_name", "HumiditySensor", "humidity"),
        Device("co_two_sensor_instance_name", "CO2Sensor", "co2"),
        Device("A_switch", "Switch", "switch"),
    ]
    for index, wanted in enumerate(expected):
        assert devices[index] == wanted
def test_parser_on_read_devices_throws_exceptions_on_wrong_device_type():
    """read_devices raises ParserException for an unknown device type."""
    parser = Parser(f"{DEVICES_FOLDER_PATH}/devices_wrong_device_type.json")
    with pytest.raises(ParserException):
        parser.read_devices()
def test_parser_on_read_devices_throws_exceptions_on_wrong_device_name():
    """read_devices raises ParserException for an invalid device name."""
    parser = Parser(f"{DEVICES_FOLDER_PATH}/devices_wrong_device_name.json")
    with pytest.raises(ParserException):
        parser.read_devices()
def test_parser_on_read_devices_throws_exceptions_on_missing_fields():
    """Parser construction itself raises ParserException when top-level fields are missing."""
    with pytest.raises(ParserException):
        Parser(f"{DEVICES_FOLDER_PATH}/devices_missing_fields.json")
def test_parser_on_read_devices_throws_exceptions_on_missing_devices_name_field():
    """read_devices raises ParserException when a device entry lacks its name field."""
    parser = Parser(f"{DEVICES_FOLDER_PATH}/devices_missing_devices_name_field.json")
    with pytest.raises(ParserException):
        parser.read_devices()
def test_parser_on_read_devices_throws_exceptions_on_missing_devices_device_type_field():
    """read_devices raises ParserException when a device entry lacks its device type field."""
    parser = Parser(
        f"{DEVICES_FOLDER_PATH}/devices_missing_devices_device_type_field.json"
    )
    with pytest.raises(ParserException):
        parser.read_devices()
def test_parser_on_read_devices_throws_exceptions_on_wrong_timer_type():
    """Parser construction raises ParserException for a mistyped timer value."""
    with pytest.raises(ParserException):
        Parser(f"{DEVICES_FOLDER_PATH}/devices_wrong_timer_type.json")
def test_parser_on_read_devices_throws_exceptions_on_wrong_permission_level_type():
    """Parser construction raises ParserException for a mistyped permission level value."""
    with pytest.raises(ParserException):
        Parser(f"{DEVICES_FOLDER_PATH}/devices_wrong_permission_level_type.json")
def test_parser_on_read_devices_throws_exceptions_on_wrong_files_type():
    """Parser construction raises ParserException for a mistyped files value."""
    with pytest.raises(ParserException):
        Parser(f"{DEVICES_FOLDER_PATH}/devices_wrong_files_type.json")
def test_parser_on_read_devices_throws_exceptions_on_wrong_devices_type():
    """Parser construction raises ParserException for a mistyped devices value."""
    with pytest.raises(ParserException):
        Parser(f"{DEVICES_FOLDER_PATH}/devices_wrong_devices_type.json")
| StarcoderdataPython |
import Linkelist
class Stack:
    """LIFO stack implemented on top of the Linkelist singly linked list.

    The newest element is kept at the head of the underlying list, so push,
    pop and peek all operate on the head node.
    """
    def __init__(self) -> None:
        # underlying storage; newest element lives at the list head
        self.data = Linkelist.Linkedlist()
    def push(self,data):
        # wrap the payload in a list node and insert it at the head
        newNode = Linkelist.Node(data)
        self.data.insert(newNode)
    def pop(self):
        # remove the top element; presumably Linkedlist.delete() drops the head
        # and discards its value — TODO confirm against the Linkelist module
        self.data.delete()
    def printStack(self):
        # dump elements top-to-bottom followed by a separator line
        self.data.printNodeinList()
        print("---------")
    def getpeek(self):
        # return the payload of the top element without removing it
        return self.data.head.getdata()
if __name__=='__main__':
    # manual smoke test: push two names, pop the most recent ("kyle"),
    # then print the remaining stack (expected to show only "elaine")
    mystack = Stack()
    mystack.push("elaine")
    mystack.push("kyle")
    mystack.pop()
    mystack.printStack()
| StarcoderdataPython |
1687223 | # -*- coding: utf-8 -*-
########
# Copyright (c) 2015 Fastconnect - Atost. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import testtools
import time
import test_utils
import random
from plugin import (utils,
constants,
resource_group
)
from cloudify.state import current_ctx
from cloudify.mocks import MockCloudifyContext
TIME_DELAY = 20
class TestResourceGroup(testtools.TestCase):
__random_id = str(random.randrange(0, 1000, 2))
    @classmethod
    def setUpClass(self):
        """Log the start of the test suite once, tagged with the random run id."""
        # NOTE(review): as a classmethod the receiver should conventionally be named 'cls'
        ctx = self.mock_ctx('init')
        ctx.logger.info("BEGIN test ressource_group number " + self.__random_id)
@classmethod
def mock_ctx(self, test_name):
""" Creates a mock context for the instance
tests
"""
test_properties = {
constants.AZURE_CONFIG_KEY:{
constants.SUBSCRIPTION_KEY: test_utils.SUBSCRIPTION_ID,
constants.USERNAME_KEY: test_utils.AZURE_USERNAME,
constants.PASSWORD_KEY: test_utils.AZURE_PASSWORD,
constants.LOCATION_KEY: 'westeurope',
constants.RESOURCE_GROUP_KEY: test_name + self.__random_id,
},
constants.RESOURCE_GROUP_KEY: test_name + self.__random_id,
constants.DELETABLE_KEY: True
}
return MockCloudifyContext(node_id = 'test' + self.__random_id,
properties = test_properties)
    def setUp(self):
        """Standard testtools setup; no extra fixtures needed."""
        super(TestResourceGroup, self).setUp()
    def tearDown(self):
        """Pause after each test so asynchronous Azure operations can settle."""
        super(TestResourceGroup, self).tearDown()
        time.sleep(TIME_DELAY)
def test_create_resource_group(self):
ctx = self.mock_ctx('testcreategroup')
current_ctx.set(ctx=ctx)
ctx.logger.info("BEGIN resource_group create test")
ctx.logger.info("create resource_group")
status_code = resource_group.create(ctx=ctx)
ctx.logger.debug("status_code = " + str(status_code) )
self.assertTrue(bool((status_code == 200) or (status_code == 201)))
current_ctx.set(ctx=ctx)
utils.wait_status(ctx, "resource_group",constants.SUCCEEDED, timeout=600)
current_ctx.set(ctx=ctx)
ctx.logger.info("delete resource_group")
self.assertEqual(202, resource_group.delete(ctx=ctx))
try:
current_ctx.set(ctx=ctx)
utils.wait_status(ctx, "resource_group","waiting for exception", timeout=600)
except utils.WindowsAzureError:
pass
ctx.logger.info("END resource_group create test")
def test_delete_resource_group(self):
ctx = self.mock_ctx('testdeletegroup')
current_ctx.set(ctx=ctx)
ctx.logger.info("BEGIN resource_group delete test")
ctx.logger.info("create resource_group")
status_code = resource_group.create(ctx=ctx)
ctx.logger.debug("status_code = " + str(status_code) )
self.assertTrue(bool((status_code == 200) or (status_code == 201)))
current_ctx.set(ctx=ctx)
utils.wait_status(ctx, "resource_group",constants.SUCCEEDED, timeout=600)
current_ctx.set(ctx=ctx)
ctx.logger.info("delete resource_group")
self.assertEqual(202, resource_group.delete(ctx=ctx))
try:
current_ctx.set(ctx=ctx)
utils.wait_status(ctx, "resource_group","waiting for exception", timeout=600)
except utils.WindowsAzureError:
pass
ctx.logger.info("create resource_group with deletable propertie set to false")
ctx.node.properties[constants.DELETABLE_KEY] = False
status_code = resource_group.create(ctx=ctx)
ctx.logger.debug("status_code = " + str(status_code) )
self.assertTrue(bool((status_code == 200) or (status_code == 201)))
current_ctx.set(ctx=ctx)
utils.wait_status(ctx, "resource_group",constants.SUCCEEDED, timeout=600)
ctx.logger.info("not delete resource_group")
current_ctx.set(ctx=ctx)
self.assertEqual(0, resource_group.delete(ctx=ctx))
ctx.logger.info("delete resource_group")
ctx.logger.info("Set deletable propertie to True")
current_ctx.set(ctx=ctx)
ctx.node.properties[constants.DELETABLE_KEY] = True
self.assertEqual(202, resource_group.delete(ctx=ctx))
try:
current_ctx.set(ctx=ctx)
utils.wait_status(ctx, "resource_group","waiting for exception", timeout=600)
except utils.WindowsAzureError:
pass
ctx.logger.info("END resource_group delete test")
def test_conflict_resource_group(self):
ctx = self.mock_ctx('conflictgroup')
current_ctx.set(ctx=ctx)
ctx.logger.info("BEGIN resource_group conflict test")
ctx.logger.info("create resource group")
status_code = resource_group.create(ctx=ctx)
ctx.logger.debug("status_code = " + str(status_code) )
self.assertTrue(bool((status_code == 200) or (status_code == 201)))
current_ctx.set(ctx=ctx)
utils.wait_status(ctx, "resource_group",constants.SUCCEEDED, timeout=600)
ctx.logger.info("conflict create resource group")
status_code = resource_group.create(ctx=ctx)
ctx.logger.debug("status_code = " + str(status_code) )
self.assertTrue(bool((status_code == 200) or (status_code == 201)))
ctx.logger.info("delete resource_group")
current_ctx.set(ctx=ctx)
self.assertEqual(202, resource_group.delete(ctx=ctx))
try:
current_ctx.set(ctx=ctx)
utils.wait_status(ctx, "resource_group","waiting for exception", timeout=600)
except utils.WindowsAzureError:
pass
ctx.logger.info("END resource_group conflict test") | StarcoderdataPython |
57987 | from .behavior_action_server import BehaviorActionServer
__all__ = [
'BehaviorActionServer'
]
| StarcoderdataPython |
3273297 | import argparse
from ipaddress import ip_address
from itertools import chain
import logging
from multiprocessing import Process, Queue
import os
import statistics
from time import perf_counter
from typing import Tuple, List, Optional
from twisted.internet import defer
from twisted.python.failure import Failure
from dnsagent.app import App, init_log
from dnsagent.resolver import HostsResolver, ExtendedResolver, TCPExtendedResolver
from dnsagent.server import ExtendedDNSServerFactory
from dnsagent.utils import get_reactor, wait_for_tcp
init_log()
# set log level for root logger
if os.environ.get('DEBUG'):
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.ERROR)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# use IOCP reactor if available to workaround descriptor limit
try:
import twisted.internet.iocpreactor
except ImportError:
pass
else:
twisted.internet.iocpreactor.install()
DEFAULT_SERVER_ADDR = ('127.0.1.88', 5353)
_server_process = None
def gen_host_name(num: int):
    """Deterministic fake hostname: 'a' followed by num mod 1e8, zero-padded to 8 digits."""
    return 'a{:08d}'.format(num % 100000000)
def gen_ip_address(num: int):
    """Deterministic loopback-range address: 127.0.0.1 offset by num mod 1e8."""
    base = 0x7f000001  # 127.0.0.1
    return str(ip_address(base + num % int(1e8)))
def run_server(bind: Tuple[str, int]):
    """Run a benchmark DNS server on *bind*, answering from a fixed 10k-host map.

    Blocks in reactor.run() until the reactor is stopped / process terminated.
    """
    logger.info('starting server on %r', bind)
    # deterministic hosts table: a00000000 -> 127.0.0.1, a00000001 -> 127.0.0.2, ...
    mapping = dict((gen_host_name(i), gen_ip_address(i)) for i in range(10000))
    resolver = HostsResolver(mapping=mapping)
    server = ExtendedDNSServerFactory(resolver=resolver)
    reactor = get_reactor()
    app = App(reactor=reactor)
    app.start((server, [bind]))
    reactor.run()
class QueryRunner:
    """Issue ``count`` DNS lookups against ``addr`` with at most ``concurrency``
    queries in flight at a time.

    ``self.done`` fires with a list of per-query durations in seconds; a
    ``None`` entry marks a failed query.
    """

    def __init__(
            self, addr: Tuple[str, int], count: int, concurrency=100,
            tcp_only=False, no_reuse_resolver=False,
            reactor=None, **extra
    ):
        # **extra absorbs unrelated option attributes passed via vars(options)
        assert count > 0 and concurrency > 0
        self.addr = addr
        self.count = count
        self.concurrency = concurrency
        self.tcp_only = tcp_only
        self.no_reuse_resolver = no_reuse_resolver
        self.reactor = get_reactor(reactor)
        self._resolver = None
        self.done = defer.Deferred()  # fires with self.results when all queries finish
        self.results = [None] * self.count  # type: List[Optional[float]]
        self.waitting = 0  # queries currently in flight (sic: original spelling)
        self.finished = 0
        self.started = 0

    def run(self):
        """Start issuing queries; return a Deferred firing with the results."""
        self.tick()
        return self.done

    def tick(self):
        # Top up the in-flight window, or finish once everything completed.
        if self.started < self.count:
            n = min(self.concurrency - self.waitting, self.count - self.started)
            for _ in range(n):
                self.waitting += 1
                self.spawn(self.started)
                self.started += 1
        else:
            assert self.started == self.count
            if self.finished == self.count:
                assert self.waitting == 0
                self.done.callback(self.results)

    def spawn(self, index):
        """Issue the lookup for the index-th generated hostname."""
        hostname = gen_host_name(index).encode()
        d = self.get_resolver().lookupAddress(hostname)
        d.addCallbacks(
            callback=self.got_answer, callbackArgs=(index, perf_counter()),
            errback=self.failed,
        ).addErrback(self.unexpected)

    def got_answer(self, answer, index: int, started_time: float):
        # Record the elapsed time of a successful query.
        diff = perf_counter() - started_time
        self.results[index] = diff
        self.finished_one()

    def failed(self, err):
        # Failed queries keep their None slot in self.results.
        self.finished_one()

    def finished_one(self):
        self.finished += 1
        self.waitting -= 1
        self.tick()

    def unexpected(self, err):
        logger.error('unhandled exception: %r', err)
        self.done.errback(err)
        return err

    def get_resolver(self):
        """Return the resolver; create a fresh one per call if no_reuse_resolver."""
        if not self._resolver or self.no_reuse_resolver:
            resolver_cls = ExtendedResolver if not self.tcp_only else TCPExtendedResolver
            self._resolver = resolver_cls(servers=[self.addr], reactor=self.reactor)
        return self._resolver
RunQueryResultType = Tuple[float, List[Optional[float]]]
def run_query(inqueue: Queue, outqueue: Queue):
    """Worker-process body: run one QueryRunner, report (elapsed, times).

    Blocks on *inqueue* until the controller sends (server_addr, options),
    then puts the (total_elapsed, per_query_times) tuple on *outqueue*.
    """
    server_addr, options = inqueue.get()
    logger.info('run_query() begins')
    def got_result(result: List[Optional[float]]):
        diff = perf_counter() - started_time
        logger.info('%d requests finished in %.3f s', options.count, diff)
        outqueue.put((diff, result))
    # vars(options) also contains unrelated CLI options; QueryRunner's
    # **extra parameter swallows them.
    querier = QueryRunner(addr=server_addr, **vars(options))
    started_time = perf_counter()
    d = querier.run()
    d.addCallback(got_result)
    reactor = get_reactor()
    reactor.run()
def process_results(results: 'List[RunQueryResultType]', options):
    """Aggregate per-process benchmark results and print summary statistics.

    Args:
        results: one (total_process_time, per_query_times) tuple per worker
            process; a per-query time of None marks a failed query.
        options: parsed command-line options (uses process, count,
            concurrency, stats_file).
    """
    inf = float('+inf')
    total_queries = options.process * options.count
    concurrency = options.process * options.concurrency
    avg_process_time = statistics.mean(ptime for ptime, _ in results)
    qps = total_queries / avg_process_time
    # Flatten per-process query times; failures (None) become +inf so they
    # sort last and are trivially countable.
    all_times = chain.from_iterable(times for _, times in results)
    query_times = sorted(inf if t is None else t for t in all_times)
    failure_rate = sum(int(t == inf) for t in query_times) / total_queries
    median_query_time = statistics.median(query_times)
    print('options: ', options)
    # Explicit name/value pairs instead of the fragile locals()[name]
    # reflection the original used (same printed output).
    for name, value in [
        ('qps', qps),
        ('failure_rate', failure_rate),
        ('median_query_time', median_query_time),
        ('total_queries', total_queries),
        ('concurrency', concurrency),
    ]:
        print(f'{name}: {value}')
    if options.stats_file:
        with open(options.stats_file, 'wt+') as fp:
            fp.writelines('%s\n' % t for t in query_times)
    # TODO: print bar chart of query time
def run_controller(server_addr, options):
    """Spawn client worker processes, wait for the server, run the benchmark."""
    def server_ready(ignore):
        # Hand every worker its parameters, then collect their results.
        for inq, outq, proc in queriers:
            inq.put((server_addr, options))
        results = []  # type: List[Tuple[float, List[Optional[float]]]]
        for inq, outq, proc in queriers:
            results.append(outq.get(timeout=60))
            proc.terminate()
        try:
            process_results(results, options)
        except Exception:
            logger.exception('process_result()')
    def server_failed(err):
        logger.error('failed to start server: %r', err)
        for _, _, proc in queriers:
            proc.terminate()
    def teardown(result):
        if isinstance(result, Failure):
            logger.error('unhandled error: %r', result)
            reactor.stop()
        if _server_process:
            _server_process.terminate()
        reactor.stop()
        # NOTE(review): on the failure path reactor.stop() runs twice, which
        # raises ReactorNotRunning inside the deferred — confirm intended.
    queriers = []  # type: List[Tuple[Queue, Queue, Process]]
    for n in range(options.process):
        inqueue, outqueue = Queue(), Queue()
        client = Process(target=run_query, args=(inqueue, outqueue))
        client.start()
        queriers.append((inqueue, outqueue, client))
    d = wait_for_tcp(server_addr, logger=logger)
    d.addCallbacks(server_ready, server_failed).addBoth(teardown)
    reactor = get_reactor()
    reactor.run()
def parse_args():
    """Build and run the benchmark command-line parser."""
    parser = argparse.ArgumentParser()

    # -s / -c are mutually exclusive; both write into ``mode``.
    mode_group = parser.add_mutually_exclusive_group()
    mode_group.add_argument(
        '-s', '--server', dest='mode', action='store_const', const='server',
        help='server only mode')
    mode_group.add_argument(
        '-c', '--client', dest='mode', action='store_const', const='client',
        help='client only mode')

    parser.add_argument('--address', help='server address')
    parser.add_argument(
        '-p', '--process', type=int, default=2,
        help='number of client process')
    parser.add_argument(
        '-n', '--count', type=int, default=4000,
        help='number of queries to run per process')
    parser.add_argument(
        '--con', '--concurrency', dest='concurrency', type=int, default=500,
        help='maximum concurrent queries per process')
    parser.add_argument(
        '--tcp-only', action='store_true', default=False,
        help='only use TCP for query')
    parser.add_argument(
        '--no-reuse-resolver', action='store_true', default=False,
        help='use a new resolver for every query')
    parser.add_argument('--stats-file', help='write query times to this file')

    return parser.parse_args()
def main():
    """Entry point: run as server only, client only, or combined benchmark."""
    global _server_process
    options = parse_args()
    if options.address:
        host, port = options.address.split(':')
        port = int(port)
        server_addr = host, port  # type: Tuple[str, int]
    else:
        server_addr = DEFAULT_SERVER_ADDR
    if options.mode == 'server':
        run_server(server_addr)
    else:
        # In combined mode (no --client) fork the server process first.
        if options.mode != 'client':
            _server_process = Process(target=run_server, args=(server_addr,))
            _server_process.start()
        run_controller(server_addr, options)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1683221 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Experimentation script.
Analysis based on letter frequencies and letter frequencies resolved by
position. Main purpose is to generate optimal starter words for humans
to use to catch as many letters as possible.
This script evolved from a few lines of trial and error, so it is not well
structured. Too many global variables.
I started over with a different and better approach in wordlestat2.py.
Created on Sat Jan 15 12:54:43 2022 // author: @hk_nien on Twitter.
"""
import re
import numpy as np
import matplotlib.pyplot as plt
ALPHABET ='abcdefghijklmnopqrstuvwxyz'
WORDS = None # list, all eligible 5-letter words
LFREQ = None # dict, key: letter -> value: fraction of words containing that letter
LFPOS = None # array (i_alphabet, j_pos) -> fraction
#%%
def _get5words(files, blacklist=()):
"""Return words list for dictionaries"""
words = set()
for fn in files:
words.update(open(fn).read().split('\n'))
exp = re.compile('[a-zA-Z]{5}$')
words = sorted(set(
w.lower() for w in words
if re.match(exp, w)
) - set(blacklist))
return words
def get5words_en():
    """English 5-letter words from the system dictionaries."""
    system_dicts = [
        '/usr/share/dict/american-english',
        '/usr/share/dict/british-english',
    ]
    return _get5words(system_dicts, blacklist={'aires', 'aries', 'bligh'})
def get5words_en_2(n=2700):
    """Return sorted word list, most frequent on top.

    Corpus size 2700 seems to be about what Wordle uses.
    """
    pattern = re.compile('[A-Za-z]{5}$')
    skip = ('aries', 'bligh', 'begum')
    candidates = open('data/wordlist-en-freq.txt').read().split('\n')
    # NOTE: the blacklist is checked against the original-case word,
    # lower-casing happens afterwards (matches the historic behaviour).
    frequent = [
        w.lower()
        for w in candidates
        if re.match(pattern, w) and w not in skip
    ]
    return frequent[:n]
def get5words_nl():
    """Dutch 5-letter words from the local Woordle word list."""
    excluded = {
        'aties', 'olink', 'molin', 'limon', 'leoni', 'pilon',
        'budak', 'bedum', 'dumps',
    }
    return _get5words(['data/woordle-nl-a.txt'], blacklist=excluded)
def getletfreq():
    """Return {letter: fraction of WORDS containing that letter},
    ordered by descending frequency (ties broken by letter, descending)."""
    counts = [
        (sum(1 for w in WORDS if letter in w), letter)
        for letter in ALPHABET
    ]
    return {
        letter: n / len(WORDS)
        for n, letter in sorted(counts, reverse=True)
    }
def get_letfreq_pos():
    """Per-position letter frequencies as a (26, 5) array.

    Entry (i, j) is the fraction of letter i's occurrences that fall at
    word position j (each row is normalized by its own total).
    """
    word_len = 5
    counts = np.zeros((len(ALPHABET), word_len), dtype=int)
    for pos in range(word_len):
        column = [w[pos] for w in WORDS]
        for row, letter in enumerate(ALPHABET):
            counts[row, pos] = column.count(letter)
    return counts / counts.sum(axis=1).reshape(-1, 1)
def find_optimal_word(prev_words, tweaks=(0.1, 0.2, -0.5), ntop=100, verbose=True):
    """Find highest scoring word given previous word tries.

    - prev_words: list of previous words
    - tweaks: tuple of:
        - bonus for letter position
        - bonus for repeated letter in different position
        - penalty for repeated letter in same position
    - ntop: how many words to apply (slow) tweaks to.
    - verbose: print extra info.
    """
    prev_letters = set(''.join(prev_words))
    letters = ''.join(sorted(set(ALPHABET) - prev_letters))
    # Base score: sum of frequencies of not-yet-tried letters the word contains.
    wscores = {k: 0 for k in WORDS}
    for let in letters:
        for w in WORDS:
            if let in w:
                wscores[w] += LFREQ[let]
    # Order by descending base score; tweaks below only touch the top ntop.
    wscores = {
        k:wscores[k]
        for k in sorted(wscores, key=lambda x:wscores[x], reverse=True)
    }
    topwords = [w for _, w in zip(range(ntop), wscores.keys())]
    # Bonus based on position
    for w in topwords:
        for i, let in enumerate(w):
            pbonus = LFPOS[ord(let)-ord('a'), i]
            lbonus = LFREQ[let]
            wscores[w] += tweaks[0]*pbonus*lbonus
    # Small bonus for previously seen letters on different positions
    # (more bonus for high-scoring letters).
    for iw, w in enumerate(topwords):
        # if w == 'blimp':
        #     breakpoint()
        for i, let in enumerate(w):
            if let not in prev_letters:
                continue
            # Only award the bonus when no previous word had this letter
            # at the same position (for-else: no break taken).
            for pw in prev_words:
                if pw[i] == let:
                    break
            else:
                bonus = tweaks[1]*LFREQ[let]
                if iw < 10 and verbose:
                    print(f'{w}: bonus {bonus:.2f}')
                wscores[w] *= 1 + bonus
    # penalty for double letters
    for w in topwords:
        if len(set(w)) < len(w):
            wscores[w] *= 0.8
    # give penalty to words having previously seen letters in the same
    # place.
    exps = []
    for i, ll in enumerate(zip(*prev_words)):
        # e.g. position 1 letters 'ab' -> regex '.[ab]...$'
        exp = ('.'*i) + f'[{"".join(ll)}]' + ('.'*(4-i)) + '$'
        exps.append(re.compile(exp))
    for iw, w in enumerate(topwords):
        for exp in exps:
            if re.match(exp, w):
                if iw < 5 and verbose:
                    print(f'{w}: penalty 0.2')
                wscores[w] *= (1 + tweaks[2])
    # re-sort topwords
    topwords = sorted(topwords, key=lambda x: -wscores[x])
    tws_w_score = [
        f'{w} ({wscores[w]*100:.0f})'
        for w in topwords
    ]
    if verbose:
        print(f'For {letters}: {", ".join(tws_w_score[:5])}')
    return topwords[0]
def match_1word(word, pwords):
    """Match probe words in *pwords* against the target *word*.

    Returns a 5-tuple:
    - set of target letters any probe hit,
    - set of (letter, position) exact-position hits,
    - set of the target's unique letters,
    - set of probed letters absent from the target,
    - per-position list of sets of wrong letters probed at that position.
    """
    target_letters = set(word)
    letter_hits = set()
    position_hits = set()
    wrong_letters = set()
    wrong_by_pos = [set() for _ in word]
    for probe in pwords:
        probe_letters = set(probe)
        letter_hits |= target_letters & probe_letters
        wrong_letters |= probe_letters - target_letters
        for pos, want in enumerate(word):
            got = probe[pos]
            if got == want:
                position_hits.add((want, pos))
            else:
                wrong_by_pos[pos].add(got)
    return letter_hits, position_hits, target_letters, wrong_letters, wrong_by_pos
def evaluate_1word_fast(word, pwords, verbose=False, w1=0.5, ret_full=False):
    """Score probe words against *word*; heuristic, 0 <= score <= 100.

    Parameters:
    - word: 1 word
    - pwords: list of probe words
    - verbose: True to print details.
    - w1: weight for letter hits (0<=w1<=1); position hits weigh 1-w1.
    - ret_full: True to return both fraction metrics as well.

    Return:
    - score
    - (for ret_full=True) fraction of letters found.
    - (for ret_full=True) fraction of letters found on correct position.
    """
    letter_hits, pos_hits, unique_letters, _, _ = match_1word(word, pwords)
    frac_letters = len(letter_hits) / len(unique_letters)
    frac_positions = len(pos_hits) / len(word)
    score = np.around((w1 * frac_letters + (1 - w1) * frac_positions) * 100, 1)
    if verbose:
        print(
            f' {word}: {len(letter_hits)}/{len(unique_letters)} '
            f'{len(pos_hits)}/{len(word)} '
            f'{score:.1f}'
        )
    if ret_full:
        return score, frac_letters, frac_positions
    return score
def evaluate_1word_slow(word, pwords, verbose=False):
    """Try probe words on word, return score 0 <= score <= 100

    Score based on % of all words that can be ruled out.
    Parameters:
    - word: 1 word
    - pwords: list of probe words
    - verbose: True to print details.
    Return: score
    """
    hits1, hits2, wlets, badlets, badlets_p = match_1word(word, pwords)
    words = set(WORDS)
    nws = [len(words)]  # candidate-count after each filter stage
    if hits2:
        # filter based on known letters
        exp = ['.']*len(word)
        for let, i in hits2:
            exp[i] = let
        exp = re.compile(''.join(exp))
        words = [w for w in words if re.match(exp, w)]
        nws.append(len(words))
    # filter based on occurence of bad letters
    # NOTE(review): if badlets is empty, f'[{exp}]' becomes '[]' and
    # re.compile raises re.error — confirm probes always miss a letter.
    exp = ''.join(badlets)
    exp = re.compile(f'[{exp}]')
    words = [w for w in words if not re.search(exp, w)]
    nws.append(len(words))
    # filter based on occurence of good letters
    words1 = []
    for w in words:
        # keep only candidates containing every confirmed letter
        for let in hits1:
            if not let in w:
                break
        else:
            words1.append(w)
    words = words1
    nws.append(len(words))
    # 100 when one candidate remains; 10 points off per extra candidate
    score = max(0, 100 - (nws[-1]-1)*10)
    nws_str = ",".join([str(x) for x in nws])
    if verbose:
        print(f'{word}: {nws_str} - score {score}')
    return score
def evaluate_1word(word, pwords, verbose=False, w1=0.5, smethod='fast',
                   ret_full=False):
    """Dispatch to the fast heuristic or the slow exhaustive scorer.

    The slow method ignores *w1* and *ret_full*.
    """
    if smethod == 'slow':
        return evaluate_1word_slow(word, pwords, verbose=verbose)
    if smethod == 'fast':
        return evaluate_1word_fast(word, pwords, verbose=verbose, w1=w1,
                                   ret_full=ret_full)
    raise ValueError(f'smethod={smethod!r}')
def evaluate_words(pwords, w1=0.5, smethod='fast', speedup=2):
    """Mean probe-word score over every *speedup*-th word of the corpus.

    smethod can be 'slow' or 'fast'.
    """
    sampled = WORDS[::speedup]
    return np.mean([
        evaluate_1word(w, pwords, w1=w1, smethod=smethod)
        for w in sampled
    ])
def evaluate_words_verbose(pwords, w1=0.5, smethod='fast', nsamp=10):
    """Like evaluate_words(), but also print a sample breakdown.

    Prints per-word details for ~nsamp sampled corpus words, then a
    mean-summary line, and returns the overall corpus score.
    """
    score = evaluate_words(pwords, w1=w1, smethod=smethod)
    print(f' Probe words: {", ".join(pwords)}')
    print(f'Performance: {score:.1f}')
    print('Sample:')
    results = []
    for i, w in enumerate(WORDS[::len(WORDS)//nsamp]):
        r = evaluate_1word(w, pwords, w1=w1, verbose=(i < 10), smethod=smethod,
                           ret_full=True)
        results.append(r)
        # NOTE(review): no break follows, so this message repeats for every
        # sample past the tenth — confirm whether a break was intended.
        if i > 10:
            print('(Truncated after 10)')
    if smethod == 'slow':
        # slow scorer returns a bare score, so results is a flat list
        smean = np.mean(results)
        print(f'Mean score: {smean:.1f}')
    else:
        smean = np.array(results).mean(axis=0)  # columns: score, fhit, fpos
        wlen = len(pwords[0])
        print(f'Mean: score {smean[0]:.1f}, '
              f'letters found {smean[1]*100:.0f}%, '
              f'positions found {smean[2]*wlen:.2f}/{wlen}.')
    return score
def evaluate_hyper(tweaks=(0.25, -0.2, -0.5), num=5, w1=0.5, verbose=True,
                   smethod='fast'):
    """Greedily build *num* starter words with the given tweaks, then score them.

    smethod: 'slow' or 'fast'
    Return:
    - score
    - probe_words
    """
    wseen = []
    # Greedy: each next word is optimal given the ones already chosen.
    for _ in range(num):
        next_word = find_optimal_word(
            wseen, verbose=False,
            tweaks=tweaks
        )
        wseen.append(next_word)
    if verbose:
        print(f'\n** tweaks={tweaks}, num={num}, w1={w1}, smethod={smethod!r}')
        score = evaluate_words_verbose(wseen, w1=w1, smethod=smethod)
    else:
        score = evaluate_words(wseen, w1=w1, smethod=smethod)
    return score, wseen
def scan_hyper(num=5, w1=0.5, tweak2=-0.2, plot=True, smethod='fast',
               t0range=(0, 0.4, 9), t1range=(0, 0.4, 9)):
    """Grid-scan tweak0/tweak1 (tweak2 fixed); plot and return optimal kwargs.

    t0range/t1range are np.linspace arguments (start, stop, num_points).
    Returns a dict suitable for passing to evaluate_hyper(**kwargs).
    """
    scores = []
    t0s = np.linspace(*t0range)
    t1s = np.linspace(*t1range)
    scores = np.zeros((len(t0s), len(t1s)))
    print('scan ...', end='', flush=True)
    ndone = 0
    for i0, t0 in enumerate(t0s):
        for i1, t1 in enumerate(t1s):
            score, _ = evaluate_hyper(
                tweaks=(t0, t1, tweak2), num=num, w1=w1, verbose=False,
                smethod=smethod
            )
            scores[i0, i1] = score
            ndone += 1
            print(f'\rscan {ndone}/{scores.size}', end='', flush=True)
    print(' done.')
    # locate the best-scoring grid cell
    i0, j0 = np.unravel_index(np.argmax(scores), scores.shape)
    opt_tweak = (t0s[i0], t1s[j0], tweak2)
    if plot:
        fig, ax = plt.subplots()
        ax.set_xticks(np.arange(len(t1s)))
        ax.set_xticklabels([f'{t1:.2f}' for t1 in t1s])
        ax.set_xlabel('tweak1: different position bonus')
        ax.set_yticks(np.arange(len(t0s)))
        ax.set_yticklabels([f'{t0:.2f}' for t0 in t0s])
        ax.set_ylabel('tweak0: position bonus')
        cm = ax.matshow(scores)
        fig.colorbar(cm)
        fig.show()
    return dict(tweaks=opt_tweak, w1=w1, num=num, smethod=smethod)
def analyze_wordle_stats():
    """Original wordle statistics.

    Locate past Wordle answers in the frequency-sorted corpus and print
    mean/std of their positions (to estimate the corpus size Wordle draws
    from, assuming a flat-top distribution).
    """
    picks = (
        'panic,tangy,abbey,favor,drink,query,gorge,crank,slump,banal,tiger,'
        'siege,truss,boost,rebus'
    ).split(',')
    global WORDS
    WORDS = get5words_en_2(5000)  # Large corpus, sorted.
    poss = []
    notfound = set()
    for w in picks:
        try:
            poss.append(WORDS.index(w))
        except ValueError:
            notfound.add(w)
    poss = np.array(poss)
    print(f'Word positions in corpus: {poss}')
    m, s = poss.mean(), poss.std()
    # std/mean ratio of a uniform [0, N] sample tends to 1/sqrt(3)
    sm = 1/np.sqrt(3)
    print(f'Mean: {m:.0f}, std={s:.0f}, ratio {s/m:.2f}'
          f' (expected for flat-top: {sm:.2f}')
    print(f'not found: {notfound}')
def run_nl(hyperscan=False, numw=4, w1=0.7):
    """NL:
    Best for either hits or pos hits:
    ['toren', 'balie', 'drugs', 'gemak', 'schop']
    """
    # Rebuild module-level corpus and frequency tables for Dutch.
    global WORDS, LFREQ, LFPOS
    WORDS = get5words_nl()
    LFREQ = getletfreq()
    LFPOS = get_letfreq_pos()
    plt.close('all')
    kwargs1 = dict(num=numw, w1=w1, smethod='fast')
    if hyperscan:
        kwargs2 = scan_hyper(
            **kwargs1,
            t0range=(0, 0.4, 9), t1range=(-0.3, 0.4, 15), tweak2=-0.5
        )
    else:
        # from a previous run
        kwargs2 = {**kwargs1, 'tweaks': (0.25, -0.15, -0.5)}
    _, pwords = evaluate_hyper(**kwargs2, verbose=False)
    # pad out to 5 starter words if fewer were optimized
    for _ in range(5 - numw):
        pwords.append(
            find_optimal_word(pwords, tweaks=kwargs2['tweaks'], verbose=False)
        )
    print(f'Optimized for {numw} words: {repr(pwords)}')
    print(f'tweaks={kwargs2["tweaks"]}')
    for inum in (4, 5):
        evaluate_words_verbose(pwords[:inum], w1=kwargs2['w1'], smethod='fast')
    print('\n\n')
    for w in pwords:
        print(f' {w}')
    print()
    return pwords
def run_en(hyperscan=False, n_corpus=2700, numw=5, w1=0.7):
    """Run for English.

    Best for hit rate: ['raise', 'clout', 'nymph', 'bowed', 'kings']
    Best for pos hits: ['raise', 'count', 'piled', 'shaky', 'began']
    Manually tweaked: ['cares', 'point', 'bulky', 'width', 'gnome']
    (Manual tweak: allow 'c' in first word for better position hits and
    very little penalty on letter hit rate for the first 4 words together.)
    """
    # Rebuild module-level corpus and frequency tables for English.
    global WORDS, LFREQ, LFPOS
    WORDS = get5words_en_2(n_corpus)
    LFREQ = getletfreq()
    LFPOS = get_letfreq_pos()
    plt.close('all')
    kwargs1 = dict(num=numw, w1=w1, smethod='fast')
    if hyperscan:
        kwargs2 = scan_hyper(
            **kwargs1,
            t0range=(0, 0.4, 9), t1range=(-0.3, 0.4, 15), tweak2=-0.5
        )
    else:
        # from a previous run
        kwargs2 = {**kwargs1, 'tweaks': (0.25, -0.15, -0.5)}
    _, pwords = evaluate_hyper(**kwargs2, verbose=False)
    # pad out to 5 starter words if fewer were optimized
    for _ in range(5 - numw):
        pwords.append(
            find_optimal_word(pwords, tweaks=kwargs2['tweaks'], verbose=False)
        )
    print(f'Optimized for {numw} words: {repr(pwords)}')
    print(f'tweaks={kwargs2["tweaks"]}')
    for inum in (4, 5):
        evaluate_words_verbose(pwords[:inum], w1=kwargs2['w1'], smethod='fast')
    print('\n\n')
    for w in pwords:
        print(f' {w}')
    print()
    return pwords
def search(regexp, good_letters, tried_letters):
    """Search for words and print up to 20 matches.

    - regexp: positional pattern like '..a.t'
    - good_letters: str with letters that must be present.
    - tried_letters: str with all letters as tried.
    """
    good_letters = set(good_letters)
    # Letters fixed in the pattern count as good; skip char classes like [ab].
    for let in re.sub(r'\[.*?\]', '', regexp):
        if let != '.':
            good_letters.add(let)
    pattern = re.compile(regexp)
    # Plain membership tests instead of building '[...]' from the bad
    # letters: the original re.compile(f'[{bad_letters}]') raised re.error
    # when the set was empty and misbehaved on class metacharacters.
    bad_letters = set(tried_letters) - good_letters
    matches = [
        w
        for w in WORDS
        if pattern.match(w)
        and not any(let in w for let in bad_letters)
        and all(let in w for let in good_letters)
    ]
    print(', '.join(matches[:20]))
if __name__ == '__main__':
    # Run the full hyper-parameter scans for both corpora.
    # run_en(hyperscan=True, n_corpus=1200)
    run_nl(hyperscan=True, w1=0.5)
    run_en(hyperscan=True)
| StarcoderdataPython |
165835 | <filename>python/common/rsi_email.py
import python.common.helper as helper
from python.common.config import Config
import python.common.common_email_services as common_email_services
from datetime import datetime
import json
import logging
from jinja2 import Environment, select_autoescape, FileSystemLoader
logging.basicConfig(level=Config.LOG_LEVEL, format=Config.LOG_FORMAT)
def application_accepted(**args):
    """Email the applicant that their review application was accepted.

    The template name is derived from the VIPS notice type code.
    Returns (send_result, args) like the other handlers in this module.
    """
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    vips_data = args.get('vips_data')
    t = "{}_application_accepted.html".format(vips_data['noticeTypeCd'])
    args['email_template'] = t
    content = get_email_content(t, prohibition_number)
    template = get_jinja2_env().get_template(t)
    return common_email_services.send_email(
        [args.get('applicant_email_address')],
        content["subject"],
        config,
        template.render(
            deadline_date_string=args.get('deadline_date_string'),
            link_to_paybc=config.LINK_TO_PAYBC,
            full_name=args.get('applicant_full_name'),
            prohibition_number=prohibition_number,
            subject=content["subject"])), args
def applicant_review_type_change(**args):
    """Notify the applicant that their review type was changed.

    Returns (send_result, args) like the other handlers in this module.
    """
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    t = "review_type_change.html"
    args['email_template'] = t
    content = get_email_content(t, prohibition_number)
    template = get_jinja2_env().get_template(t)
    return common_email_services.send_email(
        [args.get('applicant_email_address')],
        content["subject"],
        config,
        template.render(
            full_name=args.get('applicant_full_name'),
            prohibition_number=prohibition_number,
            subject=content["subject"])), args
def send_form_xml_to_admin(**args):
    """Forward the submitted form XML to the admin mailbox (debug aid).

    NOTE(review): unlike the other handlers this returns None (not a
    (send_result, args) tuple) when no XML is present — confirm callers
    handle that.
    """
    xml = args.get('xml_base64', None)
    if xml:
        config = args.get('config')
        subject = 'DEBUG - Form XML attached'
        template = get_jinja2_env().get_template('admin_notice.html')
        return common_email_services.send_email(
            [config.ADMIN_EMAIL_ADDRESS],
            subject,
            config,
            template.render(
                body='XML attached',
                message='message xml attached',
                subject=subject),
            [{
                "content": args.get('xml_base64'),
                "contentType": "string",
                "encoding": "base64",
                "filename": "submitted_form.xml"
            }]), args
    logging.info('No XML to send')
def insufficient_reviews_available(**args) -> tuple:
    """Tell the business area that no review time slots are available."""
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    t = "insufficient_reviews_available.html"
    args['email_template'] = t
    content = get_email_content(t, prohibition_number)
    template = get_jinja2_env().get_template(t)
    return common_email_services.send_to_business(
        content["subject"],
        config,
        template.render(
            prohibition_number=prohibition_number,
            subject=content["subject"])), args
def applicant_did_not_schedule(**args) -> tuple:
    """Tell the business area a paid applicant never booked a review date.

    NOTE(review): reads 'applicant_name' while most handlers here use
    'applicant_full_name' — confirm the key against the callers.
    """
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    t = "applicant_did_not_schedule.html"
    args['email_template'] = t
    content = get_email_content(t, prohibition_number)
    template = get_jinja2_env().get_template(t)
    return common_email_services.send_to_business(
        content["subject"],
        config,
        template.render(
            full_name=args.get('applicant_name'),
            receipt_number=args.get('receipt_number'),
            receipt_amount=args.get('receipt_amount'),
            receipt_date=args.get('receipt_date'),
            order_number=args.get('order_number'),
            prohibition_number=prohibition_number,
            subject=content["subject"])), args
def applicant_applied_at_icbc(**args) -> tuple:
    """Tell the applicant their application was already made at ICBC."""
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    t = "applicant_applied_at_icbc.html"
    args['email_template'] = t
    content = get_email_content(t, prohibition_number)
    template = get_jinja2_env().get_template(t)
    return common_email_services.send_email(
        [args.get('applicant_email_address')],
        content["subject"],
        config,
        template.render(
            link_to_paybc=config.LINK_TO_PAYBC,
            # generic salutation: the applicant's name is not available here
            full_name="Applicant",
            prohibition_number=prohibition_number,
            subject=content["subject"])), args
def send_email_to_admin(**args):
    """Send an ad-hoc notice (subject, body, JSON-dumped message) to admin."""
    subject = args.get('subject')
    config = args.get('config')
    message = args.get('message')
    body = args.get('body')
    template = get_jinja2_env().get_template('admin_notice.html')
    return common_email_services.send_email(
        [config.ADMIN_EMAIL_ADDRESS],
        subject,
        config,
        template.render(subject=subject, body=body, message=json.dumps(message))), args
def applicant_prohibition_served_more_than_7_days_ago(**args):
    """Tell the applicant the application window since service has passed."""
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    t = "not_received_in_time.html"
    args['email_template'] = t
    content = get_email_content(t, prohibition_number)
    template = get_jinja2_env().get_template(t)
    return common_email_services.send_email(
        [args.get('applicant_email_address')],
        content["subject"],
        config,
        template.render(
            full_name=args.get('applicant_full_name'),
            prohibition_number=prohibition_number,
            subject=content["subject"])), args
def applicant_licence_not_seized(**args):
    """Tell the applicant their driver's licence has not been seized."""
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    t = "licence_not_seized.html"
    args['email_template'] = t
    content = get_email_content(t, prohibition_number)
    template = get_jinja2_env().get_template(t)
    return common_email_services.send_email(
        [args.get('applicant_email_address')],
        content["subject"],
        config,
        template.render(
            link_to_icbc=config.LINK_TO_ICBC,
            link_to_service_bc=config.LINK_TO_SERVICE_BC,
            full_name=args.get('applicant_full_name'),
            prohibition_number=prohibition_number,
            subject=content["subject"])), args
def applicant_prohibition_not_found(**args):
    """Tell the applicant their prohibition number was not found in VIPS."""
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    notice_type = args.get('user_entered_notice_type')
    # template varies with the notice type the applicant entered
    t = "{}_prohibition_not_found.html".format(notice_type)
    args['email_template'] = t
    content = get_email_content(t, prohibition_number)
    template = get_jinja2_env().get_template(t)
    return common_email_services.send_email(
        [args.get('applicant_email_address')],
        content["subject"],
        config,
        template.render(
            link_to_icbc=config.LINK_TO_ICBC,
            link_to_service_bc=config.LINK_TO_SERVICE_BC,
            full_name=args.get('applicant_full_name'),
            prohibition_number=prohibition_number,
            subject=content["subject"])), args
def applicant_to_schedule_review(**args):
    """
    This message is sent immediately after an applicant pays
    the application fee. Since we don't have the driver's
    first name handy, this email is addressed to the applicant.
    """
    config = args.get('config')
    payload = args.get('payload')
    vips_application = args.get('vips_application')
    vips_data = args.get('vips_data')
    # template selected by the VIPS notice type code
    t = "{}_select_review_date.html".format(vips_data['noticeTypeCd'])
    args['email_template'] = t
    email_address = vips_application['email']
    full_name = "{} {}".format(vips_application['firstGivenNm'], vips_application['surnameNm'])
    prohibition_number = args.get('prohibition_number')
    content = get_email_content(t, prohibition_number)
    template = get_jinja2_env().get_template(t)
    return common_email_services.send_email(
        [email_address],
        content["subject"],
        config,
        template.render(
            link_to_schedule_form=config.LINK_TO_SCHEDULE_FORM,
            order_number=payload.get('transaction_id'),
            full_name=full_name,
            prohibition_number=prohibition_number,
            subject=content["subject"])), args
def applicant_schedule_confirmation(**args):
    """
    This message is sent to the applicant after the requested review date
    is successfully saved to VIPS.
    """
    config = args.get('config')
    vips_application = args.get('vips_application')
    email_address = vips_application['email']
    presentation_type = vips_application['presentationTypeCd']
    # template varies with the review presentation type (code from VIPS)
    t = 'review_date_confirmed_{}.html'.format(presentation_type)
    args['email_template'] = t
    phone = vips_application['phoneNo']
    prohibition_number = args.get('prohibition_number')
    content = get_email_content(t, prohibition_number)
    template = get_jinja2_env().get_template(t)
    return common_email_services.send_email(
        [email_address],
        content["subject"],
        config,
        template.render(
            full_name=args.get('applicant_name'),
            prohibition_number=prohibition_number,
            subject=content["subject"],
            phone=phone,
            friendly_review_time_slot=args.get('friendly_review_time_slot'))), args
def applicant_last_name_mismatch(**args):
    """
    Sent to the applicant when the last name they entered does not match
    the last name of the driver recorded in VIPS.
    """
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    vips_data = args.get('vips_data')
    template_name = f"{vips_data['noticeTypeCd']}_last_name_mismatch.html"
    args['email_template'] = template_name
    email_content = get_email_content(template_name, prohibition_number)
    body = get_jinja2_env().get_template(template_name).render(
        link_to_application_form=config.LINK_TO_APPLICATION_FORM,
        link_to_icbc=config.LINK_TO_ICBC,
        link_to_service_bc=config.LINK_TO_SERVICE_BC,
        full_name=args.get('applicant_full_name'),
        prohibition_number=prohibition_number,
        subject=email_content["subject"])
    return common_email_services.send_email(
        [args.get('applicant_email_address')], email_content["subject"], config, body), args
def applicant_prohibition_not_found_yet(**args):
    """
    Sent when the prohibition cannot be found in VIPS yet; invites the
    applicant to try again once the paper record has been entered.
    """
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    # Note: we rely on the date_served as submitted by the user -- not the date in VIPS
    served = helper.localize_timezone(
        datetime.strptime(args.get('date_of_service'), '%Y-%m-%d'))
    friendly_date_served = served.strftime("%B %d, %Y")
    template_name = f"{args.get('user_entered_notice_type')}_prohibition_not_found_yet.html"
    args['email_template'] = template_name
    email_content = get_email_content(template_name, prohibition_number)
    body = get_jinja2_env().get_template(template_name).render(
        link_to_icbc=config.LINK_TO_ICBC,
        link_to_service_bc=config.LINK_TO_SERVICE_BC,
        date_of_service=friendly_date_served,
        full_name=args.get('applicant_full_name'),
        prohibition_number=prohibition_number,
        subject=email_content["subject"])
    return common_email_services.send_email(
        [args.get('applicant_email_address')], email_content["subject"], config, body), args
def applicant_prohibition_still_not_found(**args) -> tuple:
    """
    Sent when the prohibition has still not appeared in VIPS after the
    applicant's earlier attempt.
    """
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    # Note: we rely on the date_served as submitted by the user -- not the date in VIPS
    served = helper.localize_timezone(
        datetime.strptime(args.get('date_of_service'), '%Y-%m-%d'))
    friendly_date_served = served.strftime("%B %d, %Y")
    template_name = f"{args.get('user_entered_notice_type')}_prohibition_still_not_found.html"
    args['email_template'] = template_name
    email_content = get_email_content(template_name, prohibition_number)
    body = get_jinja2_env().get_template(template_name).render(
        link_to_icbc=config.LINK_TO_ICBC,
        link_to_service_bc=config.LINK_TO_SERVICE_BC,
        date_of_service=friendly_date_served,
        full_name=args.get('applicant_full_name'),
        prohibition_number=prohibition_number,
        subject=email_content["subject"])
    return common_email_services.send_email(
        [args.get('applicant_email_address')], email_content["subject"], config, body), args
def already_applied(**args):
    """
    Sent when an application for a review of this prohibition has
    already been submitted.
    """
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    vips_data = args.get('vips_data')
    template_name = f"{vips_data['noticeTypeCd']}_already_applied.html"
    args['email_template'] = template_name
    email_content = get_email_content(template_name, prohibition_number)
    body = get_jinja2_env().get_template(template_name).render(
        full_name=args.get('applicant_full_name'),
        prohibition_number=prohibition_number,
        subject=email_content["subject"])
    return common_email_services.send_email(
        [args.get('applicant_email_address')], email_content["subject"], config, body), args
def applicant_disclosure(**args) -> tuple:
    """
    Send the disclosure documents to the applicant as an attachment.
    """
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    vips_data = args.get('vips_data')
    template_name = f"{vips_data['noticeTypeCd']}_send_disclosure.html"
    args['email_template'] = template_name
    email_content = get_email_content(template_name, prohibition_number)
    body = get_jinja2_env().get_template(template_name).render(
        link_to_get_driving_record=config.LINK_TO_GET_DRIVING_RECORD,
        full_name=args.get('applicant_name'),
        prohibition_number=prohibition_number,
        subject=email_content["subject"])
    # The disclosure documents ride along as the attachment argument.
    attachments = args.get('disclosure_for_applicant')
    return common_email_services.send_email(
        [args.get('applicant_email_address')],
        email_content["subject"],
        config,
        body,
        attachments), args
def applicant_evidence_instructions(**args) -> tuple:
    """
    Tell the applicant how to submit evidence for their review.
    """
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    vips_application = args.get('vips_application')
    template_name = 'send_evidence_instructions.html'
    args['email_template'] = template_name
    email_content = get_email_content(template_name, prohibition_number)
    body = get_jinja2_env().get_template(template_name).render(
        link_to_evidence_form=config.LINK_TO_EVIDENCE_FORM,
        full_name=args.get('applicant_name'),
        prohibition_number=prohibition_number,
        subject=email_content["subject"])
    return common_email_services.send_email(
        [vips_application['email']], email_content["subject"], config, body), args
def applicant_evidence_received(**args) -> tuple:
    """
    Acknowledge receipt of the applicant's evidence, stamped with the
    date/time it arrived.
    """
    config = args.get('config')
    prohibition_number = args.get('prohibition_number')
    email_address = args.get('email_address')
    vips_application = args.get('vips_application')
    full_name = f"{vips_application['firstGivenNm']} {vips_application['surnameNm']}"
    template_name = 'evidence_received.html'
    args['email_template'] = template_name
    email_content = get_email_content(template_name, prohibition_number)
    body = get_jinja2_env().get_template(template_name).render(
        link_to_evidence_form=config.LINK_TO_EVIDENCE_FORM,
        full_name=full_name,
        today_date=args.get('today_date').strftime("%B %d, %Y %H:%M"),
        prohibition_number=prohibition_number,
        subject=email_content["subject"])
    return common_email_services.send_email(
        [email_address], email_content["subject"], config, body), args
def admin_unable_to_save_to_vips(**args) -> tuple:
    """
    Alert the administrator that an application could not be written to
    VIPS; the message is parked on a failed-write queue for replay.
    """
    logging.critical('inside unable_to_save_to_vips_api()')
    config = args.get('config')
    message = args.get('message')
    logging.critical('unable to save to VIPS: {}'.format(json.dumps(message)))
    subject = 'Critical Error: Unable to save to VIPS'
    body_text = ('While attempting to save an application to VIPS, an error was returned. '
                 'We will save the record to a failed write queue in RabbitMQ.')
    return send_email_to_admin(config=config, subject=subject, body=body_text), args
def admin_unknown_event_type(**args) -> tuple:
    """
    Alert the administrator that a message with an unrecognized event
    type arrived on the queue.

    Returns the (send_success, args) pair used by all email handlers.
    """
    message = args.get('message')
    config = args.get('config')
    subject = 'Critical Error: Unknown Event Type'
    body_text = "An unknown event has been received: " + message['event_type']
    logging.critical('unknown event type: {}'.format(message['event_type']))
    # Bug fix: the sibling handler (admin_unable_to_save_to_vips) calls
    # send_email_to_admin(config=..., subject=..., body=...).  This handler
    # passed the subject under a 'title' keyword instead, which is
    # inconsistent and would raise TypeError if the helper's signature
    # matches the sibling call -- silently dropping the alert.
    return send_email_to_admin(config=config, subject=subject, body=body_text), args
def get_jinja2_env():
    """Return a Jinja2 environment that loads the shared email templates."""
    loader = FileSystemLoader(searchpath="./python/common/templates")
    environment = Environment(
        loader=loader,
        autoescape=select_autoescape(['html', 'xml']))
    return environment
def get_email_content(template_name: str, prohibition_number: str):
    """
    Return the subject/title metadata for *template_name*, with the
    hyphenated prohibition number substituted into the subject line.
    Unknown templates get a placeholder record instead.
    """
    known_content = content_data()
    if template_name not in known_content:
        # Fallback for templates that have no registered content.
        return dict({
            "raw_subject": "Unknown template requested {}",
            "subject": "Unknown template",
            "callout": "",
            "title": "Unknown Template",
            "timeline": ""
        })
    email_content = known_content[template_name]
    email_content['subject'] = email_content['raw_subject'].format(_hyphenate(prohibition_number))
    logging.info(email_content)
    return email_content
def _hyphenate(prohibition_number: str) -> str:
return "{}-{}".format(prohibition_number[0:2], prohibition_number[2:8])
def content_data() -> dict:
    """
    Return a fresh mapping of email template filename to its content
    metadata.  Each entry carries a 'raw_subject' with a '{}' placeholder
    for the hyphenated prohibition number (filled in by
    get_email_content) and a human-readable 'title'.  A new dict is built
    on every call, so callers may mutate the returned entries safely.
    """
    return dict({
        # Last-name mismatch notices, one per notice type (IRP/ADP/UL).
        "IRP_last_name_mismatch.html": {
            "raw_subject": "Prohibition Number or Name Don't Match - Driving Prohibition {} Review",
            "title": "IRP Prohibition Number or Name Don't Match",
        },
        "ADP_last_name_mismatch.html": {
            "raw_subject": "Prohibition Number or Name Don't Match - Driving Prohibition {} Review",
            "title": "ADP Prohibition Number or Name Don't Match",
        },
        "UL_last_name_mismatch.html": {
            "raw_subject": "Prohibition Number or Name Don't Match - Driving Prohibition {} Review",
            "title": "UL Prohibition Number or Name Don't Match",
        },
        # "Not found yet" notices -- prohibition not yet entered in VIPS.
        "IRP_prohibition_not_found_yet.html": {
            "raw_subject": "Prohibition Not Yet Found - Driving Prohibition {} Review",
            "title": "IRP Prohibition Not Yet Found",
        },
        "ADP_prohibition_not_found_yet.html": {
            "raw_subject": "Prohibition Not Yet Found - Driving Prohibition {} Review",
            "title": "ADP Prohibition Not Yet Found",
        },
        "UL_prohibition_not_found_yet.html": {
            "raw_subject": "Prohibition Not Yet Found - Driving Prohibition {} Review",
            "title": "UL Prohibition Not Yet Found",
        },
        "IRP_prohibition_still_not_found.html": {
            "raw_subject": "Prohibition Still Not Found - Driving Prohibition {} Review",
            "title": "IRP Prohibition Still Not Found",
        },
        "ADP_prohibition_still_not_found.html": {
            "raw_subject": "Prohibition Still Not Found - Driving Prohibition {} Review",
            "title": "ADP Prohibition Still Not Found",
        },
        "UL_prohibition_still_not_found.html": {
            "raw_subject": "Prohibition Still Not Found - Driving Prohibition {} Review",
            "title": "UL Prohibition Still Not Found",
        },
        "IRP_already_applied.html": {
            "raw_subject": "Already Applied – Driving Prohibition {} Review",
            "title": "IRP Already Applied",
        },
        "ADP_already_applied.html": {
            "raw_subject": "Already Applied – Driving Prohibition {} Review",
            "title": "ADP Already Applied",
        },
        "UL_already_applied.html": {
            "raw_subject": "Previous Review on File – Driving Prohibition {} Review",
            "title": "UL Already Applied",
        },
        # Review-date confirmations, keyed by presentation type (oral/written).
        "review_date_confirmed_ORAL.html": {
            "raw_subject": "Review Date Confirmed - Driving Prohibition {} Review",
            "title": "Review Date Confirmed Oral",
        },
        "review_date_confirmed_WRIT.html": {
            "raw_subject": "Review Date Confirmed - Driving Prohibition {} Review",
            "title": "Review Date Confirmed Written",
        },
        "IRP_select_review_date.html": {
            "raw_subject": "Select Review Date - Driving Prohibition {} Review",
            "title": "IRP Select Review Date",
        },
        "ADP_select_review_date.html": {
            "raw_subject": "Select Review Date - Driving Prohibition {} Review",
            "title": "ADP Select Review Date",
        },
        "UL_select_review_date.html": {
            "raw_subject": "Select Review Date - Driving Prohibition {} Review",
            "title": "UL Select Review Date",
        },
        "IRP_prohibition_not_found.html": {
            "raw_subject": "Prohibition Not Found and 7-day Application Window Missed - Driving Prohibition {} Review",
            "title": "IRP Prohibition Not Found"
        },
        "ADP_prohibition_not_found.html": {
            "raw_subject": "Prohibition Not Found and 7-day Application Window Missed - Driving Prohibition {} Review",
            "title": "ADP Prohibition Not Found"
        },
        "UL_prohibition_not_found.html": {
            "raw_subject": "Prohibition Not Found – Driving Prohibition {} Review",
            "title": "UL Prohibition Not Found"
        },
        "licence_not_seized.html": {
            "raw_subject": "Licence Not Surrendered - Driving Prohibition {} Review",
            "title": "Licence Not Surrendered",
        },
        "not_received_in_time.html": {
            "raw_subject": "7-day Application Window Missed - Driving Prohibition {} Review",
            "title": "7-day Application Window Missed",
        },
        "IRP_application_accepted.html": {
            "raw_subject": "Application Accepted - Driving Prohibition {} Review",
            "title": "IRP Application Accepted",
        },
        "ADP_application_accepted.html": {
            "raw_subject": "Application Accepted - Driving Prohibition {} Review",
            "title": "ADP Application Accepted",
        },
        "UL_application_accepted.html": {
            "raw_subject": "Application Accepted - Driving Prohibition {} Review",
            "title": "UL Application Accepted",
        },
        "IRP_send_disclosure.html": {
            "raw_subject": "Disclosure Documents Attached - Driving Prohibition {} Review",
            "title": "Send Disclosure",
        },
        "ADP_send_disclosure.html": {
            "raw_subject": "Disclosure Documents Attached - Driving Prohibition {} Review",
            "title": "Send Disclosure",
        },
        "UL_send_disclosure.html": {
            "raw_subject": "Disclosure Documents Attached - Driving Prohibition {} Review",
            "title": "Send Disclosure",
        },
        "send_evidence_instructions.html": {
            "raw_subject": "Submit Evidence - Driving Prohibition {} Review",
            "title": "Submit Evidence",
        },
        "evidence_received.html": {
            "raw_subject": "Evidence Received - Driving Prohibition {} Review",
            "title": "Evidence Received",
        },
        "review_type_change.html": {
            "raw_subject": "Review Type Change - Driving Prohibition {} Review",
            "title": "Review Type Change",
        },
        "insufficient_reviews_available.html": {
            "raw_subject": "Insufficient Review Dates Available - Driving Prohibition {} Review",
            "title": "Insufficient Review Dates Available",
        },
        "applicant_did_not_schedule.html": {
            "raw_subject": "Did Not Schedule - Driving Prohibition {} Review",
            "title": "Applicant Did Not Schedule",
        },
        "applicant_applied_at_icbc.html": {
            "raw_subject": "Applied at ICBC - Driving Prohibition {} Review",
            "title": "Applied at ICBC",
        }
    })
| StarcoderdataPython |
21483 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import base64
import os
import zlib
from .environment import get_environment
from . import util
def iter_results_paths(results):
    """
    Yield (directory, filename) pairs for every result file found under
    *results*.  The machine/benchmark metadata files are excluded even
    though they are JSON.
    """
    non_result_files = {'machine.json', 'benchmarks.json'}
    for root, dirs, files in os.walk(results):
        for filename in files:
            if filename.endswith('.json') and filename not in non_result_files:
                yield (root, filename)
def iter_results(results):
    """
    Yield a loaded `Results` object for every result file under *results*.
    """
    for root, filename in iter_results_paths(results):
        path = os.path.join(root, filename)
        yield Results.load(path)
def iter_results_for_machine(results, machine_name):
    """
    Yield every `Results` object stored for *machine_name*.
    """
    machine_dir = os.path.join(results, machine_name)
    return iter_results(machine_dir)
def iter_results_for_machine_and_hash(results, machine_name, commit):
    """
    Yield every result file for *machine_name* whose commit hash matches
    *commit*.  Either hash may be abbreviated, so they are compared over
    the length of the longer of the two.
    """
    machine_dir = os.path.join(results, machine_name)
    for root, filename in iter_results_paths(machine_dir):
        file_commit = filename.split('-')[0]
        compare_len = max(len(commit), len(file_commit))
        if file_commit[:compare_len] == commit[:compare_len]:
            yield Results.load(os.path.join(root, filename))
def iter_existing_hashes(results):
    """
    Yield (commit_hash, date) for every result under *results*.
    May yield duplicates; use `get_existing_hashes` if that matters.
    """
    for result in iter_results(results):
        yield result.commit_hash, result.date
def get_existing_hashes(results):
    """
    Return the unique (commit_hash, date) pairs that have already been
    tested, in arbitrary order.
    """
    return list(set(iter_existing_hashes(results)))
def find_latest_result_hash(machine, root):
    """
    Return the commit hash with the most recent date for *machine*, or
    the empty string when the machine has no results.
    """
    machine_root = os.path.join(root, machine)
    latest_date, latest_hash = 0, ''
    for commit_hash, date in iter_existing_hashes(machine_root):
        if date > latest_date:
            latest_date, latest_hash = date, commit_hash
    return latest_hash
def get_filename(machine, commit_hash, env):
    """
    Return the relative result-file path for the given machine, commit
    hash (abbreviated to its first 8 characters) and environment.
    """
    basename = "{0}-{1}.json".format(commit_hash[:8], env.name)
    return os.path.join(machine, basename)
class Results(object):
    """
    Manage a set of benchmark results for a single machine and commit
    hash.
    """
    # Version stamp written into (and required from) every result file.
    api_version = 1
    def __init__(self, params, env, commit_hash, date):
        """
        Parameters
        ----------
        params : dict
            Parameters describing the environment in which the
            benchmarks were run.
        env : Environment object
            Environment in which the benchmarks were run.
        commit_hash : str
            The commit hash for the benchmark run.
        date : int
            Javascript timestamp for when the commit was merged into
            the repository.
        """
        self._params = params
        self._env = env
        self._commit_hash = commit_hash
        self._date = date
        # benchmark name -> numeric timing result
        self._results = {}
        # benchmark name -> base64-encoded, zlib-compressed cProfile data
        self._profiles = {}
        self._python = env.python
        # Relative path ("machine/hash-env.json") where this result is stored.
        self._filename = get_filename(
            params['machine'], self._commit_hash, env)
    @property
    def commit_hash(self):
        return self._commit_hash
    @property
    def date(self):
        return self._date
    @property
    def params(self):
        return self._params
    @property
    def results(self):
        return self._results
    @property
    def env(self):
        return self._env
    def add_time(self, benchmark_name, time):
        """
        Add benchmark times.
        Parameters
        ----------
        benchmark_name : str
            Name of benchmark
        time : number
            Numeric result
        """
        self._results[benchmark_name] = time
    def add_profile(self, benchmark_name, profile):
        """
        Add benchmark profile data.
        Parameters
        ----------
        benchmark_name : str
            Name of benchmark
        profile : bytes
            `cProfile` data
        """
        # Compress then base64-encode so the blob can be stored in JSON.
        self._profiles[benchmark_name] = base64.b64encode(
            zlib.compress(profile))
    def get_profile(self, benchmark_name):
        """
        Get the profile data for the given benchmark name.
        """
        # Inverse of add_profile: decode then decompress.
        return zlib.decompress(
            base64.b64decode(self._profiles[benchmark_name]))
    def has_profile(self, benchmark_name):
        """
        Does the given benchmark data have profiling information?
        """
        return benchmark_name in self._profiles
    def save(self, result_dir):
        """
        Save the results to disk.
        Parameters
        ----------
        result_dir : str
            Path to root of results tree.
        """
        path = os.path.join(result_dir, self._filename)
        # Keys here form the on-disk schema; load() below must stay in sync.
        util.write_json(path, {
            'results': self._results,
            'params': self._params,
            'requirements': self._env.requirements,
            'commit_hash': self._commit_hash,
            'date': self._date,
            'python': self._python,
            'profiles': self._profiles
        }, self.api_version)
    @classmethod
    def load(cls, path):
        """
        Load results from disk.
        Parameters
        ----------
        path : str
            Path to results file.
        """
        d = util.load_json(path, cls.api_version)
        obj = cls(
            d['params'],
            get_environment('', d['python'], d['requirements']),
            d['commit_hash'],
            d['date'])
        obj._results = d['results']
        # 'profiles' is optional for backward compatibility with older files.
        if 'profiles' in d:
            obj._profiles = d['profiles']
        # Rebuild the relative "machine/filename" path from the last two
        # components of the absolute path.
        obj._filename = os.path.join(*path.split(os.path.sep)[-2:])
        return obj
    def rm(self, result_dir):
        """Delete this result's file from the results tree."""
        path = os.path.join(result_dir, self._filename)
        os.remove(path)
    @classmethod
    def update(cls, path):
        """Migrate the on-disk file at *path* to the current api_version."""
        util.update_json(cls, path, cls.api_version)
| StarcoderdataPython |
54808 | from sklearn.base import BaseEstimator, TransformerMixin
from autogluon.features.generators import OneHotEncoderFeatureGenerator
class OheFeaturesGenerator(BaseEstimator, TransformerMixin):
    """Sklearn-style wrapper around autogluon's one-hot encoder."""

    def __init__(self):
        self._feature_names = []
        self._encoder = None

    def fit(self, X, y=None):
        # Delegate encoding to autogluon; max_levels caps per-column cardinality.
        encoder = OneHotEncoderFeatureGenerator(max_levels=10000, verbosity=0)
        encoder.fit(X)
        self._encoder = encoder
        self._feature_names = encoder.features_out
        return self

    def transform(self, X, y=None):
        return self._encoder.transform_ohe(X)

    def get_feature_names(self):
        return self._feature_names
class NlpDataPreprocessor(BaseEstimator, TransformerMixin):
    """
    Concatenate the configured free-text columns of a DataFrame into one
    space-separated string per row.

    Parameters
    ----------
    nlp_cols : list of str
        Names of the text columns to merge.
    """

    def __init__(self, nlp_cols):
        self.nlp_cols = nlp_cols

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X, y=None):
        X = X[self.nlp_cols].copy()
        for c in self.nlp_cols:
            # Bug fix: fill missing values *before* casting to str.  The
            # previous order (astype(str).fillna(' ')) turned NaN into the
            # literal string 'nan', so fillna never replaced anything.
            X[c] = X[c].fillna(' ').astype(str)
        # Join the columns row-wise and collapse runs of spaces.
        X = X.apply(' '.join, axis=1).str.replace('[ ]+', ' ', regex=True)
        return X.values.tolist()
| StarcoderdataPython |
1732877 | <filename>geoevents/feedback/forms.py
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from django import forms
from geoevents.core.forms import StyledModelForm
from geoevents.feedback.models import Article, Feedback
class ArticleForm(StyledModelForm):
    # Form for creating/editing knowledge-base articles.
    class Meta:
        model = Article
        # Fields exposed to the editor; other model fields are untouched.
        fields = ('title', 'category', 'content', 'common_issue', 'active')
class FeedbackForm(StyledModelForm):
    # Extra, non-model field: whether to email the submitter a confirmation.
    send_confirmation_email = forms.BooleanField(required=False, initial=True)
    # CSS class Django applies to fields that fail validation.
    error_css_class = 'error'
    class Meta:
        model = Feedback
        fields = ('name', 'email', 'organization', 'phone', 'subject', 'login_method', 'platform', 'feedback')
| StarcoderdataPython |
8188 | <reponame>pageuppeople-opensource/relational-data-loader
import logging
from rdl.data_sources.MsSqlDataSource import MsSqlDataSource
from rdl.data_sources.AWSLambdaDataSource import AWSLambdaDataSource
class DataSourceFactory(object):
def __init__(self, logger=None):
self.logger = logger or logging.getLogger(__name__)
self.sources = [MsSqlDataSource, AWSLambdaDataSource]
def create_source(self, connection_string):
for source in self.sources:
if source.can_handle_connection_string(connection_string):
self.logger.info(
f"Found handler '{source}' for given connection string."
)
return source(connection_string)
raise RuntimeError(
"There are no data sources that can handle this connection string"
)
def is_prefix_supported(self, connection_string):
for source in self.sources:
if source.can_handle_connection_string(connection_string):
return True
return False
def get_supported_source_prefixes(self):
return list(
map(lambda source: source.get_connection_string_prefix(), self.sources)
)
| StarcoderdataPython |
3301943 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The calibration test suite.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import os
import unittest
import numpy as np
from obspy import read
from obspy.signal.calibration import rel_calib_stack
from obspy.core.util.misc import TemporaryWorkingDirectory
class CalibrationTestCase(unittest.TestCase):
    """
    Calibration test case
    """
    def setUp(self):
        # directory where the test files are located
        self.path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                 'data'))
    def test_relcal_sts2_vs_unknown(self):
        """
        Test relative calibration of unknown instrument vs STS2 in the same
        time range. Window length is set to 20 s, smoothing rate to 10.
        """
        st1 = read(os.path.join(self.path, 'ref_STS2'))
        st2 = read(os.path.join(self.path, 'ref_unknown'))
        calfile = os.path.join(self.path, 'STS2_simp.cal')
        # Run inside a scratch directory because save_data=True writes
        # response files into the current working directory.
        with TemporaryWorkingDirectory():
            freq, amp, phase = rel_calib_stack(st1, st2, calfile, 20,
                                               smooth=10, save_data=True)
            self.assertTrue(os.path.isfile("0438.EHZ.20.resp"))
            self.assertTrue(os.path.isfile("STS2.refResp"))
            # The saved file must round-trip the returned arrays.
            freq_, amp_, phase_ = np.loadtxt("0438.EHZ.20.resp", unpack=True)
            self.assertTrue(np.allclose(freq, freq_, rtol=1e-8, atol=1e-8))
            self.assertTrue(np.allclose(amp, amp_, rtol=1e-8, atol=1e-8))
            self.assertTrue(np.allclose(phase, phase_, rtol=1e-8, atol=1e-8))
            # read in the reference responses
            un_resp = np.loadtxt(os.path.join(self.path, 'unknown.resp'))
            kn_resp = np.loadtxt(os.path.join(self.path, 'STS2.refResp'))
            # bug resolved with 2f9876d, arctan was used which maps to
            # [-pi/2, pi/2]. arctan2 or np.angle shall be used instead
            # correct the test data by hand
            un_resp[:, 2] = np.unwrap(un_resp[:, 2] * 2) / 2
            # Debug-only visual comparison; flip to True when investigating.
            if False:
                import matplotlib.pyplot as plt
                plt.plot(freq, un_resp[:, 2], 'b', label='reference', alpha=.8)
                plt.plot(freq, phase, 'r', label='new', alpha=.8)
                plt.xlim(-10, None)
                plt.legend()
                plt.show()
            # test if freq, amp and phase match the reference values
            np.testing.assert_array_almost_equal(freq, un_resp[:, 0],
                                                 decimal=4)
            np.testing.assert_array_almost_equal(freq, kn_resp[:, 0],
                                                 decimal=4)
            np.testing.assert_array_almost_equal(amp, un_resp[:, 1],
                                                 decimal=4)
            # TODO: unknown why the first frequency mismatches so much
            np.testing.assert_array_almost_equal(phase[1:], un_resp[1:, 2],
                                                 decimal=4)
    def test_relcal_using_traces(self):
        """
        Tests using traces instead of stream objects as input parameters.
        """
        st1 = read(os.path.join(self.path, 'ref_STS2'))
        st2 = read(os.path.join(self.path, 'ref_unknown'))
        calfile = os.path.join(self.path, 'STS2_simp.cal')
        # stream
        freq, amp, phase = rel_calib_stack(st1, st2, calfile, 20, smooth=10,
                                           save_data=False)
        # traces
        freq2, amp2, phase2 = rel_calib_stack(st1[0], st2[0], calfile, 20,
                                              smooth=10, save_data=False)
        # Trace input must give the same answer as single-trace streams.
        np.testing.assert_array_almost_equal(freq, freq2, decimal=4)
        np.testing.assert_array_almost_equal(amp, amp2, decimal=4)
        np.testing.assert_array_almost_equal(phase, phase2, decimal=4)
    def test_relcal_different_overlaps(self):
        """
        Tests using different window overlap percentages.
        Regression test for bug #1821.
        """
        st1 = read(os.path.join(self.path, 'ref_STS2'))
        st2 = read(os.path.join(self.path, 'ref_unknown'))
        calfile = os.path.join(self.path, 'STS2_simp.cal')
        def median_amplitude_plateau(freq, amp):
            # resulting response is pretty much flat in this frequency range
            return np.median(amp[(freq >= 0.3) & (freq <= 3)])
        # correct results using default overlap fraction of 0.5
        freq, amp, phase = rel_calib_stack(
            st1, st2, calfile, 20, smooth=10, overlap_frac=0.5,
            save_data=False)
        amp_expected = median_amplitude_plateau(freq, amp)
        for overlap in np.linspace(0.1, 0.9, 5):
            freq2, amp2, phase2 = rel_calib_stack(
                st1, st2, calfile, 20, smooth=10, overlap_frac=overlap,
                save_data=False)
            amp_got = median_amplitude_plateau(freq2, amp2)
            percentual_difference = abs(
                (amp_expected - amp_got) / amp_expected)
            # make sure results are close for any overlap choice
            self.assertTrue(percentual_difference < 0.01)
    def test_relcal_using_dict(self):
        """
        Tests using paz dictionary instead of a gse2 file.
        """
        st1 = read(os.path.join(self.path, 'ref_STS2'))
        st2 = read(os.path.join(self.path, 'ref_unknown'))
        calfile = os.path.join(self.path, 'STS2_simp.cal')
        # Poles-and-zeros dictionary equivalent to the .cal file above.
        calpaz = dict()
        calpaz['poles'] = [-0.03677 + 0.03703j, -0.03677 - 0.03703j]
        calpaz['zeros'] = [0 + 0j, 0 - 0j]
        calpaz['sensitivity'] = 1500
        # stream
        freq, amp, phase = rel_calib_stack(st1, st2, calfile, 20, smooth=10,
                                           save_data=False)
        # traces
        freq2, amp2, phase2 = rel_calib_stack(st1, st2, calpaz, 20,
                                              smooth=10, save_data=False)
        np.testing.assert_array_almost_equal(freq, freq2, decimal=4)
        np.testing.assert_array_almost_equal(amp, amp2, decimal=4)
        np.testing.assert_array_almost_equal(phase, phase2, decimal=4)
def suite():
    # Collect every test_* method of CalibrationTestCase into a test suite.
    return unittest.makeSuite(CalibrationTestCase, 'test')
# Allow running this module directly as a script.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| StarcoderdataPython |
106497 | <filename>pyspedas/mms/tests/validation/scm.py
from pyspedas import mms_load_scm
from pytplot import get_data

# Load MMS search-coil magnetometer survey data and print a handful of
# samples for validation against the IDL implementation.
mms_load_scm()
times, values = get_data('mms1_scm_acb_gse_scsrvy_srvy_l2')
print(times[0:10].round(6).tolist())
for sample_index in (10000, 50000, 100000, 200000, 300000, 400000, 500000,
                     600000, 700000, 800000, 900000, 1000000, 1500000,
                     2000000):
    print(values[sample_index].tolist())
| StarcoderdataPython |
1762941 | <gh_stars>1-10
"""
Setup development environment
"""
import time
import network
import machine
import gc
try:
import appconfig
except:
class AppConfig(object):
def __init__(self, ssid:str, password:str) -> None:
self.wifi_ssid = ssid
self.wifi_password = password
ssid = input('Input Wi-Fi SSID: ')
password = input('Input Wi-Fi Password: ')
appconfig = AppConfig(ssid, password)
# Start Wi-Fi
w = network.WLAN()
w.active(True)
w.connect(appconfig.wifi_ssid, appconfig.wifi_password)
while not w.isconnected():
time.sleep(1)
print(w.ifconfig())
# Start FTP server to upload source codes.
network.ftp.start(user='esp32', password='<PASSWORD>')
gc.collect()
| StarcoderdataPython |
3310455 | # * The MIT License (MIT) Copyright (c) 2017 by <NAME>.
# * The formulation and display of an AUdio Spectrum using an ESp8266 or ESP32 and SSD1306 or SH1106 OLED Display using a Fast Fourier Transform
# * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
# * (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
# * publish, distribute, but not to use it commercially for profit making or to sub-license and/or to sell copies of the Software or to
# * permit persons to whom the Software is furnished to do so, subject to the following conditions:
# * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# * See more at http://dsbird.org.uk
#!/usr/bin/env python3
# encoding: utf-8
import sys,os,json
import time
from time import sleep
import cv2
import imutils
import numpy as np
import matplotlib.pyplot as plt
f, (ax1, ax2) = plt.subplots(1, 2) # create subplots (presumably used later in the file -- not in this section)
# Parse the input image path from the command line and derive the output
# filename ("<name>_final<ext>").  Exits when no argument is supplied.
try:
    filename = sys.argv[1]
    name, ext = os.path.splitext(sys.argv[1])
    output = name + '_final' + ext
    data = {'filename': filename, 'output': output, 'count': 0}
    print("fichier:", filename)
    print("final:", output)
    print(json.dumps(data))
except IndexError:
    print("missing filename")
    sys.exit()
# https://www.codetd.com/en/article/12003434
#print ('type x,y,srcW,refW,srcH,refH', x,type(x),y,type(y),srcW,type(srcW),refW,type(refW),srcH,type(srcH),refH,type(refH))
def addWeightedSmallImgToLargeImg(largeImg, alpha, smallImg, beta, gamma=0.0, regionTopLeftPos=(0, 0)):
    """
    Blend smallImg into a copy of largeImg with cv2.addWeighted and return
    the result; largeImg itself is left untouched.

    The blended region is alpha*largeImg_region + beta*smallImg + gamma,
    as in cv2.addWeighted.

    NOTE: regionTopLeftPos is unpacked as (y, x); the caller in this
    script passes (x, y) and relies on that swap, so the order is kept
    exactly as-is.

    Raises ValueError when smallImg does not fit inside largeImg.
    """
    srcW, srcH = largeImg.shape[1::-1]
    refW, refH = smallImg.shape[1::-1]
    y, x = regionTopLeftPos
    if (refW > srcW) or (refH > srcH):
        raise ValueError(f"img2's size {smallImg.shape[1::-1]} must less than or equal to img1's size {largeImg.shape[1::-1]}")
    # Clamp the insertion point so the region stays inside largeImg.
    # (Bug fix: the clamped values were previously built with str() and
    # converted back with int() below; keep them numeric throughout.)
    if (x + refW) > srcW:
        x = srcW - refW
    if (y + refH) > srcH:
        y = srcH - refH
    destImg = np.array(largeImg)
    x1 = int(x)
    y1 = int(y)
    x2 = x1 + refW
    y2 = y1 + refH
    tmpSrcImg = destImg[y1:y2, x1:x2]
    tmpImg = cv2.addWeighted(tmpSrcImg, alpha, smallImg, beta, gamma)
    destImg[y1:y2, x1:x2] = tmpImg
    return destImg
def mon_resize(image, taille):
    """Return *image* rescaled to *taille* percent of its original size."""
    new_width = int(image.shape[1] * taille / 100)
    new_height = int(image.shape[0] * taille / 100)
    return cv2.resize(image, (new_width, new_height))
# Méthode : Villeurbanne
def comptage(image,blank_image) :
    """
    Count varroa mites by sweeping contour-area thresholds.

    NOTE(review): both parameters are ignored -- `image` is immediately
    overwritten by reading 'final_blank_image.jpg' from disk and
    `blank_image` is never used.  Confirm intent against the callers.

    Returns the count from the *last* (j, jj) threshold pair of the sweep.
    """
    # Read the working image from disk (clobbers the `image` argument).
    image = cv2.imread('final_blank_image.jpg')
    # Print its dimensions.
    print('Dimensions de l image de départ :', image.shape)
    # Convert to grayscale.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Edge-preserving (bilateral) smoothing.
    flougaussien = cv2.bilateralFilter(gray, 6, 157,157)
    # Detect edges.
    edge = imutils.auto_canny(flougaussien)
    # Find the external (unclosed) contours.
    (cnts,_) = cv2.findContours(edge, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    print (" debut")
    # Sweep range for the upper area threshold.
    borne_sup_sup = 1000
    borne_sup_inf = 700
    #print(" borne_sup_sup = ",borne_sup_sup, " borne_sup_inf = ",borne_sup_inf)
    # Sweep range for the lower area threshold.
    borne_inf_sup = 30
    borne_inf_inf = 20
    #print(' borne_inf_sup = ',borne_inf_sup, ' borne_inf_inf = ',borne_inf_inf)
    for j in range(borne_sup_inf,borne_sup_sup,10):
        for jj in range(borne_inf_inf,borne_inf_sup,1):
            compteur = 0
            for i in range(len(cnts)):
                area = cv2.contourArea(cnts[i])
                if ( j>area>jj ):# initially 120>area>80
                    compteur += 1
                    final = cv2.drawContours(image, cnts[i], -1, (255,0,0), 1) # outline the detected varroa in blue
            print(' RESULTATS pour ',j,'>area>',jj,' Nombre de varroas : ',compteur)
            # NOTE(review): `final` is only bound when at least one contour
            # matched above -- this line raises NameError otherwise.
            cv2.imwrite('output.jpg',final)
    return compteur
def insertion(blank_image,pts) :
    """Paste a small crop of the original image around each detected varroa
    into the "blank" page and return the updated page.

    NOTE(review): reads the module-level global `workingImage` (the source
    photo) — confirm it is set before calling.
    """
    # https://stackoverflow.com/questions/35884409/how-to-extract-x-y-coordinates-from-opencv-cv2-keypoint-object/35884644
    # RETREIVE KEYPOINTS COORDINATES AND DRAW MANUALLY
    h1 = 8 # half-width of the crop square
    srcW, srcH = blank_image.shape[1::-1] # size of the blank page (width, height)
    tailleW, tailleH = workingImage.shape[1::-1] # size of the original image (width, height)
    # print('pts', pts) # list of detected varroa coordinates
    for point in pts: # iterate detections: point[0], point[1] are the keypoint coordinates
        b1 = int(point[0]-h1) # left edge of the crop
        # NOTE(review): these clamps only guard the *upper* bound; a detection
        # near the left/top border makes b1/a1 negative, and Python slicing
        # treats negative indices as "from the end" — looks like a latent bug,
        # TODO confirm before relying on border detections.
        if (b1>srcW) :
            b1=srcW
        b2 = int(point[0]+h1) # right edge
        if (b2>srcW) :
            b2=srcW
        a1 = int(point[1]-h1) # top edge
        if (a1>srcH) :
            a1=srcH
        a2 = int(point[1]+h1) # bottom edge
        if (a2>srcH) :
            a2=srcH
        # 2*h1 x 2*h1 square cut from the original image around the detection
        crop_img = workingImage[a1:a2,b1:b2]
        y = int(point[0]) - h1 # insertion coordinate derived from point[0]
        x = int(point[1]) - h1 # insertion coordinate derived from point[1]
        refW, refH = crop_img.shape[1::-1]
        # paste the crop onto the blank page (note the (x, y) swap, as in the
        # original comment: "!! inversion y,x en x,y !!")
        blank_image = addWeightedSmallImgToLargeImg(blank_image, 0.01, crop_img, 1,regionTopLeftPos=(x,y))
    return blank_image # blank page with the detected varroa crops pasted in
def analyse(filename, parameters,image_height,image_width,blank_image): # Blob method (Vincent-Fabrice-Jody)
    """Detect varroa mites in *filename* with OpenCV's SimpleBlobDetector,
    show the detections, and composite a crop of each detection onto
    *blank_image* (written to 'final_blank_image.jpg').

    Returns (nb_varroas, annotated_image, greyscale_image).

    NOTE(review): relies on module-level matplotlib axes `ax1`/`ax2` created
    elsewhere in this script — confirm they exist before calling.
    """
    workingImage = cv2.imread(filename) # load the image
    # (disabled) greyscale + blur preprocessing
    # workingImage1 = cv2.cvtColor(workingImage0, cv2.COLOR_BGR2GRAY)
    # workingImage = cv2.bilateralFilter(workingImage1, 18, 90,100) # initially 6, 157, 157
    a,b,c,d,e,f,g,h=parameters # blob parameters, unpacked positionally
    print(f'minThreshold:{a} maxThreshold:{b} blobColor:{c} minArea:{d} maxArea:{e} minCircularity:{f} minConvexity:{g} minInertiaRatio:{h} ')
    params = cv2.SimpleBlobDetector_Params()
    params.minThreshold = a # = 15 # original 10
    params.maxThreshold = b # = 180 # original 200
    params.filterByColor = True
    params.blobColor = c # blobColor = 0 dark blobs / blobColor = 255 light blobs
    params.filterByArea = True
    params.minArea = d # 23 but with margin: 50
    params.maxArea = e # 120 but with margin: 150
    params.filterByCircularity = True
    params.minCircularity = f # params.minCircularity = 0.1
    params.filterByConvexity = True
    params.minConvexity = g # params.minConvexity = 0.69
    params.filterByInertia = True
    params.minInertiaRatio = h # params.minInertiaRatio = 0.52
    detector = cv2.SimpleBlobDetector_create(params) # build the blob detector
    # print(f'A:{a} B:{b} C:{c} D:{d} E:{e} F:{f} G:{g} H:{h} ')
    g1 = cv2.cvtColor(workingImage, cv2.COLOR_BGR2GRAY)
    keyPoints = detector.detect(g1) # detect varroas as blobs
    nb_varroas=len(keyPoints) # number of detected varroas
    # circle every detection in red
    im_with_keypoints = cv2.drawKeypoints(workingImage, keyPoints, np.array([]), (0, 0, 255),
    cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    ax1.imshow(im_with_keypoints) # plot detections on the (global) first axis
    pts = np.asarray([[p.pt[0], p.pt[1]] for p in keyPoints])
    # NOTE(review): if no keypoint was found, `pts` is empty/1-D and the fancy
    # indexing below raises IndexError — TODO confirm intended behavior.
    cols = pts[:,0]
    rows = pts[:,1]
    # h1 = pts.size/2 # (disabled) crop half-width heuristic — "not great"
    ax2.imshow(cv2.cvtColor(workingImage, cv2.COLOR_BGR2RGB))
    ax2.scatter(cols, rows) # scatter plot of the detected varroa positions
    # paste crops of the original image around each detection into the "blank" page
    # https://stackoverflow.com/questions/35884409/how-to-extract-x-y-coordinates-from-opencv-cv2-keypoint-object/35884644
    # RETREIVE KEYPOINTS COORDINATES AND DRAW MANUALLY
    h1 = 8 # half-width of the crop square
    srcW, srcH = blank_image.shape[1::-1] # size of the blank page (width, height)
    tailleW, tailleH = workingImage.shape[1::-1] # size of the original image
    for point in pts: # iterate detections
        b1 = int(point[0]-h1) # left edge
        # NOTE(review): same one-sided clamps as in insertion(); negative
        # indices near the image border wrap around — likely a latent bug.
        if (b1>srcW) :
            b1=srcW
        b2 = int(point[0]+h1) # right edge
        if (b2>srcW) :
            b2=srcW
        a1 = int(point[1]-h1) # top edge
        if (a1>srcH) :
            a1=srcH
        a2 = int(point[1]+h1) # bottom edge
        if (a2>srcH) :
            a2=srcH
        # 2*h1 x 2*h1 square cut from the original image around the detection
        crop_img = workingImage[a1:a2,b1:b2]
        y = int(point[0]) - h1 # insertion coordinate derived from point[0]
        x = int(point[1]) - h1 # insertion coordinate derived from point[1]
        refW, refH = crop_img.shape[1::-1]
        # paste the crop onto the blank page ("!! inversion y,x en x,y !!")
        blank_image = addWeightedSmallImgToLargeImg(blank_image, 0.01, crop_img, 1,regionTopLeftPos=(x,y))
    #cv2.imshow('image',blank_image)
    #cv2.waitKey(0)
    #plt.show()
    #cv2.imwrite(output,im_with_keypoints) #im_with_keypoints,
    cv2.imwrite('final_blank_image.jpg',blank_image) # write the composited page to disk
    output= mon_resize(blank_image,25) # shrink the page to 25%
    cv2.imshow('image',output) # show the page on screen
    plt.show() # display all pending matplotlib figures
    cv2.waitKey(0) # block until a key is pressed
    return nb_varroas,im_with_keypoints,g1 # workingImage
# Blob-detector parameters (current tuning; earlier values kept in comments).
minThreshold = 10 # 99
maxThreshold = 150 # 168
blobColor = 0 # 0
minArea = 35 # 117
maxArea = 150 # 134
minCircularity = 0.66 # 0.8
minConvexity = 0.72 # 0.7
minInertiaRatio = 0.44 # 0.4
# BUG FIX: the tuple previously passed `minConvexity` twice and never used
# `minCircularity`, although analyse() unpacks position 6 as minCircularity.
parameters=(minThreshold,maxThreshold,blobColor,minArea,maxArea,minCircularity,minConvexity,minInertiaRatio)
# `filename` and `data` are defined earlier in this script (not shown here).
workingImage = cv2.imread(filename)
data['image_height'] = workingImage.shape[0]
data['image_width'] = workingImage.shape[1]
# build a blank page the size of the photo to paste detected varroa crops into
page_blanche = np.zeros((data['image_height'],data['image_width'],3), np.uint8)
page_blanche.fill(255) # fill the page with white
# first pass: blob detection + compositing onto the blank page
nbVarroas,im_with_keypoints,workingImage=analyse(filename,parameters,workingImage.shape[0],workingImage.shape[1],page_blanche)
print('nbVarroas premier passage : ',nbVarroas)
# fresh blank page for the second counting pass
page_blanche_2 = np.zeros((data['image_height'],data['image_width'],3), np.uint8)
page_blanche_2.fill(255)
# second pass: contour-area counting on the composited page
nbVarroas = comptage(page_blanche,page_blanche_2)
print('nbVarroas second passage : ',nbVarroas)
data['count'] = nbVarroas
# r = requests.post('https://varroacounter.jodaille.org/counter-results', json=json.dumps(data))
print(json.dumps(data))
# print(r)
1634138 | <filename>setup.py
from setuptools import setup
# Package metadata for the `fnplus` distribution; tests are discovered from
# the fnplus.tests package via setuptools' test_suite hook.
setup(name='fnplus',
      version='0.4.1',
      description='Yet another functional programming library',
      url='http://github.com/mdowds/fnplus',
      author='<NAME>',
      license='MIT',
      packages=['fnplus'],
      test_suite='fnplus.tests',
      zip_safe=False)
| StarcoderdataPython |
127138 | """
Sample some tests
"""
from python_template import fizzbuzz
def test_fizzbuzz():
    """
    test for fizzbuzz func
    """
    expected = {11: "11", 12: "fizz", 15: "fizzbuzz", 20: "buzz"}
    for value, result in expected.items():
        assert fizzbuzz(value) == result
94217 | <reponame>christopinka/django-civil
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from civil.library.admin import BaseAdmin
from .models import *
#==============================================================================
class NameOnlyAdmin(BaseAdmin):
    """Admin for simple "name-only" lookup models (the *Type tables below)."""
    # shown columns; id and name are clickable links to the change form
    list_display = ('id', 'name', 'created', 'modified')
    list_display_links = ('id', 'name', )
    # sidebar filters on the bookkeeping timestamps
    list_filter = ('created', 'modified')
    # free-text search over the name field
    search_fields = ['name']
#==============================================================================
# Lookup tables with only a `name` field share the NameOnlyAdmin above.
admin.site.register(AddressType, NameOnlyAdmin)
admin.site.register(PhoneType, NameOnlyAdmin)
admin.site.register(EmailType, NameOnlyAdmin)
admin.site.register(WebsiteType, NameOnlyAdmin)
admin.site.register(SexType, NameOnlyAdmin)
admin.site.register(RelationshipType, NameOnlyAdmin)
admin.site.register(PaymentType, NameOnlyAdmin)
#==============================================================================
# Remaining type models use the plain BaseAdmin.
admin.site.register(ContactType, BaseAdmin)
admin.site.register(PrefixType, BaseAdmin)
admin.site.register(SuffixType, BaseAdmin)
| StarcoderdataPython |
194140 | <reponame>GilianPonte/Deep-Learning
#Code taken from https://www.tensorflow.org/tutorials/images/classification
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, LeakyReLU
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.keras.datasets import cifar10
def generate_model(dropout,HIDDEN_UNITS,activation,optimizer,x_train):
    """Build and compile a small CIFAR-10 CNN.

    Args:
        dropout: dropout rate applied after the pooling stages.
        HIDDEN_UNITS: width of the dense layer before the 10-way output.
        activation: 'leaky' selects a LeakyReLU dense head; any other string
            is passed directly to Dense(..., activation=...).
        optimizer: 'sgd' or 'adadelta' are replaced by pre-configured
            instances; anything else is handed to compile() unchanged.
        x_train: training array, used only for its input shape.

    Returns:
        The compiled tf.keras Sequential model.
    """
    # BUG FIX: the original compared strings with `is` (identity, not
    # equality) and referenced `tensorflow.keras...` although the module is
    # imported as `tf` — so these branches either never fired or raised
    # NameError. NOTE(review): `lr=` is the legacy SGD argument name; kept
    # as-is, but newer TF versions expect `learning_rate=`.
    if optimizer == 'sgd':
        optimizer = tf.keras.optimizers.SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    elif optimizer == 'adadelta':
        optimizer = tf.keras.optimizers.Adadelta(learning_rate=0.1, rho=0.95)
    if activation == "leaky":
        model_new = Sequential([
            Conv2D(32, (3, 3), padding='same',
                   input_shape=x_train.shape[1:]),
            # BatchNormalization(),
            MaxPooling2D(),
            Dropout(dropout),
            Conv2D(32, 3, padding='same', activation='relu'),
            # BatchNormalization(),
            MaxPooling2D(),
            Conv2D(64, 3, padding='same', activation='relu'),
            # BatchNormalization(),
            MaxPooling2D(),
            Dropout(dropout),
            Flatten(),
            Dense(HIDDEN_UNITS),
            LeakyReLU(alpha=0.3),
            # BatchNormalization(),
            Dense(10, activation='sigmoid')
        ])
    else:
        model_new = Sequential([
            Conv2D(32, (3, 3), padding='same',
                   input_shape=x_train.shape[1:]),
            # BatchNormalization(),
            MaxPooling2D(),
            Dropout(dropout),
            Conv2D(32, 3, padding='same', activation='relu'),
            # BatchNormalization(),
            MaxPooling2D(),
            Conv2D(64, 3, padding='same', activation='relu'),
            # BatchNormalization(),
            MaxPooling2D(),
            Dropout(dropout),
            Flatten(),
            Dense(HIDDEN_UNITS,activation =activation),
            # BatchNormalization(),
            Dense(10, activation='sigmoid') ])
    print(optimizer)
    model_new.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
    model_new.summary()
    return model_new
# Training configuration.
batch_size = 32
num_classes = 10
epochs = 100
data_augmentation = True
num_predictions = 20
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'keras_cifar10_trained_model.h5'
# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Scale pixel values to [0, 1] as float32.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# One-hot encode the labels for categorical crossentropy.
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
# activation =['leaky','relu','tanh','sigmoid','softsign']
activation = ['relu']
# Optimizer names swept in the experiment loop below.
optimizers = [ 'adam','sgd','rmsprop','adagrad','adadelta']
# Per-optimizer metric accumulators (re-initialized again just before the loop).
test_accuracies = []
accuracies = []
val_accuracies = []
losses = []
val_losses =[]
# Data augmentation pipeline; also carves out a 33% validation split.
datagen = ImageDataGenerator(
    featurewise_center=False,  # set input mean to 0 over the dataset
    samplewise_center=False,  # set each sample mean to 0
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,  # divide each input by its std
    zca_whitening=False,  # apply ZCA whitening
    zca_epsilon=1e-06,  # epsilon for ZCA whitening
    rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
    # randomly shift images horizontally (fraction of total width)
    width_shift_range=0.1,
    # randomly shift images vertically (fraction of total height)
    height_shift_range=0.1,
    shear_range=0.,  # set range for random shear
    zoom_range=0.,  # set range for random zoom
    channel_shift_range=0.,  # set range for random channel shifts
    # set mode for filling points outside the input boundaries
    fill_mode='nearest',
    cval=0.,  # value used for fill_mode = "constant"
    horizontal_flip=True,  # randomly flip images
    vertical_flip=False,  # randomly flip images
    # set rescaling factor (applied before any other transformation)
    rescale=None,
    # set function that will be applied on each input
    preprocessing_function=None,
    # image data format, either "channels_first" or "channels_last"
    data_format=None,
    # fraction of images reserved for validation (strictly between 0 and 1)
    validation_split=0.33)
datagen.fit(x_train)
# NOTE(review): the accumulators are reset a second time here, shadowing the
# identical initialization above — redundant but harmless.
test_accuracies = []
accuracies = []
val_accuracies = []
losses = []
val_losses =[]
# Train one model per optimizer, checkpointing the best validation accuracy.
for idx in range (0,len(optimizers)):
    name = 'optimizers/cnn ' +str(optimizers[idx] +'.h5')
    model = generate_model(0.2,512,'leaky',optimizers[idx],x_train)
    # Save the best weights and stop early after 10 stagnant epochs.
    # NOTE(review): 'val_acc'/'acc' metric keys are TF1-era names; newer Keras
    # versions report 'val_accuracy'/'accuracy' — confirm against the TF in use.
    checkpoint = ModelCheckpoint(name, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
    early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')
    history = model.fit_generator(datagen.flow(x_train, y_train,
                                               batch_size=batch_size,subset ='training'),
                                  epochs=epochs,
                                  validation_data= datagen.flow(x_train, y_train,
                                                                batch_size=batch_size,subset ='validation'),
                                  callbacks=[checkpoint,early])
    # Reload the checkpointed best weights before evaluating on the test set.
    model.load_weights(name)
    score = model.evaluate(x_test, y_test, verbose=0)
    accuracies.append(history.history['acc'])
    val_accuracies.append(history.history['val_acc'])
    losses.append(history.history['loss'])
    val_losses.append(history.history['val_loss'])
    test_accuracies.append(score)
epochs_range = range(epochs)
# Dump all collected curves/scores as CSV files under optimizers/.
accuracies_df = pd.DataFrame([array for array in accuracies] )
accuracies_df.to_csv("optimizers/accuracies.csv")
val_accuracies_df = pd.DataFrame([array for array in val_accuracies] )
val_accuracies_df.to_csv("optimizers/val_accuracies.csv")
test_accuracies_df = pd.DataFrame(test_accuracies )
test_accuracies_df.to_csv("optimizers/test_accuracies.csv")
losses_df = pd.DataFrame([array for array in losses] )
losses_df.to_csv("optimizers/losses.csv")
val_losses_df = pd.DataFrame([array for array in val_losses] )
val_losses_df.to_csv("optimizers/val_losses.csv")
| StarcoderdataPython |
70813 | <gh_stars>10-100
from unittest import TestCase
from brnolm.runtime.model_statistics import scaled_int_str
class ScaledIntRepreTests(TestCase):
    """Tests for scaled_int_str: one case per order of magnitude.

    Values below 1000 render verbatim, thousands get a 'k' suffix and
    millions an 'M' suffix, each with one decimal place.
    """
    def test_order_0(self):
        self.assertEqual(scaled_int_str(0), '0')
    def test_order_1(self):
        self.assertEqual(scaled_int_str(10), '10')
    def test_order_2(self):
        self.assertEqual(scaled_int_str(210), '210')
    def test_order_3(self):
        self.assertEqual(scaled_int_str(3210), '3.2k')
    def test_order_4(self):
        self.assertEqual(scaled_int_str(43210), '43.2k')
    def test_order_5(self):
        self.assertEqual(scaled_int_str(543210), '543.2k')
    def test_order_6(self):
        self.assertEqual(scaled_int_str(6543210), '6.5M')
| StarcoderdataPython |
# Stats-reporting tunables. Presumably *_INTERVAL values are seconds and
# *_LOOKBACK counts past intervals to aggregate — TODO confirm at call sites.
ETH_GATEWAY_STATS_INTERVAL = 60
ETH_GATEWAY_STATS_LOOKBACK = 1
ETH_ON_BLOCK_FEED_STATS_INTERVAL_S = 5 * 60
ETH_ON_BLOCK_FEED_STATS_LOOKBACK = 1
| StarcoderdataPython |
124545 | # Generated by Django 3.2.7 on 2021-09-10 17:23
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the four principle_* contact fields
    # from the Project model. Do not edit the operations by hand once applied.

    dependencies = [
        ('project', '0005_auto_20210910_1320'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='project',
            name='principle_address1',
        ),
        migrations.RemoveField(
            model_name='project',
            name='principle_address2',
        ),
        migrations.RemoveField(
            model_name='project',
            name='principle_first_name',
        ),
        migrations.RemoveField(
            model_name='project',
            name='principle_last_name',
        ),
    ]
| StarcoderdataPython |
41345 | <filename>multi_agent_rmp.py
# RMPflow basic classes
# @author <NAME>
# @date April 8, 2019
from rmp import RMPRoot, RMPNode
from rmp_leaf import CollisionAvoidance, CollisionAvoidanceDecentralized, GoalAttractorUni
import numpy as np
from numpy.linalg import norm
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as animation
import math
from scipy.spatial.distance import pdist
# ---------------------------------------------
# build the rmp tree
N = 10  # number of agents
theta = np.arange(0, 2 * np.pi, 2 * np.pi / N)  # N evenly spaced angles
# goal positions on a circle of radius 10; shape (N, 2)
x_g = np.stack((np.cos(theta), np.sin(theta))).T * 10
r = RMPRoot('root')  # root of the RMP tree
robots = []  # per-robot RMP nodes, filled below
def create_mappings(i):
    """Return (phi, J, J_dot) task maps selecting robot *i*'s 2-D slice of the
    stacked root state (column vector of length 2N)."""
    def phi(y, i=i):
        # pick robot i's (x, y) as a 2x1 column vector
        return np.array([[y[2 * i, 0]], [y[2 * i + 1, 0]]])

    def J(y, i=i):
        # constant selection Jacobian: [0 ... I2 ... 0], shape (2, 2N)
        return np.concatenate((
            np.zeros((2, 2 * i)),
            np.eye(2),
            np.zeros((2, 2 * (N - i - 1)))), axis=1)

    def J_dot(y, y_dot):
        # the Jacobian is constant, so its time derivative vanishes
        return np.zeros((2, 2 * N))

    return phi, J, J_dot
# One RMP node per robot, attached under the root.
for i in range(N):
    phi, J, J_dot = create_mappings(i)
    robot = RMPNode('robot_' + str(i), r, phi, J, J_dot)
    robots.append(robot)
# One goal-attractor leaf per robot, pulling it toward x_g[i].
gas = []
for i in range(N):
    ga = GoalAttractorUni(
        'ga_robot_' + str(i),
        robots[i],
        x_g[i],
        alpha = 1,
        gain = 1,
        eta = 2)
    gas.append(ga)
# Pairwise decentralized collision-avoidance leaves (ordered pairs, i != j).
iacas = []
for i in range(N):
    for j in range(N):
        if i == j:
            continue
        iaca = CollisionAvoidanceDecentralized(
            'ca_robot_' + str(i) + '_robot_' + str(j),
            robots[i],
            robots[j],
            R=1)
        iacas.append(iaca)
# ----------------------------------------------
# -----------------------------------------
# possible initial configurations
# start each robot opposite its goal, with small Gaussian jitter
x_0 = - x_g + np.random.randn(*x_g.shape) * 0.2
x = x_0.reshape(-1)  # flattened root state
x_dot = np.zeros_like(x)  # start at rest
state_0 = np.concatenate((x, x_dot), axis=None)
# Prime the tree once: push state down, update leaves, pull forces back up.
r.set_root_state(x, x_dot)
r.pushforward()
[leaf.update() for leaf in iacas]  # list comp used for its side effects only
r.pullback()
# --------------------------------------------
# dynamics
def dynamics(t, state):
    """ODE right-hand side for solve_ivp.

    *state* is the flat concatenation of positions and velocities (length 4N);
    accelerations are resolved through the (module-level) RMP tree `r` and the
    collision-avoidance leaves `iacas`, which this function mutates.
    """
    state = state.reshape(2, -1)
    x = state[0]       # positions
    x_dot = state[1]   # velocities
    r.set_root_state(x, x_dot)
    r.pushforward()
    [leaf.update() for leaf in iacas]  # side-effecting update of each leaf
    r.pullback()
    x_ddot = r.resolve()  # accelerations from the combined RMPs
    state_dot = np.concatenate((x_dot, x_ddot), axis=None)
    return state_dot
# --------------------------------------------
# ---------------------------------------------
# solve the diff eq
sol = solve_ivp(dynamics, [0, 60], state_0)  # integrate 60s of dynamics
# ---------------------------------------------
# --------------------------------------------
# plot trajectories
for i in range(N):
    plt.plot(sol.y[2 * i], sol.y[2 * i + 1], 'y--')  # robot i's path
    plt.plot(x_g[i, 0], x_g[i, 1], 'go')  # goal marker
    plt.plot(x_0[i, 0], x_0[i, 1], 'ro')  # start marker
plt.axis(np.array([-12, 12, -12, 12]))
plt.gca().set_aspect('equal', 'box')
fig = plt.gcf()
ax = plt.gca()
# agent markers at the first frame (even rows are x, odd rows are y)
agents, = plt.plot(sol.y[0: 2 * N: 2, 0], sol.y[1: 2 * N + 1: 2, 0], 'ko')
def init():  # only required for blitting to give a clean slate.
    """Animation init: return the artists that will be redrawn."""
    return agents,
def animate(i):
    """Move the agent markers to frame i (wraps past the end of the solution)."""
    nsteps = sol.y.shape[-1]
    agents.set_xdata(sol.y[0: 2 * N: 2, i % nsteps])
    agents.set_ydata(sol.y[1: 2 * N + 1: 2, i % nsteps])
    return agents,
ani = animation.FuncAnimation(
    fig, animate, init_func=init, interval=30, blit=True)
plt.show()
# --------------------------------------------
| StarcoderdataPython |
3285864 | <filename>common/bulk_import.py<gh_stars>0
import datetime
import re
from django.contrib.auth.models import User
from common.models import Class, Semester, Subject
from io import StringIO
from lxml.html import parse
class ImportException(Exception):
    """Raised when the exported HTML cannot be parsed or references missing DB records."""
    pass
class BulkImport:
    """Imports classes and student enrolment from an exported HTML schedule page.

    The page is parsed with lxml; classes ("P/..." lectures, "C/..." exercises)
    are looked up or created, then students are enrolled/unenrolled according
    to the checkbox matrix in the data table.
    """

    def is_allowed(self, clazz, no_lectures, no_exercises):
        """Return False when *clazz* is excluded by the no_lectures/no_exercises flags."""
        t, _ = clazz.split('/')
        if t == 'P':
            if no_lectures:
                return False
        elif t == 'C':
            if no_exercises:
                return False
        else:
            print(f"Uknown class type: {clazz}")
        return True

    def parse_subject(self, doc):
        """Extract the subject abbreviation from the page heading, e.g. "Math (MATH1)" -> "MATH1"."""
        h2 = doc.xpath('//h2[@class="nomargin"]')
        if not h2:
            raise ImportException("Missing h2 element, have you imported correct file?")
        subject = re.search(r'\(([^)]+)', h2[0].text)
        if not subject:
            raise ImportException("Subject missing in h2 element")
        return subject.group(1).strip()

    def parse_semester(self, doc):
        """Return (year, is_winter) parsed from the two .outputText spans in the heading."""
        elems = doc.xpath('//h2[@class="nomargin"]/span[@class="outputText"]')
        if len(elems) != 2:
            raise ImportException("two elements .outputText with semester not found in h2")
        year = elems[0].text.split('/')[0]
        h = elems[1].text.strip().lower()
        # Czech semester names: "letní" = summer, "zimní" = winter.
        if h == 'letní':
            return year, False
        elif h == 'zimní':
            return year, True
        raise ImportException("failed to parse semester")

    def run(self, content, no_lectures=False, no_exercises=False, class_code=None):
        """Import *content* (HTML text) and yield one status dict per student row.

        Args:
            content: the exported HTML as a string.
            no_lectures / no_exercises: skip P/C classes respectively.
            class_code: optional iterable of class codes every student is
                additionally enrolled into.

        Yields:
            {'login', 'firstname', 'lastname', 'created', 'classes'} per student.

        Raises:
            ImportException: on parse failures or missing DB records.
        """
        doc = parse(StringIO(content)).getroot()
        abbr = self.parse_subject(doc)
        try:
            subject = Subject.objects.get(abbr=abbr)
        except Subject.DoesNotExist:
            raise ImportException(f"Subject {abbr} does not exist. Please create it first.")
        year, is_winter = self.parse_semester(doc)
        semester = Semester.objects.get(year=year, winter=is_winter)

        # Column headers: class codes plus a title holding timeslot + teacher name.
        classes = list(map(str.strip, doc.xpath('//tr[@class="rowClass1"]/th/div/span[1]/text()')))
        labels = list(doc.xpath('//tr[@class="rowClass1"]/th/div/@title'))

        default_classes = []
        for code in class_code or []:
            try:
                # BUG FIX: the original filtered on `opts['subject']`, but
                # `opts` is undefined in this scope (NameError). Use the
                # subject resolved above instead.
                default_classes.append(Class.objects.get(semester__year=year, semester__winter=is_winter, code=code, subject=subject))
            except Class.DoesNotExist:
                raise ImportException(f"Class with code {code} does not exist.")

        # Look up or create each class column.
        class_in_db = {}
        for c, label in zip(classes, labels):
            if not self.is_allowed(c, no_lectures, no_exercises):
                continue
            try:
                class_in_db[c] = Class.objects.get(code=c, semester=semester, subject=subject)
            except Class.DoesNotExist:
                s = label.split(' ')
                class_in_db[c] = Class()
                class_in_db[c].code = c
                # Normalize accented Czech weekday abbreviations.
                day = s[6].upper()
                mapping = {'ÚT': 'UT', 'ČT': 'CT', 'PÁ': 'PA'}
                class_in_db[c].day = mapping.get(day, day)
                class_in_db[c].hour = s[7]
                class_in_db[c].year = datetime.datetime.now().year
                class_in_db[c].winter = datetime.datetime.now().month >= 9
                class_in_db[c].time = s[7]
                class_in_db[c].subject = subject
                class_in_db[c].semester = semester
                # Last two words of the cleaned title are the teacher's name.
                first_name, last_name = label.replace(',', '').replace('Ph.D.', '').replace('Bc', '').replace('DiS', '').strip().split(' ')[-2:]
                if first_name and last_name:
                    teacher = User.objects.filter(first_name=first_name, last_name=last_name)
                    if not teacher:
                        raise ImportException(f"Teacher '{first_name}' '{last_name}' not found")
                    class_in_db[c].teacher = teacher[0]
                class_in_db[c].save()

        def clean_name(s):
            """Strip academic titles/punctuation and collapse whitespace."""
            # Hoisted out of the row loop (it does not depend on the row).
            for remove in ['Ing', 'Bc', 'BA', 'MBA', 'Mgr', 'MgrA', '.', ',']:
                s = s.replace(remove, '')
            return ' '.join(s.split()).strip()

        # One row per student; first row is the header.
        for row in doc.xpath('//table[@class="dataTable"]//tr')[1:]:
            if not row.xpath('./td[2]/a'):
                raise ImportException("Student login not found in table. Have you imported correct file?")
            login = row.xpath('./td[2]/a/text()')[0].strip()
            email = row.xpath('./td[2]/a/@href')[0].replace('mailto:', '').strip()
            name = clean_name(row.xpath('./td[3]/a/text()')[0])
            lastname, firstname = name.strip().split(' ', 1)
            member_of = []  # NOTE(review): collected but never reported; kept for parity
            created = False
            try:
                user = User.objects.get(username=login)
            except User.DoesNotExist:
                user = User.objects.create_user(login.upper(), email)
                user.first_name = firstname
                user.last_name = lastname
                user.save()
                created = True

            # Checkbox per class column: checked -> enrol, unchecked -> unenrol.
            for i, el in enumerate(row.xpath('.//input')):
                clazz = classes[i]
                if "checked" in el.attrib:
                    if not self.is_allowed(clazz, no_lectures, no_exercises):
                        continue
                    if user not in class_in_db[clazz].students.all():
                        member_of.append(clazz)
                    class_in_db[clazz].students.add(user)
                elif clazz in class_in_db:
                    class_in_db[clazz].students.remove(user)

            # Force-enrol into the explicitly requested classes.
            for clazz in default_classes:
                if user not in clazz.students.all():
                    member_of.append(clazz.code)
                    clazz.students.add(user)

            # Summarize the student's final class membership for this subject.
            classess = []
            for c in Class.objects.filter(students__username=login, semester__year=year, semester__winter=is_winter, subject_id=subject.id):
                classess.append(f"{c.timeslot} {c.teacher.username}")

            yield {
                'login': login,
                'firstname': firstname,
                'lastname': lastname,
                'created': created,
                'classes': classess,
            }
| StarcoderdataPython |
35311 | <filename>regulation/settings.py
#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
import importlib
import os
import sys
# Try to load the settings module named by REGML_SETTINGS_FILE (default:
# "settings") and merge its attributes into this module's namespace.
try:
    local_settings = importlib.import_module(
        os.environ.get('REGML_SETTINGS_FILE', 'settings'))
    globals().update(local_settings.__dict__)
except ImportError:
    # BUG FIX: the original called `logger.error(...)` but `logger` was never
    # defined in this module, so the failure path raised NameError instead of
    # reporting the problem. Report on stderr and exit as intended.
    # (The original's second, unconditional `globals().update(...)` after the
    # try/except was redundant with the success path above and was removed.)
    print("Unable to import settings module. "
          "Please double-check your REGML_SETTINGS_FILE "
          "environment variable", file=sys.stderr)
    sys.exit(1)
| StarcoderdataPython |
3220034 | """ Example or something
"""
import pandas as pd
from alpha_vantage.timeseries import TimeSeries
import config_terminal as cfg
import res_menu as rm
from discovery import disc_menu as dm
from due_diligence import dd_menu as ddm
from fundamental_analysis import fa_menu as fam
from helper_funcs import *
from prediction_techniques import pred_menu as pm
from sentiment import sen_menu as sm
from technical_analysis import ta_menu as tam
# import warnings
# warnings.simplefilter("always")
# ----------------------------------------------------- LOAD -----------------------------------------------------
def load(l_args, s_ticker, s_start, s_interval, df_stock):
    """Parse `load` command args and fetch the ticker's data from AlphaVantage.

    Returns the (possibly updated) [s_ticker, s_start, s_interval, df_stock]
    state list; on any failure the incoming state is returned unchanged.
    """
    parser = argparse.ArgumentParser(prog='load', description=""" Load a stock in order to perform analysis""")
    parser.add_argument('-t', "--ticker", action="store", dest="s_ticker", required=True, help="Stock ticker")
    parser.add_argument('-s', "--start", type=valid_date, dest="s_start_date",
                        help="The starting date (format YYYY-MM-DD) of the stock")
    # NOTE(review): default=1440 ("daily") is deliberately outside the choices
    # list; argparse does not validate defaults, only user-supplied values.
    parser.add_argument('-i', "--interval", action="store", dest="n_interval", type=int, default=1440,
                        choices=[1, 5, 15, 30, 60], help="Intraday stock minutes")
    try:
        (ns_parser, l_unknown_args) = parser.parse_known_args(l_args)
    except SystemExit:
        # argparse already printed its error/usage message
        print("")
        return [s_ticker, s_start, s_interval, df_stock]
    if l_unknown_args:
        print(f"The following args couldn't be interpreted: {l_unknown_args}")
    # Update values:
    s_ticker = ns_parser.s_ticker
    s_start = ns_parser.s_start_date
    s_interval = str(ns_parser.n_interval) + 'min'
    try:
        ts = TimeSeries(key=cfg.API_KEY_ALPHAVANTAGE, output_format='pandas')
        # Daily
        if s_interval == "1440min":
            df_stock, d_stock_metadata = ts.get_daily_adjusted(symbol=ns_parser.s_ticker, outputsize='full')
        # Intraday
        else:
            df_stock, d_stock_metadata = ts.get_intraday(symbol=ns_parser.s_ticker, outputsize='full',
                                                         interval=s_interval)
        df_stock.sort_index(ascending=True, inplace=True)
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt /
    # SystemExit; narrowed to Exception.
    except Exception:
        print("Either the ticker or the API_KEY are invalids. Try again!")
        return [s_ticker, s_start, s_interval, df_stock]
    s_intraday = (f'Intraday {s_interval}', 'Daily')[s_interval == "1440min"]
    if s_start:
        # Slice dataframe from the starting date YYYY-MM-DD selected
        df_stock = df_stock[ns_parser.s_start_date:]
        print(
            f"Loading {s_intraday} {s_ticker} stock with starting period {s_start.strftime('%Y-%m-%d')} for analysis.")
    else:
        print(f"Loading {s_intraday} {s_ticker} stock for analysis.")
    print("")
    return [s_ticker, s_start, s_interval, df_stock]
# ----------------------------------------------------- VIEW -----------------------------------------------------
def view(l_args, s_ticker, s_start, s_interval, df_stock):
    """Parse `view` command args, fetch the ticker's data and plot it.

    The --type digits select OHLC columns (1-indexed); the close and volume
    columns are always appended so the plot helper can render them.
    Returns None; prints an error and returns early on failure.
    """
    parser = argparse.ArgumentParser(prog='view',
                                     description='Visualise historical data of a stock. An alpha_vantage key is '
                                                 'necessary.')
    # Ticker is optional only when one is already loaded (used as the default).
    if s_ticker:
        parser.add_argument('-t', "--ticker", action="store", dest="s_ticker", default=s_ticker, help='Stock ticker')
    else:
        parser.add_argument('-t', "--ticker", action="store", dest="s_ticker", required=True, help='Stock ticker')
    parser.add_argument('-s', "--start", type=valid_date, dest="s_start_date", default=s_start,
                        help="The starting date (format YYYY-MM-DD) of the stock")
    parser.add_argument('-i', "--interval", action="store", dest="n_interval", type=int, default=0,
                        choices=[1, 5, 15, 30, 60], help="Intraday stock minutes")
    parser.add_argument("--type", action="store", dest="n_type", type=check_positive, default=5,  # in case it's daily
                        help='1234 corresponds to types: 1. open; 2. high; 3.low; 4. close; while 14 corresponds to '
                             'types: 1.open; 4. close')
    try:
        (ns_parser, l_unknown_args) = parser.parse_known_args(l_args)
    except SystemExit:
        # argparse already printed its error/usage message
        print("")
        return
    if l_unknown_args:
        print(f"The following args couldn't be interpreted: {l_unknown_args}")
    # Update values:
    s_ticker = ns_parser.s_ticker
    # A new interval intraday period was given
    if ns_parser.n_interval != 0:
        s_interval = str(ns_parser.n_interval) + 'min'
    try:
        ts = TimeSeries(key=cfg.API_KEY_ALPHAVANTAGE, output_format='pandas')
        # Daily
        if s_interval == "1440min":
            df_stock, d_stock_metadata = ts.get_daily_adjusted(symbol=s_ticker, outputsize='full')
        # Intraday
        else:
            df_stock, d_stock_metadata = ts.get_intraday(symbol=s_ticker, outputsize='full', interval=s_interval)
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt /
    # SystemExit; narrowed to Exception.
    except Exception:
        print("Either the ticker or the API_KEY are invalids. Try again!")
        return
    df_stock.sort_index(ascending=True, inplace=True)
    # Slice dataframe from the starting date YYYY-MM-DD selected
    df_stock = df_stock[ns_parser.s_start_date:]
    # Daily: columns are 1.open 2.high 3.low 4.close 5.adj-close 6.volume
    if s_interval == "1440min":
        ln_col_idx = [int(x) - 1 for x in list(str(ns_parser.n_type))]
        # always include the close column (index 4 = adjusted close)
        if 4 not in ln_col_idx:
            ln_col_idx.append(4)
        # Check that the types given are not bigger than 4, as there are only 5 types (0-4)
        if len([i for i in ln_col_idx if i > 4]) > 0:
            print("An index bigger than 4 was given, which is wrong. Try again")
            return
        # Append last column of df to be filtered which corresponds to: 6. Volume
        ln_col_idx.append(5)
    # Intraday: columns are 1.open 2.high 3.low 4.close 5.volume
    else:
        # The daily default (5) doesn't exist for intraday data -> use close
        if ns_parser.n_type == 5:
            ln_col_idx = [3]
        else:
            ln_col_idx = [int(x) - 1 for x in list(str(ns_parser.n_type))]
        # Check that the types given are not bigger than 3, as there are only 4 types (0-3)
        if len([i for i in ln_col_idx if i > 3]) > 0:
            print("An index bigger than 3 was given, which is wrong. Try again")
            return
        # Append last column of df to be filtered which corresponds to: 5. Volume
        ln_col_idx.append(4)
    # Plot view of the stock
    plot_view_stock(df_stock.iloc[:, ln_col_idx], ns_parser.s_ticker)
# ----------------------------------------------------- HELP
# ------------------------------------------------------------------
def print_help(s_ticker, s_start, s_interval, b_is_market_open):
    """Print the main menu, the currently loaded stock (if any) and the market status."""
    for menu_line in ("What do you want to do?",
                      " help help to see this menu again",
                      " quit to abandon the program",
                      "",
                      " clear clear a specific stock ticker from analysis",
                      " load load a specific stock ticker for analysis",
                      " view view and load a specific stock ticker for technical analysis"):
        print(menu_line)
    # "1440min" is the sentinel interval meaning daily data
    s_intraday = 'Daily' if s_interval == "1440min" else f'Intraday {s_interval}'
    if s_ticker and s_start:
        print(f"\n{s_intraday} Stock: {s_ticker} (from {s_start.strftime('%Y-%m-%d')})")
    elif s_ticker:
        print(f"\n{s_intraday} Stock: {s_ticker}")
    else:
        print("\nStock: ?")
    market_state = 'OPEN' if b_is_market_open else 'CLOSED'
    print(f"Market {market_state}.")
    print("\nMenus:")
    print(" disc discover trending stocks, \t e.g. map, sectors, high short interest")
    print(" sen sentiment of the market, \t from: reddit, stocktwits, twitter")
    # stock-specific menus only make sense once a ticker is loaded
    if s_ticker:
        print(" res research web page, \t e.g.: macroaxis, yahoo finance, fool")
        print(" fa fundamental analysis, \t e.g.: income, balance, cash, earnings")
        print(" ta technical analysis, \t e.g.: ema, macd, rsi, adx, bbands, obv")
        print(" dd in-depth due-diligence, \t e.g.: news, analyst, shorts, insider, sec")
        print(" pred prediction techniques, \t e.g.: regression, arima, rnn, lstm, prophet")
    print("")
# -----------------------------------------------------------------------------------------------------------------------
def main():
    """Gamestonk Terminal is an awesome stock market terminal that has been developed for fun,
    while I saw my GME shares tanking. But hey, I like the stock."""
    # Session state shared by the menu commands: loaded ticker, start date of
    # the loaded data, cached price DataFrame, and bar interval ("1440min"
    # means daily bars).
    s_ticker = ""
    s_start = ""
    df_stock = pd.DataFrame()
    b_intraday = False  # NOTE(review): assigned but never read in this function
    s_interval = "1440min"
    '''
    # Set stock by default to speed up testing
    s_ticker = "BB"
    ts = TimeSeries(key=cfg.API_KEY_ALPHAVANTAGE, output_format='pandas')
    df_stock, d_stock_metadata = ts.get_daily_adjusted(symbol=s_ticker, outputsize='full')
    df_stock.sort_index(ascending=True, inplace=True)
    s_start = datetime.strptime("2020-06-04", "%Y-%m-%d")
    df_stock = df_stock[s_start:]
    '''
    # Add list of arguments that the main parser accepts
    menu_parser = argparse.ArgumentParser(prog='gamestonk_terminal', add_help=False)
    menu_parser.add_argument('opt', choices=['help', 'quit', 'q',
                                             'clear', 'load', 'view',
                                             'disc', 'sen', 'res', 'fa', 'ta', 'dd', 'pred'])
    # Print first welcome message and help
    print("\nWelcome to Didier's Gamestonk Terminal\n")
    print_help(s_ticker, s_start, s_interval, b_is_stock_market_open())
    # Loop forever and ever (sub-menus return here until the user quits)
    while True:
        # Get input command from user
        as_input = input('> ')
        # Is command empty
        if not len(as_input):
            print("")
            continue
        # Parse main command of the list of possible commands
        try:
            (ns_known_args, l_args) = menu_parser.parse_known_args(as_input.split())
        except SystemExit:
            # argparse raises SystemExit on an invalid choice; swallow it and re-prompt
            print("The command selected doesn't exist\n")
            continue
        if ns_known_args.opt == 'help':
            print_help(s_ticker, s_start, s_interval, b_is_stock_market_open())
        elif (ns_known_args.opt == 'quit') or (ns_known_args.opt == 'q'):
            print("Hope you made money today. Good bye my lover, good bye my friend.\n")
            return
        elif ns_known_args.opt == 'clear':
            print("Clearing stock ticker to be used for analysis")
            s_ticker = ""
            s_start = ""
        elif ns_known_args.opt == 'load':
            [s_ticker, s_start, s_interval, df_stock] = load(l_args, s_ticker, s_start, s_interval, df_stock)
        elif ns_known_args.opt == 'view':
            view(l_args, s_ticker, s_start, s_interval, df_stock)
        # DISCOVERY MENU
        elif ns_known_args.opt == 'disc':
            # Each sub-menu returns True when the user asked to quit the whole app.
            b_quit = dm.disc_menu()
            if b_quit:
                print("Hope you made money today. Good bye my lover, good bye my friend.\n")
                return
            else:
                print_help(s_ticker, s_start, s_interval, b_is_stock_market_open())
        # SENTIMENT MARKET
        elif ns_known_args.opt == 'sen':
            b_quit = sm.sen_menu(s_ticker, s_start)
            if b_quit:
                print("Hope you made money today. Good bye my lover, good bye my friend.\n")
                return
            else:
                print_help(s_ticker, s_start, s_interval, b_is_stock_market_open())
        # RESEARCH MENU
        elif ns_known_args.opt == 'res':
            b_quit = rm.res_menu(s_ticker)
            if b_quit:
                print("Hope you made money today. Good bye my lover, good bye my friend.\n")
                return
            else:
                print_help(s_ticker, s_start, s_interval, b_is_stock_market_open())
        # FUNDAMENTAL ANALYSIS MENU
        elif ns_known_args.opt == 'fa':
            b_quit = fam.fa_menu(s_ticker, s_start, s_interval)
            if b_quit:
                print("Hope you made money today. Good bye my lover, good bye my friend.\n")
                return
            else:
                print_help(s_ticker, s_start, s_interval, b_is_stock_market_open())
        # TECHNICAL ANALYSIS MENU
        elif ns_known_args.opt == 'ta':
            b_quit = tam.ta_menu(df_stock, s_ticker, s_start, s_interval)
            if b_quit:
                print("Hope you made money today. Good bye my lover, good bye my friend.\n")
                return
            else:
                print_help(s_ticker, s_start, s_interval, b_is_stock_market_open())
        # DUE DILIGENCE MENU
        elif ns_known_args.opt == 'dd':
            b_quit = ddm.dd_menu(df_stock, s_ticker, s_start, s_interval)
            if b_quit:
                print("Hope you made money today. Good bye my lover, good bye my friend.\n")
                return
            else:
                print_help(s_ticker, s_start, s_interval, b_is_stock_market_open())
        # PREDICTION TECHNIQUES
        elif ns_known_args.opt == 'pred':
            if s_interval == "1440min":
                b_quit = pm.pred_menu(df_stock, s_ticker, s_start, s_interval)
            # If stock data is intradaily, we need to get data again as prediction techniques work on daily adjusted
            # data
            else:
                # NOTE(review): ``ts`` is only defined inside the disabled
                # triple-quoted block above, so this intraday path raises
                # NameError at runtime -- confirm and restore the TimeSeries
                # initialisation before relying on it.
                df_stock_pred, _ = ts.get_daily_adjusted(symbol=s_ticker, outputsize='full')
                df_stock_pred = df_stock_pred.sort_index(ascending=True)
                df_stock_pred = df_stock_pred[s_start:]
                b_quit = pm.pred_menu(df_stock_pred, s_ticker, s_start, s_interval="1440min")
            if b_quit:
                print("Hope you enjoyed the terminal. Remember that stonks only go up. Diamond hands.\n")
                return
            else:
                print_help(s_ticker, s_start, s_interval, b_is_stock_market_open())
        else:
            # Unreachable in practice: argparse ``choices`` already rejects anything else.
            print('Shouldnt see this command!')


if __name__ == "__main__":
    main()
| StarcoderdataPython |
98208 | """
bluew.daemon
~~~~~~~~~~~~~~~~~
This module provides a Daemon object that tries its best to keep connections
alive, and has the ability to reproduce certain steps when a reconnection is
needed.
:copyright: (c) 2017 by <NAME>.
:license: MIT, see LICENSE for more details.
"""
def daemonize(func):
    """
    A function wrapper that checks daemon flags. This wrapper as it is assumes
    that the calling class has a daemon attribute.

    When the wrapped method is called with ``d_init=True``, the call
    ``(func, self, args, kwargs)`` is also recorded on ``self.daemon.d_init``
    so the daemon can replay it after a reconnection.
    """
    import functools

    # Fixed: the wrapper previously did not carry over the wrapped function's
    # metadata (__name__, __doc__, ...), which breaks introspection and docs.
    @functools.wraps(func)
    def _wrapper(self, *args, d_init=False, **kwargs):
        if d_init:
            self.daemon.d_init.append((func, self, args, kwargs))
        return func(self, *args, **kwargs)
    return _wrapper
class Daemon(object):
    """The bluew daemon.

    Records initialization steps (via the ``daemonize`` decorator) so they
    can be replayed after a reconnection.
    """

    def __init__(self):
        # Ordered list of (func, instance, args, kwargs) tuples.
        self.d_init = []

    def run_init_funcs(self):
        """Replay every recorded initialization call, in insertion order."""
        for entry in self.d_init:
            func, owner, args, kwargs = entry
            func(owner, *args, **kwargs)
| StarcoderdataPython |
1696560 | from multiprocessing.pool import ThreadPool
import urllib
import urllib.request
import re
import os
import time
import sys
import glob
from bs4 import BeautifulSoup
from pyunpack import Archive
from threading import Lock
class Downloader:
    """Scrapes hltv.org for match/demo ids and downloads demo archives.

    Every HTTP request spoofs a desktop Chrome user agent (HLTV rejects the
    default urllib agent).  Most methods are network-bound.
    """

    def __init__(self, processes):
        """:param processes: thread count used when resolving demo ids."""
        # Fixed: this was ``directory = ""`` (a throwaway local), so
        # ``self.directory`` did not exist until download() assigned it.
        self.directory = ""
        self.processes = processes
        # get() holds this lock for the whole transfer, serialising downloads
        # (HLTV answers 503 to parallel fetches -- see download()).
        self.lock = Lock()

    def get_match_ids(self, eventid):
        """Return every match id listed on HLTV's results pages for *eventid*,
        following pagination 50 results at a time."""
        # Create an offset variable for lists that are paginated on HLTV
        offset = 0
        # Create an array of all of the Demo URLs on the page
        match_ids = self.find_match_ids_at_url(f'https://www.hltv.org/results?offset={offset}&event={eventid}')
        # If the length is = 50, offset by 50 and loop again
        if len(match_ids) == 50:
            print(f'Parsed first page. Found {(len(match_ids))} IDs')
            # Set a boolean to close the while loop and a page variable we can increment when paginating
            more_pages = True
            page = 1
            # While check is true, offset by 50
            while more_pages:
                offset += 50
                # Same URL building and parsing as above
                more_match_ids = self.find_match_ids_at_url(f'https://www.hltv.org/results?offset={offset}&event={eventid}')
                for match in more_match_ids:
                    match_ids.append(match)
                # Determine if there are additional pages to be found, if not the while loop ends
                if len(more_match_ids) < 50:
                    more_pages = False
                    page += 1
                    print(f'Parsed page {page}. Found {len(match_ids)} IDs.')
                else:
                    # Prints the current page and the number of parsed IDs
                    page += 1
                    print(f'Parsed page {page}. {len(match_ids)} IDs found so far.')
        elif len(match_ids) < 50:
            print(f'Total demos: {len(match_ids)}')
        elif len(match_ids) > 50:
            # A results page should never hold more than 50 entries.
            print("HLTV altered demo page layout :(")
        return match_ids

    def find_match_ids_at_url(self, url):
        """Return the match ids scraped from a single results page."""
        html = self.get_html(url)
        match_ids = re.findall(r'<div class=\"result-con\" data-zonedgrouping-entry-unix=\"(?:).*?\"><a href=\"/matches/(.*?)\"', html)
        return match_ids

    def convert_to_demo_ids(self, match_ids):
        """Resolve match ids to demo ids with a thread pool; matches without a
        demo are reported via print_errors() and dropped from the result."""
        print("Converting Match IDs to Demo IDs")
        threads = self.processes
        pool = ThreadPool(threads)
        demo_ids = pool.map(self.get_demo_ids, match_ids)
        pool.close()
        pool.join()
        # get_demo_ids() flags a missing demo by returning the raw match id
        # (a slug containing "/"); real demo ids never contain a slash.
        errors = []
        for demo_id in demo_ids:
            if "/" in demo_id:
                errors.append(demo_id)
        demo_ids = [x for x in demo_ids if x not in errors]
        self.print_errors(errors)
        return demo_ids

    def get_demo_ids(self, match_id):
        """Return the demo id for one match, or ' <match_id>' (leading space)
        when the match page has no demo download link."""
        url = f'https://www.hltv.org/matches/{match_id}'
        html = self.get_html(url)
        demo_id = re.findall('"/download/demo/(.*?)"', html)
        if len(demo_id) > 0:
            for i in range(0, len(demo_id)):
                print("Converted " + str(match_id))
            # Return the first (canonical) demo id found on the page.
            return demo_id[0]
        elif len(demo_id) < 1:
            print(f'No demo found for {match_id}')
            # Leading space marks this as an error entry for convert_to_demo_ids().
            return " %s" % match_id

    def download(self, demo_ids, folder_name, unzip=False):
        """Download every demo archive into ./folder_name (optionally
        extracting each one) and report the total transferred size.

        Returns True when finished.
        """
        # Temporarily use 1 due to 503 errors
        urls = self.convert_to_urls(demo_ids)
        # Make a folder for the event to save the files in
        self.directory = self.make_dir(folder_name)
        total_file_size = 0
        for url in urls:
            total_file_size += self.get(url, unzip)
        print(f'Successfully transferred {(self.format_file_size(total_file_size))}. Enjoy!')
        return True

    def convert_to_urls(self, demo_ids):
        """Map demo ids to their hltv.org download URLs."""
        return [f'https://www.hltv.org/download/demo/{str(demo_id)}' for demo_id in demo_ids]

    def get(self, url, unzip=False):
        """Download one demo archive (skipping files that already exist) and
        return its size in MB; extracts the archive when *unzip* is True."""
        # Fixed: the previous acquire()/release() pair leaked the lock when
        # the transfer raised; ``with`` releases it on every exit path.
        with self.lock:
            # Build and open the URL
            opener = urllib.request.build_opener()
            opener.addheaders = [('User-Agent', r"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.170 Safari/537.36")]
            response = opener.open(url)
            # HLTV redirects to a .rar or .zip file
            final_url = response.geturl()
            # Gets the filename (everything after the last trailing /)
            filename = final_url.rsplit('/', 1)[-1]
            if os.path.exists(self.directory + "/" + filename):
                # Already downloaded; count as 0 MB transferred.
                return 0
            filesize = 0
            # Gets the Content-Length from the metadata from final_url
            urllib.request.install_opener(opener)
            info = urllib.request.urlopen(final_url).info()
            filesize = (int(info["Content-Length"]) / 1024) / 1024
            print(f'Starting (unknown): {filesize} MB.')
            # Downloads the file to the directory the user enters
            (filepath, message) = urllib.request.urlretrieve(final_url, self.directory + "/" + filename)
            if unzip:
                Archive(filepath).extractall(os.path.dirname(filepath))
            # Tell user the current status and file information
            print(f'Completed (unknown): {filesize} MB.')
            return filesize

    def make_dir(self, folder_name):
        """Create ./folder_name if needed and return its path."""
        directory = f'./{folder_name}'
        os.makedirs(directory, exist_ok=True)
        # Return the string so we can use it
        return directory

    def format_file_size(self, filesize):
        """Format a size given in MB as 'N MB' (truncated) or 'N.NN GB'."""
        if filesize > 1024:
            return "%.2f GB" % (float(filesize) / 1024)
        else:
            return "%s MB" % (int(filesize))

    def get_html(self, url):
        """Fetch *url* with a spoofed browser user agent and return the
        UTF-8-decoded HTML."""
        opener = urllib.request.build_opener()
        # Spoof the user agent
        opener.addheaders = [('User-Agent', r"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.170 Safari/537.36")]
        response = opener.open(url)
        # Read the response as HTML
        html = response.read().decode('utf-8')
        return html

    def print_errors(self, errors):
        """Print the match URLs that had no demo file; always returns True.

        NOTE(review): entries carry the leading space added by get_demo_ids(),
        so the printed URLs contain a stray space -- confirm before tightening.
        """
        if len(errors) == 1:
            print(f'{len(errors)} matches have no demo:')
            for i in range(0, len(errors)):
                print(f'{i+1}: https://www.hltv.org/matches/{errors[i]}')
        elif len(errors) > 0:
            print(f'{len(errors)} matches have no demo:')
            for i in range(0, len(errors)):
                print(f'{i+1}: https://www.hltv.org/matches/{errors[i]}')
        else:
            print("No errors found!")
        return True

    def get_major_ids(self):
        """Scrape HLTV's majors archive and return {event-slug: event-id}."""
        major_archive_url = 'https://www.hltv.org/events/archive?eventType=MAJOR'
        major_html = self.get_html(major_archive_url)
        major_soup = BeautifulSoup(major_html, 'html.parser')
        majors_ids_names = {}
        for major_div in major_soup.find_all('div', {'class': 'events-month'}):
            major_hrefs = major_div.find_all('a', href=True)
            href = next((major_ref for major_ref in major_hrefs if major_ref['href'].startswith('/events')), None)
            if href is not None:
                # hrefs look like /events/3883/iem-katowice-2019
                splitted_href = href['href'].split('/')
                majors_ids_names[splitted_href[3]] = splitted_href[2]
        return majors_ids_names
if __name__ == "__main__":
    # Download (and extract) the demos for every major listed in HLTV's event
    # archive, one folder per event slug.  Network-heavy; uses a single
    # resolver thread.
    downloader = Downloader(1)
    majors = downloader.get_major_ids()
    for major in majors:
        match_ids = downloader.get_match_ids(majors[major])
        demo_ids = downloader.convert_to_demo_ids(match_ids)
        downloader.download(demo_ids,major, True)
        #downloader.unzip_all_archives()
| StarcoderdataPython |
3376874 | from django.conf.urls import url
from apps.event.views import EventListCreateView, EventTypeListView, EventDetailView, EventAcceptView, EventDeclineView
# URL routes for the event app.  Fixed: the ``personal_events`` entry was
# missing its trailing comma, which made the list a SyntaxError (two adjacent
# calls with no separator).
urlpatterns = [
    url(r'^types/?$', EventTypeListView.as_view(), name='event_types'),
    url(r'^$', EventListCreateView.as_view(), name='events'),
    url(r'^personal_events/$', EventDetailView.as_view(), name='personal_events'),
    url(r'^(?P<event_id>\d+)/?$', EventDetailView.as_view(), name='event_detail'),
    url(r'^(?P<event_id>\d+)/accept/?$', EventAcceptView.as_view(), name='event_accept'),
    url(r'^(?P<event_id>\d+)/decline/?$', EventDeclineView.as_view(), name='event_decline'),
]
| StarcoderdataPython |
1679975 | <filename>jumodjango/etc/gfk_manager.py
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.generic import GenericForeignKey
# Adapted from django snippet 1773: http://djangosnippets.org/snippets/1773/
# Use this manager to eager load generic relations in 1 batch per content type (rather than n+1)
# Example: Model.objects.filter(...).fetch_generic_relations()
class GFKManager(Manager):
    """Manager returning GFKQuerySet, so callers can batch-load generic FKs
    via ``Model.objects.filter(...).fetch_generic_relations()``."""

    def get_query_set(self):
        # NOTE(review): get_query_set() is the pre-Django-1.6 hook name; newer
        # Django expects get_queryset() -- confirm the target Django version.
        return GFKQuerySet(self.model)
class GFKQuerySet(QuerySet):
    """QuerySet that can prefetch GenericForeignKey targets in one query per
    content type, instead of one query per row (n+1)."""

    def fetch_generic_relations(self):
        """Populate every GenericForeignKey attribute on all rows of this
        queryset, batching related-object lookups by content type, and return
        the (cloned) queryset."""
        qs = self._clone()
        # All GenericForeignKey virtual fields declared on the model.
        gfk_fields = [g for g in self.model._meta.virtual_fields if isinstance(g, GenericForeignKey)]
        ct_map = {}    # content_type_id -> [object ids to fetch]
        item_map = {}  # row id -> row (NOTE(review): built but never read)
        data_map = {}  # (content_type_id, object_id) -> fetched related object
        # First pass: collect which object ids are needed per content type.
        for item in qs:
            for gfk in gfk_fields:
                ct_id_field = self.model._meta.get_field(gfk.ct_field).column
                ct_id = getattr(item, ct_id_field)
                obj_id = getattr(item, gfk.fk_field)
                ct_map.setdefault(ct_id, []).append(obj_id)
                item_map[item.id] = item
        # One query per distinct content type.
        # NOTE(review): iteritems() is Python-2-only; Python 3 needs items().
        for ct_id, obj_ids in ct_map.iteritems():
            if ct_id:
                ct = ContentType.objects.get_for_id(ct_id)
                for o in ct.model_class().objects.select_related().filter(id__in=obj_ids).all():
                    data_map[(ct_id, o.id)] = o
        # Second pass: attach the fetched objects to each row's GFK attribute.
        for item in qs:
            for gfk in gfk_fields:
                obj_id = getattr(item, gfk.fk_field)
                if obj_id != None:
                    ct_id_field = self.model._meta.get_field(gfk.ct_field).column
                    ct_id = getattr(item, ct_id_field)
                    # NOTE(review): raises KeyError when the referenced row no
                    # longer exists (id absent from data_map) -- confirm intended.
                    setattr(item, gfk.name, data_map[(ct_id, obj_id)])
        return qs
| StarcoderdataPython |
3287550 | <filename>ib2/settings.py<gh_stars>1-10
"""
Django settings for ib2 project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from vitalis.max30100 import MAX30100
from vitalis import max30100
# A button is a good approximation for what we need,
# a digital active-low trigger
from gpiozero import Button
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# (project root: two directory levels above this settings module)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Fixed: the original line was ``SECRET_KEY = <KEY>`` -- an unexpanded
# placeholder that is not valid Python.  Read the key from the environment so
# the real secret never lives in source control.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '<KEY>')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# '*' accepts any Host header -- acceptable for a local/dev device, unsafe on
# an untrusted network.
ALLOWED_HOSTS = [
    '*',
]
# Pulse-oximeter (MAX30100) setup, performed once when settings are imported.
# On any failure (e.g. sensor not attached) ``mx30`` stays None and
# ``mx30_error`` keeps the message, so the site can still boot.
mx30 = None
mx30_error = ""
try:
    mx30 = MAX30100(max_buffer_len=1000)
    mx30.enable_spo2()
    # Set up a trigger to fire when the FIFO buffer (on the MAX30100) fills up.
    # You could also use INTERRUPT_HR to get a trigger on every measurement.
    mx30.enable_interrupt(max30100.INTERRUPT_HR)
    interrupt = Button(16) # Pick a pin
    interrupt.when_activated = mx30.read_sensor # Connect the interrupt
    print("MAX30100 configurado")  # (Spanish: "MAX30100 configured")
except Exception as e:
    # Intentionally broad: any hardware/driver failure is surfaced through
    # mx30_error instead of crashing Django startup.
    print(e)
    mx30_error = str(e)
    # mx30 = None
# Application definition
INSTALLED_APPS = [
    'vitalis.apps.VitalisConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_extensions',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ib2.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'ib2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| StarcoderdataPython |
49146 | <gh_stars>1-10
"""
Callbacks parser
"""
from __future__ import unicode_literals
from . import webhook_attachments
from .types import webhook_types
def parse_payload(payload):
# pylint: disable=too-many-return-statements
if 'message' in payload:
return MessageReceived(payload)
elif 'delivery' in payload:
return MessageDelivered(payload)
elif 'read' in payload:
return MessageRead(payload)
elif 'postback' in payload:
return Postback(payload)
elif 'optin' in payload:
return Authentication(payload)
elif 'account_linking' in payload:
return AccountLinking(payload)
return
class Webhook(object):
    """Base wrapper around a raw Messenger webhook payload dict.

    Exposes sender/recipient ids (with ``user_id``/``page_id`` aliases), the
    event timestamp, and keeps the raw dict in ``payload``.
    """

    def __init__(self, payload):
        sender = payload['sender']
        recipient = payload['recipient']
        self.user_id = self.sender_id = sender.get('id')
        self.page_id = self.recipient_id = recipient.get('id')
        self.timestamp = payload.get('timestamp')
        # Raw payload dict, kept for subclasses and debugging.
        self.payload = payload

    def __str__(self):
        return str(self.payload)

    def __unicode__(self):
        return self.__str__()

    def __repr__(self):
        return str(self.__dict__)
class Authentication(Webhook):
    """Opt-in ('optin') callback.

    :see https://developers.facebook.com/docs/messenger-platform/webhook-reference/authentication
    """
    type = webhook_types.AUTHENTICATION

    def __init__(self, payload):
        super(Authentication, self).__init__(payload)
        optin = payload['optin']
        self.optin_ref = optin.get('ref')
class MessageReceived(Webhook):
    """Inbound message ('message') callback: text, quick replies, attachments.

    :see https://developers.facebook.com/docs/messenger-platform/webhook-reference/message-received
    """
    type = webhook_types.MESSAGE_RECEIVED

    def __init__(self, payload):
        super(MessageReceived, self).__init__(payload)
        message = payload['message']
        self.mid = message.get('mid')
        self.seq = message.get('seq')
        self.text = message.get('text')
        self.attachments = []
        self.quick_reply_payload = None
        if 'quick_reply' in message:
            self.quick_reply_payload = message['quick_reply'].get('payload')
        if 'attachments' in message:
            # Keep only the attachments the attachment parser recognises.
            for attachment_payload in message['attachments']:
                parsed = webhook_attachments.parse_payload(attachment_payload)
                if parsed:
                    self.attachments.append(parsed)
class MessageDelivered(Webhook):
    """Delivery receipt ('delivery') callback.

    :see https://developers.facebook.com/docs/messenger-platform/webhook-reference/message-delivered
    """
    type = webhook_types.MESSAGE_DELIVERED

    def __init__(self, payload):
        super(MessageDelivered, self).__init__(payload)
        delivery = payload['delivery']
        self.seq = delivery.get('seq')
        self.watermark = delivery.get('watermark')
        self.mids = delivery.get('mids')
class MessageRead(Webhook):
    """Read receipt ('read') callback.

    :see https://developers.facebook.com/docs/messenger-platform/webhook-reference/message-read
    """
    type = webhook_types.MESSAGE_READ

    def __init__(self, payload):
        super(MessageRead, self).__init__(payload)
        read = payload['read']
        self.seq = read.get('seq')
        self.watermark = read.get('watermark')
class Postback(Webhook):
    """Postback button ('postback') callback.

    :see https://developers.facebook.com/docs/messenger-platform/webhook-reference/postback-received
    """
    type = webhook_types.POSTBACK_RECEIVED

    def __init__(self, payload):
        super(Postback, self).__init__(payload)
        postback = payload['postback']
        self.postback_payload = postback.get('payload')
class AccountLinking(Webhook):
    """Account linking ('account_linking') callback.

    :see https://developers.facebook.com/docs/messenger-platform/webhook-reference/account-linking
    """
    type = webhook_types.ACCOUNT_LINKING

    def __init__(self, payload):
        super(AccountLinking, self).__init__(payload)
        linking = payload['account_linking']
        self.status = linking.get('status')
        self.authorization_code = linking.get('authorization_code')
| StarcoderdataPython |
1647260 | import unittest
from drgpy.msdrg import DRGEngine
class TestMCD00(unittest.TestCase):
    """Spot checks for Pre-MDC / surgical DRG assignment (DRGs 001-017).

    Each case feeds ICD-10-CM diagnosis codes and ICD-10-PCS procedure codes
    to DRGEngine.get_drg_all() and checks whether the expected MS-DRG appears
    in the returned list.  NOTE(review): the clinical meaning of the
    individual code strings is taken on trust from the grouper definitions;
    do not edit them without the MS-DRG definitions manual at hand.
    """

    def test_mdcs00(self):
        de = DRGEngine()
        # DRG 001/002 split on presence of an MCC-qualifying secondary diagnosis.
        drg_lst = de.get_drg_all(["I10", "E0800"], ["02YA0Z0"])
        self.assertTrue("001" in drg_lst)
        drg_lst = de.get_drg_all(["I10"], ["02YA0Z0"])
        self.assertTrue("002" in drg_lst)
        # A single procedure code is not enough here; the pair is required.
        drg_lst = de.get_drg_all([], ["02HA0RS"])
        self.assertTrue("002" not in drg_lst)
        drg_lst = de.get_drg_all([], ["02HA0RS", "02PA0RZ"])
        self.assertTrue("002" in drg_lst)
        drg_lst = de.get_drg_all([], ["5A1522F"])
        self.assertTrue("003" in drg_lst)
        drg_lst = de.get_drg_all([], ["0B110F4", "5A1955Z"])
        self.assertTrue("003" in drg_lst)
        drg_lst = de.get_drg_all(["E0800"], ["0B110F4"])
        self.assertTrue("003" in drg_lst)
        drg_lst = de.get_drg_all(["A360"], ["0B110F4"])
        self.assertTrue("004" not in drg_lst)
        drg_lst = de.get_drg_all([], ["0FY00Z0", "0DY80Z0"])
        self.assertTrue("005" in drg_lst)
        drg_lst = de.get_drg_all(["I10", "E0800"], ["0FY00Z0"])
        self.assertTrue("005" in drg_lst)
        drg_lst = de.get_drg_all([], ["0DY80Z0"])
        self.assertTrue("005" in drg_lst)
        drg_lst = de.get_drg_all([], ["0FY00Z0"])
        self.assertTrue("006" in drg_lst)
        drg_lst = de.get_drg_all([], ["30230G2"])
        self.assertTrue("014" in drg_lst)
        drg_lst = de.get_drg_all([], ["0BYC0Z0"])
        self.assertTrue("007" in drg_lst)
        # DRG 008 additionally requires the MCC-qualifying secondary diagnosis.
        drg_lst = de.get_drg_all(["I120", "E0800"], ["0TY00Z0", "0FYG0Z0"])
        self.assertTrue("008" in drg_lst)
        drg_lst = de.get_drg_all(["I120"], ["0TY00Z0", "0FYG0Z0"])
        self.assertTrue("008" not in drg_lst)
        drg_lst = de.get_drg_all([], ["XW033C3"])
        self.assertTrue("016" in drg_lst)
        drg_lst = de.get_drg_all(["I10", "E0800"], ["30230AZ"])
        self.assertTrue("304" in drg_lst)
        drg_lst = de.get_drg_all([], ["30230AZ"])
        # in v37, this is 983, used to be 017
        self.assertTrue("983" in drg_lst)
        drg_lst = de.get_drg_all(["E0800"], ["0FYG0Z0"])
        self.assertTrue("010" in drg_lst)
        drg_lst = de.get_drg_all(["A360", "E0800"], ["0B110F4"])
        self.assertTrue("011" in drg_lst)
        drg_lst = de.get_drg_all(["A360", "A000"], ["0B110F4"])
        self.assertTrue("012" in drg_lst)
        drg_lst = de.get_drg_all(["A360"], ["0B110F4"])
        self.assertTrue("013" in drg_lst)


if __name__=="__main__":
    unittest.main()
| StarcoderdataPython |
3254833 | <filename>get_user_credentials.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Get user oauth credentials.
Utility to help with getting the access token for a user
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import sys
import logging
import tweepy
from six.moves.configparser import SafeConfigParser
from six.moves import input
def main():
    """The main function.

    Runs the Twitter OAuth 1.0a out-of-band (PIN) flow:
    1. read the app's consumer key/secret from config.ini,
    2. send the user to Twitter's authorization URL,
    3. exchange the verifier code they paste back for an access token.

    Returns 0 on success, 1 on any failure (missing config, token errors).
    """
    #log = logging.getLogger('main')
    config = SafeConfigParser()
    # read() returns the list of files successfully parsed; an empty list
    # means config.ini is missing or unreadable.
    if not config.read('config.ini'):
        print('Could not read config file')
        return 1
    consumer_key = config.get('twitter', 'consumer_key')
    consumer_secret = config.get('twitter', 'consumer_secret')
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    try:
        redirect_url = auth.get_authorization_url()
    except tweepy.TweepError as exc:
        print('Error! Failed to get request token')
        print(exc)
        return 1
    print('Please go to the following url to authorize this app:')
    print(" {}\n".format(redirect_url))
    print('Enter the verifier code you see after authorizing app')
    verifier = input('Verifier: ')
    try:
        auth.get_access_token(verifier)
    except tweepy.TweepError:
        print('Error! Failed to get access token')
        return 1
    print('Enter these values in for access_token and access_token_secret')
    # NOTE(review): ``auth.access_token.key`` / ``.secret`` is the old tweepy
    # (<3.0) token-object API; modern tweepy stores plain strings in
    # auth.access_token / auth.access_token_secret -- confirm pinned version.
    print('access_token: {}'.format(auth.access_token.key))
    print('access_token_secret: {}'.format(auth.access_token.secret))
    return 0


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    sys.exit(main())
| StarcoderdataPython |
3297074 | import tqdm
# Registry of available script sources, keyed by function name.  Populated by
# the @source decorator below; presumably consumed by the CLI (confirm).
sources = {}


def source(fn):
    """Register *fn* in the ``sources`` registry and return it unchanged."""
    sources[fn.__name__] = fn
    return fn
def _scrape_ids(ids, scraper, name, progress=False):
parsed_scripts = []
iterator = tqdm.tqdm(ids, 'Processing {} scripts'.format(name)) if progress else ids
for script_id in iterator:
parsed_scripts += scraper.extract_dialog(script_id)
return parsed_scripts
# Each @source function below registers itself in ``sources``; its docstring
# doubles as the option's description (presumably surfaced by the CLI --
# confirm before rewording the docstrings).
@source
def tos(module, progress=False):
    """Include the original series TV scripts."""
    return _scrape_ids(module.ids['tos'], module.Scraper(), 'original', progress)


@source
def tas(module, progress=False):
    """Include the animated series TV scripts."""
    return _scrape_ids(module.ids['tas'], module.Scraper(), 'animated', progress)


@source
def tng(module, progress=False):
    """Include The Next Generation TV scripts."""
    return _scrape_ids(module.ids['tng'], module.Scraper(), 'TNG', progress)


@source
def ds9(module, progress=False):
    """Include Deep Space Nine TV scripts."""
    return _scrape_ids(module.ids['ds9'], module.Scraper(), 'DS9', progress)


@source
def voy(module, progress=False):
    """Include Voyager TV scripts."""
    return _scrape_ids(module.ids['voy'], module.Scraper(), 'Voyager', progress)


@source
def ent(module, progress=False):
    """Include Enterprise TV scripts."""
    return _scrape_ids(module.ids['ent'], module.Scraper(), 'Enterprise', progress)


@source
def mov_tos(module, progress=False):
    """Include TOS-era movie scripts."""
    return _scrape_ids(module.ids['mov_tos'], module.Scraper(), 'TOS movies', progress)


@source
def mov_tng(module, progress=False):
    """Include TNG-era movie scripts."""
    return _scrape_ids(module.ids['mov_tng'], module.Scraper(), 'TNG movies', progress)


# @source # not yet ready
# def mov_jja(progress=False):
#     """Include JJ Abrams-verse movie scripts."""
#     raise NotImplementedError()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.