id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
12809052 | <reponame>hemo650/Ezy-Sort
from .runner import ColourTextTestRunner
class ColourRunnerMixin(object):
    """Mixin for Django test-runner classes that swaps in the colourised runner.

    Reads Django's ``no_color`` option (US spelling, as Django passes it) and
    forwards it to ColourTextTestRunner as ``no_colour``.
    """
    test_runner = ColourTextTestRunner

    def __init__(self, *args, **kwargs):
        # Remember the flag without consuming it, so the parent runner's
        # __init__ still sees everything Django passed in.
        self.no_colour = kwargs.get('no_color', False)
        super(ColourRunnerMixin, self).__init__(*args, **kwargs)

    def run_suite(self, suite, **kwargs):
        """This is the version from Django 1.7."""
        runner = self.test_runner(
            verbosity=self.verbosity,
            failfast=self.failfast,
            no_colour=self.no_colour,
        )
        return runner.run(suite)
| StarcoderdataPython |
5145754 | """Extract/pre-process data."""
import pandas as pd
import logging
log = logging.getLogger(__name__)
class PreProcess():
    """Pre-process tabular data held in pandas dataframes.

    Thin convenience wrapper around common pandas operations (CSV I/O,
    column dropping, row filtering) used by the extraction pipeline.
    """

    def __init__(self):
        # Scratch dataframe, kept for callers that access .frame directly.
        self.frame = pd.DataFrame()

    def csv_to_df(self, file):
        """
        Read a CSV file into a dataframe.

        Args:
        ----
            file (str): Filename of the CSV file to read from.

        Returns:
            pd.DataFrame: Parsed contents of the file.
        """
        df = pd.read_csv(file, index_col=None, header=0)
        return df

    def drop_col(self, df, l_columns):
        """
        Drop the given columns from the dataframe (modifies df in place).

        Args:
        ----
            df (pd.DataFrame): Pandas dataframe.
            l_columns (list): List containing strings of columns to drop in df.

        Returns:
            pd.DataFrame: The same dataframe, with the columns removed.
        """
        df.drop(l_columns, axis=1, inplace=True)
        return df

    def extract_uniq_val(self, df, column):
        """
        Return the unique values found in column @column.

        Args:
        ----
            df (pd.DataFrame): Pandas dataframe.
            column (str): Name of the column to collect unique values from.

        Returns:
            numpy.ndarray: Unique values in order of appearance.
        """
        unique_val = df[column].unique()
        return unique_val

    def find_rows(self, df, elem, column):
        """
        Find rows that match string @elem in column @column.

        Args:
        ----
            df (pd.DataFrame): Pandas dataframe.
            elem (str): String to match when searching rows.
            column (str): Name of the column to match the string.

        Returns:
            pd.DataFrame: The matching rows.
        """
        log.info("Creating new dataframe matching value {0} in column {1}".format(elem, column))
        df_base = df[df[column] == elem]
        return df_base

    def extract_rows_type(self, df, elem, column):
        """
        Extract all rows based on value 'elem' in column 'column'.

        Args:
        ----
            df (pd.DataFrame): Pandas dataframe.
            elem (str): String that matches with a value in the specified column.
            column (str): Name of the column in which 'elem' should be matched.

        Returns:
            pd.DataFrame: The matching rows.
        """
        log.info("Extracting rows with value {0} from column {1}".format(elem, column))
        df_base = df[df[column] == elem]
        return df_base

    def remove_rows(self, df, elem, column):
        """
        Remove the rows matching 'elem' in column 'column'.

        Args:
        ----
            df (pd.DataFrame): Pandas dataframe.
            elem (str): String used in rows that is matched for removal.
            column (str): Name of the column in which 'elem' should be matched.

        Returns:
            pd.DataFrame: A dataframe without the matching rows.
        """
        log.info("Removing rows with value {0} from column {1}".format(elem, column))
        # BUG FIX: previously used `==`, which KEPT only the matching rows
        # (duplicating extract_rows_type) instead of removing them.
        df = df[df[column] != elem]
        return df

    def csv_out(self, df, filename):
        """
        Write the dataframe to a CSV file (without the index).

        Args:
        ----
            df (pd.DataFrame): Pandas dataframe.
            filename (str): File name the dataframe should be written to.
        """
        log.info("Writing to CSV file {0}".format(filename))
        df.to_csv(filename, index=False)
| StarcoderdataPython |
1791985 | #!/usr/bin/env python3
# ================== i18n.py =====================
# It localizes website elements.
# Hook type: pre_build (modifies config file)
# Configuration:
# Create a i18n.yaml file in your project root. Look at i18n.yaml and i18n.example.yaml
# to get a feel for the structure.
# Add the correct id to every element (via react-template.yaml)
import json
import os
import sys
import shutil
# External libs
# pip install pyyaml
import yaml
CODEC = "utf-8"
DATA_PATH = "i18n.yaml"
DATA_PLACEHOLDER = "__I18N_JSON__"
JS_OUTPUT_NAME = "i18n.js"
JS_INPUT_PATH = "template-tools/i18n/i18n_temlate.js"
CONFIG_PATH = "react-template.yaml"
CUSTOM_HTML_HEAD_FIELD = "customHtmlHead"
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
LANGUAGE_CHOOSER_DOM_FIELD = "language_chooser_dom"
def load_config(project_dir: str) -> dict:
    """Load the i18n configuration, folding project data over the template defaults.

    A missing/unreadable project file is treated as an empty config. A project
    that sets ignore_defaults uses only its own data. Otherwise 'languages'
    replaces the default list wholesale, while 'translations' is merged
    key-by-key; unknown keys raise.
    """
    # Load the project data file (if it exists)
    try:
        project_data_path = os.path.join(project_dir, DATA_PATH)
        project_data = parse_yaml_file(project_data_path) or {}
    except Exception:
        project_data = {}

    if project_data.get("ignore_defaults", False):
        # Project explicitly opts out of the template defaults.
        return project_data

    # Start from the template defaults and merge the project data in.
    merged_data = parse_yaml_file(os.path.join(SCRIPT_DIR, DATA_PATH))
    for key, data in project_data.items():
        if key == "ignore_defaults":
            continue  # already handled above
        elif key == "languages":
            merged_data[key] = data  # language list is replaced wholesale
        elif key == "translations":
            combined = merged_data.get(key, {})
            combined.update(data)
            merged_data[key] = combined
        else:
            raise Exception(f"Unsupported config key: '{key}'")
    # Dump the merged result for debugging purposes.
    text = yaml.safe_dump(merged_data)
    write_file_bytes(
        "template-tools/debug/i18n.merged.yaml", text.encode(CODEC))
    return merged_data
def create_i18n_js(i18n_config: dict, project_dir: str):
    """Generate public/i18n.js from the translations in the i18n config."""
    translations = i18n_config.get("translations", {})
    # Collect every language code that appears in any translation entry.
    languages = set()
    for per_language in translations.values():
        languages.update(per_language.keys())
    payload = {
        "languages": sorted(languages),
        "translations": translations,
    }
    # Inject the data into the file
    inject_data_into_js_file(payload, JS_INPUT_PATH, "public/" + JS_OUTPUT_NAME)
def inject_data_into_js_file(data, js_input_file: str, js_output_file: str):
    """Substitute *data* (serialized as JSON) for the placeholder in the JS template."""
    template_text = read_file_bytes(js_input_file).decode(CODEC)
    # Sorting them forces them in a deterministic order. Same input -> same output
    json_string = json.dumps(data, sort_keys=True)
    rendered = template_text.replace(DATA_PLACEHOLDER, json_string)
    if rendered == template_text:
        raise Exception("JS input template has no placeholder for the data")
    write_file_bytes(js_output_file, rendered.encode(CODEC))
def inject_script_url_into_config(i18n_config: dict, project_dir: str):
    """Wire the generated i18n.js into the project's react-template.yaml.

    Appends a <script> tag for i18n.js to the custom HTML head, and — when
    languages are configured — injects a <select> language chooser and copies
    the language icon into public/.
    """
    yaml_path = os.path.join(project_dir, CONFIG_PATH)
    config = parse_yaml_file(yaml_path)
    # Inject script tag to load i18n.js
    script_tag = f'<script src="%PUBLIC_URL%/{JS_OUTPUT_NAME}"></script>'
    custom_html_head = config.get(CUSTOM_HTML_HEAD_FIELD, "")
    custom_html_head += script_tag
    # Inject dom for language chooser
    languages = i18n_config.get("languages", [])
    if languages:
        # NOTE(review): whitespace inside this template literal ends up
        # verbatim in the rendered HTML; assumed cosmetic only — confirm.
        lang_select = f'''<select id="page-language-chooser">
{"".join([
    f'<option value="{lang_obj["code"]}">{lang_obj["title"]}</option>'
    for lang_obj in languages
])
}
</select>'''
        config[LANGUAGE_CHOOSER_DOM_FIELD] = lang_select
        # Append the css link or language selector
        # custom_html_head += "<link rel=stylesheet href=%PUBLIC_URL%/i18n.css>"
        # shutil.move("template-tools/i18n/i18n.scss",
        #             "public/i18n.scss")
        shutil.move("template-tools/i18n/languageicon-org.png",
                    "public/languageicon-org.png")
    else:
        config[LANGUAGE_CHOOSER_DOM_FIELD] = ""
    config[CUSTOM_HTML_HEAD_FIELD] = custom_html_head
    text = yaml.safe_dump(config)
    write_file_bytes(CONFIG_PATH, text.encode(CODEC))
def parse_yaml_file(yamlPath: str):
    """Read *yamlPath* and return its parsed YAML content."""
    raw_text = read_file_bytes(yamlPath).decode(CODEC)
    return yaml.safe_load(raw_text)
def write_file_bytes(path: str, content: bytes):
    """Write *content* to *path*, creating parent directories as needed.

    Previously any makedirs failure (including permission errors) was
    silently swallowed by a blanket ``except Exception``; now only the
    legitimate "already exists" case is tolerated via ``exist_ok=True``,
    and a bare filename (empty dirname) simply skips directory creation.
    """
    directory = os.path.dirname(path)
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(path, "wb") as f:
        f.write(content)
def read_file_bytes(path: str) -> bytes:
    """Return the entire contents of *path* as raw bytes."""
    with open(path, "rb") as input_file:
        data = input_file.read()
    return data
def main():
    """Entry point for the pre_build hook: localize the given react project."""
    args = sys.argv
    if len(args) < 2:
        print(f"Usage: {args[0]} <react_project_folder>")
        sys.exit(1)
    project_dir = args[1]
    print("Project dir:", project_dir)
    config = load_config(project_dir)
    # Generate the JS bundle first, then wire it into the page config.
    create_i18n_js(config, project_dir)
    inject_script_url_into_config(config, project_dir)
| StarcoderdataPython |
1971165 | <reponame>ulikoehler/ODBPy<filename>ODBPy/Profile.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Parser for the ODB++ PCB profile file
"""
import os.path
from collections import namedtuple
from .LineRecordParser import *
from .SurfaceParser import *
from .PolygonParser import *
from .Decoder import *
from .Treeifier import *
from .Units import *
__all__ = ["read_profile", "parse_profile", "Profile"]
Profile = namedtuple("Profile", ["unit", "surfaces"])
def read_profile(directory):
    """Read and parse the ODB++ profile at steps/pcb/profile under *directory*."""
    profile_path = os.path.join(directory, "steps/pcb/profile")
    return parse_profile(read_linerecords(profile_path))
def parse_profile(linerecords):
    """Decode and treeify the 'Layer features' line records into a Profile."""
    # Build rulesets: surfaces and polygons share one decoder/treeifier pass.
    options = surface_decoder_options + polygon_decoder_options
    rules = surface_treeify_rules + polygon_treeify_rules
    decoded = list(run_decoder(linerecords["Layer features"], options))
    surfaces = treeify(decoded, rules)
    return Profile(linerecords_unit(linerecords), surfaces)
| StarcoderdataPython |
6416740 | import re
import warnings
from optparse import make_option
from django.core.management.commands.inspectdb import Command as InspectDBCommand
from django.db import connections, DEFAULT_DB_ALIAS
from salesforce.backend import introspection as sf_introspection
import django
import salesforce
class Command(InspectDBCommand):
    """inspectdb variant that understands the Salesforce backend.

    Falls back to Django's stock inspectdb for non-Salesforce databases.
    """
    # This will export Salesforce to a valid models.py, if Django >=1.5.
    # It is recommended to use Django >=1.5 for inspectdb, even if the generated models.py will be used on Django <1.5
    # (The model generated by Django <=1.4 requires very much manual editing, adding many `related_name=...`)
    option_list = InspectDBCommand.option_list + (
        make_option('--table-filter', action='store', dest='table_name_filter',
                    help='Regular expression that filters API Names of SF tables to introspect.'),
    )

    def handle_noargs(self, **options):
        """Run introspection; post-process each generated line for SF quirks."""
        if isinstance(options['table_name_filter'], str):
            # Compile the user-supplied filter into a match callable.
            options['table_name_filter'] = re.compile(options['table_name_filter']).match
        self.connection = connections[options['database']]
        if self.connection.vendor == 'salesforce':
            self.db_module = 'salesforce'
            for line in self.handle_inspection(options):
                # Strip noise inspectdb adds for renamed fields.
                line = line.replace(" Field renamed because it contained more than one '_' in a row.", "")
                line = re.sub(' #$', '', line)
                if django.VERSION[:2] < (1,5):
                    # prevent problems with mutual dependencies etc.
                    line = re.sub(r'(?<=models.ForeignKey\()(\w+)', r"'\1'", line)
                elif django.VERSION[:2] == (1,5):
                    # fix bug in Django 1.5
                    line = line.replace("''self''", "'self'")
                self.stdout.write("%s\n" % line)
        else:
            super(Command, self).handle_noargs(**options)

    def get_field_type(self, connection, table_name, row):
        """Extend the base field type with Salesforce-specific field params."""
        field_type, field_params, field_notes = super(Command, self
            ).get_field_type(connection, table_name, row)
        if connection.vendor == 'salesforce':
            # SF cursors append an extra params dict to the standard row tuple.
            name, type_code, display_size, internal_size, precision, scale, null_ok, sf_params = row
            field_params.update(sf_params)
        return field_type, field_params, field_notes

    def normalize_col_name(self, col_name, used_column_names, is_relation):
        """Beautify SF API column names and record how to map them back.

        Converts CamelCase / '__c' custom-field names to snake_case and adds
        db_column/custom/related_name/sf_read_only params where needed.
        """
        if self.connection.vendor == 'salesforce':
            beautified = re.sub('__c$', '', col_name)
            beautified = re.sub(r'([a-z0-9])(?=[A-Z])', r'\1_', beautified)
            beautified = beautified.lower()
            new_name, field_params, field_notes = super(Command, self
                ).normalize_col_name(beautified, used_column_names, is_relation)
            # *reconstructed* : is what will SfField reconstruct to db column
            reconstructed = new_name.title().replace('_', '')
            if col_name.endswith('__c'):
                reconstructed += '__c'
                field_params['custom'] = True
            elif is_relation:
                reconstructed += 'Id'
            # TODO: Discuss whether 'db_column' should be rather compared case insensitive
            if reconstructed != col_name or 'db_column' in field_params:
                field_params['db_column'] = col_name
            if is_relation:
                if col_name in sf_introspection.last_with_important_related_name:
                    # Disambiguate reverse accessors for models with several FKs
                    # to the same target.
                    field_params['related_name'] = '%s_%s_set' % (
                        sf_introspection.last_introspected_model.lower(),
                        new_name.replace('_', '')
                    )
                if col_name in sf_introspection.last_read_only:
                    field_params['sf_read_only'] = sf_introspection.last_read_only[col_name]
                field_params['on_delete'] = sf_introspection.SymbolicModelsName('DO_NOTHING')
        else:
            new_name, field_params, field_notes = super(Command, self
                ).normalize_col_name(col_name, used_column_names, is_relation)
        return new_name, field_params, field_notes

    def get_meta(self, table_name):
        """
        Return a sequence comprising the lines of code necessary
        to construct the inner Meta class for the model corresponding
        to the given database table name.
        """
        ret = ["    class Meta(models.Model.Meta):",
               "        db_table = '%s'" % table_name,
               ]
        if self.connection.vendor == 'salesforce':
            # The SF introspection knows about extra Meta options (e.g. verbose names).
            for line in self.connection.introspection.get_additional_meta(table_name):
                ret.append("        " + line)
        ret.append("")
        return ret
# Warn at import time on old Django: the generated models.py relies on
# Django >= 1.5 behaviour and would need manual editing otherwise.
if django.VERSION[:2] < (1,5):
    warnings.warn("Django >= 1.5 is required to generate a valid model. "
                  "Manual editing is necessary for older Django.")
| StarcoderdataPython |
6697269 | <reponame>sosolidkk/manga-unifier<gh_stars>1-10
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.reverse import reverse
from rest_framework.test import APIClient, APITransactionTestCase
from tests.factories.user import UserFactory
class CreateTokenForUserTest(APITransactionTestCase):
    """Exercise the DRF auth-token endpoint with valid, missing and bad credentials."""

    client = APIClient()

    def setUp(self):
        self.user = UserFactory(password="<PASSWORD>!")
        self.user.set_password(self.user.password)
        self.user.save()
        # Credentials matching the user created above.
        self.payload = {
            "username": self.user.username,
            "password": "<PASSWORD>!",
        }
        # BUG FIX: the invalid password was a bare (unquoted) token, which is
        # a SyntaxError; any string that differs from the real password works.
        self.invalid_payload = {
            "username": self.user.username,
            "password": "definitely-not-the-password",
        }

    def test_create_token_successful(self):
        response = self.client.post(reverse("auth-token"), data=self.payload)
        assert response.status_code == status.HTTP_200_OK
        assert "token" in response.json()
        assert response.json()["token"] == Token.objects.first().key

    def test_create_token_without_user(self):
        response = self.client.post(reverse("auth-token"))
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert response.json()["username"] == ["This field is required."]
        # DRF emits the same required-field message for the missing password.
        assert response.json()["password"] == ["This field is required."]

    def test_create_token_with_invalid_credentials(self):
        response = self.client.post(reverse("auth-token"), data=self.invalid_payload)
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert response.json()["non_field_errors"] == ["Unable to log in with provided credentials."]
| StarcoderdataPython |
1820000 | <filename>src/const_performance.py
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
#!/usr/bin/env python
#
#The MIT CorrelX Correlator
#
#https://github.com/MITHaystack/CorrelX
#Contact: <EMAIL>
#Project leads: <NAME>, <NAME> Project developer: <NAME>
#
#Copyright 2017 MIT Haystack Observatory
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#
#------------------------------
#------------------------------
#Project: CorrelX.
#File: const_performance.py.
#Author: <NAME> (<EMAIL>)
#Description:
"""
Constants for performance configuration: multi-threading, optimizations, approximations...
"""
#History:
#initial version: 2016.12 ajva
#MIT Haystack Observatory
# -------------------------------------------------------------------------------------------------- Application layer
# Map
###########################################################
# Mapper general optimizations
###########################################################
# Superframes bundle several frames into one mapper output line to reduce
# MapReduce interface overhead. Values > 1 are still unstable, so the
# feature stays disabled.
NUM_FRAMES_PER_LINE = -1  # Keep -1. Needs debugging for >1

# Reduce
###########################################################
# Reducer general optimizations
###########################################################
# Sub-accumulation lets the reducer compute FX early (trades performance vs.
# memory), but breaks delay correction when the delay exceeds the number of
# stored samples; disabled until tested. 1 = compute at every acc period.
COMPUTE_FOR_SUB_ACC_PERIOD = -1  # Keep -1

# -------------------------------------------------------------------------------------------------- Libraries
# FX
###########################################################
# FX library general optimizations
###########################################################
# Reuse rotations across polarizations of the same station; still buggy when on.
SAVE_TIME_ROTATIONS = 0  # Keep 0. Needs debugging for 1

###########################################################
# FX library approximations
###########################################################
# Fringe-rotation precision vs. performance:
#   0 = evaluate delays only for the first sample
#   1 = evaluate delays for the full timescale
#   2 = interpolate linearly between first and last sample delays
FULL_TIMESCALE = 1

###########################################################
# FX library multithreading
###########################################################
# PyFFTW (https://pypi.python.org/pypi/pyFFTW); scipy fft is the default.
# TO DO: This is under development.
USE_FFTW = 0
THREADS_FFTW = 1

# Numexpr (https://pypi.python.org/pypi/numexpr)
# TO DO: This is under development.
USE_NE = 0
THREADS_NE = 1

# Per-operation numexpr switches; all follow the master USE_NE flag.
USE_NE_EXP = 1 if USE_NE else 0     # exponential computation
USE_NE_FRINGE = 1 if USE_NE else 0  # fringe rotation
USE_NE_MULT = 1 if USE_NE else 0    # multiplication (unused)
USE_NE_F = 1 if USE_NE else 0       # freq-domain operations (unused)

# Python multiprocessing.Pool
# (https://docs.python.org/2/library/multiprocessing.html#using-a-pool-of-workers)
# Multi-threading, currently for fringe rotation; under development.
USE_MP = 0
MP_THREADS = 1
# <codecell>
| StarcoderdataPython |
4971403 | import unittest
import zserio
from testutils import getZserioApi
class OptionalBit31RangeCheckTest(unittest.TestCase):
    """Range-check tests for an optional bit:31 field compiled with -withRangeCheckCode."""

    @classmethod
    def setUpClass(cls):
        cls.api = getZserioApi(__file__, "with_range_check_code.zs",
                               extraArgs=["-withRangeCheckCode"]).optional_bit31_range_check

    def testOptionalBit31LowerBound(self):
        self._checkOptionalBit31Value(OPTIONAL_BIT31_LOWER_BOUND)

    def testOptionalBit31UpperBound(self):
        self._checkOptionalBit31Value(OPTIONAL_BIT31_UPPER_BOUND)

    def testOptionalBit31BelowLowerBound(self):
        with self.assertRaises(zserio.PythonRuntimeException):
            self._checkOptionalBit31Value(OPTIONAL_BIT31_LOWER_BOUND - 1)

    def testOptionalBit31AboveUpperBound(self):
        with self.assertRaises(zserio.PythonRuntimeException):
            self._checkOptionalBit31Value(OPTIONAL_BIT31_UPPER_BOUND + 1)

    def testOptionalBit31None(self):
        # Setting the optional field to None must not trigger the range check.
        compound = self.api.OptionalBit31RangeCheckCompound()
        compound.setValue(None)

    def _checkOptionalBit31Value(self, value):
        # Round-trip: serialize the compound, read it back, compare for equality.
        written = self.api.OptionalBit31RangeCheckCompound.fromFields(True, value)
        writer = zserio.BitStreamWriter()
        written.write(writer)
        reader = zserio.BitStreamReader(writer.getByteArray())
        readBack = self.api.OptionalBit31RangeCheckCompound.fromReader(reader)
        self.assertEqual(written, readBack)
# Valid range of a bit:31 value: 0 .. 2**31 - 1.
OPTIONAL_BIT31_LOWER_BOUND = 0
OPTIONAL_BIT31_UPPER_BOUND = (1 << 31) - 1
| StarcoderdataPython |
5155944 | <filename>BOJ/19000~19999/19600~19699/19504.py
# BOJ 19504: read N "x,y" points from stdin and print the two corners of a
# bounding box padded by one unit on every side.
xs = []
ys = []
point_count = int(input())
for _ in range(point_count):
    x, y = map(int, input().split(','))
    xs.append(x)
    ys.append(y)
print(f"{min(xs) - 1},{min(ys) - 1}")
print(f"{max(xs) + 1},{max(ys) + 1}")
114581 | <gh_stars>1-10
#!/usr/bin/env python
#===========================================================================
#
# DOWNLOAD aircraft data in IWG1 format
#
#===========================================================================
import os
import sys
from stat import *
import time
import datetime
from datetime import timedelta
import dateutil.parser
import string
import subprocess
from optparse import OptionParser
import atexit
def main():
    """Top-level loop: register with procmap, then poll the IWG1 URL forever."""

    global appName
    appName = "download_iwg1.py"

    global DATA_DIR
    global project
    # Both environment variables are required; a missing one raises KeyError.
    DATA_DIR = os.environ['DATA_DIR']
    project = os.environ['project']

    global startTime
    startTime = time.time()

    global prevEntryTime
    prevEntryTime = ""

    # parse the command line
    global options
    parseArgs()

    # initialize
    if (options.debug == True):
        print >>sys.stderr, "======================================================="
        print >>sys.stderr, "BEGIN: " + appName + " " + str(datetime.datetime.now())
        print >>sys.stderr, "======================================================="

    # set exit condition
    atexit.register(procmapUnregister)

    # loop, getting data
    while (True):
        procmapRegister()
        getOneEntry()
        time.sleep(float(options.intervalSecs))

    # let the user know we are done
    # NOTE(review): everything below is unreachable — the loop above never
    # exits normally; shutdown happens via signals + the atexit hook.
    if (options.debug == True):
        print >>sys.stderr, "======================================================="
        print >>sys.stderr, "END: " + appName + " " + str(datetime.datetime.now())
        print >>sys.stderr, "======================================================="

    sys.exit(0)
########################################################################
# Get one data line
def getOneEntry():
    """Download the IWG1 file once, timestamp it and register it with LdataWriter.

    The IWG1 record's second comma-separated field carries the observation
    time; it names the output file and the day directory. A record whose
    time matches the previous download is discarded.
    """

    global prevEntryTime

    # determine the file name from the last part of the URL
    urlParts = options.inputUrl.split("/")
    fileName = urlParts[-1:][0]
    if (options.debug == True):
        print >>sys.stderr, "URL parts: ", urlParts
        print >>sys.stderr, "fileName: ", fileName

    # ensure outputdir exists
    if not os.path.exists(options.outputDir):
        runCommand("mkdir -p " + options.outputDir)

    # go to output dir
    os.chdir(options.outputDir)

    # delete any existing files, since wget will name the
    # files fileName.1, fileName.2 etc if the file already exists
    runCommand("/bin/rm " + fileName + "*")

    # perform wget
    cmd = 'wget ' + options.inputUrl
    runCommand(cmd)

    # compute file path
    filePath = os.path.join(options.outputDir, fileName)
    if (options.debug == True):
        print >>sys.stderr, "filePath: ", filePath

    # parse the file (only the first line is relevant)
    file = open(filePath, 'r');
    line = file.readline()
    if (options.debug == True):
        print >>sys.stderr, "line: ", line

    # split comma-delimted values
    lineParts = line.split(',')
    if (len(lineParts) < 3):
        print >>sys.stderr, "..... no data found"
        return
    # IWG1 field 2 is the record timestamp
    dateTimeStr = lineParts[1]
    dtime = dateutil.parser.parse(dateTimeStr)
    if (options.debug == True):
        print >>sys.stderr, "dtime: ", dtime
    yyyymmdd = "%.4d%.2d%.2d" % (dtime.year, dtime.month, dtime.day)
    hhmmss = "%.2d%.2d%.2d" % (dtime.hour, dtime.minute, dtime.second)
    entryTime = yyyymmdd + hhmmss
    if (options.debug == True):
        print >>sys.stderr, "entryTime: ", entryTime

    # check if we have already handled this time
    if (entryTime == prevEntryTime):
        if (options.debug == True):
            print >>sys.stderr, " ==>> entry time already read: ", prevEntryTime
            print >>sys.stderr, " ==>> ignoring"
        runCommand("/bin/rm " + fileName + "*")
        return
    prevEntryTime = entryTime

    # make day dir
    if not os.path.exists(yyyymmdd):
        runCommand("mkdir -p " + yyyymmdd)

    # move the file into the day dir
    renamedFileName = entryTime + "_" + fileName
    renamedFilePath = os.path.join(yyyymmdd, renamedFileName)
    runCommand("/bin/mv -f " + fileName + " " + renamedFilePath)

    # write latest_data_info
    cmd = 'LdataWriter -dir ' + options.outputDir + \
          ' -rpath ' + renamedFilePath + \
          ' -ltime ' + entryTime + \
          ' -writer ' + appName
    runCommand(cmd)
########################################################################
# Register with procmap
def procmapRegister():
    """Register this process with procmap (60 s heartbeat interval)."""

    # build the procmap_register command line
    command = "procmap_register"
    command = command + " -instance " + options.instance
    command = command + " -name " + appName
    command = command + " -pid " + str(os.getpid())
    command = command + " -reg_int 60 "
    command = command + " -start " + str(startTime)
    runCommand(command)
########################################################################
# Un-register with procmap
def procmapUnregister():
    """Unregister this process from procmap (called from the atexit hook)."""

    # build the procmap_unregister command line
    command = "procmap_unregister"
    command = command + " -instance " + options.instance
    command = command + " -name " + appName
    command = command + " -pid " + str(os.getpid())
    runCommand(command)
########################################################################
# Parse the command line
def parseArgs():
global options
# parse the command line
usage = "usage: %prog [options]"
parser = OptionParser(usage)
# these options come from the ldata info file
parser.add_option('--debug',
dest='debug', default='False',
action="store_true",
help='Set debugging on')
parser.add_option('--verbose',
dest='verbose', default='False',
action="store_true",
help='Set debugging on')
parser.add_option('--interval',
dest='intervalSecs',
default=5,
help='Time interval between gets - secs')
parser.add_option('--input_url',
dest='inputUrl',
default='http://asp-interface-2.arc.nasa.gov/API/parameter_data/N555DS/IWG1',
help='URL for wget for IWG1 data')
parser.add_option('--output_dir',
dest='outputDir',
default='raw/aircraft/N555DS',
help='Output directory')
parser.add_option('--instance',
dest='instance',
default='N555DS',
help='Process instance')
(options, args) = parser.parse_args()
if (options.verbose):
options.debug = True
if (options.debug == True):
print >>sys.stderr, "Options:"
print >>sys.stderr, " debug? ", options.debug
print >>sys.stderr, " intervalSecs: ", options.intervalSecs
print >>sys.stderr, " inputUrl: ", options.inputUrl
print >>sys.stderr, " outputDir: ", options.outputDir
########################################################################
# Run a command in a shell, wait for it to complete
def runCommand(cmd):
    """Run *cmd* in a shell and wait for it to complete.

    Failures are only reported to stderr (signal terminations always, exit
    codes in debug mode); nothing is propagated to the caller.
    """

    if (options.debug == True):
        print >>sys.stderr, "running cmd:",cmd

    try:
        retcode = subprocess.call(cmd, shell=True)
        if retcode < 0:
            # negative return code means the child was killed by a signal
            print >>sys.stderr, "Child was terminated by signal: ", -retcode
        else:
            if (options.debug == True):
                print >>sys.stderr, "Child returned code: ", retcode
    except OSError, e:
        print >>sys.stderr, "Execution failed:", e
########################################################################
# kick off main method
if __name__ == "__main__":
main()
| StarcoderdataPython |
371405 | """add an output_type field
Revision ID: 5720713911df
Revises: 10dea94d2dc1
Create Date: 2018-05-24 12:05:10.226540
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5720713911df'
down_revision = '10dea94d2dc1'
branch_labels = None
depends_on = None
from sqlalchemy.orm import sessionmaker, Session as BaseSession
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
Session = sessionmaker()
class Output(Base):
    # Minimal stand-in for the application's Output model: migrations should
    # not import application code, so only the columns this migration needs
    # are mapped here.
    __tablename__ = 'outputs'
    id = sa.Column(sa.Integer, primary_key=True)
def upgrade():
    """Add the nullable output_type column and backfill all existing rows."""
    op.add_column('outputs', sa.Column('output_type', sa.String))
    bind = op.get_bind()
    session = Session(bind=bind)
    # let's hope it's all SLAM! (backfill assumes every existing output is 6dof SLAM)
    for o in session.query(Output):
        setattr(o, 'output_type', 'slam/6dof')
    session.commit()
def downgrade():
    """Drop the output_type column added by upgrade()."""
    op.drop_column('outputs', 'output_type')
| StarcoderdataPython |
1954975 | <reponame>HelmchenLabSoftware/mesostat-dev<gh_stars>0
import pandas as pd
import itertools
############################
# Display
############################
def pd_print_all(df):
    """Print *df* without pandas truncating any rows or columns."""
    no_truncation = ('display.max_rows', None, 'display.max_columns', None)
    with pd.option_context(*no_truncation):
        print(df)
############################
# Search
############################
def pd_first_row(df):
    """Return (index, row) for the first row of *df*, or None if it is empty."""
    return next(df.iterrows(), None)
def pd_is_one_row(df):
    """Return (index, row) when *df* has exactly one row.

    Returns (None, None) for an empty dataframe; raises ValueError when
    there is more than one row.
    """
    rowCount = df.shape[0]
    if rowCount == 0:
        return None, None
    if rowCount > 1:
        raise ValueError("Expected 1 match, got", rowCount)
    # Exactly one row: hand back its (index, Series) pair.
    return next(df.iterrows())
def pd_rows_colval(df, colname, val):
    """Return the rows of *df* whose column *colname* equals *val*."""
    mask = df[colname] == val
    return df[mask]
# Get rows for which several columns have some exact values
# TODO: Implement partial matches
# TODO: Implement inequalities
def pd_query(df, queryDict, dropQuery=False):
    """Return the rows of *df* whose columns exactly match every queryDict entry.

    :param df: dataframe to filter
    :param queryDict: {column_name: exact_value}; an empty dict matches everything
    :param dropQuery: when True, drop the queried columns from the result

    Uses boolean masks instead of DataFrame.query string evaluation, so values
    of any type (tuples, strings containing quotes, ...) are supported — this
    fixes the former FIXME about complex datatypes. Querying a column that
    does not exist returns an empty dataframe with the same columns.
    """
    assert isinstance(queryDict, dict)
    if len(queryDict) == 0 or len(df) == 0:
        return df
    # If a non-existing column is requested, return empty
    for colname in queryDict.keys():
        if colname not in df.columns:
            return pd.DataFrame(columns=df.columns)
    mask = pd.Series(True, index=df.index)
    for colname, val in queryDict.items():
        # apply() compares each cell as a plain Python object, so tuple-valued
        # cells compare correctly instead of broadcasting elementwise.
        mask &= df[colname].apply(lambda cell, v=val: cell == v)
    rez = df[mask]
    if dropQuery:
        return rez.drop(columns=list(queryDict.keys()))
    return rez
# Return all rows for which values match the list exactly
def pd_query_exact(df, lst):
    """Return the rows whose values (taken in column order) equal *lst* exactly."""
    queryDict = dict(zip(df.columns, lst))
    return pd_query(df, queryDict)
# Check if there is at least 1 row that matches the list exactly
def pd_row_exists(df, lst):
    """Return True when at least one row of *df* matches *lst* exactly."""
    matches = pd_query_exact(df, lst)
    return len(matches) > 0
##############################
# Merging and Stacking
##############################
# Add new row to dataframe, unless such a row is already present
def pd_append_row(df, lst, skip_repeat=False):
    """Return *df* with *lst* appended as a new row.

    When skip_repeat is True and an identical row already exists, the
    original dataframe is returned unchanged (with a notice printed).

    Uses pd.concat because DataFrame.append was deprecated in pandas 1.4
    and removed in pandas 2.0.
    """
    if skip_repeat and pd_row_exists(df, lst):
        print("Skipping existing row", lst)
        return df
    newRow = pd.DataFrame([lst], columns=df.columns)
    return pd.concat([df, newRow], ignore_index=True)
# Appends all dataframes in a list
# A new column is added with values unique to the rows of original dataframes
def pd_vstack_df(dfLst, colName, colVals):
    """Vertically stack dataframes, tagging each row with its frame of origin.

    A new column *colName* is added to (a copy of) each dataframe, holding the
    matching value from *colVals*, before stacking. The stacked index is reset
    into a regular 'index' column, matching the original behaviour.

    Uses pd.concat because DataFrame.append was removed in pandas 2.0.
    """
    tagged = []
    for df, val in zip(dfLst, colVals):
        df1 = df.copy()
        df1[colName] = val
        tagged.append(df1)
    rez = pd.concat(tagged) if tagged else pd.DataFrame()
    return rez.reset_index()
# Merge several dataframes with exactly the same structure by adding columns that have different values
# TODO: Test that dataframes are indeed equivalent
# TODO: Test that dataframe values are exactly the same except of split cols
def pd_merge_equivalent_df(dfLst, splitColNames, dfNames):
    """Merge structurally-identical dataframes by suffixing the split columns.

    The shared (non-split) columns come from the first dataframe; each split
    column reappears once per dataframe as '<col>_<dfName>'.
    TODO: Test that the dataframes are indeed equivalent outside the split cols.
    """
    merged = dfLst[0].drop(splitColNames, axis=1)
    for frame, frameName in zip(dfLst, dfNames):
        for splitCol in splitColNames:
            merged[splitCol + '_' + frameName] = frame[splitCol]
    return merged
def pd_merge_multiple(dfNamesLst, dfLst, categoricalCols):
    """Inner-join several dataframes on shared categorical columns.

    Non-categorical columns are suffixed with '_<dfName>'. pd.merge leaves the
    left frame's columns unsuffixed, so the first dataframe's extra columns
    are renamed manually at the end.
    """
    dfJoint = dfLst[0].copy()
    for i in range(1, len(dfNamesLst)):
        dfJoint = pd.merge(dfJoint, dfLst[i], how="inner",
                           on=categoricalCols, suffixes=('', '_' + dfNamesLst[i]))
    for extraCol in set(dfLst[0].columns) - set(categoricalCols):
        dfJoint[extraCol + '_' + dfNamesLst[0]] = dfJoint[extraCol]
        dfJoint.drop(extraCol, axis=1, inplace=True)
    return dfJoint
def merge_df_from_dict(dfDict, columnNames):
    '''
    :param dfDict: keys are extra column values as tuple (or a single string).
        Values are dataframes; all dataframes must have the same columns.
    :param columnNames: names of the extra columns
    :return: a single dataframe merging all dataframes, with the dict keys
        spread into leading columns named by *columnNames*
    '''
    merged = []
    for key, frame in dfDict.items():
        keyVals = [key] if isinstance(key, str) else key
        dfCopy = frame.copy()
        # Insert in reverse so the final column order follows columnNames.
        for colName, colVal in zip(columnNames[::-1], keyVals[::-1]):
            dfCopy.insert(0, colName, colVal)
        merged.append(dfCopy)
    return pd.concat(merged, sort=False).reset_index(drop=True)
##############################
# Delete
##############################
def drop_rows_byquery(df, queryLst):
    """Return a copy of ``df`` with every row matched by any query removed.

    Each entry of ``queryLst`` is a query dict understood by ``pd_query``;
    matching is always done against the original ``df``.
    """
    remaining = df.copy()
    for query in queryLst:
        matched = pd_query(df, query)
        remaining = remaining.drop(index=matched.index)
    return remaining
##############################
# Constructors
##############################
# Get a dictionary where keys are column names and values are possible values for that column
# Construct a dataframe where rows are all combinations of provided column values
def outer_product_df(d):
    """Build a dataframe whose rows are the Cartesian product of values.

    ``d`` maps column names to iterables of candidate values; the result
    has one row per combination, columns ordered as in ``d``.
    """
    columns = list(d.keys())
    rows = list(itertools.product(*d.values()))
    return pd.DataFrame(rows, columns=columns)
##############################
# Manipulation
##############################
# Move some of the columns in front in that order, the rest stay in the same order at the end
def pd_move_cols_front(df, colsMove):
    """Return ``df`` with ``colsMove`` as the leading columns.

    The moved columns appear in the order given; all remaining columns
    keep their original relative order.
    """
    order = list(df.keys())
    # Walk backwards so repeated insert(0, ...) yields the requested
    # front order.
    for col in colsMove[::-1]:
        order.remove(col)
        order.insert(0, col)
    return df.loc[:, order]
def pd_category_to_column(df, catName, rezName):
    """Spread one value column into per-category columns.

    For every distinct value of the categorical column ``catName``, the
    rows holding that value get their ``rezName`` column renamed to the
    category value; the per-category slices are then stacked back
    together (cells of other categories' columns become NaN).

    :param df: input dataframe
    :param catName: name of the column containing categorical data
    :param rezName: name of the column containing non-categorical data
    :return: stacked dataframe with one value column per category and the
        old index preserved in an 'index' column

    NOTE(review): despite an earlier comment, ``catName`` is NOT dropped
    from the result — behavior kept as-is; confirm against callers.
    """
    categories = set(df[catName])
    pieces = []
    for catVal in categories:
        # .copy-free chaining: rename() without inplace returns a renamed
        # copy, avoiding the SettingWithCopy warning the old code hit by
        # renaming a boolean-mask slice in place.
        piece = df[df[catName] == catVal].rename(columns={rezName: catVal})
        pieces.append(piece)
    # DataFrame.append was removed in pandas 2.0; concat the slices once.
    if not pieces:
        return pd.DataFrame().reset_index()
    return pd.concat(pieces).reset_index()
# Convert list of combinations of two categorical variables into 2D indexed dataframe
def pd_pivot(df, xLabel, yLabel, valLabel, xVals=None, yVals=None):
    """Pivot a long-form table of (x, y, value) rows into a 2D dataframe.

    :param df: long-form dataframe holding one row per (x, y) pair
    :param xLabel: column used as the row index of the pivot
    :param yLabel: column used as the column index of the pivot
    :param valLabel: column providing the cell values
    :param xVals: explicit row order (default: sorted unique values)
    :param yVals: explicit column order (default: sorted unique values)
    :return: pivoted dataframe with axes in the requested order
    """
    # Construct 2D pivot.
    dfPivot = df.pivot(index=xLabel, columns=yLabel, values=valLabel)
    # Change order of dimensions; pivot itself sorts labels, which is
    # rarely the order callers want. (Stray debug print removed.)
    xVals = xVals if xVals is not None else sorted(set(df[xLabel]))
    yVals = yVals if yVals is not None else sorted(set(df[yLabel]))
    return dfPivot[yVals].loc[xVals]
| StarcoderdataPython |
145369 | <filename>dev/mlsqltestssupport/aliyun/upload_release.py
# -*- coding: utf-8 -*-
import os
import mlsqltestssupport.aliyun.config as config
# Guard: os.environ['...'] raises KeyError when the variable is missing,
# so the intended ValueError was unreachable for the unset case. Read
# with .get() so both "unset" and "empty string" surface the ValueError.
fileName = os.environ.get('MLSQL_RELEASE_TAR')
if not fileName:
    raise ValueError('MLSQL_RELEASE_TAR should be configured')
# Upload the release tarball to OSS under its bare file name.
bucket = config.ossClient()
bucket.put_object_from_file(fileName.split("/")[-1], fileName)
print("success uploaded")
| StarcoderdataPython |
1957621 | <filename>flask_blog/api/views.py
from flask import Blueprint, jsonify
from flask_blog.auth.decorators import requires_basic_auth
api = Blueprint('api', __name__)
@api.route('/hello-world', methods=['GET'])
@requires_basic_auth
def login():
    # Authenticated smoke-test endpoint: returns a static JSON greeting.
    # Access requires HTTP Basic auth via the decorator above.
    # NOTE(review): the name 'login' does not match the '/hello-world'
    # route — presumably copied from another handler; confirm before renaming.
    return jsonify({'message': 'Hello World!'})
| StarcoderdataPython |
3266463 | #!/usr/bin/env python3
import sys
import argparse
from Bio import SeqIO
from gffpal.gff import GFFRecord, Strand
from gffpal.attributes import GFFAttributes
def cli(prog, args):
    """Build and parse the converter's command line.

    :param prog: program name shown in usage/help output
    :param args: argument list to parse (excluding the program name)
    :return: parsed argparse namespace with genome, infile, source, outfile
    """
    parser = argparse.ArgumentParser(
        prog=prog,
        description=""" Converts a tab-separated blast-like file to a GFF3.
        The table should have a header, and column names should match
        mmseqs labels.
        """
    )
    parser.add_argument("genome", type=argparse.FileType('r'),
                        help="The source.")
    parser.add_argument("infile", type=argparse.FileType('r'),
                        help="Input fasta file.")
    parser.add_argument("-s", "--source", type=str, default="mitefinder",
                        help="The source.")
    parser.add_argument("-o", "--outfile", default=sys.stdout,
                        type=argparse.FileType('w'),
                        help="Output GFF3 file. Default stdout.")
    return parser.parse_args(args)
def split_desc(seq, seqids):
    """Decode a pipe-delimited MiteFinderII sequence id.

    The id has the shape
    ``<tag>|<seqnum>|<lstart>|<lend>|<rstart>|<rend>|...:<score>``
    where ``seqnum`` is a 1-based index into ``seqids``.

    :return: (seqid, lborder_start, lborder_end,
        rborder_start, rborder_end, score)
    """
    fields = seq.id.split("|")
    seqid = seqids[int(fields[1]) - 1]
    left_start, left_end, right_start, right_end = (
        int(f) for f in fields[2:6]
    )
    score = float(fields[-1].split(":", maxsplit=1)[1])
    return seqid, left_start, left_end, right_start, right_end, score
def get_region_feature(i, seqid, left, right, score):
    """Build the parent ``repeat_region`` GFF record spanning both TIRs.

    :param i: running number used to derive the region id
    :param left: [start, end] of the left border
    :param right: [start, end] of the right border
    :param score: mitefinder score, stored as record score and attribute
    """
    bounds = left + right
    return GFFRecord(
        seqid=seqid,
        source="MiteFinderII",
        type="repeat_region",
        start=min(bounds),
        end=max(bounds),
        score=score,
        strand=Strand.UNKNOWN,
        attributes=GFFAttributes(
            id=f"repeat_region{i}",
            ontology_term=["SO:0000657", "SO:repeat_region"],
            custom={"mitefinder_score": score},
        ),
    )
def get_tir_feature(i, seqid, pos):
    """Build a ``terminal_inverted_repeat`` child record of region ``i``.

    :param pos: [start, end] of the repeat border (orientation-agnostic)
    """
    attrs = GFFAttributes(
        parent=[f"repeat_region{i}"],
        ontology_term=["SO:0000481", "SO:terminal_inverted_repeat"]
    )
    return GFFRecord(
        seqid=seqid,
        source="MiteFinderII",
        type="terminal_inverted_repeat",
        start=min(pos),
        end=max(pos),
        score=None,
        strand=Strand.UNKNOWN,
        attributes=attrs,
    )
def get_mite_feature(i, seqid, left, right):
    """Build the ``MITE`` record covering the interior between the TIRs.

    Runs from the inner edge of the left border (max of ``left``) to the
    inner edge of the right border (min of ``right``).
    """
    start = max(left)
    end = min(right)
    assert start < end
    attrs = GFFAttributes(
        parent=[f"repeat_region{i}"],
        ontology_term=["SO:0000338", "SO:MITE"]
    )
    return GFFRecord(
        seqid=seqid,
        source="MiteFinderII",
        type="MITE",
        start=start,
        end=end,
        score=None,
        strand=Strand.UNKNOWN,
        attributes=attrs,
    )
def main():
    """Entry point: convert MiteFinderII fasta output into GFF3 records."""
    args = cli(sys.argv[0], sys.argv[1:])
    seqs = SeqIO.parse(args.infile, format="fasta")
    genome = SeqIO.parse(args.genome, format="fasta")
    # Map MiteFinder's 1-based sequence numbers back to real genome seqids.
    seqids = [s.id for s in genome]
    i = 1
    for seq in seqs:
        (seqid, lborder_start, lborder_end,
         rborder_start, rborder_end, score) = split_desc(seq, seqids)
        # One parent repeat_region per hit, with two TIR children and the
        # MITE interior, emitted in document order (region, ltir, mite, rtir).
        region = get_region_feature(
            i,
            seqid,
            [lborder_start, lborder_end],
            [rborder_start, rborder_end],
            score
        )
        ltir = get_tir_feature(i, seqid, [lborder_start, lborder_end])
        rtir = get_tir_feature(i, seqid, [rborder_start, rborder_end])
        mid = get_mite_feature(
            i,
            seqid,
            [lborder_start, lborder_end],
            [rborder_start, rborder_end],
        )
        print(region, file=args.outfile)
        print(ltir, file=args.outfile)
        print(mid, file=args.outfile)
        print(rtir, file=args.outfile)
        i += 1
    return


if __name__ == "__main__":
    main()
| StarcoderdataPython |
from math import sin, cos, tan, atan, sqrt, radians

# Image dimensions in pixels.
imgw = 1920
imgh = 1080
a = radians(42.5)    # camera vertical view angle +/-3
b = radians(69.4)    # camera horizontal view angle +/-3
aGAO = radians(39)   # camera optical angle (to z-axis)
AO = 80              # distance from camera to ground
# Precomputed projection geometry (A = camera, O = foot point on the
# ground, G = point where the optical axis meets the ground).
CO = tan(aGAO - a/2)*AO
AC = sqrt(AO*AO + CO*CO)
AF = cos(a/2)*AC
AG = AO/cos(aGAO)    # optical line
# trapezium to triangle angle
GO = tan(aGAO)*AO
RO = AO/tan(aGAO)
PQ = 2*tan(b/2)*AF   # PQ get through F
CD = 2*sin(a/2)*AC


def to_coord(x, y):
    """Project an image pixel (x, y) onto ground-plane coordinates.

    :param x: pixel column, 0..imgw (origin top-left)
    :param y: pixel row, 0..imgh (origin top-left)
    :return: (rx, ry) ground coordinates in the camera's point of view
    """
    # Re-center: x measured right of image center, y above image center.
    x = x - imgw/2
    y = imgh/2 - y
    HF = y/imgh*CD
    aGAE = atan(HF/AF)
    # NOTE(review): for y < 0, aGAE is already negative, so subtracting it
    # widens the angle; both branches therefore compute aGAO + |aGAE| —
    # confirm this is the intended geometry.
    aEAO = aGAO + aGAE if y >= 0 else aGAO - aGAE
    EO = tan(aEAO)*AO
    ry = EO
    imX = x/imgw*(AG*PQ/AF)
    rx = imX*(EO + RO)/(GO + RO)
    print('Camera POV:', rx, ry)
    return rx, ry


if __name__ == '__main__':
    to_coord(300, 300)
9730253 | from django.contrib.messages.views import SuccessMessageMixin
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from braces.views import LoginRequiredMixin
from .forms import FeedbackForm
from .models import Feedback
class FeedbackCreate(LoginRequiredMixin, SuccessMessageMixin, generic.CreateView):
    """Authenticated view for submitting a new Feedback item."""
    model = Feedback
    form_class = FeedbackForm
    # Dutch for "Feedback sent" -- flashed by SuccessMessageMixin.
    success_message = _('Feedback verstuurd')

    def get_initial(self):
        """Pre-fill the form's 'url' field with the referring page (may be None)."""
        initial = super(FeedbackCreate, self).get_initial()
        initial['url'] = self.request.META.get('HTTP_REFERER')
        return initial

    def form_valid(self, form):
        """Attach the logged-in user as submitter before saving."""
        form.instance.submitter = self.request.user
        return super(FeedbackCreate, self).form_valid(form)

    def get_success_url(self):
        """Redirect to the thank-you page for the newly created feedback."""
        return reverse('feedback:thanks', args=(self.object.pk,))
class FeedbackThanks(LoginRequiredMixin, generic.DetailView):
    """Thank-you page shown after a feedback submission (login required)."""
    model = Feedback
    template_name = 'feedback/feedback_thanks.html'
class FeedbackListing(LoginRequiredMixin, generic.ListView):
    """Overview of all submitted feedback items (login required)."""
    model = Feedback
| StarcoderdataPython |
3538639 | # coding: utf-8
import sys
#from AppTestStringObjectWithDict
def test_format_item_dict():
d = {'i': 23}
assert 'a23b' == 'a%(i)sb' % d
assert '23b' == '%(i)sb' % d
assert 'a23' == 'a%(i)s' % d
assert '23' == '%(i)s' % d
def test_format_two_items():
    # Mapping keys may appear multiple times and in any order in the
    # format string; each lookup hits the dict independently.
    d = {'i': 23, 'j': 42}
    assert 'a23b42c' == 'a%(i)sb%(j)sc' % d
    assert 'a23b23c' == 'a%(i)sb%(i)sc' % d
def test_format_percent_dict():
d = {}
assert 'a%b' == 'a%%b' % d
def test_format_empty_key():
d = {'':42}
assert '42' == '%()s' % d
def test_format_wrong_char_dict():
d = {'i': 23}
raises(ValueError, 'a%(i)Zb'.__mod__, d)
def test_format_missing():
d = {'i': 23}
raises(KeyError, 'a%(x)sb'.__mod__, d)
def test_format_error():
d = {}
assert '' % d == ''
n = 5
raises(TypeError, "'' % n")
class MyMapping(object):
def __getitem__(self, key):
py.test.fail('should not be here')
assert '' % MyMapping() == ''
class MyMapping2(object):
def __getitem__(self, key):
return key
assert '%(key)s'%MyMapping2() == 'key'
#assert u'%(key)s'%MyMapping2() == u'key' # no py3k
#from AppTestStringObject
def test_format_item_string():
n = 23
assert 'a23b' == 'a%sb' % n
assert '23b' == '%sb' % n
assert 'a23' == 'a%s' % n
assert '23' == '%s' % n
def test_format_percent_tuple():
t = ()
assert 'a%b' == 'a%%b' % t
assert '%b' == '%%b' % t
assert 'a%' == 'a%%' % t
assert '%' == '%%' % t
def test_format_percent_subclass_tuple_ignores_iter():
    # %-formatting must read the stored items of a tuple subclass
    # directly, never via an overridden __iter__.
    class t(tuple):
        def __iter__(self):
            yield 1
            yield 2
            yield 3
    assert "%s %s %s" % t((4, 5, 6)) == "4 5 6"
def test_format_too_much():
raises(TypeError, '%s%s'.__mod__, ())
raises(TypeError, '%s%s'.__mod__, (23,))
def test_format_not_enough():
raises(TypeError, '%s%s'.__mod__, (23,)*3)
raises(TypeError, '%s%s'.__mod__, (23,)*4)
def test_format_string():
s = '23'
assert '23' == '%s' % s
assert "'23'" == '%r' % s
raises(TypeError, '%d'.__mod__, s)
def test_format_float():
f = -23.456
assert '-23' == '%d' % f
assert '-23' == '%i' % f
assert '-23' == '%u' % f
e = raises(TypeError, "'%x' % f")
assert str(e.value).startswith("%x format:")
e = raises(TypeError, "'%X' % f")
assert str(e.value).startswith("%X format:")
raises(TypeError, "'%o' % f")
assert '-23.456' == '%s' % f
# for 'r' use a float that has an exact decimal rep:
g = 23.125
assert '23.125' == '%r' % g
h = 0.0276
assert '0.028' == '%.3f' % h # should work on most platforms...
big = 1E200
assert ' inf' == '%6g' % (big * big)
assert '0.' == '%#.0f' % 0.0
def test_format_int():
n = 23
z = 0
assert '23' == '%d' % n
assert '17' == '%x' % n
assert '0x17' == '%#x' % n
assert '0x0' == '%#x' % z
assert '23' == '%s' % n
assert '23' == '%r' % n
assert ('%d' % (-sys.maxsize-1,) == '-' + str(sys.maxsize+1)
== '-%d' % (sys.maxsize+1,))
n = 28
m = 8
assert '1C' == '%X' % n
assert '0X1C' == '%#X' % n
assert '10' == '%o' % m
assert '0o10' == '%#o' % m
assert '-0o10' == '%#o' % -m
assert '0' == '%o' % z
assert '0o0' == '%#o' % z
n = 23
f = 5
assert '-0x017' == '%#06x' % -n
assert '0' == '%.0o' % z
assert '0o0' == '%#.0o' % z
assert '5' == '%.0o' % f
assert '0o5' == '%#.0o' % f
assert '000' == '%.3o' % z
assert '0o000' == '%#.3o' % z
assert '005' == '%.3o' % f
assert '0o005' == '%#.3o' % f
assert '27' == '%.2o' % n
assert '0o27' == '%#.2o' % n
def test_format_subclass_with_str():
class SubInt2(int):
def __str__(self):
assert False, "not called"
def __hex__(self):
assert False, "not called"
def __oct__(self):
assert False, "not called"
def __int__(self):
assert False, "not called"
def __long__(self):
assert False, "not called"
sl = SubInt2(123)
assert '%i' % sl == '123'
assert '%u' % sl == '123'
assert '%d' % sl == '123'
assert '%x' % sl == '7b'
assert '%X' % sl == '7B'
assert '%o' % sl == '173'
skip("the rest of this test is serious nonsense imho, changed "
"only on 2.7.13, and is different on 3.x anyway. We could "
"reproduce it by writing lengthy logic, then get again the "
"reasonable performance by special-casing the exact type "
"'long'. And all for 2.7.13 only. Let's give up.")
class SubLong2(long):
def __str__(self):
return extra_stuff + 'Xx'
def __hex__(self):
return extra_stuff + '0xYy' + extra_tail
def __oct__(self):
return extra_stuff + '0Zz' + extra_tail
def __int__(self):
assert False, "not called"
def __long__(self):
assert False, "not called"
sl = SubLong2(123)
for extra_stuff in ['', '-']:
for extra_tail in ['', 'l', 'L']:
m = extra_stuff
x = '%i' % sl
assert x == m+'Xx'
assert '%u' % sl == m+'Xx'
assert '%d' % sl == m+'Xx'
assert '%x' % sl == m+('Yyl' if extra_tail == 'l' else 'Yy')
assert '%X' % sl == m+('YYL' if extra_tail == 'l' else 'YY')
assert '%o' % sl == m+('Zzl' if extra_tail == 'l' else 'Zz')
extra_stuff = '??'
raises(ValueError, "'%x' % sl")
raises(ValueError, "'%X' % sl")
raises(ValueError, "'%o' % sl")
def test_format_list():
l = [1,2]
assert '<[1, 2]>' == '<%s>' % l
assert '<[1, 2]-[3, 4]>' == '<%s-%s>' % (l, [3,4])
def test_format_tuple():
t = (1,2)
assert '<(1, 2)>' == '<%s>' % (t,)
assert '<(1, 2)-(3, 4)>' == '<%s-%s>' % (t, (3,4))
def test_format_dict():
# I'll just note that the first of these two completely
# contradicts what CPython's documentation says:
# When the right argument is a dictionary (or other
# mapping type), then the formats in the string
# \emph{must} include a parenthesised mapping key into
# that dictionary inserted immediately after the
# \character{\%} character.
# It is what CPython *does*, however. All software sucks.
d = {1:2}
assert '<{1: 2}>' == '<%s>' % d
assert '<{1: 2}-{3: 4}>' == '<%s-%s>' % (d, {3:4})
def test_format_wrong_char():
raises(ValueError, 'a%Zb'.__mod__, ((23,),))
raises(ValueError, u'a%\ud800b'.__mod__, ((23,),))
def test_incomplete_format():
raises(ValueError, '%'.__mod__, ((23,),))
raises((ValueError, TypeError), '%('.__mod__, ({},))
def test_format_char():
A = 65
e = 'e'
assert '%c' % A == 'A'
assert '%c' % e == 'e'
#raises(OverflowError, '%c'.__mod__, (256,)) # py2
assert '%c' % 256 == '\u0100' # py3k
raises(OverflowError, '%c'.__mod__, (-1,))
raises(OverflowError, '%c'.__mod__, (sys.maxunicode+1,))
raises(TypeError, '%c'.__mod__, ("bla",))
raises(TypeError, '%c'.__mod__, ("",))
raises(TypeError, '%c'.__mod__, (['c'],))
raises(TypeError, '%c'.__mod__, b'A')
surrogate = 0xd800
assert '%c' % surrogate == '\ud800'
def test___int__index__():
class MyInt(object):
def __init__(self, x):
self.x = x
def __int__(self):
return self.x
x = MyInt(33)
raises(TypeError, "'%c' % x")
MyInt.__index__ = lambda self: self.x * 2
assert '%c' % x == 'B'
def test_index_fails():
class IndexFails(object):
def __index__(self):
raise Exception
exc = raises(TypeError, "%x".__mod__, IndexFails())
expected = "%x format: an integer is required, not IndexFails"
assert str(exc.value) == expected
raises(TypeError, "%c".__mod__, IndexFails())
def test_formatting_huge_precision():
prec = 2**31
format_string = "%.{}f".format(prec)
exc = raises(ValueError, "format_string % 2.34")
assert str(exc.value) == 'precision too big'
raises(OverflowError, lambda: u'%.*f' % (prec, 1. / 7))
def test_formatting_huge_width():
format_string = "%{}f".format(sys.maxsize + 1)
exc = raises(ValueError, "format_string % 2.34")
assert str(exc.value) == 'width too big'
def test_wrong_formatchar_error_not_masked_by_not_enough_args():
with raises(ValueError):
"%?" % () # not TypeError (which would be due to lack of arguments)
with raises(ValueError):
"%?" % {} # not TypeError
#from AppTestWidthPrec
def test_width():
a = 'a'
assert "%3s" % a == ' a'
assert "%-3s"% a == 'a '
def test_no_chars_between_percent():
with raises(ValueError) as exc:
"% %" % ()
assert "extra character ' ' (0x20) before escaped '%' at index 1" in str(exc.value)
def test_prec_zero():
z = 0
assert "%.0x" % z == '0'
assert "%.0X" % z == '0'
assert "%.x" % z == '0'
assert "%.0d" % z == '0'
assert "%.i" % z == '0'
assert "%.0o" % z == '0'
assert "%.o" % z == '0'
def test_prec_string():
a = 'a'
abcde = 'abcde'
assert "%.3s"% a == 'a'
assert "%.3s"% abcde == 'abc'
def test_prec_width_string():
a = 'a'
abcde = 'abcde'
assert "%5.3s" % a == ' a'
assert "%5.3s" % abcde == ' abc'
assert "%-5.3s"% a == 'a '
assert "%-5.3s"% abcde == 'abc '
def test_zero_pad():
one = 1
ttf = 2.25
assert "%02d" % one == "01"
assert "%05d" % one == "00001"
assert "%-05d" % one == "1 "
assert "%04f" % ttf == "2.250000"
assert "%05g" % ttf == "02.25"
assert "%-05g" % ttf =="2.25 "
assert "%05s" % ttf == " 2.25"
def test_star_width():
f = 5
assert "%*s" %( f, 'abc') == ' abc'
assert "%*s" %(-f, 'abc') == 'abc '
assert "%-*s"%( f, 'abc') == 'abc '
assert "%-*s"%(-f, 'abc') == 'abc '
def test_star_prec():
t = 3
assert "%.*s"%( t, 'abc') == 'abc'
assert "%.*s"%( t, 'abcde') == 'abc'
assert "%.*s"%(-t, 'abc') == ''
def test_star_width_prec():
f = 5
assert "%*.*s"%( f, 3, 'abc') == ' abc'
assert "%*.*s"%( f, 3, 'abcde') == ' abc'
assert "%*.*s"%(-f, 3, 'abcde') == 'abc '
def test_long_format():
def f(fmt, x):
return fmt % x
assert '%.70f' % 2.0 == '2.' + '0' * 70
assert '%.110g' % 2.0 == '2'
def test_subnormal():
inf = 1e300 * 1e300
assert "%f" % (inf,) == 'inf'
assert "%E" % (inf,) == 'INF'
assert "%f" % (-inf,) == '-inf'
assert "%F" % (-inf,) == '-INF'
nan = inf / inf
assert "%f" % (nan,) == 'nan'
assert "%f" % (-nan,) == 'nan'
assert "%E" % (nan,) == 'NAN'
assert "%F" % (nan,) == 'NAN'
assert "%G" % (nan,) == 'NAN'
#from AppTestUnicodeObject
def test_unicode_d():
t = 3
assert "%.1d" % t == '3'
def test_unicode_overflow():
skip("nicely passes on top of CPython but requires > 2GB of RAM")
raises((OverflowError, MemoryError), 'u"%.*d" % (sys.maxint, 1)')
def test_unicode_format_a():
ten = 10
assert '%x' % ten == 'a'
def test_long_no_overflow():
big = 0x1234567890987654321
assert "%x" % big == "1234567890987654321"
def test_missing_cases():
big = -123456789012345678901234567890
assert '%032d' % big == '-0123456789012345678901234567890'
def test_invalid_char():
f = 4
raises(ValueError, '"%\u1234" % (f,)')
def test_invalid_b_with_unicode():
raises(ValueError, '"%b" % b"A"')
raises(ValueError, '"%b" % 42')
def test_formatting_huge_precision_u():
prec = 2**31
format_string = u"%.{}f".format(prec)
exc = raises(ValueError, "format_string % 2.34")
assert str(exc.value) == 'precision too big'
raises(OverflowError, lambda: u'%.*f' % (prec, 1. / 7))
def test_formatting_huge_width_u():
format_string = u"%{}f".format(sys.maxsize + 1)
exc = raises(ValueError, "format_string % 2.34")
assert str(exc.value) == 'width too big'
def test_unicode_error_position():
with raises(ValueError) as info:
u"\xe4\xe4\xe4%?" % {}
assert str(info.value) == "unsupported format character '?' (0x3f) at index 4"
with raises(ValueError) as info:
u"\xe4\xe4\xe4%\xe4" % {}
assert str(info.value) == "unsupported format character '\xe4' (0xe4) at index 4"
def test_ascii():
assert "<%a>" % "test" == "<'test'>"
assert "<%a>" % "\t\x80" == "<'\\t\\x80'>"
assert repr("\xe9") == "'\xe9'"
assert "<%r>" % "\xe9" == "<'\xe9'>"
assert "<%a>" % "\xe9" == "<'\\xe9'>"
#from AppTestBytes
def test_ascii_bytes():
assert b"<%a>" % b"test" == b"<b'test'>"
assert b"<%a>" % b"\t\x80" == b"<b'\\t\\x80'>"
assert repr(b"\xe9") == "b'\\xe9'"
assert b"<%a>" % b"\xe9" == b"<b'\\xe9'>"
assert b"<%a>" % "foo" == b"<'foo'>"
assert b"<%a>" % "\u1234" == b"<'\\u1234'>"
assert b"<%a>" % 3.14 == b"<3.14>"
def test_r_compat_bytes():
assert b"<%r>" % b"test" == b"<b'test'>"
assert b"<%r>" % b"\t\x80" == b"<b'\\t\\x80'>"
assert repr(b"\xe9") == "b'\\xe9'"
assert b"<%r>" % b"\xe9" == b"<b'\\xe9'>"
assert b"<%r>" % "foo" == b"<'foo'>"
assert b"<%r>" % "\u1234" == b"<'\\u1234'>"
def test_numeric_bytes():
assert b"<%4x>" % 10 == b"< a>"
assert b"<%#4x>" % 10 == b"< 0xa>"
assert b"<%04X>" % 10 == b"<000A>"
def test_char_bytes():
assert b"<%c>" % 48 == b"<0>"
assert b"<%c>" % b"?" == b"<?>"
raises(TypeError, 'b"<%c>" % "?"')
assert b"<%c>" % bytearray(b"?") == b"<?>"
class X:
def __bytes__(self):
return b'5'
raises(TypeError, 'b"<%c>" % X()')
def test_bytes_bytes():
assert b"<%b>" % b"123" == b"<123>"
class Foo:
def __bytes__(self):
return b"123"
assert b"<%b>" % Foo() == b"<123>"
raises(TypeError, 'b"<%b>" % 42')
raises(TypeError, 'b"<%b>" % "?"')
def test_s_compat_bytes():
assert b"<%s>" % b"123" == b"<123>"
class Foo:
def __bytes__(self):
return b"123"
assert b"<%s>" % Foo() == b"<123>"
raises(TypeError, 'b"<%s>" % 42')
raises(TypeError, 'b"<%s>" % "?"')
assert b"<%s>" % memoryview(b"X") == b"<X>"
#from AppTestBytearray
def test_ascii_bytearray():
assert bytearray(b"<%a>") % b"test" == bytearray(b"<b'test'>")
assert bytearray(b"<%a>") % b"\t\x80" == bytearray(b"<b'\\t\\x80'>")
assert repr(b"\xe9") == "b'\\xe9'"
assert bytearray(b"<%a>") % b"\xe9" == bytearray(b"<b'\\xe9'>")
assert bytearray(b"<%a>") % "foo" == bytearray(b"<'foo'>")
assert bytearray(b"<%a>") % "\u1234" == bytearray(b"<'\\u1234'>")
def test_bytearray_not_modified():
b1 = bytearray(b"<%a>")
b2 = b1 % b"test"
assert b1 == bytearray(b"<%a>")
assert b2 == bytearray(b"<b'test'>")
def test_r_compat_bytearray():
assert bytearray(b"<%r>") % b"test" == bytearray(b"<b'test'>")
assert bytearray(b"<%r>") % b"\t\x80" == bytearray(b"<b'\\t\\x80'>")
assert repr(b"\xe9") == "b'\\xe9'"
assert bytearray(b"<%r>") % b"\xe9" == bytearray(b"<b'\\xe9'>")
assert bytearray(b"<%r>") % "foo" == bytearray(b"<'foo'>")
assert bytearray(b"<%r>") % "\u1234" == bytearray(b"<'\\u1234'>")
def test_numeric_bytearray():
assert bytearray(b"<%4x>") % 10 == bytearray(b"< a>")
assert bytearray(b"<%#4x>") % 10 == bytearray(b"< 0xa>")
assert bytearray(b"<%04X>") % 10 == bytearray(b"<000A>")
def test_char_bytearray():
assert bytearray(b"<%c>") % 48 == bytearray(b"<0>")
assert bytearray(b"<%c>") % b"?" == bytearray(b"<?>")
raises(TypeError, 'bytearray(b"<%c>") % "?"')
assert bytearray(b"<%c>") % bytearray(b"?") == bytearray(b"<?>")
raises(TypeError, 'bytearray(b"<%c>") % memoryview(b"X")')
def test_bytes_bytearray():
assert bytearray(b"<%b>") % b"123" == bytearray(b"<123>")
class Foo:
def __bytes__(self):
return b"123"
assert bytearray(b"<%b>") % Foo() == bytearray(b"<123>")
raises(TypeError, 'bytearray(b"<%b>") % 42')
raises(TypeError, 'bytearray(b"<%b>") % "?"')
def test_s_compat_bytearray():
assert bytearray(b"<%s>") % b"123" == bytearray(b"<123>")
class Foo:
def __bytes__(self):
return b"123"
assert bytearray(b"<%s>") % Foo() == bytearray(b"<123>")
raises(TypeError, 'bytearray(b"<%s>") % 42')
raises(TypeError, 'bytearray(b"<%s>") % "?"')
assert bytearray(b"<%s>") % memoryview(b"X") == bytearray(b"<X>")
| StarcoderdataPython |
3574640 | import json
from typing import Optional
import PySide6.QtWidgets
from PySide6 import QtWidgets, QtCore, QtGui
from PySide6.QtCore import SIGNAL, QPoint
from PySide6.QtGui import QStandardItemModel, QStandardItem, QIcon
from PySide6.QtWidgets import QLineEdit, QFormLayout, QPushButton, QHBoxLayout, QListView
import src.widgets.CategoryMenuBar as CategoryMenuBar
from src.QtColors import QtColors
from src.annotations import AnnotateManager, Annotation
from src.widgets import rects
import os
class CategorieFrame(QtWidgets.QMainWindow):
def __init__(self, fPath, begin: QPoint, destination: QPoint, currentRect: QtWidgets.QGraphicsRectItem,
imgSize: tuple[int, int], scene: QtWidgets.QGraphicsScene,
parent: Optional[QtWidgets.QWidget] = ..., isEditing=False) -> None:
"""
:param fPath: image path
:type fPath: str
:param begin: begin point
:type begin: QPoint
:param destination: end point
:type destination: QPoint
:param currentRect: rectangle drawn by user
:type currentRect: QtWidgets.QGraphicsRectItem
:param imgSize: size of the image
:type imgSize: tuple[int, int]
:param scene: the graphicScene where rectangles are drawn
:type scene: QtWidgets.QGraphicsScene
:param parent: the parent frame of self
:type parent: Optional[QtWidgets.QWidget] = ...
:param isEditing: a boolean to know if the reactagle is being eddited or not
:type isEditing: bool
"""
super().__init__()
self.begin = begin
self.setFocus()
self.destination = destination
self.currentRect = currentRect
self.parent = parent
if self.currentRect.view is None:
self.currentRect.view = self.parent
self.isEditing = isEditing
self.listView = QListView(self)
self.lineEdit = QLineEdit()
self.addCat = QPushButton()
self.addCat.setText("Ok")
self.imgSize = imgSize
self.scene = scene
self.connect(self.addCat, SIGNAL("clicked()"), self.addCategory)
self.model = QStandardItemModel(self.listView)
self.listView.clicked[QtCore.QModelIndex].connect(self.onItemSelected)
self.listView.setModel(self.model)
self.itemSelectedIndex = None
self.oldItem = QStandardItem()
self.fPath = fPath
self.fName = self.fPath.split("/")[-1].split(".")[0]
self.buttonSelectCategory = QtWidgets.QPushButton(icon=QIcon("./ressources/assets/32x32validate.png"),
text="\tSelect category")
self.buttonSelectCategory.setEnabled(False)
self.buttonSelectCategory.clicked.connect(self.validate)
self.buttonChangeCategory = QtWidgets.QPushButton( # icon=QIcon("./ressources/assets/32x32delete.png"),
text="\tChange category")
self.buttonChangeCategory.setEnabled(False)
self.buttonChangeCategory.clicked.connect(self.changeCategory)
self.buttonDeleteCategory = QtWidgets.QPushButton(icon=QIcon("./ressources/assets/32x32delete.png"),
text="\tDelete category")
self.buttonDeleteCategory.setEnabled(False)
self.buttonDeleteCategory.clicked.connect(self.deleteCategory)
self.addCategoryWidget = QHBoxLayout()
self.addCategoryWidget.addWidget(self.lineEdit)
self.addCategoryWidget.addWidget(self.addCat)
self.addCategoryWidget.addStretch()
self.layout = QFormLayout()
self.layout.addRow("Add category", self.addCategoryWidget)
self.layout.addRow(self.listView)
self.layout.addRow(self.buttonSelectCategory)
self.layout.addRow(self.buttonChangeCategory)
self.layout.addRow(self.buttonDeleteCategory)
self.central = QtWidgets.QWidget()
self.central.setLayout(self.layout)
self.setCentralWidget(self.central)
self.menu = CategoryMenuBar.CategoryBar(self)
self.setMenuBar(self.menu)
self.setWindowTitle(self.currentRect.choice)
try:
if self.parent.isJSON and self.parent.fpathJSON != "":
self.loadCategoriesFileJSON(self.parent.fpathJSON)
elif not self.parent.isJSON and self.parent.fpathCSV != "":
self.loadCategoriesFileCSV(self.parent.fpathCSV)
except:
self.loadCategoriesFileJSON("./ressources/config/categories.json")
def validate(self):
"""
method called when the user validate a choice, it applies the given category on the rectangle and give it a color, write in the JSON.
:return:
"""
choice = self.categories[self.itemSelectedIndex]
color = QtColors.COLORS[self.itemSelectedIndex % QtColors.lengthColors]
if self.isEditing:
annotations = AnnotateManager.annotations[self.fName]["annotations"]
for annotation in annotations:
if annotation["id"] == self.currentRect.rectId:
annotation["categorie"] = choice
annotation["categorie_id"] = self.itemSelectedIndex
break
self.currentRect.setBrush(color)
self.currentRect.label.setStyleSheet("QLabel { color:" + color.name() + " }")
self.currentRect.label.setText(choice)
self.currentRect.choice = choice
self.currentRect.label.adjustSize()
else:
self.currentRect.label.setStyleSheet("QLabel { color:" + color.name() + " }")
self.currentRect.setBrush(color)
self.currentRect.label.setText(choice)
self.currentRect.choice = choice
AnnotateManager.addAnnotation(self.fName,
Annotation(
self.currentRect.rectId,
self.begin,
self.destination,
choice,
os.path.relpath(self.fPath),
self.itemSelectedIndex,
self.imgSize[0],
self.imgSize[1]
))
try:
rects.RECTS[self.fName].append(self.currentRect)
except:
rects.RECTS[self.fName] = [self.currentRect]
self.scene.addItem(self.currentRect)
self.scene.addWidget(self.currentRect.label)
self._close()
def _close(self):
"""
private method:
called when close
:return:
"""
self.close()
def onItemSelected(self, index):
"""
listener when an item in the category list
it activates the button to edit or validate a category
:param index:
:return:
"""
item = self.model.itemFromIndex(index)
self.itemSelectedIndex = item.row()
self.buttonSelectCategory.setEnabled(True)
self.buttonChangeCategory.setEnabled(True)
self.buttonDeleteCategory.setEnabled(True)
def addCategory(self):
"""
method called when th user add a category, it add it in the category file imported, if no category file is imported it write it in default file.
:return:
"""
if self.parent.fpathCSV != "" and not self.parent.isJSON:
newCategorie = self.lineEdit.text()
self.categories.append(newCategorie)
# string = ",".join(self.categories)
with open(self.parent.fpathCSV, "a") as f:
f.write("," + newCategorie)
else:
if self.parent.fpathJSON == "":
self.parent.fpathJSON = "./ressources/config/categories.json"
self.parent.isJSON = True
newCategorie = self.lineEdit.text()
self.categories.append(newCategorie)
data = []
for c in self.categories:
temp = {"category": c}
data.append(temp)
json_object = json.dumps(data, indent=2)
with open(self.parent.fpathJSON, "w") as outfile:
outfile.write(json_object)
self.loadCategories()
def deleteCategory(self):
"""
method called when user delete a category, it removes it from the category file imported
:return:
"""
if self.listView.selectedIndexes():
selectedCategorie = self.listView.currentIndex().data()
self.categories.remove(selectedCategorie)
self.deleteSquares()
self.loadCategoriesCSVJson()
AnnotateManager.deleteAnnotation(selectedCategorie)
self.loadCategories()
def changeCategory(self):
"""
change the category of a rectangle when it is edited
:return:
"""
if self.listView.selectedIndexes():
selectedCategorie = self.listView.currentIndex().data()
idx = int(str(self.listView.currentIndex()).replace("<PySide6.QtCore.QModelIndex(", '')[0])
oldCat = self.categories[idx]
self.categories[idx] = selectedCategorie
self.loadCategoriesCSVJson()
AnnotateManager.changeAnnotation(selectedCategorie, oldCat)
self.loadCategories()
for i, rect in enumerate(rects.RECTS[self.fName]):
annotation = AnnotateManager.annotations[self.fName]["annotations"][i]
if annotation["id"] == rect.rectId:
rect.choice = annotation["categorie"]
rect.label.setText(annotation["categorie"])
rect.label.adjustSize()
def loadCategoriesCSVJson(self):
"""
load a category from the path edited by the dialog
:return:
"""
if self.parent.fpathCSV != "" and not self.parent.isJSON:
string = ",".join(self.categories)
with open(self.parent.fpathCSV, "w+") as f:
f.write(string)
if self.parent.fpathJSON != "" and self.parent.isJSON:
# self.categories.remove(selectedCategorie)
data = []
for c in self.categories:
temp = {"category": c}
data.append(temp)
json_object = json.dumps(data, indent=2)
with open(self.parent.fpathJSON, "w") as outfile:
outfile.write(json_object)
def loadCategories(self):
"""
update categories
:return:
"""
self.model.clear()
for category in self.categories:
item = QStandardItem(category)
item.setEditable(True)
self.model.appendRow(item)
def loadCategoriesFileCSV(self, fpathCSV):
"""
load a category from a specific file path
:param fpathCSV:
:type fpathCSV: str
:return:
"""
if fpathCSV != "":
self.parent.fpathCSV = fpathCSV
self.parent.isJSON = False
self.parent.fpathJSON = ""
fd = open(fpathCSV)
lines = " ".join(fd.readlines())
cat = lines.split(",")
self.categories = cat
self.loadCategories()
def loadCategoriesFileJSON(self, fpathJSON):
"""
load a category from a specific file path
:param fpathJSON:
:type fpathJSON: str
:return:
"""
if fpathJSON != "":
self.parent.fpathJSON = fpathJSON
self.parent.fpathCSV = ""
self.parent.isJSON = True
fd = open(fpathJSON)
data = json.load(fd)
categories = []
for d in data:
categories.append(d["category"])
self.categories = categories
self.loadCategories()
def closeEvent(self, event: PySide6.QtGui.QCloseEvent) -> None:
    """
    Event when the user close the category frame widget

    Re-enables the image view when this category frame is closed.
    If nothing was selected the rectangle is removed from the image;
    if the validated rectangle covers other rectangles, the covered ones
    (queued in ``graphicsView.rectsToRemove``) are deleted together with
    their labels and annotations.
    :param event: closing event
    :type event: PySide6.QtGui.QCloseEvent
    :return:
    """
    try:
        # Give interaction back to the image view.
        self.currentRect.view.setDisabled(False)
        if not self.currentRect in rects.RECTS[self.fName]:
            # The rectangle was never validated: discard it from the scene.
            self.scene.removeItem(self.currentRect)
        elif self.parent.graphicsView.rectsToRemove != []:
            # The rectangle was validated and covers older rectangles:
            # remove those from the scene, the registry and the annotations.
            for i in range(len(self.parent.graphicsView.rectsToRemove)):
                self.parent.graphicsView.rectsToRemove[i].label.hide()
                self.scene.removeItem(self.parent.graphicsView.rectsToRemove[i])
                try:
                    idx = rects.RECTS[self.parent.fName].index(self.parent.graphicsView.rectsToRemove[i])
                    del AnnotateManager.annotations[self.parent.fName]["annotations"][idx]
                    del rects.RECTS[self.parent.fName][idx]
                except ValueError:
                    # Rectangle already unregistered; nothing more to do.
                    pass
            self.parent.graphicsView.rectsToRemove = []
            self.parent.graphicsView.indexesAnnotation = []
    except:
        # NOTE(review): bare except silently hides any error here (e.g. a
        # missing ``currentRect``); consider narrowing to the expected
        # AttributeError/KeyError.
        pass
def deleteSquares(self):
    """
    Remove every rectangle whose category is no longer in ``self.categories``.

    Called after a right click on a rectangle / after the category list
    changed; hides the labels and drops the rectangles from the scene and
    the per-image registry.
    :return:
    """
    if self.fName not in rects.RECTS:
        rects.RECTS[self.fName] = []
    # First pass: collect the rectangles to delete (their annotation is
    # stored at the same index as the rectangle).
    doomed = []
    for pos, rect in enumerate(rects.RECTS[self.fName]):
        note = AnnotateManager.annotations[self.fName]["annotations"][pos]
        if note["categorie"] not in self.categories:
            doomed.append(rect)
    # Second pass: actually remove them, re-resolving the index each time
    # because deletions shift the list.
    for rect in doomed:
        position = rects.RECTS[self.fName].index(rect)
        self.scene.removeItem(rect)
        rect.label.hide()
        del rects.RECTS[self.fName][position]
| StarcoderdataPython |
11273479 | # Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
logger = logging.getLogger(__name__)
class WorkerLookUp:
    """Builder for a JSON-RPC ``WorkerLookUp`` request used by the test suite."""

    def __init__(self):
        # Envelope of the JSON-RPC request; "params" is attached lazily by
        # to_string().
        self.id_obj = {"jsonrpc": "2.0", "method": "WorkerLookUp", "id": 1}
        self.params_obj = {}
        self.request_mode = "file"
        self.tamper = {"params": {}}
        self.output_json_file_name = "worker_lookup"

    def add_json_values(self, input_json_temp, tamper):
        """Copy relevant fields of *input_json_temp* plus any tamper overrides."""
        params = input_json_temp["params"]
        if "workerType" in params.keys():
            worker_type = params["workerType"]
            # An explicit empty string falls back to the default type (1).
            self.set_worker_type(worker_type if worker_type != "" else 1)
        if "id" in input_json_temp.keys():
            self.set_request_id(input_json_temp["id"])
        for name, value in tamper["params"].items():
            self.set_unknown_parameter(name, value)

    def set_unknown_parameter(self, param, value):
        """Inject an arbitrary (possibly invalid) parameter for tamper tests."""
        self.params_obj[param] = value

    def set_worker_type(self, worker_type):
        self.params_obj["workerType"] = worker_type

    def set_request_id(self, request_id):
        self.id_obj["id"] = request_id

    def get_params(self):
        """Return a shallow copy of the accumulated params."""
        return self.params_obj.copy()

    def to_string(self):
        """Attach the current params to the envelope and pretty-print it."""
        self.id_obj["params"] = self.get_params()
        return json.dumps(self.id_obj, indent=4)

    def configure_data(self, input_json, worker_obj, pre_test_response):
        """Build the final request dict; defaults to workerType 1 when no input."""
        if input_json is None:
            self.set_worker_type(1)
        else:
            self.add_json_values(input_json, self.tamper)
        return json.loads(self.to_string())

    def configure_data_sdk(self, input_json, worker_obj, pre_test_response):
        """Map the numeric workerType to the SDK's symbolic name.

        Returns 'SGX' when no input is given, '' when the field is missing,
        and the raw value unchanged when it is not one of 1/2/3.
        """
        if input_json is None:
            return 'SGX'
        try:
            worker_value = input_json["params"]["workerType"]
        except LookupError:
            return ""
        if worker_value == 1:
            return 'SGX'
        if worker_value == 2:
            return 'MPC'
        if worker_value == 3:
            return 'ZK'
        return worker_value
| StarcoderdataPython |
11214059 | <filename>twitchAPI/types.py
# Copyright (c) 2020. Lena "Teekeks" During <<EMAIL>>
from enum import Enum
class AnalyticsReportType(Enum):
    """Enum of all Analytics report types; values are the strings the Twitch API expects.

    :var V1: overview report, version 1
    :var V2: overview report, version 2
    """
    V1 = 'overview_v1'
    V2 = 'overview_v2'
class AuthScope(Enum):
    """Enum of OAuth authentication scopes; values are the scope strings sent to Twitch.

    :var ANALYTICS_READ_EXTENSION:
    :var ANALYTICS_READ_GAMES:
    :var BITS_READ:
    :var CHANNEL_READ_SUBSCRIPTIONS:
    :var CLIPS_EDIT:
    :var USER_EDIT:
    :var USER_EDIT_BROADCAST:
    :var USER_READ_BROADCAST:
    :var USER_READ_EMAIL:
    :var CHANNEL_MODERATE:
    :var CHAT_EDIT:
    :var CHAT_READ:
    :var WHISPERS_READ:
    :var WHISPERS_EDIT:
    :var MODERATION_READ:
    """
    ANALYTICS_READ_EXTENSION = 'analytics:read:extensions'
    ANALYTICS_READ_GAMES = 'analytics:read:games'
    BITS_READ = 'bits:read'
    CHANNEL_READ_SUBSCRIPTIONS = 'channel:read:subscriptions'
    CLIPS_EDIT = 'clips:edit'
    USER_EDIT = 'user:edit'
    USER_EDIT_BROADCAST = 'user:edit:broadcast'
    USER_READ_BROADCAST = 'user:read:broadcast'
    USER_READ_EMAIL = 'user:read:email'
    CHANNEL_MODERATE = 'channel:moderate'
    CHAT_EDIT = 'chat:edit'
    CHAT_READ = 'chat:read'
    WHISPERS_READ = 'whispers:read'
    WHISPERS_EDIT = 'whispers:edit'
    MODERATION_READ = 'moderation:read'
class ModerationEventType(Enum):
    """Enum of moderation event types

    :var BAN:
    :var UNBAN:
    :var UNKNOWN: fallback member used when the API sends an unrecognised event type
    """
    BAN = 'moderation.user.ban'
    UNBAN = 'moderation.user.unban'
    UNKNOWN = ''
class TimePeriod(Enum):
    """Enum of valid time periods accepted by time-ranged API endpoints.

    :var ALL:
    :var DAY:
    :var WEEK:
    :var MONTH:
    :var YEAR:
    """
    ALL = 'all'
    DAY = 'day'
    WEEK = 'week'
    MONTH = 'month'
    YEAR = 'year'
class SortMethod(Enum):
    """Enum of valid sort methods for list endpoints.

    :var TIME:
    :var TRENDING:
    :var VIEWS:
    """
    TIME = 'time'
    TRENDING = 'trending'
    VIEWS = 'views'
class VideoType(Enum):
    """Enum of valid video types; values are the strings the Twitch API expects.

    :var ALL:
    :var UPLOAD:
    :var ARCHIVE:
    :var HIGHLIGHT:
    :var UNKNOWN: fallback member used when the API sends an unrecognised type
    """
    ALL = 'all'
    # The trailing commas that used to follow UPLOAD and ARCHIVE made their
    # values 1-tuples ('upload',) / ('archive',) instead of strings, which
    # broke value lookups such as VideoType('upload'); they are plain
    # strings now, consistent with every other member.
    UPLOAD = 'upload'
    ARCHIVE = 'archive'
    HIGHLIGHT = 'highlight'
    UNKNOWN = ''
class AuthType(Enum):
    """Type of authentication required. Only internal use

    :var NONE: No auth required
    :var USER: User auth required
    :var APP: app auth required
    """
    NONE = 0
    USER = 1
    APP = 2
class CodeStatus(Enum):
    """Enum Code Status, see https://dev.twitch.tv/docs/api/reference#get-code-status for more documentation

    :var SUCCESSFULLY_REDEEMED:
    :var ALREADY_CLAIMED:
    :var EXPIRED:
    :var USER_NOT_ELIGIBLE:
    :var NOT_FOUND:
    :var INACTIVE:
    :var UNUSED:
    :var INCORRECT_FORMAT:
    :var INTERNAL_ERROR:
    :var UNKNOWN_VALUE: fallback member used when the API sends an unrecognised status
    """
    SUCCESSFULLY_REDEEMED = 'SUCCESSFULLY_REDEEMED'
    ALREADY_CLAIMED = 'ALREADY_CLAIMED'
    EXPIRED = 'EXPIRED'
    USER_NOT_ELIGIBLE = 'USER_NOT_ELIGIBLE'
    NOT_FOUND = 'NOT_FOUND'
    INACTIVE = 'INACTIVE'
    UNUSED = 'UNUSED'
    INCORRECT_FORMAT = 'INCORRECT_FORMAT'
    INTERNAL_ERROR = 'INTERNAL_ERROR'
    UNKNOWN_VALUE = ''
class UnauthorizedException(Exception):
    """Raised when the current authentication is not sufficient to use this endpoint."""
    pass
class MissingScopeException(Exception):
    """Raised when the current authorization is missing a required scope."""
    pass
| StarcoderdataPython |
1712761 | <filename>mc/bookmarks/bookmarksexport/HtmlExporter.py<gh_stars>1-10
from .BookmarksExporter import BookmarksExporter
from PyQt5.Qt import QDir
from mc.common.globalvars import gVar
from ..BookmarkItem import BookmarkItem
from traceback import print_exc
class HtmlExporter(BookmarksExporter):
    """Exports the bookmark tree to a Netscape-format HTML file (bookmarks.html)."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # Target path; filled in by getPath() before exportBookmarks() runs.
        self._path = ''

    # override
    def name(self):
        '''
        Display name of this exporter.

        @return: QString
        '''
        return _('HTML File') + ' (bookmarks.html)'

    def getPath(self, parent):
        '''
        Ask the user where to save the exported file and remember the choice.

        @param: parent QWidget
        @return: QString selected path (empty if the dialog was cancelled)
        '''
        defaultPath = QDir.homePath() + '/bookmarks.html'
        filter_ = _('HTML Bookmarks') + '.html'
        self._path = gVar.appTools.getSaveFileName('HtmlExporter', parent,
                _('Choose file...'), defaultPath, filter_)
        return self._path

    def exportBookmarks(self, root):
        '''
        Write the bookmark tree rooted at *root* to the chosen path.

        @param: root BookmarkItem
        @return: bool True on success, False on I/O error
        '''
        try:
            with open(self._path, 'wt') as fp:
                # file.writelines() does NOT append line separators, so each
                # header line must carry its own newline (the previous code
                # emitted the whole Netscape header on a single line).
                fp.writelines(line + '\n' for line in [
                    "<!DOCTYPE NETSCAPE-Bookmark-file-1>",
                    "<!-- This is an automatically generated file.",
                    "     It will be read and overwritten.",
                    "     DO NOT EDIT! -->",
                    "<META HTTP-EQUIV=\"Content-Type\" CONTENT=\"text/html; charset=UTF-8\">",
                    "<TITLE>Bookmarks</TITLE>",
                    "<H1>Bookmarks</H1>"
                ])
                self._writeBookmark(root, fp, 0)
            return True
        except IOError:
            self._setError(_('Cannot open file for writing!'))
            return False

    # private:
    def _writeBookmark(self, item, fp, level):
        '''
        Recursively write *item* (and its children) to the open file *fp*.

        @param: item BookmarkItem
        @param: fp file object opened for writing
        @param: level int nesting depth, used for indentation
        '''
        assert(item)
        indent = ' ' * level * 4
        itemType = item.type()
        if itemType == BookmarkItem.Url:
            fp.write('%s<DT><A HREF="%s">%s</A>\n' % (indent,
                item.urlString(), item.title()
            ))
        elif itemType == BookmarkItem.Separator:
            fp.write('%s<HR>\n' % indent)
        elif itemType == BookmarkItem.Folder:
            fp.write('%s<DT><H3>%s</H3>\n' % (indent, item.title()))
            fp.write('%s<DL><p>\n' % indent)
            for child in item.children():
                self._writeBookmark(child, fp, level + 1)
            fp.write('%s</p></DL>\n' % indent)
        elif itemType == BookmarkItem.Root:
            fp.write('%s<DL><p>\n' % indent)
            for child in item.children():
                self._writeBookmark(child, fp, level + 1)
            fp.write('%s</p></DL>\n' % indent)
| StarcoderdataPython |
1809194 | # Copyright 2017-2020 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import tensorflow as tf
from ..base import SamplesMeanAndVariance
from ..inducing_variables import InducingVariables
from ..kernels import Kernel
from .dispatch import conditional, sample_conditional
from .util import sample_mvn
@sample_conditional.register(object, object, Kernel, object)
@sample_conditional.register(object, InducingVariables, Kernel, object)
def _sample_conditional(
    Xnew: tf.Tensor,
    inducing_variable: InducingVariables,
    kernel: Kernel,
    f: tf.Tensor,
    *,
    full_cov: bool = False,
    full_output_cov: bool = False,
    q_sqrt: Optional[tf.Tensor] = None,
    white: bool = False,
    num_samples: Optional[int] = None,
) -> SamplesMeanAndVariance:
    """
    Draw a sample from the conditional distribution at the points `Xnew`.

    In most cases this computes the conditional mean m and variance v and
    returns m + sqrt(v) * eps with eps ~ N(0, 1). Some combinations of Mok
    and Mof have more efficient sampling routines, registered on the same
    dispatcher, which are preferred automatically.

    :return: samples, mean, cov
        samples has shape [num_samples, N, P] or [N, P] if num_samples is None
        mean and cov as for conditional()
    """
    if full_cov and full_output_cov:
        msg = "The combination of both `full_cov` and `full_output_cov` is not permitted."
        raise NotImplementedError(msg)

    mean, cov = conditional(
        Xnew,
        inducing_variable,
        kernel,
        f,
        q_sqrt=q_sqrt,
        white=white,
        full_cov=full_cov,
        full_output_cov=full_output_cov,
    )

    if not full_cov:
        # mean: [..., N, P]; cov: [..., N, P] or [..., N, P, P]
        samples = sample_mvn(
            mean, cov, full_cov=full_output_cov, num_samples=num_samples
        )  # [..., (S), N, P]
        return samples, mean, cov

    # mean: [..., N, P]; cov: [..., P, N, N]
    # Transpose the mean so its layout matches cov, sample, transpose back.
    transposed_mean = tf.linalg.adjoint(mean)  # [..., P, N]
    draws = sample_mvn(
        transposed_mean, cov, full_cov=True, num_samples=num_samples
    )  # [..., (S), P, N]
    samples = tf.linalg.adjoint(draws)  # [..., (S), N, P]
    return samples, mean, cov
| StarcoderdataPython |
8047414 | <gh_stars>0
def solution(n):
    """Convert the positive integer *n* to the "1-2-4 number system".

    This is base 3 with the digit set {1, 2, 4} and no zero digit:
    1, 2, 4, 11, 12, 14, 21, 22, 24, 41, ...
    """
    digits = []
    while n > 0:
        # Shift to 0-based before dividing so there is never a zero digit;
        # remainder 0/1/2 maps to the symbols '1'/'2'/'4'.
        n, remainder = divmod(n - 1, 3)
        digits.append("124"[remainder])
    return "".join(reversed(digits))
6480045 | <reponame>Seniorcaptain/Scraper<filename>main.py<gh_stars>0
# import what we need
import pandas as pd
import requests_html
# Scrape headlines and dump them to a local file.
session = requests_html.HTMLSession()
# use session to get the page
# NOTE(review): the second get() overwrites `r`, so only Business Daily is
# actually scraped -- confirm whether both sites were intended.
r = session.get('https://kenyanwallstreet.com/')
r = session.get('https://www.businessdailyafrica.com/')
# r =session.get(' https://www.standardmedia.co.ke/')
# render the html, sleep=1 to give it a second to finish before moving on. scrolldown= how many times to page down on the browser, to get more results. 5 was a good number here
# r.html.render()
# find all the articles by using inspect element and create blank list
articles = r.html.find('article')
newslist = []
# loop through each article to find the title and link. try and except as repeated articles from other sources have different h tags.
for item in articles:
    try:
        newsitem = item.find('h3', first=True)
        title = newsitem.text
        # link = newsitem.absolute_links
        newsarticle = {
            'title': title,
            # 'link': link
        }
        newslist.append(newsarticle)
    except:
        # Articles without an <h3> headline are skipped silently.
        pass
# Persist the scraped headlines.
# NOTE(review): to_csv('Headlines') writes a file named 'Headlines' with no
# extension; later code re-reads that file, so keep the names in sync.
df = pd.DataFrame(newslist)
# print(df.head(5))
df.to_csv('Headlines')
from datetime import datetime
# Append a timestamp marking when this scrape ran.
# NOTE(review): appending raw text to the CSV corrupts it for any strict
# CSV reader -- consider a separate log file.
with open('Headlines', 'a+') as file:
    file.write(str(datetime.now()))
def strip_punctiations(line):
    """Return *line* with every ASCII punctuation character removed.

    :param line: text to clean
    :return: the cleaned string
    """
    # The `string` module was never imported at the top of this file, which
    # made this function raise NameError; import it locally so the fix is
    # self-contained.
    import string
    # str.translate removes all punctuation in a single C-level pass instead
    # of one .replace() scan per punctuation character.
    return line.translate(str.maketrans("", "", string.punctuation))
# Build a word-frequency table from the file the scraper wrote above.
# NOTE(review): df.to_csv('Headlines') creates a file literally named
# 'Headlines' (no extension), so that name is read here; the old code
# opened 'Headlines.csv', which never exists.
filepath = 'Headlines'
word_count = {}
with open(filepath, 'r') as fi:
    # for each line in file
    for line in fi:
        line = strip_punctiations(line)
        for word in line.split():
            # `word.lower` (without parentheses) stored the bound method
            # object instead of the lowercased string; call it.
            word = word.lower()
            word_count[word] = word_count.get(word, 0) + 1
# Show the first ten distinct words encountered (dict insertion order).
ten_words = list(word_count.keys())[:10]
for word in ten_words:
    print("{0:1}{1:8d}".format(word, word_count[word]))
def order_dict_by_freq(dictionary):
    """Return (count, key) pairs sorted by descending frequency.

    :param dictionary: mapping of key -> occurrence count
    :return: list of (count, key) tuples, most frequent first
    """
    # The old code sorted ascending and then sliced with [::1], which is a
    # plain copy (not a reversal), so the "top words" report actually showed
    # the LEAST frequent entries; sort descending as the name promises.
    pairs = [(count, key) for key, count in dictionary.items()]
    return sorted(pairs, reverse=True)
# Report the 100 most frequent words with their counts.
top_words = order_dict_by_freq(word_count)[:100]
for tuple_freq in top_words:
    count, word = tuple_freq
    print("{0:15}{1:8d}".format(word, count))
| StarcoderdataPython |
3554897 | <reponame>SGC-Tlaxcala/cerebro
# Generated by Django 3.2.5 on 2021-07-24 00:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); replaces the old ``Cifras``
    # model with the new ``Numbers`` model. Edit by hand only with care.

    dependencies = [
        ('productividad', '0007_auto_20190613_1548'),
    ]

    operations = [
        migrations.CreateModel(
            name='Numbers',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('distrito', models.CharField(max_length=2, verbose_name='Distrito')),
                ('mac', models.CharField(max_length=6, verbose_name='Módulo')),
                ('type', models.CharField(max_length=20, verbose_name='Tipo de módulo')),
                ('days_worked', models.PositiveSmallIntegerField(verbose_name='Días trabajados')),
                ('hours_worked', models.FloatField(verbose_name='Jornada trabajada')),
                ('setup', models.CharField(max_length=10, verbose_name='Configuración')),
                ('tramites_aplicados', models.SmallIntegerField(verbose_name='Trámites')),
                ('cards_by_update', models.SmallIntegerField(help_text='Credenciales entregadas producto de trámites de actualización', verbose_name='Credenciales entregadas')),
                ('total_atenciones', models.SmallIntegerField(verbose_name='Total de atención')),
                ('numbers_by_day', models.SmallIntegerField(verbose_name='Productividad por día')),
                ('numbers_by_machine_by_day', models.SmallIntegerField(verbose_name='Productividad por día por estación')),
                ('credenciales_recibidas', models.SmallIntegerField(verbose_name='Credenciales recibidas')),
                ('reporte_semanal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reporte_cifras', to='productividad.reporte')),
            ],
            options={
                'verbose_name': 'Cifras por módulo',
                'verbose_name_plural': 'Reportes de productividad',
                'ordering': ['distrito', 'mac'],
            },
        ),
        migrations.DeleteModel(
            name='Cifras',
        ),
    ]
| StarcoderdataPython |
4996450 | from django.forms import ModelForm
from .models import *
class TagForm(ModelForm):
    """ModelForm exposing every field of the Tag model."""
    class Meta:
        model = Tag
        # '__all__' auto-includes every model field; any field added to the
        # model later becomes user-editable automatically.
        fields = '__all__'
class CorpusForm(ModelForm):
    """ModelForm exposing every field of the Corpus model."""
    class Meta:
        model = Corpus
        fields = '__all__'
class ArticleForm(ModelForm):
    """ModelForm exposing every field of the Article model."""
    class Meta:
        model = Article
        fields = '__all__'
class ReferenceForm(ModelForm):
    """ModelForm exposing every field of the Reference model."""
    class Meta:
        model = Reference
        fields = '__all__'
class ImageForm(ModelForm):
    """ModelForm exposing every field of the Image model."""
    class Meta:
        model = Image
        fields = '__all__'
| StarcoderdataPython |
# Filename pattern for built binaries, filled with str.format,
# e.g. "1.2.0-linux-x86_64".
BINARY_FNAME_TEMPLATE = "{version}-{platform}-{architecture}"
| StarcoderdataPython |
4894721 | from flask_login import UserMixin
from app.db_instance import db
class UserProfile(db.Model):
    """SQLAlchemy model storing profile data for a user.

    NOTE(review): ``UserMixin`` is imported at the top of this file but the
    class does not inherit it -- confirm whether Flask-Login integration was
    intended here.
    """
    # 36-char UUID string used as the primary key.
    user_id = db.Column(db.String(36), primary_key=True)
    role_id = db.Column(db.Integer, unique=False, nullable=False)
    department_id = db.Column(db.Integer, unique=False, nullable=False)
    # Nullables:
    first_name = db.Column(db.String(50), unique=False, nullable=True)
    last_name = db.Column(db.String(50), unique=False, nullable=True)
    # URL/path of the avatar image (up to 300 chars).
    avatar = db.Column(db.String(300), unique=False, nullable=True)
    gender = db.Column(db.Boolean, nullable=True, comment="T for Male, F for Female", default=None)

    def __repr__(self):
        return f'<User: {self.first_name} {self.last_name}>'
| StarcoderdataPython |
11228910 | <filename>In Class Projects/In Class Examples Spring 2019/Section 8/schoolCountyDoubleIndexInClass.py
import pandas as pd
import os
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
# Load the expenditure table with a (County, Year) MultiIndex.
data = pd.read_csv("schoolDataRealExpenditures.csv",
                   index_col = ["County","Year"])
# Collect the distinct counties and years as sorted lists.
county_index = data.index.get_level_values('County')
year_index = data.index.get_level_values('Year')
county_index = set(county_index)
year_index = set(year_index)
county_index = sorted(list(county_index))
year_index = sorted(list(year_index))
# Example slices: one county across all years, and one year across counties.
williams_index = data.index.get_level_values("County") == "Williams"
williams_data = data[williams_index]
williams_data.to_csv("williamsData.csv")
y2000_index = data.index.get_level_values("Year") == 2000
y2000_data = data[y2000_index]
y2000_data.to_csv("2000Data.csv")
# Output folders for the per-county and per-year CSV exports below.
county_folder = "County"
year_folder = "Year"
try:
    os.mkdir(county_folder)
except FileExistsError:
    # Narrowed from a bare except: only "already there" is expected here.
    print("Folder", county_folder, "already exists")
try:
    os.mkdir(year_folder)
except FileExistsError:
    # The old message printed the folder name twice and never said why;
    # mirror the wording used for the county folder.
    print("Folder", year_folder, "already exists")
# Export one CSV per county and one per year into the folders created above.
for county in county_index:
    index_for_county = data.index.get_level_values("County") == county
    data_for_county = data[index_for_county]
    data_for_county.to_csv(county_folder + "/" + county + ".csv")
for year in year_index:
    index_for_year = data.index.get_level_values("Year") == year
    data_for_year = data[index_for_year]
    data_for_year.to_csv(year_folder + "/" + str(year) + ".csv")
# Build one scatter plot and one dual-axis line plot per county, collecting
# every figure into a single multi-page PDF.
pp = PdfPages("NDSchoolDataVisualizations.pdf")
for county in county_index:
    index_for_county = data.index.get_level_values("County") == county
    data_for_county = data[index_for_county]
    # Re-index by Year so the plots use the year as the x axis.
    df = data_for_county.reset_index().set_index("Year")
    try:
        xname = "Total Expenditures"
        yname = "Students Enrolled / Population"
        fig, ax = plt.subplots(figsize = (15, 8))
        # Scatter of expenditures vs enrolment share, coloured by year.
        scatter = ax.scatter(x=df[xname],
                             y=df[yname],
                             c = df.index.get_level_values("Year"))
        #
        plt.colorbar(scatter)
        # Axis manipulation
        ax.tick_params(axis='x', rotation = 90)
        plt.rc('xtick',labelsize=20)
        plt.rc('ytick',labelsize=20)
        plt.xlabel(xname, fontsize = 20)
        plt.ylabel(yname, fontsize = 20)
        plt.ylim(ymin = 0, ymax = df[yname].max())
        plt.title(county, fontsize = 26)
        # NOTE(review): plt.show() inside the loop may block on interactive
        # backends; it is a no-op on headless/Agg backends.
        plt.show()
        pp.savefig(fig, bbox_inches = "tight")
        #
        # Create Line Graph
        fig, ax = plt.subplots(figsize = (15, 8))
        ax2 = plt.twinx()
        df[xname].plot.line(color = "C0", ax=ax)
        df[yname].plot.line(color = "C2", ax=ax2)
        ax.set_xlabel("Year", fontsize = 20)
        ax.tick_params(axis = "x", rotation = 90)
        ax.set_ylabel(xname, fontsize = 20, color = "C0")
        ax2.set_ylabel(yname, fontsize = 20, color = "C2")
        # format y axis as percent
        vals = ax.get_yticks()
        ax.set_yticklabels(['{:5.2e}'.format(x) for x in vals])
        vals = ax2.get_yticks()
        ax2.set_yticklabels(['{:,.3%}'.format(x) for x in vals])
        plt.title(county, fontsize = 26)
        plt.show()
        pp.savefig(fig, bbox_inches = "tight")
        plt.close()
    except:
        # NOTE(review): bare except treats ANY failure as missing data;
        # consider narrowing to the exceptions NaN data actually raises.
        print("NAN entries for", county)
pp.close()
| StarcoderdataPython |
1781393 | <gh_stars>0
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import os
import wolframalpha
import wikipedia
import requests
from flask import Flask
from flask import request
from flask import make_response
from flask import jsonify,request
import random
# Single Flask application instance; the /webhook route below is bound to it.
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
    """Entry point for POST /webhook: parse the JSON body, delegate, reply."""
    payload = request.get_json(silent=True, force=True)
    print("Request:")
    reply = processRequest(payload)
    print(reply)
    return make_response(jsonify({'fulfillmentText': reply}))
def processRequest(req):
    """Answer a Dialogflow webhook request: news, WolframAlpha or Wikipedia."""
    # Parsing the POST request body into a dictionary for easy access.
    speech = ""
    try:
        action = req.get('queryResult').get('action')
    except AttributeError:
        # Body missing or not shaped like a Dialogflow request.
        return 'json error'
    if action == "input.unknown":
        my_input = req.get('queryResult').get('queryText').lower()
        if ("news" in my_input) or ("top headlines" in my_input) or ("headlines" in my_input):
            # NOTE(review): news() is not defined anywhere in this file --
            # confirm it is provided elsewhere, otherwise this raises NameError.
            x = news()
            speech = "" + x + ""
            res = makeWebhookResult(speech)
        else:
            try:
                # NOTE(review): hard-coded API credential checked into source;
                # move it to an environment variable.
                app_id = "R2LUUJ-QTHXHRHLHK"
                client = wolframalpha.Client(app_id)
                r = client.query(my_input)
                answer = next(r.results).text
                speech = "" + answer + ""
                res = makeWebhookResult(speech)
            except:
                # WolframAlpha failed; fall back to Wikipedia. The first two
                # words are dropped, presumably to strip "who is"/"what is" --
                # confirm against the Dialogflow intents.
                my_input = my_input.split(' ')
                my_input = " ".join(my_input[2:])
                answer = wikipedia.summary(my_input, sentences=2)
                speech = "" + answer + ""
                res = makeWebhookResult(speech)
    else:
        speech = "no input"
        res = makeWebhookResult(speech)
    return res
def makeWebhookResult(speech):
    """Return the fulfillment text as-is; kept as a hook for richer responses."""
    return speech
# Run the development server; the port can be overridden via the PORT
# environment variable (defaults to 5000).
if __name__ == '__main__':
    port = int(os.getenv('PORT', 5000))
    print("Starting app on port %d" % port)
    app.run(debug=False, port=port, host='0.0.0.0', threaded=True)
| StarcoderdataPython |
11335460 | <gh_stars>0
# Mapping from a key / action name to the single command byte sent to the
# device. The w/a/s/d keys presumably map to drive directions and 'f' to a
# further action -- confirm the exact semantics against the firmware protocol.
key_to_command = {
    "w": b"\x01",
    "s": b"\x02",
    "a": b"\x03",
    "d": b"\x04",
    "turbo_on": b"\x05",
    "turbo_off": b"\x06",
    "f": b"\x07",
}
# Reverse lookup: command byte -> key/action name.
command_to_key = {v: k for k, v in key_to_command.items()}
| StarcoderdataPython |
8025290 | <gh_stars>1-10
"""This module implements a distributed database as an example usage of the piChain package. It's a key-value storage
that can handle keys and values that are arbitrary byte arrays. Supported operations are put(key,value), get(key) and
delete(key).
note: If you want to delete the local database and the internal datastructure piChain uses delete the ~/.pichain
directory.
"""
import logging
import argparse
import os
import plyvel
from twisted.internet.protocol import Factory, connectionDone
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from piChain import Node
# Module-wide logging setup at DEBUG level.
# NOTE(review): calling basicConfig at import time configures the root logger
# as a side effect -- fine for an example script, avoid in library code.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class DatabaseProtocol(LineReceiver):
    """Object representing a connection with another node."""

    def __init__(self, factory):
        """
        Args:
            factory (DatabaseFactory): Twisted Factory used to keep a shared state among multiple connections.
        """
        self.factory = factory

    def connectionMade(self):
        # Register this connection so the factory can broadcast commit
        # results back to every connected client.
        self.factory.connections.update({self.transport.getPeer(): self})
        logger.debug('client connection made')

    def connectionLost(self, reason=connectionDone):
        self.factory.connections.pop(self.transport.getPeer())
        logger.debug('client connection lost')

    def lineReceived(self, line):
        """ The `line` represents the database operation send by a client. Put and delete operations have to be
        committed first by calling `make_txn(operation)` on the node instance stored in the factory. Get operations
        can be directly executed locally.

        Args:
            line (bytes): received command str encoded in bytes.
        """
        txn_command = line.decode()
        logger.debug('received command from client: %s', txn_command)
        c_list = txn_command.split()
        if c_list[0] == 'put' or c_list[0] == 'delete':
            # Writes go through consensus; the reply is broadcast from
            # DatabaseFactory.tx_committed once the transaction commits.
            self.factory.node.make_txn(txn_command)
        elif c_list[0] == 'get':
            # get command is directly locally executed and will not be committed
            key = c_list[1]
            value = self.factory.db.get(key.encode())
            if value is None:
                message = 'key "%s" does not exist' % key
                self.sendLine(message.encode())
            else:
                self.sendLine(value)

    def rawDataReceived(self, data):
        # Line-mode only; raw data is ignored (method required by LineReceiver).
        pass
class DatabaseFactory(Factory):
    """Object managing all connections. This is a twisted Factory used to listen for incoming connections. It keeps a
    Node instance `self.node` as a shared object among multiple connections.

    Attributes:
        connections (dict): Maps an IAddress (representing an address of a remote peer) to a DatabaseProtocol instance
            (representing the connection between the local node and the peer).
        node (Node): A Node instance representing the local node.
        db (pyvel db): A plyvel db instance used to store the key-value pairs (python implementation of levelDB).
    """

    def __init__(self, node_index, c_size):
        """Setup of a Node instance: A peers dictionary containing an (ip,port) pair for each node must be defined. The
        `node_index` argument defines the node that will run locally. The `tx_committed` field of the Node instance is a
        callable that is called once a block has been committed. By calling `start_server()` on the Node instance the
        local node will try to connect to its peers.

        Args:
            node_index (int): Index of node in the given peers dict.
            c_size (int): Cluster size.
        """
        self.connections = {}
        # Consensus peers listen on 7000 + index on localhost; the client
        # port (8000 + index) is bound separately in main().
        peers = {}
        for i in range(0, c_size):
            peers.update({str(i): {'ip': 'localhost', 'port': (7000 + i)}})
        self.node = Node(node_index, peers)
        self.node.tx_committed = self.tx_committed

        # create a db instance (one LevelDB directory per node index)
        base_path = os.path.expanduser('~/.pichain/distributed_DB')
        if not os.path.exists(base_path):
            os.makedirs(base_path)
        path = base_path + '/node_' + str(node_index)
        self.db = plyvel.DB(path, create_if_missing=True)

    def buildProtocol(self, addr):
        return DatabaseProtocol(self)

    def broadcast(self, line):
        # Send `line` to every currently connected client.
        for con in self.connections.values():
            con.sendLine(line.encode())

    def tx_committed(self, commands):
        """Called once a transaction has been committed. Since the delete and put operations have now been committed,
        they can be executed locally.

        Args:
            commands (list): list of commands inside committed block (one per Transaction)
        """
        for command in commands:
            c_list = command.split()
            if c_list[0] == 'put':
                key = c_list[1]
                value = c_list[2]
                self.db.put(key.encode(), value.encode())
                message = 'stored key-value pair = ' + key + ': ' + value
                self.broadcast(message)
            elif c_list[0] == 'delete':
                key = c_list[1]
                self.db.delete(key.encode())
                message = 'deleted key = ' + key
                self.broadcast(message)
def main():
    """Parse CLI arguments, create the node and start serving clients.

    Expects two positional arguments: the node's index in the peers dict and
    the cluster size.
    """
    # get node index as an argument
    parser = argparse.ArgumentParser()
    parser.add_argument("node_index", help='Index of node in the given peers dict.')
    parser.add_argument("clustersize")
    args = parser.parse_args()
    node_index = args.node_index
    cluster_size = args.clustersize

    # setup node instance
    db_factory = DatabaseFactory(int(node_index), int(cluster_size))

    # Any of the nodes may receive commands: node i serves clients on port
    # 8000 + i, mirroring the 7000 + i consensus ports set up in the
    # factory. (The old code hard-coded nodes 0-2, silently leaving the
    # extra nodes of a larger cluster without a client port.)
    reactor.listenTCP(8000 + int(node_index), db_factory)

    db_factory.node.start_server()
# Start the node only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| StarcoderdataPython |
6530606 | from scipy.constants import mu_0, epsilon_0
from . import TDEM
from . import FDEM
from . import NSEM
from . import Static
from . import Base
from . import Analytics
from . import Utils
| StarcoderdataPython |
4931234 | #! /usr/bin/env python
## @file Pet_behaviours.py
# @brief Pet state machine
#
# Details: This component handles the user interface of the project
#
## Library declaration
import rospy
from std_srvs.srv import *
import random
## Variable definition
random_timer=0 # loop-tick countdown until the next random behaviour change; 0 forces a re-roll on the first pass
# Rooms the robot knows about; casing must match the user's 'go to <room>' input.
names=["Entrance","Living_room","Bedroom","Bathroom","Closet","Kitchen"]
## Main body of code
def main():
    """User-interface loop: read commands, relay them to the robot via ROS
    parameters, and periodically pick random behaviours.

    NOTE(review): raw_input is Python 2 only -- confirm the target ROS
    distribution, otherwise use input().
    """
    global random_timer
    rospy.init_node('user_interface')
    rate = rospy.Rate(10) # to adjust the frequency of the code
    while True:
        ## Code to handle the user input
        string=raw_input("Input your command(type 'help') and press ENTER: ")
        devided=string.split(" ")
        if devided[0]=="play": ## set the play behavior
            rospy.set_param("state",2)
        elif devided[0]=="help": ## help command to display all options
            print("Type your command from this list(respecting the lower case):")
            print("'play': start the play behavior of the robot")
            print("'go to '+ 'room name': when the robot is at the user it will go to this room if it knows it, otherwise it will start the find behavior")
            print("'room_list': print the list of rooms(remember to respect the letters casing)")
        elif devided[0]=="go": ## Say the robot where to go in the play behavior
            if rospy.get_param("cplace")=="owner": ## Handle the case where the robot is at the owner(can receive the command)
                if rospy.get_param("destination")=="unknown" or not(rospy.get_param("destination")=="owner"):
                    # 'go to <room>' -> the room name is the third token.
                    rospy.set_param("command",devided[2])
                else: ## handles the case in which the robot is not at the owner(cannot receive the command)
                    print("Another destination have already been set, try again when the robot comes back")
            else:
                print("The robot is not at the owner, try again when it is")
        elif devided[0]=="room_list": ## Show the room list
            print("Entrance \n Living_room \n Bedroom \n Bathroom \n Closet \n Kitchen")
        else: ## Catch if the user introduced an incorrect command
            print("The input is incorrect, check the casing when trying again or type 'help' and press enter for the list of available commands")
        ## Code to handle the autonomous random decision of the robot
        chaos=random.uniform(0,100) ## base of the randomic decisions
        # `counter` is first assigned inside this branch; that is safe because
        # random_timer==0 on the first pass, so `or` short-circuits before
        # `counter` is read.
        if random_timer==0 or counter==0:
            random_timer=random.uniform(75,120) ## to decide how frequently change the behavior as a multiple of the rate of this scripts
            counter=random_timer
        else:
            if not(rospy.get_param("state")==4 or rospy.get_param("state")==0) and chaos <15:
                rospy.set_param("state",0) ## put the robot to sleep
            elif rospy.get_param("state")==2 and rospy.get_param("cplace")=="owner" and rospy.get_param("destination")=="unknown" and chaos<25:
                var=random.randrange(len(names)) ## select one room to go to
                rospy.set_param("destination",names[var]) # set it as the destination
            elif (rospy.get_param("state")==1 or rospy.get_param("state")==0):
                rospy.set_param("state",2) ## set state to play both to activate the play behavior and to exit the sleep state
            counter-=1 ## start the counter between behaviors changes
        rate.sleep() ## sleep till the end of the rate
# Run the interface loop only when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
11277301 | <reponame>euseand/RSS_Reader<gh_stars>0
import argparse
import json
from datetime import datetime
from .rss_parser import RssParser
# Version reported by the --version flag (assigned to parser.version in main).
current_version = 0.42
def main():
"""
This function contains all utility features realisation
:return: None
"""
parser = argparse.ArgumentParser(description='Brand New euseand\'s RSS-parser written in Python')
parser.version = current_version
parser.add_argument("source", help="RSS URL to be parsed. Write empty quotes for default")
parser.add_argument("--version", help="Print version info", action="store_true")
parser.add_argument("--json", help="Print result as JSON in stdout", action="store_true")
parser.add_argument("--verbose", help="Output verbose status messages", action="store_true")
parser.add_argument("--limit", help="Limit news topics if this parameter provided", type=int)
parser.add_argument("--date", help="Write date in %%Y%%m%%d format (example: --date 20191020)"
"to print out cached news for that date", type=str)
parser.add_argument("--to-html", help="Cache news to html file in human-readable format. "
"Takes path to file including file name as argument. "
"If only file name given, creates file in a current working directory. "
"Write \"default\" to create a default file in package data folder", type=str)
parser.add_argument("--to-pdf", help="Cache news to pdf file in human-readable format. "
"Takes path to file including file name as argument. "
"If only file name given, creates file in a current working directory. "
"Write \"default\" to create a default file in package data folder", type=str)
args = parser.parse_args()
error_raised = False
if args.verbose:
logger = RssParser.create_logger('rss-reader')
logger.info('Logging enabled.')
if args.limit:
if args.limit > 0:
limit = args.limit
if args.verbose:
logger.info(f'News limit was set to {limit}')
else:
limit = 10
if args.verbose:
logger.info(f'News limit was set to {limit} due to invalid limit value input')
else:
limit = 10
if args.verbose:
logger.info(f'News limit was set to {limit} as default')
if args.source:
source = args.source
if args.verbose:
logger.info(f'Utility has started with next source URL: {source}')
else:
source = "https://news.yahoo.com/rss/"
if args.verbose:
logger.info(f'Source URL was set to default: {source}')
if args.date:
try:
datetime.strptime(args.date, '%Y%m%d')
except ValueError:
print(f'rss-reader: rss-reader.py : error: Wrong date format')
date = datetime.strftime(datetime.now(), '%Y%m%d')
print(f'rss-reader: rss-reader.py : info: '
f'Date was set to today ({date}) due to invalid date value input')
my_parser = RssParser(source, limit, args.verbose, args.date, args.to_html, args.to_pdf)
online_or_cached = ''
if args.date:
online_or_cached += 'cached'
if args.verbose:
logger.info(f'Cached news will be fetched')
try:
my_parser.parse_json_cache()
except Exception as parse_json_exc:
print(f'rss-reader: rss_parser.py : parse_json_cache : error : {parse_json_exc}')
error_raised = True
else:
online_or_cached += 'online'
if args.verbose:
logger.info(f'Online news will be fetched')
try:
my_parser.parse_rss()
except Exception as parse_online_feed_exc:
print(f'rss-reader: rss_parser.py : parse_rss : error : {parse_online_feed_exc}')
error_raised = True
try:
my_parser.cache_feed_to_json_file()
except Exception as cache_to_json_exc:
print(f'rss-reader: rss_parser.py : cache_feed_to_json_file : error : {cache_to_json_exc}')
error_raised = True
if not error_raised:
if args.json:
print(json.dumps(my_parser.feed_to_json(), indent=1))
if args.verbose:
logger.info(f'{len(my_parser.news)} {online_or_cached} news have been printed in JSON format')
else:
text_feed = ''
text_feed += my_parser.feed_to_string()
print(text_feed)
if args.verbose:
logger.info(f'{len(my_parser.news)} {online_or_cached} news have been printed')
if args.to_html:
try:
my_parser.cache_feed_to_html_file()
except Exception as cache_to_html_exc:
print(f'rss-reader: rss_parser.py : cache_feed_to_html_file : error : {cache_to_html_exc}')
if args.verbose:
logger.info(f'{len(my_parser.news)} {online_or_cached} news have been cached in html file')
if args.to_pdf:
try:
my_parser.cache_feed_to_pdf_file()
except Exception as cache_to_pdf_exc:
print(f'rss-reader: rss_parser.py : cache_feed_to_pdf_file : error : {cache_to_pdf_exc}')
if args.verbose:
logger.info(f'{len(my_parser.news)} {online_or_cached} news have been cached in pdf file')
if args.version:
print(f'Current version: {current_version}')
if args.verbose:
logger.info('Current utility version was printed')
if __name__ == '__main__':
    # Script entry point: report any unhandled error in the tool's
    # "component : location : error" format instead of a raw traceback.
    try:
        main()
    except Exception as main_exc:
        print(f'rss-reader: rss-reader.py : main : error: {main_exc}')
| StarcoderdataPython |
9688870 | <gh_stars>1-10
from quo import echo, Console
from quo.padding import Padding
# Render "Hello" padded by 2 blank lines (vertical) and 4 spaces (horizontal)
# on a blue background; expand=False keeps the block as narrow as its content.
console = Console()
test = Padding("Hello", (2, 4), style="on blue", expand=False)
# Print the padded block in red through the quo console.
console.echo(test, fg="red")
| StarcoderdataPython |
11374220 | # encoding: utf-8
"""
@author: <NAME>
@time: 2021/06/20 14:49
@desc:
"""
from networks.head.yolo_head import YoloHead
from networks.head.rcnn_head.rcnn_head import RcnnHead
# Registry mapping config names to detection-head classes.
head_dict = {
    'yolohead': YoloHead,
    'rcnnhead': RcnnHead,
}


def build_head(cfg):
    """Build a detection head module from a config dict.

    Args:
        cfg (dict): config with a 'name' key selecting the head type
            (one of the keys of ``head_dict``); the remaining keys are
            forwarded to the head constructor as keyword arguments.

    Returns:
        The instantiated head module.
    """
    _cfg = cfg.copy()  # copy so the caller's config is not mutated
    type_name = _cfg.pop('name')
    # Fail with a readable message instead of a bare AssertionError.
    assert type_name in head_dict, \
        'Unsupported head type: {}, available: {}'.format(type_name, list(head_dict))
    model = head_dict[type_name]
    return model(**_cfg)
| StarcoderdataPython |
11262640 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import random
import numpy as np
import os
import shutil
import paddle
from paddle.fluid import core
from datetime import timedelta
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.dygraph.parallel import ParallelEnv
import paddle.distributed as dist
def init_process_group(strategy=None):
    """Initialise the dynamic-graph parallel environment and return the
    process group backing it.

    Args:
        strategy: kept for signature compatibility with callers; unused.

    Returns:
        The default process group created by ``dist.init_parallel_env()``.
    """
    # The previous implementation also derived nranks/rank/is_master from
    # ParallelEnv() but never used them; those dead locals were removed.
    pg_group = dist.init_parallel_env()
    return pg_group.process_group
class TestProcessGroupFp32(unittest.TestCase):
    """Smoke tests for the NCCL process-group collectives (allreduce,
    broadcast, barrier, allgather, alltoall, reduce, scatter, send/recv).

    Intended to be launched on two GPU ranks: both ranks run the same code
    and branch on ``pg.rank()``.  Because every rank seeds its RNGs
    identically in ``setUp``, both ranks generate the same tensor contents,
    which the cross-rank equality assertions below rely on.
    """
    def setUp(self):
        # Identical seeds on every rank keep the random tensors reproducible.
        paddle.seed(2022)
        random.seed(2022)
        np.random.seed(2022)
        self.config()
    def config(self):
        # Tensor dtype and shape used by all collectives; overridden by the
        # fp16 subclass below.
        self.dtype = "float32"
        self.shape = (2, 10, 5)
    def test_create_process_group_nccl(self):
        """Exercise each collective API once in eager mode and check results."""
        with _test_eager_guard():
            paddle.set_device('gpu:%d' %
                              paddle.distributed.ParallelEnv().dev_id)
            pg = init_process_group()
            print("rank:", pg.rank(), "size:", pg.size(), "name:", pg.name())
            print("test new group api ok")
            # test allreduce sum
            # rank 0
            x = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            # rank 1
            y = np.random.random(self.shape).astype(self.dtype)
            tensor_y = paddle.to_tensor(y)
            sum_result = tensor_x + tensor_y
            if pg.rank() == 0:
                task = dist.all_reduce(tensor_x)
                assert np.array_equal(tensor_x, sum_result)
            else:
                task = dist.all_reduce(tensor_y)
                assert np.array_equal(tensor_y, sum_result)
            print("test allreduce sum api ok")
            # test allreduce max
            # rank 0
            x = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            # rank 1
            y = np.random.random(self.shape).astype(self.dtype)
            tensor_y = paddle.to_tensor(y)
            max_result = paddle.maximum(tensor_x, tensor_y)
            if pg.rank() == 0:
                task = dist.all_reduce(tensor_x,
                                       dist.ReduceOp.MAX,
                                       use_calc_stream=False)
                task.wait()
                assert np.array_equal(tensor_x, max_result)
            else:
                task = dist.all_reduce(tensor_y,
                                       dist.ReduceOp.MAX,
                                       use_calc_stream=False)
                task.wait()
                assert np.array_equal(tensor_y, max_result)
            print("test allreduce max api ok")
            # test allreduce min
            # rank 0
            x = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            # rank 1
            y = np.random.random(self.shape).astype(self.dtype)
            tensor_y = paddle.to_tensor(y)
            min_result = paddle.minimum(tensor_x, tensor_y)
            if pg.rank() == 0:
                task = dist.all_reduce(tensor_x,
                                       dist.ReduceOp.MIN,
                                       use_calc_stream=False)
                task.wait()
                assert np.array_equal(tensor_x, min_result)
            else:
                task = dist.all_reduce(tensor_y,
                                       dist.ReduceOp.MIN,
                                       use_calc_stream=False)
                task.wait()
                assert np.array_equal(tensor_y, min_result)
            print("test allreduce min api ok")
            # test allreduce prod
            # rank 0
            x = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            # rank 1
            y = np.random.random(self.shape).astype(self.dtype)
            tensor_y = paddle.to_tensor(y)
            prod_result = np.multiply(x, y)
            if pg.rank() == 0:
                task = dist.all_reduce(tensor_x,
                                       dist.ReduceOp.PROD,
                                       use_calc_stream=False)
                task.wait()
                assert np.array_equal(tensor_x, prod_result)
            else:
                task = dist.all_reduce(tensor_y,
                                       dist.ReduceOp.PROD,
                                       use_calc_stream=False)
                task.wait()
                assert np.array_equal(tensor_y, prod_result)
            print("test allreduce prod api ok")
            # test broadcast
            # rank 0
            x = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            # rank 1
            y = np.random.random(self.shape).astype(self.dtype)
            tensor_y = paddle.to_tensor(y)
            broadcast_result = paddle.assign(tensor_x)
            if pg.rank() == 0:
                task = dist.broadcast(tensor_x, 0, use_calc_stream=False)
                task.synchronize()
                paddle.device.cuda.synchronize()
                assert task.is_completed()
                assert np.array_equal(broadcast_result, tensor_x)
            else:
                task = dist.broadcast(tensor_y, 0)
                paddle.device.cuda.synchronize()
                assert np.array_equal(broadcast_result, tensor_y)
            print("test broadcast api ok")
            # test barrier
            # rank 0
            if pg.rank() == 0:
                dist.barrier()
            # rank 1
            else:
                task = pg.barrier()
                task.wait()
            print("test barrier api ok\n")
            # test allgather (with a pre-allocated output tensor / tensor list)
            # rank 0
            x = np.random.random(self.shape).astype(self.dtype)
            y = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            tensor_y = paddle.to_tensor(y)
            out_shape = list(self.shape)
            out_shape[0] *= 2
            out = np.random.random(out_shape).astype(self.dtype)
            tensor_out = paddle.to_tensor(out)
            if pg.rank() == 0:
                task = pg.all_gather(tensor_x, tensor_out)
                task.wait()
                paddle.device.cuda.synchronize()
            # rank 1
            else:
                tensor_out_list = [
                    paddle.empty_like(tensor_x),
                    paddle.empty_like(tensor_x)
                ]
                task = dist.all_gather(tensor_out_list,
                                       tensor_y,
                                       use_calc_stream=False)
                paddle.device.cuda.synchronize()
                tensor_out = paddle.concat(tensor_out_list)
            out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2])
            out_2 = paddle.slice(tensor_out, [0], [out_shape[0] // 2],
                                 [out_shape[0]])
            assert np.array_equal(tensor_x, out_1)
            assert np.array_equal(tensor_y, out_2)
            print("test allgather api ok\n")
            # test allgather again, this time letting the API fill an
            # initially empty output tensor list on rank 1
            if pg.rank() == 0:
                task = pg.all_gather(tensor_x, tensor_out)
                task.wait()
                paddle.device.cuda.synchronize()
            # rank 1
            else:
                tensor_out_list = []
                task = dist.all_gather(tensor_out_list,
                                       tensor_y,
                                       use_calc_stream=False)
                paddle.device.cuda.synchronize()
                tensor_out = paddle.concat(tensor_out_list)
            out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2])
            out_2 = paddle.slice(tensor_out, [0], [out_shape[0] // 2],
                                 [out_shape[0]])
            assert np.array_equal(tensor_x, out_1)
            assert np.array_equal(tensor_y, out_2)
            print("test allgather api2 ok\n")
            # test alltoall
            # rank 0
            x = np.random.random(self.shape).astype(self.dtype)
            y = np.random.random(self.shape).astype(self.dtype)
            out1 = np.random.random(self.shape).astype(self.dtype)
            out2 = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            tensor_y = paddle.to_tensor(y)
            tensor_out1 = paddle.to_tensor(out1)
            tensor_out2 = paddle.to_tensor(out2)
            raw_tensor_x_2 = paddle.slice(tensor_x, [0], [self.shape[0] // 2],
                                          [self.shape[0]])
            raw_tensor_y_1 = paddle.slice(tensor_y, [0], [0],
                                          [self.shape[0] // 2])
            if pg.rank() == 0:
                task = pg.alltoall(tensor_x, tensor_out1)
                task.wait()
            # rank 1
            else:
                in_1, in_2 = paddle.split(tensor_y, 2)
                out_1, out_2 = paddle.split(tensor_out2, 2)
                out_tensor_list = [out_1, out_2]
                task = dist.alltoall([in_1, in_2], out_tensor_list)
                paddle.device.cuda.synchronize()
                tensor_out2 = paddle.concat(out_tensor_list)
            # Each rank should now hold the other rank's half.
            out1_2 = paddle.slice(tensor_out1, [0], [self.shape[0] // 2],
                                  [self.shape[0]])
            out2_1 = paddle.slice(tensor_out2, [0], [0], [self.shape[0] // 2])
            if pg.rank() == 0:
                assert np.array_equal(out1_2.numpy(), raw_tensor_y_1.numpy())
            else:
                assert np.array_equal(out2_1, raw_tensor_x_2)
            print("test alltoall api ok\n")
            # test alltoall again with an empty output tensor list on rank 1
            x = np.random.random(self.shape).astype(self.dtype)
            y = np.random.random(self.shape).astype(self.dtype)
            out1 = np.random.random(self.shape).astype(self.dtype)
            out2 = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            tensor_y = paddle.to_tensor(y)
            tensor_out1 = paddle.to_tensor(out1)
            tensor_out2 = paddle.to_tensor(out2)
            raw_tensor_x_2 = paddle.slice(tensor_x, [0], [self.shape[0] // 2],
                                          [self.shape[0]])
            raw_tensor_y_1 = paddle.slice(tensor_y, [0], [0],
                                          [self.shape[0] // 2])
            if pg.rank() == 0:
                task = pg.alltoall(tensor_x, tensor_out1)
                task.wait()
            # rank 1
            else:
                in_1, in_2 = paddle.split(tensor_y, 2)
                out_1, out_2 = paddle.split(tensor_out2, 2)
                out_tensor_list = []
                task = dist.alltoall([in_1, in_2], out_tensor_list)
                paddle.device.cuda.synchronize()
                tensor_out2 = paddle.concat(out_tensor_list)
            out1_2 = paddle.slice(tensor_out1, [0], [self.shape[0] // 2],
                                  [self.shape[0]])
            out2_1 = paddle.slice(tensor_out2, [0], [0], [self.shape[0] // 2])
            if pg.rank() == 0:
                assert np.array_equal(out1_2.numpy(), raw_tensor_y_1.numpy())
            else:
                assert np.array_equal(out2_1, raw_tensor_x_2)
            print("test alltoall api2 ok\n")
            # test Reduce (sum); only the root (rank 0) holds the result
            # rank 0
            x = np.random.random(self.shape).astype(self.dtype)
            y = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            tensor_y = paddle.to_tensor(y)
            sum_result = tensor_x + tensor_y
            if pg.rank() == 0:
                task = dist.reduce(tensor_x, 0, use_calc_stream=True)
                paddle.device.cuda.synchronize()
            # rank 1
            else:
                task = dist.reduce(tensor_y, 0, use_calc_stream=False)
                task.wait()
                paddle.device.cuda.synchronize()
            if pg.rank() == 0:
                assert np.array_equal(tensor_x, sum_result)
            print("test reduce sum api ok\n")
            # test reduce max
            # rank 0
            x = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            # rank 1
            y = np.random.random(self.shape).astype(self.dtype)
            tensor_y = paddle.to_tensor(y)
            max_result = paddle.maximum(tensor_x, tensor_y)
            if pg.rank() == 0:
                task = dist.reduce(tensor_x,
                                   0,
                                   dist.ReduceOp.MAX,
                                   use_calc_stream=False)
                task.wait()
                assert np.array_equal(tensor_x, max_result)
            else:
                task = dist.reduce(tensor_y,
                                   0,
                                   dist.ReduceOp.MAX,
                                   use_calc_stream=False)
                task.wait()
            print("test reduce max api ok")
            # test reduce min
            # rank 0
            x = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            # rank 1
            y = np.random.random(self.shape).astype(self.dtype)
            tensor_y = paddle.to_tensor(y)
            min_result = paddle.minimum(tensor_x, tensor_y)
            if pg.rank() == 0:
                task = dist.reduce(tensor_x,
                                   0,
                                   dist.ReduceOp.MIN,
                                   use_calc_stream=False)
                task.wait()
                assert np.array_equal(tensor_x, min_result)
            else:
                task = dist.reduce(tensor_y,
                                   0,
                                   dist.ReduceOp.MIN,
                                   use_calc_stream=False)
                task.wait()
            print("test reduce min api ok")
            # test reduce product
            # rank 0
            x = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            # rank 1
            y = np.random.random(self.shape).astype(self.dtype)
            tensor_y = paddle.to_tensor(y)
            prod_result = np.multiply(x, y)
            if pg.rank() == 0:
                task = dist.reduce(tensor_x,
                                   0,
                                   dist.ReduceOp.PROD,
                                   use_calc_stream=False)
                task.wait()
                assert np.array_equal(tensor_x, prod_result)
            else:
                task = dist.reduce(tensor_y,
                                   0,
                                   dist.ReduceOp.PROD,
                                   use_calc_stream=False)
                task.wait()
            print("test reduce prod api ok")
            # test Scatter: root splits a double-size tensor, each rank
            # receives one half
            # rank 0
            in_shape = list(self.shape)
            in_shape[0] *= 2
            x = np.random.random(in_shape).astype(self.dtype)
            y = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            tensor_y = paddle.to_tensor(y)
            if pg.rank() == 0:
                in_1, in_2 = paddle.split(tensor_x, 2)
                task = dist.scatter(tensor_y, [in_1, in_2],
                                    0,
                                    use_calc_stream=True)
                #task.wait()
                paddle.device.cuda.synchronize()
            # rank 1
            else:
                task = dist.scatter(tensor_y, [], 0, use_calc_stream=False)
                task.wait()
                paddle.device.cuda.synchronize()
            out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]])
            out2 = paddle.slice(tensor_x, [0], [self.shape[0]],
                                [self.shape[0] * 2])
            if pg.rank() == 0:
                assert np.array_equal(tensor_y, out1)
            else:
                assert np.array_equal(tensor_y, out2)
            print("test scatter api ok\n")
            # test send/recv (asynchronous, explicit wait)
            # rank 0
            x = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            # rank 1
            y = np.random.random(self.shape).astype(self.dtype)
            tensor_y = paddle.to_tensor(y)
            if pg.rank() == 0:
                task = dist.send(tensor_x, 1, use_calc_stream=False)
                task.wait()
            else:
                task = dist.recv(tensor_y, 0, use_calc_stream=False)
                task.wait()
                assert np.array_equal(tensor_y, tensor_x)
            print("test send api ok")
            # test send/recv (synchronous, on the calc stream)
            # rank 0
            x = np.random.random(self.shape).astype(self.dtype)
            tensor_x = paddle.to_tensor(x)
            # rank 1
            y = np.random.random(self.shape).astype(self.dtype)
            tensor_y = paddle.to_tensor(y)
            if pg.rank() == 0:
                task = dist.send(tensor_x, 1, use_calc_stream=True)
            else:
                task = dist.recv(tensor_y, 0, use_calc_stream=True)
                assert np.array_equal(tensor_y, tensor_x)
            print("test send api ok")
class TestProcessGroupFp16(TestProcessGroupFp32):
    """Re-run the fp32 process-group collective tests with float16 tensors.

    Only ``config`` differs from the base class.  The previous ``setUp``
    override duplicated the parent verbatim (same seeds, same
    ``self.config()`` call), so it was removed and is now inherited.
    """

    def config(self):
        # fp16 dtype and a larger shape than the fp32 variant.
        self.dtype = "float16"
        self.shape = (4, 20, 20)
if __name__ == "__main__":
    # Run under unittest's CLI runner (launched per rank by the test driver).
    unittest.main()
| StarcoderdataPython |
1699892 | <filename>teamcity-ldap-sync.py
import argparse
import json
import requests
import random
from ldap3 import Server, Connection, SUBTREE, ALL
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
import configparser
except ImportError:
import ConfigParser as configparser
def get_args():
    """Parse and return the command line arguments.

    Returns:
        argparse.Namespace with the attributes ``file``, ``recursive``,
        ``lowercase`` and ``skip_disabled``.
    """
    # BUG FIX: this docstring used to sit *after* the nested _usage
    # definition, where it was a no-op string expression rather than the
    # function's docstring.

    def _usage():
        # Custom usage text shown by -h/--help instead of argparse's default.
        return """
        Usage: teamcity-ldap-sync [-sr] -f <config>
               teamcity-ldap-sync -h
        Options:
          -h, --help                        Display this usage info
          -s, --skip-disabled               Skip disabled AD users
          -r, --recursive                   Resolves AD group members recursively (i.e. nested groups)
          -f <config>, --file <config>      Configuration file to use
    """
    parser = argparse.ArgumentParser(description="Standard Arguments", usage=_usage())
    parser.add_argument("-f", "--file",
                        required=True,
                        help="Configuration file to use")
    parser.add_argument("-r", "--recursive",
                        required=False,
                        action='store_true',
                        help='Resolves AD group members recursively (i.e. nested groups)')
    parser.add_argument("-l", "--lowercase",
                        required=False,
                        action='store_true',
                        help="Create AD user names as lowercase")
    parser.add_argument("-s", "--skip-disabled",
                        required=False,
                        action='store_true',
                        help="Skip disabled AD users")
    args = parser.parse_args()
    return args
class TeamCityLDAPConfig(object):
    """
    TeamCity-LDAP configuration class

    Reads the [ldap], [ad], [openldap] and [teamcity] sections of the
    configuration file and exposes every option as an attribute.  A missing
    option inside a present section aborts the program via SystemExit.
    """

    def __init__(self, parser):
        try:
            if parser.has_section('ldap'):
                self.ldap_type = parser.get('ldap', 'type')
                self.ldap_uri = parser.get('ldap', 'uri')
                self.ldap_base = parser.get('ldap', 'base')
                self.ldap_user = parser.get('ldap', 'binduser')
                self.ldap_pass = parser.get('ldap', 'bindpass')
                raw_groups = parser.get('ldap', 'groups').split(',')
                self.ldap_groups = [name.strip() for name in raw_groups]
                # True when any configured group name contains a '*' pattern.
                self.ldap_wildcard = any('*' in name for name in self.ldap_groups)
            if parser.has_section('ad'):
                for attr, option in (('ad_filtergroup', 'filtergroup'),
                                     ('ad_filteruser', 'filteruser'),
                                     ('ad_filterdisabled', 'filterdisabled'),
                                     ('ad_filtermemberof', 'filtermemberof'),
                                     ('ad_groupattribute', 'groupattribute'),
                                     ('ad_userattribute', 'userattribute')):
                    setattr(self, attr, parser.get('ad', option))
            if parser.has_section('openldap'):
                for attr, option in (('openldap_type', 'type'),
                                     ('openldap_filtergroup', 'filtergroup'),
                                     ('openldap_filteruser', 'filteruser'),
                                     ('openldap_groupattribute', 'groupattribute'),
                                     ('openldap_userattribute', 'userattribute')):
                    setattr(self, attr, parser.get('openldap', option))
            if parser.has_section('teamcity'):
                for attr, option in (('tc_server', 'server'),
                                     ('tc_username', 'username'),
                                     ('tc_password', 'password'),
                                     ('tc_verify_certificate', 'verify_certificate')):
                    setattr(self, attr, parser.get('teamcity', option))
        except configparser.NoOptionError as e:
            raise SystemExit('Configuration issues detected in %s' % e)

    def set_groups_with_wildcard(self, ldap_conn):
        """
        Expand every wildcard group pattern into the concrete LDAP groups.

        :return:
        """
        expanded = []
        for pattern in self.ldap_groups:
            expanded.extend(ldap_conn.get_groups_with_wildcard(pattern))
        if not expanded:
            raise SystemExit('ERROR - No groups found with wildcard')
        self.ldap_groups = expanded
class LDAPConnector(object):
    """
    LDAP connector class

    Retrieves users and groups from an LDAP server via the ldap3 library.
    Use as a context manager: the connection is bound on __enter__ and
    unbound on __exit__.
    """

    def __init__(self, args, config):
        self.uri = urlparse(config.ldap_uri)
        self.base = config.ldap_base
        self.ldap_user = config.ldap_user
        self.ldap_pass = config.ldap_pass
        self.lowercase = args.lowercase
        self.skipdisabled = args.skip_disabled
        self.recursive = args.recursive
        if config.ldap_type == 'activedirectory':
            # Active Directory: filters/attributes come from the [ad] section.
            self.active_directory = "true"
            self.group_filter = config.ad_filtergroup
            self.user_filter = config.ad_filteruser
            self.disabled_filter = config.ad_filterdisabled
            self.memberof_filter = config.ad_filtermemberof
            self.group_member_attribute = config.ad_groupattribute
            self.uid_attribute = config.ad_userattribute
        else:
            # OpenLDAP: filters/attributes come from the [openldap] section.
            self.active_directory = None
            self.openldap_type = config.openldap_type
            self.group_filter = config.openldap_filtergroup
            self.user_filter = config.openldap_filteruser
            self.group_member_attribute = config.openldap_groupattribute
            self.uid_attribute = config.openldap_userattribute

    def __enter__(self):
        server = Server(host=self.uri.hostname,
                        port=self.uri.port,
                        get_info=ALL)
        # BUG FIX: the bind password was lost to an anonymisation placeholder
        # (`password=<PASSWORD>`), which is not even valid Python; bind with
        # the configured service-account credentials.
        self.conn = Connection(server=server,
                               user=self.ldap_user,
                               password=self.ldap_pass,
                               check_names=True,
                               raise_exceptions=True)
        self.conn.bind()
        return self

    def __exit__(self, exctype, exception, traceback):
        self.conn.unbind()
        print('Synchronization complete')

    def group_exist(self, group):
        """Return True when at least one group matches the group filter."""
        filter = self.group_filter % group
        self.conn.search(search_base=self.base,
                         search_filter=filter,
                         search_scope=SUBTREE,
                         attributes=['sn'])
        if self.conn.entries:
            return True
        else:
            return False

    def get_group_members(self, group):
        """
        Retrieves the members of an LDAP group

        Args:
            group (str): The LDAP group name

        Returns:
            A dict mapping each member's login name to its DN, or None when
            the group cannot be found.
        """
        attrlist = [self.group_member_attribute]
        filter = self.group_filter % group
        result = self.conn.search(search_base=self.base,
                                  search_scope=SUBTREE,
                                  search_filter=filter,
                                  attributes=attrlist)
        if not result:
            print('Unable to find group {}, skipping group'.format(group))
            return None
        # Get DN for each user in the group
        if self.active_directory:
            final_listing = {}
            result = json.loads(self.conn.response_to_json())['entries']
            for members in result:
                result_dn = members['dn']
                result_attrs = members['attributes']
                group_members = []
                attrlist = [self.uid_attribute]
                if self.recursive:
                    # Get a DN for all users in a group (recursive)
                    # It's available only on domain controllers with Windows Server 2003 SP2 or later
                    member_of_filter_dn = self.memberof_filter % result_dn
                    if self.skipdisabled:
                        filter = "(&%s%s%s)" % (self.user_filter, member_of_filter_dn, self.disabled_filter)
                    else:
                        filter = "(&%s%s)" % (self.user_filter, member_of_filter_dn)
                    uid = self.conn.search(search_base=self.base,
                                           search_scope=SUBTREE,
                                           search_filter=filter,
                                           attributes=attrlist)
                    if uid:
                        group_members = json.loads(self.conn.response_to_json())['entries']
                else:
                    # Otherwise, just get a DN for each user in the group
                    for member in result_attrs[self.group_member_attribute]:
                        if self.skipdisabled:
                            filter = "(&%s%s)" % (self.user_filter, self.disabled_filter)
                        else:
                            filter = "(&%s)" % self.user_filter
                        uid = self.conn.search(search_base=member,
                                               search_scope=SUBTREE,
                                               search_filter=filter,
                                               attributes=attrlist)
                        if uid:
                            group_members = json.loads(self.conn.response_to_json())['entries']
                # Fill dictionary with usernames and corresponding DNs
                for item in group_members:
                    dn = item['dn']
                    username = item['attributes']['sAMAccountName']
                    final_listing[username.lower()] = dn
            return final_listing
        else:
            # BUG FIX: this branch previously unpacked the boolean returned by
            # ldap3's Connection.search as if it were a python-ldap result
            # list (`dn, users = result.pop()`); read the matched entries from
            # the connection instead, like the Active Directory branch does.
            entries = json.loads(self.conn.response_to_json())['entries']
            group_entry = entries.pop()
            final_listing = {}
            # Get DN for each user in the group
            for uid in group_entry['attributes'][self.group_member_attribute]:
                if self.openldap_type == "groupofnames":
                    # 'member' values are full DNs; extract the RDN value.
                    uid = uid.split('=', 2)
                    uid = uid[1].split(',', 1)
                    uid = uid[0]
                filter = self.user_filter % uid
                attrlist = [self.uid_attribute]
                # get the actual LDAP object for each group member
                found = self.conn.search(search_base=self.base,
                                         search_scope=SUBTREE,
                                         search_filter=filter,
                                         attributes=attrlist)
                if found:
                    for item in json.loads(self.conn.response_to_json())['entries']:
                        final_listing[uid] = item['dn']
            return final_listing

    def get_groups_with_wildcard(self, groups_wildcard):
        """Return the names of all groups matching a wildcard pattern."""
        print("Search group with wildcard: {}".format(groups_wildcard))
        filter = self.group_filter % groups_wildcard
        result_groups = []
        result = self.conn.search(search_base=self.base,
                                  search_scope=SUBTREE,
                                  search_filter=filter,
                                  attributes='cn')
        if result:
            result = json.loads(self.conn.response_to_json())['entries']
            for group in result:
                group_name = group['attributes']['cn']
                result_groups.append(group_name)
        if not result_groups:
            print('Unable to find group {}, skipping group wildcard'.format(groups_wildcard))
        return result_groups

    def get_user_attributes(self, dn, attr_list):
        """
        Retrieves list of attributes of an LDAP user

        Args:
            :param dn: The LDAP distinguished name to lookup
            :param attr_list: List of attributes to extract

        Returns:
            A dict with the requested attributes, or None when the DN is
            not found.
        """
        filter = '(distinguishedName=%s)' % dn
        # BUG FIX: the search result was discarded and the connection
        # object's truthiness tested instead (always truthy); test the
        # actual search outcome.
        found = self.conn.search(search_base=self.base,
                                 search_filter=filter,
                                 search_scope=SUBTREE,
                                 attributes=attr_list)
        if not found:
            return None
        result = json.loads(self.conn.response_to_json())['entries'][0]['attributes']
        return result
class TeamCityClient(object):
    """Thin TeamCity REST API client used to mirror LDAP groups and users.

    All requests are made through a single authenticated session whose
    headers select the JSON representation of the API.
    """
    def __init__(self, config, ldap_object):
        self.rest_url = '{url}/app/rest/'.format(url=config.tc_server)
        self.ldap_object = ldap_object
        self.ldap_groups = config.ldap_groups
        self.session = requests.Session()
        self.session.auth = (config.tc_username, config.tc_password)
        self.session.headers.update({'Content-type': 'application/json', 'Accept': 'application/json'})
        # NOTE(review): many calls below pass verify=False explicitly, which
        # overrides this session-level setting — confirm that is intended.
        self.session.verify = config.tc_verify_certificate
        self.tc_groups = TeamCityClient.get_tc_groups(self)
        self.tc_users = TeamCityClient.get_tc_users(self)
    def get_tc_groups(self):
        # Return the list of group dicts (name/key/href) defined in TeamCity.
        url = self.rest_url + 'userGroups'
        groups_in_tc = self.session.get(url, verify=False).json()
        return [group for group in groups_in_tc['group']]
    def get_tc_users(self):
        # Return the usernames of all users known to TeamCity.
        url = self.rest_url + 'users'
        users = self.session.get(url).json()['user']
        return [user['username'] for user in users]
    def get_user_groups(self, user):
        # Return the groups JSON for one user, or an error string on failure.
        url = self.rest_url + 'users/' + user + '/groups'
        resp = self.session.get(url, verify=False)
        if resp.status_code == 200:
            return resp.json()
        elif resp.status_code != 200:
            return "Error: Couldn't find user {}\n{}".format(user, resp.content)
    def get_users_from_group(self, group_name):
        # Return the usernames in a TeamCity group; [] when the group is unknown.
        if [group['key'] for group in self.tc_groups if group['name'] == group_name]:
            key = [group['key'] for group in self.tc_groups if group['name'] == group_name][0]
            url = self.rest_url + 'userGroups/key:' + key
            resp = self.session.get(url, verify=False)
            if resp.status_code != 200:
                # NOTE(review): this Exception is constructed but never
                # raised, so the HTTP error is effectively ignored — confirm.
                Exception("Error: Couldn't find group {}\n{}".format(group_name, resp.content))
            users = resp.json()['users']['user']
            return [user['username'] for user in users if users]
        else:
            return []
    def add_user_to_group(self, user, group_name):
        # Append the group to the user's group list and PUT it back.
        print("Adding user {} to group {}".format(user, group_name))
        url = self.rest_url + 'users/' + user + '/groups'
        user_groups = TeamCityClient.get_user_groups(self, user)
        href = [group['href'] for group in self.tc_groups if group['name'] == group_name][0]
        key = [group['key'] for group in self.tc_groups if group['name'] == group_name][0]
        new_group = {u'href': href,
                     u'name': group_name,
                     u'key': key}
        user_groups['group'].append(new_group)
        data = json.dumps(user_groups)
        resp = self.session.put(url, data=data, verify=False)
        if resp.status_code != 200:
            print("Error: Couldn't add user {} to group {}\n{}".format(user, group_name, resp.content))
    def remove_user_from_group(self, user, group_name):
        # Drop the group from the user's group list and PUT it back.
        print("Removing user {} from group {}".format(user, group_name))
        url = self.rest_url + 'users/' + user + '/groups'
        user_groups = TeamCityClient.get_user_groups(self, user)
        for group in user_groups['group']:
            if group['name'] == group_name:
                user_groups['group'].remove(group)
        data = json.dumps(user_groups)
        resp = self.session.put(url, data=data, verify=False)
        if resp.status_code != 200:
            print("Error: Couldn't remove user {} from group {}\n{}".format(user, group_name, resp.content))
    def create_group(self, group_name):
        # Create a TeamCity group with a randomly generated key.
        print("Creating group {} in TC".format(group_name))
        url = self.rest_url + 'userGroups'
        # NOTE(review): '<KEY>' looks like a redacted character-set literal
        # (originally alphanumerics) — restore the real charset before use.
        key = ''.join(random.choice('<KEY>') for i in range(16))
        data = json.dumps({"name": group_name, "key": key})
        resp = self.session.post(url, verify=False, data=data)
        if resp.status_code == 200:
            self.tc_groups = TeamCityClient.get_tc_groups(self)
        else:
            print("Error: Couldn't create group {}\n{}".format(group_name, resp.content))
    def create_user(self, user):
        # Create a TeamCity user from a dict with username/name/email keys.
        print("Creating user {}".format(user['username']))
        url = self.rest_url + 'users'
        if not user['email']:
            user['email'] = ''
        data = json.dumps({u'username': user['username'], u'name': user['name'], u'email': user['email']})
        resp = self.session.post(url, verify=False, data=data)
        if resp.status_code == 200:
            self.tc_users = TeamCityClient.get_tc_users(self)
        else:
            print("Error: Couldn't create user {}\n{}".format(user['username'], resp.content))
    def start_sync(self):
        # For every configured LDAP group: create missing TC groups/users and
        # reconcile group membership in both directions.
        for ldap_group in self.ldap_groups:
            if self.ldap_object.group_exist(ldap_group):
                print("Syncing group: {}\n{}".format(ldap_group, "=" * 20))
                # Get users from LDAP group
                ldap_group_users = self.ldap_object.get_group_members(ldap_group)
                # Create group if not exists
                tc_groups = [gr['name'] for gr in self.tc_groups]
                if ldap_group not in tc_groups:
                    TeamCityClient.create_group(self, ldap_group)
                # Create users if they not exist
                for login, dn in ldap_group_users.items():
                    if login not in self.tc_users:
                        attr_list = ['sn', 'givenName', 'mail']
                        attributes = self.ldap_object.get_user_attributes(dn, attr_list)
                        user = {
                            'username': login,
                            'name': attributes['givenName'] + ' ' + attributes['sn'] if attributes['sn'] else login,
                            'email': attributes.get('mail', '')
                        }
                        TeamCityClient.create_user(self, user)
                # Get users from TC group
                tc_group_users = TeamCityClient.get_users_from_group(self, ldap_group)
                # Add users to TC group
                for user in ldap_group_users.keys():
                    if user not in tc_group_users:
                        TeamCityClient.add_user_to_group(self, user, ldap_group)
                # Remove users from TC group
                for user in tc_group_users:
                    if user not in ldap_group_users.keys():
                        TeamCityClient.remove_user_from_group(self, user, ldap_group)
            else:
                print("Couldnt find group {}".format(ldap_group))
def main():
    """Entry point: load the configuration, connect to LDAP and run the sync."""
    cli_args = get_args()
    cfg_parser = configparser.RawConfigParser()
    cfg_parser.read(cli_args.file)
    config = TeamCityLDAPConfig(cfg_parser)
    with LDAPConnector(cli_args, config) as ldap_conn:
        # Expand wildcard group patterns, if any were configured.
        if config.ldap_wildcard:
            config.set_groups_with_wildcard(ldap_conn)
        TeamCityClient(config, ldap_conn).start_sync()
if __name__ == '__main__':
    # Script entry point.
    main()
| StarcoderdataPython |
4992114 | <filename>Python_Scripts_for_extracting_named_events/_main_.py
"""
@author: alex
"""
from load_wp_term_relationships import load_wp_term_relationships
from load_wp_term_taxonomy_and_wp_terms import merge_wp_term_taxonomy_and_read_wp_terms
from make_relationships_events import make_associations_between_dfs
from make_lookup_table import make_lookup_table
from extract_valid_data import extract_valid_data
from use_vectorizer import execute_use_vectorizer
def main():
    """Pipeline: join WordPress term tables, export event CSVs, then vectorize posts.

    NOTE(review): output paths are absolute and machine-specific (/home/alex/...);
    consider making them configurable.
    """
    __new_df_wp_term_relationships = load_wp_term_relationships()
    # print(__new_df_wp_term_relationships)
    __new_df_wp_term_taxonomy_and_wp_terms = merge_wp_term_taxonomy_and_read_wp_terms()
    # print(__new_df_wp_term_taxonomy_and_wp_terms)
    # Associate post ids with their taxonomy terms.
    __new_final_clean_df_wp_posts = make_associations_between_dfs(__new_df_wp_term_relationships,__new_df_wp_term_taxonomy_and_wp_terms)
    __df_events = __new_final_clean_df_wp_posts[['object_id','taxonomy','name']].copy()
    # print(__df_events)
    __df_events.to_csv("/home/alex/NewslinesNamedEventsPrediction/csv_tables_db/output1_of_main.csv", sep=',', encoding='utf-8')
    __new_df_events, __set_of_events = make_lookup_table(__df_events)
    __new_df_events.to_csv("/home/alex/NewslinesNamedEventsPrediction/csv_tables_db/output2_of_main.csv", sep=',', encoding='utf-8')
    __new_df_wp_posts = extract_valid_data()
    #__np_events = __new_df_events.values
    execute_use_vectorizer(__new_df_wp_posts, __df_events)
if __name__ == "__main__": main() | StarcoderdataPython |
6492153 | <filename>tmp/even.py
# -*- coding: utf-8 -*-
# Callback 1: produce an even number of the form 2k.
def double(x):
    """Return the input multiplied by two."""
    doubled = x * 2
    return doubled
# Callback 2: produce an even number of the form 4k.
def quadruple(x):
    """Return the input multiplied by four."""
    result = x * 4
    return result
| StarcoderdataPython |
1974520 | """Test funsies cleaning."""
# std
from signal import SIGKILL
import time
# funsies
import funsies as f
def test_cleanup() -> None:
    """Test truncation.

    Runs an operation whose worker kills itself mid-task, then verifies that
    cleanup_funsies() removes the stale "owner" key left behind in the DB.
    """
    # std
    import os

    def kill_self(*inp: bytes) -> bytes:
        # Kill the worker process hard so the operation's owner key is never released.
        pid = os.getpid()
        os.kill(pid, SIGKILL)
        time.sleep(2.0)
        return b"what"  # never reached

    with f.ManagedFun(nworkers=1) as db:
        inp = "\n".join([f"{k}" for k in range(10)]).encode()
        fun = f.reduce(kill_self, inp)
        f.execute(fun)
        # Give the worker time to pick up the task and die.
        time.sleep(0.5)
        key1 = db.get(f._constants.join(f._constants.OPERATIONS, fun.parent, "owner"))
        f._context.cleanup_funsies(db)
        key2 = db.get(f._constants.join(f._constants.OPERATIONS, fun.parent, "owner"))
        # Owner key existed before cleanup (dead worker held it) and is gone after.
        assert key1 is not None
        assert key2 is None
| StarcoderdataPython |
9713075 | """
Module containing the `~halotools.mock_observables.surface_density_in_annulus`
and `~halotools.mock_observables.surface_density_in_cylinder` functions used to
calculate galaxy-galaxy lensing.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from .mass_in_cylinders import total_mass_enclosed_in_stack_of_cylinders
__all__ = ('surface_density_in_annulus', 'surface_density_in_cylinder')
__author__ = ('<NAME>', )
def surface_density_in_annulus(centers, particles, particle_masses,
        downsampling_factor, rp_bins, period,
        num_threads=1, approx_cell1_size=None, approx_cell2_size=None):
    """ Calculate the average surface mass density of particles in a stack of annuli.

    Enclosed masses per cylinder radius are differenced over consecutive
    ``rp_bins`` entries to get per-annulus masses, then divided by the annulus
    area and by the number of stacked centers. The result has length
    ``len(rp_bins) - 1``.
    """
    total_mass_in_stack_of_cylinders = total_mass_enclosed_in_stack_of_cylinders(
        centers, particles, particle_masses, downsampling_factor, rp_bins, period,
        num_threads=num_threads, approx_cell1_size=approx_cell1_size,
        approx_cell2_size=approx_cell2_size)
    # Mass in each annulus = difference between successive enclosed-cylinder masses.
    total_mass_in_stack_of_annuli = np.diff(total_mass_in_stack_of_cylinders)
    rp_sq = rp_bins * rp_bins
    # Annulus area: pi * (r_outer^2 - r_inner^2).
    area_annuli = np.pi * np.diff(rp_sq)
    num_annuli = float(centers.shape[0])
    surface_densities = total_mass_in_stack_of_annuli / (area_annuli * num_annuli)
    return surface_densities
def surface_density_in_cylinder(centers, particles, particle_masses,
        downsampling_factor, rp_bins, period,
        num_threads=1, approx_cell1_size=None, approx_cell2_size=None):
    """ Calculate the average surface mass density of particles in a stack of cylinders.

    (The previous docstring said "annuli"; this function normalises the total
    enclosed cylinder mass by the full circle area pi * rp^2, one value per
    entry of ``rp_bins``.)
    """
    total_mass_in_stack_of_cylinders = total_mass_enclosed_in_stack_of_cylinders(
        centers, particles, particle_masses, downsampling_factor, rp_bins, period,
        num_threads=num_threads, approx_cell1_size=approx_cell1_size,
        approx_cell2_size=approx_cell2_size)
    area_cylinders = np.pi * rp_bins * rp_bins
    num_cylinders = float(centers.shape[0])
    surface_densities = total_mass_in_stack_of_cylinders / (area_cylinders * num_cylinders)
    return surface_densities
| StarcoderdataPython |
5148604 | import os
import sys
import cppimport.config
def check_contains_cppimport(filepath):
    """Return True when the first line of *filepath* mentions ``cppimport``."""
    with open(filepath, "r") as source_file:
        first_line = source_file.readline()
    return "cppimport" in first_line
def find_file_in_folders(filename, paths, opt_in):
    """Search each directory in *paths* for *filename*; return the full path or None.

    When *opt_in* is true, a matching file only qualifies if its first line
    mentions ``cppimport`` (see check_contains_cppimport).
    """
    for directory in paths:
        # Skip entries that do not exist or are plain files rather than directories.
        if not os.path.exists(directory) or os.path.isfile(directory):
            continue
        for entry in os.listdir(directory):
            if entry != filename:
                continue
            candidate = os.path.join(directory, entry)
            if not opt_in or check_contains_cppimport(candidate):
                return candidate
    return None
def find_matching_path_dirs(moduledir):
    """Return sys.path itself when *moduledir* is empty; otherwise return every
    sys.path entry that contains *moduledir* as an existing subdirectory."""
    if moduledir == "":
        return sys.path
    candidates = (os.path.join(root, moduledir) for root in sys.path)
    return [path for path in candidates
            if os.path.exists(path) and os.path.isdir(path)]
def _find_module_cpppath(modulename, opt_in=False):
    """Map a dotted module name to a matching C++ source file path, or None.

    The module's package part is resolved against sys.path; each candidate
    directory is then searched for the module basename with every extension
    listed in cppimport.config.file_exts.
    """
    # "pkg.mod" -> "pkg/mod"; the ".throwaway" suffix only exists so dirname()
    # yields the package directory even when the name has no dot.
    modulepath_without_ext = modulename.replace(".", os.sep)
    moduledir = os.path.dirname(modulepath_without_ext + ".throwaway")
    matching_dirs = find_matching_path_dirs(moduledir)
    # Normalise: empty entries mean the CWD, relative entries become absolute.
    matching_dirs = [os.getcwd() if d == "" else d for d in matching_dirs]
    matching_dirs = [
        d if os.path.isabs(d) else os.path.join(os.getcwd(), d) for d in matching_dirs
    ]
    # First extension that yields a hit wins.
    for ext in cppimport.config.file_exts:
        modulefilename = os.path.basename(modulepath_without_ext + ext)
        outfilename = find_file_in_folders(modulefilename, matching_dirs, opt_in)
        if outfilename is not None:
            return outfilename
    return None
def find_module_cpppath(modulename, opt_in=False):
    """Resolve *modulename* to a C++ source path, raising ImportError when absent."""
    filepath = _find_module_cpppath(modulename, opt_in)
    if filepath is not None:
        return filepath
    raise ImportError(
        "Couldn't find a file matching the module name: %s (note: opt_in = %s)"
        % (modulename, opt_in)
    )
| StarcoderdataPython |
9651248 | <reponame>vmthunder/nova
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Image caching and management.
"""
import os
from oslo.config import cfg
from oslo.utils import excutils
from oslo.utils import units
from nova.compute import flavors
from nova.i18n import _
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmutils
from nova.virt import images
# Module-level logger and global oslo.config handle; use_cow_images (defined in
# nova.virt.driver) decides whether get_cached_image resizes base VHDs.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_cow_images', 'nova.virt.driver')
class ImageCache(object):
    """Fetches Glance images to the Hyper-V base-VHD cache and resizes them on demand."""
    def __init__(self):
        # Platform-specific helpers for path and VHD manipulation.
        self._pathutils = utilsfactory.get_pathutils()
        self._vhdutils = utilsfactory.get_vhdutils()
    def _get_root_vhd_size_gb(self, instance):
        """Return the root disk size (GB), preferring the pre-resize flavor."""
        try:
            # In case of resizes we need the old root disk size
            old_flavor = flavors.extract_flavor(
                instance, prefix='old_')
            return old_flavor['root_gb']
        except KeyError:
            return instance['root_gb']
    def _resize_and_cache_vhd(self, instance, vhd_path):
        """Copy vhd_path to a size-suffixed cache file resized to the flavor's
        root disk, returning the new path (or None when no grow is needed)."""
        vhd_info = self._vhdutils.get_vhd_info(vhd_path)
        vhd_size = vhd_info['MaxInternalSize']
        root_vhd_size_gb = self._get_root_vhd_size_gb(instance)
        root_vhd_size = root_vhd_size_gb * units.Gi
        root_vhd_internal_size = (
            self._vhdutils.get_internal_vhd_size_by_file_size(
                vhd_path, root_vhd_size))
        # Shrinking below the image's max internal size is impossible for VHDs.
        if root_vhd_internal_size < vhd_size:
            raise vmutils.HyperVException(
                _("Cannot resize the image to a size smaller than the VHD "
                  "max. internal size: %(vhd_size)s. Requested disk size: "
                  "%(root_vhd_size)s") %
                {'vhd_size': vhd_size, 'root_vhd_size': root_vhd_size}
            )
        if root_vhd_internal_size > vhd_size:
            path_parts = os.path.splitext(vhd_path)
            # e.g. "<image>_<size_gb>.vhd" -- one cached copy per target size.
            resized_vhd_path = '%s_%s%s' % (path_parts[0],
                                            root_vhd_size_gb,
                                            path_parts[1])
            # Serialized per target path so concurrent spawns don't race the copy.
            @utils.synchronized(resized_vhd_path)
            def copy_and_resize_vhd():
                if not self._pathutils.exists(resized_vhd_path):
                    try:
                        LOG.debug("Copying VHD %(vhd_path)s to "
                                  "%(resized_vhd_path)s",
                                  {'vhd_path': vhd_path,
                                   'resized_vhd_path': resized_vhd_path})
                        self._pathutils.copyfile(vhd_path, resized_vhd_path)
                        LOG.debug("Resizing VHD %(resized_vhd_path)s to new "
                                  "size %(root_vhd_size)s",
                                  {'resized_vhd_path': resized_vhd_path,
                                   'root_vhd_size': root_vhd_size})
                        self._vhdutils.resize_vhd(resized_vhd_path,
                                                  root_vhd_internal_size,
                                                  is_file_max_size=False)
                    except Exception:
                        # Don't leave a half-copied/half-resized file in the cache.
                        with excutils.save_and_reraise_exception():
                            if self._pathutils.exists(resized_vhd_path):
                                self._pathutils.remove(resized_vhd_path)
            copy_and_resize_vhd()
            return resized_vhd_path
    def get_cached_image(self, context, instance):
        """Return the cached base VHD(X) path for the instance's image,
        downloading it from Glance on first use."""
        image_id = instance['image_ref']
        base_vhd_dir = self._pathutils.get_base_vhd_dir()
        base_vhd_path = os.path.join(base_vhd_dir, image_id)
        # Serialized per image so only one download happens per image id.
        @utils.synchronized(base_vhd_path)
        def fetch_image_if_not_existing():
            vhd_path = None
            # Reuse an already-cached copy in either format.
            for format_ext in ['vhd', 'vhdx']:
                test_path = base_vhd_path + '.' + format_ext
                if self._pathutils.exists(test_path):
                    vhd_path = test_path
                    break
            if not vhd_path:
                try:
                    images.fetch(context, image_id, base_vhd_path,
                                 instance['user_id'],
                                 instance['project_id'])
                    # Detect the real format, then give the file its extension.
                    format_ext = self._vhdutils.get_vhd_format(base_vhd_path)
                    vhd_path = base_vhd_path + '.' + format_ext.lower()
                    self._pathutils.rename(base_vhd_path, vhd_path)
                except Exception:
                    # Remove a partially-downloaded image before re-raising.
                    with excutils.save_and_reraise_exception():
                        if self._pathutils.exists(base_vhd_path):
                            self._pathutils.remove(base_vhd_path)
            return vhd_path
        vhd_path = fetch_image_if_not_existing()
        if CONF.use_cow_images and vhd_path.split('.')[-1].lower() == 'vhd':
            # Resize the base VHD image as it's not possible to resize a
            # differencing VHD. This does not apply to VHDX images.
            resized_vhd_path = self._resize_and_cache_vhd(instance, vhd_path)
            if resized_vhd_path:
                return resized_vhd_path
        return vhd_path
| StarcoderdataPython |
5128229 | <gh_stars>10-100
#!/usr/bin/python
# -*- coding: utf-8 -*-
import httplib2 as http
import httplib
import re
from urlparse import urlparse
import pprint
import urllib2
class streamscrobbler:
    """Python 2 SHOUTcast/Icecast stream prober: fetch headers and current song title.

    NOTE(review): this module is Python 2 only (print statements, urllib2, httplib,
    ``except X, e`` syntax) and contains several latent bugs flagged inline below.
    """
    def parse_headers(self, response):
        """Manually read up to ~13 raw header lines from an open response."""
        headers = {}
        # NOTE(review): "int" shadows the builtin; it is just a line counter here.
        int = 0
        while True:
            line = response.readline()
            if line == '\r\n':
                break  # end of headers
            if ':' in line:
                key, value = line.split(':', 1)
                headers[key] = value.rstrip()
            if int == 12:
                break;
            int = int + 1
        return headers
    # this is the fucntion you should call with the url to get all data sorted as a object in the return
    def getServerInfo(self, url):
        """Return {"status": 0|1, "metadata": dict|False|None} for a stream URL."""
        # NOTE(review): bare "print" emits a blank line and the string literal on the
        # next line is a no-op; this was presumably meant to be one print statement.
        print
        "shoutcast check v.2"
        # Playlist URLs are dereferenced to the first stream address.
        if url.endswith('.pls') or url.endswith('listen.pls?sid=1'):
            address = self.checkPLS(url)
        else:
            address = url
        # checkPLS returns False on failure, so only probe real string addresses.
        if isinstance(address, str):
            meta_interval = self.getAllData(address)
        else:
            meta_interval = {"status": 0, "metadata": None}
        return meta_interval
    def getAllData(self, address):
        """Open the stream with an icy-metadata request and classify the server."""
        shoutcast = False
        status = 0
        request = urllib2.Request(address)
        # Pretend to be iTunes so servers return stream data rather than HTML.
        user_agent = 'iTunes/9.1.1'
        request.add_header('User-Agent', user_agent)
        request.add_header('icy-metadata', 1)
        try:
            response = urllib2.urlopen(request, timeout=6)
            headers = self.getHeaders(response)
            pp = pprint.PrettyPrinter(indent=4)
            print "parse headers: "
            pp.pprint(headers)
            # Identify the server software from whichever header is present.
            if "server" in headers:
                shoutcast = headers['server']
            elif "X-Powered-By" in headers:
                shoutcast = headers['X-Powered-By']
            elif "icy-notice1" in headers:
                shoutcast = headers['icy-notice2']
            else:
                shoutcast = bool(1)
            if isinstance(shoutcast, bool):
                if shoutcast is True:
                    # NOTE(review): this path never assigns "metadata", so the
                    # return below raises NameError when it is taken.
                    status = 1
                else:
                    status = 0
                    metadata = False;
            elif "SHOUTcast" in shoutcast:
                status = 1
                metadata = self.shoutcastCheck(response, headers, False)
            # NOTE(review): precedence bug -- this reads ("Icecast") or ("137" in
            # shoutcast); the non-empty literal makes the condition always true,
            # so the StreamMachine/other branches below are unreachable.
            elif "Icecast" or "137" in shoutcast:
                status = 1
                metadata = self.shoutcastCheck(response, headers, True)
            elif "StreamMachine" in shoutcast:
                status = 1
                metadata = self.shoutcastCheck(response, headers, True)
            elif shoutcast is not None:
                status = 1
                metadata = self.shoutcastCheck(response, headers, True)
            else:
                metadata = False
            response.close()
            return {"status": status, "metadata": metadata}
        except urllib2.HTTPError, e:
            print ' Error, HTTPError = ' + str(e.code)
            return {"status": status, "metadata": None}
        except urllib2.URLError, e:
            print " Error, URLError: " + str(e.reason)
            return {"status": status, "metadata": None}
        except Exception, err:
            print " Error: " + str(err)
            return {"status": status, "metadata": None}
    def checkPLS(self, address):
        """Fetch a .pls playlist and return the File1= stream URL, or False."""
        try:
            response = urllib2.urlopen(address, timeout=2)
            for line in response:
                if line.startswith("File1="):
                    stream = line;
            response.close()
            # "stream" only exists if a File1= line was seen.
            if 'stream' in locals():
                return stream[6:]
            else:
                return bool(0)
        except Exception:
            return bool(0)
    def shoutcastCheck(self, response, headers, itsOld):
        """Extract bitrate/content-type and the current StreamTitle metadata.

        itsOld selects between exact-key header access (False) and the more
        tolerant .get() style used for Icecast-like servers (True).
        """
        if itsOld is not True:
            if 'icy-br' in headers:
                bitrate = headers['icy-br']
                bitrate = bitrate.rstrip()
            else:
                bitrate = None
            if 'icy-metaint' in headers:
                icy_metaint_header = headers['icy-metaint']
            else:
                icy_metaint_header = None
            # NOTE(review): contenttype stays unbound when neither key exists.
            if "Content-Type" in headers:
                contenttype = headers['Content-Type']
            elif 'content-type' in headers:
                contenttype = headers['content-type']
        else:
            if 'icy-br' in headers:
                # icy-br may be a comma-separated list; keep the first entry.
                bitrate = headers['icy-br'].split(",")[0]
            else:
                bitrate = None
            if 'icy-metaint' in headers:
                icy_metaint_header = headers['icy-metaint']
            else:
                icy_metaint_header = None
            if headers.get('Content-Type') is not None:
                contenttype = headers.get('Content-Type')
            elif headers.get('content-type') is not None:
                contenttype = headers.get('content-type')
        if icy_metaint_header is not None:
            metaint = int(icy_metaint_header)
            print "icy metaint: " + str(metaint)
            # Read past the audio block so the metadata chunk is in the buffer.
            read_buffer = metaint + 255
            content = response.read(read_buffer)
            start = "StreamTitle='"
            end = "';"
            try:
                title = re.search('%s(.*)%s' % (start, end), content[metaint:]).group(1)
                # Strip StreamUrl fragments and trailing junk from the title.
                title = re.sub("StreamUrl='.*?';", "", title).replace("';", "").replace("StreamUrl='", "")
                title = re.sub("&artist=.*", "", title)
                title = re.sub("http://.*", "", title)
                # NOTE(review): rstrip() returns a new string; this result is discarded.
                title.rstrip()
            except Exception, err:
                print "songtitle error: " + str(err)
                # Fallback: take whatever sits between the first pair of quotes.
                title = content[metaint:].split("'")[1]
            return {'song': title, 'bitrate': bitrate, 'contenttype': contenttype.rstrip()}
        else:
            # NOTE(review): same bare-print bug as in getServerInfo; the literal
            # "No metaint" on its own line is a no-op.
            print
            "No metaint"
            return False
    def getHeaders(self, response):
        """Return response headers as a dict, trying the richest source first."""
        if self.is_empty(response.headers.dict) is False:
            headers = response.headers.dict
        elif hasattr(response.info(),"item") and self.is_empty(response.info().item()) is False:
            headers = response.info().item()
        else:
            headers = self.parse_headers(response)
        return headers
    def is_empty(self, any_structure):
        """Return True for falsy values (inverse of bool())."""
        if any_structure:
            return False
        else:
            return True
    def stripTags(self, text):
        """Remove <...> tag pairs from *text*, one at a time, until none remain."""
        finished = 0
        while not finished:
            finished = 1
            start = text.find("<")
            if start >= 0:
                stop = text[start:].find(">")
                if stop >= 0:
                    text = text[:start] + text[start + stop + 1:]
                    finished = 0
        return text | StarcoderdataPython |
4837205 | import cv2
import os
import sys
from string import Template
# first argument is the haarcascades path
# Python 2 / OpenCV 2.x script: detect faces in each image given on the command
# line and write a copy with cyan rectangles drawn around the detections.
face_cascade_path = sys.argv[1]
face_cascade = cv2.CascadeClassifier(os.path.expanduser(face_cascade_path))
# Detection tuning knobs for detectMultiScale.
scale_factor = 1.1
min_neighbors = 3
min_size = (30, 30)
flags = cv2.cv.CV_HAAR_SCALE_IMAGE
for infname in sys.argv[2:]:
    image_path = os.path.expanduser(infname)
    image = cv2.imread(image_path)
    faces = face_cascade.detectMultiScale(image, scaleFactor = scale_factor, minNeighbors = min_neighbors,
            minSize = min_size, flags = flags)
    for( x, y, w, h ) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 0), 2)
    #outfname = "/tmp/%s.faces.jpg" % os.path.basename(infname)
    # NOTE(review): every input writes the same "faces.jpg", so only the last
    # image's result survives; the commented-out per-input name above avoids that.
    outfname = "faces.jpg"
    cv2.imwrite(os.path.expanduser(outfname), image)
| StarcoderdataPython |
1853230 | import sys
# Competitive-programming idiom: rebind input to raw stdin reads for speed.
# This intentionally shadows the builtin.
input = sys.stdin.readline
a, b = map(int, input().split())
# A value of 1 is promoted above every other card (presumably ace-high rules).
if a == 1:
    a += 13
if b == 1:
    b += 13
# Higher value wins; equal values draw.
if a > b:
    ans = "Alice"
elif a == b:
    ans = "Draw"
else:
    ans = "Bob"
print(ans)
| StarcoderdataPython |
9656899 | <reponame>tkettu/AdventOfCode2017<filename>milliseconds2/milliseconds21.py
import sys
def difference_bw_largest_adn_smallest(line):
    """Return max(values) - min(values) for a whitespace-separated line of ints."""
    values = [int(token) for token in line.split()]
    return max(values) - min(values)
def checksum(nums):
    """Sum the per-line (max - min) spread over every line of the open file *nums*."""
    total = 0
    # readline() returns '' at EOF, which iter() uses as the stop sentinel.
    for line in iter(nums.readline, ''):
        total += difference_bw_largest_adn_smallest(line)
    return total
if __name__ == '__main__':
    # Default puzzle input; an explicit path may be given as the first CLI argument.
    file = 'test_input.txt'
    if len(sys.argv) > 1:
        file = sys.argv[1]
    # Bug fix: the input file was opened but never closed; a context manager
    # guarantees deterministic cleanup.
    with open(file) as nums:
        print(checksum(nums))
| StarcoderdataPython |
3350510 | import csv
import time
import datetime
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
# Scrape window (inclusive) and output settings.
begin_year = 1975
end_year = 2021
file_name = 'weather.csv'
# When True, the CSV is recreated (header rewritten) before scraping starts.
overwrite = True
def get_date(begin, end):
    """Return every date from *begin* to *end* (inclusive) as 'YYYYMMDD' strings.

    Returns an empty list when *end* precedes *begin*.
    """
    first = datetime.datetime.strptime(begin, '%Y%m%d')
    last = datetime.datetime.strptime(end, '%Y%m%d')
    span_days = (last - first).days
    return [(first + datetime.timedelta(days=offset)).strftime('%Y%m%d')
            for offset in range(span_days + 1)]
browser = webdriver.Chrome()
timeout = 100
# Maps the HKO element code (used in the URL) to the CSV column name.
header = {'DATE': 'date', 'TEMP': 'mean_temperature', 'MAX_TEMP': 'max_temperature', 'MIN_TEMP': 'min_temperature',
          'MSLP': 'pressure', 'DEW_PT': 'dew_point_temperature', 'WET_BULB': 'wet_bulb_temperature', 'RH': 'humidity',
          'CLD': 'cloud', 'RF': 'rainfall', 'GRASS': 'grass_temperature', 'SUNSHINE': 'sunshine', 'GLOBAL': 'radiation',
          'EVAPO': 'evaporation', 'PREV_DIR': 'wind_direction', 'MEAN_WIND': 'wind_speed', 'LIGHT_GROUND': 'lightning',
          'VIS_HKA': 'visibility'}
if overwrite:
    with open(file_name, 'w') as f:
        writer = csv.DictWriter(f, fieldnames=[header[k] for k in header.keys()])
        writer.writeheader()
start = time.time()
# One pass per year; rows are accumulated in memory and appended to the CSV.
for year_index, year in enumerate(range(begin_year, end_year + 1)):
    dates_index = get_date('%d0101' % year, '%d1231' % year)
    info = {d: {} for d in dates_index}
    for ele in header.keys():
        # The date column needs no page visit.
        if ele == 'DATE':
            for d in dates_index:
                info[d][header[ele]] = d
            continue
        url = 'https://www.hko.gov.hk/en/cis/dailyElement.htm?ele=%s&y=%d' % (ele, year)
        p = EC.presence_of_element_located((By.XPATH, '/html/body/div[2]/div[2]/div[1]/div[1]/div[4]/div[1]/div[3]/table'))
        # Retry with a fresh browser until the data table actually loads.
        while True:
            try:
                browser.get(url)
                WebDriverWait(browser, timeout).until(p)
            except TimeoutException:
                browser.close()
                browser = webdriver.Chrome()
            else:
                break
        page = browser.find_element_by_xpath('/html/body/div[2]/div[2]/div[1]/div[1]/div[4]/div[1]/div[3]/table')
        check_year = browser.find_element_by_xpath('/html/body/div[2]/div[2]/div[1]/div[1]/div[4]/div[1]/h1').text
        # Skip this element if the page heading does not confirm the requested year.
        if str(year) not in check_year:
            continue
        # Table layout: row index = day of month, column index = month.
        dates = page.find_elements(By.XPATH, 'tr')
        for date in range(1, len(dates)):
            months = dates[date].find_elements(By.XPATH, 'td')
            for month in range(1, len(months)):
                index = '%d%02d%02d' % (year, month, date)
                if index in dates_index:
                    info[index][header[ele]] = months[month].text
    with open(file_name, 'a') as f:
        writer = csv.DictWriter(f, fieldnames=[header[k] for k in header.keys()])
        for d in dates_index:
            writer.writerow(info[d])
    end = time.time()
    print('%s. Processed %d/%d. Average %.2fs/year. %.2fs to go.' % \
          (year, (year_index + 1), end_year - begin_year + 1, (end - start) / (year_index + 1),
           (end - start) / (year_index + 1) * (end_year - begin_year - year_index)))
browser.close()
| StarcoderdataPython |
6619924 | <gh_stars>0
from pyspark.mllib.tree import RandomForest
from pyspark import SparkContext, SparkConf
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.linalg import Vectors
import logging
import time, os, sys
import re
def union(line):
    """Turn one (prediction, row) zip element into a (str, str) tuple of row fields.

    The prediction value is currently unused; the commented-out relevance filter
    in the original kept only positive predictions.
    """
    _prediction, values = line[0], line[1]
    return (str(values[0]), str(values[1]))
def saveCSV(line):
    """Render the first two fields of *line* as one comma-separated CSV record."""
    return ",".join((line[0], line[1]))
# Python 2 Spark job: pair model predictions with test rows and save as CSV.
conf = (SparkConf()
         .setMaster("local")
        .setAppName("My app"))
sc = SparkContext(conf = conf)
# Both inputs are read with 10 partitions -- zip() below requires the two RDDs
# to have identical partitioning and per-partition element counts.
predicitions = sc.textFile("/local/data/recsys/predicitions_real.dat/",10)
data2 = sc.textFile( '/local/data/recsysTales/all_samples/all_clicks_test_i.dat',10)
#data2 = sc.textFile( '/local/data/recsys/rel_test.dat/',9)
data2 = data2.map(lambda line:line.split(','))
#data2.saveAsTextFile('/local/data/recsys/rel_test.dat/',)
#lines = predicitions.count()
print data2.getNumPartitions()
print predicitions.getNumPartitions()
a = predicitions.zip(data2)
print a.take(3)
# NOTE(review): union() always returns a tuple, so the != None filter is a no-op;
# it only matters if the commented-out prediction filter inside union() is restored.
paraSalvar = a.map(union).filter(lambda line: line!=None)
paraSalvar = paraSalvar.map(saveCSV)
print paraSalvar.take(3)
paraSalvar.saveAsTextFile('/local/data/recsysTales/all_samples/output_real')
| StarcoderdataPython |
1915053 | #sort
lst = [1,3,5,10,9,8,7,6,4,2]
# list.sort() sorts in place and returns None, so the first line prints
# "sort=None"; the second shows the now-sorted list.
print ('sort={}'.format(lst.sort()))
print ('sort={}'.format(lst))
#sorted -- by contrast, sorted() returns a new sorted list and leaves lst alone.
lst = [1,3,5,10,9,8,7,6,4,2]
print ('sorted={}'.format(sorted(lst)))
print (lst) | StarcoderdataPython |
11342572 | import cx_Oracle
from database_connections import password_encryption
from database_connections.DatabaseConnection import DatabaseConnection
def get_DatabaseConnection( **kwargs ):
    '''Factory hook: return an Oracle connection instance built from **kwargs.'''
    return Oracle( **kwargs )
class Oracle( DatabaseConnection ):
    '''Oracle-backed DatabaseConnection built on cx_Oracle.

    To run the Oracle module, you need to set the following attributes:
        dsn
        username
        passkey_path
        encpass_path
    or
        host
        port
        sid
        username
        passkey_path
        encpass_path
    '''
    def __init__( self, **kwargs ):
        # All setup is delegated to the shared DatabaseConnection base class,
        # which is expected to invoke init() with the supplied attributes.
        DatabaseConnection.__init__( self, **kwargs)
    def init(self, **kwargs):
        '''Store attributes, resolve credentials/DSN and open connection + cursor.'''
        self.set_atts( kwargs )
        self.get_password()
        self.get_dsn()
        self.get_conn()
        self.get_cursor()
        self.print_atts()
    def get_password(self):
        '''Decrypt and cache the password unless one was supplied explicitly.'''
        if not self.has_attr( 'password' ):
            self.password = password_encryption.custom_decrypt( self.passkey_path, self.encpass_path )
    def get_conn( self, **kwargs ):
        '''Open the cx_Oracle connection and store it on self.conn.'''
        self.conn = cx_Oracle.connect( user = self.username, password = self.password, dsn=self.dsn, **kwargs )
    def get_dsn( self ):
        '''Build self.dsn from host/port (plus optional attrs) when none was given.'''
        if 'dsn' not in vars(self):
            # Only these optional attributes are forwarded to makedsn().
            possible_kwargs = ['sid', 'service_name', 'region', 'sharding_key', 'super_sharding_key']
            dsn_kwargs = {att: value for att, value in vars(self).items()
                          if att in possible_kwargs}
            self.dsn = cx_Oracle.makedsn( self.host, self.port, **dsn_kwargs )
    def get_all_tables( self ):
        '''Return a list of all table names visible to the connected user.'''
        string = """select * from all_tables"""
        df = self.query( query_string = string )
        return list( df['TABLE_NAME'] )
    def create_generic_select( self, table_name, top = None ):
        '''Return "SELECT * FROM <table>", optionally capped by a ROWNUM filter.

        Bug fix: the ROWNUM clause previously lacked a leading space, producing
        invalid SQL such as "SELECT * FROM tWHERE ROWNUM <= 5".
        '''
        string = 'SELECT * FROM ' + str(table_name)
        if top is not None:
            # Leading space keeps the clause from fusing with the table name.
            string += ' WHERE ROWNUM <= ' + str(top)
        return string
| StarcoderdataPython |
6531943 | <reponame>rscohn2/sensepy
# SPDX-FileCopyrightText: 2020 <NAME>
#
# SPDX-License-Identifier: MIT
import logging
import sys
import yaml
from zignalz import cli
# Module-level logger, namespaced to this module.
logger = logging.getLogger(__name__)
class Config:
    """In-memory holder for the YAML configuration tree."""
    def __init__(self):
        # Parsed YAML data; stays None until set() succeeds.
        self.data = None
    def sensor_name(self, device_id, sensor_id):
        """Look up a sensor's display name under devices -> <device> -> sensors."""
        return self.data['devices'][device_id]['sensors'][sensor_id]
    def set(self, file):
        """Load *file* (YAML) into self.data; exit(1) when the file cannot be opened."""
        try:
            logger.info(f'set config: {file}')
            with open(file, 'r') as stream:
                try:
                    self.data = yaml.safe_load(stream)
                    logger.info(f'Loaded config: {self.data}')
                except yaml.YAMLError as exc:
                    # Parse errors are logged but leave self.data unchanged.
                    logger.error(exc)
        except OSError:
            logger.error(f'Cannot open config file: {file}')
            sys.exit(1)
# Module-level singleton used by the CLI command handlers below.
config = Config()
def add_parser(subparsers):
    """Register the ``config`` sub-command and its ``load`` action.

    ``config load <config_file>`` dispatches to the module-level load() handler.
    """
    # Fix: user-facing help text had a typo ("configration").
    parser = subparsers.add_parser('config', help='Manage configuration')
    subparsers = parser.add_subparsers(dest='cmd')
    subparsers.required = True
    load_parser = subparsers.add_parser(
        'load', help='Load a configuration from a yaml file'
    )
    load_parser.set_defaults(func=load)
    load_parser.add_argument(
        'config_file',
        default='zignalz.yaml',
        help='Yaml file with configuration',
    )
def load():
    """Handler for ``config load``: read the YAML file named on the command line.

    Bug fix: Config defines set(), not load(); the previous
    ``config.load(...)`` call raised AttributeError at runtime.
    """
    config.set(cli.args.config_file)
| StarcoderdataPython |
295150 | """Tests for user client API"""
# pylint: disable=unused-argument
import json
import pytest
def test_list_users(api_client):
    """List endpoint returns 200 and the canned user collection."""
    resp = api_client.users.list()
    assert resp.status_code == 200
    assert resp.json() == [{
        "id": 39,
        "username": "01BQRHXRDP1J63DF8QQB6B8TKA",
        "profile": {
            "name": "name",
            "image": "test1.jpg",
            "image_small": "test2.jpg",
            "image_medium": "test3.jpg"
        }
    }]
def test_create_user(api_client):
    """Create sends the expected JSON body and returns 201 with the new user."""
    resp = api_client.users.create(
        'user1',
        email='<EMAIL>',
        profile=dict(
            name="<NAME>",
            image="image1.jpg",
            image_small="image2.jpg",
            image_medium="image3.jpg",
            email_optin=True
        )
    )
    # The request payload must mirror the uid/email/profile arguments exactly.
    assert json.loads(resp.request.body) == {
        "uid": "user1",
        "email": "<EMAIL>",
        "profile": {
            "name": "<NAME>",
            "image": "image1.jpg",
            "image_small": "image2.jpg",
            "image_medium": "image3.jpg",
            "email_optin": True,
        }
    }
    assert resp.status_code == 201
    assert resp.json() == {
        "id": 127,
        "username": "01C53XW2FCHQE1PAPHZMH036HF",
        "profile": {
            "name": "<NAME>",
            "image": "image1.jpg",
            "image_small": "image2.jpg",
            "image_medium": "image3.jpg"
        }
    }
def test_create_user_no_profile_props(api_client):
    """Creating with no profile fields raises AttributeError."""
    with pytest.raises(AttributeError) as err:
        api_client.users.create('user1')
    assert str(err.value) == "No fields provided to create"
def test_create_user_invalid_profile_props(api_client):
    """Creating with an unsupported profile attribute raises AttributeError."""
    with pytest.raises(AttributeError) as err:
        api_client.users.create('user1', profile=dict(bad_arg=2))
    assert str(err.value) == "Profile attribute bad_arg is not supported"
def test_get_user(api_client, use_betamax):
    """Get-by-username returns 200 and the recorded user payload (betamax cassette)."""
    resp = api_client.users.get("01BRMT958T3DW02ZAYDG7N6QCB")
    assert resp.status_code == 200
    assert resp.json() == {
        "id": 41,
        "username": "01BRMT958T3DW02ZAYDG7N6QCB",
        "profile": {
            "name": "<NAME>",
            "image": "image1.jpg",
            "image_small": "image4.jpg",
            "image_medium": "image3.jpg",
        }
    }
def test_update_user(api_client):
    """Partial update sends only the provided fields and returns the merged user."""
    resp = api_client.users.update(
        "01BRMT958T3DW02ZAYDG7N6QCB",
        uid='user1',
        email='<EMAIL>',
        profile=dict(image_small="image4.jpg")
    )
    # Only the explicitly-passed profile field appears in the request body.
    assert json.loads(resp.request.body) == {
        "uid": "user1",
        "email": "<EMAIL>",
        "profile": {
            "image_small": "image4.jpg",
        }
    }
    assert resp.status_code == 200
    assert resp.json() == {
        "id": 41,
        "username": "01BRMT958T3DW02ZAYDG7N6QCB",
        "profile": {
            "name": "<NAME>",
            "image": "image1.jpg",
            "image_small": "image4.jpg",
            "image_medium": "image3.jpg"
        }
    }
def test_update_user_no_profile_props(api_client):
    """Updating with no fields raises AttributeError."""
    with pytest.raises(AttributeError) as err:
        api_client.users.update("01BRMT958T3DW02ZAYDG7N6QCB")
    assert str(err.value) == "No fields provided to update"
def test_update_user_invalid_profile_props(api_client):
    """Updating with an unsupported profile attribute raises AttributeError."""
    with pytest.raises(AttributeError) as err:
        api_client.users.update("01BRMT958T3DW02ZAYDG7N6QCB", profile=dict(bad_arg=2))
    assert str(err.value) == "Profile attribute bad_arg is not supported"
| StarcoderdataPython |
11392026 | <filename>Software/lora_callback.py
def lora_cb(lora):
    """MicroPython LoRa interrupt handler: read a packet on RX, log on TX."""
    events = lora.events()
    if events & LoRa.RX_PACKET_EVENT:
        print('Lora packet received')
        # `s` is a module-level LoRa socket -- presumably opened elsewhere; verify.
        data = s.recv(64)
        print(data)
    if events & LoRa.TX_PACKET_EVENT:
        print('Lora packet sent')
# Register the handler for both RX and TX interrupts on the module-level lora object.
lora.callback(trigger=(LoRa.RX_PACKET_EVENT | LoRa.TX_PACKET_EVENT), handler=lora_cb)
| StarcoderdataPython |
5134508 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 13 01:29:08 2020
@author: MMOHTASHIM
"""
import pickle
import numpy
from tensorflow import keras
from tensorflow.keras.applications.mobilenet import MobileNet
import argparse
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
import os
from tqdm import tqdm
# CLI: -p = directory of own-image pickles, -s = directory of Flickr pickles.
# NOTE(review): the description names InceptionResNetV2, but the model built
# below is MobileNet.
parser=argparse.ArgumentParser(description="Encoding Image through InceptionResNetV2")
parser.add_argument('-p','--PATH',help="The Path for your Own Image Pickles Dictionary")
parser.add_argument('-s','--PATH_Filker',help="The Path for your Filker Pickles Dictionary")
args=parser.parse_args()
def deep_encoder_model():
    """Build an ImageNet-pretrained MobileNet truncated at layer 'conv_pw_13'.

    The truncated output is reshaped to (7, 7, 1024) by the caller (see main).
    """
    base_model=MobileNet(include_top=True, weights='imagenet')
    model=Model(inputs=base_model.input,outputs=base_model.get_layer('conv_pw_13').output)##hamper with this
    print(model.summary())
    return model
def main(path_1,path_2):
    """Encode every image pickle in *path_1* (own images) and *path_2* (Flickr30k)
    with the truncated MobileNet and write the encodings under the CWD.

    path_1 -- directory of pickled {image_key: image array} dicts (own images)
    path_2 -- directory of pickled dicts for the Flickr30k set
    """
    # Ensure both output directories exist before writing any pickle.
    if not os.path.isdir(os.path.join(os.getcwd(),'dic_filker30k_images--encoded')):
        os.mkdir(os.path.join(os.getcwd(),'dic_filker30k_images--encoded'))
    if not os.path.isdir(os.path.join(os.getcwd(),'dic_my_images--encoded')):
        os.mkdir(os.path.join(os.getcwd(),'dic_my_images--encoded'))
    model = deep_encoder_model()
    filker_pickles = os.listdir(path_2)
    print(filker_pickles)
    own_images_pickles = os.listdir(path_1)
    for filker_pickle in tqdm(filker_pickles):
        # Bug fix: pickle handles were opened but never closed; context managers
        # guarantee they are released even if load/dump raises.
        with open(os.path.join(path_2, filker_pickle), "rb") as pickle_in:
            filker_dic = pickle.load(pickle_in)
        encoded_filker_dic = {}
        for key_image in filker_dic.keys():
            # assumes each stored image is a 224x224x3 array -- TODO confirm
            encoded_version = model.predict(filker_dic[key_image].reshape(1,224,224,3))
            encoded_filker_dic[key_image] = encoded_version.reshape(7,7,1024)
        out_path = os.getcwd()+"\\dic_filker30k_images--encoded\\{}-encoded.pickle".format(filker_pickle[:-7])
        with open(out_path, "wb") as pickle_out:
            pickle.dump(encoded_filker_dic, pickle_out)
    for own_image_pickle in tqdm(own_images_pickles):
        with open(os.path.join(path_1, own_image_pickle), "rb") as pickle_in:
            own_image_dic = pickle.load(pickle_in)
        encoded_own_image_dic = {}
        for key_image in own_image_dic.keys():
            encoded_version = model.predict(own_image_dic[key_image].reshape(1,224,224,3))
            encoded_own_image_dic[key_image] = encoded_version.reshape(7,7,1024)
        out_path = os.getcwd()+"\\dic_my_images--encoded\\{}-encoded.pickle".format(own_image_pickle[:-7])
        with open(out_path, "wb") as pickle_out:
            pickle.dump(encoded_own_image_dic, pickle_out)
if __name__=="__main__":
    main(args.PATH,args.PATH_Filker)
| StarcoderdataPython |
4847619 | from aacharts.aaenum.AAEnum import *
from aacharts.aaoptionsmodel.AAScrollablePlotArea import AAScrollablePlotArea
from aacharts.aaoptionsmodel.AAStyle import AAStyle
from aacharts.aaoptionsmodel.AAStyle import AAStyle
from aacharts.aaenum.AAEnum import *
from aacharts.aaoptionsmodel.AAYAxis import AAYAxis
from aacharts.aaoptionsmodel.AALabels import AALabels
from aacharts.aaoptionsmodel.AAXAxis import AAXAxis
from aacharts.aaoptionsmodel.AALabels import AALabels
from aacharts.aaoptionsmodel.AADataLabels import AADataLabels
from aacharts.aaoptionsmodel.AAMarker import AAMarker
from aacharts.aaoptionsmodel.AASeries import AASeries
from aacharts.aaoptionsmodel.AASubtitle import AASubtitle
from aacharts.aaoptionsmodel.AAOptions import AAOptions
from aacharts.aaoptionsmodel.AATitle import AATitle
from aacharts.aaoptionsmodel.AAChart import AAChart
from aacharts.aaoptionsmodel.AATooltip import AATooltip
from aacharts.aaoptionsmodel.AAPlotOptions import AAPlotOptions
from aacharts.aaoptionsmodel.AAAnimation import AAAnimation
from aacharts.aaoptionsmodel.AALegend import AALegend
from aacharts.aaoptionsmodel.AACredits import AACredits
from aacharts.aaoptionsmodel.AAScrollablePlotArea import AAScrollablePlotArea
class AAChartModel:
    """Declarative chart configuration model (AAChartKit / Highcharts style).

    Every option below has a matching fluent ``<option>Set`` method that
    assigns the value and returns ``self``, so a chart can be configured as a
    method chain.  ``aa_toAAOptions()`` converts the model into the low-level
    ``AAOptions`` object that is actually rendered.
    """
    animationType: AAChartAnimationType = AAChartAnimationType.bounce # The type of chart animation
    animationDuration: int = None # The chart rendering animation duration
    title: str = "" # The chart title
    titleStyle: AAStyle = None # The chart title style
    subtitle: str = None # The chart subtitle
    subtitleAlign: AAChartAlignType = None # The chart subtitle text align style
    subtitleStyle: AAStyle = None # The chart subtitle style
    chartType: AAChartType = AAChartType.column # The default series type for the chart. Can be any of the chart types listed under `AAChartType`. Defaults to line
    stacking: AAChartStackingType = AAChartStackingType.none # Whether to stack the values of each series on top of each other. Possible values are null to disable, "normal" to stack by value or "percent". When stacking is enabled, data must be sorted in ascending X order
    markerSymbol: AAChartSymbolType = AAChartSymbolType.circle # A predefined shape or symbol for the marker. When null, the symbol is pulled from options.symbols. Other possible values are "circle", "square", "diamond", "triangle" and "triangle-down"
    markerSymbolStyle: AAChartSymbolStyleType = None # The style of the series point markers (see AAChartSymbolStyleType)
    zoomType: AAChartZoomType = None # Decides in what dimensions the user can zoom by dragging the mouse. Can be one of x, y or xy
    inverted: bool = None # Whether to invert the axes so that the x axis is vertical and y axis is horizontal. When true, the x axis is reversed by default. If a bar series is present in the chart, it will be inverted automatically.Inverting the chart doesn't have an effect if there are no cartesian series in the chart, or if the chart is polar.Defaults to false
    xAxisReversed: bool = None # Whether to reverse the axis so that the highest number is closest to the origin. If the chart is inverted, the x axis is reversed by default. Defaults to false
    yAxisReversed: bool = None # Whether to reverse the axis so that the highest number is closest to the origin. If the chart is inverted, the x axis is reversed by default. Defaults to false
    crosshairs: bool = None # Enable or disable the crosshairs
    polar: bool = None # When true, cartesian charts like line, spline, area and column are transformed into the polar coordinate system. Requires `AAHighchartsMore.js`. Defaults to false
    margin: list = None
    dataLabelsEnabled: bool = None # Enable or disable the data labels. Defaults to false
    dataLabelsStyle: AAStyle = None # The data labels style
    xAxisLabelsEnabled: bool = None # Enable or disable the axis labels. Defaults to true
    xAxisLabelsStyle: AAStyle = None # The x axis labels style
    categories: list = None # Set new categories for the axis
    xAxisGridLineWidth: float = None # The width of the grid lines extending the ticks across the plot area.Defaults to 0
    xAxisVisible: bool = None # Show the x axis or not
    xAxisTickinterval: float = None # Custom x axis tick interval,It is useful when the x categories array is too long to show all of them
    yAxisVisible: bool = None # Show the y axis or not
    yAxisLabelsEnabled: bool = None # Enable or disable the axis labels. Defaults to true
    yAxisLabelsStyle: AAStyle = None # The y axis labels style
    yAxisTitle: str = None # The actual text of the axis title
    xAxisTitle: str = None # The actual text of the axis title
    yAxisLineWidth: float = None # The width of y axis line
    yAxisGridLineWidth: float = None # The width of the grid lines extending the ticks across the plot area. Defaults to 1
    yAxisMin: float = None # The y axis mini value
    yAxisMax: float = None # The y axis max value
    yAxisAllowDecimals: bool = None # The y axis values label allow decimals or not
    tooltipEnabled: bool = None # Show the tooltip or not
    tooltipValueSuffix: str = None # Custom tooltip value unit suffix
    colorsTheme: list = None # An array containing the default colors for the chart's series. When all colors are used, new colors are pulled from the start again. Defaults to: ["#bb250c","#f67210","#fde680","#257679","#f1c6c5"]
    series: list = None # An array of all the chart's series
    legendEnabled: bool = None # Enable or disable the legend. Defaults to true
    backgroundColor: str = None # The background color or gradient for the outer chart area. Defaults to #FFFFFF
    borderRadius: float = None # The corner radius of the outer chart border. Defaults to 0
    markerRadius: float = None # The radius of the point marker. Defaults to 4
    touchEventEnabled: bool = None # Support touch event call back or not
    scrollablePlotArea: AAScrollablePlotArea = None # Scroll properties if supported
    # --- Fluent setters: each assigns the matching attribute and returns self ---
    def animationTypeSet(self, prop: AAChartAnimationType):
        self.animationType = prop
        return self
    def animationDurationSet(self, prop: int):
        self.animationDuration = prop
        return self
    def titleSet(self, prop: str):
        self.title = prop
        return self
    def titleStyleSet(self, prop: AAStyle):
        self.titleStyle = prop
        return self
    def subtitleSet(self, prop: str):
        self.subtitle = prop
        return self
    def subtitleAlignSet(self, prop: AAChartAlignType):
        self.subtitleAlign = prop
        return self
    def subtitleStyleSet(self, prop: AAStyle):
        self.subtitleStyle = prop
        return self
    def chartTypeSet(self, prop: AAChartType):
        self.chartType = prop
        return self
    def stackingSet(self, prop: AAChartStackingType):
        self.stacking = prop
        return self
    def markerRadiusSet(self, prop: float):
        self.markerRadius = prop
        return self
    def markerSymbolSet(self, prop: AAChartSymbolType):
        self.markerSymbol = prop
        return self
    def markerSymbolStyleSet(self, prop: AAChartSymbolStyleType):
        self.markerSymbolStyle = prop
        return self
    def zoomTypeSet(self, prop: AAChartZoomType):
        self.zoomType = prop
        return self
    def invertedSet(self, prop: bool):
        self.inverted = prop
        return self
    def xAxisReversedSet(self, prop: bool):
        self.xAxisReversed = prop
        return self
    def yAxisReversedSet(self, prop: bool):
        self.yAxisReversed = prop
        return self
    def tooltipEnabledSet(self, prop: bool):
        self.tooltipEnabled = prop
        return self
    def tooltipValueSuffixSet(self, prop: str):
        self.tooltipValueSuffix = prop
        return self
    def polarSet(self, prop: bool):
        self.polar = prop
        return self
    def marginSet(self, top: float = 0, right: float = 0, bottom: float = 0, left: float = 0):
        self.margin = [top, right, bottom, left]
        return self
    def dataLabelsEnabledSet(self, prop: bool):
        self.dataLabelsEnabled = prop
        return self
    def dataLabelsStyleSet(self, prop: AAStyle):
        self.dataLabelsStyle = prop
        return self
    def xAxisLabelsEnabledSet(self, prop: bool):
        self.xAxisLabelsEnabled = prop
        return self
    def xAxisLabelsStyleSet(self, prop: AAStyle):
        self.xAxisLabelsStyle = prop
        return self
    def categoriesSet(self, prop: list):
        self.categories = prop
        return self
    def xAxisGridLineWidthSet(self, prop: float):
        self.xAxisGridLineWidth = prop
        return self
    def xAxisVisibleSet(self, prop: bool):
        self.xAxisVisible = prop
        return self
    def xAxisTickintervalSet(self, prop: float):
        self.xAxisTickinterval = prop
        return self
    def yAxisVisibleSet(self, prop: bool):
        self.yAxisVisible = prop
        return self
    def yAxisLabelsEnabledSet(self, prop: bool):
        self.yAxisLabelsEnabled = prop
        return self
    def yAxisLabelsStyleSet(self, prop: AAStyle):
        self.yAxisLabelsStyle = prop
        return self
    def yAxisTitleSet(self, prop: str):
        self.yAxisTitle = prop
        return self
    def xAxisTitleSet(self, prop: str):
        self.xAxisTitle = prop
        return self
    def yAxisLineWidthSet(self, prop: float):
        self.yAxisLineWidth = prop
        return self
    def yAxisMinSet(self, prop: float):
        self.yAxisMin = prop
        return self
    def yAxisMaxSet(self, prop: float):
        self.yAxisMax = prop
        return self
    def yAxisAllowDecimalsSet(self, prop: bool):
        self.yAxisAllowDecimals = prop
        return self
    def yAxisGridLineWidthSet(self, prop: float):
        self.yAxisGridLineWidth = prop
        return self
    def colorsThemeSet(self, prop: list):
        self.colorsTheme = prop
        return self
    def seriesSet(self, prop: list):
        self.series = prop
        return self
    def legendEnabledSet(self, prop: bool):
        self.legendEnabled = prop
        return self
    def backgroundColorSet(self, prop: str):
        self.backgroundColor = prop
        return self
    def borderRadiusSet(self, prop: float):
        self.borderRadius = prop
        return self
    def touchEventEnabledSet(self, prop: bool):
        self.touchEventEnabled = prop
        return self
    def scrollablePlotAreaSet(self, prop: AAScrollablePlotArea):
        self.scrollablePlotArea = prop
        return self
    def aa_toAAOptions(self):
        """Convert this model into the low-level AAOptions object."""
        # Imported locally rather than at module level - presumably to avoid a
        # circular import with AAOptionsComposer; TODO confirm.
        from aacharts.aachartcreator.AAOptionsComposer import AAOptionsComposer
        aaOptions = AAOptionsComposer.configureChartOptions(self)
        return aaOptions
| StarcoderdataPython |
1681242 | import os
import sys
import json
from time import sleep
from importlib import import_module
from helper import bold_str, sprintf, cursorUpLines, setupFiles
# Default configuration file
dfltCfgFile = "config_dflt.py"
# Import default configuration file.
# Not using `import config_dflt` because of printing option (`networkUsage.py -s`)
# and we want to have only one instance of default config file name - `dfltCfgFile`
# (printDfltCfgFile() re-opens the file by this exact name to dump it verbatim).
config_dflt = import_module(dfltCfgFile.replace(".py", ""))
def displayHelp():
    """Print the command line usage help for this script to stdout."""
    print("""\
Script to monitor network usage.
Tested with python 3.9.5.
python networkUsage.py [OPTIONS]
Options:
-h, --help show this help
-f, --file json file location - OPTIONAL
If this parameter is not specified the file is located
at './logs/networkUsage.json' (relative to this script location).
-p, --print print network usage to the terminal also - OPTIONAL
-u, --interval update interval in seconds - OPTIONAL
Interval at which network usage is updated.
Defaults to 1 second.
-i, --interfaces interfaces to monitor - OPTIONAL
Full or partial (Regular Expression) interface name.
For multiple interfaces separate them with comma (without space).
Example: -i 'eth0:,wl*'
-i 'eth0:','wl*'
-c, --config custom configuration file - OPTIONAL
For example take a look at 'config_dflt.py' or
pass '-s' to script to print 'config_dflt.py' to terminal.
-s, --show print default configuration file to the terminal
""")
# https://www.tutorialspoint.com/python/python_command_line_arguments.htm
# Ignore "C901 'checkArgs' is too complex" - I don't think that it is too complex, also
# I don't know how mccabe's complexity could be reduced for this function
def checkArgs(networkUsageParam, argv):  # noqa: C901
    """Parse command line options into the ``networkUsageParam`` dict.

    Exits the process for the help/show options or on an option parse error.
    """
    import getopt

    try:
        parsed_options, _ = getopt.getopt(
            argv,
            "hf:pu:i:c:s",
            ["help", "file=", "print", "interval=", "interfaces=",
             "config=", "show"])
    except getopt.GetoptError:
        displayHelp()
        sys.exit(1)

    for option, value in parsed_options:
        if option in ("-h", "--help"):
            displayHelp()
            sys.exit(0)
        elif option in ("-f", "--file"):
            networkUsageParam["networkUsageFile"] = value
        elif option in ("-p", "--print"):
            networkUsageParam["printUsageToTerminal"] = True
        elif option in ("-u", "--interval"):
            networkUsageParam["updateInterval"] = float(value)
        elif option in ("-i", "--interfaces"):
            # "wl*,enp*,eth?:" -> ["wl*", "enp*", "eth?:"]
            networkUsageParam["desiredInterfaces"] = value.split(",")
        elif option in ("-c", "--config"):
            # Overlay values from a user supplied configuration module.
            importConfigModule(value, networkUsageParam)
        elif option in ("-s", "--show"):
            printDfltCfgFile()
def printDfltCfgFile():
    """Dump the default configuration file to stdout, then exit successfully."""
    with open(dfltCfgFile, "r") as cfg_file:
        print(cfg_file.read())
    sys.exit(0)
def importConfigModule(moduleName, networkUsageParam):
    """Overlay settings from a user supplied configuration module.

    Imports ``moduleName`` (a trailing ``.py`` is stripped first) and copies
    every recognised setting the module defines into ``networkUsageParam``.
    Settings the module omits are left untouched, so partial configuration
    files override only a subset of the defaults.
    """
    moduleName = moduleName.replace(".py", "")
    networkUsageConfig = import_module(moduleName)
    # Data-driven copy replaces four identical try/except AttributeError
    # blocks; behavior is unchanged.
    for setting in ("networkUsageFile", "printUsageToTerminal",
                    "updateInterval", "desiredInterfaces"):
        if hasattr(networkUsageConfig, setting):
            networkUsageParam[setting] = getattr(networkUsageConfig, setting)
def renderNetworkSpeed(byps):
    """Return a human readable speed string for a bytes-per-second value."""
    for factor, unit in ((10 ** 9, "GB"), (10 ** 6, "MB")):
        if byps >= factor:
            return sprintf("{:0.2f} " + unit, byps / factor)
    return sprintf("{:0.2f} kB", byps / (10 ** 3))
class networkUsageClass:
    """Monitor RX/TX throughput of selected network interfaces.

    Reads byte counters from /proc/net/dev via shell pipelines, computes
    per-interval bytes/second rates, writes them to a JSON file and
    optionally prints them to the terminal.
    """
    def __init__(self,
                 desiredInterfaces=config_dflt.desiredInterfaces,
                 updateInterval=config_dflt.updateInterval,
                 networkUsageFile=config_dflt.networkUsageFile,
                 printUsageToTerminal=config_dflt.printUsageToTerminal):
        # Used to select which interfaces to monitor.
        self.desiredInterfaces = desiredInterfaces
        # Update interval in seconds
        self.updateInterval = updateInterval
        # Where to save current network usage json file
        self.networkUsageFile = networkUsageFile
        # Print network usage to terminal also (not just to json file)
        self.printUsageToTerminal = printUsageToTerminal
        # Array of files (full path) which are checked at setup
        # Needed in case some parent folder doesn't exists
        self.filesToSetup = [
            self.networkUsageFile,
        ]
        # Command which is used to get RX bytes for specific interface
        # NOTE(review): the interface name is interpolated into a shell
        # pipeline; names originate from /proc/net/dev so this is assumed
        # safe - confirm if desiredInterfaces can come from untrusted input.
        self.cmd_getRxBytes = "cat /proc/net/dev | grep {:s} | awk '{{printf(\"%s\\n\", $2)}}'"
        # Command which is used to get TX bytes for specific interface
        self.cmd_getTxBytes = "cat /proc/net/dev | grep {:s} | awk '{{printf(\"%s\\n\", $10)}}'"
        # Command which is used to get all available interfaces
        self.cmd_getAvailableInterfaces = "cat /proc/net/dev | grep : | awk '{printf(\"%s\\n\", $1)}' | sed 's/://g'"
        # Array which is going to hold:
        # - names of interfaces,
        # - commands to get current bytes (cmd_getRxBytes, cmd_getTxBytes),
        # - previously used bytes by interface (byRx_prev, byTx_prev),
        # - current network usage (bypsRx, bypsTx).
        self.selectedInterfaces = []
        # Store current network usage of all interfaces
        self.totalUsage = {"bypsRx": 0, "bypsTx": 0}
        # Setup parent folders of files in filesToSetup array
        setupFiles(self.filesToSetup)
        # Get all the available interfaces, filter available interfaces to only the ones which we
        # desire then setup commands to get bytes used by each interface and get starting bytes
        availableInterfaces = self.getAvailableInterfaces()
        self.filterDesiredInterfaces(availableInterfaces)
        self.numbOfMonitoringInterfaces = len(self.selectedInterfaces)
        self.printConfig()
        if self.numbOfMonitoringInterfaces <= 0:
            print("No interfaces selected!")
            sys.exit(1)
        self.setupInterfaces()
    def printConfig(self):
        """Print the effective configuration and the monitored interfaces."""
        print(bold_str("Json file") + ": " + self.networkUsageFile)
        print(bold_str("Printing to terminal") + ": " + str(self.printUsageToTerminal))
        print(bold_str("Update interval") + ": " + str(self.updateInterval))
        print(bold_str("Desired interfaces to monitor") + ": " + str(self.desiredInterfaces))
        for i in range(self.numbOfMonitoringInterfaces):
            if i == 0:
                print(bold_str("Monitoring interfaces") + ": - " + self.selectedInterfaces[i]["name"])
            else:
                print(" - " + self.selectedInterfaces[i]["name"])
    # Get new bytes used by specified interface
    def getBytes(self, interface):
        byRx = int(os.popen(interface["cmd_getRxBytes"]).read())
        byTx = int(os.popen(interface["cmd_getTxBytes"]).read())
        return byRx, byTx
    # Store new bytes used by specified interface
    def storeFreshBytes(self, interface, byRx, byTx):
        interface["byRx_prev"] = byRx
        interface["byTx_prev"] = byTx
    # Get all available interface
    # Return available interfaces
    def getAvailableInterfaces(self):
        interfaces = os.popen(self.cmd_getAvailableInterfaces).read().splitlines()
        return interfaces
    # From all available interfaces filter only desired one
    # Return filtered/selected interfaces (in json format)
    def filterDesiredInterfaces(self, availableInterfaces):
        import re
        for availableInterface in availableInterfaces:
            for desiredInterface in self.desiredInterfaces:
                # Each desired interface is treated as a regular expression
                # that must match at the start of the interface name.
                pattern = re.compile(desiredInterface)
                if pattern.match(availableInterface):
                    self.selectedInterfaces.append({"name": availableInterface,
                                                    "bypsRx": 0,
                                                    "bypsTx": 0})
    # Setup interfaces (prepare commands used to get RX and TX bytes,
    # get starting RX and TX bytes)
    def setupInterfaces(self):
        for interface in self.selectedInterfaces:
            interface["cmd_getRxBytes"] = sprintf(self.cmd_getRxBytes, interface["name"])
            interface["cmd_getTxBytes"] = sprintf(self.cmd_getTxBytes, interface["name"])
            byRx, byTx = self.getBytes(interface)
            self.storeFreshBytes(interface, byRx, byTx)
    # Print network usage by specified interface in kB
    def printUsage(self):
        for interface in self.selectedInterfaces:
            print(sprintf("{:s} - Rx: {:s}, Tx: {:s}",
                          interface["name"],
                          renderNetworkSpeed(interface["bypsRx"]),
                          renderNetworkSpeed(interface["bypsTx"])))
        print(sprintf("{:s} - Rx: {:s}, Tx: {:s}",
                      "total",
                      renderNetworkSpeed(self.totalUsage["bypsRx"]),
                      renderNetworkSpeed(self.totalUsage["bypsTx"])))
    # Store interfaces to json file
    # Also add usage by all interfaces - { "name": "total", "bypsRx": 100, "bypsTx": 100 }
    def storeToFile(self):
        jsonFileData = []
        for interface in self.selectedInterfaces:
            jsonFileData.append({
                "name": interface["name"],
                "bypsRx": interface["bypsRx"],
                "bypsTx": interface["bypsTx"],
            })
        jsonFileData.append({
            "name": "total",
            "bypsRx": self.totalUsage["bypsRx"],
            "bypsTx": self.totalUsage["bypsTx"],
        })
        with open(self.networkUsageFile, "w") as file:
            file.write(json.dumps(jsonFileData))
    # Monitor network usage
    # - loop through all interfaces
    # - store to json file
    # - sleep until the next measurement
    def monitorNetworkUsage(self):
        if self.printUsageToTerminal is True:
            print(bold_str("Network usage") + ":")
            self.printUsage()
        while True:
            self.totalUsage["bypsRx"] = 0
            self.totalUsage["bypsTx"] = 0
            for interface in self.selectedInterfaces:
                byRx, byTx = self.getBytes(interface)
                # Rate = counter delta divided by the sampling interval.
                interface["bypsRx"] = round((byRx - interface["byRx_prev"]) / self.updateInterval)
                interface["bypsTx"] = round((byTx - interface["byTx_prev"]) / self.updateInterval)
                self.storeFreshBytes(interface, byRx, byTx)
                self.totalUsage["bypsRx"] += interface["bypsRx"]
                self.totalUsage["bypsTx"] += interface["bypsTx"]
            self.storeToFile()
            if self.printUsageToTerminal is True:
                cursorUpLines(self.numbOfMonitoringInterfaces + 1)
                self.printUsage()
            sleep(self.updateInterval)
# Start networkUsage script
# - check that all files can be opened/created (check file path)
# - get all the available interfaces to monitor
# - filter available interfaces to select only the desired ones
# - setup interfaces (prepare commands, make first measurement/monitoring...)
# - monitor network usage
def main():
    """Assemble runtime parameters and start the monitoring loop.

    Defaults come from the default configuration module; any command line
    arguments may override them.
    """
    params = {
        "desiredInterfaces": config_dflt.desiredInterfaces,
        "updateInterval": config_dflt.updateInterval,
        "networkUsageFile": config_dflt.networkUsageFile,
        "printUsageToTerminal": config_dflt.printUsageToTerminal,
    }
    if len(sys.argv) > 1:
        checkArgs(params, sys.argv[1:])
    monitor = networkUsageClass(**params)
    monitor.monitorNetworkUsage()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
11248651 | import os
import json
from datetime import datetime as dt
from flask import request, send_file
from werkzeug.utils import secure_filename
from taky.dps import app
def url_for(f_hash):
    """
    Returns the URL for the given hash
    """
    base_url = request.host_url
    return "{0}Marti/sync/content?hash={1}".format(base_url, f_hash)
def get_meta(f_hash=None, f_name=None):
    """
    Gets the metadata for an assigned filename or hash.

    Returns an empty dict when no key is given, the file is missing, or the
    stored JSON is unreadable.
    """
    lookup_key = f_hash if f_hash else f_name
    if not lookup_key:
        return {}
    meta_path = os.path.join(app.config["UPLOAD_PATH"], "meta", f"{lookup_key}.json")
    try:
        with open(meta_path) as meta_fp:
            return json.load(meta_fp)
    except (json.JSONDecodeError, OSError):
        return {}
def put_meta(meta):
    """
    Persist metadata for an uploaded file and index it by content hash.

    Writes the metadata to ``meta/<UID>.json`` and creates a symlink
    ``meta/<Hash>.json -> <UID>.json`` so the entry can be looked up either
    by file name (UID) or by content hash (see get_meta).
    """
    filename = meta.get("UID")
    f_hash = meta.get("Hash")
    # Save the file's meta/<filename>.json. The previous code wrote a
    # literal "(unknown).json" (the f-strings had no placeholder), so every
    # upload clobbered one shared file and get_meta(f_name=...) never found
    # anything.
    meta_path = os.path.join(app.config["UPLOAD_PATH"], "meta", f"{filename}.json")
    with open(meta_path, "w") as meta_fp:
        json.dump(meta, meta_fp)
    # Symlink meta/{f_hash}.json to the per-file metadata
    meta_hash_path = os.path.join(app.config["UPLOAD_PATH"], "meta", f"{f_hash}.json")
    try:
        os.symlink(f"{filename}.json", meta_hash_path)
    except FileExistsError:
        # Re-upload of the same hash: the link is already in place.
        pass
@app.route("/Marti/sync/search")
def datapackage_search():
    """
    Search for a datapackage.

    Arguments:
        keywords=missionpackage
        tool=public
        keywords=request.args.get('keywords')
    """
    results = []
    upload_dir = app.config["UPLOAD_PATH"]
    for entry in os.listdir(upload_dir):
        if not os.path.isfile(os.path.join(upload_dir, entry)):
            continue
        # TODO: Check if keywords are in meta['Keywords'] or meta['UID']
        meta = get_meta(f_name=entry)
        if meta and meta.get("Visibility", "public") == "public":
            results.append(meta)
    return {"resultCount": len(results), "results": results}
@app.route("/Marti/sync/content")
def datapackage_get():
    """
    Download a datapackage.

    Arguments:
        hash: The file hash
        receiver: The client downloading the file
    """
    try:
        f_hash = request.args["hash"]
    except KeyError:
        return "Must supply hash", 400
    meta = get_meta(f_hash=f_hash)
    if not meta:
        # Unknown hash: without this guard meta["UID"] raised KeyError and
        # surfaced as a 500 instead of a clean 404.
        return "File not found", 404
    name = os.path.join(app.config["UPLOAD_PATH"], meta["UID"])
    if not os.path.exists(name):
        return f"Can't find {name}", 404
    return send_file(name, as_attachment=True, attachment_filename=meta["Name"])
@app.route("/Marti/sync/missionupload", methods=["POST"])
def datapackage_upload():
    """
    Upload a datapackage to the server.

    Arguments:
        hash=...
        filename=... (lacking extension)
        creatorUid=ANDROID-43...
    Return:
        The URL where the file can be downloaded
    """
    try:
        asset_fp = request.files["assetfile"]
        creator_uid = request.args["creatorUid"]
        f_hash = request.args["hash"]
    except KeyError:
        return "Invalid arguments", 400
    filename = secure_filename(f"{creator_uid}_{asset_fp.filename}")
    # If the same UID is re-uploaded with different content, drop the stale
    # hash symlink so put_meta can create a fresh one.
    meta = get_meta(f_name=filename)
    if meta.get("Hash") != f_hash:
        old_meta_hash_path = os.path.join(
            app.config["UPLOAD_PATH"], "meta", f'{meta.get("Hash")}.json'
        )
        try:
            os.unlink(old_meta_hash_path)
        except OSError:
            # Best effort: the old link may simply not exist. Narrowed from
            # a bare `except:` so programming errors are no longer hidden.
            pass
    # Save the uploaded file
    file_path = os.path.join(app.config["UPLOAD_PATH"], filename)
    asset_fp.save(file_path)
    sub_user = request.headers.get("X-USER", "Anonymous")
    meta = {
        "UID": filename,  # What the file will be saved as
        "Name": asset_fp.filename,  # File name on the server
        "Hash": f_hash,  # SHA-256, checked
        "PrimaryKey": 1,  # Not used, must be >= 0
        "SubmissionDateTime": dt.utcnow().isoformat() + "Z",
        "SubmissionUser": sub_user,
        "CreatorUid": creator_uid,
        "Keywords": "kw",
        "MIMEType": asset_fp.mimetype,
        "Size": os.path.getsize(file_path),  # Checked, do not fake
        "Visibility": "private",
    }
    put_meta(meta)
    # src/main/java/com/atakmap/android/missionpackage/http/MissionPackageDownloader.java:539
    # This is needed for client-to-client data package transmission
    return url_for(f_hash)
@app.route("/Marti/api/sync/metadata/<f_hash>/tool", methods=["PUT"])
def datapackage_metadata_tool(f_hash):
    """
    Update the "tool" for the datapackage (ie: public / private).
    """
    meta = get_meta(f_hash=f_hash)
    if not meta:
        return f"Could not find file matching {f_hash}", 404
    requested = request.get_data().decode("utf-8")
    visibility = "public" if requested == "public" else "private"
    if meta.get("Visibility", "private") != visibility:
        # Persist only when the visibility actually changes.
        meta["Visibility"] = visibility
        put_meta(meta)
    return url_for(f_hash)
@app.route("/Marti/sync/missionquery")
def datapackage_exists():
    """
    Called when trying to determine if the file exists on the server.

    Arguments:
        hash: The file hash
    """
    f_hash = request.args.get("hash")
    if f_hash is None:
        return "Must supply hash", 400
    meta = get_meta(f_hash)
    if not meta:
        return "File not found", 404
    return url_for(f_hash)
| StarcoderdataPython |
"""
# rebotes.py
# Ejercicio 1.5
@author: <NAME>
"""
# Exercise: a ball dropped from 100 units rebounds to 60% of the previous
# height on every bounce; print the height of the first ten rebounds.
altura = 100
rebota = altura * 0.6
for toca_piso in range(1, 11):
    print(round(rebota, ndigits=4))
    rebota = rebota * 0.6
"""
resultado:
60.0
36.0
21.6
12.96
7.776
4.6656
2.7994
1.6796
1.0078
0.6047
"""
6949 | <filename>setup.py<gh_stars>1-10
import os, os.path
import subprocess
from distutils.core import setup
from py2exe.build_exe import py2exe
PROGRAM_NAME = 'icom_app'
PROGRAM_DESC = 'simple icom app'
NSIS_SCRIPT_TEMPLATE = r"""
!define py2exeOutputDirectory '{output_dir}\'
!define exe '{program_name}.exe'
; Uses solid LZMA compression. Can be slow, use discretion.
SetCompressor /SOLID lzma
; Sets the title bar text (although NSIS seems to append "Installer")
Caption "{program_desc}"
Name '{program_name}'
OutFile ${{exe}}
Icon '{icon_location}'
; Use XPs styles where appropriate
XPStyle on
; You can opt for a silent install, but if your packaged app takes a long time
; to extract, users might get confused. The method used here is to show a dialog
; box with a progress bar as the installer unpacks the data.
;SilentInstall silent
AutoCloseWindow true
ShowInstDetails nevershow
Section
DetailPrint "Extracting application..."
SetDetailsPrint none
InitPluginsDir
SetOutPath '$PLUGINSDIR'
File /r '${{py2exeOutputDirectory}}\*'
GetTempFileName $0
;DetailPrint $0
Delete $0
StrCpy $0 '$0.bat'
FileOpen $1 $0 'w'
FileWrite $1 '@echo off$\r$\n'
StrCpy $2 $TEMP 2
FileWrite $1 '$2$\r$\n'
FileWrite $1 'cd $PLUGINSDIR$\r$\n'
FileWrite $1 '${{exe}}$\r$\n'
FileClose $1
; Hide the window just before the real app launches. Otherwise you have two
; programs with the same icon hanging around, and it's confusing.
HideWindow
nsExec::Exec $0
Delete $0
SectionEnd
"""
class NSISScript(object):
    """Renders the NSIS installer script and compiles it with makensis."""

    NSIS_COMPILE = "makensis"

    def __init__(self, program_name, program_desc, dist_dir, icon_loc):
        self.program_name = program_name
        self.program_desc = program_desc
        self.dist_dir = dist_dir
        self.icon_loc = icon_loc
        self.pathname = "setup_%s.nsi" % self.program_name

    def create(self):
        """Fill the NSIS template and write it to self.pathname."""
        script_text = NSIS_SCRIPT_TEMPLATE.format(
            program_name=self.program_name,
            program_desc=self.program_desc,
            output_dir=self.dist_dir,
            icon_location=os.path.join(self.dist_dir, self.icon_loc))
        with open(self.pathname, "w") as script_file:
            script_file.write(script_text)

    def compile(self):
        """Run makensis on the generated script; raise on a non-zero exit."""
        # "/P5" uses realtime priority for the LZMA compression stage.
        # This can get annoying though.
        nsis_proc = subprocess.Popen(
            [self.NSIS_COMPILE, self.pathname, "/P5"], env=os.environ)
        nsis_proc.communicate()
        if nsis_proc.returncode:
            raise RuntimeError("NSIS compilation return code: %d" % nsis_proc.returncode)
class build_installer(py2exe):
    # This class first builds the exe file(s), then creates an NSIS installer
    # that runs your program from a temporary directory.
    # NOTE(review): Python 2 only (print statements), matching classic py2exe.
    def run(self):
        # First, let py2exe do it's work.
        py2exe.run(self)
        # lib_dir is unused below; kept as-is for parity with dist_dir.
        lib_dir = self.lib_dir
        dist_dir = self.dist_dir
        # Create the installer, using the files py2exe has created.
        script = NSISScript(PROGRAM_NAME,
                            PROGRAM_DESC,
                            dist_dir,
                            os.path.join('.', 'icon.ico'))
        print "*** creating the NSIS setup script***"
        script.create()
        print "*** compiling the NSIS setup script***"
        script.compile()
zipfile = r"lib\shardlib"
setup(
    name = 'MyApp',
    description = 'My Application',
    version = '1.0',
    # py2exe collects GUI build targets from the "windows" keyword; the
    # previous "window" keyword is unknown to py2exe, so no GUI exe target
    # was registered at all.
    windows = [
        {
            'script': os.path.join('.','ICOM.py'),
            'icon_resources': [(1, os.path.join('.', 'icom.ico'))],
            'dest_base': PROGRAM_NAME,
        },
    ],
    options = {
        'py2exe': {
            # Py2exe options...
            "optimize": 2
        }
    },
    zipfile = zipfile,
    data_files = [],# etc...
    cmdclass = {"py2exe": build_installer},
)
4806263 | <filename>client/python/easemlclient/easemlclient/model/type.py
"""
Implementation of the `ApiType` class.
"""
import requests
from copy import deepcopy
from enum import Enum
from typing import Dict, Any, TypeVar, Generic, Optional, Tuple, List, Type
from .core import Connection
T = TypeVar('T', bound='ApiType')


class ApiType(Generic[T]):
    """Base class for ease.ml REST API resource types.

    Holds the raw dictionary representation of a resource (``_dict``) and a
    separate dictionary of local, not-yet-submitted field changes
    (``_updates``).  Subclasses represent concrete resources.
    """

    def __init__(self: T, input: Dict[str, Any]) -> None:
        # Deep-copied so later mutation of the caller's dictionary cannot
        # silently change this object's state.
        self._dict: Dict[str, Any] = deepcopy(input)
        # Local modifications still to be pushed via POST/PATCH.
        self._updates: Dict[str, Any] = {}
        # Concrete type used to build instances returned by the HTTP helpers.
        self.T: Type[T] = type(self)

    def _post(self: T, connection: Connection, url: str) -> T:
        """Create the resource on the server; return the resulting object."""
        resp = requests.post(url, auth=connection.auth, json={**self._dict, **self._updates})
        resp.raise_for_status()
        return self.T({**self._dict, **self._updates})

    def _patch(self: T, connection: Connection, url: str) -> T:
        """Submit only the pending updates; return the merged object."""
        resp = requests.patch(url, auth=connection.auth, json=self._updates)
        resp.raise_for_status()
        return self.T({**self._dict, **self._updates})

    def _get(self: T, connection: Connection, url: str) -> T:
        """Fetch the resource; the payload is expected under the "data" key."""
        resp = requests.get(url, auth=connection.auth)
        resp.raise_for_status()
        payload = resp.json()
        return self.T(payload["data"])

    def _download(self: T, connection: Connection, url: str) -> bytes:
        """Download and return the raw response body from the given URL."""
        resp = requests.get(url, auth=connection.auth)
        resp.raise_for_status()
        return resp.content

    @classmethod
    def from_dict(cls, input: Dict[str, Any]) -> 'ApiType':
        """Create an instance from its dictionary representation.

        The dictionary is the constructor's single argument.  The previous
        ``cls(**input)`` spread the payload's keys as keyword arguments and
        raised ``TypeError`` for any real payload.

        Parameters
        ----------
        input : Dict[str, Any]
            The dictionary that can be used to reconstruct the instance.

        Returns
        -------
        ApiType
            Instance of the reconstructed class.
        """
        return cls(input)
# Type variable bound to query classes, used for the fluent/paged query API.
Q = TypeVar('Q', bound='ApiQuery')
class ApiQueryOrder(Enum):
    """Sort direction for list queries ("asc" / "desc" on the wire)."""
    ASC = "asc"
    DESC = "desc"
class ApiQuery(Generic[T, Q]):
    """Builder for paged "list" queries against the ease.ml REST API.

    Collects query-string parameters and offers ``_run`` to execute the
    query, returning the decoded items plus a follow-up query object for the
    next page (or ``None`` when there are no more pages).
    """

    def __init__(self: Q, order_by: Optional[str] = None, order: Optional[ApiQueryOrder] = None,
                 limit: Optional[int] = None, cursor: Optional[str] = None) -> None:
        self._query: Dict[str, Any] = {}
        if order_by is not None:
            # The REST API uses dash-separated field names.
            order_by = order_by.replace("_", "-")
            self._query["order-by"] = order_by
        if order is not None:
            # Send the enum's wire value ("asc"/"desc").  Storing the enum
            # object itself made requests urlencode it as its str() form
            # (e.g. "ApiQueryOrder.ASC").  Plain strings are passed through
            # unchanged for backward compatibility.
            self._query["order"] = order.value if isinstance(order, ApiQueryOrder) else order
        if limit is not None:
            self._query["limit"] = limit
        if cursor is not None:
            self._query["cursor"] = cursor
        # Subclasses overwrite this with the concrete ApiType they return.
        self.T: Type[T] = ApiType

    def _run(self: Q, connection: Connection, url: str) -> Tuple[List[T], Optional[Q]]:
        """Execute the query; return (items, next_page_query_or_None)."""
        resp = requests.get(url, auth=connection.auth, params=self._query)
        resp.raise_for_status()
        payload = resp.json()
        payload_data = payload["data"] or []
        result = [self.T(x) for x in payload_data]
        next_query: Optional[Q] = None
        next_cursor = payload.get("metadata", {}).get("next-page-cursor", "")
        if next_cursor is not None and next_cursor != "":
            # Clone this query and point it at the next page.
            next_query = deepcopy(self)
            next_query._query["cursor"] = next_cursor
        return result, next_query
| StarcoderdataPython |
''' from math import hypot
a = float(input('Digite o valor de um cateto: '))
b = float(input('Digite o valor de outro cateto: '))
print('A hipotenusa dos catetos {} e {} é igual a {:.2f}: '.format(a, b, hypot(a, b))) '''
# or, equivalently, computing the hypotenuse by hand:
from math import sqrt

cateto_a = float(input('Digite o valor de um cateto: '))
cateto_b = float(input('Digite o valor de outro cateto: '))
hipotenusa = sqrt(pow(cateto_a, 2) + pow(cateto_b, 2))
print('A hipotenusa dos catetos {} e {} é igual a {:.2f}'.format(cateto_a, cateto_b, hipotenusa))
| StarcoderdataPython |
8104391 | <reponame>mail2nsrajesh/networking-bagpipe
# Copyright (c) 2016 Orange.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants
from oslo_utils import uuidutils
import testscenarios
from neutron.tests.fullstack.resources import machine
from networking_bagpipe.tests.fullstack import base
from networking_bagpipe.tests.fullstack.resources.bgpvpn \
import config as bgpvpn_cfg
load_tests = testscenarios.load_tests_apply_scenarios
class TestConnectivitySameBGPVPN(base.BaGPipeBaseFullStackTestCase):
    """Fullstack connectivity tests for ports attached to the same BGPVPN.

    Each entry in ``scenarios`` re-runs the tests with a different ML2
    mechanism driver and IPVPN dataplane/encapsulation combination.
    """
    bagpipe_ml2 = False
    evpn_driver = None
    network_type = 'vxlan'
    service_plugins = 'router,%s' % bgpvpn_cfg.BGPVPN_SERVICE
    of_interface = 'ovs-ofctl'
    bgpvpn = True
    # Two ports per network on each of the two simulated compute nodes.
    port_per_compute_per_net = 2
    compute_node_count = 2
    scenarios = [
        ('OpenVSwitch MPLS-over-GRE', {
            'mech_drivers': 'openvswitch',
            'l2_agent_type': constants.AGENT_TYPE_OVS,
            'ipvpn_driver': 'ovs',
            'ipvpn_encap': 'mpls-gre'
        }),
        ('OpenVSwitch bare MPLS', {
            'mech_drivers': 'openvswitch',
            'l2_agent_type': constants.AGENT_TYPE_OVS,
            'ipvpn_driver': 'ovs',
            'ipvpn_encap': 'bare-mpls'
        }),
        ('Linuxbridge', {
            'mech_drivers': 'linuxbridge',
            'l2_agent_type': constants.AGENT_TYPE_LINUXBRIDGE,
            'ipvpn_driver': 'linux',
            'ipvpn_encap': 'bare-mpls',
        })
    ]
    def test_network_connectivity(self):
        """Two networks associated to one BGPVPN: all VMs must ping each other."""
        tenant_uuid = uuidutils.generate_uuid()
        bgpvpn = self.safe_client.create_bgpvpn(tenant_uuid,
                                                route_targets=['64512:1'])
        network_ids = list()
        for subnet_cidr in (base.SUBNET_CIDR1, base.SUBNET_CIDR2):
            network_ids.append(
                self._create_net_subnet_bgpvpn_assoc(tenant_uuid, subnet_cidr,
                                                     bgpvpn['id'])[0]
            )
        fake_machines = list()
        for network_id in network_ids:
            fake_machines.extend([
                self.useFixture(
                    machine.FakeFullstackMachine(
                        self.environment.hosts[i],
                        network_id,
                        tenant_uuid,
                        self.safe_client))
                for i in
                # NOTE(review): ``range(...) * int`` only works on Python 2,
                # where range() returns a list; on Python 3 it raises
                # TypeError -- confirm the intended interpreter.
                range(self.compute_node_count)*self.port_per_compute_per_net])
        vms = machine.FakeFullstackMachinesList(fake_machines)
        vms.block_until_all_boot()
        vms.ping_all()
    def test_router_connectivity(self):
        """Router association plus network association on one BGPVPN: full mesh ping."""
        tenant_uuid = uuidutils.generate_uuid()
        bgpvpn = self.safe_client.create_bgpvpn(tenant_uuid,
                                                route_targets=['64512:1'])
        network1 = self.safe_client.create_network(tenant_uuid)
        subnet1 = self.safe_client.create_subnet(
            tenant_uuid, network1['id'], '10.0.0.0/24')
        network2 = self.safe_client.create_network(tenant_uuid)
        subnet2 = self.safe_client.create_subnet(
            tenant_uuid, network2['id'], '192.168.127.12/24')
        # Router connects subnet1/subnet2 and is itself associated to the BGPVPN.
        router = self.safe_client.create_router(tenant_uuid)
        self.safe_client.add_router_interface(router['id'], subnet1['id'])
        self.safe_client.add_router_interface(router['id'], subnet2['id'])
        self.safe_client.create_router_association(tenant_uuid,
                                                   bgpvpn['id'],
                                                   router['id'])
        # network3 is attached to the same BGPVPN directly (network association).
        network3 = self.safe_client.create_network(tenant_uuid)
        self.safe_client.create_subnet(
            tenant_uuid, network3['id'], '172.16.58.3/24')
        self.safe_client.create_network_association(tenant_uuid,
                                                    bgpvpn['id'],
                                                    network3['id'])
        fake_machines = list()
        for network in (network1, network2, network3):
            fake_machines.extend([
                self.useFixture(
                    machine.FakeFullstackMachine(
                        self.environment.hosts[i],
                        network['id'],
                        tenant_uuid,
                        self.safe_client))
                for i in
                # NOTE(review): Python-2-only ``range * int`` idiom -- see
                # test_network_connectivity in this class.
                range(self.compute_node_count)*self.port_per_compute_per_net])
        vms = machine.FakeFullstackMachinesList(fake_machines)
        vms.block_until_all_boot()
        vms.ping_all()
| StarcoderdataPython |
1880364 | from yargy import (
rule, or_, Parser,
)
from yargy.pipelines import pipeline
from yargy.predicates import (
type, in_,
normalized,
)
from yargy.interpretation import (
fact
)
from natasha.grammars import date, addr
from .helpers import select_span_tokens, ID_TOKENIZER, show_matches, TOKENIZER, load_named_entities
# Predicate matching integer tokens (yargy's ``type`` predicate, which
# shadows the builtin here).
INT = type('INT')
# Interpretation record populated by the grammar rules below.
Socdem = fact(
    'Socdem',
    ['name', 'gender', 'date_of_birth', 'age', 'location']
)
# Maps surface forms (capitalized and lowercase) to normalized gender tags.
GENDERS_DICT = {
    'Женщина': 'female',
    'женщина': 'female',
    'мужчина': 'male',
    'Мужчина': 'male'
}
# Gender: one token from GENDERS_DICT, normalized through the dict lookup.
GENDER = rule(in_(GENDERS_DICT)).interpretation(Socdem.gender.custom(GENDERS_DICT.get))
# Age: an integer followed by an inflected form of the word 'год' (year).
AGE = rule(
    INT,
    normalized('год')
).interpretation(Socdem.age)
# Location: any settlement kind provided by the natasha address grammar.
LOCATION = rule(
    or_(
        addr.GOROD,
        addr.DEREVNYA,
        addr.SELO,
        addr.POSELOK
    )
).interpretation(Socdem.location)
def update_rules(name):
    """Build the socdem grammar for a fixed collection of known names.

    :param name: iterable of name strings fed to a yargy ``pipeline``.
    :return: ``(SOCDEM_ELEMS, SOCDEM)`` -- a rule matching any single
        socdem element (used for a filtering pass), and the full rule
        matching a complete socdem record.
    """
    NAME = pipeline(name).interpretation(Socdem.name)
    # Any standalone socdem element.
    SOCDEM_ELEMS = rule(
        or_(
            NAME,
            GENDER,
            date.DATE,
            AGE,
            LOCATION
        )
    )
    # Full record: name, then optional gender, age and/or birth date in
    # either order, and an optional location.
    SOCDEM = rule(
        NAME,
        GENDER.optional(),
        or_(
            rule(
                AGE.optional(),
                date.DATE.interpretation(Socdem.date_of_birth).optional()),
            rule(
                date.DATE.interpretation(Socdem.date_of_birth).optional(),
                AGE.optional()),
        ),
        LOCATION.optional()
    ).interpretation(Socdem)
    return SOCDEM_ELEMS, SOCDEM
# text = open('CV.txt', encoding='utf-8').read()
class SocdemExtractor:
    """Extracts a socio-demographic record (name, gender, age, ...) from tokens."""
    def __init__(self, name=()):
        # Pre-build both grammars for the given pipeline of known names.
        self.SOCDEM_ELEMS, self.SOCDEM = update_rules(name)
    def find(self, tokens):
        """Return the first full socdem match in ``tokens`` (or None).

        First pass keeps only tokens covered by individual socdem elements;
        the second pass matches the full record on the filtered stream.
        """
        parser = Parser(self.SOCDEM_ELEMS, tokenizer=ID_TOKENIZER)
        matches = parser.findall(tokens)
        spans = [_.span for _ in matches]
        tokens = list(select_span_tokens(tokens, spans))
        # print([_.value for _ in tokens])
        parser = Parser(self.SOCDEM, tokenizer=ID_TOKENIZER)
        match = parser.find(tokens)
        return match
# tokens = list(TOKENIZER(text))
#
# match = SocdemExtractor().find(tokens)
# print(match.fact.as_json)
| StarcoderdataPython |
4842151 | """Shared constants for IPv4 and IPv6."""
# Protocol numbers - http://www.iana.org/assignments/protocol-numbers
# Each IP_PROTO_* constant is the IANA-assigned IP protocol number usable
# in the IPv4 "protocol" field or the IPv6 "next header" field.
IP_PROTO_IP = 0 # dummy for IP
IP_PROTO_HOPOPTS = IP_PROTO_IP # IPv6 hop-by-hop options
IP_PROTO_ICMP = 1 # ICMP
IP_PROTO_IGMP = 2 # IGMP
IP_PROTO_GGP = 3 # gateway-gateway protocol
IP_PROTO_IPIP = 4 # IP in IP
IP_PROTO_ST = 5 # ST datagram mode
IP_PROTO_TCP = 6 # TCP
IP_PROTO_CBT = 7 # CBT
IP_PROTO_EGP = 8 # exterior gateway protocol
IP_PROTO_IGP = 9 # interior gateway protocol
IP_PROTO_BBNRCC = 10 # BBN RCC monitoring
IP_PROTO_NVP = 11 # Network Voice Protocol
IP_PROTO_PUP = 12 # PARC universal packet
IP_PROTO_ARGUS = 13 # ARGUS
IP_PROTO_EMCON = 14 # EMCON
IP_PROTO_XNET = 15 # Cross Net Debugger
IP_PROTO_CHAOS = 16 # Chaos
IP_PROTO_UDP = 17 # UDP
IP_PROTO_MUX = 18 # multiplexing
IP_PROTO_DCNMEAS = 19 # DCN measurement
IP_PROTO_HMP = 20 # Host Monitoring Protocol
IP_PROTO_PRM = 21 # Packet Radio Measurement
IP_PROTO_IDP = 22 # Xerox NS IDP
IP_PROTO_TRUNK1 = 23 # Trunk-1
IP_PROTO_TRUNK2 = 24 # Trunk-2
IP_PROTO_LEAF1 = 25 # Leaf-1
IP_PROTO_LEAF2 = 26 # Leaf-2
IP_PROTO_RDP = 27 # "Reliable Datagram" proto
IP_PROTO_IRTP = 28 # Inet Reliable Transaction
IP_PROTO_TP = 29 # ISO TP class 4
IP_PROTO_NETBLT = 30 # Bulk Data Transfer
IP_PROTO_MFPNSP = 31 # MFE Network Services
IP_PROTO_MERITINP = 32 # Merit Internodal Protocol
IP_PROTO_SEP = 33 # Sequential Exchange proto
IP_PROTO_3PC = 34 # Third Party Connect proto
IP_PROTO_IDPR = 35 # Interdomain Policy Route
IP_PROTO_XTP = 36 # Xpress Transfer Protocol
IP_PROTO_DDP = 37 # Datagram Delivery Proto
IP_PROTO_CMTP = 38 # IDPR Ctrl Message Trans
IP_PROTO_TPPP = 39 # TP++ Transport Protocol
IP_PROTO_IL = 40 # IL Transport Protocol
IP_PROTO_IP6 = 41 # IPv6
IP_PROTO_SDRP = 42 # Source Demand Routing
IP_PROTO_ROUTING = 43 # IPv6 routing header
IP_PROTO_FRAGMENT = 44 # IPv6 fragmentation header
IP_PROTO_RSVP = 46 # Reservation protocol
IP_PROTO_GRE = 47 # General Routing Encap
IP_PROTO_MHRP = 48 # Mobile Host Routing
IP_PROTO_ENA = 49 # ENA
IP_PROTO_ESP = 50 # Encap Security Payload
IP_PROTO_AH = 51 # Authentication Header
IP_PROTO_INLSP = 52 # Integated Net Layer Sec
IP_PROTO_SWIPE = 53 # SWIPE
IP_PROTO_NARP = 54 # NBMA Address Resolution
IP_PROTO_MOBILE = 55 # Mobile IP, RFC 2004
IP_PROTO_TLSP = 56 # Transport Layer Security
IP_PROTO_SKIP = 57 # SKIP
IP_PROTO_ICMP6 = 58 # ICMP for IPv6
IP_PROTO_NONE = 59 # IPv6 no next header
IP_PROTO_DSTOPTS = 60 # IPv6 destination Woptions
IP_PROTO_ANYHOST = 61 # any host internal proto
IP_PROTO_CFTP = 62 # CFTP
IP_PROTO_ANYNET = 63 # any local network
IP_PROTO_EXPAK = 64 # SATNET and Backroom EXPAK
IP_PROTO_KRYPTOLAN = 65 # Kryptolan
IP_PROTO_RVD = 66 # MIT Remote Virtual Disk
IP_PROTO_IPPC = 67 # Inet Pluribus Packet Core
IP_PROTO_DISTFS = 68 # any distributed fs
IP_PROTO_SATMON = 69 # SATNET Monitoring
IP_PROTO_VISA = 70 # VISA Protocol
IP_PROTO_IPCV = 71 # Inet Packet Core Utility
IP_PROTO_CPNX = 72 # Comp Proto Net Executive
IP_PROTO_CPHB = 73 # Comp Protocol Heart Beat
IP_PROTO_WSN = 74 # Wang Span Network
IP_PROTO_PVP = 75 # Packet Video Protocol
IP_PROTO_BRSATMON = 76 # Backroom SATNET Monitor
IP_PROTO_SUNND = 77 # SUN ND Protocol
IP_PROTO_WBMON = 78 # WIDEBAND Monitoring
IP_PROTO_WBEXPAK = 79 # WIDEBAND EXPAK
IP_PROTO_EON = 80 # ISO CNLP
IP_PROTO_VMTP = 81 # Versatile Msg Transport
IP_PROTO_SVMTP = 82 # Secure VMTP
IP_PROTO_VINES = 83 # VINES
IP_PROTO_TTP = 84 # TTP
IP_PROTO_NSFIGP = 85 # NSFNET-IGP
IP_PROTO_DGP = 86 # Dissimilar Gateway Proto
IP_PROTO_TCF = 87 # TCF
IP_PROTO_EIGRP = 88 # EIGRP
IP_PROTO_OSPF = 89 # Open Shortest Path First
IP_PROTO_SPRITERPC = 90 # Sprite RPC Protocol
IP_PROTO_LARP = 91 # Locus Address Resolution
IP_PROTO_MTP = 92 # Multicast Transport Proto
IP_PROTO_AX25 = 93 # AX.25 Frames
IP_PROTO_IPIPENCAP = 94 # yet-another IP encap
IP_PROTO_MICP = 95 # Mobile Internet Ctrl
IP_PROTO_SCCSP = 96 # Semaphore Comm Sec Proto
IP_PROTO_ETHERIP = 97 # Ethernet in IPv4
IP_PROTO_ENCAP = 98 # encapsulation header
IP_PROTO_ANYENC = 99 # private encryption scheme
IP_PROTO_GMTP = 100 # GMTP
IP_PROTO_IFMP = 101 # Ipsilon Flow Mgmt Proto
IP_PROTO_PNNI = 102 # PNNI over IP
IP_PROTO_PIM = 103 # Protocol Indep Multicast
IP_PROTO_ARIS = 104 # ARIS
IP_PROTO_SCPS = 105 # SCPS
IP_PROTO_QNX = 106 # QNX
IP_PROTO_AN = 107 # Active Networks
IP_PROTO_IPCOMP = 108 # IP Payload Compression
IP_PROTO_SNP = 109 # Sitara Networks Protocol
IP_PROTO_COMPAQPEER = 110 # Compaq Peer Protocol
IP_PROTO_IPXIP = 111 # IPX in IP
IP_PROTO_VRRP = 112 # Virtual Router Redundancy
IP_PROTO_PGM = 113 # PGM Reliable Transport
IP_PROTO_ANY0HOP = 114 # 0-hop protocol
IP_PROTO_L2TP = 115 # Layer 2 Tunneling Proto
IP_PROTO_DDX = 116 # D-II Data Exchange (DDX)
IP_PROTO_IATP = 117 # Interactive Agent Xfer
IP_PROTO_STP = 118 # Schedule Transfer Proto
IP_PROTO_SRP = 119 # SpectraLink Radio Proto
IP_PROTO_UTI = 120 # UTI
IP_PROTO_SMP = 121 # Simple Message Protocol
IP_PROTO_SM = 122 # SM
IP_PROTO_PTP = 123 # Performance Transparency
IP_PROTO_ISIS = 124 # ISIS over IPv4
IP_PROTO_FIRE = 125 # FIRE
IP_PROTO_CRTP = 126 # Combat Radio Transport
IP_PROTO_CRUDP = 127 # Combat Radio UDP
IP_PROTO_SSCOPMCE = 128 # SSCOPMCE
IP_PROTO_IPLT = 129 # IPLT
IP_PROTO_SPS = 130 # Secure Packet Shield
IP_PROTO_PIPE = 131 # Private IP Encap in IP
IP_PROTO_SCTP = 132 # Stream Ctrl Transmission
IP_PROTO_FC = 133 # Fibre Channel
IP_PROTO_RSVPIGN = 134 # RSVP-E2E-IGNORE
IP_PROTO_RAW = 255 # Raw IP packets
IP_PROTO_RESERVED = IP_PROTO_RAW # Reserved
IP_PROTO_MAX = 255
12815454 | <filename>magi/agents/sac/config.py
"""Soft Actor-Critic agent parameters."""
import dataclasses
from typing import Optional
from acme import specs
from acme.adders import reverb as adders_reverb
import numpy as np
def target_entropy_from_env_spec(env_spec: specs.EnvironmentSpec) -> float:
    """Heuristic SAC target entropy: minus the number of action dimensions."""
    action_dimensions = np.prod(env_spec.actions.shape)
    return -float(action_dimensions)
@dataclasses.dataclass
class SACConfig:
    """Soft Actor-Critic agent parameters."""

    # Fixed entropy coefficient (alpha); None enables automatic temperature
    # tuning towards ``target_entropy``.
    entropy_coefficient: Optional[float] = None
    target_entropy: float = 0
    # Replay buffer sizing and Reverb table configuration.
    min_replay_size: int = 1000
    max_replay_size: int = 1_000_000
    replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
    prefetch_size: Optional[int] = None
    # Learning hyper-parameters.
    discount: float = 0.99
    batch_size: int = 256
    critic_learning_rate: float = 1e-3
    # Polyak averaging rate for the target critic update.
    critic_soft_update_rate: float = 0.005
    actor_learning_rate: float = 1e-3
    temperature_learning_rate: float = 1e-3
    temperature_adam_b1: float = 0.5
    init_temperature: float = 0.1
3435258 | '''
Copyright (c) 2008 <NAME>, <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import logging
import os
import codecs
from pandac.PandaModules import VirtualFileSystem, Filename
from pandac.PandaModules import DSearchPath
from pano.resources.ResourcesLocation import AbstractResourceLocation
from pano.resources.ResourcesTypes import ResourcesTypes
class MultifileResourcesLocation(AbstractResourceLocation):
    '''
    Offers services for finding loading files from multifiles.

    The multifile is mounted read-only on Panda3D's virtual filesystem at
    ``/<basename of the multifile>``; all lookups go through that mount.
    NOTE: uses ``xrange`` below, so this module targets Python 2.
    '''
    def __init__(self, mfFilename, name, resTypes, hotswap=True, checkPeriod=10):
        AbstractResourceLocation.__init__(self, name, '', resTypes, hotswap, checkPeriod)
        self.log = logging.getLogger('pano.multifileResources')
        # where we are mounted on the virtual filesystem
        self.mountPoint = "/" + os.path.basename(mfFilename)
        # the filename of the multifile
        self.filename = mfFilename
        # a sorted list of all filenames of supported types that were found in self.directory
        self.resourcesNames = []
    def dispose(self):
        # Unmount our multifile from the virtual filesystem.
        vfs = VirtualFileSystem.getGlobalPtr()
        vfs.unmountPoint(Filename(self.mountPoint))
    def indexResources(self):
        # Mount the multifile read-only; contents become visible under
        # self.mountPoint.
        vfs = VirtualFileSystem.getGlobalPtr()
        vfs.mount(Filename(self.filename), self.mountPoint, VirtualFileSystem.MFReadOnly)
    def containsResource(self, filename):
        """Return True if ``filename`` exists inside the mounted multifile."""
        vfs = VirtualFileSystem.getGlobalPtr()
        vfs.chdir(self.mountPoint)
        flag = vfs.exists(Filename(filename))
        # NOTE(review): this second chdir re-enters the mount point instead
        # of restoring the previous working directory -- confirm intent.
        vfs.chdir(self.mountPoint)
        return flag
    def getResourceFullPath(self, filename):
        """Return the resolved full VFS path of ``filename``, or None."""
        vfs = VirtualFileSystem.getGlobalPtr()
        resFile = Filename(filename)
        if vfs.exists(resFile):
            searchPath = DSearchPath()
            searchPath.appendDirectory(self.mountPoint)
            # if the filename was resolved, resFile is updated to include the full path
            if vfs.resolveFilename(resFile, searchPath):
                return resFile.getFullpath()
    def getResourceStream(self, name):
        """Open ``name`` for reading and return the raw istream."""
        vfs = VirtualFileSystem.getGlobalPtr()
        istream = vfs.openReadFile(Filename(name))
        return istream
    def getResourceAsString(self, filename, fullPath = False):
        """
        Returns a string that represents the file's contents.
        @param filename: The resource filename.
        @param fullPath: Specifies if the filename parameter denotes a full path or a base filename.
        """
        vfs = VirtualFileSystem.getGlobalPtr()
        # readFile(..., False): do not auto-unwrap; decode as UTF-8 below.
        fs = vfs.readFile(Filename(filename), False)
        if fs is not None:
            return codecs.decode(fs, "utf-8")
    def getResourceAsByteArray(self, filename, fullPath = False):
        """
        Returns an array of bytes that represent the file's contents.
        It performs the same functionality with getResourceAsString since the str type is used to
        store byte arrays but it won't decode the string since it doesn't make sense for binary data.
        @param filename: The resource filename.
        @param fullPath: Specifies if the filename parameter denotes a full path or a base filename.
        """
        vfs = VirtualFileSystem.getGlobalPtr()
        return vfs.readFile(Filename(filename))
    def _listResourcesImpl(self, parent, resType, fullPaths = True):
        """Recursively collect filenames of ``resType`` under ``parent``."""
        resFiles = []
        directories = []
        vfs = VirtualFileSystem.getGlobalPtr()
        filesList = vfs.scanDirectory(parent)
        if filesList is None:
            # Nothing scannable here; return the (empty) accumulator.
            return directories
        for i in xrange(filesList.getNumFiles()):
            fileEntry = filesList.getFile(i)
            if fileEntry.isDirectory():
                directories.append(fileEntry.getFilename())
                continue
            if ResourcesTypes.isExtensionOfType(fileEntry.getFilename().getExtension(), resType):
                resFiles.append(fileEntry.getFilename().getFullpath() if fullPaths else fileEntry.getFilename().getBasename())
        # Recurse into sub-directories collected above.
        for dir in directories:
            resFiles.extend(self._listResourcesImpl(dir, resType, fullPaths))
        return resFiles
    def listResources(self, resType, fullPaths=True):
        '''
        Returns a list of all resource filenames, of the given type, which are contained in this multifile.
        '''
        vfs = VirtualFileSystem.getGlobalPtr()
        return self._listResourcesImpl(Filename(self.mountPoint), resType, fullPaths)
    def __str__(self):
        return 'Multifile resource location %s, mounted on %s, of type %s' % (self.name, self.mountPoint, self.resTypes)
| StarcoderdataPython |
112680 | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
class VQATokenPad(object):
    """Pad tokenized visual-QA sample dicts up to ``max_seq_len``.

    Pads (or, in inference mode, truncates) the ``input_ids``,
    ``token_type_ids``, ``bbox``, ``labels`` and ``attention_mask`` entries
    of a sample dict and converts them to int64 numpy arrays.
    """

    def __init__(self,
                 max_seq_len=512,
                 pad_to_max_seq_len=True,
                 return_attention_mask=True,
                 return_token_type_ids=True,
                 truncation_strategy="longest_first",
                 return_overflowing_tokens=False,
                 return_special_tokens_mask=False,
                 infer_mode=False,
                 **kwargs):
        self.max_seq_len = max_seq_len
        # Bug fix: keep the caller's flag. Previously this attribute was
        # assigned ``max_seq_len`` (always truthy), so passing
        # ``pad_to_max_seq_len=False`` was silently ignored.
        self.pad_to_max_seq_len = pad_to_max_seq_len
        self.return_attention_mask = return_attention_mask
        self.return_token_type_ids = return_token_type_ids
        self.truncation_strategy = truncation_strategy
        self.return_overflowing_tokens = return_overflowing_tokens
        self.return_special_tokens_mask = return_special_tokens_mask
        # Label value ignored by the loss function; used to pad ``labels``.
        self.pad_token_label_id = paddle.nn.CrossEntropyLoss().ignore_index
        self.infer_mode = infer_mode

    def __call__(self, data):
        """Pad/truncate one sample dict in place and return it."""
        needs_to_be_padded = self.pad_to_max_seq_len and len(data[
            "input_ids"]) < self.max_seq_len

        if needs_to_be_padded:
            # Padding values may be supplied by the tokenizer; otherwise
            # default to right padding with pad_token_id 1.
            if 'tokenizer_params' in data:
                tokenizer_params = data.pop('tokenizer_params')
            else:
                tokenizer_params = dict(
                    padding_side='right', pad_token_type_id=0, pad_token_id=1)

            difference = self.max_seq_len - len(data["input_ids"])
            if tokenizer_params['padding_side'] == 'right':
                if self.return_attention_mask:
                    data["attention_mask"] = [1] * len(data[
                        "input_ids"]) + [0] * difference
                if self.return_token_type_ids:
                    data["token_type_ids"] = (
                        data["token_type_ids"] +
                        [tokenizer_params['pad_token_type_id']] * difference)
                if self.return_special_tokens_mask:
                    data["special_tokens_mask"] = data[
                        "special_tokens_mask"] + [1] * difference
                data["input_ids"] = data["input_ids"] + [
                    tokenizer_params['pad_token_id']
                ] * difference
                if not self.infer_mode:
                    data["labels"] = data[
                        "labels"] + [self.pad_token_label_id] * difference
                data["bbox"] = data["bbox"] + [[0, 0, 0, 0]] * difference
            elif tokenizer_params['padding_side'] == 'left':
                if self.return_attention_mask:
                    data["attention_mask"] = [0] * difference + [
                        1
                    ] * len(data["input_ids"])
                if self.return_token_type_ids:
                    data["token_type_ids"] = (
                        [tokenizer_params['pad_token_type_id']] * difference +
                        data["token_type_ids"])
                if self.return_special_tokens_mask:
                    data["special_tokens_mask"] = [
                        1
                    ] * difference + data["special_tokens_mask"]
                data["input_ids"] = [tokenizer_params['pad_token_id']
                                     ] * difference + data["input_ids"]
                if not self.infer_mode:
                    data["labels"] = [self.pad_token_label_id
                                      ] * difference + data["labels"]
                data["bbox"] = [[0, 0, 0, 0]] * difference + data["bbox"]
        else:
            if self.return_attention_mask:
                data["attention_mask"] = [1] * len(data["input_ids"])

        # Convert sequence fields to int64 arrays; in inference mode also
        # truncate everything except ``labels`` down to ``max_seq_len``.
        for key in data:
            if key in [
                    'input_ids', 'labels', 'token_type_ids', 'bbox',
                    'attention_mask'
            ]:
                if self.infer_mode:
                    if key != 'labels':
                        length = min(len(data[key]), self.max_seq_len)
                        data[key] = data[key][:length]
                    else:
                        continue
                data[key] = np.array(data[key], dtype='int64')
        return data
| StarcoderdataPython |
5074789 |
import sys
import json
import os.path
import datetime
from dagon import batch
from dagon import Workflow
from dagon.docker_task import DockerTask
# Check if this is the main
if __name__ == '__main__':
    # Keep intermediate task workspaces under /tmp/test/ for inspection.
    config={
        "scratch_dir_base":"/tmp/test/",
        "remove_dir":False
    }
    # Create the orchestration workflow
    workflow=Workflow("DataFlow-Demo-Docker",config)
    # The task a
    # NOTE(review): taskA is named "A", but the downstream tasks consume
    # workflow://Tokio/f1.txt (the name used by the commented-out variant
    # below) -- confirm which task name is intended.
    taskA=DockerTask("A","echo I am A > f1.txt", image="ubuntu:18.04")
    #taskA=DockerTask("Tokio","echo Soy Tokio > f1.txt", "ubuntu")
    # The task b
    taskB=DockerTask("Berlin","echo Soy Berlin > f2.txt; cat workflow://Tokio/f1.txt >> f2.txt",
    "ubuntu")
    # The task c
    taskC=DockerTask("Nairobi","echo Soy Nairobi > f2.txt; cat workflow://Tokio/f1.txt >> f2.txt",
    "ubuntu")
    # The task d
    taskD=DockerTask("Mosco","cat workflow://Berlin/f2.txt workflow://Nairobi/f2.txt > f3.txt", "ubuntu")
    # add tasks to the workflow
    workflow.add_task(taskA)
    workflow.add_task(taskB)
    workflow.add_task(taskC)
    workflow.add_task(taskD)
    # Dependencies are inferred from the workflow:// data references above.
    workflow.make_dependencies()
    jsonWorkflow=workflow.asJson()
    with open('dataflow-demo-docker.json', 'w') as outfile:
        stringWorkflow=json.dumps(jsonWorkflow,sort_keys=True, indent=2)
        outfile.write(stringWorkflow)
    # run the workflow
    workflow.run()
| StarcoderdataPython |
6618877 | if __name__ == '__main__':
file = open('f2_l-d_kp_20_878.txt', 'r')
linesfile = file.readlines()
resulttoken0 = []
resulttoken1 = []
for x in linesfile:
resulttoken0.append(int(x.split()[0]))
resulttoken1.append(int(x.split()[1]))
file.close()
print('column 0 ( values ): ' + str(resulttoken0))
print('column 1 ( weights ): ' + str(resulttoken1))
| StarcoderdataPython |
6446059 | <filename>APP/TextSummarization/text_summarization.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''=================================================
@IDE :PyCharm
@Author :LuckyHuibo
@Date :2019/10/16 22:35
@Desc :
=================================================='''
# Script entry point -- intentionally an empty placeholder for now.
if __name__ == "__main__":
    pass
9707957 | <reponame>petchat/senz.dev.dashboard
__author__ = 'heamon7'

# NOTE(review): hard-coded secret key checked into source control -- in
# production this should come from configuration or the environment.
SECRET_KEY = 'this is senz dashboard'
def horas_dias(h):
    """Return the number of days equivalent to *h* hours (24 h per day)."""
    dias = h / 24
    return dias
# Ask the user for an hour count and print the equivalent number of days.
x=float(input("Numero de Horas: "))
print(horas_dias(x))
| StarcoderdataPython |
11201498 | import time
from typing import Any, Callable, ClassVar, Dict, Optional, List
from dataclasses import dataclass, field
import pystan
from stanpyro.dppl import PyroModel
from stannumpyro.dppl import NumPyroModel
from scipy.stats import entropy, ks_2samp
import numpy as np
from jax import numpy as jnp
import jax.random
from torch import Tensor
def _ks(s1, s2):
s, p = ks_2samp(s1, s2)
return {"statistic": s, "pvalue": p}
def _distance(pyro_samples, stan_samples, dist):
if len(pyro_samples.shape) == 1:
return dist(stan_samples, pyro_samples)
if len(pyro_samples.shape) == 2:
res = {}
for i, (p, s) in enumerate(zip(pyro_samples.T, stan_samples.T)):
res[i] = dist(p, s)
return res
# Don't know what to compute here. Too many dimensions.
return {}
def _compare(res, ref, compare_params, dist):
    """Per-parameter divergence of ``res`` samples against ``ref`` samples.

    Only parameters listed in ``compare_params`` are compared (all of them
    when it is falsy). Each compared parameter must exist in ``ref`` with
    a matching shape.
    """
    divergence = {}
    for name, pyro_samples in res.items():
        if compare_params and name not in compare_params:
            continue
        assert name in ref, f"{name} is not in Stan results"
        stan_samples = ref[name]
        assert (
            pyro_samples.shape == stan_samples.shape
        ), f"Shape mismatch for {name}, Pyro {pyro_samples.shape}, Stan {stan_samples.shape}"
        divergence[name] = _distance(pyro_samples, stan_samples, dist)
    return divergence
def _convert_to_np(value):
if type(value) == Tensor:
return value.cpu().numpy()
elif isinstance(value, dict):
return {k: _convert_to_np(v) for k, v in value.items()}
elif isinstance(value, list):
return np.array([ _convert_to_np(v) for v in value])
else:
return value
@dataclass
class TimeIt:
    """Context manager recording elapsed wall-clock seconds into ``timers[name]``."""

    name: str
    timers: Dict[str, float]

    def __enter__(self):
        self.start = time.perf_counter()

    def __exit__(self, *exc_info):
        # Recorded even if the body raised, so failed runs are timed too.
        self.timers[self.name] = time.perf_counter() - self.start
@dataclass
class Config:
    """MCMC sampling configuration shared by all backends."""

    # Post-warmup iterations per chain.
    iterations: int = 100
    warmups: int = 10
    chains: int = 1
    # Keep every ``thin``-th sample.
    thin: int = 2
@dataclass
class MCMCTest:
    """Runs one Stan model through Pyro/NumPyro (compiled and naive modes)
    and PyStan, then compares posterior samples and wall-clock timings.

    Fields declared with ``init=False`` are populated by the ``run_*``
    methods.
    """

    name: str
    model_file: str
    pyro_file: Optional[str] = None
    data: Dict[str, Any] = field(default_factory=dict)
    # Restrict the comparison to these parameter names (None = compare all).
    compare_params: Optional[List[str]] = None
    config: Config = Config()
    with_pyro: bool = True
    with_numpyro: bool = True
    pyro_samples: Dict[str, Any] = field(init=False)
    # NOTE(review): double underscore looks like a typo -- the code below
    # assigns ``self.pyro_naive_samples``; confirm and rename.
    pyro__naive_samples: Dict[str, Any] = field(init=False)
    numpyro_samples: Dict[str, Any] = field(init=False)
    numpyro_naive_samples: Dict[str, Any] = field(init=False)
    stan_samples: Dict[str, Any] = field(init=False)
    timers: Dict[str, float] = field(init=False, default_factory=dict)
    divergences: Dict[str, Any] = field(init=False, default_factory=dict)

    def run_pyro(self):
        """Sample with the compiled ("mixed" mode) Pyro/NumPyro backends."""
        assert self.with_pyro or self.with_numpyro, "Should run either Pyro or Numpyro"
        if self.with_pyro:
            with TimeIt("Pyro_Compilation", self.timers):
                model = PyroModel(self.model_file, recompile=True, mode="mixed")
            with TimeIt("Pyro_Runtime", self.timers):
                mcmc = model.mcmc(
                    self.config.iterations,
                    warmups=self.config.warmups,
                    chains=self.config.chains,
                    thin=self.config.thin,
                )
                mcmc.run(self.data)
                self.pyro_samples = mcmc.get_samples()
        if self.with_numpyro:
            with TimeIt("Numpyro_Compilation", self.timers):
                model = NumPyroModel(self.model_file, recompile=True, mode="mixed")
            with TimeIt("Numpyro_Runtime", self.timers):
                mcmc = model.mcmc(
                    self.config.iterations,
                    warmups=self.config.warmups,
                    chains=self.config.chains,
                    thin=self.config.thin,
                )
                # NumPyro needs an explicit PRNG key; fixed seed for
                # reproducibility.
                mcmc.run(jax.random.PRNGKey(0), self.data)
                self.numpyro_samples = mcmc.get_samples()

    def run_naive_pyro(self):
        """Sample with the naive ("comprehensive" mode) backends.

        Only runtime is recorded here; compilation of the naive variants
        is not benchmarked.
        """
        assert self.with_pyro or self.with_numpyro, "Should run either Pyro or Numpyro"
        if self.with_pyro:
            model = PyroModel(self.model_file, recompile=True, mode="comprehensive")
            with TimeIt("Pyro_naive_Runtime", self.timers):
                mcmc = model.mcmc(
                    self.config.iterations,
                    warmups=self.config.warmups,
                    chains=self.config.chains,
                    thin=self.config.thin,
                )
                mcmc.run(self.data)
                self.pyro_naive_samples = mcmc.get_samples()
        if self.with_numpyro:
            model = NumPyroModel(self.model_file, recompile=True, mode="comprehensive")
            with TimeIt("Numpyro_naive_Runtime", self.timers):
                mcmc = model.mcmc(
                    self.config.iterations,
                    warmups=self.config.warmups,
                    chains=self.config.chains,
                    thin=self.config.thin,
                )
                mcmc.run(jax.random.PRNGKey(0), self.data)
                self.numpyro_naive_samples = mcmc.get_samples()

    def run_stan(self):
        """Sample the reference posterior with PyStan."""
        with TimeIt("Stan_Compilation", self.timers):
            mcmc = pystan.StanModel(file=self.model_file)
        with TimeIt("Stan_Runtime", self.timers):
            fit = mcmc.sampling(
                data=self.data,
                iter=self.config.warmups + self.config.iterations,
                chains=self.config.chains,
                warmup=self.config.warmups,
                thin=self.config.thin,
            )
            self.stan_samples = fit.extract(permuted=True)

    def compare(self):
        """Compute per-backend KS divergences of the samples against Stan's."""
        self.divergences = {
            "pyro": {},
            "numpyro": {},
            "pyro_naive": {},
            "numpyro_naive": {},
        }
        if self.with_pyro:
            self.divergences["pyro"]["ks"] = _compare(
                _convert_to_np(self.pyro_samples),
                self.stan_samples,
                self.compare_params,
                _ks
            )
            self.divergences["pyro_naive"]["ks"] = _compare(
                _convert_to_np(self.pyro_naive_samples),
                self.stan_samples,
                self.compare_params,
                _ks
            )
        if self.with_numpyro:
            self.divergences["numpyro"]["ks"] = _compare(
                _convert_to_np(self.numpyro_samples),
                self.stan_samples,
                self.compare_params,
                _ks
            )
            self.divergences["numpyro_naive"]["ks"] = _compare(
                _convert_to_np(self.numpyro_naive_samples),
                self.stan_samples,
                self.compare_params,
                _ks,
            )

    def run(self) -> Dict[str, Dict[str, Any]]:
        """Execute all backends, compare, and return divergences + timings."""
        self.run_pyro()
        self.run_stan()
        self.run_naive_pyro()
        self.compare()
        return {"divergences": self.divergences, "timers": self.timers}
| StarcoderdataPython |
9617419 | <gh_stars>0
import cv2

# Extract every 50th frame of the capture into imgs/frame<N>.jpg.
vidcap = cv2.VideoCapture('livevideo.mp4')
count = 0
success = True
while success:
    success, image = vidcap.read()
    print('Read a new frame: ', success)
    # Only write when the read succeeded: the original code could pass a
    # None image to imwrite after the final, failed read. Dropping the
    # discarded priming read also means frame 0 is now actually saved.
    if success and count % 50 == 0:
        cv2.imwrite("imgs/frame%d.jpg" % count, image)
    count += 1
vidcap.release()
5029682 | import copy
from typing import Callable, Dict, List, Optional, Tuple
from . import asciiart
class Action:
    """
    Generic action class for updating the game state.
    """

    def __init__(self, fn: Callable[["QuarantineStatus"], Optional[str]]):
        # ``fn`` mutates the state and returns a user-facing message, or
        # None when the action is not currently applicable.
        self._fn: Callable[["QuarantineStatus"], Optional[str]] = fn

    def apply(self, state: "QuarantineStatus") -> Optional[str]:
        """Run this action against ``state``; returns its message or None."""
        return self._fn(state)
class BasicAction(Action):
    """Class for an action that can be done by the user."""

    def __init__(self, delta_energy: int, delta_fulfillment: int, message: str):
        # Closure applied to the game state: shift both meters by the
        # configured deltas and always report ``message``.
        def _basic_apply(state: "QuarantineStatus") -> str:
            state.energy += delta_energy
            state.fulfillment += delta_fulfillment
            return message

        super().__init__(_basic_apply)
        # The deltas are also kept as attributes on the action itself.
        self.delta_energy = delta_energy
        self.delta_fulfillment = delta_fulfillment
# Action names
# Symbolic constants mapping UI labels to the internal action keys used in
# ACTIONS and ACTIONS_ASCII_ART below.
ACTION_GET_SLOSHED = "drink_beer"
ACTION_ORDER_TAKEOUT = "eat_delivery"
ACTION_COOK_FOOD = "eat_homecooked"
ACTION_INFINITE_REDDIT = "scroll_reddit"
ACTION_REFRESH_INBOX = "check_email"
ACTION_ONLINE_SHOPPING = "buy_online"
ACTION_NETFLIX_AND_CHILL_W_YOURSELF = "binge_netflix"
ACTION_BRO_SPLIT_IT_UP = "workout"
ACTION_VIDEO_CHAT_WITH_THE_FAM = "zoom_call"
ACTION_STARE_OUT_WINDOW = "people_watch"
ACTION_COFFEEDENCE = "drink_caffeine"
ACTION_DANCE_LIKE_NO_ONES_WATCHING = "listen_to_radio"

# ASCII art associated with each action
ACTIONS_ASCII_ART: Dict[str, str] = {
    ACTION_GET_SLOSHED: asciiart.ACTION_GET_SLOSHED_SCENE,
    ACTION_ORDER_TAKEOUT: asciiart.ACTION_ORDER_TAKEOUT_SCENE,
    ACTION_COOK_FOOD: asciiart.ACTION_COOK_FOOD_SCENE,
    ACTION_INFINITE_REDDIT: asciiart.ACTION_INFINITE_REDDIT_SCENE,
    ACTION_REFRESH_INBOX: asciiart.ACTION_REFRESH_INBOX_SCENE,
    ACTION_ONLINE_SHOPPING: asciiart.ACTION_ONLINE_SHOPPING_SCENE,
    ACTION_NETFLIX_AND_CHILL_W_YOURSELF: asciiart.ACTION_NETFLIX_AND_CHILL_W_YOURSELF_SCENE,
    ACTION_BRO_SPLIT_IT_UP: asciiart.ACTION_BRO_SPLIT_IT_UP_SCENE,
    ACTION_VIDEO_CHAT_WITH_THE_FAM: asciiart.ACTION_VIDEO_CHAT_WITH_THE_FAM_SCENE,
    ACTION_STARE_OUT_WINDOW: asciiart.ACTION_STARE_OUT_WINDOW_SCENE,
    ACTION_COFFEEDENCE: asciiart.ACTION_COFFEEDENCE_SCENE,
    ACTION_DANCE_LIKE_NO_ONES_WATCHING: asciiart.ACTION_DANCE_LIKE_NO_ONES_WATCHING_SCENE,
}
# Action properties
# Each entry maps an action key to a BasicAction(delta_energy,
# delta_fulfillment, message); the inline TODOs describe planned
# non-linear effects.
ACTIONS: Dict[str, Action] = {
    ACTION_GET_SLOSHED: BasicAction(
        -10, +10, "You feel refreshed, and a little bit light-headed."
    ),  # TODO: drunk_function?
    # "move_room": BasicAction(
    #     -5, 0, "You're here. Now what?"
    # ),  # TODO: decrease fulfillment multiplicatively
    ACTION_ORDER_TAKEOUT: BasicAction(
        +5,
        +5,
        "The delivery charge brought the price up a surprising amount. Still... you deserved it.",
    ),  # TODO: decrease energy and fulfillment multiplicatively
    ACTION_COOK_FOOD: BasicAction(
        +5, +10, "You wonder why you ever order delivery until you look at the clock."
    ),  # TODO: decrease energy from eating too much, increase fulfillment multiplicatively
    ACTION_INFINITE_REDDIT: BasicAction(
        -5,
        -5,
        "You're getting really good at recognizing reposts. Those cat gifs are cute, though.",
    ),  # TODO: decrease energy, decrease fulfillment multiplicatively
    ACTION_REFRESH_INBOX: BasicAction(
        0,
        0,
        'Another corporate email about "troubling and uncertain times" and a 20% off clearance sale.',
    ),  # TODO: decrease fulfillment multiplicatively
    ACTION_ONLINE_SHOPPING: BasicAction(
        +10,
        +20,
        "How are you spending the same amount and you can't even leave your apartment?",
    ),  # TODO: big decrease in energy and fulfillment
    ACTION_NETFLIX_AND_CHILL_W_YOURSELF: BasicAction(
        -10,
        +20,
        "Another episode down of a show you'll watch most of and then forget.\n "
        "Not the worst use of time.",
    ),  # TODO: big decrease in fulfillment
    # "cook_food": BasicAction(-20, +20, "TODO"),  # TODO: big increase in fulfillment
    ACTION_BRO_SPLIT_IT_UP: BasicAction(
        -20, +5, "You're tired, but in a good way."
    ),  # TODO: Fibonacci increase in fulfillment
    # "nap": BasicAction(
    #     +12, -10, "What a waste of time. Refreshing, though."
    # ),  # TODO: drop fulfillment to zero if a portion of day is spent napping
    ACTION_VIDEO_CHAT_WITH_THE_FAM: BasicAction(
        -10, 0, "Sorry, could you repeat that? The call froze."
    ),
    # TODO: decrease fulfillment multiplicatively
    ACTION_STARE_OUT_WINDOW: BasicAction(
        0, +15, "A few people drift by, maybe 30% slower than they'd usually walk."
    ),
    ACTION_COFFEEDENCE: BasicAction(
        +20,
        0,
        "The buzzing at the base of your skull is louder. \n"
        "Maybe you should get it looked at?",
    ),
    # TODO: drink too much, can't sleep/nap for 3 actions
    ACTION_DANCE_LIKE_NO_ONES_WATCHING: BasicAction(
        0,
        +15,
        "For better or for worse, you're now more informed about the \n"
        "state of the world. Some numbers are up; others are down.",
    ),
}
# TIME OF DAY
# Maps each part of the day to its ordered tuple of time-of-day labels.
TIME_OF_DAY = {
    "morning": ("dawn", "mid-morning", "late morning"),
    "afternoon": ("noon", "mid-afternoon", "late afternoon"),
    "night": ("early evening", "dusk", "late evening", "midnight"),
}


def time_of_day_generator():
    """Yield (day_number, time_label) pairs, in order, for a 15-day span."""
    for day_index in range(15):
        for labels in TIME_OF_DAY.values():
            for label in labels:
                yield day_index + 1, label
class QuarantineStatus(object):
    """
    Object for tracking user state. Possible rooms are "bedroom",
    "living room", and "kitchen".
    """

    def __init__(
        self,
        energy: int,
        fulfillment: int,
        action_history: List[Tuple["QuarantineStatus", Action]],
    ):
        self.energy: int = energy
        self.fulfillment: int = fulfillment
        self.current_room = "bedroom"
        # Advancing this generator is what moves game time forward.
        self.time_gen = time_of_day_generator()
        self.day_count, self.current_time = next(self.time_gen)
        self._action_history: List[Tuple[QuarantineStatus, Action]] = action_history

    @property
    def available_actions(self) -> List[str]:
        """
        Returns a list of available actions by copying the state and testing if they return None.
        TODO: Separate "invalid" from "will just say something dumb and not do anything"
        """
        # TODO: Performance
        # Trial-runs every action on a deep copy so the real state is
        # untouched; an action returning None is considered unavailable.
        avail = []
        for k, a in ACTIONS.items():
            state_copy = copy.deepcopy(self)
            if a.apply(state_copy) is not None:
                avail.append(k)
        return avail

    # When applying an action, get the Action object from the global ACTIONS
    # dict: `state.apply_action(ACTIONS["drink_beer"])`
    def apply_action(self, action_name: str) -> str:
        """Apply the named action; on success, advance time and return its message."""
        action: Action = ACTIONS[action_name]
        result = action.apply(self)
        if result is not None:
            # TODO: handle exception when no more iteration can be done
            self.day_count, self.current_time = next(self.time_gen)
            return result
        return "Sorry... that's not something you can do now."
| StarcoderdataPython |
169014 | <filename>lcls_live/bmad/tools.py
from lcls_live.klystron import Klystron, existing_LCLS_klystrons, unusable_faults
from lcls_live import Collimator
from math import isnan, sqrt
import pandas
def bmad_klystron_lines(klystron):
    '''
    Form Bmad lines to set klystron overlays.

    Parameters
    ----------
    klystron :
        Klystron-like object providing ``sector``, ``station``, ``phase``,
        ``enld``, ``faults`` and the ``is_accelerating()``, ``is_usable()``,
        ``has_faults()`` methods.

    Returns
    -------
    list of str
        Comment header, any status/fault comments, and the
        ``O_K<sector>_<station>[...]`` overlay settings.  Stations that
        are not both accelerating and usable get ``ENLD_MeV = 0``.
    '''
    k = klystron
    kname = 'K' + str(k.sector) + '_' + str(k.station)
    bmad_name = 'O_' + kname

    # Data
    accelerating = k.is_accelerating()
    usable = k.is_usable()
    has_faults = k.has_faults()
    phase = k.phase
    enld = k.enld

    # Fix: compare against None with 'is', not '==' (PEP 8 E711; also
    # safe for values with overloaded equality such as numpy scalars).
    good_phase = phase is not None and not isnan(phase)
    if not good_phase or not usable:
        # Fall back to zero phase when the readback is missing/NaN or
        # the station is unusable.
        phase = 0

    lines = ['!---------------', '! '+kname]
    if not accelerating:
        lines.append('! is not accelerating')
    if not usable:
        lines.append('! is NOT usable')
    if has_faults:
        lines.append('! has faults:')
        for f in k.faults:
            lines.append('!    '+f)
    if accelerating and usable:
        lines.append(bmad_name+'[ENLD_MeV] = '+str(enld))
        lines.append(bmad_name+'[phase_deg] = '+str(phase))
    else:
        lines.append(bmad_name+'[ENLD_MeV] = 0')
    return lines
def write_bmad_klystron_settings(klystrons, filePath='klystron_settings.bmad', verbose=False):
    """Write Bmad overlay settings for every klystron in *klystrons* to
    *filePath*, one line per setting (see ``bmad_klystron_lines``)."""
    with open(filePath, 'w') as f:
        for klystron in klystrons:
            for line in bmad_klystron_lines(klystron):
                f.write(line + '\n')
    if verbose:
        print('Written:', filePath)
def bmad_linac_phasing_lines(epics):
    """
    Build Bmad lines for the overall linac phasing overlays.

    Note that these overlays override individual klystron phases; L1 is
    left at zero because K21_1 sets it directly.

    :param epics: EPICS interface (or proxy) providing ``caget(pv)``.
    :return: list of Bmad setting/comment lines.
    """
    l2_phase = epics.caget('SIOC:SYS0:ML00:CALC204')
    l3_phase = epics.caget('SIOC:SYS0:ML00:AO499')
    return [
        '! Linac overall phasing',
        'O_L1[phase_deg] = 0 ! K21_1 sets this directly. This is a delta on top of that.',
        'O_L2[phase_deg] = ' + str(l2_phase),
        'O_L3[phase_deg] = ' + str(l3_phase),
    ]
def write_bmad_linac_phasing_lines(filePath='linac_settings.bmad', epics=None, verbose=False):
    """
    Write the overall linac phasing overlays to a Bmad file.

    Requires ``epics`` (or a proxy object) with a ``caget`` method.
    """
    with open(filePath, 'w') as f:
        f.write('\n'.join(bmad_linac_phasing_lines(epics)) + '\n')
    if verbose:
        print('Written:', filePath)
def tao_BC_and_LEM_lines(epics):
    """
    Form Tao commands that set linac energy set points, bunch compressor
    offsets, and bunch lengths from live EPICS readings.

    Parameters
    ----------
    epics :
        EPICS interface (or proxy) providing ``caget(pv_name)``.

    Returns
    -------
    list of str
        ``set dat ...`` Tao commands plus ``!`` comment lines.
    """
    # Energies (presumably MeV/GeV PVs scaled to eV -- confirm PV units).
    bc1_e0 = epics.caget('SIOC:SYS0:ML00:AO483')*1e6
    bc2_e0 = epics.caget('SIOC:SYS0:ML00:AO489')*1e9
    l3_e0 = epics.caget('SIOC:SYS0:ML00:AO500')*1e9

    # Charge in LTU
    q_after_horn_cutting = epics.caget('SIOC:SYS0:ML00:CALC252')*1e-12  # pC -> C

    bc1_offset = epics.caget('BMLN:LI21:235:MOTR')*1e-3  # mm -> m
    bc2_offset = epics.caget('BMLN:LI24:805:MOTR')*1e-3  # mm -> m

    bc1_current = epics.caget('SIOC:SYS0:ML00:AO485')
    bc2_current = epics.caget('SIOC:SYS0:ML00:AO195')

    # Catch bad settings: a zero current would divide by zero below.
    if bc1_current == 0:
        print('Warning: BC1 current is zero!')
        bc1_sigma_z = 0
    else:
        # Assumes parabolic distribution
        bc1_sigma_z = q_after_horn_cutting*299792458 / sqrt(10) / bc1_current

    if bc2_current == 0:
        # BUG FIX: this warning previously (and wrongly) said 'BC1'.
        print('Warning: BC2 current is zero!')
        bc2_sigma_z = 0
    else:
        # Assumes Gaussian distribution
        bc2_sigma_z = q_after_horn_cutting*299792458 / sqrt(12) / bc2_current

    lines = []
    lines.append('set dat BC1.energy[1]|meas = '+str(bc1_e0))
    lines.append('set dat BC2.energy[1]|meas = '+str(bc2_e0))
    lines.append('set dat L3.energy[2]|meas = '+str(l3_e0))
    lines.append('set dat BC1.offset[1]|meas = '+str(bc1_offset))
    lines.append('set dat BC2.offset[1]|meas = '+str(bc2_offset))
    lines.append(f'! Charge after horn cutting: {q_after_horn_cutting*1e12:10.4} pC')
    lines.append(f'! For BC1 current {bc1_current} A')
    lines.append('set dat BC1.beam[1]|meas = '+str(bc1_sigma_z))
    lines.append(f'! For BC2 current {bc2_current} A')
    lines.append('set dat BC2.beam[1]|meas = '+str(bc2_sigma_z))
    return lines
def write_tao_BC_and_LEM_lines(filePath='LEM_settings.tao', epics=None, verbose=False):
    """
    Write the BC/LEM Tao commands to a .tao file and return them.

    Requires ``epics`` (or a proxy object) with a ``caget`` method.
    """
    commands = tao_BC_and_LEM_lines(epics)
    with open(filePath, 'w') as f:
        for command in commands:
            f.write(command + '\n')
    if verbose:
        print('Written:', filePath)
    return commands
def bmad_from_csv(csvfile, epics, outfile=None):
    """
    Create Bmad-style settings from a CSV mapping file and an EPICS
    interface.

    The CSV must provide ``device_name``, ``attribute``,
    ``bmad_ele_name``, ``bmad_attribute`` and ``bmad_factor`` columns;
    each row becomes ``<ele>[<attr>] = <factor>*<live value>``.

    Example:
        bmad_from_csv('collimator_mapping.csv', epics, 'test.bmad')
    """
    df = pandas.read_csv(csvfile)
    pvlist = list(df['device_name'] + ':' + df['attribute'].str.strip())

    # Fetch all live values in one call.
    df['value'] = epics.caget_many(pvlist)

    # Assemble one Bmad assignment per row.
    rhs = df['bmad_factor'].astype(str) + '*' + df['value'].astype(str)
    lines = df['bmad_ele_name'] + '[' + df['bmad_attribute'] + '] = ' + rhs

    if outfile:
        with open(outfile, 'w') as f:
            f.write('\n'.join(lines) + '\n')
        print('Written:', outfile)
    return list(lines)
# Human-readable descriptions of the EPICS PVs that summarise machine
# state: pulse energy, charges, currents, energies, phases, energy
# "fudge" factors, and bunch-compressor offsets.
INFO_PVS = {
    'GDET:FEE1:241:ENRC': 'FEL pulse energy from gas detector (mJ)',
    'SIOC:SYS0:ML00:AO020': 'UV Pulse Length RMS (ps)',
    'LASR:IN20:475:PWR1H': 'Laser heater power (uJ)',
    'SIOC:SYS0:ML00:AO470':'Bunch charge off the cathode',
    'SIOC:SYS0:ML00:CALC252': 'Bunch charge in the LTU',
    'SIOC:SYS0:ML00:AO485': 'BC1 mean current (A)',
    'SIOC:SYS0:ML00:AO195': 'BC2 peak current (A)',
    'BLEN:LI21:265:AIMAX1H': 'BC1 bunch length monitor (A)',
    'BLEN:LI24:886:BIMAX1H': 'BC2 bunch length monitor (A)',
    'SIOC:SYS0:ML00:AO513':'DL1 Energy',
    'SIOC:SYS0:ML00:AO483':'BC1 Energy',
    'SIOC:SYS0:ML00:AO489':'BC2 Energy',
    'SIOC:SYS0:ML00:AO500':'DL2 Energy',
    'ACCL:LI21:1:L1S_S_PV': 'L1 Phase',
    'SIOC:SYS0:ML00:CALC204':'L2 Phase',
    'SIOC:SYS0:ML00:AO499':'L3 Phase',
    'ACCL:IN20:350:FUDGE': 'L0 energy fudge factor',
    'ACCL:LI21:1:FUDGE': 'L1 energy fudge factor',
    'ACCL:LI22:1:FUDGE': 'L2 energy fudge factor',
    'ACCL:LI25:1:FUDGE': 'L3 energy fudge factor',
    'BMLN:LI21:235:MOTR': 'BC1 offset (mm)',
    'BMLN:LI24:805:MOTR': 'BC2 offset (mm)',
    'IOC:IN20:BP01:QANN': 'Expected charge after gun (nC)',
    'BPMS:SYS0:2:QANN':'Expected Charge after BC charge cutting (nC)'}
| StarcoderdataPython |
4955212 | <gh_stars>0
#%%
import argparse
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import numpy as np
import json
from torch import alpha_dropout
import cv2
import torch
import torchvision.transforms as transforms
###################DTDmax#######################
# df1 = pd.read_csv('DTDmax.csv', names=['value'])
# plt.hist(df1,bins=800)
# plt.title("DTD")
# plt.xlim([0,7])
# plt.ylim([0,40])
# plt.xlabel("Max value")
# plt.ylabel("# of data")
# # plt.show()
# # print(df.head(5))
# # print("max:" ,df.max())
# # print("min:",df.min())
# print(df1['value'].dtype)
# print(df1['value'].describe(include='all'))
################### Imagenetomax #################
# df2 = pd.read_csv('NEWimagenetomax.csv', names=['dir','value'], sep=" ")
# print("ImageNet-O")
# print(df2['value'].dtype)
# print(df2['value'].describe(include='all'))
# print(df2.head())
# plt.hist(df2['value'],bins=1000,alpha=0.5)
# maxindex=df2["value"].idxmax()
# print(df2.iloc[maxindex+1])
# df2=df2.sort_values(by=['value'], axis=0,ascending=False)
# print(df2.head())
# print("tail:",df2.tail())
# print(df2)
# 2000rows * 2 columns
#first5 = [i["dir"] for i in df2][:5]
# top5= df2[:5]
# print(top5)
# print("=========")
# top5= top5["dir"]
# top5 = top5.tolist()
#print(top5)
#print(type(top5))
#print("first value : ",top5[:1])
# last5=df2[-5:]
# last5=last5["dir"]
# print(last5)
################ imagenet.json ######################
# with open('imagenet.json','r') as f:
# json_data=json.load(f)
# imagenetmax =[]
# for i in range(len(json_data['max'])):
# imagenetmax.append(json_data['max'][i])
# df3 = pd.DataFrame({"max":imagenetmax})
# print("ImageNet")
# # print(df3['max'].dtype)
# print(df3['max'].describe(include='all'))
# plt.hist(df3,bins=5000,alpha=0.5)
######################### plot and show ###############################
# plt.xlim([0,2.5])
# # plt.ylim([0,40])
# plt.xlabel("Max value")
# plt.title("Grad CAM")
# plt.show()
#######################################################################
# Load one Grad-CAM result image and convert it to a torch tensor.
curImgPath = '/home/juyoung/GradCAM/pytorch-grad-cam/imageneto_result_image/original19.863325.jpg'
rgb_img = cv2.imread(curImgPath, 1)  # flag 1: force 3-channel colour (BGR)
rgb_img = cv2.cvtColor(rgb_img,cv2.COLOR_BGR2RGB)  # OpenCV is BGR; reorder to RGB
rgb_img = np.float32(rgb_img) / 255  # scale pixel values into [0, 1]
transform = transforms.ToTensor()  # HWC ndarray -> CHW float tensor
rgb_img = transform(rgb_img)
print(rgb_img)
# rgb_img = np.float32(rgb_img) / 255
# print(rgb_img.shape)
# rgb_img - torch.from_numpy(rgb_img).float()
# print(type(rgb_img))
#%%
| StarcoderdataPython |
def validateStackSequences(pushed: list[int], popped: list[int]) -> bool:
    """Return True if *popped* could be produced by pushing the values of
    *pushed* in order onto a stack and popping at arbitrary times."""
    simulated = []
    next_pop = 0  # index of the next value expected to be popped
    for value in pushed:
        simulated.append(value)
        # Greedily pop whenever the stack top matches the expectation.
        while simulated and next_pop < len(popped) and simulated[-1] == popped[next_pop]:
            simulated.pop()
            next_pop += 1
    # Valid exactly when every expected pop was consumed.
    return next_pop == len(popped)
9771328 | import pytest
from symspellpy import Verbosity
# Words used to seed the spelling dictionary via the fixtures below.
ENTRIES = ["baked", "ax", "lake", "", "slaked"]


class TestSymSpellPyEdgeCases:
    """Edge-case regression tests for symspellpy lookup behaviour.

    Relies on the ``symspell_default`` and ``symspell_long_entry``
    fixtures (the latter is parametrized indirectly with the entry
    list to load).
    """

    @pytest.mark.parametrize("symspell_long_entry", [ENTRIES], indirect=True)
    def test_empty_string_has_all_short_deletes(self, symspell_long_entry):
        # Every entry except the last should appear in the delete bucket
        # keyed by the empty string, and nothing else should.
        sym_spell, entries = symspell_long_entry
        assert len(entries[:-1]) == len(sym_spell.deletes[""])
        assert all(entry in sym_spell.deletes[""] for entry in entries[:-1])
        assert "abc" not in sym_spell.deletes[""]

    def test_split_correction_part_of_single_term_correction(self, symspell_default):
        # With "where is" seeded as a bigram, lookup_compound should
        # prefer the two-word split over a single-term correction.
        symspell_default.create_dictionary_entry("where", 2)
        symspell_default.create_dictionary_entry("is", 2)
        symspell_default.create_dictionary_entry("whereas", 2)
        symspell_default._bigrams["where is"] = 10
        suggestions = symspell_default.lookup_compound("whereiz", 2)
        assert "where is" == suggestions[0].term
        assert 2 == suggestions[0].distance
        assert 10 == suggestions[0].count

    @pytest.mark.parametrize("symspell_long_entry", [["bank", "bink"]], indirect=True)
    def test_no_common_char_with_phrase(self, symspell_long_entry):
        # Both dictionary words are within the max edit distance of the
        # query and should be returned ordered by distance.
        sym_spell, _ = symspell_long_entry
        results = sym_spell.lookup("knab", Verbosity.ALL, 4)
        assert 2 == len(results)
        assert "bank" == results[0].term
        assert 3 == results[0].distance
        assert "bink" == results[1].term
        assert 4 == results[1].distance
3254729 | <gh_stars>1-10
import math
# Read the grid: one string per row, '#' marks a tree, '.' is open.
with open("input.txt") as fp:
    matrix = [i.strip() for i in fp.readlines()]
height = len(matrix)
width = len(matrix[0])
# Start in the top-left corner.
row = 0
col = 0
def findTrees(matrix, height, width, row, col, rowJump, colJump):
    """Count '#' cells hit while descending the grid on a fixed slope.

    The grid repeats infinitely to the right, so the column index wraps
    modulo *width*.

    Args:
        matrix: list of equal-length strings ('#' = tree, '.' = open).
        height: number of rows in the grid.
        width: number of columns per row.
        row, col: starting position.
        rowJump, colJump: rows moved down / columns moved right per step.

    Returns:
        Number of trees encountered before running off the bottom.
    """
    trees = 0
    # Fix: dropped the misleading 'col <= width' guard.  The column is
    # always reduced modulo width, so it can never reach width; the old
    # test could never be False and only obscured the loop condition.
    while row < height:
        if matrix[row][col] == '#':
            trees += 1
        col = (col + colJump) % width
        row += rowJump
    return trees
# Multiply the tree counts over the five required (colJump, rowJump)
# slopes to get the puzzle answer.
ans = 1
for (i, j) in [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]:
    ans *= findTrees(matrix, height, width, row, col, j, i)
print(ans)
| StarcoderdataPython |
11298044 | <filename>contrib/Research/nlp/bert/BERT_tf_Soapeggpain/script/e2e_func_node/tools/performance/performanceAnalysis/drawexcel.py
# -*- coding: UTF-8 -*-
import os
import xlsxwriter
import numpy as np
"""
draw excel picture
"""
# Module-level handles for the workbook/worksheet currently being written.
EXCEL_FILE = None
EXCEL_SHEET = None
# Minimum column width (characters) for the dashboard sheet.
DEFAULT_COL_LEN = 12
# Row layout (0-based): header row, then data rows.
FIRST_ROW_NUM = 0
DATA_START_ROW_NUM = 1
FIRST_STEP_ROW_NUM = 1
SECOND_STEP_ROW_NUM = 2
# Column indices (0-based) for each metric on the DASHBOARD sheet.
SERIAL_LIST_COL_NUM = 0
INTER_LIST_COL_NUM = 1
BP_FP_LIST_COL_NUM = 2
BPEND_TO_ITER_COL_NUM = 3
FPSTART_TO_AR1_COL_NUM = 4
RA1_LIST_COL_NUM = 5
AR1END_TO_BPEND_COL_NUM = 6
BPEND_TO_AR2START_COL_NUM = 7
RA2_LIST_COL_NUM = 8
AR2END_TO_ITEREND_COL_NUM = 9
ITER_LIST_COL_NUM = 10
# Hidden per-row mean columns used by the chart "Mean Cost" series.
MEAN_ITER_COL_NUM = 12
MEAN_INTER_COL_NUM = 13
MEAN_BP_FP_COL_NUM = 14
MEAN_RA1_COL_NUM = 15
MEAN_RA2_COL_NUM = 16
# Column indices for the LossGraph sheet (draw_loss_file).
LOSS_LIST_COL_NUM = 1
MIOU_LIST_COL_NUM = 2
class DrawExcel:
    """Render per-iteration timing measurements into an Excel dashboard.

    Each metric list is written to its own column of the 'DASHBOARD'
    worksheet.  Metrics with a reference mean (iteration, interval,
    BP/FP, reduce-add 1/2) also get a hidden per-row mean column, and
    values above the mean are written in a red "error" style.
    """

    def __init__(self,
                 output_file_name=None,
                 iteration_list=None,
                 interval_list=None,
                 bp_fp_list=None,
                 bpend_to_iter_list=None,
                 allreduce1_start_list=None,
                 reduceadd1_list=None,
                 reduce1end_to_bpend_list=None,
                 bpend_to_reduce2start_list=None,
                 reduceadd2_list=None,
                 reduce2_to_iter_list=None):
        """
        :param output_file_name: output excel file
        :param iteration_list: whole iteration cost list
        :param interval_list: whole interval cost list
        :param bp_fp_list: whole bp_fp cost list
        :param bpend_to_iter_list: BP-end to iteration-end cost list
        :param allreduce1_start_list: FP-start to all-reduce-1-start cost list
        :param reduceadd1_list: whole reduceadd1 cost list
        :param reduce1end_to_bpend_list: reduce-1-end to BP-end cost list
        :param bpend_to_reduce2start_list: BP-end to reduce-2-start cost list
        :param reduceadd2_list: whole reduceadd2 cost list
        :param reduce2_to_iter_list: reduce-2-end to iteration-end cost list
        """
        self.excel_file_name = output_file_name
        self.iteration_list = iteration_list
        self.interval_list = interval_list
        self.bp_fp_list = bp_fp_list
        self.bpend_to_iter_list = bpend_to_iter_list
        self.allreduce1_start_list = allreduce1_start_list
        self.reduce1end_to_bpend_list = reduce1end_to_bpend_list
        self.bpend_to_reduce2start_list = bpend_to_reduce2start_list
        self.reduce2_to_iter_list = reduce2_to_iter_list
        self.reduceadd1_list = reduceadd1_list
        self.reduceadd2_list = reduceadd2_list
        self.clear_output_file()
        self.draw_excel_file()

    def clear_output_file(self):
        """Delete a stale output file so the workbook is written fresh."""
        if os.path.exists(self.excel_file_name):
            os.remove(self.excel_file_name)

    def draw_line(self):
        """Insert one line chart per plotted metric (data + mean series).

        BUG FIX: the original loop iterated ``range(len(data_col_list))``
        (6 passes) while ``mean_col_list`` and both colour lists only
        have 5 entries, so the final pass raised IndexError.  Using
        ``zip`` stops at the shortest sequence (5 charts) instead of
        crashing.
        """
        global EXCEL_FILE
        global EXCEL_SHEET
        data_col_list = ('B', 'C', 'D', 'E', 'F', 'G')
        mean_col_list = ('L', 'M', 'N', 'O', 'P')
        name_col_list = ('Interval', 'BP FP', 'BPend_to_Iter', 'Reduceadd1', 'Reduceadd2', 'Iteration',)
        data_color_list = ['red', 'blue', 'green', 'orange', 'magenta']
        mean_color_list = ['brown', 'black', 'pink', 'purple', 'navy']
        last_row = str(len(self.iteration_list))
        series = zip(data_col_list, mean_col_list, name_col_list, data_color_list, mean_color_list)
        for i, (data_col, mean_col, name, data_color, mean_color) in enumerate(series):
            chart_line = EXCEL_FILE.add_chart({'type': 'line'})
            chart_line.add_series({
                'name': '=DASHBOARD!$' + data_col + '$1',
                'categories': '=DASHBOARD!$A$2:$A$' + last_row,
                'values': '=DASHBOARD!$' + data_col + '$2:$' + data_col + '$' + last_row,
                'line': {'color': data_color, 'width': 1}
            })
            chart_mean = EXCEL_FILE.add_chart({'type': 'line'})
            chart_mean.add_series({
                'name': 'Mean Cost',
                'categories': '=DASHBOARD!$A$2:$A$' + last_row,
                'values': '=DASHBOARD!$' + mean_col + '$2:$' + mean_col + '$' + last_row,
                'line': {'color': mean_color, 'width': 1.5}
            })
            chart_line.combine(chart_mean)
            # The mean columns are hidden on the sheet; show them in charts.
            chart_line.show_hidden_data()
            chart_line.set_title({'name': 'Calculate ' + name + ' Cost Time'})
            chart_line.set_x_axis({'name': "Serial Number"})
            chart_line.set_y_axis({'name': 'Time (ms)'})
            chart_line.set_size({'width': len(self.iteration_list), 'height': 300})
            EXCEL_SHEET.insert_chart('G1', chart_line, {'x_offset': 25, 'y_offset': i * 300})

    def draw_excel_file(self):
        """Create the workbook and write every metric column.

        The original implementation repeated the same write loop ten
        times; the per-metric logic now lives in two local helpers.
        Cell positions, styles, and write order are unchanged.
        """
        global EXCEL_FILE
        global EXCEL_SHEET
        EXCEL_FILE = xlsxwriter.Workbook(self.excel_file_name)
        EXCEL_SHEET = EXCEL_FILE.add_worksheet('DASHBOARD')
        first_row = ['index',
                     'liter_end_to_next_FP_start',
                     'BP_and_FP',
                     'BP_end_to_iter_end',
                     'FP_start_to_Reduce1_start',
                     'AllReduce1_total',
                     'Reduce1_end_to_BP_end',
                     'BP_end_to_reduce2_start',
                     'AllReduce2_total',
                     'AllReduce2_end_to_iter_end',
                     'total_time']
        first_row_style = EXCEL_FILE.add_format({
            'font_name': 'Times New Roman',
            'bold': True,
            'align': 'center',
            'valign': 'vcenter',
            'bg_color': '#92D050'
        })
        other_row_style = EXCEL_FILE.add_format({
            'font_name': 'Times New Roman',
            'bold': False
        })
        error_row_style = EXCEL_FILE.add_format({
            'font_name': 'Times New Roman',
            'font_color': 'red',
            'bold': True
        })
        # Header row; each column is widened to fit its title.
        for col, title in enumerate(first_row):
            EXCEL_SHEET.write(FIRST_ROW_NUM, col, title, first_row_style)
            EXCEL_SHEET.set_column(col, col, max(len(title), DEFAULT_COL_LEN))

        def write_plain_column(values, value_col, row_offset=FIRST_STEP_ROW_NUM):
            # One metric column with no mean tracking or highlighting.
            if values is None:
                return
            for i, value in enumerate(values):
                EXCEL_SHEET.write(i + DATA_START_ROW_NUM, SERIAL_LIST_COL_NUM,
                                  i + DATA_START_ROW_NUM, other_row_style)
                EXCEL_SHEET.write(i + row_offset, value_col, value, other_row_style)

        def write_mean_column(values, value_col, mean_col, row_offset):
            # One metric column plus its (hidden) mean column; values
            # above the mean are written in the red error style.
            if values is None:
                return
            mean_value = np.mean(np.array(values))
            for i, value in enumerate(values):
                EXCEL_SHEET.write(i + DATA_START_ROW_NUM, SERIAL_LIST_COL_NUM,
                                  i + DATA_START_ROW_NUM, other_row_style)
                style = error_row_style if value > mean_value else other_row_style
                EXCEL_SHEET.write(i + row_offset, value_col, value, style)
                EXCEL_SHEET.write(i + row_offset, mean_col, mean_value, other_row_style)

        # NOTE: iteration/interval values start one row lower than the
        # other metrics (SECOND_STEP_ROW_NUM), matching the original layout.
        write_mean_column(self.iteration_list, ITER_LIST_COL_NUM, MEAN_ITER_COL_NUM, SECOND_STEP_ROW_NUM)
        write_mean_column(self.interval_list, INTER_LIST_COL_NUM, MEAN_INTER_COL_NUM, SECOND_STEP_ROW_NUM)
        write_mean_column(self.bp_fp_list, BP_FP_LIST_COL_NUM, MEAN_BP_FP_COL_NUM, FIRST_STEP_ROW_NUM)
        write_plain_column(self.bpend_to_iter_list, BPEND_TO_ITER_COL_NUM)
        write_plain_column(self.allreduce1_start_list, FPSTART_TO_AR1_COL_NUM)
        write_mean_column(self.reduceadd1_list, RA1_LIST_COL_NUM, MEAN_RA1_COL_NUM, FIRST_STEP_ROW_NUM)
        write_plain_column(self.reduce1end_to_bpend_list, AR1END_TO_BPEND_COL_NUM)
        write_plain_column(self.bpend_to_reduce2start_list, BPEND_TO_AR2START_COL_NUM)
        write_mean_column(self.reduceadd2_list, RA2_LIST_COL_NUM, MEAN_RA2_COL_NUM, FIRST_STEP_ROW_NUM)
        write_plain_column(self.reduce2_to_iter_list, AR2END_TO_ITEREND_COL_NUM)
        # self.draw_line()
        EXCEL_SHEET.set_column(MEAN_ITER_COL_NUM, MEAN_RA2_COL_NUM, None, None, {'hidden': True})
        EXCEL_FILE.close()

    @staticmethod
    def draw_loss_file(input_data1=None, input_data2=None, output_path=None):
        """Write loss (*input_data1*) and mIOU (*input_data2*) columns plus
        a combined trend chart to *output_path*.

        The two series are iterated over ``len(input_data1)`` rows, so
        they are expected to have the same length.
        """
        file_name = xlsxwriter.Workbook(output_path)
        file_sheet = file_name.add_worksheet('LossGraph')
        first_row = ['Serial Number',
                     'Loss List',
                     'mIOU List']
        first_row_style = file_name.add_format({
            'font_name': 'Times New Roman',
            'bold': True,
            'align': 'center',
            'valign': 'vcenter',
            'bg_color': '#92D050'
        })
        other_row_style = file_name.add_format({
            'font_name': 'Times New Roman',
            'bold': False
        })
        for col, title in enumerate(first_row):
            file_sheet.write(FIRST_ROW_NUM, col, title, first_row_style)
            file_sheet.set_column(col, col, max(len(title), DEFAULT_COL_LEN))
        for i in range(len(input_data1)):
            file_sheet.write(i + DATA_START_ROW_NUM, SERIAL_LIST_COL_NUM, i + DATA_START_ROW_NUM, other_row_style)
            file_sheet.write(i + FIRST_STEP_ROW_NUM, LOSS_LIST_COL_NUM, float(input_data1[i]), other_row_style)
            file_sheet.write(i + FIRST_STEP_ROW_NUM, MIOU_LIST_COL_NUM, float(input_data2[i]), other_row_style)
        chart_line1 = file_name.add_chart({'type': 'line'})
        chart_line1.add_series({
            'name': '=LossGraph!$B$1',
            'categories': '=LossGraph!$A$2:$A$' + str(len(input_data1)),
            'values': '=LossGraph!$B$2:$B$' + str(len(input_data1)),
            'line': {'color': 'blue', 'width': 1.5}
        })
        chart_line2 = file_name.add_chart({'type': 'line'})
        chart_line2.add_series({
            'name': '=LossGraph!$C$1',
            # NOTE(review): this range ends one row below the loss range
            # (len + 1).  The inconsistency is preserved from the original
            # -- confirm which bound is intended.
            'categories': '=LossGraph!$A$2:$A$' + str(len(input_data2) + 1),
            'values': '=LossGraph!$C$2:$C$' + str(len(input_data2) + 1),
            'line': {'color': 'orange', 'width': 1.5}
        })
        chart_line1.combine(chart_line2)
        chart_line1.set_title({'name': 'Loss and mIOU Trend'})
        chart_line1.set_x_axis({'name': "Serial Number"})
        chart_line1.set_y_axis({'name': 'Value'})
        chart_line1.set_size({'width': 1000, 'height': 600})
        file_sheet.insert_chart('D1', chart_line1, {'x_offset': 25, 'y_offset': 0})
        file_name.close()
| StarcoderdataPython |
1781627 | #!/usr/bin/python2
import __init__
from utils.log import FALOG
import time
import pika
import sys
import json
class Server(object):
    """RabbitMQ consumer for the network-monitor service (Python 2).

    Declares an exchange ("topic" or "fanout"), binds an exclusive
    server-named queue, and dispatches deliveries through ``callback``.
    """
    def __init__(self, exchange, binding_keys, exchange_type, username = 'network_monitor', passwd = '<PASSWORD>', vhost = 'network_monitor',
                 host = '192.168.122.1', port = 5672):
        """Connect to the broker and register ``callback`` as the consumer.

        :param exchange: exchange name to declare.
        :param binding_keys: routing-key patterns (used only for "topic").
        :param exchange_type: "topic" or "fanout"; anything else logs an
            error and exits the process.
        :param username/passwd/vhost/host/port: broker credentials.
            (The '<PASSWORD>' default is a redaction placeholder --
            supply a real password.)
        """
        self.exchange_type = exchange_type
        credentials = pika.PlainCredentials(username, passwd)
        self.connection = pika.BlockingConnection(pika.ConnectionParameters(host, port, vhost, credentials))
        self.channel = self.connection.channel()
        self.channel.exchange_declare(exchange= exchange,
                                      exchange_type= exchange_type)
        # Exclusive, server-named queue: removed when this consumer disconnects.
        result = self.channel.queue_declare('', exclusive=True)
        queue_name = result.method.queue
        if exchange_type == "topic":
            for binding_key in binding_keys:
                self.channel.queue_bind(exchange=exchange,
                                        queue=queue_name,
                                        routing_key=binding_key)
        elif exchange_type == "fanout":
            # Fanout ignores routing keys; a single bind suffices.
            self.channel.queue_bind(exchange = exchange, queue = queue_name)
        else:
            FALOG.error("exchange type error.")
            sys.exit(-1)
        print(' [*] Waiting for logs. To exit press CTRL+C')
        #self.callback()
        if exchange_type == "topic":
            # Deliver one unacked message at a time; callback acks manually.
            self.channel.basic_qos(prefetch_count = 1)
            self.channel.basic_consume(on_message_callback = self.callback,
                                       queue=queue_name)
        else:
            self.channel.basic_consume(on_message_callback = self.callback, queue = queue_name, auto_ack = False)
    def run(self):
        """Block and consume messages until interrupted or an error occurs."""
        try:
            self.channel.start_consuming()
        except Exception, e:
            print str(e) + "aaaa"  # NOTE(review): leftover debug suffix
            FALOG.error("network-monitor service down:%s" %(str(e)))
            #sys.exit(1)
    def callback(self, ch, method, props, body):
        """
        Dispatch one delivery.

        body:message
        properties:prop.reply_to

        Topic routing keys are expected to look like
        "<source>.<kind>.<...>" with source "api_to_server" or
        "agent_to_server" and kind "rpc" or "msg"; RPC messages get a
        JSON reply published to ``props.reply_to``.

        NOTE(review): fanout deliveries return before the basic_ack at
        the bottom, so they are never acknowledged; also a routing key
        with fewer than two segments would raise IndexError below even
        though the short-key case is only logged, not returned.
        """
        # fanout type process
        if self.exchange_type == "fanout":
            pass
            return
        # topic type process
        msg_type = method.routing_key.split(".")
        if len(msg_type) < 3:
            FALOG.error("receive msg routing_key with wrong format.")
        if msg_type[0] == "api_to_server":
            # other process
            if msg_type[1] == "rpc":
                # Reply to the RPC caller on its private reply queue.
                response = {
                    "task_type": "start_re",
                    "exe_res": True,
                    "error_msg": "",
                }
                ch.basic_publish(exchange='',
                                 routing_key=props.reply_to,
                                 properties=pika.BasicProperties(correlation_id = \
                                     props.correlation_id),
                                 body = json.dumps(response))
            elif msg_type[1] == "msg":
                print "receive msg"
            else:
                FALOG.error("receive msg routing_key with wrong format[part 2].")
        elif msg_type[0] == "agent_to_server":
            if msg_type[1] == "rpc":
                response = {
                    "task_type": "end_re",
                    "exe_res": True,
                    "error_msg": "",
                }
                ch.basic_publish(exchange='',
                                 routing_key=props.reply_to,
                                 properties=pika.BasicProperties(correlation_id = \
                                     props.correlation_id),
                                 body=json.dumps(response))
            elif msg_type[1] == "msg":
                print "receive msg"
            else:
                FALOG.error("receive msg routing_key with wrong format[part 2].")
            # other process
        else:
            FALOG.error("receive msg routing_key with wrong format[part 1].")
        # Manual ack: pairs with auto_ack=False / prefetch_count=1 above.
        ch.basic_ack(delivery_tag = method.delivery_tag)
        print(" [x] %r:%r" % (method.routing_key, body))
class T(Server):
    """Example subclass showing where a custom ``callback`` would be
    overridden.

    NOTE(review): ``Server.__init__`` requires ``exchange_type`` as a
    third positional argument, so this call raises TypeError as written
    -- confirm the intended signature before using T.
    """
    def __init__(self, exchange, binding_keys):
        super(T, self).__init__(exchange, binding_keys)
    #def callback(self):
    #    print "aaaaa"
    # override callback
if __name__ == "__main__":
    # Consume both api->server and agent->server topic messages.
    server = Server("server", ["api_to_server.*", "agent_to_server.*"], 'topic')
    server.run()
    #t = T("top", ["bbb"])
| StarcoderdataPython |
8022643 | import pandas as pd
from torch.utils.data import Dataset, DataLoader
import torch
# Create my own dataset class that reads data from csv, partitions it.
class ForexDataset(Dataset):
    """Sliding-window dataset over the 'Ask' price column of a forex CSV.

    Each item is ``(X, y)`` where ``X`` is a tensor of ``num_steps``
    consecutive ask prices and ``y`` is the price immediately after them.
    """

    def __init__(self, csv_file, num_steps=20, train=True, train_size=0.8):
        """
        Args:
            csv_file: path to a CSV with (at least) an 'Ask' column.
            num_steps: window length used as model input.
            train: select the leading train split (True) or the trailing
                evaluation split (False).
            train_size: fraction of rows assigned to the train split.
        """
        super(ForexDataset, self).__init__()
        df = pd.read_csv(csv_file)
        split_row = int(train_size * df.shape[0])
        if train:
            df = df.iloc[0:split_row]
        else:
            df = df.iloc[split_row:]
        self.data = self.get_sequential_data(df=df, num_steps=num_steps)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]

    def get_sequential_data(self, df: pd.DataFrame, num_steps: int):
        """Return ``[(window, next_value), ...]`` built from the 'Ask' column.

        BUG FIX: the loop previously ran to ``len - num_steps - 1`` and
        silently dropped the final (window, target) pair; the correct
        exclusive bound is ``len - num_steps``, whose last target is the
        final price in the series.
        """
        values = df["Ask"].values
        data = []
        for i in range(len(values) - num_steps):
            X = values[i:i + num_steps]
            y = values[i + num_steps]
            data.append((torch.tensor(X), torch.tensor(y)))
        return data
if __name__ == "__main__":
    # Smoke test: build train/test splits from a local EURUSD dump and
    # iterate one epoch of batches.  The path assumes the repository
    # layout ../RNNs/data/EURUSD.csv -- adjust locally.
    data_train = ForexDataset(csv_file="../RNNs/data/EURUSD.csv", train=True)
    data_test = ForexDataset(csv_file="../RNNs/data/EURUSD.csv", train=False)
    print(len(data_train), len(data_test))
    dataloader = DataLoader(dataset=data_train, batch_size=64, shuffle=False)
    for (x, y) in iter(dataloader):
        print( x, y)
6599741 | <reponame>mstypulk/qiskit-terra
# -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
Persistent value.
"""
from qiskit.pulse.channels import OutputChannel
from qiskit.pulse.common.timeslots import Interval, Timeslot, TimeslotCollection
from qiskit.pulse.exceptions import PulseError
from .instruction import Instruction
from .pulse_command import PulseCommand
class PersistentValue(PulseCommand):
    """Command that holds a constant (persistent) value on a channel."""

    def __init__(self, value):
        """Create a new persistent value command.

        Args:
            value (complex): Complex value to apply, bounded by an absolute
                value of 1. The allowable precision is device specific.

        Raises:
            PulseError: If the absolute value of ``value`` exceeds 1.
        """
        super().__init__(duration=0)
        if abs(value) > 1:
            raise PulseError("Absolute value of PV amplitude exceeds 1.")
        self.value = complex(value)

    def __eq__(self, other):
        """Two PersistentValues are equal when they share the same type
        and the same complex value.

        Args:
            other (PersistentValue): other PersistentValue

        Returns:
            bool: are self and other equal.
        """
        return type(self) is type(other) and self.value == other.value

    def __repr__(self):
        return '%s(%s, value=%s)' % (self.__class__.__name__, self.name, self.value)

    def __call__(self, channel: OutputChannel) -> 'PersistentValueInstruction':
        """Bind this command to *channel*, producing an instruction."""
        return PersistentValueInstruction(self, channel)
class PersistentValueInstruction(Instruction):
    """Instruction that keeps a persistent value on an output channel."""

    def __init__(self, command: PersistentValue, channel: OutputChannel, start_time: int = 0):
        # Zero-length interval at start_time: a PV has no duration.
        interval = Interval(start_time, start_time)
        occupied = TimeslotCollection([Timeslot(interval, channel)])
        super().__init__(command, start_time, occupied)
        self._channel = channel

    @property
    def command(self) -> PersistentValue:
        """The PersistentValue command carried by this instruction."""
        return self._command

    @property
    def channel(self) -> OutputChannel:
        """The output channel this instruction applies to."""
        return self._channel

    def __repr__(self):
        return '%4d: %s -> %s' % (self._start_time, self._command, self._channel)
| StarcoderdataPython |
1868447 | from typing import List
from app.models import Category
from fastack import ModelController
from fastack_sqlmodel.globals import db
from fastack_sqlmodel.session import Session
from fastapi import Request, Response
from pydantic import BaseModel, conint, constr
from sqlalchemy.sql.elements import and_
class BodyCategorySchema(BaseModel):
    """Request body for creating or renaming a category."""
    name: constr(max_length=150)  # category name, at most 150 characters
class CategoryController(ModelController):
    """REST-style CRUD controller for ``Category`` rows.

    Read endpoints open a plain session context; write endpoints wrap
    their work in ``session.begin()`` blocks.
    """
    def retrieve(self, request: Request, id: int) -> Response:
        """Return one category by primary key, or a 404 JSON response."""
        session: Session = db.open()
        with session:
            category: Category = (
                session.query(Category).where(Category.id == id).first()
            )
            if not category:
                return self.json("Not Found", status=404)
            return self.json("Detail Category", category)

    def list(
        self, request: Request, page: conint(gt=0) = 1, page_size: conint(gt=0) = 10
    ) -> Response:
        """Return a page of categories, newest first.

        NOTE(review): all rows are fetched and then paginated by
        ``get_paginated_response`` -- confirm this is acceptable for the
        expected table size.
        """
        session: Session = db.open()
        with session:
            categories: List[Category] = (
                session.query(Category).order_by(Category.date_created.desc()).all()
            )
            return self.get_paginated_response(categories, page, page_size)

    def create(self, request: Request, body: BodyCategorySchema) -> Response:
        """Create a category; reject duplicate names with HTTP 400."""
        session: Session = db.open()
        with session.begin():
            category: Category = (
                session.query(Category).where(Category.name == body.name).first()
            )
            if category:
                return self.json("Already exist", status=400)
            data = body.dict()
            category = Category.create(session, **data)
            return self.json("Created", category)

    def update(self, request: Request, id: int, body: BodyCategorySchema) -> Response:
        """Rename a category: 400 if another row already uses the name,
        404 if the id does not exist."""
        session: Session = db.open()
        with session.begin():
            # Does any OTHER category already use this name?
            qs = (
                session.query(Category)
                .where(and_(Category.name == body.name, Category.id != id))
                .exists()
            )
            found = session.query(qs).scalar()
            if found:
                return self.json("Already exist", status=400)
            category: Category = (
                session.query(Category).where(Category.id == id).first()
            )
            if not category:
                return self.json("Not Found", status=404)
            data = body.dict()
            category.update(session, **data)
            return self.json("Updated", category)

    def destroy(self, request: Request, id: int) -> Response:
        """Delete a category by primary key, or return 404."""
        session: Session = db.open()
        with session.begin():
            category: Category = (
                session.query(Category).where(Category.id == id).first()
            )
            if not category:
                return self.json("Not Found", status=404)
            category.delete(session)
            return self.json("Deleted")
| StarcoderdataPython |
6529599 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 17 18:19:37 2019
@author: matthew
"""
#%%
def dem_and_temporal_source_figure(sources, sources_mask, fig_kwargs, dem = None, temporal_data = None, fig_title = None):
    """ Given sources recovered by a blind signal separation method (e.g. PCA or ICA), compare them in space to the DEM,
    and in time to the temporal baselines.
    Inputs:
        sources | rank 2 array | sources as row vectors.  eg. 5 x999 for 5 sources.
        sources_mask | rank 2 | boolean array with a True value for any masked pixel.  Number of False pixels should be the same as the number of columns in sources.
        fig_kwargs | dict | passed straight to plot_source_tc_correlations, see that for details.
        dem | masked array | a DEM as a masked array.  May be None, in which case the DEM comparison is skipped.
        temporal_data | dict | contains the temporal_baselines and the tcs (time courses).  May be None, in which case the time course comparison is skipped.
        fig_title | string | sets the window title and the name of the .png produced.
    Returns:
        Figure (produced by plot_source_tc_correlations; nothing is returned by this function itself).
    History:
        2021_09_12 | MEG | Written.
    """
    import numpy as np
    import numpy.ma as ma
    from icasar.aux2 import update_mask_sources_ifgs
    if dem is not None:
        dem_ma = ma.masked_invalid(dem)                                                                                                 # LiCSBAS dem uses nans, but lets switch to a masked array (with nans masked)
        dem_new_mask, sources_new_mask, mask_both = update_mask_sources_ifgs(sources_mask, sources,                                     # this takes mask and data as row vectors for one set of masked pixels (the sources from pca)
                                                                             ma.getmask(dem_ma), ma.compressed(dem_ma)[np.newaxis,:])   # and the mask and data as row vectors from the other set of masked pixels (the DEM, hence why it's being turned into a row vector)
        dem_to_sources_comparisons = signals_to_master_signal_comparison(sources_new_mask, dem_new_mask, density = True)                # And then we can do kernel density plots for each IC and the DEM
    else:
        dem_to_sources_comparisons = None
        dem_ma = None                                                                                                                   # also None so the plotting function knows to drop the DEM panel
    if temporal_data is not None:
        tcs_to_tempbaselines_comparisons = signals_to_master_signal_comparison(temporal_data['tcs'].T,
                                                                               np.asarray(temporal_data['temporal_baselines'])[np.newaxis,:], density = True)   # kernel density plots for each time course and the temporal baselines
    else:
        tcs_to_tempbaselines_comparisons = None
    plot_source_tc_correlations(sources, sources_mask, dem_ma, dem_to_sources_comparisons, tcs_to_tempbaselines_comparisons, fig_title = fig_title, **fig_kwargs)   # do the actual plotting
#%%
def visualise_ICASAR_inversion(interferograms, sources, time_courses, mask, n_data = 10):
    """ Plot the first n_data interferograms (mean centred), their reconstruction as time_courses @ sources,
    and the residual between the two, as three rows of a figure (Data / Model / Resid.).
    Inputs:
        interferograms | rank 2 array | interferograms as row vectors (n_ifgs x n_pixels).
        sources | rank 2 array | recovered sources as row vectors (n_sources x n_pixels).
        time_courses | rank 2 array | strength with which each source is used in each interferogram (n_ifgs x n_sources).
        mask | rank 2 boolean array | to convert a row vector back to a rank 2 masked array.
        n_data | int | number of interferograms (figure columns) to plot.  Clipped to the number available.
    Returns:
        Figure (displayed; the function returns None).
    History:
        2021_03_03 | MEG | Written.
    """
    import numpy as np
    def plot_ifg(ifg, ax, mask, vmin, vmax):
        """ Plot one row-vector ifg on ax as an image with its own horizontal colorbar.
        NB: relies on 'fig' from the enclosing scope, which is defined below before this is first called.  """
        w = ax.imshow(col_to_ma(ifg, mask), interpolation ='none', aspect = 'equal', vmin = vmin, vmax = vmax)                   #
        axin = ax.inset_axes([0, -0.06, 1, 0.05])
        fig.colorbar(w, cax=axin, orientation='horizontal')
        ax.set_yticks([])
        ax.set_xticks([])
    import matplotlib.pyplot as plt
    interferograms_mc = interferograms - np.mean(interferograms, axis = 1)[:, np.newaxis]                       # mean centre each interferogram (each row), to match the mean centred reconstruction
    interferograms_ICASAR = time_courses @ sources                                                              # reconstruct the data from the recovered sources and time courses
    residual = interferograms_mc - interferograms_ICASAR                                                        # what the reconstruction fails to explain
    if n_data > interferograms.shape[0]:                                                                        # can't plot more data than we have
        n_data = interferograms.shape[0]
    fig, axes = plt.subplots(3, n_data, figsize = (15,7))
    if n_data == 1:
        axes = np.atleast_2d(axes).T                                                # make 2d, and a column (not a row)
    row_labels = ['Data', 'Model', 'Resid.' ]
    for ax, label in zip(axes[:,0], row_labels):
        ax.set_ylabel(label)
    for data_n in range(n_data):
        vmin = np.min(np.stack((interferograms_mc[data_n,], interferograms_ICASAR[data_n,], residual[data_n])))     # shared colour limits for all three panels of this column
        vmax = np.max(np.stack((interferograms_mc[data_n,], interferograms_ICASAR[data_n,], residual[data_n])))
        plot_ifg(interferograms_mc[data_n,], axes[0,data_n], mask, vmin, vmax)
        plot_ifg(interferograms_ICASAR[data_n,], axes[1,data_n], mask, vmin, vmax)
        plot_ifg(residual[data_n,], axes[2,data_n], mask, vmin, vmax)
#%%
def plot_source_tc_correlations(sources, mask, dem = None, dem_to_ic_comparisons = None, tcs_to_tempbaselines_comparisons = None,
                                png_path = './', figures = "window", fig_title = None):
    """Given information about the ICs, their correlations with the DEM, and their time courses correlations with an interferogram's temporal baseline,
    create a plot of this information.
    Inputs:
        sources | rank 2 array | sources as row vectors.
        mask | rank 2 boolean | to convert a source from a row vector to a rank 2 masked array.
        dem | rank 2 array | The DEM.  Can also be a masked array.
        dem_to_ic_comparisons | dict | keys:
                                        xyzs | list of rank 2 arrays | entry in the list for each signal, xyz are rows.
                                        line_xys | list of rank 2 arrays | entry in the list for each signal, xy are points to plot for the lines of best fit
                                        cor_coefs | list | correlation coefficients between each signal and the master signal.
        tcs_to_tempbaselines_comparisons| dict | keys as above.
        png_path | string | if a png is to be saved, a path to a folder can be supplied, or left as default to write to current directory.
        figures | string,  "window" / "png" / "png+window" | controls if figures are produced (either as a window, saved as a png, or both)
        fig_title | string | sets the window title and the name of the .png produced.
    Returns:
        figure
    History:
        2021_04_22 | MEG | Written.
        2021_04_23 | MEG | Update so that axes are removed if they are not being used.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    from icasar.aux import col_to_ma
    from icasar.aux2 import remappedColorMap, truncate_colormap
    n_sources = sources.shape[0]
    # colour map stuff
    ifg_colours = plt.get_cmap('coolwarm')
    cmap_mid = 1 - np.max(sources)/(np.max(sources) + abs(np.min(sources)))          # get the ratio of the data that 0 lies at (eg if data is -15 to 5, ratio is 0.75)
    if cmap_mid < (1/257):                                                           # this is a fudge so that if plot starts at 0 doesn't include the negative colorus for the smallest values
        ifg_colours_cent = remappedColorMap(ifg_colours, start=0.5, midpoint=0.5, stop=1.0, name='shiftedcmap')
    else:
        ifg_colours_cent = remappedColorMap(ifg_colours, start=0.0, midpoint=cmap_mid, stop=1.0, name='shiftedcmap')
    f, axes = plt.subplots(3, (n_sources+1), figsize = (15,7))
    plt.subplots_adjust(wspace = 0.1)
    if f.canvas.manager is not None:                                                 # set_window_title on the canvas was deprecated in matplotlib 3.4 (and later removed); headless backends have no manager
        f.canvas.manager.set_window_title(f"{fig_title}")
    # 1: Plot the DEM:
    if dem is not None:
        terrain_cmap = plt.get_cmap('terrain')
        terrain_cmap = truncate_colormap(terrain_cmap, 0.2, 1)                       # drop the blue (water) part of the terrain colormap
        dem_plot = axes[1,0].imshow(dem, cmap = terrain_cmap)
        axin = axes[1,0].inset_axes([0, -0.06, 1, 0.05])
        cbar_1 = f.colorbar(dem_plot, cax=axin, orientation='horizontal')
        cbar_1.set_label('Height (m)', fontsize = 8)
        axes[1,0].set_title('DEM')
        axes[1,0].set_xticks([])
        axes[1,0].set_yticks([])
    else:
        axes[1,0].set_axis_off()
    # 2: Find the x and y limits for the 2d scatter plots (shared across each row)
    if dem_to_ic_comparisons is not None:                                            # first check that it actually exists.
        row1_all_xyzs = np.stack(dem_to_ic_comparisons['xyzs'], axis = 2)            # merge together into a rank 3 numpy array.  (3 x n_pixels x n_ics?)
        row1_xlim = (np.min(row1_all_xyzs[0,]), np.max(row1_all_xyzs[0,]))           # x limits are min and max of the first row
        row1_ylim = (np.min(row1_all_xyzs[1,]), np.max(row1_all_xyzs[1,]))           # y limits are min and max of the second row
    if tcs_to_tempbaselines_comparisons is not None:                                 # as above.
        row2_all_xyzs = np.stack(tcs_to_tempbaselines_comparisons['xyzs'], axis = 2)
        row2_xlim = (np.min(row2_all_xyzs[0,]), np.max(row2_all_xyzs[0,]))
        row2_ylim = (np.min(row2_all_xyzs[1,]), np.max(row2_all_xyzs[1,]))
    # 3: Loop through each IC
    for ic_n in range(n_sources):
        # 2a: Plotting the IC
        im = axes[0,ic_n+1].imshow(col_to_ma(sources[ic_n,:], mask), cmap = ifg_colours_cent, vmin = np.min(sources), vmax = np.max(sources))
        axes[0,ic_n+1].set_xticks([])
        axes[0,ic_n+1].set_yticks([])
        axes[0,ic_n+1].set_title(f"Source {ic_n}")
        # 2B: Plotting the IC to DEM scatter, if the data are available
        if dem_to_ic_comparisons is not None:
            axes[1,ic_n+1].scatter(dem_to_ic_comparisons['xyzs'][ic_n][0,:],
                                   dem_to_ic_comparisons['xyzs'][ic_n][1,:], c= dem_to_ic_comparisons['xyzs'][ic_n][2,:])
            axes[1,ic_n+1].plot(dem_to_ic_comparisons['line_xys'][ic_n][0,:], dem_to_ic_comparisons['line_xys'][ic_n][1,:], c = 'r')
            axes[1,ic_n+1].set_xlim(row1_xlim[0], row1_xlim[1])
            axes[1,ic_n+1].set_ylim(row1_ylim[0], row1_ylim[1])
            axes[1,ic_n+1].axhline(0, c='k')
            axes[1,ic_n+1].yaxis.tick_right()                                        # set ticks to be on the right.
            if ic_n != (n_sources-1):
                axes[1,ic_n+1].yaxis.set_ticklabels([])                              # if it's not the last one, turn the tick labels off
            else:
                axes[1,ic_n+1].yaxis.set_ticks_position('right')                     # but if it is, make sure they're on the right.
                axes[1,ic_n+1].set_ylabel(f"IC")
                axes[1,ic_n+1].yaxis.set_label_position('right')
            if ic_n == int(n_sources/2):                                             # on roughly the middle plot...
                axes[1,ic_n+1].set_xlabel('Height (m)')                              # add an x label.
            axes[1,ic_n+1].set_title(f"CorCoef: {np.round(dem_to_ic_comparisons['cor_coefs'][ic_n],2)}", fontsize = 7, color = 'r')
        else:
            axes[1,ic_n+1].set_axis_off()
        if tcs_to_tempbaselines_comparisons is not None:
            axes[2,ic_n+1].scatter(tcs_to_tempbaselines_comparisons['xyzs'][ic_n][0,:],
                                   tcs_to_tempbaselines_comparisons['xyzs'][ic_n][1,:], c= tcs_to_tempbaselines_comparisons['xyzs'][ic_n][2,:])
            axes[2,ic_n+1].plot(tcs_to_tempbaselines_comparisons['line_xys'][ic_n][0,:], tcs_to_tempbaselines_comparisons['line_xys'][ic_n][1,:], c = 'r')
            axes[2,ic_n+1].set_xlim(row2_xlim[0], row2_xlim[1])
            axes[2,ic_n+1].set_ylim(row2_ylim[0], row2_ylim[1])                      # force them to all share a y axis.  Gnerally not good as such varying scales.
            axes[2,ic_n+1].axhline(0, c='k')
            axes[2,ic_n+1].yaxis.tick_right()
            if ic_n != (n_sources-1):
                axes[2,ic_n+1].yaxis.set_ticklabels([])                              # if it's not the last one, turn the tick labels off
            else:
                axes[2,ic_n+1].yaxis.set_ticks_position('right')                     # but if it is, make sure they're on the right.
                axes[2,ic_n+1].set_ylabel(f"IC usage strength")
                axes[2,ic_n+1].yaxis.set_label_position('right')
            if ic_n == int(n_sources/2):                                             # on roughly the middle plot....
                axes[2,ic_n+1].set_xlabel('Temporal Baseline (days)')                # add an x label.
            axes[2,ic_n+1].set_title(f"CorCoef: {np.round(tcs_to_tempbaselines_comparisons['cor_coefs'][ic_n],2)}", fontsize = 7, color = 'r')
        else:
            axes[2,ic_n+1].set_axis_off()
    # 3: The ICs colorbar
    axin = axes[0,0].inset_axes([0.5, 0, 0.1, 1])
    cbar_2 = f.colorbar(im, cax=axin, orientation='vertical')
    cbar_2.set_label('IC')
    axin.yaxis.set_ticks_position('left')
    # last tidying up
    for ax in [axes[0,0], axes[2,0]]:                                                # these two axes are only used as space for the colorbar / nothing, so hide them
        ax.set_axis_off()
    f.tight_layout()
    if figures == 'window':                                                          # possibly save the output
        pass
    elif figures == "png":
        f.savefig(f"{png_path}/{fig_title}.png")
        plt.close()
    elif figures == 'png+window':
        f.savefig(f"{png_path}/{fig_title}.png")
    else:
        pass
#%%
def signals_to_master_signal_comparison(signals, master_signal, density = False):
    """ Given an array of signals (as row vectors), compare each one to a single master signal: optionally compute a
    gaussian kernel density estimate for the scatter of points, fit a line of best fit, and calculate the correlation coefficient.
    Inputs:
        signals | rank 2 | signals as rows.  Even if there's only 1 signal, it still needs to be rank 2
        master_signal | rank 2 | signal as a row, but has to be rank 2
        density | boolean | If True, gaussian kernel density estimate for the points.  Can be slow.
    Returns:
        signal_to_msignal_comparison | dict | keys:
                                                xyzs | list of rank 2 arrays | entry in the list for each signal, xyz are rows.  z is all zeros when density is False.
                                                line_xys | list of rank 2 arrays | entry in the list for each signal, xy are points to plot for the lines of best fit
                                                cor_coefs | list | correlation coefficients between each signal and the master signal.
    Raises:
        Exception | if the signals and the master signal are not the same length (2nd dimension).
    History:
        2021_04_22 | MEG | Written.
        2021_04_26 | MEG | Add check that the signals are of the same length.
    """
    import numpy as np
    from scipy.stats import gaussian_kde
    import numpy.polynomial.polynomial as poly                                              # used for lines of best fit through dem/source plots
    n_signals, n_pixels = signals.shape                                                     # each signal is a row, observations of that are columns.
    if n_pixels != master_signal.shape[1]:
        raise Exception(f"The signals aren't of the same length (2nd dimension), as 'signals' is {n_pixels} long, but 'master_signal' is {master_signal.shape[1]} long.  Exiting.  ")
    xyzs = []                                                                               # initiate
    line_xys = []
    cor_coefs = []
    print(f"Starting to calculate the 2D kernel density estimates for the signals.  Completed ", end = '')
    for signal_n, signal in enumerate(signals):                                             # signal is a row of signals, and loop through them.
        # 1: Do the kernel density estimate
        xy = np.vstack((master_signal, signal[np.newaxis,:]))                               # master signal will be on X and be the top row.
        x = xy[0,:]                                                                         # rank 1 view of the master signal (the x values).  NB rank 1 is required by polyfit below
        y = xy[1,:]                                                                         # rank 1 view of the current signal (the y values).
        if density:
            z = gaussian_kde(xy)(xy)                                                        # density (z value) evaluated at each (x, y) point.
            idx = z.argsort()                                                               # need to be sorted so that when plotted, those with the highest z value go on top.
            x, y, z = x[idx], y[idx], z[idx]
        else:
            z = np.zeros(n_pixels)                                                          # if we're not doing the kernel density estimate, z value is just zeros.
        xyzs.append(np.vstack((x,y,z)))                                                     # 3 rows, for each of x,y and z
        # 2: calculate the lines of best fit.  NB poly.polyfit requires 1D x, which is why x and y
        # are flattened above (the old rank 2 form raised a TypeError here when density was False).
        line_coefs = poly.polyfit(x, y, 1)                                                  # polynomial of order 1 (i.e. a line of best fit)
        line_yvals = (poly.polyval(x, line_coefs))                                          # calculate the lines yvalues for all x points
        line_xys.append(np.vstack((x, line_yvals)))                                         # x vals are first row, y vals are 2nd row
        # 3: And the correlation coefficient
        cor_coefs.append(np.corrcoef(x, y)[1,0])                                            # which is a 2x2 matrix, but we just want the off diagonal (as thats the correlation coefficient between the signals)
        print(f"{signal_n} ", end = '')
    print('\n')
    signal_to_msignal_comparison = {'xyzs' : xyzs,
                                    'line_xys' : line_xys,
                                    'cor_coefs' : cor_coefs}
    return signal_to_msignal_comparison
#%%
def create_all_ifgs(ifgs_r2, ifg_dates, max_n_all_ifgs = 1000):
    """Given a rank 2 of incremental ifgs, calculate all the possible ifgs that still step forward in time  (i.e. if deformation is positive in all incremental ifgs,
    it remains positive in all the returned ifgs.)  As acquisition dates are provided, the dates that all the possible ifgs span are also found.
    Inputs:
        ifgs_r2 | rank 2 array | Interferograms as row vectors.
        ifg_dates | list of strings | dates in the form YYYYMMDD_YYYYMMDD.  As the interferograms are incremental, this should be the same length as the number of ifgs
        max_n_all_ifgs | int | if more ifgs than this are created, a random subset of this many is returned.
    Returns:
        ifgs_all_r2 | rank 2 array | Only the ones that are non-zero (the diagonal in ifgs_r3) and in the lower left corner (so deformation isn't reversed. )
        dates_all_r1 | list of strings | the dates (YYYYMMDD_YYYYMMDD) that each returned ifg spans.
    History:
        2021_04_13 | MEG | Written
        2021_04_19 | MEG | add funcionality to calculate the temporal baselines of all possible ifgs.
        2021_04_29 | MEG | Add functionality to handle networks with breaks in them.
    """
    import numpy as np
    from datetime import datetime, timedelta
    import random
    from icasar.aux2 import acquisitions_from_ifg_dates

    def triange_lower_left_indexes(side_length):
        """ For a square matrix of size side_length, get the index of all the values that are in the lower
        left quadrant (i.e. all to the lower left of the diagonals).
        Inputs:
            side_length | int | side length of the square.  e.g. 5 for a 5x5
        Returns:
            lower_left_indexes | rank 2 array | indexes of all elements below the diagonal.
        History:
            2021_04_13 | MEG | Written.
        """
        import numpy as np
        zeros_array = np.ones((side_length, side_length))                                       # initate as ones so none will be selected.
        zeros_array = np.triu(zeros_array)                                                      # set the lower left to 0s
        lower_left_indexes = np.argwhere(zeros_array == 0)                                      # select only the lower lefts
        return lower_left_indexes

    n_ifgs, n_pixs = ifgs_r2.shape

    # 1: Determine if the network is continuous, and if not split it into lists of continuous sub-networks
    ifg_dates_continuous = []                                                                   # list of the dates for a continuous network
    ifgs_r2_continuous = []                                                                     # and the incremental interferograms in that network.
    start_continuous_run = 0
    for ifg_n in range(n_ifgs-1):
        if (ifg_dates[ifg_n][9:] != ifg_dates[ifg_n+1][:8]):                                    # if the dates don't agree (this ifg's second date is not the next ifg's first date), there is a break in the network
            ifg_dates_continuous.append(ifg_dates[start_continuous_run:ifg_n+1])                # +1 as want to include the last date in the selection
            ifgs_r2_continuous.append(ifgs_r2[start_continuous_run:ifg_n+1,])
            start_continuous_run = ifg_n+1                                                      # +1 so that when we index the next time, it doesn't include ifg_n
        if ifg_n == n_ifgs -2:                                                                  # of if we've got to the end of the list.
            ifg_dates_continuous.append(ifg_dates[start_continuous_run:])                       # select to the end.
            ifgs_r2_continuous.append(ifgs_r2[start_continuous_run:,])
    n_networks = len(ifg_dates_continuous)                                                      # get the number of connected networks.

    # 2: Loop through each continuous network and make all possible ifgs.
    ifgs_all_r2 = []
    dates_all_r1 = []
    for n_network in range(n_networks):
        ifgs_r2_temp = ifgs_r2_continuous[n_network]
        ifg_dates_temp = ifg_dates_continuous[n_network]
        n_acq = ifgs_r2_temp.shape[0] + 1
        # 2a: convert from daisy chain of incremental to a relative to a single master at the start of the time series.
        acq1_def = np.zeros((1, n_pixs))                                                        # deformation is 0 at the first acquisition
        ifgs_cs = np.cumsum(ifgs_r2_temp, axis = 0)                                             # convert from incremental to cumulative.
        ifgs_cs = np.vstack((acq1_def, ifgs_cs))                                                # add the 0 at first time ifg to the other cumulative ones.
        # 2b: create all possible ifgs
        ifgs_cube = np.zeros((n_acq, n_acq, n_pixs))                                            # cube to store all possible ifgs in
        for i in range(n_acq):                                                                  # used to loop through each column
            ifgs_cube[:,i,] = ifgs_cs - ifgs_cs[i,]                                             # make one column (ie all the rows) by taking all the ifgs and subtracting one time from it
        # 2c: Get only the positive ones (ie the lower left quadrant)
        lower_left_indexes = triange_lower_left_indexes(n_acq)                                  # get the indexes of the ifgs in the lower left corner (ie. non 0, and with unreveresed deformation. )
        ifgs_all_r2.append(ifgs_cube[lower_left_indexes[:,0], lower_left_indexes[:,1], :])      # get those ifgs and store as row vectors.
        # 2d: Calculate the dates that the new ifgs run between.
        acq_dates = acquisitions_from_ifg_dates(ifg_dates_temp)                                 # get the acquisitions from the ifg dates.
        ifg_dates_all_r2 = np.empty([n_acq, n_acq], dtype='U17')                                # initate an array that can hold unicode strings.
        for row_n, date1 in enumerate(acq_dates):                                               # loop through rows
            for col_n, date2 in enumerate(acq_dates):                                           # loop through columns
                ifg_dates_all_r2[row_n, col_n] = f"{date2}_{date1}"                             # in the lower left corner (row > col), date2 (the column) is the earlier acquisition
        ifg_dates_all_r1 = list(ifg_dates_all_r2[lower_left_indexes[:,0], lower_left_indexes[:,1]])     # just get the lower left corner (like for the ifgs)
        dates_all_r1.append(ifg_dates_all_r1)

    # 3: convert lists back to a single matrix of all interferograms.
    ifgs_all_r2 = np.vstack(ifgs_all_r2)                                                        # now one big array of n_ifgs x n_pixels
    dates_all_r1 = [item for sublist in dates_all_r1 for item in sublist]                       # dates_all_r1 is a list (one for each connected network) of lists (each ifg date).  This turns them into a single list.

    # 4: Possibly delete some of these if we have too many:
    if ifgs_all_r2.shape[0] > max_n_all_ifgs:
        retained_args = np.arange(ifgs_all_r2.shape[0])                                         # an index for every possible ifg
        random.shuffle(retained_args)                                                           # shuffled in place
        retained_args = retained_args[:max_n_all_ifgs]                                          # keep a random max_n_all_ifgs of them
        ifgs_all_r2 = ifgs_all_r2[retained_args,:]
        dates_all_r1 = [dates_all_r1[retained_arg] for retained_arg in retained_args]           # surely there is a better way to index a list with an array?
    return ifgs_all_r2, dates_all_r1
#%%
def plot_spatial_signals(spatial_map, pixel_mask, timecourse, shape, title, shared = 0,
                         temporal_baselines = None, figures = "window", png_path = './'):
    """Plot spatial sources (as images, top row) above their time courses (as line graphs, bottom row).
    Input:
        spatial_map | pxc matrix of c component maps (p pixels)
        pixel_mask | mask to turn spaital maps back to regular grided masked arrays
        timecourse | cxt matrix of c time courses (t long)
        shape | tuple | the shape of the grid that the spatial maps are reshaped to
        title | string | figure tite and png filename (nb .png will be added, don't include here)
        shared | 0 or 1 | if 1, spatial maps share colorbar and time courses shared vertical axis
        temporal_baselines | x axis values for time courses.  Useful if some data are missing (ie the odd 24 day ifgs in a time series of mainly 12 day)
        figures | string,  "window" / "png" / "png+window" | controls if figures are produced (either as a window, saved as a png, or both)
        png_path | string | if a png is to be saved, a path to a folder can be supplied, or left as default to write to current directory.
    Returns:
        Figure, either as a window or saved as a png
    History:
        2017/02/17 | modified to use masked arrays that are given as vectors by spatial map, but can be converted back to
                     masked arrays using the pixel mask
        2017/05/12 | shared scales as decrived in 'shared'
        2017/05/15 | remove shared colorbar for spatial maps
        2017/10/16 | remove limit on the number of componets to plot (was 5)
        2017/12/06 | Add a colorbar if the plots are shared, add an option for the time courses to be done in days
        2017/12/?? | add the option to pass temporal baselines to the function
        2020/03/03 | MEG | Add option to save figure as png and close window
    """
    import numpy as np
    import numpy.ma as ma
    import matplotlib.pyplot as plt
    from icasar.aux2 import remappedColorMap, truncate_colormap

    def linegraph(sig, ax, temporal_baselines = None):
        """ Plot a 1xt row vector as a line graph on ax, with a horizontal line through 0.  """
        if temporal_baselines is None:
            times = sig.size
            a = np.arange(times)
        else:
            a = temporal_baselines
        ax.plot(a,sig,marker='o', color='k')
        ax.axhline(y=0, color='k', alpha=0.4)

    # colour map stuff
    ifg_colours = plt.get_cmap('coolwarm')
    cmap_mid = 1 - np.max(spatial_map)/(np.max(spatial_map) + abs(np.min(spatial_map)))     # get the ratio of the data that 0 lies at (eg if data is -15 to 5, ratio is 0.75)
    if cmap_mid < (1/257):                                                                  # this is a fudge so that if plot starts at 0 doesn't include the negative colorus for the smallest values
        ifg_colours_cent = remappedColorMap(ifg_colours, start=0.5, midpoint=0.5, stop=1.0, name='shiftedcmap')
    else:
        ifg_colours_cent = remappedColorMap(ifg_colours, start=0.0, midpoint=cmap_mid, stop=1.0, name='shiftedcmap')

    # make a list of ifgs as masked arrays (and not column vectors)
    spatial_maps_ma = []
    for i in range(np.size(spatial_map,1)):
        spatial_maps_ma.append(ma.array(np.zeros(pixel_mask.shape), mask = pixel_mask ))
        spatial_maps_ma[i].unshare_mask()
        spatial_maps_ma[i][~spatial_maps_ma[i].mask] = spatial_map[:,i].ravel()
    n_sources = spatial_map.shape[1]                                                        # one column of spatial_map per source

    f, (ax_all) = plt.subplots(2, n_sources, figsize=(15,7))
    f.suptitle(title, fontsize=14)
    if f.canvas.manager is not None:                                                        # set_window_title on the canvas was deprecated in matplotlib 3.4; headless backends have no manager
        f.canvas.manager.set_window_title(title)
    for i in range(n_sources):
        im = ax_all[0,i].imshow(spatial_maps_ma[i], cmap = ifg_colours_cent, vmin = np.min(spatial_map), vmax = np.max(spatial_map))
        ax_all[0,i].set_xticks([])
        ax_all[0,i].set_yticks([])
    for i in range(n_sources):
        linegraph(timecourse[i,:], ax_all[1,i], temporal_baselines)
        if temporal_baselines is not None:
            ax_all[1,i].set_xlabel('Days')
        if shared ==1:
            ax_all[1,i].set_ylim([np.min(timecourse) , np.max(timecourse)])

    if shared == 1:                                                                         # if the colourbar is shared between each subplot, the axes need extending to make space for it.
        f.tight_layout(rect=[0, 0, 0.94, 1])
        cax = f.add_axes([0.94, 0.6, 0.01, 0.3])
        f.colorbar(im, cax=cax, orientation='vertical')

    if figures == 'window':                                                                 # possibly save the output
        pass
    elif figures == "png":
        f.savefig(f"{png_path}/{title}.png")
        plt.close()
    elif figures == 'png+window':
        f.savefig(f"{png_path}/{title}.png")
    else:
        pass
#%%
def plot_temporal_signals(signals, title = None, signal_names = None,
                          figures = "window", png_path = './'):
    """Plot a set of time signals stored in a matrix as rows, one subplot per signal.
    Inputs:
        signals | rank 2 array | signals as row vectors.  e.g. 1x100
        title | string | figure title.
        signal_names | list of strings | names of each signal
        figures | string,  "window" / "png" / "png+window" | controls if figures are produced (either as a window, saved as a png, or both)
        png_path | string | if a png is to be saved, a path to a folder can be supplied, or left as default to write to current directory.
    Returns:
        Figure, either as a window or saved as a png
    History:
        2020/09/09 | MEG | Written
    """
    import matplotlib.pyplot as plt
    import numpy as np
    n_signals = signals.shape[0]
    fig1, axes = plt.subplots(n_signals,1, figsize = (10,6))
    axes = np.atleast_1d(axes)                                          # with a single signal, plt.subplots returns a bare Axes (not an array), which would break the indexing below
    if title is not None:
        if fig1.canvas.manager is not None:                             # set_window_title on the canvas was deprecated in matplotlib 3.4; headless backends have no manager
            fig1.canvas.manager.set_window_title(title)
        fig1.suptitle(title)
    for signal_n, signal in enumerate(signals):
        axes[signal_n].plot(np.arange(0, signals.shape[1]), signal)
        if signal_names is not None:
            axes[signal_n].set_ylabel(signal_names[signal_n])
        axes[signal_n].grid(alpha = 0.5)
        if signal_n != (n_signals-1):                                   # only the bottom subplot keeps its x tick labels
            axes[signal_n].set_xticklabels([])
    if figures == 'window':                                             # possibly save the output
        pass
    elif figures == "png":
        fig1.savefig(f"{png_path}/{title}.png")
        plt.close()
    elif figures == 'png+window':
        fig1.savefig(f"{png_path}/{title}.png")
    else:
        pass
# connect the figure and the function.
#%%
def plot_pca_variance_line(pc_vals, title = '', figures = 'window', png_path = './'):
    """
    A function to display the cumulative variance in each dimension of some high D data
    Inputs:
        pc_vals | rank 1 array | variance in each dimension.  Most important dimension first.
        title | string | figure title
        figures | string,  "window" / "png" / "png+window" | controls if figures are produced (either as a window, saved as a png, or both)
        png_path | string or None | if a png is to be saved, a path to a folder can be supplied, or left as default to write to current directory.
    Returns:
        figure, either as window or saved as a png
    History:
        2019/XX/XX | MEG | Written
        2020/03/03 | MEG | Add option to save as png
    """
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MaxNLocator
    f, ax = plt.subplots()
    pc_vals_cs = np.concatenate((np.array([0]), np.cumsum(pc_vals)))            # prepend a 0 so the cumulative line starts at the origin
    x_vals = np.arange(len(pc_vals_cs))
    ax.plot(x_vals, pc_vals_cs/pc_vals_cs[-1])                                  # normalise by the total so the line ends at 1
    ax.scatter(x_vals, pc_vals_cs/pc_vals_cs[-1])
    ax.set_xlabel('Component number')
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))                       # component numbers are integers, so don't label fractional ticks
    ax.set_ylabel('Cumulative Variance')
    ax.set_ylim([0, 1])
    ax.set_title(title)
    if f.canvas.manager is not None:                                            # set_window_title on the canvas was deprecated in matplotlib 3.4; headless backends have no manager
        f.canvas.manager.set_window_title(title)
    if figures == 'window':
        pass
    elif figures == "png":
        f.savefig(f"{png_path}/01_pca_variance_line.png")
        plt.close()
    elif figures == 'png+window':
        f.savefig(f"{png_path}/01_pca_variance_line.png")
    else:
        pass
#%%
def maps_tcs_rescale(maps, tcs):
    """ Rescale spatial maps so that each has unit range, and counter-scale the
    time courses so that the product of the two matrices is unchanged.
    input:
        maps | array | spatial maps as rows (e.g. 2x1775)
        tcs  | array | time courses as columns (e.g. 15x2)
    Output:
        maps_scaled | array | spatial maps as rows with each row having unit range
        tcs_scaled  | array | TCs scaled so that new maps x new tcs equals maps x tcs
    2017/05/15 | written
    """
    import numpy as np

    row_ranges = np.ptp(maps, axis = 1)[:, np.newaxis]          # peak-to-peak (max - min) of every spatial map, as a column vector
    maps_scaled = maps / row_ranges                             # divide each map (row) by its own range, giving unit range
    tcs_scaled = tcs * np.ravel(row_ranges)                     # multiply each time course (column) by the same factor, so tcs x maps is unchanged
    return maps_scaled, tcs_scaled
#%%
def bss_components_inversion(sources, interferograms):
    """
    A function to fit interferograms using components learned by BSS, and return how strongly
    each component is required to reconstruct each interferogram, and the misfit of that fit.
    Inputs:
        sources | n_sources x pixels | ie architecture I.  Mean centered
        interferograms | list of (n_ifgs x pixels) | Doesn't have to be mean centered, multiple interferograms to be fit can be fit by making the list as long as required.
    Outputs:
        inversion_results | list of dicts | one per item in 'interferograms', with keys:
                                                tcs | rank 1 array | the strengths with which to use each source to reconstruct the ifg.
                                                model | rank 1 array | the reconstruction of the (mean centred) ifg from the sources.
                                                residual | rank 1 array | ifg - model.
                                                l2_norm | float | the misfit between the ifg and the ifg reconstructed from sources
    """
    import numpy as np
    inversion_results = []
    for interferogram in interferograms:
        interferogram = interferogram - np.mean(interferogram)              # mean centre.  NB: deliberately not in-place (-=) so the caller's array is not modified.
        n_pixels = np.size(interferogram)
        d = interferogram.T                                                 # now n_pixels x n_ifgs
        g = sources.T                                                       # a matrix of ICA sources and each is a column (n_pixels x n_sources)
        m = np.linalg.inv(g.T @ g) @ g.T @ d                                # m (n_sources x 1), least squares solution.  (Minimum-norm and Tikhonov variants were trialled here previously.)
        d_hat = g@m                                                         # reconstruct the data from the model
        d_resid = d - d_hat
        mean_l2norm = np.sqrt(np.sum(d_resid**2))/n_pixels                  # misfit between ifg and ifg reconstructed from sources
        inversion_results.append({'tcs'      : m,
                                  'model'    : d_hat,
                                  'residual' : d_resid,
                                  'l2_norm'  : mean_l2norm})
    return inversion_results
#%%
def col_to_ma(col, pixel_mask):
    """ Reshape a column vector of unmasked pixel values back into a rank 2 masked array,
    using a pixel mask to determine where each value belongs.
    Useful when converting between vectors used by BSS methods results that are to be plotted
    Inputs:
        col | rank 1 array |
        pixel_mask | array mask (rank 2)
    Outputs:
        source | rank 2 masked array | colun as a masked 2d array
    """
    import numpy.ma as ma
    import numpy as np
    mask_bool = ma.make_mask(pixel_mask, shrink = False)        # guarantee a boolean mask, whatever was passed in
    grid = np.zeros(pixel_mask.shape)                           # masked pixels keep this 0 as their underlying data value
    grid[~mask_bool] = np.ravel(col)                            # scatter the column's values into the unmasked positions
    return ma.array(grid, mask = pixel_mask)
#%% taken from insar_tools.py
def r2_to_r3(ifgs_r2, mask):
    """ Given a rank2 of ifgs as row vectors, convert it to a rank3 masked array.
    Inputs:
        ifgs_r2 | rank 2 array | ifgs as row vectors
        mask | rank 2 array | to convert a row vector ifg into a rank 2 masked array
    returns:
        phUnw | rank 3 array | n_ifgs x height x width
    History:
        2020/06/10 | MEG  | Written
    """
    import numpy as np
    import numpy.ma as ma
    n_ifgs = ifgs_r2.shape[0]
    cube = np.stack([ma.getdata(col_to_ma(ifg_row, mask)) for ifg_row in ifgs_r2], axis = 0)    # regrid every row vector and stack into an (n_ifgs x ny x nx) cube
    cube_mask = np.stack([mask] * n_ifgs, axis = 0)                                             # the same rank 2 mask, repeated once per ifg
    return ma.array(cube, mask = cube_mask)                                                     # combine into a rank 3 masked array
#%% Copied from small_plot_functions.py
def r2_arrays_to_googleEarth(images_r3_ma, lons, lats, layer_name_prefix = 'layer', kmz_filename = 'ICs',
                             out_folder = './'):
    """ Given one or several arrays in a rank3 array, create a multilayer Google Earth file (.kmz) of them.
    Inputs:
        images_r3_ma | rank3 masked array |x n_images x ny x nx
        lons | rank 2 array | lons of each pixel in the image.
        lats | rank 2 array | lats of each pixel in theimage.
        layer_name_prefix | string | Can be used to set the name of the layes in the kmz (nb of the form layer_name_prefix_001 etc. )
        kmz_filename | string | Sets the name of the kmz produced
        out_folder | pathlib Path (or string) | path to location to save .kmz.
    Returns:
        kmz file
    Raises:
        Exception | if the temporary folder for intermediate pngs can neither be created nor recreated.
    History:
        2020/06/10 | MEG | Written
        2021/03/11 | MEG | Update to handle incorrectly sized lons and lats arrays (e.g. rank2 arrays instead of rank 1)
    """
    import numpy as np
    import os
    import shutil
    import simplekml
    from pathlib import Path
    n_images = images_r3_ma.shape[0]
    if type(out_folder) == str:                                                 # this should really be a path, but it could easily be a string.
        out_folder = Path(out_folder)                                           # if it is a string, conver it.
    # 0 temporary folder for intermediate pngs
    try:
        os.mkdir('./temp_kml')                                                  # make a temporay folder to save pngs
    except OSError:                                                             # most likely the folder is left over from a previous run
        print("Can't create a folder for temporary kmls. Trying to delete 'temp_kml' incase it exisits already... ", end = "")
        try:
            shutil.rmtree('./temp_kml')                                         # try to remove folder
            os.mkdir('./temp_kml')                                              # make a temporay folder to save pngs
            print("Done. ")
        except OSError as os_error:
            raise Exception("Problem making a temporary directory to store intermediate pngs" ) from os_error
    # 1: Initiate the kml
    kml = simplekml.Kml()
    # 2 Begin to loop through each iamge
    for n_image in np.arange(n_images)[::-1]:                                   # Reverse so that first IC is processed last and appears as visible
        layer_name = f"{layer_name_prefix}_{str(n_image).zfill(3)}"             # get the name of a layer a sttring
        r2_array_to_png(images_r3_ma[n_image,], layer_name, './temp_kml/')      # save as an intermediate .png
        ground = kml.newgroundoverlay(name= layer_name)                         # add the overlay to the kml file
        ground.icon.href = f"./temp_kml/{layer_name}.png"                       # and the actual image part
        ground.gxlatlonquad.coords = [(lons[-1,0], lats[-1,0]), (lons[-1,-1],lats[-1,-1]),      # lon, lat of image south west, south east
                                      (lons[0,-1], lats[0,-1]), (lons[0,0],lats[0,0])]          # north east, north west  - order is anticlockwise around the square, startign in the lower left
    #3: Tidy up at the end
    kml.savekmz(out_folder / f"{kmz_filename}.kmz", format=False)               # Saving as KMZ
    shutil.rmtree('./temp_kml')
#%% Copied from small_plot_functions.py
def r2_array_to_png(r2, filename, png_folder = './'):
    """ Given a rank 2 array/image, save it as a png with no borders.
    If a masked array is used, transparency for masked areas is conserved.
    Designed for use with Google Earth overlays.
    Inputs:
        r2 | rank 2 array | image / array to be saved
        filename | string | name of .png (without the extension)
        png_folder | string | folder to save in, end with / e.g. ./kml_outputs/
    Returns:
        png of figure
    History:
        2020/06/10 | MEG | Written
        2021_05_05 | MEG | Change colours to coolwarm.
    """
    import matplotlib.pyplot as plt
    f, ax = plt.subplots(1,1)
    ax.imshow(r2, cmap = plt.get_cmap('coolwarm'))
    # strip every border / axis element so only the raster remains
    plt.gca().set_axis_off()
    plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
    plt.margins(0,0)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    # bugfix: previously saved to the literal name "(unknown).png", ignoring the
    # `filename` argument, so every layer overwrote the same file and callers
    # (e.g. r2_arrays_to_googleEarth) referenced pngs that were never written.
    plt.savefig(f"{png_folder}{filename}.png", bbox_inches = 'tight',pad_inches = 0, transparent = True)
    plt.close()
#%%
def prepare_point_colours_for_2d(labels, cluster_order):
    """Given the label for each point (ie 1, 2 or 3 say, or -1 if noise) and the order of importance to the clusters
    (ie cluster 3 is the most compact and isolated so has the highest Iq value, then cluster 1, then cluster 2), return
    a list of colours for each point so they can be plotted using a standard .scatter funtcion. Ie all the points labelled
    3 have the same colour.
    Inputs:
        labels | rank 1 array | the label showing which cluster each point is in. e.g. (1000)
        cluster_order | rank 1 array | to determine which cluster should be blue (the best one is always in blue, the 2nd best in orange etc. )
    Returns:
        labels_chosen_colours | np array | colour (hex string) for each point.  Same length as labels; noise (-1) points are grey.
    History:
        2020/09/10 | MEG | Written
        2020/09/11 | MEG | Update so returns a numpy array and not a list (easier to index later on. )
    """
    import numpy as np
    n_clusters = len(cluster_order)
    colours = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
               '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']                    # the standard nice Matplotlib colours
    if n_clusters > 10:                                                                  # if we have more than 10 clusters, generate some random colours
        for i in range(n_clusters - 10):                                                 # how many more colours we need (as we already have 10)
            # bugfix: channels were previously drawn from 0-25 (near-black and easily
            # duplicated) instead of the full 0-255 RGB range that the original comment
            # describes and that prepare_legends_for_2d uses.
            colours.append(('#%02X%02X%02X' % (np.random.randint(0,255),
                                               np.random.randint(0,255),
                                               np.random.randint(0,255))))               # generate colours randomly (ie a point between 0 and 255 in 3 dimensions. )
    else:
        colours = colours[:n_clusters]                                                   # crop to length if we have 10 or less colours
    colours2 = []                                                                        # new list of colours, 1st item is the colour that label 0 should be (which is not necesarily blue)
    for i in range(n_clusters):                                                          # loop through each cluster
        colours2.append(colours[int(np.argwhere(cluster_order == i))])                   # populate the list
    labels_chosen_colours = []                                                           # initiate a list where instead of label for each source, we have its colour
    for label in(labels):                                                                # Loop through each point's label
        if label == (-1):                                                                # if noise,
            labels_chosen_colours.append('#c9c9c9')                                      # colour is grey
        else:
            labels_chosen_colours.append(colours2[label])                                # otherwise, the correct colour (nb colours2 is reordered so the most important clusters have the usual blue etc. colours)
    labels_chosen_colours = np.asarray(labels_chosen_colours)                            # convert from list to numpy array
    return labels_chosen_colours
#%%
def prepare_legends_for_2d(clusters_by_max_Iq_no_noise, Iq):
    """Build the legend for plot_2d_interactive_fig from the cluster ordering and quality indices (Iq).
    Inputs:
        clusters_by_max_Iq_no_noise | rank1 array | e.g. (3,2,4,1) if cluster 3 has the highest Iq.
        Iq | list | Iq for each cluster.  1st item in list is Iq for 1st cluster.
    Returns:
        legend_dict | dict | 'elements' (Matplotlib Line2D proxies) and 'labels' (list of strings).
    History:
        2020/09/10 | MEG | Written
    """
    import numpy as np
    from matplotlib.lines import Line2D                                  # for the manual legend
    n_clusters = len(Iq)
    # one proxy marker per standard Matplotlib colour
    base_colours = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
                    '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
    legend_elements = [Line2D([0], [0], marker='o', color='w', markerfacecolor=colour)
                       for colour in base_colours]
    if n_clusters > 10:                                                  # extra clusters get randomly coloured markers
        for _ in range(n_clusters - 10):
            random_colour = '#%02X%02X%02X' % (np.random.randint(0,255),
                                               np.random.randint(0,255),
                                               np.random.randint(0,255))
            legend_elements.append(Line2D([0], [0], marker='o', color='w', markerfacecolor=random_colour))
    legend_elements = legend_elements[:n_clusters]                       # crop to length
    # one label per cluster, in quality order, then a trailing grey entry for noise
    legend_labels = [f'#: {i}\nIq: {np.round(Iq[i], 2)} ' for i in clusters_by_max_Iq_no_noise]
    legend_labels.append('Noise')
    legend_elements.append(Line2D([0], [0], marker='o', color='w', markerfacecolor='#c9c9c9'))
    return {'elements' : legend_elements,
            'labels' : legend_labels}
| StarcoderdataPython |
4811238 | from xml.dom import minidom
import urllib
# GeoNames full-text search endpoint; %s is replaced with the query string in get_geocode.
GEOCODER="http://ws.geonames.org/search?q=%s"
from math import *
def haversine(co1, co2):
    """Great-circle distance in miles between two (lon, lat) coordinate pairs, in degrees."""
    lon1, lat1 = co1
    lon2, lat2 = co2
    # work in radians from here on
    lon1, lat1, lon2, lat2 = (v * pi / 180 for v in (lon1, lat1, lon2, lat2))
    # haversine formula
    half_dlon = (lon2 - lon1) / 2
    half_dlat = (lat2 - lat1) / 2
    a = sin(half_dlat)**2 + cos(lat1) * cos(lat2) * sin(half_dlon)**2
    central_angle = 2 * atan2(sqrt(a), sqrt(1-a))
    km = 6367 * central_angle            # 6367 km: approximate Earth radius
    return km * 0.621                    # km -> miles
def get_geocode(place):
    """Geocode *place* via the GeoNames search web service.

    Returns ("Name, COUNTRYCODE", (lat, lng)) for the first match, or
    (None, None) when GeoNames returns no <geoname> elements.

    NOTE(review): Python 2 only -- urllib.urlopen does not exist on Python 3
    (urllib.request.urlopen there).  Performs a blocking network call and
    assumes the first <geoname> always carries name/countryCode/lat/lng
    children with text content -- TODO confirm against the GeoNames schema.
    """
    url = GEOCODER % place
    fp = urllib.urlopen(url)
    data = fp.read()
    dom = minidom.parseString(data)
    geonames = dom.getElementsByTagName("geoname")
    if len(geonames) == 0:
        return (None, None)
    # only the first hit is used; presumably GeoNames orders by relevance
    name = geonames[0].getElementsByTagName("name")[0].firstChild.nodeValue
    country = geonames[0].getElementsByTagName("countryCode")[0].firstChild.nodeValue
    lat = float(geonames[0].getElementsByTagName("lat")[0].firstChild.nodeValue)
    lng = float(geonames[0].getElementsByTagName("lng")[0].firstChild.nodeValue)
    return ("%s, %s" % (name, country), (lat, lng))
def cmd_distance_from(ensoapi, places):
    "Show the distance between two places"
    # split on " to " when present ("A to B"), otherwise on the first run of whitespace
    if " to " in places:
        place1, place2 = places.split(" to ", 1)
    else:
        place1, place2 = places.split(None, 1)
    show_distance(ensoapi, place1, place2)
def cmd_distance_to(ensoapi, place):
    "Show the distance from Stourbridge to the place"
    # fixed origin for this command; delegate the rest to show_distance
    home_town = "Stourbridge"
    show_distance(ensoapi, home_town, place)
def show_distance(ensoapi, place1, place2):
    """Geocode both places and display the great-circle distance between them in miles."""
    resolved = []
    for query in (place1, place2):
        name, location = get_geocode(query)
        if not name:
            # first failure wins: report it and stop (the second place is not looked up)
            ensoapi.display_message("Couldn't identify '%s' as a place" % query)
            return
        resolved.append((name, location))
    (name1, loc1), (name2, loc2) = resolved
    miles = haversine(loc1, loc2)
    ensoapi.display_message("Distance between %s and %s: %s miles" % (
        name1, name2, round(miles, 1)
    ))
| StarcoderdataPython |
3550836 | <filename>futu/common/pb/Trd_GetFunds_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: Trd_GetFunds.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# ---------------------------------------------------------------------------
# NOTE(review): everything below is protoc-generated descriptor wiring for
# Trd_GetFunds.proto (proto2).  It defines four messages -- C2S (request
# body), S2C (response body), and the Request/Response wrappers.  Do not
# hand-edit; regenerate with protoc if the .proto changes.
# ---------------------------------------------------------------------------
_sym_db = _symbol_database.Default()
import Common_pb2 as Common__pb2
import Trd_Common_pb2 as Trd__Common__pb2
# File-level descriptor; serialized_pb is the compiled FileDescriptorProto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='Trd_GetFunds.proto',
  package='Trd_GetFunds',
  syntax='proto2',
  serialized_pb=_b('\n\x12Trd_GetFunds.proto\x12\x0cTrd_GetFunds\x1a\x0c\x43ommon.proto\x1a\x10Trd_Common.proto\"T\n\x03\x43\x32S\x12%\n\x06header\x18\x01 \x02(\x0b\x32\x15.Trd_Common.TrdHeader\x12\x14\n\x0crefreshCache\x18\x02 \x01(\x08\x12\x10\n\x08\x63urrency\x18\x03 \x01(\x05\"N\n\x03S2C\x12%\n\x06header\x18\x01 \x02(\x0b\x32\x15.Trd_Common.TrdHeader\x12 \n\x05\x66unds\x18\x02 \x01(\x0b\x32\x11.Trd_Common.Funds\")\n\x07Request\x12\x1e\n\x03\x63\x32s\x18\x01 \x02(\x0b\x32\x11.Trd_GetFunds.C2S\"b\n\x08Response\x12\x15\n\x07retType\x18\x01 \x02(\x05:\x04-400\x12\x0e\n\x06retMsg\x18\x02 \x01(\t\x12\x0f\n\x07\x65rrCode\x18\x03 \x01(\x05\x12\x1e\n\x03s2c\x18\x04 \x01(\x0b\x32\x11.Trd_GetFunds.S2CB\x15\n\x13\x63om.futu.openapi.pb')
  ,
  dependencies=[Common__pb2.DESCRIPTOR,Trd__Common__pb2.DESCRIPTOR,])
# C2S: client -> server body (header, refreshCache, currency).
_C2S = _descriptor.Descriptor(
  name='C2S',
  full_name='Trd_GetFunds.C2S',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='Trd_GetFunds.C2S.header', index=0,
      number=1, type=11, cpp_type=10, label=2,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='refreshCache', full_name='Trd_GetFunds.C2S.refreshCache', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='currency', full_name='Trd_GetFunds.C2S.currency', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=68,
  serialized_end=152,
)
# S2C: server -> client body (header, funds).
_S2C = _descriptor.Descriptor(
  name='S2C',
  full_name='Trd_GetFunds.S2C',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='header', full_name='Trd_GetFunds.S2C.header', index=0,
      number=1, type=11, cpp_type=10, label=2,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='funds', full_name='Trd_GetFunds.S2C.funds', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=154,
  serialized_end=232,
)
# Request: top-level wrapper around C2S.
_REQUEST = _descriptor.Descriptor(
  name='Request',
  full_name='Trd_GetFunds.Request',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='c2s', full_name='Trd_GetFunds.Request.c2s', index=0,
      number=1, type=11, cpp_type=10, label=2,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=234,
  serialized_end=275,
)
# Response: top-level wrapper (retType defaults to -400, plus retMsg/errCode/s2c).
_RESPONSE = _descriptor.Descriptor(
  name='Response',
  full_name='Trd_GetFunds.Response',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='retType', full_name='Trd_GetFunds.Response.retType', index=0,
      number=1, type=5, cpp_type=1, label=2,
      has_default_value=True, default_value=-400,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='retMsg', full_name='Trd_GetFunds.Response.retMsg', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='errCode', full_name='Trd_GetFunds.Response.errCode', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='s2c', full_name='Trd_GetFunds.Response.s2c', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=277,
  serialized_end=375,
)
# Cross-link message-typed fields and register the descriptors by name.
_C2S.fields_by_name['header'].message_type = Trd__Common__pb2._TRDHEADER
_S2C.fields_by_name['header'].message_type = Trd__Common__pb2._TRDHEADER
_S2C.fields_by_name['funds'].message_type = Trd__Common__pb2._FUNDS
_REQUEST.fields_by_name['c2s'].message_type = _C2S
_RESPONSE.fields_by_name['s2c'].message_type = _S2C
DESCRIPTOR.message_types_by_name['C2S'] = _C2S
DESCRIPTOR.message_types_by_name['S2C'] = _S2C
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes generated from the descriptors above.
C2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(
  DESCRIPTOR = _C2S,
  __module__ = 'Trd_GetFunds_pb2'
  # @@protoc_insertion_point(class_scope:Trd_GetFunds.C2S)
  ))
_sym_db.RegisterMessage(C2S)
S2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(
  DESCRIPTOR = _S2C,
  __module__ = 'Trd_GetFunds_pb2'
  # @@protoc_insertion_point(class_scope:Trd_GetFunds.S2C)
  ))
_sym_db.RegisterMessage(S2C)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
  DESCRIPTOR = _REQUEST,
  __module__ = 'Trd_GetFunds_pb2'
  # @@protoc_insertion_point(class_scope:Trd_GetFunds.Request)
  ))
_sym_db.RegisterMessage(Request)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
  DESCRIPTOR = _RESPONSE,
  __module__ = 'Trd_GetFunds_pb2'
  # @@protoc_insertion_point(class_scope:Trd_GetFunds.Response)
  ))
_sym_db.RegisterMessage(Response)
# File options: java package com.futu.openapi.pb.
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.futu.openapi.pb'))
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
1891239 | from typing import IO
def _calculate_log2_num_bytes(value: int) -> int:
"""
Determine the number of bytes required to encode the input value.
Artificially limited to max of 8 bytes to be compliant
:param value:
:return: The calculate the number of bytes
"""
for log2_num_bytes in range(4):
limit = 1 << ((1 << log2_num_bytes) * 8)
if value < limit:
return log2_num_bytes
raise RuntimeError('Unable to calculate the number of bytes required for this value')
def decode(stream: IO[bytes]) -> int:
    """
    Decode one integer from *stream*.

    Wire format (mirror of ``encode``):
      * 0xxxxxxx           -> positive value 0..127 in a single byte
      * 111xxxxx           -> small negative value -(0..31)
      * 110Sllll + payload -> big-endian magnitude in ``1 << llll`` bytes,
                              negated when the sign bit S is set

    :param stream: The stream to parse
    :return: The decoded integer value
    """
    marker = stream.read(1)[0]
    if not (marker & 0x80):
        return marker & 0x7F                          # single-byte positive value
    kind = (marker & 0x60) >> 5
    if kind == 3:
        return -(marker & 0x1F)                       # small negative packed into the header
    if kind == 2:
        negative = bool(marker & 0x10)
        num_bytes = 1 << (marker & 0x0F)
        magnitude = 0
        for _ in range(num_bytes):                    # big-endian accumulation, one byte per read
            magnitude = (magnitude << 8) | int(stream.read(1)[0])
        return -magnitude if negative else magnitude
    # kinds 0 and 1 are unassigned in this format; fall through to an
    # implicit None (behaviour preserved from the original implementation)
def encode(stream: IO[bytes], value: int):
    """
    Encode an integer into *stream* (see ``decode`` for the wire format).

    :param stream: writable byte stream
    :param value: The value to be encoded
    """
    negative = value < 0
    magnitude = abs(value)
    if not negative and magnitude <= 0x7F:
        # small positive: single byte with the high bit clear
        stream.write(bytes([magnitude]))
    elif negative and magnitude <= 0x1F:
        # small negative: packed straight into the header byte
        stream.write(bytes([0xE0 | magnitude]))
    else:
        # multi-byte: header carries the sign bit plus log2 of the payload width
        log2_num_bytes = _calculate_log2_num_bytes(magnitude)
        num_bytes = 1 << log2_num_bytes
        header = (0xD0 if negative else 0xC0) | (log2_num_bytes & 0xF)
        payload = [(magnitude >> shift) & 0xFF
                   for shift in range(8 * (num_bytes - 1), -1, -8)]       # big-endian byte order
        stream.write(bytes([header] + payload))
def encode_fixed(stream: IO[bytes], value: int, num_bytes: int = 8):
    """Write *value* big-endian into exactly *num_bytes* bytes of *stream*."""
    big_endian_shifts = range(8 * (num_bytes - 1), -1, -8)
    stream.write(bytes((value >> shift) & 0xFF for shift in big_endian_shifts))
| StarcoderdataPython |
105873 | <reponame>xiaolao/PaddleX
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn.metrics import hamming_loss
from sklearn.metrics import accuracy_score as accuracy_metric
from sklearn.metrics import multilabel_confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import average_precision_score
from sklearn.preprocessing import binarize
import numpy as np
# Public API of this multilabel-metrics module (all five helpers defined below).
__all__ = [
    "multi_hot_encode", "hamming_distance", "accuracy_score",
    "precision_recall_fscore", "mean_average_precision"
]
def multi_hot_encode(logits, threshold=0.5):
    """
    Encode logits to multi-hot by elementwise thresholding, for multilabel.

    Args:
        logits: 2-D array-like of per-label scores/probabilities.
        threshold: elementwise cut-off; entries > threshold map to 1, else 0.

    Returns:
        Array of the same shape with 0/1 entries.
    """
    # Pass threshold by keyword: scikit-learn made all binarize() parameters
    # after X keyword-only, so the previous positional call breaks on
    # modern sklearn releases.
    return binarize(logits, threshold=threshold)
def hamming_distance(output, target):
    """
    Soft, label-based metric for multilabel classification.

    Returns:
        The fraction of wrongly predicted labels -- smaller is better.
    """
    # sklearn's hamming_loss takes (y_true, y_pred), hence the argument order
    distance = hamming_loss(target, output)
    return distance
def accuracy_score(output, target, base="sample"):
    """
    Hard metric for multilabel classification.

    Args:
        output: predicted multi-hot labels.
        target: ground-truth multi-hot labels.
        base: "sample" (default) scores whole samples (exact match);
              "label" scores individual label decisions via the
              per-label confusion matrices.

    Returns:
        The accuracy for the requested base.
    """
    assert base in ["sample", "label"], 'must be one of ["sample", "label"]'
    if base == "sample":
        return accuracy_metric(target, output)
    # base == "label": pool counts across all per-label confusion matrices
    mcm = multilabel_confusion_matrix(target, output)
    true_negatives = mcm[:, 0, 0].sum()
    false_negatives = mcm[:, 1, 0].sum()
    true_positives = mcm[:, 1, 1].sum()
    false_positives = mcm[:, 0, 1].sum()
    correct = true_positives + true_negatives
    total = correct + false_negatives + false_positives
    return correct / total
def precision_recall_fscore(output, target):
    """
    Per-label precision / recall / F-score for multilabel classification.

    Returns:
        (precisions, recalls, fscores) -- one entry per label; the support
        counts from sklearn are discarded.
    """
    precisions, recalls, fscores, _support = precision_recall_fscore_support(
        target, output)
    return precisions, recalls, fscores
def mean_average_precision(logits, target):
    """
    Mean of the per-class average precisions (mAP).

    Args:
        logits: probability from network before sigmoid or softmax, shape (n_samples, n_classes)
        target: ground truth, 0 or 1, same shape
    """
    if not (isinstance(logits, np.ndarray) and isinstance(target, np.ndarray)):
        raise TypeError("logits and target should be np.ndarray.")
    per_class_aps = [
        average_precision_score(target[:, class_idx], logits[:, class_idx])
        for class_idx in range(target.shape[1])
    ]
    return np.mean(per_class_aps)
| StarcoderdataPython |
1882111 | #from server import mycrt
import pytest
import unittest
import requests
import json
#from mycrt import application
from .context import *
# NOTE(review): the block below is dead code preserved inside a module-level
# string literal -- it is evaluated and discarded at import time with no
# runtime effect.  Candidate for deletion.
"""
if __name__ == '__main__':
    if __package__ is None:
        import sys
        from os import path
        sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
        from server.mycrt import *
    else:
        from ..server.mycrt import *
"""
class TestFlaskApi(unittest.TestCase):
    """Integration tests for the MyCRT Flask application's REST endpoints."""

    def setUp(self):
        # fresh Flask test client before every test
        self.app = server.mycrt.application.test_client()

    def test_rest_endpoint(self):
        resp = self.app.get('/test')
        body = resp.data.decode('UTF-8')
        self.assertEqual(body, "Test REST endpoint.")
| StarcoderdataPython |
1738329 | <filename>colcon_hardware_acceleration/subverb/hypervisor.py
# Copyright 2022 <NAME>
# Licensed under the Apache License, Version 2.0
import os
import sys
import errno
from pathlib import Path
from colcon_core.plugin_system import satisfies_version
from colcon_hardware_acceleration.subverb import (
AccelerationSubverbExtensionPoint,
get_vitis_dir,
get_rawimage_path,
get_firmware_dir,
mount_rawimage,
umount_rawimage,
run,
mountpoint1,
mountpoint2,
mountpointn,
replace_kernel,
add_kernel,
exists,
copy_colcon_workspace,
copy_libstdcppfs
)
from colcon_hardware_acceleration.verb import green, yellow, red, gray
## Only dom0
# ImageBuilder (uboot-script-gen) config template used by
# default_hypervisor_setup: boots Xen with a single dom0 VM.
# NOTE(review): written verbatim to xen.cfg -- keys/values must match what
# imagebuilder's uboot-script-gen expects; verify before changing.
TEMPLATE_CONFIG = """\
MEMORY_START=0x0
MEMORY_END=0x80000000
DEVICE_TREE=system.dtb
XEN=xen
UBOOT_SOURCE=boot.source
UBOOT_SCRIPT=boot.scr
"""
class HypervisorSubverb(AccelerationSubverbExtensionPoint):
"""
Configure the Xen hypervisor.
"""
    def __init__(self):  # noqa: D107
        super().__init__()
        # Fail fast if the loaded extension-point API is incompatible with the
        # version this subverb was written against (semver "^1.0" constraint).
        satisfies_version(AccelerationSubverbExtensionPoint.EXTENSION_POINT_VERSION, "^1.0")
def add_arguments(self, *, parser): # noqa: D102
# debug arg, show configuration and leave temp. dir (do not delete)
argument = parser.add_argument("--debug", action="store_true", default=False)
# dom0 VM
argument = parser.add_argument(
"--dom0", action="store", dest="dom0_arg", choices=["preempt_rt", "vanilla"]
)
# domU VMs
argument = parser.add_argument(
"--domU",
action="append",
dest="domU_args",
choices=["preempt_rt", "vanilla"],
# nargs="+",
)
# dom0less VMs
argument = parser.add_argument(
"--dom0less",
action="append",
dest="dom0less_args",
choices=["preempt_rt", "vanilla"],
)
# VMs ramdisks (dom0 is NOT included)
argument = parser.add_argument(
"--ramdisk",
action="append",
dest="ramdisk_args",
help="ramdisks for VMs, excluding dom0.",
)
argument = parser.add_argument(
"--rootfs",
action="append",
dest="rootfs_args",
help="rootfs' for VMs, including dom0.",
)
argument = parser.add_argument(
"--install-dir",
dest="install_dir",
type=str,
help="relative path to the workspace directory to deploy in emulation (typically 'install-*').",
)
argument = parser.add_argument(
"--dom0-ramdisk",
dest="dom0_ramdisk",
type=str,
help="Dom0 ramdisk filename to use (instead of the default rootfs). Copied in the first partition.",
)
# try:
# from argcomplete.completers import ChoicesCompleter
# except ImportError:
# pass
# else:
# type_options = ["vanilla", "preempt_rt"]
# argument.completer = ChoicesCompleter(type_options)
# remember the subparser to print usage in case no subverb is passed
self.parser = parser
def default_hypervisor_setup(self, context):
"""
Default image setup using:
- dom0 and
- dom0less machine with a busybox ramdisk
"""
firmware_dir = get_firmware_dir()
# create auxiliary directory for compiling all artifacts for the hypervisor
auxdir = "/tmp/hypervisor"
run("mkdir " + auxdir, shell=True, timeout=1)
# copy the artifacts to auxiliary directory
run(
"cp " + firmware_dir + "/kernel/Image " + auxdir + "/Image",
shell=True,
timeout=1,
)
run("cp " + firmware_dir + "/xen " + auxdir + "/xen", shell=True, timeout=1)
run(
"cp "
+ firmware_dir
+ "/device_tree/system.dtb.xen "
+ auxdir
+ "/system.dtb",
shell=True,
timeout=1,
)
run(
"cp " + firmware_dir + "/initrd.cpio " + auxdir + "/initrd.cpio",
shell=True,
timeout=1,
)
# handle BOOT.BIN separately, priorizing first the symlink generated by building kernels
bootbin_symlink_path = Path(firmware_dir + "/BOOT.BIN")
if bootbin_symlink_path.is_symlink():
try:
os.stat(bootbin_symlink_path)
except OSError as e:
if e.errno == errno.ENOENT:
print(
"path %s does not exist or is a broken symlink"
% bootbin_symlink_path
)
sys.exit(1)
else:
raise e
if not os.path.exists(bootbin_symlink_path):
red("BOOT.BIN file " + bootbin_symlink_path + " not found.")
sys.exit(1)
green("- Found device BOOT.BIN file: " + str(bootbin_symlink_path))
run(
"cp " + str(bootbin_symlink_path) + " " + auxdir + "/BOOT.BIN",
shell=True,
timeout=1,
)
else:
green("- Using default BOOT.BIN.xen file.")
run(
"cp " + firmware_dir + "/bootbin/BOOT.BIN.xen " + auxdir + "/BOOT.BIN",
shell=True,
timeout=1,
)
# produce config
config = open(auxdir + "/xen.cfg", "w")
config.truncate(0) # delete previous content
config.write(TEMPLATE_CONFIG)
config.close()
# generate boot script
imagebuilder_dir = firmware_dir + "/imagebuilder"
imagebuilder_path = imagebuilder_dir + "/scripts/uboot-script-gen"
cmd = (
"cd "
+ auxdir
+ " && bash "
+ imagebuilder_path
+ ' -c xen.cfg -d . -t "load mmc 0:1"'
)
if context.args.debug:
gray(cmd)
outs, errs = run(cmd, shell=True, timeout=5)
if errs:
red("Something went wrong.\n" + "Review the output: " + errs)
sys.exit(1)
# print(outs)
# mount sd_card image
rawimage_path = get_rawimage_path("sd_card.img")
mount_rawimage(rawimage_path, 1)
# copy all artifacts
cmd = "sudo cp " + auxdir + "/* " + mountpoint1 + "/"
outs, errs = run(cmd, shell=True, timeout=5)
if errs:
red(
"Something went wrong while replacing the boot script.\n"
+ "Review the output: "
+ errs
)
sys.exit(1)
green("- Successfully copied all Xen artifacts.")
# umount raw disk image, (technically, only p1)
umount_rawimage(1)
# cleanup auxdir
if not context.args.debug:
run("sudo rm -r " + auxdir, shell=True, timeout=1)
def argument_checks(self, context):
"""
Check arguments provided and ensure they're reasonable
TODO: document arguments
"""
# ensure ramdisks don't overrun domUs + dom0less
# NOTE that dom0 doesn't count
if (
context.args.ramdisk_args
and context.args.domU_args
and context.args.dom0less_args
and (
len(context.args.domU_args) + len(context.args.dom0less_args)
< len(context.args.ramdisk_args)
)
or context.args.ramdisk_args
and context.args.dom0less_args
and (len(context.args.dom0less_args) < len(context.args.ramdisk_args))
):
red(
"- More ramdisks provided than VMs. Note that dom0's ramdisk should NOT be indicated (ramdisks <= domUs + dom0less)."
)
sys.exit(1)
# ensure rootfs don't overrun domUs + dom0less + dom0 (+1)
if context.args.rootfs_args and (
len(context.args.domU_args) + len(context.args.dom0less_args) + 1
< len(context.args.rootfs_args)
):
red(
"- More rootfs provided than VMs, including dom0's (rootfs <= domUs + dom0less + 1)."
)
sys.exit(1)
# ensure rootfs and ramdisks don't overrun domUs + dom0less + dom0 (+1)
if (
context.args.ramdisk_args
and context.args.rootfs_args
and (
len(context.args.domU_args) + len(context.args.dom0less_args) + 1
< len(context.args.ramdisk_args) + len(context.args.rootfs_args)
)
):
red(
"- More rootfs and ramdisks provided than VMs, including dom0's (rootfs + ramdisks <= domUs + dom0less + 1)."
)
sys.exit(1)
# inform if the domUs + dom0less + dom0 (+1) count is greater than rootfs + ramdisks count
if (
context.args.ramdisk_args
and context.args.rootfs_args
and (
len(context.args.domU_args) + len(context.args.dom0less_args) + 1
> len(context.args.ramdisk_args) + len(context.args.rootfs_args)
)
):
yellow("- More VMs than ramdisks and rootfs provided, will use defaults.")
# # inform if ramdisks is lower than VMs
# if not context.args.ramdisk_args:
# yellow(
# "- No ramdisks provided. Defaulting to " + str(default_ramdisk)
# )
#
# if context.args.ramdisk_args and (
# len(context.args.domU_args) > len(context.args.ramdisk_args)
# ):
# yellow(
# "- Number of ramdisks is lower than domU VMs. "
# "Last "
# + str(
# len(context.args.domU_args) - len(context.args.ramdisk_args)
# )
# + " VM will default to: "
# + str(default_ramdisk)
# )
    def xen_fixes(self, partition=2):
        """
        Fixup the rootfs on the given sd_card.img partition so Xen can run:
        creates /var/lib/xen and (on non-kv260 boards) points /etc/inittab's
        getty at the Xen hypervisor console (hvc0).

        :param partition: 1-based partition number of sd_card.img to fix up.
        """
        # NOTE(review): firmware_dir is never used below; get_firmware_dir()
        # may have side effects (e.g. validating the firmware path) -- confirm
        # before removing this call.
        firmware_dir = get_firmware_dir()
        # mount sd_card image
        rawimage_path = get_rawimage_path("sd_card.img")
        mount_rawimage(rawimage_path, partition)
        mountpoint_partition = mountpointn + str(partition)
        # create Xen missing dir
        cmd = "sudo mkdir -p " + mountpoint_partition + "/var/lib/xen"
        outs, errs = run(cmd, shell=True, timeout=5)
        if errs:
            red(
                "Something went wrong while creating Xen /var/lib/xen directory in rootfs.\n"
                + "Review the output: "
                + errs
            )
            sys.exit(1)
        green("- Successfully created Xen /var/lib/xen directory in rootfs.")
        if not self.get_board() == "kv260":
            # setup /etc/inittab for Xen: swap the ttyPS0 getty entry for one
            # on hvc0 (the Xen console).  sed uses '-' as its delimiter since
            # the patterns contain '/'.
            cmd = (
                "sudo sed -i 's-PS0:12345:respawn:/bin/start_getty 115200 ttyPS0 vt102-X0:12345:respawn:/sbin/getty 115200 hvc0-g' "
                + mountpoint_partition
                + "/etc/inittab"
            )
            outs, errs = run(cmd, shell=True, timeout=5)
            if errs:
                red(
                    "Something went wrong while setting up /etc/inittab for Xen in rootfs.\n"
                    + "Review the output: "
                    + errs
                )
                sys.exit(1)
            green("- Successfully setup /etc/inittab for Xen in rootfs.")
        # umount raw disk image
        umount_rawimage(partition)
def main(self, *, context): # noqa: D102
"""
Create a Xen configuration, produce boot scripts and deploy
corresponding files into partitions.
TODO: ramdisk selection is currently not implemented.
NOTE: Location, syntax and other related matters are defined
within the `acceleration_firmware_kv260` package. Refer to it for more
details.
NOTE 2: to simplify implementation, for now, domUs will use rootfs
and dom0less ramdisks
"""
# TODO: review in the future
#
# if context.args.domU_args and context.args.dom0less_args:
# red("Simultaneous use of domU and dom0less VMs not supported.")
# sys.exit(1)
if not (
context.args.dom0_arg
or context.args.domU_args
or context.args.dom0less_args
):
# self.default_hypervisor_setup(context)
red("Please provide dom0 args at least")
sys.exit(0)
num_domus = 0 # NUM_DOMUS element in the configuration, also used for iterate over DomUs
num_dom0less = 0 # used to iterate over Dom0less
global TEMPLATE_CONFIG
default_ramdisk = "initrd.cpio"
default_rootfs = (
"rootfs.cpio.gz" # note rootfs could be provided in cpio.gz or tar.gz
)
# see imagebuilder for more details
# create auxiliary directory for compiling all artifacts for the hypervisor
auxdir = "/tmp/hypervisor"
run("mkdir " + auxdir + " 2> /dev/null", shell=True, timeout=1)
firmware_dir = get_firmware_dir() # directory where firmware is
# save last image, delete rest
if exists(firmware_dir + "/sd_card.img"):
if exists(firmware_dir + "/sd_card.img.old"):
run(
"sudo rm " + firmware_dir + "/sd_card.img.old",
shell=True,
timeout=1,
)
yellow("- Detected previous sd_card.img.old raw image, deleting.")
run(
"sudo mv "
+ firmware_dir
+ "/sd_card.img "
+ firmware_dir
+ "/sd_card.img.old",
shell=True,
timeout=1,
)
yellow(
"- Detected previous sd_card.img raw image, moving to sd_card.img.old."
)
#####################
# process Dom0
#####################
if context.args.dom0_arg:
# domU, dom0less, ramdisk and rootfs checks
self.argument_checks(context)
# replace Image in boot partition and assign silly ramdisk (not used)
if context.args.dom0_arg == "vanilla":
# copy to auxdir
run(
"cp " + firmware_dir + "/kernel/Image " + auxdir + "/Image",
shell=True,
timeout=1,
)
TEMPLATE_CONFIG += "DOM0_KERNEL=Image\n"
elif context.args.dom0_arg == "preempt_rt":
# # directly to boot partition
# replace_kernel("Image_PREEMPT_RT")
# copy to auxdir
run(
"cp "
+ firmware_dir
+ "/kernel/Image_PREEMPT_RT "
+ auxdir
+ "/Image_PREEMPT_RT",
shell=True,
timeout=1,
)
TEMPLATE_CONFIG += "DOM0_KERNEL=Image_PREEMPT_RT\n"
else:
red("Unrecognized dom0 arg.")
sys.exit(1)
if context.args.dom0_ramdisk:
# Dom's ramdisk
if os.path.exists(firmware_dir + "/" + context.args.dom0_ramdisk):
run(
"cp " + firmware_dir + "/" + context.args.dom0_ramdisk + " " + auxdir + "/" + context.args.dom0_ramdisk,
shell=True,
timeout=1,
)
TEMPLATE_CONFIG += "DOM0_RAMDISK="+ context.args.dom0_ramdisk + "\n"
green("- Dom0 ramdisk: " + context.args.dom0_ramdisk)
else:
red(context.args.dom0_ramdisk + " not found")
sys.exit(1)
else:
# Dom0's rootfs
if not context.args.rootfs_args or (len(context.args.rootfs_args) < 1):
yellow(
"- No rootfs for Dom0 provided. Defaulting to "
+ str(default_rootfs)
)
rootfs = default_rootfs
assert exists(firmware_dir + "/" + rootfs)
run(
"cp " + firmware_dir + "/" + rootfs + " " + auxdir + "/" + rootfs,
shell=True,
timeout=1,
)
else:
rootfs = context.args.rootfs_args[num_domus]
num_domus += 1 # jump over first rootfs arg
green("- Using " + rootfs + "rootfs for Dom0")
# this way, list will be consistent
# when interating over DomUs
TEMPLATE_CONFIG += "DOM0_ROOTFS=" + str(rootfs) + "\n"
if self.get_board() == "kv260":
# KV260 requires special arguments to handle the juggling of device tree overlays, as opposed to the ramdisk
# shipped in the OOB experience
TEMPLATE_CONFIG += 'DOM0_CMD="console=hvc0 earlycon=xen earlyprintk=xen clk_ignore_unused root=/dev/mmcblk0p2"\n'
#####################
# process DomUs
#####################
if context.args.domU_args:
for domu in context.args.domU_args:
# TODO: consider adding ramdisk support for domUs
# define rootfs for this domU, or default
if not context.args.rootfs_args or (
num_domus >= len(context.args.rootfs_args)
):
rootfs = default_rootfs
else:
rootfs = context.args.rootfs_args[num_domus]
if domu == "vanilla":
# add_kernel("Image") # directly to boot partition
# copy to auxdir
run(
"cp " + firmware_dir + "/kernel/Image " + auxdir + "/Image",
shell=True,
timeout=1,
)
TEMPLATE_CONFIG += (
"DOMU_KERNEL[" + str(num_domus) + ']="Image"\n'
)
elif domu == "preempt_rt":
# add_kernel("Image_PREEMPT_RT") # directly to boot partition
# copy to auxdir
run(
"cp "
+ firmware_dir
+ "/kernel/Image_PREEMPT_RT "
+ auxdir
+ "/Image_PREEMPT_RT",
shell=True,
timeout=1,
)
TEMPLATE_CONFIG += (
"DOMU_KERNEL[" + str(num_domus) + ']="Image_PREEMPT_RT"\n'
)
else:
red("Unrecognized domU arg.")
sys.exit(1)
# Add rootfs
TEMPLATE_CONFIG += (
"DOMU_ROOTFS[" + str(num_domus) + ']="' + str(rootfs) + '"\n'
)
TEMPLATE_CONFIG += "DOMU_NOBOOT[" + str(num_domus) + "]=y\n"
num_domus += 1
#####################
# process Dom0less
#####################
if context.args.dom0less_args:
for dom0less in context.args.dom0less_args:
# define ramdisk for this dom0less, or default
if not context.args.ramdisk_args or (
num_dom0less >= len(context.args.ramdisk_args)
):
ramdisk = default_ramdisk
else:
ramdisk = context.args.ramdisk_args[num_dom0less]
if dom0less == "vanilla":
run(
"cp " + firmware_dir + "/kernel/Image " + auxdir + "/Image",
shell=True,
timeout=1,
)
TEMPLATE_CONFIG += (
"DOMU_KERNEL["
+ str(num_dom0less + num_domus)
+ ']="Image"\n'
)
elif dom0less == "preempt_rt":
# add_kernel("Image_PREEMPT_RT")
run(
"cp "
+ firmware_dir
+ "/kernel/Image_PREEMPT_RT "
+ auxdir
+ "/Image_PREEMPT_RT",
shell=True,
timeout=1,
)
TEMPLATE_CONFIG += (
"DOMU_KERNEL["
+ str(num_dom0less + num_domus)
+ ']="Image_PREEMPT_RT"\n'
)
else:
red("Unrecognized dom0less arg.")
sys.exit(1)
TEMPLATE_CONFIG += (
"DOMU_RAMDISK["
+ str(num_dom0less + num_domus)
+ ']="'
+ str(ramdisk)
+ '"\n'
)
num_dom0less += 1
# account for Dom0less in the total as well
num_domus += num_dom0less
#####################
# configuration and images
#####################
# Add NUM_DOMUS at the end
TEMPLATE_CONFIG += "NUM_DOMUS=" + str(num_domus) + "\n"
# copy the artifacts to auxiliary directory
run("cp " + firmware_dir + "/xen " + auxdir + "/xen", shell=True, timeout=1)
run(
"cp "
+ firmware_dir
+ "/device_tree/system.dtb.xen "
+ auxdir
+ "/system.dtb",
shell=True,
timeout=1,
)
if self.get_board() == "kv260":
TEMPLATE_CONFIG += ('XEN_CMD="console=dtuart dtuart=serial0 dom0_mem=2G dom0_max_vcpus=1 ' +
'bootscrub=0 vwfi=native sched=null"\n')
# # NOTE: do it instead through device tree overlays
# run("cp " + firmware_dir + "/ramdisk.cpio.gz.u-boot " + auxdir + "/ramdisk.cpio.gz.u-boot", shell=True, timeout=1)
TEMPLATE_CONFIG += ('DT_OVERLAY[0]="zynqmp-sck-kv-g-qemu.dtbo"\n')
TEMPLATE_CONFIG += ('DT_OVERLAY[1]="mmc-enable.dtbo"\n')
TEMPLATE_CONFIG += ('NUM_DT_OVERLAY=2\n')
# copy files
run(
"cp "
+ firmware_dir
+ "/device_tree/mmc-enable.dtbo "
+ auxdir
+ "/mmc-enable.dtbo",
shell=True,
timeout=1,
)
run(
"cp "
+ firmware_dir
+ "/device_tree/zynqmp-sck-kv-g-qemu.dtbo "
+ auxdir
+ "/zynqmp-sck-kv-g-qemu.dtbo",
shell=True,
timeout=1,
)
# NOTE: BOOT.BIN in KV260 is handled differently
else:
# handle BOOT.BIN separately, priorizing first the symlink generated by building kernels
bootbin_symlink_path = Path(firmware_dir + "/BOOT.BIN")
if bootbin_symlink_path.is_symlink():
try:
os.stat(bootbin_symlink_path)
except OSError as e:
if e.errno == errno.ENOENT:
print(
"path %s does not exist or is a broken symlink"
% bootbin_symlink_path
)
sys.exit(1)
else:
raise e
if not os.path.exists(bootbin_symlink_path):
red("BOOT.BIN file " + bootbin_symlink_path + " not found.")
sys.exit(1)
green("- Found device BOOT.BIN file: " + str(bootbin_symlink_path))
run(
"cp " + str(bootbin_symlink_path) + " " + auxdir + "/BOOT.BIN",
shell=True,
timeout=1,
)
else:
green("- Using default BOOT.BIN.xen file.")
run(
"cp "
+ firmware_dir
+ "/bootbin/BOOT.BIN.xen "
+ auxdir
+ "/BOOT.BIN",
shell=True,
timeout=1,
)
# Add BOOT.BIN to template
TEMPLATE_CONFIG += ('BOOTBIN=BOOT.BIN\n')
if context.args.debug:
gray("Debugging config file:")
gray(TEMPLATE_CONFIG)
# initrd.cpio
# copy (at least) default ramdisk initrd.cpio and default rootfs rootfs.cpio.gz
run(
"cp "
+ firmware_dir
+ "/"
+ default_ramdisk
+ " "
+ auxdir
+ "/"
+ default_ramdisk,
shell=True,
timeout=1,
)
run(
"cp "
+ firmware_dir
+ "/"
+ default_rootfs
+ " "
+ auxdir
+ "/"
+ default_rootfs,
shell=True,
timeout=1,
)
# add other ramdisks, if neccessary:
if context.args.ramdisk_args:
for ramdisk in context.args.ramdisk_args:
assert exists(firmware_dir + "/" + ramdisk)
run(
"cp "
+ firmware_dir
+ "/"
+ ramdisk
+ " "
+ auxdir
+ "/"
+ ramdisk,
shell=True,
timeout=1,
)
green("- Copied to temporary directory ramdisk: " + ramdisk)
# add other rootfs, if neccessary:
if context.args.rootfs_args:
for rootfs in context.args.rootfs_args:
assert exists(firmware_dir + "/" + rootfs)
run(
"cp "
+ firmware_dir
+ "/"
+ rootfs
+ " "
+ auxdir
+ "/"
+ rootfs,
shell=True,
timeout=1,
)
green("- Copied to temporary directory rootfs: " + rootfs)
# produce config
config = open(auxdir + "/xen.cfg", "w")
config.truncate(0) # delete previous content
config.write(TEMPLATE_CONFIG)
config.close()
# generate boot script
yellow("- Generating boot script")
imagebuilder_dir = firmware_dir + "/imagebuilder"
imagebuilder_path_configscript = (
imagebuilder_dir + "/scripts/uboot-script-gen"
)
if self.get_board() == "kv260":
cmd = (
"cd "
+ auxdir
+ " && bash "
+ imagebuilder_path_configscript
+ ' -c xen.cfg -d . -t "load mmc 1:1"'
# + ' -c xen.cfg -d . -t sd'
)
else: # assume zcu102
cmd = (
"cd "
+ auxdir
+ " && bash "
+ imagebuilder_path_configscript
+ " -c xen.cfg -d . -t sd"
)
if context.args.debug:
gray(cmd)
outs, errs = run(cmd, shell=True, timeout=5)
if errs:
red(
"Something went wrong while generating config file.\n"
+ "Review the output: "
+ errs
)
sys.exit(1)
green("- Boot script ready")
# create sd card image
yellow(
"- Creating new sd_card.img, previous one will be moved to sd_card.img.old. This will take a few seconds, hold on..."
)
whoami, errs = run("whoami", shell=True, timeout=1)
if errs:
red(
"Something went wrong while fetching username.\n"
+ "Review the output: "
+ errs
)
sys.exit(1)
# build image, add 500 MB of slack on each rootfs-based partition
imagebuilder_path_diskimage = imagebuilder_dir + "/scripts/disk_image"
cmd = (
"cd "
+ auxdir
+ " && sudo bash "
+ imagebuilder_path_diskimage
+ " -c xen.cfg -d . -t sd -w "
+ auxdir
+ " -o "
+ firmware_dir
+ "/sd_card.img "
+ "-s 500"
)
if context.args.debug:
gray(cmd)
outs, errs = run(cmd, shell=True, timeout=300)
if errs:
red(
"Something went wrong while creating sd card image.\n"
+ "Review the output: "
+ errs
)
sys.exit(1)
green("- Image successfully created")
# permissions of the newly created image
cmd = (
"sudo chown "
+ whoami
+ ":"
+ whoami
+ " "
+ firmware_dir
+ "/sd_card.img"
)
outs, errs = run(cmd, shell=True)
if errs:
red(
"Something went wrong while creating sd card image.\n"
+ "Review the output: "
+ errs
)
sys.exit(1)
# ## use existing SD card image
# # mount sd_card image
# rawimage_path = get_rawimage_path("sd_card.img")
# mount_rawimage(rawimage_path, 1)
#
# # copy all artifacts
# cmd = "sudo cp " + auxdir + "/* " + mountpoint1 + "/"
# outs, errs = run(cmd, shell=True, timeout=5)
# if errs:
# red(
# "Something went wrong while replacing the boot script.\n"
# + "Review the output: "
# + errs
# )
# sys.exit(1)
# green("- Successfully copied all Xen artifacts.")
#
# # umount raw disk image, (technically, only p1)
# umount_rawimage(1)
# Xen SD card fixes
# NOTE: Only applicable to images with rootfs in partition 2,
# which are most, except those with dom0_ramdisk
if not context.args.dom0_ramdisk:
# creates missing tmp dirs for Xen proper functioning, configures /etc/inittab, etc.
# TODO: review this overtime in case PetaLinux output becomes differently
self.xen_fixes(partition=2)
copy_libstdcppfs(partition=2) # FIXME: copy missing libstdc++fs library
# apply fixes also to every domU
if context.args.domU_args:
for i in range(len(context.args.domU_args)):
self.xen_fixes(partition=i + 2 + 1)
copy_libstdcppfs(partition=i + 2 + 1) # FIXME: copy missing libstdc++fs library
# cleanup auxdir
if not context.args.debug:
run("sudo rm -r " + auxdir, shell=True, timeout=1)
# copy ROS workspace to image
if context.args.install_dir:
copy_colcon_workspace(context.args.install_dir)
else:
red("No dom0 specified, doing nothing.")
| StarcoderdataPython |
1720361 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016-2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
plantweb render_file example.
"""
from __future__ import unicode_literals, absolute_import
from __future__ import print_function, division
from plantweb.render import render_file
# Graphviz DOT description of a finite state machine; this source text is
# written to disk below and rendered through Plantweb's `render_file`.
CONTENT = """
digraph finite_state_machine {
rankdir=LR;
size="8,5"
node [shape = doublecircle]; LR_0 LR_3 LR_4 LR_8;
node [shape = circle];
LR_0 -> LR_2 [ label = "SS(B)" ];
LR_0 -> LR_1 [ label = "SS(S)" ];
LR_1 -> LR_3 [ label = "S($end)" ];
LR_2 -> LR_6 [ label = "SS(b)" ];
LR_2 -> LR_5 [ label = "SS(a)" ];
LR_2 -> LR_4 [ label = "S(A)" ];
LR_5 -> LR_7 [ label = "S(b)" ];
LR_5 -> LR_5 [ label = "S(a)" ];
LR_6 -> LR_6 [ label = "S(b)" ];
LR_6 -> LR_5 [ label = "S(a)" ];
LR_7 -> LR_8 [ label = "S(b)" ];
LR_7 -> LR_5 [ label = "S(a)" ];
LR_8 -> LR_6 [ label = "S(b)" ];
LR_8 -> LR_5 [ label = "S(a)" ];
}
"""
if __name__ == '__main__':
    # Write the DOT source to disk, then render it to a PNG through Plantweb.
    source_path = 'mygraph.dot'
    with open(source_path, 'wb') as handle:
        handle.write(CONTENT.encode('utf-8'))

    print('==> INPUT FILE:')
    print(source_path)

    render_options = {'engine': 'graphviz', 'format': 'png'}
    cache_options = {'use_cache': False}
    rendered_path = render_file(
        source_path,
        renderopts=render_options,
        cacheopts=cache_options,
    )

    print('==> OUTPUT FILE:')
    print(rendered_path)
| StarcoderdataPython |
189983 | <gh_stars>0
from .node import NodeModelBase
| StarcoderdataPython |
79815 | import functools
from typing import Optional, Sequence
from fvcore.common.registry import Registry as _Registry
from tabulate import tabulate
class Registry(_Registry):
    """Extension of fvcore's registry that supports aliases.

    Besides the primary name, an object may be registered under any number
    of aliases (extra names mapping to the same object). A per-name metadata
    table (name, description, aliases) backs a tabular ``repr``.
    """

    # Attribute names probed on a registered class to discover its aliases.
    _ALIAS_KEYWORDS = ("_aliases", "_ALIASES")

    def __init__(self, name: str):
        """Create a registry called *name* with an empty metadata table."""
        super().__init__(name=name)
        # Maps registered name -> metadata dict ("name", "description",
        # "aliases", plus extras such as "is_alias" for alias entries).
        self._metadata_map = {}

    def _get_aliases(self, obj_func_or_class):
        """Return the aliases declared on *obj_func_or_class*, or [] if none."""
        for kw in self._ALIAS_KEYWORDS:
            if hasattr(obj_func_or_class, kw):
                return getattr(obj_func_or_class, kw)
        return []

    def register(self, obj: object = None, aliases: Optional[Sequence[str]] = None) -> Optional[object]:
        """Register *obj*, or act as a decorator when *obj* is None.

        Supported forms:
            ``@REG.register`` (aliases auto-discovered from the object),
            ``@REG.register(aliases=[...])`` (decorator factory), and
            ``REG.register(obj, aliases)`` (direct call, returns None).

        Alias entries are recorded with ``is_alias=True`` so ``__repr__``
        can hide them.
        """
        if obj is None:
            # used as a decorator
            def deco(func_or_class: object, aliases=None) -> object:
                name = func_or_class.__name__
                self._do_register(name, func_or_class)
                if aliases is None:
                    aliases = self._get_aliases(func_or_class)
                if not isinstance(aliases, (list, tuple, set)):
                    aliases = [aliases]
                for alias in aliases:
                    self._do_register(alias, func_or_class, is_alias=True)
                return func_or_class

            kwargs = {"aliases": aliases}
            # Bare decorator vs. decorator factory: pre-bind explicit kwargs.
            if any(v is not None for v in kwargs.values()):
                return functools.partial(deco, **kwargs)
            else:
                return deco

        name = obj.__name__
        self._do_register(name, obj)
        if aliases is None:
            # NOTE(review): auto-discovery here only runs for classes,
            # while the decorator path probes functions too — confirm intended.
            aliases = self._get_aliases(obj) if isinstance(obj, type) else []
        for alias in aliases:
            self._do_register(alias, obj, is_alias=True)

    def _do_register(self, name: str, obj: object, **kwargs) -> None:
        """Record *obj* under *name* and store its display metadata.

        Extra keyword arguments (e.g. ``is_alias=True``) are merged into the
        metadata entry; ``description`` defaults to the first docstring line.
        """
        docstring = obj.__doc__
        if docstring is None:
            docstring = ""
        aliases = self._get_aliases(obj) if isinstance(obj, type) else None
        if not aliases:
            aliases = None
        self._metadata_map[name] = {
            "name": name,
            "description": kwargs.pop("description", docstring.split("\n")[0]),
            "aliases": aliases,
            **kwargs,
        }
        return super()._do_register(name, obj)

    def clear(self) -> None:
        """Remove every registered object and its metadata."""
        self._obj_map = {}
        self._metadata_map = {}

    def __repr__(self) -> str:
        # Hide alias rows so each registered object is listed only once.
        metadata = [v for v in self._metadata_map.values() if not v.get("is_alias", False)]
        table = tabulate(metadata, headers="keys", tablefmt="fancy_grid")
        return "Registry of {}:\n{}".format(self._name, table)
| StarcoderdataPython |
384009 | age = int(input())
def drinks(drink):
print(f"drink {drink}")
if age <= 14:
drinks("toddy")
elif age <= 18:
drinks("coke")
elif age <= 21:
drinks("beer")
else:
drinks("whisky")
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.