id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3312583 | <gh_stars>0
"""
TODO
"""
import asyncio
from biothings.web.query.builder import ESScrollID
from elasticsearch import NotFoundError, RequestError
from elasticsearch_dsl import MultiSearch, Search
class ResultInterrupt(Exception):
    """Control-flow exception that carries a query result payload."""

    def __init__(self, data):
        # Payload delivered to whoever catches the interrupt.
        self.data = data
        super().__init__()
class RawResultInterrupt(ResultInterrupt):
    """Interrupt whose payload is the raw, unprocessed backend response."""
class EndScrollInterrupt(ResultInterrupt):
    """Signals that a scroll context is exhausted (no hits left)."""

    def __init__(self):
        payload = {
            "success": False,
            "error": "No more results to return.",
        }
        super().__init__(payload)
class ESQueryBackend():
    """Synchronous Elasticsearch query execution backend."""

    def __init__(self, client, indices=None):
        self.client = client
        # biothing_type -> index pattern mapping, for example:
        # {
        #     None: "hg19_current",          # default pattern
        #     "hg19": "hg19_current",
        #     "hg38": "hg38_index1,hg38_index2",
        #     "_internal": "hg*_current"
        # }
        self.indices = indices or {None: "_all"}
        if None not in self.indices:
            # No explicit default given: fall back to the first pattern.
            self.indices[None] = next(iter(self.indices.values()))

    def execute(self, query, **options):
        """Run a Search against the indices for options['biothing_type']."""
        assert isinstance(query, Search)
        target_index = self.indices[options.get('biothing_type')]
        return self.client.search(query.to_dict(), target_index)
class AsyncESQueryBackend(ESQueryBackend):
    """
    Execute an Elasticsearch query asynchronously.

    Supports three query types: a single ``Search``, a ``MultiSearch``
    (rate-limited by a semaphore), and an ``ESScrollID`` to continue a
    previously started scroll.
    """
    def __init__(
        self, client, indices=None,
        scroll_time='1m', scroll_size=1000,
        multisearch_concurrency=5,
        total_hits_as_int=True
    ):
        super().__init__(client, indices)
        # for scroll queries
        self.scroll_time = scroll_time  # scroll context expiration timeout
        self.scroll_size = scroll_size  # result window size override value
        # concurrency control: bounds the number of in-flight msearch calls
        self.semaphore = asyncio.Semaphore(multisearch_concurrency)
        # additional params
        # https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking-changes-7.0.html
        # #hits-total-now-object-search-response
        self.total_hits_as_int = total_hits_as_int
    async def execute(self, query, **options):
        """
        Execute the corresponding query. Must return an awaitable.
        May override to add more. Handle uncaught exceptions.

        Options:
            fetch_all: also return a scroll_id for this query (default: false)
            biothing_type: which type's corresponding indices to query (default in config.py)
            raw: when truthy, deliver the raw backend response by raising
                RawResultInterrupt instead of returning it
        """
        assert isinstance(query, (
            # https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html
            # https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html
            # https://www.elastic.co/guide/en/elasticsearch/reference/current/scroll-api.html
            Search, MultiSearch, ESScrollID
        ))
        if isinstance(query, ESScrollID):
            try:
                res = await self.client.scroll(
                    scroll_id=query.data, scroll=self.scroll_time,
                    rest_total_hits_as_int=self.total_hits_as_int)
            except (
                RequestError,  # the id is not in the correct format of a context id
                NotFoundError  # the id does not correspond to any search context
            ):
                raise ValueError("Invalid or stale scroll_id.")
            else:
                if options.get('raw'):
                    raise RawResultInterrupt(res)
                if not res['hits']['hits']:
                    # Empty page means the scroll is fully consumed.
                    raise EndScrollInterrupt()
                return res
        # everything below requires us to know which indices to query
        index = self.indices[options.get('biothing_type')]
        if isinstance(query, Search):
            if options.get('fetch_all'):
                # Start a scroll: widen the page and attach a scroll context.
                query = query.extra(size=self.scroll_size)
                query = query.params(scroll=self.scroll_time)
            if self.total_hits_as_int:
                query = query.params(rest_total_hits_as_int=True)
            # NOTE(review): query._params is a private attribute of
            # elasticsearch_dsl.Search -- this relies on its internals.
            res = await self.client.search(query.to_dict(), index, **query._params)
        elif isinstance(query, MultiSearch):
            await self.semaphore.acquire()
            try:
                res = await self.client.msearch(query.to_dict(), index)
            finally:
                # Always release, even if msearch raised.
                self.semaphore.release()
            res = res['responses']
        if options.get('raw'):
            raise RawResultInterrupt(res)
        return res
class MongoQueryBackend():
    """MongoDB query execution backend with per-biothing-type collections."""

    def __init__(self, client, collections):
        self.client = client
        self.collections = collections
        if None not in self.collections:
            # Use the first configured collection as the default.
            self.collections[None] = next(iter(self.collections.values()))

    def execute(self, query, **options):
        """Run a find() with pagination taken from the `from`/`size` options."""
        collection = self.client[self.collections[options.get('biothing_type')]]
        cursor = collection.find(*query)
        cursor = cursor.skip(options.get('from', 0))
        cursor = cursor.limit(options.get('size', 10))
        return list(cursor)
class SQLQueryBackend():
    """SQL query execution backend.

    Wraps a client whose ``execute()`` result exposes ``keys()`` and ``all()``.
    """

    def __init__(self, client):
        self.client = client

    def execute(self, query, **options):
        """Run the query; return (column keys, all result rows)."""
        res = self.client.execute(query)
        return (res.keys(), res.all())
| StarcoderdataPython |
3280948 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file defines the AngleFormatterLocator class which is a class that
# provides both a method for a formatter and one for a locator, for a given
# label spacing. The advantage of keeping the two connected is that we need to
# make sure that the formatter can correctly represent the spacing requested and
# vice versa. For example, a format of dd:mm cannot work with a tick spacing
# that is not a multiple of one arcminute.
import re
import warnings
import numpy as np
from matplotlib import rcParams
from astropy.extern import six
from astropy import units as u
from astropy.coordinates import Angle
# Accepted tick-label format specifications. The dot separating the integer
# part from the repeated fractional placeholder is a literal '.', so it must
# be escaped -- the original patterns used a bare '.', which matched ANY
# character (e.g. "dXd" was wrongly accepted as a decimal-degree format).
DMS_RE = re.compile(r'^dd(:mm(:ss(\.(s)+)?)?)?$')
HMS_RE = re.compile(r'^hh(:mm(:ss(\.(s)+)?)?)?$')
DDEC_RE = re.compile(r'^d(\.(d)+)?$')
DMIN_RE = re.compile(r'^m(\.(m)+)?$')
DSEC_RE = re.compile(r'^s(\.(s)+)?$')
SCAL_RE = re.compile(r'^x(\.(x)+)?$')
class BaseFormatterLocator(object):
    """
    A joint formatter/locator.

    Exactly one of ``values``, ``number`` or ``spacing`` is active at a time;
    assigning one through its property clears the other two. If none is
    given, a default of 5 ticks (``number=5``) is used.
    """
    def __init__(self, values=None, number=None, spacing=None, format=None):
        if (values, number, spacing).count(None) < 2:
            # (typo fixed: "specifed" -> "specified")
            raise ValueError("At most one of values/number/spacing can be specified")
        if values is not None:
            self.values = values
        elif number is not None:
            self.number = number
        elif spacing is not None:
            self.spacing = spacing
        else:
            self.number = 5
        self.format = format

    @property
    def values(self):
        return self._values

    @values.setter
    def values(self, values):
        if not isinstance(values, u.Quantity) or (not values.ndim == 1):
            raise TypeError("values should be an astropy.units.Quantity array")
        self._number = None
        self._spacing = None
        self._values = values

    @property
    def number(self):
        return self._number

    @number.setter
    def number(self, number):
        self._number = number
        self._spacing = None
        self._values = None

    @property
    def spacing(self):
        return self._spacing

    @spacing.setter
    def spacing(self, spacing):
        self._number = None
        self._spacing = spacing
        self._values = None

    def minor_locator(self, spacing, frequency, value_min, value_max):
        """Return minor tick positions between ``value_min`` and ``value_max``.

        ``spacing`` is the major tick spacing (a Quantity) and ``frequency``
        the number of minor intervals per major interval. Positions that
        coincide with major ticks are removed.  ``self._unit`` is provided
        by the subclass.
        """
        if self.values is not None:
            # Explicit tick values: no minor ticks.
            return [] * self._unit
        minor_spacing = spacing.value / frequency
        values = self._locate_values(value_min, value_max, minor_spacing)
        # Candidate indices that fall exactly on a major tick.
        major_index = np.where((values % frequency) == 0)[0]
        if len(major_index) > 0:
            # Major ticks repeat every `frequency` entries starting at the
            # first one found; drop them all. (The original indexed
            # major_index[0] unconditionally and raised IndexError when no
            # major tick fell inside the range.)
            values = np.delete(values, np.s_[major_index[0]::frequency])
        return values * minor_spacing * self._unit

    def _locate_values(self, value_min, value_max, spacing):
        """Return the integer multiples of ``spacing`` inside [value_min, value_max]."""
        imin = np.ceil(value_min / spacing)
        imax = np.floor(value_max / spacing)
        values = np.arange(imin, imax + 1, dtype=int)
        return values
class AngleFormatterLocator(BaseFormatterLocator):
    """
    A joint formatter/locator for angular axes.

    Setting a sexagesimal/decimal ``format`` also constrains the tick
    spacing to a multiple of the smallest value that format can represent.
    """
    def __init__(self, values=None, number=None, spacing=None, format=None):
        self._unit = u.degree
        self._sep = None  # optional custom field separators, e.g. ('d', 'm', 's')
        super(AngleFormatterLocator, self).__init__(values=values,
                                                    number=number,
                                                    spacing=spacing,
                                                    format=format)
    @property
    def spacing(self):
        return self._spacing
    @spacing.setter
    def spacing(self, spacing):
        # Spacing must be an angular Quantity (degree, arcmin, hourangle, ...).
        if spacing is not None and (not isinstance(spacing, u.Quantity)
                                    or spacing.unit.physical_type != 'angle'):
            raise TypeError("spacing should be an astropy.units.Quantity instance with units of angle")
        self._number = None
        self._spacing = spacing
        self._values = None
    @property
    def sep(self):
        return self._sep
    @sep.setter
    def sep(self, separator):
        # Separator characters placed between sexagesimal fields.
        self._sep = separator
    @property
    def format(self):
        return self._format
    @format.setter
    def format(self, value):
        self._format = value
        if value is None:
            return
        # Sexagesimal degrees: "dd", "dd:mm", "dd:mm:ss", "dd:mm:ss.sss", ...
        if DMS_RE.match(value) is not None:
            self._decimal = False
            self._unit = u.degree
            if '.' in value:
                # Digits after the dot define the seconds precision.
                self._precision = len(value) - value.index('.') - 1
                self._fields = 3
            else:
                self._precision = 0
                self._fields = value.count(':') + 1
        # Sexagesimal hours: "hh", "hh:mm", ...
        elif HMS_RE.match(value) is not None:
            self._decimal = False
            self._unit = u.hourangle
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
                self._fields = 3
            else:
                self._precision = 0
                self._fields = value.count(':') + 1
        # Decimal degrees: "d", "d.dd", ...
        elif DDEC_RE.match(value) is not None:
            self._decimal = True
            self._unit = u.degree
            self._fields = 1
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
            else:
                self._precision = 0
        # Decimal arcminutes: "m", "m.mm", ...
        elif DMIN_RE.match(value) is not None:
            self._decimal = True
            self._unit = u.arcmin
            self._fields = 1
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
            else:
                self._precision = 0
        # Decimal arcseconds: "s", "s.ss", ...
        elif DSEC_RE.match(value) is not None:
            self._decimal = True
            self._unit = u.arcsec
            self._fields = 1
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
            else:
                self._precision = 0
        else:
            raise ValueError("Invalid format: {0}".format(value))
        # Keep any previously set spacing representable in the new format.
        if self.spacing is not None and self.spacing < self.base_spacing:
            warnings.warn("Spacing is too small - resetting spacing to match format")
            self.spacing = self.base_spacing
        if self.spacing is not None:
            ratio = (self.spacing / self.base_spacing).decompose().value
            remainder = ratio - np.round(ratio)
            if abs(remainder) > 1.e-10:
                warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
                self.spacing = self.base_spacing * max(1, round(ratio))
    @property
    def base_spacing(self):
        # Smallest angular step the current format can represent exactly.
        if self._decimal:
            spacing = self._unit / (10. ** self._precision)
        else:
            if self._fields == 1:
                spacing = 1. * u.degree
            elif self._fields == 2:
                spacing = 1. * u.arcmin
            elif self._fields == 3:
                if self._precision == 0:
                    spacing = 1. * u.arcsec
                else:
                    spacing = u.arcsec / (10. ** self._precision)
            # One hour of hour angle is 15 degrees.
            if self._unit is u.hourangle:
                spacing *= 15
        return spacing
    def locator(self, value_min, value_max):
        """Return ``(tick_values, spacing)``, both as angular Quantities."""
        if self.values is not None:
            # values were manually specified
            return self.values, 1.1 * u.arcsec
        else:
            if self.spacing is not None:
                # spacing was manually specified
                spacing_deg = self.spacing.to(u.degree).value
            elif self.number is not None:
                # number of ticks was specified, work out optimal spacing
                # first compute the exact spacing
                dv = abs(float(value_max - value_min)) / self.number * u.degree
                if self.format is not None and dv < self.base_spacing:
                    # if the spacing is less than the minimum spacing allowed by the format, simply
                    # use the format precision instead.
                    spacing_deg = self.base_spacing.to(u.degree).value
                else:
                    # otherwise we clip to the nearest 'sensible' spacing
                    if self._unit is u.degree:
                        from .utils import select_step_degree
                        spacing_deg = select_step_degree(dv).to(u.degree).value
                    else:
                        from .utils import select_step_hour
                        spacing_deg = select_step_hour(dv).to(u.degree).value
            # We now find the interval values as multiples of the spacing and
            # generate the tick positions from this.
            values = self._locate_values(value_min, value_max, spacing_deg)
            return values * spacing_deg * u.degree, spacing_deg * u.degree
    def formatter(self, values, spacing):
        """Format tick ``values`` (a Quantity array) as a list of strings.

        When no explicit format was set, ``spacing`` determines how many
        sexagesimal fields and decimals are shown.
        """
        if not isinstance(values, u.Quantity) and values is not None:
            raise TypeError("values should be a Quantities array")
        if len(values) > 0:
            if self.format is None:
                # No explicit format: derive fields/precision from the spacing.
                spacing = spacing.to(u.arcsec).value
                if spacing > 3600:
                    fields = 1
                    precision = 0
                elif spacing > 60:
                    fields = 2
                    precision = 0
                elif spacing > 1:
                    fields = 3
                    precision = 0
                else:
                    fields = 3
                    precision = -int(np.floor(np.log10(spacing)))
                decimal = False
                unit = u.degree
            else:
                fields = self._fields
                precision = self._precision
                decimal = self._decimal
                unit = self._unit
            if decimal:
                sep = None
            elif self._sep is not None:
                sep = self._sep
            else:
                # Default separators: degree sign + arcmin/arcsec marks,
                # or h/m/s for hour angles.
                if unit == u.degree:
                    if rcParams['text.usetex']:
                        deg = r'$^\circ$'
                    else:
                        deg = six.u('\xb0')
                    sep = (deg, "'", '"')
                else:
                    sep = ('h', 'm', 's')
            angles = Angle(values)
            string = angles.to_string(unit=unit,
                                      precision=precision,
                                      decimal=decimal,
                                      fields=fields,
                                      sep=sep).tolist()
            return string
        else:
            return []
class ScalarFormatterLocator(BaseFormatterLocator):
    """
    A joint formatter/locator for plain scalar (non-angle) axes.
    """
    def __init__(self, values=None, number=None, spacing=None, format=None, unit=None):
        # Infer the working unit from whichever input carries one.
        # NOTE(review): if none of unit/spacing/values is given, _unit and
        # _format_unit are never set and later attribute access will fail --
        # confirm callers always provide one of them.
        if unit is not None:
            self._unit = unit
            self._format_unit = unit
        elif spacing is not None:
            self._unit = spacing.unit
            self._format_unit = spacing.unit
        elif values is not None:
            self._unit = values.unit
            self._format_unit = values.unit
        super(ScalarFormatterLocator, self).__init__(values=values,
                                                     number=number,
                                                     spacing=spacing,
                                                     format=format)
    @property
    def format_unit(self):
        return self._format_unit
    @format_unit.setter
    def format_unit(self, unit):
        if (not issubclass(unit.__class__, u.UnitBase)):
            raise TypeError("unit should be an astropy UnitBase subclass")
        self._format_unit = unit
    @property
    def spacing(self):
        return self._spacing
    @spacing.setter
    def spacing(self, spacing):
        if spacing is not None and not isinstance(spacing, u.Quantity):
            raise TypeError("spacing should be an astropy.units.Quantity instance")
        self._number = None
        self._spacing = spacing
        self._values = None
    @property
    def format(self):
        return self._format
    @format.setter
    def format(self, value):
        self._format = value
        if value is None:
            return
        # Fixed-point formats: "x", "x.x", "x.xx", ...
        if SCAL_RE.match(value) is not None:
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
            else:
                self._precision = 0
            # Keep any previously set spacing representable in the format.
            if self.spacing is not None and self.spacing < self.base_spacing:
                warnings.warn("Spacing is too small - resetting spacing to match format")
                self.spacing = self.base_spacing
            if self.spacing is not None:
                ratio = (self.spacing / self.base_spacing).decompose().value
                remainder = ratio - np.round(ratio)
                if abs(remainder) > 1.e-10:
                    warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
                    self.spacing = self.base_spacing * max(1, round(ratio))
        elif not value.startswith('%'):
            # Only fixed-point ("x...") and printf-style ("%...") accepted.
            raise ValueError("Invalid format: {0}".format(value))
    @property
    def base_spacing(self):
        # Smallest step representable at the current fixed-point precision.
        return self._unit / (10. ** self._precision)
    def locator(self, value_min, value_max):
        """Return ``(tick_values, spacing)`` as Quantities in the working unit."""
        if self.values is not None:
            # values were manually specified
            return self.values, 1.1 * self._unit
        else:
            if self.spacing is not None:
                # spacing was manually specified
                spacing = self.spacing.to(self._unit).value
            elif self.number is not None:
                # number of ticks was specified, work out optimal spacing
                # first compute the exact spacing
                dv = abs(float(value_max - value_min)) / self.number
                if self.format is not None and (not self.format.startswith('%')) and dv < self.base_spacing.value:
                    # if the spacing is less than the minimum spacing allowed by the format, simply
                    # use the format precision instead.
                    spacing = self.base_spacing.to(self._unit).value
                else:
                    from .utils import select_step_scalar
                    spacing = select_step_scalar(dv)
            # We now find the interval values as multiples of the spacing and
            # generate the tick positions from this
            values = self._locate_values(value_min, value_max, spacing)
            return values * spacing * self._unit, spacing * self._unit
    def formatter(self, values, spacing):
        """Format tick ``values`` as strings expressed in ``format_unit``."""
        if len(values) > 0:
            if self.format is None:
                # Derive the decimal precision from the spacing magnitude.
                if spacing.value < 1.:
                    precision = -int(np.floor(np.log10(spacing.value)))
                else:
                    precision = 0
            elif self.format.startswith('%'):
                return [(self.format % x.value) for x in values]
            else:
                precision = self._precision
            return [("{0:." + str(precision) + "f}").format(x.to(self._format_unit).value) for x in values]
        else:
            return []
| StarcoderdataPython |
1733661 | <reponame>minrk/binderhub<filename>testing/minikube/binderhub_config.py
# config file for testing with minikube-config.yaml
import subprocess
try:
    # Ask the local minikube VM for its IP; fall back to a static address
    # when the minikube binary is missing or the call fails.
    minikube_ip = subprocess.check_output(['minikube', 'ip']).decode('utf-8').strip()
except (subprocess.SubprocessError, FileNotFoundError):
    minikube_ip = '192.168.1.100'
# `c` is the traitlets configuration object injected when BinderHub loads
# this file; 30123 is the hub's NodePort on the minikube cluster.
c.BinderHub.hub_url = 'http://{}:30123'.format(minikube_ip)
# NOTE(review): hard-coded API token -- acceptable only for local testing,
# never reuse this value outside the minikube test setup.
c.BinderHub.hub_api_token = 'aec7d32df938c0f55e54f09244a350cb29ea612907ed4f07be13d9553d18a8e4'
c.BinderHub.use_registry = False  # build images locally, no image registry
c.BinderHub.build_namespace = 'binder-test'
| StarcoderdataPython |
189674 | <filename>app/core/migrations/0003_auto_20210922_1112.py
# Generated by Django 3.2.7 on 2021-09-22 11:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.7. Do not edit the operations by hand:
    # updates User field metadata and introduces the per-user Tag model.
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
        ('core', '0002_auto_20210913_1515'),
    ]
    operations = [
        # Refresh help_text / related-name metadata on User.groups.
        migrations.AlterField(
            model_name='user',
            name='groups',
            field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
        ),
        # Refresh help_text on User.is_superuser (default stays False).
        migrations.AlterField(
            model_name='user',
            name='is_superuser',
            field=models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status'),
        ),
        # New Tag model: a named label owned by a user; deleting the user
        # cascades to their tags.
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| StarcoderdataPython |
1733423 | import argparse
from pybedtools import BedTool
if __name__ == "__main__":
    # CLI: extract intron intervals from a BED12 file into a new BED file.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("bed12")
    arg_parser.add_argument("introns")
    opts = arg_parser.parse_args()
    bed12 = BedTool(opts.bed12)
    # Derive introns, drop malformed intervals, and write the result.
    bed12.introns().remove_invalid().saveas(opts.introns)
| StarcoderdataPython |
3228438 | <reponame>ChangjieChen/lucis_qgis<filename>algorithms/zonal_stats.py
import sys
import os
from PyQt5.QtCore import QCoreApplication
from qgis.core import (QgsProcessing, QgsProcessingAlgorithm,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterNumber,
QgsProcessingParameterString,
QgsProcessingParameterVectorDestination)
from pylusat import zonal
class ZonalStats(QgsProcessingAlgorithm):
    """QGIS processing algorithm: zonal statistics of a raster over vector zones."""
    # Parameter identifiers used by the QGIS processing framework.
    INPUT = 'INPUT'
    RASTER = 'RASTER'
    STATS = 'STATS'
    OUTPUT_COLUMN_PREFIX = "OUTPUT_COLUMN_PREFIX"
    NODATA = 'NODATA'
    OUTPUT = 'ZonalStats'
    def tr(self, string, context=''):
        # Translate UI strings; the translation context defaults to the
        # class name so each algorithm can be translated independently.
        if context == '':
            context = self.__class__.__name__
        return QCoreApplication.translate(context, string)
    def group(self):
        return self.tr('LUCIS-OPEN Tools for QGIS')
    def groupId(self):
        return 'lucisopen'
    def name(self):
        return 'zonalstats'
    def displayName(self):
        return self.tr('Zonal Statistics')
    def shortHelpString(self):
        # HTML help shown in the QGIS processing panel.
        html_doc = '''
<p>Calculate statistics on values of raster within the zones of \
another dataset.</p>
<h3>Input layer</h3>
<p>Dataset that defines the zones and sets boundaries according \
to its geometries. The zones are only defined by a vector layer.\
</p>
<h3>Raster layer</h3>
<p>Raster that contains the values on which to calculate a \
statistic.</p>
<h3>Types of statistics</h3>
<p>Statistic type to be calculate.
The types of statistics defaults to ['count', 'min', 'max', \
'mean'].Other valid statistics are ['sum', 'std', 'median', \
'majority','minority', 'unique', 'range'].
Count—Count the number of cells have value, no data would not be \
counted.
Min(Minimum)—Determines the smallest value of all cells in the \
value raster that belong to the same zone as the output cell.
Max(Maximum)—Determines the largest value of all cells in the \
value raster that belong to the same zone as the output cell.
Mean—Calculates the average of all cells in the value raster that \
belong to the same zone as the output cell.
Sum—Calculates the total value of all cells in the value raster \
that belong to the same zone as the output cell.
Std(Standard deviation)—Calculates the standard deviation of all \
cells in the value raster that belong to the same zone as the \
output cell.
Median—Determines the median value of all cells in the value \
raster that belong to the same zone as the output cell.
Majority—Determines the value that occurs most often of all \
cells in the value raster that belong to the same zone as the \
output cell.
Minority—Determines the value that occurs least often of all \
cells in the value raster that belong to the same zone as the \
output cell.
Unique—Count the number of unique value in cells.
Range—Calculates the difference between the largest and smallest \
value of all cells in the value raster that belong to the same \
zone as the output cell.</p>
<h3>No data value</h3>
<p>Value should be considered as "no data" in the raster layer.\
</p>
<h3>Output layer</h3>
<p>Output vector layer</p>
'''
        return html_doc
    def createInstance(self):
        return ZonalStats()
    def __init__(self):
        super().__init__()
    def initAlgorithm(self, config=None):
        # Declare the algorithm's inputs and outputs for the framework.
        self.addParameter(
            QgsProcessingParameterFeatureSource(
                self.INPUT,
                self.tr('Input layer'),
                types=[QgsProcessing.TypeVectorPolygon]
            )
        )
        self.addParameter(
            QgsProcessingParameterRasterLayer(
                self.RASTER,
                self.tr('Raster layer'),
            )
        )
        self.addParameter(
            QgsProcessingParameterString(
                self.STATS,
                self.tr('Types of statistics (separated by space)')
            )
        )
        self.addParameter(
            QgsProcessingParameterString(
                self.OUTPUT_COLUMN_PREFIX,
                self.tr('Output column prefix')
            )
        )
        self.addParameter(
            QgsProcessingParameterNumber(
                self.NODATA,
                self.tr('No data value'),
                type=QgsProcessingParameterNumber.Integer,
                optional=True
            )
        )
        self.addParameter(
            QgsProcessingParameterVectorDestination(
                self.OUTPUT,
                self.tr('Output layer'),
            )
        )
    def processAlgorithm(self, parameters, context, feedback):
        input_lyr = self.parameterAsVectorLayer(parameters, self.INPUT, context)
        raster_lyr = self.parameterAsRasterLayer(parameters, self.RASTER, context)
        stats = self.parameterAsString(parameters, self.STATS, context)
        output_clm_prefix = self.parameterAsString(parameters,
                                                   self.OUTPUT_COLUMN_PREFIX,
                                                   context)
        nodata = parameters[self.NODATA]
        output_file = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
        # Make the algorithm's own directory importable so loqlib (which
        # ships alongside this file) can be loaded at run time.
        sys.path.insert(1, os.path.dirname(os.path.realpath(__file__)))
        from loqlib import LUCISOpenQGISUtils
        input_gdf = LUCISOpenQGISUtils.vector_to_gdf(input_lyr)
        raster_path = raster_lyr.dataProvider().dataSourceUri()
        # Delegate the actual computation to pylusat.
        output = zonal.zonal_stats_raster(input_gdf, raster_path, stats,
                                          output_clm_prefix, nodata)
        output.to_file(output_file, driver="GPKG")
        return {self.OUTPUT: output_file}
| StarcoderdataPython |
3309208 | <reponame>jlfranklin/python-acquia-cloud-2<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from unittest.mock import patch
from acapi2.http_request import HttpRequest
from acapi2.tests import BaseTest
class TestHttpRequest(BaseTest):
    def test_session(self):
        # The session is shared across HttpRequest instances (same object id),
        # i.e. it behaves as a singleton.
        http_request = HttpRequest()
        http_request_1 = HttpRequest()
        self.assertEqual(id(http_request.session), id(http_request_1.session))
    def test_get_session(self):
        # _get_session must hand back the class-level cached session.
        request_session = HttpRequest()._get_session()
        self.assertEqual(HttpRequest._session, request_session)
    @patch("requests.Session.request")
    def test_make_request(self, mock_session):
        # do() should issue a GET to the default URL carrying the stored body.
        http_request = HttpRequest()
        http_request.body = "body"
        http_request.do()
        mock_session.assert_called_once_with(
            "GET",
            "http://localhost/",
            data="body",
            headers={}
        )
| StarcoderdataPython |
1670387 | <reponame>isudox/leetcode-solution
"""688. Knight Probability in Chessboard
https://leetcode.com/problems/knight-probability-in-chessboard/
"""
import functools
def knight_probability(self, n: int, k: int, row: int, column: int) -> float:
    """LeetCode 688: Knight Probability in Chessboard.

    Return the probability that a knight starting at (row, column) on an
    n x n board is still on the board after exactly k moves, where each of
    the 8 knight moves is chosen uniformly at random (a move that leaves
    the board ends the sequence off-board).

    ``self`` is unused; the signature mirrors the LeetCode method form.
    """
    moves = ((1, 2), (1, -2), (-1, 2), (-1, -2),
             (2, 1), (2, -1), (-2, 1), (-2, -1))

    @functools.lru_cache(None)
    def on_board_paths(x: int, y: int, steps: int) -> int:
        # Number of `steps`-long move sequences from (x, y) that stay on
        # the board the whole time.
        if not (0 <= x < n and 0 <= y < n):
            # Fix: the original fell through to the move loop when called
            # off-board with steps == 0; make the base case explicit.
            return 0
        if steps == 0:
            return 1
        return sum(on_board_paths(x + dx, y + dy, steps - 1)
                   for dx, dy in moves)

    # Each of the k moves has 8 equally likely choices: 8**k total sequences.
    return on_board_paths(row, column, k) / (8 ** k)
| StarcoderdataPython |
1687572 | import dataclasses
import enum
import re
from typing import Mapping, List, Optional, Set
from icontract import require, ensure
# crosshair: on
from python_by_contract_corpus.common import Lines
class Operation(enum.Enum):
    """Represent an operation corresponding to an instruction."""
    NOP = "nop"  # do nothing; execution advances to the next instruction
    ACC = "acc"  # add the argument to the accumulator, then advance
    JMP = "jmp"  # jump relative to the current instruction by the argument
# Enum values must be pairwise distinct for the reverse lookup below.
assert len(set(op.value for op in Operation)) == sum(1 for op in Operation)
# Map a textual mnemonic (e.g. "nop") back to its Operation member.
VALUE_TO_OPERATION = {op.value: op for op in Operation}  # type: Mapping[str, Operation]
@dataclasses.dataclass(frozen=True)
class Instruction:
    """Represent an instruction of the boot code."""
    operation: Operation  #: the corresponding operation
    argument: int  #: the argument to the operation
    def __repr__(self) -> str:
        """Represent the instruction as a string for debugging."""
        # Renders as "<mnemonic> <argument>", e.g. "jmp 4" or "acc -3".
        return f"{self.operation.value} {self.argument}"
#: One boot-code line: a mnemonic, a space, then a signed integer argument
#: (sign mandatory, no leading zeros), e.g. "acc +7" or "jmp -3".
INSTRUCTION_RE = re.compile(
    r"^(?P<operation>nop|acc|jmp) (?P<argument>[+-](0|[1-9][0-9]*))\Z"
)
@require(lambda line: INSTRUCTION_RE.match(line))
def parse_line(line: str) -> Instruction:
    """Parse a ``line`` of the boot code into an instruction."""
    mtch = INSTRUCTION_RE.match(line)
    assert mtch is not None  # guaranteed by the precondition above
    operation = VALUE_TO_OPERATION[mtch.group("operation")]
    # int() accepts the leading '+' or '-' produced by the regex.
    argument = int(mtch.group("argument"))
    return Instruction(operation=operation, argument=argument)
@require(lambda lines: all(INSTRUCTION_RE.match(line) for line in lines))
@ensure(lambda lines, result: len(lines) == len(result))
def parse(lines: Lines) -> List[Instruction]:
    """Parse the boot code given as ``lines``."""
    # One instruction per input line; order is preserved (see postcondition).
    return [parse_line(line) for line in lines]
@require(
    lambda instructions: all(
        0 <= i + instruction.argument < len(instructions)
        for i, instruction in enumerate(instructions)
        if instruction.operation == Operation.JMP
    )
)
def execute_instructions(instructions: List[Instruction]) -> Optional[int]:
    """
    Execute the boot code given as ``instructions``.
    :return:
        The value in the accumulator just before an instruction is run
        for the second time, or None if the program terminates normally
        (execution reaches one past the last instruction)
    """
    # Instruction indices already executed; a repeat means an infinite loop.
    visited_lines = set()  # type: Set[int]
    current_line = 0
    accumulator = 0
    while True:
        if current_line in visited_lines:
            # About to run an instruction for the second time -> loop found.
            return accumulator
        if current_line == len(instructions):
            # Fell off the end of the program: normal termination.
            return None
        visited_lines.add(current_line)
        instruction = instructions[current_line]
        if instruction.operation == Operation.NOP:
            current_line += 1
        elif instruction.operation == Operation.ACC:
            accumulator += instruction.argument
            current_line += 1
        elif instruction.operation == Operation.JMP:
            current_line += instruction.argument
        else:
            raise NotImplementedError(instruction.operation)
1752746 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---------------------------------------
# Project: PKUYouth Webserver v2
# File: types.py
# Created Date: 2020-07-28
# Author: <NAME>
# ---------------------------------------
# Copyright (c) 2020 PKUYouth
import redis
from .pool import REDIS_CONNECTION_POOL
class RedisAutoExpiredMap(object):
    """A namespaced, dict-like Redis map whose entries expire automatically.

    Every entry is stored under ``<namespace>_<key>`` with a TTL of
    ``expires`` seconds.
    """

    def __init__(self, namespace, expires, connection_pool=REDIS_CONNECTION_POOL):
        self._namespace = namespace
        self._expires = expires
        self._conn = redis.Redis(connection_pool=connection_pool)

    def __repr__(self):
        return f"{self.__class__.__name__}(namespace={self._namespace!r}, expires={self._expires})"

    @property
    def namespace(self):
        return self._namespace

    @property
    def expires(self):
        return self._expires

    def _get_key(self, key):
        # Prefix with the namespace so unrelated maps never collide.
        return f"{self._namespace}_{key}"

    def _strip_namespace(self, key):
        return key[len(self._namespace) + 1:]

    def __iter__(self):
        return iter(self.keys())

    def __contains__(self, key):
        return self._conn.exists(self._get_key(key))

    def __getitem__(self, key):
        return self._conn.get(self._get_key(key))

    def __setitem__(self, key, value):
        # SETEX stores the value and starts the TTL countdown atomically.
        return self._conn.setex(self._get_key(key), self._expires, value)

    def __delitem__(self, key):
        return self._conn.delete(self._get_key(key))

    def resetex(self, key):
        # Restart the TTL countdown for an existing entry.
        return self._conn.expire(self._get_key(key), self._expires)

    def ttl(self, key):
        return self._conn.ttl(self._get_key(key))

    def keys(self, withns=False):
        # NOTE(review): redis-py returns bytes keys by default, so stripping
        # relies on integer slicing working for both str and bytes -- confirm.
        for full_key in self._conn.scan_iter(f"{self._namespace}_*"):
            yield full_key if withns else self._strip_namespace(full_key)

    def clear(self):
        removed = 0
        for full_key in self.keys(withns=True):
            removed += self._conn.delete(full_key)
        return removed
class RedisContainer(object):
    """Base class for a named container stored under a single Redis key."""

    def __init__(self, name, connection_pool=REDIS_CONNECTION_POOL):
        self._name = name
        self._conn = redis.Redis(connection_pool=connection_pool)

    @property
    def name(self):
        return self._name

    def __repr__(self):
        return f"{self.__class__.__name__}({self._name!r})"

    def __len__(self):
        raise NotImplementedError

    def all(self):
        # Subclasses return every element of the container.
        raise NotImplementedError

    def __iter__(self):
        return iter(self.all())

    def empty(self):
        return len(self) == 0

    def clear(self):
        # Deleting the Redis key removes the whole container.
        return self._conn.delete(self._name)

    def expires(self, time):
        return self._conn.expire(self._name, time)

    def ttl(self):
        return self._conn.ttl(self._name)
class RedisList(RedisContainer):
    """A Redis-backed list supporting pushes and pops at both ends."""

    def __len__(self):
        return self._conn.llen(self._name)

    def all(self):
        return self._conn.lrange(self._name, 0, -1)

    def __getitem__(self, pos):
        return self._conn.lindex(self._name, pos)

    def front(self):
        return self[0]

    def back(self):
        return self[-1]

    def append(self, *values):
        return self._conn.rpush(self._name, *values)

    def pop(self):
        return self._conn.rpop(self._name)

    def appendleft(self, *values):
        return self._conn.lpush(self._name, *values)

    def popleft(self):
        return self._conn.lpop(self._name)
3236853 | '''
Python program to split a given string (s) into strings if there is a space in the string, otherwise split on commas if there is a comma, otherwise return the list of lowercase letters with odd order (order of a = 0, b = 1, etc.)
Input:
a b c d
Split the said string into strings if there is a space in the string,
otherwise split on commas if there is a comma,
Output:
['a', 'b', 'c', 'd']
Input:
a,b,c,d
Split the said string into strings if there is a space in the string,
otherwise split on commas if there is a comma,
Output:
['a', 'b', 'c', 'd']
Input:
abcd
Split the said string into strings if there is a space in the string,
otherwise split on commas if there is a comma,
Output:
['b', 'd']
'''
#License: https://bit.ly/3oLErEI
def test(s):
#Compact return statement
'''
if " " in s:
return s.split(" ")
if "," in s:
return s.split(",")
return [c for c in s if c.islower() and ord(c) % 2 == 0]
'''
if " " in s:
return s.split(" ")
elif "," in s:
return s.split(",")
else:
subList =[]
for c in s:
if c.islower() and ord(c) % 2 == 0:
subList.append(c)
return subList
# --- demonstration: run test() over the three documented examples ----------
strs = "a b c d"
print("Original string:")
print(strs)
print("Split the said string into strings if there is a space in the string, \notherwise split on commas if there is a comma, \notherwise return the list of lowercase letters with odd order:")
print(test(strs))
strs = "a,b,c,d"
print("\nOriginal string:")
print(strs)
print("Split the said string into strings if there is a space in the string, \notherwise split on commas if there is a comma, \notherwise return the list of lowercase letters with odd order:")
print(test(strs))
strs = "abcd"
print("\nOriginal string:")
print(strs)
print("Split the said string into strings if there is a space in the string, \notherwise split on commas if there is a comma, \notherwise return the list of lowercase letters with odd order:")
print(test(strs))
| StarcoderdataPython |
106805 | from typing import Optional
from ._price import Price
from ._response import Response
class Security(Response):
    """A security (instrument) resource returned by the API.

    NOTE(review): fields are read with ``self[...]`` / ``self.get(...)``,
    so ``Response`` is assumed to behave like a mapping -- confirm in
    ``_response``.
    """
    @property
    def _sec(self) -> dict:
        # Raw security payload; SecurityNested overrides this to unwrap
        # the nested "security" object.
        return self
    @property
    def bid(self) -> Price:
        return Price(self._sec['bid'])
    @property
    def closing_bid(self) -> Price:
        return Price(self._sec['closingBid'])
    @property
    def country_code(self) -> Optional[str]:
        # Optional field: None when absent.
        return self._sec.get('countryCode')
    @property
    def opening_bid(self) -> Optional[Price]:
        # Absent or falsy openingBid yields None.
        if not self._sec.get('openingBid'):
            return None
        return Price(self._sec['openingBid'])
    @property
    def description(self) -> Optional[str]:
        return self._sec.get('description')
    @property
    def id(self) -> str:
        return self._sec['id']
    @property
    def name(self) -> str:
        return self._sec['name']
    @property
    def security_type(self) -> str:
        return self._sec['securityType']
    @property
    def offer(self) -> Price:
        return Price(self._sec['offer'])
    @property
    def ticker_code(self) -> str:
        return self._sec['tickerCode']
    @property
    def today_low(self) -> Optional[Price]:
        # NOTE(review): reads 'stats' from self, not self._sec -- in
        # SecurityNested this looks at the top-level payload, not the
        # nested security object. Confirm that is intended.
        if not self.get('stats'):
            return None
        return Price(self['stats']['todayLow'])
    @property
    def today_high(self) -> Optional[Price]:
        # Same top-level 'stats' access as today_low (see note there).
        if not self.get('stats'):
            return None
        return Price(self['stats']['todayHigh'])
class SecurityNested(Security):
    """Security enriched with social fields (``following``/``followers``).

    When social info is present the API nests all the regular security
    fields under ``security``, so ``_sec`` is redirected to that sub-dict —
    "flat is better than nested".
    """

    @property
    def _sec(self):
        # Every inherited property now reads from the nested dict.
        return self['security']

    @property
    def following(self) -> bool:
        social = self['socialInfo']
        return social['following']

    @property
    def followers(self) -> int:
        social = self['socialInfo']
        return social['followers']
| StarcoderdataPython |
3355131 | import numpy as np
from sklearn.preprocessing import scale,StandardScaler
import torch
import torch.utils.data as Data
import math
import torch.nn as nn
from sklearn.metrics import confusion_matrix, classification_report,accuracy_score
def gpu_available():
    """Return True if a CUDA-capable GPU is visible to torch."""
    return torch.cuda.is_available()
def get_data(train_path, test_path, validate_path):
    """Load the three .npy datasets and split each into signal and label.

    Each array is expected to have 1251 columns: columns 0..1249 are the
    signal and column 1250 is the class label.

    Returns a list: [train_data, train_label, test_data, test_label,
    validate_data, validate_label].
    """
    splits = []
    for path in (train_path, test_path, validate_path):
        arr = np.load(path)
        splits.append(arr[:, 0:1250])  # signal
        splits.append(arr[:, 1250])    # label column
    return splits
def z_score(train_data, test_data, validate_data):
    """Standardize all three splits with statistics fitted on train only.

    Fitting the scaler exclusively on the training split avoids leaking
    test/validation statistics into the normalisation.
    """
    scaler = StandardScaler().fit(train_data)
    return [scaler.transform(split) for split in (train_data, test_data, validate_data)]
def dataTypeTransfer(train_data, test_data, validate_data):
    """Convert numpy splits to float32 torch tensors with a channel axis.

    Each (n, width) array becomes an (n, 1, width) FloatTensor so it can be
    fed to 1-D convolutional layers.
    """
    def _to_tensor(arr):
        # numpy -> tensor, insert the singleton channel dim, cast to float32
        return torch.unsqueeze(torch.from_numpy(arr), dim=1).type(torch.FloatTensor)

    return [_to_tensor(split) for split in (train_data, test_data, validate_data)]
class makeDataset(Data.Dataset):
    """Thin Dataset wrapper pairing sample tensors with integer class labels."""

    def __init__(self, train_data, train_label):
        # Samples are stored as-is; labels come in as a numpy array and are
        # converted to int64 as required by loss functions like CrossEntropy.
        self.x_data = train_data
        self.y_data = torch.from_numpy(train_label).type(torch.long)
        self.len = train_data.shape[0]

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017) plus dropout.

    A fixed (max_len, 1, d_model) table is precomputed and registered as a
    buffer; forward() adds the first ``seq_len`` rows to the input.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # angle[pos, i] = pos / 10000^(2i / d_model)
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        angles = positions * freqs
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(angles)
        cos_part = torch.cos(angles)
        if d_model % 2 != 0:
            # Odd width: the cosine half has one column too many; drop it.
            cos_part = cos_part[:, 0:-1]
        table[:, 1::2] = cos_part
        # Shape (max_len, 1, d_model) so it broadcasts over the batch dim.
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        # x: (seq_len, batch, d_model)
        return self.dropout(x + self.pe[:x.size(0), :])
def _run_eval(model, loader, use_gpu, criterion=None):
    """Evaluate *model* over *loader* without tracking gradients.

    Returns a ``(correct, total, summed_loss)`` tuple; ``summed_loss`` stays
    0.0 unless *criterion* is supplied.
    """
    correct = 0
    total = 0
    summed_loss = 0.0
    with torch.no_grad():
        for inputs, target in loader:
            if use_gpu == True:
                inputs = inputs.cuda()
                target = target.cuda()
            output = model(inputs)
            if criterion is not None:
                summed_loss += criterion(output, target).item()
            _, predicted = torch.max(output.data, dim=1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
    return correct, total, summed_loss


def train(Epoch, model, criterion, optimizer,
          train_loader, validate_loader, device, scheduler=True):
    """Train *model* for *Epoch* epochs, validating after every epoch.

    Args:
        Epoch: number of epochs to run.
        model: network to optimise (modified in place and returned).
        criterion: loss function.
        optimizer: optimiser updating ``model``'s parameters.
        train_loader: training mini-batches.
        validate_loader: validation mini-batches.
        device: True to move batches to the GPU with ``.cuda()``.
        scheduler: if True, attach a ReduceLROnPlateau scheduler that is
            stepped on the validation loss each epoch.

    Returns:
        The trained model.
    """
    # BUG FIX: the original rebound ``scheduler`` to the scheduler object and
    # later re-tested ``scheduler == True``, which was then always False, so
    # ``scheduler.step()`` was never called. Track the request separately.
    use_scheduler = scheduler == True
    if use_scheduler:
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1,
                                                               verbose=True, patience=10)
    running_loss = 0.0
    model.train()
    optimizer.zero_grad()
    train_acc = []
    for epoch in range(Epoch):
        for batch_idx, data in enumerate(train_loader):
            inputs, target = data
            if device == True:
                inputs = inputs.cuda()
                target = target.cuda()
            output = model(inputs)
            loss = criterion(output, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            # Every 100 batches: report the running loss and the accuracy on
            # the training set (the original message mislabelled it "test set").
            if batch_idx % 100 == 99:
                print('[%d, %5d] loss: %.3f' % (epoch + 1, Epoch, running_loss))
                correct, total, _ = _run_eval(model, train_loader, device)
                acc = 100 * correct / total
                train_acc.append(acc)
                print('Accuracy on train set: %d %% [%d / %d]' % (acc, correct, total))
                running_loss = 0.0
        # Per-epoch validation pass (loss drives the LR scheduler).
        correct, total, validate_loss = _run_eval(model, validate_loader, device, criterion)
        acc = 100 * correct / total
        print('Accuracy on validate set: %d %% [%d / %d]' % (acc, correct, total))
        print('[%d, %5d] Val loss: %.3f' % (epoch + 1, Epoch, validate_loss))
        if use_scheduler:
            scheduler.step(validate_loss)
    return model
def evaluation(model, test_loader):
    """Collect predictions for every sample in *test_loader* in eval mode.

    Returns:
        (predicted_labels, true_labels, score_matrix) — two plain Python
        lists plus an (n_samples, 5) numpy array of raw model outputs for
        downstream ROC analysis.
    """
    model.eval()
    predictions = []
    actuals = []
    score_matrix = np.empty(shape=[0, 5])
    with torch.no_grad():
        for inputs, target in test_loader:
            output = model(inputs)
            predicted = torch.max(output, dim=1)[1]
            predictions.extend(predicted.data.tolist())
            actuals.extend(target.data.tolist())
            score_matrix = np.concatenate((score_matrix, output.cpu()), axis=0)
    return predictions, actuals, score_matrix
def get_results(target_true,target_pred):
    """Print a five-class (N/S/V/F/Q) classification report.

    Prints per-class accuracies from the confusion matrix plus binary
    metrics (recall, precision, specificity, F1) and the sklearn
    classification report. Nothing is returned.
    """
    Acc = accuracy_score(target_true, target_pred)
    report = classification_report(target_true, target_pred,digits=5)
    # confusion matrix
    Conf_Mat = confusion_matrix(target_true, target_pred)
    # Per-class accuracy = diagonal / row sum (recall of each class).
    Acc_N = Conf_Mat[0][0] / np.sum(Conf_Mat[0])
    Acc_S = Conf_Mat[1][1] / np.sum(Conf_Mat[1])
    Acc_V = Conf_Mat[2][2] / np.sum(Conf_Mat[2])
    Acc_F = Conf_Mat[3][3] / np.sum(Conf_Mat[3])
    Acc_Q = Conf_Mat[4][4] / np.sum(Conf_Mat[4])
    # NOTE(review): the binary TP/TN/FP/FN below use only the first two
    # classes (N vs S) of the 5-class matrix — confirm this is intended.
    TN = Conf_Mat[0][0]
    FN = Conf_Mat[1][0]
    TP = Conf_Mat[1][1]
    FP = Conf_Mat[0][1]
    # Sensitivity, hit rate, recall, or true positive rate
    TPR = TP/(TP+FN)
    # Specificity or true negative rate
    TNR = TN/(TN+FP)
    # Precision or positive predictive value
    PPV = TP/(TP+FP)
    # Negative predictive value
    NPV = TN/(TN+FN)
    # Fall out or false positive rate
    FPR = FP/(FP+TN)
    # False negative rate
    FNR = FN/(TP+FN)
    # False discovery rate
    FDR = FP/(TP+FP)
    # F1 score (harmonic mean of recall and precision)
    F1 = 2*((TPR*PPV)/(TPR+PPV))
    print('PRINT RESULTS REPORT')
    print('--------------------------------------')
    print('--------------------------------------')
    print('Confusion Matrix:')
    print('True Positive = %.4f' % (TP))
    print('True Negative = %.4f' % (TN))
    print('False Positive =%.4f' % (FP))
    print('False Negative =%.2f' % (FN))
    print('--------------------------------------')
    print('ACCURACY:')
    print('\nAccuracy=%.2f%%' % (Acc * 100))
    print('Accuracy_N=%.2f%%' % (Acc_N * 100))
    print('Accuracy_S=%.2f%%' % (Acc_S * 100))
    print('Accuracy_V=%.2f%%' % (Acc_V * 100))
    print('Accuracy_F=%.2f%%' % (Acc_F * 100))
    print('Accuracy_Q=%.2f%%' % (Acc_Q * 100))
    print('--------------------------------------')
    print('Other Evaluation Criteria:')
    print('Recall = %.4f' % (TPR))
    print('Precision = %.4f' % (PPV))
    print('Specification =%.4f' % (TNR))
    print('F1 =%.4f' % (F1))
    print('--------------------------------------')
    print('REPORT:')
    print(report)
3224451 | <reponame>myousefi2016/slepc4py<filename>test/test_object.py
from slepc4py import SLEPc
from petsc4py import PETSc
import unittest
# --------------------------------------------------------------------
class BaseTestObject(object):
    """Shared PETSc object-protocol tests for SLEPc solver classes.

    Concrete subclasses set ``CLASS`` to a SLEPc type; ``setUp`` builds an
    instance via the ``FACTORY`` method name with ``TARGS``/``KARGS``.
    """
    CLASS, FACTORY = None, 'create'
    TARGS, KARGS = (), {}
    BUILD = None
    def setUp(self):
        self.obj = self.CLASS()
        # Invoke the named factory method (usually create()) on the instance.
        getattr(self.obj,self.FACTORY)(*self.TARGS, **self.KARGS)
        if not self.obj: self.obj.create()
    def tearDown(self):
        self.obj = None
    def testTypeRegistry(self):
        """The classid should map back to the Python class in the registry."""
        type_reg = PETSc.__type_registry__
        classid = self.obj.getClassId()
        typeobj = self.CLASS
        # DMDA instances are registered under their DM base class.
        if isinstance(self.obj, PETSc.DMDA):
            typeobj = PETSc.DM
        self.assertTrue(type_reg[classid] is typeobj )
    def testLogClass(self):
        """A log class created from the class name should share the classid."""
        name = self.CLASS.__name__
        logcls = PETSc.Log.Class(name)
        classid = self.obj.getClassId()
        self.assertEqual(logcls.id, classid)
    def testClass(self):
        self.assertTrue(isinstance(self.obj, self.CLASS))
        self.assertTrue(type(self.obj) is self.CLASS)
    def testNonZero(self):
        self.assertTrue(bool(self.obj))
    def testDestroy(self):
        """destroy() should leave the wrapper falsy."""
        self.assertTrue(bool(self.obj))
        self.obj.destroy()
        self.assertFalse(bool(self.obj))
        ## self.assertRaises(PETSc.Error, self.obj.destroy)
        ## self.assertTrue(self.obj.this is this)
    def testOptions(self):
        """Options prefixes can be set, replaced and consumed."""
        self.assertFalse(self.obj.getOptionsPrefix())
        prefix1 = 'my_'
        self.obj.setOptionsPrefix(prefix1)
        self.assertEqual(self.obj.getOptionsPrefix(), prefix1)
        prefix2 = 'opt_'
        self.obj.setOptionsPrefix(prefix2)
        self.assertEqual(self.obj.getOptionsPrefix(), prefix2)
        ## self.obj.appendOptionsPrefix(prefix1)
        ## self.assertEqual(self.obj.getOptionsPrefix(),
        ##                  prefix2 + prefix1)
        ## self.obj.prependOptionsPrefix(prefix1)
        ## self.assertEqual(self.obj.getOptionsPrefix(),
        ##                  prefix1 + prefix2 + prefix1)
        self.obj.setFromOptions()
    def testName(self):
        """setName/getName round-trip, then restore the original name."""
        oldname = self.obj.getName()
        newname = '%s-%s' %(oldname, oldname)
        self.obj.setName(newname)
        self.assertEqual(self.obj.getName(), newname)
        self.obj.setName(oldname)
        self.assertEqual(self.obj.getName(), oldname)
    def testComm(self):
        comm = self.obj.getComm()
        self.assertTrue(isinstance(comm, PETSc.Comm))
        self.assertTrue(comm in [PETSc.COMM_SELF, PETSc.COMM_WORLD])
    def testRefCount(self):
        """incRef/decRef adjust the count; the last decRef destroys."""
        self.assertEqual(self.obj.getRefCount(), 1)
        self.obj.incRef()
        self.assertEqual(self.obj.getRefCount(), 2)
        self.obj.incRef()
        self.assertEqual(self.obj.getRefCount(), 3)
        self.obj.decRef()
        self.assertEqual(self.obj.getRefCount(), 2)
        self.obj.decRef()
        self.assertEqual(self.obj.getRefCount(), 1)
        self.obj.decRef()
        self.assertFalse(bool(self.obj))
    def testHandle(self):
        """The C handle and Fortran handle agree while alive, vanish after destroy."""
        self.assertTrue(self.obj.handle)
        self.assertTrue(self.obj.fortran)
        h, f = self.obj.handle, self.obj.fortran
        # Only compare when both values carry the same sign.
        if (h>0 and f>0) or (h<0 and f<0):
            self.assertEqual(h, f)
        self.obj.destroy()
        self.assertFalse(self.obj.handle)
        self.assertFalse(self.obj.fortran)
    def testComposeQuery(self):
        """compose() attaches (bumping the refcount); composing None detaches."""
        self.assertEqual(self.obj.getRefCount(), 1)
        self.obj.compose('myobj', self.obj)
        self.assertTrue(type(self.obj.query('myobj')) is self.CLASS)
        self.assertEqual(self.obj.query('myobj'), self.obj)
        self.assertEqual(self.obj.getRefCount(), 2)
        self.obj.compose('myobj', None)
        self.assertEqual(self.obj.getRefCount(), 1)
        self.assertEqual(self.obj.query('myobj'), None)
    def testProperties(self):
        """Each property mirrors the corresponding getter."""
        self.assertEqual(self.obj.getClassId(), self.obj.classid)
        self.assertEqual(self.obj.getClassName(), self.obj.klass)
        self.assertEqual(self.obj.getType(), self.obj.type)
        self.assertEqual(self.obj.getName(), self.obj.name)
        self.assertEqual(self.obj.getComm(), self.obj.comm)
        self.assertEqual(self.obj.getRefCount(), self.obj.refcount)
    def testShallowCopy(self):
        """copy.copy shares the underlying object (refcount +1)."""
        import copy
        rc = self.obj.getRefCount()
        obj = copy.copy(self.obj)
        self.assertTrue(obj is not self.obj)
        self.assertTrue(obj == self.obj)
        self.assertTrue(type(obj) is type(self.obj))
        self.assertEqual(obj.getRefCount(), rc+1)
        del obj
        self.assertEqual(self.obj.getRefCount(), rc)
    def testDeepCopy(self):
        """copy.deepcopy yields an independent object, when supported."""
        self.obj.setFromOptions()
        import copy
        rc = self.obj.getRefCount()
        try:
            obj = copy.deepcopy(self.obj)
        except NotImplementedError:
            # Some classes cannot be deep-copied; that is acceptable here.
            return
        self.assertTrue(obj is not self.obj)
        self.assertTrue(obj != self.obj)
        self.assertTrue(type(obj) is type(self.obj))
        self.assertEqual(self.obj.getRefCount(), rc)
        self.assertEqual(obj.getRefCount(), 1)
        del obj
# --------------------------------------------------------------------
# One concrete test case per SLEPc object type; each inherits the full
# object-protocol suite from BaseTestObject.
class TestObjectST(BaseTestObject, unittest.TestCase):
    CLASS = SLEPc.ST
class TestObjectBV(BaseTestObject, unittest.TestCase):
    CLASS = SLEPc.BV
    # NOTE(review): deep copy appears unsupported for BV, so the inherited
    # test is disabled here — confirm.
    def testDeepCopy(self): pass
class TestObjectEPS(BaseTestObject, unittest.TestCase):
    CLASS = SLEPc.EPS
class TestObjectSVD(BaseTestObject, unittest.TestCase):
    CLASS = SLEPc.SVD
class TestObjectPEP(BaseTestObject, unittest.TestCase):
    CLASS = SLEPc.PEP
class TestObjectNEP(BaseTestObject, unittest.TestCase):
    CLASS = SLEPc.NEP
class TestObjectMFN(BaseTestObject, unittest.TestCase):
    CLASS = SLEPc.MFN
# --------------------------------------------------------------------
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
from sense_hat import SenseHat
import sys

# Set the whole Sense HAT LED matrix to the RGB colour given on the
# command line: usage `script.py R G B`.
sense = SenseHat()
if len(sys.argv) != 4:
    # BUG FIX: sys.error() does not exist (it raised AttributeError);
    # sys.exit(msg) prints the message to stderr and exits with status 1.
    sys.exit("Arguments r g b missing")
else:
    sense.clear((int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])))
| StarcoderdataPython |
3212213 | <filename>Django/Cycl/migrations/0002_auto_20190804_1641.py
# Generated by Django 2.2.3 on 2019-08-04 14:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the Cycl app.

    Adds default ordering to several models and replaces the single
    Country.code field with explicit ISO 3166-1 alpha-2, alpha-3 and
    numeric code fields (with placeholder defaults for existing rows).
    """
    dependencies = [
        ('Cycl', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='lineup',
            options={'ordering': ['rider']},
        ),
        migrations.AlterModelOptions(
            name='manage',
            options={'ordering': ['staff']},
        ),
        migrations.AlterModelOptions(
            name='rider',
            options={'ordering': ['lastName', 'firstName']},
        ),
        migrations.AlterModelOptions(
            name='staff',
            options={'ordering': ['lastName', 'firstName']},
        ),
        migrations.AlterModelOptions(
            name='team',
            options={'ordering': ['name']},
        ),
        migrations.RemoveField(
            model_name='country',
            name='code',
        ),
        migrations.AddField(
            model_name='country',
            name='alpha2Code',
            field=models.CharField(default='AB', max_length=2),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='country',
            name='alpha3Code',
            field=models.CharField(default='ABC', max_length=3),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='country',
            name='numericCode',
            field=models.IntegerField(default=1),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
3210997 | <reponame>mrkday/SATOSA
from unittest.mock import mock_open, patch
import pytest
from satosa.metadata_creation.description import ContactPersonDesc, UIInfoDesc, OrganizationDesc, MetadataDescription
class TestContactPersonDesc(object):
    """Tests for ContactPersonDesc serialization."""

    def test_to_dict(self):
        """Every assigned field should appear in the serialized dict."""
        person = ContactPersonDesc()
        person.contact_type = "test"
        person.given_name = "First"
        person.sur_name = "Tester"
        person.add_email_address("<EMAIL>")

        data = person.to_dict()
        assert data["contact_type"] == "test"
        assert data["given_name"] == "First"
        assert data["sur_name"] == "Tester"
        assert data["email_address"] == ["<EMAIL>"]
class TestUIInfoDesc(object):
    """Tests for UIInfoDesc serialization."""

    def test_to_dict(self):
        """Description, display name and logo all land under ui_info."""
        ui = UIInfoDesc()
        ui.add_description("test", "en")
        ui.add_display_name("my company", "en")
        ui.add_logo("logo.jpg", 80, 80, "en")

        info = ui.to_dict()["service"]["idp"]["ui_info"]
        assert info["description"] == [{"text": "test", "lang": "en"}]
        assert info["display_name"] == [{"text": "my company", "lang": "en"}]
        assert info["logo"] == [{"text": "logo.jpg", "width": 80, "height": 80, "lang": "en"}]

    def test_to_dict_for_logo_without_lang(self):
        """A logo without a language omits the lang key entirely."""
        ui = UIInfoDesc()
        ui.add_logo("logo.jpg", 80, 80, None)

        info = ui.to_dict()["service"]["idp"]["ui_info"]
        assert info["logo"] == [{"text": "logo.jpg", "width": 80, "height": 80}]

    def test_to_dict_with_empty(self):
        """With nothing added, serialization yields an empty dict."""
        assert UIInfoDesc().to_dict() == {}
class TestOrganizationDesc(object):
    """Tests for OrganizationDesc serialization."""

    def test_to_dict(self):
        """Display name, name and URL are serialized as (text, lang) tuples."""
        org = OrganizationDesc()
        org.add_display_name("Foo Testing", "en")
        org.add_name("Testing Co.", "en")
        org.add_url("https://test.example.com", "en")

        info = org.to_dict()["organization"]
        assert info["display_name"] == [("Foo Testing", "en")]
        assert info["name"] == [("Testing Co.", "en")]
        assert info["url"] == [("https://test.example.com", "en")]

    def test_to_dict_with_empty(self):
        """With nothing added, serialization yields an empty dict."""
        assert OrganizationDesc().to_dict() == {}
class TestMetadataDescription(object):
    """Tests for MetadataDescription: serialization and input validation."""

    def test_to_dict(self):
        """Organization, contact person and UI info all serialize through."""
        org = OrganizationDesc()
        org.add_display_name("Foo Testing", "en")
        org.add_name("Testing Co.", "en")
        org.add_url("https://test.example.com", "en")

        contact = ContactPersonDesc()
        contact.contact_type = "test"
        contact.given_name = "First"
        contact.sur_name = "Tester"
        contact.add_email_address("<EMAIL>")

        ui = UIInfoDesc()
        ui.add_description("test", "en")
        ui.add_display_name("my company", "en")
        ui.add_logo("http://example.com/logo.jpg", 80, 80, "en")

        md = MetadataDescription("my_entity")
        md.organization = org
        md.add_contact_person(contact)
        md.ui_info = ui

        result = md.to_dict()
        assert result["entityid"] == "my_entity"
        assert result["organization"]
        assert result["contact_person"]
        assert result["service"]["idp"]["ui_info"]

    def test_set_organization_rejects_bad_input(self):
        md = MetadataDescription("my_entity")
        with pytest.raises(TypeError):
            md.organization = "bad input"

    def test_add_contact_person_rejects_bad_input(self):
        md = MetadataDescription("my_entity")
        with pytest.raises(TypeError):
            md.add_contact_person("bad input")

    def test_set_ui_info_rejects_bad_input(self):
        md = MetadataDescription("my_entity")
        with pytest.raises(TypeError):
            md.ui_info = "bad input"
| StarcoderdataPython |
4806947 | <filename>tutorial_edge/loop_event/simple_coroutine.py
import asyncio
# Define a simple coroutine that just prints a message when awaited.
# (It takes no arguments — the old comment about a future was wrong.)
async def myCoroutine():
    print("My Coroutine")
# Spin up a quick and simple event loop and run until completed.
# Use new_event_loop() explicitly: get_event_loop() is deprecated for
# creating a loop when none is running.
loop = asyncio.new_event_loop()
try:
    loop.run_until_complete(myCoroutine())
finally:
    # Always close the loop, even if the coroutine raised.
    loop.close()
| StarcoderdataPython |
191840 | <filename>utils/py2.py<gh_stars>1-10
from PyQt4 import QtGui, QtCore
class RenderManagement(QtGui.QWidget):
    """Vertical stack of LightItem widgets that can be reordered with arrow keys."""
    def __init__(self):
        super(RenderManagement, self).__init__()
        self.v_layout = QtGui.QVBoxLayout(self)
        # Create 5 dynamic items
        for i in range(5):
            item = LightItem()
            item.setTitle("LightItem%d" % i)
            self.v_layout.addWidget(item)
            # watch for the moveRequested signal on each item
            item.moveRequested.connect(self.moveLightItem)
    def moveLightItem(self, direction):
        """Move the emitting LightItem one slot up or down in the layout.

        *direction* is a Qt key code (Key_Up or Key_Down).
        """
        # the sender should always be a LightItem instance
        obj = self.sender()
        print(("Move LightItem %s in direction %s" % (obj, direction)))
        # what is the current index of the widget in the layout?
        idx = self.v_layout.indexOf(obj)
        if idx == -1:
            print("Widget is not in the layout:", obj)
            return
        if direction == QtCore.Qt.Key_Up:
            # next index down (clamped at the top)
            idx = max(idx-1, 0)
        elif direction == QtCore.Qt.Key_Down:
            # next index up (clamped at the bottom)
            idx = min(idx+1, self.v_layout.count()-1)
        else:
            print("Not a key up or down")
            return
        # insertWidget moves the widget to its new index in the layout
        self.v_layout.insertWidget(idx, obj)
class LightItem(QtGui.QGroupBox):
    """Group box with a line edit; emits moveRequested on Up/Down key presses."""
    # custom signal, emitting the Qt.Key_X code of the pressed arrow key
    moveRequested = QtCore.pyqtSignal(int)
    def __init__(self):
        super(LightItem, self).__init__()
        # Generic layout of widgets
        self.v_layout = QtGui.QVBoxLayout(self)
        self.v_layout.setMargin(2)
        self.splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)
        self.left = QtGui.QGroupBox('Left')
        self.lineEdit = QtGui.QLineEdit()
        self.left_layout = QtGui.QVBoxLayout(self.left)
        self.left_layout.addWidget(self.lineEdit)
        self.right = QtGui.QGroupBox('Right')
        self.splitter.addWidget(self.left)
        self.splitter.addWidget(self.right)
        self.v_layout.addWidget(self.splitter)
        # have LightItem watch all events on the QLineEdit,
        # so that we do not have to subclass QLineEdit
        self.lineEdit.installEventFilter(self)
    def eventFilter(self, obj, event):
        """Intercept Up/Down key presses on the line edit; pass everything else through."""
        # Only watch for specific Key presses on the QLineEdit
        # Everything else is pass-thru
        if obj is self.lineEdit and event.type() == event.KeyPress:
            key = event.key()
            if key in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down):
                print("Emitting moveRequested() for obj", obj)
                self.moveRequested.emit(event.key())
                # Returning True consumes the event (no further handling).
                return True
        return False
if __name__ == "__main__":
app = QtGui.QApplication([])
manager = RenderManagement()
manager.resize(800,600)
manager.show()
app.exec_()
| StarcoderdataPython |
132109 | from django import forms
from Apis.canino.models import Perro
class PerroForm(forms.ModelForm):
    """ModelForm for creating/editing Perro (dog) records.

    Labels are user-facing Spanish strings; widgets add Bootstrap styling
    via the form-control CSS class.
    """
    # TODO: Define other fields here
    class Meta:
        model = Perro
        fields = [
            'nombre',
            'sexo',
            'raza',
            'edad',
            'rescate',
            'adoptante',
            'vacuna',
        ]
        labels = {
            'nombre':'Nombre',
            'sexo':'Sexo',
            'raza':'Raza',
            'edad':'Edad',
            'rescate':'fecha de rescate o ingreso',
            'adoptante': 'Adoptante',
            'vacuna':'Vacunas',
        }
        widgets = {
            'nombre':forms.TextInput(attrs={'class':'form-control'}),
            'sexo':forms.Select(attrs={'class': 'form-control'}),
            'raza':forms.Select(attrs={'class':'form-control'}),
            'edad':forms.TextInput(attrs={'class':'form-control'}),
            'rescate':forms.TextInput(attrs={'class':'form-control'}),
            'adoptante':forms.Select(attrs={'class':'form-control'}),
            'vacuna':forms.CheckboxSelectMultiple(),
        }
| StarcoderdataPython |
3372779 | <gh_stars>0
import datetime
from collections import OrderedDict
from unittest import TestCase
from .collect_information import (
deduplicate_msisdns,
get_addresses,
process_change,
process_identity,
process_optout,
process_registration,
process_subscription,
)
class GetAddressesTests(TestCase):
    """Tests for get_addresses: msisdn validation, normalisation and filtering."""
    def test_ignore_invalid_numbers(self):
        """
        Ignores invalid numbers
        """
        self.assertEqual(get_addresses({"msisdn": {"abc": {}, "123": {}}}), [])
    def test_normalises_numbers(self):
        """
        If a number is not in E164 format, it should be converted
        """
        self.assertEqual(
            get_addresses({"msisdn": {"0820001001": {}}}), ["+27820001001"]
        )
    def test_default(self):
        """
        If one of the addresses is default, only that address should be returned
        """
        self.assertEqual(
            get_addresses(
                {
                    "msisdn": {
                        "+27820001001": {},
                        "+27820001002": {"default": True},
                        "+27820001003": {},
                    }
                }
            ),
            ["+27820001002"],
        )
    def test_optout(self):
        """
        If an address is opted out, it shouldn't be returned
        """
        self.assertEqual(
            get_addresses(
                {"msisdn": {"+27820001001": {"optedout": True}, "+27820001002": {}}}
            ),
            ["+27820001002"],
        )
    def test_multiple(self):
        """
        If there are multiple valid addresses, they should all be returned
        """
        self.assertEqual(
            get_addresses({"msisdn": {"+27820001001": {}, "+27820001002": {}}}),
            ["+27820001001", "+27820001002"],
        )
class ProcessIdentityTests(TestCase):
    """Tests for process_identity: extraction of identity fields into the dict."""
    def test_all_identity_details_stored(self):
        """
        All the relevant details on the identity should be stored in the dictionary
        """
        identities = {}
        process_identity(
            identities,
            "identity-uuid",
            {
                "addresses": {"msisdn": {"+27820001001": {}}},
                "operator_id": "operator_uuid",
                "passport_no": "A12345",
                "passport_origin": "zw",
                "consent": True,
                "sa_id_no": "123456",
                "mom_given_name": "<NAME>",
                "mom_family_name": "Test family name",
                "faccode": "123456",
                "id_type": "sa_id",
                "lang_code": "zul_ZA",
                "pmtct": {"risk_status": "high"},
                "mom_dob": "1990-01-01",
            },
            2,
        )
        self.assertEqual(
            identities,
            {
                "identity-uuid": {
                    "msisdns": ["+27820001001"],
                    "operator_id": "operator_uuid",
                    "passport_no": "A12345",
                    "passport_origin": "zw",
                    "consent": True,
                    "sa_id_no": "123456",
                    "mom_given_name": "<NAME>",
                    "mom_family_name": "Test family name",
                    "faccode": "123456",
                    "id_type": "sa_id",
                    "language": "zul",
                    "pmtct_risk": "high",
                    "mom_dob": "1990-01-01",
                    "uuid": "identity-uuid",
                    "failed_msgs_count": 2,
                }
            },
        )
    def test_no_fields(self):
        """
        It should handle missing fields
        """
        identities = {}
        process_identity(
            identities,
            "identity-uuid",
            {"addresses": {"msisdn": {"+27820001001": {}}}},
            2,
        )
        self.assertEqual(
            identities,
            {
                "identity-uuid": {
                    "msisdns": ["+27820001001"],
                    "uuid": "identity-uuid",
                    "failed_msgs_count": 2,
                }
            },
        )
class ProcessOptOutTests(TestCase):
    """Tests for process_optout: only the newest optout wins."""
    def test_optout_added(self):
        """
        Adds the optout to the identity if there is none
        """
        identities = {"identity-uuid": {"uuid": "identity-uuid"}}
        process_optout(
            identities, "identity-uuid", datetime.datetime(2020, 1, 1), "babyloss"
        )
        self.assertEqual(
            identities,
            {
                "identity-uuid": {
                    "uuid": "identity-uuid",
                    "optout_timestamp": "2020-01-01T00:00:00",
                    "optout_reason": "babyloss",
                }
            },
        )
    def test_replace_optout(self):
        """
        If this optout is newer than the one on the identity, it should be replaced
        """
        identities = {
            "identity-uuid": {
                "uuid": "identity-uuid",
                "optout_timestamp": "2020-01-01T00:00:00",
            }
        }
        process_optout(
            identities, "identity-uuid", datetime.datetime(2020, 1, 2), "babyloss"
        )
        self.assertEqual(
            identities,
            {
                "identity-uuid": {
                    "uuid": "identity-uuid",
                    "optout_timestamp": "2020-01-02T00:00:00",
                    "optout_reason": "babyloss",
                }
            },
        )
    def test_skip_optout(self):
        """
        If this optout is older than the one on the identity, it shouldn't be replaced
        """
        identities = {
            "identity-uuid": {
                "uuid": "identity-uuid",
                "optout_timestamp": "2020-01-02T00:00:00",
            }
        }
        process_optout(
            identities, "identity-uuid", datetime.datetime(2020, 1, 1), "babyloss"
        )
        self.assertEqual(
            identities,
            {
                "identity-uuid": {
                    "uuid": "identity-uuid",
                    "optout_timestamp": "2020-01-02T00:00:00",
                }
            },
        )
class ProcessRegistrationTests(TestCase):
    """Tests for process_registration: registration fields merge into the
    identity without overwriting values that are already present."""
    def test_all_fields(self):
        """
        It should extract the relevant fields from the registration onto the identity
        """
        identities = {
            "identity-uuid": {"uuid": "identity-uuid", "msisdns": ["+27820001001"]}
        }
        process_registration(
            identities,
            "identity-uuid",
            {
                "edd": "2020-01-01",
                "faccode": "12345",
                "id_type": "sa_id",
                "mom_dob": "1990-01-01",
                "mom_given_name": "test name",
                "mom_family_name": "test family name",
                "uuid_device": "identity-uuid",
                "passport_no": "A12345",
                "passport_origin": "zw",
                "sa_id_no": "123456",
                "consent": True,
                "baby_dob": "2020-01-02",
                "language": "zul_ZA",
            },
        )
        self.assertEqual(
            identities,
            {
                "identity-uuid": {
                    "uuid": "identity-uuid",
                    "msisdns": ["+27820001001"],
                    "edd": "2020-01-01",
                    "faccode": "12345",
                    "id_type": "sa_id",
                    "mom_dob": "1990-01-01",
                    "mom_given_name": "test name",
                    "mom_family_name": "test family name",
                    "passport_no": "A12345",
                    "passport_origin": "zw",
                    "sa_id_no": "123456",
                    "consent": True,
                    "baby_dobs": ["2020-01-02"],
                    "language": "zul",
                    "msisdn_device": "+27820001001",
                }
            },
        )
    def test_no_overwrite(self):
        """
        Should not overwrite existing fields
        """
        identities = {
            "identity-uuid": {
                "uuid": "identity-uuid",
                "msisdns": ["+27820001001"],
                "edd": "2020-01-01",
                "faccode": "12345",
                "id_type": "sa_id",
                "mom_dob": "1990-01-01",
                "mom_given_name": "<NAME>",
                "mom_family_name": "test family name",
                "msisdn_device": "+27820001002",
                "passport_no": "A12345",
                "passport_origin": "zw",
                "sa_id_no": "123456",
                "consent": True,
                "baby_dobs": ["2020-01-02"],
                "language": "zul",
            }
        }
        process_registration(
            identities,
            "identity-uuid",
            {
                "edd": "2020-01-02",
                "faccode": "12346",
                "id_type": "passport",
                "mom_dob": "1990-01-02",
                "mom_given_name": "<NAME>2",
                "mom_family_name": "test family name2",
                "uuid_device": "identity-uuid",
                "passport_no": "A12346",
                "passport_origin": "mw",
                "sa_id_no": "123457",
                "consent": False,
                "baby_dob": "2020-01-04",
                "language": "xho_ZA",
            },
        )
        self.assertEqual(
            identities,
            {
                "identity-uuid": {
                    "uuid": "identity-uuid",
                    "msisdns": ["+27820001001"],
                    "edd": "2020-01-01",
                    "faccode": "12345",
                    "id_type": "sa_id",
                    "mom_dob": "1990-01-01",
                    "mom_given_name": "test name",
                    "mom_family_name": "test family name",
                    "passport_no": "A12345",
                    "passport_origin": "zw",
                    "sa_id_no": "123456",
                    "consent": True,
                    "baby_dobs": ["2020-01-02", "2020-01-04"],
                    "language": "zul",
                    "msisdn_device": "+27820001002",
                }
            },
        )
    def test_no_fields(self):
        """
        Should handle missing fields
        """
        identities = {"identity-uuid": {"uuid": "identity-uuid"}}
        process_registration(identities, "identity-uuid", {})
        self.assertEqual(identities, {"identity-uuid": {"uuid": "identity-uuid"}})
class ProcessChangeTests(TestCase):
    """Tests for process_change: optout and baby_switch actions only."""
    def test_unknown_action(self):
        """
        We should skip processing changes that are not optout or baby_switch
        """
        identities = {"identity-uuid": {"uuid": "identity-uuid"}}
        process_change(
            identities,
            "identity-uuid",
            "unknown-action",
            {},
            datetime.datetime(2020, 1, 1),
        )
        self.assertEqual(identities, {"identity-uuid": {"uuid": "identity-uuid"}})
    def test_optout(self):
        """
        If there is no optout, one should be added
        """
        identities = {"identity-uuid": {"uuid": "identity-uuid"}}
        process_change(
            identities,
            "identity-uuid",
            "momconnect_nonloss_optout",
            {"reason": "unknown"},
            datetime.datetime(2020, 1, 1),
        )
        self.assertEqual(
            identities,
            {
                "identity-uuid": {
                    "uuid": "identity-uuid",
                    "optout_timestamp": "2020-01-01T00:00:00",
                    "optout_reason": "unknown",
                }
            },
        )
    def test_optout_overwrite(self):
        """
        If the optout is newer, it should overwrite
        """
        identities = {"identity-uuid": {"optout_timestamp": "2019-01-01T00:00:00"}}
        process_change(
            identities,
            "identity-uuid",
            "momconnect_nonloss_optout",
            {},
            datetime.datetime(2020, 1, 1),
        )
        self.assertEqual(
            identities, {"identity-uuid": {"optout_timestamp": "2020-01-01T00:00:00"}}
        )
    def test_optout_no_overwrite(self):
        """
        If the optout is older, it should not overwrite
        """
        identities = {"identity-uuid": {"optout_timestamp": "2020-01-01T00:00:00"}}
        process_change(
            identities,
            "identity-uuid",
            "momconnect_nonloss_optout",
            {},
            datetime.datetime(2019, 1, 1),
        )
        self.assertEqual(
            identities, {"identity-uuid": {"optout_timestamp": "2020-01-01T00:00:00"}}
        )
    def test_babyswitch_create(self):
        """
        Should create the baby dob list if not exists
        """
        identities = {"identity-uuid": {"uuid": "identity-uuid"}}
        process_change(
            identities,
            "identity-uuid",
            "baby_switch",
            {},
            datetime.datetime(2019, 1, 1),
        )
        self.assertEqual(
            identities,
            {
                "identity-uuid": {
                    "uuid": "identity-uuid",
                    "baby_dobs": ["2019-01-01T00:00:00"],
                }
            },
        )
    def test_babyswitch_add(self):
        """
        Should add to the baby dob list if exists
        """
        identities = {"identity-uuid": {"baby_dobs": ["2020-01-01T00:00:00"]}}
        process_change(
            identities,
            "identity-uuid",
            "baby_switch",
            {},
            datetime.datetime(2019, 1, 1),
        )
        self.assertEqual(
            identities,
            {
                "identity-uuid": {
                    "baby_dobs": ["2020-01-01T00:00:00", "2019-01-01T00:00:00"]
                }
            },
        )
class ProcessSubscriptionTests(TestCase):
    """Tests for process_subscription: channel preference and per-type flags."""
    def test_channel_prefer_whatsapp(self):
        """
        Should set the channel, but never overwrite WhatsApp with SMS
        """
        identities = {"identity-uuid": {"uuid": "identity-uuid"}}
        timestamp = datetime.datetime(2020, 1, 1)
        process_subscription(identities, "identity-uuid", "momconnect", timestamp)
        self.assertEqual(
            identities, {"identity-uuid": {"uuid": "identity-uuid", "channel": "SMS"}}
        )
        process_subscription(
            identities, "identity-uuid", "whatsapp_momconnect", timestamp
        )
        self.assertEqual(
            identities,
            {"identity-uuid": {"uuid": "identity-uuid", "channel": "WhatsApp"}},
        )
        process_subscription(identities, "identity-uuid", "momconnect", timestamp)
        self.assertEqual(
            identities,
            {"identity-uuid": {"uuid": "identity-uuid", "channel": "WhatsApp"}},
        )
    def test_subscription_types(self):
        """
        Should add to the subscription list depending on the name
        """
        identities = {"identity-uuid": {"uuid": "identity-uuid"}}
        timestamp = datetime.datetime(2020, 1, 1)
        process_subscription(
            identities, "identity-uuid", "pmtct_prebirth.hw_full.1", timestamp
        )
        self.assertEqual(identities["identity-uuid"]["pmtct_messaging"], "TRUE")
        process_subscription(
            identities, "identity-uuid", "loss_miscarriage.patient.1", timestamp
        )
        self.assertEqual(identities["identity-uuid"]["loss_messaging"], "TRUE")
        self.assertEqual(identities["identity-uuid"]["optout_reason"], "miscarriage")
        self.assertEqual(
            identities["identity-uuid"]["optout_timestamp"], "2020-01-01T00:00:00"
        )
        process_subscription(
            identities, "identity-uuid", "momconnect_prebirth.hw_partial.1", timestamp
        )
        self.assertEqual(identities["identity-uuid"]["public_messaging"], "TRUE")
        self.assertEqual(
            identities["identity-uuid"]["public_registration_date"],
            "2020-01-01T00:00:00",
        )
        process_subscription(
            identities, "identity-uuid", "momconnect_prebirth.hw_full.3", timestamp
        )
        self.assertEqual(identities["identity-uuid"]["prebirth_messaging"], "3")
        process_subscription(
            identities, "identity-uuid", "momconnect_postbirth.hw_full.2", timestamp
        )
        self.assertEqual(identities["identity-uuid"]["postbirth_messaging"], "TRUE")
class DeduplicateMSISDNsTests(TestCase):
    """Tests for deduplicate_msisdns: identities sharing an msisdn are merged."""
    def test_info_combined(self):
        """
        If there are 2 identities with the same msisdn, their info should be combined
        """
        self.assertEqual(
            deduplicate_msisdns(
                OrderedDict(
                    (
                        (
                            "identity1",
                            {
                                "msisdns": ["+27820001001"],
                                "item1": "value1",
                                "listitem": ["list1"],
                            },
                        ),
                        (
                            "identity2",
                            {
                                "msisdns": ["+27820001001"],
                                "item2": "value2",
                                "listitem": ["list2"],
                            },
                        ),
                    )
                )
            ),
            {
                "+27820001001": {
                    "msisdn": "+27820001001",
                    "item1": "value1",
                    "item2": "value2",
                    "listitem": ["list1", "list2"],
                }
            },
        )
| StarcoderdataPython |
4837332 | import logging
from hdx.scraper.base_scraper import BaseScraper
from hdx.utilities.dictandlist import dict_of_lists_add
from hdx.utilities.text import get_numeric_if_possible
logger = logging.getLogger(__name__)
class CovaxDeliveries(BaseScraper):
    """Scraper for COVAX vaccine-delivery data, aggregated per country.

    Produces three parallel "national" value columns (Vaccine/producer,
    Funder, Doses), each stored as a '|'-joined string per country ISO3.
    """

    def __init__(self, datasetinfo, countryiso3s):
        # Register this scraper's name, dataset info and the output
        # headers with their HXL tags on the base scraper.
        super().__init__(
            "covax_deliveries",
            datasetinfo,
            {
                "national": (
                    ("Vaccine", "Funder", "Doses"),
                    (
                        "#meta+vaccine+producer",
                        "#meta+vaccine+funder",
                        "#capacity+vaccine+doses",
                    ),
                )
            },
        )
        # Countries in scope; rows for any other ISO3 code are ignored.
        self.countryiso3s = countryiso3s

    def run(self) -> None:
        """Read the dataset and fill per-country producer/funder/doses values."""
        headers, iterator = self.get_reader().read(self.datasetinfo)
        # The first data row carries the HXL tags; it is used to remap the
        # remaining rows from header names to HXL tag keys.
        hxlrow = next(iterator)
        doses_lookup = dict()
        for row in iterator:
            newrow = dict()
            for key in row:
                newrow[hxlrow[key]] = row[key]
            countryiso = newrow["#country+code"]
            if not countryiso or countryiso not in self.countryiso3s:
                continue
            # Accumulate doses per (country, pipeline, producer, funder).
            key = f'{countryiso}|{newrow["#meta+vaccine+pipeline"]}|{newrow["#meta+vaccine+producer"]}|{newrow["#meta+vaccine+funder"]}'
            nodoses = get_numeric_if_possible(newrow["#capacity+vaccine+doses"])
            if nodoses:
                doses_lookup[key] = doses_lookup.get(key, 0) + nodoses
        producers = self.get_values("national")[0]
        funders = self.get_values("national")[1]
        doses = self.get_values("national")[2]
        for key in sorted(doses_lookup):
            countryiso, pipeline, producer, funder = key.split("|")
            # COVAX-pipeline rows display the funder as "COVAX/<funder>".
            if pipeline == "COVAX":
                funder = f"{pipeline}/{funder}"
            dict_of_lists_add(producers, countryiso, producer)
            dict_of_lists_add(funders, countryiso, funder)
            dict_of_lists_add(doses, countryiso, str(doses_lookup[key]))
        # Collapse the per-country lists into '|'-separated strings
        # (producers/funders/doses share the same country keys).
        for countryiso in funders:
            producers[countryiso] = "|".join(producers[countryiso])
            funders[countryiso] = "|".join(funders[countryiso])
            doses[countryiso] = "|".join(doses[countryiso])
| StarcoderdataPython |
1710509 | <filename>scraper.py
import urllib.request
import re
import webbrowser
from bs4 import BeautifulSoup
from functools import partial
import fire
class HNScraper(object):
    """Scrapes a Hacker News thread for job comments matching given filters."""

    def __show_in_browser(self, results):
        """Write *results* to a temporary HTML file and open it in a browser."""
        output_file_path = '/tmp/jobs.html'
        # 'w' truncates any previous run's output; read access ('w+') was unused.
        with open(output_file_path, 'w') as f:
            f.write("\n<hr>".join(results))
        webbrowser.open('file://' + output_file_path)

    def __get_comment_text(self, comment_html):
        """Flatten a parsed comment node into a single newline-joined string."""
        return "\n".join(map(str, comment_html.span.contents))

    def __filter_attribute(self, attributes, comment):
        """Return True if any pattern in *attributes* matches *comment* (case-insensitive)."""
        return any(re.search(attribute, comment, re.IGNORECASE) for attribute in attributes)

    def __filter_matches(self, comments_list, attributes):
        """Lazily keep only the comments matching at least one attribute."""
        return filter(partial(self.__filter_attribute, attributes), comments_list)

    def scrape(self, url, locations=None, occupations=None):
        """Fetch *url*, extract its comments and show those matching both filters.

        :param url: HN thread URL to scrape.
        :param locations: location patterns; defaults to ['new york'].
        :param occupations: occupation patterns; defaults to
            ['Data science', 'Data scientist'].
        """
        # None sentinels avoid mutable default arguments while keeping the
        # original default filter values.
        if locations is None:
            locations = ['new york']
        if occupations is None:
            occupations = ['Data science', 'Data scientist']
        html = urllib.request.urlopen(url).read()
        soup = BeautifulSoup(html, 'html.parser')
        comments = list(
            map(self.__get_comment_text, soup.find_all('div', class_='comment')))
        occupation_matches = self.__filter_matches(comments, occupations)
        location_occupation_matches = self.__filter_matches(
            occupation_matches, locations)
        results = list(location_occupation_matches)
        if results:
            self.__show_in_browser(results)
        else:
            print('No results')
# Expose HNScraper's public methods as a command-line interface via python-fire.
if __name__ == '__main__':
    fire.Fire(HNScraper)
| StarcoderdataPython |
131622 | <filename>1/q6_expectation_maximization_python/parameters.py
def parameters():
    """Return the EM / skin-detection hyper-parameters as a 7-tuple.

    Order: (epsilon, K, n_iter, skin_n_iter, skin_epsilon, skin_K, theta).
    """
    # General EM settings: regularization, number of clusters, iterations.
    em_settings = (0.0001, 3, 5)
    # Skin-detection EM settings: iterations, regularization, clusters.
    skin_settings = (5, 0.0001, 3)
    # Decision threshold for classifying a pixel as skin.
    threshold = 2.0
    return em_settings + skin_settings + (threshold,)
| StarcoderdataPython |
114718 | #########################################################
#讀取檔案
#########################################################
#csv file
import csv
import os
# Resolve ../data/raw/ relative to this script so the CSV is found no matter
# what the current working directory is.
script_dir = os.path.dirname(__file__)
rel_path = "../data/raw/"
abs_file_path = os.path.join(script_dir, rel_path)

# Open the CSV file; the context manager guarantees it is closed even if
# iterating/printing raises (the original leaked the handle on error).
with open(os.path.join(abs_file_path, "test.csv"), 'r') as f:
    # Print every row (csv.reader yields each row as a list of strings).
    for row in csv.reader(f):
        print(row)

# Alternative data source kept for reference (unused):
# filelocation = "D:\\1-Project\\2018\\1-HLC\\3-Data\\DATA\\0828\\Student_Info_Dist.csv"
# ecod = 'big5'

#########################################################
# Random Forest parameters (kept for reference, unused here)
#########################################################
# RF_testsize = 0.2
# n_estimators = 200  # number of trees
# min_samples_split = 40
# max_depth = 15
# min_samples_leaf = 10
38944 |
from .changemanager_base import BaseChangeManager
from ..utils.psdict import PsDict
from ..table.tablechanges import TableChanges
from .slot import Slot
import copy
class DictChangeManager(BaseChangeManager):
    """
    Manage changes that occurred in a PsDict between runs.

    A shallow snapshot of the dict is kept after each update so the next
    run can be diffed against it to derive created/updated/deleted keys.
    """
    def __init__(self,
                 slot,
                 buffer_created=True,
                 buffer_updated=True,
                 buffer_deleted=True,
                 buffer_exposed=False,
                 buffer_masked=False):
        super(DictChangeManager, self).__init__(
            slot,
            buffer_created,
            buffer_updated,
            buffer_deleted,
            buffer_exposed,
            buffer_masked)
        # Snapshot of the dict from the previous update; None until first run.
        self._last_dict = None
        data = slot.data()
        # Lazily attach a change-tracking structure to the slot's data.
        if data.changes is None:
            data.changes = TableChanges()
    def reset(self, name=None):
        """Reset buffered changes and drop the previous-run snapshot."""
        super(DictChangeManager, self).reset(name)
        self._last_dict = None
    def update(self, run_number, data, mid):
        # pylint: disable=unused-argument
        assert isinstance(data, PsDict)
        # Skip if this run was already processed (run_number 0 always runs).
        if data is None or (run_number != 0 and
                            run_number <= self._last_update):
            return
        data.fix_indices()
        last_dict = self._last_dict
        if last_dict is None:
            # First run: every key counts as created.
            data.changes.add_created(data.ids)
        else:
            # Diff against the previous snapshot to classify keys.
            data.changes.add_created(data.new_indices(last_dict))
            data.changes.add_updated(data.updated_indices(last_dict))
            data.changes.add_deleted(data.deleted_indices(last_dict))
        changes = data.compute_updates(self._last_update, run_number, mid)
        # NOTE(review): copy.copy is shallow — mutable values are shared with
        # the snapshot; presumably updated_indices compares keys/identity, so
        # this is intentional — confirm.
        self._last_dict = copy.copy(data)
        self._last_update = run_number
        self._row_changes.combine(changes,
                                  self.created.buffer,
                                  self.updated.buffer,
                                  self.deleted.buffer)
# Register this manager so slots carrying a PsDict pick it up automatically.
Slot.add_changemanager_type(PsDict, DictChangeManager)
| StarcoderdataPython |
3202840 | <reponame>ZaoLahma/DockerizeMe
class ServiceDiscoveryCtxt:
    """Holds service-discovery context shared as class-level state."""
    # (address, port) pair; (None, None) until configured by the application.
    multicast_address = (None, None)
1685220 | from alegra.resources import Contact
from alegra.resources import Invoice
from alegra.resources import Item
from alegra.resources import Retention
from alegra.resources import Tax
# API credentials; expected to be set by the application before use.
user = None
token = None
# Base endpoint and API version used to build Alegra request URLs.
api_base = "https://api.alegra.com/api"
api_version = "v1"
1798347 | <reponame>WANGOMES/visors
'''
#################################################################################################
AUTOR: <NAME>
TRABALHO ACADEMICO: VIGILANCIA SOCIOASSISTENCIAL: MONITORAMENTO DE RISCOS E VULNERABILIDADES EM
TEMPO REAL POR MEIO DE MINERAÇÃO DE TEXTO NO TWITTER
UNIVERSIDADE: PONTIFÍCIA UNIVERSIDADE CATÓLICA DE MINAS GERAIS - PUCMINAS (UNID. SÃO GABRIEL)
CIDADE: BELO HORIZONTE / MG - BRASIL ANO: 2020
NOME PROTOTIPO: VISORS - VIGILANCIA SOCIOASSISTENCIAL EM REDES SOCIAIS
PALAVRAS-CHAVE: Vigilância Socioassistencial. Monitoramento em Tempo Real. Mineração de Dados.
Mineração de Texto.
#################################################################################################
'''
# ================================== ATENÇÃO ==========================================
# Para utilização desta aplicação é necessário obter as bibliotecas abaixo:
# sklearn - Disponível em <https://scikit-learn.org/stable/index.html>
# nltk - Disponível em <https://www.nltk.org/>
# pandas - Disponível em <https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html>
# numpy - Disponível em <https://numpy.org/install/>
# matplotlib - Disponível em <https://matplotlib.org/3.3.3/users/installing.html>
# time - Disponível em <https://pypi.org/project/times/>
# pickle - Disponível em <https://pypi.org/project/pickle5/>
# wordcloud - Disponível em <https://pypi.org/project/wordcloud/>
# pymysql - Disponível em <https://pypi.org/project/PyMySQL/>
# tweepy - Disponível em <http://docs.tweepy.org/en/latest/install.html>
# ================================================================================================
from tweepy import Stream, OAuthHandler
from tweepy.streaming import StreamListener
import pymysql
import threading
import time
from datetime import datetime
import json
from twitter_credenciais import User_password
from interface import interface_terminal as terminal
# Module-level MySQL connection/cursor shared by the stream listener to
# persist tweets into the 'tw' database.
# NOTE(review): credentials are hard-coded ('root', empty password) — move
# them to configuration/environment before deploying.
conn = pymysql.connect("localhost","root","", "tw")
c = conn.cursor()
class listener(StreamListener):
    """Tweepy stream listener that persists incoming tweets to MySQL.

    Stops the stream once the wall-clock deadline (``futuro``) has passed.
    """
    def __init__(self, _futuro):
        # Unix timestamp (seconds) after which the stream should stop.
        self.futuro = _futuro
        # Number of tweets stored so far.
        self.contador = 0
    def on_data(self, data):
        all_data = json.loads(data)
        # Twitter emits '{"limit": ...}' notices when matches exceed the
        # streaming rate; only real tweet payloads are stored.
        if((json.dumps(all_data).startswith('{"limit":')==False)):
            self.contador += 1
            id_tweet = all_data["id"]
            source = all_data["source"]
            user_id = all_data["user"]["id"]
            username = all_data["user"]["screen_name"]
            user_url = all_data["user"]["url"]
            user_description = all_data["user"]["description"]
            user_local = all_data["user"]["location"]
            date_tweet = all_data["created_at"]
            # geo/coordinates/place are serialized to JSON strings when
            # present; None is stored as NULL.
            if(all_data["geo"] != None):
                geo = json.dumps(all_data["geo"])
            elif(all_data["geo"] == None):
                geo = all_data["geo"]
            if(all_data["coordinates"] != None):
                coordinates = json.dumps(all_data["coordinates"])
            elif(all_data["coordinates"] == None):
                coordinates = all_data["coordinates"]
            tweet = all_data["text"]
            if(all_data["place"] != None):
                place = json.dumps(all_data["place"])
            elif(all_data["place"] == None):
                place = all_data["place"]
            # Parameterized insert via the module-level connection/cursor.
            c.execute("INSERT INTO tweet_tb (id_tweet, source, user_id, username, user_url, user_description, user_local, date_tweet, geo, coordinates, tweet, place) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
                (id_tweet, source, user_id, username, user_url, user_description, user_local, date_tweet, geo, coordinates, tweet, place))
            conn.commit()
            print(username,tweet)
            # Returning False tells tweepy to disconnect the stream.
            if time.time() > self.futuro:
                terminal.Mensagem('TOTAL TWEETS: %s' % self.contador, 'w')
                return False
            else:
                return True
        elif(json.dumps(all_data).startswith('{"limit":')):
            print('\n'+'*' * 30 + ' Dict Json ' + '*' * 30 + '\n')
    def on_error(self, status):
        # HTTP 420: rate-limited by the streaming API; stop reconnecting.
        if status == 420:
            # Return False when on_data exceeds the API rate limit.
            print (status)
            terminal.Mensagem('TOTAL TWEETS: %s' % self.contador, 'w')
            return False
        # Other errors: log and fall through (implicit None keeps streaming).
        print (status)
        terminal.Mensagem('TOTAL TWEETS: %s' % self.contador, 'w')
class Extractor():
    """Runs the Twitter stream collection for a fixed number of seconds."""
    def __init__(self, tempo_segundos):
        terminal.Mensagem('Iniciando em Extrator','d')
        inicio = time.time()
        # Deadline passed to the listener so the stream stops itself.
        futuro = (inicio + tempo_segundos)
        # NOTE(review): self.Run(futuro) is CALLED here, so the stream runs
        # synchronously during Timer construction and the Timer receives
        # Run's return value (None), not a callable. To defer the call it
        # would need threading.Timer(3.0, self.Run, args=(futuro,)) —
        # confirm the intent before changing, since the current code
        # "works" by running the stream inline.
        t = threading.Timer(3.0, self.Run(futuro))
        t.setName('Thread-Extractor')
        t.start()
        # NOTE(review): 'if True' always cancels the timer right after
        # starting it, so the timer callback never fires.
        if True:
            terminal.Mensagem('Cancelando a Thread....', 'w')
            t.cancel()
            fim = time.time()
            duracao = fim - inicio
            strfim = time.strftime("\nFim: %A, %d %b %Y %H:%M:%S +0000", time.localtime(fim))
            strinicio = time.strftime("\nInício: %A, %d %b %Y %H:%M:%S +0000", time.localtime(inicio))
            texto = '%s Cancelada!%s%s\nDuração: %s' % (str(t.getName()), strinicio, strfim, duracao)
            terminal.Mensagem(texto, 'ok')
    def Run(self, futuro):
        """Open the authenticated Twitter stream until the deadline passes."""
        up = User_password()
        auth = OAuthHandler(up.CONSUMER_KEY(), up.CONSUMER_SECRET())
        auth.set_access_token(up.ACCESS_TOKEN(), up.ACCESS_TOKEN_SECRET())
        twitterStream = Stream(auth, listener(futuro))
        # Track a very broad term to sample Portuguese-language tweets.
        twitterStream.filter(follow=None,track=['a'],languages=['pt'])
3248824 | import sys
import boto3
from src.helper import Helper
class EFSCleanup:
    """Deletes expired AWS EFS File Systems (and their Mount Targets).

    Behaviour is driven by ``settings`` (clean flag, TTL in days, dry-run
    switch) and a ``whitelist`` of resource ids that must never be removed;
    every decision is recorded via ``Helper.record_execution_log_action``.
    """

    def __init__(self, logging, whitelist, settings, execution_log, region):
        self.logging = logging
        self.whitelist = whitelist
        self.settings = settings
        self.execution_log = execution_log
        self.region = region

        self._client_efs = None
        # In dry-run mode (the default) no AWS mutations are performed.
        self._dry_run = self.settings.get("general", {}).get("dry_run", True)

    @property
    def client_efs(self):
        """Lazily instantiated boto3 EFS client for the configured region."""
        if not self._client_efs:
            self._client_efs = boto3.client("efs", region_name=self.region)
        return self._client_efs

    def run(self):
        """Entry point: clean up all supported EFS resource types."""
        self.file_systems()

    def file_systems(self):
        """
        Deletes EFS File Systems older than the configured TTL, removing
        their Mount Targets first (a file system with mount targets cannot
        be deleted).
        """
        self.logging.debug("Started cleanup of EFS File Systems.")

        clean = (
            self.settings.get("services", {})
            .get("efs", {})
            .get("file_system", {})
            .get("clean", False)
        )
        # Guard clause: cleanup disabled for this service.
        if not clean:
            self.logging.info("Skipping cleanup of EFS File Systems.")
            return True

        try:
            resources = self.client_efs.describe_file_systems().get("FileSystems")
        # `except Exception` (not a bare `except:`) so SystemExit and
        # KeyboardInterrupt still propagate.
        except Exception:
            self.logging.error("Could not list all EFS File Systems.")
            self.logging.error(sys.exc_info()[1])
            return False

        ttl_days = (
            self.settings.get("services", {})
            .get("efs", {})
            .get("file_system", {})
            .get("ttl", 7)
        )

        for resource in resources:
            resource_id = resource.get("FileSystemId")
            resource_date = resource.get("CreationTime")
            resource_number_of_mount_targets = resource.get("NumberOfMountTargets")
            resource_action = None

            if resource_id not in self.whitelist.get("efs", {}).get(
                "file_system", []
            ):
                delta = Helper.get_day_delta(resource_date)

                if delta.days > ttl_days:
                    # Mount targets must be deleted before the file system.
                    if resource_number_of_mount_targets > 0:
                        try:
                            resource_mount_targets = (
                                self.client_efs.describe_mount_targets(
                                    FileSystemId=resource_id
                                ).get("MountTargets")
                            )
                        except Exception:
                            self.logging.error(
                                f"Could not list all EFS Mount Targets for EFS File System '{resource_id}'."
                            )
                            self.logging.error(sys.exc_info()[1])
                            resource_action = "ERROR"
                        else:
                            for mount_target in resource_mount_targets:
                                mount_target_id = mount_target.get("MountTargetId")

                                try:
                                    if not self._dry_run:
                                        self.client_efs.delete_mount_target(
                                            MountTargetId=mount_target_id
                                        )
                                except Exception:
                                    self.logging.error(
                                        f"Could not delete EFS Mount Target '{mount_target_id}' from EFS File System '{resource_id}'."
                                    )
                                    self.logging.error(sys.exc_info()[1])
                                    resource_action = "ERROR"
                                else:
                                    self.logging.info(
                                        f"EFS Mount Target '{mount_target_id}' was deleted for EFS File System {resource_id}."
                                    )

                    # Only delete the file system if no mount-target step failed.
                    if resource_action != "ERROR":
                        try:
                            if not self._dry_run:
                                self.client_efs.delete_file_system(
                                    FileSystemId=resource_id
                                )
                        except Exception:
                            self.logging.error(
                                f"Could not delete EFS File System '{resource_id}'."
                            )
                            self.logging.error(sys.exc_info()[1])
                            resource_action = "ERROR"
                        else:
                            self.logging.info(
                                f"EFS File System '{resource_id}' was created {delta.days} days ago "
                                "and has been deleted."
                            )
                            resource_action = "DELETE"
                else:
                    self.logging.debug(
                        f"EFS File System '{resource_id}' was created {delta.days} days ago "
                        "(less than TTL setting) and has not been deleted."
                    )
                    resource_action = "SKIP - TTL"
            else:
                self.logging.debug(
                    f"EFS File System '{resource_id}' has been whitelisted and has not been deleted."
                )
                resource_action = "SKIP - WHITELIST"

            Helper.record_execution_log_action(
                self.execution_log,
                self.region,
                "EFS",
                "File System",
                resource_id,
                resource_action,
            )

        self.logging.debug("Finished cleanup of EFS File Systems.")
        return True
| StarcoderdataPython |
1678210 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 20:59:22 2020
@author: utsav
"""
import numpy as np
import cv2
import base64
import requests
import json
def to_image_string(image_filepath):
    """Read *image_filepath* and return its contents as base64-encoded bytes."""
    # Context manager closes the file handle (the original leaked it).
    with open(image_filepath, 'rb') as image_file:
        return base64.b64encode(image_file.read())
def from_base64(base64_data):
    """Decode a base64 image payload into an OpenCV image array."""
    # bytes.decode('base64') is Python 2 only (raises LookupError on Py3);
    # base64.b64decode accepts both str and bytes.
    # np.frombuffer replaces the deprecated np.fromstring.
    nparr = np.frombuffer(base64.b64decode(base64_data), np.uint8)
    return cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
def hit_api_validate(number):
    """POST *number* to the local validation endpoint; return the parsed JSON reply."""
    url = 'http://localhost:9001' + '/api/validate'
    payload = {"test_number": number}
    # The service expects a JSON body and content type.
    response = requests.post(
        url,
        json=payload,
        headers={'content-type': 'application/json'},
    )
    return json.loads(response.text)
def hit_api_extract(filepath):
    """Send the image at *filepath* to the OCR endpoint; return the parsed JSON reply."""
    # Base64-encode the image and decode the bytes into a JSON-safe string.
    encoded_string = to_image_string(filepath).decode("utf-8")
    url = 'http://localhost:9001' + '/api/ocr'
    response = requests.post(
        url,
        json={"doc_b64": encoded_string},
        headers={'content-type': 'application/json'},
    )
    return json.loads(response.text)
def hit_api_mask_aadhaar(filepath, number_list):
    """Ask the masking endpoint to mask the given aadhaar numbers in an image.

    On success the masked image is written as 'masked_' + *filepath* and the
    saved name is returned; otherwise a failure message is returned.
    """
    encoded_string = to_image_string(filepath).decode("utf-8")
    headers = {'content-type': 'application/json'}
    url = 'http://localhost:9001' + '/api/mask'
    response = requests.post(
        url, json={"doc_b64": encoded_string, 'aadhaar': number_list}, headers=headers
    )
    r = json.loads(response.text)
    if r['is_masked']:
        save_name = "masked_" + filepath
        decoded_data = base64.b64decode(r['doc_b64_masked'])
        # np.frombuffer replaces the deprecated np.fromstring.
        np_data = np.frombuffer(decoded_data, np.uint8)
        img = cv2.imdecode(np_data, cv2.IMREAD_UNCHANGED)
        cv2.imwrite(save_name, img)
        return "masked document saved as " + save_name
    # The server could not locate the requested number in the image.
    return "Unable to find given number in the image :/ (try brut mode)"
def hit_api_brut_mask(input_name, output_name):
    """Brute-force mask any readable number in the image at *input_name*.

    Saves the returned image to *output_name* and returns a status message.
    """
    encoded_string = to_image_string(input_name).decode("utf-8")
    headers = {'content-type': 'application/json'}
    url = 'http://localhost:9001' + '/api/brut_mask'
    response = requests.post(url, json={"doc_b64": encoded_string}, headers=headers)
    r = json.loads(response.text)
    save_name = output_name
    decoded_data = base64.b64decode(r['doc_b64_brut_masked'])
    # np.frombuffer replaces the deprecated np.fromstring.
    np_data = np.frombuffer(decoded_data, np.uint8)
    img = cv2.imdecode(np_data, cv2.IMREAD_UNCHANGED)
    cv2.imwrite(save_name, img)
    return "masked document saved as " + save_name
def hit_api_sample_pipe(input_name, output_name, brut=False):
    """Run the full pipeline: OCR -> aadhaar detection -> masking.

    When *brut* is True the server falls back to brute-force masking if OCR
    finds no aadhaar number. On success the masked image is saved to
    *output_name* and the saved name is returned in a status message.
    """
    encoded_string = to_image_string(input_name).decode("utf-8")
    headers = {'content-type': 'application/json'}
    url = 'http://localhost:9001' + '/api/sample_pipe'
    response = requests.post(url, json={"doc_b64": encoded_string, "brut": brut}, headers=headers)
    r = json.loads(response.text)
    if r['is_masked']:
        save_name = output_name
        decoded_data = base64.b64decode(r['doc_b64_masked'])
        # np.frombuffer replaces the deprecated np.fromstring.
        np_data = np.frombuffer(decoded_data, np.uint8)
        img = cv2.imdecode(np_data, cv2.IMREAD_UNCHANGED)
        cv2.imwrite(save_name, img)
        print("Execution Mode =>", r['mode_executed'])
        if r['mode_executed'] == "OCR-MASKING":
            print("Aadhaar List =>", r['aadhaar_list'])
            print("Validated Aadhaar list =>", r['valid_aadhaar_list'])
        return "masked document saved as " + save_name
    print("Execution Mode =>", r['mode_executed'])
    print("Error =>", r['error'])
    return "Unable to find given number in the image :/ (try brut mode)"
#####################Usage => ###################
# NOTE(review): these demo calls execute at import time and require the API
# server at localhost:9001 plus a local '1.png' — guard or remove for reuse.
#Validates Aadhaar card numbers using Verhoeff Algorithm.
number = 397788000234
print(hit_api_validate(number))
#Extract aadhaar Number from image '1.png'
image = '1.png' # I assume you have a way of picking unique filenames
print(hit_api_extract(image)) #Returns empty list if aadhaar is not found
#Mask aadhaar number card for given aadhaar card number
aadhaar_list = ['397788000234']
image = '1.png' # I assume you have a way of picking unique filenames
print(hit_api_mask_aadhaar(image,aadhaar_list)) #saves masked image as masked+image => masked_1.png
#Brut Mask any Readable Number from Aadhaar (works good for low res and bad quality images)
image = '1.png' # I assume you have a way of picking unique filenames
masked_image = 'brut_masked.png' # I assume you have a way of picking unique filenames
print(hit_api_brut_mask(image,masked_image))
#Usecase : You have an aadhaar doc, you want to mask first 8 digits of the aadhaar card
#Process : Image -> Extract Text -> Check for aadhaar number -> Mask first 8 digits // check validity of aadhaar number
# If aadhaar card number is not found using OCR, try brut mode and mask possible numbers.
#This is implemented in app.py Now lets hit this pipeline here
image = '1.png' # I assume you have a way of picking unique filenames
masked_image = 'masked_aadhaar.png' # I assume you have a way of picking unique filenames
brut_mode = True #uses brut mode incase if ocr fails
print(hit_api_sample_pipe(image,masked_image,brut_mode))
| StarcoderdataPython |
179216 | # To import required modules:
import numpy as np
import time
import os
import sys
import matplotlib
import matplotlib.cm as cm #for color maps
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec #for specifying plot attributes
from matplotlib import ticker #for setting contour plots to log scale
import scipy.integrate #for numerical integration
import scipy.misc #for factorial function
from scipy.special import erf #error function, used in computing CDF of normal distribution
import scipy.interpolate #for interpolation functions
import corner #corner.py package for corner plots
#matplotlib.rc('text', usetex=True)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from src.functions_general import *
from src.functions_compare_kepler import *
from src.functions_load_sims import *
from src.functions_plot_catalogs import *
from src.functions_plot_params import *
# Toggle writing figures to disk and the directory they are saved into.
savefigures = False
savefigures_directory = '/Users/hematthi/Documents/GradSchool/Research/ExoplanetsSysSim_Clusters/Figures/Model_Optimization/AMD_system/Split_stars/Singles_ecc/Params11_KS/durations_norm_circ_singles_multis_GF2020_KS/Best_models/GP_med/'
##### To load the underlying populations:
loadfiles_directory = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/GP_med/' #Lognormal_mass_Earthlike_rocky/
run_number = ''
# Read the simulated catalog bounds (radii_min/radii_max etc. are used below),
# model parameters and the physical-catalog summary statistics.
N_sim, cos_factor, P_min, P_max, radii_min, radii_max = read_targets_period_radius_bounds(loadfiles_directory + 'periods%s.out' % run_number)
param_vals_all = read_sim_params(loadfiles_directory + 'periods%s.out' % run_number)
sssp_per_sys, sssp = compute_summary_stats_from_cat_phys(file_name_path=loadfiles_directory, run_number=run_number, load_full_tables=True)
##### To load some mass-radius tables:
# NWG-2018 model:
MR_table_file = '../../data/MRpredict_table_weights3025_R1001_Q1001.txt'
with open(MR_table_file, 'r') as file:
    # Skip '#' comment lines before handing the stream to genfromtxt.
    lines = (line for line in file if not line.startswith('#'))
    MR_table = np.genfromtxt(lines, names=True, delimiter=', ')
# Li Zeng models:
MR_earthlike_rocky = np.genfromtxt('../../data/MR_earthlike_rocky.txt', names=['mass','radius']) # mass and radius are in Earth units
MR_pure_iron = np.genfromtxt('../../data/MR_pure_iron.txt', names=['mass','radius']) # mass and radius are in Earth units
# To construct an interpolation function for each MR relation:
# (the NWG2018 table is stored in log10, hence the 10.** conversion)
MR_NWG2018_interp = scipy.interpolate.interp1d(10.**MR_table['log_R'], 10.**MR_table['05'])
MR_earthlike_rocky_interp = scipy.interpolate.interp1d(MR_earthlike_rocky['radius'], MR_earthlike_rocky['mass'])
MR_pure_iron_interp = scipy.interpolate.interp1d(MR_pure_iron['radius'], MR_pure_iron['mass'])
# To find where the Earth-like rocky relation intersects with the NWG2018 mean relation (between 1.4-1.5 R_earth):
def diff_MR(R):
    """Absolute mass difference between the NWG2018 mean and Earth-like rocky relations at radius *R* (Earth units)."""
    M_NWG2018 = MR_NWG2018_interp(R)
    M_earthlike_rocky = MR_earthlike_rocky_interp(R)
    return np.abs(M_NWG2018 - M_earthlike_rocky)
# The intersection is approximately 1.472 R_earth
radii_switch = 1.472
# IDEA 1: Normal distribution for rho centered around Earth-like rocky, with a sigma_rho that grows with radius
# To define sigma_rho such that log10(sigma_rho) is a linear function of radius:
rho_earthlike_rocky = rho_from_M_R(MR_earthlike_rocky['mass'], MR_earthlike_rocky['radius']) # mean density (g/cm^3) for Earth-like rocky as a function of radius
rho_pure_iron = rho_from_M_R(MR_pure_iron['mass'], MR_pure_iron['radius']) # mean density (g/cm^3) for pure iron as a function of radius
sigma_rho_at_radii_switch = 3. # std of mean density (g/cm^3) at radii_switch
sigma_rho_at_radii_min = 1. # std of mean density (g/cm^3) at radii_min (from the loaded catalog bounds)
rho_radius_slope = (np.log10(sigma_rho_at_radii_switch)-np.log10(sigma_rho_at_radii_min)) / (radii_switch - radii_min) # dlog(rho)/dR; slope between radii_min and radii_switch in log(rho)
sigma_rho = 10.**( rho_radius_slope*(MR_earthlike_rocky['radius'] - radii_min) + np.log10(sigma_rho_at_radii_min) )
# IDEA 2: Lognormal distribution for mass centered around Earth-like rocky, with a sigma_log_M that grows with radius
# To define sigma_log_M as a linear function of radius:
sigma_log_M_at_radii_switch = 0.3 # std of log_M (Earth masses) at radii_switch
sigma_log_M_at_radii_min = 0.04 # std of log_M (Earth masses) at radii_min
sigma_log_M_radius_slope = (sigma_log_M_at_radii_switch - sigma_log_M_at_radii_min) / (radii_switch - radii_min)
sigma_log_M = sigma_log_M_radius_slope*(MR_earthlike_rocky['radius'] - radii_min) + sigma_log_M_at_radii_min
##### To make mass-radius plots:
afs = 20 #axes labels font size
tfs = 20 #text labels font size
lfs = 16 #legend labels font size
bins = 100 # histogram bin count used in both figures
# Density vs. radius for new model based on Li Zeng's Earth-like rocky:
fig = plt.figure(figsize=(8,8))
plot = GridSpec(4, 1, left=0.15, bottom=0.1, right=0.98, top=0.98, wspace=0, hspace=0)

# Top panel: the assumed radius-dependent density scatter, sigma_rho(R_p).
ax = plt.subplot(plot[0,:]) # sigma_rho vs. radius
plt.plot(MR_earthlike_rocky['radius'], sigma_rho, color='orange', ls='-', lw=3, label=r'Linear $\log(\sigma_\rho)$ vs $R_p$')
plt.gca().set_yscale("log")
ax.tick_params(axis='both', labelsize=afs)
plt.xticks([])
plt.yticks([1., 2., 3., 4., 5.])
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
ax.yaxis.get_major_formatter().set_scientific(False)
ax.yaxis.get_major_formatter().set_useOffset(False)
plt.xlim([radii_min, radii_switch])
plt.ylim([0.9, 4.])
plt.ylabel(r'$\sigma_\rho$ ($g/cm^3$)', fontsize=tfs)
plt.legend(loc='upper left', bbox_to_anchor=(0.01,0.99), ncol=1, frameon=False, fontsize=lfs)

# Bottom panel: mean density vs radius with 1/2/3-sigma bands around the
# Earth-like rocky relation, plus pure-iron and water references.
ax = plt.subplot(plot[1:,:]) # rho vs. radius
plt.plot(MR_pure_iron['radius'], rho_pure_iron, color='r', ls='--', lw=3, label='Pure iron')
plt.plot(MR_earthlike_rocky['radius'], rho_earthlike_rocky, color='orange', ls='--', lw=3, label='Earth-like rocky')
plt.fill_between(MR_earthlike_rocky['radius'], rho_earthlike_rocky - sigma_rho, rho_earthlike_rocky + sigma_rho, color='orange', alpha=0.5, label=r'Earth-like rocky $\pm \sigma_\rho$')
plt.fill_between(MR_earthlike_rocky['radius'], rho_earthlike_rocky - 2.*sigma_rho, rho_earthlike_rocky + 2.*sigma_rho, color='orange', alpha=0.3, label=r'Earth-like rocky $\pm 2\sigma_\rho$')
plt.fill_between(MR_earthlike_rocky['radius'], rho_earthlike_rocky - 3.*sigma_rho, rho_earthlike_rocky + 3.*sigma_rho, color='orange', alpha=0.1, label=r'Earth-like rocky $\pm 3\sigma_\rho$')
plt.axhline(y=1., color='c', lw=3, label='Water density (1 g/cm^3)')
plt.gca().set_yscale("log")
ax.tick_params(axis='both', labelsize=afs)
plt.minorticks_off()
plt.yticks([1., 2., 3., 4., 5., 7., 10., 15.])
ax.yaxis.set_minor_formatter(ticker.ScalarFormatter())
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
ax.yaxis.get_major_formatter().set_scientific(False)
ax.yaxis.get_major_formatter().set_useOffset(False)
plt.xlim([radii_min, radii_switch])
plt.ylim([0.9, 20.])
plt.xlabel(r'$R_p$ ($R_\oplus$)', fontsize=tfs)
plt.ylabel(r'$\rho$ ($g/cm^3$)', fontsize=tfs)
plt.legend(loc='lower right', bbox_to_anchor=(0.99,0.01), ncol=1, frameon=False, fontsize=lfs)

if savefigures:
    plt.savefig(savefigures_directory + 'Density_radius.pdf')
    plt.close()
plt.show()
# Mass vs. radius:
fig = plt.figure(figsize=(16,8))
plot = GridSpec(5, 5, left=0.1, bottom=0.1, right=0.98, top=0.98, wspace=0, hspace=0)

# Main panel: 2D density of simulated planet masses/radii (log-log) with
# reference mass-radius relations and constant-density curves overplotted.
ax = plt.subplot(plot[1:,:4])
masses_all = sssp_per_sys['mass_all'][sssp_per_sys['mass_all'] > 0.]
radii_all = sssp_per_sys['radii_all'][sssp_per_sys['radii_all'] > 0.]
corner.hist2d(np.log10(radii_all), np.log10(masses_all), bins=50, plot_density=True, contour_kwargs={'colors': ['0.6','0.4','0.2','0']}, data_kwargs={'color': 'k'})
plt.plot(MR_table['log_R'], MR_table['05'], '-', color='g', label='Mean prediction (NWG2018)')
plt.fill_between(MR_table['log_R'], MR_table['016'], MR_table['084'], color='g', alpha=0.5, label=r'16%-84% (NWG2018)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=5.51)), color='b', label='Earth density (5.51 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=3.9)), color='m', label='Mars density (3.9 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=1.)), color='c', label='Water density (1 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=7.9)), color='r', label='Iron density (7.9 g/cm^3)')
plt.plot(MR_table['log_R'], np.log10(M_from_R_rho(10.**MR_table['log_R'], rho=100.)), color='k', label='100 g/cm^3')
plt.plot(np.log10(MR_earthlike_rocky['radius']), np.log10(MR_earthlike_rocky['mass']), color='orange', ls='--', lw=3, label='Earth-like rocky')
#plt.fill_between(np.log10(MR_earthlike_rocky['radius']), np.log10(M_from_R_rho(MR_earthlike_rocky['radius'], rho=rho_earthlike_rocky-sigma_rho)), np.log10(M_from_R_rho(MR_earthlike_rocky['radius'], rho=rho_earthlike_rocky+sigma_rho)), color='orange', alpha=0.5, label=r'16%-84% ($\rho \sim \mathcal{N}(\rho_{\rm Earthlike\:rocky}, \sigma_\rho(R_p))$)') #label=r'$\rho \sim \mathcal{N}(\rho_{\rm Earthlike\:rocky}, 10^{[\frac{d\log\rho}{dR_p}(R_p - 0.5) + \log{\rho_0}]})$'
plt.fill_between(np.log10(MR_earthlike_rocky['radius']), np.log10(MR_earthlike_rocky['mass']) - sigma_log_M, np.log10(MR_earthlike_rocky['mass']) + sigma_log_M, color='orange', alpha=0.5, label=r'16%-84% ($\log{M_p} \sim \mathcal{N}(M_{p,\rm Earthlike\:rocky}, \sigma_{\log{M_p}})$)')
plt.plot(np.log10(MR_pure_iron['radius']), np.log10(MR_pure_iron['mass']), color='r', ls='--', lw=3, label='Pure iron')
#plt.axvline(x=np.log10(0.7), color='k', ls='--', lw=3)
plt.axvline(x=np.log10(radii_switch), color='k', ls='--', lw=3)
ax.tick_params(axis='both', labelsize=afs)
xtick_vals = np.array([0.5, 1., 2., 4., 10.])
ytick_vals = np.array([1e-1, 1., 10., 1e2])
plt.xticks(np.log10(xtick_vals), xtick_vals)
plt.yticks(np.log10(ytick_vals), ytick_vals)
plt.xlim([np.log10(radii_min), np.log10(radii_max)])
plt.ylim([np.log10(0.07), 2.])
plt.xlabel(r'$R_p$ ($R_\oplus$)', fontsize=tfs)
plt.ylabel(r'$M_p$ ($M_\oplus$)', fontsize=tfs)
plt.legend(loc='lower right', bbox_to_anchor=(0.99,0.01), ncol=1, frameon=False, fontsize=lfs)

# Marginal histogram of radii above the main panel.
ax = plt.subplot(plot[0,:4]) # top histogram
plt.hist(radii_all, bins=np.logspace(np.log10(radii_min), np.log10(radii_max), bins+1), histtype='step', color='k', ls='-', label=r'All')
#plt.axvline(x=0.7, color='k', ls='--', lw=3)
plt.axvline(x=radii_switch, color='k', ls='--', lw=3)
plt.gca().set_xscale("log")
plt.xlim([radii_min, radii_max])
plt.xticks([])
plt.yticks([])
plt.legend(loc='upper right', bbox_to_anchor=(0.99,0.99), ncol=1, frameon=False, fontsize=lfs)

# Marginal histogram of masses, split at radii_switch, to the right.
ax = plt.subplot(plot[1:,4]) # side histogram
plt.hist(masses_all, bins=np.logspace(np.log10(0.07), 2., bins+1), histtype='step', orientation='horizontal', color='k', ls='-', label='All')
radii_cut = radii_switch
plt.hist(masses_all[radii_all > radii_cut], bins=np.logspace(np.log10(0.07), 2., bins+1), histtype='step', orientation='horizontal', color='b', ls='-', label=r'$R_p > %s R_\oplus$' % radii_cut)
plt.hist(masses_all[radii_all < radii_cut], bins=np.logspace(np.log10(0.07), 2., bins+1), histtype='step', orientation='horizontal', color='r', ls='-', label=r'$R_p < %s R_\oplus$' % radii_cut)
plt.gca().set_yscale("log")
plt.ylim([0.07, 1e2])
plt.xticks([])
plt.yticks([])
plt.legend(loc='upper right', bbox_to_anchor=(0.99,0.99), ncol=1, frameon=False, fontsize=lfs)

if savefigures:
    plt.savefig(savefigures_directory + 'MR_diagram.pdf')
    plt.close()
plt.show()
| StarcoderdataPython |
195364 | import dataclasses
from rentomatic.domain.room import Room
@dataclasses.dataclass
class MemRepo:
    """In-memory room repository backed by a plain list of dicts."""

    # Raw room records as dictionaries (e.g. loaded from fixtures).
    data: list

    def list(self):
        """Materialise every stored record as a Room domain object."""
        return list(map(Room.from_dict, self.data))
| StarcoderdataPython |
3289610 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import datetime
from .client import ETGClient
from .models import (
GuestData,
)
class ETGHotelsClient(ETGClient):
def autocomplete(self, query, language=None):
    """Finds regions and hotels by a part of their names.

    :param query: part of hotel or region name.
    :type query: str
    :param language: (optional) language of the response, e.g. 'en', 'ru'.
    :type language: str
    :return: suggested hotels and regions, no more than 5 objects for each category.
    :rtype: dict
    """
    payload = {
        'query': query,
        'language': language,
    }
    return self.request('POST', 'api/b2b/v3/search/multicomplete/', data=payload)
def search(self, ids, checkin, checkout, guests,
currency=None, residency=None, timeout=None, upsells=None,
language=None):
"""Searches hotels with available accommodation that meets the given conditions.
It is not recommended to let the users choose the rates from this method response.
:param ids: list of hotels identifiers or region identifier.
:type ids: list[str] or int
:param checkin: check-in date, no later than 366 days from today.
:type checkin: datetime.date
:param checkout: check-out date, no later than 30 days from check-in date.
:type checkout: datetime.date
:param guests: list of guests in the rooms.
The max number of rooms in one request is 6.
:type guests: list[GuestData]
:param currency: (optional) currency code of the rooms rate in the response, e.g. 'GBP', 'USD', 'RUB'.
Default value is contract currency.
:type currency: str or None
:param residency: (optional) guest's (or multiple guests') nationality.
Use it in case there are doubts regarding country/hotel policy towards citizens of a specific country.
Value format is specified by standard 'ISO 3166-1 alpha-2', e.g. 'gb', 'us', 'ru'.
:type residency: str or None
:param timeout: (optional) response timeout in seconds.
:type timeout: int or None
:param upsells: (optional) additional services request.
:type upsells: dict or None
:param language: (optional) language of static information in the response, e.g. 'en', 'ru'.
Default value is contract language.
:type language: str or None
:return: list of available hotels.
:rtype: list
"""
endpoint = None
if isinstance(ids, list):
endpoint = 'api/b2b/v3/search/serp/hotels/'
elif isinstance(ids, int):
endpoint = 'api/b2b/v3/search/serp/region/'
data = {
'ids': ids,
'region_id': ids,
'checkin': checkin.strftime('%Y-%m-%d'),
'checkout': checkout.strftime('%Y-%m-%d'),
'guests': guests,
'currency': currency,
'residency': residency,
'timeout': timeout,
'upsells': upsells if upsells is not None else {},
'language': language,
}
response = self.request('POST', endpoint, data=data)
hotels = list()
if isinstance(response, dict):
hotels = response.get('hotels')
return hotels
def search_by_hotels(self, ids, checkin, checkout, guests, **kwargs):
"""Searches hotels with available accommodation that meets the given conditions.
:param ids: list of hotels identifiers.
:type ids: list[str]
:param checkin: check-in date, no later than 366 days from today.
:type checkin: datetime.date
:param checkout: check-out date, no later than 30 days from check-in date.
:type checkout: datetime.date
:param guests: list of guests in the rooms.
The max number of rooms in one request is 6.
:type guests: list[GuestData]
:param kwargs: optional parameters.
For more information, see the description of ``self.search`` method.
:return: list of available hotels (Hotels Search Engine Results Page).
:rtype: list
"""
return self.search(ids, checkin, checkout, guests, **kwargs)
def search_by_region(self, region_id, checkin, checkout, guests, **kwargs):
"""Searches hotels with available accommodation that meets the given conditions.
:param region_id: region identifier.
:type region_id: int
:param checkin: check-in date, no later than 366 days from today.
:type checkin: datetime.date
:param checkout: check-out date, no later than 30 days from check-in date.
:type checkout: datetime.date
:param guests: list of guests in the rooms.
The max number of rooms in one request is 6.
:type guests: list[GuestData]
:param kwargs: optional parameters.
For more information, see the description of ``self.search`` method.
:return: list of available hotels (Region Search Engine Results Page).
:rtype: list
"""
return self.search(region_id, checkin, checkout, guests, **kwargs)
def hotelpage(self, hotel_id, checkin, checkout, guests,
currency=None, residency=None, upsells=None, language=None):
"""Returns actual rates for the given hotel.
This request is necessary to make a booking via API.
Value of `book_hash` in results of this API method can be passed as `book_hash` when sending booking requests.
:param hotel_id: hotel identifier.
:type hotel_id: str
:param checkin: check-in date, no later than 366 days from today.
:type checkin: datetime.date
:param checkout: check-out date, no later than 30 days from check-in date.
:type checkout: datetime.date
:param guests: list of guests in the rooms.
The max number of rooms in one request is 6.
:type guests: list[GuestData]
:param currency: (optional) currency code of the rooms rate in the response, e.g. 'GBP', 'USD', 'RUB'.
Default value is contract currency.
:type currency: str or None
:param residency: (optional) guest's (or multiple guests') nationality.
Use it in case there are doubts regarding country/hotel policy towards citizens of a specific country.
Value format is specified by standard 'ISO 3166-1 alpha-2', e.g. 'gb', 'us', 'ru'.
:type residency: str or None
:param timeout: (optional) response timeout in seconds.
:type timeout: int or None
:param upsells: (optional) additional services request.
:type upsells: dict or None
:param language: (optional) language of static information in the response, e.g. 'en', 'ru'.
Default value is contract language.
:type language: str or None
:return: hotel info with actual available rates.
:rtype: dict or None
"""
endpoint = 'api/b2b/v3/search/hp/'
data = {
'id': hotel_id,
'checkin': checkin.strftime('%Y-%m-%d'),
'checkout': checkout.strftime('%Y-%m-%d'),
'guests': guests,
'currency': currency,
'residency': residency,
'upsells': upsells if upsells is not None else {},
'language': language,
}
response = self.request('POST', endpoint, data=data)
hotel = None
if isinstance(response, dict) and isinstance(response.get('hotels'), list) and len(response.get('hotels')):
hotel = response.get('hotels')[0]
return hotel
def make_reservation(self, partner_order_id, book_hash, language, user_ip):
"""Makes a new reservation.
:param partner_order_id: unique order id on partner side, e.g. '0a0f4e6d-b337-43be-a5f8-484492ebe033'.
:type partner_order_id: str
:param book_hash: unique identifier of the rate from hotelpage response.
:type book_hash: str
:param language: language of the reservation, e.g. 'en', 'ru'.
:type language: str
:param user_ip: customer IP address, e.g. '8.8.8.8'.
:type user_ip: str
:return: reservation info.
:rtype: dict
"""
endpoint = 'api/b2b/v3/hotel/order/booking/form/'
data = {
'partner_order_id': partner_order_id,
'book_hash': book_hash,
'language': language,
'user_ip': user_ip,
}
response = self.request('POST', endpoint, data=data)
return response
def finish_reservation(self, partner, payment_type, rooms, user, language,
arrival_datetime=None, upsell_data=None, return_path=None):
"""Completes the reservation.
:param partner: partner information.
partner_order_id: partner order id.
comment: (optional) partner booking inner comment. It is visible only to the partner himself.
amount_sell_b2b2c: (optional) reselling price for the client in contract currency.
:type partner: dict
:param payment_type: payment information.
type: payment type option, possible values: 'now', 'hotel', 'deposit'.
amount: amount of the order.
currency_code: ISO currency code, e.g. 'EUR'.
init_uuid: (optional) token of the booking payment operation.
pay_uuid: (optional) token of the booking payment check.
:type payment_type: dict
:param rooms: guest data by the rooms.
:type rooms: list
:param user: guest additional information.
email: partner manager email.
phone: guest telephone number.
comment: (optional) guest comment sent to the hotel.
:type user: dict
:param language: language of the reservation, e.g. 'en', 'ru'.
:type language: str
:param arrival_datetime: (optional) estimated arrival time to the hotel.
:type arrival_datetime: datetime.datetime
:param upsell_data: (optional) upsell information.
:type upsell_data: list or None
:param return_path: (optional) URL on the partner side to which the user will be forwarded
by the payment gateway after 3D Secure verification.
:type return_path: str
:return: True if the reservation is completed.
:rtype: bool
"""
endpoint = 'api/b2b/v3/hotel/order/booking/finish/'
data = {
'partner': partner,
'payment_type': payment_type,
'rooms': rooms,
'user': user,
'language': language,
'arrival_datetime': arrival_datetime,
'upsell_data': upsell_data if upsell_data is not None else [],
'return_path': return_path,
}
self.request('POST', endpoint, data=data)
return True
def check_reservation_status(self, partner_order_id):
endpoint = 'api/b2b/v3/hotel/order/booking/finish/status/'
data = {
'partner_order_id': partner_order_id,
}
response = self.request('POST', endpoint, data=data)
return response
def cancel(self, partner_order_id):
"""Cancels reservation.
:param partner_order_id: partner order id, e.g. '0a0f4e6d-b337-43be-a5f8-484492ebe033'.
:type partner_order_id: str
:return: True if the reservation is canceled.
:rtype: bool
"""
endpoint = 'api/b2b/v3/hotel/order/cancel/'
data = {
'partner_order_id': partner_order_id,
}
self.request('POST', endpoint, data=data)
return True
def region_list(self, last_id=None, limit=None, types=None):
"""Returns information about regions.
:param last_id: (optional) all retrieved regions will have an ID that exceeds the given value.
:type last_id: int or None
:param limit: (optional) maximum number of regions in a response, cannot exceed 10000, default value = 1000.
:type limit: int or None
:param types: (optional) condition for filtering regions by region type, possible values:
``Airport``, ``Bus Station``, ``City``, ``Continent``, ``Country``,
``Multi-City (Vicinity)``, ``Multi-Region (within a country)``, ``Neighborhood``,
``Point of Interest``, ``Province (State)``,``Railway Station``, ``Street``,
``Subway (Entrace)``.
:type types: list[str] or None
:return: returns information about regions.
:rtype: list
"""
data = dict()
if last_id is not None:
data['last_id'] = last_id
if limit is not None:
data['limit'] = limit
if types is not None:
data['types'] = types
regions = self.request('GET', 'region/list', data=data)
return regions
def get_voucher(self, partner_order_id, language):
data = {
'partner_order_id': partner_order_id,
'language': language,
}
voucher = self.request('GET', 'api/b2b/v3/hotel/order/document/voucher/download/', data=data, stream=True)
return voucher
| StarcoderdataPython |
3288633 | <gh_stars>0
"""Module to connect to the sqlserver database."""
import pymssql
from config import _Config
class SqlServerConnect:  # pylint: disable=too-few-public-methods
    """Provides the connector and helper queries for the SQL Server database."""

    @staticmethod
    def connect(app_config: "_Config"):
        """Establish and return a connection to SQL Server.

        :param app_config: application config; ``DB_MSSQL_CONFIG`` must hold
            the keyword arguments accepted by :func:`pymssql.connect`.
        :raises Exception: propagates any error raised by the driver.
        """
        # The previous ``except Exception as err: raise err`` wrapper was a
        # no-op; let driver errors propagate with their original traceback.
        return pymssql.connect(  # pylint: disable=no-member
            **app_config.DB_MSSQL_CONFIG
        )

    @staticmethod
    def get_row_query(  # pylint: disable-msg=too-many-arguments
        primary_keys: str,
        input_col: str,
        table_name: str,
        output_col: str,
        app_config: "_Config",
        limit: int = 100,
    ) -> str:
        """Return a T-SQL query selecting up to ``limit`` unprocessed rows.

        A row is "unprocessed" when ``input_col`` is non-null while
        ``output_col`` is null or empty. ``SCHEMA_NAME`` from the config, if
        set, is prefixed to ``table_name``.

        NOTE(review): identifiers are interpolated directly into the SQL
        string (they cannot be bound as parameters in T-SQL); callers must
        only pass trusted column/table names.
        """
        cols_to_query = f"{primary_keys},{input_col}"
        if app_config.SCHEMA_NAME:
            table_name = f"{app_config.SCHEMA_NAME}.{table_name}"
        return (
            f"select top {limit} {cols_to_query} from {table_name} where "
            f"{input_col} is not null and "
            f"coalesce({output_col}, '') = ''"
        )
| StarcoderdataPython |
"""
Train
=====
Defines functions which train models and write model artifacts to disk.
"""
from __future__ import print_function
import os
import tempfile
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow_mnist import model, paths
def train(path):
    """
    Train the MNIST classifier and persist its weights.

    Arguments:
        path (str): The path indicating where to save the final model artifacts
    """
    # Build the computation graph once, up front.
    graph, x, y, step, initializer, accuracy, prediction = model.build()

    with tf.Session(graph=graph) as session:
        # All variables must be initialized before the first training step.
        session.run(initializer)

        # Stream MNIST batches from a throwaway download directory.
        mnist = input_data.read_data_sets(tempfile.mkdtemp(), one_hot=True)
        for _ in range(1000):
            images, labels = mnist.train.next_batch(100)
            session.run(step, feed_dict={x: images, y: labels})

        # Report held-out accuracy before saving.
        print(session.run(accuracy, feed_dict={x: mnist.test.images,
                                               y: mnist.test.labels}))

        # Persist the trained variables under the requested path.
        os.makedirs(path)
        tf.train.Saver().save(session, path)
        print('Success!')
def main():
    """
    Training entrypoint used by baklava: train the MNIST network and write
    the serialized model artifact to the expected path.
    """
    train(paths.model('mnist'))
| StarcoderdataPython |
from django.test import TestCase, Client
from django.urls import reverse
from blog_crypto.crypto_auth.models import CryptoUser, Profile
class ProfileViewTest(TestCase):
    def test_profile_view_create_user_and_access_profile_page_return_success(self):
        """Signing up and then signing in lets the user load the profile page."""
        data_sign_up = {
            'email': '<EMAIL>',
            'password1': '<PASSWORD>*',
            'password2': '<PASSWORD>*',
        }
        data_sign_in = {
            'email': '<EMAIL>',
            'password': '<PASSWORD>*',
        }
        # The sign-up/sign-in responses are not asserted on; the previous
        # code bound each to a ``response`` variable it never used.
        self.client.post(reverse('sign up'), data_sign_up)
        self.client.post(reverse('sign in'), data_sign_in)
        # Signing up must have created exactly one user and its profile.
        self.assertEqual(CryptoUser.objects.count(), 1)
        self.assertEqual(Profile.objects.count(), 1)
        response = self.client.get(reverse('profile details'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'auth/profile.html')
| StarcoderdataPython |
"""Contains function :func:`~.measure` for measuring states"""
import random
import numpy as np
from collections import namedtuple
from qualg.scalars import is_number
from qualg.states import State
from qualg.operators import Operator
# Result record: the outcome label, its Born-rule probability and the
# renormalised post-measurement state.
MeasurementResult = namedtuple("MeasurementResult", ["outcome", "probability", "post_meas_state"])
def measure(state, kraus_ops):
    """Sample a measurement outcome of *state* under a POVM.

    The POVM is given by its Kraus operators as a mapping from outcome
    labels to :class:`~.operators.Operator` instances. One outcome is drawn
    at random with its Born-rule probability and the matching renormalised
    post-measurement state is returned.

    Parameters
    ----------
    state : :class:`~.states.State`
        The state to be measured.
    kraus_ops : dict
        Maps outcome labels (keys) to Kraus operators (values).

    Returns
    -------
    :class:`~.MeasurementResult`
        Named tuple of ``outcome``, ``probability`` and ``post_meas_state``.

    Warning
    -------
    The Kraus operators are not checked for actually forming a valid POVM.
    """
    if not isinstance(state, State):
        raise TypeError("state should be a State")
    if not isinstance(kraus_ops, dict):
        raise TypeError("kraus_ops should be a dict")
    draw = random.random()
    cumulative = 0
    for outcome, operator in kraus_ops.items():
        if not isinstance(operator, Operator):
            raise TypeError("the values of kraus_ops should be Operator")
        collapsed = operator * state
        prob = collapsed.inner_product(collapsed)
        if not is_number(prob):
            raise NotImplementedError("Cannot perform measurement when inner product are not numbers")
        if prob < 0:
            # Valid Kraus operators always yield non-negative probabilities.
            raise ValueError("Seems the Kraus operators does not form positive operators")
        if cumulative <= draw <= cumulative + prob:
            # Renormalise the collapsed state before returning it.
            return MeasurementResult(outcome, prob, collapsed * (1 / np.sqrt(prob)))
        cumulative += prob
    raise ValueError("Seems the Kraus operators does not sum up to one")
| StarcoderdataPython |
class Solution:
    def minimumAbsDifference(self, arr: List[int]) -> List[List[int]]:
        """Return all sorted pairs of elements at the minimum absolute distance.

        Sorts ``arr`` in place; the answer pairs are listed in ascending order.
        """
        arr.sort()
        # After sorting, the minimum absolute difference is always between
        # adjacent elements.
        gaps = [b - a for a, b in zip(arr, arr[1:])]
        if not gaps:
            return []
        smallest = min(gaps)
        return [[a, b] for a, b in zip(arr, arr[1:]) if b - a == smallest]
| StarcoderdataPython |
# -*- coding: utf-8 -*-
"""Augmentation methods.
- Author: Curt-Park
- Email: <EMAIL>
- Reference:
https://arxiv.org/pdf/1805.09501.pdf
https://github.com/kakaobrain/fast-autoaugment/
"""
from abc import ABC
from itertools import chain
import random
from typing import List, Tuple
from PIL.Image import Image
import numpy as np
import torch
from torch.utils.data import Dataset
from src.augmentation.transforms import transforms_info
from src.utils import get_rand_bbox_coord, to_onehot
class Augmentation(ABC):
    """Base class shared by every augmentation policy in this module."""

    def __init__(self, n_level: int = 10) -> None:
        """Cache the transform registry and the magnitude resolution."""
        self.transforms_info = transforms_info()
        self.n_level = n_level

    def _apply_augment(self, img: Image, name: str, level: int) -> Image:
        """Apply transform *name* at the given magnitude and return the result.

        Args:
            img: image to augment (a copy is transformed, not the original).
            name: registry key of the transform to apply.
            level: magnitude of augmentation in ``[0, n_level)``.

        Returns:
            The augmented image.
        """
        assert 0 <= level < self.n_level
        transform, low, high = self.transforms_info[name]
        # Map the discrete level onto the transform's [low, high] range.
        magnitude = low + level * (high - low) / self.n_level
        return transform(img.copy(), magnitude)
class SequentialAugmentation(Augmentation):
    """Applies a fixed list of ``(name, probability, level)`` policies in order."""

    def __init__(
        self,
        policies: List[Tuple[str, float, int]],
        n_level: int = 10,
    ) -> None:
        """Store the policy list."""
        super().__init__(n_level)
        self.policies = policies

    def __call__(self, img: Image) -> Image:
        """Run each policy on *img*; each is skipped with probability ``1 - pr``."""
        for name, pr, level in self.policies:
            if random.random() <= pr:
                img = self._apply_augment(img, name, level)
        return img
class AutoAugmentation(Augmentation):
    """AutoAugment-style policy: sample sub-policies, then apply their ops.

    References:
        https://arxiv.org/pdf/1805.09501.pdf
    """

    def __init__(
        self,
        policies: List[List[Tuple[str, float, int]]],
        n_select: int = 1,
        n_level: int = 10,
    ) -> None:
        """Store the candidate sub-policies and how many to draw per call."""
        super().__init__(n_level)
        self.policies = policies
        self.n_select = n_select

    def __call__(self, img: Image) -> Image:
        """Draw ``n_select`` sub-policies and apply their transforms in order."""
        drawn = random.sample(self.policies, k=self.n_select)
        for name, pr, level in chain.from_iterable(drawn):
            if random.random() <= pr:
                img = self._apply_augment(img, name, level)
        return img
class RandAugmentation(Augmentation):
    """Random augmentation class.

    References:
        RandAugment: Practical automated data augmentation with a reduced search space
        (https://arxiv.org/abs/1909.13719)
    """

    def __init__(
        self,
        transforms: List[str],
        n_select: int = 2,
        level: int = 14,
        n_level: int = 31,
    ) -> None:
        """Initialize.

        ``level`` is kept only when it is a valid magnitude in
        ``[0, n_level)``; otherwise it is set to None and a fresh random
        magnitude is drawn for every transform application.
        """
        super().__init__(n_level)
        self.n_select = n_select
        # isinstance replaces the earlier ``type(level) is int`` check.
        self.level = level if isinstance(level, int) and 0 <= level < n_level else None
        self.transforms = transforms

    def __call__(self, img: Image) -> Image:
        """Apply ``n_select`` randomly chosen transforms to *img*."""
        chosen_transforms = random.sample(self.transforms, k=self.n_select)
        for transf in chosen_transforms:
            # Bug fix: a configured level of 0 is valid. The previous
            # truthiness test (``self.level if self.level else ...``)
            # discarded it and drew a random magnitude instead.
            if self.level is not None:
                level = self.level
            else:
                level = random.randint(0, self.n_level - 1)
            img = self._apply_augment(img, transf, level)
        return img
class CutMix(Dataset):
    """A Dataset class for CutMix.
    References:
        https://github.com/ildoonet/cutmix
    """
    def __init__(
        self, dataset: Dataset, num_classes: int, beta: float = 1.0, prob: float = 0.5
    ) -> None:
        # dataset: base dataset yielding (image tensor, integer label) pairs.
        # num_classes: size of the one-hot label vectors produced.
        # beta: Beta(beta, beta) parameter the mix ratio is drawn from.
        # prob: probability of applying CutMix to any given item.
        self.dataset = dataset
        self.num_classes = num_classes
        self.beta = beta
        self.prob = prob
    def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Convert image and label to a cutmix image and label.
        Combine two training samples by cutting and pasting two images along a random box.
        The ground truth label is also "mixed" via the combination ratio.
        The combination ratio is sampled from a beta distribution.
        """
        img, label = self.dataset[index]  # label: int
        label = torch.tensor([label], dtype=torch.long)
        label_onehot = to_onehot(label, self.num_classes)
        # sampling the length ratio of random box to the image
        len_ratio = np.sqrt(np.random.beta(self.beta, self.beta))
        # Skip mixing with probability 1 - prob, or when the sampled box
        # would be degenerately small.
        if random.random() > self.prob or len_ratio < 1e-3:
            return img, label_onehot.squeeze_(0)
        # NOTE(review): the box is cut on the last two image dimensions,
        # which the code names (w, h) — confirm the expected image layout.
        w, h = img.size()[-2], img.size()[-1]
        (x0, y0), (x1, y1) = get_rand_bbox_coord(w, h, len_ratio)
        # compute the combination ratio
        comb_ratio = (x1 - x0) * (y1 - y0) / (w * h)
        # Pick a random partner sample and paste its box region in.
        rand_ind = np.random.randint(len(self))
        rand_img, rand_label = self.dataset[rand_ind]
        rand_label = torch.tensor([rand_label], dtype=torch.long)
        img[:, x0:x1, y0:y1] = rand_img[:, x0:x1, y0:y1]
        # Mix the one-hot labels with the same area ratio.
        label_onehot = (1 - comb_ratio) * label_onehot + comb_ratio * to_onehot(
            rand_label, self.num_classes
        )
        return img, label_onehot.squeeze_(0)
    def __len__(self) -> int:
        return len(self.dataset)
| StarcoderdataPython |
1776906 | <filename>browserdb.py
#!/usr/bin/python
__author__ = 'kilroy'
# (c) 2014, WasHere Consulting, Inc.
# Written for Infinite Skills
import sqlite3

# Dump the distinct cookie hosts (column 1 of moz_cookies) from a Firefox
# cookie database, one per line, in sorted order.
conn = sqlite3.connect("cookies.sqlite")
try:
    # A set de-duplicates in O(1) per row instead of scanning a list, and
    # Connection.execute supplies the cursor implicitly.
    sites = {row[1] for row in conn.execute("SELECT * FROM moz_cookies")}
finally:
    # The original script never closed the connection; always release it.
    conn.close()
for s in sorted(sites):
    print(s)
| StarcoderdataPython |
import ConfigParser
from flask import Flask, jsonify, request
import json
from twilio.rest import TwilioRestClient
# WSGI application instance used by the route decorators below.
app = Flask(__name__)
# Twilio credentials are read once at import time from config.ini
# (section [Twilio]); see sms_send below.
config = ConfigParser.ConfigParser()
config.read('config.ini')
@app.route('/sms/<recipient>', methods=['POST'])
def sms_send(recipient):
    """POST /sms/<recipient>: send the request body as an SMS via Twilio.

    Falls back to a default message when the request body is empty and
    returns the Twilio delivery status as JSON.
    """
    account_sid = config.get('Twilio', 'account_sid')
    auth_token = config.get('Twilio', 'auth_token')
    from_number = config.get('Twilio', 'from_number')
    body = request.get_data() or 'OH HI THERE'
    message = TwilioRestClient(account_sid, auth_token).messages.create(
        to=recipient, from_=from_number, body=body)
    return jsonify(status=message.status)
if __name__ == '__main__':
    # Development server only; debug=True must not be enabled in production.
    app.run(debug=True)
| StarcoderdataPython |
4809340 |
# Test open and close.
${library_name_suffix}_${type_name}.open(test_source)
${library_name_suffix}_${type_name}.close()
# Test open and close a second time to validate clean up on close.
${library_name_suffix}_${type_name}.open(test_source)
${library_name_suffix}_${type_name}.close()
if os.path.isfile(test_source):
with open(test_source, "rb") as file_object:
# Test open_file_object and close.
${library_name_suffix}_${type_name}.open_file_object(file_object)
${library_name_suffix}_${type_name}.close()
# Test open_file_object and close a second time to validate clean up on close.
${library_name_suffix}_${type_name}.open_file_object(file_object)
${library_name_suffix}_${type_name}.close()
# Test open_file_object and close and dereferencing file_object.
${library_name_suffix}_${type_name}.open_file_object(file_object)
del file_object
${library_name_suffix}_${type_name}.close()
| StarcoderdataPython |
3344724 | <reponame>bvezilic/Transformer
from torchtext.data import Field, LabelField
from torchtext.datasets import SST
def load_SST(root_dir):
    """Load the SST sentiment dataset splits and build the vocabularies.

    :param root_dir: directory under which torchtext downloads/caches SST.
    :return: ``(train, val, test, TEXT, LABEL)`` — the three dataset splits
        plus the text and label fields, with vocabularies built from the
        training split only.
    """
    TEXT = Field(lower=True, tokenize="toktok", eos_token="<eos>")
    LABEL = LabelField()
    train, val, test = SST.splits(TEXT, LABEL, root=root_dir)
    # Build vocabularies on train only to avoid leaking val/test tokens.
    TEXT.build_vocab(train)
    LABEL.build_vocab(train)
    return train, val, test, TEXT, LABEL
#!/usr/bin/env python3
import cgi
import hashlib
import io
import json
import multiprocessing.pool
import time
import zlib
from urllib import parse
from wsgiref import util
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
from passlib import hash
def application(env, start_response):
    """WSGI entry point for the remote-web-object demo.

    Routes (relative to the application root):
      * ``/``                          -- serve index.html
      * ``/remotewebobject.js``, ``/uritemplate.js`` -- static scripts
      * ``/demo``                      -- machine-readable API description
      * ``/demo/password``             -- hash a cleartext password (passlib)
      * ``/demo/hash``                 -- SHA-256 / SHA-512 digests
      * ``/demo/crc``                  -- CRC-32, JSON or JSON-Patch response
      * ``/demo/tick``, ``/demo/clock`` -- server-sent event streams
    """
    request_uri = util.request_uri(env)
    application_uri = util.application_uri(env)
    request_method = env.get('REQUEST_METHOD', 'GET').upper()
    # NOTE(review): env.get('CONTENT_TYPE') may be None for bodyless
    # requests; cgi.parse_header(None) would raise -- confirm all clients
    # always send a Content-Type header.
    request_content_type, request_content_args = cgi.parse_header(env.get('CONTENT_TYPE'))
    request_encoding = request_content_args.get('encoding', 'utf-8')
    accept_content_type = env.get('HTTP_ACCEPT')
    request_origin = env.get('HTTP_ORIGIN') or 'localhost'
    # --- response helpers: each starts the response and yields the body ---
    def send_json(obj, content_type='application/json'):
        # Pretty-printed JSON with CORS reflecting the request origin.
        start_response('200 OK', [('Content-Type', content_type), ('Access-Control-Allow-Origin', request_origin)])
        yield json.dumps(obj, indent='    ', sort_keys=True).encode('utf-8')
    def send_error(code, message):
        start_response(code, [('Content-Type', 'text/plain')])
        yield str(message).encode('utf-8')
    def send_file(file_path, content_type='text/html'):
        # Serves small text files relative to the working directory.
        start_response('200 OK', [('Content-Type', content_type)])
        with open(file_path) as file:
            yield file.read().encode('utf-8')
    # Debugging aid: swap in the raw-environment dump instead of routing.
    # return dump(env, start_response)
    # --- query-string helpers ---
    args = parse.parse_qs(env['QUERY_STRING'])
    def arg(key):
        # First value of a query-string parameter, or None when absent.
        if (key in args):
            return args[key][0]
        return None
    def intarg(key):
        # First value of a query-string parameter as int, or None.
        if (key in args):
            try:
                return int(args[key][0])
            except (ValueError):
                pass
        return None
    # --- request-body parsing: JSON, urlencoded or multipart forms ---
    try:
        request_content_length = int(env.get('CONTENT_LENGTH', 0))
    except (ValueError):
        request_content_length = 0
    request_body = env['wsgi.input'].read(request_content_length).decode(request_encoding)
    if (request_content_type.endswith('/json') or request_content_type.endswith('+json')):
        request_data = json.loads(request_body)
    elif ('application/x-www-form-urlencoded' == request_content_type):
        request_data = parse.parse_qs(request_body)
    elif ('multipart/form-data' == request_content_type):
        # cgi.parse_multipart expects the boundary as bytes.
        if ('boundary' in request_content_args):
            request_content_args['boundary'] = request_content_args['boundary'].encode('ascii')
        request_data = cgi.parse_multipart(io.BytesIO(request_body.encode(request_encoding)), request_content_args)
    else:
        request_data = {}
    def request(key):
        # Body field as a string, unwrapping one-element lists and bytes.
        value = request_data.get(key)
        if (not isinstance(value, str) and hasattr(value, '__getitem__')):
            value = value[0]
        if (isinstance(value, bytes)):
            value = value.decode(request_encoding)
        return value
    def request_int(key):
        # Body field as an int, or None when absent/unparseable.
        value = request(key)
        if (value is not None):
            try:
                return int(value)
            except (ValueError):
                pass
        return None
    # print(request_content_type)
    # print(request_encoding)
    # print(repr(request_data))
    # print(repr(args))
    # --- routing: shift_path_info consumes one path segment per call ---
    path = util.shift_path_info(env)
    if ('demo' == path):
        path = util.shift_path_info(env)
        if ('password' == path):
            # /demo/password -- hash with the selected passlib scheme, or
            # list the supported schemes when no algorithm is given.
            # NOTE(review): passlib renamed .encrypt to .hash in 1.7; the
            # calls below rely on the deprecated alias -- confirm the pinned
            # passlib version still provides it.
            if ('algorithm' in args):
                cleartext = arg('cleartext')
                if (not cleartext):
                    return send_error('400 Bad Request', 'No cleartext specified')
                try:
                    if ('md5_crypt' in args['algorithm']):
                        return send_json(hash.md5_crypt.encrypt(cleartext, salt=arg('salt')))
                    elif ('bcrypt' in args['algorithm']):
                        return send_json(hash.bcrypt.encrypt(cleartext, salt=arg('salt'), rounds=intarg('rounds'), ident='2b'))
                    elif ('sha1_crypt' in args['algorithm']):
                        return send_json(hash.sha1_crypt.encrypt(cleartext, salt=arg('salt'), rounds=intarg('rounds')))
                    elif ('sun_md5_crypt' in args['algorithm']):
                        return send_json(hash.sun_md5_crypt.encrypt(cleartext, salt=arg('salt'), rounds=intarg('rounds')))
                    elif ('sha256_crypt' in args['algorithm']):
                        return send_json(hash.sha256_crypt.encrypt(cleartext, salt=arg('salt'), rounds=intarg('rounds')))
                    elif ('sha512_crypt' in args['algorithm']):
                        return send_json(hash.sha512_crypt.encrypt(cleartext, salt=arg('salt'), rounds=intarg('rounds')))
                    else:
                        return send_error('400 Bad Request', 'Unknown algorithm')
                except Exception as exc:
                    # Bad salt/rounds values raise inside passlib.
                    return send_error('400 Bad Request', exc)
            else:
                return send_json(['md5_crypt', 'bcrypt', 'sha1_crypt', 'sun_md5_crypt', 'sha256_crypt', 'sha512_crypt'])
        elif ('hash' == path):
            # /demo/hash -- digest of 'data' (query string on GET, body on
            # POST/PUT), or list the supported algorithms.
            if ('algorithm' in args):
                data = None
                if ('GET' == request_method):
                    data = arg('data')
                elif (request_method in ('POST', 'PUT')):
                    data = request('data')
                if (not data):
                    return send_error('400 Bad Request', 'No data specified')
                if ('sha256' == arg('algorithm')):
                    hasher = hashlib.sha256()
                    hasher.update(data.encode('utf-8'))
                    return send_json(hasher.hexdigest())
                elif ('sha512' == arg('algorithm')):
                    hasher = hashlib.sha512()
                    hasher.update(data.encode('utf-8'))
                    return send_json(hasher.hexdigest())
                else:
                    return send_error('400 Bad Request', 'Unknown algorithm')
            else:
                return send_json(['sha256', 'sha512'])
        elif ('crc' == path):
            # /demo/crc -- CRC-32 of 'data', optionally seeded by 'value'.
            data = arg('data')
            # NOTE(review): ``value`` stays unbound for request methods other
            # than GET/POST/PUT, which would raise below -- confirm only
            # these methods reach this route.
            if ('GET' == request_method):
                value = intarg('value')
            elif (request_method in ('POST', 'PUT')):
                value = request_int('value')
            if (not data):
                return send_error('400 Bad Request', 'No data specified')
            crc = zlib.crc32(data.encode('utf-8'), value) if (value is not None) else zlib.crc32(data.encode('utf-8'))
            # Clients may ask for a JSON-Patch delta instead of full state.
            if ('application/json-patch+json' == accept_content_type):
                patch = [
                    {'op': 'replace', 'path': '/public/output', 'value': crc},
                    {'op': 'replace', 'path': '/readonly/readonlyOutput', 'value': crc},
                    {'op': 'replace', 'path': '/private/value', 'value': crc},
                    {'op': 'replace', 'path': '/return', 'value': crc},
                ]
                return send_json(patch, 'application/json-patch+json')
            else:
                # Full remote-object description with the CRC in its state.
                api = {
                    'resources': {
                        'crc': {
                            'hrefTemplate': 'crc/{?data}',
                            'hrefVars': {
                                'data': 'param/hash/data'
                            },
                            'hints': {
                                'allow': ['PUT'],
                                'formats': {
                                    'application/json': {},
                                    'application/prs.remotewebobjectdemo.crc.v1+json-remote': {}
                                }
                            },
                            'functions': {
                                'update': {
                                    'arguments': ['data'],
                                    'format': 'application/json-patch+json',
                                    'method': 'PUT',
                                    'requestBody': ['value']
                                }
                            }
                        }
                    },
                    'state': {
                        'public': {
                            'output': crc
                        },
                        'private': {
                            'value': crc
                        },
                        'readonly': {
                            "readonlyOutput": crc
                        },
                    }
                }
                return send_json(api, 'application/prs.remotewebobjectdemo.crc.v1+json-remote')
        elif ('tick' == path):
            # /demo/tick -- server-sent events: one "tick" per second, forever.
            start_response('200 OK', [
                ('Content-Type', 'text/event-stream; charset=utf-8'),
                ('Access-Control-Allow-Origin', request_origin),
                ('Cache-Control', 'no-cache'),
            ])
            def do_tick():
                tick = 0
                while True:
                    time.sleep(1)
                    tick += 1
                    yield "event: tick\ndata: {tick}\n\n".format(tick=tick).encode('utf-8')
            return do_tick()
        elif ('clock' == path):
            # /demo/clock -- server-sent events: wall clock, with a "minute"
            # event on each :00 second and "second" otherwise.
            start_response('200 OK', [
                ('Content-Type', 'text/event-stream; charset=utf-8'),
                ('Access-Control-Allow-Origin', request_origin),
                ('Cache-Control', 'no-cache'),
            ])
            def do_clock():
                while True:
                    time.sleep(1)
                    now = time.localtime()
                    if (0 == now.tm_sec):
                        event = 'minute'
                    else:
                        event = 'second'
                    data = {'hour': now.tm_hour, 'minute': now.tm_min, 'second': now.tm_sec}
                    yield "event: {event}\ndata: {data}\n\n".format(event=event, data=json.dumps(data)).encode('utf-8')
            return do_clock()
        elif (not path):
            # /demo -- top-level machine-readable description of the API.
            api = {
                'resources': {
                    'password': {
                        'hrefTemplate': 'password/{?cleartext,algorithm,salt,rounds}',
                        'hrefVars': {
                            'cleartext': 'param/pass/cleartext',
                            'algorithm': 'param/pass/algorithm',
                            'salt': 'param/pass/salt',
                            'rounds': 'param/pass/rounds'
                        },
                        'hints': {
                            'allow': ['GET'],
                            'formats': {
                                'application/json': {},
                                'application/prs.remotewebobjectdemo.password.v1+json': {}
                            }
                        },
                        'functions': {
                            'getAlgorithms': {
                                'arguments': [],
                                'format': 'application/prs.remotewebobjectdemo.password.v1+json',
                                'method': 'GET'
                            },
                            'hashPassword': {
                                'arguments': ['cleartext', 'algorithm', 'salt', 'rounds'],
                                'format': 'application/prs.remotewebobjectdemo.password.v1+json',
                                'method': 'GET'
                            }
                        }
                    },
                    'hash': {
                        'hrefTemplate': 'hash/{?algorithm}',
                        'hrefVars': {
                            'data': 'param/hash/data',
                            'algorithm': 'param/hash/algorithm'
                        },
                        'hints': {
                            'allow': ['GET'],
                            'formats': {
                                'application/json': {},
                                'application/prs.remotewebobjectdemo.password.v1+json': {}
                            }
                        },
                        'functions': {
                            'hash256': {
                                'arguments': ['data'],
                                'format': 'application/prs.remotewebobjectdemo.hash.v1+json',
                                'requestFormat': 'application/x-www-form-urlencoded',
                                'method': 'POST',
                                'defaults': {
                                    'algorithm': 'sha256'
                                }
                            },
                            'hash512': {
                                'arguments': ['data'],
                                'format': 'application/prs.remotewebobjectdemo.hash.v1+json',
                                'requestFormat': 'multipart/form-data',
                                'method': 'PUT',
                                'defaults': {
                                    'algorithm': 'sha512'
                                }
                            }
                        }
                    },
                    'crc': {
                        'hrefTemplate': 'crc/{?data,value}',
                        'hrefVars': {
                            'data': 'param/hash/data',
                            'value': 'param/hash/value'
                        },
                        'hints': {
                            'allow': ['GET'],
                            'formats': {
                                'application/json': {},
                                'application/prs.remotewebobjectdemo.crc.v1+json-remote': {}
                            }
                        },
                        'functions': {
                            'crc32': {
                                'arguments': ['data'],
                                'format': 'application/prs.remotewebobjectdemo.crc.v1+json-remote',
                                'method': 'GET'
                            }
                        }
                    },
                    'tick': {
                        'href': 'tick',
                        'events': {
                            'tick': {}
                        }
                    },
                    'clock': {
                        'href': 'clock',
                        'events': {
                            'second': {},
                            'minute': {}
                        }
                    }
                }
            }
            return send_json(api, 'application/prs.remotewebobjectdemo.crc.v1+json-remote')
        else:
            return send_error('404 Not Found', 'Not found')
    elif ('' == path):
        return send_file('index.html')
    elif ('remotewebobject.js' == path):
        return send_file('remotewebobject.js', 'application/javascript')
    elif ('uritemplate.js' == path):
        return send_file('uritemplate.js', 'application/javascript')
    else:
        return send_error('404 Not Found', 'Not found')
    return send_error('500 Internal Server Error', 'unhandled code')
def dump(env, start_response):
    """WSGI debug application: stream the request environment back as text.

    Yields bytes chunks (a generator-style WSGI app): every env key/value,
    the reconstructed request/application URIs, each path segment, and the
    parsed query-string arguments.
    """
    start_response('200 OK', [('Content-Type', 'text/html')])
    yield b'<pre>'
    # One "key:value" line per WSGI/CGI environment variable.
    for key in env:
        yield key.encode('utf-8') + b':' + str(env[key]).encode('utf-8') + b'\n'
    # Blank separator line after the environment dump.
    yield b'\n'
    yield b'request_uri=' + util.request_uri(env).encode('utf-8') + b'\n'
    yield b'application_uri=' + util.application_uri(env).encode('utf-8') + b'\n'
    # NOTE(review): shift_path_info mutates env (moves segments from
    # PATH_INFO to SCRIPT_NAME); presumably intentional for this debug view.
    path = util.shift_path_info(env)
    while (path):
        yield b'path=' + path.encode('utf-8') + b'\n'
        path = util.shift_path_info(env)
    args = parse.parse_qs(env['QUERY_STRING'])
    # Each query arg on its own chunk; multiple values are space-joined.
    for key in args:
        yield key.encode('utf-8') + b': ' + b' '.join([value.encode('utf-8') for value in args[key]])
    # Return value of a generator only sets StopIteration.value; harmless.
    return []
class ThreadPoolWSGIServer(WSGIServer):
    '''WSGI-compliant HTTP server. Dispatches requests to a pool of threads.'''

    def __init__(self, thread_count=None, *args, **kwargs):
        '''If 'thread_count' == None, we'll use multiprocessing.cpu_count() threads.'''
        WSGIServer.__init__(self, *args, **kwargs)
        self.thread_count = thread_count
        # ThreadPool(None) defaults to multiprocessing.cpu_count() workers.
        self.pool = multiprocessing.pool.ThreadPool(self.thread_count)

    # Inspired by SocketServer.ThreadingMixIn.
    def process_request_thread(self, request, client_address):
        """Handle one request on a pool thread, always shutting the socket."""
        try:
            self.finish_request(request, client_address)
            self.shutdown_request(request)
        except Exception:
            # Bug fix: was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt raised inside worker threads.
            self.handle_error(request, client_address)
            self.shutdown_request(request)

    def process_request(self, request, client_address):
        """Hand the accepted request off to the thread pool (non-blocking)."""
        self.pool.apply_async(self.process_request_thread,
                              args=(request, client_address))
def make_server(host, port, app, thread_count=None, handler_class=WSGIRequestHandler):
    '''Create a new WSGI server listening on `host` and `port` for `app`'''
    # thread_count is forwarded to ThreadPoolWSGIServer; None means one
    # worker per CPU (multiprocessing.pool.ThreadPool default).
    httpd = ThreadPoolWSGIServer(thread_count, (host, port), handler_class)
    httpd.set_app(app)
    return httpd
if __name__ == "__main__": # called from the command line
httpd = make_server('localhost', 8051, application)
httpd.serve_forever()
| StarcoderdataPython |
1715591 | # -*- coding:utf-8 -*-
"""
【说明】
(1)由于pyspark不提供Hbase相关api,本样例使用Python调用Java的方式实现
(2)如果使用yarn-client模式运行,请确认Spark2x客户端Spark2x/spark/conf/spark-defaults.conf中
spark.yarn.security.credentials.hbase.enabled参数配置为true
"""
import sys
from py4j.java_gateway import java_import
from pyspark.sql import SparkSession
if __name__ == "__main__":
if len(sys.argv) < 4:
print("Usage: SparkOnStreamingToHbasePythonKafka10.py <checkPointDir> <topics> <brokers> <tableName>")
exit(-1)
# 创建SparkSession
spark = SparkSession\
.builder\
.appName("SparkStreamingtoHbasePythonkafka10") \
.getOrCreate()
# 向sc._jvm中导入要运行的类
java_import(spark._jvm, 'com.huawei.bigdata.spark.examples.streaming.SparkOnStreamingToHbasePythonKafka10')
# 创建类实例并调用方法
spark._jvm.SparkOnStreamingToHbasePythonKafka10().streamingToHbase(spark._jsc, sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]) | StarcoderdataPython |
4808604 | <gh_stars>1-10
'''
area_curves.py
Find the area enclosed by two curves between two points
'''
from sympy import Integral, Symbol, SympifyError, sympify
def find_area(f1x, f2x, var, a, b):
    """Return the area between curves f1x (upper) and f2x (lower) on [a, b]."""
    enclosed = Integral(f1x - f2x, (var, a, b)).doit()
    return enclosed
if __name__ == '__main__':
    # Interactive driver: read two curves, a variable name and bounds,
    # then print the enclosed area.
    f1x = input('Enter the upper function in one variable: ')
    # Bug fix: prompt previously read "Enter the lower upper function ...".
    f2x = input('Enter the lower function in one variable: ')
    var = input('Enter the variable: ')
    l = float(input('Enter the lower bound of the enclosed region: '))
    u = float(input('Enter the upper bound of the enclosed region: '))
    try:
        f1x = sympify(f1x)
        f2x = sympify(f2x)
    except SympifyError:
        print('One of the functions entered is invalid')
    else:
        var = Symbol(var)
        print('Area enclosed by {0} and {1} is: {2} '.format(
            f1x, f2x, find_area(f1x, f2x, var, l, u)))
1688758 | # Copyright 2017 Adobe. All rights reserved.
import ast
import hashlib
import os
import re
from collections import OrderedDict
from fontTools.misc import etree as ET
from fontTools.misc import plistlib
from fontTools.ufoLib import UFOReader
from fontTools.ufoLib.glifLib import Glyph
from psautohint.ufoFont import (norm_float, HashPointPen,
UFOFontData as psahUFOFontData)
from afdko import convertfonttocid, fdkutils
__version__ = '1.34.6'
__doc__ = """
ufotools.py v1.34.6 Sep 24 2019
This module supports using the Adobe FDK tools which operate on 'bez'
files with UFO fonts. It provides low level utilities to manipulate UFO
data without fully parsing and instantiating UFO objects, and without
requiring that the AFDKO contain the robofab libraries.
Modified in Nov 2014, when AFDKO added the robofab libraries. It can now
be used with UFO fonts only to support the hash mechanism.
Developed in order to support checkoutlines and autohint, the code
supports two main functions:
- convert between UFO GLIF and bez formats
- keep a history of processing in a hash map, so that the (lengthy)
processing by autohint and checkoutlines can be avoided if the glyph has
already been processed, and the source data has not changed.
The basic model is:
- read GLIF file
- transform GLIF XML element to bez file
- call FDK tool on bez file
- transform new bez file to GLIF XML element with new data, and save in list
After all glyphs are done save all the new GLIF XML elements to GLIF
files, and update the hash map.
A complication in the Adobe UFO workflow comes from the fact we want to
make sure that checkoutlines and autohint have been run on each glyph in
a UFO font, when building an OTF font from the UFO font. We need to run
checkoutlines, because we no longer remove overlaps from source UFO font
data, because this can make revising a glyph much easier. We need to run
autohint, because the glyphs must be hinted after checkoutlines is run,
and in any case we want all glyphs to have been autohinted. The problem
with this is that it can take a minute or two to run autohint or
checkoutlines on a 2K-glyph font. The way we avoid this is to make and
keep a hash of the source glyph drawing operators for each tool. When
the tool is run, it calculates a hash of the source glyph, and compares
it to the saved hash. If these are the same, the tool can skip the
glyph. This saves a lot of time: if checkoutlines and autohint are run
on all glyphs in a font, then a second pass is under 2 seconds.
Another issue is that since we no longer remove overlaps from the source
glyph files, checkoutlines must write any edited glyph data to a
different layer in order to not destroy the source data. The ufotools
defines an Adobe-specific glyph layer for processed glyphs, named
"glyphs.com.adobe.type.processedGlyphs".
checkoutlines writes new glyph files to the processed glyphs layer only
when it makes a change to the glyph data.
When the autohint program is run, the ufotools must be able to tell
whether checkoutlines has been run and has altered a glyph: if so, the
input file needs to be from the processed glyph layer, else it needs to
be from the default glyph layer.
The way the hashmap works is that we keep an entry for every glyph that
has been processed, identified by a hash of its marking path data. Each
entry contains:
- a hash of the glyph point coordinates, from the default layer.
This is set after a program has been run.
- a history list: a list of the names of each program that has been run,
in order.
- an editStatus flag.
Altered GLIF data is always written to the Adobe processed glyph layer. The
program may or may not have altered the outline data. For example, autohint
adds private hint data, and adds names to points, but does not change any
paths.
If the stored hash for the glyph does not exist, the ufotools lib will save the
new hash in the hash map entry and will set the history list to contain just
the current program. The program will read the glyph from the default layer.
If the stored hash matches the hash for the current glyph file in the default
layer, and the current program name is in the history list,then ufotools
will return "skip=1", and the calling program may skip the glyph.
If the stored hash matches the hash for the current glyph file in the default
layer, and the current program name is not in the history list, then the
ufotools will return "skip=0". If the font object field 'usedProcessedLayer' is
set True, the program will read the glyph from the from the Adobe processed
layer, if it exists, else it will always read from the default layer.
If the hash differs between the hash map entry and the current glyph in the
default layer, and usedProcessedLayer is False, then ufotools will return
"skip=0". If usedProcessedLayer is True, then the program will consult the list
of required programs. If any of these are in the history list, then the program
will report an error and return skip =1, else it will return skip=1. The
program will then save the new hash in the hash map entry and reset the history
list to contain just the current program. If the old and new hash match, but
the program name is not in the history list, then the ufotools will not skip
the glyph, and will add the program name to the history list.
The only tools using this are, at the moment, checkoutlines, checkoutlinesufo
and autohint. checkoutlines and checkoutlinesufo use the hash map to skip
processing only when being used to edit glyphs, not when reporting them.
checkoutlines necessarily flattens any components in the source glyph file to
actual outlines. autohint adds point names, and saves the hint data as a
private data in the new GLIF file.
autohint saves the hint data in the GLIF private data area, /lib/dict,
as a key and data pair. See below for the format.
autohint started with _hintFormat1_, a reasonably compact XML representation of
the data. In Sep 2015, autohint switched to _hintFormat2_ in order to be plist
compatible. This was necessary in order to be compatible with the UFO spec, but
was driven more immediately by the fact that the UFO font file normalization
tools stripped out the _hintFormat1_ hint data as invalid elements.
"""
_hintFormat1_ = """
Deprecated. See _hintFormat2_ below.
A <hintset> element identifies a specific point by its name, and
describes a new set of stem hints which should be applied before the
specific point.
A <flex> element identifies a specific point by its name. The point is
the first point of a curve. The presence of the <flex> element is a
processing suggestion, that the curve and its successor curve should
be converted to a flex operator.
One challenge in applying the hintset and flex elements is that in the
GLIF format, there is no explicit start and end operator: the first path
operator is both the end and the start of the path. I have chosen to
convert this to T1 by taking the first path operator, and making it a
move-to. I then also use it as the last path operator. An exception is a
line-to; in T1, this is omitted, as it is implied by the need to close
the path. Hence, if a hintset references the first operator, there is a
potential ambiguity: should it be applied before the T1 move-to, or
before the final T1 path operator? The logic here applies it before the
move-to only.
<glyph>
...
<lib>
<dict>
<key><com.adobe.type.autohint><key>
<data>
<hintSetList>
<hintset pointTag="point name">
(<hstem pos="<decimal value>" width="decimal value" />)*
(<vstem pos="<decimal value>" width="decimal value" />)*
<!-- where n1-5 are decimal values -->
<hstem3 stem3List="n0,n1,n2,n3,n4,n5" />*
<!-- where n1-5 are decimal values -->
<vstem3 stem3List="n0,n1,n2,n3,n4,n5" />*
</hintset>*
(<hintSetList>*
(<hintset pointIndex="positive integer">
(<stemindex>positive integer</stemindex>)+
</hintset>)+
</hintSetList>)*
<flexList>
<flex pointTag="point Name" />
</flexList>*
</hintSetList>
</data>
</dict>
</lib>
</glyph>
Example from "B" in SourceCodePro-Regular
<key><com.adobe.type.autohint><key>
<data>
<hintSetList id="64bf4987f05ced2a50195f971cd924984047eb1d79c8c43e6a0054f59cc85
dea23a49deb20946a4ea84840534363f7a13cca31a81b1e7e33c832185173369086">
<hintset pointTag="hintSet0000">
<hstem pos="0" width="28" />
<hstem pos="338" width="28" />
<hstem pos="632" width="28" />
<vstem pos="100" width="32" />
<vstem pos="496" width="32" />
</hintset>
<hintset pointTag="hintSet0005">
<hstem pos="0" width="28" />
<hstem pos="338" width="28" />
<hstem pos="632" width="28" />
<vstem pos="100" width="32" />
<vstem pos="454" width="32" />
<vstem pos="496" width="32" />
</hintset>
<hintset pointTag="hintSet0016">
<hstem pos="0" width="28" />
<hstem pos="338" width="28" />
<hstem pos="632" width="28" />
<vstem pos="100" width="32" />
<vstem pos="496" width="32" />
</hintset>
</hintSetList>
</data>
"""
_hintFormat2_ = """
A <dict> element in the hintSetList array identifies a specific point by its
name, and describes a new set of stem hints which should be applied before the
specific point.
A <string> element in the flexList identifies a specific point by its name.
The point is the first point of a curve. The presence of the element is a
processing suggestion, that the curve and its successor curve should be
converted to a flex operator.
One challenge in applying the hintSetList and flexList elements is that in
the GLIF format, there is no explicit start and end operator: the first path
operator is both the end and the start of the path. I have chosen to convert
this to T1 by taking the first path operator, and making it a move-to. I then
also use it as the last path operator. An exception is a line-to; in T1, this
is omitted, as it is implied by the need to close the path. Hence, if a hintset
references the first operator, there is a potential ambiguity: should it be
applied before the T1 move-to, or before the final T1 path operator? The logic
here applies it before the move-to only.
<glyph>
...
<lib>
<dict>
<key><com.adobe.type.autohint></key>
<dict>
<key>id</key>
<string> <fingerprint for glyph> </string>
<key>hintSetList</key>
<array>
<dict>
<key>pointTag</key>
<string> <point name> </string>
<key>stems</key>
<array>
<string>hstem <position value> <width value></string>*
<string>vstem <position value> <width value></string>*
<string>hstem3 <position value 0>...<position value 5>
</string>*
<string>vstem3 <position value 0>...<position value 5>
</string>*
</array>
</dict>*
</array>
<key>flexList</key>*
<array>
<string><point name></string>+
</array>
</dict>
</dict>
</lib>
</glyph>
Example from "B" in SourceCodePro-Regular
<key><com.adobe.type.autohint><key>
<dict>
<key>id</key>
<string>64bf4987f05ced2a50195f971cd924984047eb1d79c8c43e6a0054f59cc85dea23
a49deb20946a4ea84840534363f7a13cca31a81b1e7e33c832185173369086</string>
<key>hintSetList</key>
<array>
<dict>
<key>pointTag</key>
<string>hintSet0000</string>
<key>stems</key>
<array>
<string>hstem 338 28</string>
<string>hstem 632 28</string>
<string>hstem 100 32</string>
<string>hstem 496 32</string>
</array>
</dict>
<dict>
<key>pointTag</key>
<string>hintSet0005</string>
<key>stems</key>
<array>
<string>hstem 0 28</string>
<string>hstem 338 28</string>
<string>hstem 632 28</string>
<string>hstem 100 32</string>
<string>hstem 454 32</string>
<string>hstem 496 32</string>
</array>
</dict>
<dict>
<key>pointTag</key>
<string>hintSet0016</string>
<key>stems</key>
<array>
<string>hstem 0 28</string>
<string>hstem 338 28</string>
<string>hstem 632 28</string>
<string>hstem 100 32</string>
<string>hstem 496 32</string>
</array>
</dict>
</array>
<dict>
"""
# Short aliases for the fontTools etree API used throughout this module.
XML = ET.XML
XMLElement = ET.Element
xmlToString = ET.tostring
# Module-wide debug flag; enables debugMsg() output when non-zero.
debug = 0
def debugMsg(*args):
    """Print the raw args tuple when the module-level `debug` flag is set."""
    if debug:
        print(args)
# UFO names
# Layer names and their glyph directories.
kDefaultGlyphsLayerName = "public.default"
kDefaultGlyphsLayer = "glyphs"
kProcessedGlyphsLayerName = "com.adobe.type.processedglyphs"
kProcessedGlyphsLayer = "glyphs.%s" % kProcessedGlyphsLayerName
# [name, directory] pairs as written to layercontents.plist.
DEFAULT_LAYER_ENTRY = [kDefaultGlyphsLayerName, kDefaultGlyphsLayer]
PROCESSED_LAYER_ENTRY = [kProcessedGlyphsLayerName, kProcessedGlyphsLayer]
# Standard UFO file names and plist keys.
kFontInfoName = "fontinfo.plist"
kContentsName = "contents.plist"
kLibName = "lib.plist"
kPublicGlyphOrderKey = "public.glyphOrder"
# Name and version of the processed-glyph hash map stored under data/.
kAdobeDomainPrefix = "com.adobe.type"
kAdobHashMapName = "%s.processedHashMap" % kAdobeDomainPrefix
kAdobHashMapVersionName = "hashMapVersion"
kAdobHashMapVersion = (1, 0)  # If major version differs, do not use.
# Program names recorded in each glyph's hash-map history list.
kAutohintName = "autohint"
kCheckOutlineName = "checkOutlines"
# NOTE(review): same value as kCheckOutlineName — looks like it was meant
# to be the checkoutlinesufo program name; confirm at call sites.
kCheckOutlineNameUFO = "checkOutlines"
kOutlinePattern = re.compile(r"<outline.+?outline>", re.DOTALL)
# Element/attribute names used in the GLIF hint data (see _hintFormat2_).
kStemHintsName = "stemhints"
kStemListName = "stemList"
kStemPosName = "pos"
kStemWidthName = "width"
kHStemName = "hstem"
kVStemName = "vstem"
kHStem3Name = "hstem3"
kVStem3Name = "vstem3"
kStem3Pos = "stem3List"
kHintSetListName = "hintSetList"
# NOTE(review): duplicates kHintSetListName's value; the flex list element
# is named "flexList" elsewhere (kFlexIndexListName) — verify before use.
kFlexListName = "hintSetList"
kHintSetName = "hintset"
kBaseFlexName = "flexCurve"
kPointTag = "pointTag"
kStemIndexName = "stemindex"
kFlexIndexListName = "flexList"
# lib.plist/GLIF keys under which autohint stores its private data.
kHintDomainName1 = "com.adobe.type.autohint"
kHintDomainName2 = "com.adobe.type.autohint.v2"
kPointName = "name"
# Hint stuff
kStackLimit = 46  # T1/T2 hint stack depth limit.
kStemLimit = 96   # maximum number of stems.
kHashIdPlaceholder = "HASH_ID_PLACEHOLDER"
# Default (identity) component transform attributes for GLIF <component>.
COMP_TRANSFORM = OrderedDict([
    ('xScale', '1'),
    ('xyScale', '0'),
    ('yxScale', '0'),
    ('yScale', '1'),
    ('xOffset', '0'),
    ('yOffset', '0')
])
class UFOParseError(Exception):
    """Raised when UFO data (plists, GLIF files, hash map) cannot be parsed."""
    pass
class BezParseError(Exception):
    """Raised when bez-format outline data cannot be parsed."""
    pass
class UFOFontData(object):
def __init__(self, parentPath, useHashMap, programName):
    """Per-font state for processing a UFO with the FDK tools.

    parentPath: path to the .ufo directory.
    useHashMap: when False, the processed-hash map is neither read nor
        consulted (caller is in report-only mode).
    programName: tool name recorded in each glyph's processing history.
    """
    self.parentPath = parentPath
    self.glyphMap = {}
    self.processedLayerGlyphMap = {}
    self.newGlyphMap = {}
    self.glyphList = []
    self.fontInfo = None
    # If False, will skip reading hashmap and
    # testing to see if glyph can be skipped.
    # Should be used only when calling program is
    # running in report mode only, and not changing
    # any glyph data.
    self.useHashMap = useHashMap
    # Used to skip getting glyph data when glyph
    # hash matches hash of current glyph data.
    self.hashMap = {}
    self.fontDict = None
    self.programName = programName
    self.curSrcDir = None
    self.hashMapChanged = False
    self.glyphDefaultDir = os.path.join(self.parentPath, "glyphs")
    self.glyphLayerDir = os.path.join(self.parentPath,
                                      kProcessedGlyphsLayer)
    self.glyphWriteDir = self.glyphLayerDir
    self.historyList = []
    self.requiredHistory = []  # See documentation above.
    # If False, then read data only from the default layer;
    # else read glyphs from processed layer, if it exists.
    self.useProcessedLayer = False
    # If True, then write data to the default layer
    self.writeToDefaultLayer = False
    # If True, then do not skip any glyphs.
    self.doAll = False
    # track whether checkSkipGLyph has deleted an
    # out-of-date glyph from the processed glyph layer
    self.deletedGlyph = False
    # If true, do NOT round x,y values when processing
    self.allowDecimalCoords = False
    self.glyphSet = UFOReader(self.parentPath,
                              validate=False).getGlyphSet(None)
def getUnitsPerEm(self):
unitsPerEm = 1000
if self.fontInfo is None:
self.loadFontInfo()
if self.fontInfo:
unitsPerEm = int(self.fontInfo["unitsPerEm"])
return unitsPerEm
def getPSName(self):
psName = "PSName-Undefined"
if self.fontInfo is None:
self.loadFontInfo()
if self.fontInfo:
psName = self.fontInfo.get("postscriptFontName", psName)
return psName
@staticmethod
def isCID():
    # Always 0: UFO sources are not CID-keyed (presumably distinguishes
    # them from CID workflows at the call sites — confirm there).
    return 0
def checkForHints(self, glyphName):
hasHints = 0
glyphPath = self.getGlyphProcessedPath(glyphName)
if glyphPath and os.path.exists(glyphPath):
with open(glyphPath, "r", encoding='utf-8') as fp:
data = fp.read()
if "hintSetList" in data:
hasHints = 1
return hasHints
def convertToBez(self, glyphName, removeHints, beVerbose, doAll=0):
    """Return (bezString, width, hasHints) for the named glyph.

    The glyph outline is converted to bez text for the FDK tools;
    hasHints is 1 if the processed-layer GLIF already holds hint data.
    """
    # XXX unused args: removeHints, beVerbose
    # convertGLIFToBez does not yet support
    # hints - no need for removeHints arg.
    bezString, width = convertGLIFToBez(self, glyphName, doAll)
    hasHints = self.checkForHints(glyphName)
    return bezString, width, hasHints
def updateFromBez(self, bezData, glyphName, width, beVerbose):
    """Convert bez data back to GLIF XML and queue it for saveChanges()."""
    # XXX unused args: width, beVerbose
    # For UFO font, we don't use the width parameter:
    # it is carried over from the input glif file.
    glifXML = convertBezToGLIF(self, glyphName, bezData)
    self.newGlyphMap[glyphName] = glifXML
def saveChanges(self):
    """Write all queued glyphs to the write layer and persist the hash map.

    Updates layercontents.plist and the layer's contents.plist, writes
    each new GLIF (stripping zero-valued <advance> attributes), and, when
    writing to the default layer, recalculates the glyph hashes.
    """
    if not os.path.exists(self.glyphWriteDir):
        os.makedirs(self.glyphWriteDir)
    layerContentsFilePath = os.path.join(
        self.parentPath, "layercontents.plist")
    self.updateLayerContents(layerContentsFilePath)
    glyphContentsFilePath = os.path.join(
        self.glyphWriteDir, "contents.plist")
    self.updateLayerGlyphContents(glyphContentsFilePath, self.newGlyphMap)
    for glyphName, glifXML in self.newGlyphMap.items():
        glyphPath = self.getWriteGlyphPath(glyphName)
        with open(glyphPath, "wb") as fp:
            et = ET.ElementTree(glifXML)
            # check for and remove explicit 0 advance 'height' or 'width'
            # or entire <advance> element if both are 0/not present.
            advance = et.find("advance")
            if advance is not None:
                ht = float(advance.get('height', '-1'))
                wx = float(advance.get('width', '-1'))
                if ht == 0:
                    del advance.attrib['height']
                    ht = -1
                if wx == 0:
                    del advance.attrib['width']
                    wx = -1
                if ht == wx == -1:
                    # empty element; delete.
                    # Note, et.remove(advance) doesn't work; this does:
                    # NOTE(review): getparent() is an lxml API — assumes
                    # fontTools' etree is lxml-backed here; confirm.
                    advance.getparent().remove(advance)
            et.write(fp, encoding="UTF-8", xml_declaration=True)
        # Recalculate glyph hashes
        if self.writeToDefaultLayer:
            glyph = Glyph(glyphName, self.glyphSet)
            glyph.width = _get_glyph_width(glyph)
            self.recalcHashEntry(glyphName, glyph)
    if self.hashMapChanged:
        self.writeHashMap()
def getWriteGlyphPath(self, glyphName):
if len(self.glyphMap) == 0:
self.loadGlyphMap()
glyphFileName = self.glyphMap[glyphName]
if not self.writeToDefaultLayer and (
glyphName in self.processedLayerGlyphMap):
glyphFileName = self.processedLayerGlyphMap[glyphName]
return os.path.join(self.glyphWriteDir, glyphFileName)
def getGlyphMap(self):
if len(self.glyphMap) == 0:
self.loadGlyphMap()
return self.glyphMap
def readHashMap(self):
    """Load the processed-glyph hash map from the UFO's data directory.

    Missing file or an older/missing version entry yields a fresh map
    holding only the current version marker; a newer major version is a
    hard error (the FDK must be updated to read it).
    """
    hashPath = os.path.join(self.parentPath, "data", kAdobHashMapName)
    if os.path.exists(hashPath):
        with open(hashPath, "r", encoding='utf-8') as fp:
            data = fp.read()
        # The file is a Python literal dict (see writeHashMap).
        newMap = ast.literal_eval(data)
    else:
        newMap = {kAdobHashMapVersionName: kAdobHashMapVersion}
    try:
        version = newMap[kAdobHashMapVersionName]
        if version[0] > kAdobHashMapVersion[0]:
            raise UFOParseError("Hash map version is newer than program. "
                                "Please update the FDK")
        elif version[0] < kAdobHashMapVersion[0]:
            print("Updating hash map: was older version")
            newMap = {kAdobHashMapVersionName: kAdobHashMapVersion}
    except KeyError:
        print("Updating hash map: was older version")
        newMap = {kAdobHashMapVersionName: kAdobHashMapVersion}
    self.hashMap = newMap
def writeHashMap(self):
    """Serialize self.hashMap to data/<hash map name> as a Python literal.

    One "'glyphName': entry," line per glyph, sorted by name, so diffs
    stay stable. No-op when the map is empty.
    """
    hashMap = self.hashMap
    if len(hashMap) == 0:
        return  # no glyphs were processed.
    hashDir = os.path.join(self.parentPath, "data")
    if not os.path.exists(hashDir):
        os.makedirs(hashDir)
    hashPath = os.path.join(hashDir, kAdobHashMapName)
    hashMapKeys = sorted(hashMap.keys())
    data = ["{"]
    for gName in hashMapKeys:
        data.append("'%s': %s," % (gName, hashMap[gName]))
    data.append("}")
    data.append("")  # trailing newline
    data = '\n'.join(data)
    # Bug fix: write with an explicit encoding to match readHashMap(),
    # which opens the file with encoding='utf-8'; otherwise the platform
    # default encoding could garble non-ASCII glyph names on round-trip.
    with open(hashPath, "w", encoding='utf-8') as fp:
        fp.write(data)
def getCurGlyphPath(self, glyphName):
if self.curSrcDir is None:
self.curSrcDir = self.glyphDefaultDir
# Get the glyph file name.
if len(self.glyphMap) == 0:
self.loadGlyphMap()
glyphFileName = self.glyphMap[glyphName]
path = os.path.join(self.curSrcDir, glyphFileName)
return path
def getGlyphSrcPath(self, glyphName):
if len(self.glyphMap) == 0:
self.loadGlyphMap()
glyphFileName = self.glyphMap[glyphName]
# Try for processed layer first
if self.useProcessedLayer and self.processedLayerGlyphMap:
try:
glyphFileName = self.processedLayerGlyphMap[glyphName]
self.curSrcDir = self.glyphLayerDir
glyphPath = os.path.join(self.glyphLayerDir, glyphFileName)
if os.path.exists(glyphPath):
return glyphPath
except KeyError:
pass
self.curSrcDir = self.glyphDefaultDir
glyphPath = os.path.join(self.curSrcDir, glyphFileName)
return glyphPath
def getGlyphDefaultPath(self, glyphName):
if len(self.glyphMap) == 0:
self.loadGlyphMap()
glyphFileName = self.glyphMap[glyphName]
glyphPath = os.path.join(self.glyphDefaultDir, glyphFileName)
return glyphPath
def getGlyphProcessedPath(self, glyphName):
if len(self.glyphMap) == 0:
self.loadGlyphMap()
if not self.processedLayerGlyphMap:
return None
try:
glyphFileName = self.processedLayerGlyphMap[glyphName]
glyphPath = os.path.join(self.glyphLayerDir, glyphFileName)
except KeyError:
glyphPath = None
return glyphPath
def updateHashEntry(self, glyphName, changed):
"""
Updates the dict to be saved as 'com.adobe.type.processedHashMap'.
It does NOT recalculate the hash.
"""
# srcHarsh has already been set: we are fixing the history list.
if not self.useHashMap:
return
# Get hash entry for glyph
srcHash, historyList = self.hashMap[glyphName]
self.hashMapChanged = True
# If the program always reads data from the default layer,
# and we have just created a new glyph in the processed layer,
# then reset the history.
if (not self.useProcessedLayer) and changed:
self.hashMap[glyphName] = [srcHash, [self.programName]]
# If the program is not in the history list, add it.
elif self.programName not in historyList:
historyList.append(self.programName)
def recalcHashEntry(self, glyphName, glyph):
    """Recompute the glyph's point-data hash and store it if it changed.

    The existing history list is preserved; only the hash value in the
    map entry is replaced.
    """
    hashBefore, historyList = self.hashMap[glyphName]
    hash_pen = HashPointPen(glyph)
    glyph.drawPoints(hash_pen)
    hashAfter = hash_pen.getHash()
    if hashAfter != hashBefore:
        self.hashMap[glyphName] = [hashAfter, historyList]
        self.hashMapChanged = True
def checkSkipGlyph(self, glyphName, newSrcHash, doAll):
    """Decide whether processing of glyphName can be skipped.

    Compares newSrcHash (computed from the default layer) against the
    stored hash map entry and this program's history; see the module
    docstring for the full decision table. May delete a stale processed-
    layer glyph as a side effect (setting self.deletedGlyph).
    Returns True when the glyph can be skipped.
    """
    skip = False
    if not self.useHashMap:
        return skip
    if len(self.hashMap) == 0:
        # Hash maps have not yet been read in. Get them.
        self.readHashMap()
    hashEntry = srcHash = None
    historyList = []
    programHistoryIndex = -1  # not found in historyList
    # Get hash entry for glyph
    try:
        hashEntry = self.hashMap[glyphName]
        srcHash, historyList = hashEntry
        try:
            programHistoryIndex = historyList.index(self.programName)
        except ValueError:
            pass
    except KeyError:
        # Glyph is as yet untouched by any program.
        pass
    if (srcHash == newSrcHash):
        if (programHistoryIndex >= 0):
            # The glyph has already been processed by this program,
            # and there have been no changes since.
            skip = True and (not doAll)
        if not skip:
            # case for Checkoutlines
            if not self.useProcessedLayer:
                self.hashMapChanged = True
                self.hashMap[glyphName] = [newSrcHash, [self.programName]]
                glyphPath = self.getGlyphProcessedPath(glyphName)
                if glyphPath and os.path.exists(glyphPath):
                    os.remove(glyphPath)
            else:
                if (programHistoryIndex < 0):
                    historyList.append(self.programName)
    else:
        # case for autohint
        if self.useProcessedLayer:
            # Default layer glyph and stored glyph hash differ, and
            # useProcessedLayer is True. If any of the programs in
            # requiredHistory are in the historyList, we need to
            # complain and skip.
            foundMatch = False
            if len(historyList) > 0:
                for programName in self.requiredHistory:
                    if programName in historyList:
                        foundMatch = True
            if foundMatch:
                print("Error. Glyph '%s' has been edited. You must first "
                      "run '%s' before running '%s'. Skipping." %
                      (glyphName, self.requiredHistory, self.programName))
                skip = True
        # If the source hash has changed, we need to
        # delete the processed layer glyph.
        self.hashMapChanged = True
        self.hashMap[glyphName] = [newSrcHash, [self.programName]]
        glyphPath = self.getGlyphProcessedPath(glyphName)
        if glyphPath and os.path.exists(glyphPath):
            os.remove(glyphPath)
            self.deletedGlyph = True
    return skip
@staticmethod
def getGlyphXML(glyphDir, glyphFileName):
    """Parse a GLIF file; return (width, glifXML, outlineXML).

    width is the numeric 'width' attribute of the <advance> element
    (0 when absent), rounded to 9 decimal places. On a parse error the
    method prints a message and returns (None, None, None).
    """
    glyphPath = os.path.join(glyphDir, glyphFileName)  # default
    etRoot = ET.ElementTree()
    glifXML = etRoot.parse(glyphPath)
    outlineXML = glifXML.find("outline")
    try:
        widthXML = glifXML.find("advance")
        if widthXML is not None:
            width = round(ast.literal_eval(widthXML.get("width", '0')), 9)
        else:
            width = 0
    except UFOParseError as e:
        # Bug fix: Python 3 exceptions have no '.message' attribute, so
        # the original 'e.message' raised AttributeError inside this
        # handler; format the exception object itself instead.
        print("Error. skipping glyph '%s' because of parse error: %s" %
              (glyphFileName, e))
        return None, None, None
    return width, glifXML, outlineXML
def getOrSkipGlyph(self, glyphName, doAll=0):
    """Return (width, skip) for glyphName; (None, True) means skip it.

    The skip decision is always based on the default-layer data hash;
    when useProcessedLayer is set and a processed-layer file exists, the
    returned width comes from that file instead.
    """
    # Get default glyph layer data, so we can check if the glyph
    # has been edited since this program was last run.
    # If the program name is in the history list, and the srcHash
    # matches the default glyph layer data, we can skip.
    if len(self.glyphMap) == 0:
        self.loadGlyphMap()
    glyphFileName = self.glyphMap.get(glyphName)
    if not glyphFileName:
        return None, True  # skip
    width, glifXML, outlineXML = self.getGlyphXML(self.glyphDefaultDir,
                                                  glyphFileName)
    if glifXML is None:
        return None, True  # skip
    # Hash is always from the default glyph layer.
    useDefaultGlyphDir = True
    newHash, _ = self.buildGlyphHashValue(
        width, outlineXML, glyphName, useDefaultGlyphDir)
    skip = self.checkSkipGlyph(glyphName, newHash, doAll)
    # If self.useProcessedLayer and there is a glyph in the
    # processed layer, get the outline from that.
    if self.useProcessedLayer and self.processedLayerGlyphMap:
        try:
            glyphFileName = self.processedLayerGlyphMap[glyphName]
        except KeyError:
            # NOTE(review): on KeyError the default-layer file name is
            # looked up in the processed dir below; assumed intentional
            # (the os.path.exists check guards it) — confirm.
            pass
        glyphPath = os.path.join(self.glyphLayerDir, glyphFileName)
        if os.path.exists(glyphPath):
            width, glifXML, _ = self.getGlyphXML(
                self.glyphLayerDir, glyphFileName)
            if glifXML is None:
                return None, True  # skip
    return width, skip
def getGlyphList(self):
if len(self.glyphMap) == 0:
self.loadGlyphMap()
return self.glyphList
def loadGlyphMap(self):
    """Populate glyphMap/glyphList/orderMap (and the processed-layer maps).

    glyphMap and glyphList come from glyphs/contents.plist; the glyph
    order comes from public.glyphOrder in lib.plist when present, falling
    back to contents.plist order.
    """
    # Need to both get the list of glyphs from contents.plist, and also
    # the glyph order. The latter is taken from the public.glyphOrder key
    # in lib.plist, if it exists, else it is taken from the contents.plist
    # file. Any glyphs in contents.plist which are not named in the
    # public.glyphOrder are sorted after all glyphs which are named in the
    # public.glyphOrder, in the order that they occurred in contents.plist.
    contentsPath = os.path.join(self.parentPath, "glyphs", kContentsName)
    self.glyphMap, self.glyphList = parsePList(contentsPath)
    orderPath = os.path.join(self.parentPath, kLibName)
    self.orderMap = parseGlyphOrder(orderPath)
    if self.orderMap is not None:
        orderIndex = len(self.orderMap)
        orderList = []
        # If there are glyphs in the font which are not named in the
        # public.glyphOrder entry, add them in the order of the
        # contents.plist file.
        for glyphName in self.glyphList:
            try:
                entry = [self.orderMap[glyphName], glyphName]
            except KeyError:
                entry = [orderIndex, glyphName]
                self.orderMap[glyphName] = orderIndex
                orderIndex += 1
            orderList.append(entry)
        orderList.sort()
        self.glyphList = []
        for entry in orderList:
            self.glyphList.append(entry[1])
    else:
        self.orderMap = {}
        numGlyphs = len(self.glyphList)
        for i in range(numGlyphs):
            self.orderMap[self.glyphList[i]] = i
    # We also need to get the glyph map for the processed layer,
    # and use this when the glyph is read from the processed layer.
    # Because checkoutlinesufo used the defcon library, it can write
    # glyph file names that differ from what is in the default glyph layer.
    contentsPath = os.path.join(self.glyphLayerDir, kContentsName)
    if os.path.exists(contentsPath):
        self.processedLayerGlyphMap, self.processedLayerGlyphList = \
            parsePList(contentsPath)
def loadFontInfo(self):
fontInfoPath = os.path.join(self.parentPath, "fontinfo.plist")
if not os.path.exists(fontInfoPath):
return
self.fontInfo, _ = parsePList(fontInfoPath)
def updateLayerContents(self, contentsFilePath):
    """Create or update layercontents.plist for the target write layer.

    Ensures the processed-layer entry is present exactly when we are not
    writing to the default layer, and removes it otherwise.
    """
    # UFO v2
    if not os.path.exists(contentsFilePath):
        contentsList = [DEFAULT_LAYER_ENTRY]
        if not self.writeToDefaultLayer:
            contentsList.append(PROCESSED_LAYER_ENTRY)
    # UFO v3
    else:
        # NOTE(review): loading from a text-mode handle relies on
        # fontTools' plistlib accepting str streams (stdlib plistlib
        # requires a binary file) — confirm.
        with open(contentsFilePath, 'r', encoding='utf-8') as fp:
            contentsList = plistlib.load(fp)
        if self.writeToDefaultLayer and (
                PROCESSED_LAYER_ENTRY in contentsList):
            contentsList.remove(PROCESSED_LAYER_ENTRY)
        elif PROCESSED_LAYER_ENTRY not in contentsList and (
                not self.writeToDefaultLayer):
            contentsList.append(PROCESSED_LAYER_ENTRY)
    with open(contentsFilePath, 'wb') as fp:
        plistlib.dump(contentsList, fp)
def updateLayerGlyphContents(self, contentsFilePath, newGlyphData):
if os.path.exists(contentsFilePath):
with open(contentsFilePath, 'r', encoding='utf-8') as fp:
contentsDict = plistlib.load(fp)
else:
contentsDict = {}
for glyphName in newGlyphData.keys():
# Try for processed layer first
if self.useProcessedLayer and self.processedLayerGlyphMap:
try:
contentsDict[glyphName] = \
self.processedLayerGlyphMap[glyphName]
except KeyError:
contentsDict[glyphName] = self.glyphMap[glyphName]
else:
contentsDict[glyphName] = self.glyphMap[glyphName]
with open(contentsFilePath, 'wb') as fp:
plistlib.dump(contentsDict, fp)
    def getFontInfo(self, fontPSName, inputPath, allow_no_blues, noFlex,
                    vCounterGlyphs, hCounterGlyphs, fdIndex=0):
        """Build (and cache in self.fontDict) an FDDict with the font-wide
        hinting parameters, derived from the UFO's fontinfo.plist.

        Raises UFOParseError when BlueValues or StemSnapV/H data is missing
        and allow_no_blues is false. Note: fontPSName, inputPath and fdIndex
        are accepted for interface compatibility but not used in this body.
        """
        if self.fontDict is not None:
            return self.fontDict
        if self.fontInfo is None:
            self.loadFontInfo()
        fdDict = convertfonttocid.FDDict()
        # should be 1 if the glyphs are ideographic, else 0.
        fdDict.LanguageGroup = self.fontInfo.get("languagegroup", "0")
        fdDict.OrigEmSqUnits = self.getUnitsPerEm()
        fdDict.FontName = self.getPSName()
        upm = self.getUnitsPerEm()
        low = min(-upm * 0.25,
                  float(self.fontInfo.get("openTypeOS2WinDescent", "0")) - 200)
        high = max(upm * 1.25,
                   float(self.fontInfo.get("openTypeOS2WinAscent", "0")) + 200)
        # Make a set of inactive alignment zones: zones outside
        # of the font bbox so as not to affect hinting.
        # Used when src font has no BlueValues or has invalid BlueValues.
        # Some fonts have bad BBox values, so we don't let this be smaller
        # than -upm*0.25, upm*1.25.
        inactiveAlignmentValues = [low, low, high, high]
        blueValues = sorted(self.fontInfo.get("postscriptBlueValues", []))
        numBlueValues = len(blueValues)
        if numBlueValues < 4:
            if allow_no_blues:
                blueValues = inactiveAlignmentValues
                numBlueValues = len(blueValues)
            else:
                raise UFOParseError(
                    "ERROR: Font must have at least four values in its "
                    "BlueValues array for AC to work!")
        # The first pair only is a bottom zone, where the first value
        # is the overshoot position; the rest are top zones, and second
        # value of the pair is the overshoot position.
        blueValues[0] = blueValues[0] - blueValues[1]
        for i in range(3, numBlueValues, 2):
            blueValues[i] = blueValues[i] - blueValues[i - 1]
        blueValues = [str(val) for val in blueValues]
        numBlueValues = min(
            numBlueValues, len(convertfonttocid.kBlueValueKeys))
        # Assign each converted value to its named FDDict attribute.
        for i in range(numBlueValues):
            key = convertfonttocid.kBlueValueKeys[i]
            value = blueValues[i]
            setattr(fdDict, key, value)
        otherBlues = self.fontInfo.get("postscriptOtherBlues", [])
        if len(otherBlues) > 0:
            i = 0
            numBlueValues = len(otherBlues)
            otherBlues.sort()
            # Replace the first value of each pair with its offset from the
            # pair's second value, mirroring the BlueValues conversion above.
            for i in range(0, numBlueValues, 2):
                otherBlues[i] = otherBlues[i] - otherBlues[i + 1]
            otherBlues = [str(val) for val in otherBlues]
            numBlueValues = min(
                numBlueValues, len(convertfonttocid.kOtherBlueValueKeys))
            for i in range(numBlueValues):
                key = convertfonttocid.kOtherBlueValueKeys[i]
                value = otherBlues[i]
                setattr(fdDict, key, value)
        vstems = self.fontInfo.get("postscriptStemSnapV", [])
        if len(vstems) == 0:
            if allow_no_blues:
                # dummy value. Needs to be larger than any hint will
                # likely be, as the autohint program strips out any
                # hint wider than twice the largest global stem width.
                vstems = [fdDict.OrigEmSqUnits]
            else:
                raise UFOParseError(
                    "ERROR: Font does not have postscriptStemSnapV!")
        vstems.sort()
        if (len(vstems) == 0) or ((len(vstems) == 1) and (vstems[0] < 1)):
            # dummy value that will allow PyAC to run
            vstems = [fdDict.OrigEmSqUnits]
            print("WARNING: There is no value or 0 value for DominantV.")
        vstems = repr(vstems)
        fdDict.DominantV = vstems
        hstems = self.fontInfo.get("postscriptStemSnapH", [])
        if len(hstems) == 0:
            if allow_no_blues:
                # dummy value. Needs to be larger than any hint will
                # likely be, as the autohint program strips out any
                # hint wider than twice the largest global stem width.
                hstems = [fdDict.OrigEmSqUnits]
            else:
                raise UFOParseError(
                    "ERROR: Font does not have postscriptStemSnapH!")
        hstems.sort()
        if (len(hstems) == 0) or ((len(hstems) == 1) and (hstems[0] < 1)):
            # dummy value that will allow PyAC to run
            hstems = [fdDict.OrigEmSqUnits]
            print("WARNING: There is no value or 0 value for DominantH.")
        hstems = repr(hstems)
        fdDict.DominantH = hstems
        if noFlex:
            fdDict.FlexOK = "false"
        else:
            fdDict.FlexOK = "true"
        # Add candidate lists for counter hints, if any.
        if vCounterGlyphs:
            temp = " ".join(vCounterGlyphs)
            fdDict.VCounterChars = "( %s )" % (temp)
        if hCounterGlyphs:
            temp = " ".join(hCounterGlyphs)
            fdDict.HCounterChars = "( %s )" % (temp)
        fdDict.BlueFuzz = self.fontInfo.get("postscriptBlueFuzz", 1)
        # postscriptBlueShift
        # postscriptBlueScale
        self.fontDict = fdDict
        return fdDict
    def getfdInfo(self, psName, inputPath, allow_no_blues, noFlex,
                  vCounterGlyphs, hCounterGlyphs, glyphList, fdIndex=0):
        """Return (fdGlyphDict, fontDictList) for hinting.

        Builds the base FDDict from fontinfo.plist (via getFontInfo), then,
        if a 'fontinfo' file sits next to the input font and declares FDDict
        entries, parses it to add per-glyph font dicts. fdGlyphDict is None
        when no such file/entries exist.
        """
        fontDictList = []
        fdGlyphDict = None
        fdDict = self.getFontInfo(
            psName, inputPath, allow_no_blues, noFlex, vCounterGlyphs,
            hCounterGlyphs, fdIndex)
        fontDictList.append(fdDict)
        # Check the fontinfo file, and add any other font dicts
        srcFontInfo = os.path.dirname(inputPath)
        srcFontInfo = os.path.join(srcFontInfo, "fontinfo")
        # Sanity bounds for alignment zones parsed from the fontinfo file.
        maxX = self.getUnitsPerEm() * 2
        maxY = maxX
        minY = -self.getUnitsPerEm()
        if os.path.exists(srcFontInfo):
            with open(srcFontInfo, "r", encoding='utf-8') as fi:
                fontInfoData = fi.read()
            # Strip '#' comments before looking for FDDict declarations.
            fontInfoData = re.sub(r"#[^\r\n]+", "", fontInfoData)
            if "FDDict" in fontInfoData:
                blueFuzz = convertfonttocid.getBlueFuzz(inputPath)
                fdGlyphDict, fontDictList, finalFDict = \
                    convertfonttocid.parseFontInfoFile(
                        fontDictList, fontInfoData, glyphList,
                        maxY, minY, psName, blueFuzz)
                if finalFDict is None:
                    # If a font dict was not explicitly specified for the
                    # output font, use the first user-specified font dict.
                    convertfonttocid.mergeFDDicts(
                        fontDictList[1:], self.fontDict)
                else:
                    convertfonttocid.mergeFDDicts(
                        [finalFDict], self.fontDict)
        return fdGlyphDict, fontDictList
def getGlyphID(self, glyphName):
try:
gid = self.orderMap[glyphName]
except IndexError:
raise UFOParseError(
"Could not find glyph name '%s' in UFO font contents plist. "
"'%s'. " % (glyphName, self.parentPath))
return gid
@staticmethod
def _rd_val(str_val):
"""Round and normalize a (string) GLIF value"""
return repr(norm_float(round(ast.literal_eval(str_val), 9)))
    def buildGlyphHashValue(self, width, outlineXML, glyphName,
                            useDefaultGlyphDir, level=0):
        """
        glyphData must be the official <outline> XML from a GLIF.
        We skip contours with only one point.

        Returns (data, dataList): dataList is the list of string fragments
        (width, point data, component data) and data is their concatenation,
        replaced by its sha512 hex digest when 128 chars or longer.
        Recurses into components, with a depth limit of 10.
        """
        dataList = ["w%s" % norm_float(round(width, 9))]
        if level > 10:
            raise UFOParseError(
                "In parsing component, exceeded 10 levels of reference. "
                "'%s'. " % (glyphName))
        # <outline> tag is optional per spec., e.g. space glyph
        # does not necessarily have it.
        if outlineXML is not None:
            for childContour in outlineXML:
                if childContour.tag == "contour":
                    if len(childContour) < 2:
                        continue
                    for child in childContour:
                        if child.tag == "point":
                            # Only the first letter of the point type
                            # (move/line/curve/qcurve) goes into the hash.
                            ptType = child.get("type")
                            pointType = '' if ptType is None else ptType[0]
                            x = self._rd_val(child.get("x"))
                            y = self._rd_val(child.get("y"))
                            dataList.append("%s%s%s" % (pointType, x, y))
                elif childContour.tag == "component":
                    # append the component hash.
                    compGlyphName = childContour.get("base")
                    if compGlyphName is None:
                        raise UFOParseError(
                            "'%s' is missing the 'base' attribute in a "
                            "component." % glyphName)
                    dataList.append("%s%s" % ("base:", compGlyphName))
                    if useDefaultGlyphDir:
                        try:
                            componentPath = self.getGlyphDefaultPath(
                                compGlyphName)
                        except KeyError:
                            raise UFOParseError(
                                "'%s' component glyph is missing from "
                                "contents.plist." % (compGlyphName))
                    else:
                        # If we are not necessarily using the default layer
                        # for the main glyph, then a missing component may not
                        # have been processed, and may just be in the default
                        # layer. We need to look for component glyphs in the
                        # src list first, then in the default layer.
                        try:
                            componentPath = self.getGlyphSrcPath(compGlyphName)
                            if not os.path.exists(componentPath):
                                componentPath = self.getGlyphDefaultPath(
                                    compGlyphName)
                        except KeyError:
                            try:
                                componentPath = self.getGlyphDefaultPath(
                                    compGlyphName)
                            except KeyError:
                                raise UFOParseError(
                                    "'%s' component glyph is missing from "
                                    "contents.plist." % (compGlyphName))
                    if not os.path.exists(componentPath):
                        raise UFOParseError(
                            "'%s' component file is missing: '%s'." %
                            (compGlyphName, componentPath))
                    etRoot = ET.ElementTree()
                    # Collect transform values
                    for trans_key, flbk_val in COMP_TRANSFORM.items():
                        value = childContour.get(trans_key, flbk_val)
                        dataList.append(self._rd_val(value))
                    componentXML = etRoot.parse(componentPath)
                    componentOutlineXML = componentXML.find("outline")
                    # Recurse: fold the component's own hash data into ours.
                    _, componentDataList = self.buildGlyphHashValue(
                        width, componentOutlineXML, glyphName,
                        useDefaultGlyphDir, level + 1)
                    dataList.extend(componentDataList)
        data = "".join(dataList)
        if len(data) >= 128:
            data = hashlib.sha512(data.encode("ascii")).hexdigest()
        return data, dataList
    def getComponentOutline(self, componentItem):
        """Return the parsed <outline> element of the glyph referenced by a
        GLIF <component> element, resolving the file from the processed or
        default layer as appropriate.

        Raises UFOParseError when the 'base' attribute is missing, or the
        referenced glyph/file cannot be found.
        """
        compGlyphName = componentItem.get("base")
        if compGlyphName is None:
            raise UFOParseError(
                "'%s' attribute missing from component '%s'." %
                ("base", xmlToString(componentItem)))
        if not self.useProcessedLayer:
            try:
                compGlyphFilePath = self.getGlyphDefaultPath(compGlyphName)
            except KeyError:
                raise UFOParseError(
                    "'%s' component glyph is missing from "
                    "contents.plist." % compGlyphName)
        else:
            # If we are not necessarily using the default layer for the main
            # glyph, then a missing component may not have been processed, and
            # may just be in the default layer. We need to look for component
            # glyphs in the src list first, then in the default layer.
            try:
                compGlyphFilePath = self.getGlyphSrcPath(compGlyphName)
                if not os.path.exists(compGlyphFilePath):
                    compGlyphFilePath = self.getGlyphDefaultPath(compGlyphName)
            except KeyError:
                try:
                    compGlyphFilePath = self.getGlyphDefaultPath(compGlyphName)
                except KeyError:
                    raise UFOParseError(
                        "'%s' component glyph is missing from "
                        "contents.plist." % compGlyphName)
        if not os.path.exists(compGlyphFilePath):
            raise UFOParseError(
                "'%s' component file is missing: '%s'." %
                (compGlyphName, compGlyphFilePath))
        etRoot = ET.ElementTree()
        glifXML = etRoot.parse(compGlyphFilePath)
        outlineXML = glifXML.find("outline")
        return outlineXML
def close(self):
if self.hashMapChanged:
self.writeHashMap()
self.hashMapChanged = False
def clearHashMap(self):
self.hashMap = {kAdobHashMapVersionName: kAdobHashMapVersion}
hashDir = os.path.join(self.parentPath, "data")
if not os.path.exists(hashDir):
return
hashPath = os.path.join(hashDir, kAdobHashMapName)
if os.path.exists(hashPath):
os.remove(hashPath)
def setWriteToDefault(self):
self.useProcessedLayer = False
self.writeToDefaultLayer = True
self.glyphWriteDir = self.glyphDefaultDir
def parseGlyphOrder(filePath):
    """Return {glyph name: index} from a plist's public glyph-order entry.

    Returns None when the file does not exist or lacks the key.
    """
    if not os.path.exists(filePath):
        return None
    publicOrderDict, _ = parsePList(filePath, kPublicGlyphOrderKey)
    if publicOrderDict is None:
        return None
    glyphList = publicOrderDict[kPublicGlyphOrderKey]
    return {name: index for index, name in enumerate(glyphList)}
def parsePList(filePath, dictKey=None):
    """Parse a plist file and return (plistDict, plistKeys).

    When dictKey is given, the result is narrowed to {dictKey: value};
    if the key is absent, plistDict is None and plistKeys is [].
    """
    # If dictKey is defined, parse and return only the data for that key.
    #
    # Updates July 2019:
    # - use fontTools.misc.plistlib instead of ET to parse
    # - use built-in OrderedDict as the dict_type to preserve ordering
    # - use simpler filtering for non-None dictKey
    #
    # NOTE(review): the file is opened in text mode, but the stdlib
    # plistlib.load requires a binary stream; this presumably relies on the
    # plistlib variant imported at the top of the file (see the July 2019
    # note above) accepting text streams — confirm.
    plistDict = {}
    plistKeys = []
    with open(filePath, 'r', encoding='utf-8') as fp:
        plistDict = plistlib.load(fp, dict_type=OrderedDict)
    if dictKey is not None:
        if dictKey in plistDict:
            plistDict = {dictKey: plistDict[dictKey]}
        else:
            plistDict = None
    if plistDict is not None:
        plistKeys = list(plistDict.keys())
    return plistDict, plistKeys
def convertGLIFToBez(ufoFontData, glyphName, doAll=0):
    """Return (bez string, width) for a glyph, or (None, width) if skipped."""
    width, skip = ufoFontData.getOrSkipGlyph(glyphName, doAll)
    if skip:
        return None, width
    glyph = ufoFontData.glyphSet[glyphName]
    # Coordinates are rounded unless decimal coordinates are allowed.
    bez = psahUFOFontData.get_glyph_bez(
        glyph, not ufoFontData.allowDecimalCoords)
    body = "\n".join(bez)
    # Wrap the outline in the standard bez preamble ('% name', 'sc') and
    # postamble ('ed') with a trailing newline.
    bezString = "\n".join(["% " + glyphName, "sc", body, "ed", ""])
    return bezString, width
class HintMask:
    """Collects the stem hints in effect from one hint-substitution point
    in a bez string until the next, while converting bez to GLIF data."""

    def __init__(self, listPos):
        # The index into the pointList is kept
        # so we can quickly find them later.
        self.listPos = listPos
        # These contain the actual hint values.
        self.hList = []       # horizontal stem hints
        self.vList = []       # vertical stem hints
        self.hstem3List = []  # horizontal counter (stem3) hints
        self.vstem3List = []  # vertical counter (stem3) hints
        # The name attribute of the point which follows the new hint set.
        self.pointName = "hintSet" + str(listPos).zfill(4)

    def addHintSet(self, hintSetList):
        """Append this hint set to hintSetList as a <dict> element with the
        point name and the list of stem strings."""
        newHintSetDict = XMLElement("dict")
        hintSetList.append(newHintSetDict)
        newHintSetKey = XMLElement("key")
        newHintSetKey.text = kPointTag
        newHintSetDict.append(newHintSetKey)
        newHintSetValue = XMLElement("string")
        newHintSetValue.text = self.pointName
        newHintSetDict.append(newHintSetValue)
        stemKey = XMLElement("key")
        stemKey.text = "stems"
        newHintSetDict.append(stemKey)
        newHintSetArray = XMLElement("array")
        newHintSetDict.append(newHintSetArray)
        # Bug fix: the horizontal branch previously tested vstem3List
        # (a copy-paste of the vertical branch below), so hint sets with
        # only hstem3 hints were silently dropped.
        if (len(self.hList) > 0) or (len(self.hstem3List)):
            isH = 1
            addHintList(self.hList, self.hstem3List, newHintSetArray, isH)
        if (len(self.vList) > 0) or (len(self.vstem3List)):
            isH = 0
            addHintList(self.vList, self.vstem3List, newHintSetArray, isH)
def makeStemHintList(hintsStem3, stemList, isH):
    """Append one <string> element holding all stem3 hints to stemList."""
    # In bez terms, the first coordinate in each pair is absolute,
    # second is relative, and hence is the width.
    op = kHStem3Name if isH else kVStem3Name
    parts = [op]
    for stem3 in hintsStem3:
        for pos, width in stem3:
            # Render whole-number floats as ints (e.g. 12.0 -> "12").
            if isinstance(pos, float) and (int(pos) == pos):
                pos = int(pos)
            if isinstance(width, float) and (int(width) == width):
                width = int(width)
            parts.append("%s %s" % (pos, width))
    newStem = XMLElement("string")
    newStem.text = " ".join(parts)
    stemList.append(newStem)
def makeHintList(hints, newHintSetArray, isH):
    """Append one <string> stem element per hint to newHintSetArray."""
    # In bez terms, the first coordinate in each pair is absolute,
    # second is relative, and hence is the width.
    op = kHStemName if isH else kVStemName
    for hint in hints:
        if not hint:
            continue
        pos, width = hint[0], hint[1]
        # Render whole-number floats as ints (e.g. 12.0 -> "12").
        if isinstance(pos, float) and (int(pos) == pos):
            pos = int(pos)
        if isinstance(width, float) and (int(width) == width):
            width = int(width)
        newStem = XMLElement("string")
        newStem.text = "%s %s %s" % (op, pos, width)
        newHintSetArray.append(newStem)
def addFlexHint(flexList, flexArray):
    """Append a <string> element to flexArray for each flex point name."""
    for pointTag in flexList:
        element = XMLElement("string")
        element.text = pointTag
        flexArray.append(element)
def fixStartPoint(outlineItem, opList):
    """Adjust the contour's start point for GLIF's implicit path close.

    GLIF has no explicit start point or close-path line-to. If the last
    op lands exactly on the first point, drop that redundant final point
    and give the start point the closing op's segment type (keeping a
    closing curve's off-curve points at the end of the list). Otherwise
    the close is an implied line-to, so the initial move becomes a line.
    """
    firstOpName, firstX, firstY = opList[0]
    lastOpName, lastX, lastY = opList[-1]
    startPoint = outlineItem[0]
    if (firstX, firstY) == (lastX, lastY):
        del outlineItem[-1]
        startPoint.set("type", lastOpName)
    else:
        # Implied final line-to: convert the initial move-to to a line-to.
        startPoint.set("type", "line")
# Map of bez path operators to GLIF point "type" attribute values.
# The absolute/relative/horizontal/vertical variants of an operator all
# collapse onto the same UFO segment type.
bezToUFOPoint = {
    "mt": 'move',
    "rmt": 'move',
    "hmt": 'move',
    "vmt": 'move',
    "rdt": 'line',
    "dt": 'line',
    "hdt": "line",
    "vdt": "line",
    "rct": 'curve',
    "ct": 'curve',
    "rcv": 'curve',  # Morisawa's alternate name for 'rct'.
    "vhct": 'curve',
    "hvct": 'curve',
}
def convertCoords(curX, curY):
    """Return (x, y) with whole-number values collapsed to ints for output."""
    showX = int(curX) if int(curX) == curX else curX
    showY = int(curY) if int(curY) == curY else curY
    return showX, showY
def convertBezToOutline(ufoFontData, glyphName, bezString):
    """ Since the UFO outline element has no attributes to preserve, I can
    just make a new one.

    Returns (newOutline, hintInfoDict): the rebuilt <outline> element and
    the hint <dict> element (or None when the bez string had no hints or
    flex ops).
    """
    # convert bez data to a UFO glif XML representation
    #
    # Convert all bez ops to simplest UFO equivalent. Add all hints to vertical
    # and horizontal hint lists as encountered; insert a HintMask class
    # whenever a new set of hints is encountered after all operators have been
    # processed, convert HintMask items into hintmask ops and hintmask bytes
    # add all hints as prefix review operator list to optimize T2 operators.
    # if useStem3 == 1, then any counter hints must be processed as stem3
    # hints, else the opposite. Counter hints are used only in LanguageGroup 1
    # glyphs, aka ideographs
    bezString = re.sub(r"%.+?\n", "", bezString) # suppress comments
    bezList = re.findall(r"(\S+)", bezString)
    if not bezList:
        return "", None
    flexList = []
    # Create an initial hint mask. We use this if
    # there is no explicit initial hint sub.
    hintMask = HintMask(0)
    hintMaskList = [hintMask]
    vStem3Args = []
    hStem3Args = []
    argList = []
    opList = []
    newHintMaskName = None
    inPreFlex = False
    hintInfoDict = None
    opIndex = 0
    curX = 0
    curY = 0
    newOutline = XMLElement("outline")
    outlineItem = None
    seenHints = 0
    # Dispatch on each bez token; numeric tokens accumulate in argList
    # until an operator token consumes them.
    for token in bezList:
        try:
            val = float(token)
            argList.append(val)
            continue
        except ValueError:
            pass
        if token == "newcolors":
            pass
        elif token in ["beginsubr", "endsubr"]:
            pass
        elif token in ["snc"]:
            hintMask = HintMask(opIndex)
            # If the new colors precedes any marking operator,
            # then we want throw away the initial hint mask we
            # made, and use the new one as the first hint mask.
            if opIndex == 0:
                hintMaskList = [hintMask]
            else:
                hintMaskList.append(hintMask)
            newHintMaskName = hintMask.pointName
        elif token in ["enc"]:
            pass
        elif token == "div":
            # 'div' folds the top two stack values into their quotient.
            value = argList[-2] / float(argList[-1])
            argList[-2:] = [value]
        elif token == "rb":
            # Horizontal stem hint (goes into hList).
            if newHintMaskName is None:
                newHintMaskName = hintMask.pointName
            hintMask.hList.append(argList)
            argList = []
            seenHints = 1
        elif token == "ry":
            # Vertical stem hint (goes into vList).
            if newHintMaskName is None:
                newHintMaskName = hintMask.pointName
            hintMask.vList.append(argList)
            argList = []
            seenHints = 1
        elif token == "rm":  # vstem3's are vhints
            if newHintMaskName is None:
                newHintMaskName = hintMask.pointName
            seenHints = 1
            vStem3Args.append(argList)
            argList = []
            if len(vStem3Args) == 3:
                hintMask.vstem3List.append(vStem3Args)
                vStem3Args = []
        elif token == "rv":  # hstem3's are hhints
            seenHints = 1
            hStem3Args.append(argList)
            argList = []
            if len(hStem3Args) == 3:
                hintMask.hstem3List.append(hStem3Args)
                hStem3Args = []
        elif token == "preflx1":
            # the preflx1/preflx2 sequence provides the same info as the flex
            # sequence; the difference is that the preflx1/preflx2 sequence
            # provides the argument values needed for building a Type1 string
            # while the flex sequence is simply the 6 rcurveto points. Both
            # sequences are always provided.
            argList = []
            # need to skip all move-tos until we see the "flex" operator.
            inPreFlex = True
        elif token == "preflx2a":
            argList = []
        elif token == "preflx2":
            argList = []
        elif token == "flxa":  # flex with absolute coords.
            inPreFlex = False
            flexPointName = kBaseFlexName + str(opIndex).zfill(4)
            flexList.append(flexPointName)
            curveCnt = 2
            i = 0
            # The first 12 args are the 6 args for each of
            # the two curves that make up the flex feature.
            while i < curveCnt:
                curX = argList[0]
                curY = argList[1]
                showX, showY = convertCoords(curX, curY)
                newPoint = XMLElement(
                    "point", {"x": "%s" % showX, "y": "%s" % showY})
                outlineItem.append(newPoint)
                curX = argList[2]
                curY = argList[3]
                showX, showY = convertCoords(curX, curY)
                newPoint = XMLElement(
                    "point", {"x": "%s" % showX, "y": "%s" % showY})
                outlineItem.append(newPoint)
                curX = argList[4]
                curY = argList[5]
                showX, showY = convertCoords(curX, curY)
                opName = 'curve'
                newPoint = XMLElement(
                    "point", {
                        "x": f"{showX}", "y": f"{showY}", "type": opName})
                outlineItem.append(newPoint)
                opList.append([opName, curX, curY])
                opIndex += 1
                if i == 0:
                    argList = argList[6:12]
                i += 1
            # attach the point name to the first point of the first curve.
            outlineItem[-6].set(kPointName, flexPointName)
            if newHintMaskName is not None:
                # We have a hint mask that we want to attach to the first
                # point of the flex op. However, there is already a flex
                # name in that attribute. What we do is set the flex point
                # name into the hint mask.
                hintMask.pointName = flexPointName
                newHintMaskName = None
            argList = []
        elif token == "flx":
            # Flex with relative coords (note '+=' below vs 'flxa').
            inPreFlex = False
            flexPointName = kBaseFlexName + str(opIndex).zfill(4)
            flexList.append(flexPointName)
            curveCnt = 2
            i = 0
            # The first 12 args are the 6 args for each of the two curves
            # that make up the flex feature.
            while i < curveCnt:
                curX += argList[0]
                curY += argList[1]
                showX, showY = convertCoords(curX, curY)
                newPoint = XMLElement(
                    "point", {"x": "%s" % showX, "y": "%s" % showY})
                outlineItem.append(newPoint)
                curX += argList[2]
                curY += argList[3]
                showX, showY = convertCoords(curX, curY)
                newPoint = XMLElement(
                    "point", {"x": "%s" % showX, "y": "%s" % showY})
                outlineItem.append(newPoint)
                curX += argList[4]
                curY += argList[5]
                showX, showY = convertCoords(curX, curY)
                opName = 'curve'
                newPoint = XMLElement(
                    "point", {
                        "x": f"{showX}", "y": f"{showY}", "type": opName})
                outlineItem.append(newPoint)
                opList.append([opName, curX, curY])
                opIndex += 1
                if i == 0:
                    argList = argList[6:12]
                i += 1
            # attach the point name to the first point of the first curve.
            outlineItem[-6].set(kPointName, flexPointName)
            if newHintMaskName is not None:
                # We have a hint mask that we want to attach to the first
                # point of the flex op. However, there is already a flex name
                # in that attribute. What we do is set the flex point name
                # into the hint mask.
                hintMask.pointName = flexPointName
                newHintMaskName = None
            argList = []
        elif token == "sc":
            pass
        elif token == "cp":
            pass
        elif token == "ed":
            pass
        else:
            # Drawing operators (move/line/curve families).
            if inPreFlex and (token[-2:] == "mt"):
                continue
            if token[-2:] in ["mt", "dt", "ct", "cv"]:
                opIndex += 1
            else:
                print("Unhandled operation %s %s" % (argList, token))
                # NOTE(review): the format args are passed to the exception
                # constructor rather than interpolated into the message.
                raise BezParseError(
                    "Unhandled operation: '%s' '%s'.", argList, token)
            dx = dy = 0
            opName = bezToUFOPoint[token]
            if token[-2:] in ["mt", "dt"]:
                if token in ["mt", "dt"]:
                    # Absolute move/line.
                    curX = argList[0]
                    curY = argList[1]
                else:
                    # Relative move/line (h* = x only, v* = y only).
                    if token in ["rmt", "rdt"]:
                        dx = argList[0]
                        dy = argList[1]
                    elif token in ["hmt", "hdt"]:
                        dx = argList[0]
                    elif token in ["vmt", "vdt"]:
                        dy = argList[0]
                    curX += dx
                    curY += dy
                showX, showY = convertCoords(curX, curY)
                newPoint = XMLElement(
                    "point", {
                        "x": f"{showX}", "y": f"{showY}", "type": opName})
                if opName == "move":
                    if outlineItem is not None:
                        if len(outlineItem) == 1:
                            # Just in case we see 2 moves in a row, delete the
                            # previous outlineItem if it has only the move-to
                            print("Deleting moveto: %s adding %s" % (
                                xmlToString(newOutline[-1]),
                                xmlToString(outlineItem)))
                            del newOutline[-1]
                        else:
                            # Fix the start/implied end path of the
                            # previous path.
                            fixStartPoint(outlineItem, opList)
                    opList = []
                    outlineItem = XMLElement('contour')
                    newOutline.append(outlineItem)
                if newHintMaskName is not None:
                    newPoint.set(kPointName, newHintMaskName)
                    newHintMaskName = None
                outlineItem.append(newPoint)
                opList.append([opName, curX, curY])
            else:
                # Curve operators: two off-curve points then the on-curve.
                if token in ["ct", "cv"]:
                    curX = argList[0]
                    curY = argList[1]
                    showX, showY = convertCoords(curX, curY)
                    newPoint = XMLElement(
                        "point", {"x": "%s" % showX, "y": "%s" % showY})
                    outlineItem.append(newPoint)
                    curX = argList[2]
                    curY = argList[3]
                    showX, showY = convertCoords(curX, curY)
                    newPoint = XMLElement(
                        "point", {"x": "%s" % showX, "y": "%s" % showY})
                    outlineItem.append(newPoint)
                    curX = argList[4]
                    curY = argList[5]
                    showX, showY = convertCoords(curX, curY)
                    newPoint = XMLElement(
                        "point", {
                            "x": f"{showX}", "y": f"{showY}", "type": opName})
                    outlineItem.append(newPoint)
                else:
                    if token in ["rct", "rcv"]:
                        curX += argList[0]
                        curY += argList[1]
                        showX, showY = convertCoords(curX, curY)
                        newPoint = XMLElement(
                            "point", {"x": "%s" % showX, "y": "%s" % showY})
                        outlineItem.append(newPoint)
                        curX += argList[2]
                        curY += argList[3]
                        showX, showY = convertCoords(curX, curY)
                        newPoint = XMLElement(
                            "point", {"x": "%s" % showX, "y": "%s" % showY})
                        outlineItem.append(newPoint)
                        curX += argList[4]
                        curY += argList[5]
                        showX, showY = convertCoords(curX, curY)
                        newPoint = XMLElement(
                            "point", {
                                "x": f"{showX}",
                                "y": f"{showY}",
                                "type": opName})
                        outlineItem.append(newPoint)
                    elif token == "vhct":
                        # Starts vertical, ends horizontal: only 4 args.
                        curY += argList[0]
                        showX, showY = convertCoords(curX, curY)
                        newPoint = XMLElement(
                            "point", {"x": "%s" % showX, "y": "%s" % showY})
                        outlineItem.append(newPoint)
                        curX += argList[1]
                        curY += argList[2]
                        showX, showY = convertCoords(curX, curY)
                        newPoint = XMLElement(
                            "point", {"x": "%s" % showX, "y": "%s" % showY})
                        outlineItem.append(newPoint)
                        curX += argList[3]
                        showX, showY = convertCoords(curX, curY)
                        newPoint = XMLElement(
                            "point", {
                                "x": f"{showX}",
                                "y": f"{showY}",
                                "type": opName})
                        outlineItem.append(newPoint)
                    elif token == "hvct":
                        # Starts horizontal, ends vertical: only 4 args.
                        curX += argList[0]
                        showX, showY = convertCoords(curX, curY)
                        newPoint = XMLElement(
                            "point", {"x": "%s" % showX, "y": "%s" % showY})
                        outlineItem.append(newPoint)
                        curX += argList[1]
                        curY += argList[2]
                        showX, showY = convertCoords(curX, curY)
                        newPoint = XMLElement(
                            "point", {"x": "%s" % showX, "y": "%s" % showY})
                        outlineItem.append(newPoint)
                        curY += argList[3]
                        showX, showY = convertCoords(curX, curY)
                        newPoint = XMLElement(
                            "point", {
                                "x": f"{showX}",
                                "y": f"{showY}",
                                "type": opName})
                        outlineItem.append(newPoint)
                if newHintMaskName is not None:
                    # attach the pointName to the first point of the curve.
                    outlineItem[-3].set(kPointName, newHintMaskName)
                    newHintMaskName = None
                opList.append([opName, curX, curY])
            argList = []
    if outlineItem is not None:
        if len(outlineItem) == 1:
            # Just in case we see two moves in a row, delete the previous
            # outlineItem if it has zero length.
            del newOutline[-1]
        else:
            fixStartPoint(outlineItem, opList)
    # add hints, if any
    # Must be done at the end of op processing to make sure we have seen
    # all the hints in the bez string.
    # Note that the hintmasks are identified in the opList by the point name.
    # We will follow the T1 spec: a glyph may have stem3 counter hints or
    # regular hints, but not both.
    if (seenHints) or (len(flexList) > 0):
        hintInfoDict = XMLElement("dict")
        hintSetListItem = XMLElement("key")
        hintSetListItem.text = kHintSetListName
        hintInfoDict.append(hintSetListItem)
        hintSetListArray = XMLElement("array")
        hintInfoDict.append(hintSetListArray)
        # Convert the rest of the hint masks to a hintmask op
        # and hintmask bytes.
        for hintMask in hintMaskList:
            hintMask.addHintSet(hintSetListArray)
        if len(flexList) > 0:
            hintSetListItem = XMLElement("key")
            hintSetListItem.text = kFlexIndexListName
            hintInfoDict.append(hintSetListItem)
            flexArray = XMLElement("array")
            hintInfoDict.append(flexArray)
            addFlexHint(flexList, flexArray)
        # JH 24 Sep 2019
        # hash now goes at end of glyphDict to match psautohint
        idItem = XMLElement("key")
        idItem.text = "id"
        hintInfoDict.append(idItem)
        idString = XMLElement("string")
        idString.text = kHashIdPlaceholder
        hintInfoDict.append(idString)
    return newOutline, hintInfoDict
def addHintList(hints, hintsStem3, newHintSetArray, isH):
    """Emit stem3 hints when present, otherwise regular stem hints.

    A charstring may have regular vstem hints or vstem3 hints, but not
    both (same for hstem/hstem3 hints), so stem3 hints take precedence.
    """
    # Cap the number of hints so the T2 argument stack cannot overflow.
    hintLimit = int((kStackLimit - 2) / 2)
    if hintsStem3:
        hintsStem3.sort()
        if len(hintsStem3) >= hintLimit:
            hintsStem3 = hintsStem3[:hintLimit]
        makeStemHintList(hintsStem3, newHintSetArray, isH)
    else:
        hints.sort()
        if len(hints) >= hintLimit:
            hints = hints[:hintLimit]
        makeHintList(hints, newHintSetArray, isH)
def addWhiteSpace(parent, level):
    """Recursively set element text/tail whitespace for pretty-printing."""
    childIndent = '\n' + (" " * (level + 1))
    parentIndent = '\n' + (" " * (level))
    lastChild = None
    for lastChild in parent:
        lastChild.tail = childIndent
        addWhiteSpace(lastChild, level + 1)
    if lastChild is not None:
        # Indent before the first child; de-indent after the last child.
        if parent.text is None:
            parent.text = childIndent
        lastChild.tail = parentIndent
def convertBezToGLIF(ufoFontData, glyphName, bezString, hintsOnly=False):
    """Rebuild a glyph's GLIF XML from a bez string.

    Replaces the <outline> element (unless hintsOnly), installs the hint
    data under the glyph lib dict, and returns the updated GLIF element
    tree. Also marks the glyph as changed in the hash map.
    """
    # I need to replace the contours with data from the bez string.
    glyphPath = ufoFontData.getGlyphSrcPath(glyphName)
    with open(glyphPath, "rb") as fp:
        data = fp.read()
    glifXML = XML(data)
    outlineItem = None
    libIndex = outlineIndex = -1
    childIndex = 0
    # Locate the existing <outline> and <lib> children and their positions.
    for childElement in glifXML:
        if childElement.tag == "outline":
            outlineItem = childElement
            outlineIndex = childIndex
        if childElement.tag == "lib":
            libIndex = childIndex
        childIndex += 1
    newOutlineElement, hintInfoDict = convertBezToOutline(
        ufoFontData, glyphName, bezString)
    # print xmlToString(stemHints)
    if not hintsOnly:
        if outlineItem is None:
            # need to add it. Add it before the lib item, if any.
            if libIndex > 0:
                glifXML.insert(libIndex, newOutlineElement)
            else:
                glifXML.append(newOutlineElement)
        else:
            # remove the old one and add the new one.
            glifXML.remove(outlineItem)
            glifXML.insert(outlineIndex, newOutlineElement)
    # convertBezToGLIF is called only if the GLIF has been edited by a tool.
    # We need to update the edit status in the hash map entry.
    # I assume that convertGLIFToBez has been run before, which will add an
    # entry for this glyph.
    ufoFontData.updateHashEntry(glyphName, changed=True)
    # Add the stem hints.
    if hintInfoDict is not None:
        widthXML = glifXML.find("advance")
        if widthXML is not None:
            width = int(ast.literal_eval(widthXML.get("width", '0')))
        else:
            width = 0
        useDefaultGlyphDir = False
        newGlyphHash, _ = ufoFontData.buildGlyphHashValue(
            width, newOutlineElement, glyphName, useDefaultGlyphDir)
        # We add this hash to the T1 data, as it is the hash which matches
        # the output outline data. This is not necessarily the same as the
        # hash of the source data - autohint can be used to change outlines.
        if libIndex > 0:
            libItem = glifXML[libIndex]
        else:
            libItem = XMLElement("lib")
            glifXML.append(libItem)
        dictItem = libItem.find("dict")
        if dictItem is None:
            dictItem = XMLElement("dict")
            libItem.append(dictItem)
        # Remove any existing hint data.
        # childList is a static snapshot, so childList[i] is the value
        # element that immediately follows the matched key element.
        i = 0
        childList = list(dictItem)
        for childItem in childList:
            i += 1
            if (childItem.tag == "key") and (
                    (childItem.text == kHintDomainName1) or
                    (childItem.text == kHintDomainName2)):
                dictItem.remove(childItem)  # remove key
                dictItem.remove(childList[i])  # remove data item.
        glyphDictItem = dictItem
        key = XMLElement("key")
        key.text = kHintDomainName2
        glyphDictItem.append(key)
        glyphDictItem.append(hintInfoDict)
        # As of September, 2019, the hash should be at the end of the glyph
        # dict, so we iterate backwards from the end until we find the
        # placeholder, then set to newGlyphHash
        childList = list(hintInfoDict)
        for child in childList[::-1]:
            if getattr(child, 'text', "") == kHashIdPlaceholder:
                child.text = newGlyphHash
                break
    addWhiteSpace(glifXML, 0)
    return glifXML
def _get_glyph_width(glyph):
    """Return the glyph's advance width, or 0 when the attribute is absent.

    The glyph is drawn with a HashPointPen first — presumably to force the
    glyph data (and thus its width attribute) to load; confirm against the
    glyph object implementation before removing.
    """
    pen = HashPointPen(glyph)
    glyph.drawPoints(pen)
    return getattr(glyph, 'width', 0)
def regenerate_glyph_hashes(ufo_font_data):
    """Recompute and store the source hash for every glyph in the hash map.

    The handling of the glyph hashes is super convoluted.
    This method fixes https://github.com/adobe-type-tools/afdko/issues/349
    """
    default_dir = ufo_font_data.glyphDefaultDir
    for glyph_name, glyph_file in ufo_font_data.getGlyphMap().items():
        width, _, outline = ufo_font_data.getGlyphXML(default_dir, glyph_file)
        entry = ufo_font_data.hashMap.get(glyph_name)
        if not entry:
            # Only glyphs already present in the hash map are updated.
            continue
        new_hash, _ = ufo_font_data.buildGlyphHashValue(
            width, outline, glyph_name, True)
        entry[0] = new_hash
def checkHashMaps(fontPath, doSync):
    """
    Checks if the hashes of the glyphs in the default layer match the hash
    values stored in the UFO's 'data/com.adobe.type.processedHashMap' file.
    Returns a tuple of a boolean and a list. The boolean is True if all glyph
    hashes matched. The list contains strings that report the glyph names
    whose hash did not match.
    If doSync is True, it will delete any glyph in the processed glyph
    layer directory which does not have a matching glyph in the default
    layer, or whose source glyph hash does not match. It will then update
    the contents.plist file for the processed glyph layer, and delete
    the program specific hash maps.
    """
    msgList = []
    allMatch = True
    ufoFontData = UFOFontData(fontPath, True, '')
    ufoFontData.readHashMap()
    # Don't need to check the glyph hashes if there aren't any.
    if not ufoFontData.hashMap:
        return allMatch, msgList
    for glyphName, glyphFileName in ufoFontData.getGlyphMap().items():
        hash_entry = ufoFontData.hashMap.get(glyphName, None)
        if not hash_entry:
            continue
        else:
            # hash_entry[0] holds the stored source-glyph hash.
            oldHash = hash_entry[0]
        width, _, outlineXML = ufoFontData.getGlyphXML(
            ufoFontData.glyphDefaultDir, glyphFileName)
        if outlineXML is None:
            continue
        newHash, _ = ufoFontData.buildGlyphHashValue(
            width, outlineXML, glyphName, True)
        if oldHash != newHash:
            allMatch = False
            # Report at most 10 mismatches, then a summary line.
            if len(msgList) < 10:
                msgList.append("Glyph %s seems to have been modified since "
                               "last time checkoutlinesufo processed this "
                               "font." % glyphName)
            elif len(msgList) == 10:
                msgList.append("(additional messages omitted)")
    if doSync:
        fileList = os.listdir(ufoFontData.glyphWriteDir)
        fileList = filter(lambda fileName: fileName.endswith(".glif"),
                          fileList)
        # invert glyphMap
        fileMap = {}
        for glyphName, fileName in ufoFontData.glyphMap.items():
            fileMap[fileName] = glyphName
        for fileName in fileList:
            if fileName in fileMap and (
                    fileMap[fileName] in ufoFontData.hashMap):
                continue
            # Either not in glyphMap, or not in hashMap. Exterminate.
            try:
                glyphPath = os.path.join(ufoFontData.glyphWriteDir, fileName)
                os.remove(glyphPath)
                print("Removed outdated file: %s" % glyphPath)
            except OSError:
                print("Cannot delete outdated file: %s" % glyphPath)
    return allMatch, msgList
# Suffix used to tag lower-cased alternates of glyph file names when matching
# contents.plist entries case-insensitively (see cleanUpGLIFFiles below).
kAdobeLCALtSuffix = ".adobe.lc.altsuffix"
def cleanUpGLIFFiles(defaultContentsFilePath, glyphDirPath, doWarning=True):
    """Remove orphaned .glif files from a UFO glyph layer.

    Pass 1: delete (or case-correct the name of) any glif file in
    glyphDirPath that is not listed in the layer's own contents.plist.
    Pass 2 (only when glyphDirPath is not the default layer): delete glif
    files whose UFO glyph name is no longer present in the default layer's
    contents.plist at defaultContentsFilePath.

    Returns 1 if any file was removed, else 0. When doWarning is true,
    each removal is reported on stdout.
    """
    changed = 0
    contentsFilePath = os.path.join(glyphDirPath, kContentsName)
    # maps glyph names to files.
    with open(contentsFilePath, 'r', encoding='utf-8') as fp:
        contentsDict = plistlib.load(fp)
    # First, delete glyph files that are not in the contents.plist file in
    # the glyphDirPath. In some UFO font files, we end up with case errors,
    # so we need to check for a lower-case version of the file name.
    fileDict = {}
    for glyphName, fileName in contentsDict.items():
        fileDict[fileName] = glyphName
        lcFileName = fileName.lower()
        if lcFileName != fileName:
            # Record a tagged lower-case alias so a case-insensitive match
            # is distinguishable from an exact one.
            fileDict[lcFileName + kAdobeLCALtSuffix] = glyphName
    fileList = os.listdir(glyphDirPath)
    for fileName in fileList:
        if not fileName.endswith(".glif"):
            continue
        if fileName in fileDict:
            continue
        lcFileName = fileName.lower()
        if (lcFileName + kAdobeLCALtSuffix) in fileDict:
            # glif file exists which has a case-insensitive match to file name
            # entry in the contents.plist file; assume latter is intended, and
            # change the file name to match.
            glyphFilePathOld = os.path.join(glyphDirPath, fileName)
            glyphFilePathNew = os.path.join(glyphDirPath, lcFileName)
            os.rename(glyphFilePathOld, glyphFilePathNew)
            continue
        glyphFilePath = os.path.join(glyphDirPath, fileName)
        os.remove(glyphFilePath)
        if doWarning:
            # BUGFIX: report the removed file name, not the directory path.
            print("Removing glif file %s that was not in the contents.plist "
                  "file: %s" % (fileName, contentsFilePath))
        changed = 1

    if defaultContentsFilePath == contentsFilePath:
        return changed

    # Now remove glyphs that are not referenced in the defaultContentsFilePath.
    # Since the processed glyph layer is written with the defcon module,
    # and the default layer may be written by anything, the actual glyph file
    # names may be different for the same UFO glyph. We need to compare by UFO
    # glyph name, not file name.
    with open(defaultContentsFilePath, 'r', encoding='utf-8') as fp:
        defaultContentsDict = plistlib.load(fp)
    fileList = os.listdir(glyphDirPath)
    for fileName in fileList:
        if not fileName.endswith(".glif"):
            continue
        try:
            glyphName = fileDict[fileName]
            if glyphName not in defaultContentsDict:
                glyphFilePath = os.path.join(glyphDirPath, fileName)
                os.remove(glyphFilePath)
                if doWarning:
                    print("Removing glif %s that was not in the "
                          "contents.plist file: %s" % (
                              glyphName, defaultContentsFilePath))
                changed = 1
        except KeyError:
            # BUGFIX: on a KeyError, glyphName is unbound (or stale from a
            # previous iteration); report the file name that failed instead.
            print("Shouldn't happen %s %s" % (
                fileName, defaultContentsFilePath))
    return changed
def cleanupContentsList(glyphDirPath, doWarning=True):
    """Drop contents.plist entries whose .glif file is missing on disk.

    The plist is rewritten only when at least one entry was removed.
    When doWarning is true, each removed entry is reported on stdout.
    """
    contentsFilePath = os.path.join(glyphDirPath, kContentsName)
    # Glyph name -> glif file name mapping for this layer.
    with open(contentsFilePath, 'r', encoding='utf-8') as fp:
        contentsDict = plistlib.load(fp)
    existingFiles = set(os.listdir(glyphDirPath))
    changed = 0
    # Update, and later rewrite, the processed glyph layer's contents.plist.
    for glyphName, fileName in list(contentsDict.items()):
        if fileName in existingFiles:
            continue
        del contentsDict[glyphName]
        changed = 1
        if doWarning:
            print("Removing contents.plist entry where glif was missing: "
                  "%s, %s, %s" % (glyphName, fileName, glyphDirPath))
    if changed:
        with open(contentsFilePath, 'wb') as fp:
            plistlib.dump(contentsDict, fp)
def validateLayers(ufoFontPath, doWarning=True):
    """Bring the default and processed glyph layers back in sync.

    Read glyphs/contents.plist file.
    Delete any glyphs on /glyphs or /processed glyphs which are not in
    glyphs/contents.plist file. Delete any entries in the contents.plist
    file which are not in the glyph files. Filter contents list with what's
    in /processed glyphs: write to process/plist file.' The most common way
    that this is needed in the AFDKO workflow is if someone kills
    checkoutlines/checkoutlinesufo or autohint while it is running. Since
    the program may delete glyphs from the processed layer while running,
    and the contents.plist file is updated only when the changed font is
    saved, the contents.plist file in the processed layer ends up referencing
    glyphs that aren't there anymore. You can also get extra glyphs not in
    the contents.plist file by several editing workflows.
    """
    # First, clean up the default layer.
    glyphDirPath = os.path.join(ufoFontPath, "glyphs")
    defaultContentsFilePath = os.path.join(
        ufoFontPath, "glyphs", kContentsName)
    # Happens when called on a font which is not a UFO font.
    if not os.path.exists(defaultContentsFilePath):
        return
    # remove glif files not in contents.plist
    cleanUpGLIFFiles(defaultContentsFilePath, glyphDirPath, doWarning)
    # remove entries for glif files that don't exist
    cleanupContentsList(glyphDirPath, doWarning)
    # now for the processed dir.
    glyphDirPath = os.path.join(ufoFontPath, kProcessedGlyphsLayer)
    if not os.path.exists(glyphDirPath):
        return
    # Remove any glif files that are not in both the processed glif directory
    # contents.plist file and the default contents .plist file.
    # This will happen pretty often, as glif files are deleted from the
    # processed glyph layer is their hash differs from the current hash for
    # the glyph in the default layer.
    cleanUpGLIFFiles(defaultContentsFilePath, glyphDirPath, doWarning)
    cleanupContentsList(glyphDirPath, doWarning)
def makeUFOFMNDB(srcFontPath):
    """Build a temporary FontMenuNameDB file from the UFO's fontinfo.plist.

    Derives the PostScript name, family name and style name from fontinfo,
    falling back to placeholder values and printing a warning for each
    missing key. Returns the path of the temporary FontMenuNameDB file.
    """
    fontInfoPath = os.path.join(srcFontPath, kFontInfoName)  # default
    fiMap, _ = parsePList(fontInfoPath)
    psName = "NoFamilyName-Regular"
    familyName = "NoFamilyName"
    styleName = "Regular"
    try:
        psName = fiMap["postscriptFontName"]
        # Family/style derived from the PS name; may be overridden below by
        # the more specific fontinfo keys.
        parts = psName.split("-")
        familyName = parts[0]
        if len(parts) > 1:
            styleName = parts[1]
    except KeyError:
        print("ufotools [Warning] UFO font is missing 'postscriptFontName'")
    try:
        familyName = fiMap["openTypeNamePreferredFamilyName"]
    except KeyError:
        try:
            familyName = fiMap["familyName"]
        except KeyError:
            print("ufotools [Warning] UFO font is missing 'familyName'")
    try:
        styleName = fiMap["openTypeNamePreferredSubfamilyName"]
    except KeyError:
        try:
            styleName = fiMap["styleName"]
        except KeyError:
            print("ufotools [Warning] UFO font is missing 'styleName'")
    fmndbPath = fdkutils.get_temp_file_path()
    parts = []
    parts.append("[%s]" % (psName))
    parts.append("\tf=%s" % (familyName))
    parts.append("\ts=%s" % (styleName))
    parts.append("")
    data = '\n'.join(parts)
    # BUGFIX: write UTF-8 explicitly; family/style names may contain
    # non-ASCII characters that the platform's default encoding (e.g.
    # cp1252 on Windows) cannot encode, which raised UnicodeEncodeError.
    with open(fmndbPath, "w", encoding="utf-8") as fp:
        fp.write(data)
    return fmndbPath
| StarcoderdataPython |
3339184 | <reponame>FRC-1721/LAPIS
#!/usr/bin/env python
"""
Copyright (c) 2019-2020, Concord Robotics Inc
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Concord Robotics Inc nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
class RobotCommand:
    """Handle to a robot command exposed over a table.

    The command is driven through a boolean "running" entry on the
    command's sub-table: writing True asks the robot to start it, writing
    False asks it to stop, and reading it back reports the current state.
    """

    def __init__(self, command_name, table):
        # Resolve the sub-table dedicated to this command.
        self.command_table = table.table.getSubTable(command_name)

    def start(self):
        """Mark the command as running so the robot starts it."""
        self.command_table.putBoolean("running", True)

    def stop(self):
        """Clear the running flag so the robot stops the command."""
        self.command_table.putBoolean("running", False)

    def run_till_done(self):
        """Start the command and busy-wait until the robot clears the flag.

        Big danger: this blocks the caller indefinitely if the flag is
        never cleared on the other side.
        """
        self.command_table.putBoolean("running", True)
        while self.command_table.getBoolean("running", True):
            pass

    def toggle(self):
        """Flip the command's running state."""
        currently_running = self.command_table.getBoolean("running", True)
        self.command_table.putBoolean("running", not currently_running)

    def check(self):
        """Return True if the command is currently marked as running."""
        return self.command_table.getBoolean("running", False)
| StarcoderdataPython |
3377614 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
from wxalarmlib import application, config, utils
if __name__ == '__main__':
    # avoid throw [UnicodeEncodeError: 'ascii' codec can't encode characters]
    # On Python 2, force the interpreter default encoding to UTF-8 (requires
    # reloading sys to re-expose setdefaultencoding).
    _unicode = None
    if sys.version_info < (3, 0):
        _unicode = unicode
        from imp import reload
        reload(sys)
        sys.setdefaultencoding('utf-8')
    else:
        _unicode = str
    # config: build application configuration from this file's directory
    # and the current process id.
    root_dir = utils.absdir(__file__)
    process_id = os.getpid()
    config = config.Config(root_dir, process_id)
    store = application.WxAlarmDataStore(config)
    # application: create the app around the data store and enter its
    # main loop.
    app = application.WxAlarmApp(store)
    app.StartMain()
| StarcoderdataPython |
1678435 | <reponame>andbortnik/thenewboston-node
import logging
from typing import Optional
from thenewboston_node.business_logic.models import Node
from thenewboston_node.core.utils.types import hexstr
from .base import BaseMixin
logger = logging.getLogger(__name__)
class NetworkMixin(BaseMixin):
    """Blockchain queries about registered network nodes and the
    primary validator schedule."""

    def get_node_by_identifier(self, identifier: hexstr, on_block_number: Optional[int] = None) -> Optional[Node]:
        """Return the node registered under `identifier` as of
        `on_block_number` (defaults to the last block), or None."""
        if on_block_number is None:
            on_block_number = self.get_last_block_number()
        return self.get_account_state_attribute_value(identifier, 'node', on_block_number)

    def yield_nodes(self, block_number: Optional[int] = None):
        """Yield each distinct node found in account states, starting from
        `block_number`; only the first occurrence per account is yielded."""
        known_accounts = set()
        for account_number, account_state in self.yield_account_states(from_block_number=block_number):
            node = account_state.node
            if not node:
                continue
            if account_number in known_accounts:
                continue
            known_accounts.add(account_number)
            yield node

    def has_nodes(self):
        """Return True if at least one node is registered."""
        return any(self.yield_nodes())

    def get_primary_validator(self, block_number: Optional[int] = None) -> Optional[Node]:
        """Return the node scheduled as primary validator for `block_number`
        (defaults to the next block number), or None if no schedule covers it.

        Blocks are scanned in reverse first so that the most recent schedule
        update wins; the blockchain state is consulted as a fallback.
        """
        if block_number is None:
            block_number = self.get_next_block_number()
        # We get last_block_number and blockchain_state here to avoid race conditions. Do not change it
        last_block_number = self.get_last_block_number()
        blockchain_state = self.get_blockchain_state_by_block_number(
            last_block_number, inclusive=last_block_number > -1
        )
        for block in self.yield_blocks_slice_reversed(last_block_number, blockchain_state.get_last_block_number()):
            for account_number, account_state in block.yield_account_states():
                pv_schedule = account_state.primary_validator_schedule
                if pv_schedule and pv_schedule.is_block_number_included(block_number):
                    return self.get_node_by_identifier(account_number)
        # TODO(dmu) HIGH: Once we have more accounts this method will become slow. We need to optimize it
        #                 by caching
        for account_number, account_state in blockchain_state.yield_account_states():
            pv_schedule = account_state.primary_validator_schedule
            if pv_schedule and pv_schedule.is_block_number_included(block_number):
                return self.get_node_by_identifier(account_number)
        return None
| StarcoderdataPython |
32007 | <filename>saleor/app/management/commands/install_app.py
import json
from typing import Any, Optional
import requests
from django.core.exceptions import ValidationError
from django.core.management import BaseCommand, CommandError
from django.core.management.base import CommandParser
from ....app.validators import AppURLValidator
from ....core import JobStatus
from ...installation_utils import install_app
from ...models import AppInstallation
from .utils import clean_permissions
class Command(BaseCommand):
    """Management command that installs a new app from a manifest URL,
    optionally activating it right after installation."""

    help = "Used to install new app."

    def add_arguments(self, parser: CommandParser) -> None:
        parser.add_argument("manifest-url", help="Url with app manifest.", type=str)
        parser.add_argument(
            "--activate",
            action="store_true",
            dest="activate",
            help="Activates the app after installation",
        )

    def validate_manifest_url(self, manifest_url: str):
        """Raise CommandError when the manifest URL is not a valid app URL."""
        try:
            AppURLValidator()(manifest_url)
        except ValidationError:
            raise CommandError(f"Incorrect format of manifest-url: {manifest_url}")

    def fetch_manifest_data(self, manifest_url: str) -> dict:
        """Download the manifest and decode it as JSON, raising on HTTP errors."""
        response = requests.get(manifest_url)
        response.raise_for_status()
        return response.json()

    def handle(self, *args: Any, **options: Any) -> Optional[str]:
        """Install the app and return its auth token as a JSON string."""
        wants_activation = options["activate"]
        manifest_url = options["manifest-url"]
        self.validate_manifest_url(manifest_url)
        manifest_data = self.fetch_manifest_data(manifest_url)

        # Resolve requested permissions before creating the installation job.
        permissions = clean_permissions(manifest_data.get("permissions", []))
        app_job = AppInstallation.objects.create(
            app_name=manifest_data["name"], manifest_url=manifest_url
        )
        if permissions:
            app_job.permissions.set(permissions)

        try:
            app = install_app(app_job, wants_activation)
        except Exception as exc:
            # Record the failure on the job before propagating the error.
            app_job.status = JobStatus.FAILED
            app_job.save()
            raise exc
        token = app.tokens.first()
        return json.dumps({"auth_token": token.auth_token})
| StarcoderdataPython |
188302 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from rest_framework import serializers
from csdn.models import CsdnArticle, CsdnAuthor
__author__ = 'wfy'
__date__ = '2017/10/14 18:50'
class ArticleSerializer(serializers.ModelSerializer):
    """Serialize every field of a CsdnArticle."""

    class Meta:
        model = CsdnArticle
        fields = '__all__'
class AuthorSerializer(serializers.ModelSerializer):
    """Serialize every field of a CsdnAuthor."""

    class Meta:
        model = CsdnAuthor
        fields = '__all__'
| StarcoderdataPython |
86651 | <gh_stars>0
"""
******
######
******
######
"""
# Print a 4-row by 6-column banner: even rows are '*', odd rows are '#'.
for row in range(4):
    banner_char = "*" if row % 2 == 0 else "#"
    print(banner_char * 6)
| StarcoderdataPython |
3346827 | <reponame>DAIM-ML/autotf
import time
import logging
from tuner.initial_design.init_random_uniform import init_random_uniform
from tuner.parallel_solver.base_parallel_solver import BaseParallelSolver
from tuner.parallel_solver.base_parallel_solver import evaluate_func
logger = logging.getLogger(__name__)
class AsyncParallelSolver(BaseParallelSolver):
    """Asynchronous parallel Bayesian-optimization solver.

    New candidate points are submitted to the worker pool as soon as it
    has spare capacity, without waiting for a whole batch to complete.
    """

    def __init__(self, objective_func, lower, upper,
                 acquisition_func, model, maximize_func,
                 initial_design=init_random_uniform,
                 initial_points=3,
                 output_path=None,
                 train_interval=1,
                 n_restarts=1,
                 n_workers=4,
                 rng=None):
        """
        Parameters
        ----------
        objective_func: callable
            Function evaluated by the worker pool.
        lower, upper: np.ndarray(D,)
            Box bounds of the search space.
        acquisition_func: BaseAcquisitionFunctionObject
            The acquisition function which will be maximized.
        model:
            Surrogate model trained on the observed points.
        maximize_func:
            Optimizer used to maximize the acquisition function.
        initial_design: callable
            Strategy producing the initial points.
        initial_points: int
            Number of initial points (presumably per worker, given the
            `init_points * num_workers` counter in run() — confirm).
        output_path: str or None
            Stored for output purposes (only assigned here).
        train_interval: int
            Re-optimize model hyperparameters every `train_interval`
            candidate selections.
        n_restarts: int
            Restarts for the acquisition maximizer.
        n_workers: int
            Number of parallel evaluation workers.
        rng: np.random.RandomState or None
            Random number generator.
        """
        BaseParallelSolver.__init__(self, acquisition_func=acquisition_func, maximize_func=maximize_func, model=model,
                                    init_points=initial_points, n_restarts=n_restarts, rng=rng,
                                    initial_design=initial_design, lower=lower, upper=upper,
                                    objective_func=objective_func, n_workers=n_workers)
        self.start_time = time.time()
        self.objective_func = objective_func
        self.time_func_evals = []
        self.time_overhead = []
        self.train_interval = train_interval
        self.output_path = output_path
        self.time_start = None

    def run(self, num_iterations=10, X=None, y=None):
        """
        The main parallel optimization loop
        Parameters
        ----------
        num_iterations: int
            The number of iterations
        X: np.ndarray(N,D)
            Initial points that are already evaluated
        y: np.ndarray(N,1)
            Function values of the already evaluated points
        Returns
        -------
        np.ndarray(1,D)
            Incumbent
        np.ndarray(1,1)
            (Estimated) function value of the incumbent
        """
        # Save the time where we start the parallel optimization procedure
        self.time_start = time.time()
        if X is None and y is None:
            self.initialize()
        else:
            self.X = X
            self.y = y
        # Main asynchronous parallel optimization loop
        self.trial_statistics.clear()
        evaluate_counter = self.init_points * self.num_workers
        while evaluate_counter < num_iterations*self.num_workers:
            if len(self.trial_statistics) > self.num_workers:
                # Pool is saturated; back off briefly before re-checking.
                time.sleep(0.1)
            else:
                # Retrain hyperparameters only every train_interval picks.
                if (evaluate_counter+1) % self.train_interval == 0:
                    do_optimize = True
                else:
                    do_optimize = False
                # Choose next point to evaluate
                start_time = time.time()
                new_x = self.choose_next(self.X, self.y, do_optimize)
                self.time_overhead.append(time.time() - start_time)
                logger.info("Optimization overhead was %f seconds", self.time_overhead[-1])
                logger.info("Next candidate %s", str(new_x))
                self.trial_statistics.append(self.pool.submit(evaluate_func, (self.objective_func, new_x)))
                evaluate_counter += 1
            # Get the evaluation statistics
            self.collect()
        # Wait for all tasks finish
        # NOTE(review): this waits only when trial_statistics is EMPTY;
        # presumably the condition was meant to be `if len(...)` so that
        # pending trials are drained — confirm.
        if not len(self.trial_statistics):
            self.wait_tasks_finish()
            self.collect()
        logger.info("Return %s as incumbent with error %f ",
                    self.incumbents[-1], self.incumbents_values[-1])
        return self.incumbents[-1], self.incumbents_values[-1]

    def choose_next(self, X=None, y=None, do_optimize=True):
        """
        Suggests a new point to evaluate.
        Parameters
        ----------
        X: np.ndarray(N,D)
            Initial points that are already evaluated
        y: np.ndarray(N,1)
            Function values of the already evaluated points
        do_optimize: bool
            If true the hyperparameters of the model are
            optimized before the acquisition function is
            maximized.
        Returns
        -------
        np.ndarray(1,D)
            Suggested point
        """
        if X is None and y is None:
            x = self.initial_design(self.lower, self.upper, 1, rng=self.rng)[0, :]
        elif X.shape[0] == 1:
            # We need at least 2 data points to train a GP
            x = self.initial_design(self.lower, self.upper, 1, rng=self.rng)[0, :]
        else:
            try:
                logger.info("Train model...")
                t = time.time()
                self.model.train(X, y, do_optimize=do_optimize)
                logger.info("Time to train the model: %f", (time.time() - t))
            except:
                logger.error("Model could not be trained!")
                raise
            self.acquisition_func.update(self.model)
            logger.info("Maximize acquisition function...")
            t = time.time()
            x = self.maximize_func.maximize()
            logger.info("Time to maximize the acquisition function: %f", (time.time() - t))
        return x
| StarcoderdataPython |
1665124 | from datetime import datetime, timedelta
import time
import requests
import json
from matplotlib.pylab import date2num
from matplotlib import pyplot as plt
import mpl_finance as mpf
from pandas import DataFrame
import talib as ta
import sys
sys.path.append('..')
import DictCode as dc
plt.rcParams['font.family'] = 'sans-serif'  # needed so CJK text renders correctly
plt.rcParams['font.sans-serif']=['SimHei']  # needed so minus signs/CJK glyphs render correctly
def get_candles_data(url, time_format='%Y-%m-%d'):
    """Fetch candle data from a Sina futures JSON endpoint.

    Parses the bracketed response text and returns a list of
    (matplotlib_date_num, open, high, low, close) tuples, oldest first.

    `time_format` is the strptime format of the timestamp field; the
    default handles daily data. Pass '%Y-%m-%d %H:%M:%S' for
    minute-level endpoints (generalizes this function so the near-duplicate
    minutes variant is no longer needed).
    """
    print(url)
    response = requests.get(url)
    data_arr = response.text.replace("[[", '').replace("]]", '').replace("\"", "").split("],[")
    quotes = []
    for item_str in reversed(data_arr):
        item = item_str.split(",")
        # Timestamps arrive as ISO-ish text, e.g. 2020-01-02T00:00:00.000Z.
        sdatetime_num = date2num(datetime.strptime(
            item[0].replace("T", ' ').replace('.000Z', ''), time_format))
        # candlestick_ohlc expects (time, open, high, low, close).
        datas = (sdatetime_num, float(item[1]), float(item[2]), float(item[3]), float(item[4]))
        quotes.append(datas)
    return quotes
def get_candles_data_minutes(url):
    """Fetch minute-level candle data from a Sina futures JSON endpoint.

    Returns a list of (matplotlib_date_num, open, high, low, close)
    tuples, oldest first.
    """
    print(url)
    raw_text = requests.get(url).text
    rows = raw_text.replace("[[", '').replace("]]", '').replace("\"", "").split("],[")
    quotes = []
    for row in reversed(rows):
        fields = row.split(",")
        # Timestamps arrive as ISO-ish text; strip the T and .000Z pieces.
        timestamp = fields[0].replace("T", ' ').replace('.000Z', '')
        time_num = date2num(datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S'))
        # candlestick_ohlc expects (time, open, high, low, close).
        quotes.append((time_num, float(fields[1]), float(fields[2]),
                       float(fields[3]), float(fields[4])))
    return quotes
def plot_candles(datas,title,tradePair,width):
    """Render a candlestick chart with Bollinger bands, plus ATR (top)
    and MACD (bottom) panels, and save it under ../img/.

    datas: list of (date_num, open, high, low, close) tuples.
    title: timeframe label used in the chart title and file name.
    tradePair: instrument name used in the chart title and file name.
    width: candle width passed to candlestick_ohlc.
    """
    close = []
    high = []
    low = []
    tradeTime = []
    for item in datas:
        tradeTime.append(item[0])
        high.append(item[2])
        low.append(item[3])
        close.append(item[4])
    merge_dt_dict = {'tradeTime':tradeTime,
                     'high':high,
                     'low':low,
                     'close':close}
    data_df = DataFrame(merge_dt_dict)
    # fig, ax = plt.subplots(facecolor=(0, 0.3, 0.5),figsize=(37,37))
    # Main price panel occupies rows 1-4 of a 6x4 grid.
    ax1 = plt.subplot2grid(((6,4)), (1,0), rowspan=4, colspan=4)
    # fig.subplots_adjust(bottom=0.1)
    ax1.xaxis_date()
    # plt.xlabel('time')
    plt.ylabel('price')
    mpf.candlestick_ohlc(ax1,datas,width=width,colorup='r',colordown='green') # red candles for up, green for down
    # upper,middle,lower=ta.BBANDS(data_df['close'], matype=ta.MA_Type.T3,timeperiod=20)
    upper,middle,lower=ta.BBANDS(data_df['close'], timeperiod=20)
    ax1.plot(data_df['tradeTime'],middle, 'blue',label='middle')
    ax1.plot(data_df['tradeTime'],upper, 'y',label='upper')
    ax1.plot(data_df['tradeTime'],lower, 'y',label='lower')
    ## Draw ATR at the top
    ax0 = plt.subplot2grid(((6,4)), (0,0), sharex=ax1, rowspan=1, colspan=4,)
    plt.title(tradePair+'['+title+']')
    # rsi = ta.RSI(data_df['close'],timeperiod=14)
    atr = ta.ATR(data_df['high'],data_df['low'],data_df['close'],timeperiod=14)
    atrCol = 'blue'
    posCol = '#386d13'
    negCol = '#8f2020'
    ax0.plot(data_df['tradeTime'], atr, atrCol, linewidth=1)
    # ax0.fill_between(data_df['tradeTime'], atr, 70, where=(atr>=70), facecolor=negCol, edgecolor=negCol)
    # ax0.fill_between(data_df['tradeTime'], atr, 30, where=(atr<=30), facecolor=posCol, edgecolor=posCol)
    # ax0.set_yticks([30,70])
    plt.ylabel('ATR')
    ### Draw MACD at the bottom
    ax2 = plt.subplot2grid(((6,4)), (5,0), sharex=ax1, rowspan=1, colspan=4,)
    fillcolor = '#00ffe8'
    macd, macdsignal, macdhist = ta.MACD(data_df['close'],fastperiod=6, slowperiod=12, signalperiod=9)
    # emaslow, emafast, macd
    ax2.plot(data_df['tradeTime'], macd, color='y', lw=1)
    ax2.plot(data_df['tradeTime'], macdsignal, color='b', lw=1)
    # ax2.fill_between(data_df['tradeTime'], macdhist, 0, where=(macdhist>0), facecolor="r", edgecolor=fillcolor)
    ax2.fill_between(data_df['tradeTime'], macdhist, 0, where=(macdhist>0), facecolor="r")
    ax2.fill_between(data_df['tradeTime'], macdhist, 0, where=(macdhist<0), facecolor="g")
    # ax2.fill_between(data_df['tradeTime'], macdhist, 0, where=(macdhist<0), facecolor="g", edgecolor=fillcolor)
    plt.grid(True)
    # Hide x tick labels on the upper panels; only the MACD panel shows dates.
    plt.setp(ax0.get_xticklabels(), visible=False)
    plt.setp(ax1.get_xticklabels(), visible=False)
    plt.xticks(rotation=45)  # rotation angle for the date labels
    plt.savefig('../img/futures-'+tradePair+'-'+title+'.png')
if __name__ == '__main__':
    # Sina futures endpoints: {} placeholders take minute interval / symbol.
    url_sina_minutes ="http://stock2.finance.sina.com.cn/futures/api/json.php/IndexService.getInnerFuturesMiniKLine{}m?symbol={}"
    url_sina_daily ="http://stock2.finance.sina.com.cn/futures/api/json.php/IndexService.getInnerFuturesDailyKLine?symbol={}"
    code_index ="rb"
    # For every configured symbol, fetch and plot the daily and 60-minute
    # candles, pausing between requests to stay polite to the API.
    for code_index in dc.SYMBOL_JSON.keys():
        data_candles_days = get_candles_data(url_sina_daily.format(dc.SYMBOL_JSON[code_index]["sylbom"]))
        plot_candles(data_candles_days,"days",dc.SYMBOL_JSON[code_index]["name"],0.7)
        time.sleep(1)
        data_candles_60m = get_candles_data_minutes(url_sina_minutes.format(60,dc.SYMBOL_JSON[code_index]["sylbom"]))
        plot_candles(data_candles_60m,"60m",dc.SYMBOL_JSON[code_index]["name"],0.7)
        time.sleep(1)
        # data_candles_15m = get_candles_data_minutes(url_sina_minutes.format(15,dc.SYMBOL_JSON[code_index]["sylbom"]))
        # plot_candles(data_candles_15m,"15m",dc.SYMBOL_JSON[code_index]["name"],0.7)
        # time.sleep(1)
        # data_candles_5m = get_candles_data_minutes(url_sina_minutes.format(5,dc.SYMBOL_JSON[code_index]["sylbom"]))
        # plot_candles(data_candles_5m,"5m",dc.SYMBOL_JSON[code_index]["name"],0.7)
        # time.sleep(1)
1674847 | <reponame>Unity05/WebCrawler<filename>crawler.py
from selenium import webdriver
import requests
import nltk
from bs4 import BeautifulSoup
from bs4.element import Comment
import time
import json
def split_to_sentences(content):
    """Split a text blob into sentences using NLTK's sentence tokenizer."""
    return nltk.sent_tokenize(content)
def search_for_keywords(words, keywords):
    """Return True if any item of `words` exactly matches a keyword.

    Uses a set for O(1) membership tests instead of the original
    O(len(words) * len(keywords)) nested scan.
    """
    keyword_set = set(keywords)
    return any(word in keyword_set for word in words)
def tag_visible(element):
    """Return True if a BeautifulSoup text node is user-visible text.

    Nodes inside non-content tags, and HTML comments, are filtered out.
    """
    hidden_parents = ('style', 'script', 'head', 'title', 'meta', '[document]')
    if element.parent.name in hidden_parents:
        return False
    return not isinstance(element, Comment)
def text_from_html(body):
    """Extract the visible text of an HTML document as one string."""
    soup = BeautifulSoup(body, 'html.parser')
    all_text_nodes = soup.findAll(text=True)
    visible = (node.strip() for node in all_text_nodes if tag_visible(node))
    return u" ".join(visible)
def go_through_urls(urls0):
    """Visit each URL, record keyword-bearing sentences, and return the
    links found on the visited pages.

    Relies on module globals: `driver` (Selenium), `keywords`, and
    `important_sentences` (appended to as a side effect).
    """
    urls1 = []
    elems = None
    for url in urls0:
        try:
            driver.get(url)
            elems = driver.find_elements_by_xpath("//*[@href]")
            resp = requests.get(url)
            content = text_from_html(resp.text)
            sentence_dict = split_to_sentences(content)
            for sentence in sentence_dict:
                words_dict = nltk.word_tokenize(sentence)
                has_keywords = search_for_keywords(words_dict, keywords)
                if has_keywords == True:
                    important_sentences.append([sentence, url])
                    print([sentence, url])
            # Collect every hyperlink of the page for the next crawl round.
            for elem in elems:
                urls1.append(elem.get_attribute('href'))
        except Exception:
            # NOTE(review): any failure (network, parsing, stale element) is
            # swallowed after a 10s pause; the URL is silently skipped.
            time.sleep(10)
            pass
    # elems still holds the links of the last successfully loaded page.
    print(str(elems))
    return urls1
# Crawl seeds: the Google query to start from, the keywords to look for,
# and the accumulator for matching sentences.
search_input = 'google'
keywords = ['nothing']
important_sentences = []
driver = webdriver.Chrome(r'C:\Users\Friedward\PycharmProjects\WebCrawler\chromedriver')
driver.get('https://www.google.com/')
# Type the query into Google's search box and submit it.
search_field = driver.find_element_by_id('lst-ib')
search_field.send_keys(search_input)
search_field.submit()
elems = driver.find_elements_by_xpath("//*[@href]")
urls = []
for elem in elems:
    # Fixed-length prefixes used to recognise and exclude Google's own
    # domains from the seed URL list.
    url_start = str(elem.get_attribute('href'))[ :22]
    url_sp_start = str(elem.get_attribute('href'))[ :26]
    url_li_start = str(elem.get_attribute('href'))[ :27]
    url_tl_start = str(elem.get_attribute('href'))[ :28]
    check_http = str(elem.get_attribute('href'))[ :4]
    check_https = str(elem.get_attribute('href'))[ :5]
    print(str(url_start) + ' | ' + str(url_tl_start) + ' | ' + str(url_li_start) + ' | ' + str(url_sp_start) + ' | ' + str(elem.get_attribute('href')))
    if check_http == 'http' or check_https == 'https':
        if url_start != 'https://www.google.com' \
                and url_sp_start != 'https://support.google.com' \
                and url_li_start != 'https://accounts.google.com' \
                and url_tl_start != 'https://translate.google.com':
            urls.append(elem.get_attribute('href'))
# Breadth-first crawl: follow discovered links for five rounds.
for i2 in range(5):
    urls = go_through_urls(urls)
    print(str(urls))
# Persist the collected [sentence, url] pairs as JSON.
with open('important_content', 'w') as important_content_file:
    important_content_file.write(json.dumps(important_sentences))
| StarcoderdataPython |
3202625 | from utils import readYaml
from os import path
import collections
from field_names import FieldNames
# Shared registry of the output-field names used by read_text below.
line_fields_names = FieldNames()
def read_text(text_file, yaml_dir):
    """Parse a transcription source file into per-word records.

    `text_file`'s name determines the format: a ``_bib``/``_nonbib``
    suffix selects the field separator and column layout. Returns a tuple
    ``(parsed_data, lines)``: a list of per-word defaultdicts keyed by
    `line_fields_names` attributes, and a list of (scroll, fragment, line)
    tuples (populated on the nonbib path).
    """
    # ANSI escape character: lines containing it carry script/interlinear
    # markup rather than data.
    XC = "\u001b"
    # Column-name constants for the two source layouts.
    iBIBINFO = "bibinfo"
    iSCROLLINFO = "scrollinfo"
    iSCROLLNAME = "scrollname"
    iSCROLLREF = "scrollref"
    iTRANS = "trans"
    iANALYSIS = "analysis"
    iNUM = "num"

    source_type = text_file.split('_')[-1].split('.')[0]
    split_char = {'bib': '\t',
                  'nonbib': ' '}[source_type]
    cols_names = {
        'bib': (iBIBINFO, iSCROLLINFO, iTRANS, iANALYSIS, iNUM),
        'nonbib': (iSCROLLNAME, iSCROLLREF, iTRANS, iANALYSIS,)}[source_type]
    n_cols = len(cols_names)

    # NOTE(review): scrollDecl, lineFixes, fixL and the lines defaultdict
    # are assigned but never used below (lines is rebound to a list);
    # presumably leftovers from the commented-out fix logic — confirm.
    scrollDecl = readYaml(path.join(yaml_dir, 'scroll.yaml'))
    fixesDecl = readYaml(path.join(yaml_dir, 'fixes.yaml'))
    lineFixes = fixesDecl["lineFixes"]
    fieldFixes = fixesDecl["fieldFixes"]
    fixL = "FIX (LINE)"
    fixF = "FIX (FIELD)"
    lines = collections.defaultdict(set)
    # Parsing state carried across lines.
    prev_frag_line_num = None
    prev_word_num = None
    frag_line_num = None
    subNum = None
    interlinear = None
    script = None
    line_num = 0
    parsed_data = []
    lines = []
    with open(text_file) as f:
        for line in f:
            line_num += 1
            # "check for another language (like greek or paleo hebrew'
            if XC in line:
                # Escape-coded markup line: update interlinear/script state
                # and consume the line.
                xLine = line
                if "(a)" in xLine:
                    interlinear = 1
                elif "(b)" in xLine:
                    interlinear = 2
                elif xLine.startswith(f"{XC}r"):
                    interlinear = ""
                if "(fl)" in xLine:
                    script = 'paleohebrew'
                elif "(f0)" in xLine:
                    script = 'greekcapital'
                elif "(fy)" in xLine:
                    script = ""
                continue
            line = line.rstrip("\n")
            if source_type == 'bib':
                pass
            elif source_type == 'nonbib':
                if line.startswith(">"):
                    # ">"-prefixed header line: sets the current scroll and
                    # fragment context, then is consumed.
                    line = line[1:]
                    fields = line.split(split_char)
                    scroll = fields[0]
                    (fragment, frag_line_num) = fields[1].split(":", 1)
                    if frag_line_num != prev_frag_line_num:
                        interlinear = ""
                    prev_frag_line_num = frag_line_num
                    continue
            else:
                assert 0, '{} is not a valid source type'.format(source_type)
            fields = line.split(split_char)
            n_fields = len(fields)
            if n_fields > n_cols:
                # diag("FIELDS", f"too many: {nFields}", -1)
                print('to many fields')
                continue
            elif n_fields < n_cols:
                # Pad short rows so every column name gets a value.
                fields += [""] * (n_cols - n_fields)
            line_data = collections.defaultdict(
                lambda: "", ((f, c) for (f, c) in zip(cols_names, fields)),
            )
            parsed_word = collections.defaultdict(lambda: "")
            parsed_word[line_fields_names.source_line_num] = line_num
            trans = line_data[iTRANS]
            if source_type == 'bib':
                # do stuff
                pass
            else:
                # some processing of reconstructions
                if trans.startswith("]") and trans.endswith("["):
                    text = trans[1:-1]
                    if text.isdigit():
                        # Digits between brackets are a (reversed) sub-number.
                        subNum = text[::-1]
                    continue
                (fragment, rest) = line_data[iSCROLLREF].split(":", 1)
                (frag_line_num, all_word) = rest.split(",", 1)
                if frag_line_num != prev_frag_line_num:
                    interlinear = ""
                parsed_word[line_fields_names.frag_label] = fragment
                parsed_word[line_fields_names.frag_line_num] = frag_line_num
                if line == "0":
                    if subNum:
                        parsed_word[line_fields_names.sub_num] = subNum
                (word_num, sub_word_num) = all_word.split(".", 1)
                parsed_word[line_fields_names.word_line_num] = word_num
                parsed_word[line_fields_names.sub_word_num] = sub_word_num
                if word_num == prev_word_num:
                    # Same word number as the previous record: the previous
                    # record is a prefix of this word.
                    parsed_data[-1][line_fields_names.word_prefix] = True
                prev_word_num = word_num
                parsed_word[line_fields_names.scroll_name] = scroll
                lines.append((scroll, fragment, line))
                if interlinear:
                    parsed_word[line_fields_names.interlinear] = interlinear
                if script:
                    parsed_word[line_fields_names.script_type] = script
            # Morphological analysis: '%' marks Aramaic, '@' separates the
            # lexeme from the morphology tag.
            analysis = line_data[iANALYSIS] or ""
            (lang, lex, morph) = ("", "", "")
            if "%" in analysis:
                lang = 'aramiac'
                (lex, morph) = analysis.split("%", 1)
            elif "@" in analysis:
                (lex, morph) = analysis.split("@", 1)
            else:
                lex = analysis
            parsed_word[line_fields_names.transcript] = trans
            parsed_word[line_fields_names.lang] = lang
            parsed_word[line_fields_names.lex] = lex
            parsed_word[line_fields_names.morph] = morph
            # here we fix stuff by errors yaml
            # if ln in fieldFix:
            #     for (field, (fr, to, expl)) in fieldFix[ln].items():
            #         rep = f"{field:<8} {fr:>6} => {to:<6} {expl}"
            #         iVal = oData[field]
            #         if iVal == fr:
            #             oData[field] = to
            #             diag(fixF, rep, 1)
            #         else:
            #             diag(fixF, rep, -1)
            prev_frag_line_num = frag_line_num
            parsed_data.append(parsed_word)
    return parsed_data, lines
1766038 | <reponame>DemjanUA/uchkin_diploma
import os
import numpy
# scipy.special for the sigmoid function expit()
from scipy import special
# neural network class definition
class neuralNetwork:
    """A minimal three-layer (input / hidden / output) feed-forward
    network trained with plain back-propagation and a sigmoid activation.
    """

    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        # Layer sizes.
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # Weight matrices: wih links input->hidden, who links hidden->output.
        # Entry w_i_j is the weight on the link from node i to node j of the
        # next layer; drawn from N(0, 1/sqrt(fan_in)).
        self.wih = numpy.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))
        # learning rate
        self.lr = learningrate
        # Sigmoid activation.
        self.activation_function = lambda x: special.expit(x)

    def train(self, inputs_list, targets_list):
        """Run one forward pass and back-propagate the target error,
        updating both weight matrices in place."""
        # Shape inputs/targets as column vectors.
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        # Forward pass through both layers.
        hidden_outputs = self.activation_function(numpy.dot(self.wih, inputs))
        final_outputs = self.activation_function(numpy.dot(self.who, hidden_outputs))
        # Output error, then the share attributed to each hidden node.
        output_errors = targets - final_outputs
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # Gradient-descent weight updates, output layer first.
        self.who += self.lr * numpy.dot(
            (output_errors * final_outputs * (1.0 - final_outputs)),
            numpy.transpose(hidden_outputs))
        self.wih += self.lr * numpy.dot(
            (hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
            numpy.transpose(inputs))

    def query(self, inputs_list):
        """Forward-propagate inputs_list and return the output activations."""
        inputs = numpy.array(inputs_list, ndmin=2).T
        hidden_outputs = self.activation_function(numpy.dot(self.wih, inputs))
        return self.activation_function(numpy.dot(self.who, hidden_outputs))

    def save(self):
        """Persist both weight matrices to the working directory."""
        numpy.save('saved_wih.npy', self.wih)
        numpy.save('saved_who.npy', self.who)

    def load(self):
        """Restore weight matrices saved next to this module."""
        self.wih = numpy.load(os.path.join(os.path.dirname(__file__), 'saved_wih.npy'))
        self.who = numpy.load(os.path.join(os.path.dirname(__file__), 'saved_who.npy'))
import os
from flask import (
Flask, flash, render_template,
redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
import urllib.request
if os.path.exists("env.py"):
import env
# Application and database configuration, pulled from the environment
# (a local env.py supplies these values in development; see the guarded
# import above).
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
mongo = PyMongo(app)
# Routes
@app.route('/file/<filename>')
def file(filename):
    """Serve an uploaded file stored in GridFS by its filename."""
    stored_file = mongo.send_file(filename)
    return stored_file
@app.route("/")
@app.route("/home")
def home():
    """Landing page: show the 15 most recently added books."""
    latest_books = list(mongo.db.Books.find().sort("_id", -1).limit(15))
    return render_template("home.html", books=latest_books)
@app.context_processor
def inject_categories():
    """Make the full category list available to every template.

    Renamed from ``inject_user``: three context processors in this module
    shared that name, so each definition shadowed the previous one at
    module level (flake8 F811).  Nothing references the name directly --
    Flask registers the function object via the decorator.
    """
    categories = list(mongo.db.category.find())
    return dict(categories=categories)
@app.context_processor
def inject_best_selling():
    """Make the eight newest best-selling books available to every template.

    Renamed from ``inject_user`` (duplicate definition name shared by three
    context processors in this module).
    """
    best_selling = "yes"
    selling = list(mongo.db.Books.find(
        {'book_best_selling': best_selling}).sort("_id", -1).limit(8))
    return dict(selling=selling)
@app.context_processor
def inject_discounts():
    """Make the eight oldest books (discount listing) available to every
    template.

    Renamed from ``inject_user`` (duplicate definition name shared by three
    context processors in this module).
    """
    discount = list(mongo.db.Books.find().sort("_id", 1).limit(8))
    return dict(discount=discount)
@app.route("/login", methods=["GET", "POST"])
def login():
    """Log a user in.

    GET renders the login form; POST validates the credentials, stores
    ``user`` and ``role`` in the session, then redirects admins to their
    profile page and everyone else to the home page.
    """
    if request.method == "POST":
        # usernames are stored lower-cased, so normalise before lookup
        existing_user = mongo.db.users.find_one(
            {"username": request.form.get("username").lower()})
        if existing_user:
            if check_password_hash(
                    existing_user["password"], request.form.get("password")):
                session["user"] = request.form.get("username").lower()
                session["role"] = existing_user["role"]
                # admins land on their profile, regular users on home
                if session["role"] == "admin":
                    return redirect(url_for(
                        "profile", username=session["user"]))
                else:
                    return redirect(url_for(
                        "home", username=session["user"]))
            else:
                # same message for bad password and unknown user, so the
                # response does not reveal which accounts exist
                flash("Incorrect Username and/or Password")
                return redirect(url_for("login"))
        else:
            flash("Incorrect Username and/or Password")
            return redirect(url_for("login"))
    return render_template("login.html")
@app.route("/logout")
def logout():
    """Clear the login session and send the user back to the login page."""
    flash("You have been logged out")
    for session_key in ("user", "role"):
        session.pop(session_key)
    return redirect(url_for("login"))
@app.route("/register", methods=["GET", "POST"])
def register():
    """Create a new user account.

    GET renders the registration form; POST checks username uniqueness,
    stores the new user (password hashed) and redirects to the login page.
    """
    if request.method == "POST":
        existing_user = mongo.db.users.find_one(
            {"username": request.form.get("username").lower()})
        if existing_user:
            flash("Username already exists")
            return redirect(url_for("register"))
        # the admin toggle on the form decides the stored role
        user_role = "admin" if request.form.get("customSwitch1") else "user"
        register = {
            "role": user_role,
            "username": request.form.get("username").lower(),
            "password": generate_password_hash(request.form.get("password")),
            "email": request.form.get("email").lower(),
            "address": request.form.get("address").lower(),
            "postal": request.form.get("postal").lower()
        }
        mongo.db.users.insert_one(register)
        flash("Registration Successful!")
        # Redirect (post/redirect/get) instead of rendering login.html in
        # the POST response, so a browser refresh cannot re-submit the
        # registration; this also matches every other handler in the module.
        return redirect(url_for("login"))
    return render_template("register.html")
@app.route("/profile/<username>", methods=["GET", "POST"])
def profile(username):
    """Show the profile page of the logged-in user."""
    # Guard first: the original indexed session["user"] before checking it,
    # which raised KeyError for anonymous visitors, and fell off the end
    # (returning None -> HTTP 500) when the session value was falsy.
    if not session.get("user"):
        return redirect(url_for("login"))
    user = mongo.db.users.find_one(
        {"username": session["user"]})
    return render_template("profile.html", user=user)
@app.route("/category_display/<value_id>", methods=["GET", "POST"])
def category_display(value_id):
    """List every book that belongs to the category *value_id*."""
    category_doc = mongo.db.category.find_one({"_id": ObjectId(value_id)})
    books_in_category = list(
        mongo.db.Books.find({'book_category_id': category_doc['_id']}))
    return render_template(
        "category_display.html",
        category_table=category_doc,
        category_book=books_in_category)
# Absolute path of the application package; used to resolve the static/
# upload folder in admin_book_insert().
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
@app.route("/admin_book_insert", methods=["GET", "POST"])
def admin_book_insert():
    """Admin form to add a new book, including its cover-image upload.

    GET renders the form with category/author/publisher choices; POST
    rejects duplicate titles, saves the uploaded image under static/ and
    inserts the new book document.
    """
    if request.method == "POST":
        existing_book = mongo.db.Books.find_one(
            {"book_title": request.form.get("book_title").lower()})
        if existing_book:
            flash("This Book is already there")
            return redirect(url_for("admin_book_insert"))
        target = os.path.join(APP_ROOT, 'static/')
        # Create the upload folder on first use.  (The original printed
        # "couldn't create upload directory" in the *else* branch, i.e.
        # exactly when the directory already existed.)
        if not os.path.isdir(target):
            os.mkdir(target)
        # Default so the insert below cannot hit an unbound name when the
        # form was submitted without a file (the original crashed with
        # NameError in that case).
        filename = ""
        for upload in request.files.getlist("file"):
            # Sanitise the client-supplied name before writing to disk --
            # secure_filename was already imported but never used, leaving
            # the save open to path traversal.
            filename = secure_filename(upload.filename)
            destination = "/".join([target, filename])
            upload.save(destination)
        book = {
            "book_title": request.form.get("book_title").lower(),
            "book_publisher_id": ObjectId(request.form.get("publisher_name").lower()),
            "book_author_id": ObjectId(request.form.get("author_name").lower()),
            "book_category_id": ObjectId(request.form.get("category_name").lower()),
            "book_availabilty": request.form.get("book_availabilty").lower(),
            "book_discount": request.form.get("book_discount").lower(),
            "book_best_selling": request.form.get("book_best_selling").lower(),
            "book_price": request.form.get("book_price").lower(),
            "book_pages": request.form.get("book_pages").lower(),
            "book_img": filename
        }
        mongo.db.Books.insert_one(book)
        flash('successfully Inserted')
        return redirect(url_for("managebooks"))
    cat = mongo.db.category.find().sort("category_name", 1)
    author = mongo.db.author.find().sort("author_name", 1)
    publisher = mongo.db.publisher.find().sort("publisher_name", 1)
    return render_template("admin_book_insert.html", cat=cat, publishers=publisher, authors=author)
@app.route("/managepublisher")
def managepublisher():
    """Admin overview listing every publisher."""
    all_publishers = list(mongo.db.publisher.find())
    return render_template("managepublisher.html", publisher=all_publishers)
@app.route("/publisherregister", methods=["GET", "POST"])
def publisherregister():
    """Add a new publisher (admin); rejects duplicate names."""
    if request.method != "POST":
        return render_template("publisherregister.html")
    publisher_name = request.form.get("username").lower()
    if mongo.db.publisher.find_one({"publisher_name": publisher_name}):
        flash("Username already exists")
        return redirect(url_for("publisherregister"))
    mongo.db.publisher.insert_one({"publisher_name": publisher_name})
    flash("Registration Successful!")
    return redirect(url_for("managepublisher"))
@app.route("/delete_publisher/<publisher_id>")
def delete_publisher(publisher_id):
    """Delete one publisher by its ObjectId (admin)."""
    # delete_one replaces Collection.remove(), which is deprecated in
    # PyMongo and inconsistent with the insert_one/find_one calls used
    # throughout this module.
    mongo.db.publisher.delete_one({"_id": ObjectId(publisher_id)})
    flash("Successfully Deleted")
    return redirect(url_for("managepublisher"))
@app.route("/manageauthor")
def manageauthor():
    """Admin overview listing every author."""
    all_authors = list(mongo.db.author.find())
    return render_template("manageauthor.html", author=all_authors)
@app.route("/authorregister", methods=["GET", "POST"])
def authorregister():
    """Add a new author (admin); rejects duplicate names."""
    if request.method != "POST":
        return render_template("authorregister.html")
    author_name = request.form.get("username").lower()
    if mongo.db.author.find_one({"author_name": author_name}):
        flash("Username already exists")
        return redirect(url_for("authorregister"))
    mongo.db.author.insert_one({"author_name": author_name})
    flash("Registration Successful!")
    return redirect(url_for("manageauthor"))
@app.route("/delete_author/<author_id>")
def delete_author(author_id):
    """Delete one author by its ObjectId (admin)."""
    # delete_one replaces the deprecated Collection.remove() (consistency
    # with insert_one/find_one used elsewhere in this module).
    mongo.db.author.delete_one({"_id": ObjectId(author_id)})
    flash("Successfully Deleted")
    return redirect(url_for("manageauthor"))
@app.route("/managecategory")
def managecategory():
    """Admin overview listing every category."""
    all_categories = list(mongo.db.category.find())
    return render_template("managecategory.html", category=all_categories)
@app.route("/categoryregister", methods=["GET", "POST"])
def categoryregister():
    """Add a new book category (admin); rejects duplicate names."""
    if request.method != "POST":
        return render_template("categoryregister.html")
    category_name = request.form.get("username").lower()
    if mongo.db.category.find_one({"category_name": category_name}):
        flash("Category already exists")
        return redirect(url_for("categoryregister"))
    mongo.db.category.insert_one({"category_name": category_name})
    flash("Category Added!")
    return redirect(url_for("managecategory"))
@app.route("/delete_category/<category_id>")
def delete_category(category_id):
    """Delete one category by its ObjectId (admin)."""
    # delete_one replaces the deprecated Collection.remove() (consistency
    # with insert_one/find_one used elsewhere in this module).
    mongo.db.category.delete_one({"_id": ObjectId(category_id)})
    flash("Successfully Deleted")
    return redirect(url_for("managecategory"))
@app.route("/manage_review")
def manage_review():
    """Admin page listing every review (newest first) with its book."""
    all_reviews = mongo.db.book_reviews.find().sort("_id", -1)
    all_books = mongo.db.Books.find()
    return render_template(
        "managereview.html", book_list=all_books, reviews=all_reviews)
@app.route("/managebooks")
def managebooks():
    """Admin overview listing every book in the catalogue."""
    all_books = list(mongo.db.Books.find())
    return render_template("managebooks.html", book=all_books)
@app.route("/book_update/<book_id>", methods=["GET", "POST"])
def book_update(book_id):
    """Edit the mutable fields of one book (admin).

    GET renders the edit form pre-filled with the current document; POST
    applies the submitted values.
    """
    if request.method == "POST":
        myquery = {"_id": ObjectId(book_id)}
        newvalues = {"$set": {
            "book_availabilty": request.form.get("book_availabilty"),
            "book_best_selling": request.form.get("book_best_selling"),
            "book_discount": request.form.get("book_discount"),
            "book_price": request.form.get("book_price"),
            "book_pages": request.form.get("book_pages")}}
        # update_one replaces the deprecated Collection.update() and is
        # consistent with insert_one/find_one used elsewhere in this module.
        mongo.db.Books.update_one(myquery, newvalues)
        flash("Book Successfully Updated")
        return redirect(url_for("managebooks"))
    updated = mongo.db.Books.find_one({"_id": ObjectId(book_id)})
    return render_template("book_update.html", updated=updated)
@app.route("/delete_book/<book_id>")
def delete_book(book_id):
    """Delete one book by its ObjectId (admin)."""
    # delete_one replaces the deprecated Collection.remove() (consistency
    # with insert_one/find_one used elsewhere in this module).
    mongo.db.Books.delete_one({"_id": ObjectId(book_id)})
    flash("Successfully Deleted")
    return redirect(url_for("managebooks"))
@app.route("/seereviews/<book_id>", methods=["GET", "POST"])
def seereviews(book_id):
    """Show all approved reviews for one book."""
    # The original rebound the book_id parameter to hold the document; a
    # separate local name behaves identically and reads more clearly.
    book_doc = mongo.db.Books.find_one({"_id": ObjectId(book_id)})
    approved_reviews = list(mongo.db.book_reviews.find(
        {'book_id': book_doc['_id'], 'review_status': "Approved"}))
    return render_template(
        "reviews.html", book_id=book_doc, reveiewd_book=approved_reviews)
@app.route("/update_review/<review_id>", methods=["GET", "POST"])
def update_review(review_id):
    """Mark a pending review as Approved (admin)."""
    myquery = {"_id": ObjectId(review_id)}
    newvalues = {"$set": {"review_status": "Approved"}}
    # update_one replaces the deprecated Collection.update(); the success
    # message is now flashed after the write rather than before it.
    mongo.db.book_reviews.update_one(myquery, newvalues)
    flash("Successfully Updated")
    return redirect(url_for("manage_review"))
@app.route("/postreview/<book_id>", methods=["GET", "POST"])
def postreview(book_id):
    """Submit a review for a book; it stays hidden until approved."""
    if request.method == "POST":
        new_review = {
            "book_id": ObjectId(book_id),
            "book_review": request.form.get("review").lower(),
            "reviewed_by": session["user"],
            "review_status": "Not Approved"
        }
        mongo.db.book_reviews.insert_one(new_review)
        flash("Your Review Can Only Be Viewed Once It Is Approved By The Site Owner!")
        return redirect(url_for("home"))
    book_doc = mongo.db.Books.find_one({"_id": ObjectId(book_id)})
    return render_template("profile.html", updated=book_doc)
@app.route("/search", methods=["GET", "POST"])
def search():
    """Full-text search over the Books collection (uses a $text index)."""
    search_terms = request.form.get("query")
    matches = list(mongo.db.Books.find({"$text": {"$search": search_terms}}))
    return render_template("home.html", books=matches)
# Run the development server when executed directly.
# NOTE(review): debug=True enables the interactive Werkzeug debugger;
# presumably intended only for local development -- confirm it is disabled
# in any deployed environment.
if __name__ == "__main__":
    app.run(host=os.environ.get("IP"),
            port=int(os.environ.get("PORT")),
            debug=True)
| StarcoderdataPython |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
sys.dont_write_bytecode = True
import os
import re
import posixpath
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
def get_str(var_name):
    """Return the quoted value assigned to *var_name* in litefs.py.

    Matches assignments of the form  var_name = 'value'  or  "value".
    Raises ValueError if no such assignment exists (the original raised a
    cryptic AttributeError on the None returned by re.search).
    """
    # Read via a context manager so the file handle is closed instead of
    # being leaked as in the original open(...).read().
    with open('litefs.py') as src_file:
        src_py = src_file.read()
    match = re.search(
        r"%s\s*=\s*['\"]([^'\"]+)['\"]" % var_name, src_py)
    if match is None:
        raise ValueError('cannot find %r assignment in litefs.py' % var_name)
    return match.group(1)
def get_long_str(var_name):
    """Return the triple-quoted value assigned to *var_name* in litefs.py.

    Used for the module docstring (long_description).  Raises ValueError if
    no such assignment exists (the original raised a cryptic AttributeError
    on the None returned by re.search).
    """
    # Context manager closes the handle; the original leaked it.
    with open('litefs.py') as src_file:
        src_py = src_file.read()
    match = re.search(
        r"%s\s*=\s*['\"]{3}([^'\"]+)['\"]{3}" % var_name, src_py)
    if match is None:
        raise ValueError('cannot find %r assignment in litefs.py' % var_name)
    return match.group(1)
# Package metadata: version, author, license and the long description are
# read out of litefs.py itself via get_str/get_long_str, so each value is
# defined in exactly one place.
# NOTE(review): the install_requires open() below leaks its file handle and
# will include a trailing empty entry if requirements.txt ends with a
# newline -- harmless for setuptools, but worth tidying.
setup(
    name='litefs',
    version=get_str('__version__'),
    description='Build a web server framework using Python.',
    long_description=get_long_str('__doc__'),
    author=get_str('__author__'),
    author_email='<EMAIL>',
    url='https://github.com/leafcoder/litefs',
    py_modules=['litefs'],
    license=get_str('__license__'),
    platforms='any',
    package_data={
        '': ['*.txt', '*.md', 'LICENSE', 'MANIFEST.in'],
        'demo': ['demo/*', '*.py'],
        'test': ['test/*', '*.py']
    },
    install_requires=open('requirements.txt').read().split('\n'),
    # Console entry point: `litefs` launches the bundled test server.
    entry_points={
        'console_scripts': [
            'litefs=litefs:test_server',
        ]
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        "Operating System :: OS Independent",
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
        'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ]
)
| StarcoderdataPython |
__all__ = ['neuralnet']
| StarcoderdataPython |
# <gh_stars>0
# Practice script: print a greeting, count five loop iterations, then a
# final status line.
# (Removed ~25 lines of commented-out scratch code: range/while counting
# demos, random-number printing and an exit-prompt loop -- dead code that
# only obscured the live statements.)
print('My name is')
for i in range(5):
    print('elgun', i)
print('letsdo it')
print('Sako is running')
| StarcoderdataPython |
3203115 | # Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Crypto.Cipher import AES
from Crypto import Random
from ironic_neutron_plugin import config
from neutron.db import model_base
from neutron.db import models_v2
from neutron.openstack.common import log as logging
import sqlalchemy as sa
from sqlalchemy import orm as sa_orm
import base64
LOG = logging.getLogger(__name__)
def aes_encrypt(key, msg):
    """AES-encrypt *msg* with *key* in CFB mode.

    A fresh random IV is generated per call and prepended to the
    ciphertext; the result is returned base64-encoded so it can be stored
    in a text column.
    """
    init_vector = Random.new().read(AES.block_size)
    encryptor = AES.new(key, AES.MODE_CFB, init_vector)
    payload = init_vector + encryptor.encrypt(msg)
    return base64.b64encode(payload)
def aes_decrypt(key, msg):
    """Decrypt a value produced by aes_encrypt().

    *msg* is base64(iv + ciphertext); the IV occupies the first AES block.
    """
    raw = base64.b64decode(msg)
    init_vector = raw[:AES.block_size]
    decryptor = AES.new(key, AES.MODE_CFB, init_vector)
    return decryptor.decrypt(raw[AES.block_size:])
class EncryptedValue(sa.TypeDecorator):
    """SQLAlchemy column type that transparently AES-encrypts values on
    write and decrypts them on read, keyed by the configured
    ``credential_secret``.  Falsy values (None, empty string) pass through
    untouched in both directions."""
    impl = sa.String

    def process_bind_param(self, value, dialect):
        """Encrypt *value* before it is written to the database."""
        if not value:
            return value
        secret = config.cfg.CONF.ironic.credential_secret
        return aes_encrypt(secret, value)

    def process_result_value(self, value, dialect):
        """Decrypt *value* after it is read from the database."""
        if not value:
            return value
        secret = config.cfg.CONF.ironic.credential_secret
        return aes_decrypt(secret, value)
class SwitchPort(model_base.BASEV2, models_v2.HasId):
    """Maps a device to a physical switch port."""
    __tablename__ = "switch_ports"
    switch_id = sa.Column(sa.String(255),
                          sa.ForeignKey("switches.id"),
                          nullable=False)
    # Interface name (eth0, some other meaningful identifier)
    name = sa.Column(sa.String(255), nullable=False)
    # Switchport identifier (Ethernet1/1, something your mech understands)
    port = sa.Column(sa.String(255), nullable=False)
    # Some kind of externally-identifiable id suitable for mapping multiple
    # ports to a single entity (ironic node_id)
    hardware_id = sa.Column(sa.String(255), nullable=True)
    # Extra
    mac_address = sa.Column(sa.String(255), nullable=True)
    def as_dict(self):
        """Serialize this row to a plain dict (e.g. for API responses)."""
        return {
            u"id": self.id,
            u"switch_id": self.switch_id,
            u"name": self.name,
            u"port": self.port,
            u"hardware_id": self.hardware_id,
            # extra
            u"mac_address": self.mac_address
        }
    @classmethod
    def make_dict(cls, d):
        """Normalize an arbitrary mapping *d* to the as_dict() key set.

        Missing keys become None (dict.get), so the result always carries
        the full field list.
        """
        return {
            u"id": d.get("id"),
            u"switch_id": d.get("switch_id"),
            u"name": d.get("name"),
            u"port": d.get("port"),
            u"hardware_id": d.get("hardware_id"),
            u"mac_address": d.get("mac_address")
        }
class Switch(model_base.BASEV2):
    """An external attachment point."""
    __tablename__ = "switches"
    id = sa.Column(sa.String(255), primary_key=True)
    description = sa.Column(sa.String(255))
    type = sa.Column(sa.String(255))
    # TODO(morgabra) move this out into a separate model
    host = sa.Column(sa.String(255))
    username = sa.Column(sa.String(255), nullable=True)
    # Stored AES-encrypted at rest via the EncryptedValue type decorator.
    password = sa.Column(EncryptedValue(255), nullable=True)
    # One-to-many link to SwitchPort; eagerly joined, and rows are removed
    # together with their switch (cascade="delete").
    ports = sa_orm.relationship(
        SwitchPort, lazy="joined", cascade="delete", backref="switch")
    def as_dict(self):
        """Serialize this row to a plain dict; the password is masked so
        credentials never leave the service."""
        return {
            u"id": self.id,
            u"description": self.description,
            u"host": self.host,
            u"username": self.username,
            u"password": "*****",
            u"type": self.type
        }
class PortExt(model_base.BASEV2):
    """Keep track of extra information about neutron ports.
    TODO(morgabra) This is not correct, but we need to stick
    this data somewhere.
    """
    __tablename__ = "port_ext"
    # TODO(morgabra) FK to the actual model and cascade
    port_id = sa.Column(sa.String(255), primary_key=True)
    hardware_id = sa.Column(sa.String(255), nullable=True)
    # NOTE(review): semantics of "commit" and "trunked" are not visible
    # here; presumably commit means "push config to the switch" and trunked
    # marks a trunked (multi-VLAN) port -- confirm against the plugin code.
    commit = sa.Column(sa.Boolean, nullable=False)
    trunked = sa.Column(sa.Boolean, nullable=True)
    def as_dict(self):
        """Serialize this row to a plain dict (e.g. for API responses)."""
        return {
            u"port_id": self.port_id,
            u"commit": self.commit,
            u"trunked": self.trunked,
            u"hardware_id": self.hardware_id
        }
class SwitchPortBindingState(object):
    """Lifecycle states for a neutron-port / switch-port binding.

    The WANT_* values appear to mark requested transitions that have not
    yet been applied to the physical switch (confirmed only by naming; the
    state machine itself lives elsewhere).
    """
    INACTIVE = u"INACTIVE"
    WANT_ACTIVE = u"WANT_ACTIVE"
    ACTIVE = u"ACTIVE"
    WANT_INACTIVE = u"WANT_INACTIVE"
    ERROR = u"ERROR"

    # Single source of truth for as_dict(); the original repeated every
    # state literal a second time inside the returned dict.
    _ALL_STATES = (u"INACTIVE", u"WANT_ACTIVE", u"ACTIVE",
                   u"WANT_INACTIVE", u"ERROR")

    @classmethod
    def as_dict(cls):
        """Return a {state name: state value} mapping of all states."""
        return dict((name, getattr(cls, name)) for name in cls._ALL_STATES)
class SwitchPortBinding(model_base.BASEV2):
    """Keep track of which neutron ports are bound to which
    physical switchports.
    """
    __tablename__ = "switch_port_bindings"
    # Composite primary key: one row per (port, network, switch-port)
    # combination.
    # TODO(morgabra) FK to the actual model and cascade
    port_id = sa.Column(sa.String(255), primary_key=True)
    network_id = sa.Column(sa.String(255), primary_key=True)
    switch_port_id = sa.Column(
        sa.String(36),
        sa.ForeignKey("switch_ports.id"),
        primary_key=True)
    # Binding lifecycle state; new rows start INACTIVE (see
    # SwitchPortBindingState for the full state set).
    state = sa.Column(sa.String(255),
                      default=SwitchPortBindingState.INACTIVE)
    def as_dict(self):
        """Serialize this row to a plain dict (e.g. for API responses)."""
        return {
            u"port_id": self.port_id,
            u"network_id": self.network_id,
            u"switch_port_id": self.switch_port_id,
            u"state": self.state
        }
| StarcoderdataPython |
# <gh_stars>10-100
from .quality_metric_classes.metric_data import MetricData
from .quality_metric_classes.amplitude_cutoff import AmplitudeCutoff
from .quality_metric_classes.silhouette_score import SilhouetteScore
from .quality_metric_classes.num_spikes import NumSpikes
from .quality_metric_classes.firing_rate import FiringRate
from .quality_metric_classes.d_prime import DPrime
from .quality_metric_classes.l_ratio import LRatio
from .quality_metric_classes.presence_ratio import PresenceRatio
from .quality_metric_classes.isi_violation import ISIViolation
from .quality_metric_classes.snr import SNR
from .quality_metric_classes.isolation_distance import IsolationDistance
from .quality_metric_classes.noise_overlap import NoiseOverlap
from .quality_metric_classes.nearest_neighbor import NearestNeighbor
from .quality_metric_classes.drift_metric import DriftMetric
from .quality_metric_classes.parameter_dictionaries import update_all_param_dicts_with_kwargs
from collections import OrderedDict
from copy import deepcopy
import pandas
# Registry of every quality metric this module can compute.
all_metrics_list = ["num_spikes", "firing_rate", "presence_ratio", "isi_violation", "amplitude_cutoff", "snr",
                    "max_drift", "cumulative_drift", "silhouette_score", "isolation_distance", "l_ratio",
                    "d_prime", "noise_overlap", "nn_hit_rate", "nn_miss_rate"]


def get_quality_metrics_list():
    """Return the names of all supported quality metrics.

    A copy is returned so callers cannot accidentally mutate the
    module-level registry (the original handed out the shared list).
    """
    return list(all_metrics_list)
def compute_num_spikes(
        sorting,
        sampling_frequency=None,
        unit_ids=None,
        **kwargs
):
    """Compute the number of spikes of each sorted unit.

    Parameters
    ----------
    sorting: SortingExtractor
        The sorting result to be evaluated.
    sampling_frequency: float
        Sampling frequency of the result. If None, it is taken from the
        sorting extractor.
    unit_ids: list
        Unit ids to compute the metric for. If None, all units are used.
    **kwargs: keyword arguments
        Among others: save_property_or_features (bool) to store the metric
        as a sorting property, and verbose (bool) for progress output.

    Returns
    -------
    num_spikes: np.ndarray
        The number of spikes of the sorted units.
    """
    params_dict = update_all_param_dicts_with_kwargs(kwargs)
    if unit_ids is None:
        unit_ids = sorting.get_unit_ids()
    # raise_if_empty=False: a spike count of 0 is a valid answer for an
    # empty unit, so empty sortings must not raise here.
    metric_data = MetricData(
        sorting=sorting, sampling_frequency=sampling_frequency, recording=None,
        apply_filter=False, freq_min=params_dict["freq_min"],
        freq_max=params_dict["freq_max"], unit_ids=unit_ids,
        duration_in_frames=None, verbose=params_dict['verbose'],
        raise_if_empty=False)
    metric = NumSpikes(metric_data=metric_data)
    return metric.compute_metric(**kwargs)
def compute_firing_rates(
        sorting,
        duration_in_frames,
        sampling_frequency=None,
        unit_ids=None,
        **kwargs
):
    """Compute the firing rate of each sorted unit.

    Parameters
    ----------
    sorting: SortingExtractor
        The sorting result to be evaluated.
    duration_in_frames: int
        Length of the recording (in frames).
    sampling_frequency: float
        Sampling frequency of the result. If None, it is taken from the
        sorting extractor.
    unit_ids: list
        Unit ids to compute the metric for. If None, all units are used.
    **kwargs: keyword arguments
        Among others: save_property_or_features (bool) to store the metric
        as a sorting property, and verbose (bool) for progress output.

    Returns
    -------
    firing_rates: np.ndarray
        The firing rates of the sorted units.
    """
    params_dict = update_all_param_dicts_with_kwargs(kwargs)
    if unit_ids is None:
        unit_ids = sorting.get_unit_ids()
    metric_data = MetricData(
        sorting=sorting, sampling_frequency=sampling_frequency, recording=None,
        apply_filter=False, freq_min=params_dict["freq_min"],
        freq_max=params_dict["freq_max"], unit_ids=unit_ids,
        duration_in_frames=duration_in_frames,
        verbose=params_dict['verbose'])
    metric = FiringRate(metric_data=metric_data)
    return metric.compute_metric(**kwargs)
def compute_presence_ratios(
        sorting,
        duration_in_frames,
        sampling_frequency=None,
        unit_ids=None,
        **kwargs
):
    """Compute the presence ratio of each sorted unit.

    Parameters
    ----------
    sorting: SortingExtractor
        The sorting result to be evaluated.
    duration_in_frames: int
        Length of the recording (in frames).
    sampling_frequency: float
        Sampling frequency of the result. If None, it is taken from the
        sorting extractor.
    unit_ids: list
        Unit ids to compute the metric for. If None, all units are used.
    **kwargs: keyword arguments
        Among others: save_property_or_features (bool) to store the metric
        as a sorting property, and verbose (bool) for progress output.

    Returns
    -------
    presence_ratios: np.ndarray
        The presence ratios of the sorted units.
    """
    params_dict = update_all_param_dicts_with_kwargs(kwargs)
    if unit_ids is None:
        unit_ids = sorting.get_unit_ids()
    metric_data = MetricData(
        sorting=sorting, sampling_frequency=sampling_frequency, recording=None,
        apply_filter=False, freq_min=params_dict["freq_min"],
        freq_max=params_dict["freq_max"], unit_ids=unit_ids,
        duration_in_frames=duration_in_frames,
        verbose=params_dict['verbose'])
    metric = PresenceRatio(metric_data=metric_data)
    return metric.compute_metric(**kwargs)
def compute_isi_violations(
        sorting,
        duration_in_frames,
        isi_threshold=ISIViolation.params['isi_threshold'],
        min_isi=ISIViolation.params['min_isi'],
        sampling_frequency=None,
        unit_ids=None,
        **kwargs
):
    """Compute the inter-spike-interval (ISI) violation rate of each unit.

    Parameters
    ----------
    sorting: SortingExtractor
        The sorting result to be evaluated.
    duration_in_frames: int
        Length of the recording (in frames).
    isi_threshold: float
        ISI threshold below which an interval counts as a violation.
    min_isi: float
        The minimum expected ISI value.
    sampling_frequency: float
        Sampling frequency of the result. If None, it is taken from the
        sorting extractor.
    unit_ids: list
        Unit ids to compute the metric for. If None, all units are used.
    **kwargs: keyword arguments
        Among others: save_property_or_features (bool) to store the metric
        as a sorting property, and verbose (bool) for progress output.

    Returns
    -------
    isi_violations: np.ndarray
        The ISI violations of the sorted units.
    """
    params_dict = update_all_param_dicts_with_kwargs(kwargs)
    if unit_ids is None:
        unit_ids = sorting.get_unit_ids()
    metric_data = MetricData(
        sorting=sorting, sampling_frequency=sampling_frequency, recording=None,
        apply_filter=False, freq_min=params_dict["freq_min"],
        freq_max=params_dict["freq_max"], unit_ids=unit_ids,
        duration_in_frames=duration_in_frames,
        verbose=params_dict['verbose'])
    metric = ISIViolation(metric_data=metric_data)
    return metric.compute_metric(isi_threshold, min_isi, **kwargs)
def compute_amplitude_cutoffs(
        sorting,
        recording,
        unit_ids=None,
        **kwargs
):
    """Compute the amplitude cutoff of each sorted unit.

    Parameters
    ----------
    sorting: SortingExtractor
        The sorting result to be evaluated.
    recording: RecordingExtractor
        The recording from which spike amplitudes are extracted.
    unit_ids: list
        Unit ids to compute the metric for. If None, all units are used.
    **kwargs: keyword arguments
        Filtering and amplitude-extraction options forwarded to the
        extraction step, among them: apply_filter (bool), freq_min /
        freq_max (filter band, defaults 300/6000 Hz), method ('absolute'
        amplitudes in uV or 'relative' to the template), peak
        ('neg'/'pos'/'both'), frames_before / frames_after (window around
        the peak), max_spikes_per_unit, recompute_info,
        save_property_or_features, seed and verbose.

    Returns
    -------
    amplitude_cutoffs: np.ndarray
        The amplitude cutoffs of the sorted units.
    """
    params_dict = update_all_param_dicts_with_kwargs(kwargs)
    if unit_ids is None:
        unit_ids = sorting.get_unit_ids()
    metric_data = MetricData(
        sorting=sorting, sampling_frequency=recording.get_sampling_frequency(),
        recording=recording, apply_filter=params_dict["apply_filter"],
        freq_min=params_dict["freq_min"], freq_max=params_dict["freq_max"],
        unit_ids=unit_ids, duration_in_frames=None,
        verbose=params_dict['verbose'])
    # Amplitudes must be extracted before the cutoff can be estimated.
    metric_data.compute_amplitudes(**kwargs)
    metric = AmplitudeCutoff(metric_data=metric_data)
    return metric.compute_metric(**kwargs)
def compute_snrs(
        sorting,
        recording,
        snr_mode=SNR.params['snr_mode'],
        snr_noise_duration=SNR.params['snr_noise_duration'],
        max_spikes_per_unit_for_snr=SNR.params['max_spikes_per_unit_for_snr'],
        template_mode=SNR.params['template_mode'],
        max_channel_peak=SNR.params['max_channel_peak'],
        unit_ids=None,
        **kwargs
):
    """Compute the signal-to-noise ratio of each sorted unit.

    Parameters
    ----------
    sorting: SortingExtractor
        The sorting result to be evaluated.
    recording: RecordingExtractor
        The recording from which templates and noise level are computed.
    snr_mode: str
        How the noise level is estimated ('mad' or 'std' - default 'mad').
    snr_noise_duration: float
        Number of seconds used to estimate the noise level (default 10.0).
    max_spikes_per_unit_for_snr: int
        Maximum number of spikes used to compute templates (default 1000).
    template_mode: str
        'mean' or 'median' template computation.
    max_channel_peak: str
        How the maximum channel is found: among negative peaks ('neg'),
        positive ('pos') or both ('both' - default).
    unit_ids: list
        Unit ids to compute the metric for. If None, all units are used.
    **kwargs: keyword arguments
        Filtering and waveform-extraction options forwarded to the
        extraction step, among them: apply_filter (bool), freq_min /
        freq_max (filter band, defaults 300/6000 Hz), grouping_property,
        ms_before / ms_after (waveform window), dtype,
        compute_property_from_recording, max_channels_per_waveforms,
        n_jobs, memmap, save_property_or_features, recompute_info,
        max_spikes_per_unit, seed and verbose.

    Returns
    -------
    snrs: np.ndarray
        The SNRs of the sorted units.
    """
    params_dict = update_all_param_dicts_with_kwargs(kwargs)
    if unit_ids is None:
        unit_ids = sorting.get_unit_ids()
    metric_data = MetricData(
        sorting=sorting, sampling_frequency=recording.get_sampling_frequency(),
        recording=recording, apply_filter=params_dict["apply_filter"],
        freq_min=params_dict["freq_min"], freq_max=params_dict["freq_max"],
        unit_ids=unit_ids, duration_in_frames=None,
        verbose=params_dict['verbose'])
    metric = SNR(metric_data=metric_data)
    return metric.compute_metric(snr_mode, snr_noise_duration,
                                 max_spikes_per_unit_for_snr,
                                 template_mode, max_channel_peak, **kwargs)
def compute_noise_overlaps(
        sorting,
        recording,
        num_channels_to_compare=NoiseOverlap.params['num_channels_to_compare'],
        num_features=NoiseOverlap.params['num_features'],
        num_knn=NoiseOverlap.params['num_knn'],
        max_spikes_per_unit_for_noise_overlap=NoiseOverlap.params['max_spikes_per_unit_for_noise_overlap'],
        unit_ids=None,
        **kwargs
):
    """
    Computes and returns the noise overlaps in the sorted dataset.
    Noise overlap estimates the fraction of ‘‘noise events’’ in a cluster, i.e., above-threshold events not associated
    with true firings of this or any of the other clustered units. A large noise overlap implies a high false-positive
    rate.
    Implementation from ml_ms4alg. For more information see https://doi.org/10.1016/j.neuron.2017.08.030
    Parameters
    ----------
    sorting: SortingExtractor
        The sorting result to be evaluated.
    recording: RecordingExtractor
        The given recording extractor from which to extract amplitudes
    num_channels_to_compare: int
        Number of channels to use when comparing clusters for the noise overlap estimation
    num_features: int
        Number of features to use for PCA
    num_knn: int
        Number of nearest neighbors
    max_spikes_per_unit_for_noise_overlap: int
        Number of waveforms to use for noise overlaps estimation
    unit_ids: list
        List of unit ids to compute metric for. If not specified, all units are used
    **kwargs: keyword arguments
        Keyword arguments among the following:
            method: str
                If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned.
                If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes
            peak: str
                If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or
                both ('both' - default)
            frames_before: int
                Frames before peak to compute amplitude
            frames_after: int
                Frames after peak to compute amplitude
            apply_filter: bool
                If True, recording is bandpass-filtered
            freq_min: float
                High-pass frequency for optional filter (default 300 Hz)
            freq_max: float
                Low-pass frequency for optional filter (default 6000 Hz)
            grouping_property: str
                Property to group channels. E.g. if the recording extractor has the 'group' property and
                'grouping_property' is 'group', then waveforms are computed group-wise.
            ms_before: float
                Time period in ms to cut waveforms before the spike events
            ms_after: float
                Time period in ms to cut waveforms after the spike events
            dtype: dtype
                The numpy dtype of the waveforms
            compute_property_from_recording: bool
                If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
                property of the recording extractor channel on which the average waveform is the largest
            max_channels_per_waveforms: int or None
                Maximum channels per waveforms to return. If None, all channels are returned
            n_jobs: int
                Number of parallel jobs (default 1)
            memmap: bool
                If True, waveforms are saved as memmap object (recommended for long recordings with many channels)
            save_property_or_features: bool
                If true, it will save features in the sorting extractor
            recompute_info: bool
                If True, waveforms are recomputed
            max_spikes_per_unit: int
                The maximum number of spikes to extract per unit
            seed: int
                Random seed for reproducibility
            verbose: bool
                If True, will be verbose in metric computation
    Returns
    ----------
    noise_overlaps: np.ndarray
        The noise_overlaps of the sorted units.
    """
    # Merge user-supplied kwargs with the module-level parameter defaults.
    params_dict = update_all_param_dicts_with_kwargs(kwargs)
    if unit_ids is None:
        unit_ids = sorting.get_unit_ids()
    # duration_in_frames is not needed here since a recording is always provided.
    md = MetricData(sorting=sorting, sampling_frequency=recording.get_sampling_frequency(), recording=recording,
                    apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
                    duration_in_frames=None, freq_max=params_dict["freq_max"], unit_ids=unit_ids,
                    verbose=params_dict['verbose'])
    noise_overlap = NoiseOverlap(metric_data=md)
    noise_overlaps = noise_overlap.compute_metric(num_channels_to_compare,
                                                  max_spikes_per_unit_for_noise_overlap,
                                                  num_features, num_knn, **kwargs)
    return noise_overlaps
def compute_silhouette_scores(
        sorting,
        recording,
        max_spikes_for_silhouette=SilhouetteScore.params['max_spikes_for_silhouette'],
        unit_ids=None,
        **kwargs
):
    """
    Compute and return the silhouette scores of the sorted dataset.

    Parameters
    ----------
    sorting: SortingExtractor
        The sorting result to be evaluated
    recording: RecordingExtractor
        The recording extractor from which waveforms and features are extracted
    max_spikes_for_silhouette: int
        Max spikes to be used for the silhouette metric
    unit_ids: list
        List of unit ids to compute the metric for. If not specified, all units are used
    **kwargs: keyword arguments
        Optional keyword arguments controlling waveform extraction and filtering
        ('apply_filter', 'freq_min', 'freq_max', 'grouping_property', 'ms_before',
        'ms_after', 'dtype', 'compute_property_from_recording',
        'max_channels_per_waveforms', 'n_jobs', 'memmap',
        'save_property_or_features', 'recompute_info', 'max_spikes_per_unit',
        'seed', 'verbose') and amplitude extraction ('method', 'peak',
        'frames_before', 'frames_after'). See `compute_quality_metrics` for the
        full description of each argument.

    Returns
    -------
    silhouette_scores: np.ndarray
        The silhouette scores of the sorted units.
    """
    opts = update_all_param_dicts_with_kwargs(kwargs)
    unit_ids = sorting.get_unit_ids() if unit_ids is None else unit_ids
    metric_data = MetricData(
        sorting=sorting,
        recording=recording,
        sampling_frequency=recording.get_sampling_frequency(),
        duration_in_frames=None,
        unit_ids=unit_ids,
        apply_filter=opts["apply_filter"],
        freq_min=opts["freq_min"],
        freq_max=opts["freq_max"],
        verbose=opts["verbose"],
    )
    # Silhouette scores operate on the PCA projections of the waveforms.
    metric_data.compute_pca_scores(**kwargs)
    return SilhouetteScore(metric_data=metric_data).compute_metric(max_spikes_for_silhouette, **kwargs)
def compute_d_primes(
        sorting,
        recording,
        num_channels_to_compare=DPrime.params['num_channels_to_compare'],
        max_spikes_per_cluster=DPrime.params['max_spikes_per_cluster'],
        unit_ids=None,
        **kwargs
):
    """
    Compute and return the d-prime values of the sorted dataset.

    Parameters
    ----------
    sorting: SortingExtractor
        The sorting result to be evaluated
    recording: RecordingExtractor
        The recording extractor from which waveforms and features are extracted
    num_channels_to_compare: int
        The number of channels used for the PC extraction and comparison
    max_spikes_per_cluster: int
        Max spikes to be used from each unit
    unit_ids: list
        List of unit ids to compute the metric for. If not specified, all units are used
    **kwargs: keyword arguments
        Optional keyword arguments controlling waveform extraction and filtering
        ('apply_filter', 'freq_min', 'freq_max', 'grouping_property', 'ms_before',
        'ms_after', 'dtype', 'compute_property_from_recording',
        'max_channels_per_waveforms', 'n_jobs', 'memmap',
        'save_property_or_features', 'recompute_info', 'max_spikes_per_unit',
        'seed', 'verbose') and amplitude extraction ('method', 'peak',
        'frames_before', 'frames_after'). See `compute_quality_metrics` for the
        full description of each argument.

    Returns
    -------
    d_primes: np.ndarray
        The d-prime values of the sorted units.
    """
    opts = update_all_param_dicts_with_kwargs(kwargs)
    unit_ids = sorting.get_unit_ids() if unit_ids is None else unit_ids
    metric_data = MetricData(
        sorting=sorting,
        recording=recording,
        sampling_frequency=recording.get_sampling_frequency(),
        duration_in_frames=None,
        unit_ids=unit_ids,
        apply_filter=opts["apply_filter"],
        freq_min=opts["freq_min"],
        freq_max=opts["freq_max"],
        verbose=opts["verbose"],
    )
    # D-prime is a PC-space discriminability measure, so PCA scores are required.
    metric_data.compute_pca_scores(**kwargs)
    return DPrime(metric_data=metric_data).compute_metric(num_channels_to_compare, max_spikes_per_cluster, **kwargs)
def compute_l_ratios(
        sorting,
        recording,
        num_channels_to_compare=LRatio.params['num_channels_to_compare'],
        max_spikes_per_cluster=LRatio.params['max_spikes_per_cluster'],
        unit_ids=None,
        **kwargs
):
    """
    Compute and return the L-ratios of the sorted dataset.

    Parameters
    ----------
    sorting: SortingExtractor
        The sorting result to be evaluated
    recording: RecordingExtractor
        The recording extractor from which waveforms and features are extracted
    num_channels_to_compare: int
        The number of channels used for the PC extraction and comparison
    max_spikes_per_cluster: int
        Max spikes to be used from each unit
    unit_ids: list
        List of unit ids to compute the metric for. If not specified, all units are used
    **kwargs: keyword arguments
        Optional keyword arguments controlling waveform extraction and filtering
        ('apply_filter', 'freq_min', 'freq_max', 'grouping_property', 'ms_before',
        'ms_after', 'dtype', 'compute_property_from_recording',
        'max_channels_per_waveforms', 'n_jobs', 'memmap',
        'save_property_or_features', 'recompute_info', 'max_spikes_per_unit',
        'seed', 'verbose') and amplitude extraction ('method', 'peak',
        'frames_before', 'frames_after'). See `compute_quality_metrics` for the
        full description of each argument.

    Returns
    -------
    l_ratios: np.ndarray
        The L-ratios of the sorted units.
    """
    opts = update_all_param_dicts_with_kwargs(kwargs)
    unit_ids = sorting.get_unit_ids() if unit_ids is None else unit_ids
    metric_data = MetricData(
        sorting=sorting,
        recording=recording,
        sampling_frequency=recording.get_sampling_frequency(),
        duration_in_frames=None,
        unit_ids=unit_ids,
        apply_filter=opts["apply_filter"],
        freq_min=opts["freq_min"],
        freq_max=opts["freq_max"],
        verbose=opts["verbose"],
    )
    # L-ratio is defined on the PC projections of the waveforms.
    metric_data.compute_pca_scores(**kwargs)
    return LRatio(metric_data=metric_data).compute_metric(num_channels_to_compare, max_spikes_per_cluster, **kwargs)
def compute_isolation_distances(
        sorting,
        recording,
        num_channels_to_compare=IsolationDistance.params['num_channels_to_compare'],
        max_spikes_per_cluster=IsolationDistance.params['max_spikes_per_cluster'],
        unit_ids=None,
        **kwargs
):
    """
    Compute and return the isolation distances of the sorted dataset.

    Parameters
    ----------
    sorting: SortingExtractor
        The sorting result to be evaluated.
    recording: RecordingExtractor
        The recording extractor from which waveforms and features are extracted
    num_channels_to_compare: int
        The number of channels used for the PC extraction and comparison
    max_spikes_per_cluster: int
        Max spikes to be used from each unit
    unit_ids: list
        List of unit ids to compute the metric for. If not specified, all units are used
    **kwargs: keyword arguments
        Optional keyword arguments controlling waveform extraction and filtering
        ('apply_filter', 'freq_min', 'freq_max', 'grouping_property', 'ms_before',
        'ms_after', 'dtype', 'compute_property_from_recording',
        'max_channels_per_waveforms', 'n_jobs', 'memmap',
        'save_property_or_features', 'recompute_info', 'max_spikes_per_unit',
        'seed', 'verbose') and amplitude extraction ('method', 'peak',
        'frames_before', 'frames_after'). See `compute_quality_metrics` for the
        full description of each argument.

    Returns
    -------
    isolation_distances: np.ndarray
        The isolation distances of the sorted units.
    """
    opts = update_all_param_dicts_with_kwargs(kwargs)
    unit_ids = sorting.get_unit_ids() if unit_ids is None else unit_ids
    metric_data = MetricData(
        sorting=sorting,
        recording=recording,
        sampling_frequency=recording.get_sampling_frequency(),
        duration_in_frames=None,
        unit_ids=unit_ids,
        apply_filter=opts["apply_filter"],
        freq_min=opts["freq_min"],
        freq_max=opts["freq_max"],
        verbose=opts["verbose"],
    )
    # Isolation distance is a Mahalanobis-style measure in PC space; PCA scores are required.
    metric_data.compute_pca_scores(**kwargs)
    return IsolationDistance(metric_data=metric_data).compute_metric(
        num_channels_to_compare, max_spikes_per_cluster, **kwargs)
def compute_nn_metrics(
        sorting,
        recording,
        num_channels_to_compare=NearestNeighbor.params['num_channels_to_compare'],
        max_spikes_per_cluster=NearestNeighbor.params['max_spikes_per_cluster'],
        max_spikes_for_nn=NearestNeighbor.params['max_spikes_for_nn'],
        n_neighbors=NearestNeighbor.params['n_neighbors'],
        unit_ids=None,
        **kwargs
):
    """
    Compute and return the nearest-neighbor metrics of the sorted dataset.

    Parameters
    ----------
    sorting: SortingExtractor
        The sorting result to be evaluated.
    recording: RecordingExtractor
        The recording extractor from which waveforms and features are extracted
    num_channels_to_compare: int
        The number of channels used for the PC extraction and comparison
    max_spikes_per_cluster: int
        Max spikes to be used from each unit
    max_spikes_for_nn: int
        Max spikes to be used for the nearest-neighbors calculation
    n_neighbors: int
        Number of neighbors to compare
    unit_ids: list
        List of unit ids to compute the metric for. If not specified, all units are used
    **kwargs: keyword arguments
        Optional keyword arguments controlling waveform extraction and filtering
        ('apply_filter', 'freq_min', 'freq_max', 'grouping_property', 'ms_before',
        'ms_after', 'dtype', 'compute_property_from_recording',
        'max_channels_per_waveforms', 'n_jobs', 'memmap',
        'save_property_or_features', 'recompute_info', 'max_spikes_per_unit',
        'seed', 'verbose') and amplitude extraction ('method', 'peak',
        'frames_before', 'frames_after'). See `compute_quality_metrics` for the
        full description of each argument.

    Returns
    -------
    nn_metrics: tuple
        The nearest-neighbor hit rates and miss rates of the sorted units.
    """
    opts = update_all_param_dicts_with_kwargs(kwargs)
    unit_ids = sorting.get_unit_ids() if unit_ids is None else unit_ids
    metric_data = MetricData(
        sorting=sorting,
        recording=recording,
        sampling_frequency=recording.get_sampling_frequency(),
        duration_in_frames=None,
        unit_ids=unit_ids,
        apply_filter=opts["apply_filter"],
        freq_min=opts["freq_min"],
        freq_max=opts["freq_max"],
        verbose=opts["verbose"],
    )
    # Nearest-neighbor hit/miss rates are evaluated on the PC projections.
    metric_data.compute_pca_scores(**kwargs)
    return NearestNeighbor(metric_data=metric_data).compute_metric(
        num_channels_to_compare, max_spikes_per_cluster, max_spikes_for_nn, n_neighbors, **kwargs)
def compute_drift_metrics(
        sorting,
        recording,
        drift_metrics_interval_s=DriftMetric.params['drift_metrics_interval_s'],
        drift_metrics_min_spikes_per_interval=DriftMetric.params['drift_metrics_min_spikes_per_interval'],
        unit_ids=None,
        **kwargs
):
    """
    Computes and returns the drift metrics in the sorted dataset.
    Parameters
    ----------
    sorting: SortingExtractor
        The sorting result to be evaluated.
    recording: RecordingExtractor
        The given recording extractor from which to extract amplitudes
    drift_metrics_interval_s: float
        Time period for evaluating drift.
    drift_metrics_min_spikes_per_interval: int
        Minimum number of spikes for evaluating drift metrics per interval.
    unit_ids: list
        List of unit ids to compute metric for. If not specified, all units are used
    **kwargs: keyword arguments
        Keyword arguments among the following:
            method: str
                If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned.
                If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes
            peak: str
                If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or
                both ('both' - default)
            frames_before: int
                Frames before peak to compute amplitude
            frames_after: int
                Frames after peak to compute amplitude
            apply_filter: bool
                If True, recording is bandpass-filtered
            freq_min: float
                High-pass frequency for optional filter (default 300 Hz)
            freq_max: float
                Low-pass frequency for optional filter (default 6000 Hz)
            grouping_property: str
                Property to group channels. E.g. if the recording extractor has the 'group' property and
                'grouping_property' is 'group', then waveforms are computed group-wise.
            ms_before: float
                Time period in ms to cut waveforms before the spike events
            ms_after: float
                Time period in ms to cut waveforms after the spike events
            dtype: dtype
                The numpy dtype of the waveforms
            compute_property_from_recording: bool
                If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
                property of the recording extractor channel on which the average waveform is the largest
            max_channels_per_waveforms: int or None
                Maximum channels per waveforms to return. If None, all channels are returned
            n_jobs: int
                Number of parallel jobs (default 1)
            memmap: bool
                If True, waveforms are saved as memmap object (recommended for long recordings with many channels)
            save_property_or_features: bool
                If true, it will save features in the sorting extractor
            recompute_info: bool
                If True, waveforms are recomputed
            max_spikes_per_unit: int
                The maximum number of spikes to extract per unit
            seed: int
                Random seed for reproducibility
            verbose: bool
                If True, will be verbose in metric computation
    Returns
    ----------
    dm_metrics: tuple
        The drift metrics of the sorted units, as a pair
        (max_drifts, cumulative_drifts).
    """
    # Merge user-supplied kwargs with the module-level parameter defaults.
    params_dict = update_all_param_dicts_with_kwargs(kwargs)
    if unit_ids is None:
        unit_ids = sorting.get_unit_ids()
    md = MetricData(sorting=sorting, sampling_frequency=recording.get_sampling_frequency(), recording=recording,
                    apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
                    freq_max=params_dict["freq_max"], unit_ids=unit_ids,
                    duration_in_frames=None, verbose=params_dict['verbose'])
    # DriftMetric consumes the PCA scores computed on the MetricData.
    md.compute_pca_scores(**kwargs)
    dm = DriftMetric(metric_data=md)
    dm_metrics = dm.compute_metric(drift_metrics_interval_s, drift_metrics_min_spikes_per_interval, **kwargs)
    return dm_metrics
def compute_quality_metrics(
sorting,
recording=None,
duration_in_frames=None,
sampling_frequency=None,
metric_names=None,
unit_ids=None,
as_dataframe=False,
isi_threshold=ISIViolation.params['isi_threshold'],
min_isi=ISIViolation.params['min_isi'],
snr_mode=SNR.params['snr_mode'],
snr_noise_duration=SNR.params['snr_noise_duration'],
max_spikes_per_unit_for_snr=SNR.params['max_spikes_per_unit_for_snr'],
template_mode=SNR.params['template_mode'],
max_channel_peak=SNR.params['max_channel_peak'],
max_spikes_per_unit_for_noise_overlap=NoiseOverlap.params['max_spikes_per_unit_for_noise_overlap'],
noise_overlap_num_features=NoiseOverlap.params['num_features'],
noise_overlap_num_knn=NoiseOverlap.params['num_knn'],
drift_metrics_interval_s=DriftMetric.params['drift_metrics_interval_s'],
drift_metrics_min_spikes_per_interval=DriftMetric.params['drift_metrics_min_spikes_per_interval'],
max_spikes_for_silhouette=SilhouetteScore.params['max_spikes_for_silhouette'],
num_channels_to_compare=13,
max_spikes_per_cluster=500,
max_spikes_for_nn=NearestNeighbor.params['max_spikes_for_nn'],
n_neighbors=NearestNeighbor.params['n_neighbors'],
**kwargs
):
"""
Computes and returns all specified metrics for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes
duration_in_frames: int
Length of recording (in frames).
sampling_frequency: float
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
metric_names: list
List of metric names to be computed
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
as_dataframe: bool
If True, will return dataframe of metrics. If False, will return dictionary.
isi_threshold: float
The isi threshold for calculating isi violations
min_isi: float
The minimum expected isi value
snr_mode: str
Mode to compute noise SNR ('mad' | 'std' - default 'mad')
snr_noise_duration: float
Number of seconds to compute noise level from (default 10.0)
max_spikes_per_unit_for_snr: int
Maximum number of spikes to compute templates for SNR from (default 1000)
template_mode: str
Use 'mean' or 'median' to compute templates
max_channel_peak: str
If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or both ('both' - default)
max_spikes_per_unit_for_noise_overlap: int
Maximum number of spikes to compute templates for noise overlap from (default 1000)
noise_overlap_num_features: int
Number of features to use for PCA for noise overlap
noise_overlap_num_knn: int
Number of nearest neighbors for noise overlap
drift_metrics_interval_s: float
Time period for evaluating drift.
drift_metrics_min_spikes_per_interval: int
Minimum number of spikes for evaluating drift metrics per interval
max_spikes_for_silhouette: int
Max spikes to be used for silhouette metric
num_channels_to_compare: int
The number of channels to be used for the PC extraction and comparison
max_spikes_per_cluster: int
Max spikes to be used from each unit
max_spikes_for_nn: int
Max spikes to be used for nearest-neighbors calculation
n_neighbors: int
Number of neighbors to compare
**kwargs: keyword arguments
Keyword arguments among the following:
method: str
If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes
peak: str
If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or
both ('both' - default)
frames_before: int
Frames before peak to compute amplitude
frames_after: int
Frames after peak to compute amplitude
apply_filter: bool
If True, recording is bandpass-filtered
freq_min: float
High-pass frequency for optional filter (default 300 Hz)
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz)
grouping_property: str
Property to group channels. E.g. if the recording extractor has the 'group' property and
'grouping_property' is 'group', then waveforms are computed group-wise.
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
compute_property_from_recording: bool
If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
property of the recording extractor channel on which the average waveform is the largest
max_channels_per_waveforms: int or None
Maximum channels per waveforms to return. If None, all channels are returned
n_jobs: int
Number of parallel jobs (default 1)
memmap: bool
If True, waveforms are saved as memmap object (recommended for long recordings with many channels)
save_property_or_features: bool
If true, it will save features in the sorting extractor
recompute_info: bool
If True, waveforms are recomputed
max_spikes_per_unit: int
The maximum number of spikes to extract per unit
seed: int
Random seed for reproducibility
verbose: bool
If True, will be verbose in metric computation
Returns
----------
metrics: dictionary OR pandas.dataframe
Dictionary or pandas.dataframe of metrics.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
metrics_dict = OrderedDict()
if metric_names is None:
metric_names = all_metrics_list
else:
bad_metrics = []
for m in metric_names:
if m not in all_metrics_list:
bad_metrics.append(m)
if len(bad_metrics) > 0:
raise ValueError(f"Improper feature names: {str(bad_metrics)}. The following features names can be "
f"calculated: {str(all_metrics_list)}")
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=sampling_frequency, recording=recording,
apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=duration_in_frames, verbose=params_dict['verbose'])
if "firing_rate" in metric_names or "presence_ratio" in metric_names or "isi_violation" in metric_names:
if recording is None and duration_in_frames is None:
raise ValueError(
"duration_in_frames and recording cannot both be None when computing firing_rate, "
"presence_ratio, and isi_violation")
if "max_drift" in metric_names or "cumulative_drift" in metric_names or "silhouette_score" in metric_names \
or "isolation_distance" in metric_names or "l_ratio" in metric_names or "d_prime" in metric_names \
or "nn_hit_rate" in metric_names or "nn_miss_rate" in metric_names:
if recording is None:
raise ValueError("The recording cannot be None when computing max_drift, cumulative_drift, "
"silhouette_score isolation_distance, l_ratio, d_prime, nn_hit_rate, or amplitude_cutoff.")
else:
md.compute_pca_scores(**kwargs)
if "amplitude_cutoff" in metric_names:
if recording is None:
raise ValueError("The recording cannot be None when computing amplitude cutoffs.")
else:
md.compute_amplitudes(**kwargs)
if "snr" in metric_names:
if recording is None:
raise ValueError("The recording cannot be None when computing snr.")
if "num_spikes" in metric_names:
ns = NumSpikes(metric_data=md)
num_spikes = ns.compute_metric(**kwargs)
metrics_dict['num_spikes'] = num_spikes
if "firing_rate" in metric_names:
fr = FiringRate(metric_data=md)
firing_rates = fr.compute_metric(**kwargs)
metrics_dict['firing_rate'] = firing_rates
if "presence_ratio" in metric_names:
pr = PresenceRatio(metric_data=md)
presence_ratios = pr.compute_metric(**kwargs)
metrics_dict['presence_ratio'] = presence_ratios
if "isi_violation" in metric_names:
iv = ISIViolation(metric_data=md)
isi_violations = iv.compute_metric(isi_threshold, min_isi, **kwargs)
metrics_dict['isi_violation'] = isi_violations
if "amplitude_cutoff" in metric_names:
ac = AmplitudeCutoff(metric_data=md)
amplitude_cutoffs = ac.compute_metric(**kwargs)
metrics_dict['amplitude_cutoff'] = amplitude_cutoffs
if "snr" in metric_names:
snr = SNR(metric_data=md)
snrs = snr.compute_metric(snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, **kwargs)
metrics_dict['snr'] = snrs
if "max_drift" in metric_names or "cumulative_drift" in metric_names:
dm = DriftMetric(metric_data=md)
max_drifts, cumulative_drifts = dm.compute_metric(drift_metrics_interval_s,
drift_metrics_min_spikes_per_interval, **kwargs)
if "max_drift" in metric_names:
metrics_dict['max_drift'] = max_drifts
if "cumulative_drift" in metric_names:
metrics_dict['cumulative_drift'] = cumulative_drifts
if "silhouette_score" in metric_names:
silhouette_score = SilhouetteScore(metric_data=md)
silhouette_scores = silhouette_score.compute_metric(max_spikes_for_silhouette, **kwargs)
metrics_dict['silhouette_score'] = silhouette_scores
if "isolation_distance" in metric_names:
isolation_distance = IsolationDistance(metric_data=md)
isolation_distances = isolation_distance.compute_metric(num_channels_to_compare, max_spikes_per_cluster,
**kwargs)
metrics_dict['isolation_distance'] = isolation_distances
if "noise_overlap" in metric_names:
noise_overlap = NoiseOverlap(metric_data=md)
noise_overlaps = noise_overlap.compute_metric(num_channels_to_compare,
max_spikes_per_unit_for_noise_overlap,
noise_overlap_num_features,
noise_overlap_num_knn,
**kwargs)
metrics_dict['noise_overlap'] = noise_overlaps
if "l_ratio" in metric_names:
l_ratio = LRatio(metric_data=md)
l_ratios = l_ratio.compute_metric(num_channels_to_compare, max_spikes_per_cluster, **kwargs)
metrics_dict['l_ratio'] = l_ratios
if "d_prime" in metric_names:
d_prime = DPrime(metric_data=md)
d_primes = d_prime.compute_metric(num_channels_to_compare, max_spikes_per_cluster, **kwargs)
metrics_dict['d_prime'] = d_primes
if "nn_hit_rate" in metric_names or "nn_miss_rate" in metric_names:
nn = NearestNeighbor(metric_data=md)
nn_hit_rates, nn_miss_rates = nn.compute_metric(num_channels_to_compare, max_spikes_per_cluster,
max_spikes_for_nn, n_neighbors, **kwargs)
if "nn_hit_rate" in metric_names:
metrics_dict['nn_hit_rate'] = nn_hit_rates
if "nn_miss_rate" in metric_names:
metrics_dict['nn_miss_rate'] = nn_miss_rates
if as_dataframe:
metrics = pandas.DataFrame.from_dict(metrics_dict)
metrics = metrics.rename(index={original_idx: unit_ids[i] for
i, original_idx in enumerate(range(len(metrics)))})
else:
metrics = metrics_dict
return metrics
| StarcoderdataPython |
176355 | <reponame>S-Gholami/geometry-python
# author: @s.gholami
# -----------------------------------------------------------------------
# read_matrix_input.py
# -----------------------------------------------------------------------
# Accept inputs from console
# Populate the list with the inputs to form a matrix
def equations_to_matrix() -> list:
    """Read an n x m integer matrix from the console.

    Prompts for the number of rows and columns, then reads each row as a
    whitespace-separated list of integers.  A row with the wrong number of
    entries is rejected and re-read (the previous version restarted the
    whole input through a recursive call and discarded its result, so bad
    input silently produced a matrix with missing rows).

    :return: augmented matrix formed from user input (user inputs = linear equations)
    :rtype: list
    """
    n = int(input("input number of rows "))
    m = int(input("input number of columns "))
    A = []
    for row_space in range(n):
        print("input row ", row_space + 1)
        while True:
            row = input().split()
            if len(row) == m:
                # use map function convert string to integer
                A.append(list(map(int, row)))
                break
            # wrong length: warn and re-read the same row
            print("length must be the column size of A")
    print(A)
    return A
# Entry point: run the interactive matrix reader only when executed directly.
if __name__ == "__main__":
    equations_to_matrix()
else:
    # Importing the module only announces itself; nothing is executed.
    print("read_matrix_input.py is being imported into another module ")
| StarcoderdataPython |
from datetime import timedelta
from sqlalchemy.sql import func
from flask import request
from zeus.api.utils import stats
from zeus.config import db
from zeus.models import Build, User
from zeus.utils import timezone
from .base import Resource
# Stat names accepted by InstallStatsResource's ``stat`` query parameter.
STAT_CHOICES = frozenset(
    ("builds.errored", "builds.total", "users.active", "users.created")
)
class InstallStatsResource(Resource):
    def get(self):
        """
        Return various stats per-day for the installation.

        Query parameters:
          stat       -- one of STAT_CHOICES (required)
          since      -- unix timestamp lower bound; defaults to "tomorrow"
                        so that today's partial bucket is included
          resolution -- one of "1h", "1d", "1w", "1m" (default "1d")
          points     -- number of data points (default per resolution)
        """
        stat = request.args.get("stat")
        if not stat:
            return self.error("invalid stat")
        if stat not in STAT_CHOICES:
            return self.error("invalid stat")
        since = request.args.get("since")
        if since:
            date_end = timezone.fromtimestamp(float(since))
        else:
            date_end = timezone.now() + timedelta(days=1)
        if stat == "users.active":
            date_field = User.date_active
        elif stat == "users.created":
            date_field = User.date_created
        else:
            date_field = Build.date_created
        date_end = date_end.replace(minute=0, second=0, microsecond=0)
        resolution = request.args.get("resolution", "1d")
        # Reject unknown resolutions up front: previously an unrecognized
        # value fell through with ``grouper``/``decr_res`` undefined and the
        # request died with a NameError (HTTP 500).
        if resolution not in ("1h", "1d", "1w", "1m"):
            return self.error("invalid resolution")
        points = int(request.args.get("points") or stats.POINTS_DEFAULT[resolution])
        if resolution == "1h":
            grouper = func.date_trunc("hour", date_field)
            decr_res = stats.decr_hour
        elif resolution == "1d":
            grouper = func.date_trunc("day", date_field)
            date_end = date_end.replace(hour=0)
            decr_res = stats.decr_day
        elif resolution == "1w":
            grouper = func.date_trunc("week", date_field)
            date_end = date_end.replace(hour=0)
            # snap to the beginning of the ISO week (Monday)
            date_end -= timedelta(days=date_end.weekday())
            decr_res = stats.decr_week
        else:  # "1m" -- guaranteed by the validation above
            grouper = func.date_trunc("month", date_field)
            date_end = date_end.replace(hour=0, day=1)
            decr_res = stats.decr_month
        # step back ``points`` buckets to get the window start
        date_begin = date_end
        for _ in range(points):
            date_begin = decr_res(date_begin)
        if stat.startswith("users."):
            queryset = db.session.query(
                grouper.label("grouper"), func.count(User.id)
            ).group_by("grouper")
        else:
            queryset = stats.build_queryset(stat, grouper)
        queryset = queryset.filter(date_field >= date_begin, date_field < date_end)
        queryset = queryset.limit(points)
        results = {
            # HACK(dcramer): force (but dont convert) the timezone to be utc
            # while this isnt correct, we're not looking for correctness yet
            k.replace(tzinfo=timezone.utc): v
            for k, v in queryset
        }
        data = []
        cur_date = date_end
        for _ in range(points):
            cur_date = decr_res(cur_date)
            data.append(
                {
                    "time": int(float(cur_date.strftime("%s.%f")) * 1000),
                    "value": (
                        int(float(results[cur_date]))
                        if results.get(cur_date)
                        else (0 if stat in stats.ZERO_FILLERS else None)
                    ),
                }
            )
        return self.respond(data)
| StarcoderdataPython |
1734686 | <gh_stars>0
# -*- coding: utf-8 -*-
import mysql.connector
import logging
class MySQLPipeline(object):
    """Scrapy pipeline persisting scraped listings into a MySQL table.

    The listing URL is the primary key, so re-scraped (duplicate) items are
    skipped.  Any other database failure now propagates instead of being
    silently swallowed, as the previous bare ``except:`` did.
    """

    def open_spider(self, spider):
        """Open the DB connection and make sure the database/table exist."""
        # Set your database connection details
        self.connection = mysql.connector.connect(host="localhost", user="root", password="<PASSWORD>")
        # Creating a cursor object, DB & Tables
        self.c = self.connection.cursor()
        self.c.execute("create database if not exists Wallmart")
        self.c.execute("use Wallmart")
        self.c.execute('''
        CREATE TABLE if not exists HomeFurnitureAppliances(
        url VARCHAR(300) PRIMARY KEY,
        productName TEXT,
        price TEXT,
        lvl1_category TEXT,
        lvl2_category TEXT,
        lvl3_category TEXT,
        lvl4_category TEXT
        )
        ''')
        self.connection.commit()
        logging.warning("Connection established to DATABASE !")

    def close_spider(self, spider):
        """Release the cursor and the connection when the spider stops."""
        self.c.close()
        self.connection.close()
        logging.warning("Connection to DATABASE closed !")

    def process_item(self, item, spider):
        """Insert one listing; duplicates (same url) are logged and skipped."""
        try:
            self.c.execute('''
            INSERT INTO HomeFurnitureAppliances (url,productName,price,lvl1_category,lvl2_category,lvl3_category,lvl4_category) VALUES(%s,%s,%s,%s,%s,%s,%s)
            ''', (
                item.get('product_url'),
                item.get('product_name'),
                item.get('product_price'),
                item.get('lvl1_cat'),
                item.get('lvl2_cat'),
                item.get('lvl3_cat'),
                item.get('lvl4_cat')
            ))
            self.connection.commit()
        except mysql.connector.IntegrityError:
            # duplicate primary key (url already stored) -- skip and continue
            logging.warning("Skipped duplicate listing")
        return item
| StarcoderdataPython |
# Print the typed name one additional character per line (a "growing
# string" triangle, starting from the empty prefix at i == 0).
# NOTE: the first line of this script previously carried dataset-residue
# ("3327957 | ") that made it a SyntaxError; the residue has been removed.
name = str(input('Digite um nome: ')).upper()
for i in range(0, len(name)+1):
    print(name[:i])
from django.contrib import admin
from .models import Vote, Blog
# Register your models here.
# Expose Vote and Blog in the Django admin with the default ModelAdmin.
admin.site.register(Vote)
admin.site.register(Blog)
1795767 | <reponame>mikusjelly/saam<gh_stars>1-10
import os
import configparser
import yaml
__version__ = '0.0.1'
# Repository root: one level above this package directory.
HOME = os.path.join(os.path.dirname(__file__), '..')
# Tool locations are read from conf.ini; an empty entry falls back to the
# copy bundled under ../tools/.
__cfg = configparser.ConfigParser()
__cfg.read(os.path.join(HOME, 'conf.ini'))
# ../tools/apktool/apktool.jar
__APKTOOL_DEFAULT = os.path.join(HOME, 'tools', 'apktool', 'apktool.jar')
__APKTOOL_CONF = __cfg.get('Paths', 'apktool')
APKTOOL_PATH = __APKTOOL_CONF if __APKTOOL_CONF else __APKTOOL_DEFAULT
# ../tools/cfr.jar (CFR Java decompiler)
__CFR_DEFAULT = os.path.join(HOME, 'tools', 'cfr.jar')
__CFR_CONF = __cfg.get('Paths', 'cfr')
CFR_PATH = __CFR_CONF if __CFR_CONF else __CFR_DEFAULT
# Risk definitions, parsed once at import time.
RISKS_PATH = os.path.join(HOME, 'datas', 'risks.yml')
with open(RISKS_PATH, encoding='utf-8') as f:
    RISKS = yaml.full_load(f.read())
# YARA rule directory and the pre-compiled rule bundle.
YARA_PATH = os.path.join(HOME, 'rules')
YARAC_PATH = os.path.join(HOME, 'rules', 'rules.yarc')
| StarcoderdataPython |
1611207 | #!/usr/bin/env python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from madgraph.interface import reweight_interface
from six.moves import map
from six.moves import range
from six.moves import zip
################################################################################
#
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
###############################################################################
"""
####################################################################
#
# Routine to decay prodution events in a generic way,
# including spin correlation effects
#
# Ref: <NAME>, <NAME>, <NAME>, <NAME>
# JHEP 04 (2007) 081
#
#
#####################################################################
"""
import collections
import re
import os
import shutil
import logging
import time
import cmath
import copy
pjoin = os.path.join
from subprocess import Popen, PIPE, STDOUT
os.sys.path.append("../.")
import string
import itertools
#import madgraph.core.base_objects as base_objects
#import madgraph.core.helas_objects as helas_objects
#import madgraph.core.diagram_generation as diagram_generation
import models.import_ufo as import_ufo
#import madgraph.various.process_checks as process_checks
#from time import strftime
#from madgraph.interface.madgraph_interface import MadGraphCmd
import madgraph.interface.master_interface as Cmd
import madgraph.interface.madevent_interface as me_interface
import madgraph.iolibs.save_load_object as save_load_object
import madgraph.iolibs.files as files
import madgraph.fks.fks_common as fks_common
import aloha
logger = logging.getLogger('decay.stdout') # -> stdout
logger_stderr = logging.getLogger('decay.stderr') # ->stderr
import random
import math
from madgraph import MG5DIR, MadGraph5Error
import madgraph.various.misc as misc
#import time
class MadSpinError(MadGraph5Error):
    """Error raised by the MadSpin decay machinery."""
    pass
class Event:
    """Read one event from an LHE file, record its information, and write it
    back out in the LHE format.
    This class is used both for production and decayed events."""
    def __init__(self, inputfile=None, banner=None):
        """Store the event-file object (iterated line by line by
        get_next_event) and the optional run banner."""
        self.inputfile=inputfile
        self.particle={}  # 1-based MG-like index -> external-particle record
        self.banner = banner
    def give_momenta(self, map_event=None):
        """ return the set of external momenta of the event,
        in two different formats:
        p is the list of momentum objects (initial + final states only),
        string is the same momenta as an "E px py pz" text block.
        map_event optionally remaps the ordering (position -> particle index).
        """
        if not map_event:
            map_event = {}
            for part in range(len(self.particle)):
                map_event[part] = part
        p=[]
        string=""
        for id in range(len(self.particle)):
            particle = self.particle[map_event[id] + 1]
            # istup < 2 keeps initial (-1) and final (1) states, skips resonances
            if particle["istup"] < 2:
                mom = particle["momentum"]
                p.append(mom)
                string+= '%s %s %s %s \n' % (mom.E, mom.px, mom.py, mom.pz)
        return p, string
    def change_wgt(self, value=None, factor=None):
        """Update the central event weight, either to an absolute ``value``
        or by a multiplicative ``factor``, and rescale the weights stored in
        the <rwgt> block accordingly."""
        if value:
            self.wgt = value
        elif factor:
            self.wgt *= factor
        # change the wgt associate to the additional weight
        start, stop = self.rwgt.find('<rwgt>'), self.rwgt.find('</rwgt>')
        if start != -1 != stop :
            pattern = re.compile(r'''<\s*wgt id=[\'\"](?P<id>[^\'\"]+)[\'\"]\s*>\s*(?P<val>[\ded+-.]*)\s*</wgt>''')
            data = pattern.findall(self.rwgt)
            if len(data)==0:
                print(self.rwgt)
            # NOTE(review): ``value`` below is the loop variable (it shadows the
            # argument).  If this method is called with value= and no factor,
            # ``factor`` is None here and the multiplication raises TypeError
            # (only ValueError is caught) -- confirm callers always pass
            # ``factor`` when an <rwgt> block is present.
            try:
                text = ''.join(' <wgt id=\'%s\'> %+15.7e </wgt>\n' % (pid, float(value) * factor)
                           for (pid,value) in data)
            except ValueError as error:
                raise Exception('Event File has unvalid weight. %s' % error)
            self.rwgt = self.rwgt[:start] + '<rwgt>\n'+ text + self.rwgt[stop:]
    def string_event_compact(self):
        """ return a string with the momenta of the event written
        in an easy readable way ("pid px py pz E m", one particle per line)
        """
        line=[]
        for part in range(1,len(self.particle)+1):
            pid = self.particle[part]["pid"]
            m = self.particle[part]["momentum"]
            line.append("%i %s %.7g %.7g %.7g %.7g" %
                        (pid, m.px, m.py, m.pz, m.E, m.m))
        line.append('')
        return "\n".join(line)
    def get_tag(self):
        """Return ((sorted initial pids, sorted final pids), order) where
        ``order`` keeps the pids in their original [initial, final] ordering.
        A particle with both mothers equal to 0 counts as an initial state."""
        initial = []
        final = []
        order = [[],[]]
        for part in self.particle.values():
            pid = part['pid']
            mother1 = part['mothup1']
            mother2 = part['mothup2']
            if 0 == mother1 == mother2:
                initial.append(pid)
                order[0].append(pid)
            else:
                final.append(pid)
                order[1].append(pid)
        initial.sort()
        final.sort()
        return (tuple(initial), tuple(final)), order
    def string_event(self):
        """ return a string with the information of the event written
        in the lhe format.
        """
        line=self.event_init_line # This is the <event> line
        line1=' %2d %6d %+13.7e %14.8e %14.8e %14.8e' % \
        (self.nexternal,self.ievent,self.wgt,self.scale,self.aqed,self.aqcd)
        line+=line1+"\n"
        scales= []
        for item in range(1,len(list(self.event2mg.keys()))+1):
            part=self.event2mg[item]
            if part>0:
                particle_line=self.get_particle_line(self.particle[part])
                if abs(self.particle[part]["istup"]) == 1:
                    if "pt_scale" in self.particle[part]:
                        scales.append(self.particle[part]["pt_scale"])
                    else:
                        scales.append(None)
            else:
                particle_line=self.get_particle_line(self.resonance[part])
            line+=particle_line
        # only emit a <scales> tag when at least one pt_clust_* scale was read
        if any(scales):
            sqrts = self.particle[1]["pt_scale"]
            line += "<scales %s></scales>\n" % ' '.join(['pt_clust_%i=\"%s\"'
                                                        %(i-1,s if s else sqrts)
                                                        for i,s in enumerate(scales)
                                                        if i>1])
        if self.diese:
            line += self.diese
        if self.rwgt:
            line += self.rwgt
        line+="</event> \n"
        return line
    def get_particle_line(self,leg):
        """Format one particle/resonance record as an LHE particle line."""
        line=" %8d %2d %4d %4d %4d %4d %+18.11e %+18.11e %+18.11e %18.11e %18.11e %10.4e %10.4e" \
        % (leg["pid"], leg["istup"],leg["mothup1"],leg["mothup2"],\
        leg["colup1"],leg["colup2"],leg["momentum"].px,leg["momentum"].py,\
        leg["momentum"].pz,leg["momentum"].E, leg["mass"],\
        0.0,float(leg["helicity"]) )
        line+="\n"
        return line
    def reshuffle_resonances(self,mother):
        """ recompute the momentum of the resonance ``mother`` as the sum of
        the momenta of its daughters, then recurse upward to its own mother
        """
        daughters=[]
        for part in self.event2mg.keys():
            index=self.event2mg[part]
            if index>0:
                if self.particle[index]["mothup1"]==mother:
                    daughters.append(index)
            if index<0:
                if self.resonance[index]["mothup1"]==mother:
                    daughters.append(index)
#        if len(daughters)!=2:
#            logger.info("Got more than 2 (%s) daughters for one particles" % len(daughters))
#            logger.info("in one production event (before decay)")
        if daughters[0]>0:
            momentum_mother=self.particle[daughters[0]]["momentum"].copy()
        else:
            momentum_mother=self.resonance[daughters[0]]["momentum"].copy()
# there might be more than 2 daughters, add all their momentum to get the momentum of the mother
        for index in range(1,len(daughters)):
            if daughters[index]>0:
                momentum_mother=momentum_mother.add(self.particle[daughters[index]]["momentum"])
            else:
                momentum_mother=momentum_mother.add(self.resonance[daughters[index]]["momentum"])
        res=self.event2mg[mother]
        del self.resonance[res]["momentum"]
        self.resonance[res]["momentum"]=momentum_mother.copy()
# recurrence:
        if self.resonance[res]["mothup1"]>2:
            self.reshuffle_resonances(self.resonance[res]["mothup1"])
    def reset_resonances(self):
        """ re-evaluate the momentum of each resonance, based on the momenta
        of the external particles
        """
        mothers=[]
        for part in self.particle.keys():
            if self.particle[part]["mothup1"]>2 and \
                self.particle[part]["mothup1"] not in mothers :
                mothers.append(self.particle[part]["mothup1"])
                self.reshuffle_resonances(self.particle[part]["mothup1"])
    def assign_scale_line(self, line):
        """read the line corresponding to global event line
        format of the line is:
        Nexternal IEVENT WEIGHT SCALE AEW AS
        (Fortran 'd'/'D' float exponents are normalized to 'e' first)
        """
        line = line.replace('d','e').replace('D','e')
        inputs = line.split()
        assert len(inputs) == 6
        self.nexternal=int(inputs[0])
        self.ievent=int(inputs[1])
        self.wgt=float(inputs[2])
        self.scale=float(inputs[3])
        self.aqed=float(inputs[4])
        self.aqcd=float(inputs[5])
    def get_next_event(self):
        """ read next event in the lhe event file.
        Returns 1 once a full <event> block has been parsed (the data is
        stored on the instance) and "no_event" when the file is exhausted.
        """
        line_type = 'none' # support type: init / event / rwgt
        self.diese = ''
        for line in self.inputfile:
            origline = line
            line = line.lower()
            if line=="":
                continue
            # Find special tag in the line
            if line[0]=="#":
                self.diese+=origline
                continue
            if '<event' in line:
                #start new_event
                #Get the right attributes (e.g. <event id='123' npNLO='-1'>)
                self.event_init_line=line.lstrip().replace('nplo','npLO').replace('npnlo','npNLO')
                line_type = 'init'
                continue
            elif '<rwgt>' in line:
                #re-weighting information block
                line_type = 'rwgt'
                #No Continue! need to keep track of this line
            elif '</event>' in line:
                if not self.particle:
                    continue
                self.shat=self.particle[1]["momentum"].dot(self.particle[2]["momentum"])
                return 1
            elif line_type == 'rwgt' and 'wgt' in line:
                # force to continue to be in rwgt line up to </rwgt>
                line_type = 'rwgt'
            elif "pt_clust_" in line:
                line_type="clusteringv3"
            elif '<' in line:
                line_type = 'other_block'
            if line_type == 'none':
                continue
            elif line_type == 'other_block':
                self.diese += origline
            # read the line and assign the date accordingly
            elif line_type == 'init':
                line_type = 'event'
                self.assign_scale_line(line)
                # initialize some local variable
                index_prod=0
                index_external=0
                index_resonance=0
                self.particle={}
                self.resonance={}
                self.max_col=500
                self.diese=""
                self.rwgt=""
                self.event2mg={} # dict. event-like label <-> "madgraph-like" label
                # in "madgraph-like", resonances are labeled -1, -2, ...
            elif line_type == 'rwgt': #special aMC@NLO information
                self.rwgt += line
                if '</rwgt>' in line:
                    line_type = 'event'
            elif line_type == 'event':
                index_prod+=1
                line=line.replace("\n","")
                line = line.replace('d','e').replace('D','e')
                inputs=line.split()
                pid=int(inputs[0])
                istup=int(inputs[1])
                mothup1=int(inputs[2])
                mothup2=int(inputs[3])
                colup1=int(inputs[4])
                if colup1>self.max_col:
                    self.max_col=colup1
                colup2=int(inputs[5])
                if colup2>self.max_col:
                    self.max_col=colup2
                mom=momentum(float(inputs[9]),float(inputs[6]),float(inputs[7]),float(inputs[8]))
                mass=float(inputs[10])
                helicity=float(inputs[12])
                # |istup| == 1: external particle; istup == 2: intermediate resonance
                if abs(istup)==1:
                    index_external+=1
                    self.event2mg[index_prod]=index_external
                    self.particle[index_external]={"pid":pid,"istup":istup,"mothup1":mothup1,\
                    "mothup2":mothup2,"colup1":colup1,"colup2":colup2,"momentum":mom,"mass":mass,"helicity":helicity}
                elif istup==2:
                    index_resonance=index_resonance-1
                    self.event2mg[index_prod]=index_resonance
                    self.resonance[index_resonance]={"pid":pid,"istup":istup,"mothup1":mothup1,\
                    "mothup2":mothup2,"colup1":colup1,"colup2":colup2,"momentum":mom,"mass":mass,"helicity":helicity}
                else:
                    logger.warning('unknown status in lhe file')
            elif line_type == "clusteringv3":
                scales = re.findall(r"""pt_clust_(\d+)=\"([e\+\-.\d]+)\"""", line)
                scales = sorted(scales, key= lambda x: -1*int(x[0]))
                for index in range(1,len(self.particle)+1):
                    if self.particle[index]["istup"] == 1:
                        self.particle[index]["pt_scale"] = scales.pop()[1]
                if not self.banner:
                    self.particle[1]["pt_scale"] = self.particle[1]["momentum"].E + self.particle[2]["momentum"].E
                else:
                    self.particle[1]["pt_scale"] = float(self.banner.get('run_card', 'ebeam1'))+float(self.banner.get('run_card', 'ebeam2'))
        # end of file reached without a complete <event> block
        return "no_event"
class pid2label(dict):
    """Mapping PDG id -> particle name for a given model.

    Positive ids map to the particle name, negative ids to the antiname.
    """

    def __init__(self, model):
        for part in model["particles"]:
            code = part["pdg_code"]
            self[code] = part["name"]
            self[-code] = part["antiname"]
class pid2color(dict):
    """Mapping PDG id -> color representation for a given model (e.g. 5: -3).

    Singlets (1) and octets (8) are self-conjugate, so the antiparticle
    keeps the same representation; every other representation flips sign.
    """

    def __init__(self, model):
        for part in model["particles"]:
            code, color = part["pdg_code"], part["color"]
            self[code] = color
            self[-code] = color if color in (1, 8) else -color
class label2pid(dict):
    """Mapping particle name -> PDG id for a given model.

    The antiname maps to the opposite id; a self-conjugate particle gets
    the positive id for both entries.
    """

    def __init__(self, model):
        for part in model["particles"]:
            pdg = part.get_pdg_code()
            if part['self_antipart']:
                self[part["name"]] = abs(pdg)
                self[part["antiname"]] = abs(pdg)
            else:
                self[part["name"]] = pdg
                self[part["antiname"]] = -pdg
class dc_branch_from_me(dict):
    """ A dictionary to record information necessary to decay particles
    { -1 : {"d1": { "label": XX , "nb": YY }, "d2": { "label": XX , "nb": YY } },
    -2 : {"d1": { "label": XX , "nb": YY }, "d2": { "label": XX , "nb": YY } },
    ....
    }
    Propagators carry negative indices (-1, -2, ...); stable daughters carry
    positive 1-based indices.
    """
    def __init__(self, process):
        """Build the decay tree for ``process`` (a process with attached
        decay chains), filling self["tree"], self.nexternal (number of
        stable daughters) and self.nb_decays (number of propagators)."""
        self.model = process.get('model')
        self["tree"]={}
        self.nexternal = 0
        self.nb_decays = 1
        #define a function to allow recursion.
        def add_decay(proc, propa_id=-1):
            # see what need to be decayed
            to_decay = {}
            for dec in proc.get('decay_chains'):
                pid = dec.get('legs')[0].get('id')
                if pid in to_decay:
                    to_decay[pid].append(dec)
                else:
                    to_decay[pid] = [dec]
            #done
            self['tree'][propa_id] = {'nbody': len(proc.get('legs'))-1,\
                                      'label':proc.get('legs')[0].get('id')}
            # loop over the child
            child_propa_id = propa_id
            for c_nb,leg in enumerate(proc.get('legs')):
                if c_nb == 0:
                    continue
                self["tree"][propa_id]["d%s" % c_nb] = {}
                c_pid = leg.get('id')
                self["tree"][propa_id]["d%s" % c_nb]["label"] = c_pid
                self["tree"][propa_id]["d%s" % c_nb]["labels"] = [c_pid]
                if c_pid in to_decay:
                    # this daughter decays further: give it the next negative id
                    child_propa_id -= 1
                    self["tree"][propa_id]["d%s" % c_nb]["index"] = child_propa_id
                    self.nb_decays += 1
                    child_propa_id = add_decay(to_decay[c_pid].pop(), child_propa_id)
                else:
                    # stable daughter: next positive external index
                    self.nexternal += 1
                    self["tree"][propa_id]["d%s" % c_nb]["index"] = self.nexternal
            return child_propa_id
        # launch the recursive loop
        add_decay(process)
    def generate_momenta(self,mom_init,ran, pid2width,pid2mass,BW_cut,E_collider, sol_nb=None):
        """Generate the momenta in each decay branch
        If ran=1: the generation is random, with
        a. p^2 of each resonance generated according to a BW distribution
        b. cos(theta) and phi (angles in the rest frame of the decaying particle)
        are generated according to a flat distribution (no grid)
        the phase-space weight is also returned (up to an overall normalization)
        since it is needed in the unweighting procedure
        If ran=0: evaluate the momenta based on the previously-generated p^2, cos(theta)
        and phi in each splitting.
        This is used in the reshuffling phase (e.g. when we give a mass to gluons
        in the decay chain )
        Returns (index2mom, weight, sol_nb), or (0, 0, 0) when the generated
        daughter masses exceed the mother mass (point rejected).
        """
        index2mom={}
        # pid2mom={} # a dict { pid : {"status":status, "momentum":momentum} }
        assert isinstance(mom_init, momentum)
        index2mom[-1] = {}
        index2mom[-1]["momentum"] = mom_init
        if index2mom[-1]['momentum'].m < 1e-3:
            logger.warning('Decaying particle with m< 1e-3 GeV in generate_momenta')
        index2mom[-1]["pid"] = self['tree'][-1]["label"]
        index2mom[-1]["status"] = 2
        weight=1.0
        for res in range(-1,-self.nb_decays-1,-1):
            tree = self["tree"][res]
            # Here mA^2 has to be set to p^2:
            #
            # IF res=-1:
            # p^2 has been either fixed to the value in the
            # production lhe event, or generated according to a Breit-Wigner distr.
            # during the reshuffling phase of the production event
            # -> we just need to read the value here
            # IF res<-1:
            # p^2 has been generated during the previous iteration of this loop
            # -> we just need to read the value here
            mA=index2mom[res]["momentum"].m
            if mA < 1.0e-3:
                logger.debug('Warning: decaying parting with m<1 MeV in generate_momenta ')
            mass_sum = mA
            all_mass = []
            for i in range(tree["nbody"]):
                tag = "d%s" % (i+1)
                d = tree[tag]["index"]
                # For the daughters, the mass is either generate (intermediate leg + BW mode on)
                # or set to the pole mass (external leg or BW mode off)
                # If ran=0, just read the value from the previous generation of momenta
                #(this is used for reshuffling purposes)
                if d>0 or not BW_cut :
                    m = pid2mass(tree[tag]["label"])
                elif ran==0: # reshuffling phase
                    m= tree[tag]["mass"]
                else:
                    pid=tree[tag]["label"]
                    # NOTE: here pole and width are normalized by 4.0*mB**2,
                    # Just a convention
                    pole=0.25 #pid2mass[pid]**2/mA**2
                    w=pid2width(pid)
                    mpole=pid2mass(pid)
                    width=pid2width(pid)*pid2mass(pid)/(4.0*pid2mass(pid)**2) #/mA**2
                    m_min=max(mpole-BW_cut*w, 0.5)
                    m_max=mpole+BW_cut*w
                    if E_collider>0: m_max=min(m_max,0.99*E_collider)
                    zmin=math.atan(m_min**2/w/mpole-mpole/w)/width
                    zmax=math.atan(m_max**2/w/mpole-mpole/w)/width
                    m, jac=self.transpole(pole,width, zmin,zmax)
                    m = math.sqrt(m * 4.0 * mpole**2)
                    # record the mass for the reshuffling phase,
                    # in case the point passes the reweighting creteria
                    tree[tag]["mass"] = m
                    #update the weight of the phase-space point
                    weight=weight*jac
                # for checking conservation of energy
                mass_sum -= m
                all_mass.append(m)
            if mass_sum < 0:
                logger.debug('mA<mB+mC in generate_momenta')
                logger.debug('mA = %s' % mA)
                return 0, 0, 0 # If that happens, throw away the DC phase-space point ...
                # I don't expect this to be inefficient, since there is a BW cut
            if tree["nbody"] > 2:
                raise Exception('Phase Space generator not yet ready for 3 body decay')
            if ran==1:
                decay_mom=generate_2body_decay(index2mom[res]["momentum"],mA, all_mass[0],all_mass[1])
                # record the angles for the reshuffling phase,
                # in case the point passes the reweighting creteria
                tree["costh"]=decay_mom.costh
                tree["sinth"]=decay_mom.sinth
                tree["cosphi"]=decay_mom.cosphi
                tree["sinphi"]=decay_mom.sinphi
            else:
                # we are in the reshuffling phase,
                # so we read the angles that have been stored from the
                # previous phase-space point generation
                costh=self["tree"][res]["costh"]
                sinth=self["tree"][res]["sinth"]
                cosphi=self["tree"][res]["cosphi"]
                sinphi=self["tree"][res]["sinphi"]
                decay_mom=generate_2body_decay(index2mom[res]["momentum"],mA, all_mass[0],all_mass[1],\
                costh_val=costh, sinth_val=sinth, cosphi_val=cosphi, \
                sinphi_val=sinphi)
            # record the momenta for later use
            index2mom[self["tree"][res]["d1"]["index"]]={}
            index2mom[self["tree"][res]["d1"]["index"]]["momentum"]=decay_mom.momd1
            if sol_nb is None:
                sol_nb = random.randint(0,len(self["tree"][res]["d1"]["labels"])-1)
            # print self["tree"][res]["d1"]["labels"]
            # print '584 get sol_nb', sol_nb,'=>',self["tree"][res]["d1"]["labels"][sol_nb],self["tree"][res]["d2"]["labels"][sol_nb]
            # else:
            # print sol_nb, sol_nb is None,
            # print 'take back', sol_nb,'=>',self["tree"][res]["d1"]["labels"][sol_nb],self["tree"][res]["d2"]["labels"][sol_nb]
            index2mom[self["tree"][res]["d1"]["index"]]["pid"]=self["tree"]\
            [res]["d1"]["labels"][sol_nb]
            index2mom[self["tree"][res]["d2"]["index"]]={}
            index2mom[self["tree"][res]["d2"]["index"]]["momentum"]=decay_mom.momd2
            index2mom[self["tree"][res]["d2"]["index"]]["pid"]=self["tree"]\
            [res]["d2"]["labels"][sol_nb]
            # positive index -> final state (status 1); negative -> resonance (status 2)
            if (self["tree"][res]["d1"]["index"]>0):
                index2mom[self["tree"][res]["d1"]["index"]]["status"]=1
            else:
                index2mom[self["tree"][res]["d1"]["index"]]["status"]=2
            if (self["tree"][res]["d2"]["index"]>0):
                index2mom[self["tree"][res]["d2"]["index"]]["status"]=1
            else:
                index2mom[self["tree"][res]["d2"]["index"]]["status"]=2
        return index2mom, weight, sol_nb
    def transpole(self,pole,width, zmin, zmax):
        """ routine for the generation of a p^2 according to
        a Breit Wigner distribution
        the generation window is
        [ M_pole^2 - 30*M_pole*Gamma , M_pole^2 + 30*M_pole*Gamma ]
        Returns (generated value, Jacobian of the mapping).
        """
        z=zmin+(zmax-zmin)*random.random()
        y = pole+width*math.tan(width*z)
        jac=(width/math.cos(width*z))**2*(zmax-zmin)
        return y, jac
    def add_decay_ids(self, proc_list):
        """Append the pids of alternative decay channels from each process in
        ``proc_list`` to the existing tree's per-daughter "labels" lists."""
        #define a function to allow recursion.
        def add_decay(proc, propa_id=-1):
            # see what need to be decayed
            to_decay = {}
            for dec in proc.get('decay_chains'):
                pid = dec.get('legs')[0].get('id')
                if pid in to_decay:
                    to_decay[pid].append(dec)
                else:
                    to_decay[pid] = [dec]
            # loop over the child
            for c_nb,leg in enumerate(proc.get('legs')):
                if c_nb == 0:
                    continue
#                self["tree"][propa_id]["d%s" % c_nb] = {}
                c_pid = leg.get('id')
                self["tree"][propa_id]["d%s" % c_nb]["labels"].append(c_pid)
                # NOTE(review): unlike __init__, two decaying siblings here both
                # recurse with propa_id-1 (the id is not threaded through) --
                # confirm this path is only used for one decay per level.
                if c_pid in to_decay:
                    add_decay(to_decay[c_pid].pop(), propa_id-1)
        # launch the recursive loop
        for proc in proc_list:
            add_decay(proc)
class momentum:
    """A minimal 4-vector with the operations needed by the decay machinery.

    Attributes: E, px, py, pz, mod2 (|p|^2), sq (E^2 - |p|^2) and m
    (sqrt(sq), clamped to zero when sq is negligible next to E^2 + |p|^2).
    """

    def __init__(self, E, px, py, pz):
        self.E = E
        self.px = px
        self.py = py
        self.pz = pz
        self.mod2 = px**2 + py**2 + pz**2
        self.sq = E**2 - self.mod2
        scale = E**2 + self.mod2
        if not scale:
            self.m = 0
        elif self.sq / scale < 1e-8:
            # numerically massless (or spacelike through rounding)
            self.m = 0.0
        else:
            self.m = math.sqrt(self.sq)

    def dot3(self, q):
        """Euclidean product of the spatial components only."""
        return self.px * q.px + self.py * q.py + self.pz * q.pz

    def dot(self, q):
        """Minkowski inner product with signature (+,-,-,-)."""
        return self.E * q.E - self.px * q.px - self.py * q.py - self.pz * q.pz

    def subtract(self, q):
        """Component-wise difference self - q as a new momentum."""
        return momentum(self.E - q.E, self.px - q.px, self.py - q.py, self.pz - q.pz)

    def add(self, q):
        """Component-wise sum self + q as a new momentum."""
        return momentum(self.E + q.E, self.px + q.px, self.py + q.py, self.pz + q.pz)
    __add__ = add

    def nice_string(self):
        """Space-separated "E px py pz" representation."""
        return " ".join(str(c) for c in (self.E, self.px, self.py, self.pz))
    __str__ = nice_string

    def boost(self, q):
        """Boost this vector from the frame where q is at rest to the frame
        where q has the given momentum (same convention as HELAS)."""
        q2 = q.mod2
        if not (q2 > 1E-10 * abs(q.E)):
            # q is numerically at rest: nothing to do
            return momentum(self.E, self.px, self.py, self.pz)
        pq = self.dot3(q)
        mass = q.m
        coef = ((q.E - mass) * pq / q2 + self.E) / mass
        return momentum((self.E * q.E + pq) / mass,
                        self.px + q.px * coef,
                        self.py + q.py * coef,
                        self.pz + q.pz * coef)

    def copy(self):
        """Independent copy of this 4-vector."""
        return momentum(self.E, self.px, self.py, self.pz)

    def invrot(self, q):
        """Inverse of :meth:`rot`: undo the rotation that maps the z axis
        onto the direction of q (energy is untouched)."""
        qt2 = (q.px)**2 + (q.py)**2
        if qt2 == 0.0:
            # q along the z axis: identity, or a sign flip if q points down
            sign = 1.0 if q.pz > 0 else -1.0
            return momentum(self.E, sign * self.px, sign * self.py, sign * self.pz)
        qq = math.sqrt(qt2 + q.pz**2)
        qt = math.sqrt(qt2)
        ry = -q.py / qt * self.px + q.px / qt * self.py
        if q.pz == 0:
            rx = -qq / qt * self.pz
            if q.py != 0:
                rz = (self.py - q.py * q.pz / qq / qt - q.px / qt * ry) * qq / q.py
            else:
                rz = (self.px - q.px * q.pz / qq / qt * rx + q.py / qt * ry) * qq / q.px
        else:
            if q.py != 0:
                rz = (qt**2 * self.py + q.py * q.pz * self.pz - q.px * qt * ry) / qq / q.py
            else:
                rz = (q.px * self.px + q.pz * self.pz) / qq
            rx = (-self.pz + q.pz / qq * rz) * qq / qt
        return momentum(self.E, rx, ry, rz)

    def rot(self, q):
        """Rotate the spatial components from a frame where q is aligned
        with the z axis to the frame where q has its given direction
        (taken from HELAS; energy is untouched)."""
        qt2 = (q.px)**2 + (q.py)**2
        if qt2 == 0.0:
            sign = 1.0 if q.pz > 0 else -1.0
            return momentum(self.E, sign * self.px, sign * self.py, sign * self.pz)
        qq = math.sqrt(qt2 + q.pz**2)
        qt = math.sqrt(qt2)
        rx = q.px * q.pz / qq / qt * self.px - q.py / qt * self.py + q.px / qq * self.pz
        ry = q.py * q.pz / qq / qt * self.px + q.px / qt * self.py + q.py / qq * self.pz
        rz = -qt / qq * self.px + q.pz / qq * self.pz
        return momentum(self.E, rx, ry, rz)
class generate_2body_decay:
    """Generate momenta for a generic A > B + C decay."""

    def __init__(self, p, mA, mB, mC, costh_val=None, sinth_val=None, cosphi_val=None, sinphi_val=None):
        """ Generate the momentum of B and C in the decay A -> B+C

        If the angles are given, use them to reconstruct the momenta of B, C
        in the rest frame of A.
        If the routine is called without (costh_val, ...), then generate
        cos(theta) and phi randomly (flat distr.) in the rest frame of A.
        Finally, boost the momenta of B and C in the frame where A has
        momentum p.
        """
        self.mA = mA
        self.mB = mB
        self.mC = mC
        # |p| of each daughter in the rest frame of A
        pmod = self.lambda_fct() / (2.0 * self.mA)
        # Test against None explicitly: the previous truthiness check
        # ("if not costh_val") regenerated random angles whenever a perfectly
        # valid value of exactly 0.0 (theta or phi = 90 degrees) was passed.
        if costh_val is None:
            costh = 2.0 * random.random() - 1.0
            sinth = math.sqrt(1 - costh**2)
        else:
            costh = costh_val
            sinth = sinth_val
        if cosphi_val is None:
            phi = random.random() * 2.0 * math.pi
            sinphi = math.sin(phi)
            cosphi = math.cos(phi)
        else:
            sinphi = sinphi_val
            cosphi = cosphi_val
        energyB = math.sqrt(pmod**2 + mB**2)
        energyC = math.sqrt(pmod**2 + mC**2)
        # back-to-back daughters in the rest frame of A, then boost to the lab
        pBrest = momentum(energyB, pmod * cosphi * sinth, pmod * sinphi * sinth, pmod * costh)
        pCrest = momentum(energyC, -pmod * cosphi * sinth, -pmod * sinphi * sinth, -pmod * costh)
        self.momd1 = pBrest.boost(p)
        self.momd2 = pCrest.boost(p)
        # record costh and phi for later use (reshuffling phase)
        self.costh = costh
        self.sinth = sinth
        self.cosphi = cosphi
        self.sinphi = sinphi

    def lambda_fct(self):
        """ The usual Kallen lambda function involved in a 2-body decay:
        lambda = mA^4 + mB^4 + mC^4 - 2 mA^2 mB^2 - 2 mA^2 mC^2 - 2 mB^2 mC^2.
        Returns sqrt(lambda).
        """
        lam = self.mA**4 + self.mB**4 + self.mC**4
        lam = lam - 2.0 * self.mA**2 * self.mB**2 - 2.0 * self.mA**2 * self.mC**2 \
              - 2.0 * self.mC**2 * self.mB**2
        return math.sqrt(lam)
class production_topo(dict):
    """Dictionary-like record of one topology of a production event.

    Keys:
      "branchings"   -- list of the branchings defining the topology
                        (see class branching)
      "get_mass2"    -- {index: mass**2 of the corresponding particle}
      "get_momentum" -- {index: momentum of the corresponding particle}
      "get_id"       -- {index: pid of the corresponding particle}
    Note: index = "madgraph-like" numbering of the particles.
    """

    def __init__(self, production, options):
        """Initialise the containers later filled with the information about
        the topology of a production event. "branchings" is kept as a list
        since that makes it easier to scan the topology in ascending order.
        """
        self["branchings"] = []
        for key in ("get_mass2", "get_momentum", "get_id"):
            self[key] = {}
        self.production = production
        self.options = options

    def add_one_branching(self, index_propa, index_d1, index_d2, type_propa):
        """Record one splitting (propagator -> d1 + d2) of the topology."""
        self["branchings"].append(
            branching(index_propa, index_d1, index_d2, type_propa))

    def print_topo(self):
        """Print a human-readable description of the topology."""
        for branch in self["branchings"]:
            print("%s > %s + %s , type=%s" % (branch["index_propa"],
                                              branch["index_d1"],
                                              branch["index_d2"],
                                              branch["type"]))
class AllMatrixElement(dict):
    """Object containing all the production topologies required for event to decay.
    This contains the routine to add a production topologies if needed.

    The dictionary maps a production tag -- the (initial, final) pdg ids as
    returned by get_initial_final_ids() -- to the associated topology
    dictionary (path, decays, total_br, tag2order, ...).
    """

    def __init__(self, banner, options, decay_ids, model):
        """Store run banner/options/model and the set of |pdg| codes that
        MadSpin is asked to decay."""
        dict.__init__(self)
        self.banner = banner
        self.options = options
        # store absolute pdg codes: particles and anti-particles are treated alike
        self.decay_ids = set([abs(id) for id in decay_ids])
        # set to True by add_me_symmetric when particle/anti-particle
        # ambiguity forces separate topology entries
        self.has_particles_ambiguity = False
        self.model = model

    def add(self, topologies, keys):
        """Adding one element to the list of production_topo:
        register the same topology object under every tag in *keys*."""
        for key in keys:
            self[key] = topologies

    def get_br(self, proc):
        """Return the branching ratio associated to the decay chains of *proc*
        (recursively), including the symmetry factor for identical decay
        branches."""
        # get the branching ratio associated to a process
        br = 1
        ids = collections.defaultdict(list) #check for identical decay
        for decay in proc.get('decay_chains'):
            init, final = decay.get_initial_final_ids()
            lhaid = tuple([len(final)] + [x for x in final])
            ids[init[0]].append(decay)
            if init[0] in self.banner.param_card['decay'].decay_table:
                br *= self.banner.param_card['decay'].decay_table[init[0]].get(lhaid).value
                br *= self.get_br(decay)
            elif -init[0] in self.banner.param_card['decay'].decay_table:
                # only the anti-particle has a decay table: conjugate the
                # final state (self-conjugate daughters keep their pdg code)
                init = -init[0]
                lhaid=[x if self.model.get_particle(x)['self_antipart'] else -x
                       for x in final]
                lhaid.sort()
                lhaid = tuple([len(final)] + lhaid)
                br *= self.banner.param_card['decay'].decay_table[init].get(lhaid).value
                br *= self.get_br(decay)
            elif init[0] not in self.decay_ids and -init[0] not in self.decay_ids:
                # particle not requested for decay: no BR factor applied
                logger.warning("No Branching ratio applied for %s. Please check if this is expected" % init[0])
                br *= self.get_br(decay)
            else:
                raise MadGraph5Error("No valid decay for %s. No 2 body decay for that particle. (three body are not supported by MadSpin)" % init[0])

        # symmetry factor: n!/prod(k_i!) for n branches of the same mother
        # split into groups of k_i identical decays
        for decays in ids.values():
            if len(decays) == 1:
                continue
            br *= math.factorial(len(decays))
            while decays:
                nb=1
                curr = decays.pop()
                while 1:
                    try:
                        decays.remove(curr)
                        nb+=1
                    except ValueError:
                        break
                br /= math.factorial(nb)
        return br

    def add_decay(self, proc_list, me_path):
        """ adding a decay to the possibility
        me_path is the path of the fortran directory
        br is the associate branching ratio
        finals is the list of final states equivalent to this ME
        matrix_element is the MG5 matrix element
        """
        # Usefull debug code tell the status of the various imported decay
        # (the accumulated `text` is intentionally never used)
        text = ''
        data = []
        for key, prod in self.items():
            if prod in data:
                continue
            data.append(prod)
            br = prod['total_br']
            if br:
                text += '%s %s %s\n' %(key, os.path.basename(prod['path']), br)

        if not isinstance(proc_list, list):
            proc_list = proc_list.get('processes')
        tag = proc_list[0].get_initial_final_ids()
        # process with decay not compatible with tag [process under consideration]
        # or with process splitting different for process and decay.
        to_postpose = [proc for proc in proc_list
                       if id(self[tag]) != id(self[proc.get_initial_final_ids()])]
        finals = []
        for proc in proc_list:
            succeed = True # check if the decay is compatible with the process
                           # under consideration.
            tmp = []
            for dproc in proc.get('decay_chains'):
                pid = dproc.get('legs')[0].get('id')
                # check that the pid correspond -> if not postpone the process
                # to be added in a second step (happen due to symmetry)
                if pid not in tag[1]:
                    to_postpose.append(proc)
                    succeed= False
                    break
                tmp.append((pid,dproc.get_final_ids_after_decay()))
            if succeed and tmp not in finals:
                finals.append(tmp)
        # Treat Not compatible decay.
        if to_postpose:
            self.add_decay(to_postpose, me_path)
        #
        # Now that the various final states are computed, we can add the
        # associated decay. First computing the branching ratio and then
        # decaying topology
        me = proc_list[0] # all the other are symmetric -> no need to keep those
        decay_tags = [d.shell_string(pdg_order=True) for d in me['decay_chains']]
        #avoid duplicate
        if any(tuple(decay_tags)==t['decay_tag'] for t in self[tag]['decays']):
            return
        # the decay:
        # NOTE(review): 'br' uses `proc`, the last process left over from the
        # loop above -- presumably equivalent to proc_list[0] by symmetry;
        # TODO confirm.
        out = {'path': me_path,
               'matrix_element': me,
               'br': len(finals) * self.get_br(proc),
               'finals': finals,
               'base_order':[l.get('id') for l in me.get_legs_with_decays()] ,
               'decay_struct':self.get_full_process_structure(proc_list),
               'decay_tag': tuple(decay_tags)}
        # adding it to the current object
        self[tag]['decays'].append(out)
        self[tag]['total_br'] += out['br']
        # update the particle decaying in the process
        decaying = [m.get('legs')[0].get('id') for m in me.get('decay_chains')]
        decaying.sort()
        self[tag]['decaying'] = tuple(decaying)
        # sanity check
        assert self[tag]['total_br'] <= 1.01, "wrong BR for %s: %s " % (tag,self[tag]['total_br'])

    def get_full_process_structure(self, me_list):
        """ return a string with the definition of the process fully decayed
        and also a list of dc_branch objects with all infomation about the topology
        of each decay branch
        """
        me = me_list[0]
        decay_struct = {}
        # mother pid -> list of (position, decay process) still to be assigned
        to_decay = collections.defaultdict(list)
        for i, proc in enumerate(me.get('decay_chains')):
            pid = proc.get('legs')[0].get('id')
            to_decay[pid].append((i,proc))
        for leg in me.get('legs'):
            pid = leg.get('id')
            nb = leg.get('number')
            if pid in to_decay:
                i, proc = to_decay[pid].pop()
                decay_struct[nb] = dc_branch_from_me(proc)
                # register the matching decay chains of the symmetric
                # processes as alternative ids for this branch
                identical = [me.get('decay_chains')[i] for me in me_list[1:]]
                decay_struct[nb].add_decay_ids(identical)
        return decay_struct

    def get_topologies(self, matrix_element):
        """Extraction of the phase-space self.topologies from mg5 matrix elements
        This is used for the production matrix element only.
        the routine is essentially equivalent to write_configs_file_from_diagrams
        except that I don't write the topology in a file,
        I record it in an object production_topo (the class is defined above in this file)
        """
        # Extract number of external particles
        ( nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
        del nexternal
        preconfigs = [(i+1, d) for i,d in enumerate(matrix_element.get('diagrams'))]
        mapconfigs = [c[0] for c in preconfigs]
        configs=[[c[1]] for c in preconfigs]
        model = matrix_element.get('processes')[0].get('model')

        topologies ={} # dictionnary {mapconfig number -> production_topology}
                       # this is the object to be returned at the end of this routine
        s_and_t_channels = []
        vert_list = [max([d for d in config if d][0].get_vertex_leg_numbers()) \
                       for config in configs if [d for d in config if d][0].\
                       get_vertex_leg_numbers()!=[]]
        minvert = min(vert_list) if vert_list!=[] else 0
        # Number of subprocesses
        #    nsubprocs = len(configs[0])
        nconfigs = 0
        new_pdg = model.get_first_non_pdg()
        for iconfig, helas_diags in enumerate(configs):
            if any([vert > minvert for vert in
                    [d for d in helas_diags if d][0].get_vertex_leg_numbers()]):
                # Only 3-vertices allowed in configs.inc
                continue
            nconfigs += 1
            # Need s- and t-channels for all subprocesses, including
            # those that don't contribute to this config
            empty_verts = []
            stchannels = []
            for h in helas_diags:
                if h:
                    # get_s_and_t_channels gives vertices starting from
                    # final state external particles and working inwards
                    stchannels.append(h.get('amplitudes')[0].\
                                      get_s_and_t_channels(ninitial, model, new_pdg))
                else:
                    stchannels.append((empty_verts, None))
            # For t-channels, just need the first non-empty one
            tchannels = [t for s,t in stchannels if t != None][0]
            # For s_and_t_channels (to be used later) use only first config
            s_and_t_channels.append([[s for s,t in stchannels if t != None][0],
                                     tchannels])
            # Make sure empty_verts is same length as real vertices
            if any([s for s,t in stchannels]):
                empty_verts[:] = [None]*max([len(s) for s,t in stchannels])
                # Reorganize s-channel vertices to get a list of all
                # subprocesses for each vertex
                schannels = list(zip(*[s for s,t in stchannels]))
            else:
                schannels = []
            allchannels = schannels
            if len(tchannels) > 1:
                # Write out tchannels only if there are any non-trivial ones
                allchannels = schannels + tchannels
            # Write out propagators for s-channel and t-channel vertices
            # use the AMP2 index to label the self.topologies
            tag_topo=mapconfigs[iconfig]
            topologies[tag_topo]=production_topo(topologies, self.options)
            for verts in allchannels:
                if verts in schannels:
                    vert = [v for v in verts if v][0]
                else:
                    vert = verts
                daughters = [leg.get('number') for leg in vert.get('legs')[:-1]]
                last_leg = vert.get('legs')[-1]
                if verts in schannels:
                    type_propa="s"
                elif verts in tchannels[:-1]:
                    type_propa="t"
                # NOTE(review): type_propa is never reset inside this loop, so
                # for the LAST t-channel vertex (which matches neither test
                # above) the value left over from the previous iteration is
                # reused -- TODO confirm this is intended.
                if (type_propa):
                    topologies[tag_topo].add_one_branching(last_leg.get('number'),\
                                             daughters[0],daughters[1],type_propa)
        return topologies

    def get_decay_from_tag(self, production_tag, decay_tag):
        """Return the registered decay whose 'decay_tag' equals *decay_tag*;
        raise if no such decay was added for this production tag."""
        for decay in self[production_tag]['decays']:
            if decay['decay_tag']==decay_tag: return decay
        msg = 'Unable to retrieve decay from decay_tag\n%s\n%s' %(production_tag, decay_tag)
        raise Exception(msg)

    def get_random_decay(self, production_tag,first=[]):
        """select randomly a decay channel
        (the mutable default `first` is an intentional run-once flag: the
        consistency check below is performed a single time per run)"""
        a = random.random() * self[production_tag]['total_br']
        if __debug__:
            if not first:
                sum = 0
                for decay in self[production_tag]['decays']:
                    sum += decay['br']
                # NOTE(review): exact float equality -- holds only because
                # both sides accumulate the very same 'br' values
                assert sum == self[production_tag]['total_br']
                first.append(1)
        sum = 0
        for decay in self[production_tag]['decays']:
            sum += decay['br']
            if a < sum:
                return decay
        # NOTE(review): implicitly returns None if rounding pushes `a`
        # above the accumulated sum -- TODO confirm callers handle this.

    def adding_me(self, matrix_elements, path):
        """Adding one element to the list based on the matrix element"""
        for me in matrix_elements:
            skip = [] # due to particles/anti-particles some me need to be add
                      # as a separate matrix element in the instance.
            topo = self.get_topologies(me)
            # get the orignal order:
            # (list.append returns None, so the `or initial.append(...)`
            # trick collects initial-state ids as a side effect while the
            # comprehension keeps only final-state ids)
            initial = []
            final = [l.get('id') for l in me.get('processes')[0].get('legs')\
                      if l.get('state') or initial.append(l.get('id'))]
            decaying_base = [id for id in final if abs(id) in self.decay_ids]
            decaying_base.sort()
            topo['base_order'] = (initial , final)
            #    topo['matrix_element'] = me
            tags = []
            topo['tag2order'] = {}
            for proc in me.get('processes'): # set of processes accounted by the me
                initial = []
                final = [l.get('id') for l in proc.get('legs')\
                      if l.get('state') or initial.append(l.get('id'))]
                decaying = [id for id in final if abs(id) in self.decay_ids]
                decaying.sort()
                if decaying != decaying_base:
                    # different set of decaying particles (particle vs
                    # anti-particle): handled separately below
                    skip.append(proc)
                    continue
                topo['decaying'] = ()
                tags.append(proc.get_initial_final_ids())
                topo['tag2order'][tags[-1]] = (initial , final)
            if tags[0] not in self:
                self.add(topo, tags) # mens self[tag]=topo for each tag in tags
            topo['path'] = pjoin(path, 'SubProcesses',
                          'P%s' % me.get('processes')[0].shell_string())
            topo['decays'] = []
            topo['total_br'] = 0
            # NOTE(review): uses `proc`, the last process of the loop above --
            # presumably all processes of one me share the same id; TODO confirm.
            topo['Pid'] = proc.get('id')
            if skip:
                self.add_me_symmetric(skip, topo)

    def add_me_symmetric(self, process_list, topo):
        """Register (under their own tags) the processes whose set of decaying
        particles differs from the base one (particle/anti-particle
        ambiguity), using a partially-unlinked copy of *topo*."""
        self.has_particles_ambiguity = True
        skip = [] # due to particles/anti-particles some may need to be add
                  # as a separate matrix element in the instance.
        old_topo = topo
        topo = dict(topo) #change the main pointer
        topo['decays'] = [] # unlink information which need to be different.
        topo['total_br'] = 0 #
        topo['tag2order'] = {}
        topo['decaying'] = ()
        # integer keys hold the per-config production_topo objects: copy them
        # so the new entry does not share them with the base one
        for key in topo.keys():
            if isinstance(key, int):
                topo[key] = copy.copy(topo[key])
        assert id(old_topo) != id(topo)
        assert id(topo['decays']) != id(old_topo['decays'])
        tags = []
        for i, proc in enumerate(process_list):
            initial = []
            final = [l.get('id') for l in proc.get('legs')\
                      if l.get('state') or initial.append(l.get('id'))]
            decaying = [pid for pid in final if abs(pid) in self.decay_ids]
            decaying.sort()
            if i == 0:
                decaying_base = decaying
            if decaying != decaying_base:
                # yet another set of decaying particles: recurse below
                skip.append(proc)
                continue
            tags.append(proc.get_initial_final_ids())
            topo['tag2order'][tags[-1]] = (initial , final)
        if tags[0] not in self:
            self.add(topo, tags)
        # re-point the per-config topologies to the registered entry
        for key in topo.keys():
            if isinstance(key, int):
                topo[key].production = self[tags[0]]
        if skip:
            self.add_me_symmetric(skip, topo)
class branching(dict):
    """Record of a single branching inside a production event.

    Entries:
      "type"        -- "s" for an s-channel or "t" for a t-channel branching
      "index_d1"    -- mg index of the first daughter
      "index_d2"    -- mg index of the second daughter
      "index_propa" -- mg index of the propagator
    ("invariant", the p^2 of the branching, is filled in later by the
    reshuffling machinery.)
    """

    def __init__(self, index_propa, index_d1, index_d2, s_or_t):
        """Store the daughter/propagator indices and the channel type
        (keyword order preserves the original key insertion order)."""
        dict.__init__(self, index_d1=index_d1, index_d2=index_d2,
                      index_propa=index_propa, type=s_or_t)
class width_estimate(object):
    """All methods used to calculate branching fractions"""

    def __init__(self, resonances, path_me, banner, model, pid2label):
        """Store the inputs needed to compute/collect branching fractions.

        resonances -- labels of the resonances whose decays are needed
        path_me    -- working directory for the matrix-element codes
        banner     -- run banner (gives access to the param_card)
        model      -- the MG5 model object
        pid2label  -- conversion dictionary; NOTE(review): the same object is
                      used for both directions below, so it presumably maps
                      pid->label AND label->pid -- TODO confirm.
        """
        self.resonances = resonances
        self.path_me = path_me
        self.pid2label = pid2label
        self.label2pid = self.pid2label
        self.model = model
        self.banner = banner

    def update_branch(self, branches, to_add):
        """Complete the definition of the branches by appending each element
        of *to_add*: the cartesian product of the two dictionaries, with
        concatenated tags/configs and multiplied branching ratios."""
        newbranches = {}
        for item1 in branches.keys():
            for item2 in to_add.keys():
                tag = item1 + item2
                newbranches[tag] = {}
                newbranches[tag]['config'] = branches[item1]['config'] + to_add[item2]['config']
                newbranches[tag]['br'] = branches[item1]['br'] * to_add[item2]['br']
        return newbranches

    def extract_br(self, decay_processes, mgcmd):
        """Find a way to get the branching ratio. (depending of the model/cards)
        Returns self.br, the {label: [channel dict, ...]} map."""
        # calculate which br are interesting to compute.
        to_decay = set(decay_processes.keys())
        for decays in decay_processes.values():
            for decay in decays:
                if ',' in decay:
                    # nested decay chain: the mother of each sub-branching
                    # needs its branching fractions as well
                    to_decay.update(set([l.split('>')[0].strip()
                                  for l in decay.replace('(', '').replace(')', '').split(',')]))
        # Maybe the branching fractions are already given in the banner:
        self.extract_br_from_banner(self.banner)
        # expand multiparticle labels into their actual particle content
        to_decay = list(to_decay)
        for part in to_decay[:]:
            if part in mgcmd._multiparticles:
                to_decay += [self.pid2label[id] for id in mgcmd._multiparticles[part]]
                to_decay.remove(part)
        # keep only the particles whose BR is still unknown
        to_decay = list(set([p for p in to_decay if not p in self.br]))
        if to_decay:
            logger.info('We need to recalculate the branching fractions for %s' % ','.join(to_decay))
            if hasattr(self.model.get('particles')[0], 'partial_widths'):
                logger.info('using the FeynRules formula present in the model (arXiv:1402.1178)')
            else:
                logger.info('Using MadWidth (arXiv:1402.1178)')
            self.launch_width_evaluation(to_decay, mgcmd)
        return self.br

    def get_BR_for_each_decay(self, decay_processes, multiparticles):
        """ get the list for possible decays & the associated branching fraction

        Parse each decay string of *decay_processes* token by token, expand
        multiparticle labels, and build the dictionary
        {part: {tag: {'config': process string, 'br': value}}}.
        Returns 0 when a needed branching fraction is unknown.
        """
        model = self.model
        base_model = self.model
        pid2label = self.pid2label
        ponctuation = [',', '>', ')', '(']
        new_decay_processes = {}

        for part in decay_processes.keys():
            branch_list = decay_processes[part].split()
            # start from a single empty branch with unit weight
            new_decay_processes[part] = {}
            new_decay_processes[part][''] = {}
            new_decay_processes[part]['']['config'] = ""
            new_decay_processes[part]['']['br'] = 1.0

            initial = ""
            final = []
            for index, item in enumerate(branch_list):
                # First get the symbol at the next position
                if index < len(branch_list) - 1:
                    next_symbol = branch_list[index + 1]
                else:
                    next_symbol = ''
                # Then handle the symbol item case by case
                if next_symbol == '>':  # case 1: a particle initiating a branching
                    initial = item
                    if item not in [particle['name'] for particle in base_model['particles']] \
                        and item not in [particle['antiname'] for particle in base_model['particles']]:
                        # bugfix: concatenating the Model object to a str used
                        # to raise TypeError instead of this message
                        raise Exception("No particle %s in the model %s" % (item, model))
                    continue
                elif item == '>':
                    continue  # case 2: we have the > symbol
                elif item not in ponctuation:  # case 3: particle from a branching
                    final.append(item)
                    if next_symbol == '' or next_symbol in ponctuation:
                        # end of a splitting, verify that it exists
                        if initial not in list(self.br.keys()):
                            logger.debug('Branching fractions of particle ' + initial + ' are unknown')
                            return 0
                        if len(final) > 2:
                            raise Exception('splittings different from A > B +C are currently not implemented ')
                        # expand possible multiparticle labels of the daughters
                        if final[0] in list(multiparticles.keys()):
                            set_B = [pid2label[pid] for pid in multiparticles[final[0]]]
                        else:
                            if final[0] not in [particle['name'] for particle in base_model['particles']] \
                                and final[0] not in [particle['antiname'] for particle in base_model['particles']]:
                                # bugfix: report the particle actually tested
                                raise Exception("No particle %s in the model " % final[0])
                            set_B = [final[0]]
                        if final[1] in list(multiparticles.keys()):
                            set_C = [pid2label[pid] for pid in multiparticles[final[1]]]
                        else:
                            if final[1] not in [particle['name'] for particle in base_model['particles']] \
                                and final[1] not in [particle['antiname'] for particle in base_model['particles']]:
                                # bugfix: report the particle actually tested
                                # and avoid the str+Model TypeError
                                raise Exception("No particle %s in the model %s" % (final[1], model))
                            set_C = [final[1]]

                        # collect the known channels matching initial > B C
                        splittings = {}
                        for chan in range(len(self.br[initial])):  # loop over all channels
                            got_it = 0
                            for d1 in set_B:
                                for d2 in set_C:
                                    if ((d1 == self.br[initial][chan]['daughters'][0] and
                                         d2 == self.br[initial][chan]['daughters'][1]) or
                                        (d2 == self.br[initial][chan]['daughters'][0] and
                                         d1 == self.br[initial][chan]['daughters'][1])):
                                        split = " " + initial + " > " + d1 + " " + d2 + " "
                                        # For the tag we need to order d1 d2, so that
                                        # equivalent tags can be correctly identified
                                        list_daughters = sorted([d1, d2])
                                        tag_split = "|" + initial + ">" + list_daughters[0] + list_daughters[1]
                                        splittings[tag_split] = {}
                                        splittings[tag_split]['config'] = split
                                        splittings[tag_split]['br'] = self.br[initial][chan]['br']
                                        got_it = 1
                                        break  # to avoid double counting in cases such as w+ > j j
                                if got_it:
                                    break
                        if len(splittings) == 0:
                            logger.info('Branching ' + initial + ' > ' + final[0] + ' ' + final[1])
                            logger.info('is currently unknown')
                            return 0
                        else:
                            new_decay_processes[part] = self.update_branch(new_decay_processes[part], splittings)
                        # bugfix: reset for the next splitting; the original
                        # assigned to a misspelled variable ('inital'), so the
                        # previous mother leaked into the next branching
                        initial = ""
                        final = []
                else:  # case 4: ponctuation symbol outside a splitting
                    # just append it to all the current branches
                    fake_splitting = {}
                    fake_splitting[''] = {}
                    fake_splitting['']['br'] = 1.0
                    fake_splitting['']['config'] = item
                    new_decay_processes[part] = self.update_branch(new_decay_processes[part], fake_splitting)

        return new_decay_processes

    def print_branching_fractions(self):
        """ print a list of all known branching fractions"""
        for res in self.br.keys():
            logger.info(' ')
            logger.info('decay channels for '+res+' : ( width = '
                        +str(self.width_value[res])+' GeV )')
            logger.info('    BR                 d1  d2' )
            for decay in self.br[res]:
                bran = decay['br']
                d1 = decay['daughters'][0]
                d2 = decay['daughters'][1]
                logger.info('   %e            %s  %s ' % (bran, d1, d2) )
            logger.info(' ')

    def print_partial_widths(self):
        """ print a list of all known partial widths"""
        for res in self.br.keys():
            logger.info(' ')
            logger.info('decay channels for '+res+' :')
            logger.info('    width                 d1  d2' )
            for chan, decay in enumerate(self.br[res]):
                width = self.br[res][chan]['width']
                d1 = self.br[res][chan]['daughters'][0]
                d2 = self.br[res][chan]['daughters'][1]
                logger.info('   %e            %s  %s ' % (width, d1, d2) )
            logger.info(' ')

    def extract_br_from_width_evaluation(self, to_decay):
        """ use MadGraph5_aMC@NLO to generate me's for res > all all
        Deprecated entry point: everything below the raise is dead code,
        kept only for reference.
        """
        raise DeprecationWarning
        if os.path.isdir(pjoin(self.path_me, "width_calculator")):
            shutil.rmtree(pjoin(self.path_me, "width_calculator"))
        assert not os.path.exists(pjoin(self.path_me, "width_calculator"))
        path_me = self.path_me
        label2pid = self.label2pid
        # first build a set resonances with pid>0
        pids = set([abs(label2pid[name]) for name in to_decay])
        particle_set = [label2pid[pid] for pid in pids]
        modelpath = self.model.get('modelpath')
        if os.path.basename(modelpath) != self.model['name']:
            name, restrict = self.model['name'].rsplit('-', 1)
            if os.path.exists(pjoin(os.path.dirname(modelpath), name, 'restrict_%s.dat' % restrict)):
                modelpath = pjoin(os.path.dirname(modelpath), self.model['name'])
        commandline = "import model %s\n" % modelpath
        commandline += "generate %s > all all \n" % particle_set[0]
        commandline += "set automatic_html_opening False --no_save\n"
        if len(particle_set) > 1:
            for index in range(1, len(particle_set)):
                commandline += "add process %s > all all \n" % particle_set[index]
        commandline += "output %s/width_calculator -f \n" % path_me

        aloha.loop_mode = False
        aloha.unitary_gauge = False
        cmd = Cmd.MasterCmd()
        for line in commandline.split('\n'):
            cmd.run_cmd(line)
        # WRONG Needs to takes the param_card from the input files.
        ff = open(pjoin(path_me, 'width_calculator', 'Cards', 'param_card.dat'), 'w')
        ff.write(self.banner['slha'])
        ff.close()
        lhapdf = False
        if 'lhapdf' in os.environ:
            lhapdf = os.environ['lhapdf']
            del os.environ['lhapdf']
        # run but remove the pdf dependencies
        cmd.import_command_file(['launch',
                                 'set lpp1 0',
                                 'set lpp2 0',
                                 'done'])
        if lhapdf:
            os.environ['lhapdf'] = lhapdf
        filename = pjoin(path_me, 'width_calculator', 'Events', 'run_01', 'param_card.dat')
        self.extract_br_from_card(filename)

    def extract_br_for_antiparticle(self):
        '''
        for each channel with a specific br value,
        set the branching fraction of the complex conjugated channel
        to the same br value
        '''
        label2pid = self.label2pid
        pid2label = self.label2pid
        for res in list(self.br.keys()):
            particle = self.model.get_particle(label2pid[res])
            if particle['self_antipart']:
                continue
            anti_res = pid2label[-label2pid[res]]
            self.br[anti_res] = []
            if res in self.width_value:
                self.width_value[anti_res] = self.width_value[res]
            elif anti_res in self.width_value:
                self.width_value[res] = self.width_value[anti_res]
                # NOTE(review): after the swap the channels are copied FROM
                # the particle that already had a width entry -- TODO confirm
                # self.br[anti_res(old res)] gets filled as intended here.
                res, anti_res = anti_res, res
            for chan, decay in enumerate(self.br[res]):
                self.br[anti_res].append({})
                bran = decay['br']
                d1 = decay['daughters'][0]
                d2 = decay['daughters'][1]
                d1bar = pid2label[-label2pid[d1]]
                d2bar = pid2label[-label2pid[d2]]
                self.br[anti_res][chan]['br'] = bran
                self.br[anti_res][chan]['daughters'] = []
                self.br[anti_res][chan]['daughters'].append(d1bar)
                self.br[anti_res][chan]['daughters'].append(d2bar)
                if 'width' in decay:
                    self.br[anti_res][chan]['width'] = decay['width']

    def launch_width_evaluation(self, resonances, mgcmd):
        """ launch the calculation of the partial widths """
        label2pid = self.label2pid
        model = self.model
        # first build a set of resonances with pid > 0,
        # since compute_width cannot be used for particles with pid < 0
        particle_set = set()
        for part in resonances:
            if part in mgcmd._multiparticles:
                for pid in mgcmd._multiparticles[part]:
                    particle_set.add(abs(pid))
                continue
            pid_part = abs(label2pid[part])
            particle_set.add(abs(pid_part))
        particle_set = list(particle_set)
        # compute_widths updates param_card.dat in place
        argument = {'particles': particle_set,
                    'path': pjoin(self.path_me, 'param_card.dat'),
                    'output': pjoin(self.path_me, 'param_card.dat'),
                    'body_decay': 2}
        self.compute_widths(model, argument)
        self.extract_br_from_card(pjoin(self.path_me, 'param_card.dat'))
        # refresh the banner copy of the card so later steps see the new widths
        self.banner['slha'] = open(pjoin(self.path_me, 'param_card.dat')).read()
        if hasattr(self.banner, 'param_card'):
            del self.banner.param_card
        self.banner.charge_card('param_card')
        return

    def compute_widths(self, model, opts):
        """Run the MG5 'compute_widths' command for opts['particles'] and
        write the resulting widths into the card at opts['path']/['output']."""
        from madgraph.interface.master_interface import MasterCmd
        import madgraph.iolibs.helas_call_writers as helas_call_writers
        cmd = MasterCmd()
        cmd.exec_cmd('set automatic_html_opening False --no_save')
        if not opts['path']:
            # NOTE(review): self.me_dir is not set by this class -- this
            # fallback presumably relies on a subclass/mixin; TODO confirm.
            opts['path'] = pjoin(self.me_dir, 'Cards', 'param_card.dat')
        # bugfix: 'force' is not always provided (launch_width_evaluation
        # does not pass it) -- default to the forced/non-interactive mode
        # instead of raising KeyError
        if not opts.get('force', True):
            self.ask_edit_cards(['param_card'], [], plot=False)

        commandline = 'import model %s' % model.get('modelpath+restriction')
        if not model.mg5_name:
            commandline += ' --modelname'
        cmd.exec_cmd(commandline)
        # forward all remaining options as --key=value flags
        line = 'compute_widths %s %s' % \
               (' '.join([str(i) for i in opts['particles']]),
                ' '.join('--%s=%s' % (key, value) for (key, value) in opts.items()
                         if key not in ['model', 'force', 'particles'] and value))
        # pattern for checking complex mass scheme.
        has_cms = re.compile(r'''set\s+complex_mass_scheme\s*(True|T|1|true|$|;)''', re.M)
        force_CMS = has_cms.search(self.banner['mg5proccard'])
        if force_CMS:
            cmd.exec_cmd('set complex_mass_scheme')
        cmd.exec_cmd(line)
        del cmd

    def extract_br_from_banner(self, banner):
        """get the branching ratio from the banner object:
        for each resonance with label 'res', and for each channel with index i,
        returns a dictionary branching_fractions[res][i]
        with keys
        'daughters' : label of the daughters (only 2 body)
        'br' : value of the branching fraction"""
        self.br = {}
        # read the param_card internally to the banner
        if not hasattr(banner, 'param_card'):
            banner.charge_card('param_card')
        param_card = banner.param_card
        return self.extract_br_from_card(param_card)

    def extract_br_from_card(self, param_card):
        """get the branching ratio from a param_card (path or ParamCard):
        for each resonance with label 'res', and for each channel with index i,
        returns a dictionary branching_fractions[res][i]
        with keys
        'daughters' : label of the daughters (only 2 body)
        'br' : value of the branching fraction"""
        if isinstance(param_card, str):
            import models.check_param_card as check_param_card
            param_card = check_param_card.ParamCard(param_card)

        if 'decay' not in param_card or not hasattr(param_card['decay'], 'decay_table'):
            return self.br

        self.width_value = {}
        for id, data in param_card['decay'].decay_table.items():
            # check if the new width is close to the one originally in the banner
            recalculated_width = param_card['decay'].param_dict[(id,)].value
            width_in_the_banner = self.banner.get('param_card', 'decay', abs(id)).value
            relative_diff = abs(recalculated_width - width_in_the_banner) / recalculated_width
            if (relative_diff > 0.05):
                logger.warning('The LO estimate for the width of particle %s ' % id)
                logger.warning('differs from the one in the banner by %d percent ' % (relative_diff*100))
            label = self.pid2label[id]
            current = []  # tmp name for self.br[label]
            for parameter in data:
                # lhacode[0] == 2 selects the two-body decay entries
                if parameter.lhacode[0] == 2:
                    d = [self.pid2label[pid] for pid in parameter.lhacode[1:]]
                    current.append({'daughters': d, 'br': parameter.value})
            self.br[label] = current
            self.width_value[label] = recalculated_width
        # update the banner:
        self.banner['slha'] = param_card.write(None)
        self.banner.param_card = param_card
        self.extract_br_for_antiparticle()
        return self.br
class decay_misc:
    """class with various helper methods for the decay"""

    @staticmethod
    def get_all_resonances(banner, mgcmd, allowed):
        """ return a list of labels of each resonance involved in the decay chain
        i.e. the subset of *allowed* that actually appears in the final state
        of one of the generated processes, with multiparticle labels expanded
        into their particle content."""
        allowed = list(allowed)
        found = set()
        alias = {} # if an allowed particle is inside a multiparticle
        # look at the content of the multiparticles in order to know which one
        # we need to track.
        multiparticles = mgcmd._multiparticles
        model = mgcmd._curr_model
        for name, content in multiparticles.items():
            curr = [model.get_particle(id).get('name')
                    for id in content
                    if model.get_particle(id).get('name') in allowed]
            # bugfix: register the alias when the multiparticle contains at
            # least one allowed particle; the previous test `if found:` was
            # always False here (nothing has been added to `found` yet), so
            # multiparticle aliases were never tracked.
            if curr:
                alias[name] = set(curr)
        # Now look which of the possible decay that we need to look at are indeed
        # present in the final state of one process.
        for line in banner.proc_card:
            # bugfix: str.strip() returns a new string; the result was dropped
            line = line.strip()
            if line.startswith('generate') or line.startswith('add process'):
                final_process = re.split(r'>.*>|>|[\$/,@\[]', line)[1]
                # iterate over a copy: entries are removed while looping
                for search in allowed[:]:
                    if search in final_process:
                        found.add(search)
                        allowed.remove(search)
                # iterate over a snapshot: entries are deleted while looping
                # (deleting during dict iteration raises RuntimeError in py3)
                for search, data in list(alias.items()):
                    if search in final_process:
                        found.update(data)
                        del alias[search]
        # treat multiparticles
        finalfound = set()
        for name in found:
            if name in mgcmd._multiparticles:
                finalfound.update([model.get_particle(id).get('name')
                                   for id in mgcmd._multiparticles[name]])
                finalfound.discard(name)
            else:
                finalfound.add(name)
        return finalfound

    def reorder_branch(self, branch):
        """ branch is a string with the definition of a decay chain
        If branch contains " A > B C , B > ... "
        reorder into " A > C B , B > ... "
        i.e. put the daughter that decays further in the last position.
        Returns (reordered string with trailing blank, first token).
        """
        branch = branch.replace(',', ' , ')
        branch = branch.replace(')', ' ) ')
        branch = branch.replace('(', ' ( ')
        list_branch = branch.split(" ")
        # drop the empty tokens left over by the split
        for index in range(len(list_branch) - 1, -1, -1):
            if list_branch[index] == ' ' or list_branch[index] == '':
                del list_branch[index]
        for index, item in enumerate(list_branch):
            if item[-1] == "," and list_branch[index + 1] != "(":
                # search pos of B and C, skipping over 'key=value' options
                counter = 1
                while 1:
                    if list_branch[index - counter].find("=") < 0:
                        break
                    counter += 1
                if list_branch[index - counter - 1] == list_branch[index + 1]:
                    # swap the two particles before the comma:
                    temp = list_branch[index - counter - 1]
                    list_branch[index - counter - 1] = list_branch[index - counter]
                    list_branch[index - counter] = temp
            if item[-1] == "," and list_branch[index + 1] == "(":
                # same, but the next decaying particle is announced by a '('
                counter = 1
                while 1:
                    if list_branch[index - counter].find("=") < 0:
                        break
                    counter += 1
                if list_branch[index - counter - 1] == list_branch[index + 2]:
                    # swap the two particles before the comma:
                    temp = list_branch[index - counter - 1]
                    list_branch[index - counter - 1] = list_branch[index - counter]
                    list_branch[index - counter] = temp
        new_branch = ""
        for item in list_branch:
            new_branch += item + " "
        return new_branch, list_branch[0]

    def set_light_parton_massless(self, topo):
        """ masses of light partons are set to zero for
        the evaluation of the matrix elements
        """
        light_partons = [21, 1, 2, 3]
        for part in topo["get_id"].keys():
            if abs(topo["get_id"][part]) in light_partons:
                topo["get_mass2"][part] = 0.0
        # need to check if last branch is a t-branching. If it is,
        # we need to update the value of branch["m2"]
        # since this will be used in the reshuffling procedure
        if len(topo["branchings"]) > 0:  # Exclude 2>1 topologies
            # NOTE(review): a topology with a single t-channel branching
            # would make branchings[-2] raise IndexError -- TODO confirm this
            # cannot happen for the topologies produced upstream.
            if topo["branchings"][-1]["type"] == "t":
                if topo["branchings"][-2]["type"] != "t":
                    logger.info('last branching is t-channel')
                    logger.info('but last-but-one branching is not t-channel')
                else:
                    part = topo["branchings"][-1]["index_d2"]
                    if part > 0:  # reset the mass only if "part" is an external particle
                        topo["branchings"][-2]["m2"] = math.sqrt(topo["get_mass2"][part])

    @staticmethod
    def modify_param_card(pid2widths, path_me):
        """Modify the param_card w/r to what is read from the banner:
        if the value of a width is set to zero in the banner,
        it is automatically set to its default value in this code
        (rewrite the DECAY lines of param_card.dat with the widths in
        *pid2widths*).
        """
        param_card = open(pjoin(path_me, 'param_card.dat'), 'r')
        new_param_card = ""
        while 1:
            line = param_card.readline()
            if line == "":
                break
            list_line = line.split()
            if len(list_line) > 2:
                if list_line[0] == "DECAY" and int(list_line[1]) in list(pid2widths.keys()):
                    # substitute the width value and rebuild the line
                    list_line[2] = str(pid2widths[int(list_line[1])])
                    line = ""
                    for item in list_line:
                        line += item + " "
                    line += "\n"
            new_param_card += line
        param_card.close()
        param_card = open(pjoin(path_me, 'param_card.dat'), 'w')
        param_card.write(new_param_card)
        param_card.close()

    def select_one_topo(self, prod_values):
        """Randomly select one topology with probability proportional to the
        individual diagram weights.

        prod_values[0] is the value of |M_prod|^2, prod_values[1], ... are
        the values of the individual diagrams (called AMP2 in mg).
        Returns (selected index, normalised cumulative list).
        """
        # first evaluate the cumulative distribution of the AMP2 values
        total = 0.0
        cumul = [0.0]
        for i in range(1, len(prod_values)):
            cumul.append(cumul[i - 1] + float(prod_values[i]))
            total += float(prod_values[i])
        for i in range(len(cumul)):
            cumul[i] = cumul[i] / total
        select_topo = random.random()
        # robustness: default to the last bin so that a random number falling
        # exactly on a bin edge (or above cumul[-1] because of rounding)
        # cannot leave good_topo undefined
        good_topo = len(cumul) - 1
        for i in range(1, len(cumul)):
            if select_topo > cumul[i - 1] and select_topo < cumul[i]:
                good_topo = i
                break
        return good_topo, cumul

    def get_final_state_compact(self, final_state_full):
        """Strip the decay chains from a process-definition string.
        Returns (final state without the decays, {decaying particle: branch
        info with keys 'finalstate' and 'branch'})."""
        dc_pos = final_state_full.find(",")
        if dc_pos > 0:
            branch_list = final_state_full.split(",")
            del branch_list[0]
            list_obj = final_state_full.split()
            final_state_compact = ""
            to_be_deleted = []
            for index, obj in enumerate(list_obj):
                if obj == ">":
                    # the token before each '>' is a decaying particle
                    to_be_deleted.append(list_obj[index - 1])
            for obj in list_obj:
                if obj != ">" and obj != "," and obj not in to_be_deleted:
                    final_state_compact += obj + " "
            branches = {}
            for branch in branch_list:
                list_part = branch.split()
                branches[list_part[0]] = {"finalstate": list_part[2:]}
                branches[list_part[0]]["branch"], dummy = self.reorder_branch(branch)
        else:
            final_state_compact = final_state_full
            branches = {}
        return final_state_compact, branches

    def get_mean_sd(self, list_obj):
        """ return the mean value and the standard deviation of a list of reals """
        total = 0.0
        N = float(len(list_obj))
        for item in list_obj:
            total += item
        mean = total / N
        sd = 0.0
        for item in list_obj:
            sd += (item - mean) ** 2
        if N > 1:
            sd = math.sqrt(sd / (N - 1.0))
        else:
            # a single measurement: use an ad-hoc 20% uncertainty
            sd = mean / 5.
        return mean, sd
class decay_all_events(object):
    def __init__(self, ms_interface, banner, inputfile, options):
        """Store all the components and organize special variables.

        Args:
            ms_interface: MadSpin command interface (gives access to the
                MG5 command, the model and the requested decay branches).
            banner: banner of the production run (cards, process info).
            inputfile: opened LHE event file with the production events.
            options: MadSpin options dict (seed, ms_dir, BW_cut, ...).

        Side effects: creates/cleans working directories under the MadSpin
        path, writes seeds.dat and param_card.dat, generates (or reloads
        from pickle) all required matrix elements and compiles them.
        """
        # input
        self.options = options
        #max_weight_arg = options['max_weight']
        self.path_me = os.path.realpath(options['curr_dir'])
        if options['ms_dir']:
            # a dedicated MadSpin directory overrides the current one
            self.path_me = os.path.realpath(options['ms_dir'])
            if not os.path.exists(self.path_me):
                os.mkdir(self.path_me)
        self.mgcmd = ms_interface.mg5cmd
        self.mscmd = ms_interface
        self.model = ms_interface.model
        self.banner = banner
        self.evtfile = inputfile
        self.curr_event = Event(self.evtfile, banner)
        self.inverted_decay_mapping={}
        self.width_estimator = None
        self.curr_dir = os.getcwd()
        # dictionary to fortan evaluator
        self.calculator = {}
        self.calculator_nbcall = {}
        # need to unbuffer all I/O in fortran, otherwise
        # the values of matrix elements are not passed to the Python script
        os.environ['GFORTRAN_UNBUFFERED_ALL']='y'
        # Remove old stuff from previous runs
        # so that the current run is not confused
        # Don't have to do that for gridpack / or if asked.
        if not (options["ms_dir"] or options["use_old_dir"]):
            if os.path.isdir(pjoin(self.path_me,"production_me")):
                shutil.rmtree(pjoin(self.path_me,"production_me"))
            if os.path.isdir(pjoin(self.path_me,"full_me")):
                shutil.rmtree(pjoin(self.path_me,"full_me"))
            if os.path.isdir(pjoin(self.path_me,"decay_me")):
                shutil.rmtree(pjoin(self.path_me,"decay_me"))
        # Prepare some dict usefull for optimize model imformation
        # pid -> label and label -> pid
        self.pid2label=pid2label(self.model)
        self.banner.check_pid(self.pid2label)
        self.pid2label.update(label2pid(self.model))
        self.pid2massvar={}
        self.pid2widthvar={}
        for part in self.model['particles']:
            self.pid2massvar[int(part['pdg_code'])]=part['mass']
            self.pid2widthvar[int(part['pdg_code'])]=part['width']
        # load the Monte Carlo masses
        self.MC_masses=self.get_MC_masses()
        # logger.info('Value of the Monte Carlo masses: ')
        # logger.info(self.MC_masses)
        # dictionary pid > color_rep
        self.pid2color = pid2color(self.model)
        # energy of the collider
        self.Ecollider=float(self.banner.get('run_card', 'ebeam1'))\
                +float(self.banner.get('run_card', 'ebeam2'))
        # write down the seed:
        seedfile=open(pjoin(self.path_me, 'seeds.dat'),'w')
        seedfile.write(' %s \n' % self.options['seed'])
        seedfile.close()
        # width and mass information will be filled up later
        self.pid2width = lambda pid: self.banner.get('param_card', 'decay', abs(pid)).value
        self.pid2mass = lambda pid: self.banner.get('param_card', 'mass', abs(pid)).value
        if os.path.isfile(pjoin(self.path_me,"param_card.dat")):
            os.remove(pjoin(self.path_me,"param_card.dat"))
        # now overwrite the param_card.dat in Cards:
        param_card=self.banner['slha']
        #param_card=decay_tools.check_param_card( param_card)
        # now we can write the param_card.dat:
        # Note that the width of each resonance in the
        # decay chain should be >0 , we will check that later on
        model_name = os.path.basename(self.model.get('name'))
        param=open(pjoin(self.path_me,'param_card.dat'),"w")
        param.write(param_card)
        param.close()
        if model_name == 'mssm' or model_name.startswith('mssm-'):
            #need to convert to SLHA2 format
            import models.check_param_card as check_param_card
            check_param_card.convert_to_mg5card(pjoin(self.path_me,'param_card.dat'))
        self.list_branches = ms_interface.list_branches
        decay_ids = [self.pid2label[key] for key in self.list_branches \
                     if key in self.pid2label]
        for multi in self.mgcmd._multiparticles:
            if multi in self.list_branches:
                decay_ids += self.mgcmd._multiparticles[multi]
        self.all_ME = AllMatrixElement(banner, self.options, decay_ids, self.model)
        self.all_decay = {}
        # generate BR and all the square matrix element based on the banner.
        pickle_info = pjoin(self.path_me,"production_me", "all_ME.pkl")
        if not options["use_old_dir"] or not os.path.exists(pickle_info):
            self.generate_all_matrix_element()
            self.save_to_file(pickle_info,
                               (self.all_ME,self.all_decay,self.width_estimator))
        else:
            try:
                self.all_ME, self.all_decay,self.width_estimator = save_load_object.load_from_file(pjoin(self.path_me,"production_me", "all_ME.pkl"))
            except Exception as error:
                # stale/corrupted pickle: regenerate everything from scratch
                logger.debug(str(error))
                self.generate_all_matrix_element()
                self.save_to_file(pickle_info,
                               (self.all_ME,self.all_decay,self.width_estimator))
        if not self.options["onlyhelicity"] and self.options['spinmode'] != 'onshell':
            resonances = self.width_estimator.resonances
            logger.debug('List of resonances: %s' % resonances)
            self.extract_resonances_mass_width(resonances)
        self.compile()
    def save_to_file(self, *args):
        """Forward all arguments to save_load_object.save_to_file.

        NOTE(review): presumably kept as an indirection point (e.g. for
        monkey-patching in tests) -- confirm before inlining at call sites.
        """
        return save_load_object.save_to_file(*args)
def get_MC_masses(self):
MC_masses={}
pid_heavyquarks=[4,5]
if 'montecarlomasses' in self.banner:
MC_masses_lines=self.banner['montecarlomasses'].split('\n')
for line in MC_masses_lines:
pidvalue=line.split()
if len(pidvalue)<2: continue # skip blank lines
pid=abs(int(pidvalue[0]))
value=float(pidvalue[1])
MC_masses[pid]=value
if pid in pid_heavyquarks:
value_ME=self.banner.get('param_card','mass', pid).value
if value_ME>1E-10:
if pid==5:
logger.warning('set the mass of the b-quark to its value in the param_card.dat: %s GeV ' % value_ME)
if pid==4:
logger.warning('set the mass of the c-quark to its value in the param_card.dat: %s GeV ' % value_ME)
MC_masses[pid]=value_ME
return MC_masses
def run(self):
"""Running the full code"""
max_weight_arg = self.options['max_weight']
decay_tools=decay_misc()
#Next step: we need to determine which matrix elements are really necessary
#==========================================================================
decay_mapping = self.get_identical_decay()
# also compute the inverted map, which will be used in the decay procedure
for tag in decay_mapping:
for equiv_decay in decay_mapping[tag]:
self.inverted_decay_mapping[equiv_decay[0]]=tag
self.mscmd.update_status('MadSpin: Estimate the maximum weight')
# Estimation of the maximum weight
#=================================
if max_weight_arg>0:
for key in self.all_ME:
for mode in self.all_ME['decays']:
mode['max_weight'] = max_weight_arg
elif self.options["onlyhelicity"]:
logger.info("not needed in helicity mode")
else:
#self.get_max_weight_from_1toN()
self.get_max_weight_from_event(decay_mapping)
# add probability of not writting events (for multi production with
# different decay
self.add_loose_decay()
# Store this object with all the associate number for gridpack:
if self.options['ms_dir']:
self.save_status_to_pickle()
self.ending_run()
def ending_run(self):
"""launch the unweighting and deal with final information"""
# launch the decay and reweighting
self.mscmd.update_status('MadSpin: Decaying Events')
efficiency = self.decaying_events(self.inverted_decay_mapping)
self.efficiency = efficiency
if efficiency != 1 and any(v==-1 for v in self.br_per_id.values()):
# need to change the banner information [nb_event/cross section]
files.cp(self.outputfile.name, '%s_tmp' % self.outputfile.name)
self.outputfile = open(self.outputfile.name, 'w')
self.write_banner_information(efficiency)
pos = self.outputfile.tell()
old = open('%s_tmp' % self.outputfile.name)
line=''
while '</init>' not in line:
line = old.readline()
self.outputfile.write(old.read())
files.rm('%s_tmp' % self.outputfile.name)
# Closing all run
self.terminate_fortran_executables()
if not self.options['ms_dir']:
shutil.rmtree(pjoin(self.path_me,'production_me'))
shutil.rmtree(pjoin(self.path_me,'full_me'))
if not self.options["onlyhelicity"]:
shutil.rmtree(pjoin(self.path_me,'decay_me'))
# set the environment variable GFORTRAN_UNBUFFERED_ALL
# to its original value
#os.environ['GFORTRAN_UNBUFFERED_ALL']='n'
def save_status_to_pickle(self):
import madgraph.iolibs.save_load_object as save_load_object
#don't store the event file in the pkl
evt_file, self.evtfile = self.evtfile, None
curr_event, self.curr_event = self.curr_event , None
mgcmd, self.mgcmd = self.mgcmd, None
mscmd, self.mscmd = self.mscmd , None
pid2mass, self.pid2mass = self.pid2mass, None
pid2width, self.pid2width = self.pid2width, None
#banner, self.banner = self.banner, None
#self.all_ME.banner = None
name = pjoin(self.options['ms_dir'], 'madspin.pkl')
save_load_object.save_to_file(name, self)
#restore the event file
self.evtfile = evt_file
self.curr_event = curr_event
self.mgcmd = mgcmd
self.mscmd = mscmd
self.pid2mass = pid2mass
self.pid2width = pid2width
#self.banner = banner
#self.all_ME.banner = banner
    def decaying_events(self,inverted_decay_mapping):
        """Perform the decay of each event of the input file.

        For every production event: pick a decay channel at random, send
        the kinematics to the fortran unweighting code, reshuffle the
        momenta for the off-shellness of the resonances and write out the
        decayed event.  Returns the fraction of input events actually
        written (events can be skipped for multi-production with
        different decays).
        """
        decay_tools = decay_misc()
        # tools for checking if max_weight is too often broken.
        report = collections.defaultdict(int,{'over_weight': 0})
        logger.info(' ' )
        logger.info('Decaying the events... ')
        self.outputfile = open(pjoin(self.path_me,'decayed_events.lhe'), 'w')
        self.write_banner_information()
        event_nb, fail_nb = 0, 0
        nb_skip = 0
        trial_nb_all_events=0
        starttime = time.time()
        nb_fail_mc_mass=0
        while 1: # loop on event file
            production_tag, event_map = self.load_event()
            if production_tag == 0 == event_map: #end of file
                break
            # progress report at every power of ten (at least every 1000)
            if event_nb and \
               (event_nb % max(int(10**int(math.log10(float(event_nb)))),1000)==0):
                running_time = misc.format_timer(time.time()-starttime)
                logger.info('Event nb %s %s' % (event_nb, running_time))
                self.mscmd.update_status(('$events',1,event_nb, 'decaying events'),
                                        force=False, print_log=False)
            if (event_nb==10001): logger.info('reducing number of print status. Next status update in 10000 events')
            if self.options["onlyhelicity"]:
                trial_nb, fail = self.adding_only_helicity(event_map, production_tag)
                trial_nb_all_events+=trial_nb
                fail_nb += fail
                event_nb += 1
                continue
            # Here we need to select a decay configuration on a random basis:
            decay = self.all_ME.get_random_decay(production_tag)
            if not decay['decay_tag']:
                #Not writting events due to global reweighting
                nb_skip +=1
                continue
            else:
                # for the matrix element, identify the master decay channel to which 'decay' is equivalent:
                decay_tag_me=inverted_decay_mapping[decay['decay_tag']]
                try:
                    decay_me=self.all_ME.get_decay_from_tag(production_tag, decay_tag_me)
                except Exception:
                    #if the master didn't exsit try the original one.
                    decay_me=self.all_ME.get_decay_from_tag(production_tag, decay['decay_tag'])
            event_nb+=1
            report[decay['decay_tag']] += 1
            indices_for_mc_masses, values_for_mc_masses=self.get_montecarlo_masses_from_event(decay['decay_struct'], event_map, decay['prod2full'])
            nb_mc_masses=len(indices_for_mc_masses)
            p, p_str=self.curr_event.give_momenta(event_map)
            # stdin block for the fortran code: mode, BW cut, collider
            # energy, channel max_weight and frame id, then the momenta
            stdin_text=' %s %s %s %s %s \n' % ('2', self.options['BW_cut'], self.Ecollider, decay_me['max_weight'], self.options['frame_id'])
            stdin_text+=p_str
            # here I also need to specify the Monte Carlo Masses
            stdin_text+=" %s \n" % nb_mc_masses
            if nb_mc_masses>0:
                stdin_text+='%s \n' % str(indices_for_mc_masses).strip('[]').replace(',', ' ')
                stdin_text+='%s \n' % str(values_for_mc_masses).strip('[]').replace(',', ' ')
            # here apply the reweighting procedure in fortran
            output = self.loadfortran( 'unweighting', decay_me['path'], stdin_text)
            if not output:
                fail_nb +=1
                continue
            trial_nb, BWvalue, weight, momenta, failed, use_mc_masses, helicities = output
            # next: need to fill all intermediate momenta
            if nb_mc_masses>0 and use_mc_masses==0:nb_fail_mc_mass+=1
            ext_mom=self.get_mom(momenta)
            # fill all momenta in the decay branches
            momenta_in_decay=self.get_int_mom_in_decay(decay['decay_struct'],ext_mom)
            # reset extrenal momenta in the production event
            self.reset_mom_in_prod_event(decay['decay_struct'],decay['prod2full'],\
                                event_map,momenta_in_decay,ext_mom, use_mc_masses, helicities)
            # reset intermediate momenta in prod event
            self.curr_event.reset_resonances()
            #
            decayed_event = self.decay_one_event_new(self.curr_event,decay['decay_struct'],\
                                event_map, momenta_in_decay,use_mc_masses, helicities)
            # Treat the case we get too many failures for the PS generation.
            if failed > 500 :
                logger.debug('Got a production event with %s failures for the phase-space generation generation ' % failed)
            # Treat the case that we ge too many overweight.
            if weight > decay_me['max_weight']:
                report['over_weight'] += 1
                report['%s_f' % (decay['decay_tag'],)] +=1
                if __debug__:
                    misc.sprint('''over_weight: %s %s, occurence: %s%%, occurence_channel: %s%%
                    production_tag:%s [%s], decay:%s [%s], BW_cut: %1g\n
                    ''' %\
                    (weight/decay['max_weight'], decay['decay_tag'],
                     100 * report['over_weight']/event_nb,
                     100 * report['%s_f' % (decay['decay_tag'],)] / report[decay['decay_tag']],
                     os.path.basename(self.all_ME[production_tag]['path']),
                     production_tag,
                     os.path.basename(decay['path']),
                     decay['decay_tag'],BWvalue))
                if weight > 10.0 * decay['max_weight']:
                    error = """Found a weight MUCH larger than the computed max_weight (ratio: %s).
                    This usually means that the Narrow width approximation reaches it's limit on part of the Phase-Space.
                    Do not trust too much the tale of the distribution and/or relaunch the code with smaller BW_cut.
                    This is for channel %s with current BW_value at : %g'""" \
                    % (weight/decay['max_weight'], decay['decay_tag'], BWvalue)
                    logger.error(error)
                elif report['over_weight'] > max(0.005*event_nb,3):
                    error = """Found too many weight larger than the computed max_weight (%s/%s = %s%%).
                    Please relaunch MS with more events/PS point by event in the
                    computation of the maximum_weight.
                    """ % (report['over_weight'], event_nb, 100 * report['over_weight']/event_nb )
                    raise MadSpinError(error)
                    # NOTE(review): unreachable -- follows an unconditional raise
                    error = True
                elif report['%s_f' % (decay['decay_tag'],)] > max(0.01*report[decay['decay_tag']],3):
                    error = """Found too many weight larger than the computed max_weight (%s/%s = %s%%),
                    for channel %s. Please relaunch MS with more events/PS point by event in the
                    computation of the maximum_weight.
                    """ % (report['%s_f' % (decay['decay_tag'],)],\
                           report['%s' % (decay['decay_tag'],)],\
                           100 * report['%s_f' % (decay['decay_tag'],)] / report[ decay['decay_tag']] ,\
                           decay['decay_tag'])
                    raise MadSpinError(error)
            decayed_event.change_wgt(factor= self.branching_ratio)
            #decayed_event.wgt = decayed_event.wgt * self.branching_ratio
            self.outputfile.write(decayed_event.string_event())
            #print "number of trials: "+str(trial_nb)
            trial_nb_all_events+=trial_nb
        self.outputfile.write('</LesHouchesEvents>\n')
        self.evtfile.close()
        self.outputfile.close()
        # final summaries on how often max_weight was exceeded
        if report['over_weight'] > max(0.15*math.sqrt(event_nb),1):
            error = """Found many weight larger than the computed max_weight (%s/%s = %s%%).
            """ % (report['over_weight'], event_nb, 100 * report['over_weight']/event_nb )
            logger.warning(error)
        for decay_tag in self.all_decay.keys():
            if report['%s_f' % (decay_tag,)] > max(0.2*report[decay_tag],1):
                error = """Found many weight larger than the computed max_weight (%s/%s = %s%%),
                for channel %s.""" % (report['%s_f' % (decay_tag,)],\
                report['%s' % (decay_tag,)],\
                100 * report['%s_f' % (decay_tag,)] / report[decay_tag] ,\
                decay_tag)
                logger.warning(error)
        logger.info('Total number of events written: %s/%s ' % (event_nb, event_nb+nb_skip))
        logger.info('Average number of trial points per production event: '\
                    +str(float(trial_nb_all_events)/float(event_nb)))
        logger.info('Branching ratio to allowed decays: %g' % self.branching_ratio)
        logger.info('Number of events with weights larger than max_weight: %s' % report['over_weight'])
        logger.info('Number of subprocesses '+str(len(self.calculator)))
        logger.info('Number of failures when restoring the Monte Carlo masses: %s ' % nb_fail_mc_mass)
        if fail_nb:
            logger.info('Number of failures in reshuffling (event skipped): %s ' % fail_nb)
        return event_nb/(event_nb+nb_skip)
def adding_only_helicity(self, event_map, production_tag):
"""no decays for this production mode, run in passthrough mode,
only adding the helicities to the events """
#no decays for this production mode, run in passthrough mode, only adding the helicities to the events
nb_mc_masses=0
p, p_str=self.curr_event.give_momenta(event_map)
stdin_text=' %s %s %s %s \n' % ('2', self.options['BW_cut'], self.Ecollider, 1.0, self.options['frame_id'])
stdin_text+=p_str
# here I also need to specify the Monte Carlo Masses
stdin_text+=" %s \n" % nb_mc_masses
mepath = self.all_ME[production_tag]['path']
decay = self.all_ME[production_tag]['decays'][0]
decay_me=self.all_ME.get_decay_from_tag(production_tag, decay['decay_tag'])
mepath = decay_me['path']
output = self.loadfortran( 'unweighting', mepath, stdin_text)
if not output:
# Event fail
return 0, 1
trial_nb, BWvalue, weight, momenta, failed, use_mc_masses, helicities = output
self.reset_helicityonly_in_prod_event(event_map, helicities)
decayed_event = self.curr_event
self.outputfile.write(decayed_event.string_event())
#print "number of trials: "+str(trial_nb)
return trial_nb, 0
def get_int_mom_in_decay(self,decay_struct,ext_mom):
""" fill """
momenta_in_decay={}
for part in decay_struct.keys():
branch=decay_struct[part]['mg_tree']
nb_splitting=len(branch)
for split in range(nb_splitting-1,-1,-1):
mother=branch[split][0]
d1=branch[split][1]
d2=branch[split][2]
if d1>0:
momenta_in_decay[d1]=ext_mom[d1-1] # list_momenta is ordered according to ME
if d2>0:
momenta_in_decay[d2]=ext_mom[d2-1] # list_momenta is ordered according to ME
momenta_in_decay[mother]=momenta_in_decay[d1].add(momenta_in_decay[d2])
return momenta_in_decay
    def reset_mom_in_prod_event(self, decay_struct,prod2full, event_map, momenta_in_decay,ext_mom,use_mc_masses,helicities):
        """Reset the external momenta in the production event, since the
        virtuality of the decaying particles has slightly changed the
        kinematics (reshuffling), and attach the returned helicities.

        Decaying particles get the reconstructed resonance momentum;
        stable particles get the reshuffled external momentum and have
        their mass restored either from the param_card or, when
        use_mc_masses is set, from the Monte-Carlo (shower) masses.
        """
        for index in self.curr_event.event2mg.keys():
            if self.curr_event.event2mg[index]>0:
                part=self.curr_event.event2mg[index] # index for production ME
                part_for_curr_evt=event_map[part-1]+1 # index for curr event
                pid=self.curr_event.particle[part_for_curr_evt]['pid']
                if part in decay_struct:
                    # decayed particle: momentum of the reconstructed resonance
                    id_res=decay_struct[part]['mg_tree'][0][0]
                    self.curr_event.particle[part_for_curr_evt]['momentum']=momenta_in_decay[id_res].copy()
                    self.curr_event.particle[part_for_curr_evt]['mass']=self.curr_event.particle[part_for_curr_evt]['momentum'].m
                else:
                    self.curr_event.particle[part_for_curr_evt]['momentum']=ext_mom[prod2full[part-1]-1]
                    self.curr_event.particle[part_for_curr_evt]['helicity']=helicities[prod2full[part-1]-1]
                    if not use_mc_masses or abs(pid) not in self.MC_masses:
                        try:
                            self.curr_event.particle[part_for_curr_evt]['mass']=self.banner.get('param_card','mass', abs(pid)).value
                        except KeyError:
                            # particle absent from the card: accept only if its
                            # mass parameter is the literal 'zero' in the model
                            if self.model.get_particle(abs(pid)).get('mass').lower() == 'zero':
                                self.curr_event.particle[part_for_curr_evt]['mass'] = 0
                            else:
                                raise
                    else:
                        self.curr_event.particle[part_for_curr_evt]['mass']=self.MC_masses[abs(pid)]
def reset_helicityonly_in_prod_event(self, event_map, helicities):
""" Reset the external momenta in the production event, since
the virtuality of decaying particles has slightly changed the kinematics
"""
for index in self.curr_event.event2mg.keys():
if self.curr_event.event2mg[index]>0:
part=self.curr_event.event2mg[index] # index for production ME
part_for_curr_evt=event_map[part-1]+1 # index for curr event
pid=self.curr_event.particle[part_for_curr_evt]['pid']
self.curr_event.particle[part_for_curr_evt]['helicity']=helicities[part-1]
def get_mom(self,momenta):
""" input: list of momenta in a string format
output: list of momenta in a 'momentum' format
"""
output=[]
for item in momenta:
comps=item.split()
mom=momentum(float(comps[0]),float(comps[1]),float(comps[2]),float(comps[3]))
output.append(mom)
return output
def get_identical_decay(self):
"""identify the various decay which are identical to each other"""
logger.info('detect independant decays')
start = time.time()
# Possbilitiy to Bypass this step
if len(self.all_decay) == 1:
relation = {}
base_tag = None
for prod in self.all_ME.values():
for decay in prod['decays']:
tags = decay['decay_tag']
for tag in tags:
if not base_tag:
relation[tag] = (tag, 1)
base_tag = tag
elif (tag,1) not in relation[base_tag]:
relation[tag] = (base_tag,1)
decay_mapping = self.get_process_identical_ratio(relation)
return decay_mapping
BW_cut = self.options['BW_cut']
#class the decay by class (nbody/pid)
nbody_to_decay = collections.defaultdict(list)
for decay in self.all_decay.values():
id = decay['dc_branch']['tree'][-1]['label']
id_final = decay['processes'][0].get_final_ids_after_decay()
cut = 0.0
mass_final = tuple([m if m> cut else 0 for m in map(self.pid2mass, id_final)])
nbody_to_decay[(decay['nbody'], abs(id), mass_final)].append(decay)
relation = {} # {tag: {(tag2, ratio)}}
# Loop over the class and create the relation information about the 1
for ((nbody, pid, finals),decays) in nbody_to_decay.items():
if len(decays) == 1:
continue
mom_init = momentum(self.pid2mass(pid), 0, 0, 0)
# create an object for the validation, keeping the ratio between
# MEM i and MEM j. this is set at zero when the ratio is not found
#constant
valid = dict([ ((i, j), True) for j in range(len(decays))
for i in range(len(decays))
if i != j])
for nb in range(125):
tree, jac, nb_sol = decays[0]['dc_branch'].generate_momenta(mom_init,\
True, self.pid2width, self.pid2mass, BW_cut,self.Ecollider)
if not tree:
continue
p_str = '%s\n%s\n'% (tree[-1]['momentum'],
'\n'.join(str(tree[i]['momentum']) for i in range(1, len(tree))
if i in tree))
values = {}
for i in range(len(decays)):
if any([valid[(i,j)] for j in range(len(decays)) if i !=j]):
values[i] = self.calculate_matrix_element('decay',
decays[i]['path'], p_str)
else:
#skip computation if all possibility are ruled out.
values[i] = 0
#check if the ratio is constant for all possibilities
for i in range(len(decays)):
for j in range(i+1, len(decays)):
if values[i] == 0 or values[j] == 0 or valid[(i,j)] == 0:
continue # already not valid
elif valid[(i,j)] is True:
valid[(i,j)] = values[j]/values[i]
valid[(j,i)] = valid[(i,j)]
elif (valid[(i,j)] - values[j]/values[i]) < 1e-6 * (valid[(i,j)] + values[j]/values[i]):
pass
else:
valid[(i, j)] = 0
valid[(j, i)] = 0
if __debug__:
for i in range(len(decays)):
comment= "| "
for j in range(len(decays)):
if i == j:
comment+= "%4e " % 1
continue
comment+= "%4e " % valid[(i,j)]
comment+= "|"+ os.path.basename(decays[i]['path'])
logger.debug(comment)
# store the result in the relation object. (using tag as key)
for i in range(len(decays)):
tag_i = decays[i]['tag'][2:]
for j in range(i+1, len(decays)):
tag_j = decays[j]['tag'][2:]
if valid[(i,j)] and tag_j not in relation:
relation[tag_j] = (tag_i, valid[(i,j)])
# fullfill the object with the already identify to one decay.
#and add those who doesn't have any relations.
for decay in self.all_decay.values():
tags = [m.shell_string(pdg_order=True)[2:] for m in decay['processes']]
init_tag = tags[0]
if init_tag not in relation:
out = (init_tag, 1)
else:
out = relation[init_tag]
for tag in tags[1:]:
relation[tag] = out
decay_mapping = self.get_process_identical_ratio(relation)
logger.info('Done in %ss' % (time.time()-start))
return decay_mapping
    def get_process_identical_ratio(self, relation):
        """Combine the per-branch equivalences in 'relation' into a mapping
        between the full decay configurations of the production processes.

        Returns {master_decay_tag: {(equivalent_tag, ratio), ...}} where
        the ratio accounts both for the ME ratios of the individual
        branches and for identical-particle factors.
        """
        # Now that we have ratio relation between each tag, we need to say
        #what is the relation between the decay of the production process.
        #This is not only the product since some decay can be equivalent.
        decay_mapping = {} # final output: {first_process: [(equiv_proc, ratio), ...]
        tag2real = {} # basic tag [the one related via relation] -> first process
        # basic tag ratio doesn't have any identical factor (this simplify calculation)
        nb=0
        for prod in self.all_ME.values():
            for decay in prod['decays']:
                tag = decay['decay_tag']
                nb+=1
                # build the basic tag (all equiv process are related to this tag)
                basic_tag = []
                ratio = 1
                for t in tag:
                    if t in relation:
                        basic_tag.append(relation[t][0])
                        ratio *= relation[t][1]
                    else:
                        basic_tag.append(t)
                basic_tag = tuple(basic_tag)
                # compute identical factor ratio compare to a fully diffent decay
                #that we have assume for the basic tag
                if len(set(tag)) != len(tag):
                    for t in set(tag):
                        ratio /= math.factorial(tag.count(t))
                # Now build the output
                if basic_tag not in tag2real:
                    # first time this configuration is seen: it is the master
                    tag2real[basic_tag] = (tag, ratio)
                    decay_mapping[tag] = set([(tag, 1)])
                    ratio2=1
                else:
                    real_tag, ratio2 = tag2real[basic_tag]
                    if real_tag != tag:
                        decay_mapping[real_tag].add((tag, ratio/ratio2))
        return decay_mapping
    @misc.mute_logger()
    @misc.set_global()
    def generate_all_matrix_element(self):
        """generate the full series of matrix element needed by Madspin.
        i.e. the undecayed and the decay one. And associate those to the
        madspin production_topo object

        Workflow (numbered steps below): compute partial widths, generate
        and output the production MEs, build the AllMatrixElement
        bookkeeping, generate the full (production x decay) MEs, copy the
        production files into each full-ME directory, and finally build
        the standalone 1->N decay MEs used for momentum generation.
        """
        # 1. compute the partial width
        # 2. compute the production matrix element
        # 3. create the all_topology object
        # 4. compute the full matrix element (use the partial to throw away
        # pointless decay.
        # 5. add the decay information to the all_topology object (with branching
        # ratio)
        # 0. clean previous run ------------------------------------------------
        path_me = self.path_me
        try:
            shutil.rmtree(pjoin(path_me,'full_me'))
        except Exception:
            pass
        try:
            shutil.rmtree(pjoin(path_me,'production_me'))
        except Exception as error:
            pass
        path_me = self.path_me
        # 1. compute the partial width------------------------------------------
        if not self.options["onlyhelicity"]:
            self.get_branching_ratio()
        # 2. compute the production matrix element -----------------------------
        processes = [line[9:].strip() for line in self.banner.proc_card
                     if line.startswith('generate')]
        processes += [' '.join(line.split()[2:]) for line in self.banner.proc_card
                      if re.search('^\s*add\s+process', line)]
        mgcmd = self.mgcmd
        modelpath = self.model.get('modelpath+restriction')
        commandline="import model %s" % modelpath
        if not self.model.mg5_name:
            commandline += ' --modelname'
        mgcmd.exec_cmd(commandline)
        # Handle the multiparticle of the banner
        #for name, definition in self.mscmd.multiparticles:
        if hasattr(self.mscmd, 'multiparticles_ms'):
            for name, pdgs in self.mscmd.multiparticles_ms.items():
                if name == 'all':
                    continue
                #self.banner.get('proc_card').get('multiparticles'):
                mgcmd.do_define("%s = %s" % (name, ' '.join(repr(i) for i in pdgs)))
        mgcmd.exec_cmd("set group_subprocesses False")
        logger.info('generating the production square matrix element')
        start = time.time()
        commandline=''
        for proc in processes:
            if '[' in proc:
                # NLO-style process line: expand to its LO definition first
                commandline += reweight_interface.ReweightInterface.get_LO_definition_from_NLO(proc, mgcmd._curr_model)
            else:
                commandline += 'add process %s; ' % proc
        commandline = commandline.replace('add process', 'generate',1)
        logger.info(commandline)
        mgcmd.exec_cmd(commandline, precmd=True)
        commandline = 'output standalone_msP %s %s' % \
                (pjoin(path_me,'production_me'), ' '.join(list(self.list_branches.keys())))
        mgcmd.exec_cmd(commandline, precmd=True)
        logger.info('Done %.4g' % (time.time()-start))
        # 3. Create all_ME + topology objects ----------------------------------
        matrix_elements = mgcmd._curr_matrix_elements.get_matrix_elements()
        self.all_ME.adding_me(matrix_elements, pjoin(path_me,'production_me'))
        # 3b. simplify list_branches -------------------------------------------
        # remove decay which are not present in any production ME.
        final_states = set()
        for me in matrix_elements:
            for leg in me.get('base_amplitude').get('process').get('legs'):
                if not leg.get('state'):
                    continue
                label = self.model.get_particle(leg.get('id')).get_name()
                if self.all_ME.has_particles_ambiguity:
                    final_states.add(self.pid2label[-1*self.pid2label[label]])
                final_states.add(label)
        for key in list(self.list_branches.keys()):
            if key not in final_states and key not in self.mgcmd._multiparticles:
                if (len(self.list_branches)>1):
                    del self.list_branches[key]
                elif not self.options["onlyhelicity"]:
                    raise Exception(" No decay define for process.")
                logger.info('keeping dummy decay for passthrough mode')
        # 4. compute the full matrix element -----------------------------------
        if not self.options["onlyhelicity"]:
            logger.info('generating the full matrix element squared (with decay)')
            start = time.time()
            to_decay = list(self.mscmd.list_branches.keys())
            decay_text = []
            for decays in self.mscmd.list_branches.values():
                for decay in decays:
                    if '=' not in decay:
                        decay += ' QCD=99'
                    if ',' in decay:
                        decay_text.append('(%s)' % decay)
                    else:
                        decay_text.append(decay)
            decay_text = ', '.join(decay_text)
            commandline = ''
            for proc in processes:
                if not proc.strip().startswith(('add','generate')):
                    proc = 'add process %s' % proc
                commandline += self.get_proc_with_decay(proc, decay_text, mgcmd._curr_model, self.options)
            commandline = commandline.replace('add process', 'generate',1)
            logger.info(commandline)
            mgcmd.exec_cmd(commandline, precmd=True)
            # remove decay with 0 branching ratio.
            mgcmd.remove_pointless_decay(self.banner.param_card)
            commandline = 'output standalone_msF %s %s' % (pjoin(path_me,'full_me'),
                                ' '.join(list(self.list_branches.keys())))
            mgcmd.exec_cmd(commandline, precmd=True)
            logger.info('Done %.4g' % (time.time()-start))
        elif self.options["onlyhelicity"]:
            logger.info("Helicity Matrix-Element")
            commandline = 'output standalone_msF %s %s' % \
                (pjoin(path_me,'full_me'), ' '.join(list(self.list_branches.keys())))
            mgcmd.exec_cmd(commandline, precmd=True)
            logger.info('Done %.4g' % (time.time()-start))
        # 5. add the decay information to the all_topology object --------------
        for matrix_element in mgcmd._curr_matrix_elements.get_matrix_elements():
            me_path = pjoin(path_me,'full_me', 'SubProcesses', \
                      "P%s" % matrix_element.get('processes')[0].shell_string())
            self.all_ME.add_decay(matrix_element, me_path)
        # 5.b import production matrix elements (+ related info) in the full process directory
        list_prodfiles=['matrix_prod.f','configs_production.inc','props_production.inc','nexternal_prod.inc']
        for tag in self.all_ME:
            prod_path=self.all_ME[tag]['path']
            nfinal=len(self.all_ME[tag]['base_order'][1])
            for dico in self.all_ME[tag]['decays']:
                full_path=dico['path']
                #print prod_path
                #print full_path
                #print ' '
                for item in list_prodfiles:
                    #print full_path
                    prodfile=pjoin(prod_path,item)
                    destination=pjoin(full_path,item)
                    shutil.copyfile(prodfile, destination)
                # we need to write the file config_decays.inc
                self.generate_configs_file(nfinal,dico,full_path)
        if self.options["onlyhelicity"]:
            return
        # 6. generate decay only part ------------------------------------------
        logger.info('generate matrix element for decay only (1 - > N).')
        start = time.time()
        commandline = ''
        i=0
        for processes in self.list_branches.values():
            for proc in processes:
                commandline+="add process %s @%i --no_warning=duplicate;" % (proc,i)
                i+=1
        commandline = commandline.replace('add process', 'generate',1)
        mgcmd.exec_cmd(commandline, precmd=True)
        # remove decay with 0 branching ratio.
        mgcmd.remove_pointless_decay(self.banner.param_card)
        #
        commandline = 'output standalone_msF %s' % pjoin(path_me,'decay_me')
        logger.info(commandline)
        mgcmd.exec_cmd(commandline, precmd=True)
        logger.info('Done %.4g' % (time.time()-start))
        #
        self.all_decay = {}
        for matrix_element in mgcmd._curr_matrix_elements.get_matrix_elements():
            me = matrix_element.get('processes')[0]
            me_string = me.shell_string()
            dirpath = pjoin(path_me,'decay_me', 'SubProcesses', "P%s" % me_string)
            #
            self.all_decay[me_string] = {'path': dirpath,
                                'dc_branch':dc_branch_from_me(me),
                                'nbody': len(me.get_final_ids_after_decay()),
                                'processes': matrix_element.get('processes'),
                                'tag': me.shell_string(pdg_order=True)}
        #
        # if __debug__:
        #     #check that all decay matrix element correspond to a decay only
        #     for prod in self.all_ME.values():
        #         for decay in prod['matrix_element']['base_amplitude']['process']['decay_chains']:
        #             assert decay.shell_string() in self.all_decay
@staticmethod
def get_proc_with_decay(proc, decay_text, model, msoptions=None):
commands = []
if '[' in proc:
new_command = reweight_interface.ReweightInterface.get_LO_definition_from_NLO(proc, model)
new_procs = new_command.split(';')
else:
new_procs = [proc]
for new_proc in new_procs:
new_proc= new_proc.strip()
if new_proc.endswith(';'):
new_proc = new_proc[:-1]
#catch line like "define" where no decay need to be added
if not new_proc.strip():
continue
if new_proc.startswith('p '):
new_proc = 'add process %s' % new_proc
logger.critical("wrongly formatted input for MadSpin. Please report this!")
elif not new_proc.startswith(('add', 'generate')):
commands.append(new_proc)
continue
# check options
tmp, options = [], set(["--no_warning=duplicate"])
for arg in new_proc.split():
if arg.startswith('--'):
options.add(arg)
else:
tmp.append(arg)
new_proc = ' '.join(tmp)
options = list(options)
options.sort()
options = ' '.join(options)
# deal with @ syntax need to move it after the decay specification
if '@' in new_proc:
baseproc, proc_nb = new_proc.split('@')
try:
proc_nb = int(proc_nb)
except ValueError:
raise MadSpinError('MadSpin didn\'t allow order restriction after the @ comment: \"%s\" not valid' % proc_nb)
proc_nb = '@%i' % proc_nb
else:
baseproc = new_proc
proc_nb = ''
if msoptions and msoptions['global_order_coupling']:
if '@' in proc_nb:
proc_nb += " %s" % msoptions['global_order_coupling']
else:
proc_nb += " @0 %s" % msoptions['global_order_coupling']
nb_comma = baseproc.count(',')
if nb_comma == 0:
commands.append("%s, %s %s %s" % (baseproc, decay_text, proc_nb, options))
elif nb_comma == 1:
before, after = baseproc.split(',')
commands.append("%s, %s, (%s, %s) %s %s" % (before, decay_text, after, decay_text, proc_nb, options))
else:
part = baseproc.split(',')
if any('(' in p for p in part):
raise Exception('too much decay at MG level. this can not be done for the moment)')
else:
decay_part = []
for p in part[1:]:
decay_part.append("(%s, %s)" % (p, decay_text))
commands.append("%s, %s, %s %s %s" % (part[0], decay_text, ', '.join(decay_part), proc_nb, options))
commands.append('') #to have a ; at the end of the command
return ';'.join(commands)
def get_branching_ratio(self):
"""compute the branching ratio of all the decaying particles"""
# Compute the width branching ratio. Doing this at this point allows
#to remove potential pointless decay in the diagram generation.
resonances = decay_misc.get_all_resonances(self.banner,
self.mgcmd, list(self.mscmd.list_branches.keys()))
logger.debug('List of resonances:%s' % resonances)
path_me = os.path.realpath(self.path_me)
width = width_estimate(resonances, path_me, self.banner, self.model,
self.pid2label)
width.extract_br(self.list_branches, self.mgcmd)
width.print_branching_fractions()
#self.channel_br = width.get_BR_for_each_decay(self.decay_processes,
# self.mgcmd._multiparticles)
self.width_estimator = width
self.banner.param_card = width.banner.param_card
return width
def compile(self):
logger.info('Compiling code')
self.compile_fortran(self.path_me, mode="full_me")
if not self.options["onlyhelicity"]:
self.compile_fortran(self.path_me, mode="production_me")
self.compile_fortran(self.path_me, mode="decay_me")
    def compile_fortran(self, path_me, mode='production_me'):
        """Compile the fortran executables evaluating the matrix elements.

        path_me: root directory containing the '<mode>' output directory.
        mode: 'full_me', 'production_me' or 'decay_me'; selects which set of
              matrix elements is built and which driver/makefile is used.
        """
        base_dir = pjoin(path_me, mode,"SubProcesses")
        # NOTE(review): dead assignment -- list_prod is rebuilt from scratch below
        list_prod=os.listdir(base_dir)
        logger.debug("""Finalizing %s's """% mode)
        # COMPILATION OF LIBRARY
        misc.compile( cwd=pjoin(path_me, mode,"Source","DHELAS"), mode='fortran')
        # use the MadSpin-specific param_card reader in the MODEL library
        file_madspin=pjoin(MG5DIR, 'MadSpin', 'src', 'lha_read_ms.f')
        shutil.copyfile(file_madspin, pjoin(path_me, mode,"Source","MODEL","lha_read.f" ))
        if not self.options["use_old_dir"]:
            misc.compile(arg=['clean'], cwd=pjoin(path_me, mode,"Source","MODEL"), mode='fortran')
        misc.compile( cwd=pjoin(path_me, mode,"Source","MODEL"), mode='fortran')
        file=pjoin(path_me, 'param_card.dat')
        shutil.copyfile(file,pjoin(path_me,mode,"Cards","param_card.dat"))
        # get all paths to matix elements (deduplicated, in discovery order)
        list_prod=[]
        if mode == 'full_me':
            for tag in self.all_ME:
                for dico in self.all_ME[tag]['decays']:
                    full_path=dico['path']
                    if full_path not in list_prod: list_prod.append(full_path)
        elif mode == 'production_me':
            for tag in self.all_ME:
                prod_path=self.all_ME[tag]['path']
                if prod_path not in list_prod: list_prod.append(prod_path)
        elif mode == 'decay_me':
            # standalone decay output: pick up every P* subprocess directory
            # (NOTE(review): 'dir' shadows the builtin of the same name)
            for dir in os.listdir(base_dir):
                if dir[0] == 'P': list_prod.append(pjoin(base_dir, dir))
        for i,me_path in enumerate(list_prod):
            # if direc[0] == "P" and os.path.isdir(pjoin(base_dir, direc)):
            #     new_path = pjoin(base_dir, direc)
            new_path = me_path
            # install the driver matching the type of matrix element
            if mode == 'full_me':
                file_madspin=pjoin(MG5DIR, 'MadSpin', 'src', 'driver.f')
                shutil.copyfile(file_madspin, pjoin(new_path,"driver.f"))
            elif mode == 'production_me':
                file_madspin=pjoin(MG5DIR, 'MadSpin', 'src', 'driver_prod.f')
                shutil.copyfile(file_madspin, pjoin(new_path,"check_sa.f"))
            else:
                file_madspin=pjoin(MG5DIR, 'MadSpin', 'src', 'driver_decay.f')
                shutil.copyfile(file_madspin, pjoin(new_path,"check_sa.f"))
            if mode=='full_me':
                # random-number machinery: shared seed file plus a per-directory
                # offset so each subprocess draws an independent stream
                file_madspin=pjoin(MG5DIR, 'MadSpin', 'src', 'ranmar.f')
                shutil.copyfile(file_madspin, pjoin(new_path,"ranmar.f"))
                file_madspin=pjoin(path_me, 'seeds.dat')
                files.ln(file_madspin, new_path)
                file_madspin=pjoin(new_path, 'offset.dat')
                open(file_madspin,'w').write('%i\n' % i)
            if mode == 'full_me':
                file_madspin=pjoin(MG5DIR, 'MadSpin', 'src', 'makefile_full')
            elif mode == 'production_me':
                file_madspin=pjoin(MG5DIR, 'MadSpin', 'src', 'makefile_prod')
            else:
                file_madspin=pjoin(MG5DIR, 'MadSpin', 'src', 'makefile_decay')
            shutil.copyfile(file_madspin, pjoin(new_path,"makefile") )
            # files to produce the parameters:
            file_madspin=pjoin(MG5DIR, 'MadSpin', 'src', 'initialize.f')
            shutil.copyfile(file_madspin,pjoin(new_path,"initialize.f"))
            shutil.copyfile(pjoin(path_me, mode,'Source','MODEL','input.inc'),
                            pjoin(new_path,'input.inc'))
            # run './init' once per SubProcesses tree to produce parameters.inc,
            # then share the result with the sibling directories
            if not os.path.exists(pjoin(new_path,os.path.pardir, 'parameters.inc')):
                if not self.options["use_old_dir"]:
                    misc.compile(arg=['clean'], cwd=new_path, mode='fortran')
                misc.compile(arg=['init'],cwd=new_path,mode='fortran')
                misc.call('./init', cwd=new_path)
                shutil.copyfile(pjoin(new_path,'parameters.inc'),
                                pjoin(new_path,os.path.pardir, 'parameters.inc'))
            if mode == 'production_me':
                misc.compile(cwd=new_path, mode='fortran')
            else:
                misc.compile(cwd=new_path, mode='fortran')
                misc.compile(arg=['check'], cwd=new_path, mode='fortran')
        if __debug__:
            # sanity check: the generated parameters.inc must not be (almost) empty
            if(os.path.getsize(pjoin(path_me, mode,'SubProcesses', 'parameters.inc'))<10):
                print(pjoin(path_me, mode,'SubProcesses', 'parameters.inc'))
                raise Exception("Parameters of the model were not written correctly ! %s " %\
                                os.path.getsize(pjoin(path_me, mode,'SubProcesses', 'parameters.inc')))
    def extract_resonances_mass_width(self, resonances):
        """Read the width of each resonance from the banner's param_card.

        Widths below 1 MeV are replaced by an effective 1 MeV width so that
        the phase-space generation remains well behaved (useful e.g. for tau
        decays). If any width was modified, the param_card on disk is updated
        since the fortran codes read their inputs from that file.
        """
        label2width = {}
        label2mass = {}
        pid2width = {}
        #pid2mass = self.pid2mass
        need_param_card_modif = False
        # now extract the width of the resonances:
        for particle_label in resonances:
            try:
                part=abs(self.pid2label[particle_label])
                #mass = self.banner.get('param_card','mass', abs(part))
                width = self.banner.get('param_card','decay', abs(part))
            except ValueError as error:
                # particle absent from the param_card: ignore it
                continue
            else:
                if (width.value > 0.001):
                    label2width[particle_label]=float(width.value)
                else: # the width is less than 1 MeV, need to use an effective width !!
                    # this is useful to handle cases like tau decays
                    label2width[particle_label]=0.001
                    need_param_card_modif = True
                    logger.warning('ATTENTION')
                    logger.warning('Found a very small width in the param_card for particle '\
                                   +str(particle_label))
                    logger.warning('Use instead an effective width of 1 MeV ' )
                #label2mass[particle_label]=float(mass.value)
                #pid2mass[part]=label2mass[particle_label]
                pid2width[abs(part)]=label2width[particle_label]
                # NOTE(review): label2width can never be 0.0 at this point (the
                # branch above already replaced any width <= 0.001 by 0.001),
                # so this fallback on the model's default width looks
                # unreachable -- confirm before relying on it.
                if label2width[particle_label]==0.0:
                    need_param_card_modif = True
                    for param in self.model["parameters"][('external',)]:
                        if param.lhablock=="DECAY" and param.lhacode==[abs(part)]:
                            label2width[particle_label]=max(param.value,0.001)
                            pid2width[abs(part)]=label2width[particle_label]
                    logger.warning('ATTENTION')
                    logger.warning('Found a zero width in the param_card for particle '\
                                   +str(particle_label))
                    logger.warning('Use instead the default/effective value '\
                                   +str(label2width[particle_label]))
        # now we need to modify the values of the width
        # in param_card.dat, since this is where the input
        # parameters will be read when evaluating matrix elements
        if need_param_card_modif:
            decay_misc.modify_param_card(pid2width, self.path_me)
    def get_max_weight_from_event(self, decay_mapping):
        """Estimate, per decay channel, the maximum weight used by the
        unweighting procedure.

        The first 'Nevents_for_max_weight' production events of each decaying
        final-state type are probed with 'max_weight_ps_point' phase-space
        points each. The maximum weight is taken as
        1.05*(mean + nb_sigma*std) of the sampled maxima (enlarged if an
        observed weight exceeds it) and propagated to every decay channel
        related through ``decay_mapping`` (scaled by the associated ratio).
        """
        decay_tools = decay_misc()
        # check all set of decay that need to be done:
        decay_set = set()
        for production in self.all_ME.values():
            decay_set.add(production['decaying'])
        numberev = self.options['Nevents_for_max_weight'] # number of events
        numberps = self.options['max_weight_ps_point'] # number of phase pace points per event
        logger.info(' ')
        logger.info(' Estimating the maximum weight ')
        logger.info(' ***************************** ')
        logger.info(' Probing the first '+str(numberev)+' events')
        logger.info(' with '+str(numberps)+' phase space points')
        if len(decay_set) > 1:
            logger.info(' For %s decaying particle type in the final states' % len(decay_set))
        logger.info(' ')
        # NOTE(review): dead assignment -- rebound to a dict a few lines below
        probe_weight = []
        starttime = time.time()
        ev = -1
        nb_decay = dict( (key,0) for key in decay_set)
        probe_weight = dict( (key,[]) for key in decay_set)
        while ev+1 < len(decay_set) * numberev:
            production_tag, event_map = self.load_event()
            if production_tag == 0 == event_map: #end of file
                logger.info('Not enough events for at least one production mode.')
                logger.info('This is ok as long as you don\'t reuse the max weight for other generations.')
                break
            #check if this event is usefull or not
            decaying = self.all_ME[production_tag]['decaying']
            if nb_decay[decaying] >= numberev:
                # enough events already probed for this final-state type
                continue
            ev += 1
            nb_decay[decaying] += 1
            logger.debug('Event %s/%s: ' % (ev+1, len(decay_set)*numberev))
            if (len(decay_set)*numberev -(ev+2)) >0:
                self.mscmd.update_status((len(decay_set)*numberev -(ev+2),1,ev+1,
                                         'MadSpin: Maximum weight'),
                                         force=False, print_log=False)
            max_decay = {}
            # NOTE(review): mean_decay/std_decay are never filled in this version
            mean_decay= {}
            std_decay = {}
            atleastonedecay=False
            for decay in self.all_ME[production_tag]['decays']:
                tag = decay['decay_tag']
                if decay_mapping and not tag in decay_mapping:
                    continue
                if not tag:
                    continue # No decay for this process
                atleastonedecay = True
                # maximum weight over 'numberps' phase-space points (fortran side)
                weight = self.get_max_weight_from_fortran(decay['path'], event_map,numberps,self.options['BW_cut'])
                if tag in max_decay:
                    max_decay[tag] = max([max_decay[tag], weight])
                else:
                    max_decay[tag] = weight
            if not atleastonedecay:
                # NO decay [one possibility is all decay are identical to their particle]
                logger.info('No independent decay for one type of final states -> skip those events for the maximum weight computation')
                nb_decay[decaying] = numberev
                ev += numberev -1
                continue
            probe_weight[decaying].append(max_decay)
            # restart the fortran helpers between events to free resources
            self.terminate_fortran_executables()
            self.calculator = {}
            self.calculator_nbcall = {}
            if ev % 5 == 0:
                running_time = misc.format_timer(time.time()-starttime)
                info_text = 'Event %s/%s : %s \n' % (ev + 1, len(decay_set)*numberev, running_time)
                logger.info(info_text[:-1])
        # Computation of the maximum weight used in the unweighting procedure
        for decaying in probe_weight:
            if not probe_weight[decaying]:
                continue
            for decay_tag in probe_weight[decaying][0].keys():
                weights=[]
                for ev in range(numberev):
                    try:
                        weights.append(probe_weight[decaying][ev][decay_tag])
                    except:
                        continue
                if not weights:
                    logger.warning( 'no events for %s' % decay_tag)
                    continue
                weights.sort(reverse=True)
                assert len(weights) == 1 or weights[0] >= weights[1]
                ave_weight, std_weight = decay_tools.get_mean_sd(weights)
                base_max_weight = 1.05 * (ave_weight+self.options['nb_sigma']*std_weight)
                # re-estimate on the 20/30/40/50 largest weights and keep the
                # most conservative (largest) estimate
                for i in [20, 30, 40, 50]:
                    if len(weights) < i:
                        break
                    ave_weight, std_weight = decay_tools.get_mean_sd(weights[:i])
                    base_max_weight = max(base_max_weight, 1.05 * (ave_weight+self.options['nb_sigma']*std_weight))
                if weights[0] > base_max_weight:
                    base_max_weight = 1.05 * weights[0]
                # propagate the estimate to all related decay channels
                for associated_decay, ratio in decay_mapping[decay_tag]:
                    max_weight= ratio * base_max_weight
                    if ratio != 1:
                        max_weight *= 1.1 #security
                    br = 0
                    #assign the value to the associated decays
                    for k,m in self.all_ME.items():
                        for mi in m['decays']:
                            if mi['decay_tag'] == associated_decay:
                                mi['max_weight'] = max_weight
                                br = mi['br']
                                nb_finals = len(mi['finals'])
                    if decay_tag == associated_decay:
                        logger.debug('Decay channel %s :Using maximum weight %s [%s] (BR: %s)' % \
                                     (','.join(decay_tag), base_max_weight, max(weights), br/nb_finals))
                    else:
                        logger.debug('Decay channel %s :Using maximum weight %s (BR: %s)' % \
                                     (','.join(associated_decay), max_weight, br/nb_finals))
        # check that all decay have a max_weight and fix it if not the case.
        for prod in self.all_ME.values():
            for dec in prod['decays']:
                if dec['decay_tag'] and not 'max_weight' in dec:
                    dec['max_weight'] = 0.
        # rewind the event file for the actual decay pass
        self.evtfile.seek(0)
        return
    def load_event(self):
        """Load the next event and ensure that the ME is define

        Returns (production_tag, event_map) where event_map maps each index
        of the production matrix element to the corresponding position in
        the event record. When several identical particles match, one of the
        candidate positions is picked at random. Returns (0, 0) at end of
        file.
        """
        if self.curr_event.get_next_event() == 'no_event':
            return 0, 0
        production_tag, order = self.curr_event.get_tag()
        P_order = self.all_ME[production_tag]['tag2order'][production_tag]
        event_map = {}
        evt_order = list(order[0])+list(order[1])
        for i, id in enumerate(P_order[0] + P_order[1]):
            # candidate positions in the event carrying the same pdg code
            in_event = [pos for pos, label in enumerate(evt_order) \
                        if label == id]
            # restrict to initial-state (resp. final-state) positions
            if i < len(order[0]):
                in_event = [pos for pos in in_event if pos < len(order[0])]
            else:
                in_event = [pos for pos in in_event if pos >= len(order[0])]
            if len(in_event) == 1:
                in_event = in_event[0]
            else:
                # identical particles: pick one of the assignments at random
                config = random.randint(0, len(in_event)-1)
                in_event = in_event[config]
            # mark the slot as consumed so it is not matched twice
            # (assumes pdg code 0 never appears in an event -- TODO confirm)
            evt_order[in_event] = 0
            event_map[i] = in_event
        if __debug__ and len(order[0]) == 2:
            assert event_map[0] in [0,1], 'wrong event mapping %s' % event_map
            assert event_map[1] in [0,1], 'wrong event mapping %s' % event_map
            assert production_tag in self.all_ME
        return production_tag, event_map
def get_max_weight_from_fortran(self, path, event_map,nbpoints,BWcut):
"""return the max. weight associated with me decay['path']"""
p, p_str=self.curr_event.give_momenta(event_map)
std_in=" %s %s %s %s %s \n" % ("1",BWcut, self.Ecollider, nbpoints, self.options['frame_id'])
std_in+=p_str
max_weight = self.loadfortran('maxweight',
path, std_in)
return max_weight
nb_load = 0
def loadfortran(self, mode, path, stdin_text, first=True):
""" call the fortran executable """
self.nb_load +=1
tmpdir = ''
if ('full',path) in self.calculator:
external = self.calculator[('full',path)]
self.calculator_nbcall[('full',path)] += 1
else:
logger.debug('we have %s calculator ready' % len(self.calculator))
tmpdir = path
executable_prod="./check"
my_env = os.environ.copy()
my_env["GFORTRAN_UNBUFFERED_ALL"] = "y"
external = Popen(executable_prod, stdout=PIPE, stdin=PIPE,
stderr=STDOUT, cwd=tmpdir, env=my_env)
self.calculator[('full',path,)] = external
self.calculator_nbcall[('full',path)] = 1
try:
external.stdin.write(stdin_text.encode())
external.stdin.flush()
except IOError as error:
if not first:
raise
try:
external.stdin.close()
except Exception as error:
misc.sprint(error)
try:
external.stdout.close()
except Exception as error:
misc.sprint(error)
try:
external.stderr.close()
except Exception as error:
misc.sprint(error)
try:
external.terminate()
except:
pass
del self.calculator[('full',path,)]
return self.loadfortran(mode, path, stdin_text, first=False)
if mode == 'maxweight':
maxweight=float(external.stdout.readline())
output = maxweight
elif mode == 'full_me':
me_value=float(external.stdout.readline())
output = me_value
elif mode == 'unweighting':
firstline=external.stdout.readline().split()
try:
nexternal=int(firstline[0])
trials= int(firstline[1])
BWvalue= float(firstline[2])
weight= float(firstline[3])
failed= float(firstline[4])
use_mc_masses=int(firstline[5])
except ValueError:
logger.debug(firstline)
return
momenta=[external.stdout.readline() for i in range(nexternal)]
lastline=external.stdout.readline().split()
helicities=[lastline[i] for i in range(len(lastline))]
output = trials, BWvalue, weight, momenta, failed, use_mc_masses, helicities
if len(self.calculator) > self.options['max_running_process']:
logger.debug('more than %s calculators. Perform cleaning' % self.options['max_running_process'])
nb_calls = list(self.calculator_nbcall.values())
nb_calls.sort()
cut = max([nb_calls[len(nb_calls)//2], 0.001 * nb_calls[-1]])
for key, external in list(self.calculator.items()):
nb = self.calculator_nbcall[key]
if nb < cut:
if key[0]=='full':
path=key[1]
end_signal="5 0 0 0 0\n" # before closing, write down the seed
external.stdin.write(end_signal)
ranmar_state=external.stdout.readline()
ranmar_file=pjoin(path,'ranmar_state.dat')
ranmar=open(ranmar_file, 'w')
ranmar.write(ranmar_state)
ranmar.close()
external.stdin.close()
external.stdout.close()
external.terminate()
del self.calculator[key]
del self.calculator_nbcall[key]
else:
self.calculator_nbcall[key] = self.calculator_nbcall[key] //10
return output
def calculate_matrix_element(self, mode, production, stdin_text):
"""routine to return the matrix element"""
if mode != "decay":
raise Exception("This function is only secure in mode decay.")
tmpdir = ''
if (mode, production) in self.calculator:
external = self.calculator[(mode, production)]
self.calculator_nbcall[(mode, production)] += 1
else:
logger.debug('we have %s calculator ready' % len(self.calculator))
if mode == 'prod':
tmpdir = pjoin(self.path_me,'production_me', 'SubProcesses',
production)
elif mode in ['full','decay']:
tmpdir = pjoin(self.path_me,'%s_me' % mode, 'SubProcesses',
production)
executable_prod="./check"
my_env = os.environ.copy()
my_env["GFORTRAN_UNBUFFERED_ALL"] = "y"
external = Popen(executable_prod, stdout=PIPE, stdin=PIPE,
stderr=STDOUT, cwd=tmpdir,
env=my_env,
bufsize=0)
assert (mode, production) not in self.calculator
self.calculator[(mode, production)] = external
self.calculator_nbcall[(mode, production)] = 1
external.stdin.write(stdin_text.encode())
if mode == 'prod':
info = int(external.stdout.readline().decode())
nb_output = abs(info)+1
else:
info = 1
nb_output = 1
std = []
for i in range(nb_output):
external.stdout.flush()
line = external.stdout.readline().decode()
std.append(line)
prod_values = ' '.join(std)
#prod_values = ' '.join([external.stdout.readline().decode() for i in range(nb_output)])
if info < 0:
print('ZERO DETECTED')
print(prod_values)
print(stdin_text)
os.system('lsof -p %s' % external.pid)
return ' '.join(prod_values.split()[-1*(nb_output-1):])
if len(self.calculator) > self.options['max_running_process']:
logger.debug('more than 100 calculator. Perform cleaning')
nb_calls = list(self.calculator_nbcall.values())
nb_calls.sort()
cut = max([nb_calls[len(nb_calls)//2], 0.001 * nb_calls[-1]])
for key, external in list(self.calculator.items()):
nb = self.calculator_nbcall[key]
if nb < cut:
external.stdin.close()
external.stdout.close()
external.terminate()
del self.calculator[key]
del self.calculator_nbcall[key]
else:
self.calculator_nbcall[key] = self.calculator_nbcall[key] //10
if mode == 'prod':
return prod_values
else:
return float(prod_values)
    def generate_configs_file(self,nfinal,decay, path):
        """ write the file configs_decay.inc
        also record the itree information in a python variable,
        this will be needed to write down the event
        decay_struct['mg_tree'] = [(d1,d2, mother), (d1,d2,mother), ...]
        with - BACKWARD ORDER,
             - me indices

        nfinal: number of final-state particles in the production process.
        decay:  the decay-channel dictionary; its 'decay_struct' entry is
                read and its 'prod2full' / per-particle 'mg_tree' entries
                are filled here.
        path:   directory where configs_decay.inc is written.
        """
        decay_struct=decay['decay_struct']
        me_index=2  # should match the particle index in the full matrix element
        count_res=0 # count number of resonances
        iforest=[]      # fortran DATA lines describing the decay topology
        pmasswidth=[]   # fortran lines setting resonance masses/widths
        # example: data (map_external2res(i), i=1,4)/1,2,-2,-4/
        decay['prod2full']=[1,2]  # the two initial-state particles map to themselves
        map_external=' data (map_external2res(i), i=1,%s)/1,2,' %(nfinal+2)
        for part in range(3,nfinal+3):
            if part in decay_struct: # particle in the prod. event to be decayed
                decay_struct[part]['mg_tree']=[]
                nb_res=len(list(decay_struct[part]["tree"].keys()))
                # walk the decay tree; resonances carry negative labels,
                # numbered backward in the full matrix element
                for res in range(-1,-nb_res-1,-1):
                    label=abs(decay_struct[part]["tree"][res]['label'])
                    mass=self.pid2massvar[label]
                    width=self.pid2widthvar[label]
                    me_res=-nb_res-res-count_res-1
                    indexd1=decay_struct[part]["tree"][res]["d1"]["index"]
                    if indexd1>0:
                        me_index+=1
                        me_d1=me_index
                    else:
                        # need to label resonances backward
                        me_d1 = -nb_res-indexd1-count_res-1
                    indexd2=decay_struct[part]["tree"][res]["d2"]["index"]
                    if indexd2>0:
                        me_index+=1
                        me_d2=me_index
                    else:
                        # need to label resonances backward
                        me_d2 = -nb_res-indexd2-count_res-1
                    iforest.append(" DATA (IDECAY(I, %s ),I=1,2)/ %s , %s / \n" % (me_res, me_d1, me_d2))
                    decay_struct[part]['mg_tree'].append((me_res,me_d1,me_d2))
                    pmasswidth.append(" PRMASS(%s)=%s \n" %(me_res,mass) )
                    pmasswidth.append(" PRWIDTH(%s)=%s \n" %(me_res,width) )
                count_res=count_res+nb_res
                map_external+='%s ,' % (-count_res)
                decay['prod2full'].append(-count_res)
            else:
                # stable particle: plain index shift
                me_index+=1
                map_external+='%s ,' % me_index
                decay['prod2full'].append(me_index)
        # drop the trailing comma and close the fortran DATA statement
        map_external=map_external[:-1]+'/ \n'
        trappe=open(pjoin(path,'configs_decay.inc'),'w')
        trappe.write(map_external)
        for item in iforest:
            trappe.write(item)
        trappe.write(' ns_channel_decay= %s \n' % count_res)
        for item in pmasswidth:
            trappe.write(item)
        trappe.close()
    def get_montecarlo_masses_from_event(self,decay_struct, event_map, map_prod2full):
        """
        from the production event curr_event and from the decay channel 'decay_struct'
        (which has just been selected randomly), get the MonteCarlo masses

        Returns (indices, values): positions (full-ME numbering) and
        corresponding Monte-Carlo masses of all external particles whose
        |pid| appears in self.MC_masses.
        """
        # in order to preserve the natural order in lhe file,
        # we need the inverse of the dico event_map
        inv_event_map={}
        for i in event_map.keys():
            inv_event_map[event_map[i]]=i
        indices_for_mc_masses=[]
        values_for_mc_masses=[]
        for index in self.curr_event.event2mg.keys():
            if self.curr_event.event2mg[index]>0: # no need to consider resonances in the production event file
                part=inv_event_map[self.curr_event.event2mg[index]-1]+1 # index for prod. matrix element
                part_for_curr_evt=self.curr_event.event2mg[index] # index for event file
                if part not in decay_struct:
                    # stable particle: get the pid
                    curr_pid=abs(self.curr_event.particle[part_for_curr_evt]['pid'])
                    if curr_pid in self.MC_masses:
                        indices_for_mc_masses.append(map_prod2full[part-1])
                        values_for_mc_masses.append(self.MC_masses[curr_pid])
                else:
                    # now we need to write the decay products in the event
                    # follow the decay chain order, so that we can easily keep track of the mother index
                    for res in range(-1,-len(list(decay_struct[part]["tree"].keys()))-1,-1):
                        index_d1=decay_struct[part]['mg_tree'][-res-1][1]
                        index_d2=decay_struct[part]['mg_tree'][-res-1][2]
                        pid_d1=abs(decay_struct[part]\
                                   ["tree"][res]["d1"]["label"])
                        pid_d2=abs(decay_struct[part]\
                                   ["tree"][res]["d2"]["label"])
                        # only external decay products (index>0) get a MC mass
                        if index_d1 >0 and pid_d1 in self.MC_masses:
                            indices_for_mc_masses.append(index_d1)
                            values_for_mc_masses.append(self.MC_masses[pid_d1])
                        if index_d2 >0 and pid_d2 in self.MC_masses:
                            indices_for_mc_masses.append(index_d2)
                            values_for_mc_masses.append(self.MC_masses[pid_d2])
        return indices_for_mc_masses,values_for_mc_masses
    def decay_one_event_new(self,curr_event,decay_struct, event_map, momenta_in_decay, use_mc_masses, helicities):
        """Write down the event
        momenta is the list of momenta ordered according to the productin ME

        Builds and returns a new Event in which every particle of
        'curr_event' flagged for decay (i.e. present in 'decay_struct') is
        replaced by its full decay chain, using the momenta in
        'momenta_in_decay' and the helicities returned by the fortran code.
        Color flow for the new decay vertices is assigned on the fly (only
        the 1, 3, 3-bar and 8 color representations are handled).
        """
        pid2color = self.pid2color
        # copy the event-level information
        decayed_event=Event()
        decayed_event.event2mg={}
        decayed_event.ievent=curr_event.ievent
        decayed_event.wgt=curr_event.wgt
        decayed_event.scale=curr_event.scale
        decayed_event.aqed=curr_event.aqed
        decayed_event.aqcd=curr_event.aqcd
        decayed_event.diese=curr_event.diese
        decayed_event.rwgt=curr_event.rwgt
        decayed_event.event_init_line=curr_event.event_init_line
        part_number=0
        external=0
        maxcol=curr_event.max_col   # highest color-line index used so far
        # in order to preserve the natural order in lhe file,
        # we need the inverse of the dico event_map
        inv_event_map={}
        for i in event_map.keys():
            inv_event_map[event_map[i]]=i
        sol_nb = None   # NOTE(review): unused in this version
        for index in curr_event.event2mg.keys():
            if curr_event.event2mg[index]>0:
                part=inv_event_map[curr_event.event2mg[index]-1]+1 # index for prod. matrix element
                part_for_curr_evt=curr_event.event2mg[index] # index for event file
                if part not in decay_struct:
                    # particle not decayed: copy it over unchanged
                    external+=1
                    part_number+=1
                    decayed_event.particle[part_number]=curr_event.particle[part_for_curr_evt]
                    decayed_event.event2mg[part_number]=part_number
                else:
                    # now we need to write the decay products in the event
                    # follow the decay chain order, so that we can easily keep track of the mother index
                    map_to_part_number={}
                    for res in range(-1,-len(list(decay_struct[part]["tree"].keys()))-1,-1):
                        index_res_for_mom=decay_struct[part]['mg_tree'][-res-1][0]
                        if (res==-1):
                            # res=-1 is the decayed particle itself: written with
                            # status 2, inheriting mother/color from the
                            # production-event particle
                            part_number+=1
                            mom=momenta_in_decay[index_res_for_mom].copy()
                            pid=decay_struct[part]["tree"][res]['label']
                            istup=2
                            mothup1=curr_event.particle[part_for_curr_evt]["mothup1"]
                            mothup2=curr_event.particle[part_for_curr_evt]["mothup2"]
                            colup1=curr_event.particle[part_for_curr_evt]["colup1"]
                            colup2=curr_event.particle[part_for_curr_evt]["colup2"]
                            decay_struct[part]["tree"][res]["colup1"]=colup1
                            decay_struct[part]["tree"][res]["colup2"]=colup2
                            mass=mom.m
                            helicity=0.
                            decayed_event.particle[part_number]={"pid":pid,\
                                "istup":istup,"mothup1":mothup1,"mothup2":mothup2,\
                                "colup1":colup1,"colup2":colup2,"momentum":mom,\
                                "mass":mass,"helicity":helicity}
                            decayed_event.event2mg[part_number]=part_number
                            map_to_part_number[res]=part_number
                        #
                        # Extract color information so that we can write the color flow
                        #
                        colormother=pid2color[decay_struct[part]["tree"][res]["label"]]
                        colord1=pid2color[decay_struct[part]\
                                          ["tree"][res]["d1"]["label"]]
                        colord2=pid2color[decay_struct[part]\
                                          ["tree"][res]["d2"]["label"]]
                        colup1=decay_struct[part]["tree"][res]["colup1"]
                        colup2=decay_struct[part]["tree"][res]["colup2"]
                        # now figure out what is the correct color flow informatio
                        # Only consider 1,3, 3-bar and 8 color rep.
                        # Normally, the color flow needs to be determined only
                        # during the reshuffling phase, but it is currenlty assigned
                        # for each "trial event"
                        if abs(colord1)==1:
                            # d1 color singlet: d2 inherits the mother's color
                            d2colup1=colup1
                            d2colup2=colup2
                            d1colup1=0
                            d1colup2=0
                        elif abs(colord2)==1:
                            # d2 color singlet: d1 inherits the mother's color
                            d1colup1=colup1
                            d1colup2=colup2
                            d2colup1=0
                            d2colup2=0
                        elif colord1==3 and colord2==-3 and colormother ==1:
                            # singlet -> q qbar: open a new color line
                            maxcol+=1
                            d1colup1=maxcol
                            d1colup2=0
                            d2colup1=0
                            d2colup2=maxcol
                        elif colord1==3 and colord2==-3 and colormother ==8:
                            d1colup1=colup1
                            d1colup2=0
                            d2colup1=0
                            d2colup2=colup2
                        elif colord1==-3 and colord2==3 and colormother ==8:
                            d1colup1=0
                            d1colup2=colup2
                            d2colup1=colup1
                            d2colup2=0
                        elif colord1==-3 and colord2==3 and colormother ==1:
                            maxcol+=1
                            d1colup1=0
                            d1colup2=maxcol
                            d2colup1=maxcol
                            d2colup2=0
                        elif colord1==3 and colord2==8 and colormother ==3:
                            maxcol+=1
                            d2colup1=colup1
                            d2colup2=maxcol
                            d1colup1=maxcol
                            d1colup2=0
                        elif colord2==3 and colord1==8 and colormother ==3:
                            maxcol+=1
                            d1colup1=colup1
                            d1colup2=maxcol
                            d2colup1=maxcol
                            d2colup2=0
                        elif colord1==-3 and colord2==8 and colormother ==-3:
                            maxcol+=1
                            d2colup2=colup2
                            d2colup1=maxcol
                            d1colup2=maxcol
                            d1colup1=0
                        elif colord2==-3 and colord1==8 and colormother ==-3:
                            maxcol+=1
                            d1colup2=colup2
                            d1colup1=maxcol
                            d2colup2=maxcol
                            d2colup1=0
                        elif colord1==-3 and colord2==-3 and colormother == 3:
                            # epsilon-tensor style vertex: two new color lines
                            maxcol+=2
                            d1colup1=0
                            d1colup2=maxcol
                            d2colup1=0
                            d2colup2=maxcol-1
                        elif (colord1==-3 and colord2==3 and colormother == 3) or\
                             (colord1==-3 and colord2==3 and colormother == -3):
                            maxcol+=2
                            d1colup1 = 0
                            d1colup2 = maxcol
                            d2colup1 = maxcol-1
                            d2colup2 = 0
                        elif (colord1==3 and colord2==-3 and colormother == 3) or\
                             (colord1==3 and colord2==-3 and colormother == -3):
                            maxcol+=2
                            d1colup1=maxcol
                            d1colup2=0
                            d2colup1=0
                            d2colup2=maxcol-1
                        elif colord1==3 and colord2==3 and colormother == -3:
                            maxcol+=2
                            d1colup1=maxcol
                            d1colup2=0
                            d2colup1=maxcol-1
                            d2colup2=0
                        elif colord2==8 and colord1==8 and colormother ==8:
                            # g -> g g: choose randomly between the two plantings
                            maxcol+=1
                            ran = random.random()
                            if ran> 0.5:
                                d1colup2=colup2
                                d1colup1=maxcol
                                d2colup2=maxcol
                                d2colup1=colup1
                            else:
                                d1colup2=maxcol
                                d1colup1=colup1
                                d2colup2=colup2
                                d2colup1=maxcol
                        else:
                            raise Exception('color combination not treated by MadSpin (yet). (%s,%s,%s)' \
                                            % (colord1,colord2,colormother))
                        # write the first daughter
                        part_number+=1
                        index_d1_for_mom=decay_struct[part]['mg_tree'][-res-1][1]
                        mom=momenta_in_decay[index_d1_for_mom].copy()
                        pid=decay_struct[part]\
                            ["tree"][res]["d1"]["label"]
                        indexd1=decay_struct[part]["tree"][res]["d1"]["index"]
                        if ( indexd1>0):
                            # external particle: status 1, on-shell (or MC) mass
                            hel=helicities[index_d1_for_mom-1]
                            istup=1
                            external+=1
                            if not use_mc_masses or abs(pid) not in self.MC_masses:
                                mass=self.banner.get('param_card','mass', abs(pid)).value
                            else:
                                mass=self.MC_masses[abs(pid)]
                        else:
                            # intermediate resonance: record its color for the
                            # step of the loop that will decay it
                            hel=0.
                            decay_struct[part]["tree"][indexd1]["colup1"]=d1colup1
                            decay_struct[part]["tree"][indexd1]["colup2"]=d1colup2
                            istup=2
                            mass=mom.m
                            map_to_part_number[indexd1]=part_number
                        mothup1=map_to_part_number[res]
                        mothup2=map_to_part_number[res]
                        decayed_event.particle[part_number]={"pid":pid,\
                            "istup":istup,"mothup1":mothup1,"mothup2":mothup2,\
                            "colup1":d1colup1,"colup2":d1colup2,"momentum":mom,\
                            "mass":mass,"helicity":hel}
                        decayed_event.event2mg[part_number]=part_number
                        # write the second daughter
                        part_number+=1
                        index_d2_for_mom=decay_struct[part]['mg_tree'][-res-1][2]
                        mom=momenta_in_decay[index_d2_for_mom].copy()
                        pid=decay_struct[part]["tree"][res]["d2"]\
                            ["label"]
                        indexd2=decay_struct[part]["tree"][res]["d2"]["index"]
                        if ( indexd2>0):
                            hel=helicities[index_d2_for_mom-1]
                            istup=1
                            external+=1
                            if not use_mc_masses or abs(pid) not in self.MC_masses:
                                mass=self.banner.get('param_card','mass', abs(pid)).value
                            else:
                                mass=self.MC_masses[abs(pid)]
                        else:
                            hel=0.
                            istup=2
                            decay_struct[part]["tree"][indexd2]["colup1"]=d2colup1
                            decay_struct[part]["tree"][indexd2]["colup2"]=d2colup2
                            mass=mom.m
                            map_to_part_number[indexd2]=part_number
                        mothup1=map_to_part_number[res]
                        mothup2=map_to_part_number[res]
                        decayed_event.particle[part_number]={"pid":pid,"istup":istup,\
                            "mothup1":mothup1,"mothup2":mothup2,"colup1":d2colup1,\
                            "colup2":d2colup2,\
                            "momentum":mom,"mass":mass,"helicity":hel}
                        decayed_event.event2mg[part_number]=part_number
            else: # resonance in the production event
                part=curr_event.event2mg[index]
                part_number+=1
                decayed_event.particle[part_number]=curr_event.resonance[part]
                decayed_event.event2mg[part_number]=part_number
                # Here I need to check that the daughters still have the correct mothup1 and mothup2
                for part in curr_event.resonance.keys():
                    mothup1=curr_event.resonance[part]["mothup1"]
                    mothup2=curr_event.resonance[part]["mothup2"]
                    if mothup1==index:
                        if mothup2!=index: print("Warning: mothup1!=mothup2")
                        curr_event.resonance[part]["mothup1"]=part_number
                        curr_event.resonance[part]["mothup2"]=part_number
                for part in curr_event.particle.keys():
                    mothup1=curr_event.particle[part]["mothup1"]
                    mothup2=curr_event.particle[part]["mothup2"]
                    if mothup1==index:
                        if mothup2!=index: print("Warning: mothup1!=mothup2")
                        curr_event.particle[part]["mothup1"]=part_number
                        curr_event.particle[part]["mothup2"]=part_number
        # NOTE(review): 'nexternal' is set to the total number of written
        # particles (including intermediate resonances), not to the status-1
        # count tracked in 'external' -- confirm this matches the Event class.
        decayed_event.nexternal=part_number
        return decayed_event
def add_loose_decay(self):
    """ in presence of multiprocess with multiple decay options all the
    BR might not be identical. In such case, the total number of events should
    drop such that the events file is still a unweighted physical events sample.
    This routines add null decay (=> not written events) if appropriate."""
    first = True  # ensures the informational log message is emitted only once
    # largest total branching ratio over every production matrix element
    max_br = max([m['total_br'] for m in self.all_ME.values()])
    if max_br >= 1:
        # tolerate tiny numerical overshoot above 1; anything larger is a real error
        if max_br > 1.0001:
            raise MadSpinError('BR is larger than one.')
        max_br = 1
    for production in self.all_ME.values():
        if production['total_br'] < max_br:
            # already numerically at the maximum: no padding needed
            if production['total_br'] > 0.9999:
                continue
            if first:
                first = False
                min_br = min([m['total_br'] for m in self.all_ME.values()])
                logger.info('''All production process does not have the same total Branching Ratio.
Therefore the total number of events after decay will be lower than the original file.
[max_br = %s, min_br = %s]''' % (max_br, min_br),'$MG:BOLD')
            # null decay channel: events that fall into it are simply not written
            fake_decay = {'br': max_br - production['total_br'],
                          'path': None, 'matrix_element': None,
                          'finals': None, 'base_order': None,
                          'decay_struct':None, 'decay_tag': None}
            production['decays'].append(fake_decay)
            production['total_br'] = max_br
def write_banner_information(self, eff=1):
    """Rescale the banner's cross-section / event-count / <init> information
    to account for the decays, then write the banner to self.outputfile.

    eff: efficiency factor (fraction of events kept after decay), default 1.
    """
    ms_banner = ""
    cross_section = True # tell if possible to write the cross-section in advance
    total_br = []
    self.br_per_id = {}
    for production in self.all_ME.values():
        one_br = 0
        partial_br = 0
        for decay in production['decays']:
            if not decay['decay_tag']:
                # fake/null decay (see add_loose_decay): no tag, no max_weight
                cross_section = False
                one_br += decay['br']
                continue
            partial_br += decay['br']
            ms_banner += "# %s\n" % ','.join(decay['decay_tag']).replace('\n',' ')
            ms_banner += "# BR: %s\n# max_weight: %s\n" % (decay['br'], decay['max_weight'])
            one_br += decay['br']
        # track the (partial) BR per production process id; -1 flags that the
        # same id was seen with inconsistent values
        if production['Pid'] not in self.br_per_id:
            self.br_per_id[production['Pid']] = partial_br
        elif self.br_per_id[production['Pid']] != partial_br:
            self.br_per_id[production['Pid']] = -1
        total_br.append(one_br)
    if __debug__:
        # after add_loose_decay all totals should agree up to numerics
        for production in self.all_ME.values():
            assert production['total_br'] - min(total_br) < 1e-4
    self.branching_ratio = max(total_br) * eff
    #self.banner['madspin'] += ms_banner
    # Update cross-section in the banner
    if 'mggenerationinfo' in self.banner:
        mg_info = self.banner['mggenerationinfo'].split('\n')
        for i,line in enumerate(mg_info):
            if 'Events' in line:
                if eff == 1:
                    self.err_branching_ratio = 0
                    continue
                initial_event = int(mg_info[i].split()[-1])
                nb_event = int(initial_event * eff)
                mg_info[i] = '# Number of Events : %i' % nb_event
                # binomial-style error estimate: use the smaller of the
                # kept/dropped counts for the sqrt
                if eff >0.5:
                    self.err_branching_ratio = max(total_br) * math.sqrt(initial_event - eff * initial_event)/initial_event
                else:
                    self.err_branching_ratio = max(total_br) * math.sqrt(eff * initial_event)/initial_event
                continue
            if ':' not in line:
                continue
            info, value = line.rsplit(':',1)
            try:
                value = float(value)
            except:
                continue
            # NOTE(review): both branches below are identical, so the
            # `cross_section` flag currently has no effect here -- confirm
            # whether the else-branch was meant to differ.
            if cross_section:
                mg_info[i] = '%s : %s' % (info, value * self.branching_ratio)
            else:
                mg_info[i] = '%s : %s' % (info, value * self.branching_ratio)
        self.banner['mggenerationinfo'] = '\n'.join(mg_info)
    self.cross = 0
    self.error = 0
    if 'init' in self.banner and (eff!=1 or not any(v==-1 for v in self.br_per_id.values())) \
       and not self.options['onlyhelicity']:
        new_init =''
        curr_proc = 0
        has_missing=False
        for line in self.banner['init'].split('\n'):
            # a per-process <init> data line carries exactly 4 numbers:
            # cross-section, error, maximum weight, process id
            if len(line.split()) != 4:
                new_init += '%s\n' % line
            else:
                curr_proc += 1
                data = [float(nb) for nb in line.split()]
                id = int(data[-1])
                if id in self.br_per_id and not any(v==-1 for v in self.br_per_id.values()):
                    # rescale by the BR known for this specific process id
                    data[:3] = [data[i] * self.br_per_id[id] for i in range(3)]
                else:
                    # fall back to the global branching ratio
                    data[:3] = [ data[i] * self.branching_ratio for i in range(3)]
                    has_missing=True
                new_init += ' %.12E %.12E %.12E %i\n' % tuple(data)
                cross, error = [float(d) for d in data[:2]]
                self.cross += cross
                self.error += error**2  # errors add in quadrature
        self.banner['init'] = new_init
        self.error = math.sqrt(self.error)
        if has_missing and curr_proc not in [0,1]:
            logger.warning('''The partial cross section for each subprocess can not be determine. due
Reason: multiple final state in the same subprocess (and the presence of multiple BR)
Consequence: the <init> information of the lhe will therefore be incorrect. Please correct it if needed.''')
    self.banner.write(self.outputfile, close_tag=False)
def terminate_fortran_executables(self, path_to_decay=0 ):
    """routine to terminate all fortran executables

    If path_to_decay is falsy, every calculator process is shut down;
    otherwise only the 'full' calculator attached to that decay path.
    """
    if not path_to_decay:
        for (mode, path) in self.calculator:
            if mode=='decay':
                external = self.calculator[(mode, path)]
                try:
                    external.stdin.close()
                except Exception as error:
                    misc.sprint(error)
                    continue
                try:
                    external.stdout.close()
                except Exception as error:
                    misc.sprint(error)
                    continue
                external.terminate()
                del external
            elif mode=='full':
                # ask the fortran code to dump its random-generator state so
                # that a later run can resume the same sequence
                stdin_text="5 0 0 0 0\n".encode() # before closing, write down the seed
                external = self.calculator[('full',path)]
                try:
                    external.stdin.write(stdin_text)
                    external.stdin.flush()
                except Exception as error:
                    misc.sprint(error)
                    raise
                    # NOTE(review): this `continue` is unreachable (placed
                    # after `raise`) -- dead code, confirm intent.
                    continue
                ranmar_state=external.stdout.readline().decode()
                ranmar_file=pjoin(path,'ranmar_state.dat')
                ranmar=open(ranmar_file, 'w')
                ranmar.write(ranmar_state)
                ranmar.close()
                try:
                    external.stdin.close()
                except Exception as error:
                    misc.sprint(error)
                try:
                    external.stdout.close()
                except Exception as error:
                    misc.sprint(error)
                external.terminate()
                del external
            else:
                misc.sprint('not closed', mode, type(mode))
    else:
        try:
            external = self.calculator[('full', path_to_decay)]
        except Exception:
            # no calculator running for that path: nothing to do
            pass
        else:
            stdin_text="5 0 0 0 0"
            external.stdin.write(stdin_text)
            external.stdin.close()
            external.stdout.close()
            external.terminate()
            del external
    # drop all bookkeeping entries once processes have been terminated
    self.calculator = {}
class decay_all_events_onshell(decay_all_events):
    """special mode for onshell production"""

    @misc.mute_logger()
    @misc.set_global()
    def generate_all_matrix_element(self):
        """generate the full series of matrix element needed by Madspin.
        i.e. the undecayed and the decay one. And associate those to the
        madspin production_topo object"""

        # 1. compute the partial width
        # 2. compute the production matrix element
        # 3. create the all_topology object
        # 4. compute the full matrix element (use the partial to throw away
        # pointless decay.
        # 5. add the decay information to the all_topology object (with branching
        # ratio)

        # 0. clean previous run ------------------------------------------------
        path_me = self.path_me
        try:
            shutil.rmtree(pjoin(path_me,'madspin_me'))
        except Exception:
            pass

        # 1. compute the partial width------------------------------------------
        #self.get_branching_ratio()

        # 2. compute the production matrix element -----------------------------
        # collect the process definitions from the banner's proc_card
        processes = [line[9:].strip() for line in self.banner.proc_card
                     if line.startswith('generate')]
        processes += [' '.join(line.split()[2:]) for line in self.banner.proc_card
                      if re.search('^\s*add\s+process', line)]
        mgcmd = self.mgcmd
        modelpath = self.model.get('modelpath+restriction')
        commandline="import model %s" % modelpath
        if not self.model.mg5_name:
            commandline += ' --modelname'
        mgcmd.exec_cmd(commandline)
        # Handle the multiparticle of the banner
        #for name, definition in self.mscmd.multiparticles:
        if hasattr(self.mscmd, 'multiparticles_ms'):
            for name, pdgs in self.mscmd.multiparticles_ms.items():
                if name == 'all':
                    continue
                #self.banner.get('proc_card').get('multiparticles'):
                mgcmd.do_define("%s = %s" % (name, ' '.join(repr(i) for i in pdgs)))
        mgcmd.exec_cmd("set group_subprocesses False")
        logger.info('generating the production square matrix element for onshell')
        start = time.time()
        commandline=''
        for proc in processes:
            if '[' in proc:
                # NLO-style definition: expand it to its LO equivalent
                commandline += reweight_interface.ReweightInterface.get_LO_definition_from_NLO(proc, mgcmd._curr_model)
            else:
                commandline += 'add process %s ;' % proc
        # commandline = commandline.replace('add process', 'generate',1)
        # logger.info(commandline)
        #
        # mgcmd.exec_cmd(commandline, precmd=True)
        # commandline = 'output standalone_msP %s %s' % \
        # (pjoin(path_me,'production_me'), ' '.join(self.list_branches.keys()))
        # mgcmd.exec_cmd(commandline, precmd=True)
        # logger.info('Done %.4g' % (time.time()-start))

        # 3. Create all_ME + topology objects ----------------------------------
        # matrix_elements = mgcmd._curr_matrix_elements.get_matrix_elements()
        # self.all_ME.adding_me(matrix_elements, pjoin(path_me,'production_me'))

        # 4. compute the full matrix element -----------------------------------
        logger.info('generating the full matrix element squared (with decay)')
        # start = time.time()
        to_decay = list(self.mscmd.list_branches.keys())
        decay_text = []
        for decays in self.mscmd.list_branches.values():
            for decay in decays:
                if '=' not in decay:
                    decay += ' QCD=99'
                if ',' in decay:
                    # multi-step decay chain: needs surrounding parentheses
                    decay_text.append('(%s)' % decay)
                else:
                    decay_text.append(decay)
        decay_text = ', '.join(decay_text)
        # commandline = ''
        for proc in processes:
            if not proc.strip().startswith(('add','generate')):
                proc = 'add process %s' % proc
            commandline += self.get_proc_with_decay(proc, decay_text, mgcmd._curr_model)

        # 5. add the decay information to the all_topology object --------------
        # for matrix_element in mgcmd._curr_matrix_elements.get_matrix_elements():
        # me_path = pjoin(path_me,'full_me', 'SubProcesses', \
        # "P%s" % matrix_element.get('processes')[0].shell_string())
        # self.all_ME.add_decay(matrix_element, me_path)
        # 5.b import production matrix elements (+ related info) in the full process directory
        # list_prodfiles=['matrix_prod.f','configs_production.inc','props_production.inc','nexternal_prod.inc']
        # for tag in self.all_ME:
        # prod_path=self.all_ME[tag]['path']
        # nfinal=len(self.all_ME[tag]['base_order'][1])
        # for dico in self.all_ME[tag]['decays']:
        # full_path=dico['path']
        #print prod_path
        #print full_path
        #print ' '
        # for item in list_prodfiles:
        #print full_path
        # prodfile=pjoin(prod_path,item)
        # destination=pjoin(full_path,item)
        # shutil.copyfile(prodfile, destination)
        # we need to write the file config_decays.inc
        # self.generate_configs_file(nfinal,dico,full_path)
        # if self.options["onlyhelicity"]:
        # return

        # 6. generate decay only part ------------------------------------------
        logger.info('generate matrix element for decay only (1 - > N).')
        # start = time.time()
        # commandline = ''
        i=0
        for processes in self.list_branches.values():
            for proc in processes:
                # each decay becomes its own standalone process group (@i)
                commandline+="add process %s @%i --no_warning=duplicate --standalone;" % (proc,i)
                i+=1
        commandline = commandline.replace('add process', 'generate',1)
        mgcmd.exec_cmd(commandline, precmd=True)
        # remove decay with 0 branching ratio.
        #mgcmd.remove_pointless_decay(self.banner.param_card)
        #
        commandline = 'output standalone %s' % pjoin(path_me,'madspin_me')
        logger.info(commandline)
        mgcmd.exec_cmd(commandline, precmd=True)
        logger.info('Done %.4g' % (time.time()-start))

        self.all_me = {}
        # store information about matrix element
        for matrix_element in mgcmd._curr_matrix_elements.get_matrix_elements():
            me_string = matrix_element.get('processes')[0].shell_string()
            for me in matrix_element.get('processes'):
                dirpath = pjoin(path_me,'madspin_me', 'SubProcesses', "P%s" % me_string)
                # get the orignal order:
                # (the list comprehension below also fills `initial` as a side
                # effect: `initial.append` returns None, so non-final legs are
                # routed into `initial` and excluded from `final`)
                initial = []
                final = [l.get('id') for l in me.get_legs_with_decays()\
                         if l.get('state') or initial.append(l.get('id'))]
                order = (tuple(initial), tuple(final))
                initial.sort(), final.sort()
                tag = (tuple(initial), tuple(final))
                self.all_me[tag] = {'pdir': "P%s" % me_string, 'order': order}
        return self.all_me

    def compile(self):
        """Compile the standalone madspin matrix elements (Source, then all
        SubProcesses), using the configured number of cores."""
        logger.info('Compiling code')
        #my_env = os.environ.copy()
        #os.environ["GFORTRAN_UNBUFFERED_ALL"] = "y"
        misc.compile(cwd=pjoin(self.path_me,'madspin_me', 'Source'),
                     nb_core=self.mgcmd.options['nb_core'])
        misc.compile(['all'],cwd=pjoin(self.path_me,'madspin_me', 'SubProcesses'),
                     nb_core=self.mgcmd.options['nb_core'])

    def save_to_file(self, *args):
        """Delegate to the parent implementation while silencing stdout."""
        import sys
        with misc.stdchannel_redirected(sys.stdout, os.devnull):
            return super(decay_all_events_onshell,self).save_to_file(*args)
| StarcoderdataPython |
1698130 | <reponame>weijiadeng-uber/neuropod<filename>source/bazel/python.bzl
# https://docs.bazel.build/versions/master/skylark/repository_rules.html
def _impl(repository_ctx):
    """Provision the Python distribution used by the build.

    On macOS the locally installed interpreter's directory is symlinked in;
    elsewhere a prebuilt tarball matching NEUROPOD_PYTHON_VERSION is
    downloaded. A BUILD file is then generated from the given template.
    """
    # The `or` pattern below handles empty strings and unset env variables
    # Using a default value only handles unset env variables
    version = repository_ctx.os.environ.get("NEUROPOD_PYTHON_VERSION") or "2.7"

    IS_MAC = repository_ctx.os.name.startswith("mac")

    if IS_MAC:
        # Get the libdir
        res = repository_ctx.execute(["python" + version, "-c", "import os; from distutils import sysconfig; print(os.path.dirname(sysconfig.get_config_var('LIBDIR')))"])
        if res.return_code != 0:
            fail("Error getting python libdir: " + res.stderr)

        # Create a symlink
        python_path = repository_ctx.path(res.stdout.strip("\n"))
        for f in python_path.readdir():
            repository_ctx.symlink(f, f.basename)
    else:
        # TODO(vip): Add mac binaries
        MAPPING = {
            # Linux
            "2.7-linux": {
                "url": "https://github.com/VivekPanyam/python-prebuilts/releases/download/v0.0.1/python-2.7.17.tar.gz",
                "sha256": "8edb75fb76873ae2eba21ef5c677cf29864b33f6abbf3928d010baab28dcc67e",
            },
            "3.5-linux": {
                "url": "https://github.com/VivekPanyam/python-prebuilts/releases/download/v0.0.1/python-3.5.9.tar.gz",
                "sha256": "d5b83a4565ccd746ce312fcca9998c2100aee37db807d37f42ff43c17e9f5dd7",
            },
            "3.6-linux": {
                "url": "https://github.com/VivekPanyam/python-prebuilts/releases/download/v0.0.1/python-3.6.10.tar.gz",
                "sha256": "f8d2e7b5468464ed653f832b363ebf228108ecc1744f0915cdbed2ab31eda99a",
            },
            "3.7-linux": {
                "url": "https://github.com/VivekPanyam/python-prebuilts/releases/download/v0.0.1/python-3.7.7.tar.gz",
                "sha256": "53eb870e33b7581b44f95f79fdbeb275ab3a03794270d3f5cb64699d7c65e2fa",
            },
            "3.8-linux": {
                "url": "https://github.com/VivekPanyam/python-prebuilts/releases/download/v0.0.1/python-3.8.2.tar.gz",
                "sha256": "8a93f738894db779c282a02fb7a88e4911538e26ed834a23bb1bc9f3e2fe9e04",
            },
        }

        download_mapping = MAPPING["{}-{}".format(
            version,
            "mac" if IS_MAC else "linux",
        )]

        download_url = download_mapping["url"]
        sha256 = download_mapping["sha256"]
        repository_ctx.download_and_extract(download_url, sha256 = sha256)

    # Generate a build file based on the template
    repository_ctx.template(
        "BUILD.bazel",
        repository_ctx.path(Label(repository_ctx.attr.build_file_template)),
        substitutions = {
            "{PYTHON_VERSION}": version,
        },
    )
# Repository rule that provisions the Python toolchain used by the build
# (see _impl above). `local = True` because it inspects the host environment.
python_repository = repository_rule(
    implementation = _impl,
    local = True,
    attrs = {
        # Label of the template used to generate the repository's BUILD file
        "build_file_template": attr.string(mandatory = True),
    },
)
| StarcoderdataPython |
87721 | <reponame>gguilherme42/Livro-de-Python
def menu():
    """Print the operation menu (labels are in Portuguese)."""
    # blank line to visually separate the menu from previous output
    print("")
    # NOTE(review): the f-prefix below is unnecessary (no placeholders)
    print(f'''<------ MENU ------>
A - para adição
S - para subtração
D - para divisão
M - para múltiplicação
X - para sair''')
def Aritmetica(a, operacao):
    """Print the 1..10 table of `a` for the requested operation.

    a: the fixed left operand.
    operacao: 'A' (add), 'S' (subtract), 'D' (divide) or 'M' (multiply).
    Any other code prints nothing, matching the original behaviour.
    """
    # The original repeated the same printing loop in four copy-pasted
    # branches (the last one using `if` instead of `elif`). Mapping each
    # code to its display symbol and implementation removes the duplication.
    ops = {'A': ('+', lambda a, b: a + b),
           'S': ('-', lambda a, b: a - b),
           'D': ('/', lambda a, b: a / b),
           'M': ('*', lambda a, b: a * b)}
    entry = ops.get(operacao)
    if entry is None:
        return  # unknown operation: silently do nothing (original behaviour)
    symbol, op = entry
    for i in range(1, 11):
        print(f'{a} {symbol} {i} = {op(a, i)}')
# Interactive driver: ask for a number, then run operations until 'X'.
while True:
    a = int(input('Número: '))
    menu()
    while True:
        # NOTE(review): `[0]` raises IndexError if the user enters an empty
        # line -- confirm whether that is acceptable for this exercise.
        op = input('Operação: ').strip().upper()[0]
        if op == 'X':
            break
        elif op not in 'ASDM':
            print('Digite uma operação válida')
        else:
            Aritmetica(a, op)
    # NOTE(review): the inner loop only exits when op == 'X', so this check
    # is always true and the outer loop never asks for a new number.
    if op == 'X':
        break
| StarcoderdataPython |
4839140 | <filename>Python3/config.py
# DEVELOPER CONFIG (EDIT NOT RECOMMENDED)
BUILD_VERSION = "v1.13.23-public"   # build identifier shown to users
AUTHOR = "<NAME>"                   # program author
PROGRAM_NAME = "GUI Hangman"        # window/application title
TOTAL_GUESSES_ALLOWED = 5           # wrong guesses before the game is lost
BLANK = "_____"                     # placeholder shown for an unrevealed word
"""
CREDITS:
'back.png' Icon made by 'Kiranshastry' from www.flaticon.com (24px)
GUI HANGMAN
"""
"""
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
3374695 | <gh_stars>1-10
import logging
from flask import Flask, render_template, request, flash, redirect, url_for
from esipy import App, EsiClient, EsiSecurity
from lib import CharacterExplorer, all_esi_read_scopes
app = Flask(__name__)
# config.json must provide CLIENT_ID, SECRET_KEY and REDIRECT_URI (EVE SSO app)
app.config.from_json('config.json')
# identify this application to CCP's ESI API on every request
esi_headers = {'User-Agent': 'EVE Character Explorer | <EMAIL>'}
esi_app = App.create('https://esi.tech.ccp.is/latest/swagger.json?datasource=tranquility')
# OAuth/SSO helper used for login URLs and token exchange
esi_security = EsiSecurity(
    app=esi_app,
    client_id=app.config['CLIENT_ID'],
    secret_key=app.config['SECRET_KEY'],
    redirect_uri=app.config['REDIRECT_URI'],
    headers=esi_headers
)
# authenticated HTTP client shared by all request handlers
esi_client = EsiClient(
    security=esi_security,
    headers=esi_headers
)
@app.route('/')
def index():
    """Landing page with the EVE SSO login link (requests all read scopes)."""
    return render_template('index.html', sso_go=esi_security.get_auth_uri(scopes=all_esi_read_scopes))
@app.route('/view', methods=['POST'])
def view():
    """Render the character explorer for a POSTed ESI refresh token."""
    token = request.form.get('refresh_token')
    if not token:
        flash('No token supplied in request', 'warning')
        return redirect(url_for('index'))
    try:
        explorer = CharacterExplorer(esi_app, esi_security, esi_client, token)
        return render_template('view.html', explorer=explorer)
    except Exception as e:
        # token may be revoked/expired or ESI unavailable; log and bounce home
        logging.exception('Could not load token data: ' + str(e))
        flash('Could not load token data', 'warning')
        return redirect(url_for('index'))
@app.route('/eve/callback')
def eve_callback():
    """OAuth callback: exchange the EVE SSO auth code for a refresh token.

    Shows the refresh token to the user on success; redirects home with a
    flash message on any failure.
    """
    code = request.args.get('code')
    if not code:
        flash('Login unsuccessful', 'warning')
        return redirect(url_for('index'))
    try:
        tokens = esi_security.auth(code)
    except Exception:
        # Narrowed from a bare `except:` (which would also swallow
        # SystemExit/KeyboardInterrupt) and logged so that token-exchange
        # failures are diagnosable instead of silent.
        logging.exception('Could not get refresh token')
        flash('Could not get refresh token', 'warning')
        return redirect(url_for('index'))
    return render_template('token_show.html', token=tokens['refresh_token'])
@app.route('/mail/<token>/<int:mail_id>')
def get_mail_body(token, mail_id):
    """Fetch a single mail body on demand (load_now=False skips the full
    character-data load, so only the one mail is retrieved)."""
    explorer = CharacterExplorer(esi_app, esi_security, esi_client, token, load_now=False)
    return explorer.get_mail_body(mail_id)
@app.template_filter('mail_recipients')
def filter_mail_recipients(data):
    """Jinja filter: join recipient names into a comma-separated string."""
    names = (recipient['recipient_name'] for recipient in data)
    return ', '.join(names)
| StarcoderdataPython |
3262575 | <filename>setup.py
from setuptools import setup
# Package metadata for pyprince (Prince XML wrapper for HTML-to-PDF).
setup(
    name="pyprince",
    version="0.5",
    license="MIT",
    url="https://github.com/sufio/python-pyprince",
    description="Prince xml python wrapper for converting HTML to PDF",
    author="Sufio.com",
    author_email="<EMAIL>",
    tests_require=["pytest"],
    packages=["pyprince"],
    include_package_data=True,
)
| StarcoderdataPython |
3301937 | <filename>network_model.py
import numpy as np
import pandas as pd
import geopandas as gpd
class network_model():
    """Electric-grid network model built from transmission lines, substations,
    utility service areas and generators (all given as vector-file paths)."""

    def __init__(self, lines, subs, util, gen, bbox=None, loads=None, pop_dens=None):
        self.lines = gpd.read_file(lines)
        self.subs = gpd.read_file(subs)
        self.util = gpd.read_file(util)
        self.gen = gpd.read_file(gen)
        if loads is None:
            if pop_dens is not None:
                # derive substation loads from population density raster
                loads = self.partition_loads(self.construct_voronoi(), pop_dens)
        # NOTE(review): `edges` and `node_gen` are read here but never
        # assigned or accepted as parameters (NameError at runtime) --
        # presumably they were meant to be constructor arguments; confirm.
        if edges is None:
            edges = self.line_to_sub()
        if node_gen is None:
            node_gen = self.gen_to_sub()
        # Net power per substation = generation capacity - summer load.
        # NOTE(review): `gen` below is the raw path argument, not the
        # GeoDataFrame -- likely should be `self.gen` (or `node_gen`); confirm.
        self.net = pd.concat([loads.groupby('SUB_ID').sum()['summer_loa'], gen.groupby('SUB_ID').sum()['S_CAP_MW'].fillna(0)], axis=1, join='outer')[['summer_loa', 'S_CAP_MW']].fillna(0)
        self.net = self.net['S_CAP_MW'] - self.net['summer_loa']
        if bbox is not None:
            self.nodes, self.edges, self.loads = self.set_bbox(*bbox)
        # Set up graph
        # NOTE(review): `nx` (networkx) is never imported in this module.
        self.G = nx.Graph()
        for i in self.loads.index:
            self.G.add_node(i, load=self.loads[i])
        for i in self.edges.index:
            row = self.edges.loc[i]
            self.G.add_edge(*tuple(row[['SUB_1', 'SUB_2']].astype(int).values),
                            tot_kv=row['TOT_CAP_KV'],
                            num_lines=int(row['NUM_LINES']),
                            length=row['LENGTH'])

    def construct_voronoi(self):
        """Partition each utility service territory into per-substation
        service polygons (Voronoi cells clipped to the utility boundary)."""
        from scipy import spatial
        from geopandas import tools
        from shapely import geometry
        # NOTE(review): the attributes set in __init__ are self.util/self.subs;
        # `self.sub` below will raise AttributeError -- confirm intended name.
        util = self.util
        sub = self.sub
        # Fix utility service areas with invalid geometry
        invalid_util = util[~util['geometry'].apply(lambda x: x.is_valid)]
        util.loc[invalid_util.index, 'geometry'] = util.loc[invalid_util.index, 'geometry'].apply(lambda x: x.buffer(0))
        # Spatially join substations with utility service territories
        sub_util = tools.sjoin(sub, util, op='within', how='left')
        # Construct voronoi polygons for each substation
        sub_xy = np.vstack(sub['geometry'].apply(lambda u: np.concatenate(u.xy)).values)
        vor = spatial.Voronoi(sub_xy)
        # NOTE(review): voronoi_finite_polygons_2d is defined without a `self`
        # parameter (see module level below); calling it via `self.` as done
        # here will not work as written -- confirm intended placement.
        reg, vert = self.voronoi_finite_polygons_2d(vor,1)
        # Convert voronoi diagram to polygons and insert into GeoDataFrame
        v_poly = gpd.GeoSeries(pd.Series(reg).apply(lambda x: geometry.Polygon(vert[x])))
        v_gdf = gpd.GeoDataFrame(pd.concat([sub.drop('geometry', axis=1), v_poly], axis=1)).rename(columns={0:'geometry'})
        v_gdf.crs = sub.crs
        # Spatially join utility service areas with voronoi polygons
        j = tools.sjoin(util, v_gdf, op='intersects')
        j['right_geom'] = j['UNIQUE_ID_right'].map(v_gdf.set_index('UNIQUE_ID')['geometry'])
        j = j.dropna(subset=['geometry', 'right_geom']).set_index('UNIQUE_ID_left')
        # Clip voronoi polygons to utility service area
        j_inter = j.apply(lambda x: x['geometry'].intersection(x['right_geom']), axis=1)
        # Create output GeoDataFrame with relevant fields
        outdf = gpd.GeoDataFrame(pd.concat([j[['UNIQUE_ID_right', 'SUMMERPEAK', 'WINTERPEAK']].reset_index(), j_inter.reset_index()[0]], axis=1), crs=sub.crs).rename(columns={0:'geometry', 'UNIQUE_ID_left':'UTIL_ID', 'UNIQUE_ID_right':'SUB_ID'})
        return outdf

    def partition_loads(self, vor, pop_dens):
        """Split each utility's peak load across its substation polygons in
        proportion to the population contained in each polygon."""
        # voronoi_stats.shp
        import rasterstats
        import rasterio
        #### FOR ICLUS, BEST PROJECTION IS EPSG 5070: NAD83/CONUS ALBERS
        # vor = '/home/akagi/voronoi_intersect.shp'
        # pop_dens = '/home/akagi/Desktop/rastercopy.tif'
        # gdf = gpd.GeoDataFrame.from_file(vor)
        # rast = rasterio.open(pop_dens)
        # NOTE(review): if `vor` is already a GeoDataFrame (as when called
        # from __init__), `gdf` is never assigned -> NameError below; confirm.
        if isinstance(vor, str):
            gdf = gpd.read_file(vor)
        if isinstance(pop_dens, str):
            pop_dens = rasterio.open(pop_dens)
        zones = gdf['geometry'].to_crs(pop_dens.crs)
        # zonal population statistics per substation polygon
        rstats = pd.DataFrame.from_dict(rasterstats.zonal_stats(zones, pop_dens, stats=['sum', 'mean']))
        util_stats = pd.concat([rstats, gdf], join='inner', axis=1)
        tot_util = util_stats.groupby('UTIL_ID').sum()['sum']
        util_stats['util_tot'] = util_stats['UTIL_ID'].map(tot_util)
        # fraction of the utility's population living in this polygon
        util_stats['load_frac'] = util_stats['sum']/util_stats['util_tot']
        util_stats['summer_load'] = util_stats['SUMMERPEAK']*util_stats['load_frac']
        util_stats['winter_load'] = util_stats['WINTERPEAK']*util_stats['load_frac']
        return util_stats

    def gen_to_sub(self):
        """Assign each generator to its nearest substation (cKDTree query)."""
        from shapely import geometry
        from scipy import spatial
        # NOTE(review): `self.sub` should presumably be `self.subs`, and `s`
        # below is undefined (probably meant `sub`) -- confirm.
        sub = self.sub
        gen = self.gen
        # Find nearest neighbors
        tree = spatial.cKDTree(np.vstack(sub.geometry.apply(lambda x: x.coords[0]).values))
        node_query = tree.query(np.vstack(gen.geometry.apply(lambda x: x.coords[0]).values))
        crosswalk = pd.DataFrame(np.column_stack([gen[['UNIQUE_ID', 'S_CAP_MW']].values, s.iloc[node_query[1]]['UNIQUE_ID'].values.astype(int)]), columns=['GEN_ID', 'S_CAP_MW', 'SUB_ID'])
        crosswalk = crosswalk[['GEN_ID', 'SUB_ID', 'S_CAP_MW']]
        return crosswalk
        #gen_to_sub_static.csv

    def line_to_sub(self):
        """Snap each transmission line's endpoints to the nearest substations
        and return a line->(sub, sub) crosswalk table."""
        from shapely import geometry
        from scipy import spatial
        t = self.lines
        s = self.subs
        # Extract start and end nodes in networkK
        start = t.geometry[t.geometry.type=='LineString'].apply(lambda x: np.array([x.xy[0][0], x.xy[1][0]])).append(t.geometry[t.geometry.type=='MultiLineString'].apply(lambda x: np.hstack([i.xy for i in x])[:,0])).sort_index()
        end = t.geometry[t.geometry.type=='LineString'].apply(lambda x: np.array([x.xy[0][-1], x.xy[1][-1]])).append(t.geometry[t.geometry.type=='MultiLineString'].apply(lambda x: np.hstack([i.xy for i in x])[:,-1])).sort_index()
        # Find nearest neighbors
        tree = spatial.cKDTree(np.vstack(s.geometry.apply(lambda x: x.coords[0]).values))
        start_node_query = tree.query(np.vstack(start.values))
        end_node_query = tree.query(np.vstack(end.values))
        # Create crosswalk table (ERR_* columns hold snap distances)
        crosswalk = pd.DataFrame(np.column_stack([t[['UNIQUE_ID', 'TOT_CAP_KV', 'NUM_LINES', 'Shape_Leng']].values, s.iloc[start_node_query[1]][['UNIQUE_ID', 'NAME']].values, start_node_query[0], s.iloc[end_node_query[1]][['UNIQUE_ID', 'NAME']].values, end_node_query[0]]), columns=['TRANS_ID', 'TOT_CAP_KV', 'NUM_LINES', 'LENGTH', 'SUB_1', 'NAME_1', 'ERR_1', 'SUB_2', 'NAME_2', 'ERR_2'])
        crosswalk = crosswalk[['TRANS_ID', 'SUB_1', 'SUB_2', 'NAME_1', 'NAME_2', 'ERR_1', 'ERR_2', 'TOT_CAP_KV', 'NUM_LINES', 'LENGTH']]
        return crosswalk
        #edges.csv

    def set_bbox(self, xmin, ymin, xmax, ymax):
        """Clip the network to a bounding box, redistributing the clipped
        region's net power imbalance to the boundary-crossing substations."""
        t = self.lines
        s = self.subs
        edges = self.edges
        net = self.net
        # NOTE(review): tuple() takes a single iterable -- this raises
        # TypeError; should be `bbox = (xmin, ymin, xmax, ymax)`. Also `shapely`
        # is never imported at module scope, and self.edges/self.net are set
        # *after* this method is called from __init__ -- confirm call order.
        bbox = tuple(xmin, ymin, xmax, ymax)
        bbox_poly = shapely.geometry.MultiPoint(np.vstack(np.dstack(np.meshgrid(*np.hsplit(bbox, 2)))).tolist()).convex_hull
        bbox_lines = t[t.intersects(bbox_poly)]['UNIQUE_ID'].astype(int).values
        bbox_edges = edges[edges['TRANS_ID'].isin(bbox_lines)]
        bbox_edges = bbox_edges[bbox_edges['SUB_1'] != bbox_edges['SUB_2']]
        bbox_nodes = np.unique(bbox_edges[['SUB_1', 'SUB_2']].values.astype(int).ravel())
        # Outer lines
        edgesubs = pd.merge(t[t.intersects(bbox_poly.boundary)], edges, left_on='UNIQUE_ID', right_on='TRANS_ID')[['SUB_1_y', 'SUB_2_y']].values.ravel().astype(int)
        # Nodes just outside of bbox (entering)
        outer_nodes = np.unique(edgesubs[~np.in1d(edgesubs, s[s.within(bbox_poly)]['UNIQUE_ID'].values.astype(int))])
        # weight the transfer by each boundary substation's max voltage
        weights = s.loc[s['UNIQUE_ID'].astype(int).isin(edgesubs[~np.in1d(edgesubs, s[s.within(bbox_poly)]['UNIQUE_ID'].values.astype(int))])].set_index('UNIQUE_ID')['MAX_VOLT'].sort_index()
        transfers = -net[bbox_nodes].sum()*(weights/weights.sum())
        bbox_loads = net[bbox_nodes] + transfers.reindex(bbox_nodes).fillna(0)
        return bbox_nodes, bbox_edges, bbox_loads
def voronoi_finite_polygons_2d(vor, radius=None):
    """
    Reconstruct infinite voronoi regions in a 2D diagram to finite
    regions.
    Parameters
    ----------
    vor : Voronoi
        Input diagram
    radius : float, optional
        Distance to 'points at infinity'.
    Returns
    -------
    regions : list of tuples
        Indices of vertices in each revised Voronoi regions.
    vertices : list of tuples
        Coordinates for revised Voronoi vertices. Same as coordinates
        of input vertices, with 'points at infinity' appended to the
        end.
    """
    if vor.points.shape[1] != 2:
        raise ValueError("Requires 2D input")
    new_regions = []
    new_vertices = vor.vertices.tolist()
    center = vor.points.mean(axis=0)
    if radius is None:
        # default: far enough outside the point cloud's bounding box
        radius = vor.points.ptp().max()*2
    # Construct a map containing all ridges for a given point
    all_ridges = {}
    for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
        all_ridges.setdefault(p1, []).append((p2, v1, v2))
        all_ridges.setdefault(p2, []).append((p1, v1, v2))
    # Reconstruct infinite regions
    for p1, region in enumerate(vor.point_region):
        vertices = vor.regions[region]
        if all([v >= 0 for v in vertices]):
            # finite region
            new_regions.append(vertices)
            continue
        # reconstruct a non-finite region
        ridges = all_ridges[p1]
        new_region = [v for v in vertices if v >= 0]
        for p2, v1, v2 in ridges:
            if v2 < 0:
                # normalize so the missing (infinite) vertex is v1
                v1, v2 = v2, v1
            if v1 >= 0:
                # finite ridge: already in the region
                continue
            # Compute the missing endpoint of an infinite ridge
            t = vor.points[p2] - vor.points[p1] # tangent
            t /= np.linalg.norm(t)
            n = np.array([-t[1], t[0]]) # normal
            midpoint = vor.points[[p1, p2]].mean(axis=0)
            # point the normal away from the diagram's center
            direction = np.sign(np.dot(midpoint - center, n)) * n
            far_point = vor.vertices[v2] + direction * radius
            new_region.append(len(new_vertices))
            new_vertices.append(far_point.tolist())
        # sort region counterclockwise
        vs = np.asarray([new_vertices[v] for v in new_region])
        c = vs.mean(axis=0)
        angles = np.arctan2(vs[:,1] - c[1], vs[:,0] - c[0])
        new_region = np.array(new_region)[np.argsort(angles)]
        # finish
        new_regions.append(new_region.tolist())
    return new_regions, np.asarray(new_vertices)
| StarcoderdataPython |
1636423 | <reponame>fweissberg/xous-core
#!/usr/bin/python3
import argparse
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto.Random import get_random_bytes
import binascii
import logging
import sys
"""
Reverse the order of bits in a word that is bitwidth bits wide
"""
def bitflip(data_block, bitwidth=32):
    """Reverse the bit order within each bitwidth-bit word of data_block.

    A bitwidth of 0 returns the input unchanged. bitwidth is assumed to be
    a multiple of 8; a short trailing chunk is zero-extended to a full word.
    """
    if bitwidth == 0:
        return data_block
    bytewidth = bitwidth // 8
    flipped = bytearray()
    for offset in range(0, len(data_block), bytewidth):
        word = int.from_bytes(data_block[offset:offset + bytewidth],
                              byteorder='big', signed=False)
        # reverse the word bit by bit, LSB-first into the new MSB positions
        reversed_word = 0
        for _ in range(bitwidth):
            reversed_word = (reversed_word << 1) | (word & 1)
            word >>= 1
        flipped.extend(reversed_word.to_bytes(bytewidth, byteorder='big'))
    return bytes(flipped)
# b must be at least as long as a (IndexError otherwise); extra bytes of b
# are ignored.
def xor_bytes(a, b):
    """Return the byte-wise XOR of a with b, over the length of a."""
    return bytes(byte_a ^ b[idx] for idx, byte_a in enumerate(a))
def patcher(bit_in, patch):
    """Apply frame-level word patches to a (decrypted) FPGA bitstream.

    bit_in: full bitstream contents; a 64-byte header and 160-byte trailer
        are stripped before processing.
    patch: iterable of (frame_number_hex, word_list) entries, where word_list
        holds hex word strings or 'none' for words to leave untouched.
    Returns the (possibly patched) bitstream body as bytes.
    """
    bitstream = bit_in[64:-160]
    if len(patch) > 0:
        # find beginning of type2 area
        # (packet type lives in bits 31:29: 0b001 = type1, 0b010 = type2)
        position = 0
        type = -1
        command = 0
        while type != 2:
            command = int.from_bytes(bitflip(bitstream[position:position+4]), byteorder='big')
            position = position + 4
            if (command & 0xE0000000) == 0x20000000:
                type = 1
            elif (command & 0xE0000000) == 0x40000000:
                type = 2
            else:
                type = -1
        count = 0x3ffffff & command # not used, but handy to have around
        ostream = bytearray(bitstream)
        # position now sits at the top of the type2 region
        # apply patches to each frame specified, ignoring the "none" values
        for line in patch:
            d = line[1]
            for index in range(len(d)):
                if d[index].strip() != 'none':
                    # patch words are stored bit-reversed, like the stream
                    data = bitflip(int(d[index],16).to_bytes(4, 'big'))
                    frame_num = int(line[0],16)
                    # each frame is 101 32-bit words
                    for b in range(4):
                        stream_pos = position + frame_num * 101 * 4 + index * 4 + b
                        ostream[stream_pos] = data[b]
        return bytes(ostream)
    else:
        return bitstream
def dumpframes(ofile, framestream):
    """Write the type2 frame data of `framestream` to `ofile`, one frame
    (101 words) per line as comma-separated hex, prefixed by frame number."""
    # scan for the type2 packet header (bits 31:29 == 0b010)
    position = 0
    type = -1
    command = 0
    while type != 2:
        command = int.from_bytes(framestream[position:position+4], byteorder='big')
        position = position + 4
        if (command & 0xE0000000) == 0x20000000:
            type = 1
        elif (command & 0xE0000000) == 0x40000000:
            type = 2
        else:
            type = -1
    # payload length field of the type2 packet
    count = 0x3ffffff & command
    # NOTE(review): `count` appears to be a word count while `position`
    # advances in bytes -- confirm the intended units for `end`.
    end = position + count
    framecount = 0
    while position < end:
        ofile.write('0x{:08x},'.format(framecount))
        framecount = framecount + 1
        for i in range(101):
            command = int.from_bytes(framestream[position:position + 4], byteorder='big')
            position = position + 4
            ofile.write(' 0x{:08x},'.format(command))
        ofile.write('\n')
def main():
    """Decrypt a BBRAM-keyed 7-series bitstream with the key from a .nky
    file, dump the plaintext to plain.bin, and recompute its HMAC chain."""
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    #ifile = 'bbram1_soc_csr.bin'
    ifile = '../../betrusted_soc_bbram.bin'
    # parse the Xilinx .nky key file: entry '0' = AES key,
    # 'StartCBC' = CBC IV, 'HMAC' = HMAC key (hex strings ending in ';')
    #with open('bbram-test1.nky', "r") as nky:
    with open('dummy.nky', "r") as nky:
        for lines in nky:
            line = lines.split(' ')
            if line[1] == '0':
                nky_key = line[2].rstrip().rstrip(';')
            if line[1] == 'StartCBC':
                nky_iv = line[2].rstrip().rstrip(';')
            if line[1] == 'HMAC':
                nky_hmac = line[2].rstrip().rstrip(';')
    logging.debug("original key: %s", nky_key)
    logging.debug("original iv: %s", nky_iv)
    logging.debug("original hmac: %s", nky_hmac)
    key_bytes = int(nky_key, 16).to_bytes(32, byteorder='big')
    # open the input file, and recover the plaintext
    with open(ifile, "rb") as f:
        binfile = f.read()
    # dump the first 64 words of the raw file for inspection
    for i in range(64):
        print(binascii.hexlify(binfile[i*4:((i+1)*4)]))
    # search for structure
    # 0x3001_6004 -> specifies the CBC key
    # 4 words of CBC IV
    # 0x3003_4001 -> ciphertext len
    # 1 word of ciphertext len
    # then ciphertext
    position = 0
    iv_pos = 0
    while position < len(binfile):
        cwd = int.from_bytes(binfile[position:position+4], 'big')
        if cwd == 0x3001_6004:
            iv_pos = position+4
        if cwd == 0x3003_4001:
            break
        position = position + 1
    position = position + 4
    # length word counts 32-bit words, hence * 4 for bytes
    ciphertext_len = 4* int.from_bytes(binfile[position:position+4], 'big')
    logging.debug("ciphertext len: %d", ciphertext_len)
    position = position + 4
    active_area = binfile[position : position+ciphertext_len]
    logging.debug("start of ciphertext: %d", position)
    iv_bytes = bitflip(binfile[iv_pos : iv_pos+0x10]) # note that the IV is embedded in the file
    logging.debug("recovered iv: %s", binascii.hexlify(iv_bytes))
    # the bitstream is stored bit-reversed: flip, decrypt, then flip back
    cipher = AES.new(key_bytes, AES.MODE_CBC, iv_bytes)
    logging.debug("first: %s", binascii.hexlify(bitflip(active_area[:16])))
    plain_bitstream = cipher.decrypt(bitflip(active_area))
    logging.debug("first: %s", binascii.hexlify(plain_bitstream[:16]))
    with open('plain.bin', 'wb') as plain_f:
        plain_f.write(bitflip(plain_bitstream))
    #for i in range(64):
    # print(binascii.hexlify(bitflip(plain_bitstream[i*4:((i+1)*4)])))
    #print(binascii.hexlify(bitflip(plain_bitstream)))
    # the first 64 bytes hold the two XOR-masked halves of the HMAC key
    logging.debug("raw hmac: %s", binascii.hexlify(plain_bitstream[:64]))
    hmac = xor_bytes(plain_bitstream[:32], plain_bitstream[32:64])
    logging.debug("hmac: %s", binascii.hexlify(hmac))
    logging.debug("plaintext len: %d", len(plain_bitstream))
    logging.debug("initial plaintext: %s", binascii.hexlify(plain_bitstream[:256]))
    # inner hash over the covered portion of the plaintext
    h1 = SHA256.new()
    k = 0
    while k < len(plain_bitstream) - 320 - 160: # HMAC does /not/ cover the whole file, it stops 320 + 160 bytes short of the end
        h1.update(bitflip(plain_bitstream[k:k+16], 32))
        k = k + 16
    h1_digest = h1.digest()
    logging.debug("new digest1 : %s", binascii.hexlify(h1_digest))
    logging.debug("new digest1 (in stored order): %s", binascii.hexlify(bitflip(h1_digest)))
    # outer hash: HMAC key masked with the 0x3A opad constant, then the pad,
    # then the inner digest
    footer = int(0x3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A3A).to_bytes(32, byteorder='big')
    keyed_footer = xor_bytes(footer, hmac)
    logging.debug("hmac (flipped): %s", binascii.hexlify(hmac))
    logging.debug("masked_footer: %s", binascii.hexlify(keyed_footer))
    h2 = SHA256.new()
    h2.update(bitflip(keyed_footer))
    h2.update(bitflip(footer))
    #h2.update(keyed_footer)
    #h2.update(footer)
    h2.update(h1_digest)
    h2_digest = h2.digest()
    logging.debug("new digest2 : %s", binascii.hexlify(h2_digest))
    logging.debug("new digest2 (in stored order): %s", binascii.hexlify(bitflip(h2_digest)))
    # reference digest stored at the very end of the plaintext, for comparison
    logging.debug("ref digest: %s", binascii.hexlify(plain_bitstream[-32:]))
    logging.debug("ref digest (flipped): %s", binascii.hexlify(bitflip(plain_bitstream[-32:])))
    logging.debug("ref ending: %s", binascii.hexlify(plain_bitstream[-196:]))
if __name__ == "__main__":
main() | StarcoderdataPython |
128055 | <filename>algos/patterns/bitWiseXor/complement_base_10.py
# Every non-negative integer N has a binary representation, for example, 8 can be represented as “1000” in binary and 7 as “0111” in binary.
# The complement of a binary representation is the number in binary that we get when we change every 1 to a 0 and every 0 to a 1. For example, the binary complement of “1010” is “0101”.
# For a given positive number N in base-10, return the complement of its binary representation as a base-10 integer.
# Example 1:
# Input: 8
# Output: 7
# Explanation: 8 is 1000 in binary, its complement is 0111 in binary, which is 7 in base-10.
# Example 2:
# Input: 10
# Output: 5
# Explanation: 10 is 1010 in binary, its complement is 0101 in binary, which is 5 in base-10.
def getComplement(num):
    '''
    Return the base-10 value of the bitwise complement of num's binary
    representation (every 1 becomes 0 and every 0 becomes 1, over exactly
    the bits needed to represent num).

    Time: O(b) where b is the number of bits used to represent the number.
    Space: O(1)
    '''
    # int.bit_length() gives the number of significant bits directly,
    # replacing the manual shift-and-count loop. For num == 0 this yields 0,
    # matching the original loop's behavior (result 0).
    all_set_bits = (1 << num.bit_length()) - 1
    # XOR with an all-ones mask of the same width flips every bit.
    return all_set_bits ^ num
if __name__ == '__main__':
    # Quick sanity checks from the problem statement's two examples.
    first, second = getComplement(8), getComplement(10)
    print(first, second)
    assert first == 7
    assert second == 5
| StarcoderdataPython |
180224 | <reponame>ZhouXing19/ContAnalysisFinalProj
#All these packages need to be installed from pip
import gensim#For word2vec, etc
import argparse
import math
import numpy as np #For arrays
import pandas as pd #Gives us DataFrames
pd.options.mode.chained_assignment = None
import os #For looking through files
import os.path #For managing file paths
import io
import matplotlib.pyplot as plt #For graphics
import seaborn #Makes the graphics look nicer
import sklearn.metrics.pairwise #For cosine similarity
import sklearn.manifold #For T-SNE
import sklearn.decomposition #For PCA
import pickle
class GetEmbeddings:
    """Train word2vec embeddings on MD&A filings of one GICS sector and
    export the word/vector pairs as TSV files (e.g. for the TF projector).

    The constructor eagerly loads the sector's documents into
    ``self.textDatadf``; call :meth:`get_embeddings` to train and export.
    """

    def __init__(self,
                 sector,
                 textDatadfPath="./Data/MDA_2010_2020/textDatadf",
                 industryDataPath="./Data/MDA_2010_2020/master_industry.csv",
                 industryDescriptionPath="./Data/MDA_2010_2020/GICS_map_2018.xlsx",
                 filterDictPath="./Data/MDA_2010_2020/Zhou/dictionary_r_unfiltered.pkl"
                 ):
        """
        Args:
            sector: GICS sector name; must be one of the four supported ones.
            textDatadfPath: directory of pickled DataFrames with tokenized text.
            industryDataPath: CSV mapping filings (FName) to GICS codes.
            industryDescriptionPath: Excel sheet mapping GICS codes to names.
            filterDictPath: pickled gensim Dictionary used for vocab filtering.
        """
        self.sector = sector
        # Only these four sectors are supported downstream.
        assert self.sector in ['Information Technology', 'Financials', 'Energy', 'Materials']
        self.textDatadfPath = textDatadfPath
        self.industryDataPath = industryDataPath
        self.industryDescriptionPath = industryDescriptionPath
        self.filterDictPath = filterDictPath
        self.load_industry_code()
        self.sector_code = self.IndCodeTable[self.sector]
        self.filter_dict = None
        self.load_text_df()

    def load_text_df(self):
        """Concatenate the pickled per-chunk DataFrames into self.textDatadf,
        keeping only rows of the selected sector."""
        textDatadfFiles = os.listdir(self.textDatadfPath)
        self.textDatadf = pd.DataFrame()
        for idx, file in enumerate(textDatadfFiles):
            filepath = os.path.join(self.textDatadfPath, file)
            # Use a context manager so the file handle is closed promptly
            # (the original leaked the handle from pickle.load(open(...))).
            with open(filepath, 'rb') as handle:
                this_df = pickle.load(handle)
            # .copy() so the column assignment below writes to an
            # independent frame rather than a view of this_df.
            filtered_df = this_df.loc[this_df['gind'] == self.sector_code].copy()
            # Wrap each token list in another list so that summing the column
            # later yields a list of documents (one token list per document).
            filtered_df['normalized_tokens'] = filtered_df['normalized_tokens'].apply(lambda x: [x])
            # DataFrame.append was removed in pandas 2.0; concat is equivalent.
            self.textDatadf = pd.concat([self.textDatadf, filtered_df])
            if idx % 5 == 0:
                print(f'====finished : [{idx}] files====')
        print('----finished load_text_df----')
        return

    def load_fname_gind(self):
        """Load the (FName, gind) mapping from the industry CSV.

        NOTE: currently unused by the pipeline (kept for completeness).
        """
        industryData = pd.read_csv(self.industryDataPath)
        # .copy() avoids chained-assignment warnings on the column update.
        self.FName_gind = industryData[['FName', 'gind']].copy()
        # Reduce the full GICS industry code to its 2-digit sector prefix.
        self.FName_gind.gind = self.FName_gind.gind.apply(self.ChangeCode).astype('Int64')

    def load_industry_code(self):
        """Build code<->sector-name lookup tables from the GICS map sheet."""
        industryDescData = pd.read_excel(self.industryDescriptionPath)
        indData = industryDescData[industryDescData.columns[0:2]].dropna()
        indData.columns = ['codes', 'indName']
        self.CodeIndTable = indData.set_index('codes').to_dict()['indName']
        self.IndCodeTable = {v: k for k, v in self.CodeIndTable.items()}
        return

    def load_filter_dict(self):
        """Load the gensim Dictionary and trim extreme-frequency tokens,
        always keeping a hand-picked list of risk-related words."""
        self.filter_dict = pickle.load(open(self.filterDictPath, 'rb'))
        ori_len = len(self.filter_dict)
        to_keeps = ['systematic', 'unsystematic', 'political', 'regulatory', 'financial', 'interest', 'rate', 'country',
                    'social', 'environmental', 'operational', 'management', 'legal', 'competition', 'economic', 'compliance',
                    'security', 'fraud', 'operational', 'operation', 'competition', 'risk', 'uncertainty', 'uncertainties', 'risks',
                    'personnel', 'salary', 'wage', 'pandemic', 'covid', 'covid-19', 'epidemic', 'health']
        # Drop tokens in fewer than 20 docs or in more than 60% of docs.
        self.filter_dict.filter_extremes(no_below=20, no_above=0.6, keep_tokens=to_keeps)
        updated_len = len(self.filter_dict)
        print(f"---- removed [{ori_len - updated_len}] words ----")

    def ChangeCode(self, val):
        """Return the 2-digit GICS sector prefix of a full industry code;
        NaN values are passed through unchanged."""
        if math.isnan(val):
            return val
        return int(str(val)[:2])

    def get_embeddings(self, save_wv=False):
        """Train word2vec on all loaded documents, optionally pickle the
        vectors, then export them as TSV files."""
        self.senReleasesW2V = gensim.models.word2vec.Word2Vec(self.textDatadf['normalized_tokens'].sum())
        if save_wv:
            self.save_wv()
        self.saveWordVecPairInFile(self.senReleasesW2V.wv)
        return

    def save_wv(self):
        """Pickle the trained word vectors, replacing any previous dump."""
        # `is not None` is the idiomatic (and identity-correct) null check.
        assert self.senReleasesW2V is not None
        pklPath = f'./Data/MDA_2010_2020/{self.sector}_wv.pkl'
        if os.path.exists(pklPath):
            os.remove(pklPath)
        with open(pklPath, 'wb') as handle:
            pickle.dump(self.senReleasesW2V.wv, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def saveWordVecPairInFile(self, wv, filtered=True):
        """Write `<sector>_vectors.tsv` / `<sector>_metadata.tsv` for `wv`.

        When `filtered` is True only words kept by the filter dictionary are
        exported; otherwise every word in the vocabulary is written.
        """
        print(f'----Start writing files for [{self.sector}] ----')
        out_v_path = f'tsvs/{self.sector}_vectors.tsv'
        out_m_path = f'tsvs/{self.sector}_metadata.tsv'
        if os.path.exists(out_v_path):
            os.remove(out_v_path)
        if os.path.exists(out_m_path):
            os.remove(out_m_path)
        vocabulary = list(wv.vocab.keys())
        print(f'----Preview vocab for [{self.sector}] : {vocabulary[:5]}----')
        words = set()
        if filtered:
            self.load_filter_dict()
            assert self.filter_dict is not None
            words = set(self.filter_dict.values())
        # Context managers guarantee the files are closed even on error.
        with io.open(out_v_path, 'w', encoding='utf-8') as out_v, \
                io.open(out_m_path, 'w', encoding='utf-8') as out_m:
            for index, word in enumerate(vocabulary):
                # BUG FIX: the original condition was
                # `if filtered and word in words`, which wrote nothing at all
                # when filtered=False. Unfiltered mode now exports all words.
                if not filtered or word in words:
                    vec = wv[word]
                    out_v.write('\t'.join([str(x) for x in vec]) + "\n")
                    out_m.write(word + "\n")
        print(f'----Finished writing files for [{self.sector}] ----')
# textDatadfPath = "./Data/MDA_2010_2020/textDatadf"
# textDatadfFiles = os.listdir(textDatadfPath)
# newtextDatadf = pd.DataFrame()
# for file in textDatadfFiles:
# filepath = os.path.join(textDatadfPath, file)
# curDf = pickle.load(open(filepath, 'rb'))
# newtextDatadf = newtextDatadf.append(curDf)
# demo_df = newtextDatadf.head()
# demo_df_cp = demo_df[:]
# Load Industry data
# industryDataPath = "./Data/MDA_2010_2020/master_industry.csv"
# industryDescriptionPath = "./Data/MDA_2010_2020/GICS_map_2018.xlsx"
#
# industryData = pd.read_csv(industryDataPath)
# FName_gind = industryData[['FName', 'gind']]
# FName_gind.gind = FName_gind.gind.apply(ChangeCode).astype('Int64')
#
# industryDescData = pd.read_excel(industryDescriptionPath)
# indData = industryDescData[industryDescData.columns[0:2]].dropna()
# indData.columns = ['codes', 'indName']
# CodeIndTable = indData.set_index('codes').to_dict()['indName']
# IndCodeTable = {v: k for k, v in CodeIndTable.items()}
# indCodes = set(CodeIndTable.keys())
# desiredIndustries = ['Information Technology', 'Financials', 'Energy', 'Materials']
# desiredCodes = set([IndCodeTable[industry] for industry in desiredIndustries])
# demo_df['normalized_tokens'] = demo_df['normalized_tokens'].apply(lambda x: [x])
# demo_list = demo_df['normalized_tokens'].sum()
# senReleasesW2V = gensim.models.word2vec.Word2Vec(demo_list)
# senReleasesW2V.wv.vocab.keys()
if __name__ == "__main__":
    # Command-line entry point: choose the sector, then train and export.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--sector', default='Information Technology',
                            help='sector selected')
    args = arg_parser.parse_args()
    embedder = GetEmbeddings(args.sector)
    embedder.get_embeddings(save_wv=True)
# this_sec = "Information Technology"
# this_ge = GetEmbeddings(this_sec)
# this_ge.get_embeddings(save_wv=True)
#
# this_wv = this_ge.senReleasesW2V.wv
#
#
# def normalize(vector):
# normalized_vector = vector / np.linalg.norm(vector)
# return normalized_vector
#
#
# def dimension(model, positives, negatives):
# diff = sum([normalize(model[x]) for x in positives]) - sum([normalize(model[y]) for y in negatives])
# return diff
#
#
# Risk_Uncertainty = dimension(this_wv, ['risk', 'risks', 'risky', 'risking', 'risk, '], ['uncertainty', 'uncertainties', 'uncertain'])
# Key_Words = ['political', 'regulatory', 'financial', 'interest', 'rate', 'country',\
# 'social', 'environmental', 'operational', 'management', 'legal', \
# 'competition', 'economic', 'compliance', 'security','fraud', \
# 'operational', 'operation', 'competition', ]
#
#
# def makeDF(model, word_list):
# RU = []
# for word in word_list:
# RU.append(
# sklearn.metrics.pairwise.cosine_similarity(this_wv[word].reshape(1, -1), Risk_Uncertainty.reshape(1, -1))[
# 0][0])
# df = pd.DataFrame({'Risk_Uncertainty': RU}, index=word_list)
# return df | StarcoderdataPython |
1752664 | import os
import numpy as np
import sys, unicodedata
# In[2]:
# Translation table mapping every Unicode punctuation code point to None,
# so str.translate(tbl) strips all punctuation in a single pass.
tbl = dict.fromkeys(i for i in range(sys.maxunicode)
                    if unicodedata.category(chr(i)).startswith('P'))


# Split assess_pregnancy.tsv (one "text<TAB>label" line per example) into one
# output file per class label, with punctuation stripped from the text.
for root, dirs, files in os.walk('tsv_files/'):
    # One bucket of texts per integer class label (0, 1 or 2).
    whole_class_info = [[], [], []]
    # os.path.join inserts the separator correctly for subdirectories
    # (the original `root + "assess_pregnancy.tsv"` only worked when root
    # happened to end in '/'). Also dropped the unused `tsv_list` variable.
    with open(os.path.join(root, "assess_pregnancy.tsv")) as cur_tsv:
        separated_data = np.asarray([line.split('\t') for line in cur_tsv])
        data = separated_data[:, 0]
        # int() tolerates the trailing newline left on the label field.
        labels = [int(label) for label in separated_data[:, 1]]
        for idx, label in enumerate(labels):
            whole_class_info[label].append(data[idx].translate(tbl))
        for idx, item in enumerate(whole_class_info):
            # "w" (write-only) suffices; the file is never read back here.
            with open(os.path.join("preg", str(idx) + ".txt"), "w") as write_f:
                write_f.write("\n".join(item))
| StarcoderdataPython |
3287110 | <reponame>jdfergusson/ReChorder<filename>rechorder/rechorder/migrations/0004_auto_20200407_1140.py
# Generated by Django 3.0.4 on 2020-04-07 11:40
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the nullable 'artist' text field and the
    required 'original_key' field (pitch class 0-11) to the Song model."""

    dependencies = [
        ('rechorder', '0003_auto_20200406_1205'),
    ]

    operations = [
        migrations.AddField(
            model_name='song',
            name='artist',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='song',
            name='original_key',
            # 0-11 covers the twelve semitones of an octave.
            field=models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(11)]),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
133799 | from bs4 import BeautifulSoup
import requests
from datetime import datetime
from urllib.parse import urljoin
# ask questions
# change variables in the next sections
# use those variables to change outcomes in what is being scraped
# if there are mistakes, catch them
#######
####### Hey! What's your name?
# Greet the user and remember the name; it is reused in later prompts.
user_name = input("Hey! What's your name? ")
################################################################################################
# function for correction
# true or false to return next set of questions
def ask_and_confirm(__ask, var_name):
    """Confirm a user-supplied value, re-prompting until the user accepts it.

    BUG FIX: the original compared against the undefined names Y/y/N/n
    (NameError at runtime) and never actually prompted; this version
    implements the intended confirm/re-enter loop.

    Args:
        __ask: truthy when the value is already confirmed; falsy to prompt.
        var_name: the value (e.g. a base URL) to confirm or re-enter.

    Returns:
        The confirmed (possibly re-entered) value.
    """
    while not __ask:
        answer = input("Lets make sure everything was typed correctly, is this correct: "
                       + var_name + " - [Y][N] ")
        if answer.strip().lower() == 'y':
            # Accepted: acknowledge and stop prompting.
            print("Awesome! " + var_name + ", will help us connect relative links in the website if you decide to scrape them too!")
            __ask = True
        else:
            # Rejected: ask for the value again, then loop to re-confirm.
            var_name = input("Whoops! What's is the base link again? ")
    return var_name
################################################################################################
# NOTE(review): base_url__ask is hard-coded False, so ask_and_confirm() below
# is always entered in the "not yet confirmed" state.
base_url__ask = False
#######
####### Hi [user_name]! So what's the website you want to scrape?
websiteurl = input("Hi "+user_name+"! So what's the website you want to scrape? ")
# Great! Can you specify the static part of your website for us?
# E.g. if your website was http://foo.com/recipes01.html you would enter http://foo.com/
# Anther example. If your website was http://calvin.com/staff/staffpage01.html you would enter http://calvim.com/staff/
print('Great! Can you specify the static part of your website for us?')
print('E.g. if your website was http://foo.com/recipes01.html you would enter http://foo.com/')
print('Anther example. If your website was http://calvin.com/staff/staffpage01.html you would enter http://calvim.com/staff/')
# Enter your links' base url:
base_url = input("Enter your links' base url: ")
# Lets make sure everything was typed correctly, is this correct: [base_link] - [Y][N]
## base_url__ask = input("Lets make sure everything was typed correctly, is this correct: "+base_url+" - [Y][N]")
## [Y]
## base_url__ask == True # Yes
# Awesome! [base_link][5:].., will help us connect relative links in the website if you decide to scrape them too!
## [N]
## base_url_ask == False # No, reask
# Whoops! What's is the base link again?
# Confirm (or re-enter) the base URL; used later to resolve relative links.
base_url = ask_and_confirm(base_url__ask, base_url)
# Awesome! [base_link][5:].., will help us connect relative links in the website if you decide to scrape them too!
################################################################################################
################################################################################################
################################################################################################
# global relative
# Fetch the target page and parse it with the stdlib HTML parser.
# NOTE(review): no error handling -- a network failure or non-200 response
# is not checked before r.text is used.
r = requests.get(websiteurl)
soup = BeautifulSoup(r.text, 'html.parser')
################################################################################################
################################################################################################
#######
####### Okay, websiteurl[10:].., lets specifcy where we're looking.
# What is the element type we're looking for in the page?
# The most common type of element would be a div, and we're looking at divs by default.
# If you want to change this Press N. Otherwise, would you like to continue [user_name]? - [Y, Continue][N, Change]
print("Okay, websiteurl[10:].., lets specifcy where we're looking. ")
print("What is the element type we're looking for in the page? ")
print("The most common type of element would be a div, and we're looking at divs by default. ")
print("If you want to change this Press N. Otherwise, would you like to continue "+user_name+"? - [Y, Continue][N, Change] ")
### [Y]
# NOTE(review): the [Y]/[N] branches below are pseudocode -- both statements
# execute unconditionally, so the 'div' default is immediately overwritten by
# the input() on the next assignment.
base_element = 'div'
### [N]
## Please type the element name:
base_element = input("Please type the element name: ")
collection__type = 'Id or Class not set correctly'
# if collection__type is not set_id or is not set_class:
# Is it an [#id] or [.class]?
# NOTE(review): base_id, base_class, set_id and set_class are never defined
# anywhere in this file, so both `if` headers raise NameError at runtime.
if base_element == base_id:
    collection__type = set_id
    # Cool, so it's an Id, what's the id's name?:
    id_name = input("Cool, so it's an Id, what's the id's name?: ")
    ## [user_name], lets make sure. Is this correct: [#][id_name] - [Y][N]
    ### [Y]
    ## Excellent, lets continue. >
    ### [N]
    ## Lets fix it, enter the id name:
    # NOTE(review): this re-prompt also runs unconditionally (the Y/N
    # branching above is only comments).
    id_name =input("Lets fix it, enter the id name: ")
    ## Excellent, lets continue. >
if base_element == base_class:
    collection__type = set_class
    # Cool, so it's a Class, what's the class's name?
    # NOTE(review): class_name = 2 looks like a placeholder; an input()
    # prompt for the class name appears to be missing here.
    class_name = 2
    print("[user_name], let's make sure. Is this correct: [.][class_name] - [Y][N]")
    ### [Y]
    ## Excellent, lets continue. >
    print("Excellent, lets continue. >")
    ### [N]
    ## Lets fix it, enter the class name:
    ## [user_name], let's make sure. Is this correct: [.][class_name] - [Y][N]
    ### [Y]
    ## Excellent, lets continue. >
    print("Excellent, lets continue. >")
    ### [N]
    ## Lets fix it, enter the class name:
    class_name = 2
    ## Excellent, lets continue. >
################################################################################################
################################################################################################
# Accumulates the scraped values (e.g. absolute link URLs).
collection_list = []
# NOTE(review): set_id and set_class are undefined names (NameError at
# runtime); the intent appears to be locating the container element by id
# or by class before extracting its children.
if collection__type == set_id:
    collection_id = soup.find(base_element, {"id": id_name})
if collection__type == set_class:
    collection_class = soup.find(base_element, {"class": class_name})
################################################################################################
################################################################################################
#######
####### Lets continue! So we're looking at [#id]_[.class], what do we need from there?
# [headings][links][span][text][bold text][italic text][another id][another class][everything]
# NOTE(review): inner_element, headings and links are never defined anywhere
# in this file, so these comparisons raise NameError at runtime; the block is
# an unfinished dispatch over what to extract from the selected container.
if inner_element == headings:
    if collection__type == set_id or collection__type == set_class:
        # NOTE(review): uses collection_id even when the container was
        # selected by class (collection_class) -- likely a bug.
        collect_links = collection_id.findAll('h1')
if inner_element == links:
    if collection__type == set_id:
        collect_links = collection_id.findAll('a', href=True)
        # Resolve each relative href against the user-supplied base URL.
        for link in collect_links:
            if link['href']:
                relative = link['href']
                collection_list.append(urljoin(base_url, relative))
    # else:
    # continue
    if collection__type == set_class:
        collect_links = collection_class.findAll('a', href=True)
        for link in collect_links:
            if link['href']:
                relative = link['href']
                collection_list.append(urljoin(base_url, relative))
    # else:
    # continue
#if inner_element == span:
    # skip
#if inner_element == text:
    # skip
#if inner_element == bold_text:
    # skip
#if inner_element == italic_text:
    # skip
#if inner_element == another_oct_id:
    # skip
#if inner_element == another_dot_class:
    # skip
#if inner_element == everything:
    # skip
################################################################################################
################################################################################################
| StarcoderdataPython |
1742528 | <filename>t5/evaluation/eval_utils.py
# Copyright 2020 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for running offline evaluation."""
import collections
import functools
import os
from typing import Any, Callable, Iterable, Mapping, MutableSequence, Optional, Sequence, Union
from absl import logging
import numpy as np
import pandas as pd
import t5.data
from t5.models import mesh_transformer
from t5.models import utils as model_utils
import tensorflow.compat.v1 as tf
import typing_extensions
class Metric(object):
  """Display name for one metric plus the group it is averaged under."""

  def __init__(self, name, group=None):
    # When no explicit group is given, the metric forms its own group.
    self.group = name if not group else group
    self.name = name
# This OrderedDict maps TensorBoard tags to nice-looking metric names.
# The order of the keys in the dict determine the order they get logged.
METRIC_NAMES = collections.OrderedDict([
    # GLUE benchmark tasks.
    ("glue_average", Metric("Average GLUE Score")),
    ("glue_cola_v002/matthews_corrcoef", Metric("CoLA")),
    ("glue_sst2_v002/accuracy", Metric("SST-2")),
    ("glue_mrpc_v002/f1", Metric("MRPC (F1)", "MRPC")),
    ("glue_mrpc_v002/accuracy", Metric("MRPC (accuracy)", "MRPC")),
    ("glue_stsb_v002/pearson_corrcoef", Metric("STSB (Pearson)", "STSB")),
    ("glue_stsb_v002/spearman_corrcoef", Metric("STSB (Spearman)", "STSB")),
    ("glue_qqp_v002/f1", Metric("QQP (F1)", "QQP")),
    ("glue_qqp_v002/accuracy", Metric("QQP (accuracy)", "QQP")),
    ("glue_mnli_matched_v002/accuracy", Metric("MNLIm", "MNLI")),
    ("glue_mnli_mismatched_v002/accuracy", Metric("MNLImm", "MNLI")),
    ("glue_qnli_v002/accuracy", Metric("QNLI")),
    ("glue_rte_v002/accuracy", Metric("GLUE RTE")),
    # Summarization.
    ("cnn_dailymail_v002/rouge1", Metric("CNN/DM (ROUGE-1)", "CNN/DM")),
    ("cnn_dailymail_v002/rouge2", Metric("CNN/DM (ROUGE-2)", "CNN/DM")),
    ("cnn_dailymail_v002/rougeL", Metric("CNN/DM (ROUGE-L)", "CNN/DM")),
    ("cnn_dailymail_v002/rougeLsum", Metric("CNN/DM (ROUGE-L)", "CNN/DM")),
    # Question answering (several SQuAD task variants share display names).
    ("squad_v010_allanswers/em", Metric("SQuAD (EM)", "SQuAD")),
    ("squad_v010_allanswers/f1", Metric("SQuAD (F1)", "SQuAD")),
    ("squad_v010_allanswers_span/em", Metric("SQuAD (EM)", "SQuAD")),
    ("squad_v010_allanswers_span/f1", Metric("SQuAD (F1)", "SQuAD")),
    ("squad_v010/em", Metric("SQuAD (EM)", "SQuAD")),
    ("squad_v010/f1", Metric("SQuAD (F1)", "SQuAD")),
    # SuperGLUE benchmark tasks.
    ("super_glue_average", Metric("Average SuperGLUE Score")),
    ("super_glue_boolq_v102/accuracy", Metric("BoolQ (accuracy)")),
    ("super_glue_cb_v102/mean_3class_f1", Metric("CB (F1)", "CB")),
    ("super_glue_cb_v102/accuracy", Metric("CB (accuracy)", "CB")),
    ("super_glue_copa_v102/accuracy", Metric("CoPA")),
    ("super_glue_multirc_v102/f1", Metric("MultiRC (F1)", "MultiRC")),
    ("super_glue_multirc_v102/exact_match", Metric("MultiRC (EM)", "MultiRC")),
    ("super_glue_record_v102/f1", Metric("ReCoRD (F1)", "ReCoRD")),
    ("super_glue_record_v102/em", Metric("ReCoRD (EM)", "ReCoRD")),
    ("super_glue_rte_v102/accuracy", Metric("SuperGLUE RTE")),
    ("super_glue_wic_v102/accuracy", Metric("WiC")),
    ("super_glue_wsc_v102_simple_eval/accuracy", Metric("WSC")),
    ("dpr_v001_simple/accuracy", Metric("DPR")),
    # Machine translation.
    ("wmt_t2t_ende_v003/bleu", Metric("WMT T2T En-De")),
    ("wmt14_ende_v003/bleu", Metric("WMT14 En-De")),
    ("wmt15_enfr_v003/bleu", Metric("WMT15 En-Fr")),
    ("wmt16_enro_v003/bleu", Metric("WMT16 En-Ro")),
])
# A single TensorBoard datapoint: the global step and its scalar value.
Event = collections.namedtuple("event", ["step", "value"])
def parse_events_files(tb_summary_dir):
  """Parse all TensorBoard events files in tb_summary_dir.

  Args:
    tb_summary_dir: str, path to look for events files in.

  Returns:
    A dict, where each key is a TensorBoard tag and each value is a list of
    Event tuples with step and value attributes.
  """
  events = collections.defaultdict(list)
  events_glob = os.path.join(tb_summary_dir, "events.*")
  for events_file in tf.io.gfile.glob(events_glob):
    try:
      for record in tf.train.summary_iterator(events_file):
        for summary_value in record.summary.value:
          datapoint = Event(record.step, summary_value.simple_value)
          events[summary_value.tag].append(datapoint)
    except tf.errors.DataLossError:
      # A truncated record ends this file; keep whatever was read so far.
      logging.info("Skipping %s due to truncated record.", events_file)
  return events
def get_eval_metric_values(events):
  """Filter TensorBoard events to only include those for eval metrics.

  Args:
    events: dict of list of (step, value) tuples where keys are tags.

  Returns:
    Dict where key is task_name/metric_name and value is (step, value) tuple.
  """
  eval_values = {}
  for tag, event_values in events.items():
    # Only tags of the form "eval/<task>/<metric>" are kept.
    if not tag.startswith("eval"):
      continue
    _, task_name, metric_name = tag.split("/")
    eval_values["{}/{}".format(task_name, metric_name)] = event_values
  return eval_values
def sort_columns(df, metric_names=None):
  """Reorder df's columns to follow the order given by metric_names."""
  metric_names = metric_names or METRIC_NAMES
  # OrderedDict.fromkeys both deduplicates and preserves first-seen order.
  ordered = collections.OrderedDict.fromkeys(
      m.name for m in metric_names.values() if m.name in df.columns)
  return df.reindex(columns=list(ordered))
def compute_avg_glue(df, metric_names=None):
  """Compute average GLUE and SuperGLUE scores from a DataFrame.

  Will only compute a given average score if all of the metrics for that
  benchmark appear as columns in the DataFrame.

  Args:
    df: pandas.DataFrame, columns should be metric names.
    metric_names: dict mapping tensorboard tag to metric name.

  Returns:
    A pandas.DataFrame which has GLUE and SuperGLUE averages calculated.
  """
  # Use METRIC_NAMES defined at the top as default.
  metric_names = metric_names or METRIC_NAMES
  all_glue_tags = {
      tag for tag in metric_names if "glue" in tag and "average" not in tag}
  superglue_tags = {tag for tag in all_glue_tags if "super" in tag}
  glue_tags = all_glue_tags - superglue_tags
  for average_key, tags in (("Average GLUE Score", glue_tags),
                            ("Average SuperGLUE Score", superglue_tags)):
    # Only compute the average when every metric of the benchmark is present.
    required_names = {metric_names[t].name for t in tags}
    if not required_names.issubset(set(df.columns)):
      continue
    # Average within each metric group first...
    group_to_metrics = collections.defaultdict(set)
    for tag in tags:
      metric = metric_names[tag]
      group_to_metrics[metric.group].add(metric.name)
    accum = None
    for metrics in group_to_metrics.values():
      group_avg = np.mean([df[name] for name in metrics], axis=0)
      accum = group_avg if accum is None else accum + group_avg
    # ...then across all groups.
    df[average_key] = accum / len(group_to_metrics)
  return df
def scores_to_df(scores, metric_names=None):
  """Convert `scores` into a pandas DataFrame."""
  # Use METRIC_NAMES defined at the top as default.
  metric_names = metric_names or METRIC_NAMES
  # Register a fallback Metric for any tag we have no display name for.
  for tag in scores.keys():
    if tag not in metric_names:
      metric_names[tag] = Metric(tag)
      logging.warning(
          "TensorBoard tag %s not found in metric_names. "
          "Using tag as metric name.",
          tag)
  # Order the score tags the same way metric_names orders them.
  tag_order = list(metric_names.keys())
  sorted_tags = sorted(scores.keys(), key=tag_order.index)
  columns = [metric_names[t].name for t in sorted_tags]
  # {step: OrderedDict(tag -> value)}; tags missing at a step stay NaN.
  step_scores = collections.defaultdict(
      lambda: collections.OrderedDict([(t, np.nan) for t in sorted_tags]))
  for tag in sorted_tags:
    for step, value in scores[tag]:
      step_scores[step][tag] = value
  rows = sorted(step_scores.items())
  data = [list(row.values()) for _, row in rows]
  index = [step for step, _ in rows]
  df = pd.DataFrame(data, index, columns)
  df.index.name = "step"
  return df
def metric_group_max(df, metric_names=None):
  """Find the step which achieves the highest mean value for a group of metrics.

  Args:
    df: pandas.DataFrame of scores; columns are metric names, index is step.
    metric_names: dict mapping TensorBoard tag to Metric; defaults to
      METRIC_NAMES.

  Returns:
    A (metric_max, metric_max_step) pair of pandas.Series indexed like
    df.columns, giving each metric's value at -- and the step of -- the best
    mean score of its metric group.
  """
  # Use METRIC_NAMES defined at the top as default.
  metric_names = metric_names or METRIC_NAMES
  group_to_metrics = collections.defaultdict(set)
  for metric in metric_names.values():
    group_to_metrics[metric.group].add(metric.name)
  # Mean score per group at every step, for groups fully present in df.
  group_df = pd.DataFrame()
  for group, metrics in group_to_metrics.items():
    if not all(m in df for m in metrics):
      continue
    # sorted(...): pandas does not accept a set as a column indexer, and the
    # sorted list keeps the selection deterministic.
    group_df[group] = df[sorted(metrics)].mean(axis=1)
  # Need to replace nan with large negative value for idxmax.
  group_max_step = group_df.fillna(-1e9).idxmax(axis=0)
  # Explicit dtype avoids the empty-Series default-dtype warning.
  metric_max = pd.Series(dtype="float64")
  metric_max_step = pd.Series(dtype="float64")
  # Series.iteritems() was removed in pandas 2.0; items() is identical.
  for group_name, max_step in group_max_step.items():
    for metric in group_to_metrics[group_name]:
      metric_max[metric] = df[metric][max_step]
      metric_max_step[metric] = max_step
  metric_max = metric_max.reindex(df.columns)
  metric_max_step = metric_max_step.reindex(df.columns)
  return metric_max, metric_max_step
def log_csv(df, metric_names=None, output_file=None):
  """Log scores to be copy/pasted into a spreadsheet."""
  logging.info(",".join(df.columns))
  best_values, best_steps = metric_group_max(df, metric_names)
  # One row with each metric's best value, one with the step it occurred at.
  max_row = "max," + ",".join("{:.3f}".format(v) for v in best_values)
  logging.info(max_row)
  idx_row = "step," + ",".join("{:d}".format(s) for s in best_steps)
  logging.info(idx_row)
  if output_file is not None:
    with tf.io.gfile.GFile(output_file, "w") as f:
      f.write(df.to_csv(float_format="%.3f") + max_row + "\n" + idx_row)
class PredictOrScoreFnCallable(typing_extensions.Protocol):
  """Signature for `predict_or_score_fn` passed to `run_eval`."""

  # Structural protocol: any callable matching this signature is accepted.
  # Per run_eval's docstring, it returns either a list of decoded outputs
  # (str) or a list of scores (float) for the cached eval examples.
  def __call__(
      self,
      checkpoint_step: int,
      vocabulary: Any,
      tasks: Sequence[t5.data.Task],
      examples: Sequence[Mapping[str, Mapping[str, str]]],
      datasets: Mapping[str, tf.data.Dataset],
      sequence_length: Union[None, Mapping[str, int]]
  ) -> MutableSequence[Union[str, float]]: ...
def run_eval(
    mixture_or_task_name: str,
    predict_or_score_fn: PredictOrScoreFnCallable,
    checkpoint_steps: Iterable[int],
    dataset_fn: Optional[Callable[
        [t5.data.Task, Mapping[str, int], int, str, Optional[bool]],
        tf.data.Dataset]] = None,
    summary_dir: Optional[str] = None,
    split: Optional[str] = "validation",
    sequence_length: Optional[Mapping[str, int]] = None,
    batch_size: Optional[int] = None):
  """Run evaluation on the given mixture or task.

  Args:
    mixture_or_task_name: str, the name of the Mixture or Task to evaluate
      on. Must be pre-registered in the global `TaskRegistry` or
      `MixtureRegistry.`
    predict_or_score_fn: function, This function takes in the sequence length,
      checkpoint step, tasks to evaluate, an eval_dataset_fn, a dict mapping
      task names to cached examples, a dict mapping task names to datasets,
      and returns a list of outputs or a list of scores.
    checkpoint_steps: an iterator with integers for checkpoint steps to
      evaluate on.
    dataset_fn: function, This function takes a task and returns the dataset
      associated with it. If None, the default mesh_eval_dataset_fn is used.
    summary_dir: str, path to write TensorBoard events file summaries for
      eval. If None, use model_dir/eval_{split}.
    split: str, the mixture/task split to evaluate on.
    sequence_length: an integer or a dict from feature-key to integer
      the sequence length to pad or truncate to,
      e.g. {"inputs": 512, "targets": 128}.
      If None, sequence length is automatically computed during eval.
    batch_size: integer, used only to check that expected padding matches the
      targets. If None, the check is skipped.
  """
  vocabulary = model_utils.get_vocabulary(mixture_or_task_name)
  tasks = t5.data.get_subtasks(
      t5.data.get_mixture_or_task(mixture_or_task_name))
  # Drop tasks with no metric_fns or without the requested split.
  tasks = model_utils.get_valid_eval_tasks(tasks, split)
  if not tasks:
    logging.info(
        "All provided tasks have metric_fns=[] or no matching splits; "
        "eval is not possible.")
    return
  if not dataset_fn:
    # Default dataset builder, used when the caller does not supply one.
    def _get_task_eval_dataset(task, sequence_length, split):
      # TODO(sharannarang): Replace with more general function.
      eval_datasets = mesh_transformer.mesh_eval_dataset_fn(
          sequence_length=sequence_length,
          dataset_split=split,
          mixture_or_task_name=task.name,
      )
      return eval_datasets[0].dataset_fn()
    dataset_fn = _get_task_eval_dataset
  summary_writer = None
  # Cache examples/targets once; sequence_length=None here so that the
  # maximum observed lengths can be measured for the auto-length path below.
  cached_examples, cached_targets, cached_datasets, max_sequence_length = \
      model_utils.get_targets_and_examples(
          tasks=tasks,
          dataset_fn=functools.partial(
              dataset_fn, split=split, sequence_length=None))
  if summary_dir:
    model_utils.write_targets_and_examples(
        summary_dir, cached_targets, cached_examples)
  if sequence_length is None:
    logging.info("Setting sequence lengths to %s", max_sequence_length)
    sequence_length = max_sequence_length
  elif (sequence_length["inputs"] < max_sequence_length["inputs"] or
        sequence_length["targets"] < max_sequence_length["targets"]):
    logging.warning(
        "Given sequence lengths are insufficient for some evaluation inputs "
        "or targets. These sequences will be truncated to fit, likely "
        "leading to sub-optimal results. Consider passing `None` for "
        "sequence_length to have them be automatically computed.\n Got: %s, "
        "\n Max Lengths:%s", sequence_length, max_sequence_length)
  elif (sequence_length["inputs"] > max_sequence_length["inputs"] or
        sequence_length["targets"] > max_sequence_length["targets"]):
    logging.warning(
        "Given sequence lengths are longer than necessary for some "
        "evaluation inputs or targets, resulting in wasted computation. "
        "Consider passing `None` for sequence_length to have them be "
        "automatically computed.\n Got: %s,\n Max Lengths: %s",
        sequence_length, max_sequence_length)
  for step in checkpoint_steps:
    logging.info("Evaluating checkpoint step: %d", step)
    # `outputs` is a flat list ordered task-by-task; slices are consumed
    # per task below, so predict_or_score_fn must return a mutable list.
    outputs = predict_or_score_fn(
        checkpoint_step=step,
        vocabulary=vocabulary,
        tasks=tasks,
        examples=cached_examples,
        datasets=cached_datasets,
        sequence_length=sequence_length)
    for task in tasks:
      # Extract the portion of decodes corresponding to this dataset
      examples = cached_examples[task.name]
      dataset_size = len(examples)
      predictions = [
          task.postprocess_fn(d, example=ex)
          for d, ex in zip(outputs[:dataset_size], examples)
      ]
      # Remove the used decodes.
      del outputs[:dataset_size]
      if summary_dir:
        predictions_filename = os.path.join(
            summary_dir,
            "{}_{}_predictions".format(task.name, step))
        model_utils.write_lines_to_file(predictions, predictions_filename)
      with tf.Graph().as_default():
        if summary_dir:
          # Lazily create a single shared FileWriter across tasks/steps.
          summary_writer = summary_writer or tf.summary.FileWriter(
              summary_dir)
        for metric_fn in task.metric_fns:
          if summary_dir:
            summary = tf.Summary()
          targets = cached_targets[task.name]
          metric_result = metric_fn(targets, predictions)
          for metric_name, metric_value in metric_result.items():
            tag = "eval/{}/{}".format(task.name, metric_name)
            logging.info("%s at step %d: %.3f", tag, step, metric_value)
            if summary_dir:
              summary.value.add(tag=tag, simple_value=metric_value)
              summary_writer.add_summary(summary, step)  # pytype: disable=attribute-error
        if summary_dir:
          summary_writer.flush()  # pytype: disable=attribute-error
  # Only padding should remain.
  if batch_size:
    expected_pad = -sum(len(t)
                        for t in cached_targets.values()) % batch_size
    if outputs and len(outputs) != expected_pad:
      raise ValueError("{} padded outputs, {} expected.".format(
          len(outputs), expected_pad))
| StarcoderdataPython |
1609301 | <filename>utils/nth_root.py
def nth_root(x, n):  # originally adapted from http://stackoverflow.com/questions/356090/
    """Find the integer component of the n'th root of x.

    Returns the unique integer y such that y ** n <= x < (y + 1) ** n.
    Uses exact integer arithmetic (exponential + binary search), so it is
    correct for arbitrarily large integers, unlike float-based x ** (1/n).

    The previous implementation used true division (`/`), which produced
    floats on Python 3 and returned wrong answers for exact powers
    (e.g. nth_root(8, 3) returned 1) and for x == 1.

    Args:
        x: non-negative integer whose root is taken.
        n: positive integer degree of the root.

    Returns:
        The floor of the real n'th root of x.

    Raises:
        ValueError: if x is negative.
    """
    if x < 0:
        raise ValueError("x must be non-negative")
    if x == 0:
        return 0
    # Exponential search for an upper bound: afterwards high ** n > x.
    high = 1
    while high ** n <= x:
        high *= 2
    low = high // 2
    # Binary search maintaining the invariant low ** n <= x < high ** n.
    while low + 1 < high:
        mid = (low + high) // 2
        if mid ** n <= x:
            low = mid
        else:
            high = mid
    return low
| StarcoderdataPython |
1652374 | # apps/listings/admin.py
# Django modules
from django.contrib import admin
# Locals
from apps.listings.models import Category, Product
# Register your models here.
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Django admin configuration for the ``Category`` model."""
    # Columns shown on the category change-list page.
    list_display = ('name', 'slug')
    # Auto-fill the slug from the name while typing in the admin form.
    prepopulated_fields = {'slug': ('name',)}
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    """Django admin configuration for the ``Product`` model."""
    # Columns shown on the product change-list page.
    list_display = ('name', 'category', 'slug', 'price', 'available')
    # Sidebar filters on the change-list page.
    list_filter = ('category', 'available')
    # Editable directly from the change-list (must also appear in list_display).
    list_editable = ('price', 'available')
    # Auto-fill the slug from the name while typing in the admin form.
    # NOTE: the original final line was corrupted by a fused dataset marker
    # ("| StarcoderdataPython |"), which was a syntax error; removed here.
    prepopulated_fields = {'slug': ('name',)}
3285577 | <reponame>anu-ka/coding-problems
# https://leetcode.com/problems/can-place-flowers/submissions/
# You have a long flowerbed in which some of the plots are planted, and some are not.
# However, flowers cannot be planted in adjacent plots.
# Given an integer array flowerbed containing 0's and 1's, where 0 means empty and 1 means not empty,
# and an integer n, return if n new flowers can be planted in the flowerbed without violating the no-adjacent-flowers rule.
import pytest
class Solution:
    def canPlaceFlowers(self, flowerbed: list[int], n: int) -> bool:
        """Return True if `n` new flowers fit in `flowerbed` without
        violating the no-adjacent-flowers rule.

        Greedy scan: plant in every empty plot whose neighbours are empty
        (or out of bounds). Planting as early as possible never reduces the
        total, so the greedy count equals the maximum plantable flowers.

        Bug fix: the original skipped ahead with ``i += 3`` without ever
        re-checking the LEFT neighbour, so it "planted" next to an occupied
        plot — e.g. canPlaceFlowers([0, 1, 1, 0, 0, 0], 2) wrongly returned
        True. The leftover debug print() was also removed.

        Note: `flowerbed` is mutated in place (planted plots become 1).
        """
        if n <= 0:
            return True
        last = len(flowerbed) - 1
        for i, plot in enumerate(flowerbed):
            if (plot == 0
                    and (i == 0 or flowerbed[i - 1] == 0)
                    and (i == last or flowerbed[i + 1] == 0)):
                flowerbed[i] = 1  # plant here; blocks i + 1 on the next pass
                n -= 1
                if n == 0:
                    return True
        return False
@pytest.mark.parametrize(
    ("flowerbed", "n", "expected"),
    [([1, 0, 0, 0, 1], 1, True), ([1, 0, 0, 0, 1], 2, False)],
)
def test_basic(flowerbed: list[int], n: int, expected: bool) -> None:
    """Smoke-test canPlaceFlowers against the two LeetCode sample cases."""
    assert expected == Solution().canPlaceFlowers(flowerbed, n)
| StarcoderdataPython |
3310498 | # -*- coding: utf-8 -*-
__author__ = "<NAME>, <NAME>, <NAME>"
__copyright__ = "Copyright 2019, The Information Security and Privacy Lab at the University of Lausanne (https://www.unil.ch/isplab/)"
__credits__ = ["<NAME>", "<NAME>",
"<NAME>", "<NAME>"]
__version__ = "1"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__project__ = "Data-less Kin Genomic Privacy Estimator"
from contextlib import closing, contextmanager
import functools
import json
import logging
import sys
import traceback
from typing import Dict, List
from typing import Optional as Opt
from typing import Sequence as Seq
from typing import Tuple
import warnings

import mysql.connector
logger = logging.getLogger(__name__)
def connect_db(db_config):
    """Open a MySQL connection wrapped in ``closing`` for use in ``with`` blocks.

    Args:
        db_config: dict of keyword arguments for ``mysql.connector.connect``,
            or a falsy value when no database is configured.

    Returns:
        A ``closing``-wrapped connection on success; an empty context manager
        yielding ``None`` when no configuration is given; or ``None`` when the
        connection attempt fails (the error is logged, not raised).
    """
    if db_config:
        # (A stray no-op `logger` expression statement was removed here.)
        try:
            # closing() lets callers use the raw connection in a `with` block.
            return closing(mysql.connector.connect(**db_config))
        except Exception as e:
            error_msg_header = "Error connecting to database, not using it. See stacktrace:\n%s" % (e,)
            error_traceback = traceback.format_exc()
            logger.warning(error_msg_header)
            logger.warning(error_traceback)
            # Fall through: implicitly return None so callers can detect failure.
    else:
        logger.info("No database configuration given, returning empty context")

        @contextmanager
        def empty_context():
            logger.info("yielding empty database connexion context")
            yield None

        return empty_context()
def db_exceptions_graceful_handler(request_function):
    """Decorator: log database errors instead of letting them propagate.

    Ensures an exception raised either by the database or by a function in
    storage.py doesn't crash the whole program; the wrapped function simply
    returns ``None`` when any exception occurs (it is logged with traceback).

    Args:
        request_function: the database-accessing function to wrap.

    Returns:
        The wrapped function (metadata preserved via functools.wraps).
    """
    @functools.wraps(request_function)  # keep __name__/__doc__ for logs & debugging
    def handle_db_exception_wrapper(*args, **kwargs):
        try:
            return request_function(*args, **kwargs)
        except Exception as e:
            error_msg_header = "A function in database package failed, see stacktrace:\n%s" % (e,)
            error_traceback = traceback.format_exc()
            logger.warning(error_msg_header)
            logger.warning(error_traceback)
    return handle_db_exception_wrapper
@db_exceptions_graceful_handler
def insert_new_request(db_connexion, tree_edges: Seq[Seq[str]], tree_sequenced_relatives: Seq[str], tree_target: str,
                       tree_signature: str, ip: str, user_id: str, user_agent: str, source: str, lng: str) -> Opt[int]:
    """Insert a new request (and its user, if unseen) in the database.

    :param db_connexion: open MySQL connection
    :param tree_edges: sequence of 2-element (node, node) name pairs forming the family tree
    :param tree_sequenced_relatives: names of tree nodes whose genome is sequenced
    :param tree_target: name of the target node; must be in the tree and not sequenced
    :param tree_signature: signature of the minimized tree
    :param ip: client IP address, stored in request.IP
    :param user_id: identifier of the requesting user
    :param user_agent: client User-Agent string
    :param source: origin of the request (stored on the user row)
    :param lng: stored in request.lng — presumably the client language code; confirm
    :return: the id corresponding to the new inserted request, None if the request
        could not be inserted (any exception is swallowed by the decorator)
    """
    # Flatten the edge list so the node-membership checks below are simple.
    tree_nodes = [n for edge in tree_edges for n in edge]
    # Structural sanity checks; an AssertionError is logged and turned into a
    # None return by @db_exceptions_graceful_handler.
    assert all(len(e) == 2 for e in tree_edges)
    assert tree_target in tree_nodes
    assert tree_target not in tree_sequenced_relatives
    assert all(n in tree_nodes for n in tree_sequenced_relatives)
    cursor = db_connexion.cursor(prepared=True)
    tree = json.dumps({
        "edges": tree_edges,
        "sequenced_relatives": tree_sequenced_relatives,
        "target": tree_target,
    })
    # INSERT IGNORE: silently skip if this user row already exists.
    cursor.execute('INSERT IGNORE INTO user(user_id, user_agent, source) VALUES (%s,%s,%s)',
                   (user_id, user_agent, source))
    cursor.execute(
        'INSERT IGNORE INTO request(tree, number_sequenced, signature, user_id, IP, lng) VALUES (%s,%s, %s,%s,%s, %s)',
        (tree, len(tree_sequenced_relatives), tree_signature, user_id, ip, lng))
    # NOTE(review): getlastrowid() is called here while insert_null_privacy_metrics
    # reads cursor.lastrowid — confirm both are supported by this connector version.
    request_id = cursor.getlastrowid()
    db_connexion.commit()
    return request_id
@db_exceptions_graceful_handler
def insert_new_tree(db_connexion, tree: str, signature: str, number_sequenced: int = None) -> Opt[int]:
    """Store a serialized family tree in the ``tree`` table.

    :param db_connexion: open MySQL connection
    :param tree: the serialized tree: output of SequencedFamilyTree.serialize()
    :param signature: the signature of the minimized SequencedFamilyTree (SequencedFamilyTree.signature)
    :param number_sequenced: optional count of sequenced relatives in the tree
    :return: the id of the newly inserted row in the tree table, or None when
        the insert fails (handled by the decorator)
    """
    cur = db_connexion.cursor(prepared=True)
    insert_query = 'INSERT IGNORE INTO tree(tree, signature, number_sequenced) VALUES (%s,%s,%s)'
    cur.execute(insert_query, (tree, signature, number_sequenced))
    db_connexion.commit()
    return cur.getlastrowid()
@db_exceptions_graceful_handler
def insert_null_privacy_metrics(db_connexion, request_id: int, mafs, tree_id: int = None) -> Dict[float, int]:
    """Insert placeholder privacy-metric rows for a request, one per maf.

    The posterior_entropy / exp_error columns are left NULL here; they are
    computed and filled in later by the daemon (see get_null_privacy_metrics
    and update_privacy_metric).

    :param db_connexion: open MySQL connection
    :param request_id: id of the request at the origin of the tree
    :param mafs: iterable of minor-allele frequencies to create rows for
    :param tree_id: optional id of the associated row in the tree table
    :return: a dictionary with keys=maf, and values = corresponding row id in value table
    """
    values_id = dict()
    cursor = db_connexion.cursor(prepared=True)
    params = [(request_id, maf, tree_id) for maf in mafs]
    for param in params:
        cursor.execute('INSERT INTO value(request_id, maf, tree_id) VALUES (%s, %s, %s)', param)
        # lastrowid reflects the row inserted by the execute() just above.
        values_id[param[1]] = cursor.lastrowid
    db_connexion.commit()
    return values_id
@db_exceptions_graceful_handler
def check_cache(db_connexion, signature: str) -> Opt[bool]:
    """Return True when a tree with the given signature is already cached.

    :param db_connexion: open MySQL connection
    :param signature: unique signature of the (minimized) tree
    :return: True if at least one matching row exists, False otherwise,
        or None when the database call fails (handled by the decorator)
    """
    cur = db_connexion.cursor(prepared=True)
    cur.execute('SELECT * FROM tree WHERE signature=%s', (signature,))
    # The result set must be consumed so that rowcount is populated.
    cur.fetchall()
    return cur.rowcount > 0
@db_exceptions_graceful_handler
def get_tree_privacy_metrics(db_connexion, signature: str) -> Opt[List[Tuple[float, float, float, float]]]:
    """Retrieves the computed privacy metrics for each maf for the given tree signature.

    :param db_connexion: open MySQL connection
    :param signature: a string representing the tree unique signature
    :return: a list of 4-tuples (maf, posterior_entropy, exp_error,
        computation_time); empty when no computed metrics exist for the
        signature. None only when the database call itself fails
        (handled by @db_exceptions_graceful_handler).
    """
    cursor = db_connexion.cursor(prepared=True)
    cursor.execute(
        'SELECT DISTINCT maf, posterior_entropy, exp_error, computation_time FROM value LEFT JOIN request ON value.request_id=request.id WHERE signature=%s and posterior_entropy is not NULL',
        (signature,))
    result = []
    for maf, posterior_entropy, exp_error, computation_time in cursor.fetchall():
        # Keep only plausible rows: exp_error is a probability in [0, 1];
        # the 1.6 entropy ceiling is presumably ~log2(3), the maximum
        # entropy of a 3-genotype distribution — TODO confirm.
        if 0 <= exp_error <= 1 and 0 <= posterior_entropy <= 1.6:
            result.append(
                (maf, posterior_entropy, exp_error, computation_time))
    return result
@db_exceptions_graceful_handler
def update_privacy_metric(db_connexion, value_id: int, posterior_entropy: float, exp_error: float,
                          computation_time: float = -1) -> None:
    """Fill in the computed privacy metrics for an existing value row.

    :param db_connexion: open MySQL connection
    :param value_id: the row id in the value table
    :param posterior_entropy: calculated posterior entropy for the row
    :param exp_error: calculated expected error for the row
    :param computation_time: seconds spent computing the metrics (-1 when unknown)
    :return: None
    """
    update_query = 'UPDATE value SET posterior_entropy=%s, exp_error=%s, computation_time=%s WHERE id=%s'
    cur = db_connexion.cursor(prepared=True)
    cur.execute(update_query, (posterior_entropy, exp_error, computation_time, value_id))
    db_connexion.commit()
@db_exceptions_graceful_handler
def get_null_privacy_metrics(db_connexion, nb_entries: int) -> Opt[List[Tuple[str, float, int]]]:
    """Select entries from table value where the privacy metrics are still NULL.

    Rows are ordered so that "rounder" maf values (fewer trailing binary
    digits in round(2**12 * maf)) come first, then by least-recently updated.

    :param db_connexion: open MySQL connection
    :param nb_entries: maximum number of entries to return
    :return: a list of tuples (serialized tree, maf, value id); implicitly
        None when there are no metrics to calculate or the query fails.
    """
    cursor = db_connexion.cursor(prepared=True)
    cursor.execute(
        'SELECT tree.tree, maf, value.id FROM value JOIN request ON value.request_id=request.id JOIN tree ON request.signature=tree.signature WHERE posterior_entropy is NULL ORDER BY POSITION("1" IN REVERSE(BIN(ROUND(POW(2, 12)*maf)))) DESC, updated_at ASC LIMIT %s',
        (nb_entries,))
    results = cursor.fetchall()
    if cursor.rowcount > 0:
        # fetchall() already yields row tuples; the original wrapped this in
        # an unnecessary identity comprehension.
        return list(results)
| StarcoderdataPython |
11585 | from django.contrib.auth import authenticate, login
from django.shortcuts import render, redirect
from cart.models import Cart
from django.views import View
from .forms import LoginForm, RegistrationForm, CreateCompanyForm
from customer.models import Customer, ShippingAddress
from src.utils.mixins import CustomerMixin
from checkout.models import ApplyOrganization
class LoginView(CustomerMixin, View):
    """Render and process the customer login form."""

    def get(self, request, *args, **kwargs):
        """Show the login form; already-authenticated users go to the catalog."""
        if request.user.is_authenticated:
            return redirect('catalog')
        return render(request, 'customer/login.html', {'form': LoginForm()})

    def post(self, request, *args, **kwargs):
        """Validate credentials and log the user in on success."""
        form = LoginForm(request.POST or None)
        if form.is_valid():
            user = authenticate(
                request,
                email=form.cleaned_data['email'],
                password=form.cleaned_data['password'],
            )
            if user:
                login(request, user)
                return redirect('catalog')
        # Invalid form or bad credentials: re-render with form errors.
        return render(request, 'customer/login.html', {'form': form})
class RegistrationView(View):
    """Render and process the customer sign-up form."""

    def get(self, request, *args, **kwargs):
        """Show the registration form; authenticated users go to the catalog."""
        if request.user.is_authenticated:
            return redirect('catalog')
        form = RegistrationForm()
        return render(request, 'customer/register.html', {'form': form})

    def post(self, request, *args, **kwargs):
        """Create the user plus its Customer, Cart and ShippingAddress rows."""
        form = RegistrationForm(request.POST or None, request.FILES or None)
        if form.is_valid():
            new_user = form.save(commit=False)
            # Bug fix: the password field key was a corrupted '<PASSWORD>'
            # placeholder. NOTE(review): 'password' assumed — confirm the
            # actual field name on RegistrationForm.
            new_user.set_password(form.cleaned_data['password'])
            # Save the user BEFORE creating related rows: Django forbids
            # saving a ForeignKey that points at an unsaved instance.
            new_user.save()
            customer = Customer.objects.create(user=new_user, status="Unrecognized")
            # objects.create() already persists the rows; the original's
            # redundant extra .save() calls were dropped.
            Cart.objects.create(customer=customer)
            ShippingAddress.objects.create(customer=customer)
            return redirect('login')
        return render(request, 'customer/register.html', {'form': form})
class CreateCompany(View):
    """Let a recognized customer register a company pending verification."""

    def get(self, request, *args, **kwargs):
        """Show the company-creation form to recognized users only."""
        if request.user.is_authenticated and request.user.STATUS_AUTH == "Recognized":
            form = CreateCompanyForm()
            return render(request, 'customer/create_company.html', {'form': form})
        return redirect('catalog')

    def post(self, request, *args, **kwargs):
        """Create a company with "No verify" status for recognized users."""
        if not (request.user.is_authenticated and request.user.STATUS_AUTH == "Recognized"):
            # Bug fix: the original fell through and returned None for
            # unauthenticated/unrecognized POSTs (a server error in Django);
            # mirror the GET handler and redirect to the catalog instead.
            return redirect('catalog')
        form = CreateCompanyForm(request.POST or None, request.FILES or None)
        if form.is_valid():
            new_company = form.save(commit=False)
            new_company.STATUS_COMPANY = "No verify"
            new_company.user = request.user
            new_company.save()
            return redirect('catalog')
        # NOTE(review): the original rendered the registration template on
        # invalid input; kept as-is, but this probably should be
        # 'customer/create_company.html' — confirm.
        return render(request, 'customer/register.html', {'form': form})
| StarcoderdataPython |
1602488 | ## set up logging
import logging, os
logging.basicConfig(level=os.environ.get("LOGLEVEL","INFO"))
log = logging.getLogger(__name__)
## import modules
import octvi.exceptions, octvi.array, gdal
from gdalnumeric import *
import numpy as np
def getDatasetNames(stack_path: str) -> list:
    """Return the names of every subdataset in a hierarchical file.

    The names are formatted so they can be passed directly to other
    functions' 'dataset_name' argument.
    """
    # Subdataset paths are delimited differently in HDF4 (:) and HDF5 (/).
    ext = os.path.splitext(stack_path)[1]
    if ext == ".hdf":
        splitter = ":"
    elif ext == ".h5":
        splitter = "/"
    else:
        raise octvi.exceptions.FileTypeError("File must be of format .hdf or .h5")
    ds = gdal.Open(stack_path, 0)  # open stack as a read-only gdal dataset
    # The display name is the last path component, minus surrounding quotes.
    return [sd[0].split(splitter)[-1].strip("\"") for sd in ds.GetSubDatasets()]
def datasetToPath(stack_path, dataset_name) -> str:
    """Return the full gdal subdataset path for the named dataset.

    Raises octvi.exceptions.DatasetNotFoundError when no subdataset of the
    requested name exists, and FileTypeError for unsupported extensions.
    """
    # Subdataset paths are delimited differently in HDF4 (:) and HDF5 (/).
    ext = os.path.splitext(stack_path)[1]
    if ext == ".hdf":
        splitter = ":"
    elif ext == ".h5":
        splitter = "/"
    else:
        raise octvi.exceptions.FileTypeError("File must be of format .hdf or .h5")
    match = None
    ds = gdal.Open(stack_path, 0)
    for sd_path, _ in ds.GetSubDatasets():
        # Compare against the unquoted final path component.
        if sd_path.split(splitter)[-1].strip("\"") == dataset_name:
            match = sd_path
    if match is None:
        raise octvi.exceptions.DatasetNotFoundError(f"Dataset '{dataset_name}' not found in '{os.path.basename(stack_path)}'")
    return match
def datasetToArray(stack_path, dataset_name) -> "numpy array":
    """Read a named subdataset out of a hierarchical file as a numpy array.

    Parameters
    ----------
    stack_path: str
        Full path to heirarchical file containing the desired subdataset
    dataset_name: str
        Name of desired subdataset, as it appears in the heirarchical file
    """
    sd_path = datasetToPath(stack_path, dataset_name)
    band = gdal.Open(sd_path, 0).GetRasterBand(1)
    return BandReadAsArray(band)
def datasetToRaster(stack_path, dataset_name, out_path, dtype=None, *args, **kwargs) -> None:
    """Extract a subdataset from an hdf/h5 file and save it to a new raster.

    Thin wrapper around datasetToArray() and octvi.array.toRaster(); the
    source stack is used as the georeference model for the output file.

    Arguments
    ---------
    stack_path: str
    dataset_name: str
    out_path: str
    """
    data = datasetToArray(stack_path, dataset_name)
    return octvi.array.toRaster(data, out_path, model_file=stack_path, dtype=dtype)
def ndviToArray(in_stack) -> "numpy array":
    """
    This function finds the correct Red and NIR bands
    from a hierarchical file, calculates an NDVI array,
    and returns the output in numpy array format.
    Valid input formats are MODIS HDF or VIIRS HDF5 (h5).
    ...
    Parameters
    ----------
    in_stack: str
        Full path to input hierarchical file
    """
    # Product short-name fragment, e.g. "09Q4" out of "MOD09Q4.A2020...".
    suffix = os.path.basename(in_stack).split(".")[0][3:7]
    # Pre-computed NDVI products: read the NDVI subdataset directly.
    if suffix == "09Q4" or suffix == "13Q4":
        arr_ndvi = datasetToArray(in_stack, "250m 8 days NDVI")
    elif suffix == "13Q1":
        arr_ndvi = datasetToArray(in_stack, "250m 16 days NDVI")
    elif suffix == "09CM":
        ## determine correct band subdataset names (CMG products)
        ext = os.path.splitext(in_stack)[1]
        if ext == ".hdf":
            sdName_red = "Coarse Resolution Surface Reflectance Band 1"
            sdName_nir = "Coarse Resolution Surface Reflectance Band 2"
        elif ext == '.h5':
            sdName_red = "SurfReflect_I1"
            sdName_nir = "SurfReflect_I2"
        ## extract red and nir bands from stack
        arr_red = datasetToArray(in_stack,sdName_red)
        arr_nir = datasetToArray(in_stack,sdName_nir)
        ## perform calculation
        arr_ndvi = octvi.array.calcNdvi(arr_red,arr_nir)
    else:
        ## determine correct band subdataset names (standard surface-reflectance products)
        ext = os.path.splitext(in_stack)[1]
        if ext == ".hdf":
            sdName_red = "sur_refl_b01"
            sdName_nir = "sur_refl_b02"
        elif ext == ".h5":
            sdName_red = "SurfReflect_I1"
            sdName_nir = "SurfReflect_I2"
        else:
            raise octvi.exceptions.FileTypeError("File must be of type .hdf or .h5")
        ## extract red and nir bands from stack
        arr_red = datasetToArray(in_stack,sdName_red)
        arr_nir = datasetToArray(in_stack,sdName_nir)
        ## perform calculation
        arr_ndvi = octvi.array.calcNdvi(arr_red,arr_nir)
    return arr_ndvi
def gcviToArray(in_stack:str) -> "numpy array":
    """
    This function finds the correct Green and NIR bands
    from a hierarchical file, calculates a GCVI array,
    and returns the output in numpy array format.
    Valid input formats are MOD09CMG HDF (or its h5 variant)
    and MOD09A1 HDF.
    ...
    Parameters
    ----------
    in_stack: str
        Full path to input hierarchical file
    """
    # Product short-name fragment, e.g. "09CM" out of "MOD09CMG.A2020...".
    suffix = os.path.basename(in_stack).split(".")[0][3:7]
    if suffix == "09CM":
        ## determine correct band subdataset names
        ext = os.path.splitext(in_stack)[1]
        if ext == ".hdf":
            sdName_green = "Coarse Resolution Surface Reflectance Band 4"
            sdName_nir = "Coarse Resolution Surface Reflectance Band 2"
        elif ext == '.h5':
            sdName_green = "SurfReflect_M4"
            sdName_nir = "SurfReflect_I2"
        ## extract green and nir bands from stack
        arr_green = datasetToArray(in_stack,sdName_green)
        arr_nir = datasetToArray(in_stack,sdName_nir)
        ## perform calculation
        arr_gcvi = octvi.array.calcGcvi(arr_green,arr_nir)
    elif suffix == "09A1":
        sdName_green = "sur_refl_b04"
        sdName_nir = "sur_refl_b02"
        arr_green = datasetToArray(in_stack,sdName_green)
        arr_nir = datasetToArray(in_stack,sdName_nir)
        arr_gcvi = octvi.array.calcGcvi(arr_green,arr_nir)
    else:
        raise octvi.exceptions.UnsupportedError("Only MOD09CMG and MOD09A1 imagery is supported for GCVI generation")
    return arr_gcvi
def ndwiToArray(in_stack: str) -> "numpy array":
    """
    This function finds the correct SWIR and NIR bands
    from a hierarchical file, calculates an NDWI array,
    and returns the output in numpy array format.
    Valid input format is MOD09A1 HDF.
    ...
    Parameters
    ----------
    in_stack: str
        Full path to input hierarchical file
    """
    # Product short-name fragment, e.g. "09A1" out of "MOD09A1.A2020...".
    suffix = os.path.basename(in_stack).split(".")[0][3:7]
    if suffix == "09A1":
        sdName_nir = "sur_refl_b02"
        sdName_swir = "sur_refl_b05"
        arr_nir = datasetToArray(in_stack, sdName_nir)
        arr_swir = datasetToArray(in_stack, sdName_swir)
        arr_ndwi = octvi.array.calcNdwi(arr_nir, arr_swir)
    else:
        # Bug fix: the error message previously said "GCVI" (copy-paste
        # from gcviToArray); this function produces NDWI.
        raise octvi.exceptions.UnsupportedError("Only MOD09A1 imagery is supported for NDWI generation")
    return arr_ndwi
def ndviToRaster(in_stack, out_path, qa_name=None) -> str:
    """Convert a hierarchical data file directly into an NDVI raster.

    Parameters
    ----------
    in_stack: str
        Full path to the input hdf/h5 file
    out_path: str
        Where the output raster will be written
    qa_name: str, optional
        Name of a QA dataset; if included, produces a two-band tiff with
        the QA layer as the second band

    Returns
    -------
    The string path to the output file
    """
    # Compute NDVI, then apply cloud, shadow, and water masks.
    ndviArray = ndviToArray(in_stack)
    ndviArray = octvi.array.mask(ndviArray, in_stack)
    # First subdataset name, used as the georeference model band.
    # (Large blocks of superseded commented-out code were removed here.)
    sample_sd = getDatasetNames(in_stack)[0]
    if qa_name is None:
        octvi.array.toRaster(ndviArray, out_path, in_stack, sample_sd)
    else:
        # get qa array and write a multiband raster at out_path
        qaArray = datasetToArray(in_stack, qa_name)
        # NOTE(review): unlike the branch above, sample_sd is not passed
        # here — confirm octvi.array.toRaster's expected positional args.
        octvi.array.toRaster(ndviArray, out_path, in_stack, qa_array=qaArray)
    return out_path
def gcviToRaster(in_stack: str, out_path: str) -> str:
    """Convert a hierarchical data file directly into a GCVI raster.

    Returns the string path to the output file.
    """
    # Compute GCVI, then apply cloud, shadow, and water masks.
    gcviArray = gcviToArray(in_stack)
    gcviArray = octvi.array.mask(gcviArray, in_stack)
    # Use the first subdataset as the georeference model for the output.
    # (Superseded commented-out code was removed here.)
    sample_sd = getDatasetNames(in_stack)[0]
    octvi.array.toRaster(gcviArray, out_path, datasetToPath(in_stack, sample_sd))
    return out_path
def ndwiToRaster(in_stack: str, out_path: str) -> str:
    """Convert a hierarchical data file directly into an NDWI raster.

    Returns the string path to the output file.
    """
    # Compute the index, then mask cloud, shadow, and water pixels.
    masked = octvi.array.mask(ndwiToArray(in_stack), in_stack)
    # Georeference the output against the stack's first subdataset.
    reference = datasetToPath(in_stack, getDatasetNames(in_stack)[0])
    octvi.array.toRaster(masked, out_path, reference)
    return out_path
def cmgToViewAngArray(source_stack,product="MOD09CMG") -> "numpy array":
    """
    This function takes the path to a M*D CMG file, and returns
    the view zenith angle of each pixel. Invalid (<= 0) angles and,
    for MOD09CMG, ephemeral-water pixels are set to 9999 so they are
    only used as a last resort in compositing.
    Returns a numpy array of the same dimensions as the input raster.
    ***
    Parameters
    ----------
    source_stack: str
        Path to the M*D CMG .hdf file on disk
    product: str
        Either "MOD09CMG" or "VNP09CMG"; selects the subdataset layout
    """
    if product == "MOD09CMG":
        vang_arr = datasetToArray(source_stack,"Coarse Resolution View Zenith Angle")
        state_arr = datasetToArray(source_stack,"Coarse Resolution State QA")
        # Land/water class is encoded in state QA bits 3-5.
        water = ((state_arr & 0b111000)) # check bits
        vang_arr[water==32]=9999 # ephemeral water??? (hedged: class 100 — confirm)
        vang_arr[vang_arr<=0]=9999
    elif product == "VNP09CMG":
        vang_arr = datasetToArray(source_stack,"SensorZenith")
        vang_arr[vang_arr<=0]=9999
    return vang_arr
def cmgListToWaterArray(stacks:list,product="MOD09CMG") -> "numpy array":
    """
    This function takes a list of CMG .hdf files, and returns
    a binary array, with "0" for non-water pixels and "1" for
    water pixels. If any file flags water in a pixel, its value
    is stored as "1"
    ***
    Parameters
    ----------
    stacks: list
        List of hdf filepaths (M*D**CMG)
    product: str
        Either "MOD09CMG" or "VNP09CMG"; selects which QA layout to read
    """
    water_list = []
    for source_stack in stacks:
        if product == "MOD09CMG":
            state_arr = datasetToArray(source_stack,"Coarse Resolution State QA")
            # Land/water classification lives in state QA bits 3-5.
            water = ((state_arr & 0b111000)) # check bits
            water[water==56]=1 # deep ocean
            water[water==48]=1 # continental/moderate ocean
            water[water==24]=1 # shallow inland water
            water[water==40]=1 # deep inland water
            water[water==0]=1 # shallow ocean
            # A fully-zero state word means "no data", not shallow ocean.
            water[state_arr==0]=0
            water[water!=1]=0 # set non-water to zero
        elif product == "VNP09CMG":
            state_arr = datasetToArray(source_stack,"State_QA")
            water = ((state_arr & 0b111000)) # check bits 3-5
            water[water == 40] = 0 # "coastal" = 101
            water[water>8]=1 # sea water = 011; inland water = 010
            water[water!=1]=0 # set non-water to zero
            water[water!=0]=1
        water_list.append(water)
    # Union across all files: a pixel is water if ANY input flags it.
    water_final = np.maximum.reduce(water_list)
    return water_final
def cmgToRankArray(source_stack,product="MOD09CMG") -> "numpy array":
    """
    This function takes the path to a MOD**CMG file, and returns
    the rank of each pixel, as defined on page 7 of the MOD09 user
    guide (http://modis-sr.ltdri.org/guide/MOD09_UserGuide_v1.4.pdf)
    Returns a numpy array of the same dimensions as the input raster.

    Higher rank = more desirable observation (10 is the default/best;
    decreasing ranks mark progressively worse conditions; 0 marks water).
    ***
    Parameters
    ----------
    source_stack: str
        Path to the CMG .hdf/.h5 file on disk
    product: str
        String of either MOD09CMG or VNP09CMG
    """
    if product == "MOD09CMG":
        qa_arr = datasetToArray(source_stack,"Coarse Resolution QA")
        state_arr = datasetToArray(source_stack,"Coarse Resolution State QA")
        vang_arr = datasetToArray(source_stack,"Coarse Resolution View Zenith Angle")
        # Non-positive view angles are invalid; push them out of range.
        vang_arr[vang_arr<=0]=9999
        sang_arr = datasetToArray(source_stack,"Coarse Resolution Solar Zenith Angle")
        rank_arr = np.full(qa_arr.shape,10) # empty rank array, 10 = best
        ## perform the ranking! Later (worse) conditions overwrite earlier ones.
        logging.debug("--rank 9: SNOW")
        SNOW = ((state_arr & 0b1000000000000) | (state_arr & 0b1000000000000000)) # state bit 12 OR 15
        rank_arr[SNOW>0]=9 # snow
        del SNOW
        logging.debug("--rank 8: HIGHAEROSOL")
        HIGHAEROSOL=(state_arr & 0b11000000) # state bits 6 AND 7
        rank_arr[HIGHAEROSOL==192]=8
        del HIGHAEROSOL
        logging.debug("--rank 7: CLIMAEROSOL")
        CLIMAEROSOL=(state_arr & 0b11000000) # state bits 6 & 7
        #CLIMAEROSOL=(cloudMask & 0b100000000000000) # cloudMask bit 14
        rank_arr[CLIMAEROSOL==0]=7 # default aerosol level
        del CLIMAEROSOL
        logging.debug("--rank 6: UNCORRECTED")
        UNCORRECTED = (qa_arr & 0b11) # qa bits 0 AND 1
        rank_arr[UNCORRECTED==3]=6 # flagged uncorrected
        del UNCORRECTED
        logging.debug("--rank 5: SHADOW")
        SHADOW = (state_arr & 0b100) # state bit 2
        rank_arr[SHADOW==4]=5 # cloud shadow
        del SHADOW
        logging.debug("--rank 4: CLOUDY")
        # set adj to 11 and internal to 12 to verify in qa output
        # NOTE: only the internal-cloud flag (bit 10) is applied below;
        # the cloud-pixel and cloud-adjacent checks are deliberately disabled.
        CLOUDY = ((state_arr & 0b11)) # state bit 0 OR bit 1 OR bit 10 OR bit 13
        #rank_arr[CLOUDY!=0]=4 # cloud pixel
        del CLOUDY
        CLOUDADJ = (state_arr & 0b10000000000000)
        #rank_arr[CLOUDADJ>0]=4 # adjacent to cloud
        del CLOUDADJ
        CLOUDINT = (state_arr & 0b10000000000)
        rank_arr[CLOUDINT>0]=4
        del CLOUDINT
        logging.debug("--rank 3: HIGHVIEW")
        # Angles are stored scaled by 0.01 degrees.
        rank_arr[sang_arr>(85/0.01)]=3 # HIGHVIEW
        logging.debug("--rank 2: LOWSUN")
        rank_arr[vang_arr>(60/0.01)]=2 # LOWSUN
        # BAD pixels
        logging.debug("--rank 1: BAD pixels") # qa bits (2-5 OR 6-9 == 1110)
        BAD = ((qa_arr & 0b111100) | (qa_arr & 0b1110000000))
        rank_arr[BAD==112]=1
        rank_arr[BAD==896]=1
        rank_arr[BAD==952]=1
        del BAD
        logging.debug("-building water mask")
        # Land/water classification lives in state QA bits 3-5.
        water = ((state_arr & 0b111000)) # check bits
        water[water==56]=1 # deep ocean
        water[water==48]=1 # continental/moderate ocean
        water[water==24]=1 # shallow inland water
        water[water==40]=1 # deep inland water
        water[water==0]=1 # shallow ocean
        rank_arr[water==1]=0
        vang_arr[water==32]=9999 # ephemeral water???
        water[state_arr==0]=0
        water[water!=1]=0 # set non-water to zero
    elif product == "VNP09CMG":
        qf2 = datasetToArray(source_stack,"SurfReflect_QF2")
        qf4 = datasetToArray(source_stack,"SurfReflect_QF4")
        state_arr = datasetToArray(source_stack,"State_QA")
        vang_arr = datasetToArray(source_stack,"SensorZenith")
        vang_arr[vang_arr<=0]=9999
        sang_arr = datasetToArray(source_stack,"SolarZenith")
        rank_arr = np.full(state_arr.shape,10) # empty rank array, 10 = best
        ## perform the ranking!
        logging.debug("--rank 9: SNOW")
        SNOW = (state_arr & 0b1000000000000000) # state bit 15
        rank_arr[SNOW>0]=9 # snow
        del SNOW
        logging.debug("--rank 8: HIGHAEROSOL")
        HIGHAEROSOL=(qf2 & 0b10000) # qf2 bit 4
        rank_arr[HIGHAEROSOL!=0]=8
        del HIGHAEROSOL
        logging.debug("--rank 7: AEROSOL")
        # NOTE: the rank-7 assignment is deliberately disabled for VIIRS.
        CLIMAEROSOL=(state_arr & 0b1000000) # state bit 6
        #CLIMAEROSOL=(cloudMask & 0b100000000000000) # cloudMask bit 14
        #rank_arr[CLIMAEROSOL==0]=7 # "No"
        del CLIMAEROSOL
        # logging.debug("--rank 6: UNCORRECTED")
        # UNCORRECTED = (qa_arr & 0b11) # qa bits 0 AND 1
        # rank_arr[UNCORRECTED==3]=6 # flagged uncorrected
        # del UNCORRECTED
        logging.debug("--rank 5: SHADOW")
        SHADOW = (state_arr & 0b100) # state bit 2
        rank_arr[SHADOW!=0]=5 # cloud shadow
        del SHADOW
        logging.debug("--rank 4: CLOUDY")
        # set adj to 11 and internal to 12 to verify in qa output
        # CLOUDY = ((state_arr & 0b11)) # state bit 0 OR bit 1 OR bit 10 OR bit 13
        # rank_arr[CLOUDY!=0]=4 # cloud pixel
        # del CLOUDY
        # CLOUDADJ = (state_arr & 0b10000000000) # nonexistent for viirs
        # #rank_arr[CLOUDADJ>0]=4 # adjacent to cloud
        # del CLOUDADJ
        CLOUDINT = (state_arr & 0b10000000000) # state bit 10
        rank_arr[CLOUDINT>0]=4
        del CLOUDINT
        logging.debug("--rank 3: HIGHVIEW")
        rank_arr[sang_arr>(85/0.01)]=3 # HIGHVIEW
        logging.debug("--rank 2: LOWSUN")
        rank_arr[vang_arr>(60/0.01)]=2 # LOWSUN
        # BAD pixels
        logging.debug("--rank 1: BAD pixels") # qa bits (2-5 OR 6-9 == 1110)
        BAD = (qf4 & 0b110)
        rank_arr[BAD!= 0]=1
        del BAD
        logging.debug("-building water mask")
        water = ((state_arr & 0b111000)) # check bits 3-5
        water[water == 40] = 0 # "coastal" = 101
        water[water>8]=1 # sea water = 011; inland water = 010
        # water[water==16]=1 # inland water = 010
        # water[state_arr==0]=0
        water[water!=1]=0 # set non-water to zero
        water[water!=0]=1
        rank_arr[water==1]=0
    # return the results
    return rank_arr
def cmgBestViPixels(input_stacks:list,vi="NDVI",product = "MOD09CMG",snow_mask=False) -> "numpy array":
    """
    This function takes a list of hdf stack paths, and
    returns the 'best' VI value for each pixel location,
    determined through the ranking method (see
    cmgToRankArray() for details).
    ***
    Parameters
    ----------
    input_stacks: list
        A list of strings, each pointing to a CMG hdf/h5 file
        on disk
    vi: str
        Vegetation index to composite; one of "NDVI" or "GCVI"
    product: str
        A string of either "MOD09CMG" or "VNP09CMG"
    snow_mask: bool
        When True, snow pixels (rank 9) are excluded from compositing
    """
    # Dispatch table from VI name to its extraction function.
    viExtractors = {
        "NDVI":ndviToArray,
        "GCVI":gcviToArray
    }
    rankArrays = [cmgToRankArray(hdf,product) for hdf in input_stacks]
    vangArrays = [cmgToViewAngArray(hdf,product) for hdf in input_stacks]
    try:
        viArrays = [viExtractors[vi](hdf) for hdf in input_stacks]
    except KeyError:
        raise octvi.exceptions.UnsupportedError(f"Index type '{vi}' is not recognized or not currently supported.")
    # no nodata wanted: demote nodata (-3000) pixels to the worst rank
    for i in range(len(rankArrays)):
        rankArrays[i][viArrays[i] == -3000] = 0
    # apply snow mask if requested
    if snow_mask:
        for rankArray in rankArrays:
            rankArray[rankArray==9] = 0
    # Higher rank = better pixel; take the best available rank per location.
    idealRank = np.maximum.reduce(rankArrays)
    # mask non-ideal view angles, then break rank ties with the smallest
    # view zenith angle across the inputs
    for i in range(len(vangArrays)):
        vangArrays[i][rankArrays[i] != idealRank] = 9998
        vangArrays[i][vangArrays[i] == 0] = 9997
    idealVang = np.minimum.reduce(vangArrays)
    finalVi = np.full(viArrays[0].shape,-3000)
    # mask each VI array to only where it matches the ideal rank + angle
    for i in range(len(viArrays)):
        finalVi[vangArrays[i] == idealVang] = viArrays[i][vangArrays[i] == idealVang]
    # mask out ranks that are too low (rank <= 7 is considered unusable here)
    finalVi[idealRank <=7] = -3000
    # mask water
    water = cmgListToWaterArray(input_stacks,product)
    finalVi[water==1] = -3000
    # return result
    return finalVi
def qaTo8BitArray(stack_path) -> "numpy array":
    """Return an 8-bit QA array for the passed image file (NOT IMPLEMENTED).

    MODIS and VIIRS ship 16-bit QA layers, but several of those bits are
    redundant for VI mapping (e.g. the land/water flag is unused because
    non-land pixels are already masked). The intent of this routine is to
    compress the 16-bit mask into an 8-bit equivalent that keeps all
    functionality needed here.

    ***

    Parameters
    ----------
    stack_path:str
        Full path to input hierarchical file on disk

    Returns
    -------
    None — placeholder; logs a warning until the routine is written.
    """
    log.warning("octvi.extract.qaTo8BitArray() is not implemented!")
    return None
| StarcoderdataPython |
1683783 | <filename>tests/v0x01/test_controller2switch/test_stats_reply.py
"""Test for StatsReply message."""
import unittest
from pyof.v0x01.controller2switch import common, stats_reply
class TestStatsReply(unittest.TestCase):
    """Test for StatsReply message."""
    def setUp(self):
        """Basic test setup: build a StatsReply with known field values."""
        self.message = stats_reply.StatsReply()
        self.message.header.xid = 1
        self.message.type = common.StatsTypes.OFPST_FLOW
        self.message.flags = 0x0001
        self.message.body = []
    def test_get_size(self):
        """[Controller2Switch/StatsReply] - size 12."""
        # Expected size of a StatsReply with an empty body.
        self.assertEqual(self.message.get_size(), 12)
    @unittest.skip('Not yet implemented')
    def test_pack(self):
        """[Controller2Switch/StatsReply] - packing."""
        # TODO
        pass
    @unittest.skip('Not yet implemented')
    def test_unpack(self):
        """[Controller2Switch/StatsReply] - unpacking."""
        # TODO
        pass
| StarcoderdataPython |
129944 | <filename>tests/unit/test_views.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import elasticsearch
import pretend
import pytest
from pyramid.httpexceptions import (
HTTPBadRequest,
HTTPNotFound,
HTTPSeeOther,
HTTPServiceUnavailable,
)
from trove_classifiers import classifiers
from webob.multidict import MultiDict
from warehouse import views
from warehouse.views import (
current_user_indicator,
flash_messages,
forbidden,
forbidden_include,
force_status,
health,
httpexception_view,
index,
list_classifiers,
locale,
opensearchxml,
robotstxt,
search,
service_unavailable,
session_notifications,
stats,
)
from ..common.db.accounts import UserFactory
from ..common.db.classifiers import ClassifierFactory
from ..common.db.packaging import FileFactory, ProjectFactory, ReleaseFactory
class TestHTTPExceptionView:
    """Tests for the generic httpexception_view error renderer."""
    def test_returns_context_when_no_template(self, pyramid_config):
        # 499 has no matching "<status>.html" template, so the exception
        # context itself is returned unchanged.
        pyramid_config.testing_add_renderer("non-existent.html")
        response = context = pretend.stub(status_code=499)
        request = pretend.stub()
        assert httpexception_view(context, request) is response
    @pytest.mark.parametrize("status_code", [403, 404, 410, 500])
    def test_renders_template(self, pyramid_config, status_code):
        renderer = pyramid_config.testing_add_renderer("{}.html".format(status_code))
        context = pretend.stub(
            status="{} My Cool Status".format(status_code),
            status_code=status_code,
            headers={},
        )
        request = pretend.stub()
        response = httpexception_view(context, request)
        assert response.status_code == status_code
        assert response.status == "{} My Cool Status".format(status_code)
        renderer.assert_()
    @pytest.mark.parametrize("status_code", [403, 404, 410, 500])
    def test_renders_template_with_headers(self, pyramid_config, status_code):
        # Headers carried by the exception context must survive rendering.
        renderer = pyramid_config.testing_add_renderer("{}.html".format(status_code))
        context = pretend.stub(
            status="{} My Cool Status".format(status_code),
            status_code=status_code,
            headers={"Foo": "Bar"},
        )
        request = pretend.stub()
        response = httpexception_view(context, request)
        assert response.status_code == status_code
        assert response.status == "{} My Cool Status".format(status_code)
        assert response.headers["Foo"] == "Bar"
        renderer.assert_()
    def test_renders_404_with_csp(self, pyramid_config):
        # The 404 page embeds YouTube content, so the view must merge the
        # corresponding sources into the CSP service.
        renderer = pyramid_config.testing_add_renderer("404.html")
        csp = {}
        services = {"csp": pretend.stub(merge=csp.update)}
        context = HTTPNotFound()
        request = pretend.stub(find_service=lambda name: services[name], path="")
        response = httpexception_view(context, request)
        assert response.status_code == 404
        assert response.status == "404 Not Found"
        assert csp == {
            "frame-src": ["https://www.youtube-nocookie.com"],
            "script-src": ["https://www.youtube.com", "https://s.ytimg.com"],
        }
        renderer.assert_()
    def test_simple_404(self):
        # Requests under /simple/ get a plain-text 404 body.
        csp = {}
        services = {"csp": pretend.stub(merge=csp.update)}
        context = HTTPNotFound()
        for path in ("/simple/not_found_package", "/simple/some/unusual/path/"):
            request = pretend.stub(find_service=lambda name: services[name], path=path)
            response = httpexception_view(context, request)
            assert response.status_code == 404
            assert response.status == "404 Not Found"
            assert response.content_type == "text/plain"
            assert response.text == "404 Not Found"
class TestForbiddenView:
    """Tests for the forbidden (403) view."""
    def test_logged_in_returns_exception(self, pyramid_config):
        # An authenticated user gets the rendered 403 page, not a redirect.
        renderer = pyramid_config.testing_add_renderer("403.html")
        exc = pretend.stub(status_code=403, status="403 Forbidden", headers={})
        request = pretend.stub(authenticated_userid=1)
        resp = forbidden(exc, request)
        assert resp.status_code == 403
        renderer.assert_()
    def test_logged_out_redirects_login(self):
        # An anonymous user is redirected to login with the original
        # path (query string included) carried in ?next=.
        exc = pretend.stub()
        request = pretend.stub(
            authenticated_userid=None,
            path_qs="/foo/bar/?b=s",
            route_url=pretend.call_recorder(
                lambda route, _query: "/accounts/login/?next=/foo/bar/%3Fb%3Ds"
            ),
        )
        resp = forbidden(exc, request)
        assert resp.status_code == 303
        assert resp.headers["Location"] == "/accounts/login/?next=/foo/bar/%3Fb%3Ds"
class TestForbiddenIncludeView:
    """Tests for the forbidden view used by ESI includes."""
    def test_forbidden_include(self):
        # Includes get an empty 403 body rather than a full error page.
        exc = pretend.stub()
        request = pretend.stub()
        resp = forbidden_include(exc, request)
        assert resp.status_code == 403
        assert resp.content_type == "text/html"
        assert resp.content_length == 0
class TestServiceUnavailableView:
    """Tests for the service_unavailable (503) view."""
    def test_renders_503(self, pyramid_config, pyramid_request):
        renderer = pyramid_config.testing_add_renderer("503.html")
        renderer.string_response = "A 503 Error"
        resp = service_unavailable(pretend.stub(), pyramid_request)
        assert resp.status_code == 503
        assert resp.content_type == "text/html"
        assert resp.body == b"A 503 Error"
def test_robotstxt(pyramid_request):
    """robots.txt renders with an empty context and a text/plain type."""
    assert robotstxt(pyramid_request) == {}
    assert pyramid_request.response.content_type == "text/plain"
def test_opensearchxml(pyramid_request):
    """opensearch.xml renders with an empty context and a text/xml type."""
    assert opensearchxml(pyramid_request) == {}
    assert pyramid_request.response.content_type == "text/xml"
class TestIndex:
    """Tests for the index (front page) view."""
    def test_index(self, db_request):
        project = ProjectFactory.create()
        release1 = ReleaseFactory.create(project=project)
        release1.created = datetime.date(2011, 1, 1)
        release2 = ReleaseFactory.create(project=project)
        release2.created = datetime.date(2012, 1, 1)
        FileFactory.create(
            release=release1,
            filename="{}-{}.tar.gz".format(project.name, release1.version),
            python_version="source",
        )
        UserFactory.create()
        # num_users is 3: presumably the explicit UserFactory user plus
        # users created implicitly by the two release factories — TODO confirm.
        assert index(db_request) == {
            # assert that ordering is correct
            "latest_releases": [release2, release1],
            "trending_projects": [release2],
            "num_projects": 1,
            "num_users": 3,
            "num_releases": 2,
            "num_files": 1,
        }
class TestLocale:
    """Tests for the locale-switching view."""
    @pytest.mark.parametrize(
        ("referer", "redirect", "get", "valid"),
        [
            # valid locale: cookie set, flash shown
            (None, "/fake-route", {"locale_id": "en"}, True),
            # missing locale_id param: no cookie
            ("http://example.com", "/fake-route", {"nonsense": "arguments"}, False),
            # unknown locale: no cookie, redirect back to referer
            ("/robots.txt", "/robots.txt", {"locale_id": "non-existent-locale"}, False),
        ],
    )
    def test_locale(self, referer, redirect, get, valid, monkeypatch):
        localizer = pretend.stub(translate=lambda *a: "translated")
        make_localizer = pretend.call_recorder(lambda *a: localizer)
        monkeypatch.setattr(views, "make_localizer", make_localizer)
        tdirs = pretend.stub()
        request = pretend.stub(
            GET=get,
            referer=referer,
            route_path=pretend.call_recorder(lambda r: "/fake-route"),
            session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
            host=None,
            registry=pretend.stub(queryUtility=lambda *a: tdirs),
        )
        result = locale(request)
        assert isinstance(result, HTTPSeeOther)
        assert result.location == redirect
        if valid:
            assert "Set-Cookie" in result.headers
            assert f"_LOCALE_={get['locale_id']};" in result.headers["Set-Cookie"]
            assert make_localizer.calls == [pretend.call(get["locale_id"], tdirs)]
            assert request.session.flash.calls == [
                pretend.call("translated", queue="success")
            ]
        else:
            assert "Set-Cookie" not in result.headers
def test_esi_current_user_indicator():
    """The ESI user-indicator view renders with an empty context."""
    assert current_user_indicator(pretend.stub()) == {}
def test_esi_flash_messages():
    """The ESI flash-messages view renders with an empty context."""
    assert flash_messages(pretend.stub()) == {}
def test_esi_session_notifications():
    """The ESI session-notifications view renders with an empty context."""
    assert session_notifications(pretend.stub()) == {}
class TestSearch:
    """Tests for the search view (Elasticsearch-backed)."""
    @pytest.mark.parametrize("page", [None, 1, 5])
    def test_with_a_query(self, monkeypatch, db_request, metrics, page):
        params = MultiDict({"q": "foo bar"})
        if page is not None:
            params["page"] = page
        db_request.params = params
        db_request.es = pretend.stub()
        es_query = pretend.stub()
        get_es_query = pretend.call_recorder(lambda *a, **kw: es_query)
        monkeypatch.setattr(views, "get_es_query", get_es_query)
        # page_count is kept above the requested page so pagination succeeds
        page_obj = pretend.stub(page_count=(page or 1) + 10, item_count=1000)
        page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
        monkeypatch.setattr(views, "ElasticsearchPage", page_cls)
        url_maker = pretend.stub()
        url_maker_factory = pretend.call_recorder(lambda request: url_maker)
        monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
        assert search(db_request) == {
            "page": page_obj,
            "term": params.get("q", ""),
            "order": "",
            "applied_filters": [],
            "available_filters": [],
        }
        assert get_es_query.calls == [
            pretend.call(db_request.es, params.get("q"), "", [])
        ]
        assert page_cls.calls == [
            pretend.call(es_query, url_maker=url_maker, page=page or 1)
        ]
        assert url_maker_factory.calls == [pretend.call(db_request)]
        assert metrics.histogram.calls == [
            pretend.call("warehouse.views.search.results", 1000)
        ]
    @pytest.mark.parametrize("page", [None, 1, 5])
    def test_with_classifiers(self, monkeypatch, db_request, metrics, page):
        params = MultiDict([("q", "foo bar"), ("c", "foo :: bar"), ("c", "fiz :: buz")])
        if page is not None:
            params["page"] = page
        db_request.params = params
        es_query = pretend.stub()
        db_request.es = pretend.stub()
        get_es_query = pretend.call_recorder(lambda *a, **kw: es_query)
        monkeypatch.setattr(views, "get_es_query", get_es_query)
        classifier1 = ClassifierFactory.create(classifier="foo :: bar")
        classifier2 = ClassifierFactory.create(classifier="foo :: baz")
        classifier3 = ClassifierFactory.create(classifier="fiz :: buz")
        project = ProjectFactory.create()
        release1 = ReleaseFactory.create(project=project)
        release1.created = datetime.date(2011, 1, 1)
        release1._classifiers.append(classifier1)
        release1._classifiers.append(classifier2)
        page_obj = pretend.stub(page_count=(page or 1) + 10, item_count=1000)
        page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
        monkeypatch.setattr(views, "ElasticsearchPage", page_cls)
        url_maker = pretend.stub()
        url_maker_factory = pretend.call_recorder(lambda request: url_maker)
        monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
        search_view = search(db_request)
        # Only classifiers attached to a release appear under available_filters;
        # classifier3 is never attached and must not show up.
        assert search_view == {
            "page": page_obj,
            "term": params.get("q", ""),
            "order": "",
            "applied_filters": params.getall("c"),
            "available_filters": [
                {
                    "foo": {
                        classifier1.classifier.split(" :: ")[1]: {},
                        classifier2.classifier.split(" :: ")[1]: {},
                    }
                }
            ],
        }
        assert ("fiz", [classifier3.classifier]) not in search_view["available_filters"]
        assert page_cls.calls == [
            pretend.call(es_query, url_maker=url_maker, page=page or 1)
        ]
        assert url_maker_factory.calls == [pretend.call(db_request)]
        assert get_es_query.calls == [
            pretend.call(db_request.es, params.get("q"), "", params.getall("c"))
        ]
        assert metrics.histogram.calls == [
            pretend.call("warehouse.views.search.results", 1000)
        ]
    def test_returns_404_with_pagenum_too_high(self, monkeypatch, db_request, metrics):
        # Requested page (15) exceeds page_count (10) -> 404, no histogram.
        params = MultiDict({"page": 15})
        db_request.params = params
        es_query = pretend.stub()
        db_request.es = pretend.stub(query=lambda *a, **kw: es_query)
        page_obj = pretend.stub(page_count=10, item_count=1000)
        page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
        monkeypatch.setattr(views, "ElasticsearchPage", page_cls)
        url_maker = pretend.stub()
        url_maker_factory = pretend.call_recorder(lambda request: url_maker)
        monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
        with pytest.raises(HTTPNotFound):
            search(db_request)
        assert page_cls.calls == [
            pretend.call(es_query, url_maker=url_maker, page=15 or 1)
        ]
        assert url_maker_factory.calls == [pretend.call(db_request)]
        assert metrics.histogram.calls == []
    def test_raises_400_with_pagenum_type_str(self, monkeypatch, db_request, metrics):
        # Non-integer page parameter -> 400 before any pagination happens.
        params = MultiDict({"page": "abc"})
        db_request.params = params
        es_query = pretend.stub()
        db_request.es = pretend.stub(query=lambda *a, **kw: es_query)
        page_obj = pretend.stub(page_count=10, item_count=1000)
        page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
        monkeypatch.setattr(views, "ElasticsearchPage", page_cls)
        url_maker = pretend.stub()
        url_maker_factory = pretend.call_recorder(lambda request: url_maker)
        monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
        with pytest.raises(HTTPBadRequest):
            search(db_request)
        assert page_cls.calls == []
        assert metrics.histogram.calls == []
    def test_returns_503_when_es_unavailable(self, monkeypatch, db_request, metrics):
        # An ES connection error surfaces as 503 and bumps the error metric.
        params = MultiDict({"page": 15})
        db_request.params = params
        es_query = pretend.stub()
        db_request.es = pretend.stub(query=lambda *a, **kw: es_query)
        def raiser(*args, **kwargs):
            raise elasticsearch.ConnectionError()
        monkeypatch.setattr(views, "ElasticsearchPage", raiser)
        url_maker = pretend.stub()
        url_maker_factory = pretend.call_recorder(lambda request: url_maker)
        monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
        with pytest.raises(HTTPServiceUnavailable):
            search(db_request)
        assert url_maker_factory.calls == [pretend.call(db_request)]
        assert metrics.increment.calls == [pretend.call("warehouse.views.search.error")]
        assert metrics.histogram.calls == []
def test_classifiers(db_request):
    """The classifiers view lists all trove classifiers, sorted."""
    assert list_classifiers(db_request) == {"classifiers": sorted(classifiers)}
def test_stats(db_request):
    """The stats view reports total package size and per-project sizes."""
    project = ProjectFactory.create()
    release1 = ReleaseFactory.create(project=project)
    release1.created = datetime.date(2011, 1, 1)
    FileFactory.create(
        release=release1,
        filename="{}-{}.tar.gz".format(project.name, release1.version),
        python_version="source",
        size=69,
    )
    assert stats(db_request) == {
        "total_packages_size": 69,
        "top_packages": {project.name: {"size": 69}},
    }
def test_health():
    """The health check runs SELECT 1 against the database and returns OK."""
    request = pretend.stub(
        db=pretend.stub(execute=pretend.call_recorder(lambda q: None))
    )
    assert health(request) == "OK"
    assert request.db.execute.calls == [pretend.call("SELECT 1")]
class TestForceStatus:
    """Tests for the force_status debugging view."""
    def test_valid(self):
        # A known status code ("400") raises that HTTP exception.
        with pytest.raises(HTTPBadRequest):
            force_status(pretend.stub(matchdict={"status": "400"}))
    def test_invalid(self):
        # An unknown status code ("599") falls back to 404.
        with pytest.raises(HTTPNotFound):
            force_status(pretend.stub(matchdict={"status": "599"}))
| StarcoderdataPython |
1668013 | from configuration import Configuration
from html_reader import HTLMReader
if __name__ == "__main__":
    # Demo: run the same HTML snippet through each configured builder
    # and print the transformed result.
    text = "A<b>l</b>a ma <i>k</i>o<u>t</u>a"
    print(f"Oryginalny tekst:\n\t{text}\n")
    # 1)RemoveTag 2)TagToUppercase 3)CapitalizeTag 4)FormatTag
    builder_list = [Configuration().remove_tag, Configuration().tag_to_uppercase,
                    Configuration().capitalize_tag, Configuration().format_tag]
    reader = HTLMReader(None, text)
    for i, builder in enumerate(builder_list):
        # Swap the builder strategy on the shared reader, then re-parse.
        reader.builder = builder
        print(f"{i+1}){str(reader.builder)}:\n\t{reader.parse_file()}")
| StarcoderdataPython |
1647435 | #!/usr/bin/env python3
import urllib.request as r
import urllib.parse as p
import json
import time
def get_with_dims(dims):
    """Fetch /splat from the local service, passing *dims* as the query value.

    Returns the response body decoded as UTF-8.
    """
    url = "http://0.0.0.0:30001/splat?dims=" + dims
    body = r.urlopen(url).read()
    return body.decode('utf-8')
print("\n\n" + str(get_with_dims("2,2"))) | StarcoderdataPython |
107225 | import os
import sys
cwd = os.getcwd()
sys.path.insert(0, cwd+'/../..')
import pandas
from plot.box.utils_plot import *
from plot.box.paths_cartpoleNoisyA_test import *
def sweep_model():
    """Plot total-reward comparisons for the k10-far model sweep.

    Maps a human-readable label to each result path and hands them to
    plot_generation alongside the true-environment baseline.
    """
    k10_far_cms = {
        "normal": cpn01_k10_far_reward002,
        "0.01 terminal": cpn01_k10_far_reward002_risk001,
        "0.1 terminal": cpn01_k10_far_reward002_risk01,
        "chosen data": cpn01_k10_far_reward002_withT,
        "0.01 terminal chosen data": cpn01_k10_far_reward002_withT_risk001,
        "0.1 terminal chosen data": cpn01_k10_far_reward002_withT_risk01,
    }
    te = {"true": cpn01_true}
    # `ranges` is expected at module scope (set in the __main__ guard).
    plot_generation(te, k10_far_cms, ranges, "total-reward", "../img/test_10k_k10_far_model", outer=10, sparse_reward=-1, max_len=1000)
    # plot_each_run(te, k10_far_cms, "total-reward", "../img/test_10k_k10_far_model_run", outer=10, sparse_reward=-1, max_len=1000)
def data_density():
    """Plot state-density histograms for the offline cartpole datasets.

    Compares three behavior policies (eps 1 / training / eps 0) over the
    four cartpole state dimensions, grouped into cart and pole plots.
    """
    datasets = {
        "eps 1": "../../data/hyperparam_v4/cartpole-noisy-action_test/noise_0/offline_data/esarsa/step500k_env/fixed_eps1/param_0/",
        "training": "../../data/hyperparam_v4/cartpole-noisy-action_test/noise_0/offline_data/esarsa/step500k_env/learning/param_0",
        "eps 0": "../../data/hyperparam_v4/cartpole-noisy-action_test/noise_0/offline_data/esarsa/step500k_env/fixed_eps0/param_0",
    }
    dimension = {
        0: "cart position",
        1: "cart velocity",
        2: "pole angle",
        3: "pole angular velocity",
    }
    group = {"cart": [0, 1], "pole": [2, 3]}
    key="new state"
    for i in range(1):  # only the first trace file is plotted
        run = "traces-{}".format(i)
        plot_dataset(datasets, key, dimension, group, run, "../img/data_density", setlim=[0, 10000])
def termination_type():
    """Plot the proportions of termination causes (position vs. angle).

    Uses the 0.1%-noise variants of the offline cartpole datasets.
    """
    datasets = {
        "eps 1": "../../data/hyperparam_v4/cartpole-noisy-action_test/noise_0.1perc/offline_data/esarsa/step500k_env/fixed_eps1/param_0/",
        "training": "../../data/hyperparam_v4/cartpole-noisy-action_test/noise_0.1perc/offline_data/esarsa/step500k_env/learning/param_0",
        "eps 0": "../../data/hyperparam_v4/cartpole-noisy-action_test/noise_0.1perc/offline_data/esarsa/step500k_env/fixed_eps0/param_0",
    }
    types = ["pos", "ang"]
    for i in range(1):  # only the first trace file is plotted
        run = "traces-{}".format(i)
        # plot_termination(datasets, types, run, "../img/data_termination_scatter")
        plot_termination_perc(datasets, types, run, "../img/data_termination")
def visit_log():
    """Bar-plot the visit counts recorded in ../temp/visits-1.csv.

    Zero-visit entries are filtered out before plotting; the count of
    remaining (non-zero) entries is printed to stdout.
    """
    path = "../temp/visits-1.csv"
    visits = np.array(pandas.read_csv(path)["visits"])
    # print(visits.argmax(), visits.max(), visits.sum())
    # x = np.arange(len(visits))
    non_zero = np.where(visits>0)[0]
    visits = visits[non_zero]
    x = np.arange(len(visits))
    print("Non-zero data count", len(non_zero))
    fig, ax = plt.subplots()
    # ax.bar(x, visits, log=True)
    ax.bar(x, visits)
    plt.ylabel("visit times")
    plt.show()
if __name__ == '__main__':
    # Sweep percentiles shared by the plotting helpers (read as a global).
    ranges = [0, 0.05, 0.1, 0.2, 0.5, 0.7, 0.9]
    # Uncomment the analysis you want to run:
    # sweep_model()
    # data_density()
    # termination_type()
    visit_log()
3263081 | import logging
LOGGER = logging.getLogger(__name__)
def version():
    """Return the installed htsworkflow pkg_resources Distribution.

    Returns None when the package itself is not installed; raises
    ImportError when setuptools (pkg_resources) is unavailable.
    Note: the return value is a Distribution object, not a bare
    version string.
    """
    version = None
    try:
        import pkg_resources
    except ImportError as e:
        LOGGER.error("Can't find version number, please install setuptools")
        raise e
    try:
        version = pkg_resources.get_distribution("htsworkflow")
    except pkg_resources.DistributionNotFound as e:
        # Not fatal: log and fall through to return None.
        LOGGER.error("Package not installed")
    return version
| StarcoderdataPython |
1606176 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import ConfigParser
from flod_common.session.cookie_helper import get_auth_type_from_cookie, get_redirect_target_from_cookie, invalidate_redirect_target_cookie, set_auth_type_cookie, \
set_redirect_target_cookie
import os
from datetime import datetime
import requests
from flask import (render_template, abort, request, redirect, make_response)
from flask.ext.login import login_user, \
logout_user, login_required, current_user
from idporten.saml import AuthRequest, LogoutRequest, Response as IdpResponse, LogoutResponse
import repo
import authentication
from adfs.saml import AuthRequest as ADFSAuthRequest, Response as ADFSResponse
from flod_aktor_frontend import app
import proxy
import adfs_helper
from flod_common.session.utils import make_auth_token
import xmlParser
APP_NAME = u"Aktørbasen"
@app.errorhandler(401)
def not_authorized_handler(e):
    """Render a plain-text Norwegian 'no access' message for 401 errors."""
    return 'Ingen tilgang til siden', 401
def page_links():
    """Build the navigation link structure for the current user.

    Returns a dict with two lists of link dicts: "over" (main menu) and
    "under" (secondary menu). Which links appear depends on the user's
    authentication state and roles (ID-porten user vs. admin).
    """
    # Links visible to everyone.
    over = [
        {
            "title": u"Forsiden",
            "path": "/"
        },
        {
            "title": u"Finn aktør",
            "path": "/organisations"
        }
    ]
    under = []
    # Registration is open to anonymous visitors, ID-porten users and admins.
    if not current_user.is_authenticated() or current_user.is_idporten_user() or current_user.is_aktorregister_admin():
        over.append({
            "title": u"Registrer aktør",
            "path": "/register_org"
        })
    if current_user.is_authenticated():
        if current_user.is_aktorregister_admin():
            over.append({
                "title": u"Oppdater medlemsdata",
                "path": "/organisations/updatememberdata"
            })
        over.append({
            "title": u"Paraplyorganisasjoner",
            "path": "/umbrella_organisations"
        })
        if current_user.is_idporten_user():
            under.append(
                {
                    "title": u"Min profil",
                    "path": "/profile",
                    "right": True,
                    "requires_login": True,
                }
            )
    over.append({
        "title": u"Om Aktørbasen",
        "path": "https://www.trondheim.kommune.no/aktorbasen/",
        "external": True
    })
    links = {
        "over": over,
        "under": under
    }
    return links
# Environment-driven feature flags (string 'True' enables them).
DEBUG = os.environ.get('DEBUG') == 'True'
# Password accepted by the mock login forms; must be set for them to work.
DEBUG_PASSWORD = os.environ.get('DEBUG_PASSWORD')
# specifically mock idporten and adfs if set. remove from prod?
MOCK_IDPORTEN = os.environ.get('MOCK_IDPORTEN') == 'True'
MOCK_ADFS = os.environ.get('MOCK_ADFS') == 'True'
def read_config(config_file, config_path="."):
    """Parse an INI-style configuration file and return the parser.

    Parameters
    ----------
    config_file : str
        Path to the configuration file; "~" is expanded and the path is
        made absolute before opening.
    config_path : str
        Unused; kept only for backward compatibility with existing
        callers. (The previous implementation immediately shadowed this
        parameter with a value derived from config_file.)

    Returns
    -------
    ConfigParser.RawConfigParser
        Parser populated from the file. Raises IOError if the file
        cannot be opened.
    """
    config = ConfigParser.RawConfigParser()
    # Resolve the file argument itself; a distinct local name avoids the
    # previous confusing reuse of the config_path parameter.
    resolved_path = os.path.abspath(os.path.expanduser(config_file))
    with open(resolved_path) as f:
        config.readfp(f)
    return config
@app.before_first_request
def configure_idporten():
    """Load the ID-porten SAML configuration into app.idporten_settings.

    Reads the config file named by FLOD_AKTOR_SAML_CONFIG; skipped entirely
    when MOCK_IDPORTEN is set.
    """
    # skip config if mocking
    if MOCK_IDPORTEN:
        app.logger.info("Running in test/dev environment with mocked IDPorten. Skip IDPorten configuration.")
        return
    app.idporten_config = read_config(os.environ['FLOD_AKTOR_SAML_CONFIG'])
    # IDporten settings
    app.idporten_settings = {
        'assertion_consumer_service_url': app.idporten_config.get('saml', 'assertion_consumer_service_url'),
        'issuer': app.idporten_config.get('saml', 'issuer'),
        'name_identifier_format': app.idporten_config.get('saml', 'name_identifier_format'),
        'idp_sso_target_url': app.idporten_config.get('saml', 'idp_sso_target_url'),
        'idp_cert_file': app.idporten_config.get('saml', 'idp_cert_file'),
        'private_key_file': app.idporten_config.get('saml', 'private_key_file'),
        'logout_target_url': app.idporten_config.get('saml', 'logout_target_url'),
    }
@app.before_first_request
def configure_adfs():
    """Load the ADFS SAML configuration into app.adfs_settings.

    Reads the config file named by FLOD_AKTOR_ADFS_CONFIG; skipped entirely
    when MOCK_ADFS is set. If idp_cert_file is configured, its contents
    replace it as 'idp_cert_fingerprint' in the settings dict.
    """
    # Skip config if mocking
    if MOCK_ADFS:
        app.logger.info('Running in test/dev environment with mocked ADFS. Skip ADFS configuration.')
        return
    app.adfs_config = read_config(os.environ['FLOD_AKTOR_ADFS_CONFIG'])
    app.adfs_settings = {
        'assertion_consumer_service_url': app.adfs_config.get('saml', 'assertion_consumer_service_url'),
        'issuer': app.adfs_config.get('saml', 'issuer'),
        'name_identifier_format': app.adfs_config.get('saml', 'name_identifier_format'),
        'idp_sso_target_url': app.adfs_config.get('saml', 'idp_sso_target_url'),
        'idp_cert_file': app.adfs_config.get('saml', 'idp_cert_file'),
        'sp_private_key': app.adfs_config.get('saml', 'secret_key'),
        'logout_target_url': app.adfs_config.get('saml', 'logout_target_url'),
    }
    # idp_cert_file has priority over idp_cert_fingerprint
    cert_file = app.adfs_settings.pop('idp_cert_file', None)
    if cert_file:
        cert_path = os.path.expanduser(cert_file)
        cert_path = os.path.abspath(cert_path)
        with open(cert_path) as f:
            app.adfs_settings['idp_cert_fingerprint'] = f.read()
def get_attribute_or_404(saml_response, attribute):
    """Return the first value of *attribute* from the SAML assertion.

    Logs and aborts the request with HTTP 404 when the attribute is
    missing from the response.
    """
    matches = saml_response.get_assertion_attribute_value(attribute)
    if not matches:
        app.logger.error('Could not find attribute in SAML response: %s',
                         attribute)
        abort(404)
    return matches[0]
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Show the login page, or redirect if already authenticated.

    Users whose previous login was via Active Directory are sent straight
    to /admin/login; everyone else gets the login template.
    """
    if current_user.is_authenticated():
        response = redirect(request.args.get('next') or '/')
    else:
        auth_type = get_auth_type_from_cookie(request)
        if auth_type == 'active_directory':
            response = redirect('/admin/login')
        else:
            response = make_response(render_template('login.html', app_name=APP_NAME))
    # Save the location of the page the user is trying to reach in a cookie.
    # This makes it possible to redirect correctly when user comes back
    # from id-porten/adfs.
    set_redirect_target_cookie(response)
    return response
@app.route('/admin/login', methods=['GET', 'POST'])
def admin_login():
    """Start an ADFS login, or use the mock form when MOCK_ADFS is set."""
    # Skip ADFS login in when mocking
    if MOCK_ADFS:
        return login_adfs_mock()
    # Default to ADFS login
    url = ADFSAuthRequest.create(**app.adfs_settings)
    return redirect(url)
@app.route('/adfs/ls/', methods=['POST'])
def logged_in_from_adfs():
    """Assertion consumer endpoint for ADFS: validate the SAML response,
    create/refresh the local user with roles from the claims, log in and
    redirect to the page the user originally requested.
    """
    app.logger.info('User logged in via ADFS')
    SAMLResponse = request.values['SAMLResponse']
    try:
        res = ADFSResponse(SAMLResponse, app.adfs_settings["idp_cert_fingerprint"])
        res.decrypt(app.adfs_settings["sp_private_key"])
        valid = res.is_valid()
        if not valid:
            app.logger.error('Invalid response from ADFS')
            abort(404)
        # NOTE(review): despite its name, this encodes str -> UTF-8 bytes.
        def to_unicode(in_str):
            return in_str.encode("utf-8")
        name_id = to_unicode(res.name_id)
        ident = get_attribute_or_404(res, "http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsaccountname")
        name = to_unicode(get_attribute_or_404(res, "http://schemas.xmlsoap.org/claims/CommonName"))
        email = to_unicode(
            get_attribute_or_404(res, "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress"))
        app.logger.info('Logging in: name_id=%s name=%s ident=%s email=%s', name_id, name, ident, email)
        data = {"misc": {"name": name,
                         "email": email,
                         "ident": ident}}
        claims = adfs_helper.parse_claims(res._document)
        app.logger.info('Claims in SAML response: %s', claims)
        roles = adfs_helper.extract_roles(claims)
        app.logger.info('Requested roles parsed from claims: %s', roles)
        auth_user = authentication.login_adfs_user_by_private_id(ident, data)
        user_roles = authentication.update_user_roles(auth_user.user_id, roles)
        auth_user.roles = user_roles
        app.logger.info('User roles after update: %s', user_roles)
        app.logger.info('Logged in: %s name=%s ident=%s email=%s',
                        datetime.now().isoformat(),
                        name, ident, email)
        login_user(auth_user, remember=True)
        # Check if the user wants to redirect to a specific page
        redirect_target = get_redirect_target_from_cookie(request)
        response = make_response(redirect(redirect_target or request.args.get('next') or '/'))
        invalidate_redirect_target_cookie(response)
        set_cookie(response, 'auth_token', make_auth_token(auth_user.user_id))
        # set authentication type cookie
        set_auth_type_cookie(response, "active_directory")
        return response
    # NOTE(review): broad catch hides the real failure from the user; the
    # cause is only visible in the log.
    except Exception as e:
        app.logger.error('Logging failed: %s', e)
        abort(404, 'Ugyldig innlogging.')
def login_adfs_mock():
    """Debug-only substitute for the ADFS login flow.

    GET renders a form with the selectable ADFS roles; POST checks the
    submitted password against DEBUG_PASSWORD, logs the user in with the
    chosen roles and redirects like the real ADFS consumer does.
    """
    if request.method != 'POST':
        return render_template('admin_login.html', roles=authentication.adfs_roles())
    username = request.form['username']
    password = request.form['password']
    # BUG FIX: the comparison target was a corrupted placeholder; compare
    # against DEBUG_PASSWORD, mirroring login_idporten_mock().
    if DEBUG_PASSWORD is None or password != DEBUG_PASSWORD:
        app.logger.error('Running in debug mode, but DEBUG_PASSWORD is not set')
        abort(403)
    auth_user = authentication.login_adfs_user_by_private_id(username, {})
    roles = request.form.getlist('roles')
    user_roles = authentication.update_user_roles(auth_user.user_id, roles)
    auth_user.roles = user_roles
    login_user(auth_user, remember=True)
    # Check if the user wants to redirect to a specific page
    redirect_target = get_redirect_target_from_cookie(request)
    response = make_response(redirect(redirect_target or request.args.get('next') or '/'))
    invalidate_redirect_target_cookie(response)
    set_cookie(response, 'auth_token', make_auth_token(auth_user.user_id))
    # set authentication type cookie
    set_auth_type_cookie(response, "active_directory")
    return response
@app.route('/bruker/login', methods=['GET', 'POST'])
def bruker_login():
    """Start an ID-porten login, or use the mock form when MOCK_IDPORTEN is set."""
    if MOCK_IDPORTEN:
        return login_idporten_mock()
    # Encrypt "authentication" URL with our private key. We redirect to that URL.
    auth_request = AuthRequest(**app.idporten_settings)
    url = auth_request.get_signed_url(app.idporten_settings["private_key_file"])
    app.logger.info("url=%s", url)
    return redirect(url)
@app.route('/idporten/login_from_idp', methods=['POST', 'GET'])
def logged_in():
    """Assertion consumer endpoint for ID-porten: validate the SAML
    response, log the user in by national ID number, and redirect —
    to /profile for unregistered users, otherwise to the saved target.
    """
    # IDPorten redirects to this URL if all ok with login
    app.logger.info("User logged in via ID-porten: request.values=%s",
                    request.values)
    SAMLResponse = request.values['SAMLResponse']
    res = IdpResponse(
        SAMLResponse,
        "TODO: remove signature parameter"
    )
    # Decrypt response from IDPorten with our private key, and make sure that the response is valid
    # (it was encrypted with same key)
    valid = res.is_valid(app.idporten_settings["idp_cert_file"], app.idporten_settings["private_key_file"])
    if valid:
        national_id_number = res.get_decrypted_assertion_attribute_value("uid")[0]
        # Keep the SAML session details so a later single logout can
        # reference the same session at the IdP.
        idporten_parameters = {
            "session_index": res.get_session_index(),
            "name_id": res.name_id
        }
        auth_user = authentication.login_idporten_user_by_private_id(national_id_number,
                                                                     idporten_parameters)
        login_user(auth_user, remember=True)
        # Force the user to fill in the profile if unregistered
        if not auth_user.is_registered():
            app.logger.info("Logged in: %s Uregistrert bruker (%s)",
                            datetime.now().isoformat(),
                            national_id_number[:6])
            response = make_response(redirect("/profile"))
        else:
            app.logger.info("Logged in: %s %s %s (%s)",
                            datetime.now().isoformat(),
                            auth_user.first_name,
                            auth_user.last_name,
                            national_id_number[:6])
            # Check if the user wants to redirect to a specific page
            redirect_target = get_redirect_target_from_cookie(request)
            response = make_response(redirect(redirect_target or request.args.get('next') or '/'))
            invalidate_redirect_target_cookie(response)
        set_cookie(response, 'auth_token', make_auth_token(auth_user.user_id))
        return response
    else:
        abort(404, 'Ugyldig innlogging.')
def login_idporten_mock():
    """Debug-only substitute for the ID-porten login flow.

    GET renders a form; POST checks the password against DEBUG_PASSWORD,
    logs the user in by the submitted SSN and redirects — unregistered
    users go to /profile, others to the saved redirect target.
    """
    if request.method != 'POST':
        return render_template('bruker_login.html')
    ssn = request.form['ssn']
    password = request.form['password']
    if DEBUG_PASSWORD is None or password != DEBUG_PASSWORD:
        app.logger.error('Running in debug mode, incorrect DEBUG_PASSWORD or not set')
        abort(403)
    auth_user = authentication.login_idporten_user_by_private_id(ssn, {})
    login_user(auth_user, remember=True)
    # Force the user to fill in the profile if unregistered
    if not auth_user.is_registered():
        # BUG FIX: was "/profil", which does not match the /profile route
        # this app registers (and which the real ID-porten flow uses).
        response = make_response(redirect("/profile"))
    else:
        # Check if the user wants to redirect to a specific page
        redirect_target = get_redirect_target_from_cookie(request)
        response = make_response(redirect(redirect_target or request.args.get('next') or '/'))
        invalidate_redirect_target_cookie(response)
    set_cookie(response, 'auth_token', make_auth_token(auth_user.user_id))
    return response
@app.route('/logout')
def logout():
    """Log the current user out, propagating to the IdP when applicable.

    ID-porten users are redirected to a signed SAML logout URL; ADFS users
    are only logged out locally (SSO) and sent to the ADFS signout page.
    """
    # Remove the user information from the session
    app.logger.info("Logout requested")
    url = '/'
    if current_user.authentication_type == 'id_porten':
        if MOCK_IDPORTEN:
            logout_user()
            return redirect('/')
        logout_request = LogoutRequest(name_id=current_user.misc["name_id"],
                                       session_index=current_user.misc["session_index"],
                                       **app.idporten_settings)
        app.logger.info("logout_request.raw_xml=%s", logout_request.raw_xml)
        url = logout_request.get_signed_url(app.idporten_settings["private_key_file"])
        app.logger.info("Logging out: url=%s", url)
    elif current_user.authentication_type == 'active_directory':
        if MOCK_ADFS:
            logout_user()
            return redirect('/')
        # Note: We never really log out from adfs, it is SSO in TK and we only want
        # to log out from our system
        logout_user()
        # Redirect to logout path on adfs idp
        url = app.adfs_settings['logout_target_url'] + '?wa=wsignout1.0'
        app.logger.info("Logging out: url=%s", url)
    return redirect(url)
@app.route('/idporten/logout_from_idp')
def handle_idporten_logout_response():
    """Handle SAML logout traffic coming from ID-porten.

    Two cases land here:
    * ID-porten initiated the logout (the user logged out over there) and
      sends us a SAMLRequest -- continue with our normal logout flow.
    * ID-porten answers our own logout request with a SAMLResponse --
      finish by logging the user out locally.
    """
    # If user logs out IN IDporten, then IDporten sends the logout request to us
    # , and we need to continue the logout process the normal way
    if 'SAMLRequest' in request.values:
        return logout()
    # Got response from logout request from IDporten, continue logging out
    saml_response = request.values['SAMLResponse']
    logout_response = LogoutResponse(saml_response)
    if not logout_response.is_success():
        # We log out locally even when ID-porten reports a failure.
        # NOTE(review): the concatenated literal below is missing a space,
        # so the logged message reads "...logoutanyway".
        app.logger.info(("Logout from Idporten failed, proceeding with logout"
                         "anyway"))
    logout_user()
    return redirect("/")
def set_cookie(response, key, content):
    """Set a cookie on *response*; HTTPS-only outside debug mode."""
    # The cookie is always HTTP-only; outside debug mode it is also
    # marked `secure` so it is only ever sent over HTTPS.
    response.set_cookie(key, content, httponly=True, secure=not DEBUG)
def render_flod_template(template, **kwargs):
    """Render *template* with the context common to all pages.

    Adds stripped-down person/user dicts (only the fields the templates
    need, never the full objects), the page links, the app name and a
    `user_mode` flag ('soker' for ID-porten users, 'admin' for ADFS
    aktorregister admins, otherwise None) to the template context.
    """
    stripped_user = None
    stripped_person = None
    user_mode = None
    if not current_user.is_anonymous():
        if current_user.is_idporten_user():
            person = authentication.get_current_person()
            # Expose only name, uri and id of the person to the template.
            stripped_person = {
                "name": person['name'],
                "uri": "/persons/%d" % person['person_id'],
                "id": person['person_id']
            }
            user_mode = 'soker'
        elif current_user.is_adfs_user() and current_user.is_aktorregister_admin():
            user_mode = 'admin'
        stripped_user = {
            "id": current_user.user_id,
            "private_id": current_user.private_id
        }
    return render_template(
        template,
        person=stripped_person,
        user=stripped_user,
        pages=page_links(),
        app_name=APP_NAME,
        user_mode=user_mode,
        **kwargs
    )
### Routes ###
@app.route('/')
def home():
    """Render the home page.

    `can_register` is true for guests, ID-porten users and
    aktorregister admins.
    """
    is_guest = not current_user.is_authenticated()
    may_register = (is_guest or
                    current_user.is_idporten_user() or
                    current_user.is_aktorregister_admin())
    return render_flod_template('home.html', can_register=may_register)
@app.route('/profile')
@login_required
def profile():
    """Render the profile page with the current person's organisations
    and umbrella organisations."""
    user_data = authentication.get_current_person()
    try:
        organisations = repo.get_organisations_for_person(
            current_user.person_id, auth_token_username=current_user.user_id)
        umbrella_organisations = repo.get_umbrella_organisations_for_person(
            current_user.person_id,
            auth_token_username=current_user.user_id,
        )
        return render_flod_template(
            'profile.html',
            user_data=user_data,
            organisations=organisations,
            umbrella_organisations=umbrella_organisations
        )
    except requests.exceptions.ConnectionError:
        # Backend unreachable -- log the traceback and answer 500.
        app.logger.exception('Request failed')
        return "", 500
@app.route('/organisations')
def organisations_list():
    """Render the organisation search/list page.

    Search parameters are taken from the query string; the search is only
    performed when at least one known, non-empty parameter is given.  For
    authenticated aktorregister admins the (local) e-mail addresses of the
    matched organisations are additionally exposed as a JSON list.
    """
    # Only these query-string arguments are forwarded to the search.
    allowed_args = ["name", "brreg_activity_code", "flod_activity_type", "area"]
    params = {}
    for arg in allowed_args:
        if arg in request.args and request.args[arg]:
            params[arg] = request.args[arg]
    try:
        flod_activity_types = repo.get_flod_activity_types()
        brreg_activity_codes = repo.get_brreg_activity_codes()
        districts = repo.get_districts_without_whole_trd()
        if params:
            if current_user.is_anonymous():
                user_id = None
            else:
                user_id = current_user.user_id
            organisations = repo.get_all_organisations(params, user_id)
        else:
            # No search parameters -> show an empty result list.
            organisations = []
        emails = []
        if current_user.is_authenticated() and current_user.is_aktorregister_admin():
            # Collect both the registered and the local e-mail addresses,
            # skipping empty/missing ones.
            emails = [email for email in (o.get('email_address') for o in organisations) if email]
            emails += [email for email in (o.get('local_email_address') for o in organisations) if email]
        return render_flod_template(
            'organisations_list.html',
            organisations=organisations,
            params=params,
            emails=json.dumps(emails),
            brreg_activity_codes=brreg_activity_codes,
            flod_activity_types=flod_activity_types,
            districts=districts
        )
    except requests.exceptions.ConnectionError:
        app.logger.exception('Request failed')
        return "", 500
@app.route('/register_org')
@login_required
def register_org():
    """Render the organisation registration page.

    Only ID-porten users and aktorregister admins may register
    organisations; everybody else gets a 401.

    Fix: the original had a stray ``\"\"\"Render home page.\"\"\"`` string
    *after* the permission check -- in that position it is a no-op
    statement, not a docstring, and its text was wrong for this view.
    """
    if not (current_user.is_idporten_user() or current_user.is_aktorregister_admin()):
        abort(401)
    try:
        recruiting_districts = repo.get_districts()
        districts = repo.get_districts_without_whole_trd()
        brreg_activity_codes = repo.get_brreg_activity_codes()
        return render_flod_template(
            'register_org.html',
            districts=json.dumps(districts),
            recruiting_districts=json.dumps(recruiting_districts),
            brreg_activity_codes=json.dumps(brreg_activity_codes)
        )
    except requests.exceptions.ConnectionError:
        app.logger.exception('Request failed')
        return "", 500
def render_org_template(template, organisation_id, requires_owner=True, **kwargs):
    """Render an organisation page, enforcing membership when required.

    For ID-porten users, membership of the organisation is checked; when
    `requires_owner` is True non-members get a 403.  Anonymous and
    non-ID-porten users (e.g. ADFS admins) skip the check and are passed
    to the template with `is_member=True`.
    """
    try:
        # Default True: users that are not subject to the check below
        # (anonymous / ADFS) are treated as members.
        is_member = True
        if current_user.is_authenticated() and current_user.is_idporten_user():
            organisations = repo.get_organisations_for_person(
                current_user.person_id,
                auth_token_username=current_user.user_id)
            try:
                # NOTE(review): `org` itself is unused -- `next(...)` is only
                # called for its StopIteration when no match exists, and the
                # `is_member = True` below is redundant with the default.
                org = next(org for org in organisations if org["id"] == organisation_id)
                is_member = True
            except StopIteration:
                if requires_owner:
                    abort(403)
                is_member = False
        return render_flod_template(
            template,
            organisation_id=organisation_id,
            is_member=is_member,
            **kwargs
        )
    except requests.exceptions.ConnectionError:
        app.logger.exception('Request failed')
        return "", 500
@app.route("/organisations/<int:organisation_id>")
def organisation(organisation_id):
try:
organisation = repo.get_organisation(
organisation_id, getattr(current_user, 'user_id', None))
recruiting_districts = repo.get_districts()
districts = repo.get_districts_without_whole_trd()
try:
organisation["area"] = next(
district["name"] for district in districts if district["id"] == organisation.get("area"))
except StopIteration:
organisation["area"] = None
try:
organisation["recruitment_area"] = next(
district["name"] for district in recruiting_districts if district["id"] == organisation.get("recruitment_area"))
except StopIteration:
organisation["recruitment_area"] = None
brreg_activity_codes = repo.get_brreg_activity_codes()
organisation["brreg_activity_code"] = [code for code in brreg_activity_codes if
code["code"] in organisation.get("brreg_activity_code", [])]
activity_types = [type["flod_activity_types"] for type in organisation.get("brreg_activity_code")]
activity_types = [y for x in activity_types for y in x]
organisation["flod_activity_type"] = [type for type in activity_types if
type["id"] in organisation.get("flod_activity_type", [])]
for key, value in organisation.items():
if value == "" or value is None:
del organisation[key]
return render_org_template(
'org_info.html',
organisation_id,
requires_owner=False,
organisation=organisation
)
except requests.exceptions.ConnectionError:
app.logger.exception('Request failed')
return "", 500
except requests.exceptions.HTTPError as e:
abort(e.response.status_code)
@app.route("/organisations/<int:organisation_id>/edit")
@login_required
def edit_organisation(organisation_id):
try:
recruiting_districts = repo.get_districts()
districts = repo.get_districts_without_whole_trd()
brreg_activity_codes = repo.get_brreg_activity_codes()
organisation = repo.get_organisation(
organisation_id, getattr(current_user, 'user_id', None))
return render_org_template(
'edit_org.html',
organisation_id,
organisation=json.dumps(organisation),
districts=json.dumps(districts),
recruiting_districts=json.dumps(recruiting_districts),
brreg_activity_codes=json.dumps(brreg_activity_codes)
)
except requests.exceptions.ConnectionError:
app.logger.exception('Request failed')
return "", 500
except requests.exceptions.HTTPError as e:
abort(e.response.status_code)
@app.route("/organisations/<int:organisation_id>/members")
@login_required
def add_org_members(organisation_id):
try:
organisation = repo.get_organisation(
organisation_id, getattr(current_user, 'user_id', None))
members = repo.get_members(
organisation_id,
auth_token_username=current_user.user_id
)
return render_org_template(
'org_members.html',
organisation_id,
organisation=organisation,
members=json.dumps(members)
)
except requests.exceptions.ConnectionError:
app.logger.exception('Request failed')
return "", 500
def map_internal_notes_to_users(notes):
    """Attach the author's user record to each internal note, in place."""
    for entry in notes:
        author_id = entry["auth_id"]
        entry["user"] = repo.get_user(author_id, author_id)
@app.route("/organisations/<int:organisation_id>/internal_notes")
@login_required
def internal_notes(organisation_id):
try:
organisation = repo.get_organisation(
organisation_id, getattr(current_user, 'user_id', None))
notes = repo.get_notes(organisation_id, getattr(current_user, 'user_id', None))
map_internal_notes_to_users(notes)
return render_org_template(
'internal_notes.html',
organisation_id,
organisation=json.dumps(organisation),
internal_notes=json.dumps(notes)
)
except requests.exceptions.ConnectionError:
app.logger.exception('Request failed')
return "", 500
@app.route('/organisations/updatememberdata', methods=['GET', 'POST'])
@login_required
def organisation_update_member_data():
    """Upload an NIF idrettsraad XML file and update member data.

    GET renders the upload form; POST parses the uploaded XML file and
    asks the organisations backend to update the contained organisations.
    Aktorregister admins only.
    """
    if not (current_user.is_authenticated() and current_user.is_aktorregister_admin()):
        abort(401)
    try:
        # Not ideal, we should have a fixed setting
        # Will cause 413 if exceeded
        # NOTE(review): mutating app.config per request affects the whole
        # app, not just this endpoint -- consider moving to app setup.
        app.config['MAX_CONTENT_LENGTH'] = 20 * 1024 * 1024 # 20 MB
        allowed_extensions = ['xml']
        messages = []
        updated_organisations = []
        if request.method == 'POST' and len(request.files) > 0:
            file = request.files['document']
            filename = file.filename
            # Only accept files with an .xml extension.
            if '.' in filename and filename.rsplit('.', 1)[1] in allowed_extensions:
                organisations = xmlParser.get_organisations_from_nif_idrettsraad_xml(file.stream)
                organisations_service = proxy.gui_service_name_to_service_proxy['organisations']
                updated_organisations = organisations_service.update_organisations(organisations, auth_token_username=current_user.user_id)
                messages.append({'status': 'success', 'message': 'Filen ble parset. Se under for hvilke organisasjoner som ble oppdatert.'})
            else:
                messages.append({'status': 'error', 'message': u'Ugyldig filtype. Filnavnet må være på formatet "filnavn.xml"'})
        return render_flod_template(
            'organisations_updatemembers.html',
            messages=messages,
            updated_organisations=updated_organisations
        )
    except requests.exceptions.ConnectionError:
        app.logger.exception('Request failed')
        return "", 500
@app.route('/umbrella_organisations')
@login_required
def umbrella_organisations_list():
    """List all umbrella organisations (aktorregister admins only)."""
    is_admin = (current_user.is_authenticated() and
                current_user.is_aktorregister_admin())
    if not is_admin:
        abort(401)
    try:
        umbrella_organisations = repo.get_all_umbrella_organisations(
            auth_token_username=current_user.user_id)
        return render_flod_template(
            'umbrella_organisations_list.html',
            umbrella_organisations=umbrella_organisations)
    except requests.exceptions.ConnectionError:
        app.logger.exception('Request failed')
        return "", 500
@app.route('/umbrella_organisation/<int:id>')
@login_required
def umbrella_organisation_detail(id):
    """Render the detail page for one umbrella organisation.

    Aktorregister admins only.
    """
    if not (current_user.is_authenticated() and current_user.is_aktorregister_admin()):
        abort(401)
    try:
        umbrella_organisation = repo.get_umbrella_organisation(
            id,
            auth_token_username=current_user.user_id
        )
        flod_activity_types = repo.get_flod_activity_types()
        brreg_activity_codes = repo.get_brreg_activity_codes()
        return render_flod_template(
            'umbrella_organisation_detail.html',
            umbrella_organisation=json.dumps(umbrella_organisation),
            auth=repo.get_user(current_user.user_id, current_user.user_id),
            brreg_activity_codes=brreg_activity_codes,
            flod_activity_types=flod_activity_types
        )
    except requests.exceptions.ConnectionError:
        app.logger.exception('Request failed')
        return "", 500
@app.route('/umbrella_organisation')
@login_required
def umbrella_organisation_new():
    """Render the page for creating a new umbrella organisation.

    Aktorregister admins only.

    Fix: the two repo lookups used to run *before* the try-block, so a
    backend outage escaped as an unhandled ConnectionError instead of the
    logged "", 500 answer used by the sibling view
    umbrella_organisation_detail().
    """
    if not (current_user.is_authenticated() and current_user.is_aktorregister_admin()):
        abort(401)
    try:
        flod_activity_types = repo.get_flod_activity_types()
        brreg_activity_codes = repo.get_brreg_activity_codes()
        return render_flod_template(
            'umbrella_organisation_detail.html',
            umbrella_organisation=json.dumps(None),
            auth=repo.get_user(current_user.user_id, current_user.user_id),
            brreg_activity_codes=brreg_activity_codes,
            flod_activity_types=flod_activity_types
        )
    except requests.exceptions.ConnectionError:
        app.logger.exception('Request failed')
        return "", 500
| StarcoderdataPython |
3227049 | <reponame>SahandAslani/ballistica
# Copyright (c) 2011-2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Functionality related to the server manager script."""
from __future__ import annotations
from enum import Enum
from dataclasses import dataclass, field
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Optional, Tuple, List
@dataclass
class ServerConfig:
    """Configuration for the server manager app (<appname>_server).

    An instance of this class is sent to the app as part of a
    StartServerModeCommand.
    """

    # Name of our server in the public parties list.
    party_name: str = 'FFA'

    # If True, your party will show up in the global public party list
    # Otherwise it will still be joinable via LAN or connecting by IP address.
    party_is_public: bool = True

    # If True, all connecting clients will be authenticated through the master
    # server to screen for fake account info. Generally this should always
    # be enabled unless you are hosting on a LAN with no internet connection.
    authenticate_clients: bool = True

    # IDs of server admins. Server admins are not kickable through the default
    # kick vote system and they are able to kick players without a vote. To get
    # your account id, enter 'getaccountid' in settings->advanced->enter-code.
    admins: List[str] = field(default_factory=list)

    # Whether the default kick-voting system is enabled.
    enable_default_kick_voting: bool = True

    # UDP port to host on. Change this to work around firewalls or run multiple
    # servers on one machine.
    # 43210 is the default and the only port that will show up in the LAN
    # browser tab.
    port: int = 43210

    # Max devices in the party. Note that this does *NOT* mean max players.
    # Any device in the party can have more than one player on it if they have
    # multiple controllers. Also, this number currently includes the server so
    # generally make it 1 bigger than you need. Max-players is not currently
    # exposed but I'll try to add that soon.
    max_party_size: int = 6

    # Options here are 'ffa' (free-for-all) and 'teams'
    # This value is only used if you do not supply a playlist_code (see below).
    # In that case the default teams or free-for-all playlist gets used.
    session_type: str = 'ffa'

    # To host your own custom playlists, use the 'share' functionality in the
    # playlist editor in the regular version of the game.
    # This will give you a numeric code you can enter here to host that
    # playlist.
    playlist_code: Optional[int] = None

    # Whether to shuffle the playlist or play its games in designated order.
    playlist_shuffle: bool = True

    # If True, keeps team sizes equal by disallowing joining the largest team
    # (teams mode only).
    auto_balance_teams: bool = True

    # Whether to enable telnet access.
    # IMPORTANT: This option is no longer available, as it was being used
    # for exploits. Live access to the running server is still possible through
    # the mgr.cmd() function in the server script. Run your server through
    # tools such as 'screen' or 'tmux' and you can reconnect to it remotely
    # over a secure ssh connection.
    enable_telnet: bool = False

    # Series length in teams mode (7 == 'best-of-7' series; a team must
    # get 4 wins)
    teams_series_length: int = 7

    # Points to win in free-for-all mode (Points are awarded per game based on
    # performance)
    ffa_series_length: int = 24

    # If you provide a custom stats webpage for your server, you can use
    # this to provide a convenient in-game link to it in the server-browser
    # beside the server name.
    # if ${ACCOUNT} is present in the string, it will be replaced by the
    # currently-signed-in account's id. To fetch info about an account,
    # your backend server can use the following url:
    # http://bombsquadgame.com/accountquery?id=ACCOUNT_ID_HERE
    stats_url: Optional[str] = None
# NOTE: as much as possible, communication from the server-manager to the
# child-process should go through these and not ad-hoc Python string commands
# since this way is type safe.
class ServerCommand:
    """Base class for commands that can be sent to the server.

    Subclasses are simple dataclasses so that communication between the
    server-manager and the child process stays type safe (see the note
    above about avoiding ad-hoc Python string commands).
    """
@dataclass
class StartServerModeCommand(ServerCommand):
    """Tells the app to switch into 'server' mode."""

    # Full server configuration to apply when entering server mode.
    config: ServerConfig
class ShutdownReason(Enum):
    """Reason a server is shutting down."""

    # No particular reason given.
    NONE = 'none'

    # The server is shutting down as part of a restart.
    RESTARTING = 'restarting'
@dataclass
class ShutdownCommand(ServerCommand):
    """Tells the server to shut down."""

    # Why the shutdown was requested.
    reason: ShutdownReason

    # If True, shut down right away; otherwise presumably wait for a
    # suitable moment -- confirm against the command handler.
    immediate: bool
@dataclass
class ChatMessageCommand(ServerCommand):
    """Chat message from the server."""

    # Text of the chat message.
    message: str

    # Client ids to deliver to; None presumably means all clients --
    # confirm against the command handler.
    clients: Optional[List[int]]
@dataclass
class ScreenMessageCommand(ServerCommand):
    """Screen-message from the server."""

    # Text of the on-screen message.
    message: str

    # Optional message color; None for the default. Presumably an RGB
    # triple of floats -- confirm against the command handler.
    color: Optional[Tuple[float, float, float]]

    # Client ids to deliver to; None presumably means all clients --
    # confirm against the command handler.
    clients: Optional[List[int]]
@dataclass
class ClientListCommand(ServerCommand):
    """Print a list of clients.

    Carries no payload; the command itself is the request.
    """
@dataclass
class KickCommand(ServerCommand):
    """Kick a client."""

    # Id of the client to kick.
    client_id: int

    # Ban duration; None presumably selects a default ban length --
    # confirm against the command handler.
    ban_time: Optional[int]
| StarcoderdataPython |
1765284 | """Library for the conversion from NEMO output to XGCM data sets."""
import numpy as np
import xarray as xr
from . import orca_names
def trim_and_squeeze(ds,
                     model_config="GLOBAL",
                     y_slice=None, x_slice=None,
                     **kwargs):
    """Remove redundant grid points and drop singleton dimensions.

    Parameters
    ----------
    ds : xr Dataset | DataArray
        The object to trim.
    model_config : immutable
        Selects pre-defined trimming setup. If omitted, or if the model_config
        is not known here, no trimming will be done.
        Possible configurations:
        - `"GLOBAL"` (*default*) : `.isel(y=slice(1, -1), x=slice(1, -1))`
        - `"NEST"` : No trimming
    y_slice : tuple
        How to slice in y-dimension? `y_slice=(1, -1)` will slice from 1 to
        -1, which amounts to dropping the first and last index along the
        y-dimension. This will override selection along y given by
        `model_config`.
    x_slice : tuple
        See y_slice. This will override selection along x given by
        `model_config`.

    Returns
    -------
    trimmed ds
    """
    # Be case-insensitive
    if isinstance(model_config, str):
        model_config = model_config.upper()
    how_to_trim = {
        "GLOBAL": {"y": (1, -1), "x": (1, -1)},
        "NEST": {},
    }
    # Unknown configs fall back to an empty dict (== no trimming).
    yx_slice_dict = how_to_trim.get(
        model_config, {})
    # Explicit y_slice / x_slice arguments take precedence.
    if y_slice is None:
        y_slice = yx_slice_dict.get("y")
    if x_slice is None:
        x_slice = yx_slice_dict.get("x")
    if (y_slice is not None) and ("y" in ds.dims):
        ds = ds.isel(y=slice(*y_slice))
    if (x_slice is not None) and ("x" in ds.dims):
        ds = ds.isel(x=slice(*x_slice))

    def _is_singleton(ds, dim):
        # Dimension of length one?
        return (ds[dim].size == 1)

    def _is_time_dim(ds, dim):
        # Time dimensions (known name and datetime dtype) are never squeezed.
        return (dim in orca_names.t_dims and
                np.issubdtype(ds[dim].dtype,
                              np.datetime64))

    def _is_z_dim(ds, dim):
        # Vertical dimensions are never squeezed.
        return (dim in orca_names.z_dims)

    # Drop all singleton dims except time and vertical ones.
    to_squeeze = [dim for dim in ds.dims
                  if (_is_singleton(ds, dim) and
                      not _is_time_dim(ds, dim) and
                      not _is_z_dim(ds, dim))]
    ds = ds.squeeze(dim=to_squeeze)
    return ds
def create_minimal_coords_ds(mesh_mask, **kwargs):
    """Create a minimal set of coordinates from a mesh-mask dataset.

    This creates `"central"` and `"right"` grid points for the horizontal grid
    and `"central"` and `"left"` grid points in the vertical.
    """
    # Vertical size: some mesh masks call the dimension "z", others
    # "nav_lev".
    try:
        nz = len(mesh_mask.coords["z"])
    except KeyError:
        nz = len(mesh_mask.coords["nav_lev"])
    ny = len(mesh_mask.coords["y"])
    nx = len(mesh_mask.coords["x"])

    # One-based index vectors; shifted copies mark the staggered points.
    z_idx = np.arange(1, nz + 1)
    y_idx = np.arange(1, ny + 1)
    x_idx = np.arange(1, nx + 1)

    coords = {
        "z_c": (["z_c", ], z_idx, {"axis": "Z"}),
        "z_l": (["z_l", ], z_idx - 0.5,
                {"axis": "Z", "c_grid_axis_shift": - 0.5}),
        "y_c": (["y_c", ], y_idx, {"axis": "Y"}),
        "y_r": (["y_r", ], y_idx + 0.5,
                {"axis": "Y", "c_grid_axis_shift": 0.5}),
        "x_c": (["x_c", ], x_idx, {"axis": "X"}),
        "x_r": (["x_r", ], x_idx + 0.5,
                {"axis": "X", "c_grid_axis_shift": 0.5}),
    }
    return xr.Dataset(coords=coords)
def get_name_dict(dict_name, **kwargs):
    """Return a (possibly updated) name dictionary.

    Parameters
    ----------
    dict_name : str
        Name of the dict from `xorca.orca_names` to look up.  If a keyword
        argument called `"update_" + dict_name` is present in `kwargs`, its
        items override / extend the looked-up dict.  An unknown `dict_name`
        starts from an empty dict.

    Returns
    -------
    dict
        A fresh copy; the dict in `xorca.orca_names` is never modified.

    Examples
    --------
    ```python
    print(get_name_dict("rename_dims"))
    # -> {"time_counter": "t", "Z": "z", "Y": "y", "X": "x"}
    print(get_name_dict("rename_dims", update_rename_dims={"SIGMA": "sigma"}))
    # -> {"time_counter": "t", "Z": "z", "Y": "y", "X": "x", "SIGMA": "sigma"}
    print(get_name_dict("not_defined", update_rename_dims={"SIGMA": "sigma"}))
    # -> {"SIGMA": "sigma"}
    ```
    """
    merged = dict(orca_names.__dict__.get(dict_name, {}))
    merged.update(kwargs.get("update_" + dict_name, {}))
    return merged
def copy_coords(return_ds, ds_in, **kwargs):
    """Copy coordinates and map them to the correct grid.

    This copies all coordinates defined in `xorca.orca_names.orca_coords` from
    `ds_in` to `return_ds`.

    Parameters
    ----------
    return_ds : xarray Dataset
        Target dataset; coordinates are attached to it in place.
    ds_in : xarray Dataset
        Source dataset; both its coordinates and its data variables are
        searched for the old names.

    Returns
    -------
    xarray Dataset
        The (modified) `return_ds`.
    """
    for key, names in get_name_dict("orca_coords", **kwargs).items():
        new_name = key
        new_dims = names["dims"]
        for old_name in names.get("old_names", [new_name, ]):
            # First try to copy `old_name` from the input ds coords and then
            # from the input ds variables. As soon as one can be copied
            # successfully (that is, if it is present and has the correct
            # shape), the loop is broken and the next target coordinate will
            # be built.
            if old_name in ds_in.coords:
                try:
                    return_ds.coords[new_name] = (new_dims,
                                                  ds_in.coords[old_name].data)
                    break
                except ValueError as e:
                    # Shape mismatch; fall through to the variables / next
                    # candidate name.
                    pass
            if old_name in ds_in:
                try:
                    return_ds.coords[new_name] = (new_dims,
                                                  ds_in[old_name].data)
                    break
                except ValueError as e:
                    pass
    return return_ds
def copy_vars(return_ds, raw_ds, **kwargs):
    """Copy variables and map them to the correct grid.

    This copies all variables defined in `xorca.orca_names.orca_variables`
    from `raw_ds` to `return_ds`.

    Parameters
    ----------
    return_ds : xarray Dataset
        Target dataset; matching variables are added to it in place.
    raw_ds : xarray Dataset
        Source dataset to copy variables from.

    Returns
    -------
    xarray Dataset
        The (modified) `return_ds`.
    """
    for key, names in get_name_dict("orca_variables", **kwargs).items():
        new_name = key
        new_dims = names["dims"]
        old_names = names.get("old_names", [new_name, ])
        for old_name in old_names:
            if old_name in raw_ds:
                try:
                    return_ds[new_name] = (new_dims, raw_ds[old_name].data)
                    # Stop at the first old name that could be copied.
                    break
                except ValueError:
                    # Shape incompatible with the target dims; try the next
                    # candidate name.  (Fix: dropped the unused `as e`
                    # binding flagged by linters.)
                    pass
    return return_ds
def rename_dims(ds, **kwargs):
    """Rename dimensions.

    Renames all dimensions defined in `xorca.orca_names.rename_dims` that
    are actually present in `ds` and returns the renamed data set.
    """
    mapping = get_name_dict("rename_dims", **kwargs)
    applicable = {old: new for old, new in mapping.items()
                  if old in ds.dims}
    return ds.rename(applicable)
def force_sign_of_coordinate(ds, **kwargs):
    """Force definite sign of coordinates.

    For all coordinates defined in `xorca.orca_names.orca_coordinates`, enforce
    a sign if there is an item telling us to do so. This is most useful to
    ensure that, e.g., depth is _always_ pointing upwards or downwards.
    """
    for k, v in get_name_dict("orca_coords", **kwargs).items():
        # `force_sign` is expected to be +1 / -1 when present; any missing
        # or falsy value means: leave the coordinate alone.
        force_sign = v.get("force_sign", False)
        if force_sign and k in ds.coords:
            # abs() normalizes the sign; multiplying by force_sign then
            # imposes the requested one.
            ds[k] = force_sign * abs(ds[k])
    return ds
def open_mf_or_dataset(data_files, **kwargs):
    """Open data_files as multi-file or a single-file xarray Dataset."""
    try:
        ds = xr.open_mfdataset(data_files, chunks={})
    except TypeError:
        # open_mfdataset raised a TypeError for this argument -- presumably
        # because it is a single already-open / non-iterable source; fall
        # back to opening it as one data set.
        ds = xr.open_dataset(data_files, chunks={})
    return ds
def get_all_compatible_chunk_sizes(chunks, dobj):
    """Return only those chunks that are compatible with the given data.

    Parameters
    ----------
    chunks : dict
        Dictionary with all possible chunk sizes. (Keys are dimension names,
        values are integers for the corresponding chunk size.)
    dobj : dataset or data array
        Dimensions of dobj will be used to filter the `chunks` dict.

    Returns
    -------
    dict
        Dictionary with only those items of `chunks` that can be applied to
        `dobj`.
    """
    present_dims = dobj.dims
    return {dim: size for dim, size in chunks.items()
            if dim in present_dims}
def set_time_independent_vars_to_coords(ds):
    """Make sure all time-independent variables are coordinates."""
    # Promote every data variable without a "t" dimension to a coordinate.
    time_independent = [name for name in ds.data_vars.keys()
                        if 't' not in ds[name].dims]
    return ds.set_coords(time_independent)
def preprocess_orca(mesh_mask, ds, **kwargs):
    """Preprocess orca datasets before concatenating.

    This is meant to be used like:
    ```python
    ds = xr.open_mfdataset(
        data_files,
        preprocess=(lambda ds:
                    preprocess_orca(mesh_mask, ds)))
    ```

    Parameters
    ----------
    mesh_mask : Dataset | Path | sequence | string
        An xarray `Dataset` or anything accepted by `xr.open_mfdataset` or,
        `xr.open_dataset`: A single file name, a sequence of Paths or file
        names, a glob statement.
    ds : xarray dataset
        Xarray dataset to be processed before concatenating.
    input_ds_chunks : dict
        Chunks for the ds to be preprocessed. Pass chunking for any input
        dimension that might be in the input data.

    Returns
    -------
    xarray dataset
        Grid-aware dataset with renamed dims, copied coords / variables, a
        definite coordinate sign, and time-independent vars as coords.
    """
    # make sure input ds is chunked
    input_ds_chunks = get_all_compatible_chunk_sizes(
        kwargs.get("input_ds_chunks", {}), ds)
    ds = ds.chunk(input_ds_chunks)
    # construct minimal grid-aware data set from mesh-mask info
    if not isinstance(mesh_mask, xr.Dataset):
        mesh_mask = open_mf_or_dataset(mesh_mask, **kwargs)
    mesh_mask = trim_and_squeeze(mesh_mask, **kwargs)
    return_ds = create_minimal_coords_ds(mesh_mask, **kwargs)
    # make sure dims are called correctly and trim input ds
    ds = rename_dims(ds, **kwargs)
    ds = trim_and_squeeze(ds, **kwargs)
    # copy coordinates from the mesh-mask and from the data set
    return_ds = copy_coords(return_ds, mesh_mask, **kwargs)
    return_ds = copy_coords(return_ds, ds, **kwargs)
    # copy variables from the data set
    return_ds = copy_vars(return_ds, ds, **kwargs)
    # make sure depth is positive upward
    return_ds = force_sign_of_coordinate(return_ds, **kwargs)
    # make everything that does not depend on time a coord
    return_ds = set_time_independent_vars_to_coords(return_ds)
    return return_ds
def _get_first_time_step_if_any(dobj):
    # Sort key for combine_by_coords: the first time stamp of the data set,
    # or (implicitly) None when there is no "t" coordinate.
    # NOTE(review): mixing None and datetime keys in sorted() raises a
    # TypeError on Python 3 -- presumably all combined data sets are
    # expected to carry a "t" coordinate; confirm.
    if "t" in dobj.coords:
        return dobj.coords["t"].data[0]
def load_xorca_dataset(data_files=None, aux_files=None, decode_cf=True,
                       **kwargs):
    """Create a grid-aware NEMO dataset.

    Parameters
    ----------
    data_files : Path | sequence | string
        Anything accepted by `xr.open_mfdataset` or, `xr.open_dataset`: A
        single file name, a sequence of Paths or file names, a glob statement.
    aux_files : Path | sequence | string
        Anything accepted by `xr.open_mfdataset` or, `xr.open_dataset`: A
        single file name, a sequence of Paths or file names, a glob statement.
    input_ds_chunks : dict
        Chunks for the ds to be preprocessed. Pass chunking for any input
        dimension that might be in the input data.
    target_ds_chunks : dict
        Chunks for the final data set. Pass chunking for any of the likely
        output dims: `("t", "z_c", "z_l", "y_c", "y_r", "x_c", "x_r")`
    decode_cf : bool
        Do we want the CF decoding to be done already? Default is True.

    Returns
    -------
    dataset
    """
    default_input_ds_chunks = {
        "time_counter": 1, "t": 1,
        "z": 2, "deptht": 2, "depthu": 2, "depthv": 2, "depthw": 2,
        "y": 200, "x": 200
    }
    # get and remove (pop) the input_ds_chunks from kwargs
    # to make sure that chunking is not applied again during preprocess_orca
    input_ds_chunks = kwargs.pop("input_ds_chunks",
                                 default_input_ds_chunks)
    default_target_ds_chunks = {
        "t": 1,
        "z_c": 2, "z_l": 2,
        "y_c": 200, "y_r": 200,
        "x_c": 200, "x_r": 200
    }
    target_ds_chunks = kwargs.get("target_ds_chunks",
                                  default_target_ds_chunks)
    # First, read aux files to learn about all dimensions. Then, open again
    # and specify chunking for all applicable dims. It is very important to
    # already pass the `chunks` arg to `open_[mf]dataset`, to ensure
    # distributed performance.
    _aux_files_chunks = map(
        lambda af: get_all_compatible_chunk_sizes(
            input_ds_chunks, xr.open_dataset(af, decode_cf=False)),
        aux_files)
    aux_ds = xr.Dataset()
    for af, ac in zip(aux_files, _aux_files_chunks):
        aux_ds.update(
            rename_dims(xr.open_dataset(af, decode_cf=False,
                                        chunks=ac)))
    # Again, we first have to open all data sets to filter the input chunks.
    _data_files_chunks = map(
        lambda df: get_all_compatible_chunk_sizes(
            input_ds_chunks, xr.open_dataset(df, decode_cf=decode_cf)),
        data_files)
    # Automatically combine all data files.  The inner map opens and renames
    # each file, the outer map runs the full preprocessing, and the result is
    # sorted by the first time step before being combined.
    ds_xorca = xr.combine_by_coords(
        sorted(
            map(
                lambda ds: preprocess_orca(aux_ds, ds, **kwargs),
                map(lambda df, chunks: rename_dims(
                    xr.open_dataset(df, chunks=chunks, decode_cf=decode_cf),
                    **kwargs),
                    data_files, _data_files_chunks)),
            key=_get_first_time_step_if_any))
    # Add info from aux files
    ds_xorca.update(preprocess_orca(aux_ds, aux_ds, **kwargs))
    # Chunk the final ds
    ds_xorca = ds_xorca.chunk(
        get_all_compatible_chunk_sizes(target_ds_chunks, ds_xorca))
    return ds_xorca
def load_xorca_dataset_auto(data_files=None, aux_files=None, decode_cf=True,
                            **kwargs):
    """Create a grid-aware NEMO dataset from netcdf files or zarr stores.

    Parameters
    ----------
    data_files : Path | sequence | string
        Either Netcdf files or Zarr stores containing the data.
        Anything accepted by `xr.open_mfdataset`, or `xr.open_dataset`, or
        `xr.open_zarr`: A single path or file name, a sequence of Paths or
        file names, a glob statement.
    aux_files : Path | sequence | string
        Anything accepted by `xr.open_mfdataset`, or `xr.open_dataset`, or
        `xr.open_zarr`: A single path or file name, a sequence of Paths or
        file names, a glob statement.
    input_ds_chunks : dict
        Chunks for the ds to be preprocessed. Pass chunking for any input
        dimension that might be in the input data.
    target_ds_chunks : dict
        Chunks for the final data set. Pass chunking for any of the likely
        output dims: `("t", "z_c", "z_l", "y_c", "y_r", "x_c", "x_r")`
    decode_cf : bool
        Do we want the CF decoding to be done already? Default is True.

    Raises
    ------
    ValueError
        If a source can be opened neither as a netcdf dataset nor as a
        zarr store.

    Returns
    -------
    dataset
    """
    default_input_ds_chunks = {
        "time_counter": 1, "t": 1,
        "z": 2, "deptht": 2, "depthu": 2, "depthv": 2, "depthw": 2,
        "y": 200, "x": 200
    }
    # get and remove (pop) the input_ds_chunks from kwargs
    # to make sure that chunking is not applied again during preprocess_orca
    input_ds_chunks = kwargs.pop("input_ds_chunks",
                                 default_input_ds_chunks)
    default_target_ds_chunks = {
        "t": 1,
        "z_c": 2, "z_l": 2,
        "y_c": 200, "y_r": 200,
        "x_c": 200, "x_r": 200
    }
    target_ds_chunks = kwargs.get("target_ds_chunks",
                                  default_target_ds_chunks)

    # Generalized open function that handles both netcdf files and zarr
    # stores: try netcdf first, then fall back to zarr.
    # Fix: the original used a bare `except:` plus a `try/except/else` whose
    # `else: raise ValueError(...)` branch was unreachable (the `try` always
    # returns), so a double failure leaked the raw zarr error instead of the
    # intended ValueError.
    def _open_dataset_or_zarr(*args, **kwargs):
        try:
            return xr.open_dataset(*args, **kwargs)
        except Exception:
            try:
                return xr.open_zarr(*args, **kwargs)
            except Exception as error:
                raise ValueError(
                    "Could not open dataset or zarr with " +
                    f"args={args} and kwargs={kwargs}."
                ) from error

    # First, read aux files to learn about all dimensions. Then, open again
    # and specify chunking for all applicable dims. It is very important to
    # already pass the `chunks` arg to `open_[mf]dataset`, to ensure
    # distributed performance.
    _aux_files_chunks = map(
        lambda af: get_all_compatible_chunk_sizes(
            input_ds_chunks, _open_dataset_or_zarr(af, decode_cf=False)),
        aux_files)
    aux_ds = xr.Dataset()
    for af, ac in zip(aux_files, _aux_files_chunks):
        aux_ds.update(
            rename_dims(_open_dataset_or_zarr(
                af, decode_cf=False, chunks=ac
            ))
        )
    # Again, we first have to open all data sets to filter the input chunks.
    _data_files_chunks = map(
        lambda df: get_all_compatible_chunk_sizes(
            input_ds_chunks, _open_dataset_or_zarr(df, decode_cf=decode_cf)),
        data_files)
    # Automatically combine all data files
    ds_xorca = xr.combine_by_coords(
        sorted(
            map(
                lambda ds: preprocess_orca(aux_ds, ds, **kwargs),
                map(lambda df, chunks: rename_dims(
                    _open_dataset_or_zarr(
                        df, chunks=chunks, decode_cf=decode_cf
                    ),
                    **kwargs),
                    data_files, _data_files_chunks)),
            key=_get_first_time_step_if_any))
    # Add info from aux files
    ds_xorca.update(preprocess_orca(aux_ds, aux_ds, **kwargs))
    # Chunk the final ds
    ds_xorca = ds_xorca.chunk(
        get_all_compatible_chunk_sizes(target_ds_chunks, ds_xorca))
    return ds_xorca
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.