id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
9665232 | <filename>{{cookiecutter.project_slug}}/app/spacy_extractor.py<gh_stars>100-1000
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import Dict, List
import spacy
from spacy.language import Language
class SpacyExtractor:
    """Pipe records (each carrying an id and a text body) through a spaCy
    model and return the recognised entities, grouped by a normalised
    entity identifier.
    """

    def __init__(
        self, nlp: Language, input_id_col: str = "id", input_text_col: str = "text"
    ):
        """Initialize the SpacyExtractor pipeline.

        nlp (spacy.language.Language): pre-loaded spacy language model
        input_text_col (str): property on each document to run the model on
        input_id_col (str): property on each document to correlate with request

        RETURNS (EntityRecognizer): The newly constructed object.
        """
        self.nlp = nlp
        self.input_id_col = input_id_col
        self.input_text_col = input_text_col

    def _name_to_id(self, text: str):
        """Messy normalization of an entity name into a dash-joined id.

        text (str): text to create "id" from
        """
        return "-".join(part.lower() for part in text.split())

    def extract_entities(self, records: List[Dict[str, str]]):
        """Apply the pre-trained model to a batch of records.

        records (list): The list of "document" dictionaries each with an
            `id` and `text` property

        RETURNS (list): List of responses containing the id of
            the correlating document and a list of entities.
        """
        doc_ids = (record[self.input_id_col] for record in records)
        bodies = (record[self.input_text_col] for record in records)

        results = []
        for doc_id, parsed in zip(doc_ids, self.nlp.pipe(bodies)):
            grouped = {}
            for span in parsed.ents:
                # Prefer the knowledge-base id, then the rule-based entity
                # id, and finally fall back to a normalised name.
                key = span.kb_id or span.ent_id or self._name_to_id(span.text)
                if key not in grouped:
                    if span.text == span.text.lower():
                        display_name = span.text.capitalize()
                    else:
                        display_name = span.text
                    grouped[key] = {
                        "name": display_name,
                        "label": span.label_,
                        "matches": [],
                    }
                grouped[key]["matches"].append(
                    {"start": span.start_char, "end": span.end_char, "text": span.text}
                )
            results.append({"id": doc_id, "entities": list(grouped.values())})
        return results
| StarcoderdataPython |
3271975 | <reponame>comcon1/ASPAM_webinterface<filename>server/src/image_loader.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# NOTE: this is Python 2 code (print statements below).
import os, sys, argparse
# Star imports: presumably these provide the `dq` and `qi` modules used in
# MUMU() below -- confirm against config.py / servutils.py.
from config import *
from servutils import *
import profile,traceback
'''
Standalone script that loads images by parameters.
In the future this script should be replaced with faster procedures.
'''
# Command-line interface of the standalone image generator.
prs = argparse.ArgumentParser(description="")
# the major type! what image to create!
prs.add_argument('-x', dest='typ', metavar='T', type=str, help='type of the image')
prs.add_argument('-d', dest="exdir", metavar='FILENAME', type=str, help='experiment directory', default='.')
prs.add_argument('-i', dest='datfil', metavar='FILENAME', type=str, help='experiment file',default='data00.xvg')
# Date range selection; -1 means "use the full recorded range".
prs.add_argument('-f', dest="fromdate", metavar='N', type=int, help='from date', default=-1)
prs.add_argument('-t', dest="tilldate", metavar='N', type=int, help='till file', default=-1)
prs.add_argument('-r', dest="selected_rats", type=str, help='selected rats as list "1,2,3,.."', default='')
prs.add_argument('-s', dest="scale", type=str, help='scale of the plot', default='')
prs.add_argument('-y', dest="yunits", type=str, help='Units of the OY axis', default='meters')
prs.add_argument('-u', dest="regen_cache", action="store_true", help='force regenerate')
prs.add_argument('-p', dest="fastpreview", action="store_true", help='mode of fastest look')
#prs.add_argument('-l', dest="log", metavar='FILE', type=str, help='log file',default='logger.log')
args = prs.parse_args()
# Full path of the experiment data file to analyse.
futurefile = os.path.join(args.exdir, args.datfil)
print futurefile
def MUMU():
    # Generate a rotation-curve preview image (raw or cumulative) for the
    # experiment file selected on the command line.
    # NOTE(review): `dq` and `qi` are not imported by name in this file; they
    # are presumably brought in by the star imports above -- confirm.
    if args.typ in ['expreview_raw', 'expreview_cumulative']:
        try:
            rca = dq.RotCurveAnalyzer(futurefile)
            ip0 = qi.RotImageParameters(rca.loader)
        except Exception as e:
            print '******** ERROR DURING LOADING AND ANALYSING!! *********'
            traceback.print_exc()
            sys.exit(1)
        if args.fastpreview:
            # force reset dates: preview only the last hour of the recording
            args.fromdate = ip0.et - 3600
            args.tilldate = ip0.et
        print 'Data was recorded in diapazone: %d-%d' % (ip0.bt, ip0.et)
        # -1 means "unset": default to the day boundary of the recording range.
        _fromdate = dq.tu.lower_day(ip0.bt) if args.fromdate == -1 else int(args.fromdate)
        _tilldate = dq.tu.lower_day(ip0.et) if args.tilldate == -1 else int(args.tilldate)
        # Extend the upper bound by one day so the till-date day is included.
        ip0.setDiapT(_fromdate, _tilldate+24*3600)
        print 'Requesting image for data in range %d-%d' % (_fromdate, _tilldate+24*3600)
        print 'Frame range: %d-%d:%d' % (ip0.startt, ip0.stopt, ip0._tstep)
        # Figure size comes in as "W:H".
        ip0.setFigSize(tuple(map(float,args.scale.split(':'))))
        # Selected rats come in as "1,2,3".
        __ratlist = map(int,args.selected_rats.split(','))
        ip0.setRatList(__ratlist)
        ip0.Yunits = args.yunits
        if args.regen_cache:
            ip0.setRegen()
        try:
            ip0.plotType = 'raw' if args.typ == 'expreview_raw' else 'cumulative'
            ir0 = qi.RotImageRequest(rca, ip0)
        except Exception as e:
            print '******** ERROR DURING REQUEST OF PICTURE PREPARATION!! *********'
            traceback.print_exc()
            sys.exit(1)
        # print resulting information (parsed by the calling web interface)
        print 'RESULT_BT:' + str(ip0.bt)
        print 'RESULT_ET:' + str(ip0.et)
        print 'IMAGE_PATH:' + ir0.getImage(absolute=False)
MUMU()
sys.exit(0) | StarcoderdataPython |
# Ask for the user's name (blue ANSI prompt), trim surrounding whitespace,
# then print a purple greeting.
nome = input('\033[0;34mQual é seu nome? \033[m').strip()
print('\033[0;35mOlá {}, prazer em conhece-lo\033[m'.format(nome))
| StarcoderdataPython |
386034 | <reponame>django-doctor/lite-frontend
import bleach
from django import template
from django.template import engines
from markdown import Markdown
register = template.Library()


@register.simple_tag(takes_context=True)
def markdown_to_html(context):
    """Render the "content" entry of the template context.

    The text is sanitised with bleach, converted from Markdown to HTML,
    and finally run through the Django template engine so placeholder
    tags are substituted with values from the context.
    """
    # Clean the raw content before any rendering happens.
    sanitised = bleach.clean(context.get("content", ""))

    # Convert the Markdown source to HTML.
    html = Markdown().convert(sanitised)
    if not html:
        return ""

    # Treat the generated HTML as a template so {{ placeholders }} resolve
    # against the flattened context.
    rendered = engines["django"].from_string(html).render(context.flatten())
    return rendered
| StarcoderdataPython |
3452929 | <filename>writer.py
import argparse
from bs4 import BeautifulSoup
import calendar
import csv
import json
import markdown
import os
import sys
import time
from tqdm import tqdm
# Path templates describing the on-disk layout produced by the Crawler:
#   {src_dir}/{owner}/{repo}/pull-N.json and {src_dir}/{owner}/{repo}/issue-N.json
_owner_path_template = os.path.join('{src_dir}', '{owner}')
_repo_path_template = os.path.join('{src_dir}', '{owner}', '{repo}')
_pull_path_template = os.path.join('{src_dir}', '{owner}', '{repo}', 'pull-{pull_number}.json')
_issue_path_template = os.path.join('{src_dir}', '{owner}', '{repo}', 'issue-{issue_number}.json')

# Column order of the output CSV; must stay in sync with _dataset_row().
_dataset_header = [
    'repo_id',
    'issue_number',
    'issue_title',
    'issue_body_md',
    'issue_body_plain',
    'issue_created_at',
    'issue_author_id',
    'issue_author_association',
    'issue_label_ids',
    'pull_number',
    'pull_created_at',
    'pull_merged_at',
    'pull_comments',
    'pull_review_comments',
    'pull_commits',
    'pull_additions',
    'pull_deletions',
    'pull_changed_files',
]

# Mapping from GitHub's author_association strings to the integer enum
# stored in the dataset (documented in write_dataset's docstring).
_author_association_value = {
    'COLLABORATOR': 0,
    'CONTRIBUTOR': 1,
    'FIRST_TIMER': 2,
    'FIRST_TIME_CONTRIBUTOR': 3,
    'MANNEQUIN': 4,
    'MEMBER': 5,
    'NONE': 6,
    'OWNER': 7,
}
def write_dataset(src_dir, dst_file, limit_rows=0):
    """Reads JSON files downloaded by the Crawler and writes a CSV file from their
    data.

    The CSV file will have the following columns:

    - repo_id: Integer
    - issue_number: Integer
    - issue_title: Text
    - issue_body_md: Text, in Markdown format, can be empty
    - issue_body_plain: Text, in plain text, can be empty
    - issue_created_at: Integer, in Unix time
    - issue_author_id: Integer
    - issue_author_association: Integer enum (see values below)
    - issue_label_ids: Comma-separated integers, can be empty
    - pull_number: Integer
    - pull_created_at: Integer, in Unix time
    - pull_merged_at: Integer, in Unix time
    - pull_comments: Integer
    - pull_review_comments: Integer
    - pull_commits: Integer
    - pull_additions: Integer
    - pull_deletions: Integer
    - pull_changed_files: Integer

    The value of issue_body_plain is converted from issue_body_md. The conversion is
    not always perfect. In some cases, issue_body_plain still contains some Markdown
    tags.

    The value of issue_author_association can be one of the following:

    - 0: Collaborator
    - 1: Contributor
    - 2: First-timer
    - 3: First-time contributor
    - 4: Mannequin
    - 5: Member
    - 6: None
    - 7: Owner

    Rows are sorted by repository owner username, repository name, pull request
    number, and then issue number.

    The source directory must contain owner/repo/issue-N.json and
    owner/repo/pull-N.json files. The destination directory of Crawler should
    normally be used as the source directory of Writer. The destination file will be
    overwritten if it already exists.

    Args:
        src_dir (str): Source directory.
        dst_file (str): Destination CSV file.
        limit_rows (int): Maximum number of rows to write.
    """
    # Per-repo statistics, printed both on normal completion and when the
    # row limit is hit.
    repo_full_names = []
    repo_num_rows = []
    total_num_rows = 0

    def print_results():
        # Summarise how many rows were written per repository.
        for r, n in zip(repo_full_names, repo_num_rows):
            print('{}: {:,}'.format(r, n))
        print('Total: {:,}'.format(total_num_rows))

    with open(dst_file, 'w', newline='') as dataset_file:
        dataset = csv.writer(dataset_file)
        dataset.writerow(_dataset_header)
        owner_repo_pairs = _sorted_owner_repo_pairs(src_dir)
        num_repos = len(owner_repo_pairs)
        for i, (owner, repo) in enumerate(owner_repo_pairs):
            repo_full_name = '{}/{}'.format(owner, repo)
            repo_full_names.append(repo_full_name)
            repo_num_rows.append(0)
            print('{} ({:,}/{:,})'.format(repo_full_name, i + 1, num_repos))
            for pull_number in tqdm(_sorted_pull_numbers(src_dir, owner, repo)):
                pull = _read_json(_pull_path_template.format(src_dir=src_dir, owner=owner, repo=repo, pull_number=pull_number))
                # One output row per (linked issue, pull) pair, issues ascending.
                pull['linked_issue_numbers'].sort()
                for issue_number in pull['linked_issue_numbers']:
                    issue = _read_json(_issue_path_template.format(src_dir=src_dir, owner=owner, repo=repo, issue_number=issue_number))
                    dataset.writerow(_dataset_row(issue, pull))
                    repo_num_rows[i] += 1
                    total_num_rows += 1
                    # limit_rows <= 0 can never equal total_num_rows here,
                    # so a non-positive limit is effectively "no limit".
                    if total_num_rows == limit_rows:
                        print('Limit of {:,} rows reached'.format(limit_rows))
                        print_results()
                        return
    print('Finished')
    print_results()
def _sorted_owner_repo_pairs(src_dir):
    """Return (owner, repo) pairs for every repository directory under
    *src_dir*, sorted by owner and then by repository name.
    """
    pairs = []  # e.g. [(owner1, repo1), (owner2, repo2)]
    for owner in sorted(os.listdir(src_dir)):
        owner_dir = _owner_path_template.format(src_dir=src_dir, owner=owner)
        for repo in sorted(os.listdir(owner_dir)):
            pairs.append((owner, repo))
    return pairs
def _sorted_pull_numbers(src_dir, owner, repo):
    """Return the sorted pull request numbers stored for one repository.

    Scans the repository directory for files named ``pull-N.json`` and
    extracts N. Files that do not match that exact pattern are ignored;
    the previous prefix-only check crashed with ValueError on stray names
    such as ``pull-1.json.bak``.
    """
    repo_dir = _repo_path_template.format(src_dir=src_dir, owner=owner, repo=repo)
    filenames = os.listdir(repo_dir)
    pull_numbers = [
        int(f[len('pull-'):-len('.json')])
        for f in filenames
        if f.startswith('pull-') and f.endswith('.json')
    ]
    pull_numbers.sort()
    return pull_numbers
def _read_json(path):
with open(path, 'r') as f:
return json.load(f)
def _dataset_row(issue, pull):
    """Build one CSV row (column order of ``_dataset_header``) from a
    linked issue/pull-request pair.
    """
    raw_body = issue.get('body')
    if raw_body is None:
        # Missing body: both the Markdown and plain-text columns stay empty.
        issue_body_md = ''
        issue_body_plain = ''
    else:
        issue_body_md = raw_body
        issue_body_plain = _md_to_text(raw_body)

    label_ids = ','.join(str(label['id']) for label in issue['labels'])

    return [
        pull['base']['repo']['id'],
        issue['number'],
        issue['title'],
        issue_body_md,
        issue_body_plain,
        _iso_to_unix(issue['created_at']),
        issue['user']['id'],
        _author_association_value[issue['author_association']],
        label_ids,
        pull['number'],
        _iso_to_unix(pull['created_at']),
        _iso_to_unix(pull['merged_at']),
        pull['comments'],
        pull['review_comments'],
        pull['commits'],
        pull['additions'],
        pull['deletions'],
        pull['changed_files'],
    ]
def _md_to_text(md):
    """Strip Markdown formatting from *md* and return plain text.

    Conversion is best-effort: some Markdown constructs may survive.
    """
    rendered_html = markdown.markdown(md)
    return BeautifulSoup(rendered_html, features='html.parser').get_text()
def _iso_to_unix(iso):
utc_time = time.strptime(iso, '%Y-%m-%dT%H:%M:%SZ')
return calendar.timegm(utc_time)
def main():
    """Command-line entry point: parse the arguments and run write_dataset."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Read JSON files downloaded by the Crawler and write a CSV file from their data. '
        'The source directory must contain owner/repo/issue-N.json and owner/repo/pull-N.json files. '
        'The destination directory of Crawler should normally be used as the source directory of Writer. '
        'The destination file will be overwritten if it already exists.',
    )
    arg_parser.add_argument('-l', '--limit-rows', type=int, default=0,
                            help='limit number of rows to write, ignored if non-positive')
    arg_parser.add_argument('src_dir', type=str,
                            help='source directory')
    arg_parser.add_argument('dst_file', type=str,
                            help='destination CSV file')
    parsed = arg_parser.parse_args()
    write_dataset(parsed.src_dir, parsed.dst_file, limit_rows=parsed.limit_rows)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
9666265 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image represented by either a URI or byte stream."""
from base64 import b64encode
from google.cloud._helpers import _to_bytes
from google.cloud.vision.entity import EntityAnnotation
from google.cloud.vision.face import Face
from google.cloud.vision.feature import Feature
from google.cloud.vision.feature import FeatureTypes
from google.cloud.vision.color import ImagePropertiesAnnotation
from google.cloud.vision.safe import SafeSearchAnnotation
class Image(object):
    """Image representation containing information to be annotated.

    Exactly one of ``content`` or ``source_uri`` is expected; when
    ``source_uri`` is given, ``content`` is ignored.

    :type content: bytes
    :param content: Byte stream of an image.

    :type source_uri: str
    :param source_uri: Google Cloud Storage URI of image.

    :type client: :class:`~google.cloud.vision.client.Client`
    :param client: Instance of Vision client.
    """

    def __init__(self, client, content=None, source_uri=None):
        self.client = client
        self._content = None
        self._source = None

        if source_uri:
            self._source = source_uri
        else:
            # The JSON API expects image bytes as base64 text.
            self._content = b64encode(_to_bytes(content))

    def as_dict(self):
        """Generate dictionary structure for request.

        :rtype: dict
        :returns: Dictionary with source information for image.
        """
        if self.content:
            return {
                'content': self.content
            }
        else:
            return {
                'source': {
                    'gcs_image_uri': self.source
                }
            }

    @property
    def content(self):
        """Base64 encoded image content.

        :rtype: str
        :returns: Base64 encoded image bytes.
        """
        return self._content

    @property
    def source(self):
        """Google Cloud Storage URI.

        :rtype: str
        :returns: String of Google Cloud Storage URI.
        """
        return self._source

    def _detect_annotation(self, feature):
        """Generic method for detecting a single annotation type.

        :type feature: :class:`~google.cloud.vision.feature.Feature`
        :param feature: The ``Feature`` indicating the type of annotation to
                        perform.

        :rtype: list
        :returns: List of
                  :class:`~google.cloud.vision.entity.EntityAnnotation`.
        """
        # Maps the request feature type to the key of the corresponding
        # annotation list in the API response.
        reverse_types = {
            'LABEL_DETECTION': 'labelAnnotations',
            'LANDMARK_DETECTION': 'landmarkAnnotations',
            'LOGO_DETECTION': 'logoAnnotations',
            'TEXT_DETECTION': 'textAnnotations',
        }
        detected_objects = []
        result = self.client.annotate(self, [feature])
        for response in result[reverse_types[feature.feature_type]]:
            detected_object = EntityAnnotation.from_api_repr(response)
            detected_objects.append(detected_object)
        return detected_objects

    def detect_faces(self, limit=10):
        """Detect faces in image.

        :type limit: int
        :param limit: The number of faces to try and detect.

        :rtype: list
        :returns: List of :class:`~google.cloud.vision.face.Face`.
        """
        faces = []
        face_detection_feature = Feature(FeatureTypes.FACE_DETECTION, limit)
        result = self.client.annotate(self, [face_detection_feature])
        for face_response in result['faceAnnotations']:
            face = Face.from_api_repr(face_response)
            faces.append(face)
        return faces

    def detect_labels(self, limit=10):
        """Detect labels that describe objects in an image.

        :type limit: int
        :param limit: The maximum number of labels to try and detect.

        :rtype: list
        :returns: List of :class:`~google.cloud.vision.entity.EntityAnnotation`
        """
        feature = Feature(FeatureTypes.LABEL_DETECTION, limit)
        return self._detect_annotation(feature)

    def detect_landmarks(self, limit=10):
        """Detect landmarks in an image.

        :type limit: int
        :param limit: The maximum number of landmarks to find.

        :rtype: list
        :returns: List of
                  :class:`~google.cloud.vision.entity.EntityAnnotation`.
        """
        feature = Feature(FeatureTypes.LANDMARK_DETECTION, limit)
        return self._detect_annotation(feature)

    def detect_logos(self, limit=10):
        """Detect logos in an image.

        :type limit: int
        :param limit: The maximum number of logos to find.

        :rtype: list
        :returns: List of
                  :class:`~google.cloud.vision.entity.EntityAnnotation`.
        """
        feature = Feature(FeatureTypes.LOGO_DETECTION, limit)
        return self._detect_annotation(feature)

    def detect_properties(self, limit=10):
        """Detect the color properties of an image.

        :type limit: int
        :param limit: The maximum number of image properties to find.

        :rtype: :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`
        :returns: A single
                  :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`
                  built from the API response (not a list).
        """
        feature = Feature(FeatureTypes.IMAGE_PROPERTIES, limit)
        result = self.client.annotate(self, [feature])
        response = result['imagePropertiesAnnotation']
        return ImagePropertiesAnnotation.from_api_repr(response)

    def detect_safe_search(self, limit=10):
        """Retrieve safe search properties from an image.

        :type limit: int
        :param limit: Feature limit forwarded with the annotation request.

        :rtype: :class:`~google.cloud.vision.safe.SafeSearchAnnotation`
        :returns: A single
                  :class:`~google.cloud.vision.safe.SafeSearchAnnotation`
                  built from the API response (not a list).
        """
        safe_detection_feature = Feature(FeatureTypes.SAFE_SEARCH_DETECTION,
                                         limit)
        result = self.client.annotate(self, [safe_detection_feature])
        safe_search_response = result['safeSearchAnnotation']
        return SafeSearchAnnotation.from_api_repr(safe_search_response)

    def detect_text(self, limit=10):
        """Detect text in an image.

        :type limit: int
        :param limit: The maximum instances of text to find.

        :rtype: list
        :returns: List of
                  :class:`~google.cloud.vision.entity.EntityAnnotation`.
        """
        feature = Feature(FeatureTypes.TEXT_DETECTION, limit)
        return self._detect_annotation(feature)
| StarcoderdataPython |
1798959 | from django import forms
from .models import InlineImage
class InlineImageForm(forms.ModelForm):
    """Upload form for InlineImage, styled for the drag-and-drop file box."""

    class Meta:
        model = InlineImage
        fields = (
            'image_file',
        )

    def __init__(self, *args, **kwargs):
        super(InlineImageForm, self).__init__(*args, **kwargs)
        # Decorate the file input with the widget attributes expected by the
        # front-end file-box script (multiple selection + caption template).
        widget_attrs = {
            'class': 'box__file',
            'data-multiple-caption': '{count} files selected',
            'multiple': '',
        }
        self.fields['image_file'].widget.attrs.update(widget_attrs)
3463281 | <reponame>ast0815/likelihood-machine
"""Binning/histogramming classes for scientific computing
YAML interface
==============
All classes defined in `binning` can be stored as and read from YAML files
using the ``binning.yaml`` module::
with open("filename.yml", 'w') as f:
binning.yaml.dump(some_binning, f)
with open("filename.yml", 'r') as f:
some_binning = binning.yaml.full_load(f)
"""
from copy import deepcopy
from tempfile import TemporaryFile
import numpy as np
import yaml
from numpy.lib.recfunctions import rename_fields
class PhaseSpace(yaml.YAMLObject):
    """The set of variables whose combinations characterize an event.

    Parameters
    ----------
    variables : iterable of strings
        The set of variables that define the phase space.

    Attributes
    ----------
    variables : set of str
        The set of variables that define the phase space.

    Notes
    -----
    A PhaseSpace behaves like the cartesian product of its `variables`.
    Membership tests (``'a' in ps``) check whether a variable belongs to
    the space. The comparison operators implement (proper) sub- and
    super-space relations, ``*`` builds the union of two spaces, and ``/``
    their difference, e.g.::

        PhaseSpace(['a', 'b']) == PhaseSpace(['b', 'a'])   # True
        PhaseSpace(['a', 'c']) < PhaseSpace(['a', 'b', 'c'])  # True
    """

    def __init__(self, variables):
        self.variables = set(variables)

    def __contains__(self, var):
        return var in self.variables

    def __len__(self):
        return len(self.variables)

    def __eq__(self, other):
        return self.variables == other.variables

    def __ne__(self, other):
        return self.variables != other.variables

    def __le__(self, other):
        return self.variables <= other.variables

    def __ge__(self, other):
        return self.variables >= other.variables

    def __lt__(self, other):
        # Proper subset: contained in, but not equal to, the other space.
        return self.variables < other.variables

    def __gt__(self, other):
        # Proper superset: contains, but is not equal to, the other space.
        return self.variables > other.variables

    def __mul__(self, other):
        return PhaseSpace(variables=self.variables | other.variables)

    def __truediv__(self, other):
        return PhaseSpace(variables=self.variables - other.variables)

    def __div__(self, other):
        # Python 2 division operator; identical to __truediv__.
        return self.__truediv__(other)

    def __str__(self):
        return "('{}')".format("' X '".join(self.variables))

    def __repr__(self):
        return "{}(variables={!r})".format(type(self).__name__, self.variables)

    def clone(self):
        """Return a copy of the object."""
        return deepcopy(self)

    @classmethod
    def to_yaml(cls, dumper, obj):
        return dumper.represent_sequence("!PhaseSpace", list(obj.variables))

    @classmethod
    def from_yaml(cls, loader, node):
        return cls(variables=loader.construct_sequence(node))

    yaml_loader = yaml.FullLoader
    yaml_tag = "!PhaseSpace"
class Bin(yaml.YAMLObject):
    """A Bin is a container for a value that is defined on a subset of an n-dimensional phase space.

    Parameters
    ----------
    phasespace : PhaseSpace
        The :class:`PhaseSpace` the `Bin` resides in.
    value : float, optional
        The initialization value of the bin. Default: 0.0
    entries : int, optional
        The initialization value of the number of entries. Default: 0
    sumw2 : float, optional
        The initialization value of the sum of squared weights. Default: ``value**2``
    value_array : slice of ndarray, optional
        A slice of a numpy array, where the value of the bin will be stored.
        Default: ``None``
    entries_array : slice of ndarray, optional
        A slice of a numpy array, where the number entries will be stored.
        Default: ``None``
    sumw2_array : slice of ndarray, optional
        A slice of a numpy array, where the squared weights will be stored.
        Default: ``None``
    dummy : bool, optional
        Do not create any arrays to store the data.
        Default: ``False``

    Attributes
    ----------
    value : float
        The value of the bin.
    entries : int
        The number of entries in the bin.
    sumw2 : float
        The sum of squared weights in the bin.
    phasespace : PhaseSpace
        The :class:`PhaseSpace` the bin is defined on
    """

    def __init__(self, **kwargs):
        self.phasespace = kwargs.pop("phasespace", None)
        if self.phasespace is None:
            raise TypeError("Undefined phase space!")
        if not kwargs.pop("dummy", False):
            # Use the provided array slices when given; otherwise allocate
            # one-element arrays. The "value" kwarg is consumed before
            # "sumw2" because the sumw2 default is value**2.
            self.value_array = kwargs.pop("value_array", None)
            if self.value_array is None:
                self.value_array = np.array([kwargs.pop("value", 0.0)], dtype=float)
            self.entries_array = kwargs.pop("entries_array", None)
            if self.entries_array is None:
                self.entries_array = np.array([kwargs.pop("entries", 0)], dtype=int)
            self.sumw2_array = kwargs.pop("sumw2_array", None)
            if self.sumw2_array is None:
                self.sumw2_array = np.array(
                    [kwargs.pop("sumw2", self.value**2)], dtype=float
                )
        else:
            # Dummy bin: silently discard any array kwargs so they do not
            # trip the unknown-kwargs check below.
            for key in ["value_array", "entries_array", "sumw2_array"]:
                if key in kwargs:
                    del kwargs[key]
        if len(kwargs) > 0:
            raise TypeError(f"Unknown kwargs: {kwargs}")

    @property
    def value(self):
        """(float) The value of the bin.

        The sum of weights.
        """
        return self.value_array[0]

    @value.setter
    def value(self, v):
        self.value_array[0] = v

    @property
    def entries(self):
        """(int) The number of entries in the bin."""
        return self.entries_array[0]

    @entries.setter
    def entries(self, v):
        self.entries_array[0] = v

    @property
    def sumw2(self):
        """(float) The sum of squared weights in the bin."""
        return self.sumw2_array[0]

    @sumw2.setter
    def sumw2(self, v):
        self.sumw2_array[0] = v

    def event_in_bin(self, event):
        """Check whether the variable combination falls within the bin.

        Parameters
        ----------
        event : dict like
            A dictionary (or similar object) with one value of each variable
            in the binning, e.g.::

                {'x': 1.4, 'y': -7.47}

        Returns
        -------
        bool
            Whether or not the variable combination lies within the bin.
        """
        raise NotImplementedError("This method must be defined in an inheriting class.")

    def fill(self, weight=1.0):
        """Add the weight(s) to the bin.

        Also increases the number of entries and sum of squared weights accordingly.

        Parameters
        ----------
        weight : float or iterable of floats, optional
            Weight(s) to be added to the value of the bin.
        """
        # EAFP: a scalar weight has no len(); an iterable is summed in bulk.
        try:
            # Does the weight have a length?
            n = len(weight)
        except TypeError:
            # No
            w = weight
            w2 = w**2
            n = 1
        else:
            # Yes
            weight = np.asarray(weight)
            w = np.sum(weight)
            w2 = np.sum(weight**2)
        self.value += w
        self.entries += n
        self.sumw2 += w2

    def is_dummy(self):
        """Return `True` if there is no data array linked to this bin."""
        try:
            self.value_array
        except AttributeError:
            return True
        else:
            return False

    def __contains__(self, event):
        """Return True if the event falls within the bin."""
        return self.event_in_bin(event)

    def __eq__(self, other):
        """Bins are equal if they are of the same type, defined on the same phase space."""
        # Exact type comparison is intentional: subclasses never compare
        # equal to their base type.
        return type(self) == type(other) and self.phasespace == other.phasespace

    def __ne__(self, other):
        return not self == other

    def __add__(self, other):
        ret = deepcopy(self)
        ret.value = self.value + other.value
        ret.entries = self.entries + other.entries
        ret.sumw2 = self.sumw2 + other.sumw2
        return ret

    def __sub__(self, other):
        # NOTE: only `value` is combined; `entries` and `sumw2` keep the
        # left operand's values (copied via deepcopy). Same for * and /.
        ret = deepcopy(self)
        ret.value = self.value - other.value
        return ret

    def __mul__(self, other):
        ret = deepcopy(self)
        ret.value = self.value * other.value
        return ret

    def __div__(self, other):
        ret = deepcopy(self)
        ret.value = self.value / other.value
        return ret

    def __truediv__(self, other):
        # Python 3 div operator
        return self.__div__(other)

    def __repr__(self):
        return "{}({})".format(
            type(self).__name__,
            ", ".join([f"{k}={v!r}" for k, v in self._get_clone_kwargs().items()]),
        )

    def _get_clone_kwargs(self, **kwargs):
        """Get the necessary arguments to clone this object."""
        args = {
            "phasespace": deepcopy(self.phasespace),
        }
        if self.is_dummy() or kwargs.get("dummy", False):
            args["dummy"] = True
        else:
            args.update(
                {
                    "value_array": deepcopy(self.value_array),
                    "entries_array": deepcopy(self.entries_array),
                    "sumw2_array": deepcopy(self.sumw2_array),
                }
            )
        # Caller-provided kwargs override the defaults gathered above.
        args.update(kwargs)
        return args

    def clone(self, **kwargs):
        """Create a functioning copy of the Bin.

        Can specify additional kwargs for the initialisation of the new Binning.
        """
        args = self._get_clone_kwargs(**kwargs)
        return type(self)(**args)

    @classmethod
    def to_yaml(cls, dumper, obj):
        # Serialise as a dummy (no data arrays) unless real data is present.
        dic = obj._get_clone_kwargs(dummy=True)
        if not obj.is_dummy():
            del dic["dummy"]
        return dumper.represent_mapping(cls.yaml_tag, dic)

    @classmethod
    def from_yaml(cls, loader, node):
        dic = loader.construct_mapping(node, deep=True)
        return cls(**dic)

    yaml_loader = yaml.FullLoader
    yaml_tag = "!Bin"
class RectangularBin(Bin):
    """A Bin defined by min and max values in all variables.

    Parameters
    ----------
    variables : iterable of str
        The variables with defined edges.
    edges : iterable of (float, float)
        Lower and upper edges for all variables::

            [[x_lower, x_upper], [y_lower, y_upper], ...]

    include_lower : bool, optional
        Does the bin include the lower edges? Default: ``True``
    include_upper : bool, optional
        Does the bin include the upper edges? Default: ``False``
    **kwargs : optional
        Additional keyword arguments are passed to :class:`Bin`.

    Attributes
    ----------
    value : float
        The value of the bin.
    entries : int
        The number of entries in the bin.
    sumw2 : float
        The sum of squared weights in the bin.
    phasespace : PhaseSpace
        The :class:`PhaseSpace` the bin is defined on
    variables : tuple of str
        The variable names.
    edges : tuple of (float, float)
        The bin edges for each variable.
    include_lower : bool
        Does the bin include the lower edges?
    include_upper : bool
        Does the bin include the upper edges?
    """

    def __init__(
        self, variables, edges, include_lower=True, include_upper=False, **kwargs
    ):
        self.variables = tuple(variables)
        self.edges = tuple(tuple(x) for x in edges)
        self.include_lower = bool(include_lower)
        self.include_upper = bool(include_upper)
        # Robustness: catch silent variable/edge mismatches early.
        if len(self.variables) != len(self.edges):
            raise ValueError("Need exactly one edge pair per variable.")
        # Create a PhaseSpace from the variables if none was given.
        phasespace = kwargs.get("phasespace", None)
        if phasespace is None:
            kwargs["phasespace"] = PhaseSpace(self.variables)
        # Handle default bin initialization
        Bin.__init__(self, **kwargs)
        # Check that all edges are valid tuples
        for i, var in enumerate(self.variables):
            if var not in self.phasespace:
                raise ValueError(f"Variable not part of PhaseSpace: {var}")
            mi, ma = self.edges[i]
            if ma < mi:
                raise ValueError(
                    f"Upper edge is smaller than lower edge for variable {var}."
                )

    def event_in_bin(self, event):
        """Check whether an event is within all bin edges.

        Parameters
        ----------
        event : dict like
            A dictionary (or similar object) with one value of each variable
            in the binning, e.g.::

                {'x': 1.4, 'y': -7.47}

        Returns
        -------
        bool
            Whether or not the variable combination lies within the bin.
        """
        inside = True
        for i, var in enumerate(self.variables):
            mi, ma = self.edges[i]
            val = event[var]
            # Lower edge: strict or inclusive depending on include_lower.
            if self.include_lower:
                if val < mi:
                    inside = False
                    break
            else:
                if val <= mi:
                    inside = False
                    break
            # Upper edge: strict or inclusive depending on include_upper.
            if self.include_upper:
                if val > ma:
                    inside = False
                    break
            else:
                if val >= ma:
                    inside = False
                    break
        return inside

    def get_center(self):
        """Return the bin center coordinates.

        Returns
        -------
        ndarray
            The center coordinates for each variable.
        """
        # np.asfarray was removed in NumPy 2.0; np.asarray with an explicit
        # float dtype is the portable equivalent.
        arr = np.asarray(self.edges, dtype=float)
        return arr.sum(axis=1) / 2.0

    def __eq__(self, other):
        """RectangularBins are equal if they have the same edges."""
        return (
            Bin.__eq__(self, other)
            and sorted(zip(self.variables, self.edges))
            == sorted(zip(other.variables, other.edges))
            and self.include_lower == other.include_lower
            and self.include_upper == other.include_upper
        )

    def _get_clone_kwargs(self, **kwargs):
        """Get the necessary arguments to clone this object."""
        args = {
            "include_upper": self.include_upper,
            "include_lower": self.include_lower,
            "variables": list(self.variables),
            "edges": np.asarray(self.edges).tolist(),
        }
        args.update(Bin._get_clone_kwargs(self, **kwargs))
        return args

    yaml_tag = "!RectangularBin"
class CartesianProductBin(Bin):
    """A Bin that is part of a CartesianProductBinning.

    An event is part of a bin, if it has the right data indices in the
    constituent binnings.

    Parameters
    ----------
    binnings : iterable of Binning
    data_indices : iterable of int
        Specifies the constituent binnings and the respective data indices.
    **kwargs : optional
        Additional keyword arguments are passed to :class:`Bin`.

    Attributes
    ----------
    value : float
        The value of the bin.
    entries : int
        The number of entries in the bin.
    sumw2 : float
        The sum of squared weights in the bin.
    phasespace : PhaseSpace
        The :class:`PhaseSpace` the bin is defined on
    binnings : tuple of Binning
    data_indices : tuple of int
        Specifies the constituent binnings and the respective data indices.
    """

    def __init__(self, binnings, data_indices, **kwargs):
        self.binnings = tuple(binnings)
        self.data_indices = tuple(data_indices)
        # If no phase space was given, build it as the product of the
        # constituent binnings' phase spaces.
        if "phasespace" not in kwargs:
            kwargs["phasespace"] = PhaseSpace([])
            for binning in self.binnings:
                kwargs["phasespace"] *= binning.phasespace
        Bin.__init__(self, **kwargs)

    def event_in_bin(self, event):
        """Check whether an event is within the bin.

        Parameters
        ----------
        event : dict like
            A dictionary (or similar object) with one value of each variable
            in the binning, e.g.::

                {'x': 1.4, 'y': -7.47}

        Returns
        -------
        bool
            Whether or not the variable combination lies within the bin.
        """
        # Check that the event is at the right data position in all binnings
        for binning, i in zip(self.binnings, self.data_indices):
            if binning.get_event_data_index(event) != i:
                return False
        else:
            # for-else: the loop finished without an early return, so the
            # event matched every constituent binning.
            return True

    def __eq__(self, other):
        """CartesianProductBins are equal, if the binnings and indices are equal."""
        try:
            if len(self.binnings) != len(other.binnings):
                return False
            # Order-insensitive comparison: check both directions so that
            # every (binning, index) pair has a match in the other bin.
            # Try both combinations of self and other
            for A, B in [(self, other), (other, self)]:
                for self_binning, i in zip(A.binnings, A.data_indices):
                    # For each binning and index in self...
                    for other_binning, j in zip(B.binnings, B.data_indices):
                        # ... check that there is a matchin binning and index in other
                        if self_binning == other_binning and i == j:
                            break
                    else:
                        # Otherwise return `False`
                        return False
            # Found a match for all elements
            return Bin.__eq__(self, other)
        except AttributeError:
            # `other` lacks the expected attributes, so it cannot be equal.
            return False

    def _get_clone_kwargs(self, **kwargs):
        """Get the necessary arguments to clone this object."""
        args = {
            "binnings": [binning.clone(dummy=True) for binning in self.binnings],
            "data_indices": list(self.data_indices),
        }
        args.update(Bin._get_clone_kwargs(self, **kwargs))
        return args

    yaml_tag = "!CartesianProductBin"
class Binning(yaml.YAMLObject):
"""A Binning is a set of disjunct Bins.
Parameters
----------
bins : list of Bin
The list of disjoint bins.
subbinnings : dict of {bin_index: Binning}, optional
Subbinnings to replace certain bins.
value_array : slice of ndarray, optional
A slice of a numpy array, where the values of the bins will be stored.
entries_array : slice of ndarray, optional
A slice of a numpy array, where the number of entries will be stored.
sumw2_array : slice of ndarray, optional
A slice of a numpy array, where the squared weights will be stored.
phasespace : PhaseSpace, optional
The :class:`PhaseSpace` the binning resides in.
dummy : bool, optional
Do not create any arrays to store the data.
Attributes
----------
bins : tuple of Bin
The list of disjoint bins on the PhaseSpace.
nbins : int
The number of bins in the binning.
data_size : int
The number of elements in the data arrays.
Might differ from ``nbins`` due to subbinnings.
subbinnings : dict of {bin_index: Binning}, optional
Subbinnings to replace certain bins.
value_array : slice of ndarray
A slice of a numpy array, where the values of the bins are stored.
entries_array : slice of ndarray
A slice of a numpy array, where the number of entries are stored.
sumw2_array : slice of ndarray
A slice of a numpy array, where the squared weights are stored.
phasespace : PhaseSpace
The :class:`PhaseSpace` the binning resides in.
Notes
-----
Subbinnings are used to get a finer binning within a given bin. The bin to
be replaced by the finer binning is specified using the *native* bin
index, i.e. the number it would have before the sub binnings are assigned.
Subbinnings are inserted into the numpy arrays at the position of the
original bins. This changes the *effective* bin number of all later bins.
The data itself is stored in Numpy arrays (or views of such) that are
managed by the :class:`Binning`. The arrays are linked to the contained
:class:`Bin` objects and subbinnings by setting their respective storage
arrays to sliced views of the data arrays. The original arrays in the bins
and subbinnings will always be replaced.
"""
def __init__(
    self,
    bins,
    subbinnings=None,
    value_array=None,
    entries_array=None,
    sumw2_array=None,
    phasespace=None,
    dummy=False,
):
    # Bins may be a _BinProxy (bins generated on demand, see
    # CartesianProductBinning) or any regular sequence of Bin objects.
    if isinstance(bins, _BinProxy):
        self.bins = bins
    else:
        self.bins = tuple(bins)
    if subbinnings is None:
        self.subbinnings = {}
    else:
        self.subbinnings = dict(subbinnings)
    self.phasespace = phasespace
    if self.phasespace is None:
        # Derive the phase space from the bins and subbinnings
        self.phasespace = self._get_phasespace()
    self.nbins = len(self.bins)
    # The data arrays can be longer than nbins: each subbinning replaces
    # a single bin with `data_size` entries.
    self.data_size = self.nbins
    for binning in self.subbinnings.values():
        self.data_size += (
            binning.data_size - 1
        )  # Minus one, since one bin gets replaced
    if not dummy:
        # Create (or validate) the storage arrays and link them into the
        # bins and subbinnings.
        self.value_array = value_array
        if self.value_array is None:
            self.value_array = np.zeros(self.data_size, dtype=float)
        if self.value_array.shape != (self.data_size,):
            raise TypeError("Value array shape is not same as (data_size,)!")
        self.entries_array = entries_array
        if self.entries_array is None:
            self.entries_array = np.zeros(self.data_size, dtype=int)
        if self.entries_array.shape != (self.data_size,):
            raise TypeError("Entries array shape is not same as (data_size,)!")
        self.sumw2_array = sumw2_array
        if self.sumw2_array is None:
            self.sumw2_array = np.zeros(self.data_size, dtype=float)
        if self.sumw2_array.shape != (self.data_size,):
            raise TypeError("Sumw2 array shape is not same as (data_size,)!")
        self.link_arrays()
    else:
        # Dummy binning: no data storage at all
        self.value_array = None
        self.entries_array = None
        self.sumw2_array = None
def _get_phasespace(self):
    """Get PhaseSpace from Bins and subbinnings."""
    ps = PhaseSpace([])
    for bin in self.bins:
        ps *= bin.phasespace
    for binning in self.subbinnings.values():
        ps *= binning.phasespace
    return ps

def link_arrays(self):
    """Link the data storage arrays into the bins and sub_binnings."""
    self._link_bins()
    self._link_subbinnings()

def _link_bins(self):
    # Give each bin a length-1 view into the data arrays so that the bin
    # and the binning always see the same values.
    for i, bin in enumerate(self.bins):
        j = self.get_bin_data_index(i)
        bin.value_array = self.value_array[j : j + 1]
        bin.entries_array = self.entries_array[j : j + 1]
        bin.sumw2_array = self.sumw2_array[j : j + 1]

def _link_subbinnings(self):
    # Give each subbinning a view of its slice of the data arrays.
    for i, binning in self.subbinnings.items():
        j = self.get_bin_data_index(i)
        n = binning.data_size
        binning.value_array = self.value_array[j : j + n]
        binning.entries_array = self.entries_array[j : j + n]
        binning.sumw2_array = self.sumw2_array[j : j + n]
        # Also make the subbinnings link the new arrays
        binning.link_arrays()
def get_event_data_index(self, event):
    """Get the data array index of the given event.

    Returns `None` if the event does not belong to any bin.

    Parameters
    ----------
    event : dict like
        A dictionary (or similar object) with one value of each variable
        in the binning, e.g.::

            {'x': 1.4, 'y': -7.47}

    Returns
    -------
    int or None
        The bin number

    See also
    --------
    get_event_bin_index
    """
    bin_i = self.get_event_bin_index(event)
    data_i = self.get_bin_data_index(bin_i)
    if bin_i in self.subbinnings:
        # Offset into the subbinning that replaces this bin.
        # NOTE(review): assumes the subbinning covers the whole bin; if the
        # event missed all subbins this would add `None` — confirm upstream.
        data_i += self.subbinnings[bin_i].get_event_data_index(event)
    return data_i

def get_event_bin_index(self, event):
    """Get the bin number of the given event.

    Returns `None` if the event does not belong to any bin.

    Parameters
    ----------
    event : dict like
        A dictionary (or similar object) with one value of each variable
        in the binning, e.g.::

            {'x': 1.4, 'y': -7.47}

    Returns
    -------
    int or None
        The bin number

    Notes
    -----
    The bin number can be used to access the corresponding :class:`Bin`,
    or the subbinning in that bin (if it exists)::

        i = binning.get_event_bin_index(event)
        binning.bins[i]
        binning.subbinnings[i]

    This is *not* the same as the corresponding index in the data array if
    there are any subbinnings present.

    This is a dumb method that just loops over all bins until it finds a
    fitting one. It should be replaced with something smarter for more
    specific binning classes.

    See also
    --------
    get_event_data_index
    get_event_bin
    """
    # Linear search; relies on the bins' __contains__ implementation
    for i in range(len(self.bins)):
        if event in self.bins[i]:
            return i
    return None
def get_bin_data_index(self, bin_i):
    """Calculate the data array index from the bin number."""
    if bin_i is None:
        return None
    data_i = bin_i
    # Every subbinning that sits *before* this bin shifts the data
    # position by its size minus the one bin it replaces.
    for i, binning in self.subbinnings.items():
        if i < bin_i:
            data_i = data_i + (
                binning.data_size - 1
            )  # Minus one, because the original bin is replaced
    return data_i

def get_data_bin_index(self, data_i):
    """Calculate the bin number from the data array index.

    All data indices inside a subbinning will return the bin index of that
    subbinning.
    """
    if data_i is None:
        return None
    bin_i = data_i
    # Walk over the subbinnings in ascending bin order and undo the
    # offset that each one introduces.
    for i in sorted(self.subbinnings.keys()):
        if i > bin_i:
            # All remaining subbinnings start after this position
            return bin_i
        if i + self.subbinnings[i].data_size > bin_i:
            # The data index lies inside this subbinning
            return i
        bin_i -= self.subbinnings[i].data_size - 1
    return bin_i
def get_event_bin(self, event):
    """Get the bin of the event.

    Returns `None` if the event does not fit in any bin.

    Parameters
    ----------
    event : dict like
        A dictionary (or similar object) with one value of each variable
        in the binning, e.g.::

            {'x': 1.4, 'y': -7.47}

    Returns
    -------
    Bin or None
        The :class:`Bin` object the event fits into.
    """
    index = self.get_event_bin_index(event)
    return None if index is None else self.bins[index]

def get_adjacent_bin_indices(self):
    """Return a list of adjacent bin indices.

    Returns
    -------
    adjacent_indices : list of ndarray
        The adjacent indices of each bin
    """
    # A generic Binning has no notion of adjacency, so every bin reports
    # an empty set of neighbours.
    return [np.array([], dtype=int)] * self.nbins
def get_adjacent_data_indices(self):
    """Return a list of adjacent data indices.

    Returns
    -------
    adjacent_indices : list of ndarray
        The adjacent indices of each data index

    Notes
    -----
    Data indices inside a subbinning will only ever be adjacent to other
    indices inside the same subbinning. There is no information available
    about which bins in a subbinning are adjacent to which bins in the
    parent binning.
    """
    # Start with adjacent bins
    i_bin = self.get_adjacent_bin_indices()
    # Replace bin indices with data indices
    # and remove references to subbinnings
    i_data = []
    for i, adj in enumerate(i_bin):
        if i not in self.subbinnings:
            # Regular bin
            # Add neighbouring bins translated to data indices
            i_data.append([])
            for j in adj:
                # Neighbours that are subbinnings are dropped entirely
                if j not in self.subbinnings:
                    i_data[-1].append(self.get_bin_data_index(j))
            i_data[-1] = np.array(i_data[-1], dtype=int)
        else:
            # Subbinning
            # Add its adjacent data indices offset to correct position
            offset = self.get_bin_data_index(i)
            # NOTE: `adj` is deliberately re-bound here; the outer loop
            # variable is not used again in this iteration.
            for adj in self.subbinnings[i].get_adjacent_data_indices():
                i_data.append(adj + offset)
    return i_data
def fill(self, event, weight=1, raise_error=False, rename=None):
    """Fill the events into their respective bins.

    Parameters
    ----------
    event : [iterable of] dict like or Numpy structured array or Pandas DataFrame
        The event(s) to be filled into the binning.
    weight : float or iterable of floats, optional
        The weight of the event(s).
        Can be either a scalar which is then used for all events
        or an iterable of weights for the single events.
        Default: 1.
    raise_error : bool, optional
        Raise a ValueError if an event is not in the binning.
        Otherwise ignore the event.
        Default: False
    rename : dict, optional
        Dict for translating event variable names to binning variable names.
        Default: `{}`, i.e. no translation
    """
    try:
        if len(event) == 0:
            # Empty iterable? Stop right here
            return
    except TypeError:
        # Not an iterable
        event = [event]
    if rename is None:
        rename = {}
    if len(rename) > 0:
        try:
            # Numpy array?
            event = rename_fields(event, rename)
        except AttributeError:
            try:
                # Pandas DataFrame?
                event = event.rename(index=str, columns=rename)
            except AttributeError:
                # Dict?
                # NOTE: this modifies the event dicts in place.
                for e in event:
                    for name in rename:
                        e[rename[name]] = e[name]
    # Determine the data index of each event, trying the supported
    # input formats one after the other.
    ibins = None
    try:
        # Try to get bin numbers from a pandas DataFrame
        ibins = list(
            map(
                lambda irow: self.get_event_data_index(irow[1]),
                event.iterrows(),
            )
        )
    except AttributeError:
        # Seems like this is not a DataFrame
        pass
    if ibins is None:
        try:
            # Try to get bin numbers from structured numpy array
            ibins = list(map(self.get_event_data_index, np.nditer(event)))
        except TypeError:
            # Seems like this is not a structured numpy array
            pass
    if ibins is None:
        try:
            # Try to get bin numbers from any iterable of events
            ibins = list(map(self.get_event_data_index, event))
        except TypeError:
            # We probably only have a single event. A single dict also
            # ends up here: iterating it yields its keys, and indexing a
            # key string raises the TypeError that is caught here.
            ibins = [self.get_event_data_index(event)]
    if raise_error and None in ibins:
        raise ValueError("Event not part of binning!")
    # Compare len of weight list and event list
    try:
        if len(ibins) != len(weight):
            raise ValueError("Different length of event and weight lists!")
    except TypeError:
        # Scalar weight: use it for every event
        weight = [weight] * len(ibins)
    for i, w in zip(ibins, weight):
        if i is not None:
            self.fill_data_index(i, w)
def fill_data_index(self, i, weight=1.0):
    """Add the weight(s) to the given data position.

    Also increases the number of entries and sum of squared weights accordingly.

    Parameters
    ----------
    i : int
        The index of the data arrays to be filled.
    weight : float or iterable of floats, optional
        Weight(s) to be added to the value of the bin.
    """
    try:
        count = len(weight)
    except TypeError:
        # Scalar weight: one entry
        total = weight
        total_sq = weight**2
        count = 1
    else:
        # Iterable of weights: accumulate them all at once
        warr = np.asarray(weight)
        total = np.sum(warr)
        total_sq = np.sum(warr**2)
    self.value_array[i] += total
    self.entries_array[i] += count
    self.sumw2_array[i] += total_sq
@staticmethod
def _genfromtxt(filename, delimiter=",", names=True, chunksize=10000):
    """Replacement for numpy's genfromtxt, that should need less memory.

    Reads the file in chunks of `chunksize` rows and concatenates them
    incrementally instead of holding all rows as Python objects at once.
    All values are parsed as floats.
    """
    with open(filename) as f:
        if names:
            # First line holds the column names -> structured dtype
            namelist = f.readline().split(delimiter)
            dtype = [(name.strip(), float) for name in namelist]
        else:
            namelist = None
            dtype = float
        arr = np.array([], dtype=dtype)
        rows = []
        for line in f:
            if len(rows) >= chunksize:
                # Flush the accumulated rows into the result array
                arr = np.concatenate((arr, np.array(rows, dtype=dtype)), axis=0)
                rows = []
            rows.append(tuple(map(float, line.split(delimiter))))
        # Flush the remaining rows
        arr = np.concatenate((arr, np.array(rows, dtype=dtype)), axis=0)
    return arr
_csv_buffer = {}  # class-level cache: {filename: open temporary file with np.save data}

@classmethod
def _load_csv_file_buffered(cls, filename, chunksize):
    """Load a CSV file and save the resulting array in a temporary file.

    If the same file is loaded a second time, the buffer is loaded instead
    of re-parsing the CSV file.
    """
    if filename in cls._csv_buffer:
        # File has been loaded before
        f = cls._csv_buffer[filename]
        f.seek(0)
        arr = np.load(f)
    else:
        # New file
        f = TemporaryFile()
        arr = cls._genfromtxt(
            filename, delimiter=",", names=True, chunksize=chunksize
        )
        np.save(f, arr)
        cls._csv_buffer[filename] = f
    return arr
@classmethod
def fill_multiple_from_csv_file(
    cls,
    binnings,
    filename,
    weightfield=None,
    weight=1.0,
    rename=None,
    cut_function=lambda x: x,
    buffer_csv_files=False,
    chunksize=10000,
    **kwargs,
):
    """Fill multiple Binnings from the same csv file(s).

    This method saves time, because the numpy array only has to be
    generated once. Other than the list of binnings to be filled, the
    (keyword) arguments are identical to the ones used by the instance
    method :meth:`fill_from_csv_file`.
    """
    if rename is None:
        rename = {}
    # Handle lists recursively
    if isinstance(filename, list):
        try:
            # One weight per file?
            for item, w in zip(filename, weight):
                cls.fill_multiple_from_csv_file(
                    binnings,
                    item,
                    weightfield=weightfield,
                    weight=w,
                    rename=rename,
                    cut_function=cut_function,
                    buffer_csv_files=buffer_csv_files,
                    chunksize=chunksize,  # BUGFIX: was dropped in the recursion
                    **kwargs,
                )
        except TypeError:
            # Scalar weight applied to all files
            for item in filename:
                cls.fill_multiple_from_csv_file(
                    binnings,
                    item,
                    weightfield=weightfield,
                    weight=weight,
                    rename=rename,
                    cut_function=cut_function,
                    buffer_csv_files=buffer_csv_files,
                    chunksize=chunksize,  # BUGFIX: was dropped in the recursion
                    **kwargs,
                )
        return
    if buffer_csv_files:
        data = cls._load_csv_file_buffered(filename, chunksize=chunksize)
    else:
        data = cls._genfromtxt(
            filename, delimiter=",", names=True, chunksize=chunksize
        )
    # Optional renaming and cutting happen before the weights are read
    data = rename_fields(data, rename)
    data = cut_function(data)
    if weightfield is not None:
        # Per-event weights from a data column, scaled by `weight`
        weight = data[weightfield] * weight
    for binning in binnings:
        binning.fill(data, weight=weight, **kwargs)
def fill_from_csv_file(self, *args, **kwargs):
    """Fill the binning with events from a CSV file.

    Parameters
    ----------
    filename : string or list of strings
        The csv file with the data. Can be a list of filenames.
    weightfield : string, optional
        The column with the event weights.
    weight : float or iterable of floats, optional
        A single weight that will be applied to all events in the file.
        Can be an iterable with one weight for each file if `filename` is a list.
    rename : dict, optional
        A dict with columns that should be renamed before filling::

            {'csv_name': 'binning_name'}

    cut_function : function, optional
        A function that modifies the loaded data before filling into the binning,
        e.g.::

            cut_function(data) = data[ data['binning_name'] > some_threshold ]

        This is done *after* the optional renaming.
    buffer_csv_files : bool, optional
        Save the results of loading CSV files in temporary files
        that can be recovered if the same CSV file is loaded again. This
        speeds up filling multiple Binnings with the same CSV-files considerably!
        Default: False
    chunksize : int, optional
        Load csv file in chunks of <chunksize> rows. This reduces the memory
        footprint of the loading operation, but can slow it down.
        Default: 10000

    Notes
    -----
    The file must be formatted like this::

        first_varname,second_varname,...
        <first_value>,<second_value>,...
        <first_value>,<second_value>,...
        <first_value>,<second_value>,...
        ...

    For example::

        x,y,z
        1.0,2.1,3.2
        4.1,2.0,2.9
        3,2,1

    All values are interpreted as floats. If `weightfield` is given, that
    field will be used as weights for the event. Other keyword arguments
    are passed on to the Binning's :meth:`fill` method. If filename is a list,
    all elements are handled recursively.
    """
    # Actual filling is handled by static method
    Binning.fill_multiple_from_csv_file([self], *args, **kwargs)
def reset(self, value=0.0, entries=0, sumw2=0.0):
    """Reset all bin values to 0.

    Parameters
    ----------
    value : float, optional
        Set the bin values to this value.
    entries : int, optional
        Set the number of entries in each bin to this value.
    sumw2 : float, optional
        Set the sum of squared weights in each bin to this value.
    """
    # Fill each storage array in place with its reset value.
    for array, fill_value in (
        (self.value_array, value),
        (self.entries_array, entries),
        (self.sumw2_array, sumw2),
    ):
        array.fill(fill_value)
@staticmethod
def _copy_as_ndarray(data, shape=None, indices=None):
    """Return a reshaped copy of (a subset of) the given data array.

    Shared implementation of the ``get_*_as_ndarray`` methods; the
    original code triplicated this logic with inconsistent copy calls
    (``np.array`` vs ``np.copy`` — both produce an independent copy).
    """
    if indices is None:
        indices = slice(None, None, None)
    ret = np.array(data[indices])
    if shape is not None:
        return ret.reshape(shape, order="C")
    return ret.reshape((ret.size,), order="C")

def get_values_as_ndarray(self, shape=None, indices=None):
    """Return the bin values as ndarray.

    Parameters
    ----------
    shape: tuple of ints
        Shape of the resulting array.
        Default: ``(len(bins),)``
    indices: list of ints
        Only return the given bins.
        Default: Return all bins.

    Returns
    -------
    ndarray
        An ndarray with the values of the bins.
    """
    return self._copy_as_ndarray(self.value_array, shape, indices)

def set_values_from_ndarray(self, arr):
    """Set the bin values to the values of the ndarray."""
    self.value_array.flat[:] = np.asarray(arr).flat

def get_entries_as_ndarray(self, shape=None, indices=None):
    """Return the number of entries in the bins as ndarray.

    Parameters
    ----------
    shape: tuple of ints
        Shape of the resulting array.
        Default: ``(len(bins),)``
    indices: list of ints
        Only return the given bins.
        Default: Return all bins.

    Returns
    -------
    ndarray
        An ndarray with the numbers of entries of the bins.
    """
    return self._copy_as_ndarray(self.entries_array, shape, indices)

def set_entries_from_ndarray(self, arr):
    """Set the number of bin entries to the values of the ndarray."""
    self.entries_array.flat[:] = np.asarray(arr).flat

def get_sumw2_as_ndarray(self, shape=None, indices=None):
    """Return the sum of squared weights in the bins as ndarray.

    Parameters
    ----------
    shape: tuple of ints
        Shape of the resulting array.
        Default: ``(len(bins),)``
    indices: list of ints
        Only return the given bins.
        Default: Return all bins.

    Returns
    -------
    ndarray
        An ndarray with the sum of squared weights of the bins.
    """
    return self._copy_as_ndarray(self.sumw2_array, shape, indices)

def set_sumw2_from_ndarray(self, arr):
    """Set the sums of squared weights to the values of the ndarray."""
    self.sumw2_array.flat[:] = np.asarray(arr).flat
def event_in_binning(self, event):
    """Check whether an event fits into any of the bins."""
    return self.get_event_data_index(event) is not None

def is_dummy(self):
    """Return `True` if there is no data array linked to this binning."""
    return self.value_array is None

def __contains__(self, event):
    return self.event_in_binning(event)

def __eq__(self, other):
    """Binnings are equal if all bins and the phase space are equal."""
    try:
        return (
            self.bins == other.bins
            and self.phasespace == other.phasespace
            and self.subbinnings == other.subbinnings
        )
    except AttributeError:
        # `other` is not a Binning-like object. Return False instead of
        # raising, consistent with the other __eq__ methods in this module.
        return False

def __ne__(self, other):
    return not self == other
def marginalize_subbinnings_on_ndarray(self, array, bin_indices=None):
    """Marginalize out the bins corresponding to the subbinnings.

    Parameters
    ----------
    array : ndarray
        The data to work on.
    bin_indices : list of int, optional
        The bin indices of the subbinnings to be marginalized.
        If no indices are specified, all subbinnings are marginalized.

    Returns
    -------
    new_array : ndarray
    """
    if bin_indices is None:
        bin_indices = self.subbinnings.keys()
    # Create working copy of input array
    new_array = np.array(array)
    # Determine indices to be removed and set new values
    remove_i = []
    for i in bin_indices:
        if i in self.subbinnings:
            binning = self.subbinnings[i]
        else:
            raise ValueError("No subbinning at bin index %d!" % (i,))
        i_data = self.get_bin_data_index(i)
        n_data = binning.data_size
        remove_i.extend(
            range(i_data + 1, i_data + n_data)
        )  # Skip first one, since we substitute a single bin
        # Set marginalized value: the sum over the whole subbinning slice
        new_array[i_data] = np.sum(new_array[i_data : i_data + n_data])
    # Remove marginalized elements
    remove_i = np.array(sorted(remove_i), dtype=int)
    new_array = np.delete(new_array, remove_i, axis=0)
    return new_array

def marginalize_subbinnings(self, bin_indices=None):
    """Return a clone of the Binning with subbinnings removed.

    Parameters
    ----------
    bin_indices : list of int, optional
        The bin indices of the subbinnings to be marginalized.
        If no indices are specified, all subbinnings are marginalized.

    Returns
    -------
    new_binning : Binning
    """
    if bin_indices is None:
        bin_indices = self.subbinnings.keys()
    # Clone the subbinnings that will remain in the binning
    subbinnings = {}
    for i in self.subbinnings:
        if i not in bin_indices:
            binning = self.subbinnings[i].clone(dummy=True)
            subbinnings[i] = binning
    kwargs = {"subbinnings": subbinnings}
    if self.is_dummy():
        # No data arrays to marginalize
        pass
    else:
        kwargs.update(
            {
                "value_array": self.marginalize_subbinnings_on_ndarray(
                    self.value_array, bin_indices
                ),
                "entries_array": self.marginalize_subbinnings_on_ndarray(
                    self.entries_array, bin_indices
                ),
                "sumw2_array": self.marginalize_subbinnings_on_ndarray(
                    self.sumw2_array, bin_indices
                ),
            }
        )
    return self.clone(**kwargs)
def insert_subbinning_on_ndarray(self, array, bin_index, insert_array):
    """Insert values of a new subbinning into the array.

    Parameters
    ----------
    array : ndarray
        The data to work on.
    bin_index : int
        The bin to be replaced with the subbinning.
    insert_array : ndarray
        The array to be inserted.

    Returns
    -------
    new_array : ndarray
        The modified array.
    """
    i_data = self.get_bin_data_index(bin_index)
    new_array = np.insert(
        array, i_data + 1, insert_array[1:], axis=0
    )  # Do not insert the first element
    new_array[i_data] = insert_array[
        0
    ]  # Instead overwrite the value of the replaced bin
    return new_array

def insert_subbinning(self, bin_index, binning):
    """Insert a new subbinning into the binning.

    Parameters
    ----------
    bin_index : int
        The bin to be replaced with the subbinning.
    binning : Binning
        The new subbinning

    Returns
    -------
    new_binning : Binning
        A copy of this binning with the new subbinning.

    Warnings
    --------
    This will replace the content of the bin with the content of the new
    subbinning!
    """
    if bin_index in self.subbinnings:
        raise ValueError("Bin %d already has a subbinning!" % (bin_index,))
    # Dummy-clone the existing subbinnings and add the new one
    subbinnings = {}
    for i, b in self.subbinnings.items():
        subbinnings[i] = b.clone(dummy=True)
    subbinnings[bin_index] = binning
    kwargs = {
        "subbinnings": subbinnings,
        "value_array": self.insert_subbinning_on_ndarray(
            self.value_array, bin_index, binning.value_array
        ),
        "entries_array": self.insert_subbinning_on_ndarray(
            self.entries_array, bin_index, binning.entries_array
        ),
        "sumw2_array": self.insert_subbinning_on_ndarray(
            self.sumw2_array, bin_index, binning.sumw2_array
        ),
    }
    return self.clone(**kwargs)
def __add__(self, other):
    """Return a clone whose values, entries and sumw2 are element-wise sums."""
    result = self.clone()
    # Combine each of the three data arrays via the public getter/setter
    # pairs, so subbinnings and array shapes are handled consistently.
    for field in ("values", "entries", "sumw2"):
        getter = "get_%s_as_ndarray" % (field,)
        setter = "set_%s_from_ndarray" % (field,)
        combined = getattr(self, getter)() + getattr(other, getter)()
        getattr(result, setter)(combined)
    return result
def _get_clone_kwargs(self, **kwargs):
    """Get the necessary arguments to clone this object."""
    args = {
        "subbinnings": {
            i: binning.clone(dummy=True) for i, binning in self.subbinnings.items()
        },
        "phasespace": deepcopy(self.phasespace),
    }
    if "bins" in kwargs:
        # Overwrite bins and do not re-create them one by one
        args["bins"] = kwargs["bins"]
    else:
        # Re-create the bins one by one
        args["bins"] = [bin.clone(dummy=True) for bin in self.bins]
    if self.is_dummy() or kwargs.get("dummy", False):
        args["dummy"] = True
    else:
        # Deep-copy the data arrays so the clone is independent
        args.update(
            {
                "value_array": deepcopy(self.value_array),
                "entries_array": deepcopy(self.entries_array),
                "sumw2_array": deepcopy(self.sumw2_array),
            }
        )
    # Caller-provided kwargs always win
    args.update(kwargs)
    return args

def clone(self, **kwargs):
    """Create a functioning copy of the Binning.

    Can specify additional kwargs for the initialisation of the new Binning.
    """
    args = self._get_clone_kwargs(**kwargs)
    return type(self)(**args)

def __repr__(self):
    # Reconstructable representation built from the clone arguments
    return "{}({})".format(
        type(self).__name__,
        ", ".join([f"{k}={v!r}" for k, v in self._get_clone_kwargs().items()]),
    )

@classmethod
def to_yaml(cls, dumper, obj):
    # Serialise via the clone kwargs of a dummy clone (no data arrays).
    dic = obj._get_clone_kwargs(dummy=True)
    if not obj.is_dummy():
        # Non-dummy binnings must be re-created with data arrays on load,
        # so drop the "dummy" flag again.
        del dic["dummy"]
    return dumper.represent_mapping(cls.yaml_tag, dic)

@classmethod
def from_yaml(cls, loader, node):
    dic = loader.construct_mapping(node, deep=True)
    return cls(**dic)

yaml_loader = yaml.FullLoader
yaml_tag = "!Binning"
class RectangularBinning(Binning):
    """Binning that contains only :class:`RectangularBin`

    Parameters
    ----------
    variables : list of str
        The variables the binning is defined on.
    bin_edges : list of ((float, float), (float, float), ...)
        The list of bin edges defining the bins. The tuples contain the lower
        and upper edges of all `variables`, e.g.::

            [
                ((x_low, x_high), (y_low, y_high)),
                ((x_low, x_high), (y_low, y_high)),
                ...
            ]

    **kwargs : optional
        Additional keyword arguments will be passed to :class:`Binning`.

    Attributes
    ----------
    variables : tuple of str
        The variables corresponding to the bin edges.
    include_upper : bool
        Include the upper rather than the lower bin edges.
    bins : tuple of Bin
        The tuple of RectangularBins.
    nbins : int
        The number of bins in the binning.
    data_size : int
        The number of elements in the data arrays.
        Might differ from ``nbins`` due to subbinnings.
    subbinnings : dict of {bin_index: Binning}
        Subbinnings to replace certain bins.
    value_array : slice of ndarray
        A slice of a numpy array, where the values of the bins are stored.
    entries_array : slice of ndarray
        A slice of a numpy array, where the number of entries are stored.
    sumw2_array : slice of ndarray
        A slice of a numpy array, where the squared weights are stored.
    phasespace : PhaseSpace
        The :class:`PhaseSpace` the binning resides in.
    """

    def __init__(self, variables, bin_edges, include_upper=False, **kwargs):
        self.variables = tuple(variables)
        self.include_upper = bool(include_upper)
        bins = []
        for edges in bin_edges:
            # Each bin is either lower- or upper-edge inclusive,
            # consistently for the whole binning.
            bins.append(
                RectangularBin(
                    variables=variables,
                    edges=edges,
                    include_upper=self.include_upper,
                    include_lower=not self.include_upper,
                    dummy=True,
                )
            )
        Binning.__init__(self, bins=bins, **kwargs)

    def _get_clone_kwargs(self, **kwargs):
        """Get the necessary arguments to clone this object."""
        variables = list(self.variables)
        bin_edges = []  # Turn all tuples into lists
        for bn in self.bins:
            bin_edges.append([list(x) for x in bn.edges])
        args = {
            "variables": list(variables),
            "bin_edges": bin_edges,
            "include_upper": self.include_upper,
        }
        # The bins are re-created from `bin_edges` in __init__, so the
        # base class must not contribute a "bins" argument of its own.
        args.update(Binning._get_clone_kwargs(self, bins=None, **kwargs))
        del args["bins"]
        return args

    yaml_tag = "!RectangularBinning"
class _BinProxy:
    """Base class for all bin proxies.

    A bin proxy makes a Binning's bins accessible like a sequence without
    storing actual :class:`Bin` objects; subclasses must provide
    ``__getitem__`` to build bins on demand.
    """

    def __init__(self, binning):
        self.binning = binning

    def __len__(self):
        # One proxy entry per bin of the proxied binning
        return self.binning.nbins

    def __iter__(self):
        # Delegates to the subclass' __getitem__
        for i in range(len(self)):
            yield self[i]

    def __eq__(self, other):
        """Proxies are equal if their binnings are equal."""
        try:
            return self.binning == other.binning
        except AttributeError:
            # `other` is not a bin proxy (e.g. a plain tuple of bins).
            # Return False instead of raising, consistent with the other
            # __eq__ methods in this module.
            return False

    def __ne__(self, other):
        return not self == other
class _CartesianProductBinProxy(_BinProxy):
    """Indexable class that creates bins on the fly."""

    def __getitem__(self, index):
        """Dynamically build a CartesianProductBin when requested."""
        # Constituent data indices of the requested bin
        tup = self.binning.get_bin_index_tuple(index)
        # Translate the bin number to its position in the data arrays
        index = self.binning.get_bin_data_index(index)
        # Length-1 views, so the bin shares storage with the binning
        val_slice = self.binning.value_array[index : index + 1]
        ent_slice = self.binning.entries_array[index : index + 1]
        sumw2_slice = self.binning.sumw2_array[index : index + 1]
        binnings = []
        data_indices = []
        for i, j in enumerate(tup):
            binnings.append(self.binning.binnings[i])
            data_indices.append(j)
        bin = CartesianProductBin(
            binnings,
            data_indices,
            value_array=val_slice,
            entries_array=ent_slice,
            sumw2_array=sumw2_slice,
        )
        return bin
class CartesianProductBinning(Binning):
"""A Binning that is the cartesian product of two or more Binnings
Parameters
----------
binnings : list of Binning
The Binning objects to be multiplied.
Attributes
----------
binnings : tuple of Binning
The :class:`Binning` objects that make up the Cartesian product.
bins : proxy for Bins
Proxy that will generate :class:`CartesianProductBin` instances,
when accessed.
nbins : int
The number of bins in the binning.
bins_shape : tuple of int
The sizes of the constituent binnings.
data_size : int
The number of elements in the data arrays.
Might differ from ``nbins`` due to subbinnings.
subbinnings : dict of {bin_index: Binning}
Subbinnings to replace certain bins.
value_array : slice of ndarray
A slice of a numpy array, where the values of the bins are stored.
entries_array : slice of ndarray
A slice of a numpy array, where the number of entries are stored.
sumw2_array : slice of ndarray
A slice of a numpy array, where the squared weights are stored.
phasespace : PhaseSpace
The :class:`PhaseSpace` the binning resides in.
Notes
-----
This creates a Binning with as many bins as the product of the number of
bins in the input binnings.
"""
def __init__(self, binnings, **kwargs):
    self.binnings = tuple(binnings)
    # Shape of the cartesian product: one axis per constituent binning
    self.bins_shape = tuple(binning.data_size for binning in self.binnings)
    self._stepsize = [1]
    # Calculate the step size (or stride) for each binning index.
    # We use a row-major ordering (C-style).
    # The index of the later binnings varies faster than the ones before:
    #
    #     (0,0) <-> 0
    #     (0,1) <-> 1
    #     (0,2) <-> 2
    #     (1,0) <-> 3
    #     ...
    #
    # _stepsize is 1 longer than binnings and bins_shape!
    for n in reversed(self.bins_shape):
        self._stepsize.insert(0, self._stepsize[0] * n)
    self._stepsize = tuple(self._stepsize)
    # Total number of bins is the product of all constituent sizes
    self.nbins = self._stepsize[0]
    phasespace = kwargs.get("phasespace", None)
    if phasespace is None:
        # Create phasespace from binnings
        phasespace = PhaseSpace([])
        for binning in self.binnings:
            phasespace *= binning.phasespace
        kwargs["phasespace"] = phasespace
    bins = kwargs.pop("bins", None)
    if bins is not None:
        raise TypeError(
            "Cannot define bins of CartesianProductBinning! Define binnings instead."
        )
    else:
        # Create bin proxy
        bins = _CartesianProductBinProxy(self)
    kwargs["bins"] = bins
    Binning.__init__(self, **kwargs)

def _link_bins(self):
    # We do not need to link each bin separately,
    # the bin proxy takes care of this
    pass
def get_tuple_bin_index(self, tup):
    """Translate a tuple of binning specific bin indices to the linear bin index of the event.

    Turns this::

        (i_x, i_y, i_z)

    into this::

        i_bin

    The order of the indices in the tuple must conform to the order of
    `binnings`. The bins are ordered row-major (C-style), i.e. increasing
    the bin number of the last binning by one increases the overall bin
    number also by one. The increments of the other variables depend on the
    number of bins in each variable.
    """
    if None in tup:
        # The event missed at least one constituent binning
        return None
    # Dot product of the index tuple with the per-axis strides
    return sum(i * s for i, s in zip(tup, self._stepsize[1:]))
def get_bin_index_tuple(self, i_bin):
    """Translate the linear bin index of the event to a tuple of single binning bin indices.

    Turns this::

        i_bin

    into this::

        (i_x, i_y, i_z)

    The order of the indices in the tuple conforms to the order of
    `binnings`. The bins are ordered row-major (C-style), i.e. increasing
    the bin number of the last variable by one increases the overall bin
    number also by one. The increments of the other variables depend on the
    number of bins in each variable.
    """
    if i_bin is None or i_bin < 0 or i_bin >= self.nbins:
        # Out-of-range indices map to an all-None tuple
        return tuple([None] * len(self.binnings))
    # For each axis: drop the contribution of the preceding axes
    # (modulo the axis' total span) and divide by its own stride.
    tup = tuple(
        (i_bin % t) // s for t, s in zip(self._stepsize[:-1], self._stepsize[1:])
    )
    return tup
def get_event_tuple(self, event):
    """Return the tuple of per-binning data indices for the given event."""
    # Ask every constituent binning where the event falls.
    return tuple(
        binning.get_event_data_index(event) for binning in self.binnings
    )
def get_event_bin_index(self, event):
    """Return the linear bin index for the given event (``None`` if outside)."""
    # Compose the two lookup steps: per-binning indices -> linear index.
    return self.get_tuple_bin_index(self.get_event_tuple(event))
def get_adjacent_bin_indices(self):
    """Return a list of adjacent bin indices.

    Returns
    -------
    adjacent_indices : list of ndarray
        The adjacent indices of each bin
    """
    # Adjacent bins are based on the adjacent data indices of the
    # constituting binnings
    adj_tuple = tuple(b.get_adjacent_data_indices() for b in self.binnings)
    adj = []
    # For all bins
    for i_bin in range(self.nbins):
        i_adj = []
        # Get the tuple of binning data indices
        tup = self.get_bin_index_tuple(i_bin)
        for i_binning in range(len(tup)):
            # Neighbouring data indices along this single axis.
            variations = adj_tuple[i_binning][tup[i_binning]]
            var_tup = list(tup)
            for k in variations:
                # Vary exactly one axis at a time; diagonal neighbours
                # are therefore not included.
                var_tup[i_binning] = k
                i_adj.append(self.get_tuple_bin_index(var_tup))
        adj.append(np.array(sorted(i_adj), dtype=int))
    return adj
def marginalize(self, binning_i, reduction_function=np.sum):
    """Marginalize out the given binnings and return a new CartesianProductBinning.

    Parameters
    ----------
    binning_i : iterable of int
        Iterable of index of binning to be marginalized.
    reduction_function : function
        Use this function to marginalize out the entries over the specified variables.
        Must support the `axis` keyword argument.
        Default: numpy.sum

    Returns
    -------
    CartesianProductBinning
        A new binning without the marginalized axes; values, entries and
        sumw2 are reduced over those axes.
    """
    # Normalize a single scalar index to a one-element list.
    try:
        len(binning_i)
    except TypeError:
        binning_i = [binning_i]
    # Create new binning
    # Constituents are cloned as dummies: the new CartesianProductBinning
    # owns the data arrays itself.
    new_binnings = [binning.clone(dummy=True) for binning in self.binnings]
    # Delete back-to-front so earlier indices stay valid.
    for i in sorted(binning_i, reverse=True):
        del new_binnings[i]
    new_binning = CartesianProductBinning(new_binnings)
    # Copy and project values, from binning without subbinnings
    axes = tuple(sorted(binning_i))
    temp_binning = self.marginalize_subbinnings()
    new_values = reduction_function(
        temp_binning.get_values_as_ndarray(shape=temp_binning.bins_shape), axis=axes
    )
    new_entries = reduction_function(
        temp_binning.get_entries_as_ndarray(shape=temp_binning.bins_shape),
        axis=axes,
    )
    new_sumw2 = reduction_function(
        temp_binning.get_sumw2_as_ndarray(shape=temp_binning.bins_shape), axis=axes
    )
    new_binning.set_values_from_ndarray(new_values)
    new_binning.set_entries_from_ndarray(new_entries)
    new_binning.set_sumw2_from_ndarray(new_sumw2)
    return new_binning
def _unpack(self):
    """Return the unpacked last remaining binning.

    The clone of the single constituent binning takes over this
    binning's data arrays.
    """
    if len(self.binnings) != 1:
        raise RuntimeError("Unpacking only works if there is exactly one binning.")
    if len(self.subbinnings) != 0:
        raise RuntimeError(
            "Unpacking only works if there is exactly zero subbinnings."
        )
    (only_binning,) = self.binnings
    return only_binning.clone(
        value_array=self.value_array,
        entries_array=self.entries_array,
        sumw2_array=self.sumw2_array,
        dummy=False,
    )
def project(self, binning_i, **kwargs):
    """Project the binning onto the given binnings and return a new CartesianProductBinning.

    The order of the original binnings is preserved. If a single ``int`` is
    provided, the returned Binning is of the same type as the respective
    binning.

    Parameters
    ----------
    binning_i : iterable of int, or int
        Index or indices of the binnings to keep; all others are
        marginalized out.
    kwargs : optional
        Additional keyword arguments are passed on to :meth:`marginalize`.

    Returns
    -------
    CartesianProductBinning or type(self.binnings[binning_i])
    """
    # Normalize a single index to a list.
    try:
        keep = list(binning_i)
    except TypeError:
        keep = [binning_i]
    # Which variables to remove. A comprehension replaces the original
    # side-effecting `list(map(rm_i.remove, keep))`, which also raised
    # ValueError for duplicate or out-of-range indices.
    keep_set = set(keep)
    rm_i = [j for j in range(len(self.binnings)) if j not in keep_set]
    ret = self.marginalize(rm_i, **kwargs)
    if isinstance(binning_i, int):
        # A scalar request unwraps to the single remaining binning.
        return ret._unpack()
    else:
        return ret
def __eq__(self, other):
    """CartesianProductBinnings are equal if the included Binnings match."""
    # Short-circuit on type first so we never touch attributes of an
    # unrelated object.
    if type(self) != type(other):
        return False
    if self.binnings != other.binnings:
        return False
    return self.subbinnings == other.subbinnings
def _get_clone_kwargs(self, **kwargs):
    """Get the necessary arguments to clone this object."""
    # Constituents are cloned as dummies; the clone owns the data arrays.
    args = {
        "binnings": list(binning.clone(dummy=True) for binning in self.binnings),
    }
    args.update(Binning._get_clone_kwargs(self, bins=None, **kwargs))
    # The bin proxy is recreated by __init__ and must not be passed on.
    del args["bins"]
    return args

# Tag used by the YAML (de)serialization machinery to identify this class.
yaml_tag = "!CartesianProductBinning"
class _LinearBinProxy(_BinProxy):
    """Indexable helper that materializes bins on the fly."""

    def __getitem__(self, index):
        """Build and return the :class:`RectangularBin` for *index*."""
        binning = self.binning
        edges = binning.bin_edges
        data_index = binning.get_bin_data_index(index)
        kwargs = {
            "variables": [binning.variable],
            "edges": [(edges[index], edges[index + 1])],
            "include_lower": not binning.include_upper,
            "include_upper": binning.include_upper,
        }
        if not binning.is_dummy():
            # Hand out length-1 views into the data arrays so the bin
            # shares storage with the binning.
            data_slice = slice(data_index, data_index + 1)
            kwargs["value_array"] = binning.value_array[data_slice]
            kwargs["entries_array"] = binning.entries_array[data_slice]
            kwargs["sumw2_array"] = binning.sumw2_array[data_slice]
        return RectangularBin(**kwargs)
class LinearBinning(Binning):
    """A simple binning, defined by bin edges on a single variable.

    Parameters
    ----------
    variable : str
        The name of the defining variable.
    bin_edges : list of float
        The bin edges defining the bins.
    include_upper : bool, optional
        Include the upper edge of bins instead of the default lower edge.
    **kwargs : optional
        Additional keyword arguments are handed to :class:`Binning`.

    Attributes
    ----------
    variable : str
        The variable on which the bin edges are defined.
    bin_edges : ndarray
        The bin edges.
    include_upper : bool
        Are the upper edges included in each bin?
    bins : proxy for Bins
        Proxy that will generate :class:`RectangularBin` instances,
        when accessed.
    nbins : int
        The number of bins in the binning.
    data_size : int
        The number of elements in the data arrays.
        Might differ from ``nbins`` due to subbinnings.
    subbinnings : dict of {bin_index: Binning}, optional
        Subbinnings to replace certain bins.
    value_array : slice of ndarray
        A slice of a numpy array, where the values of the bins are stored.
    entries_array : slice of ndarray
        A slice of a numpy array, where the number of entries are stored.
    sumw2_array : slice of ndarray
        A slice of a numpy array, where the squared weights are stored.
    phasespace : PhaseSpace
        The :class:`PhaseSpace` the binning resides in.
    """

    def __init__(self, variable, bin_edges, include_upper=False, **kwargs):
        self.variable = variable
        # `np.asfarray` was removed in NumPy 2.0; `asarray` with an explicit
        # float dtype is the version-independent equivalent.
        self.bin_edges = np.asarray(bin_edges, dtype=float)
        self.include_upper = bool(include_upper)
        self.nbins = self.bin_edges.size - 1
        phasespace = kwargs.get("phasespace", None)
        if phasespace is None:
            # Create phasespace from variable
            phasespace = PhaseSpace([variable])
            kwargs["phasespace"] = phasespace
        bins = kwargs.pop("bins", None)
        if bins is not None:
            raise TypeError(
                "Cannot define bins of LinearBinning! Define bin edges instead."
            )
        else:
            # Create bin proxy
            bins = _LinearBinProxy(self)
        kwargs["bins"] = bins
        Binning.__init__(self, **kwargs)

    def _link_bins(self):
        # We do not need to link each bin separately,
        # the bin proxy takes care of this
        pass

    def get_event_bin_index(self, event):
        """Get the bin index for a given event.

        Returns ``None`` if the event lies below the first or above the
        last bin edge.
        """
        i = int(
            np.digitize(event[self.variable], self.bin_edges, right=self.include_upper)
        )
        # Deal with Numpy's way of handling over- and underflows:
        # digitize returns 0 for underflow and len(bin_edges) for overflow.
        if i > 0 and i < len(self.bin_edges):
            i -= 1
        else:
            i = None
        return i

    def get_adjacent_bin_indices(self):
        """Return a list of adjacent bin indices.

        Returns
        -------
        adjacent_indices : list of ndarray
            The adjacent indices of each bin
        """
        # Bug fix: with a single bin the generic code below reported the
        # non-existent bin 1 as a neighbour. A lone bin has no neighbours.
        if self.nbins <= 1:
            return [np.array([], dtype=int) for _ in range(self.nbins)]
        # Adjacent bins are the ones before and after
        i_bin = np.arange(self.nbins)
        i_bin_m = i_bin - 1
        i_bin_p = i_bin + 1
        adj = list(zip(i_bin_m, i_bin_p))
        adj = list(map(np.array, adj))
        # Remove out of range elements at both ends
        adj[0] = np.array([adj[0][1]])
        adj[-1] = np.array([adj[-1][0]])
        return adj

    def slice(self, start, stop, step=1):
        """Return a new LinearBinning containing the given variable slice.

        Parameters
        ----------
        start : int
        stop : int
        step : int, optional
            The start and stop positions as used with Python slice objects.

        Returns
        -------
        sliced_binning : LinearBinning
            A :class:`LinearBinning` consisting of the specified slice.

        Notes
        -----
        This will remove any ``subbinnings`` the linear binning might have.
        """
        bin_slice = slice(start, stop, step)
        # Create new binning
        lower = self.bin_edges[:-1][bin_slice]
        upper = self.bin_edges[1:][bin_slice]
        # NOTE(review): an empty slice raises IndexError on `upper[-1]`.
        new_bin_edges = list(lower) + [upper[-1]]
        new_binning = LinearBinning(
            variable=self.variable,
            bin_edges=new_bin_edges,
            include_upper=self.include_upper,
        )
        # Copy and slice values
        temp_binning = self.marginalize_subbinnings()
        new_values = temp_binning.get_values_as_ndarray()[bin_slice]
        new_entries = temp_binning.get_entries_as_ndarray()[bin_slice]
        new_sumw2 = temp_binning.get_sumw2_as_ndarray()[bin_slice]
        new_binning.set_values_from_ndarray(new_values)
        new_binning.set_entries_from_ndarray(new_entries)
        new_binning.set_sumw2_from_ndarray(new_sumw2)
        return new_binning

    def remove_bin_edges(self, bin_edge_indices):
        """Return a new LinearBinning with the given bin edges removed.

        The values of the bins adjacent to the removed bin edges will be
        summed up in the resulting larger bin. Please note that bin values
        are lost if the first or last bin edge of a variable are removed.

        Parameters
        ----------
        bin_edge_indices : list of int
            A list specifying the bin edge indices that should be removed.

        Notes
        -----
        This will remove any ``subbinnings`` the linear binning might have.
        """
        # Create new binning
        new_bin_edges = list(self.bin_edges)
        for i in sorted(bin_edge_indices, reverse=True):
            del new_bin_edges[i]
        new_binning = LinearBinning(
            variable=self.variable,
            bin_edges=new_bin_edges,
            include_upper=self.include_upper,
        )
        # Copy and merge values, from binning without subbinnings
        temp_binning = self.marginalize_subbinnings()
        new_values = temp_binning.get_values_as_ndarray()
        new_entries = temp_binning.get_entries_as_ndarray()
        new_sumw2 = temp_binning.get_sumw2_as_ndarray()
        # Process back-to-front so earlier indices stay valid.
        for i in sorted(bin_edge_indices, reverse=True):
            if i > 0 and i < new_values.size:
                # Removing an inner edge merges bin i into bin i-1.
                new_values[i - 1] += new_values[i]
                new_entries[i - 1] += new_entries[i]
                new_sumw2[i - 1] += new_sumw2[i]
            if i < new_values.size:
                new_values = np.delete(new_values, i)
                new_entries = np.delete(new_entries, i)
                new_sumw2 = np.delete(new_sumw2, i)
            else:
                # Removing the last edge drops the last bin (values lost).
                new_values = np.delete(new_values, -1)
                new_entries = np.delete(new_entries, -1)
                new_sumw2 = np.delete(new_sumw2, -1)
        new_binning.set_values_from_ndarray(new_values)
        new_binning.set_entries_from_ndarray(new_entries)
        new_binning.set_sumw2_from_ndarray(new_sumw2)
        return new_binning

    def _get_clone_kwargs(self, **kwargs):
        """Get the necessary arguments to clone this object."""
        args = {
            "variable": self.variable,
            "bin_edges": self.bin_edges.tolist(),
            "include_upper": self.include_upper,
        }
        args.update(Binning._get_clone_kwargs(self, bins=None, **kwargs))
        # The bin proxy is recreated by __init__ and must not be passed on.
        del args["bins"]
        return args

    def __eq__(self, other):
        """Linear binnings are equal if the variable and edges match."""
        return (
            type(self) == type(other)
            and self.variable == other.variable
            and np.all(self.bin_edges == other.bin_edges)
            and self.include_upper == other.include_upper
            and self.subbinnings == other.subbinnings
        )

    # Tag used by the YAML (de)serialization machinery.
    yaml_tag = "!LinearBinning"
class _RectilinearBinProxy(_BinProxy):
    """Indexable helper that materializes bins on the fly."""

    def __getitem__(self, index):
        """Build and return the :class:`RectangularBin` for *index*."""
        binning = self.binning
        tup = binning.get_bin_index_tuple(index)
        edges = tuple(
            (axis_edges[j], axis_edges[j + 1])
            for axis_edges, j in zip(binning.bin_edges, tup)
        )
        data_index = binning.get_bin_data_index(index)
        kwargs = {
            "variables": binning.variables,
            "edges": edges,
            "include_lower": not binning.include_upper,
            "include_upper": binning.include_upper,
        }
        if not binning.is_dummy():
            # Hand out length-1 views into the data arrays so the bin
            # shares storage with the binning.
            data_slice = slice(data_index, data_index + 1)
            kwargs["value_array"] = binning.value_array[data_slice]
            kwargs["entries_array"] = binning.entries_array[data_slice]
            kwargs["sumw2_array"] = binning.sumw2_array[data_slice]
        return RectangularBin(**kwargs)
class RectilinearBinning(CartesianProductBinning):
    """Special case of :class:`CartesianProductBinning` only consisting of :class:`LinearBinning`.

    Parameters
    ----------
    variables : iterable of str
    bin_edges : iterable of iterable of float
        The variable names and bin edges for the LinearBinnings.
    include_upper : bool, optional
        Make bins include upper edges instead of lower edges.
    **kwargs : optional
        Additional keyword arguments will be passed to :class:`CartesianProductBinning`.

    Attributes
    ----------
    variables : tuple of str
        The variables on which the bin edges are defined.
    bin_edges : tuple of ndarray
        The bin edges defining the :class:`LinearBinning` objects.
    include_upper : bool
        Are the upper edges included in each bin?
    binnings : list of LinearBinning
        The :class:`LinearBinning` objects that make up the Cartesian product.
    bins : list of Bin
        The :class:`RectangularBin` instances.
    nbins : int
        The number of bins in the binning.
    bins_shape : tuple of int
        The sizes of the constituent binnings.
    data_size : int
        The number of elements in the data arrays.
        Might differ from ``nbins`` due to subbinnings.
    subbinnings : dict of {bin_index: Binning}, optional
        Subbinnings to replace certain bins.
    value_array : slice of ndarray
        A slice of a numpy array, where the values of the bins are stored.
    entries_array : slice of ndarray
        A slice of a numpy array, where the number of entries are stored.
    sumw2_array : slice of ndarray
        A slice of a numpy array, where the squared weights are stored.
    phasespace : PhaseSpace
        The :class:`PhaseSpace` the binning resides in.
    """

    def __init__(self, variables, bin_edges, include_upper=False, **kwargs):
        self.variables = tuple(variables)
        self.bin_edges = tuple(np.array(edg) for edg in bin_edges)
        self.include_upper = bool(include_upper)
        # One dummy LinearBinning per variable; the data arrays are owned
        # by this object, not by the constituents.
        binnings = []
        for var, edges in zip(self.variables, self.bin_edges):
            binnings.append(
                LinearBinning(var, edges, include_upper=include_upper, dummy=True)
            )
        kwargs["binnings"] = binnings
        bins = kwargs.pop("bins", None)
        if bins is not None:
            raise TypeError(
                "Cannot define bins of RectilinearBinning! Define bin edges instead."
            )
        else:
            # Create bin proxy
            bins = _RectilinearBinProxy(self)
        CartesianProductBinning.__init__(self, **kwargs)
        # Replace cartesian proxy with one returning rectangular bins
        self.bins = bins

    def get_variable_index(self, variable):
        """Return the index of the binning corresponding to this variable.

        Integer arguments are passed through unchanged.
        """
        if isinstance(variable, int):
            return variable
        else:
            return self.variables.index(variable)

    @staticmethod
    def _normalize_binning_arg(binning_i):
        """Turn an int/str/iterable argument into a flat list.

        A single string must *not* be iterated character by character.
        """
        if isinstance(binning_i, str):
            return [binning_i]
        try:
            return list(binning_i)
        except TypeError:
            return [binning_i]

    def marginalize(self, binning_i, reduction_function=np.sum):
        """Marginalize out the given binnings and return a new RectilinearBinning.

        Parameters
        ----------
        binning_i : iterable of int/str, or int/str
            Iterable of index/variable of binning to be marginalized.
        reduction_function : function
            Use this function to marginalize out the entries over the specified variables.
            Must support the `axis` keyword argument.
        """
        # Bug fix: a single multi-character variable name used to be
        # iterated character by character (str supports len()).
        binning_i = self._normalize_binning_arg(binning_i)
        binning_i = [self.get_variable_index(i) for i in binning_i]
        # Create new binning
        new_variables = list(self.variables)
        new_bin_edges = list(deepcopy(self.bin_edges))
        # Delete back-to-front so earlier indices stay valid.
        for i in sorted(binning_i, reverse=True):
            del new_bin_edges[i]
            del new_variables[i]
        new_binning = RectilinearBinning(
            variables=new_variables,
            bin_edges=new_bin_edges,
            include_upper=self.include_upper,
        )
        # Copy and project values, from binning without subbinnings
        axes = tuple(sorted(binning_i))
        temp_binning = self.marginalize_subbinnings()
        new_values = reduction_function(
            temp_binning.get_values_as_ndarray(shape=temp_binning.bins_shape), axis=axes
        )
        new_entries = reduction_function(
            temp_binning.get_entries_as_ndarray(shape=temp_binning.bins_shape),
            axis=axes,
        )
        new_sumw2 = reduction_function(
            temp_binning.get_sumw2_as_ndarray(shape=temp_binning.bins_shape), axis=axes
        )
        new_binning.set_values_from_ndarray(new_values)
        new_binning.set_entries_from_ndarray(new_entries)
        new_binning.set_sumw2_from_ndarray(new_sumw2)
        return new_binning

    def project(self, binning_i, **kwargs):
        """Project the binning onto the given binnings and return a new RectilinearBinning.

        The order of the original binnings is preserved. If a single
        ``int`` or ``str`` is provided, the returned Binning is of the
        same type as the respective binning.

        Parameters
        ----------
        binning_i : iterable of int/str, or int/str
            Iterable of index of binning to be projected onto.
        **kwargs : optional
            Additional keyword arguments are passed on to :meth:`marginalize`.

        Returns
        -------
        RectilinearBinning or type(self.binnings[binning_i])
        """
        # Bug fix: the original resolved indices by iterating the *raw*
        # argument instead of the normalized list, which crashed for a
        # single int and iterated a variable name character by character.
        i = self._normalize_binning_arg(binning_i)
        i = [self.get_variable_index(var) for var in i]
        keep = set(i)
        # Which variables to remove
        rm_i = [j for j in range(len(self.binnings)) if j not in keep]
        ret = self.marginalize(rm_i, **kwargs)
        if isinstance(binning_i, (int, str)):
            # A scalar request unwraps to the single remaining binning.
            return ret._unpack()
        else:
            return ret

    def slice(self, slices):
        """Return a new RectilinearBinning containing the given variable slice.

        Parameters
        ----------
        slices : dict of (variable, (start, stop[, step]))
            The start and stop positions for the slices of all variables that
            should be sliced.

        Returns
        -------
        sliced_binning : RectilinearBinning
            A :class:`RectilinearBinning` consisting of the specified slices.

        Notes
        -----
        This will remove any ``subbinnings`` the binning might have.
        """
        # Create new binning edges and slice tuple
        new_bin_edges = list(deepcopy(self.bin_edges))
        all_slices = []
        for i, (var, edges) in enumerate(zip(self.variables, self.bin_edges)):
            if var in slices:
                bin_slice = slice(*slices[var])
                lower = edges[:-1][bin_slice]
                upper = edges[1:][bin_slice]
                # NOTE(review): an empty slice raises IndexError here.
                new_bin_edges[i] = list(lower) + [upper[-1]]
                all_slices.append(bin_slice)
            else:
                # This variable does not have to be sliced
                all_slices.append(slice(None))
        all_slices = tuple(all_slices)
        # Create new binning
        new_binning = RectilinearBinning(
            variables=self.variables,
            bin_edges=new_bin_edges,
            include_upper=self.include_upper,
        )
        # Copy and slice values
        temp_binning = self.marginalize_subbinnings()
        new_values = temp_binning.get_values_as_ndarray(shape=temp_binning.bins_shape)[
            all_slices
        ]
        new_entries = temp_binning.get_entries_as_ndarray(
            shape=temp_binning.bins_shape
        )[all_slices]
        new_sumw2 = temp_binning.get_sumw2_as_ndarray(shape=temp_binning.bins_shape)[
            all_slices
        ]
        new_binning.set_values_from_ndarray(new_values)
        new_binning.set_entries_from_ndarray(new_entries)
        new_binning.set_sumw2_from_ndarray(new_sumw2)
        return new_binning

    def remove_bin_edges(self, bin_edge_indices):
        """Return a new RectilinearBinning with the given bin edges removed.

        The values of the bins adjacent to the removed bin edges will be
        summed up in the resulting larger bin. Please note that bin values
        are lost if the first or last bin edge of a variable are removed.

        Parameters
        ----------
        bin_edge_indices : dict of (variable: list of int)
            Lists specifying the bin edge indices that should be removed.

        Notes
        -----
        This will remove any ``subbinnings`` the rectilinear binning might have.
        """
        # Create new binning
        new_bin_edges = []
        for var, edg in zip(self.variables, self.bin_edges):
            new_edg = list(edg)
            if var in bin_edge_indices:
                for i in sorted(bin_edge_indices[var], reverse=True):
                    del new_edg[i]
            new_bin_edges.append(new_edg)
        new_binning = RectilinearBinning(
            variables=self.variables,
            bin_edges=new_bin_edges,
            include_upper=self.include_upper,
        )
        # Copy and merge values, from binning without subbinnings
        temp_binning = self.marginalize_subbinnings()
        new_values = temp_binning.get_values_as_ndarray(shape=temp_binning.bins_shape)
        new_entries = temp_binning.get_entries_as_ndarray(shape=temp_binning.bins_shape)
        new_sumw2 = temp_binning.get_sumw2_as_ndarray(shape=temp_binning.bins_shape)
        for j, var in enumerate(self.variables):
            if var in bin_edge_indices:
                for i in sorted(bin_edge_indices[var], reverse=True):
                    if i > 0 and i < new_values.shape[j]:
                        # Removing an inner edge merges slab i into slab
                        # i-1 along axis j.
                        lower_tuple = (slice(None),) * j + (i - 1,) + (Ellipsis,)
                        upper_tuple = (slice(None),) * j + (i,) + (Ellipsis,)
                        new_values[lower_tuple] += new_values[upper_tuple]
                        new_entries[lower_tuple] += new_entries[upper_tuple]
                        new_sumw2[lower_tuple] += new_sumw2[upper_tuple]
                    if i < new_values.shape[j]:
                        new_values = np.delete(new_values, i, axis=j)
                        new_entries = np.delete(new_entries, i, axis=j)
                        new_sumw2 = np.delete(new_sumw2, i, axis=j)
                    else:
                        # Removing the last edge drops the last slab.
                        new_values = np.delete(new_values, -1, axis=j)
                        new_entries = np.delete(new_entries, -1, axis=j)
                        new_sumw2 = np.delete(new_sumw2, -1, axis=j)
        new_binning.set_values_from_ndarray(new_values)
        new_binning.set_entries_from_ndarray(new_entries)
        new_binning.set_sumw2_from_ndarray(new_sumw2)
        return new_binning

    def __eq__(self, other):
        """RectilinearBinnings are equal if the bin edges and variables match."""
        return (
            type(self) == type(other)
            and self.variables == other.variables
            and all(
                np.array_equal(self.bin_edges[i], other.bin_edges[i])
                for i in range(len(self.variables))
            )
            and self.include_upper == other.include_upper
            and self.subbinnings == other.subbinnings
        )

    def _get_clone_kwargs(self, **kwargs):
        """Get the necessary arguments to clone this object."""
        args = {
            "variables": list(self.variables),
            "bin_edges": [edg.tolist() for edg in self.bin_edges],
            "include_upper": self.include_upper,
        }
        args.update(Binning._get_clone_kwargs(self, bins=None, **kwargs))
        # The bin proxy is recreated by __init__ and must not be passed on.
        del args["bins"]
        return args

    # Tag used by the YAML (de)serialization machinery.
    yaml_tag = "!RectilinearBinning"
| StarcoderdataPython |
1890063 | <gh_stars>0
from tkinter import *
def showPosEvent(event):
    """Print the widget and pointer coordinates of a positional event."""
    message = 'Widget=%s X=%s Y=%s' % (event.widget, event.x, event.y)
    print(message)
def showAllEvent(event):
    """Print the event followed by all of its non-dunder attributes."""
    print(event)
    public_names = [name for name in dir(event) if not name.startswith('__')]
    for name in public_names:
        print(name, '=>', getattr(event, name))
def onKeyPress(event):
    """Report the character of a keyboard event."""
    char = event.char
    print('Got key press:', char)
def onArrowKey(event):
    """Report an up-arrow key press; the event payload is not used."""
    notice = 'Got up arrow key press'
    print(notice)
def onReturnKey(event):
    """Report a return/enter key press; the event payload is not used."""
    notice = 'Got return key press'
    print(notice)
def onLeftClick(event):
    """Report a left mouse button click and its position."""
    # end=' ' keeps the prefix and showPosEvent's output on one line.
    print('Got left mouse button click:', end=' ')
    showPosEvent(event)
def onRightClick(event):
    """Report a right mouse button click and its position."""
    print('Got right mouse button click:', end=' ')
    showPosEvent(event)
def onMiddleClick(event):
    """Report a middle mouse button click, then dump the full event."""
    print('Got middle mouse button click:', end=' ')
    showPosEvent(event)
    # The middle button also demonstrates the verbose attribute dump.
    showAllEvent(event)
def onLeftDrag(event):
    """Report a drag with the left mouse button held down."""
    print('Got left mouse button drag:', end=' ')
    showPosEvent(event)
def onDoubleLeftClick(event):
    """Report a double left-click, then quit the demo's main loop."""
    print('Got double left mouse click', end=' ')
    showPosEvent(event)
    # Double-click is the demo's exit gesture.
    tkroot.quit()
# Demo script: a single red label whose mouse and keyboard events are
# bound to the reporting callbacks defined above.
tkroot = Tk()
labelfont = ('courier', 20, 'bold') # family, size, style
widget = Label(tkroot, text='Hello bind world')
widget.config(bg='red', font=labelfont) # red background, large font
widget.config(height=5, width=20) # initial size: lines,chars
widget.pack(expand=YES, fill=BOTH)
widget.bind('<Button-1>', onLeftClick) # mouse button clicks
widget.bind('<Button-3>', onRightClick)
widget.bind('<Button-2>', onMiddleClick) # middle=both on some mice
widget.bind('<Double-1>', onDoubleLeftClick) # click left twice
widget.bind('<B1-Motion>', onLeftDrag) # click left and move
widget.bind('<KeyPress>', onKeyPress) # all keyboard presses
widget.bind('<Up>', onArrowKey) # arrow button pressed
widget.bind('<Return>', onReturnKey) # return/enter key pressed
widget.focus() # or bind keypress to tkroot
tkroot.title('Click Me')
tkroot.mainloop()
| StarcoderdataPython |
# Generated by Django 3.2.9 on 2021-11-25 17:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``Venue`` model with a unique name and an M2M link to Department."""

    # Must run after the migration that last touched the accounts app.
    dependencies = [
        ('accounts', '0006_auto_20211125_1714'),
    ]
    operations = [
        migrations.CreateModel(
            name='Venue',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150, unique=True)),
                ('department', models.ManyToManyField(to='accounts.Department')),
            ],
        ),
    ]
| StarcoderdataPython |
6614650 | <reponame>sa-y-an/retro<filename>stress_detector/dev_settings.py
from pathlib import Path
import os
import json

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Load deployment secrets/settings from an untracked JSON file.
# Bug fix: the original `json.load(open(...))` never closed the file
# handle; a context manager closes it deterministically.
with open(os.path.join(BASE_DIR, 'stress_detector/config.json'), 'r') as _config_file:
    params = json.load(_config_file)

SECRET_KEY = params['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = params['DEBUG']
# NOTE(review): '*' accepts any Host header; tighten for production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'home',  # project application
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'stress_detector.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'stress_detector.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files are compressed and served by WhiteNoise.
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#SMTP Configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Credentials come from the untracked config.json loaded above.
EMAIL_HOST_USER = params["EMAIL_HOST_USER"]
EMAIL_HOST_PASSWORD = params["EMAIL_HOST_PASSWORD"]
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| StarcoderdataPython |
# This example scans for any BLE advertisements and prints one advertisement and one scan response
# from every device found. This scan is more detailed than the simple test because it includes
# specialty advertising types.
from adafruit_ble import BLERadio
from adafruit_ble.advertising import Advertisement
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement

# Scan for BLE advertisements; print each device's first advertisement and
# first scan response exactly once.
ble = BLERadio()
print("scanning")
found = set()
scan_responses = set()
# By providing Advertisement as well we include everything, not just specific advertisements.
for advertisement in ble.start_scan(ProvideServicesAdvertisement, Advertisement):
    addr = advertisement.address
    # Track scan responses and plain advertisements in separate sets so we
    # print one of each per address, skipping repeats.
    if advertisement.scan_response and addr not in scan_responses:
        scan_responses.add(addr)
    elif not advertisement.scan_response and addr not in found:
        found.add(addr)
    else:
        continue
    print(addr, advertisement)
    print("\t" + repr(advertisement))
    print()

print("scan done")
| StarcoderdataPython |
1742814 | <reponame>vmiheer/t2sp<gh_stars>10-100
import halide as hl
import numpy as np
import gc
def test_ndarray_to_buffer():
    """ndarray -> hl.Buffer shares storage and mirrors the array's layout."""
    a0 = np.ones((200, 300), dtype=np.int32)
    # Buffer always shares data (when possible) by default,
    # and maintains the shape of the data source. (note that
    # the ndarray is col-major by default!)
    b0 = hl.Buffer(a0, "float32_test_buffer")
    assert b0.type() == hl.Int(32)
    assert b0.name() == "float32_test_buffer"
    assert b0.all_equal(1)
    assert b0.dim(0).min() == 0
    assert b0.dim(0).max() == 199
    assert b0.dim(0).extent() == 200
    assert b0.dim(0).stride() == 300
    assert b0.dim(1).min() == 0
    assert b0.dim(1).max() == 299
    assert b0.dim(1).extent() == 300
    assert b0.dim(1).stride() == 1
    # Writes through either side must be visible on the other (shared storage).
    a0[12, 34] = 56
    assert b0[12, 34] == 56
    b0[56, 34] = 12
    assert a0[56, 34] == 12
def test_buffer_to_ndarray():
    """hl.Buffer -> ndarray: copy=False shares storage, copy=True snapshots."""
    buf = hl.Buffer(hl.Int(16), [4, 4])
    assert buf.type() == hl.Int(16)
    buf.fill(0)
    buf[1, 2] = 42
    assert buf[1, 2] == 42
    # Should share storage with buf
    array_shared = np.array(buf, copy = False)
    assert array_shared.shape == (4, 4)
    assert array_shared.dtype == np.int16
    assert array_shared[1, 2] == 42
    # Should *not* share storage with buf
    array_copied = np.array(buf, copy = True)
    assert array_copied.shape == (4, 4)
    assert array_copied.dtype == np.int16
    assert array_copied[1, 2] == 42
    # Mutating buf must show up in the shared view but not in the copy.
    buf[1, 2] = 3
    assert array_shared[1, 2] == 3
    assert array_copied[1, 2] == 42
    # Ensure that Buffers that have nonzero mins get converted correctly,
    # since the Python Buffer Protocol doesn't have the 'min' concept
    cropped = buf.copy()
    cropped.crop(dimension = 0, min = 1, extent = 2)
    # Should share storage with cropped (and buf)
    cropped_array_shared = np.array(cropped, copy = False)
    assert cropped_array_shared.shape == (2, 4)
    assert cropped_array_shared.dtype == np.int16
    assert cropped_array_shared[0, 2] == 3
    # Should *not* share storage with anything
    cropped_array_copied = np.array(cropped, copy = True)
    assert cropped_array_copied.shape == (2, 4)
    assert cropped_array_copied.dtype == np.int16
    assert cropped_array_copied[0, 2] == 3
    # cropped is itself a copy of buf, so this write is isolated from buf.
    cropped[1, 2] = 5
    assert buf[1, 2] == 3
    assert array_shared[1, 2] == 3
    assert array_copied[1, 2] == 42
    assert cropped[1, 2] == 5
    assert cropped_array_shared[0, 2] == 5
    assert cropped_array_copied[0, 2] == 3
def _assert_fn(e):
    """Assert that *e* is truthy; usable from a lambda (see test_for_each_element)."""
    assert e
def test_for_each_element():
    """for_each_element visits every coordinate of the buffer."""
    buf = hl.Buffer(hl.Float(32), [3, 4])
    for x in range(3):
        for y in range(4):
            buf[x, y] = x + y
    # Can't use 'assert' in a lambda, but can call a fn that uses it.
    buf.for_each_element(lambda pos, buf=buf: _assert_fn(buf[pos[0], pos[1]] == pos[0] + pos[1]))
def test_fill_all_equal():
    """fill() sets every element; all_equal() detects a single deviation."""
    buf = hl.Buffer(hl.Int(32), [3, 4])
    buf.fill(3)
    assert buf.all_equal(3)
    buf[1, 2] = 4
    assert not buf.all_equal(3)
def test_bufferinfo_sharing():
    """Chained Buffer wrappers over a huge allocation share (not copy) storage."""
    # Torture-test to ensure that huge Python Buffer Protocol allocations are properly
    # shared (rather than copied), and also that the lifetime is held appropriately
    a0 = np.ones((20000, 30000), dtype=np.int32)
    b0 = hl.Buffer(a0)
    del a0
    # Wrap 200 times; if each wrap copied, this would exhaust memory.
    for i in range(200):
        b1 = hl.Buffer(b0)
        b0 = b1
    b1 = None
    gc.collect()
    # The storage must still be alive and writable after collection.
    b0[56, 34] = 12
    assert b0[56, 34] == 12
def test_float16():
    """Smoke test: float16 ndarrays round-trip through hl.Buffer without error."""
    array_in = np.zeros((256, 256, 3), dtype=np.float16, order='F')
    hl_img = hl.Buffer(array_in)
    # No assertions: constructing the shared view must simply not raise.
    array_out = np.array(hl_img, copy = False)
def test_int64():
    """Smoke test: int64 ndarrays round-trip through hl.Buffer without error."""
    array_in = np.zeros((256, 256, 3), dtype=np.int64, order='F')
    hl_img = hl.Buffer(array_in)
    # No assertions: constructing the shared view must simply not raise.
    array_out = np.array(hl_img, copy = False)
def test_make_interleaved():
    """make_interleaved produces channel-stride-1 (chunky) layout, visible via numpy."""
    w = 7
    h = 13
    c = 3
    b = hl.Buffer.make_interleaved(type = hl.UInt(8), width = w, height = h, channels = c)
    # Interleaved layout: x stride == channels, y stride == width*channels.
    assert b.dim(0).min() == 0
    assert b.dim(0).extent() == w
    assert b.dim(0).stride() == c
    assert b.dim(1).min() == 0
    assert b.dim(1).extent() == h
    assert b.dim(1).stride() == w * c
    assert b.dim(2).min() == 0
    assert b.dim(2).extent() == c
    assert b.dim(2).stride() == 1
    # The numpy view must report the same shape and strides (in elements).
    a = np.array(b, copy = False)
    assert a.shape == (w, h, c)
    assert a.strides == (c, w*c, 1)
    assert a.dtype == np.uint8
def test_interleaved_ndarray():
    """An interleaved ndarray keeps its strides when wrapped in hl.Buffer."""
    w = 7
    h = 13
    c = 3
    # Build an ndarray with explicit interleaved strides (channel fastest).
    a = np.ndarray(dtype=np.uint8, shape=(w, h, c), strides=(c, w*c, 1))
    assert a.shape == (w, h, c)
    assert a.strides == (c, w*c, 1)
    assert a.dtype == np.uint8
    b = hl.Buffer(a)
    assert b.type() == hl.UInt(8)
    # The Buffer must mirror the ndarray's extents and strides exactly.
    assert b.dim(0).min() == 0
    assert b.dim(0).extent() == w
    assert b.dim(0).stride() == c
    assert b.dim(1).min() == 0
    assert b.dim(1).extent() == h
    assert b.dim(1).stride() == w * c
    assert b.dim(2).min() == 0
    assert b.dim(2).extent() == c
    assert b.dim(2).stride() == 1
def test_reorder():
    """storage_order / transpose() control strides without touching extents."""
    W, H, C, Z = 7, 5, 3, 2

    a = hl.Buffer(type=hl.UInt(8), sizes=[W, H, C], storage_order=[2, 0, 1])
    assert a.dim(0).extent() == W
    assert a.dim(1).extent() == H
    assert a.dim(2).extent() == C
    # storage_order [2, 0, 1]: channel is densest, then x, then y.
    assert a.dim(2).stride() == 1
    assert a.dim(0).stride() == C
    assert a.dim(1).stride() == W * C

    b = hl.Buffer(hl.UInt(8), [W, H, C, Z], [2, 3, 0, 1])
    for i, extent in enumerate([W, H, C, Z]):
        assert b.dim(i).extent() == extent
    assert b.dim(2).stride() == 1
    assert b.dim(3).stride() == C
    assert b.dim(0).stride() == C * Z
    assert b.dim(1).stride() == W * C * Z

    # A buffer allocated with permuted sizes matches b dim-for-dim under
    # the inverse permutation...
    b2 = hl.Buffer(hl.UInt(8), [C, Z, W, H])
    for i, j in [(0, 2), (1, 3), (2, 0), (3, 1)]:
        assert b.dim(i).extent() == b2.dim(j).extent()
    for i, j in [(0, 2), (1, 3), (2, 0), (3, 1)]:
        assert b.dim(i).stride() == b2.dim(j).stride()

    # ...and after transpose() the dimensions line up directly.
    b2.transpose([2, 3, 0, 1])
    for i in range(4):
        assert b.dim(i).extent() == b2.dim(i).extent()
    for i in range(4):
        assert b.dim(i).stride() == b2.dim(i).stride()
def test_overflow():
    """Buffer creation succeeds at size INT_MAX and must fail just past it."""
    # size == INT_MAX: should not emit an error.
    w_intmax = 0x7FFFFFFF
    size_intmax = np.ndarray(dtype=np.uint8, shape=(w_intmax))
    hl.Buffer(size_intmax)
    # size == INT_MAX + 1: must be rejected.
    w_over_intmax = 0x7FFFFFFF + 1
    size_over_intmax = np.ndarray(dtype=np.uint8, shape=(w_over_intmax))
    try:
        hl.Buffer(size_over_intmax)
    except ValueError as e:
        assert 'Out of range arguments to make_dim_vec.' in str(e)
    else:
        # Previously the test passed silently when no exception was raised;
        # an oversize buffer must always raise.
        assert False, 'expected ValueError for size > INT_MAX'
def test_buffer_to_str():
    """str() of a Buffer shows definedness, element type, and shape."""
    undefined_buf = hl.Buffer()
    assert str(undefined_buf) == '<undefined halide.Buffer>'
    defined_buf = hl.Buffer(hl.Int(32), [128, 256])
    assert str(defined_buf) == '<halide.Buffer of type int32 shape:[[0,128,1],[0,256,128]]>'
if __name__ == "__main__":
    # Run every test in the same order as before.
    for test in [
        test_make_interleaved,
        test_interleaved_ndarray,
        test_ndarray_to_buffer,
        test_buffer_to_ndarray,
        test_for_each_element,
        test_fill_all_equal,
        test_bufferinfo_sharing,
        test_float16,
        test_int64,
        test_reorder,
        test_overflow,
        test_buffer_to_str,
    ]:
        test()
| StarcoderdataPython |
5184982 | <gh_stars>1-10
"""
Markov Chain methods in Python.
A toolkit of stochastic methods for biometric analysis. Features
a Metropolis-Hastings MCMC sampler and both linear and unscented
(non-linear) Kalman filters.
Pre-requisite modules: numpy, matplotlib
Required external components: TclTk
"""
__version__ = '2.1alpha'
try:
import numpy
except ImportError:
raise ImportError, 'NumPy does not seem to be installed. Please see the user guide.'
# Core modules
from threadpool import *
try:
import Container_values
del Container_values
except ImportError:
raise ImportError, 'You seem to be importing PyMC from inside its source tree. Please change to another directory and try again.'
from Node import *
from Container import *
from PyMCObjects import *
from InstantiationDecorators import *
from CommonDeterministics import *
from distributions import *
from Model import *
from StepMethods import *
from MCMC import *
from NormalApproximation import *
from tests import test
# Utilities modules
import utils
import CommonDeterministics
from CircularStochastic import *
import distributions
import gp
# Optional modules
try:
from diagnostics import *
except ImportError:
pass
try:
import ScipyDistributions
except ImportError:
pass
try:
import parallel
except ImportError:
pass
try:
import sandbox
except ImportError:
pass
try:
import graph
except ImportError:
pass
try:
import Matplot
except:
pass
| StarcoderdataPython |
11235702 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import json
import os
import time
import sys
# On Python 2, force UTF-8 as the default string encoding so non-ASCII
# paper titles do not raise UnicodeDecodeError when formatted.
if sys.version_info[0] == 2:
    reload(sys)
    sys.setdefaultencoding('utf-8')
import tqdm
from selenium import webdriver
# Module-level Chrome session reused for every search request.
b = webdriver.Chrome()
# Bing search URL template; {} is filled with the '+'-joined paper title.
url = "https://cn.bing.com/search?q={}&ensearch=1"
def get_pdf_url(url):
    """Open a Bing results page and return the first result link ending in
    '.pdf', or '' when navigation fails or no PDF link is found."""
    try:
        b.get(url)
    except Exception:
        return ""
    time.sleep(1)  # give the results page a moment to render
    for result in b.find_elements_by_class_name('b_algo'):
        href = str(result.find_element_by_tag_name('a').get_attribute('href'))
        if href.endswith('.pdf'):
            return href
    return ""
def fetch_url(args):
    """Load papers from args.json_file, look up a PDF url for each title via
    Bing, and write the augmented records back to the same file."""
    papers = []
    if os.path.exists(args.json_file):
        with open(args.json_file, 'r') as fid:
            papers = json.load(fid)
    progress = tqdm.tqdm()
    progress.total = len(papers)
    updated = []
    for paper in papers:
        progress.update()
        # Build the query: '+'-joined title plus a '+pdf' hint.
        query = paper["title"].replace(' ', '+') + "+pdf"
        try:
            paper["url"] = get_pdf_url(url.format(query))
        except Exception:
            paper["url"] = ""
        updated.append(paper)
    with open(args.json_file, 'w') as fid:
        json.dump(updated, fid)
    print("Write to {}".format(args.json_file))
def main(args):
    """Entry point: delegate to fetch_url with the parsed CLI arguments."""
    fetch_url(args)
if __name__ == "__main__":
    # CLI: a single optional path to the papers metadata JSON file.
    parser = argparse.ArgumentParser(description='Fetch urls from internet using papers.json')
    parser.add_argument('--json_file', default='papers.json', type=str, help='input json file')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
# -*- coding: UTF-8 -*-
def _apply_and_print(value, step, repeats=5):
    """Apply *step* to *value* *repeats* times, printing each new value."""
    for _ in range(repeats):
        value = step(value)
        print(value)
    return value

# Exercise each augmented-assignment operator five times in turn,
# threading the running value through every stage.
lan = 980
lan = _apply_and_print(lan, lambda v: v + 10)
lan = _apply_and_print(lan, lambda v: v - 10)
lan = _apply_and_print(lan, lambda v: v * 10)
lan = _apply_and_print(lan, lambda v: v / 10)
lan = _apply_and_print(lan, lambda v: v % 10)
lan = _apply_and_print(lan, lambda v: v ** 10)
lan = _apply_and_print(lan, lambda v: v // 10)
| StarcoderdataPython |
8141849 | <gh_stars>1-10
# IDAPython script (Python 2 / legacy IDC-style API): scan every still
# auto-named function ('sub_...') for calls to __assert and try to recover
# the original function name from the assert's string argument.
ea = BeginEA()
functions_with_assert = set()
# Maps auto-generated name -> recovered name string from the assert call.
function_renames = {}
for function_start in Functions(SegStart(ea), SegEnd(ea)):
    function_name = GetFunctionName(function_start)
    if not function_name.startswith('sub_'):
        continue
    function_end = FindFuncEnd(function_start)
    current_address = function_start
    while current_address != BADADDR:
        code_refs = list(CodeRefsFrom(current_address, 1))
        for ref in code_refs:
            if GetFunctionName(ref) == "__assert":
                functions_with_assert.add(function_name)
                # Step back from current_address, find what was loaded into a2
                # NOTE(review): the comment says a2 but the code matches the
                # MIPS register $a3 below -- confirm which argument carries
                # the function-name string.
                instruction_address_for_argument = current_address
                # Scan up to 10 preceding instructions for a 'la $a3, <str>'.
                for x in range(10):
                    instruction_address_for_argument = PrevHead(instruction_address_for_argument)
                    mnem = GetMnem(instruction_address_for_argument)
                    if mnem == "la":
                        register = GetOpnd(instruction_address_for_argument, 0)
                        if register == "$a3":
                            string_location = GetOperandValue(instruction_address_for_argument, 1)
                            string_value = GetString(string_location, -1, ASCSTR_C)
                            function_renames[function_name] = string_value
        current_address = NextHead(current_address, function_end)
# Partition the assert-using functions by whether a name was recovered.
functions_with_renames = [f for f in functions_with_assert if f in function_renames]
unidentified_funtions = [f for f in functions_with_assert if f not in function_renames]
for f in functions_with_renames:
    print "Found %s using __assert with function name %s" % (f, function_renames[f])
for f in unidentified_funtions:
    print "Found unidentified function %s using assert" % f
6512282 | import osmnx as ox
import numpy as np
import networkx as nx
from networkx.readwrite import json_graph
import matplotlib.pyplot as plt
import re
import json
#creating a graph
#simplify graph
# Download the drivable street network within 500 m of the given
# (lat, lon) point and plot it.
G = ox.graph_from_point((33.775259139909664, -84.39705848693849), distance = 500, network_type='drive')
#fig, ax = ox.plot_graph(G,show=True, close=False,
#edge_color='black')
ox.plot_graph(G)
# gdf_nodes, gdf_edges = ox.graph_to_gdfs(G)
# gdf_u_v = gdf_edges[['u','v', 'osmid']]
# #print(gdf_edges)
# networkx_edges = [tuple(edge) for edge in gdf_u_v.values]
# print("Network edges : {}".format(networkx_edges))
# networkx_graph = nx.MultiDiGraph()
# #networkx_graph = nx.read_graphml("graphml.xml")
# networkx_graph.add_edges_from(networkx_edges, id = networkx_edges[2])
# #print("# of edges : {}".format(len(networkx_graph.edges)))
# pos=nx.spring_layout(networkx_graph)
# nx.draw(networkx_graph, pos = pos, node_size = 5, node_color = "blue")
# plt.show()
# num_cameras = 3
# #generate random camera locations. only works for straight lines
# random_indices = np.arange(len(networkx_edges))
# np.random.shuffle(random_indices)
# random_edges_indices = random_indices[:num_cameras]
# #generate random cam ids
# rand_to_generate = 3 * num_cameras
# random_indices = np.arange(1000, 9999)
# np.random.shuffle(random_indices)
# rand_ids = random_indices[:rand_to_generate]
# j = 0 #for assiging ids to cam and edges
# for i in range(num_cameras):
# random_edge_index = random_edges_indices[i]
# random_edge = gdf_edges[random_edge_index:random_edge_index + 1][:]
# # line = random_edge['geometry'].bounds
# # line_start_x = line['minx']
# # line_start_y = line['miny']
# # line_end_x = line['maxx']
# # line_end_y = line['maxy']
# # slope = (line_end_y - line_start_y) / (line_end_x - line_start_x)
# # random_x = (line_end_x - line_start_x) * np.random.uniform(0, 1) + line_start_x
# # random_point = [random_x, (slope * (random_x - line_start_x)) + line_start_y]
# #print(random_edge)
# u_node = int(random_edge['u'])
# v_node = int(random_edge['v'])
# print("U : {}, V : {}".format(u_node, v_node))
# networkx_graph.remove_edge(u_node, v_node)
# # cam_id = str(np.random.randint(1000, 9999))
# cam_id = rand_ids[j]
# new_edge_1_id = rand_ids[j+1]
# new_edge_2_id = rand_ids[j+2]
# networkx_graph.add_node("cam"+str(cam_id))
# networkx_graph.add_edge(u_node, "cam"+str(cam_id), key="cam_edge"+str(new_edge_1_id))
# networkx_graph.add_edge("cam"+str(cam_id), v_node, key="cam_edge"+str(new_edge_2_id))
# j+=3
# #do coloring
# color_map = []
# cam_labels = {}
# for node in list(networkx_graph.nodes):
# if re.search("cam", str(node)):
# color_map.append("red")
# cam_labels[node] = node
# else:
# color_map.append("blue")
# print("# of edges : {}".format(len(networkx_graph.edges)))
# pos = nx.fruchterman_reingold_layout(networkx_graph)
# nx.draw(networkx_graph, node_size = 10, pos =pos, node_color = color_map, with_labels = False)
# nx.draw_networkx_labels(networkx_graph, pos=pos, labels = cam_labels,font_size=10,font_color='r')
# plt.show()
# print("Network edges : {}".format(networkx_graph.edges))
# #nx.write_graphml_lxml(networkx_graph, "graphml.xml")
# #print json
# graph_json = {}
# graph_json["edges"] = []
# for edge in list(networkx_graph.edges):
# json_edge = {}
# json_edge["u"] = str(edge[0])
# json_edge["v"] = str(edge[1])
# json_edge["id"] = str(edge[2])
# graph_json["edges"].append(json_edge)
# with open('graph.json', 'w') as graph_json_file:
# json.dump(graph_json, graph_json_file, indent=4)
# G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
# G.add_path([0,1,2])
# print(G.edges())
# G.remove_node(1)
# G.edges()
# G = nx.MultiDiGraph()
# G.add_edges_from([(1, 2), (1, 3), (1, 4)]) # key_list returned
# print(G.edges)
# G.remove_edge(1, 2)
# # print(G.edges)
# print(gdf_nodes)
# networkx_graph.add_nodes_from(networkx_nodes)
# print(gdf_edges.head()['geometry'][1])
# fig, ax = ox.plot_graph(G,show=False, close=False,
# edge_color='black')
# print("Random point : {}".format(random_point))
# ax.scatter(random_point[0], random_point[1], c='red', s=50)
# print("line_start_x : {} \n line_start_y : {} \n line_end_x : {} \n line_end_y : {}".format(line_start_x, line_start_y, line_end_x, line_end_y)) | StarcoderdataPython |
9751340 | <reponame>jsalvatier/Theano-1
import unittest
import theano
import theano.tensor as T
from theano import function, shared
from theano.tests import unittest_tools as utt
from theano.tensor.nnet.ConvTransp3D import convTransp3D
from theano.tensor.nnet.ConvGrad3D import convGrad3D
from theano.tensor.nnet.Conv3D import conv3D
import numpy as N
import copy
import theano.sparse
if theano.sparse.enable_sparse:
from scipy import sparse
from nose.plugins.skip import SkipTest
floatX = theano.config.floatX
#TODO: each individual test method should seed rng with utt.fetch_seed()
# as it is right now, setUp does the seeding, so if you run just
# a subset of the tests they will do different things than if you
# run all of them
class DummyConv3D:
    """A dummy version of Conv3D passed to verify_grad.

    Stores a fixed stride (stride is not differentiable) and exposes a single
    scalar argument t, interpreted as the position along a randomly oriented
    parametric line through the given values (t == 0 is at VWbVals).
    Locking the inputs to one line keeps verify_grad cheap: one random
    projection is tested instead of hundreds of free variables, at the cost
    of not being certain every variable is correct.
    """
    def __init__(self, rng, VWbVals, d):
        """rng: RandomState used to pick the line direction.
        VWbVals: tuple of shared variables (V, W, b) to test around.
        d: shared variable holding the stride."""
        self.V, self.W, self.b = VWbVals
        def random_direction(var):
            # One random offset per variable, matching its current shape.
            return shared(rng.uniform(-1, 1, var.get_value(borrow=True).shape))
        self.dV = random_direction(self.V)
        self.dW = random_direction(self.W)
        self.db = random_direction(self.b)
        self.d = d
    def __call__(self, t):
        return conv3D(self.V + t * self.dV,
                      self.W + t * self.dW,
                      self.b + t * self.db,
                      self.d)
class DummyConvGrad3D:
    """A dummy version of ConvGrad3D passed to verify_grad; see DummyConv3D
    for the parametric-line rationale."""
    def __init__(self, rng, VdHvals, d, WShape):
        """rng: RandomState used to pick the line direction.
        VdHvals: tuple of shared variables (V, dCdH) to test around.
        d: shared variable holding the stride.
        WShape: shape of the weight tensor to recover."""
        self.V, self.dCdH = VdHvals
        def random_direction(var):
            return shared(rng.uniform(-1, 1, var.get_value(borrow=True).shape))
        self.dV = random_direction(self.V)
        self.ddCdH = random_direction(self.dCdH)
        self.d = d
        self.WShape = WShape
    def __call__(self, t):
        return convGrad3D(self.V + t * self.dV, self.d, self.WShape,
                          self.dCdH + t * self.ddCdH)
class DummyConvTransp3D:
    """A dummy version of ConvTransp3D passed to verify_grad; see DummyConv3D
    for the parametric-line rationale."""
    def __init__(self, rng, WbHvals, d, RShape):
        """rng: RandomState used to pick the line direction.
        WbHvals: tuple of shared variables (W, b, H) to test around.
        d: shared variable holding the stride.
        RShape: spatial shape of the reconstructed output."""
        self.W, self.b, self.H = WbHvals
        def random_direction(var):
            return shared(rng.uniform(-1, 1, var.get_value(borrow=True).shape))
        self.dW = random_direction(self.W)
        self.db = random_direction(self.b)
        self.dH = random_direction(self.H)
        self.d = d
        self.RShape = RShape
    def __call__(self, t):
        return convTransp3D(self.W + t * self.dW, self.b + t * self.db,
                            self.d, self.H + t * self.dH, self.RShape)
class TestConv3D(unittest.TestCase):
def setUp(self):
utt.seed_rng()
self.rng = N.random.RandomState(utt.fetch_seed())
mode = copy.copy(theano.compile.mode.get_default_mode())
mode.check_py_code = False
self.W = shared(N.ndarray(shape=(1,1,1,1,1), dtype=floatX))
self.b = shared(N.zeros(1,dtype=floatX))
self.rb = shared(N.zeros(1,dtype=floatX))
self.V = shared(N.ndarray(shape=(1,1,1,1,1), dtype=floatX))
self.d = shared(N.ndarray(shape=(3,),dtype=int))
self.H = conv3D(self.V, self.W, self.b, self.d)
self.H_func = function([], self.H, mode = mode)
self.H_shape_func = function( [], self.H.shape, mode = mode)
self.RShape = T.vector(dtype='int64')
self.otherH = T.TensorType(floatX,(False,False,False,False,False))(name='otherH')
self.transp = convTransp3D(self.W, self.rb, self.d, self.otherH, self.RShape)
self.transp_func = function([self.otherH,self.RShape],self.transp, mode=mode)
self.R = convTransp3D(self.W, self.rb, self.d, self.H, self.RShape)
self.R_func = function([self.RShape], self.R, mode = mode)
self.R_shape_func = function([self.RShape], self.R.shape)
self.reconsObj = T.sum(T.sqr(self.V-self.R))
self.reconsObjFunc = function([self.RShape], self.reconsObj, mode=mode)
self.gradientsFunc = function([self.RShape], [ T.grad(self.reconsObj, self.W), T.grad(self.reconsObj, self.H), T.grad(self.reconsObj, self.V), T.grad(self.reconsObj,self.b) ] , mode=mode)
self.check_c_against_python = function([self.RShape], [ T.grad(self.reconsObj, self.W), T.grad(self.reconsObj, self.H), T.grad(self.reconsObj, self.V), T.grad(self.reconsObj,self.b) ] , mode='DEBUG_MODE')
self.dCdW_shape_func = function([self.RShape], T.grad(self.reconsObj, self.W).shape, mode=mode)
def random_tensor(self,*dims):
return N.asarray(self.rng.uniform(-.05,.05,dims),dtype=floatX)
def randomize(self):
batchSize = self.rng.randint(1,4)
videoDur = self.rng.randint(8,30)
filterWidth = self.rng.randint(1,8)
filterHeight = self.rng.randint(1,8)
filterDur = self.rng.randint(1,8)
tsteps = self.rng.randint(1,4)
rsteps = self.rng.randint(1,4)
csteps = self.rng.randint(1,4)
videoDur = tsteps * filterDur + self.rng.randint(0,3)
videoWidth = csteps * filterWidth + self.rng.randint(0,3)
videoHeight = rsteps * filterHeight + self.rng.randint(0,3)
numFilters = self.rng.randint(1,3)
inputChannels = self.rng.randint(1,3)
self.d.get_value(borrow=True, return_internal_type=True)[0] = self.rng.randint(1,15)
self.d.get_value(borrow=True, return_internal_type=True)[1] = self.rng.randint(1,15)
self.d.get_value(borrow=True, return_internal_type=True)[2] = self.rng.randint(1,15)
outputHeight = int( (videoHeight - filterHeight) / self.d.get_value(borrow=True)[0] )+1
outputWidth = int( (videoWidth - filterWidth) / self.d.get_value(borrow=True)[1] )+1
outputDur = int( (videoDur - filterDur) / self.d.get_value(borrow=True)[2] ) +1
self.W.set_value(
self.random_tensor(numFilters,filterHeight,filterWidth,filterDur,inputChannels),
borrow=True)
self.b.set_value(self.random_tensor(numFilters), borrow=True)
self.rb.set_value(self.random_tensor(inputChannels), borrow=True)
self.V.set_value(
self.random_tensor(batchSize,videoHeight,videoWidth,videoDur,inputChannels),
borrow=True)
self.rb.set_value(self.random_tensor(inputChannels), borrow=True)
def test_c_against_python(self):
self.randomize()
self.check_c_against_python(self.V.get_value(borrow=True).shape[1:4])
def test_c_against_mat_mul(self):
#Use a filter of the same size as the image, so the convolution is just a dense matrix multiply
#Check that dense matrix multiplication gives the same result as convolution
batchSize = self.rng.randint(1,10)
videoDur = self.rng.randint(3,10)
videoWidth = self.rng.randint(1,5)
videoHeight = self.rng.randint(1,5)
filterWidth = videoWidth
filterHeight = videoHeight
filterDur = videoDur
numFilters = self.rng.randint(1,3)
inputChannels = self.rng.randint(1,4)
self.d.get_value(borrow=True, return_internal_type=True)[0] = self.rng.randint(1,15)
self.d.get_value(borrow=True, return_internal_type=True)[1] = self.rng.randint(1,15)
self.d.get_value(borrow=True, return_internal_type=True)[2] = self.rng.randint(1,15)
self.W.set_value(
self.random_tensor(numFilters,filterHeight,filterWidth,filterDur,inputChannels),
borrow=True)
self.W.set_value(
self.W.get_value(borrow=True) * (self.W.get_value(borrow=True) < 1e-5),
borrow=True)
self.b.set_value(self.random_tensor(numFilters), borrow=True)
self.V.set_value(
self.random_tensor(batchSize,videoHeight,videoWidth,videoDur,inputChannels),
borrow=True)
Hv = self.H_func()
assert Hv.shape[1] == 1
assert Hv.shape[2] == 1
assert Hv.shape[3] == 1
n = inputChannels * videoHeight * videoWidth * videoDur
W_mat = N.zeros((n, numFilters))
V_mat = N.zeros((batchSize,n))
Hv_mat = N.zeros((batchSize, numFilters))
for qi in xrange(0,numFilters):
W_mat[:,qi] = self.W.get_value(borrow=True)[qi,:,:,:,:].reshape((n))
Hv_mat[:,qi] = Hv[:,0,0,0,qi]
for qi in xrange(0,batchSize):
V_mat[qi,:] = self.V.get_value(borrow=True)[qi,:,:,:,:].reshape((n))
H_mat = N.dot(V_mat,W_mat) + self.b.get_value(borrow=True)
tol = 1e-5
if floatX == 'float32':
tol = 1e-4
if N.abs(H_mat-Hv_mat).max() > tol and not N.allclose(H_mat,Hv_mat):
print H_mat
print Hv_mat
print 'max error: '+str(N.abs(H_mat-Hv_mat).max())
W.get_value(borrow=True)[W.get_value(borrow=True) != 0] += 1.0
print 'min non-zero kernel mag: '+str(N.abs(W.get_value(borrow=True)).min())
assert False
def test_c_against_mat_transp_mul(self):
#Use a filter of the same size as the image, so the convolution is just a dense matrix multiply
#Check that dense matrix multiplication by the transpose of the matrix gives the same result as ConvTransp
batchSize = self.rng.randint(1,10)
videoDur = self.rng.randint(3,15)
videoWidth = self.rng.randint(3,15)
videoHeight = self.rng.randint(3,15)
filterWidth = videoWidth
filterHeight = videoHeight
filterDur = videoDur
numFilters = self.rng.randint(1,15)
inputChannels = self.rng.randint(1,15)
self.d.get_value(borrow=True, return_internal_type=True)[0] = self.rng.randint(1,15)
self.d.get_value(borrow=True, return_internal_type=True)[1] = self.rng.randint(1,15)
self.d.get_value(borrow=True, return_internal_type=True)[2] = self.rng.randint(1,15)
self.W.set_value(
self.random_tensor(numFilters,filterHeight,filterWidth,filterDur,inputChannels),
borrow=True)
self.b.set_value(self.random_tensor(numFilters), borrow=True)
self.V.set_value(
self.random_tensor(batchSize,videoHeight,videoWidth,videoDur,inputChannels),
borrow=True)
self.rb.set_value(self.random_tensor(inputChannels), borrow=True)
H_shape = self.H_shape_func()
assert H_shape[1] == 1
assert H_shape[2] == 1
assert H_shape[3] == 1
Hv = self.random_tensor( * H_shape )
Vv = self.transp_func(Hv,[videoHeight,videoWidth,videoDur])
n = inputChannels * videoHeight * videoWidth * videoDur
rbim = N.zeros((videoHeight,videoWidth,videoDur,inputChannels))
for qi in xrange(0,inputChannels):
rbim[:,:,:,qi] = self.rb.get_value(borrow=True)[qi]
rbv = rbim.reshape((n))
W_mat = N.zeros((numFilters, n))
Vv_mat = N.zeros((n, batchSize))
Hv_mat = N.zeros((numFilters,batchSize))
for qi in xrange(0,numFilters):
W_mat[qi,:] = self.W.get_value(borrow=True)[qi,:,:,:,:].reshape((n))
Hv_mat[qi,:] = Hv[:,0,0,0,qi]
for qi in xrange(0,batchSize):
Vv_mat[:,qi] = Vv[qi,:,:,:,:].reshape((n))
V_mat = (N.dot(W_mat.transpose(),Hv_mat).transpose() + rbv).transpose()
if N.abs(V_mat-Vv_mat).max() > 1e-5:
print V_mat
print Vv_mat
for qq in xrange(V_mat.shape[0]):
for qqq in xrange(Vv_mat.shape[1]):
if abs(V_mat[qq,qqq]-Vv_mat[qq,qqq]) > 1e-5:
print 'wrong at '+str((qq,qqq))+': '+str((V_mat[qq,qqq],Vv_mat[qq,qqq]))
assert False
def test_c_against_sparse_mat_transp_mul(self):
#like test_c_against_mat_transp_mul but using a sparse matrix and a kernel that is smaller than the image
if not theano.sparse.enable_sparse:
raise SkipTest('Optional package sparse disabled')
batchSize = self.rng.randint(1,3)
filterWidth = self.rng.randint(1,8)
filterHeight = self.rng.randint(1,8)
filterDur = self.rng.randint(1,8)
self.d.get_value(borrow=True, return_internal_type=True)[0] = self.rng.randint(1,15)
self.d.get_value(borrow=True, return_internal_type=True)[1] = self.rng.randint(1,15)
self.d.get_value(borrow=True, return_internal_type=True)[2] = self.rng.randint(1,15)
dr = self.d.get_value(borrow=True)[0]
dc = self.d.get_value(borrow=True)[1]
dt = self.d.get_value(borrow=True)[2]
numFilters = self.rng.randint(1,3)
row_steps = self.rng.randint(1,4)
col_steps = self.rng.randint(1,4)
time_steps = self.rng.randint(1,4)
#print (row_steps,col_steps,time_steps)
videoDur = (time_steps-1)*dt+filterDur + self.rng.randint(0,3)
videoWidth = (col_steps-1)*dc+filterWidth + self.rng.randint(0,3)
videoHeight = (row_steps-1)*dr+filterHeight + self.rng.randint(0,3)
inputChannels = self.rng.randint(1,15)
self.W.set_value(
self.random_tensor(numFilters,filterHeight,filterWidth,filterDur,inputChannels),
borrow=True)
self.b.set_value(self.random_tensor(numFilters), borrow=True)
#just needed so H_shape works
self.V.set_value(
self.random_tensor(batchSize,videoHeight,videoWidth,videoDur,inputChannels),
borrow=True)
self.rb.set_value(self.random_tensor(inputChannels), borrow=True)
H_shape = self.H_shape_func()
#make index maps
h = N.zeros( H_shape[1:])
r = N.zeros( H_shape[1:])
c = N.zeros( H_shape[1:])
t = N.zeros( H_shape[1:])
for qi in xrange(0,H_shape[4]):
h[:,:,:,qi] = qi
for qi in xrange(0,H_shape[1]):
r[qi,:,:,:] = qi
for qi in xrange(0,H_shape[2]):
c[:,qi,:,:] = qi
for qi in xrange(0,H_shape[3]):
t[:,:,qi,:] = qi
hn = H_shape[1] * H_shape[2] * H_shape[3] * H_shape[4]
h = h.reshape((hn))
r = r.reshape((hn))
c = c.reshape((hn))
t = t.reshape((hn))
Hv = self.random_tensor( * H_shape )
Vv = self.transp_func(Hv,[videoHeight,videoWidth,videoDur])
n = inputChannels * videoHeight * videoWidth * videoDur
rbim = N.zeros((videoHeight,videoWidth,videoDur,inputChannels))
for qi in xrange(0,inputChannels):
rbim[:,:,:,qi] = self.rb.get_value(borrow=True)[qi]
rbv = rbim.reshape((n))
W_mat = N.zeros((hn,n))
Vv_mat = N.zeros((n, batchSize))
Hv_mat = N.zeros((hn,batchSize))
for qi in xrange(0,hn):
hi = h[qi]
ri = r[qi]
ci = c[qi]
ti = t[qi]
placed_filter = N.zeros(self.V.get_value(borrow=True).shape[1:])
placed_filter[
ri*dr:ri*dr+self.W.get_value(borrow=True).shape[1],
ci*dc:ci*dc+self.W.get_value(borrow=True).shape[2],
ti*dt:ti*dt+self.W.get_value(borrow=True).shape[3],
:] = self.W.get_value(borrow=True)[hi,:,:,:,:]
W_mat[qi,:] = placed_filter.reshape((n))
Hv_mat[qi,:] = Hv[:,ri,ci,ti,hi]
for qi in xrange(0,batchSize):
Vv_mat[:,qi] = Vv[qi,:,:,:,:].reshape((n))
W_mat_T = sparse.csr_matrix(W_mat.transpose())
temp = W_mat_T * Hv_mat
V_mat = (temp.transpose() + rbv).transpose()
if N.abs(V_mat-Vv_mat).max() > 1e-5:
print 'mul'
print V_mat
print 'conv'
print Vv_mat
for i in xrange(0,n):
for j in xrange(0,batchSize):
if abs(V_mat[i,j] - Vv_mat[i,j]) > 1e-5:
print 'wrong at %d,%d: %f mul versus %f conv' % (i,j,V_mat[i,j],Vv_mat[i,j])
assert False
def test_infer_shape(self):
self.randomize()
Hv = self.H_func()
H_shape = self.H_shape_func()
assert N.all(Hv.shape == H_shape)
gradients = self.gradientsFunc(self.V.get_value(borrow=True).shape[1:4])
dCdWv = gradients[0]
dCdW_shape = self.dCdW_shape_func(self.V.get_value(borrow=True).shape[1:4])
assert N.all(dCdWv.shape == dCdW_shape)
Rv = self.R_func(self.V.get_value(borrow=True).shape[1:4])
R_shape = self.R_shape_func(self.V.get_value(borrow=True).shape[1:4])
assert N.all(Rv.shape == R_shape)
def test_gradient(self):
self.randomize()
rng, V,W,b,d,rb = self.rng, self.V, self.W, self.b, self.d, self.rb
dCdH = shared(self.random_tensor( *self.H_shape_func() ))
testsPerDir = 2
theano.tests.unittest_tools.verify_grad(DummyConv3D(rng, (V,W,b), d), [0.0], n_tests=testsPerDir)
theano.tests.unittest_tools.verify_grad(DummyConvTransp3D(rng, (W,rb,dCdH), d,V.get_value(borrow=True).shape[1:4]), [0.0], n_tests=testsPerDir)
theano.tests.unittest_tools.verify_grad(DummyConvGrad3D(rng, (V,dCdH), d, W.get_value(borrow=True).shape), [0.0], n_tests=testsPerDir)
| StarcoderdataPython |
11382427 | import compressible_sr.eos as eos
def test_eos_consistency():
    """The EOS helper functions must invert/compose consistently."""
    gamma, density, internal_energy = 1.4, 1.0, 1.0
    pressure = eos.pres(gamma, density, internal_energy)
    # pres() and dens() are inverses at fixed internal energy.
    assert eos.dens(gamma, pressure, internal_energy) == density
    # rhoe() recovers rho * e from the pressure.
    assert eos.rhoe(gamma, pressure) == density * internal_energy
    # Specific enthalpy: h = 1 + gamma * eps for this gamma-law EOS.
    enthalpy = eos.h_from_eps(gamma, internal_energy)
    assert enthalpy == (1 + gamma * internal_energy)
    # rho * h must agree with rhoh_from_rho_p.
    assert eos.rhoh_from_rho_p(gamma, density, pressure) == density * enthalpy
| StarcoderdataPython |
1910902 | <reponame>nikhilvijay-symc/cwa-remediation-packages<filename>AWS remediation scripts/SYMC_CWA_Remediation_Worker_S3/raise_sns_notification.py
import boto3
import json
import sys
def prepareSNSJsonMessage(message):
    """Wrap *message* in the per-protocol JSON envelope SNS expects,
    sending the same payload to both the 'default' and 'lambda' protocols.
    Logs and re-raises any serialization failure."""
    try:
        envelope = {'default': message, 'lambda': message}
        return json.dumps(envelope)
    except Exception as e:
        print("Error Occurred while preparing SNS Json Message : " + str(e))
        raise
def raise_sns_notification(output_sns_topic_arn, output_sns_region, payload, payloadType):
    """Publish *payload* to the given SNS topic in the given region.

    payloadType: 'json' (case-insensitive) publishes with
    MessageStructure='json'; any other value publishes a plain message.
    Logs and re-raises any failure.
    """
    try:
        print("Raising SNS Notification for, output_sns_topic_arn : " + output_sns_topic_arn + " - output_sns_topic_region : " + output_sns_region)
        clientSNS = boto3.client('sns', output_sns_region)
        clientSNS.publish(
            TopicArn=output_sns_topic_arn,
            Message=payload,
            MessageStructure='json' if payloadType.lower() == 'json' else '')
        print("Raised SNS Notification Succesfully")
    except Exception:
        # Previously a bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception, keeping log-and-reraise.
        print('Error Occurred while publishing message to SNS Topic ' + output_sns_region + ', Details:- ' + str(sys.exc_info()[0]))
        raise
1974672 | """Top level intialization of BubbleBox"""
from . import api
from . import library
from . import resources
| StarcoderdataPython |
3249169 | <filename>Leetcode/Python/_347.py
class Solution:
    def topKFrequent(self, nums: List[int], k: int) -> List[int]:
        """Return the k most frequent values in nums (any order is valid)."""
        from collections import Counter  # local import: file has no import section
        # Counter.most_common replaces the manual dict count, the full sort,
        # the leftover debug print(lst), and the awkward lambda projection.
        return [value for value, _ in Counter(nums).most_common(k)]
| StarcoderdataPython |
3472156 | # Given a string, sort it in decreasing order based on the frequency of characters.
# Example 1:
# Input:
# "tree"
# Output:
# "eert"
# Explanation:
# 'e' appears twice while 'r' and 't' both appear once.
# So 'e' must appear before both 'r' and 't'. Therefore "eetr" is also a valid answer.
# Example 2:
# Input:
# "cccaaa"
# Output:
# "cccaaa"
# Explanation:
# Both 'c' and 'a' appear three times, so "aaaccc" is also a valid answer.
# Note that "cacaca" is incorrect, as the same characters must be together.
# Example 3:
# Input:
# "Aabb"
# Output:
# "bbAa"
# Explanation:
# "bbaA" is also a valid answer, but "Aabb" is incorrect.
# Note that 'A' and 'a' are treated as two different characters.
class Solution:
    def frequencySort(self, s: str) -> str:
        """Sort s so that characters repeat in decreasing frequency order."""
        from collections import Counter  # local import: file has no import section
        # most_common() sorts by count descending with ties kept in
        # first-seen order -- exactly what the manual dict plus
        # sorted(reverse=True) computed, minus the redundant count==1 branch.
        return ''.join(ch * count for ch, count in Counter(s).most_common())
# Ad-hoc smoke test; only the last assignment to s takes effect
# (the first two are dead).
s = 'tree'
s = 'aaaccc'
s = 'bbAa'
print(Solution().frequencySort(s))
| StarcoderdataPython |
110656 | <gh_stars>0
from django.db import models
# Create your models here.
from billing.models import BillingProfile
# Choices for Address.address_type: (stored value, human-readable label).
# The billing option is currently disabled.
ADDRESS_TYPES = (
    # ('billing', 'Facturacion'), # ('billing','Billing'),
    ('shipping', 'Envio'), # ('shipping','Shipping'),
)
class Address(models.Model):
    """A shipping address (billing currently disabled) tied to a billing profile."""
    billing_profile = models.ForeignKey(BillingProfile, on_delete=models.CASCADE)
    address_type = models.CharField(max_length=120, choices=ADDRESS_TYPES)
    direccion_linea_1 = models.CharField(max_length=120)
    direccion_linea_2 = models.CharField(max_length=120, null=True, blank=True)
    # pais = models.CharField(max_length=120, default='Costa Rica')
    provincia = models.CharField(max_length=120)
    canton = models.CharField(max_length=120)
    distrito = models.CharField(max_length=120, null=True, blank=True)

    def __str__(self):
        return str(self.billing_profile)

    def get_address(self):
        """Render the address as a single display string."""
        second_line = self.direccion_linea_2 or ""
        return "{linea1} {linea2}, {provincia}, {canton}, {distrito} ".format(
            linea1=self.direccion_linea_1,
            linea2=second_line,
            provincia=self.provincia,
            canton=self.canton,
            distrito=self.distrito,
        )
| StarcoderdataPython |
# Server side
import sys
import json
import requests
from PyQt5.QtNetwork import QTcpServer, QHostAddress
from PyQt5.QtWidgets import QApplication, QWidget, QTextBrowser, QVBoxLayout
from neo4j import *
from ModelProcess import *
import pickle
class Server(QWidget):
    """Qt TCP server that answers client questions using the trained
    classifier and the knowledge-graph query runner."""
    def __init__(self, model, prediction, vocabulary):
        # model: trained classifier; prediction: graph query runner;
        # vocabulary: token vocabulary used when classifying questions.
        super(Server, self).__init__()
        self.model = model
        self.prediction = prediction
        self.vocabulary = vocabulary
        self.resize(500, 450)
        # 1. Log area showing connection events.
        self.browser = QTextBrowser(self)
        self.v_layout = QVBoxLayout()
        self.v_layout.addWidget(self.browser)
        self.setLayout(self.v_layout)
        # 2. Listen for TCP clients on localhost:8080; log failures.
        self.server = QTcpServer(self)
        if not self.server.listen(QHostAddress.LocalHost, 8080):
            self.browser.append(self.server.errorString())
        self.server.newConnection.connect(self.new_socket_slot)
    def new_socket_slot(self):
        # Accept the pending connection and wire up its signals; the
        # lambdas close over this particular socket.
        sock = self.server.nextPendingConnection()
        peer_address = sock.peerAddress().toString()
        peer_port = sock.peerPort()
        news = 'Connected with address {}, port{}'.format(peer_address, str(peer_port))
        self.browser.append(news)
        sock.readyRead.connect(lambda: self.read_data_slot(sock))
        sock.disconnected.connect(lambda: self.disconnected_slot(sock))
    # 3. Read each question from the socket and reply with the answer.
    def read_data_slot(self, sock):
        while sock.bytesAvailable():
            datagram = sock.read(sock.bytesAvailable())
            message = datagram.decode()
            answer = self.get_answer(message).encode()
            sock.write(bytes(answer))
    def get_answer(self, message):
        # Segment the question, classify it, then run the matching
        # graph query to produce the answer text.
        sentence = pre_dosegment(message)
        sentence = ' '.join(sentence)
        prediction1 = self.model.test(str(sentence), self.vocabulary)
        query = match_question(prediction1, str(message))
        answer = self.prediction.run(query, prediction1)
        return answer
    # 4. Log the disconnect and release the socket.
    def disconnected_slot(self, sock):
        peer_address = sock.peerAddress().toString()
        peer_port = sock.peerPort()
        news = 'Disconnected with address {}, port {}'.format(peer_address, str(peer_port))
        self.browser.append(news)
        sock.close()
if __name__ == '__main__':
    # Build the Qt app, load the pickled vocabulary and the trained model,
    # then show the server window and enter the event loop.
    app = QApplication(sys.argv)
    prediction = predict(test_graph)  # predict/test_graph come from the star imports
    model = NaiveBayesModelMe()
    pkl_file = open("vocabulary.pkl", "rb")  # NOTE(review): handle never closed -- confirm intended
    vocabulary = pickle.load(pkl_file)
    demo = Server(model, prediction, vocabulary)
    demo.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
64562 | <reponame>Marsll/md-simulator
"""tests for short_ranged for the lennard jones forces and potential"""
from ..neighbor_order_pbc import create_nb_order
from ..neighbor_list import NeighborList
from ..short_ranged import pair_potential, pair_force, potentials
import numpy as np
import numpy.testing as npt
import scipy.constants as const
def test_potential_1d_0():
    """LJ pair potential in 1D PBC: separation equal to the cutoff gives 0."""
    left = np.array([0])
    right = np.array([4])
    par_a = np.ones(3)
    par_b = np.ones(3)
    value = pair_potential(left, right, par_a, par_b, 1, (5,), r_cut=5, coulomb=False)
    npt.assert_equal(value, 0)
def test_potential_3d():
    """LJ potential for a minimum-image separation of (4, 3, 0), i.e. r = 5."""
    first = np.array([6, 7, 0])
    second = np.array([0, 0, 0])
    par_a = np.ones(3)
    par_b = np.ones(3)
    value = pair_potential(first, second, par_a, par_b, 1, (10, 10, 10),
                           r_cut=10, coulomb=False)
    npt.assert_almost_equal(value, -0.00025598361)
def test_force_3d():
    """LJ pair force points along the minimum-image separation (0, 4, 3)."""
    first = np.array([0, 6, 7])
    second = np.array([0, 0, 0])
    par_a = np.ones(3)
    par_b = np.ones(3)
    value = pair_force(first, second, par_a, par_b, 1, (10, 10, 10),
                       r_cut=10, coulomb=False)
    expected = 0.00030716067 * np.array([0, 4, 3]) / 5
    npt.assert_almost_equal(value, expected)
def test_potential_3d_three_particles():
    """8 cells, three particles, all in same octant."""
    box = [10, 10, 10]
    cutoff = 5
    order = create_nb_order(box, cutoff)
    positions = np.array([[0, 0, 0], [3, 4, 0], [6, 7, 0]])
    neighbors = NeighborList(box, positions, cutoff)
    value = potentials(positions, np.ones([3, 3]), 1, neighbors, order, cutoff,
                       lj=True, coulomb=False)
    pair_at_5 = 4 * ((1 / 5)**12 - (1 / 5)**6)
    pair_at_sqrt18 = 4 * ((1 / np.sqrt(18))**12 - (1 / np.sqrt(18))**6)
    npt.assert_almost_equal(value, 2 * pair_at_5 + pair_at_sqrt18)
def test_potential_2d_three_particles_2():
    """12 cells, three particles."""
    box = [3, 4]
    cutoff = 1
    order = create_nb_order(box, cutoff)
    positions = np.array([[0, 0], [1.5, 2.5], [2.5, 3.5]])
    neighbors = NeighborList(box, positions, cutoff)
    value = potentials(positions, np.ones([3, 3]), 1, neighbors, order, cutoff,
                       lj=True, coulomb=False)
    # Only one pair lies within the cutoff, at distance sqrt(0.5**2 + 0.5**2).
    dist = np.sqrt(0.5**2 + 0.5**2)
    npt.assert_almost_equal(value, 4 * ((1 / dist)**12 - (1 / dist)**6))
def test_potential_2d_four_particles_3():
    """12 cells, no neighbours."""
    box = [15, 20]
    cutoff = 5
    order = create_nb_order(box, cutoff)
    positions = np.array([[0, 0], [0, 11], [9, 19], [9, 6]])
    neighbors = NeighborList(box, positions, cutoff)
    value = potentials(positions, np.ones([4, 3]), 1, neighbors, order, cutoff,
                       lj=True, coulomb=False)
    # All pairs are farther apart than the cutoff, so nothing contributes.
    npt.assert_almost_equal(value, 0)
def test_potential_3d_four_particles_3():
    """12 cells, some neighbours."""
    box = [20, 15, 20]
    cutoff = 5
    order = create_nb_order(box, cutoff)
    positions = np.array([[0, 0, 0], [0, 0, 5], [6, 0, 5], [19, 14, 19]])
    neighbors = NeighborList(box, positions, cutoff)
    value = potentials(positions, np.ones([4, 3]), 1, neighbors, order, cutoff,
                       lj=True, coulomb=False)
    pair_a = 4 * ((1 / 5)**12 - (1 / 5)**6)
    # NOTE(review): exponents 6/3 below look unusual for LJ (12/6 elsewhere)
    # but are kept as the original reference value -- confirm against the model.
    pair_b = 4 * ((1 / 3)**6 - (1 / 3)**3)
    npt.assert_almost_equal(value, pair_a + pair_b)
| StarcoderdataPython |
1610637 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Note: this file is a demo version of pre_process_sysu.py, to prepare a demo dataset(save as .npy file) with
a small number of identities for debugging neural network.
"""
import os
import numpy as np
from PIL import Image
# todo_change your own path
data_path = "./data/SYSU-MM01"
rgb_cameras = ['cam1', 'cam2', 'cam4', 'cam5']  # visible-light cameras
ir_cameras = ['cam3', 'cam6']                   # infrared cameras
# load id info: train_id_demo.txt holds one comma-separated line of identity ids
file_path_train = os.path.join(data_path, 'exp/train_id_demo.txt')
with open(file_path_train, 'r') as file:
    ids = file.read().splitlines()
ids = [int(y) for y in ids[0].split(',')]
id_train = ["%04d" % x for x in ids]  # zero-padded to match the directory names
files_rgb = []
files_ir = []
# Collect every image path for the selected identities, split by modality.
for ide in sorted(id_train):
    for cam in rgb_cameras:
        img_dir = os.path.join(data_path, cam, ide)
        if os.path.isdir(img_dir):
            new_files = sorted([img_dir + '/' + i for i in os.listdir(img_dir)])
            files_rgb.extend(new_files)
    for cam in ir_cameras:
        img_dir = os.path.join(data_path, cam, ide)
        if os.path.isdir(img_dir):
            new_files = sorted([img_dir + '/' + i for i in os.listdir(img_dir)])
            files_ir.extend(new_files)
# relabel: map original person ids (parsed from the file path) to 0..N-1 labels
pid_container = set()
for img_path in files_ir:
    # print(img_path)
    # print(img_path[-13:-9])
    pid = int(img_path[-13:-9])  # person id sits at a fixed position in the path
    pid_container.add(pid)
pid2label = {pid: label for label, pid in enumerate(pid_container)}
fix_image_width = 144   # target width after resize
fix_image_height = 288  # target height after resize
def read_imgs(train_image):
    """Load, resize and label every image path in *train_image*.

    Returns a pair of numpy arrays ``(images, labels)`` where labels are the
    relabelled 0..N-1 ids from the module-level ``pid2label`` mapping.
    """
    train_data = []
    labels = []
    for ipath in train_image:
        # img
        img = Image.open(ipath)
        img = img.resize((fix_image_width, fix_image_height), Image.ANTIALIAS)
        pix_array = np.array(img)
        train_data.append(pix_array)
        # label
        pid_label = int(ipath[-13:-9])
        pid_label = pid2label[pid_label]
        labels.append(pid_label)
    # BUG FIX: the original returned np.array(train_img), np.array(train_label)
    # -- globals that do not exist yet at the first call (NameError). Return
    # the locals actually accumulated above.
    return np.array(train_data), np.array(labels)
# rgb imges: persist the resized RGB images and their labels as .npy files
train_img, train_label = read_imgs(files_rgb)
np.save(os.path.join(data_path, 'demo_train_rgb_resized_img.npy'), train_img)
np.save(os.path.join(data_path, 'demo_train_rgb_resized_label.npy'), train_label)
# ir imges: same for the infrared modality
train_img, train_label = read_imgs(files_ir)
np.save(os.path.join(data_path, 'demo_train_ir_resized_img.npy'), train_img)
np.save(os.path.join(data_path, 'demo_train_ir_resized_label.npy'), train_label)
| StarcoderdataPython |
1707911 | ##Gladiator Arena 2.0
##<NAME>
##10-12-17
##A remake of Gladiator Arena in python
from random import randint
########################
def deathScreen():
    """Announce death, then either restart the game or exit the process."""
    print("You died!!")
    answer = input("Would you like to play again? (yes/no)\n")
    if answer != "yes":
        quit()
    fight()
######################
def enemyAttack(playerHp, dodge, enemyDamages, enemyChances):
    """Resolve one enemy turn and return the player's remaining hp.

    A random move is rolled (1=light, 2=medium, 3=heavy); it lands when a
    0-100 roll is at most the move's hit chance minus the player's dodge.
    ``enemyDamages`` / ``enemyChances`` are indexed by move (0..2).
    """
    attackChoice = randint(1, 3)
    if attackChoice == 1:
        attackChance = randint(0, 100)
        if attackChance <= enemyChances[0] - dodge:
            playerHp = playerHp - enemyDamages[0]
            print("-----------------------------------")
            print("The enemy swings quickly and deals", enemyDamages[0], "damage")
            print("-----------------------------------")
        else:
            print("\nEnemy miss\n")
    if attackChoice == 2:
        attackChance = randint(0, 100)
        if attackChance <= enemyChances[1] - dodge:
            # BUG FIX: the medium strike subtracted enemyDamages[0] (the light
            # damage) while printing enemyDamages[1]; use index 1 for both.
            playerHp = playerHp - enemyDamages[1]
            print("-----------------------------------")
            print("The enemy strikes and deals", enemyDamages[1], "damage")
            print("-----------------------------------")
        else:
            print("\nEnemy miss\n")
    if attackChoice == 3:
        attackChance = randint(0, 100)
        if attackChance <= enemyChances[2] - dodge:
            playerHp = playerHp - enemyDamages[2]
            print("-----------------------------------")
            print("The enemy lands a heavy blow and deals", enemyDamages[2], "damage")
            print("-----------------------------------")
        else:
            print("\nEnemy misses\n")
    return (playerHp)
###################################
def playerAttack(Class, enemyHp, attackBonus):
    """Prompt the player for a move and return the enemy's remaining hp.

    Damage tables depend on the chosen class; heavier moves deal more but
    land less often (light 100%, medium 70%, heavy 40%).
    """
    if Class == "knight":
        damages = [4, 7, 10]
    if Class == "mage":
        damages = [3, 8, 11]
    if Class == "archer":
        damages = [3, 6, 9]
    playerChoice = int(input("\nPick a move\n1)Light\n2)Medium\n3)Heavy\n"))
    if playerChoice == 1:
        attackDmg = damages[0] + attackBonus
        attackChance = randint(0, 100)
        if attackChance <= 100:
            enemyHp = enemyHp - attackDmg
            print("---------------------------")
            print("You deal", attackDmg, "damage")
            print("---------------------------")
        else:
            print("\nYou miss\n")
    elif playerChoice == 2:
        attackDmg = damages[1] + attackBonus
        attackChance = randint(0, 100)
        if attackChance <= 70:
            enemyHp = enemyHp - attackDmg
            print("---------------------------")
            print("You deal", attackDmg, "damage")
            print("---------------------------")
        else:
            print("\nYou miss\n")
    elif playerChoice == 3:
        attackDmg = damages[2] + attackBonus
        attackChance = randint(0, 100)
        if attackChance <= 40:
            enemyHp = enemyHp - attackDmg
            print("---------------------------")
            print("You deal", attackDmg, "damage")
            print("---------------------------")
        else:
            print("\nYou miss\n")
    else:
        # BUG FIX: this message was a bare string expression (no print call)
        # and, being attached only to the `if playerChoice == 3` test, it also
        # "ran" as a no-op for choices 1 and 2. With the elif chain it now
        # fires -- and is actually printed -- only for an invalid choice.
        print("You trip and nearly fall!")
    return enemyHp
########################################################
def enemy(Class, hpBonus, attackBonus, dodgeBonus, enemyHp, enemyDamages, enemyType, enemyChances):
    """Run one full fight against a single enemy until it (or the player) dies.

    enemyType: 1=light, 2=medium, 3=heavy, 4=boss (only 1-3 are announced).
    On player death control passes to deathScreen(), which either restarts
    the whole game or exits the process.
    """
    if enemyType == 1:
        print("\nEnemy is light\n")
    if enemyType == 2:
        print("\nEnemy is medium\n")
    if enemyType == 3:
        print("\nEnemy is heavy\n")
    playerHp = 20 + hpBonus
    dodge = 10 + dodgeBonus
    while enemyHp > 0:
        print("---------------------------")
        print("You have", playerHp, "hp")
        print("The enemy has", enemyHp, "hp")
        print("---------------------------")
        enemyHp = playerAttack(Class, enemyHp, attackBonus)
        if enemyHp <= 0:
            print("Enemy killed!")
            break
        playerHp = enemyAttack(playerHp, dodge, enemyDamages, enemyChances)
        if playerHp <= 0:
            deathScreen()
    return
####################################################
#####################################################
def fight():
    """Main game loop: 9 random enemies (with optional shop visits), then a boss."""
    hpBonus = 0
    attackBonus = 0
    dodgeBonus = 0
    i = 0
    gold = 0
    print("knights have extra health, mages do extra damage, archers are highly evasive")
    Class = input("Pick a class: knight, mage, or archer\n")
    # Class choice trades hp, damage and dodge against each other.
    if Class == "knight":
        hpBonus = 10
    if Class == "mage":
        attackBonus = 6
        hpBonus = -7
    if Class == "archer":
        dodgeBonus = 25
        attackBonus = 1
    while i < 9:
        enemyType = randint(1, 3)
        goToShop = input("Would you like to visit the shop? (yes/no)\n")
        if goToShop == "yes":
            # Shop loop: buy upgrades until the player picks option 4.
            while True:
                print("\nYou have", gold, "gold pieces")
                purchase = int(input("pick an item:\n1)+3hp Cost 3 gold\n2)+1attack Cost 2 gold\n3)+5Dodge Cost 3 gold\n4)Leave shop\n"))
                if purchase == 1:
                    if gold >= 3:
                        gold = gold - 3
                        hpBonus = hpBonus + 3
                        print("Purchased!")
                    else:
                        print("Not enough gold")
                if purchase == 2:
                    if gold >= 2:
                        gold = gold - 2
                        attackBonus = attackBonus + 1
                        print("Purchased!")
                    else:
                        print("Not enough gold")
                if purchase == 3:
                    if gold >= 3:
                        gold = gold - 3
                        dodgeBonus = dodgeBonus + 5
                        print("Purchased!")
                    else:
                        print("Not enough gold")
                if purchase == 4:
                    break
        # Enemy tier decides hp, damage table, hit chances and the gold reward.
        if enemyType == 1:
            enemyHp = 10
            enemyDamages = [3, 5, 7]
            enemyChances = [100, 80, 70]
            enemy(Class, hpBonus, attackBonus, dodgeBonus, enemyHp, enemyDamages, enemyType, enemyChances)
            gold = gold + 1
        elif enemyType == 2:
            enemyHp = 20
            enemyDamages = [4, 7, 10]
            enemyChances = [100, 60, 40]
            enemy(Class, hpBonus, attackBonus, dodgeBonus, enemyHp, enemyDamages, enemyType, enemyChances)
            gold = gold + 2
        elif enemyType == 3:
            enemyHp = 30
            enemyDamages = [5, 7, 13]
            enemyChances = [80, 55, 40]
            enemy(Class, hpBonus, attackBonus, dodgeBonus, enemyHp, enemyDamages, enemyType, enemyChances)
            gold = gold + 3
        i = i + 1
    # Boss fight
    print("You approach the defending champion... goodluck")
    enemyHp = 100
    enemyDamages = [10, 15, 20]
    enemyChances = [80, 70, 50]
    enemyType = 4
    enemy(Class, hpBonus, attackBonus, dodgeBonus, enemyHp, enemyDamages, enemyType, enemyChances)
    print("you win!")
    deathScreen()
######################################################################
def mainMenu():
    """Gate entry on age, then hand control to the fight loop."""
    ready = "no"
    while ready == "no":
        print("Welcome to Gladiator Arena 2.0 a python re-make\nThe goal is to beat 9 enemies and the defending champion")
        age = int(input("Enter your age to play: "))
        if age < 18:
            print("Sorry too young to play")
            continue
        ready = input("Are you ready?(yes/no)\n")
    fight()
###################################################################
mainMenu()
| StarcoderdataPython |
11370817 | <gh_stars>1-10
# Author: Yubo "Paul" Yang
# Email: <EMAIL>
# Routines to read linear combination of atomic orbitals (LCAO) hdf5 file
from qharv.seed import xml
from qharv.seed.wf_h5 import read, ls
# ====================== level 1: extract basic info =======================
def abs_grid(fp, iabs):
    """Extract <grid> for some <atomicBasisSet>

    Args:
      fp (h5py.File): LCAO h5
      iabs (int): index of the atomic basis set
    Return:
      lxml.etree.Element: <grid>
    """
    node = xml.etree.Element('grid')
    h5_path = 'basisset/atomicBasisSet%d' % iabs
    _add_attribs(node, fp, h5_path, ['type', 'ri', 'rf', 'npts'], prefix='grid_')
    return node
def bg_radfunc(fp, iabs, ibg, irf):
    """Extract <radfunc> for some <basisGroup>

    Args:
      fp (h5py.File): LCAO h5
      iabs (int): atomic basis set index
      ibg (int): basis group index
      irf (int): radial function index
    Return:
      lxml.etree.Element: <radfunc>
    """
    group_path = 'basisset/atomicBasisSet%d/basisGroup%d' % (iabs, ibg)
    data_path = '%s/radfunctions/DataRad%d' % (group_path, irf)
    node = xml.etree.Element('radfunc')
    _add_attribs(node, fp, data_path, ['exponent', 'contraction'])
    return node
def basis_group(fp, iabs, ibg):
    """Extract <basisGroup>

    Args:
      fp (h5py.File): LCAO h5
      iabs (int): atomic basis set index
      ibg (int): basis group index
    Return:
      lxml.etree.Element: <basisGroup>
    """
    group_path = 'basisset/atomicBasisSet%d/basisGroup%d' % (iabs, ibg)
    node = xml.etree.Element('basisGroup')
    _add_attribs(node, fp, group_path, ['rid', 'n', 'l', 'type'])
    # Attach one <radfunc> child per radial function in this group.
    nrf = fp['%s/NbRadFunc' % group_path][()][0]
    for irf in range(nrf):
        node.append(bg_radfunc(fp, iabs, ibg, irf))
    return node
def _add_attribs(node, fp, path, attribs, prefix=''):
    """Copy scalar h5 entries at path/<prefix><attrib> onto *node* as strings."""
    for name in attribs:
        dataset_path = '%s/%s' % (path, prefix + name)
        node.set(name, str(fp[dataset_path][()][0]))
# ====================== level 2: hdf5 to xml =======================
def basisset(fp):
    """Extract <basisset>

    Builds the full basis set tree: one <atomicBasisSet> per element, each
    with a <grid> and its <basisGroup> children.

    Args:
      fp (h5py.File): LCAO h5
    Return:
      lxml.etree.Element: <basisset>
    """
    bs = xml.etree.Element('basisset')
    bsname = fp['basisset/name'][()][0]
    bs.set('name', bsname)
    nabs = fp['basisset/NbElements'][()][0]
    for iabs in range(nabs):  # atomicBasisSet
        path = 'basisset/atomicBasisSet%d' % iabs
        myabs = xml.etree.Element('atomicBasisSet')
        abs_attribs = ['name', 'angular', 'elementType', 'normalized']
        _add_attribs(myabs, fp, path, abs_attribs)
        # !!!! make type "Gaussian", otherwise default to "Numeric"
        myabs.set('type', 'Gaussian')
        # each atomic basis set should have a <grid> and a few <basisGroup>s
        grid = abs_grid(fp, iabs)
        myabs.append(grid)
        # build basis groups
        nbg = fp['%s/NbBasisGroups' % path][()][0]
        for ibg in range(nbg):
            bg = basis_group(fp, iabs, ibg)
            myabs.append(bg)
        bs.append(myabs)
    return bs
def sposet(fp, bsname, cname, nstate=None, ik=0, ispin=0):
    """Build a <sposet> node from the eigenvectors stored for (ik, ispin).

    Args:
      fp (h5py.File): LCAO h5
      bsname (str): name of the <basisset> the orbitals expand in
      cname (str): id for the <coefficient> node
      nstate (int, optional): number of MOs to keep; default is all
      ik (int, optional): k-point index, default 0
      ispin (int, optional): spin channel index, default 0
    Return:
      lxml.etree.Element: <sposet>
    Raises:
      RuntimeError: when more states are requested than are stored
    """
    path = 'KPTS_%d/eigenset_%d' % (ik, ispin)
    mo_coeff = fp[path][()].T  # transpose so rows are MOs, columns AOs
    nmo, nao = mo_coeff.shape
    if nstate is None:
        nstate = nmo
    if nstate > nmo:
        raise RuntimeError('state %d/%d is not available' % (nstate, nmo))
    # build <sposet>
    ss = xml.etree.Element('sposet')
    ss.set('basisset', bsname)
    ss.set('name', 'spo-ud')
    ss.set('size', str(nstate))
    ss.set('optimize', 'yes')
    # add <coefficient>
    cnode = xml.etree.Element('coefficient')
    cnode.set('size', str(nao))
    cnode.set('id', cname)
    cnode.text = xml.arr2text(mo_coeff[:nstate, :])
    ss.append(cnode)
    return ss
| StarcoderdataPython |
9777459 | # -*- coding: utf-8 -*-
# Copyright © 2018, 2019 <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Module for rendering matrix messages in Weechat."""
from __future__ import unicode_literals
from nio import Api
from .globals import W
from .colors import Formatted
class Render(object):
    """Class collecting methods for rendering matrix messages in Weechat."""

    @staticmethod
    def _media(url, description):
        # Shared layout: "<description> [url]" using Weechat delimiter colors.
        return ("{del_color}<{ncolor}{desc}{del_color}>{ncolor} "
                "{del_color}[{ncolor}{url}{del_color}]{ncolor}").format(
                    del_color=W.color("chat_delimiters"),
                    ncolor=W.color("reset"),
                    desc=description, url=url)

    @staticmethod
    def media(mxc, body, homeserver=None):
        """Render a mxc media URI."""
        url = Api.mxc_to_http(mxc, homeserver)
        description = "{}".format(body) if body else "file"
        return Render._media(url, description)

    @staticmethod
    def encrypted_media(mxc, body, key, hash, iv, homeserver=None, mime=None):
        """Render a mxc media URI of an encrypted file."""
        # NOTE: `hash` shadows the builtin, but the name is part of the
        # public signature and is kept.
        http_url = Api.encrypted_mxc_to_plumb(
            mxc,
            key,
            hash,
            iv,
            homeserver,
            mime,
        )
        url = http_url if http_url else mxc  # fall back to the raw mxc URI
        description = "{}".format(body) if body else "file"
        return Render._media(url, description)

    @staticmethod
    def message(body, formatted_body):
        """Render a room message, preferring the HTML-formatted body."""
        if formatted_body:
            formatted = Formatted.from_html(formatted_body)
            return formatted.to_weechat()
        return body

    @staticmethod
    def redacted(censor, reason=None):
        """Render a redacted event message."""
        reason = (
            ', reason: "{reason}"'.format(reason=reason)
            if reason
            else ""
        )
        data = (
            "{del_color}<{log_color}Message redacted by: "
            "{censor}{log_color}{reason}{del_color}>{ncolor}"
        ).format(
            del_color=W.color("chat_delimiters"),
            ncolor=W.color("reset"),
            log_color=W.color("logger.color.backlog_line"),
            censor=censor,
            reason=reason,
        )
        return data

    @staticmethod
    def room_encryption(nick):
        """Render a room encryption event."""
        return "{nick} has enabled encryption in this room".format(nick=nick)

    @staticmethod
    def unknown(message_type, content=None):
        """Render a message of an unknown type."""
        content = (
            ': "{content}"'.format(content=content)
            if content
            else ""
        )
        return "Unknown message of type {t}{c}".format(
            t=message_type,
            c=content
        )

    @staticmethod
    def megolm():
        """Render an undecrypted megolm event."""
        return ("{del_color}<{log_color}Unable to decrypt: "
                "The sender's device has not sent us "
                "the keys for this message{del_color}>{ncolor}").format(
                    del_color=W.color("chat_delimiters"),
                    log_color=W.color("logger.color.backlog_line"),
                    ncolor=W.color("reset"))

    @staticmethod
    def bad(event):
        """Render a malformed event of a known type"""
        return "Bad event received, event type: {t}".format(t=event.type)
| StarcoderdataPython |
6401692 | <filename>ChessOpenings.py
from bs4 import BeautifulSoup
import requests
# Scrape the 365chess ECO index and print how many opening names were found.
req = requests.get('https://www.365chess.com/eco.php')
soup = BeautifulSoup(req.content, 'html.parser')
table = soup.find_all('div', class_ = 'opname' )  # one <div class="opname"> per opening
chessOpenings = [i.getText() for i in table]
print(len(chessOpenings))
185165 | <reponame>AkashSCIENTIST/CompetitiveSolutions<filename>HackerRank/Python/Validating phone numbers.py
# Enter your code here. Read input from STDIN. Print output to STDOUT
import re
# Accept 10-digit numbers starting with 7, 8 or 9: re.match anchors at the
# start and the trailing $ after nine more digits enforces exactly ten.
r = re.compile("[789]\d{9}$")
for _ in range(int(input())):
    s = input()
    if bool(r.match(s)):
        print("YES")
    else:
        print("NO")
| StarcoderdataPython |
9766941 | <gh_stars>0
from graph_component.models import * # noqa
| StarcoderdataPython |
3468267 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import time
import datetime
__all__ = [
'timestamp',
'now'
]
def timestamp():
    """Generate a timestamp using the current date and time.

    Returns
    -------
    str
        The timestamp, formatted as ``YYYYMMDDHHMMSS``.

    Examples
    --------
    >>> len(timestamp()) == 14
    True
    """
    fmt = '%Y%m%d%H%M%S'
    moment = datetime.datetime.fromtimestamp(time.time())
    return moment.strftime(fmt)
def now():
    """Alias for :func:`timestamp`: the current date and time as a string.

    Returns
    -------
    str
        The timestamp.

    Examples
    --------
    >>> type(now()) == type('')
    True
    """
    return timestamp()
# ==============================================================================
# Main
# ==============================================================================

if __name__ == "__main__":
    # Run the doctests embedded in the docstrings above.
    import doctest
    doctest.testmod(globs=globals())
| StarcoderdataPython |
1722246 | <gh_stars>0
from doab.tests.test_types import IntersectAcceptanceTest, TestManager, ReferenceParsingTest
from doab.parsing.reference_miners import (
BloomsburyAcademicMiner,
CambridgeCoreParser,
CitationTXTReferenceMiner,
SpringerMiner,
)
@TestManager.register
class PalgraveCUPIntersect(IntersectAcceptanceTest):
    """Intersect acceptance case: a citation shared by Palgrave and CUP books."""
    CITATION = "<NAME>. 1993. The Two Cultures"
    BOOK_IDS = {"16498", "27401"}  # expected matching DOAB book ids
@TestManager.register
class PalgraveAcceptanceTestA(IntersectAcceptanceTest):
    """Intersect acceptance case for a Foucault citation across Palgrave books."""
    CITATION = "Foucault, M. (1991). Discipline and Punish. The Birth of the Prison St. Ives: Penguin"
    BOOK_IDS = {"24596", "20716", "27401"} #24598
@TestManager.register
class PalgraveAcceptanceTestB(IntersectAcceptanceTest):
    """Intersect acceptance case for a DOI-bearing journal citation."""
    CITATION = "<NAME>., <NAME>., & <NAME>. (2016). The impact of delays on maternal and neonatal outcomes in Ugandan public health facilities: The role of absenteeism. Health Policy and Planning, 1–10. doi:10.1093/heapol/czw046."
    BOOK_IDS = {"21612", "20717", "21612"}  # NOTE: "21612" listed twice in original
@TestManager.register
class PalgraveAcceptanceTestC(IntersectAcceptanceTest):
    """Intersect acceptance case for a CrossRef-suffixed nursing citation."""
    CITATION = "<NAME>., & <NAME>. (2014). Developing cultural sensitivity and awareness in nursing overseas. Nursing Standard, 28(44), 39–43.CrossRef"
    BOOK_IDS = {"21610", "21612"}
@TestManager.register
class OpenEditionsTestA(IntersectAcceptanceTest):
    """Intersect acceptance case for a French monograph cited by many books."""
    CITATION = "<NAME>, Les Structures anthropologiques de l'imaginaire, Paris, Bordas, 1969."
    BOOK_IDS = {"16988", "19329", "20818", "20855", "20862", "20941", "21060", "21251", "22074", "22229", "22264"}
@TestManager.register
class OpenEditionsTestB(IntersectAcceptanceTest):
    """Intersect acceptance case for the French edition of Foucault's Surveiller et punir."""
    CITATION = "Foucault M. (1975), Surveiller et punir, Paris, Gallimard."
    BOOK_IDS = {"9337", "20851", "21101", "21176", "21251"}
@TestManager.register
class OpenEditionsTestC(IntersectAcceptanceTest):
    """Intersect acceptance case for a journal article citation."""
    CITATION = "Brynen Rex 1995, The Neopatrimonial Dimension of Palestinian Politics , Journal of Palestine Studies 1, p. 23-36."
    BOOK_IDS = {"15809", "15815", "16571", "16583", "16604"}
@TestManager.register
class CEDEJParsingTest(ReferenceParsingTest):
    """Reference-parsing case for CEDEJ books via the citation TXT miner."""
    PUBLISHER_NAME = "CEDEJ"
    # book id -> expected number of extracted references
    BOOK_REFERENCE_COUNTS = {
        "22138": 43,
        "22141": 51,
        "22142": 127,
        "22143": 103,
        "22213": 102,
    }
    MINER = CitationTXTReferenceMiner
@TestManager.register
class PalgraveParsingTest(ReferenceParsingTest):
    """Reference-parsing case for Palgrave books via the Springer miner."""
    PUBLISHER_NAME = "<NAME>"
    # book id -> expected number of extracted references
    BOOK_REFERENCE_COUNTS = {
        "26919": 957,
        "27363": 387,
        "27364": 157,
        "27401": 209,
        "27402": 398,
    }
    MINER = SpringerMiner
@TestManager.register
class BloomsburyParsingTest(ReferenceParsingTest):
    """Reference-parsing case for Bloomsbury books via its dedicated miner."""
    PUBLISHER_NAME = "<NAME>"
    # book id -> expected number of extracted references
    BOOK_REFERENCE_COUNTS = {
        "14368": 94,
        "15449": 145,
        "14372": 15,
        "14373": 211,
        "14376": 32,
    }
    MINER = BloomsburyAcademicMiner
@TestManager.register
class CasaVelazquezParsingTest(ReferenceParsingTest):
    """Reference-parsing case for Casa de Velázquez books via the TXT miner."""
    PUBLISHER_NAME = "<NAME>"
    # book id -> expected number of extracted references
    BOOK_REFERENCE_COUNTS = {
        "22583": 431,
        "22584": 84,
        "22585": 531,
        "22586": 495,
        "22587": 453,
    }
    MINER = CitationTXTReferenceMiner
@TestManager.register
class CambridgeCoreParsingTest(ReferenceParsingTest):
    """Reference-parsing case for CUP books via the Cambridge Core parser."""
    PUBLISHER_NAME = "Cambridge University Press"
    # book id -> expected number of extracted references
    BOOK_REFERENCE_COUNTS = {
        "15986": 773,
        "15989": 338,
        "16001": 477,
        "16498": 387,
        "21821": 388,
    }
    MINER = CambridgeCoreParser
| StarcoderdataPython |
3207948 | <filename>matrix_game/q_mix.py
"""TF2 simple Qmix Implementation for matrix game."""
# Import all packages
import tensorflow as tf
class QmixNet(tf.keras.Model):
    """Mixing network for the 2-player matrix game.

    Holds one tabular Q-vector per agent and mixes the two chosen Q-values
    through a small network whose weights are exponentiated, keeping the
    mixed output monotone in each agent's individual Q (the Qmix constraint).
    """

    def __init__(self, matrix_dims, name='Qmix', **kwargs):
        super(QmixNet, self).__init__(name=name, **kwargs)
        q_init = tf.zeros_initializer()
        # Per-agent tabular Q-values, one entry per action.
        self.q_1 = tf.Variable(initial_value=q_init(shape=(matrix_dims[0],), dtype='float32'), trainable=True)
        self.q_2 = tf.Variable(initial_value=q_init(shape=(matrix_dims[1],), dtype='float32'), trainable=True)
        nmbr_units = 5  # hidden width of the mixing network
        b_init = tf.zeros_initializer()
        self.b_0 = tf.Variable(initial_value=b_init(shape=(nmbr_units,), dtype='float32'), trainable=True)
        self.b_1 = tf.Variable(initial_value=b_init(shape=(1,), dtype='float32'), trainable=True)
        w_init = tf.random_normal_initializer()
        self.w_0 = tf.Variable(initial_value=w_init(shape=(2, nmbr_units), dtype='float32'), trainable=True)
        self.w_1 = tf.Variable(initial_value=w_init(shape=(nmbr_units, 1), dtype='float32'), trainable=True)

    @tf.function
    def call(self, actions):
        """Mix the two agents' Q-values for the given joint action.

        Returns (q_1[a1], q_2[a2], mixed_q). tf.math.exp keeps the mixing
        weights positive, so mixed_q is monotone in each individual Q.
        """
        x = tf.expand_dims(tf.stack([self.q_1[actions[0]], self.q_2[actions[1]]]), axis=0)
        x = tf.matmul(x, tf.math.exp(self.w_0)) + self.b_0
        x = tf.nn.elu(x)
        output = tf.matmul(x, tf.math.exp(self.w_1)) + self.b_1
        return self.q_1[actions[0]], self.q_2[actions[1]], output
class Qmix(object):
    """Qmix for matrix game."""

    def __init__(self, matrix_dims, step_size):
        # Plain SGD; the mixer holds both agents' tabular Q-values.
        self._optimizer = tf.keras.optimizers.SGD(learning_rate=step_size)
        self._q_mixer = QmixNet(matrix_dims)

    @tf.function
    def learn(self, actions, r):
        """One squared-error step fitting the mixed Q toward reward *r*."""
        with tf.GradientTape(persistent=True, watch_accessed_variables=False) as tape:
            tape.watch(self._q_mixer.trainable_weights)
            q1, q2, q_out = self._q_mixer(actions, training=True)
            loss = 0.5 * tf.square(q_out - r, name='loss')
        grads = tape.gradient(loss, self._q_mixer.trainable_weights)
        self._optimizer.apply_gradients(list(zip(grads, self._q_mixer.trainable_weights)))
        return q1, q2, q_out

    @tf.function
    def obtain_q(self, actions):
        """Obtain q's for the given joint action without training."""
        return self._q_mixer(actions, training=False)
| StarcoderdataPython |
9610736 | """Utilities for TableNet."""
from .marmot import MarmotDataModule, MarmotDataset
from .tablenet import TableNetModule
__all__ = ['MarmotDataModule', 'TableNetModule']
| StarcoderdataPython |
9776055 | <gh_stars>10-100
import numpy
import pandas as pd
from datetime import datetime
import math
import pathlib
import sys
import os
from pyexcel_ods import get_data
import sativ_indemnity_parser_out_2020
import sinat_indemnity_parser_out_2020
import mativ_indemnity_parser_jan_2020
import mativ_indemnity_parser_fev_2020
import sativ_indemnity_parser_mar_2020
import mativ_indemnity_parser_abr_2020
import sativ_indemnity_parser_abr_2020
import mativ_indemnity_parser_maio_jul_2020
import minat_indemnity_parser_jul_jun_2020
import sativ_indemnity_parser_jun_ago_dez_2020
import sinat_indemnity_parser_jun_2020
import sinat_indemnity_parser_jul_2020
import mativ_indemnity_parser_ago_2020
import minat_indemnity_parser_ago_2020
import sinat_indemnity_parser_ago_2020
import mativ_indemnity_parser_set_2020
import minat_indemnity_parser_set_2020
import sinat_indemnity_parser_dez_2020
# Turns a tuple of rows into a pandas DataFrame object. This method is needed
# because pandas cannot convert certain spreadsheets into a DataFrame
# without specific pre-processing.
def mount_df(sheet):
    """Turn a pyexcel sheet (list of row lists) into a pandas DataFrame.

    The header is split over the first two rows (4 leading columns in row 0,
    the remaining columns in row 1), and several indemnity columns appear
    twice and must be disambiguated before building the frame. The two
    header rows are popped from *sheet* (mutates the caller's list).
    """
    keys = []
    # Collect keys for the df: 4 from the first header row, rest from the second.
    for key in sheet[0][0:4]:
        keys.append(key)
    for key in sheet[1]:
        keys.append(key)
    # Handle columns that share the same name.
    equal_columns = ['AUXÍLIO-ALIMENTAÇÃO','AUXÍLIO-EDUCAÇÃO','AUXÍLIO-SAÚDE','AUXÍLIO-LOCOMOÇÃO','AUXÍLIO-MORADIA','INDENIZAÇÃO DE TRANSPORTE']
    indexes = []
    for col in keys:
        if col in equal_columns:
            # NOTE(review): keys.index(col) always returns the FIRST occurrence,
            # so for a duplicated name both iterations append the same index and
            # the second occurrence is never renamed -- confirm against real data.
            indexes.append(keys.index(col))
    # Alternate suffixes: even list positions -> indemnity block, odd -> retroactive.
    for i in range(len(indexes)):
        if (i % 2) == 0:
            keys[indexes[i]] = keys[indexes[i]] + '/VERBAS INDENIZATÓRIAS'
        else:
            keys[indexes[i]] = keys[indexes[i]] + '/OUTRAS REMUNERAÇÕES RETROATIVAS/TEMPORÁRIAS'
    # Drop the two header rows before building the frame.
    sheet.pop(0)
    sheet.pop(0)
    return pd.DataFrame(sheet, columns=keys)
#Lê os dados baixados pelo crawler
def read_data(path):
try:
data = pd.read_excel(pathlib.Path(path), engine= 'odf')
#Se o pandas tiver problemas ao ler os heathers seu retorno é um df Null
if data.isnull().all().all().all():
sheet = get_data(path)['Sheet1']
data = mount_df(sheet)
return data
except Exception as excep:
sys.stderr.write("'Não foi possível ler o arquivo: " +
path + '. O seguinte erro foi gerado: ' + excep)
os._exit(1)
def is_nan(string):
    """Return True when the value is NaN.

    NaN is the only value that compares unequal to itself, so this works
    without assuming a float input.
    Source: https://stackoverflow.com/a/944712/5822594
    """
    unequal_to_itself = string != string
    return unequal_to_itself
def get_begin_row(rows, begin_string):
    """Return the index of the first data row after the header row.

    Scans until a row whose first cell equals *begin_string*, then skips the
    blank (NaN) filler rows that follow it in this spreadsheet layout.
    """
    index = 0
    for row in rows:
        index += 1
        if row[0] == begin_string:
            break
    # Keep advancing past blank cells between the header and the data.
    while is_nan(rows[index][0]):
        index += 1
    return index
def get_end_row(rows, begin_row):
    """Return the index of the first blank (NaN) row at or after *begin_row*.

    When no blank row follows, len(rows) is returned.
    """
    for position, row in enumerate(rows):
        if position >= begin_row and is_nan(row[0]):
            return position
    return len(rows)
def type_employee(fn):
    """Map a spreadsheet file name to the kind of public employee it lists.

    Raises ValueError for file names matching none of the known markers.
    """
    markers = (
        (('MATIV', 'MINAT'), 'membro'),
        (('SATIV', 'SINAT'), 'servidor'),
        (('PENSI',), 'pensionista'),
        (('COLAB',), 'colaborador'),
    )
    for tokens, kind in markers:
        if any(token in fn for token in tokens):
            return kind
    raise ValueError('Tipo inválido de funcionário público: ' + fn)
def clean_currency_val(value):
    """Normalize a Brazilian currency string ("R$ 1.234,56" -> "1234.56").

    Non-string values are returned untouched.
    """
    if not isinstance(value, str):
        return value
    for old, new in (('R$', ''), ('.', ''), (',', '.'), (' ', ''), ('"', ''), ("'", '')):
        value = value.replace(old, new)
    return value
def clean_currency(data, beg_col, end_col):
    """Normalize currency strings in the column slice [beg_col, end_col) in place."""
    for column in data.columns[beg_col:end_col]:
        data[column] = data[column].apply(clean_currency_val)
def parse_employees(file_name):
    """Parse a payroll spreadsheet of employees/members into a dict keyed by registration number.

    file_name: path of the spreadsheet; substrings of the name select behaviour:
        'INAT'/'PENSI' mark the record inactive, '2020' selects the 2020 column
        layout (indemnity details arrive later from a separate sheet).
    Returns: dict mapping registration number (float when possible) -> record dict.
    """
    rows = read_data(file_name)
    # Columns 4.. hold currency values that must be normalised before float().
    clean_currency(rows, 4, len(rows.columns))
    rows = rows.to_numpy()
    begin_string = 'Matrícula'
    begin_row = get_begin_row(rows, begin_string)
    end_row = get_end_row(rows, begin_row)
    typeE = type_employee(file_name)
    activeE = 'INAT' not in file_name and "PENSI" not in file_name
    employees = {}
    curr_row = 0
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        # If possible, prefer treating the registration number as a float rather
        # than a string, because then matricula.0 == matricula
        try:
            reg = float(row[0])
        except:
            reg = str(row[0])
        role = row[2]
        workplace = row[3]
        remuneration = float(row[4]) # Remuneration of the permanent position ("Remuneração do cargo efetivo")
        other_verbs = float(row[5]) # Other remuneration amounts, legal or judicial ("Outras verbas remuneratórias")
        trust_pos = float(row[6]) # Position of trust ("Posição de Confiança")
        christmas_bonus = float(row[7]) # Christmas bonus ("Gratificação natalina")
        abono_permanencia = float(row[9]) # Permanence allowance ("Abono Permanência")
        terco_ferias = float(row[8]) # Constitutional 1/3 vacation bonus ("Férias (1/3 constitucional)")
        idemnity = float(row[16]) # Indemnities ("Indenizações")
        temp_remu = float(row[17]) # Other retroactive/temporary remunerations
        prev_contrib = float(row[13]) # Social-security contribution ("Contribuição previdenciária")
        ceil_ret = float(row[15]) # Retention due to the constitutional ceiling ("Retenção por teto constitucional")
        income_tax = float(row[14]) # Income tax ("Imposto de renda")
        # Important: because of how the agency reports the data, the registration
        # number must be treated as a number (float) for comparison/key purposes.
        # However, for storage it must be kept as a string, to stay consistent
        # with our pipeline.
        if '2020' in file_name:
            # 2020 layout: perks and gratification details come later from the
            # separate indemnity ("Verbas Indenizatórias") sheet, so totals here
            # exclude them and are topped up by update_employee_indemnity().
            employees[reg] = {
                'reg': str(row[0]),
                'name': row[1],
                'type': typeE,
                'active': activeE,
                "income":
                {
                    'total': remuneration + other_verbs,
                    'wage': remuneration + other_verbs,
                    'perks':{
                    },
                    'other':
                    { # Gratifications
                        # Position of trust + Christmas bonus + 1/3 vacation bonus + permanence allowance
                        'total': trust_pos + christmas_bonus + terco_ferias + abono_permanencia,
                        'trust_position': trust_pos,
                        # Christmas bonus + 1/3 vacation bonus + permanence allowance
                        'others_total': christmas_bonus + terco_ferias + abono_permanencia,
                        'others': {
                            'Gratificação natalina': christmas_bonus,
                            'Férias (1/3 constitucional)': terco_ferias,
                            'Abono de permanência': abono_permanencia,
                        }
                    },
                },
                'discounts':
                {
                    'total': round(prev_contrib + ceil_ret + income_tax, 2),
                    'prev_contribution': prev_contrib,
                    'ceil_retention': ceil_ret,
                    'income_tax': income_tax
                }
            }
        else:
            # Pre-2020 layout uses different column positions for the discounts.
            prev_contrib = float(row[11])
            income_tax = float(row[12])
            ceil_ret = float(row[13])
            # In the absence of indemnity details, we must update the totals right here
            employees[reg] = {
                'reg': str(row[0]),
                'name': row[1],
                'type': typeE,
                'active': activeE,
                "income":
                {
                    'total': remuneration + other_verbs + trust_pos + christmas_bonus + terco_ferias + abono_permanencia,
                    'wage': remuneration + other_verbs,
                    'perks':{
                        'total': idemnity,
                    },
                    'other':
                    { # Gratifications
                        # Position of trust + Christmas bonus + 1/3 vacation bonus + permanence allowance
                        'total': trust_pos + christmas_bonus + terco_ferias + abono_permanencia + temp_remu,
                        'trust_position': trust_pos,
                        # Christmas bonus + 1/3 vacation bonus + permanence allowance
                        'others_total': christmas_bonus + terco_ferias + abono_permanencia,
                        'others': {
                            'Gratificação natalina': christmas_bonus,
                            'Férias (1/3 constitucional)': terco_ferias,
                            'Abono de permanência': abono_permanencia,
                        }
                    },
                },
                'discounts':
                {
                    'total': round(prev_contrib + ceil_ret + income_tax, 2),
                    'prev_contribution': prev_contrib,
                    'ceil_retention': ceil_ret,
                    'income_tax': income_tax
                }
            }
        # This condition is needed because the workplace and role columns may be
        # left empty, e.g. for inactive employees and members.
        # When the column is empty, the value arrives as a float NaN;
        # when it is filled in, the value arrives as a str.
        if type(workplace) == str:
            employees[reg]['workplace'] = workplace
        if type(role) == str:
            employees[reg]['role'] = role
        curr_row += 1
        if curr_row >= end_row:
            break
    return employees
def parse_colab(file_name):
    """Parse a collaborators ("COLAB") spreadsheet into a dict keyed by collaborator name.

    file_name: path of the spreadsheet to read.
    Returns: dict mapping collaborator name -> record dict (always marked active).
    """
    rows = read_data(file_name)
    # Currency values live in columns 2..4 in this layout.
    clean_currency(rows, 2, 5)
    rows = rows.to_numpy()
    begin_string = 'LOTAÇÃO'
    begin_row = get_begin_row(rows, begin_string)
    end_row = get_end_row(rows, begin_row)
    typeE = type_employee(file_name)
    activeE = True
    employees = {}
    curr_row = 0
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        name = row[1]
        # The collaborators spreadsheet contains skipped lines that produce null identifiers
        if (not pd.isnull(name)):
            # The identifier for collaborators is the name
            wage = float(row[2]) # Gross amount received by the collaborator
            employees[name] = {
                'name': name,
                # Description of the service provided and number of the payment process
                'role': str(row[9]) + ' ' + str(row[8]),
                'type': typeE,
                'workplace': row[0],
                'active': activeE,
                "income":
                {
                    'total': wage,
                    'wage': wage,
                },
                'discounts':
                {
                    'total': float(row[4]),
                    'income_tax': float(row[3])
                }
            }
        curr_row += 1
        if curr_row >= end_row:
            break
    return employees
def update_mativ_indemnity(rows, employees):
    """Merge indemnity-sheet columns for active members (MATIV) into *employees*, in place.

    rows: numpy array from the 'Verbas Indenizatórias' sheet; row 0 is the header.
    employees: dict keyed by float registration number (as built by parse_employees).
    Returns the same *employees* dict.
    """
    curr_row = 0
    begin_row = 1  # row 0 holds the column headers
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        reg = float(row[0])
        aux_ali = float(row[4]) # AUXÍLIO-ALIMENTAÇÃO (food allowance) / indemnity amounts
        aux_ali_remu = float(row[11]) # AUXÍLIO-ALIMENTAÇÃO / other retroactive/temporary remunerations
        aux_saude = float(row[6]) # AUXÍLIO-SAÚDE (health allowance) / indemnity amounts
        aux_saude_remu = float(row[13]) # AUXÍLIO-SAÚDE / other retroactive/temporary remunerations
        aux_edu = float(row[5]) # AUXÍLIO-EDUCAÇÃO (education allowance) / indemnity amounts
        aux_edu_remu = float(row[12]) # AUXÍLIO-EDUCAÇÃO / other retroactive/temporary remunerations
        conversao_licenca = float(row[7]) # CONVERSÃO DE LICENÇA ESPECIAL (special-leave conversion)
        devolucao_rra = float(row[8]) # DEVOLUÇÃO IR RRA (income-tax RRA refund)
        indemnity_vacation = float(row[9]) # INDENIZAÇÃO DE FÉRIAS NÃO USUFRUÍDAS (unused-vacation indemnity)
        indemnity_licence = float(row[10]) # INDENIZAÇÃO POR LICENÇA NÃO GOZADA (unused-leave indemnity)
        devolucao_fundo = float(row[14]) # DEVOLUÇÃO FUNDO DE RESERVA (reserve-fund refund)
        diff_aux = float(row[15]) # DIFERENÇAS DE AUXÍLIOS (allowance differences)
        gratification = float(row[16]) # GRATIFICAÇÕES (gratifications)
        transportation = float(row[17]) # transport allowance
        parcelas_atraso = float(row[18]) # PARCELAS PAGAS EM ATRASO (instalments paid late)
        emp = employees[reg]
        emp['income']['perks'].update({
            'food': aux_ali + aux_ali_remu ,
            'transportation': transportation,
            'health': aux_saude + aux_saude_remu,
        })
        emp['income']['other']['others'].update({
            # The education allowance is split across 2 different columns
            'AUXÍLIO-EDUCAÇÃO': aux_edu + aux_edu_remu,
            'CONVERSÃO DE LICENÇA ESPECIAL': conversao_licenca,
            'DEVOLUÇÃO IR RRA': devolucao_rra,
            'INDENIZAÇÃO DE FÉRIAS NÃO USUFRUÍDAS': indemnity_vacation,
            'INDENIZAÇÃO POR LICENÇA NÃO GOZADA': indemnity_licence,
            'DEVOLUÇÃO FUNDO DE RESERVA': devolucao_fundo,
            'DIFERENÇAS DE AUXÍLIOS': diff_aux,
            'PARCELAS PAGAS EM ATRASO': parcelas_atraso
        })
        # Totals accumulate on top of what parse_employees already stored.
        emp['income']['other'].update({
            'total': round(emp['income']['other']['total'] + aux_edu + aux_edu_remu +
                conversao_licenca + devolucao_rra + indemnity_vacation + indemnity_licence +
                devolucao_fundo + diff_aux + parcelas_atraso + gratification, 2),
            'gratification': gratification,
            'others_total': round(emp['income']['other']['others_total'] +
                aux_edu + aux_edu_remu + conversao_licenca + devolucao_rra +
                indemnity_vacation + indemnity_licence + devolucao_fundo +
                diff_aux + parcelas_atraso,2),
        })
        emp['income']['perks'].update({
            'total': round( aux_ali + aux_ali_remu + transportation + aux_saude + aux_saude_remu , 2)
        })
        emp['income'].update({
            'total': round(emp['income']['total'] + emp['income']['perks']['total'] + emp['income']['other']['total'], 2)
        })
        # Stop once the last data row has been processed (element-wise numpy comparison).
        if (rows[curr_row] == rows[-1]).all():
            break
        curr_row += 1
    return employees
def update_minat_indemnity(rows, employees):
    """Merge indemnity-sheet columns for inactive members (MINAT) into *employees*, in place.

    rows: numpy array from the 'Verbas Indenizatórias' sheet; row 0 is the header.
    employees: dict keyed by float registration number.
    Returns the same *employees* dict.
    """
    curr_row = 0
    begin_row = 1  # row 0 holds the column headers
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        reg = float(row[0])
        aux_edu = float(row[4]) # AUXÍLIO-EDUCAÇÃO (education allowance) / indemnity amounts
        aux_edu_remu = float(row[9]) # AUXÍLIO-EDUCAÇÃO / other retroactive/temporary remunerations
        aux_saude = float(row[5]) # AUXÍLIO-SAÚDE (health allowance) / indemnity amounts
        aux_saude_remu = float(row[10]) # AUXÍLIO-SAÚDE / other retroactive/temporary remunerations
        devolucao_rra = float(row[6]) # DEVOLUÇÃO IR RRA (income-tax RRA refund)
        indemnity_vacation = float(row[7]) # INDENIZAÇÃO DE FÉRIAS NÃO USUFRUÍDAS (unused-vacation indemnity)
        licence = float(row[8]) # INDENIZAÇÃO DE LICENÇA ESPECIAL/PRÊMIO NÃO USUFRUÍDA (unused special leave/award indemnity)
        devolucao_fundo = float(row[11]) # DEVOLUÇÃO FUNDO DE RESERVA (reserve-fund refund)
        emp = employees[reg]
        emp['income']['perks'].update({
            'health': aux_saude + aux_saude_remu,
        })
        emp['income']['other']['others'].update({
            # The education allowance is split across 2 different columns
            'AUXÍLIO-EDUCAÇÃO': aux_edu + aux_edu_remu,
            'DEVOLUÇÃO IR RRA': devolucao_rra,
            'INDENIZAÇÃO DE FÉRIAS NÃO USUFRUÍDAS': indemnity_vacation,
            'INDENIZAÇÃO DE LICENÇA ESPECIAL/PRÊMIO NÃO USUFRUÍDA': licence,
            'DEVOLUÇÃO FUNDO DE RESERVA': devolucao_fundo,
        })
        # Totals accumulate on top of what parse_employees already stored.
        emp['income']['other'].update({
            'total': round(emp['income']['other']['total'] + aux_edu + aux_edu_remu +
                devolucao_rra + indemnity_vacation + licence + devolucao_fundo, 2),
            'others_total': round(emp['income']['other']['others_total'] +
                aux_edu + aux_edu_remu + devolucao_rra + indemnity_vacation + licence + devolucao_fundo, 2),
        })
        emp['income']['perks'].update({
            'total': round(aux_saude + aux_saude_remu, 2)
        })
        emp['income'].update({
            'total': round(emp['income']['total'] + emp['income']['perks']['total'] + emp['income']['other']['total'], 2)
        })
        # Stop once the last data row has been processed (element-wise numpy comparison).
        if (rows[curr_row] == rows[-1]).all():
            break
        curr_row += 1
    return employees
def update_sativ_indemnity(rows, employees):
    """Merge indemnity-sheet columns for active servants (SATIV) into *employees*, in place.

    rows: numpy array from the 'Verbas Indenizatórias' sheet; row 0 is the header.
    employees: dict keyed by float registration number.
    Returns the same *employees* dict.
    """
    curr_row = 0
    begin_row = 1  # row 0 holds the column headers
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        reg = float(row[0])
        aux_adocao = float(row[4]) # AUXÍLIO-ADOÇÃO (adoption allowance) / indemnity amounts
        aux_ali = float(row[5]) # AUXÍLIO-ALIMENTAÇÃO (food allowance) / indemnity amounts
        aux_ali_remu = float(row[10]) # AUXÍLIO-ALIMENTAÇÃO / other retroactive/temporary remunerations
        aux_edu = float(row[6]) # AUXÍLIO-EDUCAÇÃO (education allowance) / indemnity amounts
        aux_edu_remu = float(row[11]) # AUXÍLIO-EDUCAÇÃO / other retroactive/temporary remunerations
        aux_saude = float(row[7]) # AUXÍLIO-SAÚDE (health allowance) / indemnity amounts
        aux_saude_remu = float(row[13]) # AUXÍLIO-SAÚDE / other retroactive/temporary remunerations
        indemnity_vacation = float(row[8]) # INDENIZAÇÃO DE FÉRIAS NÃO USUFRUÍDAS (unused-vacation indemnity)
        licence = float(row[9]) # INDENIZAÇÃO DE LICENÇA ESPECIAL/PRÊMIO NÃO USUFRUÍDA (unused special leave/award indemnity)
        transportation = float(row[12]) # AUXÍLIO-LOCOMOÇÃO (transport allowance)
        diff_aux = float(row[14]) # DIFERENÇAS DE AUXÍLIOS (allowance differences)
        gratification = float(row[15]) # GRATIFICAÇÕES EVENTUAIS (occasional gratifications)
        parcelas_atraso = float(row[16]) # PARCELAS PAGAS EM ATRASO (instalments paid late)
        sub = float(row[17]) # SUBSTITUIÇÃO DE CARGO EM COMISSÃO / FUNÇÃO GRATIFICADA (substitution of commissioned post)
        emp = employees[reg]
        emp['income']['perks'].update({
            'food': aux_ali + aux_ali_remu ,
            'transportation': transportation,
            'health': aux_saude + aux_saude_remu,
        })
        emp['income']['other']['others'].update({
            # The education allowance is split across 2 different columns
            'AUXÍLIO-ADOÇÃO': aux_adocao,
            'AUXÍLIO-EDUCAÇÃO': aux_edu + aux_edu_remu,
            'INDENIZAÇÃO DE FÉRIAS NÃO USUFRUÍDAS': indemnity_vacation,
            'INDENIZAÇÃO DE LICENÇA ESPECIAL/PRÊMIO NÃO USUFRUÍDA': licence,
            'DIFERENÇAS DE AUXÍLIOS': diff_aux,
            'PARCELAS PAGAS EM ATRASO': parcelas_atraso,
            'SUBSTITUIÇÃO DE CARGO EM COMISSÃO / FUNÇÃO GRATIFICADA': sub
        })
        # Totals accumulate on top of what parse_employees already stored.
        emp['income']['other'].update({
            'total': round(emp['income']['other']['total'] + aux_adocao + aux_edu +
                aux_edu_remu + indemnity_vacation + licence + diff_aux + parcelas_atraso
                + sub + gratification, 2),
            'gratification': gratification,
            'others_total': round(emp['income']['other']['others_total'] + aux_adocao +
                aux_edu + aux_edu_remu + indemnity_vacation + licence + diff_aux +
                parcelas_atraso + sub, 2),
        })
        emp['income']['perks'].update({
            'total': round( aux_ali + aux_ali_remu + transportation + aux_saude + aux_saude_remu , 2)
        })
        emp['income'].update({
            'total': round(emp['income']['total'] + emp['income']['perks']['total'] + emp['income']['other']['total'], 2)
        })
        # Stop once the last data row has been processed (element-wise numpy comparison).
        if (rows[curr_row] == rows[-1]).all():
            break
        curr_row += 1
    return employees
def update_sinat_indemnity(rows, employees):
    """Merge indemnity-sheet columns for inactive servants (SINAT) into *employees*, in place.

    rows: numpy array from the 'Verbas Indenizatórias' sheet; row 0 is the header.
    employees: dict keyed by float registration number.
    Returns the same *employees* dict.
    """
    curr_row = 0
    begin_row = 1  # row 0 holds the column headers
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        reg = float(row[0])
        aux_adocao = float(row[4]) # AUXÍLIO-ADOÇÃO (adoption allowance) / indemnity amounts
        aux_ali = float(row[5]) # AUXÍLIO-ALIMENTAÇÃO (food allowance) / indemnity amounts
        aux_saude = float(row[6]) # AUXÍLIO-SAÚDE (health allowance) / indemnity amounts
        aux_saude_remu = float(row[10]) # AUXÍLIO-SAÚDE / other retroactive/temporary remunerations
        indemnity_vacation = float(row[7]) # INDENIZAÇÃO DE FÉRIAS NÃO USUFRUÍDAS (unused-vacation indemnity)
        licence = float(row[8]) # INDENIZAÇÃO DE LICENÇA ESPECIAL/PRÊMIO NÃO USUFRUÍDA (unused special leave/award indemnity)
        transportation = float(row[9]) # transport allowance
        diff_aux = float(row[11]) # DIFERENÇAS DE AUXÍLIOS (allowance differences)
        parcelas_atraso = float(row[12]) # PARCELAS PAGAS EM ATRASO (instalments paid late)
        emp = employees[reg]
        emp['income']['perks'].update({
            'food': aux_ali ,
            'transportation': transportation,
            'health': aux_saude + aux_saude_remu,
        })
        emp['income']['other']['others'].update({
            'AUXÍLIO-ADOÇÃO': aux_adocao,
            'INDENIZAÇÃO DE FÉRIAS NÃO USUFRUÍDAS': indemnity_vacation,
            'INDENIZAÇÃO DE LICENÇA ESPECIAL/PRÊMIO NÃO USUFRUÍDA': licence,
            'DIFERENÇAS DE AUXÍLIOS': diff_aux,
            'PARCELAS PAGAS EM ATRASO': parcelas_atraso,
        })
        # Totals accumulate on top of what parse_employees already stored.
        emp['income']['other'].update({
            'total': round(emp['income']['other']['total'] + aux_adocao + indemnity_vacation
                + licence + diff_aux + parcelas_atraso, 2),
            'others_total': round(emp['income']['other']['others_total'] + aux_adocao +
                indemnity_vacation + licence + diff_aux + parcelas_atraso , 2),
        })
        emp['income']['perks'].update({
            'total': round( aux_ali + transportation + aux_saude + aux_saude_remu , 2)
        })
        emp['income'].update({
            'total': round(emp['income']['total'] + emp['income']['perks']['total'] + emp['income']['other']['total'], 2)
        })
        # Stop once the last data row has been processed (element-wise numpy comparison).
        if (rows[curr_row] == rows[-1]).all():
            break
        curr_row += 1
    return employees
def update_pensi_indemnity(rows, employees):
    """Merge indemnity-sheet columns for pensioners (PENSI) into *employees*, in place.

    rows: numpy array from the 'Verbas Indenizatórias' sheet; row 0 is the header.
    employees: dict keyed by float registration number (as built by parse_employees).
    Returns the same *employees* dict.

    Fix: unlike every sibling update_*_indemnity function, this one read the raw
    cells without float() conversion. The currency columns arrive as cleaned
    strings from clean_currency(), so the arithmetic/round() below would fail and
    the raw registration cell would not match the float keys of *employees*.
    Values are now converted exactly like in the sibling functions.
    """
    curr_row = 0
    begin_row = 1  # row 0 holds the column headers
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        reg = float(row[0])
        aux_saude = float(row[4]) # AUXÍLIO-SAÚDE (health allowance) / indemnity amounts
        devolucao_rra = float(row[5]) # DEVOLUÇÃO IR RRA (income-tax RRA refund)
        devolucao_fundo = float(row[6]) # DEVOLUÇÃO FUNDO DE RESERVA (reserve-fund refund)
        p_equi = float(row[7]) # PARCELA AUTÔNOMA DE EQUIVALÊNCIA (autonomous equivalence parcel)
        parcelas_atraso = float(row[8]) # PARCELAS PAGAS EM ATRASO (instalments paid late)
        emp = employees[reg]
        emp['income']['perks'].update({
            'health': aux_saude,
        })
        emp['income']['other']['others'].update({
            'DEVOLUÇÃO IR RRA': devolucao_rra,
            'DEVOLUÇÃO FUNDO DE RESERVA': devolucao_fundo,
            'PARCELA AUTÔNOMA DE EQUIVALÊNCIA': p_equi,
            'PARCELAS PAGAS EM ATRASO': parcelas_atraso,
        })
        # Totals accumulate on top of what parse_employees already stored.
        emp['income']['other'].update({
            'total': round(emp['income']['other']['total'] + devolucao_fundo +
                devolucao_rra + p_equi + parcelas_atraso, 2),
            'others_total': round(emp['income']['other']['others_total'] + devolucao_rra +
                devolucao_fundo + p_equi + parcelas_atraso , 2),
        })
        emp['income']['perks'].update({
            'total': round(aux_saude , 2)
        })
        emp['income'].update({
            'total': round(emp['income']['total'] + emp['income']['perks']['total'] + emp['income']['other']['total'], 2)
        })
        # Stop once the last data row has been processed (element-wise numpy comparison).
        if (rows[curr_row] == rows[-1]).all():
            break
        curr_row += 1
    return employees
def update_employee_indemnity(file_name, employees):
    """Merge indemnity ('Verbas Indenizatórias') data from *file_name* into *employees*.

    The employee category (MATIV/MINAT/SATIV/SINAT/PENSI) and, for 2020 files,
    the month encoded in the file name select the column layout and therefore
    which parser is used. *employees* is updated in place.
    """
    rows = read_data(file_name)
    clean_currency(rows, 4, len(rows.columns))
    rows = rows.to_numpy()
    if 'MATIV' in file_name:
        if '2020_01' in file_name:
            mativ_indemnity_parser_jan_2020.parse(rows, employees)
        elif '2020_02' in file_name:
            mativ_indemnity_parser_fev_2020.parse(rows, employees)
        elif '2020_04' in file_name:
            mativ_indemnity_parser_abr_2020.parse(rows, employees)
        elif any(month in file_name for month in ('2020_05', '2020_06', '2020_07')):
            # May, June and July 2020 share the same layout.
            mativ_indemnity_parser_maio_jul_2020.parse(rows, employees)
        elif '2020_08' in file_name:
            mativ_indemnity_parser_ago_2020.parse(rows, employees)
        elif '2020_09' in file_name:
            mativ_indemnity_parser_set_2020.parse(rows, employees)
        else:
            update_mativ_indemnity(rows, employees)
    elif 'MINAT' in file_name:
        if any(month in file_name for month in ('2020_06', '2020_07')):
            # June and July 2020 share the same layout.
            minat_indemnity_parser_jul_jun_2020.parse(rows, employees)
        elif '2020_08' in file_name:
            minat_indemnity_parser_ago_2020.parse(rows, employees)
        elif '2020_09' in file_name:
            minat_indemnity_parser_set_2020.parse(rows, employees)
        else:
            update_minat_indemnity(rows, employees)
    elif 'SATIV' in file_name:
        if '2020_10' in file_name:
            sativ_indemnity_parser_out_2020.parse(rows, employees)
        elif '2020_03' in file_name:
            sativ_indemnity_parser_mar_2020.parse(rows, employees)
        elif '2020_04' in file_name:
            sativ_indemnity_parser_abr_2020.parse(rows, employees)
        elif any(month in file_name for month in ('2020_06', '2020_08', '2020_12')):
            # June, August and December 2020 share the same layout.
            sativ_indemnity_parser_jun_ago_dez_2020.parse(rows, employees)
        else:
            update_sativ_indemnity(rows, employees)
    elif 'SINAT' in file_name:
        if '2020_10' in file_name:
            sinat_indemnity_parser_out_2020.parse(rows, employees)
        elif '2020_06' in file_name:
            sinat_indemnity_parser_jun_2020.parse(rows, employees)
        elif '2020_07' in file_name:
            sinat_indemnity_parser_jul_2020.parse(rows, employees)
        elif '2020_08' in file_name:
            sinat_indemnity_parser_ago_2020.parse(rows, employees)
        elif '2020_12' in file_name:
            sinat_indemnity_parser_dez_2020.parse(rows, employees)
        else:
            update_sinat_indemnity(rows, employees)
    elif 'PENSI' in file_name:
        # BUG FIX: this previously passed *file_name* (a string) instead of the
        # parsed *rows* array, so pensioner indemnity data was never merged.
        update_pensi_indemnity(rows, employees)
def parse(file_names):
    """Parse every payroll spreadsheet in *file_names* and return a list of records.

    Regular employee/member sheets and collaborator ('COLAB') sheets are parsed
    first; indemnity ('Verbas Indenizatórias') sheets are merged in afterwards,
    since they only top up records that must already exist.
    """
    employees = {}
    # Collaborators need a distinct parser.
    for file_name in file_names:
        is_indemnity = 'Verbas Indenizatórias' in file_name
        is_colab = 'COLAB' in file_name
        if is_colab:
            employees.update(parse_colab(file_name))
        elif not is_indemnity:
            employees.update(parse_employees(file_name))
    # Second pass: merge the indemnity sheets into the existing records.
    for file_name in file_names:
        if 'Verbas Indenizatórias' in file_name:
            update_employee_indemnity(file_name, employees)
    return list(employees.values())
| StarcoderdataPython |
6410177 | from django.db import models
from moondance.meta_models import MetaModel
class Tax_Rate_State(MetaModel):
    """State-level tax rate: one row per state with an optional base rate."""
    # State name; unique so each state appears at most once.
    state = models.CharField(max_length=200, unique=True)
    # Base rate with two decimal places (up to 99.99); optional.
    base_rate = models.DecimalField(
        max_digits=4, decimal_places=2, null=True, blank=True
    )
    def __str__(self):
        return "{}".format(self.state)
    class Meta:
        verbose_name = "State Tax Rate"
        verbose_name_plural = "State Tax Rates"
        ordering = ("state",)
class Tax_Rate_County(MetaModel):
    """County-level tax rate: base + transit rate, unique per (state, county)."""
    # Parent state; PROTECT prevents deleting a state that still has counties.
    state = models.ForeignKey(
        Tax_Rate_State,
        on_delete=models.PROTECT,
        related_name="Tax_Rate_County_state_fk",
    )
    county = models.CharField(max_length=200)
    # Rates with two decimal places (up to 99.99).
    base_rate = models.DecimalField(max_digits=4, decimal_places=2)
    transit_rate = models.DecimalField(max_digits=4, decimal_places=2)
    def __str__(self):
        return "{}".format(self.county)
    class Meta:
        verbose_name = "County Tax Rate"
        verbose_name_plural = "County Tax Rates"
        ordering = (
            "state",
            "county",
        )
        # County names may repeat across states, so uniqueness is per pair.
        unique_together = (
            (
                "state",
                "county",
            ),
        )
| StarcoderdataPython |
1859681 | <filename>djangoql/forms.py
from django import forms
from .models import Query
class QueryForm(forms.ModelForm):
    """ModelForm for creating/editing a saved Query (name, text, privacy flag)."""
    class Meta:
        model = Query
        fields = ['name', 'text', 'private']
| StarcoderdataPython |
1693633 | import time
import warnings
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from torch import nn
from transformers import *
from nlp_model import SentimentClassifier
from preprocessing import data_loader, preprocess, tokenizer
warnings.filterwarnings("ignore")
# Define Constants
EPOCHS = 3  # number of full passes over the training data
BATCH_SIZE = 16  # samples per mini-batch
MAX_LEN = 256  # maximum token length per input sequence
# Check if GPU is available; fall back to CPU otherwise
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def train_epoch(model, data_loader, loss_fn, optimizer, device, scheduler, n_examples):
    """Run one training epoch and return (accuracy, mean batch loss).

    model: classifier whose forward takes input_ids/attention_mask and returns logits
    data_loader: yields dicts with "input_ids", "attention_mask", "targets"
    loss_fn: loss taking (logits, targets)
    optimizer, scheduler: stepped once per batch (warm-up schedule)
    n_examples: total number of training examples (accuracy denominator)
    """
    # switch to training mode (enables dropout etc.)
    model = model.train()
    losses = []
    correct_predictions = 0
    for index, d in enumerate(data_loader):
        input_ids = d["input_ids"].to(device)
        attention_mask = d["attention_mask"].to(device)
        targets = d["targets"].to(device)
        outputs = model(input_ids=input_ids,
                        attention_mask=attention_mask)
        # predicted class = argmax over logits
        _, preds = torch.max(outputs, dim=1)
        loss = loss_fn(outputs, targets)
        correct_predictions += torch.sum(preds == targets)
        losses.append(loss.item())
        loss.backward()
        # clip gradients to stabilise training
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
        # progress report every 100 batches
        if index % 100 == 0:
            print("Iteration {}/{}, loss is {}".format(index, len(data_loader), loss))
    return correct_predictions.double() / n_examples, np.mean(losses)
def eval_model(model, data_loader, loss_fn, device, n_examples):
    """Evaluate *model* on *data_loader* and return (accuracy, mean batch loss).

    No gradients are computed and no parameters are updated.
    n_examples: total number of evaluation examples (accuracy denominator).
    """
    # change to evaluation mode (disables dropout etc.)
    model = model.eval()
    losses = []
    correct_predictions = 0
    with torch.no_grad():
        for index, d in enumerate(data_loader):
            input_ids = d["input_ids"].to(device)
            attention_mask = d["attention_mask"].to(device)
            targets = d["targets"].to(device)
            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask
            )
            # predicted class = argmax over logits
            _, preds = torch.max(outputs, dim=1)
            loss = loss_fn(outputs, targets)
            correct_predictions += torch.sum(preds == targets)
            losses.append(loss.item())
            #if index // 20 == 0:
            #    print("Iteration {}/{}, loss is {}".format(index, len(data_loader), loss))
    return correct_predictions.double() / n_examples, np.mean(losses)
if __name__ == "__main__":
    # read train and test csv file
    train = pd.read_csv("./data/train.csv")
    # test = pd.read_csv("./data/test.csv")
    # preprocess the data
    train = preprocess(train)
    print(train.shape)
    # train validation split (the frame is passed twice because train_test_split
    # expects X and y; the duplicated y outputs are discarded)
    train, validation, _, _ = train_test_split(train, train, test_size=0.2, random_state=42)
    # construct dataloader
    train_data_loader = data_loader(train, tokenizer, MAX_LEN, BATCH_SIZE)
    val_data_loader = data_loader(validation, tokenizer, MAX_LEN, BATCH_SIZE)
    # test_data_loader = data_loader(test, tokenizer, MAX_LEN, BATCH_SIZE)
    # construct model
    model = SentimentClassifier(n_classes = 3)
    model = model.to(device)
    # define AdamW optimizer from the transformers package
    optimizer = AdamW(model.parameters(), lr=2e-5, correct_bias=False)
    # total steps during training process
    total_steps = len(train_data_loader) * EPOCHS
    # use a warm-up scheduler as suggested in the paper
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=0,
        num_training_steps=total_steps
    )
    # define cross entropy loss for classification problem
    loss_fn = torch.nn.CrossEntropyLoss().to(device)
    # stores "train_acc", "train_loss", "val_acc" and "val_loss" per epoch
    history = defaultdict(list)
    # best accuracy from the best model across the whole training process
    best_accuracy = 0
    # the main training for loop
    for epoch in range(EPOCHS):
        print(f'Epoch {epoch + 1}/{EPOCHS}')
        print('-' * 10)
        start_time = time.time()
        print("Current Epoch starts at: {}".format(time.ctime()))
        # training process
        train_acc, train_loss = train_epoch(
            model,
            train_data_loader,
            loss_fn,
            optimizer,
            device,
            scheduler,
            len(train)
        )
        print(f'Train loss {train_loss} accuracy {train_acc}')
        # validation process
        val_acc, val_loss = eval_model(
            model,
            val_data_loader,
            loss_fn,
            device,
            len(validation)
        )
        print(f'Val loss {val_loss} accuracy {val_acc}')
        print("Current Epoch ends at: {}".format(time.ctime()))
        print("Time used for training the current epoch:{} \n".format(time.time()-start_time))
        # put all the accuracy and loss information into the history dictionary
        history['train_acc'].append(train_acc)
        history['train_loss'].append(train_loss)
        history['val_acc'].append(val_acc)
        history['val_loss'].append(val_loss)
        # identify and save the best model (checkpoint on validation accuracy)
        if val_acc > best_accuracy:
            torch.save(model.state_dict(), 'best_model_state.bin')
            best_accuracy = val_acc
| StarcoderdataPython |
12826662 | # coding=utf-8
import uuid
from virtualisation.clock.abstractclock import AbstractClock
from virtualisation.wrapper.abstractwrapper import AbstractComposedWrapper, AbstractWrapper
from virtualisation.sensordescription import SensorDescription
from virtualisation.wrapper.history.csvhistory import CSVHistoryReader
from virtualisation.wrapper.parser.csvparser import CSVParser
from virtualisation.wrapper.parser.jsonparser import JSONParser
from virtualisation.wrapper.connection.httpconnection import HttpPullConnection
from virtualisation.misc.log import Log
__author__ = '<NAME> (<EMAIL>)'
_X = lambda x: x.replace(" ", "_")
"""
Not using a ComposedWrapper this time is on purpose, to test if a AbstractWrapper instance will work on its own.
Service response example:
[ {"id":158,
"guid":"24c146ba-bad4-e111-9e49-005056a05270",
"x":543153.95094,
"y":465010.490411,
"incidentid":"cdfd420b-712f-e511-be17-005056a05270",
"i":"Spatii Verzi",
"title":"Toaletare iarba si gard viu, Strada Lanii, Nr. 140",
"createdon":"21/07/2015",
"statecode":1
"ticketnumber":"CAS-07445-W8V7F7"
"description":"Solicita toaletare iarba la nr. 140."
"indsoft_publiclyvisible":true
"comments":"\u003cbr /\u003e\u003ci style\u003d\u0027color:blue\u0027\u003eRezolvat la data: 28/07/2015\u003cbr /\u003e operatiunile se executa conform graficelor\u003cbr /\u003e\u003c/i\u003e"
"incidentState":"SOLVED"
"timestamp":1438176297218}]
"""
# GUIDs of the incident types exposed by the Brasov data-collector service.
# This list is kept parallel to incident_topics below: incident_types[i] is the
# GUID for the human-readable category incident_topics[i].
incident_types = ["0f75c5c1-b3f2-e111-a5d4-005056a05270",
                  "92a136cc-b3f2-e111-a5d4-005056a05270",
                  "8796aff1-b3f2-e111-a5d4-005056a05270",
                  "e3219619-c3d4-e111-9e49-005056a05270",
                  "c32e4f0a-c3d4-e111-9e49-005056a05270",
                  "03eaeadd-c1d4-e111-9e49-005056a05270",
                  "a37d1750-c2d4-e111-9e49-005056a05270",
                  "03a169f2-c2d4-e111-9e49-005056a05270",
                  "43798fff-c2d4-e111-9e49-005056a05270",
                  "24c146ba-bad4-e111-9e49-005056a05270",
                  "164aefaa-1403-e211-85da-005056a05270",
                  "9bf5c28b-f00d-e211-8e92-005056a05270"
                  ]
# Human-readable category names, index-aligned with incident_types above.
incident_topics = [
    "Animals",
    "Water and sewage",
    "Missing sewerage cover",
    "Construction",
    "road pit",
    "lighting",
    "Urban furniture: e.g park bench",
    "Sanitation",
    "Road signs",
    "Green areas",
    "pole / pillar",
    "Transport"
]
def makeSource(incident_type):
    """Build the data-collector URL listing incidents of the given type GUID."""
    base_url = "http://www.bamct.siemens.ro:9000/brasovDataCollector/indidentsView?incident_type={0}"
    return base_url.format(incident_type)
class IncidentConnection(HttpPullConnection):
    """HTTP pull connection that maps the service's "no data" replies to None."""

    def __init__(self, wrapper):
        super(IncidentConnection, self).__init__(wrapper)

    def next(self):
        payload = super(IncidentConnection, self).next()
        # The endpoint signals "no data" with an empty body or a fixed sentinel string.
        if payload and payload.strip() != "Measurement not available":
            return payload
        return None
class BrasovIncidentWrapper(AbstractWrapper):
def __init__(self, number, incident_type, incident_topic):
super(BrasovIncidentWrapper, self).__init__()
self.number = number
self.sensordescription = SensorDescription()
self.sensordescription.namespace = "http://ict-citypulse.eu/"
self.sensordescription.sensorID = "brasov_incidents_" + _X(incident_topic)
self.sensordescription.sensorName = self.sensordescription.sensorID
self.sensordescription.fullSensorID = self.sensordescription.namespace + "brasov/" + self.sensordescription.sensorID
self.sensordescription.location = "n/a"
self.sensordescription.source = makeSource(incident_type)
self.sensordescription.author = "cityofbrasov"
self.sensordescription.sensorType = "Brasov_Incidents"
self.sensordescription.graphName = "brasov_incidents#"
self.sensordescription.sourceType = "pull_http"
self.sensordescription.sourceFormat = "application/json"
self.sensordescription.information = "List of incidents reported by citizens starting from November 2014 about " + incident_topic
self.sensordescription.cityName = "Brasov"
self.sensordescription.countryName = "Romania"
self.sensordescription.movementBuffer = 0
self.sensordescription.maxLatency = 2
self.sensordescription.updateInterval = 60 * 60
self.sensordescription.messagebus.routingKey = "Brasov.Incidents." + _X(incident_topic)
self.sensordescription.fields = ["id",
"comments",
"createdon",
"description",
"guid",
"incidentState",
"incidentid",
"indsoft_publiclyvisible",
"statecode",
"ticketnumber",
"timestamp",
"title",
"x",
"y"
]
self.sensordescription.field.id.propertyName = "Property"
self.sensordescription.field.id.propertyPrefix = "ssn"
self.sensordescription.field.id.propertyURI = self.sensordescription.namespace + "brasov/incidents#ID"
self.sensordescription.field.id.min = 0
self.sensordescription.field.id.max = 99999999
self.sensordescription.field.id.dataType = "int"
self.sensordescription.field.comments.propertyName = "Property"
self.sensordescription.field.comments.propertyPrefix = "ssn"
self.sensordescription.field.comments.propertyURI = self.sensordescription.namespace + "brasov/incidents#Comments"
self.sensordescription.field.comments.min = ""
self.sensordescription.field.comments.max = ""
self.sensordescription.field.comments.dataType = "str"
self.sensordescription.field.createdon.propertyName = "MeasuredTime"
self.sensordescription.field.createdon.propertyURI = self.sensordescription.namespace + "brasov/incidents#CreatedOn"
self.sensordescription.field.createdon.unit = self.sensordescription.namespace + "unit:time"
self.sensordescription.field.createdon.min = "2012-01-01T00:00:00"
self.sensordescription.field.createdon.max = "2099-12-31T23:59:59"
self.sensordescription.field.createdon.dataType = "datetime.datetime"
self.sensordescription.field.createdon.format = "%d/%m/%Y"
self.sensordescription.field.description.propertyName = "Property"
self.sensordescription.field.description.propertyPrefix = "ssn"
self.sensordescription.field.description.propertyURI = self.sensordescription.namespace + "brasov/incidents#Description"
self.sensordescription.field.description.min = ""
self.sensordescription.field.description.max = ""
self.sensordescription.field.description.dataType = "str"
self.sensordescription.field.guid.propertyName = "Property"
self.sensordescription.field.guid.propertyPrefix = "ssn"
self.sensordescription.field.guid.propertyURI = self.sensordescription.namespace + "brasov/incidents#IncidentType"
self.sensordescription.field.guid.min = ""
self.sensordescription.field.guid.max = ""
self.sensordescription.field.guid.dataType = "str" #enum
self.sensordescription.field.i.propertyName = "Property"
self.sensordescription.field.i.propertyPrefix = "ssn"
self.sensordescription.field.i.propertyURI = self.sensordescription.namespace + "brasov/incidents#IncidentCategory"
self.sensordescription.field.i.min = ""
self.sensordescription.field.i.max = ""
self.sensordescription.field.i.dataType = "str"
self.sensordescription.field.incidentState.propertyName = "Property"
self.sensordescription.field.incidentState.propertyPrefix = "ssn"
self.sensordescription.field.incidentState.propertyURI = self.sensordescription.namespace + "brasov/incidents#State"
self.sensordescription.field.incidentState.min = 0
self.sensordescription.field.incidentState.max = 2
self.sensordescription.field.incidentState.dataType = "int"
self.sensordescription.field.incidentid.propertyName = "Property"
self.sensordescription.field.incidentid.propertyPrefix = "ssn"
self.sensordescription.field.incidentid.propertyURI = self.sensordescription.namespace + "brasov/incidents#IncedentID"
self.sensordescription.field.incidentid.min = ""
self.sensordescription.field.incidentid.max = ""
self.sensordescription.field.incidentid.dataType = "str"
self.sensordescription.field.indsoft_publiclyvisible.propertyName = "Property"
self.sensordescription.field.indsoft_publiclyvisible.propertyPrefix = "ssn"
self.sensordescription.field.indsoft_publiclyvisible.propertyURI = self.sensordescription.namespace + "brasov/incidents#Public"
self.sensordescription.field.indsoft_publiclyvisible.min = 0
self.sensordescription.field.indsoft_publiclyvisible.max = 1
self.sensordescription.field.indsoft_publiclyvisible.dataType = "int"
self.sensordescription.field.statecode.propertyName = "Property"
self.sensordescription.field.statecode.propertyPrefix = "ssn"
self.sensordescription.field.statecode.propertyURI = self.sensordescription.namespace + "brasov/incidents#StateCode"
self.sensordescription.field.statecode.min = 0
self.sensordescription.field.statecode.max = 10
self.sensordescription.field.statecode.dataType = "int"
self.sensordescription.field.ticketnumber.propertyName = "Property"
self.sensordescription.field.ticketnumber.propertyPrefix = "ssn"
self.sensordescription.field.ticketnumber.propertyURI = self.sensordescription.namespace + "brasov/incidents#TicketNumber"
self.sensordescription.field.ticketnumber.min = ""
self.sensordescription.field.ticketnumber.max = ""
self.sensordescription.field.ticketnumber.dataType = "str"
self.sensordescription.field.timestamp.propertyName = "MeasuredTime"
self.sensordescription.field.timestamp.propertyURI = self.sensordescription.namespace + "city#MeasuredTime"
self.sensordescription.field.timestamp.unit = self.sensordescription.namespace + "unit:time"
self.sensordescription.field.timestamp.min = "2012-01-01T00:00:00"
self.sensordescription.field.timestamp.max = "2099-12-31T23:59:59"
self.sensordescription.field.timestamp.dataType = "datetime.datetime"
self.sensordescription.field.timestamp.format = "UNIX5"
self.sensordescription.field.timestamp.skip_annotation = True
self.sensordescription.field.title.propertyName = "Property"
self.sensordescription.field.title.propertyPrefix = "ssn"
self.sensordescription.field.title.propertyURI = self.sensordescription.namespace + "brasov/incidents#Title"
self.sensordescription.field.title.min = ""
self.sensordescription.field.title.max = ""
self.sensordescription.field.title.dataType = "str"
self.sensordescription.field.x.propertyName = "Property"
self.sensordescription.field.x.propertyPrefix = "ssn"
self.sensordescription.field.x.propertyURI = self.sensordescription.namespace + "brasov/incidents#LocationX"
self.sensordescription.field.x.min = ""
self.sensordescription.field.x.max = ""
self.sensordescription.field.x.dataType = "float"
self.sensordescription.field.y.propertyName = "Property"
self.sensordescription.field.y.propertyPrefix = "ssn"
self.sensordescription.field.y.propertyURI = self.sensordescription.namespace + "brasov/incidents#LocationY"
self.sensordescription.field.y.min = ""
self.sensordescription.field.y.max = ""
self.sensordescription.field.y.dataType = "float"
self.sensordescription.timestamp.inField = "timestamp"
self.sensordescription.timestamp.format = "UNIX5"
self.parser = JSONParser(self)
self.connection = IncidentConnection(self)
    def getSensorDescription(self):
        # Accessor for the sensor description object populated in __init__.
        return self.sensordescription
    def setReplayMode(self, mode):
        # Switch this wrapper into (or out of) replay mode and set up a CSV
        # history reader over the recorded incident observations for feed
        # number `self.number`.
        super(BrasovIncidentWrapper, self).setReplayMode(mode)
        # fieldnames of service: "id", "comments", "createdon", "description", "guid", "incidentState", "incidentid", "indsoft_publiclyvisible", "statecode", "ticketnumber", "timestamp", "title", "x", "y"
        # NOTE(review): the list below contains an extra "i" column that is not
        # in the service fieldnames above -- presumably an additional column in
        # the recorded CSV files; confirm against incidents<N>.csv.
        fieldnames = ["id", "comments", "createdon", "description", "guid", "i", "incidentState", "incidentid", "indsoft_publiclyvisible", "statecode", "ticketnumber", "timestamp", "title", "x", "y"]
        try:
            fobj = AbstractWrapper.getFileObject(__file__, "incidents%d.csv" % self.number, "rU")
            self.historyreader = CSVHistoryReader(self, fobj, delimiter=',')
            self.historyreader.multiple_observations = False
            self.historyparser = CSVParser(self, fieldnames)
        except Exception as e:
            # Missing or unreadable history file: log and disable replay.
            Log.e("setReplayMode in Brasov Incident Wrapper", self.number, e)
            self.historyreader = None
class BrasovIncidentWrapper0(BrasovIncidentWrapper):
    # Concrete wrapper for incident feed 0; type/topic from module-level lists.
    def __init__(self):
        super(BrasovIncidentWrapper0, self).__init__(0, incident_types[0], incident_topics[0])
class BrasovIncidentWrapper1(BrasovIncidentWrapper):
    # Concrete wrapper for incident feed 1; type/topic from module-level lists.
    def __init__(self):
        super(BrasovIncidentWrapper1, self).__init__(1, incident_types[1], incident_topics[1])
class BrasovIncidentWrapper2(BrasovIncidentWrapper):
    # Concrete wrapper for incident feed 2; type/topic from module-level lists.
    def __init__(self):
        super(BrasovIncidentWrapper2, self).__init__(2, incident_types[2], incident_topics[2])
class BrasovIncidentWrapper3(BrasovIncidentWrapper):
    # Concrete wrapper for incident feed 3; type/topic from module-level lists.
    def __init__(self):
        super(BrasovIncidentWrapper3, self).__init__(3, incident_types[3], incident_topics[3])
class BrasovIncidentWrapper4(BrasovIncidentWrapper):
    # Concrete wrapper for incident feed 4; type/topic from module-level lists.
    def __init__(self):
        super(BrasovIncidentWrapper4, self).__init__(4, incident_types[4], incident_topics[4])
class BrasovIncidentWrapper5(BrasovIncidentWrapper):
    # Concrete wrapper for incident feed 5; type/topic from module-level lists.
    def __init__(self):
        super(BrasovIncidentWrapper5, self).__init__(5, incident_types[5], incident_topics[5])
class BrasovIncidentWrapper6(BrasovIncidentWrapper):
    # Concrete wrapper for incident feed 6; type/topic from module-level lists.
    def __init__(self):
        super(BrasovIncidentWrapper6, self).__init__(6, incident_types[6], incident_topics[6])
class BrasovIncidentWrapper7(BrasovIncidentWrapper):
    # Concrete wrapper for incident feed 7; type/topic from module-level lists.
    def __init__(self):
        super(BrasovIncidentWrapper7, self).__init__(7, incident_types[7], incident_topics[7])
class BrasovIncidentWrapper8(BrasovIncidentWrapper):
    # Concrete wrapper for incident feed 8; type/topic from module-level lists.
    def __init__(self):
        super(BrasovIncidentWrapper8, self).__init__(8, incident_types[8], incident_topics[8])
class BrasovIncidentWrapper9(BrasovIncidentWrapper):
    # Concrete wrapper for incident feed 9; type/topic from module-level lists.
    def __init__(self):
        super(BrasovIncidentWrapper9, self).__init__(9, incident_types[9], incident_topics[9])
class BrasovIncidentWrapper10(BrasovIncidentWrapper):
    # Concrete wrapper for incident feed 10; type/topic from module-level lists.
    def __init__(self):
        super(BrasovIncidentWrapper10, self).__init__(10, incident_types[10], incident_topics[10])
class BrasovIncidentWrapper11(BrasovIncidentWrapper):
    # Concrete wrapper for incident feed 11; type/topic from module-level lists.
    def __init__(self):
        super(BrasovIncidentWrapper11, self).__init__(11, incident_types[11], incident_topics[11])
| StarcoderdataPython |
3315324 | <reponame>JonnyBoy2000/Kira-Public
import discord
import random
import asyncio
import datetime
from datetime import timedelta
from discord.ext import commands
from utils import checks
from utils.mod import mass_purge, slow_deletion
class Mod(commands.Cog):
    """Moderation commands: message cleanup, kicks, bans, mutes and role management."""

    def __init__(self, bot):
        self.bot = bot

    def format_mod_embed(self, ctx, user, success, method):
        '''Helper function to format an embed to prevent extra code'''
        em = discord.Embed()
        em.colour = 0x36393E
        em.set_author(name=method.title(), icon_url=user.avatar_url)
        em.set_footer(text='User ID: {}'.format(user.id))
        if success:
            # "ban"/"unban" -> "banned"/"unbanned"; everything else -> "...d".
            if method == 'ban' or method == 'unban':
                em.description = '{} was just {}ned.'.format(user, method)
            else:
                em.description = '{} was just {}d.'.format(user, method)
        else:
            em.description = 'You do not have the permissions to {} users.'.format(method)
        return em

    def _role_from_string(self, guild, rolename, roles=None):
        # Case-insensitive role lookup; defaults to searching all guild roles.
        if roles is None:
            roles = guild.roles
        role = discord.utils.find(lambda r: r.name.lower() == rolename.lower(),
                                  roles)
        return role

    @commands.command()
    @commands.guild_only()
    async def clean(self, ctx):
        """Clean bot messages and command messages."""
        logs = await self.bot.db.settings.find_one({"guild_id": ctx.guild.id})
        can_mass_purge = ctx.channel.permissions_for(ctx.guild.me).manage_messages
        # Only messages younger than 14 days can be bulk-deleted by the API.
        await ctx.channel.purge(limit=100, check=lambda m: m.author == ctx.bot.user, before=ctx.message, after=datetime.datetime.now() - timedelta(days=14), bulk=can_mass_purge)
        try:
            await ctx.channel.purge(limit=100, check=lambda m: (m.content.startswith("k.") or m.content.startswith("k!") or m.content.startswith(str(logs['prefix']))), before=ctx.message, after=datetime.datetime.now() - timedelta(days=14), bulk=can_mass_purge)
        except Exception:
            # Best effort: the guild may have no custom prefix stored.
            pass
        await ctx.message.add_reaction('\u2705')

    @commands.command()
    @commands.guild_only()
    @commands.check(checks.guild)
    @commands.check(checks.kick)
    async def voicekick(self, ctx, member: discord.Member):
        """Kick a member from voice chat"""
        if member.voice is not None:
            # There is no direct "voice kick" endpoint: move the member into a
            # temporary channel and delete it, which disconnects them.
            kick_channel = await ctx.guild.create_voice_channel(name=self.bot.user.name)
            await member.move_to(kick_channel)
            await kick_channel.delete()
            await ctx.send("{0.name} has been kicked from voice".format(member))
        else:
            await ctx.send("{0.name} is not in a voice channel".format(member))

    @commands.command()
    @commands.guild_only()
    @commands.check(checks.role)
    async def addrole(self, ctx, role: discord.Role, user: discord.Member = None):
        """Adds a role to a user."""
        if user is None:
            user = ctx.author
        try:
            await user.add_roles(role)
            await ctx.send('<:check:534931851660230666> Added role {} to {}'.format(role.name, user.name))
        except discord.errors.Forbidden:
            await ctx.send("<:notdone:334852376034803714> I don't have `manage_roles` permissions!")

    @commands.command(aliases=['bc'])
    @commands.check(checks.delete)
    @commands.guild_only()
    async def botclean(self, ctx, amount: int = 100):
        """Deletes messages from bots in a channel"""
        def is_bot(m):
            return m.author.bot
        try:
            await ctx.channel.purge(limit=amount, check=is_bot)
        except discord.HTTPException:
            await ctx.send("The bot is missing permissions to delete messages.")

    @commands.command()
    @commands.guild_only()
    @commands.check(checks.kick)
    async def kick(self, ctx, member: discord.Member, *, reason: str = "[No reason specified]"):
        """Kick a user from your guild."""
        # Prefix the audit-log reason with the invoking moderator.
        reason = "[{}] {}".format(str(ctx.author), reason)
        # Refuse to act on members at/above the invoker, the owner, or the bot.
        if member.top_role.position >= ctx.author.top_role.position:
            await ctx.send("I can't do this.")
            return
        if member == ctx.guild.owner:
            await ctx.send("I can't do this.")
            return
        if member == ctx.me:
            await ctx.send("I can't do this.")
            return
        # Require the invoker to type back a random 4-digit code to confirm.
        confcode = "{}".format(random.randint(1000,9999))
        msg = "Please type in the confirmation code to confirm and kick this user, or wait 30 seconds to cancel."
        e = discord.Embed()
        e.colour = 0x36393E
        e.description = msg
        e.title = f"Kicking user {str(member)}:"
        e.add_field(name="Reason:", value=reason)
        e.add_field(name="Confirmation Code:", value=confcode)
        m = await ctx.send(embed=e)
        def a(m):
            return m.content == confcode and m.channel == ctx.channel and m.author == ctx.author
        try:
            msg = await self.bot.wait_for("message", check=a, timeout=30)
        except asyncio.TimeoutError:
            await m.delete()
            await ctx.send("Operation cancelled.")
            return
        await m.delete()
        try:
            await ctx.guild.kick(member, reason=reason)
            await ctx.send("User {} was successfully kicked.".format(str(member)))
        except (discord.HTTPException, discord.Forbidden):
            await ctx.send("I couldn't kick the user. Have you checked I have the proper permissions and that my role is higher than the user you want to kick?")

    @commands.command()
    @commands.guild_only()
    @commands.check(checks.ban)
    async def ban(self, ctx, member: discord.Member, *, reason: str = "[No reason specified]"):
        """Ban a user from your guild."""
        reason = "[{}] {}".format(str(ctx.author), reason)
        # Same safety guards as `kick`.
        if member.top_role.position >= ctx.author.top_role.position:
            await ctx.send("I can't do this.")
            return
        if member == ctx.guild.owner:
            await ctx.send("I can't do this.")
            return
        if member == ctx.me:
            await ctx.send("I can't do this.")
            return
        confcode = "{}".format(random.randint(1000,9999))
        msg = "Please type in the confirmation code to confirm and ban this user, or wait 30 seconds to cancel."
        e = discord.Embed()
        e.colour = 0x36393E
        e.description = msg
        e.title = f"Banning user {str(member)}:"
        e.add_field(name="Reason:", value=reason)
        e.add_field(name="Confirmation Code:", value=confcode)
        m = await ctx.send(embed=e)
        def a(m):
            return m.content == confcode and m.channel == ctx.channel and m.author == ctx.author
        try:
            msg = await self.bot.wait_for("message", check=a, timeout=30)
        except asyncio.TimeoutError:
            await m.delete()
            await ctx.send("Operation cancelled.")
            return
        await m.delete()
        try:
            await ctx.guild.ban(member, reason=reason, delete_message_days=7)
            await ctx.send("User {} was successfully banned.".format(str(member)))
        except (discord.HTTPException, discord.Forbidden):
            await ctx.send("I couldn't ban the user. Have you checked I have the proper permissions and that my role is higher than the user you want to ban?")

    @commands.command()
    @commands.guild_only()
    @commands.check(checks.ban)
    async def hackban(self, ctx, userid, *, reason: str = "[No reason specified]"):
        """Hackban a user from your guild."""
        reason = "[{}] {}".format(str(ctx.author), reason)
        user = discord.Object(id=userid)
        try:
            # NOTE(review): http.get_user_info is a pre-1.0 discord.py API;
            # newer versions expose bot.fetch_user instead -- confirm the
            # pinned library version before upgrading.
            name = await self.bot.http.get_user_info(userid)
        except Exception:
            await ctx.send("User not found.")
            return
        confcode = "{}".format(random.randint(1000,9999))
        msg = "Please type in the confirmation code to confirm and ban this user, or wait 30 seconds to cancel."
        e = discord.Embed()
        e.colour = 0x36393E
        e.description = msg
        e.title = f"Banning user {str(user)}:"
        e.add_field(name="Reason:", value=reason)
        e.add_field(name="Confirmation Code:", value=confcode)
        m = await ctx.send(embed=e)
        def a(m):
            return m.content == confcode and m.channel == ctx.channel and m.author == ctx.author
        try:
            msg = await self.bot.wait_for("message", check=a, timeout=30)
        except asyncio.TimeoutError:
            await m.delete()
            await ctx.send("Operation cancelled.")
            return
        await m.delete()
        try:
            # BUGFIX: the audit-log reason was computed but never passed before.
            await ctx.guild.ban(user, reason=reason)
            await ctx.send(f"User {name['username']} was successfully banned.")
        except (discord.HTTPException, discord.Forbidden):
            await ctx.send("I couldn't ban the user. Have you checked I have the proper permissions and that my role is higher than the user you want to ban?")

    @commands.command()
    @commands.guild_only()
    @commands.check(checks.ban)
    async def softban(self, ctx, member: discord.Member, *, reason: str = "[No reason specified]"):
        """Softban a user from your guild."""
        reason = "[{}] {}".format(str(ctx.author), reason)
        if member.top_role.position >= ctx.author.top_role.position:
            await ctx.send("I can't do this.")
            return
        if member == ctx.guild.owner:
            await ctx.send("I can't do this.")
            return
        if member == ctx.me:
            await ctx.send("I can't do this.")
            return
        confcode = "{}".format(random.randint(1000,9999))
        msg = "Please type in the confirmation code to confirm and softban this user, or wait 30 seconds to cancel."
        e = discord.Embed()
        e.colour = 0x36393E
        e.description = msg
        e.title = f"Soft Banning user {str(member)}:"
        e.add_field(name="Reason:", value=reason)
        e.add_field(name="Confirmation Code:", value=confcode)
        m = await ctx.send(embed=e)
        def a(m):
            return m.content == confcode and m.channel == ctx.channel and m.author == ctx.author
        try:
            msg = await self.bot.wait_for("message", check=a, timeout=30)
        except asyncio.TimeoutError:
            await m.delete()
            await ctx.send("Operation cancelled.")
            return
        await m.delete()
        try:
            # Softban = ban (deleting 7 days of messages) then immediate unban.
            await ctx.guild.ban(member, reason=reason, delete_message_days=7)
            await ctx.guild.unban(member, reason=reason)
            await ctx.send("User {} was successfully softbanned.".format(str(member)))
        except (discord.HTTPException, discord.Forbidden):
            await ctx.send("I couldn't softban the user. Have you checked I have the proper permissions and that my role is higher than the user you want to softban?")

    @commands.command()
    @commands.guild_only()
    @commands.check(checks.guild)
    async def mute(self, ctx, user: discord.Member):
        """Mute someone from the channel"""
        try:
            # Channel-level mute: deny the member permission to send messages.
            await ctx.channel.set_permissions(user, send_messages=False)
        except Exception:
            success = False
        else:
            success = True
        em = self.format_mod_embed(ctx, user, success, 'mute')
        await ctx.send(embed=em)

    @commands.command()
    @commands.guild_only()
    @commands.check(checks.delete)
    async def prune(self, ctx: commands.Context, number: int):
        """Prunes messages from the channel."""
        channel = ctx.channel
        is_bot = self.bot.user.bot
        to_delete = []
        tmp = ctx.message
        done = False
        # Collect up to `number` messages newer than 14 days (the bulk-delete
        # API limit), walking backwards through the channel history.
        while len(to_delete) - 1 < number and not done:
            async for message in channel.history(limit=1000, before=tmp):
                if len(to_delete) - 1 < number and \
                        (ctx.message.created_at - message.created_at).days < 14:
                    to_delete.append(message)
                elif (ctx.message.created_at - message.created_at).days >= 14:
                    done = True
                    break
                tmp = message
        if is_bot:
            await mass_purge(to_delete, channel)
        else:
            await slow_deletion(to_delete)

    @commands.command()
    @commands.guild_only()
    @commands.check(checks.guild)
    async def unmute(self, ctx, user: discord.Member):
        """Unmute someone from the channel"""
        try:
            # Restore the member's permission to send messages in this channel.
            await ctx.channel.set_permissions(user, send_messages=True)
        except Exception:
            success = False
        else:
            success = True
        em = self.format_mod_embed(ctx, user, success, 'unmute')
        await ctx.send(embed=em)
def setup(bot):
    """Entry point used by discord.py's extension loader: attach the Mod cog."""
    cog = Mod(bot)
    bot.add_cog(cog)
| StarcoderdataPython |
3351588 | from abc import abstractmethod, ABC
from typing import Optional, Tuple
import gym
import numpy as np
from .. import BaseTask
from ..normalizer import Normalizer
class Controller(ABC):
    """
    An abstract base class for the different types of controllers (e.g. torque or velocity controller).
    """

    def __init__(self, name: str):
        self.__name: str = name
        self.__task: Optional[BaseTask] = None
        self.__action_limits_lower: Optional[np.ndarray] = None
        self.__action_limits_upper: Optional[np.ndarray] = None
        self.__normalizer: Optional[Normalizer] = None

    def initialize(self, task: BaseTask) -> gym.spaces.Space:
        """
        Initialize the controller for a given task.

        :param task: the task in which the controller is used
        :return: the controller's normalized action space (a [-1, 1] box)
        """
        self.__task: BaseTask = task
        self.__action_limits_lower, self.__action_limits_upper = self._initialize(task)
        # BUGFIX: the message previously printed the upper-limit size twice,
        # hiding the actual mismatch.
        assert len(self.__action_limits_lower) == len(self.__action_limits_upper), \
            "Sizes of action limits do not match ({} vs {})".format(
                len(self.__action_limits_lower), len(self.__action_limits_upper))
        self.__normalizer = Normalizer(self.__action_limits_lower, self.__action_limits_upper)
        lower = -np.ones_like(self.__action_limits_lower, dtype=np.float32)
        upper = np.ones_like(self.__action_limits_upper, dtype=np.float32)
        return gym.spaces.Box(lower, upper)

    def actuate(self, action_normalized: np.ndarray) -> None:
        """
        Actuate the robot component (e.g. arm or gripper) according to a given action.

        :param action_normalized: a normalized action as (N, ) array (in [-1, 1])
        """
        # Clip into [-1, 1] before denormalizing to the controller's limits.
        action_clipped = np.clip(action_normalized, -1.0, 1.0)
        self._actuate_denormalized(self.__normalizer.denormalize(action_clipped))

    @abstractmethod
    def _actuate_denormalized(self, action: np.ndarray) -> None:
        """
        Actuates the robot according to a given (unnormalized) action. Must be implemented by the concrete instantiation
        of the controller.

        :param action: an action as (N, ) array
        """
        pass

    @abstractmethod
    def _initialize(self, task: BaseTask) -> Tuple[np.ndarray, np.ndarray]:
        """
        Configure the joints of the robot to be used with the controller. Must be implemented by the concrete
        instantiation of the controller.

        :param task: the task in which the controller is used
        :return: a (lower, upper)-tuple of the lower and upper limits of the unnormalized action
        """
        pass

    @property
    def task(self) -> BaseTask:
        """The task this controller was initialized with (None before initialize)."""
        return self.__task

    @property
    def name(self) -> str:
        """Human-readable controller name given at construction time."""
        return self.__name

    @property
    def action_limits_lower(self) -> np.ndarray:
        """Lower bounds of the unnormalized action (set by initialize)."""
        return self.__action_limits_lower

    @property
    def action_limits_upper(self) -> np.ndarray:
        """Upper bounds of the unnormalized action (set by initialize)."""
        return self.__action_limits_upper

    def __repr__(self) -> str:
        return "Controller {}".format(self.__name)
| StarcoderdataPython |
3275400 | <filename>p3_collab-compet/ddpg_agent.py<gh_stars>0
import numpy as np
import random
import copy
from collections import namedtuple, deque
from model import Actor, Critic
from sum_tree import SumTree
import torch
import torch.nn.functional as F
import torch.optim as optim
# ----- DDPG hyper-parameter defaults (can be replaced via override_config) -----
BUFFER_SIZE = int(1e6)  # replay buffer size
BATCH_SIZE = 512        # minibatch size
GAMMA = 0.99            # discount factor
TAU = 1e-3              # for soft update of target parameters
TAU_INCREASE = 1.001    # rate of increase of tau (multiplied in each learn step)
LR_ACTOR = 0.5e-4       # learning rate of the actor
LR_CRITIC = 0.5e-5      # learning rate of the critic
WEIGHT_DECAY = 0.0      # L2 weight decay
NOISE_THETA = 0.15      # OUNoise theta (mean-reversion rate)
NOISE_SIGMA = 0.2       # OUNoise sigma (noise scale)
ALPHA = 0.6             # alpha parameter of prioritized replay
EPSILON_ERROR = 1e-7    # minimum TD-error for prioritized replay
MAXIMUM_ERROR = 1e4     # default error for experiences newly added to the replay buffer
RANDOM_ACTION_PERIOD = 1500       # length of the exploration period in early training
MINIMUM_RANDOM_ACTION_PROB = 0.01 # probability of taking random actions during the exploration period
ACTOR_LAYER_SIZES = [256, 128]    # hidden-layer widths of the actor network
CRITIC_LAYER_SIZES = [256, 128]   # hidden-layer widths of the critic network
def if_print(condition, item):
    """Print *item* to stdout only when *condition* is truthy."""
    if not condition:
        return
    print(item)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def override_config(config):
    """Overrides the above global parameters used by the Agent.

    ``config`` must contain one entry per hyper-parameter listed below;
    a missing key raises ``KeyError``.
    """
    # (module-level constant, config key) pairs, in the original update order.
    overrides = (
        ("BUFFER_SIZE", "buffer_size"),
        ("BATCH_SIZE", "batch_size"),
        ("GAMMA", "gamma"),
        ("TAU", "tau"),
        ("TAU_INCREASE", "tau_increase"),
        ("LR_ACTOR", "lr_actor"),
        ("LR_CRITIC", "lr_critic"),
        ("WEIGHT_DECAY", "weight_decay"),
        ("NOISE_THETA", "noise_theta"),
        ("NOISE_SIGMA", "noise_sigma"),
        ("ALPHA", "alpha"),
        ("EPSILON_ERROR", "epsilon_error"),
        ("MAXIMUM_ERROR", "maximum_error"),
        ("RANDOM_ACTION_PERIOD", "random_action_period"),
        ("MINIMUM_RANDOM_ACTION_PROB", "minimum_random_action_prob"),
        ("ACTOR_LAYER_SIZES", "actor_layer_sizes"),
        ("CRITIC_LAYER_SIZES", "critic_layer_sizes"),
    )
    for constant, key in overrides:
        globals()[constant] = config[key]
class Agent():
    """Interacts with and learns from the environment."""

    def __init__(self, state_size, action_size, random_seed,
                 prioritized_replay=False, use_ounoise=True, parallel_agents=1,
                 train_every=20, train_steps=10):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            random_seed (int): random seed
            prioritized_replay (bool): if True, use prioritized replay.
            use_ounoise (bool): if True, uses Ornstein-Uhlenbeck processes
                to add noise to the output of the policy
            parallel_agents (int): number of agents running in parallel
            train_every (int): number of steps to take before switching to train mode
            train_steps (int): number of times to update the network in train mode
        """
        # print(f"Agent: state_size={state_size}, action_size={action_size}")
        # print(f"Actor_layer_sizes={ACTOR_LAYER_SIZES}, Critic_layer_sizes={CRITIC_LAYER_SIZES}")
        # print(f"lr_actor={LR_ACTOR}, lr_critic={LR_CRITIC}")
        # print(f"train_every={train_every}, train_steps={train_steps}")
        # print(f"buffer_size={BUFFER_SIZE}, batch_size={BATCH_SIZE}")
        # print(f"gamma={GAMMA}, tau={TAU}, weight_decay={WEIGHT_DECAY}")
        # print(f"use_ounoise={use_ounoise}, noise_theta={NOISE_THETA}, noise_sigma={NOISE_SIGMA}")
        # print(f"prioritized_replay={prioritized_replay}")
        # print("\n")
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(random_seed)
        self.prioritized_replay = prioritized_replay
        self.use_ounoise = use_ounoise
        self.parallel_agents = parallel_agents
        self.train_every = train_every
        self.train_steps = train_steps
        # tau grows each learn step (multiplied by TAU_INCREASE, capped in learn()).
        self.tau = TAU
        # Note that with a sum_tree implementation, alpha has to be fixed
        # and alpha is applied at the time an item is added or updated.
        self.alpha = ALPHA
        # The actor uses its own state only
        actor_state_size = self.state_size
        # The critic evaluates the action of the actor in a context that
        # includes the states of both agents and the action that the
        # opposing agent would have taken in that combined state.
        # Since the critic is used only during training, and training uses
        # a self-play scheme, the critic has full access to such information.
        critic_state_size = self.state_size * 2 + self.action_size
        critic_action_size = action_size
        # Actor Network (w/ Target Network)
        self.actor_local = Actor(actor_state_size, action_size, random_seed,
                                 actor_layer_sizes=ACTOR_LAYER_SIZES).to(device)
        self.actor_target = Actor(actor_state_size, action_size, random_seed,
                                  actor_layer_sizes=ACTOR_LAYER_SIZES).to(device)
        self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
        # Critic Network (w/ Target Network)
        self.critic_local = Critic(critic_state_size, critic_action_size, random_seed,
                                   critic_layer_sizes=CRITIC_LAYER_SIZES).to(device)
        self.critic_target = Critic(critic_state_size, critic_action_size, random_seed,
                                    critic_layer_sizes=CRITIC_LAYER_SIZES).to(device)
        self.critic_optimizer = optim.Adam(self.critic_local.parameters(),
                                           lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
        # Copy the local networks into the target networks
        self.soft_update_targets(tau=1.0)
        # Noise process
        self.noise = OUNoise(action_size * parallel_agents, random_seed)
        # Replay memory
        self.memory = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, random_seed,
                                   prioritized_replay=prioritized_replay)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0
        self.reset()

    def reset(self, theta=NOISE_THETA, sigma=NOISE_SIGMA):
        """Reset the noise generator given the theta and sigma parameters."""
        self.noise.reset(theta, sigma)

    def act(self, state, add_noise=True):
        """Returns actions for given state as per current policy.

        Noise can be added to the action proposed by the policy to
        encourage exploration. Moreover, in the initial steps of training,
        the actions proposed by the policy may be randomly overriden
        by completely random actions, to encourage even more diversified
        exploration, which is controlled by two hyper-parameters
        RANDOM_ACTION_PERIOD and MINIMUM_RANDOM_ACTION_PROB.

        Params
        ======
            add_noise (bool): if True, add noise to the actor's policy
        """
        state = torch.from_numpy(state).float().to(device)
        # Inference only: switch to eval mode and disable gradient tracking.
        self.actor_local.eval()
        with torch.no_grad():
            action = self.actor_local(state).cpu().data.numpy()
        self.actor_local.train()
        if add_noise:
            if self.use_ounoise:
                action += self.noise.sample().reshape((-1, self.action_size))[:len(action)]
            else:
                action += np.random.standard_normal(action.shape)
        action = np.clip(action, -1, 1)
        # Flip a coin to see if you better take a random action instead.
        # Reduce the randomness as time goes by
        for i in range(action.shape[0]):
            random_action_prob = max((RANDOM_ACTION_PERIOD - self.t_step)/RANDOM_ACTION_PERIOD,
                                     MINIMUM_RANDOM_ACTION_PROB)
            if (np.random.random() <= random_action_prob):
                # take a random action
                action[i] = np.random.uniform(-1.0, 1.0, action.shape[1])
        return action

    def step(self, state, action, reward, next_state, done, beta=0.0):
        """Update the agent based on the taken step.

        Save experience in replay memory.
        Use random sample from buffer to learn.

        Params
        =====
            beta (float): beta power used for importance sampling weights
        """
        # Choose an error for these experiences
        if self.prioritized_replay:
            # Set the error for this experience to be equal to the highest
            # error in the replay buffer.
            error = self.memory.get_highest_error()
        else:
            error = None
        # Reversing the row order swaps the two agents' rows, so each stored
        # experience also carries the opposing agent's view/action
        # (assumes exactly two parallel agents in self-play).
        other_state = state[::-1]
        combined_state = np.concatenate((state, other_state), axis=1)
        other_action = action[::-1]
        combined_action = np.concatenate((action, other_action), axis=1)
        other_next_state = next_state[::-1]
        combined_next_state = np.concatenate((next_state, other_next_state), axis=1)
        combined_reward = reward
        # Save experiences into the replay buffer, along with their errors
        for s, a, r, n_s, d in zip(combined_state, combined_action,
                                   combined_reward, combined_next_state, done):
            self.memory.add(s, a, r, n_s, d, error)
        # Learn every train_every time steps.
        if (self.t_step + 1) % self.train_every == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > BATCH_SIZE:
                # learn for train_steps times
                for i in range(self.train_steps):
                    experiences, indices, priorities = self.memory.sample()
                    self.learn(experiences, indices, priorities, GAMMA, beta=beta)
        self.t_step += 1

    def learn(self, experiences, indices, priorities, gamma, beta):
        """Update policy and value parameters using given batch of experience tuples.

        Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
        where:
            actor_target(state) -> action
            critic_target(state, action) -> Q-value

        Params
        ======
            experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done, error) tuples
            indices (int): index of experiences in the replay buffer
            priorities (float): priority of experiences in the replay buffer
            gamma (float): discount factor
            beta (float): beta power used for importance sampling weights
        """
        states, actions, rewards, next_states, dones = experiences
        # extract the states, next_states, and actions from the perspective
        # of each of the two agents: a1, and a2
        a1_a2_states = states
        a1_states = states[:, :self.state_size]
        a2_states = states[:, self.state_size:]
        a1_a2_next_states = next_states
        a1_next_states = next_states[:, :self.state_size]
        a2_next_states = next_states[:, self.state_size:]
        a1_actions = actions[:, :self.action_size]
        a2_actions = actions[:, self.action_size:]
        # ---------------------------- update critic ---------------------------- #
        # Get predicted next-state actions and Q values from target models
        self.actor_target.eval()
        self.critic_target.eval()
        with torch.no_grad():
            a1_next_actions = self.actor_target(a1_next_states)
            a2_next_actions = self.actor_target(a2_next_states)
            a1_a2_next_states_a2_next_actions = torch.cat((a1_a2_next_states, a2_next_actions), dim=1)
            Q_targets_next = self.critic_target(a1_a2_next_states_a2_next_actions, a1_next_actions)
            # Adjust the target_best_next_action_value by importance sampling weights
            # NOTE(review): IS weights are usually applied to the TD-error/loss
            # rather than the bootstrap target itself -- confirm intent.
            if self.prioritized_replay:
                weights = (len(self.memory) * priorities) ** -beta
                maximum_weight, _ = torch.max(weights, dim=0, keepdim=True)
                weights /= maximum_weight
                Q_targets_next *= weights
        # Compute Q targets for current states (y_i)
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Compute critic loss
        a1_a2_states_a2_actions = torch.cat((a1_a2_states, a2_actions), dim=1)
        Q_expected = self.critic_local(a1_a2_states_a2_actions, a1_actions)
        critic_loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        # Update errors of sampled experiences
        if self.prioritized_replay:
            errors = torch.abs(Q_expected - Q_targets).squeeze().detach().to('cpu').numpy()
            # Apply the proportional-prioritization transform (error + eps)^alpha.
            errors = (errors + EPSILON_ERROR) ** self.alpha
            self.memory.update_errors(indices, errors)
        # ---------------------------- update actor ---------------------------- #
        # Compute actor loss
        a1_actions = self.actor_local(a1_states)
        a2_actions = self.actor_local(a2_states)
        a1_a2_states_a2_actions = torch.cat((a1_a2_states, a2_actions), dim=1)
        actor_loss = -self.critic_local(a1_a2_states_a2_actions, a1_actions).mean()
        # Minimize the loss
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()
        # ----------------------- update target networks ----------------------- #
        self.tau = min(5e-1, self.tau * TAU_INCREASE)
        self.soft_update_targets(tau=self.tau)

    def soft_update_targets(self, tau=TAU):
        """Soft-update both target networks (actor and critic) towards the locals."""
        self.soft_update(self.critic_local, self.critic_target, tau)
        self.soft_update(self.actor_local, self.actor_target, tau)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.

        θ_target = τ*θ_local + (1 - τ)*θ_target

        Params
        ======
            local_model: PyTorch model (weights will be copied from)
            target_model: PyTorch model (weights will be copied to)
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class OUNoise:
    """Ornstein-Uhlenbeck process for temporally correlated exploration noise."""

    def __init__(self, size, seed, mu=0., theta=NOISE_THETA, sigma=NOISE_SIGMA):
        """Store the process parameters and reset the internal state."""
        self.size = size
        self.mu = np.ones(size) * mu
        self.seed = random.seed(seed)
        self.reset(theta, sigma)

    def reset(self, theta=NOISE_THETA, sigma=NOISE_SIGMA):
        """Snap the internal state back to the mean (mu) and install the
        given theta and sigma parameters."""
        self.state = self.mu.copy()
        self.theta = theta
        self.sigma = sigma

    def sample(self):
        """Advance the process one step and return the new state as a noise sample."""
        drift = self.theta * (self.mu - self.state)
        diffusion = self.sigma * np.random.standard_normal(self.size)
        self.state = self.state + drift + diffusion
        return self.state
class ReplayBuffer:
    """Fixed-size buffer to store experience tuples.

    Supports uniform sampling (deque-backed) or prioritized sampling
    (ring buffer indexed in lock-step with a SumTree of priorities).
    """

    def __init__(self, buffer_size, batch_size, seed, prioritized_replay=False):
        """Initialize a ReplayBuffer object.

        Params
        ======
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
            seed (int): random seed
            prioritized_replay (bool): if True, use prioritized replay
        """
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.seed = random.seed(seed)
        self.prioritized_replay = prioritized_replay
        if not prioritized_replay:
            self.memory = deque(maxlen=buffer_size)
        else:
            # Ring buffer: memory[i] corresponds to SumTree leaf i.
            self.memory = [None] * buffer_size
            self.sum_tree = SumTree(buffer_size)
            self.data_index = 0
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])

    def add(self, state, action, reward, next_state, done, error=0.0):
        """Add a new experience to memory (error is used only for prioritized replay)."""
        e = self.experience(state, action, reward, next_state, done)
        if not self.prioritized_replay:
            self.memory.append(e)
        else:
            self.memory[self.data_index] = e
            self.sum_tree.add(error)
            # Advance the ring-buffer write position, wrapping at capacity.
            self.data_index += 1
            if self.data_index == self.buffer_size:
                self.data_index = 0

    def sample(self):
        """Randomly sample a batch of experiences from memory.

        Returns a ((states, actions, rewards, next_states, dones), indices,
        priorities) triple; indices/priorities are None for uniform sampling.
        """
        if not self.prioritized_replay:
            experiences = random.sample(self.memory, k=self.batch_size)
            indices = None
            priorities = None
        else:
            indices, priorities = self.sum_tree.weighted_sample(self.batch_size)
            # Normalize raw priorities into sampling probabilities.
            total_sum = self.sum_tree.total_sum()
            priorities = np.array(priorities) / total_sum
            experiences = [self.memory[index] for index in indices]
        states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
        actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
        dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
        if self.prioritized_replay:
            priorities = torch.from_numpy(np.vstack(priorities)).float().to(device)
        else:
            priorities = None
        return (states, actions, rewards, next_states, dones), indices, priorities

    def get_highest_error(self):
        """Return the highest error of all experiences in the replay buffer.

        Used only for prioritized replay. Falls back to MAXIMUM_ERROR while
        the buffer holds no prioritized entries yet.
        """
        assert(self.prioritized_replay == True)
        largest_probability = self.sum_tree.largest_item()
        if largest_probability == 0.0:
            return (MAXIMUM_ERROR)
        else:
            return largest_probability

    def update_errors(self, indices, errors):
        """Update the TD-errors of sampled replay buffer experiences.

        Used only for prioritized replay.

        Params
        ======
            indices (int): replay buffer locations that need to be updated.
            errors (float): new values for the error field of the replay buffer locations
        """
        assert(self.prioritized_replay == True)
        for i in range(len(indices)):
            # BUGFIX: update the sampled leaf positions (indices[i]); the old
            # code updated leaves 0..len(indices)-1 regardless of what was
            # sampled, corrupting the priority tree.
            self.sum_tree.update(indices[i], errors[i])

    def __len__(self):
        """Return the current size of internal memory."""
        if not self.prioritized_replay:
            return len(self.memory)
        else:
            return len(self.sum_tree)
| StarcoderdataPython |
9699271 | <reponame>lcmonteiro/tool-gtrans
# #################################################################################################
# -------------------------------------------------------------------------------------------------
# File: google_browser.py
# Author: <NAME>
#
# Created on jan 6, 2020, 22:00 PM
# -------------------------------------------------------------------------------------------------
# #################################################################################################
import chromedriver_binary
#
from selenium import webdriver
from time import sleep, time
# -------------------------------------------------------------------------------------------------
# GoogleAPI
# -------------------------------------------------------------------------------------------------
class GoogleBrowser:
    """Drive a headless Chrome session against translate.google.com and
    scrape translations for a batch of text fragments."""
    # Sentinel tags used to join/split multiple fragments so they survive a
    # single round-trip through the translate text area.
    __ENDL = "<+++>"
    __ENDS = "<###>"
    # DOM ids / XPaths for the input and output areas of the translate page.
    # NOTE(review): these selectors track Google's markup and may rot.
    __IAREA = "source"
    __OAREA = "//span[contains(@class, 'tlid-translation')]"
    def __init__(self, to_lang='en', from_lang=None):
        # 'auto' lets Google detect the source language.
        self.__from = from_lang if from_lang is not None else 'auto'
        self.__to = to_lang
        # Rebinds the name: after this call self.__engine is the webdriver
        # instance, shadowing the __engine() factory method.
        self.__engine = self.__engine(self.__from, self.__to)
    def __del__(self):
        # Best-effort cleanup of the browser window.
        # NOTE(review): raises if __init__ failed before __engine was bound.
        self.__engine.close()
    def translate(self, data, timeout=10):
        """Translate a list of strings; raises TimeoutError after *timeout* s."""
        # Write the serialized batch into the page.
        self.__write(self.__serialize(data))
        # Poll until the page shows the same number of fragments we sent.
        timeout = time() + timeout
        while time() < timeout:
            sleep(1)
            translated = self.__unserialize(self.__read())
            if len(translated) == len(data):
                self.__clear()
                return translated
        self.__clear()
        raise TimeoutError()
    # -----------------------------------------------------------------------------------
    # private
    # -----------------------------------------------------------------------------------
    def __engine(self, from_lang, to_lang):
        """Create a headless Chrome driver pointed at the translate page."""
        from os.path import dirname, join, realpath
        from random import choice
        # Load candidate user agents (one random pick per session).
        with open(join(dirname(realpath(__file__)), 'uagents.txt')) as f:
            uagents = [ua.strip() for ua in f if ua.strip()]
        # Driver options: headless, sandbox-less (container friendly).
        options = webdriver.chrome.options.Options()
        options.add_argument("--headless")
        options.add_argument("--no-sandbox")
        options.add_argument("--disable-notifications")
        options.add_argument(f"user-agent={choice(uagents)}")
        # Open the driver and navigate to the language pair URL.
        driver = webdriver.Chrome(options=options)
        #driver = webdriver.Chrome()
        driver.get(f'http://translate.google.com/#{from_lang}/{to_lang}/')
        return driver
    def __write(self, data):
        # Type the payload into the source-text area.
        iarea = self.__engine.find_element_by_id(self.__IAREA)
        iarea.send_keys(data)
    def __read(self):
        # Read the translated text; fall back to joining child spans when the
        # top-level text is empty (layout variant of the results pane).
        oarea = self.__engine.find_element_by_xpath(self.__OAREA)
        text = oarea.text.replace('\n','')
        return text if text else ' '.join(
            [e.text for e in oarea.find_elements_by_xpath('./span')]).replace('\n','')
    def __clear(self):
        # Empty the input area and give the page time to settle.
        self.__engine.find_element_by_id(self.__IAREA).clear()
        sleep(2)
    def __serialize(self, data):
        # Encode newlines inside fragments and join fragments with __ENDS.
        return f'\n{self.__ENDS}\n'.join([d.replace('\n', f'{self.__ENDL}') for d in data])
    def __unserialize(self, data):
        # Split on the (possibly space-padded) fragment separator and
        # restore the original newlines.
        from re import split
        return [d.replace(f'{self.__ENDL}', '\n') for d in split(f' ?{self.__ENDS} ?', data)]
# #################################################################################################
# -------------------------------------------------------------------------------------------------
# end
# -------------------------------------------------------------------------------------------------
################################################################################################### | StarcoderdataPython |
9695721 | from sklearn import preprocessing
import sklearn
from sklearn.utils import shuffle
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
import numpy as np
# NOTE(review): duplicate import of `preprocessing` (also imported above);
# `shuffle`, `np` and `linear_model` are unused in this script.
from sklearn import linear_model, preprocessing
# Train a K-nearest-neighbours classifier on the UCI "car" dataset.
data = pd.read_csv("car.data")
print(data.head())
# Preprocessing: encode the categorical string columns as integer codes.
le=preprocessing.LabelEncoder()
buying = le.fit_transform(list(data["buying"]))
maint = le.fit_transform(list(data["maint"]))
door = le.fit_transform(list(data["door"]))
persons = le.fit_transform(list(data["persons"]))
lug_boot = le.fit_transform(list(data["lug_boot"]))
safety = le.fit_transform(list(data["safety"]))
cls = le.fit_transform(list(data["class"]))
# NOTE(review): `predict` is assigned but never used.
predict = "class"
X = list(zip(buying, maint, door, persons, lug_boot, safety)) # features
y = list(cls) # labels
# Hold out 10% of the rows for evaluation.
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, test_size = 0.1)
print(x_train,y_test)
model = KNeighborsClassifier(n_neighbors=9)
# Train
model.fit(x_train, y_train)
# Accuracy on the held-out split.
acc=model.score(x_test,y_test)
print(acc)
# Predict and print each test sample with its predicted/actual class name.
predicted = model.predict(x_test)
names = ["unacc", "acc", "good", "vgood"]
for x in range(len(predicted)):
    print("Predicted: ", names[predicted[x]], "Data: ", x_test[x], "Actual: ", names[y_test[x]])
    # Inspect the 9 nearest neighbours of each test point.
    n = model.kneighbors([x_test[x]], 9, True)
print("N: ", n) | StarcoderdataPython |
122021 | <filename>find majority element.py
#Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.
#You may assume that the array is non-empty and the majority element always exist in the array.
#solution 1
from collections import Counter
class Solution:
    def majorityElement(self, nums: List[int]) -> int:
        """Return the element appearing more than ``len(nums) // 2`` times.

        The majority element is guaranteed to exist, so the most frequent
        key of the tally is always the answer.
        """
        tally = Counter(nums)
        return max(tally, key=tally.get)
#solution 2
def majorityElement(self, nums: List[int]) -> int:
    """Return the majority element of *nums* (appears more than n/2 times).

    Tallies occurrences in one pass over *nums*, then returns the first
    value whose count reaches half the length; -1 if none qualifies.
    """
    counts = {}
    half = len(nums) / 2
    result = -1
    for num in nums:
        # dict.get avoids the `if num in d.keys()` double lookup.
        counts[num] = counts.get(num, 0) + 1
    for value, count in counts.items():
        if count >= half:
            result = value
            break
    # Leftover debug print removed; callers only use the return value.
    return result
| StarcoderdataPython |
6622948 | import math
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from PIL import Image
# from tf_cnnvis import *
def weight(shape):
    """Return a trainable weight tensor drawn from a truncated normal (stddev 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias(length):
    """Return a trainable bias vector of *length* elements, initialised to 0.1."""
    initial = tf.constant(0.1, shape=[length])
    return tf.Variable(initial)
def layer(input, num_input_channels, filter_size, num_filters, use_bn=False,
          use_relu=True, use_pool=True, use_dropout=True):
    """Build one conv block: conv2d + bias, with optional BN, ReLU,
    2x2 max-pool and dropout stages.

    NOTE(review): relies on the module-level globals ``training`` (batch-norm
    mode) and ``keep_prob`` (dropout keep probability) — confirm intended.
    """
    shape = [filter_size, filter_size, num_input_channels, num_filters]
    weights = weight(shape)
    biases = bias(num_filters)
    # Stride-1, SAME-padded convolution keeps the spatial size.
    layer = tf.nn.conv2d(input=input, filter=weights, strides=[1, 1, 1, 1],
                         padding="SAME")
    layer += biases
    if use_bn:
        layer = tf.layers.batch_normalization(layer, training=training)
    if use_relu:
        layer = tf.nn.relu(layer)
    if use_pool:
        # 2x2 max-pool halves the spatial dimensions.
        layer = tf.nn.max_pool(value=layer, ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1], padding="SAME")
    if use_dropout:
        layer = tf.nn.dropout(layer, keep_prob)
    return layer
def save_layer(layer, image, image_name, use):
    """Run *image* through the graph up to *layer* and save a grid plot of
    the resulting feature maps under data/layers/features/.

    NOTE(review): uses the module-level globals ``img_size_flat``, ``x``,
    ``keep_prob`` and ``session``.
    """
    image = image.reshape(img_size_flat)
    feed_dict = {x: [image], keep_prob: 0.5}
    values = session.run(layer, feed_dict=feed_dict)
    # One subplot per filter, arranged in a near-square grid.
    num_filters = values.shape[3]
    num_grids = int(math.ceil(math.sqrt(num_filters)))
    fig, axes = plt.subplots(num_grids, num_grids)
    for i, ax in enumerate(axes.flat):
        if i < num_filters:
            img = values[0, :, :, i]
            ax.imshow(img, interpolation='nearest', cmap='binary')
    fig.savefig("data/layers/features/" + image_name +
                "_" + use + ".png")
# Dropout keep probability, fed per run.
keep_prob = tf.placeholder(tf.float32)
# Conv-block hyperparameters (only block 1 is actually built below).
filter_size1 = 3
num_filters1 = 32
filter_size2 = 3
num_filters2 = 64
filter_size3 = 3
num_filters3 = 128
filter_size4 = 3
num_filters4 = 256
num_channels = 3
img_size = 128
img_size_flat = img_size * img_size * num_channels
img_shape = (img_size, img_size)
training = True
# Flat input placeholder reshaped into NHWC for the conv layer.
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])
layer1 = layer(input=x_image, num_input_channels=num_channels,
               filter_size=filter_size1, num_filters=num_filters1)
session = tf.Session()
session.run(tf.global_variables_initializer())
# NOTE(review): weights are randomly initialised here, not restored from a
# checkpoint — the saved feature maps show untrained filters.
img0 = Image.open("record/images/not_preprocessed/test/test_34.png")
image0 = np.array(img0)
img1 = Image.open("record/images/not_preprocessed/test/test_31.png")
image1 = np.array(img1)
save_layer(layer=layer1, image=image0, image_name="maze", use="conv")
save_layer(layer=layer1, image=image1, image_name="pig", use="conv")
# image0 = image0.reshape(img_size_flat)
# feed_dict = {x: [image0], keep_prob: 0.5}
# layers = ["r", "p", "c"]
# is_success = deconv_visualization(sess_graph_path=session,
# value_feed_dict=feed_dict,
# input_tensor=x_image, layers=layers,
# path_logdir="record/images/layers/maze/",
# path_outdir="record/images/layers/maze/")
# image1 = image1.reshape(img_size_flat)
# feed_dict = {x: [image1], keep_prob: 0.5}
# layers = ["r", "p", "c"]
# is_success = deconv_visualization(sess_graph_path=session,
# value_feed_dict=feed_dict,
# input_tensor=x_image, layers=layers,
# path_logdir="record/images/layers/pig/",
# path_outdir="record/images/layers/pig/")
# Release the TensorFlow session and the image file handles.
session.close()
img0.close()
img1.close()
| StarcoderdataPython |
11208730 | <filename>Executor/lib/conn_device.py
import logging
import yaml
from jnpr.junos import Device
from jnpr.junos.exception import ConnectError
# Logging
logger = logging.getLogger(__name__)
class ConnDevice(object):
    """Common interface for connecting to Junos network devices.

    Loads per-device credentials from a YAML file and opens a NETCONF
    session to every device listed in it.
    """
    def __init__(self, config_path='config/devices.yaml'):
        """Load device credentials and connect to each device.

        :param config_path: Location of the credentials for each network device
        :type config_path: str
        """
        self.network_devices = {}
        self.connected_devices = {}
        self._import_network_devices(config_path)
    def _import_network_devices(self, network_device_file):
        """Import the hostnames, username and password for each network device
        and connect to each of them.

        :param network_device_file: Location of the credentials file
        :type network_device_file: str
        """
        logger.debug('Loading network devices into JunosCollector')
        with open(network_device_file, 'r') as f:
            # safe_load: never execute arbitrary YAML tags from a credentials
            # file (yaml.load without an explicit Loader is unsafe and
            # deprecated since PyYAML 5.1).
            import_devices = yaml.safe_load(f)
        for device in import_devices['devices']:
            self.network_devices[device['name']] = device
            logger.debug('Imported credentials for %s', device['name'])
        for _, device in self.network_devices.items():
            self._connect_to_device(device)
    def _connect_to_device(self, device):
        """Connect to the network device via NETCONF.

        :param device: Contains the necessary information to connect to the device
        :type device: dict
        """
        try:
            logger.debug('Connecting to %s', device['ip'])
            dev = Device(host=device['ip'], user=device['user'],
                         password=device['password'])
            dev.open()
        except ConnectError as e:
            # Bug fix: the original stored the handle even when open()
            # failed; skip unreachable devices instead.
            logger.error('%s', str(e))
            return
        logger.info('Successfully connected to %s', device['ip'])
        self.connected_devices[device['name']] = dev
| StarcoderdataPython |
4819436 | # Um professor quer sortear um dos seus quatro alunos para
# apagar o quadro. Faça um programa que ajude ele, lendo o nome deles e
# escrevendo nome do escolhido
"""import random
print('Escolhendo um aluno... ')
n = random.randint(1, 4)
if (n == 1):
print('O aluno escolhido foi o Ciro')
elif (n == 2):
print('O aluno escolhido foi o Belmiro')
elif (n == 3):
print('O aluno escolhido foi a Julia')
else:
print('O aluno escolhido foi a Mônica')"""
# Acima foi minha resolução
# Abaixo a resolução do professor Guanabara
import random
print('Escolha um aluno destes citados!')
n1 = str(input('Primeiro aluno: '))
n2 = str(input('Segundo aluno: '))
n3 = str(input('Terceiro aluno: '))
n4 = str(input('Quarto aluno: '))
lista = [n1, n2, n3, n4]
escolhido = random.choice(lista)
print('O aluno escolhido foi {}'.format(escolhido))
| StarcoderdataPython |
6481650 | #!/usr/bin/python
import argparse
from board_server_manager import BoardServerManager
def main():
    """Parse server/database settings from the command line and start the
    board server via BoardServerManager."""
    parser = argparse.ArgumentParser(description='Board server settings')
    parser.add_argument('-sp', '--PORT', help='server port', type=int,
                        default=80, required=False)
    parser.add_argument('-sip', '--IP', help='server ip', type=str,
                        default='', required=False)
    parser.add_argument('-dh', '--DBHOST', help='database host', type=str,
                        required=True)
    parser.add_argument('-du', '--DBUSER', help='database user', type=str,
                        required=True)
    # NOTE(review): passing the DB password on the command line exposes it
    # in the process list; consider an env var or prompt.
    parser.add_argument('-dp', '--DBPASSWD', help='database password', type=str,
                        required=True)
    parser.add_argument('-dn', '--DBNAME', help='database name', type=str,
                        required=True)
    args = parser.parse_args()
    bsm = BoardServerManager(args)
    bsm.activate()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
9765934 | class Unpacker(object):
"""Helper class to unpack data received from WIZNet device"""
def __init__(self, data, pos=0):
self.data = data
self.initialpos = self.pos = pos
def unpack(self, s2e):
self.pos = self.initialpos
for field in s2e._fields:
name = field[0]
unpacker = getattr(self, "unpack_%s" % (field[1]), )
unpacked = unpacker(*field[2:])
logger.debug("Unpacked %s = %s" % (name, unpacked))
setattr(s2e, name, unpacked)
return s2e
def unpack_ip(self):
ip = struct.unpack(">BBBB", self.data[self.pos:self.pos + 4])
self.pos += 4
return ".".join(["%d" for x in xrange(4)]) % ip
def unpack_firmversion(self):
version = struct.unpack(">BB", self.data[self.pos:self.pos + 2])
self.pos += 2
return ".".join([ "%d" for x in xrange(2)]) % version
def unpack_mac(self):
mac = struct.unpack(">BBBBBB", self.data[self.pos:self.pos + 6])
self.pos += 6
return ":".join([ "%02x" for x in xrange(6)]) % mac
def unpack_short(self):
short = struct.unpack(">H", self.data[self.pos:self.pos + 2])[0]
self.pos += 2
return short
def unpack_byte(self):
byte = struct.unpack("B", self.data[self.pos])[0]
self.pos += 1
return byte
def unpack_bytes(self, length):
fmt = "B" * length
outfmt = "0x%02x " * length
bytes = struct.unpack(fmt, self.data[self.pos:self.pos + length])
self.pos += length
return outfmt % bytes
def unpack_bool(self, inverted=False):
fmt = "B"
b = bool(struct.unpack(fmt, self.data[self.pos])[0])
self.pos += 1
if inverted:
return not b
else:
return b
def unpack_str(self, length, outfmt="%s"):
fmt = ">%(length)ss" % {"length": length}
value = struct.unpack(fmt, self.data[self.pos:self.pos + length])[0]
self.pos += length
return outfmt % (value,)
def unpack_dictvalues(self, dictvalues, default=None):
"""1 Byte of data to dict value"""
fmt = "B"
key = struct.unpack(fmt, self.data[self.pos])[0]
self.pos += 1
return dictvalues.get(key, default) | StarcoderdataPython |
11369351 | <reponame>MisterAI/AutoTeSG
#!/usr/bin/python
import sys, getopt
import astor
from ast import walk, FunctionDef
from CodeInstrumentator import CodeInstrumentator, RemovePrintStmts
from TestDataGenerator import TestDataGenerator
import BranchCollector
from AVM import AVM
__version__ = '0.0.1'
def main(argv):
    """CLI entry point: parse -h/-v options, then instrument each input
    file and generate branch-covering test data for every function."""
    try:
        opts, args = getopt.getopt(argv,"hv")
    except getopt.GetoptError:
        print('Usage: AutoTeSG.py [-h] [-v] FILE...')
        sys.exit(1)
    for opt, arg in opts:
        if opt == '-h':
            print('Usage: AutoTeSG.py [-h] [-v] FILE...')
            sys.exit()
        elif opt == '-v':
            print('AutoTeSG ' + __version__)
            print('')
            print('Written by <NAME>., 2018')
            sys.exit()
    if not len(args) > 0:
        print('Please specify an input file!')
        sys.exit()
    for input_file in args:
        try:
            myAST = astor.code_to_ast.parse_file(input_file)
        except Exception as e:
            # NOTE(review): re-raising unchanged makes this try/except a no-op.
            raise e
        print('Analysing file ' + str(input_file))
        # Strip print statements, then inject coverage instrumentation.
        RemovePrintStmts().visit(myAST)
        CodeInstrumentator().visit(myAST)
        for node in walk(myAST):
            if isinstance(node, FunctionDef):
                header_string = ('Generating test data for function \''
                    + str(node.name)
                    + '\'\n\nBranch, Corresponding input values ')
                variable_names = []
                for in_param in node.args.args:
                    # Python 2 AST args expose .id, Python 3 .arg.
                    try:
                        variable_names.append(in_param.id)
                    except AttributeError as e:
                        variable_names.append(in_param.arg)
                header_string += str(variable_names)
                print(header_string)
                # get all possible branches of function
                branches = BranchCollector.collect_branches(node)
                # save successful inputs for every branching line as
                # Tupel:(lineno, input_list)
                input_tuples = []
                for branch in branches:
                    # print('Searching covering input for branch ' + str(branch))
                    AVM().AVMsearch(myAST, node, branch, input_tuples)
    print('\n')
if __name__ == "__main__":
    main(sys.argv[1:])
| StarcoderdataPython |
4838907 | from .WebIDLLexer import WebIDLLexer
from .WebIDLParser import WebIDLParser
from .WebIDLParserVisitor import WebIDLParserVisitor
| StarcoderdataPython |
30168 | from django.shortcuts import render
# Create your views here.
from django.shortcuts import render,redirect, render_to_response
from .models import *
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
from django.urls import reverse
from django.http import HttpResponseRedirect
def loginView(request):
    """Render the login page.

    NOTE(review): the GET/POST branches below are empty placeholders —
    the view currently renders the same template for every method.
    """
    if request.method =="GET":
        pass
    elif request.method == 'POST':
        pass
    return render(request, 'myApp/login.html')
| StarcoderdataPython |
346070 |
s = "abcxyz"
print "original("+s+")"
print "strip("+s.lstrip("cba")+")"
| StarcoderdataPython |
274857 | '''Soma Simples'''
A = int(input())
B = int(input())
def CalculaSomaSimples(a: int, b: int):
    """Return the sum of *a* and *b* formatted as ``'SOMA = <total>'``."""
    total = int(a + b)
    return 'SOMA = {}'.format(total)
print(CalculaSomaSimples(A,B))
| StarcoderdataPython |
5192698 | <filename>referrals/migrations/0001_initial.py
# Generated by Django 2.0.2 on 2018-04-16 07:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the FlatReferral, Link and
    # MultiLevelReferral tables. Generated migrations are normally left
    # untouched; only comments have been added here.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # One-level referral: each user is referred by at most one referrer.
        migrations.CreateModel(
            name='FlatReferral',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('referred', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='referrers', to=settings.AUTH_USER_MODEL)),
                ('referrer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='referreds', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'flat_referral',
                'ordering': ['id'],
            },
        ),
        # Per-user referral link carrying a unique UUID token.
        migrations.CreateModel(
            name='Link',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('token', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Materialised-path tree node (depth/numchild/path) for multi-level
        # referral chains.
        migrations.CreateModel(
            name='MultiLevelReferral',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('depth', models.PositiveIntegerField()),
                ('numchild', models.PositiveIntegerField(default=0)),
                ('path', models.CharField(max_length=25500)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'multi_level_referral',
            },
        ),
        migrations.AlterUniqueTogether(
            name='flatreferral',
            unique_together={('referrer', 'referred')},
        ),
    ]
| StarcoderdataPython |
4840816 | #!/usr/bin/python
import sys
import os
sys.path.append("../../src/")
sys.path.append("../util/")
import commands
import common
CXXTAGS_QUERY = "../../bin/cxxtags_query"
if len(sys.argv) != 2:
print "usage: cmd db_file"
exit(1)
cur_dir = os.getcwd()
db_dir = sys.argv[1]
q_list = [
# main.cpp
"ref " + db_dir + " " + cur_dir + "/main.cpp 3 11",
"decl " + db_dir + " " + cur_dir + "/main.cpp 7 5",
]
a_list = [
# main.cpp
[
"test|"+cur_dir+"/main.cpp|7|5| test::cout << \"TEST\\n\";",
],
[
"test|"+cur_dir+"/main.cpp|3|11|namespace test = std;",
],
]
err = 0
i = 0
for q in q_list:
err += common.test_one(q, a_list[i])
i+=1
if err == 0:
print "OK"
else:
print "ERR: %d"%(err)
exit(err)
| StarcoderdataPython |
6574620 | <gh_stars>1-10
import unittest.mock
from app.exit_code import ExitCode
from app.scene import Scene
from app.window import Window
from tests import events
class WindowTestCase(unittest.TestCase):
    """Unit tests for app.window.Window (pygame events are mocked)."""
    # Feed a quit + key event so run() terminates immediately.
    @unittest.mock.patch('pygame.event.get', return_value=[events.quit_event, events.any_key_event])
    def test_should_run_fine(self, mock):
        window = Window(800, 600, 'title')
        self.assertTrue(window.run().equals(ExitCode.success()))
    def test_should_add_scenes(self):
        # Adding one scene should grow window.scenes to exactly one entry.
        window = Window(800, 600, 'Test')
        window.add_scene(Scene(window))
        self.assertEqual(1, len(window.scenes))
    def test_should_run_scenes(self):
        # run() must invoke each registered scene's run() at least once.
        window = Window(800, 600, 'Test')
        scene = Scene(window)
        with unittest.mock.patch.object(scene, 'run', wraps=scene.run) as spy:
            window.add_scene(scene)
            window.run()
            spy.assert_called()
    def test_should_exit_with_error(self):
        # A scene returning a failure code should propagate out of run().
        window = Window(800, 600, 'Test')
        error_scene = Scene(window)
        with unittest.mock.patch.object(error_scene, 'run', wraps=error_scene.run, return_value=ExitCode(-1)):
            window.add_scene(error_scene)
            self.assertTrue(window.run().equals(ExitCode(-1)))
    def test_should_allow_play_again(self):
        # A play_again exit code should rerun the scene until success.
        window = Window(800, 600, 'Test')
        play_again_scene = Scene(window)
        with unittest.mock.patch.object(
                play_again_scene, 'run', wraps=play_again_scene.run,
                side_effect=[ExitCode.play_again(), ExitCode.success()]):
            window.add_scene(play_again_scene)
            self.assertTrue(window.run().equals(ExitCode.success()))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1919707 | """
This file contains extensions to Click
"""
from collections import OrderedDict
import click
class PywbemcliGroup(click.Group):
    """
    Extend Click Group class to:
    1. Order the display of commands within the help.
       The commands are ordered in the order that their definitions
       appears in the source code for each command group.
    This extension has a general name because it may be used for more than
    one extension to the Click.Group class.
    """
    # Use ordered dictionary to sort commands by their order defined in the
    # _cmd_... source file.
    def __init__(self, name=None, commands=None, **attrs):
        """
        Use an OrderedDict so commands keep the order in which they were
        registered on this group.
        """
        if commands is None:
            commands = OrderedDict()
        elif not isinstance(commands, OrderedDict):
            commands = OrderedDict(commands)
        click.Group.__init__(self, name=name,
                             commands=commands,
                             **attrs)
    def list_commands(self, ctx):
        """
        Override list_commands to return the commands in registration order
        instead of click's default alphabetical sorting.
        """
        return self.commands.keys()
class PywbemcliTopGroup(click.Group):
    """
    Extensions used with the top level help (pywbemcli --help).

    Extends the Click Group class so that the top-level help output lists
    the commands/groups sorted alphabetically, except for a small set of
    generic names which are always pushed to the end of the list.

    This extension has a general name because it may be used for more than
    one extension to the Click.Group class.
    """
    def list_commands(self, ctx):
        """
        Return the command names sorted, with the generic commands
        ('connection', 'help', 'repl') moved to the end of the list.
        """
        # Generic commands that should always appear last.
        move_to_end = ('connection', 'help', 'repl')
        ordered = sorted(self.commands.keys())
        front = [name for name in ordered if name not in move_to_end]
        back = [name for name in ordered if name in move_to_end]
        return front + back
| StarcoderdataPython |
25920 | from .unit_change_dialog import UnitChangeDialog
| StarcoderdataPython |
3256781 | #!/usr/bin/env python3
# MQTT Minecraft server feeder
# Feeds Minecraft server via RCON with commands received via MQTT
# requirements
# pip3 install paho-mqtt
# pip3 install mcrcon
# pip3 install python-dotenv
import paho.mqtt.client as mqtt
from time import time, sleep
import signal
import socket
import sys
from mcrcon import MCRcon
import os
from dotenv import load_dotenv
import re
# Configuration: broker/RCON endpoints come from the environment (.env).
load_dotenv() # take environment variables from .env.
# Root MQTT topic this bridge subscribes under (as '<base_topic>/#').
base_topic = 'PI1'
mqtt_hostname = os.getenv('MQTT_HOSTNAME')
mqtt_port = int(os.getenv('MQTT_PORT'))
#mqtt_clientname = 'mc-log-mqtt'
rcon_hostname = os.getenv('RCON_HOSTNAME')
rcon_port = os.getenv('RCON_PORT')
rcon_password = os.getenv('RCON_PASSWORD')
def send_to_RCON(command):
    """Send one Minecraft command over the module-level RCON connection
    ``mcr`` and print the server's response."""
    print('Sending to RCON:', command)
    response = mcr.command(command)
    print('Response from RCON:', response)
# MQTT receive callback
######## edit here what to do when MQTT message is received ################
def on_message(client, userdata, message):
    """MQTT message callback: map each sensor/control topic to one or more
    Minecraft commands and forward them via RCON.

    The embedded command strings (coordinates, entity UUIDs, JSON escape
    sequences) are world-specific and must be kept byte-exact.
    """
    value=str(message.payload.decode("utf-8"))
    print("message received ",message.topic,value)
    # Temperature: update the sign text and clone a colour bar whose variant
    # depends on the temperature band (<25, <27, else).
    if "temperature" in message.topic:
        command='execute at 7a278519-5e0d-4ed5-b847-f5fc311b2170 run data merge block 217 6 -13 {Text2:"{\\"text\\":\\"\\\\u1405 ' + value + '°C \\\\u140a\\",\\"bold\\":true}"}'
        send_to_RCON(command)
        if float(value) < 25 :
            command='execute at 7a278519-5e0d-4ed5-b847-f5fc311b2170 run clone 247 4 90 243 4 82 215 4 -23'
            send_to_RCON(command)
        elif float(value) < 27 :
            command='execute at 7a278519-5e0d-4ed5-b847-f5fc311b2170 run clone 247 4 100 243 4 92 215 4 -23'
            send_to_RCON(command)
        else:
            command='execute at 7a278519-5e0d-4ed5-b847-f5fc311b2170 run clone 247 4 110 243 4 102 215 4 -23'
            send_to_RCON(command)
    # Humidity: update the sign and switch the greenhouse function.
    elif "humidity" in message.topic:
        command='execute at 7a278519-5e0d-4ed5-b847-f5fc311b2170 run data merge block 217 6 -13 {Text4:"{\\"text\\":\\"\\\\u1405 ' + value + '% \\\\u140a\\",\\"bold\\":true}"}'
        send_to_RCON(command)
        if float(value) < 50 :
            command='execute at 7a278519-5e0d-4ed5-b847-f5fc311b2170 run function minecraft:greenhouse-normal'
            send_to_RCON(command)
        else:
            command='execute at 7a278519-5e0d-4ed5-b847-f5fc311b2170 run function minecraft:greenhouse-mossy'
            send_to_RCON(command)
    elif "weather" in message.topic:
        command='execute at 7a278519-5e0d-4ed5-b847-f5fc311b2170 run weather ' + value
        send_to_RCON(command)
    # Physical buttons: toggle redstone blocks at fixed world coordinates.
    elif "buttonBlue" in message.topic:
        if value == "on" :
            command='execute at @e[tag=world] if block 228 4 115 minecraft:diamond_block run setblock 180 4 113 minecraft:redstone_block'
            send_to_RCON(command)
        else:
            command='execute at @e[tag=world] if block 228 4 115 minecraft:diamond_block run setblock 180 4 113 minecraft:air'
            send_to_RCON(command)
    elif "buttonGreen" in message.topic:
        if value == "on" :
            command='execute at @e[tag=world] if block 228 4 115 minecraft:diamond_block run setblock 183 4 111 minecraft:redstone_block'
            send_to_RCON(command)
        else:
            command='execute at @e[tag=world] if block 228 4 115 minecraft:diamond_block run setblock 183 4 111 minecraft:air'
            send_to_RCON(command)
    elif "buttonRed" in message.topic:
        command='execute at @e[tag=world] if block 228 4 115 minecraft:diamond_block run setblock 190 4 116 minecraft:redstone_block'
        send_to_RCON(command)
    # Joystick: nudge the tagged movable block 0.25 blocks per message.
    elif "joystickX" in message.topic:
        if value == "R" :
            command='execute at @e[tag=emeraldblockmovable] run tp @e[tag=emeraldblockmovable] ~0.25 ~ ~'
            send_to_RCON(command)
        else:
            command='execute at @e[tag=emeraldblockmovable] run tp @e[tag=emeraldblockmovable] ~-0.25 ~ ~'
            send_to_RCON(command)
    elif "joystickY" in message.topic:
        if value == "U" :
            command='execute at @e[tag=emeraldblockmovable] run tp @e[tag=emeraldblockmovable] ~ ~ ~-0.25'
            send_to_RCON(command)
        else:
            command='execute at @e[tag=emeraldblockmovable] run tp @e[tag=emeraldblockmovable] ~ ~ ~0.25'
            send_to_RCON(command)
    elif "joystickB" in message.topic:
        command='execute at @e[tag=world] if block 228 4 115 minecraft:diamond_block run setblock 183 4 113 minecraft:redstone_block'
        send_to_RCON(command)
    # Chat relay: broadcast the payload to all players.
    # NOTE(review): `value` is interpolated into the tellraw JSON unescaped;
    # a payload containing quotes will break the command.
    elif "SendMessage" in message.topic:
        command='tellraw @a {"text":"' + value + '"}'
        send_to_RCON(command)
    elif "farmbutton" in message.topic:
        command='execute at @e[tag=world] run setblock 200 4 128 minecraft:redstone_block'
        send_to_RCON(command)
    elif "buzzer" in message.topic:
        command='execute at @e[tag=world] run setblock 197 4 113 minecraft:redstone_block'
        send_to_RCON(command)
    # ESP sensor blocks: the topic suffix selects the named receiver entity;
    # the payload sets the target block's redstone power underneath it.
    elif "ESPblock" in message.topic:
        tokens = re.findall(r'/(.*)', message.topic)
        name = tokens[0]
        command='execute as @e[tag=receiver,name=\'' + name + '\'] at @s run setblock ~ ~-1 ~ target[power=' + value +']'
        send_to_RCON(command)
###### END of the section to be edited
# Hook for cleanup after interrupt: SIGINT only flips the `interrupted`
# flag; the loops below poll it and shut down cleanly.
def signal_handler(signal, frame):
    # NOTE(review): the parameter named `signal` shadows the signal module
    # inside this function (harmless here, as the module isn't used inside).
    global interrupted
    interrupted = True
signal.signal(signal.SIGINT, signal_handler)
interrupted = False
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: set `connected_flag` on success (rc == 0) so
    the startup loop below can stop waiting."""
    if rc==0:
        client.connected_flag=True
        print("MQTT on_connect callback: connected ok")
    else:
        print("MQTT on_connect callback: Bad connection Returned code=",rc)
# MQTT connection initialization: connect, then busy-wait until the
# on_connect callback confirms the session.
mqtt_client = mqtt.Client()
mqtt_client.on_connect=on_connect #bind call back function
mqtt_client.loop_start()
mqtt.Client.connected_flag=False
print("Connecting to MQTT broker ",mqtt_hostname, "MQTT port: ", mqtt_port)
mqtt_client.connect(mqtt_hostname,port=mqtt_port)
while not mqtt_client.connected_flag: #wait in loop
    print("Wait for MQTT callback")
    sleep(1)
mqtt_client.on_message = on_message
# RCON connection initialization: retry every 30 s until connected or
# the user interrupts with Ctrl-C.
print("Connecting to RCON at ",rcon_hostname, "RCON port (not implemented): ", rcon_port)
rconOK = False
while (not rconOK and not interrupted):
    try:
        mcr = MCRcon(rcon_hostname, rcon_password)
        mcr.connect()
    except ConnectionError as e:
        print(e)
        print("Connect to rcon failed. Next try after 30s")
        sleep(30)
    else:
        print("RCON connection established.")
        rconOK=True
if interrupted:
    sys.exit(0)
# NOTE(review): loop_start() was already called above; this second call is
# redundant.
mqtt_client.loop_start()
sleep(1)
topic = '{}/#'.format(base_topic)
try:
    mqtt_client.subscribe(topic)
# NOTE(review): bare except hides the actual subscribe failure reason.
except:
    print("MQTT subscribe error.")
    sys.exit(1)
else:
    print("MQTT subscribed to topic ", topic)
# Main loop: idle until SIGINT sets `interrupted`, then disconnect both
# the MQTT client and the RCON session.
while True:
    sleep(1)
    if interrupted:
        # cleanup
        print("mc-rcon-mqtt ending.")
        mqtt_client.disconnect()
        mcr.disconnect()
        break
3319067 | <reponame>rayheberer/LambdaCodingChallenges<filename>Week 14 Reinforcement Learning/Code Challenges/Day 2 Discounted Rewards/reward.py
def reward(R, gamma):
    """Accumulate the discounted reward series R, R*gamma, R*gamma**2, ...

    Terms are summed until their magnitude falls to 1e-5 or below, which
    truncates the (otherwise infinite) geometric series for 0 <= gamma < 1.
    """
    epsilon = 0.00001
    total = 0
    term = R
    while abs(term) > epsilon:
        total += term
        term *= gamma
    return total
3389312 | from multiprocessing import TimeoutError
from FreeTAKServer.controllers.FederatedCoTController import FederatedCoTController
from FreeTAKServer.model.FTSModel.Event import Event
from FreeTAKServer.model.protobufModel.fig_pb2 import FederatedEvent
import codecs
import socket
from FreeTAKServer.model.ServiceObjects.Federate import Federate
class FederationServiceController:
    """Manage one federated TAK-server connection: frame/unframe CoT
    messages (4-byte length header + protobuf payload) over a socket."""
    def __init__(self, ip, port, connection):
        self.ip = ip
        self.port = port
        self.pool = None
        # Header read size in bytes (length prefix of each frame).
        self.buffer = 4
        self.excessData = None
        # Set to True when the peer disconnects or sends bad data.
        self.killSwitch = False
        self.connection = connection
    def start(self):
        """Build and return a Federate model object describing the peer."""
        federateObject = Federate()
        federateObject.federationController = self
        federateObject.IP = self.connection.getpeername()[0]
        federateObject.Port = self.connection.getpeername()[1]
        federateObject.Socket = self.connection
        return federateObject
    def establish_connection(self, ip, port):
        # returns a socket object
        # NOTE(review): unimplemented stub.
        pass
    def disconnect(self):
        """Shut down and close the peer socket; always returns 1."""
        try:
            self.connection.shutdown(socket.SHUT_RDWR)
            self.connection.close()
            return 1
        # NOTE(review): bare except — falls back to close() if shutdown fails.
        except:
            self.connection.close()
            return 1
    def generate_header(self, contentlength):
        """Encode *contentlength* as a 4-byte big-endian length prefix.

        NOTE(review): both branches below are identical except that the odd
        branch zero-pads the hex string first — they could be unified.
        """
        tempHex = format(contentlength, 'x')
        if (len(tempHex) % 2) == 0:
            filteredhex = [(tempHex[i:i + 2]) for i in range(0, len(tempHex), 2)]
            while len(filteredhex) < 4:
                filteredhex.insert(0, '00')
            filteredhex = r'\x'.join(filteredhex)
            filteredhex = r'\x' + filteredhex
            filteredhex = codecs.escape_decode(filteredhex)[0]
            return filteredhex
        else:
            tempHex = '0'+tempHex
            filteredhex = [(tempHex[i:i + 2]) for i in range(0, len(tempHex), 2)]
            while len(filteredhex) < 4:
                filteredhex.insert(0, '00')
            filteredhex = r'\x'.join(filteredhex)
            filteredhex = r'\x' + filteredhex
            filteredhex = codecs.escape_decode(filteredhex)[0]
            return filteredhex
    def get_header_length(self, header):
        """Decode a received length prefix back to an int; -1 on failure."""
        try:
            from binascii import hexlify
            headerInHex = header.split(b'\\x')
            if len(headerInHex[-1]) == 3:
                headerInHex[-1][2] = hexlify(headerInHex[-1][2])
            headerInHex = b''.join(headerInHex)
            return int(headerInHex, 16)
        except:
            return -1
    def receive_data_from_federates(self):
        # returns data received from federate
        # the following logic receives data from the federate and processes the protobuf
        # up to 100 CoT's
        dataCount = 0
        dataArray = []
        # 100 is the limit of data which can be received from a federate in one iteration
        while dataCount < 100:
            dataCount += 1
            try:
                try:
                    # Short timeout so the loop exits quickly when the peer
                    # has nothing more to send.
                    self.connection.settimeout(0.01)
                    data = self.connection.recv(self.buffer)
                    self.connection.settimeout(0)
                except TimeoutError:
                    break
                except Exception as e:
                    # Any other socket error: drop this federate.
                    self.disconnect()
                    self.killSwitch = True
                    return 0
                # NOTE(review): `data != [b'']` compares bytes against a
                # list and is therefore always True — likely meant b''.
                if data != [b'']:
                    header = data[0]
                    content = self.connection.recv(self.get_header_length(header))
                    EmptyFTSObject = Event.FederatedCoT()
                    protoObject = FederatedEvent().FromString(content)
                    print(protoObject)
                    FTSObject = FederatedCoTController().serialize_main_contentv1(protoObject, EmptyFTSObject)
                    print('received data from Federate')
                    print(content)
                else:
                    self.killSwitch = True
                dataArray.append(FTSObject)
            # NOTE(review): broad except/pass silently drops malformed
            # frames; dataArray is also never returned by this method.
            except Exception as e:
                pass
    def send_data_to_federates(self, data):
        """Serialize *data* to a framed protobuf and send it to the peer.

        Returns 1 on success; any failure is silently swallowed
        (NOTE(review): the except/pass below hides send errors).
        """
        try:
            # sends supplied data to supplied socket upon being called
            federatedEvent = FederatedEvent()
            ProtoObj = FederatedCoTController().serialize_from_FTS_modelv1(federatedevent=federatedEvent, ftsobject=data)
            protostring = ProtoObj.SerializeToString()
            header = self.generate_header(len(protostring))
            protostring = header + protostring
            print(b'sent '+protostring+b' to federate')
            self.connection.send(protostring)
            return 1
        except Exception as e:
            pass
    def recv_in_data_pipe(self, pipe):
        # NOTE(review): unimplemented stub.
        pass
    def send_in_data_pipe(self, pipe, data):
        # NOTE(review): unimplemented stub.
        pass
if __name__ == '__main__':
    # Ad-hoc manual check: decode a captured federated GeoChat payload.
    # NOTE(review): the bare class reference below is a no-op statement,
    # presumably left over from debugging.
    FederationServiceController
    # Captured FederatedEvent wire bytes (an "All Chat Rooms" GeoChat message).
    content = b'\n\xb1\x06\x08\xd8\x81\xa6\xa1\xec.\x10\x84\xf0\xa5\xa1\xec.\x18\x84\xa8\xbf\xca\xec.1\x00\x00\x00\xe0\xcf\x12cA9\x00\x00\x00\xe0\xcf\x12cAA\x00\x00\x00\xe0\xcf\x12cAJjGeoChat.S-1-5-21-2720623347-3037847324-4167270909-1002.All Chat Rooms.673a0aa4-c8eb-4bb5-aa7e-abab7d8d89a0R\x05b-t-fZ\th-g-i-g-ob\x80\x05<detail><__chat id="All Chat Rooms" chatroom="All Chat Rooms" senderCallsign="FEATHER" groupOwner="false"><chatgrp id="All Chat Rooms" uid0="S-1-5-21-2720623347-3037847324-4167270909-1002" uid1="All Chat Rooms"/></__chat><link uid="S-1-5-21-2720623347-3037847324-4167270909-1002" type="a-f-G-U-C-I" relation="p-p"/><remarks source="BAO.F.WinTAK.S-1-5-21-2720623347-3037847324-4167270909-1002" sourceID="S-1-5-21-2720623347-3037847324-4167270909-1002" to="All Chat Rooms" time="2021-01-02T17:33:40.74Z">aa</remarks><_flow-tags_ TAK-Server-c0581fed97ff4cb89eb8666a8794670cc9f77ddb-badf-48da-abe7-84545ecda69d="2021-01-02T17:33:43Z"/></detail>'
    # Parse the raw bytes back into a protobuf FederatedEvent.
    y = FederatedEvent().FromString(content)
1==1 | StarcoderdataPython |
9723726 | from __future__ import annotations
# Packages
import pygame
# Helpers
from ..Helpers.Cords import Cords
# Elements
from .Element import Element
class Position(Element):
    """A 1x1 game element anchored at a coordinate, with a hitbox."""

    def __init__(self: Position, x: float = 0.00, y: float = 0.00, color: pygame.Color = pygame.Color(255, 255, 255)) -> None:
        """Initialize game element.

        Args:
            self (Position): Itself.
            x (float, optional): Start X-cordinate. Defaults to 0.00.
            y (float, optional): Start Y-cordinate. Defaults to 0.00.
            color (pygame.Color, optional): Color of the element. Defaults to pygame.Color(255, 255, 255).
        """
        # Fixed docs: the old docstring described a nonexistent ``speed``
        # parameter and claimed __init__ returned Position (it returns None).
        Element.__init__(
            self,
            color
        )
        self._cords = Cords(x, y)
        self._has_hitbox = True

    def width(self: Position) -> float:
        """Width of the element.

        Args:
            self (Position): Itself.

        Returns:
            float: Width of the element (always 1.00).
        """
        return 1.00

    def height(self: Position) -> float:
        """Height of the element.

        Args:
            self (Position): Itself.

        Returns:
            float: Height of the element (always 1.00).
        """
        return 1.00

    def hitbox(self: Position, scale: Cords) -> pygame.Rect:
        """Hitbox for element.

        Args:
            self (Position): Itself.
            scale (Cords): Scale factors for each axis.

        Returns:
            pygame.Rect: Hitbox rectangle centred on the element's coordinates.
        """
        rect = pygame.Rect(
            0, 0,
            self.width() * scale.x,
            # Bug fix: height was *added* to the scale instead of being
            # multiplied by it, unlike the width above.
            self.height() * scale.y
        )
        rect.center = (
            self._cords.x,
            self._cords.y
        )
        return rect
| StarcoderdataPython |
4947400 | <filename>Atividades/PY 01/maximo_3.py
def maximo(x, y, z):
    """Return the largest of the three values *x*, *y* and *z*.

    The original chain of strict ``>`` comparisons returned the wrong
    value on partial ties -- e.g. ``maximo(5, 5, 3)`` returned 3 because
    neither 5 was strictly greater than the other.  The built-in ``max``
    handles all orderings and ties correctly.
    """
    return max(x, y, z)
6520343 | <reponame>code-review-doctor/amy<gh_stars>10-100
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-08 16:10
from __future__ import unicode_literals
from django.db import migrations
def forward(apps, schema_editor):
    """Ensure the 'administrators' auth group exists."""
    group_model = apps.get_model('auth', 'Group')
    group_model.objects.get_or_create(name='administrators')
class Migration(migrations.Migration):
    # Must run after the previous workshops migration.
    dependencies = [
        ('workshops', '0096_change_help_text_in_training_request'),
    ]

    # Reverse is a deliberate no-op: the group is left in place on rollback.
    operations = [
        migrations.RunPython(forward, reverse_code=migrations.RunPython.noop),
    ]
| StarcoderdataPython |
3200873 | <reponame>bobobo80/python-crawler-test<filename>tasks/workers.py
"""
celery workers 启动文件
"""
from celery import Celery
from kombu import Exchange, Queue
import config
tasks = ['tasks.links', 'tasks.logs']
app = Celery('mfw_task', include=tasks, broker=config.CELERY_BROKER, backend=config.CELERY_BACKEND)
app.conf.update(
task_serializer='json',
accept_content=['json'],
result_serializer='json',
timezone='Asia/Shanghai',
enable_utc=True,
beat_schedule={
'links_download': {
'task': 'tasks.links.schedule_download_links',
'schedule': 60 * 2,
},
'logs_download': {
'task': 'tasks.logs.schedule_download_logs',
'schedule': 60 * 2,
},
'log_parser': {
'task': 'tasks.logs.schedule_parser_logs',
'schedule': 60 * 2,
},
},
celery_queues=(
Queue('links_queue', exchange=Exchange('links_queue', type='direct'), routing_key='for_links'),
Queue('logs_queue', exchange=Exchange('logs_queue', type='direct'), routing_key='for_logs'),
),
)
| StarcoderdataPython |
8032304 | <filename>keystone_tempest_plugin/tests/rbac/v3/test_role_assignment.py
# Copyright 2020 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from tempest.api.identity import base
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions
from keystone_tempest_plugin.tests.rbac.v3 import base as rbac_base
class IdentityV3RbacAssignmentTest(rbac_base.IdentityV3RbacBaseTests,
                                   metaclass=abc.ABCMeta):
    """Base class for role-assignment RBAC protection tests.

    Builds a matrix of role assignments -- every combination of
    {user, group} x {project, domain, system scope} across the persona's
    own domain and an unrelated domain -- and records the expected
    assignment dicts in ``cls.assignments`` so persona subclasses can
    assert exactly which assignments they are allowed to list.
    """

    @classmethod
    def setup_clients(cls):
        super(IdentityV3RbacAssignmentTest, cls).setup_clients()
        # The persona under test is named by the first configured credential.
        cls.persona = getattr(cls, 'os_%s' % cls.credentials[0])
        cls.client = cls.persona.role_assignments_client
        # Fixtures are always created with system-admin privileges.
        cls.admin_client = cls.os_system_admin

    @classmethod
    def resource_setup(cls):
        super(IdentityV3RbacAssignmentTest, cls).resource_setup()
        cls._setup_assignments()

    @classmethod
    def _create_user(cls, domain_id):
        """Create a user in *domain_id*; deletion is scheduled automatically."""
        user_id = cls.admin_client.users_v3_client.create_user(
            name=data_utils.rand_name('user'),
            domain_id=domain_id)['user']['id']
        cls.addClassResourceCleanup(
            cls.admin_client.users_v3_client.delete_user, user_id)
        return user_id

    @classmethod
    def _create_group(cls, domain_id):
        """Create a group in *domain_id*; deletion is scheduled automatically."""
        group_id = cls.admin_client.groups_client.create_group(
            name=data_utils.rand_name('group'),
            domain_id=domain_id)['group']['id']
        cls.addClassResourceCleanup(
            cls.admin_client.groups_client.delete_group, group_id)
        return group_id

    @classmethod
    def _create_project(cls, domain_id):
        """Create a project in *domain_id*; deletion is scheduled automatically."""
        project_id = cls.admin_client.projects_client.create_project(
            name=data_utils.rand_name('project'),
            domain_id=domain_id)['project']['id']
        cls.addClassResourceCleanup(
            cls.admin_client.projects_client.delete_project, project_id)
        return project_id

    @classmethod
    def _setup_assignments(cls):
        """Create the fixture actors and grant the test role everywhere.

        The previous implementation spelled out all twenty grant calls and
        the twenty expected-assignment dicts by hand; the loops below create
        exactly the same grants and records.
        """
        cls.own_domain = cls.persona.credentials.domain_id
        cls.role_id = cls.admin_client.roles_v3_client.create_role(
            name=data_utils.rand_name('role'))['role']['id']
        cls.addClassResourceCleanup(
            cls.admin_client.roles_v3_client.delete_role, cls.role_id)

        cls.user_in_domain = cls._create_user(cls.own_domain)
        cls.group_in_domain = cls._create_group(cls.own_domain)
        cls.project_in_domain = cls._create_project(cls.own_domain)

        cls.other_domain = cls.admin_client.domains_client.create_domain(
            name=data_utils.rand_name('domain'))['domain']['id']
        # Cleanups run LIFO: register the delete first so the disable
        # (update enabled=False) runs before it.
        cls.addClassResourceCleanup(
            cls.admin_client.domains_client.delete_domain,
            cls.other_domain)
        cls.addClassResourceCleanup(
            cls.admin_client.domains_client.update_domain,
            cls.other_domain,
            enabled=False)

        cls.user_other_domain = cls._create_user(cls.other_domain)
        cls.group_other_domain = cls._create_group(cls.other_domain)
        cls.project_other_domain = cls._create_project(cls.other_domain)

        roles_client = cls.admin_client.roles_v3_client
        users = (cls.user_in_domain, cls.user_other_domain)
        groups = (cls.group_in_domain, cls.group_other_domain)
        projects = (cls.project_in_domain, cls.project_other_domain)
        domains = (cls.own_domain, cls.other_domain)

        # Grant the test role to every user and every group on every
        # project, every domain, and the system.
        for project_id in projects:
            for user_id in users:
                roles_client.create_user_role_on_project(
                    project_id, user_id, cls.role_id)
        for domain_id in domains:
            for user_id in users:
                roles_client.create_user_role_on_domain(
                    domain_id, user_id, cls.role_id)
        for user_id in users:
            roles_client.create_user_role_on_system(user_id, cls.role_id)
        for project_id in projects:
            for group_id in groups:
                roles_client.create_group_role_on_project(
                    project_id, group_id, cls.role_id)
        for domain_id in domains:
            for group_id in groups:
                roles_client.create_group_role_on_domain(
                    domain_id, group_id, cls.role_id)
        for group_id in groups:
            roles_client.create_group_role_on_system(group_id, cls.role_id)

        # Record the expected assignment dicts in the same combinations.
        cls.assignments = []
        for actor_key, actors in (('user_id', users), ('group_id', groups)):
            for scope_key, scopes in (('project_id', projects),
                                      ('domain_id', domains),
                                      ('system', ('all',))):
                for scope in scopes:
                    for actor in actors:
                        cls.assignments.append({
                            actor_key: actor,
                            scope_key: scope,
                            'role_id': cls.role_id,
                        })

    def _extract_role_assignments_from_response_body(self, r):
        """Condense each assignment in response *r* into a comparable dict.

        Each dict carries the scope (``project_id``, ``domain_id``, or
        ``system``), the actor (``user_id`` or ``group_id``), and the
        ``role_id``, matching the shape of ``self.assignments`` entries.
        """
        assignments = []
        for assignment in r['role_assignments']:
            a = {}
            if 'project' in assignment['scope']:
                a['project_id'] = assignment['scope']['project']['id']
            elif 'domain' in assignment['scope']:
                a['domain_id'] = assignment['scope']['domain']['id']
            elif 'system' in assignment['scope']:
                a['system'] = 'all'
            if 'user' in assignment:
                a['user_id'] = assignment['user']['id']
            elif 'group' in assignment:
                a['group_id'] = assignment['group']['id']
            a['role_id'] = assignment['role']['id']
            assignments.append(a)
        return assignments

    @abc.abstractmethod
    def test_identity_list_role_assignments(self):
        """Test identity:list_role_assignments policy.

        This test must check:
          * whether the persona can list all user and group assignments
            across the deployment, with and without names
          * whether the persona can filter user and group assignments by
            domain, by project (own and other domain), and by system scope
          * whether the persona can filter assignments by user or group
            from their own or another domain
          * whether the persona can filter assignments by a global role,
            and by role combined with each scope or actor filter
          * whether the persona can filter assignments by combined scope
            and actor (project/domain x user/group)
        """
        pass

    @abc.abstractmethod
    def test_identity_list_role_assignments_for_tree(self):
        """Test identity:list_role_assignments_for_tree policy.

        This test must check:
          * whether the persona can list role assignments for a subtree of
            a project in their own domain
          * whether the persona can list role assignments for a subtree of
            a project in another domain
          * whether the persona can list role assignments for a subtree of
            a project on which they have a role assignment (if applicable)
        """
        pass
class SystemAdminTests(IdentityV3RbacAssignmentTest, base.BaseIdentityTest):
    """System admin can list and filter every fixture role assignment."""

    credentials = ['system_admin']

    def _assert_assignment_listing(self, query, expected, subset_only=False):
        """List role assignments with *query* filters and check the result.

        By default every assignment in *expected* must be present in the
        response and every other fixture assignment must be absent.  With
        ``subset_only=True`` the inclusion check is reversed -- every
        returned assignment must be one of *expected* -- which is used with
        role-id filters, where only the ephemeral tempest role's
        assignments can be enumerated exhaustively.
        """
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        if subset_only:
            for assignment in actual:
                self.assertIn(assignment, expected)
        else:
            for assignment in expected:
                self.assertIn(assignment, actual)
        # Fixture assignments outside *expected* must never be returned.
        for assignment in self.assignments:
            if assignment not in expected:
                self.assertNotIn(assignment, actual)

    def test_identity_list_role_assignments(self):
        everything = self.assignments

        # No filters (and with names included): all assignments visible.
        self._assert_assignment_listing({}, everything)
        self._assert_assignment_listing({'include_names': True}, everything)

        # Filter by either domain should succeed.
        for domain_id in (self.own_domain, self.other_domain):
            self._assert_assignment_listing(
                {'scope.domain.id': domain_id},
                [a for a in everything if a.get('domain_id') == domain_id])

        # Filter by a project in either domain should succeed.
        for project_id in (self.project_in_domain, self.project_other_domain):
            self._assert_assignment_listing(
                {'scope.project.id': project_id},
                [a for a in everything if a.get('project_id') == project_id])

        # Filter by system scope should succeed.
        self._assert_assignment_listing(
            {'scope.system': 'all'},
            [a for a in everything if a.get('system') == 'all'])

        # Filter by a user from either domain should succeed.
        for user_id in (self.user_in_domain, self.user_other_domain):
            self._assert_assignment_listing(
                {'user.id': user_id},
                [a for a in everything if a.get('user_id') == user_id])

        # Filter by a group from either domain should succeed.
        for group_id in (self.group_in_domain, self.group_other_domain):
            self._assert_assignment_listing(
                {'group.id': group_id},
                [a for a in everything if a.get('group_id') == group_id])

        # Filter by the (globally assigned) role should succeed.
        self._assert_assignment_listing(
            {'role.id': self.role_id}, everything, subset_only=True)

        # Filter by role combined with each scope/actor should succeed.
        self._assert_assignment_listing(
            {'scope.project.id': self.project_in_domain,
             'role.id': self.role_id},
            [a for a in everything
             if a.get('project_id') == self.project_in_domain],
            subset_only=True)
        self._assert_assignment_listing(
            {'scope.domain.id': self.other_domain, 'role.id': self.role_id},
            [a for a in everything
             if a.get('domain_id') == self.other_domain],
            subset_only=True)
        self._assert_assignment_listing(
            {'scope.system': 'all', 'role.id': self.role_id},
            [a for a in everything if a.get('system') == 'all'],
            subset_only=True)
        self._assert_assignment_listing(
            {'user.id': self.user_in_domain, 'role.id': self.role_id},
            [a for a in everything
             if a.get('user_id') == self.user_in_domain],
            subset_only=True)
        self._assert_assignment_listing(
            {'group.id': self.group_in_domain, 'role.id': self.role_id},
            [a for a in everything
             if a.get('group_id') == self.group_in_domain],
            subset_only=True)

        # Filter by combined scope and actor should succeed.
        self._assert_assignment_listing(
            {'user.id': self.user_in_domain,
             'scope.project.id': self.project_in_domain},
            [a for a in everything
             if a.get('project_id') == self.project_in_domain
             and a.get('user_id') == self.user_in_domain])
        self._assert_assignment_listing(
            {'group.id': self.group_in_domain,
             'scope.project.id': self.project_in_domain},
            [a for a in everything
             if a.get('project_id') == self.project_in_domain
             and a.get('group_id') == self.group_in_domain])
        self._assert_assignment_listing(
            {'user.id': self.user_in_domain,
             'scope.domain.id': self.own_domain},
            [a for a in everything
             if a.get('domain_id') == self.own_domain
             and a.get('user_id') == self.user_in_domain])
        self._assert_assignment_listing(
            {'group.id': self.group_in_domain,
             'scope.domain.id': self.own_domain},
            [a for a in everything
             if a.get('domain_id') == self.own_domain
             and a.get('group_id') == self.group_in_domain])

    def _assert_subtree_assignment_visible(self, domain_id, parent_id):
        """Create a child of *parent_id*, grant the test role on it, and
        verify an include_subtree listing of the parent reports the grant."""
        subproject_id = self.admin_client.projects_client.create_project(
            name=data_utils.rand_name('project'),
            domain_id=domain_id,
            parent_id=parent_id)['project']['id']
        self.addCleanup(self.admin_client.projects_client.delete_project,
                        subproject_id)
        self.admin_client.roles_v3_client.create_user_role_on_project(
            subproject_id, self.user_in_domain, self.role_id)
        query = {'scope.project.id': parent_id, 'include_subtree': True}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        self.assertIn({'user_id': self.user_in_domain,
                       'project_id': subproject_id,
                       'role_id': self.role_id}, actual)

    def test_identity_list_role_assignments_for_tree(self):
        # Should see subtree assignments for a project in own domain...
        self._assert_subtree_assignment_visible(
            self.own_domain, self.project_in_domain)
        # ...and for a project in another domain.
        self._assert_subtree_assignment_visible(
            self.other_domain, self.project_other_domain)
class SystemMemberTests(SystemAdminTests):
    # System members are expected to see the same assignments as admins;
    # the inherited tests run unchanged under these credentials.
    credentials = ['system_member', 'system_admin']
class SystemReaderTests(SystemMemberTests):
    # System readers inherit the same listing expectations as members.
    credentials = ['system_reader', 'system_admin']
class DomainAdminTests(IdentityV3RbacAssignmentTest, base.BaseIdentityTest):
    """Role-assignment listing behavior for the domain-admin persona.

    A domain admin sees only assignments scoped to its own domain or to
    projects inside it; filters scoped to other domains yield empty
    results, and system scope yields nothing, as exercised below.
    """

    credentials = ['domain_admin', 'system_admin']

    def test_identity_list_role_assignments(self):
        """Exercise GET /v3/role_assignments with every supported filter
        combination and verify the domain-limited visibility rules.
        """
        # Listing all assignments with no filters should only return
        # assignments in own domain
        expected = [a for a in self.assignments
                    if a.get('project_id') == self.project_in_domain
                    or a.get('domain_id') == self.own_domain]
        not_expected = [a for a in self.assignments if a not in expected]
        resp = self.do_request('list_role_assignments')
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in expected:
            self.assertIn(assignment, actual)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Listing all assignments with names and no filters should only return
        # assignments in own domain
        query = {'include_names': True}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in expected:
            self.assertIn(assignment, actual)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Filter assignments by own domain should succeed
        expected = [a for a in self.assignments
                    if a.get('domain_id') == self.own_domain]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'scope.domain.id': self.own_domain}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in expected:
            self.assertIn(assignment, actual)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Filter assignments by other domain should be empty
        query = {'scope.domain.id': self.other_domain}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        self.assertEmpty(actual)
        # Filter assignments by project in own domain should succeed
        expected = [a for a in self.assignments
                    if a.get('project_id') == self.project_in_domain]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'scope.project.id': self.project_in_domain}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in expected:
            self.assertIn(assignment, actual)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Filter assignments by project in other domain should be empty
        query = {'scope.project.id': self.project_other_domain}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        self.assertEmpty(actual)
        # Filter assignments by system should be empty
        query = {'scope.system': 'all'}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        self.assertEmpty(actual)
        # Filter assignments by user in own domain should get assignments for
        # that user only for projects in own domain or for own domain itself
        expected = [a for a in self.assignments
                    if a.get('user_id') == self.user_in_domain
                    and (a.get('project_id') == self.project_in_domain
                         or a.get('domain_id') == self.own_domain)]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'user.id': self.user_in_domain}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in expected:
            self.assertIn(assignment, actual)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Filter assignments by user in other domain should still work but only
        # return assignments for projects in own domain or for own domain
        # itself
        expected = [a for a in self.assignments
                    if a.get('user_id') == self.user_other_domain
                    and (a.get('project_id') == self.project_in_domain
                         or a.get('domain_id') == self.own_domain)]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'user.id': self.user_other_domain}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in expected:
            self.assertIn(assignment, actual)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Filter assignments by group in own domain should succeed
        expected = [a for a in self.assignments
                    if a.get('group_id') == self.group_in_domain
                    and (a.get('project_id') == self.project_in_domain
                         or a.get('domain_id') == self.own_domain)]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'group.id': self.group_in_domain}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in expected:
            self.assertIn(assignment, actual)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Filter assignments by group in other domain should still work but
        # only return assignments for projects in own domain or for own domain
        # itself
        expected = [a for a in self.assignments
                    if a.get('group_id') == self.group_other_domain
                    and (a.get('project_id') == self.project_in_domain
                         or a.get('domain_id') == self.own_domain)]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'group.id': self.group_other_domain}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in expected:
            self.assertIn(assignment, actual)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Filter assignments by global role should only return role
        # assignments for own domain
        expected = [a for a in self.assignments
                    if a.get('project_id') == self.project_in_domain
                    or a.get('domain_id') == self.own_domain]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'role.id': self.role_id}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in expected:
            self.assertIn(assignment, actual)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Filter assignments by project and role should succeed
        expected = [a for a in self.assignments
                    if a.get('project_id') == self.project_in_domain]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'scope.project.id': self.project_in_domain,
                 'role.id': self.role_id}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Reverse the check: only ephemeral tempest roles should be in the list
        for assignment in actual:
            self.assertIn(assignment, expected)
        # Filter assignments by domain and role should succeed
        expected = [a for a in self.assignments
                    if a.get('domain_id') == self.other_domain]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'scope.domain.id': self.other_domain, 'role.id': self.role_id}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Reverse the check: only ephemeral tempest roles should be in the list
        for assignment in actual:
            self.assertIn(assignment, expected)
        # Filter assignments by system and role should be empty
        query = {'scope.system': 'all', 'role.id': self.role_id}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        self.assertEmpty(actual)
        # Filter assignments by user and role should get assignments for
        # that user only for projects in own domain or for own domain itself
        expected = [a for a in self.assignments
                    if a.get('user_id') == self.user_in_domain
                    and (a.get('project_id') == self.project_in_domain
                         or a.get('domain_id') == self.own_domain)]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'user.id': self.user_in_domain, 'role.id': self.role_id}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Reverse the check: only ephemeral tempest roles should be in the list
        for assignment in actual:
            self.assertIn(assignment, expected)
        # Filter assignments by group and role should get assignments for
        # that group only for projects in own domain or for own domain itself
        expected = [a for a in self.assignments
                    if a.get('group_id') == self.group_in_domain
                    and (a.get('project_id') == self.project_in_domain
                         or a.get('domain_id') == self.own_domain)]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'group.id': self.group_in_domain, 'role.id': self.role_id}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Reverse the check: only ephemeral tempest roles should be in the list
        for assignment in actual:
            self.assertIn(assignment, expected)
        # Filter assignments by project and user should succeed
        expected = [a for a in self.assignments
                    if a.get('project_id') == self.project_in_domain
                    and a.get('user_id') == self.user_in_domain]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'user.id': self.user_in_domain,
                 'scope.project.id': self.project_in_domain}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in expected:
            self.assertIn(assignment, actual)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Filter assignments by project and group should succeed
        expected = [a for a in self.assignments
                    if a.get('project_id') == self.project_in_domain
                    and a.get('group_id') == self.group_in_domain]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'group.id': self.group_in_domain,
                 'scope.project.id': self.project_in_domain}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in expected:
            self.assertIn(assignment, actual)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Filter assignments by domain and user should succeed
        expected = [a for a in self.assignments
                    if a.get('domain_id') == self.own_domain
                    and a.get('user_id') == self.user_in_domain]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'user.id': self.user_in_domain,
                 'scope.domain.id': self.own_domain}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in expected:
            self.assertIn(assignment, actual)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)
        # Filter assignments by domain and group should succeed
        expected = [a for a in self.assignments
                    if a.get('domain_id') == self.own_domain
                    and a.get('group_id') == self.group_in_domain]
        not_expected = [a for a in self.assignments if a not in expected]
        query = {'group.id': self.group_in_domain,
                 'scope.domain.id': self.own_domain}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        for assignment in expected:
            self.assertIn(assignment, actual)
        for assignment in not_expected:
            self.assertNotIn(assignment, actual)

    def test_identity_list_role_assignments_for_tree(self):
        """Subtree listing succeeds inside the own domain and is
        Forbidden for a project in another domain.
        """
        # Should see subtree assignments for project in own domain
        subproject_id = self.admin_client.projects_client.create_project(
            name=data_utils.rand_name('project'),
            domain_id=self.own_domain,
            parent_id=self.project_in_domain)['project']['id']
        self.addCleanup(self.admin_client.projects_client.delete_project,
                        subproject_id)
        self.admin_client.roles_v3_client.create_user_role_on_project(
            subproject_id, self.user_in_domain, self.role_id)
        query = {'scope.project.id': self.project_in_domain,
                 'include_subtree': True}
        resp = self.do_request('list_role_assignments', **query)
        actual = self._extract_role_assignments_from_response_body(resp)
        expected_assignment = {'user_id': self.user_in_domain,
                               'project_id': subproject_id,
                               'role_id': self.role_id}
        self.assertIn(expected_assignment, actual)
        # Should not see subtree assignments for project in other domain
        query = {'scope.project.id': self.project_other_domain,
                 'include_subtree': True}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
class DomainMemberTests(DomainAdminTests):
    """Re-run the inherited assignment tests as the domain-member persona."""

    # The second credential was corrupted to 'system_<PASSWORD>' by an
    # anonymization pass over this file; every sibling test class in this
    # module pairs its persona with 'system_admin', so restore that value.
    credentials = ['domain_member', 'system_admin']
class DomainReaderTests(DomainMemberTests):
    """Re-run the inherited assignment tests as the domain-reader persona."""

    credentials = ['domain_reader', 'system_admin']
class ProjectAdminTests(IdentityV3RbacAssignmentTest, base.BaseIdentityTest):
    """Role-assignment listing behavior for the project-admin persona.

    A project-scoped admin may not list role assignments at all (every
    filtered listing is Forbidden), except a subtree query rooted at its
    own project.
    """

    # The credentials list was corrupted to ['project_<PASSWORD>',
    # '<PASSWORD>'] by an anonymization pass; restore the persona pair
    # used by every sibling class (ProjectMemberTests pairs
    # 'project_member' with 'system_admin').
    credentials = ['project_admin', 'system_admin']

    def test_identity_list_role_assignments(self):
        """Every filter combination of GET /v3/role_assignments must be
        Forbidden for a project-scoped token.
        """
        # Listing all assignments with no filters should fail
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden)
        # Listing all assignments with names and no filters should fail
        query = {'include_names': True}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by own domain should fail
        query = {'scope.domain.id': self.own_domain}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by other domain should fail
        query = {'scope.domain.id': self.other_domain}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by project in own domain should fail
        query = {'scope.project.id': self.project_in_domain}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by project in other domain should fail
        query = {'scope.project.id': self.project_other_domain}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by system should fail
        query = {'scope.system': 'all'}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by user in own domain should fail
        query = {'user.id': self.user_in_domain}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by user in other domain should fail
        query = {'user.id': self.user_other_domain}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by group in own domain should fail
        query = {'group.id': self.group_in_domain}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by group in other domain should fail
        query = {'group.id': self.group_other_domain}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by global role should fail
        query = {'role.id': self.role_id}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by project and role should fail
        query = {'scope.project.id': self.project_in_domain,
                 'role.id': self.role_id}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by domain and role should fail
        query = {'scope.domain.id': self.other_domain, 'role.id': self.role_id}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by system and role should fail
        query = {'scope.system': 'all', 'role.id': self.role_id}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by user and role should fail
        query = {'user.id': self.user_in_domain, 'role.id': self.role_id}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by group and role should fail
        query = {'group.id': self.group_in_domain, 'role.id': self.role_id}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by project and user should fail
        query = {'user.id': self.user_in_domain,
                 'scope.project.id': self.project_in_domain}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by project and group should fail
        query = {'group.id': self.group_in_domain,
                 'scope.project.id': self.project_in_domain}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by domain and user should fail
        query = {'user.id': self.user_in_domain,
                 'scope.domain.id': self.own_domain}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Filter assignments by domain and group should fail
        query = {'group.id': self.group_in_domain,
                 'scope.domain.id': self.own_domain}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)

    def test_identity_list_role_assignments_for_tree(self):
        """Subtree listing is Forbidden for foreign projects but allowed
        for the persona's own project.
        """
        # Should not see subtree assignments for project in own domain
        query = {'scope.project.id': self.project_in_domain,
                 'include_subtree': True}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Should not see subtree assignments for project in other domain
        query = {'scope.project.id': self.project_other_domain,
                 'include_subtree': True}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Should see subtree for own project
        own_project = self.persona.credentials.project_id
        subproject_id = self.admin_client.projects_client.create_project(
            name=data_utils.rand_name('project'),
            domain_id=self.own_domain,
            parent_id=own_project)['project']['id']
        self.addCleanup(self.admin_client.projects_client.delete_project,
                        subproject_id)
        self.admin_client.roles_v3_client.create_user_role_on_project(
            subproject_id, self.user_other_domain, self.role_id)
        query = {'scope.project.id': own_project,
                 'include_subtree': True}
        resp = self.do_request('list_role_assignments', **query)
        expected_assignment = {'user_id': self.user_other_domain,
                               'project_id': subproject_id,
                               'role_id': self.role_id}
        actual = self._extract_role_assignments_from_response_body(resp)
        self.assertIn(expected_assignment, actual)
class ProjectMemberTests(ProjectAdminTests):
    """Re-run the inherited tests as the project-member persona.

    Unlike the project admin, a project member may not even query the
    subtree of its own project, so the tree test is overridden.
    """

    credentials = ['project_member', 'system_admin']

    def test_identity_list_role_assignments_for_tree(self):
        """All subtree queries, including the own project, are Forbidden."""
        # Should not see subtree assignments for project in own domain
        query = {'scope.project.id': self.project_in_domain,
                 'include_subtree': True}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Should not see subtree assignments for project in other domain
        query = {'scope.project.id': self.project_other_domain,
                 'include_subtree': True}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
        # Should not see subtree for own project
        own_project = self.persona.credentials.project_id
        query = {'scope.project.id': own_project,
                 'include_subtree': True}
        self.do_request('list_role_assignments',
                        expected_status=exceptions.Forbidden, **query)
class ProjectReaderTests(ProjectMemberTests):
    """Re-run the inherited assignment tests as the project-reader persona."""

    credentials = ['project_reader', 'system_admin']
| StarcoderdataPython |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
# Snapshot registry for the snapshottest framework. The entries below are
# GENERATED by running the suite with snapshot updating enabled; do not
# edit them by hand — regenerate instead.
# NOTE(review): several id values below were redacted to '<KEY>'/'R<KEY>'
# by an anonymization pass; regenerating the snapshots restores them.
snapshots = Snapshot()

snapshots['TestDatasetMutations.test_create_dataset 1'] = {
    'data': {
        'createDataset': {
            'dataset': {
                'datasetType': {
                    'description': 'Dataset storage provided by your Gigantum account supporting files up to 5GB in size',
                    'id': 'R<KEY>',
                    'name': 'Gigantum Cloud'
                },
                'description': 'my test dataset',
                'id': 'R<KEY>',
                'name': 'test-dataset-1',
                'schemaVersion': 2
            }
        }
    }
}

snapshots['TestDatasetMutations.test_create_dataset 2'] = {
    'data': {
        'dataset': {
            'datasetType': {
                'description': 'Dataset storage provided by your Gigantum account supporting files up to 5GB in size',
                'id': '<KEY>',
                'name': 'Gigantum Cloud'
            },
            'description': 'my test dataset',
            'id': '<KEY>',
            'name': 'test-dataset-1',
            'schemaVersion': 2
        }
    }
}

snapshots['TestDatasetMutations.test_download_dataset_files 1'] = {
    'data': {
        'downloadDatasetFiles': {
            'updatedFileEdges': [
                {
                    'node': {
                        'isLocal': True,
                        'key': 'test1.txt',
                        'name': 'dataset100',
                        'size': '10'
                    }
                }
            ]
        }
    }
}

snapshots['TestDatasetMutations.test_download_dataset_files 2'] = {
    'data': {
        'downloadDatasetFiles': {
            'updatedFileEdges': [
                {
                    'node': {
                        'id': 'RGF0YXNldEZpbGU6ZGVmYXVsdCZkYXRhc2V0MTAwJnRlc3QyLnR4dA==',
                        'isLocal': True,
                        'name': 'dataset100',
                        'size': '7'
                    }
                }
            ]
        }
    }
}

snapshots['TestDatasetMutations.test_modify_dataset_link 1'] = {
    'data': {
        'modifyDatasetLink': {
            'newLabbookEdge': {
                'node': {
                    'description': 'testing dataset links',
                    'id': 'TGFiYm9vazpkZWZhdWx0JnRlc3QtbGI=',
                    'linkedDatasets': [
                        {
                            'name': 'dataset100'
                        }
                    ],
                    'name': 'test-lb'
                }
            }
        }
    }
}

snapshots['TestDatasetMutations.test_modify_dataset_link 2'] = {
    'data': {
        'modifyDatasetLink': {
            'newLabbookEdge': {
                'node': {
                    'description': 'testing dataset links',
                    'id': 'TGFiYm9vazpkZWZhdWx0JnRlc3QtbGI=',
                    'linkedDatasets': [
                    ],
                    'name': 'test-lb'
                }
            }
        }
    }
}

snapshots['TestDatasetMutations.test_modify_dataset_link_errors 1'] = {
    'data': {
        'modifyDatasetLink': None
    },
    'errors': [
        {
            'locations': [
                {
                    'column': 22,
                    'line': 4
                }
            ],
            'message': 'Unsupported action. Use `link` or `unlink`',
            'path': [
                'modifyDatasetLink'
            ]
        }
    ]
}
| StarcoderdataPython |
from os.path import dirname, join
from unittest import TestCase
from pytezos import pytezos, ContractInterface
initial_storage = {
'admin': {
'admin': pytezos.key.public_key_hash(),
'paused': False
},
'assets': {
'hook': {
'hook': """
{ DROP ;
PUSH address "KT1V4jijVy1HfVWde6HBVD1cCygZDtFJK4Xz" ;
CONTRACT (pair
(pair
(list %batch (pair (pair (nat %amount) (option %from_ address))
(pair (option %to_ address) (nat %token_id))))
(address %fa2))
(address %operator)) ;
IF_NONE { FAIL } {} }
""",
'permissions_descriptor': {
'custom': {
'config_api': None,
'tag': 'none'
},
'operator': 'operator_transfer_permitted',
'receiver': 'optional_owner_hook',
'self': 'self_transfer_permitted',
'sender': 'optional_owner_hook'
}
},
'ledger': {},
'operators': {},
'tokens': {}
}
}
class TestMac(TestCase):
    """Interpreter-level tests for the compiled FA2 contract (mac.tz)."""

    @classmethod
    def setUpClass(cls):
        # Load the compiled Michelson contract once for the whole class.
        cls.mac = ContractInterface.create_from(join(dirname(__file__), 'mac.tz'))
        cls.maxDiff = None  # show full dict diffs on assertion failure

    def test_pause(self):
        """The admin can pause the contract; the flag lands in storage."""
        res = self.mac.pause(True).interpret(
            storage=initial_storage,
            source=pytezos.key.public_key_hash(),
            sender=pytezos.key.public_key_hash())
        self.assertTrue(res.storage['admin']['paused'])

    def test_is_operator_callback(self):
        """is_operator emits exactly one callback operation."""
        res = self.mac.is_operator(callback='KT1V4jijVy1HfVWde6HBVD1cCygZDtFJK4Xz',  # does not matter
                                   operator={
                                       'operator': pytezos.key.public_key_hash(),
                                       'owner': pytezos.key.public_key_hash(),
                                       'tokens': {'all_tokens': None}
                                   }) \
            .interpret(storage=initial_storage)
        self.assertEqual(1, len(res.operations))

    def test_transfer(self):
        """A transfer debits the sender and credits the recipient."""
        # BUG FIX: the original used initial_storage.copy(), which is a
        # *shallow* copy — assigning into ['assets']['ledger'] then mutated
        # the shared module-level initial_storage and leaked balances into
        # the other tests. Deep-copy to keep this test isolated.
        from copy import deepcopy
        initial_storage_balance = deepcopy(initial_storage)
        initial_storage_balance['assets']['ledger'] = {
            (pytezos.key.public_key_hash(), 0): 42000
        }
        res = self.mac.transfer([
            dict(amount=1000,
                 from_=pytezos.key.public_key_hash(),
                 to_='<KEY>',  # NOTE(review): recipient address redacted by anonymization
                 token_id=0)]) \
            .interpret(storage=initial_storage_balance)
        self.assertDictEqual({
            (pytezos.key.public_key_hash(), 0): 41000,
            ('<KEY>', 0): 1000
        }, res.big_map_diff['assets/ledger'])
| StarcoderdataPython |
1877984 | <gh_stars>1-10
# -*- encoding: utf-8 -*-
'''
@File : test_axml.py
@Author : Loopher
@Version : 1.0
@License : (C)Copyright 2020-2021, Loopher
@Desc : None
'''
# Here put the import lib
import os
import unittest
from androyara.core.axml_parser import AndroidManifestXmlParser
# Directory containing this test file, as an absolute path.
root = os.path.abspath(os.path.dirname(__file__))
# Parent of `root` (the repository root): drop the last path component.
sample = root[:root.rfind(os.sep)]
class AxmlTesst(unittest.TestCase):
    # NOTE(review): class name looks like a typo for "AxmlTest", but unittest
    # discovers it by the TestCase base, so renaming is cosmetic-only and
    # would change the public name — left as is.
    """Smoke test for AndroidManifestXmlParser (currently disabled)."""

    def test_axml(self):
        # The parser calls below are commented out, so this test only walks
        # the sample path list and always passes (effectively a placeholder).
        for xml in [sample + os.sep + "samples" + os.sep + "AndroidManifest.xml"]:
            # axml = AndroidManifestXmlParser(xml)
            # # print(axml)
            # # print(axml.get_all_export_components())
            # print(axml.get_main_activity())
            # axml.get_main_activity()
            pass
| StarcoderdataPython |
# sb_code/sb_train.py
#from stable_baselines3.common.env_checker import check_env
from gym_duckietown.envs.duckietown_env import DuckietownEnv
from gym_duckietown.simulator import Simulator
from sb_code.wrapper import NormalizeWrapper, ResizeWrapper, RewardWrapper, FinalLayerObservationWrapper, \
DiscreteWrapper, PositiveVelocityActionWrapper, Map1EvalRewardWrapper
from aido_code.reward_wrappers import DtRewardPosAngle, DtRewardVelocity, DtRewardWrapperDistanceTravelled
import gym
import numpy as np
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3 import SAC, DQN, A2C, PPO
from stable_baselines3.common.vec_env import VecNormalize, VecFrameStack, VecTransposeImage
import os.path as osp
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback
from sb_code.custom_callbacks import SaveNormalization, CustomBestModelCallback
from sb_code.Logger import Logger
from sys import platform
import torch
import random
import shutil
# Below 2 lines is for Windows 10 Environment. Comment if running on other OS
if platform == 'win32':
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# Below is training on linux and if GPU is available
if platform == 'linux' and torch.cuda.is_available():
from pyvirtualdisplay import Display
display = Display(visible=0, size=(640, 480))
display.start()
PROJECT_PATH = osp.abspath(osp.dirname(osp.dirname(__file__)))
# Step 1. Initialize some parameters
custom_params = {
# ENVIORNMENT Set up
'map': 'map2',
'ep_len': 1500,
'seed': 2,
# WRAPPERS
'USING_VAE' : True, # whether to use VAE
'VAE_LATENT_DIM': 512,
'FRAME_STACK' : 20,
'USING_NORMALIZATION' : True,
'discrete': True,
# TRAINING
'eval_freq': 5000, # All are steps running
'save_freq': 30000,
'eval_episodes': 5,
'restore': False,
'load_path': osp.join(PROJECT_PATH, "results", "dqn", "2021-04-19_dqn", "2021-04-19_14-52-12_dqn", "phong_best", "phong_best"),
# ALGORITHMS PARAMETERS
'algo' : 'dqn',
'sac_parameters': {
'buffer_size': int(1e5),
'gradient_steps': 64,
'train_freq': 64,
'optimize_memory_usage': True,
'learning_starts': 1000
},
'dqn_parameters': {
'optimize_memory_usage': True,
'buffer_size': int(2e5),
'exploration_fraction': 0.25,
'gamma': .98,
'train_freq': (1, 'episode'),
'gradient_steps': 10,
'target_update_interval': 50000
},
'a2c_parameters': {
'use_sde': True,
'normalize_advantage': True,
},
'ppo_parameters': {
'batch_size': 256,
'n_epochs': 20,
'gamma': 0.99,
'gae_lambda': 0.9,
'sde_sample_freq': 4,
'learning_rate': 3e-5,
'use_sde': True,
'clip_range': 0.4
#'policy_kwargs': dict(log_std_init=-2,ortho_init=False)
}
}
custom_params['policy'] = 'MlpPolicy' if custom_params['USING_VAE'] else 'CnnPolicy'
# Step 2. Initialize the environment
np.random.seed(custom_params['seed'])
torch.manual_seed(custom_params['seed'])
random.seed(custom_params['seed'])
def setup_env():
    """Build the training and evaluation environments.

    Both start from the same DuckietownEnv instance and are wrapped
    according to ``custom_params``; returns ``(train_env, eval_env)``.

    NOTE(review): when map != 'map1' the eval_env is returned without the
    Reward/Resize wrappers applied to train_env (only Discrete/VAE
    wrappers), so its observations differ from training — confirm this is
    intentional for the current 'map2' configuration.
    """
    env = DuckietownEnv(
        map_name=custom_params['map'],
        domain_rand=False,
        draw_bbox=False,
        max_steps=custom_params['ep_len'],
        seed=custom_params['seed']
    )
    # Both chains wrap the same base simulator instance.
    train_env = env
    eval_env = env
    # My Wrappers
    #train_env = DtRewardWrapperDistanceTravelled(train_env)
    #train_env = DtRewardVelocity(train_env)
    train_env = RewardWrapper(train_env)
    train_env = ResizeWrapper(train_env, shape=(60, 80, 3))
    if custom_params['map'] == 'map1':
        # map1 has a dedicated evaluation reward shaping
        eval_env = Map1EvalRewardWrapper(eval_env)
        eval_env = ResizeWrapper(eval_env, shape=(60, 80, 3))
    if custom_params['discrete']:
        train_env = DiscreteWrapper(train_env)
        eval_env = DiscreteWrapper(eval_env)
    if custom_params['USING_VAE']:
        train_env = NormalizeWrapper(train_env)  # No need to use normalization if image
        train_env = FinalLayerObservationWrapper(train_env, latent_dim=custom_params['VAE_LATENT_DIM'], map=custom_params['map'])
        eval_env = NormalizeWrapper(eval_env)  # No need to use normalization if image
        eval_env = FinalLayerObservationWrapper(eval_env, latent_dim=custom_params['VAE_LATENT_DIM'],
                                                map=custom_params['map'])
    return train_env, eval_env
# Step 3. Check the custom environment. Must do it before any wrappers
#check_env(env)
# Step 3.a Our Wrapper
train_env, eval_env = setup_env()
# Step 3.b. To make Vectorized Environment to be able to use Normalize or FrameStack (Optional)
eval_env = make_vec_env(lambda: eval_env, n_envs=1)
# NOTE(review): both branches below are identical — the dqn special-case
# is dead; presumably a different n_envs was once intended. Confirm.
if custom_params['algo'] == 'dqn':
    env = make_vec_env(lambda: train_env, n_envs=1)
else:
    env = make_vec_env(lambda: train_env, n_envs=1)
# Step 3.b Passing through Normalization and stack frame (Optional)
env = VecFrameStack(env, n_stack=custom_params['FRAME_STACK'])  # Use 1 for now because we use image
eval_env = VecFrameStack(eval_env, n_stack=custom_params['FRAME_STACK'])  # Use 1 for now because we use image
if not custom_params['USING_VAE']:
    # Image observations: channels must come first for SB3 CNN policies.
    env = VecTransposeImage(env)
    eval_env = VecTransposeImage(eval_env)
if custom_params['USING_NORMALIZATION']:
    env = VecNormalize(env, norm_obs=True, norm_reward=False)  # If using normalize, must save
    eval_env = VecNormalize(eval_env, norm_obs=True, norm_reward=False)
# Step 4. Make Logger corresponding to the name of algorithm, and archive
# the wrapper definitions alongside the run for reproducibility.
logger = Logger(custom_params['algo'])
shutil.copy(osp.join(PROJECT_PATH, "sb_code", "wrapper.py"), logger.output_dir)
# Step 5. Creating callbacks
checkpoint_callback = CheckpointCallback(save_freq=custom_params['save_freq'], save_path=logger.output_dir,
                                         name_prefix='rl_model')
custom_bestmodel_callback = CustomBestModelCallback(eval_env=eval_env, eval_freq=custom_params['eval_freq'], logger=logger, custom_params=custom_params)
savestats_callback = SaveNormalization(save_path=osp.join(logger.output_dir, "vec_normalization.pkl"))  # If using normalize, must create this callback
eval_callback = EvalCallback(eval_env=eval_env, n_eval_episodes=custom_params['eval_episodes'], callback_on_new_best=savestats_callback,
                             eval_freq=custom_params['eval_freq'],
                             best_model_save_path=osp.join(logger.output_dir, "best_model"),
                             log_path=osp.join(logger.output_dir, "results"))
callback = CallbackList([checkpoint_callback, eval_callback, custom_bestmodel_callback])
# Step 6. Build the model selected by custom_params['algo'].
if custom_params['algo'] == 'sac':
    model = SAC(policy=custom_params['policy'], env=env, verbose=1, **custom_params['sac_parameters'], tensorboard_log=logger.output_dir)
elif custom_params['algo'] == 'dqn':
    model = DQN(policy=custom_params['policy'], env=env, verbose=1, **custom_params['dqn_parameters'], tensorboard_log=logger.output_dir)
    if custom_params['restore'] == True:
        # Replaces the freshly built model with the saved checkpoint;
        # exploration restarts at eps=0.7 for continued training.
        print(f'Loading retrained model at {custom_params["load_path"]}')
        model = DQN.load(custom_params['load_path'], env=env, exploration_initial_eps=0.7)
elif custom_params['algo'] == 'a2c':
    model = A2C(policy=custom_params['policy'], env=env, verbose=1, **custom_params['a2c_parameters'], tensorboard_log=logger.output_dir)
elif custom_params['algo'] == 'ppo':
    model = PPO(policy=custom_params['policy'], env=env, verbose=1, **custom_params['ppo_parameters'], tensorboard_log=logger.output_dir)
else:
    raise ValueError("Invalid algo")
# Persist the full configuration next to the run outputs.
logger.save_config(custom_params)
# DQN logs per-step; log less frequently to keep output readable.
if custom_params['algo'] == 'dqn':
    model.learn(total_timesteps=10000000, log_interval=1000, callback=callback)  # Log_interval = number of episodes
else:
    model.learn(total_timesteps=10000000, log_interval=100, callback=callback)  # Log_interval = number of episodes
| StarcoderdataPython |
6497302 | <gh_stars>1-10
'''
Module containing the main menu instances
Written by <NAME>
'''
import sys
# Make the parent package importable when this module is run directly.
sys.path.insert(0, '..')
from classes import interface
from instances import instance_config as icng
import pygame as pg

# Shortcut to pygame's named-color lookup table.
COLORS = pg.colordict.THECOLORS
# Title text rendered at the top of the main menu.
text = [
    interface.text_center(
        text='MAYHEM 2: ELECTRIC BOOGALOO',
        pos=[None, icng.w_shape[1]*0.05],
        color=COLORS['maroon'],
        font=icng.font,
        font_size=icng.mayhem_title_fsize,
        window_shape=icng.w_shape
    )
]

# Menu entries as (label, vertical position as a fraction of the window
# height, game state to switch to). Every button shares the same colors,
# font and size, so the previous seven copy-pasted constructor calls are
# collapsed into one comprehension over this spec table.
_BUTTON_SPECS = [
    ('Explore in singleplayer', 0.15, 'mayhem'),
    ('Online multiplayer', 0.25, 'mayhem_online'),
    ('Server menu', 0.35, 'server_menu'),
    ('Pacman cage fight', 0.45, 'pacman_game'),
    ('Information about Mayhem 2: Electric Boogaloo', 0.65,
     'info_spacemayhem'),
    ('Information about Pacman cage fight', 0.75, 'info_pacmanmayhem'),
    ('Exit', 0.90, 'exit'),
]

state_buttons = [
    interface.button_center_change_state(
        text=label,
        pos=[None, icng.w_shape[1]*y_frac],
        color=COLORS['beige'],
        color_mouseover=COLORS['cyan'],
        font=icng.font,
        font_size=icng.main_menu_fsize,
        window_shape=icng.w_shape,
        next_state=next_state
    )
    for label, y_frac, next_state in _BUTTON_SPECS
]

# Everything the main-menu state must draw each frame.
all_to_be_drawn = text + state_buttons
5134025 | from flask.signals import Namespace
# Application-wide signal namespace; custom Flask signals are defined here.
_signals = Namespace()
# session_ended = _signals.signal('session-ended')
| StarcoderdataPython |
1766986 | <gh_stars>0
import json
from collections import OrderedDict
class Cube(object):
    """An in-memory OLAP-style cube that counts facts along configured dimensions.

    Each dimension spec is a dict with:
      * ``"name"``: unique dimension name (used for bookkeeping during removal)
      * ``"value"``: dotted key path (``"a.b.c"``) into the fact, or a
        callable ``fact -> value``
      * ``"parent"`` (optional): key or callable selecting the object the
        value (and dictionary entries) are taken from
      * ``"dictionary"`` (optional): spec with ``"dictionary"``, ``"key"`` and
        ``"value"`` entries describing a lookup table populated as new cells
        appear

    ``self.cube`` is a nested dict keyed by normalized dimension values; each
    leaf dict stores the fact count under the ``"_"`` key.
    """

    def __init__(self, dimensions):
        super().__init__()
        self.dimensions = dimensions
        # nested dict: dim1 value -> dim2 value -> ... -> {"_": count}
        self.cube = {}
        # dictionary name -> {normalized key: display value}
        self.dictionaries = {}
        # a dictionary dim name -> (parent cell, value) for the current fact;
        # used by remove_fact() to prune cells that become empty
        self._current_cells = {}

    @staticmethod
    def _get(fact, key):
        """Extract *key* from *fact*; *key* may be a plain key or a callable."""
        if callable(key):
            return key(fact)
        else:
            return fact[key]

    @staticmethod
    def _value(fact, dimension):
        """
        Extract dimension value from fact.
        :param fact: The fact object, i.e. dict.
        :param dimension: The dimension specification.
        :returns: Tuple with the object that the value was directly taken from
            and the value (both may be None if the path hits a None).
        """
        # determine parent based on parent key or parent function
        if "parent" in dimension:
            parent = Cube._get(fact, dimension["parent"])
        else:
            parent = fact
        # use value function if there is one
        if callable(dimension["value"]):
            return parent, dimension["value"](parent)
        # walk a dotted key path ("a.b.c") down to the direct parent
        key = dimension["value"]
        key_items = key.split(".")
        for key_item in key_items[:-1]:
            if parent is None:
                break
            parent = parent[key_item]
        # determine value
        if parent is None:
            value = None
        else:
            value = parent[key_items[-1]]
        return parent, value

    @staticmethod
    def _normalize(value):
        """Stringify *value* and escape "$" and "." (unsafe as nested keys
        in downstream consumers)."""
        str_value = str(value)
        return str_value.replace("$", "&#FF04;").replace(".", "&#FF0E;")

    def _cell(self, fact):
        """Descend (creating as needed) to the leaf cell for *fact*.

        Records the traversed (parent cell, value) pair per dimension in
        ``self._current_cells`` and fills the configured dictionaries for
        newly created cells.
        """
        cell = self.cube
        for dimension in self.dimensions:
            parent, raw_value = self._value(fact, dimension)
            value = self._normalize(raw_value)
            self._current_cells[dimension["name"]] = cell, value
            is_new = value not in cell
            cell = cell.setdefault(value, {})
            # BUGFIX: test the *raw* value for None. Previously the already
            # normalized string was compared against None, which is never
            # true (str(None) == "None"), so dictionary extraction ran even
            # for missing values and could crash on a None parent.
            if raw_value is not None and is_new and "dictionary" in dimension:
                dictionary_spec = dimension["dictionary"]
                dictionary = self.dictionaries.setdefault(
                    dictionary_spec["dictionary"], {})
                dictionary_key = self._normalize(
                    self._get(parent, dictionary_spec["key"]))
                dictionary[dictionary_key] = self._get(
                    parent, dictionary_spec["value"])
        return cell

    def add_fact(self, fact):
        """Register *fact*, incrementing its cell's count."""
        cell = self._cell(fact)
        current = cell.get("_", 0)
        cell["_"] = current + 1

    def remove_fact(self, fact):
        """Unregister *fact*, decrementing its cell's count and pruning any
        cells that become empty."""
        cell = self._cell(fact)
        current = cell.get("_", 0)
        if current == 0:
            # TODO clients should be warned about this situation
            pass
        cell["_"] = current - 1
        if current == 1:
            # the cell is empty now, it can be removed, together with any
            # ancestor cells left empty by the removal
            del(cell["_"])
            for dimension in reversed(self.dimensions):
                if len(cell) == 0:
                    parent_cell, value = self._current_cells[dimension["name"]]
                    del(parent_cell[value])
                    cell = parent_cell
                else:
                    break

    def to_json(self):
        """Serialize dimension names, cube and dictionaries as pretty JSON."""
        data = OrderedDict(dimensions=[dimension["name"] for dimension in self.dimensions],
                           cube=self.cube, dictionaries=self.dictionaries)
        return json.dumps(data, indent=2)
| StarcoderdataPython |
6619881 | <reponame>kimholmgren/grocerystore_path_simulation<filename>grocerypathsim/path_generator.py<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import cv2
from pathfinding.core.diagonal_movement import DiagonalMovement
from pathfinding.core.grid import Grid
from pathfinding.finder.a_star import AStarFinder
class PathGenerator:
    """Generates customer shopping paths through a store layout.

    Given a shopping list, picks one concrete pixel coordinate for every
    visited department, orders the stops with a distance-biased random
    heuristic, and can compute an obstacle-aware walking path with A*.
    """

    def __init__(self, storelayout, start_coords=None):
        """
        Initialize path generator object
        :param storelayout: store layout object
        :param start_coords: where customers are generated spatially
            (defaults to [499, 0])
        """
        # BUGFIX: the default used to be the mutable list [499, 0], which is
        # a shared object across all instances created with the default
        # (classic mutable-default-argument pitfall). Use a None sentinel.
        if start_coords is None:
            start_coords = [499, 0]
        self.slayout = storelayout
        # number of candidate pixel columns available per department
        self.dpt_coord_choices = [
            self.slayout.product_options[i].shape[1]
            for i in range(len(self.slayout.product_options.keys()))
        ]
        self.start_coords = start_coords

    def generate_pixel_coordinates(self, shopping_list):
        """
        Generate a random set of coordinates to visit based on the
        departments in a given shopping list
        :param shopping_list: Current shopping list generated from
            ShoppingListGen
        :return: array of pixel coordinates to visit, one row per item
        """
        visited_dpts = shopping_list['mapped_dpt']
        visited_pixel_coords = []
        for d in visited_dpts:
            # pick one of the department's candidate pixel columns at random
            curr_pixel_ind_choice = np.random.choice(self.dpt_coord_choices[d])
            curr_pixel = self.slayout.product_options[d][:,
                                                         curr_pixel_ind_choice]
            visited_pixel_coords.append(curr_pixel)
        visited_pixel_coords = np.array(visited_pixel_coords)
        return visited_pixel_coords

    def order_coords(self, pixel_coords):
        """
        Generate the order for the path from a list of coordinates to visit.

        The next stop is drawn at random with probability strongly biased
        toward nearby points (inverse distance raised to the 5th power).
        :param pixel_coords: Coordinates to visit
        :return: (ordered list of coordinates including start and checkout,
                  total euclidean distance travelled)
        """
        euclidean_dist = 0
        ordered_path = [np.array(self.start_coords)]
        curr_loc = self.start_coords
        while len(pixel_coords) > 0:
            # compute euclidean distances from current location
            dists = [np.linalg.norm(a - curr_loc) for a in pixel_coords]
            # replace zero distances so np.reciprocal below stays finite
            for i, d in enumerate(dists):
                if d == 0:
                    dists[i] = .5
            # compute probabilities (closer points are far more likely)
            p = np.power(np.reciprocal(dists), 5)
            p = p / p.sum()
            # choose next point
            next_point_index = np.random.choice(list(range(len(p))), p=p)
            euclidean_dist += dists[next_point_index]
            next_point = pixel_coords[next_point_index]
            pixel_coords = np.vstack((pixel_coords[:next_point_index],
                                      pixel_coords[next_point_index + 1:]))
            # add to ordered list
            ordered_path.append(next_point)
            curr_loc = next_point
        # when no items remain visit the checkout area
        checkout_ind = np.random.choice(self.dpt_coord_choices[
            self.slayout.checkout_index])
        checkout_point = self.slayout.product_options[
            self.slayout.checkout_index][:, checkout_ind]
        ordered_path.append(checkout_point)
        return ordered_path, euclidean_dist

    # now we have an ordered list of points to visit, and a store layout
    # denoting where we can walk if we choose to compute a path around
    # obstacles rather than euclidean distance
    def calc_path_astar(self, ordered):
        """
        Calculate the walking path using the A* algorithm
        :param ordered: ordered set of coordinates.
        :return: (full pixel path, total path length in grid steps)
        """
        distance = 0
        full_path = []
        # make sure all destination points are walkable
        for o in ordered:
            x, y = o
            self.slayout.walkable[x, y] = 1
        # calculate the path segment by segment between consecutive stops
        for i in range(len(ordered) - 1):
            # define the grid and the solver. NOTE(review): a fresh Grid is
            # built per segment — presumably because the finder mutates node
            # state during the search; confirm before hoisting this.
            grid = Grid(matrix=self.slayout.walkable)
            finder = AStarFinder(diagonal_movement=DiagonalMovement.always)
            start = ordered[i]
            start_node = grid.node(start[1], start[0])
            end = ordered[i + 1]
            end_node = grid.node(end[1], end[0])
            path, runs = finder.find_path(start_node, end_node, grid)
            distance += len(path)
            full_path.extend(path)
        return full_path, distance

    def plot_ordered_coords(self, visited_pixel_coords):
        """Scatter the visit coordinates (with visit order labels) on the
        store layout image."""
        plt.imshow(cv2.cvtColor(self.slayout.layout, cv2.COLOR_BGR2RGB))
        # image (row, col) maps to plot (y, x), hence the swapped arguments
        xs, ys = [p[0] for p in visited_pixel_coords], [p[1] for p in visited_pixel_coords]
        plt.scatter(ys, xs, color="purple")
        for i in range(len(visited_pixel_coords)):
            plt.text(visited_pixel_coords[i][1] - 10,
                     visited_pixel_coords[i][0] + 25, str(i))
        plt.show()

    def plot_astar_path(self, full_path, ordered):
        """Plot an A* walking path plus the ordered stops on the layout."""
        plt.imshow(cv2.cvtColor(self.slayout.layout, cv2.COLOR_BGR2RGB))
        xs, ys = [p[0] for p in full_path], [p[1] for p in full_path]
        plt.scatter(xs, ys, color="gray", s=10)
        xs, ys = [p[0] for p in ordered], [p[1] for p in ordered]
        plt.scatter(ys, xs, marker='x', color='red')
        for i in range(len(ordered)):
            plt.text(ordered[i][1] - 10,
                     ordered[i][0] + 25, str(i), fontsize='large',
                     fontdict={'weight': 'heavy', 'color': 'black'})
        plt.show()

    def plot_euclidean_path(self, visited_pixel_coords):
        """Plot the straight-line path through the ordered coordinates."""
        plt.clf()
        plt.imshow(cv2.cvtColor(self.slayout.layout, cv2.COLOR_BGR2RGB))
        xs, ys = [p[0] for p in visited_pixel_coords], [p[1] for p in
                                                        visited_pixel_coords]
        plt.scatter(ys, xs, marker='x', color='red', zorder=2)
        for i in range(len(visited_pixel_coords)):
            plt.text(visited_pixel_coords[i][1] - 10,
                     visited_pixel_coords[i][0] + 25, str(i), fontsize='large',
                     fontdict={'weight': 'heavy', 'color': 'black'})
        plt.plot(ys, xs, color="gray", linewidth=4, zorder=1)
        plt.show()
| StarcoderdataPython |
11256828 | <gh_stars>0
import sys
import warnings

# This package is a pure compatibility shim for its renamed successor:
# warn on import, then hand over to 'pfio' entirely.
warnings.warn("package 'chainerio' is deprecated and will be removed."
              " Please use 'pfio' instead.",
              DeprecationWarning)

# make sure pfio is in sys.modules
import pfio  # NOQA

# Replace this module object in sys.modules so that `import chainerio`
# transparently yields the pfio package (all attribute access forwards).
sys.modules[__name__] = __import__('pfio')
| StarcoderdataPython |
188524 | <gh_stars>1-10
"""Custom storage classes for static and media files."""
from django.conf import settings
from storages.backends.s3boto import S3BotoStorage
class StaticStorage(S3BotoStorage):
    """Custom storage class for static files.

    Stores collected static assets under the S3 key prefix configured by
    ``settings.STATICFILES_LOCATION`` instead of the bucket root.
    """

    # S3 key prefix for every file handled by this backend.
    location = settings.STATICFILES_LOCATION
| StarcoderdataPython |
11309845 | """
Every user gets their own Redis hash to store "temporary" values in.
All values time out 24h after last update.
The hash is reset when the user logs out, or the user's id changes.
This is intended to persist some data between Dash callbacks.
If you use this for routes that take JWT authentication, there is no "logout" so the hash just expires after 24 hours.
"""
import os
from datetime import timedelta
from flask import g, has_app_context
from flask_login import current_user
from redis import Redis
def get_redis():
    """Return a Redis client, cached on ``flask.g`` while an application
    context is active; otherwise build a fresh, uncached client.

    Host and port come from the REDISSERVER / REDISPORT environment
    variables (defaulting to "redis" / 6379).
    """
    if has_app_context():
        cached = getattr(g, '_redis', None)
        if cached is not None:
            return cached
    client = Redis(
        host=os.environ.get('REDISSERVER', 'redis'),
        port=int(os.environ.get('REDISPORT', 6379)),
        db=0,
    )
    if has_app_context():
        g._redis = client
    return client
def set_value(key, value, hash_name=None):
    """Store *value* under field *key* in a Redis hash.

    :param key: hash field name
    :param value: value to store; ``None`` deletes the field instead
    :param hash_name: Redis hash to use; defaults to the current user's hash
    :return: None

    The hash's 24-hour expiry is refreshed on every call.
    """
    client = get_redis()
    target = f'user{current_user.id}' if hash_name is None else hash_name
    if value is None:
        client.hdel(target, key)
    else:
        client.hset(target, key, value)
    client.expire(target, timedelta(hours=24))
def get_value(key, hash_name=None):
    """Return the value stored under field *key*, or None if absent.

    :param key: hash field name
    :param hash_name: Redis hash to read from; defaults to the current
        user's hash (new optional parameter, mirroring :func:`set_value`
        for consistency; existing one-argument calls are unaffected)
    :return: raw bytes value from Redis, or None
    """
    r = get_redis()
    if hash_name is None:
        hash_name = f'user{current_user.id}'
    return r.hget(hash_name, key)
def delete_value(key, hash_name=None):
    """Remove field *key* from a Redis hash.

    :param key: hash field name
    :param hash_name: Redis hash to operate on; defaults to the current
        user's hash (new optional parameter, mirroring :func:`set_value`
        for consistency; existing one-argument calls are unaffected)
    :return: number of fields removed (0 or 1)
    """
    r = get_redis()
    if hash_name is None:
        hash_name = f'user{current_user.id}'
    return r.hdel(hash_name, key)
def clear_user_hash(user_id):
    """Delete the entire Redis hash belonging to *user_id* (e.g. on logout).

    :param user_id: id of the user whose hash should be dropped
    :return: number of keys deleted (0 or 1)
    """
    return get_redis().delete(f'user{user_id}')
def exists(key, hash_name=None):
    """Return whether field *key* is present in a Redis hash.

    :param key: hash field name
    :param hash_name: Redis hash to check; defaults to the current user's
        hash (new optional parameter, mirroring :func:`set_value` for
        consistency; existing one-argument calls are unaffected)
    :return: truthy if the field exists
    """
    r = get_redis()
    if hash_name is None:
        hash_name = f'user{current_user.id}'
    return r.hexists(hash_name, key)
| StarcoderdataPython |
12859930 | """Create plots for learning from varying numbers of demonstrations."""
import os
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from predicators.scripts.analyze_results_directory import create_dataframes, \
get_df_for_entry
pd.options.mode.chained_assignment = None # default='warn'
# plt.rcParams["font.family"] = "CMU Serif"
############################ Change below here ################################
# Details about the plt figure.
DPI = 500
FONT_SIZE = 18
# Groups over which to take mean/std.
GROUPS = [
"ENV", "APPROACH", "EXCLUDED_PREDICATES", "EXPERIMENT_ID",
"NUM_TRAIN_TASKS", "CYCLE"
]
# All column names and keys to load into the pandas tables before plotting.
COLUMN_NAMES_AND_KEYS = [
("ENV", "env"),
("APPROACH", "approach"),
("EXCLUDED_PREDICATES", "excluded_predicates"),
("EXPERIMENT_ID", "experiment_id"),
("SEED", "seed"),
("NUM_TRAIN_TASKS", "num_train_tasks"),
("CYCLE", "cycle"),
("NUM_SOLVED", "num_solved"),
("AVG_NUM_PREDS", "avg_num_preds"),
("AVG_TEST_TIME", "avg_suc_time"),
("AVG_NODES_CREATED", "avg_num_nodes_created"),
("LEARNING_TIME", "learning_time"),
("PERC_SOLVED", "perc_solved"),
]
DERIVED_KEYS = [("perc_solved",
lambda r: 100 * r["num_solved"] / r["num_test_tasks"])]
# The first element is the name of the metric that will be plotted on the
# x axis. See COLUMN_NAMES_AND_KEYS for all available metrics. The second
# element is used to label the x axis.
X_KEY_AND_LABEL = [
("NUM_TRAIN_TASKS", "Number of Training Tasks"),
# ("LEARNING_TIME", "Learning time in seconds"),
]
# Same as above, but for the y axis.
Y_KEY_AND_LABEL = [
("PERC_SOLVED", "% Evaluation Tasks Solved"),
# ("AVG_NODES_CREATED", "Averaged nodes created"),
]
# PLOT_GROUPS is a nested dict where each outer dict corresponds to one plot,
# and each inner entry corresponds to one line on the plot.
# The keys of the outer dict are plot titles.
# The keys of the inner dict are (legend label, marker, df selector).
PLOT_GROUPS = {
"Learning from Few Demonstrations": [
("PickPlace1D", "o",
lambda df: df["EXPERIMENT_ID"].apply(lambda v: "cover_main_" in v)),
("Blocks", ".",
lambda df: df["EXPERIMENT_ID"].apply(lambda v: "blocks_main_" in v)),
("Painting", "*",
lambda df: df["EXPERIMENT_ID"].apply(lambda v: "painting_main_" in v)
),
("Tools", "s",
lambda df: df["EXPERIMENT_ID"].apply(lambda v: "tools_main_" in v)),
],
"GNN Shooting LfD": [
("PickPlace1D", "o", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "cover_gnn_shooting_" in v)),
("Blocks", ".", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "blocks_gnn_shooting_" in v)),
("Painting", "*", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "painting_gnn_shooting_" in v)),
("Tools", "s", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "tools_gnn_shooting_" in v)),
],
"GNN Model-Free LfD": [
("PickPlace1D", "o", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "cover_gnn_modelfree_" in v)),
("Blocks", ".", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "blocks_gnn_modelfree_" in v)),
("Painting", "*", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "painting_gnn_modelfree_" in v)),
("Tools", "s", lambda df: df["EXPERIMENT_ID"].apply(
lambda v: "tools_gnn_modelfree_" in v)),
],
}
# If True, add (0, 0) to every plot
ADD_ZERO_POINT = True
Y_LIM = (-5, 110)
#################### Should not need to change below here #####################
def _main() -> None:
    """Create one figure per PLOT_GROUPS entry for every (x, y) metric pair
    and save each as a PNG under ./results next to this script.

    Means and stds are aggregated over GROUPS by create_dataframes; each
    inner PLOT_GROUPS entry becomes one error-bar line.
    """
    outdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                          "results")
    os.makedirs(outdir, exist_ok=True)
    matplotlib.rcParams.update({'font.size': FONT_SIZE})
    grouped_means, grouped_stds, _ = create_dataframes(COLUMN_NAMES_AND_KEYS,
                                                       GROUPS, DERIVED_KEYS)
    means = grouped_means.reset_index()
    stds = grouped_stds.reset_index()
    for x_key, x_label in X_KEY_AND_LABEL:
        for y_key, y_label in Y_KEY_AND_LABEL:
            for plot_title, d in PLOT_GROUPS.items():
                _, ax = plt.subplots()
                for label, marker, selector in d:
                    # select this line's rows from the aggregated tables
                    exp_means = get_df_for_entry(x_key, means, selector)
                    exp_stds = get_df_for_entry(x_key, stds, selector)
                    xs = exp_means[x_key].tolist()
                    ys = exp_means[y_key].tolist()
                    y_stds = exp_stds[y_key].tolist()
                    if ADD_ZERO_POINT:
                        # prepend an artificial (0, 0) data point
                        xs = [0] + xs
                        ys = [0] + ys
                        y_stds = [0] + y_stds
                    ax.errorbar(xs,
                                ys,
                                yerr=y_stds,
                                label=label,
                                marker=marker)
                # NOTE(review): `xs` here comes from the *last* line plotted;
                # this assumes all lines share the same x values — confirm.
                ax.set_xticks(xs)
                ax.set_title(plot_title)
                ax.set_xlabel(x_label)
                ax.set_ylabel(y_label)
                ax.set_ylim(Y_LIM)
                plt.legend()
                plt.tight_layout()
                # e.g. "learning_from_few_demonstrations_num_train_tasks_perc_solved.png"
                filename = f"{plot_title}_{x_key}_{y_key}.png"
                filename = filename.replace(" ", "_").lower()
                outfile = os.path.join(outdir, filename)
                plt.savefig(outfile, dpi=DPI)
                print(f"Wrote out to {outfile}")


if __name__ == "__main__":
    _main()
| StarcoderdataPython |
9735995 | <reponame>philipjameson/buckit
load("@bazel_skylib//lib:partial.bzl", "partial")
load("@bazel_skylib//lib:paths.bzl", "paths")
load("@fbcode_macros//build_defs/facebook:python_wheel_overrides.bzl", "python_wheel_overrides")
load("@fbcode_macros//build_defs/lib:cxx_platform_info.bzl", "CxxPlatformInfo")
load("@fbcode_macros//build_defs/lib:rule_target_types.bzl", "rule_target_types")
load("@fbcode_macros//build_defs/lib:third_party.bzl", "third_party")
load("@fbcode_macros//build_defs:compiler.bzl", "compiler")
load("@fbcode_macros//build_defs:third_party_config.bzl", "third_party_config")
def _translate_target(fbcode_platform, tp_type, base, base_path, name):
"""
Translate a `third-party//` or `third-party-tools//` target to an fbcode
target pointing to the fbcode-platform-specific third-party2 root.
"""
# Process PyFI overrides
if python_wheel_overrides.should_use_overrides():
if fbcode_platform in python_wheel_overrides.PYFI_SUPPORTED_PLATFORMS:
target = python_wheel_overrides.PYFI_OVERRIDES.get(base_path)
if target != None:
return target
# Redirect unsupported projects to an error rule.
config = third_party_config["platforms"][fbcode_platform][tp_type]
if (base_path not in config["projects"] and
# Gross workaround to handle deprecated `auxiliary_versions`.
base_path.rsplit("-")[0] not in config.get("auxiliary_versions", {})):
return rule_target_types.RuleTarget(
"fbcode",
"third-party-buck/missing/{0}".format(base_path),
name,
)
# Translate to the appropriate third-party-buck root.
return rule_target_types.RuleTarget(
"fbcode",
paths.join(base, base_path),
name,
)
def _build_tp2_virtual_cells(fbcode_platform):
"""
Build virtual cells mapping `third-party//` and third-party-tools//` to the
tp2 platform-specific build and tools roots.
"""
return {
"third-party": partial.make(
_translate_target,
fbcode_platform,
"build",
third_party.get_build_path(fbcode_platform),
),
"third-party-tools": partial.make(
_translate_target,
fbcode_platform,
"tools",
third_party.get_tools_path(fbcode_platform),
),
}
# Memoize the per-fbcode-platform virtual cells for use in older code that
# hasn't moved over to using the `CxxPlatformInfo` objects.
_TP2_VIRTUAL_CELLS = {
p: _build_tp2_virtual_cells(p)
for p in third_party_config["platforms"]
}
def _build_platforms(platforms_config, virtual_cells = True):
"""
Returns the list of fbcode-based C/C++ platforms.
"""
platforms = []
# TODO(agallagher): We should generate this list from
# `fbcode/toold/build/gen_modes.py` to avoid code duplication.
for name, info in sorted(platforms_config.items()):
for compiler_family in compiler.COMPILERS:
platforms.append(
CxxPlatformInfo(
alias = name,
compiler_family = compiler_family,
host_arch = info["architecture"],
host_os = "linux",
name = "{}-{}".format(name, compiler_family),
target_arch = info["architecture"],
target_os = "linux", # Should this be "fbcode"?
virtual_cells = _TP2_VIRTUAL_CELLS[name] if virtual_cells else None,
),
)
return platforms
_PLATFORMS = _build_platforms(third_party_config["platforms"])
fbcode_cxx_platforms = struct(
build_platforms = _build_platforms, # visible for testing
build_tp2_virtual_cells = _build_tp2_virtual_cells, # visible for testing
PLATFORMS = _PLATFORMS,
TP2_VIRTUAL_CELLS = _TP2_VIRTUAL_CELLS,
)
| StarcoderdataPython |
4942235 | <filename>netbox_netdisco/core/__init__.py
from .inventory import Inventory
#Inventory.collect() | StarcoderdataPython |
1945649 | import sys
import argparse
from ._docker import run_docker
from ._kubernetes import run_kubernetes
from ._native import run_native, RUNFILE_ENV_VAR
import yaml
import logging
MODULE_NAME = "fv3config.run"
STDOUT_FILENAME = "stdout.log"
STDERR_FILENAME = "stderr.log"
DOCKER_FLAGS = "-it"
def _parse_args():
parser = argparse.ArgumentParser(
description="""Run the FV3GFS model.
Will use google cloud storage key at $GOOGLE_APPLICATION_CREDENTIALS by default.
"""
)
parser.add_argument(
"config", type=str, action="store", help="location of fv3config yaml file"
)
parser.add_argument(
"outdir",
type=str,
action="store",
help="location to copy final run directory, used as run directory if local",
)
parser.add_argument(
"--runfile",
type=str,
action="store",
help="Location of python script to execute with mpirun. If not specified, a "
f"default is used, which can be overriden by setting the {RUNFILE_ENV_VAR}.",
)
parser.add_argument(
"--dockerimage",
type=str,
action="store",
help="if passed, execute inside a docker image with the given name",
)
parser.add_argument(
"--keyfile",
type=str,
action="store",
help="google cloud storage key to use for cloud copy commands",
)
parser.add_argument(
"--kubernetes",
action="store_true",
default=False,
help=(
"if given, ignore --keyfile and output a yaml kubernetes config to stdout "
"instead of submitting a run"
),
)
parser.add_argument(
"--capture-output",
action="store_true",
default=False,
help="If given, save the outputs of the fv3gfs call in a outdir/stderr.log and "
"outdir/stdout.log. Not recommended for use with docker or kubernetes. "
"It is recommended to use default linux pipes or docker's and kuberentes' logging "
"functionality.",
)
return parser.parse_args()
def main():
"""Run the FV3GFS model based on a configuration dictionary.
Copies the resulting run directory to a target location.
"""
args = _parse_args()
logging.basicConfig(level=logging.INFO)
if args.dockerimage is not None:
if args.kubernetes:
job = run_kubernetes(
args.config,
args.outdir,
args.dockerimage,
runfile=args.runfile,
submit=False,
)
yaml.dump(job.to_dict(), stream=sys.stdout)
else:
run_docker(
args.config,
args.outdir,
args.dockerimage,
runfile=args.runfile,
keyfile=args.keyfile,
capture_output=args.capture_output,
)
else:
run_native(
args.config,
args.outdir,
runfile=args.runfile,
capture_output=args.capture_output,
)
if __name__ == "__main__":
sys.exit(main())
| StarcoderdataPython |
9695782 | #!/Python27/python
import cgi, cgitb
form = cgi.FieldStorage()
if form.getvalue('subject'):
subject = form.getvalue('subject')
else:
subject = "Not set"
print("Content-type:text/html\r\n\r\n")
print("<html>")
print("<head>")
print("<title>Radio for CGI Program</title>")
print("</head>")
print("<body>")
print("<h2> Selected Subject is %s</h2>" % subject)
print("</body>")
print("</html>") | StarcoderdataPython |
1671635 | """Initial database
Revision ID: 7c3929047190
Revises:
Create Date: 2021-03-13 13:22:38.768112
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7c3929047190'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('events',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('hostname', sa.String(length=80), nullable=True),
sa.Column('location', sa.String(length=255), nullable=True),
sa.Column('description', sa.UnicodeText(), nullable=True),
sa.Column('boilerplate', sa.UnicodeText(), nullable=True),
sa.Column('resources', sa.UnicodeText(), nullable=True),
sa.Column('logo_url', sa.String(length=255), nullable=True),
sa.Column('custom_css', sa.UnicodeText(), nullable=True),
sa.Column('webpage_url', sa.String(length=255), nullable=True),
sa.Column('community_url', sa.String(length=255), nullable=True),
sa.Column('community_embed', sa.UnicodeText(), nullable=True),
sa.Column('certificate_path', sa.String(length=1024), nullable=True),
sa.Column('starts_at', sa.DateTime(), nullable=False),
sa.Column('ends_at', sa.DateTime(), nullable=False),
sa.Column('is_hidden', sa.Boolean(), nullable=True),
sa.Column('is_current', sa.Boolean(), nullable=True),
sa.Column('lock_editing', sa.Boolean(), nullable=True),
sa.Column('lock_starting', sa.Boolean(), nullable=True),
sa.Column('lock_resources', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=80), nullable=False),
sa.Column('email', sa.String(length=80), nullable=False),
sa.Column('webpage_url', sa.String(length=128), nullable=True),
sa.Column('sso_id', sa.String(length=128), nullable=True),
sa.Column('password', sa.String(length=128), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=True),
sa.Column('cardtype', sa.String(length=80), nullable=True),
sa.Column('carddata', sa.String(length=255), nullable=True),
sa.Column('my_story', sa.UnicodeText(), nullable=True),
sa.Column('my_goals', sa.UnicodeText(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('username')
)
op.create_table('categories',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('description', sa.UnicodeText(), nullable=True),
sa.Column('logo_color', sa.String(length=7), nullable=True),
sa.Column('logo_icon', sa.String(length=20), nullable=True),
sa.Column('event_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('resources',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('type_id', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('is_visible', sa.Boolean(), nullable=True),
sa.Column('progress_tip', sa.Integer(), nullable=True),
sa.Column('source_url', sa.String(length=2048), nullable=True),
sa.Column('download_url', sa.String(length=2048), nullable=True),
sa.Column('summary', sa.String(length=140), nullable=True),
sa.Column('sync_content', sa.UnicodeText(), nullable=True),
sa.Column('content', sa.UnicodeText(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('users_roles',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('role_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('user_id', 'role_id')
)
op.create_table('projects',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('summary', sa.String(length=120), nullable=True),
sa.Column('image_url', sa.String(length=255), nullable=True),
sa.Column('source_url', sa.String(length=255), nullable=True),
sa.Column('webpage_url', sa.String(length=2048), nullable=True),
sa.Column('is_webembed', sa.Boolean(), nullable=True),
sa.Column('contact_url', sa.String(length=255), nullable=True),
sa.Column('autotext_url', sa.String(length=255), nullable=True),
sa.Column('is_autoupdate', sa.Boolean(), nullable=True),
sa.Column('autotext', sa.UnicodeText(), nullable=True),
sa.Column('longtext', sa.UnicodeText(), nullable=False),
sa.Column('hashtag', sa.String(length=40), nullable=True),
sa.Column('logo_color', sa.String(length=7), nullable=True),
sa.Column('logo_icon', sa.String(length=40), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.Column('is_hidden', sa.Boolean(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('event_id', sa.Integer(), nullable=True),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.Column('progress', sa.Integer(), nullable=True),
sa.Column('score', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['category_id'], ['categories.id'], ),
sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('activities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Enum('create', 'update', 'star', name='activity_type'), nullable=True),
sa.Column('action', sa.String(length=32), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=False),
sa.Column('content', sa.UnicodeText(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('project_id', sa.Integer(), nullable=False),
sa.Column('project_progress', sa.Integer(), nullable=True),
sa.Column('project_score', sa.Integer(), nullable=True),
sa.Column('resource_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['project_id'], ['projects.id'], ),
sa.ForeignKeyConstraint(['resource_id'], ['resources.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('activities')
op.drop_table('projects')
op.drop_table('users_roles')
op.drop_table('resources')
op.drop_table('categories')
op.drop_table('users')
op.drop_table('roles')
op.drop_table('events')
# ### end Alembic commands ###
| StarcoderdataPython |
3488118 | import unittest
import quarkchain.db
from quarkchain.cluster.root_state import RootState
from quarkchain.cluster.shard_state import ShardState
from quarkchain.cluster.tests.test_utils import get_test_env
from quarkchain.core import Address
from quarkchain.core import CrossShardTransactionList
from quarkchain.diff import EthDifficultyCalculator
def create_default_state(env, diff_calc=None):
r_state = RootState(env=env, diff_calc=diff_calc)
s_state_list = dict()
for full_shard_id in env.quark_chain_config.get_full_shard_ids():
shard_state = ShardState(
env=env, full_shard_id=full_shard_id, db=quarkchain.db.InMemoryDb()
)
mblock, coinbase_amount_map = shard_state.init_genesis_state(
r_state.get_tip_block()
)
block_hash = mblock.header.get_hash()
r_state.add_validated_minor_block_hash(
block_hash, coinbase_amount_map.balance_map
)
s_state_list[full_shard_id] = shard_state
# add a root block so that later minor blocks will be broadcasted to neighbor shards
minor_header_list = []
for state in s_state_list.values():
minor_header_list.append(state.header_tip)
root_block = r_state.create_block_to_mine(minor_header_list)
assert r_state.add_block(root_block)
for state in s_state_list.values():
assert state.add_root_block(root_block)
return r_state, s_state_list
def add_minor_block_to_cluster(s_states, block):
"""Add block to corresponding shard state and broadcast xshard list to other shards"""
full_shard_id = block.header.branch.get_full_shard_id()
s_states[full_shard_id].finalize_and_add_block(block)
block_hash = block.header.get_hash()
for dst_full_shard_id, state in s_states.items():
if dst_full_shard_id == full_shard_id:
continue
state.add_cross_shard_tx_list_by_minor_block_hash(
block_hash, CrossShardTransactionList(tx_list=[])
)
class TestRootState(unittest.TestCase):
    """Unit tests for RootState and its interaction with ShardState.

    Every test builds a fresh in-memory cluster via create_default_state, so
    the root chain always starts with one mined root block on top of genesis.
    Keys like ``2 | 0`` / ``2 | 1`` are full shard IDs; presumably the high bit
    encodes the shard size and the low bits the shard index — confirm against
    quarkchain.core.Branch.
    """
    def test_root_state_simple(self):
        """A freshly created RootState starts at the genesis tip (height 0)."""
        env = get_test_env()
        state = RootState(env=env)
        self.assertEqual(state.tip.height, 0)
    def test_root_state_and_shard_state_add_block(self):
        """Mine one minor block per shard, include them in a root block, and
        check root-chain lookups by height/hash plus shard root-tip updates."""
        env = get_test_env()
        r_state, s_states = create_default_state(env)
        self.assertEqual(r_state.tip.total_difficulty, 2000000)
        s_state0 = s_states[2 | 0]
        s_state1 = s_states[2 | 1]
        b0 = s_state0.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b0)
        b1 = s_state1.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b1)
        # Root state only accepts minor headers whose hashes it has validated.
        r_state.add_validated_minor_block_hash(
            b0.header.get_hash(), b0.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b1.header.get_hash(), b1.header.coinbase_amount_map.balance_map
        )
        root_block = r_state.create_block_to_mine([b0.header, b1.header])
        self.assertEqual(root_block.header.total_difficulty, 3000976)
        self.assertTrue(r_state.add_block(root_block))
        # Height 3 does not exist yet; height 2 is the new block; None == tip.
        self.assertIsNone(r_state.get_root_block_by_height(3))
        self.assertEqual(r_state.get_root_block_by_height(2), root_block)
        self.assertEqual(r_state.get_root_block_by_height(None), root_block)
        self.assertEqual(
            r_state.get_root_block_by_height(1),
            r_state.get_root_block_by_hash(root_block.header.hash_prev_block),
        )
        self.assertTrue(s_state0.add_root_block(root_block))
        self.assertEqual(s_state0.root_tip, root_block.header)
        self.assertTrue(s_state1.add_root_block(root_block))
        self.assertEqual(s_state1.root_tip, root_block.header)
    def test_root_state_add_block_no_proof_of_progress(self):
        """Root blocks are accepted even when they include headers from only
        some (or none) of the shards."""
        env = get_test_env()
        r_state, s_states = create_default_state(env)
        s_state0 = s_states[2 | 0]
        s_state1 = s_states[2 | 1]
        b0 = s_state0.create_block_to_mine()
        s_state0.finalize_and_add_block(b0)
        b1 = s_state1.create_block_to_mine()
        s_state1.finalize_and_add_block(b1)
        r_state.add_validated_minor_block_hash(
            b0.header.get_hash(), b0.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b1.header.get_hash(), b1.header.coinbase_amount_map.balance_map
        )
        root_block = r_state.create_block_to_mine([])
        self.assertTrue(r_state.add_block(root_block))
        root_block = r_state.create_block_to_mine([b0.header])
        self.assertTrue(r_state.add_block(root_block))
        root_block = r_state.create_block_to_mine([b1.header])
        self.assertTrue(r_state.add_block(root_block))
    def test_root_state_add_two_blocks(self):
        """Two consecutive root blocks, each carrying fresh minor headers."""
        env = get_test_env()
        r_state, s_states = create_default_state(env)
        s_state0 = s_states[2 | 0]
        s_state1 = s_states[2 | 1]
        b0 = s_state0.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b0)
        b1 = s_state1.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b1)
        r_state.add_validated_minor_block_hash(
            b0.header.get_hash(), b0.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b1.header.get_hash(), b1.header.coinbase_amount_map.balance_map
        )
        root_block0 = r_state.create_block_to_mine([b0.header, b1.header])
        self.assertTrue(r_state.add_block(root_block0))
        b2 = s_state0.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b2)
        b3 = s_state1.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b3)
        r_state.add_validated_minor_block_hash(
            b2.header.get_hash(), b2.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b3.header.get_hash(), b3.header.coinbase_amount_map.balance_map
        )
        root_block1 = r_state.create_block_to_mine([b2.header, b3.header])
        self.assertTrue(r_state.add_block(root_block1))
    def test_root_state_and_shard_state_fork(self):
        """A forked root block at the same height is stored but does not become
        the tip; extending the fork with enough work eventually switches the
        tip (add_block returning False means "accepted but not new tip")."""
        env = get_test_env()
        r_state, s_states = create_default_state(env)
        s_state0 = s_states[2 | 0]
        s_state1 = s_states[2 | 1]
        b0 = s_state0.create_block_to_mine()
        b2 = s_state0.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b0)
        b1 = s_state1.create_block_to_mine()
        b3 = s_state1.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b1)
        r_state.add_validated_minor_block_hash(
            b0.header.get_hash(), b0.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b1.header.get_hash(), b1.header.coinbase_amount_map.balance_map
        )
        # root_block0 and root_block1 share the same parent -> fork at height 2.
        root_block0 = r_state.create_block_to_mine([b0.header, b1.header])
        root_block1 = r_state.create_block_to_mine([])
        self.assertTrue(r_state.add_block(root_block0))
        self.assertTrue(s_state0.add_root_block(root_block0))
        self.assertTrue(s_state1.add_root_block(root_block0))
        add_minor_block_to_cluster(s_states, b2)
        add_minor_block_to_cluster(s_states, b3)
        r_state.add_validated_minor_block_hash(
            b2.header.get_hash(), b2.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b3.header.get_hash(), b3.header.coinbase_amount_map.balance_map
        )
        root_block1.add_minor_block_header(b2.header).add_minor_block_header(
            b3.header
        ).finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block1.minor_block_header_list],
                root_block1.header.height,
            )
        )
        # Fork block is accepted but does not displace the current tip.
        self.assertFalse(r_state.add_block(root_block1))
        self.assertFalse(s_state0.add_root_block(root_block1))
        self.assertFalse(s_state1.add_root_block(root_block1))
        b4 = b2.create_block_to_append()
        b5 = b3.create_block_to_append()
        add_minor_block_to_cluster(s_states, b4)
        add_minor_block_to_cluster(s_states, b5)
        r_state.add_validated_minor_block_hash(
            b4.header.get_hash(), b4.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b5.header.get_hash(), b5.header.coinbase_amount_map.balance_map
        )
        root_block2 = (
            root_block1.create_block_to_append()
            .add_minor_block_header(b4.header)
            .add_minor_block_header(b5.header)
        )
        root_block2.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block2.minor_block_header_list],
                root_block2.header.height,
            )
        )
        # Extending the fork makes it the heaviest chain -> tip switches.
        self.assertTrue(r_state.add_block(root_block2))
        self.assertTrue(s_state0.add_root_block(root_block2))
        self.assertTrue(s_state1.add_root_block(root_block2))
        self.assertEqual(r_state.tip, root_block2.header)
        self.assertEqual(s_state0.root_tip, root_block2.header)
        self.assertEqual(s_state1.root_tip, root_block2.header)
    def test_root_state_difficulty_and_coinbase(self):
        """Check coinbase computation under varying tax rates and difficulty
        adjustment (EthDifficultyCalculator) for fast/slow block times."""
        env = get_test_env()
        env.quark_chain_config.SKIP_ROOT_DIFFICULTY_CHECK = False
        env.quark_chain_config.ROOT.GENESIS.DIFFICULTY = 1000
        diff_calc = EthDifficultyCalculator(cutoff=9, diff_factor=2048, minimum_diff=1)
        env.quark_chain_config.NETWORK_ID = (
            1
        )  # other network ids will skip difficulty check
        env.quark_chain_config.REWARD_TAX_RATE = 0.8
        env.quark_chain_config.ROOT.COINBASE_AMOUNT = 5
        for c in env.quark_chain_config.shards.values():
            c.COINBASE_AMOUNT = 5
        r_state, s_states = create_default_state(env, diff_calc=diff_calc)
        s_state0 = s_states[2 | 0]
        s_state1 = s_states[2 | 1]
        g0 = s_state0.header_tip
        b0 = s_state0.get_tip().create_block_to_append()
        add_minor_block_to_cluster(s_states, b0)
        g1 = s_state1.header_tip
        b1 = s_state1.get_tip().create_block_to_append()
        add_minor_block_to_cluster(s_states, b1)
        self.assertEqual(
            b0.header.coinbase_amount_map.balance_map,
            {env.quark_chain_config.genesis_token: 1},
        )
        self.assertEqual(
            b1.header.coinbase_amount_map.balance_map,
            {env.quark_chain_config.genesis_token: 1},
        )
        r_state.add_validated_minor_block_hash(
            b0.header.get_hash(), b0.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b1.header.get_hash(), b1.header.coinbase_amount_map.balance_map
        )
        # Test coinbase
        original_reward_tax_rate = env.quark_chain_config.REWARD_TAX_RATE
        for tax_rate in [0.8, 0.6, 0.9]:
            env.quark_chain_config.REWARD_TAX_RATE = tax_rate
            root_block_tmp = r_state.create_block_to_mine(
                m_header_list=[b0.header, b1.header],
                address=Address.create_empty_account(),
                create_time=r_state.tip.create_time + 9,
            )
            self.assertEqual(root_block_tmp.header.signature, bytes(65))  # empty sig
            # still use minor block's coinbase amount, 1
            self.assertEqual(
                root_block_tmp.header.coinbase_amount_map.balance_map[
                    env.quark_chain_config.genesis_token
                ],
                round((1 + 1) / (1 - tax_rate) * tax_rate + 5),
            )
        env.quark_chain_config.REWARD_TAX_RATE = original_reward_tax_rate
        # Check new difficulty
        # create_time delta == cutoff (9): difficulty unchanged.
        root_block0 = r_state.create_block_to_mine(
            m_header_list=[b0.header, b1.header],
            address=Address.create_empty_account(),
            create_time=r_state.tip.create_time + 9,
        )
        self.assertEqual(r_state.tip.difficulty, root_block0.header.difficulty)
        # Faster than cutoff: difficulty increases by diff/2048.
        root_block0 = r_state.create_block_to_mine(
            m_header_list=[b0.header, b1.header],
            address=Address.create_empty_account(),
            create_time=r_state.tip.create_time + 3,
        )
        self.assertEqual(
            r_state.tip.difficulty + r_state.tip.difficulty // 2048,
            root_block0.header.difficulty,
        )
        # Slower than cutoff: difficulty decreases by diff/2048.
        root_block0 = r_state.create_block_to_mine(
            m_header_list=[g0, b0.header, g1, b1.header],
            address=Address.create_empty_account(),
            create_time=r_state.tip.create_time + 26,
        )
        self.assertEqual(
            r_state.tip.difficulty - r_state.tip.difficulty // 2048,
            root_block0.header.difficulty,
        )
    def test_root_state_recovery(self):
        """After recreating RootState from the same env/db, the tip is restored
        and fork blocks are pruned from consistent lookups (but still readable
        with consistency_check=False)."""
        env = get_test_env()
        r_state, s_states = create_default_state(env)
        s_state0 = s_states[2 | 0]
        s_state1 = s_states[2 | 1]
        b0 = s_state0.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b0)
        b1 = s_state1.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b1)
        r_state.add_validated_minor_block_hash(
            b0.header.get_hash(), b0.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b1.header.get_hash(), b1.header.coinbase_amount_map.balance_map
        )
        root_block0 = r_state.create_block_to_mine([b0.header, b1.header])
        root_block00 = r_state.create_block_to_mine([b0.header, b1.header])
        self.assertTrue(r_state.add_block(root_block0))
        # create a fork
        root_block00.header.create_time += 1
        root_block00.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block00.minor_block_header_list],
                root_block00.header.height,
            )
        )
        self.assertNotEqual(
            root_block0.header.get_hash(), root_block00.header.get_hash()
        )
        self.assertFalse(r_state.add_block(root_block00))
        self.assertEqual(
            r_state.db.get_root_block_by_hash(root_block00.header.get_hash()),
            root_block00,
        )
        b2 = s_state0.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b2)
        b3 = s_state1.create_block_to_mine()
        add_minor_block_to_cluster(s_states, b3)
        r_state.add_validated_minor_block_hash(
            b2.header.get_hash(), b2.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            b3.header.get_hash(), b3.header.coinbase_amount_map.balance_map
        )
        root_block1 = r_state.create_block_to_mine([b2.header, b3.header])
        self.assertTrue(r_state.add_block(root_block1))
        # now the longest chain is root_block0 <-- root_block1
        # but root_block0 will become the new tip after recovery
        recovered_state = RootState(env=env)
        self.assertEqual(recovered_state.tip, root_block0.header)
        self.assertEqual(recovered_state.db.get_root_block_by_height(2), root_block0)
        self.assertEqual(recovered_state.get_root_block_by_height(None), root_block0)
        # fork is pruned from recovered state
        self.assertIsNone(
            recovered_state.db.get_root_block_by_hash(root_block00.header.get_hash())
        )
        self.assertEqual(
            recovered_state.db.get_root_block_by_hash(
                root_block00.header.get_hash(), consistency_check=False
            ),
            root_block00,
        )
    def test_add_root_block_with_minor_block_with_wrong_root_block_hash(self):
        """Test for the following case:

                   +--+    +--+
                   |r1|<---|r3|
                  /+--+    +--+
                 /   |       |
            +--+/   +--+    +--+
            |r0|<---|m1|<---|m2|
            +--+\   +--+    +--+
                 \   |       |
                  \+--+      |
                   |r2|<-----+
                   +--+

        where r3 is invalid because m2 depends on r2, which is not in the r3 chain.
        """
        env = get_test_env(shard_size=1)
        r_state, s_states = create_default_state(env)
        s_state0 = s_states[1 | 0]
        root_block0 = r_state.get_tip_block()
        m1 = s_state0.get_tip().create_block_to_append()
        add_minor_block_to_cluster(s_states, m1)
        r_state.add_validated_minor_block_hash(
            m1.header.get_hash(), m1.header.coinbase_amount_map.balance_map
        )
        # r1 and r2 are siblings (different nonces) both committing m1.
        root_block1 = root_block0.create_block_to_append(
            nonce=0
        ).add_minor_block_header(m1.header)
        root_block1.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block1.minor_block_header_list],
                root_block1.header.height,
            )
        )
        root_block2 = root_block0.create_block_to_append(
            nonce=1
        ).add_minor_block_header(m1.header)
        root_block2.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block2.minor_block_header_list],
                root_block2.header.height,
            )
        )
        self.assertTrue(r_state.add_block(root_block1))
        self.assertFalse(r_state.add_block(root_block2))
        self.assertTrue(s_state0.add_root_block(root_block1))
        self.assertFalse(s_state0.add_root_block(root_block2))
        # m2 points at r2, putting it off the r1 chain.
        m2 = m1.create_block_to_append()
        m2.header.hash_prev_root_block = root_block2.header.get_hash()
        add_minor_block_to_cluster(s_states, m2)
        r_state.add_validated_minor_block_hash(
            m2.header.get_hash(), m2.header.coinbase_amount_map.balance_map
        )
        root_block3 = root_block1.create_block_to_append().add_minor_block_header(
            m2.header
        )
        root_block3.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block3.minor_block_header_list],
                root_block3.header.height,
            )
        )
        # r3 (on r1) includes m2 whose prev root is r2 -> rejected.
        with self.assertRaises(ValueError):
            r_state.add_block(root_block3)
        root_block4 = root_block2.create_block_to_append().add_minor_block_header(
            m2.header
        )
        root_block4.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block4.minor_block_header_list],
                root_block4.header.height,
            )
        )
        # The same minor block on top of r2's chain is valid.
        self.assertTrue(r_state.add_block(root_block4))
    def test_add_minor_block_with_wrong_root_block_hash(self):
        """Test for the following case:

                   +--+
                   |r1|
                  /+--+
                 /   |
            +--+/   +--+    +--+
            |r0|<---|m1|<---|m3|
            +--+\   +--+    +--+
              ^  \   |       |
              |   \+--+      |
              |    |r2|<-----+
              |    +--+
              |      |
              |     +--+
              +-----|m2|
                    +--+

        where m3 is invalid because m3 depends on r2, whose minor chain is not
        the same chain as m3.
        """
        env = get_test_env(shard_size=1)
        r_state, s_states = create_default_state(env)
        s_state0 = s_states[1 | 0]
        root_block0 = r_state.get_tip_block()
        m1 = s_state0.get_tip().create_block_to_append(nonce=0)
        m2 = s_state0.get_tip().create_block_to_append(nonce=1)
        add_minor_block_to_cluster(s_states, m1)
        add_minor_block_to_cluster(s_states, m2)
        r_state.add_validated_minor_block_hash(
            m1.header.get_hash(), m1.header.coinbase_amount_map.balance_map
        )
        r_state.add_validated_minor_block_hash(
            m2.header.get_hash(), m2.header.coinbase_amount_map.balance_map
        )
        root_block1 = root_block0.create_block_to_append(
            nonce=0
        ).add_minor_block_header(m1.header)
        root_block1.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block1.minor_block_header_list],
                root_block1.header.height,
            )
        )
        root_block2 = root_block0.create_block_to_append(
            nonce=1
        ).add_minor_block_header(m2.header)
        root_block2.finalize(
            coinbase_tokens=r_state._calculate_root_block_coinbase(
                [header.get_hash() for header in root_block2.minor_block_header_list],
                root_block2.header.height,
            )
        )
        self.assertTrue(r_state.add_block(root_block1))
        self.assertFalse(r_state.add_block(root_block2))
        self.assertTrue(s_state0.add_root_block(root_block1))
        self.assertFalse(s_state0.add_root_block(root_block2))
        # m3 extends m1 but claims r2 (whose minor chain contains m2, not m1).
        m3 = m1.create_block_to_append()
        m3.header.hash_prev_root_block = root_block2.header.get_hash()
        with self.assertRaises(ValueError):
            add_minor_block_to_cluster(s_states, m3)
        m4 = m1.create_block_to_append()
        m4.header.hash_prev_root_block = root_block1.header.get_hash()
        add_minor_block_to_cluster(s_states, m4)
        # Test recovery
        s_state0_recovered = ShardState(env, full_shard_id=1 | 0, db=s_state0.raw_db)
        s_state0_recovered.init_from_root_block(root_block1)
        with self.assertRaises(ValueError):
            add_minor_block_to_cluster(s_states, m3)
    def test_root_state_add_root_block_too_many_minor_blocks(self):
        """A root block carrying more minor headers than the per-shard cap is
        rejected; trimming to the cap makes it valid."""
        env = get_test_env()
        r_state, s_states = create_default_state(env)
        s_state0 = s_states[2 | 0]
        headers = []
        max_mblock_in_rblock = (
            s_state0.shard_config.max_blocks_per_shard_in_one_root_block
        )
        for i in range(max_mblock_in_rblock + 1):
            b = s_state0.create_block_to_mine()
            add_minor_block_to_cluster(s_states, b)
            headers.append(b.header)
            r_state.add_validated_minor_block_hash(
                b.header.get_hash(), b.header.coinbase_amount_map.balance_map
            )
        root_block = r_state.create_block_to_mine(
            m_header_list=headers, create_time=headers[-1].create_time + 1
        )
        # NOTE(review): assertRaisesRegexp is a deprecated alias of
        # assertRaisesRegex (removed in Python 3.12).
        with self.assertRaisesRegexp(
            ValueError, "too many minor blocks in the root block for shard"
        ):
            r_state.add_block(root_block)
        headers = headers[:max_mblock_in_rblock]
        root_block = r_state.create_block_to_mine(
            m_header_list=headers, create_time=headers[-1].create_time + 1
        )
        r_state.add_block(root_block)
    def test_root_chain_fork_using_largest_total_diff(self):
        """Fork choice follows largest total difficulty, not block count."""
        env = get_test_env(shard_size=1)
        r_state, s_states = create_default_state(env)
        coinbase = r_state._calculate_root_block_coinbase([], 0)
        rb0 = r_state.get_tip_block()
        # one fork with more blocks but small total diff
        rb1 = rb0.create_block_to_append(difficulty=int(1e6)).finalize(coinbase)
        rb2 = rb1.create_block_to_append(difficulty=int(1e6)).finalize(coinbase)
        # another fork with less blocks but higher total diff
        rb3 = rb0.create_block_to_append(difficulty=int(3e6)).finalize(coinbase)
        # rb3 should be added as the tip
        self.assertTrue(r_state.add_block(rb1))
        self.assertTrue(r_state.add_block(rb2))
        self.assertTrue(r_state.add_block(rb3))
        self.assertEqual(r_state.tip.get_hash(), rb3.header.get_hash())
    def test_root_coinbase_decay(self):
        """Coinbase decays by BLOCK_REWARD_DECAY_FACTOR once per epoch: same
        factor throughout an epoch, squared in the next."""
        env = get_test_env()
        r_state, s_states = create_default_state(env)
        coinbase = r_state._calculate_root_block_coinbase(
            [], env.quark_chain_config.ROOT.EPOCH_INTERVAL
        )
        self.assertEqual(
            coinbase,
            {
                env.quark_chain_config.genesis_token: env.quark_chain_config.ROOT.COINBASE_AMOUNT
                * env.quark_chain_config.BLOCK_REWARD_DECAY_FACTOR
            },
        )
        coinbase = r_state._calculate_root_block_coinbase(
            [], env.quark_chain_config.ROOT.EPOCH_INTERVAL + 1
        )
        self.assertEqual(
            coinbase,
            {
                env.quark_chain_config.genesis_token: env.quark_chain_config.ROOT.COINBASE_AMOUNT
                * env.quark_chain_config.BLOCK_REWARD_DECAY_FACTOR
            },
        )
        coinbase = r_state._calculate_root_block_coinbase(
            [], env.quark_chain_config.ROOT.EPOCH_INTERVAL * 2
        )
        self.assertEqual(
            coinbase,
            {
                env.quark_chain_config.genesis_token: env.quark_chain_config.ROOT.COINBASE_AMOUNT
                * env.quark_chain_config.BLOCK_REWARD_DECAY_FACTOR ** 2
            },
        )
| StarcoderdataPython |
9790648 | import math
import numpy as np
import torch
from torch.nn.parameter import Parameter
from torch import nn
from torch.nn import functional as F
from torch.utils import model_zoo
from copy import deepcopy
from .resnet12 import resnet12
eps = 1e-10
class CNNEncoder(nn.Module):
    """Shallow four-layer convolutional feature extractor.

    Layers 1-2: 3x3 conv (no padding) + BN + ReLU + 2x2 max-pool.
    Layers 3-4: 3x3 conv (padding 1) + BN + ReLU, no pooling.
    Output has 64 channels regardless of the input channel count.
    """
    def __init__(self, in_c=3):
        super(CNNEncoder, self).__init__()
        def conv_block(c_in, c_out, padding, pool):
            # One conv/BN/ReLU stage, optionally followed by 2x2 max-pooling.
            mods = [
                nn.Conv2d(c_in, c_out, kernel_size=3, padding=padding),
                nn.BatchNorm2d(c_out, momentum=1, affine=True),
                nn.ReLU(),
            ]
            if pool:
                mods.append(nn.MaxPool2d(2))
            return nn.Sequential(*mods)
        self.layer1 = conv_block(in_c, 64, 0, True)
        self.layer2 = conv_block(64, 64, 0, True)
        self.layer3 = conv_block(64, 64, 1, False)
        self.layer4 = conv_block(64, 64, 1, False)
    def forward(self, x):
        """Run the four stages in order; returns a 64-channel feature map."""
        out = x
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        return out  # 64
class MyLinear(nn.Module):
    """Linear layer whose forward() returns both the output and the weight.

    When reset_each_iter is True (only allowed with bias=False), the weight is
    re-randomized on every forward pass.
    """
    def __init__(self, in_features, out_features, bias=True, reset_each_iter=False):
        super(MyLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        self.reset_each_iter = reset_each_iter
        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            # Register a named None so .bias always exists.
            self.register_parameter('bias', None)
        self.reset_parameters()
        if reset_each_iter:
            assert bias is False
    def reset_parameters(self):
        """Uniform init in [-1/sqrt(fan_in), 1/sqrt(fan_in)], like nn.Linear."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)
    def forward(self, input):
        if self.reset_each_iter:
            self.reset_parameters()
        out = F.linear(input, self.weight, self.bias)
        return out, self.weight
    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None
        )
class BasicBlock(nn.Module):
    # NOTE(review): vestigial/truncated residual block — only conv1 is defined
    # and there is no forward(); nothing in the visible code instantiates
    # BasicBlock (feat_extract and CTMNet use Bottleneck). Presumably left over
    # from a standard ResNet implementation — confirm before removing.
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # stride/downsample are accepted but unused in this truncated version.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, bias=False)
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 (stride) -> 1x1 expand (x4),
    plus an identity (or downsampled) skip connection and final ReLU.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # Skip path: identity unless a downsample module was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """Truncated ResNet backbone used as a feature extractor.

    Conv stem (7x7/2 + BN + activation + max-pool) followed by three residual
    stages; stage 4, avg-pool, and the fc classifier of the standard ResNet are
    intentionally commented out — forward() returns the stage-3 feature map.
    """
    def __init__(self, block, layers, in_c):
        # block: residual block class (e.g. Bottleneck); layers: blocks per stage.
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(in_c, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # NOTE(review): attribute is named "relu" but holds nn.Sigmoid(), so the
        # stem applies a sigmoid after bn1 in forward(). Looks unintentional;
        # confirm against trained checkpoints before changing.
        self.relu = nn.Sigmoid()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # self.avgpool = nn.AvgPool2d(7, stride=1)
        # self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convs; BN affine params to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of `blocks` blocks; the first block carries
        the stride and (when channel counts change) a 1x1 downsample on the
        skip path. Mutates self.inplanes so consecutive stages chain widths."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        # Stem, then three stages; no classifier head (see __init__).
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x
def feat_extract(pretrained=False, **kwargs):
    """Construct a feature-extractor backbone (ResNet variant or shallow CNN).

    Args:
        pretrained (bool): if True, load weights from the pytorch model zoo
            via model_urls (non-strict, so head mismatches are tolerated).
        **kwargs: must contain 'opts' (with a .logger attribute) and
            'structure' (str); ResNet/shallow variants also need 'in_c'.

    Returns:
        nn.Module: the constructed backbone.

    Raises:
        NameError: if kwargs['structure'] is not a known architecture.
    """
    model_urls = {
        'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
        'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
        # NOTE(review): 'resnet52' points at the official resnet50 checkpoint;
        # presumably deliberate (local naming counts layers differently) — confirm.
        'resnet52': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
        'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
        'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    }
    logger = kwargs['opts'].logger
    # resnet"x", x = 1 + sum(layers)x3
    if kwargs['structure'] == 'resnet40':
        model = ResNet(Bottleneck, [3, 4, 6], kwargs['in_c'])
    elif kwargs['structure'] == 'resnet19':
        model = ResNet(Bottleneck, [2, 2, 2], kwargs['in_c'])
    elif kwargs['structure'] == 'resnet12':
        # larger dropblock mask for the higher-resolution imagenet-style inputs
        dropblock_size = 5 if 'imagenet' in kwargs['opts'].dataset.name else 2
        model = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=dropblock_size)
    elif kwargs['structure'] == 'resnet52':
        model = ResNet(Bottleneck, [4, 8, 5], kwargs['in_c'])
    elif kwargs['structure'] == 'resnet34':
        model = ResNet(Bottleneck, [3, 4, 4], kwargs['in_c'])
    elif kwargs['structure'] == 'shallow':
        model = CNNEncoder(kwargs['in_c'])
    else:
        raise NameError('structure not known {} ...'.format(kwargs['structure']))
    if pretrained:
        # fix: log-message typo "webiste" -> "website"
        logger('Using pre-trained model from pytorch official website, {:s}'.format(kwargs['structure']))
        model.load_state_dict(model_zoo.load_url(model_urls[kwargs['structure']]), strict=False)
    return model
# NOTE: this is the core of the Category Traversal Module (CTM)
class CTMNet(nn.Module):
"""repnet => feature concat => layer4 & layer5 & avg pooling => fc => sigmoid"""
def __init__(self, opts):
super(CTMNet, self).__init__()
self.opts = opts
self.mp_mean = False
self.delete_mp = False
self.dnet_supp_manner = opts.ctmnet.dnet_supp_manner
if self.opts.fsl.ctm:
# use forward_CTM method
self.use_relation_net = self.opts.ctmnet.CE_use_relation
self.dnet = self.opts.ctmnet.dnet # dnet or baseline
self.dnet_out_c = self.opts.ctmnet.dnet_out_c # define the reshaper
try:
self.baseline_manner = self.opts.ctmnet.baseline_manner
except:
self.baseline_manner = ''
_logger = opts.logger
_logger('Building up models ...')
# feature extractor
in_c = 1 if opts.dataset.name == 'omniglot' else 3
self.repnet = feat_extract(
self.opts.model.resnet_pretrain,
opts=opts, structure=opts.model.structure, in_c=in_c).to(self.opts.ctrl.device)
input_bs = opts.fsl.n_way[0] * opts.fsl.k_shot[0]
random_input = torch.rand(input_bs, in_c, opts.data.im_size, opts.data.im_size).to(self.opts.ctrl.device)
repnet_out = self.repnet(random_input)
repnet_sz = repnet_out.size()
assert repnet_sz[2] == repnet_sz[3]
_logger('\trepnet output sz: {} (assume bs=n_way*k_shot)'.format(repnet_sz))
self.c = repnet_sz[1] # supposed to be 64
self.d = repnet_sz[2]
if self.opts.fsl.ctm:
_embedding = repnet_out
if self.baseline_manner == 'sample_wise_similar':
assert self.opts.model.structure == 'shallow'
input_c = _embedding.size(1)
self.additional_repnet = nn.Sequential(
nn.Conv2d(input_c, input_c, kernel_size=3, padding=1),
nn.BatchNorm2d(input_c, momentum=1, affine=True),
nn.ReLU()
).to(self.opts.ctrl.device)
# RESHAPER
# if not (not self.dnet and self.baseline_manner == 'no_reshaper'):
if self.dnet or self.baseline_manner != 'no_reshaper':
assert np.mod(self.dnet_out_c, 4) == 0 # 4 is the 'expansion' of Bottleneck
out_size = int(self.dnet_out_c / 4)
self.inplanes = _embedding.size(1)
if self.opts.model.structure.startswith('resnet'):
self.reshaper = nn.Sequential(
self._make_layer(Bottleneck, out_size*2, 3, stride=1),
self._make_layer(Bottleneck, out_size, 2, stride=1)
).to(self.opts.ctrl.device)
else:
self.reshaper = self._make_layer(Bottleneck, out_size, 4, stride=1).to(self.opts.ctrl.device)
_out_downsample = self.reshaper(_embedding)
# CONCENTRATOR AND PROJECTOR
if self.dnet:
if self.mp_mean:
self.inplanes = _embedding.size(1)
else:
# concatenate along the channel for all samples in each class
self.inplanes = self.opts.fsl.k_shot[0] * _embedding.size(1)
if self.opts.model.structure.startswith('resnet'):
self.main_component = nn.Sequential(
self._make_layer(Bottleneck, out_size*2, 3, stride=1),
self._make_layer(Bottleneck, out_size, 2, stride=1)
).to(self.opts.ctrl.device)
else:
self.main_component = self._make_layer(Bottleneck, out_size, 4, stride=1).to(self.opts.ctrl.device)
# projector
if self.delete_mp:
assert self.opts.fsl.k_shot[0] == 1
del self.main_component
# input_c for Projector, no mp
self.inplanes = self.opts.fsl.n_way[0] * _embedding.size(1)
else:
# input_c for Projector, has mp
self.inplanes = self.opts.fsl.n_way[0] * out_size * 4
if self.opts.model.structure.startswith('resnet'):
self.projection = nn.Sequential(
self._make_layer(Bottleneck, out_size*2, 3, stride=1),
self._make_layer(Bottleneck, out_size, 2, stride=1)
).to(self.opts.ctrl.device)
else:
self.projection = self._make_layer(Bottleneck, out_size, 4, stride=1).to(self.opts.ctrl.device)
# RELATION METRIC
if self.use_relation_net:
# relation sub_net
if hasattr(self, 'reshaper'):
_input = _out_downsample
else:
_input = _embedding
if self.opts.model.relation_net == 'res_block':
# (256); it is "2" because combining two embedding
self.inplanes = 2 * _input.size(1)
self.relation1 = self._make_layer(Bottleneck, 32, 2, stride=2)
self.relation2 = self._make_layer(Bottleneck, 16, 2, stride=1)
_combine = torch.stack([_input, _input], dim=1).view(
_input.size(0), -1, _input.size(2), _input.size(3))
_out = self.relation2(self.relation1(_combine))
self.fc_input_c = _out.size(1)*_out.size(2)*_out.size(3)
_half = int(self.fc_input_c/2)
self.fc = nn.Sequential(
nn.Linear(self.fc_input_c, _half),
nn.BatchNorm1d(_half),
nn.ReLU(inplace=True),
nn.Linear(_half, 1)
).to(self.opts.ctrl.device)
elif self.opts.model.relation_net == 'simple':
input_c = 2 * _input.size(1)
self.relation1 = nn.Sequential(
nn.Conv2d(input_c, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64, momentum=1, affine=True),
nn.ReLU(),
nn.MaxPool2d(2)).to(self.opts.ctrl.device)
self.relation2 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64, momentum=1, affine=True),
nn.ReLU(),
# nn.MaxPool2d(2)
).to(self.opts.ctrl.device)
_combine = torch.stack([_input, _input], dim=1).view(
_input.size(0), -1, _input.size(2), _input.size(3))
_out = self.relation2(self.relation1(_combine))
self.fc_input_c = _out.size(1) * _out.size(2) * _out.size(3)
_half = int(self.fc_input_c / 2)
self.fc = nn.Sequential(
nn.Linear(self.fc_input_c, _half),
nn.ReLU(),
nn.Linear(_half, 1), # except no sigmoid since we use CE
)
# else:
# # the original relation network
# self.inplanes = 2 * self.c
# # the original network in the relation net
# # after the relation module (three layers)
# self.relation1 = self._make_layer(Bottleneck, 128, 4, stride=2)
# self.relation2 = self._make_layer(Bottleneck, 64, 3, stride=2)
#
# if self.CE_loss:
# self.fc = nn.Sequential(
# nn.Linear(256, 64),
# nn.BatchNorm1d(64),
# nn.ReLU(inplace=True),
# nn.Linear(64, 1)
# )
# else:
# self.fc = nn.Sequential(
# nn.Linear(256, 64),
# nn.BatchNorm1d(64),
# nn.ReLU(inplace=True),
# nn.Linear(64, 1),
# nn.Sigmoid() # the only difference
# )
# combine = torch.stack([repnet_out, repnet_out], dim=1).view(
# repnet_out.size(0), -1, repnet_out.size(2), repnet_out.size(3))
# out = self.relation2(self.relation1(combine))
# _logger('\tafter layer5 sz: {} (assume bs=2)\n'.format(out.size()))
# self.pool_size = out.size(2)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.ConvTranspose1d):
nn.init.xavier_normal(m.weight)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = [block(self.inplanes, planes, stride, downsample)]
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def fac_adjust(self):
loss_fac = np.max([-2*self.epoch + 10, 1])
ot_loss_fac = np.min([.2*self.epoch, 1])
return loss_fac, ot_loss_fac
    def forward_CTM(self, support_x, support_y, query_x, query_y, train=False, optimizer=None):
        """Run one few-shot episode through the CTM pipeline.

        Embeds support and query images with `repnet`, optionally refines the
        support representation via the concentrator/projector branch
        (`self.dnet`), scores each query against every class, and returns
        losses when `train` is True, otherwise (prediction, correct-count).
        `optimizer` is accepted but unused here.
        """
        target, one_hot, target_support = self.get_target(support_y, query_y)
        batch_sz, support_sz, _d = support_x.size(0), support_x.size(1), support_x.size(3)
        query_sz = query_x.size(1)
        n_way, k_shot = self.opts.fsl.n_way[0], self.opts.fsl.k_shot[0]
        # 1. FEATURE EXTRACTION (FOR NOW DISABLE SWAP)
        # support_sz (25), c (64), d (19), d (19)
        support_xf_ori = self.repnet(support_x.view(batch_sz * support_sz, -1, _d, _d))
        # query_sz (75), c (64), d (19), d (19)
        query_xf_ori = self.repnet(query_x.view(batch_sz * query_sz, -1, _d, _d))
        if self.dnet:
            # Concentrator branch: compress the support set into per-class
            # "main components" (mp), then build a shared projector P.
            if not self.delete_mp:
                if not self.mp_mean:
                    support_xf_reshape = support_xf_ori.view(
                        n_way, -1, support_xf_ori.size(2), support_xf_ori.size(3))
                else:
                    support_xf_reshape = support_xf_ori
                mp = self.main_component(support_xf_reshape) # 5(n_way), 64, 3, 3
                if self.mp_mean:
                    mp = torch.mean(
                        mp.view(n_way, k_shot, mp.size(1), mp.size(2), mp.size(2)),
                        dim=1, keepdim=False
                    )
                _input_P = mp.view(1, -1, mp.size(2), mp.size(3)) # mp -> 1, 5*64, 3, 3
            else:
                _input_P = support_xf_ori.view(
                    1, -1, support_xf_ori.size(2), support_xf_ori.size(3))
            # for P: consider all components
            P = self.projection(_input_P) # 1, 64, 3, 3
            P = F.softmax(P, dim=1)
            # Support embedding `v` depends on the configured manner ('1'-'3').
            if self.dnet_supp_manner == '2' or self.dnet_supp_manner == '3':
                mp_modified = torch.matmul(mp, P) # 5, 64, 3, 3
            if self.dnet_supp_manner == '1':
                v = self.reshaper(support_xf_ori)
                v = torch.matmul(v, P)
            elif self.dnet_supp_manner == '2':
                v = self.reshaper(support_xf_ori) # 25, 64, 3, 3
                v = v.view(n_way, -1, v.size(1), v.size(2), v.size(3)) # 5, 5(k_shot), 64, 3, 3
                v = torch.matmul(v, mp_modified.unsqueeze(1)).view(support_sz, v.size(2), v.size(3), v.size(3))
            elif self.dnet_supp_manner == '3':
                v = mp_modified
            query = self.reshaper(query_xf_ori) # 75, 64, 3, 3
            query = torch.matmul(query, P)
        else:
            # baseline
            if self.baseline_manner == 'no_reshaper':
                v = support_xf_ori
                query = query_xf_ori
            elif self.baseline_manner.startswith('sample_wise'):
                if self.baseline_manner == 'sample_wise_similar':
                    support_xf_ori = self.additional_repnet(support_xf_ori)
                    query_xf_ori = self.additional_repnet(query_xf_ori)
                v = self.reshaper(support_xf_ori)
                query = self.reshaper(query_xf_ori)
            elif self.baseline_manner == 'sum':
                v = self.reshaper(support_xf_ori)
                v = v.view(n_way, -1, v.size(1), v.size(2), v.size(2)).sum(1, keepdim=False)
                query = self.reshaper(query_xf_ori)
        # 2. Standard pipeline
        score = self.get_embedding_score(v, query, n_way, query_sz)
        # 3. Output
        if train:
            # for legacy: sinkhorn/discriminative losses are kept as zeros so
            # the returned tensor layout matches older training code.
            zero = torch.zeros(1).to(self.opts.ctrl.device)
            disc_weights = None
            sinkhorn_loss, loss_discri = zero, zero
            loss = F.cross_entropy(score, target).unsqueeze(0)
            total_loss = loss
            return torch.cat([total_loss, loss, sinkhorn_loss, loss_discri]).unsqueeze(0), disc_weights
        else:
            # TEST
            prediction = score.argmax(dim=-1)
            correct = torch.eq(prediction, target).sum().unsqueeze(0)
            return prediction, correct
@staticmethod
def _norm(input):
return (input - input.min()) / (input.max() - input.min()).clamp(min=eps)
    def get_target(self, support_y, query_y):
        """Derive episode targets from raw labels.

        Returns a tuple:
            target: class index (0..n_way-1) for each query sample.
            one_hot_labels: one-hot encoding of `target`.
            target_support: class index for each support sample.
        Assumes support samples are grouped class-major, k_shot per class.
        """
        # One representative label per class: entries are k_shot apart.
        support_y = support_y[0, ::self.opts.fsl.k_shot[0]]
        query_y = query_y[0]
        # Map each query label to its class position within support_y.
        target = torch.stack([
            torch.nonzero(torch.eq(support_y, entry)) for entry in query_y
        ])
        target = target.view(-1, 1) # shape: query_size
        one_hot_labels = \
            torch.zeros(target.size(0), self.opts.fsl.n_way[0]).to(self.opts.ctrl.device).scatter_(
                1, target, 1)
        # [0,0,...,1,1,...,...]: each class index repeated k_shot times.
        target_support = torch.arange(self.opts.fsl.n_way[0]).unsqueeze(1).expand(
            -1, self.opts.fsl.k_shot[0]).contiguous().view(-1).long().to(self.opts.ctrl.device)
        return target.squeeze(1), one_hot_labels, target_support
    def get_embedding_score(self, support_xf_ori, query_xf_ori, n_way, query_sz):
        """Score every query embedding against every class.

        Uses the relation-network head when `use_relation_net` is set,
        otherwise negative Euclidean distance; per-shot scores are summed
        into per-class scores. Returns a tensor of shape (query_sz, n_way).
        """
        # sum up samples with support
        k_shot = int(support_xf_ori.size(0) / n_way)
        if self.use_relation_net:
            ch_sz, spatial_sz = support_xf_ori.size(1), support_xf_ori.size(2)
            # support_xf_ori: 25/5, 256, 5, 5
            # query_xf_ori: 75, 256, 5, 5
            # first expand: tile so every (query, support) pair is one row
            support_xf_ori = support_xf_ori.unsqueeze(0).expand(query_sz, -1, -1, -1, -1).contiguous().view(
                query_sz*n_way*k_shot, ch_sz, spatial_sz, spatial_sz
            )
            query_xf_ori = query_xf_ori.unsqueeze(1).expand(-1, n_way*k_shot, -1, -1, -1).contiguous().view(
                query_sz*n_way*k_shot, ch_sz, spatial_sz, spatial_sz
            )
            # Channel-concatenate each pair and run the relation head.
            embed_combine = torch.stack([support_xf_ori, query_xf_ori], dim=1).view(
                query_sz*n_way*k_shot, -1, spatial_sz, spatial_sz)
            _out = self.relation2(self.relation1(embed_combine))
            _out = _out.view(_out.size(0), -1)
            score = self.fc(_out).view(query_sz, n_way, k_shot)
        else:
            # Metric baseline: negative L2 distance between flattened features.
            support_xf = support_xf_ori.view(support_xf_ori.size(0), -1) # size: 25/5 (support_sz/n_way) x feat_dim
            query_xf = query_xf_ori.view(query_xf_ori.size(0), -1) # size: 75 (query_size) x feat_dim
            feat_dim = support_xf.size(-1)
            support_xf = support_xf.unsqueeze(0).expand(query_sz, -1, -1).contiguous().view(-1, feat_dim)
            query_xf = query_xf.unsqueeze(1).expand(-1, n_way*k_shot, -1).contiguous().view(-1, feat_dim)
            score = -F.pairwise_distance(support_xf, query_xf, p=2)
            score = score.view(query_sz, n_way, k_shot)
        # sum up here: collapse the k_shot axis into one score per class
        score = torch.sum(score, dim=2, keepdim=False)
        return score
| StarcoderdataPython |
6421178 | ###############################
## 100DaysOfCode ##
## d02_ecercice03 ##
###############################
# Love Calculator
# Love Calculator: the score is the count of "true" letters followed by the
# count of "love" letters across both lowercased names.
print("Welcome to the Love Calculator!")
name1 = input("\nWhat is your name? ")
name2 = input("What is their name? ")

combined = name1.lower() + name2.lower()
true_count = sum(combined.count(letter) for letter in "true")
love_count = sum(combined.count(letter) for letter in "love")
# Concatenate the two counts digit-wise to form the score.
score = int(f"{true_count}{love_count}")

# For Love Scores less than 10 or greater than 90, the message should be:
if score < 10 or score > 90:
    print(f"\nYour score is {score}, you go together like coke and mentos.")
# For Love Scores between 40 and 50, the message should be:
elif 40 <= score <= 50:
    print(f"\nYour score is {score}, you are alright together.")
# Otherwise, the message will just be their score
else:
    print(f"\nYour score is {score}.")
| StarcoderdataPython |
9602130 | <filename>RandomFileQueue.py
# MusicPlayer, https://github.com/albertz/music-player
# Copyright (c) 2012, <NAME>, www.az2000.de
# All rights reserved.
# This code is under the 2-clause BSD license, see License.txt in the root directory of this project.
# loosely inspired from https://github.com/albertz/PictureSlider/blob/master/PictureSlider/FileQueue.cpp
import os, random
from os import access, R_OK
C_nonloaded_dirs_expectedFac = 0.5
C_nonloaded_dirs_expectedMin = 100
rndInt = random.randint
class RandomFileQueue:
    """Endless random picker of files under `rootdir` matching `fileexts`.

    Directories are scanned lazily; not-yet-loaded subtrees are weighted by a
    heuristic estimate of how many files they probably contain, so early picks
    are reasonably spread without scanning the whole tree up front.
    """
    def __init__(self, rootdir, fileexts):
        self.rootdir = rootdir
        self.fileexts = fileexts
        def hasCorrectFileext(f):
            # Case-insensitive extension check against the allowed list.
            ext = os.path.splitext(f)[1]
            ext = ext.lower()
            for allowedExt in self.fileexts:
                if ext == "." + allowedExt.lower():
                    return True
            return False
        class Dir:
            # NOTE: this class body executes inside __init__, so it closes
            # over `self` (as `owner`) and hasCorrectFileext above.
            owner = self
            isLoaded = False
            base = None
            def __init__(self):
                self.files = []
                self.loadedDirs = []
                self.nonloadedDirs = []
            def load(self):
                """Scan `base` once, splitting entries into files and subdirs."""
                self.isLoaded = True
                # Note: If we could use the C readdir() more directly, that would be much faster because it already provides the stat info (whether it is a file or dir), so we don't need to do a separate call for isfile/isdir.
                try:
                    listeddir = os.listdir(self.base)
                except:
                    # it might fail because of permission errors or whatever
                    listeddir = []
                for f in listeddir:
                    if f.startswith("."): continue
                    if os.path.isfile(self.base + "/" + f):
                        if hasCorrectFileext(f) and access(self.base + "/" + f, R_OK):
                            self.files += [f]
                    elif os.path.isdir(self.base + "/" + f):
                        subdir = Dir()
                        subdir.base = self.base + "/" + f
                        self.nonloadedDirs += [subdir]
            def expectedFilesCount(self):
                """Estimate how many files live under this directory."""
                c = 0
                c += len(self.files)
                for d in self.loadedDirs:
                    c += d.expectedFilesCount()
                # Unloaded subdirs get a guess proportional to what we have
                # seen so far, with a fixed minimum per directory.
                c += len(self.nonloadedDirs) * \
                    max(int(C_nonloaded_dirs_expectedFac * c), C_nonloaded_dirs_expectedMin)
                return c
            def randomGet(self):
                """Pick one file at random, lazily loading subdirs as needed.

                Returns None when the subtree contains no matching files.
                """
                if not self.isLoaded: self.load()
                while True:
                    rmax = self.expectedFilesCount()
                    if rmax == 0: return None
                    r = rndInt(0, rmax - 1)
                    if r < len(self.files):
                        return self.base + "/" + self.files[r]
                    r -= len(self.files)
                    for d in self.loadedDirs:
                        c = d.expectedFilesCount()
                        if r < c:
                            f = d.randomGet()
                            if f: return f
                            # Subtree turned out empty: redraw from scratch.
                            r = None
                            break
                        r -= c
                    if r is None: continue
                    # Landed in the "unloaded" probability mass: promote one
                    # random unloaded subdir to loaded and retry.
                    assert len(self.nonloadedDirs) > 0
                    r = rndInt(0, len(self.nonloadedDirs) - 1)
                    d = self.nonloadedDirs[r]
                    self.nonloadedDirs = self.nonloadedDirs[:r] + self.nonloadedDirs[r+1:]
                    d.load()
                    self.loadedDirs += [d]
        self.root = Dir()
        self.root.base = rootdir
    def getNextFile(self):
        """Return a random matching file path, or None if none exist."""
        return self.root.randomGet()
def test():
    """Smoke test: draw and print 100 random music files from ~/Music."""
    q = RandomFileQueue(rootdir = os.path.expanduser("~/Music"), fileexts=["mp3","ogg","flac"])
    i = 0
    while i < 100:
        i += 1
        # Fixed: was a Python 2 print statement (SyntaxError on Python 3).
        print(q.getNextFile())

if __name__ == '__main__':
    test()
| StarcoderdataPython |
3509599 | <gh_stars>1-10
# Generated by Django 3.0.3 on 2020-09-21 17:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the MovieModel table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='MovieModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('model_name', models.CharField(max_length=1000, unique=True)),
                # Timestamps maintained automatically on insert / update.
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('model_root_path', models.CharField(max_length=10000, unique=True)),
                ('movie', models.FileField(upload_to='')),
            ],
        ),
    ]
| StarcoderdataPython |
1939693 | import random
from flask import Flask, jsonify, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
##Connect to Database
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///cafes.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
##Cafe TABLE Configuration
class Cafe(db.Model):
    """SQLAlchemy model describing one cafe row."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(250), unique=True, nullable=False)
    map_url = db.Column(db.String(500), nullable=False)
    img_url = db.Column(db.String(500), nullable=False)
    location = db.Column(db.String(250), nullable=False)
    seats = db.Column(db.String(250), nullable=False)
    has_toilet = db.Column(db.Boolean, nullable=False)
    has_wifi = db.Column(db.Boolean, nullable=False)
    has_sockets = db.Column(db.Boolean, nullable=False)
    can_take_calls = db.Column(db.Boolean, nullable=False)
    coffee_price = db.Column(db.String(250), nullable=True)
    def to_dict(self):
        """Return {column_name: value} for every mapped column."""
        return {column.name: getattr(self, column.name) for column in self.__table__.columns}
@app.route("/")
def home():
    """Render the landing page."""
    return render_template("index.html")
## HTTP GET - Read Record
@app.route("/random")
def get_random_cafe():
    """Return one randomly chosen cafe as JSON."""
    available = db.session.query(Cafe).all()
    pick = random.choice(available)
    return jsonify(cafe=pick.to_dict())
@app.route("/all")
def all_cafes():
    """Return every cafe in the database as a JSON list."""
    records = db.session.query(Cafe).all()
    serialized = [record.to_dict() for record in records]
    return jsonify(cafes=serialized)
@app.route("/search")
def search_cafes():
    """Look up the first cafe at the location given by the `loc` query arg."""
    requested_location = request.args.get("loc")
    match = db.session.query(Cafe).filter_by(location=requested_location).first()
    if match is None:
        return jsonify(error={"Not Found": "Sorry, we don't have a cafe at that location."})
    return jsonify(cafe=match.to_dict())
## HTTP POST - Create Record
@app.route("/add", methods=["POST"])
def add_cafe():
    """Create a cafe record from posted form fields."""
    form = request.form
    record = Cafe(
        name=form.get("name"),
        map_url=form.get("map_url"),
        img_url=form.get("img_url"),
        location=form.get("loc"),
        # Checkbox-style fields arrive as strings; any non-empty value is True.
        has_sockets=bool(form.get("sockets")),
        has_toilet=bool(form.get("toilet")),
        has_wifi=bool(form.get("wifi")),
        can_take_calls=bool(form.get("calls")),
        seats=form.get("seats"),
        coffee_price=form.get("coffee_price"),
    )
    db.session.add(record)
    db.session.commit()
    return jsonify(response={"success": "Successfully added the new cafe."})
## HTTP PUT/PATCH - Update Record
@app.route("/update-price/<int:cafe_id>", methods=["PATCH"])
def patch_new_price(cafe_id):
    """Update a cafe's coffee price; 200 on success, 404 for unknown ids."""
    cafe = db.session.query(Cafe).get(cafe_id)
    if not cafe:
        # 404 = resource not found
        return jsonify(error={"Not Found": "Sorry a cafe with that id was not found in the database."}), 404
    cafe.coffee_price = request.args.get("new_price")
    db.session.commit()
    # 200 = OK
    return jsonify(response={"success": "Successfully updated the price."}), 200
## HTTP DELETE - Delete Record
@app.route("/report-closed/<int:cafe_id>", methods=["DELETE"])
def delete_cafe(cafe_id):
    """Delete a cafe by id; 200 on success, 404 for unknown ids."""
    cafe = db.session.query(Cafe).get(cafe_id)
    if not cafe:
        return jsonify(error={"Not Found": "Sorry a cafe with that id was not found in the database."}), 404
    db.session.delete(cafe)
    db.session.commit()
    return jsonify(response={"success": "Successfully deleted the cafe from the database."}), 200
if __name__ == '__main__':
    # Run the Flask development server.
    app.run()
| StarcoderdataPython |
6453698 | <filename>server.py
from flask import Flask, request, flash, redirect, render_template
import cv2
import FocFace
import face_recognition
import os
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
UPLOAD_FOLDER = './temp_photos'
app = Flask(__name__)
app.secret_key = "secret key"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
faces = []
@app.route('/show', methods=['GET', 'POST'])
def show():
    """Serialize every known face into the service's single-quoted format.

    NOTE(review): the output uses single quotes and is not strict JSON; kept
    byte-identical for existing clients. A POST falls through and returns
    None (Flask error) — presumably GET-only was intended; verify.
    """
    if request.method == 'GET':
        body = ",".join(face.toJson() for face in faces)
        return "{'faces':[" + body + "]}"
@app.route('/init', methods=['GET', 'POST'])
def init():
    """Reset the in-memory face list and repopulate it from posted JSON."""
    if request.method == 'POST':
        print('Hello in init')
        faces.clear()
        payload = request.get_json()
        for entry in payload.get("faces"):
            FocFace.readFaceFromJson(faces, entry)
        return ""
@app.route('/upload', methods=['GET', 'POST'])
def upload():
    """Accept an image upload, detect its faces, append them to the global
    `faces` list, and return them in the service's single-quoted format."""
    if request.method == 'POST':
        file = request.files['image']
        originalFilename = file.filename
        # NOTE(review): splitext overwrites originalFilename with the stem;
        # file_extension is never used afterwards.
        originalFilename, file_extension = os.path.splitext(originalFilename)
        filename = file.filename
        # Persist temporarily so OpenCV can read it, then remove.
        fullfilename = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(fullfilename)
        image = cv2.imread(fullfilename)
        os.remove(fullfilename)
        newFaces = []
        FocFace.detect(newFaces, originalFilename, image)
        flash('File successfully uploaded')
        addComma = False
        # NOTE: single-quoted output is not strict JSON; kept for clients.
        ret = "{'faces':["
        for face in newFaces:
            if addComma:
                ret += ","
            addComma = True
            ret += face.toJson()
            faces.append(face)
        ret += "]}"
        return ret
@app.route('/search', methods=['GET', 'POST'])
def search():
    """Detect faces in an uploaded image and list known faces within
    distance 0.5 of each detected face."""
    if request.method == 'POST':
        file = request.files['image']
        filename = file.filename
        # Persist temporarily so OpenCV can read it, then remove.
        fullfilename = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(fullfilename)
        image = cv2.imread(fullfilename)
        os.remove(fullfilename)
        newFaces = []
        FocFace.detect(newFaces, "0", image)
        flash('File successfully uploaded')
        known_face_encodings = []
        for face in faces:
            known_face_encodings.append(face.encoding)
        addComma = False
        # NOTE(review): `ret` is re-initialized on every loop iteration, so
        # only the last detected face's matches are returned, while `addComma`
        # persists across faces; and if no face is detected, `ret` is unbound
        # and `return ret` raises UnboundLocalError. Verify intent.
        for newFace in newFaces:
            ret = "{'similar_faces':["
            # Calculate face distance
            face_distances = face_recognition.face_distance(known_face_encodings, newFace.encoding)
            for faceIdx in range(len(face_distances)):
                face_distance = face_distances[faceIdx]
                if face_distance < 0.5:
                    face = faces[faceIdx]
                    if addComma:
                        ret += ","
                    addComma = True
                    ret += "{'ref':'"+str(face.ref)+"',"
                    ret += "'distance':'" + str(face_distance) + "'}"
            ret += "]}"
        return ret
if __name__ == "__main__":
    # Listen on all interfaces (default Flask port 5000).
    app.run(host='0.0.0.0')
| StarcoderdataPython |
160743 | #!/usr/bin/env python3
import os
import json
import torch
from misc_scripts import run_cl_exp, run_rep_exp
from utils import get_mini_imagenet, get_omniglot
from core_functions.vision import evaluate
from core_functions.vision_models import OmniglotCNN, MiniImagenetCNN, ConvBase
from core_functions.maml import MAML
# Global experiment switches (edited by hand between runs).
cuda = True

# Trained model directory to evaluate.
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w5s/maml_5w5s_min_31_03_12h53_1_1434"

# Which analyses to perform on the loaded model.
meta_test = False
eval_iters = False
cl_exp = False
rep_exp = True

# Hyper-parameters for the continual-learning experiment.
cl_params = {
    "adapt_steps": 1,
    "inner_lr": 0.5,
    "n_tasks": 5
}

# Hyper-parameters for the representation-change experiment.
rep_params = {
    "adapt_steps": 1,
    "inner_lr": 0.5,
    "n_tasks": 5,
    "layers": [1, 2, 3, 4, -1],  # MIN
    # "layers": [2, 4] # Omni
}
class Lambda(torch.nn.Module):
    """Wrap an arbitrary callable as an nn.Module so it can sit inside
    nn.Sequential and participate in the module tree."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        return self.fn(x)
def run(path):
    """Load the experiment config stored at `path` and dispatch evaluation.

    Dataset (mini-ImageNet vs Omniglot) and algorithm (MAML vs ANIL) are
    inferred from substrings of the path, mirroring how runs were saved.
    """
    # Initialize
    with open(path + "/logger.json", "r") as f:
        params = json.load(f)['config']
    device = torch.device('cpu')
    torch.manual_seed(params['seed'])
    if cuda and torch.cuda.device_count():
        torch.cuda.manual_seed(params['seed'])
        device = torch.device('cuda')
    # Dataset is inferred from the directory name.
    if "min" in path:
        _, _, test_tasks = get_mini_imagenet(params['ways'], params['shots'])
    else:
        _, _, test_tasks = get_omniglot(params['ways'], params['shots'])
    if "maml" in path:
        run_maml(params, test_tasks, device)
    else:
        run_anil(params, test_tasks, device)
def run_maml(params, test_tasks, device):
    """Evaluate / analyse a trained MAML model.

    Driven by the module-level flags: meta-test every checkpoint
    (`eval_iters`), meta-test the final model (`meta_test`), and/or run the
    continual-learning (`cl_exp`) and representation (`rep_exp`) experiments.
    """
    if 'min' == params['dataset']:
        print('Loading Mini-ImageNet model')
        model = MiniImagenetCNN(params['ways'])
    else:
        print('Loading Omniglot model')
        model = OmniglotCNN(params['ways'])
    # Evaluate the model at every checkpoint
    if eval_iters:
        ckpnt = base_path + "/model_checkpoints/"
        model_ckpnt_results = {}
        for model_ckpnt in os.scandir(ckpnt):
            if model_ckpnt.path.endswith(".pt"):
                print(f'Testing {model_ckpnt.path}')
                res = evaluate_maml(params, model, test_tasks, device, model_ckpnt.path)
                model_ckpnt_results[model_ckpnt.path] = res
        with open(base_path + '/ckpnt_results.json', 'w') as fp:
            json.dump(model_ckpnt_results, fp, sort_keys=True, indent=4)
    final_model = base_path + '/model.pt'
    if meta_test:
        evaluate_maml(params, model, test_tasks, device, final_model)
    # Run a Continual Learning experiment
    if cl_exp:
        print("Running Continual Learning experiment...")
        model.load_state_dict(torch.load(final_model))
        model.to(device)
        maml = MAML(model, lr=cl_params['inner_lr'], first_order=False)
        loss = torch.nn.CrossEntropyLoss(reduction='mean')
        run_cl_exp(base_path, maml, loss, test_tasks, device,
                   params['ways'], params['shots'], cl_params=cl_params)
    # Run a Representation change experiment
    if rep_exp:
        model.load_state_dict(torch.load(final_model))
        model.to(device)
        maml = MAML(model, lr=rep_params['inner_lr'], first_order=False)
        loss = torch.nn.CrossEntropyLoss(reduction='mean')
        print("Running Representation experiment...")
        run_rep_exp(base_path, maml, loss, test_tasks, device,
                    params['ways'], params['shots'], rep_params=rep_params)
def run_anil(params, test_tasks, device):
    """Evaluate / analyse a trained ANIL model (frozen features + MAML head).

    Mirrors run_maml but loads the feature extractor and head from separate
    checkpoint files; the representation experiment only inspects the head.
    """
    # ANIL
    if 'omni' == params['dataset']:
        print('Loading Omniglot model')
        fc_neurons = 128
        features = ConvBase(output_size=64, hidden=32, channels=1, max_pool=False)
    else:
        print('Loading Mini-ImageNet model')
        fc_neurons = 1600
        features = ConvBase(output_size=64, channels=3, max_pool=True)
    # Flatten conv features before the linear head.
    features = torch.nn.Sequential(features, Lambda(lambda x: x.view(-1, fc_neurons)))
    head = torch.nn.Linear(fc_neurons, params['ways'])
    head = MAML(head, lr=params['inner_lr'])
    # Evaluate the model at every checkpoint
    if eval_iters:
        ckpnt = base_path + "/model_checkpoints/"
        model_ckpnt_results = {}
        for model_ckpnt in os.scandir(ckpnt):
            if model_ckpnt.path.endswith(".pt"):
                # Checkpoints come in feature/head pairs; derive the head
                # path from the matching features path.
                if "features" in model_ckpnt.path:
                    features_path = model_ckpnt.path
                    head_path = str.replace(features_path, "features", "head")
                    print(f'Testing {model_ckpnt.path}')
                    res = evaluate_anil(params, features, head, test_tasks, device, features_path, head_path)
                    model_ckpnt_results[model_ckpnt.path] = res
        with open(base_path + '/ckpnt_results.json', 'w') as fp:
            json.dump(model_ckpnt_results, fp, sort_keys=True, indent=4)
    final_features = base_path + '/features.pt'
    final_head = base_path + '/head.pt'
    if meta_test:
        evaluate_anil(params, features, head, test_tasks, device, final_features, final_head)
    if cl_exp:
        print("Running Continual Learning experiment...")
        features.load_state_dict(torch.load(final_features))
        features.to(device)
        head.load_state_dict(torch.load(final_head))
        head.to(device)
        loss = torch.nn.CrossEntropyLoss(reduction='mean')
        run_cl_exp(base_path, head, loss, test_tasks, device,
                   params['ways'], params['shots'], cl_params=cl_params, features=features)
    if rep_exp:
        features.load_state_dict(torch.load(final_features))
        features.to(device)
        head.load_state_dict(torch.load(final_head))
        head.to(device)
        loss = torch.nn.CrossEntropyLoss(reduction='mean')
        # Only check head change
        rep_params['layers'] = [-1]
        print("Running Representation experiment...")
        run_rep_exp(base_path, head, loss, test_tasks, device,
                    params['ways'], params['shots'], rep_params=rep_params, features=features)
def evaluate_maml(params, model, test_tasks, device, path):
    """Load MAML weights from `path` into `model` and run meta-test evaluation."""
    model.load_state_dict(torch.load(path))
    model.to(device)
    maml = MAML(model, lr=params['inner_lr'], first_order=False)
    loss = torch.nn.CrossEntropyLoss(reduction='mean')
    return evaluate(params, test_tasks, maml, loss, device)
def evaluate_anil(params, features, head, test_tasks, device, features_path, head_path):
    """Load ANIL feature-extractor and head weights, then run meta-test evaluation."""
    features.load_state_dict(torch.load(features_path))
    features.to(device)
    head.load_state_dict(torch.load(head_path))
    # Only the head is adapted at meta-test time; features stay fixed.
    head = MAML(head, lr=params['inner_lr'])
    head.to(device)
    loss = torch.nn.CrossEntropyLoss(reduction='mean')
    return evaluate(params, test_tasks, head, loss, device, features=features)
if __name__ == '__main__':
    run(base_path)
    exit()
    # NOTE(review): everything below `exit()` is unreachable — a scratchpad of
    # alternative model paths kept for copy-pasting into `base_path` above.
    # MIN
    # ANIL 5w1s
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w1s/anil_5w1s_min_10_09_10h08_3_8815"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w1s/anil_5w1s_min_10_09_11h06_2_2906"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w1s/anil_5w1s_min_10_09_11h59_1_1374"
    # MAML 5w1s
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w1s/maml_5w1s_min_10_09_12h58_3_2722"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w1s/maml_5w1s_min_10_09_15h12_1_9323"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w1s/maml_5w1s_min_10_09_17h09_2_6302"
    # ANIL 5w5s
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w5s/anil_5w5s_min_11_09_00h36_1_6461"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w5s/anil_5w5s_min_11_09_03h38_2_8655"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w5s/anil_5w5s_min_11_09_05h56_3_6285"
    # MAML 5w5s
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w5s/maml_5w5s_min_31_03_12h53_1_1434"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w5s/maml_5w5s_min_31_03_12h54_2_1671"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w5s/maml_5w5s_min_31_03_12h54_3_2104"
    # Omni
    # ANIL 20w1s
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w1s/anil_20w1s_omni_06_09_11h17_1_4305"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w1s/anil_20w1s_omni_06_09_11h17_2_8126"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w1s/anil_20w1s_omni_06_09_11h17_3_4772"
    # MAML 20w1s
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w1s/maml_20w1s_omni_31_03_10h18_1_9247"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w1s/maml_20w1s_omni_31_03_10h21_2_302"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w1s/maml_20w1s_omni_31_03_10h22_3_7628"
    # ANIL 20w5s
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w5s/anil/anil_20w5s_omni_09_09_13h23_2_4977"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w5s/anil/anil_20w5s_omni_09_09_13h24_1_775"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w5s/anil/anil_20w5s_omni_09_09_14h31_3_5663"
    # MAML 20w5s
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w5s/maml/maml_20w5s_omni_31_03_10h23_1_6864"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w5s/maml/maml_20w5s_omni_31_03_10h24_2_1576"
    base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w5s/maml/maml_20w5s_omni_31_03_10h24_3_8259"
| StarcoderdataPython |
1816000 |
class Matrix:
    """Parse a whitespace/newline matrix string into rows of ints, with
    1-indexed row and column accessors."""

    def __init__(self, matrix_string):
        self.lista = [
            [int(token) for token in line.split()]
            for line in matrix_string.splitlines()
        ]

    def row(self, fila):
        """Return a copy of the 1-indexed row `fila`."""
        return self.lista[fila - 1][:]

    def column(self, column):
        """Return the 1-indexed column `column` as a new list."""
        return [fila[column - 1] for fila in self.lista]
# Demo: build a 3x3 matrix and echo a user-chosen row and column
# (prompts are in Spanish: "fila" = row, "columna" = column).
matriz = Matrix("9 8 7 \n5 3 2 \n6 6 7")
print(matriz.lista)
fila=input("fila a solicitar: " )
columna=input("Columna a solicitra: ")
print("Fila ", matriz.row(int(fila)))
print("Columna ", matriz.column(int(columna)))
3367881 | from pyframework.exceptions.custom_exceptions import ArgumentException
from pyframework.helpers.lists import array_column
from .base_fire import BaseFire, Event
from ...models.city_endpoint import CityEndpoint
from ...models.restaurant import Restaurant
class FireRestaurantsInfoDownload(BaseFire):
    """Command that fires a RESTAURANTS_INFO_DOWNLOAD_ACTION event carrying
    every restaurant id of a city, for one endpoint enabled on that city."""
    _name = 'fire:restaurantsInfoDownload'
    _description = 'Launch an event to download entity information.'
    # CLI flags: [short, long, help].
    _arguments = [
        ['-e', '--endpoint', 'Endpoint ID to be fired.'],
        ['-c', '--city', 'City ID to be fired.'],
    ]
    # NOTE(review): these class attributes hold the *type* `int` / a shared
    # list as placeholders; they are rebound per-instance in set_up().
    # `_restaurants = []` is mutable class state — verify it is never
    # mutated in place across instances.
    _city_id = int
    """City to be downloaded. """
    _restaurants = []
    """Restaurants to be downloaded. """
    _endpoint_id = int
    """Endpoint to be downloaded. """
    def set_up(self):
        """Validate CLI arguments and load the city's restaurants.

        Raises ArgumentException when an argument is missing or when the
        endpoint is not enabled for the requested city.
        """
        self._city_id = self.get_argument('city')
        if not self._city_id:
            raise ArgumentException('City ID is required.')
        self._city_id = int(self._city_id)
        self._endpoint_id = self.get_argument('endpoint')
        if self._endpoint_id is None:
            raise ArgumentException('Endpoint ID is required.')
        self._endpoint_id = int(self._endpoint_id)
        download = CityEndpoint().get_downloads(self._city_id)
        if not any([self._endpoint_id == task['endpoint_id'] for task in download]):
            raise ArgumentException('Endpoint {} not enabled on city {}.'.format(self._endpoint_id, self._city_id))
        self._restaurants = Restaurant().get_restaurants_on_city(self._city_id)
    def handle(self) -> int:
        """Fire the download event with the restaurant ids and endpoint id."""
        info = {
            'restaurants_ids': array_column(self._restaurants, 'id'),
            'endpoint_id': self._endpoint_id,
        }
        self._fire_event(Event.RESTAURANTS_INFO_DOWNLOAD_ACTION, info)
        return self.RETURN_SUCCESS
| StarcoderdataPython |
1925266 | # -*- coding: utf-8 -*-
"""
Leetcode - Find the Difference
https://leetcode.com/problems/find-the-difference
Created on Sat Nov 3 19:11:50 2018
Updated on Wed Nov 28 12:25:06 2018
@author: <NAME>
"""
## REQUIRED MODULES
import sys
## MODULE DEFINITIONS
class Solution:
    """XOR-based solution for "Find the Difference".

    XOR-ing the code points of both strings cancels every duplicated
    character, leaving only the one extra character in `t`.
    Time O(n), space O(1).
    """

    def find_difference(self, s, t):
        """Return the single character present in `t` but not in `s`.

        :param str s: first input string
        :param str t: second input string (one extra character)
        :return: the extra character
        :rtype: str
        """
        n = len(s)
        # Seed with t's extra trailing character, then cancel matched pairs.
        code = ord(t[n])
        for a, b in zip(s, t):
            code ^= ord(a) ^ ord(b)
        return chr(code)
class Input:
    """Parser for the problem's standard-input format."""

    def stdin(self, sys_stdin):
        """Read the two input strings from standard input.

        :param sys_stdin: iterable of input lines
        :return: first and second input strings
        :rtype: tuple[str, str]
        """
        # Strip surrounding brackets/quotes and the trailing newline.
        cleaned = [line.strip("[]\"\n") for line in sys_stdin]
        return cleaned[0], cleaned[1]
## MAIN MODULE
if __name__ == "__main__":
    # Imports standard input
    s, t = Input()\
        .stdin(sys.stdin)
    # Evaluates solution
    z = Solution()\
        .find_difference(s, t)
    # Print the single differing character.
    print(z)
## END OF FILE | StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.